Merge branch 'cassandra-4.0' into cassandra-4.1
diff --git a/.build/build-rat.xml b/.build/build-rat.xml
index cab1d4f..49d20cc 100644
--- a/.build/build-rat.xml
+++ b/.build/build-rat.xml
@@ -51,14 +51,18 @@
         <rat:report reportFile="${build.dir}/rat.txt">
             <fileset dir="." includesfile="${build.dir}/.ratinclude">
                  <!-- Config files with not much creativity -->
+                 <exclude name="**/.asf.yaml"/>
                  <exclude name="**/ide/**/*"/>
                  <exclude name="**/metrics-reporter-config-sample.yaml"/>
                  <exclude name="**/cassandra.yaml"/>
                  <exclude name="**/cassandra-murmur.yaml"/>
                  <exclude name="**/cassandra-seeds.yaml"/>
+                 <exclude name="**/harry-generic.yaml"/>
                  <exclude NAME="**/doc/antora.yml"/>
                  <exclude name="**/test/conf/cassandra.yaml"/>
-                 <exclude name="**/test/conf/cassandra_deprecated_parameters_names.yaml"/>
+                 <exclude name="**/test/conf/cassandra-old.yaml"/>
+                 <exclude name="**/test/conf/cassandra-converters-special-cases-old-names.yaml"/>
+                 <exclude name="**/test/conf/cassandra-converters-special-cases.yaml"/>
                  <exclude name="**/test/conf/cassandra_encryption.yaml"/>
                  <exclude name="**/test/conf/cdc.yaml"/>
                  <exclude name="**/test/conf/commitlog_compression_LZ4.yaml"/>
@@ -69,12 +73,18 @@
                  <exclude name="**/test/data/jmxdump/cassandra-3.0-jmx.yaml"/>
                  <exclude name="**/test/data/jmxdump/cassandra-3.11-jmx.yaml"/>
                  <exclude name="**/test/data/jmxdump/cassandra-4.0-jmx.yaml"/>
+                 <exclude name="**/test/data/jmxdump/cassandra-4.1-jmx.yaml"/>
+                 <exclude name="**/test/data/config/version=3.0.0-alpha1.yml"/>
+                 <exclude name="**/test/data/config/version=3.11.0.yml"/>
+                 <exclude name="**/test/data/config/version=4.0-alpha1.yml"/>
+                 <exclude name="**/test/resources/data/config/YamlConfigurationLoaderTest/shared_client_error_reporting_exclusions.yaml"/>
                  <exclude name="**/tools/cqlstress-counter-example.yaml"/>
                  <exclude name="**/tools/cqlstress-example.yaml"/>
                  <exclude name="**/tools/cqlstress-insanity-example.yaml"/>
                  <exclude name="**/tools/cqlstress-lwt-example.yaml"/>
                  <!-- Documentation files -->
                  <exclude NAME="**/doc/modules/**/*"/>
+                 <exclude NAME="**/src/java/**/*.md"/>
                  <!-- NOTICE files -->
                  <exclude NAME="**/NOTICE.md"/>
                  <!-- LICENSE files -->
diff --git a/.build/build-resolver.xml b/.build/build-resolver.xml
index 03db1c8..c38a272 100644
--- a/.build/build-resolver.xml
+++ b/.build/build-resolver.xml
@@ -56,6 +56,8 @@
         <resolver:remoterepos id="all">
             <remoterepo id="resolver-central" url="${artifact.remoteRepository.central}"/>
             <remoterepo id="resolver-apache" url="${artifact.remoteRepository.apache}"/>
+            <!-- Snapshots are not allowed, but for feature branches they may be needed, so uncomment the line below to allow snapshots to work -->
+            <!-- <remoterepo id="resolver-apache-snapshot" url="https://repository.apache.org/content/repositories/snapshots" releases="false" snapshots="true" updates="always" checksums="fail" /> -->
         </resolver:remoterepos>
 
         <macrodef name="resolve">
@@ -199,6 +201,7 @@
         <!-- files.pythonhosted.org -->
         <get src="${artifact.python.pypi}/59/a0/cf4cd997e1750f0c2d91c6ea5abea218251c43c3581bcc2f118b00baf5cf/futures-2.1.6-py2.py3-none-any.whl" dest="${local.repository}/org/apache/cassandra/deps/futures-2.1.6-py2.py3-none-any.zip" usetimestamp="true" quiet="true" skipexisting="true"/>
         <get src="${artifact.python.pypi}/73/fb/00a976f728d0d1fecfe898238ce23f502a721c0ac0ecfedb80e0d88c64e9/six-1.12.0-py2.py3-none-any.whl" dest="${local.repository}/org/apache/cassandra/deps/six-1.12.0-py2.py3-none-any.zip" usetimestamp="true" quiet="true" skipexisting="true"/>
+        <get src="${artifact.python.pypi}/37/b2/ef1124540ee2c0b417be8d0f74667957e6aa084a3f26621aa67e2e77f3fb/pure_sasl-0.6.2-py2-none-any.whl" dest="${local.repository}/org/apache/cassandra/deps/pure_sasl-0.6.2-py2-none-any.zip" usetimestamp="true" quiet="true" skipexisting="true"/>
 
         <!-- apache/cassandra/lib -->
         <get src="${lib.download.base.url}/lib/geomet-0.1.0.zip" dest="${local.repository}/org/apache/cassandra/deps/geomet-0.1.0.zip" usetimestamp="true" quiet="true" skipexisting="true"/>
@@ -223,15 +226,13 @@
             <url url="${lib.download.base.url}/lib/sigar-bin/libsigar-x86-freebsd-6.so"/>
             <url url="${lib.download.base.url}/lib/sigar-bin/libsigar-x86-linux.so"/>
             <url url="${lib.download.base.url}/lib/sigar-bin/libsigar-x86-solaris.so"/>
-            <url url="${lib.download.base.url}/lib/sigar-bin/sigar-amd64-winnt.dll"/>
-            <url url="${lib.download.base.url}/lib/sigar-bin/sigar-x86-winnt.dll"/>
-            <url url="${lib.download.base.url}/lib/sigar-bin/sigar-x86-winnt.lib"/>
         </get>
         
         <copy todir="${build.lib}" quiet="true">
             <file file="${local.repository}/org/apache/cassandra/deps/futures-2.1.6-py2.py3-none-any.zip"/>
             <file file="${local.repository}/org/apache/cassandra/deps/six-1.12.0-py2.py3-none-any.zip"/>
             <file file="${local.repository}/org/apache/cassandra/deps/geomet-0.1.0.zip"/>
+            <file file="${local.repository}/org/apache/cassandra/deps/pure_sasl-0.6.2-py2-none-any.zip"/>
         </copy>
         <copy todir="${build.lib}/sigar-bin/" quiet="true">
             <file file="${local.repository}/org/apache/cassandra/deps/sigar-bin/libsigar-amd64-freebsd-6.so"/>
@@ -254,9 +255,6 @@
             <file file="${local.repository}/org/apache/cassandra/deps/sigar-bin/libsigar-x86-freebsd-6.so"/>
             <file file="${local.repository}/org/apache/cassandra/deps/sigar-bin/libsigar-x86-linux.so"/>
             <file file="${local.repository}/org/apache/cassandra/deps/sigar-bin/libsigar-x86-solaris.so"/>
-            <file file="${local.repository}/org/apache/cassandra/deps/sigar-bin/sigar-amd64-winnt.dll"/>
-            <file file="${local.repository}/org/apache/cassandra/deps/sigar-bin/sigar-x86-winnt.dll"/>
-            <file file="${local.repository}/org/apache/cassandra/deps/sigar-bin/sigar-x86-winnt.lib"/>
         </copy>
     </target>
 </project>
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1bae80b..b009cd1 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -83,7 +83,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -118,6 +118,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -131,6 +133,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -169,111 +172,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=fqltool-test
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant fqltool-test $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=fqltool-test\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
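Editor's note on the folded command above: the per-runner iteration count is an integer division of the total repeat count across the CircleCI containers, with the remainder spread over the lowest-indexed nodes. A minimal standalone Bash sketch with assumed values (the job itself reads REPEATED_UTESTS_FQLTOOL_COUNT and the CIRCLE_NODE_* variables from the environment):

    # Assumed values for illustration only: 500 repeats split over 3 containers, running as node 1.
    REPEATED_UTESTS_FQLTOOL_COUNT=500; CIRCLE_NODE_TOTAL=3; CIRCLE_NODE_INDEX=1
    count=$((REPEATED_UTESTS_FQLTOOL_COUNT / CIRCLE_NODE_TOTAL))   # 166
    if ((CIRCLE_NODE_INDEX < (REPEATED_UTESTS_FQLTOOL_COUNT % CIRCLE_NODE_TOTAL))); then
      count=$((count+1))                                           # nodes 0 and 1 run one extra iteration
    fi
    echo "$count"   # prints 167 here; node 2 keeps 166, so 167 + 167 + 166 = 500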
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -310,6 +209,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -323,76 +224,32 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
-  j8_cqlsh-dtests-py2-with-vnodes:
+  j11_cqlshlib_cython_tests:
     docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
     resource_class: medium
     working_directory: ~/
     shell: /bin/bash -eo pipefail -l
-    parallelism: 4
+    parallelism: 1
     steps:
     - attach_workspace:
         at: /home/cassandra
     - run:
-        name: Clone Cassandra dtest Repository (via git)
+        name: Run cqlshlib Unit Tests
         command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
           export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_with_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_with_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_with_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_with_vnodes_raw /tmp/all_dtest_tests_j8_with_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_with_vnodes_raw > /tmp/all_dtest_tests_j8_with_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_with_vnodes > /tmp/split_dtest_tests_j8_with_vnodes.txt\ncat /tmp/split_dtest_tests_j8_with_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_with_vnodes_final.txt\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j8_with_vnodes)
+          export cython="yes"
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
     - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_with_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_with_vnodes_logs
+        path: /tmp/cassandra/pylib
     environment:
     - ANT_HOME: /usr/share/ant
     - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
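For reference, a minimal local equivalent of the new j11_cqlshlib_cython_tests job above, as a hedged sketch; the checkout path is an assumption (the job itself first moves the tree to /tmp/cassandra), while the env var and script invocation are the ones the job uses:

    export cython="yes"                          # enables the Cython run of the cqlshlib tests
    cd ~/cassandra                               # assumed local checkout location
    ./pylib/cassandra-cqlsh-tests.sh "$(pwd)"    # same script and argument the job invokes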
@@ -418,6 +275,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -431,9 +290,11 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
   j8_cqlsh_dtests_py311_vnode:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -487,7 +348,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -525,6 +386,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -538,6 +401,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -590,7 +454,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -678,6 +542,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -691,6 +557,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -728,7 +595,7 @@
     - run:
         name: Run dtests (j8_large_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -762,6 +629,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -775,6 +644,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -843,7 +713,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-system-keyspace-directory -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-system-keyspace-directory   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -878,6 +748,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -891,6 +763,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -949,6 +822,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -962,6 +837,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1000,111 +876,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=stress-test-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant stress-test-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=stress-test-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant stress-test-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if 
[[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
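Editor's note on the test-name handling folded into the command above, as a hedged sketch with a made-up test name (the class is hypothetical, chosen only to show the splitting):

    test="org.apache.cassandra.stress.generate.DistributionGaussianTest#simpleGaussian"   # hypothetical test name
    class=${test%"#"*}     # org.apache.cassandra.stress.generate.DistributionGaussianTest
    method=${test#*"#"}    # simpleGaussian
    # stress-test-some does not match any entry in the short-name target list in the script,
    # so the else branch keeps the fully qualified class name:
    name_arg="-Dtest.name=$class"
    methods_arg="-Dtest.methods=$method"
    # a short-name target such as stress-test would instead pass -Dtest.name=${class##*.}, i.e. DistributionGaussianTest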
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1141,6 +913,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1154,6 +928,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1211,7 +986,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1249,6 +1024,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1262,10 +1039,130 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_jvm_dtests_vnode:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine distributed Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/distributed/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/distributed/;;g" | grep "Test\.java$" | grep -v upgrade > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
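A hedged sketch of running the new j8_jvm_dtests_vnode job's ant step by hand, mirroring the invocation above; the classlist file and its single entry are assumptions for illustration, and the checkout path is a local-run assumption (the job itself moves the tree to /tmp/cassandra):

    cd ~/cassandra                                                                # assumed local checkout
    echo "org/apache/cassandra/distributed/test/SomeJvmDTest.java" > /tmp/java_tests_0_final.txt   # hypothetical entry, relative to test/distributed/
    test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
    if [ -z "$test_timeout" ]; then
      test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
    fi
    # 16 tokens are passed to the in-JVM dtests via test.jvm.args, as in the job above
    ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dtest.timeout="$test_timeout" \
        -Dtest.classlistfile=/tmp/java_tests_0_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true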
   j8_utests_compression_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -1301,111 +1198,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-compression
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-compression $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-compression\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-compression $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1442,6 +1235,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1455,6 +1250,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1523,7 +1319,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -1558,6 +1354,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1571,6 +1369,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1628,7 +1427,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1666,6 +1465,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1679,6 +1480,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1735,7 +1537,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1773,6 +1575,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1786,6 +1590,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1825,111 +1630,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-compression
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-compression $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-compression\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-compression $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1966,6 +1667,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1979,6 +1682,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2059,7 +1763,8 @@
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
-                    $target == "stress-test" ]]; then
+                    $target == "stress-test" || \
+                    $target == "test-simulator-dtest" ]]; then
                 name="-Dtest.name=$class_name"
               else
                 name="-Dtest.name=$class_path"
@@ -2072,6 +1777,12 @@
                 methods="-Dtest.methods=${REPEATED_ANT_TEST_METHODS}"
               fi
 
+              # Prepare the JVM dtests vnodes argument, which is optional
+              vnodes_args=""
+              if ${REPEATED_ANT_TEST_VNODES}; then
+                vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+              fi
+
               # Run the test target as many times as requested collecting the exit code,
               # stopping the iteration only if stop_on_failure is set.
               exit_code="$?"
@@ -2081,7 +1792,7 @@
 
                 # run the test
                 status="passes"
-                if !( set -o pipefail && ant $target $name $methods -Dno-build-test=true | tee stdout.txt ); then
+                if !( set -o pipefail && ant $target $name $methods $vnodes_args -Dno-build-test=true | tee stdout.txt ); then
                   status="fails"
                   exit_code=1
                 fi
@@ -2152,6 +1863,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2165,6 +1878,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2202,7 +1916,7 @@
     - run:
         name: Run dtests (j11_large_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -2236,6 +1950,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2249,6 +1965,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2280,7 +1997,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -2368,6 +2085,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2381,6 +2100,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2438,7 +2158,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2476,6 +2196,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2489,6 +2211,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -2545,7 +2268,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2583,6 +2306,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2596,6 +2321,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2633,7 +2359,7 @@
     - run:
         name: Run dtests (j11_large_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -2667,6 +2393,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2680,6 +2408,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2719,111 +2448,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-system-keyspace-directory
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-system-keyspace-directory $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-system-keyspace-directory\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-system-keyspace-directory $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n   
   if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2860,6 +2485,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2873,6 +2500,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2930,7 +2558,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2968,6 +2596,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2981,6 +2611,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3037,7 +2668,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -3075,6 +2706,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3088,10 +2721,76 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_cqlshlib_cython_tests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Run cqlshlib Unit Tests
+        command: |
+          export PATH=$JAVA_HOME/bin:$PATH
+          export cython="yes"
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/pylib
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j11_utests_cdc:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -3157,7 +2856,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-cdc -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-cdc   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3192,6 +2891,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3205,6 +2906,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3263,6 +2965,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3276,6 +2980,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3345,7 +3050,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-system-keyspace-directory -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-system-keyspace-directory   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3380,6 +3085,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3393,6 +3100,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3423,7 +3131,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -3511,6 +3219,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3524,113 +3234,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j8_cqlsh-dtests-py2-offheap:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_dtests_offheap)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_dtests_offheap)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_dtests_offheap_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_dtests_offheap_raw /tmp/all_dtest_tests_j8_dtests_offheap\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_dtests_offheap_raw > /tmp/all_dtest_tests_j8_dtests_offheap || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_dtests_offheap > /tmp/split_dtest_tests_j8_dtests_offheap.txt\ncat /tmp/split_dtest_tests_j8_dtests_offheap.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n"
-    - run:
-        name: Run dtests (j8_dtests_offheap)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt"
-          cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_dtests_offheap
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_dtests_offheap_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3661,7 +3265,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -3749,6 +3353,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3762,6 +3368,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3793,7 +3400,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -3881,6 +3488,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3894,6 +3503,242 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  j11_jvm_dtests_vnode:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine distributed Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/distributed/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/distributed/;;g" | grep "Test\.java$" | grep -v upgrade > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
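The glob/split/sed pipeline above is what gives each parallel container its own slice of the unit tests: circleci tests split balances the slices by historical timings, and the sed/grep pass rewrites absolute paths into the relative form that the ant testclasslist targets expect. A small illustration of the rewrite (the test class path is hypothetical):

    # One absolute path from the glob ...
    echo "/home/cassandra/cassandra/test/unit/org/apache/cassandra/db/SomeTest.java" \
      | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"
    # ... becomes: org/apache/cassandra/db/SomeTest.java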
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest -Dno-build-test=true
+        no_output_timeout: 30m
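The -Dno-build-test=true flag here assumes the test classes were already compiled by the upstream build job whose output is attached from the workspace. A hedged sketch of an equivalent run outside CI, where no pre-built workspace exists and the flag would simply be omitted so ant compiles the tests itself (paths illustrative):

    export PATH=$JAVA_HOME/bin:$PATH
    cd ~/cassandra              # hypothetical local checkout
    ant test-simulator-dtest    # without -Dno-build-test, ant builds the tests first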
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3962,7 +3807,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-compression -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-compression   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3997,6 +3842,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4010,6 +3857,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4067,6 +3915,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4080,6 +3930,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -4119,111 +3970,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
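The per-runner iteration count in the script above is a simple quotient-plus-remainder split: each of the CIRCLE_NODE_TOTAL containers runs REPEATED_UTESTS_COUNT / CIRCLE_NODE_TOTAL iterations, and the first REPEATED_UTESTS_COUNT % CIRCLE_NODE_TOTAL containers pick up one extra so the total comes out exact. A toy check of that arithmetic (the values are made up):

    REPEATED_UTESTS_COUNT=500
    CIRCLE_NODE_TOTAL=3
    CIRCLE_NODE_INDEX=0
    count=$((REPEATED_UTESTS_COUNT / CIRCLE_NODE_TOTAL))             # 166
    if ((CIRCLE_NODE_INDEX < (REPEATED_UTESTS_COUNT % CIRCLE_NODE_TOTAL))); then
      count=$((count+1))                                             # indexes 0 and 1 get one extra
    fi
    echo "$count"   # 167; the three containers run 167 + 167 + 166 = 500 iterations in total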
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4260,6 +4007,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4273,6 +4022,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4309,7 +4059,7 @@
     - run:
         name: Run dtests (j8_large_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
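The "set -o pipefail" mentioned in the command above is what lets the job fail on a pytest failure even though pytest's output is piped through tee (which would otherwise supply the pipeline's exit status). A toy demonstration of the difference:

    ( false | tee /dev/null ); echo "without pipefail: $?"            # prints 0 (tee's status)
    ( set -o pipefail; false | tee /dev/null ); echo "with pipefail: $?"   # prints 1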
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -4343,6 +4093,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4356,113 +4108,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j8_cqlsh-dtests-py2-no-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_without_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_without_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_without_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_without_vnodes_raw /tmp/all_dtest_tests_j8_without_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_without_vnodes_raw > /tmp/all_dtest_tests_j8_without_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_without_vnodes > /tmp/split_dtest_tests_j8_without_vnodes.txt\ncat /tmp/split_dtest_tests_j8_without_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_without_vnodes_final.txt\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j8_without_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_without_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_without_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4520,6 +4166,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4533,6 +4181,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -4590,7 +4239,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -4628,6 +4277,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4641,6 +4292,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4671,7 +4323,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_UPGRADE_DTESTS}" == "<nil>" ]; then
@@ -4759,6 +4411,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4772,117 +4426,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-with-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_with_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_with_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_with_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_with_vnodes_raw /tmp/all_dtest_tests_j11_with_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_with_vnodes_raw > /tmp/all_dtest_tests_j11_with_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_with_vnodes > /tmp/split_dtest_tests_j11_with_vnodes.txt\ncat /tmp/split_dtest_tests_j11_with_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_with_vnodes_final.txt\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j11_with_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_with_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_with_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j11_utests_cdc_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -4918,111 +4465,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-cdc
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-cdc $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-cdc\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-cdc $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
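One detail of the script above that is easy to miss: the test macro in build.xml groups its XML and log output under a per-target subdirectory, so the script derives testtag ("cdc" for the test-cdc target here) and then collects from build/test/output/${testtag} and build/test/logs/${testtag}. A sketch of where a single iteration's results end up, per the paths in the script (the iteration number is illustrative):

    # after iteration 003 of a passing run of the test-cdc target:
    #   stdout -> /tmp/results/repeated_utests/stdout/passes/003/<TestName>.txt
    #   junit  -> /tmp/results/repeated_utests/output/passes/003/
    #   logs   -> /tmp/results/repeated_utests/logs/passes/003/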
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -5059,6 +4502,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5072,6 +4517,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5125,7 +4571,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -5213,6 +4659,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5226,6 +4674,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5265,111 +4714,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=fqltool-test
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant fqltool-test $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=fqltool-test\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
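Another aspect of the repeated-run script: for the targets listed in its case above (test, test-cdc, test-compression, fqltool-test, long-test, stress-test, test-simulator-dtest, ...), -Dtest.name takes the short class name, while other targets take the fully qualified name, hence the ${class##*.} stripping. A quick illustration of the parameter expansions involved (the test spec is hypothetical):

    test="org.apache.cassandra.fqltool.FQLCompareTest#someMethod"
    class=${test%"#"*}       # org.apache.cassandra.fqltool.FQLCompareTest
    method=${test#*"#"}      # someMethod
    echo "${class##*.}"      # FQLCompareTest (short name used for targets such as fqltool-test)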
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -5406,6 +4751,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5419,6 +4766,98 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  j8_jvm_dtests_vnode_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=true\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
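What distinguishes this repeat job is the hard-coded vnodes=true in the script above: when set, the script appends -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' so the in-JVM dtests start with 16 tokens per node, matching the --num-tokens=16 used by the vnode Python dtest jobs. A minimal sketch of the toggle, mirroring the script:

    vnodes=true
    vnodes_args=""
    if [ "$vnodes" = true ]; then
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    # $name_arg would hold -Dtest.name=<fully.qualified.TestClass>, prepared earlier in the script
    ant test-jvm-dtest-some $name_arg $vnodes_args -Dno-build-test=true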
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -5487,7 +4926,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-compression -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-compression   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -5522,6 +4961,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5535,6 +4976,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5592,7 +5034,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5630,6 +5072,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5643,6 +5087,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -5699,7 +5144,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5737,6 +5182,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5750,10 +5197,102 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_SIMULATOR_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-simulator-dtest\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-simulator-dtest $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test 
failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j11_cqlsh_dtests_py311_offheap:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -5807,7 +5346,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5845,6 +5384,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5858,6 +5399,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5897,111 +5439,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-system-keyspace-directory
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-system-keyspace-directory $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-system-keyspace-directory\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-system-keyspace-directory $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n   
   if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -6038,6 +5476,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6051,6 +5491,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6081,7 +5522,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -6169,6 +5610,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6182,6 +5625,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6239,7 +5683,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -6277,6 +5721,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6290,6 +5736,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6328,111 +5775,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=stress-test-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant stress-test-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=stress-test-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant stress-test-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if 
[[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -6469,6 +5812,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6482,6 +5827,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6512,7 +5858,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -6600,6 +5946,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6613,6 +5961,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6669,7 +6018,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -6707,6 +6056,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6720,6 +6071,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6757,7 +6109,7 @@
     - run:
         name: Run dtests (j8_upgradetests_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_upgradetests_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_upgradetests_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -6791,6 +6143,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6804,6 +6158,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6862,7 +6217,7 @@
     - run:
         name: Run dtests (j11_dtests_offheap)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -6896,6 +6251,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6909,6 +6266,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6948,111 +6306,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_UPGRADE_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_UPGRADE_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on 
test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -7089,6 +6343,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7102,6 +6358,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7158,7 +6415,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -7196,6 +6453,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7209,6 +6468,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7248,111 +6508,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
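For reference, the per-runner iteration arithmetic embedded in the repeated-run commands above splits the requested count evenly across the CircleCI parallel runners and hands the remainder to the lowest-indexed ones. A stand-alone sketch with illustrative numbers:

    # 10 requested iterations over 4 runners: runners 0 and 1 get 3, runners 2 and 3 get 2
    REPEATED_JVM_DTESTS_COUNT=10
    CIRCLE_NODE_TOTAL=4
    for CIRCLE_NODE_INDEX in 0 1 2 3; do
      count=$((REPEATED_JVM_DTESTS_COUNT / CIRCLE_NODE_TOTAL))
      if ((CIRCLE_NODE_INDEX < (REPEATED_JVM_DTESTS_COUNT % CIRCLE_NODE_TOTAL))); then
        count=$((count + 1))
      fi
      echo "runner ${CIRCLE_NODE_INDEX}: ${count} iterations"
    done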
@@ -7389,6 +6545,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7402,6 +6560,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7459,7 +6618,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
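The switch from --log-cli-level to --log-level in these pytest invocations changes only how DEBUG records are surfaced: --log-cli-level turns on pytest's live logging and streams every captured record to the console, while --log-level just sets the capture threshold, so records remain available to the captured-log sections of the report without flooding stdout. Roughly:

    # streams DEBUG records to the terminal while the tests run (live logging on)
    pytest --log-cli-level=DEBUG ...
    # captures DEBUG records for reporting, keeps the console quieter
    pytest --log-level="DEBUG" ...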
@@ -7497,6 +6656,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7510,6 +6671,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7549,111 +6711,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_LONG} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=long-testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant long-testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_LONG} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=long-testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant long-testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
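The command rewrites in this and the earlier repeated-run hunks are largely a serialization change plus two functional additions: the generated config now emits the script as a single double-quoted flow scalar instead of a literal block scalar, and the script gains the optional vnodes arguments and a test-simulator-dtest entry in the short-class-name target list. The two YAML forms encode the same string, e.g.:

    # literal block scalar
    command: |
      set -x
      echo "done"

    # equivalent double-quoted flow scalar
    command: "set -x\necho \"done\"\n"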
@@ -7690,6 +6748,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7703,6 +6763,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7739,7 +6800,7 @@
     - run:
         name: Run dtests (j8_dtests_offheap)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -7773,6 +6834,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7786,6 +6849,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7854,7 +6918,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
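The doubled space after testclasslist above presumably comes from an empty optional-arguments slot in the template that generates this config rather than from a deliberate change; since the shell treats the extra whitespace as an ordinary word separator, the invocation behaves the same. A hypothetical sketch of that interpolation:

    test_timeout=240000   # illustrative value
    extra_args=""         # hypothetical empty placeholder for the plain unit-test variant
    echo ant testclasslist $extra_args -Dtest.timeout="$test_timeout" -Dno-build-test=true
    # prints: ant testclasslist -Dtest.timeout=240000 -Dno-build-test=true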
@@ -7889,6 +6953,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7902,6 +6968,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7970,7 +7037,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -8005,6 +7072,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8018,6 +7087,99 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
+  j11_jvm_dtests_vnode_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=true\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
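The new j11_jvm_dtests_vnode_repeat job mirrors the existing JVM-dtest repeat job except that its embedded script hard-codes vnodes=true, which appends the num_tokens system property to every ant run. Reduced to its essentials (the test class is a placeholder):

    vnodes=true
    vnodes_args=""
    if [ "$vnodes" = true ]; then
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    echo ant test-jvm-dtest-some -Dtest.name=SomePlaceholderTest $vnodes_args -Dno-build-test=true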
@@ -8111,6 +7273,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8124,6 +7288,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8161,7 +7326,7 @@
     - run:
         name: Run dtests (j8_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8195,6 +7360,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8208,117 +7375,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-no-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_without_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_without_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_without_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_without_vnodes_raw /tmp/all_dtest_tests_j11_without_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_without_vnodes_raw > /tmp/all_dtest_tests_j11_without_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_without_vnodes > /tmp/split_dtest_tests_j11_without_vnodes.txt\ncat /tmp/split_dtest_tests_j11_without_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_without_vnodes_final.txt\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j11_without_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_without_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_without_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j8_dtests_vnode:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -8352,7 +7412,7 @@
     - run:
         name: Run dtests (j8_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8386,6 +7446,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8399,6 +7461,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -8417,8 +7480,8 @@
         command: |
           export PATH=$JAVA_HOME/bin:$PATH
           time mv ~/cassandra /tmp
-          cd /tmp/cassandra/pylib
-          ./cassandra-cqlsh-tests.sh ..
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/pylib
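The cqlsh test step now runs the wrapper from the repository root and passes it an absolute path via $(pwd) instead of the relative '..'; presumably this keeps the Cassandra directory argument valid even if the script changes its working directory. Side by side:

    # old invocation: relative parent directory, resolved from pylib
    cd /tmp/cassandra/pylib && ./cassandra-cqlsh-tests.sh ..
    # new invocation: absolute path, passed from the repository root
    cd /tmp/cassandra && ./pylib/cassandra-cqlsh-tests.sh "$(pwd)"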
@@ -8447,6 +7510,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8460,6 +7525,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8499,111 +7565,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -8640,6 +7602,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8653,6 +7617,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -8711,7 +7676,7 @@
     - run:
         name: Run dtests (j11_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8745,6 +7710,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8758,6 +7725,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8838,7 +7806,8 @@
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
-                    $target == "stress-test" ]]; then
+                    $target == "stress-test" || \
+                    $target == "test-simulator-dtest" ]]; then
                 name="-Dtest.name=$class_name"
               else
                 name="-Dtest.name=$class_path"
@@ -8851,6 +7820,12 @@
                 methods="-Dtest.methods=${REPEATED_ANT_TEST_METHODS}"
               fi
 
+              # Prepare the JVM dtests vnodes argument, which is optional
+              vnodes_args=""
+              if ${REPEATED_ANT_TEST_VNODES}; then
+                vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+              fi
+
               # Run the test target as many times as requested collecting the exit code,
               # stopping the iteration only if stop_on_failure is set.
               exit_code="$?"
@@ -8860,7 +7835,7 @@
 
                 # run the test
                 status="passes"
-                if !( set -o pipefail && ant $target $name $methods -Dno-build-test=true | tee stdout.txt ); then
+                if !( set -o pipefail && ant $target $name $methods $vnodes_args -Dno-build-test=true | tee stdout.txt ); then
                   status="fails"
                   exit_code=1
                 fi
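The new REPEATED_ANT_TEST_VNODES flag drives the vnodes_args block added above: the environment variable holds the literal word true or false, so the line if ${REPEATED_ANT_TEST_VNODES}; then simply executes that word as a command, and the num_tokens property is appended only when the flag is true. A compact sketch:

    REPEATED_ANT_TEST_VNODES=false
    vnodes_args=""
    if ${REPEATED_ANT_TEST_VNODES}; then   # runs the command 'false', so the branch is skipped
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    echo "vnodes_args=[${vnodes_args}]"    # prints vnodes_args=[]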
@@ -8931,6 +7906,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8944,6 +7921,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9001,6 +7979,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9014,6 +7994,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9082,7 +8063,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-cdc -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-cdc   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
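The test_timeout fallback in the step above is scraped out of build.xml with grep and awk. A small illustration of what that pipeline extracts (the property line shown is an assumed example, not copied from build.xml):

    # Splitting on double quotes makes the attribute value the fourth field.
    echo '<property name="test.timeout" value="480000" />' | awk -F'"' '{print $4}'
    # -> 480000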
@@ -9117,6 +8098,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9130,6 +8113,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9186,7 +8170,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
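As the comment in the step above says, set -o pipefail is what lets the pytest exit code survive the pipe into tee. A standalone illustration of the difference (false stands in for a failing pytest run):

    # Without pipefail the pipeline reports tee's status, so the failure is hidden.
    ( false | tee /tmp/out.txt ); echo "no pipefail -> $?"                 # prints 0
    # With pipefail the first non-zero status in the pipeline wins.
    ( set -o pipefail; false | tee /tmp/out.txt ); echo "pipefail -> $?"   # prints 1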
@@ -9224,6 +8208,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9237,6 +8223,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9267,7 +8254,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -9355,6 +8342,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9368,6 +8357,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9436,7 +8426,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -9471,6 +8461,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9484,6 +8476,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9576,6 +8569,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9589,6 +8584,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9645,7 +8641,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -9683,6 +8679,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9696,117 +8694,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-offheap:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_dtests_offheap)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_dtests_offheap)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_dtests_offheap_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_dtests_offheap_raw /tmp/all_dtest_tests_j11_dtests_offheap\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_dtests_offheap_raw > /tmp/all_dtest_tests_j11_dtests_offheap || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_dtests_offheap > /tmp/split_dtest_tests_j11_dtests_offheap.txt\ncat /tmp/split_dtest_tests_j11_dtests_offheap.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n"
-    - run:
-        name: Run dtests (j11_dtests_offheap)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt"
-          cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_dtests_offheap
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_dtests_offheap_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j11_unit_tests_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -9842,111 +8733,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
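The repeated-run step above divides the requested iteration count across the parallel CircleCI containers, handing the remainder to the lowest-indexed ones. A worked sketch of the same arithmetic with assumed values:

    # Assumed example: 502 iterations over 4 containers, seen from container index 1.
    REPEATED_UTESTS_COUNT=502 CIRCLE_NODE_TOTAL=4 CIRCLE_NODE_INDEX=1
    count=$((REPEATED_UTESTS_COUNT / CIRCLE_NODE_TOTAL))              # 125
    if ((CIRCLE_NODE_INDEX < (REPEATED_UTESTS_COUNT % CIRCLE_NODE_TOTAL))); then
      count=$((count + 1))                                            # 126: containers 0 and 1 take the extra runs
    fi
    echo "$count"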
@@ -9983,6 +8770,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9996,6 +8785,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10054,6 +8844,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10067,6 +8859,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10085,8 +8878,8 @@
         command: |
           export PATH=$JAVA_HOME/bin:$PATH
           time mv ~/cassandra /tmp
-          cd /tmp/cassandra/pylib
-          ./cassandra-cqlsh-tests.sh ..
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/pylib
@@ -10115,6 +8908,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10128,6 +8923,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10186,7 +8982,7 @@
     - run:
         name: Run dtests (j11_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -10220,6 +9016,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10233,6 +9031,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10272,111 +9071,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-cdc
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-cdc $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-cdc\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-cdc $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -10413,6 +9108,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10426,6 +9123,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10456,7 +9154,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -10544,6 +9242,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10557,6 +9257,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10595,111 +9296,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_LONG} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=long-testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant long-testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_LONG} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=long-testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant long-testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -10736,6 +9333,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10749,6 +9348,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10770,7 +9370,7 @@
           cd ~/cassandra
           mkdir ~/dtest_jars
           git remote add apache https://github.com/apache/cassandra.git
-          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 trunk; do
+          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 cassandra-4.1 trunk; do
             # check out the correct cassandra version:
             git remote set-branches --add apache '$branch'
             git fetch --depth 1 apache $branch
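The hunk above only adds cassandra-4.1 to the list of branches whose dtest jars are prebuilt for the in-JVM upgrade tests; each branch is shallow-fetched and built in turn. A hedged sketch of that shape (the checkout, build targets, and copy step are assumptions, since the rest of the loop sits outside this hunk):

    # Assumed sketch: shallow-fetch each release branch and build its dtest jar.
    for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 cassandra-4.1 trunk; do
      git fetch --depth 1 apache "$branch"
      git checkout "apache/$branch"
      ant jar dtest-jar            # assumption: the real targets/flags live in the elided part of the step
      cp build/dtest*.jar ~/dtest_jars/
    done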
@@ -10839,6 +9439,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10852,6 +9454,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10876,12 +9479,54 @@
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - start_j8_jvm_dtests_vnode:
+        type: approval
+    - j8_jvm_dtests_vnode:
+        requires:
+        - start_j8_jvm_dtests_vnode
+        - j8_build
+    - start_j11_jvm_dtests:
+        type: approval
+    - j11_jvm_dtests:
+        requires:
+        - start_j11_jvm_dtests
+        - j8_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+        - start_j11_jvm_dtests_vnode
+        - j8_build
+    - start_j8_simulator_dtests:
+        type: approval
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_simulator_dtests
+        - j8_build
     - start_j8_cqlshlib_tests:
         type: approval
     - j8_cqlshlib_tests:
         requires:
         - start_j8_cqlshlib_tests
         - j8_build
+    - start_j8_cqlshlib_cython_tests:
+        type: approval
+    - j8_cqlshlib_cython_tests:
+        requires:
+        - start_j8_cqlshlib_cython_tests
+        - j8_build
+    - start_j11_cqlshlib_tests:
+        type: approval
+    - j11_cqlshlib_tests:
+        requires:
+        - start_j11_cqlshlib_tests
+        - j8_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - start_j11_cqlshlib_cython_tests
+        - j8_build
     - start_j11_unit_tests:
         type: approval
     - j11_unit_tests:
@@ -10990,18 +9635,6 @@
         requires:
         - start_j8_dtests_offheap
         - j8_build
-    - start_j8_dtests_large:
-        type: approval
-    - j8_dtests_large:
-        requires:
-        - start_j8_dtests_large
-        - j8_build
-    - start_j8_dtests_large_vnode:
-        type: approval
-    - j8_dtests_large_vnode:
-        requires:
-        - start_j8_dtests_large_vnode
-        - j8_build
     - start_j11_dtests:
         type: approval
     - j11_dtests:
@@ -11020,6 +9653,18 @@
         requires:
         - start_j11_dtests_offheap
         - j8_build
+    - start_j8_dtests_large:
+        type: approval
+    - j8_dtests_large:
+        requires:
+        - start_j8_dtests_large
+        - j8_build
+    - start_j8_dtests_large_vnode:
+        type: approval
+    - j8_dtests_large_vnode:
+        requires:
+        - start_j8_dtests_large_vnode
+        - j8_build
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -11032,15 +9677,15 @@
         requires:
         - start_j11_dtests_large_vnode
         - j8_build
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
-        - start_upgrade_tests
+        - start_upgrade_dtests
         - j8_build
     - start_j8_cqlsh_tests:
         type: approval
-    - j8_cqlsh-dtests-py2-with-vnodes:
+    - j8_cqlsh_dtests_py3:
         requires:
         - start_j8_cqlsh_tests
         - j8_build
@@ -11048,22 +9693,6 @@
         requires:
         - start_j8_cqlsh_tests
         - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py3:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
         - start_j8_cqlsh_tests
@@ -11072,12 +9701,16 @@
         requires:
         - start_j8_cqlsh_tests
         - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
     - start_j8_cqlsh_tests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j8_cqlsh_tests_offheap
-        - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
         - start_j8_cqlsh_tests_offheap
@@ -11092,7 +9725,7 @@
         - j8_build
     - start_j11_cqlsh_tests:
         type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - j11_cqlsh_dtests_py3:
         requires:
         - start_j11_cqlsh_tests
         - j8_build
@@ -11100,22 +9733,6 @@
         requires:
         - start_j11_cqlsh_tests
         - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
         - start_j11_cqlsh_tests
@@ -11124,12 +9741,16 @@
         requires:
         - start_j11_cqlsh_tests
         - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j8_build
     - start_j11_cqlsh_tests_offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh_tests_offheap
-        - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh_tests_offheap
@@ -11152,12 +9773,33 @@
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
+    - j8_jvm_dtests_vnode:
+        requires:
+        - j8_build
+    - j11_jvm_dtests:
+        requires:
+        - j8_build
+    - j11_jvm_dtests_vnode:
+        requires:
+        - j8_build
     - j8_cqlshlib_tests:
         requires:
         - j8_build
+    - j8_cqlshlib_cython_tests:
+        requires:
+        - j8_build
+    - j11_cqlshlib_tests:
+        requires:
+        - j8_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - j8_build
     - j11_unit_tests:
         requires:
         - j8_build
@@ -11273,42 +9915,32 @@
         requires:
         - start_j11_dtests_large
         - j8_build
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
         - j8_build
-        - start_upgrade_tests
-    - j8_cqlsh-dtests-py2-with-vnodes:
+        - start_upgrade_dtests
+    - j8_cqlsh_dtests_py3:
         requires:
         - j8_build
     - j8_cqlsh_dtests_py3_vnode:
         requires:
         - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-        - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-        - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j8_build
-    - j8_cqlsh_dtests_py3:
-        requires:
-        - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
         - j8_build
     - j8_cqlsh_dtests_py311:
         requires:
         - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+        - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+        - j8_build
     - start_j8_cqlsh_dtests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j8_cqlsh_dtests_offheap
-        - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
         - start_j8_cqlsh_dtests_offheap
@@ -11321,36 +9953,26 @@
         requires:
         - start_j8_cqlsh_dtests_offheap
         - j8_build
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - j11_cqlsh_dtests_py3:
         requires:
         - j8_build
     - j11_cqlsh_dtests_py3_vnode:
         requires:
         - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j8_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
         - j8_build
     - j11_cqlsh_dtests_py311:
         requires:
         - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - j8_build
     - start_j11_cqlsh-dtests-offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh-dtests-offheap
@@ -11382,12 +10004,24 @@
         requires:
         - start_j11_jvm_dtests
         - j11_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+        - start_j11_jvm_dtests_vnode
+        - j11_build
     - start_j11_cqlshlib_tests:
         type: approval
     - j11_cqlshlib_tests:
         requires:
         - start_j11_cqlshlib_tests
         - j11_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - start_j11_cqlshlib_cython_tests
+        - j11_build
     - start_j11_dtests:
         type: approval
     - j11_dtests:
@@ -11406,58 +10040,6 @@
         requires:
         - start_j11_dtests_offheap
         - j11_build
-    - start_j11_cqlsh_tests:
-        type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py3_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py38:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py311:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - start_j11_cqlsh-dtests-offheap:
-        type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py3_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py38_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py311_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -11470,6 +10052,46 @@
         requires:
         - start_j11_dtests_large_vnode
         - j11_build
+    - start_j11_cqlsh_tests:
+        type: approval
+    - j11_cqlsh_dtests_py3:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py3_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py38:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py311:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - start_j11_cqlsh-dtests-offheap:
+        type: approval
+    - j11_cqlsh_dtests_py3_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
+    - j11_cqlsh_dtests_py38_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
+    - j11_cqlsh_dtests_py311_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
     - start_j11_utests_long:
         type: approval
     - j11_utests_long:
@@ -11519,15 +10141,15 @@
     - j11_jvm_dtests:
         requires:
         - j11_build
-    - j11_cqlshlib_tests:
-        requires:
-        - j11_build
-    - j11_jvm_dtests:
+    - j11_jvm_dtests_vnode:
         requires:
         - j11_build
     - j11_cqlshlib_tests:
         requires:
         - j11_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - j11_build
     - j11_dtests:
         requires:
         - j11_build
@@ -11540,36 +10162,36 @@
         requires:
         - start_j11_dtests_offheap
         - j11_build
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - start_j11_dtests_large:
+        type: approval
+    - j11_dtests_large:
+        requires:
+        - start_j11_dtests_large
+        - j11_build
+    - j11_dtests_large_vnode:
+        requires:
+        - start_j11_dtests_large
+        - j11_build
+    - j11_cqlsh_dtests_py3:
         requires:
         - j11_build
     - j11_cqlsh_dtests_py3_vnode:
         requires:
         - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - j11_build
     - j11_cqlsh_dtests_py38:
         requires:
         - j11_build
     - j11_cqlsh_dtests_py311:
         requires:
         - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - j11_build
     - start_j11_cqlsh-dtests-offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh-dtests-offheap
@@ -11582,16 +10204,6 @@
         requires:
         - start_j11_cqlsh-dtests-offheap
         - j11_build
-    - start_j11_dtests_large:
-        type: approval
-    - j11_dtests_large:
-        requires:
-        - start_j11_dtests_large
-        - j11_build
-    - j11_dtests_large_vnode:
-        requires:
-        - start_j11_dtests_large
-        - j11_build
     - start_utests_long:
         type: approval
     - j11_utests_long:
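The workflow entries above all follow the same CircleCI shape: a placeholder job declared with type: approval acts as a manual gate, and each gated test job lists that placeholder in its requires section together with the build job, so it starts only after both the manual approval and a successful build. A minimal sketch of the pattern, with job names purely illustrative of the entries above:

    - start_j11_cqlsh_tests:
        type: approval
    - j11_cqlsh_dtests_py3:
        requires:
        - start_j11_cqlsh_tests
        - j11_build

Entries whose requires list contains only the build job run automatically as soon as that build finishes.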
diff --git a/.circleci/config.yml.FREE b/.circleci/config.yml.FREE
index 1bae80b..b009cd1 100644
--- a/.circleci/config.yml.FREE
+++ b/.circleci/config.yml.FREE
@@ -83,7 +83,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -118,6 +118,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -131,6 +133,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
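The new REPEATED_SIMULATOR_DTESTS, REPEATED_SIMULATOR_DTESTS_COUNT and REPEATED_ANT_TEST_VNODES variables follow the convention of the existing REPEATED_* knobs: a null value is rendered as <nil> and stripped by the repeat scripts further down (sed -e "s/<nil>//"), so they stay inert until overridden. A hedged sketch of an override, with a purely hypothetical simulator test class name:

    - REPEATED_SIMULATOR_DTESTS: org.apache.cassandra.simulator.test.ExampleSimulatorTest
    - REPEATED_SIMULATOR_DTESTS_COUNT: 100
    - REPEATED_ANT_TEST_VNODES: true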
@@ -169,111 +172,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=fqltool-test
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant fqltool-test $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=fqltool-test\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
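Besides reflowing the command from a block scalar into a single flow scalar, the functional additions in the string above are the (currently disabled) vnodes argument and the extra test-simulator-dtest entry in the list of targets that take a short class name. Unescaped, the added fragment reads roughly as follows; with vnodes left at false, vnodes_args stays empty and the ant invocation behaves as before:

    # Prepare the JVM dtests vnodes argument, which is optional.
    vnodes=false
    vnodes_args=""
    if [ "$vnodes" = true ] ; then
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    ...
    ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true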
@@ -310,6 +209,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -323,76 +224,32 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
-  j8_cqlsh-dtests-py2-with-vnodes:
+  j11_cqlshlib_cython_tests:
     docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
     resource_class: medium
     working_directory: ~/
     shell: /bin/bash -eo pipefail -l
-    parallelism: 4
+    parallelism: 1
     steps:
     - attach_workspace:
         at: /home/cassandra
     - run:
-        name: Clone Cassandra dtest Repository (via git)
+        name: Run cqlshlib Unit Tests
         command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
           export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_with_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_with_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_with_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_with_vnodes_raw /tmp/all_dtest_tests_j8_with_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_with_vnodes_raw > /tmp/all_dtest_tests_j8_with_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_with_vnodes > /tmp/split_dtest_tests_j8_with_vnodes.txt\ncat /tmp/split_dtest_tests_j8_with_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_with_vnodes_final.txt\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j8_with_vnodes)
+          export cython="yes"
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
     - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_with_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_with_vnodes_logs
+        path: /tmp/cassandra/pylib
     environment:
     - ANT_HOME: /usr/share/ant
     - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
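The new j11_cqlshlib_cython_tests job above reuses the standard cqlshlib test driver; the only difference from the plain cqlshlib job is that it exports cython="yes" before invoking the script, so the tests exercise the Cython-compiled cqlshlib. A rough local equivalent, assuming a built source tree in the current directory:

    export PATH=$JAVA_HOME/bin:$PATH
    export cython="yes"
    ./pylib/cassandra-cqlsh-tests.sh $(pwd)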
@@ -418,6 +275,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -431,9 +290,11 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
   j8_cqlsh_dtests_py311_vnode:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -487,7 +348,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -525,6 +386,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -538,6 +401,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -590,7 +454,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -678,6 +542,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -691,6 +557,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -728,7 +595,7 @@
     - run:
         name: Run dtests (j8_large_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -762,6 +629,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -775,6 +644,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -843,7 +713,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-system-keyspace-directory -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-system-keyspace-directory   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -878,6 +748,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -891,6 +763,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -949,6 +822,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -962,6 +837,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1000,111 +876,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=stress-test-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant stress-test-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=stress-test-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant stress-test-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1141,6 +913,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1154,6 +928,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1211,7 +986,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1249,6 +1024,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1262,10 +1039,130 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_jvm_dtests_vnode:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine distributed Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/distributed/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/distributed/;;g" | grep "Test\.java$" | grep -v upgrade > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_utests_compression_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -1301,111 +1198,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-compression
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-compression $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-compression\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-compression $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1442,6 +1235,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1455,6 +1250,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1523,7 +1319,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -1558,6 +1354,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1571,6 +1369,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1628,7 +1427,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1666,6 +1465,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1679,6 +1480,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1735,7 +1537,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1773,6 +1575,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1786,6 +1590,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1825,111 +1630,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-compression
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-compression $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-compression\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-compression $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
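The repeated-run command above spreads the requested iteration count across the parallel CircleCI nodes: every node runs the integer quotient, and the first `count % CIRCLE_NODE_TOTAL` node indexes pick up one extra iteration. A small worked example with illustrative values:

    # assume REPEATED_UTESTS_COUNT=500 and CIRCLE_NODE_TOTAL=3
    count=$((500 / 3))                            # 166 iterations per node
    if ((CIRCLE_NODE_INDEX < (500 % 3))); then    # remainder 2: node indexes 0 and 1
      count=$((count + 1))                        # nodes 0,1 run 167; node 2 runs 166
    fi                                            # 167 + 167 + 166 = 500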
@@ -1966,6 +1667,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1979,6 +1682,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2059,7 +1763,8 @@
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
-                    $target == "stress-test" ]]; then
+                    $target == "stress-test" || \
+                    $target == "test-simulator-dtest" ]]; then
                 name="-Dtest.name=$class_name"
               else
                 name="-Dtest.name=$class_path"
@@ -2072,6 +1777,12 @@
                 methods="-Dtest.methods=${REPEATED_ANT_TEST_METHODS}"
               fi
 
+              # Prepare the JVM dtests vnodes argument, which is optional
+              vnodes_args=""
+              if ${REPEATED_ANT_TEST_VNODES}; then
+                vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+              fi
+
               # Run the test target as many times as requested collecting the exit code,
               # stopping the iteration only if stop_on_failure is set.
               exit_code="$?"
@@ -2081,7 +1792,7 @@
 
                 # run the test
                 status="passes"
-                if !( set -o pipefail && ant $target $name $methods -Dno-build-test=true | tee stdout.txt ); then
+                if !( set -o pipefail && ant $target $name $methods $vnodes_args -Dno-build-test=true | tee stdout.txt ); then
                   status="fails"
                   exit_code=1
                 fi
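The new REPEATED_ANT_TEST_VNODES variable only controls whether the extra `-Dtest.jvm.args` system property is appended to the repeated ant invocation shown above. A minimal sketch of the expanded command, assuming the default target `testsome` and an illustrative class/method:

    # with REPEATED_ANT_TEST_VNODES=true each iteration effectively runs:
    ant testsome -Dtest.name=SomeJvmDTest -Dtest.methods=someMethod \
        -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dno-build-test=true
    # with REPEATED_ANT_TEST_VNODES=false the -Dtest.jvm.args part is simply omitted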
@@ -2152,6 +1863,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2165,6 +1878,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2202,7 +1916,7 @@
     - run:
         name: Run dtests (j11_large_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -2236,6 +1950,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2249,6 +1965,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2280,7 +1997,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -2368,6 +2085,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2381,6 +2100,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2438,7 +2158,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2476,6 +2196,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2489,6 +2211,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -2545,7 +2268,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2583,6 +2306,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2596,6 +2321,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2633,7 +2359,7 @@
     - run:
         name: Run dtests (j11_large_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -2667,6 +2393,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2680,6 +2408,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2719,111 +2448,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-system-keyspace-directory
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-system-keyspace-directory $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-system-keyspace-directory\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-system-keyspace-directory $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n   
   if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
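The splitting of each repeated-test entry in the command above relies on plain bash parameter expansion: `%"#"*` strips the method suffix, `#*"#"` strips the class prefix, and `##*.` reduces the class to its short name for the unit-style targets. A small sketch with a hypothetical entry:

    test="org.apache.cassandra.db.SomeTest#testSomething"
    class=${test%"#"*}                    # org.apache.cassandra.db.SomeTest
    method=${test#*"#"}                   # testSomething
    name_arg="-Dtest.name=${class##*.}"   # -Dtest.name=SomeTest
    methods_arg="-Dtest.methods=$method"  # -Dtest.methods=testSomething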
@@ -2860,6 +2485,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2873,6 +2500,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2930,7 +2558,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2968,6 +2596,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2981,6 +2611,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3037,7 +2668,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -3075,6 +2706,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3088,10 +2721,76 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_cqlshlib_cython_tests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Run cqlshlib Unit Tests
+        command: |
+          export PATH=$JAVA_HOME/bin:$PATH
+          export cython="yes"
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/pylib
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j11_utests_cdc:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -3157,7 +2856,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-cdc -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-cdc   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3192,6 +2891,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3205,6 +2906,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3263,6 +2965,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3276,6 +2980,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3345,7 +3050,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-system-keyspace-directory -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-system-keyspace-directory   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3380,6 +3085,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3393,6 +3100,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3423,7 +3131,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -3511,6 +3219,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3524,113 +3234,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j8_cqlsh-dtests-py2-offheap:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_dtests_offheap)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_dtests_offheap)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_dtests_offheap_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_dtests_offheap_raw /tmp/all_dtest_tests_j8_dtests_offheap\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_dtests_offheap_raw > /tmp/all_dtest_tests_j8_dtests_offheap || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_dtests_offheap > /tmp/split_dtest_tests_j8_dtests_offheap.txt\ncat /tmp/split_dtest_tests_j8_dtests_offheap.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n"
-    - run:
-        name: Run dtests (j8_dtests_offheap)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt"
-          cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_dtests_offheap
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_dtests_offheap_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3661,7 +3265,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -3749,6 +3353,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3762,6 +3368,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3793,7 +3400,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -3881,6 +3488,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3894,6 +3503,242 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  j11_jvm_dtests_vnode:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine distributed Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/distributed/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/distributed/;;g" | grep "Test\.java$" | grep -v upgrade > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest -Dno-build-test=true
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
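Note on the "Determine unit Tests to Run" step in the new j8_simulator_dtests job above: as the step's own comments say, every parallel container runs the same script and relies on the circleci CLI to pick only its own slice of test files. A minimal sketch of that pipeline, restating the commands the step already uses (paths are the ones from the config, not new assumptions):

    # every container computes its own slice of the unit-test classlist
    circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
    circleci tests split --split-by=timings --timings-type=filename \
      --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} \
      /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
    # strip the absolute prefix and keep only *Test.java entries
    sed "s;^/home/cassandra/cassandra/test/unit/;;g" /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt \
      | grep "Test\.java$" > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt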
@@ -3962,7 +3807,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-compression -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-compression   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3997,6 +3842,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4010,6 +3857,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4067,6 +3915,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4080,6 +3930,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -4119,111 +3970,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
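The regenerated repeat command above divides the total iteration count across the job's parallel containers, with the remainder going to the lowest-index containers. A small worked example of that arithmetic, under assumed values (the config's default REPEATED_UTESTS_COUNT is 500; a node total of 3 is only illustrative):

    # assumed: REPEATED_UTESTS_COUNT=500, CIRCLE_NODE_TOTAL=3
    count=$((500 / 3))                  # 166 iterations for every container
    if (( CIRCLE_NODE_INDEX < (500 % 3) )); then
      count=$((count + 1))              # containers 0 and 1 run 167; container 2 runs 166
    fi
    # 167 + 167 + 166 = 500 iterations in total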
@@ -4260,6 +4007,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4273,6 +4022,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4309,7 +4059,7 @@
     - run:
         name: Run dtests (j8_large_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -4343,6 +4093,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4356,113 +4108,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j8_cqlsh-dtests-py2-no-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_without_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_without_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_without_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_without_vnodes_raw /tmp/all_dtest_tests_j8_without_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_without_vnodes_raw > /tmp/all_dtest_tests_j8_without_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_without_vnodes > /tmp/split_dtest_tests_j8_without_vnodes.txt\ncat /tmp/split_dtest_tests_j8_without_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_without_vnodes_final.txt\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j8_without_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_without_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_without_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4520,6 +4166,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4533,6 +4181,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -4590,7 +4239,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -4628,6 +4277,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4641,6 +4292,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4671,7 +4323,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_UPGRADE_DTESTS}" == "<nil>" ]; then
@@ -4759,6 +4411,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4772,117 +4426,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-with-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_with_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_with_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_with_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_with_vnodes_raw /tmp/all_dtest_tests_j11_with_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_with_vnodes_raw > /tmp/all_dtest_tests_j11_with_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_with_vnodes > /tmp/split_dtest_tests_j11_with_vnodes.txt\ncat /tmp/split_dtest_tests_j11_with_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_with_vnodes_final.txt\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j11_with_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_with_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_with_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j11_utests_cdc_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -4918,111 +4465,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-cdc
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-cdc $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-cdc\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-cdc $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
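To make the repeat loop above concrete: a test spec of the form Class#method is split on '#', and for class-list style targets such as test-cdc only the simple class name is passed to ant. The test spec below is purely illustrative and is not taken from this diff:

    # hypothetical spec supplied via REPEATED_UTESTS
    test="org.apache.cassandra.db.ReadCommandTest#testSinglePartition"
    class=${test%"#"*}    # org.apache.cassandra.db.ReadCommandTest
    method=${test#*"#"}   # testSinglePartition
    # with target=test-cdc the short class name is used, so each iteration effectively runs:
    ant test-cdc -Dtest.name=ReadCommandTest -Dtest.methods=testSinglePartition -Dno-build-test=true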
@@ -5059,6 +4502,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5072,6 +4517,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5125,7 +4571,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -5213,6 +4659,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5226,6 +4674,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5265,111 +4714,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=fqltool-test
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant fqltool-test $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=fqltool-test\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
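All of the repeated-run jobs in this file collect their results in the same layout, which is what the store_test_results and store_artifacts steps above publish. Roughly, under assumed iteration numbers (the loop uses `seq -w`, so indices are zero-padded):

    # result layout produced by the repeat loop (iteration numbers illustrative)
    # /tmp/results/repeated_utests/stdout/passes/001/<test>.txt   raw console output per run
    # /tmp/results/repeated_utests/stdout/fails/002/<test>.txt
    # /tmp/results/repeated_utests/output/passes/001/             JUnit XML, read by store_test_results
    # /tmp/results/repeated_utests/logs/passes/001/               per-iteration log files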
@@ -5406,6 +4751,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5419,6 +4766,98 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  j8_jvm_dtests_vnode_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=true\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
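The new j8_jvm_dtests_vnode_repeat job above appears to be the vnode flavour of the JVM dtest repeat job: its generated command sets vnodes=true, so the extra JVM argument is appended to the ant invocation. The snippet below only restates that step, with an illustrative class name (the real value comes from REPEATED_JVM_DTESTS); the new REPEATED_ANT_TEST_VNODES variable added throughout the environments presumably exposes the same switch for the custom testsome jobs.

    # with vnodes=true the repeat loop adds:
    vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    # SomeJvmDTest is a placeholder class name, not taken from this diff
    ant test-jvm-dtest-some -Dtest.name=SomeJvmDTest $vnodes_args -Dno-build-test=true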
@@ -5487,7 +4926,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-compression -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-compression   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -5522,6 +4961,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5535,6 +4976,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5592,7 +5034,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
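
The pytest invocations in these hunks swap --log-cli-level=DEBUG for --log-level="DEBUG". Both are standard pytest logging options: --log-cli-level sets the level for live console logging while the tests run, whereas --log-level only sets the level captured for the report/JUnit output, so the switch appears intended to keep DEBUG records in the captured output without streaming them live. A minimal comparison (the test file name is a placeholder, not part of the dtest suite):

    # live DEBUG logging streamed to the console while tests run
    pytest --log-cli-level=DEBUG test_example.py

    # DEBUG records captured for reports/JUnit XML, without the live console stream
    pytest --log-level="DEBUG" test_example.py
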
@@ -5630,6 +5072,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5643,6 +5087,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -5699,7 +5144,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5737,6 +5182,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5750,10 +5197,102 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests

+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_SIMULATOR_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-simulator-dtest\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-simulator-dtest $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test 
failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
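
The new j8_simulator_dtests_repeat job reuses the shared repeat command: each entry of REPEATED_SIMULATOR_DTESTS may name a class or a Class#method pair, and test-simulator-dtest is now on the list of targets that pass only the short class name to -Dtest.name. A standalone sketch of that parsing (the test name below is a hypothetical placeholder):

    #!/bin/bash
    # Hypothetical entry; real values come from REPEATED_SIMULATOR_DTESTS.
    test="org.apache.cassandra.example.SomeSimulationTest#testSomething"

    # Split the optional method from the class, exactly as the repeat command does.
    if [[ $test =~ "#" ]]; then
      class=${test%"#"*}
      method=${test#*"#"}
    else
      class=$test
      method=""
    fi

    # test-simulator-dtest now takes the short class name.
    name_arg="-Dtest.name=${class##*.}"             # -Dtest.name=SomeSimulationTest
    methods_arg=${method:+"-Dtest.methods=$method"} # -Dtest.methods=testSomething, empty if no '#'

    echo ant test-simulator-dtest "$name_arg" $methods_arg -Dno-build-test=true

The ${method:+...} expansion is just shorthand for the explicit if/else used in the generated command; the resulting ant invocation is the same.
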
   j11_cqlsh_dtests_py311_offheap:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -5807,7 +5346,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5845,6 +5384,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5858,6 +5399,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5897,111 +5439,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-system-keyspace-directory
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-system-keyspace-directory $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-system-keyspace-directory\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-system-keyspace-directory $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n   
   if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
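
Every regenerated repeat command now prepares an optional $vnodes_args before its loop; the value of vnodes is hard-coded per job by the generator (true only for the vnodes flavour of the JVM dtest repeat job in this section, false elsewhere), and REPEATED_ANT_TEST_VNODES: false is the new environment knob added alongside it, whose wiring is not visible in these hunks. A minimal sketch of the branch, assuming the same 16-token setting used by the Python dtest runs (the test name is a placeholder):

    #!/bin/bash
    vnodes=true        # hard-coded per job by the config generator
    vnodes_args=""
    if [ "$vnodes" = true ]; then
      # forwards the token count to the in-JVM dtest cluster via test.jvm.args
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    echo ant test-jvm-dtest-some -Dtest.name=SomeTest $vnodes_args -Dno-build-test=true
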
@@ -6038,6 +5476,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6051,6 +5491,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6081,7 +5522,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -6169,6 +5610,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6182,6 +5625,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6239,7 +5683,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -6277,6 +5721,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6290,6 +5736,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6328,111 +5775,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=stress-test-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant stress-test-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=stress-test-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant stress-test-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if 
[[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
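
Each iteration files its artifacts under a passes/ or fails/ directory keyed by the zero-padded iteration number produced by seq -w; the store_test_results and store_artifacts steps above then publish those trees as junitxml, stdout and logs. A sketch of the bookkeeping for a single iteration (the test name and iteration number are placeholders):

    #!/bin/bash
    status="passes"   # flipped to "fails" when the ant pipeline for this iteration fails
    i="007"           # zero-padded by `seq -w 1 $count` in the real loop
    test="SomeTest"   # placeholder
    testtag=""        # e.g. "compression" when the target is test-compression
    : > stdout.txt    # stand-in for the output captured via tee in the real run

    # stdout from the ant run
    dest=/tmp/results/repeated_utests/stdout/${status}/${i}
    mkdir -p "$dest" && mv stdout.txt "$dest/${test}.txt"

    # JUnit XML and log files, grouped by the target's testtag
    for kind in output logs; do
      source=build/test/${kind}/${testtag}
      dest=/tmp/results/repeated_utests/${kind}/${status}/${i}
      mkdir -p "$dest"
      if [[ -d $source && -n "$(ls $source)" ]]; then
        mv "$source"/* "$dest"/
      fi
    done
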
@@ -6469,6 +5812,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6482,6 +5827,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6512,7 +5858,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -6600,6 +5946,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6613,6 +5961,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6669,7 +6018,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -6707,6 +6056,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6720,6 +6071,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6757,7 +6109,7 @@
     - run:
         name: Run dtests (j8_upgradetests_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_upgradetests_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_upgradetests_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -6791,6 +6143,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6804,6 +6158,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
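
Failure handling is identical across all of these repeat commands: a failing iteration marks its artifacts as fails/ and latches exit_code to 1, the loop can stop early when REPEATED_TESTS_STOP_ON_FAILURE is true, and the step's final status is whatever (exit ${exit_code}) reports at the end. A condensed, self-contained sketch of that pattern (run_once stands in for the ant invocation):

    #!/bin/bash
    REPEATED_TESTS_STOP_ON_FAILURE=false   # job environment default shown above
    exit_code=0

    run_once() { [ "$1" != 2 ]; }          # stand-in: iteration 2 "fails"

    for i in 1 2 3; do
      status="passes"
      if ! ( set -o pipefail && run_once "$i" | tee stdout.txt ); then
        status="fails"
        exit_code=1
      fi
      echo "iteration $i: $status"         # artifacts would be filed under .../${status}/${i}
      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( exit_code > 0 )); then
        break
      fi
    done
    (exit ${exit_code})                     # the step fails if any iteration failed
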
@@ -6862,7 +6217,7 @@
     - run:
         name: Run dtests (j11_dtests_offheap)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -6896,6 +6251,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6909,6 +6266,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6948,111 +6306,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_UPGRADE_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_UPGRADE_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on 
test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -7089,6 +6343,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7102,6 +6358,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7158,7 +6415,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -7196,6 +6453,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7209,6 +6468,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7248,111 +6508,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
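
The embedded command above spreads the requested iteration count over the CircleCI parallel containers: every container gets the integer quotient of REPEATED_JVM_DTESTS_COUNT divided by CIRCLE_NODE_TOTAL, and the first (count mod nodes) containers take one extra iteration so the total matches exactly. A minimal standalone sketch of that arithmetic, using made-up values in place of the CI-provided environment:

    # Hypothetical stand-ins for the CircleCI-provided values.
    REPEATED_JVM_DTESTS_COUNT=502
    CIRCLE_NODE_TOTAL=4
    CIRCLE_NODE_INDEX=1          # this container, 0-based

    # Base share: integer division.
    count=$((REPEATED_JVM_DTESTS_COUNT / CIRCLE_NODE_TOTAL))
    # The first (total % nodes) containers each absorb one leftover iteration.
    if (( CIRCLE_NODE_INDEX < REPEATED_JVM_DTESTS_COUNT % CIRCLE_NODE_TOTAL )); then
      count=$((count + 1))
    fi
    echo "container ${CIRCLE_NODE_INDEX} runs ${count} iterations"   # 126 here

With 502 iterations and 4 containers the split is 126/126/125/125, which sums back to 502.
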
@@ -7389,6 +6545,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7402,6 +6560,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7459,7 +6618,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
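
As the comment above notes, "set -o pipefail" is what makes these pytest pipelines report failures: without it, the exit status of the pipeline is the status of the last command, tee, which is almost always 0, so CircleCI would mark a failing run green. A two-line illustration using only standard bash behaviour:

    false | tee /dev/null; echo "without pipefail: $?"   # prints 0 -- tee masks the failure
    set -o pipefail
    false | tee /dev/null; echo "with pipefail: $?"      # prints 1 -- the failing command's status wins
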
@@ -7497,6 +6656,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7510,6 +6671,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7549,111 +6711,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_LONG} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=long-testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant long-testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_LONG} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=long-testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant long-testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
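
Entries in the repeated-test list may be either a bare class name or Class#method, and the scripts above split them with bash parameter expansion rather than an external tool: %"#"* strips the method suffix and #*"#" strips the class prefix. A small sketch of the idiom; the test names are invented examples:

    for test in org.apache.cassandra.SomeTest 'org.apache.cassandra.SomeTest#testSomething'; do
      if [[ $test =~ "#" ]]; then
        class=${test%"#"*}     # drop the shortest '#...' suffix -> class name
        method=${test#*"#"}    # drop the shortest '...#' prefix -> method name
      else
        class=$test
        method=""
      fi
      echo "class=${class} method=${method:-<none>}"
    done
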
@@ -7690,6 +6748,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7703,6 +6763,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7739,7 +6800,7 @@
     - run:
         name: Run dtests (j8_dtests_offheap)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -7773,6 +6834,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7786,6 +6849,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7854,7 +6918,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
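
When the job does not supply its own timeout, the test step above falls back to the test.timeout property declared in build.xml, pulled out with grep and awk -F'"'. A small sketch of that extraction against a stand-in property line (the value here is illustrative, not the real default):

    # Hypothetical stand-in for the relevant line of build.xml.
    printf '%s\n' '<property name="test.timeout" value="480000" />' > /tmp/build-snippet.xml

    # Splitting the line on '"' leaves the value attribute in field 4.
    test_timeout=$(grep 'name="test.timeout"' /tmp/build-snippet.xml | awk -F'"' '{print $4}')
    echo "test_timeout=${test_timeout}"   # 480000
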
@@ -7889,6 +6953,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7902,6 +6968,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7970,7 +7037,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -8005,6 +7072,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8018,6 +7087,99 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
+  j11_jvm_dtests_vnode_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=true\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
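
The new j11_jvm_dtests_vnode_repeat job is the vnode counterpart of the existing repeat job: its embedded script sets vnodes=true, which adds a -Dtest.jvm.args property to the ant call so the in-JVM dtests start their clusters with 16 tokens, matching the --num-tokens=16 used by the Python dtests. A minimal sketch of that conditional argument; the test class name is a placeholder:

    vnodes=true
    vnodes_args=""
    if [ "$vnodes" = true ]; then
      # Intended to be forwarded to the forked test JVMs by the build's test macro.
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    # Empty (and harmless) when vnodes=false.
    echo ant test-jvm-dtest-some -Dtest.name=SomeTestClass $vnodes_args -Dno-build-test=true
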
@@ -8111,6 +7273,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8124,6 +7288,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8161,7 +7326,7 @@
     - run:
         name: Run dtests (j8_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8195,6 +7360,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8208,117 +7375,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-no-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirements.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_without_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_without_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_without_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_without_vnodes_raw /tmp/all_dtest_tests_j11_without_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_without_vnodes_raw > /tmp/all_dtest_tests_j11_without_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_without_vnodes > /tmp/split_dtest_tests_j11_without_vnodes.txt\ncat /tmp/split_dtest_tests_j11_without_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_without_vnodes_final.txt\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j11_without_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_without_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_without_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j8_dtests_vnode:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -8352,7 +7412,7 @@
     - run:
         name: Run dtests (j8_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8386,6 +7446,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8399,6 +7461,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -8417,8 +7480,8 @@
         command: |
           export PATH=$JAVA_HOME/bin:$PATH
           time mv ~/cassandra /tmp
-          cd /tmp/cassandra/pylib
-          ./cassandra-cqlsh-tests.sh ..
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/pylib
@@ -8447,6 +7510,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8460,6 +7525,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8499,111 +7565,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
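
Each iteration's stdout, JUnit XML and logs are filed under passes/ or fails/ plus the zero-padded iteration number produced by seq -w, so the stored artifacts keep failures together and sort lexically in run order. A tiny sketch of that layout under an invented /tmp/demo tree:

    count=12
    for i in $(seq -w 1 $count); do
      status="passes"                                 # set to "fails" when the ant run exits non-zero
      dest=/tmp/demo/repeated/stdout/${status}/${i}
      mkdir -p "$dest"
      echo "iteration ${i}" > "$dest"/SomeTest.txt    # SomeTest is a placeholder test name
    done
    ls /tmp/demo/repeated/stdout/passes/              # 01 02 ... 12 -- zero-padded, lexical order == run order
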
@@ -8640,6 +7602,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8653,6 +7617,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -8711,7 +7676,7 @@
     - run:
         name: Run dtests (j11_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8745,6 +7710,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8758,6 +7725,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8838,7 +7806,8 @@
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
-                    $target == "stress-test" ]]; then
+                    $target == "stress-test" || \
+                    $target == "test-simulator-dtest" ]]; then
                 name="-Dtest.name=$class_name"
               else
                 name="-Dtest.name=$class_path"
@@ -8851,6 +7820,12 @@
                 methods="-Dtest.methods=${REPEATED_ANT_TEST_METHODS}"
               fi
 
+              # Prepare the JVM dtests vnodes argument, which is optional
+              vnodes_args=""
+              if ${REPEATED_ANT_TEST_VNODES}; then
+                vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+              fi
+
               # Run the test target as many times as requested collecting the exit code,
               # stopping the iteration only if stop_on_failure is set.
               exit_code="$?"
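
The REPEATED_ANT_TEST_VNODES parameter added above is consumed by executing its value directly ("if ${REPEATED_ANT_TEST_VNODES}; then"), so the shell runs the true or false builtin rather than comparing strings. A short sketch of that idiom, hard-coding the value the job's environment would normally supply:

    REPEATED_ANT_TEST_VNODES=false   # provided via the job environment in the real config

    vnodes_args=""
    if ${REPEATED_ANT_TEST_VNODES}; then
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    echo "vnodes_args='${vnodes_args}'"   # empty unless the parameter is literally 'true'
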
@@ -8860,7 +7835,7 @@
 
                 # run the test
                 status="passes"
-                if !( set -o pipefail && ant $target $name $methods -Dno-build-test=true | tee stdout.txt ); then
+                if !( set -o pipefail && ant $target $name $methods $vnodes_args -Dno-build-test=true | tee stdout.txt ); then
                   status="fails"
                   exit_code=1
                 fi
@@ -8931,6 +7906,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8944,6 +7921,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9001,6 +7979,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9014,6 +7994,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9082,7 +8063,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-cdc -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-cdc   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -9117,6 +8098,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9130,6 +8113,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9186,7 +8170,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
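Note: the only functional change to this pytest invocation is swapping --log-cli-level for --log-level. --log-cli-level streams live log records to the console at the given level while the tests run; --log-level only sets the level at which records are captured for the report, so DEBUG output is still recorded but no longer floods the job's stdout. Roughly (some_test.py is a placeholder):

    pytest --log-cli-level=DEBUG some_test.py   # DEBUG records printed live during the run
    pytest --log-level=DEBUG some_test.py       # DEBUG records captured, shown only for failing tests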
@@ -9224,6 +8208,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9237,6 +8223,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9267,7 +8254,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -9355,6 +8342,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9368,6 +8357,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9436,7 +8426,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -9471,6 +8461,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9484,6 +8476,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9576,6 +8569,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9589,6 +8584,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9645,7 +8641,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -9683,6 +8679,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9696,117 +8694,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-offheap:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: medium
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 4
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirements.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_dtests_offheap)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_dtests_offheap)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_dtests_offheap_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_dtests_offheap_raw /tmp/all_dtest_tests_j11_dtests_offheap\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_dtests_offheap_raw > /tmp/all_dtest_tests_j11_dtests_offheap || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_dtests_offheap > /tmp/split_dtest_tests_j11_dtests_offheap.txt\ncat /tmp/split_dtest_tests_j11_dtests_offheap.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n"
-    - run:
-        name: Run dtests (j11_dtests_offheap)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt"
-          cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_dtests_offheap
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_dtests_offheap_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
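Note: the deleted j11_cqlsh-dtests-py2-* jobs differed from the surviving py3/py38/py311 variants only in the interpreter they exported via CQLSH_PYTHON before running the cqlsh-focused dtests. A minimal sketch of that mechanism (the python3.8 path and the -k filter are illustrative; cassandra-dtest is assumed to honour CQLSH_PYTHON as in the steps above):

    # choose the interpreter the cqlsh dtests should launch cqlsh with
    export CQLSH_PYTHON=/usr/bin/python3.8   # the removed jobs exported /usr/bin/python2.7
    cd ~/cassandra-dtest
    pytest -k cql --cassandra-dir=/home/cassandra/cassandra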
   j11_unit_tests_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -9842,111 +8733,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
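Note: in the folded command above, the target list that gets the short class name now also includes test-simulator-dtest; any target not in that list is handed the fully qualified class name. Condensed (SomeRepeatedTest is a placeholder):

    class=org.apache.cassandra.SomeRepeatedTest
    case "$target" in
      test|test-cdc|test-compression|test-system-keyspace-directory|fqltool-test|long-test|stress-test|test-simulator-dtest)
        name_arg="-Dtest.name=${class##*.}" ;;   # short name, e.g. SomeRepeatedTest
      *)
        name_arg="-Dtest.name=$class" ;;         # fully qualified name
    esac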
@@ -9983,6 +8770,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9996,6 +8785,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10054,6 +8844,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10067,6 +8859,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10085,8 +8878,8 @@
         command: |
           export PATH=$JAVA_HOME/bin:$PATH
           time mv ~/cassandra /tmp
-          cd /tmp/cassandra/pylib
-          ./cassandra-cqlsh-tests.sh ..
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/pylib
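Note: the cqlshlib step now stays in the repository root and passes the Cassandra checkout to the script as an argument instead of cd-ing into pylib and passing "..". Run by hand, the equivalent is roughly (the checkout path is illustrative):

    cd /path/to/cassandra
    ./pylib/cassandra-cqlsh-tests.sh "$(pwd)"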
@@ -10115,6 +8908,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10128,6 +8923,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10186,7 +8982,7 @@
     - run:
         name: Run dtests (j11_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -10220,6 +9016,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10233,6 +9031,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10272,111 +9071,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-cdc
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-cdc $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-cdc\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-cdc $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -10413,6 +9108,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10426,6 +9123,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10456,7 +9154,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -10544,6 +9242,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10557,6 +9257,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10595,111 +9296,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_LONG} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=long-testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant long-testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_LONG} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=long-testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant long-testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -10736,6 +9333,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10749,6 +9348,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10770,7 +9370,7 @@
           cd ~/cassandra
           mkdir ~/dtest_jars
           git remote add apache https://github.com/apache/cassandra.git
-          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 trunk; do
+          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 cassandra-4.1 trunk; do
             # check out the correct cassandra version:
             git remote set-branches --add apache '$branch'
             git fetch --depth 1 apache $branch
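Note: this loop pre-builds in-JVM dtest jars for every branch the upgrade tests may span, and cassandra-4.1 is now part of that list. One iteration boils down to roughly the following; the checkout and ant flags are a sketch of the rest of the step (not shown in this hunk), not a verbatim copy:

    branch=cassandra-4.1
    git checkout $branch                  # assumes the branch was fetched above
    ant realclean && ant jar dtest-jar    # dtest-jar produces build/dtest-<version>.jar
    cp build/dtest*.jar ~/dtest_jars/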
@@ -10839,6 +9439,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10852,6 +9454,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10876,12 +9479,54 @@
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - start_j8_jvm_dtests_vnode:
+        type: approval
+    - j8_jvm_dtests_vnode:
+        requires:
+        - start_j8_jvm_dtests_vnode
+        - j8_build
+    - start_j11_jvm_dtests:
+        type: approval
+    - j11_jvm_dtests:
+        requires:
+        - start_j11_jvm_dtests
+        - j8_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+        - start_j11_jvm_dtests_vnode
+        - j8_build
+    - start_j8_simulator_dtests:
+        type: approval
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_simulator_dtests
+        - j8_build
     - start_j8_cqlshlib_tests:
         type: approval
     - j8_cqlshlib_tests:
         requires:
         - start_j8_cqlshlib_tests
         - j8_build
+    - start_j8_cqlshlib_cython_tests:
+        type: approval
+    - j8_cqlshlib_cython_tests:
+        requires:
+        - start_j8_cqlshlib_cython_tests
+        - j8_build
+    - start_j11_cqlshlib_tests:
+        type: approval
+    - j11_cqlshlib_tests:
+        requires:
+        - start_j11_cqlshlib_tests
+        - j8_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - start_j11_cqlshlib_cython_tests
+        - j8_build
     - start_j11_unit_tests:
         type: approval
     - j11_unit_tests:
@@ -10990,18 +9635,6 @@
         requires:
         - start_j8_dtests_offheap
         - j8_build
-    - start_j8_dtests_large:
-        type: approval
-    - j8_dtests_large:
-        requires:
-        - start_j8_dtests_large
-        - j8_build
-    - start_j8_dtests_large_vnode:
-        type: approval
-    - j8_dtests_large_vnode:
-        requires:
-        - start_j8_dtests_large_vnode
-        - j8_build
     - start_j11_dtests:
         type: approval
     - j11_dtests:
@@ -11020,6 +9653,18 @@
         requires:
         - start_j11_dtests_offheap
         - j8_build
+    - start_j8_dtests_large:
+        type: approval
+    - j8_dtests_large:
+        requires:
+        - start_j8_dtests_large
+        - j8_build
+    - start_j8_dtests_large_vnode:
+        type: approval
+    - j8_dtests_large_vnode:
+        requires:
+        - start_j8_dtests_large_vnode
+        - j8_build
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -11032,15 +9677,15 @@
         requires:
         - start_j11_dtests_large_vnode
         - j8_build
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
-        - start_upgrade_tests
+        - start_upgrade_dtests
         - j8_build
     - start_j8_cqlsh_tests:
         type: approval
-    - j8_cqlsh-dtests-py2-with-vnodes:
+    - j8_cqlsh_dtests_py3:
         requires:
         - start_j8_cqlsh_tests
         - j8_build
@@ -11048,22 +9693,6 @@
         requires:
         - start_j8_cqlsh_tests
         - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py3:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
         - start_j8_cqlsh_tests
@@ -11072,12 +9701,16 @@
         requires:
         - start_j8_cqlsh_tests
         - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
     - start_j8_cqlsh_tests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j8_cqlsh_tests_offheap
-        - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
         - start_j8_cqlsh_tests_offheap
@@ -11092,7 +9725,7 @@
         - j8_build
     - start_j11_cqlsh_tests:
         type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - j11_cqlsh_dtests_py3:
         requires:
         - start_j11_cqlsh_tests
         - j8_build
@@ -11100,22 +9733,6 @@
         requires:
         - start_j11_cqlsh_tests
         - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
         - start_j11_cqlsh_tests
@@ -11124,12 +9741,16 @@
         requires:
         - start_j11_cqlsh_tests
         - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j8_build
     - start_j11_cqlsh_tests_offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh_tests_offheap
-        - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh_tests_offheap
@@ -11152,12 +9773,33 @@
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
+    - j8_jvm_dtests_vnode:
+        requires:
+        - j8_build
+    - j11_jvm_dtests:
+        requires:
+        - j8_build
+    - j11_jvm_dtests_vnode:
+        requires:
+        - j8_build
     - j8_cqlshlib_tests:
         requires:
         - j8_build
+    - j8_cqlshlib_cython_tests:
+        requires:
+        - j8_build
+    - j11_cqlshlib_tests:
+        requires:
+        - j8_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - j8_build
     - j11_unit_tests:
         requires:
         - j8_build
@@ -11273,42 +9915,32 @@
         requires:
         - start_j11_dtests_large
         - j8_build
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
         - j8_build
-        - start_upgrade_tests
-    - j8_cqlsh-dtests-py2-with-vnodes:
+        - start_upgrade_dtests
+    - j8_cqlsh_dtests_py3:
         requires:
         - j8_build
     - j8_cqlsh_dtests_py3_vnode:
         requires:
         - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-        - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-        - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j8_build
-    - j8_cqlsh_dtests_py3:
-        requires:
-        - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
         - j8_build
     - j8_cqlsh_dtests_py311:
         requires:
         - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+        - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+        - j8_build
     - start_j8_cqlsh_dtests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j8_cqlsh_dtests_offheap
-        - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
         - start_j8_cqlsh_dtests_offheap
@@ -11321,36 +9953,26 @@
         requires:
         - start_j8_cqlsh_dtests_offheap
         - j8_build
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - j11_cqlsh_dtests_py3:
         requires:
         - j8_build
     - j11_cqlsh_dtests_py3_vnode:
         requires:
         - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j8_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
         - j8_build
     - j11_cqlsh_dtests_py311:
         requires:
         - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - j8_build
     - start_j11_cqlsh-dtests-offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh-dtests-offheap
@@ -11382,12 +10004,24 @@
         requires:
         - start_j11_jvm_dtests
         - j11_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+        - start_j11_jvm_dtests_vnode
+        - j11_build
     - start_j11_cqlshlib_tests:
         type: approval
     - j11_cqlshlib_tests:
         requires:
         - start_j11_cqlshlib_tests
         - j11_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - start_j11_cqlshlib_cython_tests
+        - j11_build
     - start_j11_dtests:
         type: approval
     - j11_dtests:
@@ -11406,58 +10040,6 @@
         requires:
         - start_j11_dtests_offheap
         - j11_build
-    - start_j11_cqlsh_tests:
-        type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py3_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py38:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py311:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - start_j11_cqlsh-dtests-offheap:
-        type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py3_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py38_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py311_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -11470,6 +10052,46 @@
         requires:
         - start_j11_dtests_large_vnode
         - j11_build
+    - start_j11_cqlsh_tests:
+        type: approval
+    - j11_cqlsh_dtests_py3:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py3_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py38:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py311:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - start_j11_cqlsh-dtests-offheap:
+        type: approval
+    - j11_cqlsh_dtests_py3_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
+    - j11_cqlsh_dtests_py38_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
+    - j11_cqlsh_dtests_py311_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
     - start_j11_utests_long:
         type: approval
     - j11_utests_long:
@@ -11519,15 +10141,15 @@
     - j11_jvm_dtests:
         requires:
         - j11_build
-    - j11_cqlshlib_tests:
-        requires:
-        - j11_build
-    - j11_jvm_dtests:
+    - j11_jvm_dtests_vnode:
         requires:
         - j11_build
     - j11_cqlshlib_tests:
         requires:
         - j11_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - j11_build
     - j11_dtests:
         requires:
         - j11_build
@@ -11540,36 +10162,36 @@
         requires:
         - start_j11_dtests_offheap
         - j11_build
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - start_j11_dtests_large:
+        type: approval
+    - j11_dtests_large:
+        requires:
+        - start_j11_dtests_large
+        - j11_build
+    - j11_dtests_large_vnode:
+        requires:
+        - start_j11_dtests_large
+        - j11_build
+    - j11_cqlsh_dtests_py3:
         requires:
         - j11_build
     - j11_cqlsh_dtests_py3_vnode:
         requires:
         - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - j11_build
     - j11_cqlsh_dtests_py38:
         requires:
         - j11_build
     - j11_cqlsh_dtests_py311:
         requires:
         - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - j11_build
     - start_j11_cqlsh-dtests-offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh-dtests-offheap
@@ -11582,16 +10204,6 @@
         requires:
         - start_j11_cqlsh-dtests-offheap
         - j11_build
-    - start_j11_dtests_large:
-        type: approval
-    - j11_dtests_large:
-        requires:
-        - start_j11_dtests_large
-        - j11_build
-    - j11_dtests_large_vnode:
-        requires:
-        - start_j11_dtests_large
-        - j11_build
     - start_utests_long:
         type: approval
     - j11_utests_long:
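The workflow edits above reorder the cqlsh jobs and drop the python2 variants, but they all rest on the same two CircleCI primitives: an approval job (type: approval) that pauses the workflow until someone approves it, and requires: lists that hold the real jobs behind that gate and behind the build. A minimal sketch of the pattern, using job names that already appear in this config (the workflow name is illustrative only, not taken from the file):

    workflows:
      java11_separate_tests:
        jobs:
        - j11_build
        - start_j11_dtests:
            type: approval
        - j11_dtests:
            requires:
            - start_j11_dtests
            - j11_build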
diff --git a/.circleci/config.yml.PAID b/.circleci/config.yml.PAID
index be6f6f8..3118f37 100644
--- a/.circleci/config.yml.PAID
+++ b/.circleci/config.yml.PAID
@@ -83,7 +83,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -118,6 +118,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -131,6 +133,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
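The REPEATED_SIMULATOR_DTESTS / REPEATED_SIMULATOR_DTESTS_COUNT pair added in this hunk follows the convention of the surrounding repeat variables: the first names the tests to repeat (left null when there is nothing to repeat), the second is the total number of iterations, which each repeat job divides across its parallel containers. A sketch of that split, reusing the arithmetic the generated scripts below apply to the other *_COUNT variables:

    - run:
        name: Example only - split an iteration count across parallel containers
        command: |
          # Each container runs COUNT/TOTAL iterations; the first COUNT%TOTAL
          # containers take one extra so the grand total adds up exactly.
          count=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
          if (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
            count=$((count+1))
          fi
          echo "this container runs ${count} iterations"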
@@ -169,111 +172,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=fqltool-test
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant fqltool-test $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=fqltool-test\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
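One functional addition hides inside the re-folded command string above: before the test loop, the script now prepares an optional vnodes argument. In this fqltool job vnodes is hard-coded to false, so the argument stays empty; presumably the jobs that repeat JVM dtests with vnodes set it to true and pass 16 tokens through to the in-JVM dtest framework. Pulled out of the one-liner for readability (name_arg and methods_arg are prepared earlier in the same generated script):

    - run:
        name: Example only - optional vnodes argument in the repeat scripts
        command: |
          vnodes=false                 # true only in the vnode flavours of the repeat jobs
          vnodes_args=""
          if [ "$vnodes" = true ] ; then
            vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
          fi
          # name_arg and methods_arg come from the enclosing generated script
          ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true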
@@ -310,6 +209,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -323,76 +224,32 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
-  j8_cqlsh-dtests-py2-with-vnodes:
+  j11_cqlshlib_cython_tests:
     docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
-    resource_class: large
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
+    resource_class: medium
     working_directory: ~/
     shell: /bin/bash -eo pipefail -l
-    parallelism: 50
+    parallelism: 1
     steps:
     - attach_workspace:
         at: /home/cassandra
     - run:
-        name: Clone Cassandra dtest Repository (via git)
+        name: Run cqlshlib Unit Tests
         command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirements.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
           export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_with_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_with_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_with_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_with_vnodes_raw /tmp/all_dtest_tests_j8_with_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_with_vnodes_raw > /tmp/all_dtest_tests_j8_with_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_with_vnodes > /tmp/split_dtest_tests_j8_with_vnodes.txt\ncat /tmp/split_dtest_tests_j8_with_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_with_vnodes_final.txt\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j8_with_vnodes)
+          export cython="yes"
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
     - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_with_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_with_vnodes_logs
+        path: /tmp/cassandra/pylib
     environment:
     - ANT_HOME: /usr/share/ant
     - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
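The j11_cqlshlib_cython_tests job introduced above looks like the cython-enabled counterpart of j11_cqlshlib_tests: a smaller container (medium, parallelism 1) that runs the same pylib/cassandra-cqlsh-tests.sh driver, with the cython environment variable exported so cqlshlib is exercised against its compiled extensions. Reduced to the step that matters (the rest of the job is workspace attachment and result collection), as a sketch assembled from the added lines:

    - run:
        name: Run cqlshlib Unit Tests
        command: |
          export PATH=$JAVA_HOME/bin:$PATH
          export cython="yes"          # switch the cqlshlib test driver to the cython build
          time mv ~/cassandra /tmp
          cd /tmp/cassandra/
          ./pylib/cassandra-cqlsh-tests.sh $(pwd)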
@@ -418,6 +275,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -431,9 +290,11 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
   j8_cqlsh_dtests_py311_vnode:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -487,7 +348,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
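Another change that recurs through these hunks is the pytest invocation: --log-cli-level=DEBUG becomes --log-level="DEBUG". In pytest, --log-cli-level turns on live log streaming to the console at the given level, while --log-level only sets the level at which log records are captured for the report, so the net effect should be a quieter console with DEBUG records still available in the captured output. The invocation above, shown on its own (SPLIT_TESTS is produced by the preceding split step):

    - run:
        name: Run dtests (j8_with_vnodes)
        command: |
          cd ~/cassandra-dtest
          # --log-level replaces --log-cli-level: capture at DEBUG, no live streaming
          pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests \
            --log-level="DEBUG" \
            --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml \
            -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS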
@@ -525,6 +386,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -538,6 +401,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -590,7 +454,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -678,6 +542,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -691,6 +557,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -728,7 +595,7 @@
     - run:
         name: Run dtests (j8_large_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -762,6 +629,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -775,6 +644,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -843,7 +713,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-system-keyspace-directory -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-system-keyspace-directory   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -878,6 +748,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -891,6 +763,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -949,6 +822,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -962,6 +837,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1000,111 +876,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=stress-test-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant stress-test-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=stress-test-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant stress-test-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if 
[[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1141,6 +913,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1154,6 +928,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1211,7 +986,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1249,6 +1024,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1262,10 +1039,130 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_jvm_dtests_vnode:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: large
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 10
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine distributed Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/distributed/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/distributed/;;g" | grep "Test\.java$" | grep -v upgrade > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
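The j8_jvm_dtests_vnode job added above mirrors the existing JVM dtest job, with one functional difference in the ant invocation: it passes -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16', so every in-JVM dtest cluster starts its nodes with 16 tokens. Reduced to the step that differs (the test list and timeout are computed by the earlier steps of the job, as in the non-vnode variant):

    - run:
        name: Run Unit Tests (testclasslist) with vnodes
        command: |
          ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' \
            -Dtest.timeout="$test_timeout" \
            -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt \
            -Dtest.classlistprefix=distributed -Dno-build-test=true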
   j8_utests_compression_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -1301,111 +1198,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-compression
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-compression $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-compression\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-compression $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1442,6 +1235,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1455,6 +1250,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1523,7 +1319,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -1558,6 +1354,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1571,6 +1369,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1628,7 +1427,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1666,6 +1465,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1679,6 +1480,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -1735,7 +1537,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -1773,6 +1575,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1786,6 +1590,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -1825,111 +1630,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-compression
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-compression $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-compression\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-compression $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1966,6 +1667,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -1979,6 +1682,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2059,7 +1763,8 @@
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
-                    $target == "stress-test" ]]; then
+                    $target == "stress-test" || \
+                    $target == "test-simulator-dtest" ]]; then
                 name="-Dtest.name=$class_name"
               else
                 name="-Dtest.name=$class_path"
@@ -2072,6 +1777,12 @@
                 methods="-Dtest.methods=${REPEATED_ANT_TEST_METHODS}"
               fi
 
+              # Prepare the JVM dtests vnodes argument, which is optional
+              vnodes_args=""
+              if ${REPEATED_ANT_TEST_VNODES}; then
+                vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+              fi
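+              # Example (placeholder class/method names): with the default target (testsome) and
+              # REPEATED_ANT_TEST_VNODES=true, the invocation below would look like:
+              #   ant testsome -Dtest.name=SomeTest -Dtest.methods=someMethod -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dno-build-test=true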
+
               # Run the test target as many times as requested collecting the exit code,
               # stopping the iteration only if stop_on_failure is set.
               exit_code="$?"
@@ -2081,7 +1792,7 @@
 
                 # run the test
                 status="passes"
-                if !( set -o pipefail && ant $target $name $methods -Dno-build-test=true | tee stdout.txt ); then
+                if !( set -o pipefail && ant $target $name $methods $vnodes_args -Dno-build-test=true | tee stdout.txt ); then
                   status="fails"
                   exit_code=1
                 fi
@@ -2152,6 +1863,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2165,6 +1878,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2202,7 +1916,7 @@
     - run:
         name: Run dtests (j11_large_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_large_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -2236,6 +1950,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2249,6 +1965,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2280,7 +1997,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -2368,6 +2085,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2381,6 +2100,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2438,7 +2158,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2476,6 +2196,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2489,6 +2211,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -2545,7 +2268,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2583,6 +2306,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2596,6 +2321,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2633,7 +2359,7 @@
     - run:
         name: Run dtests (j11_large_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -2667,6 +2393,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2680,6 +2408,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2719,111 +2448,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-system-keyspace-directory
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-system-keyspace-directory $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-system-keyspace-directory\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-system-keyspace-directory $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n   
   if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2860,6 +2485,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2873,6 +2500,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -2930,7 +2558,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -2968,6 +2596,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -2981,6 +2611,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3037,7 +2668,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -3075,6 +2706,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3088,10 +2721,76 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_cqlshlib_cython_tests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Run cqlshlib Unit Tests
+        command: |
+          export PATH=$JAVA_HOME/bin:$PATH
+          export cython="yes"
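+          # cython="yes" is read by cassandra-cqlsh-tests.sh; the assumption here is that it makes
+          # the script build the Cython extensions for cqlshlib before running the tests.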
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/pylib
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j11_utests_cdc:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -3157,7 +2856,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-cdc -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-cdc   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3192,6 +2891,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3205,6 +2906,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3263,6 +2965,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3276,6 +2980,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3345,7 +3050,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-system-keyspace-directory -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-system-keyspace-directory   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3380,6 +3085,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3393,6 +3100,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3423,7 +3131,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -3511,6 +3219,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3524,113 +3234,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j8_cqlsh-dtests-py2-offheap:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
-    resource_class: large
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 50
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_dtests_offheap)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_dtests_offheap)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_dtests_offheap_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_dtests_offheap_raw /tmp/all_dtest_tests_j8_dtests_offheap\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_dtests_offheap_raw > /tmp/all_dtest_tests_j8_dtests_offheap || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_dtests_offheap > /tmp/split_dtest_tests_j8_dtests_offheap.txt\ncat /tmp/split_dtest_tests_j8_dtests_offheap.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n"
-    - run:
-        name: Run dtests (j8_dtests_offheap)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt"
-          cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_dtests_offheap
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_dtests_offheap_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3661,7 +3265,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -3749,6 +3353,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3762,6 +3368,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -3793,7 +3400,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -3881,6 +3488,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -3894,6 +3503,242 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  j11_jvm_dtests_vnode:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
+    resource_class: large
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 10
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine distributed Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/distributed/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/distributed/;;g" | grep "Test\.java$" | grep -v upgrade > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
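+          # (strip the checkout path prefix, keep only *Test.java files and exclude the upgrade tests)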
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.distributed.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
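+          # The -Dtest.jvm.args property below is what distinguishes this "vnode" job: the in-JVM
+          # dtests are started with cassandra.dtest.num_tokens=16 rather than the default token setup.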
+          ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
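+          # Simulator dtests run as one ant target rather than a split class list,
+          # hence parallelism: 1 for this job.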
+          ant test-simulator-dtest -Dno-build-test=true
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -3962,7 +3807,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-compression -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-compression   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -3997,6 +3842,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4010,6 +3857,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4067,6 +3915,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4080,6 +3930,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -4119,111 +3970,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
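The per-container iteration count in the command above divides REPEATED_UTESTS_COUNT across the parallel runners and gives the low-index containers one extra iteration when the division is not exact. A worked sketch with illustrative numbers (not the defaults used by this job):

    REPEATED_UTESTS_COUNT=100; CIRCLE_NODE_TOTAL=8
    for CIRCLE_NODE_INDEX in $(seq 0 7); do
      count=$((REPEATED_UTESTS_COUNT / CIRCLE_NODE_TOTAL))                      # 12
      if ((CIRCLE_NODE_INDEX < (REPEATED_UTESTS_COUNT % CIRCLE_NODE_TOTAL))); then
        count=$((count + 1))                                                    # containers 0-3 run 13
      fi
      echo "container $CIRCLE_NODE_INDEX runs $count iterations"                # 4*13 + 4*12 = 100
    done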
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4260,6 +4007,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4273,6 +4022,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4309,7 +4059,7 @@
     - run:
         name: Run dtests (j8_large_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_large_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --only-resource-intensive-tests --force-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_large_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -4343,6 +4093,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4356,113 +4108,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j8_cqlsh-dtests-py2-no-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
-    resource_class: large
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 50
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j8_without_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j8_without_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j8_without_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j8_without_vnodes_raw /tmp/all_dtest_tests_j8_without_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j8_without_vnodes_raw > /tmp/all_dtest_tests_j8_without_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j8_without_vnodes > /tmp/split_dtest_tests_j8_without_vnodes.txt\ncat /tmp/split_dtest_tests_j8_without_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j8_without_vnodes_final.txt\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j8_without_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j8_without_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j8_without_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4520,6 +4166,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4533,6 +4181,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -4590,7 +4239,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -4628,6 +4277,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4641,6 +4292,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -4671,7 +4323,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_UPGRADE_DTESTS}" == "<nil>" ]; then
@@ -4759,6 +4411,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -4772,117 +4426,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-with-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: large
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 50
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_with_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_with_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_with_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_with_vnodes_raw /tmp/all_dtest_tests_j11_with_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_with_vnodes_raw > /tmp/all_dtest_tests_j11_with_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_with_vnodes > /tmp/split_dtest_tests_j11_with_vnodes.txt\ncat /tmp/split_dtest_tests_j11_with_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_with_vnodes_final.txt\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j11_with_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_with_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_with_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j11_utests_cdc_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -4918,111 +4465,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-cdc
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-cdc $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-cdc\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-cdc $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
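Each repeated test may be given either as a bare class or as Class#method; the script splits the two parts with shell parameter expansions. A small illustration (the test name is hypothetical):

    test="org.apache.cassandra.db.commitlog.CommitLogTest#testRecovery"   # hypothetical
    class=${test%"#"*}    # org.apache.cassandra.db.commitlog.CommitLogTest
    method=${test#*"#"}   # testRecovery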
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -5059,6 +4502,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5072,6 +4517,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5125,7 +4571,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -5213,6 +4659,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5226,6 +4674,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5265,111 +4714,7 @@
     - run:
        name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=fqltool-test
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant fqltool-test $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_FQLTOOL} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=fqltool-test\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant fqltool-test $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
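The target list in the command above (now including test-simulator-dtest) decides whether ant receives the short or the fully qualified class name; for this job's fqltool-test target the package prefix is stripped. An illustration with a hypothetical class name:

    class="org.example.fqltool.SomeFQLToolTest"   # hypothetical
    name_arg="-Dtest.name=${class##*.}"           # -Dtest.name=SomeFQLToolTest (fqltool-test, long-test, stress-test, ...)
    # whereas targets such as testsome or test-jvm-dtest-some take the fully qualified name:
    name_arg="-Dtest.name=$class"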
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -5406,6 +4751,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5419,6 +4766,98 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  j8_jvm_dtests_vnode_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 25
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=true\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
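In this vnode variant the script sets vnodes=true, so the extra JVM argument requesting 16 tokens is appended to every ant invocation, while the non-vnode repeat jobs leave vnodes_args empty. Each iteration therefore runs roughly the following (class name hypothetical):

    ant test-jvm-dtest-some -Dtest.name=org.example.SomeJvmDTest \
        -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' -Dno-build-test=true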
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -5487,7 +4926,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-compression -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-compression   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -5522,6 +4961,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5535,6 +4976,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5592,7 +5034,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5630,6 +5072,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5643,6 +5087,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -5699,7 +5144,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5737,6 +5182,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5750,10 +5197,102 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 25
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_SIMULATOR_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-simulator-dtest\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-simulator-dtest $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test 
failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j11_cqlsh_dtests_py311_offheap:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
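The j8_simulator_dtests_repeat job added above, like the other *_repeat jobs, divides the configured iteration count across its parallel containers and hands any remainder to the lowest-index nodes. A minimal sketch of that arithmetic, with example values standing in for the real environment and CircleCI-provided variables:

    # Example values only; in the job these come from the environment and from CircleCI.
    REPEATED_SIMULATOR_DTESTS_COUNT=500
    CIRCLE_NODE_TOTAL=25      # job parallelism above
    CIRCLE_NODE_INDEX=3       # index of this container
    count=$((REPEATED_SIMULATOR_DTESTS_COUNT / CIRCLE_NODE_TOTAL))   # 500 / 25 = 20
    if ((CIRCLE_NODE_INDEX < (REPEATED_SIMULATOR_DTESTS_COUNT % CIRCLE_NODE_TOTAL))); then
      count=$((count + 1))    # remainder iterations go to the lowest-index containers
    fi
    echo "$count"             # 20 here, since 500 splits evenly across 25 containers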
@@ -5807,7 +5346,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -5845,6 +5384,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -5858,6 +5399,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -5897,111 +5439,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-system-keyspace-directory
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-system-keyspace-directory $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-system-keyspace-directory\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-system-keyspace-directory $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n   
   if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
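The regenerated repeat command above now also prepares an optional vnodes argument, which is new in this revision (the REPEATED_ANT_TEST_VNODES variable added throughout appears to drive the same toggle for the parameterised testsome job). A minimal sketch of the toggle, with a placeholder test class name:

    # Sketch of the optional vnodes argument built inside the repeat commands.
    vnodes=false              # hard-coded per job in the generated config
    vnodes_args=""
    if [ "$vnodes" = true ]; then
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    # The argument is then appended to the ant call, e.g. (SomeTest is a placeholder):
    #   ant test-jvm-dtest-some -Dtest.name=SomeTest $vnodes_args -Dno-build-test=true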
@@ -6038,6 +5476,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6051,6 +5491,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6081,7 +5522,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -6169,6 +5610,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6182,6 +5625,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6239,7 +5683,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -6277,6 +5721,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6290,6 +5736,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6328,111 +5775,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=stress-test-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant stress-test-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_STRESS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=stress-test-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant stress-test-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if 
[[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
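Each entry in the REPEATED_* lists may carry an optional method name after a '#'. A small sketch of the split performed in the repeat commands above, using a made-up test name:

    # Made-up test name; the repeat scripts receive these via the REPEATED_* variables.
    test="org.apache.cassandra.SomeTest#someMethod"
    if [[ $test =~ "#" ]]; then
      class=${test%"#"*}      # org.apache.cassandra.SomeTest
      method=${test#*"#"}     # someMethod
    else
      class=$test
      method=""
    fi
    echo "class=$class method=$method"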
@@ -6469,6 +5812,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6482,6 +5827,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6512,7 +5858,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -6600,6 +5946,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6613,6 +5961,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6669,7 +6018,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -6707,6 +6056,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6720,6 +6071,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6757,7 +6109,7 @@
     - run:
         name: Run dtests (j8_upgradetests_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_upgradetests_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_upgradetests_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_upgradetests_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -6791,6 +6143,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6804,6 +6158,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -6862,7 +6217,7 @@
     - run:
         name: Run dtests (j11_dtests_offheap)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -6896,6 +6251,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -6909,6 +6266,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -6948,111 +6306,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_UPGRADE_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_UPGRADE_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on 
test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
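Whether ant receives the short or the fully qualified class name depends on the target; the repeat commands strip the package with a suffix expansion for the unit-style targets (the list now also covers test-simulator-dtest). A minimal sketch with an illustrative class name, mirroring the target check in the script above:

    # Illustrative class name; the targets listed match the script above.
    class="org.apache.cassandra.distributed.upgrade.SomeUpgradeTest"
    target="test-jvm-dtest-some"
    case "$target" in
      test|test-cdc|test-compression|test-system-keyspace-directory|fqltool-test|long-test|stress-test|test-simulator-dtest)
        name_arg="-Dtest.name=${class##*.}"   # short name, e.g. SomeUpgradeTest
        ;;
      *)
        name_arg="-Dtest.name=$class"         # fully qualified, as used by test-jvm-dtest-some
        ;;
    esac
    echo "$name_arg"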
@@ -7089,6 +6343,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7102,6 +6358,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7158,7 +6415,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -7196,6 +6453,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7209,6 +6468,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7248,111 +6508,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
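The repeated-run command above splits REPEATED_JVM_DTESTS_COUNT iterations across the CIRCLE_NODE_TOTAL parallel containers, with the first REPEATED_JVM_DTESTS_COUNT % CIRCLE_NODE_TOTAL runners each taking one extra iteration so the total is preserved. A minimal sketch of that arithmetic, using hypothetical values (500 iterations over 3 containers):

    # hypothetical values; the real job reads these from the CircleCI environment
    REPEATED_JVM_DTESTS_COUNT=500
    CIRCLE_NODE_TOTAL=3
    for CIRCLE_NODE_INDEX in 0 1 2; do
      count=$((REPEATED_JVM_DTESTS_COUNT / CIRCLE_NODE_TOTAL))                        # 166
      if ((CIRCLE_NODE_INDEX < (REPEATED_JVM_DTESTS_COUNT % CIRCLE_NODE_TOTAL))); then
        count=$((count + 1))                                                          # runners 0 and 1 pick up the remainder
      fi
      echo "runner $CIRCLE_NODE_INDEX runs $count iterations"                         # 167 + 167 + 166 = 500
    done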
@@ -7389,6 +6545,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7402,6 +6560,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7459,7 +6618,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
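These dtest steps pipe pytest through tee to capture stdout, so without "set -o pipefail" the step's exit status would be tee's (always 0) and CircleCI would mark a failing pytest run as green; with pipefail the non-zero pytest status propagates. A minimal illustration, with "false" standing in for a failing pytest:

    ( false | tee /tmp/out.txt ); echo $?                     # prints 0 -- the failure is masked by tee
    ( set -o pipefail && false | tee /tmp/out.txt ); echo $?  # prints 1 -- the failure is reported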
@@ -7497,6 +6656,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7510,6 +6671,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -7549,111 +6711,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_LONG} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=long-testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant long-testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_LONG} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=long-testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant long-testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
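The repeated-test scripts above accept entries of the form Class#method and split them with bash parameter expansion; unit-style targets (test, test-cdc, long-test, stress-test, ...) pass only the short class name in -Dtest.name, while the remaining targets pass the fully qualified name. A sketch with a hypothetical test entry:

    test="org.apache.cassandra.distributed.test.SomeTest#testMethod"   # hypothetical entry
    class=${test%"#"*}                    # org.apache.cassandra.distributed.test.SomeTest
    method=${test#*"#"}                   # testMethod
    name_arg="-Dtest.name=${class##*.}"   # SomeTest -- short form used by targets such as "test" or "long-test"
    methods_arg="-Dtest.methods=$method"
    # JVM dtest targets would instead carry the fully qualified class name in name_arg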
@@ -7690,6 +6748,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7703,6 +6763,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7739,7 +6800,7 @@
     - run:
         name: Run dtests (j8_dtests_offheap)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\"\ncat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -7773,6 +6834,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7786,6 +6849,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -7854,7 +6918,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -7889,6 +6953,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -7902,13 +6968,14 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j11_jvm_dtests:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: medium
+    resource_class: large
     working_directory: ~/
     shell: /bin/bash -eo pipefail -l
     parallelism: 10
@@ -7970,7 +7037,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -8005,6 +7072,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8018,6 +7087,99 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - CASSANDRA_USE_JDK11: true
+  j11_jvm_dtests_vnode_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 25
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=true\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_LARGE_DTESTS: null
+    - REPEATED_LARGE_DTESTS_COUNT: 100
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
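Each iteration of the repeated run files its artifacts under a passes/fails directory keyed by the iteration number, zero-padded by "seq -w" so the directories sort correctly; the store_test_results and store_artifacts steps above then collect those trees. A sketch of where one failing iteration lands (hypothetical test name and index):

    status="fails"                          # set when the ant invocation exits non-zero
    i=07                                    # zero-padded by: for i in $(seq -w 1 $count)
    dest=/tmp/results/repeated_utests/stdout/${status}/${i}
    mkdir -p "$dest" && mv stdout.txt "$dest/SomeTest.txt"   # exported as the "stdout" artifact
    # JUnit XML and log files are moved the same way from build/test/output/ and build/test/logs/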
@@ -8111,6 +7273,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8124,6 +7288,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8161,7 +7326,7 @@
     - run:
         name: Run dtests (j8_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8195,6 +7360,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8208,117 +7375,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-no-vnodes:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: large
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 50
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirements.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_without_vnodes)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_without_vnodes)***\"\nset -eo pipefail && ./run_dtests.py --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_without_vnodes_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_without_vnodes_raw /tmp/all_dtest_tests_j11_without_vnodes\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_without_vnodes_raw > /tmp/all_dtest_tests_j11_without_vnodes || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_without_vnodes > /tmp/split_dtest_tests_j11_without_vnodes.txt\ncat /tmp/split_dtest_tests_j11_without_vnodes.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_without_vnodes_final.txt\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n"
-    - run:
-        name: Run dtests (j11_without_vnodes)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt"
-          cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_without_vnodes
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_without_vnodes_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j8_dtests_vnode:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -8352,7 +7412,7 @@
     - run:
         name: Run dtests (j8_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j8_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8386,6 +7446,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8399,6 +7461,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -8417,8 +7480,8 @@
         command: |
           export PATH=$JAVA_HOME/bin:$PATH
           time mv ~/cassandra /tmp
-          cd /tmp/cassandra/pylib
-          ./cassandra-cqlsh-tests.sh ..
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/pylib
@@ -8447,6 +7510,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8460,6 +7525,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8499,111 +7565,7 @@
     - run:
         name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_JVM_DTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-jvm-dtest-some
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-jvm-dtest-some $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_JVM_DTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-jvm-dtest-some\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-jvm-dtest-some $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -8640,6 +7602,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8653,6 +7617,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -8711,7 +7676,7 @@
     - run:
         name: Run dtests (j11_without_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_without_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_without_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_without_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -8745,6 +7710,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8758,6 +7725,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -8838,7 +7806,8 @@
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
-                    $target == "stress-test" ]]; then
+                    $target == "stress-test" || \
+                    $target == "test-simulator-dtest" ]]; then
                 name="-Dtest.name=$class_name"
               else
                 name="-Dtest.name=$class_path"
@@ -8851,6 +7820,12 @@
                 methods="-Dtest.methods=${REPEATED_ANT_TEST_METHODS}"
               fi
 
+              # Prepare the JVM dtests vnodes argument, which is optional
+              vnodes_args=""
+              if ${REPEATED_ANT_TEST_VNODES}; then
+                vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+              fi
+
               # Run the test target as many times as requested collecting the exit code,
               # stopping the iteration only if stop_on_failure is set.
               exit_code="$?"
@@ -8860,7 +7835,7 @@
 
                 # run the test
                 status="passes"
-                if !( set -o pipefail && ant $target $name $methods -Dno-build-test=true | tee stdout.txt ); then
+                if !( set -o pipefail && ant $target $name $methods $vnodes_args -Dno-build-test=true | tee stdout.txt ); then
                   status="fails"
                   exit_code=1
                 fi
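The new REPEATED_ANT_TEST_VNODES variable is consumed directly as a shell command in the snippet above ("if ${REPEATED_ANT_TEST_VNODES}; then"), so its literal value true or false decides whether the 16-token JVM argument is appended to the repeated ant invocation. A minimal sketch with hypothetical test arguments:

    REPEATED_ANT_TEST_VNODES=true           # as set in the job environment
    vnodes_args=""
    if ${REPEATED_ANT_TEST_VNODES}; then    # the value itself runs as the command "true" or "false"
      vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
    fi
    echo ant testsome "-Dtest.name=SomeTest" $vnodes_args -Dno-build-test=true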
@@ -8931,6 +7906,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -8944,6 +7921,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9001,6 +7979,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9014,6 +7994,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9082,7 +8063,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist-cdc -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit -Dno-build-test=true
+          ant testclasslist-cdc   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=unit -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -9117,6 +8098,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9130,6 +8113,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9186,7 +8170,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_dtests_offheap_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -9224,6 +8208,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9237,6 +8223,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9267,7 +8254,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_DTESTS}" == "<nil>" ]; then
@@ -9355,6 +8342,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9368,6 +8357,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9436,7 +8426,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant testclasslist -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=distributed -Dno-build-test=true
+          ant testclasslist   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=distributed -Dno-build-test=true
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -9471,6 +8461,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9484,6 +8476,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9576,6 +8569,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9589,6 +8584,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -9645,7 +8641,7 @@
           # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
           export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j8_with_vnodes_final.txt`
           if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_j8_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
           else
             echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
             (exit 1)
@@ -9683,6 +8679,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9696,117 +8694,10 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-  j11_cqlsh-dtests-py2-offheap:
-    docker:
-    - image: apache/cassandra-testing-ubuntu2004-java11:latest
-    resource_class: large
-    working_directory: ~/
-    shell: /bin/bash -eo pipefail -l
-    parallelism: 50
-    steps:
-    - attach_workspace:
-        at: /home/cassandra
-    - run:
-        name: Clone Cassandra dtest Repository (via git)
-        command: |
-          git clone --single-branch --branch $DTEST_BRANCH --depth 1 $DTEST_REPO ~/cassandra-dtest
-    - run:
-        name: Configure virtualenv and python Dependencies
-        command: |
-          # note, this should be super quick as all dependencies should be pre-installed in the docker image
-          # if additional dependencies were added to requirmeents.txt and the docker image hasn't been updated
-          # we'd have to install it here at runtime -- which will make things slow, so do yourself a favor and
-          # rebuild the docker image! (it automatically pulls the latest requirements.txt on build)
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          pip3 install --exists-action w --upgrade -r ~/cassandra-dtest/requirements.txt
-          pip3 uninstall -y cqlsh
-          pip3 freeze
-    - run:
-        name: Determine Tests to Run (j11_dtests_offheap)
-        no_output_timeout: 5m
-        command: "# reminder: this code (along with all the steps) is independently executed on every circle container\n# so the goal here is to get the circleci script to return the tests *this* container will run\n# which we do via the `circleci` cli tool.\n\ncd cassandra-dtest\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\n\nif [ -n '' ]; then\n  export \nfi\n\necho \"***Collected DTests (j11_dtests_offheap)***\"\nset -eo pipefail && ./run_dtests.py --use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql' --dtest-print-tests-only --dtest-print-tests-output=/tmp/all_dtest_tests_j11_dtests_offheap_raw --cassandra-dir=../cassandra\nif [ -z '' ]; then\n  mv /tmp/all_dtest_tests_j11_dtests_offheap_raw /tmp/all_dtest_tests_j11_dtests_offheap\nelse\n  grep -e '' /tmp/all_dtest_tests_j11_dtests_offheap_raw > /tmp/all_dtest_tests_j11_dtests_offheap || { echo \"Filter did not match any tests! Exiting build.\"; exit 0; }\nfi\nset -eo pipefail && circleci tests split --split-by=timings --timings-type=classname /tmp/all_dtest_tests_j11_dtests_offheap > /tmp/split_dtest_tests_j11_dtests_offheap.txt\ncat /tmp/split_dtest_tests_j11_dtests_offheap.txt | tr '\\n' ' ' > /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\ncat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt\n"
-    - run:
-        name: Run dtests (j11_dtests_offheap)
-        no_output_timeout: 15m
-        command: |
-          echo "cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt"
-          cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt
-
-          source ~/env3.6/bin/activate
-          export PATH=$JAVA_HOME/bin:$PATH
-          if [ -n 'CQLSH_PYTHON=/usr/bin/python2.7' ]; then
-            export CQLSH_PYTHON=/usr/bin/python2.7
-          fi
-
-          java -version
-          cd ~/cassandra-dtest
-          mkdir -p /tmp/dtest
-
-          echo "env: $(env)"
-          echo "** done env"
-          mkdir -p /tmp/results/dtests
-          # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
-          export SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_dtests_offheap_final.txt`
-          if [ ! -z "$SPLIT_TESTS" ]; then
-            set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_dtests_offheap.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
-          else
-            echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
-            (exit 1)
-          fi
-    - store_test_results:
-        path: /tmp/results
-    - store_artifacts:
-        path: /tmp/dtest
-        destination: dtest_j11_dtests_offheap
-    - store_artifacts:
-        path: ~/cassandra-dtest/logs
-        destination: dtest_j11_dtests_offheap_logs
-    environment:
-    - ANT_HOME: /usr/share/ant
-    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
-    - LANG: en_US.UTF-8
-    - KEEP_TEST_DIR: true
-    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
-    - PYTHONIOENCODING: utf-8
-    - PYTHONUNBUFFERED: true
-    - CASS_DRIVER_NO_EXTENSIONS: true
-    - CASS_DRIVER_NO_CYTHON: true
-    - CASSANDRA_SKIP_SYNC: true
-    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
-    - DTEST_BRANCH: trunk
-    - CCM_MAX_HEAP_SIZE: 1024M
-    - CCM_HEAP_NEWSIZE: 256M
-    - REPEATED_TESTS_STOP_ON_FAILURE: false
-    - REPEATED_UTESTS: null
-    - REPEATED_UTESTS_COUNT: 500
-    - REPEATED_UTESTS_FQLTOOL: null
-    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
-    - REPEATED_UTESTS_LONG: null
-    - REPEATED_UTESTS_LONG_COUNT: 100
-    - REPEATED_UTESTS_STRESS: null
-    - REPEATED_UTESTS_STRESS_COUNT: 500
-    - REPEATED_JVM_DTESTS: null
-    - REPEATED_JVM_DTESTS_COUNT: 500
-    - REPEATED_JVM_UPGRADE_DTESTS: null
-    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
-    - REPEATED_DTESTS: null
-    - REPEATED_DTESTS_COUNT: 500
-    - REPEATED_LARGE_DTESTS: null
-    - REPEATED_LARGE_DTESTS_COUNT: 100
-    - REPEATED_UPGRADE_DTESTS: null
-    - REPEATED_UPGRADE_DTESTS_COUNT: 25
-    - REPEATED_ANT_TEST_TARGET: testsome
-    - REPEATED_ANT_TEST_CLASS: null
-    - REPEATED_ANT_TEST_METHODS: null
-    - REPEATED_ANT_TEST_COUNT: 500
-    - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
-    - CASSANDRA_USE_JDK11: true
   j11_unit_tests_repeat:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11:latest
@@ -9842,111 +8733,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
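The repeated-run command above spreads the requested iteration count across the CircleCI parallel containers: each container takes the integer quotient of the count over CIRCLE_NODE_TOTAL, and containers whose index is below the remainder take one extra iteration so the total is preserved. A minimal sketch of that arithmetic, with 500 iterations and 8 containers chosen purely for illustration:

    # hypothetical values: 500 iterations spread over 8 containers
    REPEATED_UTESTS_COUNT=500
    CIRCLE_NODE_TOTAL=8
    CIRCLE_NODE_INDEX=2
    count=$((REPEATED_UTESTS_COUNT / CIRCLE_NODE_TOTAL))    # 62 per container
    if ((CIRCLE_NODE_INDEX < (REPEATED_UTESTS_COUNT % CIRCLE_NODE_TOTAL))); then
      count=$((count + 1))                                  # indexes 0-3 run 63, covering the remainder of 4
    fi
    echo "container $CIRCLE_NODE_INDEX runs $count iterations"   # 4*63 + 4*62 = 500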
@@ -9983,6 +8770,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -9996,6 +8785,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10054,6 +8844,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10067,6 +8859,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10085,8 +8878,8 @@
         command: |
           export PATH=$JAVA_HOME/bin:$PATH
           time mv ~/cassandra /tmp
-          cd /tmp/cassandra/pylib
-          ./cassandra-cqlsh-tests.sh ..
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: 15m
     - store_test_results:
         path: /tmp/cassandra/pylib
@@ -10115,6 +8908,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10128,6 +8923,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10186,7 +8982,7 @@
     - run:
         name: Run dtests (j11_with_vnodes)
         no_output_timeout: 15m
-        command: "echo \"cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
+        command: "echo \"cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\"\ncat /tmp/split_dtest_tests_j11_with_vnodes_final.txt\n\nsource ~/env3.6/bin/activate\nexport PATH=$JAVA_HOME/bin:$PATH\nif [ -n '' ]; then\n  export \nfi\n\njava -version\ncd ~/cassandra-dtest\nmkdir -p /tmp/dtest\n\necho \"env: $(env)\"\necho \"** done env\"\nmkdir -p /tmp/results/dtests\n# we need the \"set -o pipefail\" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee\nexport SPLIT_TESTS=`cat /tmp/split_dtest_tests_j11_with_vnodes_final.txt`\nif [ ! -z \"$SPLIT_TESTS\" ]; then\n  set -o pipefail && cd ~/cassandra-dtest && pytest --use-vnodes --num-tokens=16 --skip-resource-intensive-tests --log-level=\"DEBUG\" --junit-xml=/tmp/results/dtests/pytest_result_j11_with_vnodes.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt\nelse\n  echo \"Tune your parallelism, there are more containers than test classes. Nothing to do in this container\"\n  (exit 1)\nfi\n"
     - store_test_results:
         path: /tmp/results
     - store_artifacts:
@@ -10220,6 +9016,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10233,6 +9031,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10272,111 +9071,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=test-cdc
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant test-cdc $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=test-cdc\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant test-cdc $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} 
= true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -10413,6 +9108,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10426,6 +9123,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10456,7 +9154,7 @@
           pip3 uninstall -y cqlsh
           pip3 freeze
     - run:
-        name: Run repeated Python dtest
+        name: Run repeated Python DTests
         no_output_timeout: 15m
         command: |
           if [ "${REPEATED_LARGE_DTESTS}" == "<nil>" ]; then
@@ -10544,6 +9242,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10557,6 +9257,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10595,111 +9296,7 @@
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: |
-          set -x
-          export PATH=$JAVA_HOME/bin:$PATH
-          time mv ~/cassandra /tmp
-          cd /tmp/cassandra
-          if [ -d ~/dtest_jars ]; then
-            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
-          fi
-
-          # Calculate the number of test iterations to be run by the current parallel runner.
-          count=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))
-          if (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then
-            count=$((count+1))
-          fi
-
-          # Put manually specified tests and automatically detected tests together, removing duplicates
-          tests=$(echo ${REPEATED_UTESTS_LONG} | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
-          echo "Tests to be repeated: ${tests}"
-
-          # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
-          target=long-testsome
-          testtag=""
-          if [[ $target == "test-cdc" ]]; then
-            testtag="cdc"
-          elif [[ $target == "test-compression" ]]; then
-            testtag="compression"
-          elif [[ $target == "test-system-keyspace-directory" ]]; then
-            testtag="system_keyspace_directory"
-          fi
-
-          # Run each test class as many times as requested.
-          exit_code="$?"
-          for test in $tests; do
-
-              # Split class and method names from the test name
-              if [[ $test =~ "#" ]]; then
-                class=${test%"#"*}
-                method=${test#*"#"}
-              else
-                class=$test
-                method=""
-              fi
-
-              # Prepare the -Dtest.name argument.
-              # It can be the fully qualified class name or the short class name, depending on the target.
-              if [[ $target == "test" || \
-                    $target == "test-cdc" || \
-                    $target == "test-compression" || \
-                    $target == "test-system-keyspace-directory" || \
-                    $target == "fqltool-test" || \
-                    $target == "long-test" || \
-                    $target == "stress-test" ]]; then
-                name_arg="-Dtest.name=${class##*.}"
-              else
-                name_arg="-Dtest.name=$class"
-              fi
-
-              # Prepare the -Dtest.methods argument, which is optional
-              if [[ $method == "" ]]; then
-                methods_arg=""
-              else
-                methods_arg="-Dtest.methods=$method"
-              fi
-
-              for i in $(seq -w 1 $count); do
-                echo "Running test $test, iteration $i of $count"
-
-                # run the test
-                status="passes"
-                if !( set -o pipefail && \
-                      ant long-testsome $name_arg $methods_arg -Dno-build-test=true | \
-                      tee stdout.txt \
-                    ); then
-                  status="fails"
-                  exit_code=1
-                fi
-
-                # move the stdout output file
-                dest=/tmp/results/repeated_utests/stdout/${status}/${i}
-                mkdir -p $dest
-                mv stdout.txt $dest/${test}.txt
-
-                # move the XML output files
-                source=build/test/output/${testtag}
-                dest=/tmp/results/repeated_utests/output/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # move the log files
-                source=build/test/logs/${testtag}
-                dest=/tmp/results/repeated_utests/logs/${status}/${i}
-                mkdir -p $dest
-                if [[ -d $source && -n "$(ls $source)" ]]; then
-                  mv $source/* $dest/
-                fi
-
-                # maybe stop iterations on test failure
-                if [[ ${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then
-                  break
-                fi
-              done
-          done
-          (exit ${exit_code})
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically detected tests together, removing duplicates\ntests=$(echo ${REPEATED_UTESTS_LONG} | sed -e \"s/<nil>//\" | sed -e \"s/ //\" | tr \",\" \"\\n\" | tr \" \" \"\\n\" | sort -n | uniq -u)\necho \"Tests to be repeated: ${tests}\"\n\n# Prepare the JVM dtests vnodes argument, which is optional.\nvnodes=false\nvnodes_args=\"\"\nif [ \"$vnodes\" = true ] ; then\n  vnodes_args=\"-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'\"\nfi\n\n# Prepare the testtag for the target, used by the test macro in build.xml to group the output files\ntarget=long-testsome\ntesttag=\"\"\nif [[ $target == \"test-cdc\" ]]; then\n  testtag=\"cdc\"\nelif [[ $target == \"test-compression\" ]]; then\n  testtag=\"compression\"\nelif [[ $target == \"test-system-keyspace-directory\" ]]; then\n  testtag=\"system_keyspace_directory\"\nfi\n\n# Run each test class as many times as requested.\nexit_code=\"$?\"\nfor test in $tests; do\n\n    # Split class and method names from the test name\n    if [[ $test =~ \"#\" ]]; then\n      class=${test%\"#\"*}\n      method=${test#*\"#\"}\n    else\n      class=$test\n      method=\"\"\n    fi\n\n    # Prepare the -Dtest.name argument.\n    # It can be the fully qualified class name or the short class name, depending on the target.\n    if [[ $target == \"test\" || \\\n          $target == \"test-cdc\" || \\\n          $target == \"test-compression\" || \\\n          $target == \"test-system-keyspace-directory\" || \\\n          $target == \"fqltool-test\" || \\\n          $target == \"long-test\" || \\\n          $target == \"stress-test\" || \\\n          $target == \"test-simulator-dtest\" ]]; then\n      name_arg=\"-Dtest.name=${class##*.}\"\n    else\n      name_arg=\"-Dtest.name=$class\"\n    fi\n\n    # Prepare the -Dtest.methods argument, which is optional\n    if [[ $method == \"\" ]]; then\n      methods_arg=\"\"\n    else\n      methods_arg=\"-Dtest.methods=$method\"\n    fi\n\n    for i in $(seq -w 1 $count); do\n      echo \"Running test $test, iteration $i of $count\"\n\n      # run the test\n      status=\"passes\"\n      if !( set -o pipefail && \\\n            ant long-testsome $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \\\n            tee stdout.txt \\\n          ); then\n        status=\"fails\"\n        exit_code=1\n      fi\n\n      # move the stdout output file\n      dest=/tmp/results/repeated_utests/stdout/${status}/${i}\n      mkdir -p $dest\n      mv stdout.txt $dest/${test}.txt\n\n      # move the XML output files\n      source=build/test/output/${testtag}\n      dest=/tmp/results/repeated_utests/output/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n\n      # move the log files\n      source=build/test/logs/${testtag}\n      dest=/tmp/results/repeated_utests/logs/${status}/${i}\n      mkdir -p $dest\n      if [[ -d $source && -n \"$(ls $source)\" ]]; then\n        mv $source/* $dest/\n      fi\n      \n      # maybe stop iterations on test failure\n      if [[ 
${REPEATED_TESTS_STOP_ON_FAILURE} = true ]] && (( $exit_code > 0 )); then\n        break\n      fi\n    done\ndone\n(exit ${exit_code})\n"
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -10736,6 +9333,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10749,6 +9348,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
@@ -10770,7 +9370,7 @@
           cd ~/cassandra
           mkdir ~/dtest_jars
           git remote add apache https://github.com/apache/cassandra.git
-          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 trunk; do
+          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 cassandra-4.1 trunk; do
             # check out the correct cassandra version:
             git remote set-branches --add apache '$branch'
             git fetch --depth 1 apache $branch
@@ -10839,6 +9439,8 @@
     - REPEATED_UTESTS_LONG_COUNT: 100
     - REPEATED_UTESTS_STRESS: null
     - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
     - REPEATED_JVM_DTESTS: null
     - REPEATED_JVM_DTESTS_COUNT: 500
     - REPEATED_JVM_UPGRADE_DTESTS: null
@@ -10852,6 +9454,7 @@
     - REPEATED_ANT_TEST_TARGET: testsome
     - REPEATED_ANT_TEST_CLASS: null
     - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
     - REPEATED_ANT_TEST_COUNT: 500
     - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
@@ -10876,12 +9479,54 @@
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - start_j8_jvm_dtests_vnode:
+        type: approval
+    - j8_jvm_dtests_vnode:
+        requires:
+        - start_j8_jvm_dtests_vnode
+        - j8_build
+    - start_j11_jvm_dtests:
+        type: approval
+    - j11_jvm_dtests:
+        requires:
+        - start_j11_jvm_dtests
+        - j8_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+        - start_j11_jvm_dtests_vnode
+        - j8_build
+    - start_j8_simulator_dtests:
+        type: approval
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_simulator_dtests
+        - j8_build
     - start_j8_cqlshlib_tests:
         type: approval
     - j8_cqlshlib_tests:
         requires:
         - start_j8_cqlshlib_tests
         - j8_build
+    - start_j8_cqlshlib_cython_tests:
+        type: approval
+    - j8_cqlshlib_cython_tests:
+        requires:
+        - start_j8_cqlshlib_cython_tests
+        - j8_build
+    - start_j11_cqlshlib_tests:
+        type: approval
+    - j11_cqlshlib_tests:
+        requires:
+        - start_j11_cqlshlib_tests
+        - j8_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - start_j11_cqlshlib_cython_tests
+        - j8_build
     - start_j11_unit_tests:
         type: approval
     - j11_unit_tests:
@@ -10990,18 +9635,6 @@
         requires:
         - start_j8_dtests_offheap
         - j8_build
-    - start_j8_dtests_large:
-        type: approval
-    - j8_dtests_large:
-        requires:
-        - start_j8_dtests_large
-        - j8_build
-    - start_j8_dtests_large_vnode:
-        type: approval
-    - j8_dtests_large_vnode:
-        requires:
-        - start_j8_dtests_large_vnode
-        - j8_build
     - start_j11_dtests:
         type: approval
     - j11_dtests:
@@ -11020,6 +9653,18 @@
         requires:
         - start_j11_dtests_offheap
         - j8_build
+    - start_j8_dtests_large:
+        type: approval
+    - j8_dtests_large:
+        requires:
+        - start_j8_dtests_large
+        - j8_build
+    - start_j8_dtests_large_vnode:
+        type: approval
+    - j8_dtests_large_vnode:
+        requires:
+        - start_j8_dtests_large_vnode
+        - j8_build
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -11032,15 +9677,15 @@
         requires:
         - start_j11_dtests_large_vnode
         - j8_build
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
-        - start_upgrade_tests
+        - start_upgrade_dtests
         - j8_build
     - start_j8_cqlsh_tests:
         type: approval
-    - j8_cqlsh-dtests-py2-with-vnodes:
+    - j8_cqlsh_dtests_py3:
         requires:
         - start_j8_cqlsh_tests
         - j8_build
@@ -11048,22 +9693,6 @@
         requires:
         - start_j8_cqlsh_tests
         - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py3:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
         - start_j8_cqlsh_tests
@@ -11072,12 +9701,16 @@
         requires:
         - start_j8_cqlsh_tests
         - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
     - start_j8_cqlsh_tests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j8_cqlsh_tests_offheap
-        - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
         - start_j8_cqlsh_tests_offheap
@@ -11092,7 +9725,7 @@
         - j8_build
     - start_j11_cqlsh_tests:
         type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - j11_cqlsh_dtests_py3:
         requires:
         - start_j11_cqlsh_tests
         - j8_build
@@ -11100,22 +9733,6 @@
         requires:
         - start_j11_cqlsh_tests
         - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
         - start_j11_cqlsh_tests
@@ -11124,12 +9741,16 @@
         requires:
         - start_j11_cqlsh_tests
         - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j8_build
     - start_j11_cqlsh_tests_offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh_tests_offheap
-        - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh_tests_offheap
@@ -11152,12 +9773,33 @@
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
+    - j8_jvm_dtests_vnode:
+        requires:
+        - j8_build
+    - j11_jvm_dtests:
+        requires:
+        - j8_build
+    - j11_jvm_dtests_vnode:
+        requires:
+        - j8_build
     - j8_cqlshlib_tests:
         requires:
         - j8_build
+    - j8_cqlshlib_cython_tests:
+        requires:
+        - j8_build
+    - j11_cqlshlib_tests:
+        requires:
+        - j8_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - j8_build
     - j11_unit_tests:
         requires:
         - j8_build
@@ -11273,42 +9915,32 @@
         requires:
         - start_j11_dtests_large
         - j8_build
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
         - j8_build
-        - start_upgrade_tests
-    - j8_cqlsh-dtests-py2-with-vnodes:
+        - start_upgrade_dtests
+    - j8_cqlsh_dtests_py3:
         requires:
         - j8_build
     - j8_cqlsh_dtests_py3_vnode:
         requires:
         - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-        - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-        - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j8_build
-    - j8_cqlsh_dtests_py3:
-        requires:
-        - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
         - j8_build
     - j8_cqlsh_dtests_py311:
         requires:
         - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+        - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+        - j8_build
     - start_j8_cqlsh_dtests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j8_cqlsh_dtests_offheap
-        - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
         - start_j8_cqlsh_dtests_offheap
@@ -11321,36 +9953,26 @@
         requires:
         - start_j8_cqlsh_dtests_offheap
         - j8_build
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - j11_cqlsh_dtests_py3:
         requires:
         - j8_build
     - j11_cqlsh_dtests_py3_vnode:
         requires:
         - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j8_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
         - j8_build
     - j11_cqlsh_dtests_py311:
         requires:
         - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - j8_build
     - start_j11_cqlsh-dtests-offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh-dtests-offheap
@@ -11382,12 +10004,24 @@
         requires:
         - start_j11_jvm_dtests
         - j11_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+        - start_j11_jvm_dtests_vnode
+        - j11_build
     - start_j11_cqlshlib_tests:
         type: approval
     - j11_cqlshlib_tests:
         requires:
         - start_j11_cqlshlib_tests
         - j11_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - start_j11_cqlshlib_cython_tests
+        - j11_build
     - start_j11_dtests:
         type: approval
     - j11_dtests:
@@ -11406,58 +10040,6 @@
         requires:
         - start_j11_dtests_offheap
         - j11_build
-    - start_j11_cqlsh_tests:
-        type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py3_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py38:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - j11_cqlsh_dtests_py311:
-        requires:
-        - start_j11_cqlsh_tests
-        - j11_build
-    - start_j11_cqlsh-dtests-offheap:
-        type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py3_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py38_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
-    - j11_cqlsh_dtests_py311_offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -11470,6 +10052,46 @@
         requires:
         - start_j11_dtests_large_vnode
         - j11_build
+    - start_j11_cqlsh_tests:
+        type: approval
+    - j11_cqlsh_dtests_py3:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py3_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py38:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py311:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j11_build
+    - start_j11_cqlsh-dtests-offheap:
+        type: approval
+    - j11_cqlsh_dtests_py3_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
+    - j11_cqlsh_dtests_py38_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
+    - j11_cqlsh_dtests_py311_offheap:
+        requires:
+        - start_j11_cqlsh-dtests-offheap
+        - j11_build
     - start_j11_utests_long:
         type: approval
     - j11_utests_long:
@@ -11519,15 +10141,15 @@
     - j11_jvm_dtests:
         requires:
         - j11_build
-    - j11_cqlshlib_tests:
-        requires:
-        - j11_build
-    - j11_jvm_dtests:
+    - j11_jvm_dtests_vnode:
         requires:
         - j11_build
     - j11_cqlshlib_tests:
         requires:
         - j11_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+        - j11_build
     - j11_dtests:
         requires:
         - j11_build
@@ -11540,36 +10162,36 @@
         requires:
         - start_j11_dtests_offheap
         - j11_build
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - start_j11_dtests_large:
+        type: approval
+    - j11_dtests_large:
+        requires:
+        - start_j11_dtests_large
+        - j11_build
+    - j11_dtests_large_vnode:
+        requires:
+        - start_j11_dtests_large
+        - j11_build
+    - j11_cqlsh_dtests_py3:
         requires:
         - j11_build
     - j11_cqlsh_dtests_py3_vnode:
         requires:
         - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-        - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-        - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-        - j11_build
     - j11_cqlsh_dtests_py38:
         requires:
         - j11_build
     - j11_cqlsh_dtests_py311:
         requires:
         - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+        - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+        - j11_build
     - start_j11_cqlsh-dtests-offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-        - start_j11_cqlsh-dtests-offheap
-        - j11_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
         - start_j11_cqlsh-dtests-offheap
@@ -11582,16 +10204,6 @@
         requires:
         - start_j11_cqlsh-dtests-offheap
         - j11_build
-    - start_j11_dtests_large:
-        type: approval
-    - j11_dtests_large:
-        requires:
-        - start_j11_dtests_large
-        - j11_build
-    - j11_dtests_large_vnode:
-        requires:
-        - start_j11_dtests_large
-        - j11_build
     - start_utests_long:
         type: approval
     - j11_utests_long:
diff --git a/.circleci/config_template.yml b/.circleci/config_template.yml
index 38f5c75..4367bdc 100644
--- a/.circleci/config_template.yml
+++ b/.circleci/config_template.yml
@@ -81,6 +81,14 @@
     # The number of times that new, modified or manually specified stress unit tests should be run.
     REPEATED_UTESTS_STRESS_COUNT: 500
 
+    # Comma-separated list of tests that should be included in the repeated run for simulator dtests,
+    # in addition to automatically detected new and modified tests. For example:
+    # REPEATED_SIMULATOR_DTESTS: org.apache.cassandra.simulator.test.TrivialSimulationTest
+    # REPEATED_SIMULATOR_DTESTS: org.apache.cassandra.simulator.test.TrivialSimulationTest#trivialTest
+    REPEATED_SIMULATOR_DTESTS:
+    # The number of times that new, modified or manually specified simulator dtests should be run.
+    REPEATED_SIMULATOR_DTESTS_COUNT: 500
+
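For illustration only (a sketch, not part of the patch): with the example value above, REPEATED_SIMULATOR_DTESTS=org.apache.cassandra.simulator.test.TrivialSimulationTest, each iteration of the simulator repeat job boils down to an ant call like the one below; test-simulator-dtest is one of the targets that take only the simple class name (see the name_arg handling in run_repeated_utests further down in this patch).

    ant test-simulator-dtest -Dtest.name=TrivialSimulationTest -Dno-build-test=true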
     # Comma-separated list of tests that should be included in the repeated run for JVM dtests,
     # in addition to automatically detected new and modified tests. For example:
     # REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.PagingTest
@@ -139,6 +147,10 @@
     # REPEATED_ANT_TEST_METHODS: testCompoundPartitionKey,testStaticTable
     # Please note that some Ant targets will ignore the -Dtest.methods argument produced by this.
     REPEATED_ANT_TEST_METHODS:
+    # Whether the test iteration should use vnodes for JVM dtests (-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16').
+    # This will only be applied as a default to JVM dtests that don't provide their own initial tokens or token count,
+    # in the same way that it's done for *_jvm_dtests_vnode jobs. Ant targets other than JVM dtests will ignore this.
+    REPEATED_ANT_TEST_VNODES: false
     # The number of times that the repeated JUnit test should be run.
     REPEATED_ANT_TEST_COUNT: 500
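As a rough, hypothetical example (values chosen only for illustration, reusing the PagingTest class from the JVM dtest comment above): with REPEATED_ANT_TEST_TARGET=test-jvm-dtest-some, REPEATED_ANT_TEST_CLASS=org.apache.cassandra.distributed.test.PagingTest, no methods and REPEATED_ANT_TEST_VNODES=true, each repeated iteration would run roughly:

    ant test-jvm-dtest-some \
        -Dtest.name=org.apache.cassandra.distributed.test.PagingTest \
        -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' \
        -Dno-build-test=true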
 
@@ -258,18 +270,84 @@
         requires:
           - start_j8_jvm_dtests
           - j8_build
+    - start_j8_jvm_dtests_vnode:
+        type: approval
+    - j8_jvm_dtests_vnode:
+        requires:
+          - start_j8_jvm_dtests_vnode
+          - j8_build
     - start_j8_jvm_dtests_repeat:
         type: approval
     - j8_jvm_dtests_repeat:
         requires:
           - start_j8_jvm_dtests_repeat
           - j8_build
+    - start_j8_jvm_dtests_vnode_repeat:
+        type: approval
+    - j8_jvm_dtests_vnode_repeat:
+        requires:
+          - start_j8_jvm_dtests_vnode_repeat
+          - j8_build
+    - start_j11_jvm_dtests:
+        type: approval
+    - j11_jvm_dtests:
+        requires:
+          - start_j11_jvm_dtests
+          - j8_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+          - start_j11_jvm_dtests_vnode
+          - j8_build
+    - start_j11_jvm_dtests_repeat:
+        type: approval
+    - j11_jvm_dtests_repeat:
+        requires:
+          - start_j11_jvm_dtests_repeat
+          - j8_build
+    - start_j11_jvm_dtests_vnode_repeat:
+        type: approval
+    - j11_jvm_dtests_vnode_repeat:
+        requires:
+          - start_j11_jvm_dtests_vnode_repeat
+          - j8_build
+    - start_j8_simulator_dtests:
+        type: approval
+    - j8_simulator_dtests:
+        requires:
+          - start_j8_simulator_dtests
+          - j8_build
+    - start_j8_simulator_dtests_repeat:
+        type: approval
+    - j8_simulator_dtests_repeat:
+        requires:
+          - start_j8_simulator_dtests_repeat
+          - j8_build
     - start_j8_cqlshlib_tests:
         type: approval
     - j8_cqlshlib_tests:
         requires:
           - start_j8_cqlshlib_tests
           - j8_build
+    - start_j8_cqlshlib_cython_tests:
+        type: approval
+    - j8_cqlshlib_cython_tests:
+        requires:
+          - start_j8_cqlshlib_cython_tests
+          - j8_build
+    - start_j11_cqlshlib_tests:
+        type: approval
+    - j11_cqlshlib_tests:
+        requires:
+          - start_j11_cqlshlib_tests
+          - j8_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+          - start_j11_cqlshlib_cython_tests
+          - j8_build
     # Java 11 unit tests
     - start_j11_unit_tests:
         type: approval
@@ -446,31 +524,19 @@
         requires:
           - start_jvm_upgrade_dtests_repeat
           - j8_dtest_jars_build
-    # Java 8 dtests
+    # Python DTests
     - start_j8_dtests:
         type: approval
     - j8_dtests:
         requires:
           - start_j8_dtests
           - j8_build
-    - start_j8_dtests_repeat:
-        type: approval
-    - j8_dtests_repeat:
-        requires:
-          - start_j8_dtests_repeat
-          - j8_build
     - start_j8_dtests_vnode:
         type: approval
     - j8_dtests_vnode:
         requires:
           - start_j8_dtests_vnode
           - j8_build
-    - start_j8_dtests_vnode_repeat:
-        type: approval
-    - j8_dtests_vnode_repeat:
-        requires:
-          - start_j8_dtests_vnode_repeat
-          - j8_build
     # Java 8 off-heap dtests
     - start_j8_dtests_offheap:
         type: approval
@@ -484,7 +550,33 @@
         requires:
           - start_j8_dtests_offheap_repeat
           - j8_build
-    # Java 8 large DTests
+    # Java 11 dtests
+    - start_j11_dtests:
+        type: approval
+    - j11_dtests:
+        requires:
+        - start_j11_dtests
+        - j8_build
+    - start_j11_dtests_vnode:
+        type: approval
+    - j11_dtests_vnode:
+        requires:
+          - start_j11_dtests_vnode
+          - j8_build
+    # Java 11 off-heap dtests
+    - start_j11_dtests_offheap:
+        type: approval
+    - j11_dtests_offheap:
+        requires:
+          - start_j11_dtests_offheap
+          - j8_build
+    - start_j11_dtests_offheap_repeat:
+        type: approval
+    - j11_dtests_offheap_repeat:
+        requires:
+          - start_j11_dtests_offheap_repeat
+          - j8_build
+    # Python large DTests
     - start_j8_dtests_large:
         type: approval
     - j8_dtests_large:
@@ -509,45 +601,6 @@
         requires:
           - start_j8_dtests_large_vnode_repeat
           - j8_build
-    # Java 11 dtests
-    - start_j11_dtests:
-        type: approval
-    - j11_dtests:
-        requires:
-          - start_j11_dtests
-          - j8_build
-    - start_j11_dtests_repeat:
-        type: approval
-    - j11_dtests_repeat:
-        requires:
-          - start_j11_dtests_repeat
-          - j8_build
-    - start_j11_dtests_vnode:
-        type: approval
-    - j11_dtests_vnode:
-        requires:
-          - start_j11_dtests_vnode
-          - j8_build
-    - start_j11_dtests_vnode_repeat:
-        type: approval
-    - j11_dtests_vnode_repeat:
-        requires:
-          - start_j11_dtests_vnode_repeat
-          - j8_build
-    # Java 11 off-heap dtests
-    - start_j11_dtests_offheap:
-        type: approval
-    - j11_dtests_offheap:
-        requires:
-          - start_j11_dtests_offheap
-          - j8_build
-    - start_j11_dtests_offheap_repeat:
-        type: approval
-    - j11_dtests_offheap_repeat:
-        requires:
-          - start_j11_dtests_offheap_repeat
-          - j8_build
-    # Java 11 Python large DTests
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -566,50 +619,30 @@
         requires:
           - start_j11_dtests_large_vnode
           - j8_build
+    - start_j11_dtests_large_vnode_repeat:
+        type: approval
     - j11_dtests_large_vnode_repeat:
         requires:
-          - start_j11_dtests_large_repeat
+          - start_j11_dtests_large_vnode_repeat
           - j8_build
     # Java 8 upgrade tests
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
-          - start_upgrade_tests
-          - j8_build
-    - start_j8_upgrade_dtests_repeat:
-        type: approval
-    - j8_upgrade_dtests_repeat:
-        requires:
-          - start_j8_upgrade_dtests_repeat
+          - start_upgrade_dtests
           - j8_build
     # Java 8 cqlsh dtests
     - start_j8_cqlsh_tests:
         type: approval
-    - j8_cqlsh-dtests-py2-with-vnodes:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py3_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-        - start_j8_cqlsh_tests
-        - j8_build
     - j8_cqlsh_dtests_py3:
         requires:
-        - start_j8_cqlsh_tests
-        - j8_build
+          - start_j8_cqlsh_tests
+          - j8_build
+    - j8_cqlsh_dtests_py3_vnode:
+        requires:
+          - start_j8_cqlsh_tests
+          - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
           - start_j8_cqlsh_tests
@@ -618,13 +651,17 @@
         requires:
           - start_j8_cqlsh_tests
           - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+        - start_j8_cqlsh_tests
+        - j8_build
     # Java 8 cqlsh dtests offheap
     - start_j8_cqlsh_tests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-          - start_j8_cqlsh_tests_offheap
-          - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
           - start_j8_cqlsh_tests_offheap
@@ -640,30 +677,14 @@
     # Java 11 cqlsh dtests
     - start_j11_cqlsh_tests:
         type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py3_vnode:
-        requires:
-        - start_j11_cqlsh_tests
-        - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-          - start_j11_cqlsh_tests
-          - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-          - start_j11_cqlsh_tests
-          - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-          - start_j11_cqlsh_tests
-          - j8_build
     - j11_cqlsh_dtests_py3:
         requires:
           - start_j11_cqlsh_tests
           - j8_build
+    - j11_cqlsh_dtests_py3_vnode:
+        requires:
+        - start_j11_cqlsh_tests
+        - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
           - start_j11_cqlsh_tests
@@ -672,13 +693,17 @@
         requires:
           - start_j11_cqlsh_tests
           - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+          - start_j11_cqlsh_tests
+          - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+          - start_j11_cqlsh_tests
+          - j8_build
     # Java 11 cqlsh dtests off-heap
     - start_j11_cqlsh_tests_offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-          - start_j11_cqlsh_tests_offheap
-          - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
           - start_j11_cqlsh_tests_offheap
@@ -705,6 +730,39 @@
         requires:
           - start_j11_repeated_ant_test
           - j8_build
+    # Java 8 repeated dtest
+    - start_j8_dtests_repeat:
+        type: approval
+    - j8_dtests_repeat:
+        requires:
+          - start_j8_dtests_repeat
+          - j8_build
+    - start_j8_dtests_vnode_repeat:
+        type: approval
+    - j8_dtests_vnode_repeat:
+        requires:
+          - start_j8_dtests_vnode_repeat
+          - j8_build
+    # Java 11 repeated dtest
+    - start_j11_dtests_repeat:
+        type: approval
+    - j11_dtests_repeat:
+        requires:
+          - start_j11_dtests_repeat
+          - j8_build
+    - start_j11_dtests_vnode_repeat:
+        type: approval
+    - j11_dtests_vnode_repeat:
+        requires:
+          - start_j11_dtests_vnode_repeat
+          - j8_build
+    # Repeated Python upgrade dtest
+    - start_j8_upgrade_dtests_repeat:
+        type: approval
+    - j8_upgrade_dtests_repeat:
+        requires:
+            - start_j8_upgrade_dtests_repeat
+            - j8_build
 
 j8_pre-commit_jobs: &j8_pre-commit_jobs
   jobs:
@@ -720,15 +778,48 @@
     - j8_unit_tests_repeat:
         requires:
           - j8_build
+    - j8_simulator_dtests:
+        requires:
+          - j8_build
+    - j8_simulator_dtests_repeat:
+        requires:
+          - j8_build
     - j8_jvm_dtests:
         requires:
           - j8_build
     - j8_jvm_dtests_repeat:
         requires:
           - j8_build
+    - j8_jvm_dtests_vnode:
+        requires:
+          - j8_build
+    - j8_jvm_dtests_vnode_repeat:
+        requires:
+          - j8_build
+    - j11_jvm_dtests:
+        requires:
+          - j8_build
+    - j11_jvm_dtests_repeat:
+        requires:
+          - j8_build
+    - j11_jvm_dtests_vnode:
+        requires:
+          - j8_build
+    - j11_jvm_dtests_vnode_repeat:
+        requires:
+          - j8_build
     - j8_cqlshlib_tests:
         requires:
           - j8_build
+    - j8_cqlshlib_cython_tests:
+        requires:
+          - j8_build
+    - j11_cqlshlib_tests:
+        requires:
+          - j8_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+          - j8_build
     # Java 11 unit tests
     - j11_unit_tests:
         requires:
@@ -855,7 +946,7 @@
     - j8_jvm_upgrade_dtests_repeat:
         requires:
           - j8_dtest_jars_build
-    # Java 8 dtests
+    # Python DTests
     - j8_dtests:
         requires:
           - j8_build
@@ -901,7 +992,7 @@
         requires:
           - start_j11_dtests_offheap
           - j8_build
-    # Java 8 large DTests
+    # Large Python DTests
     - start_j8_dtests_large:
         type: approval
     - j8_dtests_large:
@@ -920,7 +1011,6 @@
         requires:
           - start_j8_dtests_large
           - j8_build
-    # Java 11 large DTests
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -940,48 +1030,38 @@
           - start_j11_dtests_large
           - j8_build
     # Java 8 upgrade tests (on request)
-    - start_upgrade_tests:
+    - start_upgrade_dtests:
         type: approval
     - j8_upgrade_dtests:
         requires:
           - j8_build
-          - start_upgrade_tests
+          - start_upgrade_dtests
     - j8_upgrade_dtests_repeat:
         requires:
           - j8_build
-          - start_upgrade_tests
+          - start_upgrade_dtests
     # Java 8 cqlsh dtests
-    - j8_cqlsh-dtests-py2-with-vnodes:
+    - j8_cqlsh_dtests_py3:
         requires:
           - j8_build
     - j8_cqlsh_dtests_py3_vnode:
         requires:
           - j8_build
-    - j8_cqlsh_dtests_py38_vnode:
-        requires:
-          - j8_build
-    - j8_cqlsh_dtests_py311_vnode:
-        requires:
-          - j8_build
-    - j8_cqlsh-dtests-py2-no-vnodes:
-        requires:
-          - j8_build
-    - j8_cqlsh_dtests_py3:
-        requires:
-          - j8_build
     - j8_cqlsh_dtests_py38:
         requires:
           - j8_build
     - j8_cqlsh_dtests_py311:
         requires:
           - j8_build
+    - j8_cqlsh_dtests_py38_vnode:
+        requires:
+          - j8_build
+    - j8_cqlsh_dtests_py311_vnode:
+        requires:
+          - j8_build
     # Java 8 cqlsh dtests offheap
     - start_j8_cqlsh_dtests_offheap:
         type: approval
-    - j8_cqlsh-dtests-py2-offheap:
-        requires:
-          - start_j8_cqlsh_dtests_offheap
-          - j8_build
     - j8_cqlsh_dtests_py3_offheap:
         requires:
           - start_j8_cqlsh_dtests_offheap
@@ -995,37 +1075,27 @@
           - start_j8_cqlsh_dtests_offheap
           - j8_build
     # Java 11 cqlsh dtests
-    - j11_cqlsh-dtests-py2-with-vnodes:
+    - j11_cqlsh_dtests_py3:
         requires:
           - j8_build
     - j11_cqlsh_dtests_py3_vnode:
         requires:
           - j8_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-          - j8_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-          - j8_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-          - j8_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-          - j8_build
     - j11_cqlsh_dtests_py38:
         requires:
           - j8_build
     - j11_cqlsh_dtests_py311:
         requires:
           - j8_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+          - j8_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+          - j8_build
     # Java 11 cqlsh dtests off-heap
     - start_j11_cqlsh-dtests-offheap:
         type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j8_build
     - j11_cqlsh_dtests_py3_offheap:
         requires:
           - start_j11_cqlsh-dtests-offheap
@@ -1065,18 +1135,36 @@
         requires:
           - start_j11_jvm_dtests
           - j11_build
+    - start_j11_jvm_dtests_vnode:
+        type: approval
+    - j11_jvm_dtests_vnode:
+        requires:
+          - start_j11_jvm_dtests_vnode
+          - j11_build
     - start_j11_jvm_dtests_repeat:
         type: approval
     - j11_jvm_dtests_repeat:
         requires:
           - start_j11_jvm_dtests_repeat
           - j11_build
+    - start_j11_jvm_dtests_vnode_repeat:
+        type: approval
+    - j11_jvm_dtests_vnode_repeat:
+        requires:
+          - start_j11_jvm_dtests_vnode_repeat
+          - j11_build
     - start_j11_cqlshlib_tests:
         type: approval
     - j11_cqlshlib_tests:
         requires:
           - start_j11_cqlshlib_tests
           - j11_build
+    - start_j11_cqlshlib_cython_tests:
+        type: approval
+    - j11_cqlshlib_cython_tests:
+        requires:
+          - start_j11_cqlshlib_cython_tests
+          - j11_build
     # Java 11 dtests
     - start_j11_dtests:
         type: approval
@@ -1090,18 +1178,6 @@
         requires:
           - start_j11_dtests_vnode
           - j11_build
-    - start_j11_dtests_repeat:
-        type: approval
-    - j11_dtests_repeat:
-        requires:
-          - start_j11_dtests_repeat
-          - j11_build
-    - start_j11_dtests_vnode_repeat:
-        type: approval
-    - j11_dtests_vnode_repeat:
-        requires:
-          - start_j11_dtests_vnode_repeat
-          - j11_build
     - start_j11_dtests_offheap:
         type: approval
     - j11_dtests_offheap:
@@ -1114,59 +1190,6 @@
         requires:
           - start_j11_dtests_offheap_repeat
           - j11_build
-    - start_j11_cqlsh_tests:
-        type: approval
-    - j11_cqlsh-dtests-py2-with-vnodes:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - j11_cqlsh_dtests_py3_vnode:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - j11_cqlsh_dtests_py38:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - j11_cqlsh_dtests_py311:
-        requires:
-          - start_j11_cqlsh_tests
-          - j11_build
-    - start_j11_cqlsh-dtests-offheap:
-        type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    - j11_cqlsh_dtests_py3_offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    - j11_cqlsh_dtests_py38_offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    - j11_cqlsh_dtests_py311_offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    # Python large DTests
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -1191,6 +1214,46 @@
         requires:
           - start_j11_dtests_large_vnode_repeat
           - j11_build
+    - start_j11_cqlsh_tests:
+        type: approval
+    - j11_cqlsh_dtests_py3:
+        requires:
+          - start_j11_cqlsh_tests
+          - j11_build
+    - j11_cqlsh_dtests_py3_vnode:
+        requires:
+          - start_j11_cqlsh_tests
+          - j11_build
+    - j11_cqlsh_dtests_py38:
+        requires:
+          - start_j11_cqlsh_tests
+          - j11_build
+    - j11_cqlsh_dtests_py311:
+        requires:
+          - start_j11_cqlsh_tests
+          - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+          - start_j11_cqlsh_tests
+          - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+          - start_j11_cqlsh_tests
+          - j11_build
+    - start_j11_cqlsh-dtests-offheap:
+        type: approval
+    - j11_cqlsh_dtests_py3_offheap:
+        requires:
+          - start_j11_cqlsh-dtests-offheap
+          - j11_build
+    - j11_cqlsh_dtests_py38_offheap:
+        requires:
+          - start_j11_cqlsh-dtests-offheap
+          - j11_build
+    - j11_cqlsh_dtests_py311_offheap:
+        requires:
+          - start_j11_cqlsh-dtests-offheap
+          - j11_build
     # specialized unit tests (all run on request)
     - start_j11_utests_long:
         type: approval
@@ -1271,6 +1334,19 @@
         requires:
           - start_j11_repeated_ant_test
           - j11_build
+    # Java 11 repeated dtest
+    - start_j11_dtests_repeat:
+        type: approval
+    - j11_dtests_repeat:
+        requires:
+          - start_j11_dtests_repeat
+          - j11_build
+    - start_j11_dtests_vnode_repeat:
+        type: approval
+    - j11_dtests_vnode_repeat:
+        requires:
+          - start_j11_dtests_vnode_repeat
+          - j11_build
 
 j11_pre-commit_jobs: &j11_pre-commit_jobs
   jobs:
@@ -1291,15 +1367,18 @@
     - j11_jvm_dtests_repeat:
         requires:
           - j11_build
-    - j11_cqlshlib_tests:
+    - j11_jvm_dtests_vnode:
         requires:
           - j11_build
-    - j11_jvm_dtests:
+    - j11_jvm_dtests_vnode_repeat:
         requires:
           - j11_build
     - j11_cqlshlib_tests:
         requires:
           - j11_build
+    - j11_cqlshlib_cython_tests:
+        requires:
+          - j11_build
     - j11_dtests:
         requires:
           - j11_build
@@ -1324,49 +1403,6 @@
         requires:
           - start_j11_dtests_offheap_repeat
           - j11_build
-    - j11_cqlsh-dtests-py2-with-vnodes:
-        requires:
-          - j11_build
-    - j11_cqlsh_dtests_py3_vnode:
-        requires:
-          - j11_build
-    - j11_cqlsh_dtests_py38_vnode:
-        requires:
-          - j11_build
-    - j11_cqlsh_dtests_py311_vnode:
-        requires:
-          - j11_build
-    - j11_cqlsh-dtests-py2-no-vnodes:
-        requires:
-          - j11_build
-    - j11_cqlsh_dtests_py3:
-        requires:
-          - j11_build
-    - j11_cqlsh_dtests_py38:
-        requires:
-          - j11_build
-    - j11_cqlsh_dtests_py311:
-        requires:
-          - j11_build
-    - start_j11_cqlsh-dtests-offheap:
-        type: approval
-    - j11_cqlsh-dtests-py2-offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    - j11_cqlsh_dtests_py3_offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    - j11_cqlsh_dtests_py38_offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    - j11_cqlsh_dtests_py311_offheap:
-        requires:
-          - start_j11_cqlsh-dtests-offheap
-          - j11_build
-    # Large Python DTests
     - start_j11_dtests_large:
         type: approval
     - j11_dtests_large:
@@ -1385,6 +1421,38 @@
         requires:
           - start_j11_dtests_large
           - j11_build
+    - j11_cqlsh_dtests_py3:
+        requires:
+          - j11_build
+    - j11_cqlsh_dtests_py3_vnode:
+        requires:
+          - j11_build
+    - j11_cqlsh_dtests_py38:
+        requires:
+          - j11_build
+    - j11_cqlsh_dtests_py311:
+        requires:
+          - j11_build
+    - j11_cqlsh_dtests_py38_vnode:
+        requires:
+          - j11_build
+    - j11_cqlsh_dtests_py311_vnode:
+        requires:
+          - j11_build
+    - start_j11_cqlsh-dtests-offheap:
+        type: approval
+    - j11_cqlsh_dtests_py3_offheap:
+        requires:
+          - start_j11_cqlsh-dtests-offheap
+          - j11_build
+    - j11_cqlsh_dtests_py38_offheap:
+        requires:
+          - start_j11_cqlsh-dtests-offheap
+          - j11_build
+    - j11_cqlsh_dtests_py311_offheap:
+        requires:
+          - start_j11_cqlsh-dtests-offheap
+          - j11_build
     # specialized unit tests (all run on request)
     - start_utests_long:
         type: approval
@@ -1529,6 +1597,15 @@
       - log_environment
       - run_parallel_junit_tests
 
+  j8_simulator_dtests:
+    <<: *j8_small_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - create_junit_containers
+      - log_environment
+      - run_simulator_tests
+
   j8_jvm_dtests:
     <<: *j8_small_par_executor
     steps:
@@ -1542,6 +1619,20 @@
           classlistprefix: distributed
           target: "testclasslist"
 
+  j8_jvm_dtests_vnode:
+    <<: *j8_small_par_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - create_junit_containers:
+          classlistprefix: distributed
+          extra_filters: "| grep -v upgrade"
+      - log_environment
+      - run_parallel_junit_tests:
+          classlistprefix: distributed
+          target: "testclasslist"
+          arguments: "-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+
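The arguments parameter added here is simply appended to the ant command by run_parallel_junit_tests (see the updated ant line later in this patch), so each container in this job ends up running something along the lines of the sketch below, with the node index and timeout resolved at runtime:

    ant testclasslist -Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16' \
        -Dtest.timeout="$test_timeout" \
        -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt \
        -Dtest.classlistprefix=distributed -Dno-build-test=true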
   j11_jvm_dtests:
     <<: *j11_small_par_executor
     steps:
@@ -1555,6 +1646,20 @@
           classlistprefix: distributed
           target: "testclasslist"
 
+  j11_jvm_dtests_vnode:
+    <<: *j11_small_par_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - create_junit_containers:
+          classlistprefix: distributed
+          extra_filters: "| grep -v upgrade"
+      - log_environment
+      - run_parallel_junit_tests:
+          classlistprefix: distributed
+          target: "testclasslist"
+          arguments: "-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+
   j8_jvm_upgrade_dtests:
     <<: *j8_medium_par_executor
     steps:
@@ -1584,6 +1689,13 @@
           at: /home/cassandra
       - run_cqlshlib_tests
 
+  j8_cqlshlib_cython_tests:
+    <<: *j8_small_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - run_cqlshlib_cython_tests
+
   j11_cqlshlib_tests:
     <<: *j11_small_executor
     steps:
@@ -1591,6 +1703,13 @@
           at: /home/cassandra
       - run_cqlshlib_tests
 
+  j11_cqlshlib_cython_tests:
+    <<: *j11_small_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - run_cqlshlib_cython_tests
+
   j8_utests_long:
     <<: *j8_seq_executor
     steps:
@@ -1786,62 +1905,6 @@
         file_tag: j11_without_vnodes
         pytest_extra_args: '--skip-resource-intensive-tests'
 
-  j8_dtests_large_vnode:
-    <<: *j8_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j8_large_with_vnodes
-          run_dtests_extra_args: '--use-vnodes --only-resource-intensive-tests --force-resource-intensive-tests'
-      - run_dtests:
-          file_tag: j8_large_with_vnodes
-          pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
-
-  j8_dtests_large:
-    <<: *j8_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j8_large_without_vnodes
-          run_dtests_extra_args: '--only-resource-intensive-tests --force-resource-intensive-tests'
-      - run_dtests:
-          file_tag: j8_large_without_vnodes
-          pytest_extra_args: '--num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
-
-  j11_dtests_large_vnode:
-    <<: *j11_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j11_large_with_vnodes
-          run_dtests_extra_args: '--use-vnodes --only-resource-intensive-tests --force-resource-intensive-tests'
-      - run_dtests:
-          file_tag: j11_large_with_vnodes
-          pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
-
-  j11_dtests_large:
-    <<: *j11_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j11_large_without_vnodes
-          run_dtests_extra_args: '--only-resource-intensive-tests --force-resource-intensive-tests'
-      - run_dtests:
-          file_tag: j11_large_without_vnodes
-          pytest_extra_args: '--num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
-
   j8_upgrade_dtests:
     <<: *j8_par_executor
     steps:
@@ -1856,36 +1919,6 @@
           file_tag: j8_upgradetests_without_vnodes
           pytest_extra_args: '--execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all'
 
-  j8_cqlsh-dtests-py2-with-vnodes:
-    <<: *j8_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j8_with_vnodes
-          run_dtests_extra_args: "--use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql'"
-      - run_dtests:
-          file_tag: j8_with_vnodes
-          pytest_extra_args: '--use-vnodes --num-tokens=16 --skip-resource-intensive-tests'
-          extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
-
-  j8_cqlsh-dtests-py2-offheap:
-    <<: *j8_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j8_dtests_offheap
-          run_dtests_extra_args: "--use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql'"
-      - run_dtests:
-          file_tag: j8_dtests_offheap
-          pytest_extra_args: '--use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests'
-          extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
-
   j8_cqlsh_dtests_py3_vnode:
     <<: *j8_par_executor
     steps:
@@ -1988,21 +2021,6 @@
           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.11'
           python_version: '3.11'
 
-  j8_cqlsh-dtests-py2-no-vnodes:
-    <<: *j8_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j8_without_vnodes
-          run_dtests_extra_args: "--skip-resource-intensive-tests --pytest-options '-k cql'"
-      - run_dtests:
-          file_tag: j8_without_vnodes
-          pytest_extra_args: '--skip-resource-intensive-tests'
-          extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
-
   j8_cqlsh_dtests_py3:
     <<: *j8_par_executor
     steps:
@@ -2054,36 +2072,6 @@
           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.11'
           python_version: '3.11'
 
-  j11_cqlsh-dtests-py2-with-vnodes:
-    <<: *j11_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j11_with_vnodes
-          run_dtests_extra_args: "--use-vnodes --skip-resource-intensive-tests --pytest-options '-k cql'"
-      - run_dtests:
-          file_tag: j11_with_vnodes
-          pytest_extra_args: '--use-vnodes --num-tokens=16 --skip-resource-intensive-tests'
-          extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
-
-  j11_cqlsh-dtests-py2-offheap:
-    <<: *j11_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j11_dtests_offheap
-          run_dtests_extra_args: "--use-vnodes --use-off-heap-memtables --skip-resource-intensive-tests --pytest-options '-k cql'"
-      - run_dtests:
-          file_tag: j11_dtests_offheap
-          pytest_extra_args: '--use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests'
-          extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
-
   j11_cqlsh_dtests_py3_vnode:
     <<: *j11_par_executor
     steps:
@@ -2186,21 +2174,6 @@
           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.11'
           python_version: '3.11'
 
-  j11_cqlsh-dtests-py2-no-vnodes:
-    <<: *j11_par_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - create_dtest_containers:
-          file_tag: j11_without_vnodes
-          run_dtests_extra_args: "--skip-resource-intensive-tests --pytest-options '-k cql'"
-      - run_dtests:
-          file_tag: j11_without_vnodes
-          pytest_extra_args: '--skip-resource-intensive-tests'
-          extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
-
   j11_cqlsh_dtests_py3:
     <<: *j11_par_executor
     steps:
@@ -2252,6 +2225,62 @@
           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.11'
           python_version: '3.11'
 
+  j8_dtests_large_vnode:
+    <<: *j8_par_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - clone_dtest
+      - create_venv
+      - create_dtest_containers:
+          file_tag: j8_large_with_vnodes
+          run_dtests_extra_args: '--use-vnodes --only-resource-intensive-tests --force-resource-intensive-tests'
+      - run_dtests:
+          file_tag: j8_large_with_vnodes
+          pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
+
+  j8_dtests_large:
+    <<: *j8_par_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - clone_dtest
+      - create_venv
+      - create_dtest_containers:
+          file_tag: j8_large_without_vnodes
+          run_dtests_extra_args: '--only-resource-intensive-tests --force-resource-intensive-tests'
+      - run_dtests:
+          file_tag: j8_large_without_vnodes
+          pytest_extra_args: '--only-resource-intensive-tests --force-resource-intensive-tests'
+
+  j11_dtests_large_vnode:
+    <<: *j11_par_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - clone_dtest
+      - create_venv
+      - create_dtest_containers:
+          file_tag: j11_large_with_vnodes
+          run_dtests_extra_args: '--use-vnodes --only-resource-intensive-tests --force-resource-intensive-tests'
+      - run_dtests:
+          file_tag: j11_large_with_vnodes
+          pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
+
+  j11_dtests_large:
+    <<: *j11_par_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - clone_dtest
+      - create_venv
+      - create_dtest_containers:
+          file_tag: j11_large_without_vnodes
+          run_dtests_extra_args: '--only-resource-intensive-tests --force-resource-intensive-tests'
+      - run_dtests:
+          file_tag: j11_large_without_vnodes
+          pytest_extra_args: '--only-resource-intensive-tests --force-resource-intensive-tests'
+
   j8_unit_tests_repeat:
     <<: *j8_repeated_utest_executor
     steps:
@@ -2372,66 +2401,21 @@
       - log_environment
       - run_jvm_dtests_repeat
 
-
-  j8_dtests_large_repeat:
-    <<: *j8_repeated_dtest_executor
+  j8_jvm_dtests_vnode_repeat:
+    <<: *j8_repeated_utest_executor
     steps:
       - attach_workspace:
           at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - run_repeated_dtest:
-          tests: ${REPEATED_LARGE_DTESTS}
-          vnodes: "false"
-          upgrade: "false"
-          count: ${REPEATED_LARGE_DTESTS_COUNT}
-          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
-          extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
+      - log_environment
+      - run_jvm_dtests_vnode_repeat
 
-  j8_dtests_large_vnode_repeat:
-    <<: *j8_repeated_dtest_executor
+  j8_simulator_dtests_repeat:
+    <<: *j8_repeated_utest_executor
     steps:
       - attach_workspace:
           at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - run_repeated_dtest:
-          tests: ${REPEATED_LARGE_DTESTS}
-          vnodes: "true"
-          upgrade: "false"
-          count: ${REPEATED_LARGE_DTESTS_COUNT}
-          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
-          extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
-
-  j11_dtests_large_repeat:
-    <<: *j11_repeated_dtest_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - run_repeated_dtest:
-          tests: ${REPEATED_LARGE_DTESTS}
-          vnodes: "false"
-          upgrade: "false"
-          count: ${REPEATED_LARGE_DTESTS_COUNT}
-          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
-          extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
-
-  j11_dtests_large_vnode_repeat:
-    <<: *j11_repeated_dtest_executor
-    steps:
-      - attach_workspace:
-          at: /home/cassandra
-      - clone_dtest
-      - create_venv
-      - run_repeated_dtest:
-          tests: ${REPEATED_LARGE_DTESTS}
-          vnodes: "true"
-          upgrade: "false"
-          count: ${REPEATED_LARGE_DTESTS_COUNT}
-          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
-          extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
+      - log_environment
+      - run_simulator_dtests_repeat
 
   j8_jvm_upgrade_dtests_repeat:
     <<: *j8_repeated_jvm_upgrade_dtest_executor
@@ -2449,6 +2433,14 @@
       - log_environment
       - run_jvm_dtests_repeat
 
+  j11_jvm_dtests_vnode_repeat:
+    <<: *j11_repeated_utest_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - log_environment
+      - run_jvm_dtests_vnode_repeat
+
   j8_repeated_ant_test:
     <<: *j8_repeated_utest_executor
     steps:
@@ -2459,6 +2451,7 @@
           target: ${REPEATED_ANT_TEST_TARGET}
           class: ${REPEATED_ANT_TEST_CLASS}
           methods: ${REPEATED_ANT_TEST_METHODS}
+          vnodes: ${REPEATED_ANT_TEST_VNODES}
           count: ${REPEATED_ANT_TEST_COUNT}
           stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
 
@@ -2472,6 +2465,7 @@
           target: ${REPEATED_ANT_TEST_TARGET}
           class: ${REPEATED_ANT_TEST_CLASS}
           methods: ${REPEATED_ANT_TEST_METHODS}
+          vnodes: ${REPEATED_ANT_TEST_VNODES}
           count: ${REPEATED_ANT_TEST_COUNT}
           stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
 
@@ -2518,6 +2512,36 @@
           stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
           extra_dtest_args: "--use-off-heap-memtables --skip-resource-intensive-tests"
 
+  j8_dtests_large_repeat:
+    <<: *j8_repeated_dtest_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - clone_dtest
+      - create_venv
+      - run_repeated_dtest:
+          tests: ${REPEATED_LARGE_DTESTS}
+          vnodes: "false"
+          upgrade: "false"
+          count: ${REPEATED_LARGE_DTESTS_COUNT}
+          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
+          extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
+
+  j8_dtests_large_vnode_repeat:
+    <<: *j8_repeated_dtest_executor
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - clone_dtest
+    - create_venv
+    - run_repeated_dtest:
+        tests: ${REPEATED_LARGE_DTESTS}
+        vnodes: "true"
+        upgrade: "false"
+        count: ${REPEATED_LARGE_DTESTS_COUNT}
+        stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
+        extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
+
   j11_dtests_repeat:
     <<: *j11_repeated_dtest_executor
     steps:
@@ -2563,6 +2587,36 @@
           stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
           extra_dtest_args: "--use-off-heap-memtables --skip-resource-intensive-tests"
 
+  j11_dtests_large_repeat:
+    <<: *j11_repeated_dtest_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - clone_dtest
+      - create_venv
+      - run_repeated_dtest:
+          tests: ${REPEATED_LARGE_DTESTS}
+          vnodes: "false"
+          upgrade: "false"
+          count: ${REPEATED_LARGE_DTESTS_COUNT}
+          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
+          extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
+
+  j11_dtests_large_vnode_repeat:
+    <<: *j11_repeated_dtest_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - clone_dtest
+      - create_venv
+      - run_repeated_dtest:
+          tests: ${REPEATED_LARGE_DTESTS}
+          vnodes: "true"
+          upgrade: "false"
+          count: ${REPEATED_LARGE_DTESTS_COUNT}
+          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
+          extra_dtest_args: "--only-resource-intensive-tests --force-resource-intensive-tests"
+
   j8_upgrade_dtests_repeat:
     <<: *j8_repeated_upgrade_dtest_executor
     steps:
@@ -2648,7 +2702,7 @@
           cd ~/cassandra
           mkdir ~/dtest_jars
           git remote add apache https://github.com/apache/cassandra.git
-          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 trunk; do
+          for branch in cassandra-2.2 cassandra-3.0 cassandra-3.11 cassandra-4.0 cassandra-4.1 trunk; do
             # check out the correct cassandra version:
             git remote set-branches --add apache '$branch'
             git fetch --depth 1 apache $branch
@@ -2728,6 +2782,33 @@
 
         no_output_timeout: 15m
 
+  run_simulator_tests:
+    parameters:
+      no_output_timeout:
+        type: string
+        default: 30m
+    steps:
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest -Dno-build-test=true
+        no_output_timeout: <<parameters.no_output_timeout>>
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+
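Outside CI, a rough local equivalent of this step (assuming the test classes were already built, which is what -Dno-build-test=true tells ant to skip) would be:

    cd /path/to/cassandra    # hypothetical checkout location
    ant test-simulator-dtest -Dno-build-test=true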
   run_junit_tests:
     parameters:
       target:
@@ -2767,12 +2848,30 @@
         command: |
           export PATH=$JAVA_HOME/bin:$PATH
           time mv ~/cassandra /tmp
-          cd /tmp/cassandra/pylib
-          ./cassandra-cqlsh-tests.sh ..
+          cd /tmp/cassandra/
+          ./pylib/cassandra-cqlsh-tests.sh $(pwd)
         no_output_timeout: <<parameters.no_output_timeout>>
     - store_test_results:
         path: /tmp/cassandra/pylib
 
+  run_cqlshlib_cython_tests:
+    parameters:
+      no_output_timeout:
+        type: string
+        default: 15m
+    steps:
+      - run:
+          name: Run cqlshlib Unit Tests with Cython
+          command: |
+            export PATH=$JAVA_HOME/bin:$PATH
+            export cython="yes"
+            time mv ~/cassandra /tmp
+            cd /tmp/cassandra/
+            ./pylib/cassandra-cqlsh-tests.sh $(pwd)
+          no_output_timeout: <<parameters.no_output_timeout>>
+      - store_test_results:
+          path: /tmp/cassandra/pylib
+
   run_parallel_junit_tests:
     parameters:
       target:
@@ -2784,6 +2883,9 @@
       classlistprefix:
         type: string
         default: unit
+      arguments:
+        type: string
+        default: " "
     steps:
     - run:
         name: Run Unit Tests (<<parameters.target>>)
@@ -2799,7 +2901,7 @@
           if [ -z "$test_timeout" ]; then
             test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
           fi
-          ant <<parameters.target>> -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=<<parameters.classlistprefix>> -Dno-build-test=true
+          ant <<parameters.target>> <<parameters.arguments>> -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt -Dtest.classlistprefix=<<parameters.classlistprefix>> -Dno-build-test=true
         no_output_timeout: <<parameters.no_output_timeout>>
     - store_test_results:
         path: /tmp/cassandra/build/test/output/
@@ -2913,7 +3015,7 @@
             # we need the "set -o pipefail" here so that the exit code that circleci will actually use is from pytest and not the exit code from tee
             export SPLIT_TESTS=`cat /tmp/split_dtest_tests_<<parameters.file_tag>>_final.txt`
             if [ ! -z "$SPLIT_TESTS" ]; then
-              set -o pipefail && cd ~/cassandra-dtest && pytest <<parameters.pytest_extra_args>> --log-cli-level=DEBUG --junit-xml=/tmp/results/dtests/pytest_result_<<parameters.file_tag>>.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
+              set -o pipefail && cd ~/cassandra-dtest && pytest <<parameters.pytest_extra_args>> --log-level="DEBUG" --junit-xml=/tmp/results/dtests/pytest_result_<<parameters.file_tag>>.xml -s --cassandra-dir=/home/cassandra/cassandra --keep-test-dir $SPLIT_TESTS 2>&1 | tee /tmp/dtest/stdout.txt
             else
               echo "Tune your parallelism, there are more containers than test classes. Nothing to do in this container"
               (exit 1)
@@ -2989,6 +3091,24 @@
           target: test-jvm-dtest-some
           tests: ${REPEATED_JVM_DTESTS}
           count: ${REPEATED_JVM_DTESTS_COUNT}
+          vnodes: false
+          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
+
+  run_simulator_dtests_repeat:
+    steps:
+      - run_repeated_utests:
+          target: test-simulator-dtest
+          tests: ${REPEATED_SIMULATOR_DTESTS}
+          count: ${REPEATED_SIMULATOR_DTESTS_COUNT}
+          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
+
+  run_jvm_dtests_vnode_repeat:
+    steps:
+      - run_repeated_utests:
+          target: test-jvm-dtest-some
+          tests: ${REPEATED_JVM_DTESTS}
+          count: ${REPEATED_JVM_DTESTS_COUNT}
+          vnodes: true
           stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
 
   run_jvm_upgrade_dtests_repeat:
@@ -2997,6 +3117,7 @@
           target: test-jvm-dtest-some
           tests: ${REPEATED_JVM_UPGRADE_DTESTS}
           count: ${REPEATED_JVM_UPGRADE_DTESTS_COUNT}
+          vnodes: false
           stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
 
   run_repeated_utests:
@@ -3007,6 +3128,9 @@
         type: string
       count:
         type: string
+      vnodes:
+        type: boolean
+        default: false
       stop_on_failure:
         type: string
     steps:
@@ -3032,6 +3156,13 @@
             tests=$(echo <<parameters.tests>> | sed -e "s/<nil>//" | sed -e "s/ //" | tr "," "\n" | tr " " "\n" | sort -n | uniq -u)
             echo "Tests to be repeated: ${tests}"
 
+            # Prepare the JVM dtests vnodes argument, which is optional.
+            vnodes=<<parameters.vnodes>>
+            vnodes_args=""
+            if [ "$vnodes" = true ] ; then
+              vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+            fi
+
             # Prepare the testtag for the target, used by the test macro in build.xml to group the output files
             target=<<parameters.target>>
             testtag=""
@@ -3064,7 +3195,8 @@
                       $target == "test-system-keyspace-directory" || \
                       $target == "fqltool-test" || \
                       $target == "long-test" || \
-                      $target == "stress-test" ]]; then
+                      $target == "stress-test" || \
+                      $target == "test-simulator-dtest" ]]; then
                   name_arg="-Dtest.name=${class##*.}"
                 else
                   name_arg="-Dtest.name=$class"
@@ -3083,7 +3215,7 @@
                   # run the test
                   status="passes"
                   if !( set -o pipefail && \
-                        ant <<parameters.target>> $name_arg $methods_arg -Dno-build-test=true | \
+                        ant <<parameters.target>> $name_arg $methods_arg $vnodes_args -Dno-build-test=true | \
                         tee stdout.txt \
                       ); then
                     status="fails"
@@ -3110,7 +3242,7 @@
                   if [[ -d $source && -n "$(ls $source)" ]]; then
                     mv $source/* $dest/
                   fi
-            
+                  
                   # maybe stop iterations on test failure
                   if [[ <<parameters.stop_on_failure>> = true ]] && (( $exit_code > 0 )); then
                     break
@@ -3138,6 +3270,8 @@
         type: string
       methods:
         type: string
+      vnodes:
+        type: string
       count:
         type: string
       stop_on_failure:
@@ -3187,7 +3321,8 @@
                       $target == "test-system-keyspace-directory" || \
                       $target == "fqltool-test" || \
                       $target == "long-test" || \
-                      $target == "stress-test" ]]; then
+                      $target == "stress-test" || \
+                      $target == "test-simulator-dtest" ]]; then
                   name="-Dtest.name=$class_name"
                 else
                   name="-Dtest.name=$class_path"
@@ -3199,6 +3334,12 @@
                 else
                   methods="-Dtest.methods=<<parameters.methods>>"
                 fi
+            
+                # Prepare the JVM dtests vnodes argument, which is optional
+                vnodes_args=""
+                if <<parameters.vnodes>>; then
+                  vnodes_args="-Dtest.jvm.args='-Dcassandra.dtest.num_tokens=16'"
+                fi
 
                 # Run the test target as many times as requested collecting the exit code,
                 # stopping the iteration only if stop_on_failure is set.
@@ -3209,7 +3350,7 @@
 
                   # run the test
                   status="passes"
-                  if !( set -o pipefail && ant $target $name $methods -Dno-build-test=true | tee stdout.txt ); then
+                  if !( set -o pipefail && ant $target $name $methods $vnodes_args -Dno-build-test=true | tee stdout.txt ); then
                     status="fails"
                     exit_code=1
                   fi
@@ -3273,7 +3414,7 @@
         default: ""
     steps:
       - run:
-          name: Run repeated Python dtest
+          name: Run repeated Python DTests
           no_output_timeout: 15m
           command: |
             if [ "<<parameters.tests>>" == "<nil>" ]; then
diff --git a/.circleci/config_template.yml.PAID.patch b/.circleci/config_template.yml.PAID.patch
index a39b6ec..098ccd2 100644
--- a/.circleci/config_template.yml.PAID.patch
+++ b/.circleci/config_template.yml.PAID.patch
@@ -1,6 +1,6 @@
---- config-2_1.yml	2023-02-07 21:22:17.000000000 -0500
-+++ config-2_1.yml.MIDRES	2023-02-08 10:15:45.000000000 -0500
-@@ -145,14 +145,14 @@
+--- config-2_1.yml	2023-02-02 21:24:39.000000000 -0500
++++ config-2_1.yml.MIDRES	2023-02-02 21:25:05.000000000 -0500
+@@ -157,14 +157,14 @@
  j8_par_executor: &j8_par_executor
    executor:
      name: java8-executor
@@ -19,15 +19,10 @@
  
  j8_small_executor: &j8_small_executor
    executor:
-@@ -163,26 +163,38 @@
- j8_medium_par_executor: &j8_medium_par_executor
-   executor:
-     name: java8-executor
--    #exec_resource_class: xlarge
--  parallelism: 1
-+    exec_resource_class: xlarge
-+  parallelism: 4
-+
+@@ -172,29 +172,41 @@
+     exec_resource_class: medium
+   parallelism: 1
+ 
 +j8_large_par_executor: &j8_large_par_executor
 +  executor:
 +    name: java8-executor
@@ -39,6 +34,14 @@
 +    name: java8-executor
 +    exec_resource_class: xlarge
 +  parallelism: 100
++
+ j8_medium_par_executor: &j8_medium_par_executor
+   executor:
+     name: java8-executor
+-    #exec_resource_class: xlarge
+-  parallelism: 1
++    exec_resource_class: xlarge
++  parallelism: 4
  
  j8_seq_executor: &j8_seq_executor
    executor:
@@ -60,12 +63,12 @@
      name: java11-executor
 -    #exec_resource_class: xlarge
 -  parallelism: 1
-+    #exec_resource_class: large
++    exec_resource_class: large
 +  parallelism: 10
  
  j11_small_executor: &j11_small_executor
    executor:
-@@ -193,44 +205,56 @@
+@@ -205,44 +217,56 @@
  j11_medium_par_executor: &j11_medium_par_executor
    executor:
      name: java11-executor
@@ -131,7 +134,7 @@
  
  j8_separate_jobs: &j8_separate_jobs
    jobs:
-@@ -1700,7 +1724,7 @@
+@@ -1819,7 +1843,7 @@
            target: testclasslist-system-keyspace-directory
  
    j8_dtests_vnode:
@@ -140,7 +143,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1714,7 +1738,7 @@
+@@ -1833,7 +1857,7 @@
            pytest_extra_args: '--use-vnodes --num-tokens=16 --skip-resource-intensive-tests'
  
    j8_dtests_offheap:
@@ -149,7 +152,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1728,7 +1752,7 @@
+@@ -1847,7 +1871,7 @@
            pytest_extra_args: '--use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests'
  
    j11_dtests_vnode:
@@ -158,7 +161,7 @@
      steps:
      - attach_workspace:
          at: /home/cassandra
-@@ -1743,7 +1767,7 @@
+@@ -1862,7 +1886,7 @@
          pytest_extra_args: '--use-vnodes --num-tokens=16 --skip-resource-intensive-tests'
  
    j11_dtests_offheap:
@@ -167,7 +170,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1758,7 +1782,7 @@
+@@ -1877,7 +1901,7 @@
            pytest_extra_args: '--use-vnodes --num-tokens=16 --use-off-heap-memtables --skip-resource-intensive-tests'
  
    j8_dtests:
@@ -176,7 +179,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1772,7 +1796,7 @@
+@@ -1891,7 +1915,7 @@
            pytest_extra_args: '--skip-resource-intensive-tests'
  
    j11_dtests:
@@ -185,79 +188,25 @@
      steps:
      - attach_workspace:
          at: /home/cassandra
-@@ -1787,7 +1811,7 @@
+@@ -1906,7 +1930,7 @@
          pytest_extra_args: '--skip-resource-intensive-tests'
  
-   j8_dtests_large_vnode:
--    <<: *j8_par_executor
-+    <<: *j8_medium_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -1801,7 +1825,7 @@
-           pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
- 
-   j8_dtests_large:
--    <<: *j8_par_executor
-+    <<: *j8_medium_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -1815,7 +1839,7 @@
-           pytest_extra_args: '--num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
- 
-   j11_dtests_large_vnode:
--    <<: *j11_par_executor
-+    <<: *j11_medium_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -1829,7 +1853,7 @@
-           pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
- 
-   j11_dtests_large:
--    <<: *j11_par_executor
-+    <<: *j11_medium_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -1843,7 +1867,7 @@
-           pytest_extra_args: '--num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
- 
    j8_upgrade_dtests:
 -    <<: *j8_par_executor
 +    <<: *j8_very_large_par_executor
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1857,7 +1881,7 @@
+@@ -1920,7 +1944,7 @@
            pytest_extra_args: '--execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all'
  
-   j8_cqlsh-dtests-py2-with-vnodes:
--    <<: *j8_par_executor
-+    <<: *j8_large_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -1872,7 +1896,7 @@
-           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
- 
-   j8_cqlsh-dtests-py2-offheap:
--    <<: *j8_par_executor
-+    <<: *j8_large_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -1887,7 +1911,7 @@
-           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
- 
    j8_cqlsh_dtests_py3_vnode:
 -    <<: *j8_par_executor
 +    <<: *j8_large_par_executor
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1902,7 +1926,7 @@
+@@ -1935,7 +1959,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j8_cqlsh_dtests_py3_offheap:
@@ -266,7 +215,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1917,7 +1941,7 @@
+@@ -1950,7 +1974,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j8_cqlsh_dtests_py38_vnode:
@@ -275,7 +224,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1935,7 +1959,7 @@
+@@ -1968,7 +1992,7 @@
            python_version: '3.8'
  
    j8_cqlsh_dtests_py311_vnode:
@@ -284,7 +233,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1953,7 +1977,7 @@
+@@ -1986,7 +2010,7 @@
            python_version: '3.11'
  
    j8_cqlsh_dtests_py38_offheap:
@@ -293,7 +242,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1971,7 +1995,7 @@
+@@ -2004,7 +2028,7 @@
            python_version: '3.8'
  
    j8_cqlsh_dtests_py311_offheap:
@@ -302,25 +251,16 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1989,7 +2013,7 @@
+@@ -2022,7 +2046,7 @@
            python_version: '3.11'
  
-   j8_cqlsh-dtests-py2-no-vnodes:
--    <<: *j8_par_executor
-+    <<: *j8_large_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -2004,7 +2028,7 @@
-           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
- 
    j8_cqlsh_dtests_py3:
 -    <<: *j8_par_executor
 +    <<: *j8_large_par_executor
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2019,7 +2043,7 @@
+@@ -2037,7 +2061,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j8_cqlsh_dtests_py38:
@@ -329,7 +269,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2037,7 +2061,7 @@
+@@ -2055,7 +2079,7 @@
            python_version: '3.8'
  
    j8_cqlsh_dtests_py311:
@@ -338,34 +278,16 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2055,7 +2079,7 @@
+@@ -2073,7 +2097,7 @@
            python_version: '3.11'
  
-   j11_cqlsh-dtests-py2-with-vnodes:
--    <<: *j11_par_executor
-+    <<: *j11_large_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -2070,7 +2094,7 @@
-           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
- 
-   j11_cqlsh-dtests-py2-offheap:
--    <<: *j11_par_executor
-+    <<: *j11_large_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -2085,7 +2109,7 @@
-           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
- 
    j11_cqlsh_dtests_py3_vnode:
 -    <<: *j11_par_executor
 +    <<: *j11_large_par_executor
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2100,7 +2124,7 @@
+@@ -2088,7 +2112,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j11_cqlsh_dtests_py3_offheap:
@@ -374,7 +296,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2115,7 +2139,7 @@
+@@ -2103,7 +2127,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j11_cqlsh_dtests_py38_vnode:
@@ -383,7 +305,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2133,7 +2157,7 @@
+@@ -2121,7 +2145,7 @@
            python_version: '3.8'
  
    j11_cqlsh_dtests_py311_vnode:
@@ -392,7 +314,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2151,7 +2175,7 @@
+@@ -2139,7 +2163,7 @@
            python_version: '3.11'
  
    j11_cqlsh_dtests_py38_offheap:
@@ -401,7 +323,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2169,7 +2193,7 @@
+@@ -2157,7 +2181,7 @@
            python_version: '3.8'
  
    j11_cqlsh_dtests_py311_offheap:
@@ -410,25 +332,16 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2187,7 +2211,7 @@
+@@ -2175,7 +2199,7 @@
            python_version: '3.11'
  
-   j11_cqlsh-dtests-py2-no-vnodes:
--    <<: *j11_par_executor
-+    <<: *j11_large_par_executor
-     steps:
-       - attach_workspace:
-           at: /home/cassandra
-@@ -2202,7 +2226,7 @@
-           extra_env_args: 'CQLSH_PYTHON=/usr/bin/python2.7'
- 
    j11_cqlsh_dtests_py3:
 -    <<: *j11_par_executor
 +    <<: *j11_large_par_executor
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2217,7 +2241,7 @@
+@@ -2190,7 +2214,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j11_cqlsh_dtests_py38:
@@ -437,7 +350,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -2235,7 +2259,7 @@
+@@ -2208,7 +2232,7 @@
            python_version: '3.8'
  
    j11_cqlsh_dtests_py311:
@@ -446,8 +359,39 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -3336,3 +3360,4 @@
-       - store_artifacts:
-           path: ~/cassandra-dtest/logs
-           destination: dtest_logs
-+
+@@ -2226,7 +2250,7 @@
+           python_version: '3.11'
+ 
+   j8_dtests_large_vnode:
+-    <<: *j8_par_executor
++    <<: *j8_medium_par_executor
+     steps:
+       - attach_workspace:
+           at: /home/cassandra
+@@ -2240,7 +2264,7 @@
+           pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
+ 
+   j8_dtests_large:
+-    <<: *j8_par_executor
++    <<: *j8_medium_par_executor
+     steps:
+       - attach_workspace:
+           at: /home/cassandra
+@@ -2254,7 +2278,7 @@
+           pytest_extra_args: '--only-resource-intensive-tests --force-resource-intensive-tests'
+ 
+   j11_dtests_large_vnode:
+-    <<: *j11_par_executor
++    <<: *j11_medium_par_executor
+     steps:
+       - attach_workspace:
+           at: /home/cassandra
+@@ -2268,7 +2292,7 @@
+           pytest_extra_args: '--use-vnodes --num-tokens=16 --only-resource-intensive-tests --force-resource-intensive-tests'
+ 
+   j11_dtests_large:
+-    <<: *j11_par_executor
++    <<: *j11_medium_par_executor
+     steps:
+       - attach_workspace:
+           at: /home/cassandra
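For illustration: the hunks above bump `j8_medium_par_executor` from a commented-out resource class with `parallelism: 1` to `exec_resource_class: xlarge` with `parallelism: 4`. A minimal sketch of the anchor as it would resolve once the updated patch is applied, reconstructed only from the `+`/`-` lines above (not an excerpt of the generated file):

```
# Sketch of the effective anchor after the updated patch applies
# (field names and values taken from the hunks above).
j8_medium_par_executor: &j8_medium_par_executor
  executor:
    name: java8-executor
    exec_resource_class: xlarge   # previously commented out
  parallelism: 4                  # previously 1
```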
diff --git a/.circleci/generate.sh b/.circleci/generate.sh
index 4fee247..29f66a2 100755
--- a/.circleci/generate.sh
+++ b/.circleci/generate.sh
@@ -18,7 +18,7 @@
 #
 
 BASEDIR=`dirname $0`
-BASE_BRANCH=cassandra-4.0
+BASE_BRANCH=cassandra-4.1
 set -e
 
 die ()
@@ -48,6 +48,8 @@
   echo "                   -e REPEATED_UTESTS_LONG_COUNT=100"
   echo "                   -e REPEATED_UTESTS_STRESS=org.apache.cassandra.stress.generate.DistributionGaussianTest"
   echo "                   -e REPEATED_UTESTS_STRESS_COUNT=500"
+  echo "                   -e REPEATED_SIMULATOR_DTESTS=org.apache.cassandra.simulator.test.TrivialSimulationTest"
+  echo "                   -e REPEATED_SIMULATOR_DTESTS_COUNT=500"
   echo "                   -e REPEATED_JVM_DTESTS=org.apache.cassandra.distributed.test.PagingTest"
   echo "                   -e REPEATED_JVM_DTESTS_COUNT=500"
   echo "                   -e REPEATED_JVM_UPGRADE_DTESTS=org.apache.cassandra.distributed.upgrade.GroupByTest"
@@ -61,6 +63,7 @@
   echo "                   -e REPEATED_ANT_TEST_TARGET=testsome"
   echo "                   -e REPEATED_ANT_TEST_CLASS=org.apache.cassandra.cql3.ViewTest"
   echo "                   -e REPEATED_ANT_TEST_METHODS=testCompoundPartitionKey,testStaticTable"
+  echo "                   -e REPEATED_ANT_TEST_VNODES=false"
   echo "                   -e REPEATED_ANT_TEST_COUNT=500"
   echo "                  For the complete list of environment variables, please check the"
   echo "                  list of examples in config_template.yml and/or the documentation."
@@ -83,7 +86,7 @@
           ;;
       p ) paid=true
           ;;
-      e ) if (!($has_env_vars)); then
+      e ) if (! ($has_env_vars)); then
             env_vars="$OPTARG"
           else
             env_vars="$env_vars|$OPTARG"
@@ -116,6 +119,8 @@
        [ "$key" != "REPEATED_UTESTS_LONG_COUNT" ] &&
        [ "$key" != "REPEATED_UTESTS_STRESS" ] &&
        [ "$key" != "REPEATED_UTESTS_STRESS_COUNT" ] &&
+       [ "$key" != "REPEATED_SIMULATOR_DTESTS" ] &&
+       [ "$key" != "REPEATED_SIMULATOR_DTESTS_COUNT" ] &&
        [ "$key" != "REPEATED_JVM_DTESTS" ] &&
        [ "$key" != "REPEATED_JVM_DTESTS_COUNT" ] &&
        [ "$key" != "REPEATED_JVM_UPGRADE_DTESTS" ]  &&
@@ -129,6 +134,7 @@
        [ "$key" != "REPEATED_ANT_TEST_TARGET" ] &&
        [ "$key" != "REPEATED_ANT_TEST_CLASS" ] &&
        [ "$key" != "REPEATED_ANT_TEST_METHODS" ] &&
+       [ "$key" != "REPEATED_ANT_TEST_VNODES" ] &&
        [ "$key" != "REPEATED_ANT_TEST_COUNT" ]; then
       die "Unrecognised environment variable name: $key"
     fi
@@ -170,13 +176,13 @@
   # copy free tier into config.yml to make sure this gets updated
   cp $BASEDIR/config.yml.FREE $BASEDIR/config.yml
 
-elif (!($has_env_vars)); then
+elif (! ($has_env_vars)); then
   print_help
   exit 0
 fi
 
 # add new or modified tests to the sets of tests to be repeated
-if (!($all)); then
+if (! ($all)); then
   add_diff_tests ()
   {
     dir="${BASEDIR}/../${2}"
@@ -207,6 +213,7 @@
   add_diff_tests "REPEATED_UTESTS_LONG" "test/long/" "org.apache.cassandra"
   add_diff_tests "REPEATED_UTESTS_STRESS" "tools/stress/test/unit/" "org.apache.cassandra.stress"
   add_diff_tests "REPEATED_UTESTS_FQLTOOL" "tools/fqltool/test/unit/" "org.apache.cassandra.fqltool"
+  add_diff_tests "REPEATED_SIMULATOR_DTESTS" "test/simulator/test/" "org.apache.cassandra.simulator.test"
   add_diff_tests "REPEATED_JVM_DTESTS" "test/distributed/" "org.apache.cassandra.distributed.test"
   add_diff_tests "REPEATED_JVM_UPGRADE_DTESTS" "test/distributed/" "org.apache.cassandra.distributed.upgrade"
 fi
@@ -266,6 +273,10 @@
     delete_job "$1" "j8_utests_fqltool_repeat"
     delete_job "$1" "j11_utests_fqltool_repeat"
   fi
+  if (! (echo "$env_vars" | grep -q "REPEATED_SIMULATOR_DTESTS=")); then
+    delete_job "$1" "j8_simulator_dtests_repeat"
+    delete_job "$1" "j11_simulator_dtests_repeat"
+  fi
   if (! (echo "$env_vars" | grep -q "REPEATED_JVM_DTESTS=")); then
     delete_job "$1" "j8_jvm_dtests_repeat"
     delete_job "$1" "j8_jvm_dtests_vnode_repeat"
diff --git a/.circleci/readme.md b/.circleci/readme.md
index 67b00e6..d5389f6 100644
--- a/.circleci/readme.md
+++ b/.circleci/readme.md
@@ -99,6 +99,7 @@
   -e REPEATED_UTESTS_FQLTOOL=org.apache.cassandra.fqltool.FQLCompareTest \
   -e REPEATED_UTESTS_LONG=org.apache.cassandra.io.sstable.CQLSSTableWriterLongTest#testWideRow \
   -e REPEATED_UTESTS_STRESS=org.apache.cassandra.stress.generate.DistributionGaussianTest \
+  -e REPEATED_SIMULATOR_DTESTS=org.apache.cassandra.simulator.test.TrivialSimulationTest \
   -e REPEATED_DTESTS=cql_test.py,consistency_test.py::TestAvailability::test_simple_strategy \
   -e REPEATED_LARGE_DTESTS=replace_address_test.py::TestReplaceAddress::test_replace_stopped_node \
   -e REPEATED_JVM_DTESTS=org.apache.cassandra.distributed.test.PagingTest#testPaging \
@@ -112,6 +113,7 @@
   -e REPEATED_ANT_TEST_TARGET=test-cdc \
   -e REPEATED_ANT_TEST_CLASS=org.apache.cassandra.cql3.ViewTest \
   -e REPEATED_ANT_TEST_METHODS=testCompoundPartitionKey,testStaticTable \
+  -e REPEATED_ANT_TEST_VNODES=false \
   -e REPEATED_ANT_TEST_COUNT=500
 ```
 Putting it all together, you can have runs as complex as:
@@ -126,6 +128,8 @@
   -e REPEATED_UTESTS_LONG_COUNT=100 \
   -e REPEATED_UTESTS_STRESS=org.apache.cassandra.stress.generate.DistributionGaussianTest \
   -e REPEATED_UTESTS_STRESS_COUNT=500 \
+  -e REPEATED_SIMULATOR_DTESTS=org.apache.cassandra.simulator.test.TrivialSimulationTest \
+  -e REPEATED_SIMULATOR_DTESTS_COUNT=500 \
   -e REPEATED_DTESTS=cql_test.py,consistency_test.py::TestAvailability::test_simple_strategy \
   -e REPEATED_DTESTS_COUNT=500 \
   -e REPEATED_LARGE_DTESTS=replace_address_test.py,materialized_views_test.py \
@@ -139,6 +143,7 @@
   -e REPEATED_ANT_TEST_TARGET=test-cdc \
   -e REPEATED_ANT_TEST_CLASS=org.apache.cassandra.cql3.ViewTest \
   -e REPEATED_ANT_TEST_METHODS=testCompoundPartitionKey,testStaticTable \
+  -e REPEATED_ANT_TEST_VNODES=false \
   -e REPEATED_ANT_TEST_COUNT=500
 ```
 
diff --git a/.gitignore b/.gitignore
index 318f200..916ab0d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,7 +13,9 @@
 doc/build/
 lib/
 pylib/src/
+**/cqlshlib.xml
 !lib/cassandra-driver-internal-only-*.zip
+!lib/puresasl-*.zip
 
 # C* debs
 build-stamp
@@ -66,8 +68,9 @@
 *.tmp
 .DS_Store
 Thumbs.db
+.ccm/
 
-/.ant-targets-build.xml
+**/.ant-targets-build.xml
 .ant_targets
 
 # Generated files from the documentation
@@ -81,3 +84,5 @@
 # build-scripts will put cassandra-builds into the directory
 cassandra-builds/
 cassandra-dtest/
+
+conf/triggers/trigger-example.jar
diff --git a/CHANGES.txt b/CHANGES.txt
index 786a8a6..246ba58 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,9 +1,17 @@
-4.0.11
+4.1.3
+Merged from 4.0:
  * Remove unnecessary shuffling of GossipDigests in Gossiper#makeRandomGossipDigest (CASSANDRA-18546)
 Merged from 3.11:
 Merged from 3.0:
 
-4.0.10
+4.1.2
+ * Allow keystore and truststore passwords to be nullable (CASSANDRA-18124)
+ * Return snapshots with dots in their name in nodetool listsnapshots (CASSANDRA-18371)
+ * Fix NPE when loading snapshots and data directory is one directory from root (CASSANDRA-18359)
+ * Do not submit hints when hinted_handoff_enabled=false (CASSANDRA-18304)
+ * Fix COPY ... TO STDOUT behavior in cqlsh (CASSANDRA-18353)
+ * Remove six and Py2SaferScanner merge cruft (CASSANDRA-18354)
+Merged from 4.0:
  * Improve nodetool enable{audit,fullquery}log (CASSANDRA-18550)
  * Report network cache info in nodetool (CASSANDRA-18400)
  * Partial compaction can resurrect deleted data (CASSANDRA-18507)
@@ -14,6 +22,7 @@
  * Incremental repairs fail on mixed IPv4/v6 addresses serializing SyncRequest (CASSANDRA-18474)
  * Deadlock updating sstable metadata if disk boundaries need reloading (CASSANDRA-18443)
  * Fix nested selection of reversed collections (CASSANDRA-17913)
+ * Update zstd-jni library to version 1.5.5 (CASSANDRA-18429)
 Merged from 3.11:
  * Remove unnecessary String.format invocation in QueryProcessor when getting a prepared statement from cache (CASSANDRA-17202)
  * Fix the capital P usage in the CQL parser (CASSANDRA-17919)
@@ -37,11 +46,24 @@
  * Suppress CVE-2022-45688 (CASSANDRA-18389)
  * Fix Splitter sometimes creating more splits than requested (CASSANDRA-18013)
 Merged from 3.0:
+ * Do not remove SSTables when cause of FSReadError is OutOfMemoryError while using best_effort disk failure policy (CASSANDRA-18336)
+ * Do not remove truncated_at entry in system.local while dropping an index (CASSANDRA-18105)
  * Save host id to system.local and flush immediately after startup (CASSANDRA-18153)
- * Fix the ordering of sstables when running sstableupgrade tool (CASSANDRA-18143)
- * Fix default file system error handler for disk_failure_policy die (CASSANDRA-18294)
 
-4.0.8
+4.1.1
+ * Deprecate org.apache.cassandra.hadoop code (CASSANDRA-16984)
+ * Fix too early schema version change in system local table (CASSANDRA-18291)
+ * Fix copying of JAR of a trigger to temporary file (CASSANDRA-18264)
+ * Fix possible NoSuchFileException when removing a snapshot (CASSANDRA-18211)
+ * PaxosPrepare may add instances to the Electorate that are not in gossip (CASSANDRA-18194)
+ * Fix PAXOS2_COMMIT_AND_PREPARE_RSP serialisation AssertionError (CASSANDRA-18164)
+ * Streaming progress virtual table lock contention can trigger TCP_USER_TIMEOUT and fail streaming (CASSANDRA-18110)
+ * Fix perpetual load of denylist on read in cases where denylist can never be loaded (CASSANDRA-18116)
+Merged from 4.0:
+ * Fix BufferPool incorrect memoryInUse when putUnusedPortion is used (CASSANDRA-18311)
+ * Improve memtable allocator accounting when updating AtomicBTreePartition (CASSANDRA-18125)
+ * Update zstd-jni to version 1.5.4-1 (CASSANDRA-18259)
+ * Split and order IDEA workspace template VM_PARAMETERS (CASSANDRA-18242)
  * Log warning message on aggregation queries without key or on multiple keys (CASSANDRA-18219)
  * Fix the output of FQL dump tool to properly separate entries (CASSANDRA-18215)
  * Add cache type information for maximum memory usage warning message (CASSANDRA-18184)
@@ -55,13 +77,10 @@
  * Fix sstable loading of keyspaces named snapshots or backups (CASSANDRA-14013)
  * Avoid ConcurrentModificationException in STCS/DTCS/TWCS.getSSTables (CASSANDRA-17977)
  * Restore internode custom tracing on 4.0's new messaging system (CASSANDRA-17981)
- * Harden parsing of boolean values in CQL in PropertyDefinitions (CASSANDRA-17878)
- * Fix error message about type hints (CASSANDRA-17915)
- * Fix possible race condition on repair snapshots (CASSANDRA-17955)
- * Fix ASM bytecode version inconsistency (CASSANDRA-17873)
 Merged from 3.11:
- * Fix Splitter sometimes creating more splits than requested (CASSANDRA-18013)
 Merged from 3.0:
+ * Fix the ordering of sstables when running sstableupgrade tool (CASSANDRA-18143)
+ * Fix default file system error handler for disk_failure_policy die (CASSANDRA-18294)
  * Introduce check for names of test classes (CASSANDRA-17964)
  * Suppress CVE-2022-41915 (CASSANDRA-18147)
  * Suppress CVE-2021-1471, CVE-2021-3064, CVE-2021-4235 (CASSANDRA-18149)
@@ -73,33 +92,79 @@
  * Add to the IntelliJ Git Window issue navigation links to Cassandra's Jira (CASSANDRA-18126)
  * Avoid anticompaction mixing data from two different time windows with TWCS (CASSANDRA-17970)
  * Do not spam the logs with MigrationCoordinator not being able to pull schemas (CASSANDRA-18096)
+
+4.1.0
+ * Fix ContentionStrategy backoff and Clock.waitUntil (CASSANDRA-18086)
+Merged from 4.0:
+Merged from 3.11:
+Merged from 3.0:
  * Fix incorrect resource name in LIST PERMISSION output (CASSANDRA-17848)
  * Suppress CVE-2022-41854 and similar (CASSANDRA-18083)
- * Fix running Ant rat targets without git (CASSANDRA-17974)
 
-4.0.7
+
+4.1-rc1
+ * Avoid schema mismatch problems on memtable API misconfiguration (CASSANDRA-18040)
+ * Start Paxos auto repair in CassandraDaemon (CASSANDRA-18029)
+ * Restore streaming_keep_alive_period on the netty control streaming channel (CASSANDRA-17768)
+ * Move Schema.FORCE_LOAD_KEYSPACES and Schema.FORCE_LOAD_KEYSPACES_PROP to CassandraRelevantProps (CASSANDRA-17783)
+ * Add --resolve-ip option to nodetool gossipinfo (CASSANDRA-17934)
+ * Allow pre-V5 global limit on bytes in flight to revert to zero asynchronously in RateLimitingTest (CASSANDRA-17927)
+Merged from 4.0:
+ * Backport CASSANDRA-17205 to 4.0 branch - Remove self-reference in SSTableTidier (CASSANDRA-18332)
+ * Avoid loading the preferred IP for BulkLoader streaming (CASSANDRA-18370)
+ * Harden parsing of boolean values in CQL in PropertyDefinitions (CASSANDRA-17878)
+ * Fix error message about type hints (CASSANDRA-17915)
+ * Fix possible race condition on repair snapshots (CASSANDRA-17955)
+ * Fix ASM bytecode version inconsistency (CASSANDRA-17873)
  * Remove empty cq4 files in log directory to not fail the startup of BinLog (CASSANDRA-17933)
  * Fix multiple BufferPool bugs (CASSANDRA-16681)
  * Fix StorageService.getNativeaddress handling of IPv6 addresses (CASSANDRA-17945)
+Merged from 3.11:
+ * Fix Splitter sometimes creating more splits than requested (CASSANDRA-18013)
+ * Suppress CVE-2022-42003 and CVE-2022-42004 (CASSANDRA-17966)
+ * Make LongBufferPoolTest insensitive to timing (CASSANDRA-16681)
+Merged from 3.0:
+ * Fix running Ant rat targets without git (CASSANDRA-17974)
+ * Harden JMX by resolving beanshooter issues (CASSANDRA-17921)
+ * Suppress CVE-2019-2684 (CASSANDRA-17965)
+ * Fix auto-completing "WITH" when creating a materialized view (CASSANDRA-17879)
+
+4.1-beta1
+ * We should not emit deprecation warning on startup for `key_cache_save_period`, `row_cache_save_period`, `counter_cache_save_period` (CASSANDRA-17904)
+ * upsert with adder support is not consistent with numbers and strings in LWT (CASSANDRA-17857)
+ * Fix race and return after failing connections (CASSANDRA-17618)
+ * Speculative execution threshold unit mismatch (CASSANDRA-17877)
+ * Fix BulkLoader to load entireSSTableThrottle and entireSSTableInterDcThrottle (CASSANDRA-17677)
+ * Fix a race condition where a keyspace can be opened while it is being removed (CASSANDRA-17658)
+ * DatabaseDescriptor will set the default failure detector during client initialization (CASSANDRA-17782)
+ * Avoid initializing schema via SystemKeyspace.getPreferredIP() with the BulkLoader tool (CASSANDRA-17740)
+ * Improve JMX methods signatures, fix JMX and config backward compatibility (CASSANDRA-17725)
+ * Fix sstable_preemptive_open_interval disabled value. sstable_preemptive_open_interval = null backward compatible with
+   sstable_preemptive_open_interval_in_mb = -1 (CASSANDRA-17737)
+ * Remove usages of Path#toFile() in the snapshot apparatus (CASSANDRA-17769)
+ * Fix Settings Virtual Table to update paxos_variant after startup and rename enable_uuid_sstable_identifiers to
+   uuid_sstable_identifiers_enabled as per our config naming conventions (CASSANDRA-17738)
+ * index_summary_resize_interval_in_minutes = -1 is equivalent to index_summary_resize_interval being set to null or
+   disabled. JMX MBean IndexSummaryManager, setResizeIntervalInMinutes method still takes resizeIntervalInMinutes = -1 for disabled (CASSANDRA-17735)
+ * min_tracked_partition_size_bytes parameter from 4.1 alpha1 was renamed to min_tracked_partition_size (CASSANDRA-17733)
+ * Remove commons-lang dependency during build runtime (CASSANDRA-17724)
+ * Relax synchronization on StreamSession#onError() to avoid deadlock (CASSANDRA-17706)
+ * Fix AbstractCell#toString throws MarshalException for cell in collection (CASSANDRA-17695)
+ * Add new vtable output option to compactionstats (CASSANDRA-17683)
+ * Fix commitLogUpperBound initialization in AbstractMemtableWithCommitlog (CASSANDRA-17587)
+ * Fix widening to long in getBatchSizeFailThreshold (CASSANDRA-17650)
+ * Fix widening from mebibytes to bytes in IntMebibytesBound (CASSANDRA-17716)
+ * Revert breaking change in nodetool clientstats and expose client options through nodetool clientstats --client-options. (CASSANDRA-17715)
+ * Fix missed nowInSec values in QueryProcessor (CASSANDRA-17458)
+ * Revert removal of withBufferSizeInMB(int size) in CQLSSTableWriter.Builder class and deprecate it in favor of withBufferSizeInMiB(int size) (CASSANDRA-17675)
+ * Remove expired snapshots of dropped tables after restart (CASSANDRA-17619)
+Merged from 4.0:
  * Mitigate direct buffer memory OOM on replacements (CASSANDRA-17895)
  * Fix repair failure on assertion if two peers have overlapping mismatching ranges (CASSANDRA-17900)
  * Better handle null state in Gossip schema migration to avoid NPE (CASSANDRA-17864)
  * HintedHandoffAddRemoveNodesTest now accounts for the fact that StorageMetrics.totalHints is not updated synchronously w/ writes (CASSANDRA-16679)
  * Avoid getting hanging repairs due to repair message timeouts (CASSANDRA-17613)
  * Prevent infinite loop in repair coordinator on FailSession (CASSANDRA-17834)
-Merged from 3.11:
- * Suppress CVE-2022-42003 and CVE-2022-42004 (CASSANDRA-17966)
- * Make LongBufferPoolTest insensitive to timing (CASSANDRA-16681)
- * Suppress CVE-2022-25857 and other snakeyaml CVEs (CASSANDRA-17907)
- * Fix potential IndexOutOfBoundsException in PagingState in mixed mode clusters (CASSANDRA-17840)
-Merged from 3.0:
- * Harden JMX by resolving beanshooter issues (CASSANDRA-17921)
- * Suppress CVE-2019-2684 (CASSANDRA-17965)
- * Fix auto-completing "WITH" when creating a materialized view (CASSANDRA-17879)
- * Fix scrubber falling into infinite loop when the last partition is broken (CASSANDRA-17862)
- * Fix resetting schema (CASSANDRA-17819)
-
-4.0.6
  * Fix race condition on updating cdc size and advancing to next segment (CASSANDRA-17792)
  * Add 'noboolean' rpm build for older distros like CentOS7 (CASSANDRA-17765)
  * Fix default value for compaction_throughput_mb_per_sec in Config class to match  the one in cassandra.yaml (CASSANDRA-17790)
@@ -109,30 +174,202 @@
  * Fix Settings Virtual Table - index_summary_resize_interval and index_summary_capacity were not updated after startup (CASSANDRA-17735)
  * Clean up ScheduledExecutors, CommitLog, and MessagingService shutdown for in-JVM dtests (CASSANDRA-17731)
  * Remove extra write to system table for prepared statements (CASSANDRA-17764)
-Merged from 3.11:
- * Document usage of closed token intervals in manual compaction (CASSANDRA-17575)
-Merged from 3.0:
- * Improve libjemalloc resolution in bin/cassandra (CASSANDRA-15767)
- * Fix restarting of services on gossipping-only member (CASSANDRA-17752)
-
-4.0.5
- * Utilise BTree improvements to reduce garbage and improve throughput (CASSANDRA-15511)
- * Make sure existing delayed tasks in StreamTransferTask cannot prevent clean shutdown (CASSANDRA-17706)
  * SSL storage port in sstableloader is deprecated (CASSANDRA-17602)
  * Fix counter write timeouts at ONE (CASSANDRA-17411)
  * Fix NPE in getLocalPrimaryRangeForEndpoint (CASSANDRA-17680)
- * Remove SSL storage port from sstableloader (CASSANDRA-17602)
  * Allow Java 11 to satisfy RPM/Debian packaging (CASSANDRA-17669)
  * Ensure FileStreamTask cannot compromise shared channel proxy for system table when interrupted (CASSANDRA-17663)
  * silence benign SslClosedEngineException (CASSANDRA-17565)
 Merged from 3.11:
+ * Fix potential IndexOutOfBoundsException in PagingState in mixed mode clusters (CASSANDRA-17840)
+ * Document usage of closed token intervals in manual compaction (CASSANDRA-17575)
  * Creating of a keyspace on insufficient number of replicas should filter out gossiping-only members (CASSANDRA-17759)
+ * Suppress CVE-2022-25857 and other snakeyaml CVEs (CASSANDRA-17907)
+Merged from 3.0:
+ * Improve libjemalloc resolution in bin/cassandra (CASSANDRA-15767)
+ * Fix restarting of services on gossipping-only member (CASSANDRA-17752)
+ * Fix scrubber falling into infinite loop when the last partition is broken (CASSANDRA-17862)
+ * Fix resetting schema (CASSANDRA-17819)
+
+4.1-alpha1
+ * Handle config parameters upper bound on startup; Fix auto_snapshot_ttl and paxos_purge_grace_period min unit validations (CASSANDRA-17571)
+ * Fix leak of non-standard Java types in our Exceptions as clients using JMX are unable to handle them.
+   Remove useless validation that leads to unnecessary additional read of cassandra.yaml on startup (CASSANDRA-17638)
+ * Fix repair_request_timeout_in_ms and remove paxos_auto_repair_threshold_mb (CASSANDRA-17557)
+ * Incremental repair leaks SomeRepairFailedException after switch away from flatMap (CASSANDRA-17620)
+ * StorageService read threshold get methods throw NullPointerException due to not handling null configs (CASSANDRA-17593)
+ * Rename truncate_drop guardrail to drop_truncate_table (CASSANDRA-17592)
+ * nodetool enablefullquerylog can NPE when directory has no files (CASSANDRA-17595)
+ * Add auto_snapshot_ttl configuration (CASSANDRA-16790)
+ * List snapshots of dropped tables (CASSANDRA-16843)
+ * Add information whether sstables are dropped to SchemaChangeListener (CASSANDRA-17582)
+ * Add a pluggable memtable API (CEP-11 / CASSANDRA-17034)
+ * Save sstable id as string in activity table (CASSANDRA-17585)
+ * Implement startup check to prevent Cassandra from potentially spreading zombie data (CASSANDRA-17180)
+ * Allow failing startup on duplicate config keys (CASSANDRA-17379)
+ * Migrate threshold for minimum keyspace replication factor to guardrails (CASSANDRA-17212)
+ * Add guardrail to disallow TRUNCATE and DROP TABLE commands (CASSANDRA-17558)
+ * Add plugin support for CQLSH (CASSANDRA-16456)
+ * Add guardrail to disallow querying with ALLOW FILTERING (CASSANDRA-17370)
+ * Enhance SnakeYAML properties to be reusable outside of YAML parsing, support camel case conversion to snake case, and add support to ignore properties (CASSANDRA-17166)
+ * nodetool compact should support using a key string to find the range to avoid operators having to manually do this (CASSANDRA-17537)
+ * Add guardrail for data disk usage (CASSANDRA-17150)
+ * Tool to list data paths of existing tables (CASSANDRA-17568)
+ * Migrate track_warnings to more standard naming conventions and use latest configuration types rather than long (CASSANDRA-17560)
+ * Add support for CONTAINS and CONTAINS KEY in conditional UPDATE and DELETE statement (CASSANDRA-10537)
+ * Migrate advanced config parameters to the new Config types (CASSANDRA-17431)
+ * Make null to be meaning disabled and leave 0 as a valid value for permissions_update_interval, roles_update_interval, credentials_update_interval (CASSANDRA-17431)
+ * Fix typo in Config annotation (CASSANDRA-17431)
+ * Made Converters type safe and fixed a few cases where converters used the wrong type (CASSANDRA-17431)
+ * Fix null bug in DataStorageSpec and DurationSpec and require units to be added when providing 0 value (CASSANDRA-17431)
+ * Shutdown ScheduledExecutors as part of node drainage (CASSANDRA-17493)
+ * Provide JMX endpoint to allow transient logging of blocking read repairs (CASSANDRA-17471)
+ * Add guardrail for GROUP BY queries (CASSANDRA-17509)
+ * make pylib PEP and pylint compliant (CASSANDRA-17546)
+ * Add support for vnodes in jvm-dtest (CASSANDRA-17332)
+ * Remove guardrails global enable flag (CASSANDRA-17499)
+ * Clients using JMX are unable to handle non-standard java types but we leak this into our interfaces (CASSANDRA-17527)
+ * Remove stress server functionality (CASSANDRA-17535)
+ * Reduce histogram snapshot long[] allocation overhead during speculative read and write threshold updates (CASSANDRA-17523)
+ * Add guardrail for creation of secondary indexes (CASSANDRA-17498)
+ * Add guardrail to disallow creation of uncompressed tables (CASSANDRA-17504)
+ * Add guardrail to disallow creation of new COMPACT STORAGE tables (CASSANDRA-17522)
+ * repair vtables should expose a completed field due to lack of filtering options in CQL (CASSANDRA-17520)
+ * remove outdated code from cqlsh (CASSANDRA-17490)
+ * remove support for deprecated version specific TLS in Python 3.6 (CASSANDRA-17365)
+ * Add support for IF EXISTS and IF NOT EXISTS in ALTER statements (CASSANDRA-16916)
+ * resolve several pylint issues in cqlsh.py and pylib (CASSANDRA-17480)
+ * Streaming sessions longer than 3 minutes fail with timeout (CASSANDRA-17510)
+ * Add ability to track state in repair (CASSANDRA-15399)
+ * Remove unused 'parse' module (CASSANDRA-17484)
+ * change six functions in cqlshlib to native Python 3 (CASSANDRA-17417)
+ * reduce hot-path object allocations required to record local/remote requests against the client request metrics (CASSANDRA-17424)
+ * Disallow removing DC from system_auth while nodes are active in the DC (CASSANDRA-17478)
+ * Add guardrail for the number of fields per UDT (CASSANDRA-17385)
+ * Allow users to change cqlsh history location using env variable (CASSANDRA-17448)
+ * Add required -f option to use nodetool verify and standalone sstableverify (CASSANDRA-17017)
+ * Add support for UUID based sstable generation identifiers (CASSANDRA-17048)
+ * Log largest memtable flush at info instead of debug (CASSANDRA-17472)
+ * Add native transport rate limiter options to example cassandra.yaml, and expose metric for dispatch rate (CASSANDRA-17423)
+ * Add diagnostic events for guardrails (CASSANDRA-17197)
+ * Pre hashed passwords in CQL (CASSANDRA-17334)
+ * Increase cqlsh version (CASSANDRA-17432)
+ * Update SUPPORTED_UPGRADE_PATHS to include 3.0 and 3.x to 4.1 paths and remove obsolete tests (CASSANDRA-17362)
+ * Support DELETE in CQLSSTableWriter (CASSANDRA-14797)
+ * Failed inbound internode authentication failures generate ugly warning with stack trace (CASSANDRA-17068)
+ * Expose gossip information in system_views.gossip_info virtual table (CASSANDRA-17002)
+ * Add guardrails for collection items and size (CASSANDRA-17153)
+ * Improve guardrails messages (CASSANDRA-17430)
+ * Remove all usages of junit.framework and ban them via Checkstyle (CASSANDRA-17316)
+ * Add guardrails for read/write consistency levels (CASSANDRA-17188)
+ * Add guardrail for SELECT IN terms and their cartesian product (CASSANDRA-17187)
+ * remove unused imports in cqlsh.py and cqlshlib (CASSANDRA-17413)
+ * deprecate property windows_timer_interval (CASSANDRA-17404)
+ * Expose streaming as a vtable (CASSANDRA-17390)
+ * Expose all client options via system_views.clients and nodetool clientstats (CASSANDRA-16378)
+ * Make startup checks configurable (CASSANDRA-17220)
+ * Add guardrail for number of partition keys on IN queries (CASSANDRA-17186)
+ * update Python test framework from nose to pytest (CASSANDRA-17293)
+ * Fix improper CDC commit log segments deletion in non-blocking mode (CASSANDRA-17233)
+ * Add support for string concatenations through the + operator (CASSANDRA-17190)
+ * Limit the maximum hints size per host (CASSANDRA-17142)
+ * Add a virtual table for exposing batch metrics (CASSANDRA-17225)
+ * Flatten guardrails config (CASSANDRA-17353)
+ * Instance failed to start up due to NPE in StartupClusterConnectivityChecker (CASSANDRA-17347)
+ * add the shorter version of version flag (-v) in cqlsh (CASSANDRA-17236)
+ * Make vtables accessible via internode messaging (CASSANDRA-17295)
+ * Add support for PEM based key material for SSL (CASSANDRA-17031)
+ * Standardize storage configuration parameters' names. Support unit suffixes. (CASSANDRA-15234)
+ * Remove support for Windows (CASSANDRA-16956)
+ * Runtime-configurable YAML option to prohibit USE statements (CASSANDRA-17318)
+ * When streaming sees a ClosedChannelException this triggers the disk failure policy (CASSANDRA-17116)
+ * Add a virtual table for exposing prepared statements metrics (CASSANDRA-17224)
+ * Remove python 2.x support from cqlsh (CASSANDRA-17242)
+ * Prewarm role and credential caches to avoid timeouts at startup (CASSANDRA-16958)
+ * Make capacity/validity/updateinterval/activeupdate for Auth Caches configurable via nodetool (CASSANDRA-17063)
+ * Added startup check for read_ahead_kb setting (CASSANDRA-16436)
+ * Avoid unnecessary array allocations and initializations when performing query checks (CASSANDRA-17209)
+ * Add guardrail for list operations that require read before write (CASSANDRA-17154)
+ * Migrate thresholds for number of keyspaces and tables to guardrails (CASSANDRA-17195)
+ * Remove self-reference in SSTableTidier (CASSANDRA-17205)
+ * Add guardrail for query page size (CASSANDRA-17189)
+ * Allow column_index_size_in_kb to be configurable through nodetool (CASSANDRA-17121)
+ * Emit a metric for number of local read and write calls
+ * Add non-blocking mode for CDC writes (CASSANDRA-17001)
+ * Add guardrails framework (CASSANDRA-17147)
+ * Harden resource management on SSTable components to prevent future leaks (CASSANDRA-17174)
+ * Make nodes more resilient to local unrelated files during startup (CASSANDRA-17082)
+ * repair prepare message would produce a wrong error message if network timeout happened rather than reply wait timeout (CASSANDRA-16992)
+ * Log queries that fail on timeout or unavailable errors up to once per minute by default (CASSANDRA-17159)
+ * Refactor normal/preview/IR repair to standardize repair cleanup and error handling of failed RepairJobs (CASSANDRA-17069)
+ * Log missing peers in StartupClusterConnectivityChecker (CASSANDRA-17130)
+ * Introduce separate rate limiting settings for entire SSTable streaming (CASSANDRA-17065)
+ * Implement Virtual Tables for Auth Caches (CASSANDRA-16914)
+ * Actively update auth cache in the background (CASSANDRA-16957)
+ * Add unix time conversion functions (CASSANDRA-17029)
+ * JVMStabilityInspector.forceHeapSpaceOomMaybe should handle all non-heap OOMs rather than only supporting direct only (CASSANDRA-17128)
+ * Forbid other Future implementations with checkstyle (CASSANDRA-17055)
+ * commit log was switched from non-daemon to daemon threads, which causes the JVM to exit in some cases as no non-daemon threads are active (CASSANDRA-17085)
+ * Add a Denylist to block reads and writes on specific partition keys (CASSANDRA-12106)
+ * v4+ protocol did not clean up client warnings, which caused leaking the state (CASSANDRA-17054)
+ * Remove duplicate toCQLString in ReadCommand (CASSANDRA-17023)
+ * Ensure hint window is persistent across restarts of a node (CASSANDRA-14309)
+ * Allow to GRANT or REVOKE multiple permissions in a single statement (CASSANDRA-17030)
+ * Allow to grant permission for all tables in a keyspace (CASSANDRA-17027)
+ * Log time spent writing keys during compaction (CASSANDRA-17037)
+ * Make nodetool compactionstats and sstable_tasks consistent (CASSANDRA-16976)
+ * Add metrics and logging around index summary redistribution (CASSANDRA-17036)
+ * Add configuration options for minimum allowable replication factor and default replication factor (CASSANDRA-14557)
+ * Expose information about stored hints via a nodetool command and a virtual table (CASSANDRA-14795)
+ * Add broadcast_rpc_address to system.local (CASSANDRA-11181)
+ * Add support for type casting in WHERE clause components and in the values of INSERT/UPDATE statements (CASSANDRA-14337)
+ * add credentials file support to CQLSH (CASSANDRA-16983)
+ * Skip remaining bytes in the Envelope buffer when a ProtocolException is thrown to avoid double decoding (CASSANDRA-17026)
+ * Allow reverse iteration of resources during permissions checking (CASSANDRA-17016)
+ * Add feature to verify correct ownership of attached locations on disk at startup (CASSANDRA-16879)
+ * Make SSLContext creation pluggable/extensible (CASSANDRA-16666)
+ * Add soft/hard limits to local reads to protect against reading too much data in a single query (CASSANDRA-16896)
+ * Avoid token cache invalidation for removing a non-member node (CASSANDRA-15290)
+ * Allow configuration of consistency levels on auth operations (CASSANDRA-12988)
+ * Add number of sstables in a compaction to compactionstats output (CASSANDRA-16844)
+ * Upgrade Caffeine to 2.9.2 (CASSANDRA-15153)
+ * Allow DELETE and TRUNCATE to work on Virtual Tables if the implementation allows it (CASSANDRA-16806)
+ * Include SASI components to snapshots (CASSANDRA-15134)
+ * Fix missed wait latencies in the output of `nodetool tpstats -F` (CASSANDRA-16938)
+ * Reduce native transport max frame size to 16MB (CASSANDRA-16886)
+ * Add support for filtering using IN restrictions (CASSANDRA-14344)
+ * Provide a nodetool command to invalidate auth caches (CASSANDRA-16404)
+ * Catch read repair timeout exceptions and add metric (CASSANDRA-16880)
+ * Exclude Jackson 1.x transitive dependency of hadoop* provided dependencies (CASSANDRA-16854)
+ * Add client warnings and abort to tombstone and coordinator reads which go past a low/high watermark (CASSANDRA-16850)
+ * Add TTL support to nodetool snapshots (CASSANDRA-16789)
+ * Allow CommitLogSegmentReader to optionally skip sync marker CRC checks (CASSANDRA-16842)
+ * allow blocking IPs from updating metrics about traffic (CASSANDRA-16859)
+ * Request-Based Native Transport Rate-Limiting (CASSANDRA-16663)
+ * Implement nodetool getauditlog command (CASSANDRA-16725)
+ * Clean up repair code (CASSANDRA-13720)
+ * Background schedule to clean up orphaned hints files (CASSANDRA-16815)
+ * Modify SecondaryIndexManager#indexPartition() to retrieve only columns for which indexes are actually being built (CASSANDRA-16776)
+ * Batch the token metadata update to improve the speed (CASSANDRA-15291)
+ * Reduce the log level on "expected" repair exceptions (CASSANDRA-16775)
+ * Make JMXTimer expose attributes using consistent time unit (CASSANDRA-16760)
+ * Remove check on gossip status from DynamicEndpointSnitch::updateScores (CASSANDRA-11671)
+ * Fix AbstractReadQuery::toCQLString not returning valid CQL (CASSANDRA-16510)
+ * Log when compacting many tombstones (CASSANDRA-16780)
+ * Display bytes per level in tablestats for LCS tables (CASSANDRA-16799)
+ * Add isolated flush timer to CommitLogMetrics and ensure writes correspond to single WaitingOnCommit data points (CASSANDRA-16701)
+ * GossiperTest.testHasVersion3Nodes didn't take into account trunk version changes, fixed to rely on latest version (CASSANDRA-16651)
+ * Update JNA library to 5.9.0 and snappy-java to version 1.1.8.4 (CASSANDRA-17040)
+Merged from 4.0:
+ * Utilise BTree improvements to reduce garbage and improve throughput (CASSANDRA-15511)
+Merged from 3.11:
 Merged from 3.0:
  * Fix writetime and ttl functions forbidden for collections instead of multicell columns (CASSANDRA-17628)
  * Fix issue where frozen maps may not be serialized in the correct order (CASSANDRA-17623)
  * Suppress CVE-2022-24823 (CASSANDRA-17633)
  * fsync TOC and digest files (CASSANDRA-10709)
 
+
 4.0.4
  * Optimise BTree build,update and transform operations (CASSANDRA-15510)
  * Clean up schema migration coordinator and tests (CASSANDRA-17533)
@@ -148,10 +385,6 @@
  * Fix ignored streaming encryption settings in sstableloader (CASSANDRA-17367)
  * Streaming tasks handle empty SSTables correctly (CASSANDRA-16349)
  * Prevent SSTableLoader from doing unnecessary work (CASSANDRA-16349)
- * emit warning on keyspace creation when replication factor is bigger than the number of nodes (CASSANDRA-16747)
- * Fix snapshot true size calculation (CASSANDRA-17267)
- * dropping of a materialized view creates a snapshot with dropped- prefix (CASSANDRA-17415)
- * Validate existence of DCs when repairing (CASSANDRA-17407)
 Merged from 3.11:
  * Upgrade jackson-databind to 2.13.2.2 (CASSANDRA-17556)
  * Upgrade slf4j to 1.7.25 (CASSANDRA-17474)
@@ -161,6 +394,7 @@
  * Validate existence of DCs when repairing (CASSANDRA-17407)
  * dropping of a materialized view creates a snapshot with dropped- prefix (CASSANDRA-17415)
 Merged from 3.0:
+ * fsync TOC and digest files (CASSANDRA-10709)
  * Fix URISyntaxException in nodetool with updated Java (CASSANDRA-17581)
  * Schema mutations may not be completed on drain (CASSANDRA-17524)
  * Fix data corruption in AbstractCompositeType due to static boolean byte buffers (CASSANDRA-14752)
@@ -179,8 +413,6 @@
  * Deprecate otc_coalescing_strategy, otc_coalescing_window_us, otc_coalescing_enough_coalesced_messages,
    otc_backlog_expiration_interval_ms (CASSANDRA-17377)
  * Improve start up processing of Incremental Repair information read from system.repairs (CASSANDRA-17342)
-
-4.0.2
  * Extend operator control over the UDF threading model for CVE-2021-44521 (CASSANDRA-17352)
  * Full Java 11 support (CASSANDRA-16894)
  * Remove unused 'geomet' package from cqlsh path (CASSANDRA-17271)
@@ -204,19 +436,54 @@
  * Avoid rewriting all sstables during cleanup when transient replication is enabled (CASSANDRA-16966)
  * Prevent CQLSH from failure on Python 3.10 (CASSANDRA-16987)
  * Avoid trying to acquire 0 permits from the rate limiter when taking snapshot (CASSANDRA-16872)
- * Upgrade Caffeine to 2.5.6 (CASSANDRA-15153)
- * Include SASI components to snapshots (CASSANDRA-15134)
- * Fix missed wait latencies in the output of `nodetool tpstats -F` (CASSANDRA-16938)
  * Remove all the state pollution between tests in SSTableReaderTest (CASSANDRA-16888)
  * Delay auth setup until after gossip has settled to avoid unavailables on startup (CASSANDRA-16783)
- * Fix clustering order logic in CREATE MATERIALIZED VIEW (CASSANDRA-16898)
  * org.apache.cassandra.db.rows.ArrayCell#unsharedHeapSizeExcludingData includes data twice (CASSANDRA-16900)
+ * Fix clustering order logic in CREATE MATERIALIZED VIEW (CASSANDRA-16898)
  * Exclude Jackson 1.x transitive dependency of hadoop* provided dependencies (CASSANDRA-16854)
+ * Tolerate missing DNS entry when completing a host replacement (CASSANDRA-16873)
+ * Harden PrunableArrayQueue against Pruner implementations that might throw exceptions (CASSANDRA-16866)
+ * Move RepairedDataInfo to the execution controller rather than the ReadCommand to avoid unintended sharing (CASSANDRA-16721)
+ * Bump zstd-jni version to 1.5.0-4 (CASSANDRA-16884)
+ * Remove assumption that all urgent messages are small (CASSANDRA-16877)
+ * ArrayClustering.unsharedHeapSize does not include the data so undercounts the heap size (CASSANDRA-16845)
+ * Improve help, doc and error messages about sstabledump -k and -x arguments (CASSANDRA-16818)
+ * Add repaired/unrepaired bytes back to nodetool (CASSANDRA-15282)
+ * Upgrade lz4-java to 1.8.0 to add RH6 support back (CASSANDRA-16753)
+ * Improve DiagnosticEventService.publish(event) logging message of events (CASSANDRA-16749)
+ * Cleanup dependency scopes (CASSANDRA-16704)
+ * Make JmxHistogram#getRecentValues() and JmxTimer#getRecentValues() thread-safe (CASSANDRA-16707)
 Merged from 3.11:
+ * Upgrade jackson-databind to 2.13.2.2 (CASSANDRA-17556)
+ * Upgrade slf4j to 1.7.25 (CASSANDRA-17474)
+ * Upgrade jackson to 2.13.2 (CASSANDRA-17492)
+ * emit warning on keyspace creation when replication factor is bigger than the number of nodes (CASSANDRA-16747)
+ * Fix snapshot true size calculation (CASSANDRA-17267)
+ * dropping of a materialized view creates a snapshot with dropped- prefix (CASSANDRA-17415)
+ * Validate existence of DCs when repairing (CASSANDRA-17407)
  * Add key validation to ssstablescrub (CASSANDRA-16969)
  * Update Jackson from 2.9.10 to 2.12.5 (CASSANDRA-16851)
  * Make assassinate more resilient to missing tokens (CASSANDRA-16847)
+ * Validate SASI tokenizer options before adding index to schema (CASSANDRA-15135)
+ * Fixup scrub output when no data post-scrub and clear up old use of row, which really means partition (CASSANDRA-16835)
+ * Reduce thread contention in CommitLogSegment and HintsBuffer (CASSANDRA-16072)
+ * Make cqlsh use the same set of reserved keywords as the server uses (CASSANDRA-15663)
+ * Optimize bytes skipping when reading SSTable files (CASSANDRA-14415)
+ * Enable tombstone compactions when unchecked_tombstone_compaction is set in TWCS (CASSANDRA-14496)
+ * Read only the required SSTables for single partition queries (CASSANDRA-16737)
 Merged from 3.0:
+ * Schema mutations may not be completed on drain (CASSANDRA-17524)
+ * Fix data corruption in AbstractCompositeType due to static boolean byte buffers (CASSANDRA-14752)
+ * Add procps dependency to RPM/Debian packages (CASSANDRA-17516)
+ * Suppress CVE-2021-44521 (CASSANDRA-17492)
+ * ConnectionLimitHandler may leaks connection count if remote connection drops (CASSANDRA-17252)
+ * Require ant >= 1.10 (CASSANDRA-17428)
+ * Disallow CONTAINS for UPDATE and DELETE (CASSANDRA-15266)
+ * Suppress inapplicable CVEs (CASSANDRA-17368)
+ * Fix flaky test - test_cqlsh_completion.TestCqlshCompletion (CASSANDRA-17338)
+ * Fixed TestCqlshOutput failing tests (CASSANDRA-17386)
+ * Lazy transaction log replica creation allows incorrect replica content divergence during anticompaction (CASSANDRA-17273)
+ * LeveledCompactionStrategy disk space check improvements (CASSANDRA-17272)
  * Fix conversion from megabits to bytes in streaming rate limiter (CASSANDRA-17243)
  * Upgrade logback to 1.2.9 (CASSANDRA-17204)
  * Avoid race in AbstractReplicationStrategy endpoint caching (CASSANDRA-16673)
@@ -237,30 +504,7 @@
  * Catch UnsatisfiedLinkError in WindowsTimer (CASSANDRA-16085)
  * Avoid removing batch when it's not created during view replication (CASSANDRA-16175)
  * Make the addition of regular column to COMPACT tables throw an InvalidRequestException (CASSANDRA-14564)
- * Race in CompactionExecutorTest (CASSANDRA-17239)
-
-4.0.1
- * Tolerate missing DNS entry when completing a host replacement (CASSANDRA-16873)
- * Harden PrunableArrayQueue against Pruner implementations that might throw exceptions (CASSANDRA-16866)
- * Move RepairedDataInfo to the execution controller rather than the ReadCommand to avoid unintended sharing (CASSANDRA-16721)
- * Bump zstd-jni version to 1.5.0-4 (CASSANDRA-16884)
- * Remove assumption that all urgent messages are small (CASSANDRA-16877)
- * ArrayClustering.unsharedHeapSize does not include the data so undercounts the heap size (CASSANDRA-16845)
- * Improve help, doc and error messages about sstabledump -k and -x arguments (CASSANDRA-16818)
- * Add repaired/unrepaired bytes back to nodetool (CASSANDRA-15282)
- * Upgrade lz4-java to 1.8.0 to add RH6 support back (CASSANDRA-16753)
- * Improve DiagnosticEventService.publish(event) logging message of events (CASSANDRA-16749)
- * Cleanup dependency scopes (CASSANDRA-16704)
- * Make JmxHistogram#getRecentValues() and JmxTimer#getRecentValues() thread-safe (CASSANDRA-16707)
-Merged from 3.11:
- * Validate SASI tokenizer options before adding index to schema (CASSANDRA-15135)
- * Fixup scrub output when no data post-scrub and clear up old use of row, which really means partition (CASSANDRA-16835)
- * Reduce thread contention in CommitLogSegment and HintsBuffer (CASSANDRA-16072)
- * Make cqlsh use the same set of reserved keywords than the server uses (CASSANDRA-15663)
- * Optimize bytes skipping when reading SSTable files (CASSANDRA-14415)
- * Enable tombstone compactions when unchecked_tombstone_compaction is set in TWCS (CASSANDRA-14496)
- * Read only the required SSTables for single partition queries (CASSANDRA-16737)
-Merged from 3.0:
+ * Make the addition of regular column to COMPACT tables throw an InvalidRequestException (CASSANDRA-14564)
  * Fix secondary indexes on primary key columns skipping some writes (CASSANDRA-16868)
  * Use JMX to validate nodetool --jobs parameter (CASSANDRA-16104)
  * Handle properly UnsatisfiedLinkError in NativeLibrary#getProcessID() (CASSANDRA-16578)
@@ -281,8 +525,6 @@
  * Fix fwd to/from headers in DC write forwarding (CASSANDRA-16797)
  * Fix CassandraVersion::compareTo (CASSANDRA-16794)
  * BinLog does not close chronicle queue leaving this to GC to cleanup (CASSANDRA-16774)
-Merged from 3.11:
-Merged from 3.0:
 
 4.0-rc2
  * Improved password obfuscation (CASSANDRA-16801)
diff --git a/NEWS.txt b/NEWS.txt
index 4915652..0930ac0 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -51,86 +51,257 @@
 'sstableloader' tool. You can upgrade the file format of your snapshots
 using the provided 'sstableupgrade' tool.
 
-4.0.8
+
+4.1.1
 =====
 
-New features
-------------
+G1GC Recommended
+----------------
+    - The G1 settings in jvm8-server.options and jvm11-server.options are updated according to broad feedback
+      and testing. The G1 settings remain commented out by default in 4.1.x. It is recommended to switch
+      to G1 for performance and for simpler GC tuning. CMS is already deprecated in JDK9, and the next major
+      release of Cassandra will make G1 the default configuration.
 
 Upgrading
 ---------
     - All previous versions of 4.x contained a mistake on the implementation of the old CQL native protocol v3. That
-    mistake produced issues when paging over tables with compact storage and a single clustering column during rolling
-    upgrades involving 3.x and 4.x nodes. The fix for that issue makes it can now appear during rolling upgrades from
-    4.0.0-4.0.7. If that is your case, please use protocol v4 or higher in your driver. See CASSANDRA-17507 for further
-    details.
-
-4.0.6
-=====
-
-New features
-------------
-
-
-Upgrading
----------
-    - If you haven't set the compaction_thoroughput_mb_per_sec in your 4.0 cassandra.yaml file but you relied on the internal
-     default value,then compaction_throughput_mb_per_sec was equal to an old default value of 16MiB/s in Cassandra 4.0.
-     After CASSANDRA-17790 this is changed to 64MiB/s to match the default value in cassandra.yaml. If you prefer the old
-     one of 16MiB/s, you need to set it explicitly in your cassandra.yaml file.
+     mistake produced issues when paging over tables with compact storage and a single clustering column during rolling
+     upgrades involving 3.x and 4.x nodes. The fix for that issue makes it can now appear during rolling upgrades from
+     4.1.0 or 4.0.0-4.0.7. If that is your case, please use protocol v4 or higher in your driver. See CASSANDRA-17507
+     for further details.
 
 Deprecation
----------
+-----------
+    - Hadoop integration in package org.apache.cassandra.hadoop is deprecated and no longer actively maintained.
+      This code is scheduled to be removed in the next major version of Cassandra.
 
-4.0.3
-=====
+4.1
+===
 
 New features
 ------------
-
-
-Upgrading
----------
-    - otc_coalescing_strategy, otc_coalescing_window_us, otc_coalescing_enough_coalesced_messages,
-      otc_backlog_expiration_interval_ms are deprecated and will be removed at earliest with next major release.
-      otc_coalescing_strategy is disabled since 3.11.
-
-Deprecation
----------
-    - JavaScript user-defined functions have been deprecated. They are planned for removal
-      in the next major release. (CASSANDRA-17280)
-
-4.0.2
-=====
-
-New features
-------------
-    - Full support for Java 11, it is not experimental anymore.
+    - Added API for alternative memtable implementations. For details, see
+      src/java/org/apache/cassandra/db/memtable/Memtable_API.md
+    - Added a new guardrails framework allowing to define soft/hard limits for different user actions, such as limiting
+      the number of tables, columns per table or the size of collections. These guardrails are only applied to regular
+      user queries, and superusers and internal queries are excluded. Reaching the soft limit raises a client warning,
+      whereas reaching the hard limit aborts the query. In both cases a log message and a diagnostic event are emitted.
+      Additionally, some guardrails are not linked to specific user queries due to technical limitations, such as
+      detecting the size of large collections during compaction or periodically monitoring the disk usage. These
+      guardrails would only emit the proper logs and diagnostic events when triggered, without aborting any processes.
+      Guardrails config is defined through cassandra.yaml properties, and they can be dynamically updated through the
+      JMX MBean `org.apache.cassandra.db:type=Guardrails`. There are guardrails for:
+        - Number of user keyspaces.
+        - Number of user tables.
+        - Number of columns per table.
+        - Number of secondary indexes per table.
+        - Number of materialized views per table.
+        - Number of fields per user-defined type.
+        - Number of items in a collection.
+        - Number of partition keys selected by an IN restriction.
+        - Number of partition keys selected by the cartesian product of multiple IN restrictions.
+        - Allowed table properties.
+        - Allowed read consistency levels.
+        - Allowed write consistency levels.
+        - Collections size.
+        - Query page size.
+        - Minimum replication factor.
+        - Data disk usage, defined either as a percentage or as an absolute size.
+        - Whether user-defined timestamps are allowed.
+        - Whether GROUP BY queries are allowed.
+        - Whether the creation of secondary indexes is allowed.
+        - Whether the creation of uncompressed tables is allowed.
+        - Whether querying with ALLOW FILTERING is allowed.
+        - Whether DROP or TRUNCATE TABLE commands are allowed.
+    - Add support for the use of pure monotonic functions on the last attribute of the GROUP BY clause.
+    - Add floor functions that can be used to group by time range.
+    - Support for native transport rate limiting via native_transport_rate_limiting_enabled and
+      native_transport_max_requests_per_second in cassandra.yaml.
+    - Support for pre hashing passwords on CQL DCL commands
+    - Expose all client options via system_views.clients and nodetool clientstats --client-options.
+    - Add new nodetool compactionstats --vtable option to match the sstable_tasks vtable.
+    - Support for String concatenation has been added through the + operator.
+    - New configuration max_hints_size_per_host to limit the size of local hints files per host in mebibytes. Setting it
+      to a non-positive value disables the limit, which is the default behavior. Set it to a positive value to ensure
+      the total size of the hints files per host does not exceed the limit.
+    - Added ability to configure auth caches through corresponding `nodetool` commands.
+    - CDC data flushing can now be configured to be non-blocking with the configuration cdc_block_writes. When set to true,
+      any writes to the CDC-enabled tables will be blocked when reaching the limit for CDC data on disk, which is the
+      existing and the default behavior. When set to false, the writes to the CDC-enabled tables will be accepted and
+      the oldest CDC data on disk will be deleted to ensure the size constraint.
+    - Top partitions based on partition size or tombstone count are now tracked per table. These partitions are stored
+      in a new system.top_partitions table and exposed via JMX and nodetool tablestats. The partitions are tracked
+      during full or validation repairs but not incremental ones since those don't include all sstables and the partition
+      size/tombstone count would not be correct.
+    - New native functions to convert unix time values into C* native types: toDate(bigint), toTimestamp(bigint),
+      mintimeuuid(bigint) and maxtimeuuid(bigint).
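+      For example, to read the rows of a hypothetical time series written after a given unix time in milliseconds:
+          SELECT * FROM ks.events WHERE sensor_id = 1 AND event_id > maxtimeuuid(1672531200000);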
+    - Support for multiple permissions in a single GRANT/REVOKE/LIST statement has been added. It allows
+      granting/revoking/listing multiple permissions using a single statement by providing a list of comma-separated
+      permissions.
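+      For example (role and keyspace names are hypothetical):
+          GRANT SELECT, MODIFY ON KEYSPACE ks TO alice;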
+    - A new ALL TABLES IN KEYSPACE resource has been added. It allows granting permissions for all tables and user types
+      in a keyspace while preventing the user from using those permissions on the keyspace itself.
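+      For example (role and keyspace names are hypothetical):
+          GRANT SELECT ON ALL TABLES IN KEYSPACE ks TO alice;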
+    - Added support for type casting in the WHERE clause components and in the values of INSERT and UPDATE statements.
+    - A new implementation of Paxos (named v2) has been included that improves the safety and performance of LWT operations.
+      Importantly, v2 guarantees linearizability across safe range movements, so users are encouraged to enable v2.
+      v2 also halves the number of WAN messages required to be exchanged if used in conjunction with the new Paxos Repair
+      mechanism (see below) and with some minor modifications to applications using LWTs.
+      The new implementation may be enabled at any time by setting paxos_variant: v2, and disabled by setting to v1,
+      and this alone will reduce the number of WAN round-trips by between one and two for reads, and one for writes.
+    - A new Paxos Repair mechanism has been introduced as part of Repair, that permits further reducing the number of WAN
+      round-trips for write LWTs. This process may be manually executed for v1 and is run automatically alongside normal
+      repairs for v2. Once users are running regular repairs that include paxos repairs they are encouraged to set
+      paxos_state_purging: repaired. Once this has been set across the cluster, users are encouraged to set their
+      applications to supply a Commit consistency level of ANY with their LWT write operations, saving one additional WAN
+      round-trip. See upgrade notes below.
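+      For example, a cluster that has adopted the new implementation and runs regular paxos repairs would typically
+      carry the following cassandra.yaml settings (both taken from the description above):
+          paxos_variant: v2
+          paxos_state_purging: repaired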
+    - Warn/fail thresholds added to read queries notifying clients when these thresholds trigger (by
+      emitting a client warning or failing the query).  This feature is disabled by default, scheduled
+      to be enabled in 4.2; it is controlled with the configuration read_thresholds_enabled, and setting it
+      to true will enable this feature. Each check has its own warn/fail thresholds; currently
+      tombstones (tombstone_warn_threshold, and tombstone_failure_threshold), coordinator result set
+      materialized size (coordinator_read_size_warn_threshold and coordinator_read_size_fail_threshold),
+      local read materialized heap size
+      (local_read_size_warn_threshold and local_read_size_fail_threshold),
+      and RowIndexEntry estimated memory size (row_index_read_size_warn_threshold and
+      row_index_read_size_fail_threshold) are supported; more checks will be added over time.
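+      For example, the feature could be enabled with settings such as (threshold values are illustrative only):
+          read_thresholds_enabled: true
+          coordinator_read_size_warn_threshold: 512KiB
+          coordinator_read_size_fail_threshold: 4MiB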
+    - Prior to this version, the hint system stored a window of hints as defined by the configuration property
+      max_hint_window_in_ms, however this window was not persistent across restarts.
+      For example, if a node was restarted, it would still be eligible for hints to be sent to it because it
+      had been down for less than max_hint_window_in_ms. Hence, if that node kept restarting without hint delivery
+      completing, hints would be sent to that node indefinitely, occupying more and more disk space.
+      This behaviour was changed in CASSANDRA-14309. From now on, by default, if a node has not been down longer than
+      max_hint_window_in_ms, there is an additional check for whether there is a hint to be delivered to it which is
+      older than max_hint_window_in_ms. If there is, a new hint is not persisted; if there is not, it is.
+      This behaviour can be reverted to that of previous versions by setting the property
+      hint_window_persistent_enabled to false. This property is set to true by default.
+    - Added a new feature to allow denylisting (i.e. configurably blocking read, write, or range read) access to partition
+      keys in configured keyspaces and tables. See doc/operating/denylisting_partitions.rst for details on using this new
+      feature. Also see CASSANDRA-12106.
+    - Information about pending hints is now available through `nodetool listpendinghints` and `pending_hints` virtual
+      table.
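+      For example, pending hints can be inspected with `nodetool listpendinghints` or, assuming virtual tables are
+      exposed under the system_views keyspace, with:
+          SELECT * FROM system_views.pending_hints;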
+    - Added ability to invalidate auth caches through corresponding `nodetool` commands and virtual tables.
     - DCL statements in audit logs will now obscure only the password if they don't fail to parse.
+    - Starting from 4.1 sstables support UUID based generation identifiers. They are globally unique and thus they let
+      the node create sstables without any prior knowledge about the existing sstables in the data directory.
+      The feature is disabled by default in cassandra.yaml because once enabled, there is no easy way to downgrade.
+      When the node is restarted with UUID based generation identifiers enabled, each newly created sstable will have
+      a UUID based generation identifier and such files are not readable by previous Cassandra versions. In the future
+      those new identifiers will become enabled by default.
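+      For example, the feature is switched on by setting (see also the upgrade note below about the renamed key):
+          uuid_sstable_identifiers_enabled: true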
+    - Resetting schema behavior has changed in 4.1 so that: 1) resetting schema is prohibited when there is no live node
+      where the schema could be fetched from, and 2) truncating the local schema keyspace is postponed to the moment when
+      the node receives the schema from some other node.
 
 Upgrading
 ---------
+    - A negative `cache_load_timeout_seconds` (disabled) is equivalent to the new `cache_load_timeout` being 0 (disabled).
+    - A negative `sstable_preemptive_open_interval_in_mb` (disabled) is equivalent to the new `sstable_preemptive_open_interval`
+      being set to null. In the JMX MBean `org.apache.cassandra.db:type=StorageService`, the setter method
+      `setSSTablePreemptiveOpenIntervalInMB` still takes negative `intervalInMB` values for disabled.
+    - `enable_uuid_sstable_identifiers` parameter from 4.1 alpha1 was renamed to `uuid_sstable_identifiers_enabled`.
+    - `index_summary_resize_interval_in_minutes = -1` is equivalent to `index_summary_resize_interval` being set to `null` or
+      disabled. In the JMX MBean `org.apache.cassandra.db:type=IndexSummaryManager`, the setter method `setResizeIntervalInMinutes` still takes
+      `resizeIntervalInMinutes = -1` for disabled.
+    - `min_tracked_partition_size_bytes` parameter from 4.1 alpha1 was renamed to `min_tracked_partition_size`.
+    - Parameters of type data storage, duration and data rate cannot be set to Long.MAX_VALUE (former parameters of long type)
+      and Integer.MAX_VALUE (former parameters of int type). Those values are used internally during conversion between
+      units, and disallowing them prevents an overflow from happening. (CASSANDRA-17571)
+    - We added new JMX methods `setStreamThroughputMbitPerSec`, `getStreamThroughputMbitPerSec`, `setInterDCStreamThroughputMbitPerSec`,
+      `getInterDCStreamThroughputMbitPerSec` to the JMX MBean `org.apache.cassandra.db:type=StorageService`. They replace the now
+      deprecated methods `setStreamThroughputMbPerSec`, `getStreamThroughputMbPerSec`, `setInterDCStreamThroughputMbPerSec`, and
+      `getInterDCStreamThroughputMbPerSec`, which will be removed in a future major release.
+    - The config property `repair_session_space_in_mb` was wrongly advertised in previous versions as being set in
+      megabytes, while it is interpreted internally in mebibytes. To reduce the confusion we added two new JMX methods
+      `setRepairSessionSpaceInMebibytes(int sizeInMebibytes)` and `getRepairSessionSpaceInMebibytes`. They replace the now
+      deprecated methods `setRepairSessionSpaceInMegabytes(int sizeInMegabytes)` and `getRepairSessionSpaceInMegabytes`, which
+      will be removed in a future major release.
+    - There is a new cassandra.yaml version 2. Unit suffixes should be provided for all rate (B/s|KiB/s|MiB/s),
+      memory (B|KiB|MiB|GiB) and duration (d|h|m|s|ms|us|µs|ns)
+      parameters. List of changed parameters and details to consider during configuration setup can be
+      found at https://cassandra.apache.org/doc/latest/cassandra/new/configuration.html. (CASSANDRA-15234)
+      Backward compatibility with the old cassandra.yaml file will be in place until at least the next major version.
+      By default we refuse to start Cassandra with a config containing both old and new config keys for the same parameter. Start
+      Cassandra with -Dcassandra.allow_new_old_config_keys=true to override. For historical reasons duplicate config keys
+      in cassandra.yaml are allowed by default, start Cassandra with -Dcassandra.allow_duplicate_config_keys=false to disallow this.
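+      For example, an old-format entry and its new-format equivalent, using a parameter mentioned above (the value is
+      illustrative):
+          # old cassandra.yaml
+          sstable_preemptive_open_interval_in_mb: 50
+          # new cassandra.yaml
+          sstable_preemptive_open_interval: 50MiB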
+    - Many cassandra.yaml parameters' names have been changed. Full list and details to consider during configuration setup
+      when installing/upgrading Cassandra can be found at https://cassandra.apache.org/doc/latest/cassandra/new/configuration.html (CASSANDRA-15234)
+    - Negative values cannot be used for parameters of type data rate, duration and data storage, with either the old or
+      the new cassandra.yaml version. The only exception is the old, pre-CASSANDRA-15234 cassandra.yaml: there, -1 or other
+      negative values which were advertised as an option to disable config parameters can still be used. Those are generally
+      replaced by a null value in the new cassandra.yaml version, as written in the new cassandra.yaml and the docs.
+    - Before you upgrade, if you are using the `cassandra.auth_bcrypt_gensalt_log2_rounds` property,
+      confirm it is set to a value lower than 31, otherwise Cassandra will fail to start. See CASSANDRA-9384
+      for further details. You also need to regenerate passwords for users for whom the password
+      was created while the above property was set to more than 30, otherwise they will not be able to log in.
+    - JNA library was updated from 5.6.0 to 5.9.0. In version 5.7.0, Darwin support for M1 devices
+      was fixed but the prebuilt native library for Darwin x86 (32-bit Java on Mac OS) was removed.
     - The config properties for setting the streaming throughput `stream_throughput_outbound_megabits_per_sec` and
       `inter_dc_stream_throughput_outbound_megabits_per_sec` were incorrectly interpreted as mebibits. This has
       been fixed by CASSANDRA-17243, so the values for these properties will now indicate a throughput ~4.6% lower than
       what was actually applied in previous versions. This also affects the setters and getters for these properties in
       the JMX MBean `org.apache.cassandra.db:type=StorageService` and the nodetool commands `set/getstreamthroughput`
       and `set/getinterdcstreamthroughput`.
-    - Before you upgrade, if you are using `cassandra.auth_bcrypt_gensalt_log2_rounds` property,
-      confirm it is set to value lower than 31 otherwise Cassandra will fail to start. See CASSANDRA-9384
-      for further details. You also need to regenerate passwords for users for who the password
-      was created while the above property was set to be more than 30 otherwise they will not be able to log in.
-    - As part of the Internode Messaging improvement work in CASSANDRA-15066, internode_send_buff_size_in_bytes and
-      internode_recv_buff_size_in_bytes were renamed to internode_socket_send_buffer_size_in_bytes and
-      internode_socket_receive_buffer_size_in_bytes. To support upgrades pre-4.0, we add backward compatibility and
-      currently both old and new names should work. Cassandra 4.0.0 and Cassandra 4.0.1 work ONLY with the new names
-      (They weren't updated in cassandra.yaml though).
+    - Steps for upgrading Paxos
+        - Set paxos_variant: v2 across the cluster. This may be set via JMX, but should also be written
+          persistently to any yaml.
+        - Ensure paxos repairs are running regularly, either as part of normal incremental repair workflows or on their
+          own separate schedule. These operations are cheap and are best run frequently (e.g. once per hour).
+        - Set paxos_state_purging: repaired across the cluster.  This may be set via JMX, but should also be written
+          persistently to any yaml. NOTE: once this has been set, you must not restore paxos_state_purging: legacy. If
+          this setting must be disabled you must instead set paxos_state_purging: gc_grace. This may be necessary if
+          paxos repairs must be disabled for some reason on an extended basis, but in this case your applications must
+          restore default commit consistency to ensure correctness.
+        - Applications may now safely be updated to use ANY commit consistency level (or LOCAL_QUORUM, as preferred).
+          Uncontended writes should now take 2 round-trips, and uncontended reads should typically take one round-trip.
+    - A required [f|force] flag has been added to both "nodetool verify" and the standalone "sstableverify" tools.
+      These tools have some subtleties and should not be used unless the operator is familiar with what they do
+      and do not do, as well as the edge cases associated with their use.
+      NOTE: ANY SCRIPTS THAT RELY ON sstableverify OR nodetool verify WILL STOP WORKING UNTIL MODIFIED.
+      Please see CASSANDRA-17017 for details: https://issues.apache.org/jira/browse/CASSANDRA-17017
+    - `MutationExceededMaxSizeException` thrown when a mutation exceeds `max_mutation_size` inherits
+      from `InvalidRequestException` instead of `RuntimeException`. See CASSANDRA-17456 for details.
+
+Deprecation
+-----------
+    - In the command line options for `org.apache.cassandra.tools.LoaderOptions`: deprecate the `-t`, `--throttle`,
+      `-idct`, and `--inter-dc-throttle` options for setting the throttle and inter-datacenter throttle options in
+      Mbps. Instead, users are instructed to use `--throttle-mib` and `--inter-dc-throttle-mib` for setting the
+      throttling options in MiB/s. Additionally, in the loader options builder
+      `org.apache.cassandra.tools.LoaderOptions$Builder`: deprecate the `throttle(int)`, `interDcThrottle(int)`,
+      `entireSSTableThrottle(int)`, and the `entireSSTableInterDcThrottle(int)` methods.
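+      For example, assuming a standard sstableloader invocation (host and path are placeholders), a stream could now
+      be throttled with:
+          sstableloader -d 127.0.0.1 --throttle-mib 60 /var/lib/cassandra/data/ks/table
+      rather than with the deprecated Mbps-based `-t`/`--throttle` option.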
+    - In the JMX MBean `org.apache.cassandra.db:type=StorageService`: deprecate getter method `getStreamThroughputMbitPerSec`
+      in favor of getter method `getStreamThroughputMbitPerSecAsDouble`; deprecate getter method `getStreamThroughputMbPerSec`
+      in favor of getter methods `getStreamThroughputMebibytesPerSec` and `getStreamThroughputMebibytesPerSecAsDouble`;
+      deprecate getter method `getInterDCStreamThroughputMbitPerSec` in favor of getter method `getInterDCStreamThroughputMbitPerSecAsDouble`;
+      deprecate getter method `getInterDCStreamThroughputMbPerSec` in favor of getter method `getInterDCStreamThroughputMebibytesPerSecAsDouble`;
+      deprecate getter method `getCompactionThroughputMbPerSec` in favor of getter methods `getCompactionThroughtputMibPerSecAsDouble`
+      and `getCompactionThroughtputBytesPerSec`; deprecate setter methods `setStreamThroughputMbPerSec` and `setStreamThroughputMbitPerSec`
+      in favor of `setStreamThroughputMebibytesPerSec`; deprecate setter methods `setInterDCStreamThroughputMbitPerSec` and
+      `setInterDCStreamThroughputMbPerSec` in favor of `setInterDCStreamThroughputMebibytesPerSec`. The deprecated JMX methods
+      may return a rounded value, so if precision is important, you should use the new getters. While those deprecated JMX getters
+      will return a rounded number, the nodetool commands `getstreamthroughput` and `getinterdcstreamthroughput`
+      will throw a RuntimeException advising you to use the new -d flag in case an integer cannot be returned. See CASSANDRA-17725 for further details.
+    - Deprecate public method `setRate(final double throughputMbPerSec)` in `CompactionManager` in favor of
+      `setRateInBytes(final double throughputBytesPerSec)`.
+    - `withBufferSizeInMB(int size)` in `StressCQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`.
+      There is no change of functionality in the new method, only a name change for clarity with regard to units and to
+      follow naming standardization.
+    - `withBufferSizeInMB(int size)` in `CQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`.
+      There is no change of functionality in the new method, only a name change for clarity with regard to units and to
+      follow naming standardization.
+    - The properties `keyspace_count_warn_threshold` and `table_count_warn_threshold` in cassandra.yaml have been
+      deprecated in favour of the new `guardrails.keyspaces` and `guardrails.tables` properties and will be removed
+      in a subsequent major version. This also affects the setters and getters for those properties in the JMX MBean
+      `org.apache.cassandra.db:type=StorageService`, which are equally deprecated in favour of the analogous methods
+      in the JMX MBean `org.apache.cassandra.db:type=Guardrails`. See CASSANDRA-17195 for further details.
+    - The functionality behind the property `windows_timer_interval` was removed as part of CASSANDRA-16956. The
+      property is still present but deprecated, and it is just a placeholder to prevent breaking upgrades. This
+      property is expected to be fully removed in the next major release of Cassandra.
 
 4.0
 ===
 
 New features
 ------------
+    - Full support for Java 11; it is no longer experimental.
     - The data of the system keyspaces using a local strategy (at the exception of the system.batches,
       system.paxos, system.compaction_history, system.prepared_statements and system.repair tables)
       is now stored by default in the first data directory, instead of being distributed among all
@@ -214,9 +385,24 @@
       See CASSANDRA-10190 for details.
     - Support for server side DESCRIBE statements has been added. See CASSANDRA-14825
     - It is now possible to rate limit snapshot creation/clearing. See CASSANDRA-13019
+    - Authentication reads and writes have been changed from a mix of ONE, LOCAL_ONE, and QUORUM
+      to LOCAL_QUORUM on reads and EACH_QUORUM on writes. This is configurable via cassandra.yaml with
+      auth_read_consistency_level and auth_write_consistency_level respectively. See CASSANDRA-12988.
 
 Upgrading
 ---------
+    - If you were on 4.0.1 - 4.0.5 and you haven't set compaction_throughput_mb_per_sec in your 4.0 cassandra.yaml
+      file but relied on the internal default value, then compaction_throughput_mb_per_sec was equal to an old default
+      value of 16MiB/s in Cassandra 4.0. After CASSANDRA-17790 this is changed to 64MiB/s to match the default value in
+      cassandra.yaml. If you prefer the old one of 16MiB/s, you need to set it explicitly in your cassandra.yaml file.
+    - otc_coalescing_strategy, otc_coalescing_window_us, otc_coalescing_enough_coalesced_messages,
+      otc_backlog_expiration_interval_ms are deprecated and will be removed at earliest with next major release.
+      otc_coalescing_strategy is disabled since 3.11.
+    - As part of the Internode Messaging improvement work in CASSANDRA-15066, internode_send_buff_size_in_bytes and
+      internode_recv_buff_size_in_bytes were renamed to internode_socket_send_buffer_size_in_bytes and
+      internode_socket_receive_buffer_size_in_bytes. To support upgrades pre-4.0, we add backward compatibility and
+      currently both old and new names should work. Cassandra 4.0.0 and Cassandra 4.0.1 work ONLY with the new names
+      (They weren't updated in cassandra.yaml though).
     - DESCRIBE|DESC was moved to server side in Cassandra 4.0. As a consequence DESRIBE|DESC will not work in cqlsh 6.0.0
       being connected to earlier major Cassandra versions where DESCRIBE does not exist server side.
     - cqlsh shell startup script now prefers 'python3' before 'python' when identifying a runtime.
@@ -382,7 +568,8 @@
 
 Deprecation
 -----------
-
+    - JavaScript user-defined functions have been deprecated. They are planned for removal
+      in the next major release. (CASSANDRA-17280)
     - The JMX MBean org.apache.cassandra.metrics:type=Streaming,name=ActiveOutboundStreams has been
       deprecated and will be removed in a subsequent major version. This metric was not updated since several version
       already.
diff --git a/README.asc b/README.asc
index f1270a8..f484aa2 100644
--- a/README.asc
+++ b/README.asc
@@ -12,7 +12,7 @@
 Requirements
 ------------
 . Java >= 1.8 (OpenJDK and Oracle JVMS have been tested)
-. Python 3.6+ (for cqlsh; 2.7 works but is deprecated)
+. Python 3.6+ (for cqlsh)
 
 Getting started
 ---------------
@@ -39,7 +39,7 @@
 
 ----
 Connected to Test Cluster at localhost:9160.
-[cqlsh 2.2.0 | Cassandra 1.2.0 | CQL spec 3.0.0 | Thrift protocol 19.35.0]
+[cqlsh 6.0.0 | Cassandra 4.1 | CQL spec 3.4.6 | Native protocol v5]
 Use HELP for help.
 cqlsh>
 ----
diff --git a/bin/cassandra b/bin/cassandra
index 1e0b4c0..3fafe91 100755
--- a/bin/cassandra
+++ b/bin/cassandra
@@ -119,14 +119,6 @@
   CASSANDRA_LOG_DIR=$CASSANDRA_HOME/logs
 fi
 
-# Special-case path variables.
-case "`uname`" in
-    CYGWIN*|MINGW*) 
-        CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-        CASSANDRA_CONF=`cygpath -p -w "$CASSANDRA_CONF"`
-    ;;
-esac
-
 # Cassandra uses an installed jemalloc via LD_PRELOAD / DYLD_INSERT_LIBRARIES by default to improve off-heap
 # memory allocation performance. The following code searches for an installed libjemalloc.dylib/.so/.1.so using
 # Linux and OS-X specific approaches.
diff --git a/bin/cqlsh b/bin/cqlsh
index a962e11..90c9b00 100755
--- a/bin/cqlsh
+++ b/bin/cqlsh
@@ -62,8 +62,8 @@
     version=$1
     major_version="${version%.*}"
     minor_version="${version#*.}"
-    # python3.6+ is supported. python2.7 is deprecated but still compatible.
-    if [ "$major_version" = 3 ] && [ "$minor_version" -ge 6 ] || [ "$version" = "2.7" ]; then
+    # python3.6+ is supported
+    if [ "$major_version" = 3 ] && [ "$minor_version" -ge 6 ]; then
         echo "supported"
     else
         echo "unsupported"
@@ -79,6 +79,8 @@
         if [ "$(is_supported_version "$version")" = "supported" ]; then
             exec "$interpreter" "$($interpreter -c "import os; print(os.path.dirname(os.path.realpath('$0')))")/cqlsh.py" "$@"
             exit
+        else
+            echo "Warning: unsupported version of Python:" $version >&2
         fi
     fi
 }
@@ -88,7 +90,7 @@
     # run a user specified Python interpreter
     run_if_supported_version "$USER_SPECIFIED_PYTHON" "$@"
 else
-    for interpreter in python3 python python2.7; do
+    for interpreter in python3 python; do
         run_if_supported_version "$interpreter" "$@"
     done
 fi
diff --git a/bin/cqlsh.py b/bin/cqlsh.py
index df08203..6c1e7bd 100755
--- a/bin/cqlsh.py
+++ b/bin/cqlsh.py
@@ -16,36 +16,38 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import division, unicode_literals, print_function
-
 import cmd
 import codecs
+import configparser
 import csv
+import errno
 import getpass
 import optparse
 import os
 import platform
 import re
+import stat
+import subprocess
 import sys
 import traceback
 import warnings
 import webbrowser
 from contextlib import contextmanager
 from glob import glob
+from io import StringIO
 from uuid import UUID
 
-if sys.version_info < (3, 6) and sys.version_info[0:2] != (2, 7):
-    sys.exit("\ncqlsh requires Python 3.6+ or Python 2.7 (deprecated)\n")
+if sys.version_info < (3, 6):
+    sys.exit("\ncqlsh requires Python 3.6+\n")
 
 # see CASSANDRA-10428
 if platform.python_implementation().startswith('Jython'):
     sys.exit("\nCQL Shell does not run on Jython\n")
 
 UTF8 = 'utf-8'
-CP65001 = 'cp65001'  # Win utf-8 variant
 
 description = "CQL Shell for Apache Cassandra"
-version = "6.0.0"
+version = "6.1.0"
 
 readline = None
 try:
@@ -90,14 +92,8 @@
 # use bundled lib for python-cql if available. if there
 # is a ../lib dir, use bundled libs there preferentially.
 ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
-myplatform = platform.system()
-is_win = myplatform == 'Windows'
 
-# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
-if is_win and sys.version_info < (3, 3):
-    codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
-
-if myplatform == 'Linux':
+if platform.system() == 'Linux':
     ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
 
 if os.environ.get('CQLSH_NO_BUNDLED', ''):
@@ -116,20 +112,14 @@
     ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
     sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
 
-third_parties = ('futures-', 'six-')
+# the driver needs dependencies
+third_parties = ('six-', 'pure_sasl-')
 
 for lib in third_parties:
     lib_zip = find_zip(lib)
     if lib_zip:
         sys.path.insert(0, lib_zip)
 
-# We cannot import six until we add its location to sys.path so the Python
-# interpreter can find it. Do not move this to the top.
-import six
-
-from six.moves import configparser, input
-from six import StringIO, ensure_text, ensure_str
-
 warnings.filterwarnings("ignore", r".*blist.*")
 try:
     import cassandra
@@ -144,8 +134,7 @@
 from cassandra.cluster import Cluster
 from cassandra.cqltypes import cql_typename
 from cassandra.marshal import int64_unpack
-from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
-                                TableMetadata, protect_name, protect_names)
+from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata, TableMetadata)
 from cassandra.policies import WhiteListRoundRobinPolicy
 from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
 from cassandra.util import datetime_from_timestamp
@@ -156,15 +145,17 @@
 if os.path.isdir(cqlshlibdir):
     sys.path.insert(0, cqlshlibdir)
 
-from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
+from cqlshlib import cql3handling, pylexotron, sslhandling, cqlshhandling, authproviderhandling
 from cqlshlib.copyutil import ExportTask, ImportTask
 from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
                                  RED, WHITE, FormattedValue, colorme)
 from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
                                  DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
-                                 format_by_type, formatter_for)
+                                 format_by_type)
 from cqlshlib.tracing import print_trace, print_trace_session
-from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
+from cqlshlib.util import get_file_encoding_bomsize
+from cqlshlib.util import is_file_secure
+
 
 DEFAULT_HOST = '127.0.0.1'
 DEFAULT_PORT = 9042
@@ -211,6 +202,7 @@
 parser.add_option("--encoding", help="Specify a non-default encoding for output."
                   + " (Default: %s)" % (UTF8,))
 parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
+parser.add_option("--credentials", help="Specify an alternative credentials file location.")
 parser.add_option('--cqlversion', default=None,
                   help='Specify a particular CQL version, '
                        'by default the highest version supported by the server will be used.'
@@ -225,42 +217,58 @@
                   help='Specify the default request timeout in seconds (default: %default seconds).')
 parser.add_option("-t", "--tty", action='store_true', dest='tty',
                   help='Force tty mode (command prompt).')
+parser.add_option('-v', action="version", help='Print the current version of cqlsh.')
 
-optvalues = optparse.Values()
-(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
+# This is a hidden option to suppress the warning when the -p/--password command line option is used.
+# Power users may use this option if they know that no other people have access to the system where cqlsh is run, or if they don't care about security.
+# Use of this option in scripting is discouraged. Please use a (temporary) credentials file where possible.
+# The Cassandra distributed tests (dtests) also use this option in some tests when a well-known password is supplied via the command line.
+parser.add_option("--insecure-password-without-warning", action='store_true', dest='insecure_password_without_warning',
+                  help=optparse.SUPPRESS_HELP)
+
+opt_values = optparse.Values()
+(options, arguments) = parser.parse_args(sys.argv[1:], values=opt_values)
 
 # BEGIN history/config definition
-HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
+
+
+def mkdirp(path):
+    """Creates all parent directories up to path parameter or fails when path exists, but it is not a directory."""
+
+    try:
+        os.makedirs(path)
+    except OSError:
+        if not os.path.isdir(path):
+            raise
+
+
+def resolve_cql_history_file():
+    default_cql_history = os.path.expanduser(os.path.join('~', '.cassandra', 'cqlsh_history'))
+    if 'CQL_HISTORY' in os.environ:
+        return os.environ['CQL_HISTORY']
+    else:
+        return default_cql_history
+
+
+HISTORY = resolve_cql_history_file()
+HISTORY_DIR = os.path.dirname(HISTORY)
+
+try:
+    mkdirp(HISTORY_DIR)
+except OSError:
+    print('\nWarning: Cannot create directory at `%s`. Command history will not be saved. Please check what the environment property CQL_HISTORY is set to.\n' % HISTORY_DIR)
+
+DEFAULT_CQLSHRC = os.path.expanduser(os.path.join('~', '.cassandra', 'cqlshrc'))
 
 if hasattr(options, 'cqlshrc'):
-    CONFIG_FILE = options.cqlshrc
+    CONFIG_FILE = os.path.expanduser(options.cqlshrc)
     if not os.path.exists(CONFIG_FILE):
-        print('\nWarning: Specified cqlshrc location `%s` does not exist.  Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR))
-        CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
+        print('\nWarning: Specified cqlshrc location `%s` does not exist.  Using `%s` instead.\n' % (CONFIG_FILE, DEFAULT_CQLSHRC))
+        CONFIG_FILE = DEFAULT_CQLSHRC
 else:
-    CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
+    CONFIG_FILE = DEFAULT_CQLSHRC
 
-HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
-if not os.path.exists(HISTORY_DIR):
-    try:
-        os.mkdir(HISTORY_DIR)
-    except OSError:
-        print('\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR)
-
-OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
-if os.path.exists(OLD_CONFIG_FILE):
-    if os.path.exists(CONFIG_FILE):
-        print('\nWarning: cqlshrc config files were found at both the old location ({0})'
-              + ' and the new location ({1}), the old config file will not be migrated to the new'
-              + ' location, and the new location will be used for now.  You should manually'
-              + ' consolidate the config files at the new location and remove the old file.'
-              .format(OLD_CONFIG_FILE, CONFIG_FILE))
-    else:
-        os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
-OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
-if os.path.exists(OLD_HISTORY):
-    os.rename(OLD_HISTORY, HISTORY)
-# END history/config definition
+CQL_DIR = os.path.dirname(CONFIG_FILE)
 
 CQL_ERRORS = (
     cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
@@ -336,7 +344,7 @@
 
 
 def maybe_ensure_text(val):
-    return ensure_text(val) if val else val
+    return str(val) if val else val
 
 
 class FormatError(DecodeError):
@@ -401,7 +409,7 @@
 
 
 class Shell(cmd.Cmd):
-    custom_prompt = ensure_text(os.getenv('CQLSH_PROMPT', ''))
+    custom_prompt = os.getenv('CQLSH_PROMPT', '')
     if custom_prompt != '':
         custom_prompt += "\n"
     default_prompt = custom_prompt + "cqlsh> "
@@ -420,7 +428,7 @@
     default_page_size = 100
 
     def __init__(self, hostname, port, color=False,
-                 username=None, password=None, encoding=None, stdin=None, tty=True,
+                 username=None, encoding=None, stdin=None, tty=True,
                  completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
                  cqlver=None, keyspace=None,
                  tracing_enabled=False, expand_enabled=False,
@@ -436,16 +444,21 @@
                  request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
                  protocol_version=None,
                  connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS,
-                 is_subshell=False):
+                 is_subshell=False,
+                 auth_provider=None):
         cmd.Cmd.__init__(self, completekey=completekey)
         self.hostname = hostname
         self.port = port
-        self.auth_provider = None
-        if username:
-            if not password:
-                password = getpass.getpass()
-            self.auth_provider = PlainTextAuthProvider(username=username, password=password)
+        self.auth_provider = auth_provider
         self.username = username
+
+        if isinstance(auth_provider, PlainTextAuthProvider):
+            self.username = auth_provider.username
+            if not auth_provider.password:
+                # if no password is provided, we need to query the user to get one.
+                password = getpass.getpass()
+                self.auth_provider = PlainTextAuthProvider(username=auth_provider.username, password=password)
+
         self.keyspace = keyspace
         self.ssl = ssl
         self.tracing_enabled = tracing_enabled
@@ -498,7 +511,6 @@
 
         self.tty = tty
         self.encoding = encoding
-        self.check_windows_encoding()
 
         self.output_codec = codecs.lookup(encoding)
 
@@ -512,7 +524,6 @@
 
         if tty:
             self.reset_prompt()
-            self.maybe_warn_py2()
             self.report_connection()
             print('Use HELP for help.')
         else:
@@ -531,19 +542,6 @@
     def batch_mode(self):
         return not self.tty
 
-    @property
-    def is_using_utf8(self):
-        # utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
-        return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
-
-    def check_windows_encoding(self):
-        if is_win and os.name == 'nt' and self.tty and \
-           self.is_using_utf8 and sys.stdout.encoding != CP65001:
-            self.printerr("\nWARNING: console codepage must be set to cp65001 "
-                          "to support {} encoding on Windows platforms.\n"
-                          "If you experience encoding problems, change your console"
-                          " codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
-
     def set_expanded_cql_version(self, ver):
         ver, vertuple = full_cql_version(ver)
         self.cql_version = ver
@@ -599,12 +597,6 @@
         vers['cql'] = self.cql_version
         print("[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers)
 
-    def maybe_warn_py2(self):
-        py2_suppress_warn = 'CQLSH_NO_WARN_PY2'
-        if sys.version_info[0:2] == (2, 7) and not os.environ.get(py2_suppress_warn):
-            print("Python 2.7 support is deprecated. "
-                  "Install Python 3.6+ or set %s to suppress this message.\n" % (py2_suppress_warn,))
-
     def show_session(self, sessionid, partial_session=False):
         print_trace_session(self, self.session, sessionid, partial_session)
 
@@ -768,13 +760,6 @@
 
         raise ObjectNotFound("'{}' not found in keyspace '{}'".format(name, ks))
 
-    def get_usertypes_meta(self):
-        data = self.session.execute("select * from system.schema_usertypes")
-        if not data:
-            return cql3handling.UserTypesMeta({})
-
-        return cql3handling.UserTypesMeta.from_layout(data)
-
     def get_trigger_names(self, ksname=None):
         if ksname is None:
             ksname = self.current_keyspace
@@ -814,8 +799,6 @@
             try:
                 import readline
             except ImportError:
-                if is_win:
-                    print("WARNING: pyreadline dependency missing.  Install to enable tab completion.")
                 pass
             else:
                 old_completer = readline.get_completer()
@@ -829,9 +812,9 @@
         # start coverage collection if requested, unless in subshell
         if self.coverage and not self.is_subshell:
             # check for coveragerc file, write it if missing
-            if os.path.exists(HISTORY_DIR):
-                self.coveragerc_path = os.path.join(HISTORY_DIR, '.coveragerc')
-                covdata_path = os.path.join(HISTORY_DIR, '.coverage')
+            if os.path.exists(CQL_DIR):
+                self.coveragerc_path = os.path.join(CQL_DIR, '.coveragerc')
+                covdata_path = os.path.join(CQL_DIR, '.coverage')
                 if not os.path.isfile(self.coveragerc_path):
                     with open(self.coveragerc_path, 'w') as f:
                         f.writelines(["[run]\n",
@@ -853,15 +836,14 @@
 
     def get_input_line(self, prompt=''):
         if self.tty:
-            self.lastcmd = input(ensure_str(prompt))
-            line = ensure_text(self.lastcmd) + '\n'
+            self.lastcmd = input(str(prompt))
+            line = self.lastcmd + '\n'
         else:
-            self.lastcmd = ensure_text(self.stdin.readline())
+            self.lastcmd = self.stdin.readline()
             line = self.lastcmd
             if not len(line):
                 raise EOFError
         self.lineno += 1
-        line = ensure_text(line)
         return line
 
     def use_stdin_reader(self, until='', prompt=''):
@@ -875,7 +857,7 @@
                 return
             yield newline
 
-    def cmdloop(self):
+    def cmdloop(self, intro=None):
         """
         Adapted from cmd.Cmd's version, because there is literally no way with
         cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
@@ -922,7 +904,6 @@
         Returns true if the statement is complete and was handled (meaning it
         can be reset).
         """
-        statementtext = ensure_text(statementtext)
         statementtext = self.strip_comment_blocks(statementtext)
         try:
             statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
@@ -968,7 +949,7 @@
         if readline is not None:
             nl_count = srcstr.count("\n")
 
-            new_hist = ensure_str(srcstr.replace("\n", " ").rstrip())
+            new_hist = srcstr.replace("\n", " ").rstrip()
 
             if nl_count > 1 and self.last_hist != new_hist:
                 readline.add_history(new_hist)
@@ -1019,7 +1000,6 @@
         self.tracing_enabled = tracing_was_enabled
 
     def perform_statement(self, statement):
-        statement = ensure_text(statement)
 
         stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
         success, future = self.perform_simple_statement(stmt)
@@ -1075,7 +1055,7 @@
         try:
             result = future.result()
         except CQL_ERRORS as err:
-            err_msg = ensure_text(err.message if hasattr(err, 'message') else str(err))
+            err_msg = err.message if hasattr(err, 'message') else str(err)
             self.printerr(str(err.__class__.__name__) + ": " + err_msg)
         except Exception:
             import traceback
@@ -1114,11 +1094,11 @@
         def print_all(result, table_meta, tty):
             # Return the number of rows in total
             num_rows = 0
-            isFirst = True
+            is_first = True
             while True:
                 # Always print for the first page even it is empty
-                if result.current_rows or isFirst:
-                    with_header = isFirst or tty
+                if result.current_rows or is_first:
+                    with_header = is_first or tty
                     self.print_static_result(result, table_meta, with_header, tty, num_rows)
                     num_rows += len(result.current_rows)
                 if result.has_more_pages:
@@ -1130,7 +1110,7 @@
                     if not tty:
                         self.writeresult("")
                     break
-                isFirst = False
+                is_first = False
             return num_rows
 
         num_rows = print_all(result, table_meta, self.tty)
@@ -1390,7 +1370,7 @@
                     self.describe_element(result)
 
             except CQL_ERRORS as err:
-                err_msg = ensure_text(err.message if hasattr(err, 'message') else str(err))
+                err_msg = err.message if hasattr(err, 'message') else str(err)
                 self.printerr(err_msg.partition("message=")[2].strip('"'))
             except Exception:
                 import traceback
@@ -1406,7 +1386,7 @@
         """
         Print the output for a DESCRIBE KEYSPACES query
         """
-        names = [ensure_str(r['name']) for r in rows]
+        names = [r['name'] for r in rows]
 
         print('')
         cmd.Cmd.columnize(self, names)
@@ -1426,7 +1406,7 @@
                 keyspace = row['keyspace_name']
                 names = list()
 
-            names.append(ensure_str(row['name']))
+            names.append(str(row['name']))
 
         if keyspace is not None:
             self.print_keyspace_element_names(keyspace, names)
@@ -1564,7 +1544,7 @@
         if fname is not None:
             fname = self.cql_unprotect_value(fname)
 
-        copyoptnames = list(map(six.text_type.lower, parsed.get_binding('optnames', ())))
+        copyoptnames = list(map(str.lower, parsed.get_binding('optnames', ())))
         copyoptvals = list(map(self.cql_unprotect_value, parsed.get_binding('optvals', ())))
         opts = dict(list(zip(copyoptnames, copyoptvals)))
 
@@ -1640,10 +1620,8 @@
         except IOError as e:
             self.printerr('Could not open %r: %s' % (fname, e))
             return
-        username = self.auth_provider.username if self.auth_provider else None
-        password = self.auth_provider.password if self.auth_provider else None
         subshell = Shell(self.hostname, self.port, color=self.color,
-                         username=username, password=password,
+                         username=self.username,
                          encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
                          cqlver=self.cql_version, keyspace=self.current_keyspace,
                          tracing_enabled=self.tracing_enabled,
@@ -1656,7 +1634,8 @@
                          max_trace_wait=self.max_trace_wait, ssl=self.ssl,
                          request_timeout=self.session.default_timeout,
                          connect_timeout=self.conn.connect_timeout,
-                         is_subshell=True)
+                         is_subshell=True,
+                         auth_provider=self.auth_provider)
         # duplicate coverage related settings in subshell
         if self.coverage:
             subshell.coverage = True
@@ -1883,8 +1862,7 @@
 
         Clears the console.
         """
-        import subprocess
-        subprocess.call(['clear', 'cls'][is_win], shell=True)
+        subprocess.call('clear', shell=True)
     do_cls = do_clear
 
     def do_debug(self, parsed):
@@ -1986,11 +1964,10 @@
             out = self.query_out
 
         # convert Exceptions, etc to text
-        if not isinstance(text, six.text_type):
-            text = "{}".format(text)
+        if not isinstance(text, str):
+            text = str(text)
 
         to_write = self.applycolor(text, color) + ('\n' if newline else '')
-        to_write = ensure_str(to_write)
         out.write(to_write)
 
     def flush_output(self):
@@ -2068,7 +2045,7 @@
             binary_switch_value = True
         except (ValueError, TypeError):
             value = None
-        return (binary_switch_value, value)
+        return binary_switch_value, value
 
 
 def option_with_default(cparser_getter, section, option, default=None):
@@ -2095,7 +2072,6 @@
     if os.environ.get('TERM', '') in ('dumb', ''):
         return False
     try:
-        import subprocess
         p = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
         stdout, _ = p.communicate()
         if int(stdout.strip()) < 8:
@@ -2108,15 +2084,27 @@
 
 
 def read_options(cmdlineargs, environment):
-    configs = configparser.SafeConfigParser() if sys.version_info < (3, 2) else configparser.ConfigParser()
+    configs = configparser.ConfigParser()
     configs.read(CONFIG_FILE)
 
     rawconfigs = configparser.RawConfigParser()
     rawconfigs.read(CONFIG_FILE)
 
+    username_from_cqlshrc = option_with_default(configs.get, 'authentication', 'username')
+    password_from_cqlshrc = option_with_default(rawconfigs.get, 'authentication', 'password')
+    if username_from_cqlshrc or password_from_cqlshrc:
+        if password_from_cqlshrc and not is_file_secure(os.path.expanduser(CONFIG_FILE)):
+            print("\nWarning: Password is found in an insecure cqlshrc file. The file is owned or readable by other users on the system.",
+                  end='', file=sys.stderr)
+        print("\nNotice: Credentials in the cqlshrc file is deprecated and will be ignored in the future."
+              "\nPlease use a credentials file to specify the username and password.\n", file=sys.stderr)
+
     optvalues = optparse.Values()
-    optvalues.username = option_with_default(configs.get, 'authentication', 'username')
-    optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
+
+    optvalues.username = None
+    optvalues.password = None
+    optvalues.credentials = os.path.expanduser(option_with_default(configs.get, 'authentication', 'credentials',
+                                                                   os.path.join(CQL_DIR, 'credentials')))
     optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
     optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
     optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
@@ -2153,8 +2141,46 @@
     optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
     optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
     optvalues.execute = None
+    optvalues.insecure_password_without_warning = False
 
     (options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
+
+    # Credentials from cqlshrc will be expanded,
+    # credentials from the command line are also expanded if there is a space...
+    # we need the following so that these two scenarios will work
+    #   cqlsh --credentials=~/.cassandra/creds
+    #   cqlsh --credentials ~/.cassandra/creds
+    options.credentials = os.path.expanduser(options.credentials)
+
+    if not is_file_secure(options.credentials):
+        print("\nWarning: Credentials file '{0}' exists but is not used, because:"
+              "\n  a. the file owner is not the current user; or"
+              "\n  b. the file is readable by group or other."
+              "\nPlease ensure the file is owned by the current user and is not readable by group or other."
+              "\nOn a Linux or UNIX-like system, you often can do this by using the `chown` and `chmod` commands:"
+              "\n  chown YOUR_USERNAME credentials"
+              "\n  chmod 600 credentials\n".format(options.credentials),
+              file=sys.stderr)
+        options.credentials = ''  # ConfigParser.read() will ignore unreadable files
+
+    if not options.username:
+        credentials = configparser.ConfigParser()
+        credentials.read(options.credentials)
+
+        # use the username from credentials file but fallback to cqlshrc if username is absent from the command line parameters
+        options.username = option_with_default(credentials.get, 'plain_text_auth', 'username', username_from_cqlshrc)
+
+    if not options.password:
+        rawcredentials = configparser.RawConfigParser()
+        rawcredentials.read(options.credentials)
+
+        # handling password in the same way as username, priority cli > credentials > cqlshrc
+        options.password = option_with_default(rawcredentials.get, 'plain_text_auth', 'password', password_from_cqlshrc)
+    elif not options.insecure_password_without_warning:
+        print("\nWarning: Using a password on the command line interface can be insecure."
+              "\nRecommendation: use the credentials file to securely provide the password.\n", file=sys.stderr)
+
     # Make sure some user values read from the command line are in unicode
     options.execute = maybe_ensure_text(options.execute)
     options.username = maybe_ensure_text(options.username)
@@ -2304,7 +2330,6 @@
                       port,
                       color=options.color,
                       username=options.username,
-                      password=options.password,
                       stdin=stdin,
                       tty=options.tty,
                       completekey=options.completekey,
@@ -2323,7 +2348,12 @@
                       single_statement=options.execute,
                       request_timeout=options.request_timeout,
                       connect_timeout=options.connect_timeout,
-                      encoding=options.encoding)
+                      encoding=options.encoding,
+                      auth_provider=authproviderhandling.load_auth_provider(
+                          config_file=CONFIG_FILE,
+                          cred_file=options.credentials,
+                          username=options.username,
+                          password=options.password))
     except KeyboardInterrupt:
         sys.exit('Connection aborted.')
     except CQL_ERRORS as e:
@@ -2350,7 +2380,7 @@
 
 
 # always call this regardless of module name: when a sub-process is spawned
-# on Windows then the module name is not __main__, see CASSANDRA-9304
+# on Windows then the module name is not __main__, see CASSANDRA-9304 (Windows support was dropped in CASSANDRA-16956)
 insert_driver_hooks()
 
 if __name__ == '__main__':
diff --git a/bin/debug-cql b/bin/debug-cql
index 9550ddf..d5866d3 100755
--- a/bin/debug-cql
+++ b/bin/debug-cql
@@ -36,14 +36,6 @@
     . "$CASSANDRA_CONF/cassandra-env.sh"
 fi
 
-# Special-case path variables.
-case "`uname`" in
-    CYGWIN*|MINGW*) 
-        CLASSPATH="`cygpath -p -w "$CLASSPATH"`"
-        CASSANDRA_CONF="`cygpath -p -w "$CASSANDRA_CONF"`"
-    ;;
-esac
-
 class="org.apache.cassandra.transport.Client"
 cassandra_parms="-Dlogback.configurationFile=logback-tools.xml"
 "$JAVA" $JVM_OPTS $cassandra_parms  -cp "$CLASSPATH" "$class" $@
diff --git a/build.xml b/build.xml
index f9dc014..e56c1f8 100644
--- a/build.xml
+++ b/build.xml
@@ -33,7 +33,7 @@
     <property name="debuglevel" value="source,lines,vars"/>
 
     <!-- default version and SCM information -->
-    <property name="base.version" value="4.0.11"/>
+    <property name="base.version" value="4.1.3"/>
     <property name="scm.connection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
     <property name="scm.developerConnection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
     <property name="scm.url" value="https://gitbox.apache.org/repos/asf?p=cassandra.git;a=tree"/>
@@ -63,8 +63,8 @@
     <property name="test.classlistfile" value="testlist.txt"/>
     <property name="test.classlistprefix" value="unit"/>
     <property name="benchmark.name" value=""/>
-    <property name="test.anttasks.src" value="${test.dir}/anttasks"/>
     <property name="test.methods" value=""/>
+    <property name="test.anttasks.src" value="${test.dir}/anttasks"/>
     <property name="test.unit.src" value="${test.dir}/unit"/>
     <property name="test.long.src" value="${test.dir}/long"/>
     <property name="test.burn.src" value="${test.dir}/burn"/>
@@ -72,8 +72,13 @@
     <property name="test.microbench.src" value="${test.dir}/microbench"/>
     <property name="test.distributed.src" value="${test.dir}/distributed"/>
     <property name="test.compression.algo" value="LZ4"/>
-    <property name="test.driver.connection_timeout_ms" value="5000"/>
-    <property name="test.driver.read_timeout_ms" value="12000"/>
+    <property name="test.simulator.src" value="${test.dir}/simulator/main"/>
+    <property name="test.simulator-asm.src" value="${test.dir}/simulator/asm"/>
+    <property name="test.simulator-bootstrap.src" value="${test.dir}/simulator/bootstrap"/>
+    <property name="test.simulator-test.src" value="${test.dir}/simulator/test"/>
+    <property name="test.driver.connection_timeout_ms" value="10000"/>
+    <property name="test.driver.read_timeout_ms" value="24000"/>
+    <property name="test.jvm.args" value="" />
     <property name="dist.dir" value="${build.dir}/dist"/>
     <property name="tmp.dir" value="${java.io.tmpdir}"/>
 
@@ -105,17 +110,18 @@
     <property name="maven-repository-url" value="https://repository.apache.org/content/repositories/snapshots"/>
     <property name="maven-repository-id" value="apache.snapshots.https"/>
 
-    <property name="test.timeout" value="240000" />
+    <property name="test.timeout" value="480000" />
     <property name="test.memory.timeout" value="480000" />
     <property name="test.long.timeout" value="600000" />
     <property name="test.burn.timeout" value="60000000" />
     <property name="test.distributed.timeout" value="900000" />
+    <property name="test.simulation.timeout" value="1800000" />
 
     <!-- default for cql tests. Can be overridden by -Dcassandra.test.use_prepared=false -->
     <property name="cassandra.test.use_prepared" value="true" />
 
     <!-- The number of active processors seen by JVM -->
-    <property name="cassandra.test.processorCount" value="4"/>
+    <property name="cassandra.test.processorCount" value="2"/>
 
     <!-- skip flushing schema tables during tests -->
     <property name="cassandra.test.flush_local_schema_changes" value="false" />
@@ -134,7 +140,7 @@
     <property name="jamm.version" value="0.3.2"/>
     <property name="ecj.version" value="4.6.1"/>
     <property name="ohc.version" value="0.5.1"/>
-    <property name="asm.version" value="7.1"/>
+    <property name="asm.version" value="9.1"/>
     <property name="allocation-instrumenter.version" value="3.1.0"/>
     <property name="bytebuddy.version" value="1.10.10"/>
     <property name="jflex.version" value="1.8.2"/>
@@ -146,6 +152,8 @@
     <property name="chronicle-wire.version" value="2.20.117" />
     <property name="chronicle-threads.version" value="2.20.111" />
 
+    <property name="dtest-api.version" value="0.0.13" />
+
     <condition property="maven-ant-tasks.jar.exists">
       <available file="${build.dir}/maven-ant-tasks-${maven-ant-tasks.version}.jar" />
     </condition>
@@ -162,19 +170,22 @@
         <os family="windows" />
     </condition>
 
-    <!-- Check if all tests are being run or just one. If it's all tests don't spam the console with test output.
+    <!-- Check if all tests are being run or just one (check testclasslist target). If it's all tests don't spam the
+         console with test output.
          If it's an individual test print the output from the test under the assumption someone is debugging the test
          and wants to know what is going on without having to context switch to the log file that is generated.
          Debug level output still needs to be retrieved from the log file.  -->
-    <script language="javascript">
-        if (project.getProperty("cassandra.keepBriefBrief") == null)
-        {
-            if (project.getProperty("test.name").equals("*Test"))
-                project.setProperty("cassandra.keepBriefBrief", "true");
-            else
-                project.setProperty("cassandra.keepBriefBrief", "false");
-        }
-    </script>
+    <macrodef name="set-keepbrief-property">
+        <attribute name="test-name" />
+        <sequential>
+            <condition property="cassandra.keepBriefBrief" value="false" else="true">
+                <not>
+                    <equals arg1="@{test-name}" arg2="*Test"/>
+                </not>
+            </condition>
+        </sequential>
+    </macrodef>
+    <set-keepbrief-property test-name="${test.name}" />
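+    <!-- For example, "ant testsome -Dtest.name=org.apache.cassandra.service.StorageServiceServerTest" sets
+         cassandra.keepBriefBrief to false so that test's output is echoed to the console, while the default
+         "*Test" pattern keeps console output brief. -->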
 
     <condition property="java.version.8">
         <equals arg1="${ant.java.version}" arg2="1.8"/>
@@ -250,7 +261,7 @@
         <string>-Dio.netty.tryReflectionSetAccessible=true</string>
     </resources>
     <pathconvert property="_jvm11_test_arg_items_concat" refid="_jvm11_test_arg_items" pathsep=" "/>
-    <condition property="test-jvmargs" value="${_jvm11_test_arg_items_concat}" else="${_jvm8_test_arg_items_concat}">
+    <condition property="_std-test-jvmargs" value="${_jvm11_test_arg_items_concat}" else="${_jvm8_test_arg_items_concat}">
         <not>
             <equals arg1="${ant.java.version}" arg2="1.8"/>
         </not>
@@ -397,7 +408,7 @@
 
     <target name="realclean" depends="clean" description="Remove the entire build directory and all downloaded artifacts">
         <delete>
-          <fileset dir="${build.lib}" excludes="cassandra-driver-internal-only-*"/>
+          <fileset dir="${build.lib}" excludes="cassandra-driver-internal-only-*,puresasl-internal-only-*"/>
         </delete>
         <delete dir="${build.dir}" />
         <delete dir="${doc.dir}/build" />
@@ -509,7 +520,7 @@
         <license name="The Apache Software License, Version 2.0" url="https://www.apache.org/licenses/LICENSE-2.0.txt"/>
         <scm connection="${scm.connection}" developerConnection="${scm.developerConnection}" url="${scm.url}"/>
         <dependencyManagement>
-          <dependency groupId="org.xerial.snappy" artifactId="snappy-java" version="1.1.2.6"/>
+          <dependency groupId="org.xerial.snappy" artifactId="snappy-java" version="1.1.8.4"/>
           <dependency groupId="org.lz4" artifactId="lz4-java" version="1.8.0"/>
           <dependency groupId="com.ning" artifactId="compress-lzf" version="0.8.4" scope="provided"/>
           <dependency groupId="com.github.luben" artifactId="zstd-jni" version="1.5.5-1"/>
@@ -521,10 +532,11 @@
             <exclusion groupId="org.checkerframework" artifactId="checker-qual" />
             <exclusion groupId="com.google.errorprone" artifactId="error_prone_annotations" />
           </dependency>
+          <dependency groupId="com.google.jimfs" artifactId="jimfs" version="1.1"/>
           <dependency groupId="org.hdrhistogram" artifactId="HdrHistogram" version="2.1.9"/>
           <dependency groupId="commons-cli" artifactId="commons-cli" version="1.1"/>
           <dependency groupId="commons-codec" artifactId="commons-codec" version="1.9"/>
-          <dependency groupId="commons-io" artifactId="commons-io" version="2.6" scope="test"/>
+          <dependency groupId="commons-io" artifactId="commons-io" version="2.6"/>
           <dependency groupId="org.apache.commons" artifactId="commons-lang3" version="3.11"/>
           <dependency groupId="org.apache.commons" artifactId="commons-math3" version="3.2"/>
           <dependency groupId="org.antlr" artifactId="antlr" version="3.5.2" scope="provided">
@@ -542,6 +554,10 @@
           <dependency groupId="com.fasterxml.jackson.core" artifactId="jackson-core" version="2.13.2"/>
           <dependency groupId="com.fasterxml.jackson.core" artifactId="jackson-databind" version="2.13.2.2"/>
           <dependency groupId="com.fasterxml.jackson.core" artifactId="jackson-annotations" version="2.13.2"/>
+          <dependency groupId="com.fasterxml.jackson.datatype" artifactId="jackson-datatype-jsr310" version="2.13.2"/>
+          <dependency groupId="com.fasterxml.jackson.dataformat" artifactId="jackson-dataformat-yaml" version="2.13.2"  scope="test">
+            <exclusion groupId="org.yaml" artifactId="snakeyaml"/>
+          </dependency>
           <dependency groupId="com.googlecode.json-simple" artifactId="json-simple" version="1.1"/>
           <dependency groupId="com.boundary" artifactId="high-scale-lib" version="1.0.6"/>
           <dependency groupId="com.github.jbellis" artifactId="jamm" version="${jamm.version}"/>
@@ -554,11 +570,14 @@
           <dependency groupId="com.google.code.java-allocation-instrumenter" artifactId="java-allocation-instrumenter" version="${allocation-instrumenter.version}" scope="test">
             <exclusion groupId="com.google.guava" artifactId="guava"/>
           </dependency>
-          <dependency groupId="org.apache.cassandra" artifactId="dtest-api" version="0.0.13" scope="test"/>
+          <dependency groupId="org.apache.cassandra" artifactId="harry-core" version="0.0.1" scope="test"/>
           <dependency groupId="org.reflections" artifactId="reflections" version="0.10.2" scope="test"/>
+          <dependency groupId="org.apache.cassandra" artifactId="dtest-api" version="${dtest-api.version}" scope="test"/>
+          <dependency groupId="com.puppycrawl.tools" artifactId="checkstyle" version="8.40" scope="test"/>
           <dependency groupId="org.apache.hadoop" artifactId="hadoop-core" version="1.0.3" scope="provided">
             <exclusion groupId="org.mortbay.jetty" artifactId="servlet-api"/>
             <exclusion groupId="commons-logging" artifactId="commons-logging"/>
+            <exclusion groupId="commons-lang" artifactId="commons-lang"/>
             <exclusion groupId="org.eclipse.jdt" artifactId="core"/>
             <exclusion groupId="ant" artifactId="ant"/>
             <exclusion groupId="junit" artifactId="junit"/>
@@ -567,13 +586,15 @@
           </dependency>
           <dependency groupId="org.apache.hadoop" artifactId="hadoop-minicluster" version="1.0.3" scope="provided">
             <exclusion groupId="asm" artifactId="asm"/> <!-- this is the outdated version 3.1 -->
-            <exclusion groupId="org.slf4j" artifactId="slf4j-api"/>
             <exclusion groupId="org.codehaus.jackson" artifactId="jackson-mapper-asl"/>
+            <exclusion groupId="org.slf4j" artifactId="slf4j-api"/>
           </dependency>
-          <dependency groupId="net.java.dev.jna" artifactId="jna" version="5.6.0"/>
+          <dependency groupId="net.java.dev.jna" artifactId="jna" version="5.9.0"/>
 
           <dependency groupId="org.jacoco" artifactId="org.jacoco.agent" version="${jacoco.version}" scope="test"/>
-          <dependency groupId="org.jacoco" artifactId="org.jacoco.ant" version="${jacoco.version}" scope="test"/>
+          <dependency groupId="org.jacoco" artifactId="org.jacoco.ant" version="${jacoco.version}" scope="test">
+            <exclusion groupId="org.ow2.asm" artifactId="asm"/>
+          </dependency>
 
           <dependency groupId="org.jboss.byteman" artifactId="byteman-install" version="${byteman.version}" scope="provided"/>
           <dependency groupId="org.jboss.byteman" artifactId="byteman" version="${byteman.version}" scope="provided"/>
@@ -621,7 +642,7 @@
             <exclusion groupId="net.java.dev.jna" artifactId="jna" />
             <exclusion groupId="net.java.dev.jna" artifactId="jna-platform" />
           </dependency>
-          <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2" scope="provided"/>
+          <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2"/>
           <dependency groupId="com.clearspring.analytics" artifactId="stream" version="2.5.2">
             <exclusion groupId="it.unimi.dsi" artifactId="fastutil" />
           </dependency>
@@ -649,11 +670,12 @@
           </dependency>
           <dependency groupId="com.github.rholder" artifactId="snowball-stemmer" version="1.3.0.581.1" />
           <dependency groupId="com.googlecode.concurrent-trees" artifactId="concurrent-trees" version="2.4.0" />
-          <dependency groupId="com.github.ben-manes.caffeine" artifactId="caffeine" version="2.5.6" />
+          <dependency groupId="com.github.ben-manes.caffeine" artifactId="caffeine" version="2.9.2" />
           <dependency groupId="org.jctools" artifactId="jctools-core" version="3.1.0"/>
           <dependency groupId="org.ow2.asm" artifactId="asm" version="${asm.version}"/>
           <dependency groupId="org.ow2.asm" artifactId="asm-tree" version="${asm.version}" scope="test"/>
           <dependency groupId="org.ow2.asm" artifactId="asm-commons" version="${asm.version}" scope="test"/>
+          <dependency groupId="org.ow2.asm" artifactId="asm-util" version="${asm.version}" scope="test"/>
           <dependency groupId="org.gridkit.jvmtool" artifactId="sjk-cli" version="0.14"/>
           <dependency groupId="org.gridkit.jvmtool" artifactId="sjk-core" version="0.14">
             <exclusion groupId="org.gridkit.jvmtool" artifactId="sjk-hflame"/>
@@ -680,6 +702,7 @@
             <exclusion groupId="org.hamcrest" artifactId="hamcrest"/>
           </dependency>
           <dependency groupId="org.hamcrest" artifactId="hamcrest" version="2.2" scope="test"/>
+          <dependency groupId="com.github.seancfoley" artifactId="ipaddress" version="5.3.3" />
         </dependencyManagement>
         <developer id="adelapena" name="Andres de la Peña"/>
         <developer id="alakshman" name="Avinash Lakshman"/>
@@ -736,16 +759,24 @@
                 artifactId="cassandra-parent"
                 version="${version}"
                 relativePath="${final.name}-parent.pom"/>
-        <dependency groupId="junit" artifactId="junit"/>
-        <dependency groupId="commons-io" artifactId="commons-io"/>
-        <dependency groupId="org.mockito" artifactId="mockito-core"/>
-        <dependency groupId="org.quicktheories" artifactId="quicktheories"/>
-        <dependency groupId="org.reflections" artifactId="reflections"/>
-        <dependency groupId="com.google.code.java-allocation-instrumenter" artifactId="java-allocation-instrumenter" version="${allocation-instrumenter.version}"/>
-        <dependency groupId="org.apache.cassandra" artifactId="dtest-api"/>
-        <dependency groupId="org.openjdk.jmh" artifactId="jmh-core"/>
-        <dependency groupId="org.openjdk.jmh" artifactId="jmh-generator-annprocess"/>
-        <dependency groupId="org.apache.ant" artifactId="ant-junit"/>
+        <dependency groupId="junit" artifactId="junit" scope="test"/>
+        <dependency groupId="commons-io" artifactId="commons-io" scope="test"/>
+        <dependency groupId="org.mockito" artifactId="mockito-core" scope="test"/>
+        <dependency groupId="org.ow2.asm" artifactId="asm" version="${asm.version}"/>
+        <dependency groupId="org.ow2.asm" artifactId="asm-tree" version="${asm.version}" scope="test"/>
+        <dependency groupId="org.ow2.asm" artifactId="asm-commons" version="${asm.version}" scope="test"/>
+        <dependency groupId="org.ow2.asm" artifactId="asm-util" version="${asm.version}" scope="test"/>
+        <dependency groupId="com.google.jimfs" artifactId="jimfs" version="1.1" scope="test"/>
+        <dependency groupId="com.puppycrawl.tools" artifactId="checkstyle" scope="test"/>
+        <dependency groupId="org.quicktheories" artifactId="quicktheories" scope="test"/>
+        <dependency groupId="org.reflections" artifactId="reflections" scope="test"/>
+        <dependency groupId="com.google.code.java-allocation-instrumenter" artifactId="java-allocation-instrumenter" version="${allocation-instrumenter.version}" scope="test"/>
+        <dependency groupId="org.apache.cassandra" artifactId="dtest-api" scope="test"/>
+        <dependency groupId="org.openjdk.jmh" artifactId="jmh-core" scope="test"/>
+        <dependency groupId="org.openjdk.jmh" artifactId="jmh-generator-annprocess" scope="test"/>
+        <dependency groupId="net.ju-n.compile-command-annotations" artifactId="compile-command-annotations" scope="test"/>
+        <dependency groupId="org.apache.ant" artifactId="ant-junit" scope="test"/>
+        <dependency groupId="org.apache.cassandra" artifactId="harry-core"/>
         <!-- adding this dependency is necessary for assertj. When updating assertj, need to also update the version of
              this that the new assertj's `assertj-parent-pom` depends on. -->
         <dependency groupId="org.junit" artifactId="junit-bom" type="pom"/>
@@ -754,6 +785,8 @@
         <!-- coverage debs -->
         <dependency groupId="org.jacoco" artifactId="org.jacoco.agent"/>
         <dependency groupId="org.jacoco" artifactId="org.jacoco.ant"/>
+
+        <dependency groupId="com.fasterxml.jackson.dataformat" artifactId="jackson-dataformat-yaml"/>
       </artifact:pom>
 
       <!-- now the pom's for artifacts being deployed to Maven Central -->
@@ -783,6 +816,7 @@
         <dependency groupId="com.fasterxml.jackson.core" artifactId="jackson-core"/>
         <dependency groupId="com.fasterxml.jackson.core" artifactId="jackson-databind"/>
         <dependency groupId="com.fasterxml.jackson.core" artifactId="jackson-annotations"/>
+        <dependency groupId="com.fasterxml.jackson.datatype" artifactId="jackson-datatype-jsr310"/>
         <dependency groupId="com.googlecode.json-simple" artifactId="json-simple"/>
         <dependency groupId="com.boundary" artifactId="high-scale-lib"/>
         <dependency groupId="org.yaml" artifactId="snakeyaml"/>
@@ -851,6 +885,7 @@
         <dependency groupId="org.jboss.byteman" artifactId="byteman"/>
         <dependency groupId="org.jboss.byteman" artifactId="byteman-submit"/>
         <dependency groupId="org.jboss.byteman" artifactId="byteman-bmunit"/>
+        <dependency groupId="com.github.seancfoley" artifactId="ipaddress" />
       </artifact:pom>
     </target>
 
@@ -878,6 +913,7 @@
           <pathelement location="${test.conf}"/>
         </classpath>
         <jvmarg value="-Dstorage-config=${test.conf}"/>
+        <jvmarg value="-Dcassandra.reads.thresholds.coordinator.defensive_checks_enabled=true" /> <!-- enable defensive checks -->
         <jvmarg value="-javaagent:${build.lib}/jamm-${jamm.version}.jar" />
         <jvmarg value="-ea"/>
         <jvmarg line="${java11-jvmargs}"/>
@@ -887,7 +923,8 @@
     <!--
         The build target builds all the .class files
     -->
-    <target name="build" depends="resolver-retrieve-build,build-project" description="Compile Cassandra classes"/>
+    <target name="build" depends="resolver-retrieve-build,build-project,checkstyle" description="Compile Cassandra classes"/>
+    <target name="_build_unsafe" depends="resolver-retrieve-build,build-project" description="Compile Cassandra classes without checks"/>
     <target name="codecoverage" depends="jacoco-run,jacoco-report" description="Create code coverage report"/>
 
     <target name="_build_java">
@@ -1018,6 +1055,60 @@
         </testmacro>
     </target>
 
+    <!--
+        simulator asm build file
+        -->
+    <property name="simulator-asm.build.src" value="${test.simulator-asm.src}" />
+    <property name="simulator-asm.build.classes" value="${build.classes}/simulator-asm" />
+    <property name="simulator-asm.manifest" value="${simulator-asm.build.classes}/MANIFEST.MF" />
+
+    <property name="simulator-bootstrap.build.src" value="${test.simulator-bootstrap.src}" />
+    <property name="simulator-bootstrap.build.classes" value="${build.classes}/simulator-bootstrap" />
+    <property name="simulator-bootstrap.manifest" value="${simulator-bootstrap.build.classes}/MANIFEST.MF" />
+
+    <target name="simulator-asm-build" depends="_build_unsafe" description="build simulator-asm">
+        <antcall target="_simulator-asm_build"/>
+    </target>
+
+    <target name="simulator-bootstrap-build" depends="_build_unsafe" description="build simulator-bootstrap">
+        <antcall target="_simulator-bootstrap_build"/>
+    </target>
+
+    <target name="_simulator-asm_build">
+        <mkdir dir="${simulator-asm.build.classes}" />
+        <javac compiler="modern" debug="true" debuglevel="${debuglevel}"
+               source="${source.version}" target="${target.version}"
+               encoding="utf-8" destdir="${simulator-asm.build.classes}" includeantruntime="true">
+            <src path="${simulator-asm.build.src}" />
+            <classpath>
+                <fileset dir="${test.lib}">
+                     <include name="**/asm-*${asm.version}.jar" />
+                </fileset>
+                <fileset dir="${build.lib}">
+                     <include name="**/asm-*${asm.version}.jar" />
+                </fileset>
+            </classpath>
+        </javac>
+    </target>
+
+    <target name="_simulator-bootstrap_build">
+        <mkdir dir="${simulator-bootstrap.build.classes}" />
+        <javac compiler="modern" debug="true" debuglevel="${debuglevel}"
+               source="${source.version}" target="${target.version}"
+               encoding="utf-8" destdir="${simulator-bootstrap.build.classes}" includeantruntime="true">
+            <src path="${simulator-bootstrap.build.src}" />
+            <classpath>
+                <fileset dir="${test.lib}">
+                     <include name="**/asm-*${asm.version}.jar" />
+                </fileset>
+                <fileset dir="${build.lib}">
+                     <include name="**/asm-*${asm.version}.jar" />
+                </fileset>
+            </classpath>
+            <compilerarg value="-XDignore.symbol.file"/>
+        </javac>
+    </target>
+
 	<target name="_write-poms" depends="maven-declare-dependencies">
 	    <artifact:writepom pomRefId="parent-pom" file="${build.dir}/${final.name}-parent.pom"/>
 	    <artifact:writepom pomRefId="all-pom" file="${build.dir}/${final.name}.pom"/>
@@ -1054,29 +1145,76 @@
         </manifest>
       </jar>
     </target>
-    <target name="jar"
-            depends="_main-jar,build-test,stress-build,fqltool-build,write-poms"
+
+    <target name="stress-jar"
+            depends="stress-build"
             description="Assemble Cassandra JAR files">
-      <!-- Stress jar -->
-      <manifest file="${stress.manifest}">
-        <attribute name="Built-By" value="Pavel Yaskevich"/>
-        <attribute name="Main-Class" value="org.apache.cassandra.stress.Stress"/>
-      </manifest>
-      <mkdir dir="${stress.build.classes}/META-INF" />
-      <mkdir dir="${build.dir}/tools/lib/" />
-      <jar destfile="${build.dir}/tools/lib/stress.jar" manifest="${stress.manifest}">
-        <fileset dir="${stress.build.classes}"/>
-      </jar>
-      <!-- fqltool jar -->
-      <manifest file="${fqltool.manifest}">
-        <attribute name="Built-By" value="Marcus Eriksson"/>
-        <attribute name="Main-Class" value="org.apache.cassandra.fqltool.FullQueryLogTool"/>
-      </manifest>
-      <mkdir dir="${fqltool.build.classes}/META-INF" />
-      <mkdir dir="${build.dir}/tools/lib/" />
-      <jar destfile="${build.dir}/tools/lib/fqltool.jar" manifest="${fqltool.manifest}">
-        <fileset dir="${fqltool.build.classes}"/>
-      </jar>
+        <!-- Stress jar -->
+        <manifest file="${stress.manifest}">
+            <attribute name="Built-By" value="Pavel Yaskevich"/>
+            <attribute name="Main-Class" value="org.apache.cassandra.stress.Stress"/>
+        </manifest>
+        <mkdir dir="${stress.build.classes}/META-INF" />
+        <mkdir dir="${build.dir}/tools/lib/" />
+        <jar destfile="${build.dir}/tools/lib/stress.jar" manifest="${stress.manifest}">
+            <fileset dir="${stress.build.classes}"/>
+        </jar>
+    </target>
+
+    <target name="fqltool-jar"
+            depends="fqltool-build"
+            description="Assemble Cassandra JAR files">
+
+        <!-- fqltool jar -->
+        <manifest file="${fqltool.manifest}">
+            <attribute name="Built-By" value="Marcus Eriksson"/>
+            <attribute name="Main-Class" value="org.apache.cassandra.fqltool.FullQueryLogTool"/>
+        </manifest>
+        <mkdir dir="${fqltool.build.classes}/META-INF" />
+        <mkdir dir="${build.dir}/tools/lib/" />
+        <jar destfile="${build.dir}/tools/lib/fqltool.jar" manifest="${fqltool.manifest}">
+            <fileset dir="${fqltool.build.classes}"/>
+        </jar>
+    </target>
+
+    <target name="simulator-jars"
+            depends="simulator-asm-build,simulator-bootstrap-build"
+            description="Assemble Cassandra JAR files">
+
+        <!-- simulator asm jar -->
+        <manifest file="${simulator-asm.manifest}">
+            <attribute name="Built-By" value="Benedict Elliott Smith"/>
+            <attribute name="Premain-Class" value="org.apache.cassandra.simulator.asm.InterceptAgent"/>
+            <attribute name="Agent-Class" value="org.apache.cassandra.simulator.asm.InterceptAgent"/>
+            <attribute name="Can-Redefine-Classes" value="true"/>
+            <attribute name="Can-Retransform-Classes" value="true"/>
+        </manifest>
+        <mkdir dir="${simulator-asm.build.classes}/META-INF" />
+        <mkdir dir="${test.lib}/jars/" />
+        <jar destfile="${test.lib}/jars/simulator-asm.jar" manifest="${simulator-asm.manifest}">
+            <fileset dir="${simulator-asm.build.classes}"/>
+            <fileset dir="${test.lib}/jars">
+                <include name="**/asm-*${asm.version}.jar" />
+            </fileset>
+        </jar>
+
+        <!-- simulator bootstrap jar -->
+        <manifest file="${simulator-bootstrap.manifest}">
+            <attribute name="Built-By" value="Benedict Elliott Smith"/>
+        </manifest>
+        <mkdir dir="${simulator-bootstrap.build.classes}/META-INF" />
+        <mkdir dir="${test.lib}/jars/" />
+        <jar destfile="${test.lib}/jars/simulator-bootstrap.jar" manifest="${simulator-bootstrap.manifest}">
+            <fileset dir="${simulator-bootstrap.build.classes}"/>
+            <fileset dir="${test.lib}/jars">
+                <include name="**/asm-*${asm.version}.jar" />
+            </fileset>
+        </jar>
+    </target>
+
+    <target name="jar"
+            depends="_main-jar,build-test,stress-jar,fqltool-jar,simulator-jars,write-poms"
+            description="Assemble Cassandra JAR files">
     </target>
 
     <!--
@@ -1283,7 +1421,7 @@
     <antcall target="build-test" inheritRefs="true"/>
   </target>
 
-  <target name="build-test" depends="_main-jar,stress-build-test,fqltool-build,resolver-dist-lib"
+  <target name="build-test" depends="_main-jar,stress-build-test,fqltool-build,resolver-dist-lib,simulator-jars,checkstyle-test"
           description="Compile test classes">
     <antcall target="_build-test"/>
   </target>
@@ -1310,6 +1448,10 @@
      <src path="${test.memory.src}"/>
      <src path="${test.microbench.src}"/>
      <src path="${test.distributed.src}"/>
+     <src path="${test.simulator.src}"/>
+     <src path="${test.simulator-asm.src}"/>
+     <src path="${test.simulator-bootstrap.src}"/>
+     <src path="${test.simulator-test.src}"/>
     </javac>
 
     <checktestnameshelper/>
@@ -1336,7 +1478,8 @@
   <macrodef name="testhelper">
     <attribute name="testdelegate"/>
     <sequential>
-      <testhelper_ testdelegate="@{testdelegate}"/>
+        <taskdef name="testhelper_" classname="org.apache.cassandra.anttasks.TestHelper" classpath="${test.classes}"/>
+        <testhelper_ property="@{testdelegate}"/>
       <fail message="Some test(s) failed.">
         <condition>
             <and>
@@ -1353,21 +1496,6 @@
   <!-- Run a list of junit tasks but don't track errors or generate a report after
        If a test fails the testfailed property will be set. All the tests are run using the testdelegate
        macro that is specified as an attribute and they will be run sequentially in this ant process -->
-  <scriptdef name="testhelper_" language="javascript">
-    <attribute name="testdelegate"/>
-    <![CDATA[
-        sep = project.getProperty("path.separator");
-        all = project.getProperty("all-test-classes").split(sep);
-        var p = project.createTask('sequential');
-        for (i = 0; i < all.length; i++) {
-            if (all[i] == undefined) continue;
-            task = project.createTask( attributes.get("testdelegate") );
-            task.setDynamicAttribute( "test.file.list", "" + all[i]);
-            p.addTask(task);
-        }
-        p.perform();
-    ]]>
-  </scriptdef>
 
   <!-- Defines how to run a set of tests. If you change the defaults for attributes
       you should also update them in testmacro.
@@ -1427,13 +1555,15 @@
         <jvmarg value="-Dcassandra.testtag=@{testtag}"/>
         <jvmarg value="-Dcassandra.keepBriefBrief=${cassandra.keepBriefBrief}" />
         <jvmarg value="-Dcassandra.strict.runtime.checks=true" />
+        <jvmarg value="-Dcassandra.reads.thresholds.coordinator.defensive_checks_enabled=true" /> <!-- enable defensive checks -->
         <jvmarg value="-Dcassandra.test.flush_local_schema_changes=${cassandra.test.flush_local_schema_changes}"/>
         <jvmarg value="-Dcassandra.test.messagingService.nonGracefulShutdown=${cassandra.test.messagingService.nonGracefulShutdown}"/>
         <jvmarg value="-Dcassandra.use_nix_recursive_delete=${cassandra.use_nix_recursive_delete}"/>
         <jvmarg line="${java11-jvmargs}"/>
         <!-- disable shrinks in quicktheories CASSANDRA-15554 -->
         <jvmarg value="-DQT_SHRINKS=0"/>
-        <jvmarg line="${test-jvmargs}" />
+        <jvmarg line="${_std-test-jvmargs}" />
+        <jvmarg line="${test.jvm.args}" />
         <optjvmargs/>
         <!-- Uncomment to debug unittest, attach debugger to port 1416 -->
         <!--
@@ -1443,11 +1573,13 @@
           <pathelement path="${java.class.path}"/>
           <pathelement location="${stress.build.classes}"/>
           <pathelement location="${fqltool.build.classes}"/>
-          <path refid="cassandra.classpath.test" />
           <pathelement location="${test.classes}"/>
+          <path refid="cassandra.classpath.test" />
           <pathelement location="${stress.test.classes}"/>
           <pathelement location="${fqltool.test.classes}"/>
           <pathelement location="${test.conf}"/>
+          <pathelement path="${java.class.path}"/>
+          <path refid="cassandra.classpath" />
           <fileset dir="${test.lib}">
             <include name="**/*.jar" />
               <exclude name="**/ant-*.jar"/>
@@ -1570,6 +1702,7 @@
   </target>
 
   <!-- Use this with an FQDN for test class, and an optional csv list of methods like this:
+    ant testsome -Dtest.name=org.apache.cassandra.service.StorageServiceServerTest
     ant testsome -Dtest.name=org.apache.cassandra.service.StorageServiceServerTest -Dtest.methods=testRegularMode,testGetAllRangesEmpty
   -->
   <target name="testsome" depends="maybe-build-test" description="Execute specific unit tests" >
@@ -1724,9 +1857,9 @@
   </target>
 
   <!-- Use this with an simple class name for test class, and an optional csv list of methods like this:
-      ant cql-test-some -Dtest.name=ListsTest
-      ant cql-test-some -Dtest.name=ListsTest -Dtest.methods=testPrecisionTime_getNext_simple
-    -->
+    ant cql-test-some -Dtest.name=ListsTest
+    ant cql-test-some -Dtest.name=ListsTest -Dtest.methods=testPrecisionTime_getNext_simple
+  -->
   <target name="cql-test-some" depends="maybe-build-test" description="Execute specific CQL tests" >
     <sequential>
       <echo message="running ${test.methods} tests from ${test.name}"/>
@@ -1903,17 +2036,18 @@
   </target>
 
   <target name="dtest-jar" depends="build-test, build" description="Create dtest-compatible jar, including all dependencies">
-      <jar jarfile="${build.dir}/dtest-${base.version}.jar">
-          <zipgroupfileset dir="${build.lib}" includes="*.jar" excludes="META-INF/*.SF"/>
-          <zipgroupfileset dir="${build.dir.lib}/jars" includes="javassist-*.jar,reflections-*.jar" excludes="META-INF/*.SF"/>
+      <jar jarfile="${build.dir}/dtest-${base.version}.jar" duplicate="preserve">
           <fileset dir="${build.classes.main}"/>
           <fileset dir="${test.classes}"/>
           <fileset dir="${test.conf}" />
+          <zipgroupfileset dir="${build.lib}" includes="*.jar" excludes="META-INF/*.SF"/>
+          <zipgroupfileset dir="${test.lib}/jars" includes="jimfs-1.1.jar,dtest-api-*.jar,asm-*.jar,javassist-*.jar,reflections-*.jar,semver4j-*.jar" excludes="META-INF/*.SF"/>
+          <zipgroupfileset dir="${build.dir.lib}/jars" includes="asm-*.jar" excludes="META-INF/*.SF"/>
       </jar>
   </target>
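+  <!-- Illustrative usage: "ant dtest-jar" writes the dtest-compatible jar to ${build.dir}/dtest-${base.version}.jar,
+       bundling the main classes, test classes, test config and the dependency jars listed above. -->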
 
   <target name="test-jvm-dtest" depends="maybe-build-test" description="Execute in-jvm dtests">
-    <testmacro inputdir="${test.distributed.src}" timeout="${test.distributed.timeout}" forkmode="once" showoutput="true" filter="**/test/*Test.java">
+    <testmacro inputdir="${test.distributed.src}" timeout="${test.distributed.timeout}" forkmode="once" showoutput="true" filter="**/test/${test.name}.java">
       <jvmarg value="-Dlogback.configurationFile=test/conf/logback-dtest.xml"/>
       <jvmarg value="-Dcassandra.ring_delay_ms=10000"/>
       <jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
@@ -1921,6 +2055,27 @@
     </testmacro>
   </target>
 
+  <target name="test-simulator-dtest" depends="maybe-build-test" description="Execute simulator dtests">
+    <testmacro inputdir="${test.simulator-test.src}" timeout="${test.simulation.timeout}" forkmode="perTest" showoutput="true" filter="**/test/${test.name}.java">
+      <jvmarg value="-Dlogback.configurationFile=test/conf/logback-simulator.xml"/>
+      <jvmarg value="-Dcassandra.ring_delay_ms=10000"/>
+      <jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
+      <jvmarg value="-Dcassandra.skip_sync=true" />
+      <jvmarg value="-Dcassandra.debugrefcount=false"/>
+      <jvmarg value="-Dcassandra.test.simulator.determinismcheck=strict"/>
+      <!-- Support Simulator Tests -->
+      <jvmarg line="-javaagent:${test.lib}/jars/simulator-asm.jar"/>
+      <jvmarg line="-Xbootclasspath/a:${test.lib}/jars/simulator-bootstrap.jar"/>
+      <jvmarg line="-XX:ActiveProcessorCount=4"/>
+      <jvmarg line="-XX:-TieredCompilation"/>
+      <jvmarg line="-XX:-BackgroundCompilation"/>
+      <jvmarg line="-XX:CICompilerCount=1"/>
+      <jvmarg line="-XX:Tier4CompileThreshold=1000"/>
+      <jvmarg line="-XX:ReservedCodeCacheSize=256M"/>
+      <jvmarg line="-Xmx8G"/>
+    </testmacro>
+  </target>
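+  <!-- Illustrative usage (the class name below is an example only):
+       ant test-simulator-dtest                                  runs all simulator tests matching *Test
+       ant test-simulator-dtest -Dtest.name=SomeSimulationTest  runs a single test from test/simulator/test
+    -->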
+
   <target name="test-jvm-upgrade-dtest" depends="maybe-build-test" description="Execute in-jvm dtests">
     <testmacro inputdir="${test.distributed.src}" timeout="${test.distributed.timeout}" forkmode="once" showoutput="true" filter="**/upgrade/*Test.java">
       <jvmarg value="-Dlogback.configurationFile=test/conf/logback-dtest.xml"/>
@@ -1931,6 +2086,7 @@
   </target>
 
   <!-- Use this with an FQDN for test class, and an optional csv list of methods like this:
+      ant test-jvm-dtest-some -Dtest.name=org.apache.cassandra.distributed.test.ResourceLeakTest
       ant test-jvm-dtest-some -Dtest.name=org.apache.cassandra.distributed.test.ResourceLeakTest -Dtest.methods=looperTest
     -->
   <target name="test-jvm-dtest-some" depends="maybe-build-test" description="Execute some in-jvm dtests">
@@ -1992,7 +2148,7 @@
               <pathelement location="${test.classes}"/>
               <pathelement location="${test.conf}"/>
               <fileset dir="${test.lib}">
-                  <include name="**/*.jar" />
+                  <include name="**/*.jar" />
               </fileset>
           </classpath>
       </java>
@@ -2055,7 +2211,24 @@
   </natures>
 </projectDescription>]]>
     </echo>
-	<echo file=".classpath"><![CDATA[<?xml version="1.0" encoding="UTF-8"?>
+    <path id="eclipse-project-libs-path">
+        <fileset dir="lib">
+            <include name="**/*.jar" />
+        </fileset>
+        <fileset dir="build/lib/jars">
+            <include name="**/*.jar" />
+        </fileset>
+        <fileset dir="build/test/lib/jars">
+            <include name="**/*.jar" />
+        </fileset>
+    </path>
+    <pathconvert property="eclipse-libs-list" refid="eclipse-project-libs-path" pathsep="${line.separator}">
+        <mapper>
+            <regexpmapper from="^(.*)$$" to='&lt;classpathentry kind="lib" path="\1\" \/&gt;'/>
+        </mapper>
+    </pathconvert>
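+    <!-- Each jar found above becomes one lib entry in the generated .classpath, e.g. (path shown for illustration):
+         <classpathentry kind="lib" path="lib/example.jar" /> -->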
+    <property name="eclipse-project-libs" refid="eclipse-project-libs-path"/>
+    <echo file=".classpath"><![CDATA[<?xml version="1.0" encoding="UTF-8"?>
 <classpath>
   <classpathentry kind="src" path="src/java"/>
   <classpathentry kind="src" path="src/resources"/>
@@ -2064,6 +2237,8 @@
   <classpathentry kind="src" output="build/test/classes" path="test/unit"/>
   <classpathentry kind="src" output="build/test/classes" path="test/long"/>
   <classpathentry kind="src" output="build/test/classes" path="test/distributed"/>
+  <classpathentry kind="src" output="build/test/classes" path="test/simulator/asm"/>
+  <classpathentry kind="src" output="build/test/classes" path="test/simulator/main"/>
   <classpathentry kind="src" output="build/test/classes" path="test/resources" />
   <classpathentry kind="src" path="tools/stress/src"/>
   <classpathentry kind="src" path="tools/fqltool/src"/>
@@ -2073,51 +2248,16 @@
   <classpathentry kind="output" path="build/classes/eclipse"/>
   <classpathentry kind="lib" path="test/conf"/>
   <classpathentry kind="lib" path="${java.home}/../lib/tools.jar"/>
+  ${eclipse-libs-list}
+</classpath>
 ]]>
 	</echo>
-  	<path id="eclipse-project-libs-path">
-  	 <fileset dir="lib">
-  	    <include name="**/*.jar" />
-     </fileset>
- 	 <fileset dir="build/lib/jars">
-  	    <include name="**/*.jar" />
-  	 </fileset>
-     <fileset dir="build/test/lib/jars">
-        <include name="**/*.jar" />
-     </fileset>
-  	</path>
-  	<property name="eclipse-project-libs" refid="eclipse-project-libs-path"/>
-       <script language="javascript">
+    <taskdef name="echoeclipseprojectslibs" classname="org.apache.cassandra.anttasks.EchoEclipseProjectLibs" classpath="${test.classes}">
         <classpath>
             <path refid="cassandra.classpath"/>
             <path refid="cassandra.classpath.test"/>
         </classpath>
-        <![CDATA[
-        var File = java.io.File;
-  		var FilenameUtils = Packages.org.apache.commons.io.FilenameUtils;
-  		jars = project.getProperty("eclipse-project-libs").split(project.getProperty("path.separator"));
-
-  		cp = "";
-  	    for (i=0; i< jars.length; i++) {
-  	       srcjar = FilenameUtils.getBaseName(jars[i]) + '-sources.jar';
-           srcdir = FilenameUtils.concat(project.getProperty("build.test.dir"), 'sources');
-  		   srcfile = new File(FilenameUtils.concat(srcdir, srcjar));
-
-  		   cp += ' <classpathentry kind="lib" path="' + jars[i] + '"';
-  		   if (srcfile.exists()) {
-  		      cp += ' sourcepath="' + srcfile.getAbsolutePath() + '"';
-  		   }
-  		   cp += '/>\n';
-  		}
-
-  		cp += '</classpath>';
-
-  		echo = project.createTask("echo");
-  	    echo.setMessage(cp);
-  		echo.setFile(new File(".classpath"));
-  		echo.setAppend(true);
-  	    echo.perform();
-  	]]> </script>
+    </taskdef>
     <mkdir dir=".settings" />
   </target>
 
@@ -2167,6 +2307,47 @@
         </java>
   </target>
 
+  <target name="init-checkstyle" depends="maven-ant-tasks-retrieve-build,build-project" unless="no-checkstyle">
+      <path id="checkstyle.lib.path">
+          <fileset dir="${test.lib}/jars" includes="*.jar"/>
+      </path>
+      <!-- Sevntu custom checks are retrieved by Ivy into the lib folder
+           and will be accessible to checkstyle -->
+      <taskdef resource="com/puppycrawl/tools/checkstyle/ant/checkstyle-ant-task.properties"
+               classpathref="checkstyle.lib.path"/>
+  </target>
+
+  <target name="checkstyle" depends="init-checkstyle,maven-ant-tasks-retrieve-build,build-project" description="Run custom checkstyle code analysis" if="java.version.8" unless="no-checkstyle">
+      <property name="checkstyle.log.dir" value="${build.dir}/checkstyle" />
+      <property name="checkstyle.report.file" value="${checkstyle.log.dir}/checkstyle_report.xml"/>
+      <mkdir  dir="${checkstyle.log.dir}" />
+
+      <property name="checkstyle.properties" value="${basedir}/checkstyle.xml" />
+      <property name="checkstyle.suppressions" value="${basedir}/checkstyle_suppressions.xml" />
+      <checkstyle config="${checkstyle.properties}"
+                  failureProperty="checkstyle.failure"
+                  failOnViolation="true">
+          <formatter type="plain"/>
+          <formatter type="xml" tofile="${checkstyle.report.file}"/>
+          <fileset dir="${build.src.java}" includes="**/*.java"/>
+      </checkstyle>
+  </target>
+
+  <target name="checkstyle-test" depends="init-checkstyle,maven-ant-tasks-retrieve-build,build-project" description="Run custom checkstyle code analysis on tests" if="java.version.8" unless="no-checkstyle">
+      <property name="checkstyle.log.dir" value="${build.dir}/checkstyle" />
+      <property name="checkstyle_test.report.file" value="${checkstyle.log.dir}/checkstyle_report_test.xml"/>
+      <mkdir  dir="${checkstyle.log.dir}" />
+
+      <property name="checkstyle_test.properties" value="${basedir}/checkstyle_test.xml" />
+      <property name="checkstyle.suppressions" value="${basedir}/checkstyle_suppressions.xml" />
+      <checkstyle config="${checkstyle_test.properties}"
+                  failureProperty="checkstyle.failure"
+                  failOnViolation="true">
+          <formatter type="plain"/>
+          <formatter type="xml" tofile="${checkstyle_test.report.file}"/>
+          <fileset dir="${test.dir}" includes="**/*.java"/>
+      </checkstyle>
+  </target>
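+  <!-- Both checkstyle targets run only when building with Java 8 (if="java.version.8") and can be skipped
+       explicitly, e.g.:
+       ant build -Dno-checkstyle=true
+    -->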
 
   <!-- Installs artifacts to local Maven repository -->
   <target name="mvn-install"
diff --git a/checkstyle.xml b/checkstyle.xml
new file mode 100644
index 0000000..053cc73
--- /dev/null
+++ b/checkstyle.xml
@@ -0,0 +1,113 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!DOCTYPE module PUBLIC
+          "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
+          "https://checkstyle.org/dtds/configuration_1_3.dtd">
+
+<module name="Checker">
+  <property name="severity" value="error"/>
+
+  <property name="fileExtensions" value="java, properties, xml"/>
+
+  <module name="BeforeExecutionExclusionFileFilter">
+    <property name="fileNamePattern" value="module\-info\.java$"/>
+  </module>
+
+  <!-- https://checkstyle.org/config_filters.html#SuppressionFilter -->
+  <!-- this exists only because for some reason the comment filter does not seem to work for Semaphore -->
+  <module name="SuppressionFilter">
+    <property name="file" value="${checkstyle.suppressions}"
+              default="checkstyle-suppressions.xml" />
+    <property name="optional" value="false"/>
+  </module>
+
+  <module name="TreeWalker">
+    <module name="SuppressWithNearbyCommentFilter">
+       <property name="commentFormat" value="checkstyle: permit this import"/>
+       <property name="checkFormat" value="IllegalImport"/>
+       <property name="influenceFormat" value="0"/>
+    </module>
+ 
+    <module name="SuppressWithNearbyCommentFilter">
+       <property name="commentFormat" value="checkstyle: permit this instantiation"/>
+       <property name="checkFormat" value="IllegalInstantiation"/>
+       <property name="influenceFormat" value="0"/>
+    </module>
+ 
+    <module name="SuppressWithNearbyCommentFilter">
+       <property name="commentFormat" value="checkstyle: permit system clock"/>
+       <property name="idFormat" value="blockSystemClock"/>
+       <property name="influenceFormat" value="0"/>
+    </module>
+
+    <module name="SuppressWithNearbyCommentFilter">
+      <property name="commentFormat" value="checkstyle: permit this invocation"/>
+      <property name="idFormat" value="blockPathToFile"/>
+      <property name="influenceFormat" value="0"/>
+    </module>
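+    <!-- The nearby-comment filters above allow targeted opt-outs in source, e.g. (illustrative Java line):
+         import java.io.File; // checkstyle: permit this import -->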
+ 
+    <module name="RegexpSinglelineJava">
+      <!-- block system time -->
+      <property name="id" value="blockSystemClock"/>
+      <property name="format" value="System\.(currentTimeMillis|nanoTime)"/>
+      <property name="ignoreComments" value="true"/>
+      <property name="message" value="Avoid System for time, should use org.apache.cassandra.utils.Clock.Global or org.apache.cassandra.utils.Clock interface" />
+    </module>
+
+    <module name="RegexpSinglelineJava">
+      <!-- block Instant.now -->
+      <property name="id" value="blockInstantNow"/>
+      <property name="format" value="Instant\.now"/>
+      <property name="ignoreComments" value="true"/>
+      <property name="message" value="Avoid Instant.now() for time, should use org.apache.cassandra.util.FBUtilities.now()" />
+    </module>
+
+    <module name="RegexpSinglelineJava">
+      <!-- block normal executors -->
+      <property name="id" value="blockExecutors"/>
+      <property name="format" value="newSingleThreadExecutor|newFixedThreadPool|newCachedThreadPool|newSingleThreadScheduledExecutor|newWorkStealingPool|newScheduledThreadPool|defaultThreadFactory"/>
+      <property name="ignoreComments" value="true"/>
+      <property name="message" value="Avoid creating an executor directly, should use org.apache.cassandra.concurrent.ExecutorFactory.Global#executorFactory" />
+    </module>
+    <module name="RegexpSinglelineJava">
+      <!-- block guavas directExecutor -->
+      <property name="id" value="blockGuavaDirectExecutor"/>
+      <property name="format" value="MoreExecutors\.directExecutor"/>
+      <property name="ignoreComments" value="true"/>
+      <property name="message" value="Avoid MoreExecutors.directExecutor() in favor of ImmediateExecutor.INSTANCE" />
+    </module>
+    <module name="IllegalImport">
+      <property name="illegalPkgs" value="junit.framework"/>
+      <property name="illegalClasses" value="java.io.File,java.io.FileInputStream,java.io.FileOutputStream,java.io.FileReader,java.io.FileWriter,java.io.RandomAccessFile,java.util.concurrent.Semaphore,java.util.concurrent.CountDownLatch,java.util.concurrent.Executors,java.util.concurrent.LinkedBlockingQueue,java.util.concurrent.SynchronousQueue,java.util.concurrent.ArrayBlockingQueue,com.google.common.util.concurrent.Futures,java.util.concurrent.CompletableFuture,io.netty.util.concurrent.Future,io.netty.util.concurrent.Promise,io.netty.util.concurrent.AbstractFuture,com.google.common.util.concurrent.ListenableFutureTask,com.google.common.util.concurrent.ListenableFuture,com.google.common.util.concurrent.AbstractFuture,java.nio.file.Paths"/>
+    </module>
+    <module name="IllegalInstantiation">
+      <property name="classes" value="java.io.File,java.lang.Thread,java.util.concurrent.FutureTask,java.util.concurrent.Semaphore,java.util.concurrent.CountDownLatch,java.util.concurrent.ScheduledThreadPoolExecutor,java.util.concurrent.ThreadPoolExecutor,java.util.concurrent.ForkJoinPool,java.lang.OutOfMemoryError"/>
+    </module>
+
+    <module name="RegexpSinglelineJava">
+      <!-- block Path#toFile() -->
+      <property name="id" value="blockPathToFile"/>
+      <property name="format" value="toFile\(\)"/>
+      <property name="message" value="Avoid Path#toFile(), as some implementations may not support it." />
+    </module>
+
+    <module name="RedundantImport"/>
+    <module name="UnusedImports"/>
+  </module>
+
+</module>
diff --git a/checkstyle_suppressions.xml b/checkstyle_suppressions.xml
new file mode 100644
index 0000000..ed4d144
--- /dev/null
+++ b/checkstyle_suppressions.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!DOCTYPE suppressions PUBLIC
+        "-//Checkstyle//DTD SuppressionFilter Configuration 1.1//EN"
+        "https://checkstyle.org/dtds/suppressions_1_1.dtd">
+
+<suppressions>
+  <suppress checks="RegexpSinglelineJava" files="Semaphore\.java"/>
+</suppressions>
diff --git a/checkstyle_test.xml b/checkstyle_test.xml
new file mode 100644
index 0000000..d237827
--- /dev/null
+++ b/checkstyle_test.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!DOCTYPE module PUBLIC
+          "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
+          "https://checkstyle.org/dtds/configuration_1_3.dtd">
+
+<module name="Checker">
+  <property name="severity" value="error"/>
+
+  <property name="fileExtensions" value="java, properties, xml"/>
+
+  <module name="BeforeExecutionExclusionFileFilter">
+    <property name="fileNamePattern" value="module\-info\.java$"/>
+  </module>
+
+  <!-- https://checkstyle.org/config_filters.html#SuppressionFilter -->
+  <!-- this exists only because for some reason the comment filter does not seem to work for Semaphore -->
+  <module name="SuppressionFilter">
+    <property name="file" value="${checkstyle.suppressions}"
+              default="checkstyle-suppressions.xml" />
+    <property name="optional" value="false"/>
+  </module>
+
+  <module name="TreeWalker">
+    <module name="SuppressWithNearbyCommentFilter">
+       <property name="commentFormat" value="checkstyle: permit this import"/>
+       <property name="checkFormat" value="IllegalImport"/>
+       <property name="influenceFormat" value="0"/>
+    </module>
+
+    <module name="SuppressWithNearbyCommentFilter">
+       <property name="commentFormat" value="checkstyle: permit this instantiation"/>
+       <property name="checkFormat" value="IllegalInstantiation"/>
+       <property name="influenceFormat" value="0"/>
+    </module>
+
+    <module name="IllegalImport">
+      <property name="illegalPkgs" value="junit.framework"/>
+      <property name="illegalClasses" value=""/>
+    </module>
+    <module name="IllegalInstantiation">
+      <property name="classes" value=""/>
+    </module>
+
+    <module name="RedundantImport"/>
+    <module name="UnusedImports"/>
+  </module>
+
+</module>
diff --git a/conf/cassandra-topology.properties b/conf/cassandra-topology.properties.example
similarity index 100%
rename from conf/cassandra-topology.properties
rename to conf/cassandra-topology.properties.example
diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml
index 3e4da52..4b2711c 100644
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@ -61,14 +61,16 @@
 # this defines the maximum amount of time a dead host will have hints
 # generated.  After it has been dead this long, new hints for it will not be
 # created until it has been seen alive and gone down again.
-max_hint_window_in_ms: 10800000 # 3 hours
+# Min unit: ms
+max_hint_window: 3h
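+# (3h is equivalent to the previous default of 10800000 ms.)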
 
-# Maximum throttle in KBs per second, per delivery thread.  This will be
+# Maximum throttle in KiBs per second, per delivery thread.  This will be
 # reduced proportionally to the number of nodes in the cluster.  (If there
 # are two nodes in the cluster, each delivery thread will use the maximum
 # rate; if there are three, each will throttle to half of the maximum,
 # since we expect two nodes to be delivering hints simultaneously.)
-hinted_handoff_throttle_in_kb: 1024
+# Min unit: KiB
+hinted_handoff_throttle: 1024KiB
 
 # Number of threads with which to deliver hints;
 # Consider increasing this number when you have multi-dc deployments, since
@@ -81,10 +83,21 @@
 
 # How often hints should be flushed from the internal buffers to disk.
 # Will *not* trigger fsync.
-hints_flush_period_in_ms: 10000
+# Min unit: ms
+hints_flush_period: 10000ms
 
-# Maximum size for a single hints file, in megabytes.
-max_hints_file_size_in_mb: 128
+# Maximum size for a single hints file, in mebibytes.
+# Min unit: MiB
+max_hints_file_size: 128MiB
+
+# The file size limit for storing hints for an unreachable host, in mebibytes.
+# Once the local hint files have reached the limit, no more new hints will be created.
+# Setting a non-positive value disables the size limit.
+# max_hints_size_per_host: 0MiB
+
+# Enable / disable automatic cleanup of expired and orphaned hint files.
+# Disable the option in order to preserve those hints on disk.
+auto_hints_cleanup_enabled: false
 
 # Compression to apply to the hint files. If omitted, hints files
 # will be written uncompressed. LZ4, Snappy, and Deflate compressors
@@ -94,9 +107,24 @@
 #     parameters:
 #         -
 
-# Maximum throttle in KBs per second, total. This will be
+# Enable / disable persistent hint windows.
+#
+# If set to false, a hint will be stored only if the node it is for has been
+# down for no longer than max_hint_window.
+#
+# If set to true, a hint will be stored as long as no hint for that node is
+# older than max_hint_window. This covers the case of a node that keeps
+# restarting before its hints can be delivered; without a persistent window
+# we would keep saving hints for that node indefinitely.
+#
+# Defaults to true.
+#
+# hint_window_persistent_enabled: true
+
+# Maximum throttle in KiBs per second, total. This will be
 # reduced proportionally to the number of nodes in the cluster.
-batchlog_replay_throttle_in_kb: 1024
+# Min unit: KiB
+batchlog_replay_throttle: 1024KiB
 
 # Authentication backend, implementing IAuthenticator; used to identify users
 # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
@@ -139,35 +167,68 @@
 #   increase system_auth keyspace replication factor if you use this authorizer.
 network_authorizer: AllowAllNetworkAuthorizer
 
+# Depending on the auth strategy of the cluster, it can be beneficial to iterate
+# from root to table (root -> ks -> table) instead of table to root (table -> ks -> root).
+# As the auth entries are whitelisting, once a permission is found you know it to be
+# valid. We default to false as the legacy behavior is to query at the table level then
+# move back up to the root. See CASSANDRA-17016 for details.
+# traverse_auth_from_root: false
+
 # Validity period for roles cache (fetching granted roles can be an expensive
 # operation depending on the role manager, CassandraRoleManager is one example)
 # Granted roles are cached for authenticated sessions in AuthenticatedUser and
 # after the period specified here, become eligible for (async) reload.
 # Defaults to 2000, set to 0 to disable caching entirely.
 # Will be disabled automatically for AllowAllAuthenticator.
-roles_validity_in_ms: 2000
+# For a long-running cache using roles_cache_active_update, consider
+# setting to something longer such as a daily validation: 86400000ms
+# Min unit: ms
+roles_validity: 2000ms
 
 # Refresh interval for roles cache (if enabled).
 # After this interval, cache entries become eligible for refresh. Upon next
 # access, an async reload is scheduled and the old value returned until it
-# completes. If roles_validity_in_ms is non-zero, then this must be
+# completes. If roles_validity is non-zero, then this must be
 # also.
-# Defaults to the same value as roles_validity_in_ms.
-# roles_update_interval_in_ms: 2000
+# This setting is also used to inform the interval of auto-updating if
+# using roles_cache_active_update.
+# Defaults to the same value as roles_validity.
+# For a long-running cache, consider setting this to update hourly: 3600000ms
+# Min unit: ms
+# roles_update_interval: 2000ms
+
+# If true, cache contents are actively updated by a background task at the
+# interval set by roles_update_interval. If false, cache entries
+# become eligible for refresh after their update interval. Upon next access,
+# an async reload is scheduled and the old value returned until it completes.
+# roles_cache_active_update: false
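+#
+# For example (values are illustrative), a long-lived, actively refreshed roles cache could be configured as:
+# roles_validity: 86400000ms
+# roles_update_interval: 3600000ms
+# roles_cache_active_update: true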
 
 # Validity period for permissions cache (fetching permissions can be an
 # expensive operation depending on the authorizer, CassandraAuthorizer is
 # one example). Defaults to 2000, set to 0 to disable.
 # Will be disabled automatically for AllowAllAuthorizer.
-permissions_validity_in_ms: 2000
+# For a long-running cache using permissions_cache_active_update, consider
+# setting to something longer such as a daily validation: 86400000ms
+# Min unit: ms
+permissions_validity: 2000ms
 
 # Refresh interval for permissions cache (if enabled).
 # After this interval, cache entries become eligible for refresh. Upon next
 # access, an async reload is scheduled and the old value returned until it
-# completes. If permissions_validity_in_ms is non-zero, then this must be
+# completes. If permissions_validity is non-zero, then this must be
 # also.
-# Defaults to the same value as permissions_validity_in_ms.
-# permissions_update_interval_in_ms: 2000
+# This setting is also used to inform the interval of auto-updating if
+# using permissions_cache_active_update.
+# Defaults to the same value as permissions_validity.
+# For a longer-running permissions cache, consider setting to update hourly (3600000ms)
+# Min unit: ms
+# permissions_update_interval: 2000ms
+
+# If true, cache contents are actively updated by a background task at the
+# interval set by permissions_update_interval. If false, cache entries
+# become eligible for refresh after their update interval. Upon next access,
+# an async reload is scheduled and the old value returned until it completes.
+# permissions_cache_active_update: false
 
 # Validity period for credentials cache. This cache is tightly coupled to
 # the provided PasswordAuthenticator implementation of IAuthenticator. If
@@ -178,15 +239,28 @@
 # underlying table, it may not  bring a significant reduction in the
 # latency of individual authentication attempts.
 # Defaults to 2000, set to 0 to disable credentials caching.
-credentials_validity_in_ms: 2000
+# For a long-running cache using credentials_cache_active_update, consider
+# setting to something longer such as a daily validation: 86400000ms
+# Min unit: ms
+credentials_validity: 2000ms
 
 # Refresh interval for credentials cache (if enabled).
 # After this interval, cache entries become eligible for refresh. Upon next
 # access, an async reload is scheduled and the old value returned until it
-# completes. If credentials_validity_in_ms is non-zero, then this must be
+# completes. If credentials_validity is non-zero, then this must be
 # also.
-# Defaults to the same value as credentials_validity_in_ms.
-# credentials_update_interval_in_ms: 2000
+# This setting is also used to inform the interval of auto-updating if
+# using credentials_cache_active_update.
+# Defaults to the same value as credentials_validity.
+# For a longer-running credentials cache, consider setting this to update hourly (3600000ms)
+# Min unit: ms
+# credentials_update_interval: 2000ms
+
+# If true, cache contents are actively updated by a background task at the
+# interval set by credentials_update_interval. If false (default), cache entries
+# become eligible for refresh after their update interval. Upon next access,
+# an async reload is scheduled and the old value returned until it completes.
+# credentials_cache_active_update: false
 
 # The partitioner is responsible for distributing groups of rows (by
 # partition key) across nodes in the cluster. The partitioner can NOT be
@@ -286,8 +360,9 @@
 # fit in the cache. In most cases it is not necessary to change this value.
 # Constantly re-preparing statements is a performance penalty.
 #
-# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
-prepared_statements_cache_size_mb:
+# Default value ("auto") is 1/256th of the heap or 10MiB, whichever is greater
+# Min unit: MiB
+prepared_statements_cache_size:
 
 # Maximum size of the key cache in memory.
 #
@@ -300,8 +375,9 @@
 #
 # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
 #
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
+# Default value is empty to make it "auto" (min(5% of Heap (in MiB), 100MiB)). Set to 0 to disable key cache.
+# Min unit: MiB
+key_cache_size:
 
 # Duration in seconds after which Cassandra should
 # save the key cache. Caches are saved to saved_caches_directory as
@@ -312,7 +388,8 @@
 # has limited use.
 #
 # Default is 14400 or 4 hours.
-key_cache_save_period: 14400
+# Min unit: s
+key_cache_save_period: 4h
 
 # Number of keys from the key cache to save
 # Disabled by default, meaning all keys are going to be saved
@@ -336,7 +413,8 @@
 # headroom for OS block level cache. Never allow your system to swap.
 #
 # Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
+# Min unit: MiB
+row_cache_size: 0MiB
 
 # Duration in seconds after which Cassandra should save the row cache.
 # Caches are saved to saved_caches_directory as specified in this configuration file.
@@ -346,7 +424,8 @@
 # has limited use.
 #
 # Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
+# Min unit: s
+row_cache_save_period: 0s
 
 # Number of keys from the row cache to save.
 # Specify 0 (which is the default), meaning all keys are going to be saved
@@ -363,16 +442,18 @@
 #
 # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
 #
-# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MiB), 50MiB)). Set to 0 to disable counter cache.
 # NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
-counter_cache_size_in_mb:
+# Min unit: MiB
+counter_cache_size:
 
 # Duration in seconds after which Cassandra should
 # save the counter cache (keys only). Caches are saved to saved_caches_directory as
 # specified in this configuration file.
 #
 # Default is 7200 or 2 hours.
-counter_cache_save_period: 7200
+# Min unit: s
+counter_cache_save_period: 7200s
 
 # Number of keys from the counter cache to save
 # Disabled by default, meaning all keys are going to be saved
@@ -383,9 +464,10 @@
 # saved_caches_directory: /var/lib/cassandra/saved_caches
 
 # Number of seconds the server will wait for each cache (row, key, etc ...) to load while starting
-# the Cassandra process. Setting this to a negative value is equivalent to disabling all cache loading on startup
+# the Cassandra process. Setting this to zero is equivalent to disabling all cache loading on startup
 # while still having the cache during runtime.
-# cache_load_timeout_seconds: 30
+# Min unit: s
+# cache_load_timeout: 30s
 
 # commitlog_sync may be either "periodic", "group", or "batch." 
 # 
@@ -398,19 +480,22 @@
 #
 # group mode is similar to batch mode, where Cassandra will not ack writes
 # until the commit log has been flushed to disk. The difference is group
-# mode will wait up to commitlog_sync_group_window_in_ms between flushes.
+# mode will wait up to commitlog_sync_group_window between flushes.
 #
-# commitlog_sync_group_window_in_ms: 1000
+# Min unit: ms
+# commitlog_sync_group_window: 1000ms
 #
 # the default option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# and the CommitLog is simply synced every commitlog_sync_period
 # milliseconds.
 commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
+# Min unit: ms
+commitlog_sync_period: 10000ms
 
 # When in periodic commitlog mode, the number of milliseconds to block writes
 # while waiting for a slow disk flush to complete.
-# periodic_commitlog_sync_lag_block_in_ms: 
+# Min unit: ms
+# periodic_commitlog_sync_lag_block:
 
 # The size of the individual commitlog file segments.  A commitlog
 # segment may be archived, deleted, or recycled once all the data
@@ -421,14 +506,15 @@
 # archiving commitlog segments (see commitlog_archiving.properties),
 # then you probably want a finer granularity of archiving; 8 or 16 MB
 # is reasonable.
-# Max mutation size is also configurable via max_mutation_size_in_kb setting in
-# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024.
+# Max mutation size is also configurable via the max_mutation_size setting in
+# cassandra.yaml. The default is half of commitlog_segment_size, in bytes.
 # This should be positive and less than 2048.
 #
-# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
-# be set to at least twice the size of max_mutation_size_in_kb / 1024
+# NOTE: If max_mutation_size is set explicitly then commitlog_segment_size must
+# be set to at least twice the size of max_mutation_size
 #
-commitlog_segment_size_in_mb: 32
+# Min unit: MiB
+commitlog_segment_size: 32MiB
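+#
+# Worked example (for illustration only, these are not extra settings): with the
+# default commitlog_segment_size of 32MiB the implied max_mutation_size is 16MiB;
+# conversely, explicitly setting max_mutation_size: 24MiB would require
+# commitlog_segment_size: 48MiB or larger.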
 
 # Compression to apply to the commit log. If omitted, the commit log
 # will be written uncompressed.  LZ4, Snappy, and Deflate compressors
@@ -456,15 +542,15 @@
 # any class that implements the SeedProvider interface and has a
 # constructor that takes a Map<String, String> of parameters will do.
 seed_provider:
-    # Addresses of hosts that are deemed contact points. 
-    # Cassandra nodes use this list of hosts to find each other and learn
-    # the topology of the ring.  You must change this if you are running
-    # multiple nodes!
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          # seeds is actually a comma-delimited list of addresses.
-          # Ex: "<ip1>,<ip2>,<ip3>"
-          - seeds: "127.0.0.1:7000"
+  # Addresses of hosts that are deemed contact points.
+  # Cassandra nodes use this list of hosts to find each other and learn
+  # the topology of the ring.  You must change this if you are running
+  # multiple nodes!
+  - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+    parameters:
+      # seeds is actually a comma-delimited list of addresses.
+      # Ex: "<ip1>,<ip2>,<ip3>"
+      - seeds: "127.0.0.1:7000"
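+      # As a purely illustrative example, a three-node cluster might instead use:
+      # - seeds: "10.0.0.1:7000,10.0.0.2:7000,10.0.0.3:7000"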
 
 # For workloads with more data than can fit in memory, Cassandra's
 # bottleneck will be reads that need to fetch data from
@@ -492,7 +578,8 @@
 # overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
 # if the default 64k chunk size is used).
 # Memory is only allocated when needed.
-# networking_cache_size_in_mb: 128
+# Min unit: MiB
+# networking_cache_size: 128MiB
 
 # Enable the sstable chunk cache.  The chunk cache will store recently accessed
 # sections of the sstable in-memory as uncompressed buffers.
@@ -506,11 +593,12 @@
 # overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
 # if the default 64k chunk size is used).
 # Memory is only allocated when needed.
-# file_cache_size_in_mb: 512
+# Min unit: MiB
+# file_cache_size: 512MiB
 
 # Flag indicating whether to allocate on or off heap when the sstable buffer
 # pool is exhausted, that is when it has exceeded the maximum memory
-# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
+# file_cache_size, beyond which it will not cache buffers but allocate on request.
 
 # buffer_pool_use_heap_if_exhausted: true
 
@@ -524,8 +612,10 @@
 # accepting writes when the limit is exceeded until a flush completes,
 # and will trigger a flush based on memtable_cleanup_threshold
 # If omitted, Cassandra will set both to 1/4 the size of the heap.
-# memtable_heap_space_in_mb: 2048
-# memtable_offheap_space_in_mb: 2048
+# Min unit: MiB
+# memtable_heap_space: 2048MiB
+# Min unit: MiB
+# memtable_offheap_space: 2048MiB
 
 # memtable_cleanup_threshold is deprecated. The default calculation
 # is the only reasonable choice. See the comments on  memtable_flush_writers
@@ -557,12 +647,13 @@
 # is 1/16th of the available heap. The main tradeoff is that smaller trees
 # have less resolution, which can lead to over-streaming data. If you see heap
 # pressure during repairs, consider lowering this, but you cannot go below
-# one megabyte. If you see lots of over-streaming, consider raising
+# one mebibyte. If you see lots of over-streaming, consider raising
 # this or using subrange repair.
 #
 # For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.
 #
-# repair_session_space_in_mb:
+# Min unit: MiB
+# repair_session_space:
 
 # Total space to use for commit logs on disk.
 #
@@ -573,7 +664,7 @@
 # The default value is the smaller of 8192, and 1/4 of the total space
 # of the commitlog volume.
 #
-# commitlog_total_space_in_mb: 8192
+# commitlog_total_space: 8192MiB
 
 # This sets the number of memtable flush writer threads per disk
 # as well as the total number of memtables that can be flushed concurrently.
@@ -602,7 +693,7 @@
 # and flush size and frequency. More is not better; you just need enough flush writers
 # to never stall waiting for flushing to free memory.
 #
-#memtable_flush_writers: 2
+# memtable_flush_writers: 2
 
 # Total space to use for change-data-capture logs on disk.
 #
@@ -610,14 +701,16 @@
 # on Mutations including tables with CDC enabled. A CDCCompactor is responsible
 # for parsing the raw CDC logs and deleting them when parsing is completed.
 #
-# The default value is the min of 4096 mb and 1/8th of the total space
+# The default value is the min of 4096 MiB and 1/8th of the total space
 # of the drive where cdc_raw_directory resides.
-# cdc_total_space_in_mb: 4096
+# Min unit: MiB
+# cdc_total_space: 4096MiB
 
 # When we hit our cdc_raw limit and the CDCCompactor is either running behind
 # or experiencing backpressure, we check at the following interval to see if any
 # new space for cdc-tracked tables has been made available. Default to 250ms
-# cdc_free_space_check_interval_ms: 250
+# Min unit: ms
+# cdc_free_space_check_interval: 250ms
 
 # A fixed memory pool size in MB for SSTable index summaries. If left
 # empty, this will default to 5% of the heap size. If the memory usage of
@@ -625,13 +718,15 @@
 # shrink their index summaries in order to meet this limit.  However, this
 # is a best-effort process. In extreme conditions Cassandra may need to use
 # more than this amount of memory.
-index_summary_capacity_in_mb:
+# Min unit: KiB
+index_summary_capacity:
 
 # How frequently index summaries should be resampled.  This is done
 # periodically to redistribute memory from the fixed-size pool to sstables
-# proportional their recent read rates.  Setting to -1 will disable this
+# proportional to their recent read rates. Setting this to null will disable this
 # process, leaving existing index summaries at their current sampling level.
-index_summary_resize_interval_in_minutes: 60
+# Min unit: m
+index_summary_resize_interval: 60m
 
 # Whether to, when doing sequential writing, fsync() at intervals in
 # order to force the operating system to flush the dirty
@@ -639,7 +734,8 @@
 # impacting read latencies. Almost always a good idea on SSDs; not
 # necessarily on platters.
 trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
+# Min unit: KiB
+trickle_fsync_interval: 10240KiB
 
 # TCP port, for commands and data
 # For security reasons, you should not expose this port to the internet.  Firewall it if needed.
@@ -711,9 +807,10 @@
 # native_transport_max_threads: 128
 #
 # The maximum size of allowed frame. Frame (requests) larger than this will
-# be rejected as invalid. The default is 256MB. If you're changing this parameter,
-# you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.
-# native_transport_max_frame_size_in_mb: 256
+# be rejected as invalid. The default is 16MiB. If you're changing this parameter,
+# you may want to adjust max_value_size accordingly. This should be positive and less than 2048.
+# Min unit: MiB
+# native_transport_max_frame_size: 16MiB
 
 # The maximum number of concurrent client connections.
 # The default is -1, which means unlimited.
@@ -735,7 +832,18 @@
 # values for heartbeat intervals have to be set on the client side.
 #
 # Idle connection timeouts are disabled by default.
-# native_transport_idle_timeout_in_ms: 60000
+# Min unit: ms
+# native_transport_idle_timeout: 60000ms
+
+# When enabled, limits the number of native transport requests dispatched for processing per second.
+# Behavior once the limit has been breached depends on the value of THROW_ON_OVERLOAD specified in
+# the STARTUP message sent by the client during connection establishment. (See section "4.1.1. STARTUP"
+# in "CQL BINARY PROTOCOL v5".) With the THROW_ON_OVERLOAD flag enabled, messages that breach the limit
+# are dropped, and an OverloadedException is thrown for the client to handle. When the flag is not
+# enabled, the server will stop consuming messages from the channel/socket, putting backpressure on
+# the client while already dispatched messages are processed.
+# native_transport_rate_limiting_enabled: false
+# native_transport_max_requests_per_second: 1000000
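+#
+# For example (values are illustrative, not recommendations), rate limiting could
+# be enabled with a lower ceiling as follows:
+# native_transport_rate_limiting_enabled: true
+# native_transport_max_requests_per_second: 5000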
 
 # The address or interface to bind the native transport server to.
 #
@@ -778,12 +886,14 @@
 # /proc/sys/net/ipv4/tcp_wmem
 # /proc/sys/net/ipv4/tcp_wmem
 # and 'man tcp'
-# internode_socket_send_buffer_size_in_bytes:
+# Min unit: B
+# internode_socket_send_buffer_size:
 
 # Uncomment to set socket buffer size for internode communication
 # Note that when setting this, the buffer size is limited by net.core.wmem_max
 # and when not setting it it is defined by net.ipv4.tcp_wmem
-# internode_socket_receive_buffer_size_in_bytes:
+# Min unit: B
+# internode_socket_receive_buffer_size:
 
 # Set to true to have Cassandra create a hard link to each sstable
 # flushed or streamed locally in a backups/ subdirectory of the
@@ -803,6 +913,14 @@
 # lose data on truncation or drop.
 auto_snapshot: true
 
+# Adds a time-to-live (TTL) to auto snapshots generated by table
+# truncation or drop (when enabled).
+# After the TTL is elapsed, the snapshot is automatically cleared.
+# By default, auto snapshots *do not* have TTL, uncomment the property below
+# to enable TTL on auto snapshots.
+# Accepted units: d (days), h (hours) or m (minutes)
+# auto_snapshot_ttl: 30d
+
 # The act of creating or clearing a snapshot involves creating or removing
 # potentially tens of thousands of links, which can cause significant performance
 # impact, especially on consumer grade SSDs. A non-zero value here can
@@ -820,7 +938,8 @@
 # - but, Cassandra will keep the collation index in memory for hot
 #   rows (as part of the key cache), so a larger granularity means
 #   you can cache more hot rows
-column_index_size_in_kb: 64
+# Min unit: KiB
+column_index_size: 64KiB
 
 # Per sstable indexed key cache entries (the collation index in memory
 # mentioned above) exceeding this size will not be held on heap.
@@ -829,7 +948,8 @@
 #
 # Note that this size refers to the size of the
 # serialized index information and not the size of the partition.
-column_index_cache_size_in_kb: 2
+# Min unit: KiB
+column_index_cache_size: 2KiB
 
 # Number of simultaneous compactions to allow, NOT including
 # validation "compactions" for anti-entropy repair.  Simultaneous
@@ -838,14 +958,14 @@
 # during a single long running compactions. The default is usually
 # fine and if you experience problems with compaction running too
 # slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
+# compaction_throughput first.
 #
 # concurrent_compactors defaults to the smaller of (number of disks,
 # number of cores), with a minimum of 2 and a maximum of 8.
 # 
 # If your data directories are backed by SSD, you should increase this
 # to the number of cores.
-#concurrent_compactors: 1
+# concurrent_compactors: 1
 
 # Number of simultaneous repair validations to allow. If not set or set to
 # a value less than 1, it defaults to the value of concurrent_compactors.
@@ -866,37 +986,60 @@
 # Setting this to 0 disables throttling. Note that this accounts for all types
 # of compaction, including validation compaction (building Merkle trees
 # for repairs).
-compaction_throughput_mb_per_sec: 64
+compaction_throughput: 64MiB/s
 
 # When compacting, the replacement sstable(s) can be opened before they
 # are completely written, and used in place of the prior sstables for
 # any range that has been written. This helps to smoothly transfer reads 
 # between the sstables, reducing page cache churn and keeping hot rows hot
-sstable_preemptive_open_interval_in_mb: 50
+# Set sstable_preemptive_open_interval to null to disable, which is equivalent to
+# the old sstable_preemptive_open_interval_in_mb being negative.
+# Min unit: MiB
+sstable_preemptive_open_interval: 50MiB
+
+# Starting from 4.1 sstables support UUID based generation identifiers. They are disabled by default
+# because once enabled, there is no easy way to downgrade. When the node is restarted with this option
+# set to true, each newly created sstable will have a UUID based generation identifier and such files are
+# not readable by previous Cassandra versions. At some point, this option will become true by default
+# and eventually get removed from the configuration.
+uuid_sstable_identifiers_enabled: false
 
 # When enabled, permits Cassandra to zero-copy stream entire eligible
 # SSTables between nodes, including every component.
 # This speeds up the network transfer significantly subject to
-# throttling specified by stream_throughput_outbound_megabits_per_sec.
+# throttling specified by entire_sstable_stream_throughput_outbound,
+# and entire_sstable_inter_dc_stream_throughput_outbound
+# for inter-DC transfers.
 # Enabling this will reduce the GC pressure on sending and receiving node.
 # When unset, the default is enabled. While this feature tries to keep the
 # disks balanced, it cannot guarantee it. This feature will be automatically
 # disabled if internode encryption is enabled.
 # stream_entire_sstables: true
 
+# Throttles entire SSTable outbound streaming file transfers on
+# this node to the given total throughput in Mbps.
+# Setting this value to 0 disables throttling.
+# When unset, the default is 200 Mbps or 24 MiB/s.
+# entire_sstable_stream_throughput_outbound: 24MiB/s
+
+# Throttles entire SSTable file streaming between datacenters.
+# Setting this value to 0 disables throttling for entire SSTable inter-DC file streaming.
+# When unset, the default is 200 Mbps or 24 MiB/s.
+# entire_sstable_inter_dc_stream_throughput_outbound: 24MiB/s
+
 # Throttles all outbound streaming file transfers on this node to the
 # given total throughput in Mbps. This is necessary because Cassandra does
 # mostly sequential IO when streaming data during bootstrap or repair, which
 # can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 200 Mbps or 25 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 200
+# When unset, the default is 200 Mbps or 24 MiB/s.
+# stream_throughput_outbound: 24MiB/s
 
 # Throttles all streaming file transfer between the datacenters,
 # this setting allows users to throttle inter dc stream throughput in addition
 # to throttling all network stream traffic as configured with
 # stream_throughput_outbound_megabits_per_sec
-# When unset, the default is 200 Mbps or 25 MB/s
-# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+# When unset, the default is 200 Mbps or 24 MiB/s.
+# inter_dc_stream_throughput_outbound: 24MiB/s
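+#
+# Conversion note (arithmetic shown for illustration): 200 Mbps is
+# 200,000,000 bits/s = 25,000,000 bytes/s, and 25,000,000 / 1,048,576 is roughly
+# 23.8 MiB/s, which is rounded to the 24MiB/s default quoted above.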
 
 # Server side timeouts for requests. The server will return a timeout exception
 # to the client if it can't complete an operation within the corresponding
@@ -905,52 +1048,62 @@
 #      failures.
 #   2) operations that use too much CPU/read too much data (leading to memory build
 #      up) by putting a limit to how long an operation will execute.
-# For this reason, you should avoid putting these settings too high. In other words, 
-# if you are timing out requests because of underlying resource constraints then 
-# increasing the timeout will just cause more problems. Of course putting them too 
-# low is equally ill-advised since clients could get timeouts even for successful 
+# For this reason, you should avoid putting these settings too high. In other words,
+# if you are timing out requests because of underlying resource constraints then
+# increasing the timeout will just cause more problems. Of course putting them too
+# low is equally ill-advised since clients could get timeouts even for successful
 # operations just because the timeout setting is too tight.
 
 # How long the coordinator should wait for read operations to complete.
 # Lowest acceptable value is 10 ms.
-read_request_timeout_in_ms: 5000
+# Min unit: ms
+read_request_timeout: 5000ms
 # How long the coordinator should wait for seq or index scans to complete.
 # Lowest acceptable value is 10 ms.
-range_request_timeout_in_ms: 10000
+# Min unit: ms
+range_request_timeout: 10000ms
 # How long the coordinator should wait for writes to complete.
 # Lowest acceptable value is 10 ms.
-write_request_timeout_in_ms: 2000
+# Min unit: ms
+write_request_timeout: 2000ms
 # How long the coordinator should wait for counter writes to complete.
 # Lowest acceptable value is 10 ms.
-counter_write_request_timeout_in_ms: 5000
+# Min unit: ms
+counter_write_request_timeout: 5000ms
 # How long a coordinator should continue to retry a CAS operation
 # that contends with other proposals for the same row.
 # Lowest acceptable value is 10 ms.
-cas_contention_timeout_in_ms: 1000
+# Min unit: ms
+cas_contention_timeout: 1000ms
 # How long the coordinator should wait for truncates to complete
 # (This can be much longer, because unless auto_snapshot is disabled
 # we need to flush first so we can snapshot before removing the data.)
 # Lowest acceptable value is 10 ms.
-truncate_request_timeout_in_ms: 60000
+# Min unit: ms
+truncate_request_timeout: 60000ms
 # The default timeout for other, miscellaneous operations.
 # Lowest acceptable value is 10 ms.
-request_timeout_in_ms: 10000
+# Min unit: ms
+request_timeout: 10000ms
 
 # Defensive settings for protecting Cassandra from true network partitions.
 # See (CASSANDRA-14358) for details.
 #
 # The amount of time to wait for internode tcp connections to establish.
-# internode_tcp_connect_timeout_in_ms: 2000
+# Min unit: ms
+# internode_tcp_connect_timeout: 2000ms
 #
 # The amount of time unacknowledged data is allowed on a connection before we throw out the connection
 # Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000
 # (it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0
 # which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8.
-# internode_tcp_user_timeout_in_ms: 30000
+# Min unit: ms
+# internode_tcp_user_timeout: 30000ms
 
 # The amount of time unacknowledged data is allowed on a streaming connection.
 # The default is 5 minutes. Increase it or set it to 0 in order to increase the timeout.
-# internode_streaming_tcp_user_timeout_in_ms: 300000
+# Min unit: ms
+# internode_streaming_tcp_user_timeout: 300000ms
 
 # Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes
 # and waiting to be processed on arrival from other nodes in the cluster.  These limits are applied to the on-wire
@@ -958,7 +1111,7 @@
 #
 # The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed.
 # Each node-pair has three links: urgent, small and large.  So any given node may have a maximum of
-# N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes)
+# N*3*(internode_application_send_queue_capacity+internode_application_receive_queue_capacity)
 # messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens
 # nodes should need to communicate with significant bandwidth.
 #
@@ -967,18 +1120,20 @@
 # The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit,
 # on all links to or from any node in the cluster.
 #
-# internode_application_send_queue_capacity_in_bytes: 4194304                       #4MiB
-# internode_application_send_queue_reserve_endpoint_capacity_in_bytes: 134217728    #128MiB
-# internode_application_send_queue_reserve_global_capacity_in_bytes: 536870912      #512MiB
-# internode_application_receive_queue_capacity_in_bytes: 4194304                    #4MiB
-# internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB
-# internode_application_receive_queue_reserve_global_capacity_in_bytes: 536870912   #512MiB
+# Min unit: B
+# internode_application_send_queue_capacity: 4MiB
+# internode_application_send_queue_reserve_endpoint_capacity: 128MiB
+# internode_application_send_queue_reserve_global_capacity: 512MiB
+# internode_application_receive_queue_capacity: 4MiB
+# internode_application_receive_queue_reserve_endpoint_capacity: 128MiB
+# internode_application_receive_queue_reserve_global_capacity: 512MiB
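+#
+# Worked example (illustrative arithmetic only): with the defaults above, each of
+# the three links to a peer may queue 4MiB outbound plus 4MiB inbound before the
+# reserves are touched, i.e. 3 * (4MiB + 4MiB) = 24MiB per peer; a node with N
+# peers could therefore queue up to N * 24MiB under the per-link limits alone,
+# with the endpoint (128MiB) and global (512MiB) reserves absorbing overflow
+# beyond the per-link limits.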
 
 
 # How long before a node logs slow queries. Select queries that take longer than
 # this timeout to execute, will generate an aggregated log message, so that slow queries
 # can be identified. Set this value to zero to disable slow query logging.
-slow_query_log_timeout_in_ms: 500
+# Min unit: ms
+slow_query_log_timeout: 500ms
 
 # Enable operation timeout information exchange between nodes to accurately
 # measure request timeouts.  If disabled, replicas will assume that requests
@@ -988,21 +1143,66 @@
 #
 # Warning: It is generally assumed that users have setup NTP on their clusters, and that clocks are modestly in sync, 
 # since this is a requirement for general correctness of last write wins.
-#cross_node_timeout: true
+# internode_timeout: true
 
-# Set keep-alive period for streaming
-# This node will send a keep-alive message periodically with this period.
-# If the node does not receive a keep-alive message from the peer for
-# 2 keep-alive cycles the stream session times out and fail
-# Default value is 300s (5 minutes), which means stalled stream
-# times out in 10 minutes by default
-# streaming_keep_alive_period_in_secs: 300
+# Set period for idle state control messages for earlier detection of failed streams
+# This node will send a keep-alive message periodically on the streaming control channel.
+# This ensures that any eventual SocketTimeoutException will occur within 2 keep-alive cycles.
+# If the node cannot send, or times out sending, the keep-alive message on the netty control channel,
+# the stream session is closed.
+# Default value is 300s (5 minutes), which means stalled streams
+# are detected within 10 minutes
+# Specify 0 to disable.
+# Min unit: s
+# streaming_keep_alive_period: 300s
 
 # Limit number of connections per host for streaming
 # Increase this when you notice that joins are CPU-bound rather that network
 # bound (for example a few nodes with big files).
 # streaming_connections_per_host: 1
 
+# Settings for stream stats tracking; used by system_views.streaming table
+# How long before a stream is evicted from tracking; this impacts both historic and currently running
+# streams.
+# streaming_state_expires: 3d
+# How much memory may be used for tracking before evicting sessions from tracking; once crossed,
+# historic and currently running streams may be impacted.
+# streaming_state_size: 40MiB
+# Enable/Disable tracking of streaming stats
+# streaming_stats_enabled: true
+
+# Allows denying configurable access (rw/rr) to operations on configured keyspaces, tables, and partitions, intended for use by
+# operators to manage cluster health vs application access. See CASSANDRA-12106 and CEP-13 for more details.
+# partition_denylist_enabled: false
+
+# denylist_writes_enabled: true
+# denylist_reads_enabled: true
+# denylist_range_reads_enabled: true
+
+# The interval at which keys in the denylist cache will "expire" and be asynchronously refreshed from the backing DB.
+# Note: this serves only as a fail-safe, as the usage pattern is expected to be "mutate state, refresh cache" on any
+# changes to the underlying denylist entries. See documentation for details.
+# Min unit: s
+# denylist_refresh: 600s
+
+# In the event of errors on attempting to load the denylist cache, retry on this interval.
+# Min unit: s
+# denylist_initial_load_retry: 5s
+
+# We cap the number of denylisted keys allowed per table to keep things from growing unbounded. Nodes will warn above
+# this limit while allowing new denylisted keys to be inserted. Denied keys are loaded in natural query / clustering
+# ordering by partition key in case of overflow.
+# denylist_max_keys_per_table: 1000
+
+# We cap the total number of denylisted keys allowed in the cluster to keep things from growing unbounded.
+# Nodes will warn on initial cache load that there are too many keys and direct the operator to trim down excess
+# entries to within the configured limits.
+# denylist_max_keys_total: 10000
+
+# Since the denylist in many ways serves to protect the health of the cluster from partitions operators have identified
+# as being in a bad state, we usually want more robustness than just CL.ONE on operations to/from these tables to
+# ensure that these safeguards are in place. That said, we allow users to configure this if they're so inclined.
+# denylist_consistency_level: QUORUM
 
 # phi value that must be reached for a host to be marked down.
 # most users should never need to adjust this.
@@ -1075,10 +1275,12 @@
 
 # controls how often to perform the more expensive part of host score
 # calculation
-dynamic_snitch_update_interval_in_ms: 100 
+# Min unit: ms
+dynamic_snitch_update_interval: 100ms
 # controls how often to reset all host scores, allowing a bad host to
 # possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
+# Min unit: ms
+dynamic_snitch_reset_interval: 600000ms
 # if set greater than zero, this will allow
 # 'pinning' of replicas to hosts in order to increase cache capacity.
 # The badness threshold will control how much worse the pinned host has to be
@@ -1111,39 +1313,44 @@
 # Step 2: Set optional=false (or remove it) and if you generated truststores and want to use mutual
 # auth set require_client_auth=true. Restart all nodes
 server_encryption_options:
-    # On outbound connections, determine which type of peers to securely connect to.
-    #   The available options are :
-    #     none : Do not encrypt outgoing connections
-    #     dc   : Encrypt connections to peers in other datacenters but not within datacenters
-    #     rack : Encrypt connections to peers in other racks but not within racks
-    #     all  : Always use encrypted connections
-    internode_encryption: none
-    # When set to true, encrypted and unencrypted connections are allowed on the storage_port
-    # This should _only be true_ while in unencrypted or transitional operation
-    # optional defaults to true if internode_encryption is none
-    # optional: true
-    # If enabled, will open up an encrypted listening socket on ssl_storage_port. Should only be used
-    # during upgrade to 4.0; otherwise, set to false.
-    enable_legacy_ssl_storage_port: false
-    # Set to a valid keystore if internode_encryption is dc, rack or all
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # Verify peer server certificates
-    require_client_auth: false
-    # Set to a valid trustore if require_client_auth is true
-    truststore: conf/.truststore
-    truststore_password: cassandra
-    # Verify that the host name in the certificate matches the connected host
-    require_endpoint_verification: false
-    # More advanced defaults:
-    # protocol: TLS
-    # store_type: JKS
-    # cipher_suites: [
-    #   TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-    #   TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-    #   TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA,
-    #   TLS_RSA_WITH_AES_256_CBC_SHA
-    # ]
+  # On outbound connections, determine which type of peers to securely connect to.
+  #   The available options are :
+  #     none : Do not encrypt outgoing connections
+  #     dc   : Encrypt connections to peers in other datacenters but not within datacenters
+  #     rack : Encrypt connections to peers in other racks but not within racks
+  #     all  : Always use encrypted connections
+  internode_encryption: none
+  # When set to true, encrypted and unencrypted connections are allowed on the storage_port
+  # This should _only be true_ while in unencrypted or transitional operation
+  # optional defaults to true if internode_encryption is none
+  # optional: true
+  # If enabled, will open up an encrypted listening socket on ssl_storage_port. Should only be used
+  # during upgrade to 4.0; otherwise, set to false.
+  legacy_ssl_storage_port_enabled: false
+  # Set to a valid keystore if internode_encryption is dc, rack or all
+  keystore: conf/.keystore
+  keystore_password: cassandra
+  # Configure the way Cassandra creates SSL contexts.
+  # To use PEM-based key material, see org.apache.cassandra.security.PEMBasedSslContextFactory
+  # ssl_context_factory:
+  #     # Must be an instance of org.apache.cassandra.security.ISslContextFactory
+  #     class_name: org.apache.cassandra.security.DefaultSslContextFactory
+  # Verify peer server certificates
+  require_client_auth: false
+  # Set to a valid trustore if require_client_auth is true
+  truststore: conf/.truststore
+  truststore_password: cassandra
+  # Verify that the host name in the certificate matches the connected host
+  require_endpoint_verification: false
+  # More advanced defaults:
+  # protocol: TLS
+  # store_type: JKS
+  # cipher_suites: [
+  #   TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+  #   TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+  #   TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA,
+  #   TLS_RSA_WITH_AES_256_CBC_SHA
+  # ]
 
 # Configure client-to-server encryption.
 #
@@ -1158,29 +1365,34 @@
 # Step 2: Set optional=false (or remove it) and if you generated truststores and want to use mutual
 # auth set require_client_auth=true. Restart all nodes
 client_encryption_options:
-    # Enable client-to-server encryption
-    enabled: false
-    # When set to true, encrypted and unencrypted connections are allowed on the native_transport_port
-    # This should _only be true_ while in unencrypted or transitional operation
-    # optional defaults to true when enabled is false, and false when enabled is true.
-    # optional: true
-    # Set keystore and keystore_password to valid keystores if enabled is true
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # Verify client certificates
-    require_client_auth: false
-    # Set trustore and truststore_password if require_client_auth is true
-    # truststore: conf/.truststore
-    # truststore_password: cassandra
-    # More advanced defaults:
-    # protocol: TLS
-    # store_type: JKS
-    # cipher_suites: [
-    #   TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-    #   TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-    #   TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA,
-    #   TLS_RSA_WITH_AES_256_CBC_SHA
-    # ]
+  # Enable client-to-server encryption
+  enabled: false
+  # When set to true, encrypted and unencrypted connections are allowed on the native_transport_port
+  # This should _only be true_ while in unencrypted or transitional operation
+  # optional defaults to true when enabled is false, and false when enabled is true.
+  # optional: true
+  # Set keystore and keystore_password to valid keystores if enabled is true
+  keystore: conf/.keystore
+  keystore_password: cassandra
+  # Configure the way Cassandra creates SSL contexts.
+  # To use PEM-based key material, see org.apache.cassandra.security.PEMBasedSslContextFactory
+  # ssl_context_factory:
+  #     # Must be an instance of org.apache.cassandra.security.ISslContextFactory
+  #     class_name: org.apache.cassandra.security.DefaultSslContextFactory
+  # Verify client certificates
+  require_client_auth: false
+  # Set trustore and truststore_password if require_client_auth is true
+  # truststore: conf/.truststore
+  # truststore_password: cassandra
+  # More advanced defaults:
+  # protocol: TLS
+  # store_type: JKS
+  # cipher_suites: [
+  #   TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+  #   TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+  #   TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA,
+  #   TLS_RSA_WITH_AES_256_CBC_SHA
+  # ]
 
 # internode_compression controls whether traffic between nodes is
 # compressed.
@@ -1203,28 +1415,22 @@
 inter_dc_tcp_nodelay: false
 
 # TTL for different trace types used during logging of the repair process.
-tracetype_query_ttl: 86400
-tracetype_repair_ttl: 604800
+# Min unit: s
+trace_type_query_ttl: 1d
+# Min unit: s
+trace_type_repair_ttl: 7d
 
-# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
+# If unset, all GC Pauses greater than gc_log_threshold will log at
 # INFO level
 # UDFs (user defined functions) are disabled by default.
 # As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
-enable_user_defined_functions: false
+user_defined_functions_enabled: false
 
 # Enables scripted UDFs (JavaScript UDFs).
-# Java UDFs are always enabled, if enable_user_defined_functions is true.
+# Java UDFs are always enabled, if user_defined_functions_enabled is true.
 # Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
-# This option has no effect, if enable_user_defined_functions is false.
-enable_scripted_user_defined_functions: false
-
-# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
-# Lowering this value on Windows can provide much tighter latency and better throughput, however
-# some virtualized environments may see a negative performance impact from changing this setting
-# below their system default. The sysinternals 'clockres' tool can confirm your system's default
-# setting.
-windows_timer_interval: 1
-
+# This option has no effect, if user_defined_functions_enabled is false.
+scripted_user_defined_functions_enabled: false
 
 # Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
 # a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
@@ -1239,19 +1445,19 @@
 # Currently, only the following file types are supported for transparent data encryption, although
 # more are coming in future cassandra releases: commitlog, hints
 transparent_data_encryption_options:
-    enabled: false
-    chunk_length_kb: 64
-    cipher: AES/CBC/PKCS5Padding
-    key_alias: testing:1
-    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
-    # iv_length: 16
-    key_provider:
-      - class_name: org.apache.cassandra.security.JKSKeyProvider
-        parameters:
-          - keystore: conf/.keystore
-            keystore_password: cassandra
-            store_type: JCEKS
-            key_password: cassandra
+  enabled: false
+  chunk_length_kb: 64
+  cipher: AES/CBC/PKCS5Padding
+  key_alias: testing:1
+  # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+  # iv_length: 16
+  key_provider:
+    - class_name: org.apache.cassandra.security.JKSKeyProvider
+      parameters:
+        - keystore: conf/.keystore
+          keystore_password: cassandra
+          store_type: JCEKS
+          key_password: cassandra
 
 
 #####################
@@ -1290,32 +1496,54 @@
     cached_rows_warn_threshold: 2000
     cached_rows_fail_threshold: 32000
 
-# Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
+# Log WARN on any multiple-partition batch size exceeding this value. 5KiB per batch by default.
 # Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-batch_size_warn_threshold_in_kb: 5
+# Min unit: KiB
+batch_size_warn_threshold: 5KiB
 
-# Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
-batch_size_fail_threshold_in_kb: 50
+# Fail any multiple-partition batch exceeding this value. 50KiB (10x warn threshold) by default.
+# Min unit: KiB
+batch_size_fail_threshold: 50KiB
 
 # Log WARN on any batches not of type LOGGED that span across more partitions than this limit
 unlogged_batch_across_partitions_warn_threshold: 10
 
 # Log a warning when compacting partitions larger than this value
-compaction_large_partition_warning_threshold_mb: 100
+compaction_large_partition_warning_threshold: 100MiB
+
+# Log a warning when writing more tombstones than this value to a partition
+compaction_tombstone_warning_threshold: 100000
 
 # GC Pauses greater than 200 ms will be logged at INFO level
 # This threshold can be adjusted to minimize logging if necessary
-# gc_log_threshold_in_ms: 200
+# Min unit: ms
+# gc_log_threshold: 200ms
 
-# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# GC Pauses greater than gc_warn_threshold will be logged at WARN level
 # Adjust the threshold based on your application throughput requirement. Setting to 0
 # will deactivate the feature.
-# gc_warn_threshold_in_ms: 1000
+# Min unit: ms
+# gc_warn_threshold: 1000ms
 
 # Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
 # early. Any value size larger than this threshold will result into marking an SSTable
-# as corrupted. This should be positive and less than 2048.
-# max_value_size_in_mb: 256
+# as corrupted. This should be positive and less than 2GiB.
+# Min unit: MiB
+# max_value_size: 256MiB
+
+# ** Impact on keyspace creation **
+# If a replication factor is not specified as part of keyspace creation, default_keyspace_rf applies.
+# Changing this configuration only takes effect for keyspaces created after the change; it does not impact
+# keyspaces created prior to the change.
+# ** Impact on keyspace alter **
+# When altering a keyspace from NetworkTopologyStrategy to SimpleStrategy, default_keyspace_rf is applied if rf is not
+# explicitly mentioned.
+# ** Impact on system keyspaces **
+# This would also apply to any system keyspaces that need a replication factor.
+# A further note about system keyspaces - system_traces and system_distributed keyspaces take RF of 2 or default,
+# whichever is higher, and system_auth keyspace takes RF of 1 or default, whichever is higher.
+# Suggested value for use in production: 3
+# default_keyspace_rf: 1
 
 # Track a metric per keyspace indicating whether replication achieved the ideal consistency
 # level for writes without timing out. This is different from the consistency level requested by
@@ -1331,38 +1559,38 @@
 # Audit logging - Logs every incoming CQL command request, authentication to a node. See the docs
 # on audit_logging for full details about the various configuration options.
 audit_logging_options:
-    enabled: false
-    logger:
-      - class_name: BinAuditLogger
-    # audit_logs_dir:
-    # included_keyspaces:
-    # excluded_keyspaces: system, system_schema, system_virtual_schema
-    # included_categories:
-    # excluded_categories:
-    # included_users:
-    # excluded_users:
-    # roll_cycle: HOURLY
-    # block: true
-    # max_queue_weight: 268435456 # 256 MiB
-    # max_log_size: 17179869184 # 16 GiB
-    ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
-    # archive_command:
-    # max_archive_retries: 10
+  enabled: false
+  logger:
+    - class_name: BinAuditLogger
+  # audit_logs_dir:
+  # included_keyspaces:
+  # excluded_keyspaces: system, system_schema, system_virtual_schema
+  # included_categories:
+  # excluded_categories:
+  # included_users:
+  # excluded_users:
+  # roll_cycle: HOURLY
+  # block: true
+  # max_queue_weight: 268435456 # 256 MiB
+  # max_log_size: 17179869184 # 16 GiB
+  ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
+  # archive_command:
+  # max_archive_retries: 10
 
 
 # default options for full query logging - these can be overridden from command line when executing
 # nodetool enablefullquerylog
-#full_query_logging_options:
-    # log_dir:
-    # roll_cycle: HOURLY
-    # block: true
-    # max_queue_weight: 268435456 # 256 MiB
-    # max_log_size: 17179869184 # 16 GiB
-    ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
-    # archive_command:
-    ## note that enabling this allows anyone with JMX/nodetool access to run local shell commands as the user running cassandra
-    # allow_nodetool_archive_command: false
-    # max_archive_retries: 10
+# full_query_logging_options:
+  # log_dir:
+  # roll_cycle: HOURLY
+  # block: true
+  # max_queue_weight: 268435456 # 256 MiB
+  # max_log_size: 17179869184 # 16 GiB
+  ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
+  # archive_command:
+  ## note that enabling this allows anyone with JMX/nodetool access to run local shell commands as the user running cassandra
+  # allow_nodetool_archive_command: false
+  # max_archive_retries: 10
 
 # validate tombstones on reads and compaction
 # can be either "disabled", "warn" or "exception"
@@ -1397,25 +1625,195 @@
 # Having many tables and/or keyspaces negatively affects performance of many operations in the
 # cluster. When the number of tables/keyspaces in the cluster exceeds the following thresholds
 # a client warning will be sent back to the user when creating a table or keyspace.
+# As of Cassandra 4.1, these properties are deprecated in favor of keyspaces_warn_threshold and tables_warn_threshold
 # table_count_warn_threshold: 150
 # keyspace_count_warn_threshold: 40
 
+# configure the read and write consistency levels for modifications to auth tables
+# auth_read_consistency_level: LOCAL_QUORUM
+# auth_write_consistency_level: EACH_QUORUM
+
+# Delays on auth resolution can lead to a thundering herd problem on reconnects; this option will enable
+# warming of auth caches prior to the node completing startup. See CASSANDRA-16958
+# auth_cache_warming_enabled: false
+
 #########################
 # EXPERIMENTAL FEATURES #
 #########################
 
 # Enables materialized view creation on this node.
 # Materialized views are considered experimental and are not recommended for production use.
-enable_materialized_views: false
+materialized_views_enabled: false
 
 # Enables SASI index creation on this node.
 # SASI indexes are considered experimental and are not recommended for production use.
-enable_sasi_indexes: false
+sasi_indexes_enabled: false
 
 # Enables creation of transiently replicated keyspaces on this node.
 # Transient replication is experimental and is not recommended for production use.
-enable_transient_replication: false
+transient_replication_enabled: false
 
 # Enables the used of 'ALTER ... DROP COMPACT STORAGE' statements on this node.
 # 'ALTER ... DROP COMPACT STORAGE' is considered experimental and is not recommended for production use.
-enable_drop_compact_storage: false
+drop_compact_storage_enabled: false
+
+# Whether or not USE <keyspace> is allowed. This is enabled by default to avoid failure on upgrade.
+#use_statements_enabled: true
+
+# When the client triggers a protocol exception or unknown issue (Cassandra bug) we increment
+# a client metric showing this; this logic will exclude specific subnets from updating these
+# metrics
+#client_error_reporting_exclusions:
+#  subnets:
+#    - 127.0.0.1
+#    - 127.0.0.0/31
+
+# Enables read thresholds (warn/fail) across all replicas for reporting back to the client.
+# See: CASSANDRA-16850
+# read_thresholds_enabled: false # scheduled to be set true in 4.2
+# When read_thresholds_enabled: true, this tracks the materialized size of a query on the
+# coordinator. If coordinator_read_size_warn_threshold is defined, this will emit a warning
+# to clients with details on what query triggered this as well as the size of the result set; if
+# coordinator_read_size_fail_threshold is defined, this will fail the query after it
+# has exceeded this threshold, returning a read error to the user.
+# coordinator_read_size_warn_threshold:
+# coordinator_read_size_fail_threshold:
+# When read_thresholds_enabled: true, this tracks the size of the local read (as defined by
+# heap size), and will warn/fail based off these thresholds; undefined disables these checks.
+# local_read_size_warn_threshold:
+# local_read_size_fail_threshold:
+# When read_thresholds_enabled: true, this tracks the expected memory size of the RowIndexEntry
+# and will warn/fail based off these thresholds; undefined disables these checks
+# row_index_read_size_warn_threshold:
+# row_index_read_size_fail_threshold:
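+#
+# A hypothetical combination (the sizes below are illustrative only) enabling the
+# checks with generous limits might look like:
+# read_thresholds_enabled: true
+# coordinator_read_size_warn_threshold: 10MiB
+# coordinator_read_size_fail_threshold: 100MiB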
+
+# Guardrail to warn or fail when creating more user keyspaces than threshold.
+# The two thresholds default to -1 to disable.
+# keyspaces_warn_threshold: -1
+# keyspaces_fail_threshold: -1
+# Guardrail to warn or fail when creating more user tables than threshold.
+# The two thresholds default to -1 to disable.
+# tables_warn_threshold: -1
+# tables_fail_threshold: -1
+# Guardrail to enable or disable the ability to create uncompressed tables
+# uncompressed_tables_enabled: true
+# Guardrail to warn or fail when creating/altering a table with more columns per table than threshold.
+# The two thresholds default to -1 to disable.
+# columns_per_table_warn_threshold: -1
+# columns_per_table_fail_threshold: -1
+# Guardrail to warn or fail when creating more secondary indexes per table than threshold.
+# The two thresholds default to -1 to disable.
+# secondary_indexes_per_table_warn_threshold: -1
+# secondary_indexes_per_table_fail_threshold: -1
+# Guardrail to enable or disable the creation of secondary indexes
+# secondary_indexes_enabled: true
+# Guardrail to warn or fail when creating more materialized views per table than threshold.
+# The two thresholds default to -1 to disable.
+# materialized_views_per_table_warn_threshold: -1
+# materialized_views_per_table_fail_threshold: -1
+# Guardrail to warn about, ignore or reject properties when creating tables. By default all properties are allowed.
+# table_properties_warned: []
+# table_properties_ignored: []
+# table_properties_disallowed: []
+# Guardrail to allow/disallow user-provided timestamps. Defaults to true.
+# user_timestamps_enabled: true
+# Guardrail to allow/disallow GROUP BY functionality.
+# group_by_enabled: true
+# Guardrail to allow/disallow TRUNCATE and DROP TABLE statements
+# drop_truncate_table_enabled: true
+# Guardrail to warn or fail when using a page size greater than threshold.
+# The two thresholds default to -1 to disable.
+# page_size_warn_threshold: -1
+# page_size_fail_threshold: -1
+# Guardrail to allow/disallow list operations that require read before write, i.e. setting list element by index and
+# removing list elements by either index or value. Defaults to true.
+# read_before_write_list_operations_enabled: true
+# Guardrail to warn or fail when querying with an IN restriction selecting more partition keys than threshold.
+# The two thresholds default to -1 to disable.
+# partition_keys_in_select_warn_threshold: -1
+# partition_keys_in_select_fail_threshold: -1
+# Guardrail to warn or fail when an IN query creates a cartesian product with a size exceeding threshold,
+# e.g. "a in (1,2,...,10) and b in (1,2,...,10)" results in a cartesian product of 100.
+# The two thresholds default to -1 to disable.
+# in_select_cartesian_product_warn_threshold: -1
+# in_select_cartesian_product_fail_threshold: -1
+# Guardrail to warn about or reject read consistency levels. By default, all consistency levels are allowed.
+# read_consistency_levels_warned: []
+# read_consistency_levels_disallowed: []
+# Guardrail to warn about or reject write consistency levels. By default, all consistency levels are allowed.
+# write_consistency_levels_warned: []
+# write_consistency_levels_disallowed: []
+# Guardrail to warn or fail when encountering larger size of collection data than threshold.
+# At query time this guardrail is applied only to the collection fragment that is being written, even though in the case
+# of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
+# prevent read-before-write. The guardrail is also checked at sstable write time to detect large non-frozen collections,
+# although in that case exceeding the fail threshold will only log an error message, without interrupting the operation.
+# The two thresholds default to null to disable.
+# Min unit: B
+# collection_size_warn_threshold:
+# Min unit: B
+# collection_size_fail_threshold:
+# Guardrail to warn or fail when encountering more elements in collection than threshold.
+# At query time this guardrail is applied only to the collection fragment that is being written, even though in the case
+# of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
+# prevent read-before-write. The guardrail is also checked at sstable write time to detect large non-frozen collections,
+# although in that case exceeding the fail threshold will only log an error message, without interrupting the operation.
+# The two thresholds default to -1 to disable.
+# items_per_collection_warn_threshold: -1
+# items_per_collection_fail_threshold: -1
+# Guardrail to allow/disallow querying with ALLOW FILTERING. Defaults to true.
+# allow_filtering_enabled: true
+# Guardrail to warn or fail when creating a user-defined-type with more fields than threshold.
+# Default -1 to disable.
+# fields_per_udt_warn_threshold: -1
+# fields_per_udt_fail_threshold: -1
+# Guardrail to warn or fail when local data disk usage percentage exceeds threshold. Valid values are in [1, 100].
+# This is only used for the disks storing data directories, so it won't count any separate disks used for storing
+# the commitlog, hints or saved caches. The disk usage is the ratio between the amount of space used by the data
+# directories and the sum of that same space and the remaining free space on disk. The main purpose of this
+# guardrail is rejecting user writes when the disks are over the defined usage percentage, so the writes done by
+# background processes such as compaction and streaming don't fail due to a full disk. The limits should be defined
+# according to the expected data growth due to those background processes; for example, a compaction strategy
+# doubling the size of the data would require keeping the disk usage under 50%.
+# The two thresholds default to -1 to disable.
+# data_disk_usage_percentage_warn_threshold: -1
+# data_disk_usage_percentage_fail_threshold: -1
+# Allows defining the max disk size of the data directories when calculating thresholds for
+# data_disk_usage_percentage_warn_threshold and data_disk_usage_percentage_fail_threshold, so if this is greater than
+# zero they become percentages of a fixed size on disk instead of percentages of the physically available disk size.
+# This is useful when there is a large disk and only a part of it is meant to be used for Cassandra's data directories.
+# Valid values are in [1, max available disk size of all data directories].
+# Defaults to null to disable and use the physically available disk size of data directories during calculations.
+# Min unit: B
+# data_disk_usage_max_disk_size:
+# Guardrail to warn or fail when the minimum replication factor is lower than the threshold.
+# This would also apply to system keyspaces.
+# Suggested value for use in production: 2 or higher
+# minimum_replication_factor_warn_threshold: -1
+# minimum_replication_factor_fail_threshold: -1
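+#
+# As an illustrative sketch only (the values below are hypothetical and not recommendations),
+# a deployment enabling a few of the guardrails above might uncomment and set, for example:
+# fields_per_udt_warn_threshold: 10
+# fields_per_udt_fail_threshold: 30
+# data_disk_usage_percentage_warn_threshold: 70
+# data_disk_usage_percentage_fail_threshold: 80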
+
+# Startup checks are executed as part of the Cassandra startup process. Not all of them
+# are configurable (so that you can disable them), but the ones enumerated below are.
+# Uncomment the startup checks and configure them appropriately to cover your needs.
+#
+#startup_checks:
+# Verifies correct ownership of attached locations on disk at startup. See CASSANDRA-16879 for more details.
+#  check_filesystem_ownership:
+#    enabled: false
+#    ownership_token: "sometoken" # (overriden by "CassandraOwnershipToken" system property)
+#    ownership_filename: ".cassandra_fs_ownership" # (overriden by "cassandra.fs_ownership_filename")
+# Prevents a node from starting if snitch's data center differs from previous data center.
+#  check_dc:
+#    enabled: true # (overridden by cassandra.ignore_dc system property)
+# Prevents a node from starting if snitch's rack differs from previous rack.
+#  check_rack:
+#    enabled: true # (overridden by cassandra.ignore_rack system property)
+# Enable this property to fail startup if the node is down for longer than gc_grace_seconds, to potentially
+# prevent data resurrection on tables with deletes. By default, this will run against all keyspaces and tables
+# except the ones specified on excluded_keyspaces and excluded_tables.
+#  check_data_resurrection:
+#    enabled: false
+# file where Cassandra periodically writes the last time it was known to run
+#    heartbeat_file: /var/lib/cassandra/data/cassandra-heartbeat
+#    excluded_keyspaces: # comma separated list of keyspaces to exclude from the check
+#    excluded_tables: # comma separated list of keyspace.table pairs to exclude from the check
diff --git a/conf/commitlog_archiving.properties b/conf/commitlog_archiving.properties
index 393259c..1488ced 100644
--- a/conf/commitlog_archiving.properties
+++ b/conf/commitlog_archiving.properties
@@ -44,5 +44,14 @@
 # or equal to this timestamp will be applied.
 restore_point_in_time=
 
+# Snapshot commit log position override. This should not normally be necessary, unless the snapshot used a method other
+# than sstables to store data (e.g. a persistent memtable was restored from the snapshot).
+# Format: segmentId, position
+#
+# Recovery will not replay any commit log data before the specified commit log position. It will NOT exclude the
+# intervals covered by existing sstables from the replay interval, i.e. it will replay data that may already be in
+# sstables.
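+#
+# For example (the values below are hypothetical), to skip replay of everything before
+# position 1024 in commit log segment id 1675261486802:
+# snapshot_commitlog_position=1675261486802, 1024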
+snapshot_commitlog_position=
+
 # precision of the timestamp used in the inserts (MILLISECONDS, MICROSECONDS, ...)
 precision=MICROSECONDS
diff --git a/conf/cqlshrc.sample b/conf/cqlshrc.sample
index 2c00d4a..4878b58 100644
--- a/conf/cqlshrc.sample
+++ b/conf/cqlshrc.sample
@@ -19,11 +19,19 @@
 
 [authentication]
 ;; If Cassandra has auth enabled, fill out these options
-; username = fred
-; password = !!bang!!$
+;; Path to the credentials file, an initial ~ or ~user is expanded to that user's home directory
+; credentials = ~/.cassandra/credentials
 ; keyspace = ks1
 
 
+[auth_provider]
+;; you can specify any auth provider found in your python environment
+;; module and class will be used to dynamically load the class
+;; all other properties found here and in the credentials file under the class name
+;; will be passed to the constructor
+; module = cassandra.auth
+; classname = PlainTextAuthProvider
+; username = user1
 
 [ui]
 ;; Whether or not to display query results with colors
@@ -112,7 +120,9 @@
 ;; To be provided when require_client_auth=true
 ;usercert = ~/cert.pem
 
-
+; this is effectively ignored as of 4.1, as the TLS protocol is auto-negotiated; it will
+; be removed in the next major version of Cassandra. Possible values were TLSv1, TLSv1_1 or TLSv1_2
+;version =
 
 ;; Optional section, overrides default certfile in [ssl] section, if present
 ; [certfiles]
diff --git a/conf/credentials.sample b/conf/credentials.sample
new file mode 100644
index 0000000..23d0beb
--- /dev/null
+++ b/conf/credentials.sample
@@ -0,0 +1,25 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements.  See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership.  The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License.  You may obtain a copy of the License at
+;
+;   http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied.  See the License for the
+; specific language governing permissions and limitations
+; under the License.
+;
+; Sample ~/.cassandra/credentials file.
+;
+; Please ensure this file is owned by the user and is not readable by group and other users
+
+[PlainTextAuthProvider]
+; username = fred
+; password = !!bang!!$
+
diff --git a/conf/jvm11-server.options b/conf/jvm11-server.options
index 7e78467..1fc3503 100644
--- a/conf/jvm11-server.options
+++ b/conf/jvm11-server.options
@@ -29,6 +29,8 @@
 ## Use the Hotspot garbage-first collector.
 #-XX:+UseG1GC
 #-XX:+ParallelRefProcEnabled
+#-XX:MaxTenuringThreshold=1
+#-XX:G1HeapRegionSize=16m
 
 #
 ## Have the JVM do less remembered set work during STW, instead
@@ -38,7 +40,7 @@
 ## Main G1GC tunable: lowering the pause target will lower throughput and vice versa.
 ## 200ms is the JVM default and lowest viable setting
 ## 1000ms increases throughput. Keep it smaller than the timeouts in cassandra.yaml.
-#-XX:MaxGCPauseMillis=500
+#-XX:MaxGCPauseMillis=300
 
 ## Optional G1 Settings
 # Save CPU time on large (>= 16GB) heaps by delaying region scanning
diff --git a/conf/jvm8-server.options b/conf/jvm8-server.options
index 6214669..ba800db 100644
--- a/conf/jvm8-server.options
+++ b/conf/jvm8-server.options
@@ -35,6 +35,8 @@
 ## Use the Hotspot garbage-first collector.
 #-XX:+UseG1GC
 #-XX:+ParallelRefProcEnabled
+#-XX:MaxTenuringThreshold=1
+#-XX:G1HeapRegionSize=16m
 
 #
 ## Have the JVM do less remembered set work during STW, instead
@@ -44,7 +46,7 @@
 ## Main G1GC tunable: lowering the pause target will lower throughput and vice versa.
 ## 200ms is the JVM default and lowest viable setting
 ## 1000ms increases throughput. Keep it smaller than the timeouts in cassandra.yaml.
-#-XX:MaxGCPauseMillis=500
+#-XX:MaxGCPauseMillis=300
 
 ## Optional G1 Settings
 # Save CPU time on large (>= 16GB) heaps by delaying region scanning
diff --git a/debian/cassandra.install b/debian/cassandra.install
index 6df6f57..f54d1ad 100644
--- a/debian/cassandra.install
+++ b/debian/cassandra.install
@@ -2,7 +2,7 @@
 conf/cassandra-env.sh etc/cassandra
 conf/cassandra-rackdc.properties etc/cassandra
 conf/commitlog_archiving.properties etc/cassandra
-conf/cassandra-topology.properties etc/cassandra
+conf/cassandra-topology.properties.example etc/cassandra
 conf/logback.xml etc/cassandra
 conf/logback-tools.xml etc/cassandra
 conf/jvm*.options etc/cassandra
@@ -24,6 +24,7 @@
 tools/bin/fqltool usr/bin
 tools/bin/auditlogviewer usr/bin
 tools/bin/jmxtool usr/bin
+tools/bin/hash_password usr/bin
 lib/*.jar usr/share/cassandra/lib
 lib/*.zip usr/share/cassandra/lib
 lib/sigar-bin/* usr/share/cassandra/lib/sigar-bin
diff --git a/debian/changelog b/debian/changelog
index 937aa65..1bb1d0d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,74 +1,44 @@
-cassandra (4.0.11) UNRELEASED; urgency=medium
+cassandra (4.1.3) UNRELEASED; urgency=medium
 
   * New release
 
- -- Mick Semb Wever <mck@apache.org>  Thu, 25 May 2023 14:44:07 +0200
+ -- Mick Semb Wever <mck@apache.org>  Thu, 25 May 2023 16:11:28 +0200
 
-cassandra (4.0.10) unstable; urgency=medium
+cassandra (4.1.2) unstable; urgency=medium
 
   * New release
 
- -- Mick Semb Wever <mck@apache.org>  Thu, 25 May 2023 14:44:07 +0200
+ -- Mick Semb Wever <mck@apache.org>  Thu, 25 May 2023 16:11:28 +0200
 
-cassandra (4.0.9) unstable; urgency=medium
+cassandra (4.1.1) unstable; urgency=medium
 
   * New release
 
- -- Stefan Miklosovic <smiklosovic@apache.org>  Tue, 11 Apr 2023 09:31:33 +0200
+ -- Stefan Miklosovic <smiklosovic@apache.org>  Wed, 15 Mar 2023 09:08:40 +0100
 
-cassandra (4.0.8) unstable; urgency=medium
+cassandra (4.1.0) unstable; urgency=medium
 
   * New release
 
- -- Stefan Miklosovic <smiklosovic@apache.org>  Tue, 07 Feb 2023 22:33:43 +0100
+ -- Mick Semb Wever <mck@apache.org>  Wed, 07 Dec 2022 21:53:33 +0100
 
-cassandra (4.0.7) unstable; urgency=medium
+cassandra (4.1~rc1) unstable; urgency=medium
 
   * New release
 
- -- Mick Semb Wever <mck@apache.org>  Wed, 19 Oct 2022 10:57:06 +0200
+ -- Mick Semb Wever <mck@apache.org>  Thu, 17 Nov 2022 11:33:25 +0100
 
-cassandra (4.0.6) unstable; urgency=medium
+cassandra (4.1~beta1) unstable; urgency=medium
 
   * New release
 
- -- Mick Semb Wever <mck@apache.org>  Thu, 18 Aug 2022 20:30:16 +0200
+ -- Mick Semb Wever <mck@apache.org>  Tue, 27 Sep 2022 00:08:07 +0200
 
-cassandra (4.0.5) unstable; urgency=medium
+cassandra (4.1~alpha1) unstable; urgency=medium
 
   * New release
 
- -- Mick Semb Wever <mck@apache.org>  Tue, 12 Jul 2022 12:16:22 +0200
-
-cassandra (4.0.4) unstable; urgency=medium
-
-  * New release
-
- -- Mick Semb Wever <mck@apache.org>  Fri, 06 May 2022 18:38:33 +0200
-
-cassandra (4.0.3) unstable; urgency=medium
-
-  * New release
-
- -- Mick Semb Wever <mck@apache.org>  Sun, 13 Feb 2022 22:37:37 +0100
-
-cassandra (4.0.2) unstable; urgency=medium
-
-  * New release 
-
- -- Mick Semb Wever <mck@apache.org>  Mon, 07 Feb 2022 14:42:07 +0100
-
-cassandra (4.0.1) unstable; urgency=medium
-
-  * New release
-
- -- Sam Tunnicliffe <samt@apache.org>  Mon, 30 Aug 2021 11:28:21 +0100
-
-cassandra (4.0~rc2) unstable; urgency=medium
-
-  * New release
-
- -- Mick Semb Wever <mck@apache.org>  Sun, 27 Jun 2021 16:36:29 +0200
+ -- Mick Semb Wever <mck@apache.org>  Fri, 20 May 2022 22:02:50 +0200
 
 cassandra (4.0~rc1) unstable; urgency=medium
 
diff --git a/doc/antora.yml b/doc/antora.yml
index 771311b..08ed5e2 100644
--- a/doc/antora.yml
+++ b/doc/antora.yml
@@ -1,6 +1,6 @@
 name: Cassandra
-version: '4.0'
-display_version: '4.0'
+version: '4.1'
+display_version: '4.1'
 asciidoc:
   attributes:
     cass_url: 'http://cassandra.apache.org/'
diff --git a/doc/cql3/CQL.textile b/doc/cql3/CQL.textile
index f85cb81..d2a4b72 100644
--- a/doc/cql3/CQL.textile
+++ b/doc/cql3/CQL.textile
@@ -18,7 +18,7 @@
 #
 -->
 
-h1. Cassandra Query Language (CQL) v3.4.5
+h1. Cassandra Query Language (CQL) v3.4.6
 
 
 
@@ -122,6 +122,7 @@
                | <collection-literal>
                | <variable>
                | <function> '(' (<term> (',' <term>)*)? ')'
+               | CAST '(' <term> AS <type> ')'
 
   <collection-literal> ::= <map-literal>
                          | <set-literal>
@@ -214,7 +215,7 @@
 __Syntax:__
 
 bc(syntax).. 
-<create-keyspace-stmt> ::= ALTER KEYSPACE <identifier> WITH <properties>
+<alter-keyspace-stmt> ::= ALTER KEYSPACE (IF EXISTS)? <identifier> WITH <properties>
 p. 
 __Sample:__
 
@@ -224,6 +225,7 @@
 
 p. 
 The @ALTER KEYSPACE@ statement alters the properties of an existing keyspace. The supported @<properties>@ are the same as for the "@CREATE KEYSPACE@":#createKeyspaceStmt statement.
+If the keyspace does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op.
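+
+As a hypothetical illustration of the conditional form (the keyspace name and replication settings below are made up):
+
+bc(sample). 
+ALTER KEYSPACE IF EXISTS excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 3};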
 
 
 h3(#dropKeyspaceStmt). DROP KEYSPACE
@@ -409,12 +411,11 @@
 __Syntax:__
 
 bc(syntax).. 
-<alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) <tablename> <instruction>
+<alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) (IF NOT EXISTS)? <tablename> <instruction>
 
-<instruction> ::= ADD   <identifier> <type>
-                | ADD   ( <identifier> <type> ( , <identifier> <type> )* )
-                | DROP  <identifier>
-                | DROP  ( <identifier> ( , <identifier> )* )
+<instruction> ::= ADD (IF NOT EXISTS)? ( <identifier> <type> ( , <identifier> <type> )* )
+                | DROP  (IF EXISTS)? ( <identifier> ( , <identifier> )* )
+                | RENAME (IF EXISTS)? <identifier> to <identifier> (AND <identifier> to <identifier>)*
                 | DROP COMPACT STORAGE
                 | WITH  <option> ( AND <option> )*
 p. 
@@ -431,9 +432,12 @@
 p. 
 The @ALTER@ statement is used to manipulate table definitions. It allows for adding new columns, dropping existing ones, or updating the table options. As with table creation, @ALTER COLUMNFAMILY@ is allowed as an alias for @ALTER TABLE@.
 
+If the table does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op.
+
 The @<tablename>@ is the table name optionally preceded by the keyspace name.  The @<instruction>@ defines the alteration to perform:
-* @ADD@: Adds a new column to the table. The @<identifier>@ for the new column must not conflict with an existing column. Moreover, columns cannot be added to tables defined with the @COMPACT STORAGE@ option.
-* @DROP@: Removes a column from the table. Dropped columns will immediately become unavailable in the queries and will not be included in compacted sstables in the future. If a column is readded, queries won't return values written before the column was last dropped. It is assumed that timestamps represent actual time, so if this is not your case, you should NOT readd previously dropped columns. Columns can't be dropped from tables defined with the @COMPACT STORAGE@ option.
+* @ADD@: Adds a new column to the table. The @<identifier>@ for the new column must not conflict with an existing column. Moreover, columns cannot be added to tables defined with the @COMPACT STORAGE@ option. If the new column already exists, the statement will return an error, unless @IF NOT EXISTS@ is used in which case the operation is a no-op.
+* @DROP@: Removes a column from the table. Dropped columns will immediately become unavailable in queries and will not be included in compacted sstables in the future. If a column is re-added, queries won't return values written before the column was last dropped. It is assumed that timestamps represent actual time, so if this is not your case, you should NOT re-add previously dropped columns. Columns can't be dropped from tables defined with the @COMPACT STORAGE@ option. If the dropped column does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op (see the example after this list).
+* @RENAME@ a primary key column of a table. Non primary key columns cannot be renamed. Furthermore, renaming a column to another name which already exists isn't allowed. It's important to keep in mind that renamed columns shouldn't have dependent secondary indexes. If the renamed column does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op.
 * @DROP COMPACT STORAGE@: Removes Thrift compatibility mode from the table.
 * @WITH@: Allows updating the options of the table. The "supported @<option>@":#createTableOptions (and syntax) are the same as for the @CREATE TABLE@ statement except that @COMPACT STORAGE@ is not supported. Note that setting any @compaction@ sub-options has the effect of erasing all previous @compaction@ options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of @compression@ sub-options.
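+
+As a hypothetical illustration of the conditional forms above (the table and column names are made up):
+
+bc(sample).. 
+ALTER TABLE IF EXISTS addamsFamily ADD IF NOT EXISTS gravesite varchar;
+ALTER TABLE IF EXISTS addamsFamily DROP IF EXISTS gravesite;
+p. 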
 
@@ -567,12 +571,12 @@
 
 __Syntax:__
 
-bc(syntax). <alter-materialized-view-stmt> ::= ALTER MATERIALIZED VIEW <viewname>
+bc(syntax). <alter-materialized-view-stmt> ::= ALTER MATERIALIZED VIEW (IF EXISTS)? <viewname>
                                                  WITH <option> ( AND <option> )*
 
 p. 
 The @ALTER MATERIALIZED VIEW@ statement allows options to be updated; these options are the same as <a href="#createTableOptions">@CREATE TABLE@'s options</a>.
-
+If the view does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op.
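+
+As a hypothetical illustration of the conditional form (the view name is made up):
+
+bc(sample). 
+ALTER MATERIALIZED VIEW IF EXISTS monkeySpecies_by_population WITH comment = 'Allow population queries';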
 
 h3(#dropMVStmt). DROP MATERIALIZED VIEW
 
@@ -632,10 +636,10 @@
 __Syntax:__
 
 bc(syntax).. 
-<alter-type-stmt> ::= ALTER TYPE <typename> <instruction>
+<alter-type-stmt> ::= ALTER TYPE (IF EXISTS)? <typename> <instruction>
 
-<instruction> ::= ADD <field-name> <type>
-                | RENAME <field-name> TO <field-name> ( AND <field-name> TO <field-name> )*
+<instruction> ::= ADD (IF NOT EXISTS)? <field-name> <type>
+                | RENAME (IF EXISTS)? <field-name> TO <field-name> ( AND <field-name> TO <field-name> )*
 p. 
 __Sample:__
 
@@ -645,6 +649,7 @@
 ALTER TYPE address RENAME zip TO zipcode AND street_name TO street
 p. 
 The @ALTER TYPE@ statement is used to manipulate type definitions. It allows for adding new fields, renaming existing fields, or changing the type of existing fields.
+If the type does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op.
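+
+As a hypothetical illustration of the conditional form (the type and field names are made up):
+
+bc(sample). 
+ALTER TYPE IF EXISTS address ADD IF NOT EXISTS country text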
 
 h3(#dropTypeStmt). DROP TYPE
 
@@ -854,7 +859,7 @@
 bc(syntax).. 
 <insertStatement> ::= INSERT INTO <tablename>
                       ( ( <name-list> VALUES <value-list> )
-                      | ( JSON <string> [ DEFAULT ( NULL | UNSET ) ]))
+                      | ( JSON <string> (DEFAULT ( NULL | UNSET ))?))
                       ( IF NOT EXISTS )?
                       ( USING <option> ( AND <option> )* )?
 
@@ -908,7 +913,7 @@
               | <identifier> '.' <field> <op> <term>
               | <identifier> '.' <field> IN <in-values>
 
-<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>'
+<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>' | CONTAINS ( KEY )?
 <in-values> ::= (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
 
 <where-clause> ::= <relation> ( AND <relation> )*
@@ -979,7 +984,7 @@
              | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
              | '(' <identifier> (',' <identifier>)* ')' IN <variable>
 
-<op> ::= '=' | '<' | '>' | '<=' | '>='
+<op> ::= '=' | '<' | '>' | '<=' | '>=' | CONTAINS ( KEY )?
 <in-values> ::= (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
 
 <condition> ::= <identifier> (<op> | '!=') <term>
@@ -1099,7 +1104,7 @@
              | TOKEN '(' <identifier> ( ',' <identifer>)* ')' <op> <term>
 
 <op> ::= '=' | '<' | '>' | '<=' | '>=' | CONTAINS | CONTAINS KEY
-<group-by> ::= <identifier> (',' <identifier>)*
+<group-by> ::= (<identifier>,)* (<identifier> | <function>)
 <order-by> ::= <ordering> ( ',' <odering> )*
 <ordering> ::= <identifer> ( ASC | DESC )?
 <term-tuple> ::= '(' <term> (',' <term>)* ')'
@@ -1200,7 +1205,7 @@
 
 The @GROUP BY@ option allows to condense into a single row all selected rows that share the same values for a set of columns.
 
-Using the @GROUP BY@ option, it is only possible to group rows at the partition key level or at a clustering column level. By consequence, the @GROUP BY@ option only accept as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction it is not required to be present in the @GROUP BY@ clause.
+Using the @GROUP BY@ option, it is only possible to group rows at the partition key level or at a clustering column level. As a consequence, the @GROUP BY@ option only accepts as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction it is not required to be present in the @GROUP BY@ clause. The last argument can be a monotonic function on a primary key column.
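+
+As a hypothetical illustration (assuming a table @sensor_readings@ with partition key @sensor_id@, a @timestamp@ clustering column @recorded_at@ and an @int@ column @value@), rows could be grouped into one-hour buckets:
+
+bc(sample). 
+SELECT sensor_id, floor(recorded_at, 1h) AS hour, sum(value) FROM sensor_readings GROUP BY sensor_id, floor(recorded_at, 1h);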
 
 Aggregate functions will produce a separate value for each group. If no @GROUP BY@ clause is specified, aggregate functions will produce a single value for all the rows.
 
@@ -1257,7 +1262,7 @@
 bc(syntax).. 
 <create-role-stmt> ::= CREATE ROLE ( IF NOT EXISTS )? <identifier> ( WITH <option> ( AND <option> )* )?
 
-<option> ::= PASSWORD = <string>
+<option> ::= ("HASHED")? PASSWORD = <string>
            | LOGIN = <boolean>
            | SUPERUSER = <boolean>
            | OPTIONS = <map_literal>
@@ -1299,7 +1304,7 @@
 __Syntax:__
 
 bc(syntax).. 
-<alter-role-stmt> ::= ALTER ROLE <identifier> ( WITH <option> ( AND <option> )* )?
+<alter-role-stmt> ::= ALTER ROLE (IF EXISTS)? <identifier> ( WITH <option> ( AND <option> )* )?
 
 <option> ::= PASSWORD = <string>
            | LOGIN = <boolean>
@@ -1318,7 +1323,7 @@
 * A client cannot alter the @SUPERUSER@ status of any role it currently holds
 * A client can only modify certain properties of the role with which it identified at login (e.g. @PASSWORD@)
 * To modify properties of a role, the client must be granted @ALTER@ "permission":#permissions on that role
-
+If the role does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op.
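+
+As a hypothetical illustration of the conditional form:
+
+bc(sample). 
+ALTER ROLE IF EXISTS bob WITH PASSWORD = 'PASSWORD_B' AND LOGIN = true;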
 
 h3(#dropRoleStmt). DROP ROLE
 
@@ -1410,7 +1415,7 @@
 __Syntax:__ 
 
 bc(syntax).. 
-<create-user-statement> ::= CREATE USER ( IF NOT EXISTS )? <identifier> ( WITH PASSWORD <string> )? (<option>)?
+<create-user-statement> ::= CREATE USER ( IF NOT EXISTS )? <identifier> ( WITH ("HASHED")? PASSWORD <string> )? (<option>)?
 
 <option> ::= SUPERUSER
            | NOSUPERUSER
@@ -1446,11 +1451,12 @@
 __Syntax:__ 
 
 bc(syntax).. 
-<alter-user-statement> ::= ALTER USER <identifier> ( WITH PASSWORD <string> )? ( <option> )?
+<alter-user-statement> ::= ALTER USER (IF EXISTS)? <identifier> ( WITH PASSWORD <string> )? ( <option> )?
 
 <option> ::= SUPERUSER
            | NOSUPERUSER
 p. 
+If the user does not exist, the statement will return an error, unless @IF EXISTS@ is used in which case the operation is a no-op.
 
 bc(sample). 
 ALTER USER alice WITH PASSWORD 'PASSWORD_A';
@@ -1580,12 +1586,13 @@
 __Syntax:__ 
 
 bc(syntax).. 
-<grant-permission-stmt> ::= GRANT ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> TO <identifier>
+<grant-permission-stmt> ::= GRANT ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? (, PERMISSION)* ) ON <resource> TO <identifier>
 
 <permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
 
 <resource> ::= ALL KEYSPACES
              | KEYSPACE <identifier>
+             | ALL TABLES IN KEYSPACE <identifier>
              | ( TABLE )? <tablename>
              | ALL ROLES
              | ROLE <identifier>
@@ -1636,12 +1643,13 @@
 __Syntax:__ 
 
 bc(syntax).. 
-<revoke-permission-stmt> ::= REVOKE ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> FROM <identifier>
+<revoke-permission-stmt> ::= REVOKE ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? (, PERMISSION)* ) ON <resource> FROM <identifier>
 
 <permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
 
 <resource> ::= ALL KEYSPACES
              | KEYSPACE <identifier>
+             | ALL TABLES IN KEYSPACE <identifier>
              | ( TABLE )? <tablename>
              | ALL ROLES
              | ROLE <identifier>
@@ -1672,6 +1680,7 @@
 
 <resource> ::= ALL KEYSPACES
              | KEYSPACE <identifier>
+             | ALL TABLES IN KEYSPACE <identifier>
              | ( TABLE )? <tablename>
              | ALL ROLES
              | ROLE <identifier>
@@ -2032,7 +2041,7 @@
 
 h3(#castFun). Cast
 
-The @cast@ function can be used to converts one native datatype to another.
+The @cast@ function can be used to convert one native datatype to another.
 
 The following table describes the conversions supported by the @cast@ function. Cassandra will silently ignore any cast converting a datatype into its own datatype.
 
@@ -2052,7 +2061,7 @@
 |@timestamp@|@date@, @text@, @varchar@                                                                           |
 |@timeuuid@ |@timestamp@, @date@, @text@, @varchar@                                                              |
 |@tinyint@  |@tinyint@, @smallint@, @int@, @bigint@, @float@, @double@, @decimal@, @varint@, @text@, @varchar@   |
-|@uuid@      |@text@, @varchar@                                                                                  |
+|@uuid@     |@text@, @varchar@                                                                                  |
 |@varint@   |@tinyint@, @smallint@, @int@, @bigint@, @float@, @double@, @decimal@, @text@, @varchar@             |
 
 
@@ -2119,11 +2128,11 @@
 
 The following functions can be used to retrieve the date/time at the time where the function is invoked:
 
-|_. function name       |_.    output type       |
-| @currentTimestamp@    |  @timestamp@           |
-| @currentDate@         |  @date@                |
-| @currentTime@         |  @time@                |
-| @currentTimeUUID@     |  @timeUUID@            |
+|_. function name         |_.    output type       |
+| @currentTimestamp@      |  @timestamp@           |
+| @currentDate@           |  @date@                |
+| @currentTime@           |  @time@                |
+| @currentTimeUUID@       |  @timeUUID@            |
 
 For example the last 2 days of data can be retrieved using:
 
@@ -2145,6 +2154,16 @@
 |@dateOf@            |@timeuuid@      |Similar to @toTimestamp(timeuuid)@ (DEPRECATED)|
 |@unixTimestampOf@   |@timeuuid@      |Similar to @toUnixTimestamp(timeuuid)@ (DEPRECATED)|
 
+h4(#floorFun). Floor function
+
+Rounds date and time to the nearest value.
+
+|_. type      |_. function                                     |_. |
+|@timestamp@  | floor(timestamp, duration [, start_timestamp]) | If the start_timestamp is not used, then the start timestamp is January 1, 1970 00:00:00.000 GMT |
+|@timeuuid@   | floor(timeuuid, duration [, start_timestamp])  | If the start_timestamp is not used, then the start timestamp is January 1, 1970 00:00:00.000 GMT |
+|@date@       | floor(date, duration [, start_date])           | If the start_date is not used, then the start date is January 1, 1970 GMT |
+|@time@       | floor(time, duration [, start_time])           | ==If the start_time is not used, then the start time is 00:00:00[000000000]== |
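+
+Reusing the hypothetical @sensor_readings@ table from the @GROUP BY@ section above, the function might also appear directly in a selection:
+
+bc(sample). 
+SELECT sensor_id, floor(recorded_at, 12h) FROM sensor_readings;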
+
 h3(#blobFun). Blob conversion functions
 
 A number of functions are provided to "convert" the native types into binary data (@blob@). For every @<native-type>@ @type@ supported by CQL3 (a notable exceptions is @blob@, for obvious reasons), the function @typeAsBlob@ takes a argument of type @type@ and return it as a @blob@.  Conversely, the function @blobAsType@ takes a 64-bit @blob@ argument and convert it to a @bigint@ value.  And so for instance, @bigintAsBlob(3)@ is @0x0000000000000003@ and @blobAsBigint(0x0000000000000003)@ is @3@.
@@ -2551,6 +2570,17 @@
 
 The following describes the changes in each version of CQL.
 
+h3. 3.4.6
+
+* Add support for @IF EXISTS@ and @IF NOT EXISTS@ in @ALTER@ statements (see "CASSANDRA-16916":https://issues.apache.org/jira/browse/CASSANDRA-16916).
+* Allow @GRANT/REVOKE@ multiple permissions in a single statement (see "CASSANDRA-17030":https://issues.apache.org/jira/browse/CASSANDRA-17030).
+* Pre-hashed passwords in CQL (see "CASSANDRA-17334":https://issues.apache.org/jira/browse/CASSANDRA-17334).
+* Add support for type casting in @WHERE@ clause components and in the values of @INSERT/UPDATE@ statements (see "CASSANDRA-14337":https://issues.apache.org/jira/browse/CASSANDRA-14337).
+* Add support for @CONTAINS@ and @CONTAINS KEY@ in conditional @UPDATE@ and @DELETE@ statements (see "CASSANDRA-10537":https://issues.apache.org/jira/browse/CASSANDRA-10537).
+* Allow granting permissions for all tables in a keyspace (see "CASSANDRA-17027":https://issues.apache.org/jira/browse/CASSANDRA-17027).
+* Allow the use of pure monotonic functions on the last attribute of the @GROUP BY@ clause (see "CASSANDRA-11871":https://issues.apache.org/jira/browse/CASSANDRA-11871).
+* Add floor function to allow grouping by time range (see "CASSANDRA-11871":https://issues.apache.org/jira/browse/CASSANDRA-11871).
+
 h3. 3.4.5
 
 * Adds support for arithmetic operators. See "Number Arithmetic":#numberArithmetic (see "CASSANDRA-11935":https://issues.apache.org/jira/browse/CASSANDRA-11935).
@@ -2572,9 +2602,6 @@
 h3. 3.4.2
 
 * Support for selecting elements and slices of a collection ("CASSANDRA-7396":https://issues.apache.org/jira/browse/CASSANDRA-7396).
-
-h3. 3.4.2
-
 * "@INSERT/UPDATE options@":#updateOptions for tables having a default_time_to_live specifying a TTL of 0 will remove the TTL from the inserted or updated values
 * "@ALTER TABLE@":#alterTableStmt @ADD@ and @DROP@ now allow mutiple columns to be added/removed
 * New "@PER PARTITION LIMIT@":#selectLimit option (see "CASSANDRA-7017":https://issues.apache.org/jira/browse/CASSANDRA-7017).
diff --git a/doc/modules/ROOT/pages/index.adoc b/doc/modules/ROOT/pages/index.adoc
index 6a0c745..4c7a3fd 100644
--- a/doc/modules/ROOT/pages/index.adoc
+++ b/doc/modules/ROOT/pages/index.adoc
@@ -17,7 +17,7 @@
 
 | xref:cassandra:getting_started/index.adoc[Getting started] | Newbie starting point
 
-| xref:cassandra:new/index.adoc[What's new in 4.0] | What's new in Cassandra 4.0
+| xref:cassandra:new/index.adoc[What's new in 4.1] | What's new in Cassandra 4.1
 
 | xref:cassandra:architecture/index.adoc[Architecture] | Cassandra's big picture
 
diff --git a/doc/modules/cassandra/assets/images/cassandra_ssl_context_factory_pem.png b/doc/modules/cassandra/assets/images/cassandra_ssl_context_factory_pem.png
new file mode 100644
index 0000000..5c666b0
--- /dev/null
+++ b/doc/modules/cassandra/assets/images/cassandra_ssl_context_factory_pem.png
Binary files differ
diff --git a/doc/modules/cassandra/examples/BASH/get_deb_package.sh b/doc/modules/cassandra/examples/BASH/get_deb_package.sh
index 5445122..69648e8 100644
--- a/doc/modules/cassandra/examples/BASH/get_deb_package.sh
+++ b/doc/modules/cassandra/examples/BASH/get_deb_package.sh
@@ -1,2 +1,2 @@
-$ echo "deb https://debian.cassandra.apache.org 40x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-deb https://debian.cassandra.apache.org 40x main
+$ echo "deb https://debian.cassandra.apache.org 41x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
+deb https://debian.cassandra.apache.org 41x main
diff --git a/doc/modules/cassandra/examples/BNF/alter_ks.bnf b/doc/modules/cassandra/examples/BNF/alter_ks.bnf
index 5f82d34..6f5e4fe 100644
--- a/doc/modules/cassandra/examples/BNF/alter_ks.bnf
+++ b/doc/modules/cassandra/examples/BNF/alter_ks.bnf
@@ -1,2 +1,2 @@
-alter_keyspace_statement::= ALTER KEYSPACE keyspace_name
+alter_keyspace_statement::= ALTER KEYSPACE [ IF EXISTS ] keyspace_name
 	WITH options
diff --git a/doc/modules/cassandra/examples/BNF/alter_mv_statement.bnf b/doc/modules/cassandra/examples/BNF/alter_mv_statement.bnf
index ff97edb..886e913 100644
--- a/doc/modules/cassandra/examples/BNF/alter_mv_statement.bnf
+++ b/doc/modules/cassandra/examples/BNF/alter_mv_statement.bnf
@@ -1 +1 @@
-alter_materialized_view_statement::= ALTER MATERIALIZED VIEW view_name WITH table_options
+alter_materialized_view_statement::= ALTER MATERIALIZED VIEW [ IF EXISTS ] view_name WITH table_options
diff --git a/doc/modules/cassandra/examples/BNF/alter_role_statement.bnf b/doc/modules/cassandra/examples/BNF/alter_role_statement.bnf
index 36958d7..7b67608 100644
--- a/doc/modules/cassandra/examples/BNF/alter_role_statement.bnf
+++ b/doc/modules/cassandra/examples/BNF/alter_role_statement.bnf
@@ -1 +1 @@
-alter_role_statement ::= ALTER ROLE role_name WITH role_options
+alter_role_statement ::= ALTER ROLE [ IF EXISTS ] role_name WITH role_options
diff --git a/doc/modules/cassandra/examples/BNF/alter_table.bnf b/doc/modules/cassandra/examples/BNF/alter_table.bnf
index bf1b4b7..728a78a 100644
--- a/doc/modules/cassandra/examples/BNF/alter_table.bnf
+++ b/doc/modules/cassandra/examples/BNF/alter_table.bnf
@@ -1,4 +1,5 @@
-alter_table_statement::= ALTER TABLE table_name alter_table_instruction 
-alter_table_instruction::= ADD column_name cql_type ( ',' column_name cql_type )* 
-	| DROP column_name ( column_name )*  
+alter_table_statement::= ALTER TABLE [ IF EXISTS ] table_name alter_table_instruction
+alter_table_instruction::= ADD [ IF NOT EXISTS ] column_name cql_type ( ',' column_name cql_type )*
+	| DROP [ IF EXISTS ] column_name ( column_name )*
+	| RENAME [ IF EXISTS ] column_name to column_name (AND column_name to column_name)*
 	| WITH options
diff --git a/doc/modules/cassandra/examples/BNF/alter_udt_statement.bnf b/doc/modules/cassandra/examples/BNF/alter_udt_statement.bnf
index 4f409e6..c23ed41 100644
--- a/doc/modules/cassandra/examples/BNF/alter_udt_statement.bnf
+++ b/doc/modules/cassandra/examples/BNF/alter_udt_statement.bnf
@@ -1,3 +1,3 @@
-alter_type_statement::= ALTER TYPE udt_name alter_type_modification
-alter_type_modification::= ADD field_definition
-        | RENAME identifier TO identifier( identifier TO identifier )*
+alter_type_statement::= ALTER TYPE [ IF EXISTS ] udt_name alter_type_modification
+alter_type_modification::= ADD [ IF NOT EXISTS ] field_definition
+        | RENAME [ IF EXISTS ] identifier TO identifier (AND identifier TO identifier )*
diff --git a/doc/modules/cassandra/examples/BNF/alter_user_statement.bnf b/doc/modules/cassandra/examples/BNF/alter_user_statement.bnf
index 129607c..06bef1b 100644
--- a/doc/modules/cassandra/examples/BNF/alter_user_statement.bnf
+++ b/doc/modules/cassandra/examples/BNF/alter_user_statement.bnf
@@ -1 +1 @@
-alter_user_statement ::= ALTER USER role_name [ WITH PASSWORD string] [ user_option]
+alter_user_statement ::= ALTER USER [ IF EXISTS ] role_name [ WITH [ HASHED ] PASSWORD string] [ user_option]
diff --git a/doc/modules/cassandra/examples/BNF/create_role_statement.bnf b/doc/modules/cassandra/examples/BNF/create_role_statement.bnf
index bc93fbc..4236cc6 100644
--- a/doc/modules/cassandra/examples/BNF/create_role_statement.bnf
+++ b/doc/modules/cassandra/examples/BNF/create_role_statement.bnf
@@ -2,6 +2,7 @@
                           [ WITH role_options# ]
 role_options ::= role_option ( AND role_option)*
 role_option ::= PASSWORD '=' string
+                | HASHED PASSWORD '=' string
                 | LOGIN '=' boolean
                 | SUPERUSER '=' boolean
                 | OPTIONS '=' map_literal
diff --git a/doc/modules/cassandra/examples/BNF/create_user_statement.bnf b/doc/modules/cassandra/examples/BNF/create_user_statement.bnf
index 19f9903..e090e38 100644
--- a/doc/modules/cassandra/examples/BNF/create_user_statement.bnf
+++ b/doc/modules/cassandra/examples/BNF/create_user_statement.bnf
@@ -1,4 +1,4 @@
 create_user_statement ::= CREATE USER [ IF NOT EXISTS ] role_name
-                          [ WITH PASSWORD string ]
+                          [ WITH [ HASHED ] PASSWORD string ]
                           [ user_option ]
 user_option: SUPERUSER | NOSUPERUSER
diff --git a/doc/modules/cassandra/examples/CQL/alter_role.cql b/doc/modules/cassandra/examples/CQL/alter_role.cql
index c5f7d3d..1e858ae 100644
--- a/doc/modules/cassandra/examples/CQL/alter_role.cql
+++ b/doc/modules/cassandra/examples/CQL/alter_role.cql
@@ -1 +1,2 @@
 ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
+ALTER ROLE bob WITH HASHED PASSWORD = '$2a$10$JSJEMFm6GeaW9XxT5JIheuEtPvat6i7uKbnTcxX3c1wshIIsGyUtG' AND SUPERUSER = false;
diff --git a/doc/modules/cassandra/examples/CQL/alter_user.cql b/doc/modules/cassandra/examples/CQL/alter_user.cql
index 97de7ba..a0bf30e 100644
--- a/doc/modules/cassandra/examples/CQL/alter_user.cql
+++ b/doc/modules/cassandra/examples/CQL/alter_user.cql
@@ -1,2 +1,3 @@
 ALTER USER alice WITH PASSWORD 'PASSWORD_A';
+ALTER USER alice WITH HASHED PASSWORD '$2a$10$JSJEMFm6GeaW9XxT5JIheuEtPvat6i7uKbnTcxX3c1wshIIsGyUtG';
 ALTER USER bob SUPERUSER;
diff --git a/doc/modules/cassandra/examples/CQL/create_role.cql b/doc/modules/cassandra/examples/CQL/create_role.cql
index c8d0d64..2ceee54 100644
--- a/doc/modules/cassandra/examples/CQL/create_role.cql
+++ b/doc/modules/cassandra/examples/CQL/create_role.cql
@@ -1,5 +1,6 @@
 CREATE ROLE new_role;
 CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
+CREATE ROLE alice WITH HASHED PASSWORD = '$2a$10$JSJEMFm6GeaW9XxT5JIheuEtPvat6i7uKbnTcxX3c1wshIIsGyUtG' AND LOGIN = true;
 CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
 CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
 CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'};
diff --git a/doc/modules/cassandra/examples/CQL/create_user.cql b/doc/modules/cassandra/examples/CQL/create_user.cql
index b6531eb..d754227 100644
--- a/doc/modules/cassandra/examples/CQL/create_user.cql
+++ b/doc/modules/cassandra/examples/CQL/create_user.cql
@@ -1,2 +1,3 @@
 CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
 CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
+CREATE USER bob WITH HASHED PASSWORD '$2a$10$JSJEMFm6GeaW9XxT5JIheuEtPvat6i7uKbnTcxX3c1wshIIsGyUtG' NOSUPERUSER;
diff --git a/doc/modules/cassandra/examples/RESULTS/add_yum_repo.result b/doc/modules/cassandra/examples/RESULTS/add_yum_repo.result
index 24409f3..6fe704a 100644
--- a/doc/modules/cassandra/examples/RESULTS/add_yum_repo.result
+++ b/doc/modules/cassandra/examples/RESULTS/add_yum_repo.result
@@ -1,6 +1,6 @@
 [cassandra]
 name=Apache Cassandra
-baseurl=https://redhat.cassandra.apache.org/40x/
+baseurl=https://redhat.cassandra.apache.org/41x/
 gpgcheck=1
 repo_gpgcheck=1
 gpgkey=https://downloads.apache.org/cassandra/KEYS
diff --git a/doc/modules/cassandra/nav.adoc b/doc/modules/cassandra/nav.adoc
index 2188006..4849abb 100644
--- a/doc/modules/cassandra/nav.adoc
+++ b/doc/modules/cassandra/nav.adoc
@@ -4,22 +4,18 @@
 *** xref:getting_started/configuring.adoc[Configuring Cassandra]
 *** xref:getting_started/querying.adoc[Inserting and querying]
 *** xref:getting_started/drivers.adoc[Client drivers]
+*** xref:getting_started/java11.adoc[Support for Java 11]
 *** xref:getting_started/production.adoc[Production recommendations]
 
 ** xref:new/index.adoc[What's new]
-*** xref:new/java11.adoc[Support for Java 11]
-*** xref:new/virtualtables.adoc[Virtual tables]
-*** xref:new/auditlogging.adoc[Audit logging]
-*** xref:new/fqllogging.adoc[Full query logging]
-*** xref:new/messaging.adoc[Improved internode Messaging]
-*** xref:new/streaming.adoc[Improved streaming]
-*** xref:new/transientreplication.adoc[Transient replication]
 
 ** xref:architecture/index.adoc[Architecture]
 *** xref:architecture/overview.adoc[Overview]
 *** xref:architecture/dynamo.adoc[Dynamo]		
 *** xref:architecture/storage_engine.adoc[Storage engine]
 *** xref:architecture/guarantees.adoc[Guarantees]
+*** xref:architecture/messaging.adoc[Improved internode messaging]
+*** xref:architecture/streaming.adoc[Improved streaming]
 
 ** xref:data_modeling/index.adoc[Data modeling]
 *** xref:data_modeling/intro.adoc[Introduction]
@@ -57,6 +53,7 @@
 *** xref:configuration/cass_cl_archive_file.adoc[commitlog-archiving.properties]
 *** xref:configuration/cass_logback_xml_file.adoc[logback.xml]
 *** xref:configuration/cass_jvm_options_file.adoc[jvm-* files]
+*** xref:configuration/configuration.adoc[Liberating cassandra.yaml Parameters' Names from Their Units]
 
 ** xref:operating/index.adoc[Operating]
 *** xref:operating/snitch.adoc[Snitches]
@@ -72,8 +69,12 @@
 *** xref:operating/metrics.adoc[Metrics]
 *** xref:operating/security.adoc[Security]
 *** xref:operating/hardware.adoc[Hardware]
-*** xref:operating/audit_logging.adoc[Audit logging]
-*** xref:operating/compaction/index.adoc[Compaction]		
+*** xref:operating/compaction/index.adoc[Compaction]
+*** xref:operating/virtualtables.adoc[Virtual tables]
+*** xref:operating/auditlogging.adoc[Audit logging]
+*** xref:operating/audit_logging.adoc[Audit logging 2]
+*** xref:operating/fqllogging.adoc[Full query logging]
+*** xref:operating/transientreplication.adoc[Transient replication]
 
 ** xref:tools/index.adoc[Tools]
 *** xref:tools/cqlsh.adoc[cqlsh: the CQL shell]
diff --git a/doc/modules/cassandra/pages/architecture/messaging.adoc b/doc/modules/cassandra/pages/architecture/messaging.adoc
new file mode 100644
index 0000000..2e01fd4
--- /dev/null
+++ b/doc/modules/cassandra/pages/architecture/messaging.adoc
@@ -0,0 +1,360 @@
+= Improved Internode Messaging
+
+Apache Cassandra 4.0 has added several new improvements to internode
+messaging.
+
+== Optimized Internode Messaging Protocol
+
+The internode messaging protocol has been optimized
+(https://issues.apache.org/jira/browse/CASSANDRA-14485[CASSANDRA-14485]).
+Previously the `IPAddressAndPort` of the sender was included with each
+message that was sent even though the `IPAddressAndPort` had already
+been sent once when the initial connection/session was established. In
+Cassandra 4.0 `IPAddressAndPort` has been removed from every separate
+message sent and only sent when connection/session is initiated.
+
+Another improvement is that in several places (listed below) a fixed 4-byte
+integer value has been replaced with a `vint`, as a `vint` almost always
+needs just 1 byte:
+
+* The `paramSize` (the number of parameters in the header)
+* Each individual parameter value
+* The `payloadSize`
+
+== NIO Messaging
+
+In Cassandra 4.0 peer-to-peer (internode) messaging has been switched to
+non-blocking I/O (NIO) via Netty
+(https://issues.apache.org/jira/browse/CASSANDRA-8457[CASSANDRA-8457]).
+
+In the serialization format, each message contains a header with several
+fixed fields, an optional key-value parameters section, and then the
+message payload itself. Note: the IP address in the header may be either
+IPv4 (4 bytes) or IPv6 (16 bytes).
+
+____
+The diagram below shows the IPv4 address for brevity.
+____
+
+....
+1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4 5 5 5 5 5 6 6
+0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+|                       PROTOCOL MAGIC                          |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+|                         Message ID                            |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+|                         Timestamp                             |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+|  Addr len |           IP Address (IPv4)                       /
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+/           |                 Verb                              /
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+/           |            Parameters size                        /
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+/           |             Parameter data                        /
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+/                                                               |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+|                        Payload size                           |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+|                                                               /
+/                           Payload                             /
+/                                                               |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+....
+
+An individual parameter has a String key and a byte array value. The key
+is serialized with its length, encoded as two bytes, followed by the
+UTF-8 byte encoding of the string. The body is serialized with its
+length, encoded as four bytes, followed by the bytes of the value.
+
+== Resource limits on Queued Messages
+
+System stability is improved by enforcing strict resource limits
+(https://issues.apache.org/jira/browse/CASSANDRA-15066[CASSANDRA-15066])
+on the number of outbound messages that are queued, measured by the
+`serializedSize` of the message. There are three separate limits imposed
+simultaneously to ensure that progress is always made without any
+reasonable combination of failures impacting a node’s stability.
+
+[arabic]
+. Global, per-endpoint and per-connection limits are imposed on messages
+queued for delivery to other nodes and waiting to be processed on
+arrival from other nodes in the cluster. These limits are applied to the
+on-wire size of the message being sent or received.
+. The basic per-link limit is consumed in isolation before any endpoint
+or global limit is imposed. Each node-pair has three links: urgent,
+small and large. Any given node may have a maximum of
+`N*3 * (internode_application_send_queue_capacity in bytes + internode_application_receive_queue_capacity in bytes)`
+of message data queued without any coordination between them, although in
+practice, with token-aware routing, only RF*tokens nodes should need to
+communicate with significant bandwidth.
+. The per-endpoint limit is imposed on all messages exceeding the
+per-link limit, simultaneously with the global limit, on all links to or
+from a single node in the cluster. The global limit is imposed on all
+messages exceeding the per-link limit, simultaneously with the
+per-endpoint limit, on all links to or from any node in the cluster. The
+following configuration settings have been added to `cassandra.yaml` for
+resource limits on queued messages.
+
+....
+internode_application_send_queue_capacity: 4MiB
+internode_application_send_queue_reserve_endpoint_capacity: 128MiB
+internode_application_send_queue_reserve_global_capacity: 512MiB
+internode_application_receive_queue_capacity: 4MiB
+internode_application_receive_queue_reserve_endpoint_capacity: 128MiB
+internode_application_receive_queue_reserve_global_capacity: 512MiB
+....
+
+== Virtual Tables for Messaging Metrics
+
+Metrics are improved by keeping metrics in virtual tables for
+internode inbound and outbound messaging
+(https://issues.apache.org/jira/browse/CASSANDRA-15066[CASSANDRA-15066]).
+For inbound messaging a virtual table (`internode_inbound`) has been
+added to keep metrics for:
+
+* Bytes and count of messages that could not be serialized or flushed
+due to an error
+* Bytes and count of messages scheduled
+* Bytes and count of messages successfully processed
+* Bytes and count of messages successfully received
+* Nanos and count of messages throttled
+* Bytes and count of messages expired
+* Corrupt frames recovered and unrecovered
+
+A separate virtual table (`internode_outbound`) has been added for
+outbound inter-node messaging. The outbound virtual table keeps metrics
+for:
+
+* Bytes and count of messages pending
+* Bytes and count of messages sent
+* Bytes and count of messages expired
+* Bytes and count of messages that could not be sent due to an error
+* Bytes and count of messages overloaded
+* Active Connection Count
+* Connection Attempts
+* Successful Connection Attempts
+
+== Hint Messaging
+
+A specialized version of the hint message that takes a hint already encoded
+in a `ByteBuffer` and sends it verbatim has been added. It is an
+optimization for dispatching a hint file of the current messaging
+version to a node of the same messaging version, which is the most
+common case. It saves on extra `ByteBuffer` allocations and one redundant
+hint deserialization-serialization cycle.
+
+== Internode Application Timeout
+
+A configuration setting has been added to `cassandra.yaml` for the
+maximum continuous period a connection may be unwritable in application
+space.
+
+....
+# internode_application_timeout_in_ms = 30000
+....
+
+Other new features include logging the message size in the trace message
+when tracing a query.
+
+== Paxos prepare and propose stage for local requests optimized
+
+In pre-4.0, Paxos prepare and propose messages always go through the entire
+`MessagingService` stack in Cassandra even if the request is to be served
+locally; these local requests can instead be served without involving
+`MessagingService`. Similar things are done elsewhere in Cassandra, which
+skips the `MessagingService` stage for local requests.
+
+This is what it looks like in pre-4.0 if we have tracing on and run a
+lightweight transaction:
+
+....
+Sending PAXOS_PREPARE message to /A.B.C.D [MessagingService-Outgoing-/A.B.C.D] | 2017-09-11
+21:55:18.971000 | A.B.C.D | 15045
+… REQUEST_RESPONSE message received from /A.B.C.D [MessagingService-Incoming-/A.B.C.D] |
+2017-09-11 21:55:18.976000 | A.B.C.D | 20270
+… Processing response from /A.B.C.D [SharedPool-Worker-4] | 2017-09-11 21:55:18.976000 |
+A.B.C.D | 20372
+....
+
+Same thing applies for Propose stage as well.
+
+In version 4.0 the Paxos prepare and propose stages for local requests are
+optimized
+(https://issues.apache.org/jira/browse/CASSANDRA-13862[CASSANDRA-13862]).
+
+== Quality Assurance
+
+Several other quality assurance improvements have been made in version
+4.0
+(https://issues.apache.org/jira/browse/CASSANDRA-15066[CASSANDRA-15066]).
+
+=== Framing
+
+Version 4.0 introduces framing to all internode messages, i.e. the
+grouping of messages into a single logical payload with headers and
+trailers; each frame is guaranteed either to contain at most one
+message that is split into its own unique sequence of frames (for large
+messages), or to contain only complete messages.
+
+=== Corruption prevention
+
+Previously, intra-datacenter internode messages would be unprotected
+from corruption by default, as only LZ4 provided any integrity checks.
+All messages to post 4.0 nodes are written to explicit frames, which may
+be:
+
+* LZ4 encoded
+* CRC protected
+
+The Unprotected option is still available.
+
+=== Resilience
+
+For resilience, all frames are written with a separate CRC protected
+header, of 8 and 6 bytes respectively. If corruption occurs in this
+header, the connection must be reset, as before. If corruption occurs
+anywhere outside of the header, the corrupt frame will be skipped,
+leaving the connection intact and avoiding the loss of any messages
+unnecessarily.
+
+Previously, any issue at any point in the stream would result in the
+connection being reset, with the loss of any in-flight messages.
+
+=== Efficiency
+
+The overall memory usage, and number of byte shuffles, on both inbound
+and outbound messages is reduced.
+
+Outbound the Netty LZ4 encoder maintains a chunk size buffer (64KiB),
+that is filled before any compressed frame can be produced. Our frame
+encoders avoid this redundant copy, as well as freeing 192KiB per
+endpoint.
+
+Inbound, frame decoders guarantee only to copy the number of bytes
+necessary to parse a frame, and to never store more bytes than
+necessary. This improvement applies twice to LZ4 connections, improving
+both the message decode and the LZ4 frame decode.
+
+=== Inbound Path
+
+Version 4.0 introduces several improvements to the inbound path.
+
+An appropriate message handler is used based on whether large or small
+messages are expected on a particular connection as set in a flag.
+`NonblockingBufferHandler`, running on event loop, is used for small
+messages, and `BlockingBufferHandler`, running off event loop, for large
+messages. The single implementation of `InboundMessageHandler` handles
+messages of any size effectively by deriving size of the incoming
+message from the byte stream. In addition to deriving size of the
+message from the stream, incoming message expiration time is proactively
+read, before attempting to deserialize the entire message. If it’s
+expired at the time when a message is encountered the message is just
+skipped in the byte stream altogether. And if a message fails to be
+deserialized while still on the receiving side - say, because of table
+id or column being unknown - bytes are skipped, without dropping the
+entire connection and losing all the buffered messages. An immediate
+reply is sent back to the coordinator node with the failure reason,
+rather than waiting for the coordinator callback to expire. This logic
+is extended to a corrupted frame; a corrupted frame is safely skipped
+over without dropping the connection.
+
+The inbound path imposes strict limits on memory utilization. Specifically,
+the memory occupied by all parsed but unprocessed messages is bounded -
+on a per-connection, per-endpoint, and global basis. Once a connection
+exceeds its local unprocessed capacity and cannot borrow any permits
+from per-endpoint and global reserve, it simply stops processing further
+messages, providing natural backpressure - until sufficient capacity is
+regained.
+
+=== Outbound Connections
+
+==== Opening a connection
+
+A consistent approach is adopted for all kinds of failure to connect,
+including: refused by endpoint, incompatible versions, or unexpected
+exceptions;
+
+* Retry forever, until either success or no messages waiting to deliver.
+* Wait incrementally longer periods before reconnecting, up to a maximum
+of 1s.
+* While failing to connect, no reserve queue limits are acquired.
+
+==== Closing a connection
+
+* Correctly drains outbound messages that are waiting to be delivered
+(unless disconnected and fail to reconnect).
+* Messages written to a closing connection are either delivered or
+rejected, with a new connection being opened if the old is irrevocably
+closed.
+* Unused connections are pruned eventually.
+
+==== Reconnecting
+
+We sometimes need to reconnect a perfectly valid connection, e.g. if the
+preferred IP address changes. We ensure that the underlying connection
+has no in-progress operations before closing it and reconnecting.
+
+==== Message Failure
+
+Propagates to callbacks instantly, better preventing overload by
+reclaiming committed memory.
+
+===== Expiry
+
+* No longer experiences head-of-line blocking (e.g. undroppable message
+preventing all droppable messages from being expired).
+* While overloaded, expiry is attempted eagerly on enqueuing threads.
+* While disconnected we schedule regular pruning, to handle the case
+where messages are no longer being sent, but we have a large backlog to
+expire.
+
+===== Overload
+
+* Tracked by bytes queued, as opposed to number of messages.
+
+===== Serialization Errors
+
+* Do not result in the connection being invalidated; the message is
+simply completed with failure, and then erased from the frame.
+* Includes a detected mismatch between the calculated serialization size
+and the actual size.
+
+Failures to flush to network, perhaps because the connection has been
+reset, are not currently notified to callback handlers, as the necessary
+information has been discarded, though it would be possible to do so in
+future if we decide it is worth our while.
+
+==== QoS
+
+"Gossip" connection has been replaced with a general purpose "Urgent"
+connection, for any small messages impacting system stability.
+
+==== Metrics
+
+We track, and expose via Virtual Table and JMX, the number of messages
+and bytes that: we could not serialize or flush due to an error, we
+dropped due to overload or timeout, are pending, and have successfully
+sent.
+
+== Added a Message size limit
+
+Cassandra pre-4.0 doesn't protect the server from allocating huge
+buffers for the internode Message objects. A message size limit helps
+deal with issues such as a malfunctioning cluster participant. Version 4.0
+introduced a max message size config param, akin to max mutation size,
+set to the endpoint reserve capacity by default.
+
+== Recover from unknown table when deserializing internode messages
+
+As discussed in
+(https://issues.apache.org/jira/browse/CASSANDRA-9289[CASSANDRA-9289])
+it would be nice to gracefully recover from seeing an unknown table in a
+message from another node. Pre-4.0, we close the connection and
+reconnect, which can cause other concurrent queries to fail. Version 4.0
+fixes the issue by wrapping message in-stream with
+`TrackedDataInputPlus`, catching `UnknownCFException`, and skipping the
+remaining bytes in this message. The TCP connection is not closed and
+remains available for other messages.
diff --git a/doc/modules/cassandra/pages/architecture/snitch.adoc b/doc/modules/cassandra/pages/architecture/snitch.adoc
index 90b32fb..3ae066d 100644
--- a/doc/modules/cassandra/pages/architecture/snitch.adoc
+++ b/doc/modules/cassandra/pages/architecture/snitch.adoc
@@ -18,9 +18,9 @@
 
 * `dynamic_snitch`: whether the dynamic snitch should be enabled or
 disabled.
-* `dynamic_snitch_update_interval_in_ms`: controls how often to perform
+* `dynamic_snitch_update_interval`: defaults to 100ms; controls how often to perform
 the more expensive part of host score calculation.
-* `dynamic_snitch_reset_interval_in_ms`: if set greater than zero, this
+* `dynamic_snitch_reset_interval`: defaults to 10m; if set greater than zero, this
 will allow 'pinning' of replicas to hosts in order to increase cache
 capacity.
 * `dynamic_snitch_badness_threshold:`: The badness threshold will
diff --git a/doc/modules/cassandra/pages/architecture/storage_engine.adoc b/doc/modules/cassandra/pages/architecture/storage_engine.adoc
index 77c52e5..9a0c37a 100644
--- a/doc/modules/cassandra/pages/architecture/storage_engine.adoc
+++ b/doc/modules/cassandra/pages/architecture/storage_engine.adoc
@@ -11,7 +11,7 @@
 
 All mutations write optimized by storing in commitlog segments, reducing
 the number of seeks needed to write to disk. Commitlog Segments are
-limited by the `commitlog_segment_size_in_mb` option, once the size is
+limited by the `commitlog_segment_size` option, once the size is
 reached, a new commitlog segment is created. Commitlog segments can be
 archived, deleted, or recycled once all its data has been flushed to
 SSTables. Commitlog segments are truncated when Cassandra has written
@@ -19,16 +19,16 @@
 drain" before stopping Cassandra will write everything in the memtables
 to SSTables and remove the need to sync with the commitlogs on startup.
 
-* `commitlog_segment_size_in_mb`: The default size is 32, which is
+* `commitlog_segment_size`: The default size is 32MiB, which is
 almost always fine, but if you are archiving commitlog segments (see
 commitlog_archiving.properties), then you probably want a finer
-granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is
-also configurable via `max_mutation_size_in_kb` setting in `cassandra.yaml`.
-The default is half the size `commitlog_segment_size_in_mb * 1024`.
+granularity of archiving; 8 or 16 MiB is reasonable. `commitlog_segment_size`
+also determines the default value of `max_mutation_size` in cassandra.yaml.
+By default, `max_mutation_size` is half the size of `commitlog_segment_size`.
 
-**NOTE: If `max_mutation_size_in_kb` is set explicitly then
-`commitlog_segment_size_in_mb` must be set to at least twice the size of
-`max_mutation_size_in_kb / 1024`**.
+**NOTE: If `max_mutation_size` is set explicitly then
+`commitlog_segment_size` must be set to at least twice the size of
+`max_mutation_size`**.
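+
+For example, halving the segment size for finer-grained archiving while
+respecting the rule above could look like this (illustrative values
+only):
+
+....
+commitlog_segment_size: 16MiB
+max_mutation_size: 8MiB
+....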
 
 Commitlogs are an append only log of all mutations local to a Cassandra
 node. Any data written to Cassandra will first be written to a commit
@@ -47,11 +47,11 @@
 - `commitlog_sync_batch_window_in_ms`: Time to wait between "batch"
 fsyncs _Default Value:_ 2
 ** `periodic`: In periodic mode, writes are immediately ack'ed, and the
-CommitLog is simply synced every "commitlog_sync_period_in_ms"
+CommitLog is simply synced every "commitlog_sync_period"
 milliseconds.
 +
-- `commitlog_sync_period_in_ms`: Time to wait between "periodic" fsyncs
-_Default Value:_ 10000
+- `commitlog_sync_period`: Time to wait between "periodic" fsyncs
+_Default Value:_ 10000ms
 
 _Default Value:_ batch
 
@@ -79,7 +79,7 @@
 #     parameters:
 ----
 
-* `commitlog_total_space_in_mb`: Total space to use for commit logs on
+* `commitlog_total_space`: Total space to use for commit logs on
 disk.
 
 If space gets above this value, Cassandra will flush every dirty CF in
@@ -89,7 +89,7 @@
 The default value is the smaller of 8192, and 1/4 of the total space of
 the commitlog volume.
 
-_Default Value:_ 8192
+_Default Value:_ 8192MiB
 
 == Memtables
 
diff --git a/doc/modules/cassandra/pages/architecture/streaming.adoc b/doc/modules/cassandra/pages/architecture/streaming.adoc
new file mode 100644
index 0000000..8495ccf
--- /dev/null
+++ b/doc/modules/cassandra/pages/architecture/streaming.adoc
@@ -0,0 +1,217 @@
+= Improved Streaming
+
+Apache Cassandra 4.0 has made several improvements to streaming.
+Streaming is the process used by nodes of a cluster to exchange data in
+the form of SSTables. Streaming of SSTables is performed for several
+operations, such as:
+
+* SSTable Repair
+* Host Replacement
+* Range movements
+* Bootstrapping
+* Rebuild
+* Cluster expansion
+
+== Streaming based on Netty
+
+Streaming in Cassandra 4.0 is based on Non-blocking Input/Output (NIO)
+with Netty
+(https://issues.apache.org/jira/browse/CASSANDRA-12229[CASSANDRA-12229]).
+It replaces the single-threaded (or sequential), synchronous, blocking
+model of streaming messages and transfer of files. Netty supports
+non-blocking, asynchronous, multi-threaded streaming with which multiple
+connections are opened simultaneously. Non-blocking implies that threads
+are not blocked as they don’t wait for a response for a sent request. A
+response could be returned on a different thread. With asynchronous
+operation, connections and threads are decoupled and do not have a 1:1
+relation; more connections than threads may be opened.
+
+== Zero Copy Streaming
+
+Pre-4.0, during streaming Cassandra reifies the SSTables into objects.
+This creates unnecessary garbage and slows down the whole streaming
+process as some SSTables can be transferred as a whole file rather than
+individual partitions. Cassandra 4.0 has added support for streaming
+entire SSTables when possible
+(https://issues.apache.org/jira/browse/CASSANDRA-14556[CASSANDRA-14556])
+for faster streaming using ZeroCopy APIs. If enabled, Cassandra will use
+ZeroCopy for eligible SSTables, significantly speeding up transfers and
+increasing throughput. A zero-copy path avoids bringing data into
+user-space on both the sending and receiving side. Any streaming-related
+operation will see a corresponding improvement. Zero copy streaming is
+hardware bound; it is limited only by the hardware (network and disk
+I/O).
+
+=== High Availability
+
+In benchmark tests Zero Copy Streaming is 5x faster than partition-based
+streaming. Faster streaming provides the benefit of improved
+availability. A cluster’s recovery mainly depends on the streaming
+speed; Cassandra clusters with failed nodes will be able to recover much
+more quickly (5x faster). If a node fails, SSTables need to be streamed
+to a replacement node. During the replacement operation, the new
+Cassandra node streams SSTables from the neighboring nodes that hold
+copies of the data belonging to this new node’s token range. Depending
+on the amount of data stored, this process can require substantial
+network bandwidth, taking some time to complete. The longer these range
+movement operations take, the more cluster availability is reduced.
+Failure of multiple nodes would greatly reduce high availability. The
+faster the new node completes streaming its data, the faster it can
+serve traffic, increasing the availability of the cluster.
+
+=== Enabling Zero Copy Streaming
+
+Zero copy streaming is enabled by setting the following setting in
+`cassandra.yaml`.
+
+....
+stream_entire_sstables: true
+....
+
+By default zero copy streaming is enabled.
+
+=== SSTables Eligible for Zero Copy Streaming
+
+Zero copy streaming is used if all partitions within the SSTable need to
+be transmitted. This is common when using `LeveledCompactionStrategy` or
+when partitioning SSTables by token range has been enabled. All
+partition keys in the SSTables are iterated over to determine the
+eligibility for Zero Copy streaming.
+
+=== Benefits of Zero Copy Streaming
+
+When enabled, it permits Cassandra to zero-copy stream entire eligible
+SSTables between nodes, including every component. This speeds up the
+network transfer significantly, subject to throttling specified by
+`stream_throughput_outbound`.
+
+Enabling this will reduce the GC pressure on the sending and receiving nodes.
+While this feature tries to keep the disks balanced, it cannot guarantee
+it. This feature will be automatically disabled if internode encryption
+is enabled. Currently this can be used with Leveled Compaction.
+
+=== Configuring for Zero Copy Streaming
+
+Throttling reduces the streaming speed. The
+`stream_throughput_outbound` setting throttles all outbound
+streaming file transfers on a node to the given total throughput.
+When unset, the default is 200 Mbps (roughly 24 MiB/s).
+
+....
+stream_throughput_outbound: 24MiB/s
+....
+
+To run any Zero Copy streaming benchmark, the
+`stream_throughput_outbound` setting must be set to a very
+high value; otherwise, throttling will be significant and the benchmark
+results will not be meaningful.
+
+The `inter_dc_stream_throughput_outbound` setting throttles all
+streaming file transfers between datacenters. It allows
+users to throttle inter-DC stream throughput in addition to throttling
+all network stream traffic as configured with
+`stream_throughput_outbound`. When unset, the default
+is 200 Mbps (25 MB/s, roughly 24 MiB/s).
+
+....
+inter_dc_stream_throughput_outbound: 24MiB/s
+....
+
+=== SSTable Components Streamed with Zero Copy Streaming
+
+Zero Copy Streaming streams entire SSTables. SSTables are made up of
+multiple components in separate files. SSTable components streamed are
+listed in Table 1.
+
+Table 1. SSTable Components
+
+[width="98%",cols="27%,73%",]
+|===
+|SSTable Component |Description
+
+|Data.db |The base data for an SSTable: the remaining components can be
+regenerated based on the data component.
+
+|Index.db |Index of the row keys with pointers to their positions in the
+data file.
+
+|Filter.db |Serialized bloom filter for the row keys in the SSTable.
+
+|CompressionInfo.db |File to hold information about uncompressed data
+length, chunk offsets etc.
+
+|Statistics.db |Statistical metadata about the content of the SSTable.
+
+|Digest.crc32 |Holds the CRC32 checksum of the data file.
+
+|CRC.db |Holds the CRC32 for chunks in an uncompressed file.
+
+|Summary.db |Holds SSTable Index Summary (sampling of Index component)
+
+|TOC.txt |Table of contents, stores the list of all components for the
+SSTable.
+|===
+
+Custom components, used for example by a custom compaction strategy, may
+also be included.
+
+== Repair Streaming Preview
+
+Repair with `nodetool repair` involves streaming of repaired SSTables,
+and a repair preview has been added to provide an estimate of the amount
+of repair streaming that would need to be performed. Repair preview
+(https://issues.apache.org/jira/browse/CASSANDRA-13257[CASSANDRA-13257])
+is invoked with `nodetool repair --preview`, using the option:
+
+....
+-prv, --preview
+....
+
+It determines ranges and amount of data to be streamed, but doesn't
+actually perform repair.
+
+== Parallelizing of Streaming of Keyspaces
+
+The streaming of the different keyspaces for bootstrap and rebuild has
+been parallelized in Cassandra 4.0
+(https://issues.apache.org/jira/browse/CASSANDRA-4663[CASSANDRA-4663]).
+
+== Unique nodes for Streaming in Multi-DC deployment
+
+Range Streamer picks unique nodes to stream data from when the number of
+replicas in each DC is three or more
+(https://issues.apache.org/jira/browse/CASSANDRA-4650[CASSANDRA-4650]).
+The optimization evens out the streaming load across the cluster.
+Without it, some nodes can be picked to stream more data than others.
+This patch allows a dedicated node to be selected to stream only one
+range.
+
+This will increase the performance of bootstrapping a node and will also
+put less pressure on the nodes serving the data. This does not apply if
+N < 3 in each DC, since data is then streamed from only two nodes.
+
+== Stream Operation Types
+
+It is important to know the type or purpose of a certain stream. Version
+4.0
+(https://issues.apache.org/jira/browse/CASSANDRA-13064[CASSANDRA-13064])
+adds an `enum` to distinguish between the different types of streams.
+Stream types are available both in a stream request and a stream task.
+The different stream types are:
+
+* Restore replica count
+* Unbootstrap
+* Relocation
+* Bootstrap
+* Rebuild
+* Bulk Load
+* Repair
+
+== Disallow Decommission when number of Replicas will drop below configured RF
+
+https://issues.apache.org/jira/browse/CASSANDRA-12510[CASSANDRA-12510]
+guards against a decommission that would drop the number of replicas
+below the configured replication factor (RF), and adds a `--force`
+option that allows the decommission to continue if intentional, forcing
+decommission of the node even when it reduces the number of replicas
+below the configured RF.
diff --git a/doc/modules/cassandra/pages/configuration/configuration.adoc b/doc/modules/cassandra/pages/configuration/configuration.adoc
new file mode 100644
index 0000000..9b4df24
--- /dev/null
+++ b/doc/modules/cassandra/pages/configuration/configuration.adoc
@@ -0,0 +1,220 @@
+= Liberating cassandra.yaml Parameters' Names from Their Units
+
+== Objective
+
+Three big things happened as part of https://issues.apache.org/jira/browse/CASSANDRA-15234[CASSANDRA-15234]:
+
+1) Renaming of parameters in `cassandra.yaml` to follow the form `noun_verb`.
+
+2) Liberating `cassandra.yaml` parameters from their units (DataStorage, DataRate and Duration) and introducing a temporary smallest accepted unit per parameter (only for DataStorage and Duration ones)
+
+3) Backward compatibility framework to support the old names and lack of units support until at least the next major release.
+
+
+== Renamed Parameters
+
+The community has decided to allow operators to specify units for Cassandra parameters of types duration, data storage, and data rate.
+All parameters which had a particular unit (most of the time added as a suffix to their name) can now be set using the format [value][unit]. The unit suffix has been removed from their names.
+Supported units:
+[cols=",",options="header",]
+|===
+|Parameter Type |Units Supported
+|Duration | d, h, m, s, ms, us, µs, ns
+|Data Storage | B, KiB, MiB, GiB
+|Data Rate | B/s, MiB/s, KiB/s
+|===
+
+
+*Example*:
+
+Old name and value format:
+....
+permissions_update_interval_ms: 0
+....
+New name and possible value formats:
+....
+permissions_update_interval: 0ms
+permissions_update_interval: 0s
+permissions_update_interval: 0d
+permissions_update_interval: 0us
+permissions_update_interval: 0µs
+....
+
+The work in https://issues.apache.org/jira/browse/CASSANDRA-15234[CASSANDRA-15234] was already quite big, so we decided
+to introduce the notion of the smallest allowed unit per parameter for duration and data storage parameters. What does this mean?
+Cassandra's internals still use the old units for parameters. If, for example, seconds are used internally, but you want
+to add a value in nanoseconds in `cassandra.yaml`, you will get a configuration exception that contains the following information:
+....
+Accepted units: seconds, minutes, hours, days.
+....
+
+Why was this needed?
+Because we can run into precision issues. The full solution to the problem is to internally convert all parameters’ values
+so that they are manipulated in the smallest unit supported by Cassandra. A series of tickets to assess, and possibly migrate,
+our parameters to the smallest unit (incrementally, post https://issues.apache.org/jira/browse/CASSANDRA-15234[CASSANDRA-15234]) will be opened in the future.
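+
+For instance, `cache_load_timeout` has seconds as its smallest accepted unit (see the table below), so under this scheme the
+first line of the illustrative snippet would be accepted while the second would be rejected with an exception like the one above:
+
+....
+cache_load_timeout: 30s
+# cache_load_timeout: 30000ms   <- rejected: ms is below the smallest accepted unit (s)
+....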
+
+
+[cols=",,",options="header",]
+|===
+|Old Name |New Name |The Smallest Supported Unit
+|permissions_validity_in_ms |permissions_validity |ms
+|permissions_update_interval_in_ms |permissions_update_interval |ms
+|roles_validity_in_ms |roles_validity |ms
+|roles_update_interval_in_ms |roles_update_interval |ms
+|credentials_validity_in_ms |credentials_validity |ms
+|credentials_update_interval_in_ms |credentials_update_interval |ms
+|max_hint_window_in_ms |max_hint_window |ms
+|native_transport_idle_timeout_in_ms |native_transport_idle_timeout |ms
+|request_timeout_in_ms |request_timeout |ms
+|read_request_timeout_in_ms |read_request_timeout |ms
+|range_request_timeout_in_ms |range_request_timeout |ms
+|write_request_timeout_in_ms |write_request_timeout |ms
+|counter_write_request_timeout_in_ms |counter_write_request_timeout |ms
+|cas_contention_timeout_in_ms |cas_contention_timeout |ms
+|truncate_request_timeout_in_ms |truncate_request_timeout |ms
+|streaming_keep_alive_period_in_secs |streaming_keep_alive_period |s
+|cross_node_timeout |internode_timeout |-
+|slow_query_log_timeout_in_ms |slow_query_log_timeout |ms
+|memtable_heap_space_in_mb |memtable_heap_space |MiB
+|memtable_offheap_space_in_mb |memtable_offheap_space |MiB
+|repair_session_space_in_mb |repair_session_space |MiB
+|internode_max_message_size_in_bytes |internode_max_message_size |B
+|internode_send_buff_size_in_bytes |internode_socket_send_buffer_size |B
+|internode_socket_send_buffer_size_in_bytes |internode_socket_send_buffer_size |B
+|internode_socket_receive_buffer_size_in_bytes |internode_socket_receive_buffer_size |B
+|internode_recv_buff_size_in_bytes |internode_socket_receive_buffer_size |B
+|internode_application_send_queue_capacity_in_bytes |internode_application_send_queue_capacity |B
+|internode_application_send_queue_reserve_endpoint_capacity_in_bytes |internode_application_send_queue_reserve_endpoint_capacity |B
+|internode_application_send_queue_reserve_global_capacity_in_bytes |internode_application_send_queue_reserve_global_capacity |B
+|internode_application_receive_queue_capacity_in_bytes |internode_application_receive_queue_capacity |B
+|internode_application_receive_queue_reserve_endpoint_capacity_in_bytes |internode_application_receive_queue_reserve_endpoint_capacity |B
+|internode_application_receive_queue_reserve_global_capacity_in_bytes |internode_application_receive_queue_reserve_global_capacity |B
+|internode_tcp_connect_timeout_in_ms |internode_tcp_connect_timeout |ms
+|internode_tcp_user_timeout_in_ms |internode_tcp_user_timeout |ms
+|internode_streaming_tcp_user_timeout_in_ms |internode_streaming_tcp_user_timeout |ms
+|native_transport_max_frame_size_in_mb |native_transport_max_frame_size |MiB
+|max_value_size_in_mb |max_value_size |MiB
+|column_index_size_in_kb |column_index_size |KiB
+|column_index_cache_size_in_kb |column_index_cache_size |KiB
+|batch_size_warn_threshold_in_kb |batch_size_warn_threshold |KiB
+|batch_size_fail_threshold_in_kb |batch_size_fail_threshold |KiB
+|compaction_throughput_mb_per_sec |compaction_throughput |MiB/s
+|compaction_large_partition_warning_threshold_mb |compaction_large_partition_warning_threshold |MiB
+|min_free_space_per_drive_in_mb |min_free_space_per_drive |MiB
+|stream_throughput_outbound_megabits_per_sec |stream_throughput_outbound |MiB/s
+|inter_dc_stream_throughput_outbound_megabits_per_sec |inter_dc_stream_throughput_outbound |MiB/s
+|commitlog_total_space_in_mb |commitlog_total_space |MiB
+|commitlog_sync_group_window_in_ms |commitlog_sync_group_window |ms
+|commitlog_sync_period_in_ms |commitlog_sync_period |ms
+|commitlog_segment_size_in_mb |commitlog_segment_size |MiB
+|periodic_commitlog_sync_lag_block_in_ms |periodic_commitlog_sync_lag_block |ms
+|max_mutation_size_in_kb |max_mutation_size |KiB
+|cdc_total_space_in_mb |cdc_total_space |MiB
+|cdc_free_space_check_interval_ms |cdc_free_space_check_interval |ms
+|dynamic_snitch_update_interval_in_ms |dynamic_snitch_update_interval |ms
+|dynamic_snitch_reset_interval_in_ms |dynamic_snitch_reset_interval |ms
+|hinted_handoff_throttle_in_kb |hinted_handoff_throttle |KiB
+|batchlog_replay_throttle_in_kb |batchlog_replay_throttle |KiB
+|hints_flush_period_in_ms |hints_flush_period |ms
+|max_hints_file_size_in_mb |max_hints_file_size |MiB
+|trickle_fsync_interval_in_kb |trickle_fsync_interval |KiB
+|sstable_preemptive_open_interval_in_mb |sstable_preemptive_open_interval |MiB
+|key_cache_size_in_mb |key_cache_size |MiB
+|row_cache_size_in_mb |row_cache_size |MiB
+|counter_cache_size_in_mb |counter_cache_size |MiB
+|networking_cache_size_in_mb |networking_cache_size |MiB
+|file_cache_size_in_mb |file_cache_size |MiB
+|index_summary_capacity_in_mb |index_summary_capacity |MiB
+|index_summary_resize_interval_in_minutes |index_summary_resize_interval |m
+|gc_log_threshold_in_ms |gc_log_threshold |ms
+|gc_warn_threshold_in_ms |gc_warn_threshold |ms
+|tracetype_query_ttl |trace_type_query_ttl |s
+|tracetype_repair_ttl |trace_type_repair_ttl |s
+|prepared_statements_cache_size_mb |prepared_statements_cache_size |MiB
+|enable_user_defined_functions |user_defined_functions_enabled |-
+|enable_scripted_user_defined_functions |scripted_user_defined_functions_enabled |-
+|enable_materialized_views |materialized_views_enabled |-
+|enable_transient_replication |transient_replication_enabled |-
+|enable_sasi_indexes |sasi_indexes_enabled |-
+|enable_drop_compact_storage |drop_compact_storage_enabled |-
+|enable_user_defined_functions_threads |user_defined_functions_threads_enabled |-
+|enable_legacy_ssl_storage_port |legacy_ssl_storage_port_enabled |-
+|user_defined_function_fail_timeout |user_defined_functions_fail_timeout |ms
+|user_defined_function_warn_timeout |user_defined_functions_warn_timeout |ms
+|cache_load_timeout_seconds |cache_load_timeout |s
+|===
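+
+For instance, renamed data storage and data rate parameters from the table above can now be written with explicit units
+(the values shown are purely illustrative, not recommendations):
+
+....
+commitlog_segment_size: 32MiB
+compaction_throughput: 16MiB/s
+....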
+
+Another TO DO is to add JMX methods supporting the new format. However, we may abandon this if virtual tables support
+configuration changes in the near future.
+
+*Notes for Cassandra Developers*:
+
+- Most of our parameters have already been moved to the new framework as part of https://issues.apache.org/jira/browse/CASSANDRA-15234[CASSANDRA-15234].
+`@Replaces` is the annotation to be used when you make changes to any configuration parameters in the `Config` class and `cassandra.yaml`, and you want to add backward
+compatibility with previous Cassandra versions. The `Converters` class enumerates the different methods used for backward compatibility.
+`IDENTITY` is the one used for a name change only. For more information about the other converters, please check the JavaDoc in that class.
+For backward compatibility the `Settings` virtual table contains both the old and the new
+parameters, with the old and the new value format. The only exceptions at the moment are the following three parameters: `key_cache_save_period`,
+`row_cache_save_period` and `counter_cache_save_period`, which appear only once, with the new value format.
+The old names and value format can still be used at least until the next major release. A deprecation warning is emitted on startup.
+If a parameter is of type duration, data rate or data storage, its value should be accompanied by a unit when the new name is used.
+
+- Please follow the new format `noun_verb` when adding new configuration parameters.
+
+- Please consider adding any new parameters with the lowest unit supported by Cassandra when possible. Our new types also
+support long and integer upper bounds, depending on your needs. All options for configuration parameters' types are nested
+classes of our three main abstract classes - `DurationSpec`, `DataStorageSpec`, `DataRateSpec`.
+
+- If for some reason you consider that the smallest unit for a new parameter shouldn’t be the one that is supported as such in
+Cassandra, you can use the rest of the nested classes in `DurationSpec` and `DataStorageSpec`. The smallest allowed unit is
+the one we use internally for the property, so we don't have to do conversions to bigger units, which would lead to precision
+problems. This is a concern only for `DurationSpec` and `DataStorageSpec`; `DataRateSpec` is handled internally as a double.
+
+- New parameters should be added as non-negative numbers. For parameters where you would have set -1 to disable in the past, you might
+want to consider a separate flag parameter or a null value. If you use the null value, please ensure that any default value
+introduced in the DatabaseDescriptor to handle it is also duplicated in any related setters.
+
+- Parameters of type data storage, duration and data rate cannot be set to Long.MAX_VALUE (former parameters of long type)
+or Integer.MAX_VALUE (former parameters of int type). Those values are used during conversion between units to prevent
+an overflow from happening.
+
+- Any time you add @Replaces with a name change, we need to add an entry in this https://github.com/riptano/ccm/blob/808b6ca13526785b0fddfe1ead2383c060c4b8b6/ccmlib/common.py#L62[Python dictionary in CCM] to support the same backward compatibility as SnakeYAML.
+
+Please follow the instructions in requirements.txt in the DTest repo on how to retag CCM after committing any changes.
+You might also want to test with tagging in your own repo to ensure that there will be no surprises after retagging the official CCM.
+Please be sure to run a full CI after any changes, as CCM affects a few of our testing suites.
+
+- Some configuration parameters are not announced in cassandra.yaml, but they are present in the Config class for advanced users.
+Those should also use the new framework and naming conventions.
+
+- As we have backward compatibility, we didn’t have to rework all Python DTests to set config in the new format, and we exercise
+the backward compatibility while testing. Please consider adding any new tests using the new names and value format, though.
+
+- In-JVM upgrade tests do not support per-version configuration at the moment, so we have to keep the old names and value format.
+Currently, if we try to use the new config for a newer version, that will be silently ignored and default config will be used.
+
+- SnakeYAML supports overloading of parameters. This means that if you add a configuration parameter more than once in your `cassandra.yaml`,
+the last occurrence will be the one loaded into Config during Cassandra startup. In order to make upgrades as little disruptive as possible,
+we continue supporting that behavior, also when both the old and the new name of a parameter are added to `cassandra.yaml`.
+
+- Please ensure that any JMX setters/getters update the Config class properties and not some local copies. Settings Virtual Table
+reports the configuration loaded at any time from the Config class.
+
+*Example*:
+
+If you add the following to `cassandra.yaml`:
+....
+hinted_handoff_enabled: true
+enabled_hinted_handoff: false
+....
+
+you will get loaded in `Config`:
+....
+hinted_handoff_enabled: false
+....
+
+https://issues.apache.org/jira/browse/CASSANDRA-17379[CASSANDRA-17379] was opened to improve the user experience and deprecate the overloading.
+By default, we refuse to start Cassandra with a config containing both old and new config keys for the same parameter. Start
+Cassandra with `-Dcassandra.allow_new_old_config_keys=true` to override this. For historical reasons, duplicate config keys
+in `cassandra.yaml` are allowed by default; start Cassandra with `-Dcassandra.allow_duplicate_config_keys=false` to disallow this.
+Please note that `key_cache_save_period`, `row_cache_save_period`, `counter_cache_save_period` will be affected only by `-Dcassandra.allow_duplicate_config_keys`.
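+
+For example, a configuration that contains both the old and the new name of the same parameter, such as the illustrative
+snippet below, is rejected on startup unless `-Dcassandra.allow_new_old_config_keys=true` is supplied:
+
+....
+permissions_update_interval_in_ms: 2000
+permissions_update_interval: 2s
+....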
\ No newline at end of file
diff --git a/doc/modules/cassandra/pages/cql/changes.adoc b/doc/modules/cassandra/pages/cql/changes.adoc
index 1f89469..df99a39 100644
--- a/doc/modules/cassandra/pages/cql/changes.adoc
+++ b/doc/modules/cassandra/pages/cql/changes.adoc
@@ -2,6 +2,16 @@
 
 The following describes the changes in each version of CQL.
 
+== 3.4.6
+
+* Add support for IF EXISTS and IF NOT EXISTS in ALTER statements  (`16916`)
+* Allow GRANT/REVOKE multiple permissions in a single statement (`17030`)
+* Pre hashed passwords in CQL (`17334`)
+* Add support for type casting in WHERE clause components and in the values of INSERT/UPDATE statements (`14337`)
+* Add support for CONTAINS and CONTAINS KEY in conditional UPDATE and DELETE statement (`10537`)
+* Allow to grant permission for all tables in a keyspace (`17027`)
+* Allow to aggregate by time intervals (`11871`)
+
 == 3.4.5
 
 * Adds support for arithmetic operators (`11935`)
diff --git a/doc/modules/cassandra/pages/cql/cql_singlefile.adoc b/doc/modules/cassandra/pages/cql/cql_singlefile.adoc
index 89ed359..d99e12b 100644
--- a/doc/modules/cassandra/pages/cql/cql_singlefile.adoc
+++ b/doc/modules/cassandra/pages/cql/cql_singlefile.adoc
@@ -323,7 +323,7 @@
 _Syntax:_
 
 bc(syntax).. +
-::= ALTER KEYSPACE WITH  +
+::= ALTER KEYSPACE (IF EXISTS)? WITH  +
 p. +
 _Sample:_
 
@@ -713,12 +713,13 @@
 _Syntax:_
 
 bc(syntax).. +
-::= ALTER (TABLE | COLUMNFAMILY)
+::= ALTER (TABLE | COLUMNFAMILY) (IF EXISTS)?
 
-::= ADD  +
-| ADD ( ( , )* ) +
-| DROP  +
-| DROP ( ( , )* ) +
+::= ADD (IF NOT EXISTS)? +
+| ADD  (IF NOT EXISTS)? ( ( , )* ) +
+| DROP (IF EXISTS)?  +
+| DROP (IF EXISTS)? ( ( , )* ) +
+| RENAME (IF EXISTS)? TO (AND TO)* +
 | WITH ( AND )* +
 p. +
 _Sample:_
@@ -736,6 +737,7 @@
 for adding new columns, dropping existing ones, or updating the table
 options. As with table creation, `ALTER COLUMNFAMILY` is allowed as an
 alias for `ALTER TABLE`.
+If the table does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 
 The `<tablename>` is the table name optionally preceded by the keyspace
 name. The `<instruction>` defines the alteration to perform:
@@ -743,13 +745,20 @@
 * `ADD`: Adds a new column to the table. The `<identifier>` for the new
 column must not conflict with an existing column. Moreover, columns
 cannot be added to tables defined with the `COMPACT STORAGE` option.
+If the new column already exists, the statement will return an error, unless `IF NOT EXISTS` is used in which case the operation is a no-op.
 * `DROP`: Removes a column from the table. Dropped columns will
 immediately become unavailable in the queries and will not be included
 in compacted sstables in the future. If a column is readded, queries
 won’t return values written before the column was last dropped. It is
 assumed that timestamps represent actual time, so if this is not your
-case, you should NOT readd previously dropped columns. Columns can’t be
+case, you should NOT re-add previously dropped columns. Columns can’t be
 dropped from tables defined with the `COMPACT STORAGE` option.
+If the dropped column does not already exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
+* `RENAME` a primary key column of a table. Non primary key columns cannot be renamed.
+Furthermore, renaming a column to another name which already exists isn't allowed.
+It's important to keep in mind that renamed columns shouldn't have dependent secondary indexes.
+If the renamed column does not already exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
+
 * `WITH`: Allows to update the options of the table. The
 link:#createTableOptions[supported `<option>`] (and syntax) are the same
 as for the `CREATE TABLE` statement except that `COMPACT STORAGE` is not
@@ -1020,10 +1029,10 @@
 _Syntax:_
 
 bc(syntax).. +
-::= ALTER TYPE
+::= ALTER TYPE (IF EXISTS)?
 
-::= ADD  +
-| RENAME TO ( AND TO )* +
+::= ADD (IF NOT EXISTS)?  +
+| RENAME (IF EXISTS)? TO ( AND TO )* +
 p. +
 _Sample:_
 
@@ -1034,7 +1043,7 @@
 p. +
 The `ALTER TYPE` statement is used to manipulate type definitions. It
 allows for adding new fields, renaming existing fields, or changing the
-type of existing fields.
+type of existing fields. If the type does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 
 [[dropTypeStmt]]
 ==== DROP TYPE
@@ -1377,6 +1386,7 @@
 | `.' `='
 
 ::=  +
+| CONTAINS (KEY)? +
 | IN  +
 | `[' `]'  +
 | `[' `]' IN  +
@@ -1489,6 +1499,7 @@
 ::= ( | `(' ( ( `,' )* )? `)')
 
 ::= ( | `!=')  +
+| CONTAINS (KEY)? +
 | IN  +
 | `[' `]' ( | `!=')  +
 | `[' `]' IN  +
@@ -1990,7 +2001,7 @@
 _Syntax:_
 
 bc(syntax).. +
-::= ALTER ROLE ( WITH ( AND )* )?
+::= ALTER ROLE (IF EXISTS)? ( WITH ( AND )* )?
 
 ::= PASSWORD =  +
 | LOGIN =  +
@@ -2003,6 +2014,8 @@
 bc(sample). +
 ALTER ROLE bob WITH PASSWORD = `PASSWORD_B' AND SUPERUSER = false;
 
+If the role does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
+
 Conditions on executing `ALTER ROLE` statements:
 
 * A client must have `SUPERUSER` status to alter the `SUPERUSER` status
@@ -2161,7 +2174,7 @@
 _Syntax:_
 
 bc(syntax).. +
-::= ALTER USER ( WITH PASSWORD )? ( )?
+::= ALTER USER (IF EXISTS)? ( WITH PASSWORD )? ( )?
 
 ::= SUPERUSER +
 | NOSUPERUSER +
@@ -2171,6 +2184,8 @@
 ALTER USER alice WITH PASSWORD `PASSWORD_A'; +
 ALTER USER bob SUPERUSER;
 
+If the user does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
+
 [[dropUserStmt]]
 ==== DROP USER
 
diff --git a/doc/modules/cassandra/pages/cql/ddl.adoc b/doc/modules/cassandra/pages/cql/ddl.adoc
index be93bc2..36cce45 100644
--- a/doc/modules/cassandra/pages/cql/ddl.adoc
+++ b/doc/modules/cassandra/pages/cql/ddl.adoc
@@ -211,7 +211,7 @@
 ----
 include::example$CQL/alter_ks.cql[]
 ----
-
+If the keyspace does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 The supported options are the same as for xref:cql/ddl.adoc#create-keyspace-statement[creating a keyspace].
 
 [[drop-keyspace-statement]]
@@ -729,6 +729,7 @@
 ----
 include::example$BNF/alter_table.bnf[]
 ----
+If the table does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 
 For example:
 
@@ -743,12 +744,14 @@
 * `ADD` a new column to a table. The primary key of a table cannot ever be altered.
 A new column, thus, cannot be part of the primary key. 
 Adding a column is a constant-time operation based on the amount of data in the table.
+If the new column already exists, the statement will return an error, unless `IF NOT EXISTS` is used in which case the operation is a no-op.
 * `DROP` a column from a table. This command drops both the column and all
 its content. Be aware that, while the column becomes immediately
 unavailable, its content are removed lazily during compaction. Because of this lazy removal,
 the command is a constant-time operation based on the amount of data in the table. 
 Also, it is important to know that once a column is dropped, a column with the same name can be re-added,
-unless the dropped column was a non-frozen column like a collection. 
+unless the dropped column was a non-frozen column like a collection.
+If the dropped column does not already exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 
 [WARNING]
 .Warning
@@ -761,6 +764,11 @@
 if you do so, dropping a column will not correctly execute.
 ====
 
+* `RENAME` a primary key column of a table. Non primary key columns cannot be renamed.
+Furthermore, renaming a column to another name which already exists isn't allowed.
+It's important to keep in mind that renamed columns shouldn't have dependent secondary indexes.
+If the renamed column does not already exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
+
 * Use `WITH` to change a table option. The xref:CQL/ddl.adoc#create-table-options[supported options]
 are the same as those used when creating a table, with the exception of `CLUSTERING ORDER`.
 However, setting any `compaction` sub-options will erase *ALL* previous `compaction` options, so you need to re-specify
diff --git a/doc/modules/cassandra/pages/cql/functions.adoc b/doc/modules/cassandra/pages/cql/functions.adoc
index 7f7dbf9..93439a3 100644
--- a/doc/modules/cassandra/pages/cql/functions.adoc
+++ b/doc/modules/cassandra/pages/cql/functions.adoc
@@ -18,7 +18,7 @@
 security concerns (even when enabled, the execution of user-defined
 functions is sandboxed and a "rogue" function should not be allowed to
 do evil, but no sandbox is perfect so using user-defined functions is
-opt-in). See the `enable_user_defined_functions` in `cassandra.yaml` to
+opt-in). See the `user_defined_functions_enabled` in `cassandra.yaml` to
 enable them.
 ====
 
diff --git a/doc/modules/cassandra/pages/cql/mvs.adoc b/doc/modules/cassandra/pages/cql/mvs.adoc
index 6da0fa4..89ba0ce 100644
--- a/doc/modules/cassandra/pages/cql/mvs.adoc
+++ b/doc/modules/cassandra/pages/cql/mvs.adoc
@@ -127,7 +127,7 @@
 
 The options that can be updated are the same than at creation time and
 thus the `same than for tables
-<create-table-options>`.
+<create-table-options>`. If the view does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 
 [[drop-materialized-view-statement]]
 == DROP MATERIALIZED VIEW
diff --git a/doc/modules/cassandra/pages/cql/security.adoc b/doc/modules/cassandra/pages/cql/security.adoc
index 7ea0620..904dea0 100644
--- a/doc/modules/cassandra/pages/cql/security.adoc
+++ b/doc/modules/cassandra/pages/cql/security.adoc
@@ -58,6 +58,8 @@
 If internal authentication has not been set up or the role does not have
 `LOGIN` privileges, the `WITH PASSWORD` clause is not necessary.
 
+Use `WITH HASHED PASSWORD` to provide a jBcrypt-hashed password directly. See the `hash_password` tool.
+
 ==== Restricting connections to specific datacenters
 
 If a `network_authorizer` has been configured, you can restrict login
@@ -94,6 +96,9 @@
 ----
 include::example$CQL/alter_role.cql[]
 ----
+If the role does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
+
+Use `WITH HASHED PASSWORD` to provide a jBcrypt-hashed password directly. See the `hash_password` tool.
 
 ==== Restricting connections to specific datacenters
 
@@ -277,7 +282,7 @@
 ----
 include::example$BNF/alter_user_statement.bnf[]
 ----
-
+If the role does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 For example:
 
 [source,cql]
diff --git a/doc/modules/cassandra/pages/cql/types.adoc b/doc/modules/cassandra/pages/cql/types.adoc
index 0cee1f3..17c78b5 100644
--- a/doc/modules/cassandra/pages/cql/types.adoc
+++ b/doc/modules/cassandra/pages/cql/types.adoc
@@ -457,13 +457,13 @@
 ----
 include::example$BNF/alter_udt_statement.bnf[]
 ----
-
+If the type does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 You can:
 
 * Add a new field to the type (`ALTER TYPE address ADD country text`).
 That new field will be `NULL` for any values of the type created before
-the addition.
-* Rename the fields of the type.
+the addition. If the new field exists, the statement will return an error, unless `IF NOT EXISTS` is used in which case the operation is a no-op.
+* Rename the fields of the type. If the field(s) does not exist, the statement will return an error, unless `IF EXISTS` is used in which case the operation is a no-op.
 
 [source,cql]
 ----
diff --git a/doc/modules/cassandra/pages/faq/index.adoc b/doc/modules/cassandra/pages/faq/index.adoc
index 03868b3..df74db9 100644
--- a/doc/modules/cassandra/pages/faq/index.adoc
+++ b/doc/modules/cassandra/pages/faq/index.adoc
@@ -89,10 +89,10 @@
 storing small blobs (less than single digit MB) should not be a problem,
 but it is advised to manually split large blobs into smaller chunks.
 
-Please note in particular that by default, any value greater than 16MB
-will be rejected by Cassandra due the `max_mutation_size_in_kb`
+Please note in particular that by default, any value greater than 16MiB
+will be rejected by Cassandra due to the `max_mutation_size`
 configuration of the `cassandra-yaml` file (which default to half of
-`commitlog_segment_size_in_mb`, which itself default to 32MB).
+`commitlog_segment_size`, which itself defaults to 32MiB).
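+
+If you really must store larger values, both settings have to be raised together, keeping `commitlog_segment_size` at least
+twice `max_mutation_size` (a sketch only; manually splitting large blobs into smaller chunks remains the recommended approach):
+
+....
+max_mutation_size: 32MiB
+commitlog_segment_size: 64MiB
+....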
 
 [[nodetool-connection-refused]]
 == Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives?
diff --git a/doc/modules/cassandra/pages/new/java11.adoc b/doc/modules/cassandra/pages/getting_started/java11.adoc
similarity index 100%
rename from doc/modules/cassandra/pages/new/java11.adoc
rename to doc/modules/cassandra/pages/getting_started/java11.adoc
diff --git a/doc/modules/cassandra/pages/new/Figure_1.jpg b/doc/modules/cassandra/pages/new/Figure_1.jpg
deleted file mode 100644
index ccaec67..0000000
--- a/doc/modules/cassandra/pages/new/Figure_1.jpg
+++ /dev/null
Binary files differ
diff --git a/doc/modules/cassandra/pages/new/Figure_2.jpg b/doc/modules/cassandra/pages/new/Figure_2.jpg
deleted file mode 100644
index 099e15f..0000000
--- a/doc/modules/cassandra/pages/new/Figure_2.jpg
+++ /dev/null
Binary files differ
diff --git a/doc/modules/cassandra/pages/new/index.adoc b/doc/modules/cassandra/pages/new/index.adoc
index 50fafa7..a6545a7 100644
--- a/doc/modules/cassandra/pages/new/index.adoc
+++ b/doc/modules/cassandra/pages/new/index.adoc
@@ -1,11 +1,20 @@
-= New Features in Apache Cassandra 4.0
+= New Features in Apache Cassandra 4.1
 
-This section covers the new features in Apache Cassandra 4.0.
+This section covers the new features in Apache Cassandra 4.1.
 
-* xref:new/java11.adoc[Java 11]
-* xref:new/virtualtables.adoc[Virtual tables]
-* xref:new/auditlogging.adoc[Audit logging]
-* xref:new/fqllogging.adoc[Full query logging]
-* xref:new/messaging.adoc[Messaging]
-* xref:new/streaming.adoc[Streaming]
-* xref:new/transientreplication.adoc[Transient replication]
+
+* https://issues.apache.org/jira/browse/CASSANDRA-17164[Paxos v2]
+* link:/_/blog/Apache-Cassandra-4.1-Features-Guardrails-Framework.html[Guardrails]
+* link:/_/blog/Apache-Cassandra-4.1-Configuration-Standardization.html[New and Improved Configuration Format]
+* link:/_/blog/Apache-Cassandra-4.1-Features-Client-side-Password-Hashing.html[Client-side Password Hashing]
+* link:/_/blog/Apache-Cassandra-4.1-Denylisting-Partitions.html[Partition Denylist]
+* Lots of CQL improvements
+* link:/_/blog/Apache-Cassandra-4.1-New-SSTable-Identifiers.html[New SSTable Identifiers]
+* https://issues.apache.org/jira/browse/CASSANDRA-17423[Native Transport rate limiting]
+* https://issues.apache.org/jira/browse/CASSANDRA-16310[Top partition tracking per table]
+* https://issues.apache.org/jira/browse/CASSANDRA-14309[Hint Window consistency]
+* https://issues.apache.org/jira/browse/CASSANDRA-17044[Pluggability]
+** link:/_/blog/Apache-Cassandra-4.1-Features-Pluggable-Memtable-Implementations.html[Memtable]
+** Encryption
+** link:/_/blog/Apache-Cassandra-4.1-Features-Authentication-Plugin-Support-for-CQLSH.html[CQLSH Authentication]
+* and much link:https://github.com/apache/cassandra/blob/cassandra-4.1/NEWS.txt[more]
diff --git a/doc/modules/cassandra/pages/new/messaging.adoc b/doc/modules/cassandra/pages/new/messaging.adoc
deleted file mode 100644
index 07a423b..0000000
--- a/doc/modules/cassandra/pages/new/messaging.adoc
+++ /dev/null
@@ -1,360 +0,0 @@
-= Improved Internode Messaging
-
-Apache Cassandra 4.0 has added several new improvements to internode
-messaging.
-
-== Optimized Internode Messaging Protocol
-
-The internode messaging protocol has been optimized
-(https://issues.apache.org/jira/browse/CASSANDRA-14485[CASSANDRA-14485]).
-Previously the `IPAddressAndPort` of the sender was included with each
-message that was sent even though the `IPAddressAndPort` had already
-been sent once when the initial connection/session was established. In
-Cassandra 4.0 `IPAddressAndPort` has been removed from every separate
-message sent and only sent when connection/session is initiated.
-
-Another improvement is that at several instances (listed) a fixed 4-byte
-integer value has been replaced with `vint` as a `vint` is almost always
-less than 1 byte:
-
-* The `paramSize` (the number of parameters in the header)
-* Each individual parameter value
-* The `payloadSize`
-
-== NIO Messaging
-
-In Cassandra 4.0 peer-to-peer (internode) messaging has been switched to
-non-blocking I/O (NIO) via Netty
-(https://issues.apache.org/jira/browse/CASSANDRA-8457[CASSANDRA-8457]).
-
-As serialization format, each message contains a header with several
-fixed fields, an optional key-value parameters section, and then the
-message payload itself. Note: the IP address in the header may be either
-IPv4 (4 bytes) or IPv6 (16 bytes).
-
-____
-The diagram below shows the IPv4 address for brevity.
-____
-
-....
-1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4 5 5 5 5 5 6 6
-0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                       PROTOCOL MAGIC                          |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                         Message ID                            |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                         Timestamp                             |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|  Addr len |           IP Address (IPv4)                       /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |                 Verb                              /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |            Parameters size                        /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |             Parameter data                        /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/                                                               |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                        Payload size                           |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                                                               /
-/                           Payload                             /
-/                                                               |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-....
-
-An individual parameter has a String key and a byte array value. The key
-is serialized with its length, encoded as two bytes, followed by the
-UTF-8 byte encoding of the string. The body is serialized with its
-length, encoded as four bytes, followed by the bytes of the value.
-
-== Resource limits on Queued Messages
-
-System stability is improved by enforcing strict resource limits
-(https://issues.apache.org/jira/browse/CASSANDRA-15066[CASSANDRA-15066])
-on the number of outbound messages that are queued, measured by the
-`serializedSize` of the message. There are three separate limits imposed
-simultaneously to ensure that progress is always made without any
-reasonable combination of failures impacting a node’s stability.
-
-[arabic]
-. Global, per-endpoint and per-connection limits are imposed on messages
-queued for delivery to other nodes and waiting to be processed on
-arrival from other nodes in the cluster. These limits are applied to the
-on-wire size of the message being sent or received.
-. The basic per-link limit is consumed in isolation before any endpoint
-or global limit is imposed. Each node-pair has three links: urgent,
-small and large. So any given node may have a maximum of
-`N*3 * (internode_application_send_queue_capacity_in_bytes + internode_application_receive_queue_capacity_in_bytes)`
-messages queued without any coordination between them although in
-practice, with token-aware routing, only RF*tokens nodes should need to
-communicate with significant bandwidth.
-. The per-endpoint limit is imposed on all messages exceeding the
-per-link limit, simultaneously with the global limit, on all links to or
-from a single node in the cluster. The global limit is imposed on all
-messages exceeding the per-link limit, simultaneously with the
-per-endpoint limit, on all links to or from any node in the cluster. The
-following configuration settings have been added to `cassandra.yaml` for
-resource limits on queued messages.
-
-....
-internode_application_send_queue_capacity_in_bytes: 4194304 #4MiB
-internode_application_send_queue_reserve_endpoint_capacity_in_bytes: 134217728  #128MiB
-internode_application_send_queue_reserve_global_capacity_in_bytes: 536870912    #512MiB
-internode_application_receive_queue_capacity_in_bytes: 4194304                  #4MiB
-internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB
-internode_application_receive_queue_reserve_global_capacity_in_bytes: 536870912   #512MiB
-....
-
-== Virtual Tables for Messaging Metrics
-
-Metrics is improved by keeping metrics using virtual tables for
-inter-node inbound and outbound messaging
-(https://issues.apache.org/jira/browse/CASSANDRA-15066[CASSANDRA-15066]).
-For inbound messaging a virtual table (`internode_inbound`) has been
-added to keep metrics for:
-
-* Bytes and count of messages that could not be serialized or flushed
-due to an error
-* Bytes and count of messages scheduled
-* Bytes and count of messages successfully processed
-* Bytes and count of messages successfully received
-* Nanos and count of messages throttled
-* Bytes and count of messages expired
-* Corrupt frames recovered and unrecovered
-
-A separate virtual table (`internode_outbound`) has been added for
-outbound inter-node messaging. The outbound virtual table keeps metrics
-for:
-
-* Bytes and count of messages pending
-* Bytes and count of messages sent
-* Bytes and count of messages expired
-* Bytes and count of messages that could not be sent due to an error
-* Bytes and count of messages overloaded
-* Active Connection Count
-* Connection Attempts
-* Successful Connection Attempts
-
-== Hint Messaging
-
-A specialized version of hint message that takes an already encoded in a
-`ByteBuffer` hint and sends it verbatim has been added. It is an
-optimization for when dispatching a hint file of the current messaging
-version to a node of the same messaging version, which is the most
-common case. It saves on extra `ByteBuffer` allocations one redundant
-hint deserialization-serialization cycle.
-
-== Internode Application Timeout
-
-A configuration setting has been added to `cassandra.yaml` for the
-maximum continuous period a connection may be unwritable in application
-space.
-
-....
-# internode_application_timeout_in_ms = 30000
-....
-
-Some other new features include logging of message size to trace message
-for tracing a query.
-
-== Paxos prepare and propose stage for local requests optimized
-
-In pre-4.0 Paxos prepare and propose messages always go through entire
-`MessagingService` stack in Cassandra even if request is to be served
-locally, we can enhance and make local requests severed w/o involving
-`MessagingService`. Similar things are done elsewhere in Cassandra which
-skips `MessagingService` stage for local requests.
-
-This is what it looks like in pre 4.0 if we have tracing on and run a
-light-weight transaction:
-
-....
-Sending PAXOS_PREPARE message to /A.B.C.D [MessagingService-Outgoing-/A.B.C.D] | 2017-09-11
-21:55:18.971000 | A.B.C.D | 15045
-… REQUEST_RESPONSE message received from /A.B.C.D [MessagingService-Incoming-/A.B.C.D] |
-2017-09-11 21:55:18.976000 | A.B.C.D | 20270
-… Processing response from /A.B.C.D [SharedPool-Worker-4] | 2017-09-11 21:55:18.976000 |
-A.B.C.D | 20372
-....
-
-Same thing applies for Propose stage as well.
-
-In version 4.0 Paxos prepare and propose stage for local requests are
-optimized
-(https://issues.apache.org/jira/browse/CASSANDRA-13862[CASSANDRA-13862]).
-
-== Quality Assurance
-
-Several other quality assurance improvements have been made in version
-4.0
-(https://issues.apache.org/jira/browse/CASSANDRA-15066[CASSANDRA-15066]).
-
-=== Framing
-
-Version 4.0 introduces framing to all internode messages, i.e. the
-grouping of messages into a single logical payload with headers and
-trailers; these frames are guaranteed to either contain at most one
-message, that is split into its own unique sequence of frames (for large
-messages), or that a frame contains only complete messages.
-
-=== Corruption prevention
-
-Previously, intra-datacenter internode messages would be unprotected
-from corruption by default, as only LZ4 provided any integrity checks.
-All messages to post 4.0 nodes are written to explicit frames, which may
-be:
-
-* LZ4 encoded
-* CRC protected
-
-The Unprotected option is still available.
-
-=== Resilience
-
-For resilience, all frames are written with a separate CRC protected
-header, of 8 and 6 bytes respectively. If corruption occurs in this
-header, the connection must be reset, as before. If corruption occurs
-anywhere outside of the header, the corrupt frame will be skipped,
-leaving the connection intact and avoiding the loss of any messages
-unnecessarily.
-
-Previously, any issue at any point in the stream would result in the
-connection being reset, with the loss of any in-flight messages.
-
-=== Efficiency
-
-The overall memory usage, and number of byte shuffles, on both inbound
-and outbound messages is reduced.
-
-Outbound the Netty LZ4 encoder maintains a chunk size buffer (64KiB),
-that is filled before any compressed frame can be produced. Our frame
-encoders avoid this redundant copy, as well as freeing 192KiB per
-endpoint.
-
-Inbound, frame decoders guarantee only to copy the number of bytes
-necessary to parse a frame, and to never store more bytes than
-necessary. This improvement applies twice to LZ4 connections, improving
-both the message decode and the LZ4 frame decode.
-
-=== Inbound Path
-
-Version 4.0 introduces several improvements to the inbound path.
-
-An appropriate message handler is used based on whether large or small
-messages are expected on a particular connection as set in a flag.
-`NonblockingBufferHandler`, running on event loop, is used for small
-messages, and `BlockingBufferHandler`, running off event loop, for large
-messages. The single implementation of `InboundMessageHandler` handles
-messages of any size effectively by deriving size of the incoming
-message from the byte stream. In addition to deriving size of the
-message from the stream, incoming message expiration time is proactively
-read, before attempting to deserialize the entire message. If it’s
-expired at the time when a message is encountered the message is just
-skipped in the byte stream altogether. And if a message fails to be
-deserialized while still on the receiving side - say, because of table
-id or column being unknown - bytes are skipped, without dropping the
-entire connection and losing all the buffered messages. An immediately
-reply back is sent to the coordinator node with the failure reason,
-rather than waiting for the coordinator callback to expire. This logic
-is extended to a corrupted frame; a corrupted frame is safely skipped
-over without dropping the connection.
-
-Inbound path imposes strict limits on memory utilization. Specifically,
-the memory occupied by all parsed, but unprocessed messages is bound -
-on per-connection, per-endpoint, and global basis. Once a connection
-exceeds its local unprocessed capacity and cannot borrow any permits
-from per-endpoint and global reserve, it simply stops processing further
-messages, providing natural backpressure - until sufficient capacity is
-regained.
-
-=== Outbound Connections
-
-==== Opening a connection
-
-A consistent approach is adopted for all kinds of failure to connect,
-including: refused by endpoint, incompatible versions, or unexpected
-exceptions;
-
-* Retry forever, until either success or no messages waiting to deliver.
-* Wait incrementally longer periods before reconnecting, up to a maximum
-of 1s.
-* While failing to connect, no reserve queue limits are acquired.
-
-==== Closing a connection
-
-* Correctly drains outbound messages that are waiting to be delivered
-(unless disconnected and fail to reconnect).
-* Messages written to a closing connection are either delivered or
-rejected, with a new connection being opened if the old is irrevocably
-closed.
-* Unused connections are pruned eventually.
-
-==== Reconnecting
-
-We sometimes need to reconnect a perfectly valid connection, e.g. if the
-preferred IP address changes. We ensure that the underlying connection
-has no in-progress operations before closing it and reconnecting.
-
-==== Message Failure
-
-Propagates to callbacks instantly, better preventing overload by
-reclaiming committed memory.
-
-===== Expiry
-
-* No longer experiences head-of-line blocking (e.g. undroppable message
-preventing all droppable messages from being expired).
-* While overloaded, expiry is attempted eagerly on enqueuing threads.
-* While disconnected we schedule regular pruning, to handle the case
-where messages are no longer being sent, but we have a large backlog to
-expire.
-
-===== Overload
-
-* Tracked by bytes queued, as opposed to number of messages.
-
-===== Serialization Errors
-
-* Do not result in the connection being invalidated; the message is
-simply completed with failure, and then erased from the frame.
-* Includes detected mismatch between calculated serialization size to
-actual.
-
-Failures to flush to network, perhaps because the connection has been
-reset are not currently notified to callback handlers, as the necessary
-information has been discarded, though it would be possible to do so in
-future if we decide it is worth our while.
-
-==== QoS
-
-"Gossip" connection has been replaced with a general purpose "Urgent"
-connection, for any small messages impacting system stability.
-
-==== Metrics
-
-We track, and expose via Virtual Table and JMX, the number of messages
-and bytes that: we could not serialize or flush due to an error, we
-dropped due to overload or timeout, are pending, and have successfully
-sent.
-
-== Added a Message size limit
-
-Cassandra pre-4.0 doesn't protect the server from allocating huge
-buffers for the inter-node Message objects. Adding a message size limit
-would be good to deal with issues such as a malfunctioning cluster
-participant. Version 4.0 introduced max message size config param, akin
-to max mutation size - set to endpoint reserve capacity by default.
-
-== Recover from unknown table when deserializing internode messages
-
-As discussed in
-(https://issues.apache.org/jira/browse/CASSANDRA-9289[CASSANDRA-9289])
-it would be nice to gracefully recover from seeing an unknown table in a
-message from another node. Pre-4.0, we close the connection and
-reconnect, which can cause other concurrent queries to fail. Version 4.0
-fixes the issue by wrapping message in-stream with
-`TrackedDataInputPlus`, catching `UnknownCFException`, and skipping the
-remaining bytes in this message. TCP won't be closed and it will remain
-connected for other messages.
diff --git a/doc/modules/cassandra/pages/new/streaming.adoc b/doc/modules/cassandra/pages/new/streaming.adoc
deleted file mode 100644
index 991bec7..0000000
--- a/doc/modules/cassandra/pages/new/streaming.adoc
+++ /dev/null
@@ -1,217 +0,0 @@
-= Improved Streaming
-
-Apache Cassandra 4.0 has made several improvements to streaming.
-Streaming is the process used by nodes of a cluster to exchange data in
-the form of SSTables. Streaming of SSTables is performed for several
-operations, such as:
-
-* SSTable Repair
-* Host Replacement
-* Range movements
-* Bootstrapping
-* Rebuild
-* Cluster expansion
-
-== Streaming based on Netty
-
-Streaming in Cassandra 4.0 is based on Non-blocking Input/Output (NIO)
-with Netty
-(https://issues.apache.org/jira/browse/CASSANDRA-12229[CASSANDRA-12229]).
-It replaces the single-threaded (or sequential), synchronous, blocking
-model of streaming messages and transfer of files. Netty supports
-non-blocking, asynchronous, multi-threaded streaming with which multiple
-connections are opened simultaneously. Non-blocking implies that threads
-are not blocked as they don’t wait for a response for a sent request. A
-response could be returned in a different thread. With asynchronous,
-connections and threads are decoupled and do not have a 1:1 relation.
-Several more connections than threads may be opened.
-
-== Zero Copy Streaming
-
-Pre-4.0, during streaming Cassandra reifies the SSTables into objects.
-This creates unnecessary garbage and slows down the whole streaming
-process as some SSTables can be transferred as a whole file rather than
-individual partitions. Cassandra 4.0 has added support for streaming
-entire SSTables when possible
-(https://issues.apache.org/jira/browse/CASSANDRA-14556[CASSANDRA-14556])
-for faster Streaming using ZeroCopy APIs. If enabled, Cassandra will use
-ZeroCopy for eligible SSTables significantly speeding up transfers and
-increasing throughput. A zero-copy path avoids bringing data into
-user-space on both sending and receiving side. Any streaming related
-operations will notice corresponding improvement. Zero copy streaming is
-hardware bound; only limited by the hardware limitations (Network and
-Disk IO ).
-
-=== High Availability
-
-In benchmark tests Zero Copy Streaming is 5x faster than partitions
-based streaming. Faster streaming provides the benefit of improved
-availability. A cluster’s recovery mainly depends on the streaming
-speed, Cassandra clusters with failed nodes will be able to recover much
-more quickly (5x faster). If a node fails, SSTables need to be streamed
-to a replacement node. During the replacement operation, the new
-Cassandra node streams SSTables from the neighboring nodes that hold
-copies of the data belonging to this new node’s token range. Depending
-on the amount of data stored, this process can require substantial
-network bandwidth, taking some time to complete. The longer these range
-movement operations take, the more the cluster availability is lost.
-Failure of multiple nodes would reduce high availability greatly. The
-faster the new node completes streaming its data, the faster it can
-serve traffic, increasing the availability of the cluster.
-
-=== Enabling Zero Copy Streaming
-
-Zero copy streaming is enabled by setting the following setting in
-`cassandra.yaml`.
-
-....
-stream_entire_sstables: true
-....
-
-By default zero copy streaming is enabled.
-
-=== SSTables Eligible for Zero Copy Streaming
-
-Zero copy streaming is used if all partitions within the SSTable need to
-be transmitted. This is common when using `LeveledCompactionStrategy` or
-when partitioning SSTables by token range has been enabled. All
-partition keys in the SSTables are iterated over to determine the
-eligibility for Zero Copy streaming.
-
-=== Benefits of Zero Copy Streaming
-
-When enabled, it permits Cassandra to zero-copy stream entire eligible
-SSTables between nodes, including every component. This speeds up the
-network transfer significantly subject to throttling specified by
-`stream_throughput_outbound_megabits_per_sec`.
-
-Enabling this will reduce the GC pressure on sending and receiving node.
-While this feature tries to keep the disks balanced, it cannot guarantee
-it. This feature will be automatically disabled if internode encryption
-is enabled. Currently this can be used with Leveled Compaction.
-
-=== Configuring for Zero Copy Streaming
-
-Throttling would reduce the streaming speed. The
-`stream_throughput_outbound_megabits_per_sec` throttles all outbound
-streaming file transfers on a node to the given total throughput in
-Mbps. When unset, the default is 200 Mbps or 25 MB/s.
-
-....
-stream_throughput_outbound_megabits_per_sec: 200
-....
-
-To run any Zero Copy streaming benchmark the
-`stream_throughput_outbound_megabits_per_sec` must be set to a really
-high value otherwise, throttling will be significant and the benchmark
-results will not be meaningful.
-
-The `inter_dc_stream_throughput_outbound_megabits_per_sec` throttles all
-streaming file transfer between the datacenters, this setting allows
-users to throttle inter dc stream throughput in addition to throttling
-all network stream traffic as configured with
-`stream_throughput_outbound_megabits_per_sec`. When unset, the default
-is 200 Mbps or 25 MB/s.
-
-....
-inter_dc_stream_throughput_outbound_megabits_per_sec: 200
-....
-
-=== SSTable Components Streamed with Zero Copy Streaming
-
-Zero Copy Streaming streams entire SSTables. SSTables are made up of
-multiple components in separate files. SSTable components streamed are
-listed in Table 1.
-
-Table 1. SSTable Components
-
-[width="98%",cols="27%,73%",]
-|===
-|SSTable Component |Description
-
-|Data.db |The base data for an SSTable: the remaining components can be
-regenerated based on the data component.
-
-|Index.db |Index of the row keys with pointers to their positions in the
-data file.
-
-|Filter.db |Serialized bloom filter for the row keys in the SSTable.
-
-|CompressionInfo.db |File to hold information about uncompressed data
-length, chunk offsets etc.
-
-|Statistics.db |Statistical metadata about the content of the SSTable.
-
-|Digest.crc32 |Holds CRC32 checksum of the data file size_bytes.
-
-|CRC.db |Holds the CRC32 for chunks in an uncompressed file.
-
-|Summary.db |Holds SSTable Index Summary (sampling of Index component)
-
-|TOC.txt |Table of contents, stores the list of all components for the
-SSTable.
-|===
-
-Custom component, used by e.g. custom compaction strategy may also be
-included.
-
-== Repair Streaming Preview
-
-Repair with `nodetool repair` involves streaming of repaired SSTables
-and a repair preview has been added to provide an estimate of the amount
-of repair streaming that would need to be performed. Repair preview
-(https://issues.apache.org/jira/browse/CASSANDRA-13257[CASSANDRA-13257])
-is invoked with `nodetool repair --preview` using the option:
-
-....
--prv, --preview
-....
-
-It determines ranges and amount of data to be streamed, but doesn't
-actually perform repair.
-
-== Parallelizing of Streaming of Keyspaces
-
-The streaming of the different keyspaces for bootstrap and rebuild has
-been parallelized in Cassandra 4.0
-(https://issues.apache.org/jira/browse/CASSANDRA-4663[CASSANDRA-4663]).
-
-== Unique nodes for Streaming in Multi-DC deployment
-
-Range Streamer picks unique nodes to stream data from when number of
-replicas in each DC is three or more
-(https://issues.apache.org/jira/browse/CASSANDRA-4650[CASSANDRA-4650]).
-What the optimization does is to even out the streaming load across the
-cluster. Without the optimization, some nodes may be picked to stream
-more data than others. This change makes it possible to select a dedicated
-node to stream only one range.
-
-This will increase the performance of bootstrapping a node and will also
-put less pressure on nodes serving the data. This does not affect if N <
-3 in each DC as then it streams data from only 2 nodes.
-
-== Stream Operation Types
-
-It is important to know the type or purpose of a certain stream. Version
-4.0
-(https://issues.apache.org/jira/browse/CASSANDRA-13064[CASSANDRA-13064])
-adds an `enum` to distinguish between the different types of streams.
-Stream types are available both in a stream request and a stream task.
-The different stream types are:
-
-* Restore replica count
-* Unbootstrap
-* Relocation
-* Bootstrap
-* Rebuild
-* Bulk Load
-* Repair
-
-== Disallow Decommission when number of Replicas will drop below configured RF
-
-https://issues.apache.org/jira/browse/CASSANDRA-12510[CASSANDRA-12510]
-guards against decommission that will drop # of replicas below
-configured replication factor (RF), and adds the `--force` option that
-allows decommission to continue if intentional; force decommission of
-this node even when it reduces the number of replicas to below
-configured RF.
diff --git a/doc/modules/cassandra/pages/new/transientreplication.adoc b/doc/modules/cassandra/pages/new/transientreplication.adoc
deleted file mode 100644
index c939497..0000000
--- a/doc/modules/cassandra/pages/new/transientreplication.adoc
+++ /dev/null
@@ -1,186 +0,0 @@
-= Transient Replication
-
-*Note*:
-
-Transient Replication
-(https://issues.apache.org/jira/browse/CASSANDRA-14404[CASSANDRA-14404])
-is an experimental feature designed for expert Apache Cassandra users
-who are able to validate every aspect of the database for their
-application and deployment. That means being able to check that
-operations like reads, writes, decommission, remove, rebuild, repair,
-and replace all work with your queries, data, configuration, operational
-practices, and availability requirements. Apache Cassandra 4.0 has the
-initial implementation of transient replication. Future releases of
-Cassandra will make this feature suitable for a wider audience. It is
-anticipated that a future version will support monotonic reads with
-transient replication as well as LWT, logged batches, and counters.
-Being experimental, Transient replication is *not* recommended for
-production use.
-
-== Objective
-
-The objective of transient replication is to decouple storage
-requirements from data redundancy (or consensus group size) using
-incremental repair, in order to reduce storage overhead. Certain nodes
-act as full replicas (storing all the data for a given token range), and
-some nodes act as transient replicas, storing only unrepaired data for
-the same token ranges.
-
-The optimization that is made possible with transient replication is
-called "Cheap quorums", which implies that data redundancy is increased
-without corresponding increase in storage usage.
-
-Transient replication is useful when sufficient full replicas are
-unavailable to receive and store all the data. Transient replication
-allows you to configure a subset of replicas to only replicate data that
-hasn't been incrementally repaired. As an optimization, we can avoid
-writing data to a transient replica if we have successfully written data
-to the full replicas.
-
-After incremental repair, transient data stored on transient replicas
-can be discarded.
-
-== Enabling Transient Replication
-
-Transient replication is not enabled by default. Transient replication
-must be enabled on each node in a cluster separately by setting the
-following configuration property in `cassandra.yaml`.
-
-....
-enable_transient_replication: true
-....
-
-Transient replication may be configured with both `SimpleStrategy` and
-`NetworkTopologyStrategy`. Transient replication is configured by
-setting replication factor as `<total_replicas>/<transient_replicas>`.
-
-As an example, create a keyspace with replication factor (RF) 3.
-
-....
-CREATE KEYSPACE CassandraKeyspaceSimple WITH replication = {'class': 'SimpleStrategy',
-'replication_factor' : 4/1};
-....
-
-As another example, `some_keysopace keyspace` will have 3 replicas in
-DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are
-transient:
-
-....
-CREATE KEYSPACE some_keysopace WITH replication = {'class': 'NetworkTopologyStrategy',
-'DC1' : '3/1'', 'DC2' : '5/2'};
-....
-
-Transiently replicated keyspaces only support tables with `read_repair`
-set to `NONE`.
-
-Important Restrictions:
-
-* RF cannot be altered while some endpoints are not in a normal state
-(no range movements).
-* You can't add full replicas if there are any transient replicas. You
-must first remove all transient replicas, then change the # of full
-replicas, then add back the transient replicas.
-* You can only safely increase number of transients one at a time with
-incremental repair run in between each time.
-
-Additionally, transient replication cannot be used for:
-
-* Monotonic Reads
-* Lightweight Transactions (LWTs)
-* Logged Batches
-* Counters
-* Keyspaces using materialized views
-* Secondary indexes (2i)
-
-== Cheap Quorums
-
-Cheap quorums are a set of optimizations on the write path to avoid
-writing to transient replicas unless sufficient full replicas are not
-available to satisfy the requested consistency level. Hints are never
-written for transient replicas. Optimizations on the read path prefer
-reading from transient replicas. When writing at quorum to a table
-configured to use transient replication the quorum will always prefer
-available full replicas over transient replicas so that transient
-replicas don't have to process writes. Tail latency is reduced by rapid
-write protection (similar to rapid read protection) when full replicas
-are slow or unavailable by sending writes to transient replicas.
-Transient replicas can serve reads faster as they don't have to do
-anything beyond bloom filter checks if they have no data. With vnodes
-and large cluster sizes they will not have a large quantity of data even
-for failure of one or more full replicas where transient replicas start
-to serve a steady amount of write traffic for some of their transiently
-replicated ranges.
-
-== Speculative Write Option
-
-The `CREATE TABLE` adds an option `speculative_write_threshold` for use
-with transient replicas. The option is of type `simple` with default
-value as `99PERCENTILE`. When replicas are slow or unresponsive
-`speculative_write_threshold` specifies the threshold at which a cheap
-quorum write will be upgraded to include transient replicas.
-
-== Pending Ranges and Transient Replicas
-
-Pending ranges refers to the movement of token ranges between transient
-replicas. When a transient range is moved, there will be a period of
-time where both transient replicas would need to receive any write
-intended for the logical transient replica so that after the movement
-takes effect a read quorum is able to return a response. Nodes are _not_
-temporarily transient replicas during expansion. They stream data like a
-full replica for the transient range before they can serve reads. A
-pending state is incurred similar to how there is a pending state for
-full replicas. Transient replicas also always receive writes when they
-are pending. Pending transient ranges are sent a bit more data and
-reading from them is avoided.
-
-== Read Repair and Transient Replicas
-
-Read repair never attempts to repair a transient replica. Reads will
-always include at least one full replica. They should also prefer
-transient replicas where possible. Range scans ensure the entire scanned
-range performs replica selection that satisfies the requirement that
-every range scanned includes one full replica. During incremental &
-validation repair handling, at transient replicas anti-compaction does
-not output any data for transient ranges as the data will be dropped
-after repair, and transient replicas never have data streamed to them.
-
-== Transitioning between Full Replicas and Transient Replicas
-
-The additional state transitions that transient replication introduces
-requires streaming and `nodetool cleanup` to behave differently. When
-data is streamed it is ensured that it is streamed from a full replica
-and not a transient replica.
-
-Transitioning from not replicated to transiently replicated means that a
-node must stay pending until the next incremental repair completes at
-which point the data for that range is known to be available at full
-replicas.
-
-Transitioning from transiently replicated to fully replicated requires
-streaming from a full replica and is identical to how data is streamed
-when transitioning from not replicated to replicated. The transition is
-managed so the transient replica is not read from as a full replica
-until streaming completes. It can be used immediately for a write
-quorum.
-
-Transitioning from fully replicated to transiently replicated requires
-cleanup to remove repaired data from the transiently replicated range to
-reclaim space. It can be used immediately for a write quorum.
-
-Transitioning from transiently replicated to not replicated requires
-cleanup to be run to remove the formerly transiently replicated data.
-
-When transient replication is in use ring changes are supported
-including add/remove node, change RF, add/remove DC.
-
-== Transient Replication supports EACH_QUORUM
-
-(https://issues.apache.org/jira/browse/CASSANDRA-14727[CASSANDRA-14727])
-adds support for Transient Replication support for `EACH_QUORUM`. Per
-(https://issues.apache.org/jira/browse/CASSANDRA-14768[CASSANDRA-14768]),
-we ensure we write to at least a `QUORUM` of nodes in every DC,
-regardless of how many responses we need to wait for and our requested
-consistency level. This is to minimally surprise users with transient
-replication; with normal writes, we soft-ensure that we reach `QUORUM`
-in all DCs we are able to, by writing to every node; even if we don't
-wait for ACK, we have in both cases sent sufficient messages.
diff --git a/doc/modules/cassandra/pages/new/virtualtables.adoc b/doc/modules/cassandra/pages/new/virtualtables.adoc
index b18ba31..7a7a4be 100644
--- a/doc/modules/cassandra/pages/new/virtualtables.adoc
+++ b/doc/modules/cassandra/pages/new/virtualtables.adoc
@@ -74,8 +74,12 @@
 
 |coordinator_write_latency |Records counts, keyspace_name, table_name, max, median, and per_second for coordinator writes.
 
+|cql_metrics |Metrics specific to CQL prepared statement caching.
+
 |disk_usage |Records disk usage including disk_space, keyspace_name, and table_name, sorted by system keyspaces.
 
+|gossip_info |Lists the gossip information for the cluster.
+
 |internode_inbound |Lists information about the inbound internode messaging.
 
 |internode_outbound |Information about the outbound internode messaging.
@@ -99,6 +103,7 @@
 |thread_pools |Lists metrics for each thread pool.
 
 |tombstones_per_read |Records counts, keyspace_name, table_name, max, and median for tombstones.
+
 |===
 
 We shall discuss some of the virtual tables in more detail next.
@@ -106,18 +111,80 @@
 === Clients Virtual Table
 
 The `clients` virtual table lists all active connections (connected
-clients) including their ip address, port, connection stage, driver
+clients) including their ip address, port, client_options, connection stage, driver
 name, driver version, hostname, protocol version, request count, ssl
 enabled, ssl protocol and user name:
 
 ....
-cqlsh:system_views> select * from system_views.clients;
- address   | port  | connection_stage | driver_name | driver_version | hostname  | protocol_version | request_count | ssl_cipher_suite | ssl_enabled | ssl_protocol | username
------------+-------+------------------+-------------+----------------+-----------+------------------+---------------+------------------+-------------+--------------+-----------
- 127.0.0.1 | 50628 |            ready |        null |           null | localhost |                4 |            55 |             null |       False |         null | anonymous
- 127.0.0.1 | 50630 |            ready |        null |           null | localhost |                4 |            70 |             null |       False |         null | anonymous
+cqlsh> EXPAND ON ;
+Now Expanded output is enabled
+cqlsh> SELECT * FROM system_views.clients;
 
-(2 rows)
+@ Row 1
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50687
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ connection_stage | ready
+ driver_name      | DataStax Python Driver
+ driver_version   | 3.25.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 16
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+@ Row 2
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50688
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ connection_stage | ready
+ driver_name      | DataStax Python Driver
+ driver_version   | 3.25.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 4
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+@ Row 3
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50753
+ client_options   | {'APPLICATION_NAME': 'TestApp', 'APPLICATION_VERSION': '1.0.0', 'CLIENT_ID': '55b3efbd-c56b-469d-8cca-016b860b2f03', 'CQL_VERSION': '3.0.0', 'DRIVER_NAME': 'DataStax Java driver for Apache Cassandra(R)', 'DRIVER_VERSION': '4.13.0'}
+ connection_stage | ready
+ driver_name      | DataStax Java driver for Apache Cassandra(R)
+ driver_version   | 4.13.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 18
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+@ Row 4
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50755
+ client_options   | {'APPLICATION_NAME': 'TestApp', 'APPLICATION_VERSION': '1.0.0', 'CLIENT_ID': '55b3efbd-c56b-469d-8cca-016b860b2f03', 'CQL_VERSION': '3.0.0', 'DRIVER_NAME': 'DataStax Java driver for Apache Cassandra(R)', 'DRIVER_VERSION': '4.13.0'}
+ connection_stage | ready
+ driver_name      | DataStax Java driver for Apache Cassandra(R)
+ driver_version   | 4.13.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 7
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+(4 rows)
 ....
 
 Some examples of how `clients` can be used are:
@@ -127,29 +194,36 @@
 `nodetool disableoldprotocolversions` during upgrades.
 * To identify clients sending too many requests.
 * To find if SSL is enabled during the migration to and from ssl.
+* To identify all options the client is sending, e.g. `APPLICATION_NAME` and `APPLICATION_VERSION` (see the example below).
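+
+For instance, a quick way to see both the request volume and the options each connected client sent (illustrative query; the columns are those shown in the `clients` output above):
+
+....
+cqlsh> SELECT address, port, request_count, client_options FROM system_views.clients;
+....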
 
 The virtual tables may be described with the `DESCRIBE` statement. The DDL
 listed, however, cannot be run to create a virtual table. As an example,
 describe the `system_views.clients` virtual table:
 
 ....
-cqlsh:system_views> DESC TABLE system_views.clients;
-CREATE TABLE system_views.clients (
-  address inet,
-  connection_stage text,
-  driver_name text,
-  driver_version text,
-  hostname text,
-  port int,
-  protocol_version int,
-  request_count bigint,
-  ssl_cipher_suite text,
-  ssl_enabled boolean,
-  ssl_protocol text,
-  username text,
-  PRIMARY KEY (address, port)) WITH CLUSTERING ORDER BY (port ASC)
-  AND compaction = {'class': 'None'}
-  AND compression = {};
+cqlsh> DESCRIBE TABLE system_views.clients;
+
+/*
+Warning: Table system_views.clients is a virtual table and cannot be recreated with CQL.
+Structure, for reference:
+VIRTUAL TABLE system_views.clients (
+    address inet,
+    port int,
+    client_options frozen<map<text, text>>,
+    connection_stage text,
+    driver_name text,
+    driver_version text,
+    hostname text,
+    protocol_version int,
+    request_count bigint,
+    ssl_cipher_suite text,
+    ssl_enabled boolean,
+    ssl_protocol text,
+    username text,
+    PRIMARY KEY (address, port)
+) WITH CLUSTERING ORDER BY (port ASC)
+    AND comment = 'currently connected clients';
+*/
 ....
 
 === Caches Virtual Table
@@ -170,6 +244,21 @@
 (4 rows)
 ....
 
+=== CQL metrics Virtual Table
+
+The `cql_metrics` virtual table lists metrics specific to CQL prepared statement caching. A query on the `cql_metrics` virtual table returns the metrics below.
+
+....
+cqlsh> select * from system_views.cql_metrics ;
+
+ name                         | value
+------------------------------+-------
+    prepared_statements_count |     0
+  prepared_statements_evicted |     0
+ prepared_statements_executed |     0
+    prepared_statements_ratio |     0
+  regular_statements_executed |    17
+....
+
 === Settings Virtual Table
 
 The `settings` table is rather useful and lists all the current
@@ -188,7 +277,7 @@
   auto_snapshot                      | true
   automatic_sstable_upgrade          | false
   cluster_name                       | Test Cluster
-  enable_transient_replication       | false
+  transient_replication_enabled      | false
   hinted_handoff_enabled             | true
   hints_directory                    | /home/ec2-user/cassandra/data/hints
   incremental_backups                | false
@@ -290,6 +379,24 @@
 FROM system_views.sstable_tasks;
 ....
 
+=== Gossip Information Virtual Table
+
+The `gossip_info` virtual table lists the Gossip information for the cluster. An example query is as follows:
+
+....
+cqlsh> select address, port, generation, heartbeat, load, dc, rack from system_views.gossip_info;
+
+ address   | port | generation | heartbeat | load    | dc          | rack
+-----------+------+------------+-----------+---------+-------------+-------
+ 127.0.0.1 | 7000 | 1645575140 |       312 | 70542.0 | datacenter1 | rack1
+ 127.0.0.2 | 7000 | 1645575135 |       318 | 70499.0 | datacenter1 | rack1
+ 127.0.0.3 | 7000 | 1645575140 |       312 | 70504.0 | datacenter1 | rack1
+ 127.0.0.4 | 7000 | 1645575141 |       311 | 70502.0 | datacenter1 | rack1
+ 127.0.0.5 | 7000 | 1645575136 |       315 | 70500.0 | datacenter1 | rack1
+
+(5 rows)
+....
+
 === Other Virtual Tables
 
 Some examples of using other virtual tables are as follows.
diff --git a/doc/modules/cassandra/pages/new/auditlogging.adoc b/doc/modules/cassandra/pages/operating/auditlogging.adoc
similarity index 100%
rename from doc/modules/cassandra/pages/new/auditlogging.adoc
rename to doc/modules/cassandra/pages/operating/auditlogging.adoc
diff --git a/doc/modules/cassandra/pages/operating/bulk_loading.adoc b/doc/modules/cassandra/pages/operating/bulk_loading.adoc
index 2b11f27..0d85ae4 100644
--- a/doc/modules/cassandra/pages/operating/bulk_loading.adoc
+++ b/doc/modules/cassandra/pages/operating/bulk_loading.adoc
@@ -80,13 +80,23 @@
 -d,--nodes <initial hosts>                                   Required.
                                                              Try to connect to these hosts (comma separated) initially for ring information
 
+--entire-sstable-throttle-mib <throttle-mib>                 Entire SSTable throttle
+                                                             speed in MiB/s (default 0 for unlimited).
+
+--entire-sstable-inter-dc-throttle-mib <inter-dc-throttle-mib>
+                                                             Entire SSTable inter-datacenter throttle
+                                                             speed in MiB/s (default 0 for unlimited).
+
 -f,--conf-path <path to config file>                         cassandra.yaml file path for streaming throughput and client/server SSL.
 
 -h,--help                                                    Display this help message
 
 -i,--ignore <NODES>                                          Don't stream to this (comma separated) list of nodes
 
--idct,--inter-dc-throttle <inter-dc-throttle>                Inter-datacenter throttle speed in Mbits (default unlimited)
+-idct,--inter-dc-throttle <inter-dc-throttle>                (deprecated) Inter-datacenter throttle speed in Mbits (default 0 for unlimited).
+                                                             Use --inter-dc-throttle-mib instead.
+
+--inter-dc-throttle-mib <inter-dc-throttle-mib>              Inter-datacenter throttle speed in MiB/s (default 0 for unlimited)
 
 -k,--target-keyspace <target keyspace name>                  Target
                                                              keyspace name
@@ -111,8 +121,10 @@
                                                              for TLS internode communication (default 7001)
 -st,--store-type <STORE-TYPE>                                Client SSL:
                                                              type of store
--t,--throttle <throttle>                                     Throttle
-                                                             speed in Mbits (default unlimited)
+-t,--throttle <throttle>                                     (deprecated) Throttle speed in Mbits (default 0 for unlimited).
+                                                             Use --throttle-mib instead.
+--throttle-mib <throttle-mib>                                Throttle
+                                                             speed in MiB/s (default 0 for unlimited)
 -ts,--truststore <TRUSTSTORE>                                Client SSL:
                                                              full path to truststore
 -tspw,--truststore-password <TRUSTSTORE-PASSWORD>            Client SSL:
@@ -821,13 +833,17 @@
 variables will be bound to values by the resulting SSTable writer. This
 is a mandatory option.
 
-|withBufferSizeInMB(int size) |The size of the buffer to use. This
+|withBufferSizeInMiB(int size) |The size of the buffer to use. This
 defines how much data will be buffered before being written as a new
 SSTable. This corresponds roughly to the data size that the created
 SSTable will have. The default is 128MB, which should be reasonable for a
 1GB heap. If an OutOfMemory exception is thrown while using the
 SSTable writer, lower this value.
 
+|withBufferSizeInMB(int size) |Deprecated; it will remain available
+at least until the next major release. Please use withBufferSizeInMiB(int size),
+which is the same method under a new name.
+
 |sorted() |Creates a CQLSSTableWriter that expects sorted inputs. If
 this option is used, the resulting SSTable writer will expect rows to be
 added in SSTable sorted order (and an exception will be thrown if that
diff --git a/doc/modules/cassandra/pages/operating/cdc.adoc b/doc/modules/cassandra/pages/operating/cdc.adoc
index b0d5c19..e12decd 100644
--- a/doc/modules/cassandra/pages/operating/cdc.adoc
+++ b/doc/modules/cassandra/pages/operating/cdc.adoc
@@ -16,12 +16,17 @@
 human-readable word "COMPLETED" will be added to the _cdc.idx file
 indicating that Cassandra has completed all processing on the file.
 
-We we use an index file rather than just encouraging clients to parse
+We use an index file rather than just encouraging clients to parse
 the log realtime off a memory mapped handle as data can be reflected in
 a kernel buffer that is not yet persisted to disk. Parsing only up to
 the listed offset in the _cdc.idx file will ensure that you only parse
 CDC data for data that is durable.
 
+Please note that in rare cases, e.g. with a slow disk, it is possible for the
+consumer to read an empty value from the _cdc.idx file, because the update is
+performed by first truncating the file and then writing to it. In such a
+case, the consumer should retry reading the index file.
+
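+For illustration, a `_cdc.idx` file contains the latest durable offset and, once Cassandra has finished with the segment, the word COMPLETED on the following line (the segment name and offset below are made up):
+
+....
+$ cat CommitLog-7-1645575140123_cdc.idx
+33212
+COMPLETED
+....
+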
 A threshold of total disk space allowed is specified in the yaml at
 which time newly allocated CommitLogSegments will not allow CDC data
 until a consumer parses and removes files from the specified cdc_raw
@@ -52,10 +57,10 @@
 `cdc_raw_directory` (default: `$CASSANDRA_HOME/data/cdc_raw`)::
   Destination for CommitLogSegments to be moved after all corresponding
   memtables are flushed.
-`cdc_free_space_in_mb`: (default: min of 4096 and 1/8th volume space)::
+`cdc_total_space` (default: min of 4096MiB and 1/8th volume space)::
   Calculated as sum of all active CommitLogSegments that permit CDC +
   all flushed CDC segments in `cdc_raw_directory`.
-`cdc_free_space_check_interval_ms` (default: 250)::
+`cdc_free_space_check_interval` (default: 250ms)::
   When at capacity, we limit the frequency with which we re-calculate
   the space taken up by `cdc_raw_directory` to prevent burning CPU
   cycles unnecessarily. Default is to check 4 times per second.
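+
+Putting the relevant settings together, a minimal `cassandra.yaml` sketch using the new names might look like the following (the values are illustrative, not recommendations):
+
+....
+cdc_enabled: true
+cdc_raw_directory: /var/lib/cassandra/cdc_raw
+cdc_total_space: 4096MiB
+cdc_free_space_check_interval: 250ms
+....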
diff --git a/doc/modules/cassandra/pages/new/fqllogging.adoc b/doc/modules/cassandra/pages/operating/fqllogging.adoc
similarity index 100%
rename from doc/modules/cassandra/pages/new/fqllogging.adoc
rename to doc/modules/cassandra/pages/operating/fqllogging.adoc
diff --git a/doc/modules/cassandra/pages/operating/hints.adoc b/doc/modules/cassandra/pages/operating/hints.adoc
index 567e0cd..4de2cb7 100644
--- a/doc/modules/cassandra/pages/operating/hints.adoc
+++ b/doc/modules/cassandra/pages/operating/hints.adoc
@@ -39,7 +39,7 @@
 to the client. If a replica node is unavailable, however, the
 coordinator stores a hint locally to the filesystem for later
 application. New hints will be retained for up to
-`max_hint_window_in_ms` of downtime (defaults to `3 hours`). If the
+`max_hint_window` of downtime (defaults to `3h`). If the
 unavailable replica does return to the cluster before the window
 expires, the coordinator applies any pending hinted mutations against
 the replica to ensure that eventual consistency is maintained.
@@ -87,18 +87,18 @@
 === Hints for Timed Out Write Requests
 
 Hints are also stored for write requests that time out. The
-`write_request_timeout_in_ms` setting in `cassandra.yaml` configures the
+`write_request_timeout` setting in `cassandra.yaml` configures the
 timeout for write requests.
 
 [source,none]
 ----
-write_request_timeout_in_ms: 2000
+write_request_timeout: 2000ms
 ----
 
 The coordinator waits for the configured amount of time for write
 requests to complete, at which point it will time out and generate a
 hint for the timed out request. The lowest acceptable value for
-`write_request_timeout_in_ms` is 10 ms.
+`write_request_timeout` is 10 ms.
 
 == Configuring Hints
 
@@ -127,15 +127,15 @@
 
 |`unset`
 
-|`max_hint_window_in_ms` |Defines the maximum amount of time (ms) a node
-shall have hints generated after it has failed. |`10800000` # 3 hours
+|`max_hint_window` |Defines the maximum amount of time a node
+shall have hints generated after it has failed. |`3h` 
 
-|`hinted_handoff_throttle_in_kb` |Maximum throttle in KBs per second,
+|`hinted_handoff_throttle` |Maximum throttle in KiBs per second,
 per delivery thread. This will be reduced proportionally to the number
 of nodes in the cluster. (If there are two nodes in the cluster, each
 delivery thread will use the maximum rate; if there are 3, each will
 throttle to half of the maximum, since it is expected for two nodes to be
-delivering hints simultaneously.) |`1024`
+delivering hints simultaneously.) |`1024KiB`
 
 |`max_hints_delivery_threads` |Number of threads with which to deliver
 hints; Consider increasing this number when you have multi-dc
@@ -144,11 +144,11 @@
 |`hints_directory` |Directory where Cassandra stores hints.
 |`$CASSANDRA_HOME/data/hints`
 
-|`hints_flush_period_in_ms` |How often hints should be flushed from the
-internal buffers to disk. Will _not_ trigger fsync. |`10000`
+|`hints_flush_period` |How often hints should be flushed from the
+internal buffers to disk. Will _not_ trigger fsync. |`10000ms`
 
-|`max_hints_file_size_in_mb` |Maximum size for a single hints file, in
-megabytes. |`128`
+|`max_hints_file_size` |Maximum size for a single hints
+file. |`128MiB`
 
 |`hints_compression` |Compression to apply to the hint files. If
 omitted, hints files will be written uncompressed. LZ4, Snappy, and
@@ -222,9 +222,9 @@
 === Allow a Node to be Down Longer at Runtime
 
 Sometimes a node may be down for more than the normal
-`max_hint_window_in_ms`, (default of three hours), but the hardware and
+`max_hint_window`, (default of three hours), but the hardware and
 data itself will still be accessible. In such a case you may consider
-raising the `max_hint_window_in_ms` dynamically via the
+raising the `max_hint_window` dynamically via the
 `nodetool setmaxhintwindow` command added in Cassandra 4.0
 (https://issues.apache.org/jira/browse/CASSANDRA-11720[CASSANDRA-11720]).
 This will instruct Cassandra to continue holding hints for the down
@@ -232,7 +232,7 @@
 
 This command should be applied on all nodes in the cluster that may be
 holding hints. If needed, the setting can be applied permanently by
-setting the `max_hint_window_in_ms` setting in `cassandra.yaml` followed
+setting the `max_hint_window` setting in `cassandra.yaml` followed
 by a rolling restart.
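+
+For example, to keep holding hints for a node expected to be down for roughly six hours (the value is in milliseconds; the figure is purely illustrative):
+
+....
+nodetool setmaxhintwindow 21600000
+nodetool getmaxhintwindow
+....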
 
 == Monitoring Hint Delivery
diff --git a/doc/modules/cassandra/pages/operating/security.adoc b/doc/modules/cassandra/pages/operating/security.adoc
index a74c042..258917d 100644
--- a/doc/modules/cassandra/pages/operating/security.adoc
+++ b/doc/modules/cassandra/pages/operating/security.adoc
@@ -43,20 +43,91 @@
 https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html[the
 java document on FIPS] for more details.
 
-For information on generating the keystore and truststore files used in
-SSL communications, see the
+Cassandra provides the flexibility of using Java-based key material or of
+completely customizing the SSL context. You can choose any keystore
+format supported by Java (JKS, PKCS12, etc.) as well as other standards
+like PEM. You can even customize the SSL context creation to use cloud-native
+technologies like Kubernetes Secrets for storing the key
+material, or to integrate with your in-house Key Management System.
+
+For information on generating the keystore and truststore files
+required with the Java supported keystores used in SSL communications,
+see the
 http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore[java
-documentation on creating keystores]
+documentation on creating keystores].
+
+To customize the SSL context creation, you can implement the
+https://github.com/apache/cassandra/blob/trunk/src/java/org/apache/cassandra/security/ISslContextFactory.java[ISslContextFactory]
+interface or extend one of its public subclasses as appropriate. You
+can then point the `ssl_context_factory` setting in the
+`server_encryption_options` or `client_encryption_options` section
+at your implementation. See the https://github.com/apache/cassandra/tree/trunk/examples/ssl-factory[ssl-factory examples]
+for details, and refer to the class diagram below to understand the
+class hierarchy.
+
+image::cassandra_ssl_context_factory_pem.png[image]
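+
+As a sketch, wiring in a custom implementation could look like the following; the class name and its parameters here are hypothetical placeholders, not classes shipped with Cassandra:
+
+....
+client_encryption_options:
+    enabled: true
+    ssl_context_factory:
+        class_name: com.example.KubernetesSecretsSslContextFactory
+        parameters:
+            secret_name: cassandra-client-tls
+....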
+
+=== Using PEM based key material
+
+You can use the in-built class `PEMBasedSSLContextFactory` as the
+`ssl_context_factory` setting for the PEM based key material.
+
+You can configure this factory either with inline PEM data or with
+files containing the required PEM data, as shown below:
+
+* Configuration: PEM keys/certs defined in-line (mind the spaces in the
+YAML!)
+
+....
+   client/server_encryption_options:
+     ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+        parameters:
+            private_key: |
+             -----BEGIN ENCRYPTED PRIVATE KEY----- OR -----BEGIN PRIVATE KEY-----
+             <your base64 encoded private key>
+             -----END ENCRYPTED PRIVATE KEY----- OR -----END PRIVATE KEY-----
+             -----BEGIN CERTIFICATE-----
+             <your base64 encoded certificate chain>
+             -----END CERTIFICATE-----
+
+            private_key_password: "<your password if the private key is encrypted with a password>"
+
+            trusted_certificates: |
+              -----BEGIN CERTIFICATE-----
+              <your base64 encoded certificate>
+              -----END CERTIFICATE-----
+....
+
+* Configuration: PEM keys/certs defined in files
+....
+    client/server_encryption_options:
+     ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+     keystore: <file path to the keystore file in the PEM format with the private key and the certificate chain>
+     keystore_password: "<your password if the private key is encrypted with a password>"
+     truststore: <file path to the truststore file in the PEM format>
+....
 
 == SSL Certificate Hot Reloading
 
 Beginning with Cassandra 4, Cassandra supports hot reloading of SSL
-Certificates. If SSL/TLS support is enabled in Cassandra, the node
-periodically polls the Trust and Key Stores specified in cassandra.yaml.
-When the files are updated, Cassandra will reload them and use them for
-subsequent connections. Please note that the Trust & Key Store passwords
-are part of the yaml so the updated files should also use the same
-passwords. The default polling interval is 10 minutes.
+Certificates. If SSL/TLS support is enabled in Cassandra and you are
+using the default file-based key material, the node periodically (every
+10 minutes) polls the Trust and Key Stores specified in
+cassandra.yaml. When the files are updated, Cassandra will reload
+them and use them for subsequent connections. Please note that the
+Trust & Key Store passwords are part of the yaml, so the updated files
+should also use the same passwords.
+
+If you are customizing the SSL configuration via the `ssl_context_factory`
+setting, Cassandra polls your implementation (at the same periodic
+interval mentioned above) to check whether the SSL certificates need to be
+reloaded. See the https://github.com/apache/cassandra/blob/trunk/src/java/org/apache/cassandra/security/ISslContextFactory.java#L90[ISslContextFactory] documentation for more details.
+If you are using one of Cassandra's built-in SSL context factory
+classes (for example, PEMBasedSslContextFactory) with file-based key
+material, hot reloading of the SSL certificates is supported as
+described above.
 
 Certificate Hot reloading may also be triggered using the
 `nodetool reloadssl` command. Use this if you want Cassandra to
@@ -327,8 +398,8 @@
 more closely with Cassandra's own auth subsystem.
 
 The default settings for Cassandra make JMX accessible only from
-localhost. To enable remote JMX connections, edit `cassandra-env.sh` (or
-`cassandra-env.ps1` on Windows) to change the `LOCAL_JMX` setting to
+localhost. To enable remote JMX connections, edit `cassandra-env.sh`
+to change the `LOCAL_JMX` setting to
 `no`. Under the standard configuration, when remote JMX connections are
 enabled, `standard JMX authentication <standard-jmx-auth>` is also
 switched on.
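+
+For instance, the relevant line in `cassandra-env.sh` would read (sketch):
+
+....
+LOCAL_JMX=no
+....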
@@ -490,7 +561,7 @@
 
 JMX SSL configuration is controlled by a number of system properties,
 some of which are optional. To turn on SSL, edit the relevant lines in
-`cassandra-env.sh` (or `cassandra-env.ps1` on Windows) to uncomment and
+`cassandra-env.sh` to uncomment and
 set the values of these properties as required:
 
 `com.sun.management.jmxremote.ssl`::
diff --git a/doc/modules/cassandra/pages/operating/topo_changes.adoc b/doc/modules/cassandra/pages/operating/topo_changes.adoc
index 368056d..98b17c2 100644
--- a/doc/modules/cassandra/pages/operating/topo_changes.adoc
+++ b/doc/modules/cassandra/pages/operating/topo_changes.adoc
@@ -111,10 +111,10 @@
 the replacement process.
 
 [arabic]
-. The node is down for longer than `max_hint_window_in_ms` before being
+. The node is down for longer than `max_hint_window` before being
 replaced.
 . You are replacing using the same IP address as the dead node *and*
-replacement takes longer than `max_hint_window_in_ms`.
+replacement takes longer than `max_hint_window`.
 ====
 
 == Monitoring progress
diff --git a/doc/modules/cassandra/pages/operating/transientreplication.adoc b/doc/modules/cassandra/pages/operating/transientreplication.adoc
new file mode 100644
index 0000000..c2a8384
--- /dev/null
+++ b/doc/modules/cassandra/pages/operating/transientreplication.adoc
@@ -0,0 +1,186 @@
+= Transient Replication
+
+*Note*:
+
+Transient Replication
+(https://issues.apache.org/jira/browse/CASSANDRA-14404[CASSANDRA-14404])
+is an experimental feature designed for expert Apache Cassandra users
+who are able to validate every aspect of the database for their
+application and deployment. That means being able to check that
+operations like reads, writes, decommission, remove, rebuild, repair,
+and replace all work with your queries, data, configuration, operational
+practices, and availability requirements. Apache Cassandra 4.0 has the
+initial implementation of transient replication. Future releases of
+Cassandra will make this feature suitable for a wider audience. It is
+anticipated that a future version will support monotonic reads with
+transient replication as well as LWT, logged batches, and counters.
+Being experimental, Transient replication is *not* recommended for
+production use.
+
+== Objective
+
+The objective of transient replication is to decouple storage
+requirements from data redundancy (or consensus group size) using
+incremental repair, in order to reduce storage overhead. Certain nodes
+act as full replicas (storing all the data for a given token range), and
+some nodes act as transient replicas, storing only unrepaired data for
+the same token ranges.
+
+The optimization that is made possible with transient replication is
+called "Cheap quorums", which implies that data redundancy is increased
+without corresponding increase in storage usage.
+
+Transient replication is useful when sufficient full replicas are
+unavailable to receive and store all the data. Transient replication
+allows you to configure a subset of replicas to only replicate data that
+hasn't been incrementally repaired. As an optimization, we can avoid
+writing data to a transient replica if we have successfully written data
+to the full replicas.
+
+After incremental repair, transient data stored on transient replicas
+can be discarded.
+
+== Enabling Transient Replication
+
+Transient replication is not enabled by default. Transient replication
+must be enabled on each node in a cluster separately by setting the
+following configuration property in `cassandra.yaml`.
+
+....
+transient_replication_enabled: true
+....
+
+Transient replication may be configured with both `SimpleStrategy` and
+`NetworkTopologyStrategy`. Transient replication is configured by
+setting replication factor as `<total_replicas>/<transient_replicas>`.
+
+As an example, create a keyspace with a replication factor (RF) of 4/1, that is, 4 replicas, 1 of which is transient:
+
+....
+CREATE KEYSPACE CassandraKeyspaceSimple WITH replication = {'class': 'SimpleStrategy',
+'replication_factor' : '4/1'};
+....
+
+As another example, the `some_keysopace` keyspace will have 3 replicas in
+DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are
+transient:
+
+....
+CREATE KEYSPACE some_keysopace WITH replication = {'class': 'NetworkTopologyStrategy',
+'DC1' : '3/1', 'DC2' : '5/2'};
+....
+
+Transiently replicated keyspaces only support tables with `read_repair`
+set to `NONE`.
+
+Important Restrictions:
+
+* RF cannot be altered while some endpoints are not in a normal state
+(no range movements).
+* You can't add full replicas if there are any transient replicas. You
+must first remove all transient replicas, then change the # of full
+replicas, then add back the transient replicas.
+* You can only safely increase the number of transient replicas one at a
+time, running incremental repair in between each change.
+
+Additionally, transient replication cannot be used for:
+
+* Monotonic Reads
+* Lightweight Transactions (LWTs)
+* Logged Batches
+* Counters
+* Keyspaces using materialized views
+* Secondary indexes (2i)
+
+== Cheap Quorums
+
+Cheap quorums are a set of optimizations on the write path to avoid
+writing to transient replicas unless sufficient full replicas are not
+available to satisfy the requested consistency level. Hints are never
+written for transient replicas. Optimizations on the read path prefer
+reading from transient replicas. When writing at quorum to a table
+configured to use transient replication the quorum will always prefer
+available full replicas over transient replicas so that transient
+replicas don't have to process writes. Tail latency is reduced by rapid
+write protection (similar to rapid read protection) when full replicas
+are slow or unavailable by sending writes to transient replicas.
+Transient replicas can serve reads faster as they don't have to do
+anything beyond bloom filter checks if they have no data. With vnodes
+and large cluster sizes they will not have a large quantity of data even
+for failure of one or more full replicas where transient replicas start
+to serve a steady amount of write traffic for some of their transiently
+replicated ranges.
+
+== Speculative Write Option
+
+The `CREATE TABLE` statement adds an option `speculative_write_threshold` for use
+with transient replicas. The option is of type `simple` with a default
+value of `99PERCENTILE`. When replicas are slow or unresponsive,
+`speculative_write_threshold` specifies the threshold at which a cheap
+quorum write will be upgraded to include transient replicas.
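+
+For illustration, the threshold can be set per table using the option described above; the keyspace and table names here are placeholders:
+
+....
+ALTER TABLE ks.tbl WITH speculative_write_threshold = '99PERCENTILE';
+....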
+
+== Pending Ranges and Transient Replicas
+
+Pending ranges refers to the movement of token ranges between transient
+replicas. When a transient range is moved, there will be a period of
+time where both transient replicas would need to receive any write
+intended for the logical transient replica so that after the movement
+takes effect a read quorum is able to return a response. Nodes are _not_
+temporarily transient replicas during expansion. They stream data like a
+full replica for the transient range before they can serve reads. A
+pending state is incurred similar to how there is a pending state for
+full replicas. Transient replicas also always receive writes when they
+are pending. Pending transient ranges are sent a bit more data and
+reading from them is avoided.
+
+== Read Repair and Transient Replicas
+
+Read repair never attempts to repair a transient replica. Reads will
+always include at least one full replica. They should also prefer
+transient replicas where possible. Range scans ensure the entire scanned
+range performs replica selection that satisfies the requirement that
+every range scanned includes one full replica. During incremental &
+validation repair handling, at transient replicas anti-compaction does
+not output any data for transient ranges as the data will be dropped
+after repair, and transient replicas never have data streamed to them.
+
+== Transitioning between Full Replicas and Transient Replicas
+
+The additional state transitions that transient replication introduces
+requires streaming and `nodetool cleanup` to behave differently. When
+data is streamed it is ensured that it is streamed from a full replica
+and not a transient replica.
+
+Transitioning from not replicated to transiently replicated means that a
+node must stay pending until the next incremental repair completes at
+which point the data for that range is known to be available at full
+replicas.
+
+Transitioning from transiently replicated to fully replicated requires
+streaming from a full replica and is identical to how data is streamed
+when transitioning from not replicated to replicated. The transition is
+managed so the transient replica is not read from as a full replica
+until streaming completes. It can be used immediately for a write
+quorum.
+
+Transitioning from fully replicated to transiently replicated requires
+cleanup to remove repaired data from the transiently replicated range to
+reclaim space. It can be used immediately for a write quorum.
+
+Transitioning from transiently replicated to not replicated requires
+cleanup to be run to remove the formerly transiently replicated data.
+
+When transient replication is in use ring changes are supported
+including add/remove node, change RF, add/remove DC.
+
+== Transient Replication supports EACH_QUORUM
+
+(https://issues.apache.org/jira/browse/CASSANDRA-14727[CASSANDRA-14727])
+adds support for Transient Replication support for `EACH_QUORUM`. Per
+(https://issues.apache.org/jira/browse/CASSANDRA-14768[CASSANDRA-14768]),
+we ensure we write to at least a `QUORUM` of nodes in every DC,
+regardless of how many responses we need to wait for and our requested
+consistency level. This is to minimally surprise users with transient
+replication; with normal writes, we soft-ensure that we reach `QUORUM`
+in all DCs we are able to, by writing to every node; even if we don't
+wait for ACK, we have in both cases sent sufficient messages.
diff --git a/doc/modules/cassandra/pages/operating/virtualtables.adoc b/doc/modules/cassandra/pages/operating/virtualtables.adoc
new file mode 100644
index 0000000..2963ecb
--- /dev/null
+++ b/doc/modules/cassandra/pages/operating/virtualtables.adoc
@@ -0,0 +1,478 @@
+= Virtual Tables
+
+Apache Cassandra 4.0 implements virtual tables (https://issues.apache.org/jira/browse/CASSANDRA-7622[CASSANDRA-7622]).
+Virtual tables are tables backed by an API instead of data explicitly managed and stored as SSTables. 
+Apache Cassandra 4.0 implements a virtual keyspace interface for virtual tables. 
+Virtual tables are specific to each node.
+
+Some of the features of virtual tables are the ability to:
+
+* expose metrics through CQL
+* expose YAML configuration information
+
+Virtual keyspaces and tables are quite different from regular tables and keyspaces:
+
+* Virtual tables are created in special keyspaces and not just any keyspace.
+* Virtual tables are managed by Cassandra. Users cannot run DDL to create new virtual tables or DML to modify existing virtual tables.
+* Virtual tables are currently read-only, although that may change in a later version.
+* Virtual tables are local only, non-distributed, and thus not replicated.
+* Virtual tables have no associated SSTables.
+* Consistency level of the queries sent to virtual tables are ignored.
+* All existing virtual tables use `LocalPartitioner`. 
+Since a virtual table is not replicated the partitioner sorts in order of partition keys instead of by their hash.
+* Advanced queries using `ALLOW FILTERING` and aggregation functions can be executed against virtual tables, even though this is not recommended for normal tables.
+
+== Virtual Keyspaces
+
+Apache Cassandra 4.0 has added two new keyspaces for virtual tables:
+
+* `system_virtual_schema` 
+* `system_views`. 
+
+The `system_virtual_schema` keyspace has three tables: `keyspaces`,
+`tables`, and `columns` for the virtual keyspace, table, and column definitions, respectively.
+These tables contain schema information for the virtual tables.
+The keyspace is used by Cassandra internally and a user should not access it directly.
+
+The `system_views` keyspace contains the actual virtual tables.
+
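+The virtual keyspaces themselves can be listed with an ordinary
+`SELECT`; the output below is a minimal sketch of what to expect:
+
+....
+cqlsh> SELECT * FROM system_virtual_schema.keyspaces;
+
+ keyspace_name
+-----------------------
+          system_views
+ system_virtual_schema
+
+(2 rows)
+....
+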
+== Virtual Table Limitations
+
+Before discussing virtual keyspaces and tables, note that virtual keyspaces and tables have some limitations. 
+These limitations are subject to change.
+Virtual keyspaces cannot be altered or dropped. 
+In fact, no operations can be performed against virtual keyspaces.
+
+Virtual tables cannot be created in virtual keyspaces.
+Virtual tables cannot be altered, dropped, or truncated.
+Secondary indexes, types, functions, aggregates, materialized views, and triggers cannot be created for virtual tables.
+Expiring time-to-live (TTL) columns cannot be created.
+Virtual tables do not support conditional updates or deletes.
+Aggregates may be run in SELECT statements.
+
+Conditional batch statements cannot include mutations for virtual tables, nor can a virtual table statement be included in a logged batch.
+In fact, mutations for virtual and regular tables cannot occur in the same batch.
+
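+As a small illustration of the aggregate allowance, a query such as the
+following is accepted against a virtual table, whereas an `UPDATE` or
+`DELETE` against the same table is rejected:
+
+....
+cqlsh> SELECT COUNT(*) FROM system_views.clients;
+....
+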
+== Virtual Tables
+
+Each of the virtual tables in the `system_views` virtual keyspace contains different information.
+
+The following table describes the virtual tables: 
+
+[width="98%",cols="27%,73%",]
+|===
+|Virtual Table |Description
+
+|caches |Displays the general cache information including cache name, capacity_bytes, entry_count, hit_count, hit_ratio,
+recent_hit_rate_per_second, recent_request_rate_per_second, request_count, and size_bytes.
+
+|clients |Lists information about all connected clients.
+
+|coordinator_read_latency |Records counts, keyspace_name, table_name, max, median, and per_second for coordinator reads.
+
+|coordinator_scan |Records counts, keyspace_name, table_name, max, median, and per_second for coordinator scans.
+
+|coordinator_write_latency |Records counts, keyspace_name, table_name, max, median, and per_second for coordinator writes.
+
+|disk_usage |Records disk usage including disk_space, keyspace_name, and table_name, sorted by system keyspaces.
+
+|internode_inbound |Lists information about the inbound internode messaging.
+
+|internode_outbound |Lists information about the outbound internode messaging.
+
+|local_read_latency |Records counts, keyspace_name, table_name, max, median, and per_second for local reads.
+
+|local_scan |Records counts, keyspace_name, table_name, max, median, and per_second for local scans.
+
+|local_write_latency |Records counts, keyspace_name, table_name, max, median, and per_second for local writes.
+
+|max_partition_size |A table metric for maximum partition size.
+
+|rows_per_read |Records counts, keyspace_name, table_name, max, and median for rows read.
+
+|settings |Displays configuration settings in cassandra.yaml.
+
+|sstable_tasks |Lists currently running tasks and progress on SSTables, for operations like compaction and upgrade.
+
+|system_properties |Displays environmental system properties set on the node.
+
+|thread_pools |Lists metrics for each thread pool.
+
+|tombstones_per_read |Records counts, keyspace_name, table_name, max, and median for tombstones.
+|===
+
+We shall discuss some of the virtual tables in more detail next.
+
+=== Clients Virtual Table
+
+The `clients` virtual table lists all active connections (connected
+clients) including their ip address, port, client_options, connection stage, driver
+name, driver version, hostname, protocol version, request count, ssl
+enabled, ssl protocol and user name:
+
+....
+cqlsh> EXPAND ON ;
+Now Expanded output is enabled
+cqlsh> SELECT * FROM system_views.clients;
+
+@ Row 1
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50687
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ connection_stage | ready
+ driver_name      | DataStax Python Driver
+ driver_version   | 3.25.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 16
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+@ Row 2
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50688
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ connection_stage | ready
+ driver_name      | DataStax Python Driver
+ driver_version   | 3.25.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 4
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+@ Row 3
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50753
+ client_options   | {'APPLICATION_NAME': 'TestApp', 'APPLICATION_VERSION': '1.0.0', 'CLIENT_ID': '55b3efbd-c56b-469d-8cca-016b860b2f03', 'CQL_VERSION': '3.0.0', 'DRIVER_NAME': 'DataStax Java driver for Apache Cassandra(R)', 'DRIVER_VERSION': '4.13.0'}
+ connection_stage | ready
+ driver_name      | DataStax Java driver for Apache Cassandra(R)
+ driver_version   | 4.13.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 18
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+@ Row 4
+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ address          | 127.0.0.1
+ port             | 50755
+ client_options   | {'APPLICATION_NAME': 'TestApp', 'APPLICATION_VERSION': '1.0.0', 'CLIENT_ID': '55b3efbd-c56b-469d-8cca-016b860b2f03', 'CQL_VERSION': '3.0.0', 'DRIVER_NAME': 'DataStax Java driver for Apache Cassandra(R)', 'DRIVER_VERSION': '4.13.0'}
+ connection_stage | ready
+ driver_name      | DataStax Java driver for Apache Cassandra(R)
+ driver_version   | 4.13.0
+ hostname         | localhost
+ protocol_version | 5
+ request_count    | 7
+ ssl_cipher_suite | null
+ ssl_enabled      | False
+ ssl_protocol     | null
+ username         | anonymous
+
+(4 rows)
+....
+
+Some examples of how `clients` can be used are:
+
+* To find applications using old, incompatible driver versions before
+upgrading, and together with `nodetool enableoldprotocolversions` and
+`nodetool disableoldprotocolversions` during upgrades.
+* To identify clients sending too many requests (see the sketch after
+this list).
+* To find whether SSL is enabled during the migration to or from SSL.
+
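+As a sketch of the second use case, the relevant columns can be selected
+directly; which columns matter will depend on the investigation:
+
+....
+SELECT address, port, username, driver_name, driver_version,
+       protocol_version, request_count
+FROM system_views.clients;
+....
+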
+The virtual tables may be described with the `DESCRIBE` statement. The
+DDL listed, however, cannot be run to create a virtual table. As an
+example, describe the `system_views.clients` virtual table:
+
+....
+cqlsh> DESCRIBE TABLE system_views.clients;
+
+/*
+Warning: Table system_views.clients is a virtual table and cannot be recreated with CQL.
+Structure, for reference:
+VIRTUAL TABLE system_views.clients (
+  address inet,
+  port int,
+  client_options frozen<map<text, text>>,
+  connection_stage text,
+  driver_name text,
+  driver_version text,
+  hostname text,
+  protocol_version int,
+  request_count bigint,
+  ssl_cipher_suite text,
+  ssl_enabled boolean,
+  ssl_protocol text,
+  username text,
+    PRIMARY KEY (address, port)
+) WITH CLUSTERING ORDER BY (port ASC)
+    AND comment = 'currently connected clients';
+*/
+....
+
+=== Caches Virtual Table
+
+The `caches` virtual table lists information about the caches. The four
+caches presently created are chunks, counters, keys and rows. A query on
+the `caches` virtual table returns the following details:
+
+....
+cqlsh:system_views> SELECT * FROM system_views.caches;
+name     | capacity_bytes | entry_count | hit_count | hit_ratio | recent_hit_rate_per_second | recent_request_rate_per_second | request_count | size_bytes
+---------+----------------+-------------+-----------+-----------+----------------------------+--------------------------------+---------------+------------
+  chunks |      229638144 |          29 |       166 |      0.83 |                          5 |                              6 |           200 |     475136
+counters |       26214400 |           0 |         0 |       NaN |                          0 |                              0 |             0 |          0
+    keys |       52428800 |          14 |       124 |  0.873239 |                          4 |                              4 |           142 |       1248
+    rows |              0 |           0 |         0 |       NaN |                          0 |                              0 |             0 |          0
+
+(4 rows)
+....
+
+=== Settings Virtual Table
+
+The `settings` table is rather useful and lists all the current
+configuration settings from the `cassandra.yaml`. The encryption options
+are overridden to hide the sensitive truststore information and
+passwords. The configuration settings, however, cannot presently be set
+using DML on the virtual table:
+
+....
+cqlsh:system_views> SELECT * FROM system_views.settings;
+
+name                                 | value
+-------------------------------------+--------------------
+  allocate_tokens_for_keyspace       | null
+  audit_logging_options_enabled      | false
+  auto_snapshot                      | true
+  automatic_sstable_upgrade          | false
+  cluster_name                       | Test Cluster
+  enable_transient_replication       | false
+  hinted_handoff_enabled             | true
+  hints_directory                    | /home/ec2-user/cassandra/data/hints
+  incremental_backups                | false
+  initial_token                      | null
+                           ...
+                           ...
+                           ...
+  rpc_address                        | localhost
+  ssl_storage_port                   | 7001
+  start_native_transport             | true
+  storage_port                       | 7000
+  stream_entire_sstables             | true
+  (224 rows)
+....
+
+The `settings` table can be really useful if the yaml file has been
+changed since startup and you do not know the running configuration, or
+to find out whether settings have been modified via JMX, nodetool, or
+virtual tables.
+
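+Because `name` is the partition key, an individual setting can be looked
+up directly; the setting shown below is taken from the sample output
+above:
+
+....
+cqlsh> SELECT * FROM system_views.settings WHERE name = 'cluster_name';
+
+ name         | value
+--------------+--------------
+ cluster_name | Test Cluster
+
+(1 rows)
+....
+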
+=== Thread Pools Virtual Table
+
+The `thread_pools` table lists information about all thread pools.
+Thread pool information includes active tasks, active tasks limit,
+blocked tasks, blocked tasks all time, completed tasks, and pending
+tasks. A query on the `thread_pools` table returns the following details:
+
+....
+cqlsh:system_views> select * from system_views.thread_pools;
+
+name                         | active_tasks | active_tasks_limit | blocked_tasks | blocked_tasks_all_time | completed_tasks | pending_tasks
+------------------------------+--------------+--------------------+---------------+------------------------+-----------------+---------------
+            AntiEntropyStage |            0 |                  1 |             0 |                      0 |               0 |             0
+        CacheCleanupExecutor |            0 |                  1 |             0 |                      0 |               0 |             0
+          CompactionExecutor |            0 |                  2 |             0 |                      0 |             881 |             0
+        CounterMutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
+                 GossipStage |            0 |                  1 |             0 |                      0 |               0 |             0
+             HintsDispatcher |            0 |                  2 |             0 |                      0 |               0 |             0
+       InternalResponseStage |            0 |                  2 |             0 |                      0 |               0 |             0
+         MemtableFlushWriter |            0 |                  2 |             0 |                      0 |               1 |             0
+           MemtablePostFlush |            0 |                  1 |             0 |                      0 |               2 |             0
+       MemtableReclaimMemory |            0 |                  1 |             0 |                      0 |               1 |             0
+              MigrationStage |            0 |                  1 |             0 |                      0 |               0 |             0
+                   MiscStage |            0 |                  1 |             0 |                      0 |               0 |             0
+               MutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
+   Native-Transport-Requests |            1 |                128 |             0 |                      0 |             130 |             0
+      PendingRangeCalculator |            0 |                  1 |             0 |                      0 |               1 |             0
+PerDiskMemtableFlushWriter_0 |            0 |                  2 |             0 |                      0 |               1 |             0
+                   ReadStage |            0 |                 32 |             0 |                      0 |              13 |             0
+                 Repair-Task |            0 |         2147483647 |             0 |                      0 |               0 |             0
+        RequestResponseStage |            0 |                  2 |             0 |                      0 |               0 |             0
+                     Sampler |            0 |                  1 |             0 |                      0 |               0 |             0
+    SecondaryIndexManagement |            0 |                  1 |             0 |                      0 |               0 |             0
+          ValidationExecutor |            0 |         2147483647 |             0 |                      0 |               0 |             0
+           ViewBuildExecutor |            0 |                  1 |             0 |                      0 |               0 |             0
+           ViewMutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
+
+(24 rows)
+....
+
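+To narrow the output down, for example to pools that currently have
+pending work, filtering can be used (recall that `ALLOW FILTERING` is
+acceptable on virtual tables):
+
+....
+SELECT name, active_tasks, pending_tasks
+FROM system_views.thread_pools
+WHERE pending_tasks > 0 ALLOW FILTERING;
+....
+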
+=== Internode Inbound Messaging Virtual Table
+
+The `internode_inbound` virtual table lists information about inbound
+internode messaging. Initially, no internode inbound messaging may be
+listed. In addition to the address, port, datacenter, and rack, the
+information includes corrupt frames recovered, corrupt frames
+unrecovered, error bytes, error count, expired bytes, expired count,
+processed bytes, processed count, received bytes, received count,
+scheduled bytes, scheduled count, throttled count, throttled nanos,
+using bytes, and using reserve bytes. A query on `internode_inbound`
+returns the following details:
+
+....
+cqlsh:system_views> SELECT * FROM system_views.internode_inbound;
+address | port | dc | rack | corrupt_frames_recovered | corrupt_frames_unrecovered |
+error_bytes | error_count | expired_bytes | expired_count | processed_bytes |
+processed_count | received_bytes | received_count | scheduled_bytes | scheduled_count | throttled_count | throttled_nanos | using_bytes | using_reserve_bytes
+---------+------+----+------+--------------------------+----------------------------+-
+----------
+(0 rows)
+....
+
+=== SSTables Tasks Virtual Table
+
+The `sstable_tasks` table can be used to get information about running
+tasks. It lists the following columns:
+
+....
+cqlsh:system_views> SELECT * FROM sstable_tasks;
+keyspace_name | table_name | task_id                              | kind       | progress | total    | unit
+---------------+------------+--------------------------------------+------------+----------+----------+-------
+       basic |      wide2 | c3909740-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction | 60418761 | 70882110 | bytes
+       basic |      wide2 | c7556770-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction |  2995623 | 40314679 | bytes
+....
+
+As another example, to find how much time is remaining for SSTable
+tasks, use the following query:
+
+....
+SELECT total - progress AS remaining
+FROM system_views.sstable_tasks;
+....
+
+=== Other Virtual Tables
+
+Some examples of using other virtual tables are as follows.
+
+Find tables with most disk usage:
+
+....
+cqlsh> SELECT * FROM disk_usage WHERE mebibytes > 1 ALLOW FILTERING;
+
+keyspace_name | table_name | mebibytes
+---------------+------------+-----------
+   keyspace1 |  standard1 |       288
+  tlp_stress |   keyvalue |      3211
+....
+
+Find the table(s) with the greatest local read latency:
+
+....
+cqlsh> SELECT * FROM  local_read_latency WHERE per_second > 1 ALLOW FILTERING;
+
+keyspace_name | table_name | p50th_ms | p99th_ms | count    | max_ms  | per_second
+---------------+------------+----------+----------+----------+---------+------------
+  tlp_stress |   keyvalue |    0.043 |    0.152 | 49785158 | 186.563 |  11418.356
+....
+
+
+== Example
+
+[arabic, start=1]
+. To list the keyspaces, enter `cqlsh` and run the CQL command `DESCRIBE KEYSPACES`:
+
+[source, cql]
+----
+cqlsh> DESC KEYSPACES;
+system_schema  system          system_distributed  system_virtual_schema
+system_auth    system_traces   system_views
+----
+
+[arabic, start=2]
+. To view the virtual table schema, run the CQL commands `USE system_virtual_schema` and `SELECT * FROM tables`:
+
+[source, cql]
+----
+cqlsh> USE system_virtual_schema;
+cqlsh> SELECT * FROM tables;
+----
+ 
+results in:
+
+[source, cql]
+----
+ keyspace_name         | table_name                | comment
+-----------------------+---------------------------+--------------------------------------
+          system_views |                    caches |                        system caches
+          system_views |                   clients |          currently connected clients
+          system_views |  coordinator_read_latency |
+          system_views |  coordinator_scan_latency |
+          system_views | coordinator_write_latency |
+          system_views |                disk_usage |
+          system_views |         internode_inbound |
+          system_views |        internode_outbound |
+          system_views |        local_read_latency |
+          system_views |        local_scan_latency |
+          system_views |       local_write_latency |
+          system_views |        max_partition_size |
+          system_views |             rows_per_read |
+          system_views |                  settings |                     current settings
+          system_views |             sstable_tasks |                current sstable tasks
+          system_views |         system_properties | Cassandra relevant system properties
+          system_views |              thread_pools |
+          system_views |       tombstones_per_read |
+ system_virtual_schema |                   columns |           virtual column definitions
+ system_virtual_schema |                 keyspaces |         virtual keyspace definitions
+ system_virtual_schema |                    tables |            virtual table definitions
+
+(21 rows)
+----
+
+[arabic, start=3]
+. To view the virtual tables, run the CQL commands `USE system_views` and `DESCRIBE tables`:
+
+[source, cql]
+----
+cqlsh> USE system_views;
+cqlsh> DESCRIBE tables;
+----
+
+results in:
+
+[source, cql]
+----
+sstable_tasks       clients                   coordinator_write_latency
+disk_usage          local_write_latency       tombstones_per_read
+thread_pools        internode_outbound        settings
+local_scan_latency  coordinator_scan_latency  system_properties
+internode_inbound   coordinator_read_latency  max_partition_size
+local_read_latency  rows_per_read             caches
+----
+
+[arabic, start=4]
+. To look at any table data, run the CQL command `SELECT`:
+
+[source, cql]
+----
+cqlsh> USE system_views;
+cqlsh> SELECT * FROM clients LIMIT 2;
+----
+results in:
+
+[source, cql]
+----
+ address   | port  | connection_stage | driver_name            | driver_version | hostname  | protocol_version | request_count | ssl_cipher_suite | ssl_enabled | ssl_protocol | username
+-----------+-------+------------------+------------------------+----------------+-----------+------------------+---------------+------------------+-------------+--------------+-----------
+ 127.0.0.1 | 37308 |            ready | DataStax Python Driver |   3.21.0.post0 | localhost |                4 |            17 |             null |       False |         null | anonymous
+ 127.0.0.1 | 37310 |            ready | DataStax Python Driver |   3.21.0.post0 | localhost |                4 |             8 |             null |       False |         null | anonymous
+
+(2 rows)
+----
diff --git a/doc/modules/cassandra/pages/tools/cassandra_stress.adoc b/doc/modules/cassandra/pages/tools/cassandra_stress.adoc
index bcef193..7cf3548 100644
--- a/doc/modules/cassandra/pages/tools/cassandra_stress.adoc
+++ b/doc/modules/cassandra/pages/tools/cassandra_stress.adoc
@@ -67,8 +67,6 @@
     Custom transport factories
   -port:;;
     The port to connect to cassandra nodes on
-  -sendto:;;
-    Specify a stress server to send this command to
   -graph:;;
     Graph recorded metrics
   -tokenrange:;;
diff --git a/doc/modules/cassandra/pages/tools/cqlsh.adoc b/doc/modules/cassandra/pages/tools/cqlsh.adoc
index 162259a..8050ee5 100644
--- a/doc/modules/cassandra/pages/tools/cqlsh.adoc
+++ b/doc/modules/cassandra/pages/tools/cqlsh.adoc
@@ -46,52 +46,72 @@
 You can also view the latest version of the
 https://github.com/apache/cassandra/blob/trunk/conf/cqlshrc.sample[cqlshrc file online].
 
+[[cql_history]]
+== cql history
+
+All CQL commands you execute are written to a history file. By default, CQL history is written to `~/.cassandra/cql_history`. You can change this default by setting the environment variable `CQL_HISTORY` to another path, for example `~/some/other/path/to/cqlsh_history`, where `cqlsh_history` is a file. All parent directories of the history file will be created if they do not exist. If you do not want to persist history, set `CQL_HISTORY` to `/dev/null`.
+This feature is supported from Cassandra 4.1.
+
 == Command Line Options
 
-Usage:
+Usage: `cqlsh.py [options] [host [port]]`
 
-`cqlsh [options] [host [port]]`
+CQL Shell for Apache Cassandra
 
 Options:
 
+`--version`::
+  show program's version number and exit
+
+`-h` `--help`::
+  show this help message and exit
 `-C` `--color`::
-  Force color output
+  Always use color output
 `--no-color`::
-  Disable color output
-`--browser`::
-  Specify the browser to use for displaying cqlsh help. This can be one
-  of the https://docs.python.org/2/library/webbrowser.html[supported
-  browser names] (e.g. `firefox`) or a browser path followed by `%s`
-  (e.g. `/usr/bin/google-chrome-stable %s`).
+  Never use color output
+`--browser=BROWSER`::
+  The browser to use to display CQL help, where BROWSER can be:
+  one of the supported browsers in https://docs.python.org/3/library/webbrowser.html.
+  browser path followed by %s, example: /usr/bin/google-chrome-stable %s
 `--ssl`::
-  Use SSL when connecting to Cassandra
-`-u` `--user`::
-  Username to authenticate against Cassandra with
-`-p` `--password`::
-  Password to authenticate against Cassandra with, should be used in
-  conjunction with `--user`
-`-k` `--keyspace`::
-  Keyspace to authenticate to, should be used in conjunction with
-  `--user`
-`-f` `--file`::
-  Execute commands from the given file, then exit
+  Use SSL
+
+`-u USERNAME` `--username=USERNAME`::
+  Authenticate as user.
+`-p PASSWORD` `--password=PASSWORD`::
+  Authenticate using password.
+`-k KEYSPACE` `--keyspace=KEYSPACE`::
+  Authenticate to the given keyspace.
+`-f FILE` `--file=FILE`::
+  Execute commands from FILE, then exit
 `--debug`::
-  Print additional debugging information
-`--encoding`::
-  Specify a non-default encoding for output (defaults to UTF-8)
-`--cqlshrc`::
-  Specify a non-default location for the `cqlshrc` file
-`-e` `--execute`::
-  Execute the given statement, then exit
-`--connect-timeout`::
-  Specify the connection timeout in seconds (defaults to 2s)
-`--python /path/to/python`::
-  Specify the full path to Python interpreter to override default on
-  systems with multiple interpreters installed
-`--request-timeout`::
-  Specify the request timeout in seconds (defaults to 10s)
-`-t` `--tty`::
-  Force tty mode (command prompt)
+  Show additional debugging information
+`--coverage`::
+  Collect coverage data
+`--encoding=ENCODING`::
+  Specify a non-default encoding for output. (Default: utf-8)
+`--cqlshrc=CQLSHRC`::
+  Specify an alternative cqlshrc file location.
+`--credentials=CREDENTIALS`::
+  Specify an alternative credentials file location.
+`--cqlversion=CQLVERSION`::
+  Specify a particular CQL version, by default the
+  highest version supported by the server will be used.
+  Examples: "3.0.3", "3.1.0"
+`--protocol-version=PROTOCOL_VERSION`::
+  Specify a specific protocol version, otherwise the
+  client will default and downgrade as necessary
+`-e EXECUTE` `--execute=EXECUTE`::
+  Execute the statement and quit.
+`--connect-timeout=CONNECT_TIMEOUT`::
+  Specify the connection timeout in seconds (default: 5 seconds).
+`--request-timeout=REQUEST_TIMEOUT`::
+  Specify the default request timeout in seconds
+  (default: 10 seconds).
+`-t, --tty`::
+  Force tty mode (command prompt).
+`-v` `--v`::
+  Print the current version of cqlsh.
 
 == Special Commands
 
diff --git a/doc/modules/cassandra/pages/tools/hash_password.adoc b/doc/modules/cassandra/pages/tools/hash_password.adoc
new file mode 100644
index 0000000..b2e8e0f
--- /dev/null
+++ b/doc/modules/cassandra/pages/tools/hash_password.adoc
@@ -0,0 +1,31 @@
+= Hash password
+
+The `hash_password` tool is used to get the jBcrypt hash of a password. This hash
+can be used in CREATE/ALTER ROLE/USER statements for improved security.
+
+This feature can be useful if you want to make sure that no intermediate system, log, or
+any other possible plain-text password leak can happen.
+
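+As a sketch of how the output is then used (the role name and hash value
+below are illustrative only), the generated hash is passed to a
+`CREATE ROLE` statement:
+
+....
+CREATE ROLE alice WITH LOGIN = true
+  AND HASHED PASSWORD = '$2a$10$JSJEMFm6GeaW9XxT5JIheuEtPvat6i7uKbnTcxX3c1wshIIsGyUtG';
+....
+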
+== Usage
+
+hash_password <options>
+
+[cols=",",]
+|===
+
+|-h,--help |Displays help message
+
+|-e,--environment-var <arg> |Use value of the specified environment
+variable as the password
+
+|-i,--input <arg> |Input is a file (or - for stdin) to read the
+password from. Make sure that the whole input including newlines is
+considered. For example, the shell command `echo -n foobar \| hash_password
+-i -` will work as intended and just hash 'foobar'.
+
+|-p,--plain <arg> |Argument is the plain text password
+
+|-r,--logrounds <arg> |Number of hash rounds (default: 10).
+|===
+
+One of the options --environment-var, --plain or --input must be used.
\ No newline at end of file
diff --git a/doc/modules/cassandra/pages/tools/index.adoc b/doc/modules/cassandra/pages/tools/index.adoc
index a25af55..ca8b791 100644
--- a/doc/modules/cassandra/pages/tools/index.adoc
+++ b/doc/modules/cassandra/pages/tools/index.adoc
@@ -7,3 +7,4 @@
 * xref:tools/nodetool/nodetool.adoc[nodetool]
 * xref:tools/sstable/index.adoc[SSTable tools] 
 * xref:tools/cassandra_stress.adoc[cassandra-stress tool]
+* xref:tools/hash_password.adoc[hash password tool]
diff --git a/doc/modules/cassandra/pages/tools/sstable/sstableloader.adoc b/doc/modules/cassandra/pages/tools/sstable/sstableloader.adoc
index 4234a0b..e980dc2 100644
--- a/doc/modules/cassandra/pages/tools/sstable/sstableloader.adoc
+++ b/doc/modules/cassandra/pages/tools/sstable/sstableloader.adoc
@@ -45,10 +45,23 @@
 
 |--no-progress |don't display progress
 
-|-t, --throttle <throttle> |throttle speed in Mbits (default unlimited)
+|-t, --throttle <throttle> |(deprecated) throttle speed in Mbits
+(default 0 for unlimited). Use --throttle-mib instead
 
-|-idct, --inter-dc-throttle <inter-dc-throttle> |inter-datacenter
-throttle speed in Mbits (default unlimited)
+|--throttle-mib <throttle-mib> |throttle speed in MiB/s
+(default 0 for unlimited)
+
+|-idct, --inter-dc-throttle <inter-dc-throttle> |(deprecated) inter-datacenter
+throttle speed in Mbits (default 0 for unlimited). Use --inter-dc-throttle-mib instead
+
+|--inter-dc-throttle-mib <inter-dc-throttle-mib> |inter-datacenter
+throttle speed in MiB/s (default 0 for unlimited)
+
+|--entire-sstable-throttle-mib <throttle-mib> |entire SSTable throttle
+speed in MiB/s (default 0 for unlimited)
+
+|--entire-sstable-inter-dc-throttle-mib <inter-dc-throttle-mib> |entire
+SSTable inter-datacenter throttle speed in MiB/s (default 0 for unlimited)
 
 |-cph, --connections-per-host <connectionsPerHost> |number of concurrent
 connections-per-host
@@ -89,7 +102,7 @@
 
 You can provide a cassandra.yaml file with the -f command line option to
 set up streaming throughput, and client and server encryption options.
-Only stream_throughput_outbound_megabits_per_sec,
+Only stream_throughput_outbound,
 server_encryption_options, and client_encryption_options are read from
 yaml. You can override options read from cassandra.yaml with
 corresponding command line options.
diff --git a/doc/modules/cassandra/pages/tools/sstable/sstableverify.adoc b/doc/modules/cassandra/pages/tools/sstable/sstableverify.adoc
index 0af2f15..061edf4 100644
--- a/doc/modules/cassandra/pages/tools/sstable/sstableverify.adoc
+++ b/doc/modules/cassandra/pages/tools/sstable/sstableverify.adoc
@@ -8,6 +8,11 @@
 results will occur. Note: the script does not verify that Cassandra is
 stopped.
 
+== WARNING
+See CASSANDRA-9947 and CASSANDRA-17017 for discussion around risks with this tool. Specifically: "We mark sstables that fail verification as unrepaired, but that's not going to do what you think.  What it means is that the local node will use that sstable in the next repair, but other nodes will not. So all we'll end up doing is streaming whatever data we can read from it, to the other replicas.  If we could magically mark whatever sstables correspond on the remote nodes, to the data in the local sstable, that would work, but we can't."
+
+This tool requires the use of a -f or --force flag to indicate that the user understands the risks and would like to attempt its usage anyway.
+
 == Usage
 
 sstableverify <options> <keyspace> <table>
@@ -18,6 +23,7 @@
 |-e, --extended |extended verification
 |-h, --help |display this help message
 |-v, --verbose |verbose output
+|-f, --force |allow use of tool (see CASSANDRA-17017 for risks)
 |===
 
 == Basic Verification
diff --git a/doc/modules/cassandra/pages/troubleshooting/use_nodetool.adoc b/doc/modules/cassandra/pages/troubleshooting/use_nodetool.adoc
index f80d039..a313432 100644
--- a/doc/modules/cassandra/pages/troubleshooting/use_nodetool.adoc
+++ b/doc/modules/cassandra/pages/troubleshooting/use_nodetool.adoc
@@ -240,3 +240,46 @@
 take too many resources away from query threads is very important for
 performance. If you notice compaction unable to keep up, try tuning
 Cassandra's `concurrent_compactors` or `compaction_throughput` options.
+
+[[nodetool-datapaths]]
+== Paths used for data files
+
+Cassandra persists data on disk within the configured directories. Data
+files are distributed among the directories configured with `data_file_directories`.
+Mirroring the structure of keyspaces and tables, Cassandra creates
+subdirectories within `data_file_directories`. However, directories are not removed
+even if the tables and keyspaces are dropped. While these directories are kept
+in order to hold snapshots, they are subject to removal, so operators
+need to know which directories are still in use. Running the `nodetool datapaths`
+command is an easy way to list the directories in which Cassandra is actually storing
+sstable data on disk.
+
+[source, bash]
+----
+% nodetool datapaths -- system_auth
+Keyspace: system_auth
+	Table: role_permissions
+	Paths:
+		/var/lib/cassandra/data/system_auth/role_permissions-3afbe79f219431a7add7f5ab90d8ec9c
+
+	Table: network_permissions
+	Paths:
+		/var/lib/cassandra/data/system_auth/network_permissions-d46780c22f1c3db9b4c1b8d9fbc0cc23
+
+	Table: resource_role_permissons_index
+	Paths:
+		/var/lib/cassandra/data/system_auth/resource_role_permissons_index-5f2fbdad91f13946bd25d5da3a5c35ec
+
+	Table: roles
+	Paths:
+		/var/lib/cassandra/data/system_auth/roles-5bc52802de2535edaeab188eecebb090
+
+	Table: role_members
+	Paths:
+		/var/lib/cassandra/data/system_auth/role_members-0ecdaa87f8fb3e6088d174fb36fe5c0d
+
+----
+
+By default, all keyspaces and tables are listed; however, a list of `keyspace` and
+`keyspace.table` arguments can be given to query specific data paths. Using the `--format`
+option, the output can be formatted as YAML or JSON.
diff --git a/doc/native_protocol_v4.spec b/doc/native_protocol_v4.spec
index 2220000..6def737 100644
--- a/doc/native_protocol_v4.spec
+++ b/doc/native_protocol_v4.spec
@@ -1103,7 +1103,7 @@
                              - "VIEW": the timeout occured when a write involves
                                 VIEW update and failure to acqiure local view(MV)
                                 lock for key within timeout
-                             - "CDC": the timeout occured when cdc_total_space_in_mb is
+                             - "CDC": the timeout occured when cdc_total_space is
                                 exceeded when doing a write to data tracked by cdc.
     0x1200    Read_timeout: Timeout exception during a read request. The rest
               of the ERROR message body will be
@@ -1176,7 +1176,7 @@
                              - "VIEW": the failure occured when a write involves
                                 VIEW update and failure to acqiure local view(MV)
                                 lock for key within timeout
-                             - "CDC": the failure occured when cdc_total_space_in_mb is
+                             - "CDC": the failure occured when cdc_total_space is
                                 exceeded when doing a write to data tracked by cdc.
 
     0x2000    Syntax_error: The submitted query has a syntax error.
diff --git a/doc/native_protocol_v5.spec b/doc/native_protocol_v5.spec
index 3e24707..17f7368 100644
--- a/doc/native_protocol_v5.spec
+++ b/doc/native_protocol_v5.spec
@@ -1316,7 +1316,7 @@
                              - "VIEW": the timeout occured when a write involves
                                VIEW update and failure to acqiure local view(MV)
                                lock for key within timeout
-                             - "CDC": the timeout occured when cdc_total_space_in_mb is
+                             - "CDC": the timeout occured when cdc_total_space is
                                exceeded when doing a write to data tracked by cdc.
                 <contentions> is a [short] that describes the number of contentions occured during the CAS operation.
                               The field only presents when the <writeType> is "CAS".
@@ -1399,7 +1399,7 @@
                              - "VIEW": the failure occured when a write involves
                                VIEW update and failure to acqiure local view(MV)
                                lock for key within timeout
-                             - "CDC": the failure occured when cdc_total_space_in_mb is
+                             - "CDC": the failure occured when cdc_total_space is
                                exceeded when doing a write to data tracked by cdc.
     0x1600    CDC_WRITE_FAILURE: // todo
     0x1700    CAS_WRITE_UNKNOWN: An exception occured due to contended Compare And Set write/update.
diff --git a/examples/ssl-factory/README.adoc b/examples/ssl-factory/README.adoc
new file mode 100644
index 0000000..eb89f6f
--- /dev/null
+++ b/examples/ssl-factory/README.adoc
@@ -0,0 +1,22 @@
+Cassandra Custom SslContextFactory Example
+==========================================
+
+This example shows a custom `SslContextFactory` implementation based on Kubernetes secrets.
+For the documentation, please refer to the javadocs of `KubernetesSecretsSslContextFactory.java` and `KubernetesSecretsPEMSslContextFactory.java`.
+
+Installation
+-------------
+
+Step 1: Build the Cassandra classes locally
+
+----
+$ cd <cassandra_src_dir>
+$ ant build
+----
+
+Step 2: Run tests for the security examples
+
+----
+$ cd <cassandra_src_dir>/examples/ssl-factory
+$ ant test
+----
diff --git a/examples/ssl-factory/build.xml b/examples/ssl-factory/build.xml
new file mode 100644
index 0000000..d803aeb
--- /dev/null
+++ b/examples/ssl-factory/build.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements.  See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership.  The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License.  You may obtain a copy of the License at
+ ~
+ ~   http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied.  See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<project default="jar" name="ssl-factory-example">
+	<property name="cassandra.dir" value="../.." />
+	<property name="cassandra.dir.lib" value="${cassandra.dir}/lib" />
+	<property name="cassandra.classes" value="${cassandra.dir}/build/classes/main" />
+	<property name="cassandra.test.lib" value="${cassandra.dir}/build/test/lib" />
+	<property name="build.src" value="${basedir}/src" />
+	<property name="build.dir" value="${basedir}/build" />
+	<property name="conf.dir" value="${basedir}/conf" />
+	<property name="build.classes" value="${build.dir}/classes" />
+	<property name="test.src" value="${basedir}/test/unit" />
+	<property name="test.build.dir" value="${build.dir}/test" />
+	<property name="test.conf.dir" value="${test.src}/conf" />
+	<property name="test.build.classes" value="${test.build.dir}/classes" />
+	<property name="test.build.conf" value="${test.build.dir}/conf" />
+	<property name="final.name" value="ssl-factory-example" />
+
+	<path id="build.classpath">
+		<fileset dir="${cassandra.dir.lib}">
+			<include name="**/*.jar" />
+		</fileset>
+		<fileset dir="${cassandra.dir}/build/lib/jars">
+			<include name="**/*.jar" />
+		</fileset>
+		<pathelement location="${cassandra.classes}" />
+	</path>
+
+	<path id="test.classpath">
+		<fileset dir="${cassandra.test.lib}/jars">
+			<include name="**/*.jar" />
+			<exclude name="**/ant-*.jar"/>
+		</fileset>
+		<path refid="build.classpath"/>
+		<path location="${build.dir}/${final.name}.jar"/>
+		<pathelement location="${build.classes}"/>
+		<pathelement location="${test.build.classes}"/>
+		<pathelement location="${test.build.conf}"/>
+	</path>
+
+	<target name="init">
+		<mkdir dir="${build.classes}" />
+		<mkdir dir="${test.build.classes}" />
+	</target>
+
+	<target name="build" depends="init">
+		<javac destdir="${build.classes}" debug="true" includeantruntime="false">
+			<src path="${build.src}" />
+			<classpath refid="build.classpath" />
+		</javac>
+	</target>
+
+	<target name="jar" depends="build">
+		<jar jarfile="${build.dir}/${final.name}.jar">
+			<fileset dir="${build.classes}" />
+		</jar>
+	</target>
+
+	<target name="buildTests" depends="build">
+		<delete dir="${test.build.dir}/conf"/>
+		<copy todir="${test.build.dir}/conf">
+			<fileset dir="test/conf" includes="**"/>
+		</copy>
+		<javac destdir="${test.build.classes}" debug="true" includeantruntime="false">
+			<src path="${test.src}" />
+			<src path="${test.build.dir}/conf" />
+			<classpath refid="test.classpath"/>
+		</javac>
+	</target>
+
+	<target name="test" depends="jar, buildTests">
+		<junit printsummary="on" haltonfailure="yes" fork="true">
+			<classpath refid="test.classpath"/>
+			<formatter type="brief" usefile="false" />
+			<batchtest>
+				<fileset dir="${test.src}" includes="**/*Test.java" />
+			</batchtest>
+		</junit>
+	</target>
+
+	<target name="clean">
+		<delete dir="${build.dir}" />
+		<delete dir="${test.build.classes}" />
+	</target>
+</project>
diff --git a/examples/ssl-factory/src/org/apache/cassandra/security/KubernetesSecretsPEMSslContextFactory.java b/examples/ssl-factory/src/org/apache/cassandra/security/KubernetesSecretsPEMSslContextFactory.java
new file mode 100644
index 0000000..fb11c91
--- /dev/null
+++ b/examples/ssl-factory/src/org/apache/cassandra/security/KubernetesSecretsPEMSslContextFactory.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.util.Map;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLException;
+import javax.net.ssl.TrustManagerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Custom {@link ISslContextFactory} implementation based on Kubernetes Secrets. It allows the keystore and
+ * truststore paths to be configured from the K8 secrets via volumeMount and passwords via K8 secrets environment
+ * variables. The official Kubernetes Secret Spec can be found <a href="https://kubernetes.io/docs/concepts/configuration/secret/ ">here</a>.
+ * <p>
+ * When the keystore or truststore is updated, this implementation can detect that based on updated K8 secrets
+ * at the mounted paths ({@code KEYSTORE_UPDATED_TIMESTAMP_PATH} for the keystore and {@code
+ * TRUSTSTORE_UPDATED_TIMESTAMP_PATH} for the truststore). The values in those paths are expected to be numeric.
+ * The most obvious choice might be to just use the time in nano/milli-second precision, but any other strategy works
+ * as well, as long as the comparison of those values can be done in a consistent/predictable manner. Again, those
+ * values do not necessarily have to reflect the actual files' update timestamps; using the actual file timestamps is
+ * just one of the valid options to signal updates.
+ * <p>
+ * Defaults:
+ * <pre>
+ *     private key password = cassandra
+ *     keystore updated timestamp path = /etc/my-ssl-store/keystore-last-updatedtime
+ *     truststore updated timestamp path = /etc/my-ssl-store/truststore-last-updatedtime
+ * </pre>
+ * <p>
+ * Customization: In order to customize the K8s secret configuration, override appropriate values in the below Cassandra
+ * configuration. A similar configuration can be applied to {@code client_encryption_options}.
+ * <pre>
+ *     server_encryption_options:
+ *       internode_encryption: none
+ *       ssl_context_factory:
+ *         class_name: org.apache.cassandra.security.KubernetesSecretsPEMSslContextFactory
+ *         parameters:
+ *           PRIVATE_KEY_ENV_VAR: PRIVATE_KEY
+ *           PRIVATE_KEY_PASSWORD_ENV_VAR: PRIVATE_KEY_PASSWORD
+ *           KEYSTORE_UPDATED_TIMESTAMP_PATH: /etc/my-ssl-store/keystore-last-updatedtime
+ *           TRUSTED_CERTIFICATES_ENV_VAR: TRUSTED_CERTIFICATES
+ *           TRUSTSTORE_UPDATED_TIMESTAMP_PATH: /etc/my-ssl-store/truststore-last-updatedtime
+ * </pre>
+ * <p>
+ * Below is the corresponding sample YAML configuration for K8 env.
+ * <pre>
+ * apiVersion: v1
+ * kind: Pod
+ * metadata:
+ *   name: my-pod
+ *   labels:
+ *     app: my-app
+ * spec:
+ *   containers:
+ *   - name: my-app
+ *     image: my-app:latest
+ *     imagePullPolicy: Always
+ *     env:
+ *       - name: PRIVATE_KEY
+ *         valueFrom:
+ *           secretKeyRef:
+ *             name: my-ssl-store
+ *             key: private-key
+ *       - name: PRIVATE_KEY_PASSWORD
+ *         valueFrom:
+ *           secretKeyRef:
+ *             name: my-ssl-store
+ *             key: private-key-password
+ *       - name: TRUSTED_CERTIFICATES
+ *         valueFrom:
+ *           secretKeyRef:
+ *             name: my-ssl-store
+ *             key: trusted-certificates
+ *     volumeMounts:
+ *     - name: my-ssl-store
+ *       mountPath: "/etc/my-ssl-store"
+ *       readOnly: true
+ *   volumes:
+ *   - name: my-ssl-store
+ *     secret:
+ *       secretName: my-ssl-store
+ *       items:
+ *         - key: keystore-last-updatedtime
+ *           path: keystore-last-updatedtime
+ *         - key: truststore-last-updatedtime
+ *           path: truststore-last-updatedtime
+ * </pre>
+ */
+public class KubernetesSecretsPEMSslContextFactory extends KubernetesSecretsSslContextFactory
+{
+    public static final String DEFAULT_PRIVATE_KEY = "";
+    public static final String DEFAULT_PRIVATE_KEY_PASSWORD = "";
+    public static final String DEFAULT_TRUSTED_CERTIFICATES = "";
+
+    @VisibleForTesting
+    static final String DEFAULT_PRIVATE_KEY_ENV_VAR_NAME = "PRIVATE_KEY";
+    @VisibleForTesting
+    static final String DEFAULT_PRIVATE_KEY_PASSWORD_ENV_VAR_NAME = "PRIVATE_KEY_PASSWORD";
+    @VisibleForTesting
+    static final String DEFAULT_TRUSTED_CERTIFICATES_ENV_VAR_NAME = "TRUSTED_CERTIFICATES";
+
+    private static final Logger logger = LoggerFactory.getLogger(KubernetesSecretsPEMSslContextFactory.class);
+    private String pemEncodedKey;
+    private String keyPassword;
+    private String pemEncodedCertificates;
+    private PEMBasedSslContextFactory pemBasedSslContextFactory;
+
+    public KubernetesSecretsPEMSslContextFactory()
+    {
+        pemBasedSslContextFactory = new PEMBasedSslContextFactory();
+    }
+
+    public KubernetesSecretsPEMSslContextFactory(Map<String, Object> parameters)
+    {
+        super(parameters);
+
+        pemEncodedKey = getValueFromEnv(getString(PEMConfigKey.PRIVATE_KEY_ENV_VAR, DEFAULT_PRIVATE_KEY_ENV_VAR_NAME),
+                                        DEFAULT_PRIVATE_KEY);
+        keyPassword = getValueFromEnv(getString(PEMConfigKey.PRIVATE_KEY_PASSWORD_ENV_VAR,
+                                                DEFAULT_PRIVATE_KEY_PASSWORD_ENV_VAR_NAME),
+                                      DEFAULT_PRIVATE_KEY_PASSWORD);
+        pemEncodedCertificates = getValueFromEnv(getString(PEMConfigKey.TRUSTED_CERTIFICATE_ENV_VAR, DEFAULT_TRUSTED_CERTIFICATES_ENV_VAR_NAME),
+                                                 DEFAULT_TRUSTED_CERTIFICATES);
+
+        parameters.put(PEMBasedSslContextFactory.ConfigKey.ENCODED_KEY.getKeyName(), pemEncodedKey);
+        parameters.put(PEMBasedSslContextFactory.ConfigKey.KEY_PASSWORD.getKeyName(), keyPassword);
+        parameters.put(PEMBasedSslContextFactory.ConfigKey.ENCODED_CERTIFICATES.getKeyName(), pemEncodedCertificates);
+
+        pemBasedSslContextFactory = new PEMBasedSslContextFactory(parameters);
+    }
+
+    @Override
+    public synchronized void initHotReloading()
+    {
+        // No-op
+    }
+
+    @Override
+    public boolean hasKeystore()
+    {
+        return pemBasedSslContextFactory.hasKeystore();
+    }
+
+    @Override
+    protected KeyManagerFactory buildKeyManagerFactory() throws SSLException
+    {
+        KeyManagerFactory kmf = pemBasedSslContextFactory.buildKeyManagerFactory();
+        checkedExpiry = pemBasedSslContextFactory.checkedExpiry;
+        return kmf;
+    }
+
+    @Override
+    protected TrustManagerFactory buildTrustManagerFactory() throws SSLException
+    {
+        return pemBasedSslContextFactory.buildTrustManagerFactory();
+    }
+
+    public interface PEMConfigKey
+    {
+        String PRIVATE_KEY_ENV_VAR = "PRIVATE_KEY_ENV_VAR";
+        String PRIVATE_KEY_PASSWORD_ENV_VAR = "PRIVATE_KEY_PASSWORD_ENV_VAR";
+        String TRUSTED_CERTIFICATE_ENV_VAR = "TRUSTED_CERTIFICATE_ENV_VAR";
+    }
+}
diff --git a/examples/ssl-factory/src/org/apache/cassandra/security/KubernetesSecretsSslContextFactory.java b/examples/ssl-factory/src/org/apache/cassandra/security/KubernetesSecretsSslContextFactory.java
new file mode 100644
index 0000000..c83fb03
--- /dev/null
+++ b/examples/ssl-factory/src/org/apache/cassandra/security/KubernetesSecretsSslContextFactory.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Map;
+import java.util.Optional;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.EncryptionOptions;
+
+/**
+ * Custom {@link ISslContextFactory} implementation based on Kubernetes Secrets. It allows the keystore and
+ * truststore paths to be configured from the K8 secrets via volumeMount and passwords via K8 secrets environment
+ * variables. The official Kubernetes Secret Spec can be found <a href="https://kubernetes.io/docs/concepts/configuration/secret/ ">here</a>.
+ *
+ * When the keystore or truststore is updated, this implementation can detect that based on updated K8 secrets
+ * at the mounted paths ({@code KEYSTORE_UPDATED_TIMESTAMP_PATH} for the keystore and {@code
+ * TRUSTSTORE_UPDATED_TIMESTAMP_PATH} for the truststore). The values in those paths are expected to be numeric.
+ * The most obvious choice might be to just use the time in nano/milli-second precision, but any other strategy works
+ * as well, as long as the comparison of those values can be done in a consistent/predictable manner. Again, those
+ * values do not necessarily have to reflect the actual files' update timestamps; using the actual file timestamps is
+ * just one of the valid options to signal updates.
+ *
+ * Defaults:
+ * <pre>
+ *     keystore path = /etc/my-ssl-store/keystore
+ *     keystore password = cassandra
+ *     keystore updated timestamp path = /etc/my-ssl-store/keystore-last-updatedtime
+ *     truststore path = /etc/my-ssl-store/truststore
+ *     truststore password = cassandra
+ *     truststore updated timestamp path = /etc/my-ssl-store/truststore-last-updatedtime
+ * </pre>
+ *
+ * Customization: In order to customize the K8s secret configuration, override appropriate values in the below Cassandra
+ * configuration. A similar configuration can be applied to {@code client_encryption_options}.
+ * <pre>
+ *     server_encryption_options:
+ *       internode_encryption: none
+ *       ssl_context_factory:
+ *         class_name: org.apache.cassandra.security.KubernetesSecretsSslContextFactory
+ *         parameters:
+ *           KEYSTORE_PASSWORD_ENV_VAR: KEYSTORE_PASSWORD
+ *           KEYSTORE_UPDATED_TIMESTAMP_PATH: /etc/my-ssl-store/keystore-last-updatedtime
+ *           TRUSTSTORE_PASSWORD_ENV_VAR: TRUSTSTORE_PASSWORD
+ *           TRUSTSTORE_UPDATED_TIMESTAMP_PATH: /etc/my-ssl-store/truststore-last-updatedtime
+ *       keystore: /etc/my-ssl-store/keystore
+ *       truststore: /etc/my-ssl-store/truststore
+ * </pre>
+ *
+ * Below is the corresponding sample YAML configuration for K8 env.
+ * <pre>
+ * apiVersion: v1
+ * kind: Pod
+ * metadata:
+ *   name: my-pod
+ *   labels:
+ *     app: my-app
+ * spec:
+ *   containers:
+ *   - name: my-app
+ *     image: my-app:latest
+ *     imagePullPolicy: Always
+ *     env:
+ *       - name: KEYSTORE_PASSWORD
+ *         valueFrom:
+ *           secretKeyRef:
+ *             name: my-ssl-store
+ *             key: keystore-password
+ *       - name: TRUSTSTORE_PASSWORD
+ *         valueFrom:
+ *           secretKeyRef:
+ *             name: my-ssl-store
+ *             key: truststore-password
+ *     volumeMounts:
+ *     - name: my-ssl-store
+ *       mountPath: "/etc/my-ssl-store"
+ *       readOnly: true
+ *   volumes:
+ *   - name: my-ssl-store
+ *     secret:
+ *       secretName: my-ssl-store
+ *       items:
+ *         - key: cassandra_ssl_keystore
+ *           path: keystore
+ *         - key: keystore-last-updatedtime
+ *           path: keystore-last-updatedtime
+ *         - key: cassandra_ssl_truststore
+ *           path: truststore
+ *         - key: truststore-last-updatedtime
+ *           path: truststore-last-updatedtime
+ * </pre>
+ */
+public class KubernetesSecretsSslContextFactory extends FileBasedSslContextFactory
+{
+    private static final Logger logger = LoggerFactory.getLogger(KubernetesSecretsSslContextFactory.class);
+
+    /**
+     * Use below config-keys to configure this factory.
+     */
+    public interface ConfigKeys {
+        String KEYSTORE_PASSWORD_ENV_VAR = "KEYSTORE_PASSWORD_ENV_VAR";
+        String TRUSTSTORE_PASSWORD_ENV_VAR = "TRUSTSTORE_PASSWORD_ENV_VAR";
+        String KEYSTORE_UPDATED_TIMESTAMP_PATH = "KEYSTORE_UPDATED_TIMESTAMP_PATH";
+        String TRUSTSTORE_UPDATED_TIMESTAMP_PATH = "TRUSTSTORE_UPDATED_TIMESTAMP_PATH";
+    }
+
+    public static final String DEFAULT_KEYSTORE_PASSWORD = "";
+    public static final String DEFAULT_TRUSTSTORE_PASSWORD = "";
+
+    @VisibleForTesting
+    static final String DEFAULT_KEYSTORE_PASSWORD_ENV_VAR_NAME = "KEYSTORE_PASSWORD";
+    @VisibleForTesting
+    static final String DEFAULT_TRUSTSTORE_PASSWORD_ENV_VAR_NAME = "TRUSTSTORE_PASSWORD";
+
+    private static final String KEYSTORE_PATH_VALUE = "/etc/my-ssl-store/keystore";
+    private static final String TRUSTSTORE_PATH_VALUE = "/etc/my-ssl-store/truststore";
+    private static final String KEYSTORE_PASSWORD_ENV_VAR_NAME = DEFAULT_KEYSTORE_PASSWORD_ENV_VAR_NAME;
+    private static final String KEYSTORE_UPDATED_TIMESTAMP_PATH_VALUE = "/etc/my-ssl-store/keystore-last-updatedtime";
+    private static final String TRUSTSTORE_PASSWORD_ENV_VAR_NAME = DEFAULT_TRUSTSTORE_PASSWORD_ENV_VAR_NAME;
+    private static final String TRUSTSTORE_UPDATED_TIMESTAMP_PATH_VALUE = "/etc/my-ssl-store/truststore-last-updatedtime";
+
+    private final String keystoreUpdatedTimeSecretKeyPath;
+    private final String truststoreUpdatedTimeSecretKeyPath;
+    private long keystoreLastUpdatedTime;
+    private long truststoreLastUpdatedTime;
+
+    public KubernetesSecretsSslContextFactory()
+    {
+        keystore = getString(EncryptionOptions.ConfigKey.KEYSTORE.toString(), KEYSTORE_PATH_VALUE);
+        keystore_password = getValueFromEnv(KEYSTORE_PASSWORD_ENV_VAR_NAME,
+                                            DEFAULT_KEYSTORE_PASSWORD);
+        truststore = getString(EncryptionOptions.ConfigKey.TRUSTSTORE.toString(), TRUSTSTORE_PATH_VALUE);
+        truststore_password = getValueFromEnv(TRUSTSTORE_PASSWORD_ENV_VAR_NAME,
+                                              DEFAULT_TRUSTSTORE_PASSWORD);
+        keystoreLastUpdatedTime = System.nanoTime();
+        keystoreUpdatedTimeSecretKeyPath = getString(ConfigKeys.KEYSTORE_UPDATED_TIMESTAMP_PATH,
+                                                     KEYSTORE_UPDATED_TIMESTAMP_PATH_VALUE);
+        truststoreLastUpdatedTime = keystoreLastUpdatedTime;
+        truststoreUpdatedTimeSecretKeyPath = getString(ConfigKeys.TRUSTSTORE_UPDATED_TIMESTAMP_PATH,
+                                                       TRUSTSTORE_UPDATED_TIMESTAMP_PATH_VALUE);
+    }
+
+    public KubernetesSecretsSslContextFactory(Map<String, Object> parameters)
+    {
+        super(parameters);
+        keystore = getString(EncryptionOptions.ConfigKey.KEYSTORE.toString(), KEYSTORE_PATH_VALUE);
+        keystore_password = getValueFromEnv(getString(ConfigKeys.KEYSTORE_PASSWORD_ENV_VAR,
+                                                      KEYSTORE_PASSWORD_ENV_VAR_NAME), DEFAULT_KEYSTORE_PASSWORD);
+        truststore = getString(EncryptionOptions.ConfigKey.TRUSTSTORE.toString(), TRUSTSTORE_PATH_VALUE);
+        truststore_password = getValueFromEnv(getString(ConfigKeys.TRUSTSTORE_PASSWORD_ENV_VAR,
+                                                        TRUSTSTORE_PASSWORD_ENV_VAR_NAME), DEFAULT_TRUSTSTORE_PASSWORD);
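+        // Seed the last-updated markers with the current nanoTime; shouldReload() reports a change only when the
+        // mounted timestamp files contain a strictly larger value.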
+        keystoreLastUpdatedTime = System.nanoTime();
+        keystoreUpdatedTimeSecretKeyPath = getString(ConfigKeys.KEYSTORE_UPDATED_TIMESTAMP_PATH,
+                                                     KEYSTORE_UPDATED_TIMESTAMP_PATH_VALUE);
+        truststoreLastUpdatedTime = keystoreLastUpdatedTime;
+        truststoreUpdatedTimeSecretKeyPath = getString(ConfigKeys.TRUSTSTORE_UPDATED_TIMESTAMP_PATH,
+                                                       TRUSTSTORE_UPDATED_TIMESTAMP_PATH_VALUE);
+    }
+
+    @Override
+    public synchronized void initHotReloading() {
+        // No-op
+    }
+
+    /**
+     * Checks mounted paths for {@code KEYSTORE_UPDATED_TIMESTAMP_PATH} and {@code TRUSTSTORE_UPDATED_TIMESTAMP_PATH}
+     * and compares the values found there with the currently stored timestamps. If the mounted paths are not valid
+     * (not initialized yet, removed, or corrupted in flight), this method considers that nothing has changed.
+     * @return {@code true} if either of the timestamps (keystore or truststore) was updated; {@code false} otherwise
+     */
+    @Override
+    public boolean shouldReload()
+    {
+        return hasKeystoreUpdated() || hasTruststoreUpdated();
+    }
+
+    @VisibleForTesting
+    String getValueFromEnv(String envVarName, String defaultValue) {
+        String valueFromEnv = StringUtils.isEmpty(envVarName) ? null : System.getenv(envVarName);
+        return StringUtils.isEmpty(valueFromEnv) ? defaultValue : valueFromEnv;
+    }
+
+    private boolean hasKeystoreUpdated() {
+        long keystoreUpdatedTime = getKeystoreLastUpdatedTime();
+        logger.info("Comparing keystore timestamps oldValue {} and newValue {}", keystoreLastUpdatedTime,
+                    keystoreUpdatedTime);
+        if (keystoreUpdatedTime > keystoreLastUpdatedTime) {
+            logger.info("Updating the keystoreLastUpdatedTime from oldValue {} to newValue {}",
+                        keystoreLastUpdatedTime, keystoreUpdatedTime);
+            keystoreLastUpdatedTime = keystoreUpdatedTime;
+            return true;
+        } else {
+            logger.info("Based on the comparision, no keystore update needed");
+            return false;
+        }
+    }
+
+    private boolean hasTruststoreUpdated() {
+        long truststoreUpdatedTime = getTruststoreLastUpdatedTime();
+        logger.info("Comparing truststore timestamps oldValue {} and newValue {}", truststoreLastUpdatedTime,
+                    truststoreUpdatedTime);
+        if (truststoreUpdatedTime > truststoreLastUpdatedTime) {
+            logger.info("Updating the truststoreLastUpdatedTime from oldValue {} to newValue {}",
+                        truststoreLastUpdatedTime, truststoreUpdatedTime);
+            truststoreLastUpdatedTime = truststoreUpdatedTime;
+            return true;
+        } else {
+            logger.info("Based on the comparision, no truststore update needed");
+            return false;
+        }
+    }
+
+    private long getKeystoreLastUpdatedTime() {
+        Optional<String> keystoreUpdatedTimeSecretKeyValue = readSecretFromMountedVolume(keystoreUpdatedTimeSecretKeyPath);
+        if (keystoreUpdatedTimeSecretKeyValue.isPresent())
+        {
+            return parseLastUpdatedTime(keystoreUpdatedTimeSecretKeyValue.get(), keystoreLastUpdatedTime);
+        }
+        else
+        {
+            logger.warn("Failed to load {}'s value. Will use existing value {}", keystoreUpdatedTimeSecretKeyPath,
+                        keystoreLastUpdatedTime);
+            return keystoreLastUpdatedTime;
+        }
+    }
+
+    private long getTruststoreLastUpdatedTime() {
+        Optional<String> truststoreUpdatedTimeSecretKeyValue = readSecretFromMountedVolume(truststoreUpdatedTimeSecretKeyPath);
+        if (truststoreUpdatedTimeSecretKeyValue.isPresent())
+        {
+            return parseLastUpdatedTime(truststoreUpdatedTimeSecretKeyValue.get(), truststoreLastUpdatedTime);
+        }
+        else
+        {
+            logger.warn("Failed to load {}'s value. Will use existing value {}", truststoreUpdatedTimeSecretKeyPath,
+                        truststoreLastUpdatedTime);
+            return truststoreLastUpdatedTime;
+        }
+    }
+
+    private Optional<String> readSecretFromMountedVolume(String secretKeyPath) {
+        try
+        {
+            return Optional.of(new String(Files.readAllBytes(Paths.get(secretKeyPath))));
+        }
+        catch (IOException e)
+        {
+            logger.warn("Failed to read secretKeyPath {} from the mounted volume: {}", secretKeyPath, e.getMessage());
+            return Optional.empty();
+        }
+    }
+
+    private long parseLastUpdatedTime(String latestUpdatedTime, long currentUpdatedTime) {
+        try
+        {
+            return Long.parseLong(latestUpdatedTime);
+        } catch(NumberFormatException e) {
+            logger.warn("Failed to parse the latestUpdatedTime {}. Will use current time {}", latestUpdatedTime,
+                        currentUpdatedTime, e);
+            return currentUpdatedTime;
+        }
+    }
+}
diff --git a/examples/ssl-factory/test/conf/cassandra_encrypted_private_key.pem b/examples/ssl-factory/test/conf/cassandra_encrypted_private_key.pem
new file mode 100644
index 0000000..ed981cc
--- /dev/null
+++ b/examples/ssl-factory/test/conf/cassandra_encrypted_private_key.pem
@@ -0,0 +1,51 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/
+g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl
+xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29
+L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V
+sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/
+f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8
+RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR
+0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs
+evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU
+tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6
+wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN
+K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv
+zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5
+mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo
+WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ
+jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6
+eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny
+DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn
+AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY
+Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow
+Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut
+ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr
+bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH
+hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI
+RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw
+pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP
+FujZhqbKUDbYAcqTkoQ=
+-----END ENCRYPTED PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+n3MVF9w=
+-----END CERTIFICATE-----
diff --git a/examples/ssl-factory/test/conf/cassandra_encrypted_private_key_multiplecerts.pem b/examples/ssl-factory/test/conf/cassandra_encrypted_private_key_multiplecerts.pem
new file mode 100644
index 0000000..fed57d4
--- /dev/null
+++ b/examples/ssl-factory/test/conf/cassandra_encrypted_private_key_multiplecerts.pem
@@ -0,0 +1,71 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIE6TAbBgkqhkiG9w0BBQMwDgQI4QuRiKYzf88CAggABIIEyPRVmPp38SIFr8H3
+wi+oc6b+HJH7SPflXO6XZe4Tignw/aSyBTsLm2dWrzojRAYMIRd1xC7yQ2ffYrvx
+uoYbtOQeAminNqvwXdRTnwu6oC0rxdBT8RQ9NK7xL2tQyD/shmOeTJG/glXxaeqS
+rT0CZ5P5GJh6xdIWLEu3lEa3NSWVFE2YacUphmxBoaWjBjsJfWTgkF665SgP+2lh
+8R2WTcHrHjD8jR4jHB03wlup0LOmOwzplUmqHB9TyuA4wF6tlJajwBcPa0PNI6ny
+e9YcdcRr7Y0IxnPQr7PhQNV5AQb9TivwX4WaZxR+BXtwMglp+mz0ohjwLS3z6pqr
+tLrFhv2qcacSl+CKukFb9umV/QBkUk/iu+jwLcNJKPC965GWdieNbO0akBQpQsUN
+mqaF9DYHogW5lRnybl8WWPIR8tXmSCbSUIgzw4lRK+o15I4vaMI0NfkwFD/2y1sn
+t3m9LnVBukkpx3g/CPKd9PbZZeWpOTrnRJQfOu9Fj2lmkpGp0peCBqLJpO0pieVl
+87EQ0ZCErtAGLGIAhWnDUqRK0MaWZ+DMQNKYn5klF4YTVBkfRc9tQbIgBaa77wvz
+gvVWBuJtTFpCt9c8jByTH1gLbchC4bhLsy1nO7moevypMmNW4rqw9x5f0EIR3zCU
+L5/buoIh91TG5JB7BaIbVHtbB/Y2siARRXJibuw3ChBjqPOfzQ66j//NCMqhfTwT
+x2wn7L1BB4xyLJgVW9803FUTzaaL3EvJjzpdvrGC7vzcB6Jd0La9cjHhWSAPOKro
+nD9XPCbgLs10vW9g1Tc8drnZklhw6f7xrIQhWFg6VlwmVpvCQhEpX48rCCo2PH9X
+uzeJA+oqFEH3zfDp0r+q6jbAl+5TkkbBBgC2GCoI1vTYSKaWjeKKHkgzGG0QQLAz
+YXWMXvWKOME4wVPkeVxJv80RqDv0JsoOrnVoaFAzAHJMWa7UZQnjkrbVz/y8ZbV4
+oLJjQjvuOnU2PoXXs6SXbzOs1xx5zbX1UUH3Wc7/CCaUec2hemQJ5m6b1XJciyrY
+GHpMKWtXky9Mo1ruTP7ZH1nk03b4PTObKSx2gQD5AZ/ASuTeahMqMb/2PJkDkpHA
+sy8b1zOn2YTbf4K6NWVNIOkiaApmKhhX0Af6Lg8Wr2ymRTXdp/Um8f+SQLADpB/F
+xOydEN622wmihKDge9DrUFqPG/bdIiRGLXLg8efNboC6/cn/i/sheO7/YlrvcUNo
+qxDa/Bb1N/DgmtgAQ1ZP+AKjk6FKkwZRF1X/VZkZ6auscDlaPetF7razPeIJUrKN
+z/x4AD2txGYKmeYztYR577hPXBw+PPKdggRhIugb6z5Tk89C2pDEwfnByA/wcGJr
+w5avxrubosPrp0QtJpZMzouOvcD52VUiZzDfu9dqI/hpinyt5rETj5E19qxBjIZt
+X3Nq5lY2ktbyqVIo8Z8w4EUU+3XHZKqDwjyYvjxCxv5lVVnqvQrH9h3kENBMrPYQ
+4XonQHpUGG7g7pA3ylmHi+nEedr0H5qKHzyFZlRdI7CLVNoAtBSdwvmtGd2cVVXU
+EaToKNbHPXXYYPX/oVAWZYwD7PHXIRJkiEZnrFARNhLypicU7yjvejUPXcVy5HMh
+XqEbrODPp4VXfbYhVg==
+-----END ENCRYPTED PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDXjCCAkYCAhI0MA0GCSqGSIb3DQEBBAUAMHcxCzAJBgNVBAYTAlVTMRMwEQYD
+VQQIDApDYWxpZm9ybmlhMREwDwYDVQQHDAhTYW4gSm9zZTEXMBUGA1UECgwOUGVy
+c29uYWwsIEluYy4xEDAOBgNVBAsMB1Jvb3QgQ0ExFTATBgNVBAMMDG15ZG9tYWlu
+LmNvbTAeFw0yMTExMjIyMjQ5MzlaFw0yMjExMjIyMjQ5MzlaMHIxCzAJBgNVBAYT
+AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMREwDwYDVQQHDAhTYW4gSm9zZTEXMBUG
+A1UECgwOUGVyc29uYWwsIEluYy4xCzAJBgNVBAsMAklUMRUwEwYDVQQDDAxteWRv
+bWFpbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC5fdA7wwD9
+9e5RcdLscvGB+hqJUEHuNC53SYKg5X4Sf0H4ExQUbsy8UaoWzWHhgGbCtTvUVavl
+72xsO74ei0EblopW7QknF0kaTO8Vi3mxhUAdtZFLG/o0NS9J16HdGDGojJwuqU9+
+sMQt1w0HCTMlriELnxaUFKP7M9b0uK5VODEKJ38QKNGXUDt66D7BVYeT/6hz2cXK
+QWDoHk/JadALSzW5ES8KIHfxCLnl2TcKxQhJ4CnL8qeGvc8N3VyTh2AXajaJW5RB
+8Oy4CVoYxcdmP1IapxCD+yNcmNt9XpUTD+6eM5gnvtbye+MSfwPz2MW+fWEDZXOv
+3VxhJyTRFNVTAgMBAAEwDQYJKoZIhvcNAQEEBQADggEBADYK/pn6QG7bvUL0Xnnw
+1KPf1nx36gfJE2V0bNk4uyNNeYufMKS8gPLzC+a3RigCEDc+hIZFE5BJexHd7DXA
+CWgHZJtdjM/Xlgoxbf1yfGV3DWeIZlNFSFZujBIpbm1Ug2BAeV31YRWODPZlUSEZ
+0jv8NEs8/oEz9bM4jwRdn09lo4D9hE6o8qDnrzmN2LBZP5dDIJ6g/M+mq/SJFnho
+qBrfUABZhbgk2+tkZ89OI2xpASCLo6X/vqua2iho6km3x+cz6EI9BbvVr6xOOdVK
+m6gs/Bi4MGTh35jdmvyXoyBUOd1w3yBBj86qbEt2ZHYqreRTxntQYx06598Q9Dsi
+xdg=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDajCCAlICCQD/7mxPcMTPIDANBgkqhkiG9w0BAQsFADB3MQswCQYDVQQGEwJV
+UzETMBEGA1UECAwKQ2FsaWZvcm5pYTERMA8GA1UEBwwIU2FuIEpvc2UxFzAVBgNV
+BAoMDlBlcnNvbmFsLCBJbmMuMRAwDgYDVQQLDAdSb290IENBMRUwEwYDVQQDDAxt
+eWRvbWFpbi5jb20wHhcNMjExMTIyMjExODAwWhcNNDkwNDA5MjExODAwWjB3MQsw
+CQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTERMA8GA1UEBwwIU2FuIEpv
+c2UxFzAVBgNVBAoMDlBlcnNvbmFsLCBJbmMuMRAwDgYDVQQLDAdSb290IENBMRUw
+EwYDVQQDDAxteWRvbWFpbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCkIwuNGv3ckew/o2UwaDlYgXH9bh1jap4ZCb6qpjvR3tq9nCerY6XMli0Z
+Xxg0wMHDNUr/jmVYIdQjbz0DVNz/l6ZBJHzHCEgqR40pNM3NgC5sDyuNhF3WLNvj
+WgHEwYosfb/9kFRjKUPqqtJ0ccj87OP3XrE/4epCTdJdmugroAQSpXt1ZZfwwPO4
+K27DzMD9W01EmeLcUhMfrpUnKGCfL22c0sZZm/6Khk4BExC3pSILP/NREKeUEAHw
++rxhNqbUyD/e4/DutdtJ5zONA+GVVGYCpu1Iy0W78Jve4MD2/TFPcEzf5omiWpPz
+WjpOWayD43ur0SZnYJ5haUlZ+bSLAgMBAAEwDQYJKoZIhvcNAQELBQADggEBABqN
+/eb+mKEw2zklPZuzy5uhOc7ImG2LP/ha3oc2ntwkqZQ2LmVA/2ZNVfcNrRRngIfn
+Ir9Kual7djmKmIQvanpnSyXOKOlIiuN0VOrewgzlBZZwlFJL/AH1l7K9uZfBbV5h
+oFfaR9wc+vRGsg3nqO9+hEuk6xbp0jk8QCt26EVhEPlijxzbxTQYiNPNSLuf/kPW
+C9xtIKSWIDT4N6DtH7BtHGQKQdRJ2b4SSUF4joEmBe6jcrLBeDybmuFtKqlVJKUk
+tzBd9CPseqMML1c518KzxlSkXNxTCa7PWEnuN5atLZ+pGGjxtGcDKkrZ9Cgi09G8
+MzB8b4C/goypyhBNlyI=
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/examples/ssl-factory/test/conf/cassandra_ssl_test.keystore b/examples/ssl-factory/test/conf/cassandra_ssl_test.keystore
new file mode 100644
index 0000000..8b2b218
--- /dev/null
+++ b/examples/ssl-factory/test/conf/cassandra_ssl_test.keystore
Binary files differ
diff --git a/examples/ssl-factory/test/conf/cassandra_ssl_test.truststore b/examples/ssl-factory/test/conf/cassandra_ssl_test.truststore
new file mode 100644
index 0000000..49cf332
--- /dev/null
+++ b/examples/ssl-factory/test/conf/cassandra_ssl_test.truststore
Binary files differ
diff --git a/examples/ssl-factory/test/conf/cassandra_ssl_test.truststore-without-password b/examples/ssl-factory/test/conf/cassandra_ssl_test.truststore-without-password
new file mode 100644
index 0000000..0031b15
--- /dev/null
+++ b/examples/ssl-factory/test/conf/cassandra_ssl_test.truststore-without-password
Binary files differ
diff --git a/examples/ssl-factory/test/conf/cassandra_trusted_certificates.pem b/examples/ssl-factory/test/conf/cassandra_trusted_certificates.pem
new file mode 100644
index 0000000..8806ce8
--- /dev/null
+++ b/examples/ssl-factory/test/conf/cassandra_trusted_certificates.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+n3MVF9w=
+-----END CERTIFICATE-----
diff --git a/examples/ssl-factory/test/conf/cassandra_unencrypted_private_key.pem b/examples/ssl-factory/test/conf/cassandra_unencrypted_private_key.pem
new file mode 100644
index 0000000..ce3d8e7
--- /dev/null
+++ b/examples/ssl-factory/test/conf/cassandra_unencrypted_private_key.pem
@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCOSZVf8dLj1xLw
+efqjbogbAwhwRXd3tmEfQY1zHyudJF3XR1T0Xp26BKNvAYUxxZRDNg16M3prPRZv
+wkhDJdE9NaN+BpZPJUavRsZOfGDq/CHN8j/2PPKrn3G/b065JjV3GZjfV/Sln047
+DeZptaSyOg7ZN7F8qjGqxEcnw+szV/wTzhnjGjZMlcbOm4jFGQAn6xUOooQCGsoB
+9FgxKB0nvNG3xVe/2eCNfbS4DabT3Y1wfQqZ62hOa5ZS0rwT+pw3tQs3zFFY1Sfi
+G7qYbqZrKTheWIZGojVVm6mqH4yA2ofOe5N3RsBitCwU/D0dpnaG9Wcl98nmXueM
+B6Rk04v7AgMBAAECggEAYnxIKjrFz/JkJ5MmiszM5HV698r9YB0aqHnFIHPoykIL
+uiCjiumantDrFsCkosixULwvI/BRwbxstTpyrheU9psT6P1CONICVPvV8ylgJAYU
+l+ofn56cEXKxVuICSWFLDH7pM1479g+IJJQAchbKQpqxAGTuMu3SpvJolfuj5srt
+bM7/RYhJFLwDuvHNA3ivlogMneItP03+C25aaxstM+lBuBf68+n78zMgSvt6J/6Y
+G2TOMKnxveMlG2qu9l2lAw/2i8daG/qre08nTH7wpRx0gZLZqNpe45exkrzticzF
+FgWYjG2K2brX21jqHroFgMhdXF7zhhRgLoIeC0BrIQKBgQDCfGfWrJESKBbVai5u
+7wqD9nlzjv6N6FXfTDOPXO1vz5frdvtLVWbs0SMPy+NglkaZK0iqHvb9mf2of8eC
+0D5cmewjn7WCDBQMypIMYgT912ak/BBVuGXcxb6UgD+xARfSARo2C8NG1hfprw1W
+ad14CjS5xhFMs44HpVYhI7iPYwKBgQC7SqVG/b37vZ7CINemdvoMujLvvYXDJJM8
+N21LqNJfVXdukdH3T0xuLnh9Z/wPHjJDMF/9+1foxSEPHijtyz5P19EilNEC/3qw
+fI19+VZoY0mdhPtXSGzy+rbTE2v71QgwFLizSos14Gr+eNiIjF7FYccK05++K/zk
+cd8ZA3bwiQKBgQCl+HTFBs9mpz+VMOAfW2+l3hkXPNiPUc62mNkHZ05ZNNd44jjh
+uSf0wSUiveR08MmevQlt5K7zDQ8jVKh2QjB15gVXAVxsdtJFeDnax2trFP9LnLBz
+9sE2/qn9INU5wK0LUlWD+dXUBbCyg+jl7cJKRqtoPldVFYYHkFlIPqup8QKBgHXv
+hyuw1FUVDkdHzwOvn70r8q8sNHKxMVWVwWkHIZGOi+pAQGrusD4hXRX6yKnsZdIR
+QCD6iFy25R5T64nxlYdJaxPPid3NakB/7ckJnPOWseBSwMIxhQlr/nvjmve1Kba9
+FaEwq4B9lGIxToiNe4/nBiM3JzvlDxX67nUdzWOhAoGAdFvriyvjshSJ4JHgIY9K
+37BVB0VKMcFV2P8fLVWO5oyRtE1bJhU4QVpQmauABU4RGSojJ3NPIVH1wxmJeYtj
+Q3b7EZaqI6ovna2eK2qtUx4WwxhRaXTT8xueBI2lgL6sBSTGG+K69ZOzGQzG/Mfr
+RXKInnLInFD9JD94VqmMozo=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+n3MVF9w=
+-----END CERTIFICATE-----
diff --git a/examples/ssl-factory/test/unit/org/apache/cassandra/security/KubernetesSecretsPEMSslContextFactoryTest.java b/examples/ssl-factory/test/unit/org/apache/cassandra/security/KubernetesSecretsPEMSslContextFactoryTest.java
new file mode 100644
index 0000000..2d127f3
--- /dev/null
+++ b/examples/ssl-factory/test/unit/org/apache/cassandra/security/KubernetesSecretsPEMSslContextFactoryTest.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLException;
+import javax.net.ssl.TrustManagerFactory;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.cassandra.security.KubernetesSecretsPEMSslContextFactory.DEFAULT_PRIVATE_KEY_ENV_VAR_NAME;
+import static org.apache.cassandra.security.KubernetesSecretsPEMSslContextFactory.DEFAULT_PRIVATE_KEY_PASSWORD_ENV_VAR_NAME;
+import static org.apache.cassandra.security.KubernetesSecretsPEMSslContextFactory.PEMConfigKey.PRIVATE_KEY_PASSWORD_ENV_VAR;
+import static org.apache.cassandra.security.KubernetesSecretsPEMSslContextFactory.PEMConfigKey.TRUSTED_CERTIFICATE_ENV_VAR;
+import static org.apache.cassandra.security.KubernetesSecretsSslContextFactory.ConfigKeys.KEYSTORE_UPDATED_TIMESTAMP_PATH;
+import static org.apache.cassandra.security.KubernetesSecretsSslContextFactory.ConfigKeys.TRUSTSTORE_UPDATED_TIMESTAMP_PATH;
+
+public class KubernetesSecretsPEMSslContextFactoryTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(KubernetesSecretsPEMSslContextFactoryTest.class);
+
+    private static final String ENCRYPTED_PRIVATE_KEY_FILEPATH = "build/test/conf/cassandra_encrypted_private_key.pem";
+    private static final String ENCRYPTED_PRIVATE_KEY_WITH_MULTIPLE_CERTS_IN_CERTCHAIN_FILEPATH = "build/test/conf" +
+                                                                                                  "/cassandra_encrypted_private_key_multiplecerts.pem";
+    private static final String UNENCRYPTED_PRIVATE_KEY_FILEPATH = "build/test/conf/cassandra_unencrypted_private_key.pem";
+    private static final String TRUSTED_CERTIFICATES_FILEPATH = "build/test/conf/cassandra_trusted_certificates.pem";
+    private final static String TRUSTSTORE_UPDATED_TIMESTAMP_FILEPATH = "build/test/conf/cassandra_truststore_last_updatedtime";
+    private final static String KEYSTORE_UPDATED_TIMESTAMP_FILEPATH = "build/test/conf/cassandra_keystore_last_updatedtime";
+
+    private static String private_key;
+    private static String unencrypted_private_key;
+    private static String trusted_certificates;
+
+    private final Map<String, Object> commonConfig = new HashMap<>();
+
+    @BeforeClass
+    public static void prepare()
+    {
+        deleteFileIfExists(TRUSTSTORE_UPDATED_TIMESTAMP_FILEPATH);
+        deleteFileIfExists(KEYSTORE_UPDATED_TIMESTAMP_FILEPATH);
+        private_key = readFile(ENCRYPTED_PRIVATE_KEY_FILEPATH);
+        unencrypted_private_key = readFile(UNENCRYPTED_PRIVATE_KEY_FILEPATH);
+        trusted_certificates = readFile(TRUSTED_CERTIFICATES_FILEPATH);
+    }
+
+    private static void deleteFileIfExists(String filePath)
+    {
+        try
+        {
+            logger.info("Deleting the file {} to prepare for the tests", new File(filePath).getAbsolutePath());
+            File file = new File(filePath);
+            if (file.exists())
+            {
+                file.delete();
+            }
+        }
+        catch (Exception e)
+        {
+            logger.warn("File {} could not be deleted.", filePath, e);
+        }
+    }
+
+    private static String readFile(String file)
+    {
+        try
+        {
+            return new String(Files.readAllBytes(Paths.get(file)));
+        }
+        catch (Exception e)
+        {
+            logger.error("Unable to read the file {}. Without this tests in this file would fail.", file);
+        }
+        return null;
+    }
+
+    @Before
+    public void setup()
+    {
+        commonConfig.put(TRUSTED_CERTIFICATE_ENV_VAR,
+                         "MY_TRUSTED_CERTIFICATES");
+        commonConfig.put("MY_TRUSTED_CERTIFICATES", trusted_certificates);
+        commonConfig.put(TRUSTSTORE_UPDATED_TIMESTAMP_PATH, TRUSTSTORE_UPDATED_TIMESTAMP_FILEPATH);
+        /*
+         * To test with real 'env' variables, comment out this line and set the appropriate env variable. This is
+         * done to avoid having a dependency on the environment in the unit test.
+         */
+        commonConfig.put("require_client_auth", Boolean.FALSE);
+        commonConfig.put("cipher_suites", Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA"));
+    }
+
+    private void addKeystoreOptions(Map<String, Object> config)
+    {
+        config.put(DEFAULT_PRIVATE_KEY_ENV_VAR_NAME, private_key);
+        config.put(PRIVATE_KEY_PASSWORD_ENV_VAR, "MY_KEY_PASSWORD");
+        config.put(KEYSTORE_UPDATED_TIMESTAMP_PATH, KEYSTORE_UPDATED_TIMESTAMP_FILEPATH);
+        /*
+         * To test with real 'env' variables, comment out this line and set the appropriate env variable. This is
+         * done to avoid having a dependency on the environment in the unit test.
+         */
+        config.put("MY_KEY_PASSWORD", "cassandra");
+    }
+
+    private void addUnencryptedKeystoreOptions(Map<String, Object> config)
+    {
+        config.put(DEFAULT_PRIVATE_KEY_ENV_VAR_NAME, unencrypted_private_key);
+        config.put(KEYSTORE_UPDATED_TIMESTAMP_PATH, KEYSTORE_UPDATED_TIMESTAMP_FILEPATH);
+        config.remove(DEFAULT_PRIVATE_KEY_PASSWORD_ENV_VAR_NAME);
+        config.remove(PRIVATE_KEY_PASSWORD_ENV_VAR);
+    }
+
+    @Test(expected = SSLException.class)
+    public void buildTrustManagerFactoryWithInvalidTrustedCertificates() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put("MY_TRUSTED_CERTIFICATES", trusted_certificates.replaceAll("\\s", String.valueOf(System.nanoTime())));
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory =
+        new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+    }
+
+    @Test
+    public void buildTrustManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        TrustManagerFactory trustManagerFactory = kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+        Assert.assertNotNull(trustManagerFactory);
+    }
+
+    @Test(expected = SSLException.class)
+    public void buildKeyManagerFactoryWithInvalidPrivateKey() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put(DEFAULT_PRIVATE_KEY_ENV_VAR_NAME, private_key.replaceAll("\\s", String.valueOf(System.nanoTime())));
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory =
+        new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        kubernetesSecretsSslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test(expected = IOException.class)
+    public void buildKeyManagerFactoryWithBadPassword() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put(DEFAULT_PRIVATE_KEY_ENV_VAR_NAME, private_key);
+        config.put(DEFAULT_PRIVATE_KEY_PASSWORD_ENV_VAR_NAME, "HomeOfBadPasswords");
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory =
+        new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildKeyManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory1 = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        // Make sure the expiry check has not happened yet for the private key
+        Assert.assertFalse(kubernetesSecretsSslContextFactory1.checkedExpiry);
+
+        addKeystoreOptions(config);
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory2 = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        // Trigger the private key loading. That will also check for an expired private key
+        kubernetesSecretsSslContextFactory2.buildKeyManagerFactory();
+        // Now we should have checked the private key's expiry
+        Assert.assertTrue(kubernetesSecretsSslContextFactory2.checkedExpiry);
+
+        // Make sure that a new factory object performs a fresh private key expiry check
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory3 =
+        new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory3.checkedExpiry);
+        kubernetesSecretsSslContextFactory3.buildKeyManagerFactory();
+        Assert.assertTrue(kubernetesSecretsSslContextFactory3.checkedExpiry);
+    }
+
+    @Test
+    public void buildKeyManagerFactoryWithMultipleCertsInCertChain() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeystoreOptions(config);
+        config.put(DEFAULT_PRIVATE_KEY_ENV_VAR_NAME, readFile(ENCRYPTED_PRIVATE_KEY_WITH_MULTIPLE_CERTS_IN_CERTCHAIN_FILEPATH));
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory2 = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        // Trigger the private key loading. That will also check for an expired private key
+        kubernetesSecretsSslContextFactory2.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildKeyManagerFactoryHappyPathForUnencryptedKey() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory1 = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        // Make sure the expiry check has not happened yet for the private key
+        Assert.assertFalse(kubernetesSecretsSslContextFactory1.checkedExpiry);
+
+        addUnencryptedKeystoreOptions(config);
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory2 = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        // Trigger the private key loading. That will also check for an expired private key
+        kubernetesSecretsSslContextFactory2.buildKeyManagerFactory();
+        // Now we should have checked the private key's expiry
+        Assert.assertTrue(kubernetesSecretsSslContextFactory2.checkedExpiry);
+
+        // Make sure that a new factory object performs a fresh private key expiry check
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory3 =
+        new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory3.checkedExpiry);
+        kubernetesSecretsSslContextFactory3.buildKeyManagerFactory();
+        Assert.assertTrue(kubernetesSecretsSslContextFactory3.checkedExpiry);
+    }
+
+    @Test
+    public void checkTruststoreUpdateReloading() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeystoreOptions(config);
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        TrustManagerFactory trustManagerFactory = kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+        Assert.assertNotNull(trustManagerFactory);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+
+        updateTimestampFile(config, TRUSTSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertTrue(kubernetesSecretsSslContextFactory.shouldReload());
+
+        config.remove(TRUSTSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+    }
+
+    @Test
+    public void checkKeystoreUpdateReloading() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeystoreOptions(config);
+
+        KubernetesSecretsPEMSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsPEMSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        KeyManagerFactory keyManagerFactory = kubernetesSecretsSslContextFactory.buildKeyManagerFactory();
+        Assert.assertNotNull(keyManagerFactory);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+
+        updateTimestampFile(config, KEYSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertTrue(kubernetesSecretsSslContextFactory.shouldReload());
+
+        config.remove(KEYSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+    }
+
+    private void updateTimestampFile(Map<String, Object> config, String filePathKey)
+    {
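+        // Writes the current nanoTime so that the factory observes a strictly larger value on its next shouldReload() check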
+        String filePath = config.containsKey(filePathKey) ? config.get(filePathKey).toString() : null;
+        try (OutputStream os = Files.newOutputStream(Paths.get(filePath)))
+        {
+            String timestamp = String.valueOf(System.nanoTime());
+            os.write(timestamp.getBytes());
+            logger.info("Successfully wrote to file {}", filePath);
+        }
+        catch (IOException e)
+        {
+            logger.warn("Failed to write to filePath {} from the mounted volume", filePath, e);
+        }
+    }
+
+    private static class KubernetesSecretsPEMSslContextFactoryForTestOnly extends KubernetesSecretsPEMSslContextFactory
+    {
+
+        public KubernetesSecretsPEMSslContextFactoryForTestOnly()
+        {
+        }
+
+        public KubernetesSecretsPEMSslContextFactoryForTestOnly(Map<String, Object> config)
+        {
+            super(config);
+        }
+
+        /*
+         * This is overridden to give priority to the input map configuration first, since we should not be setting env
+         * variables from the unit tests. However, if the input map configuration doesn't have a value for the
+         * given key, fall back to loading from the real environment variables.
+         */
+        @Override
+        String getValueFromEnv(String envVarName, String defaultValue)
+        {
+            String envVarValue = parameters.get(envVarName) != null ? parameters.get(envVarName).toString() : null;
+            if (StringUtils.isEmpty(envVarValue))
+            {
+                logger.info("Configuration doesn't have env variable {}. Will use parent's implementation", envVarName);
+                return super.getValueFromEnv(envVarName, defaultValue);
+            }
+            else
+            {
+                logger.info("Configuration has environment variable {} with value {}. Will use that.",
+                            envVarName, envVarValue);
+                return envVarValue;
+            }
+        }
+    }
+}
diff --git a/examples/ssl-factory/test/unit/org/apache/cassandra/security/KubernetesSecretsSslContextFactoryTest.java b/examples/ssl-factory/test/unit/org/apache/cassandra/security/KubernetesSecretsSslContextFactoryTest.java
new file mode 100644
index 0000000..d37992a
--- /dev/null
+++ b/examples/ssl-factory/test/unit/org/apache/cassandra/security/KubernetesSecretsSslContextFactoryTest.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.TrustManagerFactory;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.io.util.File;
+
+import static org.apache.cassandra.security.KubernetesSecretsSslContextFactory.ConfigKeys.KEYSTORE_PASSWORD_ENV_VAR;
+import static org.apache.cassandra.security.KubernetesSecretsSslContextFactory.ConfigKeys.KEYSTORE_UPDATED_TIMESTAMP_PATH;
+import static org.apache.cassandra.security.KubernetesSecretsSslContextFactory.ConfigKeys.TRUSTSTORE_PASSWORD_ENV_VAR;
+import static org.apache.cassandra.security.KubernetesSecretsSslContextFactory.ConfigKeys.TRUSTSTORE_UPDATED_TIMESTAMP_PATH;
+
+public class KubernetesSecretsSslContextFactoryTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(KubernetesSecretsSslContextFactoryTest.class);
+    private static final String TRUSTSTORE_PATH = EncryptionOptions.ConfigKey.TRUSTSTORE.toString();
+    private static final String KEYSTORE_PATH = EncryptionOptions.ConfigKey.KEYSTORE.toString();
+    private final static String truststoreUpdatedTimestampFilepath = "build/test/conf/cassandra_truststore_last_updatedtime";
+    private final static String keystoreUpdatedTimestampFilepath = "build/test/conf/cassandra_keystore_last_updatedtime";
+    private final Map<String, Object> commonConfig = new HashMap<>();
+
+    @BeforeClass
+    public static void prepare()
+    {
+        deleteFileIfExists(truststoreUpdatedTimestampFilepath);
+        deleteFileIfExists(keystoreUpdatedTimestampFilepath);
+    }
+
+    private static void deleteFileIfExists(String file)
+    {
+        Path filePath = Paths.get(file);
+        boolean deleted = new File(filePath).toJavaIOFile().delete();
+        if (!deleted)
+        {
+            logger.warn("File {} could not be deleted.", filePath);
+        }
+    }
+
+    @Before
+    public void setup()
+    {
+        commonConfig.put(TRUSTSTORE_PATH, "build/test/conf/cassandra_ssl_test.truststore");
+        commonConfig.put(TRUSTSTORE_PASSWORD_ENV_VAR, "MY_TRUSTSTORE_PASSWORD");
+        commonConfig.put(TRUSTSTORE_UPDATED_TIMESTAMP_PATH, truststoreUpdatedTimestampFilepath);
+        /*
+         * To test with real 'env' variables, comment out this line and set the appropriate env variable. This is
+         * done to avoid having a dependency on the environment in the unit test.
+         */
+        commonConfig.put("MY_TRUSTSTORE_PASSWORD", "cassandra");
+        commonConfig.put("require_client_auth", Boolean.FALSE);
+        commonConfig.put("cipher_suites", Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA"));
+    }
+
+    private void addKeystoreOptions(Map<String, Object> config)
+    {
+        config.put(KEYSTORE_PATH, "build/test/conf/cassandra_ssl_test.keystore");
+        config.put(KEYSTORE_PASSWORD_ENV_VAR, "MY_KEYSTORE_PASSWORD");
+        config.put(KEYSTORE_UPDATED_TIMESTAMP_PATH, keystoreUpdatedTimestampFilepath);
+        /*
+         * To test with real 'env' variables, comment out this line and set the appropriate env variable. This is
+         * done to avoid having a dependency on the environment in the unit test.
+         */
+        config.put("MY_KEYSTORE_PASSWORD", "cassandra");
+    }
+
+    @Test(expected = IOException.class)
+    public void buildTrustManagerFactoryWithInvalidTruststoreFile() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put(TRUSTSTORE_PATH, "/this/is/probably/not/a/file/on/your/test/machine");
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+    }
+
+    @Test(expected = IOException.class)
+    public void buildTrustManagerFactoryWithBadPassword() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.remove(TRUSTSTORE_PASSWORD_ENV_VAR);
+        config.put(KubernetesSecretsSslContextFactory.DEFAULT_TRUSTSTORE_PASSWORD_ENV_VAR_NAME, "HomeOfBadPasswords");
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+    }
+
+    @Test
+    public void buildTrustManagerFactoryWithEmptyPassword() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put(TRUSTSTORE_PATH, "build/test/conf/cassandra_ssl_test.truststore-without-password");
+        config.remove(TRUSTSTORE_PASSWORD_ENV_VAR);
+        config.put(KubernetesSecretsSslContextFactory.DEFAULT_TRUSTSTORE_PASSWORD_ENV_VAR_NAME, "");
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+    }
+
+    @Test
+    public void buildTrustManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        TrustManagerFactory trustManagerFactory = kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+        Assert.assertNotNull(trustManagerFactory);
+    }
+
+    @Test(expected = IOException.class)
+    public void buildKeyManagerFactoryWithInvalidKeystoreFile() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put(KEYSTORE_PATH, "/this/is/probably/not/a/file/on/your/test/machine");
+        config.put(KEYSTORE_PASSWORD_ENV_VAR, "MY_KEYSTORE_PASSWORD");
+        config.put("MY_KEYSTORE_PASSWORD","ThisWontMatter");
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        kubernetesSecretsSslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test(expected = IOException.class)
+    public void buildKeyManagerFactoryWithBadPassword() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put(KEYSTORE_PATH, "build/test/conf/cassandra_ssl_test.keystore");
+        config.put(KubernetesSecretsSslContextFactory.DEFAULT_KEYSTORE_PASSWORD_ENV_VAR_NAME, "HomeOfBadPasswords");
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildKeyManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory1 = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        // Make sure the expiry check has not happened yet for the private key
+        Assert.assertFalse(kubernetesSecretsSslContextFactory1.checkedExpiry);
+
+        addKeystoreOptions(config);
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory2 = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        // Trigger the private key loading. That will also check for an expired private key
+        kubernetesSecretsSslContextFactory2.buildKeyManagerFactory();
+        // Now we should have checked the private key's expiry
+        Assert.assertTrue(kubernetesSecretsSslContextFactory2.checkedExpiry);
+
+        // Make sure that a new factory object performs a fresh private key expiry check
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory3 = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory3.checkedExpiry);
+        kubernetesSecretsSslContextFactory3.buildKeyManagerFactory();
+        Assert.assertTrue(kubernetesSecretsSslContextFactory3.checkedExpiry);
+    }
+
+    @Test
+    public void checkTruststoreUpdateReloading() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeystoreOptions(config);
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        TrustManagerFactory trustManagerFactory = kubernetesSecretsSslContextFactory.buildTrustManagerFactory();
+        Assert.assertNotNull(trustManagerFactory);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+
+        updateTimestampFile(config, TRUSTSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertTrue(kubernetesSecretsSslContextFactory.shouldReload());
+
+        config.remove(TRUSTSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+    }
+
+    @Test
+    public void checkKeystoreUpdateReloading() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeystoreOptions(config);
+
+        KubernetesSecretsSslContextFactory kubernetesSecretsSslContextFactory = new KubernetesSecretsSslContextFactoryForTestOnly(config);
+        kubernetesSecretsSslContextFactory.checkedExpiry = false;
+        KeyManagerFactory keyManagerFactory = kubernetesSecretsSslContextFactory.buildKeyManagerFactory();
+        Assert.assertNotNull(keyManagerFactory);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+
+        updateTimestampFile(config, KEYSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertTrue(kubernetesSecretsSslContextFactory.shouldReload());
+
+        config.remove(KEYSTORE_UPDATED_TIMESTAMP_PATH);
+        Assert.assertFalse(kubernetesSecretsSslContextFactory.shouldReload());
+    }
+
+    private void updateTimestampFile(Map<String, Object> config, String filePathKey)
+    {
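+        // Writes the current nanoTime so that the factory observes a strictly larger value on its next shouldReload() check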
+        String filePath = config.containsKey(filePathKey) ? config.get(filePathKey).toString() : null;
+        try (OutputStream os = Files.newOutputStream(Paths.get(filePath)))
+        {
+            String timestamp = String.valueOf(System.nanoTime());
+            os.write(timestamp.getBytes());
+            logger.info("Successfully wrote to file {}", filePath);
+        }
+        catch (IOException e)
+        {
+            logger.warn("Failed to write to filePath {} from the mounted volume", filePath, e);
+        }
+    }
+
+    private static class KubernetesSecretsSslContextFactoryForTestOnly extends KubernetesSecretsSslContextFactory
+    {
+
+        public KubernetesSecretsSslContextFactoryForTestOnly()
+        {
+        }
+
+        public KubernetesSecretsSslContextFactoryForTestOnly(Map<String, Object> config)
+        {
+            super(config);
+        }
+
+        /*
+         * This is overridden to give priority to the input map configuration first, since we should not be setting env
+         * variables from the unit tests. However, if the input map configuration doesn't have a value for the
+         * given key, fall back to loading from the real environment variables.
+         */
+        @Override
+        String getValueFromEnv(String envVarName, String defaultValue)
+        {
+            String envVarValue = parameters.get(envVarName) != null ? parameters.get(envVarName).toString() : null;
+            if (StringUtils.isEmpty(envVarValue))
+            {
+                logger.info("Configuration doesn't have env variable {}. Will use parent's implementation", envVarName);
+                return super.getValueFromEnv(envVarName, defaultValue);
+            }
+            else
+            {
+                logger.info("Configuration has env variable {} with value {}. Will use that.",
+                            envVarName, envVarValue);
+                return envVarValue;
+            }
+        }
+    }
+}
diff --git a/examples/triggers/README.adoc b/examples/triggers/README.adoc
new file mode 100644
index 0000000..9c2461d
--- /dev/null
+++ b/examples/triggers/README.adoc
@@ -0,0 +1,63 @@
+Cassandra Trigger Example
+==========================
+
+The `AuditTrigger` class will create a basic audit of
+activity on a table.
+
+Installation
+-------------
+
+----
+$ cd <cassandra_src_dir>/examples/triggers
+$ ant install
+----
+
+It builds the trigger jar and copies it to `conf/triggers`. The `AuditTrigger.properties` file
+in this example's `conf` directory is automatically bundled into the built jar, so there is
+no need to copy it to the `conf/triggers` directory.
+
+Usage
+-----
+
+Create the keyspace and table configured in `AuditTrigger.properties`:
+
+----
+cqlsh> CREATE KEYSPACE test WITH REPLICATION =
+    { 'class' : 'SimpleStrategy', 'replication_factor' : '1' };
+cqlsh> CREATE TABLE test.audit (key timeuuid, keyspace_name text,
+    table_name text, primary_key text, PRIMARY KEY(key));
+----
+
+Create a table to add the trigger to:
+
+Note: The example currently only handles non-composite partition keys.
+
+----
+cqlsh> CREATE TABLE test.test (key text, value text, PRIMARY KEY(key));
+----
+
+Configure the trigger on the table:
+
+----
+cqlsh> CREATE TRIGGER test1 ON test.test USING 'org.apache.cassandra.triggers.AuditTrigger';
+----
+
+Start inserting data into the table that has the trigger. For each
+partition added to the table, an entry should appear in the `audit` table:
+
+----
+cqlsh> INSERT INTO test.test (key, value) VALUES ('1', '1');
+----
+
+An entry will be automatically added to the `test.audit` table:
+
+----
+cqlsh> SELECT * FROM test.audit ;
+
+@ Row 1
+---------------+--------------------------------------
+ key           | 885141d0-ad7c-11ed-b917-9958320828b8
+ keyspace_name | test
+ primary_key   | 1
+ table_name    | test
+----
+
diff --git a/examples/triggers/README.txt b/examples/triggers/README.txt
deleted file mode 100644
index e5f1ecf..0000000
--- a/examples/triggers/README.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Cassandra Trigger Example:
-==========================
-
-The AuditTrigger class will create a basic audit of
-activity on a table.
-
-Installation:
-============
-change directory to <cassandra_src_dir>/examples/triggers
-run "ant jar"
-Copy build/trigger-example.jar to <cassandra_conf>/triggers/
-Copy conf/* to <cassandra_home>/conf/
-
-Create the keyspace and table configured in AuditTrigger.properties:
-    CREATE KEYSPACE test WITH REPLICATION =
-        { 'class' : 'SimpleStrategy', 'replication_factor' : '1' };
-    CREATE TABLE test.audit (key timeuuid, keyspace_name text,
-        table_name text, primary_key text, PRIMARY KEY(key));
-
-Create a table to add the trigger to:
-    CREATE TABLE test.test (key text, value text, PRIMARY KEY(key));
-    Note: The example currently only handles non-composite partition keys
-
-Configure the trigger on the table:
-    CREATE TRIGGER test1 ON test.test
-        USING 'org.apache.cassandra.triggers.AuditTrigger';
-
-Start inserting data to the table that has the trigger. For each
-partition added to the table an entry should appear in the 'audit' table:
-    INSERT INTO test.test (key, value) values ('1', '1');
-    SELECT * FROM test.audit;
-
-    key                                  | keyspace_name | primary_key | table_name
-   --------------------------------------+---------------+-------------+------------
-    7dc75b60-770f-11e5-9019-033d8af33e6f |          test |           1 |       test
-
diff --git a/examples/triggers/build.xml b/examples/triggers/build.xml
index 450def6..b5a0f6f 100644
--- a/examples/triggers/build.xml
+++ b/examples/triggers/build.xml
@@ -57,7 +57,12 @@
 		</jar>
 	</target>
 
+	<target name="install" depends="jar">
+		<copy verbose="true" file="${build.dir}/${final.name}.jar" todir="${cassandra.dir}/conf/triggers" overwrite="true"/>
+	</target>
+
 	<target name="clean">
 		<delete dir="${build.dir}" />
+		<delete file="${cassandra.dir}/conf/triggers/${final.name}.jar"/>
 	</target>
 </project>
diff --git a/examples/triggers/src/org/apache/cassandra/triggers/AuditTrigger.java b/examples/triggers/src/org/apache/cassandra/triggers/AuditTrigger.java
index b0172b0..657394a 100644
--- a/examples/triggers/src/org/apache/cassandra/triggers/AuditTrigger.java
+++ b/examples/triggers/src/org/apache/cassandra/triggers/AuditTrigger.java
@@ -29,19 +29,27 @@
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class AuditTrigger implements ITrigger
 {
-    private Properties properties = loadProperties();
+    private static final String AUDIT_PROPERTIES_FILE_NAME = "AuditTrigger.properties";
+
+    private final Properties properties;
+    private final String auditKeyspace;
+    private final String auditTable;
+
+    public AuditTrigger()
+    {
+        properties = loadProperties();
+        auditKeyspace = properties.getProperty("keyspace");
+        auditTable = properties.getProperty("table");
+    }
 
     public Collection<Mutation> augment(Partition update)
     {
-        String auditKeyspace = properties.getProperty("keyspace");
-        String auditTable = properties.getProperty("table");
-
         TableMetadata metadata = Schema.instance.getTableMetadata(auditKeyspace, auditTable);
-        PartitionUpdate.SimpleBuilder audit = PartitionUpdate.simpleBuilder(metadata, UUIDGen.getTimeUUID());
+        PartitionUpdate.SimpleBuilder audit = PartitionUpdate.simpleBuilder(metadata, TimeUUID.Generator.nextTimeUUID());
 
         audit.row()
              .add("keyspace_name", update.metadata().keyspace)
@@ -54,7 +62,7 @@
     private static Properties loadProperties()
     {
         Properties properties = new Properties();
-        InputStream stream = AuditTrigger.class.getClassLoader().getResourceAsStream("AuditTrigger.properties");
+        InputStream stream = AuditTrigger.class.getClassLoader().getResourceAsStream(AUDIT_PROPERTIES_FILE_NAME);
         try
         {
             properties.load(stream);
diff --git a/ide/idea-iml-file.xml b/ide/idea-iml-file.xml
index 59fb99e..c0b5584 100644
--- a/ide/idea-iml-file.xml
+++ b/ide/idea-iml-file.xml
@@ -36,6 +36,9 @@
             <sourceFolder url="file://$MODULE_DIR$/test/microbench" isTestSource="true" />
             <sourceFolder url="file://$MODULE_DIR$/test/burn" isTestSource="true" />
             <sourceFolder url="file://$MODULE_DIR$/test/distributed" isTestSource="true" />
+            <sourceFolder url="file://$MODULE_DIR$/test/simulator/asm" isTestSource="true" />
+            <sourceFolder url="file://$MODULE_DIR$/test/simulator/bootstrap" isTestSource="true" />
+            <sourceFolder url="file://$MODULE_DIR$/test/simulator/main" isTestSource="true" />
             <sourceFolder url="file://$MODULE_DIR$/test/resources" type="java-test-resource" />
             <sourceFolder url="file://$MODULE_DIR$/test/conf" type="java-test-resource" />
             <excludeFolder url="file://$MODULE_DIR$/.idea" />
diff --git a/ide/idea/workspace.xml b/ide/idea/workspace.xml
index 5f28853..8aea315 100644
--- a/ide/idea/workspace.xml
+++ b/ide/idea/workspace.xml
@@ -147,6 +147,7 @@
                                           -DQT_SHRINKS=0
                                           -Dcassandra.config=file://$PROJECT_DIR$/conf/cassandra.yaml
                                           -Dcassandra.logdir=$PROJECT_DIR$/data/logs
+                                          -Dcassandra.reads.thresholds.coordinator.defensive_checks_enabled=true
                                           -Dcassandra.storagedir=$PROJECT_DIR$/data
                                           -Djava.library.path=$PROJECT_DIR$/lib/sigar-bin
                                           -Dlogback.configurationFile=file://$PROJECT_DIR$/conf/logback.xml
@@ -183,6 +184,7 @@
       <option name="VM_PARAMETERS" value="
                                           -Dcassandra.config=file://$PROJECT_DIR$/test/conf/cassandra.yaml
                                           -Dcassandra.logdir=$PROJECT_DIR$/build/test/logs
+                                          -Dcassandra.reads.thresholds.coordinator.defensive_checks_enabled=true
                                           -Dcassandra.ring_delay_ms=1000
                                           -Dcassandra.skip_sync=true
                                           -Dcassandra.strict.runtime.checks=true
@@ -195,7 +197,7 @@
                                           -Dlegacy-sstable-root=$PROJECT_DIR$/test/data/legacy-sstables
                                           -Dlogback.configurationFile=file://$PROJECT_DIR$/test/conf/logback-test.xml
                                           -Dmigration-sstable-root=$PROJECT_DIR$/test/data/migration-sstables
-                                          -XX:ActiveProcessorCount=4
+                                          -XX:ActiveProcessorCount=2
                                           -XX:MaxMetaspaceSize=384M
                                           -XX:SoftRefLRUPolicyMSPerMB=0
                                           -ea" />
@@ -223,7 +225,9 @@
                                           -Dcassandra.config=file://$PROJECT_DIR$/conf/cassandra.yaml
                                           -Dcassandra.jmx.local.port=7199
                                           -Dcassandra.logdir=$PROJECT_DIR$/data/logs
+                                          -Dcassandra.reads.thresholds.coordinator.defensive_checks_enabled=true
                                           -Dcassandra.storagedir=$PROJECT_DIR$/data
+                                          -Dcassandra.triggers_dir=$PROJECT_DIR$/conf/triggers
                                           -Djava.library.path=$PROJECT_DIR$/lib/sigar-bin
                                           -Dlogback.configurationFile=file://$PROJECT_DIR$/conf/logback.xml
                                           -Xmx1G
diff --git a/ide/nbproject/project.xml b/ide/nbproject/project.xml
index 12c06b6..3900f1f 100644
--- a/ide/nbproject/project.xml
+++ b/ide/nbproject/project.xml
@@ -7,7 +7,7 @@
             <properties>
                 <property name="project.dir">..</property>
                 <!-- the compile classpaths should be distinct per compilation unit… but it is kept simple and the build will catch errors -->
-                <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-7.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.5.6.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang-2.4.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/ecj-4.6.1.jar:${project.dir}/build/lib/jars/ftplet-api-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.13.2.jar:${project.dir}/build/lib/jars/jackson-core-2.13.2.jar:${project.dir}/build/lib/jars/jackson-databind-2.13.2.2.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.4.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${project.dir}/build/lib/jars/jetty-6.1.26.jar:${project.dir}/build/lib/jars/jetty-util-6.1.26.jar:${projec
t.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.6.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.jar:${project.dir}/build/lib/jars/jvm-attach-api-1.5.jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.9.jar:${project.dir}/build/lib/jars/logback-core-1.2.9.jar:${project.dir}/build/lib/jars/lz4-java-1.8.0.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.2.6.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.5.0-4.jar:${project.dir}/build/test/lib/jars/ant-1.10.12.jar:${project.dir}/build/test/lib/jars/ant-junit-1.10.12.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.10.12.jar:${project.dir}/build/test/lib/jars/asm-6.0.jar:${project.dir}/build/test/lib/jars/asm-analysis-6.0.jar:${project.dir}/build/test/lib/jars/asm-commons-6.0.jar:${project.dir}/build/test/lib/jars/asm-tree-6.0.jar:${project.dir}/build/test/lib/jars/asm-util-6.0.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.13.jar:${project.dir}/build/test/lib/jars/hamcrest-2.2.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.26.0-GA.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/b
uild/test/lib/jars/org.jacoco.report-0.8.6.jar:${project.dir}/build/test/lib/jars/quicktheories-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.9.12.jar:${project.dir}/build/test/lib/jars/semver4j-3.1.0.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.25.jar:</property>
+                <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-9.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.9.2.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/checker-qual-3.10.0.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/ecj-4.6.1.jar:${project.dir}/build/lib/jars/error_prone_annotations-2.5.1.jar:${project.dir}/build/lib/jars/ftplet-api-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/ipaddress-5.3.3.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.13.2.jar:${project.dir}/build/lib/jars/jackson-core-2.13.2.jar:${project.dir}/build/lib/jars/jackson-databind-2.13.2.2.jar:${project.dir}/build/lib/jars/jackson-datatype-jsr310-2.13.2.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.4.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib
/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${project.dir}/build/lib/jars/jetty-6.1.26.jar:${project.dir}/build/lib/jars/jetty-util-6.1.26.jar:${project.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.9.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.jar:${project.dir}/build/lib/jars/jvm-attach-api-1.5.jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.9.jar:${project.dir}/build/lib/jars/logback-core-1.2.9.jar:${project.dir}/build/lib/jars/lz4-java-1.8.0.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.8.4.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.5.0-4.jar:${project.dir}/build/test/lib/jars/Saxon-HE-10.3.jar:${project.dir}/build/test/lib/jars/ant-1.10.12.jar:${project.dir}/build/test/lib/jars/ant-junit-1.10.12.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.10.12.jar:${project.dir}/build/test/lib/jars/antlr-2.7.7.jar:${project.dir}/build/test/lib/jars/antlr4-runtime-4.9.1.jar:${project.dir}/build/test/lib/jars/asm-analysis-9.1.jar:${project.dir}/build/test/lib/jars/asm-commons-9.1.jar:${project.dir}/build/test/lib/jars/asm-tree-9.1.jar:${project.dir}/build/test/lib/jars/asm-util-9.1.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/checkstyle-8.40.jar:${project.dir}/build/test/lib/jars/commons-beanutils-1.9.4.jar:${project.dir}/build/test/lib/jars/commons-collections-3.2.2.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-logging-1.2.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.13.jar:${project.dir}/build/test/lib/jars/guava-18.0.jar:${project.dir}/build/test/lib/jars/hamcrest-2.2.jar:${proj
ect.dir}/build/test/lib/jars/harry-core-0.0.1.jar:${project.dir}/build/test/lib/jars/jackson-annotations-2.11.3.jar:${project.dir}/build/test/lib/jars/jackson-core-2.13.2.jar:${project.dir}/build/test/lib/jars/jackson-databind-2.11.3.jar:${project.dir}/build/test/lib/jars/jackson-dataformat-yaml-2.13.2.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.28.0-GA.jar:${project.dir}/build/test/lib/jars/jimfs-1.1.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/jsr305-3.0.2.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.report-0.8.6.jar:${project.dir}/build/test/lib/jars/picocli-4.6.1.jar:${project.dir}/build/test/lib/jars/quicktheories-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.10.2.jar:${project.dir}/build/test/lib/jars/semver4j-3.1.0.jar:${project.dir}/build/test/lib/jars/simulator-asm.jar:${project.dir}/build/test/lib/jars/simulator-bootstrap.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.32.jar:</property>
             </properties>
             <folders>
                 <source-folder>
diff --git a/pylib/Dockerfile.ubuntu.py2 b/pylib/Dockerfile.ubuntu.py2
deleted file mode 100644
index 93016f0..0000000
--- a/pylib/Dockerfile.ubuntu.py2
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu:bionic
-RUN apt-get update && apt-get install -y python-minimal
diff --git a/pylib/README.asc b/pylib/README.asc
index b0b6c7d..53c40ea 100644
--- a/pylib/README.asc
+++ b/pylib/README.asc
@@ -1,17 +1,26 @@
 == Overview
 
 This directory contains code primarily for cqlsh. cqlsh uses cqlshlib in this directory.
-Currently, cqlshlib supports Python 2 as well as Python 3. Support for Python 3 is relatively
-new.
 
 == Requirements
-. Python 3 and 2.7 (for cqlsh)
+. Python 3.6+ (for cqlsh)
 . virtualenv
 . Docker (optional)
 
 == Running tests
 
-In order to run tests for cqlshlib, run cassandra-cqlsh-tests.sh in this directory. It will
+The following environment variables can be set to configure the database connection -
+
+. CQL_TEST_HOST, default 127.0.0.1
+. CQL_TEST_PORT, default 9042
+. CQL_TEST_USER, default 'cassandra'
+. CQL_TEST_PWD
+
+You can run the tests against a local Cassandra server simply by running -
+
+  $ pytest
+
+To run the cqlshlib tests in a virtual environment, run cassandra-cqlsh-tests.sh in this directory. It will
 automatically setup a virtualenv with the appropriate version of Python and run tests inside it.
 
 There are Dockerfiles that can be used to test whether cqlsh works with a default, barebones
@@ -32,4 +41,4 @@
 with minimal Python installation. It will try to connect to the Cassandra instance running on the
 Docker host at port 9042. If you have Cassandra running elsewhere, replace host.docker.internal
 with the IP / hostname as usual. Please ensure that the IP / host is accessible from _within_ the
-Docker container.
\ No newline at end of file
+Docker container.
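As a rough illustration (not part of the patch) of how the connection settings above would be consumed, assuming the tests read them from the environment with the documented defaults:

----
import os

# Hypothetical helper mirroring the documented defaults above
def cql_test_connection():
    host = os.environ.get('CQL_TEST_HOST', '127.0.0.1')
    port = int(os.environ.get('CQL_TEST_PORT', '9042'))
    user = os.environ.get('CQL_TEST_USER', 'cassandra')
    password = os.environ.get('CQL_TEST_PWD')  # no documented default; may be None
    return host, port, user, password
----
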
diff --git a/pylib/cassandra-cqlsh-tests.sh b/pylib/cassandra-cqlsh-tests.sh
index b2d2c50..ad6ae75 100755
--- a/pylib/cassandra-cqlsh-tests.sh
+++ b/pylib/cassandra-cqlsh-tests.sh
@@ -24,21 +24,13 @@
 ################################
 
 WORKSPACE=$1
-PYTHON_VERSION=$2
 
 if [ "${WORKSPACE}" = "" ]; then
     echo "Specify Cassandra source directory"
     exit
 fi
 
-if [ "${PYTHON_VERSION}" = "" ]; then
-    PYTHON_VERSION=python3
-fi
-
-if [ "${PYTHON_VERSION}" != "python3" -a "${PYTHON_VERSION}" != "python2" ]; then
-    echo "Specify Python version python3 or python2"
-    exit
-fi
+PYTHON_VERSION=python3
 
 export PYTHONIOENCODING="utf-8"
 export PYTHONUNBUFFERED=true
@@ -47,7 +39,7 @@
 export CCM_MAX_HEAP_SIZE="2048M"
 export CCM_HEAP_NEWSIZE="200M"
 export CCM_CONFIG_DIR=${WORKSPACE}/.ccm
-export NUM_TOKENS="32"
+export NUM_TOKENS="16"
 export CASSANDRA_DIR=${WORKSPACE}
 export TESTSUITE_NAME="cqlshlib.${PYTHON_VERSION}"
 
@@ -104,8 +96,8 @@
 
 ccm remove test || true # in case an old ccm cluster is left behind
 ccm create test -n 1 --install-dir=${CASSANDRA_DIR}
-ccm updateconf "enable_user_defined_functions: true"
-ccm updateconf "enable_scripted_user_defined_functions: true"
+ccm updateconf "user_defined_functions_enabled: true"
+ccm updateconf "scripted_user_defined_functions_enabled: true"
 
 version_from_build=$(ccm node1 versionfrombuild)
 export pre_or_post_cdc=$(python -c """from distutils.version import LooseVersion
@@ -129,14 +121,13 @@
 cd ${CASSANDRA_DIR}/pylib/cqlshlib/
 
 set +e # disable immediate exit from this point
-pytest
+pytest --junitxml=${WORKSPACE}/cqlshlib.xml
 RETURN="$?"
 
+sed -i "s/testsuite errors=\(\".*\"\) failures=\(\".*\"\) hostname=\(\".*\"\) name=\"pytest\"/testsuite errors=\1 failures=\2 hostname=\3 name=\"${TESTSUITE_NAME}\"/g" ${WORKSPACE}/cqlshlib.xml
+sed -i "s/testcase classname=\"cqlshlib./testcase classname=\"${TESTSUITE_NAME}./g" ${WORKSPACE}/cqlshlib.xml
+
 ccm remove
-# hack around --xunit-prefix-with-testsuite-name not being available in nose 1.3.7
-sed -i "s/testsuite name=\"nosetests\"/testsuite name=\"${TESTSUITE_NAME}\"/g" nosetests.xml
-sed -i "s/testcase classname=\"cqlshlib./testcase classname=\"${TESTSUITE_NAME}./g" nosetests.xml
-mv nosetests.xml ${WORKSPACE}/cqlshlib.xml
 
 ################################
 #
diff --git a/pylib/cqlshlib/authproviderhandling.py b/pylib/cqlshlib/authproviderhandling.py
new file mode 100644
index 0000000..68031e5
--- /dev/null
+++ b/pylib/cqlshlib/authproviderhandling.py
@@ -0,0 +1,176 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Handles loading of AuthProvider for CQLSH authentication.
+"""
+
+import configparser
+import sys
+from importlib import import_module
+from cqlshlib.util import is_file_secure
+
+
+def _warn_for_plain_text_security(config_file, provider_settings):
+    """
+    Called when using PlainTextAuthProvider.
+    Checks whether a password appears in the basic provider settings,
+    as keeping it there is a security risk.
+
+    Writes warnings to stderr.
+    """
+    if 'password' in provider_settings:
+        if not is_file_secure(config_file):
+            print("""\nWarning: Password is found in an insecure cqlshrc file.
+                    The file is owned or readable by other users on the system.""",
+                  end='',
+                  file=sys.stderr)
+        print("""\nNotice: Credentials in the cqlshrc file are deprecated and
+        will be ignored in the future.\n
+        Please use a credentials file to
+        specify the username and password.\n""",
+              file=sys.stderr)
+
+
+def load_auth_provider(config_file=None, cred_file=None, username=None, password=None):
+    """
+    Function which loads an auth provider from available config.
+
+    Params:
+    * config_file ..: path to cqlsh config file (usually ~/.cassandra/cqlshrc).
+    * cred_file ....: path to cqlsh credentials file (default is ~/.cassandra/credentials).
+    * username .....: legacy override; takes precedence over any username found in config
+    * password .....: legacy override; takes precedence over any password found in config
+
+    Attempts to load an auth provider from the available config file, using what's found in
+    the credentials file as an override.
+
+    The config file is expected to name the module and class in the *auth_provider*
+    section so the provider can be loaded dynamically.
+
+    Additional constructor parameters for that class should also be specified
+    in the *auth_provider* section and can be freely named to match the
+    auth provider's expectations.
+
+    If a username and password are passed in, they override the configured values
+    and are passed to the auth provider.
+
+    None is returned if no auth provider can be determined and no username/password
+    is available. If a username is found, PlainTextAuthProvider is assumed.
+
+    EXAMPLE CQLSHRC:
+    # .. inside cqlshrc file
+
+    [auth_provider]
+    module = cassandra.auth
+    classname = PlainTextAuthProvider
+    username = user1
+    password = password1
+
+    If a credentials file is specified, put the relevant properties under the class name.
+    EXAMPLE
+    # ... inside credentials file for above example
+    [PlainTextAuthProvider]
+    password = password2
+
+    Credential attributes override those found in the cqlshrc.
+    In the above example, PlainTextAuthProvider would be used with a password of 'password2'
+    and a username of 'user1'.
+    """
+
+    def get_settings_from_config(section_name,
+                                 conf_file,
+                                 interpolation=configparser.BasicInterpolation()):
+        """
+        Returns a dict of the properties in section_name from the ini-based conf_file
+
+        * section_name ..: Section to read map of properties from (ex: [auth_provider])
+        * conf_file .....: Ini based config file to read.  Will return empty dict if None.
+        * interpolation .: Interpolation to use.
+
+        If the section is not found, or conf_file is None, an empty dictionary is returned.
+        """
+        conf = configparser.ConfigParser(interpolation=interpolation)
+        if conf_file is None:
+            return {}
+
+        conf.read(conf_file)
+        if section_name in conf.sections():
+            return dict(conf.items(section_name))
+        return {}
+
+    def get_cred_file_settings(classname, creds_file):
+        # Since this is the credentials file, values may be raw strings
+        # (passwords or security tokens can contain arbitrary characters),
+        # so we disable interpolation to avoid mangling them.
+        return get_settings_from_config(
+            section_name=classname,
+            conf_file=creds_file,
+            interpolation=None)
+
+    def get_auth_provider_settings(conf_file):
+        return get_settings_from_config(
+            section_name='auth_provider',
+            conf_file=conf_file)
+
+    def get_legacy_settings(legacy_username, legacy_password):
+        result = {}
+        if legacy_username is not None:
+            result['username'] = legacy_username
+        if legacy_password is not None:
+            result['password'] = legacy_password
+        return result
+
+    provider_settings = get_auth_provider_settings(config_file)
+
+    module_name = provider_settings.pop('module', None)
+    class_name = provider_settings.pop('classname', None)
+
+    if module_name is None and class_name is None:
+        # not specified, default to plaintext auth provider
+        module_name = 'cassandra.auth'
+        class_name = 'PlainTextAuthProvider'
+    elif module_name is None or class_name is None:
+        # then this was PARTIALLY specified.
+        return None
+
+    credential_settings = get_cred_file_settings(class_name, cred_file)
+
+    if module_name == 'cassandra.auth' and class_name == 'PlainTextAuthProvider':
+        # merge credential settings as overrides on top of provider settings.
+
+        # we need to ensure that password property gets "set" in all cases.
+        # this is to support the ability to give the user a prompt in other parts
+        # of the code.
+        _warn_for_plain_text_security(config_file, provider_settings)
+        ctor_args = {'password': None,
+                     **provider_settings,
+                     **credential_settings,
+                     **get_legacy_settings(username, password)}
+        # if no username, we can't create PlainTextAuthProvider
+        if 'username' not in ctor_args:
+            return None
+    else:
+        # merge credential settings as overrides on top of provider settings.
+        ctor_args = {**provider_settings,
+                     **credential_settings,
+                     **get_legacy_settings(username, password)}
+
+    # Load class definitions
+    module = import_module(module_name)
+    auth_provider_klass = getattr(module, class_name)
+
+    # instantiate the class
+    return auth_provider_klass(**ctor_args)
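A minimal usage sketch for the new loader, assuming a cqlshrc and credentials file shaped like the examples in its docstring (paths are illustrative; cqlsh normally supplies them itself):

----
import os
from cqlshlib.authproviderhandling import load_auth_provider

# Illustrative paths
config_file = os.path.expanduser('~/.cassandra/cqlshrc')
cred_file = os.path.expanduser('~/.cassandra/credentials')

# Credentials-file settings override the cqlshrc section, and an explicit
# username/password passed here would override both.
provider = load_auth_provider(config_file=config_file, cred_file=cred_file)
----

Note that for PlainTextAuthProvider the loader returns None when no username can be resolved, presumably so cqlsh can fall back to its own prompting.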
diff --git a/pylib/cqlshlib/copyutil.py b/pylib/cqlshlib/copyutil.py
index c1bdffe..2a8a11d 100644
--- a/pylib/cqlshlib/copyutil.py
+++ b/pylib/cqlshlib/copyutil.py
@@ -16,7 +16,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import unicode_literals
 import csv
 import datetime
 import json
@@ -27,32 +26,27 @@
 import random
 import re
 import signal
-import six
 import struct
 import sys
 import threading
 import time
 import traceback
-import errno
 
 from bisect import bisect_right
 from calendar import timegm
 from collections import defaultdict, namedtuple
 from decimal import Decimal
 from random import randint
-from io import BytesIO, StringIO
+from io import StringIO
 from select import select
 from uuid import UUID
-from .util import profile_on, profile_off
 
-from six import ensure_str, ensure_text
-from six.moves import configparser
-from six.moves import range
-from six.moves.queue import Queue
+import configparser
+from queue import Queue
 
 from cassandra import OperationTimedOut
 from cassandra.cluster import Cluster, DefaultConnection
-from cassandra.cqltypes import ReversedType, UserType, BytesType, VarcharType
+from cassandra.cqltypes import ReversedType, UserType, VarcharType
 from cassandra.metadata import protect_name, protect_names, protect_value
 from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy
 from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
@@ -67,6 +61,7 @@
 PROFILE_ON = False
 STRACE_ON = False
 DEBUG = False  # This may be set to True when initializing the task
+# TODO: review this for MacOS, maybe use in ('Linux', 'Darwin')
 IS_LINUX = platform.system() == 'Linux'
 
 CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
@@ -85,14 +80,14 @@
         printmsg(msg)
 
 
-def printmsg(msg, eol='\n', encoding='utf8'):
+def printmsg(msg, eol='\n'):
     sys.stdout.write(msg)
     sys.stdout.write(eol)
     sys.stdout.flush()
 
 
 # Keep arguments in sync with printmsg
-def swallowmsg(msg, eol='', encoding=''):
+def swallowmsg(msg, eol='\n'):
     None
 
 
@@ -175,7 +170,7 @@
         for ch in self.channels:
             try:
                 ch.close()
-            except Exception:
+            except ValueError:
                 pass
 
 
@@ -201,10 +196,8 @@
         while True:
             try:
                 readable, _, _ = select(self._readers, [], [], timeout)
-            except select.error as exc:
-                # Do not abort on window resize:
-                if exc[0] != errno.EINTR:
-                    raise
+            except OSError:
+                raise
             else:
                 break
         for r in readable:
@@ -237,7 +230,7 @@
         for ch in self.channels:
             try:
                 ch.close()
-            except Exception:
+            except ValueError:
                 pass
 
 
@@ -261,8 +254,7 @@
             DEBUG = True
 
         # do not display messages when exporting to STDOUT unless --debug is set
-        self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
-            else swallowmsg
+        self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG else swallowmsg
         self.options = self.parse_options(opts, direction)
 
         self.num_processes = self.options.copy['numprocesses']
@@ -292,7 +284,7 @@
             return opts
 
         configs = configparser.RawConfigParser()
-        configs.readfp(open(config_file))
+        configs.read_file(open(config_file))
 
         ret = dict()
         config_sections = list(['copy', 'copy-%s' % (direction,),
@@ -334,9 +326,9 @@
         opts = self.clean_options(self.maybe_read_config_file(opts, direction))
 
         dialect_options = dict()
-        dialect_options['quotechar'] = ensure_str(opts.pop('quote', '"'))
-        dialect_options['escapechar'] = ensure_str(opts.pop('escape', '\\'))
-        dialect_options['delimiter'] = ensure_str(opts.pop('delimiter', ','))
+        dialect_options['quotechar'] = opts.pop('quote', '"')
+        dialect_options['escapechar'] = opts.pop('escape', '\\')
+        dialect_options['delimiter'] = opts.pop('delimiter', ',')
         if dialect_options['quotechar'] == dialect_options['escapechar']:
             dialect_options['doublequote'] = True
             del dialect_options['escapechar']
@@ -344,7 +336,7 @@
             dialect_options['doublequote'] = False
 
         copy_options = dict()
-        copy_options['nullval'] = ensure_str(opts.pop('null', ''))
+        copy_options['nullval'] = opts.pop('null', '')
         copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
         copy_options['encoding'] = opts.pop('encoding', 'utf8')
         copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
@@ -366,7 +358,7 @@
         copy_options['consistencylevel'] = shell.consistency_level
         copy_options['decimalsep'] = opts.pop('decimalsep', '.')
         copy_options['thousandssep'] = opts.pop('thousandssep', '')
-        copy_options['boolstyle'] = [ensure_str(s.strip()) for s in opts.pop('boolstyle', 'True, False').split(',')]
+        copy_options['boolstyle'] = [s.strip() for s in opts.pop('boolstyle', 'True, False').split(',')]
         copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
         copy_options['begintoken'] = opts.pop('begintoken', '')
         copy_options['endtoken'] = opts.pop('endtoken', '')
@@ -569,7 +561,7 @@
 
         if self.header:
             writer = csv.writer(self.current_dest.output, **self.options.dialect)
-            writer.writerow([ensure_str(c) for c in self.columns])
+            writer.writerow([str(c) for c in self.columns])
 
         return True
 
@@ -665,7 +657,7 @@
             return 0
 
         columns = "[" + ", ".join(self.columns) + "]"
-        self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
+        self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns))
 
         params = self.make_params()
         for i in range(self.num_processes):
@@ -774,6 +766,7 @@
             #  For the last ring interval we query the same replicas that hold the first token in the ring
             if previous is not None and (not end_token or previous < end_token):
                 ranges[(previous, end_token)] = first_range_data
+            # TODO: fix this logic added in 4.0: if previous is None, then it can't be compared with less than
             elif previous is None and (not end_token or previous < end_token):
                 previous = begin_token if begin_token else min_token
                 ranges[(previous, end_token)] = first_range_data
@@ -885,12 +878,13 @@
         self.max_rows = options.copy['maxrows']
         self.skip_rows = options.copy['skiprows']
         self.fname = fname
-        self.sources = None  # must be created later due to pickle problems on Windows
+        self.sources = None  # might be initialised directly here? (see CASSANDRA-17350)
         self.num_sources = 0
         self.current_source = None
         self.num_read = 0
 
-    def get_source(self, paths):
+    @staticmethod
+    def get_source(paths):
         """
          Return a source generator. Each source is a named tuple
          wrapping the source input, file name and a boolean indicating
@@ -912,7 +906,7 @@
                     raise IOError("Can't open %r for reading: no matching file found" % (path,))
 
                 for f in result:
-                    yield (make_source(f))
+                    yield make_source(f)
 
     def start(self):
         self.sources = self.get_source(self.fname)
@@ -1167,7 +1161,7 @@
             return 0
 
         columns = "[" + ", ".join(self.valid_columns) + "]"
-        self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
+        self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns))
 
         try:
             params = self.make_params()
@@ -1300,9 +1294,9 @@
         self.inpipe = inpipe
         self.outpipe = outpipe
         self.worker_pipes = worker_pipes
-        self.inmsg = None  # must be created after forking on Windows
-        self.outmsg = None  # must be created after forking on Windows
-        self.worker_channels = None  # must be created after forking on Windows
+        self.inmsg = None  # might be initialised directly here? (see CASSANDRA-17350)
+        self.outmsg = None  # might be initialised directly here? (see CASSANDRA-17350)
+        self.worker_channels = None  # might be initialised directly here? (see CASSANDRA-17350)
         self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
         self.send_meter = RateMeter(log_fcn=None, update_interval=1)
         self.ingest_rate = options.copy['ingestrate']
@@ -1341,7 +1335,8 @@
         try:
             reader.start()
         except IOError as exc:
-            self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
+            self.outmsg.send(
+                ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
 
         channels = self.worker_channels
         max_pending_chunks = self.max_pending_chunks
@@ -1370,7 +1365,8 @@
                     if rows:
                         sent += self.send_chunk(ch, rows)
                 except Exception as exc:
-                    self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
+                    self.outmsg.send(
+                        ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
 
                 if reader.exhausted:
                     break
@@ -1406,8 +1402,8 @@
         super(ChildProcess, self).__init__(target=target)
         self.inpipe = params['inpipe']
         self.outpipe = params['outpipe']
-        self.inmsg = None  # must be initialized after fork on Windows
-        self.outmsg = None  # must be initialized after fork on Windows
+        self.inmsg = None  # might be initialised directly here? (see CASSANDRA-17350)
+        self.outmsg = None  # might be initialised directly here? (see CASSANDRA-17350)
         self.ks = params['ks']
         self.table = params['table']
         self.local_dc = params['local_dc']
@@ -1728,7 +1724,7 @@
             return  # no rows in this range
 
         try:
-            output = StringIO() if six.PY3 else BytesIO()
+            output = StringIO()
             writer = csv.writer(output, **self.options.dialect)
 
             for row in rows:
@@ -1758,7 +1754,7 @@
                               float_precision=cqltype.precision, nullval=self.nullval, quote=False,
                               decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
                               boolean_styles=self.boolean_styles)
-        return formatted if six.PY3 else formatted.encode('utf8')
+        return formatted
 
     def close(self):
         ChildProcess.close(self)
@@ -1898,7 +1894,7 @@
         select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
                                                          protect_name(parent.table),
                                                          where_clause)
-        return parent.session.prepare(ensure_str(select_query))
+        return parent.session.prepare(select_query)
 
     @staticmethod
     def unprotect(v):
@@ -1934,20 +1930,20 @@
                 return BlobType(v[2:].decode("hex"))
 
         def convert_text(v, **_):
-            return ensure_str(v)
+            return str(v)
 
         def convert_uuid(v, **_):
             return UUID(v)
 
         def convert_bool(v, **_):
-            return True if v.lower() == ensure_str(self.boolean_styles[0]).lower() else False
+            return True if v.lower() == self.boolean_styles[0].lower() else False
 
         def get_convert_integer_fcn(adapter=int):
             """
             Return a slow and a fast integer conversion function depending on self.thousands_sep
             """
             if self.thousands_sep:
-                return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ensure_str('')))
+                return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ''))
             else:
                 return lambda v, ct=cql_type: adapter(v)
 
@@ -1955,10 +1951,11 @@
             """
             Return a slow and a fast decimal conversion function depending on self.thousands_sep and self.decimal_sep
             """
-            empty_str = ensure_str('')
-            dot_str = ensure_str('.')
+            empty_str = ''
+            dot_str = '.'
             if self.thousands_sep and self.decimal_sep:
-                return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str).replace(self.decimal_sep, dot_str))
+                return lambda v, ct=cql_type: \
+                    adapter(v.replace(self.thousands_sep, empty_str).replace(self.decimal_sep, dot_str))
             elif self.thousands_sep:
                 return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str))
             elif self.decimal_sep:
@@ -2014,20 +2011,14 @@
             return ret
 
         # this should match all possible CQL and CQLSH datetime formats
-        p = re.compile(r"(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?"  # YYYY-MM-DD[( |'T')]
+        p = re.compile(r"(\d{4})-(\d{2})-(\d{2})\s?(?:'T')?"  # YYYY-MM-DD[( |'T')]
                        + r"(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?"  # [HH:MM[:SS[.NNNNNN]]]
                        + r"(?:([+\-])(\d{2}):?(\d{2}))?")  # [(+|-)HH[:]MM]]
 
         def convert_datetime(val, **_):
             try:
-                if six.PY2:
-                    # Python 2 implementation
-                    tval = time.strptime(val, self.date_time_format)
-                    return timegm(tval) * 1e3  # scale seconds to millis for the raw value
-                else:
-                    # Python 3 implementation
-                    dtval = datetime.datetime.strptime(val, self.date_time_format)
-                    return dtval.timestamp() * 1000
+                dtval = datetime.datetime.strptime(val, self.date_time_format)
+                return dtval.timestamp() * 1000
             except ValueError:
                 pass  # if it's not in the default format we try CQL formats
 
@@ -2078,8 +2069,8 @@
             """
             See ImmutableDict above for a discussion of why a special object is needed here.
             """
-            split_format_str = ensure_str('{%s}')
-            sep = ensure_str(':')
+            split_format_str = '{%s}'
+            sep = ':'
             return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
                                  for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]))
 
@@ -2092,8 +2083,8 @@
             Also note that it is possible that the subfield names in the csv are in the
             wrong order, so we must sort them according to ct.fieldnames, see CASSANDRA-12959.
             """
-            split_format_str = ensure_str('{%s}')
-            sep = ensure_str(':')
+            split_format_str = '{%s}'
+            sep = ':'
             vals = [v for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]]
             dict_vals = dict((unprotect(v[0]), v[1]) for v in vals)
             sorted_converted_vals = [(n, convert(t, dict_vals[n]) if n in dict_vals else self.get_null_val())
@@ -2151,7 +2142,7 @@
         or "NULL" otherwise. Note that for counters we never use prepared statements, so we
         only check is_counter when use_prepared_statements is false.
         """
-        return None if self.use_prepared_statements else (ensure_str("0") if self.is_counter else ensure_str("NULL"))
+        return None if self.use_prepared_statements else ("0" if self.is_counter else "NULL")
 
     def convert_row(self, row):
         """
@@ -2436,7 +2427,6 @@
             if self.ttl >= 0:
                 query += 'USING TTL %s' % (self.ttl,)
             make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)
-            query = ensure_str(query)
 
         conv = ImportConversion(self, table_meta, prepared_statement)
         tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
@@ -2504,12 +2494,12 @@
             set_clause = []
             for i, value in enumerate(row):
                 if i in conv.primary_key_indexes:
-                    where_clause.append(ensure_text("{}={}").format(self.valid_columns[i], ensure_text(value)))
+                    where_clause.append("{}={}".format(self.valid_columns[i], str(value)))
                 else:
-                    set_clause.append(ensure_text("{}={}+{}").format(self.valid_columns[i], self.valid_columns[i], ensure_text(value)))
+                    set_clause.append("{}={}+{}".format(self.valid_columns[i], self.valid_columns[i], str(value)))
 
-            full_query_text = query % (ensure_text(',').join(set_clause), ensure_text(' AND ').join(where_clause))
-            statement.add(ensure_str(full_query_text))
+            full_query_text = query % (','.join(set_clause), ' AND '.join(where_clause))
+            statement.add(full_query_text)
         return statement
 
     def make_prepared_batch_statement(self, query, _, batch, replicas):
@@ -2533,7 +2523,7 @@
         statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
         statement.replicas = replicas
         statement.keyspace = self.ks
-        field_sep = b',' if six.PY2 else ','
+        field_sep = ','
         statement._statements_and_parameters = [(False, query % (field_sep.join(r),), ()) for r in batch['rows']]
         return statement
 
@@ -2642,7 +2632,8 @@
                     yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
             else:
                 # select only the first valid replica to guarantee more overlap or none at all
-                rows_by_replica[tuple(filter_replicas(replicas[ring_pos])[:1])].extend(rows)  # TODO: revisit tuple wrapper
+                # TODO: revisit tuple wrapper
+                rows_by_replica[tuple(filter_replicas(replicas[ring_pos])[:1])].extend(rows)
 
         # Now send the batches by replica
         for replicas, rows in rows_by_replica.items():
@@ -2664,6 +2655,7 @@
             future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
                                  errback=self.err_callback, errback_args=(batch, chunk, replicas))
 
+    # TODO: review why this is defined twice
     def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
         if self.debug and sys.exc_info()[1] == err:
             traceback.print_exc()
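The Python 2 branch of convert_datetime is gone above; as a standalone sketch, the surviving Python 3 path boils down to the following (format string and sample value are illustrative, not from the patch):

----
import datetime

# Mirrors the updated convert_datetime: parse with strptime, then scale
# seconds to milliseconds; naive datetimes are interpreted in local time.
date_time_format = '%Y-%m-%d %H:%M:%S'
val = '2023-02-15 12:34:56'
millis = datetime.datetime.strptime(val, date_time_format).timestamp() * 1000
print(int(millis))
----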
diff --git a/pylib/cqlshlib/cql3handling.py b/pylib/cqlshlib/cql3handling.py
index c45b310..03e06d8 100644
--- a/pylib/cqlshlib/cql3handling.py
+++ b/pylib/cqlshlib/cql3handling.py
@@ -18,8 +18,9 @@
 from cqlshlib import helptopics
 from cqlshlib.cqlhandling import CqlParsingRuleSet, Hint
 
-simple_cql_types = set(('ascii', 'bigint', 'blob', 'boolean', 'counter', 'date', 'decimal', 'double', 'duration', 'float',
-                        'inet', 'int', 'smallint', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'uuid', 'varchar', 'varint'))
+simple_cql_types = {'ascii', 'bigint', 'blob', 'boolean', 'counter', 'date', 'decimal', 'double', 'duration', 'float',
+                    'inet', 'int', 'smallint', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'uuid', 'varchar',
+                    'varint'}
 simple_cql_types.difference_update(('set', 'map', 'list'))
 
 cqldocs = helptopics.CQL3HelpTopics()
@@ -34,7 +35,8 @@
         return 'Unexpected table structure; may not translate correctly to CQL. ' + self.msg
 
 
-SYSTEM_KEYSPACES = ('system', 'system_schema', 'system_traces', 'system_auth', 'system_distributed', 'system_views', 'system_virtual_schema')
+SYSTEM_KEYSPACES = ('system', 'system_schema', 'system_traces', 'system_auth', 'system_distributed', 'system_views',
+                    'system_virtual_schema')
 NONALTERBALE_KEYSPACES = ('system', 'system_schema', 'system_views', 'system_virtual_schema')
 
 
@@ -58,7 +60,8 @@
         # (CQL3 option name, schema_columnfamilies column name (or None if same),
         #  list of known map keys)
         ('compaction', 'compaction_strategy_options',
-            ('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction', 'only_purge_repaired_tombstones', 'provide_overlapping_tombstones')),
+            ('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled',
+             'unchecked_tombstone_compaction', 'only_purge_repaired_tombstones', 'provide_overlapping_tombstones')),
         ('compression', 'compression_parameters',
             ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
         ('caching', None,
@@ -455,7 +458,7 @@
     else:
         return ["'class'"]
     if repclass == 'SimpleStrategy':
-        opts = set(('replication_factor',))
+        opts = {'replication_factor'}
     elif repclass == 'NetworkTopologyStrategy':
         return [Hint('<dc_name>')]
     return list(map(escape_value, opts.difference(keysseen)))
@@ -744,7 +747,13 @@
 <orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
                   ;
 <groupByClause> ::= [groupcol]=<cident>
+                  | <functionName><groupByFunctionArguments>
                   ;
+<groupByFunctionArguments> ::= "(" ( <groupByFunctionArgument> ( "," <groupByFunctionArgument> )* )? ")"
+                             ;
+<groupByFunctionArgument> ::= [groupcol]=<cident>
+                            | <term>
+                            ;
 '''
 
 
@@ -958,7 +967,7 @@
                ;
 <conditions> ::=  <condition> ( "AND" <condition> )*
                ;
-<condition_op_and_rhs> ::= (("=" | "<" | ">" | "<=" | ">=" | "!=") <term>)
+<condition_op_and_rhs> ::= (("=" | "<" | ">" | "<=" | ">=" | "!=" | "CONTAINS" ( "KEY" )? ) <term>)
                            | ("IN" "(" <term> ( "," <term> )* ")" )
                          ;
 <condition> ::= conditioncol=<cident>
@@ -1403,21 +1412,21 @@
 
 
 syntax_rules += r'''
-<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
+<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) ("IF" "EXISTS")? cf=<columnFamilyName>
                                <alterInstructions>
                         ;
-<alterInstructions> ::= "ADD" newcol=<cident> <storageType> ("static")?
-                      | "DROP" existcol=<cident>
+<alterInstructions> ::= "ADD" ("IF" "NOT" "EXISTS")? newcol=<cident> <storageType> ("static")?
+                      | "DROP" ("IF" "EXISTS")? existcol=<cident>
                       | "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
-                      | "RENAME" existcol=<cident> "TO" newcol=<cident>
+                      | "RENAME" ("IF" "EXISTS")? existcol=<cident> "TO" newcol=<cident>
                          ( "AND" existcol=<cident> "TO" newcol=<cident> )*
                       ;
 
-<alterUserTypeStatement> ::= "ALTER" "TYPE" ut=<userTypeName>
+<alterUserTypeStatement> ::= "ALTER" "TYPE" ("IF" "EXISTS")? ut=<userTypeName>
                                <alterTypeInstructions>
                              ;
-<alterTypeInstructions> ::= "ADD" newcol=<cident> <storageType>
-                           | "RENAME" existcol=<cident> "TO" newcol=<cident>
+<alterTypeInstructions> ::= "ADD" ("IF" "NOT" "EXISTS")? newcol=<cident> <storageType>
+                           | "RENAME" ("IF" "EXISTS")? existcol=<cident> "TO" newcol=<cident>
                               ( "AND" existcol=<cident> "TO" newcol=<cident> )*
                            ;
 '''
@@ -1433,7 +1442,7 @@
 @completer_for('alterTypeInstructions', 'existcol')
 def alter_type_field_completer(ctxt, cass):
     layout = get_ut_layout(ctxt, cass)
-    fields = [tuple[0] for tuple in layout]
+    fields = [atuple[0] for atuple in layout]
     return list(map(maybe_escape_name, fields))
 
 
@@ -1442,7 +1451,7 @@
 
 
 syntax_rules += r'''
-<alterKeyspaceStatement> ::= "ALTER" wat=( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
+<alterKeyspaceStatement> ::= "ALTER" wat=( "KEYSPACE" | "SCHEMA" ) ("IF" "EXISTS")? ks=<alterableKeyspaceName>
                                  "WITH" <property> ( "AND" <property> )*
                            ;
 '''
@@ -1452,11 +1461,11 @@
              ;
 
 <createUserStatement> ::= "CREATE" "USER" ( "IF" "NOT" "EXISTS" )? <username>
-                              ( "WITH" "PASSWORD" <stringLiteral> )?
+                              ( "WITH" ("HASHED")? "PASSWORD" <stringLiteral> )?
                               ( "SUPERUSER" | "NOSUPERUSER" )?
                         ;
 
-<alterUserStatement> ::= "ALTER" "USER" <username>
+<alterUserStatement> ::= "ALTER" "USER" ("IF" "EXISTS")? <username>
                               ( "WITH" "PASSWORD" <stringLiteral> )?
                               ( "SUPERUSER" | "NOSUPERUSER" )?
                        ;
@@ -1478,11 +1487,11 @@
                               ( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
                         ;
 
-<alterRoleStatement> ::= "ALTER" "ROLE" <rolename>
+<alterRoleStatement> ::= "ALTER" "ROLE" ("IF" "EXISTS")? <rolename>
                               ( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
                        ;
 
-<roleProperty> ::= "PASSWORD" "=" <stringLiteral>
+<roleProperty> ::= (("HASHED")? "PASSWORD") "=" <stringLiteral>
                  | "OPTIONS" "=" <mapLiteral>
                  | "SUPERUSER" "=" <boolean>
                  | "LOGIN" "=" <boolean>
@@ -1525,7 +1534,7 @@
                | "EXECUTE"
                ;
 
-<permissionExpr> ::= ( <permission> "PERMISSION"? )
+<permissionExpr> ::= ( [newpermission]=<permission> "PERMISSION"? ( "," [newpermission]=<permission> "PERMISSION"? )* )
                    | ( "ALL" "PERMISSIONS"? )
                    ;
 
@@ -1537,6 +1546,7 @@
 
 <dataResource> ::= ( "ALL" "KEYSPACES" )
                  | ( "KEYSPACE" <keyspaceName> )
+                 | ( "ALL" "TABLES" "IN" "KEYSPACE" <keyspaceName> )
                  | ( "TABLE"? <columnFamilyName> )
                  ;
 
@@ -1559,6 +1569,16 @@
 '''
 
 
+@completer_for('permissionExpr', 'newpermission')
+def permission_completer(ctxt, _):
+    new_permissions = {permission.upper() for permission in ctxt.get_binding('newpermission')}
+    all_permissions = {permission.arg for permission in ctxt.ruleset['permission'].arg}
+    suggestions = all_permissions - new_permissions
+    if len(suggestions) == 0:
+        return [Hint('No more permissions here.')]
+    return suggestions
+
+
 @completer_for('username', 'name')
 def username_name_completer(ctxt, cass):
     def maybe_quote(name):
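The new permission_completer above offers only the permissions that have not been typed yet. A minimal standalone sketch of that set-difference logic (illustrative only; ALL_PERMISSIONS and already_typed are invented for this example and are not names from cql3handling.py):

    # Illustrative sketch, not part of the patch: mirrors the set difference in
    # permission_completer without the pylexotron parsing context.
    ALL_PERMISSIONS = {'ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'MODIFY', 'SELECT'}

    def remaining_permissions(already_typed):
        """Return the permissions still available for tab completion."""
        typed = {p.upper() for p in already_typed}
        return sorted(ALL_PERMISSIONS - typed)

    # After "GRANT MODIFY PERMISSION, " the completer should no longer offer MODIFY:
    assert remaining_permissions(['modify']) == ['ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'SELECT']

This matches the behaviour exercised later in test_complete_in_grant and test_complete_in_revoke.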
diff --git a/pylib/cqlshlib/cqlhandling.py b/pylib/cqlshlib/cqlhandling.py
index 3da0ab1..ca12a25 100644
--- a/pylib/cqlshlib/cqlhandling.py
+++ b/pylib/cqlshlib/cqlhandling.py
@@ -24,13 +24,12 @@
 
 Hint = pylexotron.Hint
 
-cql_keywords_reserved = set((
-    'add', 'allow', 'alter', 'and', 'apply', 'asc', 'authorize', 'batch', 'begin', 'by', 'columnfamily', 'create',
-    'delete', 'desc', 'describe', 'drop', 'entries', 'execute', 'from', 'full', 'grant', 'if', 'in', 'index',
-    'infinity', 'insert', 'into', 'is', 'keyspace', 'limit', 'materialized', 'modify', 'nan', 'norecursive', 'not',
-    'null', 'of', 'on', 'or', 'order', 'primary', 'rename', 'revoke', 'schema', 'select', 'set', 'table', 'to', 'token',
-    'truncate', 'unlogged', 'update', 'use', 'using', 'view', 'where', 'with'
-))
+cql_keywords_reserved = {'add', 'allow', 'alter', 'and', 'apply', 'asc', 'authorize', 'batch', 'begin', 'by',
+                         'columnfamily', 'create', 'delete', 'desc', 'describe', 'drop', 'entries', 'execute', 'from',
+                         'full', 'grant', 'if', 'in', 'index', 'infinity', 'insert', 'into', 'is', 'keyspace', 'limit',
+                         'materialized', 'modify', 'nan', 'norecursive', 'not', 'null', 'of', 'on', 'or', 'order',
+                         'primary', 'rename', 'revoke', 'schema', 'select', 'set', 'table', 'to', 'token', 'truncate',
+                         'unlogged', 'update', 'use', 'using', 'view', 'where', 'with'}
 """
 Set of reserved keywords in CQL.
 
@@ -60,7 +59,7 @@
     )
 
     def __init__(self, *args, **kwargs):
-        pylexotron.ParsingRuleSet.__init__(self, *args, **kwargs)
+        pylexotron.ParsingRuleSet.__init__(self)
 
         # note: commands_end_with_newline may be extended by callers.
         self.commands_end_with_newline = set()
@@ -111,12 +110,6 @@
                     # don't put any 'endline' tokens in output
                     continue
 
-            # Convert all unicode tokens to ascii, where possible.  This
-            # helps avoid problems with performing unicode-incompatible
-            # operations on tokens (like .lower()).  See CASSANDRA-9083
-            # for one example of this.
-            str_token = t[1]
-
             curstmt.append(t)
             if t[0] == 'endtoken':
                 term_on_nl = False
@@ -158,10 +151,10 @@
                     in_batch = True
         return output, in_batch or in_pg_string
 
-    def cql_complete_single(self, text, partial, init_bindings={}, ignore_case=True,
+    def cql_complete_single(self, text, partial, init_bindings=None, ignore_case=True,
                             startsymbol='Start'):
         tokens = (self.cql_split_statements(text)[0] or [[]])[-1]
-        bindings = init_bindings.copy()
+        bindings = {} if init_bindings is None else init_bindings.copy()
 
         # handle some different completion scenarios- in particular, completing
         # inside a string literal
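The init_bindings change above replaces a mutable default argument with None. A small sketch of the pitfall being avoided (illustrative only; the function names below are invented for this example):

    # Not part of the patch: shows why a shared {} default is risky.
    def buggy(bindings={}):            # one dict object shared across all calls
        bindings['seen'] = bindings.get('seen', 0) + 1
        return bindings['seen']

    def fixed(bindings=None):          # a fresh dict for every call
        bindings = {} if bindings is None else bindings.copy()
        bindings['seen'] = bindings.get('seen', 0) + 1
        return bindings['seen']

    assert (buggy(), buggy()) == (1, 2)   # state leaks between calls
    assert (fixed(), fixed()) == (1, 1)   # no shared state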
diff --git a/pylib/cqlshlib/displaying.py b/pylib/cqlshlib/displaying.py
index ef076f7..424d633 100644
--- a/pylib/cqlshlib/displaying.py
+++ b/pylib/cqlshlib/displaying.py
@@ -14,8 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import unicode_literals
-
 from collections import defaultdict
 
 RED = '\033[0;1;31m'
@@ -43,7 +41,7 @@
     return val
 
 
-class FormattedValue(object):
+class FormattedValue:
 
     def __init__(self, strval, coloredval=None, displaywidth=None):
         self.strval = strval
diff --git a/pylib/cqlshlib/formatting.py b/pylib/cqlshlib/formatting.py
index e521544..ebf9fc7 100644
--- a/pylib/cqlshlib/formatting.py
+++ b/pylib/cqlshlib/formatting.py
@@ -14,18 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import unicode_literals
-
-import binascii
 import calendar
 import datetime
 import math
 import os
 import re
 import sys
-import platform
-
-from six import ensure_text
 
 from collections import defaultdict
 
@@ -35,8 +29,6 @@
 from .displaying import colorme, get_str, FormattedValue, DEFAULT_VALUE_COLORS, NO_COLOR_MAP
 from .util import UTC
 
-is_win = platform.system() == 'Windows'
-
 unicode_controlchars_re = re.compile(r'[\x00-\x1f\x7f-\xa0]')
 controlchars_re = re.compile(r'[\x00-\x1f\x7f-\xff]')
 
@@ -126,7 +118,7 @@
         self.milliseconds_only = milliseconds_only  # the microseconds part, .NNNNNN, wil be rounded to .NNN
 
 
-class CqlType(object):
+class CqlType:
     """
     A class for converting a string into a cql type name that can match a formatter
     and a list of its sub-types, if any.
@@ -208,7 +200,7 @@
 
 
 def format_value_default(val, colormap, **_):
-    val = ensure_text(str(val))
+    val = str(val)
     escapedval = val.replace('\\', '\\\\')
     bval = controlchars_re.sub(_show_control_chars, escapedval)
     return bval if colormap is NO_COLOR_MAP else color_text(bval, colormap)
@@ -240,7 +232,7 @@
     return registrator
 
 
-class BlobType(object):
+class BlobType:
     def __init__(self, val):
         self.val = val
 
@@ -250,7 +242,7 @@
 
 @formatter_for('BlobType')
 def format_value_blob(val, colormap, **_):
-    bval = ensure_text('0x') + ensure_text(binascii.hexlify(val))
+    bval = '0x' + val.hex()
     return colorme(bval, colormap, 'blob')
 
 
@@ -260,7 +252,7 @@
 
 
 def format_python_formatted_type(val, colormap, color, quote=False):
-    bval = ensure_text(str(val))
+    bval = str(val)
     if quote:
         bval = "'%s'" % bval
     return colorme(bval, colormap, color)
@@ -330,7 +322,7 @@
 def format_integer_type(val, colormap, thousands_sep=None, **_):
     # base-10 only for now; support others?
     bval = format_integer_with_thousands_sep(val, thousands_sep) if thousands_sep else str(val)
-    bval = ensure_text(bval)
+    bval = str(bval)
     return colorme(bval, colormap, 'int')
 
 
@@ -365,7 +357,7 @@
         if date_time_format.milliseconds_only:
             bval = round_microseconds(bval)
     else:
-        bval = ensure_text(str(val))
+        bval = str(val)
 
     if quote:
         bval = "'%s'" % bval
diff --git a/pylib/cqlshlib/helptopics.py b/pylib/cqlshlib/helptopics.py
index 46cd156..9be56b9 100644
--- a/pylib/cqlshlib/helptopics.py
+++ b/pylib/cqlshlib/helptopics.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 
-class CQL3HelpTopics(object):
+class CQL3HelpTopics:
     def get_help_topics(self):
         return [t[5:] for t in dir(self) if t.startswith('help_')]
 
diff --git a/pylib/cqlshlib/saferscanner.py b/pylib/cqlshlib/saferscanner.py
index 6d7ba57..2c2b610 100644
--- a/pylib/cqlshlib/saferscanner.py
+++ b/pylib/cqlshlib/saferscanner.py
@@ -19,7 +19,6 @@
 # regex in-pattern flags. Any of those can break correct operation of Scanner.
 
 import re
-import six
 try:
     from sre_constants import BRANCH, SUBPATTERN, GROUPREF, GROUPREF_IGNORE, GROUPREF_EXISTS
 except ImportError:
@@ -53,23 +52,6 @@
         return re.sre_parse.SubPattern(sub.pattern, scrubbedsub)
 
 
-class Py2SaferScanner(SaferScannerBase):
-
-    def __init__(self, lexicon, flags=0):
-        self.lexicon = lexicon
-        p = []
-        s = re.sre_parse.Pattern()
-        s.flags = flags
-        for phrase, action in lexicon:
-            p.append(re.sre_parse.SubPattern(s, [
-                (SUBPATTERN, (len(p) + 1, self.subpat(phrase, flags))),
-            ]))
-        s.groups = len(p) + 1
-        p = re.sre_parse.SubPattern(s, [(BRANCH, (None, p))])
-        self.p = p
-        self.scanner = re.sre_compile.compile(p)
-
-
 class Py36SaferScanner(SaferScannerBase):
 
     def __init__(self, lexicon, flags=0):
@@ -118,7 +100,7 @@
         self.scanner = re._compiler.compile(p)
 
 
-SaferScanner = Py36SaferScanner if six.PY3 else Py2SaferScanner
+SaferScanner = Py36SaferScanner
 if version_info >= (3, 11):
     SaferScanner = Py311SaferScanner
 elif version_info >= (3, 8):
diff --git a/pylib/cqlshlib/setup.cfg b/pylib/cqlshlib/setup.cfg
deleted file mode 100644
index 6c523ee..0000000
--- a/pylib/cqlshlib/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[nosetests]
-verbosity=3
-detailed-errors=1
-with-xunit=1
diff --git a/pylib/cqlshlib/sslhandling.py b/pylib/cqlshlib/sslhandling.py
index 8a7592c..4fc6eae 100644
--- a/pylib/cqlshlib/sslhandling.py
+++ b/pylib/cqlshlib/sslhandling.py
@@ -18,7 +18,7 @@
 import sys
 import ssl
 
-from six.moves import configparser
+import configparser
 
 
 def ssl_settings(host, config_file, env=os.environ):
@@ -39,7 +39,7 @@
     either in the config file or as an environment variable.
     Environment variables override any options set in cqlsh config file.
     """
-    configs = configparser.SafeConfigParser()
+    configs = configparser.ConfigParser()
     configs.read(config_file)
 
     def get_option(section, option):
@@ -49,16 +49,9 @@
             return None
 
     def get_best_tls_protocol(ssl_ver_str):
-        # newer python versions suggest to use PROTOCOL_TLS to negotiate the highest TLS version.
-        # older protocol versions have been deprecated:
-        # https://docs.python.org/2/library/ssl.html#ssl.PROTOCOL_TLS
-        # https://docs.python.org/3/library/ssl.html#ssl.PROTOCOL_TLS
         if ssl_ver_str:
-            return getattr(ssl, "PROTOCOL_%s" % ssl_ver_str, None)
-        for protocol in ['PROTOCOL_TLS', 'PROTOCOL_TLSv1_2', 'PROTOCOL_TLSv1_1', 'PROTOCOL_TLSv1']:
-            if hasattr(ssl, protocol):
-                return getattr(ssl, protocol)
-        return None
+            print("Warning: Explicit SSL and TLS versions in the cqlshrc file or in SSL_VERSION environment property are ignored as the protocol is auto-negotiated.\n")
+        return ssl.PROTOCOL_TLS
 
     ssl_validate = env.get('SSL_VALIDATE')
     if ssl_validate is None:
@@ -70,9 +63,6 @@
         ssl_version_str = get_option('ssl', 'version')
 
     ssl_version = get_best_tls_protocol(ssl_version_str)
-    if ssl_version is None:
-        sys.exit("%s is not a valid SSL protocol, please use one of "
-                 "TLS, TLSv1_2, TLSv1_1, or TLSv1" % (ssl_version_str,))
 
     ssl_certfile = env.get('SSL_CERTFILE')
     if ssl_certfile is None:
diff --git a/pylib/cqlshlib/test/__init__.py b/pylib/cqlshlib/test/__init__.py
index 4bb037e..635f0d9 100644
--- a/pylib/cqlshlib/test/__init__.py
+++ b/pylib/cqlshlib/test/__init__.py
@@ -13,5 +13,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-from .cassconnect import create_db, remove_db
diff --git a/pylib/cqlshlib/test/ansi_colors.py b/pylib/cqlshlib/test/ansi_colors.py
index 9fc3411..494b7c6 100644
--- a/pylib/cqlshlib/test/ansi_colors.py
+++ b/pylib/cqlshlib/test/ansi_colors.py
@@ -14,10 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import unicode_literals
-
 import re
-import six
 
 LIGHT = 0o10
 
@@ -65,6 +62,7 @@
     for c in nameset:
         colors_by_name[c] = colorcode
 
+
 class ColoredChar(object):
     def __init__(self, c, colorcode):
         self.c = c
@@ -104,9 +102,10 @@
     def colortag(self):
         return lookup_letter_from_code(self._colorcode)
 
+
 class ColoredText(object):
     def __init__(self, source=''):
-        if isinstance(source, six.text_type):
+        if isinstance(source, str):
             plain, colors = self.parse_ansi_colors(source)
             self.chars = list(map(ColoredChar, plain, colors))
         else:
@@ -152,7 +151,6 @@
 
     @staticmethod
     def parse_sgr_param(curclr, paramstr):
-        oldclr = curclr
         args = list(map(int, paramstr.split(';')))
         for a in args:
             if a == 0:
@@ -179,15 +177,19 @@
     def colortags(self):
         return ''.join([c.colortag() for c in self.chars])
 
+
 def lookup_colorcode(name):
     return colors_by_name[name]
 
+
 def lookup_colorname(code):
     return colors_by_num.get(code, 'Unknown-color-0%o' % code)
 
+
 def lookup_colorletter(letter):
     return colors_by_letter[letter]
 
+
 def lookup_letter_from_code(code):
     letr = letters_by_num.get(code, ' ')
     if letr == 'n':
diff --git a/pylib/cqlshlib/test/basecase.py b/pylib/cqlshlib/test/basecase.py
index 096853f..ce4fd5c 100644
--- a/pylib/cqlshlib/test/basecase.py
+++ b/pylib/cqlshlib/test/basecase.py
@@ -29,14 +29,10 @@
 
 sys.path.append(cqlsh_dir)
 
-import cqlsh
-
-cql = cqlsh.cassandra.cluster.Cluster
-policy = cqlsh.cassandra.policies.RoundRobinPolicy()
-quote_name = cqlsh.cassandra.metadata.maybe_escape_name
-
 TEST_HOST = os.environ.get('CQL_TEST_HOST', '127.0.0.1')
 TEST_PORT = int(os.environ.get('CQL_TEST_PORT', 9042))
+TEST_USER = os.environ.get('CQL_TEST_USER', 'cassandra')
+TEST_PWD = os.environ.get('CQL_TEST_PWD')
 
 
 class BaseTestCase(unittest.TestCase):
diff --git a/pylib/cqlshlib/test/cassconnect.py b/pylib/cqlshlib/test/cassconnect.py
index 909e88a..0118e07 100644
--- a/pylib/cqlshlib/test/cassconnect.py
+++ b/pylib/cqlshlib/test/cassconnect.py
@@ -20,16 +20,23 @@
 import os.path
 import random
 import string
-import pytest
 
-from .basecase import TEST_HOST, TEST_PORT, cql, cqlsh, cqlshlog, policy, quote_name, test_dir
+from cassandra.cluster import Cluster
+from cassandra.metadata import maybe_escape_name as quote_name
+from cassandra.auth import PlainTextAuthProvider
+from cqlshlib.cql3handling import CqlRuleSet
+
+from .basecase import TEST_HOST, TEST_PORT, TEST_USER, TEST_PWD, cqlshlog, test_dir
 from .run_cqlsh import run_cqlsh, call_cqlsh
 
 test_keyspace_init = os.path.join(test_dir, 'test_keyspace_init.cql')
 
 
 def get_cassandra_connection(cql_version=None):
-    conn = cql((TEST_HOST,), TEST_PORT, cql_version=cql_version, load_balancing_policy=policy)
+
+    auth_provider = PlainTextAuthProvider(username=TEST_USER, password=TEST_PWD)
+    conn = Cluster((TEST_HOST,), TEST_PORT, auth_provider=auth_provider, cql_version=cql_version)
+
     # until the cql lib does this for us
     conn.cql_version = cql_version
     return conn
@@ -118,7 +125,8 @@
     try:
         yield conn
     finally:
-        conn.close()
+        conn.shutdown()
+
 
 @contextlib.contextmanager
 def cassandra_cursor(cql_version=None, ks=''):
@@ -146,13 +154,14 @@
 
 
 def cql_rule_set():
-    return cqlsh.cql3handling.CqlRuleSet
+    return CqlRuleSet
 
 
-class DEFAULTVAL: pass
+class DEFAULTVAL:
+    pass
 
 
-@pytest.mark.skip(reason="not a test")
+__TEST__ = False
 def testrun_cqlsh(keyspace=DEFAULTVAL, **kwargs):
     # use a positive default sentinel so that keyspace=None can be used
     # to override the default behavior
@@ -161,7 +170,7 @@
     return run_cqlsh(keyspace=keyspace, **kwargs)
 
 
-@pytest.mark.skip(reason="not a test")
+__TEST__ = False
 def testcall_cqlsh(keyspace=None, **kwargs):
     if keyspace is None:
         keyspace = get_keyspace()
diff --git a/pylib/cqlshlib/test/config/sslhandling.config b/pylib/cqlshlib/test/config/sslhandling.config
deleted file mode 100644
index 63f41c7..0000000
--- a/pylib/cqlshlib/test/config/sslhandling.config
+++ /dev/null
@@ -1,2 +0,0 @@
-[ssl]
-version = TLSv1
\ No newline at end of file
diff --git a/pylib/cqlshlib/test/config/sslhandling_invalid.config b/pylib/cqlshlib/test/config/sslhandling_invalid.config
deleted file mode 100644
index 90e061f..0000000
--- a/pylib/cqlshlib/test/config/sslhandling_invalid.config
+++ /dev/null
@@ -1,2 +0,0 @@
-[ssl]
-version = invalid_ssl
\ No newline at end of file
diff --git a/pylib/cqlshlib/test/run_cqlsh.py b/pylib/cqlshlib/test/run_cqlsh.py
index cd14b7f..180796a 100644
--- a/pylib/cqlshlib/test/run_cqlsh.py
+++ b/pylib/cqlshlib/test/run_cqlsh.py
@@ -16,31 +16,21 @@
 
 # NOTE: this testing tool is *nix specific
 
-from __future__ import unicode_literals
-
 import os
 import sys
 import re
 import contextlib
 import subprocess
 import signal
-import math
 from time import time
 from . import basecase
-from os.path import join, normpath
+from os.path import join
 
 
-def is_win():
-    return sys.platform in ("cygwin", "win32")
+import pty
+DEFAULT_PREFIX = os.linesep
 
-if is_win():
-    from .winpty import WinPty
-    DEFAULT_PREFIX = ''
-else:
-    import pty
-    DEFAULT_PREFIX = os.linesep
-
-DEFAULT_CQLSH_PROMPT = DEFAULT_PREFIX + '(\S+@)?cqlsh(:\S+)?> '
+DEFAULT_CQLSH_PROMPT = DEFAULT_PREFIX + r'(\S+@)?cqlsh(:\S+)?> '
 DEFAULT_CQLSH_TERM = 'xterm'
 
 try:
@@ -49,6 +39,7 @@
     # Python 3.7+
     Pattern = re.Pattern
 
+
 def get_smm_sequence(term='xterm'):
     """
     Return the set meta mode (smm) sequence, if any.
@@ -56,19 +47,20 @@
     before each prompt.
     """
     result = ''
-    if not is_win():
-        tput_proc = subprocess.Popen(['tput', '-T{}'.format(term), 'smm'], stdout=subprocess.PIPE)
-        tput_stdout = tput_proc.communicate()[0]
-        if (tput_stdout and (tput_stdout != b'')):
-            result = tput_stdout
-            if isinstance(result, bytes):
-                result = result.decode("utf-8")
+    tput_proc = subprocess.Popen(['tput', '-T{}'.format(term), 'smm'], stdout=subprocess.PIPE)
+    tput_stdout = tput_proc.communicate()[0]
+    if (tput_stdout and (tput_stdout != b'')):
+        result = tput_stdout
+        if isinstance(result, bytes):
+            result = result.decode("utf-8")
     return result
 
+
 DEFAULT_SMM_SEQUENCE = get_smm_sequence()
 
 cqlshlog = basecase.cqlshlog
 
+
 def set_controlling_pty(master, slave):
     os.setsid()
     os.close(master)
@@ -78,6 +70,7 @@
         os.close(slave)
     os.close(os.open(os.ttyname(1), os.O_RDWR))
 
+
 @contextlib.contextmanager
 def raising_signal(signum, exc):
     """
@@ -93,11 +86,13 @@
     finally:
         signal.signal(signum, oldhandlr)
 
+
 class TimeoutError(Exception):
     pass
 
+
 @contextlib.contextmanager
-def timing_out_itimer(seconds):
+def timing_out(seconds):
     if seconds is None:
         yield
         return
@@ -111,46 +106,16 @@
         finally:
             signal.setitimer(signal.ITIMER_REAL, 0)
 
-@contextlib.contextmanager
-def timing_out_alarm(seconds):
-    if seconds is None:
-        yield
-        return
-    with raising_signal(signal.SIGALRM, TimeoutError):
-        oldval = signal.alarm(int(math.ceil(seconds)))
-        if oldval != 0:
-            signal.alarm(oldval)
-            raise RuntimeError("SIGALRM already in use")
-        try:
-            yield
-        finally:
-            signal.alarm(0)
-
-if is_win():
-    try:
-        import eventlet
-    except ImportError as e:
-        sys.exit("evenlet library required to run cqlshlib tests on Windows")
-
-    def timing_out(seconds):
-        return eventlet.Timeout(seconds, TimeoutError)
-else:
-    # setitimer is new in 2.6, but it's still worth supporting, for potentially
-    # faster tests because of sub-second resolution on timeouts.
-    if hasattr(signal, 'setitimer'):
-        timing_out = timing_out_itimer
-    else:
-        timing_out = timing_out_alarm
 
 def noop(*a):
     pass
 
+
 class ProcRunner:
     def __init__(self, path, tty=True, env=None, args=()):
         self.exe_path = path
         self.args = args
-        self.tty = bool(tty)
-        self.realtty = self.tty and not is_win()
+        self.tty = tty
         if env is None:
             env = {}
         self.env = env
@@ -163,7 +128,7 @@
         stdin = stdout = stderr = None
         cqlshlog.info("Spawning %r subprocess with args: %r and env: %r"
                       % (self.exe_path, self.args, self.env))
-        if self.realtty:
+        if self.tty:
             masterfd, slavefd = pty.openpty()
             preexec = (lambda: set_controlling_pty(masterfd, slavefd))
             self.proc = subprocess.Popen((self.exe_path,) + tuple(self.args),
@@ -181,15 +146,11 @@
                                          env=self.env, stdin=stdin, stdout=stdout,
                                          stderr=stderr, bufsize=0, close_fds=False)
             self.send = self.send_pipe
-            if self.tty:
-                self.winpty = WinPty(self.proc.stdout)
-                self.read = self.read_winpty
-            else:
-                self.read = self.read_pipe
+            self.read = self.read_pipe
 
     def close(self):
         cqlshlog.info("Closing %r subprocess." % (self.exe_path,))
-        if self.realtty:
+        if self.tty:
             os.close(self.childpty)
         else:
             self.proc.stdin.close()
@@ -216,12 +177,6 @@
             buf = buf.decode("utf-8")
         return buf
 
-    def read_winpty(self, blksize, timeout=None):
-        buf = self.winpty.read(blksize, timeout)
-        if isinstance(buf, bytes):
-            buf = buf.decode("utf-8")
-        return buf
-
     def read_until(self, until, blksize=4096, timeout=None,
                    flags=0, ptty_timeout=None, replace=[]):
         if not isinstance(until, Pattern):
@@ -271,10 +226,10 @@
             curtime = time()
         return got
 
+
 class CqlshRunner(ProcRunner):
     def __init__(self, path=None, host=None, port=None, keyspace=None, cqlver=None,
-                 args=(), prompt=DEFAULT_CQLSH_PROMPT, env=None,
-                 win_force_colors=True, tty=True, **kwargs):
+                 args=(), prompt=DEFAULT_CQLSH_PROMPT, env=None, tty=True, **kwargs):
         if path is None:
             path = join(basecase.cqlsh_dir, 'cqlsh')
         if host is None:
@@ -283,9 +238,6 @@
             port = basecase.TEST_PORT
         if env is None:
             env = {}
-        if is_win():
-            env['PYTHONUNBUFFERED'] = '1'
-            env.update(os.environ.copy())
         env.setdefault('TERM', 'xterm')
         env.setdefault('CQLSH_NO_BUNDLED', os.environ.get('CQLSH_NO_BUNDLED', ''))
         env.setdefault('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
@@ -297,11 +249,6 @@
             args += ('--cqlversion', str(cqlver))
         if keyspace is not None:
             args += ('--keyspace', keyspace.lower())
-        if tty and is_win():
-            args += ('--tty',)
-            args += ('--encoding', 'utf-8')
-            if win_force_colors:
-                args += ('--color',)
         if coverage:
             args += ('--coverage',)
         self.keyspace = keyspace
@@ -314,7 +261,7 @@
             self.output_header = self.read_to_next_prompt()
 
     def read_to_next_prompt(self, timeout=10.0):
-        return self.read_until(self.prompt, timeout=timeout, ptty_timeout=3, replace=[DEFAULT_SMM_SEQUENCE,])
+        return self.read_until(self.prompt, timeout=timeout, ptty_timeout=3, replace=[DEFAULT_SMM_SEQUENCE])
 
     def read_up_to_timeout(self, timeout, blksize=4096):
         output = ProcRunner.read_up_to_timeout(self, timeout, blksize=blksize)
@@ -330,7 +277,7 @@
         output = output.replace(' \r', '')
         output = output.replace('\r', '')
         output = output.replace(' \b', '')
-        if self.realtty:
+        if self.tty:
             echo, output = output.split('\n', 1)
             assert echo == cmd, "unexpected echo %r instead of %r" % (echo, cmd)
         try:
@@ -339,12 +286,14 @@
             promptline = output
             output = ''
         assert re.match(self.prompt, DEFAULT_PREFIX + promptline), \
-                'last line of output %r does not match %r?' % (promptline, self.prompt)
+            'last line of output %r does not match %r?' % (promptline, self.prompt)
         return output + '\n'
 
+
 def run_cqlsh(**kwargs):
     return contextlib.closing(CqlshRunner(**kwargs))
 
+
 def call_cqlsh(**kwargs):
     kwargs.setdefault('prompt', None)
     proginput = kwargs.pop('input', '')
diff --git a/pylib/cqlshlib/test/test_authproviderhandling.py b/pylib/cqlshlib/test/test_authproviderhandling.py
new file mode 100644
index 0000000..19a6133
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling.py
@@ -0,0 +1,190 @@
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import unittest
+import io
+import os
+import sys
+import pytest
+
+from cassandra.auth import PlainTextAuthProvider
+from cqlshlib.authproviderhandling import load_auth_provider
+
+
+def construct_config_path(config_file_name):
+    return os.path.join(os.path.dirname(__file__),
+                        'test_authproviderhandling_config',
+                        config_file_name)
+
+
+# Simple class to help verify AuthProviders that don't need arguments.
+class NoUserNamePlainTextAuthProvider(PlainTextAuthProvider):
+    def __init__(self):
+        super(NoUserNamePlainTextAuthProvider, self).__init__('', '')
+
+
+class ComplexTextAuthProvider(PlainTextAuthProvider):
+    def __init__(self, username, password='default_pass', extra_flag=None):
+        super(ComplexTextAuthProvider, self).__init__(username, password)
+        self.extra_flag = extra_flag
+
+
+def _assert_auth_provider_matches(actual, klass, expected_props):
+    """
+    Assert that the provider matches class and properties
+    * actual ..........: Thing to compare with it
+    * klass ...........: Class to ensure this matches to (ie PlainTextAuthProvider)
+    * expected_props ..: Dict of var properties to match
+    """
+    assert isinstance(actual, klass)
+    assert expected_props == vars(actual)
+
+class CustomAuthProviderTest(unittest.TestCase):
+
+    def setUp(self):
+        self._captured_std_err = io.StringIO()
+        sys.stderr = self._captured_std_err
+
+    def tearDown(self):
+        self._captured_std_err.close()
+        sys.stderr = sys.__stderr__
+
+    def test_no_warning_insecure_if_no_pass(self):
+        load_auth_provider(construct_config_path('plain_text_partial_example'))
+        err_msg = self._captured_std_err.getvalue()
+        assert err_msg == ''
+
+    def test_insecure_creds(self):
+        load_auth_provider(construct_config_path('full_plain_text_example'))
+        err_msg = self._captured_std_err.getvalue()
+        assert "Notice:" in err_msg
+        assert "Warning:" in err_msg
+
+    def test_creds_not_checked_for_non_plaintext(self):
+        load_auth_provider(construct_config_path('complex_auth_provider_with_pass'))
+        err_msg = self._captured_std_err.getvalue()
+        assert err_msg == ''
+
+    def test_partial_property_example(self):
+        actual = load_auth_provider(construct_config_path('partial_example'))
+        _assert_auth_provider_matches(
+                actual,
+                NoUserNamePlainTextAuthProvider,
+                {"username": '',
+                 "password": ''})
+
+    def test_full_property_example(self):
+        actual = load_auth_provider(construct_config_path('full_plain_text_example'))
+        _assert_auth_provider_matches(
+                actual,
+                PlainTextAuthProvider,
+                {"username": 'user1',
+                 "password": 'pass1'})
+
+    def test_empty_example(self):
+        actual = load_auth_provider(construct_config_path('empty_example'))
+        assert actual is None
+
+    def test_plaintextauth_when_not_defined(self):
+        creds_file = construct_config_path('plain_text_full_creds')
+        actual = load_auth_provider(cred_file=creds_file)
+        _assert_auth_provider_matches(
+                actual,
+                PlainTextAuthProvider,
+                {"username": 'user2',
+                 "password": 'pass2'})
+
+    def test_no_cqlshrc_file(self):
+        actual = load_auth_provider()
+        assert actual is None
+
+    def test_no_classname_example(self):
+        actual = load_auth_provider(construct_config_path('no_classname_example'))
+        assert actual is None
+
+    def test_improper_config_example(self):
+        with pytest.raises(ModuleNotFoundError) as error:
+            load_auth_provider(construct_config_path('illegal_example'))
+        assert error is not None
+
+    def test_username_password_passed_from_commandline(self):
+        creds_file = construct_config_path('complex_auth_provider_creds')
+        cqlshrc = construct_config_path('complex_auth_provider')
+
+        actual = load_auth_provider(cqlshrc, creds_file, 'user-from-legacy', 'pass-from-legacy')
+        _assert_auth_provider_matches(
+                 actual,
+                 ComplexTextAuthProvider,
+                 {"username": 'user-from-legacy',
+                  "password": 'pass-from-legacy',
+                  "extra_flag": 'flag2'})
+
+    def test_creds_example(self):
+        creds_file = construct_config_path('complex_auth_provider_creds')
+        cqlshrc = construct_config_path('complex_auth_provider')
+
+        actual = load_auth_provider(cqlshrc, creds_file)
+        _assert_auth_provider_matches(
+                actual,
+                ComplexTextAuthProvider,
+                {"username": 'user1',
+                 "password": 'pass2',
+                 "extra_flag": 'flag2'})
+
+    def test_legacy_example_use_passed_username(self):
+        creds_file = construct_config_path('plain_text_partial_creds')
+        cqlshrc = construct_config_path('plain_text_partial_example')
+
+        actual = load_auth_provider(cqlshrc, creds_file, 'user3')
+        _assert_auth_provider_matches(
+                actual,
+                PlainTextAuthProvider,
+                {"username": 'user3',
+                 "password": 'pass2'})
+
+    def test_legacy_example_no_auth_provider_given(self):
+        cqlshrc = construct_config_path('empty_example')
+        creds_file = construct_config_path('complex_auth_provider_creds')
+
+        actual = load_auth_provider(cqlshrc, creds_file, 'user3', 'pass3')
+        _assert_auth_provider_matches(
+                actual,
+                PlainTextAuthProvider,
+                {"username": 'user3',
+                 "password": 'pass3'})
+
+    def test_shouldnt_pass_no_password_when_alt_auth_provider(self):
+        cqlshrc = construct_config_path('complex_auth_provider')
+        creds_file = None
+
+        actual = load_auth_provider(cqlshrc, creds_file, 'user3')
+        _assert_auth_provider_matches(
+                actual,
+                ComplexTextAuthProvider,
+                {"username": 'user3',
+                 "password": 'default_pass',
+                 "extra_flag": 'flag1'})
+
+    def test_legacy_example_no_password(self):
+        cqlshrc = construct_config_path('plain_text_partial_example')
+        creds_file = None
+
+        actual = load_auth_provider(cqlshrc, creds_file, 'user3')
+        _assert_auth_provider_matches(
+                actual,
+                PlainTextAuthProvider,
+                {"username": 'user3',
+                 "password": None})
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider b/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider
new file mode 100644
index 0000000..879b7a6
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider
@@ -0,0 +1,10 @@
+; Config for a custom auth provider that uses the auth_provider field
+; ComplexTextAuthProvider subclasses the driver's PlainTextAuthProvider and
+; takes an extra field (extra_flag).
+; used by unit testing
+
+[auth_provider]
+module = cqlshlib.test.test_authproviderhandling
+classname = ComplexTextAuthProvider
+username = user1
+extra_flag = flag1
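An [auth_provider] section like the one above names a module and classname, and the remaining keys become constructor arguments. A simplified stand-in showing that mechanism (illustrative only; the real logic lives in cqlshlib.authproviderhandling.load_auth_provider, which additionally merges credentials files and any command-line username/password, as the tests above exercise):

    # Not part of the patch: rough sketch of how an [auth_provider] section maps
    # to a provider instance.
    import configparser
    import importlib

    def build_provider(cqlshrc_path):
        cfg = configparser.ConfigParser()
        cfg.read(cqlshrc_path)
        if not cfg.has_section('auth_provider'):
            return None
        section = dict(cfg.items('auth_provider'))
        if 'module' not in section or 'classname' not in section:
            return None
        module = importlib.import_module(section.pop('module'))
        klass = getattr(module, section.pop('classname'))
        # e.g. ComplexTextAuthProvider(username='user1', extra_flag='flag1')
        return klass(**section)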
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider_creds b/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider_creds
new file mode 100644
index 0000000..bb102bc
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider_creds
@@ -0,0 +1,3 @@
+[ComplexTextAuthProvider]
+extra_flag = flag2
+password = pass2
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider_with_pass b/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider_with_pass
new file mode 100644
index 0000000..c322008
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/complex_auth_provider_with_pass
@@ -0,0 +1,11 @@
+; Config for a custom auth provider that uses the auth_provider field
+; ComplexTextAuthProvider subclasses the driver's PlainTextAuthProvider and
+; takes an extra field (extra_flag).
+; used by unit testing
+
+[auth_provider]
+module = cqlshlib.test.test_authproviderhandling
+classname = ComplexTextAuthProvider
+username = user1
+password = pass1
+extra_flag = flag1
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/empty_example b/pylib/cqlshlib/test/test_authproviderhandling_config/empty_example
new file mode 100644
index 0000000..3dfda04
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/empty_example
@@ -0,0 +1,2 @@
+; Empty config with no auth_provider section; used by unit testing to verify that no provider is loaded
+
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/full_plain_text_example b/pylib/cqlshlib/test/test_authproviderhandling_config/full_plain_text_example
new file mode 100644
index 0000000..b962e63
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/full_plain_text_example
@@ -0,0 +1,10 @@
+; Config for a custom auth provider that uses all possible fields
+; This example loads the PlainTextAuthProvider and passes username and password
+; to the constructor dynamically.
+; used by unit testing
+
+[auth_provider]
+module = cassandra.auth
+classname = PlainTextAuthProvider
+username = user1
+password = pass1
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/illegal_example b/pylib/cqlshlib/test/test_authproviderhandling_config/illegal_example
new file mode 100644
index 0000000..615fe9f
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/illegal_example
@@ -0,0 +1,5 @@
+; Example that shouldn't work
+
+[auth_provider]
+module = nowhere.illegal.wrong
+classname = badclass
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/no_classname_example b/pylib/cqlshlib/test/test_authproviderhandling_config/no_classname_example
new file mode 100644
index 0000000..cf27bfd
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/no_classname_example
@@ -0,0 +1,5 @@
+; Config for a custom auth provider that uses only the auth_provider field
+; this version doesn't have a classname, but has a module name.
+
+[auth_provider]
+module = cqlshlib.test
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/partial_example b/pylib/cqlshlib/test/test_authproviderhandling_config/partial_example
new file mode 100644
index 0000000..23be26e
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/partial_example
@@ -0,0 +1,8 @@
+; Config for a custom auth provider that uses only the auth_provider field
+; NoUserNamePlainTextAuthProvider subclasses the driver's PlainTextAuthProvider
+; and doesn't take a username or password.
+; used by unit testing
+
+[auth_provider]
+module = cqlshlib.test.test_authproviderhandling
+classname = NoUserNamePlainTextAuthProvider
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_full_creds b/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_full_creds
new file mode 100644
index 0000000..3cd4470
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_full_creds
@@ -0,0 +1,3 @@
+[PlainTextAuthProvider]
+password = pass2
+username = user2
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_partial_creds b/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_partial_creds
new file mode 100644
index 0000000..1faf24d
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_partial_creds
@@ -0,0 +1,2 @@
+[PlainTextAuthProvider]
+password = pass2
diff --git a/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_partial_example b/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_partial_example
new file mode 100644
index 0000000..37baebd
--- /dev/null
+++ b/pylib/cqlshlib/test/test_authproviderhandling_config/plain_text_partial_example
@@ -0,0 +1,8 @@
+; Config for a custom auth provider that uses some possible fields
+; validates that a partial configuration (username but no password) is handled successfully
+; used by unit testing
+
+[auth_provider]
+module = cassandra.auth
+classname = PlainTextAuthProvider
+username = user1
diff --git a/pylib/cqlshlib/test/test_copyutil.py b/pylib/cqlshlib/test/test_copyutil.py
index 18b167a..19fcdd1 100644
--- a/pylib/cqlshlib/test/test_copyutil.py
+++ b/pylib/cqlshlib/test/test_copyutil.py
@@ -20,7 +20,7 @@
 
 import unittest
 
-from cassandra.metadata import MIN_LONG, Murmur3Token, TokenMap
+from cassandra.metadata import MIN_LONG, Murmur3Token
 from cassandra.policies import SimpleConvictionPolicy
 from cassandra.pool import Host
 from unittest.mock import Mock
@@ -75,10 +75,11 @@
         }
         # merge override options with standard options
         overridden_opts = dict(self.opts)
-        for k,v in opts.items():
+        for k, v in opts.items():
             overridden_opts[k] = v
         export_task = ExportTask(shell, self.ks, self.table, self.columns, self.fname, overridden_opts, self.protocol_version, self.config_file)
         assert export_task.get_ranges() == expected_ranges
+        export_task.close()
 
     def test_get_ranges_murmur3(self):
         """
@@ -92,19 +93,19 @@
         self._test_get_ranges_murmur3_base({'begintoken': 1, 'endtoken': -1}, {})
 
         # simple case of a single range
-        expected_ranges = {(1,2): {'hosts': ('10.0.0.4', '10.0.0.1', '10.0.0.2'), 'attempts': 0, 'rows': 0, 'workerno': -1}}
+        expected_ranges = {(1, 2): {'hosts': ('10.0.0.4', '10.0.0.1', '10.0.0.2'), 'attempts': 0, 'rows': 0, 'workerno': -1}}
         self._test_get_ranges_murmur3_base({'begintoken': 1, 'endtoken': 2}, expected_ranges)
 
         # simple case of two contiguous ranges
         expected_ranges = {
-            (-4611686018427387903,0): {'hosts': ('10.0.0.3', '10.0.0.4', '10.0.0.1'), 'attempts': 0, 'rows': 0, 'workerno': -1},
-            (0,1): {'hosts': ('10.0.0.4', '10.0.0.1', '10.0.0.2'), 'attempts': 0, 'rows': 0, 'workerno': -1}
+            (-4611686018427387903, 0): {'hosts': ('10.0.0.3', '10.0.0.4', '10.0.0.1'), 'attempts': 0, 'rows': 0, 'workerno': -1},
+            (0, 1): {'hosts': ('10.0.0.4', '10.0.0.1', '10.0.0.2'), 'attempts': 0, 'rows': 0, 'workerno': -1}
         }
         self._test_get_ranges_murmur3_base({'begintoken': -4611686018427387903, 'endtoken': 1}, expected_ranges)
 
         # specify a begintoken only (endtoken defaults to None)
         expected_ranges = {
-            (4611686018427387905,None): {'hosts': ('10.0.0.1', '10.0.0.2', '10.0.0.3'), 'attempts': 0, 'rows': 0, 'workerno': -1}
+            (4611686018427387905, None): {'hosts': ('10.0.0.1', '10.0.0.2', '10.0.0.3'), 'attempts': 0, 'rows': 0, 'workerno': -1}
         }
         self._test_get_ranges_murmur3_base({'begintoken': 4611686018427387905}, expected_ranges)
 
@@ -113,4 +114,3 @@
             (None, MIN_LONG + 1): {'hosts': ('10.0.0.2', '10.0.0.3', '10.0.0.4'), 'attempts': 0, 'rows': 0, 'workerno': -1}
         }
         self._test_get_ranges_murmur3_base({'endtoken': MIN_LONG + 1}, expected_ranges)
-
diff --git a/pylib/cqlshlib/test/test_cql_parsing.py b/pylib/cqlshlib/test/test_cql_parsing.py
index 8631d7a..b1f45f5 100644
--- a/pylib/cqlshlib/test/test_cql_parsing.py
+++ b/pylib/cqlshlib/test/test_cql_parsing.py
@@ -778,12 +778,12 @@
 
 
 def parse_cqlsh_statements(text):
-    '''
+    """
     Runs its argument through the sequence of parsing steps that cqlsh takes its
     input through.
 
     Currently does not handle batch statements.
-    '''
+    """
     # based on onecmd
     statements, _ = CqlRuleSet.cql_split_statements(text)
     # stops here. For regular cql commands, onecmd just splits it and sends it
@@ -799,13 +799,13 @@
 
 
 def strip_final_empty_items(xs):
-    '''
+    """
     Returns its a copy of argument as a list, but with any terminating
     subsequence of falsey values removed.
 
     >>> strip_final_empty_items([[3, 4], [5, 6, 7], [], [], [1], []])
     [[3, 4], [5, 6, 7], [], [], [1]]
-    '''
+    """
     rv = list(xs)
 
     while rv and not rv[-1]:
diff --git a/pylib/cqlshlib/test/test_cqlsh_completion.py b/pylib/cqlshlib/test/test_cqlsh_completion.py
index 9d902e4..af9d05e 100644
--- a/pylib/cqlshlib/test/test_cqlsh_completion.py
+++ b/pylib/cqlshlib/test/test_cqlsh_completion.py
@@ -21,11 +21,10 @@
 import locale
 import os
 import re
-from .basecase import BaseTestCase, cqlsh, cqlshlog
+from .basecase import BaseTestCase
 from .cassconnect import create_db, remove_db, testrun_cqlsh
 from .run_cqlsh import TimeoutError
-import unittest
-import sys
+from cqlshlib.cql3handling import CqlRuleSet
 
 BEL = '\x07'  # the terminal-bell character
 CTRL_C = '\x03'
@@ -40,7 +39,6 @@
 completion_separation_re = re.compile(r'\s+')
 
 
-@unittest.skipIf(sys.platform == "win32", 'Tab completion tests not supported on Windows')
 class CqlshCompletionCase(BaseTestCase):
 
     @classmethod
@@ -55,7 +53,7 @@
         env = os.environ.copy()
         env['COLUMNS'] = '100000'
         if (locale.getpreferredencoding() != 'UTF-8'):
-             env['LC_CTYPE'] = 'en_US.utf8'
+            env['LC_CTYPE'] = 'en_US.utf8'
         self.cqlsh_runner = testrun_cqlsh(cqlver=None, env=env)
         self.cqlsh = self.cqlsh_runner.__enter__()
 
@@ -153,15 +151,13 @@
                 # retry once
                 self.cqlsh.send(CTRL_C)
                 self.cqlsh.read_to_next_prompt(timeout=10.0)
- 
 
     def strategies(self):
-        return self.module.CqlRuleSet.replication_strategies
+        return CqlRuleSet.replication_strategies
 
 
 class TestCqlshCompletion(CqlshCompletionCase):
     cqlver = '3.1.6'
-    module = cqlsh.cql3handling
 
     def test_complete_on_empty_string(self):
         self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
@@ -385,7 +381,16 @@
                             choices=['EXISTS', '<quotedName>', '<identifier>'])
 
         self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF EXISTS ",
-                            choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>', '.'])
+                            choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>', '.', 'CONTAINS'])
+
+        self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF lonelykey ",
+                            choices=['>=', '!=', '<=', 'IN', '=', '<', '>', 'CONTAINS'])
+
+        self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF lonelykey CONTAINS ",
+                            choices=['false', 'true', '<pgStringLiteral>',
+                                     '-', '<float>', 'TOKEN', '<identifier>',
+                                     '<uuid>', '{', '[', 'NULL', '<quotedStringLiteral>',
+                                     '<blobLiteral>', '<wholenumber>', 'KEY'])
 
     def test_complete_in_delete(self):
         self.trycompletions('DELETE F', choices=['FROM', '<identifier>', '<quotedName>'])
@@ -467,7 +472,13 @@
                             choices=['EXISTS', '<identifier>', '<quotedName>'])
         self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
                              'TOKEN(a) >= TOKEN(0) IF b '),
-                            choices=['>=', '!=', '<=', 'IN', '=', '<', '>'])
+                            choices=['>=', '!=', '<=', 'IN', '=', '<', '>', 'CONTAINS'])
+        self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
+                             'TOKEN(a) >= TOKEN(0) IF b CONTAINS '),
+                            choices=['false', 'true', '<pgStringLiteral>',
+                                     '-', '<float>', 'TOKEN', '<identifier>',
+                                     '<uuid>', '{', '[', 'NULL', '<quotedStringLiteral>',
+                                     '<blobLiteral>', '<wholenumber>', 'KEY'])
         self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
                              'TOKEN(a) >= TOKEN(0) IF b < 0 '),
                             choices=['AND', ';'])
@@ -684,7 +695,7 @@
                                      'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
                                      'tombstone_compaction_interval', 'tombstone_threshold',
                                      'enabled', 'unchecked_tombstone_compaction',
-                                     'only_purge_repaired_tombstones','provide_overlapping_tombstones'])
+                                     'only_purge_repaired_tombstones', 'provide_overlapping_tombstones'])
 
     def test_complete_in_create_columnfamily(self):
         self.trycompletions('CREATE C', choices=['COLUMNFAMILY', 'CUSTOM'])
@@ -833,7 +844,111 @@
     def test_complete_in_alter_keyspace(self):
         self.trycompletions('ALTER KEY', 'SPACE ')
         self.trycompletions('ALTER KEYSPACE ', '', choices=[self.cqlsh.keyspace, 'system_auth',
-                                                            'system_distributed', 'system_traces'])
+                                                            'system_distributed', 'system_traces', 'IF'])
+        self.trycompletions('ALTER KEYSPACE I', immediate='F EXISTS ')
         self.trycompletions('ALTER KEYSPACE system_trac', "es WITH replication = {'class': '")
         self.trycompletions("ALTER KEYSPACE system_traces WITH replication = {'class': '", '',
                             choices=['NetworkTopologyStrategy', 'SimpleStrategy'])
+
+    def test_complete_in_grant(self):
+        self.trycompletions("GR",
+                            immediate='ANT ')
+        self.trycompletions("GRANT ",
+                            choices=['ALL', 'ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'MODIFY', 'SELECT'],
+                            other_choices_ok=True)
+        self.trycompletions("GRANT MODIFY ",
+                            choices=[',', 'ON', 'PERMISSION'])
+        self.trycompletions("GRANT MODIFY P",
+                            immediate='ERMISSION ')
+        self.trycompletions("GRANT MODIFY PERMISSION ",
+                            choices=[',', 'ON'])
+        self.trycompletions("GRANT MODIFY PERMISSION, ",
+                            choices=['ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'SELECT'])
+        self.trycompletions("GRANT MODIFY PERMISSION, D",
+                            choices=['DESCRIBE', 'DROP'])
+        self.trycompletions("GRANT MODIFY PERMISSION, DR",
+                            immediate='OP ')
+        self.trycompletions("GRANT MODIFY PERMISSION, DROP O",
+                            immediate='N ')
+        self.trycompletions("GRANT MODIFY, DROP ON ",
+                            choices=['ALL', 'KEYSPACE', 'MBEANS', 'ROLE', 'FUNCTION', 'MBEAN', 'TABLE'],
+                            other_choices_ok=True)
+        self.trycompletions("GRANT MODIFY, DROP ON ALL ",
+                            choices=['KEYSPACES', 'TABLES'],
+                            other_choices_ok=True)
+        self.trycompletions("GRANT MODIFY PERMISSION ON KEY",
+                            immediate='SPACE ')
+        self.trycompletions("GRANT MODIFY PERMISSION ON KEYSPACE system_tr",
+                            immediate='aces TO ')
+
+    def test_complete_in_revoke(self):
+        self.trycompletions("RE",
+                            immediate='VOKE ')
+        self.trycompletions("REVOKE ",
+                            choices=['ALL', 'ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'MODIFY', 'SELECT'],
+                            other_choices_ok=True)
+        self.trycompletions("REVOKE MODIFY ",
+                            choices=[',', 'ON', 'PERMISSION'])
+        self.trycompletions("REVOKE MODIFY P",
+                            immediate='ERMISSION ')
+        self.trycompletions("REVOKE MODIFY PERMISSION ",
+                            choices=[',', 'ON'])
+        self.trycompletions("REVOKE MODIFY PERMISSION, ",
+                            choices=['ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'SELECT'])
+        self.trycompletions("REVOKE MODIFY PERMISSION, D",
+                            choices=['DESCRIBE', 'DROP'])
+        self.trycompletions("REVOKE MODIFY PERMISSION, DR",
+                            immediate='OP ')
+        self.trycompletions("REVOKE MODIFY PERMISSION, DROP ",
+                            choices=[',', 'ON', 'PERMISSION'])
+        self.trycompletions("REVOKE MODIFY PERMISSION, DROP O",
+                            immediate='N ')
+        self.trycompletions("REVOKE MODIFY PERMISSION, DROP ON ",
+                            choices=['ALL', 'KEYSPACE', 'MBEANS', 'ROLE', 'FUNCTION', 'MBEAN', 'TABLE'],
+                            other_choices_ok=True)
+        self.trycompletions("REVOKE MODIFY, DROP ON ALL ",
+                            choices=['KEYSPACES', 'TABLES'],
+                            other_choices_ok=True)
+        self.trycompletions("REVOKE MODIFY PERMISSION, DROP ON KEY",
+                            immediate='SPACE ')
+        self.trycompletions("REVOKE MODIFY PERMISSION, DROP ON KEYSPACE system_tr",
+                            immediate='aces FROM ')
+
+    def test_complete_in_alter_table(self):
+        self.trycompletions('ALTER TABLE I', immediate='F EXISTS ')
+        self.trycompletions('ALTER TABLE IF', immediate=' EXISTS ')
+        self.trycompletions('ALTER TABLE ', choices=['IF', 'twenty_rows_table',
+                                                     'ascii_with_special_chars', 'users',
+                                                     'has_all_types', 'system.',
+                                                     'empty_composite_table', 'empty_table',
+                                                     'system_auth.', 'undefined_values_table',
+                                                     'dynamic_columns',
+                                                     'twenty_rows_composite_table',
+                                                     'utf8_with_special_chars',
+                                                     'system_traces.', 'songs', 'system_views.',
+                                                     'system_virtual_schema.',
+                                                     'system_schema.', 'system_distributed.',
+                                                     self.cqlsh.keyspace + '.'])
+        self.trycompletions('ALTER TABLE IF EXISTS new_table ADD ', choices=['<new_column_name>', 'IF'])
+        self.trycompletions('ALTER TABLE IF EXISTS new_table ADD IF NOT EXISTS ', choices=['<new_column_name>'])
+        self.trycompletions('ALTER TABLE new_table ADD IF NOT EXISTS ', choices=['<new_column_name>'])
+        self.trycompletions('ALTER TABLE IF EXISTS new_table RENAME ', choices=['IF', '<quotedName>', '<identifier>'])
+        self.trycompletions('ALTER TABLE new_table RENAME ', choices=['IF', '<quotedName>', '<identifier>'])
+        self.trycompletions('ALTER TABLE IF EXISTS new_table DROP ', choices=['IF', '<quotedName>', '<identifier>'])
+
+    def test_complete_in_alter_type(self):
+        self.trycompletions('ALTER TYPE I', immediate='F EXISTS ')
+        self.trycompletions('ALTER TYPE ', choices=['IF', 'system_views.',
+                                                    'tags', 'system_traces.', 'system_distributed.',
+                                                    'phone_number', 'band_info_type', 'address', 'system.', 'system_schema.',
+                                                    'system_auth.', 'system_virtual_schema.', self.cqlsh.keyspace + '.'
+                                                    ])
+        self.trycompletions('ALTER TYPE IF EXISTS new_type ADD ', choices=['<new_field_name>', 'IF'])
+        self.trycompletions('ALTER TYPE IF EXISTS new_type ADD IF NOT EXISTS ', choices=['<new_field_name>'])
+        self.trycompletions('ALTER TYPE IF EXISTS new_type RENAME ', choices=['IF', '<quotedName>', '<identifier>'])
+
+    def test_complete_in_alter_user(self):
+        self.trycompletions('ALTER USER ', choices=['<identifier>', 'IF', '<pgStringLiteral>', '<quotedStringLiteral>'])
+
+    def test_complete_in_alter_role(self):
+        self.trycompletions('ALTER ROLE ', choices=['<identifier>', 'IF', '<quotedName>'])
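The completions above track the new conditional DDL forms in the grammar (ALTER TABLE/TYPE/ROLE ... IF EXISTS, ADD IF NOT EXISTS). A minimal sketch of issuing those statements through the DataStax Python driver; the contact point, keyspace "ks", and the table/type/role names are illustrative assumptions, not part of this patch:

    from cassandra.cluster import Cluster

    # assumes a local node and an existing keyspace "ks"
    cluster = Cluster(['127.0.0.1'])
    session = cluster.connect('ks')

    # conditional DDL: none of these error out when the target object is missing (IF EXISTS)
    # or when the column/field is already present (ADD IF NOT EXISTS)
    session.execute("ALTER TABLE IF EXISTS new_table ADD IF NOT EXISTS extra_col int")
    session.execute("ALTER TYPE IF EXISTS new_type ADD IF NOT EXISTS extra_field text")
    session.execute("ALTER ROLE IF EXISTS app_role WITH LOGIN = true")

    cluster.shutdown()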
diff --git a/pylib/cqlshlib/test/test_cqlsh_output.py b/pylib/cqlshlib/test/test_cqlsh_output.py
index ed40683..52f564a 100644
--- a/pylib/cqlshlib/test/test_cqlsh_output.py
+++ b/pylib/cqlshlib/test/test_cqlsh_output.py
@@ -17,18 +17,12 @@
 # to configure behavior, define $CQL_TEST_HOST to the destination address
 # and $CQL_TEST_PORT to the associated port.
 
-from __future__ import unicode_literals, with_statement
-
 import locale
 import os
 import re
-import subprocess
-import sys
-import six
-import unittest
 
 from .basecase import (BaseTestCase, TEST_HOST, TEST_PORT,
-                       at_a_time, cqlsh, cqlshlog, dedent)
+                       at_a_time, cqlshlog, dedent)
 from .cassconnect import (cassandra_cursor, create_db, get_keyspace,
                           quote_name, remove_db, split_cql_commands,
                           testcall_cqlsh, testrun_cqlsh)
@@ -124,8 +118,7 @@
         for termname in ('', 'dumb', 'vt100'):
             cqlshlog.debug('TERM=%r' % termname)
             env['TERM'] = termname
-            with testrun_cqlsh(tty=True, env=env,
-                               win_force_colors=False) as c:
+            with testrun_cqlsh(tty=True, env=env) as c:
                 c.send('select * from has_all_types;\n')
                 self.assertNoHasColors(c.read_to_next_prompt())
                 c.send('select count(*) from has_all_types;\n')
@@ -610,7 +603,7 @@
             outputlines = c.read_to_next_prompt().splitlines()
 
             start_index = 0
-            if c.realtty:
+            if c.tty:
                 self.assertEqual(outputlines[start_index], 'use NONEXISTENTKEYSPACE;')
                 start_index = 1
 
@@ -688,6 +681,7 @@
                 AND comment = ''
                 AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
                 AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+                AND memtable = 'default'
                 AND crc_check_chance = 1.0
                 AND default_time_to_live = 0
                 AND extensions = {}
@@ -791,37 +785,19 @@
                 self.assertIn('VIRTUAL KEYSPACE system_virtual_schema', output)
                 self.assertIn("\nCREATE KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}  AND durable_writes = true;\n",
                               output)
-                self.assertRegex(output, '.*\s*$')
+                self.assertRegex(output, r'.*\s*$')
 
     def test_show_output(self):
         with testrun_cqlsh(tty=True, env=self.default_env) as c:
             output = c.cmd_and_response('show version;')
             self.assertRegex(output,
-                    '^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Native protocol \S+\]$')
+                    r'^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Native protocol \S+\]$')
 
             output = c.cmd_and_response('show host;')
             self.assertHasColors(output)
             self.assertRegex(output, '^Connected to .* at %s:%d$'
                                              % (re.escape(TEST_HOST), TEST_PORT))
 
-    @unittest.skipIf(six.PY3, 'Will not emit warning when running Python 3')
-    def test_warn_py2(self):
-        # has the warning
-        with testrun_cqlsh(tty=True, env=self.default_env) as c:
-            self.assertIn('Python 2.7 support is deprecated.', c.output_header, 'cqlsh did not output expected warning.')
-
-        # can suppress
-        env = self.default_env.copy()
-        env['CQLSH_NO_WARN_PY2'] = '1'
-        with testrun_cqlsh(tty=True, env=env) as c:
-            self.assertNotIn('Python 2.7 support is deprecated.', c.output_header, 'cqlsh did not output expected warning.')
-
-    @unittest.skipIf(six.PY2, 'Warning will be emitted when running Python 2.7')
-    def test_no_warn_py3(self):
-        with testrun_cqlsh(tty=True, env=self.default_env) as c:
-            self.assertNotIn('Python 2.7 support is deprecated.', c.output_header, 'cqlsh did not output expected warning.')
-
-    @unittest.skipIf(sys.platform == "win32", 'EOF signaling not supported on Windows')
     def test_eof_prints_newline(self):
         with testrun_cqlsh(tty=True, env=self.default_env) as c:
             c.send(CONTROL_D)
@@ -836,9 +812,8 @@
             with testrun_cqlsh(tty=True, env=self.default_env) as c:
                 cmd = 'exit%s\n' % semicolon
                 c.send(cmd)
-                if c.realtty:
-                    out = c.read_lines(1)[0].replace('\r', '')
-                    self.assertEqual(out, cmd)
+                out = c.read_lines(1)[0].replace('\r', '')
+                self.assertEqual(out, cmd)
                 with self.assertRaises(BaseException) as cm:
                     c.read_lines(1)
                 self.assertIn(type(cm.exception), (EOFError, OSError))
diff --git a/pylib/cqlshlib/test/test_sslhandling.py b/pylib/cqlshlib/test/test_sslhandling.py
deleted file mode 100644
index a96089d..0000000
--- a/pylib/cqlshlib/test/test_sslhandling.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#  Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
-from cassandra.policies import SimpleConvictionPolicy
-from cassandra.pool import Host
-from cqlshlib.sslhandling import ssl_settings
-
-import unittest
-import os
-import ssl
-import pytest
-
-class SslSettingsTest(unittest.TestCase):
-
-    def setUp(self):
-        os.environ['SSL_VALIDATE'] = 'False'
-        self.config_file = 'test_config'
-        self.host = Host('10.0.0.1', SimpleConvictionPolicy, 9000)
-
-    def tearDown(self):
-        del os.environ['SSL_VALIDATE']
-        try:
-            del os.environ['SSL_VERSION']
-        except KeyError:
-            pass
-
-    def _test_ssl_version_from_env(self, version):
-        """
-        Getting SSL version string from env variable SSL_VERSION.
-        """
-        os.environ['SSL_VERSION'] = version
-        ssl_ret_val = ssl_settings(self.host, self.config_file)
-        assert ssl_ret_val is not None
-        assert ssl_ret_val.get('ssl_version') == getattr(ssl, 'PROTOCOL_%s' % version)
-
-    def test_ssl_versions_from_env(self):
-        versions = ['TLS', 'TLSv1_1', 'TLSv1_2', 'TLSv1']
-        for version in versions:
-            self._test_ssl_version_from_env(version)
-
-    def test_invalid_ssl_versions_from_env(self):
-        msg = "invalid_ssl is not a valid SSL protocol, please use one of TLSv1, TLSv1_1, or TLSv1_2"
-        with pytest.raises(SystemExit) as error:
-            self._test_ssl_version_from_env('invalid_ssl')
-            assert msg == error.exception.message
-
-    def test_default_ssl_version(self):
-        ssl_ret_val = ssl_settings(self.host, self.config_file)
-        assert ssl_ret_val is not None
-        assert ssl_ret_val.get('ssl_version') == getattr(ssl, 'PROTOCOL_TLS')
-
-    def test_ssl_version_config(self):
-        ssl_ret_val = ssl_settings(self.host, os.path.join('test', 'config', 'sslhandling.config'))
-        assert ssl_ret_val is not None
-        assert ssl_ret_val.get('ssl_version') == getattr(ssl, 'PROTOCOL_TLSv1')
-
-    def test_invalid_ssl_version_config(self):
-        msg = "invalid_ssl is not a valid SSL protocol, please use one of TLSv1, TLSv1_1, or TLSv1_2"
-        with pytest.raises(SystemExit) as error:
-            ssl_settings(self.host, os.path.join('test', 'config', 'sslhandling_invalid.config'))
-            assert msg in error.exception.message
-            
diff --git a/pylib/cqlshlib/test/test_unicode.py b/pylib/cqlshlib/test/test_unicode.py
index 9fc052f..d24a787 100644
--- a/pylib/cqlshlib/test/test_unicode.py
+++ b/pylib/cqlshlib/test/test_unicode.py
@@ -15,13 +15,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import unicode_literals, with_statement
-
 import os
-import subprocess
 
 from .basecase import BaseTestCase
-from .cassconnect import (get_cassandra_connection, create_keyspace, testrun_cqlsh)
+from .cassconnect import (get_cassandra_connection, create_keyspace, remove_db, testrun_cqlsh)
 from cqlshlib.formatting import unicode_controlchars_re
 
 
@@ -38,6 +35,10 @@
         env['LC_CTYPE'] = 'UTF-8'
         cls.default_env = env
 
+    @classmethod
+    def tearDownClass(cls):
+        remove_db()
+
     def test_unicode_value_round_trip(self):
         with testrun_cqlsh(tty=True, env=self.default_env) as c:
             value = 'ϑΉӁװڜ'
@@ -72,7 +73,7 @@
             output = c.cmd_and_response('CREATE TYPE "%s" ( "%s" int );' % (v1, v2))
             output = c.cmd_and_response('DESC TYPES;')
             self.assertIn(v1, output)
-            output = c.cmd_and_response('DESC TYPE "%s";' %(v1,))
+            output = c.cmd_and_response('DESC TYPE "%s";' % (v1,))
             self.assertIn(v2, output)
 
     def test_unicode_esc(self):  # CASSANDRA-17617
diff --git a/pylib/cqlshlib/test/winpty.py b/pylib/cqlshlib/test/winpty.py
index f197aa5..02b1981 100644
--- a/pylib/cqlshlib/test/winpty.py
+++ b/pylib/cqlshlib/test/winpty.py
@@ -15,8 +15,8 @@
 # limitations under the License.
 
 from threading import Thread
-from six import StringIO
-from six.moves.queue import Queue, Empty
+from io import StringIO
+from queue import Queue, Empty
 
 
 class WinPty(object):
@@ -47,4 +47,4 @@
                 count = count + 1
         except Empty:
             pass
-        return buf.getvalue()
\ No newline at end of file
+        return buf.getvalue()
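The helper now relies only on the standard library (io.StringIO plus queue) for its non-blocking reads: a background thread pushes characters onto a Queue and the reader drains whatever has arrived, swallowing queue.Empty once nothing is left. A standalone sketch of that pattern; the producer below is a stand-in for the thread that reads the child terminal:

    from io import StringIO
    from queue import Queue, Empty
    from threading import Thread
    import time

    q = Queue()

    def producer():
        # stand-in for the reader thread feeding one character at a time
        for ch in "hello":
            q.put(ch)
            time.sleep(0.01)

    Thread(target=producer, daemon=True).start()

    def read_all(timeout=0.5):
        buf = StringIO()
        try:
            buf.write(q.get(timeout=timeout))   # wait briefly for the first character
            while True:
                buf.write(q.get_nowait())       # then drain whatever is already queued
        except Empty:
            pass
        return buf.getvalue()

    print(read_all())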
diff --git a/pylib/cqlshlib/tracing.py b/pylib/cqlshlib/tracing.py
index 0f1988a..3db8e34 100644
--- a/pylib/cqlshlib/tracing.py
+++ b/pylib/cqlshlib/tracing.py
@@ -14,12 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from datetime import datetime, timedelta
+from datetime import datetime
 import time
 
 from cassandra.query import QueryTrace, TraceUnavailable
 from cqlshlib.displaying import MAGENTA
-from cqlshlib.formatting import CqlType
 
 
 def print_trace_session(shell, session, session_id, partial_session=False):
@@ -74,8 +73,6 @@
     if trace.duration:
         finished_at = (datetime_from_utc_to_local(trace.started_at) + trace.duration)
         rows.append(['Request complete', str(finished_at), trace.coordinator, total_micro_seconds(trace.duration), trace.client])
-    else:
-        finished_at = trace.duration = "--"
 
     return rows
 
diff --git a/pylib/cqlshlib/util.py b/pylib/cqlshlib/util.py
index 82a332f..144586a 100644
--- a/pylib/cqlshlib/util.py
+++ b/pylib/cqlshlib/util.py
@@ -18,10 +18,12 @@
 import cProfile
 import codecs
 import pstats
-
+import os
+import errno
+import stat
 
 from datetime import timedelta, tzinfo
-from six import StringIO
+from io import StringIO
 
 try:
     from line_profiler import LineProfiler
@@ -112,6 +114,21 @@
     return s
 
 
+def is_file_secure(filename):
+    try:
+        st = os.stat(filename)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        # the file doesn't exist, the security of it is irrelevant
+        return True
+    uid = os.getuid()
+
+    # Skip enforcing the file owner and UID matching for the root user (uid == 0).
+    # This is to allow "sudo cqlsh" to work with a user-owned credentials file.
+    return (uid == 0 or st.st_uid == uid) and stat.S_IMODE(st.st_mode) & (stat.S_IRGRP | stat.S_IROTH) == 0
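In other words, a credentials file is considered secure when it is owned by the invoking user (unless running as root) and is neither group- nor world-readable. A small standalone illustration of that mode check (POSIX semantics), using a throwaway temp file rather than a real credentials file:

    import os
    import stat
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        path = f.name

    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)   # 0600: owner-only, passes the check
    print(stat.S_IMODE(os.stat(path).st_mode) & (stat.S_IRGRP | stat.S_IROTH) == 0)  # True

    os.chmod(path, stat.S_IRUSR | stat.S_IRGRP)   # 0640: group-readable, fails the check
    print(stat.S_IMODE(os.stat(path).st_mode) & (stat.S_IRGRP | stat.S_IROTH) == 0)  # False

    os.unlink(path)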
+
+
 def get_file_encoding_bomsize(filename):
     """
     Checks the beginning of a file for a Unicode BOM.  Based on this check,
diff --git a/pylib/pytest.ini b/pylib/pytest.ini
deleted file mode 100644
index d360f07..0000000
--- a/pylib/pytest.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[pytest]
-addopts = --junit-xml=nosetests.xml
diff --git a/pylib/requirements.txt b/pylib/requirements.txt
index 8788690..c030460 100644
--- a/pylib/requirements.txt
+++ b/pylib/requirements.txt
@@ -1,17 +1,5 @@
-# See python driver docs: six have to be installed before
-# cythonizing the driver, perhaps only on old pips.
-# http://datastax.github.io/python-driver/installation.html#cython-based-extensions
-six>=1.12.0
 -e git+https://github.com/datastax/python-driver.git@cassandra-test#egg=cassandra-driver
 # Used ccm version is tracked by cassandra-test branch in ccm repo. Please create a PR there for fixes or upgrades to new releases.
 -e git+https://github.com/riptano/ccm.git@cassandra-test#egg=ccm
 coverage
-decorator
-docopt
-enum34
-flaky
-mock
 pytest
-parse
-pycodestyle
-psutil
diff --git a/redhat/cassandra.spec b/redhat/cassandra.spec
index 4785262..7431c1c 100644
--- a/redhat/cassandra.spec
+++ b/redhat/cassandra.spec
@@ -191,7 +191,6 @@
 
 %files tools
 %attr(755,root,root) %{_bindir}/sstabledump
-%attr(755,root,root) %{_bindir}/cassandra-stressd
 %attr(755,root,root) %{_bindir}/compaction-stress
 %attr(755,root,root) %{_bindir}/sstableexpiredblockers
 %attr(755,root,root) %{_bindir}/sstablelevelreset
@@ -203,6 +202,7 @@
 %attr(755,root,root) %{_bindir}/jmxtool
 %attr(755,root,root) %{_bindir}/fqltool
 %attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/hash_password
 
 
 %changelog
diff --git a/redhat/noboolean/cassandra.spec b/redhat/noboolean/cassandra.spec
index c1ff712..a3abaa6 100644
--- a/redhat/noboolean/cassandra.spec
+++ b/redhat/noboolean/cassandra.spec
@@ -191,7 +191,6 @@
 
 %files tools
 %attr(755,root,root) %{_bindir}/sstabledump
-%attr(755,root,root) %{_bindir}/cassandra-stressd
 %attr(755,root,root) %{_bindir}/compaction-stress
 %attr(755,root,root) %{_bindir}/sstableexpiredblockers
 %attr(755,root,root) %{_bindir}/sstablelevelreset
@@ -203,6 +202,7 @@
 %attr(755,root,root) %{_bindir}/jmxtool
 %attr(755,root,root) %{_bindir}/fqltool
 %attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/hash_password
 
 
 %changelog
diff --git a/src/antlr/Lexer.g b/src/antlr/Lexer.g
index d7c0d23..72ab7db 100644
--- a/src/antlr/Lexer.g
+++ b/src/antlr/Lexer.g
@@ -150,6 +150,7 @@
 K_SUPERUSER:   S U P E R U S E R;
 K_NOSUPERUSER: N O S U P E R U S E R;
 K_PASSWORD:    P A S S W O R D;
+K_HASHED:      H A S H E D;
 K_LOGIN:       L O G I N;
 K_NOLOGIN:     N O L O G I N;
 K_OPTIONS:     O P T I O N S;
diff --git a/src/antlr/Parser.g b/src/antlr/Parser.g
index b3ba7b3..d061ee4 100644
--- a/src/antlr/Parser.g
+++ b/src/antlr/Parser.g
@@ -267,7 +267,7 @@
         Term.Raw limit = null;
         Term.Raw perPartitionLimit = null;
         Map<ColumnIdentifier, Boolean> orderings = new LinkedHashMap<>();
-        List<ColumnIdentifier> groups = new ArrayList<>();
+        List<Selectable.Raw> groups = new ArrayList<>();
         boolean allowFiltering = false;
         boolean isJson = false;
     }
@@ -463,8 +463,8 @@
     : c=cident (K_ASC | K_DESC { reversed = true; })? { orderings.put(c, reversed); }
     ;
 
-groupByClause[List<ColumnIdentifier> groups]
-    : c=cident { groups.add(c); }
+groupByClause[List<Selectable.Raw> groups]
+    : s=unaliasedSelector { groups.add(s); }
     ;
 
 /**
@@ -915,37 +915,45 @@
     ;
 
 /**
- * ALTER KEYSPACE <KS> WITH <property> = <value>;
+ * ALTER KEYSPACE [IF EXISTS] <KS> WITH <property> = <value>;
  */
 alterKeyspaceStatement returns [AlterKeyspaceStatement.Raw stmt]
-    @init { KeyspaceAttributes attrs = new KeyspaceAttributes(); }
-    : K_ALTER K_KEYSPACE ks=keyspaceName
-        K_WITH properties[attrs] { $stmt = new AlterKeyspaceStatement.Raw(ks, attrs); }
+    @init {
+     KeyspaceAttributes attrs = new KeyspaceAttributes();
+     boolean ifExists = false;
+    }
+    : K_ALTER K_KEYSPACE (K_IF K_EXISTS { ifExists = true; } )? ks=keyspaceName
+        K_WITH properties[attrs] { $stmt = new AlterKeyspaceStatement.Raw(ks, attrs, ifExists); }
     ;
 
 /**
  * ALTER TABLE <table> ALTER <column> TYPE <newtype>;
- * ALTER TABLE <table> ADD <column> <newtype>; | ALTER TABLE <table> ADD (<column> <newtype>,<column1> <newtype1>..... <column n> <newtype n>)
- * ALTER TABLE <table> DROP <column>; | ALTER TABLE <table> DROP ( <column>,<column1>.....<column n>)
- * ALTER TABLE <table> RENAME <column> TO <column>;
- * ALTER TABLE <table> WITH <property> = <value>;
+ * ALTER TABLE [IF EXISTS] <table> ADD [IF NOT EXISTS] <column> <newtype>; | ALTER TABLE [IF EXISTS] <table> ADD [IF NOT EXISTS] (<column> <newtype>,<column1> <newtype1>..... <column n> <newtype n>)
+ * ALTER TABLE [IF EXISTS] <table> DROP [IF EXISTS] <column>; | ALTER TABLE [IF EXISTS] <table> DROP [IF EXISTS] ( <column>,<column1>.....<column n>)
+ * ALTER TABLE [IF EXISTS] <table> RENAME [IF EXISTS] <column> TO <column>;
+ * ALTER TABLE [IF EXISTS] <table> WITH <property> = <value>;
  */
 alterTableStatement returns [AlterTableStatement.Raw stmt]
-    : K_ALTER K_COLUMNFAMILY cf=columnFamilyName { $stmt = new AlterTableStatement.Raw(cf); }
+    @init { boolean ifExists = false; }
+    : K_ALTER K_COLUMNFAMILY (K_IF K_EXISTS { ifExists = true; } )?
+      cf=columnFamilyName { $stmt = new AlterTableStatement.Raw(cf, ifExists); }
       (
         K_ALTER id=cident K_TYPE v=comparatorType { $stmt.alter(id, v); }
 
-      | K_ADD  (        id=ident  v=comparatorType  b=isStaticColumn { $stmt.add(id,  v,  b);  }
+      | K_ADD ( K_IF K_NOT K_EXISTS { $stmt.ifColumnNotExists(true); } )?
+              (        id=ident  v=comparatorType  b=isStaticColumn { $stmt.add(id,  v,  b);  }
                | ('('  id1=ident v1=comparatorType b1=isStaticColumn { $stmt.add(id1, v1, b1); }
                  ( ',' idn=ident vn=comparatorType bn=isStaticColumn { $stmt.add(idn, vn, bn); } )* ')') )
 
-      | K_DROP (        id=ident { $stmt.drop(id);  }
+      | K_DROP ( K_IF K_EXISTS { $stmt.ifColumnExists(true); } )?
+               (       id=ident { $stmt.drop(id);  }
                | ('('  id1=ident { $stmt.drop(id1); }
                  ( ',' idn=ident { $stmt.drop(idn); } )* ')') )
                ( K_USING K_TIMESTAMP t=INTEGER { $stmt.timestamp(Long.parseLong(Constants.Literal.integer($t.text).getText())); } )?
 
-      | K_RENAME id1=ident K_TO toId1=ident { $stmt.rename(id1, toId1); }
-         ( K_AND idn=ident K_TO toIdn=ident { $stmt.rename(idn, toIdn); } )*
+      | K_RENAME ( K_IF K_EXISTS { $stmt.ifColumnExists(true); } )?
+               (        id1=ident K_TO toId1=ident { $stmt.rename(id1, toId1); }
+                ( K_AND idn=ident K_TO toIdn=ident { $stmt.rename(idn, toIdn); } )* )
 
       | K_DROP K_COMPACT K_STORAGE { $stmt.dropCompactStorage(); }
 
@@ -961,28 +969,32 @@
 alterMaterializedViewStatement returns [AlterViewStatement.Raw stmt]
     @init {
         TableAttributes attrs = new TableAttributes();
+        boolean ifExists = false;
     }
-    : K_ALTER K_MATERIALIZED K_VIEW name=columnFamilyName
+    : K_ALTER K_MATERIALIZED K_VIEW (K_IF K_EXISTS { ifExists = true; } )? name=columnFamilyName
           K_WITH properties[attrs]
     {
-        $stmt = new AlterViewStatement.Raw(name, attrs);
+        $stmt = new AlterViewStatement.Raw(name, attrs, ifExists);
     }
     ;
 
 
 /**
- * ALTER TYPE <name> ALTER <field> TYPE <newtype>;
- * ALTER TYPE <name> ADD <field> <newtype>;
- * ALTER TYPE <name> RENAME <field> TO <newtype> AND ...;
+ * ALTER TYPE [IF EXISTS] <name> ALTER <field> TYPE <newtype>;
+ * ALTER TYPE [IF EXISTS] <name> ADD [IF NOT EXISTS] <field> <newtype>;
+ * ALTER TYPE [IF EXISTS] <name> RENAME [IF EXISTS] <field> TO <newfield> AND ...;
  */
 alterTypeStatement returns [AlterTypeStatement.Raw stmt]
-    : K_ALTER K_TYPE name=userTypeName { $stmt = new AlterTypeStatement.Raw(name); }
+    @init {
+        boolean ifExists = false;
+    }
+    : K_ALTER K_TYPE (K_IF K_EXISTS { ifExists = true; } )? name=userTypeName { $stmt = new AlterTypeStatement.Raw(name, ifExists); }
       (
         K_ALTER   f=fident K_TYPE v=comparatorType { $stmt.alter(f, v); }
 
-      | K_ADD     f=fident v=comparatorType        { $stmt.add(f, v); }
+      | K_ADD (K_IF K_NOT K_EXISTS { $stmt.ifFieldNotExists(true); } )?     f=fident v=comparatorType        { $stmt.add(f, v); }
 
-      | K_RENAME f1=fident K_TO toF1=fident        { $stmt.rename(f1, toF1); }
+      | K_RENAME (K_IF K_EXISTS { $stmt.ifFieldExists(true); } )? f1=fident K_TO toF1=fident        { $stmt.rename(f1, toF1); }
          ( K_AND fn=fident K_TO toFn=fident        { $stmt.rename(fn, toFn); } )*
       )
     ;
@@ -1037,7 +1049,7 @@
     ;
 
 /**
- * GRANT <permission> ON <resource> TO <rolename>
+ * GRANT <permission>[, <permission>]* | ALL ON <resource> TO <rolename>
  */
 grantPermissionsStatement returns [GrantPermissionsStatement stmt]
     : K_GRANT
@@ -1050,7 +1062,7 @@
     ;
 
 /**
- * REVOKE <permission> ON <resource> FROM <rolename>
+ * REVOKE <permission>[, <permission>]* | ALL ON <resource> FROM <rolename>
  */
 revokePermissionsStatement returns [RevokePermissionsStatement stmt]
     : K_REVOKE
@@ -1105,7 +1117,7 @@
 
 permissionOrAll returns [Set<Permission> perms]
     : K_ALL ( K_PERMISSIONS )?       { $perms = Permission.ALL; }
-    | p=permission ( K_PERMISSION )? { $perms = EnumSet.of($p.perm); }
+    | p=permission ( K_PERMISSION )? { $perms = EnumSet.of($p.perm); } ( ',' p=permission ( K_PERMISSION )? { $perms.add($p.perm); } )*
     ;
 
 resource returns [IResource res]
@@ -1118,8 +1130,8 @@
 dataResource returns [DataResource res]
     : K_ALL K_KEYSPACES { $res = DataResource.root(); }
     | K_KEYSPACE ks = keyspaceName { $res = DataResource.keyspace($ks.id); }
-    | ( K_COLUMNFAMILY )? cf = columnFamilyName
-      { $res = DataResource.table($cf.name.getKeyspace(), $cf.name.getName()); }
+    | ( K_COLUMNFAMILY )? cf = columnFamilyName { $res = DataResource.table($cf.name.getKeyspace(), $cf.name.getName()); }
+    | K_ALL K_TABLES K_IN K_KEYSPACE ks = keyspaceName { $res = DataResource.allTables($ks.id); }
     ;
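With the ',' repetition in permissionOrAll and the new ALL TABLES IN KEYSPACE data resource, a single statement can carry several permissions and target every table of a keyspace. A hedged sketch via the Python driver; it assumes PasswordAuthenticator/CassandraAuthorizer are enabled, the default superuser credentials still work, and the role app_role and keyspace ks already exist:

    from cassandra.auth import PlainTextAuthProvider
    from cassandra.cluster import Cluster

    auth = PlainTextAuthProvider(username='cassandra', password='cassandra')
    cluster = Cluster(['127.0.0.1'], auth_provider=auth)
    session = cluster.connect()

    # several permissions in a single GRANT
    session.execute("GRANT MODIFY, SELECT ON KEYSPACE ks TO app_role")

    # the new data resource: every table in a keyspace
    session.execute("REVOKE DROP ON ALL TABLES IN KEYSPACE ks FROM app_role")

    cluster.shutdown()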
 
 jmxResource returns [JMXResource res]
@@ -1169,22 +1181,33 @@
       ( K_WITH userPassword[opts] )?
       ( K_SUPERUSER { superuser = true; } | K_NOSUPERUSER { superuser = false; } )?
       { opts.setOption(IRoleManager.Option.SUPERUSER, superuser);
+        if (opts.getPassword().isPresent() && opts.getHashedPassword().isPresent())
+        {
+           throw new SyntaxException("Options 'password' and 'hashed password' are mutually exclusive");
+        }
         $stmt = new CreateRoleStatement(name, opts, DCPermissions.all(), ifNotExists); }
     ;
 
 /**
- * ALTER USER <username> [WITH PASSWORD <password>] [SUPERUSER|NOSUPERUSER]
+ * ALTER USER [IF EXISTS] <username> [WITH PASSWORD <password>] [SUPERUSER|NOSUPERUSER]
  */
 alterUserStatement returns [AlterRoleStatement stmt]
     @init {
         RoleOptions opts = new RoleOptions();
         RoleName name = new RoleName();
+        boolean ifExists = false;
     }
-    : K_ALTER K_USER u=username { name.setName($u.text, true); }
+    : K_ALTER K_USER (K_IF K_EXISTS { ifExists = true; })? u=username { name.setName($u.text, true); }
       ( K_WITH userPassword[opts] )?
       ( K_SUPERUSER { opts.setOption(IRoleManager.Option.SUPERUSER, true); }
         | K_NOSUPERUSER { opts.setOption(IRoleManager.Option.SUPERUSER, false); } ) ?
-      {  $stmt = new AlterRoleStatement(name, opts, null); }
+      {
+         if (opts.getPassword().isPresent() && opts.getHashedPassword().isPresent())
+         {
+            throw new SyntaxException("Options 'password' and 'hashed password' are mutually exclusive");
+         }
+         $stmt = new AlterRoleStatement(name, opts, null, ifExists);
+      }
     ;
 
 /**
@@ -1232,12 +1255,16 @@
         {
             opts.setOption(IRoleManager.Option.SUPERUSER, false);
         }
+        if (opts.getPassword().isPresent() && opts.getHashedPassword().isPresent())
+        {
+            throw new SyntaxException("Options 'password' and 'hashed password' are mutually exclusive");
+        }
         $stmt = new CreateRoleStatement(name, opts, dcperms.build(), ifNotExists);
       }
     ;
 
 /**
- * ALTER ROLE <rolename> [ [WITH] option [ [AND] option ]* ]
+ * ALTER ROLE [IF EXISTS] <rolename> [ [WITH] option [ [AND] option ]* ]
  *
  * where option can be:
  *  PASSWORD = '<password>'
@@ -1249,10 +1276,17 @@
     @init {
         RoleOptions opts = new RoleOptions();
         DCPermissions.Builder dcperms = DCPermissions.builder();
+        boolean ifExists = false;
     }
-    : K_ALTER K_ROLE name=userOrRoleName
+    : K_ALTER K_ROLE (K_IF K_EXISTS { ifExists = true; })? name=userOrRoleName
       ( K_WITH roleOptions[opts, dcperms] )?
-      {  $stmt = new AlterRoleStatement(name, opts, dcperms.isModified() ? dcperms.build() : null); }
+      {
+         if (opts.getPassword().isPresent() && opts.getHashedPassword().isPresent())
+         {
+            throw new SyntaxException("Options 'password' and 'hashed password' are mutually exclusive");
+         }
+         $stmt = new AlterRoleStatement(name, opts, dcperms.isModified() ? dcperms.build() : null, ifExists);
+      }
     ;
 
 /**
@@ -1286,6 +1320,7 @@
 
 roleOption[RoleOptions opts, DCPermissions.Builder dcperms]
     :  K_PASSWORD '=' v=STRING_LITERAL { opts.setOption(IRoleManager.Option.PASSWORD, $v.text); }
+    |  K_HASHED K_PASSWORD '=' v=STRING_LITERAL { opts.setOption(IRoleManager.Option.HASHED_PASSWORD, $v.text); }
     |  K_OPTIONS '=' m=fullMapLiteral { opts.setOption(IRoleManager.Option.OPTIONS, convertPropertyMap(m)); }
     |  K_SUPERUSER '=' b=BOOLEAN { opts.setOption(IRoleManager.Option.SUPERUSER, Boolean.valueOf($b.text)); }
     |  K_LOGIN '=' b=BOOLEAN { opts.setOption(IRoleManager.Option.LOGIN, Boolean.valueOf($b.text)); }
@@ -1300,6 +1335,7 @@
 // for backwards compatibility in CREATE/ALTER USER, this has no '='
 userPassword[RoleOptions opts]
     :  K_PASSWORD v=STRING_LITERAL { opts.setOption(IRoleManager.Option.PASSWORD, $v.text); }
+    |  K_HASHED K_PASSWORD v=STRING_LITERAL { opts.setOption(IRoleManager.Option.HASHED_PASSWORD, $v.text); }
     ;
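The HASHED variant lets a role be created or altered from an already-hashed secret (for example the output of the hash_password tool added to the tools packaging above) instead of a plain-text password; the grammar actions above also reject statements that specify both forms at once. A rough sketch, where the credentials and the bcrypt-style hash value are placeholders only:

    from cassandra.auth import PlainTextAuthProvider
    from cassandra.cluster import Cluster

    # placeholder value; in practice use a real bcrypt hash, e.g. one printed by hash_password
    HASHED = '$2a$10$placeholderplaceholderplaceh'

    auth = PlainTextAuthProvider(username='cassandra', password='cassandra')
    cluster = Cluster(['127.0.0.1'], auth_provider=auth)
    session = cluster.connect()

    # PASSWORD and HASHED PASSWORD are mutually exclusive within one statement
    session.execute("CREATE ROLE IF NOT EXISTS app_role WITH LOGIN = true AND HASHED PASSWORD = '%s'" % HASHED)
    session.execute("ALTER ROLE IF EXISTS app_role WITH HASHED PASSWORD = '%s'" % HASHED)

    cluster.shutdown()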
 
 /**
@@ -1554,9 +1590,10 @@
     ;
 
 simpleTerm returns [Term.Raw term]
-    : v=value                                 { $term = v; }
-    | f=function                              { $term = f; }
-    | '(' c=comparatorType ')' t=simpleTerm   { $term = new TypeCast(c, t); }
+    : v=value                                        { $term = v; }
+    | f=function                                     { $term = f; }
+    | '(' c=comparatorType ')' t=simpleTerm          { $term = new TypeCast(c, t); }
+    | K_CAST '(' t=simpleTerm K_AS n=native_type ')' { $term = FunctionCall.Raw.newCast(t, n); }
     ;
 
 columnOperation[List<Pair<ColumnIdentifier, Operation.RawUpdate>> operations]
@@ -1625,6 +1662,7 @@
     // Note: we'll reject duplicates later
     : key=cident
         ( op=relationType t=term { conditions.add(Pair.create(key, ColumnCondition.Raw.simpleCondition(t, op))); }
+        | op=containsOperator t=term { conditions.add(Pair.create(key, ColumnCondition.Raw.simpleCondition(t, op))); }
         | K_IN
             ( values=singleColumnInValues { conditions.add(Pair.create(key, ColumnCondition.Raw.simpleInCondition(values))); }
             | marker=inMarker { conditions.add(Pair.create(key, ColumnCondition.Raw.simpleInCondition(marker))); }
@@ -1776,7 +1814,7 @@
     | K_COUNTER   { $t = CQL3Type.Native.COUNTER; }
     | K_DECIMAL   { $t = CQL3Type.Native.DECIMAL; }
     | K_DOUBLE    { $t = CQL3Type.Native.DOUBLE; }
-    | K_DURATION    { $t = CQL3Type.Native.DURATION; }
+    | K_DURATION  { $t = CQL3Type.Native.DURATION; }
     | K_FLOAT     { $t = CQL3Type.Native.FLOAT; }
     | K_INET      { $t = CQL3Type.Native.INET;}
     | K_INT       { $t = CQL3Type.Native.INT; }
@@ -1868,6 +1906,7 @@
         | K_NOLOGIN
         | K_OPTIONS
         | K_PASSWORD
+        | K_HASHED
         | K_EXISTS
         | K_CUSTOM
         | K_TRIGGER
diff --git a/src/java/org/apache/cassandra/audit/AuditLogEntry.java b/src/java/org/apache/cassandra/audit/AuditLogEntry.java
index 4d3b867..02db076 100644
--- a/src/java/org/apache/cassandra/audit/AuditLogEntry.java
+++ b/src/java/org/apache/cassandra/audit/AuditLogEntry.java
@@ -32,6 +32,8 @@
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class AuditLogEntry
 {
     private final InetAddressAndPort host = FBUtilities.getBroadcastAddressAndPort();
@@ -74,10 +76,10 @@
         StringBuilder builder = new StringBuilder(100);
         builder.append("user:").append(user)
                .append("|host:").append(host)
-               .append("|source:").append(source.address);
-        if (source.port > 0)
+               .append("|source:").append(source.getAddress());
+        if (source.getPort() > 0)
         {
-            builder.append("|port:").append(source.port);
+            builder.append("|port:").append(source.getPort());
         }
 
         builder.append("|timestamp:").append(timestamp)
@@ -214,7 +216,7 @@
                 user = AuthenticatedUser.SYSTEM_USER.getName();
             }
 
-            timestamp = System.currentTimeMillis();
+            timestamp = currentTimeMillis();
         }
 
         public Builder(AuditLogEntry entry)
@@ -312,7 +314,7 @@
 
         public AuditLogEntry build()
         {
-            timestamp = timestamp > 0 ? timestamp : System.currentTimeMillis();
+            timestamp = timestamp > 0 ? timestamp : currentTimeMillis();
             return new AuditLogEntry(type, source, user, timestamp, batch, keyspace, scope, operation, options, state);
         }
     }
diff --git a/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java b/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java
index 616658c..9db4ce0 100644
--- a/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java
+++ b/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java
@@ -24,4 +24,4 @@
 public enum AuditLogEntryCategory
 {
     QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/audit/AuditLogFilter.java b/src/java/org/apache/cassandra/audit/AuditLogFilter.java
index 163114d..d240e78 100644
--- a/src/java/org/apache/cassandra/audit/AuditLogFilter.java
+++ b/src/java/org/apache/cassandra/audit/AuditLogFilter.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.audit;
 
-import java.util.HashSet;
 import java.util.Set;
 
 import com.google.common.collect.ImmutableSet;
@@ -25,18 +24,18 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class AuditLogFilter
+final class AuditLogFilter
 {
     private static final Logger logger = LoggerFactory.getLogger(AuditLogFilter.class);
 
     private static ImmutableSet<String> EMPTY_FILTERS = ImmutableSet.of();
 
-    private final ImmutableSet<String> excludedKeyspaces;
-    private final ImmutableSet<String> includedKeyspaces;
-    private final ImmutableSet<String> excludedCategories;
-    private final ImmutableSet<String> includedCategories;
-    private final ImmutableSet<String> includedUsers;
-    private final ImmutableSet<String> excludedUsers;
+    final ImmutableSet<String> excludedKeyspaces;
+    final ImmutableSet<String> includedKeyspaces;
+    final ImmutableSet<String> excludedCategories;
+    final ImmutableSet<String> includedCategories;
+    final ImmutableSet<String> includedUsers;
+    final ImmutableSet<String> excludedUsers;
 
     private AuditLogFilter(ImmutableSet<String> excludedKeyspaces, ImmutableSet<String> includedKeyspaces, ImmutableSet<String> excludedCategories, ImmutableSet<String> includedCategories, ImmutableSet<String> excludedUsers, ImmutableSet<String> includedUsers)
     {
diff --git a/src/java/org/apache/cassandra/audit/AuditLogManager.java b/src/java/org/apache/cassandra/audit/AuditLogManager.java
index 88e0251..ed10460 100644
--- a/src/java/org/apache/cassandra/audit/AuditLogManager.java
+++ b/src/java/org/apache/cassandra/audit/AuditLogManager.java
@@ -25,6 +25,7 @@
 import java.util.UUID;
 
 import javax.annotation.Nullable;
+import javax.management.openmbean.CompositeData;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
@@ -49,29 +50,32 @@
 import org.apache.cassandra.transport.Message;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.MBeanWrapper;
 
 /**
  * Central location for managing the logging of client/user-initiated actions (like queries, login commands, and so on).
  *
  */
-public class AuditLogManager implements QueryEvents.Listener, AuthEvents.Listener
+public class AuditLogManager implements QueryEvents.Listener, AuthEvents.Listener, AuditLogManagerMBean
 {
     private static final Logger logger = LoggerFactory.getLogger(AuditLogManager.class);
+
+    public static final String MBEAN_NAME = "org.apache.cassandra.db:type=AuditLogManager";
     public static final AuditLogManager instance = new AuditLogManager();
 
     // auditLogger can write anywhere, as it's pluggable (logback, BinLog, DiagnosticEvents, etc ...)
     private volatile IAuditLogger auditLogger;
-
     private volatile AuditLogFilter filter;
+    private volatile AuditLogOptions auditLogOptions;
 
     private AuditLogManager()
     {
-        final AuditLogOptions auditLogOptions = DatabaseDescriptor.getAuditLoggingOptions();
+        auditLogOptions = DatabaseDescriptor.getAuditLoggingOptions();
 
         if (auditLogOptions.enabled)
         {
             logger.info("Audit logging is enabled.");
-            auditLogger = getAuditLogger(auditLogOptions.logger);
+            auditLogger = getAuditLogger(auditLogOptions);
         }
         else
         {
@@ -86,16 +90,21 @@
     {
         if (DatabaseDescriptor.getAuditLoggingOptions().enabled)
             registerAsListener();
+
+        if (!MBeanWrapper.instance.isRegistered(MBEAN_NAME))
+            MBeanWrapper.instance.registerMBean(this, MBEAN_NAME);
     }
 
-    private IAuditLogger getAuditLogger(ParameterizedClass logger) throws ConfigurationException
+    private IAuditLogger getAuditLogger(AuditLogOptions options) throws ConfigurationException
     {
-        if (logger.class_name != null)
+        final ParameterizedClass logger = options.logger;
+
+        if (logger != null && logger.class_name != null)
         {
             return FBUtilities.newAuditLogger(logger.class_name, logger.parameters == null ? Collections.emptyMap() : logger.parameters);
         }
 
-        return FBUtilities.newAuditLogger(BinAuditLogger.class.getName(), Collections.emptyMap());
+        return new BinAuditLogger(options);
     }
 
     @VisibleForTesting
@@ -109,6 +118,17 @@
         return auditLogger.isEnabled();
     }
 
+    public AuditLogOptions getAuditLogOptions()
+    {
+        return auditLogger.isEnabled() ? auditLogOptions : DatabaseDescriptor.getAuditLoggingOptions();
+    }
+
+    @Override
+    public CompositeData getAuditLogOptionsData()
+    {
+        return AuditLogOptionsCompositeData.toCompositeData(AuditLogManager.instance.getAuditLogOptions());
+    }
+
     /**
      * Logs AuditLogEntry to the standard audit logger
      * @param logEntry AuditLogEntry to be logged
@@ -166,16 +186,28 @@
      */
     public synchronized void enable(AuditLogOptions auditLogOptions) throws ConfigurationException
     {
-        // always reload the filters
-        filter = AuditLogFilter.create(auditLogOptions);
-
-        // next, check to see if we're changing the logging implementation; if not, keep the same instance and bail.
-        // note: auditLogger should never be null
         IAuditLogger oldLogger = auditLogger;
-        if (oldLogger.getClass().getSimpleName().equals(auditLogOptions.logger.class_name))
-            return;
 
-        auditLogger = getAuditLogger(auditLogOptions.logger);
+        try
+        {
+            // next, check to see if we're changing the logging implementation; if not, keep the same instance and bail.
+            // note: auditLogger should never be null
+            if (oldLogger.getClass().getSimpleName().equals(auditLogOptions.logger.class_name))
+                return;
+
+            auditLogger = getAuditLogger(auditLogOptions);
+            // only switch to the new audit log options once getAuditLogger() has succeeded,
+            // otherwise we could be left with the new options but the old logger
+            this.auditLogOptions = auditLogOptions;
+        }
+        finally
+        {
+            // always reload the filters
+            filter = AuditLogFilter.create(auditLogOptions);
+            // update options so the changed filters are reflected in options,
+            // for example upon nodetool's getauditlog command
+            updateAuditLogOptions(this.auditLogOptions, filter);
+        }
 
         // note that we might already be registered here and we rely on the fact that Query/AuthEvents have a Set of listeners
         registerAsListener();
@@ -185,6 +217,16 @@
         oldLogger.stop();
     }
 
+    private void updateAuditLogOptions(final AuditLogOptions options, final AuditLogFilter filter)
+    {
+        options.included_keyspaces = String.join(",", filter.includedKeyspaces.asList());
+        options.excluded_keyspaces = String.join(",", filter.excludedKeyspaces.asList());
+        options.included_categories = String.join(",", filter.includedCategories.asList());
+        options.excluded_categories = String.join(",", filter.excludedCategories.asList());
+        options.included_users = String.join(",", filter.includedUsers.asList());
+        options.excluded_users = String.join(",", filter.excludedUsers.asList());
+    }
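Because enable() now writes the effective filters back into auditLogOptions, querying the node after enabling reflects what is actually being filtered. A rough way to observe that from Python, assuming nodetool is on the PATH and JMX is reachable with its defaults:

    import subprocess

    # enable audit logging with the node's configured defaults, then dump the effective options
    subprocess.run(["nodetool", "enableauditlog"], check=True)
    print(subprocess.run(["nodetool", "getauditlog"], check=True,
                         capture_output=True, text=True).stdout)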
+
     private void registerAsListener()
     {
         QueryEvents.instance.registerListener(this);
diff --git a/src/java/org/apache/cassandra/audit/AuditLogManagerMBean.java b/src/java/org/apache/cassandra/audit/AuditLogManagerMBean.java
new file mode 100644
index 0000000..5d2f661
--- /dev/null
+++ b/src/java/org/apache/cassandra/audit/AuditLogManagerMBean.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.audit;
+
+import javax.management.openmbean.CompositeData;
+
+public interface AuditLogManagerMBean
+{
+    /**
+     * Returns the current state of Audit Log framework.
+     */
+    CompositeData getAuditLogOptionsData();
+}
diff --git a/src/java/org/apache/cassandra/audit/AuditLogOptions.java b/src/java/org/apache/cassandra/audit/AuditLogOptions.java
index e8691df..e9e31c9 100644
--- a/src/java/org/apache/cassandra/audit/AuditLogOptions.java
+++ b/src/java/org/apache/cassandra/audit/AuditLogOptions.java
@@ -17,12 +17,21 @@
  */
 package org.apache.cassandra.audit;
 
+import java.nio.file.Path;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
 
+import com.google.common.base.Strings;
 import org.apache.commons.lang3.StringUtils;
 
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.utils.binlog.BinLogOptions;
 
 public class AuditLogOptions extends BinLogOptions
@@ -37,11 +46,242 @@
     public String included_users = StringUtils.EMPTY;
     public String excluded_users = StringUtils.EMPTY;
 
-    /**
-     * AuditLogs directory can be configured using `cassandra.logdir.audit` or default is set to `cassandra.logdir` + /audit/
-     */
-    public String audit_logs_dir = System.getProperty("cassandra.logdir.audit",
-                                                      System.getProperty("cassandra.logdir",".")+"/audit/");
+    public String audit_logs_dir;
+
+    public AuditLogOptions()
+    {
+        String auditLogDir = CassandraRelevantProperties.LOG_DIR_AUDIT.getString();
+        String logDir = CassandraRelevantProperties.LOG_DIR.getString() + "/audit";
+        Path path = auditLogDir == null ? File.getPath(logDir) : File.getPath(auditLogDir);
+        audit_logs_dir = path.normalize().toString();
+    }
+
+    public static AuditLogOptions validate(final AuditLogOptions options) throws ConfigurationException
+    {
+        // keyspaces and users are deliberately not validated here:
+        // logging may be enabled for these entities before they exist,
+        // so they are picked up automatically once created
+
+        validateCategories(options.included_categories);
+        validateCategories(options.excluded_categories);
+
+        // other fields in BinLogOptions are validated upon BinAuditLogger initialisation
+
+        return options;
+    }
+
+    public static class Builder
+    {
+        private boolean enabled;
+        private ParameterizedClass logger;
+        private String includedKeyspaces;
+        private String excludedKeyspaces;
+        private String includedCategories;
+        private String excludedCategories;
+        private String includedUsers;
+        private String excludedUsers;
+        private String auditLogDir;
+        private int maxQueueWeight;
+        private int maxArchiveRetries;
+        private String rollCycle;
+        private String archiveCommand;
+        private boolean block;
+        private long maxLogSize;
+
+        public Builder()
+        {
+            this(new AuditLogOptions());
+        }
+
+        public Builder(final AuditLogOptions opts)
+        {
+            this.enabled = opts.enabled;
+            this.logger = opts.logger;
+            this.includedKeyspaces = opts.included_keyspaces;
+            this.excludedKeyspaces = opts.excluded_keyspaces;
+            this.includedCategories = opts.included_categories;
+            this.excludedCategories = opts.excluded_categories;
+            this.includedUsers = opts.included_users;
+            this.excludedUsers = opts.excluded_users;
+            this.auditLogDir = opts.audit_logs_dir;
+            this.maxQueueWeight = opts.max_queue_weight;
+            this.maxArchiveRetries = opts.max_archive_retries;
+            this.rollCycle = opts.roll_cycle;
+            this.archiveCommand = opts.archive_command;
+            this.block = opts.block;
+            this.maxLogSize = opts.max_log_size;
+        }
+
+        public Builder withEnabled(boolean enabled)
+        {
+            this.enabled = enabled;
+            return this;
+        }
+
+        public Builder withLogger(final String loggerName, Map<String, String> parameters)
+        {
+
+            if (loggerName != null && !loggerName.trim().isEmpty())
+            {
+                this.logger = new ParameterizedClass(loggerName.trim(), parameters);
+            }
+
+            return this;
+        }
+
+        public Builder withIncludedKeyspaces(final String includedKeyspaces)
+        {
+            sanitise(includedKeyspaces).map(v -> this.includedKeyspaces = v);
+            return this;
+        }
+
+        public Builder withExcludedKeyspaces(final String excludedKeyspaces)
+        {
+            sanitise(excludedKeyspaces).map(v -> this.excludedKeyspaces = v);
+            return this;
+        }
+
+        public Builder withIncludedCategories(final String includedCategories)
+        {
+            sanitise(includedCategories).map(v -> this.includedCategories = v.toUpperCase());
+            return this;
+        }
+
+        public Builder withExcludedCategories(final String excludedCategories)
+        {
+            sanitise(excludedCategories).map(v -> this.excludedCategories = v.toUpperCase());
+            return this;
+        }
+
+        public Builder withIncludedUsers(final String includedUsers)
+        {
+            sanitise(includedUsers).map(v -> this.includedUsers = v);
+            return this;
+        }
+
+        public Builder withExcludedUsers(final String excludedUsers)
+        {
+            sanitise(excludedUsers).map(v -> this.excludedUsers = v);
+            return this;
+        }
+
+        public Builder withAuditLogDir(final String auditLogDir)
+        {
+            this.auditLogDir = auditLogDir;
+            return this;
+        }
+
+        public Builder withRollCycle(final String rollCycle)
+        {
+            sanitise(rollCycle).map(v -> this.rollCycle = v.toUpperCase());
+            return this;
+        }
+
+        public Builder withArchiveCommand(final String archiveCommand)
+        {
+            if (archiveCommand != null)
+            {
+                this.archiveCommand = archiveCommand;
+            }
+            return this;
+        }
+
+        public Builder withBlock(final Boolean block)
+        {
+            if (block != null)
+            {
+                this.block = block;
+            }
+            return this;
+        }
+
+        public Builder withMaxLogSize(final long maxLogSize)
+        {
+            if (maxLogSize != Long.MIN_VALUE)
+            {
+                this.maxLogSize = maxLogSize;
+            }
+            return this;
+        }
+
+        public Builder withMaxArchiveRetries(final int maxArchiveRetries)
+        {
+            if (maxArchiveRetries != Integer.MIN_VALUE)
+            {
+                this.maxArchiveRetries = maxArchiveRetries;
+            }
+            return this;
+        }
+
+        public Builder withMaxQueueWeight(final int maxQueueWeight)
+        {
+            if (maxQueueWeight != Integer.MIN_VALUE)
+            {
+                this.maxQueueWeight = maxQueueWeight;
+            }
+            return this;
+        }
+
+        public AuditLogOptions build()
+        {
+            final AuditLogOptions opts = new AuditLogOptions();
+
+            opts.enabled = this.enabled;
+            opts.logger = this.logger;
+            sanitise(this.includedKeyspaces).map(v -> opts.included_keyspaces = v);
+            sanitise(this.excludedKeyspaces).map(v -> opts.excluded_keyspaces = v);
+            sanitise(this.includedCategories).map(v -> opts.included_categories = v.toUpperCase());
+            sanitise(this.excludedCategories).map(v -> opts.excluded_categories = v.toUpperCase());
+            sanitise(this.includedUsers).map(v -> opts.included_users = v);
+            sanitise(this.excludedUsers).map(v -> opts.excluded_users = v);
+            opts.roll_cycle = this.rollCycle;
+            opts.audit_logs_dir = this.auditLogDir;
+            opts.max_queue_weight = this.maxQueueWeight;
+            opts.max_archive_retries = this.maxArchiveRetries;
+            opts.archive_command = this.archiveCommand;
+            opts.block = this.block;
+            opts.max_log_size = this.maxLogSize;
+
+            AuditLogOptions.validate(opts);
+
+            return opts;
+        }
+
+        private static Optional<String> sanitise(final String input)
+        {
+            if (input == null || input.trim().isEmpty())
+                return Optional.empty();
+
+            return Optional.of(Arrays.stream(input.split(","))
+                                     .map(String::trim)
+                                     .map(Strings::emptyToNull)
+                                     .filter(Objects::nonNull)
+                                     .collect(Collectors.joining(",")));
+        }
+    }
+
+    private static void validateCategories(final String categories)
+    {
+        assert categories != null;
+
+        if (categories.isEmpty())
+            return;
+
+        for (final String includedCategory : categories.split(","))
+        {
+            try
+            {
+                AuditLogEntryCategory.valueOf(includedCategory);
+            }
+            catch (final IllegalArgumentException ex)
+            {
+                throw new ConfigurationException(String.format("category %s not found in %s",
+                                                               includedCategory,
+                                                               AuditLogEntryCategory.class.getName()),
+                                                 ex);
+            }
+        }
+    }
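Taken together, sanitise() and validateCategories() trim each comma-separated entry, drop empty ones, and require every category to be a member of AuditLogEntryCategory. The same normalisation, sketched in Python purely for illustration:

    CATEGORIES = {"QUERY", "DML", "DDL", "DCL", "OTHER", "AUTH", "ERROR", "PREPARE"}

    def sanitise(value):
        # trim entries, drop empties, rejoin; blank or missing input is left untouched
        if value is None or not value.strip():
            return None
        return ",".join(part.strip() for part in value.split(",") if part.strip())

    def validate_categories(value):
        for category in filter(None, (part.strip() for part in (value or "").split(","))):
            if category.upper() not in CATEGORIES:
                raise ValueError("category %s not found in AuditLogEntryCategory" % category)

    print(sanitise(" dcl , ,ddl "))   # -> "dcl,ddl"
    validate_categories("DCL,DDL")     # passes silently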
 
     public String toString()
     {
@@ -60,6 +300,7 @@
                ", block=" + block +
                ", max_queue_weight=" + max_queue_weight +
                ", max_log_size=" + max_log_size +
+               ", max_archive_retries=" + max_archive_retries +
                '}';
     }
 }
diff --git a/src/java/org/apache/cassandra/audit/AuditLogOptionsCompositeData.java b/src/java/org/apache/cassandra/audit/AuditLogOptionsCompositeData.java
new file mode 100644
index 0000000..6812e71
--- /dev/null
+++ b/src/java/org/apache/cassandra/audit/AuditLogOptionsCompositeData.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.audit;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;
+
+import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.utils.Pair;
+
+import static org.apache.cassandra.audit.AuditLogOptionsCompositeData.AuditLogOption.option;
+
+public class AuditLogOptionsCompositeData
+{
+    public static class AuditLogOption
+    {
+        // TODO these constants will be used in upcoming audit log vtable too, see CASSANDRA-16725
+        public static final String AUDIT_LOGS_DIR = "audit_logs_dir";
+        public static final String ARCHIVE_COMMAND = "archive_command";
+        public static final String ROLL_CYCLE = "roll_cycle";
+        public static final String BLOCK = "block";
+        public static final String MAX_QUEUE_WEIGHT = "max_queue_weight";
+        public static final String MAX_LOG_SIZE = "max_log_size";
+        public static final String MAX_ARCHIVE_RETRIES = "max_archive_retries";
+        public static final String ENABLED = "enabled";
+        public static final String INCLUDED_KEYSPACES = "included_keyspaces";
+        public static final String EXCLUDED_KEYSPACES = "excluded_keyspaces";
+        public static final String INCLUDED_CATEGORIES = "included_categories";
+        public static final String EXCLUDED_CATEGORIES = "excluded_categories";
+        public static final String INCLUDED_USERS = "included_users";
+        public static final String EXCLUDED_USERS = "excluded_users";
+        public static final String LOGGER = "logger";
+
+        private final String name;
+        private final String description;
+        private final OpenType<?> type;
+        private final Function<AuditLogOptions, Object> toCompositeMapping;
+        private final BiConsumer<AuditLogOptions, Object> fromCompositeMapping;
+
+        public AuditLogOption(final String name,
+                              final String description,
+                              final OpenType<?> type,
+                              final Function<AuditLogOptions, Object> toCompositeMapping,
+                              final BiConsumer<AuditLogOptions, Object> fromCompositeMapping)
+        {
+            this.name = name;
+            this.description = description;
+            this.type = type;
+            this.toCompositeMapping = toCompositeMapping;
+            this.fromCompositeMapping = fromCompositeMapping;
+        }
+
+        public static AuditLogOption option(final String name,
+                                            final String description,
+                                            final OpenType<?> type,
+                                            final Function<AuditLogOptions, Object> toCompositeMapping,
+                                            final BiConsumer<AuditLogOptions, Object> fromCompositeMapping)
+        {
+            return new AuditLogOption(name, description, type, toCompositeMapping, fromCompositeMapping);
+        }
+    }
+
+    private static final AuditLogOption[] options = new AuditLogOption[]{
+    option(AuditLogOption.AUDIT_LOGS_DIR,
+           "directory where audit data are stored",
+           SimpleType.STRING,
+           o -> o.audit_logs_dir,
+           (opts, obj) -> opts.audit_logs_dir = (String) obj),
+
+    option(AuditLogOption.ARCHIVE_COMMAND,
+           "archive command for audit data",
+           SimpleType.STRING,
+           o -> o.archive_command,
+           (opts, obj) -> opts.archive_command = (String) obj),
+
+    option(AuditLogOption.ROLL_CYCLE,
+           "how often to roll BinLog segments so they can potentially be reclaimed",
+           SimpleType.STRING,
+           o -> o.roll_cycle,
+           (opts, obj) -> opts.roll_cycle = (String) obj),
+
+    option(AuditLogOption.BLOCK,
+           "indicates if the BinLog should block if it falls behind or should drop bin log records",
+           SimpleType.BOOLEAN,
+           o -> o.block,
+           (opts, obj) -> opts.block = (Boolean) obj),
+
+    option(AuditLogOption.MAX_QUEUE_WEIGHT,
+           "maximum weight of in-memory queue for records waiting to be written to the binlog file before blocking or dropping the log records",
+           SimpleType.INTEGER,
+           o -> o.max_queue_weight,
+           (opts, obj) -> opts.max_queue_weight = (Integer) obj),
+
+    option(AuditLogOption.MAX_LOG_SIZE,
+           "maximum size of the rolled files to retain on disk before deleting the oldest file",
+           SimpleType.LONG,
+           o -> o.max_log_size,
+           (opts, obj) -> opts.max_log_size = (Long) obj),
+
+    option(AuditLogOption.MAX_ARCHIVE_RETRIES,
+           "number of times to retry an archive command",
+           SimpleType.INTEGER,
+           o -> o.max_archive_retries,
+           (opts, obj) -> opts.max_archive_retries = (Integer) obj),
+
+    option(AuditLogOption.ENABLED,
+           "boolean telling if we are enabled or not",
+           SimpleType.BOOLEAN,
+           o -> o.enabled,
+           (opts, obj) -> opts.enabled = (Boolean) obj),
+
+    option(AuditLogOption.INCLUDED_KEYSPACES,
+           "included keyspaces",
+           SimpleType.STRING,
+           o -> o.included_keyspaces,
+           (opts, obj) -> opts.included_keyspaces = (String) obj),
+
+    option(AuditLogOption.EXCLUDED_KEYSPACES,
+           "excluded keyspaces",
+           SimpleType.STRING,
+           o -> o.excluded_keyspaces,
+           (opts, obj) -> opts.excluded_keyspaces = (String) obj),
+
+    option(AuditLogOption.INCLUDED_CATEGORIES,
+           "included categories",
+           SimpleType.STRING,
+           o -> o.included_categories,
+           (opts, obj) -> opts.included_categories = (String) obj),
+
+    option(AuditLogOption.EXCLUDED_CATEGORIES,
+           "excluded categories",
+           SimpleType.STRING,
+           o -> o.excluded_categories,
+           (opts, obj) -> opts.excluded_categories = (String) obj),
+
+    option(AuditLogOption.INCLUDED_USERS,
+           "included users",
+           SimpleType.STRING,
+           o -> o.included_users,
+           (opts, obj) -> opts.included_users = (String) obj),
+
+    option(AuditLogOption.EXCLUDED_USERS,
+           "excluded users",
+           SimpleType.STRING,
+           o -> o.excluded_users,
+           (opts, obj) -> opts.excluded_users = (String) obj),
+
+    option(AuditLogOption.LOGGER,
+           "audit logger implementation class name",
+           SimpleType.STRING,
+           o -> o.logger.class_name,
+           (opts, obj) -> opts.logger = new ParameterizedClass((String) obj, new HashMap<>()))
+    };
+
+    public static final CompositeType COMPOSITE_TYPE;
+
+    static
+    {
+        try
+        {
+            COMPOSITE_TYPE = new CompositeType(AuditLogOptions.class.getName(),
+                                               "AuditLogOptions",
+                                               Arrays.stream(options).map(o -> o.name).toArray(String[]::new),
+                                               Arrays.stream(options).map(o -> o.description).toArray(String[]::new),
+                                               Arrays.stream(options).map(o -> o.type).toArray(OpenType[]::new));
+        }
+        catch (final OpenDataException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static CompositeData toCompositeData(final AuditLogOptions opts)
+    {
+        try
+        {
+            final Map<String, Object> valueMap = new HashMap<>();
+
+            for (final Pair<String, Function<AuditLogOptions, Object>> pair : Arrays.stream(options).map(o -> Pair.create(o.name, o.toCompositeMapping)).collect(Collectors.toList()))
+            {
+                valueMap.put(pair.left, pair.right.apply(opts));
+            }
+
+            return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
+        }
+        catch (final OpenDataException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static AuditLogOptions fromCompositeData(final CompositeData data)
+    {
+        assert data.getCompositeType().equals(COMPOSITE_TYPE);
+
+        final Object[] values = data.getAll(Arrays.stream(options).map(o -> o.name).toArray(String[]::new));
+        final AuditLogOptions opts = new AuditLogOptions();
+
+        for (int i = 0; i < values.length; i++)
+        {
+            options[i].fromCompositeMapping.accept(opts, values[i]);
+        }
+
+        return opts;
+    }
+}
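A minimal round-trip sketch (not part of the patch) of how the two static methods above are intended to be used together; the option values below are illustrative only.

    AuditLogOptions opts = new AuditLogOptions();
    opts.roll_cycle = "HOURLY";                        // written back by the ROLL_CYCLE mapping
    opts.audit_logs_dir = "/var/log/cassandra/audit";  // illustrative path

    CompositeData asComposite = AuditLogOptionsCompositeData.toCompositeData(opts);
    AuditLogOptions restored = AuditLogOptionsCompositeData.fromCompositeData(asComposite);
    assert restored.roll_cycle.equals(opts.roll_cycle);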
diff --git a/src/java/org/apache/cassandra/audit/BinAuditLogger.java b/src/java/org/apache/cassandra/audit/BinAuditLogger.java
index 95a53f1..607d9fe 100644
--- a/src/java/org/apache/cassandra/audit/BinAuditLogger.java
+++ b/src/java/org/apache/cassandra/audit/BinAuditLogger.java
@@ -17,17 +17,16 @@
  */
 package org.apache.cassandra.audit;
 
-import java.nio.file.Paths;
 import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Ints;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import net.openhft.chronicle.wire.WireOut;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.binlog.BinLog;
 import org.apache.cassandra.utils.concurrent.WeightedQueue;
@@ -41,11 +40,9 @@
 
     private volatile BinLog binLog;
 
-    public BinAuditLogger(Map<String, String> params)
+    public BinAuditLogger(AuditLogOptions auditLoggingOptions)
     {
-        AuditLogOptions auditLoggingOptions = DatabaseDescriptor.getAuditLoggingOptions();
-
-        this.binLog = new BinLog.Builder().path(Paths.get(auditLoggingOptions.audit_logs_dir))
+        this.binLog = new BinLog.Builder().path(File.getPath(auditLoggingOptions.audit_logs_dir))
                                           .rollCycle(auditLoggingOptions.roll_cycle)
                                           .blocking(auditLoggingOptions.block)
                                           .maxQueueWeight(auditLoggingOptions.max_queue_weight)
@@ -55,6 +52,11 @@
                                           .build(false);
     }
 
+    public BinAuditLogger(Map<String, String> params)
+    {
+        this(DatabaseDescriptor.getAuditLoggingOptions());
+    }
+
     /**
      * Stop the audit log leaving behind any generated files.
      */
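The new constructor lets the logger be built from explicit options rather than the global DatabaseDescriptor configuration, e.g. in tests. A small sketch, with an illustrative directory:

    AuditLogOptions options = new AuditLogOptions();
    options.audit_logs_dir = "/tmp/audit";   // any writable directory
    BinAuditLogger auditLogger = new BinAuditLogger(options);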
diff --git a/src/java/org/apache/cassandra/auth/AllowAllAuthorizer.java b/src/java/org/apache/cassandra/auth/AllowAllAuthorizer.java
index 3b40979..a943db0 100644
--- a/src/java/org/apache/cassandra/auth/AllowAllAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/AllowAllAuthorizer.java
@@ -33,12 +33,12 @@
         return resource.applicablePermissions();
     }
 
-    public void grant(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource to)
+    public Set<Permission> grant(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource to)
     {
         throw new UnsupportedOperationException("GRANT operation is not supported by AllowAllAuthorizer");
     }
 
-    public void revoke(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource from)
+    public Set<Permission> revoke(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource from)
     {
         throw new UnsupportedOperationException("REVOKE operation is not supported by AllowAllAuthorizer");
     }
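grant() and revoke() now report which permissions actually changed (AllowAllAuthorizer simply keeps throwing). A hedged sketch of a caller using the returned delta; the variables are placeholders:

    Set<Permission> granted = authorizer.grant(performer, permissions, resource, grantee);
    if (granted.isEmpty())
        logger.debug("Grantee already held every requested permission on {}", resource);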
diff --git a/src/java/org/apache/cassandra/auth/AuthCache.java b/src/java/org/apache/cassandra/auth/AuthCache.java
index 6393da7..66a2a4f 100644
--- a/src/java/org/apache/cassandra/auth/AuthCache.java
+++ b/src/java/org/apache/cassandra/auth/AuthCache.java
@@ -18,43 +18,82 @@
 
 package org.apache.cassandra.auth;
 
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.BiPredicate;
 import java.util.function.BooleanSupplier;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.IntConsumer;
 import java.util.function.IntSupplier;
+import java.util.function.Supplier;
 
+import com.google.common.util.concurrent.Uninterruptibles;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.LoadingCache;
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
+import com.github.benmanes.caffeine.cache.Policy;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.concurrent.Shutdownable;
+import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.MBeanWrapper;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 
-public class AuthCache<K, V> implements AuthCacheMBean
+public class AuthCache<K, V> implements AuthCacheMBean, Shutdownable
 {
     private static final Logger logger = LoggerFactory.getLogger(AuthCache.class);
 
-    private static final String MBEAN_NAME_BASE = "org.apache.cassandra.auth:type=";
+    public static final String MBEAN_NAME_BASE = "org.apache.cassandra.auth:type=";
+
+    // We expect default values on cache retries and interval to be sufficient for everyone but have this escape hatch
+    // just in case.
+    static final String CACHE_LOAD_RETRIES_PROPERTY = "cassandra.auth_cache.warming.max_retries";
+    static final String CACHE_LOAD_RETRY_INTERVAL_PROPERTY = "cassandra.auth_cache.warming.retry_interval_ms";
+
+    private volatile ScheduledFuture cacheRefresher = null;
+
+    // Keep a handle on created instances so their executors can be terminated cleanly
+    private static final Set<Shutdownable> REGISTRY = new HashSet<>(4);
+
+    public static void shutdownAllAndWait(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
+    {
+        ExecutorUtils.shutdownNowAndWait(timeout, unit, REGISTRY);
+    }
 
     /**
      * Underlying cache. LoadingCache will call underlying load function on {@link #get} if key is not present
      */
     protected volatile LoadingCache<K, V> cache;
-    private DebuggableThreadPoolExecutor cacheRefreshExecutor;
+    private ExecutorPlus cacheRefreshExecutor;
 
-    private String name;
-    private IntConsumer setValidityDelegate;
-    private IntSupplier getValidityDelegate;
-    private IntConsumer setUpdateIntervalDelegate;
-    private IntSupplier getUpdateIntervalDelegate;
-    private IntConsumer setMaxEntriesDelegate;
-    private IntSupplier getMaxEntriesDelegate;
-    private Function<K, V> loadFunction;
-    private BooleanSupplier enableCache;
+    private final String name;
+    private final IntConsumer setValidityDelegate;
+    private final IntSupplier getValidityDelegate;
+    private final IntConsumer setUpdateIntervalDelegate;
+    private final IntSupplier getUpdateIntervalDelegate;
+    private final IntConsumer setMaxEntriesDelegate;
+    private final IntSupplier getMaxEntriesDelegate;
+    private final Consumer<Boolean> setActiveUpdate;
+    private final BooleanSupplier getActiveUpdate;
+    private final Function<K, V> loadFunction;
+    private final Supplier<Map<K, V>> bulkLoadFunction;
+    private final BooleanSupplier enableCache;
+
+    // Determines whether the presence of a specific value should trigger the invalidation of
+    // the supplied key. Used by CredentialsCache & CacheRefresher to identify when the
+    // credentials for a role couldn't be loaded without throwing an exception or serving stale
+    // values until the natural expiry time.
+    private final BiPredicate<K, V> invalidateCondition;
 
     /**
      * @param name Used for MBean
@@ -64,6 +103,8 @@
      * @param getUpdateIntervalDelegate Getter for update interval
      * @param setMaxEntriesDelegate Used to set max # entries in cache. See {@link com.github.benmanes.caffeine.cache.Policy.Eviction#setMaximum(long)}
      * @param getMaxEntriesDelegate Getter for max entries.
+     * @param setActiveUpdate Method to process config to actively update the auth cache prior to configured cache expiration
+     * @param getActiveUpdate Getter for active update
      * @param loadFunction Function to load the cache. Called on {@link #get(Object)}
      * @param cacheEnabledDelegate Used to determine if cache is enabled.
      */
@@ -74,9 +115,56 @@
                         IntSupplier getUpdateIntervalDelegate,
                         IntConsumer setMaxEntriesDelegate,
                         IntSupplier getMaxEntriesDelegate,
+                        Consumer<Boolean> setActiveUpdate,
+                        BooleanSupplier getActiveUpdate,
                         Function<K, V> loadFunction,
+                        Supplier<Map<K, V>> bulkLoadFunction,
                         BooleanSupplier cacheEnabledDelegate)
     {
+        this(name,
+             setValidityDelegate,
+             getValidityDelegate,
+             setUpdateIntervalDelegate,
+             getUpdateIntervalDelegate,
+             setMaxEntriesDelegate,
+             getMaxEntriesDelegate,
+             setActiveUpdate,
+             getActiveUpdate,
+             loadFunction,
+             bulkLoadFunction,
+             cacheEnabledDelegate,
+             (k, v) -> false);
+    }
+
+    /**
+     * @param name Used for MBean
+     * @param setValidityDelegate Used to set cache validity period. See {@link Policy#expireAfterWrite()}
+     * @param getValidityDelegate Getter for validity period
+     * @param setUpdateIntervalDelegate Used to set cache update interval. See {@link Policy#refreshAfterWrite()}
+     * @param getUpdateIntervalDelegate Getter for update interval
+     * @param setMaxEntriesDelegate Used to set max # entries in cache. See {@link com.github.benmanes.caffeine.cache.Policy.Eviction#setMaximum(long)}
+     * @param getMaxEntriesDelegate Getter for max entries.
+     * @param setActiveUpdate Actively update the cache before expiry
+     * @param getActiveUpdate Getter for active update
+     * @param loadFunction Function to load the cache. Called on {@link #get(Object)}
+     * @param cacheEnabledDelegate Used to determine if cache is enabled.
+     * @param invalidationCondition Used during active updates to determine if a refreshed value indicates a missing
+     *                              entry in the underlying table. If satisfied, the key will be invalidated.
+     */
+    protected AuthCache(String name,
+                        IntConsumer setValidityDelegate,
+                        IntSupplier getValidityDelegate,
+                        IntConsumer setUpdateIntervalDelegate,
+                        IntSupplier getUpdateIntervalDelegate,
+                        IntConsumer setMaxEntriesDelegate,
+                        IntSupplier getMaxEntriesDelegate,
+                        Consumer<Boolean> setActiveUpdate,
+                        BooleanSupplier getActiveUpdate,
+                        Function<K, V> loadFunction,
+                        Supplier<Map<K, V>> bulkLoadFunction,
+                        BooleanSupplier cacheEnabledDelegate,
+                        BiPredicate<K, V> invalidationCondition)
+    {
         this.name = checkNotNull(name);
         this.setValidityDelegate = checkNotNull(setValidityDelegate);
         this.getValidityDelegate = checkNotNull(getValidityDelegate);
@@ -84,8 +172,12 @@
         this.getUpdateIntervalDelegate = checkNotNull(getUpdateIntervalDelegate);
         this.setMaxEntriesDelegate = checkNotNull(setMaxEntriesDelegate);
         this.getMaxEntriesDelegate = checkNotNull(getMaxEntriesDelegate);
+        this.setActiveUpdate = checkNotNull(setActiveUpdate);
+        this.getActiveUpdate = checkNotNull(getActiveUpdate);
         this.loadFunction = checkNotNull(loadFunction);
+        this.bulkLoadFunction = checkNotNull(bulkLoadFunction);
         this.enableCache = checkNotNull(cacheEnabledDelegate);
+        this.invalidateCondition = checkNotNull(invalidationCondition);
         init();
     }
 
@@ -94,9 +186,10 @@
      */
     protected void init()
     {
-        this.cacheRefreshExecutor = new DebuggableThreadPoolExecutor(name + "Refresh", Thread.NORM_PRIORITY);
+        this.cacheRefreshExecutor = executorFactory().sequential(name + "Refresh");
         cache = initCache(null);
         MBeanWrapper.instance.registerMBean(this, getObjectName());
+        REGISTRY.add(this);
     }
 
     protected void unregisterMBean()
@@ -110,6 +203,18 @@
     }
 
     /**
+     * Retrieve all cached entries. Will call {@link LoadingCache#asMap()} which does not trigger "load".
+     * @return a map of cached key-value pairs
+     */
+    public Map<K, V> getAll()
+    {
+        if (cache == null)
+            return Collections.emptyMap();
+
+        return Collections.unmodifiableMap(cache.asMap());
+    }
+
+    /**
      * Retrieve a value from the cache. Will call {@link LoadingCache#get(Object)} which will
      * "load" the value if it's not present, thus populating the key.
      * @param k
@@ -128,13 +233,13 @@
     /**
      * Invalidate the entire cache.
      */
-    public void invalidate()
+    public synchronized void invalidate()
     {
         cache = initCache(null);
     }
 
     /**
-     * Invalidate a key
+     * Invalidate a key.
      * @param k key to invalidate
      */
     public void invalidate(K k)
@@ -147,7 +252,7 @@
      * Time in milliseconds that a value in the cache will expire after.
      * @param validityPeriod in milliseconds
      */
-    public void setValidity(int validityPeriod)
+    public synchronized void setValidity(int validityPeriod)
     {
         if (Boolean.getBoolean("cassandra.disable_auth_caches_remote_configuration"))
             throw new UnsupportedOperationException("Remote configuration of auth caches is disabled");
@@ -165,7 +270,7 @@
      * Time in milliseconds after which an entry in the cache should be refreshed (it's load function called again)
      * @param updateInterval in milliseconds
      */
-    public void setUpdateInterval(int updateInterval)
+    public synchronized void setUpdateInterval(int updateInterval)
     {
         if (Boolean.getBoolean("cassandra.disable_auth_caches_remote_configuration"))
             throw new UnsupportedOperationException("Remote configuration of auth caches is disabled");
@@ -183,7 +288,7 @@
      * Set maximum number of entries in the cache.
      * @param maxEntries
      */
-    public void setMaxEntries(int maxEntries)
+    public synchronized void setMaxEntries(int maxEntries)
     {
         if (Boolean.getBoolean("cassandra.disable_auth_caches_remote_configuration"))
             throw new UnsupportedOperationException("Remote configuration of auth caches is disabled");
@@ -197,6 +302,25 @@
         return getMaxEntriesDelegate.getAsInt();
     }
 
+    public boolean getActiveUpdate()
+    {
+        return getActiveUpdate.getAsBoolean();
+    }
+
+    public synchronized void setActiveUpdate(boolean update)
+    {
+        if (Boolean.getBoolean("cassandra.disable_auth_caches_remote_configuration"))
+            throw new UnsupportedOperationException("Remote configuration of auth caches is disabled");
+
+        setActiveUpdate.accept(update);
+        cache = initCache(cache);
+    }
+
+    public long getEstimatedSize()
+    {
+        return cache == null ? 0L : cache.estimatedSize();
+    }
+
     /**
      * (Re-)initialise the underlying cache. Will update validity, max entries, and update interval if
      * any have changed. The underlying {@link LoadingCache} will be initiated based on the provided {@code loadFunction}.
@@ -212,25 +336,109 @@
         if (getValidity() <= 0)
             return null;
 
-        logger.info("(Re)initializing {} (validity period/update interval/max entries) ({}/{}/{})",
-                    name, getValidity(), getUpdateInterval(), getMaxEntries());
+        boolean activeUpdate = getActiveUpdate();
+        logger.info("(Re)initializing {} (validity period/update interval/max entries/active update) ({}/{}/{}/{})",
+                    name, getValidity(), getUpdateInterval(), getMaxEntries(), activeUpdate);
+        LoadingCache<K, V> updatedCache;
 
-        if (existing == null) {
-          return Caffeine.newBuilder()
-                         .refreshAfterWrite(getUpdateInterval(), TimeUnit.MILLISECONDS)
-                         .expireAfterWrite(getValidity(), TimeUnit.MILLISECONDS)
-                         .maximumSize(getMaxEntries())
-                         .executor(cacheRefreshExecutor)
-                         .build(loadFunction::apply);
+        if (existing == null)
+        {
+            updatedCache = Caffeine.newBuilder().refreshAfterWrite(activeUpdate ? getValidity() : getUpdateInterval(), TimeUnit.MILLISECONDS)
+                                   .expireAfterWrite(getValidity(), TimeUnit.MILLISECONDS)
+                                   .maximumSize(getMaxEntries())
+                                   .executor(cacheRefreshExecutor)
+                                   .build(loadFunction::apply);
+        }
+        else
+        {
+            updatedCache = cache;
+            // Always set as mandatory
+            cache.policy().refreshAfterWrite().ifPresent(policy ->
+                policy.setExpiresAfter(activeUpdate ? getValidity() : getUpdateInterval(), TimeUnit.MILLISECONDS));
+            cache.policy().expireAfterWrite().ifPresent(policy -> policy.setExpiresAfter(getValidity(), TimeUnit.MILLISECONDS));
+            cache.policy().eviction().ifPresent(policy -> policy.setMaximum(getMaxEntries()));
         }
 
-        // Always set as mandatory
-        cache.policy().refreshAfterWrite().ifPresent(policy ->
-            policy.setExpiresAfter(getUpdateInterval(), TimeUnit.MILLISECONDS));
-        cache.policy().expireAfterWrite().ifPresent(policy ->
-            policy.setExpiresAfter(getValidity(), TimeUnit.MILLISECONDS));
-        cache.policy().eviction().ifPresent(policy ->
-            policy.setMaximum(getMaxEntries()));
-        return cache;
+        if (cacheRefresher != null)
+        {
+            cacheRefresher.cancel(false); // permit the two refreshers to race until the old one dies, should be harmless.
+            cacheRefresher = null;
+        }
+
+        if (activeUpdate)
+        {
+            cacheRefresher = ScheduledExecutors.optionalTasks.scheduleAtFixedRate(CacheRefresher.create(name,
+                                                                                                        updatedCache,
+                                                                                                        invalidateCondition),
+                                                                                  getUpdateInterval(),
+                                                                                  getUpdateInterval(),
+                                                                                  TimeUnit.MILLISECONDS);
+        }
+        return updatedCache;
+    }
+
+    @Override
+    public boolean isTerminated()
+    {
+        return cacheRefreshExecutor.isTerminated();
+    }
+
+    @Override
+    public void shutdown()
+    {
+        cacheRefreshExecutor.shutdown();
+    }
+
+    @Override
+    public Object shutdownNow()
+    {
+        return cacheRefreshExecutor.shutdownNow();
+    }
+
+    @Override
+    public boolean awaitTermination(long timeout, TimeUnit units) throws InterruptedException
+    {
+        return cacheRefreshExecutor.awaitTermination(timeout, units);
+    }
+
+    public void warm()
+    {
+        if (cache == null)
+        {
+            logger.info("{} cache not enabled, skipping pre-warming", name);
+            return;
+        }
+
+        int retries = Integer.getInteger(CACHE_LOAD_RETRIES_PROPERTY, 10);
+        long retryInterval = Long.getLong(CACHE_LOAD_RETRY_INTERVAL_PROPERTY, 1000);
+
+        while (retries-- > 0)
+        {
+            try
+            {
+                Map<K, V> entries = bulkLoadFunction.get();
+                cache.putAll(entries);
+                break;
+            }
+            catch (Exception e)
+            {
+                Uninterruptibles.sleepUninterruptibly(retryInterval, TimeUnit.MILLISECONDS);
+            }
+        }
+    }
+
+    /*
+     * Implemented when we can provide an efficient way to bulk load all entries for a cache. This isn't a
+     * @FunctionalInterface due to the default impl, which is for IRoleManager, IAuthorizer, and INetworkAuthorizer.
+     * They all extend this interface so that implementations only need to provide an override if it's useful.
+     * IAuthenticator doesn't implement this interface because CredentialsCache is more tightly coupled to
+     * PasswordAuthenticator, which does expose a bulk loader.
+     */
+    public interface BulkLoader<K, V>
+    {
+        default Supplier<Map<K, V>> bulkLoader()
+        {
+            return Collections::emptyMap;
+        }
     }
 }
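A minimal sketch (assumed names, not from the patch) of a component opting into warming by overriding BulkLoader; warm()'s retry loop stays tunable via -Dcassandra.auth_cache.warming.max_retries and -Dcassandra.auth_cache.warming.retry_interval_ms (defaults 10 retries, 1000 ms):

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Supplier;
    import org.apache.cassandra.auth.AuthCache;

    public class ExampleCacheSource implements AuthCache.BulkLoader<String, String>
    {
        @Override
        public Supplier<Map<String, String>> bulkLoader()
        {
            // Placeholder data; a real source would read every row from its backing table.
            return () -> Collections.singletonMap("example-key", "example-value");
        }
    }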
diff --git a/src/java/org/apache/cassandra/auth/AuthCacheMBean.java b/src/java/org/apache/cassandra/auth/AuthCacheMBean.java
index 43fb88e..e443434 100644
--- a/src/java/org/apache/cassandra/auth/AuthCacheMBean.java
+++ b/src/java/org/apache/cassandra/auth/AuthCacheMBean.java
@@ -33,4 +33,10 @@
     public void setMaxEntries(int maxEntries);
 
     public int getMaxEntries();
+
+    public boolean getActiveUpdate();
+
+    public void setActiveUpdate(boolean update);
+
+    public long getEstimatedSize();
 }
diff --git a/src/java/org/apache/cassandra/auth/AuthCacheService.java b/src/java/org/apache/cassandra/auth/AuthCacheService.java
new file mode 100644
index 0000000..f6ee02e
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/AuthCacheService.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@ThreadSafe
+public class AuthCacheService
+{
+    private static final Logger logger = LoggerFactory.getLogger(AuthCacheService.class);
+    public static final AuthCacheService instance = new AuthCacheService();
+
+    private final Set<AuthCache<?, ?>> caches = new HashSet<>();
+    private static final AtomicBoolean cachesRegistered = new AtomicBoolean(false);
+
+    public synchronized void register(AuthCache<?, ?> cache)
+    {
+        Preconditions.checkNotNull(cache);
+        caches.add(cache);
+    }
+
+    public synchronized void unregister(AuthCache<?, ?> cache)
+    {
+        Preconditions.checkNotNull(cache);
+        caches.remove(cache);
+    }
+
+    public synchronized void warmCaches()
+    {
+        logger.info("Initiating bulk load of {} auth cache(s)", caches.size());
+        for (AuthCache<?, ?> cache : caches)
+        {
+            cache.warm();
+        }
+    }
+
+    /**
+     * NOTE: Can only be called once per instance run.
+     *
+     * Cache creation is scattered across static initializers in several classes, some holding a single cache and
+     * some holding several. Since we expect these caches to be created and initialized as one logical block, we tie
+     * them together and trigger them here.
+     *
+     * Note: We also register the PasswordAuthenticator cache with the {@link AuthCacheService} in its constructor
+     */
+    @VisibleForTesting
+    public static void initializeAndRegisterCaches()
+    {
+        if (!cachesRegistered.getAndSet(true))
+        {
+            AuthenticatedUser.init();
+            Roles.init();
+        }
+    }
+}
\ No newline at end of file
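A sketch of the intended startup sequence for the new service; the call site is illustrative:

    AuthCacheService.initializeAndRegisterCaches(); // one-time registration of permissions/roles caches
    AuthCacheService.instance.warmCaches();         // bulk-load every registered cache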
diff --git a/src/java/org/apache/cassandra/auth/AuthConfig.java b/src/java/org/apache/cassandra/auth/AuthConfig.java
index cc38296..9c5fceb 100644
--- a/src/java/org/apache/cassandra/auth/AuthConfig.java
+++ b/src/java/org/apache/cassandra/auth/AuthConfig.java
@@ -56,11 +56,11 @@
         // work with PasswordAuthenticator, so log a message if some other authenticator
         // is in use and non-default values are detected
         if (!(authenticator instanceof PasswordAuthenticator)
-            && (conf.credentials_update_interval_in_ms != -1
-                || conf.credentials_validity_in_ms != 2000
+            && (conf.credentials_update_interval != null
+                || conf.credentials_validity.toMilliseconds() != 2000
                 || conf.credentials_cache_max_entries != 1000))
         {
-            logger.info("Configuration options credentials_update_interval_in_ms, credentials_validity_in_ms and " +
+            logger.info("Configuration options credentials_update_interval, credentials_validity and " +
                         "credentials_cache_max_entries may not be applicable for the configured authenticator ({})",
                         authenticator.getClass().getName());
         }
diff --git a/src/java/org/apache/cassandra/auth/AuthKeyspace.java b/src/java/org/apache/cassandra/auth/AuthKeyspace.java
index a57257c..67fc9c1 100644
--- a/src/java/org/apache/cassandra/auth/AuthKeyspace.java
+++ b/src/java/org/apache/cassandra/auth/AuthKeyspace.java
@@ -19,7 +19,9 @@
 
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.SchemaConstants;
@@ -35,6 +37,8 @@
     {
     }
 
+    private static final int DEFAULT_RF = CassandraRelevantProperties.SYSTEM_AUTH_DEFAULT_RF.getInt();
+
     /**
      * Generation is used as a timestamp for automatic table creation on startup.
      * If you make any changes to the tables below, make sure to increment the
@@ -109,7 +113,7 @@
     public static KeyspaceMetadata metadata()
     {
         return KeyspaceMetadata.create(SchemaConstants.AUTH_KEYSPACE_NAME,
-                                       KeyspaceParams.simple(1),
+                                       KeyspaceParams.simple(Math.max(DEFAULT_RF, DatabaseDescriptor.getDefaultKeyspaceRF())),
                                        Tables.of(Roles, RoleMembers, RolePermissions, ResourceRoleIndex, NetworkPermissions));
     }
 }
diff --git a/src/java/org/apache/cassandra/auth/AuthProperties.java b/src/java/org/apache/cassandra/auth/AuthProperties.java
new file mode 100644
index 0000000..036cbe2
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/AuthProperties.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import javax.management.ObjectName;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.utils.MBeanWrapper;
+
+public class AuthProperties implements AuthPropertiesMXBean
+{
+    public static AuthProperties instance = new AuthProperties(DatabaseDescriptor.getAuthWriteConsistencyLevel(),
+                                                               DatabaseDescriptor.getAuthReadConsistencyLevel(),
+                                                               true);
+
+    public AuthProperties(ConsistencyLevel writeConsistencyLevel, ConsistencyLevel readConsistencyLevel, boolean registerMBean)
+    {
+        setWriteConsistencyLevel(writeConsistencyLevel);
+        setReadConsistencyLevel(readConsistencyLevel);
+
+        if (registerMBean)
+        {
+            try
+            {
+                MBeanWrapper.instance.registerMBean(this, new ObjectName("org.apache.cassandra.auth:type=AuthProperties"));
+            }
+            catch (Exception e)
+            {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    public void setWriteConsistencyLevel(ConsistencyLevel cl)
+    {
+        DatabaseDescriptor.setAuthWriteConsistencyLevel(cl);
+    }
+
+    public ConsistencyLevel getWriteConsistencyLevel()
+    {
+        return DatabaseDescriptor.getAuthWriteConsistencyLevel();
+    }
+
+    public void setReadConsistencyLevel(ConsistencyLevel cl)
+    {
+        DatabaseDescriptor.setAuthReadConsistencyLevel(cl);
+    }
+
+    public ConsistencyLevel getReadConsistencyLevel()
+    {
+        return DatabaseDescriptor.getAuthReadConsistencyLevel();
+    }
+}
\ No newline at end of file
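A brief sketch of adjusting auth read/write consistency at runtime through the new singleton; the chosen levels are illustrative:

    AuthProperties.instance.setReadConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
    AuthProperties.instance.setWriteConsistencyLevel(ConsistencyLevel.EACH_QUORUM);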
diff --git a/src/java/org/apache/cassandra/auth/AuthPropertiesMXBean.java b/src/java/org/apache/cassandra/auth/AuthPropertiesMXBean.java
new file mode 100644
index 0000000..f52a380
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/AuthPropertiesMXBean.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+
+public interface AuthPropertiesMXBean
+{
+    void setReadConsistencyLevel(ConsistencyLevel cl);
+    ConsistencyLevel getReadConsistencyLevel();
+    void setWriteConsistencyLevel(ConsistencyLevel cl);
+    ConsistencyLevel getWriteConsistencyLevel();
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/auth/AuthSchemaChangeListener.java b/src/java/org/apache/cassandra/auth/AuthSchemaChangeListener.java
index 6c21d7b..88a2940 100644
--- a/src/java/org/apache/cassandra/auth/AuthSchemaChangeListener.java
+++ b/src/java/org/apache/cassandra/auth/AuthSchemaChangeListener.java
@@ -17,37 +17,43 @@
  */
 package org.apache.cassandra.auth;
 
-import java.util.List;
-
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.cql3.functions.UDAggregate;
+import org.apache.cassandra.cql3.functions.UDFunction;
+import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.SchemaChangeListener;
+import org.apache.cassandra.schema.TableMetadata;
 
 /**
  * SchemaChangeListener implementation that cleans up permissions on dropped resources.
  */
-public class AuthSchemaChangeListener extends SchemaChangeListener
+public class AuthSchemaChangeListener implements SchemaChangeListener
 {
-    public void onDropKeyspace(String ksName)
+    @Override
+    public void onDropKeyspace(KeyspaceMetadata keyspace, boolean dropData)
     {
-        DatabaseDescriptor.getAuthorizer().revokeAllOn(DataResource.keyspace(ksName));
-        DatabaseDescriptor.getAuthorizer().revokeAllOn(FunctionResource.keyspace(ksName));
+        DatabaseDescriptor.getAuthorizer().revokeAllOn(DataResource.keyspace(keyspace.name));
+        DatabaseDescriptor.getAuthorizer().revokeAllOn(DataResource.allTables(keyspace.name));
+        DatabaseDescriptor.getAuthorizer().revokeAllOn(FunctionResource.keyspace(keyspace.name));
     }
 
-    public void onDropTable(String ksName, String cfName)
+    @Override
+    public void onDropTable(TableMetadata table, boolean dropData)
     {
-        DatabaseDescriptor.getAuthorizer().revokeAllOn(DataResource.table(ksName, cfName));
+        DatabaseDescriptor.getAuthorizer().revokeAllOn(DataResource.table(table.keyspace, table.name));
     }
 
-    public void onDropFunction(String ksName, String functionName, List<AbstractType<?>> argTypes)
+    @Override
+    public void onDropFunction(UDFunction function)
     {
         DatabaseDescriptor.getAuthorizer()
-                          .revokeAllOn(FunctionResource.function(ksName, functionName, argTypes));
+                          .revokeAllOn(FunctionResource.function(function.name().keyspace, function.name().name, function.argTypes()));
     }
 
-    public void onDropAggregate(String ksName, String aggregateName, List<AbstractType<?>> argTypes)
+    @Override
+    public void onDropAggregate(UDAggregate aggregate)
     {
         DatabaseDescriptor.getAuthorizer()
-                          .revokeAllOn(FunctionResource.function(ksName, aggregateName, argTypes));
+                          .revokeAllOn(FunctionResource.function(aggregate.name().keyspace, aggregate.name().name, aggregate.argTypes()));
     }
 }
diff --git a/src/java/org/apache/cassandra/auth/AuthenticatedUser.java b/src/java/org/apache/cassandra/auth/AuthenticatedUser.java
index 9f22bea..620a1d2 100644
--- a/src/java/org/apache/cassandra/auth/AuthenticatedUser.java
+++ b/src/java/org/apache/cassandra/auth/AuthenticatedUser.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.auth;
 
 import java.util.Set;
-
 import com.google.common.base.Objects;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -28,7 +27,7 @@
  * Returned from IAuthenticator#authenticate(), represents an authenticated user everywhere internally.
  *
  * Holds the name of the user and the roles that have been granted to the user. The roles will be cached
- * for roles_validity_in_ms.
+ * for roles_validity.
  */
 public class AuthenticatedUser
 {
@@ -39,11 +38,19 @@
     public static final AuthenticatedUser ANONYMOUS_USER = new AuthenticatedUser(ANONYMOUS_USERNAME);
 
     // User-level permissions cache.
-    private static final PermissionsCache permissionsCache = new PermissionsCache(DatabaseDescriptor.getAuthorizer());
-    private static final NetworkAuthCache networkAuthCache = new NetworkAuthCache(DatabaseDescriptor.getNetworkAuthorizer());
+    public static final PermissionsCache permissionsCache = new PermissionsCache(DatabaseDescriptor.getAuthorizer());
+    public static final NetworkPermissionsCache networkPermissionsCache = new NetworkPermissionsCache(DatabaseDescriptor.getNetworkAuthorizer());
+
+    /** Use {@link AuthCacheService#initializeAndRegisterCaches} rather than calling this directly */
+    public static void init()
+    {
+        AuthCacheService.instance.register(permissionsCache);
+        AuthCacheService.instance.register(networkPermissionsCache);
+    }
 
     private final String name;
-    // primary Role of the logged in user
+
+    // Primary Role of the logged in user
     private final RoleResource role;
 
     public AuthenticatedUser(String name)
@@ -136,7 +143,7 @@
      */
     public boolean hasLocalAccess()
     {
-        return networkAuthCache.get(this.getPrimaryRole()).canAccess(Datacenters.thisDatacenter());
+        return networkPermissionsCache.get(this.getPrimaryRole()).canAccess(Datacenters.thisDatacenter());
     }
 
     @Override
@@ -164,5 +171,4 @@
     {
         return Objects.hashCode(name);
     }
-
 }
diff --git a/src/java/org/apache/cassandra/auth/CacheRefresher.java b/src/java/org/apache/cassandra/auth/CacheRefresher.java
new file mode 100644
index 0000000..a199601
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/CacheRefresher.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import java.util.Set;
+import java.util.function.BiPredicate;
+import java.util.function.BooleanSupplier;
+
+import com.github.benmanes.caffeine.cache.LoadingCache;
+import com.google.common.annotations.VisibleForTesting;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.service.StorageService;
+
+public class CacheRefresher<K, V> implements Runnable
+{
+    private static final Logger logger = LoggerFactory.getLogger(CacheRefresher.class);
+
+    private final String name;
+    private final LoadingCache<K, V> cache;
+    private final BiPredicate<K, V> invalidationCondition;
+    private final BooleanSupplier skipCondition;
+
+    private CacheRefresher(String name, LoadingCache<K, V> cache,  BiPredicate<K, V> invalidationCondition, BooleanSupplier skipCondition)
+    {
+        this.name = name;
+        this.cache = cache;
+        this.invalidationCondition = invalidationCondition;
+        this.skipCondition = skipCondition;
+    }
+
+    public void run()
+    {
+        if (skipCondition.getAsBoolean())
+        {
+            logger.debug("Skipping {} cache refresh", name);
+            return;
+        }
+
+        try
+        {
+            logger.debug("Refreshing {} cache", name);
+            Set<K> ks = cache.asMap().keySet();
+            for (K key : ks)
+            {
+                cache.refresh(key);
+                V value = cache.getIfPresent(key);
+                if (invalidationCondition.test(key, value))
+                {
+                    logger.debug("Invalidating key");
+                    cache.invalidate(key);
+                }
+            }
+        }
+        catch (Exception e)
+        {
+            logger.error("Unexpected exception refreshing {} cache", name, e);
+        }
+    }
+
+    @VisibleForTesting
+    public static <K, V> CacheRefresher<K, V> create(String name,
+                                                     LoadingCache<K, V> cache,
+                                                     BiPredicate<K, V> invalidationCondition,
+                                                     BooleanSupplier skipCondition)
+    {
+        logger.info("Creating CacheRefresher for {}", name);
+        return new CacheRefresher<>(name, cache, invalidationCondition, skipCondition);
+    }
+
+    public static <K, V> CacheRefresher<K, V> create(String name, LoadingCache<K, V> cache, BiPredicate<K, V> invalidationCondition)
+    {
+        // By default we skip cache refreshes if the node has been decommissioned
+        return create(name, cache, invalidationCondition, StorageService.instance::isDecommissioned);
+    }
+}
\ No newline at end of file
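A hedged sketch of wiring a refresher by hand, mirroring what AuthCache.initCache does when active updates are enabled; the cache, interval and invalidation condition are placeholders:

    CacheRefresher<String, String> refresher = CacheRefresher.create("example", cache, (k, v) -> v == null);
    ScheduledExecutors.optionalTasks.scheduleAtFixedRate(refresher, 30, 30, TimeUnit.SECONDS);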
diff --git a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
index 6397154..b3f85e8 100644
--- a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
@@ -18,27 +18,39 @@
 package org.apache.cassandra.auth;
 
 import java.util.*;
+import java.util.function.BiConsumer;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.HashBasedTable;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Table;
+import com.google.common.collect.Sets;
+
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.cql3.UntypedResultSet.Row;
 import org.apache.cassandra.cql3.statements.BatchStatement;
 import org.apache.cassandra.cql3.statements.ModificationStatement;
-import org.apache.cassandra.cql3.statements.SelectStatement;
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.cql3.statements.SelectStatement;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.Pair;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * CassandraAuthorizer is an IAuthorizer implementation that keeps
@@ -83,18 +95,37 @@
         }
     }
 
-    public void grant(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource grantee)
+    public Set<Permission> grant(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource grantee)
     throws RequestValidationException, RequestExecutionException
     {
-        modifyRolePermissions(permissions, resource, grantee, "+");
-        addLookupEntry(resource, grantee);
+        String roleName = escape(grantee.getRoleName());
+        String resourceName = escape(resource.getName());
+        Set<Permission> existingPermissions = getExistingPermissions(roleName, resourceName, permissions);
+        Set<Permission> nonExistingPermissions = Sets.difference(permissions, existingPermissions);
+
+        if (!nonExistingPermissions.isEmpty())
+        {
+            modifyRolePermissions(nonExistingPermissions, resource, grantee, "+");
+            addLookupEntry(resource, grantee);
+        }
+
+        return nonExistingPermissions;
     }
 
-    public void revoke(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource revokee)
+    public Set<Permission> revoke(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource revokee)
     throws RequestValidationException, RequestExecutionException
     {
-        modifyRolePermissions(permissions, resource, revokee, "-");
-        removeLookupEntry(resource, revokee);
+        String roleName = escape(revokee.getRoleName());
+        String resourceName = escape(resource.getName());
+        Set<Permission> existingPermissions = getExistingPermissions(roleName, resourceName, permissions);
+
+        if (!existingPermissions.isEmpty())
+        {
+            modifyRolePermissions(existingPermissions, resource, revokee, "-");
+            removeLookupEntry(resource, revokee);
+        }
+
+        return existingPermissions;
     }
 
     // Called when deleting a role with DROP ROLE query.
@@ -109,7 +140,8 @@
             UntypedResultSet rows = process(String.format("SELECT resource FROM %s.%s WHERE role = '%s'",
                                                           SchemaConstants.AUTH_KEYSPACE_NAME,
                                                           AuthKeyspace.ROLE_PERMISSIONS,
-                                                          escape(revokee.getRoleName())));
+                                                          escape(revokee.getRoleName())),
+                                            authReadConsistencyLevel());
 
             List<CQLStatement> statements = new ArrayList<>();
             for (UntypedResultSet.Row row : rows)
@@ -148,7 +180,8 @@
             UntypedResultSet rows = process(String.format("SELECT role FROM %s.%s WHERE resource = '%s'",
                                                           SchemaConstants.AUTH_KEYSPACE_NAME,
                                                           AuthKeyspace.RESOURCE_ROLE_INDEX,
-                                                          escape(droppedResource.getName())));
+                                                          escape(droppedResource.getName())),
+                                            authReadConsistencyLevel());
 
             List<CQLStatement> statements = new ArrayList<>();
             for (UntypedResultSet.Row row : rows)
@@ -175,6 +208,40 @@
         }
     }
 
+    /**
+     * Returns the subset of the expected permissions that the specified role already has on the resource.
+     *
+     * @param roleName the role name
+     * @param resourceName the resource name
+     * @param expectedPermissions the permissions to check for
+     * @return The existing permissions
+     */
+    private Set<Permission> getExistingPermissions(String roleName,
+                                                   String resourceName,
+                                                   Set<Permission> expectedPermissions)
+    {
+        UntypedResultSet rs = process(String.format("SELECT permissions FROM %s.%s WHERE role = '%s' AND resource = '%s'",
+                                                    SchemaConstants.AUTH_KEYSPACE_NAME,
+                                                    AuthKeyspace.ROLE_PERMISSIONS,
+                                                    roleName,
+                                                    resourceName),
+                                      ConsistencyLevel.LOCAL_ONE);
+
+        if (rs.isEmpty())
+            return Collections.emptySet();
+
+        Row one = rs.one();
+
+        Set<Permission> existingPermissions = Sets.newHashSetWithExpectedSize(expectedPermissions.size());
+        for (String permissionName : one.getSet("permissions", UTF8Type.instance))
+        {
+            Permission permission = Permission.valueOf(permissionName);
+            if (expectedPermissions.contains(permission))
+                existingPermissions.add(permission);
+        }
+        return existingPermissions;
+    }
+
     private void executeLoggedBatch(List<CQLStatement> statements)
     throws RequestExecutionException, RequestValidationException
     {
@@ -189,7 +256,7 @@
     private void addPermissionsForRole(Set<Permission> permissions, IResource resource, RoleResource role)
     throws RequestExecutionException, RequestValidationException
     {
-        QueryOptions options = QueryOptions.forInternalCalls(ConsistencyLevel.LOCAL_ONE,
+        QueryOptions options = QueryOptions.forInternalCalls(authReadConsistencyLevel(),
                                                              Lists.newArrayList(ByteBufferUtil.bytes(role.getRoleName()),
                                                                                 ByteBufferUtil.bytes(resource.getName())));
 
@@ -216,7 +283,8 @@
                               op,
                               "'" + StringUtils.join(permissions, "','") + "'",
                               escape(role.getRoleName()),
-                              escape(resource.getName())));
+                              escape(resource.getName())),
+                authWriteConsistencyLevel());
     }
 
     // Removes an entry from the inverted index table (from resource -> role with defined permissions)
@@ -226,7 +294,8 @@
                               SchemaConstants.AUTH_KEYSPACE_NAME,
                               AuthKeyspace.RESOURCE_ROLE_INDEX,
                               escape(resource.getName()),
-                              escape(role.getRoleName())));
+                              escape(role.getRoleName())),
+                authWriteConsistencyLevel());
     }
 
     // Adds an entry to the inverted index table (from resource -> role with defined permissions)
@@ -236,7 +305,8 @@
                               SchemaConstants.AUTH_KEYSPACE_NAME,
                               AuthKeyspace.RESOURCE_ROLE_INDEX,
                               escape(resource.getName()),
-                              escape(role.getRoleName())));
+                              escape(role.getRoleName())),
+                authWriteConsistencyLevel());
     }
 
     // 'grantee' can be null - in that case everyone's permissions have been requested. Otherwise, only single user's.
@@ -274,7 +344,7 @@
     throws RequestExecutionException
     {
         Set<PermissionDetails> details = new HashSet<>();
-        for (UntypedResultSet.Row row : process(buildListQuery(resource, role)))
+        for (UntypedResultSet.Row row : process(buildListQuery(resource, role), authReadConsistencyLevel()))
         {
             if (row.has(PERMISSIONS))
             {
@@ -351,19 +421,114 @@
 
     ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
     {
-        return statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+        return statement.execute(QueryState.forInternalCalls(), options, nanoTime());
     }
 
-    UntypedResultSet process(String query) throws RequestExecutionException
+    /**
+     * This is exposed so we can override the consistency level in single-node tests
+     */
+    @VisibleForTesting
+    UntypedResultSet process(String query, ConsistencyLevel cl) throws RequestExecutionException
     {
-        return QueryProcessor.process(query, ConsistencyLevel.LOCAL_ONE);
+        return QueryProcessor.process(query, cl);
     }
 
     void processBatch(BatchStatement statement)
     {
+        QueryOptions options = QueryOptions.forInternalCalls(authWriteConsistencyLevel(), Collections.emptyList());
         QueryProcessor.instance.processBatch(statement,
                                              QueryState.forInternalCalls(),
-                                             BatchQueryOptions.withoutPerStatementVariables(QueryOptions.DEFAULT),
-                                             System.nanoTime());
+                                             BatchQueryOptions.withoutPerStatementVariables(options),
+                                             nanoTime());
+    }
+
+    public static ConsistencyLevel authWriteConsistencyLevel()
+    {
+        return AuthProperties.instance.getWriteConsistencyLevel();
+    }
+
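+    /** Consistency level used for reads from the auth tables, as configured in AuthProperties. */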
+    public static ConsistencyLevel authReadConsistencyLevel()
+    {
+        return AuthProperties.instance.getReadConsistencyLevel();
+    }
+
+    /**
+     * Get an initial set of permissions to load into the PermissionsCache at startup
+     * @return map of User/Resource -> Permissions for cache initialisation
+     */
+    public Supplier<Map<Pair<AuthenticatedUser, IResource>, Set<Permission>>> bulkLoader()
+    {
+        return () ->
+        {
+            Map<Pair<AuthenticatedUser, IResource>, Set<Permission>> entries = new HashMap<>();
+            String cqlTemplate = "SELECT %s, %s, %s FROM %s.%s";
+
+            logger.info("Warming permissions cache from role_permissions table");
+            UntypedResultSet results = process(String.format(cqlTemplate,
+                                                             ROLE, RESOURCE, PERMISSIONS,
+                                                             SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_PERMISSIONS),
+                                                             AuthProperties.instance.getReadConsistencyLevel());
+
+            // role_name -> (resource, permissions)
+            Table<String, IResource, Set<Permission>> individualRolePermissions = HashBasedTable.create();
+            results.forEach(row -> {
+                if (row.has(PERMISSIONS))
+                {
+                    individualRolePermissions.put(row.getString(ROLE),
+                                                  Resources.fromName(row.getString(RESOURCE)),
+                                                  permissions(row.getSet(PERMISSIONS, UTF8Type.instance)));
+                }
+            });
+
+            // Iterate all user level roles in the system and accumulate the permissions of their granted roles
+            Roles.getAllRoles().forEach(roleResource -> {
+                // If the role has login priv, accumulate the permissions of all its granted roles
+                if (Roles.canLogin(roleResource))
+                {
+                    // Structure to accumulate the resource -> permission mappings for the closure of granted roles
+                    Map<IResource, ImmutableSet.Builder<Permission>> userPermissions = new HashMap<>();
+                    BiConsumer<IResource, Set<Permission>> accumulator = accumulator(userPermissions);
+
+                    // For each role granted to this primary, lookup the specific resource/permissions grants
+                    // we read in the first step. We'll accumulate those in the userPermissions map, which we'll turn
+                    // into cache entries when we're done.
+                    // Note: we need to provide a default empty set of permissions for roles that have nothing explicitly
+                    // granted to them (e.g. superusers or roles with no direct perms).
+                    Roles.getRoleDetails(roleResource).forEach(grantedRole ->
+                                                               individualRolePermissions.rowMap()
+                                                                                        .getOrDefault(grantedRole.resource.getRoleName(), Collections.emptyMap())
+                                                                                        .forEach(accumulator));
+
+                    // Having iterated all the roles granted to this user, finalize the transitive permissions
+                    // (i.e. turn them into entries for the PermissionsCache)
+                    userPermissions.forEach((resource, builder) -> entries.put(cacheKey(roleResource, resource),
+                                                                               builder.build()));
+                }
+            });
+
+            return entries;
+        };
+    }
+
+    // Helper function to group the transitive set of permissions granted
+    // to user by the specific resources to which they apply
+    private static BiConsumer<IResource, Set<Permission>> accumulator(Map<IResource, ImmutableSet.Builder<Permission>> accumulator)
+    {
+        return (resource, permissions) -> accumulator.computeIfAbsent(resource, k -> new ImmutableSet.Builder<>()).addAll(permissions);
+    }
+
+    private static Set<Permission> permissions(Set<String> permissionNames)
+    {
+        return permissionNames.stream().map(Permission::valueOf).collect(Collectors.toSet());
+    }
+
+    private static Pair<AuthenticatedUser, IResource> cacheKey(RoleResource role, IResource resource)
+    {
+        return cacheKey(role.getRoleName(), resource);
+    }
+
+    private static Pair<AuthenticatedUser, IResource> cacheKey(String roleName, IResource resource)
+    {
+        return Pair.create(new AuthenticatedUser(roleName), resource);
     }
 }
diff --git a/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java b/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java
index 6fdcd69..cc08e93 100644
--- a/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java
@@ -18,27 +18,36 @@
 
 package org.apache.cassandra.auth;
 
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Set;
+import java.util.function.Supplier;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.cql3.statements.SelectStatement;
-import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.service.ClientState;
-import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class CassandraNetworkAuthorizer implements INetworkAuthorizer
 {
+    private static final Logger logger = LoggerFactory.getLogger(CassandraNetworkAuthorizer.class);
     private SelectStatement authorizeUserStatement = null;
 
     public void setup()
@@ -52,18 +61,21 @@
     @VisibleForTesting
     ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
     {
-        return statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+        return statement.execute(forInternalCalls(), options, nanoTime());
     }
 
+    /**
+     * This is exposed so we can override the consistency level for single-node tests
+     */
     @VisibleForTesting
-    void process(String query)
+    UntypedResultSet process(String query, ConsistencyLevel cl) throws RequestExecutionException
     {
-        QueryProcessor.process(query, ConsistencyLevel.LOCAL_ONE);
+        return QueryProcessor.process(query, cl);
     }
 
     private Set<String> getAuthorizedDcs(String name)
     {
-        QueryOptions options = QueryOptions.forInternalCalls(ConsistencyLevel.LOCAL_ONE,
+        QueryOptions options = QueryOptions.forInternalCalls(CassandraAuthorizer.authReadConsistencyLevel(),
                                                              Lists.newArrayList(ByteBufferUtil.bytes(name)));
 
         ResultMessage.Rows rows = select(authorizeUserStatement, options);
@@ -137,7 +149,7 @@
                                      getSetString(permissions),
                                      role.getName());
 
-        process(query);
+        process(query, CassandraAuthorizer.authWriteConsistencyLevel());
     }
 
     public void drop(RoleResource role)
@@ -147,11 +159,36 @@
                                      AuthKeyspace.NETWORK_PERMISSIONS,
                                      role.getName());
 
-        process(query);
+        process(query, CassandraAuthorizer.authWriteConsistencyLevel());
     }
 
     public void validateConfiguration() throws ConfigurationException
     {
         // noop
     }
+
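+    /** Builds the initial role -> datacenter permissions entries from the network_permissions table to warm the cache at startup. */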
+    @Override
+    public Supplier<Map<RoleResource, DCPermissions>> bulkLoader()
+    {
+        return () -> {
+            logger.info("Pre-warming datacenter permissions cache from network_permissions table");
+            Map<RoleResource, DCPermissions> entries = new HashMap<>();
+            UntypedResultSet rows = process(String.format("SELECT role, dcs FROM %s.%s",
+                                                          SchemaConstants.AUTH_KEYSPACE_NAME,
+                                                          AuthKeyspace.NETWORK_PERMISSIONS),
+                                            CassandraAuthorizer.authReadConsistencyLevel());
+
+            for (UntypedResultSet.Row row : rows)
+            {
+                RoleResource role = RoleResource.role(row.getString("role"));
+                DCPermissions.Builder builder = new DCPermissions.Builder();
+                Set<String> dcs = row.getFrozenSet("dcs", UTF8Type.instance);
+                for (String dc : dcs)
+                    builder.add(dc);
+                entries.put(role, builder.build());
+            }
+
+            return entries;
+        };
+    }
 }
diff --git a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
index 7aed51a..733e9da 100644
--- a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
+++ b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
@@ -22,6 +22,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.function.Predicate;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
@@ -42,12 +43,14 @@
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.service.ClientState;
-import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.mindrot.jbcrypt.BCrypt;
 
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Responsible for the creation, maintenance and deletion of roles
  * for the purposes of authentication and authorization.
@@ -77,9 +80,16 @@
 {
     private static final Logger logger = LoggerFactory.getLogger(CassandraRoleManager.class);
 
-    static final String DEFAULT_SUPERUSER_NAME = "cassandra";
+    public static final String DEFAULT_SUPERUSER_NAME = "cassandra";
     static final String DEFAULT_SUPERUSER_PASSWORD = "cassandra";
 
+    /**
+     * We need to treat the default superuser as a special case since during initial node startup, we may end up with
+     * duplicate creation or deletion + re-creation of this user on different nodes unless we check at quorum to see if
+     * it's already been done.
+     */
+    static final ConsistencyLevel DEFAULT_SUPERUSER_CONSISTENCY_LEVEL = ConsistencyLevel.QUORUM;
+
     // Transform a row in the AuthKeyspace.ROLES to a Role instance
     private static final Function<UntypedResultSet.Row, Role> ROW_TO_ROLE = row ->
     {
@@ -122,30 +132,33 @@
     private final Set<Option> supportedOptions;
     private final Set<Option> alterableOptions;
 
-    // Will be set to true when all nodes in the cluster are on a version which supports roles (i.e. 2.2+)
-    private volatile boolean isClusterReady = false;
-
     public CassandraRoleManager()
     {
-        supportedOptions = DatabaseDescriptor.getAuthenticator().getClass() == PasswordAuthenticator.class
-                         ? ImmutableSet.of(Option.LOGIN, Option.SUPERUSER, Option.PASSWORD)
+        supportedOptions = DatabaseDescriptor.getAuthenticator() instanceof PasswordAuthenticator
+                         ? ImmutableSet.of(Option.LOGIN, Option.SUPERUSER, Option.PASSWORD, Option.HASHED_PASSWORD)
                          : ImmutableSet.of(Option.LOGIN, Option.SUPERUSER);
-        alterableOptions = DatabaseDescriptor.getAuthenticator().getClass().equals(PasswordAuthenticator.class)
-                         ? ImmutableSet.of(Option.PASSWORD)
+        alterableOptions = DatabaseDescriptor.getAuthenticator() instanceof PasswordAuthenticator
+                         ? ImmutableSet.of(Option.PASSWORD, Option.HASHED_PASSWORD)
                          : ImmutableSet.<Option>of();
     }
 
+    @Override
     public void setup()
     {
-        loadRoleStatement = (SelectStatement) prepare("SELECT * from %s.%s WHERE role = ?",
-                                                      SchemaConstants.AUTH_KEYSPACE_NAME,
-                                                      AuthKeyspace.ROLES);
+        loadRoleStatement();
         scheduleSetupTask(() -> {
             setupDefaultRole();
             return null;
         });
     }
 
+    protected final void loadRoleStatement()
+    {
+        loadRoleStatement = (SelectStatement) prepare("SELECT * from %s.%s WHERE role = ?",
+                                                      SchemaConstants.AUTH_KEYSPACE_NAME,
+                                                      AuthKeyspace.ROLES);
+    }
+
     public Set<Option> supportedOptions()
     {
         return supportedOptions;
@@ -159,21 +172,21 @@
     public void createRole(AuthenticatedUser performer, RoleResource role, RoleOptions options)
     throws RequestValidationException, RequestExecutionException
     {
-        String insertCql = options.getPassword().isPresent()
+        String insertCql = options.getPassword().isPresent() || options.getHashedPassword().isPresent()
                          ? String.format("INSERT INTO %s.%s (role, is_superuser, can_login, salted_hash) VALUES ('%s', %s, %s, '%s')",
                                          SchemaConstants.AUTH_KEYSPACE_NAME,
                                          AuthKeyspace.ROLES,
                                          escape(role.getRoleName()),
-                                         options.getSuperuser().or(false),
-                                         options.getLogin().or(false),
-                                         escape(hashpw(options.getPassword().get())))
+                                         options.getSuperuser().orElse(false),
+                                         options.getLogin().orElse(false),
+                                         options.getHashedPassword().orElseGet(() -> escape(hashpw(options.getPassword().get()))))
                          : String.format("INSERT INTO %s.%s (role, is_superuser, can_login) VALUES ('%s', %s, %s)",
                                          SchemaConstants.AUTH_KEYSPACE_NAME,
                                          AuthKeyspace.ROLES,
                                          escape(role.getRoleName()),
-                                         options.getSuperuser().or(false),
-                                         options.getLogin().or(false));
-        process(insertCql, consistencyForRole(role.getRoleName()));
+                                         options.getSuperuser().orElse(false),
+                                         options.getLogin().orElse(false));
+        process(insertCql, consistencyForRoleWrite(role.getRoleName()));
     }
 
     public void dropRole(AuthenticatedUser performer, RoleResource role) throws RequestValidationException, RequestExecutionException
@@ -182,7 +195,7 @@
                               SchemaConstants.AUTH_KEYSPACE_NAME,
                               AuthKeyspace.ROLES,
                               escape(role.getRoleName())),
-                consistencyForRole(role.getRoleName()));
+                consistencyForRoleWrite(role.getRoleName()));
         removeAllMembers(role.getRoleName());
     }
 
@@ -198,7 +211,7 @@
                                   AuthKeyspace.ROLES,
                                   assignments,
                                   escape(role.getRoleName())),
-                    consistencyForRole(role.getRoleName()));
+                    consistencyForRoleWrite(role.getRoleName()));
         }
     }
 
@@ -220,7 +233,7 @@
                               AuthKeyspace.ROLE_MEMBERS,
                               escape(role.getRoleName()),
                               escape(grantee.getRoleName())),
-                consistencyForRole(role.getRoleName()));
+                consistencyForRoleWrite(role.getRoleName()));
     }
 
     public void revokeRole(AuthenticatedUser performer, RoleResource role, RoleResource revokee)
@@ -237,7 +250,7 @@
                               AuthKeyspace.ROLE_MEMBERS,
                               escape(role.getRoleName()),
                               escape(revokee.getRoleName())),
-                consistencyForRole(role.getRoleName()));
+                consistencyForRoleWrite(role.getRoleName()));
     }
 
     public Set<RoleResource> getRoles(RoleResource grantee, boolean includeInherited)
@@ -245,7 +258,8 @@
     {
         return collectRoles(getRole(grantee.getRoleName()),
                             includeInherited,
-                            filter())
+                            filter(),
+                            this::getRole)
                .map(r -> r.resource)
                .collect(Collectors.toSet());
     }
@@ -254,10 +268,16 @@
     {
         return collectRoles(getRole(grantee.getRoleName()),
                             true,
-                            filter())
+                            filter(),
+                            this::getRole)
                .collect(Collectors.toSet());
     }
 
+    /**
+     * We hard-code this query to QUORUM regardless of the role or auth credentials of the caller, given the nature of
+     * this query: we expect to know *all* roles across the entire cluster when we run it, not just the roles visible
+     * at local quorum or on a single node.
+     */
     public Set<RoleResource> getAllRoles() throws RequestValidationException, RequestExecutionException
     {
         ImmutableSet.Builder<RoleResource> builder = ImmutableSet.builder();
@@ -329,13 +349,8 @@
         {
             if (!hasExistingRoles())
             {
-                QueryProcessor.process(String.format("INSERT INTO %s.%s (role, is_superuser, can_login, salted_hash) " +
-                                                     "VALUES ('%s', true, true, '%s') USING TIMESTAMP 0",
-                                                     SchemaConstants.AUTH_KEYSPACE_NAME,
-                                                     AuthKeyspace.ROLES,
-                                                     DEFAULT_SUPERUSER_NAME,
-                                                     escape(hashpw(DEFAULT_SUPERUSER_PASSWORD))),
-                                       consistencyForRole(DEFAULT_SUPERUSER_NAME));
+                QueryProcessor.process(createDefaultRoleQuery(),
+                                       consistencyForRoleWrite(DEFAULT_SUPERUSER_NAME));
                 logger.info("Created default superuser role '{}'", DEFAULT_SUPERUSER_NAME);
             }
         }
@@ -347,6 +362,16 @@
     }
 
     @VisibleForTesting
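+    // Builds the CQL that inserts the default superuser role at timestamp 0, so any later modification takes precedence.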
+    public static String createDefaultRoleQuery()
+    {
+        return String.format("INSERT INTO %s.%s (role, is_superuser, can_login, salted_hash) VALUES ('%s', true, true, '%s') USING TIMESTAMP 0",
+                             SchemaConstants.AUTH_KEYSPACE_NAME,
+                             AuthKeyspace.ROLES,
+                             DEFAULT_SUPERUSER_NAME,
+                             escape(hashpw(DEFAULT_SUPERUSER_PASSWORD)));
+    }
+
+    @VisibleForTesting
     public static boolean hasExistingRoles() throws RequestExecutionException
     {
         // Try looking up the 'cassandra' default role first, to avoid the range query if possible.
@@ -360,8 +385,7 @@
     protected void scheduleSetupTask(final Callable<Void> setupTask)
     {
         // The delay is to give the node a chance to see its peers before attempting the operation
-        ScheduledExecutors.optionalTasks.schedule(() -> {
-            isClusterReady = true;
+        ScheduledExecutors.optionalTasks.scheduleSelfRecurring(() -> {
             try
             {
                 setupTask.call();
@@ -386,19 +410,22 @@
         }
     }
 
-    private Stream<Role> collectRoles(Role role, boolean includeInherited, Predicate<String> distinctFilter)
+    // Providing a function to fetch the details of granted roles allows us to read from the underlying tables during
+    // normal usage and from a prepopulated in-memory structure when building an initial set of roles to warm
+    // the RolesCache at startup.
+    private Stream<Role> collectRoles(Role role, boolean includeInherited, Predicate<String> distinctFilter, Function<String, Role> loaderFunction)
     {
         if (Roles.isNullRole(role))
             return Stream.empty();
 
         if (!includeInherited)
-            return Stream.concat(Stream.of(role), role.memberOf.stream().map(this::getRole));
+            return Stream.concat(Stream.of(role), role.memberOf.stream().map(loaderFunction));
 
 
         return Stream.concat(Stream.of(role),
                              role.memberOf.stream()
                                           .filter(distinctFilter)
-                                          .flatMap(r -> collectRoles(getRole(r), true, distinctFilter)));
+                                          .flatMap(r -> collectRoles(loaderFunction.apply(r), true, distinctFilter, loaderFunction)));
     }
 
     // Used as a stateful filtering function when recursively collecting granted roles
@@ -415,7 +442,7 @@
      */
     private Role getRole(String name)
     {
-        QueryOptions options = QueryOptions.forInternalCalls(consistencyForRole(name),
+        QueryOptions options = QueryOptions.forInternalCalls(consistencyForRoleRead(name),
                                                              Collections.singletonList(ByteBufferUtil.bytes(name)));
         ResultMessage.Rows rows = select(loadRoleStatement, options);
         if (rows.result.isEmpty())
@@ -437,7 +464,7 @@
                               op,
                               escape(role),
                               escape(grantee)),
-                consistencyForRole(grantee));
+                consistencyForRoleWrite(grantee));
     }
 
     /*
@@ -450,7 +477,7 @@
                                                       SchemaConstants.AUTH_KEYSPACE_NAME,
                                                       AuthKeyspace.ROLE_MEMBERS,
                                                       escape(role)),
-                                        consistencyForRole(role));
+                                        consistencyForRoleRead(role));
         if (rows.isEmpty())
             return;
 
@@ -463,7 +490,7 @@
                               SchemaConstants.AUTH_KEYSPACE_NAME,
                               AuthKeyspace.ROLE_MEMBERS,
                               escape(role)),
-                consistencyForRole(role));
+                consistencyForRoleWrite(role));
     }
 
     /*
@@ -484,6 +511,8 @@
                                        return String.format("is_superuser = %s", entry.getValue());
                                    case PASSWORD:
                                        return String.format("salted_hash = '%s'", escape(hashpw((String) entry.getValue())));
+                                   case HASHED_PASSWORD:
+                                       return String.format("salted_hash = '%s'", (String) entry.getValue());
                                    default:
                                        return null;
                                }
@@ -492,14 +521,6 @@
                       .collect(Collectors.joining(","));
     }
 
-    protected static ConsistencyLevel consistencyForRole(String role)
-    {
-        if (role.equals(DEFAULT_SUPERUSER_NAME))
-            return ConsistencyLevel.QUORUM;
-        else
-            return ConsistencyLevel.LOCAL_ONE;
-    }
-
     private static String hashpw(String password)
     {
         return BCrypt.hashpw(password, BCrypt.gensalt(GENSALT_LOG2_ROUNDS));
@@ -510,6 +531,21 @@
         return StringUtils.replace(name, "'", "''");
     }
 
+    /** Allows selective overriding of the consistency level for specific roles. */
+    protected static ConsistencyLevel consistencyForRoleWrite(String role)
+    {
+        return role.equals(DEFAULT_SUPERUSER_NAME) ?
+               DEFAULT_SUPERUSER_CONSISTENCY_LEVEL :
+               CassandraAuthorizer.authWriteConsistencyLevel();
+    }
+
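+    /** Read counterpart of consistencyForRoleWrite(); the default superuser is always read at QUORUM. */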
+    protected static ConsistencyLevel consistencyForRoleRead(String role)
+    {
+        return role.equals(DEFAULT_SUPERUSER_NAME) ?
+               DEFAULT_SUPERUSER_CONSISTENCY_LEVEL :
+               CassandraAuthorizer.authReadConsistencyLevel();
+    }
+
     /**
      * Executes the provided query.
      * This shouldn't be used during setup as this will directly return an error if the manager is not setup yet. Setup tasks
@@ -519,18 +555,35 @@
     UntypedResultSet process(String query, ConsistencyLevel consistencyLevel)
     throws RequestValidationException, RequestExecutionException
     {
-        if (!isClusterReady)
-            throw new InvalidRequestException("Cannot process role related query as the role manager isn't yet setup. "
-                                            + "This is likely because some of nodes in the cluster are on version 2.1 or earlier. "
-                                            + "You need to upgrade all nodes to Cassandra 2.2 or more to use roles.");
-
         return QueryProcessor.process(query, consistencyLevel);
     }
 
     @VisibleForTesting
     ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
     {
-        return statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+        return statement.execute(forInternalCalls(), options, nanoTime());
     }
 
+    @Override
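+    // Reads the whole roles table once and resolves each role's transitive granted roles to warm the RolesCache at startup.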
+    public Supplier<Map<RoleResource, Set<Role>>> bulkLoader()
+    {
+        return () ->
+        {
+            Map<RoleResource, Set<Role>> entries = new HashMap<>();
+
+            logger.info("Warming roles cache from roles table");
+            UntypedResultSet results = process("SELECT * FROM system_auth.roles", CassandraAuthorizer.authReadConsistencyLevel());
+
+            // Create flat temporary lookup of name -> role mappings
+            Map<String, Role> roles = new HashMap<>();
+            results.forEach(row -> roles.put(row.getString("role"), ROW_TO_ROLE.apply(row)));
+
+            // Iterate the flat structure and populate the fully hierarchical one
+            roles.forEach((key, value) ->
+                          entries.put(RoleResource.role(key),
+                                      collectRoles(value, true, filter(), roles::get).collect(Collectors.toSet()))
+            );
+            return entries;
+        };
+    }
 }
diff --git a/src/java/org/apache/cassandra/auth/DataResource.java b/src/java/org/apache/cassandra/auth/DataResource.java
index c3f5b32..2421930 100644
--- a/src/java/org/apache/cassandra/auth/DataResource.java
+++ b/src/java/org/apache/cassandra/auth/DataResource.java
@@ -31,13 +31,14 @@
  * Used to represent a table or a keyspace or the root level "data" resource.
  * "data"                                 - the root level data resource.
  * "data/keyspace_name"                   - keyspace-level data resource.
+ * "data/keyspace_name/*"                 - all tables-level data resource.
  * "data/keyspace_name/table_name"        - table-level data resource.
  */
 public class DataResource implements IResource
 {
     enum Level
     {
-        ROOT, KEYSPACE, TABLE
+        ROOT, KEYSPACE, ALL_TABLES, TABLE
     }
 
     // permissions which may be granted on tables
@@ -46,6 +47,15 @@
                                                                                          Permission.SELECT,
                                                                                          Permission.MODIFY,
                                                                                          Permission.AUTHORIZE);
+
+    // permissions which may be granted on all tables of a given keyspace
+    private static final Set<Permission> ALL_TABLES_LEVEL_PERMISSIONS = Sets.immutableEnumSet(Permission.CREATE,
+                                                                                              Permission.ALTER,
+                                                                                              Permission.DROP,
+                                                                                              Permission.SELECT,
+                                                                                              Permission.MODIFY,
+                                                                                              Permission.AUTHORIZE);
+
     // permissions which may be granted on one or all keyspaces
     private static final Set<Permission> KEYSPACE_LEVEL_PERMISSIONS = Sets.immutableEnumSet(Permission.CREATE,
                                                                                             Permission.ALTER,
@@ -92,6 +102,17 @@
     }
 
     /**
+     * Creates a DataResource representing all tables of a keyspace.
+     *
+     * @param keyspace Name of the keyspace.
+     * @return DataResource instance representing all tables of the keyspace.
+     */
+    public static DataResource allTables(String keyspace)
+    {
+        return new DataResource(Level.ALL_TABLES, keyspace, null);
+    }
+
+    /**
      * Creates a DataResource instance representing a table.
      *
      * @param keyspace Name of the keyspace.
@@ -122,6 +143,9 @@
         if (parts.length == 2)
             return keyspace(parts[1]);
 
+        if ("*".equals(parts[2]))
+            return allTables(parts[1]);
+
         return table(parts[1], parts[2]);
     }
 
@@ -136,6 +160,8 @@
                 return ROOT_NAME;
             case KEYSPACE:
                 return String.format("%s/%s", ROOT_NAME, keyspace);
+            case ALL_TABLES:
+                return String.format("%s/%s/*", ROOT_NAME, keyspace);
             case TABLE:
                 return String.format("%s/%s/%s", ROOT_NAME, keyspace, table);
         }
@@ -151,8 +177,10 @@
         {
             case KEYSPACE:
                 return root();
-            case TABLE:
+            case ALL_TABLES:
                 return keyspace(keyspace);
+            case TABLE:
+                return allTables(keyspace);
         }
         throw new IllegalStateException("Root-level resource can't have a parent");
     }
@@ -167,6 +195,11 @@
         return level == Level.KEYSPACE;
     }
 
+    public boolean isAllTablesLevel()
+    {
+        return level == Level.ALL_TABLES;
+    }
+
     public boolean isTableLevel()
     {
         return level == Level.TABLE;
@@ -209,6 +242,7 @@
             case ROOT:
                 return true;
             case KEYSPACE:
+            case ALL_TABLES:
                 return Schema.instance.getKeyspaces().contains(keyspace);
             case TABLE:
                 return Schema.instance.getTableMetadata(keyspace, table) != null;
@@ -223,6 +257,8 @@
             case ROOT:
             case KEYSPACE:
                 return KEYSPACE_LEVEL_PERMISSIONS;
+            case ALL_TABLES:
+                return ALL_TABLES_LEVEL_PERMISSIONS;
             case TABLE:
                 return TABLE_LEVEL_PERMISSIONS;
         }
@@ -238,6 +274,8 @@
                 return "<all keyspaces>";
             case KEYSPACE:
                 return String.format("<keyspace %s>", keyspace);
+            case ALL_TABLES:
+                return String.format("<all tables in %s>", keyspace);
             case TABLE:
                 return String.format("<table %s.%s>", keyspace, table);
         }
diff --git a/src/java/org/apache/cassandra/auth/FunctionResource.java b/src/java/org/apache/cassandra/auth/FunctionResource.java
index ef7d99a..a67ef1a 100644
--- a/src/java/org/apache/cassandra/auth/FunctionResource.java
+++ b/src/java/org/apache/cassandra/auth/FunctionResource.java
@@ -158,7 +158,7 @@
                                               "explictly qualified by a keyspace");
         List<AbstractType<?>> abstractTypes = new ArrayList<>(argTypes.size());
         for (CQL3Type.Raw cqlType : argTypes)
-            abstractTypes.add(cqlType.prepare(keyspace).getType());
+            abstractTypes.add(cqlType.prepare(keyspace).getType().udfType());
 
         return new FunctionResource(keyspace, name, abstractTypes);
     }
diff --git a/src/java/org/apache/cassandra/auth/IAuthorizer.java b/src/java/org/apache/cassandra/auth/IAuthorizer.java
index a023e3e..b39e315 100644
--- a/src/java/org/apache/cassandra/auth/IAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/IAuthorizer.java
@@ -22,11 +22,12 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.exceptions.RequestValidationException;
+import org.apache.cassandra.utils.Pair;
 
 /**
  * Primary Cassandra authorization interface.
  */
-public interface IAuthorizer
+public interface IAuthorizer extends AuthCache.BulkLoader<Pair<AuthenticatedUser, IResource>, Set<Permission>>
 {
     /**
      * Whether or not the authorizer will attempt authorization.
@@ -61,12 +62,14 @@
      * @param permissions Set of permissions to grant.
      * @param resource Resource on which to grant the permissions.
      * @param grantee Role to which the permissions are to be granted.
+     * @return the permissions that have been successfully granted, comprised by the requested permissions excluding
+     * those permissions that were already granted.
      *
      * @throws RequestValidationException
      * @throws RequestExecutionException
      * @throws java.lang.UnsupportedOperationException
      */
-    void grant(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource grantee)
+    Set<Permission> grant(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource grantee)
     throws RequestValidationException, RequestExecutionException;
 
     /**
@@ -79,12 +82,14 @@
      * @param permissions Set of permissions to revoke.
      * @param revokee Role from which to the permissions are to be revoked.
      * @param resource Resource on which to revoke the permissions.
+     * @return the permissions that have been successfully revoked, comprised by the requested permissions excluding
+     * those permissions that were already not granted.
      *
      * @throws RequestValidationException
      * @throws RequestExecutionException
      * @throws java.lang.UnsupportedOperationException
      */
-    void revoke(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource revokee)
+    Set<Permission> revoke(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource revokee)
     throws RequestValidationException, RequestExecutionException;
 
     /**
diff --git a/src/java/org/apache/cassandra/auth/INetworkAuthorizer.java b/src/java/org/apache/cassandra/auth/INetworkAuthorizer.java
index 4582b5e..9f815ac 100644
--- a/src/java/org/apache/cassandra/auth/INetworkAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/INetworkAuthorizer.java
@@ -20,7 +20,7 @@
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 
-public interface INetworkAuthorizer
+public interface INetworkAuthorizer extends AuthCache.BulkLoader<RoleResource, DCPermissions>
 {
     /**
      * Whether or not the authorizer will attempt authorization.
@@ -46,7 +46,7 @@
     void setRoleDatacenters(RoleResource role, DCPermissions permissions);
 
     /**
-     * Called when a role is deleted, so any corresponding network auth
+     * Called when a role is deleted, so any corresponding network permissions
      * data can also be cleaned up
      */
     void drop(RoleResource role);
diff --git a/src/java/org/apache/cassandra/auth/IRoleManager.java b/src/java/org/apache/cassandra/auth/IRoleManager.java
index 1d47bee..688d5bb 100644
--- a/src/java/org/apache/cassandra/auth/IRoleManager.java
+++ b/src/java/org/apache/cassandra/auth/IRoleManager.java
@@ -31,7 +31,7 @@
  * alteration and the granting and revoking of roles to other
  * roles.
  */
-public interface IRoleManager
+public interface IRoleManager extends AuthCache.BulkLoader<RoleResource, Set<Role>>
 {
 
     /**
@@ -41,7 +41,7 @@
      */
     public enum Option
     {
-        SUPERUSER, PASSWORD, LOGIN, OPTIONS
+        SUPERUSER, PASSWORD, LOGIN, OPTIONS, HASHED_PASSWORD
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/auth/NetworkAuthCache.java b/src/java/org/apache/cassandra/auth/NetworkAuthCache.java
deleted file mode 100644
index 6b3c74e..0000000
--- a/src/java/org/apache/cassandra/auth/NetworkAuthCache.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.auth;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-
-public class NetworkAuthCache extends AuthCache<RoleResource, DCPermissions>
-{
-    public NetworkAuthCache(INetworkAuthorizer authorizer)
-    {
-        super("NetworkAuthCache",
-              DatabaseDescriptor::setRolesValidity,
-              DatabaseDescriptor::getRolesValidity,
-              DatabaseDescriptor::setRolesUpdateInterval,
-              DatabaseDescriptor::getRolesUpdateInterval,
-              DatabaseDescriptor::setRolesCacheMaxEntries,
-              DatabaseDescriptor::getRolesCacheMaxEntries,
-              authorizer::authorize,
-              () -> DatabaseDescriptor.getAuthenticator().requireAuthentication());
-    }
-}
diff --git a/src/java/org/apache/cassandra/auth/NetworkPermissionsCache.java b/src/java/org/apache/cassandra/auth/NetworkPermissionsCache.java
new file mode 100644
index 0000000..1c18fed
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/NetworkPermissionsCache.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.utils.MBeanWrapper;
+
+public class NetworkPermissionsCache extends AuthCache<RoleResource, DCPermissions> implements NetworkPermissionsCacheMBean
+{
+    public NetworkPermissionsCache(INetworkAuthorizer authorizer)
+    {
+        super(CACHE_NAME,
+              DatabaseDescriptor::setRolesValidity,
+              DatabaseDescriptor::getRolesValidity,
+              DatabaseDescriptor::setRolesUpdateInterval,
+              DatabaseDescriptor::getRolesUpdateInterval,
+              DatabaseDescriptor::setRolesCacheMaxEntries,
+              DatabaseDescriptor::getRolesCacheMaxEntries,
+              DatabaseDescriptor::setRolesCacheActiveUpdate,
+              DatabaseDescriptor::getRolesCacheActiveUpdate,
+              authorizer::authorize,
+              authorizer.bulkLoader(),
+              () -> DatabaseDescriptor.getAuthenticator().requireAuthentication());
+
+        MBeanWrapper.instance.registerMBean(this, MBEAN_NAME_BASE + DEPRECATED_CACHE_NAME);
+    }
+
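+    /** Invalidates the cached datacenter permissions for the given role. */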
+    public void invalidateNetworkPermissions(String roleName)
+    {
+        invalidate(RoleResource.role(roleName));
+    }
+
+    @Override
+    protected void unregisterMBean()
+    {
+        super.unregisterMBean();
+        MBeanWrapper.instance.unregisterMBean(MBEAN_NAME_BASE + DEPRECATED_CACHE_NAME, MBeanWrapper.OnException.LOG);
+    }
+}
diff --git a/src/java/org/apache/cassandra/auth/NetworkPermissionsCacheMBean.java b/src/java/org/apache/cassandra/auth/NetworkPermissionsCacheMBean.java
new file mode 100644
index 0000000..b0e72b0
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/NetworkPermissionsCacheMBean.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+public interface NetworkPermissionsCacheMBean extends AuthCacheMBean
+{
+    public static final String CACHE_NAME = "NetworkPermissionsCache";
+    @Deprecated
+    public static final String DEPRECATED_CACHE_NAME = "NetworkAuthCache";
+
+    public void invalidateNetworkPermissions(String roleName);
+}
diff --git a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
index 9da99a9..0ce96d8 100644
--- a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
+++ b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
@@ -20,15 +20,19 @@
 import java.net.InetAddress;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Supplier;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.cql3.QueryOptions;
@@ -43,7 +47,8 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.mindrot.jbcrypt.BCrypt;
 
-import static org.apache.cassandra.auth.CassandraRoleManager.consistencyForRole;
+import static org.apache.cassandra.auth.CassandraRoleManager.consistencyForRoleRead;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * PasswordAuthenticator is an IAuthenticator implementation
@@ -54,10 +59,13 @@
  * PasswordAuthenticator requires the use of CassandraRoleManager
  * for storage and retrieval of encrypted passwords.
  */
-public class PasswordAuthenticator implements IAuthenticator
+public class PasswordAuthenticator implements IAuthenticator, AuthCache.BulkLoader<String, String>
 {
     private static final Logger logger = LoggerFactory.getLogger(PasswordAuthenticator.class);
 
+    /** We intentionally use an empty string sentinel so it can be detected via object (reference) equality */
+    private static final String NO_SUCH_CREDENTIAL = "";
+
     // name of the hash column.
     private static final String SALTED_HASH = "salted_hash";
 
@@ -68,7 +76,13 @@
     static final byte NUL = 0;
     private SelectStatement authenticateStatement;
 
-    private CredentialsCache cache;
+    private final CredentialsCache cache;
+
+    public PasswordAuthenticator()
+    {
+        cache = new CredentialsCache(this);
+        AuthCacheService.instance.register(cache);
+    }
 
     // No anonymous access.
     public boolean requireAuthentication()
@@ -76,6 +90,30 @@
         return true;
     }
 
+    @Override
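+    // Loads role name -> salted hash entries from the roles table to warm the credentials cache at startup.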
+    public Supplier<Map<String, String>> bulkLoader()
+    {
+        return () ->
+        {
+            Map<String, String> entries = new HashMap<>();
+
+            logger.info("Pre-warming credentials cache from roles table");
+            UntypedResultSet results = process("SELECT role, salted_hash FROM system_auth.roles", CassandraAuthorizer.authReadConsistencyLevel());
+            results.forEach(row -> {
+                if (row.has("salted_hash"))
+                {
+                    entries.put(row.getString("role"), row.getString("salted_hash"));
+                }
+            });
+            return entries;
+        };
+    }
+
+    public CredentialsCache getCredentialsCache()
+    {
+        return cache;
+    }
+
     protected static boolean checkpw(String password, String hash)
     {
         try
@@ -90,9 +128,39 @@
         }
     }
 
+    /**
+     * This is exposed so we can override the consistency level for single-node tests
+     */
+    @VisibleForTesting
+    UntypedResultSet process(String query, ConsistencyLevel cl)
+    {
+        return QueryProcessor.process(query, cl);
+    }
+
     private AuthenticatedUser authenticate(String username, String password) throws AuthenticationException
     {
         String hash = cache.get(username);
+
+        // intentional use of object equality
+        if (hash == NO_SUCH_CREDENTIAL)
+        {
+            // The cache was unable to load credentials via queryHashedPassword, probably because the supplied
+            // rolename doesn't exist. If caching is enabled, we will have now cached the sentinel value for that key,
+            // so we should invalidate it; otherwise the cache will continue to serve the sentinel until it expires,
+            // which will be a problem if the role is added in the meantime.
+            //
+            // We can't just throw the AuthenticationException directly from queryHashedPassword for a similar reason:
+            // if an existing role is dropped and active updates are enabled for the cache, the refresh in
+            // CacheRefresher::run will log and swallow the exception and keep serving the stale credentials until they
+            // eventually expire.
+            //
+            // So whenever we encounter the sentinel value, here and also in CacheRefresher (if active updates are
+            // enabled), we manually expunge the key from the cache. If caching is not enabled, AuthCache::invalidate
+            // is a safe no-op.
+            cache.invalidateCredentials(username);
+            throw new AuthenticationException(String.format("Provided username %s and/or password are incorrect", username));
+        }
+
         if (!checkpw(password, hash))
             throw new AuthenticationException(String.format("Provided username %s and/or password are incorrect", username));
 
@@ -103,21 +171,21 @@
     {
         try
         {
-            ResultMessage.Rows rows =
-            authenticateStatement.execute(QueryState.forInternalCalls(),
-                                            QueryOptions.forInternalCalls(consistencyForRole(username),
-                                                                          Lists.newArrayList(ByteBufferUtil.bytes(username))),
-                                            System.nanoTime());
+            QueryOptions options = QueryOptions.forInternalCalls(consistencyForRoleRead(username),
+                    Lists.newArrayList(ByteBufferUtil.bytes(username)));
+
+            ResultMessage.Rows rows = select(authenticateStatement, options);
 
             // If either a non-existent role name was supplied, or no credentials
-            // were found for that role we don't want to cache the result so we throw
-            // an exception.
+            // were found for that role, we don't want to cache the result so we
+            // return a sentinel value. On receiving the sentinel, the caller can
+            // invalidate the cache and throw an appropriate exception.
             if (rows.result.isEmpty())
-                throw new AuthenticationException(String.format("Provided username %s and/or password are incorrect", username));
+                return NO_SUCH_CREDENTIAL;
 
             UntypedResultSet result = UntypedResultSet.create(rows.result);
             if (!result.one().has(SALTED_HASH))
-                throw new AuthenticationException(String.format("Provided username %s and/or password are incorrect", username));
+                return NO_SUCH_CREDENTIAL;
 
             return result.one().getString(SALTED_HASH);
         }
@@ -127,6 +195,12 @@
         }
     }
 
+    @VisibleForTesting
+    ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
+    {
+        return statement.execute(QueryState.forInternalCalls(), options, nanoTime());
+    }
+
     public Set<DataResource> protectedResources()
     {
         // Also protected by CassandraRoleManager, but the duplication doesn't hurt and is more explicit
@@ -144,8 +218,6 @@
                                      SchemaConstants.AUTH_KEYSPACE_NAME,
                                      AuthKeyspace.ROLES);
         authenticateStatement = prepare(query);
-
-        cache = new CredentialsCache(this);
     }
 
     public AuthenticatedUser legacyAuthenticate(Map<String, String> credentials) throws AuthenticationException
@@ -239,19 +311,24 @@
         }
     }
 
-    private static class CredentialsCache extends AuthCache<String, String> implements CredentialsCacheMBean
+    public static class CredentialsCache extends AuthCache<String, String> implements CredentialsCacheMBean
     {
         private CredentialsCache(PasswordAuthenticator authenticator)
         {
-            super("CredentialsCache",
+            super(CACHE_NAME,
                   DatabaseDescriptor::setCredentialsValidity,
                   DatabaseDescriptor::getCredentialsValidity,
                   DatabaseDescriptor::setCredentialsUpdateInterval,
                   DatabaseDescriptor::getCredentialsUpdateInterval,
                   DatabaseDescriptor::setCredentialsCacheMaxEntries,
                   DatabaseDescriptor::getCredentialsCacheMaxEntries,
+                  DatabaseDescriptor::setCredentialsCacheActiveUpdate,
+                  DatabaseDescriptor::getCredentialsCacheActiveUpdate,
                   authenticator::queryHashedPassword,
-                  () -> true);
+                  authenticator.bulkLoader(),
+                  () -> true,
+                  (k,v) -> NO_SUCH_CREDENTIAL == v); // use a known object as a sentinel value. CacheRefresher will
+                                                     // invalidate the key if the sentinel is loaded during a refresh
         }
 
         public void invalidateCredentials(String roleName)
@@ -262,6 +339,8 @@
 
     public static interface CredentialsCacheMBean extends AuthCacheMBean
     {
+        public static final String CACHE_NAME = "CredentialsCache";
+
         public void invalidateCredentials(String roleName);
     }
 }
diff --git a/src/java/org/apache/cassandra/auth/PermissionsCache.java b/src/java/org/apache/cassandra/auth/PermissionsCache.java
index a33f5d1..0757b5e 100644
--- a/src/java/org/apache/cassandra/auth/PermissionsCache.java
+++ b/src/java/org/apache/cassandra/auth/PermissionsCache.java
@@ -23,22 +23,31 @@
 import org.apache.cassandra.utils.Pair;
 
 public class PermissionsCache extends AuthCache<Pair<AuthenticatedUser, IResource>, Set<Permission>>
+        implements PermissionsCacheMBean
 {
     public PermissionsCache(IAuthorizer authorizer)
     {
-        super("PermissionsCache",
+        super(CACHE_NAME,
               DatabaseDescriptor::setPermissionsValidity,
               DatabaseDescriptor::getPermissionsValidity,
               DatabaseDescriptor::setPermissionsUpdateInterval,
               DatabaseDescriptor::getPermissionsUpdateInterval,
               DatabaseDescriptor::setPermissionsCacheMaxEntries,
               DatabaseDescriptor::getPermissionsCacheMaxEntries,
+              DatabaseDescriptor::setPermissionsCacheActiveUpdate,
+              DatabaseDescriptor::getPermissionsCacheActiveUpdate,
               (p) -> authorizer.authorize(p.left, p.right),
-              () -> DatabaseDescriptor.getAuthorizer().requireAuthorization());
+              authorizer.bulkLoader(),
+              authorizer::requireAuthorization);
     }
 
     public Set<Permission> getPermissions(AuthenticatedUser user, IResource resource)
     {
         return get(Pair.create(user, resource));
     }
+
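+    /** Invalidates the cached permissions of the given role on the given resource. */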
+    public void invalidatePermissions(String roleName, String resourceName)
+    {
+        invalidate(Pair.create(new AuthenticatedUser(roleName), Resources.fromName(resourceName)));
+    }
 }
diff --git a/src/java/org/apache/cassandra/auth/PermissionsCacheMBean.java b/src/java/org/apache/cassandra/auth/PermissionsCacheMBean.java
new file mode 100644
index 0000000..4d55c37
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/PermissionsCacheMBean.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+public interface PermissionsCacheMBean extends AuthCacheMBean
+{
+    public static final String CACHE_NAME = "PermissionsCache";
+
+    public void invalidatePermissions(String roleName, String resourceName);
+}
diff --git a/src/java/org/apache/cassandra/auth/Role.java b/src/java/org/apache/cassandra/auth/Role.java
index e98cc7d..5c9140a 100644
--- a/src/java/org/apache/cassandra/auth/Role.java
+++ b/src/java/org/apache/cassandra/auth/Role.java
@@ -34,7 +34,7 @@
      * for IRoleManager implementations (in particular, CassandraRoleManager)
      */
 
-    public final RoleResource resource ;
+    public final RoleResource resource;
     public final boolean isSuper;
     public final boolean canLogin;
     public final Set<String> memberOf;
diff --git a/src/java/org/apache/cassandra/auth/RoleOptions.java b/src/java/org/apache/cassandra/auth/RoleOptions.java
index 1205d34..c3ec56c 100644
--- a/src/java/org/apache/cassandra/auth/RoleOptions.java
+++ b/src/java/org/apache/cassandra/auth/RoleOptions.java
@@ -19,13 +19,13 @@
 
 import java.util.HashMap;
 import java.util.Map;
-
-import com.google.common.base.Optional;
+import java.util.Optional;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.utils.FBUtilities;
+import org.mindrot.jbcrypt.BCrypt;
 
 public class RoleOptions
 {
@@ -68,7 +68,7 @@
      */
     public Optional<Boolean> getSuperuser()
     {
-        return Optional.fromNullable((Boolean)options.get(IRoleManager.Option.SUPERUSER));
+        return Optional.ofNullable((Boolean) options.get(IRoleManager.Option.SUPERUSER));
     }
 
     /**
@@ -77,7 +77,7 @@
      */
     public Optional<Boolean> getLogin()
     {
-        return Optional.fromNullable((Boolean)options.get(IRoleManager.Option.LOGIN));
+        return Optional.ofNullable((Boolean) options.get(IRoleManager.Option.LOGIN));
     }
 
     /**
@@ -86,7 +86,16 @@
      */
     public Optional<String> getPassword()
     {
-        return Optional.fromNullable((String)options.get(IRoleManager.Option.PASSWORD));
+        return Optional.ofNullable((String) options.get(IRoleManager.Option.PASSWORD));
+    }
+
+    /**
+     * Return the string value of the hashed password option.
+     * @return hashed password option value
+     */
+    public Optional<String> getHashedPassword()
+    {
+        return Optional.ofNullable((String) options.get(IRoleManager.Option.HASHED_PASSWORD));
     }
 
     /**
@@ -99,7 +108,7 @@
     @SuppressWarnings("unchecked")
     public Optional<Map<String, String>> getCustomOptions()
     {
-        return Optional.fromNullable((Map<String, String>)options.get(IRoleManager.Option.OPTIONS));
+        return Optional.ofNullable((Map<String, String>) options.get(IRoleManager.Option.OPTIONS));
     }
 
     /**
@@ -134,6 +143,26 @@
                         throw new InvalidRequestException(String.format("Invalid value for property '%s'. " +
                                                                         "It must be a string",
                                                                         option.getKey()));
+                    if (options.containsKey(IRoleManager.Option.HASHED_PASSWORD))
+                        throw new InvalidRequestException(String.format("Properties '%s' and '%s' are mutually exclusive",
+                                                                        IRoleManager.Option.PASSWORD, IRoleManager.Option.HASHED_PASSWORD));
+                    break;
+                case HASHED_PASSWORD:
+                    if (!(option.getValue() instanceof String))
+                        throw new InvalidRequestException(String.format("Invalid value for property '%s'. " +
+                                                                        "It must be a string",
+                                                                        option.getKey()));
+                    if (options.containsKey(IRoleManager.Option.PASSWORD))
+                        throw new InvalidRequestException(String.format("Properties '%s' and '%s' are mutually exclusive",
+                                                                        IRoleManager.Option.PASSWORD, IRoleManager.Option.HASHED_PASSWORD));
+                    try
+                    {
+                        BCrypt.checkpw("dummy", (String) option.getValue());
+                    }
+                    catch (Exception e)
+                    {
+                        throw new InvalidRequestException("Invalid hashed password value. Please use jBcrypt.");
+                    }
                     break;
                 case OPTIONS:
                     if (!(option.getValue() instanceof Map))
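
The new HASHED_PASSWORD option is accepted only if BCrypt.checkpw can parse the supplied value, so any well-formed jBCrypt hash passes validation. A minimal sketch of producing such a hash with the same library the patch imports (org.mindrot.jbcrypt):

    import org.mindrot.jbcrypt.BCrypt;

    public class HashedPasswordSketch
    {
        public static void main(String[] args)
        {
            // Salted bcrypt hash; 10 is jBCrypt's default work factor.
            String hash = BCrypt.hashpw("s3cret", BCrypt.gensalt(10));
            System.out.println(hash); // e.g. $2a$10$...

            // RoleOptions.validate() only requires that checkpw can parse the hash;
            // a mismatching plaintext returns false rather than throwing.
            System.out.println(BCrypt.checkpw("dummy", hash));  // false
            System.out.println(BCrypt.checkpw("s3cret", hash)); // true
        }
    }
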
diff --git a/src/java/org/apache/cassandra/auth/Roles.java b/src/java/org/apache/cassandra/auth/Roles.java
index 527451e..f18851a 100644
--- a/src/java/org/apache/cassandra/auth/Roles.java
+++ b/src/java/org/apache/cassandra/auth/Roles.java
@@ -20,11 +20,8 @@
 import java.util.Collections;
 import java.util.Map;
 import java.util.Set;
-import java.util.function.BooleanSupplier;
 import java.util.stream.Collectors;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -38,25 +35,12 @@
 
     private static final Role NO_ROLE = new Role("", false, false, Collections.emptyMap(), Collections.emptySet());
 
-    private static RolesCache cache;
-    static
-    {
-        initRolesCache(DatabaseDescriptor.getRoleManager(),
-                       () -> DatabaseDescriptor.getAuthenticator().requireAuthentication());
-    }
+    public static final RolesCache cache = new RolesCache(DatabaseDescriptor.getRoleManager(), () -> DatabaseDescriptor.getAuthenticator().requireAuthentication());
 
-    @VisibleForTesting
-    public static void initRolesCache(IRoleManager roleManager, BooleanSupplier enableCache)
+    /** Use {@link AuthCacheService#initializeAndRegisterCaches} rather than calling this directly */
+    public static void init()
     {
-        if (cache != null)
-            cache.unregisterMBean();
-        cache = new RolesCache(roleManager, enableCache);
-    }
-
-    @VisibleForTesting
-    public static void clearCache()
-    {
-        cache.invalidate();
+        AuthCacheService.instance.register(cache);
     }
 
     /**
@@ -77,7 +61,7 @@
      * Get detailed info on all the roles granted to the role identified by the supplied RoleResource.
      * This includes superuser status and login privileges for the primary role and all roles granted directly
      * to it or inherited.
-     * The returnred roles may be cached if roles_validity_in_ms > 0
+     * The returned roles may be cached if roles_validity > 0
      * This method is used where we need to know specific attributes of the collection of granted roles, i.e.
      * when checking for superuser status which may be inherited from *any* granted role.
      *
@@ -90,6 +74,15 @@
     }
 
     /**
+     * Enumerate all the roles in the system. Preferably these will be fetched from the cache, which in turn
+     * may have been warmed during startup.
+     */
+    public static Set<RoleResource> getAllRoles()
+    {
+        return cache.getAllRoles();
+    }
+
+    /**
      * Returns true if the supplied role or any other role granted to it
      * (directly or indirectly) has superuser status.
      *
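
With the RolesCache now created eagerly and registered via Roles.init(), callers can enumerate roles through the same static entry points. A minimal sketch, assuming the auth subsystem is already configured and that RoleResource exposes getRoleName():

    import java.util.Set;

    import org.apache.cassandra.auth.RoleResource;
    import org.apache.cassandra.auth.Roles;

    public class RolesEnumerationSketch
    {
        public static void demo()
        {
            // Normally invoked indirectly via AuthCacheService.initializeAndRegisterCaches
            // during startup; registering makes the cache eligible for warming.
            Roles.init();

            // Enumerate every role; per the javadoc above, these are preferably served
            // from a cache that may have been warmed during startup.
            Set<RoleResource> roles = Roles.getAllRoles();
            for (RoleResource role : roles)
                System.out.println(role.getRoleName());
        }
    }
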
diff --git a/src/java/org/apache/cassandra/auth/RolesCache.java b/src/java/org/apache/cassandra/auth/RolesCache.java
index d01de63..d34de19 100644
--- a/src/java/org/apache/cassandra/auth/RolesCache.java
+++ b/src/java/org/apache/cassandra/auth/RolesCache.java
@@ -23,19 +23,25 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 
-public class RolesCache extends AuthCache<RoleResource, Set<Role>>
+public class RolesCache extends AuthCache<RoleResource, Set<Role>> implements RolesCacheMBean
 {
+    private final IRoleManager roleManager;
+
     public RolesCache(IRoleManager roleManager, BooleanSupplier enableCache)
     {
-        super("RolesCache",
+        super(CACHE_NAME,
               DatabaseDescriptor::setRolesValidity,
               DatabaseDescriptor::getRolesValidity,
               DatabaseDescriptor::setRolesUpdateInterval,
               DatabaseDescriptor::getRolesUpdateInterval,
               DatabaseDescriptor::setRolesCacheMaxEntries,
               DatabaseDescriptor::getRolesCacheMaxEntries,
+              DatabaseDescriptor::setRolesCacheActiveUpdate,
+              DatabaseDescriptor::getRolesCacheActiveUpdate,
               roleManager::getRoleDetails,
+              roleManager.bulkLoader(),
               enableCache);
+        this.roleManager = roleManager;
     }
 
     /**
@@ -62,4 +68,18 @@
     {
         return get(primaryRole);
     }
+
+    Set<RoleResource> getAllRoles()
+    {
+        // This method may seem unnecessary, as it is only called from Roles::getAllRoles,
+        // but it lets us inject the IRoleManager into this class, making testing possible. If
+        // we dropped this method and did everything in Roles, we'd be dependent on the IRoleManager
+        // implementation supplied by DatabaseDescriptor.
+        return roleManager.getAllRoles();
+    }
+
+    public void invalidateRoles(String roleName)
+    {
+        invalidate(RoleResource.role(roleName));
+    }
 }
diff --git a/src/java/org/apache/cassandra/auth/RolesCacheMBean.java b/src/java/org/apache/cassandra/auth/RolesCacheMBean.java
new file mode 100644
index 0000000..18b3c40
--- /dev/null
+++ b/src/java/org/apache/cassandra/auth/RolesCacheMBean.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+public interface RolesCacheMBean extends AuthCacheMBean
+{
+    public static final String CACHE_NAME = "RolesCache";
+
+    void invalidateRoles(String roleName);
+}
diff --git a/src/java/org/apache/cassandra/auth/jmx/AuthorizationProxy.java b/src/java/org/apache/cassandra/auth/jmx/AuthorizationProxy.java
index 36c552c..afc8b46 100644
--- a/src/java/org/apache/cassandra/auth/jmx/AuthorizationProxy.java
+++ b/src/java/org/apache/cassandra/auth/jmx/AuthorizationProxy.java
@@ -22,6 +22,7 @@
 import java.security.AccessControlContext;
 import java.security.AccessController;
 import java.security.Principal;
+import java.util.Collections;
 import java.util.Set;
 import java.util.function.BooleanSupplier;
 import java.util.function.Function;
@@ -41,6 +42,7 @@
 import org.apache.cassandra.auth.*;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.MBeanWrapper;
 
 /**
  * Provides a proxy interface to the platform's MBeanServer instance to perform
@@ -104,7 +106,7 @@
                                                                       "registerMBean",
                                                                       "unregisterMBean");
 
-    private static final JMXPermissionsCache permissionsCache = new JMXPermissionsCache();
+    public static final JmxPermissionsCache jmxPermissionsCache = new JmxPermissionsCache();
     private MBeanServer mbs;
 
     /*
@@ -118,7 +120,7 @@
      the permissions from the local cache, which in turn loads them from the configured IAuthorizer
      but can be overridden for testing.
      */
-    protected Function<RoleResource, Set<PermissionDetails>> getPermissions = permissionsCache::get;
+    protected Function<RoleResource, Set<PermissionDetails>> getPermissions = jmxPermissionsCache::get;
 
     /*
      Used to decide whether authorization is enabled or not, usually this depends on the configured
@@ -187,7 +189,7 @@
      *             as an invocation of a method on the MBeanServer.
      */
     @VisibleForTesting
-    boolean authorize(Subject subject, String methodName, Object[] args)
+    public boolean authorize(Subject subject, String methodName, Object[] args)
     {
         logger.trace("Authorizing JMX method invocation {} for {}",
                      methodName,
@@ -546,19 +548,46 @@
             throw new SecurityException("Access is denied!");
     }
 
-    private static final class JMXPermissionsCache extends AuthCache<RoleResource, Set<PermissionDetails>>
+    public static final class JmxPermissionsCache extends AuthCache<RoleResource, Set<PermissionDetails>>
+        implements JmxPermissionsCacheMBean
     {
-        protected JMXPermissionsCache()
+        protected JmxPermissionsCache()
         {
-            super("JMXPermissionsCache",
+            super(CACHE_NAME,
                   DatabaseDescriptor::setPermissionsValidity,
                   DatabaseDescriptor::getPermissionsValidity,
                   DatabaseDescriptor::setPermissionsUpdateInterval,
                   DatabaseDescriptor::getPermissionsUpdateInterval,
                   DatabaseDescriptor::setPermissionsCacheMaxEntries,
                   DatabaseDescriptor::getPermissionsCacheMaxEntries,
+                  DatabaseDescriptor::setPermissionsCacheActiveUpdate,
+                  DatabaseDescriptor::getPermissionsCacheActiveUpdate,
                   AuthorizationProxy::loadPermissions,
+                  Collections::emptyMap,
                   () -> true);
+
+            MBeanWrapper.instance.registerMBean(this, MBEAN_NAME_BASE + DEPRECATED_CACHE_NAME);
         }
+
+        public void invalidatePermissions(String roleName)
+        {
+            invalidate(RoleResource.role(roleName));
+        }
+
+        @Override
+        protected void unregisterMBean()
+        {
+            super.unregisterMBean();
+            MBeanWrapper.instance.unregisterMBean(MBEAN_NAME_BASE + DEPRECATED_CACHE_NAME, MBeanWrapper.OnException.LOG);
+        }
+    }
+
+    public static interface JmxPermissionsCacheMBean extends AuthCacheMBean
+    {
+        public static final String CACHE_NAME = "JmxPermissionsCache";
+        @Deprecated
+        public static final String DEPRECATED_CACHE_NAME = "JMXPermissionsCache";
+
+        public void invalidatePermissions(String roleName);
     }
 }
diff --git a/src/java/org/apache/cassandra/batchlog/Batch.java b/src/java/org/apache/cassandra/batchlog/Batch.java
index 7b205e0..b5a6288 100644
--- a/src/java/org/apache/cassandra/batchlog/Batch.java
+++ b/src/java/org/apache/cassandra/batchlog/Batch.java
@@ -29,7 +29,7 @@
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.db.TypeSizes.sizeof;
 import static org.apache.cassandra.db.TypeSizes.sizeofUnsignedVInt;
@@ -38,14 +38,14 @@
 {
     public static final Serializer serializer = new Serializer();
 
-    public final UUID id;
+    public final TimeUUID id;
     public final long creationTime; // time of batch creation (in microseconds)
 
     // one of these will always be empty
     final Collection<Mutation> decodedMutations;
     final Collection<ByteBuffer> encodedMutations;
 
-    private Batch(UUID id, long creationTime, Collection<Mutation> decodedMutations, Collection<ByteBuffer> encodedMutations)
+    private Batch(TimeUUID id, long creationTime, Collection<Mutation> decodedMutations, Collection<ByteBuffer> encodedMutations)
     {
         this.id = id;
         this.creationTime = creationTime;
@@ -57,7 +57,7 @@
     /**
      * Creates a 'local' batch - with all enclosed mutations in decoded form (as Mutation instances)
      */
-    public static Batch createLocal(UUID id, long creationTime, Collection<Mutation> mutations)
+    public static Batch createLocal(TimeUUID id, long creationTime, Collection<Mutation> mutations)
     {
         return new Batch(id, creationTime, mutations, Collections.emptyList());
     }
@@ -68,7 +68,7 @@
      * The mutations will always be encoded using the current messaging version.
      */
     @SuppressWarnings("RedundantTypeArguments")
-    public static Batch createRemote(UUID id, long creationTime, Collection<ByteBuffer> mutations)
+    public static Batch createRemote(TimeUUID id, long creationTime, Collection<ByteBuffer> mutations)
     {
         return new Batch(id, creationTime, Collections.<Mutation>emptyList(), mutations);
     }
@@ -105,7 +105,7 @@
         {
             assert batch.isLocal() : "attempted to serialize a 'remote' batch";
 
-            long size = UUIDSerializer.serializer.serializedSize(batch.id, version);
+            long size = TimeUUID.sizeInBytes();
             size += sizeof(batch.creationTime);
 
             size += sizeofUnsignedVInt(batch.decodedMutations.size());
@@ -123,7 +123,7 @@
         {
             assert batch.isLocal() : "attempted to serialize a 'remote' batch";
 
-            UUIDSerializer.serializer.serialize(batch.id, out, version);
+            batch.id.serialize(out);
             out.writeLong(batch.creationTime);
 
             out.writeUnsignedVInt(batch.decodedMutations.size());
@@ -136,7 +136,7 @@
 
         public Batch deserialize(DataInputPlus in, int version) throws IOException
         {
-            UUID id = UUIDSerializer.serializer.deserialize(in, version);
+            TimeUUID id = TimeUUID.deserialize(in);
             long creationTime = in.readLong();
 
             /*
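
Batch ids move from UUID to TimeUUID, which serializes to a fixed number of bytes and carries its creation time directly. A minimal round-trip sketch using only calls that appear in this patch, plus the DataOutputBuffer/DataInputBuffer helpers (their getLength(), buffer() and two-argument constructor are assumed here):

    import java.util.concurrent.TimeUnit;

    import org.apache.cassandra.io.util.DataInputBuffer;
    import org.apache.cassandra.io.util.DataOutputBuffer;
    import org.apache.cassandra.utils.TimeUUID;

    import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;

    public class TimeUUIDBatchIdSketch
    {
        public static void demo() throws Exception
        {
            // A TimeUUID knows when it was generated, replacing UUIDGen.unixTimestamp(id).
            TimeUUID id = nextTimeUUID();
            System.out.println("created at " + id.unix(TimeUnit.MILLISECONDS) + " ms");

            // Serialization is fixed-size and no longer goes through UUIDSerializer.
            try (DataOutputBuffer out = new DataOutputBuffer())
            {
                id.serialize(out);
                assert out.getLength() == TimeUUID.sizeInBytes();

                try (DataInputBuffer in = new DataInputBuffer(out.buffer(), false))
                {
                    TimeUUID roundTripped = TimeUUID.deserialize(in);
                    assert roundTripped.equals(id);
                }
            }
        }
    }
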
diff --git a/src/java/org/apache/cassandra/batchlog/BatchRemoveVerbHandler.java b/src/java/org/apache/cassandra/batchlog/BatchRemoveVerbHandler.java
index 3443cab..89b8aa8 100644
--- a/src/java/org/apache/cassandra/batchlog/BatchRemoveVerbHandler.java
+++ b/src/java/org/apache/cassandra/batchlog/BatchRemoveVerbHandler.java
@@ -17,16 +17,15 @@
  */
 package org.apache.cassandra.batchlog;
 
-import java.util.UUID;
-
 import org.apache.cassandra.net.IVerbHandler;
 import org.apache.cassandra.net.Message;
+import org.apache.cassandra.utils.TimeUUID;
 
-public final class BatchRemoveVerbHandler implements IVerbHandler<UUID>
+public final class BatchRemoveVerbHandler implements IVerbHandler<TimeUUID>
 {
     public static final BatchRemoveVerbHandler instance = new BatchRemoveVerbHandler();
 
-    public void doVerb(Message<UUID> message)
+    public void doVerb(Message<TimeUUID> message)
     {
         BatchlogManager.remove(message.payload);
     }
diff --git a/src/java/org/apache/cassandra/batchlog/BatchlogManager.java b/src/java/org/apache/cassandra/batchlog/BatchlogManager.java
index 0f22b4b..6d102b0 100644
--- a/src/java/org/apache/cassandra/batchlog/BatchlogManager.java
+++ b/src/java/org/apache/cassandra/batchlog/BatchlogManager.java
@@ -28,19 +28,19 @@
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.function.Supplier;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Iterables;
 import com.google.common.util.concurrent.RateLimiter;
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -50,7 +50,6 @@
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.db.WriteType;
 import org.apache.cassandra.db.marshal.BytesType;
-import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.WriteFailureException;
@@ -75,12 +74,14 @@
 import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MBeanWrapper;
-import org.apache.cassandra.utils.UUIDGen;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternalWithPaging;
 import static org.apache.cassandra.net.Verb.MUTATION_REQ;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class BatchlogManager implements BatchlogManagerMBean
 {
@@ -93,18 +94,16 @@
     public static final long BATCHLOG_REPLAY_TIMEOUT = Long.getLong("cassandra.batchlog.replay_timeout_in_ms", DatabaseDescriptor.getWriteRpcTimeout(MILLISECONDS) * 2);
 
     private volatile long totalBatchesReplayed = 0; // no concurrency protection necessary as only written by replay thread.
-    private volatile UUID lastReplayedUuid = UUIDGen.minTimeUUID(0);
+    private volatile TimeUUID lastReplayedUuid = TimeUUID.minAtUnixMillis(0);
 
     // Single-thread executor service for scheduling and serializing log replay.
-    private final ScheduledExecutorService batchlogTasks;
+    private final ScheduledExecutorPlus batchlogTasks;
 
     private final RateLimiter rateLimiter = RateLimiter.create(Double.MAX_VALUE);
 
     public BatchlogManager()
     {
-        ScheduledThreadPoolExecutor executor = new DebuggableScheduledThreadPoolExecutor("BatchlogTasks");
-        executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-        batchlogTasks = executor;
+        batchlogTasks = executorFactory().scheduled(false, "BatchlogTasks");
     }
 
     public void start()
@@ -112,7 +111,7 @@
         MBeanWrapper.instance.registerMBean(this, MBEAN_NAME);
 
         batchlogTasks.scheduleWithFixedDelay(this::replayFailedBatches,
-                                             StorageService.RING_DELAY,
+                                             StorageService.RING_DELAY_MILLIS,
                                              REPLAY_INTERVAL,
                                              MILLISECONDS);
     }
@@ -122,10 +121,10 @@
         ExecutorUtils.shutdownAndWait(timeout, unit, batchlogTasks);
     }
 
-    public static void remove(UUID id)
+    public static void remove(TimeUUID id)
     {
         new Mutation(PartitionUpdate.fullPartitionDelete(SystemKeyspace.Batches,
-                                                         UUIDType.instance.decompose(id),
+                                                         id.toBytes(),
                                                          FBUtilities.timestampMicros(),
                                                          FBUtilities.nowInSeconds()))
             .apply();
@@ -209,9 +208,9 @@
             logger.trace("Replay cancelled as there are no peers in the ring.");
             return;
         }
-        setRate(DatabaseDescriptor.getBatchlogReplayThrottleInKB());
+        setRate(DatabaseDescriptor.getBatchlogReplayThrottleInKiB());
 
-        UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout());
+        TimeUUID limitUuid = TimeUUID.maxAtUnixMillis(currentTimeMillis() - getBatchlogTimeout());
         ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
         int pageSize = calculatePageSize(store);
         // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is
@@ -230,18 +229,18 @@
      * Sets the rate for the current rate limiter. When {@code throttleInKB} is 0, this sets the rate to
      * {@link Double#MAX_VALUE} bytes per second.
      *
-     * @param throttleInKB throughput to set in KB per second
+     * @param throttleInKB throughput to set in KiB per second
      */
     public void setRate(final int throttleInKB)
     {
         int endpointsCount = StorageService.instance.getTokenMetadata().getSizeOfAllEndpoints();
         if (endpointsCount > 0)
         {
-            int endpointThrottleInKB = throttleInKB / endpointsCount;
-            double throughput = endpointThrottleInKB == 0 ? Double.MAX_VALUE : endpointThrottleInKB * 1024.0;
+            int endpointThrottleInKiB = throttleInKB / endpointsCount;
+            double throughput = endpointThrottleInKiB == 0 ? Double.MAX_VALUE : endpointThrottleInKiB * 1024.0;
             if (rateLimiter.getRate() != throughput)
             {
-                logger.debug("Updating batchlog replay throttle to {} KB/s, {} KB/s per endpoint", throttleInKB, endpointThrottleInKB);
+                logger.debug("Updating batchlog replay throttle to {} KiB/s, {} KiB/s per endpoint", throttleInKB, endpointThrottleInKiB);
                 rateLimiter.setRate(throughput);
             }
         }
@@ -263,14 +262,14 @@
         ArrayList<ReplayingBatch> unfinishedBatches = new ArrayList<>(pageSize);
 
         Set<UUID> hintedNodes = new HashSet<>();
-        Set<UUID> replayedBatches = new HashSet<>();
+        Set<TimeUUID> replayedBatches = new HashSet<>();
         Exception caughtException = null;
         int skipped = 0;
 
         // Sending out batches for replay without waiting for them, so that one stuck batch doesn't affect others
         for (UntypedResultSet.Row row : batches)
         {
-            UUID id = row.getUUID("id");
+            TimeUUID id = row.getTimeUUID("id");
             int version = row.getInt("version");
             try
             {
@@ -316,7 +315,7 @@
         replayedBatches.forEach(BatchlogManager::remove);
     }
 
-    private void finishAndClearBatches(ArrayList<ReplayingBatch> batches, Set<UUID> hintedNodes, Set<UUID> replayedBatches)
+    private void finishAndClearBatches(ArrayList<ReplayingBatch> batches, Set<UUID> hintedNodes, Set<TimeUUID> replayedBatches)
     {
         // schedule hints for timed out deliveries
         for (ReplayingBatch batch : batches)
@@ -336,17 +335,17 @@
 
     private static class ReplayingBatch
     {
-        private final UUID id;
+        private final TimeUUID id;
         private final long writtenAt;
         private final List<Mutation> mutations;
         private final int replayedBytes;
 
         private List<ReplayWriteResponseHandler<Mutation>> replayHandlers;
 
-        ReplayingBatch(UUID id, int version, List<ByteBuffer> serializedMutations) throws IOException
+        ReplayingBatch(TimeUUID id, int version, List<ByteBuffer> serializedMutations) throws IOException
         {
             this.id = id;
-            this.writtenAt = UUIDGen.unixTimestamp(id);
+            this.writtenAt = id.unix(MILLISECONDS);
             this.mutations = new ArrayList<>(serializedMutations.size());
             this.replayedBytes = addMutations(version, serializedMutations);
         }
@@ -501,12 +500,12 @@
                 }
             }
 
-            ReplicaPlan.ForTokenWrite replicaPlan = new ReplicaPlan.ForTokenWrite(keyspace, liveAndDown.replicationStrategy(),
-                    ConsistencyLevel.ONE, liveRemoteOnly.pending(), liveRemoteOnly.all(), liveRemoteOnly.all(), liveRemoteOnly.all());
-            ReplayWriteResponseHandler<Mutation> handler = new ReplayWriteResponseHandler<>(replicaPlan, System.nanoTime());
+            ReplicaPlan.ForWrite replicaPlan = new ReplicaPlan.ForWrite(keyspace, liveAndDown.replicationStrategy(),
+                                                                        ConsistencyLevel.ONE, liveRemoteOnly.pending(), liveRemoteOnly.all(), liveRemoteOnly.all(), liveRemoteOnly.all());
+            ReplayWriteResponseHandler<Mutation> handler = new ReplayWriteResponseHandler<>(replicaPlan, mutation, nanoTime());
             Message<Mutation> message = Message.outWithFlag(MUTATION_REQ, mutation, MessageFlag.CALL_BACK_ON_FAILURE);
             for (Replica replica : liveRemoteOnly.all())
-                MessagingService.instance().sendWriteWithCallback(message, replica, handler, false);
+                MessagingService.instance().sendWriteWithCallback(message, replica, handler);
             return handler;
         }
 
@@ -526,9 +525,10 @@
         {
             private final Set<InetAddressAndPort> undelivered = Collections.newSetFromMap(new ConcurrentHashMap<>());
 
-            ReplayWriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan, long queryStartNanoTime)
+            // TODO: should we be hinting here, since presumably batch log will retry? Maintaining historical behaviour for the moment.
+            ReplayWriteResponseHandler(ReplicaPlan.ForWrite replicaPlan, Supplier<Mutation> hintOnFailure, long queryStartNanoTime)
             {
-                super(replicaPlan, null, WriteType.UNLOGGED_BATCH, queryStartNanoTime);
+                super(replicaPlan, null, WriteType.UNLOGGED_BATCH, hintOnFailure, queryStartNanoTime);
                 Iterables.addAll(undelivered, replicaPlan.contacts().endpoints());
             }
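
BatchlogManager.setRate (above) divides the configured replay throughput evenly across endpoints, with a per-endpoint result of zero meaning "unthrottled". A small worked sketch of that arithmetic, separated from the RateLimiter:

    public class BatchlogThrottleMath
    {
        // Mirrors BatchlogManager.setRate(): the bytes/second handed to the RateLimiter.
        static double throughputBytesPerSecond(int throttleInKiB, int endpointsCount)
        {
            int endpointThrottleInKiB = throttleInKiB / endpointsCount;
            return endpointThrottleInKiB == 0 ? Double.MAX_VALUE : endpointThrottleInKiB * 1024.0;
        }

        public static void main(String[] args)
        {
            // 1024 KiB/s across 4 endpoints -> 256 KiB/s = 262144 bytes/s per endpoint.
            System.out.println(throughputBytesPerSecond(1024, 4));
            // A throttle of 0, or fewer KiB than endpoints, effectively disables throttling.
            System.out.println(throughputBytesPerSecond(0, 4)); // Double.MAX_VALUE
        }
    }
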
 
diff --git a/src/java/org/apache/cassandra/cache/AutoSavingCache.java b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
index 430161f..1f383ec 100644
--- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java
+++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
@@ -17,11 +17,13 @@
  */
 package org.apache.cassandra.cache;
 
-import java.io.*;
+import java.io.BufferedInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.NoSuchFileException;
 import java.util.*;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 
@@ -29,10 +31,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
@@ -46,19 +45,22 @@
 import org.apache.cassandra.db.compaction.CompactionInfo.Unit;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.*;
-import org.apache.cassandra.io.util.CorruptFileException;
 import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.Pair;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.concurrent.Future;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K, V>
 {
     public interface IStreamFactory
     {
         InputStream getInputStream(File dataPath, File crcPath) throws IOException;
-        OutputStream getOutputStream(File dataPath, File crcPath) throws FileNotFoundException;
+        OutputStream getOutputStream(File dataPath, File crcPath);
     }
 
     private static final Logger logger = LoggerFactory.getLogger(AutoSavingCache.class);
@@ -90,7 +92,7 @@
     {
         private final SequentialWriterOption writerOption = SequentialWriterOption.newBuilder()
                                                                     .trickleFsync(DatabaseDescriptor.getTrickleFsync())
-                                                                    .trickleFsyncByteInterval(DatabaseDescriptor.getTrickleFsyncIntervalInKb() * 1024)
+                                                                    .trickleFsyncByteInterval(DatabaseDescriptor.getTrickleFsyncIntervalInKiB() * 1024)
                                                                     .finishOnClose(true).build();
 
         public InputStream getInputStream(File dataPath, File crcPath) throws IOException
@@ -155,32 +157,20 @@
         }
     }
 
-    public ListenableFuture<Integer> loadSavedAsync()
+    public Future<Integer> loadSavedAsync()
     {
-        final ListeningExecutorService es = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
-        final long start = System.nanoTime();
+        final ExecutorPlus es = executorFactory().sequential("loadSavedCache");
+        final long start = nanoTime();
 
-        ListenableFuture<Integer> cacheLoad = es.submit(new Callable<Integer>()
-        {
-            @Override
-            public Integer call()
-            {
-                return loadSaved();
-            }
+        Future<Integer> cacheLoad = es.submit(this::loadSaved);
+        cacheLoad.addListener(() -> {
+            if (size() > 0)
+                logger.info("Completed loading ({} ms; {} keys) {} cache",
+                        TimeUnit.NANOSECONDS.toMillis(nanoTime() - start),
+                        CacheService.instance.keyCache.size(),
+                        cacheType);
+            es.shutdown();
         });
-        cacheLoad.addListener(new Runnable()
-        {
-            @Override
-            public void run()
-            {
-                if (size() > 0)
-                    logger.info("Completed loading ({} ms; {} keys) {} cache",
-                            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start),
-                            CacheService.instance.keyCache.size(),
-                            cacheType);
-                es.shutdown();
-            }
-        }, MoreExecutors.directExecutor());
 
         return cacheLoad;
     }
@@ -188,7 +178,7 @@
     public int loadSaved()
     {
         int count = 0;
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         // modern format, allows both key and value (so key cache load can be purely sequential)
         File dataPath = getCacheDataPath(CURRENT_VERSION);
@@ -211,7 +201,7 @@
 
                 ArrayDeque<Future<Pair<K, V>>> futures = new ArrayDeque<>();
                 long loadByNanos = start + TimeUnit.SECONDS.toNanos(DatabaseDescriptor.getCacheLoadTimeout());
-                while (System.nanoTime() < loadByNanos && in.available() > 0)
+                while (nanoTime() < loadByNanos && in.available() > 0)
                 {
                     //tableId and indexName are serialized by the serializers in CacheService
                     //That is delegated there because there are serializer specific conditions
@@ -263,12 +253,12 @@
             catch (CorruptFileException e)
             {
                 JVMStabilityInspector.inspectThrowable(e);
-                logger.warn(String.format("Non-fatal checksum error reading saved cache %s", dataPath.getAbsolutePath()), e);
+                logger.warn(String.format("Non-fatal checksum error reading saved cache %s", dataPath.absolutePath()), e);
             }
             catch (Throwable t)
             {
                 JVMStabilityInspector.inspectThrowable(t);
-                logger.info(String.format("Harmless error reading saved cache %s", dataPath.getAbsolutePath()), t);
+                logger.info(String.format("Harmless error reading saved cache %s", dataPath.absolutePath()), t);
             }
             finally
             {
@@ -278,7 +268,7 @@
         }
         if (logger.isTraceEnabled())
             logger.trace("completed reading ({} ms; {} keys) saved cache {}",
-                    TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start), count, dataPath);
+                    TimeUnit.NANOSECONDS.toMillis(nanoTime() - start), count, dataPath);
         return count;
     }
 
@@ -323,7 +313,7 @@
                                                   0,
                                                   keysEstimate,
                                                   Unit.KEYS,
-                                                  UUIDGen.getTimeUUID());
+                                                  nextTimeUUID());
         }
 
         public CacheService.CacheType cacheType()
@@ -349,7 +339,7 @@
                 return;
             }
 
-            long start = System.nanoTime();
+            long start = nanoTime();
 
             Pair<File, File> cacheFilePaths = tempCacheFiles();
             try (WrappedDataOutputStreamPlus writer = new WrappedDataOutputStreamPlus(streamFactory.getOutputStream(cacheFilePaths.left, cacheFilePaths.right)))
@@ -357,11 +347,6 @@
 
                 //Need to be able to check schema version because CF names are ambiguous
                 UUID schemaVersion = Schema.instance.getVersion();
-                if (schemaVersion == null)
-                {
-                    Schema.instance.updateVersion();
-                    schemaVersion = Schema.instance.getVersion();
-                }
                 writer.writeLong(schemaVersion.getMostSignificantBits());
                 writer.writeLong(schemaVersion.getLeastSignificantBits());
 
@@ -382,7 +367,7 @@
                         break;
                 }
             }
-            catch (FileNotFoundException e)
+            catch (FileNotFoundException | NoSuchFileException e)
             {
                 throw new RuntimeException(e);
             }
@@ -394,31 +379,31 @@
             File cacheFile = getCacheDataPath(CURRENT_VERSION);
             File crcFile = getCacheCrcPath(CURRENT_VERSION);
 
-            cacheFile.delete(); // ignore error if it didn't exist
-            crcFile.delete();
+            cacheFile.tryDelete(); // ignore error if it didn't exist
+            crcFile.tryDelete();
 
-            if (!cacheFilePaths.left.renameTo(cacheFile))
+            if (!cacheFilePaths.left.tryMove(cacheFile))
                 logger.error("Unable to rename {} to {}", cacheFilePaths.left, cacheFile);
 
-            if (!cacheFilePaths.right.renameTo(crcFile))
+            if (!cacheFilePaths.right.tryMove(crcFile))
                 logger.error("Unable to rename {} to {}", cacheFilePaths.right, crcFile);
 
-            logger.info("Saved {} ({} items) in {} ms", cacheType, keysWritten, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+            logger.info("Saved {} ({} items) in {} ms", cacheType, keysWritten, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
         }
 
         private Pair<File, File> tempCacheFiles()
         {
             File dataPath = getCacheDataPath(CURRENT_VERSION);
             File crcPath = getCacheCrcPath(CURRENT_VERSION);
-            return Pair.create(FileUtils.createTempFile(dataPath.getName(), null, dataPath.getParentFile()),
-                               FileUtils.createTempFile(crcPath.getName(), null, crcPath.getParentFile()));
+            return Pair.create(FileUtils.createTempFile(dataPath.name(), null, dataPath.parent()),
+                               FileUtils.createTempFile(crcPath.name(), null, crcPath.parent()));
         }
 
         private void deleteOldCacheFiles()
         {
             File savedCachesDir = new File(DatabaseDescriptor.getSavedCachesLocation());
             assert savedCachesDir.exists() && savedCachesDir.isDirectory();
-            File[] files = savedCachesDir.listFiles();
+            File[] files = savedCachesDir.tryList();
             if (files != null)
             {
                 String cacheNameFormat = String.format("%s-%s.db", cacheType.toString(), CURRENT_VERSION);
@@ -427,11 +412,11 @@
                     if (!file.isFile())
                         continue; // someone's been messing with our directory.  naughty!
 
-                    if (file.getName().endsWith(cacheNameFormat)
-                     || file.getName().endsWith(cacheType.toString()))
+                    if (file.name().endsWith(cacheNameFormat)
+                     || file.name().endsWith(cacheType.toString()))
                     {
-                        if (!file.delete())
-                            logger.warn("Failed to delete {}", file.getAbsolutePath());
+                        if (!file.tryDelete())
+                            logger.warn("Failed to delete {}", file.absolutePath());
                     }
                 }
             }
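
loadSavedAsync now returns Cassandra's own Future and chains the completion listener directly, instead of going through Guava's ListenableFuture and MoreExecutors. A minimal consumer sketch, assuming the caller is free to block:

    import org.apache.cassandra.cache.AutoSavingCache;
    import org.apache.cassandra.utils.concurrent.Future;

    public class CacheWarmupSketch
    {
        // 'cache' stands in for an AutoSavingCache instance, e.g. CacheService.instance.keyCache.
        static void warm(AutoSavingCache<?, ?> cache) throws Exception
        {
            Future<Integer> loading = cache.loadSavedAsync();

            // Further listeners can be chained, just like the internal logging listener.
            loading.addListener(() -> System.out.println("saved cache load finished"));

            // Or simply block until the saved entries have been read back.
            System.out.println("loaded " + loading.get() + " keys");
        }
    }
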
diff --git a/src/java/org/apache/cassandra/cache/CaffeineCache.java b/src/java/org/apache/cassandra/cache/CaffeineCache.java
index d51ea84..717fb78 100644
--- a/src/java/org/apache/cassandra/cache/CaffeineCache.java
+++ b/src/java/org/apache/cassandra/cache/CaffeineCache.java
@@ -21,12 +21,11 @@
 
 import java.util.Iterator;
 
-import com.google.common.util.concurrent.MoreExecutors;
-
 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.Policy.Eviction;
 import com.github.benmanes.caffeine.cache.Weigher;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 
 /**
  * An adapter from a Caffeine cache to the ICache interface. This provides an on-heap cache using
@@ -54,7 +53,7 @@
         Cache<K, V> cache = Caffeine.newBuilder()
                 .maximumWeight(weightedCapacity)
                 .weigher(weigher)
-                .executor(MoreExecutors.directExecutor())
+                .executor(ImmediateExecutor.INSTANCE)
                 .build();
         return new CaffeineCache<>(cache);
     }
@@ -64,7 +63,7 @@
         return create(weightedCapacity, (key, value) -> {
             long size = key.unsharedHeapSize() + value.unsharedHeapSize();
             if (size > Integer.MAX_VALUE) {
-                throw new IllegalArgumentException("Serialized size cannot be more than 2GB/Integer.MAX_VALUE");
+                throw new IllegalArgumentException("Serialized size cannot be more than 2GiB/Integer.MAX_VALUE");
             }
             return (int) size;
         });
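
The caches in this package swap Guava's MoreExecutors.directExecutor() for ImmediateExecutor.INSTANCE so that Caffeine maintenance (eviction, removal notification) runs inline on the calling thread. A standalone sketch of the same builder pattern using only the public Caffeine API; the weigher caps entry size just as the cache above does:

    import com.github.benmanes.caffeine.cache.Cache;
    import com.github.benmanes.caffeine.cache.Caffeine;
    import com.github.benmanes.caffeine.cache.Weigher;

    public class WeightedCacheSketch
    {
        static Cache<String, byte[]> create(long weightedCapacityBytes)
        {
            Weigher<String, byte[]> weigher = (key, value) -> {
                long size = key.length() + (long) value.length;
                if (size > Integer.MAX_VALUE)
                    throw new IllegalArgumentException("Serialized size cannot be more than 2GiB/Integer.MAX_VALUE");
                return (int) size;
            };

            return Caffeine.newBuilder()
                           .maximumWeight(weightedCapacityBytes)
                           .weigher(weigher)
                           // Inline executor, standing in for Cassandra's ImmediateExecutor.INSTANCE.
                           .executor(Runnable::run)
                           .build();
        }
    }
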
diff --git a/src/java/org/apache/cassandra/cache/ChunkCache.java b/src/java/org/apache/cassandra/cache/ChunkCache.java
index c53810a..51dbdc6 100644
--- a/src/java/org/apache/cassandra/cache/ChunkCache.java
+++ b/src/java/org/apache/cassandra/cache/ChunkCache.java
@@ -26,9 +26,9 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Throwables;
 import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.MoreExecutors;
 
 import com.github.benmanes.caffeine.cache.*;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.util.*;
@@ -39,8 +39,8 @@
 public class ChunkCache
         implements CacheLoader<ChunkCache.Key, ChunkCache.Buffer>, RemovalListener<ChunkCache.Key, ChunkCache.Buffer>, CacheSize
 {
-    public static final int RESERVED_POOL_SPACE_IN_MB = 32;
-    public static final long cacheSize = 1024L * 1024L * Math.max(0, DatabaseDescriptor.getFileCacheSizeInMB() - RESERVED_POOL_SPACE_IN_MB);
+    public static final int RESERVED_POOL_SPACE_IN_MiB = 32;
+    public static final long cacheSize = 1024L * 1024L * Math.max(0, DatabaseDescriptor.getFileCacheSizeInMiB() - RESERVED_POOL_SPACE_IN_MiB);
     public static final boolean roundUp = DatabaseDescriptor.getFileCacheRoundUp();
 
     private static boolean enabled = DatabaseDescriptor.getFileCacheEnabled() && cacheSize > 0;
@@ -143,7 +143,7 @@
         metrics = new ChunkCacheMetrics(this);
         cache = Caffeine.newBuilder()
                         .maximumWeight(cacheSize)
-                        .executor(MoreExecutors.directExecutor())
+                        .executor(ImmediateExecutor.INSTANCE)
                         .weigher((key, buffer) -> ((Buffer) buffer).buffer.capacity())
                         .removalListener(this)
                         .recordStats(() -> metrics)
diff --git a/src/java/org/apache/cassandra/cache/OHCProvider.java b/src/java/org/apache/cassandra/cache/OHCProvider.java
index afdc872..4a705a6 100644
--- a/src/java/org/apache/cassandra/cache/OHCProvider.java
+++ b/src/java/org/apache/cassandra/cache/OHCProvider.java
@@ -37,7 +37,7 @@
     public ICache<RowCacheKey, IRowCacheEntry> create()
     {
         OHCacheBuilder<RowCacheKey, IRowCacheEntry> builder = OHCacheBuilder.newBuilder();
-        builder.capacity(DatabaseDescriptor.getRowCacheSizeInMB() * 1024 * 1024)
+        builder.capacity(DatabaseDescriptor.getRowCacheSizeInMiB() * 1024 * 1024)
                .keySerializer(KeySerializer.instance)
                .valueSerializer(ValueSerializer.instance)
                .throwOOME(true);
diff --git a/src/java/org/apache/cassandra/cache/SerializingCache.java b/src/java/org/apache/cassandra/cache/SerializingCache.java
index 55c20ec..8ee8e02 100644
--- a/src/java/org/apache/cassandra/cache/SerializingCache.java
+++ b/src/java/org/apache/cassandra/cache/SerializingCache.java
@@ -21,6 +21,7 @@
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.Weigher;
 
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.util.MemoryInputStream;
 import org.apache.cassandra.io.util.MemoryOutputStream;
@@ -32,8 +33,6 @@
 import java.io.IOException;
 import java.util.Iterator;
 
-import com.google.common.util.concurrent.MoreExecutors;
-
 /**
  * Serializes cache values off-heap.
  */
@@ -51,7 +50,7 @@
         this.cache = Caffeine.newBuilder()
                    .weigher(weigher)
                    .maximumWeight(capacity)
-                   .executor(MoreExecutors.directExecutor())
+                   .executor(ImmediateExecutor.INSTANCE)
                    .removalListener((key, mem, cause) -> {
                        if (cause.wasEvicted()) {
                            mem.unreference();
@@ -70,7 +69,7 @@
         return create(weightedCapacity, (key, value) -> {
             long size = value.size();
             if (size > Integer.MAX_VALUE) {
-                throw new IllegalArgumentException("Serialized size must not be more than 2GB");
+                throw new IllegalArgumentException("Serialized size must not be more than 2GiB");
             }
             return (int) size;
         }, serializer);
diff --git a/src/java/org/apache/cassandra/cache/SerializingCacheProvider.java b/src/java/org/apache/cassandra/cache/SerializingCacheProvider.java
index 813f6fe..56393c4 100644
--- a/src/java/org/apache/cassandra/cache/SerializingCacheProvider.java
+++ b/src/java/org/apache/cassandra/cache/SerializingCacheProvider.java
@@ -30,7 +30,7 @@
 {
     public ICache<RowCacheKey, IRowCacheEntry> create()
     {
-        return SerializingCache.create(DatabaseDescriptor.getRowCacheSizeInMB() * 1024 * 1024, new RowCacheSerializer());
+        return SerializingCache.create(DatabaseDescriptor.getRowCacheSizeInMiB() * 1024 * 1024, new RowCacheSerializer());
     }
 
     // Package Public: used by external Row Cache plugins
diff --git a/src/java/org/apache/cassandra/concurrent/AbstractLocalAwareExecutorService.java b/src/java/org/apache/cassandra/concurrent/AbstractLocalAwareExecutorService.java
deleted file mode 100644
index e7bec19..0000000
--- a/src/java/org/apache/cassandra/concurrent/AbstractLocalAwareExecutorService.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.concurrent;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
-import org.apache.cassandra.utils.JVMStabilityInspector;
-
-public abstract class AbstractLocalAwareExecutorService implements LocalAwareExecutorService
-{
-    private static final Logger logger = LoggerFactory.getLogger(AbstractLocalAwareExecutorService.class);
-
-    protected abstract void addTask(FutureTask<?> futureTask);
-    protected abstract void onCompletion();
-
-    /** Task Submission / Creation / Objects **/
-
-    public <T> FutureTask<T> submit(Callable<T> task)
-    {
-        return submit(newTaskFor(task));
-    }
-
-    public FutureTask<?> submit(Runnable task)
-    {
-        return submit(newTaskFor(task, null));
-    }
-
-    public <T> FutureTask<T> submit(Runnable task, T result)
-    {
-        return submit(newTaskFor(task, result));
-    }
-
-    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    protected <T> FutureTask<T> newTaskFor(Runnable runnable, T result)
-    {
-        return newTaskFor(runnable, result, ExecutorLocals.create());
-    }
-
-    protected <T> FutureTask<T> newTaskFor(Runnable runnable, T result, ExecutorLocals locals)
-    {
-        if (locals != null)
-        {
-            if (runnable instanceof LocalSessionFutureTask)
-                return (LocalSessionFutureTask<T>) runnable;
-            return new LocalSessionFutureTask<T>(runnable, result, locals);
-        }
-        if (runnable instanceof FutureTask)
-            return (FutureTask<T>) runnable;
-        return new FutureTask<>(runnable, result);
-    }
-
-    protected <T> FutureTask<T> newTaskFor(Callable<T> callable)
-    {
-        return newTaskFor(callable, ExecutorLocals.create());
-    }
-
-    protected <T> FutureTask<T> newTaskFor(Callable<T> callable, ExecutorLocals locals)
-    {
-        if (locals != null)
-        {
-            if (callable instanceof LocalSessionFutureTask)
-                return (LocalSessionFutureTask<T>) callable;
-            return new LocalSessionFutureTask<T>(callable, ExecutorLocals.create());
-        }
-        if (callable instanceof FutureTask)
-            return (FutureTask<T>) callable;
-        return new FutureTask<>(callable);
-    }
-
-    private class LocalSessionFutureTask<T> extends FutureTask<T>
-    {
-        private final ExecutorLocals locals;
-
-        public LocalSessionFutureTask(Callable<T> callable, ExecutorLocals locals)
-        {
-            super(callable);
-            this.locals = locals;
-        }
-
-        public LocalSessionFutureTask(Runnable runnable, T result, ExecutorLocals locals)
-        {
-            super(runnable, result);
-            this.locals = locals;
-        }
-
-        public void run()
-        {
-            ExecutorLocals old = ExecutorLocals.create();
-            ExecutorLocals.set(locals);
-            try
-            {
-                super.run();
-            }
-            finally
-            {
-                ExecutorLocals.set(old);
-            }
-        }
-    }
-
-    class FutureTask<T> extends SimpleCondition implements Future<T>, Runnable
-    {
-        private boolean failure;
-        private Object result = this;
-        private final Callable<T> callable;
-
-        public FutureTask(Callable<T> callable)
-        {
-            this.callable = callable;
-        }
-        public FutureTask(Runnable runnable, T result)
-        {
-            this(Executors.callable(runnable, result));
-        }
-
-        public void run()
-        {
-            try
-            {
-                result = callable.call();
-            }
-            catch (Throwable t)
-            {
-                logger.error("Uncaught exception on thread {}", Thread.currentThread(), t);
-                result = t;
-                failure = true;
-                JVMStabilityInspector.inspectThrowable(t);
-            }
-            finally
-            {
-                signalAll();
-                onCompletion();
-            }
-        }
-
-        public boolean cancel(boolean mayInterruptIfRunning)
-        {
-            return false;
-        }
-
-        public boolean isCancelled()
-        {
-            return false;
-        }
-
-        public boolean isDone()
-        {
-            return isSignaled();
-        }
-
-        public T get() throws InterruptedException, ExecutionException
-        {
-            await();
-            Object result = this.result;
-            if (failure)
-                throw new ExecutionException((Throwable) result);
-            return (T) result;
-        }
-
-        public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
-        {
-            if (!await(timeout, unit))
-                throw new TimeoutException();
-            Object result = this.result;
-            if (failure)
-                throw new ExecutionException((Throwable) result);
-            return (T) result;
-        }
-    }
-
-    private <T> FutureTask<T> submit(FutureTask<T> task)
-    {
-        addTask(task);
-        return task;
-    }
-
-    public void execute(Runnable command)
-    {
-        addTask(newTaskFor(command, null, ExecutorLocals.create()));
-    }
-
-    public void execute(Runnable command, ExecutorLocals locals)
-    {
-        addTask(newTaskFor(command, null, locals));
-    }
-}
diff --git a/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java b/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java
deleted file mode 100644
index 3b9d2ff..0000000
--- a/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.concurrent;
-
-import java.util.concurrent.*;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.JVMStabilityInspector;
-
-/**
- * Like DebuggableThreadPoolExecutor, DebuggableScheduledThreadPoolExecutor always
- * logs exceptions from the tasks it is given, even if Future.get is never called elsewhere.
- *
- * DebuggableScheduledThreadPoolExecutor also catches exceptions during Task execution
- * so that they don't supress subsequent invocations of the task.
- *
- * Finally, there is a special rejected execution handler for tasks rejected during the shutdown hook.
- *
- * For fire and forget tasks (like ref tidy) we can safely ignore the exceptions.
- * For any callers that care to know their task was rejected, we cancel the passed task.
- */
-public class DebuggableScheduledThreadPoolExecutor extends ScheduledThreadPoolExecutor
-{
-    private static final Logger logger = LoggerFactory.getLogger(DebuggableScheduledThreadPoolExecutor.class);
-
-    public static final RejectedExecutionHandler rejectedExecutionHandler = new RejectedExecutionHandler()
-    {
-        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor)
-        {
-            if (executor.isShutdown())
-            {
-                if (!StorageService.instance.isShutdown())
-                    throw new RejectedExecutionException("ScheduledThreadPoolExecutor has shut down.");
-
-                //Give some notification to the caller the task isn't going to run
-                if (task instanceof Future)
-                    ((Future) task).cancel(false);
-
-                logger.debug("ScheduledThreadPoolExecutor has shut down as part of C* shutdown");
-            }
-            else
-            {
-                throw new AssertionError("Unknown rejection of ScheduledThreadPoolExecutor task");
-            }
-        }
-    };
-
-    public DebuggableScheduledThreadPoolExecutor(int corePoolSize, String threadPoolName, int priority)
-    {
-        super(corePoolSize, new NamedThreadFactory(threadPoolName, priority));
-        setRejectedExecutionHandler(rejectedExecutionHandler);
-    }
-
-    public DebuggableScheduledThreadPoolExecutor(int corePoolSize, ThreadFactory threadFactory)
-    {
-        super(corePoolSize, threadFactory);
-        setRejectedExecutionHandler(rejectedExecutionHandler);
-    }
-
-    public DebuggableScheduledThreadPoolExecutor(String threadPoolName)
-    {
-        this(1, threadPoolName, Thread.NORM_PRIORITY);
-        setRejectedExecutionHandler(rejectedExecutionHandler);
-    }
-
-    // We need this as well as the wrapper for the benefit of non-repeating tasks
-    @Override
-    public void afterExecute(Runnable r, Throwable t)
-    {
-        super.afterExecute(r,t);
-        DebuggableThreadPoolExecutor.logExceptionsAfterExecute(r, t);
-    }
-
-    // override scheduling to suppress exceptions that would cancel future executions
-    @Override
-    public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit)
-    {
-        return super.scheduleAtFixedRate(new UncomplainingRunnable(command), initialDelay, period, unit);
-    }
-
-    @Override
-    public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit)
-    {
-        return super.scheduleWithFixedDelay(new UncomplainingRunnable(command), initialDelay, delay, unit);
-    }
-
-    private static class UncomplainingRunnable implements Runnable
-    {
-        private final Runnable runnable;
-
-        public UncomplainingRunnable(Runnable runnable)
-        {
-            this.runnable = runnable;
-        }
-
-        public void run()
-        {
-            try
-            {
-                runnable.run();
-            }
-            catch (Throwable t)
-            {
-                JVMStabilityInspector.inspectThrowable(t);
-                DebuggableThreadPoolExecutor.handleOrLog(t);
-            }
-        }
-    }
-}
diff --git a/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java b/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java
deleted file mode 100644
index 7d9da4d..0000000
--- a/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.concurrent;
-
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.RunnableFuture;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class incorporates some Executor best practices for Cassandra.  Most of the executors in the system
- * should use or extend this.  There are two main improvements over a vanilla TPE:
- *
- * - If a task throws an exception, the default uncaught exception handler will be invoked; if there is
- *   no such handler, the exception will be logged.
- * - MaximumPoolSize is not supported.  Here is what that means (quoting TPE javadoc):
- *
- *     If fewer than corePoolSize threads are running, the Executor always prefers adding a new thread rather than queuing.
- *     If corePoolSize or more threads are running, the Executor always prefers queuing a request rather than adding a new thread.
- *     If a request cannot be queued, a new thread is created unless this would exceed maximumPoolSize, in which case, the task will be rejected.
- *
- *   We don't want this last stage of creating new threads if the queue is full; it makes it needlessly difficult to
- *   reason about the system's behavior.  In other words, if DebuggableTPE has allocated our maximum number of (core)
- *   threads and the queue is full, we want the enqueuer to block.  But to allow the number of threads to drop if a
- *   stage is less busy, core thread timeout is enabled.
- */
-public class DebuggableThreadPoolExecutor extends ThreadPoolExecutor implements LocalAwareExecutorService
-{
-    protected static final Logger logger = LoggerFactory.getLogger(DebuggableThreadPoolExecutor.class);
-    public static final RejectedExecutionHandler blockingExecutionHandler = new RejectedExecutionHandler()
-    {
-        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor)
-        {
-            ((DebuggableThreadPoolExecutor) executor).onInitialRejection(task);
-            BlockingQueue<Runnable> queue = executor.getQueue();
-            while (true)
-            {
-                if (executor.isShutdown())
-                {
-                    ((DebuggableThreadPoolExecutor) executor).onFinalRejection(task);
-                    throw new RejectedExecutionException("ThreadPoolExecutor has shut down");
-                }
-                try
-                {
-                    if (queue.offer(task, 1000, TimeUnit.MILLISECONDS))
-                    {
-                        ((DebuggableThreadPoolExecutor) executor).onFinalAccept(task);
-                        break;
-                    }
-                }
-                catch (InterruptedException e)
-                {
-                    throw new AssertionError(e);
-                }
-            }
-        }
-    };
-
-    public DebuggableThreadPoolExecutor(String threadPoolName, int priority)
-    {
-        this(1, Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(threadPoolName, priority));
-    }
-
-    public DebuggableThreadPoolExecutor(int corePoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> queue, ThreadFactory factory)
-    {
-        this(corePoolSize, corePoolSize, keepAliveTime, unit, queue, factory);
-    }
-
-    public DebuggableThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory)
-    {
-        super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
-        allowCoreThreadTimeOut(true);
-
-        // block task submissions until queue has room.
-        // this is fighting TPE's design a bit because TPE rejects if queue.offer reports a full queue.
-        // we'll just override this with a handler that retries until it gets in.  ugly, but effective.
-        // (there is an extensive analysis of the options here at
-        //  http://today.java.net/pub/a/today/2008/10/23/creating-a-notifying-blocking-thread-pool-executor.html)
-        this.setRejectedExecutionHandler(blockingExecutionHandler);
-    }
-
-    /**
-     * Creates a thread pool that creates new threads as needed, but
-     * will reuse previously constructed threads when they are
-     * available.
-     * @param threadPoolName the name of the threads created by this executor
-     * @return The new DebuggableThreadPoolExecutor
-     */
-    public static DebuggableThreadPoolExecutor createCachedThreadpoolWithMaxSize(String threadPoolName)
-    {
-        return new DebuggableThreadPoolExecutor(0, Integer.MAX_VALUE,
-                                                60L, TimeUnit.SECONDS,
-                                                new SynchronousQueue<Runnable>(),
-                                                new NamedThreadFactory(threadPoolName));
-    }
-
-    /**
-     * Returns a ThreadPoolExecutor with a fixed number of threads.
-     * When all threads are actively executing tasks, new tasks are queued.
-     * If (most) threads are expected to be idle most of the time, prefer createWithMaxSize() instead.
-     * @param threadPoolName the name of the threads created by this executor
-     * @param size the fixed number of threads for this executor
-     * @return the new DebuggableThreadPoolExecutor
-     */
-    public static DebuggableThreadPoolExecutor createWithFixedPoolSize(String threadPoolName, int size)
-    {
-        return createWithMaximumPoolSize(threadPoolName, size, Integer.MAX_VALUE, TimeUnit.SECONDS);
-    }
-
-    /**
-     * Returns a ThreadPoolExecutor with a fixed maximum number of threads, but whose
-     * threads are terminated when idle for too long.
-     * When all threads are actively executing tasks, new tasks are queued.
-     * @param threadPoolName the name of the threads created by this executor
-     * @param size the maximum number of threads for this executor
-     * @param keepAliveTime the time an idle thread is kept alive before being terminated
-     * @param unit the time unit for {@code keepAliveTime}
-     * @return the new DebuggableThreadPoolExecutor
-     */
-    public static DebuggableThreadPoolExecutor createWithMaximumPoolSize(String threadPoolName, int size, int keepAliveTime, TimeUnit unit)
-    {
-        return new DebuggableThreadPoolExecutor(size, Integer.MAX_VALUE, keepAliveTime, unit, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(threadPoolName));
-    }
-
-    protected void onInitialRejection(Runnable task) {}
-    protected void onFinalAccept(Runnable task) {}
-    protected void onFinalRejection(Runnable task) {}
-
-    public void execute(Runnable command, ExecutorLocals locals)
-    {
-        super.execute(locals == null || command instanceof LocalSessionWrapper
-                      ? command
-                      : LocalSessionWrapper.create(command, null, locals));
-    }
-
-    public void maybeExecuteImmediately(Runnable command)
-    {
-        execute(command);
-    }
-
-    private ExecutorLocals maybeCreateExecutorLocals(Object command)
-    {
-        return command instanceof LocalSessionWrapper ? null : ExecutorLocals.create();
-    }
-
-    // execute does not call newTaskFor
-    @Override
-    public void execute(Runnable command)
-    {
-        ExecutorLocals locals = maybeCreateExecutorLocals(command);
-        super.execute(locals != null
-                      ? LocalSessionWrapper.create(command, locals)
-                      : command);
-    }
-
-    @Override
-    protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T result)
-    {
-        ExecutorLocals locals = maybeCreateExecutorLocals(runnable);
-        if (locals != null)
-            return LocalSessionWrapper.create(runnable, result, locals);
-        if (runnable instanceof RunnableFuture)
-            return new ForwardingRunnableFuture<>((RunnableFuture) runnable, result);
-        return super.newTaskFor(runnable, result);
-    }
-
-    @Override
-    protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable)
-    {
-        ExecutorLocals locals = maybeCreateExecutorLocals(callable);
-        if (locals != null)
-            return LocalSessionWrapper.create(callable, locals);
-        return super.newTaskFor(callable);
-    }
-
-    @Override
-    protected void afterExecute(Runnable r, Throwable t)
-    {
-        super.afterExecute(r, t);
-
-        maybeResetLocalSessionWrapper(r);
-        logExceptionsAfterExecute(r, t);
-    }
-
-    protected static void maybeResetLocalSessionWrapper(Runnable r)
-    {
-        if (r instanceof LocalSessionWrapper)
-        {
-            LocalSessionWrapper tsw = (LocalSessionWrapper) r;
-            // we have to reset trace state as its presence is what denotes the current thread is tracing
-            // and if left this thread might start tracing unrelated tasks
-            tsw.reset();
-        }
-    }
-
-    @Override
-    protected void beforeExecute(Thread t, Runnable r)
-    {
-        if (r instanceof LocalSessionWrapper)
-            ((LocalSessionWrapper) r).setupContext();
-
-        super.beforeExecute(t, r);
-    }
-
-    @Override
-    public int getActiveTaskCount()
-    {
-        return getActiveCount();
-    }
-
-    @Override
-    public int getPendingTaskCount()
-    {
-        return getQueue().size();
-    }
-
-    /**
-     * Send @param t and any exception wrapped by @param r to the default uncaught exception handler,
-     * or log them if none such is set up
-     */
-    public static void logExceptionsAfterExecute(Runnable r, Throwable t)
-    {
-        Throwable hiddenThrowable = extractThrowable(r);
-        if (hiddenThrowable != null)
-            handleOrLog(hiddenThrowable);
-
-        // ThreadPoolExecutor will re-throw exceptions thrown by its Task (which will be seen by
-        // the default uncaught exception handler) so we only need to do anything if that handler
-        // isn't set up yet.
-        if (t != null && Thread.getDefaultUncaughtExceptionHandler() == null)
-            handleOrLog(t);
-    }
-
-    /**
-     * Send @param t to the default uncaught exception handler, or log it if none such is set up
-     */
-    public static void handleOrLog(Throwable t)
-    {
-        if (Thread.getDefaultUncaughtExceptionHandler() == null)
-            logger.error("Error in ThreadPoolExecutor", t);
-        else
-            Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), t);
-    }
-
-    /**
-     * @return any exception wrapped by @param runnable, i.e., if it is a FutureTask
-     */
-    public static Throwable extractThrowable(Runnable runnable)
-    {
-        // Check for exceptions wrapped by FutureTask or tasks which wrap FutureTask (HasDelegateFuture interface)
-        Throwable throwable = null;
-        if (runnable instanceof Future<?>)
-        {
-            throwable = extractThrowable(((Future<?>) runnable));
-        }
-        if (throwable == null && runnable instanceof HasDelegateFuture)
-        {
-            throwable =  extractThrowable(((HasDelegateFuture) runnable).getDelegate());
-        }
-
-        return throwable;
-    }
-
-    private static Throwable extractThrowable(Future<?> future)
-    {
-        // Check for exceptions wrapped by Future.  We do this by calling get(), which will
-        // cause it to throw any saved exception.
-        //
-        // Complicating things, calling get() on a ScheduledFutureTask will block until the task
-        // is cancelled.  Hence, the extra isDone check beforehand.
-        if (future.isDone())
-        {
-            try
-            {
-                future.get();
-            }
-            catch (InterruptedException e)
-            {
-                throw new AssertionError(e);
-            }
-            catch (CancellationException e)
-            {
-                logger.trace("Task cancelled", e);
-            }
-            catch (ExecutionException e)
-            {
-                return e.getCause();
-            }
-        }
-        return null;
-    }
-
-    /**
-     * If a task wraps a {@link Future} then it should implement this interface to expose the underlining future for
-     * {@link #extractThrowable(Runnable)} to handle.
-     */
-    private interface HasDelegateFuture
-    {
-        Future<?> getDelegate();
-    }
-
-    /**
-     * Used to wrap a Runnable or Callable passed to submit or execute so we can clone the ExecutorLocals and move
-     * them into the worker thread.
-     *
-     * The {@link DebuggableThreadPoolExecutor#afterExecute(java.lang.Runnable, java.lang.Throwable)}
-     * method is called after the runnable completes, which will then call {@link #extractThrowable(Runnable)} to
-     * attempt to get the "hidden" throwable from a task which implements {@link Future}.  The problem is that {@link LocalSessionWrapper}
-     * expects that the {@link Callable} provided to it will throw; which is not true for {@link RunnableFuture} tasks;
-     * the expected semantic in this case is to have the LocalSessionWrapper future be successful and a new implementation
-     * {@link FutureLocalSessionWrapper} is created to expose the underlying {@link Future} for {@link #extractThrowable(Runnable)}.
-     *
-     * If a task is a {@link Runnable} the create family of methods should be called rather than {@link Executors#callable(Runnable)}
-     * since they will handle the case where the task is also a future, and will make sure the {@link #extractThrowable(Runnable)}
-     * is able to detect the task's underlying exception.
-     *
-     * @param <T>
-     */
-    private static class LocalSessionWrapper<T> extends FutureTask<T>
-    {
-        private final ExecutorLocals locals;
-
-        private LocalSessionWrapper(Callable<T> callable, ExecutorLocals locals)
-        {
-            super(callable);
-            this.locals = locals;
-        }
-
-        static LocalSessionWrapper<Object> create(Runnable command)
-        {
-            return create(command, null, ExecutorLocals.create());
-        }
-
-        static LocalSessionWrapper<Object> create(Runnable command, ExecutorLocals locals)
-        {
-            return create(command, null, locals);
-        }
-
-        static <T> LocalSessionWrapper<T> create(Runnable command, T result)
-        {
-            return create(command, result, ExecutorLocals.create());
-        }
-
-        static <T> LocalSessionWrapper<T> create(Runnable command, T result, ExecutorLocals locals)
-        {
-            if (command instanceof RunnableFuture)
-                return new FutureLocalSessionWrapper<>((RunnableFuture) command, result, locals);
-            return new LocalSessionWrapper<>(Executors.callable(command, result), locals);
-        }
-
-        static <T> LocalSessionWrapper<T> create(Callable<T> command)
-        {
-            return new LocalSessionWrapper<>(command, ExecutorLocals.create());
-        }
-
-        static <T> LocalSessionWrapper<T> create(Callable<T> command, ExecutorLocals locals)
-        {
-            return new LocalSessionWrapper<>(command, locals);
-        }
-
-        private void setupContext()
-        {
-            ExecutorLocals.set(locals);
-        }
-
-        private void reset()
-        {
-            ExecutorLocals.set(null);
-        }
-    }
-
-    private static class FutureLocalSessionWrapper<T> extends LocalSessionWrapper<T> implements HasDelegateFuture
-    {
-        private final RunnableFuture<T> delegate;
-
-        private FutureLocalSessionWrapper(RunnableFuture command, T result, ExecutorLocals locals)
-        {
-            super(() -> {
-                command.run();
-                return result;
-            }, locals);
-            this.delegate = command;
-        }
-
-        public Future<T> getDelegate()
-        {
-            return delegate;
-        }
-    }
-
-    /**
-     * Similar to {@link FutureLocalSessionWrapper}, this class wraps a {@link Future} and will be successful
-     * even if the underlying future is marked as failed; the main difference is that this class does not set up
-     * {@link ExecutorLocals}.
-     *
-     * @param <T>
-     */
-    private static class ForwardingRunnableFuture<T> extends FutureTask<T> implements HasDelegateFuture
-    {
-        private final RunnableFuture<T> delegate;
-
-        public ForwardingRunnableFuture(RunnableFuture<T> delegate, T result)
-        {
-            super(delegate, result);
-            this.delegate = delegate;
-        }
-
-        public Future<T> getDelegate()
-        {
-            return delegate;
-        }
-    }
-}
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutionFailure.java b/src/java/org/apache/cassandra/concurrent/ExecutionFailure.java
new file mode 100644
index 0000000..7fa7dcb
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ExecutionFailure.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.WithResources;
+
+/**
+ * Standardised handling of failures during execution - mostly this involves invoking a thread's
+ * {@link java.lang.Thread.UncaughtExceptionHandler} or
+ * {@link JVMStabilityInspector#uncaughtException(Thread, Throwable)},
+ * with special handling for {@link CompactionInterruptedException}.
+ * This class also provides wrappers for {@link WithResources} with {@link Runnable} and {@link Callable}.
+ */
+public class ExecutionFailure
+{
+    private static final Logger logger = LoggerFactory.getLogger(ExecutionFailure.class);
+
+    /**
+     * Invoke the relevant {@link java.lang.Thread.UncaughtExceptionHandler},
+     * ignoring (except for logging) any {@link CompactionInterruptedException}
+     */
+    public static void handle(Throwable t)
+    {
+        try
+        {
+            if (t instanceof CompactionInterruptedException)
+            {
+                // TODO: should we check to see there aren't nested CompactionInterruptedException?
+                logger.info(t.getMessage());
+                if (t.getSuppressed() != null && t.getSuppressed().length > 0)
+                    logger.warn("Interruption of compaction encountered exceptions:", t);
+                else
+                    logger.trace("Full interruption stack trace:", t);
+            }
+            else
+            {
+                Thread thread = Thread.currentThread();
+                Thread.UncaughtExceptionHandler handler = thread.getUncaughtExceptionHandler();
+                if (handler == null)
+                    handler = JVMStabilityInspector::uncaughtException;
+                handler.uncaughtException(thread, t);
+            }
+        }
+        catch (Throwable shouldNeverHappen)
+        {
+            logger.error("Unexpected error while handling unexpected error", shouldNeverHappen);
+        }
+    }
+
+    /**
+     * See {@link #propagating(WithResources, Runnable)}
+     */
+    static Runnable propagating(Runnable wrap)
+    {
+        return wrap instanceof FutureTask<?> ? wrap : propagating(WithResources.none(), wrap);
+    }
+
+    /**
+     * In the case of plain executions, we want to handle exceptions without the full {@link FutureTask} machinery
+     * while still propagating the exception to the encapsulating Future
+     */
+    static Runnable propagating(WithResources withResources, Runnable wrap)
+    {
+        return enforceOptions(withResources, wrap, true);
+    }
+
+    /**
+     * See {@link #suppressing(WithResources, Runnable)}
+     */
+    static Runnable suppressing(Runnable wrap)
+    {
+        return wrap instanceof FutureTask<?> ? wrap : suppressing(WithResources.none(), wrap);
+    }
+
+    /**
+     * In the case of scheduled periodic tasks, we don't want exceptions propagating to cancel the recurring execution.
+     */
+    static Runnable suppressing(WithResources withResources, Runnable wrap)
+    {
+        return enforceOptions(withResources, wrap, false);
+    }
+
+    /**
+     * Encapsulate the execution, propagating or suppressing any exceptions as requested.
+     *
+     * Note that if {@code wrap} is a {@link java.util.concurrent.Future} its exceptions may not be captured;
+     * however, the codebase should be using our internal {@link Future} variants, which handle exceptions in the
+     * desired way.
+     */
+    private static Runnable enforceOptions(WithResources withResources, Runnable wrap, boolean propagate)
+    {
+        return new Runnable()
+        {
+            @Override
+            public void run()
+            {
+                try (Closeable close = withResources.get())
+                {
+                    wrap.run();
+                }
+                catch (Throwable t)
+                {
+                    handle(t);
+                    if (propagate)
+                        throw t;
+                }
+            }
+
+            @Override
+            public String toString()
+            {
+                return wrap.toString();
+            }
+        };
+    }
+
+    /**
+     * See {@link #enforceOptions(WithResources, Callable)}
+     */
+    static <V> Callable<V> propagating(Callable<V> wrap)
+    {
+        return enforceOptions(WithResources.none(), wrap);
+    }
+
+    /**
+     * In the case of non-recurring scheduled tasks, we want to handle exceptions without the full {@link FutureTask}
+     * machinery, while still propagating the exception to the encapsulating Future
+     */
+    static <V> Callable<V> enforceOptions(WithResources withResources, Callable<V> wrap)
+    {
+        return new Callable<V>()
+        {
+            @Override
+            public V call() throws Exception
+            {
+                try (Closeable close = withResources.get())
+                {
+                    return wrap.call();
+                }
+                catch (Throwable t)
+                {
+                    handle(t);
+                    throw t;
+                }
+            }
+
+            @Override
+            public String toString()
+            {
+                return wrap.toString();
+            }
+        };
+    }
+}
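A minimal usage sketch of the two wrapper flavours above (not part of the patch). It assumes a caller in the same package, since the wrapper methods are package-private, and the scheduler and task names are illustrative only. The suppressing wrapper reports a failure via handle(...) but keeps the periodic schedule alive, while the propagating wrapper reports and rethrows so the returned Future still observes the failure.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class ExecutionFailureSketch
{
    static void sketch()
    {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Recurring task: a failure is reported via ExecutionFailure.handle(...) and then
        // swallowed, so one bad run does not cancel the remaining periodic executions.
        scheduler.scheduleAtFixedRate(ExecutionFailure.suppressing(ExecutionFailureSketch::doPeriodicWork),
                                      0, 1, TimeUnit.SECONDS);

        // One-shot task: the failure is reported and rethrown, so the Future returned by
        // submit() still completes exceptionally.
        scheduler.submit(ExecutionFailure.propagating(ExecutionFailureSketch::doOneShotWork));
    }

    static void doPeriodicWork() {}
    static void doOneShotWork() {}
}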
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutorBuilder.java b/src/java/org/apache/cassandra/concurrent/ExecutorBuilder.java
new file mode 100644
index 0000000..c1d39d5
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ExecutorBuilder.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Configure an executor before creating it.
+ * See {@link ThreadPoolExecutorBuilder}
+ */
+@Shared(scope = SIMULATION)
+public interface ExecutorBuilder<E extends ExecutorService>
+{
+    /**
+     * Threads for the executor built by this factory will time out (terminate) after the specified period.
+     */
+    ExecutorBuilder<E> withKeepAlive(long keepAlive, TimeUnit keepAliveUnits);
+
+    /**
+     * Core threads for the executor built by this factory will never time out (the default for single-threaded builders).
+     * Note that there is ordinarily no difference between core and non-core threads; only when the queue limit is zero
+     * do we create non-core threads.
+     */
+    ExecutorBuilder<E> withKeepAlive();
+
+    /**
+     * Specify the priority of threads that service the executor built by this factory (defaults to {@link Thread#NORM_PRIORITY})
+     */
+    ExecutorBuilder<E> withThreadPriority(int threadPriority);
+
+    /**
+     * Threads for the executor built by this factory will all be (transitively) members of {@code threadGroup},
+     * but may directly reside in a child thread group.
+     */
+    ExecutorBuilder<E> withThreadGroup(ThreadGroup threadGroup);
+
+    /**
+     * Use the system default thread group for the threads we create.
+     * This is used only for testing, so that we do not hold onto a transitive global reference to all threads.
+     */
+    @VisibleForTesting
+    ExecutorBuilder<E> withDefaultThreadGroup();
+
+    /**
+     * The executor built by this factory will limit the number of queued tasks; default is unlimited.
+     * Once the queue limit is reached and all threads are busy executing tasks, new submissions will be rejected
+     * (see {@link #withRejectedExecutionHandler(RejectedExecutionHandler)})
+     */
+    ExecutorBuilder<E> withQueueLimit(int queueLimit);
+
+    /**
+     * Set the {@link RejectedExecutionHandler} for the executor built by this factory.
+     * By default this is executor-specific, either:
+     * <li> {@link ThreadPoolExecutorBase#blockingExecutionHandler}
+     * <li> {@link ScheduledThreadPoolExecutorPlus#rejectedExecutionHandler}
+     * <li> and maybe wrapped by {@link ThreadPoolExecutorJMXAdapter#rejectedExecutionHandler}
+     */
+    ExecutorBuilder<E> withRejectedExecutionHandler(RejectedExecutionHandler rejectedExecutionHandler);
+
+    /**
+     * Set the {@link UncaughtExceptionHandler} for threads that service executors built by this factory.
+     * By default {@link JVMStabilityInspector#uncaughtException(Thread, Throwable)}
+     */
+    ExecutorBuilder<E> withUncaughtExceptionHandler(UncaughtExceptionHandler uncaughtExceptionHandler);
+
+    /**
+     * Build the configured executor
+     */
+    E build();
+}
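As a hedged sketch of the builder contract above (assuming the same package, with the builder obtained from one of the factories introduced below in this patch; the keep-alive, priority, and queue-limit values are illustrative, not defaults taken from the patch):

import java.util.concurrent.TimeUnit;

class ExecutorBuilderSketch
{
    static ExecutorPlus configure(ExecutorBuilder<? extends ExecutorPlus> builder)
    {
        return builder.withKeepAlive(60, TimeUnit.SECONDS)     // idle threads terminate after 60s
                      .withThreadPriority(Thread.MIN_PRIORITY) // background-priority worker threads
                      .withQueueLimit(128)                     // bound the pending-task queue
                      .build();                                // create the configured executor
    }
}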
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutorBuilderFactory.java b/src/java/org/apache/cassandra/concurrent/ExecutorBuilderFactory.java
new file mode 100644
index 0000000..465226a
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ExecutorBuilderFactory.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Entry point for configuring and creating new executors.
+ *
+ * Supports quick and easy construction of default-configured executors via
+ * <li>{@link #sequential(String)}
+ * <li>{@link #pooled(String, int)}
+ *
+ * Supports custom configuration of executors via
+ * <li>{@link #configureSequential(String)}
+ * <li>{@link #configurePooled(String, int)}
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface ExecutorBuilderFactory<E extends ExecutorPlus, S extends SequentialExecutorPlus>
+{
+    /**
+     * Configure a sequential (single threaded) executor
+     */
+    ExecutorBuilder<? extends S> configureSequential(String name);
+
+    /**
+     * Configure a pooled executor with the requested number of threads
+     */
+    ExecutorBuilder<? extends E> configurePooled(String name, int threads);
+
+    /**
+     * Return a default configuration of sequential executor
+     */
+    default S sequential(String name) { return configureSequential(name).build(); }
+
+    /**
+     * Return a default configuration of pooled executor
+     */
+    default E pooled(String name, int threads) { return configurePooled(name, threads).build(); }
+
+    /**
+     * Entry point for configuring and creating new executors.
+     *
+     * Supports quick and easy construction of default-configured executors via
+     * <li>{@link #sequential(String)}
+     * <li>{@link #pooled(String, int)}
+     *
+     * Supports custom configuration of executors via
+     * <li>{@link #configureSequential(String)}
+     * <li>{@link #configurePooled(String, int)}
+     *
+     * Supports any of the above with added JMX registration via sub-factories
+     * <li>{@link #withJmx(String)}
+     * <li>{@link #withJmxInternal()}
+     */
+    interface Jmxable<E extends ExecutorPlus, S extends SequentialExecutorPlus> extends ExecutorBuilderFactory<E, S>
+    {
+        /**
+         * @return a factory that configures executors that register against JMX using the provided jmx path
+         */
+        ExecutorBuilderFactory<E, S> withJmx(String jmxPath);
+
+        /**
+         * @return a factory that configures executors that register against JMX using the "internal" jmx path
+         */
+        default ExecutorBuilderFactory<E, S> withJmxInternal() { return withJmx("internal"); }
+    }
+}
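A short sketch (assuming the same package; pool names and thread counts are illustrative only) contrasting the default-configuration shortcuts, the configure* builder path, and the JMX sub-factory:

class ExecutorBuilderFactorySketch
{
    static void sketch(ExecutorBuilderFactory.Jmxable<ExecutorPlus, SequentialExecutorPlus> factory)
    {
        // Quick construction with the default configuration
        SequentialExecutorPlus single = factory.sequential("HypotheticalSingle");
        ExecutorPlus pool = factory.pooled("HypotheticalPool", 8);

        // Custom configuration via the builder
        ExecutorPlus bounded = factory.configurePooled("HypotheticalBounded", 4)
                                      .withQueueLimit(64)
                                      .build();

        // The same entry points, but the resulting executor is registered in JMX
        ExecutorPlus jmxPool = factory.withJmx("internal").pooled("HypotheticalJmxPool", 2);
    }
}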
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutorFactory.java b/src/java/org/apache/cassandra/concurrent/ExecutorFactory.java
new file mode 100644
index 0000000..0a62747
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ExecutorFactory.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.concurrent.InfiniteLoopExecutor.Daemon;
+import org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts;
+import org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe;
+import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.Shared;
+
+import static java.lang.Thread.*;
+import static org.apache.cassandra.concurrent.ExecutorFactory.SimulatorSemantics.NORMAL;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Daemon.DAEMON;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts.UNSYNCHRONIZED;
+import static org.apache.cassandra.concurrent.NamedThreadFactory.createThread;
+import static org.apache.cassandra.concurrent.NamedThreadFactory.setupThread;
+import static org.apache.cassandra.concurrent.ThreadPoolExecutorBuilder.pooledJmx;
+import static org.apache.cassandra.concurrent.ThreadPoolExecutorBuilder.sequentialJmx;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Entry point for configuring and creating new executors.
+ *
+ * Supports quick and easy construction of default-configured executors via
+ * <li>{@link #sequential(String)}
+ * <li>{@link #pooled(String, int)}
+ * <li>{@link #scheduled(String)}
+ * <li>{@link #scheduled(boolean, String)}
+ * <li>{@link #scheduled(boolean, String, int)}
+ *
+ * Supports custom configuration of executors via
+ * <li>{@link #configureSequential(String)}
+ * <li>{@link #configurePooled(String, int)}
+ *
+ * Supports any of the above with added JMX registration via sub-factories
+ * <li>{@link #withJmx(String)}
+ * <li>{@link #withJmxInternal()}
+ *
+ * Supports any of the above with the resultant executor propagating {@link ExecutorLocals} via sub-factory
+ * <li>{@link #localAware()}
+ *
+ * Supports shared executors via sub-factory {@code localAware().withJMX()}
+ * using {@link LocalAwareSubFactoryWithJMX#shared(String, int, ExecutorPlus.MaximumPoolSizeListener)}
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface ExecutorFactory extends ExecutorBuilderFactory.Jmxable<ExecutorPlus, SequentialExecutorPlus>
+{
+    public interface LocalAwareSubFactoryWithJMX extends ExecutorBuilderFactory<LocalAwareExecutorPlus, LocalAwareSequentialExecutorPlus>
+    {
+        LocalAwareExecutorPlus shared(String name, int threads, ExecutorPlus.MaximumPoolSizeListener onSetMaxSize);
+    }
+
+    public interface LocalAwareSubFactory extends ExecutorBuilderFactory<LocalAwareExecutorPlus, LocalAwareSequentialExecutorPlus>
+    {
+        LocalAwareSubFactoryWithJMX withJmx(String jmxPath);
+        default LocalAwareSubFactoryWithJMX withJmxInternal() { return withJmx("internal"); }
+    }
+
+    public enum SimulatorSemantics
+    {
+        NORMAL, DISCARD
+    }
+
+    /**
+     * @return a factory that configures executors that propagate {@link ExecutorLocals} to the executing thread
+     */
+    LocalAwareSubFactory localAware();
+
+    /**
+     * @param name the name of the executor, the executor's thread group, and of any worker threads
+     * @return a default-configured {@link ScheduledExecutorPlus}
+     */
+    default ScheduledExecutorPlus scheduled(String name) { return scheduled(true, name, NORM_PRIORITY); }
+
+    /**
+     * @param name the name of the executor, the executor's thread group, and of any worker threads
+     * @param simulatorSemantics indicate special semantics for the executor under simulation
+     * @return a default-configured {@link ScheduledExecutorPlus}
+     */
+    default ScheduledExecutorPlus scheduled(String name, SimulatorSemantics simulatorSemantics) { return scheduled(true, name, NORM_PRIORITY, simulatorSemantics); }
+
+    /**
+     * @param executeOnShutdown if false, waiting tasks will be cancelled on shutdown
+     * @param name the name of the executor, the executor's thread group, and of any worker threads
+     * @return a {@link ScheduledExecutorPlus} with normal thread priority
+     */
+    default ScheduledExecutorPlus scheduled(boolean executeOnShutdown, String name) { return scheduled(executeOnShutdown, name, NORM_PRIORITY); }
+
+    /**
+     * @param executeOnShutdown if false, waiting tasks will be cancelled on shutdown
+     * @param name the name of the executor, the executor's thread group, and of any worker threads
+     * @param priority the thread priority of workers
+     * @return a {@link ScheduledExecutorPlus}
+     */
+    default ScheduledExecutorPlus scheduled(boolean executeOnShutdown, String name, int priority) { return scheduled(executeOnShutdown, name, priority, NORMAL); }
+
+    /**
+     * @param executeOnShutdown if false, waiting tasks will be cancelled on shutdown
+     * @param name the name of the executor, the executor's thread group, and of any worker threads
+     * @param priority the thread priority of workers
+     * @param simulatorSemantics indicate special semantics for the executor under simulation
+     * @return a {@link ScheduledExecutorPlus}
+     */
+    ScheduledExecutorPlus scheduled(boolean executeOnShutdown, String name, int priority, SimulatorSemantics simulatorSemantics);
+
+    /**
+     * Create and start a new thread to execute {@code runnable}
+     * @param name the name of the thread
+     * @param runnable the task to execute
+     * @param daemon flag to indicate whether the thread should be a daemon or not
+     * @return the new thread
+     */
+    Thread startThread(String name, Runnable runnable, Daemon daemon);
+
+    /**
+     * Create and start a new thread to execute {@code runnable}; this thread will be a daemon thread.
+     * @param name the name of the thread
+     * @param runnable the task to execute
+     * @return the new thread
+     */
+    default Thread startThread(String name, Runnable runnable)
+    {
+        return startThread(name, runnable, DAEMON);
+    }
+
+    /**
+     * Create and start a new InfiniteLoopExecutor to repeatedly invoke {@code task}.
+     * On shutdown, the executing thread will be interrupted; to support clean shutdown
+     * {@code task} should propagate {@link InterruptedException}.
+     *
+     * @param name the name of the thread used to invoke the task repeatedly
+     * @param task the task to execute repeatedly
+     * @param simulatorSafe flag indicating if the loop thread can be intercepted / rescheduled during cluster simulation
+     * @param daemon flag to indicate whether the loop thread should be a daemon thread or not
+     * @param interrupts flag to indicate whether to synchronize interrupts of the task execution thread
+     *                   using the task's monitor; this can be used to prevent interruption while performing
+     *                   IO operations which forbid interrupted threads.
+     *                   See: {@link org.apache.cassandra.db.commitlog.AbstractCommitLogSegmentManager::start}
+     * @return the new thread
+     */
+    Interruptible infiniteLoop(String name, Interruptible.Task task, SimulatorSafe simulatorSafe, Daemon daemon, Interrupts interrupts);
+
+    /**
+     * Create and start a new InfiniteLoopExecutor to repeatedly invoke {@code task}.
+     * On shutdown, the executing thread will be interrupted; to support clean shutdown
+     * {@code task} should propagate {@link InterruptedException}.
+     *
+     * @param name the name of the thread used to invoke the task repeatedly
+     * @param task the task to execute repeatedly
+     * @param simulatorSafe flag indicating if the loop thread can be intercepted / rescheduled during cluster simulation
+     * @return the new thread
+     */
+    default Interruptible infiniteLoop(String name, Interruptible.SimpleTask task, SimulatorSafe simulatorSafe)
+    {
+        return infiniteLoop(name, Interruptible.Task.from(task), simulatorSafe, DAEMON, UNSYNCHRONIZED);
+    }
+
+    /**
+     * Create a new thread group for use with builders - this thread group will be situated within
+     * this factory's parent thread group, and may be supplied to multiple executor builders.
+     */
+    ThreadGroup newThreadGroup(String name);
+
+    public static final class Global
+    {
+        // deliberately not volatile to ensure zero overhead outside of testing;
+        // depend on other memory visibility primitives to ensure visibility
+        private static ExecutorFactory FACTORY = new ExecutorFactory.Default(Global.class.getClassLoader(), null, JVMStabilityInspector::uncaughtException);
+        private static boolean modified;
+
+        public static ExecutorFactory executorFactory()
+        {
+            return FACTORY;
+        }
+
+        public static synchronized void unsafeSet(ExecutorFactory executorFactory)
+        {
+            FACTORY = executorFactory;
+            modified = true;
+        }
+
+        public static synchronized boolean tryUnsafeSet(ExecutorFactory executorFactory)
+        {
+            if (modified)
+                return false;
+            unsafeSet(executorFactory);
+            return true;
+        }
+    }
+
+    public static final class Default extends NamedThreadFactory.MetaFactory implements ExecutorFactory
+    {
+        public Default(ClassLoader contextClassLoader, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler)
+        {
+            super(contextClassLoader, threadGroup, uncaughtExceptionHandler);
+        }
+
+        @Override
+        public LocalAwareSubFactory localAware()
+        {
+            return new LocalAwareSubFactory()
+            {
+                public ExecutorBuilder<? extends LocalAwareSequentialExecutorPlus> configureSequential(String name)
+                {
+                    return ThreadPoolExecutorBuilder.sequential(LocalAwareSingleThreadExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name);
+                }
+
+                public ExecutorBuilder<LocalAwareThreadPoolExecutorPlus> configurePooled(String name, int threads)
+                {
+                    return ThreadPoolExecutorBuilder.pooled(LocalAwareThreadPoolExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, threads);
+                }
+
+                public LocalAwareSubFactoryWithJMX withJmx(String jmxPath)
+                {
+                    return new LocalAwareSubFactoryWithJMX()
+                    {
+                        public ExecutorBuilder<LocalAwareSingleThreadExecutorPlus> configureSequential(String name)
+                        {
+                            return sequentialJmx(LocalAwareSingleThreadExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, jmxPath);
+                        }
+
+                        public ExecutorBuilder<LocalAwareThreadPoolExecutorPlus> configurePooled(String name, int threads)
+                        {
+                            return pooledJmx(LocalAwareThreadPoolExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, threads, jmxPath);
+                        }
+
+                        public LocalAwareExecutorPlus shared(String name, int threads, ExecutorPlus.MaximumPoolSizeListener onSetMaxSize)
+                        {
+                            return SharedExecutorPool.SHARED.newExecutor(threads, onSetMaxSize, jmxPath, name);
+                        }
+                    };
+                }
+            };
+        }
+
+        @Override
+        public ExecutorBuilderFactory<ExecutorPlus, SequentialExecutorPlus> withJmx(String jmxPath)
+        {
+            return new ExecutorBuilderFactory<ExecutorPlus, SequentialExecutorPlus>()
+            {
+                @Override
+                public ExecutorBuilder<? extends SequentialExecutorPlus> configureSequential(String name)
+                {
+                    return ThreadPoolExecutorBuilder.sequentialJmx(SingleThreadExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, jmxPath);
+                }
+
+                @Override
+                public ExecutorBuilder<? extends ExecutorPlus> configurePooled(String name, int threads)
+                {
+                    return ThreadPoolExecutorBuilder.pooledJmx(ThreadPoolExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, threads, jmxPath);
+                }
+            };
+        }
+
+        @Override
+        public ExecutorBuilder<SingleThreadExecutorPlus> configureSequential(String name)
+        {
+            return ThreadPoolExecutorBuilder.sequential(SingleThreadExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name);
+        }
+
+        @Override
+        public ExecutorBuilder<ThreadPoolExecutorPlus> configurePooled(String name, int threads)
+        {
+            return ThreadPoolExecutorBuilder.pooled(ThreadPoolExecutorPlus::new, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, threads);
+        }
+
+        @Override
+        public ScheduledExecutorPlus scheduled(boolean executeOnShutdown, String name, int priority, SimulatorSemantics simulatorSemantics)
+        {
+            ScheduledThreadPoolExecutorPlus executor = new ScheduledThreadPoolExecutorPlus(newThreadFactory(name, priority));
+            if (!executeOnShutdown)
+                executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
+            return executor;
+        }
+
+        @Override
+        public Thread startThread(String name, Runnable runnable, Daemon daemon)
+        {
+            Thread thread = setupThread(createThread(threadGroup, runnable, name, daemon == DAEMON),
+                                        Thread.NORM_PRIORITY,
+                                        contextClassLoader,
+                                        uncaughtExceptionHandler);
+            thread.start();
+            return thread;
+        }
+
+        @Override
+        public Interruptible infiniteLoop(String name, Interruptible.Task task, SimulatorSafe simulatorSafe, Daemon daemon, Interrupts interrupts)
+        {
+            return new InfiniteLoopExecutor(this, name, task, daemon, interrupts);
+        }
+
+        @Override
+        public ThreadGroup newThreadGroup(String name)
+        {
+            return threadGroup == null ? null : new ThreadGroup(threadGroup, name);
+        }
+    }
+}
\ No newline at end of file
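A usage sketch of the factory entry points above (assuming the same package; executor and thread names are illustrative, and only methods visible in this patch are used):

class ExecutorFactorySketch
{
    static void sketch()
    {
        ExecutorFactory factory = ExecutorFactory.Global.executorFactory();

        // Default-configured scheduled executor: NORM_PRIORITY, delayed tasks still run on shutdown.
        ScheduledExecutorPlus scheduled = factory.scheduled("HypotheticalScheduled");

        // Variant whose waiting tasks are cancelled rather than executed on shutdown.
        ScheduledExecutorPlus cancelling = factory.scheduled(false, "HypotheticalCleanup");

        // A named daemon thread started directly by the factory.
        Thread worker = factory.startThread("HypotheticalWorker", () -> {});

        // A pooled executor whose workers propagate ExecutorLocals (tracing / client warning state).
        LocalAwareExecutorPlus localAware = factory.localAware().pooled("HypotheticalLocalAware", 2);
    }
}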
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutorLocal.java b/src/java/org/apache/cassandra/concurrent/ExecutorLocal.java
deleted file mode 100644
index 6577b3d..0000000
--- a/src/java/org/apache/cassandra/concurrent/ExecutorLocal.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.concurrent;
-
-import org.apache.cassandra.service.ClientWarn;
-import org.apache.cassandra.tracing.Tracing;
-
-public interface ExecutorLocal<T>
-{
-    ExecutorLocal[] all = { Tracing.instance, ClientWarn.instance };
-
-    /**
-     * This is called when scheduling the task, and also before calling {@link #set(Object)} when running on a
-     * executor thread.
-     *
-     * @return The thread-local value that we want to copy across executor boundaries; may be null if not set.
-     */
-    T get();
-
-    /**
-     * Before a task has been run, this will be called with the value from the thread that scheduled the task, and after
-     * the task is finished, the value that was previously retrieved from this thread is restored.
-     *
-     * @param value Value to use for the executor local state; may be null.
-     */
-    void set(T value);
-}
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutorLocals.java b/src/java/org/apache/cassandra/concurrent/ExecutorLocals.java
index 8e6d6ea..4eeb2e5 100644
--- a/src/java/org/apache/cassandra/concurrent/ExecutorLocals.java
+++ b/src/java/org/apache/cassandra/concurrent/ExecutorLocals.java
@@ -18,11 +18,11 @@
 
 package org.apache.cassandra.concurrent;
 
-import java.util.Arrays;
-
+import io.netty.util.concurrent.FastThreadLocal;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.tracing.TraceState;
-import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.WithResources;
 
 /*
  * This class only knows about Tracing and ClientWarn, so if any different executor locals are added, it must be
@@ -30,55 +30,80 @@
  *
  * We don't enumerate the ExecutorLocal.all array each time because it would be much slower.
  */
-public class ExecutorLocals
+public class ExecutorLocals implements WithResources, Closeable
 {
-    private static final ExecutorLocal<TraceState> tracing = Tracing.instance;
-    private static final ExecutorLocal<ClientWarn.State> clientWarn = ClientWarn.instance;
+    private static final ExecutorLocals none = new ExecutorLocals(null, null);
+    private static final FastThreadLocal<ExecutorLocals> locals = new FastThreadLocal<ExecutorLocals>()
+    {
+        @Override
+        protected ExecutorLocals initialValue()
+        {
+            return none;
+        }
+    };
+
+    public static class Impl
+    {
+        @SuppressWarnings("resource")
+        protected static void set(TraceState traceState, ClientWarn.State clientWarnState)
+        {
+            if (traceState == null && clientWarnState == null) locals.set(none);
+            else locals.set(new ExecutorLocals(traceState, clientWarnState));
+        }
+    }
 
     public final TraceState traceState;
     public final ClientWarn.State clientWarnState;
 
-    private ExecutorLocals(TraceState traceState, ClientWarn.State clientWarnState)
+    protected ExecutorLocals(TraceState traceState, ClientWarn.State clientWarnState)
     {
         this.traceState = traceState;
         this.clientWarnState = clientWarnState;
     }
 
-    static
+    /**
+     * @return an ExecutorLocals object which has the current trace state and client warn state.
+     */
+    public static ExecutorLocals current()
     {
-        assert Arrays.equals(ExecutorLocal.all, new ExecutorLocal[]{ tracing, clientWarn })
-        : "ExecutorLocals has not been updated to reflect new ExecutorLocal.all";
+        return locals.get();
     }
 
     /**
-     * This creates a new ExecutorLocals object based on what is already set.
-     *
-     * @return an ExecutorLocals object which has the trace state and client warn state captured if either has been set,
-     *         or null if both are unset. The null result short-circuits logic in
-     *         {@link AbstractLocalAwareExecutorService#newTaskFor(Runnable, Object, ExecutorLocals)}, preventing
-     *         unnecessarily calling {@link ExecutorLocals#set(ExecutorLocals)}.
+     * The {@link #current}Locals, if any; otherwise {@link WithResources#none()}.
+     * Used to propagate current to other executors as a {@link WithResources}.
      */
-    public static ExecutorLocals create()
+    public static WithResources propagate()
     {
-        TraceState traceState = tracing.get();
-        ClientWarn.State clientWarnState = clientWarn.get();
-        if (traceState == null && clientWarnState == null)
-            return null;
-        else
-            return new ExecutorLocals(traceState, clientWarnState);
+        ExecutorLocals locals = current();
+        return locals == none ? WithResources.none() : locals;
     }
 
+    @SuppressWarnings("resource")
     public static ExecutorLocals create(TraceState traceState)
     {
-        ClientWarn.State clientWarnState = clientWarn.get();
-        return new ExecutorLocals(traceState, clientWarnState);
+        ExecutorLocals current = locals.get();
+        return current.traceState == traceState ? current : new ExecutorLocals(traceState, current.clientWarnState);
     }
 
-    public static void set(ExecutorLocals locals)
+    public static void clear()
     {
-        TraceState traceState = locals == null ? null : locals.traceState;
-        ClientWarn.State clientWarnState = locals == null ? null : locals.clientWarnState;
-        tracing.set(traceState);
-        clientWarn.set(clientWarnState);
+        locals.set(none);
+    }
+
+    /**
+     * Overwrite current locals, and return the previous ones
+     */
+    public Closeable get()
+    {
+        ExecutorLocals old = current();
+        if (old != this)
+            locals.set(this);
+        return old;
+    }
+
+    public void close()
+    {
+        locals.set(this);
     }
 }
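A hedged usage sketch for the reworked ExecutorLocals, assuming the classes introduced in this patch are on the classpath: propagate() captures the submitting thread's trace and client-warn state (or WithResources.none() when nothing is set), and the receiving executor reinstates it around the task via the get()/close() contract above. The `stage` parameter is a hypothetical ExecutorPlus (the interface added later in this patch); names are illustrative only.

    // Sketch: hand the caller's locals to another executor; the executor opens them
    // (get()) before the task runs and restores the previous locals (close()) afterwards.
    import org.apache.cassandra.concurrent.ExecutorLocals;
    import org.apache.cassandra.concurrent.ExecutorPlus;

    class ExecutorLocalsPropagationExample
    {
        static void runWithCallerLocals(ExecutorPlus stage, Runnable work)
        {
            stage.execute(ExecutorLocals.propagate(), work);
        }
    }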
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/ExecutorPlus.java
new file mode 100644
index 0000000..1fca66f
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ExecutorPlus.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Future;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Cassandra's extension of {@link ExecutorService}, using our own {@link Future}, supporting
+ * {@link #inExecutor()}, and execution with associated resources {@link #execute(WithResources, Runnable)}
+ * (which is primarily used for encapsulating {@link ExecutorLocals} without leaking implementing classes).
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface ExecutorPlus extends ExecutorService, ResizableThreadPool
+{
+    interface MaximumPoolSizeListener
+    {
+        /**
+         * Listener to follow changes to the maximum pool size
+         */
+        void onUpdateMaximumPoolSize(int maximumPoolSize);
+    }
+
+    /**
+     * MAY execute {@code task} immediately, if the calling thread is permitted to do so.
+     */
+    default void maybeExecuteImmediately(Runnable task)
+    {
+        execute(task);
+    }
+
+    /**
+     * Overrides {@link ExecutorService#submit(Callable)} to return a Cassandra {@link Future}
+     */
+    @Override
+    <T> Future<T> submit(Callable<T> task);
+
+    /**
+     * Overrides {@link ExecutorService#submit(Runnable, Object)} to return a Cassandra {@link Future}
+     */
+    @Override
+    <T> Future<T> submit(Runnable task, T result);
+
+    /**
+     * Overrides {@link ExecutorService#submit(Runnable)} to return a Cassandra {@link Future}
+     */
+    @Override
+    Future<?> submit(Runnable task);
+
+    /*
+     * ==============================================
+     * WithResources variants of submit and execute.
+     *
+     * (We need a way to inject a TraceState directly into the Executor context without going through
+     * the global Tracing sessions; see CASSANDRA-5668)
+     * ==============================================
+     */
+
+    /**
+     * Invoke {@code task}. The invoking thread will first instantiate the resources provided before
+     * invoking {@code task}, so that thread state may be modified and cleaned up.
+     *
+     * The invoking thread will execute something semantically equivalent to:
+     *
+     * <code>
+     *     try (Closeable close = withResources.get())
+     *     {
+     *         task.run();
+     *     }
+     * </code>
+     *
+     * @param withResources the resources to create and hold while executing {@code task}
+     * @param task the task to execute
+     */
+    void execute(WithResources withResources, Runnable task);
+
+    /**
+     * Invoke {@code task}, returning a future representing this computation.
+     * The invoking thread will first instantiate the resources provided before
+     * invoking {@code task}, so that thread state may be modified and cleaned up.
+     *
+     * The invoking thread will execute something semantically equivalent to:
+     *
+     * <code>
+     *     try (Closeable close = withResources.get())
+     *     {
+     *         return task.call();
+     *     }
+     * </code>
+     *
+     * @param withResources the resources to create and hold while executing {@code task}
+     * @param task the task to execute
+     */
+    <T> Future<T> submit(WithResources withResources, Callable<T> task);
+
+    /**
+     * Invoke {@code task}, returning a future yielding {@code null} if successful,
+     * or the abnormal termination of {@code task} otherwise.
+     *
+     * The invoking thread will first instantiate the resources provided before
+     * invoking {@code task}, so that thread state may be modified and cleaned up.
+     *
+     * <code>
+     *     try (Closeable close = withResources.get())
+     *     {
+     *         task.run();
+     *         return null;
+     *     }
+     * </code>
+     *
+     * @param withResources the resources to create and hold while executing {@code task}
+     * @param task the task to execute
+     */
+    Future<?> submit(WithResources withResources, Runnable task);
+
+    /**
+     * Invoke {@code task}, returning a future yielding {@code result} if successful,
+     * or the abnormal termination of {@code task} otherwise.
+     *
+     * The invoking thread will first instantiate the resources provided before
+     * invoking {@code task}, so that thread state may be modified and cleaned up.
+     *
+     * The invoking thread will execute something semantically equivalent to:
+     *
+     * <code>
+     *     try (Closeable close = withResources.get())
+     *     {
+     *         task.run();
+     *         return result;
+     *     }
+     * </code>
+     *
+     * @param withResources the resources to create and hold while executing {@code task}
+     * @param task the task to execute
+     * @param result the result if successful
+     */
+    <T> Future<T> submit(WithResources withResources, Runnable task, T result);
+
+    /**
+     * @return true iff the caller is a worker thread actively serving this executor
+     */
+    boolean inExecutor();
+
+    default <T> List<java.util.concurrent.Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException
+    {
+        throw new UnsupportedOperationException();
+    }
+    default <T> List<java.util.concurrent.Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException
+    {
+        throw new UnsupportedOperationException();
+    }
+    default <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException
+    {
+        throw new UnsupportedOperationException();
+    }
+    default <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
+    {
+        throw new UnsupportedOperationException();
+    }
+}
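A hedged illustration of the WithResources contract documented above: the executor invokes withResources.get() before the task and closes the returned Closeable afterwards, which is how per-task thread state is installed and torn down. This assumes WithResources and Closeable are single-method interfaces, as their use elsewhere in this patch suggests; the thread-name resource is purely illustrative.

    import java.util.concurrent.Callable;
    import org.apache.cassandra.concurrent.ExecutorPlus;
    import org.apache.cassandra.utils.WithResources;
    import org.apache.cassandra.utils.concurrent.Future;

    class ExecutorPlusWithResourcesExample
    {
        static <T> Future<T> submitRenamed(ExecutorPlus executor, String name, Callable<T> task)
        {
            // get() runs on the worker thread just before the task; the returned
            // Closeable restores the previous thread name once the task completes.
            WithResources renameWhileRunning = () -> {
                Thread thread = Thread.currentThread();
                String previous = thread.getName();
                thread.setName(name);
                return () -> thread.setName(previous);
            };
            return executor.submit(renameWhileRunning, task);
        }
    }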
diff --git a/src/java/org/apache/cassandra/concurrent/FutureTask.java b/src/java/org/apache/cassandra/concurrent/FutureTask.java
new file mode 100644
index 0000000..2348ff6
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/FutureTask.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.Callable;
+
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+
+/**
+ * A FutureTask that utilises Cassandra's {@link AsyncFuture}, making it compatible with {@link ExecutorPlus}.
+ * Propagates exceptions to the uncaught exception handler.
+ */
+public class FutureTask<V> extends AsyncFuture<V> implements RunnableFuture<V>
+{
+    private Callable<? extends V> call;
+
+    public FutureTask(Callable<? extends V> call)
+    {
+        this.call = call;
+    }
+
+    public FutureTask(Runnable run)
+    {
+        this.call = callable(run);
+    }
+
+    V call() throws Exception
+    {
+        return call.call();
+    }
+
+    public void run()
+    {
+        try
+        {
+            if (!setUncancellable())
+                return;
+
+            trySuccess(call());
+        }
+        catch (Throwable t)
+        {
+            tryFailure(t);
+        }
+        finally
+        {
+            call = null;
+        }
+    }
+
+    protected boolean tryFailure(Throwable t)
+    {
+        ExecutionFailure.handle(t);
+        return super.tryFailure(t);
+    }
+
+    public static <T> Callable<T> callable(Runnable run)
+    {
+        return new Callable<T>()
+        {
+            public T call()
+            {
+                run.run();
+                return null;
+            }
+
+            public String toString()
+            {
+                return run.toString();
+            }
+        };
+    }
+
+    public static <T> Callable<T> callable(Object id, Runnable run)
+    {
+        return new Callable<T>()
+        {
+            public T call()
+            {
+                run.run();
+                return null;
+            }
+
+            public String toString()
+            {
+                return id.toString();
+            }
+        };
+    }
+
+    public static <T> Callable<T> callable(Runnable run, T result)
+    {
+        return new Callable<T>()
+        {
+            public T call()
+            {
+                run.run();
+                return result;
+            }
+
+            public String toString()
+            {
+                return run + "->" + result;
+            }
+        };
+    }
+
+    public static <T> Callable<T> callable(Object id, Runnable run, T result)
+    {
+        return new Callable<T>()
+        {
+            public T call()
+            {
+                run.run();
+                return result;
+            }
+
+            public String toString()
+            {
+                return id.toString();
+            }
+        };
+    }
+
+    @Override
+    protected String description()
+    {
+        Object desc = call;
+        return desc == null ? null : call.toString();
+    }
+}
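A hedged sketch of the callable(...) helpers above: wrapping a Runnable with an explicit id gives the task a readable toString(), which description() surfaces instead of the default lambda representation. The task name used here is illustrative only.

    import java.util.concurrent.Callable;
    import org.apache.cassandra.concurrent.FutureTask;

    class FutureTaskNamingExample
    {
        static FutureTask<Object> namedTask(Runnable work)
        {
            // The id becomes the task's description for logging/debugging purposes.
            Callable<Object> named = FutureTask.callable("example-task", work);
            return new FutureTask<>(named);
        }
    }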
diff --git a/src/java/org/apache/cassandra/concurrent/FutureTaskWithResources.java b/src/java/org/apache/cassandra/concurrent/FutureTaskWithResources.java
new file mode 100644
index 0000000..78c4987
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/FutureTaskWithResources.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.Callable;
+
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+
+/**
+ * A FutureTask that utilises Cassandra's {@link AsyncFuture}, making it compatible with {@link ExecutorPlus}.
+ * Encapsulates a {@link WithResources}; the call will instantiate the resources before executing,
+ * and close them after executing but before completing the task.
+ *
+ * Propagates exceptions to the uncaught exception handler.
+ */
+public class FutureTaskWithResources<V> extends FutureTask<V>
+{
+    private final WithResources withResources;
+
+    public FutureTaskWithResources(WithResources withResources, Callable<V> call)
+    {
+        super(call);
+        this.withResources = withResources;
+    }
+
+    public FutureTaskWithResources(WithResources withResources, Runnable task)
+    {
+        super(task);
+        this.withResources = withResources;
+    }
+
+    V call() throws Exception
+    {
+        try (Closeable ignore = withResources.get())
+        {
+            return super.call();
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/concurrent/ImmediateExecutor.java b/src/java/org/apache/cassandra/concurrent/ImmediateExecutor.java
index 10c369c..5edc5be 100644
--- a/src/java/org/apache/cassandra/concurrent/ImmediateExecutor.java
+++ b/src/java/org/apache/cassandra/concurrent/ImmediateExecutor.java
@@ -18,30 +18,131 @@
 
 package org.apache.cassandra.concurrent;
 
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+
 import java.util.Collections;
 import java.util.List;
-import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
-public class ImmediateExecutor extends AbstractExecutorService implements LocalAwareExecutorService
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION) // shared to support instanceof check in SimulatedAction
+public class ImmediateExecutor implements LocalAwareExecutorPlus
 {
     public static final ImmediateExecutor INSTANCE = new ImmediateExecutor();
 
     private ImmediateExecutor() {}
 
-    public void execute(Runnable command, ExecutorLocals locals)
+    public <T> Future<T> submit(Callable<T> task)
     {
-        command.run();
+        try
+        {
+            return ImmediateFuture.success(task.call());
+        }
+        catch (Throwable t)
+        {
+            ExecutionFailure.handle(t);
+            return ImmediateFuture.failure(t);
+        }
     }
 
-    public void maybeExecuteImmediately(Runnable command)
+    public <T> Future<T> submit(Runnable task, T result)
     {
-        command.run();
+        try
+        {
+            task.run();
+            return ImmediateFuture.success(result);
+        }
+        catch (Throwable t)
+        {
+            ExecutionFailure.handle(t);
+            return ImmediateFuture.failure(t);
+        }
     }
 
-    public void execute(Runnable command)
+    public Future<?> submit(Runnable task)
     {
-        command.run();
+        try
+        {
+            task.run();
+            return ImmediateFuture.success(null);
+        }
+        catch (Throwable t)
+        {
+            ExecutionFailure.handle(t);
+            return ImmediateFuture.failure(t);
+        }
+    }
+
+    @Override
+    public void execute(WithResources withResources, Runnable task)
+    {
+        try (Closeable ignored = withResources.get())
+        {
+            task.run();
+        }
+        catch (Throwable t)
+        {
+            ExecutionFailure.handle(t);
+        }
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Callable<T> task)
+    {
+        try (Closeable ignored = withResources.get())
+        {
+            return ImmediateFuture.success(task.call());
+        }
+        catch (Throwable t)
+        {
+            ExecutionFailure.handle(t);
+            return ImmediateFuture.failure(t);
+        }
+    }
+
+    @Override
+    public Future<?> submit(WithResources withResources, Runnable task)
+    {
+        return submit(withResources, task, null);
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Runnable task, T result)
+    {
+        try (Closeable ignored = withResources.get())
+        {
+            task.run();
+            return ImmediateFuture.success(result);
+        }
+        catch (Throwable t)
+        {
+            ExecutionFailure.handle(t);
+            return ImmediateFuture.failure(t);
+        }
+    }
+
+    @Override
+    public boolean inExecutor()
+    {
+        return true;
+    }
+
+    public void execute(Runnable task)
+    {
+        try
+        {
+            task.run();
+        }
+        catch (Throwable t)
+        {
+            ExecutionFailure.handle(t);
+        }
     }
 
     public int  getActiveTaskCount()    { return 0; }
diff --git a/src/java/org/apache/cassandra/concurrent/InfiniteLoopExecutor.java b/src/java/org/apache/cassandra/concurrent/InfiniteLoopExecutor.java
index 8e72d91..51c5f9f 100644
--- a/src/java/org/apache/cassandra/concurrent/InfiniteLoopExecutor.java
+++ b/src/java/org/apache/cassandra/concurrent/InfiniteLoopExecutor.java
@@ -24,64 +24,158 @@
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.annotations.VisibleForTesting;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
 
-public class InfiniteLoopExecutor
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.InternalState.SHUTTING_DOWN_NOW;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.InternalState.TERMINATED;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts.SYNCHRONIZED;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts.UNSYNCHRONIZED;
+import static org.apache.cassandra.concurrent.Interruptible.State.INTERRUPTED;
+import static org.apache.cassandra.concurrent.Interruptible.State.NORMAL;
+import static org.apache.cassandra.concurrent.Interruptible.State.SHUTTING_DOWN;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+
+public class InfiniteLoopExecutor implements Interruptible
 {
     private static final Logger logger = LoggerFactory.getLogger(InfiniteLoopExecutor.class);
 
-    public interface InterruptibleRunnable
-    {
-        void run() throws InterruptedException;
-    }
+    @Shared(scope = Shared.Scope.SIMULATION)
+    public enum InternalState { SHUTTING_DOWN_NOW, TERMINATED }
 
+    @Shared(scope = Shared.Scope.SIMULATION)
+    public enum SimulatorSafe { SAFE, UNSAFE }
+
+    @Shared(scope = Shared.Scope.SIMULATION)
+    public enum Daemon        { DAEMON, NON_DAEMON }
+
+    @Shared(scope = Shared.Scope.SIMULATION)
+    public enum Interrupts    { SYNCHRONIZED, UNSYNCHRONIZED }
+
+    private static final AtomicReferenceFieldUpdater<InfiniteLoopExecutor, Object> stateUpdater = AtomicReferenceFieldUpdater.newUpdater(InfiniteLoopExecutor.class, Object.class, "state");
     private final Thread thread;
-    private final InterruptibleRunnable runnable;
-    private volatile boolean isShutdown = false;
+    private final Task task;
+    private volatile Object state = NORMAL;
+    private final Consumer<Thread> interruptHandler;
+    private final Condition isTerminated = newOneTimeCondition();
 
-    public InfiniteLoopExecutor(String name, InterruptibleRunnable runnable)
+    public InfiniteLoopExecutor(String name, Task task, Daemon daemon)
     {
-        this.runnable = runnable;
-        this.thread = new Thread(this::loop, name);
-        this.thread.setDaemon(true);
+        this(ExecutorFactory.Global.executorFactory(), name, task, daemon, UNSYNCHRONIZED);
     }
 
+    public InfiniteLoopExecutor(ExecutorFactory factory, String name, Task task, Daemon daemon)
+    {
+        this(factory, name, task, daemon, UNSYNCHRONIZED);
+    }
+
+    public InfiniteLoopExecutor(ExecutorFactory factory, String name, Task task, Daemon daemon, Interrupts interrupts)
+    {
+        this.task = task;
+        this.thread = factory.startThread(name, this::loop, daemon);
+        this.interruptHandler = interrupts == SYNCHRONIZED
+                                ? interruptHandler(task)
+                                : Thread::interrupt;
+    }
+
+    public InfiniteLoopExecutor(BiFunction<String, Runnable, Thread> threadStarter, String name, Task task, Interrupts interrupts)
+    {
+        this.task = task;
+        this.thread = threadStarter.apply(name, this::loop);
+        this.interruptHandler = interrupts == SYNCHRONIZED
+                                ? interruptHandler(task)
+                                : Thread::interrupt;
+    }
+
+    private static Consumer<Thread> interruptHandler(final Object monitor)
+    {
+        return thread -> {
+            synchronized (monitor)
+            {
+                thread.interrupt();
+            }
+        };
+    }
+
+
     private void loop()
     {
-        while (!isShutdown)
+        boolean interrupted = false;
+        try
         {
-            try
+            while (true)
             {
-                runnable.run();
+                try
+                {
+                    Object cur = state;
+                    if (cur == SHUTTING_DOWN_NOW) break;
+
+                    interrupted |= Thread.interrupted();
+                    if (cur == NORMAL && interrupted) cur = INTERRUPTED;
+                    task.run((State) cur);
+
+                    interrupted = false;
+                    if (cur == SHUTTING_DOWN) break;
+                }
+                catch (TerminateException ignore)
+                {
+                    break;
+                }
+                catch (UncheckedInterruptedException | InterruptedException ignore)
+                {
+                    interrupted = true;
+                }
+                catch (Throwable t)
+                {
+                    logger.error("Exception thrown by runnable, continuing with loop", t);
+                }
             }
-            catch (InterruptedException ie)
-            {
-                if (isShutdown)
-                    return;
-                logger.error("Interrupted while executing {}, but not shutdown; continuing with loop", runnable, ie);
-            }
-            catch (Throwable t)
-            {
-                logger.error("Exception thrown by runnable, continuing with loop", t);
-            }
+        }
+        finally
+        {
+            state = TERMINATED;
+            isTerminated.signal();
         }
     }
 
-    public InfiniteLoopExecutor start()
+    public void interrupt()
     {
-        thread.start();
-        return this;
+        interruptHandler.accept(thread);
     }
 
-    public void shutdownNow()
+    public void shutdown()
     {
-        isShutdown = true;
-        thread.interrupt();
+        stateUpdater.updateAndGet(this, cur -> cur != TERMINATED && cur != SHUTTING_DOWN_NOW ? SHUTTING_DOWN : cur);
+        interruptHandler.accept(thread);
+    }
+
+    public Object shutdownNow()
+    {
+        stateUpdater.updateAndGet(this, cur -> cur != TERMINATED ? SHUTTING_DOWN_NOW : TERMINATED);
+        interruptHandler.accept(thread);
+        return null;
+    }
+
+    @Override
+    public boolean isTerminated()
+    {
+        return state == TERMINATED;
     }
 
     public boolean awaitTermination(long time, TimeUnit unit) throws InterruptedException
     {
-        thread.join(unit.toMillis(time));
-        return !thread.isAlive();
+        if (isTerminated())
+            return true;
+
+        long deadlineNanos = nanoTime() + unit.toNanos(time);
+        isTerminated.awaitUntil(deadlineNanos);
+        return isTerminated();
     }
 
     @VisibleForTesting
diff --git a/src/java/org/apache/cassandra/concurrent/Interruptible.java b/src/java/org/apache/cassandra/concurrent/Interruptible.java
new file mode 100644
index 0000000..8641ec8
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/Interruptible.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.concurrent.Interruptible.State.*;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface Interruptible extends Shutdownable
+{
+    public enum State { NORMAL, INTERRUPTED, SHUTTING_DOWN }
+
+    public static class TerminateException extends InterruptedException {}
+
+    public interface Task
+    {
+        void run(State state) throws InterruptedException;
+
+        static Task from(SimpleTask simpleTask)
+        {
+            return state -> { if (state == NORMAL) simpleTask.run(); };
+        }
+    }
+
+    /**
+     * A Task that only runs on NORMAL states
+     */
+    public interface SimpleTask
+    {
+        void run() throws InterruptedException;
+    }
+
+    void interrupt();
+}
+
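A hedged sketch wiring Interruptible.Task to the reworked InfiniteLoopExecutor: Task.from(...) runs the body only while the loop state is NORMAL, and the loop still makes a SHUTTING_DOWN pass so it can exit cleanly after shutdown() is called. Only the signatures shown in this patch are assumed; the loop name and work method are illustrative.

    import org.apache.cassandra.concurrent.InfiniteLoopExecutor;
    import org.apache.cassandra.concurrent.Interruptible;

    class InterruptibleLoopExample
    {
        static Interruptible startLoop()
        {
            // Runs doOneUnitOfWork repeatedly while NORMAL; shutdown()/shutdownNow()
            // interrupt the thread and the loop terminates, signalling awaitTermination.
            Interruptible.Task task = Interruptible.Task.from(InterruptibleLoopExample::doOneUnitOfWork);
            return new InfiniteLoopExecutor("example-loop", task, InfiniteLoopExecutor.Daemon.DAEMON);
        }

        private static void doOneUnitOfWork() throws InterruptedException
        {
            // placeholder for blocking work that may be interrupted
        }
    }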
diff --git a/src/java/org/apache/cassandra/concurrent/JMXEnabledSingleThreadExecutor.java b/src/java/org/apache/cassandra/concurrent/JMXEnabledSingleThreadExecutor.java
deleted file mode 100644
index ed54b3e..0000000
--- a/src/java/org/apache/cassandra/concurrent/JMXEnabledSingleThreadExecutor.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.concurrent;
-
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-
-public class JMXEnabledSingleThreadExecutor extends JMXEnabledThreadPoolExecutor
-{
-    public JMXEnabledSingleThreadExecutor(String threadPoolName, String jmxPath)
-    {
-        super(1, Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new SingleThreadFactory(threadPoolName), jmxPath);
-    }
-
-    @Override
-    public void setCoreThreads(int number)
-    {
-        throw new UnsupportedOperationException("Cannot change core pool size for single threaded executor.");
-    }
-
-    @Override
-    public void setMaximumThreads(int number)
-    {
-        throw new UnsupportedOperationException("Cannot change max threads for single threaded executor.");
-    }
-
-    @Override
-    public void setMaximumPoolSize(int newMaximumPoolSize)
-    {
-        setMaximumThreads(newMaximumPoolSize);
-    }
-
-    public boolean isExecutedBy(Thread test)
-    {
-        return getThreadFactory().thread == test;
-    }
-
-    public SingleThreadFactory getThreadFactory()
-    {
-        return (SingleThreadFactory) super.getThreadFactory();
-    }
-
-    public void setThreadFactory(ThreadFactory threadFactory)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    private static class SingleThreadFactory extends NamedThreadFactory
-    {
-        private volatile Thread thread;
-        SingleThreadFactory(String id)
-        {
-            super(id);
-        }
-
-        @Override
-        public Thread newThread(Runnable r)
-        {
-            Thread thread = super.newThread(r);
-            this.thread = thread;
-            return thread;
-        }
-    }
-}
diff --git a/src/java/org/apache/cassandra/concurrent/JMXEnabledThreadPoolExecutor.java b/src/java/org/apache/cassandra/concurrent/JMXEnabledThreadPoolExecutor.java
deleted file mode 100644
index 4283d4f..0000000
--- a/src/java/org/apache/cassandra/concurrent/JMXEnabledThreadPoolExecutor.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.concurrent;
-
-import java.util.List;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.cassandra.metrics.ThreadPoolMetrics;
-import org.apache.cassandra.utils.MBeanWrapper;
-
-/**
- * This is a wrapper class for the <i>ScheduledThreadPoolExecutor</i>. It provides an implementation
- * for the <i>afterExecute()</i> found in the <i>ThreadPoolExecutor</i> class to log any unexpected
- * Runtime Exceptions.
- */
-
-public class JMXEnabledThreadPoolExecutor extends DebuggableThreadPoolExecutor implements JMXEnabledThreadPoolExecutorMBean
-{
-    private final String mbeanName;
-    public final ThreadPoolMetrics metrics;
-
-    public JMXEnabledThreadPoolExecutor(String threadPoolName)
-    {
-        this(1, Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(threadPoolName), "internal");
-    }
-
-    public JMXEnabledThreadPoolExecutor(String threadPoolName, String jmxPath)
-    {
-        this(1, Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(threadPoolName), jmxPath);
-    }
-
-    public JMXEnabledThreadPoolExecutor(String threadPoolName, int priority)
-    {
-        this(1, Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(threadPoolName, priority), "internal");
-    }
-
-    public JMXEnabledThreadPoolExecutor(NamedThreadFactory threadFactory, String jmxPath)
-    {
-        this(1, Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory, jmxPath);
-    }
-
-    public JMXEnabledThreadPoolExecutor(int corePoolSize,
-            long keepAliveTime,
-            TimeUnit unit,
-            BlockingQueue<Runnable> workQueue,
-            NamedThreadFactory threadFactory,
-            String jmxPath)
-    {
-        this(corePoolSize, corePoolSize, keepAliveTime, unit, workQueue, threadFactory, jmxPath);
-    }
-
-    public JMXEnabledThreadPoolExecutor(int corePoolSize,
-                                        int maxPoolSize,
-                                        long keepAliveTime,
-                                        TimeUnit unit,
-                                        BlockingQueue<Runnable> workQueue,
-                                        NamedThreadFactory threadFactory,
-                                        String jmxPath)
-    {
-        super(corePoolSize, maxPoolSize, keepAliveTime, unit, workQueue, threadFactory);
-        super.prestartAllCoreThreads();
-        metrics = new ThreadPoolMetrics(this, jmxPath, threadFactory.id).register();
-
-        mbeanName = "org.apache.cassandra." + jmxPath + ":type=" + threadFactory.id;
-        MBeanWrapper.instance.registerMBean(this, mbeanName);
-    }
-
-    public JMXEnabledThreadPoolExecutor(int corePoolSize,
-                                        int maxPoolSize,
-                                        long keepAliveTime,
-                                        TimeUnit unit,
-                                        BlockingQueue<Runnable> workQueue,
-                                        NamedThreadFactory threadFactory,
-                                        String jmxPath,
-                                        RejectedExecutionHandler rejectedExecutionHandler)
-    {
-        this(corePoolSize, maxPoolSize, keepAliveTime, unit, workQueue, threadFactory, jmxPath);
-        setRejectedExecutionHandler(rejectedExecutionHandler);
-    }
-
-    private void unregisterMBean()
-    {
-        MBeanWrapper.instance.unregisterMBean(mbeanName);
-
-        // release metrics
-        metrics.release();
-    }
-
-    @Override
-    public synchronized void shutdown()
-    {
-        // synchronized, because there is no way to access super.mainLock, which would be
-        // the preferred way to make this threadsafe
-        if (!isShutdown())
-        {
-            unregisterMBean();
-        }
-        super.shutdown();
-    }
-
-    @Override
-    public synchronized List<Runnable> shutdownNow()
-    {
-        // synchronized, because there is no way to access super.mainLock, which would be
-        // the preferred way to make this threadsafe
-        if (!isShutdown())
-        {
-            unregisterMBean();
-        }
-        return super.shutdownNow();
-    }
-
-    public int getTotalBlockedTasks()
-    {
-        return (int) metrics.totalBlocked.getCount();
-    }
-
-    public int getCurrentlyBlockedTasks()
-    {
-        return (int) metrics.currentBlocked.getCount();
-    }
-
-    @Deprecated
-    public int getCoreThreads()
-    {
-        return getCorePoolSize();
-    }
-
-    @Deprecated
-    public void setCoreThreads(int number)
-    {
-        setCorePoolSize(number);
-    }
-
-    @Deprecated
-    public int getMaximumThreads()
-    {
-        return getMaximumPoolSize();
-    }
-
-    @Deprecated
-    public void setMaximumThreads(int number)
-    {
-        setMaximumPoolSize(number);
-    }
-
-    @Override
-    public void setMaximumPoolSize(int newMaximumPoolSize)
-    {
-        if (newMaximumPoolSize < getCorePoolSize())
-            throw new IllegalArgumentException("maximum pool size cannot be less than core pool size");
-        super.setMaximumPoolSize(newMaximumPoolSize);
-    }
-
-    @Override
-    protected void onInitialRejection(Runnable task)
-    {
-        metrics.totalBlocked.inc();
-        metrics.currentBlocked.inc();
-    }
-
-    @Override
-    protected void onFinalAccept(Runnable task)
-    {
-        metrics.currentBlocked.dec();
-    }
-
-    @Override
-    protected void onFinalRejection(Runnable task)
-    {
-        metrics.currentBlocked.dec();
-    }
-}
diff --git a/src/java/org/apache/cassandra/concurrent/JMXEnabledThreadPoolExecutorMBean.java b/src/java/org/apache/cassandra/concurrent/JMXEnabledThreadPoolExecutorMBean.java
deleted file mode 100644
index c2959df..0000000
--- a/src/java/org/apache/cassandra/concurrent/JMXEnabledThreadPoolExecutorMBean.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.concurrent;
-
-public interface JMXEnabledThreadPoolExecutorMBean extends ResizableThreadPool
-{
-    /**
-     * Returns core pool size of thread pool.
-     * Deprecated, use getCorePoolSize instead.
-     */
-    @Deprecated
-    public int getCoreThreads();
-
-    /**
-     * Allows user to resize core pool size of the thread pool.
-     * Deprecated, use setCorePoolSize instead.
-     */
-    @Deprecated
-    public void setCoreThreads(int number);
-
-    /**
-     * Returns maximum pool size of thread pool.
-     * Deprecated, use getMaximumThreads instead.
-     */
-    @Deprecated
-    public int getMaximumThreads();
-
-    /**
-     * Allows user to resize maximum size of the thread pool.
-     * Deprecated, use setMaximumThreads instead.
-     */
-    @Deprecated
-    public void setMaximumThreads(int number);
-}
diff --git a/src/java/org/apache/cassandra/concurrent/LocalAwareExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/LocalAwareExecutorPlus.java
new file mode 100644
index 0000000..743cacc
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/LocalAwareExecutorPlus.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * An {@link ExecutorPlus} that is aware of, and propagates to execution, any ExecutorLocals
+ */
+@Shared(scope = SIMULATION)
+public interface LocalAwareExecutorPlus extends ExecutorPlus
+{
+}
diff --git a/src/java/org/apache/cassandra/concurrent/LocalAwareExecutorService.java b/src/java/org/apache/cassandra/concurrent/LocalAwareExecutorService.java
deleted file mode 100644
index d6ac8e4..0000000
--- a/src/java/org/apache/cassandra/concurrent/LocalAwareExecutorService.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.cassandra.concurrent;
-
-import java.util.concurrent.ExecutorService;
-
-public interface LocalAwareExecutorService extends ExecutorService, ResizableThreadPool
-{
-    // we need a way to inject a TraceState directly into the Executor context without going through
-    // the global Tracing sessions; see CASSANDRA-5668
-    void execute(Runnable command, ExecutorLocals locals);
-
-    // permits executing in the context of the submitting thread
-    void maybeExecuteImmediately(Runnable command);
-
-    /**
-     * Returns the approximate number of threads that are actively
-     * executing tasks.
-     *
-     * @return the number of threads
-     */
-    int getActiveTaskCount();
-
-    /**
-     * Returns the approximate total number of tasks that have
-     * completed execution. Because the states of tasks and threads
-     * may change dynamically during computation, the returned value
-     * is only an approximation, but one that does not ever decrease
-     * across successive calls.
-     *
-     * @return the number of tasks
-     */
-    long getCompletedTaskCount();
-
-    /**
-     * Returns the approximate total of tasks waiting to be executed.
-     * Because the states of tasks and threads
-     * may change dynamically during computation, the returned value
-     * is only an approximation, but one that does not ever decrease
-     * across successive calls.
-     *
-     * @return the number of tasks
-     */
-    int getPendingTaskCount();
-
-    default int getMaxTasksQueued()
-    {
-        return -1;
-    }
-
-    interface MaximumPoolSizeListener
-    {
-        /**
-         * Listener to follow changes to the maximum pool size
-         */
-        void onUpdateMaximumPoolSize(int maximumPoolSize);
-    }
-}
diff --git a/src/java/org/apache/cassandra/concurrent/LocalAwareSequentialExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/LocalAwareSequentialExecutorPlus.java
new file mode 100644
index 0000000..99e44b0
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/LocalAwareSequentialExecutorPlus.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A {@link SequentialExecutorPlus} that is aware of, and propagates to execution, any ExecutorLocals
+ */
+@Shared(scope = SIMULATION)
+public interface LocalAwareSequentialExecutorPlus extends LocalAwareExecutorPlus, SequentialExecutorPlus
+{
+}
diff --git a/src/java/org/apache/cassandra/concurrent/LocalAwareSingleThreadExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/LocalAwareSingleThreadExecutorPlus.java
new file mode 100644
index 0000000..cf67c1a
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/LocalAwareSingleThreadExecutorPlus.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+public class LocalAwareSingleThreadExecutorPlus extends SingleThreadExecutorPlus implements LocalAwareSequentialExecutorPlus
+{
+    LocalAwareSingleThreadExecutorPlus(ThreadPoolExecutorBuilder<LocalAwareSingleThreadExecutorPlus> builder)
+    {
+        super(builder, TaskFactory.localAware());
+    }
+}
diff --git a/src/java/org/apache/cassandra/concurrent/LocalAwareThreadPoolExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/LocalAwareThreadPoolExecutorPlus.java
new file mode 100644
index 0000000..10d107e
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/LocalAwareThreadPoolExecutorPlus.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+public class LocalAwareThreadPoolExecutorPlus extends ThreadPoolExecutorPlus implements LocalAwareExecutorPlus
+{
+    LocalAwareThreadPoolExecutorPlus(ThreadPoolExecutorBuilder<? extends LocalAwareThreadPoolExecutorPlus> builder)
+    {
+        super(builder, TaskFactory.localAware());
+    }
+}
diff --git a/src/java/org/apache/cassandra/concurrent/NamedThreadFactory.java b/src/java/org/apache/cassandra/concurrent/NamedThreadFactory.java
index bcf686f..9816649 100644
--- a/src/java/org/apache/cassandra/concurrent/NamedThreadFactory.java
+++ b/src/java/org/apache/cassandra/concurrent/NamedThreadFactory.java
@@ -22,9 +22,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
-import io.netty.util.concurrent.FastThreadLocal;
 import io.netty.util.concurrent.FastThreadLocalThread;
-import org.apache.cassandra.utils.memory.BufferPool;
+import org.apache.cassandra.utils.JVMStabilityInspector;
 
 /**
  * This class is an implementation of the <i>ThreadFactory</i> interface. This
@@ -34,18 +33,49 @@
 
 public class NamedThreadFactory implements ThreadFactory
 {
+    private static final AtomicInteger anonymousCounter = new AtomicInteger();
     private static volatile String globalPrefix;
+
     public static void setGlobalPrefix(String prefix) { globalPrefix = prefix; }
-    public static String globalPrefix() {
+    public static String globalPrefix()
+    {
         String prefix = globalPrefix;
         return prefix == null ? "" : prefix;
     }
 
+    public static class MetaFactory
+    {
+        protected ClassLoader contextClassLoader;
+        protected ThreadGroup threadGroup;
+        protected Thread.UncaughtExceptionHandler uncaughtExceptionHandler;
+
+        public MetaFactory(ClassLoader contextClassLoader, ThreadGroup threadGroup, Thread.UncaughtExceptionHandler uncaughtExceptionHandler)
+        {
+            this.contextClassLoader = contextClassLoader;
+            if (threadGroup == null)
+            {
+                threadGroup = Thread.currentThread().getThreadGroup();
+                while (threadGroup.getParent() != null)
+                    threadGroup = threadGroup.getParent();
+            }
+            this.threadGroup = threadGroup;
+            this.uncaughtExceptionHandler = uncaughtExceptionHandler;
+        }
+
+        NamedThreadFactory newThreadFactory(String name, int threadPriority)
+        {
+            // We create a unique thread group for each factory, so that e.g. executors can determine which threads are members of the executor
+            ThreadGroup threadGroup = this.threadGroup == null ? null : new ThreadGroup(this.threadGroup, name);
+            return new NamedThreadFactory(name, threadPriority, contextClassLoader, threadGroup, uncaughtExceptionHandler);
+        }
+    }
+
     public final String id;
     private final int priority;
     private final ClassLoader contextClassLoader;
-    private final ThreadGroup threadGroup;
+    public final ThreadGroup threadGroup;
     protected final AtomicInteger n = new AtomicInteger(1);
+    private final Thread.UncaughtExceptionHandler uncaughtExceptionHandler;
 
     public NamedThreadFactory(String id)
     {
@@ -54,33 +84,61 @@
 
     public NamedThreadFactory(String id, int priority)
     {
-        this(id, priority, null, null);
+        this(id, priority, null, null, JVMStabilityInspector::uncaughtException);
+    }
+
+    public NamedThreadFactory(String id, ClassLoader contextClassLoader, ThreadGroup threadGroup)
+    {
+        this(id, Thread.NORM_PRIORITY, contextClassLoader, threadGroup, JVMStabilityInspector::uncaughtException);
     }
 
     public NamedThreadFactory(String id, int priority, ClassLoader contextClassLoader, ThreadGroup threadGroup)
     {
+        this(id, priority, contextClassLoader, threadGroup, JVMStabilityInspector::uncaughtException);
+    }
+
+    public NamedThreadFactory(String id, int priority, ClassLoader contextClassLoader, ThreadGroup threadGroup, Thread.UncaughtExceptionHandler uncaughtExceptionHandler)
+    {
         this.id = id;
         this.priority = priority;
         this.contextClassLoader = contextClassLoader;
         this.threadGroup = threadGroup;
+        this.uncaughtExceptionHandler = uncaughtExceptionHandler;
     }
 
+    @Override
     public Thread newThread(Runnable runnable)
     {
         String name = id + ':' + n.getAndIncrement();
-        Thread thread = createThread(threadGroup, runnable, name, true);
+        return newThread(threadGroup, runnable, name);
+    }
+
+    protected Thread newThread(ThreadGroup threadGroup, Runnable runnable, String name)
+    {
+        return setupThread(createThread(threadGroup, runnable, name, true));
+    }
+
+    protected <T extends Thread> T setupThread(T thread)
+    {
+        return setupThread(thread, priority, contextClassLoader, uncaughtExceptionHandler);
+    }
+
+    public static Thread createThread(ThreadGroup threadGroup, Runnable runnable, String name, int priority, ClassLoader contextClassLoader, Thread.UncaughtExceptionHandler uncaughtExceptionHandler)
+    {
+        String prefix = globalPrefix;
+        Thread thread = createThread(threadGroup, runnable, prefix != null ? prefix + name : name, true);
         thread.setPriority(priority);
         if (contextClassLoader != null)
             thread.setContextClassLoader(contextClassLoader);
+        if (uncaughtExceptionHandler != null)
+            thread.setUncaughtExceptionHandler(uncaughtExceptionHandler);
         return thread;
     }
 
-    private static final AtomicInteger threadCounter = new AtomicInteger();
-
     @VisibleForTesting
-    public static Thread createThread(Runnable runnable)
+    public static Thread createAnonymousThread(Runnable runnable)
     {
-        return createThread(null, runnable, "anonymous-" + threadCounter.incrementAndGet());
+        return createThread(null, runnable, "anonymous-" + anonymousCounter.incrementAndGet());
     }
 
     public static Thread createThread(Runnable runnable, String name)
@@ -88,7 +146,7 @@
         return createThread(null, runnable, name);
     }
 
-    public static Thread createThread(Runnable runnable, String name, boolean daemon)
+    public Thread createThread(Runnable runnable, String name, boolean daemon)
     {
         return createThread(null, runnable, name, daemon);
     }
@@ -105,4 +163,37 @@
         thread.setDaemon(daemon);
         return thread;
     }
+
+    public static  <T extends Thread> T setupThread(T thread, int priority, ClassLoader contextClassLoader, Thread.UncaughtExceptionHandler uncaughtExceptionHandler)
+    {
+        thread.setPriority(priority);
+        if (contextClassLoader != null)
+            thread.setContextClassLoader(contextClassLoader);
+        if (uncaughtExceptionHandler != null)
+            thread.setUncaughtExceptionHandler(uncaughtExceptionHandler);
+        return thread;
+    }
+
+    @Override
+    public String toString()
+    {
+        return threadGroup != null ? id + " in " + threadGroup.getName() : id;
+    }
+
+    public void close()
+    {
+        synchronized (threadGroup)
+        {
+            threadGroup.setDaemon(true);
+            // ThreadGroup API is terrible; setDaemon does not destroy a group that is already empty, and establishing
+            // emptiness otherwise is tortuous - easier to just attempt the destroy and ignore it if currently invalid
+            try
+            {
+                threadGroup.destroy();
+            }
+            catch (IllegalThreadStateException ignore)
+            {
+            }
+        }
+    }
 }
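
An editorial usage sketch (not part of the patch) of the factory above; "ExampleStage" and the lambdas are illustrative. Threads produced this way are daemon threads named "<id>:<n>" and default to JVMStabilityInspector::uncaughtException unless another handler is supplied.

    // Sketch: named daemon threads from a factory
    NamedThreadFactory factory = new NamedThreadFactory("ExampleStage");
    Thread worker = factory.newThread(() -> System.out.println("working"));   // e.g. "ExampleStage:0"
    worker.start();

    // A one-off named thread, without retaining a factory
    Thread oneOff = NamedThreadFactory.createThread(() -> System.out.println("one-off"), "example-one-off");
    oneOff.start();
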
diff --git a/src/java/org/apache/cassandra/concurrent/ResizableThreadPool.java b/src/java/org/apache/cassandra/concurrent/ResizableThreadPool.java
index bd3b8ea..9c1dba6 100644
--- a/src/java/org/apache/cassandra/concurrent/ResizableThreadPool.java
+++ b/src/java/org/apache/cassandra/concurrent/ResizableThreadPool.java
@@ -18,15 +18,22 @@
 
 package org.apache.cassandra.concurrent;
 
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
 public interface ResizableThreadPool
 {
     /**
-     * Returns maximum pool size of thread pool.
+     * Returns the core pool size of the thread pool, i.e. the minimum
+     * number of workers (where that makes sense for the pool;
+     * SEPExecutor does not have a minimum size).
      */
     public int getCorePoolSize();
 
     /**
-     * Allows user to resize maximum size of the thread pool.
+     * Allows user to resize the core (minimum) size of the thread pool.
      */
     public void setCorePoolSize(int newCorePoolSize);
 
@@ -39,4 +46,37 @@
      * Allows user to resize maximum size of the thread pool.
      */
     public void setMaximumPoolSize(int newMaximumPoolSize);
+
+    /**
+     * Returns the approximate number of threads that are actively
+     * executing tasks.
+     *
+     * @return the number of threads
+     */
+    int getActiveTaskCount();
+
+    /**
+     * Returns the approximate total number of tasks that have
+     * completed execution. Because the states of tasks and threads
+     * may change dynamically during computation, the returned value
+     * is only an approximation, but one that does not ever decrease
+     * across successive calls.
+     *
+     * @return the number of tasks
+     */
+    long getCompletedTaskCount();
+
+    /**
+     * Returns the approximate total number of tasks waiting to be executed.
+     * Because the states of tasks and threads may change dynamically
+     * during computation, the returned value is only an approximation.
+     *
+     * @return the number of tasks
+     */
+    int getPendingTaskCount();
+
+    default int getMaxTasksQueued()
+    {
+        return -1;
+    }
 }
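
The new accessors are monitoring hooks; a small editorial sketch (the describe helper is hypothetical) of reading them off any implementation:

    // Sketch: summarising pool state via the new ResizableThreadPool accessors
    static String describe(ResizableThreadPool pool)
    {
        return String.format("active=%d pending=%d completed=%d maxQueued=%d",
                             pool.getActiveTaskCount(),
                             pool.getPendingTaskCount(),
                             pool.getCompletedTaskCount(),
                             pool.getMaxTasksQueued());   // -1 where the pool has no queue limit
    }
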
diff --git a/src/java/org/apache/cassandra/concurrent/ResizableThreadPoolMXBean.java b/src/java/org/apache/cassandra/concurrent/ResizableThreadPoolMXBean.java
new file mode 100644
index 0000000..1c247d6
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ResizableThreadPoolMXBean.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+public interface ResizableThreadPoolMXBean extends ResizableThreadPool
+{
+    /**
+     * Returns core pool size of thread pool.
+     * Deprecated, use getCorePoolSize instead.
+     */
+    @Deprecated
+    public int getCoreThreads();
+
+    /**
+     * Allows user to resize core pool size of the thread pool.
+     * Deprecated, use setCorePoolSize instead.
+     */
+    @Deprecated
+    public void setCoreThreads(int number);
+
+    /**
+     * Returns maximum pool size of thread pool.
+     * Deprecated, use getMaximumPoolSize instead.
+     */
+    @Deprecated
+    public int getMaximumThreads();
+
+    /**
+     * Allows user to resize maximum size of the thread pool.
+     * Deprecated, use setMaximumPoolSize instead.
+     */
+    @Deprecated
+    public void setMaximumThreads(int number);
+}
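
A hypothetical sketch (not taken from this patch) of how an implementing class can keep the deprecated thread-count methods as thin delegates to the new pool-size methods:

    // Sketch: deprecated MXBean methods delegating to their ResizableThreadPool equivalents
    @Deprecated
    public int getCoreThreads()          { return getCorePoolSize(); }
    @Deprecated
    public void setCoreThreads(int n)    { setCorePoolSize(n); }
    @Deprecated
    public int getMaximumThreads()       { return getMaximumPoolSize(); }
    @Deprecated
    public void setMaximumThreads(int n) { setMaximumPoolSize(n); }
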
diff --git a/src/java/org/apache/cassandra/concurrent/SEPExecutor.java b/src/java/org/apache/cassandra/concurrent/SEPExecutor.java
index 675e047..05b59c6 100644
--- a/src/java/org/apache/cassandra/concurrent/SEPExecutor.java
+++ b/src/java/org/apache/cassandra/concurrent/SEPExecutor.java
@@ -19,6 +19,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -26,18 +27,24 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.MBeanWrapper;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.metrics.ThreadPoolMetrics;
-import org.apache.cassandra.utils.MBeanWrapper;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
+import static org.apache.cassandra.concurrent.SEPExecutor.TakeTaskPermitResult.*;
 import static org.apache.cassandra.concurrent.SEPWorker.Work;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
-public class SEPExecutor extends AbstractLocalAwareExecutorService implements SEPExecutorMBean
+public class SEPExecutor implements LocalAwareExecutorPlus, SEPExecutorMBean
 {
     private static final Logger logger = LoggerFactory.getLogger(SEPExecutor.class);
+    private static final TaskFactory taskFactory = TaskFactory.localAware();
+
     private final SharedExecutorPool pool;
 
     private final AtomicInteger maximumPoolSize;
@@ -55,10 +62,10 @@
     private final AtomicLong completedTasks = new AtomicLong();
 
     volatile boolean shuttingDown = false;
-    final SimpleCondition shutdown = new SimpleCondition();
+    final Condition shutdown = newOneTimeCondition();
 
     // TODO: see if other queue implementations might improve throughput
-    protected final ConcurrentLinkedQueue<FutureTask<?>> tasks = new ConcurrentLinkedQueue<>();
+    protected final ConcurrentLinkedQueue<Runnable> tasks = new ConcurrentLinkedQueue<>();
 
     SEPExecutor(SharedExecutorPool pool, int maximumPoolSize, MaximumPoolSizeListener maximumPoolSizeListener, String jmxPath, String name)
     {
@@ -94,7 +101,7 @@
         return true;
     }
 
-    protected void addTask(FutureTask<?> task)
+    protected <T extends Runnable> T addTask(T task)
     {
         // we add to the queue first, so that when a worker takes a task permit it can be certain there is a task available
         // this permits us to schedule threads non-spuriously; it also means work is serviced fairly
@@ -119,6 +126,7 @@
             // worker, we simply start a worker in a spinning state
             pool.maybeStartSpinningWorker();
         }
+        return task;
     }
 
     public enum TakeTaskPermitResult
@@ -126,7 +134,7 @@
         NONE_AVAILABLE,        // No task permits available
         TOOK_PERMIT,           // Took a permit and reduced task permits
         RETURNED_WORK_PERMIT   // Detected pool shrinking and returned work permit ahead of SEPWorker exit.
-    };
+    }
 
     // takes permission to perform a task, if any are available; once taken it is guaranteed
     // that a proceeding call to tasks.poll() will return some work
@@ -144,14 +152,14 @@
                 // Work permits are negative when the pool is reducing in size.  Atomically
                 // adjust the number of work permits so there is no race of multiple SEPWorkers
                 // exiting.  On conflicting update, recheck.
-                result = TakeTaskPermitResult.RETURNED_WORK_PERMIT;
+                result = RETURNED_WORK_PERMIT;
                 updated = updateWorkPermits(current, workPermits + 1);
             }
             else
             {
                 if (taskPermits == 0)
-                    return TakeTaskPermitResult.NONE_AVAILABLE;
-                result = TakeTaskPermitResult.TOOK_PERMIT;
+                    return NONE_AVAILABLE;
+                result = TOOK_PERMIT;
                 updated = updateTaskPermits(current, taskPermits - 1);
             }
             if (permits.compareAndSet(current, updated))
@@ -192,18 +200,18 @@
     }
 
     @Override
-    public void maybeExecuteImmediately(Runnable command)
+    public void maybeExecuteImmediately(Runnable task)
     {
-        FutureTask<?> ft = newTaskFor(command, null);
+        task = taskFactory.toExecute(task);
         if (!takeWorkPermit(false))
         {
-            addTask(ft);
+            addTask(task);
         }
         else
         {
             try
             {
-                ft.run();
+                task.run();
             }
             finally
             {
@@ -216,6 +224,60 @@
         }
     }
 
+    @Override
+    public void execute(Runnable run)
+    {
+        addTask(taskFactory.toExecute(run));
+    }
+
+    @Override
+    public void execute(WithResources withResources, Runnable run)
+    {
+        addTask(taskFactory.toExecute(withResources, run));
+    }
+
+    @Override
+    public Future<?> submit(Runnable run)
+    {
+        return addTask(taskFactory.toSubmit(run));
+    }
+
+    @Override
+    public <T> Future<T> submit(Runnable run, T result)
+    {
+        return addTask(taskFactory.toSubmit(run, result));
+    }
+
+    @Override
+    public <T> Future<T> submit(Callable<T> call)
+    {
+        return addTask(taskFactory.toSubmit(call));
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Runnable run, T result)
+    {
+        return addTask(taskFactory.toSubmit(withResources, run, result));
+    }
+
+    @Override
+    public Future<?> submit(WithResources withResources, Runnable run)
+    {
+        return addTask(taskFactory.toSubmit(withResources, run));
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Callable<T> call)
+    {
+        return addTask(taskFactory.toSubmit(withResources, call));
+    }
+
+    @Override
+    public boolean inExecutor()
+    {
+        throw new UnsupportedOperationException();
+    }
+
     public synchronized void shutdown()
     {
         if (shuttingDown)
@@ -234,7 +296,7 @@
     {
         shutdown();
         List<Runnable> aborted = new ArrayList<>();
-        while (takeTaskPermit(false) == TakeTaskPermitResult.TOOK_PERMIT)
+        while (takeTaskPermit(false) == TOOK_PERMIT)
             aborted.add(tasks.poll());
         return aborted;
     }
@@ -246,7 +308,7 @@
 
     public boolean isTerminated()
     {
-        return shuttingDown && shutdown.isSignaled();
+        return shuttingDown && shutdown.isSignalled();
     }
 
     public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException
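
SEPExecutor now implements LocalAwareExecutorPlus directly; instances are obtained from a SharedExecutorPool rather than constructed by callers. A sketch, assuming the SHARED pool and the newExecutor signature shown later in this patch; names and concurrency are illustrative:

    // Sketch: an SEP-backed executor with at most 8 concurrent workers
    LocalAwareExecutorPlus executor = SharedExecutorPool.SHARED.newExecutor(8, "internal", "ExampleStage");

    Future<Integer> result = executor.submit(() -> 42);   // org.apache.cassandra.utils.concurrent.Future
    executor.maybeExecuteImmediately(() -> System.out.println(
        "runs inline on the calling thread if a work permit is available, otherwise is queued"));
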
diff --git a/src/java/org/apache/cassandra/concurrent/SEPWorker.java b/src/java/org/apache/cassandra/concurrent/SEPWorker.java
index de5185d..c7b9abf 100644
--- a/src/java/org/apache/cassandra/concurrent/SEPWorker.java
+++ b/src/java/org/apache/cassandra/concurrent/SEPWorker.java
@@ -30,6 +30,7 @@
 
 import static org.apache.cassandra.concurrent.SEPExecutor.TakeTaskPermitResult.RETURNED_WORK_PERMIT;
 import static org.apache.cassandra.concurrent.SEPExecutor.TakeTaskPermitResult.TOOK_PERMIT;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnable
 {
@@ -47,11 +48,11 @@
     long prevStopCheck = 0;
     long soleSpinnerSpinTime = 0;
 
-    SEPWorker(Long workerId, Work initialState, SharedExecutorPool pool)
+    SEPWorker(ThreadGroup threadGroup, Long workerId, Work initialState, SharedExecutorPool pool)
     {
         this.pool = pool;
         this.workerId = workerId;
-        thread = new FastThreadLocalThread(this, pool.poolName + "-Worker-" + workerId);
+        thread = new FastThreadLocalThread(threadGroup, this, threadGroup.getName() + "-Worker-" + workerId);
         thread.setDaemon(true);
         set(initialState);
         thread.start();
@@ -117,6 +118,7 @@
 
                     // we know there is work waiting, as we have a work permit, so poll() will always succeed
                     task.run();
+                    assigned.onCompletion();
                     task = null;
 
                     if (shutdown = assigned.shuttingDown)
@@ -162,9 +164,14 @@
             if (assigned != null)
                 assigned.returnWorkPermit();
             if (task != null)
+            {
                 logger.error("Failed to execute task, unexpected exception killed worker", t);
+                assigned.onCompletion();
+            }
             else
+            {
                 logger.error("Unexpected exception killed worker", t);
+            }
         }
     }
 
@@ -257,7 +264,7 @@
         sleep *= ThreadLocalRandom.current().nextDouble();
         sleep = Math.max(10000, sleep);
 
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         // place ourselves in the spinning collection; if we clash with another thread just exit
         Long target = start + sleep;
@@ -269,7 +276,7 @@
         pool.spinning.remove(target, this);
 
         // finish timing and grab spinningTime (before we finish timing so it is under rather than overestimated)
-        long end = System.nanoTime();
+        long end = nanoTime();
         long spin = end - start;
         long stopCheck = pool.stopCheck.addAndGet(spin);
         maybeStop(stopCheck, end);
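
The System.nanoTime() calls are replaced with Clock.Global.nanoTime() so that the clock can be controlled (for example by the simulator); an editorial sketch of the idiom (timeNanos is a hypothetical helper):

    // Sketch: interval measurement against the pluggable global clock
    static long timeNanos(Runnable work)
    {
        long start = nanoTime();   // static import of org.apache.cassandra.utils.Clock.Global.nanoTime
        work.run();
        return nanoTime() - start;
    }
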
diff --git a/src/java/org/apache/cassandra/concurrent/ScheduledExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/ScheduledExecutorPlus.java
new file mode 100644
index 0000000..a2b033a
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ScheduledExecutorPlus.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface ScheduledExecutorPlus extends ExecutorPlus, ScheduledExecutorService
+{
+    /**
+     * Schedule an action that is recurring but self-administered.
+     */
+    ScheduledFuture<?> scheduleSelfRecurring(Runnable run, long delay, TimeUnit units);
+
+    /**
+     * Schedule a timeout action. This method is primarily used by the Simulator to modify its
+     * scheduling behaviour with respect to this operation.
+     */
+    ScheduledFuture<?> scheduleAt(Runnable run, long deadline);
+
+    /**
+     * Schedule a timeout action. This method is primarily used by the Simulator to modify its
+     * scheduling behaviour with respect to this operation.
+     */
+    ScheduledFuture<?> scheduleTimeoutAt(Runnable run, long deadline);
+
+    /**
+     * Schedule a timeout action. This method is primarily used by the Simulator to modify its
+     * scheduling behaviour with respect to this operation.
+     */
+    ScheduledFuture<?> scheduleTimeoutWithDelay(Runnable run, long delay, TimeUnit units);
+}
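
A sketch of the new scheduling entry points; the executor construction and tasks are illustrative, and deadlines are absolute values in Clock.Global.nanoTime() terms, as in the implementation further below:

    // Sketch: scheduling against a ScheduledExecutorPlus
    ScheduledExecutorPlus executor = executorFactory().scheduled("ExampleScheduler");

    // A recurring task that administers its own re-scheduling
    executor.scheduleSelfRecurring(() -> System.out.println("tick"), 1, TimeUnit.SECONDS);

    // Deadline-based scheduling; the deadline is an absolute nanoTime() value
    long deadline = nanoTime() + TimeUnit.SECONDS.toNanos(5);
    executor.scheduleAt(() -> System.out.println("deadline reached"), deadline);
    executor.scheduleTimeoutAt(() -> System.out.println("timed out"), deadline);
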
diff --git a/src/java/org/apache/cassandra/concurrent/ScheduledExecutors.java b/src/java/org/apache/cassandra/concurrent/ScheduledExecutors.java
index ff9d1b4..8da600c 100644
--- a/src/java/org/apache/cassandra/concurrent/ScheduledExecutors.java
+++ b/src/java/org/apache/cassandra/concurrent/ScheduledExecutors.java
@@ -24,6 +24,8 @@
 
 import org.apache.cassandra.utils.ExecutorUtils;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
 /**
  * Centralized location for shared executors
  */
@@ -32,26 +34,26 @@
     /**
      * This pool is used for periodic fast (sub-microsecond) tasks.
      */
-    public static final DebuggableScheduledThreadPoolExecutor scheduledFastTasks = new DebuggableScheduledThreadPoolExecutor("ScheduledFastTasks");
+    public static final ScheduledExecutorPlus scheduledFastTasks = executorFactory().scheduled("ScheduledFastTasks");
 
     /**
      * This pool is used for periodic short (sub-second) tasks.
      */
-     public static final DebuggableScheduledThreadPoolExecutor scheduledTasks = new DebuggableScheduledThreadPoolExecutor("ScheduledTasks");
+     public static final ScheduledExecutorPlus scheduledTasks = executorFactory().scheduled("ScheduledTasks");
 
     /**
      * This executor is used for tasks that can have longer execution times, and usually are non periodic.
      */
-    public static final DebuggableScheduledThreadPoolExecutor nonPeriodicTasks = new DebuggableScheduledThreadPoolExecutor("NonPeriodicTasks");
+    public static final ScheduledExecutorPlus nonPeriodicTasks = executorFactory().scheduled("NonPeriodicTasks");
 
     /**
      * This executor is used for tasks that do not need to be waited for on shutdown/drain.
      */
-    public static final DebuggableScheduledThreadPoolExecutor optionalTasks = new DebuggableScheduledThreadPoolExecutor("OptionalTasks");
+    public static final ScheduledExecutorPlus optionalTasks = executorFactory().scheduled(false, "OptionalTasks");
 
     @VisibleForTesting
-    public static void shutdownAndWait(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
+    public static void shutdownNowAndWait(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
     {
-        ExecutorUtils.shutdownNowAndWait(timeout, unit, scheduledFastTasks, scheduledTasks, nonPeriodicTasks, optionalTasks);
+        ExecutorUtils.shutdownNowAndWait(timeout, unit, scheduledTasks, scheduledFastTasks, nonPeriodicTasks, optionalTasks);
     }
 }
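
Call sites keep using the same shared executors; a usage sketch (tasks and delays are illustrative):

    // Sketch: the shared scheduled executors expose the full ScheduledExecutorService API
    ScheduledExecutors.scheduledTasks.scheduleWithFixedDelay(
        () -> System.out.println("periodic housekeeping"), 10, 10, TimeUnit.SECONDS);

    ScheduledExecutors.nonPeriodicTasks.schedule(
        () -> System.out.println("longer-running, one-shot task"), 1, TimeUnit.MINUTES);
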
diff --git a/src/java/org/apache/cassandra/concurrent/ScheduledThreadPoolExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/ScheduledThreadPoolExecutorPlus.java
new file mode 100644
index 0000000..0ab09a4
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ScheduledThreadPoolExecutorPlus.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import java.util.List;
+import java.util.concurrent.*;
+
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.service.StorageService;
+
+import static com.google.common.primitives.Longs.max;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.concurrent.ExecutionFailure.propagating;
+import static org.apache.cassandra.concurrent.ExecutionFailure.suppressing;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+/**
+ * Like ExecutorPlus, ScheduledThreadPoolExecutorPlus always
+ * logs exceptions from the tasks it is given, even if Future.get is never called elsewhere.
+ *
+ * Catches exceptions during task execution so that they don't prevent subsequent invocations of a recurring task.
+ *
+ * Finally, there is a special rejected execution handler for tasks rejected during the shutdown hook.
+ *  - For fire and forget tasks (like ref tidy) we can safely ignore the exceptions.
+ *  - For any callers that care to know their task was rejected, we cancel the passed task.
+ */
+public class ScheduledThreadPoolExecutorPlus extends ScheduledThreadPoolExecutor implements ScheduledExecutorPlus
+{
+    private static final Logger logger = LoggerFactory.getLogger(ScheduledThreadPoolExecutorPlus.class);
+    private static final TaskFactory taskFactory = TaskFactory.standard();
+
+    public static final RejectedExecutionHandler rejectedExecutionHandler = (task, executor) ->
+    {
+        if (executor.isShutdown())
+        {
+            // TODO: this sequence of events seems poorly thought out
+            if (!StorageService.instance.isShutdown())
+                throw new RejectedExecutionException("ScheduledThreadPoolExecutor has shut down.");
+
+            // Give some notification to the caller that the task isn't going to run
+            if (task instanceof java.util.concurrent.Future)
+                ((java.util.concurrent.Future<?>) task).cancel(false);
+
+            logger.debug("ScheduledThreadPoolExecutor has shut down as part of C* shutdown");
+        }
+        else
+        {
+            throw new AssertionError("Unknown rejection of ScheduledThreadPoolExecutor task");
+        }
+    };
+
+    ScheduledThreadPoolExecutorPlus(NamedThreadFactory threadFactory)
+    {
+        super(1, threadFactory);
+        setRejectedExecutionHandler(rejectedExecutionHandler);
+    }
+
+    @Override
+    public ScheduledFuture<?> schedule(Runnable task, long delay, TimeUnit unit)
+    {
+        return super.schedule(propagating(task), delay, unit);
+    }
+
+    @Override
+    public <V> ScheduledFuture<V> schedule(Callable<V> task, long delay, TimeUnit unit)
+    {
+        return super.schedule(propagating(task), delay, unit);
+    }
+
+    // override scheduling to suppress exceptions that would cancel future executions
+    @Override
+    public ScheduledFuture<?> scheduleAtFixedRate(Runnable task, long initialDelay, long period, TimeUnit unit)
+    {
+        return super.scheduleAtFixedRate(suppressing(task), initialDelay, period, unit);
+    }
+
+    @Override
+    public ScheduledFuture<?> scheduleWithFixedDelay(Runnable task, long initialDelay, long delay, TimeUnit unit)
+    {
+        return super.scheduleWithFixedDelay(suppressing(task), initialDelay, delay, unit);
+    }
+
+    @Override
+    public ScheduledFuture<?> scheduleSelfRecurring(Runnable run, long delay, TimeUnit units)
+    {
+        return schedule(run, delay, units);
+    }
+
+    @Override
+    public ScheduledFuture<?> scheduleAt(Runnable run, long deadline)
+    {
+        return schedule(run, max(0, deadline - nanoTime()), NANOSECONDS);
+    }
+
+    @Override
+    public ScheduledFuture<?> scheduleTimeoutAt(Runnable run, long deadline)
+    {
+        return scheduleTimeoutWithDelay(run, max(0, deadline - nanoTime()), NANOSECONDS);
+    }
+
+    @Override
+    public ScheduledFuture<?> scheduleTimeoutWithDelay(Runnable run, long delay, TimeUnit units)
+    {
+        return schedule(run, delay, units);
+    }
+
+    /*======== BEGIN DIRECT COPY OF ThreadPoolExecutorPlus ===============*/
+
+    private <T extends Runnable> T addTask(T task)
+    {
+        super.execute(task);
+        return task;
+    }
+
+    @Override
+    public void execute(Runnable run)
+    {
+        addTask(taskFactory.toExecute(run));
+    }
+
+    @Override
+    public void execute(WithResources withResources, Runnable run)
+    {
+        addTask(taskFactory.toExecute(withResources, run));
+    }
+
+    @Override
+    public Future<?> submit(Runnable run)
+    {
+        return addTask(taskFactory.toSubmit(run));
+    }
+
+    @Override
+    public <T> Future<T> submit(Runnable run, T result)
+    {
+        return addTask(taskFactory.toSubmit(run, result));
+    }
+
+    @Override
+    public <T> Future<T> submit(Callable<T> call)
+    {
+        return addTask(taskFactory.toSubmit(call));
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Runnable run, T result)
+    {
+        return addTask(taskFactory.toSubmit(withResources, run, result));
+    }
+
+    @Override
+    public Future<?> submit(WithResources withResources, Runnable run)
+    {
+        return addTask(taskFactory.toSubmit(withResources, run));
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Callable<T> call)
+    {
+        return addTask(taskFactory.toSubmit(withResources, call));
+    }
+
+    @Override
+    public boolean inExecutor()
+    {
+        return Thread.currentThread().getThreadGroup() == getThreadFactory().threadGroup;
+    }
+
+    @Override
+    protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value)
+    {
+        return taskFactory.toSubmit(runnable, value);
+    }
+
+    @Override
+    protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable)
+    {
+        return taskFactory.toSubmit(callable);
+    }
+
+    @Override
+    public NamedThreadFactory getThreadFactory()
+    {
+        return (NamedThreadFactory) super.getThreadFactory();
+    }
+
+    /*======== DIRECT COPY OF ThreadPoolExecutorBase ===============*/
+
+    @Override
+    public List<Runnable> shutdownNow()
+    {
+        List<Runnable> cancelled = super.shutdownNow();
+        for (Runnable c : cancelled)
+        {
+            if (c instanceof java.util.concurrent.Future<?>)
+                ((java.util.concurrent.Future<?>) c).cancel(true);
+        }
+        return cancelled;
+    }
+
+    @Override
+    protected void terminated()
+    {
+        getThreadFactory().close();
+    }
+
+    @Override
+    public int getActiveTaskCount()
+    {
+        return getActiveCount();
+    }
+
+    @Override
+    public int getPendingTaskCount()
+    {
+        return getQueue().size();
+    }
+
+    /*======== DIRECT COPY OF SingleThreadExecutorPlus ===============*/
+
+    @Override
+    public int getCorePoolSize()
+    {
+        return 1;
+    }
+    @Override
+    public void setCorePoolSize(int number)
+    {
+        throw new UnsupportedOperationException();
+    }
+    @Override
+    public int getMaximumPoolSize()
+    {
+        return 1;
+    }
+    @Override
+    public void setMaximumPoolSize(int number)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getMaxTasksQueued()
+    {
+        return Integer.MAX_VALUE;
+    }
+}
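
One-shot tasks are wrapped with propagating(...) and recurring tasks with suppressing(...); the practical difference is that an exception thrown by a recurring task is logged without cancelling later runs. An editorial sketch (executor construction and the failing task are illustrative):

    // Sketch: exception semantics of a factory-built scheduled executor
    ScheduledExecutorPlus executor = executorFactory().scheduled("ExampleScheduler");
    Runnable failing = () -> { throw new RuntimeException("boom"); };

    // Recurring: the exception is logged and suppressed, so subsequent runs still happen
    executor.scheduleWithFixedDelay(failing, 0, 1, TimeUnit.SECONDS);

    // One-shot: the exception is logged even if the returned future is never inspected
    ScheduledFuture<?> oneShot = executor.schedule(failing, 5, TimeUnit.SECONDS);
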
diff --git a/src/java/org/apache/cassandra/concurrent/SequentialExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/SequentialExecutorPlus.java
new file mode 100644
index 0000000..2b63f14
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/SequentialExecutorPlus.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * An {@link ExecutorPlus} that guarantees the order of execution matches the order of task submission,
+ * and provides a simple mechanism for the recurring pattern of ensuring a job is executed at least once
+ * after some point in time (i.e. ensures that at most one copy of the task is queued, with up to one
+ * copy running as well).
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface SequentialExecutorPlus extends ExecutorPlus
+{
+    public interface AtLeastOnceTrigger
+    {
+        /**
+         * Ensure the job is run at least once in its entirety after this method is invoked (including any already queued)
+         */
+        public boolean trigger();
+
+        /**
+         * Run the provided task after all queued and executing jobs have completed
+         */
+        public void runAfter(Runnable run);
+
+        /**
+         * Wait until all queued and executing jobs have completed
+         */
+        public void sync();
+    }
+
+    /**
+     * Return an object for orchestrating the execution of this task at least once (in its entirety) after
+     * the trigger is invoked, i.e. saturating the number of pending tasks at 1 (2 including any possibly executing
+     * at the time of invocation)
+     */
+    public AtLeastOnceTrigger atLeastOnceTrigger(Runnable runnable);
+}
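
A sketch of the at-least-once pattern this interface describes; the construction via executorFactory().sequential(...) and the task are assumptions for illustration:

    // Sketch: coalescing repeated requests into at most one queued execution
    SequentialExecutorPlus executor = executorFactory().sequential("ExampleSequential");
    SequentialExecutorPlus.AtLeastOnceTrigger flush = executor.atLeastOnceTrigger(() -> System.out.println("flush"));

    flush.trigger();   // queues one execution of the flush task
    flush.trigger();   // no-op: a complete execution is already pending
    flush.sync();      // blocks until all queued and executing work has completed
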
diff --git a/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java b/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
index 7a07cf4..f74854f 100644
--- a/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
+++ b/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
@@ -27,7 +27,9 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.concurrent.SEPWorker.Work;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * A pool of worker threads that are shared between all Executors created with it. Each executor is treated as a distinct
@@ -56,11 +58,10 @@
  */
 public class SharedExecutorPool
 {
-
     public static final SharedExecutorPool SHARED = new SharedExecutorPool("SharedPool");
 
     // the name assigned to workers in the pool, and the id suffix
-    final String poolName;
+    final ThreadGroup threadGroup;
     final AtomicLong workerId = new AtomicLong();
 
     // the collection of executors serviced by this pool; periodically ordered by traffic volume
@@ -79,9 +80,14 @@
 
     volatile boolean shuttingDown = false;
 
-    public SharedExecutorPool(String poolName)
+    public SharedExecutorPool(String name)
     {
-        this.poolName = poolName;
+        this(executorFactory().newThreadGroup(name));
+    }
+
+    public SharedExecutorPool(ThreadGroup threadGroup)
+    {
+        this.threadGroup = threadGroup;
     }
 
     void schedule(Work work)
@@ -96,7 +102,7 @@
                 return;
 
         if (!work.isStop())
-            new SEPWorker(workerId.incrementAndGet(), work, this);
+            new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
     }
 
     void maybeStartSpinningWorker()
@@ -108,12 +114,12 @@
             schedule(Work.SPINNING);
     }
 
-    public synchronized LocalAwareExecutorService newExecutor(int maxConcurrency, String jmxPath, String name)
+    public synchronized LocalAwareExecutorPlus newExecutor(int maxConcurrency, String jmxPath, String name)
     {
         return newExecutor(maxConcurrency, i -> {}, jmxPath, name);
     }
 
-    public LocalAwareExecutorService newExecutor(int maxConcurrency, LocalAwareExecutorService.MaximumPoolSizeListener maximumPoolSizeListener, String jmxPath, String name)
+    public LocalAwareExecutorPlus newExecutor(int maxConcurrency, ExecutorPlus.MaximumPoolSizeListener maximumPoolSizeListener, String jmxPath, String name)
     {
         SEPExecutor executor = new SEPExecutor(this, maxConcurrency, maximumPoolSizeListener, jmxPath, name);
         executors.add(executor);
@@ -128,10 +134,10 @@
 
         terminateWorkers();
 
-        long until = System.nanoTime() + unit.toNanos(timeout);
+        long until = nanoTime() + unit.toNanos(timeout);
         for (SEPExecutor executor : executors)
         {
-            executor.shutdown.await(until - System.nanoTime(), TimeUnit.NANOSECONDS);
+            executor.shutdown.await(until - nanoTime(), TimeUnit.NANOSECONDS);
             if (!executor.isTerminated())
                 throw new TimeoutException(executor.name + " not terminated");
         }
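
Workers for a pool now live in a ThreadGroup named after the pool; a sketch of creating a dedicated pool and carving executors out of it (names and concurrency are illustrative):

    // Sketch: two executors sharing one pool of workers (and one ThreadGroup)
    SharedExecutorPool pool = new SharedExecutorPool("ExamplePool");
    LocalAwareExecutorPlus reads  = pool.newExecutor(32, "internal", "ExampleReads");
    LocalAwareExecutorPlus writes = pool.newExecutor(32, "internal", "ExampleWrites");

    reads.execute(() -> System.out.println("read task"));
    writes.execute(() -> System.out.println("write task"));
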
diff --git a/src/java/org/apache/cassandra/concurrent/Shutdownable.java b/src/java/org/apache/cassandra/concurrent/Shutdownable.java
new file mode 100644
index 0000000..185875b
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/Shutdownable.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface Shutdownable
+{
+    boolean isTerminated();
+
+    /**
+     * Shut down once any remaining work has completed (however that is defined by the implementation).
+     */
+    void shutdown();
+
+    /**
+     * Shut down immediately, possibly interrupting ongoing work and cancelling any queued work.
+     */
+    Object shutdownNow();
+
+    /**
+     * Await termination of this object, i.e. the cessation of all current and future work.
+     */
+    public boolean awaitTermination(long timeout, TimeUnit units) throws InterruptedException;
+}
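
A caller-side sketch of the orderly-shutdown idiom the interface supports (the timeout is illustrative):

    // Sketch: graceful shutdown of any Shutdownable resource
    static void stop(Shutdownable resource) throws InterruptedException
    {
        resource.shutdown();                                    // stop accepting new work
        if (!resource.awaitTermination(30, TimeUnit.SECONDS))   // wait for in-flight work to drain
            resource.shutdownNow();                             // interrupt and cancel what remains
    }
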
diff --git a/src/java/org/apache/cassandra/concurrent/SingleThreadExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/SingleThreadExecutorPlus.java
new file mode 100644
index 0000000..eb28277
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/SingleThreadExecutorPlus.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.cassandra.utils.concurrent.Future;
+
+public class SingleThreadExecutorPlus extends ThreadPoolExecutorPlus implements SequentialExecutorPlus
+{
+    public static class AtLeastOnce extends AtomicBoolean implements AtLeastOnceTrigger, Runnable
+    {
+        protected final SequentialExecutorPlus executor;
+        protected final Runnable run;
+
+        public AtLeastOnce(SequentialExecutorPlus executor, Runnable run)
+        {
+            this.executor = executor;
+            this.run = run;
+        }
+
+        public boolean trigger()
+        {
+            boolean success;
+            if (success = compareAndSet(false, true))
+                executor.execute(this);
+            return success;
+        }
+
+        public void runAfter(Runnable run)
+        {
+            executor.execute(run);
+        }
+
+        public void sync()
+        {
+            Future<?> done = executor.submit(() -> {});
+            done.awaitThrowUncheckedOnInterrupt();
+            done.rethrowIfFailed(); // executor might get shutdown before we execute; propagate cancellation exception
+        }
+
+        public void run()
+        {
+            set(false);
+            run.run();
+        }
+
+        @Override
+        public String toString()
+        {
+            return run.toString();
+        }
+    }
+
+    SingleThreadExecutorPlus(ThreadPoolExecutorBuilder<? extends SingleThreadExecutorPlus> builder)
+    {
+        this(builder, TaskFactory.standard());
+    }
+
+    SingleThreadExecutorPlus(ThreadPoolExecutorBuilder<? extends SingleThreadExecutorPlus> builder, TaskFactory taskFactory)
+    {
+        super(builder, taskFactory);
+    }
+
+    @Override
+    public int getCorePoolSize()
+    {
+        return 1;
+    }
+    @Override
+    public void setCorePoolSize(int number)
+    {
+        throw new UnsupportedOperationException();
+    }
+    @Override
+    public int getMaximumPoolSize()
+    {
+        return 1;
+    }
+    @Override
+    public void setMaximumPoolSize(int number)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public AtLeastOnce atLeastOnceTrigger(Runnable run)
+    {
+        return new AtLeastOnce(this, run);
+    }
+}
diff --git a/src/java/org/apache/cassandra/concurrent/Stage.java b/src/java/org/apache/cassandra/concurrent/Stage.java
index 5efaf16..ac609aa 100644
--- a/src/java/org/apache/cassandra/concurrent/Stage.java
+++ b/src/java/org/apache/cassandra/concurrent/Stage.java
@@ -20,15 +20,7 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.function.IntSupplier;
@@ -42,10 +34,11 @@
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.utils.ExecutorUtils;
-
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.Future;
 
 import static java.util.stream.Collectors.toMap;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 
 public enum Stage
 {
@@ -56,28 +49,29 @@
     GOSSIP            (true,  "GossipStage",           "internal", () -> 1,                                         null,                                            Stage::singleThreadedStage),
     REQUEST_RESPONSE  (false, "RequestResponseStage",  "request",  FBUtilities::getAvailableProcessors,             null,                                            Stage::multiThreadedLowSignalStage),
     ANTI_ENTROPY      (false, "AntiEntropyStage",      "internal", () -> 1,                                         null,                                            Stage::singleThreadedStage),
-    MIGRATION         (false, "MigrationStage",        "internal", () -> 1,                                         null,                                            Stage::singleThreadedStage),
+    MIGRATION         (false, "MigrationStage",        "internal", () -> 1,                                         null,                                            Stage::migrationStage),
     MISC              (false, "MiscStage",             "internal", () -> 1,                                         null,                                            Stage::singleThreadedStage),
-    TRACING           (false, "TracingStage",          "internal", () -> 1,                                         null,                                            Stage::tracingExecutor),
+    TRACING           (false, "TracingStage",          "internal", () -> 1,                                         null,                                            Stage::tracingStage),
     INTERNAL_RESPONSE (false, "InternalResponseStage", "internal", FBUtilities::getAvailableProcessors,             null,                                            Stage::multiThreadedStage),
-    IMMEDIATE         (false, "ImmediateStage",        "internal", () -> 0,                                         null,                                            Stage::immediateExecutor);
+    IMMEDIATE         (false, "ImmediateStage",        "internal", () -> 0,                                         null,                                            Stage::immediateExecutor),
+    PAXOS_REPAIR      (false, "PaxosRepairStage",      "internal", FBUtilities::getAvailableProcessors,             null,                                            Stage::multiThreadedStage),
+    ;
 
-    public static final long KEEP_ALIVE_SECONDS = 60; // seconds to keep "extra" threads alive for when idle
     public final String jmxName;
+    private final Supplier<ExecutorPlus> executorSupplier;
+    private volatile ExecutorPlus executor;
     /** Set true if this executor should be gracefully shutdown before stopping
      * the commitlog allocator. Tasks on executors that issue mutations may
      * block indefinitely waiting for a new commitlog segment, preventing a
      * clean drain/shutdown.
      */
     public final boolean shutdownBeforeCommitlog;
-    private final Supplier<LocalAwareExecutorService> initialiser;
-    private volatile LocalAwareExecutorService executor = null;
 
-    Stage(Boolean shutdownBeforeCommitlog, String jmxName, String jmxType, IntSupplier numThreads, LocalAwareExecutorService.MaximumPoolSizeListener onSetMaximumPoolSize, ExecutorServiceInitialiser initialiser)
+    Stage(boolean shutdownBeforeCommitlog, String jmxName, String jmxType, IntSupplier numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize, ExecutorServiceInitialiser executorSupplier)
     {
         this.shutdownBeforeCommitlog = shutdownBeforeCommitlog;
         this.jmxName = jmxName;
-        this.initialiser = () -> initialiser.init(jmxName,jmxType, numThreads.getAsInt(), onSetMaximumPoolSize);
+        this.executorSupplier = () -> executorSupplier.init(jmxName, jmxType, numThreads.getAsInt(), onSetMaximumPoolSize);
     }
 
     private static String normalizeName(String stageName)
@@ -128,14 +122,14 @@
     }
 
     // Convenience functions to execute on this stage
-    public void execute(Runnable command) { executor().execute(command); }
-    public void execute(Runnable command, ExecutorLocals locals) { executor().execute(command, locals); }
-    public void maybeExecuteImmediately(Runnable command) { executor().maybeExecuteImmediately(command); }
+    public void execute(Runnable task) { executor().execute(task); }
+    public void execute(ExecutorLocals locals, Runnable task) { executor().execute(locals, task); }
+    public void maybeExecuteImmediately(Runnable task) { executor().maybeExecuteImmediately(task); }
     public <T> Future<T> submit(Callable<T> task) { return executor().submit(task); }
     public Future<?> submit(Runnable task) { return executor().submit(task); }
     public <T> Future<T> submit(Runnable task, T result) { return executor().submit(task, result); }
 
-    public LocalAwareExecutorService executor()
+    public ExecutorPlus executor()
     {
         if (executor == null)
         {
@@ -143,21 +137,21 @@
             {
                 if (executor == null)
                 {
-                    executor = initialiser.get();
+                    executor = executorSupplier.get();
                 }
             }
         }
         return executor;
     }
 
-    private static List<ExecutorService> executors()
+    private static List<ExecutorPlus> executors()
     {
         return Stream.of(Stage.values())
                      .map(Stage::executor)
                      .collect(Collectors.toList());
     }
 
-    private static List<ExecutorService> mutatingExecutors()
+    private static List<ExecutorPlus> mutatingExecutors()
     {
         return Stream.of(Stage.values())
                      .filter(stage -> stage.shutdownBeforeCommitlog)
@@ -175,57 +169,66 @@
 
     public static void shutdownAndAwaitMutatingExecutors(boolean interrupt, long timeout, TimeUnit units) throws InterruptedException, TimeoutException
     {
-        List<ExecutorService> executors = mutatingExecutors();
+        List<ExecutorPlus> executors = mutatingExecutors();
         ExecutorUtils.shutdown(interrupt, executors);
         ExecutorUtils.awaitTermination(timeout, units, executors);
     }
 
     public static boolean areMutationExecutorsTerminated()
     {
-        return mutatingExecutors().stream().allMatch(ExecutorService::isTerminated);
+        return mutatingExecutors().stream().allMatch(ExecutorPlus::isTerminated);
     }
 
     @VisibleForTesting
     public static void shutdownAndWait(long timeout, TimeUnit units) throws InterruptedException, TimeoutException
     {
-        List<ExecutorService> executors = executors();
+        List<ExecutorPlus> executors = executors();
         ExecutorUtils.shutdownNow(executors);
         ExecutorUtils.awaitTermination(timeout, units, executors);
     }
 
-    static LocalAwareExecutorService tracingExecutor(String jmxName, String jmxType, int numThreads, LocalAwareExecutorService.MaximumPoolSizeListener onSetMaximumPoolSize)
+    private static ExecutorPlus tracingStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize)
     {
-        RejectedExecutionHandler reh = (r, executor) -> MessagingService.instance().metrics.recordSelfDroppedMessage(Verb._TRACE);
-        return new TracingExecutor(1,
-                                   1,
-                                   KEEP_ALIVE_SECONDS,
-                                   TimeUnit.SECONDS,
-                                   new ArrayBlockingQueue<>(1000),
-                                   new NamedThreadFactory(jmxName),
-                                   reh);
+        return executorFactory()
+                .withJmx(jmxType)
+                .configureSequential(jmxName)
+                .withQueueLimit(1000)
+                .withRejectedExecutionHandler((r, executor) -> MessagingService.instance().metrics.recordSelfDroppedMessage(Verb._TRACE)).build();
     }
 
-    static LocalAwareExecutorService multiThreadedStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorService.MaximumPoolSizeListener onSetMaximumPoolSize)
+    private static ExecutorPlus migrationStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize)
     {
-        return new JMXEnabledThreadPoolExecutor(numThreads,
-                                                KEEP_ALIVE_SECONDS,
-                                                TimeUnit.SECONDS,
-                                                new LinkedBlockingQueue<>(),
-                                                new NamedThreadFactory(jmxName),
-                                                jmxType);
+        return executorFactory()
+               .localAware()
+               .withJmx(jmxType)
+               .sequential(jmxName);
     }
 
-    static LocalAwareExecutorService multiThreadedLowSignalStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorService.MaximumPoolSizeListener onSetMaximumPoolSize)
+    private static LocalAwareExecutorPlus singleThreadedStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize)
     {
-        return SharedExecutorPool.SHARED.newExecutor(numThreads, onSetMaximumPoolSize, jmxType, jmxName);
+        return executorFactory()
+                .localAware()
+                .withJmx(jmxType)
+                .sequential(jmxName);
     }
 
-    static LocalAwareExecutorService singleThreadedStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorService.MaximumPoolSizeListener onSetMaximumPoolSize)
+    static LocalAwareExecutorPlus multiThreadedStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize)
     {
-        return new JMXEnabledSingleThreadExecutor(jmxName, jmxType);
+        return executorFactory()
+                .localAware()
+                .withJmx(jmxType)
+                .pooled(jmxName, numThreads);
     }
 
-    static LocalAwareExecutorService immediateExecutor(String jmxName, String jmxType, int numThreads, LocalAwareExecutorService.MaximumPoolSizeListener onSetMaximumPoolSize)
+    static LocalAwareExecutorPlus multiThreadedLowSignalStage(String jmxName, String jmxType, int numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize)
+    {
+        return executorFactory()
+                .localAware()
+                .withJmx(jmxType)
+                .shared(jmxName, numThreads, onSetMaximumPoolSize);
+    }
+
+    static LocalAwareExecutorPlus immediateExecutor(String jmxName, String jmxType, int numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize)
     {
         return ImmediateExecutor.INSTANCE;
     }
@@ -233,7 +236,7 @@
     @FunctionalInterface
     public interface ExecutorServiceInitialiser
     {
-        public LocalAwareExecutorService init(String jmxName, String jmxType, int numThreads, LocalAwareExecutorService.MaximumPoolSizeListener onSetMaximumPoolSize);
+        public ExecutorPlus init(String jmxName, String jmxType, int numThreads, LocalAwareExecutorPlus.MaximumPoolSizeListener onSetMaximumPoolSize);
     }
 
     /**
@@ -267,38 +270,4 @@
     {
         executor().setMaximumPoolSize(newMaximumPoolSize);
     }
-
-    /**
-     * The executor used for tracing.
-     */
-    private static class TracingExecutor extends ThreadPoolExecutor implements LocalAwareExecutorService
-    {
-        TracingExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler)
-        {
-            super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
-        }
-
-        public void execute(Runnable command, ExecutorLocals locals)
-        {
-            assert locals == null;
-            super.execute(command);
-        }
-
-        public void maybeExecuteImmediately(Runnable command)
-        {
-            execute(command);
-        }
-
-        @Override
-        public int getActiveTaskCount()
-        {
-            return getActiveCount();
-        }
-
-        @Override
-        public int getPendingTaskCount()
-        {
-            return getQueue().size();
-        }
-    }
 }
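
From a caller's point of view the Stage convenience methods are unchanged apart from the (locals, task) argument order; a sketch using stages declared above (tasks are illustrative):

    // Sketch: submitting work to stages; each stage's executor is created lazily on first use
    Stage.MISC.execute(() -> System.out.println("misc work"));
    Future<Integer> answer = Stage.INTERNAL_RESPONSE.submit(() -> 42);   // utils.concurrent.Future
    Stage.GOSSIP.maybeExecuteImmediately(() -> System.out.println("may run inline"));
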
diff --git a/src/java/org/apache/cassandra/concurrent/SyncFutureTask.java b/src/java/org/apache/cassandra/concurrent/SyncFutureTask.java
new file mode 100644
index 0000000..422da99
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/SyncFutureTask.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.Callable;
+
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import org.apache.cassandra.utils.concurrent.SyncFuture;
+
+public class SyncFutureTask<T> extends SyncFuture<T> implements RunnableFuture<T>
+{
+    final Callable<T> call;
+
+    public SyncFutureTask(Callable<T> call)
+    {
+        this.call = call;
+    }
+
+    public SyncFutureTask(WithResources withResources, Callable<T> call)
+    {
+        this.call = new Callable<T>()
+        {
+            @Override
+            public T call() throws Exception
+            {
+                try (Closeable close = withResources.get())
+                {
+                    return call.call();
+                }
+            }
+
+            @Override
+            public String toString()
+            {
+                return call.toString();
+            }
+        };
+    }
+
+    public void run()
+    {
+        try
+        {
+            if (!setUncancellable())
+            {
+                if (isCancelled()) return;
+                else throw new IllegalStateException();
+            }
+
+            if (!trySuccess(call.call()))
+                throw new IllegalStateException();
+        }
+        catch (Throwable t)
+        {
+            tryFailure(t);
+            ExecutionFailure.handle(t);
+        }
+    }
+
+    @Override
+    public String description()
+    {
+        return call.toString();
+    }
+}
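
SyncFutureTask runs a Callable and records its result (or failure) on the enclosing SyncFuture; executors normally create these through a TaskFactory, but a direct editorial sketch shows the lifecycle:

    // Sketch: run() executes the callable on the calling thread and completes the future
    SyncFutureTask<String> task = new SyncFutureTask<>(() -> "done");
    task.run();                  // success or failure is recorded on the future
    String value = task.get();   // "done"; get() declares the usual checked exceptions
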
diff --git a/src/java/org/apache/cassandra/concurrent/TaskFactory.java b/src/java/org/apache/cassandra/concurrent/TaskFactory.java
new file mode 100644
index 0000000..56087d9
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/TaskFactory.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.Callable;
+
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+
+import static org.apache.cassandra.concurrent.FutureTask.callable;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A simple mechanism to impose our desired semantics on the execution of a task without requiring a specialised
+ * executor service. We wrap tasks in a suitable {@link FutureTask} or encapsulating {@link Runnable}.
+ *
+ * The encapsulations handle any exceptions in our standard way, as well as ensuring {@link ExecutorLocals} are
+ * propagated in the case of {@link #localAware()}
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface TaskFactory
+{
+    Runnable toExecute(Runnable runnable);
+    <T> RunnableFuture<T> toSubmit(Runnable runnable);
+    <T> RunnableFuture<T> toSubmit(Runnable runnable, T result);
+    <T> RunnableFuture<T> toSubmit(Callable<T> callable);
+
+    Runnable toExecute(WithResources withResources, Runnable runnable);
+    <T> RunnableFuture<T> toSubmit(WithResources withResources, Runnable runnable);
+    <T> RunnableFuture<T> toSubmit(WithResources withResources, Runnable runnable, T result);
+    <T> RunnableFuture<T> toSubmit(WithResources withResources, Callable<T> callable);
+
+    static TaskFactory standard() { return Standard.INSTANCE; }
+    static TaskFactory localAware() { return LocalAware.INSTANCE; }
+
+    public class Standard implements TaskFactory
+    {
+        static final Standard INSTANCE = new Standard();
+        protected Standard() {}
+
+        @Override
+        public Runnable toExecute(Runnable runnable)
+        {
+            return ExecutionFailure.suppressing(runnable);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(Runnable runnable)
+        {
+            return newTask(callable(runnable));
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(Runnable runnable, T result)
+        {
+            return newTask(callable(runnable, result));
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(Callable<T> callable)
+        {
+            return newTask(callable);
+        }
+
+        @Override
+        public Runnable toExecute(WithResources withResources, Runnable runnable)
+        {
+            return ExecutionFailure.suppressing(withResources, runnable);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(WithResources withResources, Runnable runnable)
+        {
+            return withResources.isNoOp() ? newTask(callable(runnable))
+                                          : newTask(withResources, callable(runnable));
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(WithResources withResources, Runnable runnable, T result)
+        {
+            return withResources.isNoOp() ? newTask(callable(runnable, result))
+                                          : newTask(withResources, callable(runnable, result));
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(WithResources withResources, Callable<T> callable)
+        {
+            return withResources.isNoOp() ? newTask(callable)
+                                          : newTask(withResources, callable);
+        }
+
+        protected <T> RunnableFuture<T> newTask(Callable<T> call)
+        {
+            return new FutureTask<>(call);
+        }
+
+        protected <T> RunnableFuture<T> newTask(WithResources withResources, Callable<T> call)
+        {
+            return new FutureTaskWithResources<>(withResources, call);
+        }
+    }
+
+    public class LocalAware extends Standard
+    {
+        static final LocalAware INSTANCE = new LocalAware();
+
+        protected LocalAware() {}
+
+        @Override
+        public Runnable toExecute(Runnable runnable)
+        {
+            // no reason to propagate exception when it is inaccessible to caller
+            return ExecutionFailure.suppressing(ExecutorLocals.propagate(), runnable);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(Runnable runnable)
+        {
+            return super.toSubmit(ExecutorLocals.propagate(), runnable);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(Runnable runnable, T result)
+        {
+            return super.toSubmit(ExecutorLocals.propagate(), runnable, result);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(Callable<T> callable)
+        {
+            return super.toSubmit(ExecutorLocals.propagate(), callable);
+        }
+
+        @Override
+        public Runnable toExecute(WithResources withResources, Runnable runnable)
+        {
+            return ExecutionFailure.suppressing(withLocals(withResources), runnable);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(WithResources withResources, Runnable runnable)
+        {
+            return super.toSubmit(withLocals(withResources), runnable);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(WithResources withResources, Runnable runnable, T result)
+        {
+            return super.toSubmit(withLocals(withResources), runnable, result);
+        }
+
+        @Override
+        public <T> RunnableFuture<T> toSubmit(WithResources withResources, Callable<T> callable)
+        {
+            return super.toSubmit(withLocals(withResources), callable);
+        }
+
+        private static WithResources withLocals(WithResources withResources)
+        {
+            return withResources instanceof ExecutorLocals ? withResources : ExecutorLocals.propagate().and(withResources);
+        }
+    }
+
+}
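To make the two factory variants concrete, a rough sketch (not from the patch): standard() only wraps the task for uniform failure handling, while localAware() additionally captures the caller's ExecutorLocals via ExecutorLocals.propagate() so they are re-established when the task runs.

    import org.apache.cassandra.concurrent.TaskFactory;
    import org.apache.cassandra.utils.concurrent.RunnableFuture;

    public class TaskFactorySketch
    {
        public static void main(String[] args)
        {
            Runnable work = () -> System.out.println("running");

            // Plain wrapping: failures are handled in the standard way, no locals captured.
            RunnableFuture<Object> plain = TaskFactory.standard().toSubmit(work);

            // Local-aware wrapping: the ExecutorLocals visible to this thread are captured
            // now and restored around the task when it eventually executes.
            RunnableFuture<Object> localAware = TaskFactory.localAware().toSubmit(work);

            plain.run();        // completes the corresponding future on the calling thread
            localAware.run();
        }
    }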
diff --git a/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorBase.java b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorBase.java
new file mode 100644
index 0000000..b90485a
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorBase.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import java.util.List;
+import java.util.concurrent.*;
+
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+/**
+ * This class incorporates some Executor best practices for Cassandra.  Most of the executors in the system
+ * should use or extend {@link ThreadPoolExecutorPlus}, or in rare cases this class.
+ *
+ * This class provides some very basic improvements:
+ * <li>We are configured by {@link ThreadPoolExecutorBuilder}
+ * <li>Tasks rejected due to overflow of the queue block the submitting thread rather than throwing {@link RejectedExecutionException}
+ * <li>{@link RunnableFuture} rejected due to executor shutdown will be cancelled
+ * <li>{@link RunnableFuture} removed by {@link #shutdownNow()} will be cancelled
+ *
+ * We also provide a shutdown hook for JMX registration cleanup.
+ */
+public class ThreadPoolExecutorBase extends ThreadPoolExecutor implements ResizableThreadPool
+{
+    public static final RejectedExecutionHandler blockingExecutionHandler = (task, executor) ->
+    {
+        BlockingQueue<Runnable> queue = executor.getQueue();
+        try
+        {
+            while (true)
+            {
+                try
+                {
+                    if (executor.isShutdown())
+                        throw new RejectedExecutionException(executor + " has shut down");
+
+                    if (queue.offer(task, 1, TimeUnit.SECONDS))
+                        break;
+                }
+                catch (InterruptedException e)
+                {
+                    throw new UncheckedInterruptedException(e);
+                }
+            }
+        }
+        catch (Throwable t)
+        {
+            // Give the caller some notification that the task isn't going to run
+            if (task instanceof java.util.concurrent.Future)
+                ((java.util.concurrent.Future<?>) task).cancel(false);
+            throw t;
+        }
+    };
+
+    private Runnable onShutdown;
+
+    // maximumPoolSize is only used when corePoolSize == 0
+    // if keepAliveTime < 0 and unit == null, we forbid core thread timeouts (e.g. single threaded executors by default)
+    public ThreadPoolExecutorBase(ThreadPoolExecutorBuilder<?> builder)
+    {
+        super(builder.coreThreads(), builder.maxThreads(), builder.keepAlive(), builder.keepAliveUnits(), builder.newQueue(), builder.newThreadFactory());
+        allowCoreThreadTimeOut(builder.allowCoreThreadTimeouts());
+
+        // block task submissions until queue has room.
+        // this is fighting TPE's design a bit because TPE rejects if queue.offer reports a full queue.
+        // we'll just override this with a handler that retries until it gets in.  ugly, but effective.
+        // (there is an extensive analysis of the options here at
+        //  http://today.java.net/pub/a/today/2008/10/23/creating-a-notifying-blocking-thread-pool-executor.html)
+        setRejectedExecutionHandler(builder.rejectedExecutionHandler(blockingExecutionHandler));
+    }
+
+    // no RejectedExecutionHandler
+    public ThreadPoolExecutorBase(int threads, int keepAlive, TimeUnit keepAliveUnits, BlockingQueue<Runnable> queue, NamedThreadFactory threadFactory)
+    {
+        super(threads, threads, keepAlive, keepAliveUnits, queue, threadFactory);
+        assert queue.isEmpty() : "Executor initialized with non-empty task queue";
+        allowCoreThreadTimeOut(true);
+    }
+
+    public void onShutdown(Runnable onShutdown)
+    {
+        this.onShutdown = onShutdown;
+    }
+
+    public Runnable onShutdown()
+    {
+        return onShutdown;
+    }
+
+    @Override
+    protected void terminated()
+    {
+        getThreadFactory().close();
+    }
+
+    @Override
+    public void shutdown()
+    {
+        try
+        {
+            super.shutdown();
+        }
+        finally
+        {
+            if (onShutdown != null)
+                onShutdown.run();
+        }
+    }
+
+    @Override
+    public List<Runnable> shutdownNow()
+    {
+        try
+        {
+            List<Runnable> cancelled = super.shutdownNow();
+            for (Runnable c : cancelled)
+            {
+                if (c instanceof java.util.concurrent.Future<?>)
+                    ((java.util.concurrent.Future<?>) c).cancel(true);
+            }
+            return cancelled;
+        }
+        finally
+        {
+            if (onShutdown != null)
+                onShutdown.run();
+        }
+    }
+    
+    @Override
+    public int getActiveTaskCount()
+    {
+        return getActiveCount();
+    }
+
+    @Override
+    public int getPendingTaskCount()
+    {
+        return getQueue().size();
+    }
+
+    public int getCoreThreads()
+    {
+        return getCorePoolSize();
+    }
+
+    public void setCoreThreads(int number)
+    {
+        setCorePoolSize(number);
+    }
+
+    public int getMaximumThreads()
+    {
+        return getMaximumPoolSize();
+    }
+
+    public void setMaximumThreads(int number)
+    {
+        setMaximumPoolSize(number);
+    }
+
+    @Override
+    public NamedThreadFactory getThreadFactory()
+    {
+        return (NamedThreadFactory) super.getThreadFactory();
+    }
+
+    public String toString()
+    {
+        return getThreadFactory().id;
+    }
+}
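The behaviour of blockingExecutionHandler is easiest to see in isolation. The standalone sketch below (hypothetical, not part of this patch) applies the same idea to a plain ThreadPoolExecutor: when the bounded queue is full, the submitting thread blocks in the handler and retries the offer until the task is accepted or the pool shuts down.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BlockingRejectionSketch
    {
        public static void main(String[] args) throws InterruptedException
        {
            ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                                                                 new ArrayBlockingQueue<>(1)); // tiny queue to force rejection

            // Instead of throwing RejectedExecutionException, block the submitter until the queue accepts the task.
            executor.setRejectedExecutionHandler((task, pool) ->
            {
                try
                {
                    while (true)
                    {
                        if (pool.isShutdown())
                            throw new RejectedExecutionException(pool + " has shut down");
                        if (pool.getQueue().offer(task, 1, TimeUnit.SECONDS))
                            break;
                    }
                }
                catch (InterruptedException e)
                {
                    Thread.currentThread().interrupt();
                    throw new RejectedExecutionException(e);
                }
            });

            for (int i = 0; i < 10; i++)            // later submissions block here rather than failing
                executor.execute(() -> sleep(100));

            executor.shutdown();
            executor.awaitTermination(1, TimeUnit.MINUTES);
        }

        private static void sleep(long millis)
        {
            try { Thread.sleep(millis); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
        }
    }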
diff --git a/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorBuilder.java b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorBuilder.java
new file mode 100644
index 0000000..5e578a5
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorBuilder.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+import org.apache.cassandra.concurrent.NamedThreadFactory.MetaFactory;
+
+import static java.lang.Thread.NORM_PRIORITY;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
+
+/**
+ * Configure a {@link ThreadPoolExecutorPlus}, applying Cassandra's best practices by default
+ * <li>Core threads may timeout, and use a default {@link #keepAlive} time in {@link #keepAliveUnits}
+ * <li>Threads share the same {@link ThreadGroup}, which may be configured to be a child of a specified
+ *     {@link ThreadGroup} descended from the same parent as the {@link MetaFactory}
+ * <li>By default queues are unbounded in length
+ * <li>The default {@link RejectedExecutionHandler} is implementation dependent, but may be overridden
+ * <li>The default {@link UncaughtExceptionHandler} is inherited from {@link MetaFactory}, which in turn receives it
+ *     from the {@link ExecutorBuilderFactory}
+ */
+public class ThreadPoolExecutorBuilder<E extends ExecutorPlus> extends MetaFactory implements ExecutorBuilder<E>
+{
+    static <E extends SequentialExecutorPlus> ExecutorBuilder<E> sequential(Function<ThreadPoolExecutorBuilder<E>, E> constructor, ClassLoader contextClassLoader, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler, String name)
+    {
+        ThreadPoolExecutorBuilder<E> result = new ThreadPoolExecutorBuilder<>(constructor, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, 1);
+        result.withKeepAlive();
+        return result;
+    }
+
+    static <E extends SingleThreadExecutorPlus> ExecutorBuilder<E> sequentialJmx(Function<ThreadPoolExecutorBuilder<E>, E> constructor, ClassLoader contextClassLoader, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler, String name, String jmxPath)
+    {
+        return new ThreadPoolExecutorJMXAdapter.Builder<>(sequential(constructor, contextClassLoader, threadGroup, uncaughtExceptionHandler, name), jmxPath);
+    }
+
+    static <E extends ExecutorPlus> ExecutorBuilder<E> pooled(Function<ThreadPoolExecutorBuilder<E>, E> constructor, ClassLoader contextClassLoader, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler, String name, int threads)
+    {
+        return new ThreadPoolExecutorBuilder<>(constructor, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, threads);
+    }
+
+    static <E extends ThreadPoolExecutorPlus> ExecutorBuilder<E> pooledJmx(Function<ThreadPoolExecutorBuilder<E>, E> constructor, ClassLoader contextClassLoader, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler, String name, int threads, String jmxPath)
+    {
+        return new ThreadPoolExecutorJMXAdapter.Builder<>(pooled(constructor, contextClassLoader, threadGroup, uncaughtExceptionHandler, name, threads), jmxPath);
+    }
+
+    private final Function<ThreadPoolExecutorBuilder<E>, E> constructor;
+    private final String name;
+    private final int threads;
+    private int threadPriority = NORM_PRIORITY;
+    private Integer queueLimit;
+
+    private long keepAlive = 1;
+    private TimeUnit keepAliveUnits = MINUTES;
+    private boolean allowCoreThreadTimeouts = true;
+
+    private RejectedExecutionHandler rejectedExecutionHandler = null;
+
+    protected ThreadPoolExecutorBuilder(Function<ThreadPoolExecutorBuilder<E>, E> constructor, ClassLoader contextClassLoader, ThreadGroup overrideThreadGroup, UncaughtExceptionHandler uncaughtExceptionHandler, String name, int threads)
+    {
+        super(contextClassLoader, overrideThreadGroup, uncaughtExceptionHandler);
+        this.constructor = constructor;
+        this.name = name;
+        this.threads = threads;
+    }
+
+    // core and non-core threads will die after this period of inactivity
+    public ThreadPoolExecutorBuilder<E> withKeepAlive(long keepAlive, TimeUnit keepAliveUnits)
+    {
+        this.allowCoreThreadTimeouts = true;
+        this.keepAlive = keepAlive;
+        this.keepAliveUnits = keepAliveUnits;
+        return this;
+    }
+
+    // once started, core threads will never die
+    public ThreadPoolExecutorBuilder<E> withKeepAlive()
+    {
+        this.allowCoreThreadTimeouts = false;
+        return this;
+    }
+
+    public ThreadPoolExecutorBuilder<E> withThreadPriority(int threadPriority)
+    {
+        this.threadPriority = threadPriority;
+        return this;
+    }
+
+    @Override
+    public ExecutorBuilder<E> withThreadGroup(ThreadGroup threadGroup)
+    {
+        ThreadGroup current = this.threadGroup;
+
+        ThreadGroup parent = threadGroup;
+        while (parent != null && parent != current)
+            parent = parent.getParent();
+        if (parent != current)
+            throw new IllegalArgumentException("threadGroup may only be overridden with a child of the default threadGroup");
+
+        this.threadGroup = threadGroup;
+        return this;
+    }
+
+    @Override
+    public ExecutorBuilder<E> withDefaultThreadGroup()
+    {
+        this.threadGroup = null;
+        return this;
+    }
+
+    public ThreadPoolExecutorBuilder<E> withQueueLimit(int queueLimit)
+    {
+        this.queueLimit = queueLimit;
+        return this;
+    }
+
+    public ThreadPoolExecutorBuilder<E> withRejectedExecutionHandler(RejectedExecutionHandler rejectedExecutionHandler)
+    {
+        this.rejectedExecutionHandler = rejectedExecutionHandler;
+        return this;
+    }
+
+    public ThreadPoolExecutorBuilder<E> withUncaughtExceptionHandler(UncaughtExceptionHandler uncaughtExceptionHandler)
+    {
+        this.uncaughtExceptionHandler = uncaughtExceptionHandler;
+        return this;
+    }
+
+    @Override
+    public E build()
+    {
+        return constructor.apply(this);
+    }
+
+    NamedThreadFactory newThreadFactory()
+    {
+        return newThreadFactory(name, threadPriority);
+    }
+
+    BlockingQueue<Runnable> newQueue()
+    {
+        // if our pool can have an infinite number of threads, there is no point having an infinite queue length
+        int size = queueLimit != null
+                ? queueLimit
+                : threads == Integer.MAX_VALUE
+                    ? 0 : Integer.MAX_VALUE;
+        return newBlockingQueue(size);
+    }
+
+    /**
+     * If our queue blocks on/rejects all submissions, we can configure our core pool size to 0,
+     * as new threads will always be created for new work, and core threads time out at the same
+     * rate as non-core threads.
+     */
+    int coreThreads()
+    {
+        return (queueLimit != null && queueLimit == 0) || threads == Integer.MAX_VALUE ? 0 : threads;
+    }
+
+    int maxThreads()
+    {
+        return threads;
+    }
+
+    RejectedExecutionHandler rejectedExecutionHandler(RejectedExecutionHandler ifNotSet)
+    {
+        return rejectedExecutionHandler == null ? ifNotSet : rejectedExecutionHandler;
+    }
+
+    long keepAlive()
+    {
+        return keepAlive;
+    }
+
+    TimeUnit keepAliveUnits()
+    {
+        return keepAliveUnits;
+    }
+
+    boolean allowCoreThreadTimeouts()
+    {
+        return allowCoreThreadTimeouts;
+    }
+}
\ No newline at end of file
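A rough sketch of driving the builder (illustrative only; it assumes a class in the same org.apache.cassandra.concurrent package, since the static factory methods are package-private, and the pool name and sizes are made up):

    package org.apache.cassandra.concurrent;

    import static java.util.concurrent.TimeUnit.SECONDS;

    public class BuilderSketch
    {
        static ExecutorPlus buildSketchPool()
        {
            ExecutorBuilder<ThreadPoolExecutorPlus> builder =
                ThreadPoolExecutorBuilder.pooled(ThreadPoolExecutorPlus::new,           // how to construct the executor
                                                 BuilderSketch.class.getClassLoader(),  // context class loader for worker threads
                                                 null,                                  // use the default thread group
                                                 (thread, error) -> error.printStackTrace(),
                                                 "SketchPool",                          // thread / pool name
                                                 8);                                    // maximum threads

            return builder.withKeepAlive(30, SECONDS)        // idle threads may die after 30 seconds
                          .withQueueLimit(128)               // bounded queue; overflow blocks the submitter
                          .withThreadPriority(Thread.NORM_PRIORITY)
                          .build();
        }
    }

Setting withQueueLimit(0) would make coreThreads() return 0, per the comment on that method, so every task is handed to a fresh or idle thread rather than sitting in a queue.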
diff --git a/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorJMXAdapter.java b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorJMXAdapter.java
new file mode 100644
index 0000000..c596d2b
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorJMXAdapter.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.metrics.ThreadPoolMetrics;
+import org.apache.cassandra.utils.MBeanWrapper;
+
+/**
+ * An adapter that exposes a {@link ThreadPoolExecutorBase} via JMX.
+ * To maximise code re-use, the executor itself is not modified:
+ * only its rejected execution handler is updated, and a shutdown listener is registered.
+ */
+@VisibleForTesting
+public class ThreadPoolExecutorJMXAdapter implements Runnable, ResizableThreadPoolMXBean
+{
+    /**
+     * A builder wrapper that delegates all methods except {@link Builder#build()}
+     * @param <E> the executor type produced by the wrapped builder
+     */
+    static class Builder<E extends ThreadPoolExecutorBase> implements ExecutorBuilder<E>
+    {
+        final ExecutorBuilder<E> wrapped;
+        final String jmxPath;
+        Builder(ExecutorBuilder<E> wrapped, String jmxPath)
+        {
+            this.wrapped = wrapped;
+            this.jmxPath = jmxPath;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withKeepAlive(long keepAlive, TimeUnit keepAliveUnits)
+        {
+            wrapped.withKeepAlive(keepAlive, keepAliveUnits);
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withKeepAlive()
+        {
+            wrapped.withKeepAlive();
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withThreadPriority(int threadPriority)
+        {
+            wrapped.withThreadPriority(threadPriority);
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withQueueLimit(int queueLimit)
+        {
+            wrapped.withQueueLimit(queueLimit);
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withThreadGroup(ThreadGroup threadGroup)
+        {
+            wrapped.withThreadGroup(threadGroup);
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withDefaultThreadGroup()
+        {
+            wrapped.withDefaultThreadGroup();
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withRejectedExecutionHandler(RejectedExecutionHandler rejectedExecutionHandler)
+        {
+            wrapped.withRejectedExecutionHandler(rejectedExecutionHandler);
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withUncaughtExceptionHandler(Thread.UncaughtExceptionHandler uncaughtExceptionHandler)
+        {
+            wrapped.withUncaughtExceptionHandler(uncaughtExceptionHandler);
+            return this;
+        }
+
+        /**
+         * Invoke {@link ExecutorBuilder#build()} on {@link #wrapped}, and register the resultant
+         * {@link ThreadPoolExecutorBase} with a new {@link ThreadPoolExecutorJMXAdapter}.
+         *
+         * The executor constructed by {@link #wrapped} is returned.
+         */
+        @Override
+        public E build()
+        {
+            E result = wrapped.build();
+            register(jmxPath, result);
+            return result;
+        }
+    }
+
+    public static void register(String jmxPath, ThreadPoolExecutorBase executor)
+    {
+        new ThreadPoolExecutorJMXAdapter(jmxPath, executor);
+    }
+
+    final String mbeanName;
+    final ThreadPoolExecutorBase executor;
+    final ThreadPoolMetrics metrics;
+    boolean released;
+
+    private ThreadPoolExecutorJMXAdapter(String jmxPath, ThreadPoolExecutorBase executor)
+    {
+        this.executor = executor;
+        this.mbeanName = "org.apache.cassandra." + jmxPath + ":type=" + executor.getThreadFactory().id;
+        this.metrics = new ThreadPoolMetrics(executor, jmxPath, executor.getThreadFactory().id).register();
+        executor.setRejectedExecutionHandler(rejectedExecutionHandler(metrics, executor.getRejectedExecutionHandler()));
+        MBeanWrapper.instance.registerMBean(this, mbeanName);
+        executor.onShutdown(this);
+    }
+
+    @Override
+    public synchronized void run()
+    {
+        if (released)
+            return;
+
+        MBeanWrapper.instance.unregisterMBean(mbeanName);
+        metrics.release();
+        released = true;
+    }
+
+    public ThreadPoolMetrics metrics()
+    {
+        return metrics;
+    }
+
+    @Override
+    public int getActiveTaskCount()
+    {
+        return executor.getActiveTaskCount();
+    }
+
+    @Override
+    public int getPendingTaskCount()
+    {
+        return executor.getPendingTaskCount();
+    }
+
+    @Override
+    public int getCoreThreads()
+    {
+        return executor.getCoreThreads();
+    }
+
+    @Override
+    public void setCoreThreads(int number)
+    {
+        executor.setCoreThreads(number);
+    }
+
+    @Override
+    public int getMaximumThreads()
+    {
+        return executor.getMaximumThreads();
+    }
+
+    @Override
+    public void setMaximumThreads(int number)
+    {
+        executor.setMaximumThreads(number);
+    }
+
+    @Override
+    public void setCorePoolSize(int corePoolSize)
+    {
+        executor.setCorePoolSize(corePoolSize);
+    }
+
+    @Override
+    public int getCorePoolSize()
+    {
+        return executor.getCorePoolSize();
+    }
+
+    @Override
+    public void setMaximumPoolSize(int maximumPoolSize)
+    {
+        executor.setMaximumPoolSize(maximumPoolSize);
+    }
+
+    @Override
+    public int getMaximumPoolSize()
+    {
+        return executor.getMaximumPoolSize();
+    }
+
+    @Override
+    public long getCompletedTaskCount()
+    {
+        return executor.getCompletedTaskCount();
+    }
+
+    @Override
+    public int getMaxTasksQueued()
+    {
+        return executor.getMaxTasksQueued();
+    }
+
+    static RejectedExecutionHandler rejectedExecutionHandler(ThreadPoolMetrics metrics, RejectedExecutionHandler wrap)
+    {
+        return (task, executor) ->
+        {
+            metrics.totalBlocked.inc();
+            metrics.currentBlocked.inc();
+            try
+            {
+                wrap.rejectedExecution(task, executor);
+            }
+            finally
+            {
+                metrics.currentBlocked.dec();
+            }
+        };
+    }
+}
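To make the registration flow concrete, a small sketch (illustrative only, same in-package assumption as above): register() creates the adapter, which swaps in a metrics-counting rejection handler, publishes the MBean, and hooks the executor's shutdown callback so both are unregistered again.

    package org.apache.cassandra.concurrent;

    public class JmxAdapterSketch
    {
        /** Expose an already-built pool over JMX; illustrative only. */
        static void expose(ThreadPoolExecutorPlus executor)
        {
            // Publishes an MBean named org.apache.cassandra.internal:type=<pool name>, wires
            // ThreadPoolMetrics around the existing rejection handler, and registers a shutdown
            // callback so both are removed when the executor terminates.
            ThreadPoolExecutorJMXAdapter.register("internal", executor);
        }
    }

In the patch itself this normally happens through the JMX-aware builders (sequentialJmx / pooledJmx), whose build() calls register() with the configured path.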
diff --git a/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorPlus.java
new file mode 100644
index 0000000..ad735d9
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/ThreadPoolExecutorPlus.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.RunnableFuture;
+
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Future;
+
+/**
+ * This class inherits Executor best practices from {@link ThreadPoolExecutorBase}
+ * and {@link ThreadPoolExecutorBuilder}. Most Cassandra executors should use or extend this.
+ *
+ * This class' addition is to abstract the semantics of task encapsulation to handle
+ * exceptions and {@link ExecutorLocals}. See {@link TaskFactory} for more detail.
+ */
+public class ThreadPoolExecutorPlus extends ThreadPoolExecutorBase implements ExecutorPlus
+{
+    final TaskFactory taskFactory;
+
+    ThreadPoolExecutorPlus(ThreadPoolExecutorBuilder<? extends ThreadPoolExecutorPlus> builder)
+    {
+        this(builder, TaskFactory.standard());
+    }
+
+    ThreadPoolExecutorPlus(ThreadPoolExecutorBuilder<? extends ThreadPoolExecutorPlus> builder, TaskFactory taskFactory)
+    {
+        super(builder);
+        this.taskFactory = taskFactory;
+    }
+
+    private <T extends Runnable> T addTask(T task)
+    {
+        super.execute(task);
+        return task;
+    }
+
+    @Override
+    public void execute(Runnable run)
+    {
+        addTask(taskFactory.toExecute(run));
+    }
+
+    @Override
+    public void execute(WithResources withResources, Runnable run)
+    {
+        addTask(taskFactory.toExecute(withResources, run));
+    }
+
+    @Override
+    public Future<?> submit(Runnable run)
+    {
+        return addTask(taskFactory.toSubmit(run));
+    }
+
+    @Override
+    public <T> Future<T> submit(Runnable run, T result)
+    {
+        return addTask(taskFactory.toSubmit(run, result));
+    }
+
+    @Override
+    public <T> Future<T> submit(Callable<T> call)
+    {
+        return addTask(taskFactory.toSubmit(call));
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Runnable run, T result)
+    {
+        return addTask(taskFactory.toSubmit(withResources, run, result));
+    }
+
+    @Override
+    public Future<?> submit(WithResources withResources, Runnable run)
+    {
+        return addTask(taskFactory.toSubmit(withResources, run));
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Callable<T> call)
+    {
+        return addTask(taskFactory.toSubmit(withResources, call));
+    }
+
+    @Override
+    public boolean inExecutor()
+    {
+        return Thread.currentThread().getThreadGroup() == getThreadFactory().threadGroup;
+    }
+
+    @Override
+    protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value)
+    {
+        return taskFactory.toSubmit(runnable, value);
+    }
+
+    @Override
+    protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable)
+    {
+        return taskFactory.toSubmit(callable);
+    }
+
+    @Override
+    public int getMaxTasksQueued()
+    {
+        return getQueue().size();
+    }
+}
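A sketch of the resulting submission semantics (illustrative only; it assumes Cassandra's Future follows the Netty-style contract with awaitUninterruptibly() and cause(), which is not shown in this diff):

    package org.apache.cassandra.concurrent;

    import java.util.concurrent.Callable;

    import org.apache.cassandra.utils.concurrent.Future;

    public class ExecutorPlusSketch
    {
        static void demo(ExecutorPlus executor)
        {
            // execute(): an exception never reaches the caller; ExecutionFailure handles it
            // (uncaught exception handler / logging) because nobody is waiting on a result.
            executor.execute(() -> { throw new RuntimeException("fire and forget"); });

            // submit(): the exception is captured by the returned Future instead, so the
            // caller decides whether and when to observe it.
            Callable<Object> failing = () -> { throw new RuntimeException("observed"); };
            Future<Object> future = executor.submit(failing);
            future.awaitUninterruptibly();     // assumed Netty-style Future API
            assert future.cause() != null;     // the failure is recorded on the future
        }
    }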
diff --git a/src/java/org/apache/cassandra/concurrent/WrappedExecutorPlus.java b/src/java/org/apache/cassandra/concurrent/WrappedExecutorPlus.java
new file mode 100644
index 0000000..7408c33
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/WrappedExecutorPlus.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Future;
+
+public class WrappedExecutorPlus implements ExecutorPlus
+{
+    protected final ExecutorPlus executor;
+
+    public WrappedExecutorPlus(ExecutorPlus executor)
+    {
+        this.executor = executor;
+    }
+
+    public void maybeExecuteImmediately(Runnable task)
+    {
+        executor.maybeExecuteImmediately(task);
+    }
+
+    public void execute(WithResources withResources, Runnable task)
+    {
+        executor.execute(withResources, task);
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Callable<T> task)
+    {
+        return executor.submit(withResources, task);
+    }
+
+    @Override
+    public <T> Future<T> submit(WithResources withResources, Runnable task, T result)
+    {
+        return executor.submit(withResources, task, result);
+    }
+
+    @Override
+    public Future<?> submit(WithResources withResources, Runnable task)
+    {
+        return executor.submit(withResources, task);
+    }
+
+    @Override
+    public boolean inExecutor()
+    {
+        return executor.inExecutor();
+    }
+
+    public <T> Future<T> submit(Callable<T> task)
+    {
+        return executor.submit(task);
+    }
+
+    public <T> Future<T> submit(Runnable task, T result)
+    {
+        return executor.submit(task, result);
+    }
+
+    public Future<?> submit(Runnable task)
+    {
+        return executor.submit(task);
+    }
+
+    public int getActiveTaskCount()
+    {
+        return executor.getActiveTaskCount();
+    }
+
+    public long getCompletedTaskCount()
+    {
+        return executor.getCompletedTaskCount();
+    }
+
+    public int getPendingTaskCount()
+    {
+        return executor.getPendingTaskCount();
+    }
+
+    public int getMaxTasksQueued()
+    {
+        return executor.getMaxTasksQueued();
+    }
+
+    public int getCorePoolSize()
+    {
+        return executor.getCorePoolSize();
+    }
+
+    public void setCorePoolSize(int newCorePoolSize)
+    {
+        executor.setCorePoolSize(newCorePoolSize);
+    }
+
+    public int getMaximumPoolSize()
+    {
+        return executor.getMaximumPoolSize();
+    }
+
+    public void setMaximumPoolSize(int newMaximumPoolSize)
+    {
+        executor.setMaximumPoolSize(newMaximumPoolSize);
+    }
+
+    public <T> List<java.util.concurrent.Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException
+    {
+        return executor.invokeAll(tasks);
+    }
+
+    public <T> List<java.util.concurrent.Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException
+    {
+        return executor.invokeAll(tasks, timeout, unit);
+    }
+
+    public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException
+    {
+        return executor.invokeAny(tasks);
+    }
+
+    public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
+    {
+        return executor.invokeAny(tasks, timeout, unit);
+    }
+
+    public void shutdown()
+    {
+        executor.shutdown();
+    }
+
+    public List<Runnable> shutdownNow()
+    {
+        return executor.shutdownNow();
+    }
+
+    public boolean isShutdown()
+    {
+        return executor.isShutdown();
+    }
+
+    public boolean isTerminated()
+    {
+        return executor.isTerminated();
+    }
+
+    public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException
+    {
+        return executor.awaitTermination(timeout, unit);
+    }
+
+    public void execute(Runnable task)
+    {
+        executor.execute(task);
+    }
+}
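WrappedExecutorPlus is a plain delegating base class, so decorating behaviour only requires overriding the methods of interest. A hypothetical sketch (not part of this patch):

    package org.apache.cassandra.concurrent;

    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.cassandra.utils.concurrent.Future;

    /** Hypothetical decorator counting how many tasks were submitted through it. */
    public class CountingExecutorPlus extends WrappedExecutorPlus
    {
        private final AtomicLong submitted = new AtomicLong();

        public CountingExecutorPlus(ExecutorPlus executor)
        {
            super(executor);
        }

        @Override
        public Future<?> submit(Runnable task)
        {
            submitted.incrementAndGet();   // record the submission, then delegate as usual
            return super.submit(task);
        }

        public long submittedTasks()
        {
            return submitted.get();
        }
    }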
diff --git a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java
index 67de29b..3e45ebc 100644
--- a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java
+++ b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java
@@ -21,6 +21,7 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.service.FileSystemOwnershipCheck;
 
 /** A class that extracts system properties for the cassandra node it runs within. */
 public enum CassandraRelevantProperties
@@ -95,8 +96,9 @@
      */
     COM_SUN_MANAGEMENT_JMXREMOTE_RMI_PORT ("com.sun.management.jmxremote.rmi.port", "0"),
 
-    /** Cassandra jmx remote port */
+    /** Cassandra jmx remote and local port */
     CASSANDRA_JMX_REMOTE_PORT("cassandra.jmx.remote.port"),
+    CASSANDRA_JMX_LOCAL_PORT("cassandra.jmx.local.port"),
 
     /** This property  indicates whether SSL is enabled for monitoring remotely. Default is set to false. */
     COM_SUN_MANAGEMENT_JMXREMOTE_SSL ("com.sun.management.jmxremote.ssl"),
@@ -138,6 +140,8 @@
     /** mx4jport */
     MX4JPORT ("mx4jport"),
 
+    RING_DELAY("cassandra.ring_delay_ms"),
+
     /**
      * When bootstrapping we wait for all schema versions found in gossip to be seen, and if not seen in time we fail
      * the bootstrap; this property will avoid failing and allow bootstrap to continue if set to true.
@@ -159,10 +163,14 @@
      */
     GOSSIPER_QUARANTINE_DELAY("cassandra.gossip_quarantine_delay_ms"),
 
+    GOSSIPER_SKIP_WAITING_TO_SETTLE("cassandra.skip_wait_for_gossip_to_settle", "-1"),
+
     IGNORED_SCHEMA_CHECK_VERSIONS("cassandra.skip_schema_check_for_versions"),
 
     IGNORED_SCHEMA_CHECK_ENDPOINTS("cassandra.skip_schema_check_for_endpoints"),
 
+    SHUTDOWN_ANNOUNCE_DELAY_IN_MS("cassandra.shutdown_announce_in_ms", "2000"),
+
     /**
      * When doing a host replacement its possible that the gossip state is "empty" meaning that the endpoint is known
      * but the current state isn't known.  If the host replacement is needed to repair this state, this property must
@@ -171,6 +179,22 @@
     REPLACEMENT_ALLOW_EMPTY("cassandra.allow_empty_replace_address", "true"),
 
     /**
+     * Directory where Cassandra puts its logs, defaults to "." which is the current directory.
+     */
+    LOG_DIR("cassandra.logdir", "."),
+
+    /**
+     * Directory where Cassandra persists logs from audit logging. If this property is not set, the audit log framework
+     * will set it automatically to {@link CassandraRelevantProperties#LOG_DIR} + "/audit".
+     */
+    LOG_DIR_AUDIT("cassandra.logdir.audit"),
+
+    CONSISTENT_DIRECTORY_LISTINGS("cassandra.consistent_directory_listings", "false"),
+    CLOCK_GLOBAL("cassandra.clock", null),
+    CLOCK_MONOTONIC_APPROX("cassandra.monotonic_clock.approx", null),
+    CLOCK_MONOTONIC_PRECISE("cassandra.monotonic_clock.precise", null),
+
+    /**
      * Whether {@link org.apache.cassandra.db.ConsistencyLevel#NODE_LOCAL} should be allowed.
      */
     ENABLE_NODELOCAL_QUERIES("cassandra.enable_nodelocal_queries", "false"),
@@ -186,25 +210,93 @@
 
     DEFAULT_PROVIDE_OVERLAPPING_TOMBSTONES ("default.provide.overlapping.tombstones"),
     ORG_APACHE_CASSANDRA_DISABLE_MBEAN_REGISTRATION ("org.apache.cassandra.disable_mbean_registration"),
-    //only for testing
-    ORG_APACHE_CASSANDRA_CONF_CASSANDRA_RELEVANT_PROPERTIES_TEST("org.apache.cassandra.conf.CassandraRelevantPropertiesTest"),
-    ORG_APACHE_CASSANDRA_DB_VIRTUAL_SYSTEM_PROPERTIES_TABLE_TEST("org.apache.cassandra.db.virtual.SystemPropertiesTableTest"),
 
     /** This property indicates whether disable_mbean_registration is true */
     IS_DISABLED_MBEAN_REGISTRATION("org.apache.cassandra.disable_mbean_registration"),
 
+    /** snapshots ttl cleanup period in seconds */
+    SNAPSHOT_CLEANUP_PERIOD_SECONDS("cassandra.snapshot.ttl_cleanup_period_seconds", "60"),
+
+    /** snapshots ttl cleanup initial delay in seconds */
+    SNAPSHOT_CLEANUP_INITIAL_DELAY_SECONDS("cassandra.snapshot.ttl_cleanup_initial_delay_seconds", "5"),
+
+    /** minimum allowed TTL for snapshots */
+    SNAPSHOT_MIN_ALLOWED_TTL_SECONDS("cassandra.snapshot.min_allowed_ttl_seconds", "60"),
+
     /** what class to use for mbean registration */
     MBEAN_REGISTRATION_CLASS("org.apache.cassandra.mbean_registration_class"),
 
+    BATCH_COMMIT_LOG_SYNC_INTERVAL("cassandra.batch_commitlog_sync_interval_millis", "1000"),
+
+    SYSTEM_AUTH_DEFAULT_RF("cassandra.system_auth.default_rf", "1"),
+    SYSTEM_TRACES_DEFAULT_RF("cassandra.system_traces.default_rf", "2"),
+    SYSTEM_DISTRIBUTED_DEFAULT_RF("cassandra.system_distributed.default_rf", "3"),
+
+    MEMTABLE_OVERHEAD_SIZE("cassandra.memtable.row_overhead_size", "-1"),
+    MEMTABLE_OVERHEAD_COMPUTE_STEPS("cassandra.memtable_row_overhead_computation_step", "100000"),
     MIGRATION_DELAY("cassandra.migration_delay_ms", "60000"),
     /** Defines how often schema definitions are pulled from the other nodes */
     SCHEMA_PULL_INTERVAL_MS("cassandra.schema_pull_interval_ms", "60000"),
-    /**
-     * Minimum delay after a failed pull request before it is reattempted. It prevents reattempting failed requests
-     * immediately as it is high chance they will fail anyway. It is better to wait a bit instead of flooding logs
-     * and wasting resources.
-     */
-    SCHEMA_PULL_BACKOFF_DELAY_MS("cassandra.schema_pull_backoff_delay_ms", "3000"),
+
+    PAXOS_REPAIR_RETRY_TIMEOUT_IN_MS("cassandra.paxos_repair_retry_timeout_millis", "60000"),
+
+    /** If we should allow having duplicate keys in the config file, default to true for legacy reasons */
+    ALLOW_DUPLICATE_CONFIG_KEYS("cassandra.allow_duplicate_config_keys", "true"),
+    /** If we should allow having both new (post CASSANDRA-15234) and old config keys for the same config item in the yaml */
+    ALLOW_NEW_OLD_CONFIG_KEYS("cassandra.allow_new_old_config_keys", "false"),
+
+    // startup checks properties
+    LIBJEMALLOC("cassandra.libjemalloc"),
+    @Deprecated // should be removed in favor of enable flag of relevant startup check (checkDatacenter)
+    IGNORE_DC("cassandra.ignore_dc"),
+    @Deprecated // should be removed in favor of enable flag of relevant startup check (checkRack)
+    IGNORE_RACK("cassandra.ignore_rack"),
+    @Deprecated // should be removed in favor of enable flag of relevant startup check (FileSystemOwnershipCheck)
+    FILE_SYSTEM_CHECK_ENABLE("cassandra.enable_fs_ownership_check"),
+    @Deprecated // should be removed in favor of flags in relevant startup check (FileSystemOwnershipCheck)
+    FILE_SYSTEM_CHECK_OWNERSHIP_FILENAME("cassandra.fs_ownership_filename", FileSystemOwnershipCheck.DEFAULT_FS_OWNERSHIP_FILENAME),
+    @Deprecated // should be removed in favor of flags in relevant startup check (FileSystemOwnershipCheck)
+    FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN(FileSystemOwnershipCheck.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN),
+    // default heartbeating period is 1 minute
+    CHECK_DATA_RESURRECTION_HEARTBEAT_PERIOD("check_data_resurrection_heartbeat_period_milli", "60000"),
+
+    // defaults to false for 4.1, but we plan to switch to true in a later release
+    // the thinking is that some environments may not work right off the bat, so it is safer to add this feature disabled by default
+    CONFIG_ALLOW_SYSTEM_PROPERTIES("cassandra.config.allow_system_properties", "false"),
+
+    // properties for debugging simulator ASM output
+    TEST_SIMULATOR_PRINT_ASM("cassandra.test.simulator.print_asm", "none"),
+    TEST_SIMULATOR_PRINT_ASM_TYPES("cassandra.test.simulator.print_asm_types", ""),
+    TEST_SIMULATOR_LIVENESS_CHECK("cassandra.test.simulator.livenesscheck", "true"),
+    TEST_SIMULATOR_DEBUG("cassandra.test.simulator.debug", "false"),
+    TEST_SIMULATOR_DETERMINISM_CHECK("cassandra.test.simulator.determinismcheck", "none"),
+    TEST_JVM_DTEST_DISABLE_SSL("cassandra.test.disable_ssl", "false"),
+
+    // determinism properties for testing
+    DETERMINISM_SSTABLE_COMPRESSION_DEFAULT("cassandra.sstable_compression_default", "true"),
+    DETERMINISM_CONSISTENT_DIRECTORY_LISTINGS("cassandra.consistent_directory_listings", "false"),
+    DETERMINISM_UNSAFE_UUID_NODE("cassandra.unsafe.deterministicuuidnode", "false"),
+    FAILURE_LOGGING_INTERVAL_SECONDS("cassandra.request_failure_log_interval_seconds", "60"),
+
+    // properties to disable certain behaviours for testing
+    DISABLE_GOSSIP_ENDPOINT_REMOVAL("cassandra.gossip.disable_endpoint_removal"),
+    IGNORE_MISSING_NATIVE_FILE_HINTS("cassandra.require_native_file_hints", "false"),
+    DISABLE_SSTABLE_ACTIVITY_TRACKING("cassandra.sstable_activity_tracking", "true"),
+    TEST_IGNORE_SIGAR("cassandra.test.ignore_sigar", "false"),
+    PAXOS_EXECUTE_ON_SELF("cassandra.paxos.use_self_execution", "true"),
+
+    /** property for the rate of the scheduled task that monitors disk usage */
+    DISK_USAGE_MONITOR_INTERVAL_MS("cassandra.disk_usage.monitor_interval_ms", Long.toString(TimeUnit.SECONDS.toMillis(30))),
+
+    /** property for the interval on which the repeated client warnings and diagnostic events about disk usage are ignored */
+    DISK_USAGE_NOTIFY_INTERVAL_MS("cassandra.disk_usage.notify_interval_ms", Long.toString(TimeUnit.MINUTES.toMillis(30))),
+
+    // for specific tests
+    ORG_APACHE_CASSANDRA_CONF_CASSANDRA_RELEVANT_PROPERTIES_TEST("org.apache.cassandra.conf.CassandraRelevantPropertiesTest"),
+    ORG_APACHE_CASSANDRA_DB_VIRTUAL_SYSTEM_PROPERTIES_TABLE_TEST("org.apache.cassandra.db.virtual.SystemPropertiesTableTest"),
+
+    /** Used when running in Client mode and the system and schema keyspaces need to be initialized outside of their normal initialization path **/
+    FORCE_LOAD_LOCAL_KEYSPACES("cassandra.schema.force_load_local_keyspaces"),
 
     /** When enabled, recursive directory deletion will be executed using a unix command `rm -rf` instead of traversing
      * and removing individual files. This is currently used only in tests, but eventually we will make it true by default. */
@@ -255,6 +347,16 @@
     }
 
     /**
+     * Returns default value.
+     *
+     * @return default value, if any, otherwise null.
+     */
+    public String getDefaultValue()
+    {
+        return defaultVal;
+    }
+
+    /**
      * Sets the property to its default value if a default value was specified. Remove the property otherwise.
      */
     public void reset()
@@ -278,6 +380,15 @@
         return STRING_CONVERTER.convert(value);
     }
 
+    public <T> T convert(PropertyConverter<T> converter)
+    {
+        String value = System.getProperty(key);
+        if (value == null)
+            value = defaultVal;
+
+        return converter.convert(value);
+    }
+
     /**
      * Sets the value into system properties.
      * @param value to set
@@ -333,6 +444,17 @@
 
     /**
      * Gets the value of a system property as a long.
+     * @return system property long value if it exists, defaultValue otherwise.
+     */
+    public long getLong()
+    {
+        String value = System.getProperty(key);
+
+        return LONG_CONVERTER.convert(value == null ? defaultVal : value);
+    }
+
+    /**
+     * Gets the value of a system property as an int.
      * @return system property int value if it exists, overrideDefaultValue otherwise.
      */
     public int getInt(int overrideDefaultValue)
@@ -353,7 +475,16 @@
         System.setProperty(key, Integer.toString(value));
     }
 
-    private interface PropertyConverter<T>
+    /**
+     * Sets the value into system properties.
+     * @param value to set
+     */
+    public void setLong(long value)
+    {
+        System.setProperty(key, Long.toString(value));
+    }
+
+    public interface PropertyConverter<T>
     {
         T convert(String value);
     }
@@ -375,6 +506,19 @@
         }
     };
 
+    private static final PropertyConverter<Long> LONG_CONVERTER = value ->
+    {
+        try
+        {
+            return Long.decode(value);
+        }
+        catch (NumberFormatException e)
+        {
+            throw new ConfigurationException(String.format("Invalid value for system property: " +
+                                                           "expected integer value but got '%s'", value));
+        }
+    };
+
     /**
      * @return whether a system property is present or not.
      */
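A brief sketch of how callers read the new accessors (illustrative only): getLong() falls back to the enum constant's declared default when the system property is unset, and convert() maps the raw string (or its default) through any PropertyConverter.

    import org.apache.cassandra.config.CassandraRelevantProperties;

    public class RelevantPropertiesSketch
    {
        public static void main(String[] args)
        {
            // Falls back to the declared default (30 minutes, in milliseconds) when unset.
            long notifyIntervalMs = CassandraRelevantProperties.DISK_USAGE_NOTIFY_INTERVAL_MS.getLong();

            // A custom PropertyConverter maps the raw string (or its default) to any type.
            boolean livenessCheck = CassandraRelevantProperties.TEST_SIMULATOR_LIVENESS_CHECK.convert(Boolean::parseBoolean);

            System.out.println(notifyIntervalMs + " " + livenessCheck);
        }
    }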
diff --git a/src/java/org/apache/cassandra/config/Config.java b/src/java/org/apache/cassandra/config/Config.java
index f8d8d46..8a59ca2 100644
--- a/src/java/org/apache/cassandra/config/Config.java
+++ b/src/java/org/apache/cassandra/config/Config.java
@@ -19,23 +19,28 @@
 
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
+import javax.annotation.Nullable;
+
 import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.audit.AuditLogOptions;
-import org.apache.cassandra.fql.FullQueryLoggerOptions;
 import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.fql.FullQueryLoggerOptions;
+import org.apache.cassandra.service.StartupChecks.StartupCheckType;
 
 /**
  * A class that contains configuration properties for the cassandra node it runs within.
@@ -46,6 +51,20 @@
 {
     private static final Logger logger = LoggerFactory.getLogger(Config.class);
 
+    public static Set<String> splitCommaDelimited(String src)
+    {
+        if (src == null)
+            return ImmutableSet.of();
+        String[] split = src.split(",\\s*");
+        ImmutableSet.Builder<String> builder = ImmutableSet.builder();
+        for (String s : split)
+        {
+            s = s.trim();
+            if (!s.isEmpty())
+                builder.add(s);
+        }
+        return builder.build();
+    }
     /*
      * Prefix for Java properties for internal Cassandra configuration options
      */
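For illustration (not part of the patch), splitCommaDelimited trims whitespace, drops empty entries, and returns an empty immutable set for null input:

    import java.util.Set;

    import org.apache.cassandra.config.Config;

    public class SplitCommaDelimitedSketch
    {
        public static void main(String[] args)
        {
            Set<String> dcs = Config.splitCommaDelimited("dc1, dc2 ,,dc3");
            System.out.println(dcs);                                          // [dc1, dc2, dc3]

            System.out.println(Config.splitCommaDelimited(null).isEmpty());   // true, never null
        }
    }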
@@ -56,15 +75,24 @@
     public String authorizer;
     public String role_manager;
     public String network_authorizer;
-    public volatile int permissions_validity_in_ms = 2000;
+    @Replaces(oldName = "permissions_validity_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound permissions_validity = new DurationSpec.IntMillisecondsBound("2s");
     public volatile int permissions_cache_max_entries = 1000;
-    public volatile int permissions_update_interval_in_ms = -1;
-    public volatile int roles_validity_in_ms = 2000;
+    @Replaces(oldName = "permissions_update_interval_in_ms", converter = Converters.MILLIS_CUSTOM_DURATION, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound permissions_update_interval = null;
+    public volatile boolean permissions_cache_active_update = false;
+    @Replaces(oldName = "roles_validity_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound roles_validity = new DurationSpec.IntMillisecondsBound("2s");
     public volatile int roles_cache_max_entries = 1000;
-    public volatile int roles_update_interval_in_ms = -1;
-    public volatile int credentials_validity_in_ms = 2000;
+    @Replaces(oldName = "roles_update_interval_in_ms", converter = Converters.MILLIS_CUSTOM_DURATION, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound roles_update_interval = null;
+    public volatile boolean roles_cache_active_update = false;
+    @Replaces(oldName = "credentials_validity_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound credentials_validity = new DurationSpec.IntMillisecondsBound("2s");
     public volatile int credentials_cache_max_entries = 1000;
-    public volatile int credentials_update_interval_in_ms = -1;
+    @Replaces(oldName = "credentials_update_interval_in_ms", converter = Converters.MILLIS_CUSTOM_DURATION, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound credentials_update_interval = null;
+    public volatile boolean credentials_cache_active_update = false;
 
     /* Hashing strategy Random or OPHF */
     public String partitioner;
@@ -72,8 +100,10 @@
     public boolean auto_bootstrap = true;
     public volatile boolean hinted_handoff_enabled = true;
     public Set<String> hinted_handoff_disabled_datacenters = Sets.newConcurrentHashSet();
-    public volatile int max_hint_window_in_ms = 3 * 3600 * 1000; // three hours
+    @Replaces(oldName = "max_hint_window_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound max_hint_window = new DurationSpec.IntMillisecondsBound("3h");
     public String hints_directory;
+    public boolean hint_window_persistent_enabled = true;
 
     public volatile boolean force_new_prepared_statement_behaviour = false;
 
@@ -83,6 +113,8 @@
     public DiskFailurePolicy disk_failure_policy = DiskFailurePolicy.ignore;
     public CommitFailurePolicy commit_failure_policy = CommitFailurePolicy.stop;
 
+    public volatile boolean use_deterministic_table_id = false;
+
     /* initial token in the ring */
     public String initial_token;
     public Integer num_tokens;
@@ -91,28 +123,42 @@
     /** Triggers automatic allocation of tokens if set, based on the provided replica count for a datacenter */
     public Integer allocate_tokens_for_local_replication_factor = null;
 
-    public long native_transport_idle_timeout_in_ms = 0L;
+    @Replaces(oldName = "native_transport_idle_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public DurationSpec.LongMillisecondsBound native_transport_idle_timeout = new DurationSpec.LongMillisecondsBound("0ms");
 
-    public volatile long request_timeout_in_ms = 10000L;
+    @Replaces(oldName = "request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound request_timeout = new DurationSpec.LongMillisecondsBound("10000ms");
 
-    public volatile long read_request_timeout_in_ms = 5000L;
+    @Replaces(oldName = "read_request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound read_request_timeout = new DurationSpec.LongMillisecondsBound("5000ms");
 
-    public volatile long range_request_timeout_in_ms = 10000L;
+    @Replaces(oldName = "range_request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound range_request_timeout = new DurationSpec.LongMillisecondsBound("10000ms");
 
-    public volatile long write_request_timeout_in_ms = 2000L;
+    @Replaces(oldName = "write_request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound write_request_timeout = new DurationSpec.LongMillisecondsBound("2000ms");
 
-    public volatile long counter_write_request_timeout_in_ms = 5000L;
+    @Replaces(oldName = "counter_write_request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound counter_write_request_timeout = new DurationSpec.LongMillisecondsBound("5000ms");
 
-    public volatile long cas_contention_timeout_in_ms = 1000L;
+    @Replaces(oldName = "cas_contention_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound cas_contention_timeout = new DurationSpec.LongMillisecondsBound("1800ms");
 
-    public volatile long truncate_request_timeout_in_ms = 60000L;
+    @Replaces(oldName = "truncate_request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound truncate_request_timeout = new DurationSpec.LongMillisecondsBound("60000ms");
+
+    @Replaces(oldName = "repair_request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound repair_request_timeout = new DurationSpec.LongMillisecondsBound("120000ms");
 
     public Integer streaming_connections_per_host = 1;
-    public Integer streaming_keep_alive_period_in_secs = 300; //5 minutes
+    @Replaces(oldName = "streaming_keep_alive_period_in_secs", converter = Converters.SECONDS_DURATION, deprecated = true)
+    public DurationSpec.IntSecondsBound streaming_keep_alive_period = new DurationSpec.IntSecondsBound("300s");
 
-    public boolean cross_node_timeout = true;
+    @Replaces(oldName = "cross_node_timeout", converter = Converters.IDENTITY, deprecated = true)
+    public boolean internode_timeout = true;
 
-    public volatile long slow_query_log_timeout_in_ms = 500L;
+    @Replaces(oldName = "slow_query_log_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public volatile DurationSpec.LongMillisecondsBound slow_query_log_timeout = new DurationSpec.LongMillisecondsBound("500ms");
 
     public volatile double phi_convict_threshold = 8.0;
 
@@ -120,21 +166,34 @@
     public int concurrent_writes = 32;
     public int concurrent_counter_writes = 32;
     public int concurrent_materialized_view_writes = 32;
+    public int available_processors = -1;
 
     @Deprecated
     public Integer concurrent_replicates = null;
 
     public int memtable_flush_writers = 0;
-    public Integer memtable_heap_space_in_mb;
-    public Integer memtable_offheap_space_in_mb;
+    @Replaces(oldName = "memtable_heap_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound memtable_heap_space;
+    @Replaces(oldName = "memtable_offheap_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound memtable_offheap_space;
     public Float memtable_cleanup_threshold = null;
 
+    public static class MemtableOptions
+    {
+        public LinkedHashMap<String, InheritingClass> configurations; // order must be preserved
+
+        public MemtableOptions()
+        {
+        }
+    }
+
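+    // A minimal sketch of the cassandra.yaml shape this maps to; the memtable class name shown is an
+    // assumption, see the memtable documentation for the implementations actually shipped:
+    //   memtable:
+    //     configurations:
+    //       skiplist:
+    //         class_name: SkipListMemtable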
+    public MemtableOptions memtable;
+
     // Limit the maximum depth of repair session merkle trees
     @Deprecated
     public volatile Integer repair_session_max_tree_depth = null;
-    public volatile Integer repair_session_space_in_mb = null;
-
-    public volatile long repair_request_timeout_in_ms = TimeUnit.MILLISECONDS.convert(1, TimeUnit.MINUTES);
+    @Replaces(oldName = "repair_session_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public volatile DataStorageSpec.IntMebibytesBound repair_session_space = null;
 
     public volatile boolean use_offheap_merkle_trees = true;
 
@@ -147,6 +206,8 @@
     public boolean listen_on_broadcast_address = false;
     public String internode_authenticator;
 
+    public boolean traverse_auth_from_root = false;
+
     /*
      * RPC address and interface refer to the address/interface used for the native protocol used to communicate with
      * clients. It's still called RPC in some places even though Thrift RPC is gone. If you see references to native
@@ -160,71 +221,109 @@
     public String broadcast_rpc_address;
     public boolean rpc_keepalive = true;
 
-    public Integer internode_max_message_size_in_bytes;
+    @Replaces(oldName = "internode_max_message_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated=true)
+    public DataStorageSpec.IntBytesBound internode_max_message_size;
 
-    @Replaces(oldName = "internode_send_buff_size_in_bytes", deprecated = true)
-    public int internode_socket_send_buffer_size_in_bytes = 0;
-    @Replaces(oldName = "internode_recv_buff_size_in_bytes", deprecated = true)
-    public int internode_socket_receive_buffer_size_in_bytes = 0;
+    @Replaces(oldName = "internode_socket_send_buffer_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    @Replaces(oldName = "internode_send_buff_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_socket_send_buffer_size = new DataStorageSpec.IntBytesBound("0B");
+    @Replaces(oldName = "internode_socket_receive_buffer_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    @Replaces(oldName = "internode_recv_buff_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_socket_receive_buffer_size = new DataStorageSpec.IntBytesBound("0B");
 
     // TODO: derive defaults from system memory settings?
-    public int internode_application_send_queue_capacity_in_bytes = 1 << 22; // 4MiB
-    public int internode_application_send_queue_reserve_endpoint_capacity_in_bytes = 1 << 27; // 128MiB
-    public int internode_application_send_queue_reserve_global_capacity_in_bytes = 1 << 29; // 512MiB
+    @Replaces(oldName = "internode_application_send_queue_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_application_send_queue_capacity = new DataStorageSpec.IntBytesBound("4MiB");
+    @Replaces(oldName = "internode_application_send_queue_reserve_endpoint_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_application_send_queue_reserve_endpoint_capacity = new DataStorageSpec.IntBytesBound("128MiB");
+    @Replaces(oldName = "internode_application_send_queue_reserve_global_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_application_send_queue_reserve_global_capacity = new DataStorageSpec.IntBytesBound("512MiB");
 
-    public int internode_application_receive_queue_capacity_in_bytes = 1 << 22; // 4MiB
-    public int internode_application_receive_queue_reserve_endpoint_capacity_in_bytes = 1 << 27; // 128MiB
-    public int internode_application_receive_queue_reserve_global_capacity_in_bytes = 1 << 29; // 512MiB
+    @Replaces(oldName = "internode_application_receive_queue_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_application_receive_queue_capacity = new DataStorageSpec.IntBytesBound("4MiB");
+    @Replaces(oldName = "internode_application_receive_queue_reserve_endpoint_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_application_receive_queue_reserve_endpoint_capacity = new DataStorageSpec.IntBytesBound("128MiB");
+    @Replaces(oldName = "internode_application_receive_queue_reserve_global_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound internode_application_receive_queue_reserve_global_capacity = new DataStorageSpec.IntBytesBound("512MiB");
 
     // Defensive settings for protecting Cassandra from true network partitions. See (CASSANDRA-14358) for details.
     // The amount of time to wait for internode tcp connections to establish.
-    public volatile int internode_tcp_connect_timeout_in_ms = 2000;
+    @Replaces(oldName = "internode_tcp_connect_timeout_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound internode_tcp_connect_timeout = new DurationSpec.IntMillisecondsBound("2s");
     // The amount of time unacknowledged data is allowed on a connection before we throw out the connection
     // Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000
     // (it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0
     // (which picks up the OS default) and configure the net.ipv4.tcp_retries2 sysctl to be ~8.
-    public volatile int internode_tcp_user_timeout_in_ms = 30000;
-    // Similar to internode_tcp_user_timeout_in_ms but used specifically for streaming connection.
+    @Replaces(oldName = "internode_tcp_user_timeout_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound internode_tcp_user_timeout = new DurationSpec.IntMillisecondsBound("30s");
+    // Similar to internode_tcp_user_timeout but used specifically for streaming connection.
     // The default is 5 minutes. Increase it or set it to 0 in order to increase the timeout.
-    public volatile int internode_streaming_tcp_user_timeout_in_ms = 300_000; // 5 minutes
+    @Replaces(oldName = "internode_streaming_tcp_user_timeout_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound internode_streaming_tcp_user_timeout = new DurationSpec.IntMillisecondsBound("300s"); // 5 minutes
 
     public boolean start_native_transport = true;
     public int native_transport_port = 9042;
     public Integer native_transport_port_ssl = null;
     public int native_transport_max_threads = 128;
-    public int native_transport_max_frame_size_in_mb = 256;
+    @Replaces(oldName = "native_transport_max_frame_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound native_transport_max_frame_size = new DataStorageSpec.IntMebibytesBound("16MiB");
     public volatile long native_transport_max_concurrent_connections = -1L;
     public volatile long native_transport_max_concurrent_connections_per_ip = -1L;
     public boolean native_transport_flush_in_batches_legacy = false;
     public volatile boolean native_transport_allow_older_protocols = true;
-    public volatile long native_transport_max_concurrent_requests_in_bytes_per_ip = -1L;
-    public volatile long native_transport_max_concurrent_requests_in_bytes = -1L;
-    public int native_transport_receive_queue_capacity_in_bytes = 1 << 20; // 1MiB
+    // The two parameters below were fixed in 4.0+ so that the default is applied when the value is -1 (old name and value format)
+    // or null (new name and value format), rather than <= 0 as in previous versions. Values below -1 result in a configuration exception.
+    @Replaces(oldName = "native_transport_max_concurrent_requests_in_bytes_per_ip", converter = Converters.BYTES_CUSTOM_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec.LongBytesBound native_transport_max_request_data_in_flight_per_ip = null;
+    @Replaces(oldName = "native_transport_max_concurrent_requests_in_bytes", converter = Converters.BYTES_CUSTOM_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec.LongBytesBound native_transport_max_request_data_in_flight = null;
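+    // e.g. the old "native_transport_max_concurrent_requests_in_bytes: -1" is equivalent to leaving the new
+    // native_transport_max_request_data_in_flight unset (null), in which case the default is applied.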
+    public volatile boolean native_transport_rate_limiting_enabled = false;
+    public volatile int native_transport_max_requests_per_second = 1000000;
+    @Replaces(oldName = "native_transport_receive_queue_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntBytesBound native_transport_receive_queue_capacity = new DataStorageSpec.IntBytesBound("1MiB");
 
     @Deprecated
     public Integer native_transport_max_negotiable_protocol_version = null;
 
     /**
-     * Max size of values in SSTables, in MegaBytes.
-     * Default is the same as the native protocol frame limit: 256Mb.
+     * Max size of values in SSTables, in mebibytes.
+     * Default is the same as the native protocol frame limit: 256MiB.
      * See AbstractType for how it is used.
      */
-    public int max_value_size_in_mb = 256;
+    @Replaces(oldName = "max_value_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound max_value_size = new DataStorageSpec.IntMebibytesBound("256MiB");
 
     public boolean snapshot_before_compaction = false;
     public boolean auto_snapshot = true;
+
+    /**
+     * When auto_snapshot is true and this property
+     * is set, snapshots created by truncation or
+     * drop use this TTL.
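+     * For example, a value such as "30d" (assuming the usual duration string format) would expire
+     * these snapshots after 30 days; when unset, such snapshots are kept until removed manually.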
+     */
+    public String auto_snapshot_ttl;
+
     public volatile long snapshot_links_per_second = 0;
 
     /* if the size of columns or super-columns are more than this, indexing will kick in */
-    public int column_index_size_in_kb = 64;
-    public volatile int column_index_cache_size_in_kb = 2;
-    public volatile int batch_size_warn_threshold_in_kb = 5;
-    public volatile int batch_size_fail_threshold_in_kb = 50;
+    @Replaces(oldName = "column_index_size_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec.IntKibibytesBound column_index_size = new DataStorageSpec.IntKibibytesBound("64KiB");
+    @Replaces(oldName = "column_index_cache_size_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec.IntKibibytesBound column_index_cache_size = new DataStorageSpec.IntKibibytesBound("2KiB");
+    @Replaces(oldName = "batch_size_warn_threshold_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec.IntKibibytesBound batch_size_warn_threshold = new DataStorageSpec.IntKibibytesBound("5KiB");
+    @Replaces(oldName = "batch_size_fail_threshold_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec.IntKibibytesBound batch_size_fail_threshold = new DataStorageSpec.IntKibibytesBound("50KiB");
+
     public Integer unlogged_batch_across_partitions_warn_threshold = 10;
     public volatile Integer concurrent_compactors;
-    public volatile int compaction_throughput_mb_per_sec = 64;
-    public volatile int compaction_large_partition_warning_threshold_mb = 100;
-    public int min_free_space_per_drive_in_mb = 50;
+    @Replaces(oldName = "compaction_throughput_mb_per_sec", converter = Converters.MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
+    public volatile DataRateSpec.LongBytesPerSecondBound compaction_throughput = new DataRateSpec.LongBytesPerSecondBound("64MiB/s");
+    @Replaces(oldName = "compaction_large_partition_warning_threshold_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public volatile DataStorageSpec.IntMebibytesBound compaction_large_partition_warning_threshold = new DataStorageSpec.IntMebibytesBound("100MiB");
+    @Replaces(oldName = "min_free_space_per_drive_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound min_free_space_per_drive = new DataStorageSpec.IntMebibytesBound("50MiB");
+    public volatile Integer compaction_tombstone_warning_threshold = 100000;
 
     public volatile int concurrent_materialized_view_builders = 1;
     public volatile int reject_repair_compaction_threshold = Integer.MAX_VALUE;
@@ -235,8 +334,13 @@
     @Deprecated
     public int max_streaming_retries = 3;
 
-    public volatile int stream_throughput_outbound_megabits_per_sec = 200;
-    public volatile int inter_dc_stream_throughput_outbound_megabits_per_sec = 200;
+    @Replaces(oldName = "stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+    public volatile DataRateSpec.LongBytesPerSecondBound stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+    @Replaces(oldName = "inter_dc_stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+    public volatile DataRateSpec.LongBytesPerSecondBound inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+
+    public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+    public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
 
     public String[] data_file_directories = new String[0];
 
@@ -250,88 +354,124 @@
 
     // Commit Log
     public String commitlog_directory;
-    public Integer commitlog_total_space_in_mb;
+    @Replaces(oldName = "commitlog_total_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound commitlog_total_space;
     public CommitLogSync commitlog_sync;
 
     /**
      * @deprecated since 4.0 This value was near useless, and we're not using it anymore
      */
     public double commitlog_sync_batch_window_in_ms = Double.NaN;
-    public double commitlog_sync_group_window_in_ms = Double.NaN;
-    public int commitlog_sync_period_in_ms;
-    public int commitlog_segment_size_in_mb = 32;
+    @Replaces(oldName = "commitlog_sync_group_window_in_ms", converter = Converters.MILLIS_DURATION_DOUBLE, deprecated = true)
+    public DurationSpec.IntMillisecondsBound commitlog_sync_group_window = new DurationSpec.IntMillisecondsBound("0ms");
+    @Replaces(oldName = "commitlog_sync_period_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public DurationSpec.IntMillisecondsBound commitlog_sync_period = new DurationSpec.IntMillisecondsBound("0ms");
+    @Replaces(oldName = "commitlog_segment_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound commitlog_segment_size = new DataStorageSpec.IntMebibytesBound("32MiB");
     public ParameterizedClass commitlog_compression;
     public FlushCompression flush_compression = FlushCompression.fast;
     public int commitlog_max_compression_buffers_in_pool = 3;
-    public Integer periodic_commitlog_sync_lag_block_in_ms;
+    @Replaces(oldName = "periodic_commitlog_sync_lag_block_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public DurationSpec.IntMillisecondsBound periodic_commitlog_sync_lag_block;
     public TransparentDataEncryptionOptions transparent_data_encryption_options = new TransparentDataEncryptionOptions();
 
-    public Integer max_mutation_size_in_kb;
+    @Replaces(oldName = "max_mutation_size_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntKibibytesBound max_mutation_size;
 
     // Change-data-capture logs
     public boolean cdc_enabled = false;
+    // When true, new CDC mutations are rejected/blocked once the maximum CDC storage is reached.
+    // When false, new CDC mutations can always be added, but the oldest CDC commit log segment is removed once storage is full.
+    public volatile boolean cdc_block_writes = true;
     public String cdc_raw_directory;
-    public int cdc_total_space_in_mb = 0;
-    public int cdc_free_space_check_interval_ms = 250;
+    @Replaces(oldName = "cdc_total_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound cdc_total_space = new DataStorageSpec.IntMebibytesBound("0MiB");
+    @Replaces(oldName = "cdc_free_space_check_interval_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public DurationSpec.IntMillisecondsBound cdc_free_space_check_interval = new DurationSpec.IntMillisecondsBound("250ms");
 
     @Deprecated
     public int commitlog_periodic_queue_size = -1;
 
     public String endpoint_snitch;
     public boolean dynamic_snitch = true;
-    public int dynamic_snitch_update_interval_in_ms = 100;
-    public int dynamic_snitch_reset_interval_in_ms = 600000;
+    @Replaces(oldName = "dynamic_snitch_update_interval_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public DurationSpec.IntMillisecondsBound dynamic_snitch_update_interval = new DurationSpec.IntMillisecondsBound("100ms");
+    @Replaces(oldName = "dynamic_snitch_reset_interval_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public DurationSpec.IntMillisecondsBound dynamic_snitch_reset_interval = new DurationSpec.IntMillisecondsBound("10m");
     public double dynamic_snitch_badness_threshold = 1.0;
 
+    public String failure_detector = "FailureDetector";
+
     public EncryptionOptions.ServerEncryptionOptions server_encryption_options = new EncryptionOptions.ServerEncryptionOptions();
     public EncryptionOptions client_encryption_options = new EncryptionOptions();
 
     public InternodeCompression internode_compression = InternodeCompression.none;
 
-    public int hinted_handoff_throttle_in_kb = 1024;
-    public int batchlog_replay_throttle_in_kb = 1024;
+    @Replaces(oldName = "hinted_handoff_throttle_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntKibibytesBound hinted_handoff_throttle = new DataStorageSpec.IntKibibytesBound("1024KiB");
+    @Replaces(oldName = "batchlog_replay_throttle_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntKibibytesBound batchlog_replay_throttle = new DataStorageSpec.IntKibibytesBound("1024KiB");
     public int max_hints_delivery_threads = 2;
-    public int hints_flush_period_in_ms = 10000;
-    public int max_hints_file_size_in_mb = 128;
+    @Replaces(oldName = "hints_flush_period_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public DurationSpec.IntMillisecondsBound hints_flush_period = new DurationSpec.IntMillisecondsBound("10s");
+    @Replaces(oldName = "max_hints_file_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound max_hints_file_size = new DataStorageSpec.IntMebibytesBound("128MiB");
+    public volatile DataStorageSpec.LongBytesBound max_hints_size_per_host = new DataStorageSpec.LongBytesBound("0B"); // 0 means disabled
+
     public ParameterizedClass hints_compression;
+    public volatile boolean auto_hints_cleanup_enabled = false;
 
     public volatile boolean incremental_backups = false;
     public boolean trickle_fsync = false;
-    public int trickle_fsync_interval_in_kb = 10240;
+    @Replaces(oldName = "trickle_fsync_interval_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec.IntKibibytesBound trickle_fsync_interval = new DataStorageSpec.IntKibibytesBound("10240KiB");
 
-    public volatile int sstable_preemptive_open_interval_in_mb = 50;
+    @Nullable
+    @Replaces(oldName = "sstable_preemptive_open_interval_in_mb", converter = Converters.NEGATIVE_MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public volatile DataStorageSpec.IntMebibytesBound sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound("50MiB");
 
     public volatile boolean key_cache_migrate_during_compaction = true;
-    public Long key_cache_size_in_mb = null;
-    public volatile int key_cache_save_period = 14400;
     public volatile int key_cache_keys_to_save = Integer.MAX_VALUE;
+    @Replaces(oldName = "key_cache_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
+    public DataStorageSpec.LongMebibytesBound key_cache_size = null;
+    @Replaces(oldName = "key_cache_save_period", converter = Converters.SECONDS_CUSTOM_DURATION)
+    public volatile DurationSpec.IntSecondsBound key_cache_save_period = new DurationSpec.IntSecondsBound("4h");
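+    // e.g. both "key_cache_save_period: 14400" and "key_cache_save_period: 4h" are accepted,
+    // see the SECONDS_CUSTOM_DURATION converter.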
 
     public String row_cache_class_name = "org.apache.cassandra.cache.OHCProvider";
-    public long row_cache_size_in_mb = 0;
-    public volatile int row_cache_save_period = 0;
+    @Replaces(oldName = "row_cache_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
+    public DataStorageSpec.LongMebibytesBound row_cache_size = new DataStorageSpec.LongMebibytesBound("0MiB");
+    @Replaces(oldName = "row_cache_save_period", converter = Converters.SECONDS_CUSTOM_DURATION)
+    public volatile DurationSpec.IntSecondsBound row_cache_save_period = new DurationSpec.IntSecondsBound("0s");
     public volatile int row_cache_keys_to_save = Integer.MAX_VALUE;
 
-    public Long counter_cache_size_in_mb = null;
-    public volatile int counter_cache_save_period = 7200;
+    @Replaces(oldName = "counter_cache_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
+    public DataStorageSpec.LongMebibytesBound counter_cache_size = null;
+    @Replaces(oldName = "counter_cache_save_period", converter = Converters.SECONDS_CUSTOM_DURATION)
+    public volatile DurationSpec.IntSecondsBound counter_cache_save_period = new DurationSpec.IntSecondsBound("7200s");
     public volatile int counter_cache_keys_to_save = Integer.MAX_VALUE;
 
-    public int cache_load_timeout_seconds = 30;
+    public DataStorageSpec.LongMebibytesBound paxos_cache_size = null;
+
+    @Replaces(oldName = "cache_load_timeout_seconds", converter = Converters.NEGATIVE_SECONDS_DURATION, deprecated = true)
+    public DurationSpec.IntSecondsBound cache_load_timeout = new DurationSpec.IntSecondsBound("30s");
 
     private static boolean isClientMode = false;
     private static Supplier<Config> overrideLoadConfig = null;
 
-    public Integer networking_cache_size_in_mb;
+    @Replaces(oldName = "networking_cache_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound networking_cache_size;
 
-    public Integer file_cache_size_in_mb;
+    @Replaces(oldName = "file_cache_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    public DataStorageSpec.IntMebibytesBound file_cache_size;
 
     public boolean file_cache_enabled = Boolean.getBoolean("cassandra.file_cache_enabled");
 
     /**
-     * Because of the current {@link org.apache.cassandra.utils.memory.BufferPool} slab sizes of 64 kb, we
-     * store in the file cache buffers that divide 64 kb, so we need to round the buffer sizes to powers of two.
+     * Because of the current {@link org.apache.cassandra.utils.memory.BufferPool} slab sizes of 64 KiB, we
+     * store in the file cache buffers that divide 64 KiB, so we need to round the buffer sizes to powers of two.
      * This boolean controls whether they are rounded up or down. Set it to true to round up to the
      * next power of two, set it to false to round down to the previous power of two. Note that buffer sizes are
-     * already rounded to 4 kb and capped between 4 kb minimum and 64 kb maximum by the {@link DiskOptimizationStrategy}.
+     * already rounded to 4 KiB and capped between 4 KiB minimum and 64 KiB maximum by the {@link DiskOptimizationStrategy}.
      * By default, this boolean is set to round down when {@link #disk_optimization_strategy} is {@code ssd},
      * and to round up when it is {@code spinning}.
      */
@@ -350,20 +490,35 @@
 
     public MemtableAllocationType memtable_allocation_type = MemtableAllocationType.heap_buffers;
 
+    public volatile boolean read_thresholds_enabled = false;
+    public volatile DataStorageSpec.LongBytesBound coordinator_read_size_warn_threshold = null;
+    public volatile DataStorageSpec.LongBytesBound coordinator_read_size_fail_threshold = null;
+    public volatile DataStorageSpec.LongBytesBound local_read_size_warn_threshold = null;
+    public volatile DataStorageSpec.LongBytesBound local_read_size_fail_threshold = null;
+    public volatile DataStorageSpec.LongBytesBound row_index_read_size_warn_threshold = null;
+    public volatile DataStorageSpec.LongBytesBound row_index_read_size_fail_threshold = null;
+
     public volatile int tombstone_warn_threshold = 1000;
     public volatile int tombstone_failure_threshold = 100000;
 
     public final ReplicaFilteringProtectionOptions replica_filtering_protection = new ReplicaFilteringProtectionOptions();
 
-    public volatile Long index_summary_capacity_in_mb;
-    public volatile int index_summary_resize_interval_in_minutes = 60;
+    @Replaces(oldName = "index_summary_capacity_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
+    public volatile DataStorageSpec.LongMebibytesBound index_summary_capacity;
+    @Nullable
+    @Replaces(oldName = "index_summary_resize_interval_in_minutes", converter = Converters.MINUTES_CUSTOM_DURATION, deprecated = true)
+    public volatile DurationSpec.IntMinutesBound index_summary_resize_interval = new DurationSpec.IntMinutesBound("60m");
 
-    public volatile int gc_log_threshold_in_ms = 200;
-    public volatile int gc_warn_threshold_in_ms = 1000;
+    @Replaces(oldName = "gc_log_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound gc_log_threshold = new DurationSpec.IntMillisecondsBound("200ms");
+    @Replaces(oldName = "gc_warn_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
+    public volatile DurationSpec.IntMillisecondsBound gc_warn_threshold = new DurationSpec.IntMillisecondsBound("1s");
 
     // TTL for different types of trace events.
-    public int tracetype_query_ttl = (int) TimeUnit.DAYS.toSeconds(1);
-    public int tracetype_repair_ttl = (int) TimeUnit.DAYS.toSeconds(7);
+    @Replaces(oldName = "tracetype_query_ttl", converter = Converters.SECONDS_DURATION, deprecated=true)
+    public DurationSpec.IntSecondsBound trace_type_query_ttl = new DurationSpec.IntSecondsBound("1d");
+    @Replaces(oldName = "tracetype_repair_ttl", converter = Converters.SECONDS_DURATION, deprecated=true)
+    public DurationSpec.IntSecondsBound trace_type_repair_ttl = new DurationSpec.IntSecondsBound("7d");
 
     /**
      * Maintain statistics on whether writes achieve the ideal consistency level
@@ -372,6 +527,9 @@
     public volatile ConsistencyLevel ideal_consistency_level = null;
 
     @Deprecated
+    public int windows_timer_interval = 0;
+
+    @Deprecated
     public String otc_coalescing_strategy = "DISABLED";
 
     @Deprecated
@@ -385,24 +543,31 @@
     @Deprecated
     public volatile int otc_backlog_expiration_interval_ms = otc_backlog_expiration_interval_ms_default;
 
-    public int windows_timer_interval = 0;
-
     /**
-     * Size of the CQL prepared statements cache in MB.
-     * Defaults to 1/256th of the heap size or 10MB, whichever is greater.
+     * Size of the CQL prepared statements cache in MiB.
+     * Defaults to 1/256th of the heap size or 10MiB, whichever is greater.
      */
-    public Long prepared_statements_cache_size_mb = null;
+    @Replaces(oldName = "prepared_statements_cache_size_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
+    public DataStorageSpec.LongMebibytesBound prepared_statements_cache_size = null;
 
-    public boolean enable_user_defined_functions = false;
-    public boolean enable_scripted_user_defined_functions = false;
+    @Replaces(oldName = "enable_user_defined_functions", converter = Converters.IDENTITY, deprecated = true)
+    public boolean user_defined_functions_enabled = false;
+    @Replaces(oldName = "enable_scripted_user_defined_functions", converter = Converters.IDENTITY, deprecated = true)
+    public boolean scripted_user_defined_functions_enabled = false;
 
-    public boolean enable_materialized_views = false;
+    @Replaces(oldName = "enable_materialized_views", converter = Converters.IDENTITY, deprecated = true)
+    public boolean materialized_views_enabled = false;
 
-    public boolean enable_transient_replication = false;
+    @Replaces(oldName = "enable_transient_replication", converter = Converters.IDENTITY, deprecated = true)
+    public boolean transient_replication_enabled = false;
 
-    public boolean enable_sasi_indexes = false;
+    @Replaces(oldName = "enable_sasi_indexes", converter = Converters.IDENTITY, deprecated = true)
+    public boolean sasi_indexes_enabled = false;
 
-    public volatile boolean enable_drop_compact_storage = false;
+    @Replaces(oldName = "enable_drop_compact_storage", converter = Converters.IDENTITY, deprecated = true)
+    public volatile boolean drop_compact_storage_enabled = false;
+
+    public volatile boolean use_statements_enabled = true;
 
     /**
      * Optionally disable asynchronous UDF execution.
@@ -415,7 +580,10 @@
      *
      * This requires allow_insecure_udfs to be true
      */
-    public boolean enable_user_defined_functions_threads = true;
+    // The parameter below is not present in cassandra.yaml, but to be on the safe side in case anyone was setting it directly,
+    // backward compatibility is still provided (CASSANDRA-15234)
+    @Replaces(oldName = "enable_user_defined_functions_threads", converter = Converters.IDENTITY, deprecated = true)
+    public boolean user_defined_functions_threads_enabled = true;
 
     /**
      * Set this to true to allow running insecure UDFs.
@@ -429,22 +597,24 @@
 
     /**
      * Time in milliseconds after a warning will be emitted to the log and to the client that a UDF runs too long.
-     * (Only valid, if enable_user_defined_functions_threads==true)
+     * (Only valid, if user_defined_functions_threads_enabled==true)
      */
-    public long user_defined_function_warn_timeout = 500;
+    @Replaces(oldName = "user_defined_function_warn_timeout", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public DurationSpec.LongMillisecondsBound user_defined_functions_warn_timeout = new DurationSpec.LongMillisecondsBound("500ms");
     /**
      * Time in milliseconds after a fatal UDF run-time situation is detected and action according to
      * user_function_timeout_policy will take place.
-     * (Only valid, if enable_user_defined_functions_threads==true)
+     * (Only valid, if user_defined_functions_threads_enabled==true)
      */
-    public long user_defined_function_fail_timeout = 1500;
+    @Replaces(oldName = "user_defined_function_fail_timeout", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)
+    public DurationSpec.LongMillisecondsBound user_defined_functions_fail_timeout = new DurationSpec.LongMillisecondsBound("1500ms");
     /**
-     * Defines what to do when a UDF ran longer than user_defined_function_fail_timeout.
+     * Defines what to do when a UDF ran longer than user_defined_functions_fail_timeout.
      * Possible options are:
      * - 'die' - i.e. it is able to emit a warning to the client before the Cassandra Daemon will shut down.
      * - 'die_immediate' - shut down C* daemon immediately (effectively prevent the chance that the client will receive a warning).
      * - 'ignore' - just log - the most dangerous option.
-     * (Only valid, if enable_user_defined_functions_threads==true)
+     * (Only valid, if user_defined_functions_threads_enabled==true)
      */
     public UserFunctionTimeoutPolicy user_function_timeout_policy = UserFunctionTimeoutPolicy.die;
 
@@ -490,6 +660,10 @@
 
     public volatile boolean diagnostic_events_enabled = false;
 
+    // The default keyspace replication factor allows validation of newly created keyspaces
+    // and provides a sensible default when the user does not specify a replication factor
+    public volatile int default_keyspace_rf = 1;
+
     /**
      * flags for enabling tracking repaired state of data during reads
      * separate flags for range & single partition reads as single partition reads are only tracked
@@ -517,11 +691,52 @@
     public volatile boolean snapshot_on_repaired_data_mismatch = false;
 
     /**
-     * number of seconds to set nowInSec into the future when performing validation previews against repaired data
+     * Number of seconds to set nowInSec into the future when performing validation previews against repaired data.
      * This attempts to prevent a race where validations on different machines are started on different sides of
      * a tombstone being compacted away.
      */
-    public volatile int validation_preview_purge_head_start_in_sec = 60 * 60;
+
+    @Replaces(oldName = "validation_preview_purge_head_start_in_sec", converter = Converters.NEGATIVE_SECONDS_DURATION, deprecated = true)
+    public volatile DurationSpec.IntSecondsBound validation_preview_purge_head_start = new DurationSpec.IntSecondsBound("3600s");
+
+    public boolean auth_cache_warming_enabled = false;
+
+    // Using String instead of ConsistencyLevel here to keep static initialization from cascading and starting
+    // threads during tool usage mode. See CASSANDRA-12988 and DatabaseDescriptorRefTest for details
+    public volatile String auth_read_consistency_level = "LOCAL_QUORUM";
+    public volatile String auth_write_consistency_level = "EACH_QUORUM";
+
+    /** This feature allows denying access to operations on certain key partitions, intended for use by operators to
+     * provide another tool to manage cluster health vs application access. See CASSANDRA-12106 and CEP-13 for more details.
+     */
+    public volatile boolean partition_denylist_enabled = false;
+
+    public volatile boolean denylist_writes_enabled = true;
+
+    public volatile boolean denylist_reads_enabled = true;
+
+    public volatile boolean denylist_range_reads_enabled = true;
+
+    public DurationSpec.IntSecondsBound denylist_refresh = new DurationSpec.IntSecondsBound("600s");
+
+    public DurationSpec.IntSecondsBound denylist_initial_load_retry = new DurationSpec.IntSecondsBound("5s");
+
+    /** We cap the number of denylisted keys allowed per table to keep things from growing unbounded. Operators will
+     * receive warnings and, on overflow, only the first denylist_max_keys_per_table keys in natural query ordering will be processed.
+     */
+    public volatile int denylist_max_keys_per_table = 1000;
+
+    /** We cap the total number of denylisted keys allowed in the cluster to keep things from growing unbounded.
+     * Operators will receive warnings on initial cache load that there are too many keys and be directed to trim
+     * down the entries to within the configured limits.
+     */
+    public volatile int denylist_max_keys_total = 10000;
+
+    /** Since the denylist in many ways serves to protect the health of the cluster from partitions operators have identified
+     * as being in a bad state, we usually want more robustness than just CL.ONE on operations to/from these tables to
+     * ensure that these safeguards are in place. That said, we allow users to configure this if they're so inclined.
+     */
+    public ConsistencyLevel denylist_consistency_level = ConsistencyLevel.QUORUM;
 
     /**
      * The initial capacity for creating RangeTombstoneList.
@@ -566,6 +781,9 @@
     public volatile boolean auto_optimise_full_repair_streams = false;
     public volatile boolean auto_optimise_preview_repair_streams = false;
 
+    // see CASSANDRA-17048 and the comment in cassandra.yaml
+    public boolean uuid_sstable_identifiers_enabled = false;
+
     /**
      * Client mode means that the process is a pure client, that uses C* code base but does
      * not read or write local C* database files.
@@ -578,11 +796,255 @@
         isClientMode = clientMode;
     }
 
+    @Deprecated // this warning threshold will be replaced by an equivalent guardrail
     public volatile int table_count_warn_threshold = 150;
+    @Deprecated // this warning threshold will be replaced by an equivalent guardrail
     public volatile int keyspace_count_warn_threshold = 40;
 
     public volatile int consecutive_message_errors_threshold = 1;
 
+    public volatile SubnetGroups client_error_reporting_exclusions = new SubnetGroups();
+    public volatile SubnetGroups internode_error_reporting_exclusions = new SubnetGroups();
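+    // A rough sketch of the expected cassandra.yaml shape (the "subnets" key is an assumption based on SubnetGroups):
+    //   client_error_reporting_exclusions:
+    //     subnets:
+    //       - 127.0.0.1
+    //       - 127.0.0.0/31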
+
+    public volatile int keyspaces_warn_threshold = -1;
+    public volatile int keyspaces_fail_threshold = -1;
+    public volatile int tables_warn_threshold = -1;
+    public volatile int tables_fail_threshold = -1;
+    public volatile int columns_per_table_warn_threshold = -1;
+    public volatile int columns_per_table_fail_threshold = -1;
+    public volatile int secondary_indexes_per_table_warn_threshold = -1;
+    public volatile int secondary_indexes_per_table_fail_threshold = -1;
+    public volatile int materialized_views_per_table_warn_threshold = -1;
+    public volatile int materialized_views_per_table_fail_threshold = -1;
+    public volatile int page_size_warn_threshold = -1;
+    public volatile int page_size_fail_threshold = -1;
+    public volatile int partition_keys_in_select_warn_threshold = -1;
+    public volatile int partition_keys_in_select_fail_threshold = -1;
+    public volatile int in_select_cartesian_product_warn_threshold = -1;
+    public volatile int in_select_cartesian_product_fail_threshold = -1;
+    public volatile Set<String> table_properties_warned = Collections.emptySet();
+    public volatile Set<String> table_properties_ignored = Collections.emptySet();
+    public volatile Set<String> table_properties_disallowed = Collections.emptySet();
+    public volatile Set<ConsistencyLevel> read_consistency_levels_warned = Collections.emptySet();
+    public volatile Set<ConsistencyLevel> read_consistency_levels_disallowed = Collections.emptySet();
+    public volatile Set<ConsistencyLevel> write_consistency_levels_warned = Collections.emptySet();
+    public volatile Set<ConsistencyLevel> write_consistency_levels_disallowed = Collections.emptySet();
+    public volatile boolean user_timestamps_enabled = true;
+    public volatile boolean group_by_enabled = true;
+    public volatile boolean drop_truncate_table_enabled = true;
+    public volatile boolean secondary_indexes_enabled = true;
+    public volatile boolean uncompressed_tables_enabled = true;
+    public volatile boolean compact_tables_enabled = true;
+    public volatile boolean read_before_write_list_operations_enabled = true;
+    public volatile boolean allow_filtering_enabled = true;
+    public volatile DataStorageSpec.LongBytesBound collection_size_warn_threshold = null;
+    public volatile DataStorageSpec.LongBytesBound collection_size_fail_threshold = null;
+    public volatile int items_per_collection_warn_threshold = -1;
+    public volatile int items_per_collection_fail_threshold = -1;
+    public volatile int fields_per_udt_warn_threshold = -1;
+    public volatile int fields_per_udt_fail_threshold = -1;
+    public volatile int data_disk_usage_percentage_warn_threshold = -1;
+    public volatile int data_disk_usage_percentage_fail_threshold = -1;
+    public volatile DataStorageSpec.LongBytesBound data_disk_usage_max_disk_size = null;
+    public volatile int minimum_replication_factor_warn_threshold = -1;
+    public volatile int minimum_replication_factor_fail_threshold = -1;
+
+    public volatile DurationSpec.LongNanosecondsBound streaming_state_expires = new DurationSpec.LongNanosecondsBound("3d");
+    public volatile DataStorageSpec.LongBytesBound streaming_state_size = new DataStorageSpec.LongBytesBound("40MiB");
+
+    public volatile boolean streaming_stats_enabled = true;
+    public volatile DurationSpec.IntSecondsBound streaming_slow_events_log_timeout = new DurationSpec.IntSecondsBound("10s");
+
+    /** The configuration of startup checks. */
+    public volatile Map<StartupCheckType, Map<String, Object>> startup_checks = new HashMap<>();
+
+    public volatile DurationSpec.LongNanosecondsBound repair_state_expires = new DurationSpec.LongNanosecondsBound("3d");
+    public volatile int repair_state_size = 100_000;
+
+    /**
+     * The variants of paxos implementation and semantics supported by Cassandra.
+     */
+    public enum PaxosVariant
+    {
+        /**
+         * v1 Paxos lacks most optimisations. Expect 4RTs for a write and 2RTs for a read.
+         *
+         * With legacy semantics for read/read and rejected write linearizability, i.e. not guaranteed.
+         */
+        v1_without_linearizable_reads_or_rejected_writes,
+
+        /**
+         * v1 Paxos lacks most optimisations. Expect 4RTs for a write and 3RTs for a read.
+         */
+        v1,
+
+        /**
+         * v2 Paxos. With PaxosStatePurging.repaired safe to use ANY Commit consistency.
+         * Expect 2RTs for a write and 1RT for a read.
+         *
+         * With legacy semantics for read/read linearizability, i.e. not guaranteed.
+         */
+        v2_without_linearizable_reads,
+
+        /**
+         * v2 Paxos. With PaxosStatePurging.repaired safe to use ANY Commit consistency.
+         * Expect 2RTs for a write and 1RT for a read.
+         *
+         * With legacy semantics for read/read and rejected write linearizability, i.e. not guaranteed.
+         */
+        v2_without_linearizable_reads_or_rejected_writes,
+
+        /**
+         * v2 Paxos. With PaxosStatePurging.repaired safe to use ANY Commit consistency.
+         * Expect 2RTs for a write, and either 1RT or 2RT for a read.
+         */
+        v2
+    }
+
+    /**
+     * Select the kind of paxos state purging to use. Migration to repaired is recommended, but requires that
+     * regular paxos repairs are performed (which by default run as part of incremental repair).
+     *
+     * Once migrated from legacy it is unsafe to return to legacy, but gc_grace mode may be used in its place
+     * and performs a very similar cleanup process.
+     *
+     * Should only be modified once paxos_variant = v2.
+     */
+    public enum PaxosStatePurging
+    {
+        /**
+         * system.paxos records are written and garbage collected with TTLs. Unsafe to use with Commit consistency ANY.
+         * Once migrated from, cannot be migrated back to safely. Must use gc_grace or repaired instead, as TTLs
+         * will not have been set.
+         */
+        legacy,
+
+        /**
+         * Functionally similar to legacy, but the gc_grace expiry is applied at compaction and read time rather than
+         * using TTLs, so may be safely enabled at any point.
+         */
+        gc_grace,
+
+        /**
+         * Clears system.paxos records only once they are known to be persisted to a quorum of replicas' base tables
+         * through the use of paxos repair. Requires that regular paxos repairs are performed on the cluster
+         * (which by default are included in incremental repairs if paxos_variant = v2).
+         *
+         * This setting permits the use of Commit consistency ANY or LOCAL_QUORUM without any loss of durability or consistency,
+         * saving 1 RT.
+         */
+        repaired;
+
+        public static PaxosStatePurging fromBoolean(boolean enabled)
+        {
+            return enabled ? repaired : gc_grace;
+        }
+    }
+
+    /**
+     * See {@link PaxosVariant}. Defaults to v1, recommend upgrading to v2 at earliest opportunity.
+     */
+    public volatile PaxosVariant paxos_variant = PaxosVariant.v1;
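+    // e.g. set "paxos_variant: v2" in cassandra.yaml to opt in to the newer implementation described above.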
+
+    /**
+     * If true, paxos topology change repair will not run on a topology change - this option should only be used in
+     * rare operational circumstances, e.g. where for some reason the repair is impossible to perform (e.g. too few replicas)
+     * and an unsafe topology change must be made
+     */
+    public volatile boolean skip_paxos_repair_on_topology_change = Boolean.getBoolean("cassandra.skip_paxos_repair_on_topology_change");
+
+    /**
+     * A safety margin when purging paxos state information that has been safely replicated to a quorum.
+     * Data for transactions initiated within this grace period will not be expunged.
+     */
+    public volatile DurationSpec.LongSecondsBound paxos_purge_grace_period = new DurationSpec.LongSecondsBound("60s");
+
+    /**
+     * A safety mechanism for detecting incorrect paxos state, which may be due either to a bug or to incorrect usage of LWTs
+     * (most likely unsafe mixing of SERIAL and LOCAL_SERIAL operations), and for deciding how such violations are handled
+     */
+    public enum PaxosOnLinearizabilityViolation
+    {
+        // reject an operation when a linearizability violation is detected.
+        // note this does not guarantee a violation has been averted,
+        // as it may be a prior operation that invalidated the state.
+        fail,
+        // log any detected linearizability violation
+        log,
+        // ignore any detected linearizability violation
+        ignore
+    }
+
+    /**
+     * See {@link PaxosOnLinearizabilityViolation}.
+     *
+     * Default is to ignore, as applications may readily mix SERIAL and LOCAL_SERIAL and this is the most likely source
+     * of linearizability violations. This facility should be activated only for debugging Cassandra or by power users
+     * who are investigating their own application behaviour.
+     */
+    public volatile PaxosOnLinearizabilityViolation paxos_on_linearizability_violations = PaxosOnLinearizabilityViolation.ignore;
+
+    /**
+     * See {@link PaxosStatePurging}. The default is legacy.
+     */
+    public volatile PaxosStatePurging paxos_state_purging;
+
+    /**
+     * Enable/disable paxos repair. This is a global flag that not only determines default behaviour but overrides
+     * explicit paxos repair requests, paxos repair on topology changes and paxos auto repairs.
+     */
+    public volatile boolean paxos_repair_enabled = true;
+
+    /**
+     * If true, paxos topology change repair only requires a global quorum of live nodes. If false,
+     * it requires a global quorum as well as a local quorum for each dc (EACH_QUORUM), with the
+     * exception explained in paxos_topology_repair_strict_each_quorum
+     */
+    public boolean paxos_topology_repair_no_dc_checks = false;
+
+    /**
+     * If true, a quorum will be required for the global and local quorum checks. If false, we will
+     * accept a quorum OR n - 1 live nodes. This is to allow for topologies like 2:2:2, where paxos queries
+     * always use SERIAL, and a single node down in a dc should not preclude a paxos repair
+     */
+    public boolean paxos_topology_repair_strict_each_quorum = false;
+
+    /**
+     * If necessary for operational purposes, permit certain keyspaces to be ignored for paxos topology repairs
+     */
+    public volatile Set<String> skip_paxos_repair_on_topology_change_keyspaces = splitCommaDelimited(System.getProperty("cassandra.skip_paxos_repair_on_topology_change_keyspaces"));
+
+    /**
+     * See {@link org.apache.cassandra.service.paxos.ContentionStrategy}
+     */
+    public String paxos_contention_wait_randomizer;
+
+    /**
+     * See {@link org.apache.cassandra.service.paxos.ContentionStrategy}
+     */
+    public String paxos_contention_min_wait;
+
+    /**
+     * See {@link org.apache.cassandra.service.paxos.ContentionStrategy}
+     */
+    public String paxos_contention_max_wait;
+
+    /**
+     * See {@link org.apache.cassandra.service.paxos.ContentionStrategy}
+     */
+    public String paxos_contention_min_delta;
+
+    /**
+     * The number of keys we may simultaneously attempt to finish incomplete paxos operations for.
+     */
+    public volatile int paxos_repair_parallelism = -1;
+
+    public volatile int max_top_size_partition_count = 10;
+    public volatile int max_top_tombstone_partition_count = 10;
+    public volatile DataStorageSpec.LongBytesBound min_tracked_partition_size = new DataStorageSpec.LongBytesBound("1MiB");
+    public volatile long min_tracked_partition_tombstone_count = 5000;
+    public volatile boolean top_partitions_enabled = true;
+
     public static Supplier<Config> getOverrideLoadConfig()
     {
         return overrideLoadConfig;
@@ -623,6 +1085,7 @@
     public enum MemtableAllocationType
     {
         unslabbed_heap_buffers,
+        unslabbed_heap_buffers_logged,
         heap_buffers,
         offheap_buffers,
         offheap_objects
@@ -671,7 +1134,7 @@
         exception
     }
 
-    private static final List<String> SENSITIVE_KEYS = new ArrayList<String>() {{
+    private static final Set<String> SENSITIVE_KEYS = new HashSet<String>() {{
         add("client_encryption_options");
         add("server_encryption_options");
     }};
diff --git a/src/java/org/apache/cassandra/config/Converters.java b/src/java/org/apache/cassandra/config/Converters.java
new file mode 100644
index 0000000..c898c08
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/Converters.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.config;
+
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
+
+/**
+ * Converters for backward compatibility with the old cassandra.yaml, where duration, data rate and
+ * data storage configuration parameters were provided only as a value and the expected unit was part of the
+ * configuration parameter name (its suffix). (CASSANDRA-15234)
+ * It is important to note that these converters are not intended for the case where we keep the name of a
+ * configuration parameter but want to add a unit to its value; at the moment such a parameter would always
+ * default to the old value provided without a unit. If this functionality is needed at some point, please raise
+ * a Jira ticket. The only exception is the SECONDS_CUSTOM_DURATION converter, which handles three such parameters
+ * (key_cache_save_period, row_cache_save_period, counter_cache_save_period).
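+ *
+ * As a sketch of how these are applied in {@link Config}: annotating a field such as {@code read_request_timeout}
+ * with {@code @Replaces(oldName = "read_request_timeout_in_ms", converter = Converters.MILLIS_DURATION_LONG, deprecated = true)}
+ * lets operators keep {@code read_request_timeout_in_ms: 5000} in cassandra.yaml while the new form
+ * {@code read_request_timeout: 5000ms} is used going forward.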
+ */
+public enum Converters
+{
+    /**
+     * This converter is used when we change the name of a cassandra.yaml configuration parameter but we want to be
+     * able to still use the old name too. No units involved.
+     */
+    IDENTITY(null, null, o -> o, o -> o),
+    MILLIS_DURATION_LONG(Long.class, DurationSpec.LongMillisecondsBound.class,
+                         DurationSpec.LongMillisecondsBound::new,
+                         o -> o == null ? null : o.toMilliseconds()),
+    MILLIS_DURATION_INT(Integer.class, DurationSpec.IntMillisecondsBound.class,
+                        DurationSpec.IntMillisecondsBound::new,
+                        o -> o == null ? null : o.toMilliseconds()),
+    MILLIS_DURATION_DOUBLE(Double.class, DurationSpec.IntMillisecondsBound.class,
+                           o -> Double.isNaN(o) ? new DurationSpec.IntMillisecondsBound(0) :
+                                new DurationSpec.IntMillisecondsBound(o, TimeUnit.MILLISECONDS),
+                           o -> (double) o.toMilliseconds()),
+    /**
+     * This converter is used to support backward compatibility for parameters where in the past -1 was used as a value.
+     * Example: credentials_update_interval_in_ms = -1 and credentials_update_interval = null are equal.
+     */
+    MILLIS_CUSTOM_DURATION(Integer.class, DurationSpec.IntMillisecondsBound.class,
+                           o -> o == -1 ? null : new DurationSpec.IntMillisecondsBound(o),
+                           o -> o == null ? -1 : o.toMilliseconds()),
+    SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
+                     DurationSpec.IntSecondsBound::new,
+                     o -> o == null ? null : o.toSeconds()),
+    NEGATIVE_SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
+                              o -> o < 0 ? new DurationSpec.IntSecondsBound(0) : new DurationSpec.IntSecondsBound(o),
+                              o -> o == null ? null : o.toSeconds()),
+    /**
+     * This converter is used to support backward compatibility for Duration parameters where we allowed users to
+     * add a unit to the parameter's value but we didn't change the names. (key_cache_save_period,
+     * row_cache_save_period, counter_cache_save_period)
+     * Example: row_cache_save_period = 0 and row_cache_save_period = 0s (quantity of 0s) are equal.
+     */
+    SECONDS_CUSTOM_DURATION(String.class, DurationSpec.IntSecondsBound.class,
+                            DurationSpec.IntSecondsBound::inSecondsString,
+                            o -> o == null ? null : Long.toString(o.toSeconds())),
+    /**
+     * This converter is used to support backward compatibility for parameters where in the past -1 was used as a value.
+     * Example: index_summary_resize_interval_in_minutes = -1 and index_summary_resize_interval = null are equal.
+     */
+    MINUTES_CUSTOM_DURATION(Integer.class, DurationSpec.IntMinutesBound.class,
+                            o -> o == -1 ? null : new DurationSpec.IntMinutesBound(o),
+                            o -> o == null ? -1 : o.toMinutes()),
+    MEBIBYTES_DATA_STORAGE_LONG(Long.class, DataStorageSpec.LongMebibytesBound.class,
+                                DataStorageSpec.LongMebibytesBound::new,
+                                o -> o == null ? null : o.toMebibytes()),
+    MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
+                               DataStorageSpec.IntMebibytesBound::new,
+                               o -> o == null ? null : o.toMebibytes()),
+    NEGATIVE_MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
+                                        o -> o < 0 ? null : new DataStorageSpec.IntMebibytesBound(o),
+                                        o -> o == null ? -1 : o.toMebibytes()),
+    KIBIBYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntKibibytesBound.class,
+                          DataStorageSpec.IntKibibytesBound::new,
+                          o -> o == null ? null : o.toKibibytes()),
+    BYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntBytesBound.class,
+                      DataStorageSpec.IntBytesBound::new,
+                      o -> o == null ? null : o.toBytes()),
+    /**
+     * This converter is used to support backward compatibility for parameters where in the past a negative number was
+     * used as a value. Example: native_transport_max_concurrent_requests_in_bytes_per_ip = -1 and
+     * native_transport_max_request_data_in_flight_per_ip = null are equal. All negative numbers are printed as 0 in virtual tables.
+     */
+    BYTES_CUSTOM_DATASTORAGE(Long.class, DataStorageSpec.LongBytesBound.class,
+                             o -> o == -1 ? null : new DataStorageSpec.LongBytesBound(o),
+                             o -> o == null ? null : o.toBytes()),
+    MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+                                   i -> new DataRateSpec.LongBytesPerSecondBound(i, MEBIBYTES_PER_SECOND),
+                                   o -> o == null ? null : o.toMebibytesPerSecondAsInt()),
+    /**
+     * This converter is a custom one to support backward compatibility for stream_throughput_outbound and
+     * inter_dc_stream_throughput_outbound, which were provided in megabits per second prior to CASSANDRA-15234.
+     */
+    MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+                                           i -> DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(i),
+                                           o -> o == null ? null : o.toMegabitsPerSecondAsInt());
+    private final Class<?> oldType;
+    private final Class<?> newType;
+    private final Function<Object, Object> convert;
+    private final Function<Object, Object> reverseConvert;
+
+    <Old, New> Converters(Class<Old> oldType, Class<New> newType, Function<Old, New> convert, Function<New, Old> reverseConvert)
+    {
+        this.oldType = oldType;
+        this.newType = newType;
+        this.convert = (Function<Object, Object>) convert;
+        this.reverseConvert = (Function<Object, Object>) reverseConvert;
+    }
+
+    /**
+     * A method to identify the type of the old configuration parameter handled by the converter used for a parameter
+     * annotated with {@link Replaces} in {@link Config}; this is the input type of {@link #convert(Object)} and the
+     * return type of {@link #unconvert(Object)}.
+     *
+     * @return class type
+     */
+    public Class<?> getOldType()
+    {
+        return oldType;
+    }
+
+    /**
+     * Expected return type from {@link #convert(Object)}, and input type to {@link #unconvert(Object)}
+     *
+     * @return type that {@link #convert(Object)} is expected to return
+     */
+    public Class<?> getNewType()
+    {
+        return newType;
+    }
+
+    /**
+     * Apply the converter specified as part of the {@link Replaces} annotation in {@link Config}
+     *
+     * @param value we will use from cassandra.yaml to create a new {@link Config} parameter of type {@link DurationSpec},
+     *              {@link DataRateSpec} or {@link DataStorageSpec}
+     * @return new object of type {@link DurationSpec}, {@link DataRateSpec} or {@link DataStorageSpec}
+     */
+    public Object convert(Object value)
+    {
+        if (value == null) return null;
+        return convert.apply(value);
+    }
+
+    /**
+     * Apply the reverse of the converter specified as part of the {@link Replaces} annotation in {@link Config} to get
+     * a config parameter's value in the old, pre-CASSANDRA-15234 format and unit; used in the virtual tables to ensure
+     * backward compatibility.
+     *
+     * @param value we will use to calculate the output value
+     * @return the numeric value
+     */
+    public Object unconvert(Object value)
+    {
+        return reverseConvert.apply(value);
+    }
+}
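
A minimal, hypothetical sketch (not part of this patch) of how one of the converters above round-trips a legacy "_in_ms" value into the new DurationSpec-based representation and back. The class name and sample value are illustrative only, and it assumes DurationSpec.LongMillisecondsBound exposes the long constructor and toMilliseconds() that the converter's lambdas reference above.

import org.apache.cassandra.config.Converters;
import org.apache.cassandra.config.DurationSpec;

public class ConverterRoundTripSketch
{
    public static void main(String[] args)
    {
        // old cassandra.yaml style, e.g. request_timeout_in_ms: 10000
        Long oldMillis = 10000L;

        // forward conversion, as applied when the old name is read from cassandra.yaml
        DurationSpec.LongMillisecondsBound spec =
            (DurationSpec.LongMillisecondsBound) Converters.MILLIS_DURATION_LONG.convert(oldMillis);

        // reverse conversion, as used by the virtual tables to expose the legacy numeric value
        Object legacyMillis = Converters.MILLIS_DURATION_LONG.unconvert(spec);

        System.out.println(spec + " <-> " + legacyMillis); // e.g. 10000ms <-> 10000
    }
}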
diff --git a/src/java/org/apache/cassandra/config/DataRateSpec.java b/src/java/org/apache/cassandra/config/DataRateSpec.java
new file mode 100644
index 0000000..1ec2d1e
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/DataRateSpec.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import com.google.common.math.DoubleMath;
+import com.google.common.primitives.Ints;
+
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
+
+/**
+ * Represents a data rate used for Cassandra configuration. It gives users the opportunity to add a unit to the
+ * configuration parameter value. (CASSANDRA-15234)
+ */
+public abstract class DataRateSpec
+{
+    /**
+     * The Regexp used to parse the rate provided as String in cassandra.yaml.
+     */
+    private static final Pattern UNITS_PATTERN = Pattern.compile("^(\\d+)(MiB/s|KiB/s|B/s)$");
+
+    private final long quantity;
+
+    private final DataRateUnit unit;
+
+    private DataRateSpec(String value)
+    {
+        //parse the string field value
+        Matcher matcher = UNITS_PATTERN.matcher(value);
+
+        if (!matcher.find())
+            throw new IllegalArgumentException("Invalid data rate: " + value + " Accepted units: MiB/s, KiB/s, B/s where " +
+                                                "case matters and " + "only non-negative values are valid");
+
+        quantity = Long.parseLong(matcher.group(1));
+        unit = DataRateUnit.fromSymbol(matcher.group(2));
+    }
+
+    private DataRateSpec(String value, DataRateUnit minUnit, long max)
+    {
+        this(value);
+
+        validateQuantity(value, quantity(), unit(), minUnit, max);
+    }
+
+    private DataRateSpec(long quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
+    {
+        this.quantity = quantity;
+        this.unit = unit;
+
+        validateQuantity(quantity, unit, minUnit, max);
+    }
+
+    private static void validateQuantity(String value, double quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
+    {
+        // negatives are not allowed by the regex pattern
+        if (minUnit.convert(quantity, unit) >= max)
+            throw new IllegalArgumentException("Invalid data rate: " + value + ". It shouldn't be more than " +
+                                             (max - 1) + " in " + minUnit.name().toLowerCase());
+    }
+
+    private static void validateQuantity(double quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
+    {
+        if (quantity < 0)
+            throw new IllegalArgumentException("Invalid data rate: value must be non-negative");
+
+        if (minUnit.convert(quantity, unit) >= max)
+            throw new IllegalArgumentException(String.format("Invalid data rate: %s %s. It shouldn't be more than %d in %s",
+                                                       quantity, unit.name().toLowerCase(),
+                                                       max - 1, minUnit.name().toLowerCase()));
+    }
+
+    // get vs no-get prefix is not consistent in the code base, but for classes involved with config parsing, it is
+    // important to be explicit about get/set as this changes how parsing is done; this class is a data type and is
+    // not nested, so having get/set can confuse parsing into thinking this is a nested type
+    /**
+     * @return the data rate unit assigned.
+     */
+    public DataRateUnit unit()
+    {
+        return unit;
+    }
+
+    /**
+     * @return the data rate quantity.
+     */
+    private double quantity()
+    {
+        return quantity;
+    }
+
+    /**
+     * @return the data rate in bytes per second
+     */
+    public double toBytesPerSecond()
+    {
+        return unit.toBytesPerSecond(quantity);
+    }
+
+    /**
+     * Returns the data rate in bytes per second as an {@code int}
+     *
+     * @return the data rate in bytes per second or {@code Integer.MAX_VALUE} if the rate is too large.
+     */
+    public int toBytesPerSecondAsInt()
+    {
+        return Ints.saturatedCast(Math.round(toBytesPerSecond()));
+    }
+
+    /**
+     * @return the data rate in kibibytes per second
+     */
+    public double toKibibytesPerSecond()
+    {
+        return unit.toKibibytesPerSecond(quantity);
+    }
+
+    /**
+     * Returns the data rate in kibibytes per second as an {@code int}
+     *
+     * @return the data rate in kibibytes per second or {@code Integer.MAX_VALUE} if the number of kibibytes is too large.
+     */
+    public int toKibibytesPerSecondAsInt()
+    {
+        return Ints.saturatedCast(Math.round(toKibibytesPerSecond()));
+    }
+
+    /**
+     * @return the data rate in mebibytes per second
+     */
+    public double toMebibytesPerSecond()
+    {
+        return unit.toMebibytesPerSecond(quantity);
+    }
+
+    /**
+     * Returns the data rate in mebibytes per second as an {@code int}
+     *
+     * @return the data rate in mebibytes per second or {@code Integer.MAX_VALUE} if the number of mebibytes is too large.
+     */
+    public int toMebibytesPerSecondAsInt()
+    {
+        return Ints.saturatedCast(Math.round(toMebibytesPerSecond()));
+    }
+
+    /**
+     * This method is required in order to support backward compatibility with the old unit used for a few Data Rate
+     * parameters before CASSANDRA-15234
+     *
+     * @return the data rate in megabits per second.
+     */
+    public double toMegabitsPerSecond()
+    {
+        return unit.toMegabitsPerSecond(quantity);
+    }
+
+    /**
+     * Returns the data rate in megabits per second as an {@code int}. This method is required in order to support
+     * backward compatibility with the old unit used for a few Data Rate parameters before CASSANDRA-15234
+     *
+     * @return the data rate in megabits per second or {@code Integer.MAX_VALUE} if the number of megabits is too large.
+     */
+    public int toMegabitsPerSecondAsInt()
+    {
+        return Ints.saturatedCast(Math.round(toMegabitsPerSecond()));
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(unit.toKibibytesPerSecond(quantity));
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        if (this == obj)
+            return true;
+
+        if (!(obj instanceof DataRateSpec))
+            return false;
+
+        DataRateSpec other = (DataRateSpec) obj;
+        if (unit == other.unit)
+            return quantity == other.quantity;
+
+        // Due to overflows we can only guarantee that the 2 data rates are equal if we get the same results
+        // doing the conversion in both directions.
+        return unit.convert(other.quantity, other.unit) == quantity && other.unit.convert(quantity, unit) == other.quantity;
+    }
+
+    @Override
+    public String toString()
+    {
+        return (DoubleMath.isMathematicalInteger(quantity) ? (long) quantity : quantity) + unit.symbol;
+    }
+
+    /**
+     * Represents a data rate used for Cassandra configuration. The bound is [0, Long.MAX_VALUE) in bytes per second.
+     * If the user sets a different unit, we still validate that the quantity, converted to bytes per second, will not
+     * exceed that upper bound. (CASSANDRA-17571)
+     */
+    public final static class LongBytesPerSecondBound extends DataRateSpec
+    {
+        /**
+         * Creates a {@code DataRateSpec.LongBytesPerSecondBound} of the specified amount.
+         *
+         * @param value the data rate
+         */
+        public LongBytesPerSecondBound(String value)
+        {
+            super(value, BYTES_PER_SECOND, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DataRateSpec.LongBytesPerSecondBound} of the specified amount in the specified unit.
+         *
+         * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in bytes per second
+         * @param unit     in which the provided quantity is
+         */
+        public LongBytesPerSecondBound(long quantity, DataRateUnit unit)
+        {
+            super(quantity, unit, BYTES_PER_SECOND, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DataRateSpec.LongBytesPerSecondBound} of the specified amount in bytes per second.
+         *
+         * @param bytesPerSecond where bytesPerSecond shouldn't be bigger than Long.MAX_VALUE
+         */
+        public LongBytesPerSecondBound(long bytesPerSecond)
+        {
+            this(bytesPerSecond, BYTES_PER_SECOND);
+        }
+
+        // this one should be used only for backward compatibility for stream_throughput_outbound and inter_dc_stream_throughput_outbound
+        // which were in megabits per second in 4.0. Do not start using it for any new properties
+        @Deprecated
+        public static LongBytesPerSecondBound megabitsPerSecondInBytesPerSecond(long megabitsPerSecond)
+        {
+            final long BYTES_PER_MEGABIT = 125_000;
+            long bytesPerSecond = megabitsPerSecond * BYTES_PER_MEGABIT;
+
+            if (megabitsPerSecond >= Integer.MAX_VALUE)
+                throw new IllegalArgumentException("Invalid data rate: " + megabitsPerSecond + " megabits per second; " +
+                                                   "stream_throughput_outbound and inter_dc_stream_throughput_outbound" +
+                                                   " should be between 0 and " + (Integer.MAX_VALUE - 1) + " in megabits per second");
+
+            return new LongBytesPerSecondBound(bytesPerSecond, BYTES_PER_SECOND);
+        }
+    }
+
+    public enum DataRateUnit
+    {
+        BYTES_PER_SECOND("B/s")
+        {
+            public double toBytesPerSecond(double d)
+            {
+                return d;
+            }
+
+            public double toKibibytesPerSecond(double d)
+            {
+                return d / 1024.0;
+            }
+
+            public double toMebibytesPerSecond(double d)
+            {
+                return d / (1024.0 * 1024.0);
+            }
+
+            public double toMegabitsPerSecond(double d)
+            {
+                return (d / 125000.0);
+            }
+
+            public double convert(double source, DataRateUnit sourceUnit)
+            {
+                return sourceUnit.toBytesPerSecond(source);
+            }
+        },
+        KIBIBYTES_PER_SECOND("KiB/s")
+        {
+            public double toBytesPerSecond(double d)
+            {
+                return x(d, 1024.0, (MAX / 1024.0));
+            }
+
+            public double toKibibytesPerSecond(double d)
+            {
+                return d;
+            }
+
+            public double toMebibytesPerSecond(double d)
+            {
+                return d / 1024.0;
+            }
+
+            public double toMegabitsPerSecond(double d)
+            {
+                return d / 122.0703125; // 1 Mbit/s = 125000 B/s = 122.0703125 KiB/s
+            }
+
+            public double convert(double source, DataRateUnit sourceUnit)
+            {
+                return sourceUnit.toKibibytesPerSecond(source);
+            }
+        },
+        MEBIBYTES_PER_SECOND("MiB/s")
+        {
+            public double toBytesPerSecond(double d)
+            {
+                return x(d, (1024.0 * 1024.0), (MAX / (1024.0 * 1024.0)));
+            }
+
+            public double toKibibytesPerSecond(double d)
+            {
+                return x(d, 1024.0, (MAX / 1024.0));
+            }
+
+            public double toMebibytesPerSecond(double d)
+            {
+                return d;
+            }
+
+            public double toMegabitsPerSecond(double d)
+            {
+                if (d > MAX / (MEGABITS_PER_MEBIBYTE))
+                    return MAX;
+                return d * MEGABITS_PER_MEBIBYTE;
+            }
+
+            public double convert(double source, DataRateUnit sourceUnit)
+            {
+                return sourceUnit.toMebibytesPerSecond(source);
+            }
+        };
+
+        static final double MAX = Long.MAX_VALUE;
+        static final double MEGABITS_PER_MEBIBYTE = 8.388608;
+
+        /**
+         * Scale d by m, checking for overflow. This has a short name to make above code more readable.
+         */
+        static double x(double d, double m, double over)
+        {
+            assert (over > 0.0) && (over < (MAX - 1)) && (over == (MAX / m));
+
+            if (d > over)
+                return MAX;
+            return d * m;
+        }
+
+        /**
+         * @param symbol the unit symbol
+         * @return the rate unit corresponding to the given symbol
+         */
+        public static DataRateUnit fromSymbol(String symbol)
+        {
+            for (DataRateUnit value : values())
+            {
+                if (value.symbol.equalsIgnoreCase(symbol))
+                    return value;
+            }
+            throw new IllegalArgumentException(String.format("Unsupported data rate unit: %s. Supported units are: %s",
+                                                             symbol, Arrays.stream(values())
+                                                                           .map(u -> u.symbol)
+                                                                           .collect(Collectors.joining(", "))));
+        }
+
+        /**
+         * The unit symbol
+         */
+        private final String symbol;
+
+        DataRateUnit(String symbol)
+        {
+            this.symbol = symbol;
+        }
+
+        public double toBytesPerSecond(double d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public double toKibibytesPerSecond(double d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public double toMebibytesPerSecond(double d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public double toMegabitsPerSecond(double d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public double convert(double source, DataRateUnit sourceUnit)
+        {
+            throw new AbstractMethodError();
+        }
+    }
+}
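
A short, hypothetical usage sketch (not part of this patch) for the DataRateSpec bounds above: it parses a value exactly as cassandra.yaml would supply it and exercises the deprecated megabit helper. The class name is illustrative only; the values in the comments follow from the conversion factors defined in the enum above.

import org.apache.cassandra.config.DataRateSpec;

public class DataRateSpecSketch
{
    public static void main(String[] args)
    {
        // parsed as cassandra.yaml would supply it, e.g. stream_throughput_outbound: 24MiB/s
        DataRateSpec.LongBytesPerSecondBound rate = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");

        System.out.println(rate.toBytesPerSecond());          // 2.5165824E7 (24 * 1024 * 1024)
        System.out.println(rate.toMegabitsPerSecondAsInt());  // 201 (24 * 8.388608, rounded)

        // a 4.0-style value of 200 megabits/s mapped onto the new bytes-per-second bound
        DataRateSpec.LongBytesPerSecondBound legacy =
            DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(200);
        System.out.println(legacy); // 25000000B/s
    }
}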
diff --git a/src/java/org/apache/cassandra/config/DataStorageSpec.java b/src/java/org/apache/cassandra/config/DataStorageSpec.java
new file mode 100644
index 0000000..f0d3aca
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/DataStorageSpec.java
@@ -0,0 +1,647 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import com.google.common.primitives.Ints;
+
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.BYTES;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.KIBIBYTES;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.MEBIBYTES;
+
+/**
+ * Represents an amount of data storage. Wrapper class for Cassandra configuration parameters, giving users the
+ * opportunity to provide config values in cassandra.yaml with a unit of their choice from the available
+ * options. (CASSANDRA-15234)
+ */
+public abstract class DataStorageSpec
+{
+    /**
+     * The Regexp used to parse the storage provided as String.
+     */
+    private static final Pattern UNITS_PATTERN = Pattern.compile("^(\\d+)(GiB|MiB|KiB|B)$");
+
+    private final long quantity;
+
+    private final DataStorageUnit unit;
+
+    private DataStorageSpec(long quantity, DataStorageUnit unit, DataStorageUnit minUnit, long max, String value)
+    {
+        this.quantity = quantity;
+        this.unit = unit;
+
+        validateMinUnit(unit, minUnit, value);
+        validateQuantity(quantity, unit, minUnit, max);
+    }
+
+    private DataStorageSpec(String value, DataStorageUnit minUnit)
+    {
+        //parse the string field value
+        Matcher matcher = UNITS_PATTERN.matcher(value);
+
+        if (matcher.find())
+        {
+            quantity = Long.parseLong(matcher.group(1));
+            unit = DataStorageUnit.fromSymbol(matcher.group(2));
+
+            // this constructor is used only by extended classes for min unit; upper bound and min unit are guarded there accordingly
+        }
+        else
+        {
+            throw new IllegalArgumentException("Invalid data storage: " + value + " Accepted units:" + acceptedUnits(minUnit) +
+                                               " where case matters and only non-negative values are accepted");
+        }
+    }
+
+    private DataStorageSpec(String value, DataStorageUnit minUnit, long max)
+    {
+        this(value, minUnit);
+
+        validateMinUnit(unit, minUnit, value);
+        validateQuantity(value, quantity(), unit(), minUnit, max);
+    }
+
+    private static void validateMinUnit(DataStorageUnit sourceUnit, DataStorageUnit minUnit, String value)
+    {
+        if (sourceUnit.compareTo(minUnit) < 0)
+            throw new IllegalArgumentException(String.format("Invalid data storage: %s Accepted units:%s", value, acceptedUnits(minUnit)));
+    }
+
+    private static String acceptedUnits(DataStorageUnit minUnit)
+    {
+        DataStorageUnit[] units = DataStorageUnit.values();
+        return Arrays.toString(Arrays.copyOfRange(units, minUnit.ordinal(), units.length));
+    }
+
+    private static void validateQuantity(String value, long quantity, DataStorageUnit sourceUnit, DataStorageUnit minUnit, long max)
+    {
+        // no need to validate for negatives as they are not allowed in the first place by the regex
+
+        if (minUnit.convert(quantity, sourceUnit) >= max)
+            throw new IllegalArgumentException("Invalid data storage: " + value + ". It shouldn't be more than " +
+                                               (max - 1) + " in " + minUnit.name().toLowerCase());
+    }
+
+    private static void validateQuantity(long quantity, DataStorageUnit sourceUnit, DataStorageUnit minUnit, long max)
+    {
+        if (quantity < 0)
+            throw new IllegalArgumentException("Invalid data storage: value must be non-negative");
+
+        if (minUnit.convert(quantity, sourceUnit) >= max)
+            throw new IllegalArgumentException(String.format("Invalid data storage: %d %s. It shouldn't be more than %d in %s",
+                                                             quantity, sourceUnit.name().toLowerCase(),
+                                                             max - 1, minUnit.name().toLowerCase()));
+    }
+
+    // get vs no-get prefix is not consistent in the code base, but for classes involved with config parsing, it is
+    // important to be explicit about get/set as this changes how parsing is done; this class is a data type and is
+    // not nested, so having get/set can confuse parsing into thinking this is a nested type
+    /**
+     * @return the data storage quantity.
+     */
+    public long quantity()
+    {
+        return quantity;
+    }
+
+    /**
+     * @return the data storage unit.
+     */
+    public DataStorageUnit unit()
+    {
+        return unit;
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(unit.toKibibytes(quantity));
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        if (this == obj)
+            return true;
+
+        if (!(obj instanceof DataStorageSpec))
+            return false;
+
+        DataStorageSpec other = (DataStorageSpec) obj;
+        if (unit == other.unit)
+            return quantity == other.quantity;
+
+        // Due to overflows we can only guarantee that the 2 storages are equal if we get the same results
+        // doing the conversion in both directions.
+        return unit.convert(other.quantity, other.unit) == quantity && other.unit.convert(quantity, unit) == other.quantity;
+    }
+
+    @Override
+    public String toString()
+    {
+        return quantity + unit.symbol;
+    }
+
+    /**
+     * Represents a data storage quantity used for Cassandra configuration. The bound is [0, Long.MAX_VALUE) in bytes.
+     * If the user sets a different unit, we still validate that the quantity, converted to bytes, will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class LongBytesBound extends DataStorageSpec
+    {
+        /**
+         * Creates a {@code DataStorageSpec.LongBytesBound} of the specified amount.
+         *
+         * @param value the data storage
+         */
+        public LongBytesBound(String value)
+        {
+            super(value, BYTES, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.LongBytesBound} of the specified amount in the specified unit.
+         *
+         * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in bytes
+         * @param unit in which the provided quantity is
+         */
+        public LongBytesBound(long quantity, DataStorageUnit unit)
+        {
+            super(quantity, unit, BYTES, Long.MAX_VALUE, quantity + unit.symbol);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.LongBytesBound} of the specified amount in bytes.
+         *
+         * @param bytes where bytes shouldn't be bigger than Long.MAX_VALUE-1
+         */
+        public LongBytesBound(long bytes)
+        {
+            this(bytes, BYTES);
+        }
+
+        /**
+         * @return the amount of data storage in bytes
+         */
+        public long toBytes()
+        {
+            return unit().toBytes(quantity());
+        }
+    }
+
+    /**
+     * Represents a data storage quantity used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in bytes.
+     * If the user sets a different unit, we still validate that the quantity, converted to bytes, will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class IntBytesBound extends DataStorageSpec
+    {
+        /**
+         * Creates a {@code DataStorageSpec.IntBytesBound} of the specified amount.
+         *
+         * @param value the data storage
+         */
+        public IntBytesBound(String value)
+        {
+            super(value, BYTES, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.IntBytesBound} of the specified amount in the specified unit.
+         *
+         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in bytes
+         * @param unit in which the provided quantity is
+         */
+        public IntBytesBound(long quantity, DataStorageUnit unit)
+        {
+            super(quantity, unit, BYTES, Integer.MAX_VALUE, quantity + unit.symbol);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.IntBytesBound} of the specified amount in bytes.
+         *
+         * @param bytes where bytes shouldn't be bigger than Integer.MAX_VALUE-1
+         */
+        public IntBytesBound(long bytes)
+        {
+            this(bytes, BYTES);
+        }
+
+        /**
+         * Returns the amount of data storage in bytes as an {@code int}
+         *
+         * @return the amount of data storage in bytes or {@code Integer.MAX_VALUE} if the number of bytes is too large.
+         */
+        public int toBytes()
+        {
+            return Ints.saturatedCast(unit().toBytes(quantity()));
+        }
+    }
+
+    /**
+     * Represents a data storage quantity used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in kibibytes.
+     * If the user sets a different unit, we still validate that the quantity, converted to kibibytes, will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class IntKibibytesBound extends DataStorageSpec
+    {
+        /**
+         * Creates a {@code DataStorageSpec.IntKibibytesBound} of the specified amount.
+         *
+         * @param value the data storage
+         */
+        public IntKibibytesBound(String value)
+        {
+            super(value, KIBIBYTES, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.IntKibibytesBound} of the specified amount in the specified unit.
+         *
+         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in kibibytes
+         * @param unit in which the provided quantity is
+         */
+        public IntKibibytesBound(long quantity, DataStorageUnit unit)
+        {
+            super(quantity, unit, KIBIBYTES, Integer.MAX_VALUE, quantity + unit.symbol);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.IntKibibytesBound} of the specified amount in kibibytes.
+         *
+         * @param kibibytes where kibibytes shouldn't be bigger than Integer.MAX_VALUE-1
+         */
+        public IntKibibytesBound(long kibibytes)
+        {
+            this(kibibytes, KIBIBYTES);
+        }
+
+        /**
+         * Returns the amount of data storage in bytes as an {@code int}
+         *
+         * @return the amount of data storage in bytes or {@code Integer.MAX_VALUE} if the number of bytes is too large.
+         */
+        public int toBytes()
+        {
+            return Ints.saturatedCast(unit().toBytes(quantity()));
+        }
+
+        /**
+         * Returns the amount of data storage in kibibytes as an {@code int}
+         *
+         * @return the amount of data storage in kibibytes or {@code Integer.MAX_VALUE} if the number of kibibytes is too large.
+         */
+        public int toKibibytes()
+        {
+            return Ints.saturatedCast(unit().toKibibytes(quantity()));
+        }
+
+        /**
+         * @return the amount of data storage in bytes.
+         */
+        public long toBytesInLong()
+        {
+           return unit().toBytes(quantity());
+        }
+    }
+
+    /**
+     * Represents a data storage quantity used for Cassandra configuration. The bound is [0, Long.MAX_VALUE) in mebibytes.
+     * If the user sets a different unit, we still validate that the quantity, converted to mebibytes, will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class LongMebibytesBound extends DataStorageSpec
+    {
+        /**
+         * Creates a {@code DataStorageSpec.LongMebibytesBound} of the specified amount.
+         *
+         * @param value the data storage
+         */
+        public LongMebibytesBound(String value)
+        {
+            super(value, MEBIBYTES, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.LongMebibytesBound} of the specified amount in the specified unit.
+         *
+         * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in mebibytes
+         * @param unit in which the provided quantity is
+         */
+        public LongMebibytesBound(long quantity, DataStorageUnit unit)
+        {
+            super(quantity, unit, MEBIBYTES, Long.MAX_VALUE, quantity + unit.symbol);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.LongMebibytesBound} of the specified amount in mebibytes.
+         *
+         * @param mebibytes where mebibytes shouldn't be bigger than Long.MAX_VALUE-1
+         */
+        public LongMebibytesBound(long mebibytes)
+        {
+            this(mebibytes, MEBIBYTES);
+        }
+
+        /**
+         * @return the amount of data storage in bytes
+         */
+        public long toBytes()
+        {
+            return unit().toBytes(quantity());
+        }
+
+        /**
+         * @return the amount of data storage in kibibytes
+         */
+        public long toKibibytes()
+        {
+            return unit().toKibibytes(quantity());
+        }
+
+        /**
+         * @return the amount of data storage in mebibytes
+         */
+        public long toMebibytes()
+        {
+            return unit().toMebibytes(quantity());
+        }
+    }
+
+    /**
+     * Represents a data storage quantity used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in mebibytes.
+     * If the user sets a different unit, we still validate that the quantity, converted to mebibytes, will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class IntMebibytesBound extends DataStorageSpec
+    {
+        /**
+         * Creates a {@code DataStorageSpec.IntMebibytesBound} of the specified amount.
+         *
+         * @param value the data storage
+         */
+        public IntMebibytesBound(String value)
+        {
+            super(value, MEBIBYTES, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.IntMebibytesBound} of the specified amount in the specified unit.
+         *
+         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in mebibytes
+         * @param unit in which the provided quantity is
+         */
+        public IntMebibytesBound(long quantity, DataStorageUnit unit)
+        {
+            super(quantity, unit, MEBIBYTES, Integer.MAX_VALUE, quantity + unit.symbol);
+        }
+
+        /**
+         * Creates a {@code DataStorageSpec.IntMebibytesBound} of the specified amount in mebibytes.
+         *
+         * @param mebibytes where mebibytes shouldn't be bigger than Integer.MAX_VALUE-1
+         */
+        public IntMebibytesBound(long mebibytes)
+        {
+            this(mebibytes, MEBIBYTES);
+        }
+
+        /**
+         * Returns the amount of data storage in bytes as an {@code int}
+         *
+         * @return the amount of data storage in bytes or {@code Integer.MAX_VALUE} if the number of bytes is too large.
+         */
+        public int toBytes()
+        {
+            return Ints.saturatedCast(unit().toBytes(quantity()));
+        }
+
+        /**
+         * Returns the amount of data storage in kibibytes as an {@code int}
+         *
+         * @return the amount of data storage in kibibytes or {@code Integer.MAX_VALUE} if the number of kibibytes is too large.
+         */
+        public int toKibibytes()
+        {
+            return Ints.saturatedCast(unit().toKibibytes(quantity()));
+        }
+
+        /**
+         * Returns the amount of data storage in mebibytes as an {@code int}
+         *
+         * @return the amount of data storage in mebibytes or {@code Integer.MAX_VALUE} if the number of mebibytes is too large.
+         */
+        public int toMebibytes()
+        {
+            return Ints.saturatedCast(unit().toMebibytes(quantity()));
+        }
+
+        /**
+         * Returns the amount of data storage in bytes as {@code long}
+         *
+         * @return the amount of data storage in bytes.
+         */
+        public long toBytesInLong()
+        {
+            return unit().toBytes(quantity());
+        }
+    }
+
+    public enum DataStorageUnit
+    {
+        BYTES("B")
+        {
+            public long toBytes(long d)
+            {
+                return d;
+            }
+
+            public long toKibibytes(long d)
+            {
+                return (d / 1024L);
+            }
+
+            public long toMebibytes(long d)
+            {
+                return (d / (1024L * 1024));
+            }
+
+            public long toGibibytes(long d)
+            {
+                return (d / (1024L * 1024 * 1024));
+            }
+
+            public long convert(long source, DataStorageUnit sourceUnit)
+            {
+                return sourceUnit.toBytes(source);
+            }
+        },
+        KIBIBYTES("KiB")
+        {
+            public long toBytes(long d)
+            {
+                return x(d, 1024L, (MAX / 1024L));
+            }
+
+            public long toKibibytes(long d)
+            {
+                return d;
+            }
+
+            public long toMebibytes(long d)
+            {
+                return (d / 1024L);
+            }
+
+            public long toGibibytes(long d)
+            {
+                return (d / (1024L * 1024));
+            }
+
+            public long convert(long source, DataStorageUnit sourceUnit)
+            {
+                return sourceUnit.toKibibytes(source);
+            }
+        },
+        MEBIBYTES("MiB")
+        {
+            public long toBytes(long d)
+            {
+                return x(d, (1024L * 1024), MAX / (1024L * 1024));
+            }
+
+            public long toKibibytes(long d)
+            {
+                return x(d, 1024L, (MAX / 1024L));
+            }
+
+            public long toMebibytes(long d)
+            {
+                return d;
+            }
+
+            public long toGibibytes(long d)
+            {
+                return (d / 1024L);
+            }
+
+            public long convert(long source, DataStorageUnit sourceUnit)
+            {
+                return sourceUnit.toMebibytes(source);
+            }
+        },
+        GIBIBYTES("GiB")
+        {
+            public long toBytes(long d)
+            {
+                return x(d, (1024L * 1024 * 1024), MAX / (1024L * 1024 * 1024));
+            }
+
+            public long toKibibytes(long d)
+            {
+                return x(d, (1024L * 1024), MAX / (1024L * 1024));
+            }
+
+            public long toMebibytes(long d)
+            {
+                return x(d, 1024L, (MAX / 1024L));
+            }
+
+            public long toGibibytes(long d)
+            {
+                return d;
+            }
+
+            public long convert(long source, DataStorageUnit sourceUnit)
+            {
+                return sourceUnit.toGibibytes(source);
+            }
+        };
+
+        /**
+         * Scale d by m, checking for overflow. This has a short name to make above code more readable.
+         */
+        static long x(long d, long m, long over)
+        {
+            assert (over > 0) && (over < (MAX-1L)) && (over == (MAX / m));
+
+            if (d > over)
+                return Long.MAX_VALUE;
+            return Math.multiplyExact(d, m);
+        }
+
+        /**
+         * @param symbol the unit symbol
+         * @return the memory unit corresponding to the given symbol
+         */
+        public static DataStorageUnit fromSymbol(String symbol)
+        {
+            for (DataStorageUnit value : values())
+            {
+                if (value.symbol.equalsIgnoreCase(symbol))
+                    return value;
+            }
+            throw new IllegalArgumentException(String.format("Unsupported data storage unit: %s. Supported units are: %s",
+                                                           symbol, Arrays.stream(values())
+                                                                         .map(u -> u.symbol)
+                                                                         .collect(Collectors.joining(", "))));
+        }
+
+        static final long MAX = Long.MAX_VALUE;
+
+        /**
+         * The unit symbol
+         */
+        private final String symbol;
+
+        DataStorageUnit(String symbol)
+        {
+            this.symbol = symbol;
+        }
+
+        public long toBytes(long d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public long toKibibytes(long d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public long toMebibytes(long d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public long toGibibytes(long d)
+        {
+            throw new AbstractMethodError();
+        }
+
+        public long convert(long source, DataStorageUnit sourceUnit)
+        {
+            throw new AbstractMethodError();
+        }
+    }
+}
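
A short, hypothetical usage sketch (not part of this patch) for the DataStorageSpec bounds above, showing unit conversions and the rejection of a unit smaller than the bound's minimum unit. The class name and sample values are illustrative only; the expected outputs in the comments follow from the conversions defined in the enum above.

import org.apache.cassandra.config.DataStorageSpec;

public class DataStorageSpecSketch
{
    public static void main(String[] args)
    {
        // e.g. memtable_heap_space: 2048MiB in cassandra.yaml
        DataStorageSpec.IntMebibytesBound heap = new DataStorageSpec.IntMebibytesBound("2048MiB");

        System.out.println(heap.toMebibytes());   // 2048
        System.out.println(heap.toKibibytes());   // 2097152
        System.out.println(heap.toBytesInLong()); // 2147483648 (too large for toBytes(), which would saturate)

        // a unit below the bound's minimum unit is rejected up front
        try
        {
            new DataStorageSpec.IntMebibytesBound("10KiB");
        }
        catch (IllegalArgumentException e)
        {
            System.out.println(e.getMessage()); // error listing the accepted units for this bound
        }
    }
}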
diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index 8893f58..a04e85c 100644
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@ -17,30 +17,41 @@
  */
 package org.apache.cassandra.config;
 
-import java.io.File;
 import java.io.IOException;
-import java.net.*;
+import java.net.Inet4Address;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.net.UnknownHostException;
 import java.nio.file.FileStore;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.function.Supplier;
 
+import javax.annotation.Nullable;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.primitives.Ints;
 import com.google.common.primitives.Longs;
 import com.google.common.util.concurrent.RateLimiter;
-
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.audit.AuditLogOptions;
-import org.apache.cassandra.fql.FullQueryLoggerOptions;
 import org.apache.cassandra.auth.AllowAllInternodeAuthenticator;
 import org.apache.cassandra.auth.AuthConfig;
 import org.apache.cassandra.auth.IAuthenticator;
@@ -49,6 +60,8 @@
 import org.apache.cassandra.auth.INetworkAuthorizer;
 import org.apache.cassandra.auth.IRoleManager;
 import org.apache.cassandra.config.Config.CommitLogSync;
+import org.apache.cassandra.config.Config.PaxosOnLinearizabilityViolation;
+import org.apache.cassandra.config.Config.PaxosStatePurging;
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.commitlog.AbstractCommitLogSegmentManager;
 import org.apache.cassandra.db.commitlog.CommitLog;
@@ -56,9 +69,13 @@
 import org.apache.cassandra.db.commitlog.CommitLogSegmentManagerStandard;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.fql.FullQueryLoggerOptions;
+import org.apache.cassandra.gms.IFailureDetector;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.DiskOptimizationStrategy;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.PathUtils;
 import org.apache.cassandra.io.util.SpinningDiskOptimizationStrategy;
 import org.apache.cassandra.io.util.SsdDiskOptimizationStrategy;
 import org.apache.cassandra.locator.DynamicEndpointSnitch;
@@ -70,16 +87,18 @@
 import org.apache.cassandra.security.EncryptionContext;
 import org.apache.cassandra.security.SSLFactory;
 import org.apache.cassandra.service.CacheService.CacheType;
+import org.apache.cassandra.service.paxos.Paxos;
 import org.apache.cassandra.utils.FBUtilities;
 
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.commons.lang3.StringUtils;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.cassandra.config.CassandraRelevantProperties.OS_ARCH;
 import static org.apache.cassandra.config.CassandraRelevantProperties.SUN_ARCH_DATA_MODEL;
-import static org.apache.cassandra.io.util.FileUtils.ONE_GB;
-import static org.apache.cassandra.io.util.FileUtils.ONE_MB;
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.MEBIBYTES;
+import static org.apache.cassandra.io.util.FileUtils.ONE_GIB;
+import static org.apache.cassandra.io.util.FileUtils.ONE_MIB;
+import static org.apache.cassandra.utils.Clock.Global.logInitializationOutcome;
 
 public class DatabaseDescriptor
 {
@@ -93,7 +112,7 @@
     private static final Logger logger = LoggerFactory.getLogger(DatabaseDescriptor.class);
 
     /**
-     * Tokens are serialized in a Gossip VersionedValue String.  VV are restricted to 64KB
+     * Tokens are serialized in a Gossip VersionedValue String.  VV are restricted to 64KiB
      * when we send them over the wire, which works out to about 1700 tokens.
      */
     private static final int MAX_NUM_TOKENS = 1536;
@@ -103,8 +122,9 @@
     /**
      * Request timeouts can not be less than below defined value (see CASSANDRA-9375)
      */
-    static final long LOWEST_ACCEPTED_TIMEOUT = 10L;
+    static final DurationSpec.LongMillisecondsBound LOWEST_ACCEPTED_TIMEOUT = new DurationSpec.LongMillisecondsBound(10L);
 
+    private static Supplier<IFailureDetector> newFailureDetector;
     private static IEndpointSnitch snitch;
     private static InetAddress listenAddress; // leave null so we can fall through to getLocalHost
     private static InetAddress broadcastAddress;
@@ -126,11 +146,12 @@
     // depend on the configured IAuthenticator, so defer creating it until that's been set.
     private static IRoleManager roleManager;
 
-    private static long preparedStatementsCacheSizeInMB;
+    private static long preparedStatementsCacheSizeInMiB;
 
-    private static long keyCacheSizeInMB;
-    private static long counterCacheSizeInMB;
-    private static long indexSummaryCapacityInMB;
+    private static long keyCacheSizeInMiB;
+    private static long paxosCacheSizeInMiB;
+    private static long counterCacheSizeInMiB;
+    private static long indexSummaryCapacityInMiB;
 
     private static String localDC;
     private static Comparator<Replica> localComparator;
@@ -144,6 +165,7 @@
     private static boolean daemonInitialized;
 
     private static final int searchConcurrencyFactor = Integer.parseInt(System.getProperty(Config.PROPERTY_PREFIX + "search_concurrency_factor", "1"));
+    private static DurationSpec.IntSecondsBound autoSnapshoTtl;
 
     private static volatile boolean disableSTCSInL0 = Boolean.getBoolean(Config.PROPERTY_PREFIX + "disable_stcs_in_l0");
     private static final boolean unsafeSystem = Boolean.getBoolean(Config.PROPERTY_PREFIX + "unsafesystem");
@@ -153,6 +175,10 @@
 
     public static volatile boolean allowUnlimitedConcurrentValidations = Boolean.getBoolean("cassandra.allow_unlimited_concurrent_validations");
 
+    /** The configuration for guardrails. */
+    private static GuardrailsOptions guardrails;
+    private static StartupChecksOptions startupChecksOptions;
+
     private static Function<CommitLog, AbstractCommitLogSegmentManager> commitLogSegmentMgrProvider = c -> DatabaseDescriptor.isCDCEnabled()
                                        ? new CommitLogSegmentManagerCDC(c, DatabaseDescriptor.getCommitLogLocation())
                                        : new CommitLogSegmentManagerStandard(c, DatabaseDescriptor.getCommitLogLocation());
@@ -257,7 +283,7 @@
         if (clientInitialized)
             return;
         clientInitialized = true;
-
+        setDefaultFailureDetector();
         Config.setClientMode(true);
         conf = new Config();
         diskOptimizationStrategy = new SpinningDiskOptimizationStrategy();
@@ -362,6 +388,10 @@
         applyEncryptionContext();
 
         applySslContext();
+
+        applyGuardrails();
+
+        applyStartupChecks();
     }
 
     private static void applySimpleConfig()
@@ -370,42 +400,56 @@
         //InetAddressAndPort and get the right defaults
         InetAddressAndPort.initializeDefaultPort(getStoragePort());
 
+        validateUpperBoundStreamingConfig();
+
+        if (conf.auto_snapshot_ttl != null)
+        {
+            try
+            {
+                autoSnapshoTtl = new DurationSpec.IntSecondsBound(conf.auto_snapshot_ttl);
+            }
+            catch (IllegalArgumentException e)
+            {
+                throw new ConfigurationException("Invalid value of auto_snapshot_ttl: " + conf.auto_snapshot_ttl, false);
+            }
+        }
+
         if (conf.commitlog_sync == null)
         {
             throw new ConfigurationException("Missing required directive CommitLogSync", false);
         }
 
-        if (conf.commitlog_sync == Config.CommitLogSync.batch)
+        if (conf.commitlog_sync == CommitLogSync.batch)
         {
-            if (conf.commitlog_sync_period_in_ms != 0)
+            if (conf.commitlog_sync_period.toMilliseconds() != 0)
             {
-                throw new ConfigurationException("Batch sync specified, but commitlog_sync_period_in_ms found. Only specify commitlog_sync_batch_window_in_ms when using batch sync", false);
+                throw new ConfigurationException("Batch sync specified, but commitlog_sync_period found.", false);
             }
             logger.debug("Syncing log with batch mode");
         }
         else if (conf.commitlog_sync == CommitLogSync.group)
         {
-            if (Double.isNaN(conf.commitlog_sync_group_window_in_ms) || conf.commitlog_sync_group_window_in_ms <= 0d)
+            if (conf.commitlog_sync_group_window.toMilliseconds() == 0)
             {
-                throw new ConfigurationException("Missing value for commitlog_sync_group_window_in_ms: positive double value expected.", false);
+                throw new ConfigurationException("Missing value for commitlog_sync_group_window.", false);
             }
-            else if (conf.commitlog_sync_period_in_ms != 0)
+            else if (conf.commitlog_sync_period.toMilliseconds() != 0)
             {
-                throw new ConfigurationException("Group sync specified, but commitlog_sync_period_in_ms found. Only specify commitlog_sync_group_window_in_ms when using group sync", false);
+                throw new ConfigurationException("Group sync specified, but commitlog_sync_period found. Only specify commitlog_sync_group_window when using group sync", false);
             }
-            logger.debug("Syncing log with a group window of {}", conf.commitlog_sync_period_in_ms);
+            logger.debug("Syncing log with a group window of {}", conf.commitlog_sync_period.toString());
         }
         else
         {
-            if (conf.commitlog_sync_period_in_ms <= 0)
+            if (conf.commitlog_sync_period.toMilliseconds() == 0)
             {
-                throw new ConfigurationException("Missing value for commitlog_sync_period_in_ms: positive integer expected", false);
+                throw new ConfigurationException("Missing value for commitlog_sync_period.", false);
             }
             else if (!Double.isNaN(conf.commitlog_sync_batch_window_in_ms))
             {
-                throw new ConfigurationException("commitlog_sync_period_in_ms specified, but commitlog_sync_batch_window_in_ms found.  Only specify commitlog_sync_period_in_ms when using periodic sync.", false);
+                throw new ConfigurationException("commitlog_sync_period specified, but commitlog_sync_batch_window found.  Only specify commitlog_sync_period when using periodic sync.", false);
             }
-            logger.debug("Syncing log with a period of {}", conf.commitlog_sync_period_in_ms);
+            logger.debug("Syncing log with a period of {}", conf.commitlog_sync_period.toString());
         }
 
         /* evaluate the DiskAccessMode Config directive, which also affects indexAccessMode selection */
@@ -427,11 +471,6 @@
             logger.info("DiskAccessMode is {}, indexAccessMode is {}", conf.disk_access_mode, indexAccessMode);
         }
 
-        if (conf.gc_warn_threshold_in_ms < 0)
-        {
-            throw new ConfigurationException("gc_warn_threshold_in_ms must be a positive integer");
-        }
-
         /* phi convict threshold for FailureDetector */
         if (conf.phi_convict_threshold < 5 || conf.phi_convict_threshold > 16)
         {
@@ -455,34 +494,32 @@
         if (conf.concurrent_replicates != null)
             logger.warn("concurrent_replicates has been deprecated and should be removed from cassandra.yaml");
 
-        if (conf.networking_cache_size_in_mb == null)
-            conf.networking_cache_size_in_mb = Math.min(128, (int) (Runtime.getRuntime().maxMemory() / (16 * 1048576)));
+        if (conf.networking_cache_size == null)
+            conf.networking_cache_size = new DataStorageSpec.IntMebibytesBound(Math.min(128, (int) (Runtime.getRuntime().maxMemory() / (16 * 1048576))));
 
-        if (conf.file_cache_size_in_mb == null)
-            conf.file_cache_size_in_mb = Math.min(512, (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)));
+        if (conf.file_cache_size == null)
+            conf.file_cache_size = new DataStorageSpec.IntMebibytesBound(Math.min(512, (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576))));
 
         // round down for SSDs and round up for spinning disks
         if (conf.file_cache_round_up == null)
             conf.file_cache_round_up = conf.disk_optimization_strategy == Config.DiskOptimizationStrategy.spinning;
 
-        if (conf.memtable_offheap_space_in_mb == null)
-            conf.memtable_offheap_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576));
-        if (conf.memtable_offheap_space_in_mb < 0)
-            throw new ConfigurationException("memtable_offheap_space_in_mb must be positive, but was " + conf.memtable_offheap_space_in_mb, false);
+        if (conf.memtable_offheap_space == null)
+            conf.memtable_offheap_space = new DataStorageSpec.IntMebibytesBound((int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)));
         // for the moment, we default to twice as much on-heap space as off-heap, as heap overhead is very large
-        if (conf.memtable_heap_space_in_mb == null)
-            conf.memtable_heap_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576));
-        if (conf.memtable_heap_space_in_mb <= 0)
-            throw new ConfigurationException("memtable_heap_space_in_mb must be positive, but was " + conf.memtable_heap_space_in_mb, false);
-        logger.info("Global memtable on-heap threshold is enabled at {}MB", conf.memtable_heap_space_in_mb);
-        if (conf.memtable_offheap_space_in_mb == 0)
+        if (conf.memtable_heap_space == null)
+            conf.memtable_heap_space = new DataStorageSpec.IntMebibytesBound((int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)));
+        if (conf.memtable_heap_space.toMebibytes() == 0)
+            throw new ConfigurationException("memtable_heap_space must be positive, but was " + conf.memtable_heap_space, false);
+        logger.info("Global memtable on-heap threshold is enabled at {}", conf.memtable_heap_space);
+        if (conf.memtable_offheap_space.toMebibytes() == 0)
             logger.info("Global memtable off-heap threshold is disabled, HeapAllocator will be used instead");
         else
-            logger.info("Global memtable off-heap threshold is enabled at {}MB", conf.memtable_offheap_space_in_mb);
+            logger.info("Global memtable off-heap threshold is enabled at {}", conf.memtable_offheap_space);
 
         if (conf.repair_session_max_tree_depth != null)
         {
-            logger.warn("repair_session_max_tree_depth has been deprecated and should be removed from cassandra.yaml. Use repair_session_space_in_mb instead");
+            logger.warn("repair_session_max_tree_depth has been deprecated and should be removed from cassandra.yaml. Use repair_session_space instead");
             if (conf.repair_session_max_tree_depth < 10)
                 throw new ConfigurationException("repair_session_max_tree_depth should not be < 10, but was " + conf.repair_session_max_tree_depth);
             if (conf.repair_session_max_tree_depth > 20)
@@ -493,27 +530,28 @@
             conf.repair_session_max_tree_depth = 20;
         }
 
-        if (conf.repair_session_space_in_mb == null)
-            conf.repair_session_space_in_mb = Math.max(1, (int) (Runtime.getRuntime().maxMemory() / (16 * 1048576)));
+        if (conf.repair_session_space == null)
+            conf.repair_session_space = new DataStorageSpec.IntMebibytesBound(Math.max(1, (int) (Runtime.getRuntime().maxMemory() / (16 * 1048576))));
 
-        if (conf.repair_session_space_in_mb < 1)
-            throw new ConfigurationException("repair_session_space_in_mb must be > 0, but was " + conf.repair_session_space_in_mb);
-        else if (conf.repair_session_space_in_mb > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)))
-            logger.warn("A repair_session_space_in_mb of " + conf.repair_session_space_in_mb + " megabytes is likely to cause heap pressure");
+        if (conf.repair_session_space.toMebibytes() < 1)
+            throw new ConfigurationException("repair_session_space must be > 0, but was " + conf.repair_session_space);
+        else if (conf.repair_session_space.toMebibytes() > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)))
+            logger.warn("A repair_session_space of " + conf.repair_session_space + " is likely to cause heap pressure");
 
         checkForLowestAcceptedTimeouts(conf);
 
-        checkValidForByteConversion(conf.native_transport_max_frame_size_in_mb,
-                                    "native_transport_max_frame_size_in_mb", ByteUnit.MEBI_BYTES);
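+        // native_transport_max_frame_size is now a DataStorageSpec; keep an explicit bound check so the value still fits in a signed int number of bytes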
+        long valueInBytes = conf.native_transport_max_frame_size.toBytes();
+        if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE - 1)
+        {
+            throw new ConfigurationException(String.format("native_transport_max_frame_size must be a positive value < %dB, but was %dB",
+                                                           Integer.MAX_VALUE,
+                                                           valueInBytes),
+                                             false);
+        }
 
-        checkValidForByteConversion(conf.column_index_size_in_kb,
-                                    "column_index_size_in_kb", ByteUnit.KIBI_BYTES);
-
-        checkValidForByteConversion(conf.column_index_cache_size_in_kb,
-                                    "column_index_cache_size_in_kb", ByteUnit.KIBI_BYTES);
-
-        checkValidForByteConversion(conf.batch_size_warn_threshold_in_kb,
-                                    "batch_size_warn_threshold_in_kb", ByteUnit.KIBI_BYTES);
+        checkValidForByteConversion(conf.column_index_size, "column_index_size");
+        checkValidForByteConversion(conf.column_index_cache_size, "column_index_cache_size");
+        checkValidForByteConversion(conf.batch_size_warn_threshold, "batch_size_warn_threshold");
 
         if (conf.native_transport_max_negotiable_protocol_version != null)
             logger.warn("The configuration option native_transport_max_negotiable_protocol_version has been deprecated " +
@@ -531,68 +569,52 @@
             conf.hints_directory = storagedirFor("hints");
         }
 
-        if (conf.native_transport_max_concurrent_requests_in_bytes <= 0)
+        if (conf.native_transport_max_request_data_in_flight == null)
         {
-            conf.native_transport_max_concurrent_requests_in_bytes = Runtime.getRuntime().maxMemory() / 10;
+            conf.native_transport_max_request_data_in_flight = new DataStorageSpec.LongBytesBound(Runtime.getRuntime().maxMemory() / 10);
         }
 
-        if (conf.native_transport_max_concurrent_requests_in_bytes_per_ip <= 0)
+        if (conf.native_transport_max_request_data_in_flight_per_ip == null)
         {
-            conf.native_transport_max_concurrent_requests_in_bytes_per_ip = Runtime.getRuntime().maxMemory() / 40;
+            conf.native_transport_max_request_data_in_flight_per_ip = new DataStorageSpec.LongBytesBound(Runtime.getRuntime().maxMemory() / 40);
         }
 
-        if (conf.commitlog_total_space_in_mb == null)
-        {
-            final int preferredSizeInMB = 8192;
-            try
-            {
-                // use 1/4 of available space.  See discussion on #10013 and #10199
-                final long totalSpaceInBytes = guessFileStore(conf.commitlog_directory).getTotalSpace();
-                conf.commitlog_total_space_in_mb = calculateDefaultSpaceInMB("commitlog",
-                                                                             conf.commitlog_directory,
-                                                                             "commitlog_total_space_in_mb",
-                                                                             preferredSizeInMB,
-                                                                             totalSpaceInBytes, 1, 4);
+        if (conf.native_transport_rate_limiting_enabled)
+            logger.info("Native transport rate-limiting enabled at {} requests/second.", conf.native_transport_max_requests_per_second);
+        else
+            logger.info("Native transport rate-limiting disabled.");
 
-            }
-            catch (IOException e)
-            {
-                logger.debug("Error checking disk space", e);
-                throw new ConfigurationException(String.format("Unable to check disk space available to '%s'. Perhaps the Cassandra user does not have the necessary permissions",
-                                                               conf.commitlog_directory), e);
-            }
+        if (conf.commitlog_total_space == null)
+        {
+            final int preferredSizeInMiB = 8192;
+            // use 1/4 of available space.  See discussion on #10013 and #10199
+            final long totalSpaceInBytes = tryGetSpace(conf.commitlog_directory, FileStore::getTotalSpace);
+            int defaultSpaceInMiB = calculateDefaultSpaceInMiB("commitlog",
+                                                               conf.commitlog_directory,
+                                                               "commitlog_total_space",
+                                                               preferredSizeInMiB,
+                                                               totalSpaceInBytes, 1, 4);
+            conf.commitlog_total_space = new DataStorageSpec.IntMebibytesBound(defaultSpaceInMiB);
         }
 
         if (conf.cdc_enabled)
         {
-            // Windows memory-mapped CommitLog files is incompatible with CDC as we hard-link files in cdc_raw. Confirm we don't have both enabled.
-            if (FBUtilities.isWindows && conf.commitlog_compression == null)
-                throw new ConfigurationException("Cannot enable cdc on Windows with uncompressed commitlog.");
-
             if (conf.cdc_raw_directory == null)
             {
                 conf.cdc_raw_directory = storagedirFor("cdc_raw");
             }
 
-            if (conf.cdc_total_space_in_mb == 0)
+            if (conf.cdc_total_space.toMebibytes() == 0)
             {
-                final int preferredSizeInMB = 4096;
-                try
-                {
-                    // use 1/8th of available space.  See discussion on #10013 and #10199 on the CL, taking half that for CDC
-                    final long totalSpaceInBytes = guessFileStore(conf.cdc_raw_directory).getTotalSpace();
-                    conf.cdc_total_space_in_mb = calculateDefaultSpaceInMB("cdc",
-                                                                           conf.cdc_raw_directory,
-                                                                           "cdc_total_space_in_mb",
-                                                                           preferredSizeInMB,
-                                                                           totalSpaceInBytes, 1, 8);
-                }
-                catch (IOException e)
-                {
-                    logger.debug("Error checking disk space", e);
-                    throw new ConfigurationException(String.format("Unable to check disk space available to '%s'. Perhaps the Cassandra user does not have the necessary permissions",
-                                                                   conf.cdc_raw_directory), e);
-                }
+                final int preferredSizeInMiB = 4096;
+                // use 1/8th of available space.  See discussion on #10013 and #10199 on the CL, taking half that for CDC
+                final long totalSpaceInBytes = tryGetSpace(conf.cdc_raw_directory, FileStore::getTotalSpace);
+                int defaultSpaceInMiB = calculateDefaultSpaceInMiB("cdc",
+                                                                   conf.cdc_raw_directory,
+                                                                   "cdc_total_space",
+                                                                   preferredSizeInMiB,
+                                                                   totalSpaceInBytes, 1, 8);
+                conf.cdc_total_space = new DataStorageSpec.IntMebibytesBound(defaultSpaceInMiB);
             }
 
             logger.info("cdc_enabled is true. Starting casssandra node with Change-Data-Capture enabled.");
@@ -604,7 +626,7 @@
         }
         if (conf.data_file_directories == null || conf.data_file_directories.length == 0)
         {
-            conf.data_file_directories = new String[]{ storagedir("data_file_directories") + File.separator + "data" };
+            conf.data_file_directories = new String[]{ storagedir("data_file_directories") + File.pathSeparator() + "data" };
         }
 
         long dataFreeBytes = 0;
@@ -622,9 +644,9 @@
             if (datadir.equals(conf.saved_caches_directory))
                 throw new ConfigurationException("saved_caches_directory must not be the same as any data_file_directories", false);
 
-            dataFreeBytes = saturatedSum(dataFreeBytes, getUnallocatedSpace(datadir));
+            dataFreeBytes = saturatedSum(dataFreeBytes, tryGetSpace(datadir, FileStore::getUnallocatedSpace));
         }
-        if (dataFreeBytes < 64 * ONE_GB) // 64 GB
+        if (dataFreeBytes < 64 * ONE_GIB) // 64 GiB
             logger.warn("Only {} free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots",
                         FBUtilities.prettyPrintMemory(dataFreeBytes));
 
@@ -637,9 +659,9 @@
             if (conf.local_system_data_file_directory.equals(conf.hints_directory))
                 throw new ConfigurationException("local_system_data_file_directory must not be the same as the hints_directory", false);
 
-            long freeBytes = getUnallocatedSpace(conf.local_system_data_file_directory);
+            long freeBytes = tryGetSpace(conf.local_system_data_file_directory, FileStore::getUnallocatedSpace);
 
-            if (freeBytes < ONE_GB)
+            if (freeBytes < ONE_GIB)
                 logger.warn("Only {} free in the system data volume. Consider adding more capacity or removing obsolete snapshots",
                             FBUtilities.prettyPrintMemory(freeBytes));
         }
@@ -683,6 +705,7 @@
 
         applyConcurrentValidations(conf);
         applyRepairCommandPoolSize(conf);
+        applyReadThresholdsValidations(conf);
 
         if (conf.concurrent_materialized_view_builders <= 0)
             throw new ConfigurationException("concurrent_materialized_view_builders should be strictly greater than 0, but was " + conf.concurrent_materialized_view_builders, false);
@@ -692,101 +715,112 @@
 
         try
         {
-            // if prepared_statements_cache_size_mb option was set to "auto" then size of the cache should be "max(1/256 of Heap (in MB), 10MB)"
-            preparedStatementsCacheSizeInMB = (conf.prepared_statements_cache_size_mb == null)
+            // if prepared_statements_cache_size option was set to "auto" then size of the cache should be "max(1/256 of Heap (in MiB), 10MiB)"
+            preparedStatementsCacheSizeInMiB = (conf.prepared_statements_cache_size == null)
                                               ? Math.max(10, (int) (Runtime.getRuntime().maxMemory() / 1024 / 1024 / 256))
-                                              : conf.prepared_statements_cache_size_mb;
+                                              : conf.prepared_statements_cache_size.toMebibytes();
 
-            if (preparedStatementsCacheSizeInMB <= 0)
+            if (preparedStatementsCacheSizeInMiB == 0)
                 throw new NumberFormatException(); // to escape duplicating error message
 
             // we need this assignment for the Settings virtual table - CASSANDRA-17734
-            conf.prepared_statements_cache_size_mb = preparedStatementsCacheSizeInMB;
+            conf.prepared_statements_cache_size = new DataStorageSpec.LongMebibytesBound(preparedStatementsCacheSizeInMiB);
         }
         catch (NumberFormatException e)
         {
-            throw new ConfigurationException("prepared_statements_cache_size_mb option was set incorrectly to '"
-                                             + conf.prepared_statements_cache_size_mb + "', supported values are <integer> >= 0.", false);
+            throw new ConfigurationException("prepared_statements_cache_size option was set incorrectly to '"
+                                             + (conf.prepared_statements_cache_size != null ? conf.prepared_statements_cache_size.toString() : null) + "', supported values are <integer> >= 0.", false);
         }
 
         try
         {
-            // if key_cache_size_in_mb option was set to "auto" then size of the cache should be "min(5% of Heap (in MB), 100MB)
-            keyCacheSizeInMB = (conf.key_cache_size_in_mb == null)
+            // if key_cache_size option was set to "auto" then size of the cache should be "min(5% of Heap (in MiB), 100MiB)"
+            keyCacheSizeInMiB = (conf.key_cache_size == null)
                                ? Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024)), 100)
-                               : conf.key_cache_size_in_mb;
+                               : conf.key_cache_size.toMebibytes();
 
-            if (keyCacheSizeInMB < 0)
+            if (keyCacheSizeInMiB < 0)
                 throw new NumberFormatException(); // to escape duplicating error message
 
             // we need this assignment for the Settings Virtual Table - CASSANDRA-17734
-            conf.key_cache_size_in_mb = keyCacheSizeInMB;
+            conf.key_cache_size = new DataStorageSpec.LongMebibytesBound(keyCacheSizeInMiB);
         }
         catch (NumberFormatException e)
         {
-            throw new ConfigurationException("key_cache_size_in_mb option was set incorrectly to '"
-                                             + conf.key_cache_size_in_mb + "', supported values are <integer> >= 0.", false);
+            throw new ConfigurationException("key_cache_size option was set incorrectly to '"
+                                             + (conf.key_cache_size != null ? conf.key_cache_size.toString() : null) + "', supported values are <integer> >= 0.", false);
         }
 
         try
         {
-            // if counter_cache_size_in_mb option was set to "auto" then size of the cache should be "min(2.5% of Heap (in MB), 50MB)
-            counterCacheSizeInMB = (conf.counter_cache_size_in_mb == null)
+            // if counter_cache_size option was set to "auto" then size of the cache should be "min(2.5% of Heap (in MiB), 50MiB)"
+            counterCacheSizeInMiB = (conf.counter_cache_size == null)
                                    ? Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.025 / 1024 / 1024)), 50)
-                                   : conf.counter_cache_size_in_mb;
+                                   : conf.counter_cache_size.toMebibytes();
 
-            if (counterCacheSizeInMB < 0)
+            if (counterCacheSizeInMiB < 0)
                 throw new NumberFormatException(); // to escape duplicating error message
         }
         catch (NumberFormatException e)
         {
-            throw new ConfigurationException("counter_cache_size_in_mb option was set incorrectly to '"
-                                             + conf.counter_cache_size_in_mb + "', supported values are <integer> >= 0.", false);
+            throw new ConfigurationException("counter_cache_size option was set incorrectly to '"
+                                             + (conf.counter_cache_size != null ? conf.counter_cache_size.toString() : null) + "', supported values are <integer> >= 0.", false);
+        }
+
+        try
+        {
+            // if paxos_cache_size option was set to "auto" then size of the cache should be "min(1% of Heap (in MiB), 50MiB)"
+            paxosCacheSizeInMiB = (conf.paxos_cache_size == null)
+                    ? Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.01 / 1024 / 1024)), 50)
+                    : conf.paxos_cache_size.toMebibytes();
+
+            if (paxosCacheSizeInMiB < 0)
+                throw new NumberFormatException(); // to escape duplicating error message
+        }
+        catch (NumberFormatException e)
+        {
+            throw new ConfigurationException("paxos_cache_size option was set incorrectly to '"
+                    + conf.paxos_cache_size + "', supported values are <integer> >= 0.", false);
         }
 
         // we need this assignment for the Settings virtual table - CASSANDRA-17735
-        conf.counter_cache_size_in_mb = counterCacheSizeInMB;
+        conf.counter_cache_size = new DataStorageSpec.LongMebibytesBound(counterCacheSizeInMiB);
 
         // if set to empty/"auto" then use 5% of Heap size
-        indexSummaryCapacityInMB = (conf.index_summary_capacity_in_mb == null)
+        indexSummaryCapacityInMiB = (conf.index_summary_capacity == null)
                                    ? Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024))
-                                   : conf.index_summary_capacity_in_mb;
+                                   : conf.index_summary_capacity.toMebibytes();
 
-        if (indexSummaryCapacityInMB < 0)
-            throw new ConfigurationException("index_summary_capacity_in_mb option was set incorrectly to '"
-                                             + conf.index_summary_capacity_in_mb + "', it should be a non-negative integer.", false);
+        if (indexSummaryCapacityInMiB < 0)
+            throw new ConfigurationException("index_summary_capacity option was set incorrectly to '"
+                                             + conf.index_summary_capacity.toString() + "', it should be a non-negative integer.", false);
 
         // we need this assignment for the Settings virtual table - CASSANDRA-17735
-        conf.index_summary_capacity_in_mb = indexSummaryCapacityInMB;
+        conf.index_summary_capacity = new DataStorageSpec.LongMebibytesBound(indexSummaryCapacityInMiB);
 
-        if (conf.user_defined_function_fail_timeout < 0)
-            throw new ConfigurationException("user_defined_function_fail_timeout must not be negative", false);
-        if (conf.user_defined_function_warn_timeout < 0)
-            throw new ConfigurationException("user_defined_function_warn_timeout must not be negative", false);
+        if (conf.user_defined_functions_fail_timeout.toMilliseconds() < conf.user_defined_functions_warn_timeout.toMilliseconds())
+            throw new ConfigurationException("user_defined_functions_warn_timeout must be less than user_defined_functions_fail_timeout", false);
 
-        if (conf.user_defined_function_fail_timeout < conf.user_defined_function_warn_timeout)
-            throw new ConfigurationException("user_defined_function_warn_timeout must less than user_defined_function_fail_timeout", false);
-
-        if (!conf.allow_insecure_udfs && !conf.enable_user_defined_functions_threads)
+        if (!conf.allow_insecure_udfs && !conf.user_defined_functions_threads_enabled)
             throw new ConfigurationException("To be able to set enable_user_defined_functions_threads: false you need to set allow_insecure_udfs: true - this is an unsafe configuration and is not recommended.");
 
         if (conf.allow_extra_insecure_udfs)
             logger.warn("Allowing java.lang.System.* access in UDFs is dangerous and not recommended. Set allow_extra_insecure_udfs: false to disable.");
 
-        if(conf.enable_scripted_user_defined_functions)
+        if(conf.scripted_user_defined_functions_enabled)
             logger.warn("JavaScript user-defined functions have been deprecated. You can still use them but the plan is to remove them in the next major version. For more information - CASSANDRA-17280");
 
-        if (conf.commitlog_segment_size_in_mb <= 0)
-            throw new ConfigurationException("commitlog_segment_size_in_mb must be positive, but was "
-                    + conf.commitlog_segment_size_in_mb, false);
-        else if (conf.commitlog_segment_size_in_mb >= 2048)
-            throw new ConfigurationException("commitlog_segment_size_in_mb must be smaller than 2048, but was "
-                    + conf.commitlog_segment_size_in_mb, false);
+        if (conf.commitlog_segment_size.toMebibytes() == 0)
+            throw new ConfigurationException("commitlog_segment_size must be positive, but was "
+                                             + conf.commitlog_segment_size.toString(), false);
+        else if (conf.commitlog_segment_size.toMebibytes() >= 2048)
+            throw new ConfigurationException("commitlog_segment_size must be smaller than 2048, but was "
+                                             + conf.commitlog_segment_size.toString(), false);
 
-        if (conf.max_mutation_size_in_kb == null)
-            conf.max_mutation_size_in_kb = conf.commitlog_segment_size_in_mb * 1024 / 2;
-        else if (conf.commitlog_segment_size_in_mb * 1024 < 2 * conf.max_mutation_size_in_kb)
-            throw new ConfigurationException("commitlog_segment_size_in_mb must be at least twice the size of max_mutation_size_in_kb / 1024", false);
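+        // default the maximum mutation size to half of a commitlog segment, matching the previous *_in_kb behaviour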
+        if (conf.max_mutation_size == null)
+            conf.max_mutation_size = new DataStorageSpec.IntKibibytesBound(conf.commitlog_segment_size.toKibibytes() / 2);
+        else if (conf.commitlog_segment_size.toKibibytes() < 2 * conf.max_mutation_size.toKibibytes())
+            throw new ConfigurationException("commitlog_segment_size must be at least twice the size of max_mutation_size", false);
 
         // native transport encryption options
         if (conf.client_encryption_options != null)
@@ -804,11 +838,11 @@
         if (conf.snapshot_links_per_second < 0)
             throw new ConfigurationException("snapshot_links_per_second must be >= 0");
 
-        if (conf.max_value_size_in_mb <= 0)
-            throw new ConfigurationException("max_value_size_in_mb must be positive", false);
-        else if (conf.max_value_size_in_mb >= 2048)
-            throw new ConfigurationException("max_value_size_in_mb must be smaller than 2048, but was "
-                    + conf.max_value_size_in_mb, false);
+        if (conf.max_value_size.toMebibytes() == 0)
+            throw new ConfigurationException("max_value_size must be positive", false);
+        else if (conf.max_value_size.toMebibytes() >= 2048)
+            throw new ConfigurationException("max_value_size must be smaller than 2048, but was "
+                    + conf.max_value_size.toString(), false);
 
         switch (conf.disk_optimization_strategy)
         {
@@ -824,35 +858,84 @@
         {
             conf.server_encryption_options.applyConfig();
 
-            if (conf.server_encryption_options.enable_legacy_ssl_storage_port &&
+            if (conf.server_encryption_options.legacy_ssl_storage_port_enabled &&
                 conf.server_encryption_options.tlsEncryptionPolicy() == EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED)
             {
-                throw new ConfigurationException("enable_legacy_ssl_storage_port is true (enabled) with internode encryption disabled (none). Enable encryption or disable the legacy ssl storage port.");
+                throw new ConfigurationException("legacy_ssl_storage_port_enabled is true (enabled) with internode encryption disabled (none). Enable encryption or disable the legacy ssl storage port.");
             }
         }
-        Integer maxMessageSize = conf.internode_max_message_size_in_bytes;
-        if (maxMessageSize != null)
+
+        if (conf.internode_max_message_size != null)
         {
-            if (maxMessageSize > conf.internode_application_receive_queue_reserve_endpoint_capacity_in_bytes)
-                throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_receive_queue_reserve_endpoint_capacity_in_bytes", false);
+            long maxMessageSize = conf.internode_max_message_size.toBytes();
 
-            if (maxMessageSize > conf.internode_application_receive_queue_reserve_global_capacity_in_bytes)
-                throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_receive_queue_reserve_global_capacity_in_bytes", false);
+            if (maxMessageSize > conf.internode_application_receive_queue_reserve_endpoint_capacity.toBytes())
+                throw new ConfigurationException("internode_max_message_size must not exceed internode_application_receive_queue_reserve_endpoint_capacity", false);
 
-            if (maxMessageSize > conf.internode_application_send_queue_reserve_endpoint_capacity_in_bytes)
-                throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_send_queue_reserve_endpoint_capacity_in_bytes", false);
+            if (maxMessageSize > conf.internode_application_receive_queue_reserve_global_capacity.toBytes())
+                throw new ConfigurationException("internode_max_message_size must not exceed internode_application_receive_queue_reserve_global_capacity", false);
 
-            if (maxMessageSize > conf.internode_application_send_queue_reserve_global_capacity_in_bytes)
-                throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_send_queue_reserve_global_capacity_in_bytes", false);
+            if (maxMessageSize > conf.internode_application_send_queue_reserve_endpoint_capacity.toBytes())
+                throw new ConfigurationException("internode_max_message_size must not exceed internode_application_send_queue_reserve_endpoint_capacity", false);
+
+            if (maxMessageSize > conf.internode_application_send_queue_reserve_global_capacity.toBytes())
+                throw new ConfigurationException("internode_max_message_size must not exceed internode_application_send_queue_reserve_global_capacity", false);
         }
         else
         {
-            conf.internode_max_message_size_in_bytes =
-                Math.min(conf.internode_application_receive_queue_reserve_endpoint_capacity_in_bytes,
-                         conf.internode_application_send_queue_reserve_endpoint_capacity_in_bytes);
+            long maxMessageSizeInBytes =
+                Math.min(conf.internode_application_receive_queue_reserve_endpoint_capacity.toBytes(),
+                         conf.internode_application_send_queue_reserve_endpoint_capacity.toBytes());
+
+            conf.internode_max_message_size = new DataStorageSpec.IntBytesBound(maxMessageSizeInBytes);
         }
 
         validateMaxConcurrentAutoUpgradeTasksConf(conf.max_concurrent_automatic_sstable_upgrades);
+
+        if (conf.default_keyspace_rf < conf.minimum_replication_factor_fail_threshold)
+        {
+            throw new ConfigurationException(String.format("default_keyspace_rf (%d) cannot be less than minimum_replication_factor_fail_threshold (%d)",
+                                                           conf.default_keyspace_rf, conf.minimum_replication_factor_fail_threshold));
+        }
+
+        if (conf.paxos_repair_parallelism <= 0)
+            conf.paxos_repair_parallelism = Math.max(1, conf.concurrent_writes / 8);
+
+        Paxos.setPaxosVariant(conf.paxos_variant);
+        if (conf.paxos_state_purging == null)
+            conf.paxos_state_purging = PaxosStatePurging.legacy;
+
+        logInitializationOutcome(logger);
+    }
+
+    @VisibleForTesting
+    static void validateUpperBoundStreamingConfig() throws ConfigurationException
+    {
+        // below 2 checks are needed in order to match the pre-CASSANDRA-15234 upper bound for those parameters which were still in megabits per second
+        if (conf.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of stream_throughput_outbound: " + conf.stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of inter_dc_stream_throughput_outbound: " + conf.inter_dc_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of entire_sstable_stream_throughput_outbound: " + conf.entire_sstable_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of entire_sstable_inter_dc_stream_throughput_outbound: " + conf.entire_sstable_inter_dc_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.compaction_throughput.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of compaction_throughput: " + conf.compaction_throughput.toString(), false);
+        }
     }
 
     @VisibleForTesting
@@ -876,9 +959,52 @@
             config.repair_command_pool_size = config.concurrent_validations;
     }
 
+    @VisibleForTesting
+    static void applyReadThresholdsValidations(Config config)
+    {
+        validateReadThresholds("coordinator_read_size", config.coordinator_read_size_warn_threshold, config.coordinator_read_size_fail_threshold);
+        validateReadThresholds("local_read_size", config.local_read_size_warn_threshold, config.local_read_size_fail_threshold);
+        validateReadThresholds("row_index_read_size", config.row_index_read_size_warn_threshold, config.row_index_read_size_fail_threshold);
+    }
+
+    private static void validateReadThresholds(String name, DataStorageSpec.LongBytesBound warn, DataStorageSpec.LongBytesBound fail)
+    {
+        if (fail != null && warn != null && fail.toBytes() < warn.toBytes())
+            throw new ConfigurationException(String.format("%s (%s) must be greater than or equal to %s (%s)",
+                                                           name + "_fail_threshold", fail,
+                                                           name + "_warn_threshold", warn));
+    }
+
+    public static GuardrailsOptions getGuardrailsConfig()
+    {
+        return guardrails;
+    }
+
+    private static void applyGuardrails()
+    {
+        try
+        {
+            guardrails = new GuardrailsOptions(conf);
+        }
+        catch (IllegalArgumentException e)
+        {
+            throw new ConfigurationException("Invalid guardrails configuration: " + e.getMessage(), e);
+        }
+    }
+
+    public static StartupChecksOptions getStartupChecksOptions()
+    {
+        return startupChecksOptions;
+    }
+
+    private static void applyStartupChecks()
+    {
+        startupChecksOptions = new StartupChecksOptions(conf.startup_checks);
+    }
+
     private static String storagedirFor(String type)
     {
-        return storagedir(type + "_directory") + File.separator + type;
+        return storagedir(type + "_directory") + File.pathSeparator() + type;
     }
 
     private static String storagedir(String errMsgType)
@@ -889,20 +1015,20 @@
         return storagedir;
     }
 
-    static int calculateDefaultSpaceInMB(String type, String path, String setting, int preferredSizeInMB, long totalSpaceInBytes, long totalSpaceNumerator, long totalSpaceDenominator)
+    static int calculateDefaultSpaceInMiB(String type, String path, String setting, int preferredSizeInMiB, long totalSpaceInBytes, long totalSpaceNumerator, long totalSpaceDenominator)
     {
-        final long totalSizeInMB = totalSpaceInBytes / ONE_MB;
-        final int minSizeInMB = Ints.saturatedCast(totalSpaceNumerator * totalSizeInMB / totalSpaceDenominator);
+        final long totalSizeInMiB = totalSpaceInBytes / ONE_MIB;
+        final int minSizeInMiB = Ints.saturatedCast(totalSpaceNumerator * totalSizeInMiB / totalSpaceDenominator);
 
-        if (minSizeInMB < preferredSizeInMB)
+        if (minSizeInMiB < preferredSizeInMiB)
         {
             logger.warn("Small {} volume detected at '{}'; setting {} to {}.  You can override this in cassandra.yaml",
-                        type, path, setting, minSizeInMB);
-            return minSizeInMB;
+                        type, path, setting, minSizeInMiB);
+            return minSizeInMiB;
         }
         else
         {
-            return preferredSizeInMB;
+            return preferredSizeInMiB;
         }
     }
 
@@ -1015,6 +1141,9 @@
 
     public static void applySslContext()
     {
+        if (TEST_JVM_DTEST_DISABLE_SSL.getBoolean())
+            return;
+
         try
         {
             SSLFactory.validateSslContext("Internode messaging", conf.server_encryption_options, true, true);
@@ -1051,52 +1180,55 @@
     @VisibleForTesting
     static void checkForLowestAcceptedTimeouts(Config conf)
     {
-        if(conf.read_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
+        if(conf.read_request_timeout.toMilliseconds() < LOWEST_ACCEPTED_TIMEOUT.toMilliseconds())
         {
-           logInfo("read_request_timeout_in_ms", conf.read_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
-           conf.read_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
+            logInfo("read_request_timeout", conf.read_request_timeout, LOWEST_ACCEPTED_TIMEOUT);
+            conf.read_request_timeout = new DurationSpec.LongMillisecondsBound("10ms");
         }
 
-        if(conf.range_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
+        if(conf.range_request_timeout.toMilliseconds() < LOWEST_ACCEPTED_TIMEOUT.toMilliseconds())
         {
-           logInfo("range_request_timeout_in_ms", conf.range_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
-           conf.range_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
+            logInfo("range_request_timeout", conf.range_request_timeout, LOWEST_ACCEPTED_TIMEOUT);
+            conf.range_request_timeout = new DurationSpec.LongMillisecondsBound("10ms");
         }
 
-        if(conf.request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
+        if(conf.request_timeout.toMilliseconds() < LOWEST_ACCEPTED_TIMEOUT.toMilliseconds())
         {
-           logInfo("request_timeout_in_ms", conf.request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
-           conf.request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
+            logInfo("request_timeout", conf.request_timeout, LOWEST_ACCEPTED_TIMEOUT);
+            conf.request_timeout = new DurationSpec.LongMillisecondsBound("10ms");
         }
 
-        if(conf.write_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
+        if(conf.write_request_timeout.toMilliseconds() < LOWEST_ACCEPTED_TIMEOUT.toMilliseconds())
         {
-           logInfo("write_request_timeout_in_ms", conf.write_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
-           conf.write_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
+            logInfo("write_request_timeout", conf.write_request_timeout, LOWEST_ACCEPTED_TIMEOUT);
+            conf.write_request_timeout = new DurationSpec.LongMillisecondsBound("10ms");
         }
 
-        if(conf.cas_contention_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
+        if(conf.cas_contention_timeout.toMilliseconds() < LOWEST_ACCEPTED_TIMEOUT.toMilliseconds())
         {
-           logInfo("cas_contention_timeout_in_ms", conf.cas_contention_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
-           conf.cas_contention_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
+            logInfo("cas_contention_timeout", conf.cas_contention_timeout, LOWEST_ACCEPTED_TIMEOUT);
+            conf.cas_contention_timeout = new DurationSpec.LongMillisecondsBound("10ms");
         }
 
-        if(conf.counter_write_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
+        if(conf.counter_write_request_timeout.toMilliseconds() < LOWEST_ACCEPTED_TIMEOUT.toMilliseconds())
         {
-           logInfo("counter_write_request_timeout_in_ms", conf.counter_write_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
-           conf.counter_write_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
+            logInfo("counter_write_request_timeout", conf.counter_write_request_timeout, LOWEST_ACCEPTED_TIMEOUT);
+            conf.counter_write_request_timeout = new DurationSpec.LongMillisecondsBound("10ms");
         }
-
-        if(conf.truncate_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
+        if(conf.truncate_request_timeout.toMilliseconds() < LOWEST_ACCEPTED_TIMEOUT.toMilliseconds())
         {
-           logInfo("truncate_request_timeout_in_ms", conf.truncate_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
-           conf.truncate_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
+            logInfo("truncate_request_timeout", conf.truncate_request_timeout, LOWEST_ACCEPTED_TIMEOUT);
+            conf.truncate_request_timeout = LOWEST_ACCEPTED_TIMEOUT;
         }
     }
 
-    private static void logInfo(String property, long actualValue, long lowestAcceptedValue)
+    private static void logInfo(String property, DurationSpec.LongMillisecondsBound actualValue, DurationSpec.LongMillisecondsBound lowestAcceptedValue)
     {
-        logger.info("found {}::{} less than lowest acceptable value {}, continuing with {}", property, actualValue, lowestAcceptedValue, lowestAcceptedValue);
+        logger.info("found {}::{} less than lowest acceptable value {}, continuing with {}",
+                    property,
+                    actualValue.toString(),
+                    lowestAcceptedValue.toString(),
+                    lowestAcceptedValue);
     }
 
     public static void applyTokensConfig()
@@ -1155,6 +1287,7 @@
                 return 1;
             return 0;
         };
+        newFailureDetector = () -> createFailureDetector(conf.failure_detector);
     }
 
     // definitely not safe for tools + clients - implicitly instantiates schema
@@ -1198,45 +1331,9 @@
         return sum < 0 ? Long.MAX_VALUE : sum;
     }
 
-    private static FileStore guessFileStore(String dir) throws IOException
+    private static long tryGetSpace(String dir, PathUtils.IOToLongFunction<FileStore> getSpace)
     {
-        Path path = Paths.get(dir);
-        while (true)
-        {
-            try
-            {
-                return FileUtils.getFileStore(path);
-            }
-            catch (IOException e)
-            {
-                if (e instanceof NoSuchFileException)
-                {
-                    path = path.getParent();
-                    if (path == null)
-                    {
-                        throw new ConfigurationException("Unable to find filesystem for '" + dir + "'.");
-                    }
-                }
-                else
-                {
-                    throw e;
-                }
-            }
-        }
-    }
-
-    private static long getUnallocatedSpace(String directory)
-    {
-        try
-        {
-            return guessFileStore(directory).getUnallocatedSpace();
-        }
-        catch (IOException e)
-        {
-            logger.debug("Error checking disk space", e);
-            throw new ConfigurationException(String.format("Unable to check disk space available to %s. Perhaps the Cassandra user does not have the necessary permissions",
-                                                           directory), e);
-        }
+        return PathUtils.tryGetSpace(new File(dir).toPath(), getSpace, e -> { throw new ConfigurationException("Unable to check disk space in '" + dir + "'. Perhaps the Cassandra user does not have the necessary permissions", e); });
     }
 
     public static IEndpointSnitch createEndpointSnitch(boolean dynamic, String snitchClassName) throws ConfigurationException
@@ -1247,6 +1344,14 @@
         return dynamic ? new DynamicEndpointSnitch(snitch) : snitch;
     }
 
+    private static IFailureDetector createFailureDetector(String detectorClassName) throws ConfigurationException
+    {
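+        // a bare class name (without a package) is resolved against org.apache.cassandra.gms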
+        if (!detectorClassName.contains("."))
+            detectorClassName = "org.apache.cassandra.gms." + detectorClassName;
+        IFailureDetector detector = FBUtilities.construct(detectorClassName, "failure detector");
+        return detector;
+    }
+
     public static IAuthenticator getAuthenticator()
     {
         return authenticator;
@@ -1277,6 +1382,16 @@
         DatabaseDescriptor.networkAuthorizer = networkAuthorizer;
     }
 
+    public static void setAuthFromRoot(boolean fromRoot)
+    {
+        conf.traverse_auth_from_root = fromRoot;
+    }
+
+    public static boolean getAuthFromRoot()
+    {
+        return conf.traverse_auth_from_root;
+    }
+
     public static IRoleManager getRoleManager()
     {
         return roleManager;
@@ -1289,24 +1404,27 @@
 
     public static int getPermissionsValidity()
     {
-        return conf.permissions_validity_in_ms;
+        return conf.permissions_validity.toMilliseconds();
     }
 
     public static void setPermissionsValidity(int timeout)
     {
-        conf.permissions_validity_in_ms = timeout;
+        conf.permissions_validity = new DurationSpec.IntMillisecondsBound(timeout);
     }
 
     public static int getPermissionsUpdateInterval()
     {
-        return conf.permissions_update_interval_in_ms == -1
-             ? conf.permissions_validity_in_ms
-             : conf.permissions_update_interval_in_ms;
+        return conf.permissions_update_interval == null
+             ? conf.permissions_validity.toMilliseconds()
+             : conf.permissions_update_interval.toMilliseconds();
     }
 
     public static void setPermissionsUpdateInterval(int updateInterval)
     {
-        conf.permissions_update_interval_in_ms = updateInterval;
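+        // an interval of -1 means "follow permissions_validity", which is now represented by a null update interval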
+        if (updateInterval == -1)
+            conf.permissions_update_interval = null;
+        else
+            conf.permissions_update_interval = new DurationSpec.IntMillisecondsBound(updateInterval);
     }
 
     public static int getPermissionsCacheMaxEntries()
@@ -1319,26 +1437,49 @@
         return conf.permissions_cache_max_entries = maxEntries;
     }
 
+    public static boolean getPermissionsCacheActiveUpdate()
+    {
+        return conf.permissions_cache_active_update;
+    }
+
+    public static void setPermissionsCacheActiveUpdate(boolean update)
+    {
+        conf.permissions_cache_active_update = update;
+    }
+
     public static int getRolesValidity()
     {
-        return conf.roles_validity_in_ms;
+        return conf.roles_validity.toMilliseconds();
     }
 
     public static void setRolesValidity(int validity)
     {
-        conf.roles_validity_in_ms = validity;
+        conf.roles_validity = new DurationSpec.IntMillisecondsBound(validity);
     }
 
     public static int getRolesUpdateInterval()
     {
-        return conf.roles_update_interval_in_ms == -1
-             ? conf.roles_validity_in_ms
-             : conf.roles_update_interval_in_ms;
+        return conf.roles_update_interval == null
+             ? conf.roles_validity.toMilliseconds()
+             : conf.roles_update_interval.toMilliseconds();
+    }
+
+    public static void setRolesCacheActiveUpdate(boolean update)
+    {
+        conf.roles_cache_active_update = update;
+    }
+
+    public static boolean getRolesCacheActiveUpdate()
+    {
+        return conf.roles_cache_active_update;
     }
 
     public static void setRolesUpdateInterval(int interval)
     {
-        conf.roles_update_interval_in_ms = interval;
+        if (interval == -1)
+            conf.roles_update_interval = null;
+        else
+            conf.roles_update_interval = new DurationSpec.IntMillisecondsBound(interval);
     }
 
     public static int getRolesCacheMaxEntries()
@@ -1353,24 +1494,27 @@
 
     public static int getCredentialsValidity()
     {
-        return conf.credentials_validity_in_ms;
+        return conf.credentials_validity.toMilliseconds();
     }
 
     public static void setCredentialsValidity(int timeout)
     {
-        conf.credentials_validity_in_ms = timeout;
+        conf.credentials_validity = new DurationSpec.IntMillisecondsBound(timeout);
     }
 
     public static int getCredentialsUpdateInterval()
     {
-        return conf.credentials_update_interval_in_ms == -1
-               ? conf.credentials_validity_in_ms
-               : conf.credentials_update_interval_in_ms;
+        return conf.credentials_update_interval == null
+               ? conf.credentials_validity.toMilliseconds()
+               : conf.credentials_update_interval.toMilliseconds();
     }
 
     public static void setCredentialsUpdateInterval(int updateInterval)
     {
-        conf.credentials_update_interval_in_ms = updateInterval;
+        if (updateInterval == -1)
+            conf.credentials_update_interval = null;
+        else
+            conf.credentials_update_interval = new DurationSpec.IntMillisecondsBound(updateInterval);
     }
 
     public static int getCredentialsCacheMaxEntries()
@@ -1383,14 +1527,25 @@
         return conf.credentials_cache_max_entries = maxEntries;
     }
 
+    public static boolean getCredentialsCacheActiveUpdate()
+    {
+        return conf.credentials_cache_active_update;
+    }
+
+    public static void setCredentialsCacheActiveUpdate(boolean update)
+    {
+        conf.credentials_cache_active_update = update;
+    }
+
     public static int getMaxValueSize()
     {
-        return conf.max_value_size_in_mb * 1024 * 1024;
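+        // saturate rather than overflow if the configured value exceeds Integer.MAX_VALUE bytes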
+        return Ints.saturatedCast(conf.max_value_size.toMebibytes() * 1024L * 1024);
     }
 
     public static void setMaxValueSize(int maxValueSizeInBytes)
     {
-        conf.max_value_size_in_mb = maxValueSizeInBytes / 1024 / 1024;
+        // the below division is safe as this setter is used only in tests with values that won't lead to precision loss
+        conf.max_value_size = new DataStorageSpec.IntMebibytesBound((maxValueSizeInBytes / (1024L * 1024)), MEBIBYTES);
     }
 
     /**
@@ -1465,57 +1620,64 @@
         snitch = eps;
     }
 
+    public static IFailureDetector newFailureDetector()
+    {
+        return newFailureDetector.get();
+    }
+
+    public static void setDefaultFailureDetector()
+    {
+        newFailureDetector = () -> createFailureDetector("FailureDetector");
+    }
+
     public static int getColumnIndexSize()
     {
-        return (int) ByteUnit.KIBI_BYTES.toBytes(conf.column_index_size_in_kb);
+        return conf.column_index_size.toBytes();
     }
 
-    public static int getColumnIndexSizeInKB()
+    public static int getColumnIndexSizeInKiB()
     {
-        return conf.column_index_size_in_kb;
+        return conf.column_index_size.toKibibytes();
     }
 
-    @VisibleForTesting
     public static void setColumnIndexSize(int val)
     {
-        checkValidForByteConversion(val, "column_index_size_in_kb", ByteUnit.KIBI_BYTES);
-        conf.column_index_size_in_kb = val;
+        conf.column_index_size = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val, "column_index_size");
     }
 
     public static int getColumnIndexCacheSize()
     {
-        return (int) ByteUnit.KIBI_BYTES.toBytes(conf.column_index_cache_size_in_kb);
+        return conf.column_index_cache_size.toBytes();
     }
 
-    public static int getColumnIndexCacheSizeInKB()
+    public static int getColumnIndexCacheSizeInKiB()
     {
-        return conf.column_index_cache_size_in_kb;
+        return conf.column_index_cache_size.toKibibytes();
     }
 
     public static void setColumnIndexCacheSize(int val)
     {
-        checkValidForByteConversion(val, "column_index_cache_size_in_kb", ByteUnit.KIBI_BYTES);
-        conf.column_index_cache_size_in_kb = val;
+        conf.column_index_cache_size = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val, "column_index_cache_size");
     }
 
     public static int getBatchSizeWarnThreshold()
     {
-        return (int) ByteUnit.KIBI_BYTES.toBytes(conf.batch_size_warn_threshold_in_kb);
+        return conf.batch_size_warn_threshold.toBytes();
     }
 
-    public static int getBatchSizeWarnThresholdInKB()
+    public static int getBatchSizeWarnThresholdInKiB()
     {
-        return conf.batch_size_warn_threshold_in_kb;
+        return conf.batch_size_warn_threshold.toKibibytes();
     }
 
     public static long getBatchSizeFailThreshold()
     {
-        return ByteUnit.KIBI_BYTES.toBytes(conf.batch_size_fail_threshold_in_kb);
+        return conf.batch_size_fail_threshold.toBytesInLong();
     }
 
-    public static int getBatchSizeFailThresholdInKB()
+    public static int getBatchSizeFailThresholdInKiB()
     {
-        return conf.batch_size_fail_threshold_in_kb;
+        return conf.batch_size_fail_threshold.toKibibytes();
     }
 
     public static int getUnloggedBatchAcrossPartitionsWarnThreshold()
@@ -1523,15 +1685,14 @@
         return conf.unlogged_batch_across_partitions_warn_threshold;
     }
 
-    public static void setBatchSizeWarnThresholdInKB(int threshold)
+    public static void setBatchSizeWarnThresholdInKiB(int threshold)
     {
-        checkValidForByteConversion(threshold, "batch_size_warn_threshold_in_kb", ByteUnit.KIBI_BYTES);
-        conf.batch_size_warn_threshold_in_kb = threshold;
+        conf.batch_size_warn_threshold = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(threshold, "batch_size_warn_threshold");
     }
 
-    public static void setBatchSizeFailThresholdInKB(int threshold)
+    public static void setBatchSizeFailThresholdInKiB(int threshold)
     {
-        conf.batch_size_fail_threshold_in_kb = threshold;
+        conf.batch_size_fail_threshold = new DataStorageSpec.IntKibibytesBound(threshold);
     }
 
     public static Collection<String> getInitialTokens()
@@ -1612,97 +1773,107 @@
 
     public static long nativeTransportIdleTimeout()
     {
-        return conf.native_transport_idle_timeout_in_ms;
+        return conf.native_transport_idle_timeout.toMilliseconds();
     }
 
     public static void setNativeTransportIdleTimeout(long nativeTransportTimeout)
     {
-        conf.native_transport_idle_timeout_in_ms = nativeTransportTimeout;
+        conf.native_transport_idle_timeout = new DurationSpec.LongMillisecondsBound(nativeTransportTimeout);
     }
 
     public static long getRpcTimeout(TimeUnit unit)
     {
-        return unit.convert(conf.request_timeout_in_ms, MILLISECONDS);
+        return conf.request_timeout.to(unit);
     }
 
     public static void setRpcTimeout(long timeOutInMillis)
     {
-        conf.request_timeout_in_ms = timeOutInMillis;
+        conf.request_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
     }
 
     public static long getReadRpcTimeout(TimeUnit unit)
     {
-        return unit.convert(conf.read_request_timeout_in_ms, MILLISECONDS);
+        return conf.read_request_timeout.to(unit);
     }
 
     public static void setReadRpcTimeout(long timeOutInMillis)
     {
-        conf.read_request_timeout_in_ms = timeOutInMillis;
+        conf.read_request_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
     }
 
     public static long getRangeRpcTimeout(TimeUnit unit)
     {
-        return unit.convert(conf.range_request_timeout_in_ms, MILLISECONDS);
+        return conf.range_request_timeout.to(unit);
     }
 
     public static void setRangeRpcTimeout(long timeOutInMillis)
     {
-        conf.range_request_timeout_in_ms = timeOutInMillis;
+        conf.range_request_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
     }
 
     public static long getWriteRpcTimeout(TimeUnit unit)
     {
-        return unit.convert(conf.write_request_timeout_in_ms, MILLISECONDS);
+        return conf.write_request_timeout.to(unit);
     }
 
     public static void setWriteRpcTimeout(long timeOutInMillis)
     {
-        conf.write_request_timeout_in_ms = timeOutInMillis;
+        conf.write_request_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
     }
 
     public static long getCounterWriteRpcTimeout(TimeUnit unit)
     {
-        return unit.convert(conf.counter_write_request_timeout_in_ms, MILLISECONDS);
+        return conf.counter_write_request_timeout.to(unit);
     }
 
     public static void setCounterWriteRpcTimeout(long timeOutInMillis)
     {
-        conf.counter_write_request_timeout_in_ms = timeOutInMillis;
+        conf.counter_write_request_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
     }
 
     public static long getCasContentionTimeout(TimeUnit unit)
     {
-        return unit.convert(conf.cas_contention_timeout_in_ms, MILLISECONDS);
+        return conf.cas_contention_timeout.to(unit);
     }
 
     public static void setCasContentionTimeout(long timeOutInMillis)
     {
-        conf.cas_contention_timeout_in_ms = timeOutInMillis;
+        conf.cas_contention_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
     }
 
     public static long getTruncateRpcTimeout(TimeUnit unit)
     {
-        return unit.convert(conf.truncate_request_timeout_in_ms, MILLISECONDS);
+        return conf.truncate_request_timeout.to(unit);
     }
 
     public static void setTruncateRpcTimeout(long timeOutInMillis)
     {
-        conf.truncate_request_timeout_in_ms = timeOutInMillis;
+        conf.truncate_request_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
+    }
+
+    public static long getRepairRpcTimeout(TimeUnit unit)
+    {
+        return conf.repair_request_timeout.to(unit);
+    }
+
+    public static void setRepairRpcTimeout(Long timeOutInMillis)
+    {
+        conf.repair_request_timeout = new DurationSpec.LongMillisecondsBound(timeOutInMillis);
     }
 
     public static boolean hasCrossNodeTimeout()
     {
-        return conf.cross_node_timeout;
+        return conf.internode_timeout;
     }
 
     public static void setCrossNodeTimeout(boolean crossNodeTimeout)
     {
-        conf.cross_node_timeout = crossNodeTimeout;
+        conf.internode_timeout = crossNodeTimeout;
     }
 
-    public static long getSlowQueryTimeout(TimeUnit units)
+    public static long getSlowQueryTimeout(TimeUnit unit)
     {
-        return units.convert(conf.slow_query_log_timeout_in_ms, MILLISECONDS);
+        return conf.slow_query_log_timeout.to(unit);
     }
 
     /**
@@ -1723,16 +1894,6 @@
         return unit.convert(getBlockForPeersTimeoutInSeconds(), TimeUnit.SECONDS);
     }
 
-    public static long getRepairRpcTimeout(TimeUnit unit)
-    {
-        return unit.convert(conf.repair_request_timeout_in_ms, MILLISECONDS);
-    }
-
-    public static void setRepairRpcTimeout(long time, TimeUnit unit)
-    {
-        conf.repair_request_timeout_in_ms = MILLISECONDS.convert(time, unit);
-    }
-
     public static double getPhiConvictThreshold()
     {
         return conf.phi_convict_threshold;
@@ -1804,6 +1965,11 @@
         return conf.memtable_flush_writers;
     }
 
+    public static int getAvailableProcessors()
+    {
+        return conf == null ? -1 : conf.available_processors;
+    }
+
     public static int getConcurrentCompactors()
     {
         return conf.concurrent_compactors;
@@ -1814,17 +1980,53 @@
         conf.concurrent_compactors = value;
     }
 
-    public static int getCompactionThroughputMbPerSec()
+    public static int getCompactionThroughputMebibytesPerSecAsInt()
     {
-        return conf.compaction_throughput_mb_per_sec;
+        return conf.compaction_throughput.toMebibytesPerSecondAsInt();
     }
 
-    public static void setCompactionThroughputMbPerSec(int value)
+    public static double getCompactionThroughputBytesPerSec()
     {
-        conf.compaction_throughput_mb_per_sec = value;
+        return conf.compaction_throughput.toBytesPerSecond();
     }
 
-    public static long getCompactionLargePartitionWarningThreshold() { return ByteUnit.MEBI_BYTES.toBytes(conf.compaction_large_partition_warning_threshold_mb); }
+    public static double getCompactionThroughputMebibytesPerSec()
+    {
+        return conf.compaction_throughput.toMebibytesPerSecond();
+    }
+
+    @VisibleForTesting // only for testing!
+    public static void setCompactionThroughputBytesPerSec(int value)
+    {
+        if (BYTES_PER_SECOND.toMebibytesPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("compaction_throughput: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value);
+    }
+
+    public static void setCompactionThroughputMebibytesPerSec(int value)
+    {
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("compaction_throughput: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+    }
+
+    public static long getCompactionLargePartitionWarningThreshold() { return conf.compaction_large_partition_warning_threshold.toBytesInLong(); }
+
+    public static int getCompactionTombstoneWarningThreshold()
+    {
+        return conf.compaction_tombstone_warning_threshold;
+    }
+
+    public static void setCompactionTombstoneWarningThreshold(int count)
+    {
+        conf.compaction_tombstone_warning_threshold = count;
+    }
 
     public static int getConcurrentValidations()
     {
@@ -1849,7 +2051,7 @@
 
     public static long getMinFreeSpacePerDriveInBytes()
     {
-        return ByteUnit.MEBI_BYTES.toBytes(conf.min_free_space_per_drive_in_mb);
+        return conf.min_free_space_per_drive.toBytesInLong();
     }
 
     public static boolean getDisableSTCSInL0()
@@ -1864,22 +2066,132 @@
 
     public static int getStreamThroughputOutboundMegabitsPerSec()
     {
-        return conf.stream_throughput_outbound_megabits_per_sec;
+        return conf.stream_throughput_outbound.toMegabitsPerSecondAsInt();
+    }
+
+    public static double getStreamThroughputOutboundMegabitsPerSecAsDouble()
+    {
+        return conf.stream_throughput_outbound.toMegabitsPerSecond();
+    }
+
+    public static double getStreamThroughputOutboundMebibytesPerSec()
+    {
+        return conf.stream_throughput_outbound.toMebibytesPerSecond();
+    }
+
+    public static double getStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.stream_throughput_outbound.toBytesPerSecond();
+    }
+
+    public static int getStreamThroughputOutboundMebibytesPerSecAsInt()
+    {
+        return conf.stream_throughput_outbound.toMebibytesPerSecondAsInt();
+    }
+
+    public static void setStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+    {
+        if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("stream_throughput_outbound: " + value  +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in megabits/s");
+
+        conf.stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+    }
+
+    public static void setStreamThroughputOutboundBytesPerSec(long value)
+    {
+        conf.stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, BYTES_PER_SECOND);
     }
 
     public static void setStreamThroughputOutboundMegabitsPerSec(int value)
     {
-        conf.stream_throughput_outbound_megabits_per_sec = value;
+        conf.stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
+    }
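
For reference, a minimal, self-contained sketch of the unit conversion these stream-throughput setters rely on; it uses plain arithmetic rather than the DataRateSpec helpers, and the 200 Mb/s figure is only an assumed example:

// Illustrative only, not part of the patch: megabits/s -> bytes/s -> MiB/s.
public class StreamThroughputConversionSketch
{
    public static void main(String[] args)
    {
        long megabitsPerSec = 200;                                  // example setter input
        long bytesPerSec = megabitsPerSec * 1_000_000L / 8;         // 25_000_000 B/s stored internally
        double mebibytesPerSec = bytesPerSec / (1024.0 * 1024.0);   // ~23.84 MiB/s reported by the MiB getters
        System.out.printf("%d Mb/s = %d B/s = %.2f MiB/s%n", megabitsPerSec, bytesPerSec, mebibytesPerSec);
    }
}
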
+
+    public static double getEntireSSTableStreamThroughputOutboundMebibytesPerSec()
+    {
+        return conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond();
+    }
+
+    public static double getEntireSSTableStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.entire_sstable_stream_throughput_outbound.toBytesPerSecond();
+    }
+
+    public static void setEntireSSTableStreamThroughputOutboundMebibytesPerSec(int value)
+    {
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("entire_sstable_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
     public static int getInterDCStreamThroughputOutboundMegabitsPerSec()
     {
-        return conf.inter_dc_stream_throughput_outbound_megabits_per_sec;
+        return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecondAsInt();
+    }
+
+    public static double getInterDCStreamThroughputOutboundMegabitsPerSecAsDouble()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond();
+    }
+
+    public static double getInterDCStreamThroughputOutboundMebibytesPerSec()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
+    }
+
+    public static double getInterDCStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toBytesPerSecond();
+    }
+
+    public static int getInterDCStreamThroughputOutboundMebibytesPerSecAsInt()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+    }
+
+    public static void setInterDCStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+    {
+        if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("inter_dc_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in megabits/s");
+
+        conf.inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+    }
+
+    public static void setInterDCStreamThroughputOutboundBytesPerSec(long value)
+    {
+        conf.inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, BYTES_PER_SECOND);
     }
 
     public static void setInterDCStreamThroughputOutboundMegabitsPerSec(int value)
     {
-        conf.inter_dc_stream_throughput_outbound_megabits_per_sec = value;
+        conf.inter_dc_stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
+    }
+
+    public static double getEntireSSTableInterDCStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toBytesPerSecond();
+    }
+
+    public static double getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec()
+    {
+        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
+    }
+
+    public static void setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(int value)
+    {
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("entire_sstable_inter_dc_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
     /**
@@ -1981,7 +2293,7 @@
 
     public static int getMaxMutationSize()
     {
-        return (int) ByteUnit.KIBI_BYTES.toBytes(conf.max_mutation_size_in_kb);
+        return conf.max_mutation_size.toBytes();
     }
 
     public static int getTombstoneWarnThreshold()
@@ -2029,12 +2341,19 @@
      */
     public static int getCommitLogSegmentSize()
     {
-        return (int) ByteUnit.MEBI_BYTES.toBytes(conf.commitlog_segment_size_in_mb);
+        return conf.commitlog_segment_size.toBytes();
     }
 
-    public static void setCommitLogSegmentSize(int sizeMegabytes)
+    /**
+     * Update commitlog_segment_size in tests only.
+     * {@link CommitLogSegmentManagerCDC} uses the commit log segment size to estimate the file size on allocation,
+     * so the value must stay unchanged at runtime for that estimate to remain correct.
+     * @param sizeMebibytes the new commit log segment size, in mebibytes
+     */
+    @VisibleForTesting /* Only for testing */
+    public static void setCommitLogSegmentSize(int sizeMebibytes)
     {
-        conf.commitlog_segment_size_in_mb = sizeMegabytes;
+        conf.commitlog_segment_size = new DataStorageSpec.IntMebibytesBound(sizeMebibytes);
     }
 
     public static String getSavedCachesLocation()
@@ -2139,83 +2458,83 @@
 
     public static int getInternodeSocketSendBufferSizeInBytes()
     {
-        return conf.internode_socket_send_buffer_size_in_bytes;
+        return conf.internode_socket_send_buffer_size.toBytes();
     }
 
     public static int getInternodeSocketReceiveBufferSizeInBytes()
     {
-        return conf.internode_socket_receive_buffer_size_in_bytes;
+        return conf.internode_socket_receive_buffer_size.toBytes();
     }
 
     public static int getInternodeApplicationSendQueueCapacityInBytes()
     {
-        return conf.internode_application_send_queue_capacity_in_bytes;
+        return conf.internode_application_send_queue_capacity.toBytes();
     }
 
     public static int getInternodeApplicationSendQueueReserveEndpointCapacityInBytes()
     {
-        return conf.internode_application_send_queue_reserve_endpoint_capacity_in_bytes;
+        return conf.internode_application_send_queue_reserve_endpoint_capacity.toBytes();
     }
 
     public static int getInternodeApplicationSendQueueReserveGlobalCapacityInBytes()
     {
-        return conf.internode_application_send_queue_reserve_global_capacity_in_bytes;
+        return conf.internode_application_send_queue_reserve_global_capacity.toBytes();
     }
 
     public static int getInternodeApplicationReceiveQueueCapacityInBytes()
     {
-        return conf.internode_application_receive_queue_capacity_in_bytes;
+        return conf.internode_application_receive_queue_capacity.toBytes();
     }
 
     public static int getInternodeApplicationReceiveQueueReserveEndpointCapacityInBytes()
     {
-        return conf.internode_application_receive_queue_reserve_endpoint_capacity_in_bytes;
+        return conf.internode_application_receive_queue_reserve_endpoint_capacity.toBytes();
     }
 
     public static int getInternodeApplicationReceiveQueueReserveGlobalCapacityInBytes()
     {
-        return conf.internode_application_receive_queue_reserve_global_capacity_in_bytes;
+        return conf.internode_application_receive_queue_reserve_global_capacity.toBytes();
     }
 
     public static int getInternodeTcpConnectTimeoutInMS()
     {
-        return conf.internode_tcp_connect_timeout_in_ms;
+        return conf.internode_tcp_connect_timeout.toMilliseconds();
     }
 
     public static void setInternodeTcpConnectTimeoutInMS(int value)
     {
-        conf.internode_tcp_connect_timeout_in_ms = value;
+        conf.internode_tcp_connect_timeout = new DurationSpec.IntMillisecondsBound(value);
     }
 
     public static int getInternodeTcpUserTimeoutInMS()
     {
-        return conf.internode_tcp_user_timeout_in_ms;
+        return conf.internode_tcp_user_timeout.toMilliseconds();
     }
 
     public static void setInternodeTcpUserTimeoutInMS(int value)
     {
-        conf.internode_tcp_user_timeout_in_ms = value;
+        conf.internode_tcp_user_timeout = new DurationSpec.IntMillisecondsBound(value);
     }
 
     public static int getInternodeStreamingTcpUserTimeoutInMS()
     {
-        return conf.internode_streaming_tcp_user_timeout_in_ms;
+        return conf.internode_streaming_tcp_user_timeout.toMilliseconds();
     }
 
     public static void setInternodeStreamingTcpUserTimeoutInMS(int value)
     {
-        conf.internode_streaming_tcp_user_timeout_in_ms = value;
+        conf.internode_streaming_tcp_user_timeout = new DurationSpec.IntMillisecondsBound(value);
     }
 
     public static int getInternodeMaxMessageSizeInBytes()
     {
-        return conf.internode_max_message_size_in_bytes;
+        return conf.internode_max_message_size.toBytes();
     }
 
     @VisibleForTesting
     public static void setInternodeMaxMessageSizeInBytes(int value)
     {
-        conf.internode_max_message_size_in_bytes = value;
+        conf.internode_max_message_size = new DataStorageSpec.IntBytesBound(value);
     }
 
     public static boolean startNativeTransport()
@@ -2261,7 +2580,12 @@
 
     public static int getNativeTransportMaxFrameSize()
     {
-        return (int) ByteUnit.MEBI_BYTES.toBytes(conf.native_transport_max_frame_size_in_mb);
+        return conf.native_transport_max_frame_size.toBytes();
+    }
+
+    public static void setNativeTransportMaxFrameSize(int bytes)
+    {
+        conf.native_transport_max_frame_size = new DataStorageSpec.IntMebibytesBound(bytes);
     }
 
     public static long getNativeTransportMaxConcurrentConnections()
@@ -2299,62 +2623,210 @@
         conf.native_transport_allow_older_protocols = isEnabled;
     }
 
-    public static double getCommitLogSyncGroupWindow()
+    public static long getCommitLogSyncGroupWindow()
     {
-        return conf.commitlog_sync_group_window_in_ms;
+        return conf.commitlog_sync_group_window.toMilliseconds();
     }
 
-    public static void setCommitLogSyncGroupWindow(double windowMillis)
+    public static void setCommitLogSyncGroupWindow(long windowMillis)
     {
-        conf.commitlog_sync_group_window_in_ms = windowMillis;
+        conf.commitlog_sync_group_window = new DurationSpec.IntMillisecondsBound(windowMillis);
     }
 
     public static int getNativeTransportReceiveQueueCapacityInBytes()
     {
-        return conf.native_transport_receive_queue_capacity_in_bytes;
+        return conf.native_transport_receive_queue_capacity.toBytes();
     }
 
     public static void setNativeTransportReceiveQueueCapacityInBytes(int queueSize)
     {
-        conf.native_transport_receive_queue_capacity_in_bytes = queueSize;
+        conf.native_transport_receive_queue_capacity = new DataStorageSpec.IntBytesBound(queueSize);
     }
 
-    public static long getNativeTransportMaxConcurrentRequestsInBytesPerIp()
+    public static long getNativeTransportMaxRequestDataInFlightPerIpInBytes()
     {
-        return conf.native_transport_max_concurrent_requests_in_bytes_per_ip;
+        return conf.native_transport_max_request_data_in_flight_per_ip.toBytes();
     }
 
-    public static void setNativeTransportMaxConcurrentRequestsInBytesPerIp(long maxConcurrentRequestsInBytes)
+    public static Config.PaxosVariant getPaxosVariant()
     {
-        conf.native_transport_max_concurrent_requests_in_bytes_per_ip = maxConcurrentRequestsInBytes;
+        return conf.paxos_variant;
     }
 
-    public static long getNativeTransportMaxConcurrentRequestsInBytes()
+    public static void setPaxosVariant(Config.PaxosVariant variant)
     {
-        return conf.native_transport_max_concurrent_requests_in_bytes;
+        conf.paxos_variant = variant;
     }
 
-    public static void setNativeTransportMaxConcurrentRequestsInBytes(long maxConcurrentRequestsInBytes)
+    public static String getPaxosContentionWaitRandomizer()
     {
-        conf.native_transport_max_concurrent_requests_in_bytes = maxConcurrentRequestsInBytes;
+        return conf.paxos_contention_wait_randomizer;
+    }
+
+    public static String getPaxosContentionMinWait()
+    {
+        return conf.paxos_contention_min_wait;
+    }
+
+    public static String getPaxosContentionMaxWait()
+    {
+        return conf.paxos_contention_max_wait;
+    }
+
+    public static String getPaxosContentionMinDelta()
+    {
+        return conf.paxos_contention_min_delta;
+    }
+
+    public static void setPaxosContentionWaitRandomizer(String waitRandomizer)
+    {
+        conf.paxos_contention_wait_randomizer = waitRandomizer;
+    }
+
+    public static void setPaxosContentionMinWait(String minWait)
+    {
+        conf.paxos_contention_min_wait = minWait;
+    }
+
+    public static void setPaxosContentionMaxWait(String maxWait)
+    {
+        conf.paxos_contention_max_wait = maxWait;
+    }
+
+    public static void setPaxosContentionMinDelta(String minDelta)
+    {
+        conf.paxos_contention_min_delta = minDelta;
+    }
+
+    public static boolean skipPaxosRepairOnTopologyChange()
+    {
+        return conf.skip_paxos_repair_on_topology_change;
+    }
+
+    public static void setSkipPaxosRepairOnTopologyChange(boolean value)
+    {
+        conf.skip_paxos_repair_on_topology_change = value;
+    }
+
+    public static long getPaxosPurgeGrace(TimeUnit units)
+    {
+        return conf.paxos_purge_grace_period.to(units);
+    }
+
+    public static void setPaxosPurgeGrace(long seconds)
+    {
+        conf.paxos_purge_grace_period = new DurationSpec.LongSecondsBound(seconds);
+    }
+
+    public static PaxosOnLinearizabilityViolation paxosOnLinearizabilityViolations()
+    {
+        return conf.paxos_on_linearizability_violations;
+    }
+
+    public static void setPaxosOnLinearizabilityViolations(PaxosOnLinearizabilityViolation v)
+    {
+        conf.paxos_on_linearizability_violations = v;
+    }
+
+    public static PaxosStatePurging paxosStatePurging()
+    {
+        return conf.paxos_state_purging;
+    }
+
+    public static void setPaxosStatePurging(PaxosStatePurging v)
+    {
+        conf.paxos_state_purging = v;
+    }
+
+    public static boolean paxosRepairEnabled()
+    {
+        return conf.paxos_repair_enabled;
+    }
+
+    public static void setPaxosRepairEnabled(boolean v)
+    {
+        conf.paxos_repair_enabled = v;
+    }
+
+    public static Set<String> skipPaxosRepairOnTopologyChangeKeyspaces()
+    {
+        return conf.skip_paxos_repair_on_topology_change_keyspaces;
+    }
+
+    public static void setSkipPaxosRepairOnTopologyChangeKeyspaces(String keyspaces)
+    {
+        conf.skip_paxos_repair_on_topology_change_keyspaces = Config.splitCommaDelimited(keyspaces);
+    }
+
+    public static boolean paxoTopologyRepairNoDcChecks()
+    {
+        return conf.paxos_topology_repair_no_dc_checks;
+    }
+
+    public static boolean paxoTopologyRepairStrictEachQuorum()
+    {
+        return conf.paxos_topology_repair_strict_each_quorum;
+    }
+
+    public static void setNativeTransportMaxRequestDataInFlightPerIpInBytes(long maxRequestDataInFlightInBytes)
+    {
+        if (maxRequestDataInFlightInBytes == -1)
+            maxRequestDataInFlightInBytes = Runtime.getRuntime().maxMemory() / 40;
+
+        conf.native_transport_max_request_data_in_flight_per_ip = new DataStorageSpec.LongBytesBound(maxRequestDataInFlightInBytes);
+    }
+
+    public static long getNativeTransportMaxRequestDataInFlightInBytes()
+    {
+        return conf.native_transport_max_request_data_in_flight.toBytes();
+    }
+
+    public static void setNativeTransportConcurrentRequestDataInFlightInBytes(long maxRequestDataInFlightInBytes)
+    {
+        if (maxRequestDataInFlightInBytes == -1)
+            maxRequestDataInFlightInBytes = Runtime.getRuntime().maxMemory() / 10;
+
+        conf.native_transport_max_request_data_in_flight = new DataStorageSpec.LongBytesBound(maxRequestDataInFlightInBytes);
+    }
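
A short sketch of the -1 defaulting used by the two data-in-flight setters above (heap/40 per IP, heap/10 globally); the 4 GiB heap is an assumed example, not a value from the patch:

// Illustrative only, not part of the patch: resolving -1 to the heap-based defaults.
public class DataInFlightDefaultsSketch
{
    static long resolve(long requested, long maxHeapBytes, int divisor)
    {
        return requested == -1 ? maxHeapBytes / divisor : requested;
    }

    public static void main(String[] args)
    {
        long heap = 4L * 1024 * 1024 * 1024;             // assume a 4 GiB max heap
        System.out.println(resolve(-1, heap, 40));       // per-IP limit -> 107374182 B (~102 MiB)
        System.out.println(resolve(-1, heap, 10));       // global limit -> 429496729 B (~409 MiB)
        System.out.println(resolve(1 << 20, heap, 10));  // an explicit 1 MiB stays as given
    }
}
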
+
+    public static int getNativeTransportMaxRequestsPerSecond()
+    {
+        return conf.native_transport_max_requests_per_second;
+    }
+
+    public static void setNativeTransportMaxRequestsPerSecond(int perSecond)
+    {
+        Preconditions.checkArgument(perSecond > 0, "native_transport_max_requests_per_second must be greater than zero");
+        conf.native_transport_max_requests_per_second = perSecond;
+    }
+
+    public static void setNativeTransportRateLimitingEnabled(boolean enabled)
+    {
+        logger.info("native_transport_rate_limiting_enabled set to {}", enabled);
+        conf.native_transport_rate_limiting_enabled = enabled;
+    }
+
+    public static boolean getNativeTransportRateLimitingEnabled()
+    {
+        return conf.native_transport_rate_limiting_enabled;
     }
 
     public static int getCommitLogSyncPeriod()
     {
-        return conf.commitlog_sync_period_in_ms;
+        return conf.commitlog_sync_period.toMilliseconds();
     }
 
     public static long getPeriodicCommitLogSyncBlock()
     {
-        Integer blockMillis = conf.periodic_commitlog_sync_lag_block_in_ms;
+        DurationSpec.IntMillisecondsBound blockMillis = conf.periodic_commitlog_sync_lag_block;
         return blockMillis == null
                ? (long)(getCommitLogSyncPeriod() * 1.5)
-               : blockMillis;
+               : blockMillis.toMilliseconds();
     }
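
For clarity, a tiny sketch of the fallback above: when periodic_commitlog_sync_lag_block is unset, the block time defaults to 1.5x the sync period (the millisecond values are assumed examples):

// Illustrative only, not part of the patch: a null lag block falls back to 1.5x the sync period.
public class PeriodicSyncBlockSketch
{
    static long periodicSyncBlockMillis(Integer lagBlockMillis, int syncPeriodMillis)
    {
        return lagBlockMillis == null ? (long) (syncPeriodMillis * 1.5) : lagBlockMillis;
    }

    public static void main(String[] args)
    {
        System.out.println(periodicSyncBlockMillis(null, 10_000));   // 15000 ms (1.5x default)
        System.out.println(periodicSyncBlockMillis(25_000, 10_000)); // 25000 ms (explicit value wins)
    }
}
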
 
     public static void setCommitLogSyncPeriod(int periodMillis)
     {
-        conf.commitlog_sync_period_in_ms = periodMillis;
+        conf.commitlog_sync_period = new DurationSpec.IntMillisecondsBound(periodMillis);
     }
 
     public static Config.CommitLogSync getCommitLogSync()
@@ -2421,6 +2893,17 @@
         return conf.auto_snapshot;
     }
 
+    public static DurationSpec.IntSecondsBound getAutoSnapshotTtl()
+    {
+        return autoSnapshoTtl;
+    }
+
+    @VisibleForTesting
+    public static void setAutoSnapshotTtl(DurationSpec.IntSecondsBound newTtl)
+    {
+        autoSnapshoTtl = newTtl;
+    }
+
     @VisibleForTesting
     public static void setAutoSnapshot(boolean autoSnapshot)
     {
@@ -2470,6 +2953,16 @@
         return conf.hinted_handoff_disabled_datacenters;
     }
 
+    public static boolean useDeterministicTableID()
+    {
+        return conf != null && conf.use_deterministic_table_id;
+    }
+
+    public static void useDeterministicTableID(boolean value)
+    {
+        conf.use_deterministic_table_id = value;
+    }
+
     public static void enableHintsForDC(String dc)
     {
         conf.hinted_handoff_disabled_datacenters.remove(dc);
@@ -2482,12 +2975,28 @@
 
     public static void setMaxHintWindow(int ms)
     {
-        conf.max_hint_window_in_ms = ms;
+        conf.max_hint_window = new DurationSpec.IntMillisecondsBound(ms);
     }
 
     public static int getMaxHintWindow()
     {
-        return conf.max_hint_window_in_ms;
+        return conf.max_hint_window.toMilliseconds();
+    }
+
+    public static void setMaxHintsSizePerHostInMiB(int value)
+    {
+        conf.max_hints_size_per_host = new DataStorageSpec.LongBytesBound(value, MEBIBYTES);
+    }
+
+    public static int getMaxHintsSizePerHostInMiB()
+    {
+        // Warning: this conversion rounds down when converting bytes to mebibytes
+        return Ints.saturatedCast(conf.max_hints_size_per_host.unit().toMebibytes(conf.max_hints_size_per_host.quantity()));
+    }
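
A brief sketch of the rounding noted in the comment above: a byte count just short of a mebibyte boundary is reported as the lower whole MiB (the numbers are illustrative only):

// Illustrative only, not part of the patch: integer division rounds the MiB figure down.
public class HintsSizeRoundingSketch
{
    public static void main(String[] args)
    {
        long bytes = 2 * 1024 * 1024 - 1;        // 2097151 B, one byte short of 2 MiB
        long mebibytes = bytes / (1024 * 1024);  // 1 MiB after rounding down
        System.out.println(bytes + " B -> " + mebibytes + " MiB");
    }
}
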
+
+    public static long getMaxHintsSizePerHost()
+    {
+        return conf.max_hints_size_per_host.toBytes();
     }
 
     public static File getHintsDirectory()
@@ -2495,6 +3004,11 @@
         return new File(conf.hints_directory);
     }
 
+    public static boolean hintWindowPersistentEnabled()
+    {
+        return conf.hint_window_persistent_enabled;
+    }
+
     public static File getSerializedCachePath(CacheType cacheType, String version, String extension)
     {
         String name = cacheType.toString()
@@ -2504,20 +3018,20 @@
 
     public static int getDynamicUpdateInterval()
     {
-        return conf.dynamic_snitch_update_interval_in_ms;
+        return conf.dynamic_snitch_update_interval.toMilliseconds();
     }
     public static void setDynamicUpdateInterval(int dynamicUpdateInterval)
     {
-        conf.dynamic_snitch_update_interval_in_ms = dynamicUpdateInterval;
+        conf.dynamic_snitch_update_interval = new DurationSpec.IntMillisecondsBound(dynamicUpdateInterval);
     }
 
     public static int getDynamicResetInterval()
     {
-        return conf.dynamic_snitch_reset_interval_in_ms;
+        return conf.dynamic_snitch_reset_interval.toMilliseconds();
     }
     public static void setDynamicResetInterval(int dynamicResetInterval)
     {
-        conf.dynamic_snitch_reset_interval_in_ms = dynamicResetInterval;
+        conf.dynamic_snitch_reset_interval = new DurationSpec.IntMillisecondsBound(dynamicResetInterval);
     }
 
     public static double getDynamicBadnessThreshold()
@@ -2551,24 +3065,24 @@
         conf.client_encryption_options = update.apply(conf.client_encryption_options);
     }
 
-    public static int getHintedHandoffThrottleInKB()
+    public static int getHintedHandoffThrottleInKiB()
     {
-        return conf.hinted_handoff_throttle_in_kb;
+        return conf.hinted_handoff_throttle.toKibibytes();
     }
 
-    public static void setHintedHandoffThrottleInKB(int throttleInKB)
+    public static void setHintedHandoffThrottleInKiB(int throttleInKiB)
     {
-        conf.hinted_handoff_throttle_in_kb = throttleInKB;
+        conf.hinted_handoff_throttle = new DataStorageSpec.IntKibibytesBound(throttleInKiB);
     }
 
-    public static int getBatchlogReplayThrottleInKB()
+    public static int getBatchlogReplayThrottleInKiB()
     {
-        return conf.batchlog_replay_throttle_in_kb;
+        return conf.batchlog_replay_throttle.toKibibytes();
     }
 
-    public static void setBatchlogReplayThrottleInKB(int throttleInKB)
+    public static void setBatchlogReplayThrottleInKiB(int throttleInKiB)
     {
-        conf.batchlog_replay_throttle_in_kb = throttleInKB;
+        conf.batchlog_replay_throttle = new DataStorageSpec.IntKibibytesBound(throttleInKiB);
     }
 
     public static int getMaxHintsDeliveryThreads()
@@ -2578,12 +3092,12 @@
 
     public static int getHintsFlushPeriodInMS()
     {
-        return conf.hints_flush_period_in_ms;
+        return conf.hints_flush_period.toMilliseconds();
     }
 
     public static long getMaxHintsFileSize()
     {
-        return  ByteUnit.MEBI_BYTES.toBytes(conf.max_hints_file_size_in_mb);
+        return conf.max_hints_file_size.toBytesInLong();
     }
 
     public static ParameterizedClass getHintsCompression()
@@ -2596,6 +3110,16 @@
         conf.hints_compression = parameterizedClass;
     }
 
+    public static boolean isAutoHintsCleanupEnabled()
+    {
+        return conf.auto_hints_cleanup_enabled;
+    }
+
+    public static void setAutoHintsCleanupEnabled(boolean value)
+    {
+        conf.auto_hints_cleanup_enabled = value;
+    }
+
     public static boolean isIncrementalBackupsEnabled()
     {
         return conf.incremental_backups;
@@ -2611,27 +3135,27 @@
         return conf.file_cache_enabled;
     }
 
-    public static int getFileCacheSizeInMB()
+    public static int getFileCacheSizeInMiB()
     {
-        if (conf.file_cache_size_in_mb == null)
+        if (conf.file_cache_size == null)
         {
             // In client mode the value is not set.
             assert DatabaseDescriptor.isClientInitialized();
             return 0;
         }
 
-        return conf.file_cache_size_in_mb;
+        return conf.file_cache_size.toMebibytes();
     }
 
-    public static int getNetworkingCacheSizeInMB()
+    public static int getNetworkingCacheSizeInMiB()
     {
-        if (conf.networking_cache_size_in_mb == null)
+        if (conf.networking_cache_size == null)
         {
             // In client mode the value is not set.
             assert DatabaseDescriptor.isClientInitialized();
             return 0;
         }
-        return conf.networking_cache_size_in_mb;
+        return conf.networking_cache_size.toMebibytes();
     }
 
     public static boolean getFileCacheRoundUp()
@@ -2656,9 +3180,9 @@
         return conf.disk_optimization_estimate_percentile;
     }
 
-    public static long getTotalCommitlogSpaceInMB()
+    public static long getTotalCommitlogSpaceInMiB()
     {
-        return conf.commitlog_total_space_in_mb;
+        return conf.commitlog_total_space.toMebibytes();
     }
 
     public static boolean shouldMigrateKeycacheOnCompaction()
@@ -2671,13 +3195,21 @@
         conf.key_cache_migrate_during_compaction = migrateCacheEntry;
     }
 
-    public static int getSSTablePreemptiveOpenIntervalInMB()
+    /** Returns a negative number when preemptive opening is disabled */
+    public static int getSSTablePreemptiveOpenIntervalInMiB()
     {
-        return FBUtilities.isWindows ? -1 : conf.sstable_preemptive_open_interval_in_mb;
+        if (conf.sstable_preemptive_open_interval == null)
+            return -1;
+        return conf.sstable_preemptive_open_interval.toMebibytes();
     }
-    public static void setSSTablePreemptiveOpenIntervalInMB(int mb)
+
+    /** A negative value disables preemptive opening */
+    public static void setSSTablePreemptiveOpenIntervalInMiB(int mib)
     {
-        conf.sstable_preemptive_open_interval_in_mb = mb;
+        if (mib < 0)
+            conf.sstable_preemptive_open_interval = null;
+        else
+            conf.sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound(mib);
     }
 
     public static boolean getTrickleFsync()
@@ -2685,29 +3217,29 @@
         return conf.trickle_fsync;
     }
 
-    public static int getTrickleFsyncIntervalInKb()
+    public static int getTrickleFsyncIntervalInKiB()
     {
-        return conf.trickle_fsync_interval_in_kb;
+        return conf.trickle_fsync_interval.toKibibytes();
     }
 
-    public static long getKeyCacheSizeInMB()
+    public static long getKeyCacheSizeInMiB()
     {
-        return keyCacheSizeInMB;
+        return keyCacheSizeInMiB;
     }
 
-    public static long getIndexSummaryCapacityInMB()
+    public static long getIndexSummaryCapacityInMiB()
     {
-        return indexSummaryCapacityInMB;
+        return indexSummaryCapacityInMiB;
     }
 
     public static int getKeyCacheSavePeriod()
     {
-        return conf.key_cache_save_period;
+        return conf.key_cache_save_period.toSeconds();
     }
 
     public static void setKeyCacheSavePeriod(int keyCacheSavePeriod)
     {
-        conf.key_cache_save_period = keyCacheSavePeriod;
+        conf.key_cache_save_period = new DurationSpec.IntSecondsBound(keyCacheSavePeriod);
     }
 
     public static int getKeyCacheKeysToSave()
@@ -2725,25 +3257,25 @@
         return conf.row_cache_class_name;
     }
 
-    public static long getRowCacheSizeInMB()
+    public static long getRowCacheSizeInMiB()
     {
-        return conf.row_cache_size_in_mb;
+        return conf.row_cache_size.toMebibytes();
     }
 
     @VisibleForTesting
-    public static void setRowCacheSizeInMB(long val)
+    public static void setRowCacheSizeInMiB(long val)
     {
-        conf.row_cache_size_in_mb = val;
+        conf.row_cache_size = new DataStorageSpec.LongMebibytesBound(val);
     }
 
     public static int getRowCacheSavePeriod()
     {
-        return conf.row_cache_save_period;
+        return conf.row_cache_save_period.toSeconds();
     }
 
     public static void setRowCacheSavePeriod(int rowCacheSavePeriod)
     {
-        conf.row_cache_save_period = rowCacheSavePeriod;
+        conf.row_cache_save_period = new DurationSpec.IntSecondsBound(rowCacheSavePeriod);
     }
 
     public static int getRowCacheKeysToSave()
@@ -2751,9 +3283,14 @@
         return conf.row_cache_keys_to_save;
     }
 
-    public static long getCounterCacheSizeInMB()
+    public static long getPaxosCacheSizeInMiB()
     {
-        return counterCacheSizeInMB;
+        return paxosCacheSizeInMiB;
+    }
+
+    public static long getCounterCacheSizeInMiB()
+    {
+        return counterCacheSizeInMiB;
     }
 
     public static void setRowCacheKeysToSave(int rowCacheKeysToSave)
@@ -2763,23 +3300,23 @@
 
     public static int getCounterCacheSavePeriod()
     {
-        return conf.counter_cache_save_period;
+        return conf.counter_cache_save_period.toSeconds();
     }
 
     public static void setCounterCacheSavePeriod(int counterCacheSavePeriod)
     {
-        conf.counter_cache_save_period = counterCacheSavePeriod;
+        conf.counter_cache_save_period = new DurationSpec.IntSecondsBound(counterCacheSavePeriod);
     }
 
     public static int getCacheLoadTimeout()
     {
-        return conf.cache_load_timeout_seconds;
+        return conf.cache_load_timeout.toSeconds();
     }
 
     @VisibleForTesting
     public static void setCacheLoadTimeout(int seconds)
     {
-        conf.cache_load_timeout_seconds = seconds;
+        conf.cache_load_timeout = new DurationSpec.IntSecondsBound(seconds);
     }
 
     public static int getCounterCacheKeysToSave()
@@ -2794,7 +3331,7 @@
 
     public static int getStreamingKeepAlivePeriod()
     {
-        return conf.streaming_keep_alive_period_in_secs;
+        return conf.streaming_keep_alive_period.toSeconds();
     }
 
     public static int getStreamingConnectionsPerHost()
@@ -2832,14 +3369,14 @@
         return conf.inter_dc_tcp_nodelay;
     }
 
-    public static long getMemtableHeapSpaceInMb()
+    public static long getMemtableHeapSpaceInMiB()
     {
-        return conf.memtable_heap_space_in_mb;
+        return conf.memtable_heap_space.toMebibytes();
     }
 
-    public static long getMemtableOffheapSpaceInMb()
+    public static long getMemtableOffheapSpaceInMiB()
     {
-        return conf.memtable_offheap_space_in_mb;
+        return conf.memtable_offheap_space.toMebibytes();
     }
 
     public static Config.MemtableAllocationType getMemtableAllocationType()
@@ -2863,21 +3400,32 @@
         conf.repair_session_max_tree_depth = depth;
     }
 
-    public static int getRepairSessionSpaceInMegabytes()
+    public static int getRepairSessionSpaceInMiB()
     {
-        return conf.repair_session_space_in_mb;
+        return conf.repair_session_space.toMebibytes();
     }
 
-    public static void setRepairSessionSpaceInMegabytes(int sizeInMegabytes)
+    public static void setRepairSessionSpaceInMiB(int sizeInMiB)
     {
-        if (sizeInMegabytes < 1)
-            throw new ConfigurationException("Cannot set repair_session_space_in_mb to " + sizeInMegabytes +
-                                             " < 1 megabyte");
-        else if (sizeInMegabytes > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)))
-            logger.warn("A repair_session_space_in_mb of " + conf.repair_session_space_in_mb +
-                        " megabytes is likely to cause heap pressure.");
+        if (sizeInMiB < 1)
+            throw new ConfigurationException("Cannot set repair_session_space to " + sizeInMiB +
+                                             " < 1 mebibyte");
+        else if (sizeInMiB > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)))
+            logger.warn("A repair_session_space of " + conf.repair_session_space +
+                        " is likely to cause heap pressure.");
 
-        conf.repair_session_space_in_mb = sizeInMegabytes;
+        conf.repair_session_space = new DataStorageSpec.IntMebibytesBound(sizeInMiB);
+    }
+
+    public static int getPaxosRepairParallelism()
+    {
+        return conf.paxos_repair_parallelism;
+    }
+
+    public static void setPaxosRepairParallelism(int v)
+    {
+        Preconditions.checkArgument(v > 0);
+        conf.paxos_repair_parallelism = v;
     }
 
     public static Float getMemtableCleanupThreshold()
@@ -2885,14 +3433,27 @@
         return conf.memtable_cleanup_threshold;
     }
 
+    public static Map<String, InheritingClass> getMemtableConfigurations()
+    {
+        if (conf == null || conf.memtable == null)
+            return null;
+        return conf.memtable.configurations;
+    }
+
     public static int getIndexSummaryResizeIntervalInMinutes()
     {
-        return conf.index_summary_resize_interval_in_minutes;
+        if (conf.index_summary_resize_interval == null)
+            return -1;
+
+        return conf.index_summary_resize_interval.toMinutes();
     }
 
     public static void setIndexSummaryResizeIntervalInMinutes(int value)
     {
-        conf.index_summary_resize_interval_in_minutes = value;
+        if (value == -1)
+            conf.index_summary_resize_interval = null;
+        else
+            conf.index_summary_resize_interval = new DurationSpec.IntMinutesBound(value);
     }
 
     public static boolean hasLargeAddressSpace()
@@ -2913,52 +3474,47 @@
 
     public static int getTracetypeRepairTTL()
     {
-        return conf.tracetype_repair_ttl;
+        return conf.trace_type_repair_ttl.toSeconds();
     }
 
     public static int getTracetypeQueryTTL()
     {
-        return conf.tracetype_query_ttl;
+        return conf.trace_type_query_ttl.toSeconds();
     }
 
-    public static int getWindowsTimerInterval()
+    public static long getPreparedStatementsCacheSizeMiB()
     {
-        return conf.windows_timer_interval;
-    }
-
-    public static long getPreparedStatementsCacheSizeMB()
-    {
-        return preparedStatementsCacheSizeInMB;
+        return preparedStatementsCacheSizeInMiB;
     }
 
     public static boolean enableUserDefinedFunctions()
     {
-        return conf.enable_user_defined_functions;
+        return conf.user_defined_functions_enabled;
     }
 
     public static boolean enableScriptedUserDefinedFunctions()
     {
-        return conf.enable_scripted_user_defined_functions;
+        return conf.scripted_user_defined_functions_enabled;
     }
 
     public static void enableScriptedUserDefinedFunctions(boolean enableScriptedUserDefinedFunctions)
     {
-        conf.enable_scripted_user_defined_functions = enableScriptedUserDefinedFunctions;
+        conf.scripted_user_defined_functions_enabled = enableScriptedUserDefinedFunctions;
     }
 
     public static boolean enableUserDefinedFunctionsThreads()
     {
-        return conf.enable_user_defined_functions_threads;
+        return conf.user_defined_functions_threads_enabled;
     }
 
     public static long getUserDefinedFunctionWarnTimeout()
     {
-        return conf.user_defined_function_warn_timeout;
+        return conf.user_defined_functions_warn_timeout.toMilliseconds();
     }
 
     public static void setUserDefinedFunctionWarnTimeout(long userDefinedFunctionWarnTimeout)
     {
-        conf.user_defined_function_warn_timeout = userDefinedFunctionWarnTimeout;
+        conf.user_defined_functions_warn_timeout = new DurationSpec.LongMillisecondsBound(userDefinedFunctionWarnTimeout);
     }
 
     public static boolean allowInsecureUDFs()
@@ -2971,55 +3527,55 @@
         return conf.allow_extra_insecure_udfs;
     }
 
-    public static boolean getEnableMaterializedViews()
+    public static boolean getMaterializedViewsEnabled()
     {
-        return conf.enable_materialized_views;
+        return conf.materialized_views_enabled;
     }
 
-    public static void setEnableMaterializedViews(boolean enableMaterializedViews)
+    public static void setMaterializedViewsEnabled(boolean enableMaterializedViews)
     {
-        conf.enable_materialized_views = enableMaterializedViews;
+        conf.materialized_views_enabled = enableMaterializedViews;
     }
 
-    public static boolean getEnableSASIIndexes()
+    public static boolean getSASIIndexesEnabled()
     {
-        return conf.enable_sasi_indexes;
+        return conf.sasi_indexes_enabled;
     }
 
-    public static void setEnableSASIIndexes(boolean enableSASIIndexes)
+    public static void setSASIIndexesEnabled(boolean enableSASIIndexes)
     {
-        conf.enable_sasi_indexes = enableSASIIndexes;
+        conf.sasi_indexes_enabled = enableSASIIndexes;
     }
 
     public static boolean isTransientReplicationEnabled()
     {
-        return conf.enable_transient_replication;
+        return conf.transient_replication_enabled;
     }
 
     public static void setTransientReplicationEnabledUnsafe(boolean enabled)
     {
-        conf.enable_transient_replication = enabled;
+        conf.transient_replication_enabled = enabled;
     }
 
     public static boolean enableDropCompactStorage()
     {
-        return conf.enable_drop_compact_storage;
+        return conf.drop_compact_storage_enabled;
     }
 
     @VisibleForTesting
     public static void setEnableDropCompactStorage(boolean enableDropCompactStorage)
     {
-        conf.enable_drop_compact_storage = enableDropCompactStorage;
+        conf.drop_compact_storage_enabled = enableDropCompactStorage;
     }
 
     public static long getUserDefinedFunctionFailTimeout()
     {
-        return conf.user_defined_function_fail_timeout;
+        return conf.user_defined_functions_fail_timeout.toMilliseconds();
     }
 
     public static void setUserDefinedFunctionFailTimeout(long userDefinedFunctionFailTimeout)
     {
-        conf.user_defined_function_fail_timeout = userDefinedFunctionFailTimeout;
+        conf.user_defined_functions_fail_timeout = new DurationSpec.LongMillisecondsBound(userDefinedFunctionFailTimeout);
     }
 
     public static Config.UserFunctionTimeoutPolicy getUserFunctionTimeoutPolicy()
@@ -3034,12 +3590,12 @@
 
     public static long getGCLogThreshold()
     {
-        return conf.gc_log_threshold_in_ms;
+        return conf.gc_log_threshold.toMilliseconds();
     }
 
     public static void setGCLogThreshold(int gcLogThreshold)
     {
-        conf.gc_log_threshold_in_ms = gcLogThreshold;
+        conf.gc_log_threshold = new DurationSpec.IntMillisecondsBound(gcLogThreshold);
     }
 
     public static EncryptionContext getEncryptionContext()
@@ -3049,12 +3605,12 @@
 
     public static long getGCWarnThreshold()
     {
-        return conf.gc_warn_threshold_in_ms;
+        return conf.gc_warn_threshold.toMilliseconds();
     }
 
     public static void setGCWarnThreshold(int threshold)
     {
-        conf.gc_warn_threshold_in_ms = threshold;
+        conf.gc_warn_threshold = new DurationSpec.IntMillisecondsBound(threshold);
     }
 
     public static boolean isCDCEnabled()
@@ -3068,25 +3624,35 @@
         conf.cdc_enabled = cdc_enabled;
     }
 
+    public static boolean getCDCBlockWrites()
+    {
+        return conf.cdc_block_writes;
+    }
+
+    public static void setCDCBlockWrites(boolean val)
+    {
+        conf.cdc_block_writes = val;
+    }
+
     public static String getCDCLogLocation()
     {
         return conf.cdc_raw_directory;
     }
 
-    public static int getCDCSpaceInMB()
+    public static long getCDCTotalSpace()
     {
-        return conf.cdc_total_space_in_mb;
+        return conf.cdc_total_space.toBytesInLong();
     }
 
     @VisibleForTesting
-    public static void setCDCSpaceInMB(int input)
+    public static void setCDCTotalSpaceInMiB(int mibs)
     {
-        conf.cdc_total_space_in_mb = input;
+        conf.cdc_total_space = new DataStorageSpec.IntMebibytesBound(mibs);
     }
 
     public static int getCDCDiskCheckInterval()
     {
-        return conf.cdc_free_space_check_interval_ms;
+        return conf.cdc_free_space_check_interval.toMilliseconds();
     }
 
     @VisibleForTesting
@@ -3187,7 +3753,7 @@
         if (value > getConcurrentCompactors())
             logger.warn("max_concurrent_automatic_sstable_upgrades ({}) is larger than concurrent_compactors ({})", value, getConcurrentCompactors());
     }
-    
+
     public static AuditLogOptions getAuditLoggingOptions()
     {
         return conf.audit_logging_options;
@@ -3195,7 +3761,7 @@
 
     public static void setAuditLoggingOptions(AuditLogOptions auditLoggingOptions)
     {
-        conf.audit_logging_options = auditLoggingOptions;
+        conf.audit_logging_options = new AuditLogOptions.Builder(auditLoggingOptions).build();
     }
 
     public static Config.CorruptedTombstoneStrategy getCorruptedTombstoneStrategy()
@@ -3284,52 +3850,32 @@
         commitLogSegmentMgrProvider = provider;
     }
 
-    /**
-     * Class that primarily tracks overflow thresholds during conversions
-     */
-    private enum ByteUnit {
-        KIBI_BYTES(2048 * 1024, 1024),
-        MEBI_BYTES(2048, 1024 * 1024);
-
-        private final int overflowThreshold;
-        private final int multiplier;
-
-        ByteUnit(int t, int m)
-        {
-            this.overflowThreshold = t;
-            this.multiplier = m;
-        }
-
-        public int overflowThreshold()
-        {
-            return overflowThreshold;
-        }
-
-        public boolean willOverflowInBytes(int val)
-        {
-            return val >= overflowThreshold;
-        }
-
-        public long toBytes(int val)
-        {
-            return val * multiplier;
-        }
+    private static DataStorageSpec.IntKibibytesBound createIntKibibyteBoundAndEnsureItIsValidForByteConversion(int kibibytes, String propertyName)
+    {
+        DataStorageSpec.IntKibibytesBound intKibibytesBound = new DataStorageSpec.IntKibibytesBound(kibibytes);
+        checkValidForByteConversion(intKibibytesBound, propertyName);
+        return intKibibytesBound;
     }
 
     /**
      * Ensures passed in configuration value is positive and will not overflow when converted to Bytes
      */
-    private static void checkValidForByteConversion(int val, final String name, final ByteUnit unit)
+    private static void checkValidForByteConversion(final DataStorageSpec.IntKibibytesBound value, String name)
     {
-        if (val < 0 || unit.willOverflowInBytes(val))
-            throw new ConfigurationException(String.format("%s must be positive value < %d, but was %d",
-                                                           name, unit.overflowThreshold(), val), false);
+        long valueInBytes = value.toBytesInLong();
+        if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE - 1)
+        {
+            throw new ConfigurationException(String.format("%s must be positive value <= %dB, but was %dB",
+                                                           name,
+                                                           Integer.MAX_VALUE - 1,
+                                                           valueInBytes),
+                                             false);
+        }
     }
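
A standalone sketch of the bound enforced here, expressed without the DataStorageSpec wrapper and assuming nothing beyond plain Java: a kibibyte count is rejected once its byte equivalent no longer fits below Integer.MAX_VALUE:

// Illustrative only, not part of the patch: the overflow rule in isolation.
public class KibibyteBoundSketch
{
    static void checkConvertibleToBytes(int kibibytes, String name)
    {
        long bytes = kibibytes * 1024L;  // widen before multiplying to avoid int overflow
        if (bytes < 0 || bytes > Integer.MAX_VALUE - 1)
            throw new IllegalArgumentException(name + " must be a positive value <= " + (Integer.MAX_VALUE - 1)
                                               + "B, but was " + bytes + "B");
    }

    public static void main(String[] args)
    {
        checkConvertibleToBytes(1024, "batch_size_warn_threshold");                 // 1 MiB worth of KiB: accepted
        try
        {
            checkConvertibleToBytes(2 * 1024 * 1024, "batch_size_warn_threshold");  // 2 GiB worth of KiB: rejected
        }
        catch (IllegalArgumentException expected)
        {
            System.out.println(expected.getMessage());
        }
    }
}
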
 
     public static int getValidationPreviewPurgeHeadStartInSec()
     {
-        int seconds = conf.validation_preview_purge_head_start_in_sec;
-        return Math.max(seconds, 0);
+        return conf.validation_preview_purge_head_start.toSeconds();
     }
 
     public static boolean checkForDuplicateRowsDuringReads()
@@ -3423,26 +3969,51 @@
         conf.auto_optimise_preview_repair_streams = enabled;
     }
 
+    @Deprecated
     public static int tableCountWarnThreshold()
     {
         return conf.table_count_warn_threshold;
     }
 
+    @Deprecated // this warning threshold will be replaced by an equivalent guardrail
     public static void setTableCountWarnThreshold(int value)
     {
         conf.table_count_warn_threshold = value;
     }
 
+    @Deprecated // this warning threshold will be replaced by an equivalent guardrail
     public static int keyspaceCountWarnThreshold()
     {
         return conf.keyspace_count_warn_threshold;
     }
 
+    @Deprecated // this warning threshold will be replaced by an equivalent guardrail
     public static void setKeyspaceCountWarnThreshold(int value)
     {
         conf.keyspace_count_warn_threshold = value;
     }
 
+    public static ConsistencyLevel getAuthWriteConsistencyLevel()
+    {
+        return ConsistencyLevel.valueOf(conf.auth_write_consistency_level);
+    }
+
+    public static ConsistencyLevel getAuthReadConsistencyLevel()
+    {
+        return ConsistencyLevel.valueOf(conf.auth_read_consistency_level);
+    }
+
+    public static void setAuthWriteConsistencyLevel(ConsistencyLevel cl)
+    {
+        conf.auth_write_consistency_level = cl.toString();
+    }
+
+    public static void setAuthReadConsistencyLevel(ConsistencyLevel cl)
+    {
+        conf.auth_read_consistency_level = cl.toString();
+    }
+
     public static int getConsecutiveMessageErrorsThreshold()
     {
         return conf.consecutive_message_errors_threshold;
@@ -3453,6 +4024,239 @@
         conf.consecutive_message_errors_threshold = value;
     }
 
+    public static boolean getPartitionDenylistEnabled()
+    {
+        return conf.partition_denylist_enabled;
+    }
+
+    public static void setPartitionDenylistEnabled(boolean enabled)
+    {
+        conf.partition_denylist_enabled = enabled;
+    }
+
+    public static boolean getDenylistWritesEnabled()
+    {
+        return conf.denylist_writes_enabled;
+    }
+
+    public static void setDenylistWritesEnabled(boolean enabled)
+    {
+        conf.denylist_writes_enabled = enabled;
+    }
+
+    public static boolean getDenylistReadsEnabled()
+    {
+        return conf.denylist_reads_enabled;
+    }
+
+    public static void setDenylistReadsEnabled(boolean enabled)
+    {
+        conf.denylist_reads_enabled = enabled;
+    }
+
+    public static boolean getDenylistRangeReadsEnabled()
+    {
+        return conf.denylist_range_reads_enabled;
+    }
+
+    public static void setDenylistRangeReadsEnabled(boolean enabled)
+    {
+        conf.denylist_range_reads_enabled = enabled;
+    }
+
+    public static int getDenylistRefreshSeconds()
+    {
+        return conf.denylist_refresh.toSeconds();
+    }
+
+    public static void setDenylistRefreshSeconds(int seconds)
+    {
+        if (seconds <= 0)
+            throw new IllegalArgumentException("denylist_refresh must be a positive integer.");
+
+        conf.denylist_refresh = new DurationSpec.IntSecondsBound(seconds);
+    }
+
+    public static int getDenylistInitialLoadRetrySeconds()
+    {
+        return conf.denylist_initial_load_retry.toSeconds();
+    }
+
+    public static void setDenylistInitialLoadRetrySeconds(int seconds)
+    {
+        if (seconds <= 0)
+            throw new IllegalArgumentException("denylist_initial_load_retry must be a positive integer.");
+
+        conf.denylist_initial_load_retry = new DurationSpec.IntSecondsBound(seconds);
+    }
+
+    public static ConsistencyLevel getDenylistConsistencyLevel()
+    {
+        return conf.denylist_consistency_level;
+    }
+
+    public static void setDenylistConsistencyLevel(ConsistencyLevel cl)
+    {
+        conf.denylist_consistency_level = cl;
+    }
+
+    public static int getDenylistMaxKeysPerTable()
+    {
+        return conf.denylist_max_keys_per_table;
+    }
+
+    public static void setDenylistMaxKeysPerTable(int value)
+    {
+        if (value <= 0)
+            throw new IllegalArgumentException("denylist_max_keys_per_table must be a positive integer.");
+        conf.denylist_max_keys_per_table = value;
+    }
+
+    public static int getDenylistMaxKeysTotal()
+    {
+        return conf.denylist_max_keys_total;
+    }
+
+    public static void setDenylistMaxKeysTotal(int value)
+    {
+        if (value <= 0)
+            throw new IllegalArgumentException("denylist_max_keys_total must be a positive integer.");
+        conf.denylist_max_keys_total = value;
+    }
+
+    public static boolean getAuthCacheWarmingEnabled()
+    {
+        return conf.auth_cache_warming_enabled;
+    }
+
+    public static SubnetGroups getClientErrorReportingExclusions()
+    {
+        return conf.client_error_reporting_exclusions;
+    }
+
+    public static SubnetGroups getInternodeErrorReportingExclusions()
+    {
+        return conf.internode_error_reporting_exclusions;
+    }
+
+    public static boolean getReadThresholdsEnabled()
+    {
+        return conf.read_thresholds_enabled;
+    }
+
+    public static void setReadThresholdsEnabled(boolean value)
+    {
+        if (conf.read_thresholds_enabled != value)
+        {
+            conf.read_thresholds_enabled = value;
+            logger.info("updated read_thresholds_enabled to {}", value);
+        }
+    }
+
+    @Nullable
+    public static DataStorageSpec.LongBytesBound getCoordinatorReadSizeWarnThreshold()
+    {
+        return conf.coordinator_read_size_warn_threshold;
+    }
+
+    public static void setCoordinatorReadSizeWarnThreshold(@Nullable DataStorageSpec.LongBytesBound value)
+    {
+        logger.info("updating  coordinator_read_size_warn_threshold to {}", value);
+        conf.coordinator_read_size_warn_threshold = value;
+    }
+
+    @Nullable
+    public static DataStorageSpec.LongBytesBound getCoordinatorReadSizeFailThreshold()
+    {
+        return conf.coordinator_read_size_fail_threshold;
+    }
+
+    public static void setCoordinatorReadSizeFailThreshold(@Nullable DataStorageSpec.LongBytesBound value)
+    {
+        logger.info("updating  coordinator_read_size_fail_threshold to {}", value);
+        conf.coordinator_read_size_fail_threshold = value;
+    }
+
+    @Nullable
+    public static DataStorageSpec.LongBytesBound getLocalReadSizeWarnThreshold()
+    {
+        return conf.local_read_size_warn_threshold;
+    }
+
+    public static void setLocalReadSizeWarnThreshold(@Nullable DataStorageSpec.LongBytesBound value)
+    {
+        logger.info("updating  local_read_size_warn_threshold to {}", value);
+        conf.local_read_size_warn_threshold = value;
+    }
+
+    @Nullable
+    public static DataStorageSpec.LongBytesBound getLocalReadSizeFailThreshold()
+    {
+        return conf.local_read_size_fail_threshold;
+    }
+
+    public static void setLocalReadSizeFailThreshold(@Nullable DataStorageSpec.LongBytesBound value)
+    {
+        logger.info("updating  local_read_size_fail_threshold to {}", value);
+        conf.local_read_size_fail_threshold = value;
+    }
+
+    @Nullable
+    public static DataStorageSpec.LongBytesBound getRowIndexReadSizeWarnThreshold()
+    {
+        return conf.row_index_read_size_warn_threshold;
+    }
+
+    public static void setRowIndexReadSizeWarnThreshold(@Nullable DataStorageSpec.LongBytesBound value)
+    {
+        logger.info("updating  row_index_size_warn_threshold to {}", value);
+        conf.row_index_read_size_warn_threshold = value;
+    }
+
+    @Nullable
+    public static DataStorageSpec.LongBytesBound getRowIndexReadSizeFailThreshold()
+    {
+        return conf.row_index_read_size_fail_threshold;
+    }
+
+    public static void setRowIndexReadSizeFailThreshold(@Nullable DataStorageSpec.LongBytesBound value)
+    {
+        logger.info("updating  row_index_read_size_fail_threshold to {}", value);
+        conf.row_index_read_size_fail_threshold = value;
+    }
+
+    public static int getDefaultKeyspaceRF() { return conf.default_keyspace_rf; }
+
+    public static void setDefaultKeyspaceRF(int value) throws IllegalArgumentException
+    {
+        if (value < 1)
+        {
+            throw new IllegalArgumentException("default_keyspace_rf cannot be less than 1");
+        }
+
+        if (value < guardrails.getMinimumReplicationFactorFailThreshold())
+        {
+            throw new IllegalArgumentException(String.format("default_keyspace_rf to be set (%d) cannot be less than minimum_replication_factor_fail_threshold (%d)", value, guardrails.getMinimumReplicationFactorFailThreshold()));
+        }
+
+        conf.default_keyspace_rf = value;
+    }
+
+    public static boolean getUseStatementsEnabled()
+    {
+        return conf.use_statements_enabled;
+    }
+
+    public static void setUseStatementsEnabled(boolean enabled)
+    {
+        if (enabled != conf.use_statements_enabled)
+        {
+            logger.info("Setting use_statements_enabled to {}", enabled);
+            conf.use_statements_enabled = enabled;
+        }
+    }
+
     public static boolean getForceNewPreparedStatementBehaviour()
     {
         return conf.force_new_prepared_statement_behaviour;
@@ -3466,4 +4270,137 @@
             conf.force_new_prepared_statement_behaviour = value;
         }
     }
+
+    public static DurationSpec.LongNanosecondsBound getStreamingStateExpires()
+    {
+        return conf.streaming_state_expires;
+    }
+
+    public static void setStreamingStateExpires(DurationSpec.LongNanosecondsBound duration)
+    {
+        if (!conf.streaming_state_expires.equals(Objects.requireNonNull(duration, "duration")))
+        {
+            logger.info("Setting streaming_state_expires to {}", duration);
+            conf.streaming_state_expires = duration;
+        }
+    }
+
+    public static DataStorageSpec.LongBytesBound getStreamingStateSize()
+    {
+        return conf.streaming_state_size;
+    }
+
+    public static void setStreamingStateSize(DataStorageSpec.LongBytesBound size)
+    {
+        if (!conf.streaming_state_size.equals(Objects.requireNonNull(size, "size")))
+        {
+            logger.info("Setting streaming_state_size to {}", size);
+            conf.streaming_state_size = size;
+        }
+    }
+
+    public static boolean getStreamingStatsEnabled()
+    {
+        return conf.streaming_stats_enabled;
+    }
+
+    public static void setStreamingStatsEnabled(boolean streamingStatsEnabled)
+    {
+        if (conf.streaming_stats_enabled != streamingStatsEnabled)
+        {
+            logger.info("Setting streaming_stats_enabled to {}", streamingStatsEnabled);
+            conf.streaming_stats_enabled = streamingStatsEnabled;
+        }
+    }
+
+    public static DurationSpec.IntSecondsBound getStreamingSlowEventsLogTimeout()
+    {
+        return conf.streaming_slow_events_log_timeout;
+    }
+
+    public static void setStreamingSlowEventsLogTimeout(String value)
+    {
+        DurationSpec.IntSecondsBound next = new DurationSpec.IntSecondsBound(value);
+        if (!conf.streaming_slow_events_log_timeout.equals(next))
+        {
+            logger.info("Setting streaming_slow_events_log_timeout to {}", value);
+            conf.streaming_slow_events_log_timeout = next;
+        }
+    }
+
+    public static boolean isUUIDSSTableIdentifiersEnabled()
+    {
+        return conf.uuid_sstable_identifiers_enabled;
+    }
+
+    public static DurationSpec.LongNanosecondsBound getRepairStateExpires()
+    {
+        return conf.repair_state_expires;
+    }
+
+    public static void setRepairStateExpires(DurationSpec.LongNanosecondsBound duration)
+    {
+        if (!conf.repair_state_expires.equals(Objects.requireNonNull(duration, "duration")))
+        {
+            logger.info("Setting repair_state_expires to {}", duration);
+            conf.repair_state_expires = duration;
+        }
+    }
+
+    public static int getRepairStateSize()
+    {
+        return conf.repair_state_size;
+    }
+
+    public static void setRepairStateSize(int size)
+    {
+        if (conf.repair_state_size != size)
+        {
+            logger.info("Setting repair_state_size to {}", size);
+            conf.repair_state_size = size;
+        }
+    }
+
+    public static boolean topPartitionsEnabled()
+    {
+        return conf.top_partitions_enabled;
+    }
+
+    public static int getMaxTopSizePartitionCount()
+    {
+        return conf.max_top_size_partition_count;
+    }
+
+    public static void setMaxTopSizePartitionCount(int value)
+    {
+        conf.max_top_size_partition_count = value;
+    }
+
+    public static int getMaxTopTombstonePartitionCount()
+    {
+        return conf.max_top_tombstone_partition_count;
+    }
+
+    public static void setMaxTopTombstonePartitionCount(int value)
+    {
+        conf.max_top_tombstone_partition_count = value;
+    }
+
+    public static DataStorageSpec.LongBytesBound getMinTrackedPartitionSizeInBytes()
+    {
+        return conf.min_tracked_partition_size;
+    }
+
+    public static void setMinTrackedPartitionSizeInBytes(DataStorageSpec.LongBytesBound spec)
+    {
+        conf.min_tracked_partition_size = spec;
+    }
+
+    public static long getMinTrackedPartitionTombstoneCount()
+    {
+        return conf.min_tracked_partition_tombstone_count;
+    }
+
+    public static void setMinTrackedPartitionTombstoneCount(long value)
+    {
+        conf.min_tracked_partition_tombstone_count = value;
+    }
 }
diff --git a/src/java/org/apache/cassandra/config/DefaultLoader.java b/src/java/org/apache/cassandra/config/DefaultLoader.java
new file mode 100644
index 0000000..6767d8a
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/DefaultLoader.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.beans.IntrospectionException;
+import java.beans.Introspector;
+import java.beans.PropertyDescriptor;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.yaml.snakeyaml.error.YAMLException;
+import org.yaml.snakeyaml.introspector.FieldProperty;
+import org.yaml.snakeyaml.introspector.MethodProperty;
+import org.yaml.snakeyaml.introspector.Property;
+
+import static org.apache.cassandra.utils.FBUtilities.camelToSnake;
+
+public class DefaultLoader implements Loader
+{
+    @Override
+    public Map<String, Property> getProperties(Class<?> root)
+    {
+        Map<String, Property> properties = new HashMap<>();
+        for (Class<?> c = root; c != null; c = c.getSuperclass())
+        {
+            for (Field f : c.getDeclaredFields())
+            {
+                String name = camelToSnake(f.getName());
+                int modifiers = f.getModifiers();
+                if (!Modifier.isStatic(modifiers)
+                    && !f.isAnnotationPresent(JsonIgnore.class)
+                    && !Modifier.isTransient(modifiers)
+                    && Modifier.isPublic(modifiers)
+                    && !properties.containsKey(name))
+                    properties.put(name, new FieldProperty(f));
+            }
+        }
+        try
+        {
+            PropertyDescriptor[] descriptors = Introspector.getBeanInfo(root).getPropertyDescriptors();
+            if (descriptors != null)
+            {
+                for (PropertyDescriptor d : descriptors)
+                {
+                    String name = camelToSnake(d.getName());
+                    Method writeMethod = d.getWriteMethod();
+                    // if the property can't be written to, then ignore it
+                    if (writeMethod == null || writeMethod.isAnnotationPresent(JsonIgnore.class))
+                        continue;
+                    // if read method exists, override the field version in case get/set does validation
+                    if (properties.containsKey(name) && (d.getReadMethod() == null || d.getReadMethod().isAnnotationPresent(JsonIgnore.class)))
+                        continue;
+                    d.setName(name);
+                    properties.put(name, new MethodPropertyPlus(d));
+                }
+            }
+        }
+        catch (IntrospectionException e)
+        {
+            throw new RuntimeException(e);
+        }
+        return properties;
+    }
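+    /*
+     * Illustrative example, assuming a hypothetical config class (not part of this patch):
+     *
+     *   public class Example { public int pageSize; public int getPageSize() { ... } public void setPageSize(int v) { ... } }
+     *
+     * getProperties(Example.class) would expose a single property named "page_size" (via camelToSnake); because a
+     * readable and writable bean property exists, the MethodPropertyPlus version replaces the raw field entry, so any
+     * validation inside the setter runs while the YAML is being loaded.
+     */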
+
+    /**
+     * MethodProperty.get() behaves differently from .set() and does a poor job of surfacing the cause of a failure;
+     * this class rewrites the resulting errors so they are easier to reason about.
+     */
+    private static class MethodPropertyPlus extends MethodProperty
+    {
+        private final Method readMethod;
+
+        public MethodPropertyPlus(PropertyDescriptor property)
+        {
+            super(property);
+            this.readMethod = property.getReadMethod();
+        }
+
+        @Override
+        public Object get(Object object)
+        {
+            if (!isReadable())
+                throw new YAMLException("No readable property '" + getName() + "' on class: " + object.getClass().getName());
+
+            try
+            {
+                return readMethod.invoke(object);
+            }
+            catch (IllegalAccessException e)
+            {
+                throw new YAMLException("Unable to find getter for property '" + getName() + "' on class " + object.getClass().getName(), e);
+            }
+            catch (InvocationTargetException e)
+            {
+                throw new YAMLException("Failed calling getter for property '" + getName() + "' on class " + object.getClass().getName(), e.getCause());
+            }
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/config/DurationSpec.java b/src/java/org/apache/cassandra/config/DurationSpec.java
new file mode 100644
index 0000000..10d56c2
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/DurationSpec.java
@@ -0,0 +1,617 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import com.google.common.primitives.Ints;
+
+import static java.util.concurrent.TimeUnit.DAYS;
+import static java.util.concurrent.TimeUnit.HOURS;
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+/**
+ * Represents a positive time duration. Wrapper class for Cassandra duration configuration parameters, allowing users
+ * to supply values in cassandra.yaml with a unit of their choice from the available options. (CASSANDRA-15234)
+ */
+public abstract class DurationSpec
+{
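+    /*
+     * Illustrative examples only: the accepted string form is a non-negative integer immediately followed by one of
+     * the unit symbols handled by fromSymbol() below, e.g.
+     *
+     *   new DurationSpec.LongSecondsBound("10s");        // 10 seconds
+     *   new DurationSpec.IntMillisecondsBound("500ms");  // 500 milliseconds
+     *   new DurationSpec.IntMinutesBound("2h");          // 2 hours, supplied in hours but bounded in minutes
+     */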
+    /**
+     * The Regexp used to parse the duration provided as String.
+     */
+    private static final Pattern UNITS_PATTERN = Pattern.compile("^(\\d+)(d|h|s|ms|us|µs|ns|m)$");
+
+    private final long quantity;
+
+    private final TimeUnit unit;
+
+    private DurationSpec(long quantity, TimeUnit unit, TimeUnit minUnit, long max)
+    {
+        this.quantity = quantity;
+        this.unit = unit;
+
+        validateMinUnit(unit, minUnit, quantity + " " + unit);
+        validateQuantity(quantity, unit, minUnit, max);
+    }
+
+    private DurationSpec(double quantity, TimeUnit unit, TimeUnit minUnit, long max)
+    {
+        this(Math.round(quantity), unit, minUnit, max);
+    }
+
+    private DurationSpec(String value, TimeUnit minUnit)
+    {
+        Matcher matcher = UNITS_PATTERN.matcher(value);
+
+        if (matcher.find())
+        {
+            quantity = Long.parseLong(matcher.group(1));
+            unit = fromSymbol(matcher.group(2));
+
+            // this constructor is used only by extended classes for min unit; upper bound and min unit are guarded there accordingly
+        }
+        else
+        {
+            throw new IllegalArgumentException("Invalid duration: " + value + " Accepted units:" + acceptedUnits(minUnit) +
+                                               " where case matters and only non-negative values.");
+        }
+    }
+
+    private DurationSpec(String value, TimeUnit minUnit, long max)
+    {
+        this(value, minUnit);
+
+        validateMinUnit(unit, minUnit, value);
+        validateQuantity(value, quantity(), unit(), minUnit, max);
+    }
+
+    private static void validateMinUnit(TimeUnit unit, TimeUnit minUnit, String value)
+    {
+        if (unit.compareTo(minUnit) < 0)
+            throw new IllegalArgumentException(String.format("Invalid duration: %s Accepted units:%s", value, acceptedUnits(minUnit)));
+    }
+
+    private static String acceptedUnits(TimeUnit minUnit)
+    {
+        TimeUnit[] units = TimeUnit.values();
+        return Arrays.toString(Arrays.copyOfRange(units, minUnit.ordinal(), units.length));
+    }
+
+    private static void validateQuantity(String value, long quantity, TimeUnit sourceUnit, TimeUnit minUnit, long max)
+    {
+        // no need to validate negatives here: the regex does not allow them in the first place
+
+        if (minUnit.convert(quantity, sourceUnit) >= max)
+            throw new IllegalArgumentException("Invalid duration: " + value + ". It shouldn't be more than " +
+                                             (max - 1) + " in " + minUnit.name().toLowerCase());
+    }
+
+    private static void validateQuantity(long quantity, TimeUnit sourceUnit, TimeUnit minUnit, long max)
+    {
+        if (quantity < 0)
+            throw new IllegalArgumentException("Invalid duration: value must be non-negative");
+
+        if (minUnit.convert(quantity, sourceUnit) >= max)
+            throw new IllegalArgumentException(String.format("Invalid duration: %d %s. It shouldn't be more than %d in %s",
+                                                           quantity, sourceUnit.name().toLowerCase(),
+                                                           max - 1, minUnit.name().toLowerCase()));
+    }
+
+    // get vs no-get prefix is not consistent in the code base, but for classes involved in config parsing it is
+    // important to be explicit about get/set, as this changes how parsing is done; this class is a data type, not a
+    // nested config type, and get/set prefixes could confuse the parser into treating it as one
+    public long quantity()
+    {
+        return quantity;
+    }
+
+    public TimeUnit unit()
+    {
+        return unit;
+    }
+
+    /**
+     * @param symbol the time unit symbol
+     * @return the time unit associated to the specified symbol
+     */
+    static TimeUnit fromSymbol(String symbol)
+    {
+        switch (symbol.toLowerCase())
+        {
+            case "d": return DAYS;
+            case "h": return HOURS;
+            case "m": return MINUTES;
+            case "s": return SECONDS;
+            case "ms": return MILLISECONDS;
+            case "us":
+            case "µs": return MICROSECONDS;
+            case "ns": return TimeUnit.NANOSECONDS;
+        }
+        throw new IllegalArgumentException(String.format("Unsupported time unit: %s. Supported units are: %s",
+                                                       symbol, Arrays.stream(TimeUnit.values())
+                                                                     .map(DurationSpec::symbol)
+                                                                     .collect(Collectors.joining(", "))));
+    }
+
+    /**
+     * @param targetUnit the time unit
+     * @return this duration in the specified time unit
+     */
+    public long to(TimeUnit targetUnit)
+    {
+        return targetUnit.convert(quantity, unit);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        // Milliseconds seems to be a reasonable tradeoff
+        return Objects.hash(unit.toMillis(quantity));
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        if (this == obj)
+            return true;
+
+        if (!(obj instanceof DurationSpec))
+            return false;
+
+        DurationSpec other = (DurationSpec) obj;
+        if (unit == other.unit)
+            return quantity == other.quantity;
+
+        // Due to overflows we can only guarantee that the 2 durations are equal if we get the same results
+        // doing the conversion in both directions.
+        return unit.convert(other.quantity, other.unit) == quantity && other.unit.convert(quantity, unit) == other.quantity;
+    }
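+    /*
+     * Illustrative example of the cross-unit comparison above: the two specs below describe the same duration in
+     * different units, so both conversion directions agree and equals() returns true.
+     *
+     *   new DurationSpec.LongSecondsBound("60s").equals(new DurationSpec.LongSecondsBound(1, TimeUnit.MINUTES)) // true
+     */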
+
+    @Override
+    public String toString()
+    {
+        return quantity + symbol(unit);
+    }
+
+    /**
+     * Returns the symbol associated to the specified unit
+     *
+     * @param unit the time unit
+     * @return the time unit symbol
+     */
+    static String symbol(TimeUnit unit)
+    {
+        switch (unit)
+        {
+            case DAYS: return "d";
+            case HOURS: return "h";
+            case MINUTES: return "m";
+            case SECONDS: return "s";
+            case MILLISECONDS: return "ms";
+            case MICROSECONDS: return "us";
+            case NANOSECONDS: return "ns";
+        }
+        throw new AssertionError();
+    }
+
+    /**
+     * Represents a duration used for Cassandra configuration. The bound is [0, Long.MAX_VALUE) in nanoseconds.
+     * If the user sets a different unit - we still validate that converted to nanoseconds the quantity will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class LongNanosecondsBound extends DurationSpec
+    {
+        /**
+         * Creates a {@code DurationSpec.LongNanosecondsBound} of the specified amount.
+         * The bound is [0, Long.MAX_VALUE) in nanoseconds.
+         *
+         * @param value the duration
+         */
+        public LongNanosecondsBound(String value)
+        {
+            super(value, NANOSECONDS, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.LongNanosecondsBound} of the specified amount in the specified unit.
+         * The bound is [0, Long.MAX_VALUE) in nanoseconds.
+         *
+         * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in nanoseconds
+         * @param unit in which the provided quantity is
+         */
+        public LongNanosecondsBound(long quantity, TimeUnit unit)
+        {
+            super(quantity, unit, NANOSECONDS, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.LongNanosecondsBound} of the specified amount in nanoseconds.
+         * The bound is [0, Long.MAX_VALUE) in nanoseconds.
+         *
+         * @param nanoseconds where nanoseconds shouldn't be bigger than Long.MAX_VALUE-1
+         */
+        public LongNanosecondsBound(long nanoseconds)
+        {
+            this(nanoseconds, NANOSECONDS);
+        }
+
+        /**
+         * @return this duration in number of nanoseconds
+         */
+        public long toNanoseconds()
+        {
+            return unit().toNanos(quantity());
+        }
+    }
+
+    /**
+     * Represents a duration used for Cassandra configuration. The bound is [0, Long.MAX_VALUE) in milliseconds.
+     * If the user sets a different unit - we still validate that converted to milliseconds the quantity will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class LongMillisecondsBound extends DurationSpec
+    {
+        /**
+         * Creates a {@code DurationSpec.LongMillisecondsBound} of the specified amount.
+         * The bound is [0, Long.MAX_VALUE) in milliseconds.
+         *
+         * @param value the duration
+         */
+        public LongMillisecondsBound(String value)
+        {
+            super(value, MILLISECONDS, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.LongMillisecondsBound} of the specified amount in the specified unit.
+         * The bound is [0, Long.MAX_VALUE) in milliseconds.
+         *
+         * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in milliseconds
+         * @param unit in which the provided quantity is
+         */
+        public LongMillisecondsBound(long quantity, TimeUnit unit)
+        {
+            super(quantity, unit, MILLISECONDS, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.LongMillisecondsBound} of the specified amount in milliseconds.
+         * The bound is [0, Long.MAX_VALUE) in milliseconds.
+         *
+         * @param milliseconds where milliseconds shouldn't be bigger than Long.MAX_VALUE-1
+         */
+        public LongMillisecondsBound(long milliseconds)
+        {
+            this(milliseconds, MILLISECONDS);
+        }
+
+        /**
+         * @return this duration in number of milliseconds
+         */
+        public long toMilliseconds()
+        {
+            return unit().toMillis(quantity());
+        }
+    }
+
+    /**
+     * Represents a duration used for Cassandra configuration. The bound is [0, Long.MAX_VALUE) in seconds.
+     * If the user sets a different unit - we still validate that converted to seconds the quantity will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class LongSecondsBound extends DurationSpec
+    {
+        /**
+         * Creates a {@code DurationSpec.LongSecondsBound} of the specified amount.
+         * The bound is [0, Long.MAX_VALUE) in seconds.
+         *
+         * @param value the duration
+         */
+        public LongSecondsBound(String value)
+        {
+            super(value, SECONDS, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.LongSecondsBound} of the specified amount in the specified unit.
+         * The bound is [0, Long.MAX_VALUE) in seconds.
+         *
+         * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in seconds
+         * @param unit in which the provided quantity is
+         */
+        public LongSecondsBound(long quantity, TimeUnit unit)
+        {
+            super(quantity, unit, SECONDS, Long.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.LongSecondsBound} of the specified amount in seconds.
+         * The bound is [0, Long.MAX_VALUE) in seconds.
+         *
+         * @param seconds where seconds shouldn't be bigger than Long.MAX_VALUE-1
+         */
+        public LongSecondsBound(long seconds)
+        {
+            this(seconds, SECONDS);
+        }
+
+        /**
+         * @return this duration in number of milliseconds
+         */
+        public long toMilliseconds()
+        {
+            return unit().toMillis(quantity());
+        }
+
+        /**
+         * @return this duration in number of seconds
+         */
+        public long toSeconds()
+        {
+            return unit().toSeconds(quantity());
+        }
+    }
+
+    /**
+     * Represents a duration used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in minutes.
+     * If the user sets a different unit - we still validate that converted to minutes the quantity will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class IntMinutesBound extends DurationSpec
+    {
+        /**
+         * Creates a {@code DurationSpec.IntMinutesBound} of the specified amount.
+         * The bound is [0, Integer.MAX_VALUE) in minutes.
+         *
+         * @param value the duration
+         */
+        public IntMinutesBound(String value)
+        {
+            super(value, MINUTES, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.IntMinutesBound} of the specified amount in the specified unit.
+         * The bound is [0, Integer.MAX_VALUE) in minutes.
+         *
+         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in minutes
+         * @param unit in which the provided quantity is
+         */
+        public IntMinutesBound(long quantity, TimeUnit unit)
+        {
+            super(quantity, unit, MINUTES, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.IntMinutesBound} of the specified amount in minutes.
+         * The bound is [0, Integer.MAX_VALUE) in minutes.
+         *
+         * @param minutes where minutes shouldn't be bigger than Integer.MAX_VALUE-1
+         */
+        public IntMinutesBound(long minutes)
+        {
+            this(minutes, MINUTES);
+        }
+
+        /**
+         * Returns this duration in number of milliseconds as an {@code int}
+         *
+         * @return this duration in number of milliseconds or {@code Integer.MAX_VALUE} if the number of milliseconds is too large.
+         */
+        public int toMilliseconds()
+        {
+            return Ints.saturatedCast(unit().toMillis(quantity()));
+        }
+
+        /**
+         * Returns this duration in number of seconds as an {@code int}
+         *
+         * @return this duration in number of seconds or {@code Integer.MAX_VALUE} if the number of seconds is too large.
+         */
+        public int toSeconds()
+        {
+            return Ints.saturatedCast(unit().toSeconds(quantity()));
+        }
+
+        /**
+         * Returns this duration in number of minutes as an {@code int}
+         *
+         * @return this duration in number of minutes or {@code Integer.MAX_VALUE} if the number of minutes is too large.
+         */
+        public int toMinutes()
+        {
+            return Ints.saturatedCast(unit().toMinutes(quantity()));
+        }
+    }
+
+    /**
+     * Represents a duration used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in seconds.
+     * If the user sets a different unit - we still validate that converted to seconds the quantity will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class IntSecondsBound extends DurationSpec
+    {
+        private static final Pattern VALUES_PATTERN = Pattern.compile("\\d+");
+
+        /**
+         * Creates a {@code DurationSpec.IntSecondsBound} of the specified amount.
+         * The bound is [0, Integer.MAX_VALUE) in seconds.
+         *
+         * @param value the duration
+         */
+        public IntSecondsBound(String value)
+        {
+            super(value, SECONDS, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.IntSecondsBound} of the specified amount in the specified unit.
+         * The bound is [0, Integer.MAX_VALUE) in seconds.
+         *
+         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in seconds
+         * @param unit in which the provided quantity is
+         */
+        public IntSecondsBound(long quantity, TimeUnit unit)
+        {
+            super(quantity, unit, SECONDS, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.IntSecondsBound} of the specified amount in seconds.
+         * The bound is [0, Integer.MAX_VALUE) in seconds.
+         *
+         * @param seconds where seconds shouldn't be bigger than Integer.MAX_VALUE-1
+         */
+        public IntSecondsBound(long seconds)
+        {
+            this(seconds, SECONDS);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.IntSecondsBound} of the specified amount in seconds, expressed either as the
+         * number of seconds without unit, or as a regular quantity with unit.
+         * Used in the Converters for a few parameters which changed only type, but not names
+         * The bound is [0, Integer.MAX_VALUE) in seconds.
+         *
+         * @param value where value shouldn't be bigger than Integer.MAX_VALUE-1 in seconds
+         */
+        public static IntSecondsBound inSecondsString(String value)
+        {
+            //parse the string field value
+            Matcher matcher = VALUES_PATTERN.matcher(value);
+
+            long seconds;
+            //if the provided string value is just a number, then we create a IntSecondsBound value in seconds
+            if (matcher.matches())
+            {
+                seconds = Integer.parseInt(value);
+                return new IntSecondsBound(seconds, TimeUnit.SECONDS);
+            }
+
+            //otherwise we just use the standard constructors
+            return new IntSecondsBound(value);
+        }
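+        /*
+         * Illustrative usage: both forms below produce a 30 second bound, which is what the converters rely on for
+         * parameters that kept their old, unit-less names while changing type.
+         *
+         *   IntSecondsBound.inSecondsString("30");   // bare number, interpreted as seconds
+         *   IntSecondsBound.inSecondsString("30s");  // regular quantity with unit
+         */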
+
+        /**
+         * Returns this duration in the number of nanoseconds as an {@code int}
+         *
+         * @return this duration in number of nanoseconds or {@code Integer.MAX_VALUE} if the number of nanoseconds is too large.
+         */
+        public int toNanoseconds()
+        {
+            return Ints.saturatedCast(unit().toNanos(quantity()));
+        }
+
+        /**
+         * Returns this duration in number of milliseconds as an {@code int}
+         *
+         * @return this duration in number of milliseconds or {@code Integer.MAX_VALUE} if the number of milliseconds is too large.
+         */
+        public int toMilliseconds()
+        {
+            return Ints.saturatedCast(unit().toMillis(quantity()));
+        }
+
+        /**
+         * Returns this duration in number of seconds as an {@code int}
+         *
+         * @return this duration in number of seconds or {@code Integer.MAX_VALUE} if the number of seconds is too large.
+         */
+        public int toSeconds()
+        {
+            return Ints.saturatedCast(unit().toSeconds(quantity()));
+        }
+    }
+
+    /**
+     * Represents a duration used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in milliseconds.
+     * If the user sets a different unit - we still validate that converted to milliseconds the quantity will not exceed
+     * that upper bound. (CASSANDRA-17571)
+     */
+    public final static class IntMillisecondsBound extends DurationSpec
+    {
+        /**
+         * Creates a {@code DurationSpec.IntMillisecondsBound} of the specified amount.
+         * The bound is [0, Integer.MAX_VALUE) in milliseconds.
+         *
+         * @param value the duration
+         */
+        public IntMillisecondsBound(String value)
+        {
+            super(value, MILLISECONDS, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.IntMillisecondsBound} of the specified amount in the specified unit.
+         * The bound is [0, Integer.MAX_VALUE) in milliseconds.
+         *
+         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in milliseconds
+         * @param unit in which the provided quantity is
+         */
+        public IntMillisecondsBound(long quantity, TimeUnit unit)
+        {
+            super(quantity, unit, MILLISECONDS, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Creates a {@code DurationSpec.IntMillisecondsBound} of the specified amount in milliseconds.
+         * The bound is [0, Integer.MAX_VALUE) in milliseconds.
+         *
+         * @param milliseconds where milliseconds shouldn't be bigger than Integer.MAX_VALUE-1
+         */
+        public IntMillisecondsBound(long milliseconds)
+        {
+            this(milliseconds, MILLISECONDS);
+        }
+
+        /**
+         * This constructor exists only for backward compatibility with the pre-4.1 commitlog_sync_group_window_in_ms parameter.
+         * Creates a {@code DurationSpec.IntMillisecondsBound} of the specified amount in the specified unit.
+         *
+         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in milliseconds
+         * @param unit in which the provided quantity is
+         */
+        public IntMillisecondsBound(double quantity, TimeUnit unit)
+        {
+            super(quantity, unit, MILLISECONDS, Integer.MAX_VALUE);
+        }
+
+        /**
+         * Returns this duration in number of milliseconds as an {@code int}
+         *
+         * @return this duration in number of milliseconds or {@code Integer.MAX_VALUE} if the number of milliseconds is too large.
+         */
+        public int toMilliseconds()
+        {
+            return Ints.saturatedCast(unit().toMillis(quantity()));
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/config/EncryptionOptions.java b/src/java/org/apache/cassandra/config/EncryptionOptions.java
index 93668d9..2610ff6 100644
--- a/src/java/org/apache/cassandra/config/EncryptionOptions.java
+++ b/src/java/org/apache/cassandra/config/EncryptionOptions.java
@@ -17,9 +17,15 @@
  */
 package org.apache.cassandra.config;
 
-import java.io.File;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
+import java.util.Set;
+
+import javax.annotation.Nullable;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
@@ -27,17 +33,27 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.security.SSLFactory;
+import org.apache.cassandra.security.DisableSslContextFactory;
+import org.apache.cassandra.security.ISslContextFactory;
+import org.apache.cassandra.utils.FBUtilities;
 
+/**
+ * This holds various options used for enabling SSL/TLS encryption.
+ * Examples of such options are: supported cipher-suites, ssl protocol with version, accepted protocols, end-point
+ * verification, require client-auth/cert etc.
+ */
 public class EncryptionOptions
 {
     Logger logger = LoggerFactory.getLogger(EncryptionOptions.class);
 
     public enum TlsEncryptionPolicy
     {
-        UNENCRYPTED("unencrypted"), OPTIONAL("optionally encrypted"), ENCRYPTED("encrypted");
+        UNENCRYPTED("unencrypted"),
+        OPTIONAL("optionally encrypted"),
+        ENCRYPTED("encrypted");
 
         private final String description;
 
@@ -52,9 +68,17 @@
         }
     }
 
+    /*
+     * If a custom ssl_context_factory is configured, it will most likely not use file-based keystores and truststores
+     * and may fully customize creation of the SSL context. It will also most likely not use the keystore_password and
+     * truststore_password configurations, as they are stored in plaintext format.
+     */
+    public final ParameterizedClass ssl_context_factory;
     public final String keystore;
+    @Nullable
     public final String keystore_password;
     public final String truststore;
+    @Nullable
     public final String truststore_password;
     public final List<String> cipher_suites;
     protected String protocol;
@@ -73,15 +97,62 @@
     protected Boolean optional;
 
     // Calculated by calling applyConfig() after populating/parsing
-    protected Boolean isEnabled = null;
-    protected Boolean isOptional = null;
+    protected Boolean isEnabled;
+    protected Boolean isOptional;
+
+    /*
+     * We will wait to initialize this until applyConfig() call to make sure we do it only when the caller is ready
+     * to use this option instance.
+     */
+    public transient ISslContextFactory sslContextFactoryInstance;
+
+    public enum ConfigKey
+    {
+        KEYSTORE("keystore"),
+        KEYSTORE_PASSWORD("keystore_password"),
+        TRUSTSTORE("truststore"),
+        TRUSTSTORE_PASSWORD("truststore_password"),
+        CIPHER_SUITES("cipher_suites"),
+        PROTOCOL("protocol"),
+        ACCEPTED_PROTOCOLS("accepted_protocols"),
+        ALGORITHM("algorithm"),
+        STORE_TYPE("store_type"),
+        REQUIRE_CLIENT_AUTH("require_client_auth"),
+        REQUIRE_ENDPOINT_VERIFICATION("require_endpoint_verification"),
+        ENABLED("enabled"),
+        OPTIONAL("optional");
+
+        final String keyName;
+
+        ConfigKey(String keyName)
+        {
+            this.keyName = keyName;
+        }
+
+        String getKeyName()
+        {
+            return keyName;
+        }
+
+        static Set<String> asSet()
+        {
+            Set<String> valueSet = new HashSet<>();
+            ConfigKey[] values = values();
+            for (ConfigKey key : values)
+            {
+                valueSet.add(key.getKeyName().toLowerCase());
+            }
+            return valueSet;
+        }
+    }
 
     public EncryptionOptions()
     {
+        ssl_context_factory = new ParameterizedClass("org.apache.cassandra.security.DefaultSslContextFactory",
+                                                     new HashMap<>());
         keystore = "conf/.keystore";
-        keystore_password = "cassandra";
+        keystore_password = null;
         truststore = "conf/.truststore";
-        truststore_password = "cassandra";
+        truststore_password = null;
         cipher_suites = null;
         protocol = null;
         accepted_protocols = null;
@@ -93,8 +164,13 @@
         optional = null;
     }
 
-    public EncryptionOptions(String keystore, String keystore_password, String truststore, String truststore_password, List<String> cipher_suites, String protocol, List<String> accepted_protocols, String algorithm, String store_type, boolean require_client_auth, boolean require_endpoint_verification, Boolean enabled, Boolean optional)
+    public EncryptionOptions(ParameterizedClass ssl_context_factory, String keystore, String keystore_password,
+                             String truststore, String truststore_password, List<String> cipher_suites,
+                             String protocol, List<String> accepted_protocols, String algorithm, String store_type,
+                             boolean require_client_auth, boolean require_endpoint_verification, Boolean enabled,
+                             Boolean optional)
     {
+        this.ssl_context_factory = ssl_context_factory;
         this.keystore = keystore;
         this.keystore_password = keystore_password;
         this.truststore = truststore;
@@ -112,6 +188,7 @@
 
     public EncryptionOptions(EncryptionOptions options)
     {
+        ssl_context_factory = options.ssl_context_factory;
         keystore = options.keystore;
         keystore_password = options.keystore_password;
         truststore = options.truststore;
@@ -130,11 +207,15 @@
     /* Computes enabled and optional before use. Because the configuration can be loaded
      * through pluggable mechanisms this is the only safe way to make sure that
      * enabled and optional are set correctly.
+     *
+     * It also initializes the ISslContextFactory's instance
      */
     public EncryptionOptions applyConfig()
     {
         ensureConfigNotApplied();
 
+        initializeSslContextFactory();
+
         isEnabled = this.enabled != null && enabled;
 
         if (optional != null)
@@ -144,7 +225,7 @@
         // If someone is asking for an _insecure_ connection and not explicitly telling us to refuse
         // encrypted connections AND they have a keystore file, we assume they would like to be able
         // to transition to encrypted connections in the future.
-        else if (new File(keystore).exists())
+        else if (sslContextFactoryInstance.hasKeystore())
         {
             isOptional = !isEnabled;
         }
@@ -156,6 +237,72 @@
         return this;
     }
 
+    /**
+     * Prepares the parameterized keys provided in the configuration so they can be passed to the constructor of the
+     * {@link ISslContextFactory} implementation.
+     *
+     * @throws IllegalArgumentException in case any pre-defined key, as per {@link ConfigKey}, for the encryption
+     * options is duplicated in the parameterized keys.
+     */
+    private void prepareSslContextFactoryParameterizedKeys(Map<String,Object> sslContextFactoryParameters)
+    {
+        if (ssl_context_factory.parameters != null)
+        {
+            Set<String> configKeys = ConfigKey.asSet();
+            for (Map.Entry<String, String> entry : ssl_context_factory.parameters.entrySet())
+            {
+                if (configKeys.contains(entry.getKey().toLowerCase()))
+                {
+                    throw new IllegalArgumentException("SslContextFactory " + ssl_context_factory.class_name + " should " +
+                                                       "configure '" + entry.getKey() + "' via encryption_options rather " +
+                                                       "than as a parameterized key");
+                }
+                sslContextFactoryParameters.put(entry.getKey(), entry.getValue());
+            }
+        }
+    }
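+    /*
+     * Illustrative example (hypothetical factory class name): a custom factory whose parameters reuse a pre-defined
+     * key is rejected by the check above, e.g.
+     *
+     *   new EncryptionOptions().withSslContextFactory(
+     *       new ParameterizedClass("com.example.MySslContextFactory",
+     *                              Collections.singletonMap("keystore", "/path/to/keystore")))
+     *   // throws IllegalArgumentException because "keystore" duplicates ConfigKey.KEYSTORE
+     */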
+
+    private void initializeSslContextFactory()
+    {
+        Map<String,Object> sslContextFactoryParameters = new HashMap<>();
+        prepareSslContextFactoryParameterizedKeys(sslContextFactoryParameters);
+
+        /*
+         * Copy all configs to the Map to pass it on to the ISslContextFactory's implementation
+         */
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.KEYSTORE, this.keystore);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.KEYSTORE_PASSWORD, this.keystore_password);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.TRUSTSTORE, this.truststore);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.TRUSTSTORE_PASSWORD, this.truststore_password);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.CIPHER_SUITES, this.cipher_suites);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.PROTOCOL, this.protocol);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.ACCEPTED_PROTOCOLS, this.accepted_protocols);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.ALGORITHM, this.algorithm);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.STORE_TYPE, this.store_type);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.REQUIRE_CLIENT_AUTH, this.require_client_auth);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.REQUIRE_ENDPOINT_VERIFICATION, this.require_endpoint_verification);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.ENABLED, this.enabled);
+        putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OPTIONAL, this.optional);
+
+        if (CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL.getBoolean())
+        {
+            sslContextFactoryInstance = new DisableSslContextFactory();
+        }
+        else
+        {
+            sslContextFactoryInstance = FBUtilities.newSslContextFactory(ssl_context_factory.class_name,
+                                                                         sslContextFactoryParameters);
+        }
+    }
+
+    private void putSslContextFactoryParameter(Map<String,Object> existingParameters, ConfigKey configKey,
+                                               Object value)
+    {
+        if (value != null)
+        {
+            existingParameters.put(configKey.getKeyName(), value);
+        }
+    }
+
     private void ensureConfigApplied()
     {
         if (isEnabled == null || isOptional == null)
@@ -173,7 +320,8 @@
      *
      * @return if the channel should be encrypted
      */
-    public Boolean isEnabled() {
+    public Boolean getEnabled()
+    {
         ensureConfigApplied();
         return isEnabled;
     }
@@ -184,7 +332,8 @@
      * is probably a bad idea.
      * @param enabled value to set
      */
-    public void setEnabled(Boolean enabled) {
+    public void setEnabled(Boolean enabled)
+    {
         ensureConfigNotApplied();
         this.enabled = enabled;
     }
@@ -198,7 +347,7 @@
      * Return type is Boolean even though it can never be null so that snakeyaml can find it
      * @return if the channel may be encrypted
      */
-    public Boolean isOptional()
+    public Boolean getOptional()
     {
         ensureConfigApplied();
         return isOptional;
@@ -210,7 +359,8 @@
      * is probably a bad idea.
      * @param optional value to set
      */
-    public void setOptional(boolean optional) {
+    public void setOptional(Boolean optional)
+    {
         ensureConfigNotApplied();
         this.optional = optional;
     }
@@ -222,82 +372,45 @@
      * @param protocol value to set
      */
     @VisibleForTesting
-    public void setProtocol(String protocol) {
+    public void setProtocol(String protocol)
+    {
         this.protocol = protocol;
     }
 
+    public String getProtocol()
+    {
+        return protocol;
+    }
+
     /**
      * Sets accepted TLS protocols for this channel. Note that this should only be called by
      * the configuration parser or tests. It is public only for that purpose, mutating protocol state
      * is probably a bad idea. The function casing is required for snakeyaml to find this setter for the protected field.
      * @param accepted_protocols value to set
      */
-    public void setaccepted_protocols(List<String> accepted_protocols) {
+    public void setAcceptedProtocols(List<String> accepted_protocols)
+    {
         this.accepted_protocols = accepted_protocols == null ? null : ImmutableList.copyOf(accepted_protocols);
     }
 
-    /* This list is substituted in configurations that have explicitly specified the original "TLS" default,
-     * by extracting it from the default "TLS" SSL Context instance
-     */
-    static private final List<String> TLS_PROTOCOL_SUBSTITUTION = SSLFactory.tlsInstanceProtocolSubstitution();
-
-    /**
-     * Combine the pre-4.0 protocol field with the accepted_protocols list, substituting a list of
-     * explicit protocols for the previous catchall default of "TLS"
-     * @return array of protocol names suitable for passing to SslContextBuilder.protocols, or null if the default
-     */
-    public List<String> acceptedProtocols()
+    public List<String> getAcceptedProtocols()
     {
-        if (accepted_protocols == null)
-        {
-            if (protocol == null)
-            {
-                return null;
-            }
-            // TLS is accepted by SSLContext.getInstance as a shorthand for give me an engine that
-            // can speak some of the TLS protocols.  It is not supported by SSLEngine.setAcceptedProtocols
-            // so substitute if the user hasn't provided an accepted protocol configuration
-            else if (protocol.equalsIgnoreCase("TLS"))
-            {
-                return TLS_PROTOCOL_SUBSTITUTION;
-            }
-            else // the user was trying to limit to a single specific protocol, so try that
-            {
-                return ImmutableList.of(protocol);
-            }
-        }
-
-        if (protocol != null && !protocol.equalsIgnoreCase("TLS") &&
-            accepted_protocols.stream().noneMatch(ap -> ap.equalsIgnoreCase(protocol)))
-        {
-            // If the user provided a non-generic default protocol, append it to accepted_protocols - they wanted
-            // it after all.
-            return ImmutableList.<String>builder().addAll(accepted_protocols).add(protocol).build();
-        }
-        else
-        {
-            return accepted_protocols;
-        }
+        return sslContextFactoryInstance == null ? null : sslContextFactoryInstance.getAcceptedProtocols();
     }
 
     public String[] acceptedProtocolsArray()
     {
-        List<String> ap = acceptedProtocols();
+        List<String> ap = getAcceptedProtocols();
         return ap == null ?  new String[0] : ap.toArray(new String[0]);
     }
 
-    public String[] cipherSuitesArray()
-    {
-        return cipher_suites == null ? new String[0] : cipher_suites.toArray(new String[0]);
-    }
-
     public TlsEncryptionPolicy tlsEncryptionPolicy()
     {
-        if (isOptional())
+        if (getOptional())
         {
             return TlsEncryptionPolicy.OPTIONAL;
         }
-        else if (isEnabled())
+        else if (getEnabled())
         {
             return TlsEncryptionPolicy.ENCRYPTED;
         }
@@ -307,104 +420,127 @@
         }
     }
 
+    public EncryptionOptions withSslContextFactory(ParameterizedClass sslContextFactoryClass)
+    {
+        return new EncryptionOptions(sslContextFactoryClass, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
+    }
+
     public EncryptionOptions withKeyStore(String keystore)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withKeyStorePassword(String keystore_password)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withTrustStore(String truststore)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withTrustStorePassword(String truststore_password)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withCipherSuites(List<String> cipher_suites)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withCipherSuites(String ... cipher_suites)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, ImmutableList.copyOf(cipher_suites),
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, ImmutableList.copyOf(cipher_suites), protocol,
+                                     accepted_protocols, algorithm, store_type, require_client_auth,
+                                     require_endpoint_verification, enabled, optional).applyConfig();
     }
 
     public EncryptionOptions withProtocol(String protocol)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
 
     public EncryptionOptions withAcceptedProtocols(List<String> accepted_protocols)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites, protocol,
-                                     accepted_protocols == null ? null : ImmutableList.copyOf(accepted_protocols),
-                                     algorithm, store_type, require_client_auth, require_endpoint_verification, enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol,
+                                     accepted_protocols == null ? null : ImmutableList.copyOf(accepted_protocols),
+                                     algorithm, store_type, require_client_auth, require_endpoint_verification,
+                                     enabled, optional).applyConfig();
     }
 
 
     public EncryptionOptions withAlgorithm(String algorithm)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withStoreType(String store_type)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withRequireClientAuth(boolean require_client_auth)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withRequireEndpointVerification(boolean require_endpoint_verification)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withEnabled(boolean enabled)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     public EncryptionOptions withOptional(Boolean optional)
     {
-        return new EncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                           protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                           enabled, optional).applyConfig();
+        return new EncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                     truststore_password, cipher_suites, protocol, accepted_protocols, algorithm,
+                                     store_type, require_client_auth, require_endpoint_verification, enabled,
+                                     optional).applyConfig();
     }
 
     /**
@@ -432,7 +568,8 @@
                Objects.equals(accepted_protocols, opt.accepted_protocols) &&
                Objects.equals(algorithm, opt.algorithm) &&
                Objects.equals(store_type, opt.store_type) &&
-               Objects.equals(cipher_suites, opt.cipher_suites);
+               Objects.equals(cipher_suites, opt.cipher_suites) &&
+               Objects.equals(ssl_context_factory, opt.ssl_context_factory);
     }
 
     /**
@@ -456,6 +593,7 @@
         result += 31 * (cipher_suites == null ? 0 : cipher_suites.hashCode());
         result += 31 * Boolean.hashCode(require_client_auth);
         result += 31 * Boolean.hashCode(require_endpoint_verification);
+        result += 31 * (ssl_context_factory == null ? 0 : ssl_context_factory.hashCode());
         return result;
     }
 
@@ -467,33 +605,34 @@
         }
 
         public final InternodeEncryption internode_encryption;
-        public final boolean enable_legacy_ssl_storage_port;
+        @Replaces(oldName = "enable_legacy_ssl_storage_port", deprecated = true)
+        public final boolean legacy_ssl_storage_port_enabled;
 
         public ServerEncryptionOptions()
         {
             this.internode_encryption = InternodeEncryption.none;
-            this.enable_legacy_ssl_storage_port = false;
+            this.legacy_ssl_storage_port_enabled = false;
         }
 
-        public ServerEncryptionOptions(String keystore, String keystore_password, String truststore,
-                                       String truststore_password, List<String> cipher_suites, String protocol,
-                                       List<String> accepted_protocols, String algorithm, String store_type,
-                                       boolean require_client_auth, boolean require_endpoint_verification,
-                                       Boolean optional, InternodeEncryption internode_encryption,
-                                       boolean enable_legacy_ssl_storage_port)
+        public ServerEncryptionOptions(ParameterizedClass sslContextFactoryClass, String keystore,
+                                       String keystore_password, String truststore, String truststore_password,
+                                       List<String> cipher_suites, String protocol, List<String> accepted_protocols,
+                                       String algorithm, String store_type, boolean require_client_auth,
+                                       boolean require_endpoint_verification, Boolean optional,
+                                       InternodeEncryption internode_encryption, boolean legacy_ssl_storage_port_enabled)
         {
-            super(keystore, keystore_password, truststore, truststore_password, cipher_suites, protocol,
-                  accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                  null, optional);
+            super(sslContextFactoryClass, keystore, keystore_password, truststore, truststore_password, cipher_suites,
+                  protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
+                  null, optional);
             this.internode_encryption = internode_encryption;
-            this.enable_legacy_ssl_storage_port = enable_legacy_ssl_storage_port;
+            this.legacy_ssl_storage_port_enabled = legacy_ssl_storage_port_enabled;
         }
 
         public ServerEncryptionOptions(ServerEncryptionOptions options)
         {
             super(options);
             this.internode_encryption = options.internode_encryption;
-            this.enable_legacy_ssl_storage_port = options.enable_legacy_ssl_storage_port;
+            this.legacy_ssl_storage_port_enabled = options.legacy_ssl_storage_port_enabled;
         }
 
         @Override
@@ -557,115 +696,154 @@
          * values of "dc" and "all". This method returns the explicit, raw value of {@link #optional}
          * as set by the user (if set at all).
          */
+        @JsonIgnore
         public boolean isExplicitlyOptional()
         {
             return optional != null && optional;
         }
 
+        public ServerEncryptionOptions withSslContextFactory(ParameterizedClass sslContextFactoryClass)
+        {
+            return new ServerEncryptionOptions(sslContextFactoryClass, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
+        }
+
         public ServerEncryptionOptions withKeyStore(String keystore)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withKeyStorePassword(String keystore_password)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withTrustStore(String truststore)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withTrustStorePassword(String truststore_password)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withCipherSuites(List<String> cipher_suites)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withCipherSuites(String ... cipher_suites)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, ImmutableList.copyOf(cipher_suites),
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, Arrays.asList(cipher_suites), protocol,
+                                               accepted_protocols, algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withProtocol(String protocol)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withAcceptedProtocols(List<String> accepted_protocols)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols == null ? null : ImmutableList.copyOf(accepted_protocols),
-                                               algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withAlgorithm(String algorithm)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withStoreType(String store_type)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withRequireClientAuth(boolean require_client_auth)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withRequireEndpointVerification(boolean require_endpoint_verification)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withOptional(boolean optional)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withInternodeEncryption(InternodeEncryption internode_encryption)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
         public ServerEncryptionOptions withLegacySslStoragePort(boolean enable_legacy_ssl_storage_port)
         {
-            return new ServerEncryptionOptions(keystore, keystore_password, truststore, truststore_password, cipher_suites,
-                                               protocol, accepted_protocols, algorithm, store_type, require_client_auth, require_endpoint_verification,
-                                               optional, internode_encryption, enable_legacy_ssl_storage_port).applyConfigInternal();
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               enable_legacy_ssl_storage_port).applyConfigInternal();
         }
 
     }
diff --git a/src/java/org/apache/cassandra/config/ForwardingProperty.java b/src/java/org/apache/cassandra/config/ForwardingProperty.java
new file mode 100644
index 0000000..5e7840e
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/ForwardingProperty.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.lang.annotation.Annotation;
+import java.util.List;
+
+import org.yaml.snakeyaml.introspector.Property;
+
+/**
+ * This class delegates all calls of {@link Property} to a {@link #delegate()}; it is used when only a small number
+ * of the delegate's methods need to be overridden.
+ *
+ * This class acts as a decorator for a {@link Property} and allows any of its methods to be decorated.
+ */
+public class ForwardingProperty extends Property
+{
+    private final Property delegate;
+
+    public ForwardingProperty(String name, Property property)
+    {
+        this(name, property.getType(), property);
+    }
+
+    public ForwardingProperty(String name, Class<?> type, Property property)
+    {
+        super(name, type);
+        this.delegate = property;
+    }
+
+    protected Property delegate()
+    {
+        return delegate;
+    }
+
+    @Override
+    public boolean isWritable()
+    {
+        return delegate().isWritable();
+    }
+
+    @Override
+    public boolean isReadable()
+    {
+        return delegate().isReadable();
+    }
+
+    @Override
+    public Class<?>[] getActualTypeArguments()
+    {
+        return delegate().getActualTypeArguments();
+    }
+
+    @Override
+    public void set(Object o, Object o1) throws Exception
+    {
+        delegate().set(o, o1);
+    }
+
+    @Override
+    public Object get(Object o)
+    {
+        return delegate().get(o);
+    }
+
+    @Override
+    public List<Annotation> getAnnotations()
+    {
+        return delegate().getAnnotations();
+    }
+
+    @Override
+    public <A extends Annotation> A getAnnotation(Class<A> aClass)
+    {
+        return delegate().getAnnotation(aClass);
+    }
+}
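
A minimal, hypothetical sketch of how such a forwarding property can be used: everything is delegated to an existing SnakeYAML Property and only the behaviour of interest is overridden. The subclass name and the deprecation warning are illustrative assumptions, not code from this patch.

```java
package org.apache.cassandra.config;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.introspector.Property;

// Hypothetical subclass: exposes an old YAML name while delegating reads/writes to the renamed property.
public class DeprecatedNameProperty extends ForwardingProperty
{
    private static final Logger logger = LoggerFactory.getLogger(DeprecatedNameProperty.class);

    public DeprecatedNameProperty(String oldName, Property newProperty)
    {
        super(oldName, newProperty); // only the visible name differs; storage stays with newProperty
    }

    @Override
    public void set(Object bean, Object value) throws Exception
    {
        // Warn on every write through the old name, then delegate to the real property.
        logger.warn("Configuration property '{}' is deprecated; use '{}' instead", getName(), delegate().getName());
        delegate().set(bean, value);
    }
}
```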
diff --git a/src/java/org/apache/cassandra/config/GuardrailsOptions.java b/src/java/org/apache/cassandra/config/GuardrailsOptions.java
new file mode 100644
index 0000000..e4694b9
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/GuardrailsOptions.java
@@ -0,0 +1,793 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.config;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+import javax.annotation.Nullable;
+
+import com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.cql3.statements.schema.TableAttributes;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.guardrails.Guardrails;
+import org.apache.cassandra.db.guardrails.GuardrailsConfig;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.service.disk.usage.DiskUsageMonitor;
+
+import static java.lang.String.format;
+import static java.util.stream.Collectors.toSet;
+
+/**
+ * Configuration settings for guardrails populated from the Yaml file.
+ *
+ * <p>Note that the settings here must only be used to build the {@link GuardrailsConfig} class and not directly by the
+ * code checking each guarded constraint. That code should use the higher-level abstractions defined in
+ * {@link Guardrails}.
+ *
+ * <p>We have two variants of guardrails, soft (warn) and hard (fail) limits, and each guardrail has one of the
+ * variants or both. Note in particular that hard limits only make sense for guardrails triggering during query
+ * execution; for other guardrails, say one triggering during compaction, aborting that compaction does not make sense.
+ *
+ * <p>Additionally, each individual setting should have a specific value (typically -1 for numeric settings)
+ * that allows the corresponding guardrail to be disabled.
+ */
+public class GuardrailsOptions implements GuardrailsConfig
+{
+    private static final Logger logger = LoggerFactory.getLogger(GuardrailsOptions.class);
+
+    private final Config config;
+
+    public GuardrailsOptions(Config config)
+    {
+        this.config = config;
+        validateMaxIntThreshold(config.keyspaces_warn_threshold, config.keyspaces_fail_threshold, "keyspaces");
+        validateMaxIntThreshold(config.tables_warn_threshold, config.tables_fail_threshold, "tables");
+        validateMaxIntThreshold(config.columns_per_table_warn_threshold, config.columns_per_table_fail_threshold, "columns_per_table");
+        validateMaxIntThreshold(config.secondary_indexes_per_table_warn_threshold, config.secondary_indexes_per_table_fail_threshold, "secondary_indexes_per_table");
+        validateMaxIntThreshold(config.materialized_views_per_table_warn_threshold, config.materialized_views_per_table_fail_threshold, "materialized_views_per_table");
+        config.table_properties_warned = validateTableProperties(config.table_properties_warned, "table_properties_warned");
+        config.table_properties_ignored = validateTableProperties(config.table_properties_ignored, "table_properties_ignored");
+        config.table_properties_disallowed = validateTableProperties(config.table_properties_disallowed, "table_properties_disallowed");
+        validateMaxIntThreshold(config.page_size_warn_threshold, config.page_size_fail_threshold, "page_size");
+        validateMaxIntThreshold(config.partition_keys_in_select_warn_threshold, config.partition_keys_in_select_fail_threshold, "partition_keys_in_select");
+        validateMaxIntThreshold(config.in_select_cartesian_product_warn_threshold, config.in_select_cartesian_product_fail_threshold, "in_select_cartesian_product");
+        config.read_consistency_levels_warned = validateConsistencyLevels(config.read_consistency_levels_warned, "read_consistency_levels_warned");
+        config.read_consistency_levels_disallowed = validateConsistencyLevels(config.read_consistency_levels_disallowed, "read_consistency_levels_disallowed");
+        config.write_consistency_levels_warned = validateConsistencyLevels(config.write_consistency_levels_warned, "write_consistency_levels_warned");
+        config.write_consistency_levels_disallowed = validateConsistencyLevels(config.write_consistency_levels_disallowed, "write_consistency_levels_disallowed");
+        validateSizeThreshold(config.collection_size_warn_threshold, config.collection_size_fail_threshold, false, "collection_size");
+        validateMaxIntThreshold(config.items_per_collection_warn_threshold, config.items_per_collection_fail_threshold, "items_per_collection");
+        validateMaxIntThreshold(config.fields_per_udt_warn_threshold, config.fields_per_udt_fail_threshold, "fields_per_udt");
+        validatePercentageThreshold(config.data_disk_usage_percentage_warn_threshold, config.data_disk_usage_percentage_fail_threshold, "data_disk_usage_percentage");
+        validateDataDiskUsageMaxDiskSize(config.data_disk_usage_max_disk_size);
+        validateMinRFThreshold(config.minimum_replication_factor_warn_threshold, config.minimum_replication_factor_fail_threshold, "minimum_replication_factor");
+    }
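
As a rough usage sketch of the warn/fail pairing described in the class javadoc: the thresholds validated above can be adjusted through the setters defined below, with -1 disabling a limit. The wrapper class, the chosen values, and the assumption that Config's defaults pass this validation are illustrative only.

```java
import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.GuardrailsOptions;

// Illustrative only: each guardrail pairs a soft (warn) and a hard (fail) threshold; -1 disables a limit.
public class GuardrailsOptionsSketch
{
    public static void main(String[] args)
    {
        Config config = new Config();                 // assumed: default values are valid for GuardrailsOptions
        GuardrailsOptions guardrails = new GuardrailsOptions(config);

        guardrails.setKeyspacesThreshold(100, 200);   // warn past 100 keyspaces, fail past 200
        guardrails.setTablesThreshold(500, -1);       // warn past 500 tables, no hard limit

        System.out.println(guardrails.getKeyspacesWarnThreshold()); // prints 100
    }
}
```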
+
+    @Override
+    public int getKeyspacesWarnThreshold()
+    {
+        return config.keyspaces_warn_threshold;
+    }
+
+    @Override
+    public int getKeyspacesFailThreshold()
+    {
+        return config.keyspaces_fail_threshold;
+    }
+
+    public void setKeyspacesThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "keyspaces");
+        updatePropertyWithLogging("keyspaces_warn_threshold",
+                                  warn,
+                                  () -> config.keyspaces_warn_threshold,
+                                  x -> config.keyspaces_warn_threshold = x);
+        updatePropertyWithLogging("keyspaces_fail_threshold",
+                                  fail,
+                                  () -> config.keyspaces_fail_threshold,
+                                  x -> config.keyspaces_fail_threshold = x);
+    }
+
+    @Override
+    public int getTablesWarnThreshold()
+    {
+        return config.tables_warn_threshold;
+    }
+
+    @Override
+    public int getTablesFailThreshold()
+    {
+        return config.tables_fail_threshold;
+    }
+
+    public void setTablesThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "tables");
+        updatePropertyWithLogging("tables_warn_threshold",
+                                  warn,
+                                  () -> config.tables_warn_threshold,
+                                  x -> config.tables_warn_threshold = x);
+        updatePropertyWithLogging("tables_fail_threshold",
+                                  fail,
+                                  () -> config.tables_fail_threshold,
+                                  x -> config.tables_fail_threshold = x);
+    }
+
+    @Override
+    public int getColumnsPerTableWarnThreshold()
+    {
+        return config.columns_per_table_warn_threshold;
+    }
+
+    @Override
+    public int getColumnsPerTableFailThreshold()
+    {
+        return config.columns_per_table_fail_threshold;
+    }
+
+    public void setColumnsPerTableThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "columns_per_table");
+        updatePropertyWithLogging("columns_per_table_warn_threshold",
+                                  warn,
+                                  () -> config.columns_per_table_warn_threshold,
+                                  x -> config.columns_per_table_warn_threshold = x);
+        updatePropertyWithLogging("columns_per_table_fail_threshold",
+                                  fail,
+                                  () -> config.columns_per_table_fail_threshold,
+                                  x -> config.columns_per_table_fail_threshold = x);
+    }
+
+    @Override
+    public int getSecondaryIndexesPerTableWarnThreshold()
+    {
+        return config.secondary_indexes_per_table_warn_threshold;
+    }
+
+    @Override
+    public int getSecondaryIndexesPerTableFailThreshold()
+    {
+        return config.secondary_indexes_per_table_fail_threshold;
+    }
+
+    public void setSecondaryIndexesPerTableThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "secondary_indexes_per_table");
+        updatePropertyWithLogging("secondary_indexes_per_table_warn_threshold",
+                                  warn,
+                                  () -> config.secondary_indexes_per_table_warn_threshold,
+                                  x -> config.secondary_indexes_per_table_warn_threshold = x);
+        updatePropertyWithLogging("secondary_indexes_per_table_fail_threshold",
+                                  fail,
+                                  () -> config.secondary_indexes_per_table_fail_threshold,
+                                  x -> config.secondary_indexes_per_table_fail_threshold = x);
+    }
+
+    @Override
+    public int getMaterializedViewsPerTableWarnThreshold()
+    {
+        return config.materialized_views_per_table_warn_threshold;
+    }
+
+    @Override
+    public int getPartitionKeysInSelectWarnThreshold()
+    {
+        return config.partition_keys_in_select_warn_threshold;
+    }
+
+    @Override
+    public int getPartitionKeysInSelectFailThreshold()
+    {
+        return config.partition_keys_in_select_fail_threshold;
+    }
+
+    public void setPartitionKeysInSelectThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "partition_keys_in_select");
+        updatePropertyWithLogging("partition_keys_in_select_warn_threshold",
+                                  warn,
+                                  () -> config.partition_keys_in_select_warn_threshold,
+                                  x -> config.partition_keys_in_select_warn_threshold = x);
+        updatePropertyWithLogging("partition_keys_in_select_fail_threshold",
+                                  fail,
+                                  () -> config.partition_keys_in_select_fail_threshold,
+                                  x -> config.partition_keys_in_select_fail_threshold = x);
+    }
+
+    @Override
+    public int getMaterializedViewsPerTableFailThreshold()
+    {
+        return config.materialized_views_per_table_fail_threshold;
+    }
+
+    public void setMaterializedViewsPerTableThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "materialized_views_per_table");
+        updatePropertyWithLogging("materialized_views_per_table_warn_threshold",
+                                  warn,
+                                  () -> config.materialized_views_per_table_warn_threshold,
+                                  x -> config.materialized_views_per_table_warn_threshold = x);
+        updatePropertyWithLogging("materialized_views_per_table_fail_threshold",
+                                  fail,
+                                  () -> config.materialized_views_per_table_fail_threshold,
+                                  x -> config.materialized_views_per_table_fail_threshold = x);
+    }
+
+    @Override
+    public int getPageSizeWarnThreshold()
+    {
+        return config.page_size_warn_threshold;
+    }
+
+    @Override
+    public int getPageSizeFailThreshold()
+    {
+        return config.page_size_fail_threshold;
+    }
+
+    public void setPageSizeThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "page_size");
+        updatePropertyWithLogging("page_size_warn_threshold",
+                                  warn,
+                                  () -> config.page_size_warn_threshold,
+                                  x -> config.page_size_warn_threshold = x);
+        updatePropertyWithLogging("page_size_fail_threshold",
+                                  fail,
+                                  () -> config.page_size_fail_threshold,
+                                  x -> config.page_size_fail_threshold = x);
+    }
+
+    @Override
+    public Set<String> getTablePropertiesWarned()
+    {
+        return config.table_properties_warned;
+    }
+
+    public void setTablePropertiesWarned(Set<String> properties)
+    {
+        updatePropertyWithLogging("table_properties_warned",
+                                  validateTableProperties(properties, "table_properties_warned"),
+                                  () -> config.table_properties_warned,
+                                  x -> config.table_properties_warned = x);
+    }
+
+    @Override
+    public Set<String> getTablePropertiesIgnored()
+    {
+        return config.table_properties_ignored;
+    }
+
+    public void setTablePropertiesIgnored(Set<String> properties)
+    {
+        updatePropertyWithLogging("table_properties_ignored",
+                                  validateTableProperties(properties, "table_properties_ignored"),
+                                  () -> config.table_properties_ignored,
+                                  x -> config.table_properties_ignored = x);
+    }
+
+    @Override
+    public Set<String> getTablePropertiesDisallowed()
+    {
+        return config.table_properties_disallowed;
+    }
+
+    public void setTablePropertiesDisallowed(Set<String> properties)
+    {
+        updatePropertyWithLogging("table_properties_disallowed",
+                                  validateTableProperties(properties, "table_properties_disallowed"),
+                                  () -> config.table_properties_disallowed,
+                                  x -> config.table_properties_disallowed = x);
+    }
+
+    @Override
+    public boolean getUserTimestampsEnabled()
+    {
+        return config.user_timestamps_enabled;
+    }
+
+    public void setUserTimestampsEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("user_timestamps_enabled",
+                                  enabled,
+                                  () -> config.user_timestamps_enabled,
+                                  x -> config.user_timestamps_enabled = x);
+    }
+
+    @Override
+    public boolean getGroupByEnabled()
+    {
+        return config.group_by_enabled;
+    }
+
+    public void setGroupByEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("group_by_enabled",
+                                  enabled,
+                                  () -> config.group_by_enabled,
+                                  x -> config.group_by_enabled = x);
+    }
+
+    @Override
+    public boolean getDropTruncateTableEnabled()
+    {
+        return config.drop_truncate_table_enabled;
+    }
+
+    public void setDropTruncateTableEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("drop_truncate_table_enabled",
+                                  enabled,
+                                  () -> config.drop_truncate_table_enabled,
+                                  x -> config.drop_truncate_table_enabled = x);
+    }
+
+    @Override
+    public boolean getSecondaryIndexesEnabled()
+    {
+        return config.secondary_indexes_enabled;
+    }
+
+    public void setSecondaryIndexesEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("secondary_indexes_enabled",
+                                  enabled,
+                                  () -> config.secondary_indexes_enabled,
+                                  x -> config.secondary_indexes_enabled = x);
+    }
+
+    @Override
+    public boolean getUncompressedTablesEnabled()
+    {
+        return config.uncompressed_tables_enabled;
+    }
+
+    public void setUncompressedTablesEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("uncompressed_tables_enabled",
+                                  enabled,
+                                  () -> config.uncompressed_tables_enabled,
+                                  x -> config.uncompressed_tables_enabled = x);
+    }
+
+    @Override
+    public boolean getCompactTablesEnabled()
+    {
+        return config.compact_tables_enabled;
+    }
+
+    public void setCompactTablesEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("compact_tables_enabled",
+                                  enabled,
+                                  () -> config.compact_tables_enabled,
+                                  x -> config.compact_tables_enabled = x);
+    }
+
+    @Override
+    public boolean getReadBeforeWriteListOperationsEnabled()
+    {
+        return config.read_before_write_list_operations_enabled;
+    }
+
+    public void setReadBeforeWriteListOperationsEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("read_before_write_list_operations_enabled",
+                                  enabled,
+                                  () -> config.read_before_write_list_operations_enabled,
+                                  x -> config.read_before_write_list_operations_enabled = x);
+    }
+
+    @Override
+    public boolean getAllowFilteringEnabled()
+    {
+        return config.allow_filtering_enabled;
+    }
+
+    public void setAllowFilteringEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("allow_filtering_enabled",
+                                  enabled,
+                                  () -> config.allow_filtering_enabled,
+                                  x -> config.allow_filtering_enabled = x);
+    }
+
+    @Override
+    public int getInSelectCartesianProductWarnThreshold()
+    {
+        return config.in_select_cartesian_product_warn_threshold;
+    }
+
+    @Override
+    public int getInSelectCartesianProductFailThreshold()
+    {
+        return config.in_select_cartesian_product_fail_threshold;
+    }
+
+    public void setInSelectCartesianProductThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "in_select_cartesian_product");
+        updatePropertyWithLogging("in_select_cartesian_product_warn_threshold",
+                                  warn,
+                                  () -> config.in_select_cartesian_product_warn_threshold,
+                                  x -> config.in_select_cartesian_product_warn_threshold = x);
+        updatePropertyWithLogging("in_select_cartesian_product_fail_threshold",
+                                  fail,
+                                  () -> config.in_select_cartesian_product_fail_threshold,
+                                  x -> config.in_select_cartesian_product_fail_threshold = x);
+    }
+
+    public Set<ConsistencyLevel> getReadConsistencyLevelsWarned()
+    {
+        return config.read_consistency_levels_warned;
+    }
+
+    public void setReadConsistencyLevelsWarned(Set<ConsistencyLevel> consistencyLevels)
+    {
+        updatePropertyWithLogging("read_consistency_levels_warned",
+                                  validateConsistencyLevels(consistencyLevels, "read_consistency_levels_warned"),
+                                  () -> config.read_consistency_levels_warned,
+                                  x -> config.read_consistency_levels_warned = x);
+    }
+
+    @Override
+    public Set<ConsistencyLevel> getReadConsistencyLevelsDisallowed()
+    {
+        return config.read_consistency_levels_disallowed;
+    }
+
+    public void setReadConsistencyLevelsDisallowed(Set<ConsistencyLevel> consistencyLevels)
+    {
+        updatePropertyWithLogging("read_consistency_levels_disallowed",
+                                  validateConsistencyLevels(consistencyLevels, "read_consistency_levels_disallowed"),
+                                  () -> config.read_consistency_levels_disallowed,
+                                  x -> config.read_consistency_levels_disallowed = x);
+    }
+
+    @Override
+    public Set<ConsistencyLevel> getWriteConsistencyLevelsWarned()
+    {
+        return config.write_consistency_levels_warned;
+    }
+
+    public void setWriteConsistencyLevelsWarned(Set<ConsistencyLevel> consistencyLevels)
+    {
+        updatePropertyWithLogging("write_consistency_levels_warned",
+                                  validateConsistencyLevels(consistencyLevels, "write_consistency_levels_warned"),
+                                  () -> config.write_consistency_levels_warned,
+                                  x -> config.write_consistency_levels_warned = x);
+    }
+
+    @Override
+    public Set<ConsistencyLevel> getWriteConsistencyLevelsDisallowed()
+    {
+        return config.write_consistency_levels_disallowed;
+    }
+
+    public void setWriteConsistencyLevelsDisallowed(Set<ConsistencyLevel> consistencyLevels)
+    {
+        updatePropertyWithLogging("write_consistency_levels_disallowed",
+                                  validateConsistencyLevels(consistencyLevels, "write_consistency_levels_disallowed"),
+                                  () -> config.write_consistency_levels_disallowed,
+                                  x -> config.write_consistency_levels_disallowed = x);
+    }
+
+    @Override
+    @Nullable
+    public DataStorageSpec.LongBytesBound getCollectionSizeWarnThreshold()
+    {
+        return config.collection_size_warn_threshold;
+    }
+
+    @Override
+    @Nullable
+    public DataStorageSpec.LongBytesBound getCollectionSizeFailThreshold()
+    {
+        return config.collection_size_fail_threshold;
+    }
+
+    public void setCollectionSizeThreshold(@Nullable DataStorageSpec.LongBytesBound warn, @Nullable DataStorageSpec.LongBytesBound fail)
+    {
+        validateSizeThreshold(warn, fail, false, "collection_size");
+        updatePropertyWithLogging("collection_size_warn_threshold",
+                                  warn,
+                                  () -> config.collection_size_warn_threshold,
+                                  x -> config.collection_size_warn_threshold = x);
+        updatePropertyWithLogging("collection_size_fail_threshold",
+                                  fail,
+                                  () -> config.collection_size_fail_threshold,
+                                  x -> config.collection_size_fail_threshold = x);
+    }
+
+    @Override
+    public int getItemsPerCollectionWarnThreshold()
+    {
+        return config.items_per_collection_warn_threshold;
+    }
+
+    @Override
+    public int getItemsPerCollectionFailThreshold()
+    {
+        return config.items_per_collection_fail_threshold;
+    }
+
+    public void setItemsPerCollectionThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "items_per_collection");
+        updatePropertyWithLogging("items_per_collection_warn_threshold",
+                                  warn,
+                                  () -> config.items_per_collection_warn_threshold,
+                                  x -> config.items_per_collection_warn_threshold = x);
+        updatePropertyWithLogging("items_per_collection_fail_threshold",
+                                  fail,
+                                  () -> config.items_per_collection_fail_threshold,
+                                  x -> config.items_per_collection_fail_threshold = x);
+    }
+
+    @Override
+    public int getFieldsPerUDTWarnThreshold()
+    {
+        return config.fields_per_udt_warn_threshold;
+    }
+
+    @Override
+    public int getFieldsPerUDTFailThreshold()
+    {
+        return config.fields_per_udt_fail_threshold;
+    }
+
+    public void setFieldsPerUDTThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "fields_per_udt");
+        updatePropertyWithLogging("fields_per_udt_warn_threshold",
+                                  warn,
+                                  () -> config.fields_per_udt_warn_threshold,
+                                  x -> config.fields_per_udt_warn_threshold = x);
+        updatePropertyWithLogging("fields_per_udt_fail_threshold",
+                                  fail,
+                                  () -> config.fields_per_udt_fail_threshold,
+                                  x -> config.fields_per_udt_fail_threshold = x);
+    }
+
+    public int getDataDiskUsagePercentageWarnThreshold()
+    {
+        return config.data_disk_usage_percentage_warn_threshold;
+    }
+
+    @Override
+    public int getDataDiskUsagePercentageFailThreshold()
+    {
+        return config.data_disk_usage_percentage_fail_threshold;
+    }
+
+    public void setDataDiskUsagePercentageThreshold(int warn, int fail)
+    {
+        validatePercentageThreshold(warn, fail, "data_disk_usage_percentage");
+        updatePropertyWithLogging("data_disk_usage_percentage_warn_threshold",
+                                  warn,
+                                  () -> config.data_disk_usage_percentage_warn_threshold,
+                                  x -> config.data_disk_usage_percentage_warn_threshold = x);
+        updatePropertyWithLogging("data_disk_usage_percentage_fail_threshold",
+                                  fail,
+                                  () -> config.data_disk_usage_percentage_fail_threshold,
+                                  x -> config.data_disk_usage_percentage_fail_threshold = x);
+    }
+
+    @Override
+    public DataStorageSpec.LongBytesBound getDataDiskUsageMaxDiskSize()
+    {
+        return config.data_disk_usage_max_disk_size;
+    }
+
+    public void setDataDiskUsageMaxDiskSize(@Nullable DataStorageSpec.LongBytesBound diskSize)
+    {
+        validateDataDiskUsageMaxDiskSize(diskSize);
+        updatePropertyWithLogging("data_disk_usage_max_disk_size",
+                                  diskSize,
+                                  () -> config.data_disk_usage_max_disk_size,
+                                  x -> config.data_disk_usage_max_disk_size = x);
+    }
+
+    @Override
+    public int getMinimumReplicationFactorWarnThreshold()
+    {
+        return config.minimum_replication_factor_warn_threshold;
+    }
+
+    @Override
+    public int getMinimumReplicationFactorFailThreshold()
+    {
+        return config.minimum_replication_factor_fail_threshold;
+    }
+
+    public void setMinimumReplicationFactorThreshold(int warn, int fail)
+    {
+        validateMinRFThreshold(warn, fail, "minimum_replication_factor");
+        updatePropertyWithLogging("minimum_replication_factor_warn_threshold",
+                                  warn,
+                                  () -> config.minimum_replication_factor_warn_threshold,
+                                  x -> config.minimum_replication_factor_warn_threshold = x);
+        updatePropertyWithLogging("minimum_replication_factor_fail_threshold",
+                                  fail,
+                                  () -> config.minimum_replication_factor_fail_threshold,
+                                  x -> config.minimum_replication_factor_fail_threshold = x);
+    }
+
+    private static <T> void updatePropertyWithLogging(String propertyName, T newValue, Supplier<T> getter, Consumer<T> setter)
+    {
+        T oldValue = getter.get();
+        if (newValue == null || !newValue.equals(oldValue))
+        {
+            setter.accept(newValue);
+            logger.info("Updated {} from {} to {}", propertyName, oldValue, newValue);
+        }
+    }
+
+    private static void validatePositiveNumeric(long value, long maxValue, String name)
+    {
+        if (value == -1)
+            return;
+
+        if (value > maxValue)
+            throw new IllegalArgumentException(format("Invalid value %d for %s: maximum allowed value is %d",
+                                                      value, name, maxValue));
+
+        if (value == 0)
+            throw new IllegalArgumentException(format("Invalid value for %s: 0 is not allowed; " +
+                                                      "if attempting to disable use -1", name));
+
+        // We allow -1 as a general "disabling" flag. But reject anything lower to avoid mistakes.
+        if (value <= 0)
+            throw new IllegalArgumentException(format("Invalid value %d for %s: negative values are not allowed, " +
+                                                      "outside of -1 which disables the guardrail", value, name));
+    }
+
+    private static void validatePercentage(long value, String name)
+    {
+        validatePositiveNumeric(value, 100, name);
+    }
+
+    private static void validatePercentageThreshold(int warn, int fail, String name)
+    {
+        validatePercentage(warn, name + "_warn_threshold");
+        validatePercentage(fail, name + "_fail_threshold");
+        validateWarnLowerThanFail(warn, fail, name);
+    }
+
+    private static void validateMaxIntThreshold(int warn, int fail, String name)
+    {
+        validatePositiveNumeric(warn, Integer.MAX_VALUE, name + "_warn_threshold");
+        validatePositiveNumeric(fail, Integer.MAX_VALUE, name + "_fail_threshold");
+        validateWarnLowerThanFail(warn, fail, name);
+    }
+
+    private static void validateMinIntThreshold(int warn, int fail, String name)
+    {
+        validatePositiveNumeric(warn, Integer.MAX_VALUE, name + "_warn_threshold");
+        validatePositiveNumeric(fail, Integer.MAX_VALUE, name + "_fail_threshold");
+        validateWarnGreaterThanFail(warn, fail, name);
+    }
+
+    private static void validateMinRFThreshold(int warn, int fail, String name)
+    {
+        validateMinIntThreshold(warn, fail, name);
+        validateMinRFVersusDefaultRF(fail, name);
+    }
+
+    private static void validateWarnLowerThanFail(long warn, long fail, String name)
+    {
+        if (warn == -1 || fail == -1)
+            return;
+
+        if (fail < warn)
+            throw new IllegalArgumentException(format("The warn threshold %d for %s_warn_threshold should be lower " +
+                                                      "than the fail threshold %d", warn, name, fail));
+    }
+
+    private static void validateWarnGreaterThanFail(long warn, long fail, String name)
+    {
+        if (warn == -1 || fail == -1)
+            return;
+
+        if (fail > warn)
+            throw new IllegalArgumentException(format("The warn threshold %d for %s_warn_threshold should be greater " +
+                                                      "than the fail threshold %d", warn, name, fail));
+    }
+
+    private static void validateMinRFVersusDefaultRF(int fail, String name) throws IllegalArgumentException
+    {
+        if (fail > DatabaseDescriptor.getDefaultKeyspaceRF())
+        {
+            throw new IllegalArgumentException(String.format("%s_fail_threshold to be set (%d) cannot be greater than default_keyspace_rf (%d)",
+                                                           name, fail, DatabaseDescriptor.getDefaultKeyspaceRF()));
+        }
+    }
+
+    private static void validateSize(DataStorageSpec.LongBytesBound size, boolean allowZero, String name)
+    {
+        if (size == null)
+            return;
+
+        if (!allowZero && size.toBytes() == 0)
+            throw new IllegalArgumentException(format("Invalid value for %s: 0 is not allowed; " +
+                                                      "if attempting to disable use an empty value",
+                                                      name));
+    }
+
+    private static void validateSizeThreshold(DataStorageSpec.LongBytesBound warn, DataStorageSpec.LongBytesBound fail, boolean allowZero, String name)
+    {
+        validateSize(warn, allowZero, name + "_warn_threshold");
+        validateSize(fail, allowZero, name + "_fail_threshold");
+        validateWarnLowerThanFail(warn, fail, name);
+    }
+
+    private static void validateWarnLowerThanFail(DataStorageSpec.LongBytesBound warn, DataStorageSpec.LongBytesBound fail, String name)
+    {
+        if (warn == null || fail == null)
+            return;
+
+        if (fail.toBytes() < warn.toBytes())
+            throw new IllegalArgumentException(format("The warn threshold %s for %s_warn_threshold should be lower " +
+                                                      "than the fail threshold %s", warn, name, fail));
+    }
+
+    private static Set<String> validateTableProperties(Set<String> properties, String name)
+    {
+        if (properties == null)
+            throw new IllegalArgumentException(format("Invalid value for %s: null is not allowed", name));
+
+        Set<String> lowerCaseProperties = properties.stream().map(String::toLowerCase).collect(toSet());
+
+        Set<String> diff = Sets.difference(lowerCaseProperties, TableAttributes.allKeywords());
+
+        if (!diff.isEmpty())
+            throw new IllegalArgumentException(format("Invalid value for %s: '%s' do not parse as valid table properties",
+                                                      name, diff));
+
+        return lowerCaseProperties;
+    }
+
+    private static Set<ConsistencyLevel> validateConsistencyLevels(Set<ConsistencyLevel> consistencyLevels, String name)
+    {
+        if (consistencyLevels == null)
+            throw new IllegalArgumentException(format("Invalid value for %s: null is not allowed", name));
+
+        return consistencyLevels.isEmpty() ? Collections.emptySet() : Sets.immutableEnumSet(consistencyLevels);
+    }
+
+    private static void validateDataDiskUsageMaxDiskSize(DataStorageSpec.LongBytesBound maxDiskSize)
+    {
+        if (maxDiskSize == null)
+            return;
+
+        validateSize(maxDiskSize, false, "data_disk_usage_max_disk_size");
+
+        long diskSize = DiskUsageMonitor.totalDiskSpace();
+
+        if (diskSize < maxDiskSize.toBytes())
+            throw new IllegalArgumentException(format("Invalid value for data_disk_usage_max_disk_size: " +
+                                                      "%s specified, but only %s are actually available on disk",
+                                                      maxDiskSize, FileUtils.stringifyFileSize(diskSize)));
+    }
+}
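(Not part of this patch.) The setters above all funnel through the private validate* helpers defined at the bottom of the class: -1 disables a guardrail, 0 is rejected outright, and for "max"-style thresholds the warn value must not exceed the fail value unless either side is disabled. A minimal, self-contained sketch of that contract, mirroring validateMaxIntThreshold for illustration only (it does not call into this class):

    // Illustrative mirror of the guardrail threshold rules; -1 means "disabled".
    final class ThresholdContractSketch
    {
        static void checkMaxThreshold(int warn, int fail)
        {
            for (int v : new int[]{ warn, fail })
            {
                if (v == -1)
                    continue; // disabled, nothing else to validate
                if (v == 0)
                    throw new IllegalArgumentException("0 is not allowed; use -1 to disable");
                if (v < 0)
                    throw new IllegalArgumentException("negative values other than -1 are not allowed");
            }
            if (warn != -1 && fail != -1 && fail < warn)
                throw new IllegalArgumentException("warn threshold must be lower than the fail threshold");
        }

        public static void main(String[] args)
        {
            checkMaxThreshold(100, 200); // accepted
            checkMaxThreshold(-1, 200);  // accepted: warn side disabled
            checkMaxThreshold(200, 100); // throws: warn greater than fail
        }
    }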
diff --git a/src/java/org/apache/cassandra/config/InheritingClass.java b/src/java/org/apache/cassandra/config/InheritingClass.java
new file mode 100644
index 0000000..c0bc41f
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/InheritingClass.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.config;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+public class InheritingClass extends ParameterizedClass
+{
+    public String inherits = null;
+
+    @SuppressWarnings("unused") // for snakeyaml
+    public InheritingClass()
+    {
+    }
+
+    public InheritingClass(String inherits, String class_name, Map<String, String> parameters)
+    {
+        super(class_name, parameters);
+        this.inherits = inherits;
+    }
+
+    @SuppressWarnings("unused")
+    public InheritingClass(Map<String, ?> p)
+    {
+        super(p);
+        this.inherits = p.get("inherits").toString();
+    }
+
+    public ParameterizedClass resolve(Map<String, ParameterizedClass> map)
+    {
+        if (inherits == null)
+            return this;
+        ParameterizedClass parent = map.get(inherits);
+        if (parent == null)
+            throw new ConfigurationException("Configuration definition inherits unknown " + inherits
+                                             + ". A configuration can only extend one defined earlier or \"default\".");
+        Map<String, String> resolvedParameters;
+        if (parameters == null || parameters.isEmpty())
+            resolvedParameters = parent.parameters;
+        else if (parent.parameters == null || parent.parameters.isEmpty())
+            resolvedParameters = this.parameters;
+        else
+        {
+            resolvedParameters = new LinkedHashMap<>(parent.parameters);
+            resolvedParameters.putAll(this.parameters);
+        }
+
+        String resolvedClass = this.class_name == null ? parent.class_name : this.class_name;
+        return new ParameterizedClass(resolvedClass, resolvedParameters);
+    }
+
+    @Override
+    public String toString()
+    {
+        return (inherits != null ? (inherits + "+") : "") +
+               (class_name != null ? class_name : "") +
+               (parameters != null ? parameters.toString() : "");
+    }
+}
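(Not part of this patch.) A minimal sketch of how resolve() combines a named parent definition with an inheriting one: the child's parameters override the parent's, and a missing class_name falls back to the parent's. The memtable class and parameter names below are purely illustrative.

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.cassandra.config.InheritingClass;
    import org.apache.cassandra.config.ParameterizedClass;

    public class InheritingClassSketch
    {
        public static void main(String[] args)
        {
            Map<String, String> defaults = new LinkedHashMap<>();
            defaults.put("shards", "4");            // illustrative parameter
            defaults.put("flush_period", "60s");    // illustrative parameter
            ParameterizedClass parent = new ParameterizedClass("ExampleMemtable", defaults); // illustrative class_name

            Map<String, String> overrides = new LinkedHashMap<>();
            overrides.put("shards", "8");
            InheritingClass child = new InheritingClass("default", null, overrides);

            Map<String, ParameterizedClass> known = new LinkedHashMap<>();
            known.put("default", parent);

            // Resolves to class_name=ExampleMemtable with parameters {shards=8, flush_period=60s}
            System.out.println(child.resolve(known));
        }
    }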
diff --git a/src/java/org/apache/cassandra/config/Loader.java b/src/java/org/apache/cassandra/config/Loader.java
new file mode 100644
index 0000000..3167645
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/Loader.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Map;
+
+import org.yaml.snakeyaml.introspector.Property;
+
+public interface Loader
+{
+    Map<String, Property> getProperties(Class<?> root);
+
+    default Map<String, Property> flatten(Class<?> root)
+    {
+        return Properties.flatten(this, getProperties(root));
+    }
+
+    default Map<String, Property> flatten(Class<?> root, String delimiter)
+    {
+        return Properties.flatten(this, getProperties(root), delimiter);
+    }
+}
diff --git a/src/java/org/apache/cassandra/config/ParameterizedClass.java b/src/java/org/apache/cassandra/config/ParameterizedClass.java
index d0542f5..9b00178 100644
--- a/src/java/org/apache/cassandra/config/ParameterizedClass.java
+++ b/src/java/org/apache/cassandra/config/ParameterizedClass.java
@@ -22,6 +22,11 @@
 
 import com.google.common.base.Objects;
 
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
 public class ParameterizedClass
 {
     public static final String CLASS_NAME = "class_name";
@@ -60,6 +65,12 @@
     }
 
     @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(class_name, parameters);
+    }
+
+    @Override
     public String toString()
     {
         return class_name + (parameters == null ? "" : parameters.toString());
diff --git a/src/java/org/apache/cassandra/config/Properties.java b/src/java/org/apache/cassandra/config/Properties.java
new file mode 100644
index 0000000..79852d4
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/Properties.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.lang.reflect.Constructor;
+import java.util.ArrayDeque;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Queue;
+
+import com.google.common.collect.Maps;
+
+import org.yaml.snakeyaml.introspector.Property;
+
+/**
+ * Utility class for working with {@link Property} types.
+ */
+public final class Properties
+{
+    public static final String DELIMITER = ".";
+
+    private Properties()
+    {
+    }
+
+    /**
+     * Given two properties (root, leaf), calls first go through root and are then passed to leaf.
+     *
+     * <pre>{@code leaf.get(root.get(o))}</pre>
+     *
+     * @param root first property in the chain
+     * @param leaf last property in the chain
+     * @param delimiter for joining names
+     * @return new Property which combines root -> leaf
+     */
+    public static Property andThen(Property root, Property leaf, String delimiter)
+    {
+        return new AndThen(root, leaf, delimiter);
+    }
+
+    /**
+     * Given two properties (root, leaf), calls first go through root and are then passed to leaf.
+     *
+     * <pre>{@code leaf.get(root.get(o))}</pre>
+     *
+     * @param root first property in the chain
+     * @param leaf last property in the chain
+     * @return new Property which combines root -> leaf
+     */
+    public static Property andThen(Property root, Property leaf)
+    {
+        return andThen(root, leaf, DELIMITER);
+    }
+
+    /**
+     * Given a map of Properties, takes any "nested" property (one that is not a primitive, value type, or collection)
+     * and expands it, producing one or more Properties.
+     *
+     * @param loader for mapping type to map of properties
+     * @param input map to flatten
+     * @return map of all flattened properties
+     */
+    public static Map<String, Property> flatten(Loader loader, Map<String, Property> input)
+    {
+        return flatten(loader, input, DELIMITER);
+    }
+
+    /**
+     * Given a map of Properties, takes any "nested" property (one that is not a primitive, value type, or collection)
+     * and expands it, producing one or more Properties.
+     *
+     * @param loader for mapping type to map of properties
+     * @param input map to flatten
+     * @param delimiter for joining names
+     * @return map of all flattened properties
+     */
+    public static Map<String, Property> flatten(Loader loader, Map<String, Property> input, String delimiter)
+    {
+        Queue<Property> queue = new ArrayDeque<>(input.values());
+
+        Map<String, Property> output = Maps.newHashMapWithExpectedSize(input.size());
+        while (!queue.isEmpty())
+        {
+            Property prop = queue.poll();
+            Map<String, Property> children = isPrimitive(prop) || isCollection(prop) ? Collections.emptyMap() : loader.getProperties(prop.getType());
+            if (children.isEmpty())
+            {
+                // not nested, so assume properties can be handled
+                output.put(prop.getName(), prop);
+            }
+            else
+            {
+                children.values().stream().map(p -> andThen(prop, p, delimiter)).forEach(queue::add);
+            }
+        }
+        return output;
+    }
+
+    /**
+     * @return true if property type is a collection
+     */
+    public static boolean isCollection(Property prop)
+    {
+        return Collection.class.isAssignableFrom(prop.getType()) || Map.class.isAssignableFrom(prop.getType());
+    }
+
+    /**
+     * @return true if property type is a primitive, or well known value type (may return false for user defined value types)
+     */
+    public static boolean isPrimitive(Property prop)
+    {
+        Class<?> type = prop.getType();
+        return type.isPrimitive() || type.isEnum() || type.equals(String.class) || Number.class.isAssignableFrom(type) || Boolean.class.equals(type);
+    }
+
+    /**
+     * @return default implementation of {@link Loader}
+     */
+    public static Loader defaultLoader()
+    {
+        return new DefaultLoader();
+    }
+
+    /**
+     * @return a new property with an updated name
+     */
+    public static Property rename(String newName, Property prop)
+    {
+        return new ForwardingProperty(newName, prop);
+    }
+
+    private static final class AndThen extends ForwardingProperty
+    {
+        private final Property root;
+        private final Property leaf;
+
+        AndThen(Property root, Property leaf, String delimiter)
+        {
+            super(root.getName() + delimiter + leaf.getName(), leaf);
+            this.root = root;
+            this.leaf = leaf;
+        }
+
+        @Override
+        public void set(Object object, Object value) throws Exception
+        {
+            Object parent = root.get(object);
+            if (parent == null)
+            {
+                // see: org.yaml.snakeyaml.constructor.BaseConstructor.newInstance(java.lang.Class<?>, org.yaml.snakeyaml.nodes.Node, boolean)
+                // That method is what creates the types, and it boils down to this call.  There is a TypeDescription
+                // class that we don't use, so it boils down to "null" in our existing logic; if we start using TypeDescription
+                // to build the object, then we will need to rewrite this logic to work with BaseConstructor.
+                Constructor<?> c = root.getType().getDeclaredConstructor();
+                c.setAccessible(true);
+                parent = c.newInstance();
+                root.set(object, parent);
+            }
+            leaf.set(parent, value);
+        }
+
+        @Override
+        public Object get(Object object)
+        {
+            try
+            {
+                Object parent = root.get(object);
+                if (parent == null)
+                    return null;
+                return leaf.get(parent);
+            }
+            catch (Exception e)
+            {
+                if (!(root instanceof AndThen))
+                    e.addSuppressed(new RuntimeException("Error calling get() on " + this));
+                throw e;
+            }
+        }
+    }
+}
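(Not part of this patch.) A minimal sketch of what flattening buys the YAML loader: nested value types are expanded into delimiter-joined names, so a nested field can later be addressed as "outer.inner". It only dumps what the default loader discovers on Config.

    import java.util.Map;

    import org.apache.cassandra.config.Config;
    import org.apache.cassandra.config.Properties;
    import org.yaml.snakeyaml.introspector.Property;

    public class FlattenSketch
    {
        public static void main(String[] args)
        {
            // Top-level properties only.
            Map<String, Property> topLevel = Properties.defaultLoader().getProperties(Config.class);
            // Nested value types expanded into dot-joined names, e.g. "outer.inner".
            Map<String, Property> flattened = Properties.defaultLoader().flatten(Config.class);
            System.out.println(topLevel.size() + " top-level vs " + flattened.size() + " flattened properties");
        }
    }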
diff --git a/src/java/org/apache/cassandra/config/Replacement.java b/src/java/org/apache/cassandra/config/Replacement.java
new file mode 100644
index 0000000..a201b63
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/Replacement.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Objects;
+
+import org.yaml.snakeyaml.introspector.Property;
+
+/**
+ * Holder for replacements to support backward compatibility between old and new names and types
+ * of configuration parameters (CASSANDRA-15234)
+ */
+public final class Replacement
+{
+    /**
+     * Currently we use the Config class
+     */
+    public final Class<?> parent;
+    /**
+     * Old name of the configuration parameter
+     */
+    public final String oldName;
+    /**
+     * Old type of the configuration parameter
+     */
+    public final Class<?> oldType;
+    /**
+     * New name used for the configuration parameter
+     */
+    public final String newName;
+    /**
+     * Converter to be used according to the old default unit which was provided as a suffix of the configuration
+     * parameter
+     */
+    public final Converters converter;
+    public final boolean deprecated;
+
+    public Replacement(Class<?> parent,
+                       String oldName,
+                       Class<?> oldType,
+                       String newName,
+                       Converters converter,
+                       boolean deprecated)
+    {
+        this.parent = Objects.requireNonNull(parent);
+        this.oldName = Objects.requireNonNull(oldName);
+        this.oldType = Objects.requireNonNull(oldType);
+        this.newName = Objects.requireNonNull(newName);
+        this.converter = Objects.requireNonNull(converter);
+        // by default deprecated is false
+        this.deprecated = deprecated;
+    }
+
+    public Property toProperty(Property newProperty)
+    {
+        return new ForwardingProperty(oldName, oldType, newProperty)
+        {
+            @Override
+            public void set(Object o, Object o1) throws Exception
+            {
+                newProperty.set(o, converter.convert(o1));
+            }
+
+            @Override
+            public Object get(Object o)
+            {
+                return converter.unconvert(newProperty.get(o));
+            }
+        };
+    }
+
+    public boolean isValueFormatReplacement()
+    {
+        return oldName.equals(newName);
+    }
+}
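(Not part of this patch.) A minimal sketch of how a Replacement is consumed: the Property for the new field is wrapped by toProperty() so reads and writes issued under the old name pass through the converter. It assumes the replaced names map to top-level Config fields visible to the default Loader, which is how the patch uses them.

    import java.util.Map;

    import org.apache.cassandra.config.Config;
    import org.apache.cassandra.config.Properties;
    import org.apache.cassandra.config.Replacement;
    import org.apache.cassandra.config.Replacements;
    import org.yaml.snakeyaml.introspector.Property;

    public class ReplacementSketch
    {
        public static void main(String[] args)
        {
            Map<String, Property> newProps = Properties.defaultLoader().getProperties(Config.class);
            Map<String, Replacement> byOldName = Replacements.getNameReplacements(Config.class).get(Config.class);
            for (Replacement r : byOldName.values())
            {
                Property newProp = newProps.get(r.newName);
                if (newProp == null)
                    continue; // defensive: skip anything not exposed as a top-level property
                Property viaOldName = r.toProperty(newProp); // writes through this pass the converter first
                System.out.println(viaOldName.getName() + " -> " + r.newName
                                   + (r.isValueFormatReplacement() ? " (value format change only)" : ""));
            }
        }
    }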
diff --git a/src/java/org/apache/cassandra/config/Replacements.java b/src/java/org/apache/cassandra/config/Replacements.java
new file mode 100644
index 0000000..f6577cb
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/Replacements.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+public final class Replacements
+{
+    private Replacements()
+    {
+    }
+
+    /**
+     * @param klass to get replacements for
+     * @return map of old names and replacements needed.
+     */
+    public static Map<Class<? extends Object>, Map<String, Replacement>> getNameReplacements(Class<? extends Object> klass)
+    {
+        List<Replacement> replacements = getReplacementsRecursive(klass);
+        Map<Class<?>, Map<String, Replacement>> objectOldNames = new HashMap<>();
+        for (Replacement r : replacements)
+        {
+            Map<String, Replacement> oldNames = objectOldNames.computeIfAbsent(r.parent, ignore -> new HashMap<>());
+            if (!oldNames.containsKey(r.oldName))
+                oldNames.put(r.oldName, r);
+            else
+            {
+                throw new ConfigurationException("Invalid annotations, you have more than one @Replaces annotation in " +
+                                                 "Config class with same old name(" + r.oldName + ") defined.");
+            }
+        }
+        return objectOldNames;
+    }
+
+    /**
+     * @param klass to get replacements for
+     * @return list of replacements needed, collected recursively from the class and its field types.
+     */
+    private static List<Replacement> getReplacementsRecursive(Class<?> klass)
+    {
+        Set<Class<?>> seen = new HashSet<>(); // to make sure not to process the same type twice
+        List<Replacement> accum = new ArrayList<>();
+        getReplacementsRecursive(seen, accum, klass);
+        return accum.isEmpty() ? Collections.emptyList() : accum;
+    }
+
+    private static void getReplacementsRecursive(Set<Class<?>> seen,
+                                                 List<Replacement> accum,
+                                                 Class<?> klass)
+    {
+        accum.addAll(getReplacements(klass));
+        for (Field field : klass.getDeclaredFields())
+        {
+            if (seen.add(field.getType()))
+            {
+                // first time looking at this type, walk it
+                getReplacementsRecursive(seen, accum, field.getType());
+            }
+        }
+    }
+
+    private static List<Replacement> getReplacements(Class<?> klass)
+    {
+        List<Replacement> replacements = new ArrayList<>();
+        for (Field field : klass.getDeclaredFields())
+        {
+            String newName = field.getName();
+            Class<?> newType = field.getType();
+            final ReplacesList[] byType = field.getAnnotationsByType(ReplacesList.class);
+            if (byType == null || byType.length == 0)
+            {
+                Replaces r = field.getAnnotation(Replaces.class);
+                if (r != null)
+                    addReplacement(klass, replacements, newName, newType, r);
+            }
+            else
+            {
+                for (ReplacesList replacesList : byType)
+                    for (Replaces r : replacesList.value())
+                        addReplacement(klass, replacements, newName, newType, r);
+            }
+        }
+        return replacements.isEmpty() ? Collections.emptyList() : replacements;
+    }
+
+    private static void addReplacement(Class<?> klass,
+                                       List<Replacement> replacements,
+                                       String newName, Class<?> newType,
+                                       Replaces r)
+    {
+        String oldName = r.oldName();
+
+        boolean deprecated = r.deprecated();
+
+        Class<?> oldType = r.converter().getOldType();
+        if (oldType == null)
+            oldType = newType;
+        Class<?> expectedNewType = r.converter().getNewType();
+        if (expectedNewType != null)
+            assert expectedNewType.equals(newType) : String.format("Converter is expected to return %s but %s#%s expects %s", expectedNewType, klass, newName, newType);
+
+        replacements.add(new Replacement(klass, oldName, oldType, newName, r.converter(), deprecated));
+    }
+}
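(Not part of this patch.) A minimal sketch of the discovery path: a field annotated with @Replaces is picked up by getNameReplacements and keyed by its old name. The ExampleConfig class and its field/old name are hypothetical; Converters.IDENTITY is the annotation's documented default and is assumed here to place no type constraint on the field.

    import java.util.Map;

    import org.apache.cassandra.config.Converters;
    import org.apache.cassandra.config.Replacement;
    import org.apache.cassandra.config.Replacements;
    import org.apache.cassandra.config.Replaces;

    public class ReplacesSketch
    {
        // Hypothetical configuration holder, for illustration only.
        public static class ExampleConfig
        {
            @Replaces(oldName = "old_request_timeout_in_ms", converter = Converters.IDENTITY, deprecated = true)
            public long request_timeout = 10_000;
        }

        public static void main(String[] args)
        {
            Map<String, Replacement> oldNames = Replacements.getNameReplacements(ExampleConfig.class).get(ExampleConfig.class);
            Replacement r = oldNames.get("old_request_timeout_in_ms");
            System.out.println(r.oldName + " -> " + r.newName + ", deprecated=" + r.deprecated);
        }
    }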
diff --git a/src/java/org/apache/cassandra/config/Replaces.java b/src/java/org/apache/cassandra/config/Replaces.java
index 93bdcb5..e1354b9 100644
--- a/src/java/org/apache/cassandra/config/Replaces.java
+++ b/src/java/org/apache/cassandra/config/Replaces.java
@@ -25,8 +25,8 @@
 import java.lang.annotation.Target;
 
 /**
- * Repeatable annotation for providing old name and whether the
- * config parameters we annotate are deprecated and we need to warn the users. (CASSANDRA-17141)
+ * Repeatable annotation for providing the old name, the converter from the old to the new type, and whether the
+ * config parameters we annotate are deprecated and we need to warn the users. (CASSANDRA-17141, CASSANDRA-15234)
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ ElementType.FIELD})
@@ -39,6 +39,11 @@
     String oldName();
 
     /**
+     * @return which converter we need depending on the old default unit that was used
+     */
+    Converters converter() default Converters.IDENTITY;
+
+    /**
      * @return whether the parameter should be marked as deprecated or not and warning sent to the user
      */
     boolean deprecated() default false;
diff --git a/src/java/org/apache/cassandra/config/ReplacesList.java b/src/java/org/apache/cassandra/config/ReplacesList.java
index a2b0c96..eec4e4c 100644
--- a/src/java/org/apache/cassandra/config/ReplacesList.java
+++ b/src/java/org/apache/cassandra/config/ReplacesList.java
@@ -24,7 +24,7 @@
 import java.lang.annotation.Target;
 
 /**
- * Contatining annotation type for the repeatable annotation {@link Replaces}
+ * Containing annotation type for the repeatable annotation {@link Replaces}
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ ElementType.FIELD})
diff --git a/src/java/org/apache/cassandra/config/StartupChecksOptions.java b/src/java/org/apache/cassandra/config/StartupChecksOptions.java
new file mode 100644
index 0000000..6eb3189
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/StartupChecksOptions.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.config;
+
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.cassandra.service.StartupChecks.StartupCheckType;
+
+import static java.lang.Boolean.FALSE;
+import static java.lang.Boolean.TRUE;
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.non_configurable_check;
+
+public class StartupChecksOptions
+{
+    public static final String ENABLED_PROPERTY = "enabled";
+
+    private final Map<StartupCheckType, Map<String, Object>> options = new EnumMap<>(StartupCheckType.class);
+
+    public StartupChecksOptions()
+    {
+        this(Collections.emptyMap());
+    }
+
+    public StartupChecksOptions(final Map<StartupCheckType, Map<String, Object>> options)
+    {
+        this.options.putAll(options);
+        apply();
+    }
+
+    public void set(final StartupCheckType startupCheckType, final String key, final Object value)
+    {
+        if (startupCheckType != non_configurable_check)
+            options.get(startupCheckType).put(key, value);
+    }
+
+    public void enable(final StartupCheckType startupCheckType)
+    {
+        set(startupCheckType, ENABLED_PROPERTY, TRUE);
+    }
+
+    public void disable(final StartupCheckType startupCheckType)
+    {
+        if (startupCheckType != non_configurable_check)
+            set(startupCheckType, ENABLED_PROPERTY, FALSE);
+    }
+
+    public boolean isEnabled(final StartupCheckType startupCheckType)
+    {
+        return Boolean.parseBoolean(options.get(startupCheckType).get(ENABLED_PROPERTY).toString());
+    }
+
+    public boolean isDisabled(final StartupCheckType startupCheckType)
+    {
+        return !isEnabled(startupCheckType);
+    }
+
+    public Map<String, Object> getConfig(final StartupCheckType startupCheckType)
+    {
+        return options.get(startupCheckType);
+    }
+
+    private void apply()
+    {
+        for (final StartupCheckType startupCheckType : StartupCheckType.values())
+        {
+            final Map<String, Object> configMap = options.computeIfAbsent(startupCheckType, k -> new HashMap<>());
+            if (configMap.containsKey(ENABLED_PROPERTY))
+                configMap.putIfAbsent(ENABLED_PROPERTY, FALSE);
+            else if (startupCheckType.disabledByDefault)
+                configMap.put(ENABLED_PROPERTY, FALSE);
+            else
+                configMap.put(ENABLED_PROPERTY, TRUE);
+        }
+        // clear if we put anything into it by accident & enable this check every time no matter what
+        options.get(non_configurable_check).clear();
+        options.get(non_configurable_check).put(ENABLED_PROPERTY, TRUE);
+    }
+}
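(Not part of this patch.) A minimal sketch of the defaulting behaviour above: after apply(), every check type carries an "enabled" entry, non_configurable_check cannot be switched off, and isEnabled() simply reads that entry.

    import org.apache.cassandra.config.StartupChecksOptions;
    import org.apache.cassandra.service.StartupChecks.StartupCheckType;

    public class StartupChecksOptionsSketch
    {
        public static void main(String[] args)
        {
            StartupChecksOptions options = new StartupChecksOptions(); // defaults applied for every check type

            // Attempting to disable the non-configurable check is a deliberate no-op.
            options.disable(StartupCheckType.non_configurable_check);

            for (StartupCheckType type : StartupCheckType.values())
                System.out.println(type + ": enabled=" + options.isEnabled(type));
        }
    }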
diff --git a/src/java/org/apache/cassandra/config/SubnetGroups.java b/src/java/org/apache/cassandra/config/SubnetGroups.java
new file mode 100644
index 0000000..4f460fa
--- /dev/null
+++ b/src/java/org/apache/cassandra/config/SubnetGroups.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+
+import inet.ipaddr.IPAddressNetwork;
+import inet.ipaddr.IPAddressString;
+
+/**
+ * When a group of subnets is needed, this class can be used to represent the group as if it were a single subnet.
+ *
+ * This class supports IPv4 and IPv6 subnets.
+ */
+public class SubnetGroups
+{
+    public Set<Group> subnets = Collections.emptySet();
+
+    public SubnetGroups()
+    {
+    }
+
+    /** Used by SnakeYaml */
+    @SuppressWarnings("unused")
+    public SubnetGroups(List<String> values)
+    {
+        this.subnets = ImmutableSet.copyOf(values.stream().map(Group::new).collect(Collectors.toSet()));
+    }
+
+    public boolean contains(SocketAddress address)
+    {
+        Preconditions.checkNotNull(address);
+        Preconditions.checkArgument(address instanceof InetSocketAddress, "Unsupported socket address type: " + address.getClass());
+        return contains(((InetSocketAddress) address).getAddress());
+    }
+
+    public boolean contains(InetAddress address)
+    {
+        for (Group group : subnets)
+        {
+            if (group.contains(address))
+            {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public boolean isEmpty()
+    {
+        return subnets.isEmpty();
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        SubnetGroups that = (SubnetGroups) o;
+        return subnets.equals(that.subnets);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(subnets);
+    }
+
+    @Override
+    public String toString()
+    {
+        return "SubnetGroups{" +
+               "subnets=" + subnets +
+               '}';
+    }
+
+    private static class Group
+    {
+        private static final IPAddressNetwork.IPAddressGenerator IP_ADDRESS_GENERATOR = new IPAddressNetwork.IPAddressGenerator();
+
+        private final IPAddressString subnet;
+
+        Group(String range)
+        {
+            subnet = new IPAddressString(range);
+        }
+
+        boolean contains(InetAddress address)
+        {
+            return subnet.contains(IP_ADDRESS_GENERATOR.from(address).toAddressString());
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Group group = (Group) o;
+            return subnet.equals(group.subnet);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hash(subnet);
+        }
+
+        @Override
+        public String toString()
+        {
+            return subnet.toString();
+        }
+    }
+}
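(Not part of this patch.) A minimal sketch of SubnetGroups matching, using the list-of-CIDR constructor that SnakeYaml invokes; the ranges below are illustrative.

    import java.net.InetAddress;
    import java.util.Arrays;

    import org.apache.cassandra.config.SubnetGroups;

    public class SubnetGroupsSketch
    {
        public static void main(String[] args) throws Exception
        {
            SubnetGroups groups = new SubnetGroups(Arrays.asList("127.0.0.1/31", "10.0.0.0/8"));

            System.out.println(groups.contains(InetAddress.getByName("10.1.2.3")));    // true: inside 10.0.0.0/8
            System.out.println(groups.contains(InetAddress.getByName("192.168.0.1"))); // false: not in any group
        }
    }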
diff --git a/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java b/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
index 48af78b..528accd 100644
--- a/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
+++ b/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
@@ -18,11 +18,8 @@
 package org.apache.cassandra.config;
 
 import java.io.ByteArrayInputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Field;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -33,18 +30,19 @@
 import java.util.Objects;
 import java.util.Set;
 
+import javax.annotation.Nullable;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.io.ByteStreams;
-
-import org.apache.commons.lang3.SystemUtils;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.util.File;
+import org.yaml.snakeyaml.LoaderOptions;
 import org.yaml.snakeyaml.TypeDescription;
 import org.yaml.snakeyaml.Yaml;
 import org.yaml.snakeyaml.composer.Composer;
@@ -56,11 +54,20 @@
 import org.yaml.snakeyaml.introspector.PropertyUtils;
 import org.yaml.snakeyaml.nodes.Node;
 
+import static org.apache.cassandra.config.CassandraRelevantProperties.ALLOW_DUPLICATE_CONFIG_KEYS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.ALLOW_NEW_OLD_CONFIG_KEYS;
+import static org.apache.cassandra.config.Replacements.getNameReplacements;
+
 public class YamlConfigurationLoader implements ConfigurationLoader
 {
     private static final Logger logger = LoggerFactory.getLogger(YamlConfigurationLoader.class);
 
     private final static String DEFAULT_CONFIGURATION = "cassandra.yaml";
+    /**
+     * This is related to {@link Config#PROPERTY_PREFIX} but is different to make sure Config properties updated via
+     * system properties do not conflict with other system properties; the name "settings" matches system_views.settings.
+     */
+    static final String SYSTEM_PROPERTY_PREFIX = "cassandra.settings.";
 
     /**
      * Inspect the classpath to find storage configuration file
@@ -83,14 +90,14 @@
             url = loader.getResource(configUrl);
             if (url == null)
             {
-                String required = "file:" + File.separator + File.separator;
+                String required = "file:" + File.pathSeparator() + File.pathSeparator();
                 if (!configUrl.startsWith(required))
                     throw new ConfigurationException(String.format(
                         "Expecting URI in variable: [cassandra.config]. Found[%s]. Please prefix the file with [%s%s] for local " +
                         "files and [%s<server>%s] for remote files. If you are executing this from an external tool, it needs " +
                         "to set Config.setClientMode(true) to avoid loading configuration.",
-                        configUrl, required, File.separator, required, File.separator));
-                throw new ConfigurationException("Cannot locate " + configUrl + ".  If this is a local file, please confirm you've provided " + required + File.separator + " as a URI prefix.");
+                        configUrl, required, File.pathSeparator(), required, File.pathSeparator()));
+                throw new ConfigurationException("Cannot locate " + configUrl + ".  If this is a local file, please confirm you've provided " + required + File.pathSeparator() + " as a URI prefix.");
             }
         }
 
@@ -106,6 +113,7 @@
     {
         if (storageConfigURL == null)
             storageConfigURL = getStorageConfigURL();
+
         return loadConfig(storageConfigURL);
     }
 
@@ -125,23 +133,80 @@
                 throw new AssertionError(e);
             }
 
-
             SafeConstructor constructor = new CustomConstructor(Config.class, Yaml.class.getClassLoader());
             Map<Class<?>, Map<String, Replacement>> replacements = getNameReplacements(Config.class);
+            verifyReplacements(replacements, configBytes);
             PropertiesChecker propertiesChecker = new PropertiesChecker(replacements);
             constructor.setPropertyUtils(propertiesChecker);
             Yaml yaml = new Yaml(constructor);
             Config result = loadConfig(yaml, configBytes);
             propertiesChecker.check();
+            maybeAddSystemProperties(result);
             return result;
         }
         catch (YAMLException e)
         {
-            throw new ConfigurationException("Invalid yaml: " + url + SystemUtils.LINE_SEPARATOR
-                                             +  " Error: " + e.getMessage(), false);
+            throw new ConfigurationException("Invalid yaml: " + url, e);
         }
     }
 
+    private static void maybeAddSystemProperties(Object obj)
+    {
+        if (CassandraRelevantProperties.CONFIG_ALLOW_SYSTEM_PROPERTIES.getBoolean())
+        {
+            java.util.Properties props = System.getProperties();
+            Map<String, String> map = new HashMap<>();
+            for (String name : props.stringPropertyNames())
+            {
+                if (name.startsWith(SYSTEM_PROPERTY_PREFIX))
+                {
+                    String value = props.getProperty(name);
+                    if (value != null)
+                        map.put(name.replace(SYSTEM_PROPERTY_PREFIX, ""), value);
+                }
+            }
+            if (!map.isEmpty())
+                updateFromMap(map, false, obj);
+        }
+    }
+
+    private static void verifyReplacements(Map<Class<?>, Map<String, Replacement>> replacements, Map<String, ?> rawConfig)
+    {
+        List<String> duplicates = new ArrayList<>();
+        for (Map.Entry<Class<?>, Map<String, Replacement>> outerEntry : replacements.entrySet())
+        {
+            for (Map.Entry<String, Replacement> entry : outerEntry.getValue().entrySet())
+            {
+                Replacement r = entry.getValue();
+                if (!r.isValueFormatReplacement() && rawConfig.containsKey(r.oldName) && rawConfig.containsKey(r.newName))
+                {
+                    String msg = String.format("[%s -> %s]", r.oldName, r.newName);
+                    duplicates.add(msg);
+                }
+            }
+        }
+
+        if (!duplicates.isEmpty())
+        {
+            String msg = String.format("Config contains both old and new keys for the same configuration parameters, migrate old -> new: %s", String.join(", ", duplicates));
+            if (!ALLOW_NEW_OLD_CONFIG_KEYS.getBoolean())
+                throw new ConfigurationException(msg);
+            else
+                logger.warn(msg);
+        }
+    }
+
+    private static void verifyReplacements(Map<Class<?>, Map<String, Replacement>> replacements, byte[] configBytes)
+    {
+        LoaderOptions loaderOptions = new LoaderOptions();
+        loaderOptions.setAllowDuplicateKeys(ALLOW_DUPLICATE_CONFIG_KEYS.getBoolean());
+        Yaml rawYaml = new Yaml(loaderOptions);
+
+        Map<String, Object> rawConfig = rawYaml.load(new ByteArrayInputStream(configBytes));
+        verifyReplacements(replacements, rawConfig);
+
+    }
+
     @VisibleForTesting
     public static <T> T fromMap(Map<String,Object> map, Class<T> klass)
     {
@@ -153,6 +218,41 @@
     {
         SafeConstructor constructor = new YamlConfigurationLoader.CustomConstructor(klass, klass.getClassLoader());
         Map<Class<?>, Map<String, Replacement>> replacements = getNameReplacements(Config.class);
+        verifyReplacements(replacements, map);
+        YamlConfigurationLoader.PropertiesChecker propertiesChecker = new YamlConfigurationLoader.PropertiesChecker(replacements);
+        constructor.setPropertyUtils(propertiesChecker);
+        Yaml yaml = new Yaml(constructor);
+        Node node = yaml.represent(map);
+        constructor.setComposer(new Composer(null, null)
+        {
+            @Override
+            public Node getSingleNode()
+            {
+                return node;
+            }
+        });
+        T value = (T) constructor.getSingleData(klass);
+        if (shouldCheck)
+            propertiesChecker.check();
+        maybeAddSystemProperties(value);
+        return value;
+    }
+
+    public static <T> T updateFromMap(Map<String, ?> map, boolean shouldCheck, T obj)
+    {
+        Class<T> klass = (Class<T>) obj.getClass();
+        SafeConstructor constructor = new YamlConfigurationLoader.CustomConstructor(klass, klass.getClassLoader())
+        {
+            @Override
+            protected Object newInstance(Node node)
+            {
+                if (node.getType() == obj.getClass())
+                    return obj;
+                return super.newInstance(node);
+            }
+        };
+        Map<Class<?>, Map<String, Replacement>> replacements = getNameReplacements(Config.class);
+        verifyReplacements(replacements, map);
         YamlConfigurationLoader.PropertiesChecker propertiesChecker = new YamlConfigurationLoader.PropertiesChecker(replacements);
         constructor.setPropertyUtils(propertiesChecker);
         Yaml yaml = new Yaml(constructor);
@@ -171,6 +271,7 @@
         return value;
     }
 
+    @VisibleForTesting
     static class CustomConstructor extends CustomClassLoaderConstructor
     {
         CustomConstructor(Class<?> theRoot, ClassLoader classLoader)
@@ -180,6 +281,10 @@
             TypeDescription seedDesc = new TypeDescription(ParameterizedClass.class);
             seedDesc.putMapPropertyType("parameters", String.class, String.class);
             addTypeDescription(seedDesc);
+
+            TypeDescription memtableDesc = new TypeDescription(Config.MemtableOptions.class);
+            memtableDesc.addPropertyParameters("configurations", String.class, InheritingClass.class);
+            addTypeDescription(memtableDesc);
         }
 
         @Override
@@ -213,8 +318,10 @@
      * Utility class to check that there are no extra properties and that properties that are not null by default
      * are not set to null.
      */
+    @VisibleForTesting
     private static class PropertiesChecker extends PropertyUtils
     {
+        private final Loader loader = Properties.defaultLoader();
         private final Set<String> missingProperties = new HashSet<>();
 
         private final Set<String> nullProperties = new HashSet<>();
@@ -223,83 +330,51 @@
 
         private final Map<Class<?>, Map<String, Replacement>> replacements;
 
-        public PropertiesChecker(Map<Class<?>, Map<String, Replacement>> replacements)
+        PropertiesChecker(Map<Class<?>, Map<String, Replacement>> replacements)
         {
             this.replacements = Objects.requireNonNull(replacements, "Replacements should not be null");
             setSkipMissingProperties(true);
         }
 
         @Override
-        public Property getProperty(Class<? extends Object> type, String name)
+        public Property getProperty(Class<?> type, String name)
         {
             final Property result;
             Map<String, Replacement> typeReplacements = replacements.getOrDefault(type, Collections.emptyMap());
             if (typeReplacements.containsKey(name))
             {
                 Replacement replacement = typeReplacements.get(name);
-                final Property newProperty = super.getProperty(type, replacement.newName);
-                result = new Property(replacement.oldName, newProperty.getType())
-                {
-                    @Override
-                    public Class<?>[] getActualTypeArguments()
-                    {
-                        return newProperty.getActualTypeArguments();
-                    }
-
-                    @Override
-                    public void set(Object o, Object o1) throws Exception
-                    {
-                        newProperty.set(o, o1);
-                    }
-
-                    @Override
-                    public Object get(Object o)
-                    {
-                        return newProperty.get(o);
-                    }
-
-                    @Override
-                    public List<Annotation> getAnnotations()
-                    {
-                        return null;
-                    }
-
-                    @Override
-                    public <A extends Annotation> A getAnnotation(Class<A> aClass)
-                    {
-                        return null;
-                    }
-                };
-
+                result = replacement.toProperty(getProperty0(type, replacement.newName));
+                
                 if (replacement.deprecated)
                     deprecationWarnings.add(replacement.oldName);
             }
             else
             {
-                result = super.getProperty(type, name);
+                result = getProperty0(type, name);
             }
 
             if (result instanceof MissingProperty)
             {
                 missingProperties.add(result.getName());
             }
-
-            return new Property(result.getName(), result.getType())
+            else if (result.getAnnotation(Deprecated.class) != null)
             {
+                deprecationWarnings.add(result.getName());
+            }
+
+            return new ForwardingProperty(result.getName(), result)
+            {
+                boolean allowsNull = result.getAnnotation(Nullable.class) != null;
+
                 @Override
                 public void set(Object object, Object value) throws Exception
                 {
-                    if (value == null && get(object) != null)
-                    {
+                    // TODO: CASSANDRA-17785, add @Nullable to all nullable Config properties and remove value == null
+                    if (value == null && get(object) != null && !allowsNull)
                         nullProperties.add(getName());
-                    }
-                    result.set(object, value);
-                }
 
-                @Override
-                public Class<?>[] getActualTypeArguments()
-                {
-                    return result.getActualTypeArguments();
+                    result.set(object, value);
                 }
 
                 @Override
@@ -307,21 +382,39 @@
                 {
                     return result.get(object);
                 }
-
-                @Override
-                public List<Annotation> getAnnotations()
-                {
-                    return Collections.EMPTY_LIST;
-                }
-
-                @Override
-                public <A extends Annotation> A getAnnotation(Class<A> aClass)
-                {
-                    return null;
-                }
             };
         }
 
+        private Property getProperty0(Class<? extends Object> type, String name)
+        {
+            if (name.contains("."))
+                return getNestedProperty(type, name);
+            return getFlatProperty(type, name);
+        }
+
+        private Property getFlatProperty(Class<?> type, String name)
+        {
+            Property prop = loader.getProperties(type).get(name);
+            return prop == null ? new MissingProperty(name) : prop;
+        }
+
+        private Property getNestedProperty(Class<?> type, String name)
+        {
+            Property root = null;
+            for (String s : name.split("\\."))
+            {
+                Property prop = getFlatProperty(type, s);
+                if (prop instanceof MissingProperty)
+                {
+                    root = null;
+                    break;
+                }
+                root = root == null ? prop : Properties.andThen(root, prop);
+                type = root.getType();
+            }
+            return root != null ? root : new MissingProperty(name);
+        }
+
         public void check() throws ConfigurationException
         {
             if (!nullProperties.isEmpty())
@@ -331,100 +424,8 @@
                 throw new ConfigurationException("Invalid yaml. Please remove properties " + missingProperties + " from your cassandra.yaml", false);
 
             if (!deprecationWarnings.isEmpty())
-                logger.warn("{} parameters have been deprecated. They have new names; For more information, please refer to NEWS.txt", deprecationWarnings);
-        }
-    }
-
-    /**
-     * @param klass to get replacements for
-     * @return map of old names and replacements needed.
-     */
-    private static Map<Class<?>, Map<String, Replacement>> getNameReplacements(Class<?> klass)
-    {
-        List<Replacement> replacements = getReplacements(klass);
-        Map<Class<?>, Map<String, Replacement>> objectOldNames = new HashMap<>();
-        for (Replacement r : replacements)
-        {
-            Map<String, Replacement> oldNames = objectOldNames.computeIfAbsent(r.parent, ignore -> new HashMap<>());
-            if (!oldNames.containsKey(r.oldName))
-                oldNames.put(r.oldName, r);
-            else
-            {
-                throw new ConfigurationException("Invalid annotations, you have more than one @Replaces annotation in " +
-                                                 "Config class with same old name(" + r.oldName + ") defined.");
-            }
-        }
-        return objectOldNames;
-    }
-
-    private static List<Replacement> getReplacements(Class<?> klass)
-    {
-        List<Replacement> replacements = new ArrayList<>();
-        for (Field field : klass.getDeclaredFields())
-        {
-            String newName = field.getName();
-            final ReplacesList[] byType = field.getAnnotationsByType(ReplacesList.class);
-            if (byType == null || byType.length == 0)
-            {
-                Replaces r = field.getAnnotation(Replaces.class);
-                if (r != null)
-                    addReplacement(klass, replacements, newName, r);
-            }
-            else
-            {
-                for (ReplacesList replacesList : byType)
-                    for (Replaces r : replacesList.value())
-                        addReplacement(klass, replacements, newName, r);
-            }
-        }
-        return replacements.isEmpty() ? Collections.emptyList() : replacements;
-    }
-
-    private static void addReplacement(Class<?> klass,
-                                       List<Replacement> replacements,
-                                       String newName,
-                                       Replaces r)
-    {
-        String oldName = r.oldName();
-        boolean deprecated = r.deprecated();
-
-        replacements.add(new Replacement(klass, oldName, newName, deprecated));
-    }
-
-    /**
-     * Holder for replacements to support backward compatibility between old and new names for configuration parameters
-     * backported partially from trunk(CASSANDRA-15234) to support a bug fix/improvement in Cassadra 4.0
-     * (CASSANDRA-17141)
-     */
-    static final class Replacement
-    {
-        /**
-         * Currently we use for Config class
-         */
-        final Class<?> parent;
-        /**
-         * Old name of the configuration parameter
-         */
-        final String oldName;
-        /**
-         * New name used for the configuration parameter
-         */
-        final String newName;
-        /**
-         * A flag to mark whether the old name is deprecated and fire a warning to the user. By default we set it to false.
-         */
-        final boolean deprecated;
-
-        Replacement(Class<?> parent,
-                    String oldName,
-                    String newName,
-                    boolean deprecated)
-        {
-            this.parent = Objects.requireNonNull(parent);
-            this.oldName = Objects.requireNonNull(oldName);
-            this.newName = Objects.requireNonNull(newName);
-            // by default deprecated is false
-            this.deprecated = deprecated;
+                logger.warn("{} parameters have been deprecated. They have new names and/or value format; For more information, please refer to NEWS.txt", deprecationWarnings);
         }
     }
 }
+
diff --git a/src/java/org/apache/cassandra/cql3/CQL3Type.java b/src/java/org/apache/cassandra/cql3/CQL3Type.java
index 5059104..1d792b2 100644
--- a/src/java/org/apache/cassandra/cql3/CQL3Type.java
+++ b/src/java/org/apache/cassandra/cql3/CQL3Type.java
@@ -24,7 +24,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.db.marshal.CollectionType.Kind;
@@ -55,6 +54,7 @@
     }
 
     public AbstractType<?> getType();
+    default public AbstractType<?> getUDFType() { return getType(); }
 
     /**
      * Generates CQL literal from a binary value of this type.
@@ -101,6 +101,11 @@
             return type;
         }
 
+        public AbstractType<?> getUDFType()
+        {
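+            // TIMEUUID values are exposed to UDFs as plain UUIDs; every other native type maps to itself.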
+            return this == TIMEUUID ? UUID.type : type;
+        }
+
         /**
          * Delegate to
          * {@link org.apache.cassandra.serializers.TypeSerializer#toCQLLiteral(ByteBuffer)}
diff --git a/src/java/org/apache/cassandra/cql3/Constants.java b/src/java/org/apache/cassandra/cql3/Constants.java
index 6dce3a3..64d9d69 100644
--- a/src/java/org/apache/cassandra/cql3/Constants.java
+++ b/src/java/org/apache/cassandra/cql3/Constants.java
@@ -20,9 +20,13 @@
 import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.marshal.*;
@@ -30,6 +34,7 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FastByteOperations;
 
 /**
  * Static helper methods and classes for constants.
@@ -40,7 +45,18 @@
 
     public enum Type
     {
-        STRING,
+        STRING
+        {
+            public AbstractType<?> getPreferedTypeFor(String text)
+            {
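+                 // Prefer the narrower AsciiType when the literal is pure US-ASCII, otherwise fall back to UTF8Type.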
+                 if (Charset.forName("US-ASCII").newEncoder().canEncode(text))
+                 {
+                     return AsciiType.instance;
+                 }
+
+                 return UTF8Type.instance;
+            }
+        },
         INTEGER
         {
             public AbstractType<?> getPreferedTypeFor(String text)
@@ -446,16 +462,53 @@
             super(column, t);
         }
 
+        public boolean requiresRead()
+        {
+            return !(column.type instanceof CounterColumnType);
+        }
+
         public void execute(DecoratedKey partitionKey, UpdateParameters params) throws InvalidRequestException
         {
-            ByteBuffer bytes = t.bindAndGet(params.options);
-            if (bytes == null)
-                throw new InvalidRequestException("Invalid null value for counter increment");
-            if (bytes == ByteBufferUtil.UNSET_BYTE_BUFFER)
-                return;
+            if (column.type instanceof CounterColumnType)
+            {
+                ByteBuffer bytes = t.bindAndGet(params.options);
+                if (bytes == null)
+                    throw new InvalidRequestException("Invalid null value for counter increment");
+                if (bytes == ByteBufferUtil.UNSET_BYTE_BUFFER)
+                    return;
 
-            long increment = ByteBufferUtil.toLong(bytes);
-            params.addCounter(column, increment);
+                long increment = ByteBufferUtil.toLong(bytes);
+                params.addCounter(column, increment);
+            }
+            else if (column.type instanceof NumberType<?>)
+            {
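+                // Read-before-write path: fetch the column's current value and write back the sum of it and the increment.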
+                @SuppressWarnings("unchecked") NumberType<Number> type = (NumberType<Number>) column.type;
+                ByteBuffer increment = t.bindAndGet(params.options);
+                ByteBuffer current = getCurrentCellBuffer(partitionKey, params);
+                if (current == null)
+                    return;
+                ByteBuffer newValue = type.add(type, current, type, increment);
+                params.addCell(column, newValue);
+            }
+            else if (column.type instanceof StringType)
+            {
+                ByteBuffer append = t.bindAndGet(params.options);
+                ByteBuffer current = getCurrentCellBuffer(partitionKey, params);
+                ByteBuffer newValue;
+                if (current == null)
+                    return;
+                newValue = ByteBuffer.allocate(current.remaining() + append.remaining());
+                FastByteOperations.copy(current, current.position(), newValue, newValue.position(), current.remaining());
+                FastByteOperations.copy(append, append.position(), newValue, newValue.position() + current.remaining(), append.remaining());
+                params.addCell(column, newValue);
+            }
+        }
+
+        private ByteBuffer getCurrentCellBuffer(DecoratedKey key, UpdateParameters params)
+        {
+            Row currentRow = params.getPrefetchedRow(key, column.isStatic() ? Clustering.STATIC_CLUSTERING : params.currentClustering());
+            Cell<?> currentCell = currentRow == null ? null : currentRow.getCell(column);
+            return currentCell == null ? null : currentCell.buffer();
         }
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/Duration.java b/src/java/org/apache/cassandra/cql3/Duration.java
index 520d195..1d6b9f7 100644
--- a/src/java/org/apache/cassandra/cql3/Duration.java
+++ b/src/java/org/apache/cassandra/cql3/Duration.java
@@ -27,10 +27,13 @@
 
 import org.apache.cassandra.serializers.MarshalException;
 
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
 import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
 import static org.apache.commons.lang3.time.DateUtils.MILLIS_PER_DAY;
 
+import io.netty.util.concurrent.FastThreadLocal;
+
 /**
  * Represents a duration. A durations store separately months, days, and seconds due to the fact that
  * the number of days in a month varies, and a day can have 23 or 25 hours if a daylight saving is involved.
@@ -45,6 +48,17 @@
     public static final int DAYS_PER_WEEK = 7;
     public static final int MONTHS_PER_YEAR = 12;
 
+    // For some operations, like floor, a Calendar is needed if months or years are involved. Unfortunately, creating a
+    // Calendar is a costly operation, so instead of creating one on every call we reuse a thread-local instance.
+    private static final FastThreadLocal<Calendar> CALENDAR_PROVIDER = new FastThreadLocal<Calendar>()
+    {
+        @Override
+        public Calendar initialValue()
+        {
+            return Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.US);
+        }
+    };
+
     /**
      * The Regexp used to parse the duration provided as String.
      */
@@ -339,7 +353,7 @@
     {
         StringBuilder builder = new StringBuilder();
 
-        if (months < 0 || days < 0 || nanoseconds < 0)
+        if (isNegative())
             builder.append('-');
 
         long remainder = append(builder, Math.abs(months), MONTHS_PER_YEAR, "y");
@@ -395,6 +409,124 @@
         return dividend % divisor;
     }
 
+    /**
+     * Rounds a timestamp down to the closest multiple of a duration.
+     *
+     * @param timeInMillis the time to round in millisecond
+     * @param duration the duration
+     * @param startingTimeInMillis the time offset in milliseconds
+     * @return the timestamp rounded down to the closest multiple of the duration
+     */
+    public static long floorTimestamp(long timeInMillis, Duration duration, long startingTimeInMillis)
+    {
+        checkFalse(startingTimeInMillis > timeInMillis, "The floor function starting time is greater than the provided time");
+        checkFalse(duration.isNegative(), "Negative durations are not supported by the floor function");
+
+        // If the duration does not contain any months we can ignore daylight saving,
+        // as time zones are not supported, and simply look at the milliseconds
+        if (duration.months == 0)
+        {
+            long durationInMillis = getDurationMilliseconds(duration);
+
+            // If the duration is smaller than a millisecond
+            if (durationInMillis == 0)
+                return timeInMillis;
+
+            long delta = (timeInMillis - startingTimeInMillis) % durationInMillis;
+            return timeInMillis - delta;
+        }
+
+        /*
+         * Otherwise, we resort to Calendar for the computation.
+         * What we're trying to compute is the largest integer 'multiplier' value such that
+         *   startingTimeInMillis + (multiplier * duration) <= timeInMillis
+         * at which point we want to return 'startingTimeInMillis + (multiplier * duration)'.
+         *
+         * One option would be to add 'duration' to 'startingTimeInMillis' in a loop until we
+         * cross 'timeInMillis' and count how many iterations we did. But this might be slow if there are very many
+         * steps.
+         *
+         * So instead we first estimate 'multiplier' using the number of months between 'startingTimeInMillis'
+         * and 'timeInMillis' ('durationInMonths' below) and the duration's months. As the real computation
+         * should also take into account the 'days' and 'nanoseconds' parts of the duration, this multiplier may
+         * overshoot, so we detect it and work back from that, decreasing the multiplier until we find the proper one.
+         */
+
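+        // For example (illustrative values): with duration = 2 months, startingTimeInMillis = 2022-01-01T00:00Z
+        // and timeInMillis somewhere on 2022-03-10, durationInMonths = 2, multiplier = 1 and the floor is 2022-03-01T00:00Z.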
+        Calendar calendar = CALENDAR_PROVIDER.get();
+
+        calendar.setTimeInMillis(timeInMillis);
+        int year = calendar.get(Calendar.YEAR);
+        int month = calendar.get(Calendar.MONTH);
+
+        calendar.setTimeInMillis(startingTimeInMillis);
+        int startingYear = calendar.get(Calendar.YEAR);
+        int startingMonth = calendar.get(Calendar.MONTH);
+
+        int durationInMonths = (year - startingYear) * MONTHS_PER_YEAR + (month - startingMonth);
+        int multiplier = durationInMonths / duration.months;
+
+        calendar.add(Calendar.MONTH, multiplier * duration.months);
+
+        // If the duration was only containing months, we are done.
+        if (duration.days == 0 && duration.nanoseconds == 0)
+            return calendar.getTimeInMillis();
+
+        long durationInMillis = getDurationMilliseconds(duration);
+        long floor = calendar.getTimeInMillis() + (multiplier * durationInMillis);
+
+        // Once the milliseconds have been added we might have gone too far. If that is the case we reduce the
+        // multiplier until the floor value no longer exceeds timeInMillis.
+        while (floor > timeInMillis)
+        {
+            multiplier--;
+            calendar.add(Calendar.MONTH, -duration.months);
+            floor = calendar.getTimeInMillis() + (multiplier * durationInMillis);
+        }
+
+        return Math.max(startingTimeInMillis, floor);
+    }
+
+    /**
+     * Returns the milliseconds part of the duration ignoring the month part
+     *
+     * @param duration the duration
+     * @return the milliseconds corresponding to the duration days and nanoseconds
+     */
+    private static long getDurationMilliseconds(Duration duration)
+    {
+        // We can ignore daylight saving as time zones are not supported
+        return (duration.days * MILLIS_PER_DAY) + (duration.nanoseconds / NANOS_PER_MILLI);
+    }
+
+    /**
+     * Rounds a time down to the closest multiple of a duration.
+     *
+     * @param timeInNanos the time of day in nanoseconds
+     * @param duration the duration
+     * @return the time rounded down to the closest multiple of the duration
+     */
+    public static long floorTime(long timeInNanos, Duration duration)
+    {
+        checkFalse(duration.isNegative(), "Negative durations are not supported by the floor function");
+        checkFalse(duration.getMonths() != 0 || duration.getDays() != 0 || duration.getNanoseconds() > (NANOS_PER_HOUR * 24),
+                   "For time values, the floor can only be computed for durations smaller that a day");
+
+        if (duration.nanoseconds == 0)
+            return timeInNanos;
+
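+        // For example (illustrative values), a 15-minute duration floors a time of 10:31:25 down to 10:30:00.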
+        long delta = timeInNanos % duration.nanoseconds;
+        return timeInNanos - delta;
+    }
+
+    /**
+     * Checks if the duration is negative.
+     * @return {@code true} if the duration is negative, {@code false} otherwise
+     */
+    public boolean isNegative()
+    {
+        return nanoseconds < 0 || days < 0 || months < 0;
+    }
+
     private static class Builder
     {
         /**
diff --git a/src/java/org/apache/cassandra/cql3/Lists.java b/src/java/org/apache/cassandra/cql3/Lists.java
index 1d94d69..bdac046 100644
--- a/src/java/org/apache/cassandra/cql3/Lists.java
+++ b/src/java/org/apache/cassandra/cql3/Lists.java
@@ -18,6 +18,8 @@
 package org.apache.cassandra.cql3;
 
 import static org.apache.cassandra.cql3.Constants.UNSET_VALUE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.TimeUUID.Generator.atUnixMillisAsBytes;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -28,6 +30,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.schema.ColumnMetadata;
 import com.google.common.annotations.VisibleForTesting;
@@ -43,7 +46,6 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
 
 /**
  * Static helper methods and classes for lists.
@@ -359,7 +361,7 @@
                 else
                 {
                     // in addition to being at the same millisecond, we handle the unexpected case of the millis parameter
-                    // being in the past. That could happen if the System.currentTimeMillis() not operating montonically
+                    // being in the past. That could happen if Global.currentTimeMillis() is not operating monotonically
                     // or if one thread is just a really big loser in the compareAndSet game of life.
                     long millisToUse = millis <= current.millis ? millis : current.millis;
 
@@ -447,6 +449,9 @@
             // we should not get here for frozen lists
             assert column.type.isMultiCell() : "Attempted to set an individual element on a frozen list";
 
+            Guardrails.readBeforeWriteListOperationsEnabled
+            .ensureEnabled("Setting of list items by index requiring read before write", params.clientState);
+
             ByteBuffer index = idx.bindAndGet(params.options);
             ByteBuffer value = t.bindAndGet(params.options);
 
@@ -487,26 +492,43 @@
 
         static void doAppend(Term.Terminal value, ColumnMetadata column, UpdateParameters params) throws InvalidRequestException
         {
-            if (column.type.isMultiCell())
+            if (value == null)
             {
+                // for frozen lists, we're overwriting the whole cell value
+                if (!column.type.isMultiCell())
+                    params.addTombstone(column);
+
                 // If we append null, do nothing. Note that for Setter, we've
                 // already removed the previous value so we're good here too
-                if (value == null)
+                return;
+            }
+
+            List<ByteBuffer> elements = ((Value) value).elements;
+
+            if (column.type.isMultiCell())
+            {
+                if (elements.size() == 0)
                     return;
 
-                for (ByteBuffer buffer : ((Value) value).elements)
+                // Guardrails about collection size are only checked for the added elements, without considering
+                // already existing elements. This is done to avoid read-before-write; additional checks are
+                // performed during SSTable write.
+                Guardrails.itemsPerCollection.guard(elements.size(), column.name.toString(), false, params.clientState);
+
+                int dataSize = 0;
+                for (ByteBuffer buffer : elements)
                 {
-                    ByteBuffer uuid = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes());
-                    params.addCell(column, CellPath.create(uuid), buffer);
+                    ByteBuffer uuid = ByteBuffer.wrap(params.nextTimeUUIDAsBytes());
+                    Cell<?> cell = params.addCell(column, CellPath.create(uuid), buffer);
+                    dataSize += cell.dataSize();
                 }
+                Guardrails.collectionSize.guard(dataSize, column.name.toString(), false, params.clientState);
             }
             else
             {
-                // for frozen lists, we're overwriting the whole cell value
-                if (value == null)
-                    params.addTombstone(column);
-                else
-                    params.addCell(column, value.get(ProtocolVersion.CURRENT));
+                Guardrails.itemsPerCollection.guard(elements.size(), column.name.toString(), false, params.clientState);
+                Cell<?> cell = params.addCell(column, value.get(ProtocolVersion.CURRENT));
+                Guardrails.collectionSize.guard(cell.dataSize(), column.name.toString(), false, params.clientState);
             }
         }
     }
@@ -536,12 +558,13 @@
             {
                 if (remainingInBatch == 0)
                 {
-                    long time = PrecisionTime.REFERENCE_TIME - (System.currentTimeMillis() - PrecisionTime.REFERENCE_TIME);
+                    long time = PrecisionTime.REFERENCE_TIME - (currentTimeMillis() - PrecisionTime.REFERENCE_TIME);
                     remainingInBatch = Math.min(PrecisionTime.MAX_NANOS, i) + 1;
                     pt = PrecisionTime.getNext(time, remainingInBatch);
                 }
 
-                ByteBuffer uuid = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes(pt.millis, (pt.nanos + remainingInBatch--)));
+                // TODO: is this safe as part of LWTs?
+                ByteBuffer uuid = ByteBuffer.wrap(atUnixMillisAsBytes(pt.millis, (pt.nanos + remainingInBatch--)));
                 params.addCell(column, CellPath.create(uuid), toAdd.get(i));
             }
         }
@@ -564,6 +587,9 @@
         {
             assert column.type.isMultiCell() : "Attempted to delete from a frozen list";
 
+            Guardrails.readBeforeWriteListOperationsEnabled
+            .ensureEnabled("Removal of list items requiring read before write", params.clientState);
+
             // We want to call bind before possibly returning to reject queries where the value provided is not a list.
             Term.Terminal value = t.bind(params.options);
 
@@ -601,6 +627,10 @@
         public void execute(DecoratedKey partitionKey, UpdateParameters params) throws InvalidRequestException
         {
             assert column.type.isMultiCell() : "Attempted to delete an item by index from a frozen list";
+
+            Guardrails.readBeforeWriteListOperationsEnabled
+            .ensureEnabled("Removal of list items by index requiring read before write", params.clientState);
+
             Term.Terminal index = t.bind(params.options);
             if (index == null)
                 throw new InvalidRequestException("Invalid null value for list index");
diff --git a/src/java/org/apache/cassandra/cql3/Maps.java b/src/java/org/apache/cassandra/cql3/Maps.java
index 3a9575c..a2d23a6 100644
--- a/src/java/org/apache/cassandra/cql3/Maps.java
+++ b/src/java/org/apache/cassandra/cql3/Maps.java
@@ -23,6 +23,7 @@
 import java.util.*;
 import java.util.stream.Collectors;
 
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.cql3.functions.Function;
 import org.apache.cassandra.db.DecoratedKey;
@@ -422,22 +423,40 @@
 
         static void doPut(Term.Terminal value, ColumnMetadata column, UpdateParameters params) throws InvalidRequestException
         {
+            if (value == null)
+            {
+                // for frozen maps, we're overwriting the whole cell
+                if (!column.type.isMultiCell())
+                    params.addTombstone(column);
+
+                return;
+            }
+
+            SortedMap<ByteBuffer, ByteBuffer> elements = ((Value) value).map;
+
             if (column.type.isMultiCell())
             {
-                if (value == null)
+                if (elements.size() == 0)
                     return;
 
-                SortedMap<ByteBuffer, ByteBuffer> elements = ((Value) value).map;
+                // Guardrails about collection size are only checked for the added elements, without considering
+                // already existing elements. This is done to avoid read-before-write; additional checks are
+                // performed during SSTable write.
+                Guardrails.itemsPerCollection.guard(elements.size(), column.name.toString(), false, params.clientState);
+
+                int dataSize = 0;
                 for (Map.Entry<ByteBuffer, ByteBuffer> entry : elements.entrySet())
-                    params.addCell(column, CellPath.create(entry.getKey()), entry.getValue());
+                {
+                    Cell<?> cell = params.addCell(column, CellPath.create(entry.getKey()), entry.getValue());
+                    dataSize += cell.dataSize();
+                }
+                Guardrails.collectionSize.guard(dataSize, column.name.toString(), false, params.clientState);
             }
             else
             {
-                // for frozen maps, we're overwriting the whole cell
-                if (value == null)
-                    params.addTombstone(column);
-                else
-                    params.addCell(column, value.get(ProtocolVersion.CURRENT));
+                Guardrails.itemsPerCollection.guard(elements.size(), column.name.toString(), false, params.clientState);
+                Cell<?> cell = params.addCell(column, value.get(ProtocolVersion.CURRENT));
+                Guardrails.collectionSize.guard(cell.dataSize(), column.name.toString(), false, params.clientState);
             }
         }
     }
diff --git a/src/java/org/apache/cassandra/cql3/Operation.java b/src/java/org/apache/cassandra/cql3/Operation.java
index d52d10e..51f5d13 100644
--- a/src/java/org/apache/cassandra/cql3/Operation.java
+++ b/src/java/org/apache/cassandra/cql3/Operation.java
@@ -97,7 +97,7 @@
      * This can be one of:
      *   - Setting a value: c = v
      *   - Setting an element of a collection: c[x] = v
-     *   - An addition/subtraction to a variable: c = c +/- v (where v can be a collection literal)
+     *   - An addition/subtraction to a variable: c = c +/- v (where v can be a collection literal, scalar, or string)
      *   - An prepend operation: c = v + c
      */
     public interface RawUpdate
@@ -112,9 +112,11 @@
          *
          * @param metadata
          * @param receiver the column this operation applies to.
+         * @param canReadExistingState whether the update is allowed to read the column's existing state
+         *
          * @return the prepared update operation.
          */
-        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver) throws InvalidRequestException;
+        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver, boolean canReadExistingState) throws InvalidRequestException;
 
         /**
          * @return whether this operation can be applied alongside the {@code
@@ -161,7 +163,7 @@
             this.value = value;
         }
 
-        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver) throws InvalidRequestException
+        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver, boolean canReadExistingState) throws InvalidRequestException
         {
             Term v = value.prepare(metadata.keyspace, receiver);
 
@@ -213,7 +215,7 @@
             this.value = value;
         }
 
-        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver) throws InvalidRequestException
+        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver, boolean canReadExistingState) throws InvalidRequestException
         {
             if (!(receiver.type instanceof CollectionType))
                 throw new InvalidRequestException(String.format("Invalid operation (%s) for non collection column %s", toString(receiver), receiver.name));
@@ -260,7 +262,7 @@
             this.value = value;
         }
 
-        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver) throws InvalidRequestException
+        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver, boolean canReadExistingState) throws InvalidRequestException
         {
             if (!receiver.type.isUDT())
                 throw new InvalidRequestException(String.format("Invalid operation (%s) for non-UDT column %s", toString(receiver), receiver.name));
@@ -298,15 +300,23 @@
             this.value = value;
         }
 
-        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver) throws InvalidRequestException
+        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver, boolean canReadExistingState) throws InvalidRequestException
         {
             if (!(receiver.type instanceof CollectionType))
             {
                 if (receiver.type instanceof TupleType)
                     throw new InvalidRequestException(String.format("Invalid operation (%s) for tuple column %s", toString(receiver), receiver.name));
 
-                if (!(receiver.type instanceof CounterColumnType))
-                    throw new InvalidRequestException(String.format("Invalid operation (%s) for non counter column %s", toString(receiver), receiver.name));
+                if (canReadExistingState)
+                {
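+                    // Non-counter additions are applied read-before-write, so they are only legal when the update may read existing state.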
+                    if (!(receiver.type instanceof NumberType<?>) && !(receiver.type instanceof StringType))
+                        throw new InvalidRequestException(String.format("Invalid operation (%s) for non-numeric and non-text type %s", toString(receiver), receiver.name));
+                }
+                else
+                {
+                    if (!(receiver.type instanceof CounterColumnType))
+                        throw new InvalidRequestException(String.format("Invalid operation (%s) for non counter column %s", toString(receiver), receiver.name));
+                }
                 return new Constants.Adder(receiver, value.prepare(metadata.keyspace, receiver));
             }
             else if (!(receiver.type.isMultiCell()))
@@ -354,7 +364,7 @@
             this.value = value;
         }
 
-        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver) throws InvalidRequestException
+        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver, boolean canReadExistingState) throws InvalidRequestException
         {
             if (!(receiver.type instanceof CollectionType))
             {
@@ -411,7 +421,7 @@
             this.value = value;
         }
 
-        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver) throws InvalidRequestException
+        public Operation prepare(TableMetadata metadata, ColumnMetadata receiver, boolean canReadExistingState) throws InvalidRequestException
         {
             Term v = value.prepare(metadata.keyspace, receiver);
 
diff --git a/src/java/org/apache/cassandra/cql3/Operator.java b/src/java/org/apache/cassandra/cql3/Operator.java
index 1acedee..bcb5f63 100644
--- a/src/java/org/apache/cassandra/cql3/Operator.java
+++ b/src/java/org/apache/cassandra/cql3/Operator.java
@@ -26,6 +26,7 @@
 import java.util.Set;
 
 import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.serializers.ListSerializer;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 public enum Operator
@@ -110,8 +111,8 @@
 
         public boolean isSatisfiedBy(AbstractType<?> type, ByteBuffer leftOperand, ByteBuffer rightOperand)
         {
-            List<?> inValues = ListType.getInstance(type, false).getSerializer().deserialize(rightOperand);
-            return inValues.contains(type.getSerializer().deserialize(leftOperand));
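+            // Avoid deserializing the whole IN list: test each serialized element in place against the left operand.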
+            ListSerializer<?> serializer = ListType.getInstance(type, false).getSerializer();
+            return serializer.anyMatch(rightOperand, r -> type.compareForCQL(leftOperand, r) == 0);
         }
     },
     CONTAINS(5)
@@ -339,4 +340,22 @@
     {
         return this == IN;
     }
+
+    /**
+     * Checks if this operator is the CONTAINS operator.
+     * @return {@code true} if this operator is a CONTAINS operator, {@code false} otherwise.
+     */
+    public boolean isContains()
+    {
+        return this == CONTAINS;
+    }
+
+    /**
+     * Checks if this operator is the CONTAINS KEY operator.
+     * @return {@code true} if this operator is a CONTAINS KEY operator, {@code false} otherwise.
+     */
+    public boolean isContainsKey()
+    {
+        return this == CONTAINS_KEY;
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/PasswordObfuscator.java b/src/java/org/apache/cassandra/cql3/PasswordObfuscator.java
index 89962f9..8e18f34 100644
--- a/src/java/org/apache/cassandra/cql3/PasswordObfuscator.java
+++ b/src/java/org/apache/cassandra/cql3/PasswordObfuscator.java
@@ -18,7 +18,7 @@
 
 package org.apache.cassandra.cql3;
 
-import com.google.common.base.Optional;
+import java.util.Optional;
 
 import org.apache.cassandra.auth.PasswordAuthenticator;
 import org.apache.cassandra.auth.RoleOptions;
@@ -63,9 +63,15 @@
 
         Optional<String> pass = opts.getPassword();
         if (!pass.isPresent() || pass.get().isEmpty())
+            pass = opts.getHashedPassword();
+        if (!pass.isPresent() || pass.get().isEmpty())
             return query;
 
-        // match new line, case insensitive (?si), and PASSWORD_TOKEN up to the actual password greedy. Group that and replace the password
-        return query.replaceAll("((?si)"+ PASSWORD_TOKEN + ".+?)" + pass.get(), "$1" + PasswordObfuscator.OBFUSCATION_TOKEN);
+        // Regular expression:
+        //  - (?si) enables DOTALL and case-insensitive matching; PASSWORD_TOKEN followed by a reluctant .+? matches up to the start of the actual password, and that prefix is captured as a group.
+        //  - Quote the password between \Q and \E so any potential special characters are treated as literals
+        //  - Replace the match with the grouped data + the obfuscated token
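+        // e.g. (illustrative) "... PASSWORD = 'p@ss' ..." is rewritten so that the password characters are replaced by OBFUSCATION_TOKEN.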
+        return query.replaceAll("((?si)"+ PASSWORD_TOKEN + ".+?)\\Q" + pass.get() + "\\E",
+                                "$1" + PasswordObfuscator.OBFUSCATION_TOKEN);
     }
 }
diff --git a/src/java/org/apache/cassandra/cql3/QueryOptions.java b/src/java/org/apache/cassandra/cql3/QueryOptions.java
index d3b1a03..a286aed 100644
--- a/src/java/org/apache/cassandra/cql3/QueryOptions.java
+++ b/src/java/org/apache/cassandra/cql3/QueryOptions.java
@@ -24,6 +24,8 @@
 
 import io.netty.buffer.ByteBuf;
 
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.marshal.UTF8Type;
@@ -59,6 +61,11 @@
         return new DefaultQueryOptions(consistency, values, false, SpecificOptions.DEFAULT, ProtocolVersion.V3);
     }
 
+    public static QueryOptions forInternalCallsWithNowInSec(int nowInSec, ConsistencyLevel consistency, List<ByteBuffer> values)
+    {
+        return new DefaultQueryOptions(consistency, values, false, SpecificOptions.DEFAULT.withNowInSec(nowInSec), ProtocolVersion.CURRENT);
+    }
+
     public static QueryOptions forInternalCalls(List<ByteBuffer> values)
     {
         return new DefaultQueryOptions(ConsistencyLevel.ONE, values, false, SpecificOptions.DEFAULT, ProtocolVersion.V3);
@@ -208,6 +215,12 @@
     /** The keyspace that this query is bound to, or null if not relevant. */
     public String getKeyspace() { return getSpecificOptions().keyspace; }
 
+    public int getNowInSec(int ifNotSet)
+    {
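+        // Integer.MIN_VALUE is treated as the "not set" sentinel for SpecificOptions.nowInSeconds.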
+        int nowInSec = getSpecificOptions().nowInSeconds;
+        return nowInSec != Integer.MIN_VALUE ? nowInSec : ifNotSet;
+    }
+
     /**
      * The protocol version for the query.
      */
@@ -216,11 +229,98 @@
     // Mainly for the sake of BatchQueryOptions
     abstract SpecificOptions getSpecificOptions();
 
+    abstract ReadThresholds getReadThresholds();
+
+    public boolean isReadThresholdsEnabled()
+    {
+        return getReadThresholds().isEnabled();
+    }
+
+    public long getCoordinatorReadSizeWarnThresholdBytes()
+    {
+        return getReadThresholds().getCoordinatorReadSizeWarnThresholdBytes();
+    }
+
+    public long getCoordinatorReadSizeAbortThresholdBytes()
+    {
+        return getReadThresholds().getCoordinatorReadSizeFailThresholdBytes();
+    }
+
     public QueryOptions prepare(List<ColumnSpecification> specs)
     {
         return this;
     }
 
+    interface ReadThresholds
+    {
+        boolean isEnabled();
+
+        long getCoordinatorReadSizeWarnThresholdBytes();
+
+        long getCoordinatorReadSizeFailThresholdBytes();
+
+        static ReadThresholds create()
+        {
+            // if daemon initialization hasn't happened yet (very common in tests) then ignore
+            if (!DatabaseDescriptor.isDaemonInitialized() || !DatabaseDescriptor.getReadThresholdsEnabled())
+                return DisabledReadThresholds.INSTANCE;
+            return new DefaultReadThresholds(DatabaseDescriptor.getCoordinatorReadSizeWarnThreshold(), DatabaseDescriptor.getCoordinatorReadSizeFailThreshold());
+        }
+    }
+
+    private enum DisabledReadThresholds implements ReadThresholds
+    {
+        INSTANCE;
+
+        @Override
+        public boolean isEnabled()
+        {
+            return false;
+        }
+
+        @Override
+        public long getCoordinatorReadSizeWarnThresholdBytes()
+        {
+            return -1;
+        }
+
+        @Override
+        public long getCoordinatorReadSizeFailThresholdBytes()
+        {
+            return -1;
+        }
+    }
+
+    private static class DefaultReadThresholds implements ReadThresholds
+    {
+        private final long warnThresholdBytes;
+        private final long abortThresholdBytes;
+
+        public DefaultReadThresholds(DataStorageSpec.LongBytesBound warnThreshold, DataStorageSpec.LongBytesBound abortThreshold)
+        {
+            this.warnThresholdBytes = warnThreshold == null ? -1 : warnThreshold.toBytes();
+            this.abortThresholdBytes = abortThreshold == null ? -1 : abortThreshold.toBytes();
+        }
+
+        @Override
+        public boolean isEnabled()
+        {
+            return true;
+        }
+
+        @Override
+        public long getCoordinatorReadSizeWarnThresholdBytes()
+        {
+            return warnThresholdBytes;
+        }
+
+        @Override
+        public long getCoordinatorReadSizeFailThresholdBytes()
+        {
+            return abortThresholdBytes;
+        }
+    }
+
     static class DefaultQueryOptions extends QueryOptions
     {
         private final ConsistencyLevel consistency;
@@ -230,6 +330,7 @@
         private final SpecificOptions options;
 
         private final transient ProtocolVersion protocolVersion;
+        private final transient ReadThresholds readThresholds = ReadThresholds.create();
 
         DefaultQueryOptions(ConsistencyLevel consistency, List<ByteBuffer> values, boolean skipMetadata, SpecificOptions options, ProtocolVersion protocolVersion)
         {
@@ -264,6 +365,12 @@
         {
             return options;
         }
+
+        @Override
+        ReadThresholds getReadThresholds()
+        {
+            return readThresholds;
+        }
     }
 
     static class QueryOptionsWrapper extends QueryOptions
@@ -301,6 +408,12 @@
         }
 
         @Override
+        ReadThresholds getReadThresholds()
+        {
+            return wrapped.getReadThresholds();
+        }
+
+        @Override
         public QueryOptions prepare(List<ColumnSpecification> specs)
         {
             wrapped.prepare(specs);
@@ -400,6 +513,11 @@
             this.keyspace = keyspace;
             this.nowInSeconds = nowInSeconds;
         }
+
+        public SpecificOptions withNowInSec(int nowInSec)
+        {
+            return new SpecificOptions(pageSize, state, serialConsistency, timestamp, keyspace, nowInSec);
+        }
     }
 
     private static class Codec implements CBCodec<QueryOptions>
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index 66ce2ef..1e2d0db 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -23,6 +23,7 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
 
 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
@@ -30,22 +31,32 @@
 import com.google.common.base.Predicate;
 import com.google.common.collect.*;
 import com.google.common.primitives.Ints;
-import com.google.common.util.concurrent.MoreExecutors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.antlr.runtime.*;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterators;
+import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.metrics.ClientRequestMetrics;
 import org.apache.cassandra.metrics.ClientRequestsMetricsHolder;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaChangeListener;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.cql3.functions.UDAggregate;
+import org.apache.cassandra.cql3.functions.UDFunction;
 import org.apache.cassandra.cql3.functions.Function;
 import org.apache.cassandra.cql3.functions.FunctionName;
+import org.apache.cassandra.cql3.selection.ResultSetBuilder;
 import org.apache.cassandra.cql3.statements.*;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.db.rows.RowIterator;
 import org.apache.cassandra.db.partitions.PartitionIterator;
 import org.apache.cassandra.db.partitions.PartitionIterators;
@@ -59,13 +70,16 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.*;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.ENABLE_NODELOCAL_QUERIES;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class QueryProcessor implements QueryHandler
 {
-    public static final CassandraVersion CQL_VERSION = new CassandraVersion("3.4.5");
+    public static final CassandraVersion CQL_VERSION = new CassandraVersion("3.4.6");
 
     // See comments on QueryProcessor #prepare
     public static final CassandraVersion NEW_PREPARED_STATEMENT_BEHAVIOUR_SINCE_30 = new CassandraVersion("3.0.26");
@@ -91,8 +105,8 @@
     static
     {
         preparedStatements = Caffeine.newBuilder()
-                             .executor(MoreExecutors.directExecutor())
-                             .maximumWeight(capacityToBytes(DatabaseDescriptor.getPreparedStatementsCacheSizeMB()))
+                             .executor(ImmediateExecutor.INSTANCE)
+                             .maximumWeight(capacityToBytes(DatabaseDescriptor.getPreparedStatementsCacheSizeMiB()))
                              .weigher(QueryProcessor::measure)
                              .removalListener((key, prepared, cause) -> {
                                  MD5Digest md5Digest = (MD5Digest) key;
@@ -107,13 +121,13 @@
         ScheduledExecutors.scheduledTasks.scheduleAtFixedRate(() -> {
             long count = lastMinuteEvictionsCount.getAndSet(0);
             if (count > 0)
-                logger.warn("{} prepared statements discarded in the last minute because cache limit reached ({} MB)",
+                logger.warn("{} prepared statements discarded in the last minute because cache limit reached ({} MiB)",
                             count,
-                            DatabaseDescriptor.getPreparedStatementsCacheSizeMB());
+                            DatabaseDescriptor.getPreparedStatementsCacheSizeMiB());
         }, 1, 1, TimeUnit.MINUTES);
 
-        logger.info("Initialized prepared statement caches with {} MB",
-                    DatabaseDescriptor.getPreparedStatementsCacheSizeMB());
+        logger.info("Initialized prepared statement caches with {} MiB",
+                    DatabaseDescriptor.getPreparedStatementsCacheSizeMiB());
     }
 
     private static long capacityToBytes(long cacheSizeMB)
@@ -264,14 +278,14 @@
         ClientRequestMetrics  levelMetrics = ClientRequestsMetricsHolder.writeMetricsForLevel(ConsistencyLevel.NODE_LOCAL);
         ClientRequestMetrics globalMetrics = ClientRequestsMetricsHolder.writeMetrics;
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
         try
         {
             return statement.executeLocally(queryState, options);
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
              levelMetrics.addNano(latency);
             globalMetrics.addNano(latency);
         }
@@ -289,14 +303,14 @@
             throw new IsBootstrappingException();
         }
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
         try
         {
             return statement.executeLocally(queryState, options);
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
              levelMetrics.addNano(latency);
             globalMetrics.addNano(latency);
         }
@@ -352,7 +366,7 @@
         QueryState queryState = QueryState.forInternalCalls();
         QueryOptions options = QueryOptions.forInternalCalls(cl, values);
         CQLStatement statement = instance.parse(query, queryState, options);
-        ResultMessage result = instance.process(statement, queryState, options, System.nanoTime());
+        ResultMessage result = instance.process(statement, queryState, options, nanoTime());
         if (result instanceof ResultMessage.Rows)
             return UntypedResultSet.create(((ResultMessage.Rows)result).result);
         else
@@ -367,6 +381,16 @@
 
     private static QueryOptions makeInternalOptions(CQLStatement prepared, Object[] values, ConsistencyLevel cl)
     {
+        return makeInternalOptionsWithNowInSec(prepared, FBUtilities.nowInSeconds(), values, cl);
+    }
+
+    public static QueryOptions makeInternalOptionsWithNowInSec(CQLStatement prepared, int nowInSec, Object[] values)
+    {
+        return makeInternalOptionsWithNowInSec(prepared, nowInSec, values, ConsistencyLevel.ONE);
+    }
+
+    private static QueryOptions makeInternalOptionsWithNowInSec(CQLStatement prepared, int nowInSec, Object[] values, ConsistencyLevel cl)
+    {
         if (prepared.getBindVariables().size() != values.length)
             throw new IllegalArgumentException(String.format("Invalid number of values. Expecting %d but got %d", prepared.getBindVariables().size(), values.length));
 
@@ -374,10 +398,10 @@
         for (int i = 0; i < values.length; i++)
         {
             Object value = values[i];
-            AbstractType type = prepared.getBindVariables().get(i).type;
-            boundValues.add(value instanceof ByteBuffer || value == null ? (ByteBuffer)value : type.decompose(value));
+            AbstractType<?> type = prepared.getBindVariables().get(i).type;
+            boundValues.add(value instanceof ByteBuffer || value == null ? (ByteBuffer)value : type.decomposeUntyped(value));
         }
-        return QueryOptions.forInternalCalls(cl, boundValues);
+        return QueryOptions.forInternalCallsWithNowInSec(nowInSec, cl, boundValues);
     }
 
     public static Prepared prepareInternal(String query) throws RequestValidationException
@@ -427,19 +451,84 @@
             return null;
     }
 
+    public static Future<UntypedResultSet> executeAsync(InetAddressAndPort address, String query, Object... values)
+    {
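+        // Sends the SELECT's ReadCommands directly to the given endpoint and rebuilds an UntypedResultSet
+        // from the responses; only SELECT statements are supported.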
+        Prepared prepared = prepareInternal(query);
+        int nowInSec = FBUtilities.nowInSeconds();
+        QueryOptions options = makeInternalOptionsWithNowInSec(prepared.statement, nowInSec, values);
+        if (prepared.statement instanceof SelectStatement)
+        {
+            SelectStatement select = (SelectStatement) prepared.statement;
+            ReadQuery readQuery = select.getQuery(options, nowInSec);
+            List<ReadCommand> commands;
+            if (readQuery instanceof ReadCommand)
+            {
+                commands = Collections.singletonList((ReadCommand) readQuery);
+            }
+            else if (readQuery instanceof SinglePartitionReadQuery.Group)
+            {
+                List<? extends SinglePartitionReadQuery> queries = ((SinglePartitionReadQuery.Group<? extends SinglePartitionReadQuery>) readQuery).queries;
+                queries.forEach(a -> {
+                    if (!(a instanceof ReadCommand))
+                        throw new IllegalArgumentException("Queries found which are not ReadCommand: " + a.getClass());
+                });
+                commands = (List<ReadCommand>) (List<?>) queries;
+            }
+            else
+            {
+                throw new IllegalArgumentException("Unable to handle; only expected ReadCommands but given " + readQuery.getClass());
+            }
+            Future<List<Message<ReadResponse>>> future = FutureCombiner.allOf(commands.stream()
+                                                                                      .map(rc -> Message.out(rc.verb(), rc))
+                                                                                      .map(m -> MessagingService.instance().<ReadResponse>sendWithResult(m, address))
+                                                                                      .collect(Collectors.toList()));
+
+            ResultSetBuilder result = new ResultSetBuilder(select.getResultMetadata(), select.getSelection().newSelectors(options), null);
+            return future.map(list -> {
+                int i = 0;
+                for (Message<ReadResponse> m : list)
+                {
+                    ReadResponse rsp = m.payload;
+                    try (PartitionIterator it = UnfilteredPartitionIterators.filter(rsp.makeIterator(commands.get(i++)), nowInSec))
+                    {
+                        while (it.hasNext())
+                        {
+                            try (RowIterator partition = it.next())
+                            {
+                                select.processPartition(partition, options, result, nowInSec);
+                            }
+                        }
+                    }
+                }
+                return result.build();
+            }).map(UntypedResultSet::create);
+        }
+        throw new IllegalArgumentException("Unable to execute query; only SELECT supported but given: " + query);
+    }
+
     public static UntypedResultSet execute(String query, ConsistencyLevel cl, Object... values)
     throws RequestExecutionException
     {
         return execute(query, cl, internalQueryState(), values);
     }
 
+    public static UntypedResultSet executeInternalWithNowInSec(String query, int nowInSec, Object... values)
+    {
+        Prepared prepared = prepareInternal(query);
+        ResultMessage result = prepared.statement.executeLocally(internalQueryState(), makeInternalOptionsWithNowInSec(prepared.statement, nowInSec, values));
+        if (result instanceof ResultMessage.Rows)
+            return UntypedResultSet.create(((ResultMessage.Rows)result).result);
+        else
+            return null;
+    }
+
     public static UntypedResultSet execute(String query, ConsistencyLevel cl, QueryState state, Object... values)
     throws RequestExecutionException
     {
         try
         {
             Prepared prepared = prepareInternal(query);
-            ResultMessage result = prepared.statement.execute(state, makeInternalOptions(prepared.statement, values, cl), System.nanoTime());
+            ResultMessage result = prepared.statement.execute(state, makeInternalOptionsWithNowInSec(prepared.statement, state.getNowInSeconds(), values, cl), nanoTime());
             if (result instanceof ResultMessage.Rows)
                 return UntypedResultSet.create(((ResultMessage.Rows)result).result);
             else
@@ -458,7 +547,8 @@
             throw new IllegalArgumentException("Only SELECTs can be paged");
 
         SelectStatement select = (SelectStatement)prepared.statement;
-        QueryPager pager = select.getQuery(makeInternalOptions(prepared.statement, values), FBUtilities.nowInSeconds()).getPager(null, ProtocolVersion.CURRENT);
+        int nowInSec = FBUtilities.nowInSeconds();
+        QueryPager pager = select.getQuery(makeInternalOptionsWithNowInSec(prepared.statement, nowInSec, values), nowInSec).getPager(null, ProtocolVersion.CURRENT);
         return UntypedResultSet.create(select, pager, pageSize);
     }
 
@@ -486,7 +576,7 @@
     {
         CQLStatement statement = parseStatement(query, queryState.getClientState());
         statement.validate(queryState.getClientState());
-        ResultMessage result = statement.executeLocally(queryState, makeInternalOptions(statement, values));
+        ResultMessage result = statement.executeLocally(queryState, makeInternalOptionsWithNowInSec(statement, queryState.getNowInSeconds(), values));
         if (result instanceof ResultMessage.Rows)
             return UntypedResultSet.create(((ResultMessage.Rows)result).result);
         else
@@ -503,11 +593,24 @@
         Prepared prepared = prepareInternal(query);
         assert prepared.statement instanceof SelectStatement;
         SelectStatement select = (SelectStatement)prepared.statement;
-        ResultMessage result = select.executeInternal(internalQueryState(), makeInternalOptions(prepared.statement, values), nowInSec, queryStartNanoTime);
+        ResultMessage result = select.executeInternal(internalQueryState(), makeInternalOptionsWithNowInSec(prepared.statement, nowInSec, values), nowInSec, queryStartNanoTime);
         assert result instanceof ResultMessage.Rows;
         return UntypedResultSet.create(((ResultMessage.Rows)result).result);
     }
 
+    /**
+     * A special version of executeInternal that takes, as an argument, the time to use as "now" for the query.
+     * Note that this only makes sense for SELECTs, so this only accepts SELECT statements and is only useful in rare
+     * cases.
+     */
+    public static Map<DecoratedKey, List<Row>> executeInternalRawWithNow(int nowInSec, String query, Object... values)
+    {
+        Prepared prepared = prepareInternal(query);
+        assert prepared.statement instanceof SelectStatement;
+        SelectStatement select = (SelectStatement) prepared.statement;
+        return select.executeRawInternal(makeInternalOptionsWithNowInSec(prepared.statement, nowInSec, values), internalQueryState().getClientState(), nowInSec);
+    }
+
     public static UntypedResultSet resultify(String query, RowIterator partition)
     {
         return resultify(query, PartitionIterators.singletonIterator(partition));
@@ -565,7 +668,7 @@
      *
      * The correct combination to return is 2/3 - the problem is during upgrades (assuming upgrading from < 3.0.26)
      * - Existing clients have hash 1 or 3
-     * - Query prepared on a 3.0.25/3.11.12/4.0.2 instance needs to return hash 1/3 to be able to execute it on a 3.0.25 instance
+     * - Query prepared on a 3.0.26/3.11.12/4.0.2 instance needs to return hash 1/3 to be able to execute it on a 3.0.25 instance
      * - This is handled by the useNewPreparedStatementBehaviour flag - while there still are 3.0.25 instances in
      *   the cluster we always return hash 1/3
      * - Once fully upgraded we start returning hash 2/3, this will cause a prepared statement id mismatch for existing
@@ -676,10 +779,10 @@
         // (if the keyspace is null, queryString has to have a fully-qualified keyspace so it's fine.
         long statementSize = ObjectSizes.measureDeep(prepared.statement);
         // don't execute the statement if it's bigger than the allowed threshold
-        if (statementSize > capacityToBytes(DatabaseDescriptor.getPreparedStatementsCacheSizeMB()))
+        if (statementSize > capacityToBytes(DatabaseDescriptor.getPreparedStatementsCacheSizeMiB()))
             throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d MiB: %s...",
                                                             statementSize,
-                                                            DatabaseDescriptor.getPreparedStatementsCacheSizeMB(),
+                                                            DatabaseDescriptor.getPreparedStatementsCacheSizeMiB(),
                                                             queryString.substring(0, 200)));
         MD5Digest statementId = computeId(queryString, keyspace);
         Prepared previous = preparedStatements.get(statementId, (ignored_) -> prepared);
@@ -817,7 +920,7 @@
         preparedStatements.asMap().clear();
     }
 
-    private static class StatementInvalidatingListener extends SchemaChangeListener
+    private static class StatementInvalidatingListener implements SchemaChangeListener
     {
         private static void removeInvalidPreparedStatements(String ksName, String cfName)
         {
@@ -877,13 +980,13 @@
             {
                 ModificationStatement modificationStatement = ((ModificationStatement) statement);
                 statementKsName = modificationStatement.keyspace();
-                statementCfName = modificationStatement.columnFamily();
+                statementCfName = modificationStatement.table();
             }
             else if (statement instanceof SelectStatement)
             {
                 SelectStatement selectStatement = ((SelectStatement) statement);
                 statementKsName = selectStatement.keyspace();
-                statementCfName = selectStatement.columnFamily();
+                statementCfName = selectStatement.table();
             }
             else if (statement instanceof BatchStatement)
             {
@@ -903,14 +1006,16 @@
             return ksName.equals(statementKsName) && (cfName == null || cfName.equals(statementCfName));
         }
 
-        public void onCreateFunction(String ksName, String functionName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onCreateFunction(UDFunction function)
         {
-            onCreateFunctionInternal(ksName, functionName, argTypes);
+            onCreateFunctionInternal(function.name().keyspace, function.name().name, function.argTypes());
         }
 
-        public void onCreateAggregate(String ksName, String aggregateName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onCreateAggregate(UDAggregate aggregate)
         {
-            onCreateFunctionInternal(ksName, aggregateName, argTypes);
+            onCreateFunctionInternal(aggregate.name().keyspace, aggregate.name().name, aggregate.argTypes());
         }
 
         private static void onCreateFunctionInternal(String ksName, String functionName, List<AbstractType<?>> argTypes)
@@ -921,51 +1026,58 @@
                 removeInvalidPreparedStatementsForFunction(ksName, functionName);
         }
 
-        public void onAlterTable(String ksName, String cfName, boolean affectsStatements)
+        @Override
+        public void onAlterTable(TableMetadata before, TableMetadata after, boolean affectsStatements)
         {
-            logger.trace("Column definitions for {}.{} changed, invalidating related prepared statements", ksName, cfName);
+            logger.trace("Column definitions for {}.{} changed, invalidating related prepared statements", before.keyspace, before.name);
             if (affectsStatements)
-                removeInvalidPreparedStatements(ksName, cfName);
+                removeInvalidPreparedStatements(before.keyspace, before.name);
         }
 
-        public void onAlterFunction(String ksName, String functionName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onAlterFunction(UDFunction before, UDFunction after)
         {
             // Updating a function may imply we've changed the body of the function, so we need to invalidate statements so that
             // the new definition is picked (the function is resolved at preparation time).
             // TODO: if the function has multiple overloads, we could invalidate only the statements referring to the overload
             // that was updated. This requires a few changes however and probably doesn't matter much in practice.
-            removeInvalidPreparedStatementsForFunction(ksName, functionName);
+            removeInvalidPreparedStatementsForFunction(before.name().keyspace, before.name().name);
         }
 
-        public void onAlterAggregate(String ksName, String aggregateName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onAlterAggregate(UDAggregate before, UDAggregate after)
         {
             // Updating a function may imply we've changed the body of the function, so we need to invalidate statements so that
             // the new definition is picked (the function is resolved at preparation time).
             // TODO: if the function has multiple overloads, we could invalidate only the statements referring to the overload
             // that was updated. This requires a few changes however and probably doesn't matter much in practice.
-            removeInvalidPreparedStatementsForFunction(ksName, aggregateName);
+            removeInvalidPreparedStatementsForFunction(before.name().keyspace, before.name().name);
         }
 
-        public void onDropKeyspace(String ksName)
+        @Override
+        public void onDropKeyspace(KeyspaceMetadata keyspace, boolean dropData)
         {
-            logger.trace("Keyspace {} was dropped, invalidating related prepared statements", ksName);
-            removeInvalidPreparedStatements(ksName, null);
+            logger.trace("Keyspace {} was dropped, invalidating related prepared statements", keyspace.name);
+            removeInvalidPreparedStatements(keyspace.name, null);
         }
 
-        public void onDropTable(String ksName, String cfName)
+        @Override
+        public void onDropTable(TableMetadata table, boolean dropData)
         {
-            logger.trace("Table {}.{} was dropped, invalidating related prepared statements", ksName, cfName);
-            removeInvalidPreparedStatements(ksName, cfName);
+            logger.trace("Table {}.{} was dropped, invalidating related prepared statements", table.keyspace, table.name);
+            removeInvalidPreparedStatements(table.keyspace, table.name);
         }
 
-        public void onDropFunction(String ksName, String functionName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onDropFunction(UDFunction function)
         {
-            removeInvalidPreparedStatementsForFunction(ksName, functionName);
+            removeInvalidPreparedStatementsForFunction(function.name().keyspace, function.name().name);
         }
 
-        public void onDropAggregate(String ksName, String aggregateName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onDropAggregate(UDAggregate aggregate)
         {
-            removeInvalidPreparedStatementsForFunction(ksName, aggregateName);
+            removeInvalidPreparedStatementsForFunction(aggregate.name().keyspace, aggregate.name().name);
         }
     }
 }
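
A minimal usage sketch (not part of the patch) of the internal execution helpers above, with a made-up table and key: the statement is prepared and run locally, with "now" pinned to a caller-chosen second so that liveness and TTL expiry are evaluated against that instant rather than the wall clock at execution time.

    int nowInSec = FBUtilities.nowInSeconds();
    Map<DecoratedKey, List<Row>> partitions =
        QueryProcessor.executeInternalRawWithNow(nowInSec,
                                                 "SELECT * FROM ks.tbl WHERE k = ?",
                                                 "some-key");
    // The same pinned nowInSec is used both to build the query options and to evaluate the read.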
diff --git a/src/java/org/apache/cassandra/cql3/Sets.java b/src/java/org/apache/cassandra/cql3/Sets.java
index 2baa060..104a857 100644
--- a/src/java/org/apache/cassandra/cql3/Sets.java
+++ b/src/java/org/apache/cassandra/cql3/Sets.java
@@ -24,6 +24,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.cql3.functions.Function;
 import org.apache.cassandra.db.DecoratedKey;
@@ -217,7 +218,7 @@
                 Set<?> s = type.getSerializer().deserializeForNativeProtocol(value, ByteBufferAccessor.instance, version);
                 SortedSet<ByteBuffer> elements = new TreeSet<>(type.getElementsType());
                 for (Object element : s)
-                    elements.add(type.getElementsType().decompose(element));
+                    elements.add(type.getElementsType().decomposeUntyped(element));
                 return new Value(elements);
             }
             catch (MarshalException e)
@@ -348,26 +349,43 @@
 
         static void doAdd(Term.Terminal value, ColumnMetadata column, UpdateParameters params) throws InvalidRequestException
         {
+            if (value == null)
+            {
+                // for frozen sets, we're overwriting the whole cell
+                if (!column.type.isMultiCell())
+                    params.addTombstone(column);
+
+                return;
+            }
+
+            SortedSet<ByteBuffer> elements = ((Value) value).elements;
+
             if (column.type.isMultiCell())
             {
-                if (value == null)
+                if (elements.size() == 0)
                     return;
 
-                for (ByteBuffer bb : ((Value) value).elements)
+                // Guardrails about collection size are only checked for the added elements, without considering
+                // already existing elements. This is done to avoid read-before-write; additional checks are
+                // performed during SSTable write.
+                Guardrails.itemsPerCollection.guard(elements.size(), column.name.toString(), false, params.clientState);
+
+                int dataSize = 0;
+                for (ByteBuffer bb : elements)
                 {
                     if (bb == ByteBufferUtil.UNSET_BYTE_BUFFER)
                         continue;
 
-                    params.addCell(column, CellPath.create(bb), ByteBufferUtil.EMPTY_BYTE_BUFFER);
+                    Cell<?> cell = params.addCell(column, CellPath.create(bb), ByteBufferUtil.EMPTY_BYTE_BUFFER);
+                    dataSize += cell.dataSize();
                 }
+                Guardrails.collectionSize.guard(dataSize, column.name.toString(), false, params.clientState);
             }
             else
             {
-                // for frozen sets, we're overwriting the whole cell
-                if (value == null)
-                    params.addTombstone(column);
-                else
-                    params.addCell(column, value.get(ProtocolVersion.CURRENT));
+                Guardrails.itemsPerCollection.guard(elements.size(), column.name.toString(), false, params.clientState);
+                Cell<?> cell = params.addCell(column, value.get(ProtocolVersion.CURRENT));
+                Guardrails.collectionSize.guard(cell.dataSize(), column.name.toString(), false, params.clientState);
             }
         }
     }
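
A rough sketch of the guardrail pattern applied in doAdd() above, with placeholder names (newElements, columnName, clientState): only the elements written by this update are counted and measured, so no read-before-write is needed; the element-count guard runs before the cells are built and the size guard runs after.

    Guardrails.itemsPerCollection.guard(newElements.size(), columnName, false, clientState);
    int dataSize = 0;
    for (ByteBuffer bb : newElements)
    {
        if (bb == ByteBufferUtil.UNSET_BYTE_BUFFER)
            continue;
        dataSize += bb.remaining();   // the real code sums Cell#dataSize() of each written cell
    }
    Guardrails.collectionSize.guard(dataSize, columnName, false, clientState);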
diff --git a/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java b/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java
index 9ff3f07..cf1cb69 100644
--- a/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java
+++ b/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java
@@ -274,16 +274,6 @@
     {
         ColumnSpecification receiver = columnDef;
 
-        if (isIN())
-        {
-            // We only allow IN on the row key and the clustering key so far, never on non-PK columns, and this even if
-            // there's an index
-            // Note: for backward compatibility reason, we conside a IN of 1 value the same as a EQ, so we let that
-            // slide.
-            checkFalse(!columnDef.isPrimaryKeyColumn() && !canHaveOnlyOneValue(),
-                       "IN predicates on non-primary-key columns (%s) is not yet supported", columnDef.name);
-        }
-
         checkFalse(isContainsKey() && !(receiver.type instanceof MapType), "Cannot use CONTAINS KEY on non-map column %s", receiver.name);
         checkFalse(isContains() && !(receiver.type.isCollection()), "Cannot use CONTAINS on non-collection column %s", receiver.name);
 
diff --git a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
index 4411c48..f00c137 100644
--- a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
+++ b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
@@ -38,6 +38,9 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.AbstractIterator;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /** a utility for doing internal cql-based queries */
 public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
@@ -271,7 +274,7 @@
                         if (pager.isExhausted())
                             return endOfData();
 
-                        try (PartitionIterator iter = pager.fetchPage(pageSize, cl, clientState, System.nanoTime()))
+                        try (PartitionIterator iter = pager.fetchPage(pageSize, cl, clientState, nanoTime()))
                         {
                             currentPage = select.process(iter, nowInSec).rows.iterator();
                         }
@@ -371,6 +374,12 @@
             return Int32Type.instance.compose(data.get(column));
         }
 
+        public int getInt(String column, int ifNull)
+        {
+            ByteBuffer bytes = data.get(column);
+            return bytes == null ? ifNull : Int32Type.instance.compose(bytes);
+        }
+
         public double getDouble(String column)
         {
             return DoubleType.instance.compose(data.get(column));
@@ -401,6 +410,17 @@
             return UUIDType.instance.compose(data.get(column));
         }
 
+        public UUID getUUID(String column, UUID ifNull)
+        {
+            ByteBuffer bytes = data.get(column);
+            return bytes == null ? ifNull : UUIDType.instance.compose(bytes);
+        }
+
+        public TimeUUID getTimeUUID(String column)
+        {
+            return TimeUUID.deserialize(data.get(column));
+        }
+
         public Date getTimestamp(String column)
         {
             return TimestampType.instance.compose(data.get(column));
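
A small usage sketch (not part of the patch) for the null-tolerant getters added above; the query and column names are hypothetical.

    UntypedResultSet rs = QueryProcessor.executeInternal("SELECT v FROM ks.tbl WHERE k = ?", "key");
    if (!rs.isEmpty())
    {
        UntypedResultSet.Row row = rs.one();
        int v = row.getInt("v", -1);   // yields -1 instead of failing when the column is null
    }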
diff --git a/src/java/org/apache/cassandra/cql3/UpdateParameters.java b/src/java/org/apache/cassandra/cql3/UpdateParameters.java
index 4272307..2d59366 100644
--- a/src/java/org/apache/cassandra/cql3/UpdateParameters.java
+++ b/src/java/org/apache/cassandra/cql3/UpdateParameters.java
@@ -27,6 +27,8 @@
 import org.apache.cassandra.db.partitions.Partition;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Groups the parameters of an update query, and make building updates easier.
@@ -35,6 +37,7 @@
 {
     public final TableMetadata metadata;
     public final RegularAndStaticColumns updatedColumns;
+    public final ClientState clientState;
     public final QueryOptions options;
 
     private final int nowInSec;
@@ -54,6 +57,7 @@
 
     public UpdateParameters(TableMetadata metadata,
                             RegularAndStaticColumns updatedColumns,
+                            ClientState clientState,
                             QueryOptions options,
                             long timestamp,
                             int nowInSec,
@@ -63,6 +67,7 @@
     {
         this.metadata = metadata;
         this.updatedColumns = updatedColumns;
+        this.clientState = clientState;
         this.options = options;
 
         this.nowInSec = nowInSec;
@@ -141,17 +146,18 @@
         builder.addCell(BufferCell.tombstone(column, timestamp, nowInSec, path));
     }
 
-    public void addCell(ColumnMetadata column, ByteBuffer value) throws InvalidRequestException
+    public Cell<?> addCell(ColumnMetadata column, ByteBuffer value) throws InvalidRequestException
     {
-        addCell(column, null, value);
+        return addCell(column, null, value);
     }
 
-    public void addCell(ColumnMetadata column, CellPath path, ByteBuffer value) throws InvalidRequestException
+    public Cell<?> addCell(ColumnMetadata column, CellPath path, ByteBuffer value) throws InvalidRequestException
     {
         Cell<?> cell = ttl == LivenessInfo.NO_TTL
                        ? BufferCell.live(column, timestamp, value, path)
                        : BufferCell.expiring(column, timestamp, ttl, nowInSec, value, path);
         builder.addCell(cell);
+        return cell;
     }
 
     public void addCounter(ColumnMetadata column, long increment) throws InvalidRequestException
@@ -205,6 +211,11 @@
         return new RangeTombstone(slice, deletionTime);
     }
 
+    public byte[] nextTimeUUIDAsBytes()
+    {
+        return TimeUUID.Generator.nextTimeUUIDAsBytes();
+    }
+
     /**
      * Returns the prefetched row with the already performed modifications.
      * <p>If no modification have yet been performed this method will return the fetched row or {@code null} if
diff --git a/src/java/org/apache/cassandra/cql3/WhereClause.java b/src/java/org/apache/cassandra/cql3/WhereClause.java
index 9f59c5ef..dc1a7cf 100644
--- a/src/java/org/apache/cassandra/cql3/WhereClause.java
+++ b/src/java/org/apache/cassandra/cql3/WhereClause.java
@@ -24,7 +24,6 @@
 
 import org.antlr.runtime.RecognitionException;
 import org.apache.cassandra.cql3.restrictions.CustomIndexExpression;
-import org.apache.cassandra.exceptions.InvalidRequestException;
 
 import static java.lang.String.join;
 
diff --git a/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java b/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
index 93ed6ae..e3f463a 100644
--- a/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
+++ b/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
@@ -495,6 +495,9 @@
             if (value == null)
                 return !iter.hasNext();
 
+            if (operator.isContains() || operator.isContainsKey())
+                return containsAppliesTo(type, iter, value.get(ProtocolVersion.CURRENT), operator);
+
             switch (type.kind)
             {
                 case LIST:
@@ -573,6 +576,39 @@
         }
     }
 
+    private static boolean containsAppliesTo(CollectionType<?> type, Iterator<Cell<?>> iter, ByteBuffer value, Operator operator)
+    {
+        AbstractType<?> compareType;
+        switch (type.kind)
+        {
+            case LIST:
+                compareType = ((ListType<?>)type).getElementsType();
+                break;
+            case SET:
+                compareType = ((SetType<?>)type).getElementsType();
+                break;
+            case MAP:
+                compareType = operator.isContainsKey() ? ((MapType<?, ?>)type).getKeysType() : ((MapType<?, ?>)type).getValuesType();
+                break;
+            default:
+                throw new AssertionError();
+        }
+        boolean appliesToSetOrMapKeys = (type.kind == CollectionType.Kind.SET || type.kind == CollectionType.Kind.MAP && operator.isContainsKey());
+        return containsAppliesTo(compareType, iter, value, appliesToSetOrMapKeys);
+    }
+
+    private static boolean containsAppliesTo(AbstractType<?> type, Iterator<Cell<?>> iter, ByteBuffer value, boolean appliesToSetOrMapKeys)
+    {
+        while (iter.hasNext())
+        {
+            // for lists and map values we use the cell value; for sets and map keys we use the cell name
+            ByteBuffer cellValue = appliesToSetOrMapKeys ? iter.next().path().get(0) : iter.next().buffer();
+            if (type.compare(cellValue, value) == 0)
+                return true;
+        }
+        return false;
+    }
+
     /**
      * A condition on a UDT field
      */
@@ -819,12 +855,18 @@
 
         private Terms prepareTerms(String keyspace, ColumnSpecification receiver)
         {
+            checkFalse(operator.isContainsKey() && !(receiver.type instanceof MapType), "Cannot use CONTAINS KEY on non-map column %s", receiver.name);
+            checkFalse(operator.isContains() && !(receiver.type.isCollection()), "Cannot use CONTAINS on non-collection column %s", receiver.name);
+
             if (operator.isIN())
             {
                 return inValues == null ? Terms.ofListMarker(inMarker.prepare(keyspace, receiver), receiver.type)
                                         : Terms.of(prepareTerms(keyspace, receiver, inValues));
             }
 
+            if (operator.isContains() || operator.isContainsKey())
+                receiver = ((CollectionType<?>) receiver.type).makeCollectionReceiver(receiver, operator.isContainsKey());
+
             return Terms.of(value.prepare(keyspace, receiver));
         }
 
@@ -861,4 +903,4 @@
             return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
         }
     }
-}
\ No newline at end of file
+}
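
A simplified sketch of the membership test implemented by containsAppliesTo() above for a set column, with placeholder names (columnType, cells, candidate): set elements are stored in the cell path, so CONTAINS compares the candidate against each cell's path component using the element type's comparator.

    AbstractType<?> elementsType = ((SetType<?>) columnType).getElementsType();
    boolean found = false;
    while (cells.hasNext())
    {
        ByteBuffer element = cells.next().path().get(0);   // for lists and map values the cell buffer would be used instead
        if (elementsType.compare(element, candidate) == 0)
        {
            found = true;
            break;
        }
    }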
diff --git a/src/java/org/apache/cassandra/cql3/functions/AbstractFunction.java b/src/java/org/apache/cassandra/cql3/functions/AbstractFunction.java
index 940f0a4..aab2046 100644
--- a/src/java/org/apache/cassandra/cql3/functions/AbstractFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/AbstractFunction.java
@@ -27,7 +27,6 @@
 import org.apache.cassandra.cql3.CQL3Type.Tuple;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.CqlBuilder;
-import org.apache.cassandra.cql3.CqlBuilder.Appender;
 import org.apache.cassandra.db.marshal.AbstractType;
 
 import org.apache.commons.lang3.text.StrBuilder;
diff --git a/src/java/org/apache/cassandra/cql3/functions/AggregateFcts.java b/src/java/org/apache/cassandra/cql3/functions/AggregateFcts.java
index 85d3763..5797de4 100644
--- a/src/java/org/apache/cassandra/cql3/functions/AggregateFcts.java
+++ b/src/java/org/apache/cassandra/cql3/functions/AggregateFcts.java
@@ -23,7 +23,9 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.cassandra.cql3.CQL3Type;
 import org.apache.cassandra.db.marshal.*;
@@ -64,21 +66,23 @@
         functions.add(avgFunctionForCounter);
 
         // count, max, and min for all standard types
+        Set<AbstractType<?>> types = new HashSet<>();
         for (CQL3Type type : CQL3Type.Native.values())
         {
-            if (type != CQL3Type.Native.VARCHAR) // varchar and text both mapping to UTF8Type
+            AbstractType<?> udfType = type.getType().udfType();
+            if (!types.add(udfType))
+                continue;
+
+            functions.add(AggregateFcts.makeCountFunction(udfType));
+            if (type != CQL3Type.Native.COUNTER)
             {
-                functions.add(AggregateFcts.makeCountFunction(type.getType()));
-                if (type != CQL3Type.Native.COUNTER)
-                {
-                    functions.add(AggregateFcts.makeMaxFunction(type.getType()));
-                    functions.add(AggregateFcts.makeMinFunction(type.getType()));
-                }
-                else
-                {
-                    functions.add(AggregateFcts.maxFunctionForCounter);
-                    functions.add(AggregateFcts.minFunctionForCounter);
-                }
+                functions.add(AggregateFcts.makeMaxFunction(udfType));
+                functions.add(AggregateFcts.makeMinFunction(udfType));
+            }
+            else
+            {
+                functions.add(AggregateFcts.maxFunctionForCounter);
+                functions.add(AggregateFcts.minFunctionForCounter);
             }
         }
 
diff --git a/src/java/org/apache/cassandra/cql3/functions/BytesConversionFcts.java b/src/java/org/apache/cassandra/cql3/functions/BytesConversionFcts.java
index 33771b7..7e9708a 100644
--- a/src/java/org/apache/cassandra/cql3/functions/BytesConversionFcts.java
+++ b/src/java/org/apache/cassandra/cql3/functions/BytesConversionFcts.java
@@ -20,7 +20,9 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.cassandra.cql3.CQL3Type;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -39,13 +41,17 @@
 
         // because text and varchar end up being synonymous, our automatic makeToBlobFunction doesn't work
         // for varchar, so we special case it below. We also skip blob for obvious reasons.
+        Set<AbstractType<?>> types = new HashSet<>();
         for (CQL3Type type : CQL3Type.Native.values())
         {
-            if (type != CQL3Type.Native.VARCHAR && type != CQL3Type.Native.BLOB)
-            {
-                functions.add(makeToBlobFunction(type.getType()));
-                functions.add(makeFromBlobFunction(type.getType()));
-            }
+            if (type == CQL3Type.Native.BLOB)
+                continue;
+            AbstractType<?> udfType = type.getType().udfType();
+            if (!types.add(udfType))
+                continue;
+
+            functions.add(makeToBlobFunction(udfType));
+            functions.add(makeFromBlobFunction(udfType));
         }
 
         functions.add(VarcharAsBlobFct);
diff --git a/src/java/org/apache/cassandra/cql3/functions/Function.java b/src/java/org/apache/cassandra/cql3/functions/Function.java
index e13e906..0a8d331 100644
--- a/src/java/org/apache/cassandra/cql3/functions/Function.java
+++ b/src/java/org/apache/cassandra/cql3/functions/Function.java
@@ -29,6 +29,12 @@
 @Unmetered
 public interface Function extends AssignmentTestable
 {
+    /**
+     * A marker buffer used to represent function parameters that cannot be resolved at some stage of CQL processing.
+     * This is used for partial function application in particular.
+     */
+    public static final ByteBuffer UNRESOLVED = ByteBuffer.allocate(0);
+
     public FunctionName name();
     public List<AbstractType<?>> argTypes();
     public AbstractType<?> returnType();
@@ -36,14 +42,21 @@
     /**
      * Checks whether the function is a native/hard coded one or not.
      *
-     * @return <code>true</code> if the function is a native/hard coded one, <code>false</code> otherwise.
+     * @return {@code true} if the function is a native/hard coded one, {@code false} otherwise.
      */
     public boolean isNative();
 
     /**
+     * Checks whether the function is a pure function, i.e. one that depends only on its arguments and produces no side effects.
+     *
+     * @return {@code true} if the function is a pure function, {@code false} otherwise.
+     */
+    public boolean isPure();
+
+    /**
      * Checks whether the function is an aggregate function or not.
      *
-     * @return <code>true</code> if the function is an aggregate function, <code>false</code> otherwise.
+     * @return {@code true} if the function is an aggregate function, {@code false} otherwise.
      */
     public boolean isAggregate();
 
diff --git a/src/java/org/apache/cassandra/cql3/functions/FunctionCall.java b/src/java/org/apache/cassandra/cql3/functions/FunctionCall.java
index 0083a31..4fb1ba3 100644
--- a/src/java/org/apache/cassandra/cql3/functions/FunctionCall.java
+++ b/src/java/org/apache/cassandra/cql3/functions/FunctionCall.java
@@ -148,6 +148,12 @@
             return new Raw(name, Collections.singletonList(raw));
         }
 
+        public static Raw newCast(Term.Raw raw, CQL3Type type)
+        {
+            FunctionName name = FunctionName.nativeFunction(CastFcts.getFunctionName(type));
+            return new Raw(name, Collections.singletonList(raw));
+        }
+
         public Term prepare(String keyspace, ColumnSpecification receiver) throws InvalidRequestException
         {
             Function fun = FunctionResolver.get(keyspace, name, terms, receiver.ksName, receiver.cfName, receiver.type);
@@ -202,9 +208,9 @@
                 if (fun != null && fun.name().equals(FromJsonFct.NAME))
                     return TestResult.WEAKLY_ASSIGNABLE;
 
-                if (fun != null && receiver.type.equals(fun.returnType()))
+                if (fun != null && receiver.type.udfType().equals(fun.returnType()))
                     return AssignmentTestable.TestResult.EXACT_MATCH;
-                else if (fun == null || receiver.type.isValueCompatibleWith(fun.returnType()))
+                else if (fun == null || receiver.type.udfType().isValueCompatibleWith(fun.returnType()))
                     return AssignmentTestable.TestResult.WEAKLY_ASSIGNABLE;
                 else
                     return AssignmentTestable.TestResult.NOT_ASSIGNABLE;
diff --git a/src/java/org/apache/cassandra/cql3/functions/FunctionResolver.java b/src/java/org/apache/cassandra/cql3/functions/FunctionResolver.java
index 40d68f2..7717bdb 100644
--- a/src/java/org/apache/cassandra/cql3/functions/FunctionResolver.java
+++ b/src/java/org/apache/cassandra/cql3/functions/FunctionResolver.java
@@ -131,7 +131,8 @@
                                           FunctionName name,
                                           List<? extends AssignmentTestable> providedArgs,
                                           String receiverKs,
-                                          String receiverCf, AbstractType<?> receiverType,
+                                          String receiverCf,
+                                          AbstractType<?> receiverType,
                                           Collection<Function> candidates)
     {
         List<Function> compatibles = null;
@@ -212,7 +213,7 @@
      */
     private static boolean matchReturnType(Function fun, AbstractType<?> receiverType)
     {
-        return receiverType == null || fun.returnType().testAssignment(receiverType).isAssignable();
+        return receiverType == null || fun.returnType().testAssignment(receiverType.udfType()).isAssignable();
     }
 
     // This method and matchArguments are somewhat duplicate, but this method allows us to provide more precise errors in the common
diff --git a/src/java/org/apache/cassandra/cql3/functions/JavaBasedUDFunction.java b/src/java/org/apache/cassandra/cql3/functions/JavaBasedUDFunction.java
index d2bac5f..10be467 100644
--- a/src/java/org/apache/cassandra/cql3/functions/JavaBasedUDFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/JavaBasedUDFunction.java
@@ -74,7 +74,7 @@
 
     private static final AtomicInteger classSequence = new AtomicInteger();
 
-    // use a JVM standard ExecutorService as DebuggableThreadPoolExecutor references internal
+    // use a JVM standard ExecutorService as ExecutorPlus references internal
     // classes, which triggers AccessControlException from the UDF sandbox
     private static final UDFExecutorService executor =
         new UDFExecutorService(new NamedThreadFactory("UserDefinedFunctions",
@@ -350,6 +350,7 @@
         catch (InvocationTargetException e)
         {
             // in case of an ITE, use the cause
+            logger.error(String.format("Could not compile function '%s' from Java source:%n%s", name, javaSource), e);
             throw new InvalidRequestException(String.format("Could not compile function '%s' from Java source: %s", name, e.getCause()));
         }
         catch (InvalidRequestException | VirtualMachineError e)
diff --git a/src/java/org/apache/cassandra/cql3/functions/NativeFunction.java b/src/java/org/apache/cassandra/cql3/functions/NativeFunction.java
index df66ea0..cafeca1 100644
--- a/src/java/org/apache/cassandra/cql3/functions/NativeFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/NativeFunction.java
@@ -31,8 +31,16 @@
         super(FunctionName.nativeFunction(name), Arrays.asList(argTypes), returnType);
     }
 
+    @Override
     public boolean isNative()
     {
         return true;
     }
+
+    @Override
+    public boolean isPure()
+    {
+        // Most of our functions are pure, the other ones should override this
+        return true;
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/functions/NativeScalarFunction.java b/src/java/org/apache/cassandra/cql3/functions/NativeScalarFunction.java
index 3ae0607..e492f75 100644
--- a/src/java/org/apache/cassandra/cql3/functions/NativeScalarFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/NativeScalarFunction.java
@@ -17,6 +17,9 @@
  */
 package org.apache.cassandra.cql3.functions;
 
+import java.nio.ByteBuffer;
+import java.util.List;
+
 import org.apache.cassandra.db.marshal.AbstractType;
 
 /**
@@ -38,4 +41,17 @@
     {
         return false;
     }
+
+    /**
+     * Checks if a partial application of the function is monotonic.
+     *
+     * <p>A function is monotonic if it is either entirely nonincreasing or nondecreasing.</p>
+     *
+     * @param partialParameters the input parameters used to create the partial application of the function
+     * @return {@code true} if the partial application of the function is monotonic, {@code false} otherwise.
+     */
+    protected boolean isPartialApplicationMonotonic(List<ByteBuffer> partialParameters)
+    {
+        return isMonotonic();
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/functions/OperationFcts.java b/src/java/org/apache/cassandra/cql3/functions/OperationFcts.java
index 4994660..b00ced7 100644
--- a/src/java/org/apache/cassandra/cql3/functions/OperationFcts.java
+++ b/src/java/org/apache/cassandra/cql3/functions/OperationFcts.java
@@ -53,14 +53,24 @@
             {
                 return type.addDuration(temporal, duration);
             }
+
+            @Override
+            protected ByteBuffer excuteOnStrings(StringType resultType,
+                                                 StringType leftType,
+                                                 ByteBuffer left,
+                                                 StringType rightType,
+                                                 ByteBuffer right)
+            {
+                return resultType.concat(leftType, left, rightType, right);
+            }
         },
         SUBSTRACTION('-', "_substract")
         {
             protected ByteBuffer executeOnNumerics(NumberType<?> resultType,
-                                         NumberType<?> leftType,
-                                         ByteBuffer left,
-                                         NumberType<?> rightType,
-                                         ByteBuffer right)
+                                                   NumberType<?> leftType,
+                                                   ByteBuffer left,
+                                                   NumberType<?> rightType,
+                                                   ByteBuffer right)
             {
                 return resultType.substract(leftType, left, rightType, right);
             }
@@ -76,10 +86,10 @@
         MULTIPLICATION('*', "_multiply")
         {
             protected ByteBuffer executeOnNumerics(NumberType<?> resultType,
-                                         NumberType<?> leftType,
-                                         ByteBuffer left,
-                                         NumberType<?> rightType,
-                                         ByteBuffer right)
+                                                   NumberType<?> leftType,
+                                                   ByteBuffer left,
+                                                   NumberType<?> rightType,
+                                                   ByteBuffer right)
             {
                 return resultType.multiply(leftType, left, rightType, right);
             }
@@ -87,10 +97,10 @@
         DIVISION('/', "_divide")
         {
             protected ByteBuffer executeOnNumerics(NumberType<?> resultType,
-                                         NumberType<?> leftType,
-                                         ByteBuffer left,
-                                         NumberType<?> rightType,
-                                         ByteBuffer right)
+                                                   NumberType<?> leftType,
+                                                   ByteBuffer left,
+                                                   NumberType<?> rightType,
+                                                   ByteBuffer right)
             {
                 return resultType.divide(leftType, left, rightType, right);
             }
@@ -98,10 +108,10 @@
         MODULO('%', "_modulo")
         {
             protected ByteBuffer executeOnNumerics(NumberType<?> resultType,
-                                         NumberType<?> leftType,
-                                         ByteBuffer left,
-                                         NumberType<?> rightType,
-                                         ByteBuffer right)
+                                                   NumberType<?> leftType,
+                                                   ByteBuffer left,
+                                                   NumberType<?> rightType,
+                                                   ByteBuffer right)
             {
                 return resultType.mod(leftType, left, rightType, right);
             }
@@ -155,6 +165,25 @@
         }
 
         /**
+         * Executes the operation between the specified string operands.
+         *
+         * @param resultType the result type of the operation
+         * @param leftType the type of the left operand
+         * @param left the left operand
+         * @param rightType  the type of the right operand
+         * @param right the right operand
+         * @return the operation result
+         */
+        protected ByteBuffer excuteOnStrings(StringType resultType,
+                                             StringType leftType,
+                                             ByteBuffer left,
+                                             StringType rightType,
+                                             ByteBuffer right)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
          * Returns the {@code OPERATOR} associated to the specified function.
          * @param functionName the function name
          * @return the {@code OPERATOR} associated to the specified function
@@ -221,9 +250,19 @@
             functions.add(new TemporalOperationFunction(SimpleDateType.instance, operation));
         }
 
+        addStringConcatenations(functions);
+
         return functions;
     }
 
+    private static void addStringConcatenations(List<Function> functions)
+    {
+        functions.add(new StringOperationFunction(UTF8Type.instance, UTF8Type.instance, OPERATION.ADDITION, UTF8Type.instance));
+        functions.add(new StringOperationFunction(AsciiType.instance, AsciiType.instance, OPERATION.ADDITION, AsciiType.instance));
+        functions.add(new StringOperationFunction(UTF8Type.instance, AsciiType.instance, OPERATION.ADDITION, UTF8Type.instance));
+        functions.add(new StringOperationFunction(UTF8Type.instance, UTF8Type.instance, OPERATION.ADDITION, AsciiType.instance));
+    }
+
     /**
      * Checks if the function with the specified name is an operation.
      *
@@ -415,6 +454,27 @@
         }
     }
 
+    private static class StringOperationFunction extends OperationFunction
+    {
+        public StringOperationFunction(StringType returnType,
+                                       StringType left,
+                                       OPERATION operation,
+                                       StringType right)
+        {
+            super(returnType, left, operation, right);
+        }
+
+        @Override
+        protected ByteBuffer doExecute(ByteBuffer left, OPERATION operation, ByteBuffer right)
+        {
+            StringType leftType = (StringType) argTypes().get(0);
+            StringType rightType = (StringType) argTypes().get(1);
+            StringType resultType = (StringType) returnType();
+
+            return operation.excuteOnStrings(resultType, leftType, left, rightType, right);
+        }
+    }
+
     /**
      * Function that executes operations on temporals (timestamp, date, ...).
      */
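
A hypothetical sketch of what the string '+' operation registered above evaluates to, assuming StringType#concat behaves as used by the ADDITION operation: the two operands are concatenated, with the result type widened to text when either side is text. The values here are made up for illustration.

    ByteBuffer left = UTF8Type.instance.decompose("foo");
    ByteBuffer right = AsciiType.instance.decompose("bar");
    ByteBuffer result = UTF8Type.instance.concat(UTF8Type.instance, left, AsciiType.instance, right);
    // UTF8Type.instance.compose(result) is expected to be "foobar" under these assumptions.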
diff --git a/src/java/org/apache/cassandra/cql3/functions/PartialScalarFunction.java b/src/java/org/apache/cassandra/cql3/functions/PartialScalarFunction.java
new file mode 100644
index 0000000..6970768
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/functions/PartialScalarFunction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3.functions;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.cassandra.transport.ProtocolVersion;
+
+/**
+ * A partial application of a function.
+ *
+ * @see ScalarFunction#partialApplication(ProtocolVersion, List)
+ */
+public interface PartialScalarFunction extends ScalarFunction
+{
+    /**
+     * Returns the original function.
+     *
+     * @return the original function
+     */
+    public Function getFunction();
+
+    /**
+     * Returns the list of input parameters for the function where some parameters can be {@link #UNRESOLVED}.
+     *
+     * @return the list of input parameters for the function
+     */
+    public List<ByteBuffer> getPartialParameters();
+}
diff --git a/src/java/org/apache/cassandra/cql3/functions/PartiallyAppliedScalarFunction.java b/src/java/org/apache/cassandra/cql3/functions/PartiallyAppliedScalarFunction.java
new file mode 100644
index 0000000..301160b
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/functions/PartiallyAppliedScalarFunction.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3.functions;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.cassandra.cql3.CqlBuilder;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.transport.ProtocolVersion;
+
+/**
+ * An internal function used to hold the partial application of another function to only some of its parameters.
+ *
+ * @see ScalarFunction#partialApplication(ProtocolVersion, List)
+ */
+final class PartiallyAppliedScalarFunction extends NativeScalarFunction implements PartialScalarFunction
+{
+    private final ScalarFunction function;
+    private final List<ByteBuffer> partialParameters;
+
+    PartiallyAppliedScalarFunction(ScalarFunction function, List<ByteBuffer> partialParameters, int unresolvedCount)
+    {
+        // Note that we never register those functions, they are just used internally, so the name doesn't matter much
+        super("__partial_application__", function.returnType(), computeArgTypes(function, partialParameters, unresolvedCount));
+        this.function = function;
+        this.partialParameters = partialParameters;
+    }
+
+    @Override
+    public boolean isMonotonic()
+    {
+        return function.isNative() ? ((NativeScalarFunction) function).isPartialApplicationMonotonic(partialParameters)
+                                   : function.isMonotonic();
+    }
+
+    @Override
+    public boolean isPure()
+    {
+        return function.isPure();
+    }
+
+    @Override
+    public Function getFunction()
+    {
+        return function;
+    }
+
+    @Override
+    public List<ByteBuffer> getPartialParameters()
+    {
+        return partialParameters;
+    }
+
+    private static AbstractType<?>[] computeArgTypes(ScalarFunction function, List<ByteBuffer> partialParameters, int unresolvedCount)
+    {
+        AbstractType<?>[] argTypes = new AbstractType<?>[unresolvedCount];
+        int arg = 0;
+        for (int i = 0; i < partialParameters.size(); i++)
+        {
+            if (partialParameters.get(i) == UNRESOLVED)
+                argTypes[arg++] = function.argTypes().get(i);
+        }
+        return argTypes;
+    }
+
+    public ByteBuffer execute(ProtocolVersion protocolVersion, List<ByteBuffer> parameters) throws InvalidRequestException
+    {
+        List<ByteBuffer> fullParameters = new ArrayList<>(partialParameters);
+        int arg = 0;
+        for (int i = 0; i < fullParameters.size(); i++)
+        {
+            if (fullParameters.get(i) == UNRESOLVED)
+                fullParameters.set(i, parameters.get(arg++));
+        }
+        return function.execute(protocolVersion, fullParameters);
+    }
+
+    @Override
+    public String toString()
+    {
+        CqlBuilder b = new CqlBuilder().append(function.name()).append(" : (");
+
+        List<AbstractType<?>> types = function.argTypes();
+        for (int i = 0, m = types.size(); i < m; i++)
+        {
+            if (i > 0)
+                b.append(", ");
+            b.append(toCqlString(types.get(i)));
+            if (partialParameters.get(i) != Function.UNRESOLVED)
+                b.append("(constant)");
+        }
+        b.append(") -> ").append(returnType);
+        return b.toString();
+    }
+}
diff --git a/src/java/org/apache/cassandra/cql3/functions/PreComputedScalarFunction.java b/src/java/org/apache/cassandra/cql3/functions/PreComputedScalarFunction.java
new file mode 100644
index 0000000..9670132
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/functions/PreComputedScalarFunction.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3.functions;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.transport.ProtocolVersion;
+
+/**
+ * Function used internally to hold the pre-computed result of another function.
+ * <p>
+ * See {@link ScalarFunction#partialApplication(ProtocolVersion, List)} for why this is used.
+ * <p>
+ * Note: the function is cautious in keeping the protocol version used for the pre-computed value and falls back
+ * to recomputation if a different version is passed when {@link #execute} is called. I don't think it's truly
+ * necessary though, as I don't think we actually depend on the protocol version for values anymore (it's a remnant
+ * of previous transitions). It's not a lot of code to stay on the safe side though, until this is cleaned up
+ * (assuming we do clean it).
+ */
+class PreComputedScalarFunction extends NativeScalarFunction implements PartialScalarFunction
+{
+    private final ByteBuffer value;
+    private final ProtocolVersion valueVersion;
+
+    private final ScalarFunction function;
+    private final List<ByteBuffer> parameters;
+
+    PreComputedScalarFunction(AbstractType<?> returnType,
+                              ByteBuffer value,
+                              ProtocolVersion valueVersion,
+                              ScalarFunction function,
+                              List<ByteBuffer> parameters)
+    {
+        // Note that we never register those functions, they are just used internally, so the name doesn't matter much
+        super("__constant__", returnType);
+        this.value = value;
+        this.valueVersion = valueVersion;
+        this.function = function;
+        this.parameters = parameters;
+    }
+
+    @Override
+    public Function getFunction()
+    {
+        return function;
+    }
+
+    @Override
+    public List<ByteBuffer> getPartialParameters()
+    {
+        return parameters;
+    }
+
+    public ByteBuffer execute(ProtocolVersion protocolVersion, List<ByteBuffer> nothing) throws InvalidRequestException
+    {
+        if (protocolVersion == valueVersion)
+            return value;
+
+        return function.execute(protocolVersion, parameters);
+    }
+
+    public ScalarFunction partialApplication(ProtocolVersion protocolVersion, List<ByteBuffer> nothing) throws InvalidRequestException
+    {
+        return this;
+    }
+}
diff --git a/src/java/org/apache/cassandra/cql3/functions/ScalarFunction.java b/src/java/org/apache/cassandra/cql3/functions/ScalarFunction.java
index 1f98372..fc18aae 100644
--- a/src/java/org/apache/cassandra/cql3/functions/ScalarFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/ScalarFunction.java
@@ -24,13 +24,25 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 
 /**
- * Determines a single output value based on a single input value.
+ * Determines a single output value based on any number of input values.
  */
 public interface ScalarFunction extends Function
 {
     public boolean isCalledOnNullInput();
 
     /**
+     * Checks if the function is monotonic.
+     *
+     * <p>A function is monotonic if it is either entirely nonincreasing or nondecreasing given an ordered set of inputs.</p>
+     *
+     * @return {@code true} if the function is monotonic, {@code false} otherwise.
+     */
+    public default boolean isMonotonic()
+    {
+        return false;
+    }
+
+    /**
      * Applies this function to the specified parameter.
      *
      * @param protocolVersion protocol version used for parameters and return value
@@ -39,4 +51,49 @@
      * @throws InvalidRequestException if this function cannot be applied to the parameters
      */
     public ByteBuffer execute(ProtocolVersion protocolVersion, List<ByteBuffer> parameters) throws InvalidRequestException;
+
+    /**
+     * Does a partial application of the function. That is, given only some of the parameters of the function provided,
+     * return a new function that only expect the parameters not provided.
+     * <p>
+     * To take an example, if you consider the function
+     * <pre>
+     *     text foo(int a, text b, text c, int d)
+     * </pre>
+     * then {@code foo.partialApplication([3, <omitted>, 'bar', <omitted>])} will return a function {@code bar} of signature:
+     * <pre>
+     *     text bar(text b, int d)
+     * </pre>
+     * and such that for any value of {@code b} and {@code d}, {@code bar(b, d) == foo(3, b, 'bar', d)}.
+     *
+     * @param protocolVersion protocol version used for parameters
+     * @param partialParameters a list of input parameters for the function where some parameters can be {@link #UNRESOLVED}.
+     *                          The input <b>must</b> be of size {@code this.argTypes().size()}. For convenience, it is
+     *                          allowed both to pass a list with all parameters being {@link #UNRESOLVED} (the function is
+     *                          then returned directly) and with none of them unresolved (in which case, if the function is pure,
+     *                          it is computed and a dummy no-arg function returning the result is returned).
+     * @return a function corresponding to the partial application of this function to the parameters of
+     * {@code partialParameters} that are not {@link #UNRESOLVED}.
+     */
+    public default ScalarFunction partialApplication(ProtocolVersion protocolVersion, List<ByteBuffer> partialParameters)
+    {
+        int unresolvedCount = 0;
+        for (ByteBuffer parameter : partialParameters)
+        {
+            if (parameter == UNRESOLVED)
+                ++unresolvedCount;
+        }
+
+        if (unresolvedCount == argTypes().size())
+            return this;
+
+        if (isPure() && unresolvedCount == 0)
+            return new PreComputedScalarFunction(returnType(),
+                                                 execute(protocolVersion, partialParameters),
+                                                 protocolVersion,
+                                                 this,
+                                                 partialParameters);
+
+        return new PartiallyAppliedScalarFunction(this, partialParameters, unresolvedCount);
+    }
 }
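
A self-contained sketch of the partial application contract described above, using a made-up pure function "my_add": fixing one argument yields a function over the remaining argument, and resolving every argument of a pure function yields a pre-computed constant.

    ScalarFunction myAdd = new NativeScalarFunction("my_add", Int32Type.instance, Int32Type.instance, Int32Type.instance)
    {
        public ByteBuffer execute(ProtocolVersion version, List<ByteBuffer> parameters)
        {
            // Sum the two int arguments; NativeFunction#isPure() defaults to true, so this counts as pure.
            return Int32Type.instance.decompose(Int32Type.instance.compose(parameters.get(0))
                                                + Int32Type.instance.compose(parameters.get(1)));
        }
    };

    // Fix the first argument to 3; the resulting function expects a single int argument.
    ScalarFunction plusThree = myAdd.partialApplication(ProtocolVersion.CURRENT,
                                                        Arrays.asList(Int32Type.instance.decompose(3),
                                                                      Function.UNRESOLVED));
    ByteBuffer seven = plusThree.execute(ProtocolVersion.CURRENT,
                                         Collections.singletonList(Int32Type.instance.decompose(4)));

    // With no argument left unresolved, a pure function is evaluated once and wrapped in a constant.
    ScalarFunction three = myAdd.partialApplication(ProtocolVersion.CURRENT,
                                                    Arrays.asList(Int32Type.instance.decompose(1),
                                                                  Int32Type.instance.decompose(2)));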
diff --git a/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDFunction.java b/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDFunction.java
index d7e5eb8..e42fbe9 100644
--- a/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDFunction.java
@@ -94,7 +94,7 @@
     "org.apache.cassandra.cql3.functions.types.utils"
     };
 
-    // use a JVM standard ExecutorService as DebuggableThreadPoolExecutor references internal
+    // use a JVM standard ExecutorService as ExecutorPlus references internal
     // classes, which triggers AccessControlException from the UDF sandbox
     private static final UDFExecutorService executor =
         new UDFExecutorService(new NamedThreadFactory("UserDefinedScriptFunctions",
diff --git a/src/java/org/apache/cassandra/cql3/functions/TimeFcts.java b/src/java/org/apache/cassandra/cql3/functions/TimeFcts.java
index f029e59..2759210 100644
--- a/src/java/org/apache/cassandra/cql3/functions/TimeFcts.java
+++ b/src/java/org/apache/cassandra/cql3/functions/TimeFcts.java
@@ -25,11 +25,17 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.cql3.Duration;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.UUIDGen;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
+
 public abstract class TimeFcts
 {
     public static Logger logger = LoggerFactory.getLogger(TimeFcts.class);
@@ -51,7 +57,14 @@
                                 toUnixTimestamp(TimestampType.instance),
                                 toDate(TimestampType.instance),
                                 toUnixTimestamp(SimpleDateType.instance),
-                                toTimestamp(SimpleDateType.instance));
+                                toTimestamp(SimpleDateType.instance),
+                                FloorTimestampFunction.newInstance(),
+                                FloorTimestampFunction.newInstanceWithStartTimeArgument(),
+                                FloorTimeUuidFunction.newInstance(),
+                                FloorTimeUuidFunction.newInstanceWithStartTimeArgument(),
+                                FloorDateFunction.newInstance(),
+                                FloorDateFunction.newInstanceWithStartTimeArgument(),
+                                floorTime);
     }
 
     public static final Function now(final String name, final TemporalType<?> type)
@@ -63,6 +76,12 @@
             {
                 return type.now();
             }
+
+            @Override
+            public boolean isPure()
+            {
+                return false; // as it returns non-identical results for identical arguments
+            }
         };
     };
 
@@ -74,7 +93,7 @@
             if (bb == null)
                 return null;
 
-            return UUIDGen.toByteBuffer(UUIDGen.minTimeUUID(TimestampType.instance.compose(bb).getTime()));
+            return TimeUUID.minAtUnixMillis(TimestampType.instance.compose(bb).getTime()).toBytes();
         }
     };
 
@@ -86,13 +105,13 @@
             if (bb == null)
                 return null;
 
-            return UUIDGen.toByteBuffer(UUIDGen.maxTimeUUID(TimestampType.instance.compose(bb).getTime()));
+            return TimeUUID.maxAtUnixMillis(TimestampType.instance.compose(bb).getTime()).toBytes();
         }
     };
 
     /**
      * Function that converts a value of <code>TIMEUUID</code> into a value of type <code>TIMESTAMP</code>.
-     * @deprecated Replaced by the {@link #timeUuidToTimestamp} function
+     * @deprecated Replaced by the {@link #toTimestamp} function
      */
     public static final NativeScalarFunction dateOfFct = new NativeScalarFunction("dateof", TimestampType.instance, TimeUUIDType.instance)
     {
@@ -111,14 +130,14 @@
             if (bb == null)
                 return null;
 
-            long timeInMillis = UUIDGen.unixTimestamp(UUIDGen.getUUID(bb));
+            long timeInMillis = TimeUUID.deserialize(bb).unix(MILLISECONDS);
             return ByteBufferUtil.bytes(timeInMillis);
         }
     };
 
     /**
      * Function that converts a value of type <code>TIMEUUID</code> into a UNIX timestamp.
-     * @deprecated Replaced by the {@link #timeUuidToUnixTimestamp} function
+     * @deprecated Replaced by the {@link #toUnixTimestamp} function
      */
     public static final NativeScalarFunction unixTimestampOfFct = new NativeScalarFunction("unixtimestampof", LongType.instance, TimeUUIDType.instance)
     {
@@ -137,7 +156,7 @@
             if (bb == null)
                 return null;
 
-            return ByteBufferUtil.bytes(UUIDGen.unixTimestamp(UUIDGen.getUUID(bb)));
+            return ByteBufferUtil.bytes(TimeUUID.deserialize(bb).unix(MILLISECONDS));
         }
     };
 
@@ -159,6 +178,12 @@
                long millis = type.toTimeInMillis(bb);
                return SimpleDateType.instance.fromTimeInMillis(millis);
            }
+
+           @Override
+           public boolean isMonotonic()
+           {
+               return true;
+           }
        };
    }
 
@@ -180,6 +205,12 @@
                long millis = type.toTimeInMillis(bb);
                return TimestampType.instance.fromTimeInMillis(millis);
            }
+
+           @Override
+           public boolean isMonotonic()
+           {
+               return true;
+           }
        };
    }
 
@@ -200,7 +231,282 @@
 
                 return ByteBufferUtil.bytes(type.toTimeInMillis(bb));
             }
+
+            @Override
+            public boolean isMonotonic()
+            {
+                return true;
+            }
         };
     }
-}
+
+    /**
+     * Function that rounds a timestamp down to the closest multiple of a duration.
+     */
+     private static abstract class FloorFunction extends NativeScalarFunction
+     {
+         private static final Long ZERO = Long.valueOf(0);
+
+         protected FloorFunction(AbstractType<?> returnType,
+                                 AbstractType<?>... argsType)
+         {
+             super("floor", returnType, argsType);
+             // The function can accept either 2 parameters (time and duration) or 3 parameters (time, duration and startTime)
+             assert argsType.length == 2 || argsType.length == 3;
+         }
+
+         @Override
+         protected boolean isPartialApplicationMonotonic(List<ByteBuffer> partialParameters)
+         {
+             return partialParameters.get(0) == UNRESOLVED
+                     && partialParameters.get(1) != UNRESOLVED
+                     && (partialParameters.size() == 2 || partialParameters.get(2) != UNRESOLVED);
+         }
+
+         public final ByteBuffer execute(ProtocolVersion protocolVersion, List<ByteBuffer> parameters)
+         {
+             ByteBuffer timeBuffer = parameters.get(0);
+             ByteBuffer durationBuffer = parameters.get(1);
+             Long startingTime = getStartingTime(parameters);
+
+             if (timeBuffer == null || durationBuffer == null || startingTime == null)
+                 return null;
+
+             Long time = toTimeInMillis(timeBuffer);
+             Duration duration = DurationType.instance.compose(durationBuffer);
+
+             if (time == null || duration == null)
+                 return null;
+
+
+             validateDuration(duration);
+
+
+             return fromTimeInMillis(floor);
+         }
+
+         /**
+          * Returns the time to use as the starting time.
+          *
+          * @param parameters the function parameters
+          * @return the time to use as the starting time
+          */
+         private Long getStartingTime(List<ByteBuffer> parameters)
+         {
+             if (parameters.size() == 3)
+             {
+                 ByteBuffer startingTimeBuffer = parameters.get(2);
+
+                 if (startingTimeBuffer == null)
+                     return null;
+
+                 return toStartingTimeInMillis(startingTimeBuffer);
+             }
+
+             return ZERO;
+         }
+
+         /**
+          * Validates that the duration has the correct precision.
+          * @param duration the duration to validate.
+          */
+         protected void validateDuration(Duration duration)
+         {
+             if (!duration.hasMillisecondPrecision())
+                 throw invalidRequest("The floor cannot be computed for the %s duration as precision is below 1 millisecond", duration);
+         }
+
+         /**
+          * Serializes the specified time.
+          *
+          * @param timeInMillis the time in milliseconds
+          * @return the serialized time
+          */
+         protected abstract ByteBuffer fromTimeInMillis(long timeInMillis);
+
+         /**
+          * Deserializes the specified input time.
+          *
+          * @param bytes the serialized time
+          * @return the time in milliseconds
+          */
+         protected abstract Long toTimeInMillis(ByteBuffer bytes);
+
+         /**
+          * Deserializes the specified starting time.
+          *
+          * @param bytes the serialized starting time
+          * @return the starting time in milliseconds
+          */
+         protected abstract Long toStartingTimeInMillis(ByteBuffer bytes);
+     }
+
+    /**
+     * Function that rounds a timestamp down to the closest multiple of a duration.
+     */
+     public static final class FloorTimestampFunction extends FloorFunction
+     {
+         public static FloorTimestampFunction newInstance()
+         {
+             return new FloorTimestampFunction(TimestampType.instance,
+                                               TimestampType.instance,
+                                               DurationType.instance);
+         }
+
+         public static FloorTimestampFunction newInstanceWithStartTimeArgument()
+         {
+             return new FloorTimestampFunction(TimestampType.instance,
+                                               TimestampType.instance,
+                                               DurationType.instance,
+                                               TimestampType.instance);
+         }
+
+         private FloorTimestampFunction(AbstractType<?> returnType,
+                                        AbstractType<?>... argTypes)
+         {
+             super(returnType, argTypes);
+         }
+
+         protected ByteBuffer fromTimeInMillis(long timeInMillis)
+         {
+             return TimestampType.instance.fromTimeInMillis(timeInMillis);
+         }
+
+         protected Long toStartingTimeInMillis(ByteBuffer bytes)
+         {
+             return TimestampType.instance.toTimeInMillis(bytes);
+         }
+
+         protected Long toTimeInMillis(ByteBuffer bytes)
+         {
+             return TimestampType.instance.toTimeInMillis(bytes);
+         }
+     }
+
+     /**
+      * Function that rounds a timeUUID down to the closest multiple of a duration.
+      */
+     public static final class FloorTimeUuidFunction extends FloorFunction
+     {
+         public static FloorTimeUuidFunction newInstance()
+         {
+             return new FloorTimeUuidFunction(TimestampType.instance,
+                                              TimeUUIDType.instance,
+                                              DurationType.instance);
+         }
+
+         public static FloorTimeUuidFunction newInstanceWithStartTimeArgument()
+         {
+             return new FloorTimeUuidFunction(TimestampType.instance,
+                                              TimeUUIDType.instance,
+                                              DurationType.instance,
+                                              TimestampType.instance);
+         }
+
+         private FloorTimeUuidFunction(AbstractType<?> returnType,
+                                       AbstractType<?>... argTypes)
+         {
+             super(returnType, argTypes);
+         }
+
+         protected ByteBuffer fromTimeInMillis(long timeInMillis)
+         {
+             return TimestampType.instance.fromTimeInMillis(timeInMillis);
+         }
+
+         protected Long toStartingTimeInMillis(ByteBuffer bytes)
+         {
+             return TimestampType.instance.toTimeInMillis(bytes);
+         }
+
+         protected Long toTimeInMillis(ByteBuffer bytes)
+         {
+             return UUIDGen.getAdjustedTimestamp(UUIDGen.getUUID(bytes));
+         }
+     }
+
+     /**
+      * Function that rounds a date down to the closest multiple of a duration.
+      */
+     public static final class FloorDateFunction extends FloorFunction
+     {
+         public static FloorDateFunction newInstance()
+         {
+             return new FloorDateFunction(SimpleDateType.instance,
+                                          SimpleDateType.instance,
+                                          DurationType.instance);
+         }
+
+         public static FloorDateFunction newInstanceWithStartTimeArgument()
+         {
+             return new FloorDateFunction(SimpleDateType.instance,
+                                          SimpleDateType.instance,
+                                          DurationType.instance,
+                                          SimpleDateType.instance);
+         }
+
+         private FloorDateFunction(AbstractType<?> returnType,
+                                   AbstractType<?>... argTypes)
+         {
+             super(returnType, argTypes);
+         }
+
+         protected ByteBuffer fromTimeInMillis(long timeInMillis)
+         {
+             return SimpleDateType.instance.fromTimeInMillis(timeInMillis);
+         }
+
+         protected Long toStartingTimeInMillis(ByteBuffer bytes)
+         {
+             return SimpleDateType.instance.toTimeInMillis(bytes);
+         }
+
+         protected Long toTimeInMillis(ByteBuffer bytes)
+         {
+             return SimpleDateType.instance.toTimeInMillis(bytes);
+         }
+
+         @Override
+         protected void validateDuration(Duration duration)
+         {
+             // Checks that the duration has no data below days.
+             if (duration.getNanoseconds() != 0)
+                 throw invalidRequest("The floor on %s values cannot be computed for the %s duration as precision is below 1 day",
+                                      SimpleDateType.instance.asCQL3Type(), duration);
+         }
+     }
+
+     /**
+      * Function that rounds a time down to the closest multiple of a duration.
+      */
+     public static final NativeScalarFunction floorTime = new NativeScalarFunction("floor", TimeType.instance, TimeType.instance, DurationType.instance)
+     {
+         @Override
+         protected boolean isPartialApplicationMonotonic(List<ByteBuffer> partialParameters)
+         {
+             return partialParameters.get(0) == UNRESOLVED && partialParameters.get(1) != UNRESOLVED;
+         }
+
+         public ByteBuffer execute(ProtocolVersion protocolVersion, List<ByteBuffer> parameters)
+         {
+             ByteBuffer timeBuffer = parameters.get(0);
+             ByteBuffer durationBuffer = parameters.get(1);
+
+             if (timeBuffer == null || durationBuffer == null)
+                 return null;
+
+             Long time = TimeType.instance.compose(timeBuffer);
+             Duration duration = DurationType.instance.compose(durationBuffer);
+
+             if (time == null || duration == null)
+                 return null;
+
+             long floor = Duration.floorTime(time, duration);
+
+             return TimeType.instance.decompose(floor);
+         }
+     };
+ }
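For illustration (not part of the patch, imports omitted), the two-argument form of the new floor function rounds a timestamp down to the start of its duration-sized bucket, with buckets counted from the Unix epoch unless a third starting-time argument is supplied; a minimal sketch, assuming the literal formats below are accepted by the respective type parsers:

    // Hypothetical sketch of the floor(<timestamp>, <duration>) function registered above.
    ScalarFunction floor = TimeFcts.FloorTimestampFunction.newInstance();
    ByteBuffer ts  = TimestampType.instance.fromString("2022-03-08 17:45:00+0000");
    ByteBuffer dur = DurationType.instance.fromString("2h");
    ByteBuffer bucket = floor.execute(ProtocolVersion.CURRENT, Arrays.asList(ts, dur));
    // With the default epoch-based starting time, 17:45 UTC falls in the [16:00, 18:00) bucket,
    // so bucket is expected to decode to 2022-03-08 16:00:00+0000.

The TIMEUUID and DATE variants follow the same shape; note that FloorDateFunction additionally rejects durations with sub-day precision, per its validateDuration override.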
 
diff --git a/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java b/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java
index b764835..b686328 100644
--- a/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java
+++ b/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java
@@ -39,6 +39,7 @@
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.transform;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Base class for user-defined-aggregates.
@@ -99,6 +100,12 @@
                         .orElseThrow(() -> new ConfigurationException(String.format("Unable to find function %s referenced by UDA %s", name, udaName)));
     }
 
+    public boolean isPure()
+    {
+        // Right now, we have no way to check whether a UDA is pure, so we conservatively treat every UDA as non-pure.
+        return false;
+    }
+
     public boolean hasReferenceTo(Function function)
     {
         return stateFunction == function || finalFunction == function;
@@ -182,7 +189,7 @@
             {
                 maybeInit(protocolVersion);
 
-                long startTime = System.nanoTime();
+                long startTime = nanoTime();
                 stateFunctionCount++;
                 if (stateFunction instanceof UDFunction)
                 {
@@ -194,7 +201,7 @@
                 {
                     throw new UnsupportedOperationException("UDAs only support UDFs");
                 }
-                stateFunctionDuration += (System.nanoTime() - startTime) / 1000;
+                stateFunctionDuration += (nanoTime() - startTime) / 1000;
             }
 
             private void maybeInit(ProtocolVersion protocolVersion)
diff --git a/src/java/org/apache/cassandra/cql3/functions/UDFExecutorService.java b/src/java/org/apache/cassandra/cql3/functions/UDFExecutorService.java
index 5e08ad8..3b7631f 100644
--- a/src/java/org/apache/cassandra/cql3/functions/UDFExecutorService.java
+++ b/src/java/org/apache/cassandra/cql3/functions/UDFExecutorService.java
@@ -17,37 +17,47 @@
  */
 package org.apache.cassandra.cql3.functions;
 
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
+import org.apache.cassandra.concurrent.ThreadPoolExecutorJMXAdapter;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
-import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.concurrent.ThreadPoolExecutorBase;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.utils.FBUtilities.getAvailableProcessors;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 
 /**
- * Executor service which exposes stats via JMX, but which doesn't reference
- * internal classes in its beforeExecute & afterExecute methods as these are
- * forbidden by the UDF execution sandbox
+ * Executor service which exposes stats via JMX, but which doesn't reference internal classes
+ * as these are forbidden by the UDF execution sandbox.
+ *
+ * TODO: see if we can port to ExecutorPlus to avoid duplication
  */
-final class UDFExecutorService extends JMXEnabledThreadPoolExecutor
+final class UDFExecutorService extends ThreadPoolExecutorBase
 {
-    private static int KEEPALIVE = Integer.getInteger("cassandra.udf_executor_thread_keepalive_ms", 30000);
+    private static final int KEEPALIVE = Integer.getInteger("cassandra.udf_executor_thread_keepalive_ms", 30000);
 
-    UDFExecutorService(NamedThreadFactory threadFactory, String jmxPath)
+    public UDFExecutorService(NamedThreadFactory threadFactory, String jmxPath)
     {
-        super(FBUtilities.getAvailableProcessors(),
-              KEEPALIVE,
-              TimeUnit.MILLISECONDS,
-              new LinkedBlockingQueue<>(),
-              threadFactory,
-              jmxPath);
+        super(getAvailableProcessors(), KEEPALIVE, MILLISECONDS, newBlockingQueue(), threadFactory);
+        ThreadPoolExecutorJMXAdapter.register(jmxPath, this);
     }
 
-    protected void afterExecute(Runnable r, Throwable t)
+    public int getCoreThreads()
     {
+        return getCorePoolSize();
     }
 
-    protected void beforeExecute(Thread t, Runnable r)
+    public void setCoreThreads(int newCorePoolSize)
     {
+        setCorePoolSize(newCorePoolSize);
+    }
+
+    public int getMaximumThreads()
+    {
+        return getMaximumPoolSize();
+    }
+
+    public void setMaximumThreads(int maxPoolSize)
+    {
+        setMaximumPoolSize(maxPoolSize);
     }
 }
diff --git a/src/java/org/apache/cassandra/cql3/functions/UDFunction.java b/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
index 3863f89..5ef065b 100644
--- a/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
@@ -28,11 +28,10 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletableFuture; // checkstyle: permit this import
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -42,6 +41,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.ColumnIdentifier;
@@ -58,9 +58,11 @@
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.transform;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Base class for User Defined Functions.
@@ -296,7 +298,7 @@
         {
             protected ExecutorService executor()
             {
-                return Executors.newSingleThreadExecutor();
+                return ImmediateExecutor.INSTANCE;
             }
 
             protected Object executeAggregateUserDefined(ProtocolVersion protocolVersion, Object firstParam, List<ByteBuffer> parameters)
@@ -366,6 +368,12 @@
         return builder.toString();
     }
 
+    public boolean isPure()
+    {
+        // Right now, we have no way to check whether a UDF is pure, so we conservatively treat every UDF as non-pure.
+        return false;
+    }
+
     public final ByteBuffer execute(ProtocolVersion protocolVersion, List<ByteBuffer> parameters)
     {
         assertUdfsEnabled(language);
@@ -373,7 +381,7 @@
         if (!isCallableWrtNullable(parameters))
             return null;
 
-        long tStart = System.nanoTime();
+        long tStart = nanoTime();
         parameters = makeEmptyParametersNull(parameters);
 
         try
@@ -383,7 +391,7 @@
                                 ? executeAsync(protocolVersion, parameters)
                                 : executeUserDefined(protocolVersion, parameters);
 
-            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (System.nanoTime() - tStart) / 1000);
+            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (nanoTime() - tStart) / 1000);
             return result;
         }
         catch (InvalidRequestException e)
@@ -412,7 +420,7 @@
         if (!calledOnNullInput && firstParam == null || !isCallableWrtNullable(parameters))
             return null;
 
-        long tStart = System.nanoTime();
+        long tStart = nanoTime();
         parameters = makeEmptyParametersNull(parameters);
 
         try
@@ -421,7 +429,7 @@
             Object result = DatabaseDescriptor.enableUserDefinedFunctionsThreads()
                                 ? executeAggregateAsync(protocolVersion, firstParam, parameters)
                                 : executeAggregateUserDefined(protocolVersion, firstParam, parameters);
-            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (System.nanoTime() - tStart) / 1000);
+            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (nanoTime() - tStart) / 1000);
             return result;
         }
         catch (InvalidRequestException e)
@@ -440,9 +448,9 @@
     public static void assertUdfsEnabled(String language)
     {
         if (!DatabaseDescriptor.enableUserDefinedFunctions())
-            throw new InvalidRequestException("User-defined functions are disabled in cassandra.yaml - set enable_user_defined_functions=true to enable");
+            throw new InvalidRequestException("User-defined functions are disabled in cassandra.yaml - set user_defined_functions_enabled=true to enable");
         if (!"java".equalsIgnoreCase(language) && !DatabaseDescriptor.enableScriptedUserDefinedFunctions())
-            throw new InvalidRequestException("Scripted user-defined functions are disabled in cassandra.yaml - set enable_scripted_user_defined_functions=true to enable if you are aware of the security risks");
+            throw new InvalidRequestException("Scripted user-defined functions are disabled in cassandra.yaml - set scripted_user_defined_functions_enabled=true to enable if you are aware of the security risks");
     }
 
     static void initializeThread()
@@ -528,7 +536,7 @@
         catch (InterruptedException e)
         {
             Thread.currentThread().interrupt();
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
         catch (ExecutionException e)
         {
@@ -554,7 +562,7 @@
             catch (InterruptedException e1)
             {
                 Thread.currentThread().interrupt();
-                throw new RuntimeException(e);
+                throw new UncheckedInterruptedException(e1);
             }
             catch (ExecutionException e1)
             {
@@ -758,7 +766,7 @@
     private static class UDFClassLoader extends ClassLoader
     {
         // insecureClassLoader is the C* class loader
-        static final ClassLoader insecureClassLoader = Thread.currentThread().getContextClassLoader();
+        static final ClassLoader insecureClassLoader = UDFClassLoader.class.getClassLoader();
 
         private UDFClassLoader()
         {
diff --git a/src/java/org/apache/cassandra/cql3/functions/types/TypeCodec.java b/src/java/org/apache/cassandra/cql3/functions/types/TypeCodec.java
index a728a1c..dc34bca 100644
--- a/src/java/org/apache/cassandra/cql3/functions/types/TypeCodec.java
+++ b/src/java/org/apache/cassandra/cql3/functions/types/TypeCodec.java
@@ -31,13 +31,10 @@
 import java.util.*;
 import java.util.regex.Pattern;
 
-import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
 import com.google.common.reflect.TypeToken;
 
 import org.apache.cassandra.transport.ProtocolVersion;
-import org.apache.cassandra.cql3.functions.types.DataType.CollectionType;
-import org.apache.cassandra.cql3.functions.types.DataType.Name;
 import org.apache.cassandra.cql3.functions.types.exceptions.InvalidTypeException;
 import org.apache.cassandra.cql3.functions.types.utils.Bytes;
 import org.apache.cassandra.utils.vint.VIntCoding;
@@ -3045,19 +3042,20 @@
             VIntCoding.computeVIntSize(months)
             + VIntCoding.computeVIntSize(days)
             + VIntCoding.computeVIntSize(nanoseconds);
-            ByteArrayDataOutput out = ByteStreams.newDataOutput(size);
+            ByteBuffer bb = ByteBuffer.allocate(size);
             try
             {
-                VIntCoding.writeVInt(months, out);
-                VIntCoding.writeVInt(days, out);
-                VIntCoding.writeVInt(nanoseconds, out);
+                VIntCoding.writeVInt(months, bb);
+                VIntCoding.writeVInt(days, bb);
+                VIntCoding.writeVInt(nanoseconds, bb);
             }
             catch (IOException e)
             {
                 // cannot happen
                 throw new AssertionError();
             }
-            return ByteBuffer.wrap(out.toByteArray());
+            bb.flip();
+            return bb;
         }
 
         @Override
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/ClusteringColumnRestrictions.java b/src/java/org/apache/cassandra/cql3/restrictions/ClusteringColumnRestrictions.java
index 0a252ff..c1d0c52 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/ClusteringColumnRestrictions.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/ClusteringColumnRestrictions.java
@@ -19,6 +19,7 @@
 
 import java.util.*;
 
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.QueryOptions;
@@ -27,6 +28,7 @@
 import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.index.IndexRegistry;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.utils.btree.BTreeSet;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
@@ -101,12 +103,16 @@
         return false;
     }
 
-    public NavigableSet<Clustering<?>> valuesAsClustering(QueryOptions options) throws InvalidRequestException
+    public NavigableSet<Clustering<?>> valuesAsClustering(QueryOptions options, ClientState state) throws InvalidRequestException
     {
         MultiCBuilder builder = MultiCBuilder.create(comparator, hasIN());
         for (SingleRestriction r : restrictions)
         {
             r.appendTo(builder, options);
+
+            if (hasIN() && Guardrails.inSelectCartesianProduct.enabled(state))
+                Guardrails.inSelectCartesianProduct.guard(builder.buildSize(), "clustering key", false, state);
+
             if (builder.hasMissingElements())
                 break;
         }
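For context (illustrative arithmetic only, not part of the patch): the quantity guarded here is the running cartesian product of the IN terms, and builder.buildSize() is re-checked after each restriction is appended, so an oversized product is rejected before all clustering combinations are materialised:

    // Roughly: WHERE ck1 IN (10 values) AND ck2 IN (10 values) AND ck3 IN (5 values)
    int[] inListSizes = {10, 10, 5};
    long combinations = 1;
    for (int size : inListSizes)
        combinations *= size;   // buildSize() grows 10 -> 100 -> 500 as each IN term is appended
    // each intermediate value is compared against the guardrail's configured thresholds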
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/MultiColumnRestriction.java b/src/java/org/apache/cassandra/cql3/restrictions/MultiColumnRestriction.java
index 4c6ce2f..acbb48e 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/MultiColumnRestriction.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/MultiColumnRestriction.java
@@ -21,6 +21,8 @@
 import java.util.*;
 
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.serializers.ListSerializer;
+import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.cql3.Term.Terminal;
 import org.apache.cassandra.cql3.functions.Function;
@@ -247,7 +249,22 @@
                                          IndexRegistry indexRegistry,
                                          QueryOptions options)
         {
-            throw  invalidRequest("IN restrictions are not supported on indexed columns");
+            // If the relation is of the type (c) IN ((x),(y),(z)) then it is equivalent to
+            // c IN (x, y, z) and we can perform filtering
+            if (getColumnDefs().size() == 1)
+            {
+                List<List<ByteBuffer>> splitValues = splitValues(options);
+                List<ByteBuffer> values = new ArrayList<>(splitValues.size());
+                for (List<ByteBuffer> splitValue : splitValues)
+                    values.add(splitValue.get(0));
+
+                ByteBuffer buffer = ListSerializer.pack(values, values.size(), ProtocolVersion.V3);
+                filter.add(getFirstColumn(), Operator.IN, buffer);
+            }
+            else
+            {
+                throw invalidRequest("Multicolumn IN filters are not supported");
+            }
         }
 
         protected abstract List<List<ByteBuffer>> splitValues(QueryOptions options);
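To make the single-column unwrapping above concrete (an illustrative sketch with hypothetical values, not taken from the patch): WHERE (c) IN ((1), (2), (3)) arrives as a list of one-element tuples, and taking element 0 of each yields the same flat list that c IN (1, 2, 3) would produce, which is then packed into a single IN filter value:

    // Illustrative sketch of the getColumnDefs().size() == 1 case handled above (imports omitted).
    List<List<ByteBuffer>> splitValues = Arrays.asList(
            Collections.singletonList(Int32Type.instance.decompose(1)),
            Collections.singletonList(Int32Type.instance.decompose(2)),
            Collections.singletonList(Int32Type.instance.decompose(3)));
    List<ByteBuffer> values = new ArrayList<>(splitValues.size());
    for (List<ByteBuffer> tuple : splitValues)
        values.add(tuple.get(0));
    ByteBuffer packed = ListSerializer.pack(values, values.size(), ProtocolVersion.V3);
    // packed is then used as filter.add(column, Operator.IN, packed), as in the change above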
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeyRestrictions.java b/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeyRestrictions.java
index b1edf94..8224529 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeyRestrictions.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeyRestrictions.java
@@ -23,6 +23,7 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.statements.Bound;
+import org.apache.cassandra.service.ClientState;
 
 /**
  * A set of restrictions on the partition key.
@@ -32,7 +33,7 @@
 {
     public PartitionKeyRestrictions mergeWith(Restriction restriction);
 
-    public List<ByteBuffer> values(QueryOptions options);
+    public List<ByteBuffer> values(QueryOptions options, ClientState state);
 
     public List<ByteBuffer> bounds(Bound b, QueryOptions options);
 
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeySingleRestrictionSet.java b/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeySingleRestrictionSet.java
index fbe5673..a6f227a 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeySingleRestrictionSet.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/PartitionKeySingleRestrictionSet.java
@@ -20,6 +20,7 @@
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.statements.Bound;
@@ -28,6 +29,7 @@
 import org.apache.cassandra.db.MultiCBuilder;
 import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.index.IndexRegistry;
+import org.apache.cassandra.service.ClientState;
 
 /**
  * A set of single restrictions on the partition key.
@@ -78,12 +80,16 @@
     }
 
     @Override
-    public List<ByteBuffer> values(QueryOptions options)
+    public List<ByteBuffer> values(QueryOptions options, ClientState state)
     {
         MultiCBuilder builder = MultiCBuilder.create(comparator, hasIN());
         for (SingleRestriction r : restrictions)
         {
             r.appendTo(builder, options);
+
+            if (hasIN() && Guardrails.inSelectCartesianProduct.enabled(state))
+                Guardrails.inSelectCartesianProduct.guard(builder.buildSize(), "partition key", false, state);
+
             if (builder.hasMissingElements())
                 break;
         }
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/SingleColumnRestriction.java b/src/java/org/apache/cassandra/cql3/restrictions/SingleColumnRestriction.java
index 1b3482b..e5b2465 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/SingleColumnRestriction.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/SingleColumnRestriction.java
@@ -23,6 +23,8 @@
 import java.util.List;
 
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.serializers.ListSerializer;
+import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.cql3.Term.Terminal;
 import org.apache.cassandra.cql3.functions.Function;
@@ -218,7 +220,9 @@
                                    IndexRegistry indexRegistry,
                                    QueryOptions options)
         {
-            throw invalidRequest("IN restrictions are not supported on indexed columns");
+            List<ByteBuffer> values = getValues(options);
+            ByteBuffer buffer = ListSerializer.pack(values, values.size(), ProtocolVersion.V3);
+            filter.add(columnDef, Operator.IN, buffer);
         }
 
         @Override
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
index 5d924f5..8f8be94 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
@@ -35,6 +35,7 @@
 import org.apache.cassandra.index.IndexRegistry;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.utils.btree.BTreeSet;
 
 import org.apache.commons.lang3.builder.ToStringBuilder;
@@ -446,9 +447,6 @@
                 if (!allowFiltering && !forView && !hasQueriableIndex)
                     throw new InvalidRequestException(REQUIRES_ALLOW_FILTERING_MESSAGE);
 
-                if (partitionKeyRestrictions.hasIN())
-                    throw new InvalidRequestException("IN restrictions are not supported when the query involves filtering");
-
                 isKeyRange = true;
                 usesSecondaryIndexing = hasQueriableIndex;
             }
@@ -628,11 +626,12 @@
      * Returns the partition keys for which the data is requested.
      *
      * @param options the query options
+     * @param state the client state
      * @return the partition keys for which the data is requested.
      */
-    public List<ByteBuffer> getPartitionKeys(final QueryOptions options)
+    public List<ByteBuffer> getPartitionKeys(final QueryOptions options, ClientState state)
     {
-        return partitionKeyRestrictions.values(options);
+        return partitionKeyRestrictions.values(options, state);
     }
 
     /**
@@ -750,9 +749,10 @@
      * Returns the requested clustering columns.
      *
      * @param options the query options
+     * @param state the client state
      * @return the requested clustering columns
      */
-    public NavigableSet<Clustering<?>> getClusteringColumns(QueryOptions options)
+    public NavigableSet<Clustering<?>> getClusteringColumns(QueryOptions options, ClientState state)
     {
         // If this is a names command and the table is a static compact one, then as far as CQL is concerned we have
         // only a single row which internally correspond to the static parts. In which case we want to return an empty
@@ -760,7 +760,7 @@
         if (table.isStaticCompactTable())
             return BTreeSet.empty(table.comparator);
 
-        return clusteringColumnsRestrictions.valuesAsClustering(options);
+        return clusteringColumnsRestrictions.valuesAsClustering(options, state);
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/TokenFilter.java b/src/java/org/apache/cassandra/cql3/restrictions/TokenFilter.java
index 437b17c..9f67cc0 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/TokenFilter.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/TokenFilter.java
@@ -35,6 +35,7 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.index.IndexRegistry;
+import org.apache.cassandra.service.ClientState;
 
 import static org.apache.cassandra.cql3.statements.Bound.END;
 import static org.apache.cassandra.cql3.statements.Bound.START;
@@ -102,9 +103,9 @@
     }
 
     @Override
-    public List<ByteBuffer> values(QueryOptions options) throws InvalidRequestException
+    public List<ByteBuffer> values(QueryOptions options, ClientState state) throws InvalidRequestException
     {
-        return filter(restrictions.values(options), options);
+        return filter(restrictions.values(options, state), options, state);
     }
 
     @Override
@@ -139,13 +140,14 @@
      *
      * @param values the values returned by the decorated restriction
      * @param options the query options
+     * @param state the client state
      * @return the values matching the token restriction
      * @throws InvalidRequestException if the request is invalid
      */
-    private List<ByteBuffer> filter(List<ByteBuffer> values, QueryOptions options) throws InvalidRequestException
+    private List<ByteBuffer> filter(List<ByteBuffer> values, QueryOptions options, ClientState state) throws InvalidRequestException
     {
         RangeSet<Token> rangeSet = tokenRestriction.hasSlice() ? toRangeSet(tokenRestriction, options)
-                                                               : toRangeSet(tokenRestriction.values(options));
+                                                               : toRangeSet(tokenRestriction.values(options, state));
 
         return filterWithRangeSet(rangeSet, values);
     }
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/TokenRestriction.java b/src/java/org/apache/cassandra/cql3/restrictions/TokenRestriction.java
index e71b177..d7477fb 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/TokenRestriction.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/TokenRestriction.java
@@ -31,6 +31,7 @@
 import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.index.IndexRegistry;
+import org.apache.cassandra.service.ClientState;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
 
@@ -205,7 +206,10 @@
         @Override
         public List<ByteBuffer> bounds(Bound b, QueryOptions options) throws InvalidRequestException
         {
-            return values(options);
+            // ClientState is used by the inSelectCartesianProduct guardrail to skip non-ordinary users.
+            // Passing null here avoids polluting too many method signatures; an EQ token restriction
+            // cannot produce a large cartesian product anyway.
+            return values(options, null);
         }
 
         @Override
@@ -221,7 +225,7 @@
         }
 
         @Override
-        public List<ByteBuffer> values(QueryOptions options) throws InvalidRequestException
+        public List<ByteBuffer> values(QueryOptions options, ClientState state) throws InvalidRequestException
         {
             return Collections.singletonList(value.bindAndGet(options));
         }
@@ -254,7 +258,7 @@
         }
 
         @Override
-        public List<ByteBuffer> values(QueryOptions options) throws InvalidRequestException
+        public List<ByteBuffer> values(QueryOptions options, ClientState state) throws InvalidRequestException
         {
             throw new UnsupportedOperationException();
         }
diff --git a/src/java/org/apache/cassandra/cql3/selection/AbstractFunctionSelector.java b/src/java/org/apache/cassandra/cql3/selection/AbstractFunctionSelector.java
index d420857..bf2564e 100644
--- a/src/java/org/apache/cassandra/cql3/selection/AbstractFunctionSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/AbstractFunctionSelector.java
@@ -17,24 +17,93 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 
+import com.google.common.base.Objects;
 import com.google.common.collect.Iterables;
 
 import org.apache.commons.lang3.text.StrBuilder;
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.cql3.functions.FunctionName;
+import org.apache.cassandra.cql3.functions.PartialScalarFunction;
+import org.apache.cassandra.cql3.functions.ScalarFunction;
 import org.apache.cassandra.cql3.statements.RequestValidations;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
+import static java.util.stream.Collectors.joining;
 
 abstract class AbstractFunctionSelector<T extends Function> extends Selector
 {
+    protected static abstract class AbstractFunctionSelectorDeserializer extends SelectorDeserializer
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            FunctionName name = new FunctionName(in.readUTF(), in.readUTF());
+
+            int numberOfArguments = (int) in.readUnsignedVInt();
+            List<AbstractType<?>> argTypes = new ArrayList<>(numberOfArguments);
+            for (int i = 0; i < numberOfArguments; i++)
+            {
+                argTypes.add(readType(metadata, in));
+            }
+
+            Optional<Function> optional = Schema.instance.findFunction(name, argTypes);
+
+            if (!optional.isPresent())
+                throw new IOException(String.format("Unknown serialized function %s(%s)",
+                                                    name,
+                                                    argTypes.stream()
+                                                            .map(p -> p.asCQL3Type().toString())
+                                                            .collect(joining(", "))));
+
+            Function function = optional.get();
+
+            boolean isPartial = in.readBoolean();
+            if (isPartial)
+            {
+                int bitset = (int) in.readUnsignedVInt();
+                List<ByteBuffer> partialParameters = new ArrayList<>(numberOfArguments);
+                for (int i = 0; i < numberOfArguments; i++)
+                {
+                    ByteBuffer parameter = ((bitset & 1) == 1) ? ByteBufferUtil.readWithVIntLength(in)
+                                                               : Function.UNRESOLVED;
+                    partialParameters.add(parameter);
+                    bitset >>= 1;
+                }
+
+                function = ((ScalarFunction) function).partialApplication(ProtocolVersion.CURRENT, partialParameters);
+            }
+
+            int numberOfRemainingArguments = (int) in.readUnsignedVInt();
+            List<Selector> argSelectors = new ArrayList<>(numberOfRemainingArguments);
+            for (int i = 0; i < numberOfRemainingArguments; i++)
+            {
+                argSelectors.add(Selector.serializer.deserialize(in, version, metadata));
+            }
+
+            return newFunctionSelector(function, argSelectors);
+        }
+
+        protected abstract Selector newFunctionSelector(Function function, List<Selector> argSelectors);
+    };
+    
     protected final T fun;
 
     /**
@@ -88,7 +157,45 @@
             public Selector newInstance(QueryOptions options) throws InvalidRequestException
             {
                 return fun.isAggregate() ? new AggregateFunctionSelector(fun, factories.newInstances(options))
-                                         : new ScalarFunctionSelector(fun, factories.newInstances(options));
+                                         : createScalarSelector(options, (ScalarFunction) fun, factories.newInstances(options));
+            }
+
+            private Selector createScalarSelector(QueryOptions options, ScalarFunction function, List<Selector> argSelectors)
+            {
+                ProtocolVersion version = options.getProtocolVersion();
+                int terminalCount = 0;
+                List<ByteBuffer> terminalArgs = new ArrayList<>(argSelectors.size());
+                for (Selector selector : argSelectors)
+                {
+                    if (selector.isTerminal())
+                    {
+                        ++terminalCount;
+                        ByteBuffer output = selector.getOutput(version);
+                        RequestValidations.checkBindValueSet(output, "Invalid unset value for argument in call to function %s", fun.name().name);
+                        terminalArgs.add(output);
+                    }
+                    else
+                    {
+                        terminalArgs.add(Function.UNRESOLVED);
+                    }
+                }
+
+                if (terminalCount == 0)
+                    return new ScalarFunctionSelector(fun, argSelectors);
+
+                // All terminal, reduce to a simple value if the function is pure
+                if (terminalCount == argSelectors.size() && function.isPure())
+                    return new TermSelector(function.execute(version, terminalArgs), function.returnType());
+
+                // We have some terminal arguments but not all, do a partial application
+                ScalarFunction partialFunction = function.partialApplication(version, terminalArgs);
+                List<Selector> remainingSelectors = new ArrayList<>(argSelectors.size() - terminalCount);
+                for (Selector selector : argSelectors)
+                {
+                    if (!selector.isTerminal())
+                        remainingSelectors.add(selector);
+                }
+                return new ScalarFunctionSelector(partialFunction, remainingSelectors);
             }
 
             public boolean isWritetimeSelectorFactory()
@@ -121,8 +228,9 @@
         };
     }
 
-    protected AbstractFunctionSelector(T fun, List<Selector> argSelectors)
+    protected AbstractFunctionSelector(Kind kind, T fun, List<Selector> argSelectors)
     {
+        super(kind);
         this.fun = fun;
         this.argSelectors = argSelectors;
         this.args = Arrays.asList(new ByteBuffer[argSelectors.size()]);
@@ -154,6 +262,28 @@
     }
 
     @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof AbstractFunctionSelector))
+            return false;
+
+        AbstractFunctionSelector<?> s = (AbstractFunctionSelector<?>) o;
+
+        return Objects.equal(fun.name(), s.fun.name())
+            && Objects.equal(fun.argTypes(), s.fun.argTypes())
+            && Objects.equal(argSelectors, s.argSelectors);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(fun.name(), fun.argTypes(), argSelectors);
+    }
+
+    @Override
     public String toString()
     {
         return new StrBuilder().append(fun.name())
@@ -162,4 +292,97 @@
                                .append(")")
                                .toString();
     }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        boolean isPartial = fun instanceof PartialScalarFunction;
+        Function function = isPartial ? ((PartialScalarFunction) fun).getFunction() : fun;
+
+        FunctionName name = function.name();
+        int size =  TypeSizes.sizeof(name.keyspace) + TypeSizes.sizeof(name.name);
+
+        List<AbstractType<?>> argTypes = function.argTypes();
+        size += TypeSizes.sizeofUnsignedVInt(argTypes.size());
+        for (int i = 0, m = argTypes.size(); i < m; i++)
+        {
+            size += sizeOf(argTypes.get(i));
+        }
+
+        size += TypeSizes.sizeof(isPartial);
+
+        if (isPartial)
+        {
+            List<ByteBuffer> partialParameters = ((PartialScalarFunction) fun).getPartialParameters();
+
+            // We use a bitset to record which arguments are resolved (bit i is set when parameter i is resolved)
+            size += TypeSizes.sizeofUnsignedVInt(computeBitSet(partialParameters));
+
+            for (int i = 0, m = partialParameters.size(); i < m; i++)
+            {
+                ByteBuffer buffer = partialParameters.get(i);
+                if (buffer != Function.UNRESOLVED)
+                    size += ByteBufferUtil.serializedSizeWithVIntLength(buffer);
+            }
+        }
+
+        int numberOfRemainingArguments = argSelectors.size();
+        size += TypeSizes.sizeofUnsignedVInt(numberOfRemainingArguments);
+        for (int i = 0; i < numberOfRemainingArguments; i++)
+            size += serializer.serializedSize(argSelectors.get(i), version);
+
+        return size;
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        boolean isPartial = fun instanceof PartialScalarFunction;
+        Function function = isPartial ? ((PartialScalarFunction) fun).getFunction() : fun;
+
+        FunctionName name = function.name();
+        out.writeUTF(name.keyspace);
+        out.writeUTF(name.name);
+
+        List<AbstractType<?>> argTypes = function.argTypes();
+        int numberOfArguments = argTypes.size();
+        out.writeUnsignedVInt(numberOfArguments);
+
+        for (int i = 0; i < numberOfArguments; i++)
+            writeType(out, argTypes.get(i));
+
+        out.writeBoolean(isPartial);
+
+        if (isPartial)
+        {
+            List<ByteBuffer> partialParameters = ((PartialScalarFunction) fun).getPartialParameters();
+
+            // We use a bitset to record which arguments are resolved (bit i is set when parameter i is resolved)
+            out.writeUnsignedVInt(computeBitSet(partialParameters));
+
+            for (int i = 0, m = partialParameters.size(); i < m; i++)
+            {
+                ByteBuffer buffer = partialParameters.get(i);
+                if (buffer != Function.UNRESOLVED)
+                    ByteBufferUtil.writeWithVIntLength(buffer, out);
+            }
+        }
+
+        int numberOfRemainingArguments = argSelectors.size();
+        out.writeUnsignedVInt(numberOfRemainingArguments);
+        for (int i = 0; i < numberOfRemainingArguments; i++)
+            serializer.serialize(argSelectors.get(i), out, version);
+    }
+
+    private int computeBitSet(List<ByteBuffer> partialParameters)
+    {
+        assert partialParameters.size() <= 32 : "cannot serialize partial function with more than 32 parameters";
+        int bitset = 0;
+        for (int i = 0, m = partialParameters.size(); i < m; i++)
+        {
+            if (partialParameters.get(i) != Function.UNRESOLVED)
+                bitset |= 1 << i;
+        }
+        return bitset;
+    }
 }
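A brief illustration of the bitset layout used by this serialization (hypothetical values; only computeBitSet and the deserializer's LSB-first shift come from the code above):

    // For a partial application foo(3, ?, 'bar', ?) the parameters are
    // [resolved, UNRESOLVED, resolved, UNRESOLVED], so bits 0 and 2 are set.
    boolean[] resolved = {true, false, true, false};
    int bitset = 0;
    for (int i = 0; i < resolved.length; i++)
        if (resolved[i])
            bitset |= 1 << i;      // bitset == 0b0101 == 5
    // only the two resolved buffers follow the bitset on the wire; on deserialization
    // the bits are consumed least-significant first (bitset >>= 1) to decide which
    // argument slots are read back and which stay UNRESOLVED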
diff --git a/src/java/org/apache/cassandra/cql3/selection/AggregateFunctionSelector.java b/src/java/org/apache/cassandra/cql3/selection/AggregateFunctionSelector.java
index a9df220..8c4f745 100644
--- a/src/java/org/apache/cassandra/cql3/selection/AggregateFunctionSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/AggregateFunctionSelector.java
@@ -27,6 +27,15 @@
 
 final class AggregateFunctionSelector extends AbstractFunctionSelector<AggregateFunction>
 {
+    protected static final SelectorDeserializer deserializer = new AbstractFunctionSelectorDeserializer()
+    {
+        @Override
+        protected Selector newFunctionSelector(Function function, List<Selector> argSelectors)
+        {
+            return new AggregateFunctionSelector(function, argSelectors);
+        }
+    };
+
     private final AggregateFunction.Aggregate aggregate;
 
     public boolean isAggregate()
@@ -34,13 +43,13 @@
         return true;
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         // Aggregation of aggregation is not supported
         for (int i = 0, m = argSelectors.size(); i < m; i++)
         {
             Selector s = argSelectors.get(i);
-            s.addInput(protocolVersion, rs);
+            s.addInput(protocolVersion, input);
             setArg(i, s.getOutput(protocolVersion));
             s.reset();
         }
@@ -59,7 +68,7 @@
 
     AggregateFunctionSelector(Function fun, List<Selector> argSelectors) throws InvalidRequestException
     {
-        super((AggregateFunction) fun, argSelectors);
+        super(Kind.AGGREGATE_FUNCTION_SELECTOR, (AggregateFunction) fun, argSelectors);
 
         this.aggregate = this.fun.newAggregate();
     }
diff --git a/src/java/org/apache/cassandra/cql3/selection/ElementsSelector.java b/src/java/org/apache/cassandra/cql3/selection/ElementsSelector.java
index 5eace66..e520d0f 100644
--- a/src/java/org/apache/cassandra/cql3/selection/ElementsSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/ElementsSelector.java
@@ -17,17 +17,24 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.Term;
 import org.apache.cassandra.cql3.selection.SimpleSelector.SimpleSelectorFactory;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.db.rows.CellPath;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -38,8 +45,9 @@
 {
     protected final Selector selected;
 
-    protected ElementsSelector(Selector selected)
+    protected ElementsSelector(Kind kind, Selector selected)
     {
+        super(kind);
         this.selected = selected;
     }
 
@@ -221,7 +229,7 @@
         };
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         ByteBuffer value = selected.getOutput(protocolVersion);
         return value == null ? null : extractSelection(value);
@@ -229,9 +237,9 @@
 
     protected abstract ByteBuffer extractSelection(ByteBuffer collection);
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
-        selected.addInput(protocolVersion, rs);
+        selected.addInput(protocolVersion, input);
     }
 
     public void reset()
@@ -239,14 +247,31 @@
         selected.reset();
     }
 
-    private static class ElementSelector extends ElementsSelector
+    @Override
+    public boolean isTerminal()
     {
+        return selected.isTerminal();
+    }
+
+    static class ElementSelector extends ElementsSelector
+    {
+        protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+        {
+            protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+            {
+                Selector selected = Selector.serializer.deserialize(in, version, metadata);
+                ByteBuffer key = ByteBufferUtil.readWithVIntLength(in);
+
+                return new ElementSelector(selected, key);
+            }
+        };
+
         private final CollectionType<?> type;
         private final ByteBuffer key;
 
         private ElementSelector(Selector selected, ByteBuffer key)
         {
-            super(selected);
+            super(Kind.ELEMENT_SELECTOR, selected);
             this.type = getCollectionType(selected);
             this.key = key;
         }
@@ -279,10 +304,60 @@
         {
             return String.format("%s[%s]", selected, keyType(type).getString(key));
         }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o)
+                return true;
+
+            if (!(o instanceof ElementSelector))
+                return false;
+
+            ElementSelector s = (ElementSelector) o;
+
+            return Objects.equal(selected, s.selected)
+                && Objects.equal(key, s.key);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hashCode(selected, key);
+        }
+
+        @Override
+        protected int serializedSize(int version)
+        {
+            return TypeSizes.sizeofWithVIntLength(key) + serializer.serializedSize(selected, version);
+        }
+
+        @Override
+        protected void serialize(DataOutputPlus out, int version) throws IOException
+        {
+            serializer.serialize(selected, out, version);
+            ByteBufferUtil.writeWithVIntLength(key, out);
+        }
     }
 
-    private static class SliceSelector extends ElementsSelector
+    static class SliceSelector extends ElementsSelector
     {
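+        // Each slice bound is serialized as a boolean "unset" flag followed, when the bound is present, by its
+        // vint-length-prefixed value (see serialize below).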
+        protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+        {
+            protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+            {
+                Selector selected = Selector.serializer.deserialize(in, version, metadata);
+
+                boolean isFromUnset = in.readBoolean();
+                ByteBuffer from = isFromUnset ? ByteBufferUtil.UNSET_BYTE_BUFFER : ByteBufferUtil.readWithVIntLength(in);
+
+                boolean isToUnset = in.readBoolean();
+                ByteBuffer to = isToUnset ? ByteBufferUtil.UNSET_BYTE_BUFFER : ByteBufferUtil.readWithVIntLength(in);
+
+                return new SliceSelector(selected, from, to);
+            }
+        };
+
         private final CollectionType<?> type;
 
         // Note that neither from nor to can be null, but they can both be ByteBufferUtil.UNSET_BYTE_BUFFER to represent no particular bound
@@ -291,7 +366,7 @@
 
         private SliceSelector(Selector selected, ByteBuffer from, ByteBuffer to)
         {
-            super(selected);
+            super(Kind.SLICE_SELECTOR, selected);
             assert from != null && to != null : "We can have unset buffers, but not nulls";
             this.type = getCollectionType(selected);
             this.from = from;
@@ -330,5 +405,57 @@
                  ? selected.toString()
                  : String.format("%s[%s..%s]", selected, fromUnset ? "" : keyType(type).getString(from), toUnset ? "" : keyType(type).getString(to));
         }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o)
+                return true;
+
+            if (!(o instanceof SliceSelector))
+                return false;
+
+            SliceSelector s = (SliceSelector) o;
+
+            return Objects.equal(selected, s.selected)
+                && Objects.equal(from, s.from)
+                && Objects.equal(to, s.to);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hashCode(selected, from, to);
+        }
+
+        @Override
+        protected int serializedSize(int version)
+        {
+            int size = serializer.serializedSize(selected, version) + 2;
+
+            if (!isUnset(from))
+                size += TypeSizes.sizeofWithVIntLength(from);
+
+            if (!isUnset(to))
+                size += TypeSizes.sizeofWithVIntLength(to);
+
+            return size;
+        }
+
+        @Override
+        protected void serialize(DataOutputPlus out, int version) throws IOException
+        {
+            serializer.serialize(selected, out, version);
+
+            boolean isFromUnset = isUnset(from);
+            out.writeBoolean(isFromUnset);
+            if (!isFromUnset)
+                ByteBufferUtil.writeWithVIntLength(from, out);
+
+            boolean isToUnset = isUnset(to);
+            out.writeBoolean(isToUnset);
+            if (!isToUnset)
+                ByteBufferUtil.writeWithVIntLength(to, out);
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java b/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
index c67fc03..0c62397 100644
--- a/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
@@ -17,18 +17,37 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.transport.ProtocolVersion;
 
 final class FieldSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            UserType type = (UserType) readType(metadata, in);
+            int field = (int) in.readUnsignedVInt();
+            Selector selected = Selector.serializer.deserialize(in, version, metadata);
+
+            return new FieldSelector(type, field, selected);
+        }
+    };
+
     private final UserType type;
     private final int field;
     private final Selector selected;
@@ -79,12 +98,12 @@
         selected.addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
-        selected.addInput(protocolVersion, rs);
+        selected.addInput(protocolVersion, input);
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         ByteBuffer value = selected.getOutput(protocolVersion);
         if (value == null)
@@ -104,6 +123,12 @@
     }
 
     @Override
+    public boolean isTerminal()
+    {
+        return selected.isTerminal();
+    }
+
+    @Override
     public String toString()
     {
         return String.format("%s.%s", selected, type.fieldName(field));
@@ -111,8 +136,45 @@
 
     private FieldSelector(UserType type, int field, Selector selected)
     {
+        super(Kind.FIELD_SELECTOR);
         this.type = type;
         this.field = field;
         this.selected = selected;
     }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof FieldSelector))
+            return false;
+
+        FieldSelector s = (FieldSelector) o;
+
+        return Objects.equal(type, s.type)
+            && Objects.equal(field, s.field)
+            && Objects.equal(selected, s.selected);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(type, field, selected);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        return sizeOf(type) + TypeSizes.sizeofUnsignedVInt(field) + serializer.serializedSize(selected, version);
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        writeType(out, type);
+        out.writeUnsignedVInt(field);
+        serializer.serialize(selected, out, version);
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/ListSelector.java b/src/java/org/apache/cassandra/cql3/selection/ListSelector.java
index a8c5d5c..9136ab2 100644
--- a/src/java/org/apache/cassandra/cql3/selection/ListSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/ListSelector.java
@@ -17,15 +17,22 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.Lists;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter.Builder;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.db.marshal.ListType;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.serializers.CollectionSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 
@@ -35,6 +42,20 @@
  */
 final class ListSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            ListType<?> type = (ListType<?>) readType(metadata, in);
+            int size = (int) in.readUnsignedVInt();
+            List<Selector> elements = new ArrayList<>(size);
+            for (int i = 0; i < size; i++)
+                elements.add(serializer.deserialize(in, version, metadata));
+
+            return new ListSelector(type, elements);
+        }
+    };
+
     /**
      * The list type.
      */
@@ -68,13 +89,13 @@
             elements.get(i).addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         for (int i = 0, m = elements.size(); i < m; i++)
-            elements.get(i).addInput(protocolVersion, rs);
+            elements.get(i).addInput(protocolVersion, input);
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         List<ByteBuffer> buffers = new ArrayList<>(elements.size());
         for (int i = 0, m = elements.size(); i < m; i++)
@@ -90,6 +111,17 @@
             elements.get(i).reset();
     }
 
+    @Override
+    public boolean isTerminal()
+    {
+        for (int i = 0, m = elements.size(); i < m; i++)
+        {
+            if (!elements.get(i).isTerminal())
+                return false;
+        }
+        return true;
+    }
+
     public AbstractType<?> getType()
     {
         return type;
@@ -103,7 +135,48 @@
 
     private ListSelector(AbstractType<?> type, List<Selector> elements)
     {
+        super(Kind.LIST_SELECTOR);
         this.type = type;
         this.elements = elements;
     }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof ListSelector))
+            return false;
+
+        ListSelector s = (ListSelector) o;
+
+        return Objects.equal(type, s.type)
+            && Objects.equal(elements, s.elements);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(type, elements);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        int size = sizeOf(type) + TypeSizes.sizeofUnsignedVInt(elements.size());
+        for (int i = 0, m = elements.size(); i < m; i++)
+            size += serializer.serializedSize(elements.get(i), version);
+
+        return size;
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        writeType(out, type);
+        out.writeUnsignedVInt(elements.size());
+        for (int i = 0, m = elements.size(); i < m; i++)
+            serializer.serialize(elements.get(i), out, version);
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/MapSelector.java b/src/java/org/apache/cassandra/cql3/selection/MapSelector.java
index 09424bd..dc811c0 100644
--- a/src/java/org/apache/cassandra/cql3/selection/MapSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/MapSelector.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -24,15 +25,20 @@
 import java.util.TreeMap;
 import java.util.stream.Collectors;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.Maps;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter.Builder;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.MapType;
-import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.serializers.CollectionSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.Pair;
@@ -43,6 +49,23 @@
  */
 final class MapSelector extends Selector
 {
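+    // Map entries are serialized as the map type, the entry count, then alternating key and value selectors.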
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            MapType<?, ?> type = (MapType<?, ?>) readType(metadata, in);
+            int size = (int) in.readUnsignedVInt();
+            List<Pair<Selector, Selector>> entries = new ArrayList<>(size);
+            for (int i = 0; i < size; i++)
+            {
+                Pair<Selector, Selector> entry = Pair.create(serializer.deserialize(in, version, metadata),
+                                                             serializer.deserialize(in, version, metadata));
+                entries.add(entry);
+            }
+            return new MapSelector(type, entries);
+        }
+    };
+
     /**
      * The map type.
      */
@@ -170,17 +193,17 @@
         }
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         for (int i = 0, m = elements.size(); i < m; i++)
         {
             Pair<Selector, Selector> pair = elements.get(i);
-            pair.left.addInput(protocolVersion, rs);
-            pair.right.addInput(protocolVersion, rs);
+            pair.left.addInput(protocolVersion, input);
+            pair.right.addInput(protocolVersion, input);
         }
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         Map<ByteBuffer, ByteBuffer> map = new TreeMap<>(type.getKeysType());
         for (int i = 0, m = elements.size(); i < m; i++)
@@ -208,6 +231,18 @@
         }
     }
 
+    @Override
+    public boolean isTerminal()
+    {
+        for (int i = 0, m = elements.size(); i < m; i++)
+        {
+            Pair<Selector, Selector> pair = elements.get(i);
+            if (!pair.left.isTerminal() || !pair.right.isTerminal())
+                return false;
+        }
+        return true;
+    }
+
     public AbstractType<?> getType()
     {
         return type;
@@ -221,7 +256,58 @@
 
     private MapSelector(AbstractType<?> type, List<Pair<Selector, Selector>> elements)
     {
+        super(Kind.MAP_SELECTOR);
         this.type = (MapType<?, ?>) type;
         this.elements = elements;
     }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof MapSelector))
+            return false;
+
+        MapSelector s = (MapSelector) o;
+
+        return Objects.equal(type, s.type)
+            && Objects.equal(elements, s.elements);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(type, elements);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        int size = sizeOf(type) + TypeSizes.sizeofUnsignedVInt(elements.size());
+
+        for (int i = 0, m = elements.size(); i < m; i++)
+        {
+            Pair<Selector, Selector> entry = elements.get(i);
+            size += serializer.serializedSize(entry.left, version) + serializer.serializedSize(entry.right, version);
+        }
+
+        return size;
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        writeType(out, type);
+        out.writeUnsignedVInt(elements.size());
+
+        for (int i = 0, m = elements.size(); i < m; i++)
+        {
+            Pair<Selector, Selector> entry = elements.get(i);
+            serializer.serialize(entry.left, out, version);
+            serializer.serialize(entry.right, out, version);
+        }
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java b/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
index 84e1e84..22566b2 100644
--- a/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
+++ b/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
@@ -19,7 +19,6 @@
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.cassandra.cql3.ResultSet;
@@ -28,9 +27,7 @@
 import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.aggregation.GroupMaker;
-import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.rows.Cell;
-import org.apache.cassandra.utils.ByteBufferUtil;
 
 public final class ResultSetBuilder
 {
@@ -49,15 +46,11 @@
 
     /*
      * We'll build CQL3 row one by one.
-     * The currentRow is the values for the (CQL3) columns we've fetched.
-     * We also collect timestamps and ttls for the case where the writetime and
-     * ttl functions are used. Note that we might collect timestamp and/or ttls
-     * we don't care about, but since the array below are allocated just once,
-     * it doesn't matter performance wise.
      */
-    List<ByteBuffer> current;
-    final long[] timestamps;
-    final int[] ttls;
+    private Selector.InputRow inputRow;
+
+    private long size = 0;
+    private boolean sizeWarningEmitted = false;
 
     public ResultSetBuilder(ResultMetadata metadata, Selectors selectors)
     {
@@ -69,52 +62,45 @@
         this.resultSet = new ResultSet(metadata.copy(), new ArrayList<List<ByteBuffer>>());
         this.selectors = selectors;
         this.groupMaker = groupMaker;
-        this.timestamps = selectors.collectTimestamps() ? new long[selectors.numberOfFetchedColumns()] : null;
-        this.ttls = selectors.collectTTLs() ? new int[selectors.numberOfFetchedColumns()] : null;
+    }
 
-        // We use MIN_VALUE to indicate no timestamp and -1 for no ttl
-        if (timestamps != null)
-            Arrays.fill(timestamps, Long.MIN_VALUE);
-        if (ttls != null)
-            Arrays.fill(ttls, -1);
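+    /** Accumulates the serialized size of the given output row, counting the remaining bytes of each non-null value. */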
+    private void addSize(List<ByteBuffer> row)
+    {
+        for (int i = 0, isize = row.size(); i < isize; i++)
+        {
+            ByteBuffer value = row.get(i);
+            size += value != null ? value.remaining() : 0;
+        }
+    }
+
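+    /**
+     * Checks whether the accumulated result size has crossed the given warning threshold. A threshold of -1 disables
+     * the check, and the warning is reported at most once per result set.
+     */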
+    public boolean shouldWarn(long thresholdBytes)
+    {
+        if (thresholdBytes != -1 && !sizeWarningEmitted && size > thresholdBytes)
+        {
+            sizeWarningEmitted = true;
+            return true;
+        }
+        return false;
+    }
+
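+    /**
+     * Checks whether the accumulated result size exceeds the given rejection threshold (-1 disables the check).
+     */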
+    public boolean shouldReject(long thresholdBytes)
+    {
+        return thresholdBytes != -1 && size > thresholdBytes;
+    }
+
+    public long getSize()
+    {
+        return size;
     }
 
     public void add(ByteBuffer v)
     {
-        current.add(v);
+        inputRow.add(v);
     }
 
     public void add(Cell<?> c, int nowInSec)
     {
-        if (c == null)
-        {
-            current.add(null);
-            return;
-        }
-
-        current.add(value(c));
-
-        if (timestamps != null)
-            timestamps[current.size() - 1] = c.timestamp();
-
-        if (ttls != null)
-            ttls[current.size() - 1] = remainingTTL(c, nowInSec);
-    }
-
-    private int remainingTTL(Cell<?> c, int nowInSec)
-    {
-        if (!c.isExpiring())
-            return -1;
-
-        int remaining = c.localDeletionTime() - nowInSec;
-        return remaining >= 0 ? remaining : -1;
-    }
-
-    private <V> ByteBuffer value(Cell<V> c)
-    {
-        return c.isCounterCell()
-             ? ByteBufferUtil.bytes(CounterContext.instance().total(c.value(), c.accessor()))
-             : c.buffer();
+        inputRow.add(c, nowInSec);
     }
 
     /**
@@ -127,22 +113,24 @@
     {
         // The groupMaker needs to be called for each row
         boolean isNewAggregate = groupMaker == null || groupMaker.isNewGroup(partitionKey, clustering);
-        if (current != null)
+        if (inputRow != null)
         {
-            selectors.addInputRow(this);
+            selectors.addInputRow(inputRow);
             if (isNewAggregate)
             {
                 resultSet.addRow(getOutputRow());
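+                // Reset deeply when the selectors perform no processing: in that case the output row shares the
+                // input row's value array, so a fresh array is needed for the next row.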
+                inputRow.reset(!selectors.hasProcessing());
                 selectors.reset();
             }
+            else
+            {
+                inputRow.reset(!selectors.hasProcessing());
+            }
         }
-        current = new ArrayList<>(selectors.numberOfFetchedColumns());
-
-        // Timestamps and TTLs are arrays per row, we must null them out between rows
-        if (timestamps != null)
-            Arrays.fill(timestamps, Long.MIN_VALUE);
-        if (ttls != null)
-            Arrays.fill(ttls, -1);
+        else
+        {
+            inputRow = new Selector.InputRow(selectors.numberOfFetchedColumns(), selectors.collectTimestamps(), selectors.collectTTLs());
+        }
     }
 
     /**
@@ -150,12 +138,12 @@
      */
     public ResultSet build()
     {
-        if (current != null)
+        if (inputRow != null)
         {
-            selectors.addInputRow(this);
+            selectors.addInputRow(inputRow);
             resultSet.addRow(getOutputRow());
+            inputRow.reset(!selectors.hasProcessing());
             selectors.reset();
-            current = null;
         }
 
         // For aggregates we need to return a row even it no records have been found
@@ -166,6 +154,8 @@
 
     private List<ByteBuffer> getOutputRow()
     {
-        return selectors.getOutputRow();
+        List<ByteBuffer> row = selectors.getOutputRow();
+        addSize(row);
+        return row;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/cql3/selection/ScalarFunctionSelector.java b/src/java/org/apache/cassandra/cql3/selection/ScalarFunctionSelector.java
index de74678..ed2a140 100644
--- a/src/java/org/apache/cassandra/cql3/selection/ScalarFunctionSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/ScalarFunctionSelector.java
@@ -22,17 +22,27 @@
 
 import org.apache.cassandra.cql3.functions.Function;
 import org.apache.cassandra.cql3.functions.ScalarFunction;
-import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.transport.ProtocolVersion;
 
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
+
 final class ScalarFunctionSelector extends AbstractFunctionSelector<ScalarFunction>
 {
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    protected static final SelectorDeserializer deserializer = new AbstractFunctionSelectorDeserializer()
+    {
+        @Override
+        protected Selector newFunctionSelector(Function function, List<Selector> argSelectors)
+        {
+            return new ScalarFunctionSelector(function, argSelectors);
+        }
+    };
+
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         for (int i = 0, m = argSelectors.size(); i < m; i++)
         {
             Selector s = argSelectors.get(i);
-            s.addInput(protocolVersion, rs);
+            s.addInput(protocolVersion, input);
         }
     }
 
@@ -40,7 +50,7 @@
     {
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         for (int i = 0, m = argSelectors.size(); i < m; i++)
         {
@@ -51,8 +61,16 @@
         return fun.execute(protocolVersion, args());
     }
 
+    @Override
+    public void validateForGroupBy()
+    {
+        checkTrue(fun.isMonotonic(), "Only monotonic functions are supported in the GROUP BY clause. Got: %s", fun);
+        for (int i = 0, m = argSelectors.size(); i < m; i++)
+            argSelectors.get(i).validateForGroupBy();
+    }
+
     ScalarFunctionSelector(Function fun, List<Selector> argSelectors)
     {
-        super((ScalarFunction) fun, argSelectors);
+        super(Kind.SCALAR_FUNCTION_SELECTOR, (ScalarFunction) fun, argSelectors);
     }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/Selection.java b/src/java/org/apache/cassandra/cql3/selection/Selection.java
index 77e4cc8..f07184a 100644
--- a/src/java/org/apache/cassandra/cql3/selection/Selection.java
+++ b/src/java/org/apache/cassandra/cql3/selection/Selection.java
@@ -28,6 +28,7 @@
 
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.cql3.selection.Selector.InputRow;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -346,6 +347,12 @@
         public ColumnFilter getColumnFilter();
 
         /**
+         * Checks if this {@code Selectors} performs some processing.
+         * @return {@code true} if this {@code Selectors} performs some processing, {@code false} otherwise.
+         */
+        public boolean hasProcessing();
+
+        /**
          * Checks if one of the selectors perform some aggregations.
          * @return {@code true} if one of the selectors perform some aggregations, {@code false} otherwise.
          */
@@ -372,10 +379,9 @@
         /**
          * Adds the current row of the specified <code>ResultSetBuilder</code>.
          *
-         * @param rs the <code>ResultSetBuilder</code>
-         * @throws InvalidRequestException
+         * @param input the input row
          */
-        public void addInputRow(ResultSetBuilder rs);
+        public void addInputRow(InputRow input);
 
         public List<ByteBuffer> getOutputRow();
 
@@ -467,9 +473,9 @@
                     return current;
                 }
 
-                public void addInputRow(ResultSetBuilder rs) throws InvalidRequestException
+                public void addInputRow(InputRow input)
                 {
-                    current = rs.current;
+                    current = input.getValues();
                 }
 
                 public boolean isAggregate()
@@ -477,6 +483,11 @@
                     return false;
                 }
 
+                public boolean hasProcessing()
+                {
+                    return false;
+                }
+
                 @Override
                 public int numberOfFetchedColumns()
                 {
@@ -572,6 +583,11 @@
                     return factories.doesAggregation();
                 }
 
+                public boolean hasProcessing()
+                {
+                    return true;
+                }
+
                 public List<ByteBuffer> getOutputRow()
                 {
                     List<ByteBuffer> outputRow = new ArrayList<>(selectors.size());
@@ -582,10 +598,10 @@
                     return isJson ? rowToJson(outputRow, options.getProtocolVersion(), metadata, orderingColumns) : outputRow;
                 }
 
-                public void addInputRow(ResultSetBuilder rs) throws InvalidRequestException
+                public void addInputRow(InputRow input)
                 {
                     for (Selector selector : selectors)
-                        selector.addInput(options.getProtocolVersion(), rs);
+                        selector.addInput(options.getProtocolVersion(), input);
                 }
 
                 @Override
diff --git a/src/java/org/apache/cassandra/cql3/selection/Selector.java b/src/java/org/apache/cassandra/cql3/selection/Selector.java
index 3262b9c..463382d 100644
--- a/src/java/org/apache/cassandra/cql3/selection/Selector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/Selector.java
@@ -17,18 +17,31 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Arrays;
 import java.util.List;
 
+import org.apache.cassandra.schema.CQLTypeParser;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
 
 /**
  * A <code>Selector</code> is used to convert the data returned by the storage engine into the data requested by the
@@ -38,6 +51,50 @@
  */
 public abstract class Selector
 {
+    protected static abstract class SelectorDeserializer
+    {
+        protected abstract Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException;
+
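+        // Types are serialized as their CQL representation (see writeType/sizeOf below) and re-parsed against the
+        // keyspace's user-defined types.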
+        protected final AbstractType<?> readType(TableMetadata metadata, DataInputPlus in) throws IOException
+        {
+            KeyspaceMetadata keyspace = Schema.instance.getKeyspaceMetadata(metadata.keyspace);
+            return readType(keyspace, in);
+        }
+
+        protected final AbstractType<?> readType(KeyspaceMetadata keyspace, DataInputPlus in) throws IOException
+        {
+            String cqlType = in.readUTF();
+            return CQLTypeParser.parse(keyspace.name, cqlType, keyspace.types);
+        }
+    }
+
+    /**
+     * The <code>Selector</code> kinds.
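+     * <p>The ordinal of each kind is written on the wire by the {@link Serializer}, so the declaration order must be
+     * preserved and new kinds should only be appended.</p>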
+     */
+    public static enum Kind
+    {
+        SIMPLE_SELECTOR(SimpleSelector.deserializer),
+        TERM_SELECTOR(TermSelector.deserializer),
+        WRITETIME_OR_TTL_SELECTOR(WritetimeOrTTLSelector.deserializer),
+        LIST_SELECTOR(ListSelector.deserializer),
+        SET_SELECTOR(SetSelector.deserializer),
+        MAP_SELECTOR(MapSelector.deserializer),
+        TUPLE_SELECTOR(TupleSelector.deserializer),
+        USER_TYPE_SELECTOR(UserTypeSelector.deserializer),
+        FIELD_SELECTOR(FieldSelector.deserializer),
+        SCALAR_FUNCTION_SELECTOR(ScalarFunctionSelector.deserializer),
+        AGGREGATE_FUNCTION_SELECTOR(AggregateFunctionSelector.deserializer),
+        ELEMENT_SELECTOR(ElementsSelector.ElementSelector.deserializer),
+        SLICE_SELECTOR(ElementsSelector.SliceSelector.deserializer);
+
+        private final SelectorDeserializer deserializer;
+
+        Kind(SelectorDeserializer deserializer)
+        {
+            this.deserializer = deserializer;
+        }
+    }
+
     /**
      * A factory for <code>Selector</code> instances.
      */
@@ -70,7 +127,7 @@
          * depends on the bound values in particular).
          * @return a new <code>Selector</code> instance
          */
-        public abstract Selector newInstance(QueryOptions options) throws InvalidRequestException;
+        public abstract Selector newInstance(QueryOptions options);
 
         /**
          * Checks if this factory creates selectors instances that creates aggregates.
@@ -174,6 +231,50 @@
         abstract void addFetchedColumns(ColumnFilter.Builder builder);
     }
 
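+    /**
+     * Serializer for {@code Selector} instances. The wire format is the {@link Kind} ordinal written as a single
+     * byte, followed by the kind-specific payload produced by {@link Selector#serialize}; deserialization reads the
+     * kind byte back and dispatches to the matching {@code SelectorDeserializer}.
+     */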
+    public static class Serializer
+    {
+        public void serialize(Selector selector, DataOutputPlus out, int version) throws IOException
+        {
+            out.writeByte(selector.kind().ordinal());
+            selector.serialize(out, version);
+        }
+
+        public Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            Kind kind = Kind.values()[in.readUnsignedByte()];
+            return kind.deserializer.deserialize(in, version, metadata);
+        }
+
+        public int serializedSize(Selector selector, int version)
+        {
+            return TypeSizes.sizeof((byte) selector.kind().ordinal()) + selector.serializedSize(version);
+        }
+    }
+
+    /**
+     * The {@code Selector} serializer.
+     */
+    public static final Serializer serializer = new Serializer();
+
+    /**
+     * The {@code Selector} kind.
+     */
+    private final Kind kind;
+
+    /**
+     * Returns the {@code Selector} kind.
+     * @return the {@code Selector} kind
+     */
+    public final Kind kind()
+    {
+        return kind;
+    }
+
+    protected Selector(Kind kind)
+    {
+        this.kind = kind;
+    }
+
     /**
      * Add to the provided builder the column (and potential subselections) to fetch for this
      * selection.
@@ -183,13 +284,157 @@
     public abstract void addFetchedColumns(ColumnFilter.Builder builder);
 
     /**
+     * A row of data that needs to be processed by a {@code Selector}.
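+     * <p>Values are stored positionally; timestamps and TTLs are collected only when requested, e.g. when the
+     * writetime or ttl functions are used.</p>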
+     */
+    public static final class InputRow
+    {
+        private ByteBuffer[] values;
+        private final long[] timestamps;
+        private final int[] ttls;
+        private int index;
+
+        public InputRow(int size, boolean collectTimestamps, boolean collectTTLs)
+        {
+            this.values = new ByteBuffer[size];
+
+            if (collectTimestamps)
+            {
+                this.timestamps = new long[size];
+                // We use MIN_VALUE to indicate no timestamp
+                Arrays.fill(timestamps, Long.MIN_VALUE);
+            }
+            else
+            {
+                timestamps = null;
+            }
+
+            if (collectTTLs)
+            {
+                this.ttls = new int[size];
+                // We use -1 to indicate no ttl
+                Arrays.fill(ttls, -1);
+            }
+            else
+            {
+                ttls = null;
+            }
+        }
+
+        public void add(ByteBuffer v)
+        {
+            values[index] = v;
+
+            if (timestamps != null)
+                timestamps[index] = Long.MIN_VALUE;
+
+            if (ttls != null)
+                ttls[index] = -1;
+
+            index++;
+        }
+
+        public void add(Cell<?> c, int nowInSec)
+        {
+            if (c == null)
+            {
+                add(null);
+                return;
+            }
+
+            values[index] = value(c);
+
+            if (timestamps != null)
+                timestamps[index] = c.timestamp();
+
+            if (ttls != null)
+                ttls[index] = remainingTTL(c, nowInSec);
+
+            index++;
+        }
+
+        private int remainingTTL(Cell<?> c, int nowInSec)
+        {
+            if (!c.isExpiring())
+                return -1;
+
+            int remaining = c.localDeletionTime() - nowInSec;
+            return remaining >= 0 ? remaining : -1;
+        }
+
+        private <V> ByteBuffer value(Cell<V> c)
+        {
+            return c.isCounterCell()
+                 ? ByteBufferUtil.bytes(CounterContext.instance().total(c.value(), c.accessor()))
+                 : c.buffer();
+        }
+
+        /**
+         * Return the value of the column with the specified index.
+         *
+         * @param index the column index
+         * @return the value of the column with the specified index
+         */
+        public ByteBuffer getValue(int index)
+        {
+            return values[index];
+        }
+
+        /**
+         * Reset the row internal state.
+         * <p>If the reset is not a deep one, only the index is reset. If the reset is a deep one, a new
+         * array is created to store the column values. This allows object creation to be avoided when it is not
+         * necessary.</p>
+         *
+         * @param deep {@code true} if the reset must be a deep one.
+         */
+        public void reset(boolean deep)
+        {
+            index = 0;
+            if (deep)
+                values = new ByteBuffer[values.length];
+        }
+
+        /**
+         * Return the timestamp of the column with the specified index.
+         *
+         * @param index the column index
+         * @return the timestamp of the column with the specified index
+         */
+        public long getTimestamp(int index)
+        {
+            return timestamps[index];
+        }
+
+        /**
+         * Return the ttl of the column with the specified index.
+         *
+         * @param index the column index
+         * @return the ttl of the column with the specified index
+         */
+        public int getTtl(int index)
+        {
+            return ttls[index];
+        }
+
+        /**
+         * Returns the column values as a list.
+         * <p>The content of the list is shared with this {@code InputRow} unless a deep reset has been done.</p>
+         * @return the column values as a list.
+         */
+        public List<ByteBuffer> getValues()
+        {
+            return Arrays.asList(values);
+        }
+    }
+
+    /**
      * Add the current value from the specified <code>ResultSetBuilder</code>.
      *
      * @param protocolVersion protocol version used for serialization
-     * @param rs the <code>ResultSetBuilder</code>
-     * @throws InvalidRequestException if a problem occurs while add the input value
+     * @param input the input row
+     * @throws InvalidRequestException if a problem occurs while adding the input row
      */
-    public abstract void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException;
+    public abstract void addInput(ProtocolVersion protocolVersion, InputRow input);
 
     /**
      * Returns the selector output.
@@ -211,4 +456,36 @@
      * Reset the internal state of this <code>Selector</code>.
      */
     public abstract void reset();
+
+    /**
+     * A selector is terminal if it doesn't require any input for its output to be computed, i.e. if the result of
+     * {@link #getOutput} doesn't depend on {@link #addInput}. This is typically the case for constant values or
+     * functions of constant values.
+     */
+    public boolean isTerminal()
+    {
+        return false;
+    }
+
+    /**
+     * Checks that this selector is valid for GROUP BY clause.
+     */
+    public void validateForGroupBy()
+    {
+        throw invalidRequest("Only column names and monotonic scalar functions are supported in the GROUP BY clause.");
+    }
+
+    protected abstract int serializedSize(int version);
+
+    protected abstract void serialize(DataOutputPlus out, int version) throws IOException;
+
+    protected static void writeType(DataOutputPlus out, AbstractType<?> type) throws IOException
+    {
+        out.writeUTF(type.asCQL3Type().toString());
+    }
+
+    protected static int sizeOf(AbstractType<?> type)
+    {
+        return TypeSizes.sizeof(type.asCQL3Type().toString());
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/SetSelector.java b/src/java/org/apache/cassandra/cql3/selection/SetSelector.java
index 6693121..b54b2d4 100644
--- a/src/java/org/apache/cassandra/cql3/selection/SetSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/SetSelector.java
@@ -17,17 +17,24 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.Sets;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter.Builder;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.SetType;
-import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.serializers.CollectionSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 
@@ -37,6 +44,20 @@
  */
 final class SetSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            SetType<?> type = (SetType<?>) readType(metadata, in);
+            int size = (int) in.readUnsignedVInt();
+            List<Selector> elements = new ArrayList<>(size);
+            for (int i = 0; i < size; i++)
+                elements.add(serializer.deserialize(in, version, metadata));
+
+            return new SetSelector(type, elements);
+        }
+    };
+
     /**
      * The set type.
      */
@@ -70,13 +91,13 @@
             elements.get(i).addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         for (int i = 0, m = elements.size(); i < m; i++)
-            elements.get(i).addInput(protocolVersion, rs);
+            elements.get(i).addInput(protocolVersion, input);
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         Set<ByteBuffer> buffers = new TreeSet<>(type.getElementsType());
         for (int i = 0, m = elements.size(); i < m; i++)
@@ -92,6 +113,17 @@
             elements.get(i).reset();
     }
 
+    @Override
+    public boolean isTerminal()
+    {
+        for (int i = 0, m = elements.size(); i < m; i++)
+        {
+            if (!elements.get(i).isTerminal())
+                return false;
+        }
+        return true;
+    }
+
     public AbstractType<?> getType()
     {
         return type;
@@ -105,7 +137,49 @@
 
     private SetSelector(AbstractType<?> type, List<Selector> elements)
     {
+        super(Kind.SET_SELECTOR);
         this.type = (SetType<?>) type;
         this.elements = elements;
     }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof SetSelector))
+            return false;
+
+        SetSelector s = (SetSelector) o;
+
+        return Objects.equal(type, s.type)
+            && Objects.equal(elements, s.elements);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(type, elements);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        int size = sizeOf(type) + TypeSizes.sizeofUnsignedVInt(elements.size());
+
+        for (int i = 0, m = elements.size(); i < m; i++)
+            size += serializer.serializedSize(elements.get(i), version);
+
+        return size;
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        writeType(out, type);
+        out.writeUnsignedVInt(elements.size());
+        for (int i = 0, m = elements.size(); i < m; i++)
+            serializer.serialize(elements.get(i), out, version);
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/SimpleSelector.java b/src/java/org/apache/cassandra/cql3/selection/SimpleSelector.java
index 31b1911..a6ae446 100644
--- a/src/java/org/apache/cassandra/cql3/selection/SimpleSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/SimpleSelector.java
@@ -17,19 +17,38 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.filter.ColumnFilter.Builder;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.ByteBufferUtil;
 
 public final class SimpleSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            ByteBuffer columnName = ByteBufferUtil.readWithVIntLength(in);
+            ColumnMetadata column = metadata.getColumn(columnName);
+            int idx = in.readInt();
+            return new SimpleSelector(column, idx);
+        }
+    };
+
     /**
      * The Factory for {@code SimpleSelector}.
      */
@@ -113,17 +132,17 @@
     }
 
     @Override
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input) throws InvalidRequestException
     {
         if (!isSet)
         {
             isSet = true;
-            current = rs.current.get(idx);
+            current = input.getValue(idx);
         }
     }
 
     @Override
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         return current;
     }
@@ -149,7 +168,48 @@
 
     private SimpleSelector(ColumnMetadata column, int idx)
     {
+        super(Kind.SIMPLE_SELECTOR);
         this.column = column;
         this.idx = idx;
     }
+
+    @Override
+    public void validateForGroupBy()
+    {
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof SimpleSelector))
+            return false;
+
+        SimpleSelector s = (SimpleSelector) o;
+
+        return Objects.equal(column, s.column)
+            && Objects.equal(idx, s.idx);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(column, idx);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        return ByteBufferUtil.serializedSizeWithVIntLength(column.name.bytes)
+                + TypeSizes.sizeof(idx);
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        ByteBufferUtil.writeWithVIntLength(column.name.bytes, out);
+        out.writeInt(idx);
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/TermSelector.java b/src/java/org/apache/cassandra/cql3/selection/TermSelector.java
index 321cd27..6f0c844 100644
--- a/src/java/org/apache/cassandra/cql3/selection/TermSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/TermSelector.java
@@ -17,16 +17,22 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.Term;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.ByteBufferUtil;
 
 /**
  * Selector representing a simple term (literals or bound variables).
@@ -36,6 +42,16 @@
  */
 public class TermSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            AbstractType<?> type = readType(metadata, in);
+            ByteBuffer value = ByteBufferUtil.readWithVIntLength(in);
+            return new TermSelector(value, type);
+        }
+    };
+
     private final ByteBuffer value;
     private final AbstractType<?> type;
 
@@ -74,8 +90,9 @@
         };
     }
 
-    private TermSelector(ByteBuffer value, AbstractType<?> type)
+    TermSelector(ByteBuffer value, AbstractType<?> type)
     {
+        super(Kind.TERM_SELECTOR);
         this.value = value;
         this.type = type;
     }
@@ -84,11 +101,11 @@
     {
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         return value;
     }
@@ -101,4 +118,44 @@
     public void reset()
     {
     }
+
+    @Override
+    public boolean isTerminal()
+    {
+        return true;
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof TermSelector))
+            return false;
+
+        TermSelector s = (TermSelector) o;
+
+        return Objects.equal(value, s.value)
+            && Objects.equal(type, s.type);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(value, type);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        return sizeOf(type) + ByteBufferUtil.serializedSizeWithVIntLength(value);
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        writeType(out, type);
+        ByteBufferUtil.writeWithVIntLength(value, out);
+    }
 }
diff --git a/src/java/org/apache/cassandra/cql3/selection/TupleSelector.java b/src/java/org/apache/cassandra/cql3/selection/TupleSelector.java
index 898085b..0c06bc2 100644
--- a/src/java/org/apache/cassandra/cql3/selection/TupleSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/TupleSelector.java
@@ -17,15 +17,23 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.List;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.Tuples;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter.Builder;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.TupleType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.transport.ProtocolVersion;
 
 /**
@@ -34,6 +42,20 @@
  */
 final class TupleSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            AbstractType<?> type = readType(metadata, in);
+            int size = (int) in.readUnsignedVInt();
+            List<Selector> elements = new ArrayList<>(size);
+            for (int i = 0; i < size; i++)
+                elements.add(serializer.deserialize(in, version, metadata));
+
+            return new TupleSelector(type, elements);
+        }
+    };
+
     /**
      * The tuple type.
      */
@@ -67,10 +89,10 @@
             elements.get(i).addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         for (int i = 0, m = elements.size(); i < m; i++)
-            elements.get(i).addInput(protocolVersion, rs);
+            elements.get(i).addInput(protocolVersion, input);
     }
 
     public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
@@ -89,6 +111,17 @@
             elements.get(i).reset();
     }
 
+    @Override
+    public boolean isTerminal()
+    {
+        for (int i = 0, m = elements.size(); i < m; i++)
+        {
+            if (!elements.get(i).isTerminal())
+                return false;
+        }
+        return true;
+    }
+
     public AbstractType<?> getType()
     {
         return type;
@@ -102,7 +135,50 @@
 
     private TupleSelector(AbstractType<?> type, List<Selector> elements)
     {
+        super(Kind.TUPLE_SELECTOR);
         this.type = type;
         this.elements = elements;
     }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof TupleSelector))
+            return false;
+
+        TupleSelector s = (TupleSelector) o;
+
+        return Objects.equal(type, s.type)
+            && Objects.equal(elements, s.elements);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(type, elements);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        int size = sizeOf(type) + TypeSizes.sizeofUnsignedVInt(elements.size());
+
+        for (int i = 0, m = elements.size(); i < m; i++)
+            size += serializer.serializedSize(elements.get(i), version);
+
+        return size;
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        writeType(out, type);
+        out.writeUnsignedVInt(elements.size());
+
+        for (int i = 0, m = elements.size(); i < m; i++)
+            serializer.serialize(elements.get(i), out, version);
+    }
 }
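
The new serialize/deserialize pair above uses a simple framing scheme: the tuple type is written first, then the element count as an unsigned vint, then each element via the shared selector serializer, and deserialization reads them back in the same order. A minimal standalone sketch of that count-prefixed framing, using plain java.io streams and a hand-rolled LEB128-style varint rather than Cassandra's DataOutputPlus/DataInputPlus (so the byte layout here is only illustrative, not the on-wire format):

// Illustrative sketch only -- not part of the patch. It mimics the count-prefixed
// layout of TupleSelector.serialize/deserialize with plain java.io streams.
import java.io.*;
import java.util.*;

public class CountPrefixedFraming
{
    // Minimal unsigned varint encoder (7 bits per byte, high bit = "more follows").
    static void writeUnsignedVInt(DataOutputStream out, long value) throws IOException
    {
        while ((value & ~0x7FL) != 0)
        {
            out.writeByte((int) ((value & 0x7F) | 0x80));
            value >>>= 7;
        }
        out.writeByte((int) value);
    }

    static long readUnsignedVInt(DataInputStream in) throws IOException
    {
        long value = 0;
        int shift = 0;
        while (true)
        {
            byte b = in.readByte();
            value |= (long) (b & 0x7F) << shift;
            if ((b & 0x80) == 0)
                return value;
            shift += 7;
        }
    }

    public static void main(String[] args) throws IOException
    {
        List<String> elements = Arrays.asList("a", "bb", "ccc");

        // Serialize: element count first, then each element.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeUnsignedVInt(out, elements.size());
        for (String e : elements)
            out.writeUTF(e);

        // Deserialize: read the count, then exactly that many elements.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        int size = (int) readUnsignedVInt(in);
        List<String> decoded = new ArrayList<>(size);
        for (int i = 0; i < size; i++)
            decoded.add(in.readUTF());

        System.out.println(decoded.equals(elements)); // true
    }
}
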
diff --git a/src/java/org/apache/cassandra/cql3/selection/UserTypeSelector.java b/src/java/org/apache/cassandra/cql3/selection/UserTypeSelector.java
index 61faf8d..8007467 100644
--- a/src/java/org/apache/cassandra/cql3/selection/UserTypeSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/UserTypeSelector.java
@@ -17,25 +17,32 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.FieldIdentifier;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.UserTypes;
 import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.filter.ColumnFilter.Builder;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.TupleType;
 import org.apache.cassandra.db.marshal.UserType;
-import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.ByteBufferUtil;
 
 /**
  * <code>Selector</code> for literal map (e.g. {'min' : min(value), 'max' : max(value), 'count' : count(value)}).
@@ -43,6 +50,23 @@
  */
 final class UserTypeSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            UserType type = (UserType) readType(metadata, in);
+            int size = (int) in.readUnsignedVInt();
+            Map<FieldIdentifier, Selector> fields = new HashMap<>(size);
+            for (int i = 0; i < size; i++)
+            {
+                FieldIdentifier identifier = new FieldIdentifier(ByteBufferUtil.readWithVIntLength(in));
+                Selector selector = serializer.deserialize(in, version, metadata);
+                fields.put(identifier, selector);
+            }
+            return new UserTypeSelector(type, fields);
+        }
+    };
+
     /**
      * The map type.
      */
@@ -158,13 +182,13 @@
             field.addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs) throws InvalidRequestException
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         for (Selector field : fields.values())
-            field.addInput(protocolVersion, rs);
+            field.addInput(protocolVersion, input);
     }
 
-    public ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException
+    public ByteBuffer getOutput(ProtocolVersion protocolVersion)
     {
         UserType userType = (UserType) type;
         ByteBuffer[] buffers = new ByteBuffer[userType.size()];
@@ -183,6 +207,17 @@
             field.reset();
     }
 
+    @Override
+    public boolean isTerminal()
+    {
+        for (Selector field : fields.values())
+        {
+            if (!field.isTerminal())
+                return false;
+        }
+        return true;
+    }
+
     public AbstractType<?> getType()
     {
         return type;
@@ -196,7 +231,53 @@
 
     private UserTypeSelector(AbstractType<?> type, Map<FieldIdentifier, Selector> fields)
     {
+        super(Kind.USER_TYPE_SELECTOR);
         this.type = type;
         this.fields = fields;
     }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof UserTypeSelector))
+            return false;
+
+        UserTypeSelector s = (UserTypeSelector) o;
+
+        return Objects.equal(type, s.type)
+            && Objects.equal(fields, s.fields);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(type, fields);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        int size = sizeOf(type) + TypeSizes.sizeofUnsignedVInt(fields.size());
+
+        for (Map.Entry<FieldIdentifier, Selector> field : fields.entrySet())
+            size += ByteBufferUtil.serializedSizeWithVIntLength(field.getKey().bytes) + serializer.serializedSize(field.getValue(), version);
+
+        return size;
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        writeType(out, type);
+        out.writeUnsignedVInt(fields.size());
+
+        for (Map.Entry<FieldIdentifier, Selector> field : fields.entrySet())
+        {
+            ByteBufferUtil.writeWithVIntLength(field.getKey().bytes, out);
+            serializer.serialize(field.getValue(), out, version);
+        }
+    }
 }
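
UserTypeSelector gets the same value-semantics treatment as the other selectors: equals compares the type and the field map, and hashCode combines exactly the same fields, so selectors can be compared after a serialization round trip. A small standalone sketch of that pattern using java.util.Objects, the JDK equivalent of the com.google.common.base.Objects calls used in the patch:

// Illustrative sketch -- the patch uses com.google.common.base.Objects;
// java.util.Objects gives the same null-safe behaviour in the JDK.
import java.util.Map;
import java.util.Objects;

final class FieldSelectorSketch
{
    private final String typeName;
    private final Map<String, String> fields;

    FieldSelectorSketch(String typeName, Map<String, String> fields)
    {
        this.typeName = typeName;
        this.fields = fields;
    }

    @Override
    public boolean equals(Object o)
    {
        if (this == o)
            return true;

        if (!(o instanceof FieldSelectorSketch))
            return false;

        FieldSelectorSketch s = (FieldSelectorSketch) o;

        // Null-safe field-by-field comparison, mirroring Objects.equal(...) in the patch.
        return Objects.equals(typeName, s.typeName)
            && Objects.equals(fields, s.fields);
    }

    @Override
    public int hashCode()
    {
        // Combines exactly the fields used in equals(), mirroring Objects.hashCode(...).
        return Objects.hash(typeName, fields);
    }

    public static void main(String[] args)
    {
        FieldSelectorSketch a = new FieldSelectorSketch("address", Map.of("street", "s", "city", "c"));
        FieldSelectorSketch b = new FieldSelectorSketch("address", Map.of("street", "s", "city", "c"));
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
    }
}
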
diff --git a/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java b/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
index 95586f2..2c56f5c 100644
--- a/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
@@ -17,20 +17,39 @@
  */
 package org.apache.cassandra.cql3.selection;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import com.google.common.base.Objects;
+
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.ColumnSpecification;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.LongType;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 final class WritetimeOrTTLSelector extends Selector
 {
+    protected static final SelectorDeserializer deserializer = new SelectorDeserializer()
+    {
+        protected Selector deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
+        {
+            ByteBuffer columnName = ByteBufferUtil.readWithVIntLength(in);
+            ColumnMetadata column = metadata.getColumn(columnName);
+            int idx = in.readInt();
+            boolean isWritetime = in.readBoolean();
+            return new WritetimeOrTTLSelector(column, idx, isWritetime);
+        }
+    };
+
     private final ColumnMetadata column;
     private final int idx;
     private final boolean isWritetime;
@@ -88,7 +107,7 @@
         builder.add(column);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, ResultSetBuilder rs)
+    public void addInput(ProtocolVersion protocolVersion, InputRow input)
     {
         if (isSet)
             return;
@@ -97,12 +116,12 @@
 
         if (isWritetime)
         {
-            long ts = rs.timestamps[idx];
+            long ts = input.getTimestamp(idx);
             current = ts != Long.MIN_VALUE ? ByteBufferUtil.bytes(ts) : null;
         }
         else
         {
-            int ttl = rs.ttls[idx];
+            int ttl = input.getTtl(idx);
             current = ttl > 0 ? ByteBufferUtil.bytes(ttl) : null;
         }
     }
@@ -131,8 +150,47 @@
 
     private WritetimeOrTTLSelector(ColumnMetadata column, int idx, boolean isWritetime)
     {
+        super(Kind.WRITETIME_OR_TTL_SELECTOR);
         this.column = column;
         this.idx = idx;
         this.isWritetime = isWritetime;
     }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof WritetimeOrTTLSelector))
+            return false;
+
+        WritetimeOrTTLSelector s = (WritetimeOrTTLSelector) o;
+
+        return Objects.equal(column, s.column)
+            && Objects.equal(idx, s.idx)
+            && Objects.equal(isWritetime, s.isWritetime);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(column, idx, isWritetime);
+    }
+
+    @Override
+    protected int serializedSize(int version)
+    {
+        return ByteBufferUtil.serializedSizeWithVIntLength(column.name.bytes)
+                + TypeSizes.sizeof(idx)
+                + TypeSizes.sizeof(isWritetime);
+    }
+
+    @Override
+    protected void serialize(DataOutputPlus out, int version) throws IOException
+    {
+        ByteBufferUtil.writeWithVIntLength(column.name.bytes, out);
+        out.writeInt(idx);
+        out.writeBoolean(isWritetime);
+    }
 }
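
The addInput change here replaces direct access to the ResultSetBuilder's timestamps/ttls arrays with accessor calls on the new InputRow abstraction, while keeping the sentinel handling: Long.MIN_VALUE means "no timestamp", a non-positive TTL means "no TTL", and both map to a null output. A hedged sketch of that sentinel-to-null mapping against a hypothetical row interface (RowView below is a stand-in, not Cassandra's InputRow):

// Illustrative sketch -- RowView is a hypothetical stand-in for the InputRow
// abstraction introduced by this patch, not Cassandra's API.
import java.nio.ByteBuffer;

public class WritetimeOrTtlSketch
{
    interface RowView
    {
        long getTimestamp(int idx); // Long.MIN_VALUE when the cell has no timestamp
        int getTtl(int idx);        // <= 0 when the cell has no TTL
    }

    // Mirrors the sentinel handling in WritetimeOrTTLSelector.addInput:
    // absent values become null, present ones are encoded as fixed-width buffers.
    static ByteBuffer writetimeOrTtl(RowView row, int idx, boolean isWritetime)
    {
        if (isWritetime)
        {
            long ts = row.getTimestamp(idx);
            return ts != Long.MIN_VALUE ? ByteBuffer.allocate(8).putLong(0, ts) : null;
        }
        int ttl = row.getTtl(idx);
        return ttl > 0 ? ByteBuffer.allocate(4).putInt(0, ttl) : null;
    }

    public static void main(String[] args)
    {
        RowView row = new RowView()
        {
            public long getTimestamp(int idx) { return 1_000_000L; }
            public int getTtl(int idx) { return 0; } // no TTL set
        };
        System.out.println(writetimeOrTtl(row, 0, true));  // non-null 8-byte buffer holding the timestamp
        System.out.println(writetimeOrTtl(row, 0, false)); // null, because no TTL is set
    }
}
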
diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterRoleStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterRoleStatement.java
index 2ffd050..eb0e3e0 100644
--- a/src/java/org/apache/cassandra/cql3/statements/AlterRoleStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterRoleStatement.java
@@ -29,23 +29,26 @@
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
+import static org.apache.cassandra.cql3.statements.RequestValidations.*;
 
 public class AlterRoleStatement extends AuthenticationStatement
 {
     private final RoleResource role;
     private final RoleOptions opts;
     final DCPermissions dcPermissions;
+    private final boolean ifExists;
 
     public AlterRoleStatement(RoleName name, RoleOptions opts)
     {
-        this(name, opts, null);
+        this(name, opts, null, false);
     }
 
-    public AlterRoleStatement(RoleName name, RoleOptions opts, DCPermissions dcPermissions)
+    public AlterRoleStatement(RoleName name, RoleOptions opts, DCPermissions dcPermissions, boolean ifExists)
     {
         this.role = RoleResource.role(name.getName());
         this.opts = opts;
         this.dcPermissions = dcPermissions;
+        this.ifExists = ifExists;
     }
 
     public void validate(ClientState state) throws RequestValidationException
@@ -63,7 +66,9 @@
         // validate login here before authorize to avoid leaking user existence to anonymous users.
         state.ensureNotAnonymous();
         if (!DatabaseDescriptor.getRoleManager().isExistingRole(role))
-            throw new InvalidRequestException(String.format("%s doesn't exist", role.getRoleName()));
+        {
+            checkTrue(ifExists, "Role %s doesn't exist", role.getRoleName());
+        }
     }
 
     public void authorize(ClientState state) throws UnauthorizedException
@@ -106,7 +111,7 @@
             DatabaseDescriptor.getNetworkAuthorizer().setRoleDatacenters(role, dcPermissions);
         return null;
     }
-    
+
     @Override
     public String toString()
     {
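
With the new ifExists flag, a missing role is an error only when IF EXISTS was not specified: checkTrue(ifExists, ...) throws for a plain ALTER ROLE but makes ALTER ROLE IF EXISTS a no-op. A small self-contained sketch of that guard; the exception type and role lookup below are hypothetical stand-ins for Cassandra's classes:

// Illustrative sketch of the IF EXISTS guard added above; InvalidRequest and the
// role lookup are hypothetical stand-ins, not Cassandra's classes.
import java.util.Set;

public class IfExistsGuard
{
    static class InvalidRequest extends RuntimeException
    {
        InvalidRequest(String message) { super(message); }
    }

    static void checkTrue(boolean expression, String messageTemplate, Object arg)
    {
        if (!expression)
            throw new InvalidRequest(String.format(messageTemplate, arg));
    }

    static void validateAlterRole(Set<String> existingRoles, String role, boolean ifExists)
    {
        if (!existingRoles.contains(role))
        {
            // Throws for ALTER ROLE, silently returns for ALTER ROLE IF EXISTS.
            checkTrue(ifExists, "Role %s doesn't exist", role);
        }
    }

    public static void main(String[] args)
    {
        Set<String> roles = Set.of("alice");
        validateAlterRole(roles, "bob", true);      // IF EXISTS: no-op
        try
        {
            validateAlterRole(roles, "bob", false); // plain ALTER ROLE: rejected
        }
        catch (InvalidRequest e)
        {
            System.out.println(e.getMessage());     // Role bob doesn't exist
        }
    }
}
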
diff --git a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
index 946f5de..61e4934 100644
--- a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
@@ -31,6 +31,7 @@
 
 import org.apache.cassandra.audit.AuditLogContext;
 import org.apache.cassandra.audit.AuditLogEntryType;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.ColumnMetadata;
@@ -51,6 +52,7 @@
 import static java.util.function.Predicate.isEqual;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * A <code>BATCH</code> statement parsed from a CQL query.
@@ -232,10 +234,10 @@
             String cfName = null;
             for (ModificationStatement stmt : statements)
             {
-                if (ksName != null && (!stmt.keyspace().equals(ksName) || !stmt.columnFamily().equals(cfName)))
+                if (ksName != null && (!stmt.keyspace().equals(ksName) || !stmt.table().equals(cfName)))
                     throw new InvalidRequestException("Batch with conditions cannot span multiple tables");
                 ksName = stmt.keyspace();
-                cfName = stmt.columnFamily();
+                cfName = stmt.table();
             }
         }
     }
@@ -264,7 +266,8 @@
     }
 
     @VisibleForTesting
-    public List<? extends IMutation> getMutations(BatchQueryOptions options,
+    public List<? extends IMutation> getMutations(ClientState state,
+                                                  BatchQueryOptions options,
                                                   boolean local,
                                                   long batchTimestamp,
                                                   int nowInSeconds,
@@ -280,7 +283,7 @@
             ModificationStatement stmt = statements.get(i);
             if (metadata != null && !stmt.metadata.id.equals(metadata.id))
                 metadata = null;
-            List<ByteBuffer> stmtPartitionKeys = stmt.buildPartitionKeyNames(options.forStatement(i));
+            List<ByteBuffer> stmtPartitionKeys = stmt.buildPartitionKeyNames(options.forStatement(i), state);
             partitionKeys.add(stmtPartitionKeys);
             HashMultiset<ByteBuffer> perKeyCountsForTable = partitionCounts.computeIfAbsent(stmt.metadata.id, k -> HashMultiset.create());
             for (int stmtIdx = 0, stmtSize = stmtPartitionKeys.size(); stmtIdx < stmtSize; stmtIdx++)
@@ -305,7 +308,7 @@
             }
             QueryOptions statementOptions = options.forStatement(i);
             long timestamp = attrs.getTimestamp(batchTimestamp, statementOptions);
-            statement.addUpdates(collector, partitionKeys.get(i), statementOptions, local, timestamp, nowInSeconds, queryStartNanoTime);
+            statement.addUpdates(collector, partitionKeys.get(i), state, statementOptions, local, timestamp, nowInSeconds, queryStartNanoTime);
         }
 
         if (tablesWithZeroGcGs != null)
@@ -348,9 +351,9 @@
             if (size > failThreshold)
             {
                 Tracing.trace(format, tableNames, FBUtilities.prettyPrintMemory(size), FBUtilities.prettyPrintMemory(failThreshold),
-                              FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold_in_kb)");
+                              FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold)");
                 logger.error(format, tableNames, FBUtilities.prettyPrintMemory(size), FBUtilities.prettyPrintMemory(failThreshold),
-                             FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold_in_kb)");
+                             FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold)");
                 throw new InvalidRequestException("Batch too large");
             }
             else if (logger.isWarnEnabled())
@@ -408,13 +411,21 @@
         if (options.getSerialConsistency() == null)
             throw new InvalidRequestException("Invalid empty serial consistency level");
 
+        ClientState clientState = queryState.getClientState();
+        Guardrails.writeConsistencyLevels.guard(EnumSet.of(options.getConsistency(), options.getSerialConsistency()),
+                                                clientState);
+
+        for (int i = 0; i < statements.size(); i++ )
+            statements.get(i).validateDiskUsage(options.forStatement(i), clientState);
+
         if (hasConditions)
             return executeWithConditions(options, queryState, queryStartNanoTime);
 
         if (updatesVirtualTables)
             executeInternalWithoutCondition(queryState, options, queryStartNanoTime);
         else    
-            executeWithoutConditions(getMutations(options, false, timestamp, nowInSeconds, queryStartNanoTime), options.getConsistency(), queryStartNanoTime);
+            executeWithoutConditions(getMutations(clientState, options, false, timestamp, nowInSeconds, queryStartNanoTime),
+                                     options.getConsistency(), queryStartNanoTime);
 
         return new ResultMessage.Void();
     }
@@ -486,7 +497,7 @@
             ModificationStatement statement = statements.get(i);
             QueryOptions statementOptions = options.forStatement(i);
             long timestamp = attrs.getTimestamp(batchTimestamp, statementOptions);
-            List<ByteBuffer> pks = statement.buildPartitionKeyNames(statementOptions);
+            List<ByteBuffer> pks = statement.buildPartitionKeyNames(statementOptions, state.getClientState());
             if (statement.getRestrictions().keyIsInRelation())
                 throw new IllegalArgumentException("Batch with conditions cannot span multiple partitions (you cannot use IN on the partition key)");
             if (key == null)
@@ -521,7 +532,7 @@
             }
             else
             {
-                Clustering<?> clustering = Iterables.getOnlyElement(statement.createClustering(statementOptions));
+                Clustering<?> clustering = Iterables.getOnlyElement(statement.createClustering(statementOptions, state.getClientState()));
                 if (statement.hasConditions())
                 {
                     statement.addConditions(clustering, casRequest, statementOptions);
@@ -550,7 +561,7 @@
         if (hasConditions)
             return executeInternalWithConditions(batchOptions, queryState);
 
-        executeInternalWithoutCondition(queryState, batchOptions, System.nanoTime());
+        executeInternalWithoutCondition(queryState, batchOptions, nanoTime());
         return new ResultMessage.Void();
     }
 
@@ -559,7 +570,7 @@
         long timestamp = batchOptions.getTimestamp(queryState);
         int nowInSeconds = batchOptions.getNowInSeconds(queryState);
 
-        for (IMutation mutation : getMutations(batchOptions, true, timestamp, nowInSeconds, queryStartNanoTime))
+        for (IMutation mutation : getMutations(queryState.getClientState(), batchOptions, true, timestamp, nowInSeconds, queryStartNanoTime))
             mutation.apply();
         return null;
     }
@@ -576,7 +587,7 @@
         long timestamp = options.getTimestamp(state);
         int nowInSeconds = options.getNowInSeconds(state);
 
-        try (RowIterator result = ModificationStatement.casInternal(request, timestamp, nowInSeconds))
+        try (RowIterator result = ModificationStatement.casInternal(state.getClientState(), request, timestamp, nowInSeconds))
         {
             ResultSet resultSet =
                 ModificationStatement.buildCasResultSet(ksName,
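
Before executing, the batch now validates per-statement disk usage and runs the requested consistency levels through Guardrails.writeConsistencyLevels. A rough standalone sketch of the warn/fail shape of such a "disallowed values" guardrail; the enum, thresholds, and method names below are hypothetical, not Cassandra's Guardrails API:

// Illustrative sketch of a "disallowed values" guardrail, loosely modelled on the
// writeConsistencyLevels check added above. All names and limits here are hypothetical.
import java.util.EnumSet;
import java.util.Set;

public class ConsistencyGuardrailSketch
{
    enum ConsistencyLevel { ANY, ONE, QUORUM, ALL, SERIAL }

    static final Set<ConsistencyLevel> WARNED     = EnumSet.of(ConsistencyLevel.ALL);
    static final Set<ConsistencyLevel> DISALLOWED = EnumSet.of(ConsistencyLevel.ANY);

    static void guardWriteConsistency(Set<ConsistencyLevel> requested)
    {
        for (ConsistencyLevel cl : requested)
        {
            if (DISALLOWED.contains(cl))
                throw new IllegalArgumentException("Write consistency level " + cl + " is not allowed");
            if (WARNED.contains(cl))
                System.out.println("Warning: write consistency level " + cl + " is discouraged");
        }
    }

    public static void main(String[] args)
    {
        guardWriteConsistency(EnumSet.of(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL)); // passes
        guardWriteConsistency(EnumSet.of(ConsistencyLevel.ALL));                             // warns
        try
        {
            guardWriteConsistency(EnumSet.of(ConsistencyLevel.ANY));                         // fails
        }
        catch (IllegalArgumentException e)
        {
            System.out.println(e.getMessage());
        }
    }
}
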
diff --git a/src/java/org/apache/cassandra/cql3/statements/BatchUpdatesCollector.java b/src/java/org/apache/cassandra/cql3/statements/BatchUpdatesCollector.java
index cb88bdd..c346eb9 100644
--- a/src/java/org/apache/cassandra/cql3/statements/BatchUpdatesCollector.java
+++ b/src/java/org/apache/cassandra/cql3/statements/BatchUpdatesCollector.java
@@ -24,13 +24,15 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 
+import org.apache.cassandra.db.commitlog.CommitLogSegment;
 import org.apache.cassandra.db.virtual.VirtualMutation;
+import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 /**
  * Utility class to collect updates.
@@ -131,6 +133,7 @@
             {
                 IMutation mutation = builder.build();
                 mutation.validateIndexedColumns();
+                mutation.validateSize(MessagingService.current_version, CommitLogSegment.ENTRY_OVERHEAD_SIZE);
                 ms.add(mutation);
             }
         }
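
The added validateSize call rejects a mutation whose serialized form, plus the commit-log entry overhead, would exceed the maximum mutation size, before the batch is applied or shipped anywhere. A toy sketch of that pre-flight size check with hypothetical limits (the real constants live in Mutation and CommitLogSegment):

// Illustrative sketch of a pre-flight size check, loosely following the
// mutation.validateSize(...) call added above. Limits and names are hypothetical.
public class MutationSizeCheck
{
    static final long MAX_MUTATION_SIZE = 16 * 1024 * 1024; // hypothetical cap
    static final int ENTRY_OVERHEAD = 12;                   // hypothetical per-entry overhead

    static void validateSize(long serializedSize, int overhead)
    {
        long total = serializedSize + overhead;
        if (total > MAX_MUTATION_SIZE)
            throw new IllegalArgumentException(
                "Mutation of " + total + " bytes is too large for the maximum size of " + MAX_MUTATION_SIZE);
    }

    public static void main(String[] args)
    {
        validateSize(1024, ENTRY_OVERHEAD);                  // fine
        try
        {
            validateSize(MAX_MUTATION_SIZE, ENTRY_OVERHEAD); // overhead pushes it over the cap
        }
        catch (IllegalArgumentException e)
        {
            System.out.println(e.getMessage());
        }
    }
}
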
diff --git a/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java b/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java
index 563a639..3db4793 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java
@@ -22,6 +22,7 @@
 
 import com.google.common.collect.*;
 
+import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.index.IndexRegistry;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.*;
@@ -34,7 +35,11 @@
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.service.CASRequest;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.paxos.Ballot;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
+
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
 
@@ -176,7 +181,7 @@
         return new RegularAndStaticColumns(statics, regulars);
     }
 
-    public SinglePartitionReadQuery readCommand(int nowInSec)
+    public SinglePartitionReadCommand readCommand(int nowInSec)
     {
         assert staticConditions != null || !conditions.isEmpty();
 
@@ -186,7 +191,7 @@
         // With only a static condition, we still want to make the distinction between a non-existing partition and one
         // that exists (has some live data) but has not static content. So we query the first live row of the partition.
         if (conditions.isEmpty())
-            return SinglePartitionReadQuery.create(metadata,
+            return SinglePartitionReadCommand.create(metadata,
                                                    nowInSec,
                                                    columnFilter,
                                                    RowFilter.NONE,
@@ -195,7 +200,7 @@
                                                    new ClusteringIndexSliceFilter(Slices.ALL, false));
 
         ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(conditions.navigableKeySet(), false);
-        return SinglePartitionReadQuery.create(metadata, nowInSec, key, columnFilter, filter);
+        return SinglePartitionReadCommand.create(metadata, nowInSec, key, columnFilter, filter);
     }
 
     /**
@@ -228,13 +233,14 @@
         return builder.build();
     }
 
-    public PartitionUpdate makeUpdates(FilteredPartition current) throws InvalidRequestException
+    public PartitionUpdate makeUpdates(FilteredPartition current, ClientState clientState, Ballot ballot) throws InvalidRequestException
     {
         PartitionUpdate.Builder updateBuilder = new PartitionUpdate.Builder(metadata, key, updatedColumns(), conditions.size());
+        long timeUuidNanos = 0;
         for (RowUpdate upd : updates)
-            upd.applyUpdates(current, updateBuilder);
+            timeUuidNanos = upd.applyUpdates(current, updateBuilder, clientState, ballot.msb(), timeUuidNanos);
         for (RangeDeletion upd : rangeDeletions)
-            upd.applyUpdates(current, updateBuilder);
+            upd.applyUpdates(current, updateBuilder, clientState);
 
         PartitionUpdate partitionUpdate = updateBuilder.build();
         IndexRegistry.obtain(metadata).validate(partitionUpdate);
@@ -242,6 +248,24 @@
         return partitionUpdate;
     }
 
+    private static class CASUpdateParameters extends UpdateParameters
+    {
+        final long timeUuidMsb;
+        long timeUuidNanos;
+
+        public CASUpdateParameters(TableMetadata metadata, RegularAndStaticColumns updatedColumns, ClientState state, QueryOptions options, long timestamp, int nowInSec, int ttl, Map<DecoratedKey, Partition> prefetchedRows, long timeUuidMsb, long timeUuidNanos) throws InvalidRequestException
+        {
+            super(metadata, updatedColumns, state, options, timestamp, nowInSec, ttl, prefetchedRows);
+            this.timeUuidMsb = timeUuidMsb;
+            this.timeUuidNanos = timeUuidNanos;
+        }
+
+        public byte[] nextTimeUUIDAsBytes()
+        {
+            return TimeUUID.toBytes(timeUuidMsb, TimeUUIDType.signedBytesToNativeLong(timeUuidNanos++));
+        }
+    }
+
     /**
      * Due to some operation on lists, we can't generate the update that a given Modification statement does before
      * we get the values read by the initial read of Paxos. A RowUpdate thus just store the relevant information
@@ -265,18 +289,14 @@
             this.nowInSeconds = nowInSeconds;
         }
 
-        void applyUpdates(FilteredPartition current, PartitionUpdate.Builder updateBuilder)
+        long applyUpdates(FilteredPartition current, PartitionUpdate.Builder updateBuilder, ClientState state, long timeUuidMsb, long timeUuidNanos)
         {
             Map<DecoratedKey, Partition> map = stmt.requiresRead() ? Collections.singletonMap(key, current) : null;
-            UpdateParameters params =
-                new UpdateParameters(metadata,
-                                     updateBuilder.columns(),
-                                     options,
-                                     timestamp,
-                                     nowInSeconds,
-                                     stmt.getTimeToLive(options),
-                                     map);
+            CASUpdateParameters params =
+                new CASUpdateParameters(metadata, updateBuilder.columns(), state, options, timestamp, nowInSeconds,
+                                     stmt.getTimeToLive(options), map, timeUuidMsb, timeUuidNanos);
             stmt.addUpdateForKey(updateBuilder, clustering, params);
+            return params.timeUuidNanos;
         }
     }
 
@@ -297,13 +317,14 @@
             this.nowInSeconds = nowInSeconds;
         }
 
-        void applyUpdates(FilteredPartition current, PartitionUpdate.Builder updateBuilder)
+        void applyUpdates(FilteredPartition current, PartitionUpdate.Builder updateBuilder, ClientState state)
         {
             // No slice statements currently require a read, but this maintains consistency with RowUpdate, and future proofs us
             Map<DecoratedKey, Partition> map = stmt.requiresRead() ? Collections.singletonMap(key, current) : null;
             UpdateParameters params =
                 new UpdateParameters(metadata,
                                      updateBuilder.columns(),
+                                     state,
                                      options,
                                      timestamp,
                                      nowInSeconds,
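
CASUpdateParameters threads a most-significant-bits value taken from the Paxos ballot plus an incrementing counter through every RowUpdate of a proposal, so each generated time-UUID is unique but tied to the same ballot; note how applyUpdates returns the counter so it carries over to the next update. A simplified sketch of that "fixed high bits, incrementing low bits" idea using java.util.UUID; the real code goes through TimeUUID and TimeUUIDType instead:

// Illustrative sketch only: fixed UUID high bits shared by one CAS proposal,
// low bits taken from an incrementing counter. The patch uses TimeUUID/TimeUUIDType;
// this just shows the uniqueness-within-a-ballot idea.
import java.util.UUID;

public class BallotScopedUuids
{
    private final long ballotMsb; // shared by every update in the proposal
    private long counter;         // incremented per generated UUID

    BallotScopedUuids(long ballotMsb, long initialCounter)
    {
        this.ballotMsb = ballotMsb;
        this.counter = initialCounter;
    }

    UUID next()
    {
        return new UUID(ballotMsb, counter++);
    }

    public static void main(String[] args)
    {
        BallotScopedUuids gen = new BallotScopedUuids(0x1234_5678_9ABC_DEF0L, 0);
        UUID a = gen.next();
        UUID b = gen.next();
        // Same high bits (same ballot), distinct low bits (per-update counter).
        System.out.println(a.getMostSignificantBits() == b.getMostSignificantBits()); // true
        System.out.println(a.equals(b));                                              // false
    }
}
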
diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateRoleStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateRoleStatement.java
index b3333fc..04f183d 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CreateRoleStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CreateRoleStatement.java
@@ -85,6 +85,7 @@
             DatabaseDescriptor.getNetworkAuthorizer().setRoleDatacenters(role, dcPermissions);
         }
         grantPermissionsToCreator(state);
+
         return null;
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java b/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
index 1a92196..be01481 100644
--- a/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
@@ -145,8 +145,6 @@
                                                         Conditions conditions,
                                                         Attributes attrs)
         {
-            checkFalse(metadata.isVirtual(), "Virtual tables don't support DELETE statements");
-
             Operations operations = new Operations(type);
 
             for (Operation.RawDeletion deletion : deletions)
@@ -177,6 +175,8 @@
 
             if (stmt.hasConditions() && !restrictions.hasAllPKColumnsRestrictedByEqualities())
             {
+                checkFalse(stmt.isVirtual(), "DELETE statements must restrict all PRIMARY KEY columns with equality relations");
+
                 checkFalse(operations.appliesToRegularColumns(),
                            "DELETE statements must restrict all PRIMARY KEY columns with equality relations in order to delete non static columns");
 
@@ -198,6 +198,6 @@
     @Override
     public AuditLogContext getAuditLogContext()
     {
-        return new AuditLogContext(AuditLogEntryType.DELETE, keyspace(), columnFamily());
+        return new AuditLogContext(AuditLogEntryType.DELETE, keyspace(), table());
     }
 }
diff --git a/src/java/org/apache/cassandra/cql3/statements/DescribeStatement.java b/src/java/org/apache/cassandra/cql3/statements/DescribeStatement.java
index 16671dd..b1f576e 100644
--- a/src/java/org/apache/cassandra/cql3/statements/DescribeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/DescribeStatement.java
@@ -132,7 +132,7 @@
     @Override
     public ResultMessage executeLocally(QueryState state, QueryOptions options)
     {
-        Keyspaces keyspaces = Schema.instance.snapshot();
+        Keyspaces keyspaces = Schema.instance.distributedAndLocalKeyspaces();
         UUID schemaVersion = Schema.instance.getVersion();
 
         keyspaces = Keyspaces.builder()
diff --git a/src/java/org/apache/cassandra/cql3/statements/GrantPermissionsStatement.java b/src/java/org/apache/cassandra/cql3/statements/GrantPermissionsStatement.java
index 3db20e3..824c485 100644
--- a/src/java/org/apache/cassandra/cql3/statements/GrantPermissionsStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/GrantPermissionsStatement.java
@@ -18,9 +18,11 @@
 package org.apache.cassandra.cql3.statements;
 
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import org.apache.cassandra.audit.AuditLogContext;
 import org.apache.cassandra.audit.AuditLogEntryType;
+import org.apache.cassandra.auth.IAuthorizer;
 import org.apache.cassandra.auth.IResource;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -28,6 +30,7 @@
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.exceptions.RequestValidationException;
 import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.transport.messages.ResultMessage;
 
 public class GrantPermissionsStatement extends PermissionsManagementStatement
@@ -39,7 +42,25 @@
 
     public ResultMessage execute(ClientState state) throws RequestValidationException, RequestExecutionException
     {
-        DatabaseDescriptor.getAuthorizer().grant(state.getUser(), permissions, resource, grantee);
+        IAuthorizer authorizer = DatabaseDescriptor.getAuthorizer();
+        Set<Permission> granted = authorizer.grant(state.getUser(), permissions, resource, grantee);
+
+        // We want to warn the client if not all of the specified permissions were actually granted and the client
+        // did not specify ALL in the query.
+        if (!granted.equals(permissions) && !permissions.equals(Permission.ALL))
+        {
+            String permissionsStr = permissions.stream()
+                                               .filter(permission -> !granted.contains(permission))
+                                               .sorted(Permission::compareTo) // guarantee the order for testing
+                                               .map(Permission::name)
+                                               .collect(Collectors.joining(", "));
+
+            ClientWarn.instance.warn(String.format("Role '%s' was already granted %s on %s",
+                                                   grantee.getRoleName(),
+                                                   permissionsStr,
+                                                   resource));
+        }
+
         return null;
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
index a072da5..ab36ec9 100644
--- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
@@ -21,14 +21,16 @@
 import java.util.*;
 
 import com.google.common.collect.HashMultiset;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Maps;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.auth.Permission;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.ValueAccessor;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.ReplicaLayout;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
@@ -52,16 +54,20 @@
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.service.StorageProxy;
-import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.disk.usage.DiskUsageBroadcaster;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.BallotGenerator;
+import org.apache.cassandra.service.paxos.Commit.Proposal;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.triggers.TriggerExecutor;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MD5Digest;
 import org.apache.cassandra.utils.Pair;
-import org.apache.cassandra.utils.UUIDGen;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkNull;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /*
  * Abstract parent class of individual modifications, i.e. INSERT, UPDATE and DELETE.
@@ -202,7 +208,7 @@
         return metadata.keyspace;
     }
 
-    public String columnFamily()
+    public String table()
     {
         return metadata.name;
     }
@@ -247,7 +253,7 @@
 
         // MV updates need to get the current state from the table, and might update the views
         // Require Permission.SELECT on the base table, and Permission.MODIFY on the views
-        Iterator<ViewMetadata> views = View.findAll(keyspace(), columnFamily()).iterator();
+        Iterator<ViewMetadata> views = View.findAll(keyspace(), table()).iterator();
         if (views.hasNext())
         {
             state.ensureTablePermission(metadata, Permission.SELECT);
@@ -267,8 +273,31 @@
         checkFalse(isCounter() && attrs.isTimestampSet(), "Cannot provide custom timestamp for counter updates");
         checkFalse(isCounter() && attrs.isTimeToLiveSet(), "Cannot provide custom TTL for counter updates");
         checkFalse(isView(), "Cannot directly modify a materialized view");
+        checkFalse(isVirtual() && attrs.isTimestampSet(), "Custom timestamp is not supported by virtual tables");
         checkFalse(isVirtual() && attrs.isTimeToLiveSet(), "Expiring columns are not supported by virtual tables");
         checkFalse(isVirtual() && hasConditions(), "Conditional updates are not supported by virtual tables");
+
+        if (attrs.isTimestampSet())
+            Guardrails.userTimestampsEnabled.ensureEnabled(state);
+    }
+
+    public void validateDiskUsage(QueryOptions options, ClientState state)
+    {
+        // Reject writes if any replica exceeds the disk usage failure limit, or warn if it exceeds the warn limit
+        if (Guardrails.replicaDiskUsage.enabled(state) && DiskUsageBroadcaster.instance.hasStuffedOrFullNode())
+        {
+            Keyspace keyspace = Keyspace.open(keyspace());
+
+            for (ByteBuffer key : buildPartitionKeyNames(options, state))
+            {
+                Token token = metadata().partitioner.getToken(key);
+
+                for (Replica replica : ReplicaLayout.forTokenWriteLiveAndDown(keyspace, token).all())
+                {
+                    Guardrails.replicaDiskUsage.guard(replica.endpoint(), state);
+                }
+            }
+        }
     }
 
     public RegularAndStaticColumns updatedColumns()
@@ -326,23 +355,23 @@
         return conditions.isIfExists();
     }
 
-    public List<ByteBuffer> buildPartitionKeyNames(QueryOptions options)
+    public List<ByteBuffer> buildPartitionKeyNames(QueryOptions options, ClientState state)
     throws InvalidRequestException
     {
-        List<ByteBuffer> partitionKeys = restrictions.getPartitionKeys(options);
+        List<ByteBuffer> partitionKeys = restrictions.getPartitionKeys(options, state);
         for (ByteBuffer key : partitionKeys)
             QueryProcessor.validateKey(key);
 
         return partitionKeys;
     }
 
-    public NavigableSet<Clustering<?>> createClustering(QueryOptions options)
+    public NavigableSet<Clustering<?>> createClustering(QueryOptions options, ClientState state)
     throws InvalidRequestException
     {
         if (appliesOnlyToStaticColumns() && !restrictions.hasClusteringColumnsRestrictions())
             return FBUtilities.singleton(CBuilder.STATIC_BUILDER.build(), metadata().comparator);
 
-        return restrictions.getClusteringColumns(options);
+        return restrictions.getClusteringColumns(options, state);
     }
 
     /**
@@ -367,7 +396,12 @@
 
     public boolean requiresRead()
     {
-        // Lists SET operation incurs a read.
+        // A subset of operations require a read before write:
+        // * Setting list element by index
+        // * Deleting list element by index
+        // * Deleting list element by value
+        // * Performing addition on a StringType (i.e. concatenation, only supported for CAS operations)
+        // * Performing addition on a NumberType, again only supported for CAS operations.
         return !requiresRead.isEmpty();
     }
 
@@ -401,7 +435,7 @@
                                                            metadata().partitioner.decorateKey(key),
                                                            filter));
 
-        SinglePartitionReadCommand.Group group = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
+        SinglePartitionReadCommand.Group group = SinglePartitionReadCommand.Group.create(commands, DataLimits.NONE);
 
         if (local)
         {
@@ -449,6 +483,9 @@
         if (options.getConsistency() == null)
             throw new InvalidRequestException("Invalid empty consistency level");
 
+        Guardrails.writeConsistencyLevels.guard(EnumSet.of(options.getConsistency(), options.getSerialConsistency()),
+                                                queryState.getClientState());
+
         return hasConditions()
              ? executeWithCondition(queryState, options, queryStartNanoTime)
              : executeWithoutCondition(queryState, options, queryStartNanoTime);
@@ -466,8 +503,11 @@
         else
             cl.validateForWrite();
 
+        validateDiskUsage(options, queryState.getClientState());
+
         List<? extends IMutation> mutations =
-            getMutations(options,
+            getMutations(queryState.getClientState(),
+                         options,
                          false,
                          options.getTimestamp(queryState),
                          options.getNowInSeconds(queryState),
@@ -483,7 +523,7 @@
         CQL3CasRequest request = makeCasRequest(queryState, options);
 
         try (RowIterator result = StorageProxy.cas(keyspace(),
-                                                   columnFamily(),
+                                                   table(),
                                                    request.key,
                                                    request,
                                                    options.getSerialConsistency(),
@@ -498,7 +538,8 @@
 
     private CQL3CasRequest makeCasRequest(QueryState queryState, QueryOptions options)
     {
-        List<ByteBuffer> keys = buildPartitionKeyNames(options);
+        ClientState clientState = queryState.getClientState();
+        List<ByteBuffer> keys = buildPartitionKeyNames(options, clientState);
         // We don't support IN for CAS operation so far
         checkFalse(restrictions.keyIsInRelation(),
                    "IN on the partition key is not supported with conditional %s",
@@ -512,7 +553,7 @@
                    "IN on the clustering key columns is not supported with conditional %s",
                     type.isUpdate()? "updates" : "deletions");
 
-        Clustering<?> clustering = Iterables.getOnlyElement(createClustering(options));
+        Clustering<?> clustering = Iterables.getOnlyElement(createClustering(options, clientState));
         CQL3CasRequest request = new CQL3CasRequest(metadata(), key, conditionColumns(), updatesRegularRows(), updatesStaticRow());
 
         addConditions(clustering, request, options);
@@ -541,7 +582,7 @@
 
     private ResultSet buildCasResultSet(RowIterator partition, QueryState state, QueryOptions options)
     {
-        return buildCasResultSet(keyspace(), columnFamily(), partition, getColumnsWithConditions(), false, state, options);
+        return buildCasResultSet(keyspace(), table(), partition, getColumnsWithConditions(), false, state, options);
     }
 
     static ResultSet buildCasResultSet(String ksName,
@@ -622,7 +663,7 @@
     {
         return hasConditions()
                ? executeInternalWithCondition(queryState, options)
-               : executeInternalWithoutCondition(queryState, options, System.nanoTime());
+               : executeInternalWithoutCondition(queryState, options, nanoTime());
     }
 
     public ResultMessage executeInternalWithoutCondition(QueryState queryState, QueryOptions options, long queryStartNanoTime)
@@ -630,7 +671,7 @@
     {
         long timestamp = options.getTimestamp(queryState);
         int nowInSeconds = options.getNowInSeconds(queryState);
-        for (IMutation mutation : getMutations(options, true, timestamp, nowInSeconds, queryStartNanoTime))
+        for (IMutation mutation : getMutations(queryState.getClientState(), options, true, timestamp, nowInSeconds, queryStartNanoTime))
             mutation.apply();
         return null;
     }
@@ -639,15 +680,15 @@
     {
         CQL3CasRequest request = makeCasRequest(state, options);
 
-        try (RowIterator result = casInternal(request, options.getTimestamp(state), options.getNowInSeconds(state)))
+        try (RowIterator result = casInternal(state.getClientState(), request, options.getTimestamp(state), options.getNowInSeconds(state)))
         {
             return new ResultMessage.Rows(buildCasResultSet(result, state, options));
         }
     }
 
-    static RowIterator casInternal(CQL3CasRequest request, long timestamp, int nowInSeconds)
+    static RowIterator casInternal(ClientState state, CQL3CasRequest request, long timestamp, int nowInSeconds)
     {
-        UUID ballot = UUIDGen.getTimeUUIDFromMicros(timestamp);
+        Ballot ballot = BallotGenerator.Global.atUnixMicros(timestamp, NONE);
 
         SinglePartitionReadQuery readCommand = request.readCommand(nowInSeconds);
         FilteredPartition current;
@@ -660,10 +701,10 @@
         if (!request.appliesTo(current))
             return current.rowIterator();
 
-        PartitionUpdate updates = request.makeUpdates(current);
+        PartitionUpdate updates = request.makeUpdates(current, state, ballot);
         updates = TriggerExecutor.instance.execute(updates);
 
-        Commit proposal = Commit.newProposal(ballot, updates);
+        Proposal proposal = Proposal.of(ballot, updates);
         proposal.makeMutation().apply();
         return null;
     }
@@ -671,27 +712,30 @@
     /**
      * Convert statement into a list of mutations to apply on the server
      *
+     * @param state the client state
      * @param options value for prepared statement markers
      * @param local if true, any requests (for collections) performed by getMutation should be done locally only.
      * @param timestamp the current timestamp in microseconds to use if no timestamp is user provided.
      *
      * @return list of the mutations
      */
-    private List<? extends IMutation> getMutations(QueryOptions options,
-                                                         boolean local,
-                                                         long timestamp,
-                                                         int nowInSeconds,
-                                                         long queryStartNanoTime)
+    private List<? extends IMutation> getMutations(ClientState state,
+                                                   QueryOptions options,
+                                                   boolean local,
+                                                   long timestamp,
+                                                   int nowInSeconds,
+                                                   long queryStartNanoTime)
     {
-        List<ByteBuffer> keys = buildPartitionKeyNames(options);
+        List<ByteBuffer> keys = buildPartitionKeyNames(options, state);
         HashMultiset<ByteBuffer> perPartitionKeyCounts = HashMultiset.create(keys);
         SingleTableUpdatesCollector collector = new SingleTableUpdatesCollector(metadata, updatedColumns, perPartitionKeyCounts);
-        addUpdates(collector, keys, options, local, timestamp, nowInSeconds, queryStartNanoTime);
+        addUpdates(collector, keys, state, options, local, timestamp, nowInSeconds, queryStartNanoTime);
         return collector.toMutations();
     }
 
     final void addUpdates(UpdatesCollector collector,
                           List<ByteBuffer> keys,
+                          ClientState state,
                           QueryOptions options,
                           boolean local,
                           long timestamp,
@@ -708,6 +752,7 @@
 
             UpdateParameters params = makeUpdateParameters(keys,
                                                            new ClusteringIndexSliceFilter(slices, false),
+                                                           state,
                                                            options,
                                                            DataLimits.NONE,
                                                            local,
@@ -727,13 +772,13 @@
         }
         else
         {
-            NavigableSet<Clustering<?>> clusterings = createClustering(options);
+            NavigableSet<Clustering<?>> clusterings = createClustering(options, state);
 
             // If some of the restrictions were unspecified (e.g. empty IN restrictions) we do not need to do anything.
             if (restrictions.hasClusteringColumnsRestrictions() && clusterings.isEmpty())
                 return;
 
-            UpdateParameters params = makeUpdateParameters(keys, clusterings, options, local, timestamp, nowInSeconds, queryStartNanoTime);
+            UpdateParameters params = makeUpdateParameters(keys, clusterings, state, options, local, timestamp, nowInSeconds, queryStartNanoTime);
 
             for (ByteBuffer key : keys)
             {
@@ -770,7 +815,7 @@
         }
     }
 
-    Slices createSlices(QueryOptions options)
+    public Slices createSlices(QueryOptions options)
     {
         SortedSet<ClusteringBound<?>> startBounds = restrictions.getClusteringColumnsBounds(Bound.START, options);
         SortedSet<ClusteringBound<?>> endBounds = restrictions.getClusteringColumnsBounds(Bound.END, options);
@@ -780,6 +825,7 @@
 
     private UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
                                                   NavigableSet<Clustering<?>> clusterings,
+                                                  ClientState state,
                                                   QueryOptions options,
                                                   boolean local,
                                                   long timestamp,
@@ -789,6 +835,7 @@
         if (clusterings.contains(Clustering.STATIC_CLUSTERING))
             return makeUpdateParameters(keys,
                                         new ClusteringIndexSliceFilter(Slices.ALL, false),
+                                        state,
                                         options,
                                         DataLimits.cqlLimits(1),
                                         local,
@@ -798,6 +845,7 @@
 
         return makeUpdateParameters(keys,
                                     new ClusteringIndexNamesFilter(clusterings, false),
+                                    state,
                                     options,
                                     DataLimits.NONE,
                                     local,
@@ -808,6 +856,7 @@
 
     private UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
                                                   ClusteringIndexFilter filter,
+                                                  ClientState state,
                                                   QueryOptions options,
                                                   DataLimits limits,
                                                   boolean local,
@@ -827,6 +876,7 @@
 
         return new UpdateParameters(metadata(),
                                     updatedColumns(),
+                                    state,
                                     options,
                                     getTimestamp(timestamp, options),
                                     nowInSeconds,
@@ -836,9 +886,19 @@
 
     private Slices toSlices(SortedSet<ClusteringBound<?>> startBounds, SortedSet<ClusteringBound<?>> endBounds)
     {
+        return toSlices(metadata, startBounds, endBounds);
+    }
+
+    public static Slices toSlices(TableMetadata metadata, SortedSet<ClusteringBound<?>> startBounds, SortedSet<ClusteringBound<?>> endBounds)
+    {
+        return toSlices(metadata.comparator, startBounds, endBounds);
+    }
+
+    public static Slices toSlices(ClusteringComparator comparator, SortedSet<ClusteringBound<?>> startBounds, SortedSet<ClusteringBound<?>> endBounds)
+    {
         assert startBounds.size() == endBounds.size();
 
-        Slices.Builder builder = new Slices.Builder(metadata().comparator);
+        Slices.Builder builder = new Slices.Builder(comparator);
 
         Iterator<ClusteringBound<?>> starts = startBounds.iterator();
         Iterator<ClusteringBound<?>> ends = endBounds.iterator();
@@ -846,7 +906,7 @@
         while (starts.hasNext())
         {
             Slice slice = Slice.make(starts.next(), ends.next());
-            if (!slice.isEmpty(metadata().comparator))
+            if (!slice.isEmpty(comparator))
             {
                 builder.add(slice);
             }
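
The toSlices refactor extracts static helpers so other code can build Slices from a comparator without a ModificationStatement instance; the core loop pairs the i-th start bound with the i-th end bound and drops slices that are empty under the comparator. A stripped-down sketch of that pairing over plain integer bounds (the real method works on ClusteringBound and ClusteringComparator):

// Illustrative sketch of pairing sorted start/end bounds into non-empty slices,
// mirroring the static toSlices(...) helper above but using plain int bounds.
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class SlicePairing
{
    static List<int[]> toSlices(SortedSet<Integer> startBounds, SortedSet<Integer> endBounds)
    {
        assert startBounds.size() == endBounds.size();

        List<int[]> slices = new ArrayList<>();
        Iterator<Integer> starts = startBounds.iterator();
        Iterator<Integer> ends = endBounds.iterator();

        while (starts.hasNext())
        {
            int start = starts.next();
            int end = ends.next();
            if (start < end) // skip empty slices, as the real code does via isEmpty(comparator)
                slices.add(new int[]{ start, end });
        }
        return slices;
    }

    public static void main(String[] args)
    {
        SortedSet<Integer> starts = new TreeSet<>(List.of(1, 5, 9));
        SortedSet<Integer> ends   = new TreeSet<>(List.of(3, 5, 12));
        for (int[] slice : toSlices(starts, ends))
            System.out.println(slice[0] + ".." + slice[1]); // prints 1..3 and 9..12; 5..5 is dropped as empty
    }
}
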
diff --git a/src/java/org/apache/cassandra/cql3/statements/PropertyDefinitions.java b/src/java/org/apache/cassandra/cql3/statements/PropertyDefinitions.java
index 0c63c2e..b6112fa 100644
--- a/src/java/org/apache/cassandra/cql3/statements/PropertyDefinitions.java
+++ b/src/java/org/apache/cassandra/cql3/statements/PropertyDefinitions.java
@@ -60,6 +60,19 @@
         }
     }
 
+    /**
+     * Returns the names of all the properties that are updated by this object.
+     */
+    public Set<String> updatedProperties()
+    {
+        return properties.keySet();
+    }
+
+    public void removeProperty(String name)
+    {
+        properties.remove(name);
+    }
+
     protected String getSimple(String name) throws SyntaxException
     {
         Object val = properties.get(name);
diff --git a/src/java/org/apache/cassandra/cql3/statements/RequestValidations.java b/src/java/org/apache/cassandra/cql3/statements/RequestValidations.java
index f351788..cdaac98 100644
--- a/src/java/org/apache/cassandra/cql3/statements/RequestValidations.java
+++ b/src/java/org/apache/cassandra/cql3/statements/RequestValidations.java
@@ -26,42 +26,136 @@
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
-import static org.apache.commons.lang3.ArrayUtils.EMPTY_OBJECT_ARRAY;
-
 /**
  * Utility methods use to perform request validation.
+ *
+ * <p>This class uses overloaded methods to allow specifying different numbers of message arguments. While
+ * this introduces some clutter in the API, it avoids the array allocation, initialization, and garbage
+ * collection overhead incurred by varargs calls.</p>
+ *
+ * <b>Warning about performance</b>
+ *
+ * <p>The goal of this class is to improve readability of code, but in some circumstances this may come at a
+ * significant performance cost. Remember that argument values for message construction must all be computed eagerly,
+ * and autoboxing may happen as well, even when the check succeeds. If the message arguments are expensive to
+ * create, you should use the customary form:
+ *  <pre>
+ *      if (value < 0.0)
+ *          throw RequestValidations.invalidRequest("negative value: %s", toReadableText(value));
+ *  </pre>
+ * </p>
  */
 public final class RequestValidations
 {
     /**
-     * Checks that the specified expression is <code>true</code>. If not an <code>InvalidRequestException</code> will
+     * Checks that the specified expression is {@code true}. If not an {@code InvalidRequestException} will
      * be thrown.
      *
      * @param expression the expression to test
      * @param message the error message
-     * @throws InvalidRequestException if the specified expression is <code>false</code>.
+     * @throws InvalidRequestException if the specified expression is {@code false}.
      */
     public static void checkTrue(boolean expression, String message) throws InvalidRequestException
     {
-        checkTrue(expression, message, EMPTY_OBJECT_ARRAY);
+        if (!expression)
+            throw invalidRequest(message);
     }
 
     /**
-     * Checks that the specified expression is <code>true</code>. If not an <code>InvalidRequestException</code> will
+     * Checks that the specified expression is <code>true</code>. If not an {@code InvalidRequestException} will
      * be thrown.
      *
      * @param expression the expression to test
      * @param messageTemplate the template used to build the error message
-     * @param messageArgs the message arguments
-     * @throws InvalidRequestException if the specified expression is <code>false</code>.
+     * @param messageArg the message argument
+     * @throws InvalidRequestException if the specified expression is {@code false}.
      */
     public static void checkTrue(boolean expression,
                                  String messageTemplate,
-                                 Object... messageArgs)
-                                 throws InvalidRequestException
+                                 Object messageArg) throws InvalidRequestException
     {
         if (!expression)
-            throw invalidRequest(messageTemplate, messageArgs);
+            throw invalidRequest(messageTemplate, messageArg);
+    }
+
+    /**
+     * Checks that the specified expression is <code>true</code>. If not an {@code InvalidRequestException} will
+     * be thrown.
+     *
+     * @param expression the expression to test
+     * @param messageTemplate the template used to build the error message
+     * @param arg1 the first message argument
+     * @param arg2 the second message argument
+     * @throws InvalidRequestException if the specified expression is {@code false}.
+     */
+    public static void checkTrue(boolean expression,
+                                 String messageTemplate,
+                                 Object arg1,
+                                 Object arg2) throws InvalidRequestException
+    {
+        if (!expression)
+            throw invalidRequest(messageTemplate, arg1, arg2);
+    }
+
+    /**
+     * Checks that the specified expression is {@code true}. If not, an {@code InvalidRequestException} will
+     * be thrown.
+     *
+     * @param expression the expression to test
+     * @param messageTemplate the template used to build the error message
+     * @param arg1 the first message argument
+     * @param arg2 the second message argument
+     * @param arg3 the third message argument
+     * @throws InvalidRequestException if the specified expression is {@code false}.
+     */
+    public static void checkTrue(boolean expression,
+                                 String messageTemplate,
+                                 Object arg1,
+                                 Object arg2,
+                                 Object arg3) throws InvalidRequestException
+    {
+        if (!expression)
+            throw invalidRequest(messageTemplate, arg1, arg2, arg3);
+    }
+
+    /**
+     * Checks that the specified collection is NOT <code>empty</code>.
+     * If it is, an {@code InvalidRequestException} will be thrown.
+     *
+     * @param collection the collection to test
+     * @param messageTemplate the template used to build the error message
+     * @param messageArg the message argument
+     * @return the collection
+     * @throws InvalidRequestException if the specified collection is <code>empty</code>.
+     */
+    public static <T extends Collection<E>, E> T checkNotEmpty(T collection,
+                                                               String messageTemplate,
+                                                               Object messageArg)
+                                                               throws InvalidRequestException
+    {
+        checkTrue(!collection.isEmpty(), messageTemplate, messageArg);
+        return collection;
+    }
+
+    /**
+     * Checks that the specified collection is NOT <code>empty</code>.
+     * If it is, an {@code InvalidRequestException} will be thrown.
+     *
+     * @param collection the collection to test
+     * @param messageTemplate the template used to build the error message
+     * @param arg1 the first message argument
+     * @param arg2 the second message argument
+     * @return the collection
+     * @throws InvalidRequestException if the specified collection is <code>empty</code>.
+     */
+    public static <T extends Collection<E>, E> T checkNotEmpty(T collection,
+                                                               String messageTemplate,
+                                                               Object arg1,
+                                                               Object arg2)
+                                                               throws InvalidRequestException
+    {
+        checkTrue(!collection.isEmpty(), messageTemplate, arg1, arg2);
+        return collection;
     }
 
     /**
@@ -96,24 +190,60 @@
     }
 
     /**
-     * Checks that the specified expression is <code>false</code>. If not an <code>InvalidRequestException</code> will
+     * Checks that the specified expression is {@code false}. If not, an {@code InvalidRequestException} will
      * be thrown.
      *
      * @param expression the expression to test
      * @param messageTemplate the template used to build the error message
-     * @param messageArgs the message arguments
+     * @param messageArg the message argument
      * @throws InvalidRequestException if the specified expression is <code>true</code>.
      */
     public static void checkFalse(boolean expression,
                                   String messageTemplate,
-                                  Object... messageArgs)
-                                  throws InvalidRequestException
+                                  Object messageArg) throws InvalidRequestException
     {
-        checkTrue(!expression, messageTemplate, messageArgs);
+        checkTrue(!expression, messageTemplate, messageArg);
     }
 
     /**
-     * Checks that the specified expression is <code>false</code>. If not an <code>InvalidRequestException</code> will
+     * Checks that the specified expression is {@code false}. If not, an {@code InvalidRequestException} will
+     * be thrown.
+     *
+     * @param expression the expression to test
+     * @param messageTemplate the template used to build the error message
+     * @param arg1 the first message argument
+     * @param arg2 the second message argument
+     * @throws InvalidRequestException if the specified expression is <code>true</code>.
+     */
+    public static void checkFalse(boolean expression,
+                                  String messageTemplate,
+                                  Object arg1,
+                                  Object arg2) throws InvalidRequestException
+    {
+        checkTrue(!expression, messageTemplate, arg1, arg2);
+    }
+
+    /**
+     * Checks that the specified expression is {@code false}. If not, an {@code InvalidRequestException} will
+     * be thrown.
+     *
+     * @param expression the expression to test
+     * @param messageTemplate the template used to build the error message
+     * @param arg1 the first message argument
+     * @param arg2 the second message argument
+     * @param arg3 the third message argument
+     * @throws InvalidRequestException if the specified expression is <code>true</code>.
+     */
+    public static void checkFalse(boolean expression,
+                                  String messageTemplate,
+                                  Object arg1,
+                                  Object arg2,
+                                  Object arg3) throws InvalidRequestException
+    {
+        checkTrue(!expression, messageTemplate, arg1, arg2, arg3);
+    }
+
+    /**
+     * Checks that the specified expression is {@code false}. If not, an {@code InvalidRequestException} will
      * be thrown.
      *
      * @param expression the expression to test
@@ -126,91 +256,114 @@
     }
 
     /**
-     * Checks that the specified object is NOT <code>null</code>.
-     * If it is an <code>InvalidRequestException</code> will be throws.
-     *
-     * @param object the object to test
-     * @param messageTemplate the template used to build the error message
-     * @param messageArgs the message arguments
-     * @return the object
-     * @throws InvalidRequestException if the specified object is <code>null</code>.
-     */
-    public static <T> T checkNotNull(T object, String messageTemplate, Object... messageArgs)
-            throws InvalidRequestException
-    {
-        checkTrue(object != null, messageTemplate, messageArgs);
-        return object;
-    }
-
-    /**
-     * Checks that the specified collections is NOT <code>empty</code>.
-     * If it is an <code>InvalidRequestException</code> will be throws.
-     *
-     * @param collection the collection to test
-     * @param messageTemplate the template used to build the error message
-     * @param messageArgs the message arguments
-     * @return the collection
-     * @throws InvalidRequestException if the specified collection is <code>empty</code>.
-     */
-    public static <T extends Collection<E>, E> T checkNotEmpty(T collection, String messageTemplate, Object... messageArgs)
-            throws InvalidRequestException
-    {
-        checkTrue(!collection.isEmpty(), messageTemplate, messageArgs);
-        return collection;
-    }
-
-    /**
-     * Checks that the specified bind marker value is set to a meaningful value.
-     * If it is not a <code>InvalidRequestException</code> will be thrown.
-     *
-     * @param b the <code>ByteBuffer</code> to test
-     * @param messageTemplate the template used to build the error message
-     * @param messageArgs the message arguments
-     * @throws InvalidRequestException if the specified bind marker value is not set to a meaningful value.
-     */
-    public static void checkBindValueSet(ByteBuffer b, String messageTemplate, Object... messageArgs)
-            throws InvalidRequestException
-    {
-        checkTrue(b != ByteBufferUtil.UNSET_BYTE_BUFFER, messageTemplate, messageArgs);
-    }
-
-    /**
-     * Checks that the specified object is <code>null</code>.
-     * If it is not an <code>InvalidRequestException</code> will be throws.
-     *
-     * @param object the object to test
-     * @param messageTemplate the template used to build the error message
-     * @param messageArgs the message arguments
-     * @return the object
-     * @throws InvalidRequestException if the specified object is not <code>null</code>.
-     */
-    public static <T> T checkNull(T object, String messageTemplate, Object... messageArgs)
-            throws InvalidRequestException
-    {
-        checkTrue(object == null, messageTemplate, messageArgs);
-        return object;
-    }
-
-    /**
-     * Checks that the specified object is <code>null</code>.
-     * If it is not an <code>InvalidRequestException</code> will be throws.
+     * Checks that the specified object is NOT {@code null}.
+     * If it is, an {@code InvalidRequestException} will be thrown.
      *
      * @param object the object to test
      * @param message the error message
      * @return the object
-     * @throws InvalidRequestException if the specified object is not <code>null</code>.
+     * @throws InvalidRequestException if the specified object is {@code null}.
      */
-    public static <T> T checkNull(T object, String message) throws InvalidRequestException
+    public static <T> T checkNotNull(T object, String message) throws InvalidRequestException
     {
-        return checkNull(object, message, EMPTY_OBJECT_ARRAY);
+        checkTrue(object != null, message);
+        return object;
     }
 
     /**
-     * Returns an <code>InvalidRequestException</code> with the specified message.
+     * Checks that the specified object is NOT {@code null}.
+     * If it is, an {@code InvalidRequestException} will be thrown.
+     *
+     * @param object the object to test
+     * @param messageTemplate the template used to build the error message
+     * @param messageArg the message argument
+     * @return the object
+     * @throws InvalidRequestException if the specified object is {@code null}.
+     */
+    public static <T> T checkNotNull(T object, String messageTemplate, Object messageArg) throws InvalidRequestException
+    {
+        checkTrue(object != null, messageTemplate, messageArg);
+        return object;
+    }
+
+    /**
+     * Checks that the specified object is NOT {@code null}.
+     * If it is, an {@code InvalidRequestException} will be thrown.
+     *
+     * @param object the object to test
+     * @param messageTemplate the template used to build the error message
+     * @param arg1 the first message argument
+     * @param arg2 the second message argument
+     * @return the object
+     * @throws InvalidRequestException if the specified object is {@code null}.
+     */
+    public static <T> T checkNotNull(T object,
+                                     String messageTemplate,
+                                     Object arg1,
+                                     Object arg2) throws InvalidRequestException
+    {
+        checkTrue(object != null, messageTemplate, arg1, arg2);
+        return object;
+    }
+
+    /**
+     * Checks that the specified bind marker value is set to a meaningful value.
+     * If it is not, an {@code InvalidRequestException} will be thrown.
+     *
+     * @param b the <code>ByteBuffer</code> to test
+     * @param messageTemplate the template used to build the error message
+     * @param messageArg the message argument
+     * @throws InvalidRequestException if the specified bind marker value is not set to a meaningful value.
+     */
+    public static void checkBindValueSet(ByteBuffer b, String messageTemplate, Object messageArg) throws InvalidRequestException
+    {
+        checkTrue(b != ByteBufferUtil.UNSET_BYTE_BUFFER, messageTemplate, messageArg);
+    }
+
+    /**
+     * Checks that the specified object is {@code null}.
+     * If it is not, an {@code InvalidRequestException} will be thrown.
+     *
+     * @param object the object to test
+     * @param messageTemplate the template used to build the error message
+     * @param messageArg the message argument
+     * @throws InvalidRequestException if the specified object is not {@code null}.
+     */
+    public static void checkNull(Object object, String messageTemplate, Object messageArg) throws InvalidRequestException
+    {
+        checkTrue(object == null, messageTemplate, messageArg);
+    }
+
+    /**
+     * Checks that the specified object is {@code null}.
+     * If it is not, an {@code InvalidRequestException} will be thrown.
+     *
+     * @param object the object to test
+     * @param message the error message
+     * @throws InvalidRequestException if the specified object is not {@code null}.
+     */
+    public static void checkNull(Object object, String message) throws InvalidRequestException
+    {
+        checkTrue(object == null, message);
+    }
+
+    /**
+     * Returns an {@code InvalidRequestException} with the specified message.
+     *
+     * @param message the error message
+     * @return an {@code InvalidRequestException} with the specified message.
+     */
+    public static InvalidRequestException invalidRequest(String message)
+    {
+        return new InvalidRequestException(message);
+    }
+
+    /**
+     * Returns an {@code InvalidRequestException} with the specified message.
      *
      * @param messageTemplate the template used to build the error message
      * @param messageArgs the message arguments
-     * @return an <code>InvalidRequestException</code> with the specified message.
+     * @return an {@code InvalidRequestException} with the specified message.
      */
     public static InvalidRequestException invalidRequest(String messageTemplate, Object... messageArgs)
     {
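As a quick caller-side illustration of the intent behind these overloads (the class and helper names here, LimitChecks and describeExpensively, are hypothetical and not part of this patch): cheap arguments can go straight through a fixed-arity overload, while expensive-to-build arguments should stay behind an explicit branch so the message is only constructed on failure, as the class javadoc above recommends.

    // Hypothetical caller, sketching how the fixed-arity overloads are meant to be used.
    import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
    import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;

    final class LimitChecks
    {
        static void validateLimit(int limit, int pageSize)
        {
            // Cheap argument: the (boolean, String, Object) overload avoids the Object[] allocation of a
            // varargs call; note the int is still autoboxed, even when the check passes.
            checkTrue(limit > 0, "LIMIT must be strictly positive, got %s", limit);

            // Expensive arguments: guard explicitly so the message is only built when the check fails.
            if (limit > pageSize)
                throw invalidRequest("LIMIT %s exceeds the page size %s",
                                     describeExpensively(limit), describeExpensively(pageSize));
        }

        private static String describeExpensively(int value)
        {
            return Integer.toString(value); // stand-in for a costly formatting step
        }
    }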
diff --git a/src/java/org/apache/cassandra/cql3/statements/RevokePermissionsStatement.java b/src/java/org/apache/cassandra/cql3/statements/RevokePermissionsStatement.java
index 57d0631..4262285 100644
--- a/src/java/org/apache/cassandra/cql3/statements/RevokePermissionsStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/RevokePermissionsStatement.java
@@ -18,9 +18,11 @@
 package org.apache.cassandra.cql3.statements;
 
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import org.apache.cassandra.audit.AuditLogContext;
 import org.apache.cassandra.audit.AuditLogEntryType;
+import org.apache.cassandra.auth.IAuthorizer;
 import org.apache.cassandra.auth.IResource;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -28,6 +30,7 @@
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.exceptions.RequestValidationException;
 import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
@@ -41,7 +44,25 @@
 
     public ResultMessage execute(ClientState state) throws RequestValidationException, RequestExecutionException
     {
-        DatabaseDescriptor.getAuthorizer().revoke(state.getUser(), permissions, resource, grantee);
+        IAuthorizer authorizer = DatabaseDescriptor.getAuthorizer();
+        Set<Permission> revoked = authorizer.revoke(state.getUser(), permissions, resource, grantee);
+
+        // We want to warn the client if not all of the specified permissions were actually revoked and the client
+        // did not specify ALL in the query.
+        if (!revoked.equals(permissions) && !permissions.equals(Permission.ALL))
+        {
+            String permissionsStr = permissions.stream()
+                                               .filter(permission -> !revoked.contains(permission))
+                                               .sorted(Permission::compareTo) // guarantee the order for testing
+                                               .map(Permission::name)
+                                               .collect(Collectors.joining(", "));
+
+            ClientWarn.instance.warn(String.format("Role '%s' was not granted %s on %s",
+                                                   grantee.getRoleName(),
+                                                   permissionsStr,
+                                                   resource));
+        }
+
         return null;
     }
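As an illustration of the new behaviour (role and table names are hypothetical): if a client issues REVOKE SELECT, MODIFY ON ks.t FROM alice while alice only holds SELECT on ks.t, the statement still succeeds, but the response now carries a client warning along the lines of "Role 'alice' was not granted MODIFY on <table ks.t>". No warning is emitted when the client asks for ALL, since that case is excluded by the permissions.equals(Permission.ALL) check above.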
     
diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
index a5f2eab..8a48935 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
@@ -19,10 +19,13 @@
 
 import java.nio.ByteBuffer;
 import java.util.*;
+import java.util.stream.Collectors;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -30,8 +33,10 @@
 import org.apache.cassandra.audit.AuditLogContext;
 import org.apache.cassandra.audit.AuditLogEntryType;
 import org.apache.cassandra.auth.Permission;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.cql3.*;
@@ -40,8 +45,10 @@
 import org.apache.cassandra.cql3.selection.RawSelector;
 import org.apache.cassandra.cql3.selection.ResultSetBuilder;
 import org.apache.cassandra.cql3.selection.Selectable;
+import org.apache.cassandra.cql3.selection.Selectable.WithFunction;
 import org.apache.cassandra.cql3.selection.Selection;
 import org.apache.cassandra.cql3.selection.Selection.Selectors;
+import org.apache.cassandra.cql3.selection.Selector;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.aggregation.AggregationSpecification;
 import org.apache.cassandra.db.aggregation.GroupMaker;
@@ -62,13 +69,16 @@
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.service.pager.AggregationQueryPager;
 import org.apache.cassandra.service.pager.PagingState;
 import org.apache.cassandra.service.pager.QueryPager;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.NoSpamLogger;
+
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
 
@@ -76,7 +86,9 @@
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkNotNull;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkNull;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
+import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
 import static org.apache.cassandra.utils.ByteBufferUtil.UNSET_BYTE_BUFFER;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Encapsulates a completely parsed SELECT query, including the target
@@ -106,9 +118,9 @@
     private final boolean isReversed;
 
     /**
-     * The <code>AggregationSpecification</code> used to make the aggregates.
+     * The {@code Factory} used to create the {@code AggregationSpecification}.
      */
-    private final AggregationSpecification aggregationSpec;
+    private final AggregationSpecification.Factory aggregationSpecFactory;
 
     /**
      * The comparator used to orders results when multiple keys are selected (using IN).
@@ -128,7 +140,7 @@
                            Selection selection,
                            StatementRestrictions restrictions,
                            boolean isReversed,
-                           AggregationSpecification aggregationSpec,
+                           AggregationSpecification.Factory aggregationSpecFactory,
                            Comparator<List<ByteBuffer>> orderingComparator,
                            Term limit,
                            Term perPartitionLimit)
@@ -138,7 +150,7 @@
         this.selection = selection;
         this.restrictions = restrictions;
         this.isReversed = isReversed;
-        this.aggregationSpec = aggregationSpec;
+        this.aggregationSpecFactory = aggregationSpecFactory;
         this.orderingComparator = orderingComparator;
         this.parameters = parameters;
         this.limit = limit;
@@ -170,6 +182,9 @@
         selection.addFunctionsTo(functions);
         restrictions.addFunctionsTo(functions);
 
+        if (aggregationSpecFactory != null)
+            aggregationSpecFactory.addFunctionsTo(functions);
+
         if (limit != null)
             limit.addFunctionsTo(functions);
 
@@ -212,7 +227,7 @@
     {
         if (table.isView())
         {
-            TableMetadataRef baseTable = View.findBaseTable(keyspace(), columnFamily());
+            TableMetadataRef baseTable = View.findBaseTable(keyspace(), table());
             if (baseTable != null)
                 state.ensureTablePermission(baseTable, Permission.SELECT);
         }
@@ -227,7 +242,8 @@
 
     public void validate(ClientState state) throws InvalidRequestException
     {
-        // Nothing to do, all validation has been done by RawStatement.prepare()
+        if (parameters.allowFiltering && !SchemaConstants.isSystemKeyspace(table.keyspace))
+            Guardrails.allowFilteringEnabled.ensureEnabled(state);
     }
 
     public ResultMessage.Rows execute(QueryState state, QueryOptions options, long queryStartNanoTime)
@@ -236,6 +252,7 @@
         checkNotNull(cl, "Invalid empty consistency level");
 
         cl.validateForRead();
+        Guardrails.readConsistencyLevels.guard(EnumSet.of(cl), state.getClientState());
 
         int nowInSec = options.getNowInSeconds(state);
         int userLimit = getLimit(options);
@@ -243,60 +260,79 @@
         int pageSize = options.getPageSize();
 
         Selectors selectors = selection.newSelectors(options);
-        ReadQuery query = getQuery(options, selectors.getColumnFilter(), nowInSec, userLimit, userPerPartitionLimit, pageSize);
+        AggregationSpecification aggregationSpec = getAggregationSpec(options);
+        ReadQuery query = getQuery(options, state.getClientState(), selectors.getColumnFilter(),
+                                   nowInSec, userLimit, userPerPartitionLimit, pageSize, aggregationSpec);
+
+        if (options.isReadThresholdsEnabled())
+            query.trackWarnings();
 
         if (aggregationSpec == null && (pageSize <= 0 || (query.limits().count() <= pageSize)))
-            return execute(query, options, state, selectors, nowInSec, userLimit, queryStartNanoTime);
+            return execute(query, options, state.getClientState(), selectors, nowInSec, userLimit, null, queryStartNanoTime);
 
         QueryPager pager = getPager(query, options);
 
-        return execute(Pager.forDistributedQuery(pager, cl, state.getClientState()),
+        return execute(state,
+                       Pager.forDistributedQuery(pager, cl, state.getClientState()),
                        options,
                        selectors,
                        pageSize,
                        nowInSec,
                        userLimit,
+                       aggregationSpec,
                        queryStartNanoTime);
     }
 
+
+    public AggregationSpecification getAggregationSpec(QueryOptions options)
+    {
+        return aggregationSpecFactory == null ? null : aggregationSpecFactory.newInstance(options);
+    }
+
     public ReadQuery getQuery(QueryOptions options, int nowInSec) throws RequestValidationException
     {
         Selectors selectors = selection.newSelectors(options);
         return getQuery(options,
+                        ClientState.forInternalCalls(),
                         selectors.getColumnFilter(),
                         nowInSec,
                         getLimit(options),
                         getPerPartitionLimit(options),
-                        options.getPageSize());
+                        options.getPageSize(),
+                        getAggregationSpec(options));
     }
 
     public ReadQuery getQuery(QueryOptions options,
+                              ClientState state,
                               ColumnFilter columnFilter,
                               int nowInSec,
                               int userLimit,
                               int perPartitionLimit,
-                              int pageSize)
+                              int pageSize,
+                              AggregationSpecification aggregationSpec)
     {
         boolean isPartitionRangeQuery = restrictions.isKeyRange() || restrictions.usesSecondaryIndexing();
 
-        DataLimits limit = getDataLimits(userLimit, perPartitionLimit, pageSize);
+        DataLimits limit = getDataLimits(userLimit, perPartitionLimit, pageSize, aggregationSpec);
 
         if (isPartitionRangeQuery)
-            return getRangeCommand(options, columnFilter, limit, nowInSec);
+            return getRangeCommand(options, state, columnFilter, limit, nowInSec);
 
-        return getSliceCommands(options, columnFilter, limit, nowInSec);
+        return getSliceCommands(options, state, columnFilter, limit, nowInSec);
     }
 
     private ResultMessage.Rows execute(ReadQuery query,
                                        QueryOptions options,
-                                       QueryState state,
+                                       ClientState state,
                                        Selectors selectors,
                                        int nowInSec,
-                                       int userLimit, long queryStartNanoTime) throws RequestValidationException, RequestExecutionException
+                                       int userLimit,
+                                       AggregationSpecification aggregationSpec,
+                                       long queryStartNanoTime)
     {
-        try (PartitionIterator data = query.execute(options.getConsistency(), state.getClientState(), queryStartNanoTime))
+        try (PartitionIterator data = query.execute(options.getConsistency(), state, queryStartNanoTime))
         {
-            return processResults(data, options, selectors, nowInSec, userLimit);
+            return processResults(data, options, selectors, nowInSec, userLimit, aggregationSpec);
         }
     }
 
@@ -373,27 +409,31 @@
         }
     }
 
-    private ResultMessage.Rows execute(Pager pager,
+    private ResultMessage.Rows execute(QueryState state,
+                                       Pager pager,
                                        QueryOptions options,
                                        Selectors selectors,
                                        int pageSize,
                                        int nowInSec,
                                        int userLimit,
-                                       long queryStartNanoTime) throws RequestValidationException, RequestExecutionException
+                                       AggregationSpecification aggregationSpec,
+                                       long queryStartNanoTime)
     {
-        if (aggregationSpec != null)
+        Guardrails.pageSize.guard(pageSize, table(), false, state.getClientState());
+
+        if (aggregationSpecFactory != null)
         {
             if (!restrictions.hasPartitionKeyRestrictions())
             {
                 warn("Aggregation query used without partition key");
                 noSpamLogger.warn(String.format("Aggregation query used without partition key on table %s.%s, aggregation type: %s",
-                                                 keyspace(), columnFamily(), aggregationSpec.kind()));
+                                                 keyspace(), table(), aggregationSpec.kind()));
             }
             else if (restrictions.keyIsInRelation())
             {
                 warn("Aggregation query used on multiple partition keys (IN restriction)");
                 noSpamLogger.warn(String.format("Aggregation query used on multiple partition keys (IN restriction) on table %s.%s, aggregation type: %s",
-                                                 keyspace(), columnFamily(), aggregationSpec.kind()));
+                                                 keyspace(), table(), aggregationSpec.kind()));
             }
         }
 
@@ -406,7 +446,7 @@
         ResultMessage.Rows msg;
         try (PartitionIterator page = pager.fetchPage(pageSize, queryStartNanoTime))
         {
-            msg = processResults(page, options, selectors, nowInSec, userLimit);
+            msg = processResults(page, options, selectors, nowInSec, userLimit, aggregationSpec);
         }
 
         // Please note that the isExhausted state of the pager only gets updated when we've closed the page, so this
@@ -427,25 +467,37 @@
                                               QueryOptions options,
                                               Selectors selectors,
                                               int nowInSec,
-                                              int userLimit) throws RequestValidationException
+                                              int userLimit,
+                                              AggregationSpecification aggregationSpec) throws RequestValidationException
     {
-        ResultSet rset = process(partitions, options, selectors, nowInSec, userLimit);
+        ResultSet rset = process(partitions, options, selectors, nowInSec, userLimit, aggregationSpec);
         return new ResultMessage.Rows(rset);
     }
 
     public ResultMessage.Rows executeLocally(QueryState state, QueryOptions options) throws RequestExecutionException, RequestValidationException
     {
-        return executeInternal(state, options, options.getNowInSeconds(state), System.nanoTime());
+        return executeInternal(state, options, options.getNowInSeconds(state), nanoTime());
     }
 
-    public ResultMessage.Rows executeInternal(QueryState state, QueryOptions options, int nowInSec, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
+    public ResultMessage.Rows executeInternal(QueryState state,
+                                              QueryOptions options,
+                                              int nowInSec,
+                                              long queryStartNanoTime)
     {
         int userLimit = getLimit(options);
         int userPerPartitionLimit = getPerPartitionLimit(options);
         int pageSize = options.getPageSize();
 
         Selectors selectors = selection.newSelectors(options);
-        ReadQuery query = getQuery(options, selectors.getColumnFilter(), nowInSec, userLimit, userPerPartitionLimit, pageSize);
+        AggregationSpecification aggregationSpec = getAggregationSpec(options);
+        ReadQuery query = getQuery(options,
+                                   state.getClientState(),
+                                   selectors.getColumnFilter(),
+                                   nowInSec,
+                                   userLimit,
+                                   userPerPartitionLimit,
+                                   pageSize,
+                                   aggregationSpec);
 
         try (ReadExecutionController executionController = query.executionController())
         {
@@ -453,18 +505,20 @@
             {
                 try (PartitionIterator data = query.executeInternal(executionController))
                 {
-                    return processResults(data, options, selectors, nowInSec, userLimit);
+                    return processResults(data, options, selectors, nowInSec, userLimit, null);
                 }
             }
 
             QueryPager pager = getPager(query, options);
 
-            return execute(Pager.forInternalQuery(pager, executionController),
+            return execute(state,
+                           Pager.forInternalQuery(pager, executionController),
                            options,
                            selectors,
                            pageSize,
                            nowInSec,
                            userLimit,
+                           aggregationSpec,
                            queryStartNanoTime);
         }
     }
@@ -473,17 +527,61 @@
     {
         QueryPager pager = query.getPager(options.getPagingState(), options.getProtocolVersion());
 
-        if (aggregationSpec == null || query.isEmpty())
+        if (aggregationSpecFactory == null || query.isEmpty())
             return pager;
 
         return new AggregationQueryPager(pager, query.limits());
     }
 
+    public Map<DecoratedKey, List<Row>> executeRawInternal(QueryOptions options, ClientState state, int nowInSec) throws RequestExecutionException, RequestValidationException
+    {
+        int userLimit = getLimit(options);
+        int userPerPartitionLimit = getPerPartitionLimit(options);
+        if (options.getPageSize() > 0)
+            throw new IllegalStateException();
+        if (aggregationSpecFactory != null)
+            throw new IllegalStateException();
+
+        Selectors selectors = selection.newSelectors(options);
+        ReadQuery query = getQuery(options, state, selectors.getColumnFilter(), nowInSec, userLimit, userPerPartitionLimit, Integer.MAX_VALUE, null);
+
+        Map<DecoratedKey, List<Row>> result = Collections.emptyMap();
+        try (ReadExecutionController executionController = query.executionController())
+        {
+            try (PartitionIterator data = query.executeInternal(executionController))
+            {
+                while (data.hasNext())
+                {
+                    try (RowIterator in = data.next())
+                    {
+                        List<Row> out = Collections.emptyList();
+                        while (in.hasNext())
+                        {
+                            switch (out.size())
+                            {
+                                case 0:  out = Collections.singletonList(in.next()); break;
+                                case 1:  out = new ArrayList<>(out);
+                                default: out.add(in.next());
+                            }
+                        }
+                        switch (result.size())
+                        {
+                            case 0:  result = Collections.singletonMap(in.partitionKey(), out); break;
+                            case 1:  result = new TreeMap<>(result);
+                            default: result.put(in.partitionKey(), out);
+                        }
+                    }
+                }
+                return result;
+            }
+        }
+    }
+
     public ResultSet process(PartitionIterator partitions, int nowInSec) throws InvalidRequestException
     {
         QueryOptions options = QueryOptions.DEFAULT;
         Selectors selectors = selection.newSelectors(options);
-        return process(partitions, options, selectors, nowInSec, getLimit(options));
+        return process(partitions, options, selectors, nowInSec, getLimit(options), getAggregationSpec(options));
     }
 
     @Override
@@ -492,7 +590,7 @@
         return table.keyspace;
     }
 
-    public String columnFamily()
+    public String table()
     {
         return table.name;
     }
@@ -513,13 +611,19 @@
         return restrictions;
     }
 
-    private ReadQuery getSliceCommands(QueryOptions options, ColumnFilter columnFilter, DataLimits limit, int nowInSec)
+    private ReadQuery getSliceCommands(QueryOptions options, ClientState state, ColumnFilter columnFilter,
+                                       DataLimits limit, int nowInSec)
     {
-        Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options);
+        Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options, state);
         if (keys.isEmpty())
             return ReadQuery.empty(table);
 
-        ClusteringIndexFilter filter = makeClusteringIndexFilter(options, columnFilter);
+        if (restrictions.keyIsInRelation())
+        {
+            Guardrails.partitionKeysInSelect.guard(keys.size(), table.name, false, state);
+        }
+
+        ClusteringIndexFilter filter = makeClusteringIndexFilter(options, state, columnFilter);
         if (filter == null || filter.isEmpty(table.comparator))
             return ReadQuery.empty(table);
 
@@ -546,8 +650,9 @@
     public Slices clusteringIndexFilterAsSlices()
     {
         QueryOptions options = QueryOptions.forInternalCalls(Collections.emptyList());
+        ClientState state = ClientState.forInternalCalls();
         ColumnFilter columnFilter = selection.newSelectors(options).getColumnFilter();
-        ClusteringIndexFilter filter = makeClusteringIndexFilter(options, columnFilter);
+        ClusteringIndexFilter filter = makeClusteringIndexFilter(options, state, columnFilter);
         if (filter instanceof ClusteringIndexSliceFilter)
             return ((ClusteringIndexSliceFilter)filter).requestedSlices();
 
@@ -564,8 +669,9 @@
     public SinglePartitionReadCommand internalReadForView(DecoratedKey key, int nowInSec)
     {
         QueryOptions options = QueryOptions.forInternalCalls(Collections.emptyList());
+        ClientState state = ClientState.forInternalCalls();
         ColumnFilter columnFilter = selection.newSelectors(options).getColumnFilter();
-        ClusteringIndexFilter filter = makeClusteringIndexFilter(options, columnFilter);
+        ClusteringIndexFilter filter = makeClusteringIndexFilter(options, state, columnFilter);
         RowFilter rowFilter = getRowFilter(options);
         return SinglePartitionReadCommand.create(table, nowInSec, columnFilter, rowFilter, DataLimits.NONE, key, filter);
     }
@@ -578,9 +684,9 @@
         return getRowFilter(QueryOptions.forInternalCalls(Collections.emptyList()));
     }
 
-    private ReadQuery getRangeCommand(QueryOptions options, ColumnFilter columnFilter, DataLimits limit, int nowInSec)
+    private ReadQuery getRangeCommand(QueryOptions options, ClientState state, ColumnFilter columnFilter, DataLimits limit, int nowInSec)
     {
-        ClusteringIndexFilter clusteringIndexFilter = makeClusteringIndexFilter(options, columnFilter);
+        ClusteringIndexFilter clusteringIndexFilter = makeClusteringIndexFilter(options, state, columnFilter);
         if (clusteringIndexFilter == null)
             return ReadQuery.empty(table);
 
@@ -601,7 +707,7 @@
         return command;
     }
 
-    private ClusteringIndexFilter makeClusteringIndexFilter(QueryOptions options, ColumnFilter columnFilter)
+    private ClusteringIndexFilter makeClusteringIndexFilter(QueryOptions options, ClientState state, ColumnFilter columnFilter)
     {
         if (parameters.isDistinct)
         {
@@ -624,7 +730,7 @@
             return new ClusteringIndexSliceFilter(slices, isReversed);
         }
 
-        NavigableSet<Clustering<?>> clusterings = getRequestedRows(options);
+        NavigableSet<Clustering<?>> clusterings = getRequestedRows(options, state);
         // We can have no clusterings if either we're only selecting the static columns, or if we have
         // a 'IN ()' for clusterings. In that case, we still want to query if some static columns are
         // queried. But we're fine otherwise.
@@ -670,7 +776,10 @@
         return builder.build();
     }
 
-    private DataLimits getDataLimits(int userLimit, int perPartitionLimit, int pageSize)
+    private DataLimits getDataLimits(int userLimit,
+                                     int perPartitionLimit,
+                                     int pageSize,
+                                     AggregationSpecification aggregationSpec)
     {
         int cqlRowLimit = DataLimits.NO_LIMIT;
         int cqlPerPartitionLimit = DataLimits.NO_LIMIT;
@@ -756,12 +865,12 @@
         return userLimit;
     }
 
-    private NavigableSet<Clustering<?>> getRequestedRows(QueryOptions options) throws InvalidRequestException
+    private NavigableSet<Clustering<?>> getRequestedRows(QueryOptions options, ClientState state) throws InvalidRequestException
     {
         // Note: getRequestedColumns doesn't handle static columns, but due to CASSANDRA-5762
         // we always do a slice for CQL3 tables, so it's ok to ignore them here
         assert !restrictions.isColumnRange();
-        return restrictions.getClusteringColumns(options);
+        return restrictions.getClusteringColumns(options, state);
     }
 
     /**
@@ -777,7 +886,8 @@
                               QueryOptions options,
                               Selectors selectors,
                               int nowInSec,
-                              int userLimit) throws InvalidRequestException
+                              int userLimit,
+                              AggregationSpecification aggregationSpec) throws InvalidRequestException
     {
         GroupMaker groupMaker = aggregationSpec == null ? null : aggregationSpec.newGroupMaker();
         ResultSetBuilder result = new ResultSetBuilder(getResultMetadata(), selectors, groupMaker);
@@ -791,6 +901,7 @@
         }
 
         ResultSet cqlRows = result.build();
+        maybeWarn(result, options);
 
         orderResults(cqlRows);
 
@@ -812,10 +923,60 @@
         }
     }
 
+    private void maybeWarn(ResultSetBuilder result, QueryOptions options)
+    {
+        if (!options.isReadThresholdsEnabled())
+            return;
+        ColumnFamilyStore store = cfs();
+        if (store != null)
+            store.metric.coordinatorReadSize.update(result.getSize());
+        if (result.shouldWarn(options.getCoordinatorReadSizeWarnThresholdBytes()))
+        {
+            String msg = String.format("Read on table %s has exceeded the size warning threshold of %,d bytes", table, options.getCoordinatorReadSizeWarnThresholdBytes());
+            ClientState state = ClientState.forInternalCalls();
+            ClientWarn.instance.warn(msg + " with " + loggableTokens(options, state));
+            logger.warn("{} with query {}", msg, asCQL(options, state));
+            if (store != null)
+                store.metric.coordinatorReadSizeWarnings.mark();
+        }
+    }
+
+    private void maybeFail(ResultSetBuilder result, QueryOptions options)
+    {
+        if (!options.isReadThresholdsEnabled())
+            return;
+        if (result.shouldReject(options.getCoordinatorReadSizeAbortThresholdBytes()))
+        {
+            String msg = String.format("Read on table %s has exceeded the size failure threshold of %,d bytes", table, options.getCoordinatorReadSizeAbortThresholdBytes());
+            ClientState state = ClientState.forInternalCalls();
+            String clientMsg = msg + " with " + loggableTokens(options, state);
+            ClientWarn.instance.warn(clientMsg);
+            logger.warn("{} with query {}", msg, asCQL(options, state));
+            ColumnFamilyStore store = cfs();
+            if (store != null)
+            {
+                store.metric.coordinatorReadSizeAborts.mark();
+                store.metric.coordinatorReadSize.update(result.getSize());
+            }
+            // read errors require blockFor and received (it's in the protocol message), but this isn't known;
+            // to work around this, treat the coordinator as the only response we care about and mark it failed
+            ReadSizeAbortException exception = new ReadSizeAbortException(clientMsg, options.getConsistency(), 0, 1, true,
+                                                                          ImmutableMap.of(FBUtilities.getBroadcastAddressAndPort(), RequestFailureReason.READ_SIZE));
+            StorageProxy.recordReadRegularAbort(options.getConsistency(), exception);
+            throw exception;
+        }
+    }
+
+    private ColumnFamilyStore cfs()
+    {
+        return Schema.instance.getColumnFamilyStoreInstance(table.id);
+    }
+
     // Used by ModificationStatement for CAS operations
-    void processPartition(RowIterator partition, QueryOptions options, ResultSetBuilder result, int nowInSec)
+    public void processPartition(RowIterator partition, QueryOptions options, ResultSetBuilder result, int nowInSec)
     throws InvalidRequestException
     {
+        maybeFail(result, options);
         ProtocolVersion protocolVersion = options.getProtocolVersion();
 
         ByteBuffer[] keyComponents = getComponents(table, partition.partitionKey());
@@ -827,6 +988,7 @@
             if (!staticRow.isEmpty() && restrictions.returnStaticContentOnPartitionWithNoRows())
             {
                 result.newRow(partition.partitionKey(), staticRow.clustering());
+                maybeFail(result, options);
                 for (ColumnMetadata def : selection.getColumns())
                 {
                     switch (def.kind)
@@ -849,6 +1011,13 @@
         {
             Row row = partition.next();
             result.newRow( partition.partitionKey(), row.clustering());
+
+            // reads aren't failed as soon as the size exceeds the failure threshold; they're failed once the failure
+            // threshold has been exceeded and we start adding more data. We're slightly more permissive to avoid
+            // cases where a row can never be read. Since we only warn/fail after entire rows are read, this will
+            // still allow the entire dataset to be read with LIMIT 1 queries, even if every row is oversized.
+            maybeFail(result, options);
+
             // Respect selection order
             for (ColumnMetadata def : selection.getColumns())
             {
@@ -914,6 +1083,7 @@
         public final WhereClause whereClause;
         public final Term.Raw limit;
         public final Term.Raw perPartitionLimit;
+        private ClientState state;
 
         public RawStatement(QualifiedName cfName,
                             Parameters parameters,
@@ -932,6 +1102,8 @@
 
         public SelectStatement prepare(ClientState state)
         {
+            // Cache locally for use by Guardrails
+            this.state = state;
             return prepare(false);
         }
 
@@ -962,12 +1134,14 @@
                 validateDistinctSelection(table, selection, restrictions);
             }
 
-            AggregationSpecification aggregationSpec = getAggregationSpecification(table,
-                                                                                   selection,
-                                                                                   restrictions,
-                                                                                   parameters.isDistinct);
+            AggregationSpecification.Factory aggregationSpecFactory = getAggregationSpecFactory(table,
+                                                                                                bindVariables,
+                                                                                                selection,
+                                                                                                restrictions,
+                                                                                                parameters.isDistinct);
 
-            checkFalse(aggregationSpec == AggregationSpecification.AGGREGATE_EVERYTHING && perPartitionLimit != null,
+            checkFalse(aggregationSpecFactory == AggregationSpecification.AGGREGATE_EVERYTHING_FACTORY
+                       && perPartitionLimit != null,
                        "PER PARTITION LIMIT is not allowed with aggregate queries.");
 
             Comparator<List<ByteBuffer>> orderingComparator = null;
@@ -991,7 +1165,7 @@
                                        selection,
                                        restrictions,
                                        isReversed,
-                                       aggregationSpec,
+                                       aggregationSpecFactory,
                                        orderingComparator,
                                        prepareLimit(bindVariables, limit, keyspace(), limitReceiver()),
                                        prepareLimit(bindVariables, perPartitionLimit, keyspace(), perPartitionLimitReceiver()));
@@ -1005,6 +1179,9 @@
         {
             boolean hasGroupBy = !parameters.groups.isEmpty();
 
+            if (hasGroupBy)
+                Guardrails.groupByEnabled.ensureEnabled(state);
+
             if (selectables.isEmpty()) // wildcard query
             {
                 return hasGroupBy ? Selection.wildcardWithGroupBy(table, boundNames, parameters.isJson, restrictions.returnStaticContentOnPartitionWithNoRows())
@@ -1125,32 +1302,56 @@
         }
 
         /**
-         * Creates the <code>AggregationSpecification</code>s used to make the aggregates.
+         * Creates the {@code AggregationSpecification.Factory} used to make the aggregates.
          *
          * @param metadata the table metadata
+         * @param boundNames the query bind variables
          * @param selection the selection
          * @param restrictions the restrictions
-         * @param isDistinct <code>true</code> if the query is a DISTINCT one.
-         * @return the <code>AggregationSpecification</code>s used to make the aggregates
+         * @param isDistinct {@code true} if the query is a DISTINCT one.
+         * @return the {@code AggregationSpecification.Factory} used to make the aggregates
          */
-        private AggregationSpecification getAggregationSpecification(TableMetadata metadata,
-                                                                     Selection selection,
-                                                                     StatementRestrictions restrictions,
-                                                                     boolean isDistinct)
+        private AggregationSpecification.Factory getAggregationSpecFactory(TableMetadata metadata,
+                                                                           VariableSpecifications boundNames,
+                                                                           Selection selection,
+                                                                           StatementRestrictions restrictions,
+                                                                           boolean isDistinct)
         {
             if (parameters.groups.isEmpty())
-                return selection.isAggregate() ? AggregationSpecification.AGGREGATE_EVERYTHING
+                return selection.isAggregate() ? AggregationSpecification.AGGREGATE_EVERYTHING_FACTORY
                                                : null;
 
             int clusteringPrefixSize = 0;
 
             Iterator<ColumnMetadata> pkColumns = metadata.primaryKeyColumns().iterator();
-            for (ColumnIdentifier id : parameters.groups)
+            Selector.Factory selectorFactory = null;
+            for (Selectable.Raw raw : parameters.groups)
             {
-                ColumnMetadata def = metadata.getExistingColumn(id);
+                Selectable selectable = raw.prepare(metadata);
+                ColumnMetadata def = null;
 
-                checkTrue(def.isPartitionKey() || def.isClusteringColumn(),
-                          "Group by is currently only supported on the columns of the PRIMARY KEY, got %s", def.name);
+                // For GROUP BY we only allow column names or functions at the top level.
+                if (selectable instanceof WithFunction)
+                {
+                    WithFunction withFunction = (WithFunction) selectable;
+                    validateGroupByFunction(withFunction);
+                    List<ColumnMetadata> columns = new ArrayList<ColumnMetadata>();
+                    selectorFactory = selectable.newSelectorFactory(metadata, null, columns, boundNames);
+                    checkFalse(columns.isEmpty(), "GROUP BY functions must have one clustering column name as parameter");
+                    if (columns.size() > 1)
+                        throw invalidRequest("GROUP BY functions accept only one clustering column as parameter, got: %s",
+                                             columns.stream().map(c -> c.name.toCQLString()).collect(Collectors.joining(",")));
+
+                    def = columns.get(0);
+                    checkTrue(def.isClusteringColumn(),
+                              "Group by functions are only supported on clustering columns, got %s", def.name);
+                }
+                else
+                {
+                    def = (ColumnMetadata) selectable;
+                    checkTrue(def.isPartitionKey() || def.isClusteringColumn(),
+                              "Group by is currently only supported on the columns of the PRIMARY KEY, got %s", def.name);
+                    checkNull(selectorFactory, "Functions are only supported on the last element of the GROUP BY clause");
+                }
 
                 while (true)
                 {
@@ -1178,7 +1379,22 @@
             checkFalse(clusteringPrefixSize > 0 && isDistinct,
                        "Grouping on clustering columns is not allowed for SELECT DISTINCT queries");
 
-            return AggregationSpecification.aggregatePkPrefix(metadata.comparator, clusteringPrefixSize);
+            return selectorFactory == null ? AggregationSpecification.aggregatePkPrefixFactory(metadata.comparator, clusteringPrefixSize)
+                                           : AggregationSpecification.aggregatePkPrefixFactoryWithSelector(metadata.comparator,
+                                                                                                           clusteringPrefixSize,
+                                                                                                           selectorFactory);
+        }
+
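In practical terms, getAggregationSpecFactory now also accepts a GROUP BY element that is a function applied to the last grouped clustering column, in which case it returns a factory carrying the corresponding Selector.Factory so the grouping selector can be built from the bound values of each execution. A query shaped roughly like SELECT pk, count(*) FROM t GROUP BY pk, floor(ck, 5m) would take this path; the floor time-bucketing function is only assumed here for illustration, as the code itself merely requires a non-aggregate function of a single clustering column, as checked by validateGroupByFunction below.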
+        /**
+         * Checks that the function used is a valid one for the GROUP BY clause. Groups are determined by a
+         * monotonic scalar function, so aggregate functions are rejected.
+         *
+         * @param withFunction the {@code Selectable} from which the function must be retrieved.
+         */
+        private void validateGroupByFunction(WithFunction withFunction)
+        {
+            Function f = withFunction.function;
+            checkFalse(f.isAggregate(), "Aggregate functions are not supported within the GROUP BY clause, got: %s", f.name());
         }
 
         private Comparator<List<ByteBuffer>> getOrderingComparator(Selection selection,
@@ -1280,13 +1496,13 @@
     {
         // Public because CASSANDRA-9858
         public final Map<ColumnIdentifier, Boolean> orderings;
-        public final List<ColumnIdentifier> groups;
+        public final List<Selectable.Raw> groups;
         public final boolean isDistinct;
         public final boolean allowFiltering;
         public final boolean isJson;
 
         public Parameters(Map<ColumnIdentifier, Boolean> orderings,
-                          List<ColumnIdentifier> groups,
+                          List<Selectable.Raw> groups,
                           boolean isDistinct,
                           boolean allowFiltering,
                           boolean isJson)
@@ -1366,4 +1582,131 @@
     {
         return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
+
+    private String loggableTokens(QueryOptions options, ClientState state)
+    {
+        if (restrictions.isKeyRange() || restrictions.usesSecondaryIndexing())
+        {
+            AbstractBounds<PartitionPosition> bounds = restrictions.getPartitionKeyBounds(options);
+            return "token range: " + (bounds.inclusiveLeft() ? '[' : '(') +
+                   bounds.left.getToken().toString() + ", " +
+                   bounds.right.getToken().toString() +
+                   (bounds.inclusiveRight() ? ']' : ')');
+        }
+        else
+        {
+            Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options, state);
+            if (keys.size() == 1)
+            {
+                return "token: " + table.partitioner.getToken(Iterables.getOnlyElement(keys)).toString();
+            }
+            else
+            {
+                StringBuilder sb = new StringBuilder("tokens: [");
+                boolean isFirst = true;
+                for (ByteBuffer key : keys)
+                {
+                    if (!isFirst) sb.append(", ");
+                    sb.append(table.partitioner.getToken(key).toString());
+                    isFirst = false;
+                }
+                return sb.append(']').toString();
+            }
+        }
+    }
+
+    private String asCQL(QueryOptions options, ClientState state)
+    {
+        ColumnFilter columnFilter = selection.newSelectors(options).getColumnFilter();
+        StringBuilder sb = new StringBuilder();
+
+        sb.append("SELECT ").append(queriedColumns().toCQLString());
+        sb.append(" FROM ").append(table.keyspace).append('.').append(table.name);
+        if (restrictions.isKeyRange() || restrictions.usesSecondaryIndexing())
+        {
+            // partition range
+            ClusteringIndexFilter clusteringIndexFilter = makeClusteringIndexFilter(options, state, columnFilter);
+            if (clusteringIndexFilter == null)
+                return "EMPTY";
+
+            RowFilter rowFilter = getRowFilter(options);
+
+            // The LIMIT provided by the user is the number of CQL rows they want returned.
+            // We want getRangeSlice to count the number of columns, not the number of keys.
+            AbstractBounds<PartitionPosition> keyBounds = restrictions.getPartitionKeyBounds(options);
+            if (keyBounds == null)
+                return "EMPTY";
+
+            DataRange dataRange = new DataRange(keyBounds, clusteringIndexFilter);
+
+            if (!dataRange.isUnrestricted(table) || !rowFilter.isEmpty())
+            {
+                sb.append(" WHERE ");
+                // We put the row filter first because the data range can end with "ORDER BY"
+                if (!rowFilter.isEmpty())
+                {
+                    sb.append(rowFilter);
+                    if (!dataRange.isUnrestricted(table))
+                        sb.append(" AND ");
+                }
+                if (!dataRange.isUnrestricted(table))
+                    sb.append(dataRange.toCQLString(table, rowFilter));
+            }
+        }
+        else
+        {
+            // single partition
+            Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options, state);
+            if (keys.isEmpty())
+                return "EMPTY";
+            ClusteringIndexFilter filter = makeClusteringIndexFilter(options, state, columnFilter);
+            if (filter == null)
+                return "EMPTY";
+
+            sb.append(" WHERE ");
+
+
+            boolean compoundPk = table.partitionKeyColumns().size() > 1;
+            if (compoundPk) sb.append('(');
+            sb.append(ColumnMetadata.toCQLString(table.partitionKeyColumns()));
+            if (compoundPk) sb.append(')');
+            if (keys.size() == 1)
+            {
+                sb.append(" = ");
+                if (compoundPk) sb.append('(');
+                DataRange.appendKeyString(sb, table.partitionKeyType, Iterables.getOnlyElement(keys));
+                if (compoundPk) sb.append(')');
+            }
+            else
+            {
+                sb.append(" IN (");
+                boolean first = true;
+                for (ByteBuffer key : keys)
+                {
+                    if (!first)
+                        sb.append(", ");
+
+                    if (compoundPk) sb.append('(');
+                    DataRange.appendKeyString(sb, table.partitionKeyType, key);
+                    if (compoundPk) sb.append(')');
+                    first = false;
+                }
+
+                sb.append(')');
+            }
+
+            RowFilter rowFilter = getRowFilter(options);
+            if (!rowFilter.isEmpty())
+                sb.append(" AND ").append(rowFilter);
+
+            String filterString = filter.toCQLString(table, rowFilter);
+            if (!filterString.isEmpty())
+                sb.append(" AND ").append(filterString);
+        }
+
+        DataLimits limits = getDataLimits(getLimit(options), getPerPartitionLimit(options), options.getPageSize(), getAggregationSpec(options));
+        if (limits != DataLimits.NONE)
+            sb.append(' ').append(limits);
+        return sb.toString();
+    }
 }
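
Note on the SelectStatement changes above: GROUP BY can now accept, as its last element, a scalar (non-aggregate) function applied to a clustering column, and the grouping is built from the function's output via the new selector factory. A minimal CQL sketch of the intended usage, assuming a hypothetical table sensor_data(id, ts, value) with ts as the clustering column and a monotonic time-bucketing function such as floor(<timestamp>, <duration>) being the grouping function:

    -- group readings into one-hour buckets within a partition
    SELECT id, floor(ts, 1h) AS bucket, avg(value)
    FROM sensor_data
    WHERE id = 42
    GROUP BY id, floor(ts, 1h);

Per the validation added here, the function must target a clustering column, may only appear as the last element of the GROUP BY clause, and must not itself be an aggregate.
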
diff --git a/src/java/org/apache/cassandra/cql3/statements/SingleTableUpdatesCollector.java b/src/java/org/apache/cassandra/cql3/statements/SingleTableUpdatesCollector.java
index 6dc2d41..14b1660 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SingleTableUpdatesCollector.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SingleTableUpdatesCollector.java
@@ -31,8 +31,10 @@
 import org.apache.cassandra.db.IMutation;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.commitlog.CommitLogSegment;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.virtual.VirtualMutation;
+import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.TableMetadata;
 
 /**
@@ -105,6 +107,7 @@
                 mutation = new Mutation(builder.build());
 
             mutation.validateIndexedColumns();
+            mutation.validateSize(MessagingService.current_version, CommitLogSegment.ENTRY_OVERHEAD_SIZE);
             ms.add(mutation);
         }
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/StatementType.java b/src/java/org/apache/cassandra/cql3/statements/StatementType.java
index d399931..f9c0d3d 100644
--- a/src/java/org/apache/cassandra/cql3/statements/StatementType.java
+++ b/src/java/org/apache/cassandra/cql3/statements/StatementType.java
@@ -135,4 +135,4 @@
     {
         return false;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java b/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java
index 206d116..1d01eaa 100644
--- a/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java
@@ -25,8 +25,11 @@
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.guardrails.Guardrails;
+import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.QueryState;
@@ -55,6 +58,7 @@
     public void validate(ClientState state) throws InvalidRequestException
     {
         Schema.instance.validateTable(keyspace(), name());
+        Guardrails.dropTruncateTableEnabled.ensureEnabled(state);
     }
 
     public ResultMessage execute(QueryState state, QueryOptions options, long queryStartNanoTime) throws InvalidRequestException, TruncateException
@@ -66,9 +70,13 @@
                 throw new InvalidRequestException("Cannot TRUNCATE materialized view directly; must truncate base table instead");
 
             if (metaData.isVirtual())
-                throw new InvalidRequestException("Cannot truncate virtual tables");
-
-            StorageProxy.truncateBlocking(keyspace(), name());
+            {
+                executeForVirtualTable(metaData.id);
+            }
+            else
+            {
+                StorageProxy.truncateBlocking(keyspace(), name());
+            }
         }
         catch (UnavailableException | TimeoutException e)
         {
@@ -86,10 +94,14 @@
                 throw new InvalidRequestException("Cannot TRUNCATE materialized view directly; must truncate base table instead");
 
             if (metaData.isVirtual())
-                throw new InvalidRequestException("Cannot truncate virtual tables");
-
-            ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(name());
-            cfs.truncateBlocking();
+            {
+                executeForVirtualTable(metaData.id);
+            }
+            else
+            {
+                ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(name());
+                cfs.truncateBlocking();
+            }
         }
         catch (Exception e)
         {
@@ -97,7 +109,12 @@
         }
         return null;
     }
-    
+
+    private void executeForVirtualTable(TableId id)
+    {
+        VirtualKeyspaceRegistry.instance.getTableNullable(id).truncate();
+    }
+
     @Override
     public String toString()
     {
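
The TruncateStatement changes route TRUNCATE of a virtual table to the table's own truncate() implementation instead of rejecting it outright, and validate() now consults the drop/truncate guardrail. A hedged CQL sketch (the keyspace and table names are illustrative; which virtual tables actually support truncation depends on their implementation):

    -- regular table: still handled by StorageProxy.truncateBlocking
    TRUNCATE TABLE ks.events;

    -- virtual table: now delegated to the table's truncate() rather than
    -- failing with "Cannot truncate virtual tables"
    TRUNCATE TABLE system_views.some_virtual_table;

If the guardrail disabling DROP/TRUNCATE of tables is active for the issuing client, validate() rejects both forms before execution.
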
diff --git a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
index f67db14..20df151 100644
--- a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
@@ -168,7 +168,7 @@
                 }
                 else
                 {
-                    Operation operation = new Operation.SetValue(value).prepare(metadata, def);
+                    Operation operation = new Operation.SetValue(value).prepare(metadata, def, !conditions.isEmpty());
                     operation.collectMarkerSpecification(bindVariables);
                     operations.add(operation);
                 }
@@ -236,7 +236,7 @@
                 }
                 else
                 {
-                    Operation operation = new Operation.SetValue(raw).prepare(metadata, def);
+                    Operation operation = new Operation.SetValue(raw).prepare(metadata, def, !conditions.isEmpty());
                     operation.collectMarkerSpecification(bindVariables);
                     operations.add(operation);
                 }
@@ -304,7 +304,7 @@
 
                 checkFalse(def.isPrimaryKeyColumn(), "PRIMARY KEY part %s found in SET part", def.name);
 
-                Operation operation = entry.right.prepare(metadata, def);
+                Operation operation = entry.right.prepare(metadata, def, !conditions.isEmpty());
                 operation.collectMarkerSpecification(bindVariables);
                 operations.add(operation);
             }
@@ -334,6 +334,6 @@
     @Override
     public AuditLogContext getAuditLogContext()
     {
-        return new AuditLogContext(AuditLogEntryType.UPDATE, keyspace(), columnFamily());
+        return new AuditLogContext(AuditLogEntryType.UPDATE, keyspace(), table());
     }
 }
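
The UpdateStatement changes thread a new flag into Operation.SetValue.prepare(...) indicating whether the statement carries IF conditions, presumably so the prepared operation can account for the conditional (LWT) path. A CQL sketch of the two shapes this distinguishes (table and column names are illustrative):

    -- no IF conditions: SetValue prepared with the flag false
    UPDATE ks.users SET email = 'a@example.com' WHERE id = 1;

    -- column condition present: SetValue prepared with the flag true
    UPDATE ks.users SET email = 'a@example.com' WHERE id = 1 IF email = 'old@example.com';

The audit-log context for UPDATE now also reports the table via table() rather than the legacy columnFamily() accessor.
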
diff --git a/src/java/org/apache/cassandra/cql3/statements/UseStatement.java b/src/java/org/apache/cassandra/cql3/statements/UseStatement.java
index 0504f43..c7f0c24 100644
--- a/src/java/org/apache/cassandra/cql3/statements/UseStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/UseStatement.java
@@ -19,8 +19,10 @@
 
 import org.apache.cassandra.audit.AuditLogContext;
 import org.apache.cassandra.audit.AuditLogEntryType;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLStatement;
 import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.UnauthorizedException;
 import org.apache.cassandra.transport.messages.ResultMessage;
@@ -29,6 +31,9 @@
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
 
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class UseStatement extends CQLStatement.Raw implements CQLStatement
 {
     private final String keyspace;
@@ -48,12 +53,15 @@
         state.validateLogin();
     }
 
+    @Override
     public void validate(ClientState state) throws InvalidRequestException
     {
+        checkTrue(DatabaseDescriptor.getUseStatementsEnabled(), "USE statements prohibited. (see use_statements_enabled in cassandra.yaml)");
     }
 
     public ResultMessage execute(QueryState state, QueryOptions options, long queryStartNanoTime) throws InvalidRequestException
     {
+        QueryProcessor.metrics.useStatementsExecuted.inc();
         state.getClientState().setKeyspace(keyspace);
         return new ResultMessage.SetKeyspace(keyspace);
     }
@@ -62,7 +70,7 @@
     {
         // In production, internal queries are exclusively on the system keyspace and 'use' is thus useless
         // but for some unit tests we need to set the keyspace (e.g. for tests with DROP INDEX)
-        return execute(state, options, System.nanoTime());
+        return execute(state, options, nanoTime());
     }
     
     @Override
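
UseStatement now honours the use_statements_enabled switch named in the error message above and counts executions through QueryProcessor metrics. A CQL sketch (keyspace name illustrative):

    USE my_keyspace;

With use_statements_enabled set to false in cassandra.yaml the statement fails validation; with it enabled it switches the session keyspace as before and increments the useStatementsExecuted counter.
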
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java
index f208e4a..87377d7 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java
@@ -53,11 +53,13 @@
     private final HashSet<String> clientWarnings = new HashSet<>();
 
     private final KeyspaceAttributes attrs;
+    private final boolean ifExists;
 
-    public AlterKeyspaceStatement(String keyspaceName, KeyspaceAttributes attrs)
+    public AlterKeyspaceStatement(String keyspaceName, KeyspaceAttributes attrs, boolean ifExists)
     {
         super(keyspaceName);
         this.attrs = attrs;
+        this.ifExists = ifExists;
     }
 
     public Keyspaces apply(Keyspaces schema)
@@ -66,14 +68,18 @@
 
         KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
         if (null == keyspace)
-            throw ire("Keyspace '%s' doesn't exist", keyspaceName);
+        {
+            if (!ifExists)
+                throw ire("Keyspace '%s' doesn't exist", keyspaceName);
+            return schema;
+        }
 
         KeyspaceMetadata newKeyspace = keyspace.withSwapped(attrs.asAlteredKeyspaceParams(keyspace.params));
 
         if (newKeyspace.params.replication.klass.equals(LocalStrategy.class))
             throw ire("Unable to use given strategy class: LocalStrategy is reserved for internal use.");
 
-        newKeyspace.params.validate(keyspaceName);
+        newKeyspace.params.validate(keyspaceName, state);
 
         validateNoRangeMovements();
         validateTransientReplication(keyspace.createReplicationStrategy(), newKeyspace.createReplicationStrategy());
@@ -195,16 +201,18 @@
     {
         private final String keyspaceName;
         private final KeyspaceAttributes attrs;
+        private final boolean ifExists;
 
-        public Raw(String keyspaceName, KeyspaceAttributes attrs)
+        public Raw(String keyspaceName, KeyspaceAttributes attrs, boolean ifExists)
         {
             this.keyspaceName = keyspaceName;
             this.attrs = attrs;
+            this.ifExists = ifExists;
         }
 
         public AlterKeyspaceStatement prepare(ClientState state)
         {
-            return new AlterKeyspaceStatement(keyspaceName, attrs);
+            return new AlterKeyspaceStatement(keyspaceName, attrs, ifExists);
         }
     }
 }
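
AlterKeyspaceStatement gains an ifExists flag, so ALTER KEYSPACE IF EXISTS becomes a no-op when the keyspace is missing instead of an error, and replication parameters are now validated with the client state so guardrails can participate. CQL sketch (keyspace name and replication settings are illustrative):

    -- no error even if analytics_ks has not been created yet
    ALTER KEYSPACE IF EXISTS analytics_ks
      WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};
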
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/AlterSchemaStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/AlterSchemaStatement.java
index 124d04c..fdc4921 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/AlterSchemaStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/AlterSchemaStatement.java
@@ -38,15 +38,19 @@
 abstract public class AlterSchemaStatement implements CQLStatement.SingleKeyspaceCqlStatement, SchemaTransformation
 {
     protected final String keyspaceName; // name of the keyspace affected by the statement
+    protected ClientState state;
 
     protected AlterSchemaStatement(String keyspaceName)
     {
         this.keyspaceName = keyspaceName;
     }
 
-    public final void validate(ClientState state)
+    public void validate(ClientState state)
     {
-        // no-op; validation is performed while executing the statement, in apply()
+        // validation is performed while executing the statement, in apply()
+
+        // Cache our ClientState for use by guardrails
+        this.state = state;
     }
 
     public ResultMessage execute(QueryState state, QueryOptions options, long queryStartNanoTime)
@@ -105,11 +109,11 @@
 
         validateKeyspaceName();
 
-        KeyspacesDiff diff = MigrationManager.announce(this, locally);
+        SchemaTransformationResult result = Schema.instance.transform(this, locally);
 
-        clientWarnings(diff).forEach(ClientWarn.instance::warn);
+        clientWarnings(result.diff).forEach(ClientWarn.instance::warn);
 
-        if (diff.isEmpty())
+        if (result.diff.isEmpty())
             return new ResultMessage.Void();
 
         /*
@@ -121,9 +125,9 @@
          */
         AuthenticatedUser user = state.getClientState().getUser();
         if (null != user && !user.isAnonymous())
-            createdResources(diff).forEach(r -> grantPermissionsOnResource(r, user));
+            createdResources(result.diff).forEach(r -> grantPermissionsOnResource(r, user));
 
-        return new ResultMessage.SchemaChange(schemaChangeEvent(diff));
+        return new ResultMessage.SchemaChange(schemaChangeEvent(result.diff));
     }
 
     private void validateKeyspaceName()
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java
index f361b64..08c5f04 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.cql3.statements.schema;
 
-import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -42,6 +41,7 @@
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.QualifiedName;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.AbstractType;
 
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -77,14 +77,16 @@
 public abstract class AlterTableStatement extends AlterSchemaStatement
 {
     protected final String tableName;
+    private final boolean ifExists;
 
-    public AlterTableStatement(String keyspaceName, String tableName)
+    public AlterTableStatement(String keyspaceName, String tableName, boolean ifExists)
     {
         super(keyspaceName);
         this.tableName = tableName;
+        this.ifExists = ifExists;
     }
 
-    public Keyspaces apply(Keyspaces schema) throws UnknownHostException
+    public Keyspaces apply(Keyspaces schema)
     {
         KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
 
@@ -93,7 +95,11 @@
                             : keyspace.getTableOrViewNullable(tableName);
 
         if (null == table)
-            throw ire("Table '%s.%s' doesn't exist", keyspaceName, tableName);
+        {
+            if (!ifExists)
+                throw ire("Table '%s.%s' doesn't exist", keyspaceName, tableName);
+            return schema;
+        }
 
         if (table.isView())
             throw ire("Cannot use ALTER TABLE on a materialized view; use ALTER MATERIALIZED VIEW instead");
@@ -122,18 +128,18 @@
         return format("%s (%s, %s)", getClass().getSimpleName(), keyspaceName, tableName);
     }
 
-    abstract KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table) throws UnknownHostException;
+    abstract KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table);
 
     /**
-     * ALTER TABLE <table> ALTER <column> TYPE <newtype>;
+     * ALTER TABLE [IF EXISTS] <table> ALTER <column> TYPE <newtype>;
      *
      * No longer supported.
      */
     public static class AlterColumn extends AlterTableStatement
     {
-        AlterColumn(String keyspaceName, String tableName)
+        AlterColumn(String keyspaceName, String tableName, boolean ifTableExists)
         {
-            super(keyspaceName, tableName);
+            super(keyspaceName, tableName, ifTableExists);
         }
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
@@ -143,8 +149,8 @@
     }
 
     /**
-     * ALTER TABLE <table> ADD <column> <newtype>
-     * ALTER TABLE <table> ADD (<column> <newtype>, <column1> <newtype1>, ... <columnn> <newtypen>)
+     * ALTER TABLE [IF EXISTS] <table> ADD [IF NOT EXISTS] <column> <newtype>
+     * ALTER TABLE [IF EXISTS] <table> ADD [IF NOT EXISTS] (<column> <newtype>, <column1> <newtype1>, ... <columnn> <newtypen>)
      */
     private static class AddColumns extends AlterTableStatement
     {
@@ -163,18 +169,29 @@
         }
 
         private final Collection<Column> newColumns;
+        private final boolean ifColumnNotExists;
 
-        private AddColumns(String keyspaceName, String tableName, Collection<Column> newColumns)
+        private AddColumns(String keyspaceName, String tableName, Collection<Column> newColumns, boolean ifTableExists, boolean ifColumnNotExists)
         {
-            super(keyspaceName, tableName);
+            super(keyspaceName, tableName, ifTableExists);
             this.newColumns = newColumns;
+            this.ifColumnNotExists = ifColumnNotExists;
+        }
+
+        @Override
+        public void validate(ClientState state)
+        {
+            super.validate(state);
         }
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
         {
             TableMetadata.Builder tableBuilder = table.unbuild();
             Views.Builder viewsBuilder = keyspace.views.unbuild();
-            newColumns.forEach(c -> addColumn(keyspace, table, c, tableBuilder, viewsBuilder));
+            newColumns.forEach(c -> addColumn(keyspace, table, c, ifColumnNotExists, tableBuilder, viewsBuilder));
+
+            Guardrails.columnsPerTable.guard(tableBuilder.numColumns(), tableName, false, state);
+
             TableMetadata tableMetadata = tableBuilder.build();
             tableMetadata.validate();
 
@@ -185,6 +202,7 @@
         private void addColumn(KeyspaceMetadata keyspace,
                                TableMetadata table,
                                Column column,
+                               boolean ifColumnNotExists,
                                TableMetadata.Builder tableBuilder,
                                Views.Builder viewsBuilder)
         {
@@ -192,8 +210,11 @@
             AbstractType<?> type = column.type.prepare(keyspaceName, keyspace.types).getType();
             boolean isStatic = column.isStatic;
 
-            if (null != tableBuilder.getColumn(name))
-                throw ire("Column with name '%s' already exists", name);
+            if (null != tableBuilder.getColumn(name)) {
+                if (!ifColumnNotExists)
+                    throw ire("Column with name '%s' already exists", name);
+                return;
+            }
 
             if (table.isCompactTable())
                 throw ire("Cannot add new column to a COMPACT STORAGE table");
@@ -206,7 +227,7 @@
             {
                 // After #8099, not safe to re-add columns of incompatible types - until *maybe* deser logic with dropped
                 // columns is pushed deeper down the line. The latter would still be problematic in cases of schema races.
-                if (!type.isValueCompatibleWith(droppedColumn.type))
+                if (!type.isSerializationCompatibleWith(droppedColumn.type))
                 {
                     throw ire("Cannot re-add previously dropped column '%s' of type %s, incompatible with previous type %s",
                               name,
@@ -247,34 +268,39 @@
     }
 
     /**
-     * ALTER TABLE <table> DROP <column>
-     * ALTER TABLE <table> DROP ( <column>, <column1>, ... <columnn>)
+     * ALTER TABLE [IF EXISTS] <table> DROP [IF EXISTS] <column>
+     * ALTER TABLE [IF EXISTS] <table> DROP [IF EXISTS] ( <column>, <column1>, ... <columnn>)
      */
     // TODO: swap UDT refs with expanded tuples on drop
     private static class DropColumns extends AlterTableStatement
     {
         private final Set<ColumnIdentifier> removedColumns;
+        private final boolean ifColumnExists;
         private final Long timestamp;
 
-        private DropColumns(String keyspaceName, String tableName, Set<ColumnIdentifier> removedColumns, Long timestamp)
+        private DropColumns(String keyspaceName, String tableName, Set<ColumnIdentifier> removedColumns, boolean ifTableExists, boolean ifColumnExists, Long timestamp)
         {
-            super(keyspaceName, tableName);
+            super(keyspaceName, tableName, ifTableExists);
             this.removedColumns = removedColumns;
+            this.ifColumnExists = ifColumnExists;
             this.timestamp = timestamp;
         }
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
         {
             TableMetadata.Builder builder = table.unbuild();
-            removedColumns.forEach(c -> dropColumn(keyspace, table, c, builder));
+            removedColumns.forEach(c -> dropColumn(keyspace, table, c, ifColumnExists, builder));
             return keyspace.withSwapped(keyspace.tables.withSwapped(builder.build()));
         }
 
-        private void dropColumn(KeyspaceMetadata keyspace, TableMetadata table, ColumnIdentifier column, TableMetadata.Builder builder)
+        private void dropColumn(KeyspaceMetadata keyspace, TableMetadata table, ColumnIdentifier column, boolean ifExists, TableMetadata.Builder builder)
         {
             ColumnMetadata currentColumn = table.getColumn(column);
-            if (null == currentColumn)
-                throw ire("Column %s was not found in table '%s'", column, table);
+            if (null == currentColumn) {
+                if (!ifExists)
+                    throw ire("Column %s was not found in table '%s'", column, table);
+                return;
+            }
 
             if (currentColumn.isPrimaryKeyColumn())
                 throw ire("Cannot drop PRIMARY KEY column %s", column);
@@ -313,23 +339,25 @@
     }
 
     /**
-     * ALTER TABLE <table> RENAME <column> TO <column>;
+     * ALTER TABLE [IF EXISTS] <table> RENAME [IF EXISTS] <column> TO <column>;
      */
     private static class RenameColumns extends AlterTableStatement
     {
         private final Map<ColumnIdentifier, ColumnIdentifier> renamedColumns;
+        private final boolean ifColumnsExists;
 
-        private RenameColumns(String keyspaceName, String tableName, Map<ColumnIdentifier, ColumnIdentifier> renamedColumns)
+        private RenameColumns(String keyspaceName, String tableName, Map<ColumnIdentifier, ColumnIdentifier> renamedColumns, boolean ifTableExists, boolean ifColumnsExists)
         {
-            super(keyspaceName, tableName);
+            super(keyspaceName, tableName, ifTableExists);
             this.renamedColumns = renamedColumns;
+            this.ifColumnsExists = ifColumnsExists;
         }
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
         {
             TableMetadata.Builder tableBuilder = table.unbuild();
             Views.Builder viewsBuilder = keyspace.views.unbuild();
-            renamedColumns.forEach((o, n) -> renameColumn(keyspace, table, o, n, tableBuilder, viewsBuilder));
+            renamedColumns.forEach((o, n) -> renameColumn(keyspace, table, o, n, ifColumnsExists, tableBuilder, viewsBuilder));
 
             return keyspace.withSwapped(keyspace.tables.withSwapped(tableBuilder.build()))
                            .withSwapped(viewsBuilder.build());
@@ -339,12 +367,17 @@
                                   TableMetadata table,
                                   ColumnIdentifier oldName,
                                   ColumnIdentifier newName,
+                                  boolean ifColumnsExists,
                                   TableMetadata.Builder tableBuilder,
                                   Views.Builder viewsBuilder)
         {
             ColumnMetadata column = table.getExistingColumn(oldName);
             if (null == column)
-                throw ire("Column %s was not found in table %s", oldName, table);
+            {
+                if (!ifColumnsExists)
+                    throw ire("Column %s was not found in table %s", oldName, table);
+                return;
+            }
 
             if (!column.isPrimaryKeyColumn())
                 throw ire("Cannot rename non PRIMARY KEY column %s", oldName);
@@ -379,18 +412,26 @@
     }
 
     /**
-     * ALTER TABLE <table> WITH <property> = <value>
+     * ALTER TABLE [IF EXISTS] <table> WITH <property> = <value>
      */
     private static class AlterOptions extends AlterTableStatement
     {
         private final TableAttributes attrs;
 
-        private AlterOptions(String keyspaceName, String tableName, TableAttributes attrs)
+        private AlterOptions(String keyspaceName, String tableName, TableAttributes attrs, boolean ifTableExists)
         {
-            super(keyspaceName, tableName);
+            super(keyspaceName, tableName, ifTableExists);
             this.attrs = attrs;
         }
 
+        @Override
+        public void validate(ClientState state)
+        {
+            super.validate(state);
+
+            Guardrails.tableProperties.guard(attrs.updatedProperties(), attrs::removeProperty, state);
+        }
+
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
         {
             attrs.validate();
@@ -415,21 +456,24 @@
                 throw ire("read_repair must be set to 'NONE' for transiently replicated keyspaces");
             }
 
+            if (!params.compression.isEnabled())
+                Guardrails.uncompressedTablesEnabled.ensureEnabled(state);
+
             return keyspace.withSwapped(keyspace.tables.withSwapped(table.withSwapped(params)));
         }
     }
 
 
     /**
-     * ALTER TABLE <table> DROP COMPACT STORAGE
+     * ALTER TABLE [IF EXISTS] <table> DROP COMPACT STORAGE
      */
     private static class DropCompactStorage extends AlterTableStatement
     {
         private static final Logger logger = LoggerFactory.getLogger(AlterTableStatement.class);
         private static final NoSpamLogger noSpamLogger = NoSpamLogger.getLogger(logger, 5L, TimeUnit.MINUTES);
-        private DropCompactStorage(String keyspaceName, String tableName)
+        private DropCompactStorage(String keyspaceName, String tableName, boolean ifTableExists)
         {
-            super(keyspaceName, tableName);
+            super(keyspaceName, tableName, ifTableExists);
         }
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
@@ -527,6 +571,9 @@
         }
 
         private final QualifiedName name;
+        private final boolean ifTableExists;
+        private boolean ifColumnExists;
+        private boolean ifColumnNotExists;
 
         private Kind kind;
 
@@ -543,9 +590,10 @@
         // OPTIONS
         public final TableAttributes attrs = new TableAttributes();
 
-        public Raw(QualifiedName name)
+        public Raw(QualifiedName name, boolean ifTableExists)
         {
             this.name = name;
+            this.ifTableExists = ifTableExists;
         }
 
         public AlterTableStatement prepare(ClientState state)
@@ -555,12 +603,12 @@
 
             switch (kind)
             {
-                case          ALTER_COLUMN: return new AlterColumn(keyspaceName, tableName);
-                case           ADD_COLUMNS: return new AddColumns(keyspaceName, tableName, addedColumns);
-                case          DROP_COLUMNS: return new DropColumns(keyspaceName, tableName, droppedColumns, timestamp);
-                case        RENAME_COLUMNS: return new RenameColumns(keyspaceName, tableName, renamedColumns);
-                case         ALTER_OPTIONS: return new AlterOptions(keyspaceName, tableName, attrs);
-                case  DROP_COMPACT_STORAGE: return new DropCompactStorage(keyspaceName, tableName);
+                case          ALTER_COLUMN: return new AlterColumn(keyspaceName, tableName, ifTableExists);
+                case           ADD_COLUMNS: return new AddColumns(keyspaceName, tableName, addedColumns, ifTableExists, ifColumnNotExists);
+                case          DROP_COLUMNS: return new DropColumns(keyspaceName, tableName, droppedColumns, ifTableExists, ifColumnExists, timestamp);
+                case        RENAME_COLUMNS: return new RenameColumns(keyspaceName, tableName, renamedColumns, ifTableExists, ifColumnExists);
+                case         ALTER_OPTIONS: return new AlterOptions(keyspaceName, tableName, attrs, ifTableExists);
+                case  DROP_COMPACT_STORAGE: return new DropCompactStorage(keyspaceName, tableName, ifTableExists);
             }
 
             throw new AssertionError();
@@ -583,6 +631,16 @@
             droppedColumns.add(name);
         }
 
+        public void ifColumnNotExists(boolean ifNotExists)
+        {
+            ifColumnNotExists = ifNotExists;
+        }
+
+        public void ifColumnExists(boolean ifExists)
+        {
+            ifColumnExists = ifExists;
+        }
+
         public void dropCompactStorage()
         {
             kind = Kind.DROP_COMPACT_STORAGE;
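
The AlterTableStatement rework adds IF EXISTS / IF NOT EXISTS handling for the table and for individual columns, and wires in guardrails for columns per table, table properties, and uncompressed tables. A CQL sketch of the new conditional forms (table and column names are illustrative):

    ALTER TABLE IF EXISTS ks.events ADD IF NOT EXISTS source_ip inet;
    ALTER TABLE IF EXISTS ks.events DROP IF EXISTS legacy_flag;
    ALTER TABLE IF EXISTS ks.events RENAME IF EXISTS ts TO event_ts;
    ALTER TABLE IF EXISTS ks.events WITH comment = 'event log';

Each statement succeeds silently when the named table or column is absent (or, for ADD, already present), matching the new return-schema-unchanged branches above.
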
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/AlterTypeStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/AlterTypeStatement.java
index a9887c4..9c3be11 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/AlterTypeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/AlterTypeStatement.java
@@ -27,6 +27,7 @@
 import org.apache.cassandra.audit.AuditLogEntryType;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.schema.KeyspaceMetadata;
@@ -49,16 +50,18 @@
 public abstract class AlterTypeStatement extends AlterSchemaStatement
 {
     protected final String typeName;
+    protected final boolean ifExists;
 
-    public AlterTypeStatement(String keyspaceName, String typeName)
+    public AlterTypeStatement(String keyspaceName, String typeName, boolean ifExists)
     {
         super(keyspaceName);
+        this.ifExists = ifExists;
         this.typeName = typeName;
     }
 
     public void authorize(ClientState client)
     {
-        client.ensureKeyspacePermission(keyspaceName, Permission.ALTER);
+        client.ensureAllTablesPermission(keyspaceName, Permission.ALTER);
     }
 
     SchemaChange schemaChangeEvent(Keyspaces.KeyspacesDiff diff)
@@ -75,7 +78,11 @@
                       : keyspace.types.getNullable(bytes(typeName));
 
         if (null == type)
-            throw ire("Type %s.%s doesn't exist", keyspaceName, typeName);
+        {
+            if (!ifExists)
+                throw ire("Type %s.%s doesn't exist", keyspaceName, typeName);
+            return schema;
+        }
 
         return schema.withAddedOrUpdated(keyspace.withUpdatedUserType(apply(keyspace, type)));
     }
@@ -97,18 +104,35 @@
     {
         private final FieldIdentifier fieldName;
         private final CQL3Type.Raw type;
+        private final boolean ifFieldNotExists;
 
-        private AddField(String keyspaceName, String typeName, FieldIdentifier fieldName, CQL3Type.Raw type)
+        private ClientState state;
+
+        private AddField(String keyspaceName, String typeName, FieldIdentifier fieldName, CQL3Type.Raw type, boolean ifExists, boolean ifFieldNotExists)
         {
-            super(keyspaceName, typeName);
+            super(keyspaceName, typeName, ifExists);
             this.fieldName = fieldName;
+            this.ifFieldNotExists = ifFieldNotExists;
             this.type = type;
         }
 
+        @Override
+        public void validate(ClientState state)
+        {
+            super.validate(state);
+
+            // save the client state to use it for guardrails validation in #apply
+            this.state = state;
+        }
+
         UserType apply(KeyspaceMetadata keyspace, UserType userType)
         {
             if (userType.fieldPosition(fieldName) >= 0)
-                throw ire("Cannot add field %s to type %s: a field with name %s already exists", fieldName, userType.getCqlTypeName(), fieldName);
+            {
+                if (!ifFieldNotExists)
+                    throw ire("Cannot add field %s to type %s: a field with name %s already exists", fieldName, userType.getCqlTypeName(), fieldName);
+                return userType;
+            }
 
             AbstractType<?> fieldType = type.prepare(keyspaceName, keyspace.types).getType();
             if (fieldType.referencesUserType(userType.name))
@@ -122,6 +146,8 @@
                           String.join(", ", transform(tablesWithTypeInPartitionKey, TableMetadata::toString)));
             }
 
+            Guardrails.fieldsPerUDT.guard(userType.size() + 1, userType.getNameAsString(), false, state);
+
             List<FieldIdentifier> fieldNames = new ArrayList<>(userType.fieldNames()); fieldNames.add(fieldName);
             List<AbstractType<?>> fieldTypes = new ArrayList<>(userType.fieldTypes()); fieldTypes.add(fieldType);
 
@@ -141,10 +167,12 @@
     private static final class RenameFields extends AlterTypeStatement
     {
         private final Map<FieldIdentifier, FieldIdentifier> renamedFields;
+        private final boolean ifFieldExists;
 
-        private RenameFields(String keyspaceName, String typeName, Map<FieldIdentifier, FieldIdentifier> renamedFields)
+        private RenameFields(String keyspaceName, String typeName, Map<FieldIdentifier, FieldIdentifier> renamedFields, boolean ifExists, boolean ifFieldExists)
         {
-            super(keyspaceName, typeName);
+            super(keyspaceName, typeName, ifExists);
+            this.ifFieldExists = ifFieldExists;
             this.renamedFields = renamedFields;
         }
 
@@ -170,7 +198,11 @@
             {
                 int idx = userType.fieldPosition(oldName);
                 if (idx < 0)
-                    throw ire("Unkown field %s in user type %s", oldName, keyspaceName, userType.getCqlTypeName());
+                {
+                    if (!ifFieldExists)
+                        throw ire("Unkown field %s in user type %s", oldName, userType.getCqlTypeName());
+                    return;
+                }
                 fieldNames.set(idx, newName);
             });
 
@@ -186,9 +218,9 @@
 
     private static final class AlterField extends AlterTypeStatement
     {
-        private AlterField(String keyspaceName, String typeName)
+        private AlterField(String keyspaceName, String typeName, boolean ifExists)
         {
-            super(keyspaceName, typeName);
+            super(keyspaceName, typeName, ifExists);
         }
 
         UserType apply(KeyspaceMetadata keyspace, UserType userType)
@@ -205,6 +237,9 @@
         }
 
         private final UTName name;
+        private final boolean ifExists;
+        private boolean ifFieldExists;
+        private boolean ifFieldNotExists;
 
         private Kind kind;
 
@@ -215,8 +250,9 @@
         // RENAME
         private final Map<FieldIdentifier, FieldIdentifier> renamedFields = new HashMap<>();
 
-        public Raw(UTName name)
+        public Raw(UTName name, boolean ifExists)
         {
+            this.ifExists = ifExists;
             this.name = name;
         }
 
@@ -227,9 +263,9 @@
 
             switch (kind)
             {
-                case     ADD_FIELD: return new AddField(keyspaceName, typeName, newFieldName, newFieldType);
-                case RENAME_FIELDS: return new RenameFields(keyspaceName, typeName, renamedFields);
-                case   ALTER_FIELD: return new AlterField(keyspaceName, typeName);
+                case     ADD_FIELD: return new AddField(keyspaceName, typeName, newFieldName, newFieldType, ifExists, ifFieldNotExists);
+                case RENAME_FIELDS: return new RenameFields(keyspaceName, typeName, renamedFields, ifExists, ifFieldExists);
+                case   ALTER_FIELD: return new AlterField(keyspaceName, typeName, ifExists);
             }
 
             throw new AssertionError();
@@ -242,12 +278,22 @@
             newFieldType = type;
         }
 
+        public void ifFieldNotExists(boolean ifNotExists)
+        {
+            this.ifFieldNotExists = ifNotExists;
+        }
+
         public void rename(FieldIdentifier from, FieldIdentifier to)
         {
             kind = Kind.RENAME_FIELDS;
             renamedFields.put(from, to);
         }
 
+        public void ifFieldExists(boolean ifExists)
+        {
+            this.ifFieldExists = ifExists;
+        }
+
         public void alter(FieldIdentifier name, CQL3Type.Raw type)
         {
             kind = Kind.ALTER_FIELD;
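
AlterTypeStatement follows the same pattern for user-defined types: IF EXISTS on the type, IF NOT EXISTS / IF EXISTS on individual fields, plus the fields-per-UDT guardrail when a field is added. CQL sketch (type and field names are illustrative):

    ALTER TYPE IF EXISTS ks.address ADD IF NOT EXISTS country text;
    ALTER TYPE IF EXISTS ks.address RENAME IF EXISTS zip TO postal_code;
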
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/AlterViewStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/AlterViewStatement.java
index 3493eb0..7e707f4 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/AlterViewStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/AlterViewStatement.java
@@ -22,6 +22,7 @@
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.cql3.CQLStatement;
 import org.apache.cassandra.cql3.QualifiedName;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.*;
 import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
 import org.apache.cassandra.service.ClientState;
@@ -33,12 +34,24 @@
 {
     private final String viewName;
     private final TableAttributes attrs;
+    private ClientState state;
+    private final boolean ifExists;
 
-    public AlterViewStatement(String keyspaceName, String viewName, TableAttributes attrs)
+    public AlterViewStatement(String keyspaceName, String viewName, TableAttributes attrs, boolean ifExists)
     {
         super(keyspaceName);
         this.viewName = viewName;
         this.attrs = attrs;
+        this.ifExists = ifExists;
+    }
+
+    @Override
+    public void validate(ClientState state)
+    {
+        super.validate(state);
+
+        // save the client state to use it for guardrails validation in #apply
+        this.state = state;
     }
 
     public Keyspaces apply(Keyspaces schema)
@@ -50,10 +63,16 @@
                           : keyspace.views.getNullable(viewName);
 
         if (null == view)
+        {
+            if (ifExists) return schema;
             throw ire("Materialized view '%s.%s' doesn't exist", keyspaceName, viewName);
+        }
 
         attrs.validate();
 
+        // Guardrails on table properties
+        Guardrails.tableProperties.guard(attrs.updatedProperties(), attrs::removeProperty, state);
+
         TableParams params = attrs.asAlteredTableParams(view.metadata.params);
 
         if (params.gcGraceSeconds == 0)
@@ -102,17 +121,19 @@
     {
         private final QualifiedName name;
         private final TableAttributes attrs;
+        private final boolean ifExists;
 
-        public Raw(QualifiedName name, TableAttributes attrs)
+        public Raw(QualifiedName name, TableAttributes attrs, boolean ifExists)
         {
             this.name = name;
             this.attrs = attrs;
+            this.ifExists = ifExists;
         }
 
         public AlterViewStatement prepare(ClientState state)
         {
             String keyspaceName = name.hasKeyspace() ? name.getKeyspace() : state.getKeyspace();
-            return new AlterViewStatement(keyspaceName, name.getName(), attrs);
+            return new AlterViewStatement(keyspaceName, name.getName(), attrs, ifExists);
         }
     }
 }
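
AlterViewStatement likewise adds IF EXISTS and checks altered view options against the table-properties guardrail before applying them. CQL sketch (names illustrative):

    ALTER MATERIALIZED VIEW IF EXISTS ks.events_by_type
      WITH comment = 'per-type view of ks.events';
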
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateAggregateStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateAggregateStatement.java
index e567021..0550515 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateAggregateStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateAggregateStatement.java
@@ -112,9 +112,9 @@
 
         List<AbstractType<?>> argumentTypes =
             rawArgumentTypes.stream()
-                            .map(t -> t.prepare(keyspaceName, keyspace.types).getType())
+                            .map(t -> t.prepare(keyspaceName, keyspace.types).getType().udfType())
                             .collect(toList());
-        AbstractType<?> stateType = rawStateType.prepare(keyspaceName, keyspace.types).getType();
+        AbstractType<?> stateType = rawStateType.prepare(keyspaceName, keyspace.types).getType().udfType();
         List<AbstractType<?>> stateFunctionArguments = Lists.newArrayList(concat(singleton(stateType), argumentTypes));
 
         Function stateFunction =
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateFunctionStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateFunctionStatement.java
index 3a1eb9f..adb8f40 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateFunctionStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateFunctionStatement.java
@@ -26,8 +26,6 @@
 
 import org.apache.cassandra.audit.AuditLogContext;
 import org.apache.cassandra.audit.AuditLogEntryType;
-import org.apache.cassandra.auth.FunctionResource;
-import org.apache.cassandra.auth.IResource;
 import org.apache.cassandra.auth.*;
 import org.apache.cassandra.cql3.CQL3Type;
 import org.apache.cassandra.cql3.CQLStatement;
@@ -111,9 +109,9 @@
 
         List<AbstractType<?>> argumentTypes =
             rawArgumentTypes.stream()
-                            .map(t -> t.prepare(keyspaceName, keyspace.types).getType())
+                            .map(t -> t.prepare(keyspaceName, keyspace.types).getType().udfType())
                             .collect(toList());
-        AbstractType<?> returnType = rawReturnType.prepare(keyspaceName, keyspace.types).getType();
+        AbstractType<?> returnType = rawReturnType.prepare(keyspaceName, keyspace.types).getType().udfType();
 
         UDFunction function =
             UDFunction.create(new FunctionName(keyspaceName, functionName),
@@ -176,7 +174,7 @@
     {
         FunctionName name = new FunctionName(keyspaceName, functionName);
 
-        if (Schema.instance.findFunction(name, Lists.transform(rawArgumentTypes, t -> t.prepare(keyspaceName).getType())).isPresent() && orReplace)
+        if (Schema.instance.findFunction(name, Lists.transform(rawArgumentTypes, t -> t.prepare(keyspaceName).getType().udfType())).isPresent() && orReplace)
             client.ensurePermission(Permission.ALTER, FunctionResource.functionFromCql(keyspaceName, functionName, rawArgumentTypes));
         else
             client.ensurePermission(Permission.CREATE, FunctionResource.keyspace(keyspaceName));
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateIndexStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateIndexStatement.java
index d501423..7a3a41e 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateIndexStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateIndexStatement.java
@@ -19,6 +19,7 @@
 
 import java.util.*;
 
+import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
@@ -31,6 +32,7 @@
 import org.apache.cassandra.cql3.QualifiedName;
 import org.apache.cassandra.cql3.statements.schema.IndexTarget.Type;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.MapType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.index.sasi.SASIIndex;
@@ -52,6 +54,8 @@
     private final IndexAttributes attrs;
     private final boolean ifNotExists;
 
+    private ClientState state;
+
     public CreateIndexStatement(String keyspaceName,
                                 String tableName,
                                 String indexName,
@@ -67,11 +71,22 @@
         this.ifNotExists = ifNotExists;
     }
 
+    @Override
+    public void validate(ClientState state)
+    {
+        super.validate(state);
+
+        // save the client state to use it for guardrails validation in #apply
+        this.state = state;
+    }
+
     public Keyspaces apply(Keyspaces schema)
     {
         attrs.validate();
 
-        if (attrs.isCustom && attrs.customClass.equals(SASIIndex.class.getName()) && !DatabaseDescriptor.getEnableSASIIndexes())
+        Guardrails.createSecondaryIndexesEnabled.ensureEnabled("Creating secondary indexes", state);
+
+        if (attrs.isCustom && attrs.customClass.equals(SASIIndex.class.getName()) && !DatabaseDescriptor.getSASIIndexesEnabled())
             throw new InvalidRequestException("SASI indexes are disabled. Enable in cassandra.yaml to use.");
 
         KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
@@ -99,6 +114,14 @@
         if (Keyspace.open(table.keyspace).getReplicationStrategy().hasTransientReplicas())
             throw new InvalidRequestException("Secondary indexes are not supported on transiently replicated keyspaces");
 
+        // guardrails to limit number of secondary indexes per table.
+        Guardrails.secondaryIndexesPerTable.guard(table.indexes.size() + 1,
+                                                  Strings.isNullOrEmpty(indexName)
+                                                  ? String.format("on table %s", table.name)
+                                                  : String.format("%s on table %s", indexName, table.name),
+                                                  false,
+                                                  state);
+
         List<IndexTarget> indexTargets = Lists.newArrayList(transform(rawIndexTargets, t -> t.prepare(table)));
 
         if (indexTargets.isEmpty() && !attrs.isCustom)
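
CreateIndexStatement now checks two guardrails: whether creating secondary indexes is enabled at all, and how many indexes the target table would end up with; the SASI switch is also read through the renamed getSASIIndexesEnabled accessor. CQL sketch (index, table, and column names are illustrative):

    CREATE INDEX IF NOT EXISTS events_by_source ON ks.events (source_ip);

When either guardrail trips for the issuing client, the statement warns or fails (depending on the configured thresholds) before any index is added to the schema.
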
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java
index 9aa27ce..dc82f93 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java
@@ -33,6 +33,7 @@
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLStatement;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.exceptions.AlreadyExistsException;
 import org.apache.cassandra.locator.LocalStrategy;
 import org.apache.cassandra.schema.KeyspaceMetadata;
@@ -79,7 +80,7 @@
         if (keyspace.params.replication.klass.equals(LocalStrategy.class))
             throw ire("Unable to use given strategy class: LocalStrategy is reserved for internal use.");
 
-        keyspace.params.validate(keyspaceName);
+        keyspace.params.validate(keyspaceName, state);
         return schema.withAddedOrUpdated(keyspace);
     }
 
@@ -111,8 +112,18 @@
     }
 
     @Override
+    public void validate(ClientState state)
+    {
+        super.validate(state);
+
+        // Guardrail on number of keyspaces
+        Guardrails.keyspaces.guard(Schema.instance.getUserKeyspaces().size() + 1, keyspaceName, false, state);
+    }
+
+    @Override
     Set<String> clientWarnings(KeyspacesDiff diff)
     {
+        // this threshold is deprecated; it will be replaced by the guardrail used in #validate(ClientState)
         int keyspaceCount = Schema.instance.getKeyspaces().size();
         if (keyspaceCount > DatabaseDescriptor.keyspaceCountWarnThreshold())
         {
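
CreateKeyspaceStatement validation now guards the total number of user keyspaces (counting the one being created) in addition to the older warn-threshold client warning. CQL sketch (keyspace name and replication illustrative):

    CREATE KEYSPACE IF NOT EXISTS analytics_ks
      WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};
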
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateTableStatement.java
index 1339ba3..1a62538 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateTableStatement.java
@@ -33,6 +33,8 @@
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.AlreadyExistsException;
 import org.apache.cassandra.schema.*;
@@ -115,9 +117,40 @@
             throw ire("read_repair must be set to 'NONE' for transiently replicated keyspaces");
         }
 
+        if (!table.params.compression.isEnabled())
+            Guardrails.uncompressedTablesEnabled.ensureEnabled(state);
+
         return schema.withAddedOrUpdated(keyspace.withSwapped(keyspace.tables.with(table)));
     }
 
+    @Override
+    public void validate(ClientState state)
+    {
+        super.validate(state);
+
+        // Guardrail on table properties
+        Guardrails.tableProperties.guard(attrs.updatedProperties(), attrs::removeProperty, state);
+
+        // Guardrail on columns per table
+        Guardrails.columnsPerTable.guard(rawColumns.size(), tableName, false, state);
+
+        // Guardrail on number of tables
+        if (Guardrails.tables.enabled(state))
+        {
+            int totalUserTables = Schema.instance.getUserKeyspaces()
+                                                 .stream()
+                                                 .map(ksm -> ksm.name)
+                                                 .map(Keyspace::open)
+                                                 .mapToInt(keyspace -> keyspace.getColumnFamilyStores().size())
+                                                 .sum();
+            Guardrails.tables.guard(totalUserTables + 1, tableName, false, state);
+        }
+
+        // Guardrail to check whether creation of new COMPACT STORAGE tables is allowed
+        if (useCompactStorage)
+            Guardrails.compactTablesEnabled.ensureEnabled(state);
+    }
+
     SchemaChange schemaChangeEvent(KeyspacesDiff diff)
     {
         return new SchemaChange(Change.CREATED, Target.TABLE, keyspaceName, tableName);
@@ -125,7 +158,7 @@
 
     public void authorize(ClientState client)
     {
-        client.ensureKeyspacePermission(keyspaceName, Permission.CREATE);
+        client.ensureAllTablesPermission(keyspaceName, Permission.CREATE);
     }
 
     @Override
@@ -372,6 +405,7 @@
     @Override
     public Set<String> clientWarnings(KeyspacesDiff diff)
     {
+        // this threshold is deprecated; it will be replaced by the guardrail used in #validate(ClientState)
         int tableCount = Schema.instance.getNumberOfTables();
         if (tableCount > DatabaseDescriptor.tableCountWarnThreshold())
         {
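
CreateTableStatement validation now runs several guardrails before the schema change is applied: table properties, columns per table, total number of user tables, COMPACT STORAGE usage, and (in apply()) tables created with compression disabled. CQL sketch of a statement that would exercise the compression guardrail (names and options illustrative):

    CREATE TABLE IF NOT EXISTS ks.raw_blobs (
        id uuid PRIMARY KEY,
        payload blob
    ) WITH compression = {'enabled': 'false'};

If the guardrail permitting uncompressed tables is switched off for the issuing client, the statement is rejected; otherwise it behaves as before.
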
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateTypeStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateTypeStatement.java
index 7c1717e..e015c34 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateTypeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateTypeStatement.java
@@ -26,6 +26,7 @@
 import org.apache.cassandra.cql3.CQLStatement;
 import org.apache.cassandra.cql3.FieldIdentifier;
 import org.apache.cassandra.cql3.UTName;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.schema.KeyspaceMetadata;
@@ -61,6 +62,14 @@
         this.ifNotExists = ifNotExists;
     }
 
+    @Override
+    public void validate(ClientState state)
+    {
+        super.validate(state);
+
+        Guardrails.fieldsPerUDT.guard(fieldNames.size(), typeName, false, state);
+    }
+
     public Keyspaces apply(Keyspaces schema)
     {
         KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
@@ -106,7 +115,7 @@
 
     public void authorize(ClientState client)
     {
-        client.ensureKeyspacePermission(keyspaceName, Permission.CREATE);
+        client.ensureAllTablesPermission(keyspaceName, Permission.CREATE);
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateViewStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateViewStatement.java
index 39a4c20..145c8fc 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateViewStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateViewStatement.java
@@ -20,6 +20,7 @@
 import java.util.*;
 
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 import org.apache.cassandra.audit.AuditLogContext;
@@ -31,6 +32,7 @@
 import org.apache.cassandra.cql3.selection.RawSelector;
 import org.apache.cassandra.cql3.selection.Selectable;
 import org.apache.cassandra.cql3.statements.StatementType;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.ReversedType;
 import org.apache.cassandra.db.view.View;
@@ -65,6 +67,8 @@
 
     private final boolean ifNotExists;
 
+    private ClientState state;
+
     public CreateViewStatement(String keyspaceName,
                                String tableName,
                                String viewName,
@@ -96,9 +100,18 @@
         this.ifNotExists = ifNotExists;
     }
 
+    @Override
+    public void validate(ClientState state)
+    {
+        super.validate(state);
+
+        // save the client state so it can be used for guardrails validation in #apply
+        this.state = state;
+    }
+
     public Keyspaces apply(Keyspaces schema)
     {
-        if (!DatabaseDescriptor.getEnableMaterializedViews())
+        if (!DatabaseDescriptor.getMaterializedViewsEnabled())
             throw ire("Materialized views are disabled. Enable in cassandra.yaml to use.");
 
         /*
@@ -137,6 +150,16 @@
         if (table.isView())
             throw ire("Materialized views cannot be created against other materialized views");
 
+        // Guardrails on table properties
+        Guardrails.tableProperties.guard(attrs.updatedProperties(), attrs::removeProperty, state);
+
+        // Guardrail to limit the number of materialized views per table
+        Iterable<ViewMetadata> tableViews = keyspace.views.forTable(table.id);
+        Guardrails.materializedViewsPerTable.guard(Iterables.size(tableViews) + 1,
+                                                   String.format("%s on table %s", viewName, table.name),
+                                                   false,
+                                                   state);
+
         if (table.params.gcGraceSeconds == 0)
         {
             throw ire("Cannot create materialized view '%s' for base table " +
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/DropAggregateStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/DropAggregateStatement.java
index 7302158..0cb1cbe 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/DropAggregateStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/DropAggregateStatement.java
@@ -148,7 +148,7 @@
     {
         return arguments.stream()
                         .map(t -> t.prepare(keyspaceName, types))
-                        .map(CQL3Type::getType)
+                        .map(t -> t.getType().udfType())
                         .collect(toList());
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/DropFunctionStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/DropFunctionStatement.java
index 99bfd64..d9d637d 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/DropFunctionStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/DropFunctionStatement.java
@@ -156,7 +156,7 @@
     {
         return arguments.stream()
                         .map(t -> t.prepare(keyspaceName, types))
-                        .map(CQL3Type::getType)
+                        .map(t -> t.getType().udfType())
                         .collect(toList());
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/DropIndexStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/DropIndexStatement.java
index 2186470..24b372d 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/DropIndexStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/DropIndexStatement.java
@@ -22,7 +22,6 @@
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.cql3.CQLStatement;
 import org.apache.cassandra.cql3.QualifiedName;
-import org.apache.cassandra.schema.Diff;
 import org.apache.cassandra.schema.*;
 import org.apache.cassandra.schema.KeyspaceMetadata.KeyspaceDiff;
 import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/DropTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/DropTableStatement.java
index 15c2a03..78c98be 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/DropTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/DropTableStatement.java
@@ -22,6 +22,7 @@
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.cql3.CQLStatement;
 import org.apache.cassandra.cql3.QualifiedName;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.*;
 import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
 import org.apache.cassandra.service.ClientState;
@@ -48,6 +49,8 @@
 
     public Keyspaces apply(Keyspaces schema)
     {
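+        // guardrail that can disallow DROP (and TRUNCATE) TABLE operations entirely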
+        Guardrails.dropTruncateTableEnabled.ensureEnabled(state);
+
         KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
 
         TableMetadata table = null == keyspace
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/DropTypeStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/DropTypeStatement.java
index 6cda7ba..d188bdb 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/DropTypeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/DropTypeStatement.java
@@ -120,7 +120,7 @@
 
     public void authorize(ClientState client)
     {
-        client.ensureKeyspacePermission(keyspaceName, Permission.DROP);
+        client.ensureAllTablesPermission(keyspaceName, Permission.DROP);
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/TableAttributes.java b/src/java/org/apache/cassandra/cql3/statements/schema/TableAttributes.java
index 126e6d7..829a53a 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/TableAttributes.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/TableAttributes.java
@@ -21,6 +21,7 @@
 import java.util.Set;
 
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
 
 import org.apache.cassandra.cql3.statements.PropertyDefinitions;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -28,6 +29,7 @@
 import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.CompressionParams;
+import org.apache.cassandra.schema.MemtableParams;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableParams;
 import org.apache.cassandra.schema.TableParams.Option;
@@ -83,6 +85,16 @@
         }
     }
 
+    public static Set<String> validKeywords()
+    {
+        return ImmutableSet.copyOf(validKeywords);
+    }
+
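+    // includes obsolete keywords in addition to the valid ones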
+    public static Set<String> allKeywords()
+    {
+        return Sets.union(validKeywords, obsoleteKeywords);
+    }
+
     private TableParams build(TableParams.Builder builder)
     {
         if (hasOption(Option.BLOOM_FILTER_FP_CHANCE))
@@ -110,6 +122,9 @@
             builder.compression(CompressionParams.fromMap(getMap(Option.COMPRESSION)));
         }
 
+        if (hasOption(Option.MEMTABLE))
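+            // 'memtable' selects one of the memtable configurations defined in cassandra.yaml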
+            builder.memtable(MemtableParams.get(getString(Option.MEMTABLE)));
+
         if (hasOption(Option.DEFAULT_TIME_TO_LIVE))
             builder.defaultTimeToLive(getInt(Option.DEFAULT_TIME_TO_LIVE));
 
diff --git a/src/java/org/apache/cassandra/db/AbstractReadQuery.java b/src/java/org/apache/cassandra/db/AbstractReadQuery.java
index ec1a6b1..374d2b2 100644
--- a/src/java/org/apache/cassandra/db/AbstractReadQuery.java
+++ b/src/java/org/apache/cassandra/db/AbstractReadQuery.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.db;
 
+import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.filter.DataLimits;
 import org.apache.cassandra.db.filter.RowFilter;
@@ -102,13 +103,17 @@
         StringBuilder sb = new StringBuilder().append("SELECT ")
                                               .append(columnFilter().toCQLString())
                                               .append(" FROM ")
-                                              .append(metadata().keyspace)
+                                              .append(ColumnIdentifier.maybeQuote(metadata().keyspace))
                                               .append('.')
-                                              .append(metadata().name);
+                                              .append(ColumnIdentifier.maybeQuote(metadata().name));
         appendCQLWhereClause(sb);
 
         if (limits() != DataLimits.NONE)
             sb.append(' ').append(limits());
+
+        // ALLOW FILTERING might not be strictly necessary for this query, but it is harmless to include
+        sb.append(" ALLOW FILTERING");
+
         return sb.toString();
     }
 
diff --git a/src/java/org/apache/cassandra/db/ArrayClustering.java b/src/java/org/apache/cassandra/db/ArrayClustering.java
index a6ee991..53d45e7 100644
--- a/src/java/org/apache/cassandra/db/ArrayClustering.java
+++ b/src/java/org/apache/cassandra/db/ArrayClustering.java
@@ -22,7 +22,7 @@
 
 public class ArrayClustering extends AbstractArrayClusteringPrefix implements Clustering<byte[]>
 {
-    private static final long EMPTY_SIZE = ObjectSizes.measure(new ArrayClustering(EMPTY_VALUES_ARRAY));
+    public static final long EMPTY_SIZE = ObjectSizes.measure(new ArrayClustering(EMPTY_VALUES_ARRAY));
 
     public ArrayClustering(byte[]... values)
     {
diff --git a/src/java/org/apache/cassandra/db/BufferClustering.java b/src/java/org/apache/cassandra/db/BufferClustering.java
index a6dcd1b..e3592e1 100644
--- a/src/java/org/apache/cassandra/db/BufferClustering.java
+++ b/src/java/org/apache/cassandra/db/BufferClustering.java
@@ -19,7 +19,6 @@
 
 import java.nio.ByteBuffer;
 
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.ObjectSizes;
 
 /**
diff --git a/src/java/org/apache/cassandra/db/CassandraKeyspaceWriteHandler.java b/src/java/org/apache/cassandra/db/CassandraKeyspaceWriteHandler.java
index efba11f..f2cf93c 100644
--- a/src/java/org/apache/cassandra/db/CassandraKeyspaceWriteHandler.java
+++ b/src/java/org/apache/cassandra/db/CassandraKeyspaceWriteHandler.java
@@ -18,9 +18,14 @@
 
 package org.apache.cassandra.db;
 
+import java.util.HashSet;
+import java.util.Set;
+
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 
@@ -46,8 +51,7 @@
             CommitLogPosition position = null;
             if (makeDurable)
             {
-                Tracing.trace("Appending to commitlog");
-                position = CommitLog.instance.add(mutation);
+                position = addToCommitLog(mutation);
             }
             return new CassandraWriteContext(group, position);
         }
@@ -61,6 +65,41 @@
         }
     }
 
+    private CommitLogPosition addToCommitLog(Mutation mutation)
+    {
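+        // Strip updates for tables whose memtable asks to skip the commit log before appending the mutation;
+        // returns null if every update in the mutation is skipped.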
+        // Usually either all or none of the updates skip the commit log, so check for those common cases first.
+        boolean allSkipCommitlog = true;
+        boolean noneSkipCommitlog = true;
+        for (PartitionUpdate update : mutation.getPartitionUpdates())
+        {
+            if (update.metadata().params.memtable.factory().writesShouldSkipCommitLog())
+                noneSkipCommitlog = false;
+            else
+                allSkipCommitlog = false;
+        }
+
+        if (!noneSkipCommitlog)
+        {
+            if (allSkipCommitlog)
+                return null;
+            else
+            {
+                Set<TableId> ids = new HashSet<>();
+                for (PartitionUpdate update : mutation.getPartitionUpdates())
+                {
+                    if (update.metadata().params.memtable.factory().writesShouldSkipCommitLog())
+                        ids.add(update.metadata().id);
+                }
+                mutation = mutation.without(ids);
+            }
+        }
+        // Note: It may be a good idea to precalculate none/all for the set of all tables in the keyspace,
+        // or memoize the mutation.getTableIds()->ids map (needs invalidation on schema version change).
+
+        Tracing.trace("Appending to commitlog");
+        return CommitLog.instance.add(mutation);
+    }
+
     @SuppressWarnings("resource") // group is closed when CassandraWriteContext is closed
     private WriteContext createEmptyContext()
     {
diff --git a/src/java/org/apache/cassandra/db/Clustering.java b/src/java/org/apache/cassandra/db/Clustering.java
index 24a5000..dd91fea 100644
--- a/src/java/org/apache/cassandra/db/Clustering.java
+++ b/src/java/org/apache/cassandra/db/Clustering.java
@@ -22,6 +22,7 @@
 import java.nio.ByteBuffer;
 import java.util.List;
 
+import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.db.marshal.ByteArrayAccessor;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
@@ -34,7 +35,7 @@
 
 import static org.apache.cassandra.db.AbstractBufferClusteringPrefix.EMPTY_VALUES_ARRAY;
 
-public interface Clustering<V> extends ClusteringPrefix<V>
+public interface Clustering<V> extends ClusteringPrefix<V>, IMeasurableMemory
 {
     public static final Serializer serializer = new Serializer();
 
@@ -72,7 +73,7 @@
         for (int i = 0; i < size(); i++)
         {
             ColumnMetadata c = metadata.clusteringColumns().get(i);
-            sb.append(i == 0 ? "" : ", ").append(c.type.getString(get(i), accessor()));
+            sb.append(i == 0 ? "" : ", ").append(c.type.toCQLString(bufferAt(i)));
         }
         return sb.toString();
     }
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index 571b86e..3619520 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -17,47 +17,99 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
-import java.util.*;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
 import java.util.Objects;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
 import java.util.regex.Pattern;
-import javax.management.*;
-import javax.management.openmbean.*;
+import java.util.stream.Collectors;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.*;
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.base.Strings;
 import com.google.common.base.Throwables;
-import com.google.common.collect.*;
-import com.google.common.util.concurrent.*;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.RateLimiter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.cache.*;
-import org.apache.cassandra.concurrent.*;
-import org.apache.cassandra.config.*;
+import org.apache.cassandra.cache.CounterCacheKey;
+import org.apache.cassandra.cache.IRowCacheEntry;
+import org.apache.cassandra.cache.RowCacheKey;
+import org.apache.cassandra.cache.RowCacheSentinel;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.FutureTask;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.DurationSpec;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
-import org.apache.cassandra.db.compaction.*;
+import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.compaction.CompactionStrategyManager;
+import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.cassandra.db.compaction.Verifier;
 import org.apache.cassandra.db.filter.ClusteringIndexFilter;
 import org.apache.cassandra.db.filter.DataLimits;
-import org.apache.cassandra.db.streaming.CassandraStreamManager;
-import org.apache.cassandra.db.repair.CassandraTableRepairManager;
-import org.apache.cassandra.db.view.TableViews;
-import org.apache.cassandra.db.lifecycle.*;
+import org.apache.cassandra.db.memtable.Flushing;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.memtable.ShardBoundaries;
+import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.lifecycle.SSTableSet;
+import org.apache.cassandra.db.lifecycle.Tracker;
+import org.apache.cassandra.db.lifecycle.View;
 import org.apache.cassandra.db.partitions.CachedPartition;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.repair.CassandraTableRepairManager;
 import org.apache.cassandra.db.rows.CellPath;
-import org.apache.cassandra.dht.*;
+import org.apache.cassandra.db.streaming.CassandraStreamManager;
+import org.apache.cassandra.db.view.TableViews;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Bounds;
+import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Splitter;
+import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.StartupException;
 import org.apache.cassandra.index.SecondaryIndexManager;
@@ -67,36 +119,72 @@
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SSTable;
+import org.apache.cassandra.io.sstable.SSTableId;
+import org.apache.cassandra.io.sstable.SSTableIdFactory;
 import org.apache.cassandra.io.sstable.SSTableMultiWriter;
-import org.apache.cassandra.io.sstable.format.*;
+import org.apache.cassandra.io.sstable.format.SSTableFormat;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.metrics.Sampler;
 import org.apache.cassandra.metrics.Sampler.Sample;
 import org.apache.cassandra.metrics.Sampler.SamplerType;
 import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.metrics.TopPartitionTracker;
 import org.apache.cassandra.repair.TableRepairManager;
 import org.apache.cassandra.repair.consistent.admin.CleanupSummary;
 import org.apache.cassandra.repair.consistent.admin.PendingStat;
-import org.apache.cassandra.schema.*;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
+import org.apache.cassandra.schema.CompressionParams;
+import org.apache.cassandra.schema.IndexMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.schema.TableParams;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+import org.apache.cassandra.service.paxos.TablePaxosRepairHistory;
+import org.apache.cassandra.service.snapshot.SnapshotManifest;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.streaming.TableStreamManager;
-import org.apache.cassandra.utils.*;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.DefaultValue;
+import org.apache.cassandra.utils.ExecutorUtils;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.MBeanWrapper;
+import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.WrappedRunnable;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.concurrent.Refs;
-import org.apache.cassandra.utils.memory.MemtableAllocator;
-import org.json.simple.JSONArray;
-import org.json.simple.JSONObject;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
-
+import static com.google.common.base.Throwables.propagate;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.config.DatabaseDescriptor.getFlushWriters;
+import static org.apache.cassandra.db.commitlog.CommitLogPosition.NONE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.FBUtilities.now;
 import static org.apache.cassandra.utils.Throwables.maybeFail;
 import static org.apache.cassandra.utils.Throwables.merge;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
 
-public class ColumnFamilyStore implements ColumnFamilyStoreMBean
+public class ColumnFamilyStore implements ColumnFamilyStoreMBean, Memtable.Owner
 {
     private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyStore.class);
 
@@ -108,31 +196,51 @@
     are finished. By having flushExecutor size the same size as each of the perDiskflushExecutors we make sure we can
     have that many flushes going at the same time.
     */
-    private static final ThreadPoolExecutor flushExecutor = new JMXEnabledThreadPoolExecutor(DatabaseDescriptor.getFlushWriters(),
-                                                                                             Stage.KEEP_ALIVE_SECONDS,
-                                                                                             TimeUnit.SECONDS,
-                                                                                             new LinkedBlockingQueue<>(),
-                                                                                             new NamedThreadFactory("MemtableFlushWriter"),
-                                                                                             "internal");
+    private static final ExecutorPlus flushExecutor = executorFactory()
+            .withJmxInternal()
+            .pooled("MemtableFlushWriter", getFlushWriters());
+
+    // post-flush executor is single threaded to guarantee that any flush Future on a CF will never return until prior flushes have completed
+    private static final ExecutorPlus postFlushExecutor = executorFactory()
+            .withJmxInternal()
+            .sequential("MemtablePostFlush");
+
+    private static final ExecutorPlus reclaimExecutor = executorFactory()
+            .withJmxInternal()
+            .sequential("MemtableReclaimMemory");
 
     private static final PerDiskFlushExecutors perDiskflushExecutors = new PerDiskFlushExecutors(DatabaseDescriptor.getFlushWriters(),
                                                                                                  DatabaseDescriptor.getNonLocalSystemKeyspacesDataFileLocations(),
                                                                                                  DatabaseDescriptor.useSpecificLocationForLocalSystemData());
 
-    // post-flush executor is single threaded to provide guarantee that any flush Future on a CF will never return until prior flushes have completed
-    private static final ThreadPoolExecutor postFlushExecutor = new JMXEnabledThreadPoolExecutor(1,
-                                                                                                 Stage.KEEP_ALIVE_SECONDS,
-                                                                                                 TimeUnit.SECONDS,
-                                                                                                 new LinkedBlockingQueue<>(),
-                                                                                                 new NamedThreadFactory("MemtablePostFlush"),
-                                                                                                 "internal");
-
-    private static final ThreadPoolExecutor reclaimExecutor = new JMXEnabledThreadPoolExecutor(1,
-                                                                                               Stage.KEEP_ALIVE_SECONDS,
-                                                                                               TimeUnit.SECONDS,
-                                                                                               new LinkedBlockingQueue<>(),
-                                                                                               new NamedThreadFactory("MemtableReclaimMemory"),
-                                                                                               "internal");
+    /**
+     * Reason for initiating a memtable flush.
+     */
+    public enum FlushReason
+    {
+        COMMITLOG_DIRTY,
+        MEMTABLE_LIMIT,
+        MEMTABLE_PERIOD_EXPIRED,
+        INDEX_BUILD_STARTED,
+        INDEX_BUILD_COMPLETED,
+        INDEX_REMOVED,
+        INDEX_TABLE_FLUSH,
+        VIEW_BUILD_STARTED,
+        INTERNALLY_FORCED,  // explicitly requested flush, necessary for the operation of an internal table
+        USER_FORCED, // flush explicitly requested by the user (e.g. nodetool flush)
+        STARTUP,
+        DRAIN,
+        SNAPSHOT,
+        TRUNCATE,
+        DROP,
+        STREAMING,
+        STREAMS_RECEIVED,
+        VALIDATION,
+        ANTICOMPACTION,
+        SCHEMA_CHANGE,
+        OWNED_RANGES_CHANGE,
+        UNIT_TESTS // explicitly requested flush needed for a test
+    }
 
     private static final String[] COUNTER_NAMES = new String[]{"table", "count", "error", "value"};
     private static final String[] COUNTER_DESCS = new String[]
@@ -146,6 +254,7 @@
 
     public static final String SNAPSHOT_TRUNCATE_PREFIX = "truncated";
     public static final String SNAPSHOT_DROP_PREFIX = "dropped";
+    static final String TOKEN_DELIMITER = ":";
 
     static
     {
@@ -167,6 +276,8 @@
     private final String oldMBeanName;
     private volatile boolean valid = true;
 
+    private volatile Memtable.Factory memtableFactory;
+
     /**
      * Memtables and SSTables on disk for this column family.
      *
@@ -181,7 +292,7 @@
     public final OpOrder readOrdering = new OpOrder();
 
     /* This is used to generate the next index for a SSTable */
-    private final AtomicInteger fileIndexGenerator = new AtomicInteger(0);
+    private final Supplier<? extends SSTableId> sstableIdGenerator;
 
     public final SecondaryIndexManager indexManager;
     public final TableViews viewManager;
@@ -196,13 +307,14 @@
     private final Directories directories;
 
     public final TableMetrics metric;
-    public volatile long sampleReadLatencyNanos;
-    public volatile long additionalWriteLatencyNanos;
+    public volatile long sampleReadLatencyMicros;
+    public volatile long additionalWriteLatencyMicros;
 
     private final CassandraTableWriteHandler writeHandler;
     private final CassandraStreamManager streamManager;
 
     private final TableRepairManager repairManager;
+    public final TopPartitionTracker topPartitions;
 
     private final SSTableImporter sstableImporter;
 
@@ -210,9 +322,33 @@
 
     @VisibleForTesting
     final DiskBoundaryManager diskBoundaryManager = new DiskBoundaryManager();
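+    // cached boundaries used by memtable implementations that shard data by token range (computed lazily)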
+    private volatile ShardBoundaries cachedShardBoundaries = null;
 
     private volatile boolean neverPurgeTombstones = false;
 
+    private class PaxosRepairHistoryLoader
+    {
+        private TablePaxosRepairHistory history;
+
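+        // lazily load the history on first access; double-checked locking avoids loading it more than once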
+        TablePaxosRepairHistory get()
+        {
+            if (history != null)
+                return history;
+
+            synchronized (this)
+            {
+                if (history != null)
+                    return history;
+
+                history = TablePaxosRepairHistory.load(keyspace.getName(), name);
+                return history;
+            }
+        }
+
+    }
+
+    private final PaxosRepairHistoryLoader paxosRepairHistory = new PaxosRepairHistoryLoader();
+
     public static void shutdownPostFlushExecutor() throws InterruptedException
     {
         postFlushExecutor.shutdown();
@@ -244,48 +380,10 @@
 
         compactionStrategyManager.maybeReload(metadata());
 
-        scheduleFlush();
-
         indexManager.reload();
 
-        // If the CF comparator has changed, we need to change the memtable,
-        // because the old one still aliases the previous comparator.
-        if (data.getView().getCurrentMemtable().initialComparator != metadata().comparator)
-            switchMemtable();
-    }
-
-    void scheduleFlush()
-    {
-        int period = metadata().params.memtableFlushPeriodInMs;
-        if (period > 0)
-        {
-            logger.trace("scheduling flush in {} ms", period);
-            WrappedRunnable runnable = new WrappedRunnable()
-            {
-                protected void runMayThrow()
-                {
-                    synchronized (data)
-                    {
-                        Memtable current = data.getView().getCurrentMemtable();
-                        // if we're not expired, we've been hit by a scheduled flush for an already flushed memtable, so ignore
-                        if (current.isExpired())
-                        {
-                            if (current.isClean())
-                            {
-                                // if we're still clean, instead of swapping just reschedule a flush for later
-                                scheduleFlush();
-                            }
-                            else
-                            {
-                                // we'll be rescheduled by the constructor of the Memtable.
-                                forceFlush();
-                            }
-                        }
-                    }
-                }
-            };
-            ScheduledExecutors.scheduledTasks.schedule(runnable, period, TimeUnit.MILLISECONDS);
-        }
+        memtableFactory = metadata().params.memtable.factory();
+        switchMemtableOrNotify(FlushReason.SCHEMA_CHANGE, Memtable::metadataUpdated);
     }
 
     public static Runnable getBackgroundCompactionTaskSubmitter()
@@ -360,7 +458,7 @@
     @VisibleForTesting
     public ColumnFamilyStore(Keyspace keyspace,
                              String columnFamilyName,
-                             int generation,
+                             Supplier<? extends SSTableId> sstableIdGenerator,
                              TableMetadataRef metadata,
                              Directories directories,
                              boolean loadSSTables,
@@ -378,17 +476,22 @@
         maxCompactionThreshold = new DefaultValue<>(metadata.get().params.compaction.maxCompactionThreshold());
         crcCheckChance = new DefaultValue<>(metadata.get().params.crcCheckChance);
         viewManager = keyspace.viewManager.forTable(metadata.id);
-        fileIndexGenerator.set(generation);
-        sampleReadLatencyNanos = DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS) / 2;
-        additionalWriteLatencyNanos = DatabaseDescriptor.getWriteRpcTimeout(NANOSECONDS) / 2;
+        this.sstableIdGenerator = sstableIdGenerator;
+        sampleReadLatencyMicros = DatabaseDescriptor.getReadRpcTimeout(TimeUnit.MICROSECONDS) / 2;
+        additionalWriteLatencyMicros = DatabaseDescriptor.getWriteRpcTimeout(TimeUnit.MICROSECONDS) / 2;
+        memtableFactory = metadata.get().params.memtable.factory();
 
         logger.info("Initializing {}.{}", keyspace.getName(), name);
 
-        // Create Memtable only on online
+        // Create the Memtable and its metrics object only when the daemon is initialized (online)
         Memtable initialMemtable = null;
+        TableMetrics.ReleasableMetric memtableMetrics = null;
         if (DatabaseDescriptor.isDaemonInitialized())
-            initialMemtable = new Memtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition()), this);
-        data = new Tracker(initialMemtable, loadSSTables);
+        {
+            initialMemtable = createMemtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition()));
+            memtableMetrics = memtableFactory.createMemtableMetrics(metadata);
+        }
+        data = new Tracker(this, initialMemtable, loadSSTables);
 
         // Note that this needs to happen before we load the first sstables, or the global sstable tracker will not
         // be notified on the initial loading.
@@ -419,7 +522,7 @@
             indexManager.addIndex(info, true);
         }
 
-        metric = new TableMetrics(this);
+        metric = new TableMetrics(this, memtableMetrics);
 
         if (data.loadsstables)
         {
@@ -445,6 +548,11 @@
         streamManager = new CassandraStreamManager(this);
         repairManager = new CassandraTableRepairManager(this);
         sstableImporter = new SSTableImporter(this);
+
+        if (SchemaConstants.isSystemKeyspace(keyspace.getName()))
+            topPartitions = null;
+        else
+            topPartitions = new TopPartitionTracker(metadata());
     }
 
     public static String getTableMBeanName(String ks, String name, boolean isIndex)
@@ -465,8 +573,8 @@
     {
         try
         {
-            sampleReadLatencyNanos = metadata().params.speculativeRetry.calculateThreshold(metric.coordinatorReadLatency.getSnapshot(), sampleReadLatencyNanos);
-            additionalWriteLatencyNanos = metadata().params.additionalWritePolicy.calculateThreshold(metric.coordinatorWriteLatency.getSnapshot(), additionalWriteLatencyNanos);
+            sampleReadLatencyMicros = metadata().params.speculativeRetry.calculateThreshold(metric.coordinatorReadLatency, sampleReadLatencyMicros);
+            additionalWriteLatencyMicros = metadata().params.additionalWritePolicy.calculateThreshold(metric.coordinatorWriteLatency, additionalWriteLatencyMicros);
         }
         catch (Throwable e)
         {
@@ -504,19 +612,39 @@
         List<String> dataPaths = new ArrayList<>();
         for (File dataPath : directories.getCFDirectories())
         {
-            dataPaths.add(dataPath.getCanonicalPath());
+            dataPaths.add(dataPath.canonicalPath());
         }
 
         return dataPaths;
     }
 
-    public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, UUID pendingRepair, boolean isTransient, int sstableLevel, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker)
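+    // capability flags delegated to the configured memtable factory, used e.g. by the keyspace write handler and streaming paths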
+    public boolean writesShouldSkipCommitLog()
+    {
+        return memtableFactory.writesShouldSkipCommitLog();
+    }
+
+    public boolean memtableWritesAreDurable()
+    {
+        return memtableFactory.writesAreDurable();
+    }
+
+    public boolean streamToMemtable()
+    {
+        return memtableFactory.streamToMemtable();
+    }
+
+    public boolean streamFromMemtable()
+    {
+        return memtableFactory.streamFromMemtable();
+    }
+
+    public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, int sstableLevel, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker)
     {
         MetadataCollector collector = new MetadataCollector(metadata().comparator).sstableLevel(sstableLevel);
         return createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, collector, header, lifecycleNewTracker);
     }
 
-    public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, UUID pendingRepair, boolean isTransient, MetadataCollector metadataCollector, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker)
+    public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, MetadataCollector metadataCollector, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker)
     {
         return getCompactionStrategyManager().createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, metadataCollector, header, indexManager.listIndexes(), lifecycleNewTracker);
     }
@@ -529,11 +657,16 @@
     /** call when dropping or renaming a CF. Performs mbean housekeeping and invalidates CFS to other operations */
     public void invalidate()
     {
-        invalidate(true);
+        invalidate(true, true);
     }
 
     public void invalidate(boolean expectMBean)
     {
+        invalidate(expectMBean, true);
+    }
+
+    public void invalidate(boolean expectMBean, boolean dropData)
+    {
         // disable and cancel in-progress compactions before invalidating
         valid = false;
 
@@ -557,11 +690,16 @@
         if (!metadata.get().isIndex())
             SystemKeyspace.removeTruncationRecord(metadata.id);
 
-        data.dropSSTables();
-        LifecycleTransaction.waitForDeletions();
-        indexManager.dropAllIndexes();
+        if (dropData)
+        {
+            data.dropSSTables();
+            LifecycleTransaction.waitForDeletions();
+        }
+        indexManager.dropAllIndexes(dropData);
 
         invalidateCaches();
+        if (topPartitions != null)
+            topPartitions.close();
     }
 
     /**
@@ -592,7 +730,7 @@
         return createColumnFamilyStore(keyspace, metadata.name, metadata, loadSSTables);
     }
 
-    public static synchronized ColumnFamilyStore createColumnFamilyStore(Keyspace keyspace,
+    public static ColumnFamilyStore createColumnFamilyStore(Keyspace keyspace,
                                                                          String columnFamily,
                                                                          TableMetadataRef metadata,
                                                                          boolean loadSSTables)
@@ -610,21 +748,9 @@
                                                                          boolean registerBookkeeping,
                                                                          boolean offline)
     {
-        // get the max generation number, to prevent generation conflicts
-        Directories.SSTableLister lister = directories.sstableLister(Directories.OnTxnErr.IGNORE).includeBackups(true);
-        List<Integer> generations = new ArrayList<>();
-        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet())
-        {
-            Descriptor desc = entry.getKey();
-            generations.add(desc.generation);
-            if (!desc.isCompatible())
-                throw new RuntimeException(String.format("Incompatible SSTable found. Current version %s is unable to read file: %s. Please run upgradesstables.",
-                                                         desc.getFormat().getLatestVersion(), desc));
-        }
-        Collections.sort(generations);
-        int value = (generations.size() > 0) ? (generations.get(generations.size() - 1)) : 0;
-
-        return new ColumnFamilyStore(keyspace, columnFamily, value, metadata, directories, loadSSTables, registerBookkeeping, offline);
+        return new ColumnFamilyStore(keyspace, columnFamily,
+                                     directories.getUIDGenerator(SSTableIdFactory.instance.defaultBuilder()),
+                                     metadata, directories, loadSSTables, registerBookkeeping, offline);
     }
 
     /**
@@ -662,7 +788,7 @@
                 for (File tmpFile : desc.getTemporaryFiles())
                 {
                     logger.info("Removing unfinished temporary file {}", tmpFile);
-                    tmpFile.delete();
+                    tmpFile.tryDelete();
                 }
             }
 
@@ -688,10 +814,10 @@
         if (dir.exists())
         {
             assert dir.isDirectory();
-            for (File file : Objects.requireNonNull(dir.listFiles()))
-                if (tmpCacheFilePattern.matcher(file.getName()).matches())
-                    if (!file.delete())
-                        logger.warn("could not delete {}", file.getAbsolutePath());
+            for (File file : dir.tryList())
+                if (tmpCacheFilePattern.matcher(file.name()).matches())
+                    if (!file.tryDelete())
+                        logger.warn("could not delete {}", file.absolutePath());
         }
 
         // also clean out any index leftovers.
@@ -757,10 +883,10 @@
                                            descriptor.cfname,
                                            // Increment the generation until we find a filename that doesn't exist. This is needed because the new
                                            // SSTables that are being loaded might already use these generation numbers.
-                                           fileIndexGenerator.incrementAndGet(),
+                                           sstableIdGenerator.get(),
                                            descriptor.formatType);
         }
-        while (new File(newDescriptor.filenameFor(Component.DATA)).exists());
+        while (newDescriptor.fileFor(Component.DATA).exists());
         return newDescriptor;
     }
 
@@ -814,12 +940,27 @@
 
     public Descriptor newSSTableDescriptor(File directory, Version version, SSTableFormat.Type format)
     {
-        return new Descriptor(version,
-                              directory,
-                              keyspace.getName(),
-                              name,
-                              fileIndexGenerator.incrementAndGet(),
-                              format);
+        Descriptor newDescriptor = new Descriptor(version,
+                                                  directory,
+                                                  keyspace.getName(),
+                                                  name,
+                                                  sstableIdGenerator.get(),
+                                                  format);
+        assert !newDescriptor.fileFor(Component.DATA).exists();
+        return newDescriptor;
+    }
+
+    /**
+     * Checks with the memtable if it should be switched for the given reason, and if not, calls the specified
+     * notification method.
+     */
+    private void switchMemtableOrNotify(FlushReason reason, Consumer<Memtable> elseNotify)
+    {
+        Memtable currentMemtable = data.getView().getCurrentMemtable();
+        if (currentMemtable.shouldSwitch(reason))
+            switchMemtableIfCurrent(currentMemtable, reason);
+        else
+            elseNotify.accept(currentMemtable);
     }
 
     /**
@@ -827,12 +968,12 @@
      *
      * @param memtable
      */
-    public ListenableFuture<CommitLogPosition> switchMemtableIfCurrent(Memtable memtable)
+    public Future<CommitLogPosition> switchMemtableIfCurrent(Memtable memtable, FlushReason reason)
     {
         synchronized (data)
         {
             if (data.getView().getCurrentMemtable() == memtable)
-                return switchMemtable();
+                return switchMemtable(reason);
         }
         logger.debug("Memtable is no longer current, returning future that completes when current flushing operation completes");
         return waitForFlushes();
@@ -845,11 +986,12 @@
      * not complete until the Memtable (and all prior Memtables) have been successfully flushed, and the CL
      * marked clean up to the position owned by the Memtable.
      */
-    public ListenableFuture<CommitLogPosition> switchMemtable()
+    @VisibleForTesting
+    public Future<CommitLogPosition> switchMemtable(FlushReason reason)
     {
         synchronized (data)
         {
-            logFlush();
+            logFlush(reason);
             Flush flush = new Flush(false);
             flushExecutor.execute(flush);
             postFlushExecutor.execute(flush.postFlushTask);
@@ -858,33 +1000,16 @@
     }
 
     // print out size of all memtables we're enqueuing
-    private void logFlush()
+    private void logFlush(FlushReason reason)
     {
         // reclaiming includes that which we are GC-ing;
-        float onHeapRatio = 0, offHeapRatio = 0;
-        long onHeapTotal = 0, offHeapTotal = 0;
-        Memtable memtable = getTracker().getView().getCurrentMemtable();
-        onHeapRatio +=  memtable.getAllocator().onHeap().ownershipRatio();
-        offHeapRatio += memtable.getAllocator().offHeap().ownershipRatio();
-        onHeapTotal += memtable.getAllocator().onHeap().owns();
-        offHeapTotal += memtable.getAllocator().offHeap().owns();
+        Memtable.MemoryUsage usage = Memtable.newMemoryUsage();
+        getTracker().getView().getCurrentMemtable().addMemoryUsageTo(usage);
 
         for (ColumnFamilyStore indexCfs : indexManager.getAllIndexColumnFamilyStores())
-        {
-            MemtableAllocator allocator = indexCfs.getTracker().getView().getCurrentMemtable().getAllocator();
-            onHeapRatio += allocator.onHeap().ownershipRatio();
-            offHeapRatio += allocator.offHeap().ownershipRatio();
-            onHeapTotal += allocator.onHeap().owns();
-            offHeapTotal += allocator.offHeap().owns();
-        }
+            indexCfs.getTracker().getView().getCurrentMemtable().addMemoryUsageTo(usage);
 
-        logger.info("Enqueuing flush of {}: {}",
-                     name,
-                     String.format("%s (%.0f%%) on-heap, %s (%.0f%%) off-heap",
-                                   FBUtilities.prettyPrintMemory(onHeapTotal),
-                                   onHeapRatio * 100,
-                                   FBUtilities.prettyPrintMemory(offHeapTotal),
-                                   offHeapRatio * 100));
+        logger.info("Enqueuing flush of {}.{}, Reason: {}, Usage: {}", keyspace.getName(), name, reason, usage);
     }
 
 
@@ -894,14 +1019,14 @@
      * @return a Future yielding the commit log position that can be guaranteed to have been successfully written
      *         to sstables for this table once the future completes
      */
-    public ListenableFuture<CommitLogPosition> forceFlush()
+    public Future<CommitLogPosition> forceFlush(FlushReason reason)
     {
         synchronized (data)
         {
             Memtable current = data.getView().getCurrentMemtable();
             for (ColumnFamilyStore cfs : concatWithIndexes())
                 if (!cfs.data.getView().getCurrentMemtable().isClean())
-                    return switchMemtableIfCurrent(current);
+                    return flushMemtable(current, reason);
             return waitForFlushes();
         }
     }
@@ -913,36 +1038,39 @@
      * @return a Future yielding the commit log position that can be guaranteed to have been successfully written
      *         to sstables for this table once the future completes
      */
-    public ListenableFuture<?> forceFlush(CommitLogPosition flushIfDirtyBefore)
+    public Future<?> forceFlush(CommitLogPosition flushIfDirtyBefore)
     {
         // we don't loop through the remaining memtables since here we only care about commit log dirtiness
         // and this does not vary between a table and its table-backed indexes
         Memtable current = data.getView().getCurrentMemtable();
         if (current.mayContainDataBefore(flushIfDirtyBefore))
-            return switchMemtableIfCurrent(current);
+            return flushMemtable(current, FlushReason.COMMITLOG_DIRTY);
         return waitForFlushes();
     }
 
+    private Future<CommitLogPosition> flushMemtable(Memtable current, FlushReason reason)
+    {
+        if (current.shouldSwitch(reason))
+            return switchMemtableIfCurrent(current, reason);
+        else
+            return waitForFlushes();
+    }
+
     /**
      * @return a Future yielding the commit log position that can be guaranteed to have been successfully written
      *         to sstables for this table once the future completes
      */
-    private ListenableFuture<CommitLogPosition> waitForFlushes()
+    private Future<CommitLogPosition> waitForFlushes()
     {
         // we grab the current memtable; once any preceding memtables have flushed, we know its
         // commitLogLowerBound has been set (as this it is set with the upper bound of the preceding memtable)
         final Memtable current = data.getView().getCurrentMemtable();
-        ListenableFutureTask<CommitLogPosition> task = ListenableFutureTask.create(() -> {
-            logger.debug("forceFlush requested but everything is clean in {}", name);
-            return current.getCommitLogLowerBound();
-        });
-        postFlushExecutor.execute(task);
-        return task;
+        return postFlushExecutor.submit(current::getCommitLogLowerBound);
     }
 
-    public CommitLogPosition forceBlockingFlush()
+    public CommitLogPosition forceBlockingFlush(FlushReason reason)
     {
-        return FBUtilities.waitOnFuture(forceFlush());
+        return FBUtilities.waitOnFuture(forceFlush(reason));
     }
 
     /**
@@ -951,13 +1079,13 @@
      */
     private final class PostFlush implements Callable<CommitLogPosition>
     {
-        final CountDownLatch latch = new CountDownLatch(1);
-        final List<Memtable> memtables;
+        final CountDownLatch latch = newCountDownLatch(1);
+        final Memtable mainMemtable;
         volatile Throwable flushFailure = null;
 
-        private PostFlush(List<Memtable> memtables)
+        private PostFlush(Memtable mainMemtable)
         {
-            this.memtables = memtables;
+            this.mainMemtable = mainMemtable;
         }
 
         public CommitLogPosition call()
@@ -970,22 +1098,21 @@
             }
             catch (InterruptedException e)
             {
-                throw new IllegalStateException();
+                throw new UncheckedInterruptedException(e);
             }
 
-            CommitLogPosition commitLogUpperBound = CommitLogPosition.NONE;
+            CommitLogPosition commitLogUpperBound = NONE;
             // If a flush errored out but the error was ignored, make sure we don't discard the commit log.
-            if (flushFailure == null && !memtables.isEmpty())
+            if (flushFailure == null && mainMemtable != null)
             {
-                Memtable memtable = memtables.get(0);
-                commitLogUpperBound = memtable.getCommitLogUpperBound();
-                CommitLog.instance.discardCompletedSegments(metadata.id, memtable.getCommitLogLowerBound(), commitLogUpperBound);
+                commitLogUpperBound = mainMemtable.getFinalCommitLogUpperBound();
+                CommitLog.instance.discardCompletedSegments(metadata.id, mainMemtable.getCommitLogLowerBound(), commitLogUpperBound);
             }
 
             metric.pendingFlushes.dec();
 
             if (flushFailure != null)
-                throw Throwables.propagate(flushFailure);
+                throw propagate(flushFailure);
 
             return commitLogUpperBound;
         }
@@ -1002,8 +1129,8 @@
     private final class Flush implements Runnable
     {
         final OpOrder.Barrier writeBarrier;
-        final List<Memtable> memtables = new ArrayList<>();
-        final ListenableFutureTask<CommitLogPosition> postFlushTask;
+        final Map<ColumnFamilyStore, Memtable> memtables;
+        final FutureTask<CommitLogPosition> postFlushTask;
         final PostFlush postFlush;
         final boolean truncate;
 
@@ -1026,6 +1153,8 @@
              */
             writeBarrier = Keyspace.writeOrder.newBarrier();
 
+            memtables = new LinkedHashMap<>();
+
             // submit flushes for the memtable for any indexed sub-cfses, and our own
             AtomicReference<CommitLogPosition> commitLogUpperBound = new AtomicReference<>();
             for (ColumnFamilyStore cfs : concatWithIndexes())
@@ -1033,10 +1162,10 @@
                 // switch all memtables, regardless of their dirty status, setting the barrier
                 // so that we can reach a coordinated decision about cleanliness once they
                 // are no longer possible to be modified
-                Memtable newMemtable = new Memtable(commitLogUpperBound, cfs);
+                Memtable newMemtable = cfs.createMemtable(commitLogUpperBound);
                 Memtable oldMemtable = cfs.data.switchMemtable(truncate, newMemtable);
-                oldMemtable.setDiscarding(writeBarrier, commitLogUpperBound);
-                memtables.add(oldMemtable);
+                oldMemtable.switchOut(writeBarrier, commitLogUpperBound);
+                memtables.put(cfs, oldMemtable);
             }
 
             // we then ensure an atomic decision is made about the upper bound of the continuous range of commit log
@@ -1047,8 +1176,8 @@
             // since this happens after wiring up the commitLogUpperBound, we also know all operations with earlier
             // commit log segment position have also completed, i.e. the memtables are done and ready to flush
             writeBarrier.issue();
-            postFlush = new PostFlush(memtables);
-            postFlushTask = ListenableFutureTask.create(postFlush);
+            postFlush = new PostFlush(Iterables.get(memtables.values(), 0, null));
+            postFlushTask = new FutureTask<>(postFlush);
         }
 
         public void run()
@@ -1056,7 +1185,7 @@
             if (logger.isTraceEnabled())
                 logger.trace("Flush task {}@{} starts executing, waiting on barrier", hashCode(), name);
 
-            long start = System.nanoTime();
+            long start = nanoTime();
 
             // mark writes older than the barrier as blocking progress, permitting them to exceed our memory limit
             // if they are stuck waiting on it, then wait for them all to complete
@@ -1064,20 +1193,23 @@
             writeBarrier.await();
 
             if (logger.isTraceEnabled())
-                logger.trace("Flush task for task {}@{} waited {} ms at the barrier", hashCode(), name, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+                logger.trace("Flush task for task {}@{} waited {} ms at the barrier", hashCode(), name, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
 
             // mark all memtables as flushing, removing them from the live memtable list
-            for (Memtable memtable : memtables)
-                memtable.cfs.data.markFlushing(memtable);
+            for (Map.Entry<ColumnFamilyStore, Memtable> entry : memtables.entrySet())
+                entry.getKey().data.markFlushing(entry.getValue());
 
             metric.memtableSwitchCount.inc();
 
             try
             {
+                boolean first = true;
                 // Flush "data" memtable with non-cf 2i first;
-                flushMemtable(memtables.get(0), true);
-                for (int i = 1; i < memtables.size(); i++)
-                    flushMemtable(memtables.get(i), false);
+                for (Map.Entry<ColumnFamilyStore, Memtable> entry : memtables.entrySet())
+                {
+                    flushMemtable(entry.getKey(), entry.getValue(), first);
+                    first = false;
+                }
             }
             catch (Throwable t)
             {
@@ -1089,20 +1221,20 @@
                 logger.trace("Flush task {}@{} signaling post flush task", hashCode(), name);
 
             // signal the post-flush we've done our work
-            postFlush.latch.countDown();
+            postFlush.latch.decrement();
 
             if (logger.isTraceEnabled())
                 logger.trace("Flush task task {}@{} finished", hashCode(), name);
         }
 
-        public Collection<SSTableReader> flushMemtable(Memtable memtable, boolean flushNonCf2i)
+        public Collection<SSTableReader> flushMemtable(ColumnFamilyStore cfs, Memtable memtable, boolean flushNonCf2i)
         {
             if (logger.isTraceEnabled())
                 logger.trace("Flush task task {}@{} flushing memtable {}", hashCode(), name, memtable);
 
             if (memtable.isClean() || truncate)
             {
-                memtable.cfs.replaceFlushed(memtable, Collections.emptyList());
+                cfs.replaceFlushed(memtable, Collections.emptyList());
                 reclaim(memtable);
                 return Collections.emptyList();
             }
@@ -1114,14 +1246,14 @@
             List<SSTableReader> sstables = new ArrayList<>();
             try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.FLUSH))
             {
-                List<Memtable.FlushRunnable> flushRunnables = null;
+                List<Flushing.FlushRunnable> flushRunnables = null;
                 List<SSTableMultiWriter> flushResults = null;
 
                 try
                 {
                     // flush the memtable
-                    flushRunnables = memtable.flushRunnables(txn);
-                    ExecutorService[] executors = perDiskflushExecutors.getExecutorsFor(keyspace.getName(), name);
+                    flushRunnables = Flushing.flushRunnables(cfs, memtable, txn);
+                    ExecutorPlus[] executors = perDiskflushExecutors.getExecutorsFor(keyspace.getName(), name);
 
                     for (int i = 0; i < flushRunnables.size(); i++)
                         futures.add(executors[i].submit(flushRunnables.get(i)));
@@ -1133,13 +1265,13 @@
                      * with CL as we do with memtables/CFS-backed SecondaryIndexes.
                      */
                     if (flushNonCf2i)
-                        indexManager.flushAllNonCFSBackedIndexesBlocking();
+                        indexManager.flushAllNonCFSBackedIndexesBlocking(memtable);
 
                     flushResults = Lists.newArrayList(FBUtilities.waitOnFutures(futures));
                 }
                 catch (Throwable t)
                 {
-                    t = memtable.abortRunnables(flushRunnables, t);
+                    t = Flushing.abortRunnables(flushRunnables, t);
                     t = txn.abort(t);
                     throw Throwables.propagate(t);
                 }
@@ -1194,9 +1326,9 @@
                     }
                 }
             }
-            memtable.cfs.replaceFlushed(memtable, sstables);
+            cfs.replaceFlushed(memtable, sstables);
             reclaim(memtable);
-            memtable.cfs.compactionStrategyManager.compactionLogger.flush(sstables);
+            cfs.compactionStrategyManager.compactionLogger.flush(sstables);
             logger.debug("Flushed to {} ({} sstables, {}), biggest {}, smallest {}",
                          sstables,
                          sstables.size(),
@@ -1216,10 +1348,21 @@
                 public void runMayThrow()
                 {
                     readBarrier.await();
-                    memtable.setDiscarded();
+                    memtable.discard();
                 }
             }, reclaimExecutor);
         }
+
+        @Override
+        public String toString()
+        {
+            return "Flush " + keyspace + '.' + name;
+        }
+    }
+
+    public Memtable createMemtable(AtomicReference<CommitLogPosition> commitLogUpperBound)
+    {
+        return memtableFactory.create(commitLogUpperBound, metadata, this);
     }
 
     // atomically set the upper bound for the commit log
@@ -1239,86 +1382,29 @@
         }
     }
 
-    /**
-     * Finds the largest memtable, as a percentage of *either* on- or off-heap memory limits, and immediately
-     * queues it for flushing. If the memtable selected is flushed before this completes, no work is done.
-     */
-    public static CompletableFuture<Boolean> flushLargestMemtable()
+    @Override
+    public Future<CommitLogPosition> signalFlushRequired(Memtable memtable, FlushReason reason)
     {
-        float largestRatio = 0f;
-        Memtable largest = null;
-        float liveOnHeap = 0, liveOffHeap = 0;
-        for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
-        {
-            // we take a reference to the current main memtable for the CF prior to snapping its ownership ratios
-            // to ensure we have some ordering guarantee for performing the switchMemtableIf(), i.e. we will only
-            // swap if the memtables we are measuring here haven't already been swapped by the time we try to swap them
-            Memtable current = cfs.getTracker().getView().getCurrentMemtable();
-
-            // find the total ownership ratio for the memtable and all SecondaryIndexes owned by this CF,
-            // both on- and off-heap, and select the largest of the two ratios to weight this CF
-            float onHeap = 0f, offHeap = 0f;
-            onHeap += current.getAllocator().onHeap().ownershipRatio();
-            offHeap += current.getAllocator().offHeap().ownershipRatio();
-
-            for (ColumnFamilyStore indexCfs : cfs.indexManager.getAllIndexColumnFamilyStores())
-            {
-                MemtableAllocator allocator = indexCfs.getTracker().getView().getCurrentMemtable().getAllocator();
-                onHeap += allocator.onHeap().ownershipRatio();
-                offHeap += allocator.offHeap().ownershipRatio();
-            }
-
-            float ratio = Math.max(onHeap, offHeap);
-            if (ratio > largestRatio)
-            {
-                largest = current;
-                largestRatio = ratio;
-            }
-
-            liveOnHeap += onHeap;
-            liveOffHeap += offHeap;
-        }
-
-        CompletableFuture<Boolean> returnFuture = new CompletableFuture<>();
-
-        if (largest != null)
-        {
-            float usedOnHeap = Memtable.MEMORY_POOL.onHeap.usedRatio();
-            float usedOffHeap = Memtable.MEMORY_POOL.offHeap.usedRatio();
-            float flushingOnHeap = Memtable.MEMORY_POOL.onHeap.reclaimingRatio();
-            float flushingOffHeap = Memtable.MEMORY_POOL.offHeap.reclaimingRatio();
-            float thisOnHeap = largest.getAllocator().onHeap().ownershipRatio();
-            float thisOffHeap = largest.getAllocator().offHeap().ownershipRatio();
-            logger.debug("Flushing largest {} to free up room. Used total: {}, live: {}, flushing: {}, this: {}",
-                         largest.cfs, ratio(usedOnHeap, usedOffHeap), ratio(liveOnHeap, liveOffHeap),
-                         ratio(flushingOnHeap, flushingOffHeap), ratio(thisOnHeap, thisOffHeap));
-
-            ListenableFuture<CommitLogPosition> flushFuture = largest.cfs.switchMemtableIfCurrent(largest);
-            flushFuture.addListener(() -> {
-                try
-                {
-                    flushFuture.get();
-                    returnFuture.complete(true);
-                }
-                catch (Throwable t)
-                {
-                    returnFuture.completeExceptionally(t);
-                }
-            }, MoreExecutors.directExecutor());
-        }
-        else
-        {
-            logger.debug("Flushing of largest memtable, not done, no memtable found");
-
-            returnFuture.complete(false);
-        }
-
-        return returnFuture;
+        return switchMemtableIfCurrent(memtable, reason);
     }
 
-    private static String ratio(float onHeap, float offHeap)
+    @Override
+    public Memtable getCurrentMemtable()
     {
-        return String.format("%.2f/%.2f", onHeap, offHeap);
+        return data.getView().getCurrentMemtable();
+    }
+
+    public static Iterable<Memtable> activeMemtables()
+    {
+        return Iterables.transform(ColumnFamilyStore.all(),
+                                   cfs -> cfs.getTracker().getView().getCurrentMemtable());
+    }
+
+    @Override
+    public Iterable<Memtable> getIndexMemtables()
+    {
+        return Iterables.transform(indexManager.getAllIndexColumnFamilyStores(),
+                                   cfs -> cfs.getTracker().getView().getCurrentMemtable());
     }
 
     /**
@@ -1331,7 +1417,7 @@
     public void apply(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup, CommitLogPosition commitLogPosition)
 
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         try
         {
             Memtable mt = data.getMemtableFor(opGroup, commitLogPosition);
@@ -1342,7 +1428,7 @@
             if (metric.topWritePartitionSize.isEnabled()) // dont compute datasize if not needed
                 metric.topWritePartitionSize.addSample(key.getKey(), update.dataSize());
             StorageHook.instance.reportWrite(metadata.id, update);
-            metric.writeLatency.addNano(System.nanoTime() - start);
+            metric.writeLatency.addNano(nanoTime() - start);
             // CASSANDRA-11117 - certain resolution paths on memtable put can result in very
             // large time deltas, either through a variety of sentinel timestamps (used for empty values, ensuring
             // a minimal write, etc). This limits the time delta to the max value the histogram
@@ -1359,6 +1445,44 @@
         }
     }
 
+    @Override
+    public ShardBoundaries localRangeSplits(int shardCount)
+    {
+        if (shardCount == 1 || !getPartitioner().splitter().isPresent() || SchemaConstants.isLocalSystemKeyspace(keyspace.getName()))
+            return ShardBoundaries.NONE;
+
+        ShardBoundaries shardBoundaries = cachedShardBoundaries;
+        if (shardBoundaries == null ||
+            shardBoundaries.shardCount() != shardCount ||
+            shardBoundaries.ringVersion != StorageService.instance.getTokenMetadata().getRingVersion())
+        {
+            DiskBoundaryManager.VersionedRangesAtEndpoint versionedLocalRanges = DiskBoundaryManager.getVersionedLocalRanges(this);
+            Set<Range<Token>> localRanges = versionedLocalRanges.rangesAtEndpoint.ranges();
+            List<Splitter.WeightedRange> weightedRanges;
+            if (localRanges.isEmpty())
+                weightedRanges = ImmutableList.of(new Splitter.WeightedRange(1.0, new Range<>(getPartitioner().getMinimumToken(), getPartitioner().getMaximumToken())));
+            else
+            {
+                weightedRanges = new ArrayList<>(localRanges.size());
+                for (Range<Token> r : localRanges)
+                {
+                    // WeightedRange supports only unwrapped ranges as it relies
+                    // on right - left == num tokens equality
+                    for (Range<Token> u: r.unwrap())
+                        weightedRanges.add(new Splitter.WeightedRange(1.0, u));
+                }
+                weightedRanges.sort(Comparator.comparing(Splitter.WeightedRange::left));
+            }
+
+            List<Token> boundaries = getPartitioner().splitter().get().splitOwnedRanges(shardCount, weightedRanges, false);
+            shardBoundaries = new ShardBoundaries(boundaries.subList(0, boundaries.size() - 1),
+                                                  versionedLocalRanges.ringVersion);
+            cachedShardBoundaries = shardBoundaries;
+            logger.debug("Memtable shard boundaries for {}.{}: {}", keyspace.getName(), getTableName(), boundaries);
+        }
+        return shardBoundaries;
+    }
+
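    Illustrative sketch (not part of this change set): a sharded memtable implementation would typically ask its owner for per-shard token boundaries through this method. The shard count of 4 and the cfs reference are arbitrary assumptions for the example.

        // Split the locally owned ranges of an existing ColumnFamilyStore `cfs` into 4 roughly equal shards.
        // The result is cached until the requested shard count or the ring version changes.
        ShardBoundaries boundaries = cfs.localRangeSplits(4);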
     /**
      * @param sstables
      * @return sstables whose key range overlaps with that of the given sstables, not including itself.
@@ -1525,7 +1649,11 @@
     {
         // skip snapshot creation during scrub, SEE JIRA 5891
         if(!disableSnapshot)
-            snapshotWithoutFlush("pre-scrub-" + System.currentTimeMillis());
+        {
+            Instant creationTime = now();
+            String snapshotName = "pre-scrub-" + creationTime.toEpochMilli();
+            snapshotWithoutMemtable(snapshotName, creationTime);
+        }
 
         try
         {
@@ -1569,9 +1697,20 @@
         return CompactionManager.instance.performVerify(ColumnFamilyStore.this, options);
     }
 
-    public CompactionManager.AllSSTableOpStatus sstablesRewrite(boolean excludeCurrentVersion, int jobs) throws ExecutionException, InterruptedException
+    /**
+     * Rewrites all SSTables according to specified parameters
+     *
+     * @param skipIfCurrentVersion - if {@code true}, will rewrite only SSTables whose version is older than the current one ({@link BigFormat#latestVersion})
+     * @param skipIfNewerThanTimestamp - max timestamp (local creation time) for an SSTable; SSTables created _after_ this timestamp will be excluded from compaction
+     * @param skipIfCompressionMatches - if {@code true}, will rewrite only SSTables whose compression parameters differ from {@link CFMetaData#compressionParams()}
+     * @param jobs number of jobs for parallel execution
+     */
+    public CompactionManager.AllSSTableOpStatus sstablesRewrite(final boolean skipIfCurrentVersion,
+                                                                final long skipIfNewerThanTimestamp,
+                                                                final boolean skipIfCompressionMatches,
+                                                                final int jobs) throws ExecutionException, InterruptedException
     {
-        return CompactionManager.instance.performSSTableRewrite(ColumnFamilyStore.this, excludeCurrentVersion, jobs);
+        return CompactionManager.instance.performSSTableRewrite(ColumnFamilyStore.this, skipIfCurrentVersion, skipIfNewerThanTimestamp, skipIfCompressionMatches, jobs);
     }
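    A hedged usage sketch of the signature above (argument values are arbitrary examples, and the cfs reference is assumed): rewrite only SSTables on an older format version, apply no creation-time cutoff, ignore compression parameters, and run two jobs in parallel.

        // Illustrative only; Long.MAX_VALUE effectively disables the skipIfNewerThanTimestamp cutoff.
        // (The method declares ExecutionException and InterruptedException.)
        cfs.sstablesRewrite(true, Long.MAX_VALUE, false, 2);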
 
     public CompactionManager.AllSSTableOpStatus relocateSSTables(int jobs) throws ExecutionException, InterruptedException
@@ -1625,12 +1764,12 @@
         return data.getUncompacting();
     }
 
-    public Map<UUID, PendingStat> getPendingRepairStats()
+    public Map<TimeUUID, PendingStat> getPendingRepairStats()
     {
-        Map<UUID, PendingStat.Builder> builders = new HashMap<>();
+        Map<TimeUUID, PendingStat.Builder> builders = new HashMap<>();
         for (SSTableReader sstable : getLiveSSTables())
         {
-            UUID session = sstable.getPendingRepair();
+            TimeUUID session = sstable.getPendingRepair();
             if (session == null)
                 continue;
 
@@ -1640,8 +1779,8 @@
             builders.get(session).addSSTable(sstable);
         }
 
-        Map<UUID, PendingStat> stats = new HashMap<>();
-        for (Map.Entry<UUID, PendingStat.Builder> entry : builders.entrySet())
+        Map<TimeUUID, PendingStat> stats = new HashMap<>();
+        for (Map.Entry<TimeUUID, PendingStat.Builder> entry : builders.entrySet())
         {
             stats.put(entry.getKey(), entry.getValue().build());
         }
@@ -1654,12 +1793,12 @@
      *
      * @return session ids whose data could not be released
      */
-    public CleanupSummary releaseRepairData(Collection<UUID> sessions, boolean force)
+    public CleanupSummary releaseRepairData(Collection<TimeUUID> sessions, boolean force)
     {
         if (force)
         {
             Predicate<SSTableReader> predicate = sst -> {
-                UUID session = sst.getPendingRepair();
+                TimeUUID session = sst.getPendingRepair();
                 return session != null && sessions.contains(session);
             };
             return runWithCompactionsDisabled(() -> compactionStrategyManager.releaseRepairData(sessions),
@@ -1700,6 +1839,31 @@
                || filter.isFullyCoveredBy(cached);
     }
 
+    public PaxosRepairHistory getPaxosRepairHistory()
+    {
+        return paxosRepairHistory.get().getHistory();
+    }
+
+    public PaxosRepairHistory getPaxosRepairHistoryForRanges(Collection<Range<Token>> ranges)
+    {
+        return paxosRepairHistory.get().getHistoryForRanges(ranges);
+    }
+
+    public void syncPaxosRepairHistory(PaxosRepairHistory sync, boolean flush)
+    {
+        paxosRepairHistory.get().merge(sync, flush);
+    }
+
+    public void onPaxosRepairComplete(Collection<Range<Token>> ranges, Ballot highBallot)
+    {
+        paxosRepairHistory.get().add(ranges, highBallot, true);
+    }
+
+    public Ballot getPaxosRepairLowBound(DecoratedKey key)
+    {
+        return paxosRepairHistory.get().getBallotForToken(key.getToken());
+    }
+
     public int gcBefore(int nowInSec)
     {
         return nowInSec - metadata().params.gcGraceSeconds;
@@ -1717,9 +1881,9 @@
                 return new RefViewFragment(view.sstables, view.memtables, refs);
             if (failingSince <= 0)
             {
-                failingSince = System.nanoTime();
+                failingSince = nanoTime();
             }
-            else if (System.nanoTime() - failingSince > TimeUnit.MILLISECONDS.toNanos(100))
+            else if (nanoTime() - failingSince > TimeUnit.MILLISECONDS.toNanos(100))
             {
                 List<SSTableReader> released = new ArrayList<>();
                 for (SSTableReader reader : view.sstables)
@@ -1727,7 +1891,7 @@
                         released.add(reader);
                 NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.SECONDS,
                                  "Spinning trying to capture readers {}, released: {}, ", view.sstables, released);
-                failingSince = System.nanoTime();
+                failingSince = nanoTime();
             }
         }
     }
@@ -1827,21 +1991,30 @@
         return metadata().comparator;
     }
 
-    public void snapshotWithoutFlush(String snapshotName)
+    public TableSnapshot snapshotWithoutMemtable(String snapshotName)
     {
-        snapshotWithoutFlush(snapshotName, null, false, null);
+        return snapshotWithoutMemtable(snapshotName, now());
+    }
+
+    public TableSnapshot snapshotWithoutMemtable(String snapshotName, Instant creationTime)
+    {
+        return snapshotWithoutMemtable(snapshotName, null, false, null, null, creationTime);
     }
 
     /**
      * @param ephemeral If this flag is set to true, the snapshot will be cleaned during next startup
      */
-    public Set<SSTableReader> snapshotWithoutFlush(String snapshotName, Predicate<SSTableReader> predicate, boolean ephemeral, RateLimiter rateLimiter)
+    public TableSnapshot snapshotWithoutMemtable(String snapshotName, Predicate<SSTableReader> predicate, boolean ephemeral, DurationSpec.IntSecondsBound ttl, RateLimiter rateLimiter, Instant creationTime)
     {
+        if (ephemeral && ttl != null)
+        {
+            throw new IllegalStateException(String.format("cannot take an ephemeral snapshot (%s) when a ttl is also specified", snapshotName));
+        }
+
         if (rateLimiter == null)
             rateLimiter = DatabaseDescriptor.getSnapshotRateLimiter();
 
-        Set<SSTableReader> snapshottedSSTables = new HashSet<>();
-        final JSONArray filesJSONArr = new JSONArray();
+        Set<SSTableReader> snapshottedSSTables = new LinkedHashSet<>();
         for (ColumnFamilyStore cfs : concatWithIndexes())
         {
             try (RefViewFragment currentView = cfs.selectAndReference(View.select(SSTableSet.CANONICAL, (x) -> predicate == null || predicate.apply(x))))
@@ -1849,8 +2022,7 @@
                 for (SSTableReader ssTable : currentView.sstables)
                 {
                     File snapshotDirectory = Directories.getSnapshotDirectory(ssTable.descriptor, snapshotName);
-                    ssTable.createLinks(snapshotDirectory.getPath(), rateLimiter); // hard links
-                    filesJSONArr.add(ssTable.descriptor.relativeFilenameFor(Component.DATA));
+                    ssTable.createLinks(snapshotDirectory.path(), rateLimiter); // hard links
 
                     if (logger.isTraceEnabled())
                         logger.trace("Snapshot for {} keyspace data file {} created in {}", keyspace, ssTable.getFilename(), snapshotDirectory);
@@ -1859,30 +2031,51 @@
             }
         }
 
-        writeSnapshotManifest(filesJSONArr, snapshotName);
-        if (!SchemaConstants.isLocalSystemKeyspace(metadata.keyspace) && !SchemaConstants.isReplicatedSystemKeyspace(metadata.keyspace))
-            writeSnapshotSchema(snapshotName);
-
-        if (ephemeral)
-            createEphemeralSnapshotMarkerFile(snapshotName);
-        return snapshottedSSTables;
+        return createSnapshot(snapshotName, ephemeral, ttl, snapshottedSSTables, creationTime);
     }
 
-    private void writeSnapshotManifest(final JSONArray filesJSONArr, final String snapshotName)
-    {
-        final File manifestFile = getDirectories().getSnapshotManifestFile(snapshotName);
+    protected TableSnapshot createSnapshot(String tag, boolean ephemeral, DurationSpec.IntSecondsBound ttl, Set<SSTableReader> sstables, Instant creationTime)
+    {
+        Set<File> snapshotDirs = sstables.stream()
+                                         .map(s -> Directories.getSnapshotDirectory(s.descriptor, tag).toAbsolute())
+                                         .filter(dir -> !Directories.isSecondaryIndexFolder(dir)) // Remove secondary index subdirectory
+                                         .collect(Collectors.toCollection(HashSet::new));
 
+        // Create and write snapshot manifest
+        SnapshotManifest manifest = new SnapshotManifest(mapToDataFilenames(sstables), ttl, creationTime);
+        File manifestFile = getDirectories().getSnapshotManifestFile(tag);
+        writeSnapshotManifest(manifest, manifestFile);
+        snapshotDirs.add(manifestFile.parent().toAbsolute()); // manifest may create empty snapshot dir
+
+        // Write snapshot schema
+        if (!SchemaConstants.isLocalSystemKeyspace(metadata.keyspace) && !SchemaConstants.isReplicatedSystemKeyspace(metadata.keyspace))
+        {
+            File schemaFile = getDirectories().getSnapshotSchemaFile(tag);
+            writeSnapshotSchema(schemaFile);
+            snapshotDirs.add(schemaFile.parent().toAbsolute()); // schema may create empty snapshot dir
+        }
+
+        // Maybe create ephemeral marker
+        if (ephemeral)
+        {
+            File ephemeralSnapshotMarker = getDirectories().getNewEphemeralSnapshotMarkerFile(tag);
+            createEphemeralSnapshotMarkerFile(tag, ephemeralSnapshotMarker);
+            snapshotDirs.add(ephemeralSnapshotMarker.parent().toAbsolute()); // marker may create empty snapshot dir
+        }
+
+        TableSnapshot snapshot = new TableSnapshot(metadata.keyspace, metadata.name, metadata.id.asUUID(), tag,
+                                                   manifest.createdAt, manifest.expiresAt, snapshotDirs);
+
+        StorageService.instance.addSnapshot(snapshot);
+        return snapshot;
+    }
+
+    private SnapshotManifest writeSnapshotManifest(SnapshotManifest manifest, File manifestFile)
+    {
         try
         {
-            if (!manifestFile.getParentFile().exists())
-                manifestFile.getParentFile().mkdirs();
-
-            try (PrintStream out = new PrintStream(manifestFile))
-            {
-                final JSONObject manifestJSON = new JSONObject();
-                manifestJSON.put("files", filesJSONArr);
-                out.println(manifestJSON.toJSONString());
-            }
+            manifestFile.parent().tryCreateDirectories();
+            manifest.serializeToJsonFile(manifestFile);
+            return manifest;
         }
         catch (IOException e)
         {
@@ -1890,19 +2083,22 @@
         }
     }
 
-    private void writeSnapshotSchema(final String snapshotName)
+    private List<String> mapToDataFilenames(Collection<SSTableReader> sstables)
     {
-        final File schemaFile = getDirectories().getSnapshotSchemaFile(snapshotName);
+        return sstables.stream().map(s -> s.descriptor.relativeFilenameFor(Component.DATA)).collect(Collectors.toList());
+    }
 
+    private void writeSnapshotSchema(File schemaFile)
+    {
         try
         {
-            if (!schemaFile.getParentFile().exists())
-                schemaFile.getParentFile().mkdirs();
+            if (!schemaFile.parent().exists())
+                schemaFile.parent().tryCreateDirectories();
 
-            try (PrintStream out = new PrintStream(schemaFile))
+            try (PrintStream out = new PrintStream(new FileOutputStreamPlus(schemaFile)))
             {
                 SchemaCQLHelper.reCreateStatementsForSchemaCql(metadata(),
-                                                               keyspace.getMetadata().types)
+                                                               keyspace.getMetadata())
                                .forEach(out::println);
             }
         }
@@ -1912,25 +2108,23 @@
         }
     }
 
-    private void createEphemeralSnapshotMarkerFile(final String snapshot)
+    private void createEphemeralSnapshotMarkerFile(final String snapshot, File ephemeralSnapshotMarker)
     {
-        final File ephemeralSnapshotMarker = getDirectories().getNewEphemeralSnapshotMarkerFile(snapshot);
-
         try
         {
-            if (!ephemeralSnapshotMarker.getParentFile().exists())
-                ephemeralSnapshotMarker.getParentFile().mkdirs();
+            if (!ephemeralSnapshotMarker.parent().exists())
+                ephemeralSnapshotMarker.parent().tryCreateDirectories();
 
             Files.createFile(ephemeralSnapshotMarker.toPath());
             if (logger.isTraceEnabled())
-                logger.trace("Created ephemeral snapshot marker file on {}.", ephemeralSnapshotMarker.getAbsolutePath());
+                logger.trace("Created ephemeral snapshot marker file on {}.", ephemeralSnapshotMarker.absolutePath());
         }
         catch (IOException e)
         {
             logger.warn(String.format("Could not create marker file %s for ephemeral snapshot %s. " +
                                       "In case there is a failure in the operation that created " +
                                       "this snapshot, you may need to clean it manually afterwards.",
-                                      ephemeralSnapshotMarker.getAbsolutePath(), snapshot), e);
+                                      ephemeralSnapshotMarker.absolutePath(), snapshot), e);
         }
     }
 
@@ -1947,9 +2141,9 @@
 
     public Refs<SSTableReader> getSnapshotSSTableReaders(String tag) throws IOException
     {
-        Map<Integer, SSTableReader> active = new HashMap<>();
+        Map<SSTableId, SSTableReader> active = new HashMap<>();
         for (SSTableReader sstable : getSSTables(SSTableSet.CANONICAL))
-            active.put(sstable.descriptor.generation, sstable);
+            active.put(sstable.descriptor.id, sstable);
         Map<Descriptor, Set<Component>> snapshots = getDirectories().sstableLister(Directories.OnTxnErr.IGNORE).snapshots(tag).list();
         Refs<SSTableReader> refs = new Refs<>();
         try
@@ -1958,7 +2152,7 @@
             {
                 // Try acquire reference to an active sstable instead of snapshot if it exists,
                 // to avoid opening new sstables. If it fails, use the snapshot reference instead.
-                SSTableReader sstable = active.get(entries.getKey().generation);
+                SSTableReader sstable = active.get(entries.getKey().id);
                 if (sstable == null || !refs.tryRef(sstable))
                 {
                     if (logger.isTraceEnabled())
@@ -1990,45 +2184,61 @@
      *
      * @param snapshotName the name associated with the snapshot
      */
-    public Set<SSTableReader> snapshot(String snapshotName)
+    public TableSnapshot snapshot(String snapshotName)
     {
-        return snapshot(snapshotName, false, null);
+        return snapshot(snapshotName, null);
+    }
+
+    public TableSnapshot snapshot(String snapshotName, DurationSpec.IntSecondsBound ttl)
+    {
+        return snapshot(snapshotName, false, ttl, null, now());
     }
 
     /**
      * Take a snapshot of this column family store.
      *
      * @param snapshotName the name associated with the snapshot
-     * @param skipFlush Skip blocking flush of memtable
+     * @param skipMemtable Skip flushing the memtable
+     * @param ttl duration after which the snapshot is removed automatically; if null, it is never removed automatically
      * @param rateLimiter Rate limiter for hardlinks-per-second
+     * @param creationTime time when this snapshot was taken
      */
-    public Set<SSTableReader> snapshot(String snapshotName, boolean skipFlush, RateLimiter rateLimiter)
+    public TableSnapshot snapshot(String snapshotName, boolean skipMemtable, DurationSpec.IntSecondsBound ttl, RateLimiter rateLimiter, Instant creationTime)
     {
-        return snapshot(snapshotName, null, false, skipFlush, rateLimiter);
+        return snapshot(snapshotName, null, false, skipMemtable, ttl, rateLimiter, creationTime);
     }
 
 
     /**
      * @param ephemeral If this flag is set to true, the snapshot will be cleaned up during next startup
-     * @param skipFlush Skip blocking flush of memtable
+     * @param skipMemtable Skip flushing the memtable
      */
-    public Set<SSTableReader> snapshot(String snapshotName, Predicate<SSTableReader> predicate, boolean ephemeral, boolean skipFlush)
+    public TableSnapshot snapshot(String snapshotName, Predicate<SSTableReader> predicate, boolean ephemeral, boolean skipMemtable)
     {
-        return snapshot(snapshotName, predicate, ephemeral, skipFlush, null);
+        return snapshot(snapshotName, predicate, ephemeral, skipMemtable, null, null, now());
     }
 
     /**
      * @param ephemeral If this flag is set to true, the snapshot will be cleaned up during next startup
-     * @param skipFlush Skip blocking flush of memtable
+     * @param skipMemtable Skip flushing the memtable
+     * @param ttl duration after which the snapshot is removed automatically; if null, it is never removed automatically
      * @param rateLimiter Rate limiter for hardlinks-per-second
+     * @param creationTime time when this snapshot was taken
      */
-    public Set<SSTableReader> snapshot(String snapshotName, Predicate<SSTableReader> predicate, boolean ephemeral, boolean skipFlush, RateLimiter rateLimiter)
+    public TableSnapshot snapshot(String snapshotName, Predicate<SSTableReader> predicate, boolean ephemeral, boolean skipMemtable, DurationSpec.IntSecondsBound ttl, RateLimiter rateLimiter, Instant creationTime)
     {
-        if (!skipFlush)
+        if (!skipMemtable)
         {
-            forceBlockingFlush();
+            Memtable current = getTracker().getView().getCurrentMemtable();
+            if (!current.isClean())
+            {
+                if (current.shouldSwitch(FlushReason.SNAPSHOT))
+                    FBUtilities.waitOnFuture(switchMemtableIfCurrent(current, FlushReason.SNAPSHOT));
+                else
+                    current.performSnapshot(snapshotName);
+            }
         }
-        return snapshotWithoutFlush(snapshotName, predicate, ephemeral, rateLimiter);
+        return snapshotWithoutMemtable(snapshotName, predicate, ephemeral, ttl, rateLimiter, creationTime);
     }
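    For illustration (not part of this patch), the simplest TTL-bearing call mirrors what the truncate and drop paths elsewhere in this patch do, reusing the configured auto-snapshot TTL; the snapshot name and the cfs reference are assumptions.

        // Take a snapshot that is expired automatically after the configured auto-snapshot TTL.
        TableSnapshot snapshot = cfs.snapshot("pre-upgrade", DatabaseDescriptor.getAutoSnapshotTtl());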
 
     public boolean snapshotExists(String snapshotName)
@@ -2036,10 +2246,6 @@
         return getDirectories().snapshotExists(snapshotName);
     }
 
-    public long getSnapshotCreationTime(String snapshotName)
-    {
-        return getDirectories().snapshotCreationTime(snapshotName);
-    }
 
     /**
      * Clear all the snapshots for a given column family.
@@ -2059,9 +2265,9 @@
      * @return a map from snapshot name to {@link TableSnapshot} for every snapshot of this table
      */
-    public Map<String, Directories.SnapshotSizeDetails> getSnapshotDetails()
+    public Map<String, TableSnapshot> listSnapshots()
     {
-        return getDirectories().getSnapshotDetails();
+        return getDirectories().listSnapshots();
     }
 
     /**
@@ -2163,15 +2369,46 @@
     }
 
     public void forceMajorCompaction(boolean splitOutput)
-   {
+    {
         CompactionManager.instance.performMaximal(this, splitOutput);
     }
 
+    @Override
     public void forceCompactionForTokenRange(Collection<Range<Token>> tokenRanges) throws ExecutionException, InterruptedException
     {
         CompactionManager.instance.forceCompactionForTokenRange(this, tokenRanges);
     }
 
+    @Override
+    public void forceCompactionForTokenRanges(String... strings)
+    {
+        CompactionManager.instance.forceCompactionForTokenRange(this, toTokenRanges(DatabaseDescriptor.getPartitioner(), strings));
+    }
+
+    static Set<Range<Token>> toTokenRanges(IPartitioner partitioner, String... strings)
+    {
+        Token.TokenFactory tokenFactory = partitioner.getTokenFactory();
+        Set<Range<Token>> tokenRanges = new HashSet<>();
+        for (String str : strings)
+        {
+            String[] splits = str.split(TOKEN_DELIMITER);
+            assert splits.length == 2 : String.format("Unable to parse token range %s; needs to have two tokens separated by %s", str, TOKEN_DELIMITER);
+            String lhsStr = splits[0];
+            assert !Strings.isNullOrEmpty(lhsStr) : String.format("Unable to parse token range %s; left-hand side of the token separator is empty", str);
+            String rhsStr = splits[1];
+            assert !Strings.isNullOrEmpty(rhsStr) : String.format("Unable to parse token range %s; right-hand side of the token separator is empty", str);
+            Token lhs = tokenFactory.fromString(lhsStr);
+            Token rhs = tokenFactory.fromString(rhsStr);
+            tokenRanges.add(new Range<>(lhs, rhs));
+        }
+        return tokenRanges;
+    }
+
+    public void forceCompactionForKey(DecoratedKey key)
+    {
+        CompactionManager.instance.forceCompactionForKey(this, key);
+    }
+
     public static Iterable<ColumnFamilyStore> all()
     {
         List<Iterable<ColumnFamilyStore>> stores = new ArrayList<>(Schema.instance.getKeyspaces().size());
@@ -2207,6 +2444,108 @@
         }
     }
 
+    public void writeAndAddMemtableRanges(TimeUUID repairSessionID,
+                                          Supplier<Collection<Range<PartitionPosition>>> rangesSupplier,
+                                          Refs<SSTableReader> placeIntoRefs)
+    {
+        @SuppressWarnings("resource") // closed by finish or on exception
+        SSTableMultiWriter memtableContent = writeMemtableRanges(rangesSupplier, repairSessionID);
+        if (memtableContent != null)
+        {
+            try
+            {
+                Collection<SSTableReader> sstables = memtableContent.finish(true);
+                try (Refs sstableReferences = Refs.ref(sstables))
+                {
+                    // This moves all references to placeIntoRefs, clearing sstableReferences
+                    placeIntoRefs.addAll(sstableReferences);
+                }
+
+                // Release the reference any written sstables start with.
+                for (SSTableReader rdr : sstables)
+                {
+                    rdr.selfRef().release();
+                    logger.info("Memtable ranges (keys {} size {}) written in {}",
+                                rdr.estimatedKeys(),
+                                rdr.getDataChannel().size(),
+                                rdr);
+                }
+            }
+            catch (Throwable t)
+            {
+                memtableContent.close();
+                Throwables.propagate(t);
+            }
+        }
+    }
+
+    private SSTableMultiWriter writeMemtableRanges(Supplier<Collection<Range<PartitionPosition>>> rangesSupplier,
+                                                   TimeUUID repairSessionID)
+    {
+        if (!streamFromMemtable())
+            return null;
+
+        Collection<Range<PartitionPosition>> ranges = rangesSupplier.get();
+        Memtable current = getTracker().getView().getCurrentMemtable();
+        if (current.isClean())
+            return null;
+
+        List<Memtable.FlushablePartitionSet<?>> dataSets = new ArrayList<>(ranges.size());
+        long keys = 0;
+        for (Range<PartitionPosition> range : ranges)
+        {
+            Memtable.FlushablePartitionSet<?> dataSet = current.getFlushSet(range.left, range.right);
+            dataSets.add(dataSet);
+            keys += dataSet.partitionCount();
+        }
+        if (keys == 0)
+            return null;
+
+        // TODO: Can we write directly to stream, skipping disk?
+        Memtable.FlushablePartitionSet<?> firstDataSet = dataSets.get(0);
+        SSTableMultiWriter writer = createSSTableMultiWriter(newSSTableDescriptor(directories.getDirectoryForNewSSTables()),
+                                                             keys,
+                                                             0,
+                                                             repairSessionID,
+                                                             false,
+                                                             0,
+                                                             new SerializationHeader(true,
+                                                                                     firstDataSet.metadata(),
+                                                                                     firstDataSet.columns(),
+                                                                                     firstDataSet.encodingStats()),
+                                                             DO_NOT_TRACK);
+        try
+        {
+            for (Memtable.FlushablePartitionSet<?> dataSet : dataSets)
+                new Flushing.FlushRunnable(dataSet, writer, metric, false).call();  // executes on this thread
+
+            return writer;
+        }
+        catch (Error | RuntimeException t)
+        {
+            writer.abort(t);
+            throw t;
+        }
+    }
+
+    private static final LifecycleNewTracker DO_NOT_TRACK = new LifecycleNewTracker()
+    {
+        public void trackNew(SSTable table)
+        {
+            // not tracking
+        }
+
+        public void untrackNew(SSTable table)
+        {
+            // not tracking
+        }
+
+        public OperationType opType()
+        {
+            return OperationType.FLUSH;
+        }
+    };
+
     /**
      * For testing.  No effort is made to clear historical or even the current memtables, nor for
      * thread safety.  All we do is wipe the sstable containers clean, while leaving the actual
@@ -2218,7 +2557,7 @@
         for (final ColumnFamilyStore cfs : concatWithIndexes())
         {
             cfs.runWithCompactionsDisabled((Callable<Void>) () -> {
-                cfs.data.reset(new Memtable(new AtomicReference<>(CommitLogPosition.NONE), cfs));
+                cfs.data.reset(memtableFactory.create(new AtomicReference<>(CommitLogPosition.NONE), cfs.metadata, cfs));
                 return null;
             }, true, false);
         }
@@ -2259,26 +2598,23 @@
         final long truncatedAt;
         final CommitLogPosition replayAfter;
 
-        if (!noSnapshot && (keyspace.getMetadata().params.durableWrites || DatabaseDescriptor.isAutoSnapshot()))
+        if (!noSnapshot &&
+               ((keyspace.getMetadata().params.durableWrites && !memtableWritesAreDurable())  // need to clear dirty regions
+               || DatabaseDescriptor.isAutoSnapshot())) // need sstable for snapshot
         {
-            replayAfter = forceBlockingFlush();
-            viewManager.forceBlockingFlush();
+            replayAfter = forceBlockingFlush(FlushReason.TRUNCATE);
+            viewManager.forceBlockingFlush(FlushReason.TRUNCATE);
         }
         else
         {
             // just nuke the memtable data w/o writing to disk first
+            // note: this does not wait for the switch to complete, but because the post-flush processing is serial,
+            // the call below does.
             viewManager.dumpMemtables();
-            try
-            {
-                replayAfter = dumpMemtable().get();
-            }
-            catch (Exception e)
-            {
-                throw new RuntimeException(e);
-            }
+            replayAfter = FBUtilities.waitOnFuture(dumpMemtable());
         }
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         // make sure none of our sstables are somehow in the future (clock drift, perhaps)
         for (ColumnFamilyStore cfs : concatWithIndexes())
             for (SSTableReader sstable : cfs.getLiveSSTables())
@@ -2298,7 +2634,7 @@
                 data.notifyTruncated(truncatedAt);
 
             if (!noSnapshot && DatabaseDescriptor.isAutoSnapshot())
-                snapshot(Keyspace.getTimestampedSnapshotNameWithPrefix(name, SNAPSHOT_TRUNCATE_PREFIX));
+                snapshot(Keyspace.getTimestampedSnapshotNameWithPrefix(name, SNAPSHOT_TRUNCATE_PREFIX), DatabaseDescriptor.getAutoSnapshotTtl());
 
             discardSSTables(truncatedAt);
 
@@ -2312,7 +2648,7 @@
             }
         };
 
-        runWithCompactionsDisabled(Executors.callable(truncateRunnable), true, true);
+        runWithCompactionsDisabled(FutureTask.callable(truncateRunnable), true, true);
 
         viewManager.build();
 
@@ -2320,7 +2656,9 @@
     }
 
     /**
-     * Drops current memtable without flushing to disk. This should only be called when truncating a column family which is not durable.
+     * Drops current memtable without flushing to disk. This should only be called when truncating a column family
+     * that cannot have dirty intervals in the commit log (i.e. one which is not durable, or where the memtable itself
+     * performs durable writes).
      */
     public Future<CommitLogPosition> dumpMemtable()
     {
@@ -2333,6 +2671,14 @@
         }
     }
 
+    public void unloadCf()
+    {
+        if (keyspace.getMetadata().params.durableWrites && !memtableWritesAreDurable())  // need to clear dirty regions
+            forceBlockingFlush(ColumnFamilyStore.FlushReason.DROP);
+        else
+            FBUtilities.waitOnFuture(dumpMemtable());
+    }
+
     public <V> V runWithCompactionsDisabled(Callable<V> callable, boolean interruptValidation, boolean interruptViews)
     {
         return runWithCompactionsDisabled(callable, (sstable) -> true, interruptValidation, interruptViews, true);
@@ -2651,6 +2997,11 @@
         return compactionStrategyManager.getSSTableCountPerLevel();
     }
 
+    public long[] getPerLevelSizeBytes()
+    {
+        return compactionStrategyManager.getPerLevelSizeBytes();
+    }
+
     public int getLevelFanoutSize()
     {
         return compactionStrategyManager.getLevelFanoutSize();
@@ -2750,7 +3101,7 @@
     {
         double allDroppable = 0;
         long allColumns = 0;
-        int localTime = (int)(System.currentTimeMillis()/1000);
+        int localTime = (int)(currentTimeMillis() / 1000);
 
         for (SSTableReader sstable : getSSTables(SSTableSet.LIVE))
         {
@@ -2814,9 +3165,11 @@
         return diskBoundaryManager.getDiskBoundaries(this);
     }
 
-    public void invalidateDiskBoundaries()
+    public void invalidateLocalRanges()
     {
         diskBoundaryManager.invalidate();
+
+        switchMemtableOrNotify(FlushReason.OWNED_RANGES_CHANGE, Memtable::localRangesUpdated);
     }
 
     @Override
@@ -2836,6 +3189,24 @@
         return neverPurgeTombstones;
     }
 
+    void onTableDropped()
+    {
+        indexManager.markAllIndexesRemoved();
+
+        CompactionManager.instance.interruptCompactionForCFs(concatWithIndexes(), (sstable) -> true, true);
+
+        if (DatabaseDescriptor.isAutoSnapshot())
+            snapshot(Keyspace.getTimestampedSnapshotNameWithPrefix(name, ColumnFamilyStore.SNAPSHOT_DROP_PREFIX), DatabaseDescriptor.getAutoSnapshotTtl());
+
+        CommitLog.instance.forceRecycleAllSegments(Collections.singleton(metadata.id));
+
+        compactionStrategyManager.shutdown();
+
+        // wait for any outstanding reads/writes that might affect the CFS
+        Keyspace.writeOrder.awaitNewBarrier();
+        readOrdering.awaitNewBarrier();
+    }
+
     /**
      * The thread pools used to flush memtables.
      *
@@ -2849,12 +3220,12 @@
         /**
          * The flush executors for non local system keyspaces.
          */
-        private final ExecutorService[] nonLocalSystemflushExecutors;
+        private final ExecutorPlus[] nonLocalSystemflushExecutors;
 
         /**
          * The flush executors for the local system keyspaces.
          */
-        private final ExecutorService[] localSystemDiskFlushExecutors;
+        private final ExecutorPlus[] localSystemDiskFlushExecutors;
 
         /**
          * {@code true} if local system keyspaces are stored in their own directory and use an extra flush executor,
@@ -2866,32 +3237,26 @@
                                      String[] locationsForNonSystemKeyspaces,
                                      boolean useSpecificLocationForSystemKeyspaces)
         {
-            ExecutorService[] flushExecutors = createPerDiskFlushWriters(locationsForNonSystemKeyspaces.length, flushWriters);
+            ExecutorPlus[] flushExecutors = createPerDiskFlushWriters(locationsForNonSystemKeyspaces.length, flushWriters);
             nonLocalSystemflushExecutors = flushExecutors;
             useSpecificExecutorForSystemKeyspaces = useSpecificLocationForSystemKeyspaces;
-            localSystemDiskFlushExecutors = useSpecificLocationForSystemKeyspaces ? new ExecutorService[] {newThreadPool("LocalSystemKeyspacesDiskMemtableFlushWriter", flushWriters)}
-                                                                                  : new ExecutorService[] {flushExecutors[0]};
+            localSystemDiskFlushExecutors = useSpecificLocationForSystemKeyspaces ? new ExecutorPlus[] {newThreadPool("LocalSystemKeyspacesDiskMemtableFlushWriter", flushWriters)}
+                                                                                  : new ExecutorPlus[] {flushExecutors[0]};
         }
 
-        private static ExecutorService[] createPerDiskFlushWriters(int numberOfExecutors, int flushWriters)
+        private static ExecutorPlus[] createPerDiskFlushWriters(int numberOfExecutors, int flushWriters)
         {
-            ExecutorService[] flushExecutors = new ExecutorService[numberOfExecutors];
-
+            ExecutorPlus[] flushExecutors = new ExecutorPlus[numberOfExecutors];
             for (int i = 0; i < numberOfExecutors; i++)
             {
-                flushExecutors[i] = newThreadPool("PerDiskMemtableFlushWriter_" + i, flushWriters);
+                flushExecutors[i] = newThreadPool("PerDiskMemtableFlushWriter_" + i, flushWriters);
             }
             return flushExecutors;
         }
 
-        private static JMXEnabledThreadPoolExecutor newThreadPool(String poolName, int size)
+        private static ExecutorPlus newThreadPool(String poolName, int size)
         {
-            return new JMXEnabledThreadPoolExecutor(size,
-                                                    Stage.KEEP_ALIVE_SECONDS,
-                                                    TimeUnit.SECONDS,
-                                                    new LinkedBlockingQueue<>(),
-                                                    new NamedThreadFactory(poolName),
-                                                    "internal");
+            return executorFactory().withJmxInternal().pooled(poolName, size);
         }
 
         /**
@@ -2901,7 +3266,7 @@
          * @param tableName the table name
          * @return the flush executors that should be used for flushing the memtables of the specified keyspace.
          */
-        public ExecutorService[] getExecutorsFor(String keyspaceName, String tableName)
+        public ExecutorPlus[] getExecutorsFor(String keyspaceName, String tableName)
         {
             return Directories.isStoredInLocalSystemKeyspacesDataLocation(keyspaceName, tableName) ? localSystemDiskFlushExecutors
                                                                                                    : nonLocalSystemflushExecutors;
@@ -2940,4 +3305,36 @@
         }
         return false;
     }
+
+    @Override
+    public Map<String, Long> getTopSizePartitions()
+    {
+        if (topPartitions == null)
+            return Collections.emptyMap();
+        return topPartitions.getTopSizePartitionMap();
+    }
+
+    @Override
+    public Long getTopSizePartitionsLastUpdate()
+    {
+        if (topPartitions == null)
+            return null;
+        return topPartitions.topSizes().lastUpdate;
+    }
+
+    @Override
+    public Map<String, Long> getTopTombstonePartitions()
+    {
+        if (topPartitions == null)
+            return Collections.emptyMap();
+        return topPartitions.getTopTombstonePartitionMap();
+    }
+
+    @Override
+    public Long getTopTombstonePartitionsLastUpdate()
+    {
+        if (topPartitions == null)
+            return null;
+        return topPartitions.topTombstones().lastUpdate;
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java b/src/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java
index 59d1d5d..de7c933 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java
@@ -29,6 +29,7 @@
 
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.utils.BreaksJMX;
 
 /**
  * The MBean interface for ColumnFamilyStore
@@ -58,7 +59,18 @@
      *
      * @param tokenRanges The token ranges to be compacted, interpreted as closed intervals.
      */
+    @BreaksJMX("This API was released in 3.10 using a parameter that takes Range of Token, which can only be done if the client has Cassandra binaries in the classpath")
+    @Deprecated
     public void forceCompactionForTokenRange(Collection<Range<Token>> tokenRanges) throws ExecutionException, InterruptedException;
+
+    /**
+     * Forces a major compaction of specified token ranges in this column family.
+     * <p>
+     * The token ranges will be interpreted as closed intervals to match the closed interval defined by the first and
+     * last keys of an sstable, even though the {@link Range} class is supposed to be half-open by definition.
+     */
+    public void forceCompactionForTokenRanges(String... tokenRanges);
+
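     A hedged sketch of how a client might call this (illustrative only): each string carries a start and an end token joined by the TOKEN_DELIMITER separator that ColumnFamilyStore.toTokenRanges() splits on; the separator's value and the proxy variable are assumptions, not shown in this diff.

         // Each argument is "<startToken><TOKEN_DELIMITER><endToken>", parsed by ColumnFamilyStore.toTokenRanges()
         // and compacted as a closed interval, e.g. (proxy is a hypothetical ColumnFamilyStoreMBean proxy):
         //   proxy.forceCompactionForTokenRanges(startToken + TOKEN_DELIMITER + endToken);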
     /**
      * Gets the minimum number of sstables in queue before compaction kicks off
      */
@@ -208,6 +220,12 @@
     public int[] getSSTableCountPerLevel();
 
     /**
+     * @return total size on disk for each level. null unless leveled compaction is used.
+     *         array index corresponds to level (long[0] is for level 0, ...).
+     */
+    public long[] getPerLevelSizeBytes();
+
+    /**
      * @return sstable fanout size for level compaction strategy.
      */
     public int getLevelFanoutSize();
@@ -256,4 +274,9 @@
     public boolean hasMisplacedSSTables();
 
     public List<String> getDataPaths() throws IOException;
+
+    public Map<String, Long> getTopSizePartitions();
+    public Long getTopSizePartitionsLastUpdate();
+    public Map<String, Long> getTopTombstonePartitions();
+    public Long getTopTombstonePartitionsLastUpdate();
 }
diff --git a/src/java/org/apache/cassandra/db/ColumnIndex.java b/src/java/org/apache/cassandra/db/ColumnIndex.java
index b872300..f7860df 100644
--- a/src/java/org/apache/cassandra/db/ColumnIndex.java
+++ b/src/java/org/apache/cassandra/db/ColumnIndex.java
@@ -29,6 +29,7 @@
 import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.sstable.IndexInfo;
 import org.apache.cassandra.io.sstable.format.SSTableFlushObserver;
+import org.apache.cassandra.io.sstable.format.SSTableWriter;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.io.util.SequentialWriter;
@@ -36,16 +37,16 @@
 
 /**
  * Column index builder used by {@link org.apache.cassandra.io.sstable.format.big.BigTableWriter}.
- * For index entries that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb},
+ * For index entries that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size},
  * this uses the serialization logic as in {@link RowIndexEntry}.
  */
 public class ColumnIndex
 {
-    // used, if the row-index-entry reaches config column_index_cache_size_in_kb
+    // used, if the row-index-entry reaches config column_index_cache_size
     private DataOutputBuffer buffer;
     // used to track the size of the serialized size of row-index-entry (unused for buffer)
     private int indexSamplesSerializedSize;
-    // used, until the row-index-entry reaches config column_index_cache_size_in_kb
+    // used, until the row-index-entry reaches config column_index_cache_size
     private final List<IndexInfo> indexSamples = new ArrayList<>();
 
     private DataOutputBuffer reusableBuffer;
@@ -115,7 +116,11 @@
         this.headerLength = writer.position() - initialPosition;
 
         while (iterator.hasNext())
-            add(iterator.next());
+        {
+            Unfiltered unfiltered = iterator.next();
+            SSTableWriter.guardCollectionSize(iterator.metadata(), iterator.partitionKey(), unfiltered);
+            add(unfiltered);
+        }
 
         finish();
     }
@@ -199,8 +204,8 @@
         }
         columnIndexCount++;
 
-        // First, we collect the IndexInfo objects until we reach Config.column_index_cache_size_in_kb in an ArrayList.
-        // When column_index_cache_size_in_kb is reached, we switch to byte-buffer mode.
+        // First, we collect the IndexInfo objects until we reach Config.column_index_cache_size in an ArrayList.
+        // When column_index_cache_size is reached, we switch to byte-buffer mode.
         if (buffer == null)
         {
             indexSamplesSerializedSize += idxSerializer.serializedSize(cIndexInfo);
@@ -285,7 +290,7 @@
 
         // If we serialize the IndexInfo objects directly in the code above into 'buffer',
         // we have to write the offsets to these here. The offsets have already been collected
-        // in indexOffsets[]. buffer is != null, if it exceeds Config.column_index_cache_size_in_kb.
+        // in indexOffsets[]. buffer is != null, if it exceeds Config.column_index_cache_size.
         // In the other case, when buffer==null, the offsets are serialized in RowIndexEntry.IndexedEntry.serialize().
         if (buffer != null)
             RowIndexEntry.Serializer.serializeOffsets(buffer, indexOffsets, columnIndexCount);
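The comments above describe a two-mode strategy: keep IndexInfo samples in a list while their estimated serialized size stays under the configured cache size, then switch to serializing into a byte buffer. A self-contained sketch of that pattern, with made-up names and an arbitrary 2 KiB threshold standing in for column_index_cache_size:

    import java.io.ByteArrayOutputStream;
    import java.util.ArrayList;
    import java.util.List;

    final class ThresholdedIndexCollector
    {
        // stand-in for the configured column_index_cache_size threshold
        private static final int CACHE_SIZE_BYTES = 2 * 1024;

        private final List<byte[]> samples = new ArrayList<>();
        private int sampledBytes;
        private ByteArrayOutputStream buffer; // non-null once the threshold has been crossed

        void add(byte[] serializedEntry)
        {
            if (buffer == null)
            {
                samples.add(serializedEntry);
                sampledBytes += serializedEntry.length;
                if (sampledBytes < CACHE_SIZE_BYTES)
                    return;
                // threshold reached: switch to buffer mode and replay what was collected so far
                buffer = new ByteArrayOutputStream();
                for (byte[] sample : samples)
                    buffer.write(sample, 0, sample.length);
                samples.clear();
            }
            else
            {
                buffer.write(serializedEntry, 0, serializedEntry.length);
            }
        }
    }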
diff --git a/src/java/org/apache/cassandra/db/ConsistencyLevel.java b/src/java/org/apache/cassandra/db/ConsistencyLevel.java
index fbaf3fd..843ccb9 100644
--- a/src/java/org/apache/cassandra/db/ConsistencyLevel.java
+++ b/src/java/org/apache/cassandra/db/ConsistencyLevel.java
@@ -18,8 +18,11 @@
 package org.apache.cassandra.db;
 
 
+import java.util.Locale;
+
 import com.carrotsearch.hppc.ObjectIntHashMap;
 import org.apache.cassandra.locator.Endpoints;
+import org.apache.cassandra.locator.InOurDc;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -28,7 +31,6 @@
 import org.apache.cassandra.transport.ProtocolException;
 
 import static org.apache.cassandra.locator.Replicas.addToCountPerDc;
-import static org.apache.cassandra.locator.Replicas.countInOurDc;
 
 public enum ConsistencyLevel
 {
@@ -41,7 +43,7 @@
     LOCAL_QUORUM(6, true),
     EACH_QUORUM (7),
     SERIAL      (8),
-    LOCAL_SERIAL(9),
+    LOCAL_SERIAL(9, true),
     LOCAL_ONE   (10, true),
     NODE_LOCAL  (11, true);
 
@@ -81,6 +83,11 @@
         return codeIdx[code];
     }
 
+    public static ConsistencyLevel fromString(String str)
+    {
+        return valueOf(str.toUpperCase(Locale.US));
+    }
+
     public static int quorumFor(AbstractReplicationStrategy replicationStrategy)
     {
         return (replicationStrategy.getReplicationFactor().allReplicas / 2) + 1;
@@ -173,7 +180,7 @@
                 break;
             case LOCAL_ONE: case LOCAL_QUORUM: case LOCAL_SERIAL:
                 // we will only count local replicas towards our response count, as these queries only care about local guarantees
-                blockFor += countInOurDc(pending).allReplicas();
+                blockFor += pending.count(InOurDc.replicas());
                 break;
             case ONE: case TWO: case THREE:
             case QUORUM: case EACH_QUORUM:
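A small usage sketch for the new fromString helper (assuming the Cassandra classes are on the classpath); the explicit Locale.US uppercase makes parsing independent of the JVM's default locale:

    import org.apache.cassandra.db.ConsistencyLevel;

    public final class ParseConsistencyLevel
    {
        public static void main(String[] args)
        {
            // uppercased with Locale.US inside fromString, so "local_quorum" parses the same
            // way regardless of the default locale (e.g. the Turkish dotless-i case)
            ConsistencyLevel cl = ConsistencyLevel.fromString("local_quorum");
            System.out.println(cl); // LOCAL_QUORUM
        }
    }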
diff --git a/src/java/org/apache/cassandra/db/CounterMutation.java b/src/java/org/apache/cassandra/db/CounterMutation.java
index fe1e46e..4f91b83 100644
--- a/src/java/org/apache/cassandra/db/CounterMutation.java
+++ b/src/java/org/apache/cassandra/db/CounterMutation.java
@@ -21,6 +21,7 @@
 import java.util.*;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
+import java.util.function.Supplier;
 
 import com.google.common.base.Function;
 import com.google.common.base.Objects;
@@ -50,6 +51,7 @@
 import static org.apache.cassandra.net.MessagingService.VERSION_30;
 import static org.apache.cassandra.net.MessagingService.VERSION_3014;
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class CounterMutation implements IMutation
 {
@@ -81,6 +83,12 @@
         return mutation.getPartitionUpdates();
     }
 
+    @Override
+    public Supplier<Mutation> hintOnFailure()
+    {
+        return null;
+    }
+
     public void validateSize(int version, int overhead)
     {
         long totalSize = serializedSize(version) + overhead;
@@ -150,12 +158,12 @@
 
     private void grabCounterLocks(Keyspace keyspace, List<Lock> locks) throws WriteTimeoutException
     {
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
         AbstractReplicationStrategy replicationStrategy = keyspace.getReplicationStrategy();
         for (Lock lock : LOCKS.bulkGet(getCounterLockKeys()))
         {
-            long timeout = getTimeout(NANOSECONDS) - (System.nanoTime() - startTime);
+            long timeout = getTimeout(NANOSECONDS) - (nanoTime() - startTime);
             try
             {
                 if (!lock.tryLock(timeout, NANOSECONDS))
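grabCounterLocks gives each lock attempt only the time remaining from the overall request budget, measured with a monotonic clock. A generic sketch of that arithmetic using plain java.util.concurrent types (names are illustrative; the real code uses Clock.Global.nanoTime() and throws WriteTimeoutException):

    import java.util.List;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.locks.Lock;

    final class TimedLockAcquisition
    {
        // Acquire every lock, giving each attempt only whatever is left of the overall budget.
        static void acquireAll(List<Lock> locks, long budgetNanos) throws InterruptedException, TimeoutException
        {
            long start = System.nanoTime();
            for (Lock lock : locks)
            {
                long remaining = budgetNanos - (System.nanoTime() - start);
                if (remaining <= 0 || !lock.tryLock(remaining, TimeUnit.NANOSECONDS))
                    throw new TimeoutException("timed out acquiring locks");
            }
        }
    }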
diff --git a/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java b/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
index a30ce66..e4c7669 100644
--- a/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
@@ -26,6 +26,8 @@
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.service.StorageProxy;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class CounterMutationVerbHandler implements IVerbHandler<CounterMutation>
 {
     public static final CounterMutationVerbHandler instance = new CounterMutationVerbHandler();
@@ -34,7 +36,7 @@
 
     public void doVerb(final Message<CounterMutation> message)
     {
-        long queryStartNanoTime = System.nanoTime();
+        long queryStartNanoTime = nanoTime();
         final CounterMutation cm = message.payload;
         logger.trace("Applying forwarded {}", cm);
 
diff --git a/src/java/org/apache/cassandra/db/DataRange.java b/src/java/org/apache/cassandra/db/DataRange.java
index 91a62b3..52162be 100644
--- a/src/java/org/apache/cassandra/db/DataRange.java
+++ b/src/java/org/apache/cassandra/db/DataRange.java
@@ -19,7 +19,6 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import org.apache.cassandra.db.marshal.ByteArrayAccessor;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.filter.*;
@@ -186,9 +185,10 @@
      *
      * @return Whether this {@code DataRange} queries everything.
      */
-    public boolean isUnrestricted()
+    public boolean isUnrestricted(TableMetadata metadata)
     {
-        return startKey().isMinimum() && stopKey().isMinimum() && clusteringIndexFilter.selectsAllPartition();
+        return startKey().isMinimum() && stopKey().isMinimum() &&
+               (clusteringIndexFilter.selectsAllPartition() || metadata.clusteringColumns().isEmpty());
     }
 
     public boolean selectsAllPartition()
@@ -257,10 +257,10 @@
         return String.format("range=%s pfilter=%s", keyRange.getString(metadata.partitionKeyType), clusteringIndexFilter.toString(metadata));
     }
 
-    public String toCQLString(TableMetadata metadata)
+    public String toCQLString(TableMetadata metadata, RowFilter rowFilter)
     {
-        if (isUnrestricted())
-            return "UNRESTRICTED";
+        if (isUnrestricted(metadata))
+            return rowFilter.toCQLString();
 
         StringBuilder sb = new StringBuilder();
 
@@ -278,7 +278,7 @@
             needAnd = true;
         }
 
-        String filterString = clusteringIndexFilter.toCQLString(metadata);
+        String filterString = clusteringIndexFilter.toCQLString(metadata, rowFilter);
         if (!filterString.isEmpty())
             sb.append(needAnd ? " AND " : "").append(filterString);
 
@@ -312,8 +312,6 @@
              : (isInclusive ? "<=" : "<");
     }
 
-    // TODO: this is reused in SinglePartitionReadCommand but this should not really be here. Ideally
-    // we need a more "native" handling of composite partition keys.
     public static void appendKeyString(StringBuilder sb, AbstractType<?> type, ByteBuffer key)
     {
         if (type instanceof CompositeType)
@@ -321,11 +319,11 @@
             CompositeType ct = (CompositeType)type;
             ByteBuffer[] values = ct.split(key);
             for (int i = 0; i < ct.types.size(); i++)
-                sb.append(i == 0 ? "" : ", ").append(ct.types.get(i).getString(values[i]));
+                sb.append(i == 0 ? "" : ", ").append(ct.types.get(i).toCQLString(values[i]));
         }
         else
         {
-            sb.append(type.getString(key));
+            sb.append(type.toCQLString(key));
         }
     }
 
@@ -393,7 +391,7 @@
         }
 
         @Override
-        public boolean isUnrestricted()
+        public boolean isUnrestricted(TableMetadata metadata)
         {
             return false;
         }
diff --git a/src/java/org/apache/cassandra/db/DecoratedKey.java b/src/java/org/apache/cassandra/db/DecoratedKey.java
index 92d6414..4dd87d0 100644
--- a/src/java/org/apache/cassandra/db/DecoratedKey.java
+++ b/src/java/org/apache/cassandra/db/DecoratedKey.java
@@ -19,10 +19,15 @@
 
 import java.nio.ByteBuffer;
 import java.util.Comparator;
+import java.util.List;
+import java.util.StringJoiner;
 
+import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.dht.Token.KeyBound;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.MurmurHash;
 import org.apache.cassandra.utils.IFilter.FilterKey;
@@ -125,6 +130,33 @@
         return "DecoratedKey(" + getToken() + ", " + keystring + ")";
     }
 
+    /**
+     * Returns a CQL representation of this key.
+     *
+     * @param metadata the metadata of the table that this key belongs to
+     * @return a CQL representation of this key
+     */
+    public String toCQLString(TableMetadata metadata)
+    {
+        List<ColumnMetadata> columns = metadata.partitionKeyColumns();
+
+        if (columns.size() == 1)
+            return toCQLString(columns.get(0), getKey());
+
+        ByteBuffer[] values = ((CompositeType) metadata.partitionKeyType).split(getKey());
+        StringJoiner joiner = new StringJoiner(" AND ");
+
+        for (int i = 0; i < columns.size(); i++)
+            joiner.add(toCQLString(columns.get(i), values[i]));
+
+        return joiner.toString();
+    }
+
+    private static String toCQLString(ColumnMetadata metadata, ByteBuffer key)
+    {
+        return String.format("%s = %s", metadata.name.toCQLString(), metadata.type.toCQLString(key));
+    }
+
     public Token getToken()
     {
         return token;
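The new toCQLString builds one "column = value" term per partition key component and joins them with AND. A standalone illustration of the same joining pattern, with made-up column names and literals:

    import java.util.Arrays;
    import java.util.List;
    import java.util.StringJoiner;

    public final class KeyToCqlDemo
    {
        // Join one "column = literal" term per partition key component with AND.
        static String toCql(List<String> columnNames, List<String> literals)
        {
            StringJoiner joiner = new StringJoiner(" AND ");
            for (int i = 0; i < columnNames.size(); i++)
                joiner.add(columnNames.get(i) + " = " + literals.get(i));
            return joiner.toString();
        }

        public static void main(String[] args)
        {
            // e.g. a table with PRIMARY KEY ((bucket, id)) and the key (3, 'abc')
            System.out.println(toCql(Arrays.asList("bucket", "id"), Arrays.asList("3", "'abc'")));
            // prints: bucket = 3 AND id = 'abc'
        }
    }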
diff --git a/src/java/org/apache/cassandra/db/DeletionTime.java b/src/java/org/apache/cassandra/db/DeletionTime.java
index d8ac91d..105e10d 100644
--- a/src/java/org/apache/cassandra/db/DeletionTime.java
+++ b/src/java/org/apache/cassandra/db/DeletionTime.java
@@ -33,7 +33,7 @@
  */
 public class DeletionTime implements Comparable<DeletionTime>, IMeasurableMemory
 {
-    private static final long EMPTY_SIZE = ObjectSizes.measure(new DeletionTime(0, 0));
+    public static final long EMPTY_SIZE = ObjectSizes.measure(new DeletionTime(0, 0));
 
     /**
      * A special DeletionTime that signifies that there is no top-level (row) tombstone.
diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java
index cecc60d..b16dd97 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -17,18 +17,29 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.*;
-import java.nio.file.*;
+import java.time.Instant;
 import java.util.*;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.BiPredicate;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
+import com.google.common.annotations.VisibleForTesting;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.file.FileStore;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.RateLimiter;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,9 +53,11 @@
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.sstable.*;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.io.util.PathUtils;
 import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.snapshot.SnapshotManifest;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.utils.DirectorySizeCalculator;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.Pair;
 
@@ -145,25 +158,25 @@
             switch (action)
             {
                 case X:
-                    privilege = file.canExecute();
+                    privilege = file.isExecutable();
                     break;
                 case W:
-                    privilege = file.canWrite();
+                    privilege = file.isWritable();
                     break;
                 case XW:
-                    privilege = file.canExecute() && file.canWrite();
+                    privilege = file.isExecutable() && file.isWritable();
                     break;
                 case R:
-                    privilege = file.canRead();
+                    privilege = file.isReadable();
                     break;
                 case XR:
-                    privilege = file.canExecute() && file.canRead();
+                    privilege = file.isExecutable() && file.isReadable();
                     break;
                 case RW:
-                    privilege = file.canRead() && file.canWrite();
+                    privilege = file.isReadable() && file.isWritable();
                     break;
                 case XRW:
-                    privilege = file.canExecute() && file.canRead() && file.canWrite();
+                    privilege = file.isExecutable() && file.isReadable() && file.isWritable();
                     break;
             }
             return privilege;
@@ -209,7 +222,7 @@
             // check if old SSTable directory exists
             File dataPath = new File(paths[i].location, oldSSTableRelativePath);
             dataPaths[i] = dataPath;
-            canonicalPathsBuilder.put(Paths.get(FileUtils.getCanonicalPath(dataPath)), paths[i]);
+            canonicalPathsBuilder.put(dataPath.toCanonical().toPath(), paths[i]);
         }
         boolean olderDirectoryExists = Iterables.any(Arrays.asList(dataPaths), File::exists);
         if (!olderDirectoryExists)
@@ -221,7 +234,7 @@
             {
                 File dataPath = new File(paths[i].location, newSSTableRelativePath);
                 dataPaths[i] = dataPath;
-                canonicalPathsBuilder.put(Paths.get(FileUtils.getCanonicalPath(dataPath)), paths[i]);
+                canonicalPathsBuilder.put(dataPath.toCanonical().toPath(), paths[i]);
             }
         }
         // if index, then move to its own directory
@@ -232,7 +245,7 @@
             {
                 File dataPath = new File(dataPaths[i], indexNameWithDot);
                 dataPaths[i] = dataPath;
-                canonicalPathsBuilder.put(Paths.get(FileUtils.getCanonicalPath(dataPath)), paths[i]);
+                canonicalPathsBuilder.put(dataPath.toCanonical().toPath(), paths[i]);
             }
         }
 
@@ -255,22 +268,16 @@
         {
             for (File dataPath : dataPaths)
             {
-                File[] indexFiles = dataPath.getParentFile().listFiles(new FileFilter()
-                {
-                    @Override
-                    public boolean accept(File file)
-                    {
-                        if (file.isDirectory())
-                            return false;
+                File[] indexFiles = dataPath.parent().tryList(file -> {
+                    if (file.isDirectory())
+                        return false;
 
-                        Descriptor desc = SSTable.tryDescriptorFromFilename(file);
-                        return desc != null && desc.ksname.equals(metadata.keyspace) && desc.cfname.equals(metadata.name);
-
-                    }
+                    Descriptor desc = SSTable.tryDescriptorFromFilename(file);
+                    return desc != null && desc.ksname.equals(metadata.keyspace) && desc.cfname.equals(metadata.name);
                 });
                 for (File indexFile : indexFiles)
                 {
-                    File destFile = new File(dataPath, indexFile.getName());
+                    File destFile = new File(dataPath, indexFile.name());
                     logger.trace("Moving index file {} to {}", indexFile, destFile);
                     FileUtils.renameWithConfirm(indexFile, destFile);
                 }
@@ -291,8 +298,8 @@
             for (File dir : dataPaths)
             {
                 // Note that we must compare absolute paths (not canonical) here since keyspace directories might be symlinks
-                Path dirPath = Paths.get(dir.getAbsolutePath());
-                Path locationPath = Paths.get(dataDirectory.location.getAbsolutePath());
+                Path dirPath = dir.toAbsolute().toPath();
+                Path locationPath = dataDirectory.location.toAbsolute().toPath();
                 if (dirPath.startsWith(locationPath))
                     return dir;
             }
@@ -509,7 +516,7 @@
             StringJoiner pathString = new StringJoiner(",", "[", "]");
             for (DataDirectory p: paths)
             {
-                pathString.add(p.location.getAbsolutePath());
+                pathString.add(p.location.toJavaIOFile().getAbsolutePath());
             }
             logger.warn("Insufficient disk space for compaction. Across {} there's only {} available, but {} is needed.",
                         pathString.toString(),
@@ -556,7 +563,7 @@
     {
         if (isSecondaryIndexFolder(location))
         {
-            return getOrCreate(location.getParentFile(), SNAPSHOT_SUBDIR, snapshotName, location.getName());
+            return getOrCreate(location.parent(), SNAPSHOT_SUBDIR, snapshotName, location.name());
         }
         else
         {
@@ -567,12 +574,22 @@
     public File getSnapshotManifestFile(String snapshotName)
     {
         File snapshotDir = getSnapshotDirectory(getDirectoryForNewSSTables(), snapshotName);
+        return getSnapshotManifestFile(snapshotDir);
+    }
+
+    public static File getSnapshotManifestFile(File snapshotDir)
+    {
         return new File(snapshotDir, "manifest.json");
     }
 
     public File getSnapshotSchemaFile(String snapshotName)
     {
         File snapshotDir = getSnapshotDirectory(getDirectoryForNewSSTables(), snapshotName);
+        return getSnapshotSchemaFile(snapshotDir);
+    }
+
+    public static File getSnapshotSchemaFile(File snapshotDir)
+    {
         return new File(snapshotDir, "schema.cql");
     }
 
@@ -596,7 +613,7 @@
     {
         if (isSecondaryIndexFolder(location))
         {
-            return getOrCreate(location.getParentFile(), BACKUPS_SUBDIR, location.getName());
+            return getOrCreate(location.parent(), BACKUPS_SUBDIR, location.name());
         }
         else
         {
@@ -638,12 +655,22 @@
             this.location = location;
         }
 
+        public DataDirectory(Path location)
+        {
+            this.location = new File(location);
+        }
+
         public long getAvailableSpace()
         {
-            long availableSpace = FileUtils.getUsableSpace(location) - DatabaseDescriptor.getMinFreeSpacePerDriveInBytes();
+            long availableSpace = PathUtils.tryGetSpace(location.toPath(), FileStore::getUsableSpace) - DatabaseDescriptor.getMinFreeSpacePerDriveInBytes();
             return availableSpace > 0 ? availableSpace : 0;
         }
 
+        public long getRawSize()
+        {
+            return FileUtils.folderSize(location);
+        }
+
         @Override
         public boolean equals(Object o)
         {
@@ -889,7 +916,7 @@
         public List<Map.Entry<Descriptor, Set<Component>>> sortedList()
         {
             List<Map.Entry<Descriptor, Set<Component>>> sortedEntries = new ArrayList<>(list().entrySet());
-            sortedEntries.sort(Comparator.comparingInt(e -> e.getKey().generation));
+            sortedEntries.sort((o1, o2) -> SSTableIdFactory.COMPARATOR.compare(o1.getKey().id, o2.getKey().id));
             return sortedEntries;
         }
 
@@ -972,51 +999,80 @@
         }
     }
 
-    /**
-     *
-     * @return  Return a map of all snapshots to space being used
-     * The pair for a snapshot has size on disk and true size.
-     */
-    public Map<String, SnapshotSizeDetails> getSnapshotDetails()
+    public Map<String, TableSnapshot> listSnapshots()
     {
-        List<File> snapshots = listSnapshots();
-        final Map<String, SnapshotSizeDetails> snapshotSpaceMap = Maps.newHashMapWithExpectedSize(snapshots.size());
-        for (File snapshot : snapshots)
+        Map<String, Set<File>> snapshotDirsByTag = listSnapshotDirsByTag();
+
+        Map<String, TableSnapshot> snapshots = Maps.newHashMapWithExpectedSize(snapshotDirsByTag.size());
+
+        for (Map.Entry<String, Set<File>> entry : snapshotDirsByTag.entrySet())
         {
-            final long sizeOnDisk = FileUtils.folderSize(snapshot);
-            final long trueSize = getTrueAllocatedSizeIn(snapshot);
-            SnapshotSizeDetails spaceUsed = snapshotSpaceMap.get(snapshot.getName());
-            if (spaceUsed == null)
-                spaceUsed =  new SnapshotSizeDetails(sizeOnDisk,trueSize);
-            else
-                spaceUsed = new SnapshotSizeDetails(spaceUsed.sizeOnDiskBytes + sizeOnDisk, spaceUsed.dataSizeBytes + trueSize);
-            snapshotSpaceMap.put(snapshot.getName(), spaceUsed);
+            String tag = entry.getKey();
+            Set<File> snapshotDirs = entry.getValue();
+            SnapshotManifest manifest = maybeLoadManifest(metadata.keyspace, metadata.name, tag, snapshotDirs);
+            snapshots.put(tag, buildSnapshot(tag, manifest, snapshotDirs));
         }
-        return snapshotSpaceMap;
+
+        return snapshots;
+    }
+
+    protected TableSnapshot buildSnapshot(String tag, SnapshotManifest manifest, Set<File> snapshotDirs)
+    {
+        Instant createdAt = manifest == null ? null : manifest.createdAt;
+        Instant expiresAt = manifest == null ? null : manifest.expiresAt;
+        return new TableSnapshot(metadata.keyspace, metadata.name, metadata.id.asUUID(), tag, createdAt, expiresAt,
+                                 snapshotDirs);
+    }
+
+    @VisibleForTesting
+    protected static SnapshotManifest maybeLoadManifest(String keyspace, String table, String tag, Set<File> snapshotDirs)
+    {
+        List<File> manifests = snapshotDirs.stream().map(d -> new File(d, "manifest.json"))
+                                           .filter(d -> d.exists()).collect(Collectors.toList());
+
+        if (manifests.isEmpty())
+        {
+            logger.warn("No manifest found for snapshot {} of table {}.{}.", tag, keyspace, table);
+            return null;
+        }
+
+        if (manifests.size() > 1)
+        {
+            logger.warn("Found multiple manifests for snapshot {} of table {}.{}", tag, keyspace, table);
+        }
+
+        try
+        {
+            return SnapshotManifest.deserializeFromJsonFile(manifests.get(0));
+        }
+        catch (IOException e)
+        {
+            logger.warn("Cannot read manifest file {} of snapshot {}.", manifests, tag, e);
+        }
+
+        return null;
     }
 
     public List<String> listEphemeralSnapshots()
     {
         final List<String> ephemeralSnapshots = new LinkedList<>();
-        for (File snapshot : listSnapshots())
+        for (File snapshot : listAllSnapshots())
         {
             if (getEphemeralSnapshotMarkerFile(snapshot).exists())
-                ephemeralSnapshots.add(snapshot.getName());
+                ephemeralSnapshots.add(snapshot.name());
         }
         return ephemeralSnapshots;
     }
 
-    private List<File> listSnapshots()
+    private List<File> listAllSnapshots()
     {
         final List<File> snapshots = new LinkedList<>();
         for (final File dir : dataPaths)
         {
             File snapshotDir = isSecondaryIndexFolder(dir)
-                               ? new File(dir.getParent(), SNAPSHOT_SUBDIR)
+                               ? new File(dir.parentPath(), SNAPSHOT_SUBDIR)
                                : new File(dir, SNAPSHOT_SUBDIR);
             if (snapshotDir.exists() && snapshotDir.isDirectory())
             {
-                final File[] snapshotDirs  = snapshotDir.listFiles();
+                final File[] snapshotDirs  = snapshotDir.tryList();
                 if (snapshotDirs != null)
                 {
                     for (final File snapshot : snapshotDirs)
@@ -1031,6 +1087,32 @@
         return snapshots;
     }
 
+    @VisibleForTesting
+    protected Map<String, Set<File>> listSnapshotDirsByTag()
+    {
+        Map<String, Set<File>> snapshotDirsByTag = new HashMap<>();
+        for (final File dir : dataPaths)
+        {
+            File snapshotDir = isSecondaryIndexFolder(dir)
+                               ? new File(dir.parentPath(), SNAPSHOT_SUBDIR)
+                               : new File(dir, SNAPSHOT_SUBDIR);
+            if (snapshotDir.exists() && snapshotDir.isDirectory())
+            {
+                final File[] snapshotDirs  = snapshotDir.tryList();
+                if (snapshotDirs != null)
+                {
+                    for (final File snapshot : snapshotDirs)
+                    {
+                        if (snapshot.isDirectory())
+                        {
+                            snapshotDirsByTag.computeIfAbsent(snapshot.name(), k -> new LinkedHashSet<>()).add(snapshot.toAbsolute());
+                        }
+                    }
+                }
+            }
+        }
+        return snapshotDirsByTag;
+    }
+
     public boolean snapshotExists(String snapshotName)
     {
         for (File dir : dataPaths)
@@ -1038,7 +1120,7 @@
             File snapshotDir;
             if (isSecondaryIndexFolder(dir))
             {
-                snapshotDir = new File(dir.getParentFile(), join(SNAPSHOT_SUBDIR, snapshotName, dir.getName()));
+                snapshotDir = new File(dir.parent(), join(SNAPSHOT_SUBDIR, snapshotName, dir.name()));
             }
             else
             {
@@ -1050,41 +1132,33 @@
         return false;
     }
 
-    public static void clearSnapshot(String snapshotName, List<File> snapshotDirectories, RateLimiter snapshotRateLimiter)
+    public static void clearSnapshot(String snapshotName, List<File> tableDirectories, RateLimiter snapshotRateLimiter)
     {
         // If snapshotName is empty or null, we will delete the entire snapshot directory
         String tag = snapshotName == null ? "" : snapshotName;
-        for (File dir : snapshotDirectories)
+        for (File tableDir : tableDirectories)
         {
-            File snapshotDir = new File(dir, join(SNAPSHOT_SUBDIR, tag));
-            if (snapshotDir.exists())
-            {
-                logger.trace("Removing snapshot directory {}", snapshotDir);
-                try
-                {
-                    FileUtils.deleteRecursiveWithThrottle(snapshotDir, snapshotRateLimiter);
-                }
-                catch (FSWriteError e)
-                {
-                    if (FBUtilities.isWindows)
-                        SnapshotDeletingTask.addFailedSnapshot(snapshotDir);
-                    else
-                        throw e;
-                }
-            }
+            File snapshotDir = new File(tableDir, join(SNAPSHOT_SUBDIR, tag));
+            removeSnapshotDirectory(snapshotRateLimiter, snapshotDir);
         }
     }
 
-    // The snapshot must exist
-    public long snapshotCreationTime(String snapshotName)
+    public static void removeSnapshotDirectory(RateLimiter snapshotRateLimiter, File snapshotDir)
     {
-        for (File dir : dataPaths)
+        if (snapshotDir.exists())
         {
-            File snapshotDir = getSnapshotDirectory(dir, snapshotName);
-            if (snapshotDir.exists())
-                return snapshotDir.lastModified();
+            logger.trace("Removing snapshot directory {}", snapshotDir);
+            try
+            {
+                FileUtils.deleteRecursiveWithThrottle(snapshotDir, snapshotRateLimiter);
+            }
+            catch (RuntimeException ex)
+            {
+                if (!snapshotDir.exists())
+                    return; // ignore
+                throw ex;
+            }
         }
-        throw new RuntimeException("Snapshot " + snapshotName + " doesn't exist");
     }
 
     /**
@@ -1096,7 +1170,7 @@
         for (File dir : dataPaths)
         {
             File snapshotDir = isSecondaryIndexFolder(dir)
-                               ? new File(dir.getParent(), SNAPSHOT_SUBDIR)
+                               ? new File(dir.parentPath(), SNAPSHOT_SUBDIR)
                                : new File(dir, SNAPSHOT_SUBDIR);
             result += getTrueAllocatedSizeIn(snapshotDir);
         }
@@ -1116,19 +1190,19 @@
         return totalAllocatedSize;
     }
 
-    public long getTrueAllocatedSizeIn(File input)
+    public long getTrueAllocatedSizeIn(File snapshotDir)
     {
-        if (!input.isDirectory())
+        if (!snapshotDir.isDirectory())
             return 0;
 
-        SSTableSizeSummer visitor = new SSTableSizeSummer(input, sstableLister(Directories.OnTxnErr.THROW).listFiles());
+        SSTableSizeSummer visitor = new SSTableSizeSummer(sstableLister(OnTxnErr.THROW).listFiles());
         try
         {
-            Files.walkFileTree(input.toPath(), visitor);
+            Files.walkFileTree(snapshotDir.toPath(), visitor);
         }
         catch (IOException e)
         {
-            logger.error("Could not calculate the size of {}. {}", input, e.getMessage());
+            logger.error("Could not calculate the size of {}. {}", snapshotDir, e.getMessage());
         }
 
         return visitor.getAllocatedSize();
@@ -1141,7 +1215,7 @@
         for (DataDirectory dataDirectory : dataDirectories.getAllDirectories())
         {
             File ksDir = new File(dataDirectory.location, ksName);
-            File[] cfDirs = ksDir.listFiles();
+            File[] cfDirs = ksDir.tryList();
             if (cfDirs == null)
                 continue;
             for (File cfDir : cfDirs)
@@ -1155,7 +1229,12 @@
 
     public static boolean isSecondaryIndexFolder(File dir)
     {
-        return dir.getName().startsWith(SECONDARY_INDEX_NAME_SEPARATOR);
+        return dir.name().startsWith(SECONDARY_INDEX_NAME_SEPARATOR);
+    }
+
+    public static boolean isSecondaryIndexFolder(Path dir)
+    {
+        return PathUtils.filename(dir).startsWith(SECONDARY_INDEX_NAME_SEPARATOR);
     }
 
     public List<File> getCFDirectories()
@@ -1169,6 +1248,24 @@
         return result;
     }
 
+    /**
+     * Initializes the sstable unique identifier generator using the provided builder for this instance of directories.
+     * If the builder needs the existing identifiers, the sstables in these directories are listed to provide them.
+     * The listing is done lazily, so it is skipped entirely when the builder does not require it.
+     */
+    public <T extends SSTableId> Supplier<T> getUIDGenerator(SSTableId.Builder<T> builder)
+    {
+        // this stream is evaluated lazily - if the generator does not need the existing ids, we do not even call #sstableLister
+        Stream<SSTableId> curIds = StreamSupport.stream(() -> sstableLister(Directories.OnTxnErr.IGNORE)
+                                                              .includeBackups(true)
+                                                              .list()
+                                                              .keySet()
+                                                              .spliterator(), Spliterator.DISTINCT, false)
+                                                .map(d -> d.id);
+
+        return builder.generator(curIds);
+    }
+
     private static File getOrCreate(File base, String... subdirs)
     {
         File dir = subdirs == null || subdirs.length == 0 ? base : new File(base, join(subdirs));
@@ -1177,7 +1274,7 @@
             if (!dir.isDirectory())
                 throw new AssertionError(String.format("Invalid directory path %s: path exists but is not a directory", dir));
         }
-        else if (!dir.mkdirs() && !(dir.exists() && dir.isDirectory()))
+        else if (!dir.tryCreateDirectories() && !(dir.exists() && dir.isDirectory()))
         {
             throw new FSWriteError(new IOException("Unable to create directory " + dir), dir);
         }
@@ -1186,55 +1283,27 @@
 
     private static String join(String... s)
     {
-        return StringUtils.join(s, File.separator);
+        return StringUtils.join(s, File.pathSeparator());
     }
 
     private class SSTableSizeSummer extends DirectorySizeCalculator
     {
         private final Set<String> toSkip;
-        SSTableSizeSummer(File path, List<File> files)
+        SSTableSizeSummer(List<File> files)
         {
-            super(path);
-            toSkip = files.stream().map(f -> f.getName()).collect(Collectors.toSet());
+            toSkip = files.stream().map(File::name).collect(Collectors.toSet());
         }
 
         @Override
         public boolean isAcceptable(Path path)
         {
-            File file = path.toFile();
+            File file = new File(path);
             Descriptor desc = SSTable.tryDescriptorFromFilename(file);
             return desc != null
                 && desc.ksname.equals(metadata.keyspace)
                 && desc.cfname.equals(metadata.name)
-                && !toSkip.contains(file.getName());
+                && !toSkip.contains(file.name());
         }
     }
 
-    public static class SnapshotSizeDetails
-    {
-        public final long sizeOnDiskBytes;
-        public final long dataSizeBytes;
-
-        private SnapshotSizeDetails(long sizeOnDiskBytes, long dataSizeBytes)
-        {
-            this.sizeOnDiskBytes = sizeOnDiskBytes;
-            this.dataSizeBytes = dataSizeBytes;
-        }
-
-        @Override
-        public final int hashCode()
-        {
-            int hashCode = (int) sizeOnDiskBytes ^ (int) (sizeOnDiskBytes >>> 32);
-            return 31 * (hashCode ^ (int) ((int) dataSizeBytes ^ (dataSizeBytes >>> 32)));
-        }
-
-        @Override
-        public final boolean equals(Object o)
-        {
-            if(!(o instanceof SnapshotSizeDetails))
-                return false;
-            SnapshotSizeDetails that = (SnapshotSizeDetails)o;
-            return sizeOnDiskBytes == that.sizeOnDiskBytes && dataSizeBytes == that.dataSizeBytes;
-        }
-    }
 }
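getUIDGenerator above builds the stream of existing sstable identifiers from a Supplier of a Spliterator, so the directory listing only happens if the builder actually consumes the stream. A self-contained sketch of that laziness trick (names and printed messages are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Spliterator;
    import java.util.function.Supplier;
    import java.util.stream.Stream;
    import java.util.stream.StreamSupport;

    public final class LazyListingDemo
    {
        // The Spliterator is obtained from the supplier only when a terminal operation runs,
        // so the expensive listing is skipped if the stream is never consumed.
        static Stream<String> lazily(Supplier<List<String>> expensiveListing)
        {
            return StreamSupport.stream(() -> expensiveListing.get().spliterator(), Spliterator.DISTINCT, false);
        }

        public static void main(String[] args)
        {
            Stream<String> ids = lazily(() -> {
                System.out.println("listing directories now");
                return Arrays.asList("a", "b");
            });
            System.out.println("stream created, nothing listed yet");
            System.out.println(ids.count()); // triggers the listing, then prints 2
        }
    }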
diff --git a/src/java/org/apache/cassandra/db/DisallowedDirectories.java b/src/java/org/apache/cassandra/db/DisallowedDirectories.java
index f030253..e666bad 100644
--- a/src/java/org/apache/cassandra/db/DisallowedDirectories.java
+++ b/src/java/org/apache/cassandra/db/DisallowedDirectories.java
@@ -20,14 +20,14 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.utils.MBeanWrapper;
 
 public class DisallowedDirectories implements DisallowedDirectoriesMBean
@@ -49,14 +49,21 @@
         MBeanWrapper.instance.registerMBean(this, MBEAN_NAME, MBeanWrapper.OnException.LOG);
     }
 
-    public Set<File> getUnreadableDirectories()
+    @Override
+    public Set<java.io.File> getUnreadableDirectories()
     {
-        return Collections.unmodifiableSet(unreadableDirectories);
+        return toJmx(unreadableDirectories);
     }
 
-    public Set<File> getUnwritableDirectories()
+    @Override
+    public Set<java.io.File> getUnwritableDirectories()
     {
-        return Collections.unmodifiableSet(unwritableDirectories);
+        return toJmx(unwritableDirectories);
+    }
+
+    private static Set<java.io.File> toJmx(Set<File> set)
+    {
+        return set.stream().map(f -> f.toPath().toFile()).collect(Collectors.toSet()); // checkstyle: permit this invocation
     }
 
     public void markUnreadable(String path)
@@ -145,11 +152,11 @@
             return file;
 
         if (file.isFile())
-            return file.getParentFile();
+            return file.parent();
 
         // the file with path cannot be read - try determining the directory manually.
-        if (file.getPath().endsWith(".db"))
-            return file.getParentFile();
+        if (file.path().endsWith(".db"))
+            return file.parent();
 
         // We may not be able to determine if it's a file or a directory if
         // we were called because we couldn't create the file/directory.
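The MBean keeps exposing java.io.File while the internal sets now hold the Cassandra File wrapper, so the getters map back to java.io.File for JMX. A tiny sketch of that conversion, using java.nio.file.Path as a stand-in for the internal wrapper so the snippet stays self-contained:

    import java.io.File;
    import java.nio.file.Path;
    import java.util.Set;
    import java.util.stream.Collectors;

    final class JmxFileConversion
    {
        // Map internal path objects back to java.io.File, which is what the MBean interface exposes.
        static Set<File> toJmx(Set<Path> paths)
        {
            return paths.stream().map(Path::toFile).collect(Collectors.toSet());
        }
    }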
diff --git a/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java b/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java
index 64f15e5..e75bebe 100644
--- a/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java
+++ b/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java
@@ -17,7 +17,7 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
+import java.io.File; //checkstyle: permit this import
 import java.util.Set;
 
 public interface DisallowedDirectoriesMBean
diff --git a/src/java/org/apache/cassandra/db/DiskBoundaryManager.java b/src/java/org/apache/cassandra/db/DiskBoundaryManager.java
index cc617da..0de745d 100644
--- a/src/java/org/apache/cassandra/db/DiskBoundaryManager.java
+++ b/src/java/org/apache/cassandra/db/DiskBoundaryManager.java
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
-import java.util.stream.Collectors;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -68,7 +67,19 @@
            diskBoundaries.invalidate();
     }
 
-    private static DiskBoundaries getDiskBoundaryValue(ColumnFamilyStore cfs)
+    static class VersionedRangesAtEndpoint
+    {
+        public final RangesAtEndpoint rangesAtEndpoint;
+        public final long ringVersion;
+
+        VersionedRangesAtEndpoint(RangesAtEndpoint rangesAtEndpoint, long ringVersion)
+        {
+            this.rangesAtEndpoint = rangesAtEndpoint;
+            this.ringVersion = ringVersion;
+        }
+    }
+
+    public static VersionedRangesAtEndpoint getVersionedLocalRanges(ColumnFamilyStore cfs)
     {
         RangesAtEndpoint localRanges;
 
@@ -78,23 +89,20 @@
         {
             tmd = StorageService.instance.getTokenMetadata();
             ringVersion = tmd.getRingVersion();
-            if (StorageService.instance.isBootstrapMode()
-                && !StorageService.isReplacingSameAddress()) // When replacing same address, the node marks itself as UN locally
-            {
-                PendingRangeCalculatorService.instance.blockUntilFinished();
-                localRanges = tmd.getPendingRanges(cfs.keyspace.getName(), FBUtilities.getBroadcastAddressAndPort());
-            }
-            else
-            {
-                // Reason we use use the future settled TMD is that if we decommission a node, we want to stream
-                // from that node to the correct location on disk, if we didn't, we would put new files in the wrong places.
-                // We do this to minimize the amount of data we need to move in rebalancedisks once everything settled
-                localRanges = cfs.keyspace.getReplicationStrategy().getAddressReplicas(tmd.cloneAfterAllSettled(), FBUtilities.getBroadcastAddressAndPort());
-            }
+            localRanges = getLocalRanges(cfs, tmd);
             logger.debug("Got local ranges {} (ringVersion = {})", localRanges, ringVersion);
         }
         while (ringVersion != tmd.getRingVersion()); // if ringVersion is different here it means that
-                                                     // it might have changed before we calculated localRanges - recalculate
+        // it might have changed before we calculated localRanges - recalculate
+
+        return new VersionedRangesAtEndpoint(localRanges, ringVersion);
+    }
+
+    private static DiskBoundaries getDiskBoundaryValue(ColumnFamilyStore cfs)
+    {
+        VersionedRangesAtEndpoint rangesAtEndpoint = getVersionedLocalRanges(cfs);
+        RangesAtEndpoint localRanges = rangesAtEndpoint.rangesAtEndpoint;
+        long ringVersion = rangesAtEndpoint.ringVersion;
 
         int directoriesVersion;
         Directories.DataDirectory[] dirs;
@@ -113,6 +121,25 @@
         return new DiskBoundaries(cfs, dirs, positions, ringVersion, directoriesVersion);
     }
 
+    private static RangesAtEndpoint getLocalRanges(ColumnFamilyStore cfs, TokenMetadata tmd)
+    {
+        RangesAtEndpoint localRanges;
+        if (StorageService.instance.isBootstrapMode()
+        && !StorageService.isReplacingSameAddress()) // When replacing same address, the node marks itself as UN locally
+        {
+            PendingRangeCalculatorService.instance.blockUntilFinished();
+            localRanges = tmd.getPendingRanges(cfs.keyspace.getName(), FBUtilities.getBroadcastAddressAndPort());
+        }
+        else
+        {
+            // Reason we use the future settled TMD is that if we decommission a node, we want to stream
+            // from that node to the correct location on disk, if we didn't, we would put new files in the wrong places.
+            // We do this to minimize the amount of data we need to move in rebalancedisks once everything settled
+            localRanges = cfs.keyspace.getReplicationStrategy().getAddressReplicas(tmd.cloneAfterAllSettled(), FBUtilities.getBroadcastAddressAndPort());
+        }
+        return localRanges;
+    }
+
     /**
      * Returns a list of disk boundaries, the result will differ depending on whether vnodes are enabled or not.
      *
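getVersionedLocalRanges recomputes the local ranges until the ring version observed before and after the computation matches, so the returned ranges are consistent with a single ring version. A generic sketch of that optimistic retry loop (names are illustrative):

    import java.util.function.LongSupplier;
    import java.util.function.Supplier;

    final class VersionedRead
    {
        // Snapshot the version, compute, and retry if the version moved while computing,
        // so the returned value is consistent with a single observed version.
        static <T> T readConsistently(LongSupplier version, Supplier<T> compute)
        {
            long observed;
            T result;
            do
            {
                observed = version.getAsLong();
                result = compute.get();
            }
            while (observed != version.getAsLong());
            return result;
        }
    }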
diff --git a/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java b/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java
index 81e3d1e..7f81b5c 100644
--- a/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java
+++ b/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java
@@ -32,6 +32,8 @@
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.utils.NoSpamLogger;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class ExpirationDateOverflowHandling
 {
     private static final Logger logger = LoggerFactory.getLogger(ExpirationDateOverflowHandling.class);
@@ -75,7 +77,7 @@
             return;
 
         // Check for localExpirationTime overflow (CASSANDRA-14092)
-        int nowInSecs = (int)(System.currentTimeMillis() / 1000);
+        int nowInSecs = (int)(currentTimeMillis() / 1000);
         if (ttl + nowInSecs < 0)
         {
             switch (policy)
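The guard above relies on 32-bit wraparound: when the expiration time (now in seconds plus the TTL) would exceed Integer.MAX_VALUE, the int sum goes negative. A small worked example (assuming today's epoch seconds are roughly 1.7 billion; the 20-year TTL matches Cassandra's maximum TTL):

    public final class TtlOverflowDemo
    {
        public static void main(String[] args)
        {
            int nowInSecs = (int) (System.currentTimeMillis() / 1000); // roughly 1.7 billion today
            int ttl = 20 * 365 * 24 * 3600;                            // 20 years (Cassandra's maximum TTL)
            System.out.println(nowInSecs + ttl);        // negative: the 32-bit sum wrapped around
            System.out.println((long) nowInSecs + ttl); // the real expiry time, computed as a long
        }
    }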
diff --git a/src/java/org/apache/cassandra/db/IMutation.java b/src/java/org/apache/cassandra/db/IMutation.java
index 10472c1..831652b 100644
--- a/src/java/org/apache/cassandra/db/IMutation.java
+++ b/src/java/org/apache/cassandra/db/IMutation.java
@@ -19,6 +19,7 @@
 
 import java.util.Collection;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
@@ -35,6 +36,7 @@
     public long getTimeout(TimeUnit unit);
     public String toString(boolean shallow);
     public Collection<PartitionUpdate> getPartitionUpdates();
+    public Supplier<Mutation> hintOnFailure();
 
     public default void validateIndexedColumns()
     {
diff --git a/src/java/org/apache/cassandra/db/Keyspace.java b/src/java/org/apache/cassandra/db/Keyspace.java
index ead01fb..d6db700 100644
--- a/src/java/org/apache/cassandra/db/Keyspace.java
+++ b/src/java/org/apache/cassandra/db/Keyspace.java
@@ -17,8 +17,8 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOException;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -27,10 +27,8 @@
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.stream.Stream;
@@ -43,7 +41,7 @@
 
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.config.DurationSpec;
 import org.apache.cassandra.db.lifecycle.SSTableSet;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.repair.CassandraKeyspaceRepairManager;
@@ -52,6 +50,7 @@
 import org.apache.cassandra.index.Index;
 import org.apache.cassandra.index.SecondaryIndexManager;
 import org.apache.cassandra.index.transactions.UpdateTransaction;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.metrics.KeyspaceMetrics;
@@ -64,15 +63,22 @@
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+import org.apache.cassandra.utils.concurrent.Promise;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.FBUtilities.now;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 /**
  * It represents a Keyspace.
@@ -113,14 +119,36 @@
 
     private static volatile boolean initialized = false;
 
+    public static boolean isInitialized()
+    {
+        return initialized;
+    }
+
     public static void setInitialized()
     {
-        initialized = true;
+        synchronized (Schema.instance)
+        {
+            initialized = true;
+        }
+    }
+
+    /**
+     * Never use this in production code.
+     *
+     * Useful when creating a fake Schema so that it does not manage Keyspace instances (and their ColumnFamilyStores).
+     */
+    @VisibleForTesting
+    public static void unsetInitialized()
+    {
+        synchronized (Schema.instance)
+        {
+            initialized = false;
+        }
     }
 
     public static Keyspace open(String keyspaceName)
     {
-        assert initialized || SchemaConstants.isLocalSystemKeyspace(keyspaceName);
+        assert initialized || SchemaConstants.isLocalSystemKeyspace(keyspaceName) : "Initialized: " + initialized;
         return open(keyspaceName, Schema.instance, true);
     }
 
@@ -130,47 +158,9 @@
         return open(keyspaceName, Schema.instance, false);
     }
 
-    @VisibleForTesting
-    static Keyspace open(String keyspaceName, SchemaProvider schema, boolean loadSSTables)
+    public static Keyspace open(String keyspaceName, SchemaProvider schema, boolean loadSSTables)
     {
-        Keyspace keyspaceInstance = schema.getKeyspaceInstance(keyspaceName);
-
-        if (keyspaceInstance == null)
-        {
-            // Instantiate the Keyspace while holding the Schema lock. This both ensures we only do it once per
-            // keyspace, and also ensures that Keyspace construction sees a consistent view of the schema.
-            synchronized (schema)
-            {
-                keyspaceInstance = schema.getKeyspaceInstance(keyspaceName);
-                if (keyspaceInstance == null)
-                {
-                    // open and store the keyspace
-                    keyspaceInstance = new Keyspace(keyspaceName, schema, loadSSTables);
-                    schema.storeKeyspaceInstance(keyspaceInstance);
-                }
-            }
-        }
-        return keyspaceInstance;
-    }
-
-    public static Keyspace clear(String keyspaceName)
-    {
-        return clear(keyspaceName, Schema.instance);
-    }
-
-    public static Keyspace clear(String keyspaceName, Schema schema)
-    {
-        synchronized (schema)
-        {
-            Keyspace t = schema.removeKeyspaceInstance(keyspaceName);
-            if (t != null)
-            {
-                for (ColumnFamilyStore cfs : t.getColumnFamilyStores())
-                    t.unloadCf(cfs);
-                t.metric.release();
-            }
-            return t;
-        }
+        return schema.maybeAddKeyspaceInstance(keyspaceName, () -> new Keyspace(keyspaceName, schema, loadSSTables));
     }
 
     public static ColumnFamilyStore openAndGetStore(TableMetadataRef tableRef)
@@ -246,7 +236,7 @@
      * @param rateLimiter Rate limiter for hardlinks-per-second
      * @throws IOException if the column family doesn't exist
      */
-    public void snapshot(String snapshotName, String columnFamilyName, boolean skipFlush, RateLimiter rateLimiter) throws IOException
+    public void snapshot(String snapshotName, String columnFamilyName, boolean skipFlush, DurationSpec.IntSecondsBound ttl, RateLimiter rateLimiter, Instant creationTime) throws IOException
     {
         assert snapshotName != null;
         boolean tookSnapShot = false;
@@ -255,7 +245,7 @@
             if (columnFamilyName == null || cfStore.name.equals(columnFamilyName))
             {
                 tookSnapShot = true;
-                cfStore.snapshot(snapshotName, skipFlush, rateLimiter);
+                cfStore.snapshot(snapshotName, skipFlush, ttl, rateLimiter, creationTime);
             }
         }
 
@@ -273,7 +263,7 @@
      */
     public void snapshot(String snapshotName, String columnFamilyName) throws IOException
     {
-        snapshot(snapshotName, columnFamilyName, false, null);
+        snapshot(snapshotName, columnFamilyName, false, null, null, now());
     }
 
     /**
@@ -282,7 +272,7 @@
      */
     public static String getTimestampedSnapshotName(String clientSuppliedName)
     {
-        String snapshotName = Long.toString(System.currentTimeMillis());
+        String snapshotName = Long.toString(currentTimeMillis());
         if (clientSuppliedName != null && !clientSuppliedName.equals(""))
         {
             snapshotName = snapshotName + "-" + clientSuppliedName;
@@ -322,8 +312,8 @@
     {
         RateLimiter clearSnapshotRateLimiter = DatabaseDescriptor.getSnapshotRateLimiter();
 
-        List<File> snapshotDirs = Directories.getKSChildDirectories(keyspace);
-        Directories.clearSnapshot(snapshotName, snapshotDirs, clearSnapshotRateLimiter);
+        List<File> tableDirectories = Directories.getKSChildDirectories(keyspace);
+        Directories.clearSnapshot(snapshotName, tableDirectories, clearSnapshotRateLimiter);
     }
 
     /**
@@ -337,6 +327,11 @@
         return list;
     }
 
+    public Stream<TableSnapshot> getAllSnapshots()
+    {
+        return getColumnFamilyStores().stream().flatMap(cfs -> cfs.listSnapshots().values().stream());
+    }
+
     private Keyspace(String keyspaceName, SchemaProvider schema, boolean loadSSTables)
     {
         this.schema = schema;
@@ -388,33 +383,37 @@
         if (!ksm.params.replication.equals(replicationParams))
         {
             logger.debug("New replication settings for keyspace {} - invalidating disk boundary caches", ksm.name);
-            columnFamilyStores.values().forEach(ColumnFamilyStore::invalidateDiskBoundaries);
+            columnFamilyStores.values().forEach(ColumnFamilyStore::invalidateLocalRanges);
         }
         replicationParams = ksm.params.replication;
     }
 
-    // best invoked on the compaction mananger.
-    public void dropCf(TableId tableId)
+    // best invoked on the compaction manager.
+    public void dropCf(TableId tableId, boolean dropData)
     {
-        assert columnFamilyStores.containsKey(tableId);
         ColumnFamilyStore cfs = columnFamilyStores.remove(tableId);
         if (cfs == null)
             return;
 
-        cfs.getCompactionStrategyManager().shutdown();
-        CompactionManager.instance.interruptCompactionForCFs(cfs.concatWithIndexes(), (sstable) -> true, true);
-        // wait for any outstanding reads/writes that might affect the CFS
-        cfs.keyspace.writeOrder.awaitNewBarrier();
-        cfs.readOrdering.awaitNewBarrier();
+        cfs.onTableDropped();
+        unloadCf(cfs, dropData);
+    }
 
-        unloadCf(cfs);
+    /**
+     * Unloads all column family stores and releases metrics.
+     */
+    public void unload(boolean dropData)
+    {
+        for (ColumnFamilyStore cfs : getColumnFamilyStores())
+            unloadCf(cfs, dropData);
+        metric.release();
     }
 
     // disassociate a cfs from this keyspace instance.
-    private void unloadCf(ColumnFamilyStore cfs)
+    private void unloadCf(ColumnFamilyStore cfs, boolean dropData)
     {
-        cfs.forceBlockingFlush();
-        cfs.invalidate();
+        cfs.unloadCf();
+        cfs.invalidate(true, dropData);
     }
 
     /**
@@ -472,15 +471,15 @@
         }
     }
 
-    public CompletableFuture<?> applyFuture(Mutation mutation, boolean writeCommitLog, boolean updateIndexes)
+    public Future<?> applyFuture(Mutation mutation, boolean writeCommitLog, boolean updateIndexes)
     {
-        return applyInternal(mutation, writeCommitLog, updateIndexes, true, true, new CompletableFuture<>());
+        return applyInternal(mutation, writeCommitLog, updateIndexes, true, true, new AsyncPromise<>());
     }
 
-    public CompletableFuture<?> applyFuture(Mutation mutation, boolean writeCommitLog, boolean updateIndexes, boolean isDroppable,
+    public Future<?> applyFuture(Mutation mutation, boolean writeCommitLog, boolean updateIndexes, boolean isDroppable,
                                             boolean isDeferrable)
     {
-        return applyInternal(mutation, writeCommitLog, updateIndexes, isDroppable, isDeferrable, new CompletableFuture<>());
+        return applyInternal(mutation, writeCommitLog, updateIndexes, isDroppable, isDeferrable, new AsyncPromise<>());
     }
 
     public void apply(Mutation mutation, boolean writeCommitLog, boolean updateIndexes)
@@ -503,7 +502,7 @@
      *                       may happen concurrently, depending on the CL Executor type.
      * @param makeDurable    if true, don't return unless write has been made durable
      * @param updateIndexes  false to disable index updates (used by CollationController "defragmenting")
-     * @param isDroppable    true if this should throw WriteTimeoutException if it does not acquire lock within write_request_timeout_in_ms
+     * @param isDroppable    true if this should throw WriteTimeoutException if it does not acquire lock within write_request_timeout
      */
     public void apply(final Mutation mutation,
                       final boolean makeDurable,
@@ -520,15 +519,15 @@
      *                       may happen concurrently, depending on the CL Executor type.
      * @param makeDurable    if true, don't return unless write has been made durable
      * @param updateIndexes  false to disable index updates (used by CollationController "defragmenting")
-     * @param isDroppable    true if this should throw WriteTimeoutException if it does not acquire lock within write_request_timeout_in_ms
+     * @param isDroppable    true if this should throw WriteTimeoutException if it does not acquire lock within write_request_timeout
      * @param isDeferrable   true if caller is not waiting for future to complete, so that future may be deferred
      */
-    private CompletableFuture<?> applyInternal(final Mutation mutation,
+    private Future<?> applyInternal(final Mutation mutation,
                                                final boolean makeDurable,
                                                boolean updateIndexes,
                                                boolean isDroppable,
                                                boolean isDeferrable,
-                                               CompletableFuture<?> future)
+                                               Promise<?> future)
     {
         if (TEST_FAIL_WRITES && metadata.name.equals(TEST_FAIL_WRITES_KS))
             throw new RuntimeException("Testing write failures");
@@ -539,7 +538,7 @@
 
         if (requiresViewUpdate)
         {
-            mutation.viewLockAcquireStart.compareAndSet(0L, System.currentTimeMillis());
+            mutation.viewLockAcquireStart.compareAndSet(0L, currentTimeMillis());
 
             // the order of lock acquisition doesn't matter (from a deadlock perspective) because we only use tryLock()
             Collection<TableId> tableIds = mutation.getTableIds();
@@ -572,7 +571,7 @@
                             Tracing.trace("Could not acquire MV lock");
                             if (future != null)
                             {
-                                future.completeExceptionally(new WriteTimeoutException(WriteType.VIEW, ConsistencyLevel.LOCAL_ONE, 0, 1));
+                                future.tryFailure(new WriteTimeoutException(WriteType.VIEW, ConsistencyLevel.LOCAL_ONE, 0, 1));
                                 return future;
                             }
                             else
@@ -585,9 +584,8 @@
 
                             // This view update can't happen right now. so rather than keep this thread busy
                             // we will re-apply ourself to the queue and try again later
-                            final CompletableFuture<?> mark = future;
                             Stage.MUTATION.execute(() ->
-                                                   applyInternal(mutation, makeDurable, true, isDroppable, true, mark)
+                                                   applyInternal(mutation, makeDurable, true, isDroppable, true, future)
                             );
                             return future;
                         }
@@ -604,7 +602,7 @@
                             }
                             catch (InterruptedException e)
                             {
-                                // Just continue
+                                throw new UncheckedInterruptedException(e);
                             }
                             continue;
                         }
@@ -617,7 +615,7 @@
                 }
             }
 
-            long acquireTime = System.currentTimeMillis() - mutation.viewLockAcquireStart.get();
+            long acquireTime = currentTimeMillis() - mutation.viewLockAcquireStart.get();
             // Metrics are only collected for droppable write operations
             // Bulk non-droppable operations (e.g. commitlog replay, hint delivery) are not measured
             if (isDroppable)
@@ -661,11 +659,11 @@
                 cfs.getWriteHandler().write(upd, ctx, indexTransaction);
 
                 if (requiresViewUpdate)
-                    baseComplete.set(System.currentTimeMillis());
+                    baseComplete.set(currentTimeMillis());
             }
 
             if (future != null) {
-                future.complete(null);
+                future.trySuccess(null);
             }
             return future;
         }
@@ -685,11 +683,11 @@
         return replicationStrategy;
     }
 
-    public List<Future<?>> flush()
+    public List<Future<?>> flush(ColumnFamilyStore.FlushReason reason)
     {
         List<Future<?>> futures = new ArrayList<>(columnFamilyStores.size());
         for (ColumnFamilyStore cfs : columnFamilyStores.values())
-            futures.add(cfs.forceFlush());
+            futures.add(cfs.forceFlush(reason));
         return futures;
     }
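
Note: flush() now takes a ColumnFamilyStore.FlushReason that is passed through to forceFlush(), so every flush records its trigger. A hedged sketch; the USER_FORCED constant is an assumption, not shown in this hunk:

    List<Future<?>> futures = keyspace.flush(ColumnFamilyStore.FlushReason.USER_FORCED);
    FBUtilities.waitOnFutures(futures);   // block until each table's flush completes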
 
@@ -772,12 +770,12 @@
 
     public static Iterable<Keyspace> nonSystem()
     {
-        return Iterables.transform(Schema.instance.getNonSystemKeyspaces(), Keyspace::open);
+        return Iterables.transform(Schema.instance.getNonSystemKeyspaces().names(), Keyspace::open);
     }
 
     public static Iterable<Keyspace> nonLocalStrategy()
     {
-        return Iterables.transform(Schema.instance.getNonLocalStrategyKeyspaces(), Keyspace::open);
+        return Iterables.transform(Schema.instance.getNonLocalStrategyKeyspaces().names(), Keyspace::open);
     }
 
     public static Iterable<Keyspace> system()
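
Note: getNonSystemKeyspaces()/getNonLocalStrategyKeyspaces() now return a Keyspaces collection of metadata rather than a plain list of names, hence the added names() call before mapping through Keyspace::open. Equivalent iteration, using only what these hunks show:

    for (String name : Schema.instance.getNonSystemKeyspaces().names())
        Keyspace.open(name);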
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
deleted file mode 100644
index b74ac5f..0000000
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ /dev/null
@@ -1,695 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Throwables;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.Config;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.db.commitlog.CommitLogPosition;
-import org.apache.cassandra.db.commitlog.IntervalSet;
-import org.apache.cassandra.db.filter.ClusteringIndexFilter;
-import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
-import org.apache.cassandra.db.partitions.AbstractBTreePartition;
-import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
-import org.apache.cassandra.db.partitions.AtomicBTreePartition;
-import org.apache.cassandra.db.partitions.Partition;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.db.rows.EncodingStats;
-import org.apache.cassandra.db.rows.UnfilteredRowIterator;
-import org.apache.cassandra.dht.AbstractBounds;
-import org.apache.cassandra.dht.Bounds;
-import org.apache.cassandra.dht.IncludingExcludingBounds;
-import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
-import org.apache.cassandra.dht.Range;
-import org.apache.cassandra.index.transactions.UpdateTransaction;
-import org.apache.cassandra.io.sstable.Descriptor;
-import org.apache.cassandra.io.sstable.SSTableMultiWriter;
-import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
-import org.apache.cassandra.schema.ColumnMetadata;
-import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.ObjectSizes;
-import org.apache.cassandra.utils.concurrent.OpOrder;
-import org.apache.cassandra.utils.memory.Cloner;
-import org.apache.cassandra.utils.memory.HeapPool;
-import org.apache.cassandra.utils.memory.MemtableAllocator;
-import org.apache.cassandra.utils.memory.MemtableCleaner;
-import org.apache.cassandra.utils.memory.MemtablePool;
-import org.apache.cassandra.utils.memory.NativePool;
-import org.apache.cassandra.utils.memory.SlabPool;
-
-public class Memtable implements Comparable<Memtable>
-{
-    private static final Logger logger = LoggerFactory.getLogger(Memtable.class);
-
-    public static final MemtablePool MEMORY_POOL = createMemtableAllocatorPoolInternal();
-    public static final long NO_MIN_TIMESTAMP = -1;
-
-    private static MemtablePool createMemtableAllocatorPoolInternal()
-    {
-        Config.MemtableAllocationType allocationType = DatabaseDescriptor.getMemtableAllocationType();
-        long heapLimit = DatabaseDescriptor.getMemtableHeapSpaceInMb() << 20;
-        long offHeapLimit = DatabaseDescriptor.getMemtableOffheapSpaceInMb() << 20;
-        final float cleaningThreshold = DatabaseDescriptor.getMemtableCleanupThreshold();
-        final MemtableCleaner cleaner = ColumnFamilyStore::flushLargestMemtable;
-        return createMemtableAllocatorPoolInternal(allocationType, heapLimit, offHeapLimit, cleaningThreshold, cleaner);
-    }
-
-    @VisibleForTesting
-    public static MemtablePool createMemtableAllocatorPoolInternal(Config.MemtableAllocationType allocationType,
-                                                                   long heapLimit, long offHeapLimit,
-                                                                   float cleaningThreshold, MemtableCleaner cleaner)
-    {
-        switch (allocationType)
-        {
-            case unslabbed_heap_buffers:
-                return new HeapPool(heapLimit, cleaningThreshold, cleaner);
-            case heap_buffers:
-                return new SlabPool(heapLimit, 0, cleaningThreshold, cleaner);
-            case offheap_buffers:
-                return new SlabPool(heapLimit, offHeapLimit, cleaningThreshold, cleaner);
-            case offheap_objects:
-                return new NativePool(heapLimit, offHeapLimit, cleaningThreshold, cleaner);
-            default:
-                throw new AssertionError();
-        }
-    }
-
-    private static final int ROW_OVERHEAD_HEAP_SIZE = estimateRowOverhead(Integer.parseInt(System.getProperty("cassandra.memtable_row_overhead_computation_step", "100000")));
-
-    private final MemtableAllocator allocator;
-    private final AtomicLong liveDataSize = new AtomicLong(0);
-    private final AtomicLong currentOperations = new AtomicLong(0);
-
-    // the write barrier for directing writes to this memtable or the next during a switch
-    private volatile OpOrder.Barrier writeBarrier;
-    // the precise upper bound of CommitLogPosition owned by this memtable
-    private volatile AtomicReference<CommitLogPosition> commitLogUpperBound;
-    // the precise lower bound of CommitLogPosition owned by this memtable; equal to its predecessor's commitLogUpperBound
-    private AtomicReference<CommitLogPosition> commitLogLowerBound;
-
-    // The approximate lower bound by this memtable; must be <= commitLogLowerBound once our predecessor
-    // has been finalised, and this is enforced in the ColumnFamilyStore.setCommitLogUpperBound
-    private final CommitLogPosition approximateCommitLogLowerBound = CommitLog.instance.getCurrentPosition();
-
-    public int compareTo(Memtable that)
-    {
-        return this.approximateCommitLogLowerBound.compareTo(that.approximateCommitLogLowerBound);
-    }
-
-    public static final class LastCommitLogPosition extends CommitLogPosition
-    {
-        public LastCommitLogPosition(CommitLogPosition copy)
-        {
-            super(copy.segmentId, copy.position);
-        }
-    }
-
-    // We index the memtable by PartitionPosition only for the purpose of being able
-    // to select key range using Token.KeyBound. However put() ensures that we
-    // actually only store DecoratedKey.
-    private final ConcurrentNavigableMap<PartitionPosition, AtomicBTreePartition> partitions = new ConcurrentSkipListMap<>();
-    public final ColumnFamilyStore cfs;
-    private final long creationNano = System.nanoTime();
-
-    // The smallest timestamp for all partitions stored in this memtable
-    private long minTimestamp = Long.MAX_VALUE;
-
-    // Record the comparator of the CFS at the creation of the memtable. This
-    // is only used when a user update the CF comparator, to know if the
-    // memtable was created with the new or old comparator.
-    public final ClusteringComparator initialComparator;
-
-    private final ColumnsCollector columnsCollector;
-    private final StatsCollector statsCollector = new StatsCollector();
-
-    // only to be used by init(), to setup the very first memtable for the cfs
-    public Memtable(AtomicReference<CommitLogPosition> commitLogLowerBound, ColumnFamilyStore cfs)
-    {
-        this.cfs = cfs;
-        this.commitLogLowerBound = commitLogLowerBound;
-        this.allocator = MEMORY_POOL.newAllocator();
-        this.initialComparator = cfs.metadata().comparator;
-        this.cfs.scheduleFlush();
-        this.columnsCollector = new ColumnsCollector(cfs.metadata().regularAndStaticColumns());
-    }
-
-    // ONLY to be used for testing, to create a mock Memtable
-    @VisibleForTesting
-    public Memtable(TableMetadata metadata)
-    {
-        this.initialComparator = metadata.comparator;
-        this.cfs = null;
-        this.allocator = null;
-        this.columnsCollector = new ColumnsCollector(metadata.regularAndStaticColumns());
-    }
-
-    @VisibleForTesting
-    public Memtable(TableMetadata metadata, long minTimestamp)
-    {
-        this.initialComparator = metadata.comparator;
-        this.cfs = null;
-        this.allocator = null;
-        this.columnsCollector = new ColumnsCollector(metadata.regularAndStaticColumns());
-        this.minTimestamp = minTimestamp;
-    }
-
-    public MemtableAllocator getAllocator()
-    {
-        return allocator;
-    }
-
-    public long getLiveDataSize()
-    {
-        return liveDataSize.get();
-    }
-
-    public long getOperations()
-    {
-        return currentOperations.get();
-    }
-
-    @VisibleForTesting
-    public void setDiscarding(OpOrder.Barrier writeBarrier, AtomicReference<CommitLogPosition> commitLogUpperBound)
-    {
-        assert this.writeBarrier == null;
-        this.commitLogUpperBound = commitLogUpperBound;
-        this.writeBarrier = writeBarrier;
-        allocator.setDiscarding();
-    }
-
-    void setDiscarded()
-    {
-        allocator.setDiscarded();
-    }
-
-    // decide if this memtable should take the write, or if it should go to the next memtable
-    public boolean accepts(OpOrder.Group opGroup, CommitLogPosition commitLogPosition)
-    {
-        // if the barrier hasn't been set yet, then this memtable is still taking ALL writes
-        OpOrder.Barrier barrier = this.writeBarrier;
-        if (barrier == null)
-            return true;
-        // if the barrier has been set, but is in the past, we are definitely destined for a future memtable
-        if (!barrier.isAfter(opGroup))
-            return false;
-        // if we aren't durable we are directed only by the barrier
-        if (commitLogPosition == null)
-            return true;
-        while (true)
-        {
-            // otherwise we check if we are in the past/future wrt the CL boundary;
-            // if the boundary hasn't been finalised yet, we simply update it to the max of
-            // its current value and ours; if it HAS been finalised, we simply accept its judgement
-            // this permits us to coordinate a safe boundary, as the boundary choice is made
-            // atomically wrt our max() maintenance, so an operation cannot sneak into the past
-            CommitLogPosition currentLast = commitLogUpperBound.get();
-            if (currentLast instanceof LastCommitLogPosition)
-                return currentLast.compareTo(commitLogPosition) >= 0;
-            if (currentLast != null && currentLast.compareTo(commitLogPosition) >= 0)
-                return true;
-            if (commitLogUpperBound.compareAndSet(currentLast, commitLogPosition))
-                return true;
-        }
-    }
-
-    public CommitLogPosition getCommitLogLowerBound()
-    {
-        return commitLogLowerBound.get();
-    }
-
-    public CommitLogPosition getCommitLogUpperBound()
-    {
-        return commitLogUpperBound.get();
-    }
-
-    public boolean isLive()
-    {
-        return allocator.isLive();
-    }
-
-    public boolean isClean()
-    {
-        return partitions.isEmpty();
-    }
-
-    public boolean mayContainDataBefore(CommitLogPosition position)
-    {
-        return approximateCommitLogLowerBound.compareTo(position) < 0;
-    }
-
-    /**
-     * @return true if this memtable is expired. Expiration time is determined by CF's memtable_flush_period_in_ms.
-     */
-    public boolean isExpired()
-    {
-        int period = cfs.metadata().params.memtableFlushPeriodInMs;
-        return period > 0 && (System.nanoTime() - creationNano >= TimeUnit.MILLISECONDS.toNanos(period));
-    }
-
-    /**
-     * Should only be called by ColumnFamilyStore.apply via Keyspace.apply, which supplies the appropriate
-     * OpOrdering.
-     *
-     * commitLogSegmentPosition should only be null if this is a secondary index, in which case it is *expected* to be null
-     */
-    long put(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup)
-    {
-        Cloner cloner = allocator.cloner(opGroup);
-        AtomicBTreePartition previous = partitions.get(update.partitionKey());
-
-        long initialSize = 0;
-        if (previous == null)
-        {
-            final DecoratedKey cloneKey = cloner.clone(update.partitionKey());
-            AtomicBTreePartition empty = new AtomicBTreePartition(cfs.metadata, cloneKey, allocator);
-            // We'll add the columns later. This avoids wasting works if we get beaten in the putIfAbsent
-            previous = partitions.putIfAbsent(cloneKey, empty);
-            if (previous == null)
-            {
-                previous = empty;
-                // allocate the row overhead after the fact; this saves over allocating and having to free after, but
-                // means we can overshoot our declared limit.
-                int overhead = (int) (cloneKey.getToken().getHeapSize() + ROW_OVERHEAD_HEAP_SIZE);
-                allocator.onHeap().allocate(overhead, opGroup);
-                initialSize = 8;
-            }
-        }
-
-        long[] pair = previous.addAllWithSizeDelta(update, cloner, opGroup, indexer);
-        minTimestamp = Math.min(minTimestamp, previous.stats().minTimestamp);
-        liveDataSize.addAndGet(initialSize + pair[0]);
-        columnsCollector.update(update.columns());
-        statsCollector.update(update.stats());
-        currentOperations.addAndGet(update.operationCount());
-        return pair[1];
-    }
-
-    public int partitionCount()
-    {
-        return partitions.size();
-    }
-
-    public List<FlushRunnable> flushRunnables(LifecycleTransaction txn)
-    {
-        return createFlushRunnables(txn);
-    }
-
-    private List<FlushRunnable> createFlushRunnables(LifecycleTransaction txn)
-    {
-        DiskBoundaries diskBoundaries = cfs.getDiskBoundaries();
-        List<PartitionPosition> boundaries = diskBoundaries.positions;
-        List<Directories.DataDirectory> locations = diskBoundaries.directories;
-        if (boundaries == null)
-            return Collections.singletonList(new FlushRunnable(txn));
-
-        List<FlushRunnable> runnables = new ArrayList<>(boundaries.size());
-        PartitionPosition rangeStart = cfs.getPartitioner().getMinimumToken().minKeyBound();
-        try
-        {
-            for (int i = 0; i < boundaries.size(); i++)
-            {
-                PartitionPosition t = boundaries.get(i);
-                runnables.add(new FlushRunnable(rangeStart, t, locations.get(i), txn));
-                rangeStart = t;
-            }
-            return runnables;
-        }
-        catch (Throwable e)
-        {
-            throw Throwables.propagate(abortRunnables(runnables, e));
-        }
-    }
-
-    public Throwable abortRunnables(List<FlushRunnable> runnables, Throwable t)
-    {
-        if (runnables != null)
-            for (FlushRunnable runnable : runnables)
-                t = runnable.writer.abort(t);
-        return t;
-    }
-
-    public String toString()
-    {
-        return String.format("Memtable-%s@%s(%s serialized bytes, %s ops, %.0f%%/%.0f%% of on/off-heap limit)",
-                             cfs.name, hashCode(), FBUtilities.prettyPrintMemory(liveDataSize.get()), currentOperations,
-                             100 * allocator.onHeap().ownershipRatio(), 100 * allocator.offHeap().ownershipRatio());
-    }
-
-    public MemtableUnfilteredPartitionIterator makePartitionIterator(final ColumnFilter columnFilter, final DataRange dataRange)
-    {
-        AbstractBounds<PartitionPosition> keyRange = dataRange.keyRange();
-
-        boolean startIsMin = keyRange.left.isMinimum();
-        boolean stopIsMin = keyRange.right.isMinimum();
-
-        boolean isBound = keyRange instanceof Bounds;
-        boolean includeStart = isBound || keyRange instanceof IncludingExcludingBounds;
-        boolean includeStop = isBound || keyRange instanceof Range;
-        Map<PartitionPosition, AtomicBTreePartition> subMap;
-        if (startIsMin)
-            subMap = stopIsMin ? partitions : partitions.headMap(keyRange.right, includeStop);
-        else
-            subMap = stopIsMin
-                   ? partitions.tailMap(keyRange.left, includeStart)
-                   : partitions.subMap(keyRange.left, includeStart, keyRange.right, includeStop);
-
-        int minLocalDeletionTime = Integer.MAX_VALUE;
-
-        // avoid iterating over the memtable if we purge all tombstones
-        if (cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones())
-            minLocalDeletionTime = findMinLocalDeletionTime(subMap.entrySet().iterator());
-
-        final Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iter = subMap.entrySet().iterator();
-
-        return new MemtableUnfilteredPartitionIterator(cfs, iter, minLocalDeletionTime, columnFilter, dataRange);
-    }
-
-    private int findMinLocalDeletionTime(Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iterator)
-    {
-        int minLocalDeletionTime = Integer.MAX_VALUE;
-        while (iterator.hasNext())
-        {
-            Map.Entry<PartitionPosition, AtomicBTreePartition> entry = iterator.next();
-            minLocalDeletionTime = Math.min(minLocalDeletionTime, entry.getValue().stats().minLocalDeletionTime);
-        }
-        return minLocalDeletionTime;
-    }
-
-    public Partition getPartition(DecoratedKey key)
-    {
-        return partitions.get(key);
-    }
-
-    /**
-     * Returns the minTS if one available, otherwise NO_MIN_TIMESTAMP.
-     *
-     * EncodingStats uses a synthetic epoch TS at 2015. We don't want to leak that (CASSANDRA-18118) so we return NO_MIN_TIMESTAMP instead.
-     *
-     * @return The minTS or NO_MIN_TIMESTAMP if none available
-     */
-    public long getMinTimestamp()
-    {
-        return minTimestamp != EncodingStats.NO_STATS.minTimestamp ? minTimestamp : NO_MIN_TIMESTAMP;
-    }
-
-    /**
-     * For testing only. Give this memtable too big a size to make it always fail flushing.
-     */
-    @VisibleForTesting
-    public void makeUnflushable()
-    {
-        liveDataSize.addAndGet((long) 1024 * 1024 * 1024 * 1024 * 1024);
-    }
-
-    class FlushRunnable implements Callable<SSTableMultiWriter>
-    {
-        private final long estimatedSize;
-        private final ConcurrentNavigableMap<PartitionPosition, AtomicBTreePartition> toFlush;
-
-        private final boolean isBatchLogTable;
-        private final SSTableMultiWriter writer;
-
-        // keeping these to be able to log what we are actually flushing
-        private final PartitionPosition from;
-        private final PartitionPosition to;
-
-        FlushRunnable(PartitionPosition from, PartitionPosition to, Directories.DataDirectory flushLocation, LifecycleTransaction txn)
-        {
-            this(partitions.subMap(from, to), flushLocation, from, to, txn);
-        }
-
-        FlushRunnable(LifecycleTransaction txn)
-        {
-            this(partitions, null, null, null, txn);
-        }
-
-        FlushRunnable(ConcurrentNavigableMap<PartitionPosition, AtomicBTreePartition> toFlush, Directories.DataDirectory flushLocation, PartitionPosition from, PartitionPosition to, LifecycleTransaction txn)
-        {
-            this.toFlush = toFlush;
-            this.from = from;
-            this.to = to;
-            long keySize = 0;
-            for (PartitionPosition key : toFlush.keySet())
-            {
-                //  make sure we don't write non-sensical keys
-                assert key instanceof DecoratedKey;
-                keySize += ((DecoratedKey) key).getKey().remaining();
-            }
-            estimatedSize = (long) ((keySize // index entries
-                                    + keySize // keys in data file
-                                    + liveDataSize.get()) // data
-                                    * 1.2); // bloom filter and row index overhead
-
-            this.isBatchLogTable = cfs.name.equals(SystemKeyspace.BATCHES) && cfs.keyspace.getName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME);
-
-            if (flushLocation == null)
-                writer = createFlushWriter(txn, cfs.newSSTableDescriptor(getDirectories().getWriteableLocationAsFile(estimatedSize)), columnsCollector.get(), statsCollector.get());
-            else
-                writer = createFlushWriter(txn, cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(flushLocation)), columnsCollector.get(), statsCollector.get());
-
-        }
-
-        protected Directories getDirectories()
-        {
-            return cfs.getDirectories();
-        }
-
-        private void writeSortedContents()
-        {
-            logger.info("Writing {}, flushed range = ({}, {}]", Memtable.this.toString(), from, to);
-
-            boolean trackContention = logger.isTraceEnabled();
-            int heavilyContendedRowCount = 0;
-            // (we can't clear out the map as-we-go to free up memory,
-            //  since the memtable is being used for queries in the "pending flush" category)
-            for (AtomicBTreePartition partition : toFlush.values())
-            {
-                // Each batchlog partition is a separate entry in the log. And for an entry, we only do 2
-                // operations: 1) we insert the entry and 2) we delete it. Further, BL data is strictly local,
-                // we don't need to preserve tombstones for repair. So if both operation are in this
-                // memtable (which will almost always be the case if there is no ongoing failure), we can
-                // just skip the entry (CASSANDRA-4667).
-                if (isBatchLogTable && !partition.partitionLevelDeletion().isLive() && partition.hasRows())
-                    continue;
-
-                if (trackContention && partition.useLock())
-                    heavilyContendedRowCount++;
-
-                if (!partition.isEmpty())
-                {
-                    try (UnfilteredRowIterator iter = partition.unfilteredIterator())
-                    {
-                        writer.append(iter);
-                    }
-                }
-            }
-
-            long bytesFlushed = writer.getFilePointer();
-            logger.info("Completed flushing {} ({}) for commitlog position {}",
-                         writer.getFilename(),
-                         FBUtilities.prettyPrintMemory(bytesFlushed),
-                         commitLogUpperBound);
-            // Update the metrics
-            cfs.metric.bytesFlushed.inc(bytesFlushed);
-
-            if (heavilyContendedRowCount > 0)
-                logger.trace("High update contention in {}/{} partitions of {} ", heavilyContendedRowCount, toFlush.size(), Memtable.this);
-        }
-
-        public SSTableMultiWriter createFlushWriter(LifecycleTransaction txn,
-                                                    Descriptor descriptor,
-                                                    RegularAndStaticColumns columns,
-                                                    EncodingStats stats)
-        {
-            MetadataCollector sstableMetadataCollector = new MetadataCollector(cfs.metadata().comparator)
-                    .commitLogIntervals(new IntervalSet<>(commitLogLowerBound.get(), commitLogUpperBound.get()));
-
-            return cfs.createSSTableMultiWriter(descriptor,
-                                                toFlush.size(),
-                                                ActiveRepairService.UNREPAIRED_SSTABLE,
-                                                ActiveRepairService.NO_PENDING_REPAIR,
-                                                false,
-                                                sstableMetadataCollector,
-                                                new SerializationHeader(true, cfs.metadata(), columns, stats), txn);
-        }
-
-        @Override
-        public SSTableMultiWriter call()
-        {
-            writeSortedContents();
-            return writer;
-        }
-    }
-
-    private static int estimateRowOverhead(final int count)
-    {
-        // calculate row overhead
-        try (final OpOrder.Group group = new OpOrder().start())
-        {
-            int rowOverhead;
-            MemtableAllocator allocator = MEMORY_POOL.newAllocator();
-            Cloner cloner = allocator.cloner(group);
-            ConcurrentNavigableMap<PartitionPosition, Object> partitions = new ConcurrentSkipListMap<>();
-            final Object val = new Object();
-            for (int i = 0 ; i < count ; i++)
-                partitions.put(cloner.clone(new BufferDecoratedKey(new LongToken(i), ByteBufferUtil.EMPTY_BYTE_BUFFER)), val);
-            double avgSize = ObjectSizes.measureDeep(partitions) / (double) count;
-            rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize));
-            rowOverhead -= ObjectSizes.measureDeep(new LongToken(0));
-            rowOverhead += AtomicBTreePartition.EMPTY_SIZE;
-            rowOverhead += AbstractBTreePartition.HOLDER_UNSHARED_HEAP_SIZE;
-            allocator.setDiscarding();
-            allocator.setDiscarded();
-            return rowOverhead;
-        }
-    }
-
-    public static class MemtableUnfilteredPartitionIterator extends AbstractUnfilteredPartitionIterator
-    {
-        private final ColumnFamilyStore cfs;
-        private final Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iter;
-        private final int minLocalDeletionTime;
-        private final ColumnFilter columnFilter;
-        private final DataRange dataRange;
-
-        public MemtableUnfilteredPartitionIterator(ColumnFamilyStore cfs, Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iter, int minLocalDeletionTime, ColumnFilter columnFilter, DataRange dataRange)
-        {
-            this.cfs = cfs;
-            this.iter = iter;
-            this.minLocalDeletionTime = minLocalDeletionTime;
-            this.columnFilter = columnFilter;
-            this.dataRange = dataRange;
-        }
-
-        public int getMinLocalDeletionTime()
-        {
-            return minLocalDeletionTime;
-        }
-
-        public TableMetadata metadata()
-        {
-            return cfs.metadata();
-        }
-
-        public boolean hasNext()
-        {
-            return iter.hasNext();
-        }
-
-        public UnfilteredRowIterator next()
-        {
-            Map.Entry<PartitionPosition, AtomicBTreePartition> entry = iter.next();
-            // Actual stored key should be true DecoratedKey
-            assert entry.getKey() instanceof DecoratedKey;
-            DecoratedKey key = (DecoratedKey)entry.getKey();
-            ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(key);
-
-            return filter.getUnfilteredRowIterator(columnFilter, entry.getValue());
-        }
-    }
-
-    private static class ColumnsCollector
-    {
-        private final HashMap<ColumnMetadata, AtomicBoolean> predefined = new HashMap<>();
-        private final ConcurrentSkipListSet<ColumnMetadata> extra = new ConcurrentSkipListSet<>();
-        ColumnsCollector(RegularAndStaticColumns columns)
-        {
-            for (ColumnMetadata def : columns.statics)
-                predefined.put(def, new AtomicBoolean());
-            for (ColumnMetadata def : columns.regulars)
-                predefined.put(def, new AtomicBoolean());
-        }
-
-        public void update(RegularAndStaticColumns columns)
-        {
-            for (ColumnMetadata s : columns.statics)
-                update(s);
-            for (ColumnMetadata r : columns.regulars)
-                update(r);
-        }
-
-        private void update(ColumnMetadata definition)
-        {
-            AtomicBoolean present = predefined.get(definition);
-            if (present != null)
-            {
-                if (!present.get())
-                    present.set(true);
-            }
-            else
-            {
-                extra.add(definition);
-            }
-        }
-
-        public RegularAndStaticColumns get()
-        {
-            RegularAndStaticColumns.Builder builder = RegularAndStaticColumns.builder();
-            for (Map.Entry<ColumnMetadata, AtomicBoolean> e : predefined.entrySet())
-                if (e.getValue().get())
-                    builder.add(e.getKey());
-            return builder.addAll(extra).build();
-        }
-    }
-
-    private static class StatsCollector
-    {
-        private final AtomicReference<EncodingStats> stats = new AtomicReference<>(EncodingStats.NO_STATS);
-
-        public void update(EncodingStats newStats)
-        {
-            while (true)
-            {
-                EncodingStats current = stats.get();
-                EncodingStats updated = current.mergeWith(newStats);
-                if (stats.compareAndSet(current, updated))
-                    return;
-            }
-        }
-
-        public EncodingStats get()
-        {
-            return stats.get();
-        }
-    }
-}
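
Note: the monolithic db.Memtable class is deleted here; its role moves behind the pluggable memtable API under org.apache.cassandra.db.memtable (note the new import in PartitionRangeReadCommand further down). A hedged sketch of what call sites reference after the move; the accessor chain is illustrative only:

    import org.apache.cassandra.db.memtable.Memtable;

    Memtable current = cfs.getTracker().getView().getCurrentMemtable();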
diff --git a/src/java/org/apache/cassandra/db/MessageParams.java b/src/java/org/apache/cassandra/db/MessageParams.java
new file mode 100644
index 0000000..137d3a6
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/MessageParams.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db;
+
+import java.util.EnumMap;
+import java.util.Map;
+
+import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.ParamType;
+
+public class MessageParams
+{
+    private static final FastThreadLocal<Map<ParamType, Object>> local = new FastThreadLocal<>();
+
+    private MessageParams()
+    {
+    }
+
+    private static Map<ParamType, Object> get()
+    {
+        Map<ParamType, Object> instance = local.get();
+        if (instance == null)
+        {
+            instance = new EnumMap<>(ParamType.class);
+            local.set(instance);
+        }
+
+        return instance;
+    }
+
+    public static void add(ParamType key, Object value)
+    {
+        get().put(key, value);
+    }
+
+    public static <T> T get(ParamType key)
+    {
+        return (T) get().get(key);
+    }
+
+    public static void remove(ParamType key)
+    {
+        get().remove(key);
+    }
+
+    public static void reset()
+    {
+        get().clear();
+    }
+
+    public static <T> Message<T> addToMessage(Message<T> message)
+    {
+        return message.withParams(get());
+    }
+}
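
Note: MessageParams keeps a per-thread EnumMap keyed by ParamType that request handling can populate and later copy onto an outgoing Message via addToMessage(). A standalone sketch of the same thread-local stash pattern using only JDK types, so nothing beyond what the new file shows is assumed:

    import java.util.EnumMap;

    final class RequestScopedParams
    {
        enum Key { EXAMPLE }   // placeholder; the real class keys off org.apache.cassandra.net.ParamType

        private static final ThreadLocal<EnumMap<Key, Object>> LOCAL =
                ThreadLocal.withInitial(() -> new EnumMap<>(Key.class));

        static void add(Key key, Object value) { LOCAL.get().put(key, value); }
        static Object get(Key key)             { return LOCAL.get().get(key); }
        static void reset()                    { LOCAL.get().clear(); }
    }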
diff --git a/src/java/org/apache/cassandra/db/MultiCBuilder.java b/src/java/org/apache/cassandra/db/MultiCBuilder.java
index 0b5625b..435e418 100644
--- a/src/java/org/apache/cassandra/db/MultiCBuilder.java
+++ b/src/java/org/apache/cassandra/db/MultiCBuilder.java
@@ -130,6 +130,13 @@
     }
 
     /**
+     * Returns the current number of results when {@link #build()} is called.
+     *
+     * @return the current number of build results
+     */
+    public abstract int buildSize();
+
+    /**
      * Checks if the clusterings contains null elements.
      *
      * @return <code>true</code> if the clusterings contains <code>null</code> elements, <code>false</code> otherwise.
@@ -252,6 +259,12 @@
             return addEachElementToAll(values.get(0));
         }
 
+        @Override
+        public int buildSize()
+        {
+            return hasMissingElements ? 0 : 1;
+        }
+
         public NavigableSet<Clustering<?>> build()
         {
             built = true;
@@ -309,7 +322,7 @@
             checkUpdateable();
 
             if (elementsList.isEmpty())
-                elementsList.add(new ArrayList<ByteBuffer>());
+                elementsList.add(new ArrayList<>());
 
             if (value == null)
                 containsNull = true;
@@ -328,7 +341,7 @@
             checkUpdateable();
 
             if (elementsList.isEmpty())
-                elementsList.add(new ArrayList<ByteBuffer>());
+                elementsList.add(new ArrayList<>());
 
             if (values.isEmpty())
             {
@@ -365,7 +378,7 @@
             checkUpdateable();
 
             if (elementsList.isEmpty())
-                elementsList.add(new ArrayList<ByteBuffer>());
+                elementsList.add(new ArrayList<>());
 
             if (values.isEmpty())
             {
@@ -397,6 +410,12 @@
             return this;
         }
 
+        @Override
+        public int buildSize()
+        {
+            return hasMissingElements ? 0 : elementsList.size();
+        }
+
         public NavigableSet<Clustering<?>> build()
         {
             built = true;
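
Note: buildSize() reports how many clusterings build() would currently produce (zero once missing elements have been recorded), letting callers bound the work before materializing the set. A hedged usage sketch; the guard and limit names are assumptions:

    if (builder.buildSize() > maxClusteringCombinations)
        throw new InvalidRequestException("Too many clustering combinations");
    NavigableSet<Clustering<?>> clusterings = builder.build();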
diff --git a/src/java/org/apache/cassandra/db/MutableDeletionInfo.java b/src/java/org/apache/cassandra/db/MutableDeletionInfo.java
index 1c58b92..9cad53e 100644
--- a/src/java/org/apache/cassandra/db/MutableDeletionInfo.java
+++ b/src/java/org/apache/cassandra/db/MutableDeletionInfo.java
@@ -24,7 +24,6 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.rows.*;
-import org.apache.cassandra.db.rows.EncodingStats;
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.memory.ByteBufferCloner;
 
diff --git a/src/java/org/apache/cassandra/db/Mutation.java b/src/java/org/apache/cassandra/db/Mutation.java
index 8a1ffc1..7b6a686 100644
--- a/src/java/org/apache/cassandra/db/Mutation.java
+++ b/src/java/org/apache/cassandra/db/Mutation.java
@@ -19,9 +19,9 @@
 
 import java.io.IOException;
 import java.util.*;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
 
 import com.google.common.collect.ImmutableCollection;
 import com.google.common.collect.ImmutableMap;
@@ -38,13 +38,14 @@
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.concurrent.Future;
 
 import static org.apache.cassandra.net.MessagingService.VERSION_30;
 import static org.apache.cassandra.net.MessagingService.VERSION_3014;
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
-public class Mutation implements IMutation
+public class Mutation implements IMutation, Supplier<Mutation>
 {
     public static final MutationSerializer serializer = new MutationSerializer();
 
@@ -132,6 +133,18 @@
         return modifications.values();
     }
 
+    @Override
+    public Supplier<Mutation> hintOnFailure()
+    {
+        return this;
+    }
+
+    @Override
+    public Mutation get()
+    {
+        return this;
+    }
+
     public void validateSize(int version, int overhead)
     {
         long totalSize = serializedSize(version) + overhead;
@@ -204,15 +217,20 @@
         return new Mutation(ks, key, modifications.build(), approxTime.now());
     }
 
-    public CompletableFuture<?> applyFuture()
+    public Future<?> applyFuture()
     {
         Keyspace ks = Keyspace.open(keyspaceName);
         return ks.applyFuture(this, Keyspace.open(keyspaceName).getMetadata().params.durableWrites, true);
     }
 
+    private void apply(Keyspace keyspace, boolean durableWrites, boolean isDroppable)
+    {
+        keyspace.apply(this, durableWrites, true, isDroppable);
+    }
+
     public void apply(boolean durableWrites, boolean isDroppable)
     {
-        Keyspace.open(keyspaceName).apply(this, durableWrites, true, isDroppable);
+        apply(Keyspace.open(keyspaceName), durableWrites, isDroppable);
     }
 
     public void apply(boolean durableWrites)
@@ -226,7 +244,8 @@
      */
     public void apply()
     {
-        apply(Keyspace.open(keyspaceName).getMetadata().params.durableWrites);
+        Keyspace keyspace = Keyspace.open(keyspaceName);
+        apply(keyspace, keyspace.getMetadata().params.durableWrites, true);
     }
 
     public void applyUnsafe()
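
Note: Mutation now implements Supplier<Mutation>, with hintOnFailure() returning the mutation itself, so a caller holding only the supplier can materialize the mutation to hint lazily, and only when the write actually fails. A hedged sketch of a consumer; scheduleHint() and writeFailed are placeholders, not APIs in this patch:

    Supplier<Mutation> hintable = mutation.hintOnFailure();
    if (writeFailed && hintable != null)
        scheduleHint(hintable.get());   // only now is the mutation-to-hint materialized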
diff --git a/src/java/org/apache/cassandra/db/MutationExceededMaxSizeException.java b/src/java/org/apache/cassandra/db/MutationExceededMaxSizeException.java
index 084c21e..45d9e43 100644
--- a/src/java/org/apache/cassandra/db/MutationExceededMaxSizeException.java
+++ b/src/java/org/apache/cassandra/db/MutationExceededMaxSizeException.java
@@ -25,10 +25,11 @@
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.exceptions.InvalidRequestException;
 
 import static org.apache.cassandra.db.IMutation.MAX_MUTATION_SIZE;
 
-public class MutationExceededMaxSizeException extends RuntimeException
+public class MutationExceededMaxSizeException extends InvalidRequestException
 {
     public static final int PARTITION_MESSAGE_LIMIT = 1024;
 
@@ -52,7 +53,7 @@
                                              .collect(Collectors.toList());
 
         String topKeys = makeTopKeysString(topPartitions, PARTITION_MESSAGE_LIMIT);
-        return String.format("Encountered an oversized mutation (%d/%d) for keyspace: %s. Top keys are: %s",
+        return String.format("Rejected an oversized mutation (%d/%d) for keyspace: %s. Top keys are: %s",
                              totalSize,
                              MAX_MUTATION_SIZE,
                              mutation.getKeyspaceName(),
diff --git a/src/java/org/apache/cassandra/db/MutationVerbHandler.java b/src/java/org/apache/cassandra/db/MutationVerbHandler.java
index 1d4f868..230ca63 100644
--- a/src/java/org/apache/cassandra/db/MutationVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/MutationVerbHandler.java
@@ -22,6 +22,8 @@
 import org.apache.cassandra.net.*;
 import org.apache.cassandra.tracing.Tracing;
 
+import static org.apache.cassandra.db.commitlog.CommitLogSegment.ENTRY_OVERHEAD_SIZE;
+
 public class MutationVerbHandler implements IVerbHandler<Mutation>
 {
     public static final MutationVerbHandler instance = new MutationVerbHandler();
@@ -39,26 +41,17 @@
 
     public void doVerb(Message<Mutation> message)
     {
-        // Check if there were any forwarding headers in this message
-        InetAddressAndPort from = message.respondTo();
-        InetAddressAndPort respondToAddress;
-        if (from == null)
-        {
-            respondToAddress = message.from();
-            ForwardingInfo forwardTo = message.forwardTo();
-            if (forwardTo != null) forwardToLocalNodes(message, forwardTo);
-        }
-        else
-        {
-            respondToAddress = from;
-        }
+        message.payload.validateSize(MessagingService.current_version, ENTRY_OVERHEAD_SIZE);
 
+        // Check if there were any forwarding headers in this message
+        ForwardingInfo forwardTo = message.forwardTo();
+        if (forwardTo != null)
+            forwardToLocalNodes(message, forwardTo);
+
+        InetAddressAndPort respondToAddress = message.respondTo();
         try
         {
-            message.payload.applyFuture().thenAccept(o -> respond(message, respondToAddress)).exceptionally(wto -> {
-                failed();
-                return null;
-            });
+            message.payload.applyFuture().addCallback(o -> respond(message, respondToAddress), wto -> failed());
         }
         catch (WriteTimeoutException wto)
         {
diff --git a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
index e55ab63..f000d63 100644
--- a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
+++ b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
@@ -22,12 +22,19 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
-import org.apache.cassandra.net.Verb;
-import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
+import org.apache.cassandra.db.virtual.VirtualTable;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.filter.*;
+import org.apache.cassandra.db.filter.ClusteringIndexFilter;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.filter.DataLimits;
+import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.db.lifecycle.View;
-import org.apache.cassandra.db.partitions.*;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.partitions.CachedPartition;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterators;
 import org.apache.cassandra.db.rows.BaseRowIterator;
 import org.apache.cassandra.db.transform.RTBoundValidator;
 import org.apache.cassandra.db.transform.Transformation;
@@ -40,7 +47,9 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.schema.IndexMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.tracing.Tracing;
@@ -52,23 +61,63 @@
 {
     protected static final SelectionDeserializer selectionDeserializer = new Deserializer();
 
-    private final DataRange dataRange;
+    protected final DataRange dataRange;
 
     private PartitionRangeReadCommand(boolean isDigest,
-                                     int digestVersion,
-                                     boolean acceptsTransient,
-                                     TableMetadata metadata,
-                                     int nowInSec,
-                                     ColumnFilter columnFilter,
-                                     RowFilter rowFilter,
-                                     DataLimits limits,
-                                     DataRange dataRange,
-                                     IndexMetadata index)
+                                      int digestVersion,
+                                      boolean acceptsTransient,
+                                      TableMetadata metadata,
+                                      int nowInSec,
+                                      ColumnFilter columnFilter,
+                                      RowFilter rowFilter,
+                                      DataLimits limits,
+                                      DataRange dataRange,
+                                      IndexMetadata index,
+                                      boolean trackWarnings)
     {
-        super(Kind.PARTITION_RANGE, isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, index);
+        super(Kind.PARTITION_RANGE, isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, index, trackWarnings);
         this.dataRange = dataRange;
     }
 
+    private static PartitionRangeReadCommand create(boolean isDigest,
+                                                    int digestVersion,
+                                                    boolean acceptsTransient,
+                                                    TableMetadata metadata,
+                                                    int nowInSec,
+                                                    ColumnFilter columnFilter,
+                                                    RowFilter rowFilter,
+                                                    DataLimits limits,
+                                                    DataRange dataRange,
+                                                    IndexMetadata index,
+                                                    boolean trackWarnings)
+    {
+        if (metadata.isVirtual())
+        {
+            return new VirtualTablePartitionRangeReadCommand(isDigest,
+                                                             digestVersion,
+                                                             acceptsTransient,
+                                                             metadata,
+                                                             nowInSec,
+                                                             columnFilter,
+                                                             rowFilter,
+                                                             limits,
+                                                             dataRange,
+                                                             index,
+                                                             trackWarnings);
+        }
+        return new PartitionRangeReadCommand(isDigest,
+                                             digestVersion,
+                                             acceptsTransient,
+                                             metadata,
+                                             nowInSec,
+                                             columnFilter,
+                                             rowFilter,
+                                             limits,
+                                             dataRange,
+                                             index,
+                                             trackWarnings);
+    }
+
     public static PartitionRangeReadCommand create(TableMetadata metadata,
                                                    int nowInSec,
                                                    ColumnFilter columnFilter,
@@ -76,16 +125,17 @@
                                                    DataLimits limits,
                                                    DataRange dataRange)
     {
-        return new PartitionRangeReadCommand(false,
-                                             0,
-                                             false,
-                                             metadata,
-                                             nowInSec,
-                                             columnFilter,
-                                             rowFilter,
-                                             limits,
-                                             dataRange,
-                                             findIndex(metadata, rowFilter));
+        return create(false,
+                      0,
+                      false,
+                      metadata,
+                      nowInSec,
+                      columnFilter,
+                      rowFilter,
+                      limits,
+                      dataRange,
+                      findIndex(metadata, rowFilter),
+                      false);
     }
 
     /**
@@ -98,16 +148,17 @@
      */
     public static PartitionRangeReadCommand allDataRead(TableMetadata metadata, int nowInSec)
     {
-        return new PartitionRangeReadCommand(false,
-                                             0,
-                                             false,
-                                             metadata,
-                                             nowInSec,
-                                             ColumnFilter.all(metadata),
-                                             RowFilter.NONE,
-                                             DataLimits.NONE,
-                                             DataRange.allData(metadata.partitioner),
-                                             null);
+        return create(false,
+                      0,
+                      false,
+                      metadata,
+                      nowInSec,
+                      ColumnFilter.all(metadata),
+                      RowFilter.NONE,
+                      DataLimits.NONE,
+                      DataRange.allData(metadata.partitioner),
+                      null,
+                      false);
     }
 
     public DataRange dataRange()
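
Note: the private create() factory above routes reads against virtual tables to VirtualTablePartitionRangeReadCommand and threads the new trackWarnings flag through every copy/rewrite; callers of the public factories are unchanged. A usage sketch built only from signatures shown in these hunks:

    PartitionRangeReadCommand cmd =
        PartitionRangeReadCommand.create(metadata,
                                         nowInSec,
                                         ColumnFilter.all(metadata),
                                         RowFilter.NONE,
                                         DataLimits.NONE,
                                         DataRange.allData(metadata.partitioner));
    // when metadata.isVirtual(), the returned command is a VirtualTablePartitionRangeReadCommand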
@@ -147,90 +198,96 @@
         // DataLimits.CQLGroupByLimits.GroupByAwareCounter assumes that if GroupingState.hasClustering(), then we're in
         // the middle of a group, but we can't make that assumption if we query and range "in advance" of where we are
         // on the ring.
-        return new PartitionRangeReadCommand(isDigestQuery(),
-                                             digestVersion(),
-                                             acceptsTransient(),
-                                             metadata(),
-                                             nowInSec(),
-                                             columnFilter(),
-                                             rowFilter(),
-                                             isRangeContinuation ? limits() : limits().withoutState(),
-                                             dataRange().forSubRange(range),
-                                             indexMetadata());
+        return create(isDigestQuery(),
+                      digestVersion(),
+                      acceptsTransient(),
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      isRangeContinuation ? limits() : limits().withoutState(),
+                      dataRange().forSubRange(range),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     public PartitionRangeReadCommand copy()
     {
-        return new PartitionRangeReadCommand(isDigestQuery(),
-                                             digestVersion(),
-                                             acceptsTransient(),
-                                             metadata(),
-                                             nowInSec(),
-                                             columnFilter(),
-                                             rowFilter(),
-                                             limits(),
-                                             dataRange(),
-                                             indexMetadata());
+        return create(isDigestQuery(),
+                      digestVersion(),
+                      acceptsTransient(),
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      limits(),
+                      dataRange(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
     protected PartitionRangeReadCommand copyAsDigestQuery()
     {
-        return new PartitionRangeReadCommand(true,
-                                             digestVersion(),
-                                             false,
-                                             metadata(),
-                                             nowInSec(),
-                                             columnFilter(),
-                                             rowFilter(),
-                                             limits(),
-                                             dataRange(),
-                                             indexMetadata());
+        return create(true,
+                      digestVersion(),
+                      false,
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      limits(),
+                      dataRange(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
     protected PartitionRangeReadCommand copyAsTransientQuery()
     {
-        return new PartitionRangeReadCommand(false,
-                                             0,
-                                             true,
-                                             metadata(),
-                                             nowInSec(),
-                                             columnFilter(),
-                                             rowFilter(),
-                                             limits(),
-                                             dataRange(),
-                                             indexMetadata());
+        return create(false,
+                      0,
+                      true,
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      limits(),
+                      dataRange(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
     public PartitionRangeReadCommand withUpdatedLimit(DataLimits newLimits)
     {
-        return new PartitionRangeReadCommand(isDigestQuery(),
-                                             digestVersion(),
-                                             acceptsTransient(),
-                                             metadata(),
-                                             nowInSec(),
-                                             columnFilter(),
-                                             rowFilter(),
-                                             newLimits,
-                                             dataRange(),
-                                             indexMetadata());
+        return create(isDigestQuery(),
+                      digestVersion(),
+                      acceptsTransient(),
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      newLimits,
+                      dataRange(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
     public PartitionRangeReadCommand withUpdatedLimitsAndDataRange(DataLimits newLimits, DataRange newDataRange)
     {
-        return new PartitionRangeReadCommand(isDigestQuery(),
-                                             digestVersion(),
-                                             acceptsTransient(),
-                                             metadata(),
-                                             nowInSec(),
-                                             columnFilter(),
-                                             rowFilter(),
-                                             newLimits,
-                                             newDataRange,
-                                             indexMetadata());
+        return create(isDigestQuery(),
+                      digestVersion(),
+                      acceptsTransient(),
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      newLimits,
+                      newDataRange,
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     public long getTimeout(TimeUnit unit)
@@ -243,7 +300,7 @@
         return dataRange.isReversed();
     }
 
-    public PartitionIterator execute(ConsistencyLevel consistency, ClientState clientState, long queryStartNanoTime) throws RequestExecutionException
+    public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException
     {
         return StorageProxy.getRangeSlice(this, consistency, queryStartNanoTime);
     }
@@ -263,19 +320,19 @@
         InputCollector<UnfilteredPartitionIterator> inputCollector = iteratorsForRange(view, controller);
         try
         {
+            SSTableReadsListener readCountUpdater = newReadCountUpdater();
             for (Memtable memtable : view.memtables)
             {
                 @SuppressWarnings("resource") // We close on exception and on closing the result returned by this method
-                Memtable.MemtableUnfilteredPartitionIterator iter = memtable.makePartitionIterator(columnFilter(), dataRange());
-                controller.updateMinOldestUnrepairedTombstone(iter.getMinLocalDeletionTime());
+                UnfilteredPartitionIterator iter = memtable.partitionIterator(columnFilter(), dataRange(), readCountUpdater);
+                controller.updateMinOldestUnrepairedTombstone(memtable.getMinLocalDeletionTime());
                 inputCollector.addMemtableIterator(RTBoundValidator.validate(iter, RTBoundValidator.Stage.MEMTABLE, false));
             }
 
-            SSTableReadsListener readCountUpdater = newReadCountUpdater();
             for (SSTableReader sstable : view.sstables)
             {
                 @SuppressWarnings("resource") // We close on exception and on closing the result returned by this method
-                UnfilteredPartitionIterator iter = sstable.getScanner(columnFilter(), dataRange(), readCountUpdater);
+                UnfilteredPartitionIterator iter = sstable.partitionIterator(columnFilter(), dataRange(), readCountUpdater);
                 inputCollector.addSSTableIterator(sstable, RTBoundValidator.validate(iter, RTBoundValidator.Stage.SSTABLE, false));
 
                 if (!sstable.isRepaired())
@@ -358,19 +415,18 @@
 
     protected void appendCQLWhereClause(StringBuilder sb)
     {
-        if (dataRange.isUnrestricted() && rowFilter().isEmpty())
-            return;
+        String filterString = dataRange().toCQLString(metadata(), rowFilter());
+        if (!filterString.isEmpty())
+            sb.append(" WHERE ").append(filterString);
+    }
 
-        sb.append(" WHERE ");
-        // We put the row filter first because the data range can end by "ORDER BY"
-        if (!rowFilter().isEmpty())
-        {
-            sb.append(rowFilter());
-            if (!dataRange.isUnrestricted())
-                sb.append(" AND ");
-        }
-        if (!dataRange.isUnrestricted())
-            sb.append(dataRange.toCQLString(metadata()));
+    @Override
+    public String loggableTokens()
+    {
+        return "token range: " + (dataRange.keyRange.inclusiveLeft() ? '[' : '(') +
+               dataRange.keyRange.left.getToken().toString() + ", " +
+               dataRange.keyRange.right.getToken().toString() +
+               (dataRange.keyRange.inclusiveRight() ? ']' : ')');
     }
 
     /**
@@ -441,7 +497,52 @@
         throws IOException
         {
             DataRange range = DataRange.serializer.deserialize(in, version, metadata);
-            return new PartitionRangeReadCommand(isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, range, index);
+            return PartitionRangeReadCommand.create(isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, range, index, false);
+        }
+    }
+
+    public static class VirtualTablePartitionRangeReadCommand extends PartitionRangeReadCommand
+    {
+        private VirtualTablePartitionRangeReadCommand(boolean isDigest,
+                                                      int digestVersion,
+                                                      boolean acceptsTransient,
+                                                      TableMetadata metadata,
+                                                      int nowInSec,
+                                                      ColumnFilter columnFilter,
+                                                      RowFilter rowFilter,
+                                                      DataLimits limits,
+                                                      DataRange dataRange,
+                                                      IndexMetadata index,
+                                                      boolean trackWarnings)
+        {
+            super(isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, dataRange, index, trackWarnings);
+        }
+
+        @Override
+        public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException
+        {
+            return executeInternal(executionController());
+        }
+
+        @Override
+        @SuppressWarnings("resource")
+        public UnfilteredPartitionIterator executeLocally(ReadExecutionController executionController)
+        {
+            VirtualTable view = VirtualKeyspaceRegistry.instance.getTableNullable(metadata().id);
+            UnfilteredPartitionIterator resultIterator = view.select(dataRange, columnFilter());
+            return limits().filter(rowFilter().filter(resultIterator, nowInSec()), nowInSec(), selectsFullPartition());
+        }
+
+        @Override
+        public ReadExecutionController executionController()
+        {
+            return ReadExecutionController.empty();
+        }
+
+        @Override
+        public ReadExecutionController executionController(boolean trackRepairedStatus)
+        {
+            return executionController();
         }
     }
 }
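A hedged sketch of the factory this file now routes everything through: the create(...) body itself sits above this hunk, so the isVirtual() dispatch shown here is an assumption, but it is what makes the nested VirtualTablePartitionRangeReadCommand reachable and lets the dedicated VirtualTablePartitionRangeReadQuery path below be removed.

    // Sketch only; the parameter list mirrors the create(...) calls in the hunks above.
    public static PartitionRangeReadCommand create(boolean isDigest, int digestVersion, boolean acceptsTransient,
                                                   TableMetadata metadata, int nowInSec, ColumnFilter columnFilter,
                                                   RowFilter rowFilter, DataLimits limits, DataRange dataRange,
                                                   IndexMetadata index, boolean trackWarnings)
    {
        if (metadata.isVirtual()) // assumption: virtual tables route to the nested subclass
            return new VirtualTablePartitionRangeReadCommand(isDigest, digestVersion, acceptsTransient, metadata, nowInSec,
                                                             columnFilter, rowFilter, limits, dataRange, index, trackWarnings);
        return new PartitionRangeReadCommand(isDigest, digestVersion, acceptsTransient, metadata, nowInSec,
                                             columnFilter, rowFilter, limits, dataRange, index, trackWarnings);
    }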
diff --git a/src/java/org/apache/cassandra/db/PartitionRangeReadQuery.java b/src/java/org/apache/cassandra/db/PartitionRangeReadQuery.java
index 12624e7..5054f32 100644
--- a/src/java/org/apache/cassandra/db/PartitionRangeReadQuery.java
+++ b/src/java/org/apache/cassandra/db/PartitionRangeReadQuery.java
@@ -38,9 +38,6 @@
                             DataLimits limits,
                             DataRange dataRange)
     {
-        if (table.isVirtual())
-            return VirtualTablePartitionRangeReadQuery.create(table, nowInSec, columnFilter, rowFilter, limits, dataRange);
-
         return PartitionRangeReadCommand.create(table, nowInSec, columnFilter, rowFilter, limits, dataRange);
     }
 
diff --git a/src/java/org/apache/cassandra/db/ReadCommand.java b/src/java/org/apache/cassandra/db/ReadCommand.java
index d3aef4c..358d408 100644
--- a/src/java/org/apache/cassandra/db/ReadCommand.java
+++ b/src/java/org/apache/cassandra/db/ReadCommand.java
@@ -33,9 +33,11 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import io.netty.util.concurrent.FastThreadLocal;
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.net.MessageFlag;
+import org.apache.cassandra.net.ParamType;
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.rows.*;
@@ -65,10 +67,13 @@
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.ObjectSizes;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.filter;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 import static org.apache.cassandra.db.partitions.UnfilteredPartitionIterators.MergeListener.NOOP;
 
 /**
@@ -84,6 +89,10 @@
     protected static final Logger logger = LoggerFactory.getLogger(ReadCommand.class);
     public static final IVersionedSerializer<ReadCommand> serializer = new Serializer();
 
+    // Expose the actively running command so that transitive calls can look up this command.
+    // This is useful for a few reasons, but mainly because the CQL query is available here.
+    private static final FastThreadLocal<ReadCommand> COMMAND = new FastThreadLocal<>();
+
     private final Kind kind;
 
     private final boolean isDigestQuery;
@@ -91,6 +100,8 @@
     // if a digest query, the version for which the digest is expected. Ignored if not a digest.
     private int digestVersion;
 
+    private boolean trackWarnings;
+
     @Nullable
     private final IndexMetadata index;
 
@@ -131,7 +142,8 @@
                           ColumnFilter columnFilter,
                           RowFilter rowFilter,
                           DataLimits limits,
-                          IndexMetadata index)
+                          IndexMetadata index,
+                          boolean trackWarnings)
     {
         super(metadata, nowInSec, columnFilter, rowFilter, limits);
         if (acceptsTransient && isDigestQuery)
@@ -142,6 +154,12 @@
         this.digestVersion = digestVersion;
         this.acceptsTransient = acceptsTransient;
         this.index = index;
+        this.trackWarnings = trackWarnings;
+    }
+
+    public static ReadCommand getCommand()
+    {
+        return COMMAND.get();
     }
 
     protected abstract void serializeSelection(DataOutputPlus out, int version) throws IOException;
@@ -211,6 +229,17 @@
         return acceptsTransient;
     }
 
+    @Override
+    public void trackWarnings()
+    {
+        trackWarnings = true;
+    }
+
+    public boolean isTrackingWarnings()
+    {
+        return trackWarnings;
+    }
+
     /**
      * Index (metadata) chosen for this query. Can be null.
      *
@@ -308,6 +337,16 @@
                : ReadResponse.createDataResponse(iterator, this, rdi);
     }
 
+    @SuppressWarnings("resource") // We don't need to close an empty iterator.
+    public ReadResponse createEmptyResponse()
+    {
+        UnfilteredPartitionIterator iterator = EmptyIterators.unfilteredPartition(metadata());
+
+        return isDigestQuery()
+               ? ReadResponse.createDigestResponse(iterator, this)
+               : ReadResponse.createDataResponse(iterator, this, RepairedDataInfo.NO_OP_REPAIRED_DATA_INFO);
+    }
+
     long indexSerializedSize(int version)
     {
         return null != index
@@ -359,68 +398,77 @@
                                   // iterators created inside the try as long as we do close the original resultIterator), or by closing the result.
     public UnfilteredPartitionIterator executeLocally(ReadExecutionController executionController)
     {
-        long startTimeNanos = System.nanoTime();
+        long startTimeNanos = nanoTime();
 
-        ColumnFamilyStore cfs = Keyspace.openAndGetStore(metadata());
-        Index index = getIndex(cfs);
-
-        Index.Searcher searcher = null;
-        if (index != null)
-        {
-            if (!cfs.indexManager.isIndexQueryable(index))
-                throw new IndexNotAvailableException(index);
-
-            searcher = index.searcherFor(this);
-            Tracing.trace("Executing read on {}.{} using index {}", cfs.metadata.keyspace, cfs.metadata.name, index.getIndexMetadata().name);
-        }
-
-        UnfilteredPartitionIterator iterator = (null == searcher) ? queryStorage(cfs, executionController) : searcher.search(executionController);
-        iterator = RTBoundValidator.validate(iterator, Stage.MERGED, false);
-
+        COMMAND.set(this);
         try
         {
-            iterator = withStateTracking(iterator);
-            iterator = RTBoundValidator.validate(withoutPurgeableTombstones(iterator, cfs, executionController), Stage.PURGED, false);
-            iterator = withMetricsRecording(iterator, cfs.metric, startTimeNanos);
+            ColumnFamilyStore cfs = Keyspace.openAndGetStore(metadata());
+            Index index = getIndex(cfs);
 
-            // If we've used a 2ndary index, we know the result already satisfy the primary expression used, so
-            // no point in checking it again.
-            RowFilter filter = (null == searcher) ? rowFilter() : index.getPostIndexQueryFilter(rowFilter());
-
-            /*
-             * TODO: We'll currently do filtering by the rowFilter here because it's convenient. However,
-             * we'll probably want to optimize by pushing it down the layer (like for dropped columns) as it
-             * would be more efficient (the sooner we discard stuff we know we don't care, the less useless
-             * processing we do on it).
-             */
-            iterator = filter.filter(iterator, nowInSec());
-
-            // apply the limits/row counter; this transformation is stopping and would close the iterator as soon
-            // as the count is observed; if that happens in the middle of an open RT, its end bound will not be included.
-            // If tracking repaired data, the counter is needed for overreading repaired data, otherwise we can
-            // optimise the case where this.limit = DataLimits.NONE which skips an unnecessary transform
-            if (executionController.isTrackingRepairedStatus())
+            Index.Searcher searcher = null;
+            if (index != null)
             {
-                DataLimits.Counter limit =
+                if (!cfs.indexManager.isIndexQueryable(index))
+                    throw new IndexNotAvailableException(index);
+
+                searcher = index.searcherFor(this);
+                Tracing.trace("Executing read on {}.{} using index {}", cfs.metadata.keyspace, cfs.metadata.name, index.getIndexMetadata().name);
+            }
+
+            UnfilteredPartitionIterator iterator = (null == searcher) ? queryStorage(cfs, executionController) : searcher.search(executionController);
+            iterator = RTBoundValidator.validate(iterator, Stage.MERGED, false);
+
+            try
+            {
+                iterator = withQuerySizeTracking(iterator);
+                iterator = withStateTracking(iterator);
+                iterator = RTBoundValidator.validate(withoutPurgeableTombstones(iterator, cfs, executionController), Stage.PURGED, false);
+                iterator = withMetricsRecording(iterator, cfs.metric, startTimeNanos);
+
+                // If we've used a 2ndary index, we know the result already satisfies the primary expression used, so
+                // no point in checking it again.
+                RowFilter filter = (null == searcher) ? rowFilter() : index.getPostIndexQueryFilter(rowFilter());
+
+                /*
+                 * TODO: We'll currently do filtering by the rowFilter here because it's convenient. However,
+                 * we'll probably want to optimize by pushing it down the layer (like for dropped columns) as it
+                 * would be more efficient (the sooner we discard stuff we know we don't care about, the less useless
+                 * processing we do on it).
+                 */
+                iterator = filter.filter(iterator, nowInSec());
+
+                // apply the limits/row counter; this transformation is stopping and would close the iterator as soon
+                // as the count is observed; if that happens in the middle of an open RT, its end bound will not be included.
+                // If tracking repaired data, the counter is needed for overreading repaired data, otherwise we can
+                // optimise the case where this.limit = DataLimits.NONE which skips an unnecessary transform
+                if (executionController.isTrackingRepairedStatus())
+                {
+                    DataLimits.Counter limit =
                     limits().newCounter(nowInSec(), false, selectsFullPartition(), metadata().enforceStrictLiveness());
-                iterator = limit.applyTo(iterator);
-                // ensure that a consistent amount of repaired data is read on each replica. This causes silent
-                // overreading from the repaired data set, up to limits(). The extra data is not visible to
-                // the caller, only iterated to produce the repaired data digest.
-                iterator = executionController.getRepairedDataInfo().extend(iterator, limit);
-            }
-            else
-            {
-                iterator = limits().filter(iterator, nowInSec(), selectsFullPartition());
-            }
+                    iterator = limit.applyTo(iterator);
+                    // ensure that a consistent amount of repaired data is read on each replica. This causes silent
+                    // overreading from the repaired data set, up to limits(). The extra data is not visible to
+                    // the caller, only iterated to produce the repaired data digest.
+                    iterator = executionController.getRepairedDataInfo().extend(iterator, limit);
+                }
+                else
+                {
+                    iterator = limits().filter(iterator, nowInSec(), selectsFullPartition());
+                }
 
-            // because of the above, we need to append an aritifical end bound if the source iterator was stopped short by a counter.
-            return RTBoundCloser.close(iterator);
+                // because of the above, we need to append an artificial end bound if the source iterator was stopped short by a counter.
+                return RTBoundCloser.close(iterator);
+            }
+            catch (RuntimeException | Error e)
+            {
+                iterator.close();
+                throw e;
+            }
         }
-        catch (RuntimeException | Error e)
+        finally
         {
-            iterator.close();
-            throw e;
+            COMMAND.set(null);
         }
     }
 
@@ -509,6 +557,11 @@
                     String query = ReadCommand.this.toCQLString();
                     Tracing.trace("Scanned over {} tombstones for query {}; query aborted (see tombstone_failure_threshold)", failureThreshold, query);
                     metric.tombstoneFailures.inc();
+                    if (trackWarnings)
+                    {
+                        MessageParams.remove(ParamType.TOMBSTONE_WARNING);
+                        MessageParams.add(ParamType.TOMBSTONE_FAIL, tombstones);
+                    }
                     throw new TombstoneOverwhelmingException(tombstones, query, ReadCommand.this.metadata(), currentKey, clustering);
                 }
             }
@@ -516,7 +569,7 @@
             @Override
             public void onClose()
             {
-                recordLatency(metric, System.nanoTime() - startTimeNanos);
+                recordLatency(metric, nanoTime() - startTimeNanos);
 
                 metric.tombstoneScannedHistogram.update(tombstones);
                 metric.liveScannedHistogram.update(liveRows);
@@ -527,7 +580,10 @@
                     String msg = String.format(
                             "Read %d live rows and %d tombstone cells for query %1.512s; token %s (see tombstone_warn_threshold)",
                             liveRows, tombstones, ReadCommand.this.toCQLString(), currentKey.getToken());
-                    ClientWarn.instance.warn(msg);
+                    if (trackWarnings)
+                        MessageParams.add(ParamType.TOMBSTONE_WARNING, tombstones);
+                    else
+                        ClientWarn.instance.warn(msg);
                     if (tombstones < failureThreshold)
                     {
                         metric.tombstoneWarnings.inc();
@@ -597,6 +653,90 @@
         }
     }
 
+    private boolean shouldTrackSize(DataStorageSpec.LongBytesBound warnThresholdBytes, DataStorageSpec.LongBytesBound abortThresholdBytes)
+    {
+        return trackWarnings
+               && !SchemaConstants.isSystemKeyspace(metadata().keyspace)
+               && !(warnThresholdBytes == null && abortThresholdBytes == null);
+    }
+
+    private UnfilteredPartitionIterator withQuerySizeTracking(UnfilteredPartitionIterator iterator)
+    {
+        DataStorageSpec.LongBytesBound warnThreshold = DatabaseDescriptor.getLocalReadSizeWarnThreshold();
+        DataStorageSpec.LongBytesBound failThreshold = DatabaseDescriptor.getLocalReadSizeFailThreshold();
+        if (!shouldTrackSize(warnThreshold, failThreshold))
+            return iterator;
+        final long warnBytes = warnThreshold == null ? -1 : warnThreshold.toBytes();
+        final long failBytes = failThreshold == null ? -1 : failThreshold.toBytes();
+        class QuerySizeTracking extends Transformation<UnfilteredRowIterator>
+        {
+            private long sizeInBytes = 0;
+
+            @Override
+            public UnfilteredRowIterator applyToPartition(UnfilteredRowIterator iter)
+            {
+                sizeInBytes += ObjectSizes.sizeOnHeapOf(iter.partitionKey().getKey());
+                return Transformation.apply(iter, this);
+            }
+
+            @Override
+            protected Row applyToStatic(Row row)
+            {
+                return applyToRow(row);
+            }
+
+            @Override
+            protected Row applyToRow(Row row)
+            {
+                addSize(row.unsharedHeapSize());
+                return row;
+            }
+
+            @Override
+            protected RangeTombstoneMarker applyToMarker(RangeTombstoneMarker marker)
+            {
+                addSize(marker.unsharedHeapSize());
+                return marker;
+            }
+
+            @Override
+            protected DeletionTime applyToDeletion(DeletionTime deletionTime)
+            {
+                addSize(deletionTime.unsharedHeapSize());
+                return deletionTime;
+            }
+
+            private void addSize(long size)
+            {
+                this.sizeInBytes += size;
+                if (failBytes != -1 && this.sizeInBytes >= failBytes)
+                {
+                    String msg = String.format("Query %s attempted to read %d bytes but max allowed is %s; query aborted (see local_read_size_fail_threshold)",
+                                               ReadCommand.this.toCQLString(), this.sizeInBytes, failThreshold);
+                    Tracing.trace(msg);
+                    MessageParams.remove(ParamType.LOCAL_READ_SIZE_WARN);
+                    MessageParams.add(ParamType.LOCAL_READ_SIZE_FAIL, this.sizeInBytes);
+                    throw new LocalReadSizeTooLargeException(msg);
+                }
+                else if (warnBytes != -1 && this.sizeInBytes >= warnBytes)
+                {
+                    MessageParams.add(ParamType.LOCAL_READ_SIZE_WARN, this.sizeInBytes);
+                }
+            }
+
+            @Override
+            protected void onClose()
+            {
+                ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata().id);
+                if (cfs != null)
+                    cfs.metric.localReadSize.update(sizeInBytes);
+            }
+        }
+
+        iterator = Transformation.apply(iterator, new QuerySizeTracking());
+        return iterator;
+    }
+
     protected UnfilteredPartitionIterator withStateTracking(UnfilteredPartitionIterator iter)
     {
         return Transformation.apply(iter, new CheckForAbort());
@@ -607,9 +747,12 @@
      */
     public Message<ReadCommand> createMessage(boolean trackRepairedData)
     {
-        return trackRepairedData
-             ? Message.outWithFlags(verb(), this, MessageFlag.CALL_BACK_ON_FAILURE, MessageFlag.TRACK_REPAIRED_DATA)
-             : Message.outWithFlag (verb(), this, MessageFlag.CALL_BACK_ON_FAILURE);
+        Message<ReadCommand> msg = trackRepairedData
+                                   ? Message.outWithFlags(verb(), this, MessageFlag.CALL_BACK_ON_FAILURE, MessageFlag.TRACK_REPAIRED_DATA)
+                                   : Message.outWithFlag(verb(), this, MessageFlag.CALL_BACK_ON_FAILURE);
+        if (trackWarnings)
+            msg = msg.withFlag(MessageFlag.TRACK_WARNINGS);
+        return msg;
     }
 
     public abstract Verb verb();
@@ -641,25 +784,9 @@
     }
 
     /**
-     * Recreate the CQL string corresponding to this query.
-     * <p>
-     * Note that in general the returned string will not be exactly the original user string, first
-     * because there isn't always a single syntax for a given query,  but also because we don't have
-     * all the information needed (we know the non-PK columns queried but not the PK ones as internally
-     * we query them all). So this shouldn't be relied too strongly, but this should be good enough for
-     * debugging purpose which is what this is for.
+     * Return the queried token(s) for logging
      */
-    public String toCQLString()
-    {
-        StringBuilder sb = new StringBuilder();
-        sb.append("SELECT ").append(columnFilter().toCQLString());
-        sb.append(" FROM ").append(metadata().keyspace).append('.').append(metadata().name);
-        appendCQLWhereClause(sb);
-
-        if (limits() != DataLimits.NONE)
-            sb.append(' ').append(limits());
-        return sb.toString();
-    }
+    public abstract String loggableTokens();
 
     // Monitorable interface
     public String name()
@@ -800,7 +927,7 @@
             if (!isTrackingRepairedStatus)
                 return false;
 
-            UUID pendingRepair = sstable.getPendingRepair();
+            TimeUUID pendingRepair = sstable.getPendingRepair();
             if (pendingRepair != ActiveRepairService.NO_PENDING_REPAIR)
             {
                 if (ActiveRepairService.instance.consistent.local.isSessionFinalized(pendingRepair))
@@ -927,7 +1054,7 @@
             int nowInSec = in.readInt();
             ColumnFilter columnFilter = ColumnFilter.serializer.deserialize(in, version, metadata);
             RowFilter rowFilter = RowFilter.serializer.deserialize(in, version, metadata);
-            DataLimits limits = DataLimits.serializer.deserialize(in, version,  metadata.comparator);
+            DataLimits limits = DataLimits.serializer.deserialize(in, version,  metadata);
             IndexMetadata index = hasIndex ? deserializeIndexMetadata(in, version, metadata) : null;
 
             return kind.selectionDeserializer.deserialize(in, version, isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, index);
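The FastThreadLocal COMMAND added above is set for the duration of executeLocally() and cleared in the finally block, so code deep in the read path can attribute its work to the query that triggered it without the command being passed down explicitly. A minimal illustration, assuming it runs on the same thread inside a local read (the Tracing call is just an example consumer; getCommand() and toCQLString() are the pieces this patch relies on):

    ReadCommand active = ReadCommand.getCommand(); // null when no local read is executing on this thread
    if (active != null)
        Tracing.trace("Working on behalf of query {}", active.toCQLString());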
diff --git a/src/java/org/apache/cassandra/db/ReadCommandVerbHandler.java b/src/java/org/apache/cassandra/db/ReadCommandVerbHandler.java
index a86852f..9226568 100644
--- a/src/java/org/apache/cassandra/db/ReadCommandVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/ReadCommandVerbHandler.java
@@ -48,16 +48,35 @@
 
         ReadCommand command = message.payload;
         validateTransientStatus(message);
+        MessageParams.reset();
 
         long timeout = message.expiresAtNanos() - message.createdAtNanos();
         command.setMonitoringTime(message.createdAtNanos(), message.isCrossNode(), timeout, DatabaseDescriptor.getSlowQueryTimeout(NANOSECONDS));
 
+        if (message.trackWarnings())
+            command.trackWarnings();
+
         ReadResponse response;
         try (ReadExecutionController controller = command.executionController(message.trackRepairedData());
              UnfilteredPartitionIterator iterator = command.executeLocally(controller))
         {
             response = command.createResponse(iterator, controller.getRepairedDataInfo());
         }
+        catch (RejectException e)
+        {
+            if (!command.isTrackingWarnings())
+                throw e;
+
+            // make sure to log as the exception is swallowed
+            logger.error(e.getMessage());
+
+            response = command.createEmptyResponse();
+            Message<ReadResponse> reply = message.responseWith(response);
+            reply = MessageParams.addToMessage(reply);
+
+            MessagingService.instance().send(reply, message.from());
+            return;
+        }
 
         if (!command.complete())
         {
@@ -68,12 +87,15 @@
 
         Tracing.trace("Enqueuing response to {}", message.from());
         Message<ReadResponse> reply = message.responseWith(response);
+        reply = MessageParams.addToMessage(reply);
         MessagingService.instance().send(reply, message.from());
     }
 
     private void validateTransientStatus(Message<ReadCommand> message)
     {
         ReadCommand command = message.payload;
+        if (command.metadata().isVirtual())
+            return;
         Token token;
 
         if (command instanceof SinglePartitionReadCommand)
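The MessageParams calls threaded through this handler follow a simple per-request lifecycle: reset() before executing, add(...) from anywhere on the read path (the tombstone and read-size thresholds above), and addToMessage(...) to copy whatever accumulated onto the reply. A condensed sketch of that lifecycle, using only calls visible in this patch:

    MessageParams.reset();                      // clear params left over from a previous request on this thread
    // ... execute the read; threshold code may call MessageParams.add(ParamType.TOMBSTONE_WARNING, count) ...
    Message<ReadResponse> reply = message.responseWith(response);
    reply = MessageParams.addToMessage(reply);  // attach the accumulated warn/fail params to the response
    MessagingService.instance().send(reply, message.from());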
diff --git a/src/java/org/apache/cassandra/db/ReadExecutionController.java b/src/java/org/apache/cassandra/db/ReadExecutionController.java
index 5bcd84b..2fbe3ac 100644
--- a/src/java/org/apache/cassandra/db/ReadExecutionController.java
+++ b/src/java/org/apache/cassandra/db/ReadExecutionController.java
@@ -28,7 +28,7 @@
 import org.apache.cassandra.utils.MonotonicClock;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 
-import static org.apache.cassandra.utils.MonotonicClock.preciseTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.preciseTime;
 
 public class ReadExecutionController implements AutoCloseable
 {
diff --git a/src/java/org/apache/cassandra/db/ReadQuery.java b/src/java/org/apache/cassandra/db/ReadQuery.java
index bd20c26..55a2cf6 100644
--- a/src/java/org/apache/cassandra/db/ReadQuery.java
+++ b/src/java/org/apache/cassandra/db/ReadQuery.java
@@ -48,7 +48,7 @@
                 return ReadExecutionController.empty();
             }
 
-            public PartitionIterator execute(ConsistencyLevel consistency, ClientState clientState, long queryStartNanoTime) throws RequestExecutionException
+            public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException
             {
                 return EmptyIterators.partition();
             }
@@ -140,12 +140,10 @@
      * Executes the query at the provided consistency level.
      *
      * @param consistency the consistency level to achieve for the query.
-     * @param clientState the {@code ClientState} for the query. In practice, this can be null unless
-     * {@code consistency} is a serial consistency.
-     *
+     * @param state client state
      * @return the result of the query.
      */
-    public PartitionIterator execute(ConsistencyLevel consistency, ClientState clientState, long queryStartNanoTime) throws RequestExecutionException;
+    public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException;
 
     /**
      * Execute the query for internal queries (that is, it basically executes the query locally).
@@ -254,4 +252,8 @@
     default void maybeValidateIndex()
     {
     }
+
+    default void trackWarnings()
+    {
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/ReadResponse.java b/src/java/org/apache/cassandra/db/ReadResponse.java
index 52e6fd5..9ef9128 100644
--- a/src/java/org/apache/cassandra/db/ReadResponse.java
+++ b/src/java/org/apache/cassandra/db/ReadResponse.java
@@ -48,6 +48,11 @@
         return new LocalDataResponse(data, command, rdi);
     }
 
+    public static ReadResponse createSimpleDataResponse(UnfilteredPartitionIterator data, ColumnFilter selection)
+    {
+        return new LocalDataResponse(data, selection);
+    }
+
     @VisibleForTesting
     public static ReadResponse createRemoteDataResponse(UnfilteredPartitionIterator data,
                                                         ByteBuffer repairedDataDigest,
@@ -184,6 +189,11 @@
                   DeserializationHelper.Flag.LOCAL);
         }
 
+        private LocalDataResponse(UnfilteredPartitionIterator iter, ColumnFilter selection)
+        {
+            super(build(iter, selection), null, false, MessagingService.current_version, DeserializationHelper.Flag.LOCAL);
+        }
+
         private static ByteBuffer build(UnfilteredPartitionIterator iter, ColumnFilter selection)
         {
             try (DataOutputBuffer buffer = new DataOutputBuffer())
diff --git a/src/java/org/apache/cassandra/db/RejectException.java b/src/java/org/apache/cassandra/db/RejectException.java
new file mode 100644
index 0000000..a879b76
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/RejectException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db;
+
+/**
+ * Represents a request to reject the current operation
+ */
+public abstract class RejectException extends RuntimeException
+{
+    public RejectException(String message)
+    {
+        super(message);
+    }
+
+    public RejectException(String message, Throwable cause)
+    {
+        super(message, cause);
+    }
+
+    public RejectException(Throwable cause)
+    {
+        super(cause);
+    }
+
+    public RejectException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace)
+    {
+        super(message, cause, enableSuppression, writableStackTrace);
+    }
+}
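For orientation, a hedged sketch of a concrete rejection built on this class; the subclass name is invented, and whether the throwers introduced in this patch (TombstoneOverwhelmingException, LocalReadSizeTooLargeException, RowIndexEntryReadSizeTooLargeException) extend RejectException is implied by the catch in ReadCommandVerbHandler rather than shown here.

    // Hypothetical subclass, for illustration only.
    public class ExampleThresholdRejectException extends RejectException
    {
        public ExampleThresholdRejectException(String query, long observed, long limit)
        {
            super(String.format("Query %s rejected: observed %d exceeds the configured limit %d", query, observed, limit));
        }
    }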
diff --git a/src/java/org/apache/cassandra/db/RepairedDataInfo.java b/src/java/org/apache/cassandra/db/RepairedDataInfo.java
index f80b113..32a4061 100644
--- a/src/java/org/apache/cassandra/db/RepairedDataInfo.java
+++ b/src/java/org/apache/cassandra/db/RepairedDataInfo.java
@@ -36,6 +36,8 @@
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 @NotThreadSafe
 class RepairedDataInfo
 {
@@ -281,7 +283,7 @@
                     return null;
 
                 long countBeforeOverreads = repairedCounter.counted();
-                long overreadStartTime = System.nanoTime();
+                long overreadStartTime = nanoTime();
                 if (currentPartition != null)
                     consumePartition(currentPartition, repairedCounter);
 
@@ -291,7 +293,7 @@
 
                 // we're not actually providing any more rows, just consuming the repaired data
                 long rows = repairedCounter.counted() - countBeforeOverreads;
-                long nanos = System.nanoTime() - overreadStartTime;
+                long nanos = nanoTime() - overreadStartTime;
                 metrics.repairedDataTrackingOverreadRows.update(rows);
                 metrics.repairedDataTrackingOverreadTime.update(nanos, TimeUnit.NANOSECONDS);
                 Tracing.trace("Read {} additional rows of repaired data for tracking in {}ps", rows, TimeUnit.NANOSECONDS.toMicros(nanos));
diff --git a/src/java/org/apache/cassandra/db/RowIndexEntry.java b/src/java/org/apache/cassandra/db/RowIndexEntry.java
index 215768b..80f53a9 100644
--- a/src/java/org/apache/cassandra/db/RowIndexEntry.java
+++ b/src/java/org/apache/cassandra/db/RowIndexEntry.java
@@ -23,7 +23,9 @@
 
 import com.codahale.metrics.Histogram;
 import org.apache.cassandra.cache.IMeasurableMemory;
+import org.apache.cassandra.config.DataStorageSpec;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.filter.RowIndexEntryReadSizeTooLargeException;
 import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.sstable.IndexInfo;
 import org.apache.cassandra.io.sstable.format.Version;
@@ -36,6 +38,9 @@
 import org.apache.cassandra.io.util.TrackedDataInputPlus;
 import org.apache.cassandra.metrics.DefaultNameFactory;
 import org.apache.cassandra.metrics.MetricNameFactory;
+import org.apache.cassandra.net.ParamType;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.vint.VIntCoding;
 import org.github.jamm.Unmetered;
@@ -72,7 +77,7 @@
  *     samples</i> (list of {@link IndexInfo} objects) and those who don't.
  *     For each <i>portion</i> of data for a single partition in the data file,
  *     an index sample is created. The size of that <i>portion</i> is defined
- *     by {@link org.apache.cassandra.config.Config#column_index_size_in_kb}.
+ *     by {@link org.apache.cassandra.config.Config#column_index_size}.
  * </p>
  * <p>
  *     Index entries with less than 2 index samples, will just store the
@@ -93,9 +98,9 @@
  *     "acceptable" amount of index samples per partition and those
  *     with an "enormous" amount of index samples. The barrier
  *     is controlled by the configuration parameter
- *     {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb}.
+ *     {@link org.apache.cassandra.config.Config#column_index_cache_size}.
  *     Index entries with a total serialized size of index samples up to
- *     {@code column_index_cache_size_in_kb} will be held in an array.
+ *     {@code column_index_cache_size} will be held in an array.
  *     Index entries exceeding that value will always be accessed from
  *     disk.
  * </p>
@@ -106,9 +111,9 @@
  *     <li>{@link RowIndexEntry} just stores the offset in the data file.</li>
  *     <li>{@link IndexedEntry} is for index entries with index samples
  *     and used for both current and legacy sstables, which do not exceed
- *     {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb}.</li>
+ *     {@link org.apache.cassandra.config.Config#column_index_cache_size}.</li>
  *     <li>{@link ShallowIndexedEntry} is for index entries with index samples
- *     that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb}
+ *     that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size}
  *     for sstables with an offset table to the index samples.</li>
  * </ul>
  * <p>
@@ -189,7 +194,7 @@
      * @param headerLength      deletion time of {@link RowIndexEntry}
      * @param columnIndexCount  number of {@link IndexInfo} entries in the {@link RowIndexEntry}
      * @param indexedPartSize   serialized size of all serialized {@link IndexInfo} objects and their offsets
-     * @param indexSamples      list with IndexInfo offsets (if total serialized size is less than {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb}
+     * @param indexSamples      list with IndexInfo offsets (if total serialized size is less than {@link org.apache.cassandra.config.Config#column_index_cache_size}
      * @param offsets           offsets of IndexInfo offsets
      * @param idxInfoSerializer the {@link IndexInfo} serializer
      */
@@ -201,14 +206,14 @@
     {
         // If the "partition building code" in BigTableWriter.append() via ColumnIndex returns a list
         // of IndexInfo objects, which is the case if the serialized size is less than
-        // Config.column_index_cache_size_in_kb, AND we have more than one IndexInfo object, we
+        // Config.column_index_cache_size, AND we have more than one IndexInfo object, we
         // construct an IndexedEntry object. (note: indexSamples.size() and columnIndexCount have the same meaning)
         if (indexSamples != null && indexSamples.size() > 1)
             return new IndexedEntry(dataFilePosition, deletionTime, headerLength,
                                     indexSamples.toArray(new IndexInfo[indexSamples.size()]), offsets,
                                     indexedPartSize, idxInfoSerializer);
         // Here we have to decide whether we have serialized IndexInfo objects that exceeds
-        // Config.column_index_cache_size_in_kb (not exceeding case covered above).
+        // Config.column_index_cache_size (not exceeding case covered above).
         // Such a "big" indexed-entry is represented as a shallow one.
         if (columnIndexCount > 1)
             return new ShallowIndexedEntry(dataFilePosition, indexFilePosition,
@@ -324,6 +329,8 @@
                 DeletionTime deletionTime = DeletionTime.serializer.deserialize(in);
                 int columnsIndexCount = (int) in.readUnsignedVInt();
 
+                checkSize(columnsIndexCount, size);
+
                 int indexedPartSize = size - serializedSize(deletionTime, headerLength, columnsIndexCount);
 
                 if (size <= DatabaseDescriptor.getColumnIndexCacheSize())
@@ -343,6 +350,51 @@
             }
         }
 
+        private void checkSize(int entries, int bytes)
+        {
+            ReadCommand command = ReadCommand.getCommand();
+            if (command == null || SchemaConstants.isSystemKeyspace(command.metadata().keyspace) || !DatabaseDescriptor.getReadThresholdsEnabled())
+                return;
+
+            DataStorageSpec.LongBytesBound warnThreshold = DatabaseDescriptor.getRowIndexReadSizeWarnThreshold();
+            DataStorageSpec.LongBytesBound failThreshold = DatabaseDescriptor.getRowIndexReadSizeFailThreshold();
+            if (warnThreshold == null && failThreshold == null)
+                return;
+
+            long estimatedMemory = estimateMaterializedIndexSize(entries, bytes);
+            ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(command.metadata().id);
+            if (cfs != null)
+                cfs.metric.rowIndexSize.update(estimatedMemory);
+
+            if (failThreshold != null && estimatedMemory > failThreshold.toBytes())
+            {
+                String msg = String.format("Query %s attempted to access a large RowIndexEntry estimated to be %d bytes " +
+                                           "in memory (total entries: %d, total bytes: %d) but the max allowed is %s;" +
+                                           " query aborted (see row_index_read_size_fail_threshold)",
+                                           command.toCQLString(), estimatedMemory, entries, bytes, failThreshold);
+                MessageParams.remove(ParamType.ROW_INDEX_READ_SIZE_WARN);
+                MessageParams.add(ParamType.ROW_INDEX_READ_SIZE_FAIL, estimatedMemory);
+
+                throw new RowIndexEntryReadSizeTooLargeException(msg);
+            }
+            else if (warnThreshold != null && estimatedMemory > warnThreshold.toBytes())
+            {
+                // emulate addIfLarger rather than a plain add, as a previous partition may be larger than this one
+                Long current = MessageParams.get(ParamType.ROW_INDEX_READ_SIZE_WARN);
+                if (current == null || current.compareTo(estimatedMemory) < 0)
+                    MessageParams.add(ParamType.ROW_INDEX_READ_SIZE_WARN, estimatedMemory);
+            }
+        }
+
+        private static long estimateMaterializedIndexSize(int entries, int bytes)
+        {
+            long overhead = IndexInfo.EMPTY_SIZE
+                            + ArrayClustering.EMPTY_SIZE
+                            + DeletionTime.EMPTY_SIZE;
+
+            return (overhead * entries) + bytes;
+        }
+
         public long deserializePositionAndSkip(DataInputPlus in) throws IOException
         {
             long position = in.readUnsignedVInt();
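estimateMaterializedIndexSize() above is linear in the number of index samples: each entry is charged the fixed empty-object overhead of an IndexInfo, an ArrayClustering and a DeletionTime on top of the serialized bytes. A worked example with a purely illustrative per-entry overhead (the real EMPTY_SIZE constants are JVM-dependent):

    long overheadPerEntry = 100;    // stand-in for IndexInfo.EMPTY_SIZE + ArrayClustering.EMPTY_SIZE + DeletionTime.EMPTY_SIZE
    int entries = 50_000;           // columnsIndexCount read for the partition
    int bytes = 4_000_000;          // serialized size of the index samples
    long estimated = overheadPerEntry * entries + bytes;   // 9,000,000 bytes
    // With row_index_read_size_fail_threshold = 8MiB (8,388,608 bytes) this partition's index would be
    // rejected by checkSize() before being materialized; with only a warn threshold configured it would
    // instead record a ROW_INDEX_READ_SIZE_WARN param.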
diff --git a/src/java/org/apache/cassandra/db/SSTableImporter.java b/src/java/org/apache/cassandra/db/SSTableImporter.java
index 989ff12..5949559 100644
--- a/src/java/org/apache/cassandra/db/SSTableImporter.java
+++ b/src/java/org/apache/cassandra/db/SSTableImporter.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -29,6 +28,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
diff --git a/src/java/org/apache/cassandra/db/SchemaCQLHelper.java b/src/java/org/apache/cassandra/db/SchemaCQLHelper.java
index 5d83a2b..b959ebc 100644
--- a/src/java/org/apache/cassandra/db/SchemaCQLHelper.java
+++ b/src/java/org/apache/cassandra/db/SchemaCQLHelper.java
@@ -19,6 +19,7 @@
 package org.apache.cassandra.db;
 
 import java.nio.ByteBuffer;
+import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Stream;
@@ -41,39 +42,15 @@
     /**
      * Generates the DDL statement for a {@code schema.cql} snapshot file.
      */
-    public static Stream<String> reCreateStatementsForSchemaCql(TableMetadata metadata, Types types)
+    public static Stream<String> reCreateStatementsForSchemaCql(TableMetadata metadata, KeyspaceMetadata keyspaceMetadata)
     {
         // Types come first, as table can't be created without them
-        Stream<String> udts = SchemaCQLHelper.getUserTypesAsCQL(metadata, types, true);
+        Stream<String> udts = SchemaCQLHelper.getUserTypesAsCQL(metadata, keyspaceMetadata.types, true);
 
-        return Stream.concat(udts,
-                             reCreateStatements(metadata,
-                                                true,
-                                                true,
-                                                true,
-                                                true));
-    }
+        Stream<String> tableMetadata = Stream.of(SchemaCQLHelper.getTableMetadataAsCQL(metadata, keyspaceMetadata));
 
-    public static Stream<String> reCreateStatements(TableMetadata metadata,
-                                                    boolean includeDroppedColumns,
-                                                    boolean internals,
-                                                    boolean ifNotExists,
-                                                    boolean includeIndexes)
-    {
-        // Record re-create schema statements
-        Stream<String> r = Stream.of(metadata)
-                                         .map((tm) -> SchemaCQLHelper.getTableMetadataAsCQL(tm,
-                                                                                            includeDroppedColumns,
-                                                                                            internals,
-                                                                                            ifNotExists));
-
-        if (includeIndexes)
-        {
-            // Indexes applied as last, since otherwise they may interfere with column drops / re-additions
-            r = Stream.concat(r, SchemaCQLHelper.getIndexesAsCQL(metadata, ifNotExists));
-        }
-
-        return r;
+        Stream<String> indexes = SchemaCQLHelper.getIndexesAsCQL(metadata, true);
+        return Stream.of(udts, tableMetadata, indexes).flatMap(Function.identity());
     }
 
     /**
@@ -83,20 +60,24 @@
      * that will not contain everything needed for user types.
      */
     @VisibleForTesting
-    public static String getTableMetadataAsCQL(TableMetadata metadata,
-                                               boolean includeDroppedColumns,
-                                               boolean internals,
-                                               boolean ifNotExists)
+    public static String getTableMetadataAsCQL(TableMetadata metadata, KeyspaceMetadata keyspaceMetadata)
     {
         if (metadata.isView())
         {
-            KeyspaceMetadata keyspaceMetadata = Schema.instance.getKeyspaceMetadata(metadata.keyspace);
             ViewMetadata viewMetadata = keyspaceMetadata.views.get(metadata.name).orElse(null);
             assert viewMetadata != null;
-            return viewMetadata.toCqlString(internals, ifNotExists);
+            /*
+             * The first argument (withInternals) includes the table metadata id and clustering column order;
+             * the second argument (ifNotExists) adds an IF NOT EXISTS clause to the generated creation statement.
+             */
+            return viewMetadata.toCqlString(true, true);
         }
 
-        return metadata.toCqlString(includeDroppedColumns, internals, ifNotExists);
+        /*
+         * In addition to the withInternals and ifNotExists arguments, includeDroppedColumns appends the dropped
+         * columns as ALTER TABLE statements in the snapshot.
+         */
+        return metadata.toCqlString(true, true, true);
     }
 
     /**
@@ -162,7 +143,7 @@
     private static UserType getType(TableMetadata metadata, Types types, ByteBuffer name)
     {
         return types.get(name)
-                    .orElseThrow(() -> new IllegalStateException(String.format("user type %s is part of table %s definition but its definition was missing", 
+                    .orElseThrow(() -> new IllegalStateException(String.format("user type %s is part of table %s definition but its definition was missing",
                                                                               UTF8Type.instance.getString(name),
                                                                               metadata)));
     }
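With the rewrite above, the statement order of a schema.cql snapshot is now fixed: user types first, then the table (with internals, IF NOT EXISTS and dropped columns), then indexes last so they cannot interfere with column drops and re-additions. A hedged sketch of a call site, assuming the keyspace metadata is looked up the same way the removed branch did:

    KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(metadata.keyspace);
    String schemaCql = SchemaCQLHelper.reCreateStatementsForSchemaCql(metadata, ksm)
                                      .collect(Collectors.joining("\n"));
    // Emits, in order: CREATE TYPE ...; CREATE TABLE IF NOT EXISTS ... (plus ALTER TABLE ... DROP for any
    // dropped columns); CREATE INDEX IF NOT EXISTS ... last.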
diff --git a/src/java/org/apache/cassandra/db/SerializationHeader.java b/src/java/org/apache/cassandra/db/SerializationHeader.java
index 1c22feb..11239d8 100644
--- a/src/java/org/apache/cassandra/db/SerializationHeader.java
+++ b/src/java/org/apache/cassandra/db/SerializationHeader.java
@@ -105,7 +105,7 @@
             return sstables;
 
         List<SSTableReader> readers = new ArrayList<>(sstables);
-        readers.sort(SSTableReader.generationReverseComparator);
+        readers.sort(SSTableReader.idReverseComparator);
         return readers;
     }
 
diff --git a/src/java/org/apache/cassandra/db/SimpleBuilders.java b/src/java/org/apache/cassandra/db/SimpleBuilders.java
index 6f609ec..def3309 100644
--- a/src/java/org/apache/cassandra/db/SimpleBuilders.java
+++ b/src/java/org/apache/cassandra/db/SimpleBuilders.java
@@ -36,7 +36,8 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.CounterId;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;
 
 public abstract class SimpleBuilders
 {
@@ -391,7 +392,7 @@
                     ListType lt = (ListType)column.type;
                     assert value instanceof List;
                     for (Object elt : (List)value)
-                        builder.addCell(cell(column, toByteBuffer(elt, lt.getElementsType()), CellPath.create(ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes()))));
+                        builder.addCell(cell(column, toByteBuffer(elt, lt.getElementsType()), CellPath.create(ByteBuffer.wrap(nextTimeUUIDAsBytes()))));
                     break;
                 case SET:
                     SetType st = (SetType)column.type;
@@ -420,6 +421,12 @@
             return this;
         }
 
+        public Row.SimpleBuilder deletePrevious()
+        {
+            builder.addRowDeletion(Row.Deletion.regular(new DeletionTime(timestamp - 1, nowInSec)));
+            return this;
+        }
+
         public Row.SimpleBuilder delete(String columnName)
         {
             return add(columnName, null);
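A stand-alone sketch (hypothetical types, not the Cassandra builder API) of the idea behind the new deletePrevious(): a row deletion at timestamp - 1 shadows everything written earlier, while values written at timestamp itself survive.

import java.util.Map;
import java.util.TreeMap;

public class DeletePreviousSketch
{
    static long deletionTimestamp = Long.MIN_VALUE;     // row-level tombstone, initially "no deletion"
    static Map<Long, String> cells = new TreeMap<>();   // write timestamp -> value

    static void write(long timestamp, String value) { cells.put(timestamp, value); }
    static void deletePrevious(long timestamp)      { deletionTimestamp = timestamp - 1; }

    static String read()
    {
        // only cells newer than the deletion survive, mirroring how a row deletion at
        // (timestamp - 1) shadows earlier writes but not the write at 'timestamp' itself
        return cells.entrySet().stream()
                    .filter(e -> e.getKey() > deletionTimestamp)
                    .map(Map.Entry::getValue)
                    .reduce((a, b) -> b)
                    .orElse(null);
    }

    public static void main(String[] args)
    {
        write(10, "old");
        deletePrevious(20);
        write(20, "new");
        System.out.println(read()); // prints "new": the write at ts=10 is shadowed
    }
}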
diff --git a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
index 8ac26e8..963b9fe 100644
--- a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
@@ -21,6 +21,7 @@
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Sets;
@@ -31,10 +32,13 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.lifecycle.*;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.db.transform.RTBoundValidator;
 import org.apache.cassandra.db.transform.Transformation;
+import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
+import org.apache.cassandra.db.virtual.VirtualTable;
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
@@ -57,8 +61,8 @@
 {
     protected static final SelectionDeserializer selectionDeserializer = new Deserializer();
 
-    private final DecoratedKey partitionKey;
-    private final ClusteringIndexFilter clusteringIndexFilter;
+    protected final DecoratedKey partitionKey;
+    protected final ClusteringIndexFilter clusteringIndexFilter;
 
     @VisibleForTesting
     protected SinglePartitionReadCommand(boolean isDigest,
@@ -71,14 +75,57 @@
                                          DataLimits limits,
                                          DecoratedKey partitionKey,
                                          ClusteringIndexFilter clusteringIndexFilter,
-                                         IndexMetadata index)
+                                         IndexMetadata index,
+                                         boolean trackWarnings)
     {
-        super(Kind.SINGLE_PARTITION, isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, index);
+        super(Kind.SINGLE_PARTITION, isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, index, trackWarnings);
         assert partitionKey.getPartitioner() == metadata.partitioner;
         this.partitionKey = partitionKey;
         this.clusteringIndexFilter = clusteringIndexFilter;
     }
 
+    private static SinglePartitionReadCommand create(boolean isDigest,
+                                                    int digestVersion,
+                                                    boolean acceptsTransient,
+                                                    TableMetadata metadata,
+                                                    int nowInSec,
+                                                    ColumnFilter columnFilter,
+                                                    RowFilter rowFilter,
+                                                    DataLimits limits,
+                                                    DecoratedKey partitionKey,
+                                                    ClusteringIndexFilter clusteringIndexFilter,
+                                                    IndexMetadata index,
+                                                    boolean trackWarnings)
+    {
+        if (metadata.isVirtual())
+        {
+            return new VirtualTableSinglePartitionReadCommand(isDigest,
+                                                              digestVersion,
+                                                              acceptsTransient,
+                                                              metadata,
+                                                              nowInSec,
+                                                              columnFilter,
+                                                              rowFilter,
+                                                              limits,
+                                                              partitionKey,
+                                                              clusteringIndexFilter,
+                                                              index,
+                                                              trackWarnings);
+        }
+        return new SinglePartitionReadCommand(isDigest,
+                                              digestVersion,
+                                              acceptsTransient,
+                                              metadata,
+                                              nowInSec,
+                                              columnFilter,
+                                              rowFilter,
+                                              limits,
+                                              partitionKey,
+                                              clusteringIndexFilter,
+                                              index,
+                                              trackWarnings);
+    }
+
     /**
      * Creates a new read command on a single partition.
      *
@@ -102,17 +149,18 @@
                                                     ClusteringIndexFilter clusteringIndexFilter,
                                                     IndexMetadata indexMetadata)
     {
-        return new SinglePartitionReadCommand(false,
-                                              0,
-                                              false,
-                                              metadata,
-                                              nowInSec,
-                                              columnFilter,
-                                              rowFilter,
-                                              limits,
-                                              partitionKey,
-                                              clusteringIndexFilter,
-                                              indexMetadata);
+        return create(false,
+                      0,
+                      false,
+                      metadata,
+                      nowInSec,
+                      columnFilter,
+                      rowFilter,
+                      limits,
+                      partitionKey,
+                      clusteringIndexFilter,
+                      indexMetadata,
+                      false);
     }
 
     /**
@@ -278,65 +326,69 @@
 
     public SinglePartitionReadCommand copy()
     {
-        return new SinglePartitionReadCommand(isDigestQuery(),
-                                              digestVersion(),
-                                              acceptsTransient(),
-                                              metadata(),
-                                              nowInSec(),
-                                              columnFilter(),
-                                              rowFilter(),
-                                              limits(),
-                                              partitionKey(),
-                                              clusteringIndexFilter(),
-                                              indexMetadata());
+        return create(isDigestQuery(),
+                      digestVersion(),
+                      acceptsTransient(),
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      limits(),
+                      partitionKey(),
+                      clusteringIndexFilter(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
     protected SinglePartitionReadCommand copyAsDigestQuery()
     {
-        return new SinglePartitionReadCommand(true,
-                                              digestVersion(),
-                                              acceptsTransient(),
-                                              metadata(),
-                                              nowInSec(),
-                                              columnFilter(),
-                                              rowFilter(),
-                                              limits(),
-                                              partitionKey(),
-                                              clusteringIndexFilter(),
-                                              indexMetadata());
+        return create(true,
+                      digestVersion(),
+                      acceptsTransient(),
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      limits(),
+                      partitionKey(),
+                      clusteringIndexFilter(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
     protected SinglePartitionReadCommand copyAsTransientQuery()
     {
-        return new SinglePartitionReadCommand(false,
-                                              0,
-                                              true,
-                                              metadata(),
-                                              nowInSec(),
-                                              columnFilter(),
-                                              rowFilter(),
-                                              limits(),
-                                              partitionKey(),
-                                              clusteringIndexFilter(),
-                                              indexMetadata());
+        return create(false,
+                      0,
+                      true,
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      limits(),
+                      partitionKey(),
+                      clusteringIndexFilter(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
     public SinglePartitionReadCommand withUpdatedLimit(DataLimits newLimits)
     {
-        return new SinglePartitionReadCommand(isDigestQuery(),
-                                              digestVersion(),
-                                              acceptsTransient(),
-                                              metadata(),
-                                              nowInSec(),
-                                              columnFilter(),
-                                              rowFilter(),
-                                              newLimits,
-                                              partitionKey(),
-                                              clusteringIndexFilter(),
-                                              indexMetadata());
+        return create(isDigestQuery(),
+                      digestVersion(),
+                      acceptsTransient(),
+                      metadata(),
+                      nowInSec(),
+                      columnFilter(),
+                      rowFilter(),
+                      newLimits,
+                      partitionKey(),
+                      clusteringIndexFilter(),
+                      indexMetadata(),
+                      isTrackingWarnings());
     }
 
     @Override
@@ -371,21 +423,25 @@
     {
         // We shouldn't have set digest yet when reaching that point
         assert !isDigestQuery();
-        return create(metadata(),
-                      nowInSec(),
-                      columnFilter(),
-                      rowFilter(),
-                      limits,
-                      partitionKey(),
-                      lastReturned == null ? clusteringIndexFilter() : clusteringIndexFilter.forPaging(metadata().comparator, lastReturned, false));
+        SinglePartitionReadCommand cmd = create(metadata(),
+                                                nowInSec(),
+                                                columnFilter(),
+                                                rowFilter(),
+                                                limits,
+                                                partitionKey(),
+                                                lastReturned == null ? clusteringIndexFilter() : clusteringIndexFilter.forPaging(metadata().comparator, lastReturned, false));
+        if (isTrackingWarnings())
+            cmd.trackWarnings();
+        return cmd;
     }
 
-    public PartitionIterator execute(ConsistencyLevel consistency, ClientState clientState, long queryStartNanoTime) throws RequestExecutionException
+    @Override
+    public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException
     {
         if (clusteringIndexFilter.isEmpty(metadata().comparator))
             return EmptyIterators.partition();
 
-        return StorageProxy.read(Group.one(this), consistency, clientState, queryStartNanoTime);
+        return StorageProxy.read(Group.one(this), consistency, queryStartNanoTime);
     }
 
     protected void recordLatency(TableMetrics metric, long latencyNanos)
@@ -605,20 +661,20 @@
         InputCollector<UnfilteredRowIterator> inputCollector = iteratorsForPartition(view, controller);
         try
         {
+            SSTableReadMetricsCollector metricsCollector = new SSTableReadMetricsCollector();
+
             for (Memtable memtable : view.memtables)
             {
-                Partition partition = memtable.getPartition(partitionKey());
-                if (partition == null)
+                @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator
+                UnfilteredRowIterator iter = memtable.rowIterator(partitionKey(), filter.getSlices(metadata()), columnFilter(), filter.isReversed(), metricsCollector);
+                if (iter == null)
                     continue;
 
                 if (memtable.getMinTimestamp() != Memtable.NO_MIN_TIMESTAMP)
                     minTimestamp = Math.min(minTimestamp, memtable.getMinTimestamp());
 
-                @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator
-                UnfilteredRowIterator iter = filter.getUnfilteredRowIterator(columnFilter(), partition);
-
                 // Memtable data is always considered unrepaired
-                controller.updateMinOldestUnrepairedTombstone(partition.stats().minLocalDeletionTime);
+                controller.updateMinOldestUnrepairedTombstone(memtable.getMinLocalDeletionTime());
                 inputCollector.addMemtableIterator(RTBoundValidator.validate(iter, RTBoundValidator.Stage.MEMTABLE, false));
 
                 mostRecentPartitionTombstone = Math.max(mostRecentPartitionTombstone,
@@ -641,8 +697,6 @@
             int nonIntersectingSSTables = 0;
             int includedDueToTombstones = 0;
 
-            SSTableReadMetricsCollector metricsCollector = new SSTableReadMetricsCollector();
-
             if (controller.isTrackingRepairedStatus())
                 Tracing.trace("Collecting data from sstables and tracking repaired status");
 
@@ -805,17 +859,14 @@
         ColumnFamilyStore.ViewFragment view = cfs.select(View.select(SSTableSet.LIVE, partitionKey()));
 
         ImmutableBTreePartition result = null;
+        SSTableReadMetricsCollector metricsCollector = new SSTableReadMetricsCollector();
 
         Tracing.trace("Merging memtable contents");
         for (Memtable memtable : view.memtables)
         {
-            Partition partition = memtable.getPartition(partitionKey());
-            if (partition == null)
-                continue;
-
-            try (UnfilteredRowIterator iter = filter.getUnfilteredRowIterator(columnFilter(), partition))
+            try (UnfilteredRowIterator iter = memtable.rowIterator(partitionKey, filter.getSlices(metadata()), columnFilter(), isReversed(), metricsCollector))
             {
-                if (iter.isEmpty())
+                if (iter == null)
                     continue;
 
                 result = add(RTBoundValidator.validate(iter, RTBoundValidator.Stage.MEMTABLE, false),
@@ -829,7 +880,6 @@
         /* add the SSTables on disk */
         view.sstables.sort(SSTableReader.maxTimestampDescending);
         // read sorted sstables
-        SSTableReadMetricsCollector metricsCollector = new SSTableReadMetricsCollector();
         for (SSTableReader sstable : view.sstables)
         {
             // if we've already seen a partition tombstone with a timestamp greater
@@ -1081,20 +1131,24 @@
         return Verb.READ_REQ;
     }
 
+    @Override
     protected void appendCQLWhereClause(StringBuilder sb)
     {
-        sb.append(" WHERE ");
+        sb.append(" WHERE ").append(partitionKey().toCQLString(metadata()));
 
-        sb.append(ColumnMetadata.toCQLString(metadata().partitionKeyColumns())).append(" = ");
-        DataRange.appendKeyString(sb, metadata().partitionKeyType, partitionKey().getKey());
-
-        // We put the row filter first because the clustering index filter can end by "ORDER BY"
-        if (!rowFilter().isEmpty())
-            sb.append(" AND ").append(rowFilter());
-
-        String filterString = clusteringIndexFilter().toCQLString(metadata());
+        String filterString = clusteringIndexFilter().toCQLString(metadata(), rowFilter());
         if (!filterString.isEmpty())
-            sb.append(" AND ").append(filterString);
+        {
+            if (!clusteringIndexFilter().selectsAllPartition() || !rowFilter().isEmpty())
+                sb.append(" AND ");
+            sb.append(filterString);
+        }
+    }
+
+    @Override
+    public String loggableTokens()
+    {
+        return "token=" + partitionKey.getToken().toString();
     }
 
     protected void serializeSelection(DataOutputPlus out, int version) throws IOException
@@ -1144,22 +1198,48 @@
                                                                clusteringIndexFilter));
             }
 
-            return new Group(commands, limits);
+            return create(commands, limits);
         }
 
-        public Group(List<SinglePartitionReadCommand> commands, DataLimits limits)
+        private Group(List<SinglePartitionReadCommand> commands, DataLimits limits)
         {
             super(commands, limits);
         }
 
         public static Group one(SinglePartitionReadCommand command)
         {
-            return new Group(Collections.singletonList(command), command.limits());
+            return create(Collections.singletonList(command), command.limits());
         }
 
-        public PartitionIterator execute(ConsistencyLevel consistency, ClientState clientState, long queryStartNanoTime) throws RequestExecutionException
+        public static Group create(List<SinglePartitionReadCommand> commands, DataLimits limits)
         {
-            return StorageProxy.read(this, consistency, clientState, queryStartNanoTime);
+            return commands.get(0).metadata().isVirtual() ?
+                   new VirtualTableGroup(commands, limits) :
+                   new Group(commands, limits);
+        }
+
+        public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException
+        {
+            return StorageProxy.read(this, consistency, queryStartNanoTime);
+        }
+    }
+
+    public static class VirtualTableGroup extends Group
+    {
+        public VirtualTableGroup(List<SinglePartitionReadCommand> commands, DataLimits limits)
+        {
+            super(commands, limits);
+        }
+
+        @Override
+        public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException
+        {
+            if (queries.size() == 1)
+                return queries.get(0).execute(consistency, state, queryStartNanoTime);
+
+            return PartitionIterators.concat(queries.stream()
+                                                    .map(q -> q.execute(consistency, state, queryStartNanoTime))
+                                                    .collect(Collectors.toList()));
         }
     }
 
@@ -1180,7 +1260,7 @@
         {
             DecoratedKey key = metadata.partitioner.decorateKey(metadata.partitionKeyType.readBuffer(in, DatabaseDescriptor.getMaxValueSize()));
             ClusteringIndexFilter filter = ClusteringIndexFilter.serializer.deserialize(in, version, metadata);
-            return new SinglePartitionReadCommand(isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, key, filter, index);
+            return SinglePartitionReadCommand.create(isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, key, filter, index, false);
         }
     }
 
@@ -1211,4 +1291,50 @@
             return mergedSSTables;
         }
     }
+
+    public static class VirtualTableSinglePartitionReadCommand extends SinglePartitionReadCommand
+    {
+        protected VirtualTableSinglePartitionReadCommand(boolean isDigest,
+                                                         int digestVersion,
+                                                         boolean acceptsTransient,
+                                                         TableMetadata metadata,
+                                                         int nowInSec,
+                                                         ColumnFilter columnFilter,
+                                                         RowFilter rowFilter,
+                                                         DataLimits limits,
+                                                         DecoratedKey partitionKey,
+                                                         ClusteringIndexFilter clusteringIndexFilter,
+                                                         IndexMetadata index,
+                                                         boolean trackWarnings)
+        {
+            super(isDigest, digestVersion, acceptsTransient, metadata, nowInSec, columnFilter, rowFilter, limits, partitionKey, clusteringIndexFilter, index, trackWarnings);
+        }
+
+        @Override
+        public PartitionIterator execute(ConsistencyLevel consistency, ClientState state, long queryStartNanoTime) throws RequestExecutionException
+        {
+            return executeInternal(executionController());
+        }
+
+        @Override
+        @SuppressWarnings("resource")
+        public UnfilteredPartitionIterator executeLocally(ReadExecutionController executionController)
+        {
+            VirtualTable view = VirtualKeyspaceRegistry.instance.getTableNullable(metadata().id);
+            UnfilteredPartitionIterator resultIterator = view.select(partitionKey, clusteringIndexFilter, columnFilter());
+            return limits().filter(rowFilter().filter(resultIterator, nowInSec()), nowInSec(), selectsFullPartition());
+        }
+
+        @Override
+        public ReadExecutionController executionController()
+        {
+            return ReadExecutionController.empty();
+        }
+
+        @Override
+        public ReadExecutionController executionController(boolean trackRepairedStatus)
+        {
+            return executionController();
+        }
+    }
 }
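A stand-alone sketch of the dispatch pattern introduced here, with illustrative types rather than the Cassandra classes: the static create(...) factory (and Group.create(...)) hands back a virtual-table command when the table is virtual, which short-circuits the distributed read path and serves the query locally.

public class ReadCommandDispatchSketch
{
    interface ReadCommand { String execute(); }

    static class RegularRead implements ReadCommand
    {
        public String execute() { return "read via StorageProxy / replicas"; }
    }

    static class VirtualTableRead implements ReadCommand
    {
        public String execute() { return "read locally from the virtual table registry"; }
    }

    // mirrors SinglePartitionReadCommand.create(...): callers never need to know
    // which concrete command they got back
    static ReadCommand create(boolean isVirtual)
    {
        return isVirtual ? new VirtualTableRead() : new RegularRead();
    }

    public static void main(String[] args)
    {
        System.out.println(create(false).execute());
        System.out.println(create(true).execute());
    }
}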
diff --git a/src/java/org/apache/cassandra/db/SinglePartitionReadQuery.java b/src/java/org/apache/cassandra/db/SinglePartitionReadQuery.java
index 755d552..e595fcb 100644
--- a/src/java/org/apache/cassandra/db/SinglePartitionReadQuery.java
+++ b/src/java/org/apache/cassandra/db/SinglePartitionReadQuery.java
@@ -53,9 +53,7 @@
                                                                         List<DecoratedKey> partitionKeys,
                                                                         ClusteringIndexFilter clusteringIndexFilter)
     {
-        return metadata.isVirtual()
-             ? VirtualTableSinglePartitionReadQuery.Group.create(metadata, nowInSec, columnFilter, rowFilter, limits, partitionKeys, clusteringIndexFilter)
-             : SinglePartitionReadCommand.Group.create(metadata, nowInSec, columnFilter, rowFilter, limits, partitionKeys, clusteringIndexFilter);
+        return SinglePartitionReadCommand.Group.create(metadata, nowInSec, columnFilter, rowFilter, limits, partitionKeys, clusteringIndexFilter);
     }
 
 
@@ -100,9 +98,7 @@
                                                   DecoratedKey partitionKey,
                                                   ClusteringIndexFilter clusteringIndexFilter)
     {
-        return metadata.isVirtual()
-             ? VirtualTableSinglePartitionReadQuery.create(metadata, nowInSec, columnFilter, rowFilter, limits, partitionKey, clusteringIndexFilter)
-             : SinglePartitionReadCommand.create(metadata, nowInSec, columnFilter, rowFilter, limits, partitionKey, clusteringIndexFilter);
+        return SinglePartitionReadCommand.create(metadata, nowInSec, columnFilter, rowFilter, limits, partitionKey, clusteringIndexFilter);
     }
 
     /**
@@ -282,6 +278,12 @@
         }
 
         @Override
+        public void trackWarnings()
+        {
+            queries.forEach(ReadQuery::trackWarnings);
+        }
+
+        @Override
         public String toString()
         {
             return queries.toString();
diff --git a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
index fe38d64..dfd52c5 100644
--- a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
+++ b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
@@ -32,11 +32,14 @@
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaChangeListener;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.concurrent.Refs;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * A very simplistic/crude partition count/size estimator.
  *
@@ -46,7 +49,7 @@
  *
  * See CASSANDRA-7688.
  */
-public class SizeEstimatesRecorder extends SchemaChangeListener implements Runnable
+public class SizeEstimatesRecorder implements SchemaChangeListener, Runnable
 {
     private static final Logger logger = LoggerFactory.getLogger(SizeEstimatesRecorder.class);
 
@@ -89,7 +92,7 @@
             boolean rangesAreEqual = primaryRanges.equals(localPrimaryRanges);
             for (ColumnFamilyStore table : keyspace.getColumnFamilyStores())
             {
-                long start = System.nanoTime();
+                long start = nanoTime();
 
                 // compute estimates for primary ranges for backwards compatibility
                 Map<Range<Token>, Pair<Long, Long>> estimates = computeSizeEstimates(table, primaryRanges);
@@ -103,7 +106,7 @@
                 }
                 SystemKeyspace.updateTableEstimates(table.metadata.keyspace, table.metadata.name, SystemKeyspace.TABLE_ESTIMATES_TYPE_LOCAL_PRIMARY, estimates);
 
-                long passed = System.nanoTime() - start;
+                long passed = nanoTime() - start;
                 if (logger.isTraceEnabled())
                     logger.trace("Spent {} milliseconds on estimating {}.{} size",
                                  TimeUnit.NANOSECONDS.toMillis(passed),
@@ -175,8 +178,8 @@
     }
 
     @Override
-    public void onDropTable(String keyspace, String table)
+    public void onDropTable(TableMetadata table, boolean dropData)
     {
-        SystemKeyspace.clearEstimates(keyspace, table);
+        SystemKeyspace.clearEstimates(table.keyspace, table.name);
     }
 }
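The timing change above only swaps System.nanoTime() for the statically imported Clock.Global.nanoTime(); the measure-and-convert pattern itself is unchanged, as in this small JDK-only sketch (Thread.sleep stands in for the estimate computation).

import java.util.concurrent.TimeUnit;

public class ElapsedTimeSketch
{
    public static void main(String[] args) throws InterruptedException
    {
        long start = System.nanoTime();   // the patch uses Clock.Global.nanoTime() instead
        Thread.sleep(25);                 // stand-in for computeSizeEstimates(...)
        long passed = System.nanoTime() - start;
        System.out.println("Spent " + TimeUnit.NANOSECONDS.toMillis(passed) + " milliseconds");
    }
}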
diff --git a/src/java/org/apache/cassandra/db/Slice.java b/src/java/org/apache/cassandra/db/Slice.java
index 8956bd1..e2c787d 100644
--- a/src/java/org/apache/cassandra/db/Slice.java
+++ b/src/java/org/apache/cassandra/db/Slice.java
@@ -22,7 +22,6 @@
 import java.util.*;
 
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.ByteArrayAccessor;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 
diff --git a/src/java/org/apache/cassandra/db/Slices.java b/src/java/org/apache/cassandra/db/Slices.java
index 441a5d3..b3f5681 100644
--- a/src/java/org/apache/cassandra/db/Slices.java
+++ b/src/java/org/apache/cassandra/db/Slices.java
@@ -24,6 +24,8 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterators;
 
+import org.apache.cassandra.cql3.Operator;
+import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -141,7 +143,7 @@
      */
     public abstract boolean intersects(List<ByteBuffer> minClusteringValues, List<ByteBuffer> maxClusteringValues);
 
-    public abstract String toCQLString(TableMetadata metadata);
+    public abstract String toCQLString(TableMetadata metadata, RowFilter rowFilter);
 
     /**
      * Checks if this <code>Slices</code> is empty.
@@ -549,7 +551,8 @@
             return sb.append("}").toString();
         }
 
-        public String toCQLString(TableMetadata metadata)
+        @Override
+        public String toCQLString(TableMetadata metadata, RowFilter rowFilter)
         {
             StringBuilder sb = new StringBuilder();
 
@@ -593,7 +596,7 @@
                         sb.append(" AND ");
                     needAnd = true;
 
-                    sb.append(column.name);
+                    sb.append(column.name.toCQLString());
 
                     Set<ByteBuffer> values = new LinkedHashSet<>();
                     for (int j = 0; j < componentInfo.size(); j++)
@@ -601,20 +604,25 @@
 
                     if (values.size() == 1)
                     {
-                        sb.append(" = ").append(column.type.getString(first.startValue));
+                        sb.append(" = ").append(column.type.toCQLString(first.startValue));
+                        rowFilter = rowFilter.without(column, Operator.EQ, first.startValue);
                     }
                     else
                     {
                         sb.append(" IN (");
                         int j = 0;
                         for (ByteBuffer value : values)
-                            sb.append(j++ == 0 ? "" : ", ").append(column.type.getString(value));
+                        {
+                            sb.append(j++ == 0 ? "" : ", ").append(column.type.toCQLString(value));
+                            rowFilter = rowFilter.without(column, Operator.EQ, value);
+                        }
                         sb.append(")");
                     }
                 }
                 else
                 {
                     boolean isReversed = column.isReversedType();
+                    Operator operator;
 
                     // As said above, we assume (without checking) that this means all ComponentOfSlice for this column
                     // are the same, so we only bother about the first.
@@ -623,27 +631,39 @@
                         if (needAnd)
                             sb.append(" AND ");
                         needAnd = true;
-                        sb.append(column.name);
+                        sb.append(column.name.toCQLString());
                         if (isReversed)
-                            sb.append(first.startInclusive ? " <= " : " < ");
+                            operator = first.startInclusive ? Operator.LTE : Operator.LT;
                         else
-                            sb.append(first.startInclusive ? " >= " : " > ");
-                        sb.append(column.type.getString(first.startValue));
+                            operator = first.startInclusive ? Operator.GTE : Operator.GT;
+                        sb.append(' ').append(operator.toString()).append(' ')
+                          .append(column.type.toCQLString(first.startValue));
+                        rowFilter = rowFilter.without(column, operator, first.startValue);
                     }
                     if (first.endValue != null)
                     {
                         if (needAnd)
                             sb.append(" AND ");
                         needAnd = true;
-                        sb.append(column.name);
+                        sb.append(column.name.toCQLString());
                         if (isReversed)
-                            sb.append(first.endInclusive ? " >= " : " > ");
+                            operator = first.endInclusive ? Operator.GTE : Operator.GT;
                         else
-                            sb.append(first.endInclusive ? " <= " : " < ");
-                        sb.append(column.type.getString(first.endValue));
+                            operator = first.endInclusive ? Operator.LTE : Operator.LT;
+                        sb.append(' ').append(operator.toString()).append(' ')
+                          .append(column.type.toCQLString(first.endValue));
+                        rowFilter = rowFilter.without(column, operator, first.endValue);
                     }
                 }
             }
+
+            if (!rowFilter.isEmpty())
+            {
+                if (needAnd)
+                    sb.append(" AND ");
+                sb.append(rowFilter.toCQLString());
+            }
+
             return sb.toString();
         }
 
@@ -764,9 +784,10 @@
             return "ALL";
         }
 
-        public String toCQLString(TableMetadata metadata)
+        @Override
+        public String toCQLString(TableMetadata metadata, RowFilter rowFilter)
         {
-            return "";
+            return rowFilter.toCQLString();
         }
     }
 
@@ -839,7 +860,8 @@
             return "NONE";
         }
 
-        public String toCQLString(TableMetadata metadata)
+        @Override
+        public String toCQLString(TableMetadata metadata, RowFilter rowFilter)
         {
             return "";
         }
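A self-contained sketch (plain JDK types, hypothetical values) of the "consume and append the remainder" idea behind toCQLString(metadata, rowFilter): restrictions already rendered from the slice bounds are removed from the filter via without(...), and only what is left is appended, so no condition shows up twice in the reconstructed WHERE clause.

import java.util.ArrayList;
import java.util.List;

public class WhereClauseSketch
{
    public static void main(String[] args)
    {
        List<String> rowFilter = new ArrayList<>(List.of("ck > 5", "v = 'x'"));
        StringBuilder sb = new StringBuilder("pk = 1");

        // rendering the slice bound also "consumes" the equivalent filter expression
        sb.append(" AND ").append("ck > 5");
        rowFilter.remove("ck > 5");                       // analogous to rowFilter.without(...)

        if (!rowFilter.isEmpty())
            sb.append(" AND ").append(String.join(" AND ", rowFilter));

        System.out.println("WHERE " + sb);                // WHERE pk = 1 AND ck > 5 AND v = 'x'
    }
}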
diff --git a/src/java/org/apache/cassandra/db/SnapshotCommand.java b/src/java/org/apache/cassandra/db/SnapshotCommand.java
index 484db2f..e909e50 100644
--- a/src/java/org/apache/cassandra/db/SnapshotCommand.java
+++ b/src/java/org/apache/cassandra/db/SnapshotCommand.java
@@ -22,8 +22,6 @@
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.Verb;
 
 public class SnapshotCommand
 {
diff --git a/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java b/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
index 5ef729a..4e6ab11 100644
--- a/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
+++ b/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
@@ -17,15 +17,11 @@
  */
 package org.apache.cassandra.db;
 
-import java.util.Map;
 import javax.management.openmbean.*;
 
 import com.google.common.base.Throwables;
 import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.utils.Pair;
-
-
-
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 
 public class SnapshotDetailsTabularData
 {
@@ -34,13 +30,17 @@
             "Keyspace name",
             "Column family name",
             "True size",
-            "Size on disk"};
+            "Size on disk",
+            "Creation time",
+            "Expiration time",};
 
     private static final String[] ITEM_DESCS = new String[]{"snapshot_name",
             "keyspace_name",
             "columnfamily_name",
             "TrueDiskSpaceUsed",
-            "TotalDiskSpaceUsed"};
+            "TotalDiskSpaceUsed",
+            "created_at",
+            "expires_at",};
 
     private static final String TYPE_NAME = "SnapshotDetails";
 
@@ -56,7 +56,7 @@
     {
         try
         {
-            ITEM_TYPES = new OpenType[]{ SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING };
+            ITEM_TYPES = new OpenType[]{ SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING };
 
             COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES);
 
@@ -69,18 +69,25 @@
     }
 
 
-    public static void from(final String snapshot, final String ks, final String cf, Map.Entry<String, Directories.SnapshotSizeDetails> snapshotDetail, TabularDataSupport result)
+    public static void from(TableSnapshot details, TabularDataSupport result)
     {
         try
         {
-            final String totalSize = FileUtils.stringifyFileSize(snapshotDetail.getValue().sizeOnDiskBytes);
-            final String liveSize =  FileUtils.stringifyFileSize(snapshotDetail.getValue().dataSizeBytes);
+            final String totalSize = FileUtils.stringifyFileSize(details.computeSizeOnDiskBytes());
+            final String liveSize =  FileUtils.stringifyFileSize(details.computeTrueSizeBytes());
+            String createdAt = safeToString(details.getCreatedAt());
+            String expiresAt = safeToString(details.getExpiresAt());
             result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES,
-                    new Object[]{ snapshot, ks, cf, liveSize, totalSize }));
+                    new Object[]{ details.getTag(), details.getKeyspaceName(), details.getTableName(), liveSize, totalSize, createdAt, expiresAt }));
         }
         catch (OpenDataException e)
         {
             throw new RuntimeException(e);
         }
     }
+
+    private static String safeToString(Object object)
+    {
+        return object == null ? null : object.toString();
+    }
 }
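A hedged client-side sketch of how a JMX consumer might read the two new columns; TabularDataSupport and CompositeData are the standard javax.management.openmbean types, the keys match the item names extended above, and everything else here is illustrative.

import javax.management.openmbean.CompositeData;
import javax.management.openmbean.TabularDataSupport;

public class SnapshotDetailsClientSketch
{
    // Iterate the rows returned over JMX and print the newly exposed timestamps.
    static void printSnapshots(TabularDataSupport snapshots)
    {
        for (Object row : snapshots.values())
        {
            CompositeData data = (CompositeData) row;
            System.out.printf("%s.%s created_at=%s expires_at=%s%n",
                              data.get("Keyspace name"),
                              data.get("Column family name"),
                              data.get("Creation time"),
                              data.get("Expiration time"));
        }
    }
}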
diff --git a/src/java/org/apache/cassandra/db/StorageHook.java b/src/java/org/apache/cassandra/db/StorageHook.java
index be1d0bf..c998338 100644
--- a/src/java/org/apache/cassandra/db/StorageHook.java
+++ b/src/java/org/apache/cassandra/db/StorageHook.java
@@ -84,7 +84,7 @@
                                                          boolean reversed,
                                                          SSTableReadsListener listener)
             {
-                return sstable.iterator(key, slices, selectedColumns, reversed, listener);
+                return sstable.rowIterator(key, slices, selectedColumns, reversed, listener);
             }
         };
     }
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index e3f7fba..fd2145b 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -17,15 +17,17 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOError;
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -45,10 +47,11 @@
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 import com.google.common.collect.SetMultimap;
 import com.google.common.collect.Sets;
 import com.google.common.io.ByteStreams;
-import com.google.common.util.concurrent.ListenableFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,22 +69,29 @@
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
 import org.apache.cassandra.db.compaction.CompactionHistoryTabularData;
 import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
+import org.apache.cassandra.db.marshal.TupleType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.db.rows.Rows;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.LocalPartitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.sstable.SSTableId;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
 import org.apache.cassandra.io.util.DataInputBuffer;
 import org.apache.cassandra.io.util.DataOutputBuffer;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.RebufferingInputStream;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.metrics.RestorableMeter;
+import org.apache.cassandra.metrics.TopPartitionTracker;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.Functions;
@@ -95,8 +105,15 @@
 import org.apache.cassandra.schema.Types;
 import org.apache.cassandra.schema.Views;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.Ballot;
 import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.Commit.Accepted;
+import org.apache.cassandra.service.paxos.Commit.AcceptedWithTTL;
+import org.apache.cassandra.service.paxos.Commit.Committed;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
 import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosRows;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedIndex;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -104,13 +121,23 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MD5Digest;
 import org.apache.cassandra.utils.Pair;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
 
 import static java.lang.String.format;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonMap;
+import static org.apache.cassandra.config.Config.PaxosStatePurging.legacy;
+import static org.apache.cassandra.config.DatabaseDescriptor.paxosStatePurging;
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
+import static org.apache.cassandra.cql3.QueryProcessor.executeInternalWithNowInSec;
 import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal;
+import static org.apache.cassandra.service.paxos.Commit.latest;
+import static org.apache.cassandra.utils.CassandraVersion.NULL_VERSION;
+import static org.apache.cassandra.utils.CassandraVersion.UNREADABLE_VERSION;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.FBUtilities.now;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public final class SystemKeyspace
 {
@@ -120,25 +147,17 @@
 
     private static final Logger logger = LoggerFactory.getLogger(SystemKeyspace.class);
 
-    // Used to indicate that there was a previous version written to the legacy (pre 1.2)
-    // system.Versions table, but that we cannot read it. Suffice to say, any upgrade should
-    // proceed through 1.2.x before upgrading to the current version.
-    public static final CassandraVersion UNREADABLE_VERSION = new CassandraVersion("0.0.0-unknown");
-
-    // Used to indicate that no previous version information was found. When encountered, we assume that
-    // Cassandra was not previously installed and we're in the process of starting a fresh node.
-    public static final CassandraVersion NULL_VERSION = new CassandraVersion("0.0.0-absent");
-
     public static final CassandraVersion CURRENT_VERSION = new CassandraVersion(FBUtilities.getReleaseVersionString());
 
     public static final String BATCHES = "batches";
     public static final String PAXOS = "paxos";
+    public static final String PAXOS_REPAIR_HISTORY = "paxos_repair_history";
     public static final String BUILT_INDEXES = "IndexInfo";
     public static final String LOCAL = "local";
     public static final String PEERS_V2 = "peers_v2";
     public static final String PEER_EVENTS_V2 = "peer_events_v2";
     public static final String COMPACTION_HISTORY = "compaction_history";
-    public static final String SSTABLE_ACTIVITY = "sstable_activity";
+    public static final String SSTABLE_ACTIVITY_V2 = "sstable_activity_v2"; // v2 has modified generation column type (v1 - int, v2 - text), see CASSANDRA-17048
     public static final String TABLE_ESTIMATES = "table_estimates";
     public static final String TABLE_ESTIMATES_TYPE_PRIMARY = "primary";
     public static final String TABLE_ESTIMATES_TYPE_LOCAL_PRIMARY = "local_primary";
@@ -148,6 +167,7 @@
     public static final String BUILT_VIEWS = "built_views";
     public static final String PREPARED_STATEMENTS = "prepared_statements";
     public static final String REPAIRS = "repairs";
+    public static final String TOP_PARTITIONS = "top_partitions";
 
     /**
      * By default the system keyspace tables should be stored in a single data directory to allow the server
@@ -165,6 +185,7 @@
     @Deprecated public static final String LEGACY_TRANSFERRED_RANGES = "transferred_ranges";
     @Deprecated public static final String LEGACY_AVAILABLE_RANGES = "available_ranges";
     @Deprecated public static final String LEGACY_SIZE_ESTIMATES = "size_estimates";
+    @Deprecated public static final String LEGACY_SSTABLE_ACTIVITY = "sstable_activity";
 
 
     public static final TableMetadata Batches =
@@ -186,6 +207,7 @@
                 + "row_key blob,"
                 + "cf_id UUID,"
                 + "in_progress_ballot timeuuid,"
+                + "in_progress_read_ballot timeuuid,"
                 + "most_recent_commit blob,"
                 + "most_recent_commit_at timeuuid,"
                 + "most_recent_commit_version int,"
@@ -194,6 +216,7 @@
                 + "proposal_version int,"
                 + "PRIMARY KEY ((row_key), cf_id))")
                 .compaction(CompactionParams.lcs(emptyMap()))
+                .indexes(PaxosUncommittedIndex.indexes())
                 .build();
 
     private static final TableMetadata BuiltIndexes =
@@ -206,6 +229,17 @@
               + "PRIMARY KEY ((table_name), index_name)) ")
               .build();
 
+    private static final TableMetadata PaxosRepairHistoryTable =
+        parse(PAXOS_REPAIR_HISTORY,
+                "paxos repair history",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "table_name text,"
+                + "points frozen<list<tuple<blob, timeuuid>>>, "
+                + "PRIMARY KEY (keyspace_name, table_name))"
+                + "WITH COMMENT='Last successful paxos repairs by range'")
+        .build();
+
     private static final TableMetadata Local =
         parse(LOCAL,
                 "information about the local node",
@@ -259,7 +293,7 @@
                 "CREATE TABLE %s ("
                 + "peer inet,"
                 + "peer_port int,"
-                + "hints_dropped map<uuid, int>,"
+                + "hints_dropped map<timeuuid, int>,"
                 + "PRIMARY KEY ((peer), peer_port))")
                 .build();
 
@@ -267,7 +301,7 @@
         parse(COMPACTION_HISTORY,
                 "week-long compaction history",
                 "CREATE TABLE %s ("
-                + "id uuid,"
+                + "id timeuuid,"
                 + "bytes_in bigint,"
                 + "bytes_out bigint,"
                 + "columnfamily_name text,"
@@ -278,8 +312,8 @@
                 .defaultTimeToLive((int) TimeUnit.DAYS.toSeconds(7))
                 .build();
 
-    private static final TableMetadata SSTableActivity =
-        parse(SSTABLE_ACTIVITY,
+    private static final TableMetadata LegacySSTableActivity =
+        parse(LEGACY_SSTABLE_ACTIVITY,
                 "historic sstable read rates",
                 "CREATE TABLE %s ("
                 + "keyspace_name text,"
@@ -290,6 +324,18 @@
                 + "PRIMARY KEY ((keyspace_name, columnfamily_name, generation)))")
                 .build();
 
+    private static final TableMetadata SSTableActivity =
+        parse(SSTABLE_ACTIVITY_V2,
+                "historic sstable read rates",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "table_name text,"
+                + "id text,"
+                + "rate_120m double,"
+                + "rate_15m double,"
+                + "PRIMARY KEY ((keyspace_name, table_name, id)))")
+                .build();
+
     @Deprecated
     private static final TableMetadata LegacySizeEstimates =
         parse(LEGACY_SIZE_ESTIMATES,
@@ -363,6 +409,19 @@
                 + "PRIMARY KEY ((keyspace_name), view_name))")
                 .build();
 
+    private static final TableMetadata TopPartitions =
+        parse(TOP_PARTITIONS,
+                "Stores the top partitions",
+                "CREATE TABLE  %s ("
+                + "keyspace_name text,"
+                + "table_name text,"
+                + "top_type text,"
+                + "top frozen<list<tuple<text, bigint>>>,"
+                + "last_update timestamp,"
+                + "PRIMARY KEY (keyspace_name, table_name, top_type))")
+                .build();
+
+
     private static final TableMetadata PreparedStatements =
         parse(PREPARED_STATEMENTS,
                 "prepared statements",
@@ -413,7 +472,7 @@
             "events related to peers",
             "CREATE TABLE %s ("
             + "peer inet,"
-            + "hints_dropped map<uuid, int>,"
+            + "hints_dropped map<timeuuid, int>,"
             + "PRIMARY KEY ((peer)))")
             .build();
 
@@ -458,12 +517,14 @@
         return Tables.of(BuiltIndexes,
                          Batches,
                          Paxos,
+                         PaxosRepairHistoryTable,
                          Local,
                          PeersV2,
                          LegacyPeers,
                          PeerEventsV2,
                          LegacyPeerEvents,
                          CompactionHistory,
+                         LegacySSTableActivity,
                          SSTableActivity,
                          LegacySizeEstimates,
                          TableEstimates,
@@ -474,7 +535,8 @@
                          ViewBuildsInProgress,
                          BuiltViews,
                          PreparedStatements,
-                         Repairs);
+                         Repairs,
+                         TopPartitions);
     }
 
     private static Functions functions()
@@ -499,11 +561,6 @@
         DECOMMISSIONED
     }
 
-    public static void finishStartup()
-    {
-        Schema.instance.saveSystemKeyspace();
-    }
-
     public static void persistLocalMetadata()
     {
         persistLocalMetadata(UUID::randomUUID);
@@ -538,7 +595,7 @@
                             snitch.getLocalDatacenter(),
                             snitch.getLocalRack(),
                             DatabaseDescriptor.getPartitioner().getClass().getName(),
-                            DatabaseDescriptor.getRpcAddress(),
+                            FBUtilities.getJustBroadcastNativeAddress(),
                             DatabaseDescriptor.getNativeTransportPort(),
                             FBUtilities.getJustBroadcastAddress(),
                             DatabaseDescriptor.getStoragePort(),
@@ -565,7 +622,7 @@
             return;
         String req = "INSERT INTO system.%s (id, keyspace_name, columnfamily_name, compacted_at, bytes_in, bytes_out, rows_merged) VALUES (?, ?, ?, ?, ?, ?, ?)";
         executeInternal(format(req, COMPACTION_HISTORY),
-                        UUIDGen.getTimeUUID(),
+                        nextTimeUUID(),
                         ksname,
                         cfname,
                         ByteBufferUtil.bytes(compactedAt),
@@ -765,9 +822,9 @@
             return;
 
         String req = "INSERT INTO system.%s (peer, tokens) VALUES (?, ?)";
-        executeInternal(String.format(req, LEGACY_PEERS), ep.address, tokensAsSet(tokens));
+        executeInternal(String.format(req, LEGACY_PEERS), ep.getAddress(), tokensAsSet(tokens));
         req = "INSERT INTO system.%s (peer, peer_port, tokens) VALUES (?, ?, ?)";
-        executeInternal(String.format(req, PEERS_V2), ep.address, ep.port, tokensAsSet(tokens));
+        executeInternal(String.format(req, PEERS_V2), ep.getAddress(), ep.getPort(), tokensAsSet(tokens));
     }
 
     public static synchronized boolean updatePreferredIP(InetAddressAndPort ep, InetAddressAndPort preferred_ip)
@@ -776,9 +833,9 @@
             return false;
 
         String req = "INSERT INTO system.%s (peer, preferred_ip) VALUES (?, ?)";
-        executeInternal(String.format(req, LEGACY_PEERS), ep.address, preferred_ip.address);
+        executeInternal(String.format(req, LEGACY_PEERS), ep.getAddress(), preferred_ip.getAddress());
         req = "INSERT INTO system.%s (peer, peer_port, preferred_ip, preferred_port) VALUES (?, ?, ?, ?)";
-        executeInternal(String.format(req, PEERS_V2), ep.address, ep.port, preferred_ip.address, preferred_ip.port);
+        executeInternal(String.format(req, PEERS_V2), ep.getAddress(), ep.getPort(), preferred_ip.getAddress(), preferred_ip.getPort());
         forceBlockingFlush(LEGACY_PEERS, PEERS_V2);
         return true;
     }
@@ -789,14 +846,14 @@
             return;
 
         String req = "INSERT INTO system.%s (peer, %s) VALUES (?, ?)";
-        executeInternal(String.format(req, LEGACY_PEERS, columnName), ep.address, value);
+        executeInternal(String.format(req, LEGACY_PEERS, columnName), ep.getAddress(), value);
         //This column doesn't match across the two tables
         if (columnName.equals("rpc_address"))
         {
             columnName = "native_address";
         }
         req = "INSERT INTO system.%s (peer, peer_port, %s) VALUES (?, ?, ?)";
-        executeInternal(String.format(req, PEERS_V2, columnName), ep.address, ep.port, value);
+        executeInternal(String.format(req, PEERS_V2, columnName), ep.getAddress(), ep.getPort(), value);
     }
 
     public static synchronized void updatePeerNativeAddress(InetAddressAndPort ep, InetAddressAndPort address)
@@ -805,19 +862,19 @@
             return;
 
         String req = "INSERT INTO system.%s (peer, rpc_address) VALUES (?, ?)";
-        executeInternal(String.format(req, LEGACY_PEERS), ep.address, address.address);
+        executeInternal(String.format(req, LEGACY_PEERS), ep.getAddress(), address.getAddress());
         req = "INSERT INTO system.%s (peer, peer_port, native_address, native_port) VALUES (?, ?, ?, ?)";
-        executeInternal(String.format(req, PEERS_V2), ep.address, ep.port, address.address, address.port);
+        executeInternal(String.format(req, PEERS_V2), ep.getAddress(), ep.getPort(), address.getAddress(), address.getPort());
     }
 
 
-    public static synchronized void updateHintsDropped(InetAddressAndPort ep, UUID timePeriod, int value)
+    public static synchronized void updateHintsDropped(InetAddressAndPort ep, TimeUUID timePeriod, int value)
     {
         // with 30 day TTL
         String req = "UPDATE system.%s USING TTL 2592000 SET hints_dropped[ ? ] = ? WHERE peer = ?";
-        executeInternal(String.format(req, LEGACY_PEER_EVENTS), timePeriod, value, ep.address);
+        executeInternal(String.format(req, LEGACY_PEER_EVENTS), timePeriod, value, ep.getAddress());
         req = "UPDATE system.%s USING TTL 2592000 SET hints_dropped[ ? ] = ? WHERE peer = ? AND peer_port = ?";
-        executeInternal(String.format(req, PEER_EVENTS_V2), timePeriod, value, ep.address, ep.port);
+        executeInternal(String.format(req, PEER_EVENTS_V2), timePeriod, value, ep.getAddress(), ep.getPort());
     }
 
     public static synchronized void updateSchemaVersion(UUID version)
@@ -849,12 +906,12 @@
     /**
      * Remove stored tokens being used by another node
      */
-    public static synchronized void removeEndpoint(InetAddressAndPort ep)
+    public static synchronized void removeEndpoint(InetSocketAddress ep)
     {
         String req = "DELETE FROM system.%s WHERE peer = ?";
-        executeInternal(String.format(req, LEGACY_PEERS), ep.address);
+        executeInternal(String.format(req, LEGACY_PEERS), ep.getAddress());
         req = String.format("DELETE FROM system.%s WHERE peer = ? AND peer_port = ?", PEERS_V2);
-        executeInternal(req, ep.address, ep.port);
+        executeInternal(req, ep.getAddress(), ep.getPort());
         forceBlockingFlush(LEGACY_PEERS, PEERS_V2);
     }
 
@@ -878,11 +935,13 @@
     {
         if (!DatabaseDescriptor.isUnsafeSystem())
         {
-            List<ListenableFuture<CommitLogPosition>> futures = new ArrayList<>();
+            List<Future<CommitLogPosition>> futures = new ArrayList<>();
 
             for (String cfname : cfnames)
             {
-                futures.add(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(cfname).forceFlush());
+                futures.add(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME)
+                                    .getColumnFamilyStore(cfname)
+                                    .forceFlush(ColumnFamilyStore.FlushReason.INTERNALLY_FORCED));
             }
             FBUtilities.waitOnFutures(futures);
         }
@@ -936,9 +995,9 @@
     public static InetAddressAndPort getPreferredIP(InetAddressAndPort ep)
     {
         Preconditions.checkState(DatabaseDescriptor.isDaemonInitialized()); // Make sure being used as a daemon, not a tool
-        
+
         String req = "SELECT preferred_ip, preferred_port FROM system.%s WHERE peer=? AND peer_port = ?";
-        UntypedResultSet result = executeInternal(String.format(req, PEERS_V2), ep.address, ep.port);
+        UntypedResultSet result = executeInternal(String.format(req, PEERS_V2), ep.getAddress(), ep.getPort());
         if (!result.isEmpty() && result.one().has("preferred_ip"))
         {
             UntypedResultSet.Row row = result.one();
@@ -985,7 +1044,7 @@
                 return CURRENT_VERSION;
             }
             String req = "SELECT release_version FROM system.%s WHERE peer=? AND peer_port=?";
-            UntypedResultSet result = executeInternal(String.format(req, PEERS_V2), ep.address, ep.port);
+            UntypedResultSet result = executeInternal(String.format(req, PEERS_V2), ep.getAddress(), ep.getPort());
             if (result != null && result.one().has("release_version"))
             {
                 return new CassandraVersion(result.one().getString("release_version"));
@@ -1061,13 +1120,13 @@
             // seconds-since-epoch isn't a foolproof new generation
             // (where foolproof is "guaranteed to be larger than the last one seen at this ip address"),
             // but it's as close as sanely possible
-            generation = (int) (System.currentTimeMillis() / 1000);
+            generation = (int) (currentTimeMillis() / 1000);
         }
         else
         {
             // Other nodes will ignore gossip messages about a node whose generation is lower than one previously seen.
             final int storedGeneration = result.one().getInt("gossip_generation") + 1;
-            final int now = (int) (System.currentTimeMillis() / 1000);
+            final int now = (int) (currentTimeMillis() / 1000);
             if (storedGeneration >= now)
             {
                 logger.warn("Using stored Gossip Generation {} as it is greater than current system time {}.  See CASSANDRA-3654 if you experience problems",
@@ -1202,6 +1261,20 @@
     }
 
     /**
+     * Gets the schema version or null if missing
+     */
+    public static UUID getSchemaVersion()
+    {
+        String req = "SELECT schema_version FROM system.%s WHERE key='%s'";
+        UntypedResultSet result = executeInternal(format(req, LOCAL, LOCAL));
+
+        if (!result.isEmpty() && result.one().has("schema_version"))
+            return result.one().getUUID("schema_version");
+
+        return null;
+    }
+
+    /**
      * Gets the stored rack for the local node, or null if none have been set yet.
      */
     public static String getRack()
@@ -1231,72 +1304,178 @@
         return null;
     }
 
-    public static PaxosState loadPaxosState(DecoratedKey key, TableMetadata metadata, int nowInSec)
+    /**
+     * Load the current paxos state for the table and key
+     */
+    public static PaxosState.Snapshot loadPaxosState(DecoratedKey partitionKey, TableMetadata metadata, int nowInSec)
     {
-        String req = "SELECT * FROM system.%s WHERE row_key = ? AND cf_id = ?";
-        UntypedResultSet results = QueryProcessor.executeInternalWithNow(nowInSec, System.nanoTime(), format(req, PAXOS), key.getKey(), metadata.id.asUUID());
-        if (results.isEmpty())
-            return new PaxosState(key, metadata);
-        UntypedResultSet.Row row = results.one();
+        String cql = "SELECT * FROM system." + PAXOS + " WHERE row_key = ? AND cf_id = ?";
+        List<Row> results = QueryProcessor.executeInternalRawWithNow(nowInSec, cql, partitionKey.getKey(), metadata.id.asUUID()).get(partitionKey);
+        if (results == null || results.isEmpty())
+        {
+            Committed noneCommitted = Committed.none(partitionKey, metadata);
+            return new PaxosState.Snapshot(Ballot.none(), Ballot.none(), null, noneCommitted);
+        }
 
-        Commit promised = row.has("in_progress_ballot")
-                        ? new Commit(row.getUUID("in_progress_ballot"), new PartitionUpdate.Builder(metadata, key, metadata.regularAndStaticColumns(), 1).build())
-                        : Commit.emptyCommit(key, metadata);
+        Row row = results.get(0);
+
+        Ballot promisedWrite = PaxosRows.getWritePromise(row);
+        Ballot promised = latest(promisedWrite, PaxosRows.getPromise(row));
+
         // either we have both a recently accepted ballot and update or we have neither
-        Commit accepted = row.has("proposal_version") && row.has("proposal")
-                        ? new Commit(row.getUUID("proposal_ballot"),
-                                     PartitionUpdate.fromBytes(row.getBytes("proposal"), row.getInt("proposal_version")))
-                        : Commit.emptyCommit(key, metadata);
-        // either most_recent_commit and most_recent_commit_at will both be set, or neither
-        Commit mostRecent = row.has("most_recent_commit_version") && row.has("most_recent_commit")
-                          ? new Commit(row.getUUID("most_recent_commit_at"),
-                                       PartitionUpdate.fromBytes(row.getBytes("most_recent_commit"), row.getInt("most_recent_commit_version")))
-                          : Commit.emptyCommit(key, metadata);
-        return new PaxosState(promised, accepted, mostRecent);
+        Accepted accepted = PaxosRows.getAccepted(row);
+        Committed committed = PaxosRows.getCommitted(metadata, partitionKey, row);
+        // fix a race with TTL/deletion resolution, where TTL expires after equal deletion is inserted; TTL wins the resolution, and is read using an old ballot's nowInSec
+        if (accepted != null && !accepted.isAfter(committed))
+            accepted = null;
+
+        return new PaxosState.Snapshot(promised, promisedWrite, accepted, committed);
     }
 
-    public static void savePaxosPromise(Commit promise)
+    public static int legacyPaxosTtlSec(TableMetadata metadata)
     {
-        String req = "UPDATE system.%s USING TIMESTAMP ? AND TTL ? SET in_progress_ballot = ? WHERE row_key = ? AND cf_id = ?";
-        executeInternal(format(req, PAXOS),
-                        UUIDGen.microsTimestamp(promise.ballot),
-                        paxosTtlSec(promise.update.metadata()),
-                        promise.ballot,
-                        promise.update.partitionKey().getKey(),
-                        promise.update.metadata().id.asUUID());
+        // keep paxos state around for at least 3h
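+        // i.e. the TTL is max(10800, gc_grace_seconds); with the default gc_grace_seconds of 864000 (10 days), state is kept for 10 days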
+        return Math.max(3 * 3600, metadata.params.gcGraceSeconds);
+    }
+
+    public static void savePaxosWritePromise(DecoratedKey key, TableMetadata metadata, Ballot ballot)
+    {
+        if (paxosStatePurging() == legacy)
+        {
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? AND TTL ? SET in_progress_ballot = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternal(cql,
+                            ballot.unixMicros(),
+                            legacyPaxosTtlSec(metadata),
+                            ballot,
+                            key.getKey(),
+                            metadata.id.asUUID());
+        }
+        else
+        {
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? SET in_progress_ballot = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternal(cql,
+                            ballot.unixMicros(),
+                            ballot,
+                            key.getKey(),
+                            metadata.id.asUUID());
+        }
+    }
+
+    public static void savePaxosReadPromise(DecoratedKey key, TableMetadata metadata, Ballot ballot)
+    {
+        if (paxosStatePurging() == legacy)
+        {
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? AND TTL ? SET in_progress_read_ballot = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternal(cql,
+                            ballot.unixMicros(),
+                            legacyPaxosTtlSec(metadata),
+                            ballot,
+                            key.getKey(),
+                            metadata.id.asUUID());
+        }
+        else
+        {
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? SET in_progress_read_ballot = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternal(cql,
+                            ballot.unixMicros(),
+                            ballot,
+                            key.getKey(),
+                            metadata.id.asUUID());
+        }
     }
 
     public static void savePaxosProposal(Commit proposal)
     {
-        executeInternal(format("UPDATE system.%s USING TIMESTAMP ? AND TTL ? SET proposal_ballot = ?, proposal = ?, proposal_version = ? WHERE row_key = ? AND cf_id = ?", PAXOS),
-                        UUIDGen.microsTimestamp(proposal.ballot),
-                        paxosTtlSec(proposal.update.metadata()),
-                        proposal.ballot,
-                        PartitionUpdate.toBytes(proposal.update, MessagingService.current_version),
-                        MessagingService.current_version,
-                        proposal.update.partitionKey().getKey(),
-                        proposal.update.metadata().id.asUUID());
-    }
-
-    public static int paxosTtlSec(TableMetadata metadata)
-    {
-        // keep paxos state around for at least 3h
-        return Math.max(3 * 3600, metadata.params.gcGraceSeconds);
+        if (proposal instanceof AcceptedWithTTL)
+        {
+            int localDeletionTime = ((Commit.AcceptedWithTTL) proposal).localDeletionTime;
+            int ttlInSec = legacyPaxosTtlSec(proposal.update.metadata());
+            int nowInSec = localDeletionTime - ttlInSec;
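+            // executing with this nowInSec makes the TTL'd cells expire at the proposal's original localDeletionTime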
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? AND TTL ? SET proposal_ballot = ?, proposal = ?, proposal_version = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternalWithNowInSec(cql,
+                                        nowInSec,
+                                        proposal.ballot.unixMicros(),
+                                        ttlInSec,
+                                        proposal.ballot,
+                                        PartitionUpdate.toBytes(proposal.update, MessagingService.current_version),
+                                        MessagingService.current_version,
+                                        proposal.update.partitionKey().getKey(),
+                                        proposal.update.metadata().id.asUUID());
+        }
+        else
+        {
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? SET proposal_ballot = ?, proposal = ?, proposal_version = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternal(cql,
+                            proposal.ballot.unixMicros(),
+                            proposal.ballot,
+                            PartitionUpdate.toBytes(proposal.update, MessagingService.current_version),
+                            MessagingService.current_version,
+                            proposal.update.partitionKey().getKey(),
+                            proposal.update.metadata().id.asUUID());
+        }
     }
 
     public static void savePaxosCommit(Commit commit)
     {
         // We always erase the last proposal (using the commit timestamp so as not to erase a more recent proposal in case the commit is old),
         // even though that's really just an optimization, since SP.beginAndRepairPaxos will exclude accepted proposals older than the MRC.
-        String cql = "UPDATE system.%s USING TIMESTAMP ? AND TTL ? SET proposal_ballot = null, proposal = null, most_recent_commit_at = ?, most_recent_commit = ?, most_recent_commit_version = ? WHERE row_key = ? AND cf_id = ?";
-        executeInternal(format(cql, PAXOS),
-                        UUIDGen.microsTimestamp(commit.ballot),
-                        paxosTtlSec(commit.update.metadata()),
-                        commit.ballot,
-                        PartitionUpdate.toBytes(commit.update, MessagingService.current_version),
-                        MessagingService.current_version,
-                        commit.update.partitionKey().getKey(),
-                        commit.update.metadata().id.asUUID());
+        if (commit instanceof Commit.CommittedWithTTL)
+        {
+            int localDeletionTime = ((Commit.CommittedWithTTL) commit).localDeletionTime;
+            int ttlInSec = legacyPaxosTtlSec(commit.update.metadata());
+            int nowInSec = localDeletionTime - ttlInSec;
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? AND TTL ? SET proposal_ballot = null, proposal = null, proposal_version = null, most_recent_commit_at = ?, most_recent_commit = ?, most_recent_commit_version = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternalWithNowInSec(cql,
+                            nowInSec,
+                            commit.ballot.unixMicros(),
+                            ttlInSec,
+                            commit.ballot,
+                            PartitionUpdate.toBytes(commit.update, MessagingService.current_version),
+                            MessagingService.current_version,
+                            commit.update.partitionKey().getKey(),
+                            commit.update.metadata().id.asUUID());
+        }
+        else
+        {
+            String cql = "UPDATE system." + PAXOS + " USING TIMESTAMP ? SET proposal_ballot = null, proposal = null, proposal_version = null, most_recent_commit_at = ?, most_recent_commit = ?, most_recent_commit_version = ? WHERE row_key = ? AND cf_id = ?";
+            executeInternal(cql,
+                            commit.ballot.unixMicros(),
+                            commit.ballot,
+                            PartitionUpdate.toBytes(commit.update, MessagingService.current_version),
+                            MessagingService.current_version,
+                            commit.update.partitionKey().getKey(),
+                            commit.update.metadata().id.asUUID());
+        }
+    }
+
+    @VisibleForTesting
+    public static void savePaxosRepairHistory(String keyspace, String table, PaxosRepairHistory history, boolean flush)
+    {
+        String cql = "INSERT INTO system.%s (keyspace_name, table_name, points) VALUES (?, ?, ?)";
+        executeInternal(String.format(cql, PAXOS_REPAIR_HISTORY), keyspace, table, history.toTupleBufferList());
+        if (flush)
+            flushPaxosRepairHistory();
+    }
+
+    public static void flushPaxosRepairHistory()
+    {
+        Schema.instance.getColumnFamilyStoreInstance(PaxosRepairHistoryTable.id)
+                       .forceBlockingFlush(ColumnFamilyStore.FlushReason.INTERNALLY_FORCED);
+    }
+
+    public static PaxosRepairHistory loadPaxosRepairHistory(String keyspace, String table)
+    {
+        if (SchemaConstants.LOCAL_SYSTEM_KEYSPACE_NAMES.contains(keyspace))
+            return PaxosRepairHistory.EMPTY;
+
+        UntypedResultSet results = executeInternal(String.format("SELECT * FROM system.%s WHERE keyspace_name=? AND table_name=?", PAXOS_REPAIR_HISTORY), keyspace, table);
+        if (results.isEmpty())
+            return PaxosRepairHistory.EMPTY;
+
+        UntypedResultSet.Row row = Iterables.getOnlyElement(results);
+        List<ByteBuffer> points = row.getList("points", BytesType.instance);
+
+        return PaxosRepairHistory.fromTupleBufferList(points);
     }
 
     /**
@@ -1304,12 +1483,12 @@
      * from values in system.sstable_activity if present.
      * @param keyspace the keyspace the sstable belongs to
      * @param table the table the sstable belongs to
-     * @param generation the generation number for the sstable
+     * @param id the identifier of the sstable
      */
-    public static RestorableMeter getSSTableReadMeter(String keyspace, String table, int generation)
+    public static RestorableMeter getSSTableReadMeter(String keyspace, String table, SSTableId id)
     {
-        String cql = "SELECT * FROM system.%s WHERE keyspace_name=? and columnfamily_name=? and generation=?";
-        UntypedResultSet results = executeInternal(format(cql, SSTABLE_ACTIVITY), keyspace, table, generation);
+        String cql = "SELECT * FROM system.%s WHERE keyspace_name=? and table_name=? and id=?";
+        UntypedResultSet results = executeInternal(format(cql, SSTABLE_ACTIVITY_V2), keyspace, table, id.toString());
 
         if (results.isEmpty())
             return new RestorableMeter();
@@ -1323,25 +1502,45 @@
     /**
      * Writes the current read rates for a given SSTable to system.sstable_activity
      */
-    public static void persistSSTableReadMeter(String keyspace, String table, int generation, RestorableMeter meter)
+    public static void persistSSTableReadMeter(String keyspace, String table, SSTableId id, RestorableMeter meter)
     {
         // Store values with a ten-day TTL (864000 seconds) to handle corner cases where cleanup might not occur
-        String cql = "INSERT INTO system.%s (keyspace_name, columnfamily_name, generation, rate_15m, rate_120m) VALUES (?, ?, ?, ?, ?) USING TTL 864000";
-        executeInternal(format(cql, SSTABLE_ACTIVITY),
+        String cql = "INSERT INTO system.%s (keyspace_name, table_name, id, rate_15m, rate_120m) VALUES (?, ?, ?, ?, ?) USING TTL 864000";
+        executeInternal(format(cql, SSTABLE_ACTIVITY_V2),
                         keyspace,
                         table,
-                        generation,
+                        id.toString(),
                         meter.fifteenMinuteRate(),
                         meter.twoHourRate());
+
+        if (!DatabaseDescriptor.isUUIDSSTableIdentifiersEnabled() && id instanceof SequenceBasedSSTableId)
+        {
+            // also write to the legacy table so that a downgrade remains possible until UUID-based sstable identifiers
+            // are enabled in cassandra.yaml; see the discussion on CASSANDRA-17048
+            cql = "INSERT INTO system.%s (keyspace_name, columnfamily_name, generation, rate_15m, rate_120m) VALUES (?, ?, ?, ?, ?) USING TTL 864000";
+            executeInternal(format(cql, LEGACY_SSTABLE_ACTIVITY),
+                            keyspace,
+                            table,
+                            ((SequenceBasedSSTableId) id).generation,
+                            meter.fifteenMinuteRate(),
+                            meter.twoHourRate());
+        }
     }
 
     /**
      * Clears persisted read rates from system.sstable_activity for SSTables that have been deleted.
      */
-    public static void clearSSTableReadMeter(String keyspace, String table, int generation)
+    public static void clearSSTableReadMeter(String keyspace, String table, SSTableId id)
     {
-        String cql = "DELETE FROM system.%s WHERE keyspace_name=? AND columnfamily_name=? and generation=?";
-        executeInternal(format(cql, SSTABLE_ACTIVITY), keyspace, table, generation);
+        String cql = "DELETE FROM system.%s WHERE keyspace_name=? AND table_name=? and id=?";
+        executeInternal(format(cql, SSTABLE_ACTIVITY_V2), keyspace, table, id.toString());
+        if (!DatabaseDescriptor.isUUIDSSTableIdentifiersEnabled() && id instanceof SequenceBasedSSTableId)
+        {
+            // also clear the legacy table so that a downgrade remains possible until UUID-based sstable identifiers
+            // are enabled in cassandra.yaml; see the discussion on CASSANDRA-17048
+            cql = "DELETE FROM system.%s WHERE keyspace_name=? AND columnfamily_name=? and generation=?";
+            executeInternal(format(cql, LEGACY_SSTABLE_ACTIVITY), keyspace, table, ((SequenceBasedSSTableId) id).generation);
+        }
     }
 
     /**
@@ -1483,9 +1682,9 @@
         {
             rangesToUpdate.add(rangeToBytes(range));
         }
-        executeInternal(format(cql, LEGACY_TRANSFERRED_RANGES), rangesToUpdate, streamOperation.getDescription(), peer.address, keyspace);
+        executeInternal(format(cql, LEGACY_TRANSFERRED_RANGES), rangesToUpdate, streamOperation.getDescription(), peer.getAddress(), keyspace);
         cql = "UPDATE system.%s SET ranges = ranges + ? WHERE operation = ? AND peer = ? AND peer_port = ? AND keyspace_name = ?";
-        executeInternal(String.format(cql, TRANSFERRED_RANGES_V2), rangesToUpdate, streamOperation.getDescription(), peer.address, peer.port, keyspace);
+        executeInternal(String.format(cql, TRANSFERRED_RANGES_V2), rangesToUpdate, streamOperation.getDescription(), peer.getAddress(), peer.getPort(), keyspace);
     }
 
     public static synchronized Map<InetAddressAndPort, Set<Range<Token>>> getTransferredRanges(String description, String keyspace, IPartitioner partitioner)
@@ -1531,8 +1730,10 @@
             String snapshotName = Keyspace.getTimestampedSnapshotName(format("upgrade-%s-%s",
                                                                              previous,
                                                                              next));
+
+            Instant creationTime = now();
             for (String keyspace : SchemaConstants.LOCAL_SYSTEM_KEYSPACE_NAMES)
-                Keyspace.open(keyspace).snapshot(snapshotName, null, false, null);
+                Keyspace.open(keyspace).snapshot(snapshotName, null, false, null, null, creationTime);
         }
     }
 
@@ -1559,7 +1760,7 @@
             // from there, but it informs us that this isn't a completely new node.
             for (File dataDirectory : Directories.getKSChildDirectories(SchemaConstants.SYSTEM_KEYSPACE_NAME))
             {
-                if (dataDirectory.getName().equals("Versions") && dataDirectory.listFiles().length > 0)
+                if (dataDirectory.name().equals("Versions") && dataDirectory.tryList().length > 0)
                 {
                     logger.trace("Found unreadable versions info in pre 1.2 system.Versions table");
                     return UNREADABLE_VERSION.toString();
@@ -1667,4 +1868,48 @@
     public static interface TriFunction<A, B, C, D> {
         D accept(A var1, B var2, C var3);
     }
+
+    public static void saveTopPartitions(TableMetadata metadata, String topType, Collection<TopPartitionTracker.TopPartition> topPartitions, long lastUpdate)
+    {
+        String cql = String.format("INSERT INTO %s.%s (keyspace_name, table_name, top_type, top, last_update) values (?, ?, ?, ?, ?)", SchemaConstants.SYSTEM_KEYSPACE_NAME, TOP_PARTITIONS);
+        List<ByteBuffer> tupleList = new ArrayList<>(topPartitions.size());
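+        // each top partition is stored as a (text, bigint) tuple: the partition key rendered as a string and its tracked value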
+        topPartitions.forEach(tp -> {
+            String key = metadata.partitionKeyType.getString(tp.key.getKey());
+            tupleList.add(TupleType.buildValue(new ByteBuffer[] { UTF8Type.instance.decompose(key),
+                                                                  LongType.instance.decompose(tp.value)}));
+        });
+        executeInternal(cql, metadata.keyspace, metadata.name, topType, tupleList, Date.from(Instant.ofEpochMilli(lastUpdate)));
+    }
+
+    public static TopPartitionTracker.StoredTopPartitions getTopPartitions(TableMetadata metadata, String topType)
+    {
+        try
+        {
+            String cql = String.format("SELECT top, last_update FROM %s.%s WHERE keyspace_name = ? and table_name = ? and top_type = ?", SchemaConstants.SYSTEM_KEYSPACE_NAME, TOP_PARTITIONS);
+            UntypedResultSet res = executeInternal(cql, metadata.keyspace, metadata.name, topType);
+            if (res == null || res.isEmpty())
+                return TopPartitionTracker.StoredTopPartitions.EMPTY;
+            UntypedResultSet.Row row = res.one();
+            long lastUpdated = row.getLong("last_update");
+            List<ByteBuffer> top = row.getList("top", BytesType.instance);
+            if (top == null || top.isEmpty())
+                return TopPartitionTracker.StoredTopPartitions.EMPTY;
+
+            List<TopPartitionTracker.TopPartition> topPartitions = new ArrayList<>(top.size());
+            TupleType tupleType = new TupleType(Lists.newArrayList(UTF8Type.instance, LongType.instance));
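+            // tuples were written by saveTopPartitions as (text key, bigint value), so split and compose them accordingly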
+            for (ByteBuffer bb : top)
+            {
+                ByteBuffer[] components = tupleType.split(bb);
+                String keyStr = UTF8Type.instance.compose(components[0]);
+                long value = LongType.instance.compose(components[1]);
+                topPartitions.add(new TopPartitionTracker.TopPartition(metadata.partitioner.decorateKey(metadata.partitionKeyType.fromString(keyStr)), value));
+            }
+            return new TopPartitionTracker.StoredTopPartitions(topPartitions, lastUpdated);
+        }
+        catch (Exception e)
+        {
+            logger.warn("Could not load stored top {} partitions for {}.{}", topType, metadata.keyspace, metadata.name, e);
+            return TopPartitionTracker.StoredTopPartitions.EMPTY;
+        }
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspaceMigrator40.java b/src/java/org/apache/cassandra/db/SystemKeyspaceMigrator40.java
deleted file mode 100644
index e0a58ba..0000000
--- a/src/java/org/apache/cassandra/db/SystemKeyspaceMigrator40.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.db;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.Optional;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.db.marshal.BytesType;
-import org.apache.cassandra.db.marshal.Int32Type;
-import org.apache.cassandra.db.marshal.UTF8Type;
-import org.apache.cassandra.db.marshal.UUIDType;
-
-/**
- * Migrate 3.0 versions of some tables to 4.0. In this case it's just extra columns and some keys
- * that are changed.
- *
- * Can't just add the additional columns because they are primary key columns and C* doesn't support changing
- * key columns even if it's just clustering columns.
- */
-public class SystemKeyspaceMigrator40
-{
-    static final String legacyPeersName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_PEERS);
-    static final String peersName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.PEERS_V2);
-    static final String legacyPeerEventsName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_PEER_EVENTS);
-    static final String peerEventsName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.PEER_EVENTS_V2);
-    static final String legacyTransferredRangesName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_TRANSFERRED_RANGES);
-    static final String transferredRangesName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.TRANSFERRED_RANGES_V2);
-    static final String legacyAvailableRangesName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_AVAILABLE_RANGES);
-    static final String availableRangesName = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.AVAILABLE_RANGES_V2);
-
-
-    private static final Logger logger = LoggerFactory.getLogger(SystemKeyspaceMigrator40.class);
-
-    private SystemKeyspaceMigrator40() {}
-
-    public static void migrate()
-    {
-        migratePeers();
-        migratePeerEvents();
-        migrateTransferredRanges();
-        migrateAvailableRanges();
-    }
-
-    private static void migratePeers()
-    {
-        ColumnFamilyStore newPeers = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PEERS_V2);
-
-        if (!newPeers.isEmpty())
-             return;
-
-        logger.info("{} table was empty, migrating legacy {}, if this fails you should fix the issue and then truncate {} to have it try again.",
-                                  peersName, legacyPeersName, peersName);
-
-        String query = String.format("SELECT * FROM %s",
-                                     legacyPeersName);
-
-        String insert = String.format("INSERT INTO %s ( "
-                                      + "peer, "
-                                      + "peer_port, "
-                                      + "data_center, "
-                                      + "host_id, "
-                                      + "preferred_ip, "
-                                      + "preferred_port, "
-                                      + "rack, "
-                                      + "release_version, "
-                                      + "native_address, "
-                                      + "native_port, "
-                                      + "schema_version, "
-                                      + "tokens) "
-                                      + " values ( ?, ?, ? , ? , ?, ?, ?, ?, ?, ?, ?, ?)",
-                                      peersName);
-
-        UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, 1000);
-        int transferred = 0;
-        logger.info("Migrating rows from legacy {} to {}", legacyPeersName, peersName);
-        for (UntypedResultSet.Row row : rows)
-        {
-            logger.debug("Transferring row {}", transferred);
-            QueryProcessor.executeInternal(insert,
-                                           row.has("peer") ? row.getInetAddress("peer") : null,
-                                           DatabaseDescriptor.getStoragePort(),
-                                           row.has("data_center") ? row.getString("data_center") : null,
-                                           row.has("host_id") ? row.getUUID("host_id") : null,
-                                           row.has("preferred_ip") ? row.getInetAddress("preferred_ip") : null,
-                                           DatabaseDescriptor.getStoragePort(),
-                                           row.has("rack") ? row.getString("rack") : null,
-                                           row.has("release_version") ? row.getString("release_version") : null,
-                                           row.has("rpc_address") ? row.getInetAddress("rpc_address") : null,
-                                           DatabaseDescriptor.getNativeTransportPort(),
-                                           row.has("schema_version") ? row.getUUID("schema_version") : null,
-                                           row.has("tokens") ? row.getSet("tokens", UTF8Type.instance) : null);
-            transferred++;
-        }
-        logger.info("Migrated {} rows from legacy {} to {}", transferred, legacyPeersName, peersName);
-    }
-
-    private static void migratePeerEvents()
-    {
-        ColumnFamilyStore newPeerEvents = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PEER_EVENTS_V2);
-
-        if (!newPeerEvents.isEmpty())
-            return;
-
-        logger.info("{} table was empty, migrating legacy {} to {}", peerEventsName, legacyPeerEventsName, peerEventsName);
-
-        String query = String.format("SELECT * FROM %s",
-                                     legacyPeerEventsName);
-
-        String insert = String.format("INSERT INTO %s ( "
-                                      + "peer, "
-                                      + "peer_port, "
-                                      + "hints_dropped) "
-                                      + " values ( ?, ?, ? )",
-                                      peerEventsName);
-
-        UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, 1000);
-        int transferred = 0;
-        for (UntypedResultSet.Row row : rows)
-        {
-            logger.debug("Transferring row {}", transferred);
-            QueryProcessor.executeInternal(insert,
-                                           row.has("peer") ? row.getInetAddress("peer") : null,
-                                           DatabaseDescriptor.getStoragePort(),
-                                           row.has("hints_dropped") ? row.getMap("hints_dropped", UUIDType.instance, Int32Type.instance) : null);
-            transferred++;
-        }
-        logger.info("Migrated {} rows from legacy {} to {}", transferred, legacyPeerEventsName, peerEventsName);
-    }
-
-    static void migrateTransferredRanges()
-    {
-        ColumnFamilyStore newTransferredRanges = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.TRANSFERRED_RANGES_V2);
-
-        if (!newTransferredRanges.isEmpty())
-            return;
-
-        logger.info("{} table was empty, migrating legacy {} to {}", transferredRangesName, legacyTransferredRangesName, transferredRangesName);
-
-        String query = String.format("SELECT * FROM %s",
-                                     legacyTransferredRangesName);
-
-        String insert = String.format("INSERT INTO %s ("
-                                      + "operation, "
-                                      + "peer, "
-                                      + "peer_port, "
-                                      + "keyspace_name, "
-                                      + "ranges) "
-                                      + " values ( ?, ?, ? , ?, ?)",
-                                      transferredRangesName);
-
-        UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, 1000);
-        int transferred = 0;
-        for (UntypedResultSet.Row row : rows)
-        {
-            logger.debug("Transferring row {}", transferred);
-            QueryProcessor.executeInternal(insert,
-                                           row.has("operation") ? row.getString("operation") : null,
-                                           row.has("peer") ? row.getInetAddress("peer") : null,
-                                           DatabaseDescriptor.getStoragePort(),
-                                           row.has("keyspace_name") ? row.getString("keyspace_name") : null,
-                                           row.has("ranges") ? row.getSet("ranges", BytesType.instance) : null);
-            transferred++;
-        }
-
-        logger.info("Migrated {} rows from legacy {} to {}", transferred, legacyTransferredRangesName, transferredRangesName);
-    }
-
-    static void migrateAvailableRanges()
-    {
-        ColumnFamilyStore newAvailableRanges = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.AVAILABLE_RANGES_V2);
-
-        if (!newAvailableRanges.isEmpty())
-            return;
-
-        logger.info("{} table was empty, migrating legacy {} to {}", availableRangesName, legacyAvailableRangesName, availableRangesName);
-
-        String query = String.format("SELECT * FROM %s",
-                                     legacyAvailableRangesName);
-
-        String insert = String.format("INSERT INTO %s ("
-                                      + "keyspace_name, "
-                                      + "full_ranges, "
-                                      + "transient_ranges) "
-                                      + " values ( ?, ?, ? )",
-                                      availableRangesName);
-
-        UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, 1000);
-        int transferred = 0;
-        for (UntypedResultSet.Row row : rows)
-        {
-            logger.debug("Transferring row {}", transferred);
-            String keyspace = row.getString("keyspace_name");
-            Set<ByteBuffer> ranges = Optional.ofNullable(row.getSet("ranges", BytesType.instance)).orElse(Collections.emptySet());
-            QueryProcessor.executeInternal(insert,
-                                           keyspace,
-                                           ranges,
-                                           Collections.emptySet());
-            transferred++;
-        }
-
-        logger.info("Migrated {} rows from legacy {} to {}", transferred, legacyAvailableRangesName, availableRangesName);
-    }
-
-}
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspaceMigrator41.java b/src/java/org/apache/cassandra/db/SystemKeyspaceMigrator41.java
new file mode 100644
index 0000000..bfce780
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/SystemKeyspaceMigrator41.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Optional;
+import java.util.function.Function;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.TimeUUIDType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.FBUtilities;
+
+/**
+ * Migrate 3.0 versions of some tables to 4.1. In this case it's just extra columns and some keys
+ * that have changed.
+ * <p>
+ * Can't just add the additional columns because they are primary key columns and C* doesn't support changing
+ * key columns even if it's just clustering columns.
+ */
+public class SystemKeyspaceMigrator41
+{
+    private static final Logger logger = LoggerFactory.getLogger(SystemKeyspaceMigrator41.class);
+
+    private SystemKeyspaceMigrator41()
+    {
+    }
+
+    public static void migrate()
+    {
+        migratePeers();
+        migratePeerEvents();
+        migrateTransferredRanges();
+        migrateAvailableRanges();
+        migrateSSTableActivity();
+    }
+
+    @VisibleForTesting
+    static void migratePeers()
+    {
+        migrateTable(false,
+                     SystemKeyspace.LEGACY_PEERS,
+                     SystemKeyspace.PEERS_V2,
+                     new String[]{ "peer",
+                                   "peer_port",
+                                   "data_center",
+                                   "host_id",
+                                   "preferred_ip",
+                                   "preferred_port",
+                                   "rack",
+                                   "release_version",
+                                   "native_address",
+                                   "native_port",
+                                   "schema_version",
+                                   "tokens" },
+                     row -> Collections.singletonList(new Object[]{ row.has("peer") ? row.getInetAddress("peer") : null,
+                                                                    DatabaseDescriptor.getStoragePort(),
+                                                                    row.has("data_center") ? row.getString("data_center") : null,
+                                                                    row.has("host_id") ? row.getUUID("host_id") : null,
+                                                                    row.has("preferred_ip") ? row.getInetAddress("preferred_ip") : null,
+                                                                    DatabaseDescriptor.getStoragePort(),
+                                                                    row.has("rack") ? row.getString("rack") : null,
+                                                                    row.has("release_version") ? row.getString("release_version") : null,
+                                                                    row.has("rpc_address") ? row.getInetAddress("rpc_address") : null,
+                                                                    DatabaseDescriptor.getNativeTransportPort(),
+                                                                    row.has("schema_version") ? row.getUUID("schema_version") : null,
+                                                                    row.has("tokens") ? row.getSet("tokens", UTF8Type.instance) : null }));
+    }
+
+    @VisibleForTesting
+    static void migratePeerEvents()
+    {
+        migrateTable(false,
+                     SystemKeyspace.LEGACY_PEER_EVENTS,
+                     SystemKeyspace.PEER_EVENTS_V2,
+                     new String[]{ "peer",
+                                   "peer_port",
+                                   "hints_dropped" },
+                     row -> Collections.singletonList(
+                     new Object[]{ row.has("peer") ? row.getInetAddress("peer") : null,
+                                   DatabaseDescriptor.getStoragePort(),
+                                   row.has("hints_dropped") ? row.getMap("hints_dropped", TimeUUIDType.instance, Int32Type.instance) : null }
+                     ));
+    }
+
+    @VisibleForTesting
+    static void migrateTransferredRanges()
+    {
+        migrateTable(false,
+                     SystemKeyspace.LEGACY_TRANSFERRED_RANGES,
+                     SystemKeyspace.TRANSFERRED_RANGES_V2,
+                     new String[]{ "operation", "peer", "peer_port", "keyspace_name", "ranges" },
+                     row -> Collections.singletonList(new Object[]{ row.has("operation") ? row.getString("operation") : null,
+                                                                    row.has("peer") ? row.getInetAddress("peer") : null,
+                                                                    DatabaseDescriptor.getStoragePort(),
+                                                                    row.has("keyspace_name") ? row.getString("keyspace_name") : null,
+                                                                    row.has("ranges") ? row.getSet("ranges", BytesType.instance) : null }));
+    }
+
+    @VisibleForTesting
+    static void migrateAvailableRanges()
+    {
+        migrateTable(false,
+                     SystemKeyspace.LEGACY_AVAILABLE_RANGES,
+                     SystemKeyspace.AVAILABLE_RANGES_V2,
+                     new String[]{ "keyspace_name", "full_ranges", "transient_ranges" },
+                     row -> Collections.singletonList(new Object[]{ row.getString("keyspace_name"),
+                                                                    Optional.ofNullable(row.getSet("ranges", BytesType.instance)).orElse(Collections.emptySet()),
+                                                                    Collections.emptySet() }));
+    }
+
+    @VisibleForTesting
+    static void migrateSSTableActivity()
+    {
+        String prevVersionString = FBUtilities.getPreviousReleaseVersionString();
+        CassandraVersion prevVersion = prevVersionString != null ? new CassandraVersion(prevVersionString) : CassandraVersion.NULL_VERSION;
+
+        // if we are upgrading from a pre-4.1 version, force repopulation of the table; this covers the case where the
+        // node was upgraded from pre-4.1, then downgraded back to pre-4.1, and then upgraded again
+        migrateTable(CassandraVersion.CASSANDRA_4_1.compareTo(prevVersion) > 0,
+                     SystemKeyspace.LEGACY_SSTABLE_ACTIVITY,
+                     SystemKeyspace.SSTABLE_ACTIVITY_V2,
+                     new String[]{ "keyspace_name", "table_name", "id", "rate_120m", "rate_15m" },
+                     row ->
+                     Collections.singletonList(new Object[]{ row.getString("keyspace_name"),
+                                                             row.getString("columnfamily_name"),
+                                                             new SequenceBasedSSTableId(row.getInt("generation")).toString(),
+                                                             row.has("rate_120m") ? row.getDouble("rate_120m") : null,
+                                                             row.has("rate_15m") ? row.getDouble("rate_15m") : null
+                     })
+        );
+    }
+
+    /**
+     * Perform table migration by reading data from the old table, converting it, and adding to the new table.
+     *
+     * @param truncateIfExists truncate the existing table if it exists before migration; if it is disabled
+     *                         and the new table is not empty, no migration is performed
+     * @param oldName          old table name
+     * @param newName          new table name
+     * @param columns          columns to fill in the new table in the same order as returned by the transformation
+     * @param transformation   transformation function which gets the row from the old table and returns a row for the new table
+     */
+     */
+    @VisibleForTesting
+    static void migrateTable(boolean truncateIfExists, String oldName, String newName, String[] columns, Function<UntypedResultSet.Row, Collection<Object[]>> transformation)
+    {
+        ColumnFamilyStore newTable = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(newName);
+
+        if (!newTable.isEmpty() && !truncateIfExists)
+            return;
+
+        if (truncateIfExists)
+            newTable.truncateBlockingWithoutSnapshot();
+
+        logger.info("{} table was empty, migrating legacy {}, if this fails you should fix the issue and then truncate {} to have it try again.",
+                    newName, oldName, newName);
+
+        String query = String.format("SELECT * FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, oldName);
+        String insert = String.format("INSERT INTO %s.%s (%s) VALUES (%s)", SchemaConstants.SYSTEM_KEYSPACE_NAME, newName,
+                                      StringUtils.join(columns, ", "), StringUtils.repeat("?", ", ", columns.length));
+
+        UntypedResultSet rows = QueryProcessor.executeInternal(query);
+        int transferred = 0;
+        logger.info("Migrating rows from legacy {} to {}", oldName, newName);
+        for (UntypedResultSet.Row row : rows)
+        {
+            logger.debug("Transferring row {}", transferred);
+            for (Object[] newRow : transformation.apply(row))
+                QueryProcessor.executeInternal(insert, newRow);
+            transferred++;
+        }
+
+        logger.info("Migrated {} rows from legacy {} to {}", transferred, oldName, newName);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java b/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java
index 8430541..856b27c 100644
--- a/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java
+++ b/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 
-import org.apache.cassandra.db.marshal.ByteArrayAccessor;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.io.util.DataInputPlus;
diff --git a/src/java/org/apache/cassandra/db/VirtualTablePartitionRangeReadQuery.java b/src/java/org/apache/cassandra/db/VirtualTablePartitionRangeReadQuery.java
deleted file mode 100644
index 48cafa1..0000000
--- a/src/java/org/apache/cassandra/db/VirtualTablePartitionRangeReadQuery.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.filter.DataLimits;
-import org.apache.cassandra.db.filter.RowFilter;
-import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
-import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
-import org.apache.cassandra.db.virtual.VirtualTable;
-import org.apache.cassandra.schema.TableMetadata;
-
-/**
- * A read query that selects a (part of a) range of partitions of a virtual table.
- */
-public class VirtualTablePartitionRangeReadQuery extends VirtualTableReadQuery implements PartitionRangeReadQuery
-{
-    private final DataRange dataRange;
-
-    public static VirtualTablePartitionRangeReadQuery create(TableMetadata metadata,
-                                                             int nowInSec,
-                                                             ColumnFilter columnFilter,
-                                                             RowFilter rowFilter,
-                                                             DataLimits limits,
-                                                             DataRange dataRange)
-    {
-        return new VirtualTablePartitionRangeReadQuery(metadata,
-                                                       nowInSec,
-                                                       columnFilter,
-                                                       rowFilter,
-                                                       limits,
-                                                       dataRange);
-    }
-
-    private VirtualTablePartitionRangeReadQuery(TableMetadata metadata,
-                                                int nowInSec,
-                                                ColumnFilter columnFilter,
-                                                RowFilter rowFilter,
-                                                DataLimits limits,
-                                                DataRange dataRange)
-    {
-        super(metadata, nowInSec, columnFilter, rowFilter, limits);
-        this.dataRange = dataRange;
-    }
-
-    @Override
-    public DataRange dataRange()
-    {
-        return dataRange;
-    }
-
-    @Override
-    public PartitionRangeReadQuery withUpdatedLimit(DataLimits newLimits)
-    {
-        return new VirtualTablePartitionRangeReadQuery(metadata(),
-                                                       nowInSec(),
-                                                       columnFilter(),
-                                                       rowFilter(),
-                                                       newLimits,
-                                                       dataRange());
-    }
-
-    @Override
-    public PartitionRangeReadQuery withUpdatedLimitsAndDataRange(DataLimits newLimits, DataRange newDataRange)
-    {
-        return new VirtualTablePartitionRangeReadQuery(metadata(),
-                                                       nowInSec(),
-                                                       columnFilter(),
-                                                       rowFilter(),
-                                                       newLimits,
-                                                       newDataRange);
-    }
-
-    @Override
-    protected UnfilteredPartitionIterator queryVirtualTable()
-    {
-        VirtualTable view = VirtualKeyspaceRegistry.instance.getTableNullable(metadata().id);
-        return view.select(dataRange, columnFilter());
-    }
-
-    @Override
-    protected void appendCQLWhereClause(StringBuilder sb)
-    {
-        if (dataRange.isUnrestricted() && rowFilter().isEmpty())
-            return;
-
-        sb.append(" WHERE ");
-        // We put the row filter first because the data range can end by "ORDER BY"
-        if (!rowFilter().isEmpty())
-        {
-            sb.append(rowFilter());
-            if (!dataRange.isUnrestricted())
-                sb.append(" AND ");
-        }
-        if (!dataRange.isUnrestricted())
-            sb.append(dataRange.toCQLString(metadata()));
-    }
-}
diff --git a/src/java/org/apache/cassandra/db/VirtualTableReadQuery.java b/src/java/org/apache/cassandra/db/VirtualTableReadQuery.java
deleted file mode 100644
index ad22a58..0000000
--- a/src/java/org/apache/cassandra/db/VirtualTableReadQuery.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.filter.DataLimits;
-import org.apache.cassandra.db.filter.RowFilter;
-import org.apache.cassandra.db.partitions.PartitionIterator;
-import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
-import org.apache.cassandra.exceptions.RequestExecutionException;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.service.ClientState;
-
-/**
- * Base class for the {@code ReadQuery} implementations use to query virtual tables.
- */
-public abstract class VirtualTableReadQuery extends AbstractReadQuery
-{
-    protected VirtualTableReadQuery(TableMetadata metadata,
-                                    int nowInSec,
-                                    ColumnFilter columnFilter,
-                                    RowFilter rowFilter,
-                                    DataLimits limits)
-    {
-        super(metadata, nowInSec, columnFilter, rowFilter, limits);
-    }
-
-    @Override
-    public ReadExecutionController executionController()
-    {
-        return ReadExecutionController.empty();
-    }
-
-    @Override
-    public PartitionIterator execute(ConsistencyLevel consistency,
-                                     ClientState clientState,
-                                     long queryStartNanoTime) throws RequestExecutionException
-    {
-        return executeInternal(executionController());
-    }
-
-    @Override
-    @SuppressWarnings("resource")
-    public UnfilteredPartitionIterator executeLocally(ReadExecutionController executionController)
-    {
-        UnfilteredPartitionIterator resultIterator = queryVirtualTable();
-        return limits().filter(rowFilter().filter(resultIterator, nowInSec()), nowInSec(), selectsFullPartition());
-    }
-
-    protected abstract UnfilteredPartitionIterator queryVirtualTable();
-}
diff --git a/src/java/org/apache/cassandra/db/VirtualTableSinglePartitionReadQuery.java b/src/java/org/apache/cassandra/db/VirtualTableSinglePartitionReadQuery.java
deleted file mode 100644
index ba9441a..0000000
--- a/src/java/org/apache/cassandra/db/VirtualTableSinglePartitionReadQuery.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.apache.cassandra.db.filter.ClusteringIndexFilter;
-import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.filter.DataLimits;
-import org.apache.cassandra.db.filter.RowFilter;
-import org.apache.cassandra.db.partitions.PartitionIterator;
-import org.apache.cassandra.db.partitions.PartitionIterators;
-import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
-import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
-import org.apache.cassandra.db.virtual.VirtualTable;
-import org.apache.cassandra.exceptions.RequestExecutionException;
-import org.apache.cassandra.schema.ColumnMetadata;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.service.ClientState;
-
-/**
- * A read query that selects a (part of a) single partition of a virtual table.
- */
-public class VirtualTableSinglePartitionReadQuery extends VirtualTableReadQuery implements SinglePartitionReadQuery
-{
-    private final DecoratedKey partitionKey;
-    private final ClusteringIndexFilter clusteringIndexFilter;
-
-    public static VirtualTableSinglePartitionReadQuery create(TableMetadata metadata,
-                                                              int nowInSec,
-                                                              ColumnFilter columnFilter,
-                                                              RowFilter rowFilter,
-                                                              DataLimits limits,
-                                                              DecoratedKey partitionKey,
-                                                              ClusteringIndexFilter clusteringIndexFilter)
-    {
-        return new VirtualTableSinglePartitionReadQuery(metadata,
-                                                        nowInSec,
-                                                        columnFilter,
-                                                        rowFilter,
-                                                        limits,
-                                                        partitionKey,
-                                                        clusteringIndexFilter);
-    }
-
-    private VirtualTableSinglePartitionReadQuery(TableMetadata metadata,
-                                                 int nowInSec,
-                                                 ColumnFilter columnFilter,
-                                                 RowFilter rowFilter,
-                                                 DataLimits limits,
-                                                 DecoratedKey partitionKey,
-                                                 ClusteringIndexFilter clusteringIndexFilter)
-    {
-        super(metadata, nowInSec, columnFilter, rowFilter, limits);
-        this.partitionKey = partitionKey;
-        this.clusteringIndexFilter = clusteringIndexFilter;
-    }
-
-    @Override
-    protected void appendCQLWhereClause(StringBuilder sb)
-    {
-        sb.append(" WHERE ");
-
-        sb.append(ColumnMetadata.toCQLString(metadata().partitionKeyColumns())).append(" = ");
-        DataRange.appendKeyString(sb, metadata().partitionKeyType, partitionKey().getKey());
-
-        // We put the row filter first because the clustering index filter can end by "ORDER BY"
-        if (!rowFilter().isEmpty())
-            sb.append(" AND ").append(rowFilter());
-
-        String filterString = clusteringIndexFilter().toCQLString(metadata());
-        if (!filterString.isEmpty())
-            sb.append(" AND ").append(filterString);
-    }
-
-    @Override
-    public ClusteringIndexFilter clusteringIndexFilter()
-    {
-        return clusteringIndexFilter;
-    }
-
-    @Override
-    public boolean selectsFullPartition()
-    {
-        return clusteringIndexFilter.selectsAllPartition() && !rowFilter().hasExpressionOnClusteringOrRegularColumns();
-    }
-
-    @Override
-    public DecoratedKey partitionKey()
-    {
-        return partitionKey;
-    }
-
-    @Override
-    public SinglePartitionReadQuery withUpdatedLimit(DataLimits newLimits)
-    {
-        return new VirtualTableSinglePartitionReadQuery(metadata(),
-                                                        nowInSec(),
-                                                        columnFilter(),
-                                                        rowFilter(),
-                                                        newLimits,
-                                                        partitionKey(),
-                                                        clusteringIndexFilter);
-    }
-
-    @Override
-    public SinglePartitionReadQuery forPaging(Clustering<?> lastReturned, DataLimits limits)
-    {
-        return new VirtualTableSinglePartitionReadQuery(metadata(),
-                                                        nowInSec(),
-                                                        columnFilter(),
-                                                        rowFilter(),
-                                                        limits,
-                                                        partitionKey(),
-                                                      lastReturned == null ? clusteringIndexFilter
-                                                              : clusteringIndexFilter.forPaging(metadata().comparator,
-                                                                                                lastReturned,
-                                                                                                false));
-    }
-
-    @Override
-    protected UnfilteredPartitionIterator queryVirtualTable()
-    {
-        VirtualTable view = VirtualKeyspaceRegistry.instance.getTableNullable(metadata().id);
-        return view.select(partitionKey, clusteringIndexFilter, columnFilter());
-    }
-
-    /**
-     * Groups multiple single partition read queries.
-     */
-    public static class Group extends SinglePartitionReadQuery.Group<VirtualTableSinglePartitionReadQuery>
-    {
-        public static Group create(TableMetadata metadata,
-                                   int nowInSec,
-                                   ColumnFilter columnFilter,
-                                   RowFilter rowFilter,
-                                   DataLimits limits,
-                                   List<DecoratedKey> partitionKeys,
-                                   ClusteringIndexFilter clusteringIndexFilter)
-        {
-            List<VirtualTableSinglePartitionReadQuery> queries = new ArrayList<>(partitionKeys.size());
-            for (DecoratedKey partitionKey : partitionKeys)
-            {
-                queries.add(VirtualTableSinglePartitionReadQuery.create(metadata,
-                                                                        nowInSec,
-                                                                        columnFilter,
-                                                                        rowFilter,
-                                                                        limits,
-                                                                        partitionKey,
-                                                                        clusteringIndexFilter));
-            }
-
-            return new Group(queries, limits);
-        }
-
-        public Group(List<VirtualTableSinglePartitionReadQuery> queries, DataLimits limits)
-        {
-            super(queries, limits);
-        }
-
-        public static Group one(VirtualTableSinglePartitionReadQuery query)
-        {
-            return new Group(Collections.singletonList(query), query.limits());
-        }
-
-        public PartitionIterator execute(ConsistencyLevel consistency, ClientState clientState, long queryStartNanoTime) throws RequestExecutionException
-        {
-            if (queries.size() == 1)
-                return queries.get(0).execute(consistency, clientState, queryStartNanoTime);
-
-            return PartitionIterators.concat(queries.stream()
-                                                    .map(q -> q.execute(consistency, clientState, queryStartNanoTime))
-                                                    .collect(Collectors.toList()));
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/db/WindowsFailedSnapshotTracker.java b/src/java/org/apache/cassandra/db/WindowsFailedSnapshotTracker.java
deleted file mode 100644
index 134fb11..0000000
--- a/src/java/org/apache/cassandra/db/WindowsFailedSnapshotTracker.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.db;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.io.util.FileUtils;
-
-
-public class WindowsFailedSnapshotTracker
-{
-    private static final Logger logger = LoggerFactory.getLogger(WindowsFailedSnapshotTracker.class);
-    private static PrintWriter _failedSnapshotFile;
-
-    @VisibleForTesting
-    // Need to handle null for unit tests
-    public static final String TODELETEFILE = System.getenv("CASSANDRA_HOME") == null
-                 ? ".toDelete"
-                 : System.getenv("CASSANDRA_HOME") + File.separator + ".toDelete";
-
-    public static void deleteOldSnapshots()
-    {
-        if (new File(TODELETEFILE).exists())
-        {
-            try
-            {
-                try (BufferedReader reader = new BufferedReader(new FileReader(TODELETEFILE)))
-                {
-                    String snapshotDirectory;
-                    while ((snapshotDirectory = reader.readLine()) != null)
-                    {
-                        File f = new File(snapshotDirectory);
-
-                        // Skip folders that aren't a subset of temp or a data folder. We don't want people to accidentally
-                        // delete something important by virtue of adding something invalid to the .toDelete file.
-                        boolean validFolder = FileUtils.isSubDirectory(new File(System.getenv("TEMP")), f);
-                        for (String s : DatabaseDescriptor.getAllDataFileLocations())
-                            validFolder |= FileUtils.isSubDirectory(new File(s), f);
-
-                        if (!validFolder)
-                        {
-                            logger.warn("Skipping invalid directory found in .toDelete: {}. Only %TEMP% or data file subdirectories are valid.", f);
-                            continue;
-                        }
-
-                        // Could be a non-existent directory if deletion worked on previous JVM shutdown.
-                        if (f.exists())
-                        {
-                            logger.warn("Discovered obsolete snapshot. Deleting directory [{}]", snapshotDirectory);
-                            FileUtils.deleteRecursive(new File(snapshotDirectory));
-                        }
-                    }
-                }
-
-                // Only delete the old .toDelete file if we succeed in deleting all our known bad snapshots.
-                Files.delete(Paths.get(TODELETEFILE));
-            }
-            catch (IOException e)
-            {
-                logger.warn("Failed to open {}. Obsolete snapshots from previous runs will not be deleted.", TODELETEFILE, e);
-            }
-        }
-
-        try
-        {
-            _failedSnapshotFile = new PrintWriter(new FileWriter(TODELETEFILE, true));
-        }
-        catch (IOException e)
-        {
-            throw new RuntimeException(String.format("Failed to create failed snapshot tracking file [%s]. Aborting", TODELETEFILE));
-        }
-    }
-
-    public static synchronized void handleFailedSnapshot(File dir)
-    {
-        assert _failedSnapshotFile != null : "_failedSnapshotFile not initialized within WindowsFailedSnapshotTracker";
-        FileUtils.deleteRecursiveOnExit(dir);
-        _failedSnapshotFile.println(dir.toString());
-        _failedSnapshotFile.flush();
-    }
-
-    @VisibleForTesting
-    public static void resetForTests()
-    {
-        _failedSnapshotFile.close();
-    }
-}
diff --git a/src/java/org/apache/cassandra/db/aggregation/AggregationSpecification.java b/src/java/org/apache/cassandra/db/aggregation/AggregationSpecification.java
index 7324dfd..0d6c0ee 100644
--- a/src/java/org/apache/cassandra/db/aggregation/AggregationSpecification.java
+++ b/src/java/org/apache/cassandra/db/aggregation/AggregationSpecification.java
@@ -18,11 +18,16 @@
 package org.apache.cassandra.db.aggregation;
 
 import java.io.IOException;
+import java.util.List;
 
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.cql3.selection.Selector;
 import org.apache.cassandra.db.ClusteringComparator;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.TableMetadata;
 
 /**
  * Defines how rows should be grouped for creating aggregates.
@@ -44,6 +49,11 @@
     };
 
     /**
+     * Factory for <code>AggregationSpecification</code> that groups all the rows together.
+     */
+    public static final AggregationSpecification.Factory AGGREGATE_EVERYTHING_FACTORY = options -> AGGREGATE_EVERYTHING;
+
+    /**
      * The <code>AggregationSpecification</code> kind.
      */
     private final Kind kind;
@@ -53,7 +63,7 @@
      */
     public static enum Kind
     {
-        AGGREGATE_EVERYTHING, AGGREGATE_BY_PK_PREFIX
+        AGGREGATE_EVERYTHING, AGGREGATE_BY_PK_PREFIX, AGGREGATE_BY_PK_PREFIX_WITH_SELECTOR
     }
 
     /**
@@ -89,37 +99,86 @@
     public abstract GroupMaker newGroupMaker(GroupingState state);
 
     /**
-     * Creates a new <code>AggregationSpecification</code> instance that will build aggregates based on primary key
-     * columns.
+     * Creates a new {@code Factory} instance to create {@code AggregationSpecification} that will build aggregates
+     * based on primary key columns.
      *
      * @param comparator the comparator used to compare the clustering prefixes
      * @param clusteringPrefixSize the number of clustering columns used to create the aggregates
-     * @return a new <code>AggregationSpecification</code> instance that will build aggregates based on primary key
-     * columns
+     * @return a new {@code Factory} instance to create {@code AggregationSpecification} that will build aggregates
+     * based on primary key columns.
      */
-    public static AggregationSpecification aggregatePkPrefix(ClusteringComparator comparator, int clusteringPrefixSize)
+    public static AggregationSpecification.Factory aggregatePkPrefixFactory(ClusteringComparator comparator,
+                                                                            int clusteringPrefixSize)
     {
-        return new AggregateByPkPrefix(comparator, clusteringPrefixSize);
+        return options -> new AggregateByPkPrefix(comparator, clusteringPrefixSize);
+    }
+
+    public static AggregationSpecification.Factory aggregatePkPrefixFactoryWithSelector(final ClusteringComparator comparator,
+                                                                                        final int clusteringPrefixSize,
+                                                                                        final Selector.Factory factory)
+    {
+        return new Factory()
+        {
+            @Override
+            public void addFunctionsTo(List<Function> functions)
+            {
+                factory.addFunctionsTo(functions);
+            }
+
+            @Override
+            public AggregationSpecification newInstance(QueryOptions options)
+            {
+                Selector selector = factory.newInstance(options);
+                selector.validateForGroupBy();
+                return new AggregateByPkPrefixWithSelector(comparator,
+                                                           clusteringPrefixSize,
+                                                           selector);
+            }
+        };
+    }
+
+    /**
+     * Factory for {@code AggregationSpecification}.
+     *
+     */
+    public static interface Factory
+    {
+        /**
+         * Creates a new {@code AggregationSpecification} instance after binding the parameters.
+         *
+         * @param options the query options
+         * @return a new {@code AggregationSpecification} instance.
+         */
+        public AggregationSpecification newInstance(QueryOptions options);
+
+        public default void addFunctionsTo(List<Function> functions)
+        {
+        }
     }
 
     /**
      * <code>AggregationSpecification</code> that build aggregates based on primary key columns
      */
-    private static final class AggregateByPkPrefix extends AggregationSpecification
+    private static class AggregateByPkPrefix extends AggregationSpecification
     {
         /**
          * The number of clustering component to compare.
          */
-        private final int clusteringPrefixSize;
+        protected final int clusteringPrefixSize;
 
         /**
          * The comparator used to compare the clustering prefixes.
          */
-        private final ClusteringComparator comparator;
+        protected final ClusteringComparator comparator;
 
         public AggregateByPkPrefix(ClusteringComparator comparator, int clusteringPrefixSize)
         {
-            super(Kind.AGGREGATE_BY_PK_PREFIX);
+            this(Kind.AGGREGATE_BY_PK_PREFIX, comparator, clusteringPrefixSize);
+        }
+
+        protected AggregateByPkPrefix(Kind kind, ClusteringComparator comparator, int clusteringPrefixSize)
+        {
+            super(kind);
             this.comparator = comparator;
             this.clusteringPrefixSize = clusteringPrefixSize;
         }
@@ -127,7 +186,32 @@
         @Override
         public GroupMaker newGroupMaker(GroupingState state)
         {
-            return GroupMaker.newInstance(comparator, clusteringPrefixSize, state);
+            return GroupMaker.newPkPrefixGroupMaker(comparator, clusteringPrefixSize, state);
+        }
+    }
+
+    /**
+     * <code>AggregationSpecification</code> that builds aggregates based on primary key columns using a selector.
+     */
+    private static final class AggregateByPkPrefixWithSelector extends AggregateByPkPrefix
+    {
+        /**
+         * The selector.
+         */
+        private final Selector selector;
+
+        public AggregateByPkPrefixWithSelector(ClusteringComparator comparator,
+                                               int clusteringPrefixSize,
+                                               Selector selector)
+        {
+            super(Kind.AGGREGATE_BY_PK_PREFIX_WITH_SELECTOR, comparator, clusteringPrefixSize);
+            this.selector = selector;
+        }
+
+        @Override
+        public GroupMaker newGroupMaker(GroupingState state)
+        {
+            return GroupMaker.newSelectorGroupMaker(comparator, clusteringPrefixSize, selector, state);
         }
     }
 
@@ -143,12 +227,17 @@
                 case AGGREGATE_BY_PK_PREFIX:
                     out.writeUnsignedVInt(((AggregateByPkPrefix) aggregationSpec).clusteringPrefixSize);
                     break;
+                case AGGREGATE_BY_PK_PREFIX_WITH_SELECTOR:
+                    AggregateByPkPrefixWithSelector spec = (AggregateByPkPrefixWithSelector) aggregationSpec;
+                    out.writeUnsignedVInt(spec.clusteringPrefixSize);
+                    Selector.serializer.serialize(spec.selector, out, version);
+                    break;
                 default:
-                    throw new AssertionError();
+                    throw new AssertionError("Unknow aggregation kind: " + aggregationSpec.kind());
             }
         }
 
-        public AggregationSpecification deserialize(DataInputPlus in, int version, ClusteringComparator comparator) throws IOException
+        public AggregationSpecification deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
         {
             Kind kind = Kind.values()[in.readUnsignedByte()];
             switch (kind)
@@ -156,10 +245,15 @@
                 case AGGREGATE_EVERYTHING:
                     return AggregationSpecification.AGGREGATE_EVERYTHING;
                 case AGGREGATE_BY_PK_PREFIX:
+                    return new AggregateByPkPrefix(metadata.comparator, (int) in.readUnsignedVInt());
+                case AGGREGATE_BY_PK_PREFIX_WITH_SELECTOR:
                     int clusteringPrefixSize = (int) in.readUnsignedVInt();
-                    return AggregationSpecification.aggregatePkPrefix(comparator, clusteringPrefixSize);
+                    Selector selector = Selector.serializer.deserialize(in, version, metadata);
+                    return new AggregateByPkPrefixWithSelector(metadata.comparator,
+                                                               clusteringPrefixSize,
+                                                               selector);
                 default:
-                    throw new AssertionError();
+                    throw new AssertionError("Unknow aggregation kind: " + kind);
             }
         }
 
@@ -173,8 +267,13 @@
                 case AGGREGATE_BY_PK_PREFIX:
                     size += TypeSizes.sizeofUnsignedVInt(((AggregateByPkPrefix) aggregationSpec).clusteringPrefixSize);
                     break;
+                case AGGREGATE_BY_PK_PREFIX_WITH_SELECTOR:
+                    AggregateByPkPrefixWithSelector spec = (AggregateByPkPrefixWithSelector) aggregationSpec;
+                    size += TypeSizes.sizeofUnsignedVInt(spec.clusteringPrefixSize);
+                    size += Selector.serializer.serializedSize(spec.selector, version);
+                    break;
                 default:
-                    throw new AssertionError();
+                    throw new AssertionError("Unknow aggregation kind: " + aggregationSpec.kind());
             }
             return size;
         }
diff --git a/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java b/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java
index 90cdab2..968219f 100644
--- a/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java
+++ b/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java
@@ -19,9 +19,11 @@
 
 import java.nio.ByteBuffer;
 
+import org.apache.cassandra.cql3.selection.Selector;
 import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.ClusteringComparator;
 import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.transport.ProtocolVersion;
 
 /**
  * A <code>GroupMaker</code> can be used to determine if some sorted rows belongs to the same group or not.
@@ -44,16 +46,33 @@
         }
     };
 
-    public static GroupMaker newInstance(ClusteringComparator comparator, int clusteringPrefixSize, GroupingState state)
+    public static GroupMaker newPkPrefixGroupMaker(ClusteringComparator comparator,
+                                                   int clusteringPrefixSize,
+                                                   GroupingState state)
     {
         return new PkPrefixGroupMaker(comparator, clusteringPrefixSize, state);
     }
 
-    public static GroupMaker newInstance(ClusteringComparator comparator, int clusteringPrefixSize)
+    public static GroupMaker newPkPrefixGroupMaker(ClusteringComparator comparator, int clusteringPrefixSize)
     {
         return new PkPrefixGroupMaker(comparator, clusteringPrefixSize);
     }
 
+    public static GroupMaker newSelectorGroupMaker(ClusteringComparator comparator,
+                                                   int clusteringPrefixSize,
+                                                   Selector selector,
+                                                   GroupingState state)
+    {
+        return new SelectorGroupMaker(comparator, clusteringPrefixSize, selector, state);
+    }
+
+    public static GroupMaker newSelectorGroupMaker(ClusteringComparator comparator,
+                                                   int clusteringPrefixSize,
+                                                   Selector selector)
+    {
+        return new SelectorGroupMaker(comparator, clusteringPrefixSize, selector);
+    }
+
     /**
      * Checks if a given row belongs to the same group that the previous row or not.
      *
@@ -75,27 +94,27 @@
         return false;
     }
 
-    private static final class PkPrefixGroupMaker extends GroupMaker
+    private static class PkPrefixGroupMaker extends GroupMaker
     {
         /**
          * The size of the clustering prefix used to make the groups
          */
-        private final int clusteringPrefixSize;
+        protected final int clusteringPrefixSize;
 
         /**
          * The comparator used to compare the clustering prefixes.
          */
-        private final ClusteringComparator comparator;
+        protected final ClusteringComparator comparator;
 
         /**
          * The last partition key seen
          */
-        private ByteBuffer lastPartitionKey;
+        protected ByteBuffer lastPartitionKey;
 
         /**
          * The last clustering seen
          */
-        private Clustering<?> lastClustering;
+        protected Clustering<?> lastClustering;
 
         public PkPrefixGroupMaker(ClusteringComparator comparator, int clusteringPrefixSize, GroupingState state)
         {
@@ -113,28 +132,97 @@
         @Override
         public boolean isNewGroup(DecoratedKey partitionKey, Clustering<?> clustering)
         {
-            boolean isNew = false;
+            ByteBuffer key = partitionKey.getKey();
+            // We are entering a new group if:
+            // - the partition key is a new one
+            // - the last clustering was null, or does not have the same prefix as the new clustering one
+            boolean isNew = !key.equals(lastPartitionKey)
+                            || lastClustering == null
+                            || comparator.compare(lastClustering, clustering, clusteringPrefixSize) != 0;
+
+            lastPartitionKey = key;
+            lastClustering = Clustering.STATIC_CLUSTERING == clustering ? null : clustering;
+            return isNew;
+        }
+    }
+
+    private static class SelectorGroupMaker extends PkPrefixGroupMaker
+    {
+        /**
+         * The selector used to build the groups.
+         */
+        private final Selector selector;
+
+        /**
+         * The output of the selector call on the last clustering
+         */
+        private ByteBuffer lastOutput;
+
+        private final Selector.InputRow input = new Selector.InputRow(1, false, false);
+
+        public SelectorGroupMaker(ClusteringComparator comparator,
+                                  int clusteringPrefixSize,
+                                  Selector selector,
+                                  GroupingState state)
+        {
+            super(comparator, clusteringPrefixSize, state);
+            this.selector = selector;
+            this.lastOutput = lastClustering == null
+                              ? null : executeSelector(lastClustering.bufferAt(clusteringPrefixSize - 1));
+        }
+
+        public SelectorGroupMaker(ClusteringComparator comparator,
+                                  int clusteringPrefixSize,
+                                  Selector selector)
+        {
+            super(comparator, clusteringPrefixSize);
+            this.selector = selector;
+        }
+
+        @Override
+        public boolean isNewGroup(DecoratedKey partitionKey, Clustering<?> clustering)
+        {
+            ByteBuffer output =
+                    Clustering.STATIC_CLUSTERING == clustering ? null
+                                                               : executeSelector(clustering.bufferAt(clusteringPrefixSize - 1));
+
+            ByteBuffer key = partitionKey.getKey();
 
             // We are entering a new group if:
             // - the partition key is a new one
             // - the last clustering was not null and does not have the same prefix as the new clustering one
-            if (!partitionKey.getKey().equals(lastPartitionKey))
-            {
-                lastPartitionKey = partitionKey.getKey();
-                isNew = true;
-                if (Clustering.STATIC_CLUSTERING == clustering)
-                {
-                    lastClustering = null;
-                    return true;
-                }
-            }
-            else if (lastClustering != null && comparator.compare(lastClustering, clustering, clusteringPrefixSize) != 0)
-            {
-                isNew = true;
-            }
+            boolean isNew = !key.equals(lastPartitionKey)
+                            || lastClustering == null
+                            || comparator.compare(lastClustering, clustering, clusteringPrefixSize - 1) != 0
+                            || compareOutput(output) != 0;
 
-            lastClustering = clustering;
+            lastPartitionKey = key;
+            lastClustering = Clustering.STATIC_CLUSTERING == clustering ? null : clustering;
+            lastOutput = output;
             return isNew;
         }
+
+        private int compareOutput(ByteBuffer output)
+        {
+            if (output == null)
+                return lastOutput == null ? 0 : -1;
+            if (lastOutput == null)
+                return 1;
+
+            return selector.getType().compare(output, lastOutput);
+        }
+
+        private ByteBuffer executeSelector(ByteBuffer argument)
+        {
+            input.add(argument);
+
+            // For computing groups we do not need to use the client protocol version.
+            selector.addInput(ProtocolVersion.CURRENT, input);
+            ByteBuffer output = selector.getOutput(ProtocolVersion.CURRENT);
+            selector.reset();
+            input.reset(false);
+
+            return output;
+        }
     }
-}
\ No newline at end of file
+}
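[Editorial note, not part of the patch] The SelectorGroupMaker added above extends plain prefix-based grouping with one extra comparison: the output of the GROUP BY selector (for example a function applied to the last grouped clustering column). A standalone simplification of that boundary rule, using no Cassandra types, is sketched below; null handling in the real code is slightly stricter (a null last clustering always starts a new group).

    import java.util.Objects;

    // Simplified sketch of the grouping rule only, not the patched class itself.
    final class GroupBoundarySketch
    {
        private Object lastKey;
        private Object lastPrefix;
        private Object lastSelectorOutput;

        boolean isNewGroup(Object key, Object prefix, Object selectorOutput)
        {
            // A row opens a new group when the partition key, the clustering prefix,
            // or the selector output differs from the previous row's values.
            boolean isNew = !Objects.equals(key, lastKey)
                            || !Objects.equals(prefix, lastPrefix)
                            || !Objects.equals(selectorOutput, lastSelectorOutput);
            lastKey = key;
            lastPrefix = prefix;
            lastSelectorOutput = selectorOutput;
            return isNew;
        }
    }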
diff --git a/src/java/org/apache/cassandra/db/aggregation/GroupingState.java b/src/java/org/apache/cassandra/db/aggregation/GroupingState.java
index 2e522c4..b77802c 100644
--- a/src/java/org/apache/cassandra/db/aggregation/GroupingState.java
+++ b/src/java/org/apache/cassandra/db/aggregation/GroupingState.java
@@ -23,7 +23,6 @@
 import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.ClusteringComparator;
 import org.apache.cassandra.db.TypeSizes;
-import org.apache.cassandra.db.marshal.ByteArrayAccessor;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.ByteBufferUtil;
diff --git a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
old mode 100755
new mode 100644
index cdf96cd..d8eb0e7
--- a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
+++ b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
@@ -17,32 +17,41 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.*;
-import java.util.concurrent.*;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.BooleanSupplier;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.codahale.metrics.Timer.Context;
 import net.nicoulaj.compilecommand.annotations.DontInline;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.Interruptible;
+import org.apache.cassandra.concurrent.Interruptible.TerminateException;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.io.compress.BufferType;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.SimpleCachedBufferPool;
 import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.db.*;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.utils.*;
-import org.apache.cassandra.utils.concurrent.WaitQueue;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.*;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Daemon.NON_DAEMON;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts.SYNCHRONIZED;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe.SAFE;
 import static org.apache.cassandra.db.commitlog.CommitLogSegment.Allocation;
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
 
 /**
  * Performs eager-creation of commit log segments in a background thread. All the
@@ -62,7 +71,7 @@
      */
     private volatile CommitLogSegment availableSegment = null;
 
-    private final WaitQueue segmentPrepared = new WaitQueue();
+    private final WaitQueue segmentPrepared = newWaitQueue();
 
     /** Active segments, containing unflushed data. The tail of this queue is the one we allocate writes to */
     private final ConcurrentLinkedQueue<CommitLogSegment> activeSegments = new ConcurrentLinkedQueue<>();
@@ -85,11 +94,10 @@
     private final AtomicLong size = new AtomicLong();
 
     @VisibleForTesting
-    Thread managerThread;
+    Interruptible executor;
     protected final CommitLog commitLog;
-    private volatile boolean shutdown;
-    private final BooleanSupplier managerThreadWaitCondition = () -> (availableSegment == null && !atSegmentBufferLimit()) || shutdown;
-    private final WaitQueue managerThreadWaitQueue = new WaitQueue();
+    private final BooleanSupplier managerThreadWaitCondition = () -> (availableSegment == null && !atSegmentBufferLimit());
+    private final WaitQueue managerThreadWaitQueue = newWaitQueue();
 
     private volatile SimpleCachedBufferPool bufferPool;
 
@@ -101,73 +109,101 @@
 
     void start()
     {
-        // The run loop for the manager thread
-        Runnable runnable = new WrappedRunnable()
-        {
-            public void runMayThrow() throws Exception
-            {
-                while (!shutdown)
-                {
-                    try
-                    {
-                        assert availableSegment == null;
-                        logger.trace("No segments in reserve; creating a fresh one");
-                        availableSegment = createSegment();
-                        if (shutdown)
-                        {
-                            // If shutdown() started and finished during segment creation, we are now left with a
-                            // segment that no one will consume. Discard it.
-                            discardAvailableSegment();
-                            return;
-                        }
-
-                        segmentPrepared.signalAll();
-                        Thread.yield();
-
-                        if (availableSegment == null && !atSegmentBufferLimit())
-                            // Writing threads need another segment now.
-                            continue;
-
-                        // Writing threads are not waiting for new segments, we can spend time on other tasks.
-                        // flush old Cfs if we're full
-                        maybeFlushToReclaim();
-                    }
-                    catch (Throwable t)
-                    {
-                        if (!CommitLog.handleCommitError("Failed managing commit log segments", t))
-                            return;
-                        // sleep some arbitrary period to avoid spamming CL
-                        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
-
-                        // If we offered a segment, wait for it to be taken before reentering the loop.
-                        // There could be a new segment in next not offered, but only on failure to discard it while
-                        // shutting down-- nothing more can or needs to be done in that case.
-                    }
-
-                    WaitQueue.waitOnCondition(managerThreadWaitCondition, managerThreadWaitQueue);
-                }
-            }
-        };
-
         // For encrypted segments we want to keep the compression buffers on-heap as we need those bytes for encryption,
         // and we want to avoid copying from off-heap (compression buffer) to on-heap encryption APIs
         BufferType bufferType = commitLog.configuration.useEncryption() || !commitLog.configuration.useCompression()
-                              ? BufferType.ON_HEAP
-                              : commitLog.configuration.getCompressor().preferredBufferType();
+                                ? BufferType.ON_HEAP
+                                : commitLog.configuration.getCompressor().preferredBufferType();
 
         this.bufferPool = new SimpleCachedBufferPool(DatabaseDescriptor.getCommitLogMaxCompressionBuffersInPool(),
                                                      DatabaseDescriptor.getCommitLogSegmentSize(),
                                                      bufferType);
 
-        shutdown = false;
 
-        managerThread = NamedThreadFactory.createThread(runnable, "COMMIT-LOG-ALLOCATOR");
-        managerThread.start();
-
+        AllocatorRunnable allocator = new AllocatorRunnable();
+        executor = executorFactory().infiniteLoop("COMMIT-LOG-ALLOCATOR", allocator, SAFE, NON_DAEMON, SYNCHRONIZED);
         // for simplicity, ensure the first segment is allocated before continuing
         advanceAllocatingFrom(null);
     }
 
+    class AllocatorRunnable implements Interruptible.Task
+    {
+        // The run loop for the manager thread
+        @Override
+        public void run(Interruptible.State state) throws InterruptedException
+        {
+            boolean interrupted = false;
+            try
+            {
+                switch (state)
+                {
+                    case SHUTTING_DOWN:
+                        // If shutdown() started and finished during segment creation, we are now left with a
+                        // segment that no one will consume. Discard it.
+                        discardAvailableSegment();
+                        return;
+
+                    case NORMAL:
+                        assert availableSegment == null;
+                        // synchronized to prevent thread interrupts while performing IO operations and also
+                        // clear interrupted status to prevent ClosedByInterruptException in createSegment
+
+                        synchronized (this)
+                        {
+                            interrupted = Thread.interrupted();
+                            logger.trace("No segments in reserve; creating a fresh one");
+                            availableSegment = createSegment();
+
+                            segmentPrepared.signalAll();
+                            Thread.yield();
+
+                            if (availableSegment == null && !atSegmentBufferLimit())
+                                // Writing threads need another segment now.
+                                return;
+
+                            // Writing threads are not waiting for new segments, we can spend time on other tasks.
+                            // flush old Cfs if we're full
+                            maybeFlushToReclaim();
+                        }
+                }
+            }
+            catch (Throwable t)
+            {
+                if (!CommitLog.handleCommitError("Failed managing commit log segments", t))
+                {
+                    discardAvailableSegment();
+                    throw new TerminateException();
+                }
+
+                // sleep some arbitrary period to avoid spamming CL
+                Thread.sleep(TimeUnit.SECONDS.toMillis(1L));
+
+                // If we offered a segment, wait for it to be taken before reentering the loop.
+                // There could be a new segment in next not offered, but only on failure to discard it while
+                // shutting down-- nothing more can or needs to be done in that case.
+            }
+
+            interrupted = interrupted || Thread.interrupted();
+            if (!interrupted)
+            {
+                try
+                {
+                    WaitQueue.waitOnCondition(managerThreadWaitCondition, managerThreadWaitQueue);
+                }
+                catch (InterruptedException e)
+                {
+                    interrupted = true;
+                }
+            }
+
+            if (interrupted)
+            {
+                discardAvailableSegment();
+                throw new InterruptedException();
+            }
+        }
+    }
+
     private boolean atSegmentBufferLimit()
     {
         return CommitLogSegment.usesBufferPool(commitLog) && bufferPool.atLimit();
@@ -189,7 +225,7 @@
                 if (flushingSize + unused >= 0)
                     break;
             }
-            flushDataFrom(segmentsToRecycle, false);
+            flushDataFrom(segmentsToRecycle, Collections.emptyList(), false);
         }
     }
 
@@ -265,7 +301,7 @@
     {
         do
         {
-            WaitQueue.Signal prepared = segmentPrepared.register(commitLog.metrics.waitingOnSegmentAllocation.time());
+            WaitQueue.Signal prepared = segmentPrepared.register(commitLog.metrics.waitingOnSegmentAllocation.time(), Context::stop);
             if (availableSegment == null && allocatingFrom == currentAllocatingFrom)
                 prepared.awaitUninterruptibly();
             else
@@ -281,7 +317,7 @@
      * This is necessary to avoid resurrecting data during replay if a user creates a new table with
      * the same name and ID. See CASSANDRA-16986 for more details.
      */
-    void forceRecycleAll(Iterable<TableId> droppedTables)
+    void forceRecycleAll(Collection<TableId> droppedTables)
     {
         List<CommitLogSegment> segmentsToRecycle = new ArrayList<>(activeSegments);
         CommitLogSegment last = segmentsToRecycle.get(segmentsToRecycle.size() - 1);
@@ -295,7 +331,7 @@
         Keyspace.writeOrder.awaitNewBarrier();
 
         // flush and wait for all CFs that are dirty in segments up-to and including 'last'
-        Future<?> future = flushDataFrom(segmentsToRecycle, true);
+        Future<?> future = flushDataFrom(segmentsToRecycle, droppedTables, true);
         try
         {
             future.get();
@@ -370,7 +406,7 @@
 
     private long unusedCapacity()
     {
-        long total = DatabaseDescriptor.getTotalCommitlogSpaceInMB() * 1024 * 1024;
+        long total = DatabaseDescriptor.getTotalCommitlogSpaceInMiB() * 1024 * 1024;
         long currentSize = size.get();
         logger.trace("Total active commitlog segment space used is {} out of {}", currentSize, total);
         return total - currentSize;
@@ -381,20 +417,22 @@
      *
      * @return a Future that will finish when all the flushes are complete.
      */
-    private Future<?> flushDataFrom(List<CommitLogSegment> segments, boolean force)
+    private Future<?> flushDataFrom(List<CommitLogSegment> segments, Collection<TableId> droppedTables, boolean force)
     {
         if (segments.isEmpty())
-            return Futures.immediateFuture(null);
+            return ImmediateFuture.success(null);
         final CommitLogPosition maxCommitLogPosition = segments.get(segments.size() - 1).getCurrentCommitLogPosition();
 
         // a map of CfId -> forceFlush() to ensure we only queue one flush per cf
-        final Map<TableId, ListenableFuture<?>> flushes = new LinkedHashMap<>();
+        final Map<TableId, Future<?>> flushes = new LinkedHashMap<>();
 
         for (CommitLogSegment segment : segments)
         {
             for (TableId dirtyTableId : segment.getDirtyTableIds())
             {
-                TableMetadata metadata = Schema.instance.getTableMetadata(dirtyTableId);
+                TableMetadata metadata = droppedTables.contains(dirtyTableId)
+                                         ? null
+                                         : Schema.instance.getTableMetadata(dirtyTableId);
                 if (metadata == null)
                 {
                     // even though we remove the schema entry before a final flush when dropping a CF,
@@ -405,20 +443,32 @@
                 else if (!flushes.containsKey(dirtyTableId))
                 {
                     final ColumnFamilyStore cfs = Keyspace.open(metadata.keyspace).getColumnFamilyStore(dirtyTableId);
-                    // can safely call forceFlush here as we will only ever block (briefly) for other attempts to flush,
-                    // no deadlock possibility since switchLock removal
-                    flushes.put(dirtyTableId, force ? cfs.forceFlush() : cfs.forceFlush(maxCommitLogPosition));
+
+                    if (cfs.memtableWritesAreDurable())
+                    {
+                        // The memtable does not need this data to be preserved (we only wrote it for PITR and CDC)
+                        segment.markClean(dirtyTableId, CommitLogPosition.NONE, segment.getCurrentCommitLogPosition());
+                    }
+                    else
+                    {
+                        // can safely call forceFlush here as we will only ever block (briefly) for other attempts to flush,
+                        // no deadlock possibility since switchLock removal
+                        flushes.put(dirtyTableId, force
+                                                  ? cfs.forceFlush(ColumnFamilyStore.FlushReason.COMMITLOG_DIRTY)
+                                                  : cfs.forceFlush(maxCommitLogPosition));
+                    }
                 }
             }
         }
 
-        return Futures.allAsList(flushes.values());
+        return FutureCombiner.allOf(flushes.values());
     }
 
     /**
      * Stops CL, for testing purposes. DO NOT USE THIS OUTSIDE OF TESTS.
      * Only call this after the AbstractCommitLogService is shut down.
      */
+    @VisibleForTesting
     public void stopUnsafe(boolean deleteSegments)
     {
         logger.debug("CLSM closing and clearing existing commit log segments...");
@@ -426,11 +476,12 @@
         shutdown();
         try
         {
-            awaitTermination();
+            // On heavily loaded test envs we need a longer wait
+            assert awaitTermination(5L, TimeUnit.MINUTES) : "Assert waiting for termination failed on " + FBUtilities.now().toString();
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
 
         for (CommitLogSegment segment : activeSegments)
@@ -474,9 +525,7 @@
      */
     public void shutdown()
     {
-        assert !shutdown;
-        shutdown = true;
-
+        executor.shutdownNow();
         // Release the management thread and delete prepared segment.
         // Do not block as another thread may claim the segment (this can happen during unit test initialization).
         discardAvailableSegment();
@@ -485,7 +534,7 @@
 
     private void discardAvailableSegment()
     {
-        CommitLogSegment next = null;
+        CommitLogSegment next;
         synchronized (this)
         {
             next = availableSegment;
@@ -498,19 +547,16 @@
     /**
      * Returns when the management thread terminates.
      */
-    public void awaitTermination() throws InterruptedException
+    public boolean awaitTermination(long timeout, TimeUnit units) throws InterruptedException
     {
-        if (managerThread != null)
-        {
-            managerThread.join();
-            managerThread = null;
-        }
-
+        boolean res = executor.awaitTermination(timeout, units);
         for (CommitLogSegment segment : activeSegments)
             segment.close();
 
         if (bufferPool != null)
             bufferPool.emptyBufferPool();
+
+        return res;
     }
 
     /**
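[Editorial note, not part of the patch] The segment manager's hand-rolled thread and shutdown flag are replaced above by an Interruptible.Task run on executorFactory().infiniteLoop(...), which passes each iteration the loop state (for example NORMAL or SHUTTING_DOWN, both visible above) instead of relying on a volatile flag. A minimal sketch of that pattern, with a hypothetical task name and an empty work body, might look like this:

    import java.util.concurrent.TimeUnit;

    import org.apache.cassandra.concurrent.Interruptible;

    import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
    import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Daemon.NON_DAEMON;
    import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts.SYNCHRONIZED;
    import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe.SAFE;

    // Hypothetical task, for illustration only.
    class ExampleLoopTask implements Interruptible.Task
    {
        @Override
        public void run(Interruptible.State state) throws InterruptedException
        {
            switch (state)
            {
                case SHUTTING_DOWN:
                    // Release anything prepared for the next iteration, then return.
                    return;
                case NORMAL:
                    // Do one unit of work; the executor invokes run(...) again in a loop.
                    break;
            }
        }

        static void startAndStop() throws InterruptedException
        {
            Interruptible loop = executorFactory().infiniteLoop("EXAMPLE-LOOP", new ExampleLoopTask(), SAFE, NON_DAEMON, SYNCHRONIZED);
            // Shutdown mirrors what the segment manager does: request termination, then wait.
            loop.shutdownNow();
            loop.awaitTermination(1L, TimeUnit.MINUTES);
        }
    }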
diff --git a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java
index a65ef00..6b5378f 100644
--- a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java
+++ b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java
@@ -17,22 +17,38 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.LockSupport;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.codahale.metrics.Timer.Context;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.Interruptible;
+import org.apache.cassandra.concurrent.Interruptible.TerminateException;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.db.commitlog.CommitLogSegment.Allocation;
 import org.apache.cassandra.utils.MonotonicClock;
 import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.concurrent.Semaphore;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
+import static com.codahale.metrics.Timer.Context;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Daemon.NON_DAEMON;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts.SYNCHRONIZED;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe.SAFE;
+import static org.apache.cassandra.concurrent.Interruptible.State.NORMAL;
+import static org.apache.cassandra.concurrent.Interruptible.State.SHUTTING_DOWN;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.preciseTime;
+import static org.apache.cassandra.utils.concurrent.Semaphore.newSemaphore;
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
+
 public abstract class AbstractCommitLogService
 {
     /**
@@ -41,18 +57,18 @@
      */
     static final long DEFAULT_MARKER_INTERVAL_MILLIS = 100;
 
-    private volatile Thread thread;
-    private volatile boolean shutdown = false;
+    private volatile Interruptible executor;
 
     // all Allocations written before this time will be synced
-    protected volatile long lastSyncedAt = System.currentTimeMillis();
+    protected volatile long lastSyncedAt = currentTimeMillis();
 
     // counts of total written, and pending, log messages
     private final AtomicLong written = new AtomicLong(0);
     protected final AtomicLong pending = new AtomicLong(0);
 
     // signal that writers can wait on to be notified of a completed sync
-    protected final WaitQueue syncComplete = new WaitQueue();
+    protected final WaitQueue syncComplete = newWaitQueue();
+    protected final Semaphore haveWork = newSemaphore(1);
 
     final CommitLog commitLog;
     private final String name;
@@ -102,7 +118,11 @@
         this.name = name;
 
         final long markerIntervalMillis;
-        if (markHeadersFaster && syncIntervalMillis > DEFAULT_MARKER_INTERVAL_MILLIS)
+        if (syncIntervalMillis < 0)
+        {
+            markerIntervalMillis = -1;
+        }
+        else if (markHeadersFaster && syncIntervalMillis > DEFAULT_MARKER_INTERVAL_MILLIS)
         {
             markerIntervalMillis = DEFAULT_MARKER_INTERVAL_MILLIS;
             long modulo = syncIntervalMillis % markerIntervalMillis;
@@ -114,29 +134,29 @@
                 if (modulo >= markerIntervalMillis / 2)
                     syncIntervalMillis += markerIntervalMillis;
             }
+            assert syncIntervalMillis % markerIntervalMillis == 0;
             logger.debug("Will update the commitlog markers every {}ms and flush every {}ms", markerIntervalMillis, syncIntervalMillis);
         }
         else
         {
             markerIntervalMillis = syncIntervalMillis;
         }
-        assert syncIntervalMillis % markerIntervalMillis == 0;
-        this.markerIntervalNanos = TimeUnit.NANOSECONDS.convert(markerIntervalMillis, TimeUnit.MILLISECONDS);
-        this.syncIntervalNanos = TimeUnit.NANOSECONDS.convert(syncIntervalMillis, TimeUnit.MILLISECONDS);
+        this.markerIntervalNanos = NANOSECONDS.convert(markerIntervalMillis, MILLISECONDS);
+        this.syncIntervalNanos = NANOSECONDS.convert(syncIntervalMillis, MILLISECONDS);
     }
 
     // Separated into individual method to ensure relevant objects are constructed before this is started.
     void start()
     {
-        if (syncIntervalNanos < 1)
+        if (syncIntervalNanos < 1 && !(this instanceof BatchCommitLogService)) // permit indefinite waiting with batch, as that is perfectly sensible
             throw new IllegalArgumentException(String.format("Commit log flush interval must be positive: %fms",
                                                              syncIntervalNanos * 1e-6));
-        shutdown = false;
-        thread = NamedThreadFactory.createThread(new SyncRunnable(MonotonicClock.preciseTime), name);
-        thread.start();
+
+        SyncRunnable sync = new SyncRunnable(preciseTime);
+        executor = executorFactory().infiniteLoop(name, sync, SAFE, NON_DAEMON, SYNCHRONIZED);
     }
 
-    class SyncRunnable implements Runnable
+    class SyncRunnable implements Interruptible.Task
     {
         private final MonotonicClock clock;
         private long firstLagAt = 0;
@@ -150,61 +170,59 @@
             this.clock = clock;
         }
 
-        public void run()
+        public void run(Interruptible.State state) throws InterruptedException
         {
-            while (true)
-            {
-                if (!sync())
-                    break;
-            }
-        }
-
-        boolean sync()
-        {
-            // always run once after shutdown signalled
-            boolean shutdownRequested = shutdown;
-
             try
             {
                 // sync and signal
                 long pollStarted = clock.now();
-                boolean flushToDisk = lastSyncedAt + syncIntervalNanos <= pollStarted || shutdownRequested || syncRequested;
-                if (flushToDisk)
+                boolean flushToDisk = lastSyncedAt + syncIntervalNanos <= pollStarted || state != NORMAL || syncRequested;
+                // synchronized to prevent thread interrupts while performing IO operations and also
+                // clear interrupted status to prevent ClosedByInterruptException in CommitLog::sync
+                synchronized (this)
                 {
-                    // in this branch, we want to flush the commit log to disk
-                    syncRequested = false;
-                    commitLog.sync(true);
-                    lastSyncedAt = pollStarted;
-                    syncComplete.signalAll();
-                    syncCount++;
+                    Thread.interrupted();
+                    if (flushToDisk)
+                    {
+                        // in this branch, we want to flush the commit log to disk
+                        syncRequested = false;
+                        commitLog.sync(true);
+                        lastSyncedAt = pollStarted;
+                        syncComplete.signalAll();
+                        syncCount++;
+                    }
+                    else
+                    {
+                        // in this branch, just update the commit log sync headers
+                        commitLog.sync(false);
+                    }
+                }
+
+                if (state == SHUTTING_DOWN)
+                    return;
+
+                if (markerIntervalNanos <= 0)
+                {
+                    haveWork.acquire(1);
                 }
                 else
                 {
-                    // in this branch, just update the commit log sync headers
-                    commitLog.sync(false);
+                    long now = clock.now();
+                    if (flushToDisk)
+                        maybeLogFlushLag(pollStarted, now);
+
+                    long wakeUpAt = pollStarted + markerIntervalNanos;
+                    if (wakeUpAt > now)
+                        haveWork.tryAcquireUntil(1, wakeUpAt);
                 }
-
-                long now = clock.now();
-                if (flushToDisk)
-                    maybeLogFlushLag(pollStarted, now);
-
-                if (shutdownRequested)
-                    return false;
-
-                long wakeUpAt = pollStarted + markerIntervalNanos;
-                if (wakeUpAt > now)
-                    LockSupport.parkNanos(wakeUpAt - now);
             }
             catch (Throwable t)
             {
                 if (!CommitLog.handleCommitError("Failed to persist commits to disk", t))
-                    return false;
-
-                // sleep for full poll-interval after an error, so we don't spam the log file
-                LockSupport.parkNanos(markerIntervalNanos);
+                    throw new TerminateException();
+                else // sleep for full poll-interval after an error, so we don't spam the log file
+                    haveWork.tryAcquire(1, markerIntervalNanos, NANOSECONDS);
             }
-
-            return true;
         }
 
         /**
@@ -238,7 +256,7 @@
                 boolean logged = NoSpamLogger.log(logger,
                                                   NoSpamLogger.Level.WARN,
                                                   5,
-                                                  TimeUnit.MINUTES,
+                                                  MINUTES,
                                                   "Out of {} commit log syncs over the past {}s with average duration of {}ms, {} have exceeded the configured commit interval by an average of {}ms",
                                                   syncCount,
                                                   String.format("%.2f", (now - firstLagAt) * 1e-9d),
@@ -274,14 +292,14 @@
      */
     void requestExtraSync()
     {
+        // note: cannot simply invoke executor.interrupt() as some filesystems don't like it (jimfs, at least)
         syncRequested = true;
-        LockSupport.unpark(thread);
+        haveWork.release(1);
     }
 
     public void shutdown()
     {
-        shutdown = true;
-        requestExtraSync();
+        executor.shutdown();
     }
 
     /**
@@ -292,7 +310,7 @@
      */
     public void syncBlocking()
     {
-        long requestTime = System.nanoTime();
+        long requestTime = nanoTime();
         requestExtraSync();
         awaitSyncAt(requestTime, null);
     }
@@ -301,7 +319,7 @@
     {
         do
         {
-            WaitQueue.Signal signal = context != null ? syncComplete.register(context) : syncComplete.register();
+            WaitQueue.Signal signal = context != null ? syncComplete.register(context, Context::stop) : syncComplete.register();
             if (lastSyncedAt < syncTime)
                 signal.awaitUninterruptibly();
             else
@@ -312,8 +330,7 @@
 
     public void awaitTermination() throws InterruptedException
     {
-        if (thread != null)
-            thread.join();
+        executor.awaitTermination(5L, MINUTES);
     }
 
     public long getCompletedTasks()
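
With this hunk the sync loop no longer parks and unparks a raw Thread; it waits on the new haveWork semaphore, so a release() issued by requestExtraSync() is never lost even if it arrives before the loop starts waiting, and the Interruptible executor owns the loop's lifecycle. A minimal stand-alone sketch of the same wake-up idiom using plain java.util.concurrent types (Cassandra's Semaphore/Interruptible wrappers are not used here; all names are illustrative):

    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;

    class PeriodicSyncLoop
    {
        // one initial permit, mirroring newSemaphore(1) above, so the first iteration runs promptly
        private final Semaphore haveWork = new Semaphore(1);
        private volatile boolean syncRequested;

        // called by writers that need an immediate flush
        void requestExtraSync()
        {
            syncRequested = true;
            haveWork.release(); // wakes the loop even if it has not started waiting yet
        }

        // one iteration of the periodic loop: flush, then wait out the marker interval or an explicit request
        void runOnce(long markerIntervalNanos) throws InterruptedException
        {
            long started = System.nanoTime();
            boolean flushToDisk = syncRequested;
            syncRequested = false;
            // ... sync the log here, fully if flushToDisk, otherwise just update the markers ...
            long remainingNanos = started + markerIntervalNanos - System.nanoTime();
            if (remainingNanos > 0)
                haveWork.tryAcquire(remainingNanos, TimeUnit.NANOSECONDS); // returns early on release()
        }
    }
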
diff --git a/src/java/org/apache/cassandra/db/commitlog/BatchCommitLogService.java b/src/java/org/apache/cassandra/db/commitlog/BatchCommitLogService.java
index 78bf30c..e913e67 100644
--- a/src/java/org/apache/cassandra/db/commitlog/BatchCommitLogService.java
+++ b/src/java/org/apache/cassandra/db/commitlog/BatchCommitLogService.java
@@ -17,6 +17,8 @@
  */
 package org.apache.cassandra.db.commitlog;
 
+import static org.apache.cassandra.config.CassandraRelevantProperties.BATCH_COMMIT_LOG_SYNC_INTERVAL;
+
 class BatchCommitLogService extends AbstractCommitLogService
 {
     /**
@@ -24,7 +26,7 @@
      * the disk sync. Instead we trigger it explicitly in {@link #maybeWaitForSync(CommitLogSegment.Allocation)}.
      * This value here is largely irrelevant, but should be high enough so the sync thread is not continually waking up.
      */
-    private static final int POLL_TIME_MILLIS = 1000;
+    private static final int POLL_TIME_MILLIS = BATCH_COMMIT_LOG_SYNC_INTERVAL.getInt();
 
     public BatchCommitLogService(CommitLog commitLog)
     {
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
index 49eb67b..6195b1b 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
@@ -17,10 +17,9 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.file.FileStore;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -30,10 +29,13 @@
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiPredicate;
 import java.util.function.Function;
 import java.util.zip.CRC32;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -47,7 +49,8 @@
 import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.io.util.DataOutputBufferFixed;
-import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.PathUtils;
 import org.apache.cassandra.metrics.CommitLogMetrics;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.CompressionParams;
@@ -56,9 +59,9 @@
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MBeanWrapper;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static org.apache.cassandra.db.commitlog.CommitLogSegment.Allocation;
-import static org.apache.cassandra.db.commitlog.CommitLogSegment.CommitLogSegmentFileComparator;
 import static org.apache.cassandra.db.commitlog.CommitLogSegment.ENTRY_OVERHEAD_SIZE;
 import static org.apache.cassandra.utils.FBUtilities.updateChecksum;
 import static org.apache.cassandra.utils.FBUtilities.updateChecksumInt;
@@ -73,7 +76,7 @@
 
     public static final CommitLog instance = CommitLog.construct();
 
-    private static final FilenameFilter unmanagedFilesFilter = (dir, name) -> CommitLogDescriptor.isValid(name) && CommitLogSegment.shouldReplay(name);
+    private static final BiPredicate<File, String> unmanagedFilesFilter = (dir, name) -> CommitLogDescriptor.isValid(name) && CommitLogSegment.shouldReplay(name);
 
     final public AbstractCommitLogSegmentManager segmentManager;
 
@@ -87,7 +90,6 @@
     private static CommitLog construct()
     {
         CommitLog log = new CommitLog(CommitLogArchiver.construct(), DatabaseDescriptor.getCommitLogSegmentMgrProvider());
-
         MBeanWrapper.instance.registerMBean(log, "org.apache.cassandra.db:type=Commitlog");
         return log;
     }
@@ -162,7 +164,7 @@
 
     private File[] getUnmanagedFiles()
     {
-        File[] files = new File(segmentManager.storageDirectory).listFiles(unmanagedFilesFilter);
+        File[] files = new File(segmentManager.storageDirectory).tryList(unmanagedFilesFilter);
         if (files == null)
             return new File[0];
         return files;
@@ -181,8 +183,8 @@
         // archiving pass, which we should not treat as serious.
         for (File file : getUnmanagedFiles())
         {
-            archiver.maybeArchive(file.getPath(), file.getName());
-            archiver.maybeWaitForArchiving(file.getName());
+            archiver.maybeArchive(file.path(), file.name());
+            archiver.maybeWaitForArchiving(file.name());
         }
 
         assert archiver.archivePending.isEmpty() : "Not all commit log archive tasks were completed before restore";
@@ -197,7 +199,7 @@
         }
         else
         {
-            Arrays.sort(files, new CommitLogSegmentFileComparator());
+            Arrays.sort(files, new CommitLogSegment.CommitLogSegmentFileComparator());
             logger.info("Replaying {}", StringUtils.join(files, ", "));
             replayed = recoverFiles(files);
             logger.info("Log replay complete, {} replayed mutations", replayed);
@@ -254,7 +256,7 @@
     /**
      * Flushes all dirty CFs, waiting for them to free and recycle any segments they were retaining
      */
-    public void forceRecycleAllSegments(Iterable<TableId> droppedTables)
+    public void forceRecycleAllSegments(Collection<TableId> droppedTables)
     {
         segmentManager.forceRecycleAll(droppedTables);
     }
@@ -442,6 +444,29 @@
         return segmentRatios;
     }
 
+    @Override
+    public boolean getCDCBlockWrites()
+    {
+        return DatabaseDescriptor.getCDCBlockWrites();
+    }
+
+    @Override
+    public void setCDCBlockWrites(boolean val)
+    {
+        Preconditions.checkState(DatabaseDescriptor.isCDCEnabled(),
+                                 "Unable to set block_writes (%s): CDC is not enabled.", val);
+        Preconditions.checkState(segmentManager instanceof CommitLogSegmentManagerCDC,
+                                 "CDC is enabled but we have the wrong CommitLogSegmentManager type: %s. " +
+                                 "Please report this as bug.", segmentManager.getClass().getName());
+        boolean oldVal = DatabaseDescriptor.getCDCBlockWrites();
+        CommitLogSegment currentSegment = segmentManager.allocatingFrom();
+        // Update the current segment CDC state to PERMITTED if block_writes is disabled now, and it was in FORBIDDEN state
+        if (!val && currentSegment.getCDCState() == CommitLogSegment.CDCState.FORBIDDEN)
+            currentSegment.setCDCState(CommitLogSegment.CDCState.PERMITTED);
+        DatabaseDescriptor.setCDCBlockWrites(val);
+        logger.info("Updated CDC block_writes from {} to {}", oldVal, val);
+    }
+
     /**
      * Shuts down the threads used by the commit log, blocking until completion.
      * TODO this should accept a timeout, and throw TimeoutException
@@ -455,7 +480,7 @@
         executor.shutdown();
         executor.awaitTermination();
         segmentManager.shutdown();
-        segmentManager.awaitTermination();
+        segmentManager.awaitTermination(1L, TimeUnit.MINUTES);
     }
 
     /**
@@ -497,13 +522,13 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
         segmentManager.stopUnsafe(deleteSegments);
         CommitLogSegment.resetReplayLimit();
         if (DatabaseDescriptor.isCDCEnabled() && deleteSegments)
-            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
-                FileUtils.deleteWithConfirm(f);
+            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
+                f.delete();
     }
 
     /**
@@ -518,7 +543,7 @@
 
     public static long freeDiskSpace()
     {
-        return FileUtils.getFreeSpace(new File(DatabaseDescriptor.getCommitLogLocation()));
+        return PathUtils.tryGetSpace(new File(DatabaseDescriptor.getCommitLogLocation()).toPath(), FileStore::getTotalSpace);
     }
 
     @VisibleForTesting
@@ -602,7 +627,7 @@
          */
         public boolean useEncryption()
         {
-            return encryptionContext.isEnabled();
+            return encryptionContext != null && encryptionContext.isEnabled();
         }
 
         /**
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
index b58a316..2e1b580 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
@@ -20,7 +20,6 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.text.ParseException;
@@ -32,17 +31,21 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.WrappedRunnable;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Strings;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
 public class CommitLogArchiver
 {
     private static final Logger logger = LoggerFactory.getLogger(CommitLogArchiver.class);
@@ -63,22 +66,28 @@
     final String restoreCommand;
     final String restoreDirectories;
     public long restorePointInTime;
+    public CommitLogPosition snapshotCommitLogPosition;
     public final TimeUnit precision;
 
     public CommitLogArchiver(String archiveCommand, String restoreCommand, String restoreDirectories,
-            long restorePointInTime, TimeUnit precision)
+            long restorePointInTime, CommitLogPosition snapshotCommitLogPosition, TimeUnit precision)
     {
         this.archiveCommand = archiveCommand;
         this.restoreCommand = restoreCommand;
         this.restoreDirectories = restoreDirectories;
         this.restorePointInTime = restorePointInTime;
+        this.snapshotCommitLogPosition = snapshotCommitLogPosition;
         this.precision = precision;
-        executor = !Strings.isNullOrEmpty(archiveCommand) ? new JMXEnabledThreadPoolExecutor("CommitLogArchiver") : null;
+        executor = !Strings.isNullOrEmpty(archiveCommand)
+                ? executorFactory()
+                    .withJmxInternal()
+                    .sequential("CommitLogArchiver")
+                : null;
     }
 
     public static CommitLogArchiver disabled()
     {
-        return new CommitLogArchiver(null, null, null, Long.MAX_VALUE, TimeUnit.MICROSECONDS);
+        return new CommitLogArchiver(null, null, null, Long.MAX_VALUE, CommitLogPosition.NONE, TimeUnit.MICROSECONDS);
     }
 
     public static CommitLogArchiver construct()
@@ -104,7 +113,7 @@
                         File directory = new File(dir);
                         if (!directory.exists())
                         {
-                            if (!directory.mkdir())
+                            if (!directory.tryCreateDirectory())
                             {
                                 throw new RuntimeException("Unable to create directory: " + dir);
                             }
@@ -122,7 +131,27 @@
                 {
                     throw new RuntimeException("Unable to parse restore target time", e);
                 }
-                return new CommitLogArchiver(archiveCommand, restoreCommand, restoreDirectories, restorePointInTime, precision);
+
+                String snapshotPosition = commitlog_commands.getProperty("snapshot_commitlog_position");
+                CommitLogPosition snapshotCommitLogPosition;
+                try
+                {
+
+                    snapshotCommitLogPosition = Strings.isNullOrEmpty(snapshotPosition)
+                                                ? CommitLogPosition.NONE
+                                                : CommitLogPosition.serializer.fromString(snapshotPosition);
+                }
+                catch (ParseException | NumberFormatException e)
+                {
+                    throw new RuntimeException("Unable to parse snapshot commit log position", e);
+                }
+
+                return new CommitLogArchiver(archiveCommand,
+                                             restoreCommand,
+                                             restoreDirectories,
+                                             restorePointInTime,
+                                             snapshotCommitLogPosition,
+                                             precision);
             }
         }
         catch (IOException e)
@@ -193,7 +222,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new AssertionError(e);
+            throw new UncheckedInterruptedException(e);
         }
         catch (ExecutionException e)
         {
@@ -218,7 +247,7 @@
 
         for (String dir : restoreDirectories.split(DELIMITER))
         {
-            File[] files = new File(dir).listFiles();
+            File[] files = new File(dir).tryList();
             if (files == null)
             {
                 throw new RuntimeException("Unable to list directory " + dir);
@@ -226,14 +255,14 @@
             for (File fromFile : files)
             {
                 CommitLogDescriptor fromHeader = CommitLogDescriptor.fromHeader(fromFile, DatabaseDescriptor.getEncryptionContext());
-                CommitLogDescriptor fromName = CommitLogDescriptor.isValid(fromFile.getName()) ? CommitLogDescriptor.fromFileName(fromFile.getName()) : null;
+                CommitLogDescriptor fromName = CommitLogDescriptor.isValid(fromFile.name()) ? CommitLogDescriptor.fromFileName(fromFile.name()) : null;
                 CommitLogDescriptor descriptor;
                 if (fromHeader == null && fromName == null)
-                    throw new IllegalStateException("Cannot safely construct descriptor for segment, either from its name or its header: " + fromFile.getPath());
+                    throw new IllegalStateException("Cannot safely construct descriptor for segment, either from its name or its header: " + fromFile.path());
                 else if (fromHeader != null && fromName != null && !fromHeader.equalsIgnoringCompression(fromName))
-                    throw new IllegalStateException(String.format("Cannot safely construct descriptor for segment, as name and header descriptors do not match (%s vs %s): %s", fromHeader, fromName, fromFile.getPath()));
+                    throw new IllegalStateException(String.format("Cannot safely construct descriptor for segment, as name and header descriptors do not match (%s vs %s): %s", fromHeader, fromName, fromFile.path()));
                 else if (fromName != null && fromHeader == null)
-                    throw new IllegalStateException("Cannot safely construct descriptor for segment, as name descriptor implies a version that should contain a header descriptor, but that descriptor could not be read: " + fromFile.getPath());
+                    throw new IllegalStateException("Cannot safely construct descriptor for segment, as name descriptor implies a version that should contain a header descriptor, but that descriptor could not be read: " + fromFile.path());
                 else if (fromHeader != null)
                     descriptor = fromHeader;
                 else descriptor = fromName;
@@ -257,12 +286,12 @@
                 if (toFile.exists())
                 {
                     logger.trace("Skipping restore of archive {} as the segment already exists in the restore location {}",
-                                 fromFile.getPath(), toFile.getPath());
+                                 fromFile.path(), toFile.path());
                     continue;
                 }
 
-                String command = FROM.matcher(restoreCommand).replaceAll(Matcher.quoteReplacement(fromFile.getPath()));
-                command = TO.matcher(command).replaceAll(Matcher.quoteReplacement(toFile.getPath()));
+                String command = FROM.matcher(restoreCommand).replaceAll(Matcher.quoteReplacement(fromFile.path()));
+                command = TO.matcher(command).replaceAll(Matcher.quoteReplacement(toFile.path()));
                 try
                 {
                     exec(command);
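
Besides restore_point_in_time, CommitLogArchiver.construct() above now also reads an optional snapshot_commitlog_position property and parses it with CommitLogPosition.serializer.fromString (shown further below); an empty or missing value falls back to CommitLogPosition.NONE. A hedged example of what such an entry in commitlog_archiving.properties could look like (the numbers are purely illustrative):

    # commit log segment id and position within it, separated by a comma;
    # leave empty or omit to fall back to CommitLogPosition.NONE
    snapshot_commitlog_position=1645555555555, 0
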
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java
index 700f12a..ed2af1b 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java
@@ -22,9 +22,7 @@
 
 import java.io.DataInput;
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.Collections;
@@ -37,9 +35,12 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Objects;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.ParameterizedClass;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.FSReadError;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.security.EncryptionContext;
 import org.json.simple.JSONValue;
@@ -51,6 +52,7 @@
     private static final String SEPARATOR = "-";
     private static final String FILENAME_PREFIX = "CommitLog" + SEPARATOR;
     private static final String FILENAME_EXTENSION = ".log";
+    private static final String INDEX_FILENAME_SUFFIX = "_cdc.idx";
     // match both legacy and new version of commitlogs Ex: CommitLog-12345.log and CommitLog-4-12345.log.
     private static final Pattern COMMIT_LOG_FILE_PATTERN = Pattern.compile(FILENAME_PREFIX + "((\\d+)(" + SEPARATOR + "\\d+)?)" + FILENAME_EXTENSION);
 
@@ -131,9 +133,8 @@
 
     public static CommitLogDescriptor fromHeader(File file, EncryptionContext encryptionContext)
     {
-        try (RandomAccessFile raf = new RandomAccessFile(file, "r"))
+        try (FileInputStreamPlus raf = new FileInputStreamPlus(file))
         {
-            assert raf.getFilePointer() == 0;
             return readHeader(raf, encryptionContext);
         }
         catch (EOFException e)
@@ -190,15 +191,27 @@
 
     public static CommitLogDescriptor fromFileName(String name)
     {
-        Matcher matcher;
-        if (!(matcher = COMMIT_LOG_FILE_PATTERN.matcher(name)).matches())
+        Matcher matcher = extractFromFileName(name);
+        long id = Long.parseLong(matcher.group(3).split(SEPARATOR)[1]);
+        return new CommitLogDescriptor(Integer.parseInt(matcher.group(2)), id, null, new EncryptionContext());
+    }
+
+    public static long idFromFileName(String name)
+    {
+        Matcher matcher = extractFromFileName(name);
+        return Long.parseLong(matcher.group(3).split(SEPARATOR)[1]);
+    }
+
+    private static Matcher extractFromFileName(String name)
+    {
+        Matcher matcher = COMMIT_LOG_FILE_PATTERN.matcher(name);
+        if (!matcher.matches())
             throw new RuntimeException("Cannot parse the version of the file: " + name);
 
         if (matcher.group(3) == null)
             throw new UnsupportedOperationException("Commitlog segment is too old to open; upgrade to 1.2.5+ first");
 
-        long id = Long.parseLong(matcher.group(3).split(SEPARATOR)[1]);
-        return new CommitLogDescriptor(Integer.parseInt(matcher.group(2)), id, null, new EncryptionContext());
+        return matcher;
     }
 
     public int getMessagingVersion()
@@ -221,7 +234,21 @@
 
     public String cdcIndexFileName()
     {
-        return FILENAME_PREFIX + version + SEPARATOR + id + "_cdc.idx";
+        return FILENAME_PREFIX + version + SEPARATOR + id + INDEX_FILENAME_SUFFIX;
+    }
+
+    /**
+     * Infer the corresponding CDC index file from a CDC commit log segment file.
+     * @param cdcCommitLogSegment the CDC commit log segment file
+     * @return the CDC index file, or null if it cannot be inferred
+     */
+    public static File inferCdcIndexFile(File cdcCommitLogSegment)
+    {
+        if (!isValid(cdcCommitLogSegment.name()))
+            return null;
+        String cdcFileName = cdcCommitLogSegment.name();
+        String indexFileName = cdcFileName.substring(0, cdcFileName.length() - FILENAME_EXTENSION.length()) + INDEX_FILENAME_SUFFIX;
+        return new File(DatabaseDescriptor.getCDCLogLocation(), indexFileName);
     }
 
     /**
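
inferCdcIndexFile above simply swaps the .log extension for the _cdc.idx suffix and resolves the result against the CDC log location. An illustrative use (the segment name is made up, but matches the CommitLog-<version>-<id>.log pattern):

    // hypothetical hard-linked CDC segment in the cdc_raw directory
    File segment = new File(DatabaseDescriptor.getCDCLogLocation(), "CommitLog-7-1645555555555.log");
    File index = CommitLogDescriptor.inferCdcIndexFile(segment);
    // index now points at .../CommitLog-7-1645555555555_cdc.idx (or is null for an invalid name)
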
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
index 3b20bbc..7e8deca 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
@@ -84,4 +84,8 @@
      * @return A map between active log segments and the compression ratio achieved for each.
      */
     public Map<String, Double> getActiveSegmentCompressionRatios();
+
+    public boolean getCDCBlockWrites();
+
+    public void setCDCBlockWrites(boolean val);
 }
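
The two new MBean methods expose the CDC block-writes switch as a JMX attribute on the MBean that CommitLog.construct() registers under org.apache.cassandra.db:type=Commitlog. A minimal illustrative client sketch; the endpoint, port and lack of authentication are assumptions, not part of this patch:

    import javax.management.Attribute;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class CdcBlockWritesToggle
    {
        public static void main(String[] args) throws Exception
        {
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
            try (JMXConnector connector = JMXConnectorFactory.connect(url))
            {
                MBeanServerConnection mbs = connector.getMBeanServerConnection();
                ObjectName commitLog = new ObjectName("org.apache.cassandra.db:type=Commitlog");
                // getCDCBlockWrites/setCDCBlockWrites surface as the CDCBlockWrites attribute
                boolean blocking = (Boolean) mbs.getAttribute(commitLog, "CDCBlockWrites");
                mbs.setAttribute(commitLog, new Attribute("CDCBlockWrites", !blocking));
            }
        }
    }
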
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogPosition.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogPosition.java
index 3ffb04c..3b3a21a 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogPosition.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogPosition.java
@@ -18,8 +18,11 @@
 package org.apache.cassandra.db.commitlog;
 
 import java.io.IOException;
+import java.text.ParseException;
 import java.util.Comparator;
 
+import com.google.common.base.Strings;
+
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -118,5 +121,20 @@
         {
             return TypeSizes.sizeof(clsp.segmentId) + TypeSizes.sizeof(clsp.position);
         }
+
+        public CommitLogPosition fromString(String position) throws ParseException
+        {
+            if (Strings.isNullOrEmpty(position))
+                return NONE;
+            String[] parts = position.split(",");
+            if (parts.length != 2)
+                throw new ParseException("Commit log position must be given as <segment>,<position>", 0);
+            return new CommitLogPosition(Long.parseLong(parts[0].trim()), Integer.parseInt(parts[1].trim()));
+        }
+
+        public String toString(CommitLogPosition position)
+        {
+            return position == NONE ? "" : String.format("%d, %d", position.segmentId, position.position);
+        }
     }
 }
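
The serializer additions above use the string form "<segmentId>, <position>"; whitespace around the comma is tolerated on parse, and NONE round-trips as the empty string. A small illustrative round trip (the values are made up):

    static void roundTrip() throws java.text.ParseException
    {
        CommitLogPosition pos = CommitLogPosition.serializer.fromString("1645555555555, 0");
        assert pos.segmentId == 1645555555555L && pos.position == 0;
        assert "1645555555555, 0".equals(CommitLogPosition.serializer.toString(pos));
        assert CommitLogPosition.serializer.fromString("") == CommitLogPosition.NONE;
    }
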
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java
index 5123580..451ee37 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java
@@ -25,6 +25,7 @@
 import java.util.zip.CRC32;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -99,7 +100,7 @@
             {
                 if (shouldSkip(file))
                 {
-                    logger.info("Skipping playback of empty log: {}", file.getName());
+                    logger.info("Skipping playback of empty log: {}", file.name());
                 }
                 else
                 {
@@ -172,7 +173,7 @@
                                      boolean tolerateTruncation) throws IOException
     {
         // just transform from the file name (no reading of headers) to determine version
-        CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.getName());
+        CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.name());
 
         try(RandomAccessReader reader = RandomAccessReader.open(file))
         {
@@ -263,7 +264,7 @@
     private boolean shouldSkipSegmentId(File file, CommitLogDescriptor desc, CommitLogPosition minPosition)
     {
         logger.debug("Reading {} (CL version {}, messaging version {}, compression {})",
-            file.getPath(),
+            file.path(),
             desc.version,
             desc.getMessagingVersion(),
             desc.compression);
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
index 39777ec..74aa67d 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
@@ -18,10 +18,8 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.*;
-import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -31,7 +29,10 @@
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Ordering;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.StringUtils;
+
+import org.apache.cassandra.utils.concurrent.Future;
 import org.cliffc.high_scale_lib.NonBlockingHashSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,7 +52,6 @@
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadataRef;
-import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.WrappedRunnable;
 
@@ -130,7 +130,41 @@
                 }
             }
 
-            IntervalSet<CommitLogPosition> filter = persistedIntervals(cfs.getLiveSSTables(), truncatedAt, localHostId);
+            IntervalSet<CommitLogPosition> filter;
+            final CommitLogPosition snapshotPosition = commitLog.archiver.snapshotCommitLogPosition;
+            if (snapshotPosition == CommitLogPosition.NONE)
+            {
+                // normal path: snapshot position is not explicitly specified, find it from sstables
+                if (!cfs.memtableWritesAreDurable())
+                {
+                    filter = persistedIntervals(cfs.getLiveSSTables(), truncatedAt, localHostId);
+                }
+                else
+                {
+                    if (commitLog.archiver.restorePointInTime == Long.MAX_VALUE)
+                    {
+                        // Normal restart, everything is persisted and restored by the memtable itself.
+                        filter = new IntervalSet<>(CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
+                    }
+                    else
+                    {
+                        // Point-in-time restore with a persistent memtable. In this case user should have restored
+                        // the memtable from a snapshot and specified that snapshot's commit log position, reaching
+                        // the "else" path below.
+                        // If they haven't, do not filter any commit log data -- this supports a mode of operation where
+                        // the user deletes old archived commit log segments when a snapshot completes -- but issue a
+                        // message as this may be inefficient / not what the user wants.
+                        logger.info("Point-in-time restore on a persistent memtable started without a snapshot time. " +
+                                    "All commit log data will be replayed.");
+                        filter = IntervalSet.empty();
+                    }
+                }
+            }
+            else
+            {
+                // If the position is specified, it must override whatever we calculate.
+                filter = new IntervalSet<>(CommitLogPosition.NONE, snapshotPosition);
+            }
             cfPersisted.put(cfs.metadata.id, filter);
         }
         CommitLogPosition globalPosition = firstNotCovered(cfPersisted.values());
@@ -170,7 +204,7 @@
         // Can only reach this point if CDC is enabled, thus we have a CDCSegmentManager
         ((CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager).addCDCSize(f.length());
 
-        File dest = new File(DatabaseDescriptor.getCDCLogLocation(), f.getName());
+        File dest = new File(DatabaseDescriptor.getCDCLogLocation(), f.name());
 
         // If hard link already exists, assume it's from a previous node run. If people are mucking around in the cdc_raw
         // directory that's on them.
@@ -212,12 +246,14 @@
             if (keyspace.getName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME))
                 flushingSystem = true;
 
-            futures.addAll(keyspace.flush());
+            futures.addAll(keyspace.flush(ColumnFamilyStore.FlushReason.STARTUP));
         }
 
         // also flush batchlog incase of any MV updates
         if (!flushingSystem)
-            futures.add(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceFlush());
+            futures.add(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME)
+                                .getColumnFamilyStore(SystemKeyspace.BATCHES)
+                                .forceFlush(ColumnFamilyStore.FlushReason.INTERNALLY_FORCED));
 
         FBUtilities.waitOnFutures(futures);
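
The net effect of the replay-filter hunk above: an explicitly configured snapshot commit log position always wins, and everything up to it is skipped; otherwise, tables whose memtable writes are durable skip everything up to the current position on a normal restart but replay everything (with an informational log message) during a point-in-time restore, while all remaining tables keep deriving their filter from live sstables via persistedIntervals(), as before.
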
 
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java
index 64b815e..97a032a 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java
@@ -17,8 +17,6 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
@@ -31,6 +29,8 @@
 import java.util.zip.CRC32;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileWriter;
 import org.cliffc.high_scale_lib.NonBlockingHashMap;
 
 import com.codahale.metrics.Timer;
@@ -48,7 +48,9 @@
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.cassandra.utils.FBUtilities.updateChecksumInt;
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
 
 /*
  * A single commit log file on disk. Manages creation of the file and writing mutations to disk,
@@ -66,19 +68,19 @@
         FORBIDDEN,
         CONTAINS
     }
-    Object cdcStateLock = new Object();
+    final Object cdcStateLock = new Object();
 
     private final static AtomicInteger nextId = new AtomicInteger(1);
     private static long replayLimitId;
     static
     {
         long maxId = Long.MIN_VALUE;
-        for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
+        for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).tryList())
         {
-            if (CommitLogDescriptor.isValid(file.getName()))
-                maxId = Math.max(CommitLogDescriptor.fromFileName(file.getName()).id, maxId);
+            if (CommitLogDescriptor.isValid(file.name()))
+                maxId = Math.max(CommitLogDescriptor.fromFileName(file.name()).id, maxId);
         }
-        replayLimitId = idBase = Math.max(System.currentTimeMillis(), maxId + 1);
+        replayLimitId = idBase = Math.max(currentTimeMillis(), maxId + 1);
     }
 
     // The commit log entry overhead in bytes (int: length + int: head checksum + int: tail checksum)
@@ -110,7 +112,7 @@
     private int endOfBuffer;
 
     // a signal for writers to wait on to confirm the log message they provided has been written to disk
-    private final WaitQueue syncComplete = new WaitQueue();
+    private final WaitQueue syncComplete = newWaitQueue();
 
     // a map of Cf->dirty interval in this segment; if interval is not covered by the clean set, the log contains unflushed data
     private final NonBlockingHashMap<TableId, IntegerInterval> tableDirty = new NonBlockingHashMap<>(1024);
@@ -368,7 +370,11 @@
 
         if (flush || close)
         {
-            flush(startMarker, sectionEnd);
+            try (Timer.Context ignored = CommitLog.instance.metrics.waitingOnFlush.time())
+            {
+                flush(startMarker, sectionEnd);
+            }
+
             if (cdcState == CDCState.CONTAINS)
                 writeCDCIndexFile(descriptor, sectionEnd, close);
             lastSyncedOffset = lastMarkerOffset = nextMarker;
@@ -457,7 +463,7 @@
      */
     public String getPath()
     {
-        return logFile.getPath();
+        return logFile.path();
     }
 
     /**
@@ -465,7 +471,7 @@
      */
     public String getName()
     {
-        return logFile.getName();
+        return logFile.name();
     }
 
     /**
@@ -473,7 +479,7 @@
      */
     public File getCDCFile()
     {
-        return new File(DatabaseDescriptor.getCDCLogLocation(), logFile.getName());
+        return new File(DatabaseDescriptor.getCDCLogLocation(), logFile.name());
     }
 
     /**
@@ -501,15 +507,13 @@
         }
     }
 
-    void waitForSync(int position, Timer waitingOnCommit)
+    void waitForSync(int position)
     {
         while (lastSyncedOffset < position)
         {
-            WaitQueue.Signal signal = waitingOnCommit != null ?
-                                      syncComplete.register(waitingOnCommit.time()) :
-                                      syncComplete.register();
+            WaitQueue.Signal signal = syncComplete.register();
             if (lastSyncedOffset < position)
-                signal.awaitUninterruptibly();
+                signal.awaitThrowUncheckedOnInterrupt();
             else
                 signal.cancel();
         }
@@ -673,9 +677,8 @@
     {
         public int compare(File f, File f2)
         {
-            CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(f.getName());
-            CommitLogDescriptor desc2 = CommitLogDescriptor.fromFileName(f2.getName());
-            return Long.compare(desc.id, desc2.id);
+            return Long.compare(CommitLogDescriptor.idFromFileName(f.name()),
+                                CommitLogDescriptor.idFromFileName(f2.name()));
         }
     }
 
@@ -748,7 +751,10 @@
 
         void awaitDiskSync(Timer waitingOnCommit)
         {
-            segment.waitForSync(position, waitingOnCommit);
+            try (Timer.Context ignored = waitingOnCommit.time())
+            {
+                segment.waitForSync(position);
+            }
         }
 
         /**
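
With the waitForSync/awaitDiskSync changes above, the "waiting on commit" latency is measured by the caller with a try-with-resources Timer.Context instead of threading the Timer down into waitForSync. The same Codahale metrics idiom as a stand-alone sketch (the registry and metric name are illustrative):

    import com.codahale.metrics.MetricRegistry;
    import com.codahale.metrics.Timer;

    class MeteredWait
    {
        private final Timer waitingOnCommit = new MetricRegistry().timer("WaitingOnCommit");

        void awaitDiskSync(Runnable waitForSync)
        {
            // Timer.Context is AutoCloseable; closing it records the elapsed time
            try (Timer.Context ignored = waitingOnCommit.time())
            {
                waitForSync.run();
            }
        }
    }
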
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java
index b254b9b..b2e3d06 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java
@@ -18,17 +18,20 @@
 
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.io.IOException;
-import java.nio.file.FileAlreadyExistsException;
-import java.nio.file.FileVisitResult;
 import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.List;
 import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.RateLimiter;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,11 +39,12 @@
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.commitlog.CommitLogSegment.CDCState;
 import org.apache.cassandra.exceptions.CDCWriteException;
-import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.DirectorySizeCalculator;
 import org.apache.cassandra.utils.NoSpamLogger;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
 public class CommitLogSegmentManagerCDC extends AbstractCommitLogSegmentManager
 {
     static final Logger logger = LoggerFactory.getLogger(CommitLogSegmentManagerCDC.class);
@@ -49,7 +53,7 @@
     public CommitLogSegmentManagerCDC(final CommitLog commitLog, String storageDirectory)
     {
         super(commitLog, storageDirectory);
-        cdcSizeTracker = new CDCSizeTracker(new File(DatabaseDescriptor.getCDCLogLocation()));
+        cdcSizeTracker = new CDCSizeTracker(this, new File(DatabaseDescriptor.getCDCLogLocation()));
     }
 
     @Override
@@ -67,23 +71,83 @@
         cdcSizeTracker.processDiscardedSegment(segment);
 
         if (delete)
-            FileUtils.deleteWithConfirm(segment.logFile);
+            segment.logFile.delete();
 
         if (segment.getCDCState() != CDCState.CONTAINS)
         {
             // Always delete hard-link from cdc folder if this segment didn't contain CDC data. Note: File may not exist
             // if processing discard during startup.
             File cdcLink = segment.getCDCFile();
-            if (cdcLink.exists())
-                FileUtils.deleteWithConfirm(cdcLink);
-
             File cdcIndexFile = segment.getCDCIndexFile();
-            if (cdcIndexFile.exists())
-                FileUtils.deleteWithConfirm(cdcIndexFile);
+            deleteCDCFiles(cdcLink, cdcIndexFile);
         }
     }
 
     /**
+     * Delete the oldest hard-linked CDC commit log segments to free up space.
+     * @param bytesToFree the minimum amount of space to free up, in bytes
+     * @return total size under the CDC folder in bytes after deletion
+     */
+    public long deleteOldLinkedCDCCommitLogSegment(long bytesToFree)
+    {
+        if (bytesToFree <= 0)
+            return 0;
+
+        File cdcDir = new File(DatabaseDescriptor.getCDCLogLocation());
+        Preconditions.checkState(cdcDir.isDirectory(), "The CDC directory does not exist.");
+        File[] files = cdcDir.tryList(f -> CommitLogDescriptor.isValid(f.name()));
+        if (files == null || files.length == 0)
+        {
+            logger.warn("Skip deleting due to no CDC commit log segments found.");
+            return 0;
+        }
+        List<File> sorted = Arrays.stream(files)
+                                  // sort by the commit log segment id
+                                  .sorted(new CommitLogSegment.CommitLogSegmentFileComparator())
+                                  .collect(Collectors.toList());
+        long bytesDeleted = 0;
+        long bytesRemaining = 0;
+        boolean deletionCompleted = false;
+        // keep deleting from oldest to newest until enough space has been freed or we reach the segment currently being written to
+        for (File linkedCdcFile : sorted)
+        {
+            // only evaluate/update when deletionCompleted is false
+            if (!deletionCompleted)
+            {
+                deletionCompleted = bytesDeleted >= bytesToFree || linkedCdcFile.equals(allocatingFrom().getCDCFile());
+            }
+
+            if (deletionCompleted)
+            {
+                bytesRemaining += linkedCdcFile.length();
+            }
+            else
+            {
+                File cdcIndexFile = CommitLogDescriptor.inferCdcIndexFile(linkedCdcFile);
+                bytesDeleted += deleteCDCFiles(linkedCdcFile, cdcIndexFile);
+            }
+        }
+        return bytesRemaining;
+    }
+
+    private long deleteCDCFiles(File cdcLink, File cdcIndexFile)
+    {
+        long total = 0;
+        if (cdcLink != null && cdcLink.exists())
+        {
+            total += cdcLink.length();
+            cdcLink.delete();
+        }
+
+        if (cdcIndexFile != null && cdcIndexFile.exists())
+        {
+            total += cdcIndexFile.length();
+            cdcIndexFile.delete();
+        }
+        return total;
+    }
+
+    /**
      * Initiates the shutdown process for the management thread. Also stops the cdc on-disk size calculator executor.
      */
     public void shutdown()
@@ -133,10 +197,10 @@
         if (segment.getCDCState() != CDCState.FORBIDDEN)
             return;
 
-        if (cdcSizeTracker.hasSpaceForNewSegment())
+        if (!DatabaseDescriptor.getCDCBlockWrites()
+            || cdcSizeTracker.sizeInProgress.get() + DatabaseDescriptor.getCommitLogSegmentSize() < DatabaseDescriptor.getCDCTotalSpace())
         {
             CDCState oldState = segment.setCDCState(CDCState.PERMITTED);
-
             if (oldState == CDCState.FORBIDDEN)
             {
                 FileUtils.createHardLink(segment.logFile, segment.getCDCFile());
@@ -149,11 +213,11 @@
     {
         if (mutation.trackedByCDC() && segment.getCDCState() == CDCState.FORBIDDEN)
         {
+            cdcSizeTracker.submitOverflowSizeRecalculation();
             String logMsg = String.format("Rejecting mutation to keyspace %s. Free up space in %s by processing CDC logs. " +
                                           "Total CDC bytes on disk is %s.",
                                           mutation.getKeyspaceName(), DatabaseDescriptor.getCDCLogLocation(),
-                                          cdcSizeTracker.totalCDCSizeOnDisk());
-            cdcSizeTracker.submitOverflowSizeRecalculation();
+                                          cdcSizeTracker.sizeInProgress.get());
             NoSpamLogger.log(logger,
                              NoSpamLogger.Level.WARN,
                              10,
@@ -173,7 +237,6 @@
     public CommitLogSegment createSegment()
     {
         CommitLogSegment segment = CommitLogSegment.createSegment(commitLog, this);
-
         cdcSizeTracker.processNewSegment(segment);
         // After processing, the state of the segment can either be PERMITTED or FORBIDDEN
         if (segment.getCDCState() == CDCState.PERMITTED)
@@ -195,12 +258,12 @@
         super.handleReplayedSegment(file);
 
         // delete untracked cdc segment hard link files if their index files do not exist
-        File cdcFile = new File(DatabaseDescriptor.getCDCLogLocation(), file.getName());
-        File cdcIndexFile = new File(DatabaseDescriptor.getCDCLogLocation(), CommitLogDescriptor.fromFileName(file.getName()).cdcIndexFileName());
+        File cdcFile = new File(DatabaseDescriptor.getCDCLogLocation(), file.name());
+        File cdcIndexFile = new File(DatabaseDescriptor.getCDCLogLocation(), CommitLogDescriptor.fromFileName(file.name()).cdcIndexFileName());
         if (cdcFile.exists() && !cdcIndexFile.exists())
         {
             logger.trace("(Unopened) CDC segment {} is no longer needed and will be deleted now", cdcFile);
-            FileUtils.deleteWithConfirm(cdcFile);
+            cdcFile.delete();
         }
     }
 
@@ -223,13 +286,17 @@
     {
         private final RateLimiter rateLimiter = RateLimiter.create(1000.0 / DatabaseDescriptor.getCDCDiskCheckInterval());
         private ExecutorService cdcSizeCalculationExecutor;
+        private final CommitLogSegmentManagerCDC segmentManager;
+        // track the total size between two directory size calculations
+        private final AtomicLong sizeInProgress;
 
-        // Used instead of size during walk to remove chance of over-allocation
-        private volatile long sizeInProgress = 0;
+        private final File path;
 
-        CDCSizeTracker(File path)
+        CDCSizeTracker(CommitLogSegmentManagerCDC segmentManager, File path)
         {
-            super(path);
+            this.path = path;
+            this.segmentManager = segmentManager;
+            this.sizeInProgress = new AtomicLong(0);
         }
 
         /**
@@ -237,15 +304,19 @@
          */
         public void start()
         {
-            size = 0;
-            cdcSizeCalculationExecutor = new ThreadPoolExecutor(1, 1, 1000, TimeUnit.SECONDS, new SynchronousQueue<>(), new ThreadPoolExecutor.DiscardPolicy());
+            sizeInProgress.getAndSet(0);
+            cdcSizeCalculationExecutor = executorFactory().configureSequential("CDCSizeCalculationExecutor")
+                                                          .withRejectedExecutionHandler(new ThreadPoolExecutor.DiscardPolicy())
+                                                          .withQueueLimit(0)
+                                                          .withKeepAlive(1000, TimeUnit.SECONDS)
+                                                          .build();
         }
 
         /**
          * Synchronous size recalculation on each segment creation/deletion call could lead to very long delays in new
          * segment allocation, thus long delays in thread signaling to wake waiting allocation / writer threads.
          *
-         * This can be reached either from the segment management thread in ABstractCommitLogSegmentManager or from the
+         * This can be reached either from the segment management thread in AbstractCommitLogSegmentManager or from the
          * size recalculation executor, so we synchronize on this object to reduce the race overlap window available for
          * size to get off.
          *
@@ -253,14 +324,32 @@
          */
         void processNewSegment(CommitLogSegment segment)
         {
+            int segmentSize = defaultSegmentSize();
+            long allowance = DatabaseDescriptor.getCDCTotalSpace();
+            boolean blocking = DatabaseDescriptor.getCDCBlockWrites();
+
             // See synchronization in CommitLogSegment.setCDCState
-            synchronized(segment.cdcStateLock)
+            synchronized (segment.cdcStateLock)
             {
-                segment.setCDCState(hasSpaceForNewSegment()
-                                    ? CDCState.PERMITTED
-                                    : CDCState.FORBIDDEN);
+                segment.setCDCState(blocking && segmentSize + sizeInProgress.get() > allowance
+                                    ? CDCState.FORBIDDEN
+                                    : CDCState.PERMITTED);
+
+                // Aggressively count in the (estimated) size of new segments.
                 if (segment.getCDCState() == CDCState.PERMITTED)
-                    size += defaultSegmentSize();
+                    addSize(segmentSize);
+            }
+
+            // Remove the oldest cdc segment file when exceeding the CDC storage allowance
+            if (!blocking && sizeInProgress.get() > allowance)
+            {
+                long bytesToFree = sizeInProgress.get() - allowance;
+                long remainingSize = segmentManager.deleteOldLinkedCDCCommitLogSegment(bytesToFree);
+                long releasedSize = sizeInProgress.get() - remainingSize;
+                sizeInProgress.getAndSet(remainingSize);
+                logger.debug("Freed up {} ({}) bytes after deleting the oldest CDC commit log segments in non-blocking mode. " +
+                             "Total on-disk CDC size: {}; allowed CDC size: {}",
+                             releasedSize, bytesToFree, remainingSize, allowance);
             }
 
             // Take this opportunity to kick off a recalc to pick up any consumer file deletion.
@@ -269,25 +358,29 @@
 
         void processDiscardedSegment(CommitLogSegment segment)
         {
-            // See synchronization in CommitLogSegment.setCDCState
-            synchronized(segment.cdcStateLock)
+            if (!segment.getCDCFile().exists())
             {
-                // Add to flushed size before decrementing unflushed so we don't have a window of false generosity
+                logger.debug("Not processing discarded CommitLogSegment {}; this segment appears to have been deleted already.", segment);
+                return;
+            }
+
+            synchronized (segment.cdcStateLock)
+            {
+                // Add to flushed size before decrementing unflushed, so we don't have a window of false generosity
                 if (segment.getCDCState() == CDCState.CONTAINS)
-                    size += segment.onDiskSize();
+                    addSize(segment.onDiskSize());
+
+                // Subtract the (estimated) size of the segment from processNewSegment.
+                // For a segment in the CONTAINS state, we add the actual onDiskSize and remove the estimated size.
+                // For the segment that remains in PERMITTED, the file is to be deleted and the estimate should be returned.
                 if (segment.getCDCState() != CDCState.FORBIDDEN)
-                    size -= defaultSegmentSize();
+                    addSize(-defaultSegmentSize());
             }
 
             // Take this opportunity to kick off a recalc to pick up any consumer file deletion.
             submitOverflowSizeRecalculation();
         }
 
-        long allowableCDCBytes()
-        {
-            return (long)DatabaseDescriptor.getCDCSpaceInMB() * 1024 * 1024;
-        }
-
         public void submitOverflowSizeRecalculation()
         {
             try
@@ -305,6 +398,8 @@
 
         private int defaultSegmentSize()
         {
+            // CommitLogSegmentSize is only loaded from yaml.
+            // There is a setter, but it is used only for testing.
             return DatabaseDescriptor.getCommitLogSegmentSize();
         }
 
@@ -312,25 +407,16 @@
         {
             try
             {
-                // The Arrays.stream approach is considerably slower on Windows than linux
-                sizeInProgress = 0;
+                resetSize();
                 Files.walkFileTree(path.toPath(), this);
-                size = sizeInProgress;
+                sizeInProgress.getAndSet(getAllocatedSize());
             }
             catch (IOException ie)
             {
-                CommitLog.instance.handleCommitError("Failed CDC Size Calculation", ie);
+                CommitLog.handleCommitError("Failed CDC Size Calculation", ie);
             }
         }
 
-        @Override
-        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException
-        {
-            sizeInProgress += attrs.size();
-            return FileVisitResult.CONTINUE;
-        }
-
-
         public void shutdown()
         {
             if (cdcSizeCalculationExecutor != null && !cdcSizeCalculationExecutor.isShutdown())
@@ -341,17 +427,7 @@
 
         private void addSize(long toAdd)
         {
-            size += toAdd;
-        }
-
-        private long totalCDCSizeOnDisk()
-        {
-            return size;
-        }
-
-        private boolean hasSpaceForNewSegment()
-        {
-            return defaultSegmentSize() + totalCDCSizeOnDisk() <= allowableCDCBytes();
+            sizeInProgress.getAndAdd(toAdd);
         }
     }
 
@@ -361,19 +437,15 @@
     @VisibleForTesting
     public long updateCDCTotalSize()
     {
+        long sleepTime = DatabaseDescriptor.getCDCDiskCheckInterval() + 50L;
+        // Give the in-flight size update, if any, time to finish; this avoids having to modify production code purely for testing purposes.
+        Uninterruptibles.sleepUninterruptibly(sleepTime, TimeUnit.MILLISECONDS);
         cdcSizeTracker.submitOverflowSizeRecalculation();
-
         // Give the update time to run
-        try
-        {
-            Thread.sleep(DatabaseDescriptor.getCDCDiskCheckInterval() + 10);
-        }
-        catch (InterruptedException e) {}
-
+        Uninterruptibles.sleepUninterruptibly(sleepTime, TimeUnit.MILLISECONDS);
+        // Then update the state of the segment it is allocating from. In production, the state is updated during "allocate".
         if (allocatingFrom().getCDCState() == CDCState.FORBIDDEN)
             cdcSizeTracker.processNewSegment(allocatingFrom());
-
-        return cdcSizeTracker.totalCDCSizeOnDisk();
+        return cdcSizeTracker.getAllocatedSize();
     }
 }
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java
index 2682114..1ae2f13 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
-
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.io.util.FileUtils;
 
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentReader.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentReader.java
index e23a915..33e70c1 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentReader.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentReader.java
@@ -26,6 +26,10 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.AbstractIterator;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.db.commitlog.EncryptedFileSegmentInputStream.ChunkProvider;
 import org.apache.cassandra.db.commitlog.CommitLogReadHandler.*;
 import org.apache.cassandra.io.FSReadError;
@@ -46,6 +50,11 @@
  */
 public class CommitLogSegmentReader implements Iterable<CommitLogSegmentReader.SyncSegment>
 {
+    public static final String ALLOW_IGNORE_SYNC_CRC = Config.PROPERTY_PREFIX + "commitlog.allow_ignore_sync_crc";
+    private static volatile boolean allowSkipSyncMarkerCrc = Boolean.getBoolean(ALLOW_IGNORE_SYNC_CRC);
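+    // Assuming Config.PROPERTY_PREFIX resolves to "cassandra.", this can be enabled at startup with
+    // -Dcassandra.commitlog.allow_ignore_sync_crc=true, or toggled at runtime via setAllowSkipSyncMarkerCrc(boolean).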
+
+    private static final Logger logger = LoggerFactory.getLogger(CommitLogSegmentReader.class);
+    
     private final CommitLogReadHandler handler;
     private final CommitLogDescriptor descriptor;
     private final RandomAccessReader reader;
@@ -75,6 +84,11 @@
         else
             segmenter = new NoOpSegmenter(reader);
     }
+    
+    public static void setAllowSkipSyncMarkerCrc(boolean allow)
+    {
+        allowSkipSyncMarkerCrc = allow;
+    }
 
     public Iterator<SyncSegment> iterator()
     {
@@ -151,8 +165,23 @@
         updateChecksumInt(crc, (int) reader.getPosition());
         final int end = reader.readInt();
         long filecrc = reader.readInt() & 0xffffffffL;
+
         if (crc.getValue() != filecrc)
         {
+            // The next marker position and CRC value are not written atomically, so it is possible for the latter to 
+            // still be zero after the former has been finalized, even though the mutations that follow it are valid.
+            // When there is no compression or encryption enabled, we can ignore a sync marker CRC mismatch and defer 
+            // to the per-mutation CRCs, which may be preferable to preventing startup altogether.
+            if (allowSkipSyncMarkerCrc
+                && descriptor.compression == null && !descriptor.getEncryptionContext().isEnabled()
+                && filecrc == 0 && end != 0)
+            {
+                logger.warn("Skipping sync marker CRC check at position {} (end={}, calculated crc={}) of commit log {}." +
+                            "Using per-mutation CRC checks to ensure correctness...",
+                            offset, end, crc.getValue(), reader.getPath());
+                return end;
+            }
+
             if (end != 0 || filecrc != 0)
             {
                 String msg = String.format("Encountered bad header at position %d of commit log %s, with invalid CRC. " +
diff --git a/src/java/org/apache/cassandra/db/commitlog/EncryptedSegment.java b/src/java/org/apache/cassandra/db/commitlog/EncryptedSegment.java
index a13f615..f503658 100644
--- a/src/java/org/apache/cassandra/db/commitlog/EncryptedSegment.java
+++ b/src/java/org/apache/cassandra/db/commitlog/EncryptedSegment.java
@@ -27,7 +27,6 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.FSWriteError;
-import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.io.compress.ICompressor;
 import org.apache.cassandra.security.EncryptionUtils;
 import org.apache.cassandra.security.EncryptionContext;
diff --git a/src/java/org/apache/cassandra/db/commitlog/GroupCommitLogService.java b/src/java/org/apache/cassandra/db/commitlog/GroupCommitLogService.java
index a76923e..ad4448a 100644
--- a/src/java/org/apache/cassandra/db/commitlog/GroupCommitLogService.java
+++ b/src/java/org/apache/cassandra/db/commitlog/GroupCommitLogService.java
@@ -35,7 +35,7 @@
     {
         // wait until record has been safely persisted to disk
         pending.incrementAndGet();
-        // wait for commitlog_sync_group_window_in_ms
+        // wait for commitlog_sync_group_window
         alloc.awaitDiskSync(commitLog.metrics.waitingOnCommit);
         pending.decrementAndGet();
     }
diff --git a/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java b/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java
index 45db2f6..1108739 100644
--- a/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java
+++ b/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java
@@ -207,4 +207,4 @@
         }
     }
 
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java b/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java
index 6ecdbd3..d564117 100644
--- a/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java
+++ b/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java
@@ -90,7 +90,7 @@
         {
             throw new FSWriteError(e, getPath());
         }
-        NativeLibrary.trySkipCache(fd, startMarker, nextMarker, logFile.getAbsolutePath());
+        NativeLibrary.trySkipCache(fd, startMarker, nextMarker, logFile.absolutePath());
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java b/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java
index e94c616..ae170a8 100644
--- a/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java
+++ b/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java
@@ -21,6 +21,8 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 class PeriodicCommitLogService extends AbstractCommitLogService
 {
     private static final long blockWhenSyncLagsNanos = TimeUnit.MILLISECONDS.toNanos(DatabaseDescriptor.getPeriodicCommitLogSyncBlock());
@@ -33,7 +35,7 @@
 
     protected void maybeWaitForSync(CommitLogSegment.Allocation alloc)
     {
-        long expectedSyncTime = System.nanoTime() - blockWhenSyncLagsNanos;
+        long expectedSyncTime = nanoTime() - blockWhenSyncLagsNanos;
         if (lastSyncedAt < expectedSyncTime)
         {
             pending.incrementAndGet();
@@ -41,4 +43,4 @@
             pending.decrementAndGet();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java
index 0b37c22..5fe1df7 100644
--- a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java
@@ -38,11 +38,14 @@
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.schema.CompactionParams;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.io.sstable.Component.DATA;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Pluggable compaction strategy determines how SSTables get merged.
@@ -388,7 +391,7 @@
         // since we use estimations to calculate, there is a chance that compaction will not drop tombstones actually.
         // if that happens we will end up in infinite compaction loop, so first we check enough if enough time has
         // elapsed since SSTable created.
-        if (System.currentTimeMillis() < sstable.getCreationTimeFor(Component.DATA) + tombstoneCompactionInterval * 1000)
+        if (currentTimeMillis() < sstable.getCreationTimeFor(DATA) + tombstoneCompactionInterval * 1000)
            return false;
 
         double droppableRatio = sstable.getEstimatedDroppableTombstoneRatio(gcBefore);
@@ -542,7 +545,7 @@
     public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor,
                                                        long keyCount,
                                                        long repairedAt,
-                                                       UUID pendingRepair,
+                                                       TimeUUID pendingRepair,
                                                        boolean isTransient,
                                                        MetadataCollector meta,
                                                        SerializationHeader header,
diff --git a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionTask.java b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionTask.java
index 989c21c..40c4cb4 100644
--- a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionTask.java
@@ -19,7 +19,6 @@
 
 import java.util.Iterator;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.base.Preconditions;
 
@@ -28,6 +27,7 @@
 import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter;
 import org.apache.cassandra.io.FSDiskFullWriteError;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.WrappedRunnable;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 
@@ -67,7 +67,7 @@
             Iterator<SSTableReader> iter = sstables.iterator();
             SSTableReader first = iter.next();
             boolean isRepaired = first.isRepaired();
-            UUID pendingRepair = first.getPendingRepair();
+            TimeUUID pendingRepair = first.getPendingRepair();
             while (iter.hasNext())
             {
                 SSTableReader next = iter.next();
diff --git a/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java b/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java
index 95fc7b8..de6ff71 100644
--- a/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java
+++ b/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java
@@ -23,7 +23,6 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.function.Supplier;
 
 import com.google.common.base.Preconditions;
@@ -31,7 +30,6 @@
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
-import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.index.Index;
@@ -41,6 +39,7 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.CompactionParams;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Wrapper that's aware of how sstables are divided between separate strategies,
@@ -193,7 +192,7 @@
     public abstract SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor,
                                                                 long keyCount,
                                                                 long repairedAt,
-                                                                UUID pendingRepair,
+                                                                TimeUUID pendingRepair,
                                                                 boolean isTransient,
                                                                 MetadataCollector collector,
                                                                 SerializationHeader header,
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionController.java b/src/java/org/apache/cassandra/db/compaction/CompactionController.java
index cee2b58..6480631 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionController.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionController.java
@@ -28,13 +28,12 @@
 
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.rows.UnfilteredRowIterator;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
-import org.apache.cassandra.utils.AlwaysPresentFilter;
 import org.apache.cassandra.utils.OverlapIterator;
 import org.apache.cassandra.utils.concurrent.Refs;
 
@@ -258,10 +257,7 @@
 
         for (SSTableReader sstable: filteredSSTables)
         {
-            // if we don't have bloom filter(bf_fp_chance=1.0 or filter file is missing),
-            // we check index file instead.
-            if (sstable.getBloomFilter() instanceof AlwaysPresentFilter && sstable.getPosition(key, SSTableReader.Operator.EQ, false) != null
-                || sstable.getBloomFilter().isPresent(key))
+            if (sstable.maybePresent(key))
             {
                 minTimestampSeen = Math.min(minTimestampSeen, sstable.getMinTimestamp());
                 hasTimestamp = true;
@@ -272,10 +268,9 @@
         {
             if (memtable.getMinTimestamp() != Memtable.NO_MIN_TIMESTAMP)
             {
-                Partition partition = memtable.getPartition(key);
-                if (partition != null)
+                if (memtable.rowIterator(key) != null)
                 {
-                    minTimestampSeen = Math.min(minTimestampSeen, partition.stats().minTimestamp);
+                    minTimestampSeen = Math.min(minTimestampSeen, memtable.getMinTimestamp());
                     hasTimestamp = true;
                 }
             }
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionInfo.java b/src/java/org/apache/cassandra/db/compaction/CompactionInfo.java
index bdddaab..513adfa 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionInfo.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionInfo.java
@@ -22,7 +22,6 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
-import java.util.UUID;
 import java.util.function.Predicate;
 
 import com.google.common.base.Joiner;
@@ -30,6 +29,7 @@
 
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.TimeUUID;
 
 public final class CompactionInfo
 {
@@ -48,15 +48,15 @@
     private final long completed;
     private final long total;
     private final Unit unit;
-    private final UUID compactionId;
+    private final TimeUUID compactionId;
     private final ImmutableSet<SSTableReader> sstables;
 
-    public CompactionInfo(TableMetadata metadata, OperationType tasktype, long bytesComplete, long totalBytes, UUID compactionId, Collection<SSTableReader> sstables)
+    public CompactionInfo(TableMetadata metadata, OperationType tasktype, long bytesComplete, long totalBytes, TimeUUID compactionId, Collection<SSTableReader> sstables)
     {
         this(metadata, tasktype, bytesComplete, totalBytes, Unit.BYTES, compactionId, sstables);
     }
 
-    private CompactionInfo(TableMetadata metadata, OperationType tasktype, long completed, long total, Unit unit, UUID compactionId, Collection<SSTableReader> sstables)
+    private CompactionInfo(TableMetadata metadata, OperationType tasktype, long completed, long total, Unit unit, TimeUUID compactionId, Collection<SSTableReader> sstables)
     {
         this.tasktype = tasktype;
         this.completed = completed;
@@ -71,7 +71,7 @@
      * Special compaction info where we always need to cancel the compaction - for example ViewBuilderTask and AutoSavingCache where we don't know
      * the sstables at construction
      */
-    public static CompactionInfo withoutSSTables(TableMetadata metadata, OperationType tasktype, long completed, long total, Unit unit, UUID compactionId)
+    public static CompactionInfo withoutSSTables(TableMetadata metadata, OperationType tasktype, long completed, long total, Unit unit, TimeUUID compactionId)
     {
         return new CompactionInfo(metadata, tasktype, completed, total, unit, compactionId, ImmutableSet.of());
     }
@@ -112,7 +112,7 @@
         return tasktype;
     }
 
-    public UUID getTaskId()
+    public TimeUUID getTaskId()
     {
         return compactionId;
     }
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionInterruptedException.java b/src/java/org/apache/cassandra/db/compaction/CompactionInterruptedException.java
index 129d9fc..b9174ec 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionInterruptedException.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionInterruptedException.java
@@ -17,11 +17,14 @@
  */
 package org.apache.cassandra.db.compaction;
 
+import org.apache.cassandra.utils.Shared;
+
+@Shared
 public class CompactionInterruptedException extends RuntimeException
 {
     private static final long serialVersionUID = -8651427062512310398L;
 
-    public CompactionInterruptedException(CompactionInfo info)
+    public CompactionInterruptedException(Object info)
     {
         super("Compaction interrupted: " + info);
     }
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
index ec6a4d4..a0dc087 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
@@ -19,11 +19,17 @@
 
 import java.util.*;
 import java.util.function.LongPredicate;
+import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Ordering;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 
 import org.apache.cassandra.db.transform.DuplicateRowChecker;
@@ -36,7 +42,15 @@
 import org.apache.cassandra.db.transform.Transformation;
 import org.apache.cassandra.index.transactions.CompactionTransaction;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
+import org.apache.cassandra.metrics.TopPartitionTracker;
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosRows;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static org.apache.cassandra.config.Config.PaxosStatePurging.legacy;
+import static org.apache.cassandra.config.DatabaseDescriptor.paxosStatePurging;
 
 /**
  * Merge multiple iterators over the content of sstable into a "compacted" iterator.
@@ -63,7 +77,7 @@
     private final List<ISSTableScanner> scanners;
     private final ImmutableSet<SSTableReader> sstables;
     private final int nowInSec;
-    private final UUID compactionId;
+    private final TimeUUID compactionId;
 
     private final long totalBytes;
     private long bytesRead;
@@ -79,13 +93,19 @@
     private final UnfilteredPartitionIterator compacted;
     private final ActiveCompactionsTracker activeCompactions;
 
-    public CompactionIterator(OperationType type, List<ISSTableScanner> scanners, AbstractCompactionController controller, int nowInSec, UUID compactionId)
+    public CompactionIterator(OperationType type, List<ISSTableScanner> scanners, AbstractCompactionController controller, int nowInSec, TimeUUID compactionId)
     {
-        this(type, scanners, controller, nowInSec, compactionId, ActiveCompactionsTracker.NOOP);
+        this(type, scanners, controller, nowInSec, compactionId, ActiveCompactionsTracker.NOOP, null);
     }
 
     @SuppressWarnings("resource") // We make sure to close mergedIterator in close() and CompactionIterator is itself an AutoCloseable
-    public CompactionIterator(OperationType type, List<ISSTableScanner> scanners, AbstractCompactionController controller, int nowInSec, UUID compactionId, ActiveCompactionsTracker activeCompactions)
+    public CompactionIterator(OperationType type,
+                              List<ISSTableScanner> scanners,
+                              AbstractCompactionController controller,
+                              int nowInSec,
+                              TimeUUID compactionId,
+                              ActiveCompactionsTracker activeCompactions,
+                              TopPartitionTracker.Collector topPartitionCollector)
     {
         this.controller = controller;
         this.type = type;
@@ -108,8 +128,13 @@
         UnfilteredPartitionIterator merged = scanners.isEmpty()
                                            ? EmptyIterators.unfilteredPartition(controller.cfs.metadata())
                                            : UnfilteredPartitionIterators.merge(scanners, listener());
+        if (topPartitionCollector != null) // need to count tombstones before they are purged
+            merged = Transformation.apply(merged, new TopPartitionTracker.TombstoneCounter(topPartitionCollector, nowInSec));
         merged = Transformation.apply(merged, new GarbageSkipper(controller));
-        merged = Transformation.apply(merged, new Purger(controller, nowInSec));
+        Transformation<UnfilteredRowIterator> purger = isPaxos(controller.cfs) && paxosStatePurging() != legacy
+                                                       ? new PaxosPurger(nowInSec)
+                                                       : new Purger(controller, nowInSec);
+        merged = Transformation.apply(merged, purger);
         merged = DuplicateRowChecker.duringCompaction(merged, type);
         compacted = Transformation.apply(merged, new AbortableUnfilteredPartitionTransformation(this));
     }
@@ -240,6 +265,11 @@
         bytesRead = n;
     }
 
+    public long getBytesRead()
+    {
+        return bytesRead;
+    }
+
     public boolean hasNext()
     {
         return compacted.hasNext();
@@ -555,6 +585,78 @@
         }
     }
 
+    private class PaxosPurger extends Transformation<UnfilteredRowIterator>
+    {
+        private final long nowInSec;
+        private final long paxosPurgeGraceMicros = DatabaseDescriptor.getPaxosPurgeGrace(MICROSECONDS);
+        private final Map<TableId, PaxosRepairHistory.Searcher> tableIdToHistory = new HashMap<>();
+        private Token currentToken;
+        private int compactedUnfiltered;
+
+        private PaxosPurger(long nowInSec)
+        {
+            this.nowInSec = nowInSec;
+        }
+
+        protected void onEmptyPartitionPostPurge(DecoratedKey key)
+        {
+            if (type == OperationType.COMPACTION)
+                controller.cfs.invalidateCachedPartition(key);
+        }
+
+        protected void updateProgress()
+        {
+            if ((++compactedUnfiltered) % UNFILTERED_TO_UPDATE_PROGRESS == 0)
+                updateBytesRead();
+        }
+
+        @Override
+        @SuppressWarnings("resource")
+        protected UnfilteredRowIterator applyToPartition(UnfilteredRowIterator partition)
+        {
+            currentToken = partition.partitionKey().getToken();
+            UnfilteredRowIterator purged = Transformation.apply(partition, this);
+            if (purged.isEmpty())
+            {
+                onEmptyPartitionPostPurge(purged.partitionKey());
+                purged.close();
+                return null;
+            }
+
+            return purged;
+        }
+
+        @Override
+        protected Row applyToRow(Row row)
+        {
+            updateProgress();
+            TableId tableId = PaxosRows.getTableId(row);
+
+            switch (paxosStatePurging())
+            {
+                default: throw new AssertionError();
+                case legacy:
+                case gc_grace:
+                {
+                    TableMetadata metadata = Schema.instance.getTableMetadata(tableId);
+                    return row.purgeDataOlderThan(TimeUnit.SECONDS.toMicros(nowInSec - (metadata == null ? (3 * 3600) : metadata.params.gcGraceSeconds)), false);
+                }
+                case repaired:
+                {
+                    PaxosRepairHistory.Searcher history = tableIdToHistory.computeIfAbsent(tableId, find -> {
+                        TableMetadata metadata = Schema.instance.getTableMetadata(find);
+                        if (metadata == null)
+                            return null;
+                        return Keyspace.openAndGetStore(metadata).getPaxosRepairHistory().searcher();
+                    });
+
+                    return history == null ? row :
+                           row.purgeDataOlderThan(history.ballotForToken(currentToken).unixMicros() - paxosPurgeGraceMicros, false);
+                }
+            }
+        }
+    }
+
     private static class AbortableUnfilteredPartitionTransformation extends Transformation<UnfilteredRowIterator>
     {
         private final AbortableUnfilteredRowTransformation abortableIter;
@@ -573,7 +675,7 @@
         }
     }
 
-    private static class AbortableUnfilteredRowTransformation extends Transformation
+    private static class AbortableUnfilteredRowTransformation extends Transformation<UnfilteredRowIterator>
     {
         private final CompactionIterator iter;
 
@@ -589,4 +691,9 @@
             return row;
         }
     }
+
+    private static boolean isPaxos(ColumnFamilyStore cfs)
+    {
+        return cfs.name.equals(SystemKeyspace.PAXOS) && cfs.keyspace.getName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME);
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java
index f473be7..9bf063b 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java
@@ -41,10 +41,16 @@
 import com.fasterxml.jackson.databind.node.ArrayNode;
 import com.fasterxml.jackson.databind.node.JsonNodeFactory;
 import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.NoSpamLogger;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class CompactionLogger
 {
     public interface Strategy
@@ -103,7 +109,7 @@
 
     private static final JsonNodeFactory json = JsonNodeFactory.instance;
     private static final Logger logger = LoggerFactory.getLogger(CompactionLogger.class);
-    private static final Writer serializer = new CompactionLogSerializer();
+    private static final CompactionLogSerializer serializer = new CompactionLogSerializer();
     private final WeakReference<ColumnFamilyStore> cfsRef;
     private final WeakReference<CompactionStrategyManager> csmRef;
     private final AtomicInteger identifier = new AtomicInteger(0);
@@ -165,7 +171,7 @@
     private JsonNode formatSSTable(AbstractCompactionStrategy strategy, SSTableReader sstable)
     {
         ObjectNode node = json.objectNode();
-        node.put("generation", sstable.descriptor.generation);
+        node.put("generation", sstable.descriptor.id.toString());
         node.put("version", sstable.descriptor.version.getVersion());
         node.put("size", sstable.onDiskLength());
         JsonNode logResult = strategy.strategyLogger().sstable(sstable);
@@ -220,7 +226,7 @@
             return;
         node.put("keyspace", cfs.keyspace.getName());
         node.put("table", cfs.getTableName());
-        node.put("time", System.currentTimeMillis());
+        node.put("time", currentTimeMillis());
     }
 
     private JsonNode startStrategies()
@@ -295,7 +301,7 @@
     private static class CompactionLogSerializer implements Writer
     {
         private static final String logDirectory = System.getProperty("cassandra.logdir", ".");
-        private final ExecutorService loggerService = Executors.newFixedThreadPool(1);
+        private final ExecutorPlus loggerService = executorFactory().sequential("CompactionLogger");
         // This is only accessed on the logger service thread, so it does not need to be thread safe
         private final Set<Object> rolled = new HashSet<>();
         private OutputStreamWriter stream;
@@ -303,13 +309,13 @@
         private static OutputStreamWriter createStream() throws IOException
         {
             int count = 0;
-            Path compactionLog = Paths.get(logDirectory, "compaction.log");
+            Path compactionLog = new File(logDirectory, "compaction.log").toPath();
             if (Files.exists(compactionLog))
             {
                 Path tryPath = compactionLog;
                 while (Files.exists(tryPath))
                 {
-                    tryPath = Paths.get(logDirectory, String.format("compaction-%d.log", count++));
+                    tryPath = new File(logDirectory, String.format("compaction-%d.log", count++)).toPath();
                 }
                 Files.move(compactionLog, tryPath);
             }
@@ -357,4 +363,10 @@
             });
         }
     }
+
+    public static void shutdownNowAndWait(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
+    {
+        ExecutorUtils.shutdownNowAndWait(timeout, unit, serializer.loggerService);
+    }
+
 }
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index b53f331..347f9b3 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db.compaction;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -29,19 +28,15 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BooleanSupplier;
 import java.util.function.Predicate;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import javax.management.openmbean.OpenDataException;
 import javax.management.openmbean.TabularData;
@@ -57,19 +52,14 @@
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Multiset;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListenableFutureTask;
 import com.google.common.util.concurrent.RateLimiter;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import io.netty.util.concurrent.FastThreadLocal;
 import org.apache.cassandra.cache.AutoSavingCache;
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.concurrent.WrappedExecutorPlus;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -93,19 +83,21 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.index.SecondaryIndexBuilder;
+import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
 import org.apache.cassandra.io.sstable.IndexSummaryRedistribution;
 import org.apache.cassandra.io.sstable.SSTableRewriter;
-import org.apache.cassandra.io.sstable.SnapshotDeletingTask;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableWriter;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.metrics.CompactionMetrics;
 import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
@@ -116,13 +108,21 @@
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.Throwables;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.WrappedRunnable;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.Refs;
 
 import static java.util.Collections.singleton;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.concurrent.FutureTask.callable;
+import static org.apache.cassandra.config.DatabaseDescriptor.getConcurrentCompactors;
+import static org.apache.cassandra.db.compaction.CompactionManager.CompactionExecutor.compactionThreadGroup;
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * <p>
@@ -144,17 +144,6 @@
     public static final int NO_GC = Integer.MIN_VALUE;
     public static final int GC_ALL = Integer.MAX_VALUE;
 
-    // A thread local that tells us if the current thread is owned by the compaction manager. Used
-    // by CounterContext to figure out if it should log a warning for invalid counter shards.
-    public static final FastThreadLocal<Boolean> isCompactionManager = new FastThreadLocal<Boolean>()
-    {
-        @Override
-        protected Boolean initialValue()
-        {
-            return false;
-        }
-    };
-
     static
     {
         instance = new CompactionManager();
@@ -191,18 +180,30 @@
      */
     public RateLimiter getRateLimiter()
     {
-        setRate(DatabaseDescriptor.getCompactionThroughputMbPerSec());
+        setRateInBytes(DatabaseDescriptor.getCompactionThroughputBytesPerSec());
         return compactionRateLimiter;
     }
 
     /**
-     * Sets the rate for the rate limiter. When compaction_throughput_mb_per_sec is 0 or node is bootstrapping,
+     * Sets the rate for the rate limiter. When compaction_throughput is 0 or node is bootstrapping,
      * this sets the rate to Double.MAX_VALUE bytes per second.
-     * @param throughPutMbPerSec throughput to set in mb per second
+     * @param throughputMbPerSec throughput to set in MiB/s
+     * @deprecated Use setRateInBytes instead
      */
-    public void setRate(final double throughPutMbPerSec)
+    @Deprecated
+    public void setRate(final double throughputMbPerSec)
     {
-        double throughput = throughPutMbPerSec * 1024.0 * 1024.0;
+        setRateInBytes(throughputMbPerSec * 1024.0 * 1024);
+    }
+
+    /**
+     * Sets the rate for the rate limiter. When compaction_throughput is 0 or node is bootstrapping,
+     * this sets the rate to Double.MAX_VALUE bytes per second.
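+     * For example, {@code setRateInBytes(16L << 20)} throttles compaction to roughly 16 MiB/s.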
+     * @param throughputBytesPerSec throughput to set in B/s
+     */
+    public void setRateInBytes(final double throughputBytesPerSec)
+    {
+        double throughput = throughputBytesPerSec;
         // if throughput is set to 0, throttling is disabled
         if (throughput == 0 || StorageService.instance.isBootstrapMode())
             throughput = Double.MAX_VALUE;
@@ -230,7 +231,7 @@
          * are idle threads stil. (CASSANDRA-4310)
          */
         int count = compactingCF.count(cfs);
-        if (count > 0 && executor.getActiveCount() >= executor.getMaximumPoolSize())
+        if (count > 0 && executor.getActiveTaskCount() >= executor.getMaximumPoolSize())
         {
             logger.trace("Background compaction is still running for {}.{} ({} remaining). Skipping",
                          cfs.keyspace.getName(), cfs.name, count);
@@ -279,6 +280,7 @@
 
         return activeTasks > 0;
     }
+
     /**
      * Shutdowns both compaction and validation executors, cancels running compaction / validation,
      * and waits for tasks to complete if tasks were not cancelable.
@@ -537,7 +539,38 @@
         }, 0, OperationType.VERIFY);
     }
 
-    public AllSSTableOpStatus performSSTableRewrite(final ColumnFamilyStore cfs, final boolean excludeCurrentVersion, int jobs) throws InterruptedException, ExecutionException
+    public AllSSTableOpStatus performSSTableRewrite(final ColumnFamilyStore cfs,
+                                                    final boolean skipIfCurrentVersion,
+                                                    final long skipIfOlderThanTimestamp,
+                                                    final boolean skipIfCompressionMatches,
+                                                    int jobs) throws InterruptedException, ExecutionException
+    {
+        return performSSTableRewrite(cfs, (sstable) -> {
+            // Skip if descriptor version matches current version
+            if (skipIfCurrentVersion && sstable.descriptor.version.equals(sstable.descriptor.getFormat().getLatestVersion()))
+                return false;
+
+            // Skip if SSTable creation time is past given timestamp
+            if (sstable.getCreationTimeFor(Component.DATA) > skipIfOlderThanTimestamp)
+                return false;
+
+            TableMetadata metadata = cfs.metadata.get();
+            // Skip if SSTable compression parameters match current ones
+            if (skipIfCompressionMatches &&
+                ((!sstable.compression && !metadata.params.compression.isEnabled()) ||
+                 (sstable.compression && metadata.params.compression.equals(sstable.getCompressionMetadata().parameters))))
+                return false;
+
+            return true;
+        }, jobs);
+    }
+
+    /**
+     * Performs an SSTable rewrite.
+     *
+     * @param sstableFilter sstables for which the predicate returns {@code false} will be excluded
+     */
+    public AllSSTableOpStatus performSSTableRewrite(final ColumnFamilyStore cfs, Predicate<SSTableReader> sstableFilter, int jobs) throws InterruptedException, ExecutionException
     {
         return parallelAllSSTableOperation(cfs, new OneSSTableOperation()
         {
@@ -550,7 +583,7 @@
                 while (iter.hasNext())
                 {
                     SSTableReader sstable = iter.next();
-                    if (excludeCurrentVersion && sstable.descriptor.version.equals(sstable.descriptor.getFormat().getLatestVersion()))
+                    if (!sstableFilter.test(sstable))
                     {
                         transaction.cancel(sstable);
                         iter.remove();
@@ -741,12 +774,12 @@
     /**
      * Splits the given token ranges of the given sstables into a pending repair silo
      */
-    public ListenableFuture<?> submitPendingAntiCompaction(ColumnFamilyStore cfs,
-                                                           RangesAtEndpoint tokenRanges,
-                                                           Refs<SSTableReader> sstables,
-                                                           LifecycleTransaction txn,
-                                                           UUID sessionId,
-                                                           BooleanSupplier isCancelled)
+    public Future<Void> submitPendingAntiCompaction(ColumnFamilyStore cfs,
+                                                    RangesAtEndpoint tokenRanges,
+                                                    Refs<SSTableReader> sstables,
+                                                    LifecycleTransaction txn,
+                                                    TimeUUID sessionId,
+                                                    BooleanSupplier isCancelled)
     {
         Runnable runnable = new WrappedRunnable()
         {
@@ -759,7 +792,7 @@
             }
         };
 
-        ListenableFuture<?> task = null;
+        Future<Void> task = null;
         try
         {
             task = executor.submitIfRunning(runnable, "pending anticompaction");
@@ -784,7 +817,7 @@
                                                      Iterator<SSTableReader> sstableIterator,
                                                      Collection<Range<Token>> ranges,
                                                      LifecycleTransaction txn,
-                                                     UUID sessionID,
+                                                     TimeUUID sessionID,
                                                      boolean isTransient) throws IOException
     {
         if (ranges.isEmpty())
@@ -819,12 +852,20 @@
                                       RangesAtEndpoint replicas,
                                       Refs<SSTableReader> validatedForRepair,
                                       LifecycleTransaction txn,
-                                      UUID sessionID,
+                                      TimeUUID sessionID,
                                       BooleanSupplier isCancelled) throws IOException
     {
         try
         {
-            ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
+            ActiveRepairService.ParentRepairSession prs;
+            try
+            {
+                prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
+            }
+            catch (NoSuchRepairSessionException e)
+            {
+                throw new CompactionInterruptedException(e.getMessage());
+            }
             Preconditions.checkArgument(!prs.isPreview(), "Cannot anticompact for previews");
             Preconditions.checkArgument(!replicas.isEmpty(), "No ranges to anti-compact");
 
@@ -852,7 +893,7 @@
         logger.info("{} Completed anticompaction successfully", PreviewKind.NONE.logPrefix(sessionID));
     }
 
-    static void validateSSTableBoundsForAnticompaction(UUID sessionID,
+    static void validateSSTableBoundsForAnticompaction(TimeUUID sessionID,
                                                        Collection<SSTableReader> sstables,
                                                        RangesAtEndpoint ranges)
     {
@@ -874,7 +915,7 @@
     }
 
     @VisibleForTesting
-    static Set<SSTableReader> findSSTablesToAnticompact(Iterator<SSTableReader> sstableIterator, List<Range<Token>> normalizedRanges, UUID parentRepairSession)
+    static Set<SSTableReader> findSSTablesToAnticompact(Iterator<SSTableReader> sstableIterator, List<Range<Token>> normalizedRanges, TimeUUID parentRepairSession)
     {
         Set<SSTableReader> fullyContainedSSTables = new HashSet<>();
         while (sstableIterator.hasNext())
@@ -944,19 +985,10 @@
         return futures;
     }
 
-    /**
-     * Forces a major compaction of specified token ranges of the specified column family.
-     * <p>
-     * The token ranges will be interpreted as closed intervals to match the closed interval defined by the first and
-     * last keys of a sstable, even though the {@link Range} class is suppossed to be half-open by definition.
-     *
-     * @param cfStore The column family store to be compacted.
-     * @param ranges The token ranges to be compacted, interpreted as closed intervals.
-     */
-    public void forceCompactionForTokenRange(ColumnFamilyStore cfStore, Collection<Range<Token>> ranges)
+    public void forceCompaction(ColumnFamilyStore cfStore, Supplier<Collection<SSTableReader>> sstablesFn, com.google.common.base.Predicate<SSTableReader> sstablesPredicate)
     {
         Callable<CompactionTasks> taskCreator = () -> {
-            Collection<SSTableReader> sstables = sstablesInBounds(cfStore, ranges);
+            Collection<SSTableReader> sstables = sstablesFn.get();
             if (sstables == null || sstables.isEmpty())
             {
                 logger.debug("No sstables found for the provided token range");
@@ -966,7 +998,7 @@
         };
 
         try (CompactionTasks tasks = cfStore.runWithCompactionsDisabled(taskCreator,
-                                                                        (sstable) -> new Bounds<>(sstable.first.getToken(), sstable.last.getToken()).intersects(ranges),
+                                                                        sstablesPredicate,
                                                                         false,
                                                                         false,
                                                                         false))
@@ -989,6 +1021,22 @@
     }
 
     /**
+     * Forces a major compaction of specified token ranges of the specified column family.
+     * <p>
+     * The token ranges will be interpreted as closed intervals to match the closed interval defined by the first and
+     * last keys of an sstable, even though the {@link Range} class is supposed to be half-open by definition.
+     *
+     * @param cfStore The column family store to be compacted.
+     * @param ranges The token ranges to be compacted, interpreted as closed intervals.
+     */
+    public void forceCompactionForTokenRange(ColumnFamilyStore cfStore, Collection<Range<Token>> ranges)
+    {
+        forceCompaction(cfStore,
+                        () -> sstablesInBounds(cfStore, ranges),
+                        sstable -> new Bounds<>(sstable.first.getToken(), sstable.last.getToken()).intersects(ranges));
+    }
+
+    /**
      * Returns the sstables of the specified column family store that intersect with the specified token ranges.
      * <p>
      * The token ranges will be interpreted as closed intervals to match the closed interval defined by the first and
@@ -1021,6 +1069,24 @@
         return sstables;
     }
 
+    public void forceCompactionForKey(ColumnFamilyStore cfStore, DecoratedKey key)
+    {
+        forceCompaction(cfStore, () -> sstablesWithKey(cfStore, key), sstable -> sstable.maybePresent(key));
+    }
+
+    private static Collection<SSTableReader> sstablesWithKey(ColumnFamilyStore cfs, DecoratedKey key)
+    {
+        final Set<SSTableReader> sstables = new HashSet<>();
+        Iterable<SSTableReader> liveTables = cfs.getTracker().getView().liveSSTablesInBounds(key.getToken().minKeyBound(),
+                                                                                             key.getToken().maxKeyBound());
+        for (SSTableReader sstable : liveTables)
+        {
+            if (sstable.maybePresent(key))
+                sstables.add(sstable);
+        }
+        return sstables.isEmpty() ? Collections.emptyList() : sstables;
+    }
+
     public void forceUserDefinedCompaction(String dataFiles)
     {
         String[] filenames = dataFiles.split(",");
@@ -1037,7 +1103,7 @@
             }
             // group by keyspace/columnfamily
             ColumnFamilyStore cfs = Keyspace.open(desc.ksname).getColumnFamilyStore(desc.cfname);
-            descriptors.put(cfs, cfs.getDirectories().find(new File(filename.trim()).getName()));
+            descriptors.put(cfs, cfs.getDirectories().find(new File(filename.trim()).name()));
         }
 
         List<Future<?>> futures = new ArrayList<>(descriptors.size());
@@ -1063,7 +1129,7 @@
             }
             // group by keyspace/columnfamily
             ColumnFamilyStore cfs = Keyspace.open(desc.ksname).getColumnFamilyStore(desc.cfname);
-            desc = cfs.getDirectories().find(new File(filename.trim()).getName());
+            desc = cfs.getDirectories().find(new File(filename.trim()).name());
             if (desc != null)
                 descriptors.put(cfs, desc);
         }
@@ -1168,7 +1234,7 @@
     /* Used in tests. */
     public void disableAutoCompaction()
     {
-        for (String ksname : Schema.instance.getNonSystemKeyspaces())
+        for (String ksname : Schema.instance.getNonSystemKeyspaces().names())
         {
             for (ColumnFamilyStore cfs : Keyspace.open(ksname).getColumnFamilyStores())
                 cfs.disableAutoCompaction();
@@ -1293,7 +1359,7 @@
             return;
         }
 
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         long totalkeysWritten = 0;
 
@@ -1317,7 +1383,7 @@
              ISSTableScanner scanner = cleanupStrategy.getScanner(sstable);
              CompactionController controller = new CompactionController(cfs, txn.originals(), getDefaultGcBefore(cfs, nowInSec));
              Refs<SSTableReader> refs = Refs.ref(Collections.singleton(sstable));
-             CompactionIterator ci = new CompactionIterator(OperationType.CLEANUP, Collections.singletonList(scanner), controller, nowInSec, UUIDGen.getTimeUUID(), active))
+             CompactionIterator ci = new CompactionIterator(OperationType.CLEANUP, Collections.singletonList(scanner), controller, nowInSec, nextTimeUUID(), active, null))
         {
             StatsMetadata metadata = sstable.getSSTableMetadata();
             writer.switchWriter(createWriter(cfs, compactionFileLocation, expectedBloomFilterSize, metadata.repairedAt, metadata.pendingRepair, metadata.isTransient, sstable, txn));
@@ -1351,7 +1417,7 @@
         if (!finished.isEmpty())
         {
             String format = "Cleaned up to %s.  %s to %s (~%d%% of original) for %,d keys.  Time: %,dms.";
-            long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+            long dTime = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
             long startsize = sstable.onDiskLength();
             long endsize = 0;
             for (SSTableReader newSstable : finished)
@@ -1481,7 +1547,7 @@
                                              File compactionFileLocation,
                                              long expectedBloomFilterSize,
                                              long repairedAt,
-                                             UUID pendingRepair,
+                                             TimeUUID pendingRepair,
                                              boolean isTransient,
                                              SSTableReader sstable,
                                              LifecycleTransaction txn)
@@ -1504,7 +1570,7 @@
                                                               File compactionFileLocation,
                                                               int expectedBloomFilterSize,
                                                               long repairedAt,
-                                                              UUID pendingRepair,
+                                                              TimeUUID pendingRepair,
                                                               boolean isTransient,
                                                               Collection<SSTableReader> sstables,
                                                               ILifecycleTransaction txn)
@@ -1551,7 +1617,7 @@
     private void doAntiCompaction(ColumnFamilyStore cfs,
                                   RangesAtEndpoint ranges,
                                   LifecycleTransaction txn,
-                                  UUID pendingRepair,
+                                  TimeUUID pendingRepair,
                                   BooleanSupplier isCancelled)
     {
         int originalCount = txn.originals().size();
@@ -1586,7 +1652,7 @@
     int antiCompactGroup(ColumnFamilyStore cfs,
                          RangesAtEndpoint ranges,
                          LifecycleTransaction txn,
-                         UUID pendingRepair,
+                         TimeUUID pendingRepair,
                          BooleanSupplier isCancelled)
     {
         Preconditions.checkArgument(!ranges.isEmpty(), "need at least one full or transient range");
@@ -1654,7 +1720,7 @@
 
              AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(txn.originals());
              CompactionController controller = new CompactionController(cfs, sstableAsSet, getDefaultGcBefore(cfs, nowInSec));
-             CompactionIterator ci = getAntiCompactionIterator(scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID(), active, isCancelled))
+             CompactionIterator ci = getAntiCompactionIterator(scanners.scanners, controller, nowInSec, nextTimeUUID(), active, isCancelled))
         {
             int expectedBloomFilterSize = Math.max(cfs.metadata().params.minIndexInterval, (int)(SSTableReader.getApproximateKeyCount(sstableAsSet)));
 
@@ -1737,10 +1803,10 @@
     }
 
     @VisibleForTesting
-    public static CompactionIterator getAntiCompactionIterator(List<ISSTableScanner> scanners, CompactionController controller, int nowInSec, UUID timeUUID, ActiveCompactionsTracker activeCompactions, BooleanSupplier isCancelled)
+    public static CompactionIterator getAntiCompactionIterator(List<ISSTableScanner> scanners, CompactionController controller, int nowInSec, TimeUUID timeUUID, ActiveCompactionsTracker activeCompactions, BooleanSupplier isCancelled)
     {
-        return new CompactionIterator(OperationType.ANTICOMPACTION, scanners, controller, nowInSec, timeUUID, activeCompactions) {
-
+        return new CompactionIterator(OperationType.ANTICOMPACTION, scanners, controller, nowInSec, timeUUID, activeCompactions, null)
+        {
             public boolean isStopRequested()
             {
                 return super.isStopRequested() || isCancelled.getAsBoolean();
@@ -1749,7 +1815,7 @@
     }
 
     @VisibleForTesting
-    ListenableFuture<?> submitIndexBuild(final SecondaryIndexBuilder builder, ActiveCompactionsTracker activeCompactions)
+    Future<?> submitIndexBuild(final SecondaryIndexBuilder builder, ActiveCompactionsTracker activeCompactions)
     {
         Runnable runnable = new Runnable()
         {
@@ -1773,7 +1839,7 @@
     /**
      * Is not scheduled, because it is performing disjoint work from sstable compaction.
      */
-    public ListenableFuture<?> submitIndexBuild(final SecondaryIndexBuilder builder)
+    public Future<?> submitIndexBuild(final SecondaryIndexBuilder builder)
     {
         return submitIndexBuild(builder, active);
     }
@@ -1842,13 +1908,13 @@
         return cfs.isIndex() ? nowInSec : cfs.gcBefore(nowInSec);
     }
 
-    public ListenableFuture<Long> submitViewBuilder(final ViewBuilderTask task)
+    public Future<Long> submitViewBuilder(final ViewBuilderTask task)
     {
         return submitViewBuilder(task, active);
     }
 
     @VisibleForTesting
-    ListenableFuture<Long> submitViewBuilder(final ViewBuilderTask task, ActiveCompactionsTracker activeCompactions)
+    Future<Long> submitViewBuilder(final ViewBuilderTask task, ActiveCompactionsTracker activeCompactions)
     {
         return viewBuildExecutor.submitIfRunning(() -> {
             activeCompactions.beginCompaction(task);
@@ -1868,63 +1934,38 @@
         return active.getCompactions().size();
     }
 
-    static class CompactionExecutor extends JMXEnabledThreadPoolExecutor
+    public static boolean isCompactor(Thread thread)
     {
-        protected CompactionExecutor(int minThreads, int maxThreads, String name, BlockingQueue<Runnable> queue)
-        {
-            super(minThreads, maxThreads, 60, TimeUnit.SECONDS, queue, new NamedThreadFactory(name, Thread.MIN_PRIORITY), "internal");
-        }
+        return thread.getThreadGroup().getParent() == compactionThreadGroup;
+    }
 
-        private CompactionExecutor(int threadCount, String name)
-        {
-            this(threadCount, threadCount, name, new LinkedBlockingQueue<Runnable>());
-        }
+    // TODO: this is a bit ugly, but no uglier than it was
+    static class CompactionExecutor extends WrappedExecutorPlus
+    {
+        static final ThreadGroup compactionThreadGroup = executorFactory().newThreadGroup("compaction");
 
         public CompactionExecutor()
         {
-            this(Math.max(1, DatabaseDescriptor.getConcurrentCompactors()), "CompactionExecutor");
+            this(executorFactory(), getConcurrentCompactors(), "CompactionExecutor", Integer.MAX_VALUE);
         }
 
-        protected void beforeExecute(Thread t, Runnable r)
+        public CompactionExecutor(int threads, String name, int queueSize)
         {
-            // can't set this in Thread factory, so we do it redundantly here
-            isCompactionManager.set(true);
-            super.beforeExecute(t, r);
+            this(executorFactory(), threads, name, queueSize);
         }
 
-        // modified from DebuggableThreadPoolExecutor so that CompactionInterruptedExceptions are not logged
-        @Override
-        public void afterExecute(Runnable r, Throwable t)
+        protected CompactionExecutor(ExecutorFactory executorFactory, int threads, String name, int queueSize)
         {
-            DebuggableThreadPoolExecutor.maybeResetLocalSessionWrapper(r);
-
-            if (t == null)
-                t = DebuggableThreadPoolExecutor.extractThrowable(r);
-
-            if (t != null)
-            {
-                if (t instanceof CompactionInterruptedException)
-                {
-                    logger.info(t.getMessage());
-                    if (t.getSuppressed() != null && t.getSuppressed().length > 0)
-                        logger.warn("Interruption of compaction encountered exceptions:", t);
-                    else
-                        logger.trace("Full interruption stack trace:", t);
-                }
-                else
-                {
-                    DebuggableThreadPoolExecutor.handleOrLog(t);
-                }
-            }
-
-            // Snapshots cannot be deleted on Windows while segments of the root element are mapped in NTFS. Compactions
-            // unmap those segments which could free up a snapshot for successful deletion.
-            SnapshotDeletingTask.rescheduleFailedTasks();
+            super(executorFactory
+                    .withJmxInternal()
+                    .configurePooled(name, threads)
+                    .withThreadGroup(compactionThreadGroup)
+                    .withQueueLimit(queueSize).build());
         }
 
-        public ListenableFuture<?> submitIfRunning(Runnable task, String name)
+        public Future<Void> submitIfRunning(Runnable task, String name)
         {
-            return submitIfRunning(Executors.callable(task, null), name);
+            return submitIfRunning(callable(name, task), name);
         }
 
         /**
@@ -1937,19 +1978,11 @@
          * @return the future that will deliver the task result, or a future that has already been
          *         cancelled if the task could not be submitted.
          */
-        public <T> ListenableFuture<T> submitIfRunning(Callable<T> task, String name)
+        public <T> Future<T> submitIfRunning(Callable<T> task, String name)
         {
-            if (isShutdown())
-            {
-                logger.info("Executor has been shut down, not submitting {}", name);
-                return Futures.immediateCancelledFuture();
-            }
-
             try
             {
-                ListenableFutureTask<T> ret = ListenableFutureTask.create(task);
-                execute(ret);
-                return ret;
+                return submit(task);
             }
             catch (RejectedExecutionException ex)
             {
@@ -1958,15 +1991,35 @@
                 else
                     logger.error("Failed to submit {}", name, ex);
 
-                return Futures.immediateCancelledFuture();
+                return ImmediateFuture.cancelled();
             }
         }
+
+        public void execute(Runnable command)
+        {
+            executor.execute(command);
+        }
+
+        public <T> Future<T> submit(Callable<T> task)
+        {
+            return executor.submit(task);
+        }
+
+        public <T> Future<T> submit(Runnable task, T result)
+        {
+            return submit(callable(task, result));
+        }
+
+        public Future<?> submit(Runnable task)
+        {
+            return submit(task, null);
+        }
     }
 
     // TODO: pull out relevant parts of CompactionExecutor and move to ValidationManager
     public static class ValidationExecutor extends CompactionExecutor
     {
-        // CompactionExecutor, and by extension ValidationExecutor, use DebuggableThreadPoolExecutor's
+        // CompactionExecutor, and by extension ValidationExecutor, use ExecutorPlus's
         // default RejectedExecutionHandler which blocks the submitting thread when the work queue is
         // full. The calling thread in this case is AntiEntropyStage, so in most cases we don't actually
         // want to block when the ValidationExecutor is saturated as this prevents progress on all
@@ -1981,11 +2034,8 @@
         public ValidationExecutor()
         {
             super(DatabaseDescriptor.getConcurrentValidations(),
-                  DatabaseDescriptor.getConcurrentValidations(),
                   "ValidationExecutor",
-                  new LinkedBlockingQueue());
-
-            allowCoreThreadTimeOut(true);
+                  Integer.MAX_VALUE);
         }
 
         public void adjustPoolSize()
@@ -1999,7 +2049,7 @@
     {
         public ViewBuildExecutor()
         {
-            super(DatabaseDescriptor.getConcurrentViewBuilders(), "ViewBuildExecutor");
+            super(DatabaseDescriptor.getConcurrentViewBuilders(), "ViewBuildExecutor", Integer.MAX_VALUE);
         }
     }
 
@@ -2007,7 +2057,7 @@
     {
         public CacheCleanupExecutor()
         {
-            super(1, "CacheCleanupExecutor");
+            super(1, "CacheCleanupExecutor", Integer.MAX_VALUE);
         }
     }
 
@@ -2091,8 +2141,8 @@
     {
         for (Holder holder : active.getCompactions())
         {
-            UUID holderId = holder.getCompactionInfo().getTaskId();
-            if (holderId != null && holderId.equals(UUID.fromString(compactionId)))
+            TimeUUID holderId = holder.getCompactionInfo().getTaskId();
+            if (holderId != null && holderId.equals(TimeUUID.fromString(compactionId)))
                 holder.stop();
         }
     }
@@ -2273,10 +2323,10 @@
 
     public void waitForCessation(Iterable<ColumnFamilyStore> cfss, Predicate<SSTableReader> sstablePredicate)
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         long delay = TimeUnit.MINUTES.toNanos(1);
 
-        while (System.nanoTime() - start < delay)
+        while (nanoTime() - start < delay)
         {
             if (CompactionManager.instance.isCompacting(cfss, sstablePredicate))
                 Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
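
[Editor's aside, not part of the patch] The CompactionManager hunks above replace the Guava ListenableFuture plumbing with Cassandra's own Future types and rebuild submitIfRunning on the new executor API. Below is a minimal standalone sketch of that submit-or-cancel pattern using plain JDK types (ExecutorService, CompletableFuture) as stand-ins for ExecutorPlus, Future and ImmediateFuture; the class and method names are illustrative only.

import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;

class SubmitIfRunningSketch
{
    private final ExecutorService executor = Executors.newFixedThreadPool(2);

    // Submit if the pool still accepts work; otherwise hand back a future that is
    // already cancelled, so callers never wait on a task that was never queued.
    <T> Future<T> submitIfRunning(Callable<T> task, String name)
    {
        try
        {
            return executor.submit(task);
        }
        catch (RejectedExecutionException ex)
        {
            System.err.println("Failed to submit " + name + ": " + ex);
            CompletableFuture<T> cancelled = new CompletableFuture<>();
            cancelled.cancel(true);
            return cancelled;
        }
    }
}

Note that the patch drops the explicit isShutdown() pre-check: submission is attempted unconditionally and a rejection is treated as the signal that the executor is no longer running.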
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java
index 129ee79..cab0af0 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.UUID;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
@@ -29,7 +28,6 @@
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
-import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.index.Index;
@@ -40,6 +38,7 @@
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class CompactionStrategyHolder extends AbstractStrategyHolder
 {
@@ -219,7 +218,7 @@
     public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor,
                                                        long keyCount,
                                                        long repairedAt,
-                                                       UUID pendingRepair,
+                                                       TimeUUID pendingRepair,
                                                        boolean isTransient,
                                                        MetadataCollector collector,
                                                        SerializationHeader header,
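
[Editor's aside, not part of the patch] The pendingRepair identifier changes type from java.util.UUID to TimeUUID here and throughout this section. Purely as an illustration of what a version-1, time-based UUID carries, and why a dedicated wrapper is convenient, the sketch below extracts the embedded timestamp with the stock JDK API; the UUID literal is an arbitrary example and none of this reflects Cassandra's TimeUUID interface.

import java.util.UUID;

class TimeBasedUuidSketch
{
    // 100-nanosecond intervals between the UUID epoch (1582-10-15) and the Unix epoch (1970-01-01)
    private static final long GREGORIAN_TO_UNIX_100NS = 0x01B21DD213814000L;

    public static void main(String[] args)
    {
        // An arbitrary version-1 UUID; a random (version 4) UUID carries no timestamp at all.
        UUID id = UUID.fromString("5c0e6f80-9aee-11ec-b909-0242ac120002");

        // UUID#timestamp() throws UnsupportedOperationException unless version() == 1.
        long unixMillis = (id.timestamp() - GREGORIAN_TO_UNIX_100NS) / 10_000L;

        System.out.println("version = " + id.version());
        System.out.println("unix millis = " + unixMillis);
    }
}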
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
index 99e2ce9..5e1d561 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.db.compaction;
 
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -29,7 +28,6 @@
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
@@ -39,6 +37,7 @@
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Longs;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -74,6 +73,7 @@
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.db.compaction.AbstractStrategyHolder.GroupedSSTableContainer;
 
@@ -421,7 +421,7 @@
         return transientRepairs;
     }
 
-    public boolean hasDataForPendingRepair(UUID sessionID)
+    public boolean hasDataForPendingRepair(TimeUUID sessionID)
     {
         readLock.lock();
         try
@@ -587,6 +587,29 @@
         return null;
     }
 
+    public long[] getPerLevelSizeBytes()
+    {
+        readLock.lock();
+        try
+        {
+            if (repaired.first() instanceof LeveledCompactionStrategy)
+            {
+                long[] res = new long[LeveledGenerations.MAX_LEVEL_COUNT];
+                for (AbstractCompactionStrategy strategy : getAllStrategies())
+                {
+                    long[] perLevelBytes = ((LeveledCompactionStrategy) strategy).getAllLevelSizeBytes();
+                    res = sumArrays(res, perLevelBytes);
+                }
+                return res;
+            }
+            return null;
+        }
+        finally
+        {
+            readLock.unlock();
+        }
+    }
+
     static int[] sumArrays(int[] a, int[] b)
     {
         int[] res = new int[Math.max(a.length, b.length)];
@@ -602,6 +625,21 @@
         return res;
     }
 
+    static long[] sumArrays(long[] a, long[] b)
+    {
+        long[] res = new long[Math.max(a.length, b.length)];
+        for (int i = 0; i < res.length; i++)
+        {
+            if (i < a.length && i < b.length)
+                res[i] = a[i] + b[i];
+            else if (i < a.length)
+                res[i] = a[i];
+            else
+                res[i] = b[i];
+        }
+        return res;
+    }
+
     /**
      * Should only be called holding the readLock
      */
@@ -633,7 +671,7 @@
         throw new IllegalStateException("No holder claimed " + sstable);
     }
 
-    private AbstractStrategyHolder getHolder(long repairedAt, UUID pendingRepair, boolean isTransient)
+    private AbstractStrategyHolder getHolder(long repairedAt, TimeUUID pendingRepair, boolean isTransient)
     {
         return getHolder(repairedAt != ActiveRepairService.UNREPAIRED_SSTABLE,
                          pendingRepair != ActiveRepairService.NO_PENDING_REPAIR,
@@ -902,7 +940,7 @@
             boolean repaired = firstSSTable.isRepaired();
             int firstIndex = compactionStrategyIndexFor(firstSSTable);
             boolean isPending = firstSSTable.isPendingRepair();
-            UUID pendingRepair = firstSSTable.getSSTableMetadata().pendingRepair;
+            TimeUUID pendingRepair = firstSSTable.getSSTableMetadata().pendingRepair;
             for (SSTableReader sstable : input)
             {
                 if (sstable.isRepaired() != repaired)
@@ -1060,7 +1098,7 @@
     public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor,
                                                        long keyCount,
                                                        long repairedAt,
-                                                       UUID pendingRepair,
+                                                       TimeUUID pendingRepair,
                                                        boolean isTransient,
                                                        MetadataCollector collector,
                                                        SerializationHeader header,
@@ -1105,13 +1143,13 @@
                 {
                     int idx = holder.getStrategyIndex(strategy);
                     if (idx >= 0)
-                        return Collections.singletonList(locations[idx].location.getAbsolutePath());
+                        return Collections.singletonList(locations[idx].location.absolutePath());
                 }
             }
             List<String> folders = new ArrayList<>(locations.length);
             for (Directories.DataDirectory location : locations)
             {
-                folders.add(location.location.getAbsolutePath());
+                folders.add(location.location.absolutePath());
             }
             return folders;
         }
@@ -1145,7 +1183,7 @@
      * Mutates sstable repairedAt times and notifies listeners of the change with the writeLock held. Prevents races
      * with other processes between when the metadata is changed and when sstables are moved between strategies.
       */
-    public void mutateRepaired(Collection<SSTableReader> sstables, long repairedAt, UUID pendingRepair, boolean isTransient) throws IOException
+    public void mutateRepaired(Collection<SSTableReader> sstables, long repairedAt, TimeUUID pendingRepair, boolean isTransient) throws IOException
     {
         Set<SSTableReader> changed = new HashSet<>();
 
@@ -1174,7 +1212,7 @@
         }
     }
 
-    private static void verifyMetadata(SSTableReader sstable, long repairedAt, UUID pendingRepair, boolean isTransient)
+    private static void verifyMetadata(SSTableReader sstable, long repairedAt, TimeUUID pendingRepair, boolean isTransient)
     {
         if (!Objects.equals(pendingRepair, sstable.getPendingRepair()))
             throw new IllegalStateException(String.format("Failed setting pending repair to %s on %s (pending repair is %s)", pendingRepair, sstable, sstable.getPendingRepair()));
@@ -1184,7 +1222,7 @@
             throw new IllegalStateException(String.format("Failed setting isTransient to %b on %s (isTransient is %b)", isTransient, sstable, sstable.isTransient()));
     }
 
-    public CleanupSummary releaseRepairData(Collection<UUID> sessions)
+    public CleanupSummary releaseRepairData(Collection<TimeUUID> sessions)
     {
         List<CleanupTask> cleanupTasks = new ArrayList<>();
         readLock.lock();
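
[Editor's aside, not part of the patch] For clarity on the new long[] overload of sumArrays that getPerLevelSizeBytes relies on, here is a self-contained copy with a tiny driver showing its behaviour on arrays of different lengths; the driver values are made up.

import java.util.Arrays;

class SumArraysSketch
{
    // Element-wise sum of two arrays that may differ in length; positions present
    // in only one array are carried over unchanged.
    static long[] sumArrays(long[] a, long[] b)
    {
        long[] res = new long[Math.max(a.length, b.length)];
        for (int i = 0; i < res.length; i++)
        {
            if (i < a.length && i < b.length)
                res[i] = a[i] + b[i];
            else if (i < a.length)
                res[i] = a[i];
            else
                res[i] = b[i];
        }
        return res;
    }

    public static void main(String[] args)
    {
        long[] repairedBytesPerLevel   = { 0L, 10L, 200L };
        long[] unrepairedBytesPerLevel = { 5L, 15L };
        // prints [5, 25, 200]
        System.out.println(Arrays.toString(sumArrays(repairedBytesPerLevel, unrepairedBytesPerLevel)));
    }
}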
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
index 90abac3..5fc8031 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
@@ -17,12 +17,12 @@
  */
 package org.apache.cassandra.db.compaction;
 
+import java.time.Instant;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Predicate;
@@ -44,8 +44,13 @@
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Refs;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.FBUtilities.now;
+
 public class CompactionTask extends AbstractCompactionTask
 {
     protected static final Logger logger = LoggerFactory.getLogger(CompactionTask.class);
@@ -84,7 +89,7 @@
         {
             // Try again w/o the largest one.
             SSTableReader removedSSTable = cfs.getMaxSizeFile(nonExpiredSSTables);
-            logger.warn("insufficient space to compact all requested files. {}MB required, {} for compaction {} - removing largest SSTable: {}",
+            logger.warn("insufficient space to compact all requested files. {}MiB required, {} for compaction {} - removing largest SSTable: {}",
                         (float) expectedSize / 1024 / 1024,
                         StringUtils.join(transaction.originals(), ", "),
                         transaction.opId(),
@@ -116,7 +121,10 @@
         CompactionStrategyManager strategy = cfs.getCompactionStrategyManager();
 
         if (DatabaseDescriptor.isSnapshotBeforeCompaction())
-            cfs.snapshotWithoutFlush(System.currentTimeMillis() + "-compact-" + cfs.name);
+        {
+            Instant creationTime = now();
+            cfs.snapshotWithoutMemtable(creationTime.toEpochMilli() + "-compact-" + cfs.name, creationTime);
+        }
 
         try (CompactionController controller = getCompactionController(transaction.originals()))
         {
@@ -141,7 +149,7 @@
                 }
             });
 
-            UUID taskId = transaction.opId();
+            TimeUUID taskId = transaction.opId();
 
             // new sstables from flush can be added during a compaction, but only the compaction can remove them,
             // so in our single-threaded compaction world this is a valid way of determining if we're compacting
@@ -156,11 +164,12 @@
             logger.info("Compacting ({}) {}", taskId, ssTableLoggerMsg);
 
             RateLimiter limiter = CompactionManager.instance.getRateLimiter();
-            long start = System.nanoTime();
-            long startTime = System.currentTimeMillis();
+            long start = nanoTime();
+            long startTime = currentTimeMillis();
             long totalKeysWritten = 0;
             long estimatedKeys = 0;
             long inputSizeBytes;
+            long timeSpentWritingKeys;
 
             Set<SSTableReader> actuallyCompact = Sets.difference(transaction.originals(), fullyExpiredSSTables);
             Collection<SSTableReader> newSStables;
@@ -168,9 +177,6 @@
             long[] mergedRowCounts;
             long totalSourceCQLRows;
 
-            // SSTableScanners need to be closed before markCompactedSSTablesReplaced call as scanners contain references
-            // to both ifile and dfile and SSTR will throw deletion errors on Windows if it tries to delete before scanner is closed.
-            // See CASSANDRA-8019 and CASSANDRA-8399
             int nowInSec = FBUtilities.nowInSeconds();
             try (Refs<SSTableReader> refs = Refs.ref(actuallyCompact);
                  AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(actuallyCompact);
@@ -199,20 +205,20 @@
                         if (writer.append(ci.next()))
                             totalKeysWritten++;
 
-
                         long bytesScanned = scanners.getTotalBytesScanned();
 
-                        //Rate limit the scanners, and account for compression
+                        // Rate limit the scanners, and account for compression
                         CompactionManager.compactionRateLimiterAcquire(limiter, bytesScanned, lastBytesScanned, compressionRatio);
 
                         lastBytesScanned = bytesScanned;
 
-                        if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
+                        if (nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
                         {
                             controller.maybeRefreshOverlaps();
-                            lastCheckObsoletion = System.nanoTime();
+                            lastCheckObsoletion = nanoTime();
                         }
                     }
+                    timeSpentWritingKeys = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
 
                     // point of no return
                     newSStables = writer.finish();
@@ -229,7 +235,7 @@
                 return;
 
             // log a bunch of statistics about the result and save to system table compaction_history
-            long durationInNano = System.nanoTime() - start;
+            long durationInNano = nanoTime() - start;
             long dTime = TimeUnit.NANOSECONDS.toMillis(durationInNano);
             long startsize = inputSizeBytes;
             long endsize = SSTableReader.getTotalBytes(newSStables);
@@ -244,7 +250,7 @@
 
             String mergeSummary = updateCompactionHistory(cfs.keyspace.getName(), cfs.getTableName(), mergedRowCounts, startsize, endsize);
 
-            logger.info(String.format("Compacted (%s) %d sstables to [%s] to level=%d.  %s to %s (~%d%% of original) in %,dms.  Read Throughput = %s, Write Throughput = %s, Row Throughput = ~%,d/s.  %,d total partitions merged to %,d.  Partition merge counts were {%s}",
+            logger.info(String.format("Compacted (%s) %d sstables to [%s] to level=%d.  %s to %s (~%d%% of original) in %,dms.  Read Throughput = %s, Write Throughput = %s, Row Throughput = ~%,d/s.  %,d total partitions merged to %,d.  Partition merge counts were {%s}. Time spent writing keys = %,dms",
                                        taskId,
                                        transaction.originals().size(),
                                        newSSTableNames.toString(),
@@ -258,13 +264,14 @@
                                        (int) totalSourceCQLRows / (TimeUnit.NANOSECONDS.toSeconds(durationInNano) + 1),
                                        totalSourceRows,
                                        totalKeysWritten,
-                                       mergeSummary));
+                                       mergeSummary,
+                                       timeSpentWritingKeys));
             if (logger.isTraceEnabled())
             {
                 logger.trace("CF Total Bytes Compacted: {}", FBUtilities.prettyPrintMemory(CompactionTask.addToTotalBytesCompacted(endsize)));
                 logger.trace("Actual #keys: {}, Estimated #keys:{}, Err%: {}", totalKeysWritten, estimatedKeys, ((double)(totalKeysWritten - estimatedKeys)/totalKeysWritten));
             }
-            cfs.getCompactionStrategyManager().compactionLogger.compaction(startTime, transaction.originals(), System.currentTimeMillis(), newSStables);
+            cfs.getCompactionStrategyManager().compactionLogger.compaction(startTime, transaction.originals(), currentTimeMillis(), newSStables);
 
             // update the metrics
             cfs.metric.compactionBytesWritten.inc(endsize);
@@ -294,7 +301,7 @@
             mergeSummary.append(String.format("%d:%d, ", rows, count));
             mergedRows.put(rows, count);
         }
-        SystemKeyspace.updateCompactionHistory(keyspaceName, columnFamilyName, System.currentTimeMillis(), startSize, endSize, mergedRows);
+        SystemKeyspace.updateCompactionHistory(keyspaceName, columnFamilyName, currentTimeMillis(), startSize, endSize, mergedRows);
         return mergeSummary.toString();
     }
 
@@ -313,13 +320,13 @@
         return minRepairedAt;
     }
 
-    public static UUID getPendingRepair(Set<SSTableReader> sstables)
+    public static TimeUUID getPendingRepair(Set<SSTableReader> sstables)
     {
         if (sstables.isEmpty())
         {
             return ActiveRepairService.NO_PENDING_REPAIR;
         }
-        Set<UUID> ids = new HashSet<>();
+        Set<TimeUUID> ids = new HashSet<>();
         for (SSTableReader sstable: sstables)
             ids.add(sstable.getSSTableMetadata().pendingRepair);
 
@@ -395,7 +402,7 @@
             }
 
             sstablesRemoved++;
-            logger.warn("Not enough space for compaction, {}MB estimated.  Reducing scope.",
+            logger.warn("Not enough space for compaction, {}MiB estimated.  Reducing scope.",
                         (float) expectedWriteSize / 1024 / 1024);
         }
 
diff --git a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
index a94bcfa..1e18bf9 100644
--- a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
@@ -38,6 +38,7 @@
 import org.apache.cassandra.utils.Pair;
 
 import static com.google.common.collect.Iterables.filter;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * @deprecated in favour of {@link TimeWindowCompactionStrategy}
@@ -116,11 +117,11 @@
 
         Set<SSTableReader> expired = Collections.emptySet();
         // we only check for expired sstables every 10 minutes (by default) due to it being an expensive operation
-        if (System.currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
+        if (currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
         {
             // Find fully expired SSTables. Those will be included no matter what.
             expired = CompactionController.getFullyExpiredSSTables(cfs, uncompacting, cfs.getOverlappingLiveSSTables(uncompacting), gcBefore);
-            lastExpiredCheck = System.currentTimeMillis();
+            lastExpiredCheck = currentTimeMillis();
         }
         Set<SSTableReader> candidates = Sets.newHashSet(filterSuspectSSTables(uncompacting));
 
diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
index dd7c9df..54953e4 100644
--- a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
@@ -54,7 +54,7 @@
 
     @VisibleForTesting
     final LeveledManifest manifest;
-    private final int maxSSTableSizeInMB;
+    private final int maxSSTableSizeInMiB;
     private final int levelFanoutSize;
     private final boolean singleSSTableUplevel;
 
@@ -91,11 +91,11 @@
                 configuredSingleSSTableUplevel = Boolean.parseBoolean(options.get(SINGLE_SSTABLE_UPLEVEL_OPTION));
             }
         }
-        maxSSTableSizeInMB = configuredMaxSSTableSize;
+        maxSSTableSizeInMiB = configuredMaxSSTableSize;
         levelFanoutSize = configuredLevelFanoutSize;
         singleSSTableUplevel = configuredSingleSSTableUplevel;
 
-        manifest = new LeveledManifest(cfs, this.maxSSTableSizeInMB, this.levelFanoutSize, localOptions);
+        manifest = new LeveledManifest(cfs, this.maxSSTableSizeInMiB, this.levelFanoutSize, localOptions);
         logger.trace("Created {}", manifest);
     }
 
@@ -109,6 +109,11 @@
         return manifest.getAllLevelSize();
     }
 
+    public long[] getAllLevelSizeBytes()
+    {
+        return manifest.getAllLevelSizeBytes();
+    }
+
     @Override
     public void startup()
     {
@@ -277,7 +282,7 @@
 
     public long getMaxSSTableBytes()
     {
-        return maxSSTableSizeInMB * 1024L * 1024L;
+        return maxSSTableSizeInMiB * 1024L * 1024L;
     }
 
     public int getLevelFanoutSize()
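
[Editor's aside, not part of the patch] The rename from maxSSTableSizeInMB to maxSSTableSizeInMiB is cosmetic, but the 1024L literal in getMaxSSTableBytes is not: the multiplication has to be widened to long before it can overflow int. A small standalone illustration with arbitrary values:

class MebibytesSketch
{
    public static void main(String[] args)
    {
        int sizeInMiB = 4096; // e.g. a 4 GiB target

        long correct = sizeInMiB * 1024L * 1024L;   // widened to long: 4294967296
        int  broken  = sizeInMiB * 1024 * 1024;     // 32-bit overflow: 0

        System.out.println(correct);
        System.out.println(broken);
    }
}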
diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java
index 64027f2..36b5600 100644
--- a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java
@@ -33,15 +33,17 @@
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.PeekingIterator;
-import com.google.common.primitives.Ints;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.Config;
+import org.apache.cassandra.io.sstable.SSTableIdFactory;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Handles the leveled manifest generations
  *
@@ -68,14 +70,14 @@
      */
     private final Map<SSTableReader, SSTableReader> allSSTables = new HashMap<>();
     private final Set<SSTableReader> l0 = new HashSet<>();
-    private static long lastOverlapCheck = System.nanoTime();
+    private static long lastOverlapCheck = nanoTime();
     // note that since l0 is broken out, levels[0] represents L1:
     private final TreeSet<SSTableReader> [] levels = new TreeSet[MAX_LEVEL_COUNT - 1];
 
     private static final Comparator<SSTableReader> nonL0Comparator = (o1, o2) -> {
         int cmp = SSTableReader.sstableComparator.compare(o1, o2);
         if (cmp == 0)
-            cmp = Ints.compare(o1.descriptor.generation, o2.descriptor.generation);
+            cmp = SSTableIdFactory.COMPARATOR.compare(o1.descriptor.id, o2.descriptor.id);
         return cmp;
     };
 
@@ -226,6 +228,14 @@
         return counts;
     }
 
+    long[] getAllLevelSizeBytes()
+    {
+        long[] sums = new long[levelCount()];
+        for (int i = 0; i < sums.length; i++)
+            sums[i] = get(i).stream().map(SSTableReader::onDiskLength).reduce(0L, Long::sum);
+        return sums;
+    }
+
     Set<SSTableReader> allSSTables()
     {
         ImmutableSet.Builder<SSTableReader> builder = ImmutableSet.builder();
@@ -302,10 +312,10 @@
      */
     private void maybeVerifyLevels()
     {
-        if (!strictLCSChecksTest || System.nanoTime() - lastOverlapCheck <= TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS))
+        if (!strictLCSChecksTest || nanoTime() - lastOverlapCheck <= TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS))
             return;
         logger.info("LCS verifying levels");
-        lastOverlapCheck = System.nanoTime();
+        lastOverlapCheck = nanoTime();
         for (int i = 1; i < levelCount(); i++)
         {
             SSTableReader prev = null;
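
[Editor's aside, not part of the patch] The new getAllLevelSizeBytes walks each level and sums SSTableReader::onDiskLength with a boxed reduce. A standalone sketch of the same aggregation over plain data, written with a primitive LongStream, which avoids the boxing of map(...).reduce(0L, Long::sum); the types here are simplified stand-ins, not the patch's code.

import java.util.Arrays;
import java.util.List;

class LevelSizeSketch
{
    static class SSTable
    {
        final long onDiskLength;
        SSTable(long onDiskLength) { this.onDiskLength = onDiskLength; }
    }

    static long[] perLevelBytes(List<List<SSTable>> levels)
    {
        long[] sums = new long[levels.size()];
        for (int i = 0; i < sums.length; i++)
            // mapToLong(...).sum() sums primitives directly, no Long boxing involved
            sums[i] = levels.get(i).stream().mapToLong(t -> t.onDiskLength).sum();
        return sums;
    }

    public static void main(String[] args)
    {
        List<List<SSTable>> levels = Arrays.asList(
            Arrays.asList(new SSTable(100), new SSTable(250)),
            Arrays.asList(new SSTable(4_000)));
        System.out.println(Arrays.toString(perLevelBytes(levels))); // [350, 4000]
    }
}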
diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
index 7c865c7..2972d7d 100644
--- a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
@@ -161,7 +161,7 @@
         {
             builder.append(sstable.descriptor.cfname)
                    .append('-')
-                   .append(sstable.descriptor.generation)
+                   .append(sstable.descriptor.id)
                    .append("(L")
                    .append(sstable.getSSTableLevel())
                    .append("), ");
@@ -395,6 +395,11 @@
         return generations.getAllLevelSize();
     }
 
+    public synchronized long[] getAllLevelSizeBytes()
+    {
+        return generations.getAllLevelSizeBytes();
+    }
+
     @VisibleForTesting
     public synchronized int remove(SSTableReader reader)
     {
diff --git a/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java b/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java
index 03d4111..314df9e 100644
--- a/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java
+++ b/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java
@@ -22,7 +22,6 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
@@ -30,7 +29,6 @@
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
-import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.index.Index;
@@ -41,6 +39,7 @@
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class PendingRepairHolder extends AbstractStrategyHolder
 {
@@ -94,7 +93,7 @@
         return Iterables.concat(Iterables.transform(managers, PendingRepairManager::getStrategies));
     }
 
-    Iterable<AbstractCompactionStrategy> getStrategiesFor(UUID session)
+    Iterable<AbstractCompactionStrategy> getStrategiesFor(TimeUUID session)
     {
         List<AbstractCompactionStrategy> strategies = new ArrayList<>(managers.size());
         for (PendingRepairManager manager : managers)
@@ -237,7 +236,7 @@
     public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor,
                                                        long keyCount,
                                                        long repairedAt,
-                                                       UUID pendingRepair,
+                                                       TimeUUID pendingRepair,
                                                        boolean isTransient,
                                                        MetadataCollector collector,
                                                        SerializationHeader header,
@@ -272,7 +271,7 @@
         return -1;
     }
 
-    public boolean hasDataForSession(UUID sessionID)
+    public boolean hasDataForSession(TimeUUID sessionID)
     {
         return Iterables.any(managers, prm -> prm.hasDataForSession(sessionID));
     }
diff --git a/src/java/org/apache/cassandra/db/compaction/PendingRepairManager.java b/src/java/org/apache/cassandra/db/compaction/PendingRepairManager.java
index aefa40b..11d6fe8 100644
--- a/src/java/org/apache/cassandra/db/compaction/PendingRepairManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/PendingRepairManager.java
@@ -26,7 +26,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -50,6 +49,7 @@
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Companion to CompactionStrategyManager which manages the sstables marked pending repair.
@@ -66,7 +66,7 @@
     private final ColumnFamilyStore cfs;
     private final CompactionParams params;
     private final boolean isTransient;
-    private volatile ImmutableMap<UUID, AbstractCompactionStrategy> strategies = ImmutableMap.of();
+    private volatile ImmutableMap<TimeUUID, AbstractCompactionStrategy> strategies = ImmutableMap.of();
 
     /**
      * Indicates we're being asked to do something with an sstable that isn't marked pending repair
@@ -86,12 +86,12 @@
         this.isTransient = isTransient;
     }
 
-    private ImmutableMap.Builder<UUID, AbstractCompactionStrategy> mapBuilder()
+    private ImmutableMap.Builder<TimeUUID, AbstractCompactionStrategy> mapBuilder()
     {
         return ImmutableMap.builder();
     }
 
-    AbstractCompactionStrategy get(UUID id)
+    AbstractCompactionStrategy get(TimeUUID id)
     {
         return strategies.get(id);
     }
@@ -102,7 +102,7 @@
         return get(sstable.getSSTableMetadata().pendingRepair);
     }
 
-    AbstractCompactionStrategy getOrCreate(UUID id)
+    AbstractCompactionStrategy getOrCreate(TimeUUID id)
     {
         checkPendingID(id);
         assert id != null;
@@ -124,7 +124,7 @@
         return strategy;
     }
 
-    private static void checkPendingID(UUID pendingID)
+    private static void checkPendingID(TimeUUID pendingID)
     {
         if (pendingID == null)
         {
@@ -137,7 +137,7 @@
         return getOrCreate(sstable.getSSTableMetadata().pendingRepair);
     }
 
-    private synchronized void removeSessionIfEmpty(UUID sessionID)
+    private synchronized void removeSessionIfEmpty(TimeUUID sessionID)
     {
         if (!strategies.containsKey(sessionID) || !strategies.get(sessionID).getSSTables().isEmpty())
             return;
@@ -148,7 +148,7 @@
 
     synchronized void removeSSTable(SSTableReader sstable)
     {
-        for (Map.Entry<UUID, AbstractCompactionStrategy> entry : strategies.entrySet())
+        for (Map.Entry<TimeUUID, AbstractCompactionStrategy> entry : strategies.entrySet())
         {
             entry.getValue().removeSSTable(sstable);
             removeSessionIfEmpty(entry.getKey());
@@ -180,10 +180,10 @@
             return;
 
         // left=removed, right=added
-        Map<UUID, Pair<Set<SSTableReader>, Set<SSTableReader>>> groups = new HashMap<>();
+        Map<TimeUUID, Pair<Set<SSTableReader>, Set<SSTableReader>>> groups = new HashMap<>();
         for (SSTableReader sstable : removed)
         {
-            UUID sessionID = sstable.getSSTableMetadata().pendingRepair;
+            TimeUUID sessionID = sstable.getSSTableMetadata().pendingRepair;
             if (!groups.containsKey(sessionID))
             {
                 groups.put(sessionID, Pair.create(new HashSet<>(), new HashSet<>()));
@@ -193,7 +193,7 @@
 
         for (SSTableReader sstable : added)
         {
-            UUID sessionID = sstable.getSSTableMetadata().pendingRepair;
+            TimeUUID sessionID = sstable.getSSTableMetadata().pendingRepair;
             if (!groups.containsKey(sessionID))
             {
                 groups.put(sessionID, Pair.create(new HashSet<>(), new HashSet<>()));
@@ -201,7 +201,7 @@
             groups.get(sessionID).right.add(sstable);
         }
 
-        for (Map.Entry<UUID, Pair<Set<SSTableReader>, Set<SSTableReader>>> entry : groups.entrySet())
+        for (Map.Entry<TimeUUID, Pair<Set<SSTableReader>, Set<SSTableReader>>> entry : groups.entrySet())
         {
             AbstractCompactionStrategy strategy = getOrCreate(entry.getKey());
             Set<SSTableReader> groupRemoved = entry.getValue().left;
@@ -226,7 +226,7 @@
         strategies.values().forEach(AbstractCompactionStrategy::shutdown);
     }
 
-    private int getEstimatedRemainingTasks(UUID sessionID, AbstractCompactionStrategy strategy)
+    private int getEstimatedRemainingTasks(TimeUUID sessionID, AbstractCompactionStrategy strategy)
     {
         if (canCleanup(sessionID))
         {
@@ -241,7 +241,7 @@
     int getEstimatedRemainingTasks()
     {
         int tasks = 0;
-        for (Map.Entry<UUID, AbstractCompactionStrategy> entry : strategies.entrySet())
+        for (Map.Entry<TimeUUID, AbstractCompactionStrategy> entry : strategies.entrySet())
         {
             tasks += getEstimatedRemainingTasks(entry.getKey(), entry.getValue());
         }
@@ -254,7 +254,7 @@
     int getMaxEstimatedRemainingTasks()
     {
         int tasks = 0;
-        for (Map.Entry<UUID, AbstractCompactionStrategy> entry : strategies.entrySet())
+        for (Map.Entry<TimeUUID, AbstractCompactionStrategy> entry : strategies.entrySet())
         {
             tasks = Math.max(tasks, getEstimatedRemainingTasks(entry.getKey(), entry.getValue()));
         }
@@ -262,7 +262,7 @@
     }
 
     @SuppressWarnings("resource")
-    private RepairFinishedCompactionTask getRepairFinishedCompactionTask(UUID sessionID)
+    private RepairFinishedCompactionTask getRepairFinishedCompactionTask(TimeUUID sessionID)
     {
         Preconditions.checkState(canCleanup(sessionID));
         AbstractCompactionStrategy compactionStrategy = get(sessionID);
@@ -277,9 +277,9 @@
     public static class CleanupTask
     {
         private final ColumnFamilyStore cfs;
-        private final List<Pair<UUID, RepairFinishedCompactionTask>> tasks;
+        private final List<Pair<TimeUUID, RepairFinishedCompactionTask>> tasks;
 
-        public CleanupTask(ColumnFamilyStore cfs, List<Pair<UUID, RepairFinishedCompactionTask>> tasks)
+        public CleanupTask(ColumnFamilyStore cfs, List<Pair<TimeUUID, RepairFinishedCompactionTask>> tasks)
         {
             this.cfs = cfs;
             this.tasks = tasks;
@@ -287,11 +287,11 @@
 
         public CleanupSummary cleanup()
         {
-            Set<UUID> successful = new HashSet<>();
-            Set<UUID> unsuccessful = new HashSet<>();
-            for (Pair<UUID, RepairFinishedCompactionTask> pair : tasks)
+            Set<TimeUUID> successful = new HashSet<>();
+            Set<TimeUUID> unsuccessful = new HashSet<>();
+            for (Pair<TimeUUID, RepairFinishedCompactionTask> pair : tasks)
             {
-                UUID session = pair.left;
+                TimeUUID session = pair.left;
                 RepairFinishedCompactionTask task = pair.right;
 
                 if (task != null)
@@ -318,16 +318,16 @@
 
         public Throwable abort(Throwable accumulate)
         {
-            for (Pair<UUID, RepairFinishedCompactionTask> pair : tasks)
+            for (Pair<TimeUUID, RepairFinishedCompactionTask> pair : tasks)
                 accumulate = pair.right.transaction.abort(accumulate);
             return accumulate;
         }
     }
 
-    public CleanupTask releaseSessionData(Collection<UUID> sessionIDs)
+    public CleanupTask releaseSessionData(Collection<TimeUUID> sessionIDs)
     {
-        List<Pair<UUID, RepairFinishedCompactionTask>> tasks = new ArrayList<>(sessionIDs.size());
-        for (UUID session : sessionIDs)
+        List<Pair<TimeUUID, RepairFinishedCompactionTask>> tasks = new ArrayList<>(sessionIDs.size());
+        for (TimeUUID session : sessionIDs)
         {
             if (hasDataForSession(session))
             {
@@ -340,7 +340,7 @@
     synchronized int getNumPendingRepairFinishedTasks()
     {
         int count = 0;
-        for (UUID sessionID : strategies.keySet())
+        for (TimeUUID sessionID : strategies.keySet())
         {
             if (canCleanup(sessionID))
             {
@@ -352,7 +352,7 @@
 
     synchronized AbstractCompactionTask getNextRepairFinishedTask()
     {
-        for (UUID sessionID : strategies.keySet())
+        for (TimeUUID sessionID : strategies.keySet())
         {
             if (canCleanup(sessionID))
             {
@@ -367,9 +367,9 @@
         if (strategies.isEmpty())
             return null;
 
-        Map<UUID, Integer> numTasks = new HashMap<>(strategies.size());
-        ArrayList<UUID> sessions = new ArrayList<>(strategies.size());
-        for (Map.Entry<UUID, AbstractCompactionStrategy> entry : strategies.entrySet())
+        Map<TimeUUID, Integer> numTasks = new HashMap<>(strategies.size());
+        ArrayList<TimeUUID> sessions = new ArrayList<>(strategies.size());
+        for (Map.Entry<TimeUUID, AbstractCompactionStrategy> entry : strategies.entrySet())
         {
             if (canCleanup(entry.getKey()))
             {
@@ -385,7 +385,7 @@
         // we want the session with the most compactions at the head of the list
         sessions.sort((o1, o2) -> numTasks.get(o2) - numTasks.get(o1));
 
-        UUID sessionID = sessions.get(0);
+        TimeUUID sessionID = sessions.get(0);
         return get(sessionID).getNextBackgroundTask(gcBefore);
     }
 
@@ -395,7 +395,7 @@
             return null;
 
         List<AbstractCompactionTask> maximalTasks = new ArrayList<>(strategies.size());
-        for (Map.Entry<UUID, AbstractCompactionStrategy> entry : strategies.entrySet())
+        for (Map.Entry<TimeUUID, AbstractCompactionStrategy> entry : strategies.entrySet())
         {
             if (canCleanup(entry.getKey()))
             {
@@ -416,12 +416,12 @@
         return strategies.values();
     }
 
-    Set<UUID> getSessions()
+    Set<TimeUUID> getSessions()
     {
         return strategies.keySet();
     }
 
-    boolean canCleanup(UUID sessionID)
+    boolean canCleanup(TimeUUID sessionID)
     {
         return !ActiveRepairService.instance.consistent.local.isSessionInProgress(sessionID);
     }
@@ -434,10 +434,10 @@
             return Collections.emptySet();
         }
 
-        Map<UUID, Set<SSTableReader>> sessionSSTables = new HashMap<>();
+        Map<TimeUUID, Set<SSTableReader>> sessionSSTables = new HashMap<>();
         for (SSTableReader sstable : sstables)
         {
-            UUID sessionID = sstable.getSSTableMetadata().pendingRepair;
+            TimeUUID sessionID = sstable.getSSTableMetadata().pendingRepair;
             checkPendingID(sessionID);
             sessionSSTables.computeIfAbsent(sessionID, k -> new HashSet<>()).add(sstable);
         }
@@ -445,7 +445,7 @@
         Set<ISSTableScanner> scanners = new HashSet<>(sessionSSTables.size());
         try
         {
-            for (Map.Entry<UUID, Set<SSTableReader>> entry : sessionSSTables.entrySet())
+            for (Map.Entry<TimeUUID, Set<SSTableReader>> entry : sessionSSTables.entrySet())
             {
                 scanners.addAll(getOrCreate(entry.getKey()).getScanners(entry.getValue(), ranges).scanners);
             }
@@ -462,7 +462,7 @@
         return strategies.values().contains(strategy);
     }
 
-    public synchronized boolean hasDataForSession(UUID sessionID)
+    public synchronized boolean hasDataForSession(TimeUUID sessionID)
     {
         return strategies.keySet().contains(sessionID);
     }
@@ -478,7 +478,7 @@
 
     public Collection<AbstractCompactionTask> createUserDefinedTasks(Collection<SSTableReader> sstables, int gcBefore)
     {
-        Map<UUID, List<SSTableReader>> group = sstables.stream().collect(Collectors.groupingBy(s -> s.getSSTableMetadata().pendingRepair));
+        Map<TimeUUID, List<SSTableReader>> group = sstables.stream().collect(Collectors.groupingBy(s -> s.getSSTableMetadata().pendingRepair));
         return group.entrySet().stream().map(g -> strategies.get(g.getKey()).getUserDefinedTask(g.getValue(), gcBefore)).collect(Collectors.toList());
     }
 
@@ -487,10 +487,10 @@
      */
     class RepairFinishedCompactionTask extends AbstractCompactionTask
     {
-        private final UUID sessionID;
+        private final TimeUUID sessionID;
         private final long repairedAt;
 
-        RepairFinishedCompactionTask(ColumnFamilyStore cfs, LifecycleTransaction transaction, UUID sessionID, long repairedAt)
+        RepairFinishedCompactionTask(ColumnFamilyStore cfs, LifecycleTransaction transaction, TimeUUID sessionID, long repairedAt)
         {
             super(cfs, transaction);
             this.sessionID = sessionID;
@@ -498,7 +498,7 @@
         }
 
         @VisibleForTesting
-        UUID getSessionID()
+        TimeUUID getSessionID()
         {
             return sessionID;
         }
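
[Editor's aside, not part of the patch] createUserDefinedTasks, like much of PendingRepairManager, partitions sstables by their pendingRepair session id. A minimal standalone sketch of that grouping step with Collectors.groupingBy, using java.util.UUID in place of TimeUUID and a stub SSTable type; none of these names come from the patch.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;

class GroupBySessionSketch
{
    static class SSTable
    {
        final String name;
        final UUID pendingRepair;
        SSTable(String name, UUID pendingRepair) { this.name = name; this.pendingRepair = pendingRepair; }
    }

    public static void main(String[] args)
    {
        UUID sessionA = UUID.randomUUID();
        UUID sessionB = UUID.randomUUID();
        List<SSTable> sstables = Arrays.asList(
            new SSTable("nb-1-big", sessionA),
            new SSTable("nb-2-big", sessionB),
            new SSTable("nb-3-big", sessionA));

        // One bucket per pending-repair session; each bucket would feed one user-defined task.
        Map<UUID, List<SSTable>> bySession =
            sstables.stream().collect(Collectors.groupingBy(s -> s.pendingRepair));

        bySession.forEach((session, group) ->
            System.out.println(session + " -> " + group.size() + " sstable(s)"));
    }
}

Collectors.groupingBy throws on a null key, so a sketch like this only applies to sstables that are actually marked pending repair, in line with the checkPendingID guard elsewhere in this file.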
diff --git a/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java b/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java
index 1746d7c..6f68c34 100644
--- a/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java
+++ b/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java
@@ -19,7 +19,6 @@
 
 import java.util.*;
 import java.util.function.LongPredicate;
-import java.util.function.Predicate;
 
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter;
@@ -43,12 +42,12 @@
 
     public static class SplittingCompactionTask extends CompactionTask
     {
-        private final int sstableSizeInMB;
+        private final int sstableSizeInMiB;
 
         public SplittingCompactionTask(ColumnFamilyStore cfs, LifecycleTransaction transaction, int sstableSizeInMB)
         {
             super(cfs, transaction, CompactionManager.NO_GC, false);
-            this.sstableSizeInMB = sstableSizeInMB;
+            this.sstableSizeInMiB = sstableSizeInMB;
 
             if (sstableSizeInMB <= 0)
                 throw new IllegalArgumentException("Invalid target size for SSTables, must be > 0 (got: " + sstableSizeInMB + ")");
@@ -66,7 +65,7 @@
                                                               LifecycleTransaction txn,
                                                               Set<SSTableReader> nonExpiredSSTables)
         {
-            return new MaxSSTableSizeWriter(cfs, directories, txn, nonExpiredSSTables, sstableSizeInMB * 1024L * 1024L, 0, false);
+            return new MaxSSTableSizeWriter(cfs, directories, txn, nonExpiredSSTables, sstableSizeInMiB * 1024L * 1024L, 0, false);
         }
 
         @Override
diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
index 4ca2d85..56825d0 100644
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@ -17,8 +17,10 @@
  */
 package org.apache.cassandra.db.compaction;
 
+import java.io.IOError;
+import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.io.*;
+
 import java.util.*;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -28,6 +30,7 @@
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableSet;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
@@ -44,6 +47,8 @@
 import org.apache.cassandra.utils.concurrent.Refs;
 import org.apache.cassandra.utils.memory.HeapCloner;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class Scrubber implements Closeable
 {
     private final ColumnFamilyStore cfs;
@@ -490,7 +495,7 @@
     {
         private final RandomAccessReader dataFile;
         private final SSTableReader sstable;
-        private final UUID scrubCompactionId;
+        private final TimeUUID scrubCompactionId;
         private final Lock fileReadLock;
 
         public ScrubInfo(RandomAccessReader dataFile, SSTableReader sstable, Lock fileReadLock)
@@ -498,7 +503,7 @@
             this.dataFile = dataFile;
             this.sstable = sstable;
             this.fileReadLock = fileReadLock;
-            scrubCompactionId = UUIDGen.getTimeUUID();
+            scrubCompactionId = nextTimeUUID();
         }
 
         public CompactionInfo getCompactionInfo()
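
The scrubber's progress holder (ScrubInfo) now tags itself with a time-ordered id from nextTimeUUID() instead of UUIDGen.getTimeUUID(). As a rough, self-contained sketch of the underlying pattern, each maintenance task gets a unique operation id at construction time so its progress can be reported per operation; the stand-in below uses java.util.UUID.randomUUID() because Cassandra's TimeUUID is not on the classpath here, and ScrubProgress is a hypothetical name:

    import java.util.UUID;

    public final class ScrubProgress
    {
        private final String sstableName;
        private final UUID compactionId;

        public ScrubProgress(String sstableName)
        {
            this.sstableName = sstableName;
            this.compactionId = UUID.randomUUID(); // stand-in for the time-ordered nextTimeUUID()
        }

        public String describe(long bytesScrubbed, long totalBytes)
        {
            return String.format("Scrub %s on %s: %d/%d bytes", compactionId, sstableName, bytesScrubbed, totalBytes);
        }

        public static void main(String[] args)
        {
            System.out.println(new ScrubProgress("nb-1-big-Data.db").describe(1024, 4096));
        }
    }
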
diff --git a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java
index 288af2b..eb1d8f9 100644
--- a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java
+++ b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java
@@ -101,4 +101,4 @@
         return String.format("Min sstable size: %d, bucket low: %f, bucket high: %f", minSSTableSize, bucketLow, bucketHigh);
     }
 
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
index 4b499ce..555d86a 100644
--- a/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
@@ -45,6 +45,7 @@
 import org.apache.cassandra.utils.Pair;
 
 import static com.google.common.collect.Iterables.filter;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class TimeWindowCompactionStrategy extends AbstractCompactionStrategy
 {
@@ -117,12 +118,12 @@
         // Find fully expired SSTables. Those will be included no matter what.
         Set<SSTableReader> expired = Collections.emptySet();
 
-        if (System.currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
+        if (currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
         {
             logger.debug("TWCS expired check sufficiently far in the past, checking for fully expired SSTables");
             expired = CompactionController.getFullyExpiredSSTables(cfs, uncompacting, options.ignoreOverlaps ? Collections.emptySet() : cfs.getOverlappingLiveSSTables(uncompacting),
                                                                    gcBefore, options.ignoreOverlaps);
-            lastExpiredCheck = System.currentTimeMillis();
+            lastExpiredCheck = currentTimeMillis();
         }
         else
         {
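
The strategy now reads time through Clock.Global.currentTimeMillis() rather than System.currentTimeMillis(), which makes the "only look for fully expired sstables every so often" throttle testable with a controlled clock. A small sketch of that throttle with an injectable clock (ExpiredCheckThrottle is a hypothetical name, not Cassandra API):

    import java.util.function.LongSupplier;

    public final class ExpiredCheckThrottle
    {
        private final LongSupplier clock;          // e.g. System::currentTimeMillis in production
        private final long checkFrequencyMillis;   // how often the expensive check may run
        private long lastCheckMillis;

        public ExpiredCheckThrottle(LongSupplier clock, long checkFrequencyMillis)
        {
            this.clock = clock;
            this.checkFrequencyMillis = checkFrequencyMillis;
        }

        /** Returns true (and records the time) if enough time has elapsed since the last check. */
        public boolean shouldCheckNow()
        {
            long now = clock.getAsLong();
            if (now - lastCheckMillis > checkFrequencyMillis)
            {
                lastCheckMillis = now;
                return true;
            }
            return false;
        }

        public static void main(String[] args)
        {
            ExpiredCheckThrottle throttle = new ExpiredCheckThrottle(System::currentTimeMillis, 10 * 60 * 1000L);
            System.out.println(throttle.shouldCheckNow()); // true on the first call
            System.out.println(throttle.shouldCheckNow()); // false immediately afterwards
        }
    }
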
diff --git a/src/java/org/apache/cassandra/db/compaction/Upgrader.java b/src/java/org/apache/cassandra/db/compaction/Upgrader.java
index e1406aa..87bf5b8 100644
--- a/src/java/org/apache/cassandra/db/compaction/Upgrader.java
+++ b/src/java/org/apache/cassandra/db/compaction/Upgrader.java
@@ -17,10 +17,8 @@
  */
 package org.apache.cassandra.db.compaction;
 
-import java.io.File;
 import java.util.*;
 import java.util.function.LongPredicate;
-import java.util.function.Predicate;
 
 import com.google.common.base.Throwables;
 import com.google.common.collect.Sets;
@@ -34,9 +32,11 @@
 import org.apache.cassandra.io.sstable.format.SSTableWriter;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.OutputHandler;
-import org.apache.cassandra.utils.UUIDGen;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class Upgrader
 {
@@ -58,7 +58,7 @@
         this.sstable = txn.onlyOne();
         this.outputHandler = outputHandler;
 
-        this.directory = new File(sstable.getFilename()).getParentFile();
+        this.directory = new File(sstable.getFilename()).parent();
 
         this.controller = new UpgradeController(cfs);
 
@@ -90,7 +90,7 @@
         int nowInSec = FBUtilities.nowInSeconds();
         try (SSTableRewriter writer = SSTableRewriter.construct(cfs, transaction, keepOriginals, CompactionTask.getMaxDataAge(transaction.originals()));
              AbstractCompactionStrategy.ScannerList scanners = strategyManager.getScanners(transaction.originals());
-             CompactionIterator iter = new CompactionIterator(transaction.opType(), scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
+             CompactionIterator iter = new CompactionIterator(transaction.opType(), scanners.scanners, controller, nowInSec, nextTimeUUID()))
         {
             writer.switchWriter(createCompactionWriter(sstable.getSSTableMetadata()));
             while (iter.hasNext())
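
The upgrader now resolves the sstable's directory through Cassandra's File wrapper (parent()) instead of java.io.File.getParentFile(). The same operation with plain JDK paths, as an illustrative stand-in (SSTableDirectory is a hypothetical name):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public final class SSTableDirectory
    {
        /** Returns the directory that holds the given sstable data file, or null if it has no parent. */
        public static Path directoryOf(String dataFileName)
        {
            return Paths.get(dataFileName).getParent();
        }

        public static void main(String[] args)
        {
            System.out.println(directoryOf("/var/lib/cassandra/data/ks/t-1234/nb-1-big-Data.db"));
            // -> /var/lib/cassandra/data/ks/t-1234
        }
    }
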
diff --git a/src/java/org/apache/cassandra/db/compaction/Verifier.java b/src/java/org/apache/cassandra/db/compaction/Verifier.java
index 30e74ad..29eb299 100644
--- a/src/java/org/apache/cassandra/db/compaction/Verifier.java
+++ b/src/java/org/apache/cassandra/db/compaction/Verifier.java
@@ -38,6 +38,7 @@
 import org.apache.cassandra.io.sstable.metadata.ValidationMetadata;
 import org.apache.cassandra.io.util.DataIntegrityMetadata;
 import org.apache.cassandra.io.util.DataIntegrityMetadata.FileDigestValidator;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.schema.TableMetadata;
@@ -48,18 +49,14 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.IFilter;
 import org.apache.cassandra.utils.OutputHandler;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
-import java.io.BufferedInputStream;
 import java.io.Closeable;
 import java.io.DataInputStream;
-import java.io.File;
 import java.io.IOError;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.*;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -67,6 +64,10 @@
 import java.util.function.Function;
 import java.util.function.LongPredicate;
 
+import org.apache.cassandra.io.util.File;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class Verifier implements Closeable
 {
     private final ColumnFamilyStore cfs;
@@ -444,10 +445,10 @@
 
     private void deserializeBloomFilter(SSTableReader sstable) throws IOException
     {
-        Path bfPath = Paths.get(sstable.descriptor.filenameFor(Component.FILTER));
-        if (Files.exists(bfPath))
+        File bfPath = new File(sstable.descriptor.filenameFor(Component.FILTER));
+        if (bfPath.exists())
         {
-            try (DataInputStream stream = new DataInputStream(new BufferedInputStream(Files.newInputStream(bfPath)));
+            try (FileInputStreamPlus stream = bfPath.newInputStream();
                  IFilter bf = BloomFilterSerializer.deserialize(stream, sstable.descriptor.version.hasOldBfFormat()))
             {
             }
@@ -509,7 +510,7 @@
     {
         private final RandomAccessReader dataFile;
         private final SSTableReader sstable;
-        private final UUID verificationCompactionId;
+        private final TimeUUID verificationCompactionId;
         private final Lock fileReadLock;
 
         public VerifyInfo(RandomAccessReader dataFile, SSTableReader sstable, Lock fileReadLock)
@@ -517,7 +518,7 @@
             this.dataFile = dataFile;
             this.sstable = sstable;
             this.fileReadLock = fileReadLock;
-            verificationCompactionId = UUIDGen.getTimeUUID();
+            verificationCompactionId = nextTimeUUID();
         }
 
         public CompactionInfo getCompactionInfo()
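
The bloom-filter check above keeps the same shape: test whether the FILTER component exists and, if so, read it through a buffered stream that try-with-resources closes; if the file is absent, deserialization is simply skipped. A self-contained sketch of that shape using plain JDK types (FilterFileReader is a hypothetical name, not the Cassandra wrapper API):

    import java.io.BufferedInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public final class FilterFileReader
    {
        public static void readIfPresent(String filename) throws IOException
        {
            Path path = Paths.get(filename);
            if (!Files.exists(path))
                return; // no FILTER component on disk: nothing to deserialize

            try (DataInputStream in = new DataInputStream(new BufferedInputStream(Files.newInputStream(path))))
            {
                // Real code would hand the stream to a deserializer; here we just consume the first byte.
                in.read();
            }
        }

        public static void main(String[] args) throws IOException
        {
            readIfPresent("does-not-exist.db"); // silently returns
        }
    }
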
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java
index 3760ab1..0f4fe2d 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java
@@ -18,11 +18,9 @@
 
 package org.apache.cassandra.db.compaction.writers;
 
-import java.io.File;
 import java.util.Collection;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,9 +37,8 @@
 import org.apache.cassandra.io.sstable.SSTableRewriter;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Transactional;
-import org.apache.cassandra.db.compaction.OperationType;
-
 
 /**
  * Class that abstracts away the actual writing of files to make it possible to use CompactionTask for more
@@ -57,7 +54,7 @@
     protected final long estimatedTotalKeys;
     protected final long maxAge;
     protected final long minRepairedAt;
-    protected final UUID pendingRepair;
+    protected final TimeUUID pendingRepair;
     protected final boolean isTransient;
 
     protected final SSTableRewriter sstableWriter;
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
index 6508fa9..264e19c 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
@@ -37,7 +37,7 @@
  * CompactionAwareWriter that splits input in differently sized sstables
  *
  * Biggest sstable will be total_compaction_size / 2, second biggest total_compaction_size / 4 etc until
- * the result would be sub 50MB, all those are put in the same
+ * the result would be sub 50MiB, all those are put in the same sstable
  */
 public class SplittingSizeTieredCompactionWriter extends CompactionAwareWriter
 {
@@ -70,7 +70,7 @@
         }
 
         int noPointIndex = 0;
-        // find how many sstables we should create - 50MB min sstable size
+        // find how many sstables we should create - 50MiB min sstable size
         for (double ratio : potentialRatios)
         {
             noPointIndex++;
diff --git a/src/java/org/apache/cassandra/db/context/CounterContext.java b/src/java/org/apache/cassandra/db/context/CounterContext.java
index 7db8192..37bd3ca 100644
--- a/src/java/org/apache/cassandra/db/context/CounterContext.java
+++ b/src/java/org/apache/cassandra/db/context/CounterContext.java
@@ -465,7 +465,7 @@
                 if (leftClock == rightClock)
                 {
                     // Can happen if an sstable gets lost and disk failure policy is set to 'best effort'
-                    if (leftCount != rightCount && CompactionManager.isCompactionManager.get())
+                    if (leftCount != rightCount && CompactionManager.isCompactor(Thread.currentThread()))
                     {
                         logger.warn("invalid global counter shard detected; ({}, {}, {}) and ({}, {}, {}) differ only in "
                                     + "count; will pick highest to self-heal on compaction",
@@ -506,7 +506,7 @@
             // We should never see non-local shards w/ same id+clock but different counts. However, if we do
             // we should "heal" the problem by being deterministic in our selection of shard - and
             // log the occurrence so that the operator will know something is wrong.
-            if (leftCount != rightCount && CompactionManager.isCompactionManager.get())
+            if (leftCount != rightCount && CompactionManager.isCompactor(Thread.currentThread()))
             {
                 logger.warn("invalid remote counter shard detected; ({}, {}, {}) and ({}, {}, {}) differ only in "
                             + "count; will pick highest to self-heal on compaction",
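
Both self-heal warnings now ask CompactionManager.isCompactor(Thread.currentThread()) instead of consulting a thread-local flag. One way such a thread-identity check can be implemented is with a registry of compaction worker threads; the sketch below is illustrative only, CompactorRegistry is a hypothetical name, and it is not necessarily how CompactionManager does it:

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public final class CompactorRegistry
    {
        private static final Set<Thread> COMPACTORS = ConcurrentHashMap.newKeySet();

        public static void register(Thread t)   { COMPACTORS.add(t); }
        public static void unregister(Thread t) { COMPACTORS.remove(t); }

        /** Returns true if the given thread has been registered as a compaction worker. */
        public static boolean isCompactor(Thread t)
        {
            return COMPACTORS.contains(t);
        }

        public static void main(String[] args)
        {
            System.out.println(isCompactor(Thread.currentThread())); // false
            register(Thread.currentThread());
            System.out.println(isCompactor(Thread.currentThread())); // true
        }
    }
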
diff --git a/src/java/org/apache/cassandra/db/filter/AbstractClusteringIndexFilter.java b/src/java/org/apache/cassandra/db/filter/AbstractClusteringIndexFilter.java
index 63c2783..ddcaaed 100644
--- a/src/java/org/apache/cassandra/db/filter/AbstractClusteringIndexFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/AbstractClusteringIndexFilter.java
@@ -54,11 +54,14 @@
     {
         if (reversed)
         {
-            sb.append(" ORDER BY (");
+            sb.append(" ORDER BY ");
             int i = 0;
             for (ColumnMetadata column : metadata.clusteringColumns())
-                sb.append(i++ == 0 ? "" : ", ").append(column.name).append(column.type instanceof ReversedType ? " ASC" : " DESC");
-            sb.append(')');
+            {
+                sb.append(i++ == 0 ? "" : ", ")
+                  .append(column.name.toCQLString())
+                  .append(column.type instanceof ReversedType ? " ASC" : " DESC");
+            }
         }
     }
 
diff --git a/src/java/org/apache/cassandra/db/filter/ClusteringIndexFilter.java b/src/java/org/apache/cassandra/db/filter/ClusteringIndexFilter.java
index 6ea0435..924ff21 100644
--- a/src/java/org/apache/cassandra/db/filter/ClusteringIndexFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/ClusteringIndexFilter.java
@@ -153,7 +153,7 @@
     public Kind kind();
 
     public String toString(TableMetadata metadata);
-    public String toCQLString(TableMetadata metadata);
+    public String toCQLString(TableMetadata metadata, RowFilter rowFilter);
 
     public interface Serializer
     {
diff --git a/src/java/org/apache/cassandra/db/filter/ClusteringIndexNamesFilter.java b/src/java/org/apache/cassandra/db/filter/ClusteringIndexNamesFilter.java
index 3ff5234..18dc471 100644
--- a/src/java/org/apache/cassandra/db/filter/ClusteringIndexNamesFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/ClusteringIndexNamesFilter.java
@@ -21,6 +21,7 @@
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import org.apache.cassandra.cql3.Operator;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.rows.*;
@@ -168,18 +169,36 @@
         return sb.append(')').toString();
     }
 
-    public String toCQLString(TableMetadata metadata)
+    @Override
+    public String toCQLString(TableMetadata metadata, RowFilter rowFilter)
     {
         if (metadata.clusteringColumns().isEmpty() || clusterings.isEmpty())
-            return "";
+            return rowFilter.toCQLString();
+
+        boolean isSingleColumn = metadata.clusteringColumns().size() == 1;
+        boolean isSingleClustering = clusterings.size() == 1;
 
         StringBuilder sb = new StringBuilder();
-        sb.append('(').append(ColumnMetadata.toCQLString(metadata.clusteringColumns())).append(')');
-        sb.append(clusterings.size() == 1 ? " = " : " IN (");
+        sb.append(isSingleColumn ? "" : '(')
+          .append(ColumnMetadata.toCQLString(metadata.clusteringColumns()))
+          .append(isSingleColumn ? "" : ')');
+
+        sb.append(isSingleClustering ? " = " : " IN (");
         int i = 0;
         for (Clustering<?> clustering : clusterings)
-            sb.append(i++ == 0 ? "" : ", ").append('(').append(clustering.toCQLString(metadata)).append(')');
-        sb.append(clusterings.size() == 1 ? "" : ")");
+        {
+            sb.append(i++ == 0 ? "" : ", ")
+              .append(isSingleColumn ? "" : '(')
+              .append(clustering.toCQLString(metadata))
+              .append(isSingleColumn ? "" : ')');
+
+            for (int j = 0; j < clustering.size(); j++)
+                rowFilter = rowFilter.without(metadata.clusteringColumns().get(j), Operator.EQ, clustering.bufferAt(j));
+        }
+        sb.append(isSingleClustering ? "" : ")");
+
+        if (!rowFilter.isEmpty())
+            sb.append(" AND ").append(rowFilter.toCQLString());
 
         appendOrderByToCQLString(metadata, sb);
         return sb.toString();
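
The rewritten toCQLString drops the surrounding parentheses when the table has a single clustering column and picks "=" versus "IN" depending on how many clusterings are named, then appends any remaining row-filter expressions and the ORDER BY fragment for reversed reads. A self-contained sketch of just the restriction formatting (ClusteringRestrictionFormatter is a hypothetical name; values are assumed to be already CQL-formatted strings):

    import java.util.List;
    import java.util.StringJoiner;

    public final class ClusteringRestrictionFormatter
    {
        public static String format(List<String> columns, List<List<String>> clusterings)
        {
            boolean singleColumn = columns.size() == 1;
            boolean singleClustering = clusterings.size() == 1;

            StringBuilder sb = new StringBuilder();
            sb.append(singleColumn ? columns.get(0) : "(" + String.join(", ", columns) + ")");
            sb.append(singleClustering ? " = " : " IN (");

            StringJoiner joiner = new StringJoiner(", ");
            for (List<String> clustering : clusterings)
                joiner.add(singleColumn ? clustering.get(0) : "(" + String.join(", ", clustering) + ")");
            sb.append(joiner);

            return singleClustering ? sb.toString() : sb.append(')').toString();
        }

        public static void main(String[] args)
        {
            System.out.println(format(List.of("ck"), List.of(List.of("1"))));                             // ck = 1
            System.out.println(format(List.of("ck"), List.of(List.of("1"), List.of("2"))));               // ck IN (1, 2)
            System.out.println(format(List.of("a", "b"), List.of(List.of("1", "2"), List.of("3", "4")))); // (a, b) IN ((1, 2), (3, 4))
        }
    }
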
diff --git a/src/java/org/apache/cassandra/db/filter/ClusteringIndexSliceFilter.java b/src/java/org/apache/cassandra/db/filter/ClusteringIndexSliceFilter.java
index 5df98c3..178c96b 100644
--- a/src/java/org/apache/cassandra/db/filter/ClusteringIndexSliceFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/ClusteringIndexSliceFilter.java
@@ -142,13 +142,12 @@
         return String.format("slice(slices=%s, reversed=%b)", slices, reversed);
     }
 
-    public String toCQLString(TableMetadata metadata)
+    @Override
+    public String toCQLString(TableMetadata metadata, RowFilter rowFilter)
     {
         StringBuilder sb = new StringBuilder();
 
-        if (!selectsAllPartition())
-            sb.append(slices.toCQLString(metadata));
-
+        sb.append(slices.toCQLString(metadata, rowFilter));
         appendOrderByToCQLString(metadata, sb);
 
         return sb.toString();
diff --git a/src/java/org/apache/cassandra/db/filter/ColumnFilter.java b/src/java/org/apache/cassandra/db/filter/ColumnFilter.java
index d9a1b9d4..0ed6237 100644
--- a/src/java/org/apache/cassandra/db/filter/ColumnFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/ColumnFilter.java
@@ -940,7 +940,7 @@
                 if (s.isEmpty())
                     joiner.add(columnName);
                 else
-                    s.forEach(subSel -> joiner.add(String.format("%s%s", columnName, subSel)));
+                    s.forEach(subSel -> joiner.add(String.format("%s%s", columnName, subSel.toString(cql))));
             }
             return joiner.toString();
         }
diff --git a/src/java/org/apache/cassandra/db/filter/ColumnSubselection.java b/src/java/org/apache/cassandra/db/filter/ColumnSubselection.java
index dbb415a..c53c43a 100644
--- a/src/java/org/apache/cassandra/db/filter/ColumnSubselection.java
+++ b/src/java/org/apache/cassandra/db/filter/ColumnSubselection.java
@@ -88,6 +88,14 @@
      */
     public abstract int compareInclusionOf(CellPath path);
 
+    @Override
+    public String toString()
+    {
+        return toString(false);
+    }
+
+    protected abstract String toString(boolean cql);
+
     private static class Slice extends ColumnSubselection
     {
         private final CellPath from;
@@ -122,11 +130,13 @@
         }
 
         @Override
-        public String toString()
+        protected String toString(boolean cql)
         {
             // This asserts that we're dealing with a collection since that's the only thing it's used for so far.
             AbstractType<?> type = ((CollectionType<?>)column().type).nameComparator();
-            return String.format("[%s:%s]", from == CellPath.BOTTOM ? "" : type.getString(from.get(0)), to == CellPath.TOP ? "" : type.getString(to.get(0)));
+            return String.format("[%s:%s]",
+                                 from == CellPath.BOTTOM ? "" : (cql ? type.toCQLString(from.get(0)) : type.getString(from.get(0))),
+                                 to == CellPath.TOP ? "" : (cql ? type.toCQLString(to.get(0)) : type.getString(to.get(0))));
         }
     }
 
@@ -156,11 +166,11 @@
         }
 
         @Override
-        public String toString()
+        protected String toString(boolean cql)
         {
             // This asserts that we're dealing with a collection since that's the only thing it's used for so far.
             AbstractType<?> type = ((CollectionType<?>)column().type).nameComparator();
-            return String.format("[%s]", type.getString(element.get(0)));
+            return String.format("[%s]", cql ? type.toCQLString(element.get(0)) : type.getString(element.get(0)));
         }
     }
 
diff --git a/src/java/org/apache/cassandra/db/filter/DataLimits.java b/src/java/org/apache/cassandra/db/filter/DataLimits.java
index 845cffd..f988cb3 100644
--- a/src/java/org/apache/cassandra/db/filter/DataLimits.java
+++ b/src/java/org/apache/cassandra/db/filter/DataLimits.java
@@ -32,6 +32,7 @@
 import org.apache.cassandra.db.transform.Transformation;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 /**
@@ -1175,7 +1176,7 @@
             }
         }
 
-        public DataLimits deserialize(DataInputPlus in, int version, ClusteringComparator comparator) throws IOException
+        public DataLimits deserialize(DataInputPlus in, int version, TableMetadata metadata) throws IOException
         {
             Kind kind = Kind.values()[in.readUnsignedByte()];
             switch (kind)
@@ -1199,9 +1200,9 @@
                     int groupPerPartitionLimit = (int) in.readUnsignedVInt();
                     int rowLimit = (int) in.readUnsignedVInt();
 
-                    AggregationSpecification groupBySpec = AggregationSpecification.serializer.deserialize(in, version, comparator);
+                    AggregationSpecification groupBySpec = AggregationSpecification.serializer.deserialize(in, version, metadata);
 
-                    GroupingState state = GroupingState.serializer.deserialize(in, version, comparator);
+                    GroupingState state = GroupingState.serializer.deserialize(in, version, metadata.comparator);
 
                     if (kind == Kind.CQL_GROUP_BY_LIMIT)
                         return new CQLGroupByLimits(groupLimit,
diff --git a/src/java/org/apache/cassandra/db/filter/LocalReadSizeTooLargeException.java b/src/java/org/apache/cassandra/db/filter/LocalReadSizeTooLargeException.java
new file mode 100644
index 0000000..9d872df
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/filter/LocalReadSizeTooLargeException.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.filter;
+
+import org.apache.cassandra.db.RejectException;
+
+public class LocalReadSizeTooLargeException extends RejectException
+{
+    public LocalReadSizeTooLargeException(String message)
+    {
+        super(message);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/filter/RowFilter.java b/src/java/org/apache/cassandra/db/filter/RowFilter.java
index 68a1d57..5e0fb51 100644
--- a/src/java/org/apache/cassandra/db/filter/RowFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/RowFilter.java
@@ -28,6 +28,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.Operator;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.context.*;
@@ -238,6 +239,23 @@
         return withNewExpressions(newExpressions);
     }
 
+    /**
+     * Returns a copy of this filter but without the provided expression. If this filter doesn't contain the specified
+     * expression this method will just return an identical copy of this filter.
+     */
+    public RowFilter without(ColumnMetadata column, Operator op, ByteBuffer value)
+    {
+        if (isEmpty())
+            return this;
+
+        List<Expression> newExpressions = new ArrayList<>(expressions.size() - 1);
+        for (Expression e : expressions)
+            if (!e.column().equals(column) || e.operator() != op || !e.value.equals(value))
+                newExpressions.add(e);
+
+        return withNewExpressions(newExpressions);
+    }
+
     public RowFilter withoutExpressions()
     {
         return withNewExpressions(Collections.emptyList());
@@ -258,12 +276,27 @@
     @Override
     public String toString()
     {
+        return toString(false);
+    }
+
+    /**
+     * Returns a CQL representation of this row filter.
+     *
+     * @return a CQL representation of this row filter
+     */
+    public String toCQLString()
+    {
+        return toString(true);
+    }
+
+    private String toString(boolean cql)
+    {
         StringBuilder sb = new StringBuilder();
         for (int i = 0; i < expressions.size(); i++)
         {
             if (i > 0)
                 sb.append(" AND ");
-            sb.append(expressions.get(i));
+            sb.append(expressions.get(i).toString(cql));
         }
         return sb.toString();
     }
@@ -478,6 +511,24 @@
             return Objects.hashCode(column.name, operator, value);
         }
 
+        @Override
+        public String toString()
+        {
+            return toString(false);
+        }
+
+        /**
+         * Returns a CQL representation of this expression.
+         *
+         * @return a CQL representation of this expression
+         */
+        public String toCQLString()
+        {
+            return toString(true);
+        }
+
+        protected abstract String toString(boolean cql);
+
         private static class Serializer
         {
             public void serialize(Expression expression, DataOutputPlus out, int version) throws IOException
@@ -603,6 +654,7 @@
             switch (operator)
             {
                 case EQ:
+                case IN:
                 case LT:
                 case LTE:
                 case GTE:
@@ -695,17 +747,12 @@
                         ByteBuffer foundValue = getValue(metadata, partitionKey, row);
                         return foundValue != null && mapType.getSerializer().getSerializedValue(foundValue, value, mapType.getKeysType()) != null;
                     }
-
-                case IN:
-                    // It wouldn't be terribly hard to support this (though doing so would imply supporting
-                    // IN for 2ndary index) but currently we don't.
-                    throw new AssertionError();
             }
             throw new AssertionError();
         }
 
         @Override
-        public String toString()
+        protected String toString(boolean cql)
         {
             AbstractType<?> type = column.type;
             switch (operator)
@@ -725,7 +772,9 @@
                 default:
                     break;
             }
-            return String.format("%s %s %s", column.name, operator, type.getString(value));
+            return cql
+                 ? String.format("%s %s %s", column.name.toCQLString(), operator, type.toCQLString(value))
+                 : String.format("%s %s %s", column.name.toString(), operator, type.getString(value));
         }
 
         @Override
@@ -793,10 +842,14 @@
         }
 
         @Override
-        public String toString()
+        protected String toString(boolean cql)
         {
-            MapType<?, ?> mt = (MapType<?, ?>)column.type;
-            return String.format("%s[%s] = %s", column.name, mt.nameComparator().getString(key), mt.valueComparator().getString(value));
+            MapType<?, ?> mt = (MapType<?, ?>) column.type;
+            AbstractType<?> nt = mt.nameComparator();
+            AbstractType<?> vt = mt.valueComparator();
+            return cql
+                 ? String.format("%s[%s] = %s", column.name.toCQLString(), nt.toCQLString(key), vt.toCQLString(value))
+                 : String.format("%s[%s] = %s", column.name.toString(), nt.getString(key), vt.getString(value));
         }
 
         @Override
@@ -863,10 +916,11 @@
             return value;
         }
 
-        public String toString()
+        @Override
+        protected String toString(boolean cql)
         {
             return String.format("expr(%s, %s)",
-                                 targetIndex.name,
+                                 cql ? ColumnIdentifier.maybeQuote(targetIndex.name) : targetIndex.name,
                                  Keyspace.openAndGetStore(table)
                                          .indexManager
                                          .getIndex(targetIndex)
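
RowFilter#without above returns a copy of the filter with one matching expression removed, which is what lets ClusteringIndexNamesFilter avoid printing a restriction twice. The copy-on-remove shape in isolation, with plain strings standing in for expressions (SimpleFilter is a hypothetical name, not Cassandra API):

    import java.util.ArrayList;
    import java.util.List;

    public final class SimpleFilter
    {
        private final List<String> expressions;

        public SimpleFilter(List<String> expressions)
        {
            this.expressions = new ArrayList<>(expressions);
        }

        /** Returns a copy of this filter without the given expression; an equivalent copy if it is absent. */
        public SimpleFilter without(String expression)
        {
            if (expressions.isEmpty())
                return this;
            List<String> remaining = new ArrayList<>(expressions);
            remaining.remove(expression);
            return new SimpleFilter(remaining);
        }

        @Override
        public String toString()
        {
            return String.join(" AND ", expressions);
        }

        public static void main(String[] args)
        {
            SimpleFilter filter = new SimpleFilter(List.of("ck = 1", "v > 10"));
            System.out.println(filter);                   // ck = 1 AND v > 10
            System.out.println(filter.without("ck = 1")); // v > 10
        }
    }
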
diff --git a/src/java/org/apache/cassandra/db/filter/RowIndexEntryReadSizeTooLargeException.java b/src/java/org/apache/cassandra/db/filter/RowIndexEntryReadSizeTooLargeException.java
new file mode 100644
index 0000000..20f3f8f
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/filter/RowIndexEntryReadSizeTooLargeException.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.filter;
+
+import org.apache.cassandra.db.RejectException;
+
+public class RowIndexEntryReadSizeTooLargeException extends RejectException
+{
+    public RowIndexEntryReadSizeTooLargeException(String message)
+    {
+        super(message);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/filter/TombstoneOverwhelmingException.java b/src/java/org/apache/cassandra/db/filter/TombstoneOverwhelmingException.java
index 28d49ae..efca3ac 100644
--- a/src/java/org/apache/cassandra/db/filter/TombstoneOverwhelmingException.java
+++ b/src/java/org/apache/cassandra/db/filter/TombstoneOverwhelmingException.java
@@ -24,7 +24,7 @@
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.marshal.*;
 
-public class TombstoneOverwhelmingException extends RuntimeException
+public class TombstoneOverwhelmingException extends RejectException
 {
     public TombstoneOverwhelmingException(int numTombstones, String query, TableMetadata metadata, DecoratedKey lastPartitionKey, ClusteringPrefix<?> lastClustering)
     {
diff --git a/src/java/org/apache/cassandra/db/guardrails/DisableFlag.java b/src/java/org/apache/cassandra/db/guardrails/DisableFlag.java
new file mode 100644
index 0000000..9ec1951
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/DisableFlag.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.function.Predicate;
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.service.ClientState;
+
+/**
+ * A guardrail that completely disables the use of a particular feature.
+ *
+ * <p>Note that this guardrail only aborts operations (if the feature is disabled), so it is only meant for
+ * query-based guardrails (we're happy to reject queries deemed dangerous, but we don't want to create a guardrail
+ * that breaks compaction for instance).
+ */
+public class DisableFlag extends Guardrail
+{
+    private final Predicate<ClientState> disabled;
+    private final String what;
+
+    /**
+     * Creates a new {@link DisableFlag} guardrail.
+     *
+     * @param name     the identifying name of the guardrail
+     * @param disabled a {@link ClientState}-based supplier of boolean indicating whether the feature guarded by this
+     *                 guardrail must be disabled.
+     * @param what     The feature that is guarded by this guardrail (for reporting in error messages),
+     *                 {@link DisableFlag#ensureEnabled(String, ClientState)} can specify a different {@code what}.
+     */
+    public DisableFlag(String name, Predicate<ClientState> disabled, String what)
+    {
+        super(name);
+        this.disabled = disabled;
+        this.what = what;
+    }
+
+    /**
+     * Aborts the operation if this guardrail is disabled.
+     *
+     * <p>This must be called when the feature guarded by this guardrail is used to ensure such use is in fact
+     * allowed.
+     *
+     * @param state The client state, used to skip the check if the query is internal or is done by a superuser.
+     *              A {@code null} value means that the check should be done regardless of the query.
+     */
+    public void ensureEnabled(@Nullable ClientState state)
+    {
+        ensureEnabled(what, state);
+    }
+
+    /**
+     * Aborts the operation if this guardrail is disabled.
+     *
+     * <p>This must be called when the feature guarded by this guardrail is used to ensure such use is in fact
+     * allowed.
+     *
+     * @param what  The feature that is guarded by this guardrail (for reporting in error messages).
+     * @param state The client state, used to skip the check if the query is internal or is done by a superuser.
+     *              A {@code null} value means that the check should be done regardless of the query, although it won't
+     *              throw any exception if the failure threshold is exceeded. This is because checks without an
+     *              associated client come from asynchronous processes such as compaction, and we don't want to
+     *              interrupt such processes.
+     */
+    public void ensureEnabled(String what, @Nullable ClientState state)
+    {
+        if (enabled(state) && disabled.test(state))
+            fail(what + " is not allowed", state);
+    }
+}
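
DisableFlag boils down to: re-evaluate a predicate at the point of use and fail the operation if the guarded feature is currently disabled, which is how configuration changes can take effect live. A stripped-down, self-contained sketch of that idea, with a BooleanSupplier in place of the ClientState predicate and a plain exception in place of the guardrail failure machinery (FeatureGate is a hypothetical name):

    import java.util.function.BooleanSupplier;

    public final class FeatureGate
    {
        private final BooleanSupplier disabled; // re-read on every check, so it can be updated live
        private final String what;

        public FeatureGate(BooleanSupplier disabled, String what)
        {
            this.disabled = disabled;
            this.what = what;
        }

        /** Call at the point of use; throws if the guarded feature is currently disabled. */
        public void ensureEnabled()
        {
            if (disabled.getAsBoolean())
                throw new IllegalStateException(what + " is not allowed");
        }

        public static void main(String[] args)
        {
            FeatureGate gate = new FeatureGate(() -> true, "User creation of secondary indexes");
            try
            {
                gate.ensureEnabled();
            }
            catch (IllegalStateException e)
            {
                System.out.println(e.getMessage()); // User creation of secondary indexes is not allowed
            }
        }
    }
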
diff --git a/src/java/org/apache/cassandra/db/guardrails/Guardrail.java b/src/java/org/apache/cassandra/db/guardrails/Guardrail.java
new file mode 100644
index 0000000..c058f10
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/Guardrail.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nullable;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.NoSpamLogger;
+
+/**
+ * General class defining a given guardrail that guards against some particular usage/condition.
+ * <p>
+ * Some guardrails only emit warnings when triggered, while others abort the query that triggers them. Some may do one
+ * or the other based on a specific threshold. The queries are aborted with an {@link InvalidRequestException}.
+ * <p>
+ * Note that all the defined classes support live updates, which is why each guardrail class constructor takes
+ * suppliers of the condition the guardrail acts on rather than the condition itself. This implies that said suppliers
+ * should be fast and non-blocking to avoid surprises.
+ */
+public abstract class Guardrail
+{
+    protected static final NoSpamLogger logger = NoSpamLogger.getLogger(LoggerFactory.getLogger(Guardrail.class),
+                                                                        10, TimeUnit.MINUTES);
+    protected static final String REDACTED = "<redacted>";
+
+    /** A name identifying the guardrail (mainly for shipping with diagnostic events). */
+    public final String name;
+
+    /** Minimum logging and triggering interval to avoid spamming downstream. */
+    private long minNotifyIntervalInMs = 0;
+
+    /** Time of last warning in milliseconds. */
+    private volatile long lastWarnInMs = 0;
+
+    /** Time of last failure in milliseconds. */
+    private volatile long lastFailInMs = 0;
+
+    Guardrail(String name)
+    {
+        this.name = name;
+    }
+
+    /**
+     * Checks whether this guardrail is enabled or not when the check is done for a background operation that is not
+     * associated with a specific {@link ClientState}, such as compaction or other background processes. Operations that
+     * are associated with a {@link ClientState}, such as CQL queries, should use {@link Guardrail#enabled(ClientState)}.
+     *
+     * @return {@code true} if this guardrail is enabled, {@code false} otherwise.
+     */
+    public boolean enabled()
+    {
+        return enabled(null);
+    }
+
+    /**
+     * Checks whether this guardrail is enabled or not. This will be enabled if the database is initialized and the
+     * authenticated user (if specified) is not system nor superuser.
+     *
+     * @param state the client state, used to skip the check if the query is internal or is done by a superuser.
+     *              A {@code null} value means that the check should be done regardless of the query.
+     * @return {@code true} if this guardrail is enabled, {@code false} otherwise.
+     */
+    public boolean enabled(@Nullable ClientState state)
+    {
+        return DatabaseDescriptor.isDaemonInitialized() && (state == null || state.isOrdinaryUser());
+    }
+
+    protected void warn(String message)
+    {
+        warn(message, message);
+    }
+
+    protected void warn(String message, String redactedMessage)
+    {
+        if (skipNotifying(true))
+            return;
+
+        message = decorateMessage(message);
+
+        logger.warn(message);
+        // Note that ClientWarn will simply ignore the message if we're not running this as part of a user query
+        // (the internal "state" will be null)
+        ClientWarn.instance.warn(message);
+        // Similarly, tracing will also ignore the message if we're not running tracing on the current thread.
+        Tracing.trace(message);
+        GuardrailsDiagnostics.warned(name, decorateMessage(redactedMessage));
+    }
+
+    protected void fail(String message, @Nullable ClientState state)
+    {
+        fail(message, message, state);
+    }
+
+    protected void fail(String message, String redactedMessage, @Nullable ClientState state)
+    {
+        message = decorateMessage(message);
+
+        if (!skipNotifying(false))
+        {
+            logger.error(message);
+            // Note that ClientWarn will simply ignore the message if we're not running this as part of a user query
+            // (the internal "state" will be null)
+            ClientWarn.instance.warn(message);
+            // Similarly, tracing will also ignore the message if we're not running tracing on the current thread.
+            Tracing.trace(message);
+            GuardrailsDiagnostics.failed(name, decorateMessage(redactedMessage));
+        }
+
+        if (state != null)
+            throw new GuardrailViolatedException(message);
+    }
+
+    @VisibleForTesting
+    String decorateMessage(String message)
+    {
+        // Add a prefix to the error message so the user knows what threw the warning or caused the failure
+        return String.format("Guardrail %s violated: %s", name, message);
+    }
+
+    /**
+     * Note: this method is not thread safe and should only be used during guardrail initialization
+     *
+     * @param minNotifyIntervalInMs the minimum interval in milliseconds between logging and triggering listeners,
+     *                              to avoid spamming; the default of 0 means always log and trigger listeners.
+     * @return current guardrail
+     */
+    Guardrail minNotifyIntervalInMs(long minNotifyIntervalInMs)
+    {
+        assert minNotifyIntervalInMs >= 0;
+        this.minNotifyIntervalInMs = minNotifyIntervalInMs;
+        return this;
+    }
+
+    /**
+     * Resets the last notify time to make sure downstream will be notified the next time
+     * {@link this#warn(String, String)} or {@link this#fail(String, ClientState)} is called.
+     */
+    @VisibleForTesting
+    void resetLastNotifyTime()
+    {
+        lastFailInMs = 0;
+        lastWarnInMs = 0;
+    }
+
+    /**
+     * @return true if the guardrail should not log the message or trigger listeners; otherwise, lastWarnInMs or
+     * lastFailInMs is updated accordingly and false is returned.
+     */
+    private boolean skipNotifying(boolean isWarn)
+    {
+        if (minNotifyIntervalInMs == 0)
+            return false;
+
+        long nowInMs = Clock.Global.currentTimeMillis();
+        long timeElapsedInMs = nowInMs - (isWarn ? lastWarnInMs : lastFailInMs);
+
+        boolean skip = timeElapsedInMs < minNotifyIntervalInMs;
+
+        if (!skip)
+        {
+            if (isWarn)
+                lastWarnInMs = nowInMs;
+            else
+                lastFailInMs = nowInMs;
+        }
+
+        return skip;
+    }
+}
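
skipNotifying above rate-limits guardrail notifications: warnings and failures each remember when they last notified, and a notification within minNotifyIntervalInMs of the previous one of the same kind is dropped. The same logic in isolation (NotifyThrottle is a hypothetical name; it uses System.currentTimeMillis() directly rather than Cassandra's Clock):

    public final class NotifyThrottle
    {
        private final long minNotifyIntervalMillis;
        private volatile long lastWarnMillis = 0;
        private volatile long lastFailMillis = 0;

        public NotifyThrottle(long minNotifyIntervalMillis)
        {
            this.minNotifyIntervalMillis = minNotifyIntervalMillis;
        }

        /** Returns true if the notification should be skipped; otherwise records the notification time. */
        public boolean skipNotifying(boolean isWarn)
        {
            if (minNotifyIntervalMillis == 0)
                return false; // an interval of 0 means: always notify

            long now = System.currentTimeMillis();
            long elapsed = now - (isWarn ? lastWarnMillis : lastFailMillis);
            boolean skip = elapsed < minNotifyIntervalMillis;
            if (!skip)
            {
                if (isWarn)
                    lastWarnMillis = now;
                else
                    lastFailMillis = now;
            }
            return skip;
        }

        public static void main(String[] args)
        {
            NotifyThrottle throttle = new NotifyThrottle(30_000);
            System.out.println(throttle.skipNotifying(true));  // false: the first warning goes through
            System.out.println(throttle.skipNotifying(true));  // true: a second warning within 30s is skipped
            System.out.println(throttle.skipNotifying(false)); // false: failures are tracked separately
        }
    }
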
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailEvent.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailEvent.java
new file mode 100644
index 0000000..30eaafa
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailEvent.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.io.Serializable;
+import java.util.HashMap;
+
+import org.apache.cassandra.diag.DiagnosticEvent;
+
+/**
+ * {@link DiagnosticEvent} implementation for guardrail activation events.
+ */
+final class GuardrailEvent extends DiagnosticEvent
+{
+    enum GuardrailEventType
+    {
+        WARNED, FAILED
+    }
+
+    /** The type of activation, which is a warning or a failure. */
+    private final GuardrailEventType type;
+
+    /** The name that identifies the activated guardrail. */
+    private final String name;
+
+    /** The warn/fail message emitted by the activated guardrail. */
+    private final String message;
+
+    /**
+     * Creates a new guardrail activation event.
+     *
+     * @param type    The type of activation, which is a warning or a failure.
+     * @param name    The name that identifies the activated guardrail.
+     * @param message The warn/fail message emitted by the activated guardrail.
+     */
+    GuardrailEvent(GuardrailEventType type, String name, String message)
+    {
+        this.type = type;
+        this.name = name;
+        this.message = message;
+    }
+
+    @Override
+    public Enum<GuardrailEventType> getType()
+    {
+        return type;
+    }
+
+    @Override
+    public HashMap<String, Serializable> toMap()
+    {
+        HashMap<String, Serializable> ret = new HashMap<>();
+        ret.put("name", name);
+        ret.put("message", message);
+        return ret;
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailViolatedException.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailViolatedException.java
new file mode 100644
index 0000000..e8a4795
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailViolatedException.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.apache.cassandra.exceptions.InvalidRequestException;
+
+public class GuardrailViolatedException extends InvalidRequestException
+{
+    GuardrailViolatedException(String message)
+    {
+        super(message);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/guardrails/Guardrails.java b/src/java/org/apache/cassandra/db/guardrails/Guardrails.java
new file mode 100644
index 0000000..9d08ab0
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/Guardrails.java
@@ -0,0 +1,917 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.GuardrailsOptions;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.service.disk.usage.DiskUsageBroadcaster;
+import org.apache.cassandra.utils.MBeanWrapper;
+
+import static java.lang.String.format;
+
+/**
+ * Entry point for Guardrails, storing the defined guardrails and providing a few global methods over them.
+ */
+public final class Guardrails implements GuardrailsMBean
+{
+    public static final String MBEAN_NAME = "org.apache.cassandra.db:type=Guardrails";
+
+    public static final GuardrailsConfigProvider CONFIG_PROVIDER = GuardrailsConfigProvider.instance;
+    private static final GuardrailsOptions DEFAULT_CONFIG = DatabaseDescriptor.getGuardrailsConfig();
+
+    @VisibleForTesting
+    static final Guardrails instance = new Guardrails();
+
+    /**
+     * Guardrail on the total number of user keyspaces.
+     */
+    public static final MaxThreshold keyspaces =
+    new MaxThreshold("keyspaces",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getKeyspacesWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getKeyspacesFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Creating keyspace %s, current number of keyspaces %s exceeds warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Cannot have more than %s keyspaces, aborting the creation of keyspace %s",
+                                        threshold, what));
+
+    /**
+     * Guardrail on the total number of tables on user keyspaces.
+     */
+    public static final MaxThreshold tables =
+    new MaxThreshold("tables",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getTablesWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getTablesFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Creating table %s, current number of tables %s exceeds warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Cannot have more than %s tables, aborting the creation of table %s",
+                                        threshold, what));
+
+    /**
+     * Guardrail on the number of columns per table.
+     */
+    public static final MaxThreshold columnsPerTable =
+    new MaxThreshold("columns_per_table",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getColumnsPerTableWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getColumnsPerTableFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("The table %s has %s columns, this exceeds the warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Tables cannot have more than %s columns, but %s provided for table %s",
+                                        threshold, value, what));
+
+    public static final MaxThreshold secondaryIndexesPerTable =
+    new MaxThreshold("secondary_indexes_per_table",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getSecondaryIndexesPerTableWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getSecondaryIndexesPerTableFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Creating secondary index %s, current number of indexes %s exceeds warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Tables cannot have more than %s secondary indexes, aborting the creation of secondary index %s",
+                                        threshold, what));
+
+    /**
+     * Guardrail disabling user's ability to create secondary indexes
+     */
+    public static final DisableFlag createSecondaryIndexesEnabled =
+    new DisableFlag("secondary_indexes",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getSecondaryIndexesEnabled(),
+                    "User creation of secondary indexes");
+
+    /**
+     * Guardrail on the number of materialized views per table.
+     */
+    public static final MaxThreshold materializedViewsPerTable =
+    new MaxThreshold("materialized_views_per_table",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getMaterializedViewsPerTableWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getMaterializedViewsPerTableFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Creating materialized view %s, current number of views %s exceeds warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Tables cannot have more than %s materialized views, aborting the creation of materialized view %s",
+                                        threshold, what));
+
+    /**
+     * Guardrail warning about, ignoring or rejecting the usage of certain table properties.
+     */
+    public static final Values<String> tableProperties =
+    new Values<>("table_properties",
+                 state -> CONFIG_PROVIDER.getOrCreate(state).getTablePropertiesWarned(),
+                 state -> CONFIG_PROVIDER.getOrCreate(state).getTablePropertiesIgnored(),
+                 state -> CONFIG_PROVIDER.getOrCreate(state).getTablePropertiesDisallowed(),
+                 "Table Properties");
+
+    /**
+     * Guardrail disabling user-provided timestamps.
+     */
+    public static final DisableFlag userTimestampsEnabled =
+    new DisableFlag("user_timestamps",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getUserTimestampsEnabled(),
+                    "User provided timestamps (USING TIMESTAMP)");
+
+    public static final DisableFlag groupByEnabled =
+    new DisableFlag("group_by",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getGroupByEnabled(),
+                    "GROUP BY functionality");
+
+    public static final DisableFlag dropTruncateTableEnabled =
+    new DisableFlag("drop_truncate_table_enabled",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getDropTruncateTableEnabled(),
+                    "DROP and TRUNCATE TABLE functionality");
+
+    /**
+     * Guardrail disabling user's ability to turn off compression
+     */
+    public static final DisableFlag uncompressedTablesEnabled =
+    new DisableFlag("uncompressed_tables_enabled",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getUncompressedTablesEnabled(),
+                    "Uncompressed table");
+
+    /**
+     * Guardrail disabling the creation of new COMPACT STORAGE tables
+     */
+    public static final DisableFlag compactTablesEnabled =
+    new DisableFlag("compact_tables",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getCompactTablesEnabled(),
+                    "Creation of new COMPACT STORAGE tables");
+
+    /**
+     * Guardrail on the number of elements returned within page.
+     */
+    public static final MaxThreshold pageSize =
+    new MaxThreshold("page_size",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getPageSizeWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getPageSizeFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Query for table %s with page size %s exceeds warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Aborting query for table %s, page size %s exceeds fail threshold of %s.",
+                                        what, value, threshold));
+
+    /**
+     * Guardrail on the number of partition keys in the IN clause.
+     */
+    public static final MaxThreshold partitionKeysInSelect =
+    new MaxThreshold("partition_keys_in_select",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getPartitionKeysInSelectWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getPartitionKeysInSelectFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Query with partition keys in IN clause on table %s, with number of " +
+                                        "partition keys %s exceeds warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Aborting query with partition keys in IN clause on table %s, " +
+                                        "number of partition keys %s exceeds fail threshold of %s.",
+                                        what, value, threshold));
+
+    /**
+     * Guardrail disabling operations on lists that require read before write.
+     */
+    public static final DisableFlag readBeforeWriteListOperationsEnabled =
+    new DisableFlag("read_before_write_list_operations",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getReadBeforeWriteListOperationsEnabled(),
+                    "List operation requiring read before write");
+
+    /**
+     * Guardrail disabling ALLOW FILTERING statement within a query
+     */
+    public static final DisableFlag allowFilteringEnabled =
+    new DisableFlag("allow_filtering",
+                    state -> !CONFIG_PROVIDER.getOrCreate(state).getAllowFilteringEnabled(),
+                    "Querying with ALLOW FILTERING");
+
+    /**
+     * Guardrail on the number of restrictions created by a cartesian product of a CQL's {@code IN} query.
+     */
+    public static final MaxThreshold inSelectCartesianProduct =
+    new MaxThreshold("in_select_cartesian_product",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getInSelectCartesianProductWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getInSelectCartesianProductFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("The cartesian product of the IN restrictions on %s produces %s values, " +
+                                        "this exceeds warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Aborting query because the cartesian product of the IN restrictions on %s " +
+                                        "produces %s values, this exceeds fail threshold of %s.",
+                                        what, value, threshold));
+
+    /**
+     * Guardrail on read consistency levels.
+     */
+    public static final Values<ConsistencyLevel> readConsistencyLevels =
+    new Values<>("read_consistency_levels",
+                 state -> CONFIG_PROVIDER.getOrCreate(state).getReadConsistencyLevelsWarned(),
+                 state -> Collections.emptySet(),
+                 state -> CONFIG_PROVIDER.getOrCreate(state).getReadConsistencyLevelsDisallowed(),
+                 "read consistency levels");
+
+    /**
+     * Guardrail on write consistency levels.
+     */
+    public static final Values<ConsistencyLevel> writeConsistencyLevels =
+    new Values<>("write_consistency_levels",
+                 state -> CONFIG_PROVIDER.getOrCreate(state).getWriteConsistencyLevelsWarned(),
+                 state -> Collections.emptySet(),
+                 state -> CONFIG_PROVIDER.getOrCreate(state).getWriteConsistencyLevelsDisallowed(),
+                 "write consistency levels");
+
+    /**
+     * Guardrail on the size of a collection.
+     */
+    public static final MaxThreshold collectionSize =
+    new MaxThreshold("collection_size",
+                     state -> sizeToBytes(CONFIG_PROVIDER.getOrCreate(state).getCollectionSizeWarnThreshold()),
+                     state -> sizeToBytes(CONFIG_PROVIDER.getOrCreate(state).getCollectionSizeFailThreshold()),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Detected collection %s of size %s, this exceeds the warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Detected collection %s of size %s, this exceeds the failure threshold of %s.",
+                                        what, value, threshold));
+
+    /**
+     * Guardrail on the number of items of a collection.
+     */
+    public static final MaxThreshold itemsPerCollection =
+    new MaxThreshold("items_per_collection",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getItemsPerCollectionWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getItemsPerCollectionFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("Detected collection %s with %s items, this exceeds the warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("Detected collection %s with %s items, this exceeds the failure threshold of %s.",
+                                        what, value, threshold));
+
+    /**
+     * Guardrail on the number of fields on each UDT.
+     */
+    public static final MaxThreshold fieldsPerUDT =
+    new MaxThreshold("fields_per_udt",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getFieldsPerUDTWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getFieldsPerUDTFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("The user type %s has %s columns, this exceeds the warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("User types cannot have more than %s columns, but %s provided for user type %s.",
+                                        threshold, value, what));
+
+    /**
+     * Guardrail on the data disk usage on the local node, used by a periodic task to calculate and propagate that status.
+     * See {@link org.apache.cassandra.service.disk.usage.DiskUsageMonitor} and {@link DiskUsageBroadcaster}.
+     */
+    public static final PercentageThreshold localDataDiskUsage =
+    new PercentageThreshold("local_data_disk_usage",
+                            state -> CONFIG_PROVIDER.getOrCreate(state).getDataDiskUsagePercentageWarnThreshold(),
+                            state -> CONFIG_PROVIDER.getOrCreate(state).getDataDiskUsagePercentageFailThreshold(),
+                            (isWarning, what, value, threshold) ->
+                            isWarning ? format("Local data disk usage %s(%s) exceeds warning threshold of %s",
+                                               value, what, threshold)
+                                      : format("Local data disk usage %s(%s) exceeds failure threshold of %s, " +
+                                               "will stop accepting writes",
+                                               value, what, threshold));
+
+    /**
+     * Guardrail on the data disk usage on replicas, used at write time to verify the status of the involved replicas.
+     * See {@link org.apache.cassandra.service.disk.usage.DiskUsageMonitor} and {@link DiskUsageBroadcaster}.
+     */
+    public static final Predicates<InetAddressAndPort> replicaDiskUsage =
+    new Predicates<>("replica_disk_usage",
+                     state -> DiskUsageBroadcaster.instance::isStuffed,
+                     state -> DiskUsageBroadcaster.instance::isFull,
+                     // not using `value` because it represents the replica address, which should be hidden from clients.
+                     (isWarning, value) ->
+                     isWarning ? "Replica disk usage exceeds warning threshold"
+                               : "Write request failed because disk usage exceeds failure threshold");
+
+    static
+    {
+        // Avoid spamming with notifications about stuffed/full disks
+        long minNotifyInterval = CassandraRelevantProperties.DISK_USAGE_NOTIFY_INTERVAL_MS.getLong();
+        localDataDiskUsage.minNotifyIntervalInMs(minNotifyInterval);
+        replicaDiskUsage.minNotifyIntervalInMs(minNotifyInterval);
+    }
+
+    /**
+     * Guardrail on the minimum replication factor.
+     */
+    public static final MinThreshold minimumReplicationFactor =
+    new MinThreshold("minimum_replication_factor",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getMinimumReplicationFactorWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getMinimumReplicationFactorFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     isWarning ? format("The keyspace %s has a replication factor of %s, below the warning threshold of %s.",
+                                        what, value, threshold)
+                               : format("The keyspace %s has a replication factor of %s, below the failure threshold of %s.",
+                                        what, value, threshold));
+
+    private Guardrails()
+    {
+        MBeanWrapper.instance.registerMBean(this, MBEAN_NAME);
+    }
+
+    @Override
+    public int getKeyspacesWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getKeyspacesWarnThreshold();
+    }
+
+    @Override
+    public int getKeyspacesFailThreshold()
+    {
+        return DEFAULT_CONFIG.getKeyspacesFailThreshold();
+    }
+
+    @Override
+    public void setKeyspacesThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setKeyspacesThreshold(warn, fail);
+    }
+
+    @Override
+    public int getTablesWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getTablesWarnThreshold();
+    }
+
+    @Override
+    public int getTablesFailThreshold()
+    {
+        return DEFAULT_CONFIG.getTablesFailThreshold();
+    }
+
+    @Override
+    public void setTablesThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setTablesThreshold(warn, fail);
+    }
+
+    @Override
+    public int getColumnsPerTableWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getColumnsPerTableWarnThreshold();
+    }
+
+    @Override
+    public int getColumnsPerTableFailThreshold()
+    {
+        return DEFAULT_CONFIG.getColumnsPerTableFailThreshold();
+    }
+
+    @Override
+    public void setColumnsPerTableThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setColumnsPerTableThreshold(warn, fail);
+    }
+
+    @Override
+    public int getSecondaryIndexesPerTableWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getSecondaryIndexesPerTableWarnThreshold();
+    }
+
+    @Override
+    public int getSecondaryIndexesPerTableFailThreshold()
+    {
+        return DEFAULT_CONFIG.getSecondaryIndexesPerTableFailThreshold();
+    }
+
+    @Override
+    public void setSecondaryIndexesPerTableThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setSecondaryIndexesPerTableThreshold(warn, fail);
+    }
+
+    @Override
+    public boolean getSecondaryIndexesEnabled()
+    {
+        return DEFAULT_CONFIG.getSecondaryIndexesEnabled();
+    }
+
+    @Override
+    public void setSecondaryIndexesEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setSecondaryIndexesEnabled(enabled);
+    }
+
+    @Override
+    public int getMaterializedViewsPerTableWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getMaterializedViewsPerTableWarnThreshold();
+    }
+
+    @Override
+    public int getMaterializedViewsPerTableFailThreshold()
+    {
+        return DEFAULT_CONFIG.getMaterializedViewsPerTableFailThreshold();
+    }
+
+    @Override
+    public void setMaterializedViewsPerTableThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setMaterializedViewsPerTableThreshold(warn, fail);
+    }
+
+    @Override
+    public Set<String> getTablePropertiesWarned()
+    {
+        return DEFAULT_CONFIG.getTablePropertiesWarned();
+    }
+
+    @Override
+    public String getTablePropertiesWarnedCSV()
+    {
+        return toCSV(DEFAULT_CONFIG.getTablePropertiesWarned());
+    }
+
+    public void setTablePropertiesWarned(String... properties)
+    {
+        setTablePropertiesWarned(ImmutableSet.copyOf(properties));
+    }
+
+    @Override
+    public void setTablePropertiesWarned(Set<String> properties)
+    {
+        DEFAULT_CONFIG.setTablePropertiesWarned(properties);
+    }
+
+    @Override
+    public void setTablePropertiesWarnedCSV(String properties)
+    {
+        setTablePropertiesWarned(fromCSV(properties));
+    }
+
+    @Override
+    public Set<String> getTablePropertiesDisallowed()
+    {
+        return DEFAULT_CONFIG.getTablePropertiesDisallowed();
+    }
+
+    @Override
+    public String getTablePropertiesDisallowedCSV()
+    {
+        return toCSV(DEFAULT_CONFIG.getTablePropertiesDisallowed());
+    }
+
+    public void setTablePropertiesDisallowed(String... properties)
+    {
+        setTablePropertiesDisallowed(ImmutableSet.copyOf(properties));
+    }
+
+    @Override
+    public void setTablePropertiesDisallowed(Set<String> properties)
+    {
+        DEFAULT_CONFIG.setTablePropertiesDisallowed(properties);
+    }
+
+    @Override
+    public void setTablePropertiesDisallowedCSV(String properties)
+    {
+        setTablePropertiesDisallowed(fromCSV(properties));
+    }
+
+    @Override
+    public Set<String> getTablePropertiesIgnored()
+    {
+        return DEFAULT_CONFIG.getTablePropertiesIgnored();
+    }
+
+    @Override
+    public String getTablePropertiesIgnoredCSV()
+    {
+        return toCSV(DEFAULT_CONFIG.getTablePropertiesIgnored());
+    }
+
+    public void setTablePropertiesIgnored(String... properties)
+    {
+        setTablePropertiesIgnored(ImmutableSet.copyOf(properties));
+    }
+
+    @Override
+    public void setTablePropertiesIgnored(Set<String> properties)
+    {
+        DEFAULT_CONFIG.setTablePropertiesIgnored(properties);
+    }
+
+    @Override
+    public void setTablePropertiesIgnoredCSV(String properties)
+    {
+        setTablePropertiesIgnored(fromCSV(properties));
+    }
+
+    @Override
+    public boolean getUserTimestampsEnabled()
+    {
+        return DEFAULT_CONFIG.getUserTimestampsEnabled();
+    }
+
+    @Override
+    public void setUserTimestampsEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setUserTimestampsEnabled(enabled);
+    }
+
+    @Override
+    public boolean getAllowFilteringEnabled()
+    {
+        return DEFAULT_CONFIG.getAllowFilteringEnabled();
+    }
+
+    @Override
+    public void setAllowFilteringEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setAllowFilteringEnabled(enabled);
+    }
+
+    @Override
+    public boolean getUncompressedTablesEnabled()
+    {
+        return DEFAULT_CONFIG.getUncompressedTablesEnabled();
+    }
+
+    @Override
+    public void setUncompressedTablesEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setUncompressedTablesEnabled(enabled);
+    }
+
+    @Override
+    public boolean getCompactTablesEnabled()
+    {
+        return DEFAULT_CONFIG.getCompactTablesEnabled();
+    }
+
+    @Override
+    public void setCompactTablesEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setCompactTablesEnabled(enabled);
+    }
+
+    @Override
+    public boolean getGroupByEnabled()
+    {
+        return DEFAULT_CONFIG.getGroupByEnabled();
+    }
+
+    @Override
+    public void setGroupByEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setGroupByEnabled(enabled);
+    }
+
+    @Override
+    public boolean getDropTruncateTableEnabled()
+    {
+        return DEFAULT_CONFIG.getDropTruncateTableEnabled();
+    }
+
+    @Override
+    public void setDropTruncateTableEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setDropTruncateTableEnabled(enabled);
+    }
+
+    @Override
+    public int getPageSizeWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getPageSizeWarnThreshold();
+    }
+
+    @Override
+    public int getPageSizeFailThreshold()
+    {
+        return DEFAULT_CONFIG.getPageSizeFailThreshold();
+    }
+
+    @Override
+    public void setPageSizeThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setPageSizeThreshold(warn, fail);
+    }
+
+    @Override
+    public boolean getReadBeforeWriteListOperationsEnabled()
+    {
+        return DEFAULT_CONFIG.getReadBeforeWriteListOperationsEnabled();
+    }
+
+    @Override
+    public void setReadBeforeWriteListOperationsEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setReadBeforeWriteListOperationsEnabled(enabled);
+    }
+
+    @Override
+    public int getPartitionKeysInSelectWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getPartitionKeysInSelectWarnThreshold();
+    }
+
+    @Override
+    public int getPartitionKeysInSelectFailThreshold()
+    {
+        return DEFAULT_CONFIG.getPartitionKeysInSelectFailThreshold();
+    }
+
+    @Override
+    public void setPartitionKeysInSelectThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setPartitionKeysInSelectThreshold(warn, fail);
+    }
+
+    @Override
+    @Nullable
+    public String getCollectionSizeWarnThreshold()
+    {
+        return sizeToString(DEFAULT_CONFIG.getCollectionSizeWarnThreshold());
+    }
+
+    @Override
+    @Nullable
+    public String getCollectionSizeFailThreshold()
+    {
+        return sizeToString(DEFAULT_CONFIG.getCollectionSizeFailThreshold());
+    }
+
+    @Override
+    public void setCollectionSizeThreshold(@Nullable String warnSize, @Nullable String failSize)
+    {
+        DEFAULT_CONFIG.setCollectionSizeThreshold(sizeFromString(warnSize), sizeFromString(failSize));
+    }
+
+    @Override
+    public int getItemsPerCollectionWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getItemsPerCollectionWarnThreshold();
+    }
+
+    @Override
+    public int getItemsPerCollectionFailThreshold()
+    {
+        return DEFAULT_CONFIG.getItemsPerCollectionFailThreshold();
+    }
+
+    @Override
+    public void setItemsPerCollectionThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setItemsPerCollectionThreshold(warn, fail);
+    }
+
+    @Override
+    public int getInSelectCartesianProductWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getInSelectCartesianProductWarnThreshold();
+    }
+
+    @Override
+    public int getInSelectCartesianProductFailThreshold()
+    {
+        return DEFAULT_CONFIG.getInSelectCartesianProductFailThreshold();
+    }
+
+    @Override
+    public void setInSelectCartesianProductThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setInSelectCartesianProductThreshold(warn, fail);
+    }
+
+    @Override
+    public Set<String> getReadConsistencyLevelsWarned()
+    {
+        return toJmx(DEFAULT_CONFIG.getReadConsistencyLevelsWarned());
+    }
+
+    @Override
+    public String getReadConsistencyLevelsWarnedCSV()
+    {
+        return toCSV(DEFAULT_CONFIG.getReadConsistencyLevelsWarned(), ConsistencyLevel::toString);
+    }
+
+    @Override
+    public void setReadConsistencyLevelsWarned(Set<String> consistencyLevels)
+    {
+        DEFAULT_CONFIG.setReadConsistencyLevelsWarned(fromJmx(consistencyLevels));
+    }
+
+    @Override
+    public void setReadConsistencyLevelsWarnedCSV(String consistencyLevels)
+    {
+        DEFAULT_CONFIG.setReadConsistencyLevelsWarned(fromCSV(consistencyLevels, ConsistencyLevel::fromString));
+    }
+
+    @Override
+    public Set<String> getReadConsistencyLevelsDisallowed()
+    {
+        return toJmx(DEFAULT_CONFIG.getReadConsistencyLevelsDisallowed());
+    }
+
+    @Override
+    public String getReadConsistencyLevelsDisallowedCSV()
+    {
+        return toCSV(DEFAULT_CONFIG.getReadConsistencyLevelsDisallowed(), ConsistencyLevel::toString);
+    }
+
+    @Override
+    public void setReadConsistencyLevelsDisallowed(Set<String> consistencyLevels)
+    {
+        DEFAULT_CONFIG.setReadConsistencyLevelsDisallowed(fromJmx(consistencyLevels));
+    }
+
+    @Override
+    public void setReadConsistencyLevelsDisallowedCSV(String consistencyLevels)
+    {
+        DEFAULT_CONFIG.setReadConsistencyLevelsDisallowed(fromCSV(consistencyLevels, ConsistencyLevel::fromString));
+    }
+
+    @Override
+    public Set<String> getWriteConsistencyLevelsWarned()
+    {
+        return toJmx(DEFAULT_CONFIG.getWriteConsistencyLevelsWarned());
+    }
+
+    @Override
+    public String getWriteConsistencyLevelsWarnedCSV()
+    {
+        return toCSV(DEFAULT_CONFIG.getWriteConsistencyLevelsWarned(), ConsistencyLevel::toString);
+    }
+
+    @Override
+    public void setWriteConsistencyLevelsWarned(Set<String> consistencyLevels)
+    {
+        DEFAULT_CONFIG.setWriteConsistencyLevelsWarned(fromJmx(consistencyLevels));
+    }
+
+    @Override
+    public void setWriteConsistencyLevelsWarnedCSV(String consistencyLevels)
+    {
+        DEFAULT_CONFIG.setWriteConsistencyLevelsWarned(fromCSV(consistencyLevels, ConsistencyLevel::fromString));
+    }
+
+    @Override
+    public Set<String> getWriteConsistencyLevelsDisallowed()
+    {
+        return toJmx(DEFAULT_CONFIG.getWriteConsistencyLevelsDisallowed());
+    }
+
+    @Override
+    public String getWriteConsistencyLevelsDisallowedCSV()
+    {
+        return toCSV(DEFAULT_CONFIG.getWriteConsistencyLevelsDisallowed(), ConsistencyLevel::toString);
+    }
+
+    @Override
+    public void setWriteConsistencyLevelsDisallowed(Set<String> consistencyLevels)
+    {
+        DEFAULT_CONFIG.setWriteConsistencyLevelsDisallowed(fromJmx(consistencyLevels));
+    }
+
+    @Override
+    public void setWriteConsistencyLevelsDisallowedCSV(String consistencyLevels)
+    {
+        DEFAULT_CONFIG.setWriteConsistencyLevelsDisallowed(fromCSV(consistencyLevels, ConsistencyLevel::fromString));
+    }
+
+    @Override
+    public int getFieldsPerUDTWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getFieldsPerUDTWarnThreshold();
+    }
+
+    @Override
+    public int getFieldsPerUDTFailThreshold()
+    {
+        return DEFAULT_CONFIG.getFieldsPerUDTFailThreshold();
+    }
+
+    @Override
+    public void setFieldsPerUDTThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setFieldsPerUDTThreshold(warn, fail);
+    }
+
+    @Override
+    public int getDataDiskUsagePercentageWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getDataDiskUsagePercentageWarnThreshold();
+    }
+
+    @Override
+    public int getDataDiskUsagePercentageFailThreshold()
+    {
+        return DEFAULT_CONFIG.getDataDiskUsagePercentageFailThreshold();
+    }
+
+    @Override
+    public void setDataDiskUsagePercentageThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setDataDiskUsagePercentageThreshold(warn, fail);
+    }
+
+    @Override
+    @Nullable
+    public String getDataDiskUsageMaxDiskSize()
+    {
+        return sizeToString(DEFAULT_CONFIG.getDataDiskUsageMaxDiskSize());
+    }
+
+    @Override
+    public void setDataDiskUsageMaxDiskSize(@Nullable String size)
+    {
+        DEFAULT_CONFIG.setDataDiskUsageMaxDiskSize(sizeFromString(size));
+    }
+
+    @Override
+    public int getMinimumReplicationFactorWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getMinimumReplicationFactorWarnThreshold();
+    }
+
+    @Override
+    public int getMinimumReplicationFactorFailThreshold()
+    {
+        return DEFAULT_CONFIG.getMinimumReplicationFactorFailThreshold();
+    }
+
+    @Override
+    public void setMinimumReplicationFactorThreshold(int warn, int fail)
+    {
+        DEFAULT_CONFIG.setMinimumReplicationFactorThreshold(warn, fail);
+    }
+
+    private static String toCSV(Set<String> values)
+    {
+        return values == null || values.isEmpty() ? "" : String.join(",", values);
+    }
+
+    private static <T> String toCSV(Set<T> values, Function<T, String> formatter)
+    {
+        return values == null || values.isEmpty() ? "" : values.stream().map(formatter).collect(Collectors.joining(","));
+    }
+
+    private static Set<String> fromCSV(String csv)
+    {
+        return StringUtils.isEmpty(csv) ? Collections.emptySet() : ImmutableSet.copyOf(csv.split(","));
+    }
+
+    private static <T> Set<T> fromCSV(String csv, Function<String, T> parser)
+    {
+        return StringUtils.isEmpty(csv) ? Collections.emptySet() : fromCSV(csv).stream().map(parser).collect(Collectors.toSet());
+    }
+
+    private static Set<String> toJmx(Set<ConsistencyLevel> set)
+    {
+        if (set == null)
+            return null;
+        return set.stream().map(ConsistencyLevel::name).collect(Collectors.toSet());
+    }
+
+    private static Set<ConsistencyLevel> fromJmx(Set<String> set)
+    {
+        if (set == null)
+            return null;
+        return set.stream().map(ConsistencyLevel::valueOf).collect(Collectors.toSet());
+    }
+
+    private static Long sizeToBytes(@Nullable DataStorageSpec.LongBytesBound size)
+    {
+        return size == null ? -1 : size.toBytes();
+    }
+
+    private static String sizeToString(@Nullable DataStorageSpec size)
+    {
+        return size == null ? null : size.toString();
+    }
+
+    private static DataStorageSpec.LongBytesBound sizeFromString(@Nullable String size)
+    {
+        return StringUtils.isEmpty(size) ? null : new DataStorageSpec.LongBytesBound(size);
+    }
+}
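
The static guardrail instances defined above are the intended entry points for validation code; callers consult them rather than reading thresholds directly from the configuration. The following is a minimal sketch of such call sites, assuming `MaxThreshold` exposes a `guard(long value, String what, boolean containsUserData, ClientState state)` method and `DisableFlag` exposes `ensureEnabled(ClientState state)`; neither signature appears in this diff, so treat them as assumptions. Passing a `null` client state is supported for background operations, as noted in `GuardrailsConfigProvider#getOrCreate`.

```java
import org.apache.cassandra.db.guardrails.Guardrails;
import org.apache.cassandra.service.ClientState;

// Hypothetical call sites; the guard()/ensureEnabled() names and signatures are assumptions
// based on the MaxThreshold/DisableFlag factories used above, not an excerpt of real call sites.
final class GuardrailCallSiteSketch
{
    // e.g. from CREATE TYPE / ALTER TYPE validation
    static void checkUdtFields(String typeName, int fieldCount, ClientState state)
    {
        // Warns above the warn threshold, aborts above the fail threshold.
        Guardrails.fieldsPerUDT.guard(fieldCount, typeName, false, state);
    }

    // e.g. from SELECT validation when ALLOW FILTERING is present
    static void checkAllowFiltering(ClientState state)
    {
        // Aborts if the operator has disabled ALLOW FILTERING queries.
        Guardrails.allowFilteringEnabled.ensureEnabled(state);
    }
}
```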
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java
new file mode 100644
index 0000000..a52eeb0
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Set;
+
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.db.ConsistencyLevel;
+
+/**
+ * Configuration settings for guardrails.
+ *
+ * <p>Note that the settings here must only be used by the {@link Guardrails} class and not directly by the code
+ * checking each guarded constraint (which, again, should use the higher level abstractions defined in
+ * {@link Guardrails}).
+ *
+ * <p>We have 2 variants of guardrails, soft (warn) and hard (fail) limits, each guardrail having either one of the
+ * variants or both. Note in particular that hard limits only make sense for guardrails triggering during query
+ * execution. For other guardrails, say one triggering during compaction, aborting that compaction does not make sense.
+ *
+ * <p>Additionally, each individual setting should have a specific value (typically -1 for numeric settings)
+ * that allows disabling the corresponding guardrail.
+ * <p>
+ * This configuration is offered as an interface so different implementations of {@link GuardrailsConfigProvider} can
+ * provide different implementations of this config. However, this mechanism for guardrails config pluggability is not
+ * officially supported and this interface may change in a minor release.
+ */
+public interface GuardrailsConfig
+{
+    /**
+     * @return The threshold to warn when creating more user keyspaces than threshold.
+     */
+    int getKeyspacesWarnThreshold();
+
+    /**
+     * @return The threshold to fail when creating more user keyspaces than threshold.
+     */
+    int getKeyspacesFailThreshold();
+
+    /**
+     * @return The threshold to warn when creating more user tables than threshold.
+     */
+    int getTablesWarnThreshold();
+
+    /**
+     * @return The threshold to fail when creating more user tables than threshold.
+     */
+    int getTablesFailThreshold();
+
+    /**
+     * @return The threshold to warn when creating more columns per table than threshold.
+     */
+    int getColumnsPerTableWarnThreshold();
+
+    /**
+     * @return The threshold to fail when creating more columns per table than threshold.
+     */
+    int getColumnsPerTableFailThreshold();
+
+    /**
+     * @return The threshold to warn when creating more secondary indexes per table than threshold.
+     */
+    int getSecondaryIndexesPerTableWarnThreshold();
+
+    /**
+     * @return The threshold to fail when creating more secondary indexes per table than threshold.
+     */
+    int getSecondaryIndexesPerTableFailThreshold();
+
+    /**
+     * @return Whether creation of secondary indexes is allowed.
+     */
+    boolean getSecondaryIndexesEnabled();
+
+    /**
+     * @return The threshold to warn when creating more materialized views per table than threshold.
+     */
+    int getMaterializedViewsPerTableWarnThreshold();
+
+    /**
+     * @return The threshold to warn when the number of partition keys in a select statement is greater than threshold.
+     */
+    int getPartitionKeysInSelectWarnThreshold();
+
+    /**
+     * @return The threshold to fail when the number of partition keys in a select statement is greater than threshold.
+     */
+    int getPartitionKeysInSelectFailThreshold();
+
+    /**
+     * @return The threshold to fail when creating more materialized views per table than threshold.
+     */
+    int getMaterializedViewsPerTableFailThreshold();
+
+    /**
+     * @return The table properties that are warned about when creating or altering a table.
+     */
+    Set<String> getTablePropertiesWarned();
+
+    /**
+     * @return The table properties that are ignored when creating or altering a table.
+     */
+    Set<String> getTablePropertiesIgnored();
+
+    /**
+     * @return The table properties that are disallowed when creating or altering a table.
+     */
+    Set<String> getTablePropertiesDisallowed();
+
+    /**
+     * Returns whether user-provided timestamps are allowed.
+     *
+     * @return {@code true} if user-provided timestamps are allowed, {@code false} otherwise.
+     */
+    boolean getUserTimestampsEnabled();
+
+    /**
+     * Returns whether tables can be uncompressed.
+     *
+     * @return {@code true} if users can disable compression, {@code false} otherwise.
+     */
+    boolean getUncompressedTablesEnabled();
+
+    /**
+     * Returns whether users can create new COMPACT STORAGE tables
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getCompactTablesEnabled();
+
+    /**
+     * Returns whether GROUP BY functionality is allowed
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getGroupByEnabled();
+
+    /**
+     * Returns whether TRUNCATE or DROP table are allowed
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getDropTruncateTableEnabled();
+
+    /**
+     * @return The threshold to warn when the page size exceeds the given size.
+     */
+    int getPageSizeWarnThreshold();
+
+    /**
+     * @return The threshold to fail when the page size exceeds the given size.
+     */
+    int getPageSizeFailThreshold();
+
+    /**
+     * Returns whether list operations that require read before write are allowed.
+     *
+     * @return {@code true} if list operations that require read before write are allowed, {@code false} otherwise.
+     */
+    boolean getReadBeforeWriteListOperationsEnabled();
+
+    /**
+     * Returns whether ALLOW FILTERING property is allowed.
+     *
+     * @return {@code true} if ALLOW FILTERING is allowed, {@code false} otherwise.
+     */
+    boolean getAllowFilteringEnabled();
+
+    /**
+     * @return The threshold to warn when an IN query creates a cartesian product with a size exceeding threshold.
+     * -1 means disabled.
+     */
+    public int getInSelectCartesianProductWarnThreshold();
+
+    /**
+     * @return The threshold to prevent IN queries creating a cartesian product with a size exceeding threshold.
+     * -1 means disabled.
+     */
+    public int getInSelectCartesianProductFailThreshold();
+
+    /**
+     * @return The consistency levels that are warned about when reading.
+     */
+    Set<ConsistencyLevel> getReadConsistencyLevelsWarned();
+
+    /**
+     * @return The consistency levels that are disallowed when reading.
+     */
+    Set<ConsistencyLevel> getReadConsistencyLevelsDisallowed();
+
+    /**
+     * @return The consistency levels that are warned about when writing.
+     */
+    Set<ConsistencyLevel> getWriteConsistencyLevelsWarned();
+
+    /**
+     * @return The consistency levels that are disallowed when writing.
+     */
+    Set<ConsistencyLevel> getWriteConsistencyLevelsDisallowed();
+
+    /**
+     * @return The threshold to warn when encountering a collection with larger data size than threshold.
+     */
+    @Nullable
+    DataStorageSpec.LongBytesBound getCollectionSizeWarnThreshold();
+
+    /**
+     * @return The threshold to prevent collections with larger data size than threshold.
+     */
+    @Nullable
+    DataStorageSpec.LongBytesBound getCollectionSizeFailThreshold();
+
+    /**
+     * @return The threshold to warn when encountering more elements in a collection than threshold.
+     */
+    int getItemsPerCollectionWarnThreshold();
+
+    /**
+     * @return The threshold to prevent collections with more elements than threshold.
+     */
+    int getItemsPerCollectionFailThreshold();
+
+    /**
+     * @return The threshold to warn when creating a UDT with more fields than threshold.
+     */
+    int getFieldsPerUDTWarnThreshold();
+
+    /**
+     * @return The threshold to fail when creating a UDT with more fields than threshold.
+     */
+    int getFieldsPerUDTFailThreshold();
+
+    /**
+     * @return The threshold to warn when local disk usage percentage exceeds that threshold.
+     * Allowed values are in the range {@code [1, 100]}, and -1 means disabled.
+     */
+    int getDataDiskUsagePercentageWarnThreshold();
+
+    /**
+     * @return The threshold to fail when local disk usage percentage exceeds that threshold.
+     * Allowed values are in the range {@code [1, 100]}, and -1 means disabled.
+     */
+    int getDataDiskUsagePercentageFailThreshold();
+
+    /**
+     * @return The max disk size of the data directories when calculating disk usage thresholds, {@code null} means
+     * disabled.
+     */
+    @Nullable
+    DataStorageSpec.LongBytesBound getDataDiskUsageMaxDiskSize();
+
+    /**
+     * @return The threshold to warn when replication factor is less than threshold.
+     */
+    int getMinimumReplicationFactorWarnThreshold();
+
+    /**
+     * @return The threshold to fail when replication factor is less than threshold.
+     */
+    int getMinimumReplicationFactorFailThreshold();
+
+}
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfigProvider.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfigProvider.java
new file mode 100644
index 0000000..6128764
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfigProvider.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.utils.FBUtilities;
+
+/**
+ * Provider of {@link GuardrailsConfig}s for a {@link ClientState}.
+ * <p>
+ * The {@link Default} implementation always returns the {@link GuardrailsConfig} parsed from {@code cassandra.yaml},
+ * but different implementations can return different configurations based on the specified {@link ClientState}.
+ * <p>
+ * Custom implementations can be specified at runtime with the system property {@link #CUSTOM_IMPLEMENTATION_PROPERTY}.
+ * These implementations can be used to read the guardrails configuration from some other source, and provide different
+ * configurations depending on the {@link ClientState} or some other factors. However, this mechanism for pluggability
+ * and the related {@link GuardrailsConfig} interface are not officially supported and may change in a minor release.
+ */
+public interface GuardrailsConfigProvider
+{
+    public static final String CUSTOM_IMPLEMENTATION_PROPERTY = "cassandra.custom_guardrails_config_provider_class";
+
+    static final GuardrailsConfigProvider instance = System.getProperty(CUSTOM_IMPLEMENTATION_PROPERTY) == null
+                                                     ? new Default()
+                                                     : build(System.getProperty(CUSTOM_IMPLEMENTATION_PROPERTY));
+
+    /**
+     * Returns the {@link GuardrailsConfig} to be used for the specified {@link ClientState}.
+     *
+     * @param state a client state, which may be {@code null} if the guardrails check for which we are getting the config is
+     *              for a background process that is not associated to a user query.
+     * @return the configuration to be used for {@code state}
+     */
+    GuardrailsConfig getOrCreate(@Nullable ClientState state);
+
+    /**
+     * Creates an instance of the custom guardrails config provider of the given class.
+     *
+     * @param customImpl the fully qualified classname of the guardrails config provider to be instantiated
+     * @return a new instance of the custom guardrails config provider of the given class.
+     */
+    static GuardrailsConfigProvider build(String customImpl)
+    {
+        return FBUtilities.construct(customImpl, "custom guardrails config provider");
+    }
+
+    /**
+     * Default implementation of {@link GuardrailsConfigProvider} that always returns the {@link GuardrailsConfig}
+     * parsed from {@code cassandra.yaml}, independently of the {@link ClientState}.
+     */
+    class Default implements GuardrailsConfigProvider
+    {
+        @Override
+        public GuardrailsConfig getOrCreate(@Nullable ClientState state)
+        {
+            return DatabaseDescriptor.getGuardrailsConfig();
+        }
+    }
+}
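
As described in the javadoc above, a custom provider can be selected at startup with the `cassandra.custom_guardrails_config_provider_class` system property; `FBUtilities.construct` instantiates it reflectively, so a public no-argument constructor is presumably required. Below is an illustrative sketch under those assumptions; the package and class names are hypothetical, and the body simply falls back to the yaml-backed config as the `Default` implementation does.

```java
package com.example.cassandra; // hypothetical package

import javax.annotation.Nullable;

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.guardrails.GuardrailsConfig;
import org.apache.cassandra.db.guardrails.GuardrailsConfigProvider;
import org.apache.cassandra.service.ClientState;

/**
 * Illustrative provider that returns the yaml-backed config for every client.
 * A real implementation could branch on the ClientState (e.g. per role) or read
 * the configuration from an external source.
 */
public class StaticGuardrailsConfigProvider implements GuardrailsConfigProvider
{
    @Override
    public GuardrailsConfig getOrCreate(@Nullable ClientState state)
    {
        return DatabaseDescriptor.getGuardrailsConfig();
    }
}
```

It would then be enabled by starting the node with `-Dcassandra.custom_guardrails_config_provider_class=com.example.cassandra.StaticGuardrailsConfigProvider`.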
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailsDiagnostics.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailsDiagnostics.java
new file mode 100644
index 0000000..2905afa
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailsDiagnostics.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.apache.cassandra.db.guardrails.GuardrailEvent.GuardrailEventType;
+import org.apache.cassandra.diag.DiagnosticEventService;
+
+/**
+ * Utility methods for {@link GuardrailEvent} activities.
+ */
+final class GuardrailsDiagnostics
+{
+    private static final DiagnosticEventService service = DiagnosticEventService.instance();
+
+    private GuardrailsDiagnostics()
+    {
+    }
+
+    /**
+     * Creates a new diagnostic event for the activation of the soft/warn limit of a guardrail.
+     *
+     * @param name    The name that identifies the activated guardrail.
+     * @param message The warning message emitted by the activated guardrail.
+     */
+    static void warned(String name, String message)
+    {
+        if (isEnabled(GuardrailEventType.WARNED))
+            service.publish(new GuardrailEvent(GuardrailEventType.WARNED, name, message));
+    }
+
+    /**
+     * Creates a new diagnostic event for the activation of the hard/fail limit of a guardrail.
+     *
+     * @param name    The name that identifies the activated guardrail.
+     * @param message The failure message emitted by the activated guardrail.
+     */
+    static void failed(String name, String message)
+    {
+        if (isEnabled(GuardrailEventType.FAILED))
+            service.publish(new GuardrailEvent(GuardrailEventType.FAILED, name, message));
+    }
+
+    private static boolean isEnabled(GuardrailEventType type)
+    {
+        return service.isEnabled(GuardrailEvent.class, type);
+    }
+}
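
The diagnostics class above publishes `GuardrailEvent`s through `DiagnosticEventService` when the corresponding event type is enabled. A consumer could observe these events roughly as sketched below; note that only `instance()`, `publish(...)` and `isEnabled(Class, Enum)` appear in this diff, so the `subscribe(Class, Consumer)` overload and the availability of `GuardrailEvent` outside the package are assumptions, and diagnostic events themselves must be enabled in the node's configuration for anything to be published.

```java
import java.util.function.Consumer;

import org.apache.cassandra.db.guardrails.GuardrailEvent;
import org.apache.cassandra.diag.DiagnosticEventService;

// Sketch of a listener for guardrail diagnostic events; the subscribe(Class, Consumer)
// signature is an assumption, not shown in this diff.
final class GuardrailEventListenerSketch
{
    static void register()
    {
        Consumer<GuardrailEvent> consumer =
            event -> System.out.println("guardrail event: " + event);
        DiagnosticEventService.instance().subscribe(GuardrailEvent.class, consumer);
    }
}
```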
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java
new file mode 100644
index 0000000..ad2edda
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java
@@ -0,0 +1,542 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Set;
+import javax.annotation.Nullable;
+
+/**
+ * JMX entrypoint for updating the default guardrails configuration parsed from {@code cassandra.yaml}.
+ * <p>
+ * This is different to just exposing {@link GuardrailsConfig} in that the methods here should be JMX-friendly.
+ *
+ * <p>For consistency, guardrails based on a simple numeric threshold should use the naming scheme
+ * {@code <whatIsGuarded>WarnThreshold} for soft limits and {@code <whatIsGuarded>FailThreshold} for hard
+ * ones, and if the value has a unit, that unit should be added at the end (for instance,
+ * {@code <whatIsGuarded>FailThresholdInKb}). For "boolean" guardrails that disable a feature, use
+ * {@code <whatIsGuarded>Enabled}. Other types of guardrails can use appropriate suffixes but should start with
+ * {@code <whatIsGuarded>}.
+ */
+public interface GuardrailsMBean
+{
+    /**
+     * @return The threshold to warn when creating more user keyspaces than threshold.
+     * -1 means disabled.
+     */
+    int getKeyspacesWarnThreshold();
+
+    /**
+     * @return The threshold to prevent creating more user keyspaces than threshold.
+     * -1 means disabled.
+     */
+    int getKeyspacesFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when creating more user keyspaces than threshold. -1 means disabled.
+     * @param fail The threshold to prevent creating more user keyspaces than threshold. -1 means disabled.
+     */
+    void setKeyspacesThreshold(int warn, int fail);
+
+    /**
+     * @return The threshold to warn when creating more tables than threshold.
+     * -1 means disabled.
+     */
+    int getTablesWarnThreshold();
+
+    /**
+     * @return The threshold to prevent creating more tables than threshold.
+     * -1 means disabled.
+     */
+    int getTablesFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when creating more tables than threshold. -1 means disabled.
+     * @param fail The threshold to prevent creating more tables than threshold. -1 means disabled.
+     */
+    void setTablesThreshold(int warn, int fail);
+
+    /**
+     * @return The threshold to warn when having more columns per table than threshold.
+     * -1 means disabled.
+     */
+    int getColumnsPerTableWarnThreshold();
+
+    /**
+     * @return The threshold to prevent having more columns per table than threshold. -1 means disabled.
+     */
+    int getColumnsPerTableFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when having more columns per table than threshold. -1 means disabled.
+     * @param fail The threshold to prevent having more columns per table than threshold. -1 means disabled.
+     */
+    void setColumnsPerTableThreshold(int warn, int fail);
+
+    /**
+     * @return The threshold to warn when creating more secondary indexes per table than threshold. -1 means disabled.
+     */
+    int getSecondaryIndexesPerTableWarnThreshold();
+
+    /**
+     * @return The threshold to prevent creating more secondary indexes per table than threshold. -1 means disabled.
+     */
+    int getSecondaryIndexesPerTableFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when creating more secondary indexes per table than threshold. -1 means disabled.
+     * @param fail The threshold to prevent creating more secondary indexes per table than threshold. -1 means disabled.
+     */
+    void setSecondaryIndexesPerTableThreshold(int warn, int fail);
+
+    /**
+     * @return Whether the creation of secondary indexes is enabled on the node.
+     */
+    boolean getSecondaryIndexesEnabled();
+
+    /**
+     * Enables or disables the ability to create secondary indexes.
+     *
+     * @param enabled {@code true} to allow the creation of secondary indexes, {@code false} otherwise.
+     */
+    void setSecondaryIndexesEnabled(boolean enabled);
+
+    /**
+     * @return The threshold to warn when creating more materialized views per table than threshold.
+     * -1 means disabled.
+     */
+    int getMaterializedViewsPerTableWarnThreshold();
+
+    /**
+     * @return The threshold to prevent creating more materialized views per table than threshold.
+     * -1 means disabled.
+     */
+    int getMaterializedViewsPerTableFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when creating more materialized views per table than threshold. -1 means disabled.
+     * @param fail The threshold to prevent creating more materialized views per table than threshold. -1 means disabled.
+     */
+    void setMaterializedViewsPerTableThreshold(int warn, int fail);
+
+    /**
+     * @return properties that are warned about when creating or altering a table.
+     */
+    Set<String> getTablePropertiesWarned();
+
+    /**
+     * @return Comma-separated list of properties that are warned about when creating or altering a table.
+     */
+    String getTablePropertiesWarnedCSV();
+
+    /**
+     * @param properties properties that are warned about when creating or altering a table.
+     */
+    void setTablePropertiesWarned(Set<String> properties);
+
+    /**
+     * @param properties Comma-separated list of properties that are warned about when creating or altering a table.
+     */
+    void setTablePropertiesWarnedCSV(String properties);
+
+    /**
+     * @return properties that are not allowed when creating or altering a table.
+     */
+    Set<String> getTablePropertiesDisallowed();
+
+    /**
+     * @return Comma-separated list of properties that are not allowed when creating or altering a table.
+     */
+    String getTablePropertiesDisallowedCSV();
+
+    /**
+     * @param properties properties that are not allowed when creating or altering a table.
+     */
+    void setTablePropertiesDisallowed(Set<String> properties);
+
+    /**
+     * @param properties Comma-separated list of properties that are not allowed when creating or altering a table.
+     */
+    void setTablePropertiesDisallowedCSV(String properties);
+
+    /**
+     * @return properties that are ignored when creating or altering a table.
+     */
+    Set<String> getTablePropertiesIgnored();
+
+    /**
+     * @return Comma-separated list of properties that are ignored when creating or altering a table.
+     */
+    String getTablePropertiesIgnoredCSV();
+
+    /**
+     * @param properties properties that are ignored when creating or altering a table.
+     */
+    void setTablePropertiesIgnored(Set<String> properties);
+
+    /**
+     * @param properties Comma-separated list of properties that are ignored when creating or altering a table.
+     */
+    void setTablePropertiesIgnoredCSV(String properties);
+
+    /**
+     * Returns whether user-provided timestamps are allowed.
+     *
+     * @return {@code true} if user-provided timestamps are allowed, {@code false} otherwise.
+     */
+    boolean getUserTimestampsEnabled();
+
+    /**
+     * Sets whether user-provided timestamps are allowed.
+     *
+     * @param enabled {@code true} if user-provided timestamps are allowed, {@code false} otherwise.
+     */
+    void setUserTimestampsEnabled(boolean enabled);
+
+    /**
+     * Returns whether ALLOW FILTERING property is allowed.
+     *
+     * @return {@code true} if ALLOW FILTERING is allowed, {@code false} otherwise.
+     */
+    boolean getAllowFilteringEnabled();
+
+    /**
+     * Sets whether ALLOW FILTERING is allowed.
+     *
+     * @param enabled {@code true} if ALLOW FILTERING is allowed, {@code false} otherwise.
+     */
+    void setAllowFilteringEnabled(boolean enabled);
+
+    /**
+     * Returns whether users can disable compression on tables
+     *
+     * @return {@code true} if users can disable compression on a table, {@code false} otherwise.
+     */
+    boolean getUncompressedTablesEnabled();
+
+    /**
+     * Sets whether users can disable compression on tables
+     *
+     * @param enabled {@code true} if users can disable compression on a table, {@code false} otherwise.
+     */
+    void setUncompressedTablesEnabled(boolean enabled);
+
+    /**
+     * Returns whether users can create new COMPACT STORAGE tables
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getCompactTablesEnabled();
+
+    /**
+     * Sets whether users can create new COMPACT STORAGE tables
+     *
+     * @param enabled {@code true} if allowed, {@code false} otherwise.
+     */
+    void setCompactTablesEnabled(boolean enabled);
+
+    /**
+     * Returns whether GROUP BY queries are allowed.
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getGroupByEnabled();
+
+    /**
+     * Sets whether GROUP BY queries are allowed.
+     *
+     * @param enabled {@code true} if allowed, {@code false} otherwise.
+     */
+    void setGroupByEnabled(boolean enabled);
+
+    /**
+     * Returns whether users can TRUNCATE or DROP TABLE
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getDropTruncateTableEnabled();
+
+    /**
+     * Sets whether users can TRUNCATE or DROP TABLE
+     */
+    void setDropTruncateTableEnabled(boolean enabled);
+
+    /**
+     * @return The threshold to warn when the requested page size is greater than threshold.
+     * -1 means disabled.
+     */
+    int getPageSizeWarnThreshold();
+
+    /**
+     * @return The threshold to prevent requesting pages with more elements than threshold.
+     * -1 means disabled.
+     */
+    int getPageSizeFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when the requested page size is greater than threshold. -1 means disabled.
+     * @param fail The threshold to prevent requesting pages with more elements than threshold. -1 means disabled.
+     */
+    void setPageSizeThreshold(int warn, int fail);
+
+    /**
+     * Returns whether list operations that require read before write are allowed.
+     *
+     * @return {@code true} if list operations that require read before write are allowed, {@code false} otherwise.
+     */
+    boolean getReadBeforeWriteListOperationsEnabled();
+
+    /**
+     * Sets whether list operations that require read before write are allowed.
+     *
+     * @param enabled {@code true} if list operations that require read before write are allowed, {@code false} otherwise.
+     */
+    void setReadBeforeWriteListOperationsEnabled(boolean enabled);
+
+    /**
+     * @return The threshold to warn when the number of partition keys in a select statement is greater than threshold.
+     * -1 means disabled.
+     */
+    int getPartitionKeysInSelectWarnThreshold();
+
+    /**
+     * @return The threshold to fail when the number of partition keys in a select statement is greater than threshold.
+     * -1 means disabled.
+     */
+    int getPartitionKeysInSelectFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when the number of partition keys in a select statement is greater than
+     *             threshold. -1 means disabled.
+     * @param fail The threshold to prevent select statements with more partition keys than
+     *             threshold. -1 means disabled.
+     */
+    void setPartitionKeysInSelectThreshold(int warn, int fail);
+
+    /**
+     * @return The threshold to warn when an IN query creates a cartesian product with a size exceeding threshold.
+     * -1 means disabled.
+     */
+    public int getInSelectCartesianProductWarnThreshold();
+
+    /**
+     * @return The threshold to prevent IN queries creating a cartesian product with a size exceeding threshold.
+     * -1 means disabled.
+     */
+    public int getInSelectCartesianProductFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when an IN query creates a cartesian product with a size exceeding threshold.
+     *             -1 means disabled.
+     * @param fail The threshold to prevent IN queries creating a cartesian product with a size exceeding threshold.
+     *             -1 means disabled.
+     */
+    public void setInSelectCartesianProductThreshold(int warn, int fail);
+
+    /**
+     * @return consistency levels that are warned about when reading.
+     */
+    Set<String> getReadConsistencyLevelsWarned();
+
+    /**
+     * @return Comma-separated list of consistency levels that are warned about when reading.
+     */
+    String getReadConsistencyLevelsWarnedCSV();
+
+    /**
+     * @param consistencyLevels consistency levels that are warned about when reading.
+     */
+    void setReadConsistencyLevelsWarned(Set<String> consistencyLevels);
+
+    /**
+     * @param consistencyLevels Comma-separated list of consistency levels that are warned about when reading.
+     */
+    void setReadConsistencyLevelsWarnedCSV(String consistencyLevels);
+
+    /**
+     * @return consistency levels that are not allowed when reading.
+     */
+    Set<String> getReadConsistencyLevelsDisallowed();
+
+    /**
+     * @return Comma-separated list of consistency levels that are not allowed when reading.
+     */
+    String getReadConsistencyLevelsDisallowedCSV();
+
+    /**
+     * @param consistencyLevels consistency levels that are not allowed when reading.
+     */
+    void setReadConsistencyLevelsDisallowed(Set<String> consistencyLevels);
+
+    /**
+     * @param consistencyLevels Comma-separated list of consistency levels that are not allowed when reading.
+     */
+    void setReadConsistencyLevelsDisallowedCSV(String consistencyLevels);
+
+    /**
+     * @return consistency levels that are warned about when writing.
+     */
+    Set<String> getWriteConsistencyLevelsWarned();
+
+    /**
+     * @return Comma-separated list of consistency levels that are warned about when writing.
+     */
+    String getWriteConsistencyLevelsWarnedCSV();
+
+    /**
+     * @param consistencyLevels consistency levels that are warned about when writing.
+     */
+    void setWriteConsistencyLevelsWarned(Set<String> consistencyLevels);
+
+    /**
+     * @param consistencyLevels Comma-separated list of consistency levels that are warned about when writing.
+     */
+    void setWriteConsistencyLevelsWarnedCSV(String consistencyLevels);
+
+    /**
+     * @return consistency levels that are not allowed when writing.
+     */
+    Set<String> getWriteConsistencyLevelsDisallowed();
+
+    /**
+     * @return Comma-separated list of consistency levels that are not allowed when writing.
+     */
+    String getWriteConsistencyLevelsDisallowedCSV();
+
+    /**
+     * @param consistencyLevels consistency levels that are not allowed when writing.
+     */
+    void setWriteConsistencyLevelsDisallowed(Set<String> consistencyLevels);
+
+    /**
+     * @param consistencyLevels Comma-separated list of consistency levels that are not allowed when writing.
+     */
+    void setWriteConsistencyLevelsDisallowedCSV(String consistencyLevels);
+
+    /**
+     * @return The threshold to warn when encountering collection data larger than the threshold, as a string
+     * formatted as in, for example, {@code 10GiB}, {@code 20MiB}, {@code 30KiB} or {@code 40B}.  A {@code null} value
+     * means that the threshold is disabled.
+     */
+    @Nullable
+    String getCollectionSizeWarnThreshold();
+
+    /**
+     * @return The threshold to prevent collections with a data size larger than the threshold, as a string formatted as in,
+     * for example, {@code 10GiB}, {@code 20MiB}, {@code 30KiB} or {@code 40B}. A {@code null} value means that the
+     * threshold is disabled.
+     */
+    @Nullable
+    String getCollectionSizeFailThreshold();
+
+    /**
+     * @param warnSize The threshold to warn when encountering collection data larger than the threshold, as a
+     *                 string formatted as in, for example, {@code 10GiB}, {@code 20MiB}, {@code 30KiB} or {@code 40B}.
+     *                 A {@code null} value means disabled.
+     * @param failSize The threshold to prevent collections with a data size larger than the threshold, as a string
+     *                 formatted as in, for example, {@code 10GiB}, {@code 20MiB}, {@code 30KiB} or {@code 40B}.
+     *                 A {@code null} value means disabled.
+     */
+    void setCollectionSizeThreshold(@Nullable String warnSize, @Nullable String failSize);
+
+    /**
+     * @return The threshold to warn when encountering more elements in a collection than the threshold.
+     */
+    int getItemsPerCollectionWarnThreshold();
+
+    /**
+     * @return The threshold to prevent collections with more elements than the threshold.
+     */
+    int getItemsPerCollectionFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when encountering more elements in a collection than the threshold.
+     * @param fail The threshold to prevent collections with more elements than the threshold.
+     */
+    void setItemsPerCollectionThreshold(int warn, int fail);
+
+    /**
+     * @return The threshold to warn when creating a UDT with more fields than the threshold. -1 means disabled.
+     */
+    int getFieldsPerUDTWarnThreshold();
+
+    /**
+     * @return The threshold to fail when creating a UDT with more fields than the threshold. -1 means disabled.
+     */
+    int getFieldsPerUDTFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when creating a UDT with more fields than the threshold. -1 means disabled.
+     * @param fail The threshold to prevent creating a UDT with more fields than the threshold. -1 means disabled.
+     */
+    void setFieldsPerUDTThreshold(int warn, int fail);
+
+    /**
+     * @return The threshold to warn when local data disk usage percentage exceeds that threshold.
+     * Allowed values are in the range {@code [1, 100]}, and -1 means disabled.
+     */
+    int getDataDiskUsagePercentageWarnThreshold();
+
+    /**
+     * @return The threshold to fail when local data disk usage percentage exceeds that threshold.
+     * Allowed values are in the range {@code [1, 100]}, and -1 means disabled.
+     */
+    int getDataDiskUsagePercentageFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when local disk usage percentage exceeds that threshold.
+     *             Allowed values are in the range {@code [1, 100]}, and -1 means disabled.
+     * @param fail The threshold to fail when local disk usage percentage exceeds that threshold.
+     *             Allowed values are in the range {@code [1, 100]}, and -1 means disabled.
+     */
+    public void setDataDiskUsagePercentageThreshold(int warn, int fail);
+
+    /**
+     * @return The max disk size of the data directories when calculating disk usage thresholds, as a string formatted
+     * as in, for example, {@code 10GiB}, {@code 20MiB}, {@code 30KiB} or {@code 40B}. A {@code null} value means
+     * disabled.
+     */
+    @Nullable
+    String getDataDiskUsageMaxDiskSize();
+
+    /**
+     * @param size The max disk size of the data directories when calculating disk usage thresholds, as a string
+     *             formatted as in, for example, {@code 10GiB}, {@code 20MiB}, {@code 30KiB} or {@code 40B}.
+     *             A {@code null} value means disabled.
+     */
+    void setDataDiskUsageMaxDiskSize(@Nullable String size);
+
+    /**
+     * @return The threshold to warn when the replication factor is less than the threshold.
+     */
+    int getMinimumReplicationFactorWarnThreshold();
+
+    /**
+     * @return The threshold to fail when the replication factor is less than the threshold.
+     */
+    int getMinimumReplicationFactorFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when the minimum replication factor is less than
+     *             the threshold. -1 means disabled.
+     * @param fail The threshold to fail when the minimum replication factor is less than
+     *             the threshold. -1 means disabled.
+     */
+    void setMinimumReplicationFactorThreshold(int warn, int fail);
+
+}
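For context on how operators would drive the setters declared above, here is a minimal JMX sketch. It assumes the default local JMX port (7199) and an MBean object name of org.apache.cassandra.db:type=Guardrails; both are illustrative assumptions rather than something introduced by this patch.

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class GuardrailsJmxSketch
    {
        public static void main(String[] args) throws Exception
        {
            // Assumed endpoint and object name; adjust to the actual deployment.
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
            try (JMXConnector connector = JMXConnectorFactory.connect(url))
            {
                MBeanServerConnection connection = connector.getMBeanServerConnection();
                ObjectName name = new ObjectName("org.apache.cassandra.db:type=Guardrails"); // assumed name
                GuardrailsMBean guardrails = JMX.newMBeanProxy(connection, name, GuardrailsMBean.class);

                // Exercise a few of the setters declared in the interface above.
                guardrails.setReadConsistencyLevelsWarnedCSV("ALL");
                guardrails.setReadConsistencyLevelsDisallowedCSV("ANY");
                guardrails.setPartitionKeysInSelectThreshold(20, 100);
            }
        }
    }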
diff --git a/src/java/org/apache/cassandra/db/guardrails/MaxThreshold.java b/src/java/org/apache/cassandra/db/guardrails/MaxThreshold.java
new file mode 100644
index 0000000..badaff7
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/MaxThreshold.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.function.ToLongFunction;
+import org.apache.cassandra.service.ClientState;
+
+/**
+ * {@link MaxThreshold} is used for maximum guardrails: the value is checked to see whether it is greater than the warn and fail thresholds.
+ */
+public class MaxThreshold extends Threshold
+{
+    /**
+     * Creates a new threshold guardrail.
+     *
+     * @param name            the identifying name of the guardrail
+     * @param warnThreshold   a {@link ClientState}-based provider of the value above which a warning should be triggered.
+     * @param failThreshold   a {@link ClientState}-based provider of the value above which the operation should be aborted.
+     * @param messageProvider a function to generate the warning or error message if the guardrail is triggered
+     */
+    public MaxThreshold(String name,
+                        ToLongFunction<ClientState> warnThreshold,
+                        ToLongFunction<ClientState> failThreshold,
+                        Threshold.ErrorMessageProvider messageProvider)
+    {
+        super(name, warnThreshold, failThreshold, messageProvider);
+    }
+
+    @Override
+    protected boolean compare(long value, long threshold)
+    {
+        return value > threshold;
+    }
+
+    @Override
+    protected long failValue(ClientState state)
+    {
+        long failValue = failThreshold.applyAsLong(state);
+        return failValue <= 0 ? Long.MAX_VALUE : failValue;
+    }
+
+    @Override
+    protected long warnValue(ClientState state)
+    {
+        long warnValue = warnThreshold.applyAsLong(state);
+        return warnValue <= 0 ? Long.MAX_VALUE : warnValue;
+    }
+
+}
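As a rough sketch of how the class above is meant to be wired (in the real code the thresholds come from the guardrails configuration, and the name and message here are invented; the package-private ErrorMessageProvider means such a snippet would live inside org.apache.cassandra.db.guardrails):

    // Hypothetical maximum guardrail with constant limits.
    MaxThreshold pageSize = new MaxThreshold("page_size",
                                             state -> 5_000,   // warn above 5000
                                             state -> 10_000,  // fail above 10000
                                             (isWarning, what, value, threshold) ->
                                                 String.format("%s %s exceeds the %s threshold %s",
                                                               what, value, isWarning ? "warning" : "failure", threshold));

    pageSize.guard(7_000, "page size", false, null);   // warns: 7000 > 5000
    pageSize.guard(20_000, "page size", false, null);  // fails: 20000 > 10000 and aborts the operation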
diff --git a/src/java/org/apache/cassandra/db/guardrails/MinThreshold.java b/src/java/org/apache/cassandra/db/guardrails/MinThreshold.java
new file mode 100644
index 0000000..427f277
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/MinThreshold.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.function.ToLongFunction;
+import org.apache.cassandra.service.ClientState;
+
+/**
+ * {@link MinThreshold} is used for minimum guardrails: the value is checked to see whether it is less than the warn and fail thresholds.
+ */
+public class MinThreshold extends Threshold
+{
+    /**
+     * Creates a new minimum threshold guardrail.
+     *
+     * @param name            the identifying name of the guardrail
+     * @param warnThreshold   a {@link ClientState}-based provider of the value below which a warning should be triggered.
+     * @param failThreshold   a {@link ClientState}-based provider of the value below which the operation should be aborted.
+     * @param messageProvider a function to generate the warning or error message if the guardrail is triggered
+     */
+    public MinThreshold(String name,
+                        ToLongFunction<ClientState> warnThreshold,
+                        ToLongFunction<ClientState> failThreshold,
+                        Threshold.ErrorMessageProvider messageProvider)
+    {
+        super(name, warnThreshold, failThreshold, messageProvider);
+    }
+
+    @Override
+    protected boolean compare(long value, long threshold)
+    {
+        return value < threshold;
+    }
+
+    @Override
+    protected long failValue(ClientState state)
+    {
+        long failValue = failThreshold.applyAsLong(state);
+        return failValue <= 0 ? Long.MIN_VALUE : failValue;
+    }
+
+    @Override
+    protected long warnValue(ClientState state)
+    {
+        long warnValue = warnThreshold.applyAsLong(state);
+        return warnValue <= 0 ? Long.MIN_VALUE : warnValue;
+    }
+
+}
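The minimum case mirrors the maximum one above; the notable detail is that a disabled (<= 0) threshold maps to Long.MIN_VALUE, so compare(value, threshold) can never fire for it. A short sketch with an invented name and limits:

    // Hypothetical minimum guardrail, e.g. a replication factor floor: warn below 3, fail below 2.
    MinThreshold minRf = new MinThreshold("minimum_replication_factor",
                                          state -> 3,
                                          state -> 2,
                                          (isWarning, what, value, threshold) ->
                                              String.format("%s %s is below the %s threshold %s",
                                                            what, value, isWarning ? "warning" : "failure", threshold));

    minRf.guard(2, "replication factor", false, null);  // warns: 2 < 3
    minRf.guard(1, "replication factor", false, null);  // fails: 1 < 2 and aborts the operation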
diff --git a/src/java/org/apache/cassandra/db/guardrails/PercentageThreshold.java b/src/java/org/apache/cassandra/db/guardrails/PercentageThreshold.java
new file mode 100644
index 0000000..6f866c6
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/PercentageThreshold.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.function.ToLongFunction;
+
+import org.apache.cassandra.service.ClientState;
+
+/**
+ * A {@link Threshold} guardrail whose values represent a percentage.
+ * <p>
+ * This works exactly like a {@link Threshold}, but provides slightly more convenient error messages for percentages.
+ */
+public class PercentageThreshold extends MaxThreshold
+{
+    /**
+     * Creates a new threshold guardrail.
+     *
+     * @param name            the identifying name of the guardrail
+     * @param warnThreshold   a {@link ClientState}-based provider of the value above which a warning should be triggered.
+     * @param failThreshold   a {@link ClientState}-based provider of the value above which the operation should be aborted.
+     * @param messageProvider a function to generate the warning or error message if the guardrail is triggered
+     */
+    public PercentageThreshold(String name,
+                               ToLongFunction<ClientState> warnThreshold,
+                               ToLongFunction<ClientState> failThreshold,
+                               ErrorMessageProvider messageProvider)
+    {
+        super(name, warnThreshold, failThreshold, messageProvider);
+    }
+
+    @Override
+    protected String errMsg(boolean isWarning, String what, long value, long thresholdValue)
+    {
+        return messageProvider.createMessage(isWarning,
+                                             what,
+                                             String.format("%d%%", value),
+                                             String.format("%d%%", thresholdValue));
+    }
+}
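The only behavioural difference from MaxThreshold is how the message is rendered, so a sketch is mostly about the output format; the name and limits below are invented:

    // Hypothetical disk-usage style guardrail; values and thresholds are formatted as percentages.
    PercentageThreshold diskUsage = new PercentageThreshold("data_disk_usage",
                                                            state -> 70,
                                                            state -> 90,
                                                            (isWarning, what, value, threshold) ->
                                                                String.format("%s is %s, over the %s limit", what, value, threshold));

    // Fails with a message along the lines of "local data disk usage is 95%, over the 90% limit".
    diskUsage.guard(95, "local data disk usage", false, null);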
diff --git a/src/java/org/apache/cassandra/db/guardrails/Predicates.java b/src/java/org/apache/cassandra/db/guardrails/Predicates.java
new file mode 100644
index 0000000..13be9e9
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/Predicates.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.function.Function;
+import java.util.function.Predicate;
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.service.ClientState;
+
+/**
+ * A guardrail based on two predicates.
+ *
+ * <p>A {@link Predicates} guardrail defines (up to) two predicates: one at which a warning is issued, and another
+ * at which a failure is triggered. If the failure predicate is triggered, the warning predicate is skipped.
+ *
+ * @param <T> the type of the values to be tested against predicates.
+ */
+public class Predicates<T> extends Guardrail
+{
+    private final Function<ClientState, Predicate<T>> warnPredicate;
+    private final Function<ClientState, Predicate<T>> failurePredicate;
+    private final MessageProvider<T> messageProvider;
+
+    /**
+     * A function used to build the warning or error message of a triggered {@link Predicates} guardrail.
+     */
+    public interface MessageProvider<T>
+    {
+        /**
+         * Called when the guardrail is triggered to build the corresponding message.
+         *
+         * @param isWarning whether the trigger is a warning one; otherwise it is a failure one.
+         * @param value     the value that triggers the guardrail.
+         */
+        String createMessage(boolean isWarning, T value);
+    }
+
+    /**
+     * Creates a new {@link Predicates} guardrail.
+     *
+     * @param name             the identifying name of the guardrail
+     * @param warnPredicate    a {@link ClientState}-based predicate provider that is used to check if a given value should trigger a warning.
+     * @param failurePredicate a {@link ClientState}-based predicate provider that is used to check if a given value should trigger a failure.
+     * @param messageProvider  a function to generate the warning or error message if the guardrail is triggered
+     */
+    Predicates(String name,
+               Function<ClientState, Predicate<T>> warnPredicate,
+               Function<ClientState, Predicate<T>> failurePredicate,
+               MessageProvider<T> messageProvider)
+    {
+        super(name);
+        this.warnPredicate = warnPredicate;
+        this.failurePredicate = failurePredicate;
+        this.messageProvider = messageProvider;
+    }
+
+    /**
+     * Apply the guardrail to the provided value, triggering a warning or failure if appropriate.
+     *
+     * @param value the value to check.
+     */
+    public void guard(T value, @Nullable ClientState state)
+    {
+        if (!enabled(state))
+            return;
+
+        if (failurePredicate.apply(state).test(value))
+        {
+            fail(messageProvider.createMessage(false, value), state);
+        }
+        else if (warnPredicate.apply(state).test(value))
+        {
+            warn(messageProvider.createMessage(true, value));
+        }
+    }
+}
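A minimal sketch of the predicate-based guardrail above; the constructor is package-private, so this is the kind of wiring a factory inside the guardrails package would do, and the name and bounds are invented:

    // Hypothetical predicate-based guardrail over Integer values (e.g. a TTL check).
    Predicates<Integer> ttlGuard = new Predicates<Integer>("ttl",
                                                           state -> ttl -> ttl > 86_400,      // warn on TTLs over a day
                                                           state -> ttl -> ttl > 7 * 86_400,  // fail on TTLs over a week
                                                           (isWarning, value) ->
                                                               String.format("TTL %s is %s", value,
                                                                             isWarning ? "discouraged" : "not allowed"));

    ttlGuard.guard(2 * 86_400, null);  // warns; a value over a week would fail instead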
diff --git a/src/java/org/apache/cassandra/db/guardrails/Threshold.java b/src/java/org/apache/cassandra/db/guardrails/Threshold.java
new file mode 100644
index 0000000..b671907
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/Threshold.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.function.ToLongFunction;
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.service.ClientState;
+
+/**
+ * A guardrail based on numeric threshold(s).
+ *
+ * <p>A {@link Threshold} guardrail defines (up to) two thresholds: one at which a warning is issued, and another
+ * at which the operation is aborted with an exception. Either of the two thresholds can be activated on its own, if desired.
+ *
+ * <p>This guardrail only handles guarding positive values.
+ */
+public abstract class Threshold extends Guardrail
+{
+    protected ToLongFunction<ClientState> warnThreshold;
+    protected ToLongFunction<ClientState> failThreshold;
+    protected final ErrorMessageProvider messageProvider;
+
+    /**
+     * Creates a new threshold guardrail.
+     *
+     * @param name            the identifying name of the guardrail
+     * @param warnThreshold   a {@link ClientState}-based provider of the value above which a warning should be triggered.
+     * @param failThreshold   a {@link ClientState}-based provider of the value above which the operation should be aborted.
+     * @param messageProvider a function to generate the warning or error message if the guardrail is triggered
+     */
+    public Threshold(String name,
+                     ToLongFunction<ClientState> warnThreshold,
+                     ToLongFunction<ClientState> failThreshold,
+                     ErrorMessageProvider messageProvider)
+    {
+        super(name);
+        this.warnThreshold = warnThreshold;
+        this.failThreshold = failThreshold;
+        this.messageProvider = messageProvider;
+    }
+
+    protected abstract boolean compare(long value, long threshold);
+
+    protected String errMsg(boolean isWarning, String what, long value, long thresholdValue)
+    {
+        return messageProvider.createMessage(isWarning,
+                                             what,
+                                             Long.toString(value),
+                                             Long.toString(thresholdValue));
+    }
+
+    private String redactedErrMsg(boolean isWarning, long value, long thresholdValue)
+    {
+        return errMsg(isWarning, REDACTED, value, thresholdValue);
+    }
+
+    protected abstract long failValue(ClientState state);
+
+    protected abstract long warnValue(ClientState state);
+
+    public boolean enabled(@Nullable ClientState state)
+    {
+        if (!super.enabled(state))
+            return false;
+
+        return failThreshold.applyAsLong(state) > 0 || warnThreshold.applyAsLong(state) > 0;
+    }
+
+    /**
+     * Checks whether the provided value would trigger a warning or failure if passed to {@link #guard}.
+     *
+     * <p>This method is optional (does not have to be called) but can be used in the case where the "what"
+     * argument to {@link #guard} is expensive to build to save doing so in the common case (of the guardrail
+     * not being triggered).
+     *
+     * @param value the value to test.
+     * @param state The client state, used to skip the check if the query is internal or is done by a superuser.
+     *              A {@code null} value means that the check should be done regardless of the query.
+     * @return {@code true} if {@code value} breaches the warning or failure thresholds of this guardrail,
+     * {@code false} otherwise.
+     */
+    public boolean triggersOn(long value, @Nullable ClientState state)
+    {
+        return enabled(state) && (compare(value, warnValue(state)) || compare(value, failValue(state)));
+    }
+
+    public boolean warnsOn(long value, @Nullable ClientState state)
+    {
+        return enabled(state) && compare(value, warnValue(state));
+    }
+
+    public boolean failsOn(long value, @Nullable ClientState state)
+    {
+        return enabled(state) && compare(value, failValue(state));
+    }
+
+    /**
+     * Apply the guardrail to the provided value, warning or failing if appropriate.
+     *
+     * @param value            The value to check.
+     * @param what             A string describing what {@code value} is a value of. This is used in the error message
+     *                         if the guardrail is triggered. For instance, say the guardrail guards the size of column
+     *                         values, then this argument must describe which column of which row is triggering the
+     *                         guardrail for convenience.
+     * @param containsUserData whether the {@code what} contains user data that should be redacted on external systems.
+     * @param state            The client state, used to skip the check if the query is internal or is done by a superuser.
+     *                         A {@code null} value means that the check should be done regardless of the query.
+     */
+    public void guard(long value, String what, boolean containsUserData, @Nullable ClientState state)
+    {
+        if (!enabled(state))
+            return;
+
+        long failValue = failValue(state);
+        if (compare(value, failValue))
+        {
+            triggerFail(value, failValue, what, containsUserData, state);
+            return;
+        }
+
+        long warnValue = warnValue(state);
+        if (compare(value, warnValue))
+            triggerWarn(value, warnValue, what, containsUserData);
+    }
+
+    private void triggerFail(long value, long failValue, String what, boolean containsUserData, ClientState state)
+    {
+        String fullMessage = errMsg(false, what, value, failValue);
+        fail(fullMessage, containsUserData ? redactedErrMsg(false, value, failValue) : fullMessage, state);
+    }
+
+    private void triggerWarn(long value, long warnValue, String what, boolean containsUserData)
+    {
+        String fullMessage = errMsg(true, what, value, warnValue);
+        warn(fullMessage, containsUserData ? redactedErrMsg(true, value, warnValue) : fullMessage);
+    }
+
+    /**
+     * A function used to build the error message of a triggered {@link Threshold} guardrail.
+     */
+    interface ErrorMessageProvider
+    {
+        /**
+         * Called when the guardrail is triggered to build the corresponding error message.
+         *
+         * @param isWarning Whether the trigger is a warning one; otherwise it is a failure one.
+         * @param what      A string, provided by the call to the {@link #guard} method, describing what the guardrail
+         *                  has been applied to (and that has triggered it).
+         * @param value     The value that triggered the guardrail (as a string).
+         * @param threshold The threshold that was passed to trigger the guardrail (as a string).
+         */
+        String createMessage(boolean isWarning, String what, String value, String threshold);
+    }
+}
\ No newline at end of file
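The triggersOn/warnsOn/failsOn checks exist so callers can avoid building an expensive "what" description when nothing would fire, as the javadoc above notes. A sketch of that calling pattern, with a hypothetical helper method:

    // Sketch: only build the (potentially expensive) description when the guardrail would trigger.
    static void guardCollectionSize(Threshold guardrail, long sizeInBytes, String column, ClientState state)
    {
        if (!guardrail.triggersOn(sizeInBytes, state))
            return;

        String what = "collection " + column;  // imagine something costlier to assemble here
        guardrail.guard(sizeInBytes, what, true, state);
    }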
diff --git a/src/java/org/apache/cassandra/db/guardrails/Values.java b/src/java/org/apache/cassandra/db/guardrails/Values.java
new file mode 100644
index 0000000..f46e3af
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/Values.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+
+import com.google.common.collect.Sets;
+
+import org.apache.cassandra.service.ClientState;
+
+import static java.lang.String.format;
+
+/**
+ * A guardrail that warns about some specific values, warns about but ignores some other values, and/or rejects the use
+ * of some other values.
+ *
+ * @param <T> The type of the values, some of which may be warned about, ignored and/or disallowed.
+ */
+public class Values<T> extends Guardrail
+{
+    private final Function<ClientState, Set<T>> warnedValues;
+    private final Function<ClientState, Set<T>> ignoredValues;
+    private final Function<ClientState, Set<T>> disallowedValues;
+    private final String what;
+
+    /**
+     * Creates a new values guardrail.
+     *
+     * @param name             the identifying name of the guardrail
+     * @param warnedValues     a {@link ClientState}-based provider of the values for which a warning is triggered.
+     * @param ignoredValues    a {@link ClientState}-based provider of the values that are ignored.
+     * @param disallowedValues a {@link ClientState}-based provider of the values that are disallowed.
+     * @param what             The feature that is guarded by this guardrail (for reporting in error messages).
+     */
+    public Values(String name,
+                  Function<ClientState, Set<T>> warnedValues,
+                  Function<ClientState, Set<T>> ignoredValues,
+                  Function<ClientState, Set<T>> disallowedValues,
+                  String what)
+    {
+        super(name);
+        this.warnedValues = warnedValues;
+        this.ignoredValues = ignoredValues;
+        this.disallowedValues = disallowedValues;
+        this.what = what;
+    }
+
+    /**
+     * Triggers a warning for each of the provided values that is discouraged by this guardrail. If any of the values
+     * is disallowed it will abort the operation.
+     * <p>
+     * This assumes that there aren't any values to be ignored, thus it doesn't require an ignore action. If that is
+     * not the case and one of the provided values is set up to be ignored, this will throw an assertion error.
+     *
+     * @param values The values to check.
+     * @param state  The client state, used to skip the check if the query is internal or is done by a superuser.
+     *               A {@code null} value means that the check should be done regardless of the query.
+     */
+    public void guard(Set<T> values, @Nullable ClientState state)
+    {
+        guard(values, x -> {
+            throw new AssertionError(format("There isn't an ignore action for %s, but value %s is setup to be ignored",
+                                            what, x));
+        }, state);
+    }
+
+    /**
+     * Triggers a warning for each of the provided values that is discouraged by this guardrail. Also triggers a warning
+     * for each of the provided values that is ignored by this guardrail and triggers the provided action to ignore it.
+     * If any of the values is disallowed it will abort the operation.
+     *
+     * @param values       The values to check.
+     * @param ignoreAction An action called on the subset of {@code values} that should be ignored. This action
+     *                     should do whatever is necessary to make sure the value is ignored.
+     * @param state        The client state, used to skip the check if the query is internal or is done by a superuser.
+     *                     A {@code null} value means that the check should be done regardless of the query, although it
+     *                     won't throw any exception if the failure threshold is exceeded. This is so because checks
+     *                     without an associated client come from asynchronous processes such as compaction, and we
+     *                     don't want to interrupt such processes.
+     */
+    public void guard(Set<T> values, Consumer<T> ignoreAction, @Nullable ClientState state)
+    {
+        if (!enabled(state))
+            return;
+
+        Set<T> disallowed = disallowedValues.apply(state);
+        Set<T> toDisallow = Sets.intersection(values, disallowed);
+        if (!toDisallow.isEmpty())
+            fail(format("Provided values %s are not allowed for %s (disallowed values are: %s)",
+                        toDisallow.stream().sorted().collect(Collectors.toList()), what, disallowed), state);
+
+        Set<T> ignored = ignoredValues.apply(state);
+        Set<T> toIgnore = Sets.intersection(values, ignored);
+        if (!toIgnore.isEmpty())
+        {
+            warn(format("Ignoring provided values %s as they are not supported for %s (ignored values are: %s)",
+                        toIgnore.stream().sorted().collect(Collectors.toList()), what, ignored));
+            toIgnore.forEach(ignoreAction);
+        }
+
+        Set<T> warned = warnedValues.apply(state);
+        Set<T> toWarn = Sets.intersection(values, warned);
+        if (!toWarn.isEmpty())
+            warn(format("Provided values %s are not recommended for %s (warned values are: %s)",
+                        toWarn.stream().sorted().collect(Collectors.toList()), what, warned));
+    }
+}
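A sketch of the Values guardrail in use, guarding a set of table option names; the warned/ignored/disallowed sets are invented constants here, whereas the real providers read them from configuration per ClientState:

    // Hypothetical values guardrail over table option names.
    Values<String> tableOptions = new Values<String>("table_properties",
                                                     state -> ImmutableSet.of("compaction"),          // warned
                                                     state -> ImmutableSet.of("read_repair_chance"),  // ignored
                                                     state -> ImmutableSet.of("cdc"),                 // disallowed
                                                     "table properties");

    Map<String, String> options = new HashMap<>();
    options.put("comment", "test table");
    options.put("read_repair_chance", "0.1");

    // Guard over a copy of the keys so the ignore action can strip options from the map while iterating.
    tableOptions.guard(new HashSet<>(options.keySet()), options::remove, null);  // warns and removes read_repair_chance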
diff --git a/src/java/org/apache/cassandra/db/lifecycle/ILifecycleTransaction.java b/src/java/org/apache/cassandra/db/lifecycle/ILifecycleTransaction.java
index 3de0a35..c014e38 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/ILifecycleTransaction.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/ILifecycleTransaction.java
@@ -21,7 +21,6 @@
 import java.util.Collection;
 import java.util.Set;
 
-import org.apache.cassandra.io.sstable.SSTable;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java b/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java
index 574c6a4..d3f3a1e 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
 import java.nio.file.Path;
 import java.util.*;
 import java.util.function.BiPredicate;
@@ -25,6 +24,7 @@
 import com.google.common.base.Predicate;
 import com.google.common.collect.*;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,6 +36,7 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableReader.UniqueIdentifier;
 import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
 import static com.google.common.base.Functions.compose;
@@ -142,7 +143,7 @@
     public static LifecycleTransaction offline(OperationType operationType, Iterable<SSTableReader> readers)
     {
         // if offline, for simplicity we just use a dummy tracker
-        Tracker dummy = new Tracker(null, false);
+        Tracker dummy = Tracker.newDummyTracker();
         dummy.addInitialSSTables(readers);
         dummy.apply(updateCompacting(emptySet(), readers));
         return new LifecycleTransaction(dummy, operationType, readers);
@@ -154,7 +155,7 @@
     @SuppressWarnings("resource") // log closed during postCleanup
     public static LifecycleTransaction offline(OperationType operationType)
     {
-        Tracker dummy = new Tracker(null, false);
+        Tracker dummy = Tracker.newDummyTracker();
         return new LifecycleTransaction(dummy, new LogTransaction(operationType, dummy), Collections.emptyList());
     }
 
@@ -187,7 +188,7 @@
         return log.type();
     }
 
-    public UUID opId()
+    public TimeUUID opId()
     {
         return log.id();
     }
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java b/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java
index 254966e..120f9bc 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java
@@ -20,7 +20,6 @@
  */
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
@@ -32,6 +31,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -106,7 +106,7 @@
         try
         {
             return StreamSupport.stream(stream.spliterator(), false)
-                                .map(Path::toFile)
+                                .map(File::new)
                                 .filter((f) -> !f.isDirectory())
                                 .collect(Collectors.toList());
         }
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
index 9053034..11e3ffb 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
@@ -20,7 +20,6 @@
  */
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
 import java.nio.file.Path;
 import java.util.*;
 import java.util.regex.Matcher;
@@ -30,6 +29,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Iterables;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -40,7 +40,9 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.big.BigFormat;
 import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.TimeUUID;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.cassandra.utils.Throwables.merge;
 
 /**
@@ -73,11 +75,11 @@
     private final OperationType type;
 
     // The unique id of the transaction
-    private final UUID id;
+    private final TimeUUID id;
 
     static LogFile make(File logReplica)
     {
-        return make(logReplica.getName(), Collections.singletonList(logReplica));
+        return make(logReplica.name(), Collections.singletonList(logReplica));
     }
 
     static LogFile make(String fileName, List<File> logReplicas)
@@ -91,7 +93,7 @@
         //String version = matcher.group(1);
 
         OperationType operationType = OperationType.fromFileName(matcher.group(2));
-        UUID id = UUID.fromString(matcher.group(3));
+        TimeUUID id = TimeUUID.fromString(matcher.group(3));
 
         return new LogFile(operationType, id, logReplicas);
     }
@@ -106,7 +108,7 @@
         return type;
     }
 
-    UUID id()
+    TimeUUID id()
     {
         return id;
     }
@@ -139,16 +141,16 @@
 
     static boolean isLogFile(File file)
     {
-        return LogFile.FILE_REGEX.matcher(file.getName()).matches();
+        return LogFile.FILE_REGEX.matcher(file.name()).matches();
     }
 
-    LogFile(OperationType type, UUID id, List<File> replicas)
+    LogFile(OperationType type, TimeUUID id, List<File> replicas)
     {
         this(type, id);
         this.replicas.addReplicas(replicas);
     }
 
-    LogFile(OperationType type, UUID id)
+    LogFile(OperationType type, TimeUUID id)
     {
         this.type = type;
         this.id = id;
@@ -275,12 +277,12 @@
 
     void commit()
     {
-        addRecord(LogRecord.makeCommit(System.currentTimeMillis()));
+        addRecord(LogRecord.makeCommit(currentTimeMillis()));
     }
 
     void abort()
     {
-        addRecord(LogRecord.makeAbort(System.currentTimeMillis()));
+        addRecord(LogRecord.makeAbort(currentTimeMillis()));
     }
 
     private boolean isLastRecordValidWithType(Type type)
@@ -347,7 +349,7 @@
     private void maybeCreateReplica(SSTable sstable)
     {
         File directory = sstable.descriptor.directory;
-        String fileName = StringUtils.join(directory, File.separator, getFileName());
+        String fileName = StringUtils.join(directory, File.pathSeparator(), getFileName());
         replicas.maybeCreateReplica(directory, fileName, onDiskRecords);
     }
 
@@ -443,7 +445,7 @@
     private static Set<File> getRecordFiles(NavigableSet<File> files, LogRecord record)
     {
         String fileName = record.fileName();
-        return files.stream().filter(f -> f.getName().startsWith(fileName)).collect(Collectors.toSet());
+        return files.stream().filter(f -> f.name().startsWith(fileName)).collect(Collectors.toSet());
     }
 
     boolean exists()
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java b/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java
index 513ad87..45653c4 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java
@@ -20,11 +20,10 @@
  */
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
-import java.io.FilenameFilter;
+
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.*;
+import java.util.function.BiPredicate;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
@@ -33,7 +32,9 @@
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.SSTable;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.PathUtils;
 import org.apache.cassandra.utils.FBUtilities;
 
 /**
@@ -283,8 +284,8 @@
 
     public static List<File> getExistingFiles(String absoluteFilePath)
     {
-        Path path = Paths.get(absoluteFilePath);
-        File[] files = path.getParent().toFile().listFiles((dir, name) -> name.startsWith(path.getFileName().toString()));
+        File file = new File(absoluteFilePath);
+        File[] files = file.parent().tryList((dir, name) -> name.startsWith(file.name()));
         // files may be null if the directory does not exist yet, e.g. when tracking new files
         return files == null ? Collections.emptyList() : Arrays.asList(files);
     }
@@ -302,13 +303,13 @@
         Map<File, TreeSet<String>> dirToFileNamePrefix = new HashMap<>();
         for (String absolutePath : absoluteFilePaths)
         {
-            Path fullPath = Paths.get(absolutePath);
+            Path fullPath = new File(absolutePath).toPath();
             Path path = fullPath.getParent();
             if (path != null)
-                dirToFileNamePrefix.computeIfAbsent(path.toFile(), (k) -> new TreeSet<>()).add(fullPath.getFileName().toString());
+                dirToFileNamePrefix.computeIfAbsent(new File(path), (k) -> new TreeSet<>()).add(fullPath.getFileName().toString());
         }
 
-        FilenameFilter ff = (dir, name) -> {
+        BiPredicate<File, String> ff = (dir, name) -> {
             TreeSet<String> dirSet = dirToFileNamePrefix.get(dir);
             // if the set contains a prefix of the current file name, the file name we have here should sort directly
             // after the prefix in the tree set, which means we can use 'floor' to get the prefix (returns the largest
@@ -317,7 +318,7 @@
             String baseName = dirSet.floor(name);
             if (baseName != null && name.startsWith(baseName))
             {
-                String absolutePath = new File(dir, baseName).getPath();
+                String absolutePath = new File(dir, baseName).path();
                 fileMap.computeIfAbsent(absolutePath, k -> new ArrayList<>()).add(new File(dir, name));
             }
             return false;
@@ -325,7 +326,7 @@
 
         // populate the file map:
         for (File f : dirToFileNamePrefix.keySet())
-            f.listFiles(ff);
+            f.tryList(ff);
 
         return fileMap;
     }
@@ -338,14 +339,12 @@
 
     String fileName()
     {
-        return absolutePath.isPresent() ? Paths.get(absolutePath.get()).getFileName().toString() : "";
+        return absolutePath.isPresent() ? new File(absolutePath.get()).name() : "";
     }
 
     boolean isInFolder(Path folder)
     {
-        return absolutePath.isPresent()
-               ? FileUtils.isContained(folder.toFile(), Paths.get(absolutePath.get()).toFile())
-               : false;
+        return absolutePath.isPresent() && PathUtils.isContained(folder, new File(absolutePath.get()).toPath());
     }
 
     String absolutePath()
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java b/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java
index cdc4c35..1ea8b83 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java
@@ -18,21 +18,22 @@
 
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.io.IOException;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.io.FSError;
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.NativeLibrary;
 
+import static org.apache.cassandra.config.CassandraRelevantProperties.IGNORE_MISSING_NATIVE_FILE_HINTS;
+
 /**
  * Because a column family may have sstables on different disks and disks can
  * be removed, we duplicate log files into many replicas so as to have a file
@@ -47,6 +48,7 @@
 final class LogReplica implements AutoCloseable
 {
     private static final Logger logger = LoggerFactory.getLogger(LogReplica.class);
+    private static final boolean REQUIRE_FD = !IGNORE_MISSING_NATIVE_FILE_HINTS.getBoolean();
 
     private final File file;
     private int directoryDescriptor;
@@ -54,18 +56,18 @@
 
     static LogReplica create(File directory, String fileName)
     {
-        int folderFD = NativeLibrary.tryOpenDirectory(directory.getPath());
-        if (folderFD == -1 && !FBUtilities.isWindows)
-            throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", directory.getPath())), directory.getPath());
+        int folderFD = NativeLibrary.tryOpenDirectory(directory.path());
+        if (folderFD == -1 && REQUIRE_FD)
+            throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", directory.path())), directory.path());
 
         return new LogReplica(new File(fileName), folderFD);
     }
 
     static LogReplica open(File file)
     {
-        int folderFD = NativeLibrary.tryOpenDirectory(file.getParentFile().getPath());
-        if (folderFD == -1 && !FBUtilities.isWindows)
-            throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", file.getParentFile().getPath())), file.getParentFile().getPath());
+        int folderFD = NativeLibrary.tryOpenDirectory(file.parent().path());
+        if (folderFD == -1)
+            throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", file.parent().path())), file.parent().path());
 
         return new LogReplica(file, folderFD);
     }
@@ -88,12 +90,12 @@
 
     String getFileName()
     {
-        return file.getName();
+        return file.name();
     }
 
     String getDirectory()
     {
-        return file.getParent();
+        return file.parentPath();
     }
 
     void append(LogRecord record)
@@ -162,7 +164,7 @@
 
     void printContentsWithAnyErrors(StringBuilder str)
     {
-        str.append(file.getPath());
+        str.append(file.path());
         str.append(System.lineSeparator());
         FileUtils.readLines(file).forEach(line -> printLineWithAnyError(str, line));
     }
@@ -181,4 +183,9 @@
             str.append(System.lineSeparator());
         }
     }
+
+    public int hashCode()
+    {
+        return file.hashCode();
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java b/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java
index 0295357..316e4b6 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.LinkedHashMap;
@@ -29,6 +28,7 @@
 import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -61,7 +61,7 @@
 
     void addReplica(File file)
     {
-        File directory = file.getParentFile();
+        File directory = file.parent();
         assert !replicasByFile.containsKey(directory);
         try
         {
@@ -268,6 +268,6 @@
     @VisibleForTesting
     List<String> getFilePaths()
     {
-        return replicas().stream().map(LogReplica::file).map(File::getPath).collect(Collectors.toList());
+        return replicas().stream().map(LogReplica::file).map(File::path).collect(Collectors.toList());
     }
 }
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java b/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java
index a3c3837..f203bd8 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.db.lifecycle;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.file.Files;
@@ -37,7 +36,6 @@
 
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.db.compaction.OperationType;
@@ -46,15 +44,18 @@
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.SSTable;
-import org.apache.cassandra.io.sstable.SnapshotDeletingTask;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.*;
 import org.apache.cassandra.utils.concurrent.Ref;
 import org.apache.cassandra.utils.concurrent.RefCounted;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 /**
  * IMPORTANT: When this object is involved in a transactional graph, and is not encapsulated in a LifecycleTransaction,
  * for correct behaviour its commit MUST occur before any others, since it may legitimately fail. This is consistent
@@ -114,8 +115,7 @@
     // We need an explicit lock because the transaction tidier cannot store a reference to the transaction
     private final Object lock;
     private final Ref<LogTransaction> selfRef;
-    // Deleting sstables is tricky because the mmapping might not have been finalized yet,
-    // and delete will fail (on Windows) until it is (we only force the unmapping on SUN VMs).
+    // Deleting sstables is tricky because the mmapping might not have been finalized yet.
     // Additionally, we need to make sure to delete the data file first, so on restart the others
     // will be recognized as GCable.
     private static final Queue<Runnable> failedDeletions = new ConcurrentLinkedQueue<>();
@@ -128,7 +128,7 @@
     LogTransaction(OperationType opType, Tracker tracker)
     {
         this.tracker = tracker;
-        this.txnFile = new LogFile(opType, UUIDGen.getTimeUUID());
+        this.txnFile = new LogFile(opType, nextTimeUUID());
         this.lock = new Object();
         this.selfRef = new Ref<>(this, new TransactionTidier(txnFile, lock));
 
@@ -211,7 +211,7 @@
         return txnFile.type();
     }
 
-    UUID id()
+    TimeUUID id()
     {
         return txnFile.id();
     }
@@ -380,7 +380,7 @@
             // While this may be a dummy tracker w/out information in the metrics table, we attempt to delete regardless
             // and allow the delete to silently fail if this is an invalid ks + cf combination at time of tidy run.
             if (DatabaseDescriptor.isDaemonInitialized())
-                SystemKeyspace.clearSSTableReadMeter(desc.ksname, desc.cfname, desc.generation);
+                SystemKeyspace.clearSSTableReadMeter(desc.ksname, desc.cfname, desc.id);
 
             synchronized (lock)
             {
@@ -433,9 +433,6 @@
         Runnable task;
         while ( null != (task = failedDeletions.poll()))
             ScheduledExecutors.nonPeriodicTasks.submit(task);
-
-        // On Windows, snapshots cannot be deleted so long as a segment of the root element is memory-mapped in NTFS.
-        SnapshotDeletingTask.rescheduleFailedTasks();
     }
 
     static void waitForDeletions()
@@ -517,16 +514,16 @@
 
         void list(File directory)
         {
-            Arrays.stream(directory.listFiles(LogFile::isLogFile)).forEach(this::add);
+            Arrays.stream(directory.tryList(LogFile::isLogFile)).forEach(this::add);
         }
 
         void add(File file)
         {
-            List<File> filesByName = files.get(file.getName());
+            List<File> filesByName = files.get(file.name());
             if (filesByName == null)
             {
                 filesByName = new ArrayList<>();
-                files.put(file.getName(), filesByName);
+                files.put(file.name(), filesByName);
             }
 
             filesByName.add(file);
diff --git a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java
index 3d72a11..ab8a74b 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
 import java.util.*;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicReference;
@@ -30,8 +29,9 @@
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -74,17 +74,23 @@
     public final boolean loadsstables;
 
     /**
+     * @param columnFamilyStore
      * @param memtable Initial Memtable. Can be null.
      * @param loadsstables true to indicate to load SSTables (TODO: remove as this is only accessed from 2i)
      */
-    public Tracker(Memtable memtable, boolean loadsstables)
+    public Tracker(ColumnFamilyStore columnFamilyStore, Memtable memtable, boolean loadsstables)
     {
-        this.cfstore = memtable != null ? memtable.cfs : null;
+        this.cfstore = columnFamilyStore;
         this.view = new AtomicReference<>();
         this.loadsstables = loadsstables;
         this.reset(memtable);
     }
 
+    public static Tracker newDummyTracker()
+    {
+        return new Tracker(null, null, false);
+    }
+
     public LifecycleTransaction tryModify(SSTableReader sstable, OperationType operationType)
     {
         return tryModify(singleton(sstable), operationType);
diff --git a/src/java/org/apache/cassandra/db/lifecycle/View.java b/src/java/org/apache/cassandra/db/lifecycle/View.java
index 203f2fa..958bc0d 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/View.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/View.java
@@ -26,7 +26,7 @@
 import com.google.common.collect.*;
 
 import org.apache.cassandra.db.DecoratedKey;
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.PartitionPosition;
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
@@ -169,7 +169,7 @@
         return sstables.isEmpty()
                && liveMemtables.size() <= 1
                && flushingMemtables.size() == 0
-               && (liveMemtables.size() == 0 || liveMemtables.get(0).getOperations() == 0);
+               && (liveMemtables.size() == 0 || liveMemtables.get(0).operationCount() == 0);
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java b/src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java
new file mode 100644
index 0000000..91a5b27
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.marshal;
+
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import org.apache.cassandra.cql3.CQL3Type;
+import org.apache.cassandra.cql3.Constants;
+import org.apache.cassandra.cql3.Term;
+import org.apache.cassandra.serializers.MarshalException;
+import org.apache.cassandra.serializers.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;
+
+// Fully compatible with UUID, and indeed is interpreted as UUID for UDF
+public abstract class AbstractTimeUUIDType<T> extends TemporalType<T>
+{
+    AbstractTimeUUIDType()
+    {
+        super(ComparisonType.CUSTOM);
+    } // singleton
+
+    public boolean isEmptyValueMeaningless()
+    {
+        return true;
+    }
+
+    public <VL, VR> int compareCustom(VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
+    {
+        // Compare for length
+        boolean p1 = accessorL.size(left) == 16, p2 = accessorR.size(right) == 16;
+        if (!(p1 & p2))
+        {
+            // should we assert exactly 16 bytes (or 0)? seems prudent
+            assert p1 || accessorL.isEmpty(left);
+            assert p2 || accessorR.isEmpty(right);
+            return p1 ? 1 : p2 ? -1 : 0;
+        }
+
+        long msb1 = accessorL.getLong(left, 0);
+        long msb2 = accessorR.getLong(right, 0);
+        msb1 = reorderTimestampBytes(msb1);
+        msb2 = reorderTimestampBytes(msb2);
+
+        assert (msb1 & topbyte(0xf0L)) == topbyte(0x10L);
+        assert (msb2 & topbyte(0xf0L)) == topbyte(0x10L);
+
+        int c = Long.compare(msb1, msb2);
+        if (c != 0)
+            return c;
+
+        // this has to be a signed per-byte comparison for compatibility
+        // so we transform the bytes so that a simple long comparison is equivalent
+        long lsb1 = signedBytesToNativeLong(accessorL.getLong(left, 8));
+        long lsb2 = signedBytesToNativeLong(accessorR.getLong(right, 8));
+        return Long.compare(lsb1, lsb2);
+    }
+
+    // takes as input 8 signed bytes in native machine order
+    // returns the first byte unchanged, and the following 7 bytes converted to an unsigned representation
+    // which is the same as a 2's complement long in native format
+    public static long signedBytesToNativeLong(long signedBytes)
+    {
+        return signedBytes ^ 0x0080808080808080L;
+    }
+
+    private static long topbyte(long topbyte)
+    {
+        return topbyte << 56;
+    }
+
+    protected static long reorderTimestampBytes(long input)
+    {
+        return    (input <<  48)
+                  | ((input <<  16) & 0xFFFF00000000L)
+                  |  (input >>> 32);
+    }
+
+    public ByteBuffer fromString(String source) throws MarshalException
+    {
+        ByteBuffer parsed = UUIDType.parse(source);
+        if (parsed == null)
+            throw new MarshalException(String.format("Unknown timeuuid representation: %s", source));
+        if (parsed.remaining() == 16 && UUIDType.version(parsed) != 1)
+            throw new MarshalException("TimeUUID supports only version 1 UUIDs");
+        return parsed;
+    }
+
+    @Override
+    public Term fromJSONObject(Object parsed) throws MarshalException
+    {
+        try
+        {
+            return new Constants.Value(fromString((String) parsed));
+        }
+        catch (ClassCastException exc)
+        {
+            throw new MarshalException(
+                    String.format("Expected a string representation of a timeuuid, but got a %s: %s", parsed.getClass().getSimpleName(), parsed));
+        }
+    }
+
+    public CQL3Type asCQL3Type()
+    {
+        return CQL3Type.Native.TIMEUUID;
+    }
+
+    @Override
+    public ByteBuffer decomposeUntyped(Object value)
+    {
+        if (value instanceof UUID)
+            return UUIDSerializer.instance.serialize((UUID) value);
+        if (value instanceof TimeUUID)
+            return TimeUUID.Serializer.instance.serialize((TimeUUID) value);
+        return super.decomposeUntyped(value);
+    }
+
+    @Override
+    public int valueLengthIfFixed()
+    {
+        return 16;
+    }
+
+    @Override
+    public long toTimeInMillis(ByteBuffer value)
+    {
+        return TimeUUID.deserialize(value).unix(MILLISECONDS);
+    }
+
+    @Override
+    public ByteBuffer addDuration(ByteBuffer temporal, ByteBuffer duration)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ByteBuffer substractDuration(ByteBuffer temporal, ByteBuffer duration)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ByteBuffer now()
+    {
+        return ByteBuffer.wrap(nextTimeUUIDAsBytes());
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        return obj instanceof AbstractTimeUUIDType<?>;
+    }
+}
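
Editor's note: the comparison trick extracted into AbstractTimeUUIDType (reordering the timestamp bytes of the most significant long, then XOR-masking the least significant long) can be hard to follow from the diff alone. Below is a minimal, self-contained sketch of the same idea using plain version-1 java.util.UUID values instead of Cassandra's value accessors; the class and example UUIDs are invented for illustration only.

import java.util.UUID;

public class TimeUuidCompareSketch
{
    // Same reordering as AbstractTimeUUIDType: move time_hi/version and time_mid in front of
    // time_low so that a plain long comparison of the result orders by the embedded timestamp.
    static long reorderTimestampBytes(long msb)
    {
        return (msb << 48) | ((msb << 16) & 0xFFFF00000000L) | (msb >>> 32);
    }

    // Flip the sign bit of the lower seven bytes so that a signed long comparison of the result
    // matches a signed per-byte comparison of the original least significant bits.
    static long signedBytesToNativeLong(long signedBytes)
    {
        return signedBytes ^ 0x0080808080808080L;
    }

    static int compare(UUID a, UUID b)
    {
        int c = Long.compare(reorderTimestampBytes(a.getMostSignificantBits()),
                             reorderTimestampBytes(b.getMostSignificantBits()));
        if (c != 0)
            return c;
        return Long.compare(signedBytesToNativeLong(a.getLeastSignificantBits()),
                            signedBytesToNativeLong(b.getLeastSignificantBits()));
    }

    public static void main(String[] args)
    {
        UUID earlier = UUID.fromString("00000000-0000-1000-8000-000000000001");
        UUID later   = UUID.fromString("00000001-0000-1000-8000-000000000001");
        System.out.println(compare(earlier, later) < 0); // true: ordered by embedded timestamp
    }
}
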
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractType.java b/src/java/org/apache/cassandra/db/marshal/AbstractType.java
index 19cf849..74d4006 100644
--- a/src/java/org/apache/cassandra/db/marshal/AbstractType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractType.java
@@ -32,6 +32,7 @@
 import org.apache.cassandra.cql3.CQL3Type;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.Term;
+import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
@@ -126,6 +127,11 @@
         return getSerializer().deserialize(value, accessor);
     }
 
+    public ByteBuffer decomposeUntyped(Object value)
+    {
+        return decompose((T) value);
+    }
+
     public ByteBuffer decompose(T value)
     {
         return getSerializer().serialize(value);
@@ -148,6 +154,11 @@
         return getString(bytes, ByteBufferAccessor.instance);
     }
 
+    public String toCQLString(ByteBuffer bytes)
+    {
+        return asCQL3Type().toCQLLiteral(bytes, ProtocolVersion.CURRENT);
+    }
+
     /** get a byte representation of the given string. */
     public abstract ByteBuffer fromString(String source) throws MarshalException;
 
@@ -234,6 +245,11 @@
         return new CQL3Type.Custom(this);
     }
 
+    public AbstractType<?> udfType()
+    {
+        return this;
+    }
+
     /**
      * Same as compare except that this ignores ReversedType. This is to be used when
      * comparing 2 values to decide for a CQL condition (see Operator.isSatisfiedBy) as
@@ -313,9 +329,11 @@
      *
      * Note that a type should be compatible with at least itself.
      */
-    public boolean isValueCompatibleWith(AbstractType<?> otherType)
+    public boolean isValueCompatibleWith(AbstractType<?> previous)
     {
-        return isValueCompatibleWithInternal((otherType instanceof ReversedType) ? ((ReversedType) otherType).baseType : otherType);
+        AbstractType<?> thisType =          isReversed() ? ((ReversedType<?>)     this).baseType : this;
+        AbstractType<?> thatType = previous.isReversed() ? ((ReversedType<?>) previous).baseType : previous;
+        return thisType.isValueCompatibleWithInternal(thatType);
     }
 
     /**
@@ -328,6 +346,18 @@
     }
 
     /**
+     * Similar to {@link #isValueCompatibleWith(AbstractType)}, but takes into account {@link Cell} encoding.
+     * In particular, this method doesn't consider two types serialization compatible if one of them has fixed
+     * length (overrides {@link #valueLengthIfFixed()}) and the other one doesn't.
+     */
+    public boolean isSerializationCompatibleWith(AbstractType<?> previous)
+    {
+        return isValueCompatibleWith(previous)
+               && valueLengthIfFixed() == previous.valueLengthIfFixed()
+               && isMultiCell() == previous.isMultiCell();
+    }
+
+    /**
      * An alternative comparison function used by CollectionsType in conjunction with CompositeType.
      *
      * This comparator is only called to compare components of a CompositeType. It gets the value of the
@@ -494,7 +524,7 @@
 
             if (l > maxValueSize)
                 throw new IOException(String.format("Corrupt value length %d encountered, as it exceeds the maximum of %d, " +
-                                                    "which is set via max_value_size_in_mb in cassandra.yaml",
+                                                    "which is set via max_value_size in cassandra.yaml",
                                                     l, maxValueSize));
 
             return accessor.read(in, l);
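
Editor's note: a small sketch of what the reworked compatibility checks mean in practice. The concrete types used here (Int32Type, optionally wrapped in ReversedType) are chosen only for illustration; the point is that isValueCompatibleWith now unwraps ReversedType on both sides before the internal check, and that isSerializationCompatibleWith additionally requires matching fixed-length and multi-cell properties.

import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.Int32Type;
import org.apache.cassandra.db.marshal.ReversedType;

public class CompatibilitySketch
{
    public static void main(String[] args)
    {
        AbstractType<?> plain = Int32Type.instance;
        AbstractType<?> reversed = ReversedType.getInstance(Int32Type.instance);

        // Reversal only affects sort order, not the encoded values, so after unwrapping
        // both sides these should be value compatible in either direction.
        System.out.println(plain.isValueCompatibleWith(reversed));
        System.out.println(reversed.isValueCompatibleWith(plain));

        // Serialization compatibility also checks valueLengthIfFixed() and isMultiCell();
        // a type is always serialization compatible with itself.
        System.out.println(plain.isSerializationCompatibleWith(plain));
    }
}
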
diff --git a/src/java/org/apache/cassandra/db/marshal/AsciiType.java b/src/java/org/apache/cassandra/db/marshal/AsciiType.java
index 05077ee..2d78c1a 100644
--- a/src/java/org/apache/cassandra/db/marshal/AsciiType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AsciiType.java
@@ -35,7 +35,7 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
-public class AsciiType extends AbstractType<String>
+public class AsciiType extends StringType
 {
     public static final AsciiType instance = new AsciiType();
 
diff --git a/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java b/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
index 7d13844..df24a62 100644
--- a/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
+++ b/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
@@ -29,6 +29,8 @@
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.ByteArrayUtil;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FastByteOperations;
@@ -235,6 +237,18 @@
     }
 
     @Override
+    public TimeUUID toTimeUUID(byte[] value)
+    {
+        return TimeUUID.fromBytes(getLong(value, 0), getLong(value, 8));
+    }
+
+    @Override
+    public Ballot toBallot(byte[] value)
+    {
+        return Ballot.deserialize(value);
+    }
+
+    @Override
     public int putShort(byte[] dst, int offset, short value)
     {
         ByteArrayUtil.putShort(dst, offset, value);
diff --git a/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java b/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
index a0f9c1d..40a3bf4 100644
--- a/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
+++ b/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
@@ -28,6 +28,8 @@
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FastByteOperations;
 import org.apache.cassandra.utils.UUIDGen;
@@ -239,6 +241,18 @@
     }
 
     @Override
+    public TimeUUID toTimeUUID(ByteBuffer value)
+    {
+        return TimeUUID.fromBytes(value.getLong(value.position()), value.getLong(value.position() + 8));
+    }
+
+    @Override
+    public Ballot toBallot(ByteBuffer value)
+    {
+        return Ballot.deserialize(value);
+    }
+
+    @Override
     public int putShort(ByteBuffer dst, int offset, short value)
     {
         dst.putShort(dst.position() + offset, value);
diff --git a/src/java/org/apache/cassandra/db/marshal/CollectionType.java b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
index 0d627a5..c52cddc 100644
--- a/src/java/org/apache/cassandra/db/marshal/CollectionType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
@@ -175,7 +175,7 @@
             return false;
 
         // the value comparator is only used for Cell values, so sorting doesn't matter
-        return this.valueComparator().isValueCompatibleWith(tprev.valueComparator());
+        return this.valueComparator().isSerializationCompatibleWith(tprev.valueComparator());
     }
 
     @Override
@@ -199,6 +199,15 @@
         return isValueCompatibleWithFrozen(tprev);
     }
 
+    @Override
+    public boolean isSerializationCompatibleWith(AbstractType<?> previous)
+    {
+        if (!isValueCompatibleWith(previous))
+            return false;
+
+        return valueComparator().isSerializationCompatibleWith(((CollectionType<?>)previous).valueComparator());
+    }
+
     /** A version of isCompatibleWith() to deal with non-multicell (frozen) collections */
     protected abstract boolean isCompatibleWithFrozen(CollectionType<?> previous);
 
diff --git a/src/java/org/apache/cassandra/db/marshal/DurationType.java b/src/java/org/apache/cassandra/db/marshal/DurationType.java
index 134a6f8..2afbfc1 100644
--- a/src/java/org/apache/cassandra/db/marshal/DurationType.java
+++ b/src/java/org/apache/cassandra/db/marshal/DurationType.java
@@ -26,7 +26,6 @@
 import org.apache.cassandra.serializers.DurationSerializer;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.TypeSerializer;
-import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 /**
diff --git a/src/java/org/apache/cassandra/db/marshal/LegacyTimeUUIDType.java b/src/java/org/apache/cassandra/db/marshal/LegacyTimeUUIDType.java
new file mode 100644
index 0000000..528a3e8
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/marshal/LegacyTimeUUIDType.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.marshal;
+
+import java.util.UUID;
+
+import org.apache.cassandra.serializers.TypeSerializer;
+import org.apache.cassandra.serializers.UUIDSerializer;
+
+// Fully compatible with UUID, and indeed is interpreted as UUID for UDF
+public class LegacyTimeUUIDType extends AbstractTimeUUIDType<UUID>
+{
+    public static final LegacyTimeUUIDType instance = new LegacyTimeUUIDType();
+
+    public TypeSerializer<UUID> getSerializer()
+    {
+        return UUIDSerializer.instance;
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/marshal/ListType.java b/src/java/org/apache/cassandra/db/marshal/ListType.java
index cc63937..281f7ee 100644
--- a/src/java/org/apache/cassandra/db/marshal/ListType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ListType.java
@@ -31,6 +31,7 @@
 import org.apache.cassandra.serializers.CollectionSerializer;
 import org.apache.cassandra.serializers.ListSerializer;
 import org.apache.cassandra.serializers.MarshalException;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.transport.ProtocolVersion;
 
 public class ListType<T> extends CollectionType<List<T>>
@@ -103,7 +104,7 @@
         return elements;
     }
 
-    public AbstractType<UUID> nameComparator()
+    public AbstractType<TimeUUID> nameComparator()
     {
         return TimeUUIDType.instance;
     }
diff --git a/src/java/org/apache/cassandra/db/marshal/ReversedType.java b/src/java/org/apache/cassandra/db/marshal/ReversedType.java
index 8a4b58d..ceea84a 100644
--- a/src/java/org/apache/cassandra/db/marshal/ReversedType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ReversedType.java
@@ -106,12 +106,6 @@
     }
 
     @Override
-    public boolean isValueCompatibleWith(AbstractType<?> otherType)
-    {
-        return this.baseType.isValueCompatibleWith(otherType);
-    }
-
-    @Override
     public CQL3Type asCQL3Type()
     {
         return baseType.asCQL3Type();
diff --git a/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java b/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
index f883ccd..8f1d677 100644
--- a/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
+++ b/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
@@ -23,7 +23,6 @@
 import org.apache.cassandra.cql3.Constants;
 import org.apache.cassandra.cql3.Duration;
 import org.apache.cassandra.cql3.Term;
-import org.apache.cassandra.cql3.statements.RequestValidations;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.SimpleDateSerializer;
 import org.apache.cassandra.serializers.TypeSerializer;
diff --git a/src/java/org/apache/cassandra/db/marshal/StringType.java b/src/java/org/apache/cassandra/db/marshal/StringType.java
new file mode 100644
index 0000000..29aff58
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/marshal/StringType.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.marshal;
+
+import java.nio.ByteBuffer;
+
+public abstract class StringType extends AbstractType<String>
+{
+    protected StringType(ComparisonType comparisonType)
+    {
+        super(comparisonType);
+    }
+
+    public ByteBuffer concat(StringType leftType,
+                             ByteBuffer left,
+                             StringType rightType,
+                             ByteBuffer right)
+    {
+        String leftS = leftType.compose(left);
+        String rightS = rightType.compose(right);
+
+        return decompose(leftS + rightS);
+    }
+}
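
Editor's note: StringType appears to exist so that UTF8Type and AsciiType share the concat helper. A minimal usage sketch, assuming direct use of the type singletons; the class name is invented for illustration.

import java.nio.ByteBuffer;

import org.apache.cassandra.db.marshal.AsciiType;
import org.apache.cassandra.db.marshal.StringType;
import org.apache.cassandra.db.marshal.UTF8Type;

public class StringConcatSketch
{
    public static void main(String[] args)
    {
        StringType utf8 = UTF8Type.instance;
        StringType ascii = AsciiType.instance;

        ByteBuffer left = utf8.decompose("foo");
        ByteBuffer right = ascii.decompose("bar");

        // Each operand is composed with its own type, then the result is re-encoded by the receiver.
        ByteBuffer result = utf8.concat(utf8, left, ascii, right);
        System.out.println(utf8.compose(result)); // foobar
    }
}
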
diff --git a/src/java/org/apache/cassandra/db/marshal/TemporalType.java b/src/java/org/apache/cassandra/db/marshal/TemporalType.java
index 4e2ac5a..945dae0 100644
--- a/src/java/org/apache/cassandra/db/marshal/TemporalType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TemporalType.java
@@ -21,6 +21,8 @@
 
 import org.apache.cassandra.cql3.Duration;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Base type for temporal types (timestamp, date ...).
  *
@@ -38,7 +40,7 @@
      */
     public ByteBuffer now()
     {
-        return fromTimeInMillis(System.currentTimeMillis());
+        return fromTimeInMillis(currentTimeMillis());
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/db/marshal/TimeType.java b/src/java/org/apache/cassandra/db/marshal/TimeType.java
index be20ba7..fd8fac4 100644
--- a/src/java/org/apache/cassandra/db/marshal/TimeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TimeType.java
@@ -19,7 +19,6 @@
 
 import java.nio.ByteBuffer;
 import java.time.LocalTime;
-import java.time.ZoneId;
 import java.time.ZoneOffset;
 
 import org.apache.cassandra.cql3.Constants;
diff --git a/src/java/org/apache/cassandra/db/marshal/TimeUUIDType.java b/src/java/org/apache/cassandra/db/marshal/TimeUUIDType.java
index 6cf1375..d3b0dec 100644
--- a/src/java/org/apache/cassandra/db/marshal/TimeUUIDType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TimeUUIDType.java
@@ -17,144 +17,22 @@
  */
 package org.apache.cassandra.db.marshal;
 
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-import org.apache.cassandra.cql3.CQL3Type;
-import org.apache.cassandra.cql3.ColumnSpecification;
-import org.apache.cassandra.cql3.Constants;
-import org.apache.cassandra.cql3.Term;
 import org.apache.cassandra.serializers.TypeSerializer;
-import org.apache.cassandra.utils.UUIDGen;
-import org.apache.cassandra.serializers.MarshalException;
-import org.apache.cassandra.serializers.TimeUUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
-public class TimeUUIDType extends TemporalType<UUID>
+// Fully compatible with UUID, and indeed is interpreted as UUID for UDF
+public class TimeUUIDType extends AbstractTimeUUIDType<TimeUUID>
 {
     public static final TimeUUIDType instance = new TimeUUIDType();
 
-    TimeUUIDType()
+    public TypeSerializer<TimeUUID> getSerializer()
     {
-        super(ComparisonType.CUSTOM);
-    } // singleton
-
-    public boolean isEmptyValueMeaningless()
-    {
-        return true;
-    }
-
-    public <VL, VR> int compareCustom(VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
-    {
-        // Compare for length
-        boolean p1 = accessorL.size(left) == 16, p2 = accessorR.size(right) == 16;
-        if (!(p1 & p2))
-        {
-            // should we assert exactly 16 bytes (or 0)? seems prudent
-            assert p1 || accessorL.isEmpty(left);
-            assert p2 || accessorR.isEmpty(right);
-            return p1 ? 1 : p2 ? -1 : 0;
-        }
-
-        long msb1 = accessorL.getLong(left, 0);
-        long msb2 = accessorR.getLong(right, 0);
-        msb1 = reorderTimestampBytes(msb1);
-        msb2 = reorderTimestampBytes(msb2);
-
-        assert (msb1 & topbyte(0xf0L)) == topbyte(0x10L);
-        assert (msb2 & topbyte(0xf0L)) == topbyte(0x10L);
-
-        int c = Long.compare(msb1, msb2);
-        if (c != 0)
-            return c;
-
-        // this has to be a signed per-byte comparison for compatibility
-        // so we transform the bytes so that a simple long comparison is equivalent
-        long lsb1 = signedBytesToNativeLong(accessorL.getLong(left, 8));
-        long lsb2 = signedBytesToNativeLong(accessorR.getLong(right, 8));
-        return Long.compare(lsb1, lsb2);
-    }
-
-    // takes as input 8 signed bytes in native machine order
-    // returns the first byte unchanged, and the following 7 bytes converted to an unsigned representation
-    // which is the same as a 2's complement long in native format
-    private static long signedBytesToNativeLong(long signedBytes)
-    {
-        return signedBytes ^ 0x0080808080808080L;
-    }
-
-    private static long topbyte(long topbyte)
-    {
-        return topbyte << 56;
-    }
-
-    protected static long reorderTimestampBytes(long input)
-    {
-        return    (input <<  48)
-                  | ((input <<  16) & 0xFFFF00000000L)
-                  |  (input >>> 32);
-    }
-
-    public ByteBuffer fromString(String source) throws MarshalException
-    {
-        ByteBuffer parsed = UUIDType.parse(source);
-        if (parsed == null)
-            throw new MarshalException(String.format("Unknown timeuuid representation: %s", source));
-        if (parsed.remaining() == 16 && UUIDType.version(parsed) != 1)
-            throw new MarshalException("TimeUUID supports only version 1 UUIDs");
-        return parsed;
+        return TimeUUID.Serializer.instance;
     }
 
     @Override
-    public Term fromJSONObject(Object parsed) throws MarshalException
+    public AbstractType<?> udfType()
     {
-        try
-        {
-            return new Constants.Value(fromString((String) parsed));
-        }
-        catch (ClassCastException exc)
-        {
-            throw new MarshalException(
-                    String.format("Expected a string representation of a timeuuid, but got a %s: %s", parsed.getClass().getSimpleName(), parsed));
-        }
-    }
-
-    public CQL3Type asCQL3Type()
-    {
-        return CQL3Type.Native.TIMEUUID;
-    }
-
-    public TypeSerializer<UUID> getSerializer()
-    {
-        return TimeUUIDSerializer.instance;
-    }
-
-    @Override
-    public int valueLengthIfFixed()
-    {
-        return 16;
-    }
-
-    @Override
-    public long toTimeInMillis(ByteBuffer value)
-    {
-        return UUIDGen.unixTimestamp(UUIDGen.getUUID(value));
-    }
-
-    @Override
-    public ByteBuffer addDuration(ByteBuffer temporal, ByteBuffer duration)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public ByteBuffer substractDuration(ByteBuffer temporal, ByteBuffer duration)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public ByteBuffer now()
-    {
-        return ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes());
+        return LegacyTimeUUIDType.instance;
     }
 }
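
Editor's note: the net effect of the TimeUUIDType rework is that the type now composes to the internal TimeUUID class, while udfType() steers UDF argument/return mapping to the UUID-based LegacyTimeUUIDType. A hedged sketch of that distinction, using only calls visible in this diff plus compose(); the class name is invented.

import java.nio.ByteBuffer;
import java.util.UUID;

import org.apache.cassandra.db.marshal.TimeUUIDType;
import org.apache.cassandra.utils.TimeUUID;

public class TimeUuidComposeSketch
{
    public static void main(String[] args)
    {
        ByteBuffer serialized = ByteBuffer.wrap(TimeUUID.Generator.nextTimeUUIDAsBytes());

        TimeUUID internal = TimeUUIDType.instance.compose(serialized);            // internal representation
        UUID forUdf = (UUID) TimeUUIDType.instance.udfType().compose(serialized);  // what a UDF would see
        System.out.println(internal + " / " + forUdf);
    }
}
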
diff --git a/src/java/org/apache/cassandra/db/marshal/TimestampType.java b/src/java/org/apache/cassandra/db/marshal/TimestampType.java
index 0dac6b0..ccf1da3 100644
--- a/src/java/org/apache/cassandra/db/marshal/TimestampType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TimestampType.java
@@ -23,7 +23,6 @@
 import org.apache.cassandra.cql3.Constants;
 import org.apache.cassandra.cql3.Duration;
 import org.apache.cassandra.cql3.Term;
-import org.apache.cassandra.cql3.statements.RequestValidations;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/src/java/org/apache/cassandra/db/marshal/TupleType.java b/src/java/org/apache/cassandra/db/marshal/TupleType.java
index 83fbb25..cc08487 100644
--- a/src/java/org/apache/cassandra/db/marshal/TupleType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TupleType.java
@@ -205,9 +205,17 @@
      */
     public ByteBuffer[] split(ByteBuffer value)
     {
-        ByteBuffer[] components = new ByteBuffer[size()];
+        return split(value, size(), this);
+    }
+
+    /**
+     * Split a tuple value into its component values.
+     */
+    public static ByteBuffer[] split(ByteBuffer value, int numberOfElements, TupleType type)
+    {
+        ByteBuffer[] components = new ByteBuffer[numberOfElements];
         ByteBuffer input = value.duplicate();
-        for (int i = 0; i < size(); i++)
+        for (int i = 0; i < numberOfElements; i++)
         {
             if (!input.hasRemaining())
                 return Arrays.copyOfRange(components, 0, i);
@@ -226,7 +234,7 @@
         {
             throw new InvalidRequestException(String.format(
             "Expected %s %s for %s column, but got more",
-            size(), size() == 1 ? "value" : "values", this.asCQL3Type()));
+            numberOfElements, numberOfElements == 1 ? "value" : "values", type.asCQL3Type()));
         }
 
         return components;
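
Editor's note: a purely illustrative sketch of the (now static-friendly) tuple split. It hand-packs a tuple<int, text> value using the 4-byte-length-per-component encoding that split expects; the class name and the literal values are invented for illustration.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.Int32Type;
import org.apache.cassandra.db.marshal.TupleType;
import org.apache.cassandra.db.marshal.UTF8Type;

public class TupleSplitSketch
{
    public static void main(String[] args)
    {
        List<AbstractType<?>> fields = Arrays.asList(Int32Type.instance, UTF8Type.instance);
        TupleType tuple = new TupleType(fields);

        // Hand-packed tuple<int, text> value: each component is a 4-byte length followed by its bytes.
        byte[] text = "foo".getBytes(StandardCharsets.UTF_8);
        ByteBuffer packed = ByteBuffer.allocate(4 + 4 + 4 + text.length);
        packed.putInt(4).putInt(42);          // component 0: int 42
        packed.putInt(text.length).put(text); // component 1: text "foo"
        packed.flip();

        ByteBuffer[] parts = tuple.split(packed);
        System.out.println(Int32Type.instance.compose(parts[0])); // 42
        System.out.println(UTF8Type.instance.compose(parts[1]));  // foo
    }
}
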
diff --git a/src/java/org/apache/cassandra/db/marshal/UTF8Type.java b/src/java/org/apache/cassandra/db/marshal/UTF8Type.java
index db62b57..e256070 100644
--- a/src/java/org/apache/cassandra/db/marshal/UTF8Type.java
+++ b/src/java/org/apache/cassandra/db/marshal/UTF8Type.java
@@ -32,7 +32,7 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
-public class UTF8Type extends AbstractType<String>
+public class UTF8Type extends StringType
 {
     public static final UTF8Type instance = new UTF8Type();
 
diff --git a/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java b/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
index 10532ff..a51836e 100644
--- a/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
+++ b/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
@@ -37,6 +37,8 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.db.ClusteringPrefix.Kind.*;
 
@@ -322,6 +324,12 @@
     /** returns a UUID from offset 0 */
     UUID toUUID(V value);
 
+    /** returns a TimeUUID from offset 0 */
+    TimeUUID toTimeUUID(V value);
+
+    /** returns a Ballot from offset 0 */
+    Ballot toBallot(V value);
+
     /**
      * writes the short value {@param value} to {@param dst} at offset {@param offset}
      * @return the number of bytes written to {@param value}
diff --git a/src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java b/src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java
new file mode 100644
index 0000000..ae12516
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ImmediateExecutor;
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.WrappedRunnable;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.concurrent.Promise;
+import org.apache.cassandra.utils.memory.HeapPool;
+import org.apache.cassandra.utils.memory.MemtableAllocator;
+import org.apache.cassandra.utils.memory.MemtableCleaner;
+import org.apache.cassandra.utils.memory.MemtablePool;
+import org.apache.cassandra.utils.memory.NativePool;
+import org.apache.cassandra.utils.memory.SlabPool;
+
+/**
+ * A memtable whose memory is tracked, and possibly allocated, via a MemtableAllocator from a MemtablePool.
+ * Provides methods for memory tracking and triggers flushes when the relevant limits are reached.
+ */
+public abstract class AbstractAllocatorMemtable extends AbstractMemtableWithCommitlog
+{
+    private static final Logger logger = LoggerFactory.getLogger(AbstractAllocatorMemtable.class);
+
+    public static final MemtablePool MEMORY_POOL = AbstractAllocatorMemtable.createMemtableAllocatorPool();
+
+    protected final Owner owner;
+    protected final MemtableAllocator allocator;
+
+    // Record the comparator of the CFS at the creation of the memtable. This
+    // is only used when a user updates the CF comparator, to know if the
+    // memtable was created with the new or old comparator.
+    protected final ClusteringComparator initialComparator;
+
+    private final long creationNano = Clock.Global.nanoTime();
+
+    private static MemtablePool createMemtableAllocatorPool()
+    {
+        Config.MemtableAllocationType allocationType = DatabaseDescriptor.getMemtableAllocationType();
+        long heapLimit = DatabaseDescriptor.getMemtableHeapSpaceInMiB() << 20;
+        long offHeapLimit = DatabaseDescriptor.getMemtableOffheapSpaceInMiB() << 20;
+        float memtableCleanupThreshold = DatabaseDescriptor.getMemtableCleanupThreshold();
+        MemtableCleaner cleaner = AbstractAllocatorMemtable::flushLargestMemtable;
+        return createMemtableAllocatorPoolInternal(allocationType, heapLimit, offHeapLimit, memtableCleanupThreshold, cleaner);
+    }
+
+    @VisibleForTesting
+    public static MemtablePool createMemtableAllocatorPoolInternal(Config.MemtableAllocationType allocationType,
+                                                                   long heapLimit, long offHeapLimit,
+                                                                   float memtableCleanupThreshold, MemtableCleaner cleaner)
+    {
+        switch (allocationType)
+        {
+        case unslabbed_heap_buffers_logged:
+            return new HeapPool.Logged(heapLimit, memtableCleanupThreshold, cleaner);
+        case unslabbed_heap_buffers:
+            logger.debug("Memtables allocating with on-heap buffers");
+            return new HeapPool(heapLimit, memtableCleanupThreshold, cleaner);
+        case heap_buffers:
+            logger.debug("Memtables allocating with on-heap slabs");
+            return new SlabPool(heapLimit, 0, memtableCleanupThreshold, cleaner);
+        case offheap_buffers:
+            logger.debug("Memtables allocating with off-heap buffers");
+            return new SlabPool(heapLimit, offHeapLimit, memtableCleanupThreshold, cleaner);
+        case offheap_objects:
+            logger.debug("Memtables allocating with off-heap objects");
+            return new NativePool(heapLimit, offHeapLimit, memtableCleanupThreshold, cleaner);
+        default:
+            throw new AssertionError();
+        }
+    }
+
+    // only to be used by init(), to setup the very first memtable for the cfs
+    public AbstractAllocatorMemtable(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Owner owner)
+    {
+        super(metadataRef, commitLogLowerBound);
+        this.allocator = MEMORY_POOL.newAllocator(metadataRef.toString());
+        this.initialComparator = metadata.get().comparator;
+        this.owner = owner;
+        scheduleFlush();
+    }
+
+    public MemtableAllocator getAllocator()
+    {
+        return allocator;
+    }
+
+    @Override
+    public boolean shouldSwitch(ColumnFamilyStore.FlushReason reason)
+    {
+        switch (reason)
+        {
+        case SCHEMA_CHANGE:
+            return initialComparator != metadata().comparator // If the CF comparator has changed, because our partitions reference the old one
+                   || metadata().params.memtable.factory() != factory(); // If a different type of memtable is requested
+        case OWNED_RANGES_CHANGE:
+            return false; // by default we don't use the local ranges, thus this has no effect
+        default:
+            return true;
+        }
+    }
+
+    public void metadataUpdated()
+    {
+        // We decided not to swap out this memtable, but if the flush period has changed we must schedule it for the
+        // new expiration time.
+        scheduleFlush();
+    }
+
+    public void localRangesUpdated()
+    {
+        // nothing to be done by default
+    }
+
+    public void performSnapshot(String snapshotName)
+    {
+        throw new AssertionError("performSnapshot must be implemented if shouldSwitch(SNAPSHOT) can return false.");
+    }
+
+    protected abstract Factory factory();
+
+    public void switchOut(OpOrder.Barrier writeBarrier, AtomicReference<CommitLogPosition> commitLogUpperBound)
+    {
+        super.switchOut(writeBarrier, commitLogUpperBound);
+        allocator.setDiscarding();
+    }
+
+    public void discard()
+    {
+        super.discard();
+        allocator.setDiscarded();
+    }
+
+    public String toString()
+    {
+        MemoryUsage usage = Memtable.getMemoryUsage(this);
+        return String.format("Memtable-%s@%s(%s serialized bytes, %s ops, %s)",
+                             metadata.get().name,
+                             hashCode(),
+                             FBUtilities.prettyPrintMemory(getLiveDataSize()),
+                             operationCount(),
+                             usage);
+    }
+
+    @Override
+    public void addMemoryUsageTo(MemoryUsage stats)
+    {
+        stats.ownershipRatioOnHeap += getAllocator().onHeap().ownershipRatio();
+        stats.ownershipRatioOffHeap += getAllocator().offHeap().ownershipRatio();
+        stats.ownsOnHeap += getAllocator().onHeap().owns();
+        stats.ownsOffHeap += getAllocator().offHeap().owns();
+    }
+
+    public void markExtraOnHeapUsed(long additionalSpace, OpOrder.Group opGroup)
+    {
+        getAllocator().onHeap().allocate(additionalSpace, opGroup);
+    }
+
+    public void markExtraOffHeapUsed(long additionalSpace, OpOrder.Group opGroup)
+    {
+        getAllocator().offHeap().allocate(additionalSpace, opGroup);
+    }
+
+    void scheduleFlush()
+    {
+        int period = metadata().params.memtableFlushPeriodInMs;
+        if (period > 0)
+            scheduleFlush(owner, period);
+    }
+
+    private static void scheduleFlush(Owner owner, int period)
+    {
+        logger.trace("scheduling flush in {} ms", period);
+        WrappedRunnable runnable = new WrappedRunnable()
+        {
+            protected void runMayThrow()
+            {
+                Memtable current = owner.getCurrentMemtable();
+                if (current instanceof AbstractAllocatorMemtable)
+                    ((AbstractAllocatorMemtable) current).flushIfPeriodExpired();
+            }
+        };
+        ScheduledExecutors.scheduledTasks.scheduleSelfRecurring(runnable, period, TimeUnit.MILLISECONDS);
+    }
+
+    private void flushIfPeriodExpired()
+    {
+        int period = metadata().params.memtableFlushPeriodInMs;
+        if (period > 0 && (Clock.Global.nanoTime() - creationNano >= TimeUnit.MILLISECONDS.toNanos(period)))
+        {
+            if (isClean())
+            {
+                // if we're still clean, instead of swapping just reschedule a flush for later
+                scheduleFlush(owner, period);
+            }
+            else
+            {
+                // we'll be rescheduled by the constructor of the Memtable.
+                owner.signalFlushRequired(AbstractAllocatorMemtable.this,
+                                          ColumnFamilyStore.FlushReason.MEMTABLE_PERIOD_EXPIRED);
+            }
+        }
+    }
+
+    /**
+     * Finds the largest memtable, as a percentage of *either* on- or off-heap memory limits, and immediately
+     * queues it for flushing. If the memtable selected is flushed before this completes, no work is done.
+     */
+    public static Future<Boolean> flushLargestMemtable()
+    {
+        float largestRatio = 0f;
+        AbstractAllocatorMemtable largestMemtable = null;
+        Memtable.MemoryUsage largestUsage = null;
+        float liveOnHeap = 0, liveOffHeap = 0;
+        // we take a reference to the current main memtable for the CF prior to snapping its ownership ratios
+        // to ensure we have some ordering guarantee for performing the switchMemtableIf(), i.e. we will only
+        // swap if the memtables we are measuring here haven't already been swapped by the time we try to swap them
+        for (Memtable currentMemtable : ColumnFamilyStore.activeMemtables())
+        {
+            if (!(currentMemtable instanceof AbstractAllocatorMemtable))
+                continue;
+            AbstractAllocatorMemtable current = (AbstractAllocatorMemtable) currentMemtable;
+
+            // find the total ownership ratio for the memtable and all SecondaryIndexes owned by this CF,
+            // both on- and off-heap, and select the larger of the two ratios to weight this CF
+            MemoryUsage usage = Memtable.newMemoryUsage();
+            current.addMemoryUsageTo(usage);
+
+            for (Memtable indexMemtable : current.owner.getIndexMemtables())
+                if (indexMemtable instanceof AbstractAllocatorMemtable)
+                    indexMemtable.addMemoryUsageTo(usage);
+
+            float ratio = Math.max(usage.ownershipRatioOnHeap, usage.ownershipRatioOffHeap);
+            if (ratio > largestRatio)
+            {
+                largestMemtable = current;
+                largestUsage = usage;
+                largestRatio = ratio;
+            }
+
+            liveOnHeap += usage.ownershipRatioOnHeap;
+            liveOffHeap += usage.ownershipRatioOffHeap;
+        }
+
+        Promise<Boolean> returnFuture = new AsyncPromise<>();
+
+        if (largestMemtable != null)
+        {
+            float usedOnHeap = MEMORY_POOL.onHeap.usedRatio();
+            float usedOffHeap = MEMORY_POOL.offHeap.usedRatio();
+            float flushingOnHeap = MEMORY_POOL.onHeap.reclaimingRatio();
+            float flushingOffHeap = MEMORY_POOL.offHeap.reclaimingRatio();
+            logger.info("Flushing largest {} to free up room. Used total: {}, live: {}, flushing: {}, this: {}",
+                        largestMemtable.owner, ratio(usedOnHeap, usedOffHeap), ratio(liveOnHeap, liveOffHeap),
+                        ratio(flushingOnHeap, flushingOffHeap), ratio(largestUsage.ownershipRatioOnHeap, largestUsage.ownershipRatioOffHeap));
+
+            Future<CommitLogPosition> flushFuture = largestMemtable.owner.signalFlushRequired(largestMemtable, ColumnFamilyStore.FlushReason.MEMTABLE_LIMIT);
+            flushFuture.addListener(() -> {
+                try
+                {
+                    flushFuture.get();
+                    returnFuture.trySuccess(true);
+                }
+                catch (Throwable t)
+                {
+                    returnFuture.tryFailure(t);
+                }
+            }, ImmediateExecutor.INSTANCE);
+        }
+        else
+        {
+            logger.debug("Flushing of largest memtable, not done, no memtable found");
+
+            returnFuture.trySuccess(false);
+        }
+
+        return returnFuture;
+    }
+
+    private static String ratio(float onHeap, float offHeap)
+    {
+        return String.format("%.2f/%.2f", onHeap, offHeap);
+    }
+}
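
Editor's note: the pool created above is selected by the configured memtable allocation type. A hedged sketch of exercising the @VisibleForTesting factory directly; the limits (256 MiB on-heap, no off-heap, 25% cleanup threshold) and the class name are made up for illustration, and the real cleaner from this file is reused.

import org.apache.cassandra.config.Config;
import org.apache.cassandra.db.memtable.AbstractAllocatorMemtable;
import org.apache.cassandra.utils.memory.MemtableCleaner;
import org.apache.cassandra.utils.memory.MemtablePool;

public class MemtablePoolSketch
{
    public static void main(String[] args)
    {
        MemtableCleaner cleaner = AbstractAllocatorMemtable::flushLargestMemtable;
        MemtablePool pool = AbstractAllocatorMemtable.createMemtableAllocatorPoolInternal(
                Config.MemtableAllocationType.heap_buffers, 256L << 20, 0L, 0.25f, cleaner);
        System.out.println(pool.getClass().getSimpleName()); // expected: SlabPool for heap_buffers
    }
}
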
diff --git a/src/java/org/apache/cassandra/db/memtable/AbstractMemtable.java b/src/java/org/apache/cassandra/db/memtable/AbstractMemtable.java
new file mode 100644
index 0000000..ca6dbf6
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/AbstractMemtable.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.TableMetadataRef;
+
+public abstract class AbstractMemtable implements Memtable
+{
+    protected final AtomicLong currentOperations = new AtomicLong(0);
+    protected final ColumnsCollector columnsCollector;
+    protected final StatsCollector statsCollector = new StatsCollector();
+    // The smallest timestamp for all partitions stored in this memtable
+    protected AtomicLong minTimestamp = new AtomicLong(Long.MAX_VALUE);
+    // The smallest local deletion time for all partitions in this memtable
+    protected AtomicInteger minLocalDeletionTime = new AtomicInteger(Integer.MAX_VALUE);
+    // Note: statsCollector tracks statistics corresponding to the two fields above, but it starts from an epoch
+    // value which is not correct for their usage.
+
+    protected TableMetadataRef metadata;
+
+    public AbstractMemtable(TableMetadataRef metadataRef)
+    {
+        this.metadata = metadataRef;
+        this.columnsCollector = new ColumnsCollector(metadata.get().regularAndStaticColumns());
+    }
+
+    @VisibleForTesting
+    public AbstractMemtable(TableMetadataRef metadataRef, long minTimestamp)
+    {
+        this.metadata = metadataRef;
+        this.columnsCollector = new ColumnsCollector(metadata.get().regularAndStaticColumns());
+        this.minTimestamp = new AtomicLong(minTimestamp);
+    }
+
+    public TableMetadata metadata()
+    {
+        return metadata.get();
+    }
+
+    public long operationCount()
+    {
+        return currentOperations.get();
+    }
+
+    /**
+     * Returns the minTS if one is available, otherwise NO_MIN_TIMESTAMP.
+     *
+     * EncodingStats uses a synthetic epoch TS at 2015. We don't want to leak that (CASSANDRA-18118) so we return NO_MIN_TIMESTAMP instead.
+     *
+     * @return The minTS or NO_MIN_TIMESTAMP if none available
+     */
+    public long getMinTimestamp()
+    {
+        return minTimestamp.get() != EncodingStats.NO_STATS.minTimestamp ? minTimestamp.get() : NO_MIN_TIMESTAMP;
+    }
+
+    public int getMinLocalDeletionTime()
+    {
+        return minLocalDeletionTime.get();
+    }
+
+    protected static void updateMin(AtomicLong minTracker, long newValue)
+    {
+        while (true)
+        {
+            long existing = minTracker.get();
+            if (existing <= newValue)
+                break;
+            if (minTracker.compareAndSet(existing, newValue))
+                break;
+        }
+    }
+
+    protected static void updateMin(AtomicInteger minTracker, int newValue)
+    {
+        while (true)
+        {
+            int existing = minTracker.get();
+            if (existing <= newValue)
+                break;
+            if (minTracker.compareAndSet(existing, newValue))
+                break;
+        }
+    }
+
+    RegularAndStaticColumns columns()
+    {
+        return columnsCollector.get();
+    }
+
+    EncodingStats encodingStats()
+    {
+        return statsCollector.get();
+    }
+
+    protected static class ColumnsCollector
+    {
+        private final HashMap<ColumnMetadata, AtomicBoolean> predefined = new HashMap<>();
+        private final ConcurrentSkipListSet<ColumnMetadata> extra = new ConcurrentSkipListSet<>();
+
+        ColumnsCollector(RegularAndStaticColumns columns)
+        {
+            for (ColumnMetadata def : columns.statics)
+                predefined.put(def, new AtomicBoolean());
+            for (ColumnMetadata def : columns.regulars)
+                predefined.put(def, new AtomicBoolean());
+        }
+
+        public void update(RegularAndStaticColumns columns)
+        {
+            for (ColumnMetadata s : columns.statics)
+                update(s);
+            for (ColumnMetadata r : columns.regulars)
+                update(r);
+        }
+
+        public void update(ColumnsCollector other)
+        {
+            for (Map.Entry<ColumnMetadata, AtomicBoolean> v : other.predefined.entrySet())
+                if (v.getValue().get())
+                    update(v.getKey());
+
+            extra.addAll(other.extra);
+        }
+
+        private void update(ColumnMetadata definition)
+        {
+            AtomicBoolean present = predefined.get(definition);
+            if (present != null)
+            {
+                if (!present.get())
+                    present.set(true);
+            }
+            else
+            {
+                extra.add(definition);
+            }
+        }
+
+        /**
+         * Get the current state of the columns set.
+         *
+         * Note: If this is executed while mutations are still being performed on the table (e.g. to prepare
+         * an sstable for streaming when Memtable.Factory.streamFromMemtable() is true), the resulting view may be
+         * in a somewhat inconsistent state (it may include partial updates, as well as miss updates older than
+         * ones it does include).
+         */
+        public RegularAndStaticColumns get()
+        {
+            RegularAndStaticColumns.Builder builder = RegularAndStaticColumns.builder();
+            for (Map.Entry<ColumnMetadata, AtomicBoolean> e : predefined.entrySet())
+                if (e.getValue().get())
+                    builder.add(e.getKey());
+            return builder.addAll(extra).build();
+        }
+    }
+
+    protected static class StatsCollector
+    {
+        private final AtomicReference<EncodingStats> stats = new AtomicReference<>(EncodingStats.NO_STATS);
+
+        public void update(EncodingStats newStats)
+        {
+            while (true)
+            {
+                EncodingStats current = stats.get();
+                EncodingStats updated = current.mergeWith(newStats);
+                if (stats.compareAndSet(current, updated))
+                    return;
+            }
+        }
+
+        public EncodingStats get()
+        {
+            return stats.get();
+        }
+    }
+
+    protected abstract class AbstractFlushablePartitionSet<P extends Partition> implements FlushablePartitionSet<P>
+    {
+        public long dataSize()
+        {
+            return getLiveDataSize();
+        }
+
+        public CommitLogPosition commitLogLowerBound()
+        {
+            return AbstractMemtable.this.getCommitLogLowerBound();
+        }
+
+        public LastCommitLogPosition commitLogUpperBound()
+        {
+            return AbstractMemtable.this.getFinalCommitLogUpperBound();
+        }
+
+        public EncodingStats encodingStats()
+        {
+            return AbstractMemtable.this.encodingStats();
+        }
+
+        public RegularAndStaticColumns columns()
+        {
+            return AbstractMemtable.this.columns();
+        }
+    }
+}
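
Editor's note: the updateMin helpers above are the usual lock-free "monotonic minimum" idiom: retry a compareAndSet until either the stored value is already no larger than ours or our candidate wins. A standalone restatement, purely illustrative and not part of the patch.

import java.util.concurrent.atomic.AtomicLong;

final class MonotonicMin
{
    private final AtomicLong min = new AtomicLong(Long.MAX_VALUE);

    void offer(long candidate)
    {
        long current;
        // Loop until the stored minimum is <= candidate, or we successfully install candidate.
        while ((current = min.get()) > candidate && !min.compareAndSet(current, candidate))
        {
            // another thread raced us; re-read and retry
        }
    }

    long get()
    {
        return min.get();
    }
}
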
diff --git a/src/java/org/apache/cassandra/db/memtable/AbstractMemtableWithCommitlog.java b/src/java/org/apache/cassandra/db/memtable/AbstractMemtableWithCommitlog.java
new file mode 100644
index 0000000..4fe39a1
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/AbstractMemtableWithCommitlog.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+
+/**
+ * Memtable that uses a commit log for persistence. Provides methods for tracking the commit log positions covered by
+ * it and for safely switching between memtables.
+ */
+public abstract class AbstractMemtableWithCommitlog extends AbstractMemtable
+{
+    // The approximate commit log lower bound covered by this memtable; must be <= commitLogLowerBound once our
+    // predecessor has been finalised, which is enforced in ColumnFamilyStore.setCommitLogUpperBound
+    private final CommitLogPosition approximateCommitLogLowerBound = CommitLog.instance.getCurrentPosition();
+    // the precise lower bound of CommitLogPosition owned by this memtable; equal to its predecessor's commitLogUpperBound
+    private final AtomicReference<CommitLogPosition> commitLogLowerBound;
+    // the write barrier for directing writes to this memtable or the next during a switch
+    private volatile OpOrder.Barrier writeBarrier;
+    // the precise upper bound of CommitLogPosition owned by this memtable
+    private volatile AtomicReference<CommitLogPosition> commitLogUpperBound;
+
+    public AbstractMemtableWithCommitlog(TableMetadataRef metadataRef, AtomicReference<CommitLogPosition> commitLogLowerBound)
+    {
+        super(metadataRef);
+        this.commitLogLowerBound = commitLogLowerBound;
+    }
+
+    public CommitLogPosition getApproximateCommitLogLowerBound()
+    {
+        return approximateCommitLogLowerBound;
+    }
+
+    public void switchOut(OpOrder.Barrier writeBarrier, AtomicReference<CommitLogPosition> commitLogUpperBound)
+    {
+        // This can prepare the memtable data for deletion; it will still be used while the flush is proceeding.
+        // A setDiscarded call will follow.
+        assert this.writeBarrier == null;
+        this.commitLogUpperBound = commitLogUpperBound;
+        this.writeBarrier = writeBarrier;
+    }
+
+    public void discard()
+    {
+        assert writeBarrier != null : "Memtable must be switched out before being discarded.";
+    }
+
+    // decide if this memtable should take the write, or if it should go to the next memtable
+    @Override
+    public boolean accepts(OpOrder.Group opGroup, CommitLogPosition commitLogPosition)
+    {
+        // if the barrier hasn't been set yet, then this memtable is still the newest and is taking ALL writes.
+        OpOrder.Barrier barrier = this.writeBarrier;
+        if (barrier == null)
+            return true;
+        // Note that if this call races with taking the barrier, the opGroup and commit log position we were given must
+        // necessarily precede the barrier and any LastCommitLogPosition that gets set, thus this function will return
+        // true and no update to commitLogUpperBound is necessary.
+
+        // If the barrier has been set and issued, but is in the past, we are definitely destined for a future memtable.
+        // Because we issue the barrier after taking LastCommitLogPosition and mutations take their position after
+        // taking the opGroup, this condition also ensures the given commit log position is greater than the chosen
+        // upper bound.
+        if (!barrier.isAfter(opGroup))
+            return false;
+        // We are in the window of time between the barrier being constructed (and the memtable being switched out)
+        // and the barrier being issued.
+        // if we aren't durable we are directed only by the barrier
+        if (commitLogPosition == null)
+            return true;
+        while (true)
+        {
+            // If the CL boundary has been set, the mutation can be accepted depending on whether it falls before it.
+            // However, if it has not been set, the old memtable must still accept writes but we must also ensure that
+            // their positions are accounted for in the boundary (as there may be a delay between taking the log
+            // position for the boundary and setting it where a mutation sneaks in).
+            // Thus, if the boundary hasn't been finalised yet, we simply update it to the max of its current value and
+            // ours; this permits us to coordinate a safe boundary, as the boundary choice is made atomically wrt our
+            // max() maintenance, so an operation cannot sneak into the past.
+            CommitLogPosition currentLast = commitLogUpperBound.get();
+            if (currentLast instanceof LastCommitLogPosition)
+                return currentLast.compareTo(commitLogPosition) >= 0;
+            if (currentLast != null && currentLast.compareTo(commitLogPosition) >= 0)
+                return true;
+            if (commitLogUpperBound.compareAndSet(currentLast, commitLogPosition))
+                return true;
+        }
+    }
+
+    public CommitLogPosition getCommitLogLowerBound()
+    {
+        return commitLogLowerBound.get();
+    }
+
+    public LastCommitLogPosition getFinalCommitLogUpperBound()
+    {
+        assert commitLogUpperBound != null : "Commit log upper bound should be set before flushing";
+        assert commitLogUpperBound.get() instanceof LastCommitLogPosition : "Commit log upper bound has not been sealed yet? " + commitLogUpperBound.get();
+        return (LastCommitLogPosition) commitLogUpperBound.get();
+    }
+
+    public boolean mayContainDataBefore(CommitLogPosition position)
+    {
+        return approximateCommitLogLowerBound.compareTo(position) < 0;
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/memtable/Flushing.java b/src/java/org/apache/cassandra/db/memtable/Flushing.java
new file mode 100644
index 0000000..1a31374
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/Flushing.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import com.google.common.base.Throwables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.db.DiskBoundaries;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.db.SerializationHeader;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.commitlog.IntervalSet;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SSTableMultiWriter;
+import org.apache.cassandra.io.sstable.format.SSTableFormat;
+import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
+import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.FBUtilities;
+
+public class Flushing
+{
+    private static final Logger logger = LoggerFactory.getLogger(Flushing.class);
+
+    private Flushing() // prevent instantiation
+    {
+    }
+
+    public static List<FlushRunnable> flushRunnables(ColumnFamilyStore cfs,
+                                                     Memtable memtable,
+                                                     LifecycleTransaction txn)
+    {
+        DiskBoundaries diskBoundaries = cfs.getDiskBoundaries();
+        List<PartitionPosition> boundaries = diskBoundaries.positions;
+        List<Directories.DataDirectory> locations = diskBoundaries.directories;
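+        // If no disk boundaries are defined, the whole memtable is flushed as a single sstable to any writeable
+        // location; otherwise one flush runnable is created per data directory, each covering that directory's
+        // token range.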
+        if (boundaries == null)
+        {
+            FlushRunnable runnable = flushRunnable(cfs, memtable, null, null, txn, null);
+            return Collections.singletonList(runnable);
+        }
+
+        List<FlushRunnable> runnables = new ArrayList<>(boundaries.size());
+        PartitionPosition rangeStart = boundaries.get(0).getPartitioner().getMinimumToken().minKeyBound();
+        try
+        {
+            for (int i = 0; i < boundaries.size(); i++)
+            {
+                PartitionPosition t = boundaries.get(i);
+                FlushRunnable runnable = flushRunnable(cfs, memtable, rangeStart, t, txn, locations.get(i));
+
+                runnables.add(runnable);
+                rangeStart = t;
+            }
+            return runnables;
+        }
+        catch (Throwable e)
+        {
+            throw Throwables.propagate(abortRunnables(runnables, e));
+        }
+    }
+
+    @SuppressWarnings("resource")   // writer owned by runnable, to be closed or aborted by its caller
+    static FlushRunnable flushRunnable(ColumnFamilyStore cfs,
+                                       Memtable memtable,
+                                       PartitionPosition from,
+                                       PartitionPosition to,
+                                       LifecycleTransaction txn,
+                                       Directories.DataDirectory flushLocation)
+    {
+        Memtable.FlushablePartitionSet<?> flushSet = memtable.getFlushSet(from, to);
+        SSTableFormat.Type formatType = SSTableFormat.Type.current();
+        long estimatedSize = formatType.info.getWriterFactory().estimateSize(flushSet);
+
+        Descriptor descriptor = flushLocation == null
+                                ? cfs.newSSTableDescriptor(cfs.getDirectories().getWriteableLocationAsFile(estimatedSize), formatType)
+                                : cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(flushLocation), formatType);
+
+        SSTableMultiWriter writer = createFlushWriter(cfs,
+                                                      flushSet,
+                                                      txn,
+                                                      descriptor,
+                                                      flushSet.partitionCount());
+
+        return new FlushRunnable(flushSet, writer, cfs.metric, true);
+    }
+
+    public static Throwable abortRunnables(List<FlushRunnable> runnables, Throwable t)
+    {
+        if (runnables != null)
+            for (FlushRunnable runnable : runnables)
+                t = runnable.writer.abort(t);
+        return t;
+    }
+
+    public static class FlushRunnable implements Callable<SSTableMultiWriter>
+    {
+        private final Memtable.FlushablePartitionSet<?> toFlush;
+
+        private final SSTableMultiWriter writer;
+        private final TableMetrics metrics;
+        private final boolean isBatchLogTable;
+        private final boolean logCompletion;
+
+        public FlushRunnable(Memtable.FlushablePartitionSet<?> flushSet,
+                             SSTableMultiWriter writer,
+                             TableMetrics metrics,
+                             boolean logCompletion)
+        {
+            this.toFlush = flushSet;
+            this.writer = writer;
+            this.metrics = metrics;
+            this.isBatchLogTable = toFlush.metadata() == SystemKeyspace.Batches;
+            this.logCompletion = logCompletion;
+        }
+
+        private void writeSortedContents()
+        {
+            logger.info("Writing {}, flushed range = [{}, {})", toFlush.memtable(), toFlush.from(), toFlush.to());
+
+            // (we can't clear out the map as-we-go to free up memory,
+            //  since the memtable is being used for queries in the "pending flush" category)
+            for (Partition partition : toFlush)
+            {
+                // Each batchlog partition is a separate entry in the log. For an entry, we only perform two
+                // operations: 1) we insert the entry and 2) we delete it. Further, batchlog data is strictly local,
+                // so we don't need to preserve tombstones for repair. So if both operations are in this
+                // memtable (which will almost always be the case if there is no ongoing failure), we can
+                // just skip the entry (CASSANDRA-4667).
+                if (isBatchLogTable && !partition.partitionLevelDeletion().isLive() && partition.hasRows())
+                    continue;
+
+                if (!partition.isEmpty())
+                {
+                    try (UnfilteredRowIterator iter = partition.unfilteredIterator())
+                    {
+                        writer.append(iter);
+                    }
+                }
+            }
+
+            if (logCompletion)
+            {
+                long bytesFlushed = writer.getFilePointer();
+                logger.info("Completed flushing {} ({}) for commitlog position {}",
+                            writer.getFilename(),
+                            FBUtilities.prettyPrintMemory(bytesFlushed),
+                            toFlush.memtable().getFinalCommitLogUpperBound());
+                // Update the metrics
+                metrics.bytesFlushed.inc(bytesFlushed);
+            }
+        }
+
+        @Override
+        public SSTableMultiWriter call()
+        {
+            writeSortedContents();
+            return writer;
+            // We don't close the writer on error as the caller aborts all runnables if one happens.
+        }
+
+        @Override
+        public String toString()
+        {
+            return "Flush " + toFlush.metadata().keyspace + '.' + toFlush.metadata().name;
+        }
+    }
+
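+    /**
+     * Creates the sstable writer used to flush the given partition set, recording the commit log interval covered
+     * by the source memtable in the sstable metadata.
+     */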
+    public static SSTableMultiWriter createFlushWriter(ColumnFamilyStore cfs,
+                                                       Memtable.FlushablePartitionSet<?> flushSet,
+                                                       LifecycleTransaction txn,
+                                                       Descriptor descriptor,
+                                                       long partitionCount)
+    {
+        MetadataCollector sstableMetadataCollector = new MetadataCollector(flushSet.metadata().comparator)
+                                                     .commitLogIntervals(new IntervalSet<>(flushSet.commitLogLowerBound(),
+                                                                                           flushSet.commitLogUpperBound()));
+
+        return cfs.createSSTableMultiWriter(descriptor,
+                                            partitionCount,
+                                            ActiveRepairService.UNREPAIRED_SSTABLE,
+                                            ActiveRepairService.NO_PENDING_REPAIR,
+                                            false,
+                                            sstableMetadataCollector,
+                                            new SerializationHeader(true,
+                                                                    flushSet.metadata(),
+                                                                    flushSet.columns(),
+                                                                    flushSet.encodingStats()),
+                                            txn);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/memtable/Memtable.java b/src/java/org/apache/cassandra/db/memtable/Memtable.java
new file mode 100644
index 0000000..3a18a8c
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/Memtable.java
@@ -0,0 +1,433 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.UnfilteredSource;
+import org.apache.cassandra.index.transactions.UpdateTransaction;
+import org.apache.cassandra.io.sstable.format.SSTableWriter;
+import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+
+/**
+ * Memtable interface. This defines the operations the ColumnFamilyStore can perform with memtables.
+ * They are of several types:
+ * - construction factory interface
+ * - write and read operations: put, rowIterator and partitionIterator
+ * - statistics and features, including partition counts, data size, encoding stats, written columns
+ * - memory usage tracking, including methods of retrieval and of adding extra allocated space (used by non-CFS
+ *   secondary indexes)
+ * - flush functionality, preparing the set of partitions to flush for given ranges
+ * - lifecycle management, i.e. operations that prepare and execute a switch to a different memtable, together
+ *   with ways of tracking the affected commit log spans
+ *
+ * See Memtable_API.md for details on implementing and using alternative memtable implementations.
+ */
+public interface Memtable extends Comparable<Memtable>, UnfilteredSource
+{
+    public static final long NO_MIN_TIMESTAMP = -1;
+
+    // Construction
+
+    /**
+     * Factory interface for constructing memtables, and querying write durability features.
+     *
+     * The factory is chosen using the MemtableParams class (the configuration name is passed as argument to
+     * {@code CREATE TABLE ... WITH memtable = '<configuration_name>'}, where the configuration definition is a map
+     * given under {@code memtable.configurations} in cassandra.yaml). To make that possible, implementations must
+     * provide either a static {@code FACTORY} field (if they accept no further options) or a static
+     * {@code factory(Map<String, String>)} method. In the latter case, the method should avoid creating
+     * multiple instances of the factory for the same parameters, or factories should at least implement hashCode and
+     * equals.
+     */
+    interface Factory
+    {
+        /**
+         * Create a memtable.
+         *
+         * @param commitLogLowerBound A commit log lower bound for the new memtable. This will be equal to the previous
+         *                            memtable's upper bound and defines the span of positions that any flushed sstable
+         *                            will cover.
+         * @param metadataRef Pointer to the up-to-date table metadata.
+         * @param owner Owning objects that will receive flush requests triggered by the memtable (e.g. on expiration).
+         */
+        Memtable create(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Owner owner);
+
+        /**
+         * If the memtable can achieve write durability directly (i.e. using some feature other than the commitlog, e.g.
+         * persistent memory), it can return true here, in which case the commit log will not store mutations for this
+         * table.
+         * Note that doing so will prevent point-in-time restores and changed data capture, thus a durable memtable must
+         * allow the option of turning commit log writing on even if it does not need it.
+         */
+        default boolean writesShouldSkipCommitLog()
+        {
+            return false;
+        }
+
+        /**
+         * This should be true if the memtable can achieve write durability for crash recovery directly (i.e. using some
+         * feature other than the commitlog, e.g. persistent memory).
+         * Setting this flag to true means that the commitlog should not replay mutations for this table on restart,
+         * and that it should not try to preserve segments that contain relevant data.
+         * Unless writesShouldSkipCommitLog() is also true, writes will be recorded in the commit log as they may be
+         * needed for changed data capture or point-in-time restore.
+         */
+        default boolean writesAreDurable()
+        {
+            return false;
+        }
+
+        /**
+         * Normally we can receive streamed sstables directly, skipping the memtable stage (zero-copy-streaming). When
+         * the memtable is the primary data store (e.g. persistent memtables), it will usually prefer to receive the
+         * data instead.
+         *
+         * If this returns true, the content of all streamed sstables will be read and replayed as mutations, disabling
+         * zero-copy streaming.
+         */
+        default boolean streamToMemtable()
+        {
+            return false;
+        }
+
+        /**
+         * When we need to stream data, we usually flush and stream the resulting sstables. This will not work correctly
+         * if the memtable does not want to flush for streaming (e.g. persistent memtables acting as primary data
+         * store), because data (not just recent data) will be missing from the streamed view. Such memtables must
+         * present their data separately for streaming.
+         * In other words, if the memtable returns false on shouldSwitch(STREAMING/REPAIR), its factory must return true
+         * here.
+         *
+         * If this flag returns true, streaming will write the relevant content that resides in the memtable to
+         * temporary sstables, stream these sstables and then delete them.
+         */
+        default boolean streamFromMemtable()
+        {
+            return false;
+        }
+
+        /**
+         * Override this method to include implementation-specific memtable metrics in the table metrics.
+         *
+         * Memtable metrics lifecycle matches table lifecycle. It is the table that owns the metrics and
+         * decides when to release them.
+         */
+        default TableMetrics.ReleasableMetric createMemtableMetrics(TableMetadataRef metadataRef)
+        {
+            return null;
+        }
+    }
+
+    /**
+     * Interface for providing signals back and requesting information from the owner, i.e. the object that controls the
+     * memtable. This is usually the ColumnFamilyStore; the interface is used to limit the dependency of memtables on
+     * the details of its implementation.
+     */
+    interface Owner
+    {
+        /** Signal to the owner that a flush is required (e.g. in response to hitting space limits) */
+        Future<CommitLogPosition> signalFlushRequired(Memtable memtable, ColumnFamilyStore.FlushReason reason);
+
+        /** Get the current memtable for this owner. Used to avoid capturing memtable in scheduled flush tasks. */
+        Memtable getCurrentMemtable();
+
+        /**
+         * Collect the index memtables flushed together with this. Used to accurately calculate memory that would be
+         * freed by a flush.
+         */
+        Iterable<Memtable> getIndexMemtables();
+
+        /**
+         * Construct a list of boundaries that split the locally-owned ranges into the given number of shards,
+         * splitting the owned space evenly. It is up to the memtable to use this information.
+         * Any changes in the ring structure (e.g. added or removed nodes) will invalidate the splits; in such a case
+         * the memtable will be sent a {@link #shouldSwitch}(OWNED_RANGES_CHANGE) and, should that return false, a
+         * {@link #localRangesUpdated()} call.
+         */
+        ShardBoundaries localRangeSplits(int shardCount);
+    }
+
+    // Main write and read operations
+
+    /**
+     * Put new data in the memtable. This operation may block until enough memory is available in the memory pool.
+     *
+     * @param update the partition update, may be a new partition or an update to an existing one
+     * @param indexer receives information about the update's effect
+     * @param opGroup write operation group, used to permit the operation to complete if it is needed to complete a
+     *                flush to free space.
+     *
+     * @return the smallest timestamp delta between corresponding rows of the existing data and the update, where a
+     * timestamp delta is computed as the difference between the cells and DeletionTimes from any existing partition
+     * and those in {@code update}. See CASSANDRA-7979.
+     */
+    long put(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup);
+
+    // Read operations are provided by the UnfilteredSource interface.
+
+    // Statistics
+
+    /** Number of partitions stored in the memtable */
+    long partitionCount();
+
+    /** Size of the data not accounting for any metadata / mapping overheads */
+    long getLiveDataSize();
+
+    /**
+     * Number of "operations" (in the sense defined in {@link PartitionUpdate#operationCount()}) the memtable has
+     * executed.
+     */
+    long operationCount();
+
+    /**
+     * The table's definition metadata.
+     *
+     * Note that this tracks the current state of the table and is not necessarily the same as what was used to create
+     * the memtable.
+     */
+    TableMetadata metadata();
+
+
+    // Memory usage tracking
+
+    /**
+     * Add this memtable's used memory to the given usage object. This can be used to retrieve a single memtable's usage
+     * as well as to combine the usage of related memtables (e.g. a table and its table-based secondary indexes).
+     */
+    void addMemoryUsageTo(MemoryUsage usage);
+
+
+    /**
+     * Creates a holder for memory usage collection.
+     *
+     * This is used to track on- and off-heap memory, as well as the ratio to the total permitted memtable memory.
+     */
+    static MemoryUsage newMemoryUsage()
+    {
+        return new MemoryUsage();
+    }
+
+    /**
+     * Shorthand for getting a given memtable's memory usage.
+     * Implemented as a static method to prevent implementations from altering expectations by e.g. returning a cached object.
+     */
+    static MemoryUsage getMemoryUsage(Memtable memtable)
+    {
+        MemoryUsage usage = newMemoryUsage();
+        memtable.addMemoryUsageTo(usage);
+        return usage;
+    }
+
+    @NotThreadSafe
+    class MemoryUsage
+    {
+        /** On-heap memory used in bytes */
+        public long ownsOnHeap = 0;
+        /** Off-heap memory used in bytes */
+        public long ownsOffHeap = 0;
+        /** On-heap memory as ratio to permitted memtable space */
+        public float ownershipRatioOnHeap = 0.0f;
+        /** Off-heap memory as ratio to permitted memtable space */
+        public float ownershipRatioOffHeap = 0.0f;
+
+        @Override
+        public String toString()
+        {
+            return String.format("%s (%.0f%%) on-heap, %s (%.0f%%) off-heap",
+                                 FBUtilities.prettyPrintMemory(ownsOnHeap),
+                                 ownershipRatioOnHeap * 100,
+                                 FBUtilities.prettyPrintMemory(ownsOffHeap),
+                                 ownershipRatioOffHeap * 100);
+        }
+    }
+
+    /**
+     * Adjust the used on-heap space by the given size (e.g. to reflect memory used by a non-table-based index).
+     * This operation may block until enough memory is available in the memory pool.
+     *
+     * @param additionalSpace the number of allocated bytes
+     * @param opGroup write operation group, used to permit the operation to complete if it is needed to complete a
+     *                flush to free space.
+     */
+    void markExtraOnHeapUsed(long additionalSpace, OpOrder.Group opGroup);
+
+    /**
+     * Adjust the used off-heap space by the given size (e.g. to reflect memory used by a non-table-based index).
+     * This operation may block until enough memory is available in the memory pool.
+     *
+     * @param additionalSpace the number of allocated bytes
+     * @param opGroup write operation group, used to permit the operation to complete if it is needed to complete a
+     *                flush to free space.
+     */
+    void markExtraOffHeapUsed(long additionalSpace, OpOrder.Group opGroup);
+
+
+    // Flushing
+
+    /**
+     * Get the collection of data between the given partition boundaries in a form suitable for flushing.
+     */
+    FlushablePartitionSet<?> getFlushSet(PartitionPosition from, PartitionPosition to);
+
+    /**
+     * A collection of partitions for flushing plus some information required for writing an sstable.
+     *
+     * Note that the listed entries must conform with the specified metadata. In particular, if the memtable is still
+     * being written to, care must be taken to not list newer items as they may violate the bounds collected by the
+     * encoding stats or refer to columns that don't exist in the collected columns set.
+     */
+    interface FlushablePartitionSet<P extends Partition> extends Iterable<P>, SSTableWriter.SSTableSizeParameters
+    {
+        Memtable memtable();
+
+        PartitionPosition from();
+        PartitionPosition to();
+
+        /** The commit log position at the time that this memtable was created */
+        CommitLogPosition commitLogLowerBound();
+        /** The commit log position at the time that this memtable was switched out */
+        CommitLogPosition commitLogUpperBound();
+
+        /** The set of all columns that have been written */
+        RegularAndStaticColumns columns();
+        /** Statistics required for writing an sstable efficiently */
+        EncodingStats encodingStats();
+
+        default TableMetadata metadata()
+        {
+            return memtable().metadata();
+        }
+
+        default boolean isEmpty()
+        {
+            return partitionCount() == 0;
+        }
+    }
+
+
+    // Lifecycle management
+
+    /**
+     * Called to tell the memtable that it is being switched out and will be flushed (or dropped) and discarded.
+     * Will be followed by a {@link #getFlushSet} call (if the table is not truncated or dropped), and a
+     * {@link #discard}.
+     *
+     * @param writeBarrier The barrier that will signal that all writes to this memtable have completed. That is, the
+     *                     point after which writes cannot be accepted by this memtable (it is permitted for writes
+     *                     before this barrier to go into the next; see {@link #accepts}).
+     * @param commitLogUpperBound The upper commit log position for this memtable. The value may be modified after this
+     *                            call and will match the next memtable's lower commit log bound.
+     */
+    void switchOut(OpOrder.Barrier writeBarrier, AtomicReference<CommitLogPosition> commitLogUpperBound);
+
+    /**
+     * This memtable is no longer in use or required for outstanding flushes or operations.
+     * All held memory must be released.
+     */
+    void discard();
+
+    /**
+     * Decide if this memtable should take a write with the given parameters, or if the write should go to the next
+     * memtable. This enforces that no writes after the barrier set by {@link #switchOut} can be accepted, and
+     * is also used to define a shared commit log bound, serving as the upper bound for this memtable and the lower bound for the next.
+     */
+    boolean accepts(OpOrder.Group opGroup, CommitLogPosition commitLogPosition);
+
+    /** Approximate commit log lower bound, <= getCommitLogLowerBound, used as a timestamp for ordering */
+    CommitLogPosition getApproximateCommitLogLowerBound();
+
+    /** The commit log position at the time that this memtable was created */
+    CommitLogPosition getCommitLogLowerBound();
+
+    /** The commit log position at the time that this memtable was switched out */
+    LastCommitLogPosition getFinalCommitLogUpperBound();
+
+    /** True if the memtable can contain any data that was written before the given commit log position */
+    boolean mayContainDataBefore(CommitLogPosition position);
+
+    /** True if the memtable contains no data */
+    boolean isClean();
+
+    /** Order memtables by time as reflected in the commit log position at time of construction */
+    default int compareTo(Memtable that)
+    {
+        return this.getApproximateCommitLogLowerBound().compareTo(that.getApproximateCommitLogLowerBound());
+    }
+
+    /**
+     * Decides whether the memtable should be switched/flushed for the passed reason.
+     * Normally this will return true, but e.g. persistent memtables may choose not to flush. Returning false will
+     * trigger further action for certain reasons:
+     * - SCHEMA_CHANGE will be followed by metadataUpdated().
+     * - OWNED_RANGES_CHANGE will be followed by localRangesUpdated().
+     * - SNAPSHOT will be followed by performSnapshot().
+     * - STREAMING/REPAIR will be followed by creating a FlushSet for the streamed/repaired ranges. This data will be
+     *   used to create sstables, which will be streamed and then deleted.
+     * This will not be called to perform truncation or drop (in that case the memtable is unconditionally dropped),
+     * but a flush may nevertheless be requested in that case to prepare a snapshot.
+     */
+    boolean shouldSwitch(ColumnFamilyStore.FlushReason reason);
+
+    /**
+     * Called when the table's metadata is updated. The memtable's metadata reference now points to the new version.
+     * This will not be called if {@link #shouldSwitch}(SCHEMA_CHANGE) returns true, as the memtable will be swapped out
+     * instead.
+     */
+    void metadataUpdated();
+
+    /**
+     * Called when the known ranges have been updated and owner.localRangeSplits() may return different values.
+     * This will not be called if {@link #shouldSwitch}(OWNED_RANGES_CHANGE) returns true, as the memtable will be
+     * swapped out instead.
+     */
+    void localRangesUpdated();
+
+    /**
+     * If the memtable needs to do some special action for snapshots (e.g. because it is persistent and does not want
+     * to flush), it should return false from {@link #shouldSwitch} for reason SNAPSHOT and implement this method.
+     */
+    void performSnapshot(String snapshotName);
+
+    /**
+     * Special commit log position marker used in the upper bound marker setting process
+     * (see {@link org.apache.cassandra.db.ColumnFamilyStore#setCommitLogUpperBound} and {@link AbstractMemtableWithCommitlog#accepts})
+     */
+    public static final class LastCommitLogPosition extends CommitLogPosition
+    {
+        public LastCommitLogPosition(CommitLogPosition copy)
+        {
+            super(copy.segmentId, copy.position);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/memtable/Memtable_API.md b/src/java/org/apache/cassandra/db/memtable/Memtable_API.md
new file mode 100644
index 0000000..8a582e3
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/Memtable_API.md
@@ -0,0 +1,178 @@
+# Memtable API
+
+[CEP-11](https://cwiki.apache.org/confluence/display/CASSANDRA/CEP-11%3A+Pluggable+memtable+implementations) 
+/ [CASSANDRA-17034](https://issues.apache.org/jira/browse/CASSANDRA-17034)
+
+## Configuration specification
+
+Memtable types and options are specified using memtable "configurations", which specify an implementation class
+and its parameters. 
+
+The memtable configurations are defined in `cassandra.yaml`, using the following format:
+
+```yaml
+memtable:
+    configurations:
+        ⟨configuration name⟩:
+          class_name: ⟨class⟩
+          inherits: ⟨configuration name⟩
+          parameters:
+            ⟨parameters⟩
+```
+
+A configuration can inherit the properties of another, including being a full copy of it, which can be useful for
+easily remapping one name to another configuration.
+
+The default memtable configuration is named `default`. It can be overridden if the yaml specifies it (including
+using inheritance to copy another configuration), and it can be inherited, even if it is not explicitly defined in
+the yaml (e.g. to change some parameter but not the memtable class).
+
+Examples:
+
+```yaml
+memtable:
+    configurations:
+        more_shards:
+          inherits: default
+          parameters:
+             shards: 32
+```
+
+```yaml
+memtable:
+    configurations:
+        skiplist:
+          class_name: SkipListMemtable
+        sharded:
+          class_name: ShardedSkipListMemtable
+        default:
+          inherits: sharded
+```
+
+Note that the database will only validate the memtable class and its parameters when a configuration needs to be
+instantiated for a table.
+
+## Memtable selection
+
+Once a configuration has been defined, it can be used by specifying it in the `memtable` parameter of a `CREATE TABLE`
+or `ALTER TABLE` statement, for example:
+
+```
+CREATE TABLE ... WITH ... AND memtable = 'sharded';
+```
+or
+```
+ALTER TABLE ... WITH memtable = 'skiplist';
+```
+
+If a memtable is not specified, the configuration `default` will be used. To reset a table to the default memtable,
+use
+```
+ALTER TABLE ... WITH memtable = 'default';
+```
+
+The memtable configuration selection is per table, i.e. it will be propagated to all nodes in the cluster. If some nodes
+do not have a definition for that configuration or cannot instantiate the class, they will log an error and fall 
+back to the default memtable configuration to avoid schema disagreements. However, if some nodes are still on a version 
+of Cassandra before 4.1, they will reject the schema change. We therefore recommend using a separate `ALTER` statement 
+to change a table's memtable implementation; upgrading all nodes to 4.1 or later is required to use the API.
+
+As an additional safety measure when first deploying an alternative implementation to a production cluster, one may
+consider first deploying a remapped `default` configuration to all nodes in the cluster, switching the schema to
+reference it, and then changing the implementation by modifying the configuration one node at a time.
+
+For example, a remapped default can be specified with this:
+```yaml
+memtable:
+    configurations:
+        better_memtable:
+            inherits: default
+```
+selected via
+```
+ALTER TABLE production_table WITH memtable = 'better_memtable';
+```
+and later switched one node at a time to
+```yaml
+memtable:
+    configurations:
+        our_memtable:
+            class_name: ...
+        better_memtable:
+            inherits: our_memtable
+```
+
+## Memtable implementation
+
+A memtable implementation is an implementation of the `Memtable` interface. The main work of the class will be
+performed by the `put`, `rowIterator` and `partitionIterator` methods, used to write and read information to/from the
+memtable. In addition to this, the implementation must support retrieval of the content in a form suitable for flushing 
+(via `getFlushSet`), memory use and statistics tracking, mechanics for triggering a flush for reasons
+controlled by the memtable (e.g. exhaustion of the given memory allocation), and finally mechanisms for tracking the
+commit log spans covered by a memtable.
+
+Abstract classes covering the latter parts of this functionality (expected to be shared by most implementations) are
+provided: `AbstractMemtable` (statistics tracking), `AbstractMemtableWithCommitlog` (adds commit log span tracking)
+and `AbstractAllocatorMemtable` (adds memory management via the `MemtableAllocator` class, together with flush
+triggering on memory use and time interval expiration).
+
+The memtable API also gives the memtable some control over flushing and the functioning of the commit log. The former
+is there to permit memtables that operate long-term and/or can handle some events internally, without a need to flush.
+The latter enables memtables that have an internal durability mechanism, such as ones using persistent memory or a
+tightly integrated commit log (e.g. using the commit log buffers for memtable data storage).
+
+The memtable implementation must also provide a mechanism for memtable construction, called a memtable "factory"
+(the `Memtable.Factory` interface). Some features of the implementation may be needed before an instance is created,
+or in contexts where a memtable instance is not accessible. To make working with them more straightforward, the
+following memtable-controlled options are implemented on the factory (a sketch of a factory using them follows the
+list):
+
+- `boolean writesAreDurable()` should return true if the memtable has its own persistence mechanism and does not want
+  the commitlog to provide persistence. In this case the commit log can still store the writes for changed-data-capture (CDC)
+  or point-in-time restore (PITR), but it need not keep them for replay until the memtable is flushed.
+- `boolean writesShouldSkipCommitLog()` should return true if the memtable does not want the commit log to store any of
+  its data. The expectation for this flag is that a persistent memtable will take a configuration parameter to turn this
+  option on to improve performance. Enabling this flag is not compatible with CDC or PITR.
+- `boolean streamToMemtable()` and `boolean streamFromMemtable()` should return true if the memtable is long-lived and
+  cannot flush to facilitate streaming. In this case the streaming code will implement the process in a way that
+  retrieves the data to send from the memtable, and applies received data to the memtable instead of directly creating
+  an sstable.
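+
+As an illustration only, a factory for a hypothetical memtable that provides its own durability and cannot flush for
+streaming might override these options roughly as follows (the class names are invented for this sketch and are not
+part of the codebase):
+
+```java
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.schema.TableMetadataRef;
+
+// Hypothetical factory for a persistent-memory-style memtable.
+public class PersistentExampleFactory implements Memtable.Factory
+{
+    public static final Memtable.Factory FACTORY = new PersistentExampleFactory();
+
+    @Override
+    public Memtable create(AtomicReference<CommitLogPosition> commitLogLowerBound,
+                           TableMetadataRef metadataRef,
+                           Memtable.Owner owner)
+    {
+        // A real implementation would construct its persistent memtable here.
+        throw new UnsupportedOperationException("illustration only");
+    }
+
+    @Override
+    public boolean writesAreDurable()
+    {
+        return true;   // the memtable recovers its own data; the commit log need not retain segments for replay
+    }
+
+    @Override
+    public boolean streamToMemtable()
+    {
+        return true;   // streamed data is replayed as mutations instead of being received as sstables
+    }
+
+    @Override
+    public boolean streamFromMemtable()
+    {
+        return true;   // streaming writes the relevant memtable content to temporary sstables before sending
+    }
+}
+```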
+
+### Instantiation and configuration
+
+Memtables are instantiated by the factory, which is constructed via reflection when a `ColumnFamilyStore` is created
+or the table's configuration is altered.
+
+Memtable classes must either contain a static `FACTORY` field (if they take no arguments other than the class name),
+or implement a static `factory(Map<String, String>)` method, which is called with the configuration `parameters`. For
+validation, the latter should consume any options it understands (using `map.remove`).
+
+The `MemtableParams` class will look for the specified class name (prefixed with `org.apache.cassandra.db.memtable.`
+if only a short name was given), then look for a `factory` method. If it finds one, it will call it with a copy of the 
+supplied parameters; if it does not, it will look for the `FACTORY` field and use its value if found. It will error out
+if the class was not found, if neither the method nor the field was found, or if the user supplied parameters that did not
+get consumed.
+
+Because multiple configurations and tables may use the same parameters, it is expected that the factory method will
+store and reuse constructed factories to avoid wasting space for duplicate objects (this is typical for configuration 
+objects in Cassandra).
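+
+As a rough sketch under the conventions above (with invented class names, and with the actual memtable construction
+omitted), such a method could look like this:
+
+```java
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.FBUtilities;
+
+// Hypothetical memtable class; only the factory-related members are shown.
+public class ExampleMemtable
+{
+    // Cache factories per parameter value so that equal configurations reuse the same factory object.
+    private static final ConcurrentHashMap<Integer, Memtable.Factory> FACTORIES = new ConcurrentHashMap<>();
+
+    // Called by MemtableParams with a copy of the configuration's `parameters` map.
+    public static Memtable.Factory factory(Map<String, String> options)
+    {
+        // Consume the options we understand; anything left in the map is reported to the user as an error.
+        String shardsOption = options.remove("shards");
+        int shards = shardsOption != null ? Integer.parseInt(shardsOption)
+                                          : FBUtilities.getAvailableProcessors();
+        return FACTORIES.computeIfAbsent(shards, ExampleFactory::new);
+    }
+
+    private static final class ExampleFactory implements Memtable.Factory
+    {
+        private final int shards;
+
+        private ExampleFactory(int shards)
+        {
+            this.shards = shards;
+        }
+
+        @Override
+        public Memtable create(AtomicReference<CommitLogPosition> commitLogLowerBound,
+                               TableMetadataRef metadataRef,
+                               Memtable.Owner owner)
+        {
+            // A real implementation would construct the memtable here using the configured shard count.
+            throw new UnsupportedOperationException("illustration only");
+        }
+    }
+}
+```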
+
+At this time many of the configuration parameters for memtables are still configured using top-level parameters like
+`memtable_allocation_type` in `cassandra.yaml` and `memtable_flush_period_in_ms` in the table schema.
+
+
+### Sample implementation
+
+The API comes with a proof-of-concept implementation, a sharded skip-list memtable implemented by the 
+`ShardedSkipListMemtable` class. The difference between this and the default memtable is that the sharded version breaks 
+the token space served by the node into roughly equal regions and uses a separate skip-list for each shard. Sharding
+spreads the write concurrency among these independent skip lists, reducing congestion, which can lead to significantly
+improved write throughput.
+
+This implementation takes two parameters: `shards`, which specifies the number of shards to split into (by default, the
+number of CPU threads available to the process), and `serialize_writes`, which, if set to `true`, causes writes to the
+memtable to be synchronized. The latter can be useful to minimize the space and time wasted on unsuccessful lockless
+partition modification, where a new copy of the partition would be prepared but not used due to concurrent modification.
+Regardless of the setting, reads can always execute in parallel, including concurrently with writes.
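+
+For reference, a configuration selecting this implementation with explicit parameters might look like the following
+(the configuration name is just an example):
+
+```yaml
+memtable:
+    configurations:
+        sharded_locking:
+          class_name: ShardedSkipListMemtable
+          parameters:
+            shards: 16
+            serialize_writes: true
+```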
+
+Please note that sharding cannot be used with non-hashing partitioners (i.e. `ByteOrderPartitioner` or 
+`OrderPreservingPartitioner`).
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/db/memtable/ShardBoundaries.java b/src/java/org/apache/cassandra/db/memtable/ShardBoundaries.java
new file mode 100644
index 0000000..fb9cc98
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/ShardBoundaries.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.memtable;
+
+import java.util.Arrays;
+import java.util.List;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.dht.Token;
+
+/**
+ * Holds boundaries (tokens) used to map a particular token (and thus partition key) to a shard id.
+ * In practice, each keyspace has its associated boundaries, see {@link Keyspace}.
+ * <p>
+ * Technically, if we use {@code n} shards, this is a list of {@code n-1} tokens and each token {@code tk} gets assigned
+ * to the shard ID corresponding to the slot of the smallest token in the list that is greater than {@code tk}, or
+ * {@code n-1} if {@code tk} is bigger than any token in the list.
+ */
+public class ShardBoundaries
+{
+    private static final Token[] EMPTY_TOKEN_ARRAY = new Token[0];
+
+    // Special boundaries that map all tokens to one shard.
+    // These boundaries will be used in either of these cases:
+    // - there is only 1 shard configured
+    // - the default partitioner doesn't support splitting
+    // - the keyspace is a local system keyspace
+    public static final ShardBoundaries NONE = new ShardBoundaries(EMPTY_TOKEN_ARRAY, -1);
+
+    private final Token[] boundaries;
+    public final long ringVersion;
+
+    @VisibleForTesting
+    public ShardBoundaries(Token[] boundaries, long ringVersion)
+    {
+        this.boundaries = boundaries;
+        this.ringVersion = ringVersion;
+    }
+
+    public ShardBoundaries(List<Token> boundaries, long ringVersion)
+    {
+        this(boundaries.toArray(EMPTY_TOKEN_ARRAY), ringVersion);
+    }
+
+    /**
+     * Computes the shard to use for the provided token.
+     */
+    public int getShardForToken(Token tk)
+    {
+        for (int i = 0; i < boundaries.length; i++)
+        {
+            if (tk.compareTo(boundaries[i]) < 0)
+                return i;
+        }
+        return boundaries.length;
+    }
+
+    /**
+     * Computes the shard to use for the provided key.
+     */
+    public int getShardForKey(PartitionPosition key)
+    {
+        // Boundaries are missing if the node is not sufficiently initialized yet
+        if (boundaries.length == 0)
+            return 0;
+
+        assert (key.getPartitioner() == DatabaseDescriptor.getPartitioner());
+        return getShardForToken(key.getToken());
+    }
+
+    /**
+     * The number of shards that these boundaries support, that is, how many different shard ids {@link #getShardForToken}
+     * might possibly return.
+     *
+     * @return the number of shards supported by these boundaries.
+     */
+    public int shardCount()
+    {
+        return boundaries.length + 1;
+    }
+
+    @Override
+    public String toString()
+    {
+        if (boundaries.length == 0)
+            return "shard 0: (min, max)";
+
+        StringBuilder sb = new StringBuilder();
+        sb.append("shard 0: (min, ").append(boundaries[0]).append(") ");
+        for (int i = 0; i < boundaries.length - 1; i++)
+            sb.append("shard ").append(i+1).append(": (").append(boundaries[i]).append(", ").append(boundaries[i+1]).append("] ");
+        sb.append("shard ").append(boundaries.length).append(": (").append(boundaries[boundaries.length-1]).append(", max)");
+        return sb.toString();
+    }
+
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        ShardBoundaries that = (ShardBoundaries) o;
+
+        return Arrays.equals(boundaries, that.boundaries);
+    }
+
+    public int hashCode()
+    {
+        return Arrays.hashCode(boundaries);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java b/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
new file mode 100644
index 0000000..51cd5f2
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
@@ -0,0 +1,569 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.memtable;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterators;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.DataRange;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.filter.ClusteringIndexFilter;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
+import org.apache.cassandra.db.partitions.AtomicBTreePartition;
+import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Bounds;
+import org.apache.cassandra.dht.IncludingExcludingBounds;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.index.transactions.UpdateTransaction;
+import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.memory.Cloner;
+import org.apache.cassandra.utils.memory.MemtableAllocator;
+import org.github.jamm.Unmetered;
+
+/**
+ * A proof-of-concept sharded memtable implementation. This implementation splits the partition skip-list into several
+ * independent skip-lists each covering a roughly equal part of the token space served by this node. This reduces
+ * congestion of the skip-list from concurrent writes and can lead to improved write throughput.
+ *
+ * The implementation takes two parameters:
+ * - shards: the number of shards to split into.
+ * - serialize_writes: if false, each shard may serve multiple writes in parallel; if true, writes to each shard are
+ *   synchronized.
+ *
+ * Also see Memtable_API.md.
+ */
+public class ShardedSkipListMemtable extends AbstractAllocatorMemtable
+{
+    private static final Logger logger = LoggerFactory.getLogger(ShardedSkipListMemtable.class);
+
+    public static final String SHARDS_OPTION = "shards";
+    public static final String LOCKING_OPTION = "serialize_writes";
+
+    // The boundaries for the keyspace as they were calculated when the memtable was created.
+    // The boundaries will be NONE for system keyspaces or if StorageService is not yet initialized.
+    // The fact that this is fixed for the duration of the memtable's lifetime guarantees we'll always pick the same
+    // shard for a given key, even if we race with the StorageService initialization or with topology changes.
+    @Unmetered
+    final ShardBoundaries boundaries;
+
+    /**
+     * The memtable shards, each covering a contiguous range of the token space. Writes go to the shard owning the
+     * partition key. The data structures used are concurrent-read safe, thus reads can be carried out from any thread.
+     */
+    final MemtableShard[] shards;
+
+    @VisibleForTesting
+    public static final String SHARD_COUNT_PROPERTY = "cassandra.memtable.shard.count";
+
+    // default shard count, used when a specific number of shards is not specified in the parameters
+    private static final int SHARD_COUNT = Integer.getInteger(SHARD_COUNT_PROPERTY, FBUtilities.getAvailableProcessors());
+
+    private final Factory factory;
+
+    // only to be used by init(), to set up the very first memtable for the cfs
+    ShardedSkipListMemtable(AtomicReference<CommitLogPosition> commitLogLowerBound,
+                            TableMetadataRef metadataRef,
+                            Owner owner,
+                            Integer shardCountOption,
+                            Factory factory)
+    {
+        super(commitLogLowerBound, metadataRef, owner);
+        int shardCount = shardCountOption != null ? shardCountOption : SHARD_COUNT;
+        this.boundaries = owner.localRangeSplits(shardCount);
+        this.shards = generatePartitionShards(boundaries.shardCount(), allocator, metadataRef);
+        this.factory = factory;
+    }
+
+    private static MemtableShard[] generatePartitionShards(int splits,
+                                                           MemtableAllocator allocator,
+                                                           TableMetadataRef metadata)
+    {
+        MemtableShard[] partitionMapContainer = new MemtableShard[splits];
+        for (int i = 0; i < splits; i++)
+            partitionMapContainer[i] = new MemtableShard(metadata, allocator);
+
+        return partitionMapContainer;
+    }
+
+    public boolean isClean()
+    {
+        for (MemtableShard shard : shards)
+            if (!shard.isEmpty())
+                return false;
+        return true;
+    }
+
+    @Override
+    protected Memtable.Factory factory()
+    {
+        return factory;
+    }
+
+    /**
+     * Should only be called by ColumnFamilyStore.apply via Keyspace.apply, which supplies the appropriate
+     * OpOrdering.
+     *
+     * commitLogSegmentPosition should only be null if this is a secondary index, in which case it is *expected* to be null
+     */
+    public long put(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup)
+    {
+        DecoratedKey key = update.partitionKey();
+        MemtableShard shard = shards[boundaries.getShardForKey(key)];
+        return shard.put(key, update, indexer, opGroup);
+    }
+
+    /**
+     * Technically we should scatter/gather on all the core threads because the sizes read in the following calls are
+     * not volatile variables, but for metrics purposes this should be good enough.
+     */
+    @Override
+    public long getLiveDataSize()
+    {
+        long total = 0L;
+        for (MemtableShard shard : shards)
+            total += shard.liveDataSize();
+        return total;
+    }
+
+    @Override
+    public long operationCount()
+    {
+        long total = 0L;
+        for (MemtableShard shard : shards)
+            total += shard.currentOperations();
+        return total;
+    }
+
+    @Override
+    public long partitionCount()
+    {
+        int total = 0;
+        for (MemtableShard shard : shards)
+            total += shard.size();
+        return total;
+    }
+
+    /**
+     * Returns the minTS if one is available, otherwise NO_MIN_TIMESTAMP.
+     *
+     * EncodingStats uses a synthetic epoch TS at 2015. We don't want to leak that (CASSANDRA-18118) so we return NO_MIN_TIMESTAMP instead.
+     *
+     * @return The minTS or NO_MIN_TIMESTAMP if none available
+     */
+    @Override
+    public long getMinTimestamp()
+    {
+        long min = Long.MAX_VALUE;
+        for (MemtableShard shard : shards)
+            min = Long.min(min, shard.minTimestamp());
+        return min != EncodingStats.NO_STATS.minTimestamp ? min : NO_MIN_TIMESTAMP;
+    }
+
+    @Override
+    public int getMinLocalDeletionTime()
+    {
+        int min = Integer.MAX_VALUE;
+        for (MemtableShard shard : shards)
+            min = Integer.min(min, shard.minLocalDeletionTime());
+        return min;
+    }
+
+    @Override
+    RegularAndStaticColumns columns()
+    {
+        for (MemtableShard shard : shards)
+            columnsCollector.update(shard.columnsCollector);
+        return columnsCollector.get();
+    }
+
+    @Override
+    EncodingStats encodingStats()
+    {
+        for (MemtableShard shard : shards)
+            statsCollector.update(shard.statsCollector.get());
+        return statsCollector.get();
+    }
+
+    @Override
+    public MemtableUnfilteredPartitionIterator partitionIterator(final ColumnFilter columnFilter,
+                                                                 final DataRange dataRange,
+                                                                 SSTableReadsListener readsListener)
+    {
+        AbstractBounds<PartitionPosition> keyRange = dataRange.keyRange();
+
+        PartitionPosition left = keyRange.left;
+        PartitionPosition right = keyRange.right;
+
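+        // Bounds includes both endpoints, Range only the right, IncludingExcludingBounds only the left, and
+        // ExcludingBounds neither; derive the inclusiveness flags from the concrete bounds type.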
+        boolean isBound = keyRange instanceof Bounds;
+        boolean includeStart = isBound || keyRange instanceof IncludingExcludingBounds;
+        boolean includeStop = isBound || keyRange instanceof Range;
+
+        Iterator<AtomicBTreePartition> iterator = getPartitionIterator(left, includeStart, right, includeStop);
+
+        return new MemtableUnfilteredPartitionIterator(metadata(), iterator, columnFilter, dataRange);
+        // readsListener is ignored as it only accepts sstable signals
+    }
+
+    private Iterator<AtomicBTreePartition> getPartitionIterator(PartitionPosition left, boolean includeStart, PartitionPosition right, boolean includeStop)
+    {
+        int leftShard = left != null && !left.isMinimum() ? boundaries.getShardForKey(left) : 0;
+        int rightShard = right != null && !right.isMinimum() ? boundaries.getShardForKey(right) : boundaries.shardCount() - 1;
+        Iterator<AtomicBTreePartition> iterator;
+        if (leftShard == rightShard)
+            iterator = shards[leftShard].getPartitionsSubMap(left, includeStart, right, includeStop).values().iterator();
+        else
+        {
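+            // The requested range spans multiple shards: concatenate the tail of the first shard, every complete
+            // middle shard, and the head of the last shard, in shard (and therefore token) order.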
+            Iterator<AtomicBTreePartition>[] iters = new Iterator[rightShard - leftShard + 1];
+            int i = leftShard;
+            iters[0] = shards[leftShard].getPartitionsSubMap(left, includeStart, null, true).values().iterator();
+            for (++i; i < rightShard; ++i)
+                iters[i - leftShard] = shards[i].partitions.values().iterator();
+            iters[i - leftShard] = shards[i].getPartitionsSubMap(null, true, right, includeStop).values().iterator();
+            iterator = Iterators.concat(iters);
+        }
+        return iterator;
+    }
+
+    private Partition getPartition(DecoratedKey key)
+    {
+        int shardIndex = boundaries.getShardForKey(key);
+        return shards[shardIndex].partitions.get(key);
+    }
+
+    @Override
+    public UnfilteredRowIterator rowIterator(DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, SSTableReadsListener listener)
+    {
+        Partition p = getPartition(key);
+        if (p == null)
+            return null;
+        else
+            return p.unfilteredIterator(selectedColumns, slices, reversed);
+    }
+
+    @Override
+    public UnfilteredRowIterator rowIterator(DecoratedKey key)
+    {
+        Partition p = getPartition(key);
+        return p != null ? p.unfilteredIterator() : null;
+    }
+
+    public FlushablePartitionSet<AtomicBTreePartition> getFlushSet(PartitionPosition from, PartitionPosition to)
+    {
+        long keySize = 0;
+        int keyCount = 0;
+
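+        // First pass over the range sizes the flush set (partition count and total key bytes) so they can be
+        // reported up front; a fresh iterator over the same range is handed to the flush writer below.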
+        for (Iterator<AtomicBTreePartition> it = getPartitionIterator(from, true, to, false); it.hasNext();)
+        {
+            AtomicBTreePartition en = it.next();
+            keySize += en.partitionKey().getKey().remaining();
+            keyCount++;
+        }
+        long partitionKeySize = keySize;
+        int partitionCount = keyCount;
+        Iterator<AtomicBTreePartition> toFlush = getPartitionIterator(from, true, to, false);
+
+        return new AbstractFlushablePartitionSet<AtomicBTreePartition>()
+        {
+            public Memtable memtable()
+            {
+                return ShardedSkipListMemtable.this;
+            }
+
+            public PartitionPosition from()
+            {
+                return from;
+            }
+
+            public PartitionPosition to()
+            {
+                return to;
+            }
+
+            public long partitionCount()
+            {
+                return partitionCount;
+            }
+
+            public Iterator<AtomicBTreePartition> iterator()
+            {
+                return toFlush;
+            }
+
+            public long partitionKeysSize()
+            {
+                return partitionKeySize;
+            }
+        };
+    }
+
+    static class MemtableShard
+    {
+        // The following fields are atomic, so that when we collect results
+        // from all shards, the thread reading the values is guaranteed to
+        // see the latest changes made by the writer threads.
+
+        // The smallest timestamp for all partitions stored in this shard
+        private final AtomicLong minTimestamp = new AtomicLong(Long.MAX_VALUE);
+        private final AtomicInteger minLocalDeletionTime = new AtomicInteger(Integer.MAX_VALUE);
+
+        private final AtomicLong liveDataSize = new AtomicLong(0);
+
+        private final AtomicLong currentOperations = new AtomicLong(0);
+
+        // We index the memtable by PartitionPosition only for the purpose of being able
+        // to select key range using Token.KeyBound. However put() ensures that we
+        // actually only store DecoratedKey.
+        private final ConcurrentNavigableMap<PartitionPosition, AtomicBTreePartition> partitions = new ConcurrentSkipListMap<>();
+
+        private final ColumnsCollector columnsCollector;
+
+        private final StatsCollector statsCollector;
+
+        @Unmetered  // total pool size should not be included in memtable's deep size
+        private final MemtableAllocator allocator;
+
+        private final TableMetadataRef metadata;
+
+        @VisibleForTesting
+        MemtableShard(TableMetadataRef metadata, MemtableAllocator allocator)
+        {
+            this.columnsCollector = new ColumnsCollector(metadata.get().regularAndStaticColumns());
+            this.statsCollector = new StatsCollector();
+            this.allocator = allocator;
+            this.metadata = metadata;
+        }
+
+        public long put(DecoratedKey key, PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup)
+        {
+            Cloner cloner = allocator.cloner(opGroup);
+            AtomicBTreePartition previous = partitions.get(key);
+
+            long initialSize = 0;
+            if (previous == null)
+            {
+                final DecoratedKey cloneKey = cloner.clone(key);
+                AtomicBTreePartition empty = new AtomicBTreePartition(metadata, cloneKey, allocator);
+                // We'll add the columns later. This avoids wasted work if we get beaten in the putIfAbsent
+                previous = partitions.putIfAbsent(cloneKey, empty);
+                if (previous == null)
+                {
+                    previous = empty;
+                    // allocate the row overhead after the fact; this saves over-allocating and having to free afterwards, but
+                    // means we can overshoot our declared limit.
+                    int overhead = (int) (cloneKey.getToken().getHeapSize() + SkipListMemtable.ROW_OVERHEAD_HEAP_SIZE);
+                    allocator.onHeap().allocate(overhead, opGroup);
+                    initialSize = 8;
+                }
+            }
+
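+            // addAllWithSizeDelta returns the live data size delta (pair[0]), used for accounting here, and the
+            // value to report back to put()'s caller (pair[1]).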
+            long[] pair = previous.addAllWithSizeDelta(update, cloner, opGroup, indexer);
+            updateMin(minTimestamp, update.stats().minTimestamp);
+            updateMin(minLocalDeletionTime, update.stats().minLocalDeletionTime);
+            liveDataSize.addAndGet(initialSize + pair[0]);
+            columnsCollector.update(update.columns());
+            statsCollector.update(update.stats());
+            currentOperations.addAndGet(update.operationCount());
+            return pair[1];
+        }
+
+        private Map<PartitionPosition, AtomicBTreePartition> getPartitionsSubMap(PartitionPosition left,
+                                                                                 boolean includeLeft,
+                                                                                 PartitionPosition right,
+                                                                                 boolean includeRight)
+        {
+            if (left != null && left.isMinimum())
+                left = null;
+            if (right != null && right.isMinimum())
+                right = null;
+
+            try
+            {
+                if (left == null)
+                    return right == null ? partitions : partitions.headMap(right, includeRight);
+                else
+                    return right == null
+                           ? partitions.tailMap(left, includeLeft)
+                           : partitions.subMap(left, includeLeft, right, includeRight);
+            }
+            catch (IllegalArgumentException e)
+            {
+                logger.error("Invalid range requested {} - {}", left, right);
+                throw e;
+            }
+        }
+
+        public boolean isEmpty()
+        {
+            return partitions.isEmpty();
+        }
+
+        public int size()
+        {
+            return partitions.size();
+        }
+
+        long minTimestamp()
+        {
+            return minTimestamp.get();
+        }
+
+        long liveDataSize()
+        {
+            return liveDataSize.get();
+        }
+
+        long currentOperations()
+        {
+            return currentOperations.get();
+        }
+
+        public int minLocalDeletionTime()
+        {
+            return minLocalDeletionTime.get();
+        }
+    }
+
+    public static class MemtableUnfilteredPartitionIterator extends AbstractUnfilteredPartitionIterator implements UnfilteredPartitionIterator
+    {
+        private final TableMetadata metadata;
+        private final Iterator<AtomicBTreePartition> iter;
+        private final ColumnFilter columnFilter;
+        private final DataRange dataRange;
+
+        public MemtableUnfilteredPartitionIterator(TableMetadata metadata, Iterator<AtomicBTreePartition> iterator, ColumnFilter columnFilter, DataRange dataRange)
+        {
+            this.metadata = metadata;
+            this.iter = iterator;
+            this.columnFilter = columnFilter;
+            this.dataRange = dataRange;
+        }
+
+        public TableMetadata metadata()
+        {
+            return metadata;
+        }
+
+        public boolean hasNext()
+        {
+            return iter.hasNext();
+        }
+
+        public UnfilteredRowIterator next()
+        {
+            AtomicBTreePartition entry = iter.next();
+            DecoratedKey key = entry.partitionKey();
+            ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(key);
+
+            return filter.getUnfilteredRowIterator(columnFilter, entry);
+        }
+    }
+
+    static class Locking extends ShardedSkipListMemtable
+    {
+        Locking(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Owner owner, Integer shardCountOption, Factory factory)
+        {
+            super(commitLogLowerBound, metadataRef, owner, shardCountOption, factory);
+        }
+
+        /**
+         * Should only be called by ColumnFamilyStore.apply via Keyspace.apply, which supplies the appropriate
+         * OpOrdering.
+         *
+         * commitLogSegmentPosition should only be null if this is a secondary index, in which case it is *expected* to be null
+         */
+        public long put(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup)
+        {
+            DecoratedKey key = update.partitionKey();
+            MemtableShard shard = shards[boundaries.getShardForKey(key)];
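+            // The locking variant serialises writers per shard up front, instead of letting them contend inside
+            // the partition update itself.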
+            synchronized (shard)
+            {
+                return shard.put(key, update, indexer, opGroup);
+            }
+        }
+
+    }
+
+    public static Factory factory(Map<String, String> optionsCopy)
+    {
+        String shardsString = optionsCopy.remove(SHARDS_OPTION);
+        Integer shardCount = shardsString != null ? Integer.parseInt(shardsString) : null;
+        boolean isLocking = Boolean.parseBoolean(optionsCopy.remove(LOCKING_OPTION));
+        return new Factory(shardCount, isLocking);
+    }
+
+    static class Factory implements Memtable.Factory
+    {
+        final Integer shardCount;
+        final boolean isLocking;
+
+        Factory(Integer shardCount, boolean isLocking)
+        {
+            this.shardCount = shardCount;
+            this.isLocking = isLocking;
+        }
+
+        public Memtable create(AtomicReference<CommitLogPosition> commitLogLowerBound,
+                               TableMetadataRef metadataRef,
+                               Owner owner)
+        {
+            return isLocking
+                   ? new Locking(commitLogLowerBound, metadataRef, owner, shardCount, this)
+                   : new ShardedSkipListMemtable(commitLogLowerBound, metadataRef, owner, shardCount, this);
+        }
+
+        public boolean equals(Object o)
+        {
+            if (this == o)
+                return true;
+            if (o == null || getClass() != o.getClass())
+                return false;
+            Factory factory = (Factory) o;
+            return Objects.equals(shardCount, factory.shardCount);
+        }
+
+        public int hashCode()
+        {
+            return Objects.hash(shardCount);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java b/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
new file mode 100644
index 0000000..17c0cce
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
@@ -0,0 +1,374 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.memtable;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.BufferDecoratedKey;
+import org.apache.cassandra.db.DataRange;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.filter.ClusteringIndexFilter;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.partitions.AbstractBTreePartition;
+import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
+import org.apache.cassandra.db.partitions.AtomicBTreePartition;
+import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Bounds;
+import org.apache.cassandra.dht.IncludingExcludingBounds;
+import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.index.transactions.UpdateTransaction;
+import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.ObjectSizes;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.memory.Cloner;
+import org.apache.cassandra.utils.memory.MemtableAllocator;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.MEMTABLE_OVERHEAD_COMPUTE_STEPS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.MEMTABLE_OVERHEAD_SIZE;
+
+public class SkipListMemtable extends AbstractAllocatorMemtable
+{
+    private static final Logger logger = LoggerFactory.getLogger(SkipListMemtable.class);
+
+    public static final Factory FACTORY = SkipListMemtableFactory.INSTANCE;
+
+    protected static final int ROW_OVERHEAD_HEAP_SIZE;
+    static
+    {
+        int userDefinedOverhead = MEMTABLE_OVERHEAD_SIZE.getInt(-1);
+        if (userDefinedOverhead > 0)
+            ROW_OVERHEAD_HEAP_SIZE = userDefinedOverhead;
+        else
+            ROW_OVERHEAD_HEAP_SIZE = estimateRowOverhead(MEMTABLE_OVERHEAD_COMPUTE_STEPS.getInt());
+    }
+
+    // We index the memtable by PartitionPosition only for the purpose of being able
+    // to select key range using Token.KeyBound. However put() ensures that we
+    // actually only store DecoratedKey.
+    private final ConcurrentNavigableMap<PartitionPosition, AtomicBTreePartition> partitions = new ConcurrentSkipListMap<>();
+
+    private final AtomicLong liveDataSize = new AtomicLong(0);
+
+    protected SkipListMemtable(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Owner owner)
+    {
+        super(commitLogLowerBound, metadataRef, owner);
+    }
+
+    @Override
+    protected Factory factory()
+    {
+        return FACTORY;
+    }
+
+    @Override
+    public boolean isClean()
+    {
+        return partitions.isEmpty();
+    }
+
+    /**
+     * Should only be called by ColumnFamilyStore.apply via Keyspace.apply, which supplies the appropriate
+     * OpOrdering.
+     *
+     * commitLogSegmentPosition should only be null if this is a secondary index, in which case it is *expected* to be null
+     */
+    @Override
+    public long put(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup)
+    {
+        Cloner cloner = allocator.cloner(opGroup);
+        AtomicBTreePartition previous = partitions.get(update.partitionKey());
+
+        long initialSize = 0;
+        if (previous == null)
+        {
+            final DecoratedKey cloneKey = cloner.clone(update.partitionKey());
+            AtomicBTreePartition empty = new AtomicBTreePartition(metadata, cloneKey, allocator);
+            // We'll add the columns later. This avoids wasted work if we get beaten in the putIfAbsent
+            previous = partitions.putIfAbsent(cloneKey, empty);
+            if (previous == null)
+            {
+                previous = empty;
+                // allocate the row overhead after the fact; this saves over-allocating and having to free afterwards, but
+                // means we can overshoot our declared limit.
+                int overhead = (int) (cloneKey.getToken().getHeapSize() + ROW_OVERHEAD_HEAP_SIZE);
+                allocator.onHeap().allocate(overhead, opGroup);
+                initialSize = 8;
+            }
+        }
+
+        long[] pair = previous.addAllWithSizeDelta(update, cloner, opGroup, indexer);
+        updateMin(minTimestamp, update.stats().minTimestamp);
+        updateMin(minLocalDeletionTime, update.stats().minLocalDeletionTime);
+        liveDataSize.addAndGet(initialSize + pair[0]);
+        columnsCollector.update(update.columns());
+        statsCollector.update(update.stats());
+        currentOperations.addAndGet(update.operationCount());
+        return pair[1];
+    }
+
+    @Override
+    public long partitionCount()
+    {
+        return partitions.size();
+    }
+
+    @Override
+    public MemtableUnfilteredPartitionIterator partitionIterator(final ColumnFilter columnFilter,
+                                                                 final DataRange dataRange,
+                                                                 SSTableReadsListener readsListener)
+    {
+        AbstractBounds<PartitionPosition> keyRange = dataRange.keyRange();
+
+        PartitionPosition left = keyRange.left;
+        PartitionPosition right = keyRange.right;
+
+        boolean isBound = keyRange instanceof Bounds;
+        boolean includeLeft = isBound || keyRange instanceof IncludingExcludingBounds;
+        boolean includeRight = isBound || keyRange instanceof Range;
+        Map<PartitionPosition, AtomicBTreePartition> subMap = getPartitionsSubMap(left,
+                                                                                  includeLeft,
+                                                                                  right,
+                                                                                  includeRight);
+
+        return new MemtableUnfilteredPartitionIterator(metadata.get(), subMap, columnFilter, dataRange);
+        // readsListener is ignored as it only accepts sstable signals
+    }
+
+    private Map<PartitionPosition, AtomicBTreePartition> getPartitionsSubMap(PartitionPosition left,
+                                                                             boolean includeLeft,
+                                                                             PartitionPosition right,
+                                                                             boolean includeRight)
+    {
+        if (left != null && left.isMinimum())
+            left = null;
+        if (right != null && right.isMinimum())
+            right = null;
+
+        try
+        {
+            if (left == null)
+                return right == null ? partitions : partitions.headMap(right, includeRight);
+            else
+                return right == null
+                       ? partitions.tailMap(left, includeLeft)
+                       : partitions.subMap(left, includeLeft, right, includeRight);
+        }
+        catch (IllegalArgumentException e)
+        {
+            logger.error("Invalid range requested {} - {}", left, right);
+            throw e;
+        }
+    }
+
+    Partition getPartition(DecoratedKey key)
+    {
+        return partitions.get(key);
+    }
+
+    @Override
+    public UnfilteredRowIterator rowIterator(DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, SSTableReadsListener listener)
+    {
+        Partition p = getPartition(key);
+        if (p == null)
+            return null;
+        else
+            return p.unfilteredIterator(selectedColumns, slices, reversed);
+    }
+
+    @Override
+    public UnfilteredRowIterator rowIterator(DecoratedKey key)
+    {
+        Partition p = getPartition(key);
+        return p != null ? p.unfilteredIterator() : null;
+    }
+
+    private static int estimateRowOverhead(final int count)
+    {
+        // calculate row overhead
+        try (final OpOrder.Group group = new OpOrder().start())
+        {
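+            // Insert `count` synthetic keys, cloned through the allocator, with a shared placeholder value into a
+            // skip list and measure the average deep heap size per entry.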
+            int rowOverhead;
+            MemtableAllocator allocator = MEMORY_POOL.newAllocator("");
+            Cloner cloner = allocator.cloner(group);
+            ConcurrentNavigableMap<PartitionPosition, Object> partitions = new ConcurrentSkipListMap<>();
+            final Object val = new Object();
+            for (int i = 0 ; i < count ; i++)
+                partitions.put(cloner.clone(new BufferDecoratedKey(new LongToken(i), ByteBufferUtil.EMPTY_BYTE_BUFFER)), val);
+            double avgSize = ObjectSizes.measureDeep(partitions) / (double) count;
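+            // Round up unless the fractional part is negligible, then remove the token's own footprint and add the
+            // fixed per-partition overheads (empty AtomicBTreePartition and unshared Holder).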
+            rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize));
+            rowOverhead -= ObjectSizes.measureDeep(new LongToken(0));
+            rowOverhead += AtomicBTreePartition.EMPTY_SIZE;
+            rowOverhead += AbstractBTreePartition.HOLDER_UNSHARED_HEAP_SIZE;
+            allocator.setDiscarding();
+            allocator.setDiscarded();
+            return rowOverhead;
+        }
+    }
+
+    @Override
+    public FlushablePartitionSet<?> getFlushSet(PartitionPosition from, PartitionPosition to)
+    {
+        Map<PartitionPosition, AtomicBTreePartition> toFlush = getPartitionsSubMap(from, true, to, false);
+        long keysSize = 0;
+        long keyCount = 0;
+
+        boolean trackContention = logger.isTraceEnabled();
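+        // When trace logging is enabled, also count partitions that fell back to pessimistic locking due to update
+        // contention so hot spots show up in the log; otherwise only accumulate key sizes and counts.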
+        if (trackContention)
+        {
+            int heavilyContendedRowCount = 0;
+
+            for (AtomicBTreePartition partition : toFlush.values())
+            {
+                keysSize += partition.partitionKey().getKey().remaining();
+                ++keyCount;
+                if (partition.useLock())
+                    heavilyContendedRowCount++;
+            }
+
+            if (heavilyContendedRowCount > 0)
+                logger.trace("High update contention in {}/{} partitions of {} ", heavilyContendedRowCount, toFlush.size(), SkipListMemtable.this);
+        }
+        else
+        {
+            for (PartitionPosition key : toFlush.keySet())
+            {
+                // make sure we don't write nonsensical keys
+                assert key instanceof DecoratedKey;
+                keysSize += ((DecoratedKey) key).getKey().remaining();
+                ++keyCount;
+            }
+        }
+        final long partitionKeysSize = keysSize;
+        final long partitionCount = keyCount;
+
+        return new AbstractFlushablePartitionSet<AtomicBTreePartition>()
+        {
+            @Override
+            public Memtable memtable()
+            {
+                return SkipListMemtable.this;
+            }
+
+            @Override
+            public PartitionPosition from()
+            {
+                return from;
+            }
+
+            @Override
+            public PartitionPosition to()
+            {
+                return to;
+            }
+
+            @Override
+            public long partitionCount()
+            {
+                return partitionCount;
+            }
+
+            @Override
+            public Iterator<AtomicBTreePartition> iterator()
+            {
+                return toFlush.values().iterator();
+            }
+
+            @Override
+            public long partitionKeysSize()
+            {
+                return partitionKeysSize;
+            }
+        };
+    }
+
+    private static class MemtableUnfilteredPartitionIterator extends AbstractUnfilteredPartitionIterator implements UnfilteredPartitionIterator
+    {
+        private final TableMetadata metadata;
+        private final Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iter;
+        private final ColumnFilter columnFilter;
+        private final DataRange dataRange;
+
+        MemtableUnfilteredPartitionIterator(TableMetadata metadata, Map<PartitionPosition, AtomicBTreePartition> map, ColumnFilter columnFilter, DataRange dataRange)
+        {
+            this.metadata = metadata;
+            this.iter = map.entrySet().iterator();
+            this.columnFilter = columnFilter;
+            this.dataRange = dataRange;
+        }
+
+        @Override
+        public TableMetadata metadata()
+        {
+            return metadata;
+        }
+
+        @Override
+        public boolean hasNext()
+        {
+            return iter.hasNext();
+        }
+
+        @Override
+        public UnfilteredRowIterator next()
+        {
+            Map.Entry<PartitionPosition, AtomicBTreePartition> entry = iter.next();
+            // Actual stored key should be true DecoratedKey
+            assert entry.getKey() instanceof DecoratedKey;
+            DecoratedKey key = (DecoratedKey)entry.getKey();
+            ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(key);
+
+            return filter.getUnfilteredRowIterator(columnFilter, entry.getValue());
+        }
+    }
+
+    @Override
+    public long getLiveDataSize()
+    {
+        return liveDataSize.get();
+    }
+
+    /**
+     * For testing only. Give this memtable too big a size to make it always fail flushing.
+     */
+    @VisibleForTesting
+    public void makeUnflushable()
+    {
+        liveDataSize.addAndGet(1024L * 1024 * 1024 * 1024 * 1024);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/memtable/SkipListMemtableFactory.java b/src/java/org/apache/cassandra/db/memtable/SkipListMemtableFactory.java
new file mode 100644
index 0000000..76dee02
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/SkipListMemtableFactory.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.apache.cassandra.config.InheritingClass;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.schema.TableMetadataRef;
+
+/**
+ * This class makes better sense as an inner class to SkipListMemtable (which could be as simple as
+ * FACTORY = SkipListMemtable::new), but having it there causes the SkipListMemtable class to be initialized the first
+ * time it is referenced (e.g. during default memtable factory construction).
+ *
+ * Some tests want to set up table parameters before initializing DatabaseDescriptor -- this allows them to do so, and
+ * also makes sure the memtable memory pools are not created for offline tools.
+ */
+public class SkipListMemtableFactory implements Memtable.Factory
+{
+    @Override
+    public Memtable create(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Memtable.Owner owner)
+    {
+        return new SkipListMemtable(commitLogLowerBound, metadataRef, owner);
+    }
+
+    public static final SkipListMemtableFactory INSTANCE = new SkipListMemtableFactory();
+    public static InheritingClass CONFIGURATION = new InheritingClass(null, SkipListMemtable.class.getName(), ImmutableMap.of());
+}
diff --git a/src/java/org/apache/cassandra/db/monitoring/MonitorableImpl.java b/src/java/org/apache/cassandra/db/monitoring/MonitorableImpl.java
index a6e7947..31b5404 100644
--- a/src/java/org/apache/cassandra/db/monitoring/MonitorableImpl.java
+++ b/src/java/org/apache/cassandra/db/monitoring/MonitorableImpl.java
@@ -18,7 +18,7 @@
 
 package org.apache.cassandra.db.monitoring;
 
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 public abstract class MonitorableImpl implements Monitorable
 {
diff --git a/src/java/org/apache/cassandra/db/monitoring/MonitoringTask.java b/src/java/org/apache/cassandra/db/monitoring/MonitoringTask.java
index 0f8555f..d681e4b 100644
--- a/src/java/org/apache/cassandra/db/monitoring/MonitoringTask.java
+++ b/src/java/org/apache/cassandra/db/monitoring/MonitoringTask.java
@@ -23,9 +23,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -40,7 +38,8 @@
 
 import static java.lang.System.getProperty;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 
 /**
  * A task for monitoring in progress operations, currently only read queries, and aborting them if they time out.
@@ -207,7 +206,7 @@
         OperationsQueue(int maxOperations)
         {
             this.maxOperations = maxOperations;
-            this.queue = maxOperations > 0 ? new ArrayBlockingQueue<>(maxOperations) : new LinkedBlockingQueue<>();
+            this.queue = maxOperations > 0 ? newBlockingQueue(maxOperations) : newBlockingQueue();
             this.numDroppedOperations = new AtomicLong();
         }
 
diff --git a/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
index fe9934e..5926ced 100644
--- a/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
+++ b/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
@@ -68,6 +68,11 @@
             this.staticRow = staticRow == null ? Rows.EMPTY_STATIC_ROW : staticRow;
             this.stats = stats;
         }
+
+        protected Holder withColumns(RegularAndStaticColumns columns)
+        {
+            return new Holder(columns, this.tree, this.deletionInfo, this.staticRow, this.stats);
+        }
     }
 
     public DeletionInfo deletionInfo()
@@ -377,26 +382,52 @@
     @Override
     public String toString()
     {
-        StringBuilder sb = new StringBuilder();
+        return toString(true);
+    }
 
-        sb.append(String.format("[%s] key=%s partition_deletion=%s columns=%s",
-                                metadata(),
-                                metadata().partitionKeyType.getString(partitionKey().getKey()),
-                                partitionLevelDeletion(),
-                                columns()));
+    public String toString(boolean includeFullDetails)
+    {
+        StringBuilder sb = new StringBuilder();
+        if (includeFullDetails)
+        {
+            sb.append(String.format("[%s.%s] key=%s partition_deletion=%s columns=%s",
+                                    metadata().keyspace,
+                                    metadata().name,
+                                    metadata().partitionKeyType.getString(partitionKey().getKey()),
+                                    partitionLevelDeletion(),
+                                    columns()));
+        }
+        else
+        {
+            sb.append("key=").append(metadata().partitionKeyType.getString(partitionKey().getKey()));
+        }
 
         if (staticRow() != Rows.EMPTY_STATIC_ROW)
-            sb.append("\n    ").append(staticRow().toString(metadata(), true));
+            sb.append("\n    ").append(staticRow().toString(metadata(), includeFullDetails));
 
         try (UnfilteredRowIterator iter = unfilteredIterator())
         {
             while (iter.hasNext())
-                sb.append("\n    ").append(iter.next().toString(metadata(), true));
+                sb.append("\n    ").append(iter.next().toString(metadata(), includeFullDetails));
         }
-
         return sb.toString();
     }
 
+    @Override
+    public boolean equals(Object obj)
+    {
+        if (!(obj instanceof PartitionUpdate))
+            return false;
+
+        PartitionUpdate that = (PartitionUpdate) obj;
+        Holder a = this.holder(), b = that.holder();
+        return partitionKey.equals(that.partitionKey)
+               && metadata().id.equals(that.metadata().id)
+               && a.deletionInfo.equals(b.deletionInfo)
+               && a.staticRow.equals(b.staticRow)
+               && Iterators.elementsEqual(iterator(), that.iterator());
+    }
+
     public int rowCount()
     {
         return BTree.size(holder().tree);
diff --git a/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
index 7b275d0..986e707 100644
--- a/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
+++ b/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
@@ -39,9 +39,10 @@
 import org.apache.cassandra.utils.memory.Cloner;
 import org.apache.cassandra.utils.memory.HeapCloner;
 import org.apache.cassandra.utils.memory.MemtableAllocator;
-
 import com.google.common.annotations.VisibleForTesting;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * A thread-safe and atomic Partition implementation.
  *
@@ -74,7 +75,7 @@
 
     /**
      * (clock + allocation) granularity are combined to give us an acceptable (waste) allocation rate that is defined by
-     * the passage of real time of ALLOCATION_GRANULARITY_BYTES/CLOCK_GRANULARITY, or in this case 7.63Kb/ms, or 7.45Mb/s
+     * the passage of real time of ALLOCATION_GRANULARITY_BYTES/CLOCK_GRANULARITY, or in this case 7.63KiB/ms, or 7.45MiB/s
      *
      * in wasteTracker we maintain within EXCESS_WASTE_OFFSET before the current time; whenever we waste bytes
      * we increment the current value if it is within this window, and set it to the min of the window plus our waste
@@ -310,7 +311,7 @@
             while (TRACKER_PESSIMISTIC_LOCKING != (oldTrackerValue = wasteTracker))
             {
                 // Note this time value has an arbitrary offset, but is a constant rate 32 bit counter (that may wrap)
-                int time = (int) (System.nanoTime() >>> CLOCK_SHIFT);
+                int time = (int) (nanoTime() >>> CLOCK_SHIFT);
                 int delta = oldTrackerValue - time;
                 if (oldTrackerValue == TRACKER_NEVER_WASTED || delta >= 0 || delta < -EXCESS_WASTE_OFFSET)
                     delta = -EXCESS_WASTE_OFFSET;
diff --git a/src/java/org/apache/cassandra/db/partitions/Partition.java b/src/java/org/apache/cassandra/db/partitions/Partition.java
index b6297a1..8888104 100644
--- a/src/java/org/apache/cassandra/db/partitions/Partition.java
+++ b/src/java/org/apache/cassandra/db/partitions/Partition.java
@@ -22,7 +22,6 @@
 import javax.annotation.Nullable;
 
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.db.Slices;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.db.filter.ColumnFilter;
@@ -51,6 +50,11 @@
     public boolean isEmpty();
 
     /**
+     * Whether the partition object has rows. This may be false while the partition is still non-empty, e.g. if it only carries a deletion.
+     */
+    boolean hasRows();
+
+    /**
      * Returns the row corresponding to the provided clustering, or null if there is not such row.
      *
      * @param clustering clustering key to search
diff --git a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
index 5390666..6019d56 100644
--- a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
+++ b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
@@ -17,14 +17,18 @@
  */
 package org.apache.cassandra.db.partitions;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import com.google.common.primitives.Ints;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,6 +43,9 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.btree.BTree;
 import org.apache.cassandra.utils.btree.UpdateFunction;
+import org.apache.cassandra.utils.vint.VIntCoding;
+
+import static org.apache.cassandra.db.rows.UnfilteredRowIteratorSerializer.IS_EMPTY;
 
 /**
  * Stores updates made on a partition.
@@ -205,6 +212,20 @@
         return new PartitionUpdate(iterator.metadata(), iterator.partitionKey(), holder, deletionInfo, false);
     }
 
+
+    public PartitionUpdate withOnlyPresentColumns()
+    {
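+        // Rebuild the update's column header from the columns actually present in its rows, dropping any columns
+        // declared up front but never written.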
+        Set<ColumnMetadata> columnSet = new HashSet<>();
+
+        for (Row row : this)
+            for (ColumnData column : row)
+                columnSet.add(column.column());
+
+        RegularAndStaticColumns columns = RegularAndStaticColumns.builder().addAll(columnSet).build();
+        return new PartitionUpdate(this.metadata, this.partitionKey, this.holder.withColumns(columns), this.deletionInfo.mutableCopy(), false);
+    }
+
     protected boolean canHaveShadowedData()
     {
         return canHaveShadowedData;
@@ -323,23 +344,19 @@
      */
     public int dataSize()
     {
-        int size = 0;
+        return Ints.saturatedCast(BTree.<Row>accumulate(holder.tree, (row, value) -> row.dataSize() + value, 0L)
+                + holder.staticRow.dataSize() + holder.deletionInfo.dataSize());
+    }
 
-        if (holder.staticRow != null)
-        {
-            for (ColumnData cd : holder.staticRow.columnData())
-            {
-                size += cd.dataSize();
-            }
-        }
-
-        for (Row row : this)
-        {
-            size += row.clustering().dataSize();
-            for (ColumnData cd : row)
-                size += cd.dataSize();
-        }
-        return size;
+    /**
+     * The heap memory used by this update that is not shared with other objects.
+     *
+     * @return the unshared on-heap size of this update, in bytes.
+     */
+    public long unsharedHeapSize()
+    {
+        return BTree.<Row>accumulate(holder.tree, (row, value) -> row.unsharedHeapSize() + value, 0L)
+                + holder.staticRow.unsharedHeapSize() + holder.deletionInfo.unsharedHeapSize();
     }
 
     public TableMetadata metadata()
@@ -678,6 +695,21 @@
                                        false);
         }
 
+        public static boolean isEmpty(ByteBuffer in, DeserializationHelper.Flag flag, DecoratedKey key) throws IOException
+        {
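+            // Peek at the serialized flags without deserializing the whole update: skip the 16-byte table id, then
+            // the vint-length-prefixed partition key, and test the IS_EMPTY bit of the flags byte that follows.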
+            int position = in.position();
+            position += 16; // CFMetaData.serializer.deserialize(in, version);
+            if (position >= in.limit())
+                throw new EOFException();
+            // DecoratedKey key = metadata.decorateKey(ByteBufferUtil.readWithVIntLength(in));
+            int keyLength = (int) VIntCoding.getUnsignedVInt(in, position);
+            position += keyLength + VIntCoding.computeUnsignedVIntSize(keyLength);
+            if (position >= in.limit())
+                throw new EOFException();
+            int flags = in.get(position) & 0xff;
+            return (flags & IS_EMPTY) != 0;
+        }
+
         public long serializedSize(PartitionUpdate update, int version)
         {
             try (UnfilteredRowIterator iter = update.unfilteredIterator())
diff --git a/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java b/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java
index d9e9036..09f3ae3 100644
--- a/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java
+++ b/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.db.partitions;
 
 import java.util.function.LongPredicate;
-import java.util.function.Predicate;
 
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.rows.*;
diff --git a/src/java/org/apache/cassandra/db/repair/CassandraKeyspaceRepairManager.java b/src/java/org/apache/cassandra/db/repair/CassandraKeyspaceRepairManager.java
index 4fa8650..6e70666 100644
--- a/src/java/org/apache/cassandra/db/repair/CassandraKeyspaceRepairManager.java
+++ b/src/java/org/apache/cassandra/db/repair/CassandraKeyspaceRepairManager.java
@@ -19,16 +19,16 @@
 package org.apache.cassandra.db.repair;
 
 import java.util.Collection;
-import java.util.UUID;
+import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.function.BooleanSupplier;
 
-import com.google.common.util.concurrent.ListenableFuture;
-
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.repair.KeyspaceRepairManager;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
 
 public class CassandraKeyspaceRepairManager implements KeyspaceRepairManager
 {
@@ -40,11 +40,11 @@
     }
 
     @Override
-    public ListenableFuture prepareIncrementalRepair(UUID sessionID,
-                                                     Collection<ColumnFamilyStore> tables,
-                                                     RangesAtEndpoint tokenRanges,
-                                                     ExecutorService executor,
-                                                     BooleanSupplier isCancelled)
+    public Future<List<Void>> prepareIncrementalRepair(TimeUUID sessionID,
+                                                       Collection<ColumnFamilyStore> tables,
+                                                       RangesAtEndpoint tokenRanges,
+                                                       ExecutorService executor,
+                                                       BooleanSupplier isCancelled)
     {
         PendingAntiCompaction pac = new PendingAntiCompaction(sessionID, tables, tokenRanges, executor, isCancelled);
         return pac.run();
diff --git a/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java b/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java
index aab9f0b..62c1c33 100644
--- a/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java
+++ b/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java
@@ -20,7 +20,6 @@
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 
@@ -32,8 +31,11 @@
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.metrics.TopPartitionTracker;
 import org.apache.cassandra.repair.TableRepairManager;
 import org.apache.cassandra.repair.ValidationPartitionIterator;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.service.ActiveRepairService;
 
 public class CassandraTableRepairManager implements TableRepairManager
@@ -46,9 +48,9 @@
     }
 
     @Override
-    public ValidationPartitionIterator getValidationIterator(Collection<Range<Token>> ranges, UUID parentId, UUID sessionID, boolean isIncremental, int nowInSec) throws IOException
+    public ValidationPartitionIterator getValidationIterator(Collection<Range<Token>> ranges, TimeUUID parentId, TimeUUID sessionID, boolean isIncremental, int nowInSec, TopPartitionTracker.Collector topPartitionCollector) throws IOException, NoSuchRepairSessionException
     {
-        return new CassandraValidationIterator(cfs, ranges, parentId, sessionID, isIncremental, nowInSec);
+        return new CassandraValidationIterator(cfs, ranges, parentId, sessionID, isIncremental, nowInSec, topPartitionCollector);
     }
 
     @Override
@@ -58,7 +60,7 @@
     }
 
     @Override
-    public void incrementalSessionCompleted(UUID sessionID)
+    public void incrementalSessionCompleted(TimeUUID sessionID)
     {
         CompactionManager.instance.submitBackground(cfs);
     }
diff --git a/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java b/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java
index 6f2256f..53ef657 100644
--- a/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java
+++ b/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java
@@ -25,11 +25,11 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.function.LongPredicate;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Collections2;
 import com.google.common.collect.Maps;
 
 import org.slf4j.Logger;
@@ -51,13 +51,16 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.metrics.TopPartitionTracker;
 import org.apache.cassandra.repair.ValidationPartitionIterator;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Refs;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class CassandraValidationIterator extends ValidationPartitionIterator
 {
     private static final Logger logger = LoggerFactory.getLogger(CassandraValidationIterator.class);
@@ -102,23 +105,18 @@
 
     private static class ValidationCompactionIterator extends CompactionIterator
     {
-        public ValidationCompactionIterator(List<ISSTableScanner> scanners, ValidationCompactionController controller, int nowInSec, ActiveCompactionsTracker activeCompactions)
+        public ValidationCompactionIterator(List<ISSTableScanner> scanners, ValidationCompactionController controller, int nowInSec, ActiveCompactionsTracker activeCompactions, TopPartitionTracker.Collector topPartitionCollector)
         {
-            super(OperationType.VALIDATION, scanners, controller, nowInSec, UUIDGen.getTimeUUID(), activeCompactions);
+            super(OperationType.VALIDATION, scanners, controller, nowInSec, nextTimeUUID(), activeCompactions, topPartitionCollector);
         }
     }
 
     @VisibleForTesting
-    static synchronized Refs<SSTableReader> getSSTablesToValidate(ColumnFamilyStore cfs, Collection<Range<Token>> ranges, UUID parentId, boolean isIncremental)
+    public static synchronized Refs<SSTableReader> getSSTablesToValidate(ColumnFamilyStore cfs, Collection<Range<Token>> ranges, TimeUUID parentId, boolean isIncremental) throws NoSuchRepairSessionException
     {
         Refs<SSTableReader> sstables;
 
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(parentId);
-        if (prs == null)
-        {
-            // this means the parent repair session was removed - the repair session failed on another node and we removed it
-            return new Refs<>();
-        }
 
         Set<SSTableReader> sstablesToValidate = new HashSet<>();
 
@@ -126,7 +124,6 @@
         if (prs.isPreview())
         {
             predicate = prs.previewKind.predicate();
-
         }
         else if (isIncremental)
         {
@@ -175,7 +172,7 @@
     private final long estimatedPartitions;
     private final Map<Range<Token>, Long> rangePartitionCounts;
 
-    public CassandraValidationIterator(ColumnFamilyStore cfs, Collection<Range<Token>> ranges, UUID parentId, UUID sessionID, boolean isIncremental, int nowInSec) throws IOException
+    public CassandraValidationIterator(ColumnFamilyStore cfs, Collection<Range<Token>> ranges, TimeUUID parentId, TimeUUID sessionID, boolean isIncremental, int nowInSec, TopPartitionTracker.Collector topPartitionCollector) throws IOException, NoSuchRepairSessionException
     {
         this.cfs = cfs;
 
@@ -198,26 +195,30 @@
             if (!isIncremental)
             {
                 // flush first so everyone is validating data that is as similar as possible
-                StorageService.instance.forceKeyspaceFlush(cfs.keyspace.getName(), cfs.name);
+                cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.VALIDATION);
+                // Note: we also flush for incremental repair during the anti-compaction process.
             }
             sstables = getSSTablesToValidate(cfs, ranges, parentId, isIncremental);
         }
 
+        // Persistent memtables will not flush or snapshot to sstables, make an sstable with their data.
+        cfs.writeAndAddMemtableRanges(parentId,
+                                      () -> Collections2.transform(Range.normalize(ranges), Range::makeRowRange),
+                                      sstables);
+
         Preconditions.checkArgument(sstables != null);
+
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(parentId);
-        if (prs != null)
-        {
-            logger.info("{}, parentSessionId={}: Performing validation compaction on {} sstables in {}.{}",
-                        prs.previewKind.logPrefix(sessionID),
-                        parentId,
-                        sstables.size(),
-                        cfs.keyspace.getName(),
-                        cfs.getTableName());
-        }
+        logger.info("{}, parentSessionId={}: Performing validation compaction on {} sstables in {}.{}",
+                    prs.previewKind.logPrefix(sessionID),
+                    parentId,
+                    sstables.size(),
+                    cfs.keyspace.getName(),
+                    cfs.getTableName());
 
         controller = new ValidationCompactionController(cfs, getDefaultGcBefore(cfs, nowInSec));
         scanners = cfs.getCompactionStrategyManager().getScanners(sstables, ranges);
-        ci = new ValidationCompactionIterator(scanners.scanners, controller, nowInSec, CompactionManager.instance.active);
+        ci = new ValidationCompactionIterator(scanners.scanners, controller, nowInSec, CompactionManager.instance.active, topPartitionCollector);
 
         long allPartitions = 0;
         rangePartitionCounts = Maps.newHashMapWithExpectedSize(ranges.size());
@@ -241,6 +242,12 @@
     }
 
     @Override
+    public long getBytesRead()
+    {
+        return ci.getBytesRead();
+    }
+
+    @Override
     public void close()
     {
         // TODO: can any of this fail and leave stuff unreleased?
diff --git a/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java b/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java
index e0ee68d..af9888a 100644
--- a/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java
+++ b/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java
@@ -22,23 +22,23 @@
 import java.util.Collection;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.function.BooleanSupplier;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListenableFutureTask;
-import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.Uninterruptibles;
 
+import org.apache.cassandra.concurrent.FutureTask;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -57,6 +57,7 @@
 
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Performs an anti compaction on a set of tables and token ranges, isolating the unrepaired sstables
@@ -104,9 +105,9 @@
     static class AntiCompactionPredicate implements Predicate<SSTableReader>
     {
         private final Collection<Range<Token>> ranges;
-        private final UUID prsid;
+        private final TimeUUID prsid;
 
-        public AntiCompactionPredicate(Collection<Range<Token>> ranges, UUID prsid)
+        public AntiCompactionPredicate(Collection<Range<Token>> ranges, TimeUUID prsid)
         {
             this.ranges = ranges;
             this.prsid = prsid;
@@ -166,19 +167,19 @@
     public static class AcquisitionCallable implements Callable<AcquireResult>
     {
         private final ColumnFamilyStore cfs;
-        private final UUID sessionID;
+        private final TimeUUID sessionID;
         private final AntiCompactionPredicate predicate;
         private final int acquireRetrySeconds;
         private final int acquireSleepMillis;
 
         @VisibleForTesting
-        public AcquisitionCallable(ColumnFamilyStore cfs, Collection<Range<Token>> ranges, UUID sessionID, int acquireRetrySeconds, int acquireSleepMillis)
+        public AcquisitionCallable(ColumnFamilyStore cfs, Collection<Range<Token>> ranges, TimeUUID sessionID, int acquireRetrySeconds, int acquireSleepMillis)
         {
             this(cfs, sessionID, acquireRetrySeconds, acquireSleepMillis, new AntiCompactionPredicate(ranges, sessionID));
         }
 
         @VisibleForTesting
-        AcquisitionCallable(ColumnFamilyStore cfs, UUID sessionID, int acquireRetrySeconds, int acquireSleepMillis, AntiCompactionPredicate predicate)
+        AcquisitionCallable(ColumnFamilyStore cfs, TimeUUID sessionID, int acquireRetrySeconds, int acquireSleepMillis, AntiCompactionPredicate predicate)
         {
             this.cfs = cfs;
             this.sessionID = sessionID;
@@ -218,7 +219,7 @@
             logger.debug("acquiring sstables for pending anti compaction on session {}", sessionID);
             // try to modify after cancelling running compactions. This will attempt to cancel in flight compactions including the given sstables for
             // up to a minute, after which point, null will be returned
-            long start = System.currentTimeMillis();
+            long start = currentTimeMillis();
             long delay = TimeUnit.SECONDS.toMillis(acquireRetrySeconds);
             // Note that it is `predicate` throwing SSTableAcquisitionException if it finds a conflicting sstable
             // and we only retry when runWithCompactionsDisabled throws while using the predicate, not when acquireTuple does.
@@ -238,10 +239,10 @@
                                 sessionID,
                                 e.getMessage(),
                                 acquireSleepMillis,
-                                TimeUnit.SECONDS.convert(delay + start - System.currentTimeMillis(), TimeUnit.MILLISECONDS));
+                                TimeUnit.SECONDS.convert(delay + start - currentTimeMillis(), TimeUnit.MILLISECONDS));
                     Uninterruptibles.sleepUninterruptibly(acquireSleepMillis, TimeUnit.MILLISECONDS);
 
-                    if (System.currentTimeMillis() - start > delay)
+                    if (currentTimeMillis() - start > delay)
                         logger.warn("{} Timed out waiting to acquire sstables", sessionID, e);
 
                 }
@@ -250,25 +251,25 @@
                     logger.error("Got exception disabling compactions for session {}", sessionID, t);
                     throw t;
                 }
-            } while (System.currentTimeMillis() - start < delay);
+            } while (currentTimeMillis() - start < delay);
             return null;
         }
     }
 
-    static class AcquisitionCallback implements AsyncFunction<List<AcquireResult>, Object>
+    static class AcquisitionCallback implements Function<List<AcquireResult>, Future<List<Void>>>
     {
-        private final UUID parentRepairSession;
+        private final TimeUUID parentRepairSession;
         private final RangesAtEndpoint tokenRanges;
         private final BooleanSupplier isCancelled;
 
-        public AcquisitionCallback(UUID parentRepairSession, RangesAtEndpoint tokenRanges, BooleanSupplier isCancelled)
+        public AcquisitionCallback(TimeUUID parentRepairSession, RangesAtEndpoint tokenRanges, BooleanSupplier isCancelled)
         {
             this.parentRepairSession = parentRepairSession;
             this.tokenRanges = tokenRanges;
             this.isCancelled = isCancelled;
         }
 
-        ListenableFuture<?> submitPendingAntiCompaction(AcquireResult result)
+        Future<Void> submitPendingAntiCompaction(AcquireResult result)
         {
             return CompactionManager.instance.submitPendingAntiCompaction(result.cfs, tokenRanges, result.refs, result.txn, parentRepairSession, isCancelled);
         }
@@ -287,7 +288,7 @@
             });
         }
 
-        public ListenableFuture apply(List<AcquireResult> results) throws Exception
+        public Future<List<Void>> apply(List<AcquireResult> results)
         {
             if (Iterables.any(results, AcquisitionCallback::shouldAbort))
             {
@@ -305,26 +306,26 @@
                                                "This is usually caused by running multiple incremental repairs on nodes that share token ranges",
                                                parentRepairSession);
                 logger.warn(message);
-                return Futures.immediateFailedFuture(new SSTableAcquisitionException(message));
+                return ImmediateFuture.failure(new SSTableAcquisitionException(message));
             }
             else
             {
-                List<ListenableFuture<?>> pendingAntiCompactions = new ArrayList<>(results.size());
+                List<Future<Void>> pendingAntiCompactions = new ArrayList<>(results.size());
                 for (AcquireResult result : results)
                 {
                     if (result.txn != null)
                     {
-                        ListenableFuture<?> future = submitPendingAntiCompaction(result);
+                        Future<Void> future = submitPendingAntiCompaction(result);
                         pendingAntiCompactions.add(future);
                     }
                 }
 
-                return Futures.allAsList(pendingAntiCompactions);
+                return FutureCombiner.allOf(pendingAntiCompactions);
             }
         }
     }
 
-    private final UUID prsId;
+    private final TimeUUID prsId;
     private final Collection<ColumnFamilyStore> tables;
     private final RangesAtEndpoint tokenRanges;
     private final ExecutorService executor;
@@ -332,7 +333,7 @@
     private final int acquireSleepMillis;
     private final BooleanSupplier isCancelled;
 
-    public PendingAntiCompaction(UUID prsId,
+    public PendingAntiCompaction(TimeUUID prsId,
                                  Collection<ColumnFamilyStore> tables,
                                  RangesAtEndpoint tokenRanges,
                                  ExecutorService executor,
@@ -342,7 +343,7 @@
     }
 
     @VisibleForTesting
-    PendingAntiCompaction(UUID prsId,
+    PendingAntiCompaction(TimeUUID prsId,
                           Collection<ColumnFamilyStore> tables,
                           RangesAtEndpoint tokenRanges,
                           int acquireRetrySeconds,
@@ -359,29 +360,29 @@
         this.isCancelled = isCancelled;
     }
 
-    public ListenableFuture run()
+    public Future<List<Void>> run()
     {
-        List<ListenableFutureTask<AcquireResult>> tasks = new ArrayList<>(tables.size());
+        List<FutureTask<AcquireResult>> tasks = new ArrayList<>(tables.size());
         for (ColumnFamilyStore cfs : tables)
         {
-            cfs.forceBlockingFlush();
-            ListenableFutureTask<AcquireResult> task = ListenableFutureTask.create(getAcquisitionCallable(cfs, tokenRanges.ranges(), prsId, acquireRetrySeconds, acquireSleepMillis));
+            cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.ANTICOMPACTION);
+            FutureTask<AcquireResult> task = new FutureTask<>(getAcquisitionCallable(cfs, tokenRanges.ranges(), prsId, acquireRetrySeconds, acquireSleepMillis));
             executor.submit(task);
             tasks.add(task);
         }
-        ListenableFuture<List<AcquireResult>> acquisitionResults = Futures.successfulAsList(tasks);
-        ListenableFuture compactionResult = Futures.transformAsync(acquisitionResults, getAcquisitionCallback(prsId, tokenRanges), MoreExecutors.directExecutor());
-        return compactionResult;
+
+        Future<List<AcquireResult>> acquisitionResults = FutureCombiner.successfulOf(tasks);
+        return acquisitionResults.flatMap(getAcquisitionCallback(prsId, tokenRanges));
     }
 
     @VisibleForTesting
-    protected AcquisitionCallable getAcquisitionCallable(ColumnFamilyStore cfs, Set<Range<Token>> ranges, UUID prsId, int acquireRetrySeconds, int acquireSleepMillis)
+    protected AcquisitionCallable getAcquisitionCallable(ColumnFamilyStore cfs, Set<Range<Token>> ranges, TimeUUID prsId, int acquireRetrySeconds, int acquireSleepMillis)
     {
         return new AcquisitionCallable(cfs, ranges, prsId, acquireRetrySeconds, acquireSleepMillis);
     }
 
     @VisibleForTesting
-    protected AcquisitionCallback getAcquisitionCallback(UUID prsId, RangesAtEndpoint tokenRanges)
+    protected AcquisitionCallback getAcquisitionCallback(TimeUUID prsId, RangesAtEndpoint tokenRanges)
     {
         return new AcquisitionCallback(prsId, tokenRanges, isCancelled);
     }
diff --git a/src/java/org/apache/cassandra/db/rows/AbstractCell.java b/src/java/org/apache/cassandra/db/rows/AbstractCell.java
index d538ee7..b9b5d60 100644
--- a/src/java/org/apache/cassandra/db/rows/AbstractCell.java
+++ b/src/java/org/apache/cassandra/db/rows/AbstractCell.java
@@ -98,6 +98,12 @@
         return this;
     }
 
+
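+    // a cell written before the given timestamp is dropped entirely; newer cells are returned unchanged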
+    public Cell<?> purgeDataOlderThan(long timestamp)
+    {
+        return this.timestamp() < timestamp ? null : this;
+    }
+
     @Override
     public Cell<?> clone(ByteBufferCloner cloner)
     {
@@ -201,11 +207,11 @@
         AbstractType<?> type = column().type;
         if (type instanceof CollectionType && type.isMultiCell())
         {
-            CollectionType ct = (CollectionType)type;
+            CollectionType<?> ct = (CollectionType<?>) type;
             return String.format("[%s[%s]=%s %s]",
                                  column().name,
                                  ct.nameComparator().getString(path().get(0)),
-                                 ct.valueComparator().getString(value(), accessor()),
+                                 isTombstone() ? "<tombstone>" : ct.valueComparator().getString(value(), accessor()),
                                  livenessInfoString());
         }
         if (isTombstone())
diff --git a/src/java/org/apache/cassandra/db/rows/AbstractRangeTombstoneMarker.java b/src/java/org/apache/cassandra/db/rows/AbstractRangeTombstoneMarker.java
index 7dac1fa..a2c3a4d 100644
--- a/src/java/org/apache/cassandra/db/rows/AbstractRangeTombstoneMarker.java
+++ b/src/java/org/apache/cassandra/db/rows/AbstractRangeTombstoneMarker.java
@@ -17,8 +17,6 @@
  */
 package org.apache.cassandra.db.rows;
 
-import java.nio.ByteBuffer;
-
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.ClusteringBoundOrBoundary;
 
diff --git a/src/java/org/apache/cassandra/db/rows/ArrayCell.java b/src/java/org/apache/cassandra/db/rows/ArrayCell.java
index 48a97a7..65fd729 100644
--- a/src/java/org/apache/cassandra/db/rows/ArrayCell.java
+++ b/src/java/org/apache/cassandra/db/rows/ArrayCell.java
@@ -111,6 +111,13 @@
         return super.clone(cloner);
     }
 
+    @Override
+    public long unsharedHeapSize()
+    {
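+        // counts the full on-heap value array; the excluding-data variant below subtracts the value bytes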
+        return EMPTY_SIZE + ObjectSizes.sizeOfArray(value) + (path == null ? 0 : path.unsharedHeapSize());
+    }
+
+    @Override
     public long unsharedHeapSizeExcludingData()
     {
         return EMPTY_SIZE + ObjectSizes.sizeOfArray(value) - value.length + (path == null ? 0 : path.unsharedHeapSizeExcludingData());
diff --git a/src/java/org/apache/cassandra/db/rows/BTreeRow.java b/src/java/org/apache/cassandra/db/rows/BTreeRow.java
index 5a10f37..125932b 100644
--- a/src/java/org/apache/cassandra/db/rows/BTreeRow.java
+++ b/src/java/org/apache/cassandra/db/rows/BTreeRow.java
@@ -469,6 +469,18 @@
         return transformAndFilter(newInfo, newDeletion, (cd) -> cd.purge(purger, nowInSec));
     }
 
+    public Row purgeDataOlderThan(long timestamp, boolean enforceStrictLiveness)
+    {
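+        // clear the primary key liveness and row deletion if they predate the cutoff, then purge each column's data the same way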
+        LivenessInfo newInfo = primaryKeyLivenessInfo.timestamp() < timestamp ? LivenessInfo.EMPTY : primaryKeyLivenessInfo;
+        Deletion newDeletion = deletion.time().markedForDeleteAt() < timestamp ? Deletion.LIVE : deletion;
+
+        // when enforceStrictLiveness is set, a row is considered dead when its PK liveness info is not present
+        if (enforceStrictLiveness && newDeletion.isLive() && newInfo.isEmpty())
+            return null;
+
+        return transformAndFilter(newInfo, newDeletion, cd -> cd.purgeDataOlderThan(timestamp));
+    }
+
     @Override
     public Row transformAndFilter(LivenessInfo info, Deletion deletion, Function<ColumnData, ColumnData> function)
     {
@@ -514,6 +526,19 @@
         return Ints.checkedCast(accumulate((cd, v) -> v + cd.dataSize(), dataSize));
     }
 
+    @Override
+    public long unsharedHeapSize()
+    {
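+        // shallow row size plus clustering, liveness info, deletion and the btree structure, then each column's full heap footprint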
+        long heapSize = EMPTY_SIZE
+                        + clustering.unsharedHeapSize()
+                        + primaryKeyLivenessInfo.unsharedHeapSize()
+                        + deletion.unsharedHeapSize()
+                        + BTree.sizeOfStructureOnHeap(btree);
+
+        return accumulate((cd, v) -> v + cd.unsharedHeapSize(), heapSize);
+    }
+
+    @Override
     public long unsharedHeapSizeExcludingData()
     {
         long heapSize = EMPTY_SIZE
diff --git a/src/java/org/apache/cassandra/db/rows/BufferCell.java b/src/java/org/apache/cassandra/db/rows/BufferCell.java
index fc85b39..c18fd3e 100644
--- a/src/java/org/apache/cassandra/db/rows/BufferCell.java
+++ b/src/java/org/apache/cassandra/db/rows/BufferCell.java
@@ -135,6 +135,12 @@
     }
 
     @Override
+    public long unsharedHeapSize()
+    {
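+        // unlike unsharedHeapSizeExcludingData below, this includes the on-heap size of the value buffer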
+        return EMPTY_SIZE + ObjectSizes.sizeOnHeapOf(value) + (path == null ? 0 : path.unsharedHeapSize());
+    }
+
+    @Override
     public Cell<?> clone(ByteBufferCloner cloner)
     {
         if (!value.hasRemaining() && path == null)
@@ -143,6 +149,7 @@
         return super.clone(cloner);
     }
 
+    @Override
     public long unsharedHeapSizeExcludingData()
     {
         return EMPTY_SIZE + ObjectSizes.sizeOnHeapExcludingData(value) + (path == null ? 0 : path.unsharedHeapSizeExcludingData());
diff --git a/src/java/org/apache/cassandra/db/rows/Cell.java b/src/java/org/apache/cassandra/db/rows/Cell.java
index 1a7cd71..eafdc37 100644
--- a/src/java/org/apache/cassandra/db/rows/Cell.java
+++ b/src/java/org/apache/cassandra/db/rows/Cell.java
@@ -174,6 +174,10 @@
     // Overrides super type to provide a more precise return type.
     public abstract Cell<?> purge(DeletionPurger purger, int nowInSec);
 
+    @Override
+    // Overrides super type to provide a more precise return type.
+    public abstract Cell<?> purgeDataOlderThan(long timestamp);
+
     /**
      * The serialization format for cell is:
      *     [ flags ][ timestamp ][ deletion time ][    ttl    ][ path size ][ path ][ value size ][ value ]
diff --git a/src/java/org/apache/cassandra/db/rows/CellPath.java b/src/java/org/apache/cassandra/db/rows/CellPath.java
index aacccf3..8e018a7 100644
--- a/src/java/org/apache/cassandra/db/rows/CellPath.java
+++ b/src/java/org/apache/cassandra/db/rows/CellPath.java
@@ -21,6 +21,7 @@
 import java.nio.ByteBuffer;
 import java.util.Objects;
 
+import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.db.Digest;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -31,7 +32,7 @@
 /**
  * A path for a cell belonging to a complex column type (non-frozen collection or UDT).
  */
-public abstract class CellPath
+public abstract class CellPath implements IMeasurableMemory
 {
     public static final CellPath BOTTOM = new EmptyCellPath();
     public static final CellPath TOP = new EmptyCellPath();
@@ -64,6 +65,8 @@
 
     public abstract long unsharedHeapSizeExcludingData();
 
+    public abstract long unsharedHeapSize();
+
     @Override
     public final int hashCode()
     {
@@ -126,6 +129,13 @@
             return new SingleItemCellPath(cloner.clone(value));
         }
 
+        @Override
+        public long unsharedHeapSize()
+        {
+            return EMPTY_SIZE + ObjectSizes.sizeOnHeapOf(value);
+        }
+
+        @Override
         public long unsharedHeapSizeExcludingData()
         {
             return EMPTY_SIZE + ObjectSizes.sizeOnHeapExcludingData(value);
@@ -150,6 +160,14 @@
             return this;
         }
 
+        @Override
+        public long unsharedHeapSize()
+        {
+            // empty only happens with a cached reference, so 0 unshared space
+            return 0;
+        }
+
+        @Override
         public long unsharedHeapSizeExcludingData()
         {
             return 0;
diff --git a/src/java/org/apache/cassandra/db/rows/ColumnData.java b/src/java/org/apache/cassandra/db/rows/ColumnData.java
index e4fa83c..e0bc552 100644
--- a/src/java/org/apache/cassandra/db/rows/ColumnData.java
+++ b/src/java/org/apache/cassandra/db/rows/ColumnData.java
@@ -19,6 +19,7 @@
 
 import java.util.Comparator;
 
+import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.db.Digest;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.db.DeletionPurger;
@@ -36,7 +37,7 @@
  * In practice, there are only 2 implementations of this: either {@link Cell} for simple columns
  * or {@code ComplexColumnData} for complex columns.
  */
-public abstract class ColumnData
+public abstract class ColumnData implements IMeasurableMemory
 {
     public static final Comparator<ColumnData> comparator = (cd1, cd2) -> cd1.column().compareTo(cd2.column());
 
@@ -193,7 +194,7 @@
         {
             if (!(existing instanceof ComplexColumnData))
             {
-                if (activeDeletion.deletes((Cell) existing))
+                if (activeDeletion.deletes((Cell<?>) existing))
                 {
                     recordDeletion.delete(existing);
                     return null;
@@ -246,6 +247,8 @@
 
     public abstract long unsharedHeapSizeExcludingData();
 
+    public abstract long unsharedHeapSize();
+
     /**
      * Validate the column data.
      *
@@ -285,6 +288,7 @@
     public abstract ColumnData markCounterLocalToBeCleared();
 
     public abstract ColumnData purge(DeletionPurger purger, int nowInSec);
+    public abstract ColumnData purgeDataOlderThan(long timestamp);
 
     public abstract long maxTimestamp();
 }
diff --git a/src/java/org/apache/cassandra/db/rows/ComplexColumnData.java b/src/java/org/apache/cassandra/db/rows/ComplexColumnData.java
index ee86e5d..12880b7 100644
--- a/src/java/org/apache/cassandra/db/rows/ComplexColumnData.java
+++ b/src/java/org/apache/cassandra/db/rows/ComplexColumnData.java
@@ -28,11 +28,8 @@
 import org.apache.cassandra.db.DeletionTime;
 import org.apache.cassandra.db.Digest;
 import org.apache.cassandra.db.LivenessInfo;
-import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.ByteType;
-import org.apache.cassandra.db.marshal.CollectionType;
 import org.apache.cassandra.db.marshal.SetType;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.DroppedColumn;
@@ -136,6 +133,13 @@
         return size;
     }
 
+    public long unsharedHeapSize()
+    {
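+        // shallow size plus the cell btree and the complex deletion, then each cell's full heap footprint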
+        long heapSize = EMPTY_SIZE + BTree.sizeOnHeapOf(cells) + complexDeletion.unsharedHeapSize();
+        return BTree.<Cell>accumulate(cells, (cell, value) -> value + cell.unsharedHeapSize(), heapSize);
+    }
+
+    @Override
     public long unsharedHeapSizeExcludingData()
     {
         long heapSize = EMPTY_SIZE + BTree.sizeOnHeapOf(cells);
@@ -210,6 +214,12 @@
         return transformAndFilter(complexDeletion, (cell) -> filter.fetchedCellIsQueried(column, cell.path()) ? null : cell);
     }
 
+    public ComplexColumnData purgeDataOlderThan(long timestamp)
+    {
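+        // drop the complex deletion if it predates the cutoff and purge each cell against the same timestamp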
+        DeletionTime newDeletion = complexDeletion.markedForDeleteAt() < timestamp ? DeletionTime.LIVE : complexDeletion;
+        return transformAndFilter(newDeletion, (cell) -> cell.purgeDataOlderThan(timestamp));
+    }
+
     private ComplexColumnData update(DeletionTime newDeletion, Object[] newCells)
     {
         if (cells == newCells && newDeletion == complexDeletion)
diff --git a/src/java/org/apache/cassandra/db/rows/NativeCell.java b/src/java/org/apache/cassandra/db/rows/NativeCell.java
index d538504..e7941cf 100644
--- a/src/java/org/apache/cassandra/db/rows/NativeCell.java
+++ b/src/java/org/apache/cassandra/db/rows/NativeCell.java
@@ -166,6 +166,13 @@
         return new BufferCell(column, timestamp(), ttl(), localDeletionTime(), ByteBufferUtil.EMPTY_BYTE_BUFFER, path());
     }
 
+    @Override
+    public long unsharedHeapSize()
+    {
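+        // value and path live in native memory, so only the object shell counts towards heap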
+        return EMPTY_SIZE;
+    }
+
+    @Override
     public long unsharedHeapSizeExcludingData()
     {
         return EMPTY_SIZE;
diff --git a/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundMarker.java b/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundMarker.java
index d030997..67f43c9 100644
--- a/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundMarker.java
+++ b/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundMarker.java
@@ -22,6 +22,7 @@
 import org.apache.cassandra.db.marshal.ValueAccessor;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.memory.ByteBufferCloner;
 
 /**
@@ -29,6 +30,8 @@
  */
 public class RangeTombstoneBoundMarker extends AbstractRangeTombstoneMarker<ClusteringBound<?>>
 {
+    private static final long EMPTY_SIZE = ObjectSizes.measure(new RangeTombstoneBoundMarker(new ArrayClusteringBound(ClusteringPrefix.Kind.INCL_START_BOUND, AbstractArrayClusteringPrefix.EMPTY_VALUES_ARRAY), null));
+
     private final DeletionTime deletion;
 
     public RangeTombstoneBoundMarker(ClusteringBound<?> bound, DeletionTime deletion)
@@ -157,6 +160,12 @@
         deletion.digest(digest);
     }
 
+    @Override
+    public long unsharedHeapSize()
+    {
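+        // shallow marker size plus the heap retained by its deletion time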
+        return EMPTY_SIZE + deletion.unsharedHeapSize();
+    }
+
     public String toString(TableMetadata metadata)
     {
         return String.format("Marker %s@%d/%d", bound.toString(metadata), deletion.markedForDeleteAt(), deletion.localDeletionTime());
diff --git a/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundaryMarker.java b/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundaryMarker.java
index a1be85d..c36dcfd 100644
--- a/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundaryMarker.java
+++ b/src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundaryMarker.java
@@ -23,13 +23,18 @@
 import org.apache.cassandra.db.marshal.ValueAccessor;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
+
+import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.memory.ByteBufferCloner;
 
+
 /**
  * A range tombstone marker that represents a boundary between 2 range tombstones (i.e. it closes one range and open another).
  */
 public class RangeTombstoneBoundaryMarker extends AbstractRangeTombstoneMarker<ClusteringBoundary<?>>
 {
+    private static final long EMPTY_SIZE = ObjectSizes.measure(new RangeTombstoneBoundaryMarker(new ArrayClusteringBoundary(ClusteringPrefix.Kind.INCL_END_EXCL_START_BOUNDARY, new byte[][] { new byte[0]}), null, null));
+
     private final DeletionTime endDeletion;
     private final DeletionTime startDeletion;
 
@@ -188,6 +193,12 @@
         startDeletion.digest(digest);
     }
 
+    @Override
+    public long unsharedHeapSize()
+    {
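+        // shallow marker size plus the heap retained by the end and start deletion times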
+        return EMPTY_SIZE + startDeletion.unsharedHeapSize() + endDeletion.unsharedHeapSize();
+    }
+
     public String toString(TableMetadata metadata)
     {
         return String.format("Marker %s@%d/%d-%d/%d",
diff --git a/src/java/org/apache/cassandra/db/rows/RangeTombstoneMarker.java b/src/java/org/apache/cassandra/db/rows/RangeTombstoneMarker.java
index 3b64545..2db6204 100644
--- a/src/java/org/apache/cassandra/db/rows/RangeTombstoneMarker.java
+++ b/src/java/org/apache/cassandra/db/rows/RangeTombstoneMarker.java
@@ -19,6 +19,7 @@
 
 import java.util.*;
 
+import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.utils.memory.ByteBufferCloner;
 
@@ -27,7 +28,7 @@
  * <p>
  * There are 2 types of markers: bounds (see {@link RangeTombstoneBoundMarker}) and boundaries (see {@link RangeTombstoneBoundaryMarker}).
  */
-public interface RangeTombstoneMarker extends Unfiltered
+public interface RangeTombstoneMarker extends Unfiltered, IMeasurableMemory
 {
     @Override
     public ClusteringBoundOrBoundary<?> clustering();
diff --git a/src/java/org/apache/cassandra/db/rows/Row.java b/src/java/org/apache/cassandra/db/rows/Row.java
index 40d1467..bf8c051 100644
--- a/src/java/org/apache/cassandra/db/rows/Row.java
+++ b/src/java/org/apache/cassandra/db/rows/Row.java
@@ -22,6 +22,7 @@
 import java.util.function.Consumer;
 import java.util.function.Function;
 
+import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.schema.ColumnMetadata;
@@ -49,7 +50,7 @@
  * its own data. For instance, a {@code Row} cannot contain a cell that is deleted by its own
  * row deletion.
  */
-public interface Row extends Unfiltered, Iterable<ColumnData>
+public interface Row extends Unfiltered, Iterable<ColumnData>, IMeasurableMemory
 {
     /**
      * The clustering values for this row.
@@ -116,7 +117,7 @@
      * 
      * @param nowInSec the current time to decide what is deleted and what isn't
      * @param enforceStrictLiveness whether the row should be purged if there is no PK liveness info,
-     *                              normally retrieved from {@link CFMetaData#enforceStrictLiveness()}
+     *                              normally retrieved from {@link TableMetadata#enforceStrictLiveness()}
      * @return true if there is some live information
      */
     public boolean hasLiveData(int nowInSec, boolean enforceStrictLiveness);
@@ -278,6 +279,11 @@
      */
     public Row withOnlyQueriedData(ColumnFilter filter);
 
+    /**
+     * Returns a copy of this row without any data with a timestamp older than the one provided.
+     */
+    public Row purgeDataOlderThan(long timestamp, boolean enforceStrictLiveness);
+
     /**
      * Returns a copy of this row where all counter cells have they "local" shard marked for clearing.
      */
@@ -310,6 +316,7 @@
     public long unsharedHeapSizeExcludingData();
 
     public String toString(TableMetadata metadata, boolean fullDetails);
+    public long unsharedHeapSize();
 
     /**
      * Apply a function to every column in a row
@@ -650,6 +657,14 @@
         public SimpleBuilder delete();
 
         /**
+         * Deletes the whole row with a timestamp that is just before the new data's timestamp, to make sure no expired
+         * data remains on the row.
+         *
+         * @return this builder.
+         */
+        public SimpleBuilder deletePrevious();
+
+        /**
          * Removes the value for a given column (creating a tombstone).
          *
          * @param columnName the name of the column to delete.
diff --git a/src/java/org/apache/cassandra/db/rows/Rows.java b/src/java/org/apache/cassandra/db/rows/Rows.java
index c44e0a2..8a8de61 100644
--- a/src/java/org/apache/cassandra/db/rows/Rows.java
+++ b/src/java/org/apache/cassandra/db/rows/Rows.java
@@ -237,8 +237,8 @@
     }
 
     /**
-     * Merges two rows into the given builder, mainly for merging memtable rows. In addition to reconciling the cells
-     * in each row, the liveness info, and deletion times for the row and complex columns are also merged.
+     * Merges two rows. In addition to reconciling the cells in each row, the liveness info and deletion times for
+     * the row and complex columns are also merged.
      * <p>
      * Note that this method assumes that the provided rows can meaningfully be reconciled together. That is,
      * that the rows share the same clustering value, and belong to the same partition.
diff --git a/src/java/org/apache/cassandra/db/rows/ThrottledUnfilteredIterator.java b/src/java/org/apache/cassandra/db/rows/ThrottledUnfilteredIterator.java
index 40be716..cbbac64 100644
--- a/src/java/org/apache/cassandra/db/rows/ThrottledUnfilteredIterator.java
+++ b/src/java/org/apache/cassandra/db/rows/ThrottledUnfilteredIterator.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db.rows;
 
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorSerializer.java b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorSerializer.java
index 938a3ee..11541ee 100644
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorSerializer.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorSerializer.java
@@ -66,7 +66,7 @@
 {
     protected static final Logger logger = LoggerFactory.getLogger(UnfilteredRowIteratorSerializer.class);
 
-    private static final int IS_EMPTY               = 0x01;
+    public  static final int IS_EMPTY               = 0x01;
     private static final int IS_REVERSED            = 0x02;
     private static final int HAS_PARTITION_DELETION = 0x04;
     private static final int HAS_STATIC_ROW         = 0x08;
diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java
index b6f4254..4d1d71b 100644
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java
@@ -97,7 +97,7 @@
     {
         @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator
         UnfilteredRowIterator iter = RTBoundValidator.validate(
-            sstable.iterator(partitionKey(), filter.getSlices(metadata()), selectedColumns, filter.isReversed(), listener),
+            sstable.rowIterator(partitionKey(), filter.getSlices(metadata()), selectedColumns, filter.isReversed(), listener),
             RTBoundValidator.Stage.SSTABLE,
             false
         );
@@ -185,7 +185,7 @@
         // CASSANDRA-11369 is there to fix this afterwards.
 
         // Creating the iterator ensures that rowIndexEntry is loaded if available (partitions bigger than
-        // DatabaseDescriptor.column_index_size_in_kb)
+        // DatabaseDescriptor.column_index_size)
         if (!canUseMetadataLowerBound())
             maybeInit();
 
diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredSource.java b/src/java/org/apache/cassandra/db/rows/UnfilteredSource.java
new file mode 100644
index 0000000..b984522
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredSource.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.rows;
+
+import org.apache.cassandra.db.DataRange;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
+
+/**
+ * Common data access interface for sstables and memtables.
+ */
+public interface UnfilteredSource
+{
+    /**
+     * Returns a row iterator for the given partition, applying the specified row and column filters.
+     *
+     * @param key the partition key
+     * @param slices the row ranges to return
+     * @param columnFilter filter selecting which columns to include in the returned rows
+     * @param reversed true if the content should be returned in reverse order
+     * @param listener a listener used to handle internal read events
+     */
+    UnfilteredRowIterator rowIterator(DecoratedKey key,
+                                      Slices slices,
+                                      ColumnFilter columnFilter,
+                                      boolean reversed,
+                                      SSTableReadsListener listener);
+
+    default UnfilteredRowIterator rowIterator(DecoratedKey key)
+    {
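+        // convenience overload: all slices, forward order, no-op read listener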
+        return rowIterator(key, Slices.ALL, ColumnFilter.NONE, false, SSTableReadsListener.NOOP_LISTENER);
+    }
+
+    /**
+     * Returns a partition iterator for the given data range.
+     *
+     * @param columnFilter filter to apply to all returned partitions
+     * @param dataRange the partition and clustering range queried
+     * @param listener a listener used to handle internal read events
+     */
+    UnfilteredPartitionIterator partitionIterator(ColumnFilter columnFilter,
+                                                  DataRange dataRange,
+                                                  SSTableReadsListener listener);
+
+    /** Minimum timestamp of all stored data */
+    long getMinTimestamp();
+
+    /** Minimum local deletion time of all stored data */
+    int getMinLocalDeletionTime();
+}
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamReader.java b/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamReader.java
index ff9e6f7..005a9aa 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamReader.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamReader.java
@@ -19,12 +19,10 @@
 
 import java.io.IOException;
 
-import com.google.common.base.Throwables;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.io.compress.CompressionMetadata;
 import org.apache.cassandra.io.sstable.SSTableMultiWriter;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -35,8 +33,6 @@
 import org.apache.cassandra.utils.ChecksumType;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static org.apache.cassandra.utils.Throwables.extractIOExceptionCause;
-
 /**
  * CassandraStreamReader that reads from streamed compressed SSTable
  */
@@ -58,7 +54,7 @@
      */
     @Override
     @SuppressWarnings("resource") // input needs to remain open, streams on top of it can't be closed
-    public SSTableMultiWriter read(DataInputPlus inputPlus) throws IOException
+    public SSTableMultiWriter read(DataInputPlus inputPlus) throws Throwable
     {
         long totalSize = totalSize();
 
@@ -82,6 +78,7 @@
             deserializer = new StreamDeserializer(cfs.metadata(), in, inputVersion, getHeader(cfs.metadata()));
             writer = createWriter(cfs, totalSize, repairedAt, pendingRepair, format);
             String filename = writer.getFilename();
+            String sectionName = filename + '-' + fileSeqNum;
             int sectionIdx = 0;
             for (SSTableReader.PartitionPositionBounds section : sections)
             {
@@ -93,15 +90,19 @@
                 cis.position(section.lowerPosition);
                 in.reset(0);
 
+                long lastBytesRead = 0;
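+                // report incremental progress: each call below passes both the running total and the delta read since the previous report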
                 while (in.getBytesRead() < sectionLength)
                 {
                     writePartition(deserializer, writer);
                     // when compressed, report total bytes of compressed chunks read since remoteFile.size is the sum of chunks transferred
-                    session.progress(filename + '-' + fileSeqNum, ProgressInfo.Direction.IN, cis.chunkBytesRead(), totalSize);
+                    long bytesRead = cis.chunkBytesRead();
+                    long bytesDelta = bytesRead - lastBytesRead;
+                    lastBytesRead = bytesRead;
+                    session.progress(sectionName, ProgressInfo.Direction.IN, bytesRead, bytesDelta, totalSize);
                 }
                 assert in.getBytesRead() == sectionLength;
             }
-            logger.trace("[Stream #{}] Finished receiving file #{} from {} readBytes = {}, totalSize = {}", session.planId(), fileSeqNum,
+            logger.info("[Stream #{}] Finished receiving file #{} from {} readBytes = {}, totalSize = {}", session.planId(), fileSeqNum,
                          session.peer, FBUtilities.prettyPrintMemory(cis.chunkBytesRead()), FBUtilities.prettyPrintMemory(totalSize));
             return writer;
         }
@@ -111,12 +112,8 @@
             logger.warn("[Stream {}] Error while reading partition {} from stream on ks='{}' and table='{}'.",
                         session.planId(), partitionKey, cfs.keyspace.getName(), cfs.getTableName());
             if (writer != null)
-            {
-                writer.abort(e);
-            }
-            if (extractIOExceptionCause(e).isPresent())
-                throw e;
-            throw Throwables.propagate(e);
+                e = writer.abort(e);
+            throw e;
         }
     }
 
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamWriter.java b/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamWriter.java
index 8d0b67f..41fd9b1 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamWriter.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraCompressedStreamWriter.java
@@ -30,9 +30,8 @@
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.ChannelProxy;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.net.AsyncStreamingOutputPlus;
 import org.apache.cassandra.streaming.ProgressInfo;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -57,9 +56,8 @@
     }
 
     @Override
-    public void write(DataOutputStreamPlus output) throws IOException
+    public void write(StreamingDataOutputPlus out) throws IOException
     {
-        AsyncStreamingOutputPlus out = (AsyncStreamingOutputPlus) output;
         long totalSize = totalSize();
         logger.debug("[Stream #{}] Start streaming file {} to {}, repairedAt = {}, totalSize = {}", session.planId(),
                      sstable.getFilename(), session.peer, sstable.getSSTableMetadata().repairedAt, totalSize);
@@ -73,6 +71,7 @@
             int sectionIdx = 0;
 
             // stream each of the required sections of the file
+            String filename = sstable.descriptor.filenameFor(Component.DATA);
             for (Section section : sections)
             {
                 // length of the section to stream
@@ -96,7 +95,7 @@
 
                     bytesTransferred += toTransfer;
                     progress += toTransfer;
-                    session.progress(sstable.descriptor.filenameFor(Component.DATA), ProgressInfo.Direction.OUT, progress, totalSize);
+                    session.progress(filename, ProgressInfo.Direction.OUT, progress, toTransfer, totalSize);
                 }
             }
             logger.debug("[Stream #{}] Finished streaming file {} to {}, bytesTransferred = {}, totalSize = {}",
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java b/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java
index 0bfe993..515c85d 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java
@@ -18,18 +18,16 @@
 
 package org.apache.cassandra.db.streaming;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.function.UnaryOperator;
 
-import com.google.common.base.Throwables;
-import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.SSTableMultiWriter;
@@ -37,6 +35,7 @@
 import org.apache.cassandra.io.sstable.format.big.BigTableZeroCopyWriter;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.ProgressInfo;
 import org.apache.cassandra.streaming.StreamReceiver;
@@ -85,7 +84,7 @@
      */
     @SuppressWarnings("resource") // input needs to remain open, streams on top of it can't be closed
     @Override
-    public SSTableMultiWriter read(DataInputPlus in) throws IOException
+    public SSTableMultiWriter read(DataInputPlus in) throws Throwable
     {
         ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
         if (cfs == null)
@@ -123,7 +122,7 @@
                              prettyPrintMemory(totalSize));
 
                 writer.writeComponent(component.type, in, length);
-                session.progress(writer.descriptor.filenameFor(component), ProgressInfo.Direction.IN, length, length);
+                session.progress(writer.descriptor.filenameFor(component), ProgressInfo.Direction.IN, length, length, length);
                 bytesRead += length;
 
                 logger.debug("[Stream #{}] Finished receiving {} component from {}, componentSize = {}, readBytes = {}, totalSize = {}",
@@ -147,8 +146,7 @@
             logger.error("[Stream {}] Error while reading sstable from stream for table = {}", session.planId(), cfs.metadata(), e);
             if (writer != null)
                 e = writer.abort(e);
-            Throwables.throwIfUnchecked(e);
-            throw new RuntimeException(e);
+            throw e;
         }
     }
 
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriter.java b/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriter.java
index ef82eb2..3d679a5 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriter.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriter.java
@@ -26,8 +26,8 @@
 
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
-import org.apache.cassandra.net.AsyncStreamingOutputPlus;
 import org.apache.cassandra.streaming.ProgressInfo;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamManager;
 import org.apache.cassandra.streaming.StreamSession;
 
@@ -53,7 +53,7 @@
         this.sstable = sstable;
         this.context = context;
         this.manifest = context.manifest();
-        this.limiter = StreamManager.getRateLimiter(session.peer);
+        this.limiter = StreamManager.getEntireSSTableRateLimiter(session.peer);
     }
 
     /**
@@ -63,7 +63,7 @@
      * @param out where this writes data to
      * @throws IOException on any I/O error
      */
-    public void write(AsyncStreamingOutputPlus out) throws IOException
+    public void write(StreamingDataOutputPlus out) throws IOException
     {
         long totalSize = manifest.totalSize();
         logger.debug("[Stream #{}] Start streaming sstable {} to {}, repairedAt = {}, totalSize = {}",
@@ -84,7 +84,7 @@
             logger.debug("[Stream #{}] Streaming {}.{} gen {} component {} size {}", session.planId(),
                          sstable.getKeyspaceName(),
                          sstable.getColumnFamilyName(),
-                         sstable.descriptor.generation,
+                         sstable.descriptor.id,
                          component,
                          prettyPrintMemory(length));
 
@@ -93,13 +93,13 @@
             long bytesWritten = out.writeFileToChannel(channel, limiter);
             progress += bytesWritten;
 
-            session.progress(sstable.descriptor.filenameFor(component), ProgressInfo.Direction.OUT, bytesWritten, length);
+            session.progress(sstable.descriptor.filenameFor(component), ProgressInfo.Direction.OUT, bytesWritten, bytesWritten, length);
 
             logger.debug("[Stream #{}] Finished streaming {}.{} gen {} component {} to {}, xfered = {}, length = {}, totalSize = {}",
                          session.planId(),
                          sstable.getKeyspaceName(),
                          sstable.getColumnFamilyName(),
-                         sstable.descriptor.generation,
+                         sstable.descriptor.id,
                          component,
                          session.peer,
                          prettyPrintMemory(bytesWritten),
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraIncomingFile.java b/src/java/org/apache/cassandra/db/streaming/CassandraIncomingFile.java
index 2d6a4fd..3e87d67 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraIncomingFile.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraIncomingFile.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.db.streaming;
 
-import java.io.IOException;
 import java.util.Objects;
 
 import com.google.common.base.Preconditions;
@@ -64,10 +63,11 @@
     }
 
     @Override
-    public synchronized void read(DataInputPlus in, int version) throws IOException
+    public synchronized void read(DataInputPlus in, int version) throws Throwable
     {
         CassandraStreamHeader streamHeader = CassandraStreamHeader.serializer.deserialize(in, version);
         logger.debug("Incoming stream entireSSTable={} components={}", streamHeader.isEntireSSTable, streamHeader.componentManifest);
+        session.countStreamedIn(streamHeader.isEntireSSTable);
 
         IStreamReader reader;
         if (streamHeader.isEntireSSTable)
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java b/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java
index 0904720..367c304 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
-import java.util.UUID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -30,12 +29,12 @@
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.net.AsyncStreamingOutputPlus;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.OutgoingStream;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.streaming.StreamSession;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Ref;
 
 /**
@@ -142,25 +141,21 @@
     }
 
     @Override
-    public UUID getPendingRepair()
+    public TimeUUID getPendingRepair()
     {
         return ref.get().getPendingRepair();
     }
 
     @Override
-    public void write(StreamSession session, DataOutputStreamPlus out, int version) throws IOException
+    public void write(StreamSession session, StreamingDataOutputPlus out, int version) throws IOException
     {
-        // FileStreamTask uses AsyncStreamingOutputPlus for streaming.
-        assert out instanceof AsyncStreamingOutputPlus : "Unexpected DataOutputStreamPlus " + out.getClass();
-
         SSTableReader sstable = ref.get();
 
         if (shouldStreamEntireSSTable)
         {
             // Acquire lock to avoid concurrent sstable component mutation because of stats update or index summary
             // redistribution, otherwise file sizes recorded in component manifest will be different from actual
-            // file sizes. (Note: Windows doesn't support atomic replace and index summary redistribution deletes
-            // existing file first)
+            // file sizes.
             // Recreate the latest manifest and hard links for mutatable components in case they are modified.
             try (ComponentContext context = sstable.runWithLock(ignored -> ComponentContext.create(sstable.descriptor)))
             {
@@ -169,7 +164,7 @@
                 out.flush();
 
                 CassandraEntireSSTableStreamWriter writer = new CassandraEntireSSTableStreamWriter(sstable, session, context);
-                writer.write((AsyncStreamingOutputPlus) out);
+                writer.write(out);
             }
         }
         else
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamManager.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamManager.java
index a84fd27..46cf253 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamManager.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamManager.java
@@ -40,6 +40,7 @@
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.streaming.TableStreamManager;
 import org.apache.cassandra.streaming.messages.StreamMessageHeader;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Ref;
 import org.apache.cassandra.utils.concurrent.Refs;
 import org.slf4j.Logger;
@@ -49,7 +50,6 @@
 import java.util.Collection;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 
 /**
  * Implements the streaming interface for the native cassandra storage engine.
@@ -81,8 +81,9 @@
         return new CassandraStreamReceiver(cfs, session, totalStreams);
     }
 
+    @SuppressWarnings("resource")   // references placed onto returned collection or closed on error
     @Override
-    public Collection<OutgoingStream> createOutgoingStreams(StreamSession session, RangesAtEndpoint replicas, UUID pendingRepair, PreviewKind previewKind)
+    public Collection<OutgoingStream> createOutgoingStreams(StreamSession session, RangesAtEndpoint replicas, TimeUUID pendingRepair, PreviewKind previewKind)
     {
         Refs<SSTableReader> refs = new Refs<>();
         try
@@ -126,6 +127,9 @@
                 return sstables;
             }).refs);
 
+            // This call is normally preceded by a memtable flush in StreamSession.addTransferRanges.
+            // Persistent memtables will not flush, so make an sstable with their data.
+            cfs.writeAndAddMemtableRanges(session.getPendingRepair(), () -> Range.normalize(keyRanges), refs);
 
             List<Range<Token>> normalizedFullRanges = Range.normalize(replicas.onlyFull().ranges());
             List<Range<Token>> normalizedAllRanges = Range.normalize(replicas.ranges());
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java
index 6835fad..04268f0 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java
@@ -17,31 +17,38 @@
  */
 package org.apache.cassandra.db.streaming;
 
-import java.io.*;
+import java.io.IOError;
+import java.io.IOException;
 import java.util.Collection;
-import java.util.UUID;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
 import com.google.common.collect.UnmodifiableIterator;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.exceptions.UnknownColumnException;
-import org.apache.cassandra.io.sstable.format.SSTableReader;
-import org.apache.cassandra.io.util.TrackedDataInputPlus;
-import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.DeletionTime;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
-import org.apache.cassandra.db.rows.*;
+import org.apache.cassandra.db.rows.DeserializationHelper;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.exceptions.UnknownColumnException;
 import org.apache.cassandra.io.sstable.SSTableMultiWriter;
 import org.apache.cassandra.io.sstable.SSTableSimpleIterator;
 import org.apache.cassandra.io.sstable.format.RangeAwareSSTableWriter;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.TrackedDataInputPlus;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.streaming.ProgressInfo;
 import org.apache.cassandra.streaming.StreamReceiver;
 import org.apache.cassandra.streaming.StreamSession;
@@ -49,6 +56,7 @@
 import org.apache.cassandra.streaming.messages.StreamMessageHeader;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.net.MessagingService.current_version;
 
@@ -64,7 +72,7 @@
     protected final StreamSession session;
     protected final Version inputVersion;
     protected final long repairedAt;
-    protected final UUID pendingRepair;
+    protected final TimeUUID pendingRepair;
     protected final SSTableFormat.Type format;
     protected final int sstableLevel;
     protected final SerializationHeader.Component header;
@@ -98,16 +106,14 @@
      */
     @SuppressWarnings("resource") // input needs to remain open, streams on top of it can't be closed
     @Override
-    public SSTableMultiWriter read(DataInputPlus inputPlus) throws IOException
+    public SSTableMultiWriter read(DataInputPlus inputPlus) throws Throwable
     {
         long totalSize = totalSize();
 
         ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
         if (cfs == null)
-        {
             // schema was dropped during streaming
-            throw new IOException("CF " + tableId + " was dropped during streaming");
-        }
+            throw new IllegalStateException("Table " + tableId + " was dropped during streaming");
 
         logger.debug("[Stream #{}] Start receiving file #{} from {}, repairedAt = {}, size = {}, ks = '{}', table = '{}', pendingRepair = '{}'.",
                      session.planId(), fileSeqNum, session.peer, repairedAt, totalSize, cfs.keyspace.getName(),
@@ -120,11 +126,16 @@
             TrackedDataInputPlus in = new TrackedDataInputPlus(streamCompressionInputStream);
             deserializer = new StreamDeserializer(cfs.metadata(), in, inputVersion, getHeader(cfs.metadata()));
             writer = createWriter(cfs, totalSize, repairedAt, pendingRepair, format);
+            String sequenceName = writer.getFilename() + '-' + fileSeqNum;
+            long lastBytesRead = 0;
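+            // report incremental progress: each call below passes the running total and the delta read since the previous report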
             while (in.getBytesRead() < totalSize)
             {
                 writePartition(deserializer, writer);
                 // TODO move this to BytesReadTracker
-                session.progress(writer.getFilename() + '-' + fileSeqNum, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
+                long bytesRead = in.getBytesRead();
+                long bytesDelta = bytesRead - lastBytesRead;
+                lastBytesRead = bytesRead;
+                session.progress(sequenceName, ProgressInfo.Direction.IN, bytesRead, bytesDelta, totalSize);
             }
             logger.debug("[Stream #{}] Finished receiving file #{} from {} readBytes = {}, totalSize = {}",
                          session.planId(), fileSeqNum, session.peer, FBUtilities.prettyPrintMemory(in.getBytesRead()), FBUtilities.prettyPrintMemory(totalSize));
@@ -136,10 +147,8 @@
             logger.warn("[Stream {}] Error while reading partition {} from stream on ks='{}' and table='{}'.",
                         session.planId(), partitionKey, cfs.keyspace.getName(), cfs.getTableName(), e);
             if (writer != null)
-            {
-                writer.abort(e);
-            }
-            throw Throwables.propagate(e);
+                e = writer.abort(e);
+            throw e;
         }
     }
 
@@ -148,7 +157,7 @@
         return header != null? header.toHeader(metadata) : null; // pre-3.0 sstables have no SerializationHeader
     }
     @SuppressWarnings("resource")
-    protected SSTableMultiWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt, UUID pendingRepair, SSTableFormat.Type format) throws IOException
+    protected SSTableMultiWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt, TimeUUID pendingRepair, SSTableFormat.Type format) throws IOException
     {
         Directories.DataDirectory localDir = cfs.getDirectories().getWriteableLocation(totalSize);
         if (localDir == null)
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java
index b2b2ce5..48de8b5 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java
@@ -28,7 +28,7 @@
 
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
 import org.apache.cassandra.io.sstable.SSTable;
-import org.apache.cassandra.streaming.StreamReceiveTask;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -181,11 +181,13 @@
      * For CDC-enabled tables, we want to ensure that the mutations are run through the CommitLog so they
      * can be archived by the CDC process on discard.
      */
-    private boolean requiresWritePath(ColumnFamilyStore cfs) {
-        return hasCDC(cfs) || (session.streamOperation().requiresViewBuild() && hasViews(cfs));
+    private boolean requiresWritePath(ColumnFamilyStore cfs)
+    {
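+        // CDC tables, tables that stream into memtables, and (for stream operations that require
+        // view builds) tables with materialized views must apply streamed data through the normal
+        // write path rather than importing sstables directly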
+        return hasCDC(cfs) || cfs.streamToMemtable() || (session.streamOperation().requiresViewBuild() && hasViews(cfs));
     }
 
-    private void sendThroughWritePath(ColumnFamilyStore cfs, Collection<SSTableReader> readers) {
+    private void sendThroughWritePath(ColumnFamilyStore cfs, Collection<SSTableReader> readers)
+    {
         boolean hasCdc = hasCDC(cfs);
         ColumnFilter filter = ColumnFilter.all(cfs.metadata());
         for (SSTableReader reader : readers)
@@ -273,7 +275,7 @@
         // the streamed sstables.
         if (requiresWritePath)
         {
-            cfs.forceBlockingFlush();
+            cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.STREAMS_RECEIVED);
             abort();
         }
     }
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java
index 6481f4b..9d9ea3c 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java
@@ -17,11 +17,11 @@
  */
 package org.apache.cassandra.db.streaming;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,9 +33,8 @@
 import org.apache.cassandra.io.util.ChannelProxy;
 import org.apache.cassandra.io.util.DataIntegrityMetadata;
 import org.apache.cassandra.io.util.DataIntegrityMetadata.ChecksumValidator;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.net.AsyncStreamingOutputPlus;
 import org.apache.cassandra.streaming.ProgressInfo;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamManager;
 import org.apache.cassandra.streaming.StreamManager.StreamRateLimiter;
 import org.apache.cassandra.streaming.StreamSession;
@@ -75,16 +74,15 @@
      *
      * CassandraStreamWriter uses LZF compression on wire to decrease size to transfer.
      *
-     * @param output where this writes data to
+     * @param out where this writes data to
      * @throws IOException on any I/O error
      */
-    public void write(DataOutputStreamPlus output) throws IOException
+    public void write(StreamingDataOutputPlus out) throws IOException
     {
         long totalSize = totalSize();
         logger.debug("[Stream #{}] Start streaming file {} to {}, repairedAt = {}, totalSize = {}", session.planId(),
                      sstable.getFilename(), session.peer, sstable.getSSTableMetadata().repairedAt, totalSize);
 
-        AsyncStreamingOutputPlus out = (AsyncStreamingOutputPlus) output;
         try(ChannelProxy proxy = sstable.getDataChannel().newChannel();
             ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
                                           ? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
@@ -96,6 +94,7 @@
             long progress = 0L;
 
             // stream each of the required sections of the file
+            String filename = sstable.descriptor.filenameFor(Component.DATA);
             for (SSTableReader.PartitionPositionBounds section : sections)
             {
                 long start = validator == null ? section.lowerPosition : validator.chunkStart(section.lowerPosition);
@@ -114,8 +113,9 @@
                     long lastBytesRead = write(proxy, validator, out, start, transferOffset, toTransfer, bufferSize);
                     start += lastBytesRead;
                     bytesRead += lastBytesRead;
-                    progress += (lastBytesRead - transferOffset);
-                    session.progress(sstable.descriptor.filenameFor(Component.DATA), ProgressInfo.Direction.OUT, progress, totalSize);
+                    long delta = lastBytesRead - transferOffset;
+                    progress += delta;
+                    session.progress(filename, ProgressInfo.Direction.OUT, progress, delta, totalSize);
                     transferOffset = 0;
                 }
 
@@ -145,7 +145,7 @@
      *
      * @throws java.io.IOException on any I/O error
      */
-    protected long write(ChannelProxy proxy, ChecksumValidator validator, AsyncStreamingOutputPlus output, long start, int transferOffset, int toTransfer, int bufferSize) throws IOException
+    protected long write(ChannelProxy proxy, ChecksumValidator validator, StreamingDataOutputPlus output, long start, int transferOffset, int toTransfer, int bufferSize) throws IOException
     {
         // the count of bytes to read off disk
         int minReadable = (int) Math.min(bufferSize, proxy.size() - start);
diff --git a/src/java/org/apache/cassandra/db/streaming/ComponentContext.java b/src/java/org/apache/cassandra/db/streaming/ComponentContext.java
index b9c60b9..c8c08aa 100644
--- a/src/java/org/apache/cassandra/db/streaming/ComponentContext.java
+++ b/src/java/org/apache/cassandra/db/streaming/ComponentContext.java
@@ -26,9 +26,7 @@
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.util.FileUtils;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.util.HashMap;
 import java.util.Map;
@@ -38,6 +36,8 @@
  * Mutable SSTable components and their hardlinks to avoid concurrent sstable component modification
  * during entire-sstable-streaming.
  */
+import org.apache.cassandra.io.util.File;
+
 public class ComponentContext implements AutoCloseable
 {
     private static final Logger logger = LoggerFactory.getLogger(ComponentContext.class);
@@ -81,9 +81,9 @@
      */
     public FileChannel channel(Descriptor descriptor, Component component, long size) throws IOException
     {
-        String toTransfer = hardLinks.containsKey(component) ? hardLinks.get(component).getPath() : descriptor.filenameFor(component);
+        String toTransfer = hardLinks.containsKey(component) ? hardLinks.get(component).path() : descriptor.filenameFor(component);
         @SuppressWarnings("resource") // file channel will be closed by Caller
-        FileChannel channel = new RandomAccessFile(toTransfer, "r").getChannel();
+        FileChannel channel = new File(toTransfer).newReadChannel();
 
         assert size == channel.size() : String.format("Entire sstable streaming expects %s file size to be %s but got %s.",
                                                       component, size, channel.size());
diff --git a/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java b/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java
index bb896ca..b77b594 100644
--- a/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java
+++ b/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java
@@ -28,13 +28,14 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.*;
 
 /**
 * SSTable components and their sizes to be transferred via entire-sstable-streaming
  */
+import org.apache.cassandra.io.util.File;
+
 public final class ComponentManifest implements Iterable<Component>
 {
     private static final List<Component> STREAM_COMPONENTS = ImmutableList.of(Component.DATA, Component.PRIMARY_INDEX, Component.STATS,
@@ -105,6 +106,14 @@
         return components.hashCode();
     }
 
+    @Override
+    public String toString()
+    {
+        return "ComponentManifest{" +
+               "components=" + components +
+               '}';
+    }
+
     public static final IVersionedSerializer<ComponentManifest> serializer = new IVersionedSerializer<ComponentManifest>()
     {
         public void serialize(ComponentManifest manifest, DataOutputPlus out, int version) throws IOException
diff --git a/src/java/org/apache/cassandra/db/streaming/IStreamReader.java b/src/java/org/apache/cassandra/db/streaming/IStreamReader.java
index cf93bc2..e7cb2a2 100644
--- a/src/java/org/apache/cassandra/db/streaming/IStreamReader.java
+++ b/src/java/org/apache/cassandra/db/streaming/IStreamReader.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.db.streaming;
 
-import java.io.IOException;
-
 import org.apache.cassandra.io.sstable.SSTableMultiWriter;
 import org.apache.cassandra.io.util.DataInputPlus;
 
@@ -28,5 +26,5 @@
  */
 public interface IStreamReader
 {
-    public SSTableMultiWriter read(DataInputPlus inputPlus) throws IOException;
+    SSTableMultiWriter read(DataInputPlus inputPlus) throws Throwable;
 }
diff --git a/src/java/org/apache/cassandra/db/view/TableViews.java b/src/java/org/apache/cassandra/db/view/TableViews.java
index 8e64ef3..619e9f8 100644
--- a/src/java/org/apache/cassandra/db/view/TableViews.java
+++ b/src/java/org/apache/cassandra/db/view/TableViews.java
@@ -42,6 +42,8 @@
 import org.apache.cassandra.utils.btree.BTree;
 import org.apache.cassandra.utils.btree.BTreeSet;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 
 /**
  * Groups all the views for a given table.
@@ -103,10 +105,10 @@
         views.forEach(View::stopBuild);
     }
 
-    public void forceBlockingFlush()
+    public void forceBlockingFlush(ColumnFamilyStore.FlushReason reason)
     {
         for (ColumnFamilyStore viewCfs : allViewsCfs())
-            viewCfs.forceBlockingFlush();
+            viewCfs.forceBlockingFlush(reason);
     }
 
     public void dumpMemtables()
@@ -147,13 +149,13 @@
 
         // Read modified rows
         int nowInSec = FBUtilities.nowInSeconds();
-        long queryStartNanoTime = System.nanoTime();
+        long queryStartNanoTime = nanoTime();
         SinglePartitionReadCommand command = readExistingRowsCommand(update, views, nowInSec);
         if (command == null)
             return;
 
         ColumnFamilyStore cfs = Keyspace.openAndGetStore(update.metadata());
-        long start = System.nanoTime();
+        long start = nanoTime();
         Collection<Mutation> mutations;
         try (ReadExecutionController orderGroup = command.executionController();
              UnfilteredRowIterator existings = UnfilteredPartitionIterators.getOnlyElement(command.executeLocally(orderGroup), command);
@@ -161,7 +163,7 @@
         {
             mutations = Iterators.getOnlyElement(generateViewUpdates(views, updates, existings, nowInSec, false));
         }
-        Keyspace.openAndGetStore(update.metadata()).metric.viewReadTime.update(System.nanoTime() - start, TimeUnit.NANOSECONDS);
+        Keyspace.openAndGetStore(update.metadata()).metric.viewReadTime.update(nanoTime() - start, TimeUnit.NANOSECONDS);
 
         if (!mutations.isEmpty())
             StorageProxy.mutateMV(update.partitionKey().getKey(), mutations, writeCommitLog, baseComplete, queryStartNanoTime);
diff --git a/src/java/org/apache/cassandra/db/view/ViewBuilder.java b/src/java/org/apache/cassandra/db/view/ViewBuilder.java
index a88ffbe..daedf48 100644
--- a/src/java/org/apache/cassandra/db/view/ViewBuilder.java
+++ b/src/java/org/apache/cassandra/db/view/ViewBuilder.java
@@ -22,16 +22,12 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,10 +41,13 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.locator.Replicas;
-import org.apache.cassandra.repair.SystemDistributedKeyspace;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 
 import static java.util.stream.Collectors.toList;
 
@@ -73,7 +72,7 @@
     private final Set<ViewBuilderTask> tasks = Sets.newConcurrentHashSet();
     private volatile long keysBuilt = 0;
     private volatile boolean isStopped = false;
-    private volatile Future<?> future = Futures.immediateFuture(null);
+    private volatile Future<?> future = ImmediateFuture.success(null);
 
     ViewBuilder(ColumnFamilyStore baseCfs, View view)
     {
@@ -96,7 +95,7 @@
 
             logger.debug("Starting build of view({}.{}). Flushing base table {}.{}",
                          ksName, view.name, ksName, baseCfs.name);
-            baseCfs.forceBlockingFlush();
+            baseCfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.VIEW_BUILD_STARTED);
 
             loadStatusAndBuild();
         }
@@ -162,21 +161,21 @@
 
         // Submit a new view build task for each building range.
         // We keep record of all the submitted tasks to be able of stopping them.
-        List<ListenableFuture<Long>> futures = pendingRanges.entrySet()
-                                                            .stream()
-                                                            .map(e -> new ViewBuilderTask(baseCfs,
-                                                                                          view,
-                                                                                          e.getKey(),
-                                                                                          e.getValue().left,
-                                                                                          e.getValue().right))
-                                                            .peek(tasks::add)
-                                                            .map(CompactionManager.instance::submitViewBuilder)
-                                                            .collect(toList());
+        List<Future<Long>> futures = pendingRanges.entrySet()
+                                                  .stream()
+                                                  .map(e -> new ViewBuilderTask(baseCfs,
+                                                                                view,
+                                                                                e.getKey(),
+                                                                                e.getValue().left,
+                                                                                e.getValue().right))
+                                                  .peek(tasks::add)
+                                                  .map(CompactionManager.instance::submitViewBuilder)
+                                                  .collect(toList());
 
         // Add a callback to process any eventual new local range and mark the view as built, doing a delayed retry if
         // the tasks don't succeed
-        ListenableFuture<List<Long>> future = Futures.allAsList(futures);
-        Futures.addCallback(future, new FutureCallback<List<Long>>()
+        Future<List<Long>> future = FutureCombiner.allOf(futures);
+        future.addCallback(new FutureCallback<List<Long>>()
         {
             public void onSuccess(List<Long> result)
             {
@@ -200,7 +199,7 @@
                     logger.warn("Materialized View failed to complete, sleeping 5 minutes before restarting", t);
                 }
             }
-        }, MoreExecutors.directExecutor());
+        });
         this.future = future;
     }
 
@@ -228,10 +227,16 @@
     /**
      * Stops the view building.
      */
-    synchronized void stop()
+    void stop()
     {
-        boolean wasStopped = isStopped;
-        internalStop(false);
+        boolean wasStopped;
+        synchronized (this)
+        {
+            wasStopped = isStopped;
+            internalStop(false);
+        }
+        // TODO: very unclear what the goal is here. why do we wait only if we were the first to invoke stop?
+        // but we wait outside the synchronized block to avoid a deadlock with `build` in the future callback
         if (!wasStopped)
             FBUtilities.waitOnFuture(future);
     }
diff --git a/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java b/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java
index c84c697..dac96f6 100644
--- a/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java
+++ b/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java
@@ -21,7 +21,6 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
-import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -29,10 +28,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
 import com.google.common.base.Objects;
-import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.PeekingIterator;
-import com.google.common.util.concurrent.Futures;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,9 +57,12 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Refs;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class ViewBuilderTask extends CompactionInfo.Holder implements Callable<Long>
 {
     private static final Logger logger = LoggerFactory.getLogger(ViewBuilderTask.class);
@@ -72,7 +72,7 @@
     private final ColumnFamilyStore baseCfs;
     private final View view;
     private final Range<Token> range;
-    private final UUID compactionId;
+    private final TimeUUID compactionId;
     private volatile Token prevToken;
     private volatile long keysBuilt = 0;
     private volatile boolean isStopped = false;
@@ -84,7 +84,7 @@
         this.baseCfs = baseCfs;
         this.view = view;
         this.range = range;
-        this.compactionId = UUIDGen.getTimeUUID();
+        this.compactionId = nextTimeUUID();
         this.prevToken = lastToken;
         this.keysBuilt = keysBuilt;
     }
@@ -115,7 +115,7 @@
                                                        .generateViewUpdates(Collections.singleton(view), data, empty, nowInSec, true);
 
             AtomicLong noBase = new AtomicLong(Long.MAX_VALUE);
-            mutations.forEachRemaining(m -> StorageProxy.mutateMV(key.getKey(), m, true, noBase, System.nanoTime()));
+            mutations.forEachRemaining(m -> StorageProxy.mutateMV(key.getKey(), m, true, noBase, nanoTime()));
         }
     }
 
diff --git a/src/java/org/apache/cassandra/db/view/ViewManager.java b/src/java/org/apache/cassandra/db/view/ViewManager.java
index 7e3ea1b..111f96a 100644
--- a/src/java/org/apache/cassandra/db/view/ViewManager.java
+++ b/src/java/org/apache/cassandra/db/view/ViewManager.java
@@ -32,7 +32,7 @@
 import org.apache.cassandra.schema.ViewMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.partitions.*;
-import org.apache.cassandra.repair.SystemDistributedKeyspace;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
 import org.apache.cassandra.schema.Views;
 import org.apache.cassandra.service.StorageService;
 
diff --git a/src/java/org/apache/cassandra/db/view/ViewUtils.java b/src/java/org/apache/cassandra/db/view/ViewUtils.java
index b5aa063..55a462c 100644
--- a/src/java/org/apache/cassandra/db/view/ViewUtils.java
+++ b/src/java/org/apache/cassandra/db/view/ViewUtils.java
@@ -23,13 +23,11 @@
 
 import com.google.common.collect.Iterables;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.locator.EndpointsForToken;
 import org.apache.cassandra.locator.NetworkTopologyStrategy;
 import org.apache.cassandra.locator.Replica;
-import org.apache.cassandra.utils.FBUtilities;
 
 public final class ViewUtils
 {
diff --git a/src/java/org/apache/cassandra/db/virtual/AbstractMutableVirtualTable.java b/src/java/org/apache/cassandra/db/virtual/AbstractMutableVirtualTable.java
new file mode 100644
index 0000000..7044312
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/AbstractMutableVirtualTable.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Optional;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.BoundType;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Range;
+
+import org.apache.commons.lang3.ArrayUtils;
+
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ClusteringPrefix;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.marshal.CompositeType;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
+
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
+import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
+
+/**
+ * An abstract virtual table implementation that builds the resultset on demand and allows fine-grained source
+ * modification via INSERT/UPDATE, DELETE and TRUNCATE operations.
+ * 
+ * Virtual table implementations need to be thread-safe as they can be called from different threads.
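+ *
+ * <p>A minimal sketch of a concrete subclass, for illustration only (the table and column names below
+ * are hypothetical and not part of this change):
+ * <pre>
+ *     final class MySettingsTable extends AbstractMutableVirtualTable
+ *     {
+ *         MySettingsTable(String keyspace)
+ *         {
+ *             super(TableMetadata.builder(keyspace, "my_settings")
+ *                                .kind(TableMetadata.Kind.VIRTUAL)
+ *                                .partitioner(new LocalPartitioner(UTF8Type.instance))
+ *                                .addPartitionKeyColumn("name", UTF8Type.instance)
+ *                                .addRegularColumn("value", UTF8Type.instance)
+ *                                .build());
+ *         }
+ *
+ *         public DataSet data()
+ *         {
+ *             SimpleDataSet result = new SimpleDataSet(metadata());
+ *             // populate result from the backing source, e.g. result.row("some name").column("value", "some value")
+ *             return result;
+ *         }
+ *
+ *         protected void applyColumnUpdate(ColumnValues partitionKey,
+ *                                          ColumnValues clusteringColumns,
+ *                                          Optional&lt;ColumnValue&gt; columnValue)
+ *         {
+ *             // partitionKey.value(0) is the decoded key; columnValue carries the new cell value, if present
+ *         }
+ *     }
+ * </pre>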
+ */
+public abstract class AbstractMutableVirtualTable extends AbstractVirtualTable
+{
+
+    protected AbstractMutableVirtualTable(TableMetadata metadata)
+    {
+        super(metadata);
+    }
+
+    @Override
+    public final void apply(PartitionUpdate update)
+    {
+        ColumnValues partitionKey = ColumnValues.from(metadata(), update.partitionKey());
+
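+        // no partition-level deletion and no range tombstones: the update only carries row-level changes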
+        if (update.deletionInfo().isLive())
+            update.forEach(row ->
+            {
+                ColumnValues clusteringColumns = ColumnValues.from(metadata(), row.clustering());
+
+                if (row.deletion().isLive())
+                {
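+                    // a row without any column data is an update that only sets the primary key (e.g. an INSERT of key columns only)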
+                    if (row.columnCount() == 0)
+                    {
+                        applyColumnUpdate(partitionKey, clusteringColumns, Optional.empty());
+                    }
+                    else
+                    {
+                        row.forEach(columnData ->
+                        {
+                            checkFalse(columnData.column().isComplex(), "Complex type columns are not supported by table %s", metadata);
+
+                            Cell<?> cell = (Cell<?>) columnData;
+
+                            if (cell.isTombstone())
+                                applyColumnDeletion(partitionKey, clusteringColumns, columnName(cell));
+                            else
+                                applyColumnUpdate(partitionKey,
+                                        clusteringColumns,
+                                        Optional.of(ColumnValue.from(cell)));
+                        });
+                    }
+                }
+                else
+                    applyRowDeletion(partitionKey, clusteringColumns);
+            });
+        else
+        {
+            // MutableDeletionInfo may have partition delete or range tombstone list or both
+            if (update.deletionInfo().hasRanges())
+                update.deletionInfo()
+                        .rangeIterator(false)
+                        .forEachRemaining(rt -> applyRangeTombstone(partitionKey, toRange(rt.deletedSlice())));
+
+            if (!update.deletionInfo().getPartitionDeletion().isLive())
+                applyPartitionDeletion(partitionKey);
+        }
+    }
+
+    protected void applyPartitionDeletion(ColumnValues partitionKey)
+    {
+        throw invalidRequest("Partition deletion is not supported by table %s", metadata);
+    }
+
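+    /**
+     * Converts a {@link Slice} over clusterings into a Guava {@link Range} of {@link ColumnValues},
+     * mapping bottom/top bounds to unbounded range ends and inclusive/exclusive bounds to
+     * closed/open bound types.
+     */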
+    private Range<ColumnValues> toRange(Slice slice)
+    {
+        ClusteringBound<?> startBound = slice.start();
+        ClusteringBound<?> endBound = slice.end();
+
+        if (startBound.isBottom())
+        {
+            if (endBound.isTop())
+                return Range.all();
+
+            return Range.upTo(ColumnValues.from(metadata(), endBound), boundType(endBound));
+        }
+
+        if (endBound.isTop())
+            return Range.downTo(ColumnValues.from(metadata(), startBound), boundType(startBound));
+
+        ColumnValues start = ColumnValues.from(metadata(), startBound);
+        BoundType startType = boundType(startBound);
+
+        ColumnValues end = ColumnValues.from(metadata(), endBound);
+        BoundType endType = boundType(endBound);
+
+        return Range.range(start, startType, end, endType);
+    }
+
+    private static BoundType boundType(ClusteringBound<?> bound)
+    {
+        return bound.isInclusive() ? BoundType.CLOSED : BoundType.OPEN;
+    }
+
+    protected void applyRangeTombstone(ColumnValues partitionKey, Range<ColumnValues> range)
+    {
+        throw invalidRequest("Range deletion is not supported by table %s", metadata);
+    }
+
+    protected void applyRowDeletion(ColumnValues partitionKey, ColumnValues clusteringColumns)
+    {
+        throw invalidRequest("Row deletion is not supported by table %s", metadata);
+    }
+
+    protected void applyColumnDeletion(ColumnValues partitionKey, ColumnValues clusteringColumns, String columnName)
+    {
+        throw invalidRequest("Column deletion is not supported by table %s", metadata);
+    }
+
+    protected void applyColumnUpdate(ColumnValues partitionKey,
+                                     ColumnValues clusteringColumns,
+                                     Optional<ColumnValue> columnValue)
+    {
+        throw invalidRequest("Column modification is not supported by table %s", metadata);
+    }
+
+    private static String columnName(Cell<?> cell)
+    {
+        return cell.column().name.toCQLString();
+    }
+
+    /**
+     * A set of partition key or clustering column values.
+     */
+    public static final class ColumnValues implements Comparable<ColumnValues>
+    {
+        /**
+         * An empty set of column values.
+         */
+        private static final ColumnValues EMPTY = new ColumnValues(ImmutableList.of(), ArrayUtils.EMPTY_OBJECT_ARRAY);
+
+        /**
+         * The column metadata for the set of columns.
+         */
+        private final ImmutableList<ColumnMetadata> metadata;
+
+        /**
+         * The column values. The number of values can be smaller than the number of columns if only
+         * a subset of the column values is specified (e.g. a clustering prefix).
+         */
+        private final Object[] values;
+
+        /**
+         * Returns the set of column values corresponding to the specified partition key.
+         *
+         * @param metadata the table metadata
+         * @param partitionKey the partition key
+         * @return the set of column values corresponding to the specified partition key
+         */
+        public static ColumnValues from(TableMetadata metadata, DecoratedKey partitionKey)
+        {
+            if (metadata.partitionKeyType instanceof CompositeType)
+            {
+                ByteBuffer[] buffers = ((CompositeType) metadata.partitionKeyType).split(partitionKey.getKey());
+                return ColumnValues.from(metadata.partitionKeyColumns(), buffers);
+            }
+
+            return ColumnValues.from(metadata.partitionKeyColumns(), partitionKey.getKey());
+        }
+
+        /**
+         * Returns the set of column values corresponding to the specified clustering prefix.
+         *
+         * @param metadata the table metadata
+         * @param prefix the clustering prefix
+         * @return the set of column values corresponding to the specified clustering prefix
+         */
+        public static ColumnValues from(TableMetadata metadata, ClusteringPrefix<?> prefix)
+        {
+            if (prefix == Clustering.EMPTY)
+                return EMPTY;
+
+            return ColumnValues.from(metadata.clusteringColumns(), prefix.getBufferArray());
+        }
+
+        private static ColumnValues from(ImmutableList<ColumnMetadata> metadata, ByteBuffer... buffers)
+        {
+            return new ColumnValues(metadata, convert(metadata, buffers));
+        }
+
+        /**
+         * Create a {@code ColumnValues} for the specified set of columns.
+         *
+         * @param metadata the partition or clustering columns metadata
+         * @param values the partition or clustering column values
+         */
+        public ColumnValues(List<ColumnMetadata> metadata, Object... values)
+        {
+            this.metadata = ImmutableList.copyOf(metadata);
+            this.values = values;
+        }
+
+        /**
+         * Deserializes the column values.
+         *
+         * @param metadata the column metadata
+         * @param buffers the serialized column values
+         * @return the deserialized column values
+         */
+        private static Object[] convert(ImmutableList<ColumnMetadata> metadata, ByteBuffer[] buffers)
+        {
+            Object[] values = new Object[buffers.length];
+            for (int i = 0; i < buffers.length; i++)
+            {
+                values[i] = metadata.get(i).type.compose(buffers[i]);
+            }
+            return values;
+        }
+
+        /**
+         * Returns the name of the specified column
+         *
+         * @param i the column index
+         * @return the column name
+         */
+        public String name(int i)
+        {
+            Preconditions.checkPositionIndex(i, values.length);
+            return metadata.get(i).name.toCQLString();
+        }
+
+        /**
+         * Returns the value for the specified column
+         *
+         * @param i the column index
+         * @return the column value
+         */
+        @SuppressWarnings("unchecked")
+        public <V> V value(int i)
+        {
+            Preconditions.checkPositionIndex(i, values.length);
+            return (V) values[i];
+        }
+
+        /**
+         * Returns the number of column values.
+         *
+         * @return the number of column values.
+         */
+        public int size()
+        {
+            return values.length;
+        }
+
+        @Override
+        public String toString()
+        {
+            StringBuilder builder = new StringBuilder();
+            builder.append('[');
+            for (int i = 0, m = metadata.size(); i < m; i++)
+            {
+                if (i != 0)
+                    builder.append(", ");
+
+                builder.append(metadata.get(i).name.toCQLString())
+                       .append(" : ");
+
+                builder.append(i < values.length ? values[i].toString() : "unspecified");
+            }
+            return builder.append(']').toString();
+        }
+
+        @Override
+        public int compareTo(ColumnValues o)
+        {
+            assert metadata.equals(o.metadata);
+
+            int s1 = size();
+            int s2 = o.size();
+            int minSize = Math.min(s1, s2);
+
+            for (int i = 0; i < minSize; i++)
+            {
+                int cmp = compare(values[i], o.values[i]);
+                if (cmp != 0)
+                    return cmp;
+            }
+
+            return 0;
+        }
+
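+        // values at the same column position are assumed to be mutually comparable; the unchecked cast relies on this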
+        @SuppressWarnings("unchecked")
+        private <T extends Comparable<T>> int compare(Object c1, Object c2)
+        {
+            return ((T) c1).compareTo((T) c2);
+        }
+    }
+
+    /**
+     * A regular column value.
+     */
+    public static final class ColumnValue
+    {
+        /**
+         * The column metadata
+         */
+        private final ColumnMetadata metadata;
+
+        /**
+         * The column value
+         */
+        private final Object value;
+
+        /**
+         * Returns the column value corresponding to the specified cell.
+         *
+         * @param cell the column cell metadata
+         * @return the column value corresponding to the specified cell
+         */
+        public static ColumnValue from(Cell<?> cell)
+        {
+            ColumnMetadata metadata = cell.column();
+            return new ColumnValue(metadata, metadata.type.compose(cell.buffer()));
+        }
+
+        private ColumnValue(ColumnMetadata metadata, Object value)
+        {
+            this.metadata = metadata;
+            this.value = value;
+        }
+
+        /**
+         * Returns the column name.
+         *
+         * @return the column name
+         */
+        public String name()
+        {
+            return metadata.name.toCQLString();
+        }
+
+        /**
+         * Returns the column value.
+         *
+         * @return the column value
+         */
+        @SuppressWarnings("unchecked")
+        public <V> V value()
+        {
+            return (V) value;
+        }
+
+        @Override
+        public String toString()
+        {
+            return String.format("%s : %s", name(), value());
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java b/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
index c2de1db..cf90c42 100644
--- a/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
@@ -38,6 +38,8 @@
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.schema.TableMetadata;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * An abstract virtual table implementation that builds the resultset on demand.
  */
@@ -48,7 +50,7 @@
     protected AbstractVirtualTable(TableMetadata metadata)
     {
         if (!metadata.isVirtual())
-            throw new IllegalArgumentException();
+            throw new IllegalArgumentException("Cannot instantiate a non-virtual table");
 
         this.metadata = metadata;
     }
@@ -81,7 +83,7 @@
         if (null == partition)
             return EmptyIterators.unfilteredPartition(metadata);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         UnfilteredRowIterator rowIterator = partition.toRowIterator(metadata(), clusteringIndexFilter, columnFilter, now);
         return new SingletonUnfilteredPartitionIterator(rowIterator);
     }
@@ -96,7 +98,7 @@
 
         Iterator<Partition> iterator = data.getPartitions(dataRange);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
 
         return new AbstractUnfilteredPartitionIterator()
         {
@@ -127,6 +129,18 @@
         throw new InvalidRequestException("Modification is not supported by table " + metadata);
     }
 
+    @Override
+    public void truncate()
+    {
+        throw new InvalidRequestException("Truncation is not supported by table " + metadata);
+    }
+
+    @Override
+    public String toString()
+    {
+        return metadata().toString();
+    }
+
     public interface DataSet
     {
         boolean isEmpty();
diff --git a/src/java/org/apache/cassandra/db/virtual/BatchMetricsTable.java b/src/java/org/apache/cassandra/db/virtual/BatchMetricsTable.java
new file mode 100644
index 0000000..948f2a1
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/BatchMetricsTable.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import com.codahale.metrics.Snapshot;
+import org.apache.cassandra.cql3.statements.BatchStatement;
+import org.apache.cassandra.db.marshal.DoubleType;
+import org.apache.cassandra.db.marshal.LongType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.metrics.BatchMetrics;
+import org.apache.cassandra.schema.TableMetadata;
+
+public class BatchMetricsTable extends AbstractVirtualTable
+{
+
+    private static final String PARTITIONS_PER_LOGGED_BATCH = "partitions_per_logged_batch";
+    private static final String PARTITIONS_PER_UNLOGGED_BATCH = "partitions_per_unlogged_batch";
+    private static final String PARTITIONS_PER_COUNTER_BATCH = "partitions_per_counter_batch";
+    private static final String P50 = "p50th";
+    private static final String P99 = "p99th";
+    private static final String P999 = "p999th";
+    private static final String MAX = "max";
+
+    BatchMetricsTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, "batch_metrics")
+                           .comment("Metrics specific to batch statements")
+                           .kind(TableMetadata.Kind.VIRTUAL)
+                           .partitioner(new LocalPartitioner(UTF8Type.instance))
+                           .addPartitionKeyColumn("name", UTF8Type.instance)
+                           .addRegularColumn(P50, DoubleType.instance)
+                           .addRegularColumn(P99, DoubleType.instance)
+                           .addRegularColumn(P999, DoubleType.instance)
+                           .addRegularColumn(MAX, LongType.instance)
+                           .build());
+    }
+
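+    // one row per batch-type histogram, exposing selected percentiles and the max of partitions per batch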
+    @Override
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+        BatchMetrics metrics = BatchStatement.metrics;
+        addRow(result, PARTITIONS_PER_LOGGED_BATCH, metrics.partitionsPerLoggedBatch.getSnapshot());
+        addRow(result, PARTITIONS_PER_UNLOGGED_BATCH, metrics.partitionsPerUnloggedBatch.getSnapshot());
+        addRow(result, PARTITIONS_PER_COUNTER_BATCH, metrics.partitionsPerCounterBatch.getSnapshot());
+
+        return result;
+    }
+
+    private void addRow(SimpleDataSet dataSet, String name, Snapshot snapshot)
+    {
+        dataSet.row(name)
+               .column(P50, snapshot.getMedian())
+               .column(P99, snapshot.get99thPercentile())
+               .column(P999, snapshot.get999thPercentile())
+               .column(MAX, snapshot.getMax());
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/CQLMetricsTable.java b/src/java/org/apache/cassandra/db/virtual/CQLMetricsTable.java
new file mode 100644
index 0000000..acd8947
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/CQLMetricsTable.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.db.marshal.DoubleType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.metrics.CQLMetrics;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.cql3.QueryProcessor;
+
+
+final class CQLMetricsTable extends AbstractVirtualTable
+{
+    public static final String TABLE_NAME = "cql_metrics";
+    public static final String PREPARED_STATEMENTS_COUNT = "prepared_statements_count";
+    public static final String PREPARED_STATEMENTS_EVICTED = "prepared_statements_evicted";
+    public static final String PREPARED_STATEMENTS_EXECUTED = "prepared_statements_executed";
+    public static final String PREPARED_STATEMENTS_RATIO = "prepared_statements_ratio";
+    public static final String REGULAR_STATEMENTS_EXECUTED = "regular_statements_executed";
+    public static final String NAME_COL = "name";
+    public static final String VALUE_COL = "value";
+
+    private final CQLMetrics cqlMetrics;
+
+    CQLMetricsTable(String keyspace)
+    {
+        this(keyspace, QueryProcessor.metrics);
+    }
+
+    // For dependency injection
+    @VisibleForTesting
+    CQLMetricsTable(String keyspace, CQLMetrics cqlMetrics)
+    {
+        super(TableMetadata.builder(keyspace, TABLE_NAME)
+                           .comment("Metrics specific to CQL prepared statement caching")
+                           .kind(TableMetadata.Kind.VIRTUAL)
+                           .partitioner(new LocalPartitioner(UTF8Type.instance))
+                           .addPartitionKeyColumn(NAME_COL, UTF8Type.instance)
+                           .addRegularColumn(VALUE_COL, DoubleType.instance)
+                           .build());
+        this.cqlMetrics = cqlMetrics;
+    }
+
+    @Override
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+        addRow(result, PREPARED_STATEMENTS_COUNT, cqlMetrics.preparedStatementsCount.getValue());
+        addRow(result, PREPARED_STATEMENTS_EVICTED, cqlMetrics.preparedStatementsEvicted.getCount());
+        addRow(result, PREPARED_STATEMENTS_EXECUTED, cqlMetrics.preparedStatementsExecuted.getCount());
+        addRow(result, PREPARED_STATEMENTS_RATIO, cqlMetrics.preparedStatementsRatio.getValue());
+        addRow(result, REGULAR_STATEMENTS_EXECUTED, cqlMetrics.regularStatementsExecuted.getCount());
+
+        return result;
+    }
+
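+    // all metrics are exposed through the single double-typed "value" column, so counter values are widened to double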
+    private void addRow(SimpleDataSet dataSet, String name, double value)
+    {
+        dataSet.row(name)
+               .column(VALUE_COL, value);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/ClientsTable.java b/src/java/org/apache/cassandra/db/virtual/ClientsTable.java
index 40e175b..d39c269 100644
--- a/src/java/org/apache/cassandra/db/virtual/ClientsTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/ClientsTable.java
@@ -33,6 +33,7 @@
     private static final String USERNAME = "username";
     private static final String CONNECTION_STAGE = "connection_stage";
     private static final String PROTOCOL_VERSION = "protocol_version";
+    private static final String CLIENT_OPTIONS = "client_options";
     private static final String DRIVER_NAME = "driver_name";
     private static final String DRIVER_VERSION = "driver_version";
     private static final String REQUEST_COUNT = "request_count";
@@ -52,6 +53,7 @@
                            .addRegularColumn(USERNAME, UTF8Type.instance)
                            .addRegularColumn(CONNECTION_STAGE, UTF8Type.instance)
                            .addRegularColumn(PROTOCOL_VERSION, Int32Type.instance)
+                           .addRegularColumn(CLIENT_OPTIONS, MapType.getInstance(UTF8Type.instance, UTF8Type.instance, false))
                            .addRegularColumn(DRIVER_NAME, UTF8Type.instance)
                            .addRegularColumn(DRIVER_VERSION, UTF8Type.instance)
                            .addRegularColumn(REQUEST_COUNT, LongType.instance)
@@ -75,6 +77,7 @@
                   .column(USERNAME, client.username().orElse(null))
                   .column(CONNECTION_STAGE, client.stage().toString().toLowerCase())
                   .column(PROTOCOL_VERSION, client.protocolVersion())
+                  .column(CLIENT_OPTIONS, client.clientOptions().orElse(null))
                   .column(DRIVER_NAME, client.driverName().orElse(null))
                   .column(DRIVER_VERSION, client.driverVersion().orElse(null))
                   .column(REQUEST_COUNT, client.requestCount())
diff --git a/src/java/org/apache/cassandra/db/virtual/CredentialsCacheKeysTable.java b/src/java/org/apache/cassandra/db/virtual/CredentialsCacheKeysTable.java
new file mode 100644
index 0000000..f5bc62c
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/CredentialsCacheKeysTable.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import java.util.Optional;
+
+import org.apache.cassandra.auth.IAuthenticator;
+import org.apache.cassandra.auth.PasswordAuthenticator;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.schema.TableMetadata;
+
+final class CredentialsCacheKeysTable extends AbstractMutableVirtualTable
+{
+    private static final String ROLE = "role";
+
+    @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
+    private final Optional<PasswordAuthenticator> passwordAuthenticatorOptional;
+
+    CredentialsCacheKeysTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, "credentials_cache_keys")
+                .comment("keys in the credentials cache")
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .partitioner(new LocalPartitioner(UTF8Type.instance))
+                .addPartitionKeyColumn(ROLE, UTF8Type.instance)
+                .build());
+
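+        // invalidation is only possible when the configured authenticator is a PasswordAuthenticator;
+        // otherwise the optional stays empty, the table has no rows and deletions are no-ops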
+        IAuthenticator authenticator = DatabaseDescriptor.getAuthenticator();
+        if (authenticator instanceof PasswordAuthenticator)
+            this.passwordAuthenticatorOptional = Optional.of((PasswordAuthenticator) authenticator);
+        else
+            this.passwordAuthenticatorOptional = Optional.empty();
+    }
+
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+
+        passwordAuthenticatorOptional
+                .ifPresent(passwordAuthenticator -> passwordAuthenticator.getCredentialsCache().getAll()
+                        .forEach((roleName, ignored) -> result.row(roleName)));
+
+        return result;
+    }
+
+    @Override
+    protected void applyPartitionDeletion(ColumnValues partitionKey)
+    {
+        String roleName = partitionKey.value(0);
+
+        passwordAuthenticatorOptional
+                .ifPresent(passwordAuthenticator -> passwordAuthenticator.getCredentialsCache().invalidate(roleName));
+    }
+
+    @Override
+    public void truncate()
+    {
+        passwordAuthenticatorOptional
+                .ifPresent(passwordAuthenticator -> passwordAuthenticator.getCredentialsCache().invalidate());
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/GossipInfoTable.java b/src/java/org/apache/cassandra/db/virtual/GossipInfoTable.java
new file mode 100644
index 0000000..bd612c6
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/GossipInfoTable.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.cassandra.db.marshal.InetAddressType;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.TableMetadata;
+
+import static org.apache.cassandra.gms.ApplicationState.TOKENS;
+
+/**
+ * A {@link VirtualTable} that returns the gossip information in tabular format.
+ */
+final class GossipInfoTable extends AbstractVirtualTable
+{
+    static final String TABLE_NAME = "gossip_info";
+    static final String TABLE_COMMENT = "lists the gossip information for the cluster";
+
+    static final String ADDRESS = "address";
+    static final String PORT = "port";
+    static final String HOSTNAME = "hostname";
+    static final String GENERATION = "generation";
+    static final String HEARTBEAT = "heartbeat";
+
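+    // a "<state>_version" column is exposed for every application state, while value columns
+    // omit TOKENS (see the static initializer below)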
+    static final ApplicationState[] STATES_FOR_VERSIONS = ApplicationState.values();
+    static final ApplicationState[] STATES_FOR_VALUES;
+
+    static
+    {
+        EnumSet<ApplicationState> applicationStates = EnumSet.allOf(ApplicationState.class);
+        // do not add a column for the ApplicationState.TOKENS value
+        applicationStates.remove(TOKENS);
+        STATES_FOR_VALUES = applicationStates.toArray(new ApplicationState[0]);
+    }
+
+    private final Supplier<Map<InetAddressAndPort, EndpointState>> endpointStateMapSupplier;
+
+    /**
+     * Construct a new {@link GossipInfoTable} for the given {@code keyspace}.
+     *
+     * @param keyspace the name of the keyspace
+     */
+    GossipInfoTable(String keyspace)
+    {
+        this(keyspace, () -> Gossiper.instance.endpointStateMap);
+    }
+
+    @VisibleForTesting
+    GossipInfoTable(String keyspace, Supplier<Map<InetAddressAndPort, EndpointState>> endpointStateMapSupplier)
+    {
+        super(buildTableMetadata(keyspace));
+        this.endpointStateMapSupplier = endpointStateMapSupplier;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+        for (Map.Entry<InetAddressAndPort, EndpointState> entry : endpointStateMapSupplier.get().entrySet())
+        {
+            InetAddressAndPort endpoint = entry.getKey();
+            // we make a copy of the endpoint state because the value of an entry in the returned map
+            // might be updated on the fly by LoadBroadcaster, and we want to be sure that the
+            // returned data capture a particular point in time
+            EndpointState localState = new EndpointState(entry.getValue());
+
+            SimpleDataSet dataSet = result.row(endpoint.getAddress(), endpoint.getPort())
+                                          .column(HOSTNAME, endpoint.getHostName())
+                                          .column(GENERATION, getGeneration(localState))
+                                          .column(HEARTBEAT, getHeartBeat(localState));
+
+            for (ApplicationState state : STATES_FOR_VALUES)
+                dataSet.column(state.name().toLowerCase(), getValue(localState, state));
+
+            for (ApplicationState state : STATES_FOR_VERSIONS)
+                dataSet.column(state.name().toLowerCase() + "_version", getVersion(localState, state));
+        }
+        return result;
+    }
+
+    /**
+     * Return the heartbeat generation of a given {@link EndpointState} or null if {@code localState} is null.
+     *
+     * @param localState a nullable endpoint state
+     * @return the heartbeat generation if available, null otherwise
+     */
+    private Integer getGeneration(EndpointState localState)
+    {
+        return localState == null ? null : localState.getHeartBeatState().getGeneration();
+    }
+
+    /**
+     * Return the heartbeat version of a given {@link EndpointState} or null if {@code localState} is null.
+     *
+     * @param localState a nullable endpoint state
+     * @return the heartbeat version if available, null otherwise
+     */
+    private Integer getHeartBeat(EndpointState localState)
+    {
+        return localState == null ? null : localState.getHeartBeatState().getHeartBeatVersion();
+    }
+
+    /**
+     * Returns the value from the {@link VersionedValue} of a given {@link ApplicationState key}, or null
+     * if {@code localState} is null or the {@link VersionedValue} does not exist in the {@link ApplicationState}.
+     *
+     * @param localState a nullable endpoint state
+     * @param key        the key to the application state
+     * @return the value, or null if not available
+     */
+    private String getValue(EndpointState localState, ApplicationState key)
+    {
+        VersionedValue value;
+        return localState == null || (value = localState.getApplicationState(key)) == null ? null : value.value;
+    }
+
+    /**
+     * Returns the version from the {@link VersionedValue} of a given {@link ApplicationState key}, or null
+     * if {@code localState} is null or the {@link VersionedValue} does not exist in the {@link ApplicationState}.
+     *
+     * @param localState a nullable endpoint state
+     * @param key        the key to the application state
+     * @return the version, or null if not available
+     */
+    private Integer getVersion(EndpointState localState, ApplicationState key)
+    {
+        VersionedValue value;
+        return localState == null || (value = localState.getApplicationState(key)) == null ? null : value.version;
+    }
+
+    /**
+     * Builds the {@link TableMetadata} to be provided to the superclass.
+     *
+     * @param keyspace the name of the keyspace
+     * @return the {@link TableMetadata} for this table
+     */
+    private static TableMetadata buildTableMetadata(String keyspace)
+    {
+        TableMetadata.Builder builder = TableMetadata.builder(keyspace, TABLE_NAME)
+                                                     .comment(TABLE_COMMENT)
+                                                     .kind(TableMetadata.Kind.VIRTUAL)
+                                                     .partitioner(new LocalPartitioner(InetAddressType.instance))
+                                                     .addPartitionKeyColumn(ADDRESS, InetAddressType.instance)
+                                                     .addClusteringColumn(PORT, Int32Type.instance)
+                                                     .addRegularColumn(HOSTNAME, UTF8Type.instance)
+                                                     .addRegularColumn(GENERATION, Int32Type.instance)
+                                                     .addRegularColumn(HEARTBEAT, Int32Type.instance);
+
+        for (ApplicationState state : STATES_FOR_VALUES)
+            builder.addRegularColumn(state.name().toLowerCase(), UTF8Type.instance);
+
+        for (ApplicationState state : STATES_FOR_VERSIONS)
+            builder.addRegularColumn(state.name().toLowerCase() + "_version", Int32Type.instance);
+
+        return builder.build();
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/InternodeInboundTable.java b/src/java/org/apache/cassandra/db/virtual/InternodeInboundTable.java
index b0afe8f..0da5870 100644
--- a/src/java/org/apache/cassandra/db/virtual/InternodeInboundTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/InternodeInboundTable.java
@@ -114,7 +114,7 @@
     {
         String dc = DatabaseDescriptor.getEndpointSnitch().getDatacenter(addressAndPort);
         String rack = DatabaseDescriptor.getEndpointSnitch().getRack(addressAndPort);
-        dataSet.row(addressAndPort.address, addressAndPort.port, dc, rack)
+        dataSet.row(addressAndPort.getAddress(), addressAndPort.getPort(), dc, rack)
                .column(USING_BYTES, handlers.usingCapacity())
                .column(USING_RESERVE_BYTES, handlers.usingEndpointReserveCapacity())
                .column(CORRUPT_FRAMES_RECOVERED, handlers.corruptFramesRecovered())
diff --git a/src/java/org/apache/cassandra/db/virtual/InternodeOutboundTable.java b/src/java/org/apache/cassandra/db/virtual/InternodeOutboundTable.java
index 87b3823..687f845 100644
--- a/src/java/org/apache/cassandra/db/virtual/InternodeOutboundTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/InternodeOutboundTable.java
@@ -115,7 +115,7 @@
         String dc = DatabaseDescriptor.getEndpointSnitch().getDatacenter(addressAndPort);
         String rack = DatabaseDescriptor.getEndpointSnitch().getRack(addressAndPort);
         long pendingBytes = sum(connections, OutboundConnection::pendingBytes);
-        dataSet.row(addressAndPort.address, addressAndPort.port, dc, rack)
+        dataSet.row(addressAndPort.getAddress(), addressAndPort.getPort(), dc, rack)
                .column(USING_BYTES, pendingBytes)
                .column(USING_RESERVE_BYTES, connections.usingReserveBytes())
                .column(PENDING_COUNT, sum(connections, OutboundConnection::pendingCount))
diff --git a/src/java/org/apache/cassandra/db/virtual/JmxPermissionsCacheKeysTable.java b/src/java/org/apache/cassandra/db/virtual/JmxPermissionsCacheKeysTable.java
new file mode 100644
index 0000000..b048b92
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/JmxPermissionsCacheKeysTable.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.auth.jmx.AuthorizationProxy;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.schema.TableMetadata;
+
+final class JmxPermissionsCacheKeysTable extends AbstractMutableVirtualTable
+{
+    private static final String ROLE = "role";
+
+    JmxPermissionsCacheKeysTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, "jmx_permissions_cache_keys")
+                .comment("keys in the JMX permissions cache")
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .partitioner(new LocalPartitioner(UTF8Type.instance))
+                .addPartitionKeyColumn(ROLE, UTF8Type.instance)
+                .build());
+    }
+
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+
+        AuthorizationProxy.jmxPermissionsCache.getAll()
+                .forEach((roleResource, ignored) -> result.row(roleResource.getRoleName()));
+
+        return result;
+    }
+
+    @Override
+    protected void applyPartitionDeletion(ColumnValues partitionKey)
+    {
+        RoleResource roleResource = RoleResource.role(partitionKey.value(0));
+
+        AuthorizationProxy.jmxPermissionsCache.invalidate(roleResource);
+    }
+
+    @Override
+    public void truncate()
+    {
+        AuthorizationProxy.jmxPermissionsCache.invalidate();
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/LocalRepairTables.java b/src/java/org/apache/cassandra/db/virtual/LocalRepairTables.java
new file mode 100644
index 0000000..1f440ab
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/LocalRepairTables.java
@@ -0,0 +1,496 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.marshal.TimeUUIDType;
+import org.apache.cassandra.db.marshal.UUIDType;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.CommonRange;
+import org.apache.cassandra.repair.RepairJobDesc;
+import org.apache.cassandra.repair.state.Completable;
+import org.apache.cassandra.repair.state.CoordinatorState;
+import org.apache.cassandra.repair.state.JobState;
+import org.apache.cassandra.repair.state.ParticipateState;
+import org.apache.cassandra.repair.state.SessionState;
+import org.apache.cassandra.repair.state.State;
+import org.apache.cassandra.repair.state.ValidationState;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
+
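+/**
+ * Virtual tables exposing the in-memory state of local repairs tracked by {@link ActiveRepairService}:
+ * coordinators ({@code repairs}), sessions ({@code repair_sessions}), jobs ({@code repair_jobs}),
+ * participants ({@code repair_participates}) and validations ({@code repair_validations}).
+ */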
+public class LocalRepairTables
+{
+    private LocalRepairTables()
+    {
+    }
+
+    public static Collection<VirtualTable> getAll(String keyspace)
+    {
+        return Arrays.asList(
+        new RepairTable(keyspace),
+        new SessionTable(keyspace),
+        new JobTable(keyspace),
+        new ParticipateTable(keyspace),
+        new ValidationTable(keyspace)
+        );
+    }
+
+    private static final String JOB_DESC_COLUMNS = "  repair_id timeuuid,\n" +
+                                                   "  session_id timeuuid,\n" +
+                                                   "  keyspace_name text,\n" +
+                                                   "  table_name text,\n" +
+                                                   "  ranges frozen<list<text>>,\n";
+
+    static final class RepairTable extends AbstractVirtualTable
+    {
+        protected RepairTable(String keyspace)
+        {
+            super(parse(keyspace, "Repair summary",
+                        "CREATE TABLE repairs (\n" +
+                        stdColumnsWithStatus(true) +
+                        "  command_id int,\n" +
+                        "  keyspace_name text,\n" +
+                        // human-readable description of what the repair is doing
+                        "  type text,\n" +
+                        // list of all sessions; sessions are created lazily, so this set changes dynamically over the life of a repair
+                        "  sessions frozen<set<timeuuid>>,\n" +
+
+                        // options_ maps to RepairOption
+                        "  options_parallelism text,\n" +
+                        "  options_primary_range boolean,\n" +
+                        "  options_incremental boolean,\n" +
+                        "  options_trace boolean,\n" +
+                        "  options_job_threads int,\n" +
+                        "  options_subrange_repair boolean,\n" +
+                        "  options_pull_repair boolean,\n" +
+                        "  options_force_repair boolean,\n" +
+                        "  options_preview_kind text,\n" +
+                        "  options_optimise_streams boolean,\n" +
+                        "  options_ignore_unreplicated_keyspaces boolean,\n" +
+                        "  options_column_families frozen<set<text>>,\n" +
+                        "  options_data_centers frozen<set<text>>,\n" +
+                        "  options_hosts frozen<set<text>>,\n" +
+                        "  options_ranges frozen<set<text>>,\n" +
+
+                        "  table_names frozen<list<text>>,\n" +
+                        "  ranges frozen<list<list<text>>>,\n" +
+                        "  unfiltered_ranges frozen<list<list<text>>>,\n" +
+                        "  participants frozen<list<text>>,\n" +
+                        stateColumns(CoordinatorState.State.class) +
+                        "\n" +
+                        "  PRIMARY KEY ( (id) )\n" +
+                        ")"));
+        }
+
+        public DataSet data()
+        {
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            ActiveRepairService.instance.coordinators().forEach(s -> updateDataset(result, s));
+            return result;
+        }
+
+        public DataSet data(DecoratedKey partitionKey)
+        {
+            TimeUUID id = TimeUUIDType.instance.compose(partitionKey.getKey());
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            CoordinatorState state = ActiveRepairService.instance.coordinator(id);
+            if (state != null)
+                updateDataset(result, state);
+            return result;
+        }
+
+        private void updateDataset(SimpleDataSet result, CoordinatorState state)
+        {
+            result.row(state.id);
+            addState(result, state);
+            result.column("type", getType(state));
+            result.column("keyspace_name", state.keyspace);
+            result.column("command_id", state.cmd);
+
+            result.column("options_parallelism", state.options.getParallelism().name());
+            result.column("options_primary_range", state.options.isPrimaryRange());
+            result.column("options_trace", state.options.isTraced());
+            result.column("options_job_threads", state.options.getJobThreads());
+            result.column("options_subrange_repair", state.options.isSubrangeRepair());
+            result.column("options_pull_repair", state.options.isPullRepair());
+            result.column("options_force_repair", state.options.isForcedRepair());
+            result.column("options_preview_kind", state.options.getPreviewKind().name());
+            result.column("options_optimise_streams", state.options.optimiseStreams());
+            result.column("options_ignore_unreplicated_keyspaces", state.options.ignoreUnreplicatedKeyspaces());
+            result.column("options_column_families", state.options.getColumnFamilies());
+            result.column("options_data_centers", state.options.getDataCenters());
+            result.column("options_hosts", state.options.getHosts());
+            result.column("options_ranges", toStringSet(state.options.getRanges()));
+
+            result.column("sessions", state.getSessionIds());
+
+            String[] columnFamilyNames = state.getColumnFamilyNames();
+            result.column("table_names", columnFamilyNames == null ? null : Arrays.asList(columnFamilyNames));
+
+            Set<InetAddressAndPort> participants = state.getParticipants();
+            result.column("participants", participants == null ? null : toStringList(participants));
+
+            List<CommonRange> ranges = state.getFilteredCommonRanges();
+            result.column("ranges", ranges == null ? null : ranges.stream().map(c -> c.ranges).map(LocalRepairTables::toStringList).collect(Collectors.toList()));
+
+            ranges = state.getCommonRanges();
+            result.column("unfiltered_ranges", ranges == null ? null : ranges.stream().map(c -> c.ranges).map(LocalRepairTables::toStringList).collect(Collectors.toList()));
+        }
+
+        private String getType(CoordinatorState state)
+        {
+            if (state.options.isPreview())
+            {
+                switch (state.options.getPreviewKind())
+                {
+                    case ALL: return "preview full";
+                    case REPAIRED: return "preview repaired";
+                    case UNREPAIRED: return "preview unrepaired";
+                    case NONE: throw new AssertionError("NONE preview kind not expected when preview repair is set");
+                    default: throw new AssertionError("Unknown preview kind: " + state.options.getPreviewKind());
+                }
+            }
+            else if (state.options.isIncremental())
+            {
+                return "incremental";
+            }
+            return "full";
+        }
+    }
+
+    private static final class SessionTable extends AbstractVirtualTable
+    {
+        SessionTable(String keyspace)
+        {
+            super(parse(keyspace, "Repair session",
+                        "CREATE TABLE repair_sessions (\n" +
+                        stdColumnsWithStatus(true) +
+                        "  repair_id timeuuid,\n" +
+                        "  keyspace_name text,\n" +
+                        "  table_names frozen<list<text>>,\n" +
+                        "  ranges frozen<list<text>>,\n" +
+                        "  participants frozen<list<text>>,\n" +
+                        "  jobs frozen<set<uuid>>,\n" +
+                        "\n" +
+                        stateColumns(SessionState.State.class) +
+                        "\n" +
+                        "  PRIMARY KEY ( (id) )\n" +
+                        ")"));
+        }
+
+        @Override
+        public DataSet data()
+        {
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            ActiveRepairService.instance.coordinators().stream()
+                                                .flatMap(s -> s.getSessions().stream())
+                                                .forEach(s -> updateDataset(result, s));
+            return result;
+        }
+
+        private void updateDataset(SimpleDataSet result, SessionState state)
+        {
+            result.row(state.id);
+            addState(result, state);
+            result.column("repair_id", state.parentRepairSession);
+            result.column("keyspace_name", state.keyspace);
+            result.column("table_names", Arrays.asList(state.cfnames));
+            result.column("ranges", toStringList(state.commonRange.ranges));
+            result.column("jobs", state.getJobIds());
+            result.column("participants", toStringList(state.getParticipants()));
+        }
+    }
+
+    private static final class JobTable extends AbstractVirtualTable
+    {
+        JobTable(String keyspace)
+        {
+            super(parse(keyspace, "Repair job",
+                        "CREATE TABLE repair_jobs (\n" +
+                        stdColumnsWithStatus(false) +
+                        "  participants frozen<list<text>>,\n" +
+                        JOB_DESC_COLUMNS +
+                        "\n" +
+                        stateColumns(JobState.State.class) +
+                        "\n" +
+                        "  PRIMARY KEY ( (id) )\n" +
+                        ")"));
+        }
+
+        @Override
+        public DataSet data()
+        {
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            ActiveRepairService.instance.coordinators().stream()
+                                                .flatMap(s -> s.getSessions().stream())
+                                                .flatMap(s -> s.getJobs().stream())
+                                                .forEach(s -> updateDataset(result, s));
+            return result;
+        }
+
+        private void updateDataset(SimpleDataSet result, JobState state)
+        {
+            result.row(state.id);
+            addState(result, state);
+            addState(result, state.desc);
+            result.column("participants", toStringList(state.getParticipants()));
+        }
+    }
+
+    static final class ParticipateTable extends AbstractVirtualTable
+    {
+        protected ParticipateTable(String keyspace)
+        {
+            super(parse(keyspace, "Repair participate summary",
+                        "CREATE TABLE repair_participates (" +
+                        stdColumns(true) +
+                        "  initiator  text,\n" +
+                        "  tables frozen<set<text>>, \n" +
+                        "  ranges frozen<list<text>>,\n" +
+                        "  incremental boolean,\n" +
+                        "  global boolean,\n" +
+                        "  preview_kind text,\n" +
+                        "  repaired_at timestamp,\n" +
+                        "  validations frozen<set<uuid>>,\n" +
+                        "\n" +
+                        "  PRIMARY KEY ( (id) )\n" +
+                        ")"));
+        }
+
+        @Override
+        public DataSet data()
+        {
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            ActiveRepairService.instance.participates().stream()
+                                        .forEach(s -> updateDataset(result, s));
+            return result;
+        }
+
+        @Override
+        public DataSet data(DecoratedKey partitionKey)
+        {
+            TimeUUID id = TimeUUIDType.instance.compose(partitionKey.getKey());
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            ParticipateState state = ActiveRepairService.instance.participate(id);
+            if (state != null)
+                updateDataset(result, state);
+            return result;
+        }
+
+        private void updateDataset(SimpleDataSet result, ParticipateState state)
+        {
+            result.row(state.id);
+            addCompletableState(result, state);
+            result.column("initiator", state.initiator.toString());
+            result.column("tables", state.tableIds.stream()
+                                                  .map(Schema.instance::getTableMetadata)
+                                                  .filter(a -> a != null) // getTableMetadata returns null if the id isn't known, most likely the table was dropped
+                                                  .map(Object::toString)
+                                                  .collect(Collectors.toSet()));
+            result.column("incremental", state.incremental);
+            result.column("global", state.global);
+            result.column("preview_kind", state.previewKind.name());
+            if (state.repairedAt != 0)
+                result.column("repaired_at", new Date(state.repairedAt));
+            result.column("validations", state.validationIds());
+            result.column("ranges", toStringList(state.ranges));
+        }
+    }
+
+    private static final class ValidationTable extends AbstractVirtualTable
+    {
+        ValidationTable(String keyspace)
+        {
+            super(parse(keyspace, "Repair validation",
+                        "CREATE TABLE repair_validations (\n" +
+                        stdColumnsWithStatus(false) +
+                        JOB_DESC_COLUMNS +
+                        "  initiator  text,\n" +
+                        "  estimated_partitions  bigint,\n" +
+                        "  estimated_total_bytes  bigint,\n" +
+                        "  partitions_processed  bigint,\n" +
+                        "  bytes_read  bigint,\n" +
+                        "  progress_percentage float,\n" +
+                        "\n" +
+                        stateColumns(ValidationState.State.class) +
+                        "\n" +
+                        "  PRIMARY KEY ( (id) )\n" +
+                        ")"));
+        }
+
+        @Override
+        public DataSet data()
+        {
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            ActiveRepairService.instance.validations().stream()
+                                                     .forEach(s -> updateDataset(result, s));
+            return result;
+        }
+
+        @Override
+        public DataSet data(DecoratedKey partitionKey)
+        {
+            UUID id = UUIDType.instance.compose(partitionKey.getKey());
+            SimpleDataSet result = new SimpleDataSet(metadata());
+            ValidationState state = ActiveRepairService.instance.validation(id);
+            if (state != null)
+                updateDataset(result, state);
+            return result;
+        }
+
+        private void updateDataset(SimpleDataSet result, ValidationState state)
+        {
+            result.row(state.id);
+            addState(result, state);
+            addState(result, state.desc);
+            result.column("initiator", state.initiator.toString());
+            result.column("estimated_partitions", state.estimatedPartitions == 0 ? null : state.estimatedPartitions);
+            result.column("estimated_total_bytes", state.estimatedTotalBytes == 0 ? null : state.estimatedTotalBytes);
+            result.column("partitions_processed", state.partitionsProcessed == 0 ? null : state.partitionsProcessed);
+            result.column("progress_percentage", round(state.getProgress() * 100));
+            result.column("bytes_read", state.bytesRead);
+        }
+    }
+
+    private static String timestampColumnName(Enum<?> e)
+    {
+        return timestampColumnName(e.name().toLowerCase());
+    }
+
+    private static String timestampColumnName(String e)
+    {
+        return "state_" + e + "_timestamp";
+    }
+
+    private static String stateColumns(Class<? extends Enum<?>> klass)
+    {
+        StringBuilder sb = new StringBuilder();
+        for (Enum<?> e : klass.getEnumConstants())
+            sb.append("  ").append(timestampColumnName(e)).append(" timestamp, \n");
+        return sb.toString();
+    }
+
+    private static String stdStateColumns()
+    {
+        StringBuilder sb = new StringBuilder();
+        sb.append("  ").append(timestampColumnName("init")).append(" timestamp, \n");
+        for (State.Result.Kind kind : State.Result.Kind.values())
+            sb.append("  ").append(timestampColumnName(kind)).append(" timestamp, \n");
+        return sb.toString();
+    }
+
+    private static void addCompletableState(SimpleDataSet ds, Completable<?> state)
+    {
+        // read timestamp early to see latest data
+        ds.column("last_updated_at", new Date(state.getLastUpdatedAtMillis()));
+        ds.column("duration_millis", state.getDurationMillis());
+        State.Result result = state.getResult();
+        ds.column("failure_cause", state.getFailureCause());
+        ds.column("success_message", state.getSuccessMessage());
+        ds.column(timestampColumnName("init"), new Date(state.getInitializedAtMillis()));
+        ds.column("completed", result != null);
+
+        if (result != null)
+            ds.column(timestampColumnName(result.kind), new Date(state.getLastUpdatedAtMillis()));
+    }
+
+    private static <T extends Enum<T>> void addState(SimpleDataSet ds, State<T, ?> state)
+    {
+        addCompletableState(ds, state);
+
+        T currentState = state.getStatus();
+        State.Result result = state.getResult();
+        ds.column("status", result != null ? result.kind.name().toLowerCase() : currentState == null ? "init" : currentState.name().toLowerCase());
+        for (Map.Entry<T, Long> e : state.getStateTimesMillis().entrySet())
+        {
+            if (e.getValue().longValue() != 0)
+                ds.column(timestampColumnName(e.getKey()), new Date(e.getValue()));
+        }
+    }
+
+    @VisibleForTesting
+    static float round(float value)
+    {
+        return Math.round(value * 100.0F) / 100.0F;
+    }
+
+    private static void addState(SimpleDataSet result, RepairJobDesc desc)
+    {
+        result.column("repair_id", desc.parentSessionId);
+        result.column("session_id", desc.sessionId);
+        result.column("keyspace_name", desc.keyspace);
+        result.column("table_name", desc.columnFamily);
+        result.column("ranges", toStringList(desc.ranges));
+    }
+
+    private static <T> List<String> toStringList(Collection<T> list)
+    {
+        if (list == null)
+            return null;
+        return list.stream().map(Object::toString).collect(Collectors.toList());
+    }
+
+    private static <T> Set<String> toStringSet(Collection<T> list)
+    {
+        if (list == null)
+            return null;
+        return list.stream().map(Object::toString).collect(Collectors.toSet());
+    }
+
+    private static TableMetadata parse(String keyspace, String comment, String query)
+    {
+        return CreateTableStatement.parse(query, keyspace)
+                                   .comment(comment)
+                                   .kind(TableMetadata.Kind.VIRTUAL)
+                                   .partitioner(new LocalPartitioner(UUIDType.instance))
+                                   .build();
+    }
+
+    private static String stdColumns(boolean time)
+    {
+        String str = "  id timeuuid,\n" +
+                     "  last_updated_at timestamp,\n" +
+                     "  completed boolean,\n" +
+                     "  duration_millis bigint,\n" +
+                     "  failure_cause text,\n" +
+                     "  success_message text,\n";
+        if (!time)
+            str = str.replace("id timeuuid", "id uuid");
+        return str + stdStateColumns();
+    }
+
+    private static String stdColumnsWithStatus(boolean time)
+    {
+        return stdColumns(time) + "  status text,\n";
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/NetworkPermissionsCacheKeysTable.java b/src/java/org/apache/cassandra/db/virtual/NetworkPermissionsCacheKeysTable.java
new file mode 100644
index 0000000..0c0c625
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/NetworkPermissionsCacheKeysTable.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.schema.TableMetadata;
+
+final class NetworkPermissionsCacheKeysTable extends AbstractMutableVirtualTable
+{
+    private static final String ROLE = "role";
+
+    NetworkPermissionsCacheKeysTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, "network_permissions_cache_keys")
+                .comment("keys in the network permissions cache")
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .partitioner(new LocalPartitioner(UTF8Type.instance))
+                .addPartitionKeyColumn(ROLE, UTF8Type.instance)
+                .build());
+    }
+
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+
+        AuthenticatedUser.networkPermissionsCache.getAll()
+                .forEach((roleResource, ignored) -> result.row(roleResource.getRoleName()));
+
+        return result;
+    }
+
+    @Override
+    protected void applyPartitionDeletion(ColumnValues partitionKey)
+    {
+        RoleResource roleResource = RoleResource.role(partitionKey.value(0));
+
+        AuthenticatedUser.networkPermissionsCache.invalidate(roleResource);
+    }
+
+    @Override
+    public void truncate()
+    {
+        AuthenticatedUser.networkPermissionsCache.invalidate();
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/PendingHintsTable.java b/src/java/org/apache/cassandra/db/virtual/PendingHintsTable.java
new file mode 100644
index 0000000..55d648c
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/PendingHintsTable.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.net.InetAddress;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.marshal.InetAddressType;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.TimestampType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.db.marshal.UUIDType;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.gms.FailureDetectorMBean;
+import org.apache.cassandra.hints.HintsService;
+import org.apache.cassandra.hints.PendingHintsInfo;
+import org.apache.cassandra.locator.IEndpointSnitch;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
+
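+/**
+ * A virtual table listing the hints this node has pending for other nodes, one row per target host id.
+ * Example query (a sketch; assumes the table is exposed in the {@code system_views} virtual keyspace):
+ * {@code SELECT host_id, address, files, oldest, newest FROM system_views.pending_hints;}
+ */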
+final class PendingHintsTable extends AbstractVirtualTable
+{
+    private static final String HOST_ID = "host_id";
+    private static final String ADDRESS = "address";
+    private static final String PORT = "port";
+    private static final String RACK = "rack";
+    private static final String DC = "dc";
+    private static final String STATUS = "status";
+    private static final String FILES = "files";
+    private static final String NEWEST = "newest";
+    private static final String OLDEST = "oldest";
+
+    PendingHintsTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, "pending_hints")
+                           .comment("Pending hints that this node has for other nodes")
+                           .kind(TableMetadata.Kind.VIRTUAL)
+                           .partitioner(new LocalPartitioner(UUIDType.instance))
+                           .addPartitionKeyColumn(HOST_ID, UUIDType.instance)
+                           .addRegularColumn(ADDRESS, InetAddressType.instance)
+                           .addRegularColumn(PORT, Int32Type.instance)
+                           .addRegularColumn(RACK, UTF8Type.instance)
+                           .addRegularColumn(DC, UTF8Type.instance)
+                           .addRegularColumn(STATUS, UTF8Type.instance)
+                           .addRegularColumn(FILES, Int32Type.instance)
+                           .addRegularColumn(NEWEST, TimestampType.instance)
+                           .addRegularColumn(OLDEST, TimestampType.instance)
+                           .build());
+    }
+
+    @Override
+    public DataSet data()
+    {
+        List<PendingHintsInfo> pendingHints = HintsService.instance.getPendingHintsInfo();
+        IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
+
+        SimpleDataSet result = new SimpleDataSet(metadata());
+
+        Map<String, String> simpleStates;
+        if (FailureDetector.instance instanceof FailureDetectorMBean)
+            simpleStates = ((FailureDetectorMBean) FailureDetector.instance).getSimpleStatesWithPort();
+        else
+            simpleStates = Collections.emptyMap();
+
+        for (PendingHintsInfo info : pendingHints)
+        {
+            InetAddressAndPort addressAndPort = StorageService.instance.getEndpointForHostId(info.hostId);
+            InetAddress address = null;
+            Integer port = null;
+            String rack = "Unknown";
+            String dc = "Unknown";
+            String status = "Unknown";
+            if (addressAndPort != null)
+            {
+                address = addressAndPort.getAddress();
+                port = addressAndPort.getPort();
+                rack = snitch.getRack(addressAndPort);
+                dc = snitch.getDatacenter(addressAndPort);
+                status = simpleStates.getOrDefault(addressAndPort.toString(), status);
+            }
+            result.row(info.hostId)
+                  .column(ADDRESS, address)
+                  .column(PORT, port)
+                  .column(RACK, rack)
+                  .column(DC, dc)
+                  .column(STATUS, status)
+                  .column(FILES, info.totalFiles)
+                  .column(NEWEST, new Date(info.newestTimestamp))
+                  .column(OLDEST, new Date(info.oldestTimestamp));
+        }
+        return result;
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/PermissionsCacheKeysTable.java b/src/java/org/apache/cassandra/db/virtual/PermissionsCacheKeysTable.java
new file mode 100644
index 0000000..e2b4fd1
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/PermissionsCacheKeysTable.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.IResource;
+import org.apache.cassandra.auth.Resources;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.Pair;
+
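+/**
+ * A mutable virtual table listing the (role, resource) keys currently held in the permissions cache.
+ * Deleting a row invalidates that single cache entry, and truncating the table invalidates the whole cache;
+ * for example (a sketch; assumes the {@code system_views} virtual keyspace and a hypothetical role/resource):
+ * {@code DELETE FROM system_views.permissions_cache_keys WHERE role = 'alice' AND resource = 'data/ks1';}
+ */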
+final class PermissionsCacheKeysTable extends AbstractMutableVirtualTable
+{
+    private static final String ROLE = "role";
+    private static final String RESOURCE = "resource";
+
+    PermissionsCacheKeysTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, "permissions_cache_keys")
+                .comment("keys in the permissions cache")
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .partitioner(new LocalPartitioner(UTF8Type.instance))
+                .addPartitionKeyColumn(ROLE, UTF8Type.instance)
+                .addPartitionKeyColumn(RESOURCE, UTF8Type.instance)
+                .build());
+    }
+
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+
+        AuthenticatedUser.permissionsCache.getAll()
+                .forEach((userResourcePair, ignored) ->
+                        result.row(userResourcePair.left.getName(), userResourcePair.right.getName()));
+
+        return result;
+    }
+
+    @Override
+    protected void applyPartitionDeletion(ColumnValues partitionKey)
+    {
+        AuthenticatedUser user = new AuthenticatedUser(partitionKey.value(0));
+        IResource resource = resourceFromNameIfExists(partitionKey.value(1));
+        // no need to delete invalid resource
+        if (resource == null)
+            return;
+
+        AuthenticatedUser.permissionsCache.invalidate(Pair.create(user, resource));
+    }
+
+    @Override
+    public void truncate()
+    {
+        AuthenticatedUser.permissionsCache.invalidate();
+    }
+
+    private IResource resourceFromNameIfExists(String name)
+    {
+        try
+        {
+            return Resources.fromName(name);
+        }
+        catch (IllegalArgumentException e)
+        {
+            return null;
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/RolesCacheKeysTable.java b/src/java/org/apache/cassandra/db/virtual/RolesCacheKeysTable.java
new file mode 100644
index 0000000..58e0949
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/RolesCacheKeysTable.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.auth.Roles;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.schema.TableMetadata;
+
+final class RolesCacheKeysTable extends AbstractMutableVirtualTable
+{
+    private static final String ROLE = "role";
+
+    RolesCacheKeysTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, "roles_cache_keys")
+                .comment("keys in the roles cache")
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .partitioner(new LocalPartitioner(UTF8Type.instance))
+                .addPartitionKeyColumn(ROLE, UTF8Type.instance)
+                .build());
+    }
+
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+
+        Roles.cache.getAll()
+                .forEach((roleResource, ignored) -> result.row(roleResource.getRoleName()));
+
+        return result;
+    }
+
+    @Override
+    protected void applyPartitionDeletion(ColumnValues partitionKey)
+    {
+        RoleResource roleResource = RoleResource.role(partitionKey.value(0));
+
+        Roles.cache.invalidate(roleResource);
+    }
+
+    @Override
+    public void truncate()
+    {
+        Roles.cache.invalidate();
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/SSTableTasksTable.java b/src/java/org/apache/cassandra/db/virtual/SSTableTasksTable.java
index 20033df..488b580 100644
--- a/src/java/org/apache/cassandra/db/virtual/SSTableTasksTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/SSTableTasksTable.java
@@ -20,9 +20,10 @@
 import org.apache.cassandra.db.compaction.CompactionInfo;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.marshal.DoubleType;
+import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.LongType;
+import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.db.marshal.UTF8Type;
-import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.dht.LocalPartitioner;
 import org.apache.cassandra.schema.TableMetadata;
 
@@ -34,6 +35,7 @@
     private final static String COMPLETION_RATIO = "completion_ratio";
     private final static String KIND = "kind";
     private final static String PROGRESS = "progress";
+    private final static String SSTABLES = "sstables";
     private final static String TOTAL = "total";
     private final static String UNIT = "unit";
 
@@ -45,10 +47,11 @@
                            .partitioner(new LocalPartitioner(UTF8Type.instance))
                            .addPartitionKeyColumn(KEYSPACE_NAME, UTF8Type.instance)
                            .addClusteringColumn(TABLE_NAME, UTF8Type.instance)
-                           .addClusteringColumn(TASK_ID, UUIDType.instance)
+                           .addClusteringColumn(TASK_ID, TimeUUIDType.instance)
                            .addRegularColumn(COMPLETION_RATIO, DoubleType.instance)
                            .addRegularColumn(KIND, UTF8Type.instance)
                            .addRegularColumn(PROGRESS, LongType.instance)
+                           .addRegularColumn(SSTABLES, Int32Type.instance)
                            .addRegularColumn(TOTAL, LongType.instance)
                            .addRegularColumn(UNIT, UTF8Type.instance)
                            .build());
@@ -71,6 +74,7 @@
                   .column(COMPLETION_RATIO, completionRatio)
                   .column(KIND, task.getTaskType().toString().toLowerCase())
                   .column(PROGRESS, completed)
+                  .column(SSTABLES, task.getSSTables().size())
                   .column(TOTAL, total)
                   .column(UNIT, task.getUnit().toString().toLowerCase());
         }
diff --git a/src/java/org/apache/cassandra/db/virtual/SettingsTable.java b/src/java/org/apache/cassandra/db/virtual/SettingsTable.java
index b0ae018..7a4c39e 100644
--- a/src/java/org/apache/cassandra/db/virtual/SettingsTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/SettingsTable.java
@@ -17,46 +17,31 @@
  */
 package org.apache.cassandra.db.virtual;
 
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.util.Arrays;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
-import java.util.function.BiConsumer;
-import java.util.stream.Collectors;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Functions;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 
-import org.apache.cassandra.audit.AuditLogOptions;
-import org.apache.cassandra.config.*;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Loader;
+import org.apache.cassandra.config.Properties;
+import org.apache.cassandra.config.Replacement;
+import org.apache.cassandra.config.Replacements;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.dht.LocalPartitioner;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.transport.ServerError;
+import org.apache.cassandra.service.ClientWarn;
+import org.yaml.snakeyaml.introspector.Property;
 
 final class SettingsTable extends AbstractVirtualTable
 {
     private static final String NAME = "name";
     private static final String VALUE = "value";
 
-    @VisibleForTesting
-    static final Map<String, Field> FIELDS =
-        Arrays.stream(Config.class.getFields())
-              .filter(f -> !Modifier.isStatic(f.getModifiers()))
-              .collect(Collectors.toMap(Field::getName, Functions.identity()));
-
-    @VisibleForTesting
-    final Map<String, BiConsumer<SimpleDataSet, Field>> overrides =
-        ImmutableMap.<String, BiConsumer<SimpleDataSet, Field>>builder()
-                    .put("audit_logging_options", this::addAuditLoggingOptions)
-                    .put("client_encryption_options", this::addEncryptionOptions)
-                    .put("server_encryption_options", this::addEncryptionOptions)
-                    .put("transparent_data_encryption_options", this::addTransparentEncryptionOptions)
-                    .build();
+    private static final Map<String, String> BACKWARDS_COMPATABLE_NAMES = ImmutableMap.copyOf(getBackwardsCompatableNames());
+    protected static final Map<String, Property> PROPERTIES = ImmutableMap.copyOf(getProperties());
 
     private final Config config;
 
@@ -77,57 +62,15 @@
         this.config = config;
     }
 
-    @VisibleForTesting
-    Object getValue(Field f)
-    {
-        Object value;
-        try
-        {
-            value = f.get(config);
-        }
-        catch (IllegalAccessException | IllegalArgumentException e)
-        {
-            throw new ServerError(e);
-        }
-        return value;
-    }
-
-    private void addValue(SimpleDataSet result, Field f)
-    {
-        Object value = getValue(f);
-        if (value == null)
-        {
-            result.row(f.getName());
-        }
-        else if (overrides.containsKey(f.getName()))
-        {
-            overrides.get(f.getName()).accept(result, f);
-        }
-        else
-        {
-            if (value.getClass().isArray())
-                value = Arrays.toString((Object[]) value);
-            result.row(f.getName()).column(VALUE, value.toString());
-        }
-    }
-
     @Override
     public DataSet data(DecoratedKey partitionKey)
     {
         SimpleDataSet result = new SimpleDataSet(metadata());
         String name = UTF8Type.instance.compose(partitionKey.getKey());
-        Field field = FIELDS.get(name);
-        if (field != null)
-        {
-            addValue(result, field);
-        }
-        else
-        {
-            // rows created by overrides might be directly queried so include them in result to be possibly filtered
-            for (String override : overrides.keySet())
-                if (name.startsWith(override))
-                    addValue(result, FIELDS.get(override));
-        }
+        if (BACKWARDS_COMPATABLE_NAMES.containsKey(name))
+            ClientWarn.instance.warn("key '" + name + "' is deprecated; should switch to '" + BACKWARDS_COMPATABLE_NAMES.get(name) + "'");
+        if (PROPERTIES.containsKey(name))
+            result.row(name).column(VALUE, getValue(PROPERTIES.get(name)));
         return result;
     }
 
@@ -135,56 +78,83 @@
     public DataSet data()
     {
         SimpleDataSet result = new SimpleDataSet(metadata());
-        for (Field setting : FIELDS.values())
-            addValue(result, setting);
+        for (Map.Entry<String, Property> e : PROPERTIES.entrySet())
+            result.row(e.getKey()).column(VALUE, getValue(e.getValue()));
         return result;
     }
 
-    private void addAuditLoggingOptions(SimpleDataSet result, Field f)
+    private String getValue(Property prop)
     {
-        Preconditions.checkArgument(AuditLogOptions.class.isAssignableFrom(f.getType()));
-
-        AuditLogOptions value = (AuditLogOptions) getValue(f);
-        result.row(f.getName() + "_enabled").column(VALUE, Boolean.toString(value.enabled));
-        result.row(f.getName() + "_logger").column(VALUE, value.logger.class_name);
-        result.row(f.getName() + "_audit_logs_dir").column(VALUE, value.audit_logs_dir);
-        result.row(f.getName() + "_included_keyspaces").column(VALUE, value.included_keyspaces);
-        result.row(f.getName() + "_excluded_keyspaces").column(VALUE, value.excluded_keyspaces);
-        result.row(f.getName() + "_included_categories").column(VALUE, value.included_categories);
-        result.row(f.getName() + "_excluded_categories").column(VALUE, value.excluded_categories);
-        result.row(f.getName() + "_included_users").column(VALUE, value.included_users);
-        result.row(f.getName() + "_excluded_users").column(VALUE, value.excluded_users);
+        Object value = prop.get(config);
+        return value == null ? null : value.toString();
     }
 
-    private void addEncryptionOptions(SimpleDataSet result, Field f)
+    private static Map<String, Property> getProperties()
     {
-        Preconditions.checkArgument(EncryptionOptions.class.isAssignableFrom(f.getType()));
-
-        EncryptionOptions value = (EncryptionOptions) getValue(f);
-        result.row(f.getName() + "_enabled").column(VALUE, Boolean.toString(value.isEnabled()));
-        result.row(f.getName() + "_algorithm").column(VALUE, value.algorithm);
-        result.row(f.getName() + "_protocol").column(VALUE, Objects.toString(value.acceptedProtocols(), null));
-        result.row(f.getName() + "_cipher_suites").column(VALUE, Objects.toString(value.cipher_suites, null));
-        result.row(f.getName() + "_client_auth").column(VALUE, Boolean.toString(value.require_client_auth));
-        result.row(f.getName() + "_endpoint_verification").column(VALUE, Boolean.toString(value.require_endpoint_verification));
-        result.row(f.getName() + "_optional").column(VALUE, Boolean.toString(value.isOptional()));
-
-        if (value instanceof EncryptionOptions.ServerEncryptionOptions)
+        Loader loader = Properties.defaultLoader();
+        Map<String, Property> properties = loader.flatten(Config.class);
+        // only handling top-level replacements for now; the previous logic was also top-level only, so this is not a regression
+        Map<String, Replacement> replacements = Replacements.getNameReplacements(Config.class).get(Config.class);
+        if (replacements != null)
         {
-            EncryptionOptions.ServerEncryptionOptions server = (EncryptionOptions.ServerEncryptionOptions) value;
-            result.row(f.getName() + "_internode_encryption").column(VALUE, server.internode_encryption.toString());
-            result.row(f.getName() + "_legacy_ssl_storage_port").column(VALUE, Boolean.toString(server.enable_legacy_ssl_storage_port));
+            for (Replacement r : replacements.values())
+            {
+                Property latest = properties.get(r.newName);
+                assert latest != null : "Unable to find replacement new name: " + r.newName;
+                Property conflict = properties.put(r.oldName, r.toProperty(latest));
+                // some configs kept the same name but changed the type; if this is detected, rely on the replaced property
+                assert conflict == null || r.oldName.equals(r.newName) : String.format("New property %s attempted to replace %s, but this property already exists", latest.getName(), conflict.getName());
+            }
         }
+        for (Map.Entry<String, String> e : BACKWARDS_COMPATABLE_NAMES.entrySet())
+        {
+            String oldName = e.getKey();
+            if (properties.containsKey(oldName))
+                throw new AssertionError("Name " + oldName + " is present in Config; this is a conflict, as the name had a different meaning in " + SettingsTable.class.getSimpleName());
+            String newName = e.getValue();
+            Property prop = Objects.requireNonNull(properties.get(newName), newName + " cannot be found for " + oldName);
+            properties.put(oldName, Properties.rename(oldName, prop));
+        }
+        return properties;
     }
 
-    private void addTransparentEncryptionOptions(SimpleDataSet result, Field f)
+    /**
+     * The settings table was released in 4.0 and attempted to support nested properties for a few hand-selected
+     * properties. The issue is that 4.0 used '_' to separate the names, which makes it hard to map back to the yaml
+     * names; to avoid conflicts, 4.1+ uses '.' instead, and this class provides mappings from the old names to the '.' names.
+     *
+     * There were also a handful of properties with custom names not present in the yaml; this map fixes those as
+     * well and returns the proper (yaml-accessible) names.
+     */
+    private static Map<String, String> getBackwardsCompatableNames()
     {
-        Preconditions.checkArgument(TransparentDataEncryptionOptions.class.isAssignableFrom(f.getType()));
+        Map<String, String> names = new HashMap<>();
+        // Names that don't match yaml
+        names.put("audit_logging_options_logger", "audit_logging_options.logger.class_name");
+        names.put("server_encryption_options_client_auth", "server_encryption_options.require_client_auth");
+        names.put("server_encryption_options_endpoint_verification", "server_encryption_options.require_endpoint_verification");
+        names.put("server_encryption_options_legacy_ssl_storage_port", "server_encryption_options.legacy_ssl_storage_port_enabled");
+        names.put("server_encryption_options_protocol", "server_encryption_options.accepted_protocols");
 
-        TransparentDataEncryptionOptions value = (TransparentDataEncryptionOptions) getValue(f);
-        result.row(f.getName() + "_enabled").column(VALUE, Boolean.toString(value.enabled));
-        result.row(f.getName() + "_cipher").column(VALUE, value.cipher);
-        result.row(f.getName() + "_chunk_length_kb").column(VALUE, Integer.toString(value.chunk_length_kb));
-        result.row(f.getName() + "_iv_length").column(VALUE, Integer.toString(value.iv_length));
+        // Matching names
+        names.put("audit_logging_options_audit_logs_dir", "audit_logging_options.audit_logs_dir");
+        names.put("audit_logging_options_enabled", "audit_logging_options.enabled");
+        names.put("audit_logging_options_excluded_categories", "audit_logging_options.excluded_categories");
+        names.put("audit_logging_options_excluded_keyspaces", "audit_logging_options.excluded_keyspaces");
+        names.put("audit_logging_options_excluded_users", "audit_logging_options.excluded_users");
+        names.put("audit_logging_options_included_categories", "audit_logging_options.included_categories");
+        names.put("audit_logging_options_included_keyspaces", "audit_logging_options.included_keyspaces");
+        names.put("audit_logging_options_included_users", "audit_logging_options.included_users");
+        names.put("server_encryption_options_algorithm", "server_encryption_options.algorithm");
+        names.put("server_encryption_options_cipher_suites", "server_encryption_options.cipher_suites");
+        names.put("server_encryption_options_enabled", "server_encryption_options.enabled");
+        names.put("server_encryption_options_internode_encryption", "server_encryption_options.internode_encryption");
+        names.put("server_encryption_options_optional", "server_encryption_options.optional");
+        names.put("transparent_data_encryption_options_chunk_length_kb", "transparent_data_encryption_options.chunk_length_kb");
+        names.put("transparent_data_encryption_options_cipher", "transparent_data_encryption_options.cipher");
+        names.put("transparent_data_encryption_options_enabled", "transparent_data_encryption_options.enabled");
+        names.put("transparent_data_encryption_options_iv_length", "transparent_data_encryption_options.iv_length");
+
+        return names;
     }
 }
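
For context only (not part of the patch): a minimal standalone sketch of the legacy-name translation described in the javadoc above. The class and variable names are hypothetical; the two mappings are copied verbatim from the map built in getBackwardsCompatableNames().

    // LegacySettingsNameSketch.java -- hypothetical, illustrative only.
    import java.util.HashMap;
    import java.util.Map;

    public class LegacySettingsNameSketch
    {
        public static void main(String[] args)
        {
            // 4.0-era flattened '_' row names resolve to the yaml-addressable '.' names used by 4.1+.
            Map<String, String> legacy = new HashMap<>();
            legacy.put("server_encryption_options_client_auth", "server_encryption_options.require_client_auth");
            legacy.put("audit_logging_options_logger", "audit_logging_options.logger.class_name");

            String requested = "server_encryption_options_client_auth";
            String yamlName = legacy.getOrDefault(requested, requested);
            System.out.println(requested + " -> " + yamlName);
        }
    }
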
diff --git a/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java b/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java
index b8cb9f5..715f4f8 100644
--- a/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java
+++ b/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java
@@ -176,8 +176,10 @@
         private void add(String columnName, Object value)
         {
             ColumnMetadata column = metadata.getColumn(ByteBufferUtil.bytes(columnName));
-            if (null == column || !column.isRegular())
-                throw new IllegalArgumentException();
+            if (column == null)
+                throw new IllegalArgumentException("Unknown column: " + columnName);
+            if (!column.isRegular())
+                throw new IllegalArgumentException(String.format("Expected a regular column %s, but got %s", columnName, column.kind));
             values.put(column, value);
         }
 
@@ -188,9 +190,16 @@
 
             columns.forEach(c ->
             {
-                Object value = values.get(c);
-                if (null != value)
-                    builder.addCell(BufferCell.live(c, now, decompose(c.type, value)));
+                try
+                {
+                    Object value = values.get(c);
+                    if (null != value)
+                        builder.addCell(BufferCell.live(c, now, decompose(c.type, value)));
+                }
+                catch (Exception e)
+                {
+                    throw new SerializationException(c, e);
+                }
             });
 
             return builder.build();
@@ -202,9 +211,16 @@
         }
     }
 
-    @SuppressWarnings("unchecked")
-    private static <T> ByteBuffer decompose(AbstractType<?> type, T value)
+    private static ByteBuffer decompose(AbstractType<?> type, Object value)
     {
-        return ((AbstractType<T>) type).decompose(value);
+        return type.decomposeUntyped(value);
+    }
+
+    public static class SerializationException extends RuntimeException
+    {
+        public SerializationException(ColumnMetadata c, Throwable t)
+        {
+            super("Unable to serialize column " + c.name + " " + c.type.asCQL3Type(), t, true, false);
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/db/virtual/StreamingVirtualTable.java b/src/java/org/apache/cassandra/db/virtual/StreamingVirtualTable.java
new file mode 100644
index 0000000..f01e799
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/StreamingVirtualTable.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import java.util.Date;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.marshal.TimeUUIDType;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.streaming.StreamManager;
+import org.apache.cassandra.streaming.StreamingState;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.cql3.statements.schema.CreateTableStatement.parse;
+
+public class StreamingVirtualTable extends AbstractVirtualTable
+{
+    public StreamingVirtualTable(String keyspace)
+    {
+        super(parse("CREATE TABLE streaming (" +
+                    "  id timeuuid,\n" +
+                    "  follower boolean,\n" +
+                    "  operation text, \n" +
+                    "  peers frozen<list<text>>,\n" +
+                    "  status text,\n" +
+                    "  progress_percentage float,\n" +
+                    "  last_updated_at timestamp,\n" +
+                    "  duration_millis bigint,\n" +
+                    "  failure_cause text,\n" +
+                    "  success_message text,\n" +
+                    "\n" +
+                    StreamingState.Sessions.columns() +
+                    "\n" +
+                    stateColumns() +
+                    "\n" +
+                    "PRIMARY KEY ((id))" +
+                    ")", keyspace)
+              .kind(TableMetadata.Kind.VIRTUAL)
+              .build());
+    }
+
+    private static String stateColumns()
+    {
+        StringBuilder sb = new StringBuilder();
+        for (StreamingState.Status state : StreamingState.Status.values())
+            sb.append("  status_").append(state.name().toLowerCase()).append("_timestamp timestamp,\n");
+        return sb.toString();
+    }
+
+    @Override
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+        StreamManager.instance.getStreamingStates()
+                              .forEach(s -> updateDataSet(result, s));
+        return result;
+    }
+
+    @Override
+    public DataSet data(DecoratedKey partitionKey)
+    {
+        TimeUUID id = TimeUUIDType.instance.compose(partitionKey.getKey());
+        SimpleDataSet result = new SimpleDataSet(metadata());
+        StreamingState state = StreamManager.instance.getStreamingState(id);
+        if (state != null)
+            updateDataSet(result, state);
+        return result;
+    }
+
+    private void updateDataSet(SimpleDataSet ds, StreamingState state)
+    {
+        ds.row(state.id());
+        ds.column("last_updated_at", new Date(state.lastUpdatedAtMillis())); // read early to see latest state
+        ds.column("follower", state.follower());
+        ds.column("operation", state.operation().getDescription());
+        ds.column("peers", state.peers().stream().map(Object::toString).collect(Collectors.toList()));
+        ds.column("status", state.status().name().toLowerCase());
+        ds.column("progress_percentage", round(state.progress() * 100));
+        ds.column("duration_millis", state.durationMillis());
+        ds.column("failure_cause", state.failureCause());
+        ds.column("success_message", state.successMessage());
+        for (Map.Entry<StreamingState.Status, Long> e : state.stateTimesMillis().entrySet())
+            ds.column("status_" + e.getKey().name().toLowerCase() + "_timestamp", new Date(e.getValue()));
+
+        state.sessions().update(ds);
+    }
+
+    static float round(float value)
+    {
+        return Math.round(value * 100) / 100F;
+    }
+}
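
For context only (not part of the patch): a tiny standalone check of the two-decimal rounding used for progress_percentage. It assumes the float-division form of round() shown above; the class name is hypothetical.

    // RoundSketch.java -- hypothetical, illustrative only.
    public class RoundSketch
    {
        // Same shape as the helper above; float division keeps two decimal places.
        static float round(float value)
        {
            return Math.round(value * 100) / 100F;
        }

        public static void main(String[] args)
        {
            System.out.println(round(87.6543f)); // 87.65
            System.out.println(round(0.129f));   // 0.13
            System.out.println(round(100.0f));   // 100.0
        }
    }
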
diff --git a/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java b/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
index 92da4af..f13e61c 100644
--- a/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
+++ b/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
@@ -36,7 +36,18 @@
                     .add(new ThreadPoolsTable(VIRTUAL_VIEWS))
                     .add(new InternodeOutboundTable(VIRTUAL_VIEWS))
                     .add(new InternodeInboundTable(VIRTUAL_VIEWS))
+                    .add(new PendingHintsTable(VIRTUAL_VIEWS))
                     .addAll(TableMetricTables.getAll(VIRTUAL_VIEWS))
+                    .add(new CredentialsCacheKeysTable(VIRTUAL_VIEWS))
+                    .add(new JmxPermissionsCacheKeysTable(VIRTUAL_VIEWS))
+                    .add(new NetworkPermissionsCacheKeysTable(VIRTUAL_VIEWS))
+                    .add(new PermissionsCacheKeysTable(VIRTUAL_VIEWS))
+                    .add(new RolesCacheKeysTable(VIRTUAL_VIEWS))
+                    .add(new CQLMetricsTable(VIRTUAL_VIEWS))
+                    .add(new BatchMetricsTable(VIRTUAL_VIEWS))
+                    .add(new StreamingVirtualTable(VIRTUAL_VIEWS))
+                    .add(new GossipInfoTable(VIRTUAL_VIEWS))
+                    .addAll(LocalRepairTables.getAll(VIRTUAL_VIEWS))
                     .build());
     }
 }
diff --git a/src/java/org/apache/cassandra/db/virtual/VirtualKeyspaceRegistry.java b/src/java/org/apache/cassandra/db/virtual/VirtualKeyspaceRegistry.java
index 5e0f90c..23814cd 100644
--- a/src/java/org/apache/cassandra/db/virtual/VirtualKeyspaceRegistry.java
+++ b/src/java/org/apache/cassandra/db/virtual/VirtualKeyspaceRegistry.java
@@ -40,7 +40,10 @@
 
     public void register(VirtualKeyspace keyspace)
     {
-        virtualKeyspaces.put(keyspace.name(), keyspace);
+        VirtualKeyspace previous = virtualKeyspaces.put(keyspace.name(), keyspace);
+        // Some tests choose to replace the keyspace; if so, make sure to clean up the previous tables as well.
+        if (previous != null)
+            previous.tables().forEach(t -> virtualTables.remove(t));
         keyspace.tables().forEach(t -> virtualTables.put(t.metadata().id, t));
     }
 
diff --git a/src/java/org/apache/cassandra/db/virtual/VirtualMutation.java b/src/java/org/apache/cassandra/db/virtual/VirtualMutation.java
index 09ac4a6..3e26032 100644
--- a/src/java/org/apache/cassandra/db/virtual/VirtualMutation.java
+++ b/src/java/org/apache/cassandra/db/virtual/VirtualMutation.java
@@ -19,6 +19,7 @@
 
 import java.util.Collection;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableMap;
@@ -26,6 +27,7 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.IMutation;
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.schema.TableId;
 
@@ -105,6 +107,12 @@
     }
 
     @Override
+    public Supplier<Mutation> hintOnFailure()
+    {
+        return null;
+    }
+
+    @Override
     public void validateIndexedColumns()
     {
         // no-op
diff --git a/src/java/org/apache/cassandra/db/virtual/VirtualTable.java b/src/java/org/apache/cassandra/db/virtual/VirtualTable.java
index ea196ca..5373f4c 100644
--- a/src/java/org/apache/cassandra/db/virtual/VirtualTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/VirtualTable.java
@@ -48,7 +48,7 @@
     TableMetadata metadata();
 
     /**
-     * Applies the specified update.
+     * Applies the specified update, if supported.
      * @param update the update to apply
      */
     void apply(PartitionUpdate update);
@@ -71,4 +71,9 @@
      * @return the rows corresponding to the requested data.
      */
     UnfilteredPartitionIterator select(DataRange dataRange, ColumnFilter columnFilter);
+
+    /**
+     * Truncates data from the underlying source, if supported.
+     */
+    void truncate();
 }
diff --git a/src/java/org/apache/cassandra/dht/BootStrapper.java b/src/java/org/apache/cassandra/dht/BootStrapper.java
index 39ebf78..9b017c6 100644
--- a/src/java/org/apache/cassandra/dht/BootStrapper.java
+++ b/src/java/org/apache/cassandra/dht/BootStrapper.java
@@ -20,8 +20,7 @@
 import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.google.common.util.concurrent.ListenableFuture;
-
+import org.apache.cassandra.utils.concurrent.Future;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -61,7 +60,7 @@
         this.tokenMetadata = tmd;
     }
 
-    public ListenableFuture<StreamState> bootstrap(StreamStateStore stateStore, boolean useStrictConsistency)
+    public Future<StreamState> bootstrap(StreamStateStore stateStore, boolean useStrictConsistency)
     {
         logger.trace("Beginning bootstrap process");
 
@@ -74,7 +73,7 @@
                                                    stateStore,
                                                    true,
                                                    DatabaseDescriptor.getStreamingConnectionsPerHost());
-        final List<String> nonLocalStrategyKeyspaces = Schema.instance.getNonLocalStrategyKeyspaces();
+        final Collection<String> nonLocalStrategyKeyspaces = Schema.instance.getNonLocalStrategyKeyspaces().names();
         if (nonLocalStrategyKeyspaces.isEmpty())
             logger.debug("Schema does not contain any non-local keyspaces to stream on bootstrap");
         for (String keyspaceName : nonLocalStrategyKeyspaces)
@@ -154,7 +153,7 @@
      * otherwise, if allocationKeyspace is specified use the token allocation algorithm to generate suitable tokens
      * else choose num_tokens tokens at random
      */
-    public static Collection<Token> getBootstrapTokens(final TokenMetadata metadata, InetAddressAndPort address, long schemaWaitDelay) throws ConfigurationException
+    public static Collection<Token> getBootstrapTokens(final TokenMetadata metadata, InetAddressAndPort address, long schemaTimeoutMillis, long ringTimeoutMillis) throws ConfigurationException
     {
         String allocationKeyspace = DatabaseDescriptor.getAllocateTokensForKeyspace();
         Integer allocationLocalRf = DatabaseDescriptor.getAllocateTokensForLocalRf();
@@ -175,10 +174,10 @@
             throw new ConfigurationException("num_tokens must be >= 1");
 
         if (allocationKeyspace != null)
-            return allocateTokens(metadata, address, allocationKeyspace, numTokens, schemaWaitDelay);
+            return allocateTokens(metadata, address, allocationKeyspace, numTokens, schemaTimeoutMillis, ringTimeoutMillis);
 
         if (allocationLocalRf != null)
-            return allocateTokens(metadata, address, allocationLocalRf, numTokens, schemaWaitDelay);
+            return allocateTokens(metadata, address, allocationLocalRf, numTokens, schemaTimeoutMillis, ringTimeoutMillis);
 
         if (numTokens == 1)
             logger.warn("Picking random token for a single vnode.  You should probably add more vnodes and/or use the automatic token allocation mechanism.");
@@ -207,9 +206,10 @@
                                             InetAddressAndPort address,
                                             String allocationKeyspace,
                                             int numTokens,
-                                            long schemaWaitDelay)
+                                            long schemaTimeoutMillis,
+                                            long ringTimeoutMillis)
     {
-        StorageService.instance.waitForSchema(schemaWaitDelay);
+        StorageService.instance.waitForSchema(schemaTimeoutMillis, ringTimeoutMillis);
         if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
             Gossiper.waitToSettle();
 
@@ -228,9 +228,10 @@
                                             InetAddressAndPort address,
                                             int rf,
                                             int numTokens,
-                                            long schemaWaitDelay)
+                                            long schemaTimeoutMillis,
+                                            long ringTimeoutMillis)
     {
-        StorageService.instance.waitForSchema(schemaWaitDelay);
+        StorageService.instance.waitForSchema(schemaTimeoutMillis, ringTimeoutMillis);
         if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
             Gossiper.waitToSettle();
 
diff --git a/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java b/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
index a6314dc..3a5db52 100644
--- a/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
+++ b/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
@@ -37,7 +37,6 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
diff --git a/src/java/org/apache/cassandra/dht/Datacenters.java b/src/java/org/apache/cassandra/dht/Datacenters.java
index 9695a09..b1d96eb 100644
--- a/src/java/org/apache/cassandra/dht/Datacenters.java
+++ b/src/java/org/apache/cassandra/dht/Datacenters.java
@@ -25,7 +25,6 @@
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.FBUtilities;
 
 public class Datacenters
 {
diff --git a/src/java/org/apache/cassandra/dht/LocalPartitioner.java b/src/java/org/apache/cassandra/dht/LocalPartitioner.java
index ca48ed1..09cd2b7 100644
--- a/src/java/org/apache/cassandra/dht/LocalPartitioner.java
+++ b/src/java/org/apache/cassandra/dht/LocalPartitioner.java
@@ -152,7 +152,7 @@
         @Override
         public int compareTo(Token o)
         {
-            assert getPartitioner() == o.getPartitioner();
+            assert getPartitioner() == o.getPartitioner() : String.format("partitioners do not match; %s != %s", getPartitioner(), o.getPartitioner());
             return comparator.compare(token, ((LocalToken) o).token);
         }
 
diff --git a/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java b/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
index 2856f13..e2daac4 100644
--- a/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
+++ b/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
@@ -144,7 +144,7 @@
     {
         static final long serialVersionUID = -5833580143318243006L;
 
-        final long token;
+        public final long token;
 
         public LongToken(long token)
         {
@@ -204,11 +204,21 @@
         }
 
         @Override
-        public Token increaseSlightly()
+        public LongToken increaseSlightly()
         {
             return new LongToken(token + 1);
         }
 
+        public LongToken decreaseSlightly()
+        {
+            return new LongToken(token - 1);
+        }
+
+        public static ByteBuffer keyForToken(long token)
+        {
+            return keyForToken(new LongToken(token));
+        }
+
         /**
          * Reverses murmur3 to find a possible 16 byte key that generates a given token
          */
@@ -216,7 +226,7 @@
         public static ByteBuffer keyForToken(LongToken token)
         {
             ByteBuffer result = ByteBuffer.allocate(16);
-            long[] inv = MurmurHash.inv_hash3_x64_128(new long[] {token.token, 0L});
+            long[] inv = MurmurHash.inv_hash3_x64_128(new long[]{ token.token, 0L });
             result.putLong(inv[0]).putLong(inv[1]).position(0);
             return result;
         }
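
For context only (not part of the patch): a sketch of how the new keyForToken(long) overload might be exercised in a test, assuming the standard Murmur3Partitioner.instance and getToken(ByteBuffer) entry points; the class name is hypothetical.

    // KeyForTokenSketch.java -- hypothetical test-style usage, illustrative only.
    import java.nio.ByteBuffer;

    import org.apache.cassandra.dht.Murmur3Partitioner;
    import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
    import org.apache.cassandra.dht.Token;

    public class KeyForTokenSketch
    {
        public static void main(String[] args)
        {
            // Build a 16-byte key whose murmur3 hash is exactly the requested token,
            // then hash it back through the partitioner to confirm the round trip.
            ByteBuffer key = LongToken.keyForToken(42L);
            Token roundTripped = Murmur3Partitioner.instance.getToken(key);
            assert roundTripped.equals(new LongToken(42L)) : "inv_hash3_x64_128 should round-trip";
        }
    }
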
diff --git a/src/java/org/apache/cassandra/dht/Range.java b/src/java/org/apache/cassandra/dht/Range.java
index 5b2f3d9..2c46899 100644
--- a/src/java/org/apache/cassandra/dht/Range.java
+++ b/src/java/org/apache/cassandra/dht/Range.java
@@ -483,7 +483,7 @@
      * Given a list of unwrapped ranges sorted by left position, return an
      * equivalent list of ranges but with no overlapping ranges.
      */
-    private static <T extends RingPosition<T>> List<Range<T>> deoverlap(List<Range<T>> ranges)
+    public static <T extends RingPosition<T>> List<Range<T>> deoverlap(List<Range<T>> ranges)
     {
         if (ranges.isEmpty())
             return ranges;
diff --git a/src/java/org/apache/cassandra/dht/StreamStateStore.java b/src/java/org/apache/cassandra/dht/StreamStateStore.java
index e62bc04..6d79e9a 100644
--- a/src/java/org/apache/cassandra/dht/StreamStateStore.java
+++ b/src/java/org/apache/cassandra/dht/StreamStateStore.java
@@ -30,7 +30,6 @@
 import org.apache.cassandra.streaming.StreamEventHandler;
 import org.apache.cassandra.streaming.StreamRequest;
 import org.apache.cassandra.streaming.StreamState;
-import org.apache.cassandra.utils.Pair;
 
 /**
  * Store and update available ranges (data already received) to system keyspace.
diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java b/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java
index f8c13df..6a382c0 100644
--- a/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java
+++ b/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java
@@ -98,7 +98,7 @@
 
         public int nodeId()
         {
-            return fakeAddressAndPort.port;
+            return fakeAddressAndPort.getPort();
         }
 
         public int rackId()
@@ -187,4 +187,4 @@
             throw new IllegalStateException("Unexpected UnknownHostException", e);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java b/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java
index 6dbd37c..8cb5fe1 100644
--- a/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java
+++ b/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java
@@ -26,4 +26,4 @@
      * @return Some hashable object.
      */
     Object getGroup(Unit unit);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java
index 2eb9a4c..7645317 100644
--- a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java
+++ b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java
@@ -24,4 +24,4 @@
 public interface TokenAllocator<Unit>
 {
     Collection<Token> addUnit(Unit newUnit, int numTokens);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java
index 04d7455..c29980c 100644
--- a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java
+++ b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java
@@ -193,4 +193,4 @@
         return service.isEnabled(TokenAllocatorEvent.class, type);
     }
 
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java
index ca59938..acb5ed2 100644
--- a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java
+++ b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java
@@ -110,4 +110,4 @@
         ret.put("tokenInfo", String.valueOf(tokenInfo));
         return ret;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/diag/DiagnosticEvent.java b/src/java/org/apache/cassandra/diag/DiagnosticEvent.java
index 5de703b..229710c 100644
--- a/src/java/org/apache/cassandra/diag/DiagnosticEvent.java
+++ b/src/java/org/apache/cassandra/diag/DiagnosticEvent.java
@@ -20,6 +20,8 @@
 import java.io.Serializable;
 import java.util.Map;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Base class for internally emitted events used for diagnostics and testing.
  */
@@ -28,7 +30,7 @@
     /**
      * Event creation time.
      */
-    public final long timestamp = System.currentTimeMillis();
+    public final long timestamp = currentTimeMillis();
 
     /**
      * Name of allocating thread.
diff --git a/src/java/org/apache/cassandra/diag/DiagnosticEventService.java b/src/java/org/apache/cassandra/diag/DiagnosticEventService.java
index 34a4de7..8a8391c 100644
--- a/src/java/org/apache/cassandra/diag/DiagnosticEventService.java
+++ b/src/java/org/apache/cassandra/diag/DiagnosticEventService.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.diag;
 
 import java.io.Serializable;
-import java.lang.management.ManagementFactory;
 import java.util.Collection;
 import java.util.Map;
 import java.util.Objects;
@@ -27,8 +26,6 @@
 import java.util.function.Consumer;
 
 import javax.annotation.Nullable;
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
 
 import com.google.common.collect.ImmutableCollection;
 import com.google.common.collect.ImmutableMap;
@@ -338,4 +335,4 @@
             return Objects.hash(wrapped);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java b/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java
index 8e991e6..e82b40c 100644
--- a/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java
+++ b/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java
@@ -33,6 +33,8 @@
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.progress.jmx.JMXBroadcastExecutor;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Broadcaster for notifying JMX clients on newly available data. Periodically sends {@link Notification}s
  * containing a list of event types and greatest event IDs. Consumers may use this information to
@@ -100,7 +102,7 @@
     {
         // ensure monotonic properties of ids
         if (summary.compute(key, (k, v) -> v == null ? id : id.compareTo(v) > 0 ? id : v) == id) {
-            summary.put("last_updated_at", System.currentTimeMillis());
+            summary.put("last_updated_at", currentTimeMillis());
             scheduleBroadcast();
         }
     }
@@ -132,7 +134,7 @@
         Notification notification = new Notification("event_last_id_summary",
                                                      "LastEventIdBroadcaster",
                                                      notificationSerialNumber.incrementAndGet(),
-                                                     System.currentTimeMillis(),
+                                                     currentTimeMillis(),
                                                      "Event last IDs summary");
         notification.setUserData(summary);
         sendNotification(notification);
diff --git a/src/java/org/apache/cassandra/exceptions/CasWriteTimeoutException.java b/src/java/org/apache/cassandra/exceptions/CasWriteTimeoutException.java
index b134764..32cc014 100644
--- a/src/java/org/apache/cassandra/exceptions/CasWriteTimeoutException.java
+++ b/src/java/org/apache/cassandra/exceptions/CasWriteTimeoutException.java
@@ -20,13 +20,14 @@
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.WriteType;
 
+
 public class CasWriteTimeoutException extends WriteTimeoutException
 {
     public final int contentions;
 
     public CasWriteTimeoutException(WriteType writeType, ConsistencyLevel consistency, int received, int blockFor, int contentions)
     {
-        super(writeType, consistency, received, blockFor, String.format("CAS operation timed out - encountered contentions: %d", contentions));
+        super(writeType, consistency, received, blockFor, String.format("CAS operation timed out: received %d of %d required responses after %d contention retries", received, blockFor, contentions));
         this.contentions = contentions;
     }
 }
diff --git a/src/java/org/apache/cassandra/exceptions/CassandraException.java b/src/java/org/apache/cassandra/exceptions/CassandraException.java
index 58521df..119daac 100644
--- a/src/java/org/apache/cassandra/exceptions/CassandraException.java
+++ b/src/java/org/apache/cassandra/exceptions/CassandraException.java
@@ -17,6 +17,11 @@
  */
 package org.apache.cassandra.exceptions;
 
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
 public abstract class CassandraException extends RuntimeException implements TransportException
 {
     private final ExceptionCode code;
diff --git a/src/java/org/apache/cassandra/exceptions/ExceptionCode.java b/src/java/org/apache/cassandra/exceptions/ExceptionCode.java
index 1766951..8bb0cfd 100644
--- a/src/java/org/apache/cassandra/exceptions/ExceptionCode.java
+++ b/src/java/org/apache/cassandra/exceptions/ExceptionCode.java
@@ -21,10 +21,14 @@
 import java.util.Map;
 
 import org.apache.cassandra.transport.ProtocolException;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
 
 /**
  * Exceptions code, as defined by the binary protocol.
  */
+@Shared(scope = SIMULATION)
 public enum ExceptionCode
 {
     SERVER_ERROR    (0x0000),
diff --git a/src/java/org/apache/cassandra/exceptions/ReadAbortException.java b/src/java/org/apache/cassandra/exceptions/ReadAbortException.java
new file mode 100644
index 0000000..0075858
--- /dev/null
+++ b/src/java/org/apache/cassandra/exceptions/ReadAbortException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.exceptions;
+
+import java.util.Map;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+/**
+ * Special read failure caused by the user query; it implies the user request is not allowed, not that Cassandra had an issue.
+ */
+public abstract class ReadAbortException extends ReadFailureException
+{
+    protected ReadAbortException(String msg, ConsistencyLevel consistency, int received, int blockFor, boolean dataPresent, Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint)
+    {
+        super(msg, consistency, received, blockFor, dataPresent, failureReasonByEndpoint);
+    }
+}
diff --git a/src/java/org/apache/cassandra/exceptions/ReadFailureException.java b/src/java/org/apache/cassandra/exceptions/ReadFailureException.java
index 744cad4..698c8ae 100644
--- a/src/java/org/apache/cassandra/exceptions/ReadFailureException.java
+++ b/src/java/org/apache/cassandra/exceptions/ReadFailureException.java
@@ -33,4 +33,10 @@
         super(ExceptionCode.READ_FAILURE, consistency, received, blockFor, ImmutableMap.copyOf(failureReasonByEndpoint));
         this.dataPresent = dataPresent;
     }
+
+    protected ReadFailureException(String msg, ConsistencyLevel consistency, int received, int blockFor, boolean dataPresent, Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint)
+    {
+        super(ExceptionCode.READ_FAILURE, msg, consistency, received, blockFor, failureReasonByEndpoint);
+        this.dataPresent = dataPresent;
+    }
 }
diff --git a/src/java/org/apache/cassandra/exceptions/ReadSizeAbortException.java b/src/java/org/apache/cassandra/exceptions/ReadSizeAbortException.java
new file mode 100644
index 0000000..ed81008
--- /dev/null
+++ b/src/java/org/apache/cassandra/exceptions/ReadSizeAbortException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.exceptions;
+
+import java.util.Map;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+public class ReadSizeAbortException extends ReadAbortException
+{
+    public ReadSizeAbortException(String msg, ConsistencyLevel consistency, int received, int blockFor, boolean dataPresent, Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint)
+    {
+        super(msg, consistency, received, blockFor, dataPresent, failureReasonByEndpoint);
+    }
+}
diff --git a/src/java/org/apache/cassandra/exceptions/RepairException.java b/src/java/org/apache/cassandra/exceptions/RepairException.java
index db219a2..eb93c37 100644
--- a/src/java/org/apache/cassandra/exceptions/RepairException.java
+++ b/src/java/org/apache/cassandra/exceptions/RepairException.java
@@ -25,24 +25,36 @@
  */
 public class RepairException extends Exception
 {
-    public final RepairJobDesc desc;
-    public final PreviewKind previewKind;
+    private final boolean shouldLogWarn;
 
-    public RepairException(RepairJobDesc desc, String message)
+    private RepairException(RepairJobDesc desc, PreviewKind previewKind, String message, boolean shouldLogWarn)
     {
-        this(desc, null, message);
+        this(desc.toString(previewKind != null ? previewKind : PreviewKind.NONE) + ' ' + message, shouldLogWarn);
     }
 
-    public RepairException(RepairJobDesc desc, PreviewKind previewKind, String message)
+    private RepairException(String msg, boolean shouldLogWarn)
     {
-        super(message);
-        this.desc = desc;
-        this.previewKind = previewKind != null ? previewKind : PreviewKind.NONE;
+        super(msg);
+        this.shouldLogWarn = shouldLogWarn;
     }
 
-    @Override
-    public String getMessage()
+    public static RepairException error(RepairJobDesc desc, PreviewKind previewKind, String message)
     {
-        return desc.toString(previewKind) + ' ' + super.getMessage();
+        return new RepairException(desc, previewKind, message, false);
+    }
+
+    public static RepairException warn(RepairJobDesc desc, PreviewKind previewKind, String message)
+    {
+        return new RepairException(desc, previewKind, message, true);
+    }
+
+    public static RepairException warn(String message)
+    {
+        return new RepairException(message, true);
+    }
+
+    public static boolean shouldWarn(Throwable throwable)
+    {
+        return throwable instanceof RepairException && ((RepairException)throwable).shouldLogWarn;
     }
 }
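
For context only (not part of the patch): an assumed call-site pattern for the new shouldWarn(Throwable) helper; the class name and log messages are hypothetical.

    // RepairFailureLoggingSketch.java -- hypothetical call-site pattern, illustrative only.
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.cassandra.exceptions.RepairException;

    public class RepairFailureLoggingSketch
    {
        private static final Logger logger = LoggerFactory.getLogger(RepairFailureLoggingSketch.class);

        // Failures created via warn(..) above are logged at warn level; everything else
        // keeps the error level and the full stack trace.
        static void onRepairFailure(Throwable t)
        {
            if (RepairException.shouldWarn(t))
                logger.warn("{}", t.getMessage());
            else
                logger.error("Repair failed", t);
        }
    }
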
diff --git a/src/java/org/apache/cassandra/exceptions/RequestExecutionException.java b/src/java/org/apache/cassandra/exceptions/RequestExecutionException.java
index 4db108a..d80559a 100644
--- a/src/java/org/apache/cassandra/exceptions/RequestExecutionException.java
+++ b/src/java/org/apache/cassandra/exceptions/RequestExecutionException.java
@@ -17,6 +17,11 @@
  */
 package org.apache.cassandra.exceptions;
 
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
 public abstract class RequestExecutionException extends CassandraException
 {
     protected RequestExecutionException(ExceptionCode code, String msg)
diff --git a/src/java/org/apache/cassandra/exceptions/RequestFailureException.java b/src/java/org/apache/cassandra/exceptions/RequestFailureException.java
index 56cee1a..a715347 100644
--- a/src/java/org/apache/cassandra/exceptions/RequestFailureException.java
+++ b/src/java/org/apache/cassandra/exceptions/RequestFailureException.java
@@ -32,7 +32,12 @@
 
     protected RequestFailureException(ExceptionCode code, ConsistencyLevel consistency, int received, int blockFor, Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint)
     {
-        super(code, buildErrorMessage(received, failureReasonByEndpoint));
+        this(code, buildErrorMessage(received, failureReasonByEndpoint), consistency, received, blockFor, failureReasonByEndpoint);
+    }
+
+    public RequestFailureException(ExceptionCode code, String msg, ConsistencyLevel consistency, int received, int blockFor, Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint)
+    {
+        super(code, buildErrorMessage(msg, failureReasonByEndpoint));
         this.consistency = consistency;
         this.received = received;
         this.blockFor = blockFor;
@@ -41,10 +46,7 @@
 
     private static String buildErrorMessage(int received, Map<InetAddressAndPort, RequestFailureReason> failures)
     {
-        return String.format("Operation failed - received %d responses and %d failures: %s",
-                             received,
-                             failures.size(),
-                             buildFailureString(failures));
+        return String.format("received %d responses and %d failures", received, failures.size());
     }
 
     private static String buildFailureString(Map<InetAddressAndPort, RequestFailureReason> failures)
@@ -53,4 +55,13 @@
                        .map(e -> String.format("%s from %s", e.getValue(), e.getKey()))
                        .collect(Collectors.joining(", "));
     }
+
+    private static String buildErrorMessage(CharSequence msg, Map<InetAddressAndPort, RequestFailureReason> failures)
+    {
+        StringBuilder sb = new StringBuilder("Operation failed - ");
+        sb.append(msg);
+        if (failures != null && !failures.isEmpty())
+            sb.append(": ").append(buildFailureString(failures));
+        return sb.toString();
+    }
 }
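
For context only (not part of the patch): a standalone sketch of the message shape produced by the new buildErrorMessage overload, with plain strings standing in for endpoints and failure reasons; all names here are hypothetical.

    // FailureMessageSketch.java -- hypothetical, illustrative only.
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class FailureMessageSketch
    {
        public static void main(String[] args)
        {
            // Plain strings stand in for InetAddressAndPort / RequestFailureReason.
            Map<String, String> failures = new LinkedHashMap<>();
            failures.put("/10.0.0.1:7000", "READ_TOO_MANY_TOMBSTONES");
            failures.put("/10.0.0.2:7000", "TIMEOUT");

            String msg = "received 1 responses and " + failures.size() + " failures";
            String detail = failures.entrySet().stream()
                                    .map(e -> String.format("%s from %s", e.getValue(), e.getKey()))
                                    .collect(Collectors.joining(", "));
            // The failure detail is only appended when the map is non-empty.
            System.out.println("Operation failed - " + msg + ": " + detail);
        }
    }
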
diff --git a/src/java/org/apache/cassandra/exceptions/RequestFailureReason.java b/src/java/org/apache/cassandra/exceptions/RequestFailureReason.java
index 1cdbdb5..3d3476a 100644
--- a/src/java/org/apache/cassandra/exceptions/RequestFailureReason.java
+++ b/src/java/org/apache/cassandra/exceptions/RequestFailureReason.java
@@ -35,7 +35,9 @@
     UNKNOWN                  (0),
     READ_TOO_MANY_TOMBSTONES (1),
     TIMEOUT                  (2),
-    INCOMPATIBLE_SCHEMA      (3);
+    INCOMPATIBLE_SCHEMA      (3),
+    READ_SIZE                (4),
+    NODE_DOWN                (5);
 
     public static final Serializer serializer = new Serializer();
 
diff --git a/src/java/org/apache/cassandra/exceptions/TombstoneAbortException.java b/src/java/org/apache/cassandra/exceptions/TombstoneAbortException.java
new file mode 100644
index 0000000..833a093
--- /dev/null
+++ b/src/java/org/apache/cassandra/exceptions/TombstoneAbortException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.exceptions;
+
+import java.util.Map;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+import static org.apache.cassandra.service.reads.thresholds.WarningsSnapshot.tombstoneAbortMessage;
+
+public class TombstoneAbortException extends ReadAbortException
+{
+    public final int nodes;
+    public final long tombstones;
+
+    public TombstoneAbortException(int nodes, long tombstones, String cql, boolean dataPresent, ConsistencyLevel consistency, int received, int blockFor, Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint)
+    {
+        super(tombstoneAbortMessage(nodes, tombstones, cql), consistency, received, blockFor, dataPresent, failureReasonByEndpoint);
+        this.nodes = nodes;
+        this.tombstones = tombstones;
+    }
+}
diff --git a/src/java/org/apache/cassandra/exceptions/TransportException.java b/src/java/org/apache/cassandra/exceptions/TransportException.java
index 70d1da5..9807749 100644
--- a/src/java/org/apache/cassandra/exceptions/TransportException.java
+++ b/src/java/org/apache/cassandra/exceptions/TransportException.java
@@ -17,6 +17,11 @@
  */
 package org.apache.cassandra.exceptions;
 
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
 public interface TransportException
 {
     /**
diff --git a/src/java/org/apache/cassandra/fql/FullQueryLogger.java b/src/java/org/apache/cassandra/fql/FullQueryLogger.java
index 0604df6..415b894 100644
--- a/src/java/org/apache/cassandra/fql/FullQueryLogger.java
+++ b/src/java/org/apache/cassandra/fql/FullQueryLogger.java
@@ -17,10 +17,8 @@
  */
 package org.apache.cassandra.fql;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
@@ -31,6 +29,7 @@
 import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,6 +48,7 @@
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.binlog.BinLog;
 import org.apache.cassandra.utils.binlog.BinLogOptions;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 import org.apache.cassandra.utils.concurrent.WeightedQueue;
 import org.github.jamm.MemoryLayoutSpecification;
 
@@ -177,7 +177,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
         finally
         {
@@ -210,7 +210,7 @@
             //Then decide whether to clean the last used path, possibly configured by JMX
             if (binLog != null && binLog.path != null)
             {
-                File pathFile = binLog.path.toFile();
+                File pathFile = new File(binLog.path);
                 if (pathFile.exists())
                 {
                     pathsToClean.add(pathFile);
diff --git a/src/java/org/apache/cassandra/gms/ApplicationState.java b/src/java/org/apache/cassandra/gms/ApplicationState.java
index 4e20d62..c45d3c2 100644
--- a/src/java/org/apache/cassandra/gms/ApplicationState.java
+++ b/src/java/org/apache/cassandra/gms/ApplicationState.java
@@ -56,6 +56,7 @@
      * a comma-separated list.
      **/
     SSTABLE_VERSIONS,
+    DISK_USAGE,
     // DO NOT EDIT OR REMOVE PADDING STATES BELOW - only add new states above.  See CASSANDRA-16484
     X1,
     X2,
diff --git a/src/java/org/apache/cassandra/gms/EndpointState.java b/src/java/org/apache/cassandra/gms/EndpointState.java
index fe29c4b..e756744 100644
--- a/src/java/org/apache/cassandra/gms/EndpointState.java
+++ b/src/java/org/apache/cassandra/gms/EndpointState.java
@@ -24,6 +24,8 @@
 
 import javax.annotation.Nullable;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -32,18 +34,20 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.NullableSerializer;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * This abstraction represents both the HeartBeatState and the ApplicationState in an EndpointState
  * instance. Any state for a given endpoint can be retrieved from this instance.
  */
-
-
 public class EndpointState
 {
     protected static final Logger logger = LoggerFactory.getLogger(EndpointState.class);
 
     public final static IVersionedSerializer<EndpointState> serializer = new EndpointStateSerializer();
+    public final static IVersionedSerializer<EndpointState> nullableSerializer = NullableSerializer.wrap(serializer);
 
     private volatile HeartBeatState hbState;
     private final AtomicReference<Map<ApplicationState, VersionedValue>> applicationState;
@@ -66,11 +70,12 @@
     {
         hbState = initialHbState;
         applicationState = new AtomicReference<Map<ApplicationState, VersionedValue>>(new EnumMap<>(states));
-        updateTimestamp = System.nanoTime();
+        updateTimestamp = nanoTime();
         isAlive = true;
     }
 
-    HeartBeatState getHeartBeatState()
+    @VisibleForTesting
+    public HeartBeatState getHeartBeatState()
     {
         return hbState;
     }
@@ -178,7 +183,7 @@
 
     void updateTimestamp()
     {
-        updateTimestamp = System.nanoTime();
+        updateTimestamp = nanoTime();
     }
 
     public boolean isAlive()
@@ -255,6 +260,20 @@
     {
         return "EndpointState: HeartBeatState = " + hbState + ", AppStateMap = " + applicationState.get();
     }
+
+    public boolean isSupersededBy(EndpointState that)
+    {
+        int thisGeneration = this.getHeartBeatState().getGeneration();
+        int thatGeneration = that.getHeartBeatState().getGeneration();
+
+        if (thatGeneration > thisGeneration)
+            return true;
+
+        if (thisGeneration > thatGeneration)
+            return false;
+
+        return Gossiper.getMaxEndpointStateVersion(that) > Gossiper.getMaxEndpointStateVersion(this);
+    }
 }
 
 class EndpointStateSerializer implements IVersionedSerializer<EndpointState>
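
For context only (not part of the patch): a standalone sketch of the precedence isSupersededBy() encodes, with hypothetical names; the generation is compared first, and only ties fall back to the max application-state version.

    // SupersededSketch.java -- hypothetical, illustrative only.
    public class SupersededSketch
    {
        // Heartbeat generation (bumped on restart) is compared first; only when the
        // generations are equal does the highest application-state version decide.
        static boolean supersededBy(int thisGen, int thisMaxVersion, int thatGen, int thatMaxVersion)
        {
            if (thatGen > thisGen)
                return true;
            if (thisGen > thatGen)
                return false;
            return thatMaxVersion > thisMaxVersion;
        }

        public static void main(String[] args)
        {
            System.out.println(supersededBy(3, 100, 4, 1));   // true: newer generation wins
            System.out.println(supersededBy(3, 100, 3, 101)); // true: same generation, newer version
            System.out.println(supersededBy(3, 100, 2, 999)); // false: older generation
        }
    }
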
diff --git a/src/java/org/apache/cassandra/gms/FailureDetector.java b/src/java/org/apache/cassandra/gms/FailureDetector.java
index 7cd14a3..5177154 100644
--- a/src/java/org/apache/cassandra/gms/FailureDetector.java
+++ b/src/java/org/apache/cassandra/gms/FailureDetector.java
@@ -27,8 +27,8 @@
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;
-import javax.management.openmbean.CompositeData;
 import javax.management.openmbean.*;
+
 import org.apache.cassandra.locator.Replica;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -40,7 +40,8 @@
 import org.apache.cassandra.utils.MBeanWrapper;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.LINE_SEPARATOR;
-import static org.apache.cassandra.utils.MonotonicClock.preciseTime;
+import static org.apache.cassandra.config.DatabaseDescriptor.newFailureDetector;
+import static org.apache.cassandra.utils.MonotonicClock.Global.preciseTime;
 
 /**
  * This FailureDetector is an implementation of the paper titled
@@ -71,7 +72,7 @@
             return DEFAULT_MAX_PAUSE;
     }
 
-    public static final IFailureDetector instance = new FailureDetector();
+    public static final IFailureDetector instance = newFailureDetector();
     public static final Predicate<InetAddressAndPort> isEndpointAlive = instance::isAlive;
     public static final Predicate<Replica> isReplicaAlive = r -> isEndpointAlive.test(r.endpoint());
 
@@ -106,20 +107,35 @@
 
     public String getAllEndpointStates()
     {
-        return getAllEndpointStates(false);
+        return getAllEndpointStates(false, false);
+    }
+
+    public String getAllEndpointStatesWithResolveIp()
+    {
+        return getAllEndpointStates(false, true);
     }
 
     public String getAllEndpointStatesWithPort()
     {
-        return getAllEndpointStates(true);
+        return getAllEndpointStates(true, false);
+    }
+
+    public String getAllEndpointStatesWithPortAndResolveIp()
+    {
+        return getAllEndpointStates(true, true);
     }
 
     public String getAllEndpointStates(boolean withPort)
     {
+        return getAllEndpointStates(withPort, false);
+    }
+
+    public String getAllEndpointStates(boolean withPort, boolean resolveIp)
+    {
         StringBuilder sb = new StringBuilder();
         for (Map.Entry<InetAddressAndPort, EndpointState> entry : Gossiper.instance.endpointStateMap.entrySet())
         {
-            sb.append(entry.getKey().toString(withPort)).append("\n");
+            sb.append(resolveIp ? entry.getKey().getHostName(withPort) : entry.getKey().toString(withPort)).append("\n");
             appendEndpointState(sb, entry.getValue());
         }
         return sb.toString();
@@ -254,7 +270,7 @@
         }
         catch (IOException e)
         {
-            throw new FSWriteError(e, (path == null) ? null : path.toFile());
+            throw new FSWriteError(e, path);
         }
     }
 
diff --git a/src/java/org/apache/cassandra/gms/FailureDetectorMBean.java b/src/java/org/apache/cassandra/gms/FailureDetectorMBean.java
index 6be31b0..c7673d9 100644
--- a/src/java/org/apache/cassandra/gms/FailureDetectorMBean.java
+++ b/src/java/org/apache/cassandra/gms/FailureDetectorMBean.java
@@ -32,7 +32,9 @@
     public double getPhiConvictThreshold();
 
     @Deprecated public String getAllEndpointStates();
+    @Deprecated public String getAllEndpointStatesWithResolveIp();
     public String getAllEndpointStatesWithPort();
+    public String getAllEndpointStatesWithPortAndResolveIp();
 
     public String getEndpointState(String address) throws UnknownHostException;
 
diff --git a/src/java/org/apache/cassandra/gms/GossipDigestAck2.java b/src/java/org/apache/cassandra/gms/GossipDigestAck2.java
index 0e4062b..ed959a5 100644
--- a/src/java/org/apache/cassandra/gms/GossipDigestAck2.java
+++ b/src/java/org/apache/cassandra/gms/GossipDigestAck2.java
@@ -43,11 +43,6 @@
     {
         this.epStateMap = epStateMap;
     }
-
-    Map<InetAddressAndPort, EndpointState> getEndpointStateMap()
-    {
-        return epStateMap;
-    }
 }
 
 class GossipDigestAck2Serializer implements IVersionedSerializer<GossipDigestAck2>
diff --git a/src/java/org/apache/cassandra/gms/GossipDigestAck2VerbHandler.java b/src/java/org/apache/cassandra/gms/GossipDigestAck2VerbHandler.java
index 58c1589..0f01999 100644
--- a/src/java/org/apache/cassandra/gms/GossipDigestAck2VerbHandler.java
+++ b/src/java/org/apache/cassandra/gms/GossipDigestAck2VerbHandler.java
@@ -44,7 +44,7 @@
                 logger.trace("Ignoring GossipDigestAck2Message because gossip is disabled");
             return;
         }
-        Map<InetAddressAndPort, EndpointState> remoteEpStateMap = message.payload.getEndpointStateMap();
+        Map<InetAddressAndPort, EndpointState> remoteEpStateMap = message.payload.epStateMap;
         /* Notify the Failure Detector */
         Gossiper.instance.notifyFailureDetector(remoteEpStateMap);
         Gossiper.instance.applyStateLocally(remoteEpStateMap);
diff --git a/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java b/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java
index 0242d83..5fbe7ce 100644
--- a/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java
+++ b/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java
@@ -29,6 +29,7 @@
 import org.apache.cassandra.net.MessagingService;
 
 import static org.apache.cassandra.net.Verb.GOSSIP_DIGEST_ACK2;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class GossipDigestAckVerbHandler extends GossipVerbHandler<GossipDigestAck>
 {
@@ -68,7 +69,7 @@
             // Ignore any GossipDigestAck messages that we handle before a regular GossipDigestSyn has been send.
             // This will prevent Acks from leaking over from the shadow round that are not actual part of
             // the regular gossip conversation.
-            if ((System.nanoTime() - Gossiper.instance.firstSynSendAt) < 0 || Gossiper.instance.firstSynSendAt == 0)
+            if ((nanoTime() - Gossiper.instance.firstSynSendAt) < 0 || Gossiper.instance.firstSynSendAt == 0)
             {
                 if (logger.isTraceEnabled())
                     logger.trace("Ignoring unrequested GossipDigestAck from {}", from);
diff --git a/src/java/org/apache/cassandra/gms/GossipDigestSyn.java b/src/java/org/apache/cassandra/gms/GossipDigestSyn.java
index 17c8da3..7c2ae94 100644
--- a/src/java/org/apache/cassandra/gms/GossipDigestSyn.java
+++ b/src/java/org/apache/cassandra/gms/GossipDigestSyn.java
@@ -30,6 +30,7 @@
  * This is the first message that gets sent out as a start of the Gossip protocol in a
  * round.
  */
+
 public class GossipDigestSyn
 {
     public static final IVersionedSerializer<GossipDigestSyn> serializer = new GossipDigestSynSerializer();
diff --git a/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java b/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java
index 83c8568..f2622ef 100644
--- a/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java
+++ b/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java
@@ -39,4 +39,4 @@
         Gossiper.instance.markAsShutdown(message.from());
     }
 
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/gms/Gossiper.java b/src/java/org/apache/cassandra/gms/Gossiper.java
index 17e315a..ae213c0 100644
--- a/src/java/org/apache/cassandra/gms/Gossiper.java
+++ b/src/java/org/apache/cassandra/gms/Gossiper.java
@@ -20,7 +20,14 @@
 import java.net.UnknownHostException;
 import java.util.*;
 import java.util.Map.Entry;
-import java.util.concurrent.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.BooleanSupplier;
 import java.util.function.Supplier;
@@ -35,10 +42,10 @@
 import com.google.common.collect.Iterables;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFutureTask;
 import com.google.common.util.concurrent.Uninterruptibles;
 
-import org.apache.cassandra.concurrent.JMXEnabledSingleThreadExecutor;
+import org.apache.cassandra.concurrent.*;
+import org.apache.cassandra.concurrent.FutureTask;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.NoPayload;
 import org.apache.cassandra.net.Verb;
@@ -51,8 +58,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.SystemKeyspace;
@@ -64,11 +69,22 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.RecomputingSupplier;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+import org.apache.cassandra.utils.concurrent.NotScheduledFuture;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.config.CassandraRelevantProperties.DISABLE_GOSSIP_ENDPOINT_REMOVAL;
 import static org.apache.cassandra.config.CassandraRelevantProperties.GOSSIPER_QUARANTINE_DELAY;
+import static org.apache.cassandra.config.CassandraRelevantProperties.GOSSIPER_SKIP_WAITING_TO_SETTLE;
+import static org.apache.cassandra.config.CassandraRelevantProperties.SHUTDOWN_ANNOUNCE_DELAY_IN_MS;
+import static org.apache.cassandra.config.DatabaseDescriptor.getClusterName;
+import static org.apache.cassandra.config.DatabaseDescriptor.getPartitionerName;
 import static org.apache.cassandra.net.NoPayload.noPayload;
 import static org.apache.cassandra.net.Verb.ECHO_REQ;
 import static org.apache.cassandra.net.Verb.GOSSIP_DIGEST_SYN;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * This module is responsible for Gossiping information for the local endpoint. This abstraction
@@ -94,7 +110,7 @@
         public static final String DISABLE_THREAD_VALIDATION = "cassandra.gossip.disable_thread_validation";
     }
 
-    private static final DebuggableScheduledThreadPoolExecutor executor = new DebuggableScheduledThreadPoolExecutor("GossipTasks");
+    private static final ScheduledExecutorPlus executor = executorFactory().scheduled("GossipTasks");
 
     static final ApplicationState[] STATES = ApplicationState.values();
     static final List<String> DEAD_STATES = Arrays.asList(VersionedValue.REMOVING_TOKEN, VersionedValue.REMOVED_TOKEN,
@@ -112,7 +128,7 @@
     private volatile ScheduledFuture<?> scheduledGossipTask;
     private static final ReentrantLock taskLock = new ReentrantLock();
     public final static int intervalInMillis = 1000;
-    public final static int QUARANTINE_DELAY = GOSSIPER_QUARANTINE_DELAY.getInt(StorageService.RING_DELAY * 2);
+    public final static int QUARANTINE_DELAY = GOSSIPER_QUARANTINE_DELAY.getInt(StorageService.RING_DELAY_MILLIS * 2);
     private static final Logger logger = LoggerFactory.getLogger(Gossiper.class);
     private static final NoSpamLogger noSpamLogger = NoSpamLogger.getLogger(logger, 15L, TimeUnit.MINUTES);
 
@@ -142,8 +158,9 @@
     @VisibleForTesting
     final Set<InetAddressAndPort> seeds = new ConcurrentSkipListSet<>();
 
-    /* map where key is the endpoint and value is the state associated with the endpoint */
-    final ConcurrentMap<InetAddressAndPort, EndpointState> endpointStateMap = new ConcurrentHashMap<>();
+    /* map where key is the endpoint and value is the state associated with the endpoint.
+     * This is made public to be consumed by the GossipInfoTable virtual table */
+    public final ConcurrentMap<InetAddressAndPort, EndpointState> endpointStateMap = new ConcurrentHashMap<>();
 
     /* map where key is endpoint and value is timestamp when this endpoint was removed from
      * gossip. We will ignore any gossip regarding these endpoints for QUARANTINE_DELAY time
@@ -159,7 +176,7 @@
     // endpoint states as gathered during shadow round
     private final Map<InetAddressAndPort, EndpointState> endpointShadowStateMap = new ConcurrentHashMap<>();
 
-    private volatile long lastProcessedMessageAt = System.currentTimeMillis();
+    private volatile long lastProcessedMessageAt = currentTimeMillis();
 
     /**
      * This property is initially set to {@code true} which means that we have no information about the other nodes.
@@ -237,6 +254,7 @@
     }
 
     private static final boolean disableThreadValidation = Boolean.getBoolean(Props.DISABLE_THREAD_VALIDATION);
+    private static volatile boolean disableEndpointRemoval = DISABLE_GOSSIP_ENDPOINT_REMOVAL.getBoolean();
 
     private static long getVeryLongTime()
     {
@@ -251,7 +269,7 @@
 
     private static boolean isInGossipStage()
     {
-        return ((JMXEnabledSingleThreadExecutor) Stage.GOSSIP.executor()).isExecutedBy(Thread.currentThread());
+        return Stage.GOSSIP.executor().inExecutor();
     }
 
     private static void checkProperThreadForStateMutation()
@@ -282,16 +300,17 @@
                 taskLock.lock();
 
                 /* Update the local heartbeat counter. */
-                endpointStateMap.get(FBUtilities.getBroadcastAddressAndPort()).getHeartBeatState().updateHeartBeat();
+                endpointStateMap.get(getBroadcastAddressAndPort()).getHeartBeatState().updateHeartBeat();
                 if (logger.isTraceEnabled())
                     logger.trace("My heartbeat is now {}", endpointStateMap.get(FBUtilities.getBroadcastAddressAndPort()).getHeartBeatState().getHeartBeatVersion());
                 final List<GossipDigest> gDigests = new ArrayList<>();
+
                 Gossiper.instance.makeGossipDigest(gDigests);
 
                 if (gDigests.size() > 0)
                 {
-                    GossipDigestSyn digestSynMessage = new GossipDigestSyn(DatabaseDescriptor.getClusterName(),
-                                                                           DatabaseDescriptor.getPartitionerName(),
+                    GossipDigestSyn digestSynMessage = new GossipDigestSyn(getClusterName(),
+                                                                           getPartitionerName(),
                                                                            gDigests);
                     Message<GossipDigestSyn> message = Message.out(GOSSIP_DIGEST_SYN, digestSynMessage);
                     /* Gossip to some random live member */
@@ -437,8 +456,8 @@
     public Set<InetAddressAndPort> getLiveMembers()
     {
         Set<InetAddressAndPort> liveMembers = new HashSet<>(liveEndpoints);
-        if (!liveMembers.contains(FBUtilities.getBroadcastAddressAndPort()))
-            liveMembers.add(FBUtilities.getBroadcastAddressAndPort());
+        if (!liveMembers.contains(getBroadcastAddressAndPort()))
+            liveMembers.add(getBroadcastAddressAndPort());
         return liveMembers;
     }
 
@@ -477,7 +496,7 @@
     {
         Long downtime = unreachableEndpoints.get(ep);
         if (downtime != null)
-            return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - downtime);
+            return TimeUnit.NANOSECONDS.toMillis(nanoTime() - downtime);
         else
             return 0L;
     }
@@ -516,13 +535,17 @@
             return;
         }
 
-        ListenableFutureTask task = ListenableFutureTask.create(runnable, null);
+        FutureTask<?> task = new FutureTask<>(runnable);
         Stage.GOSSIP.execute(task);
         try
         {
             task.get();
         }
-        catch (InterruptedException | ExecutionException e)
+        catch (InterruptedException e)
+        {
+            throw new UncheckedInterruptedException(e);
+        }
+        catch (ExecutionException e)
         {
             throw new AssertionError(e);
         }
@@ -587,7 +610,7 @@
      * @param epState
      * @return
      */
-    int getMaxEndpointStateVersion(EndpointState epState)
+    static int getMaxEndpointStateVersion(EndpointState epState)
     {
         int maxVersion = epState.getHeartBeatState().getHeartBeatVersion();
         for (Map.Entry<ApplicationState, VersionedValue> state : epState.states())
@@ -632,6 +655,9 @@
                 logger.warn("Seeds list is now empty!");
         }
 
+        if (disableEndpointRemoval)
+            return;
+
         liveEndpoints.remove(endpoint);
         unreachableEndpoints.remove(endpoint);
         MessagingService.instance().versions.reset(endpoint);
@@ -642,6 +668,16 @@
         GossiperDiagnostics.removedEndpoint(this, endpoint);
     }
 
+    @VisibleForTesting
+    public void unsafeAnnulEndpoint(InetAddressAndPort endpoint)
+    {
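+        // test-only helper: purge the endpoint from every gossip map, including any quarantine entry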
+        removeEndpoint(endpoint);
+        justRemovedEndpoints.remove(endpoint);
+        endpointStateMap.remove(endpoint);
+        expireTimeEndpointMap.remove(endpoint);
+        unreachableEndpoints.remove(endpoint);
+    }
+
     /**
      * Quarantines the endpoint for QUARANTINE_DELAY
      *
@@ -649,7 +685,7 @@
      */
     private void quarantineEndpoint(InetAddressAndPort endpoint)
     {
-        quarantineEndpoint(endpoint, System.currentTimeMillis());
+        quarantineEndpoint(endpoint, currentTimeMillis());
     }
 
     /**
@@ -660,6 +696,8 @@
      */
     private void quarantineEndpoint(InetAddressAndPort endpoint, long quarantineExpiration)
     {
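+        // when gossip endpoint removal is disabled, skip quarantining so incoming state for this endpoint is not ignored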
+        if (disableEndpointRemoval)
+            return;
         justRemovedEndpoints.put(endpoint, quarantineExpiration);
         GossiperDiagnostics.quarantinedEndpoint(this, endpoint, quarantineExpiration);
     }
@@ -671,8 +709,7 @@
     public void replacementQuarantine(InetAddressAndPort endpoint)
     {
         // remember, quarantineEndpoint will effectively already add QUARANTINE_DELAY, so this is 2x
-        logger.debug("");
-        quarantineEndpoint(endpoint, System.currentTimeMillis() + QUARANTINE_DELAY);
+        quarantineEndpoint(endpoint, currentTimeMillis() + QUARANTINE_DELAY);
         GossiperDiagnostics.replacementQuarantine(this, endpoint);
     }
 
@@ -743,8 +780,8 @@
         // remember this node's generation
         int generation = epState.getHeartBeatState().getGeneration();
         logger.info("Removing host: {}", hostId);
-        logger.info("Sleeping for {}ms to ensure {} does not change", StorageService.RING_DELAY, endpoint);
-        Uninterruptibles.sleepUninterruptibly(StorageService.RING_DELAY, TimeUnit.MILLISECONDS);
+        logger.info("Sleeping for {}ms to ensure {} does not change", StorageService.RING_DELAY_MILLIS, endpoint);
+        Uninterruptibles.sleepUninterruptibly(StorageService.RING_DELAY_MILLIS, TimeUnit.MILLISECONDS);
         // make sure it did not change
         epState = endpointStateMap.get(endpoint);
         if (epState.getHeartBeatState().getGeneration() != generation)
@@ -806,14 +843,14 @@
 
             if (epState == null)
             {
-                epState = new EndpointState(new HeartBeatState((int) ((System.currentTimeMillis() + 60000) / 1000), 9999));
+                epState = new EndpointState(new HeartBeatState((int) ((currentTimeMillis() + 60000) / 1000), 9999));
             }
             else
             {
                 int generation = epState.getHeartBeatState().getGeneration();
                 int heartbeat = epState.getHeartBeatState().getHeartBeatVersion();
-                logger.info("Sleeping for {}ms to ensure {} does not change", StorageService.RING_DELAY, endpoint);
-                Uninterruptibles.sleepUninterruptibly(StorageService.RING_DELAY, TimeUnit.MILLISECONDS);
+                logger.info("Sleeping for {}ms to ensure {} does not change", StorageService.RING_DELAY_MILLIS, endpoint);
+                Uninterruptibles.sleepUninterruptibly(StorageService.RING_DELAY_MILLIS, TimeUnit.MILLISECONDS);
                 // make sure it did not change
                 EndpointState newState = endpointStateMap.get(endpoint);
                 if (newState == null)
@@ -880,7 +917,7 @@
         if (logger.isTraceEnabled())
             logger.trace("Sending a GossipDigestSyn to {} ...", to);
         if (firstSynSendAt == 0)
-            firstSynSendAt = System.nanoTime();
+            firstSynSendAt = nanoTime();
         MessagingService.instance().send(message, to);
 
         boolean isSeed = seeds.contains(to);
@@ -921,7 +958,7 @@
         int size = seeds.size();
         if (size > 0)
         {
-            if (size == 1 && seeds.contains(FBUtilities.getBroadcastAddressAndPort()))
+            if (size == 1 && seeds.contains(getBroadcastAddressAndPort()))
             {
                 return;
             }
@@ -1014,10 +1051,10 @@
         if (logger.isTraceEnabled())
             logger.trace("Performing status check ...");
 
-        long now = System.currentTimeMillis();
-        long nowNano = System.nanoTime();
+        long now = currentTimeMillis();
+        long nowNano = nanoTime();
 
-        long pending = ((JMXEnabledThreadPoolExecutor) Stage.GOSSIP.executor()).metrics.pendingTasks.getValue();
+        long pending = Stage.GOSSIP.executor().getPendingTaskCount();
         if (pending > 0 && lastProcessedMessageAt < now - 1000)
         {
             // if some new messages just arrived, give the executor some time to work on them
@@ -1034,7 +1071,7 @@
         Set<InetAddressAndPort> eps = endpointStateMap.keySet();
         for (InetAddressAndPort endpoint : eps)
         {
-            if (endpoint.equals(FBUtilities.getBroadcastAddressAndPort()))
+            if (endpoint.equals(getBroadcastAddressAndPort()))
                 continue;
 
             FailureDetector.instance.interpret(endpoint);
@@ -1102,11 +1139,28 @@
         return endpointStateMap.get(ep);
     }
 
+    public EndpointState copyEndpointStateForEndpoint(InetAddressAndPort ep)
+    {
+        EndpointState epState = endpointStateMap.get(ep);
+        if (epState == null)
+            return null;
+        return new EndpointState(epState);
+    }
+
     public ImmutableSet<InetAddressAndPort> getEndpoints()
     {
         return ImmutableSet.copyOf(endpointStateMap.keySet());
     }
 
+    public String getForEndpoint(InetAddressAndPort ep, ApplicationState state)
+    {
+        EndpointState epState = endpointStateMap.get(ep);
+        if (epState == null)
+            return null;
+        VersionedValue value = epState.getApplicationState(state);
+        return value == null ? null : value.value;
+    }
+
     public int getEndpointCount()
     {
         return endpointStateMap.size();
@@ -1241,7 +1295,7 @@
         return ep1.getHeartBeatState().getGeneration() - ep2.getHeartBeatState().getGeneration();
     }
 
-    void notifyFailureDetector(Map<InetAddressAndPort, EndpointState> remoteEpStateMap)
+    public void notifyFailureDetector(Map<InetAddressAndPort, EndpointState> remoteEpStateMap)
     {
         for (Entry<InetAddressAndPort, EndpointState> entry : remoteEpStateMap.entrySet())
         {
@@ -1251,6 +1305,9 @@
 
     void notifyFailureDetector(InetAddressAndPort endpoint, EndpointState remoteEndpointState)
     {
+        if (remoteEndpointState == null)
+            return;
+
         EndpointState localEndpointState = endpointStateMap.get(endpoint);
         /*
          * If the local endpoint state exists then report to the FD only
@@ -1352,8 +1409,11 @@
     private void silentlyMarkDead(InetAddressAndPort addr, EndpointState localState)
     {
         localState.markDead();
-        liveEndpoints.remove(addr);
-        unreachableEndpoints.put(addr, System.nanoTime());
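+        // skip moving the endpoint to the unreachable set when endpoint removal is disabled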
+        if (!disableEndpointRemoval)
+        {
+            liveEndpoints.remove(addr);
+            unreachableEndpoints.put(addr, nanoTime());
+        }
     }
 
     /**
@@ -1472,7 +1532,7 @@
         for (Entry<InetAddressAndPort, EndpointState> entry : epStateMap.entrySet())
         {
             InetAddressAndPort ep = entry.getKey();
-            if ( ep.equals(FBUtilities.getBroadcastAddressAndPort()) && !isInShadowRound())
+            if (ep.equals(getBroadcastAddressAndPort()) && !isInShadowRound())
                 continue;
             if (justRemovedEndpoints.containsKey(ep))
             {
@@ -1494,7 +1554,7 @@
             {
                 int localGeneration = localEpStatePtr.getHeartBeatState().getGeneration();
                 int remoteGeneration = remoteState.getHeartBeatState().getGeneration();
-                long localTime = System.currentTimeMillis()/1000;
+                long localTime = currentTimeMillis() / 1000;
                 if (logger.isTraceEnabled())
                     logger.trace("{} local generation {}, remote generation {}", ep, localGeneration, remoteGeneration);
 
@@ -1574,12 +1634,33 @@
         if (!hasMajorVersion3Nodes())
             localState.removeMajorVersion3LegacyApplicationStates();
 
+        // need to run STATUS or STATUS_WITH_PORT first to handle BOOT_REPLACE correctly (else won't be a member, so TOKENS won't be processed)
         for (Entry<ApplicationState, VersionedValue> updatedEntry : updatedStates)
         {
+            switch (updatedEntry.getKey())
+            {
+                default:
+                    continue;
+                case STATUS:
+                    if (localState.containsApplicationState(ApplicationState.STATUS_WITH_PORT))
+                        continue;
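+                    // otherwise fall through to STATUS_WITH_PORT and fire the change notification below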
+                case STATUS_WITH_PORT:
+            }
+            doOnChangeNotifications(addr, updatedEntry.getKey(), updatedEntry.getValue());
+        }
+
+        for (Entry<ApplicationState, VersionedValue> updatedEntry : updatedStates)
+        {
+            switch (updatedEntry.getKey())
+            {
+                // We should have already handled these two states above:
+                case STATUS_WITH_PORT:
+                case STATUS:
+                    continue;
+            }
             // filters out legacy change notifications
             // only if local state already indicates that the peer has the new fields
             if ((ApplicationState.INTERNAL_IP == updatedEntry.getKey() && localState.containsApplicationState(ApplicationState.INTERNAL_ADDRESS_AND_PORT))
-                ||(ApplicationState.STATUS == updatedEntry.getKey() && localState.containsApplicationState(ApplicationState.STATUS_WITH_PORT))
                 || (ApplicationState.RPC_ADDRESS == updatedEntry.getKey() && localState.containsApplicationState(ApplicationState.NATIVE_ADDRESS_AND_PORT)))
                 continue;
             doOnChangeNotifications(addr, updatedEntry.getKey(), updatedEntry.getValue());
@@ -1596,7 +1677,7 @@
     }
 
     // notify that an application state has changed
-    private void doOnChangeNotifications(InetAddressAndPort addr, ApplicationState state, VersionedValue value)
+    public void doOnChangeNotifications(InetAddressAndPort addr, ApplicationState state, VersionedValue value)
     {
         for (IEndpointStateChangeSubscriber subscriber : subscribers)
         {
@@ -1758,7 +1839,7 @@
         buildSeedsList();
         /* initialize the heartbeat state for this localEndpoint */
         maybeInitializeLocalState(generationNbr);
-        EndpointState localState = endpointStateMap.get(FBUtilities.getBroadcastAddressAndPort());
+        EndpointState localState = endpointStateMap.get(getBroadcastAddressAndPort());
         localState.addApplicationStates(preloadLocalStates);
         minVersionSupplier.recompute();
 
@@ -1805,17 +1886,15 @@
         if (seeds.isEmpty() && peers.isEmpty())
             return endpointShadowStateMap;
 
-        boolean isSeed = DatabaseDescriptor.getSeeds().contains(FBUtilities.getBroadcastAddressAndPort());
+        boolean isSeed = DatabaseDescriptor.getSeeds().contains(getBroadcastAddressAndPort());
         // We double RING_DELAY if we're not a seed to increase chance of successful startup during a full cluster bounce,
         // giving the seeds a chance to startup before we fail the shadow round
-        int shadowRoundDelay =  isSeed ? StorageService.RING_DELAY : StorageService.RING_DELAY * 2;
+        int shadowRoundDelay = isSeed ? StorageService.RING_DELAY_MILLIS : StorageService.RING_DELAY_MILLIS * 2;
         seedsInShadowRound.clear();
         endpointShadowStateMap.clear();
         // send a completely empty syn
         List<GossipDigest> gDigests = new ArrayList<>();
-        GossipDigestSyn digestSynMessage = new GossipDigestSyn(DatabaseDescriptor.getClusterName(),
-                DatabaseDescriptor.getPartitionerName(),
-                gDigests);
+        GossipDigestSyn digestSynMessage = new GossipDigestSyn(getClusterName(), getPartitionerName(), gDigests);
         Message<GossipDigestSyn> message = Message.out(GOSSIP_DIGEST_SYN, digestSynMessage);
 
         inShadowRound = true;
@@ -1858,9 +1937,9 @@
                 }
             }
         }
-        catch (InterruptedException wtf)
+        catch (InterruptedException e)
         {
-            throw new RuntimeException(wtf);
+            throw new UncheckedInterruptedException(e);
         }
 
         return ImmutableMap.copyOf(endpointShadowStateMap);
@@ -1871,7 +1950,7 @@
     {
         for (InetAddressAndPort seed : DatabaseDescriptor.getSeeds())
         {
-            if (seed.equals(FBUtilities.getBroadcastAddressAndPort()))
+            if (seed.equals(getBroadcastAddressAndPort()))
                 continue;
             seeds.add(seed);
         }
@@ -1890,7 +1969,7 @@
         {
             for (InetAddressAndPort seed : DatabaseDescriptor.getSeeds())
             {
-                if (seed.equals(FBUtilities.getBroadcastAddressAndPort()))
+                if (seed.equals(getBroadcastAddressAndPort()))
                     continue;
                 tmp.add(seed);
             }
@@ -1944,12 +2023,12 @@
         HeartBeatState hbState = new HeartBeatState(generationNbr);
         EndpointState localState = new EndpointState(hbState);
         localState.markAlive();
-        endpointStateMap.putIfAbsent(FBUtilities.getBroadcastAddressAndPort(), localState);
+        endpointStateMap.putIfAbsent(getBroadcastAddressAndPort(), localState);
     }
 
     public void forceNewerGeneration()
     {
-        EndpointState epstate = endpointStateMap.get(FBUtilities.getBroadcastAddressAndPort());
+        EndpointState epstate = endpointStateMap.get(getBroadcastAddressAndPort());
         epstate.getHeartBeatState().forceNewerGenerationUnsafe();
     }
 
@@ -1960,7 +2039,7 @@
     public void addSavedEndpoint(InetAddressAndPort ep)
     {
         checkProperThreadForStateMutation();
-        if (ep.equals(FBUtilities.getBroadcastAddressAndPort()))
+        if (ep.equals(getBroadcastAddressAndPort()))
         {
             logger.debug("Attempt to add self as saved endpoint");
             return;
@@ -1989,7 +2068,7 @@
     private void addLocalApplicationStateInternal(ApplicationState state, VersionedValue value)
     {
         assert taskLock.isHeldByCurrentThread();
-        InetAddressAndPort epAddr = FBUtilities.getBroadcastAddressAndPort();
+        InetAddressAndPort epAddr = getBroadcastAddressAndPort();
         EndpointState epState = endpointStateMap.get(epAddr);
         assert epState != null : "Can't find endpoint state for " + epAddr;
         // Fire "before change" notifications:
@@ -2027,7 +2106,7 @@
 
     public void stop()
     {
-        EndpointState mystate = endpointStateMap.get(FBUtilities.getBroadcastAddressAndPort());
+        EndpointState mystate = endpointStateMap.get(getBroadcastAddressAndPort());
         if (mystate != null && !isSilentShutdownState(mystate) && StorageService.instance.isJoined())
         {
             logger.info("Announcing shutdown");
@@ -2036,7 +2115,7 @@
             Message message = Message.out(Verb.GOSSIP_SHUTDOWN, noPayload);
             for (InetAddressAndPort ep : liveEndpoints)
                 MessagingService.instance().send(message, ep);
-            Uninterruptibles.sleepUninterruptibly(Integer.getInteger("cassandra.shutdown_announce_in_ms", 2000), TimeUnit.MILLISECONDS);
+            Uninterruptibles.sleepUninterruptibly(SHUTDOWN_ANNOUNCE_DELAY_IN_MS.getInt(), TimeUnit.MILLISECONDS);
         }
         else
             logger.warn("No local state, state is in silent shutdown, or node hasn't joined, not announcing shutdown");
@@ -2098,7 +2177,7 @@
         }
     }
 
-    protected boolean isInShadowRound()
+    public boolean isInShadowRound()
     {
         return inShadowRound;
     }
@@ -2141,6 +2220,9 @@
         Map<ApplicationState, VersionedValue> states = new EnumMap<>(ApplicationState.class);
         states.put(ApplicationState.NET_VERSION, StorageService.instance.valueFactory.networkVersion(netVersion));
         states.put(ApplicationState.HOST_ID, StorageService.instance.valueFactory.hostId(uuid));
+        states.put(ApplicationState.RPC_ADDRESS, StorageService.instance.valueFactory.rpcaddress(addr.getAddress()));
+        states.put(ApplicationState.INTERNAL_ADDRESS_AND_PORT, StorageService.instance.valueFactory.internalAddressAndPort(addr));
+        states.put(ApplicationState.RELEASE_VERSION, StorageService.instance.valueFactory.releaseVersion());
         localState.addApplicationStates(states);
     }
 
@@ -2172,7 +2254,7 @@
 
     public static long computeExpireTime()
     {
-        return System.currentTimeMillis() + Gossiper.aVeryLongTime;
+        return currentTimeMillis() + aVeryLongTime;
     }
 
     @Nullable
@@ -2212,7 +2294,7 @@
 
     public static void waitToSettle()
     {
-        int forceAfter = Integer.getInteger("cassandra.skip_wait_for_gossip_to_settle", -1);
+        int forceAfter = GOSSIPER_SKIP_WAITING_TO_SETTLE.getInt();
         if (forceAfter == 0)
         {
             return;
@@ -2300,11 +2382,20 @@
      */
     public boolean isUpgradingFromVersionLowerThan(CassandraVersion referenceVersion)
     {
-        CassandraVersion v = upgradeFromVersionMemoized.get();
-        if (SystemKeyspace.NULL_VERSION.equals(v) && scheduledGossipTask == null)
-            return false;
+        return isUpgradingFromVersionLowerThanC17653(referenceVersion).left;
+    }
 
-        return v != null && v.compareTo(referenceVersion) < 0;
+    /* TODO: auxiliary method for debugging C17653. To be removed. */
+    @VisibleForTesting
+    public Pair<Boolean, CassandraVersion> isUpgradingFromVersionLowerThanC17653(CassandraVersion referenceVersion)
+    {
+        CassandraVersion v = upgradeFromVersionMemoized.get();
+        if (CassandraVersion.NULL_VERSION.equals(v) && scheduledGossipTask == null)
+            return Pair.create(false, v);
+
+        boolean res = v != null && v.compareTo(referenceVersion) < 0;
+
+        return Pair.create(res, v);
     }
 
     private boolean nodesAgreeOnSchema(Collection<InetAddressAndPort> nodes)
@@ -2369,7 +2460,7 @@
         CassandraVersion minVersion = null;
 
         for (InetAddressAndPort addr : Iterables.concat(Gossiper.instance.getLiveMembers(),
-                                                 Gossiper.instance.getUnreachableMembers()))
+                                                        Gossiper.instance.getUnreachableMembers()))
         {
             String versionString = getReleaseVersionString(addr);
             // Raced with changes to gossip state, wait until next iteration
@@ -2398,4 +2489,71 @@
 
         return minVersion;
     }
+
+    public void unsafeSetEnabled()
+    {
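+        // test-only helper: mark gossip as started (non-null task, non-zero syn timestamp) without scheduling real rounds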
+        scheduledGossipTask = new NotScheduledFuture<>();
+        firstSynSendAt = 1;
+    }
+
+    public Collection<InetAddressAndPort> unsafeClearRemoteState()
+    {
+        List<InetAddressAndPort> removed = new ArrayList<>();
+        for (InetAddressAndPort ep : endpointStateMap.keySet())
+        {
+            if (ep.equals(getBroadcastAddressAndPort()))
+                continue;
+
+            for (IEndpointStateChangeSubscriber subscriber : subscribers)
+                subscriber.onRemove(ep);
+
+            removed.add(ep);
+        }
+        this.endpointStateMap.keySet().retainAll(Collections.singleton(getBroadcastAddressAndPort()));
+        this.endpointShadowStateMap.keySet().retainAll(Collections.singleton(getBroadcastAddressAndPort()));
+        this.expireTimeEndpointMap.keySet().retainAll(Collections.singleton(getBroadcastAddressAndPort()));
+        this.justRemovedEndpoints.keySet().retainAll(Collections.singleton(getBroadcastAddressAndPort()));
+        this.unreachableEndpoints.keySet().retainAll(Collections.singleton(getBroadcastAddressAndPort()));
+        return removed;
+    }
+
+    public void unsafeGossipWith(InetAddressAndPort ep)
+    {
+        /* Update the local heartbeat counter. */
+        EndpointState epState = endpointStateMap.get(getBroadcastAddressAndPort());
+        if (epState != null)
+        {
+            epState.getHeartBeatState().updateHeartBeat();
+            if (logger.isTraceEnabled())
+                logger.trace("My heartbeat is now {}", epState.getHeartBeatState().getHeartBeatVersion());
+        }
+
+        final List<GossipDigest> gDigests = new ArrayList<GossipDigest>();
+        Gossiper.instance.makeGossipDigest(gDigests);
+
+        GossipDigestSyn digestSynMessage = new GossipDigestSyn(getClusterName(),
+                getPartitionerName(),
+                gDigests);
+        Message<GossipDigestSyn> message = Message.out(GOSSIP_DIGEST_SYN, digestSynMessage);
+
+        MessagingService.instance().send(message, ep);
+    }
+
+    public void unsafeSendShutdown(InetAddressAndPort to)
+    {
+        Message<?> message = Message.out(Verb.GOSSIP_SHUTDOWN, noPayload);
+        MessagingService.instance().send(message, to);
+    }
+
+    public void unsafeSendLocalEndpointStateTo(InetAddressAndPort ep)
+    {
+        /* Fetch the local endpoint state; unlike unsafeGossipWith, this does not update the heartbeat. */
+        EndpointState epState = endpointStateMap.get(getBroadcastAddressAndPort());
+        if (epState == null)
+            throw new IllegalStateException();
+
+        GossipDigestAck2 digestAck2Message = new GossipDigestAck2(Collections.singletonMap(getBroadcastAddressAndPort(), epState));
+        Message<GossipDigestAck2> message = Message.out(Verb.GOSSIP_DIGEST_ACK2, digestAck2Message);
+        MessagingService.instance().send(message, ep);
+    }
 }
diff --git a/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java b/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java
index 57552cc..13f8647 100644
--- a/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java
+++ b/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java
@@ -110,4 +110,4 @@
     {
         return service.isEnabled(GossiperEvent.class, type);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/gms/GossiperEvent.java b/src/java/org/apache/cassandra/gms/GossiperEvent.java
index 4ec0cf4..71fee7c 100644
--- a/src/java/org/apache/cassandra/gms/GossiperEvent.java
+++ b/src/java/org/apache/cassandra/gms/GossiperEvent.java
@@ -108,4 +108,4 @@
         ret.put("unreachableEndpoints", String.valueOf(unreachableEndpoints));
         return ret;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/gms/GossiperMBean.java b/src/java/org/apache/cassandra/gms/GossiperMBean.java
index 92df2cd..47d7207 100644
--- a/src/java/org/apache/cassandra/gms/GossiperMBean.java
+++ b/src/java/org/apache/cassandra/gms/GossiperMBean.java
@@ -38,4 +38,4 @@
     /** Returns each node's database release version */
     public Map<String, List<String>> getReleaseVersionsWithPort();
 
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/gms/HeartBeatState.java b/src/java/org/apache/cassandra/gms/HeartBeatState.java
index 75f4f56..3f633cb 100644
--- a/src/java/org/apache/cassandra/gms/HeartBeatState.java
+++ b/src/java/org/apache/cassandra/gms/HeartBeatState.java
@@ -19,6 +19,8 @@
 
 import java.io.*;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -27,6 +29,7 @@
 /**
  * HeartBeat State associated with any given endpoint.
  */
+
 public class HeartBeatState
 {
     public static final int EMPTY_VERSION = -1;
@@ -67,7 +70,7 @@
         return version == EMPTY_VERSION;
     }
 
-    int getGeneration()
+    public int getGeneration()
     {
         return generation;
     }
@@ -77,7 +80,7 @@
         version = VersionGenerator.getNextVersion();
     }
 
-    int getHeartBeatVersion()
+    public int getHeartBeatVersion()
     {
         return version;
     }
@@ -87,7 +90,8 @@
         generation += 1;
     }
 
-    void forceHighestPossibleVersionUnsafe()
+    @VisibleForTesting
+    public void forceHighestPossibleVersionUnsafe()
     {
         version = Integer.MAX_VALUE;
     }
diff --git a/src/java/org/apache/cassandra/gms/TokenSerializer.java b/src/java/org/apache/cassandra/gms/TokenSerializer.java
index 40d14f8..0048e7c 100644
--- a/src/java/org/apache/cassandra/gms/TokenSerializer.java
+++ b/src/java/org/apache/cassandra/gms/TokenSerializer.java
@@ -30,7 +30,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 
-
 public class TokenSerializer
 {
     private static final Logger logger = LoggerFactory.getLogger(TokenSerializer.class);
@@ -62,4 +61,4 @@
         }
         return tokens;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/gms/VersionedValue.java b/src/java/org/apache/cassandra/gms/VersionedValue.java
index 880cb98..26644e1 100644
--- a/src/java/org/apache/cassandra/gms/VersionedValue.java
+++ b/src/java/org/apache/cassandra/gms/VersionedValue.java
@@ -172,6 +172,11 @@
             return new VersionedValue(String.valueOf(load));
         }
 
+        public VersionedValue diskUsage(String state)
+        {
+            return new VersionedValue(state);
+        }
+
         public VersionedValue schema(UUID newVersion)
         {
             return new VersionedValue(newVersion.toString());
@@ -190,6 +195,14 @@
                                                     Long.toString(expireTime)));
         }
 
+        @VisibleForTesting
+        public VersionedValue left(Collection<Token> tokens, long expireTime, int generation)
+        {
+            return new VersionedValue(versionString(VersionedValue.STATUS_LEFT,
+                                                    makeTokenString(tokens),
+                                                    Long.toString(expireTime)), generation);
+        }
+
         public VersionedValue moving(Token token)
         {
             return new VersionedValue(VersionedValue.STATUS_MOVING + VersionedValue.DELIMITER + partitioner.getTokenFactory().toString(token));
diff --git a/src/java/org/apache/cassandra/hadoop/ConfigHelper.java b/src/java/org/apache/cassandra/hadoop/ConfigHelper.java
index f01197d..cc539b1 100644
--- a/src/java/org/apache/cassandra/hadoop/ConfigHelper.java
+++ b/src/java/org/apache/cassandra/hadoop/ConfigHelper.java
@@ -45,7 +45,7 @@
     private static final String INPUT_PREDICATE_CONFIG = "cassandra.input.predicate";
     private static final String INPUT_KEYRANGE_CONFIG = "cassandra.input.keyRange";
     private static final String INPUT_SPLIT_SIZE_CONFIG = "cassandra.input.split.size";
-    private static final String INPUT_SPLIT_SIZE_IN_MB_CONFIG = "cassandra.input.split.size_mb";
+    private static final String INPUT_SPLIT_SIZE_IN_MIB_CONFIG = "cassandra.input.split.size_mb";
     private static final String INPUT_WIDEROWS_CONFIG = "cassandra.input.widerows";
     private static final int DEFAULT_SPLIT_SIZE = 64 * 1024;
     private static final String RANGE_BATCH_SIZE_CONFIG = "cassandra.range.batch.size";
@@ -185,21 +185,21 @@
      * the overhead of each map will take up the bulk of the job time.
      *
      * @param conf          Job configuration you are about to run
-     * @param splitSizeMb   Input split size in MB
+     * @param splitSizeMb   Input split size in MiB
      */
     public static void setInputSplitSizeInMb(Configuration conf, int splitSizeMb)
     {
-        conf.setInt(INPUT_SPLIT_SIZE_IN_MB_CONFIG, splitSizeMb);
+        conf.setInt(INPUT_SPLIT_SIZE_IN_MIB_CONFIG, splitSizeMb);
     }
 
     /**
      * cassandra.input.split.size will be used if the value is undefined or negative.
      * @param conf  Job configuration you are about to run
-     * @return      split size in MB or -1 if it is undefined.
+     * @return      split size in MiB or -1 if it is undefined.
      */
     public static int getInputSplitSizeInMb(Configuration conf)
     {
-        return conf.getInt(INPUT_SPLIT_SIZE_IN_MB_CONFIG, -1);
+        return conf.getInt(INPUT_SPLIT_SIZE_IN_MIB_CONFIG, -1);
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java b/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java
index 45ffa4e..82d5e8a 100644
--- a/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java
+++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.hadoop.cql3;
 
 import java.io.Closeable;
-import java.io.File;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -28,6 +27,7 @@
 import java.util.concurrent.*;
 
 import com.google.common.net.HostAndPort;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -169,7 +169,7 @@
                                      .using(insertStatement)
                                      .withPartitioner(ConfigHelper.getOutputPartitioner(conf))
                                      .inDirectory(outputDir)
-                                     .withBufferSizeInMB(Integer.parseInt(conf.get(BUFFER_SIZE_IN_MB, "64")))
+                                     .withBufferSizeInMiB(Integer.parseInt(conf.get(BUFFER_SIZE_IN_MB, "64")))
                                      .withPartitioner(partitioner)
                                      .build();
         }
@@ -226,9 +226,9 @@
     
     private File getTableDirectory() throws IOException
     {
-        File dir = new File(String.format("%s%s%s%s%s-%s", getOutputLocation(), File.separator, keyspace, File.separator, table, UUID.randomUUID().toString()));
+        File dir = new File(String.format("%s%s%s%s%s-%s", getOutputLocation(), File.pathSeparator(), keyspace, File.pathSeparator(), table, UUID.randomUUID().toString()));
         
-        if (!dir.exists() && !dir.mkdirs())
+        if (!dir.exists() && !dir.tryCreateDirectories())
         {
             throw new IOException("Failed to create output directory: " + dir);
         }
diff --git a/src/java/org/apache/cassandra/hadoop/cql3/CqlConfigHelper.java b/src/java/org/apache/cassandra/hadoop/cql3/CqlConfigHelper.java
index f9a6f3a..998a754 100644
--- a/src/java/org/apache/cassandra/hadoop/cql3/CqlConfigHelper.java
+++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlConfigHelper.java
@@ -20,7 +20,6 @@
 *
 */
 import java.nio.file.Files;
-import java.nio.file.Paths;
 import java.io.InputStream;
 import java.io.IOException;
 import java.security.KeyManagementException;
@@ -52,6 +51,7 @@
 import com.datastax.driver.core.SSLOptions;
 import com.datastax.driver.core.SocketOptions;
 import org.apache.cassandra.hadoop.ConfigHelper;
+import org.apache.cassandra.io.util.File;
 import org.apache.hadoop.conf.Configuration;
 
 
@@ -625,7 +625,7 @@
         TrustManagerFactory tmf = null;
         if (truststorePath.isPresent())
         {
-            try (InputStream tsf = Files.newInputStream(Paths.get(truststorePath.get())))
+            try (InputStream tsf = Files.newInputStream(File.getPath(truststorePath.get())))
             {
                 KeyStore ts = KeyStore.getInstance("JKS");
                 ts.load(tsf, truststorePassword.isPresent() ? truststorePassword.get().toCharArray() : null);
@@ -637,7 +637,7 @@
         KeyManagerFactory kmf = null;
         if (keystorePath.isPresent())
         {
-            try (InputStream ksf = Files.newInputStream(Paths.get(keystorePath.get())))
+            try (InputStream ksf = Files.newInputStream(File.getPath(keystorePath.get())))
             {
                 KeyStore ks = KeyStore.getInstance("JKS");
                 ks.load(ksf, keystorePassword.isPresent() ? keystorePassword.get().toCharArray() : null);
diff --git a/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java b/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java
index 1ea8eda..0755684 100644
--- a/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java
+++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java
@@ -54,7 +54,8 @@
 import org.apache.cassandra.hadoop.*;
 import org.apache.cassandra.utils.*;
 
-import static java.util.stream.Collectors.toMap;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Hadoop InputFormat allowing map/reduce against Cassandra rows within one ColumnFamily.
@@ -135,7 +136,7 @@
         logger.trace("partitioner is {}", partitioner);
 
         // canonical ranges, split into pieces, fetching the splits in parallel
-        ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
+        ExecutorService executor = executorFactory().pooled("HadoopInput", 128);
         List<org.apache.hadoop.mapreduce.InputSplit> splits = new ArrayList<>();
 
         String[] inputInitialAddress = ConfigHelper.getInputInitialAddress(conf).split(",");
@@ -242,7 +243,7 @@
         }
 
         assert splits.size() > 0;
-        Collections.shuffle(splits, new Random(System.nanoTime()));
+        Collections.shuffle(splits, new Random(nanoTime()));
         return splits;
     }
 
@@ -329,8 +330,8 @@
     private Map<TokenRange, Long> getSubSplits(String keyspace, String cfName, TokenRange range, Host host, Configuration conf, Session session)
     {
         int splitSize = ConfigHelper.getInputSplitSize(conf);
-        int splitSizeMb = ConfigHelper.getInputSplitSizeInMb(conf);
-        return describeSplits(keyspace, cfName, range, host, splitSize, splitSizeMb, session);
+        int splitSizeMiB = ConfigHelper.getInputSplitSizeInMb(conf);
+        return describeSplits(keyspace, cfName, range, host, splitSize, splitSizeMiB, session);
     }
 
     private static Map<TokenRange, List<Host>> getRangeMap(String keyspace, Metadata metadata, String targetDC)
diff --git a/src/java/org/apache/cassandra/hadoop/package-info.java b/src/java/org/apache/cassandra/hadoop/package-info.java
new file mode 100644
index 0000000..835165b
--- /dev/null
+++ b/src/java/org/apache/cassandra/hadoop/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package was deprecated. See CASSANDRA-16984.
+ */
+@Deprecated
+package org.apache.cassandra.hadoop;
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java b/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java
index e6e8b38..339e45e 100644
--- a/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java
+++ b/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.zip.CRC32;
@@ -26,7 +25,6 @@
 
 import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.io.util.*;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Throwables;
 import org.apache.cassandra.utils.NativeLibrary;
 
diff --git a/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java b/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java
index 8792e32..63a59cd 100644
--- a/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java
+++ b/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
@@ -28,6 +27,8 @@
 
 import org.apache.cassandra.io.compress.ICompressor;
 
+import org.apache.cassandra.io.util.File;
+
 public class CompressedHintsWriter extends HintsWriter
 {
     // compressed and uncompressed size is stored at the beginning of each compressed block
diff --git a/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java b/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java
index 4786d9c..f9822d9 100644
--- a/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java
+++ b/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
@@ -26,6 +25,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.security.EncryptionUtils;
 import org.apache.cassandra.io.compress.ICompressor;
 
diff --git a/src/java/org/apache/cassandra/hints/Hint.java b/src/java/org/apache/cassandra/hints/Hint.java
index 6c7c5d4..3089894 100644
--- a/src/java/org/apache/cassandra/hints/Hint.java
+++ b/src/java/org/apache/cassandra/hints/Hint.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Throwables;
@@ -34,10 +33,14 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.vint.VIntCoding;
+import com.google.common.annotations.VisibleForTesting;
 
 import static org.apache.cassandra.db.TypeSizes.sizeof;
 import static org.apache.cassandra.db.TypeSizes.sizeofUnsignedVInt;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Encapsulates the hinted mutation, its creation time, and the gc grace seconds param for each table involved.
@@ -92,7 +95,7 @@
     /**
      * Applies the contained mutation unless it's expired, filtering out any updates for truncated tables
      */
-    CompletableFuture<?> applyFuture()
+    Future<?> applyFuture()
     {
         if (isLive())
         {
@@ -106,7 +109,7 @@
                 return filtered.applyFuture();
         }
 
-        return CompletableFuture.completedFuture(null);
+        return ImmediateFuture.success(null);
     }
 
     void apply()
@@ -134,13 +137,24 @@
      */
     public boolean isLive()
     {
-        return isLive(creationTime, System.currentTimeMillis(), ttl());
+        return isLive(creationTime, currentTimeMillis(), ttl());
     }
 
     static boolean isLive(long creationTime, long now, int hintTTL)
     {
-        long expirationTime = creationTime + TimeUnit.SECONDS.toMillis(Math.min(hintTTL, maxHintTTL));
-        return expirationTime > now;
+        return expirationInMillis(creationTime, hintTTL) > now;
+    }
+
+    @VisibleForTesting
+    long expirationInMillis()
+    {
+        int smallestGCGS = Math.min(gcgs, mutation.smallestGCGS());
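+        // expiry uses the smallest gc_grace_seconds across this hint and its mutation's tables, capped at maxHintTTL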
+        return expirationInMillis(creationTime, smallestGCGS);
+    }
+
+    private static long expirationInMillis(long creationTime, int hintTTL)
+    {
+        return creationTime + TimeUnit.SECONDS.toMillis(Math.min(hintTTL, maxHintTTL));
     }
 
     static final class Serializer implements IVersionedSerializer<Hint>
diff --git a/src/java/org/apache/cassandra/hints/HintDiagnostics.java b/src/java/org/apache/cassandra/hints/HintDiagnostics.java
index 3ff0834..285193b 100644
--- a/src/java/org/apache/cassandra/hints/HintDiagnostics.java
+++ b/src/java/org/apache/cassandra/hints/HintDiagnostics.java
@@ -82,4 +82,4 @@
         return service.isEnabled(HintEvent.class, type);
     }
 
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/hints/HintEvent.java b/src/java/org/apache/cassandra/hints/HintEvent.java
index d8b6943..695357e 100644
--- a/src/java/org/apache/cassandra/hints/HintEvent.java
+++ b/src/java/org/apache/cassandra/hints/HintEvent.java
@@ -99,4 +99,4 @@
         }
         return ret;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/hints/HintVerbHandler.java b/src/java/org/apache/cassandra/hints/HintVerbHandler.java
index 2fbe475..e6758d0 100644
--- a/src/java/org/apache/cassandra/hints/HintVerbHandler.java
+++ b/src/java/org/apache/cassandra/hints/HintVerbHandler.java
@@ -94,7 +94,7 @@
         else
         {
             // the common path - the node is both the destination and a valid replica for the hint.
-            hint.applyFuture().thenAccept(o -> respond(message)).exceptionally(e -> {logger.debug("Failed to apply hint", e); return null;});
+            hint.applyFuture().addCallback(o -> respond(message), e -> logger.debug("Failed to apply hint", e));
         }
     }
 
diff --git a/src/java/org/apache/cassandra/hints/HintsBuffer.java b/src/java/org/apache/cassandra/hints/HintsBuffer.java
index d944b4d..d6dcfb3 100644
--- a/src/java/org/apache/cassandra/hints/HintsBuffer.java
+++ b/src/java/org/apache/cassandra/hints/HintsBuffer.java
@@ -26,11 +26,13 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.zip.CRC32;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.io.util.DataOutputBufferFixed;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.utils.AbstractIterator;
+import org.apache.cassandra.utils.Clock;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 
 import static org.apache.cassandra.utils.FBUtilities.updateChecksum;
@@ -58,6 +60,7 @@
 
     private final ConcurrentMap<UUID, Queue<Integer>> offsets;
     private final OpOrder appendOrder;
+    private final ConcurrentMap<UUID, Long> earliestHintByHost; // Stores time of the earliest hint in the buffer for each host
 
     private HintsBuffer(ByteBuffer slab)
     {
@@ -66,6 +69,7 @@
         position = new AtomicLong();
         offsets = new ConcurrentHashMap<>();
         appendOrder = new OpOrder();
+        earliestHintByHost = new ConcurrentHashMap<>();
     }
 
     static HintsBuffer create(int slabSize)
@@ -141,6 +145,21 @@
         };
     }
 
+    /**
+     * Retrieve the time of the earliest hint in the buffer for a specific node
+     * @param hostId UUID of the node
+     * @return timestamp of the earliest hint in the buffer for that host, or {@link Clock.Global#currentTimeMillis()} if there is none
+     */
+    long getEarliestHintTime(UUID hostId)
+    {
+        return earliestHintByHost.getOrDefault(hostId, Clock.Global.currentTimeMillis());
+    }
+
+    void clearEarliestHintForHostId(UUID hostId)
+    {
+        earliestHintByHost.remove(hostId);
+    }
+
     @SuppressWarnings("resource")
     Allocation allocate(int hintSize)
     {
@@ -222,8 +241,15 @@
         void write(Iterable<UUID> hostIds, Hint hint)
         {
             write(hint);
+            long ts = Clock.Global.currentTimeMillis();
             for (UUID hostId : hostIds)
+            {
+                // We only need the time of the first hint in the buffer
+                if (DatabaseDescriptor.hintWindowPersistentEnabled())
+                    earliestHintByHost.putIfAbsent(hostId, ts);
+
                 put(hostId, offset);
+            }
         }
 
         public void close()
diff --git a/src/java/org/apache/cassandra/hints/HintsBufferPool.java b/src/java/org/apache/cassandra/hints/HintsBufferPool.java
index 7f66efd..78f07dd 100644
--- a/src/java/org/apache/cassandra/hints/HintsBufferPool.java
+++ b/src/java/org/apache/cassandra/hints/HintsBufferPool.java
@@ -18,12 +18,15 @@
 package org.apache.cassandra.hints;
 
 import java.io.Closeable;
+import java.util.Iterator;
 import java.util.UUID;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
 
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 
 /**
  * A primitive pool of {@link HintsBuffer} buffers. Under normal conditions should only hold two buffers - the currently
@@ -45,7 +48,7 @@
 
     HintsBufferPool(int bufferSize, FlushCallback flushCallback)
     {
-        reserveBuffers = new LinkedBlockingQueue<>();
+        reserveBuffers = newBlockingQueue();
         this.bufferSize = bufferSize;
         this.flushCallback = flushCallback;
     }
@@ -63,6 +66,22 @@
         }
     }
 
+    /**
+     * Get the time of the earliest hint for a specific node across the current and reserve buffers
+     * @param hostId UUID of the node
+     * @return timestamp of the earliest hint, or the current time if there is none
+     */
+    long getEarliestHintForHost(UUID hostId)
+    {
+        long min = currentBuffer().getEarliestHintTime(hostId);
+        Iterator<HintsBuffer> it = reserveBuffers.iterator();
+
+        while (it.hasNext())
+            min = Math.min(min, it.next().getEarliestHintTime(hostId));
+
+        return min;
+    }
+
     private HintsBuffer.Allocation allocate(int hintSize)
     {
         HintsBuffer current = currentBuffer();
@@ -117,7 +136,7 @@
             }
             catch (InterruptedException e)
             {
-                throw new RuntimeException(e);
+                throw new UncheckedInterruptedException(e);
             }
         }
         currentBuffer = buffer == null ? createBuffer() : buffer;
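
This file is the first of several in this patch that replace the generic RuntimeException wrapping of InterruptedException with a dedicated unchecked type. A minimal sketch of that pattern, assuming an unchecked wrapper along the lines of UncheckedInterruptedException:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;

    final class FutureGetSketch
    {
        // Hypothetical stand-in for org.apache.cassandra.utils.concurrent.UncheckedInterruptedException.
        static final class UncheckedInterruptedException extends RuntimeException
        {
            UncheckedInterruptedException(InterruptedException cause) { super(cause); }
        }

        static <T> T getUnchecked(Future<T> future)
        {
            try
            {
                return future.get();
            }
            catch (InterruptedException e)
            {
                // surfaced as its own unchecked type so callers can tell interruption apart from task failure
                throw new UncheckedInterruptedException(e);
            }
            catch (ExecutionException e)
            {
                throw new RuntimeException(e);
            }
        }
    }
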
diff --git a/src/java/org/apache/cassandra/hints/HintsCatalog.java b/src/java/org/apache/cassandra/hints/HintsCatalog.java
index 81ec98e..859252f 100644
--- a/src/java/org/apache/cassandra/hints/HintsCatalog.java
+++ b/src/java/org/apache/cassandra/hints/HintsCatalog.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -27,6 +26,7 @@
 import javax.annotation.Nullable;
 
 import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -34,7 +34,6 @@
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.NativeLibrary;
 import org.apache.cassandra.utils.SyncUtil;
 
@@ -148,7 +147,7 @@
 
     void fsyncDirectory()
     {
-        int fd = NativeLibrary.tryOpenDirectory(hintsDirectory.getAbsolutePath());
+        int fd = NativeLibrary.tryOpenDirectory(hintsDirectory.absolutePath());
         if (fd != -1)
         {
             try
@@ -158,14 +157,14 @@
             }
             catch (FSError e) // trySync failed
             {
-                logger.error("Unable to sync directory {}", hintsDirectory.getAbsolutePath(), e);
+                logger.error("Unable to sync directory {}", hintsDirectory.absolutePath(), e);
                 FileUtils.handleFSErrorAndPropagate(e);
             }
         }
-        else if (!FBUtilities.isWindows)
+        else
         {
-            logger.error("Unable to open directory {}", hintsDirectory.getAbsolutePath());
-            FileUtils.handleFSErrorAndPropagate(new FSWriteError(new IOException(String.format("Unable to open hint directory %s", hintsDirectory.getAbsolutePath())), hintsDirectory.getAbsolutePath()));
+            logger.error("Unable to open directory {}", hintsDirectory.absolutePath());
+            FileUtils.handleFSErrorAndPropagate(new FSWriteError(new IOException(String.format("Unable to open hint directory %s", hintsDirectory.absolutePath())), hintsDirectory.absolutePath()));
         }
     }
 
diff --git a/src/java/org/apache/cassandra/hints/HintsCleanupTrigger.java b/src/java/org/apache/cassandra/hints/HintsCleanupTrigger.java
new file mode 100644
index 0000000..4d10711
--- /dev/null
+++ b/src/java/org/apache/cassandra/hints/HintsCleanupTrigger.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.hints;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.service.StorageService;
+
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
+/**
+ * Deletes expired, orphaned hints files.
+ * An orphaned file is one whose host ID no longer maps to any endpoint.
+ * An expired file is one that has lived longer than the largest gc_grace_seconds of all tables.
+ */
+final class HintsCleanupTrigger implements Runnable
+{
+    private static final Logger logger = LoggerFactory.getLogger(HintsCleanupTrigger.class);
+    private final HintsCatalog hintsCatalog;
+    private final HintsDispatchExecutor dispatchExecutor;
+
+    HintsCleanupTrigger(HintsCatalog catalog, HintsDispatchExecutor dispatchExecutor)
+    {
+        this.hintsCatalog = catalog;
+        this.dispatchExecutor = dispatchExecutor;
+    }
+
+    public void run()
+    {
+        if (!DatabaseDescriptor.isAutoHintsCleanupEnabled())
+            return;
+
+        hintsCatalog.stores()
+                    .filter(store -> StorageService.instance.getEndpointForHostId(store.hostId) == null)
+                    .forEach(this::cleanup);
+    }
+
+    private void cleanup(HintsStore hintsStore)
+    {
+        logger.info("Found orphaned hints files for host: {}. Try to delete.", hintsStore.hostId);
+
+        // The host ID has been replaced and the store is still writing hints for the old host
+        if (hintsStore.isWriting())
+            hintsStore.closeWriter();
+
+        // Interrupt the dispatch if any. At this step, it is certain that the hintsStore is orphaned.
+        dispatchExecutor.interruptDispatch(hintsStore.hostId);
+        Runnable cleanup = () -> hintsStore.deleteExpiredHints(currentTimeMillis());
+        ScheduledExecutors.optionalTasks.execute(cleanup);
+    }
+}
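
A condensed sketch of the decision this trigger applies to each store (illustrative; the helper, its parameter names, and the millisecond units are assumptions rather than code from the patch):

    import java.util.UUID;
    import java.util.function.Function;

    final class CleanupDecisionSketch
    {
        // A store is cleaned up only when its host ID is orphaned; within such a store, a file is
        // deleted only once it has outlived the largest gc_grace period of all tables.
        static boolean shouldDelete(UUID hostId,
                                    Function<UUID, Object> endpointForHostId,
                                    long fileLastModifiedMillis,
                                    long largestGcGraceMillis,
                                    long nowMillis)
        {
            boolean orphaned = endpointForHostId.apply(hostId) == null;
            boolean expired = fileLastModifiedMillis + largestGcGraceMillis <= nowMillis;
            return orphaned && expired;
        }
    }
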
diff --git a/src/java/org/apache/cassandra/hints/HintsDescriptor.java b/src/java/org/apache/cassandra/hints/HintsDescriptor.java
index 1979637..8e1f782 100644
--- a/src/java/org/apache/cassandra/hints/HintsDescriptor.java
+++ b/src/java/org/apache/cassandra/hints/HintsDescriptor.java
@@ -19,7 +19,6 @@
 
 import java.io.DataInput;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -35,6 +34,9 @@
 import com.google.common.base.MoreObjects;
 import com.google.common.base.Objects;
 import com.google.common.collect.ImmutableMap;
+
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -207,6 +209,16 @@
         return String.format("%s-%s-%s.crc32", hostId, timestamp, version);
     }
 
+    File file(File hintsDirectory)
+    {
+        return new File(hintsDirectory, fileName());
+    }
+
+    File checksumFile(File hintsDirectory)
+    {
+        return new File(hintsDirectory, checksumFileName());
+    }
+
     int messagingVersion()
     {
         return messagingVersion(version);
@@ -232,13 +244,13 @@
 
     static Optional<HintsDescriptor> readFromFileQuietly(Path path)
     {
-        try (RandomAccessFile raf = new RandomAccessFile(path.toFile(), "r"))
+        try (FileInputStreamPlus raf = new FileInputStreamPlus(path))
         {
             return Optional.of(deserialize(raf));
         }
         catch (ChecksumMismatchException e)
         {
-            throw new FSReadError(e, path.toFile());
+            throw new FSReadError(e, path);
         }
         catch (IOException e)
         {
@@ -271,15 +283,15 @@
         }
     }
 
-    static HintsDescriptor readFromFile(Path path)
+    static HintsDescriptor readFromFile(File path)
     {
-        try (RandomAccessFile raf = new RandomAccessFile(path.toFile(), "r"))
+        try (FileInputStreamPlus raf = new FileInputStreamPlus(path))
         {
             return deserialize(raf);
         }
         catch (IOException e)
         {
-            throw new FSReadError(e, path.toFile());
+            throw new FSReadError(e, path);
         }
     }
 
diff --git a/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java b/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java
index b5eb0b1..0f34db6 100644
--- a/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java
+++ b/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java
@@ -17,10 +17,11 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.util.Map;
 import java.util.UUID;
-import java.util.concurrent.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.BooleanSupplier;
 import java.util.function.Predicate;
@@ -31,12 +32,16 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.FSReadError;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+import org.apache.cassandra.utils.concurrent.Future;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 
 /**
  * A multi-threaded (by default) executor for dispatching hints.
@@ -48,7 +53,7 @@
     private static final Logger logger = LoggerFactory.getLogger(HintsDispatchExecutor.class);
 
     private final File hintsDirectory;
-    private final ExecutorService executor;
+    private final ExecutorPlus executor;
     private final AtomicBoolean isPaused;
     private final Predicate<InetAddressAndPort> isAlive;
     private final Map<UUID, Future> scheduledDispatches;
@@ -60,10 +65,11 @@
         this.isAlive = isAlive;
 
         scheduledDispatches = new ConcurrentHashMap<>();
-        executor = new JMXEnabledThreadPoolExecutor(maxThreads, 1, TimeUnit.MINUTES,
-                                                    new LinkedBlockingQueue<>(),
-                                                    new NamedThreadFactory("HintsDispatcher", Thread.MIN_PRIORITY),
-                                                    "internal");
+        executor = executorFactory()
+                .withJmxInternal()
+                .configurePooled("HintsDispatcher", maxThreads)
+                .withThreadPriority(Thread.MIN_PRIORITY)
+                .build();
     }
 
     /*
@@ -79,7 +85,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new AssertionError(e);
+            throw new UncheckedInterruptedException(e);
         }
     }
 
@@ -120,7 +126,11 @@
             if (future != null)
                 future.get();
         }
-        catch (ExecutionException | InterruptedException e)
+        catch (InterruptedException e)
+        {
+            throw new UncheckedInterruptedException(e);
+        }
+        catch (ExecutionException e)
         {
             throw new RuntimeException(e);
         }
@@ -167,7 +177,7 @@
             }
             catch (InterruptedException e)
             {
-                throw new RuntimeException(e);
+                throw new UncheckedInterruptedException(e);
             }
 
             hostId = hostIdSupplier.get();
@@ -206,7 +216,7 @@
             // not total outgoing hints traffic from this node - this is why the rate limiter is not shared between
             // all the dispatch tasks (as there will be at most one dispatch task for a particular host id at a time).
             int nodesCount = Math.max(1, StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1);
-            double throttleInBytes = DatabaseDescriptor.getHintedHandoffThrottleInKB() * 1024.0 / nodesCount;
+            double throttleInBytes = DatabaseDescriptor.getHintedHandoffThrottleInKiB() * 1024.0 / nodesCount;
             this.rateLimiter = RateLimiter.create(throttleInBytes == 0 ? Double.MAX_VALUE : throttleInBytes);
         }
 
@@ -266,7 +276,7 @@
 
         private boolean deliver(HintsDescriptor descriptor, InetAddressAndPort address)
         {
-            File file = new File(hintsDirectory, descriptor.fileName());
+            File file = descriptor.file(hintsDirectory);
             InputPosition offset = store.getDispatchOffset(descriptor);
 
             BooleanSupplier shouldAbort = () -> !isAlive.test(address) || isPaused.get();
@@ -275,27 +285,43 @@
                 if (offset != null)
                     dispatcher.seek(offset);
 
-                if (dispatcher.dispatch())
+                try
                 {
-                    store.delete(descriptor);
-                    store.cleanUp(descriptor);
-                    logger.info("Finished hinted handoff of file {} to endpoint {}: {}", descriptor.fileName(), address, hostId);
-                    return true;
+                    if (dispatcher.dispatch())
+                    {
+                        store.delete(descriptor);
+                        store.cleanUp(descriptor);
+                        logger.info("Finished hinted handoff of file {} to endpoint {}: {}", descriptor.fileName(), address, hostId);
+                        return true;
+                    }
+                    else
+                    {
+                        handleDispatchFailure(dispatcher, descriptor, address);
+                        return false;
+                    }
                 }
-                else
+                // We wrap InterruptedException in UncheckedInterruptedException; without this catch,
+                // an undispatched HintsDescriptor would not be added back to the store and cleaned up
+                // by HintsStore.delete in tests
+                catch (UncheckedInterruptedException e)
                 {
-                    store.markDispatchOffset(descriptor, dispatcher.dispatchPosition());
-                    store.offerFirst(descriptor);
-                    logger.info("Finished hinted handoff of file {} to endpoint {}: {}, partially", descriptor.fileName(), address, hostId);
-                    return false;
+                    handleDispatchFailure(dispatcher, descriptor, address);
+                    throw e;
                 }
             }
         }
 
+        private void handleDispatchFailure(HintsDispatcher dispatcher, HintsDescriptor descriptor, InetAddressAndPort address)
+        {
+            store.markDispatchOffset(descriptor, dispatcher.dispatchPosition());
+            store.offerFirst(descriptor);
+            logger.info("Finished hinted handoff of file {} to endpoint {}: {}, partially", descriptor.fileName(), address, hostId);
+        }
+
         // for each hint in the hints file for a node that isn't part of the ring anymore, write RF hints for each replica
         private void convert(HintsDescriptor descriptor)
         {
-            File file = new File(hintsDirectory, descriptor.fileName());
+            File file = descriptor.file(hintsDirectory);
 
             try (HintsReader reader = HintsReader.open(file, rateLimiter))
             {
diff --git a/src/java/org/apache/cassandra/hints/HintsDispatchTrigger.java b/src/java/org/apache/cassandra/hints/HintsDispatchTrigger.java
index ca38c0c..0dfc6e1 100644
--- a/src/java/org/apache/cassandra/hints/HintsDispatchTrigger.java
+++ b/src/java/org/apache/cassandra/hints/HintsDispatchTrigger.java
@@ -19,12 +19,9 @@
 
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.cassandra.gms.ApplicationState;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.schema.Schema;
 
-import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
-
 /**
  * A simple dispatch trigger that's being run every 10 seconds.
  *
diff --git a/src/java/org/apache/cassandra/hints/HintsDispatcher.java b/src/java/org/apache/cassandra/hints/HintsDispatcher.java
index 743b275..b627338 100644
--- a/src/java/org/apache/cassandra/hints/HintsDispatcher.java
+++ b/src/java/org/apache/cassandra/hints/HintsDispatcher.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.function.BooleanSupplier;
@@ -29,14 +28,19 @@
 
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.metrics.HintsServiceMetrics;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.Condition;
 
+
+import static org.apache.cassandra.hints.HintsDispatcher.Callback.Outcome.*;
+import static org.apache.cassandra.metrics.HintsServiceMetrics.updateDelayMetrics;
 import static org.apache.cassandra.net.Verb.HINT_REQ;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
 /**
  * Dispatches a single hints file to a specified node in a batched manner.
@@ -205,12 +209,12 @@
         return callback;
     }
 
-    private static final class Callback implements RequestCallback
+    static final class Callback implements RequestCallback
     {
         enum Outcome { SUCCESS, TIMEOUT, FAILURE, INTERRUPTED }
 
         private final long start = approxTime.now();
-        private final SimpleCondition condition = new SimpleCondition();
+        private final Condition condition = newOneTimeCondition();
         private volatile Outcome outcome;
         private final long hintCreationNanoTime;
 
@@ -229,10 +233,10 @@
             catch (InterruptedException e)
             {
                 logger.warn("Hint dispatch was interrupted", e);
-                return Outcome.INTERRUPTED;
+                return INTERRUPTED;
             }
 
-            return timedOut ? Outcome.TIMEOUT : outcome;
+            return timedOut ? TIMEOUT : outcome;
         }
 
         @Override
@@ -244,15 +248,15 @@
         @Override
         public void onFailure(InetAddressAndPort from, RequestFailureReason failureReason)
         {
-            outcome = Outcome.FAILURE;
+            outcome = FAILURE;
             condition.signalAll();
         }
 
         @Override
         public void onResponse(Message msg)
         {
-            HintsServiceMetrics.updateDelayMetrics(msg.from(), approxTime.now() - this.hintCreationNanoTime);
-            outcome = Outcome.SUCCESS;
+            updateDelayMetrics(msg.from(), approxTime.now() - this.hintCreationNanoTime);
+            outcome = SUCCESS;
             condition.signalAll();
         }
     }
diff --git a/src/java/org/apache/cassandra/hints/HintsReader.java b/src/java/org/apache/cassandra/hints/HintsReader.java
index 9a5f75a..708a916 100644
--- a/src/java/org/apache/cassandra/hints/HintsReader.java
+++ b/src/java/org/apache/cassandra/hints/HintsReader.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.hints;
 
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Iterator;
@@ -28,6 +27,7 @@
 import com.google.common.primitives.Ints;
 import com.google.common.util.concurrent.RateLimiter;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,6 +36,8 @@
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.AbstractIterator;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * A paged non-compressed hints reader that provides two iterators:
  * - a 'raw' ByteBuffer iterator that doesn't deserialize the hints, but returns the pre-encoded hints verbatim
@@ -51,7 +53,7 @@
 {
     private static final Logger logger = LoggerFactory.getLogger(HintsReader.class);
 
-    // don't read more than 512 KB of hints at a time.
+    // don't read more than 512 KiB of hints at a time.
     private static final int PAGE_SIZE = 512 << 10;
 
     private final HintsDescriptor descriptor;
@@ -164,7 +166,7 @@
     final class HintsIterator extends AbstractIterator<Hint>
     {
         private final InputPosition offset;
-        private final long now = System.currentTimeMillis();
+        private final long now = currentTimeMillis();
 
         HintsIterator(InputPosition offset)
         {
@@ -270,7 +272,7 @@
     final class BuffersIterator extends AbstractIterator<ByteBuffer>
     {
         private final InputPosition offset;
-        private final long now = System.currentTimeMillis();
+        private final long now = currentTimeMillis();
 
         BuffersIterator(InputPosition offset)
         {
diff --git a/src/java/org/apache/cassandra/hints/HintsService.java b/src/java/org/apache/cassandra/hints/HintsService.java
index a02367f..8fbfab8 100644
--- a/src/java/org/apache/cassandra/hints/HintsService.java
+++ b/src/java/org/apache/cassandra/hints/HintsService.java
@@ -17,14 +17,16 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.net.UnknownHostException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.UUID;
-import java.util.concurrent.*;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
@@ -32,7 +34,10 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.ReplicaLayout;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,6 +54,7 @@
 import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.MBeanWrapper;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static com.google.common.collect.Iterables.filter;
 import static com.google.common.collect.Iterables.transform;
@@ -59,6 +65,7 @@
  * - a single-threaded write executor
  * - a multi-threaded dispatch executor
  * - the buffer pool for writing hints into
+ * - an optional scheduled task to clean up expired and orphaned hints files
  *
  * The front-end for everything hints related.
  */
@@ -83,6 +90,7 @@
 
     private final ScheduledFuture triggerFlushingFuture;
     private volatile ScheduledFuture triggerDispatchFuture;
+    private final ScheduledFuture triggerCleanupFuture;
 
     public final HintedHandoffMetrics metrics;
 
@@ -112,6 +120,11 @@
                                                                                         flushPeriod,
                                                                                         flushPeriod,
                                                                                         TimeUnit.MILLISECONDS);
+
+        // periodically clean up expired and orphaned hints
+        HintsCleanupTrigger cleanupTrigger = new HintsCleanupTrigger(catalog, dispatchExecutor);
+        triggerCleanupFuture = ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(cleanupTrigger, 1, 1, TimeUnit.HOURS);
+
         metrics = new HintedHandoffMetrics();
     }
 
@@ -183,7 +196,7 @@
         // judicious use of streams: eagerly materializing probably cheaper
         // than performing filters / translations 2x extra via Iterables.filter/transform
         List<UUID> hostIds = replicas.stream()
-                .filter(StorageProxy::shouldHint)
+                .filter(replica -> StorageProxy.shouldHint(replica, false))
                 .map(replica -> StorageService.instance.getHostIdForEndpoint(replica.endpoint()))
                 .collect(Collectors.toList());
 
@@ -234,6 +247,19 @@
     }
 
     /**
+     * Get the total on-disk size, in bytes, of all hints files associated with the given host.
+     * @param hostId host ID of the node the hints belong to
+     * @return total file size, in bytes
+     */
+    public long getTotalHintsSize(UUID hostId)
+    {
+        HintsStore store = catalog.getNullable(hostId);
+        if (store == null)
+            return 0;
+        return store.getTotalFileSize();
+    }
+
+    /**
      * Gracefully and blockingly shut down the service.
      *
      * Will abort dispatch sessions that are currently in progress (which is okay, it's idempotent),
@@ -251,6 +277,8 @@
 
         triggerFlushingFuture.cancel(false);
 
+        triggerCleanupFuture.cancel(false);
+
         writeExecutor.flushBufferPool(bufferPool).get();
         writeExecutor.closeAllWriters().get();
 
@@ -262,6 +290,31 @@
     }
 
     /**
+     * Returns all pending hints that this node has.
+     *
+     * @return a list of {@link PendingHintsInfo}
+     */
+    public List<PendingHintsInfo> getPendingHintsInfo()
+    {
+        return catalog.stores()
+                      .filter(HintsStore::hasFiles)
+                      .map(HintsStore::getPendingHintsInfo)
+                      .collect(Collectors.toList());
+    }
+
+    /**
+     * Returns all pending hints that this node has.
+     *
+     * @return a list of maps with endpoints' ids, total number of hint files, their oldest and newest timestamps.
+     */
+    public List<Map<String, String>> getPendingHints()
+    {
+        return getPendingHintsInfo().stream()
+                                    .map(PendingHintsInfo::asMap)
+                                    .collect(Collectors.toList());
+    }
+
+    /**
      * Deletes all hints for all destinations. Doesn't make snapshots - should be used with care.
      */
     public void deleteAllHints()
@@ -332,7 +385,11 @@
             flushFuture.get();
             closeFuture.get();
         }
-        catch (InterruptedException | ExecutionException e)
+        catch (InterruptedException e)
+        {
+            throw new UncheckedInterruptedException(e);
+        }
+        catch (ExecutionException e)
         {
             throw new RuntimeException(e);
         }
@@ -369,7 +426,11 @@
             flushFuture.get();
             closeFuture.get();
         }
-        catch (InterruptedException | ExecutionException e)
+        catch (InterruptedException e)
+        {
+            throw new UncheckedInterruptedException(e);
+        }
+        catch (ExecutionException e)
         {
             throw new RuntimeException(e);
         }
@@ -383,6 +444,20 @@
         return dispatchExecutor.transfer(catalog, hostIdSupplier);
     }
 
+    /**
+     * Get the earliest hint written for a particular node.
+     * @param hostId UUID of the node whose hints to check
+     * @return timestamp of the earliest hint as Unix time in milliseconds, or the current time if no hints exist for the node
+     */
+    public long getEarliestHintForHost(UUID hostId)
+    {
+        // Need to check only the first descriptor + all buffers.
+        HintsStore store = catalog.get(hostId);
+        HintsDescriptor desc = store.getFirstDescriptor();
+        long timestamp = desc == null ? Clock.Global.currentTimeMillis() : desc.timestamp;
+        return Math.min(timestamp, bufferPool.getEarliestHintForHost(hostId));
+    }
+
     HintsCatalog getCatalog()
     {
         return catalog;
diff --git a/src/java/org/apache/cassandra/hints/HintsServiceMBean.java b/src/java/org/apache/cassandra/hints/HintsServiceMBean.java
index fe0abcc..7fd7695 100644
--- a/src/java/org/apache/cassandra/hints/HintsServiceMBean.java
+++ b/src/java/org/apache/cassandra/hints/HintsServiceMBean.java
@@ -17,6 +17,9 @@
  */
 package org.apache.cassandra.hints;
 
+import java.util.List;
+import java.util.Map;
+
 public interface HintsServiceMBean
 {
     /**
@@ -40,4 +43,11 @@
      * being dispatched right now, or being written to).
      */
     void deleteAllHintsForEndpoint(String address);
+
+    /**
+     * Returns all pending hints that this node has.
+     *
+     * @return a list of endpoints with relevant hint information - total number of files, newest and oldest timestamps.
+     */
+    List<Map<String, String>> getPendingHints();
 }
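
A hypothetical JMX client sketch for the new getPendingHints() operation; the MBean object name is assumed to be org.apache.cassandra.hints:type=HintsService and the port is only an example:

    import java.util.Map;
    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    import org.apache.cassandra.hints.HintsServiceMBean;

    public final class PendingHintsClient
    {
        public static void main(String[] args) throws Exception
        {
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
            JMXConnector connector = JMXConnectorFactory.connect(url);
            try
            {
                MBeanServerConnection connection = connector.getMBeanServerConnection();
                ObjectName name = new ObjectName("org.apache.cassandra.hints:type=HintsService");
                HintsServiceMBean proxy = JMX.newMBeanProxy(connection, name, HintsServiceMBean.class);
                // each map carries host_id, total_files, oldest_timestamp and newest_timestamp
                for (Map<String, String> pending : proxy.getPendingHints())
                    System.out.println(pending);
            }
            finally
            {
                connector.close();
            }
        }
    }
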
diff --git a/src/java/org/apache/cassandra/hints/HintsStore.java b/src/java/org/apache/cassandra/hints/HintsStore.java
index aeefbd7..b7850ee 100644
--- a/src/java/org/apache/cassandra/hints/HintsStore.java
+++ b/src/java/org/apache/cassandra/hints/HintsStore.java
@@ -17,24 +17,31 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.function.Predicate;
+
+import javax.annotation.Nullable;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.gms.FailureDetector;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.SyncUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Encapsulates the state of a peer's hints: the queue of hints files for dispatch, and the current writer (if any).
  *
@@ -53,6 +60,7 @@
     private final Map<HintsDescriptor, InputPosition> dispatchPositions;
     private final Deque<HintsDescriptor> dispatchDequeue;
     private final Queue<HintsDescriptor> corruptedFiles;
+    private final Map<HintsDescriptor, Long> hintsExpirations;
 
     // last timestamp used in a descriptor; make sure to not reuse the same timestamp for new descriptors.
     private volatile long lastUsedTimestamp;
@@ -67,6 +75,7 @@
         dispatchPositions = new ConcurrentHashMap<>();
         dispatchDequeue = new ConcurrentLinkedDeque<>(descriptors);
         corruptedFiles = new ConcurrentLinkedQueue<>();
+        hintsExpirations = new ConcurrentHashMap<>();
 
         //noinspection resource
         lastUsedTimestamp = descriptors.stream().mapToLong(d -> d.timestamp).max().orElse(0L);
@@ -84,11 +93,37 @@
         return dispatchDequeue.size();
     }
 
+    @VisibleForTesting
+    int getHintsExpirationsMapSize()
+    {
+        return hintsExpirations.size();
+    }
+
     InetAddressAndPort address()
     {
         return StorageService.instance.getEndpointForHostId(hostId);
     }
 
+    @Nullable
+    PendingHintsInfo getPendingHintsInfo()
+    {
+        Iterator<HintsDescriptor> descriptors = dispatchDequeue.iterator();
+        int queueSize = 0;
+        long minTimestamp = Long.MAX_VALUE;
+        long maxTimestamp = Long.MIN_VALUE;
+        while (descriptors.hasNext())
+        {
+            HintsDescriptor descriptor = descriptors.next();
+            minTimestamp = Math.min(minTimestamp, descriptor.timestamp);
+            maxTimestamp = Math.max(maxTimestamp, descriptor.timestamp);
+            queueSize++;
+        }
+
+        if (queueSize == 0)
+            return null;
+        return new PendingHintsInfo(hostId, queueSize, minTimestamp, maxTimestamp);
+    }
+
     boolean isLive()
     {
         InetAddressAndPort address = address();
@@ -126,16 +161,63 @@
         }
     }
 
+    void deleteExpiredHints(long now)
+    {
+        deleteHints(it -> hasExpired(it, now));
+    }
+
+    private boolean hasExpired(HintsDescriptor descriptor, long now)
+    {
+        Long cachedExpiresAt = hintsExpirations.get(descriptor);
+        if (null != cachedExpiresAt)
+            return cachedExpiresAt <= now;
+
+        File hintFile = new File(hintsDirectory, descriptor.fileName());
+        // the file does not exist, or an I/O error occurred (lastModified() returns 0 in both cases)
+        if (!hintFile.exists() || hintFile.lastModified() == 0)
+            return false;
+
+        // 'lastModified' can be treated as an upper bound on the creation time of the hints in the file.
+        // So the expiration time of all hints in the file can be estimated as lastModified + the largest gcgs of all tables.
+        long ttl = hintFile.lastModified() + Schema.instance.largestGcgs();
+        hintsExpirations.put(descriptor, ttl);
+        return ttl <= now;
+    }
+
+    private void deleteHints(Predicate<HintsDescriptor> predicate)
+    {
+        Set<HintsDescriptor> removeSet = new HashSet<>();
+        try
+        {
+            for (HintsDescriptor descriptor : Iterables.concat(dispatchDequeue, corruptedFiles))
+            {
+                if (predicate.test(descriptor))
+                {
+                    cleanUp(descriptor);
+                    delete(descriptor);
+                    removeSet.add(descriptor);
+                }
+            }
+        }
+        finally // remove the already-deleted hints from the internal queues, even if an exception occurred
+        {
+            dispatchDequeue.removeAll(removeSet);
+            corruptedFiles.removeAll(removeSet);
+        }
+    }
+
     void delete(HintsDescriptor descriptor)
     {
-        File hintsFile = new File(hintsDirectory, descriptor.fileName());
-        if (hintsFile.delete())
+        File hintsFile = descriptor.file(hintsDirectory);
+        if (hintsFile.tryDelete())
             logger.info("Deleted hint file {}", descriptor.fileName());
-        else
+        else if (hintsFile.exists())
             logger.error("Failed to delete hint file {}", descriptor.fileName());
+        else
+            logger.info("Already deleted hint file {}", descriptor.fileName());
 
         //noinspection ResultOfMethodCallIgnored
-        new File(hintsDirectory, descriptor.checksumFileName()).delete();
+        descriptor.checksumFile(hintsDirectory).tryDelete();
     }
 
     boolean hasFiles()
@@ -153,9 +235,24 @@
         dispatchPositions.put(descriptor, inputPosition);
     }
 
+
+    /**
+     * @return the total size of all files belonging to the hints store, in bytes.
+     */
+    long getTotalFileSize()
+    {
+        long total = 0;
+        for (HintsDescriptor descriptor : Iterables.concat(dispatchDequeue, corruptedFiles))
+        {
+            total += descriptor.file(hintsDirectory).length();
+        }
+        return total;
+    }
+
     void cleanUp(HintsDescriptor descriptor)
     {
         dispatchPositions.remove(descriptor);
+        hintsExpirations.remove(descriptor);
     }
 
     void markCorrupted(HintsDescriptor descriptor)
@@ -163,6 +260,14 @@
         corruptedFiles.add(descriptor);
     }
 
+    /**
+     * @return the first {@link HintsDescriptor} in the queue for dispatch, or {@code null} if the queue is empty.
+     */
+    HintsDescriptor getFirstDescriptor()
+    {
+        return dispatchDequeue.peekFirst();
+    }
+
     /*
      * Methods dealing with HintsWriter.
      *
@@ -188,7 +293,7 @@
 
     private HintsWriter openWriter()
     {
-        lastUsedTimestamp = Math.max(System.currentTimeMillis(), lastUsedTimestamp + 1);
+        lastUsedTimestamp = Math.max(currentTimeMillis(), lastUsedTimestamp + 1);
         HintsDescriptor descriptor = new HintsDescriptor(hostId, lastUsedTimestamp, writerParams);
 
         try
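
As a usage note on the expiration caching added to hasExpired() above, a simplified sketch of the same idea (the key type and method signature here are simplified for illustration; the real code caches per HintsDescriptor):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    final class ExpirationCacheSketch
    {
        private final ConcurrentMap<String, Long> expirations = new ConcurrentHashMap<>();

        // Compute the expiration time once per file and cache it, since lastModified requires a filesystem call.
        boolean hasExpired(String fileName, long lastModifiedMillis, long largestGcGraceMillis, long nowMillis)
        {
            long expiresAt = expirations.computeIfAbsent(fileName, f -> lastModifiedMillis + largestGcGraceMillis);
            return expiresAt <= nowMillis;
        }
    }
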
diff --git a/src/java/org/apache/cassandra/hints/HintsWriteExecutor.java b/src/java/org/apache/cassandra/hints/HintsWriteExecutor.java
index 51a5362..9d64e4a 100644
--- a/src/java/org/apache/cassandra/hints/HintsWriteExecutor.java
+++ b/src/java/org/apache/cassandra/hints/HintsWriteExecutor.java
@@ -20,16 +20,21 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Iterator;
-import java.util.concurrent.*;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.FSError;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 
 /**
  * A single threaded executor that exclusively writes all the hints and otherwise manipulate the writers.
@@ -46,18 +51,18 @@
 
     private final HintsCatalog catalog;
     private final ByteBuffer writeBuffer;
-    private final ExecutorService executor;
+    private final ExecutorPlus executor;
 
     HintsWriteExecutor(HintsCatalog catalog)
     {
         this.catalog = catalog;
 
         writeBuffer = ByteBuffer.allocateDirect(WRITE_BUFFER_SIZE);
-        executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("HintsWriteExecutor", 1);
+        executor = executorFactory().sequential("HintsWriteExecutor");
     }
 
     /*
-     * Should be very fast (worst case scenario - write a few 10s of megabytes to disk).
+     * Should be very fast (worst case scenario - write a few 10s of mebibytes to disk).
      */
     void shutdownBlocking()
     {
@@ -102,7 +107,11 @@
         {
             executor.submit(new FsyncWritersTask(stores)).get();
         }
-        catch (InterruptedException | ExecutionException e)
+        catch (InterruptedException e)
+        {
+            throw new UncheckedInterruptedException(e);
+        }
+        catch (ExecutionException e)
         {
             throw new RuntimeException(e);
         }
@@ -185,7 +194,7 @@
         {
             HintsBuffer buffer = bufferPool.currentBuffer();
             buffer.waitForModifications();
-            stores.forEach(store -> flush(buffer.consumingHintsIterator(store.hostId), store));
+            stores.forEach(store -> flush(buffer.consumingHintsIterator(store.hostId), store, buffer));
         }
     }
 
@@ -207,10 +216,10 @@
 
     private void flush(HintsBuffer buffer)
     {
-        buffer.hostIds().forEach(hostId -> flush(buffer.consumingHintsIterator(hostId), catalog.get(hostId)));
+        buffer.hostIds().forEach(hostId -> flush(buffer.consumingHintsIterator(hostId), catalog.get(hostId), buffer));
     }
 
-    private void flush(Iterator<ByteBuffer> iterator, HintsStore store)
+    private void flush(Iterator<ByteBuffer> iterator, HintsStore store, HintsBuffer buffer)
     {
         while (true)
         {
@@ -222,7 +231,27 @@
 
             // exceeded the size limit for an individual file, but still have more to write
             // close the current writer and continue flushing to a new one in the next iteration
-            store.closeWriter();
+            try
+            {
+                store.closeWriter();
+            }
+            finally
+            {
+                /*
+                Remove the earliest-hint entry for this store's hostId from the buffer. We clear it only
+                after closing the writer in the try block above, so that the hints are persisted on disk first.
+
+                The buffer is also flushed periodically, driven by hints_flush_period; clearing this entry on
+                every flush would prematurely discard the record of the earliest hint in the buffer for the node.
+
+                Since this flushing method is called for every host id the buffer holds, we eventually clear
+                the earliest-hint entries for all hosts, and an entry is re-added as soon as a new hint for
+                that node is written.
+                */
+                buffer.clearEarliestHintForHostId(store.hostId);
+            }
         }
     }
 
diff --git a/src/java/org/apache/cassandra/hints/HintsWriter.java b/src/java/org/apache/cassandra/hints/HintsWriter.java
index 589802b..663427a 100644
--- a/src/java/org/apache/cassandra/hints/HintsWriter.java
+++ b/src/java/org/apache/cassandra/hints/HintsWriter.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
@@ -33,6 +32,7 @@
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.io.util.DataOutputBufferFixed;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.utils.NativeLibrary;
 import org.apache.cassandra.utils.SyncUtil;
 import org.apache.cassandra.utils.Throwables;
@@ -67,7 +67,7 @@
     @SuppressWarnings("resource") // HintsWriter owns channel
     static HintsWriter create(File directory, HintsDescriptor descriptor) throws IOException
     {
-        File file = new File(directory, descriptor.fileName());
+        File file = descriptor.file(directory);
 
         FileChannel channel = FileChannel.open(file.toPath(), StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
         int fd = NativeLibrary.getfd(channel);
@@ -102,7 +102,7 @@
 
     private void writeChecksum()
     {
-        File checksumFile = new File(directory, descriptor.checksumFileName());
+        File checksumFile = descriptor.checksumFile(directory);
         try (OutputStream out = Files.newOutputStream(checksumFile.toPath()))
         {
             out.write(Integer.toHexString((int) globalCRC.getValue()).getBytes(StandardCharsets.UTF_8));
@@ -284,7 +284,7 @@
 
         private void maybeFsync()
         {
-            if (position() >= lastSyncPosition + DatabaseDescriptor.getTrickleFsyncIntervalInKb() * 1024L)
+            if (position() >= lastSyncPosition + DatabaseDescriptor.getTrickleFsyncIntervalInKiB() * 1024L)
                 fsync();
         }
 
@@ -294,8 +294,8 @@
 
             // don't skip page cache for tiny files, on the assumption that if they are tiny, the target node is probably
             // alive, and if so, the file will be closed and dispatched shortly (within a minute), and the file will be dropped.
-            if (position >= DatabaseDescriptor.getTrickleFsyncIntervalInKb() * 1024L)
-                NativeLibrary.trySkipCache(fd, 0, position - (position % PAGE_SIZE), file.getPath());
+            if (position >= DatabaseDescriptor.getTrickleFsyncIntervalInKiB() * 1024L)
+                NativeLibrary.trySkipCache(fd, 0, position - (position % PAGE_SIZE), file.path());
         }
     }
 }
diff --git a/src/java/org/apache/cassandra/hints/PendingHintsInfo.java b/src/java/org/apache/cassandra/hints/PendingHintsInfo.java
new file mode 100644
index 0000000..b4f21cd
--- /dev/null
+++ b/src/java/org/apache/cassandra/hints/PendingHintsInfo.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.hints;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.UUID;
+
+import com.google.common.base.MoreObjects;
+
+public class PendingHintsInfo
+{
+    public static final String HOST_ID = "host_id";
+    public static final String TOTAL_FILES = "total_files";
+    public static final String OLDEST_TIMESTAMP = "oldest_timestamp";
+    public static final String NEWEST_TIMESTAMP = "newest_timestamp";
+
+    public final UUID hostId;
+    public final int totalFiles;
+    public final long oldestTimestamp;
+    public final long newestTimestamp;
+
+    public PendingHintsInfo(UUID hostId, int totalFiles, long oldestTimestamp, long newestTimestamp)
+    {
+        this.hostId = hostId;
+        this.totalFiles = totalFiles;
+        this.oldestTimestamp = oldestTimestamp;
+        this.newestTimestamp = newestTimestamp;
+    }
+
+    public Map<String, String> asMap()
+    {
+        Map<String, String> ret = new HashMap<>();
+        ret.put(HOST_ID, hostId.toString());
+        ret.put(TOTAL_FILES, String.valueOf(totalFiles));
+        ret.put(OLDEST_TIMESTAMP, String.valueOf(oldestTimestamp));
+        ret.put(NEWEST_TIMESTAMP, String.valueOf(newestTimestamp));
+        return ret;
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        PendingHintsInfo that = (PendingHintsInfo) o;
+        return totalFiles == that.totalFiles &&
+               oldestTimestamp == that.oldestTimestamp &&
+               newestTimestamp == that.newestTimestamp &&
+               Objects.equals(hostId, that.hostId);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(hostId, totalFiles, oldestTimestamp, newestTimestamp);
+    }
+
+    @Override
+    public String toString()
+    {
+        return MoreObjects.toStringHelper(this)
+                          .add("hostId", hostId)
+                          .add("totalFiles", totalFiles)
+                          .add("oldestTimestamp", oldestTimestamp)
+                          .add("newestTimestamp", newestTimestamp)
+                          .toString();
+    }
+}
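
A brief usage sketch of PendingHintsInfo#asMap(); the UUID and timestamps are made-up example values:

    import java.util.Map;
    import java.util.UUID;

    import org.apache.cassandra.hints.PendingHintsInfo;

    public final class PendingHintsInfoExample
    {
        public static void main(String[] args)
        {
            PendingHintsInfo info = new PendingHintsInfo(UUID.fromString("00000000-0000-0000-0000-000000000001"),
                                                         3, 1650000000000L, 1650000600000L);
            Map<String, String> asMap = info.asMap();
            // keys: host_id, total_files, oldest_timestamp, newest_timestamp
            System.out.println(asMap);
        }
    }
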
diff --git a/src/java/org/apache/cassandra/hints/package-info.java b/src/java/org/apache/cassandra/hints/package-info.java
index faa7b9f..b853f31 100644
--- a/src/java/org/apache/cassandra/hints/package-info.java
+++ b/src/java/org/apache/cassandra/hints/package-info.java
@@ -41,4 +41,4 @@
  * {@link org.apache.cassandra.hints.HintsService} wraps the catalog, the pool, and the two executors, acting as a front-end
  * for hints.
  */
-package org.apache.cassandra.hints;
\ No newline at end of file
+package org.apache.cassandra.hints;
diff --git a/src/java/org/apache/cassandra/index/Index.java b/src/java/org/apache/cassandra/index/Index.java
index e9d3d3c..9f51b16 100644
--- a/src/java/org/apache/cassandra/index/Index.java
+++ b/src/java/org/apache/cassandra/index/Index.java
@@ -26,6 +26,7 @@
 import java.util.concurrent.Callable;
 import java.util.function.BiFunction;
 
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.cql3.Operator;
 import org.apache.cassandra.db.*;
@@ -267,7 +268,27 @@
     public Optional<ColumnFamilyStore> getBackingTable();
 
     /**
-     * Return a task which performs a blocking flush of the index's data to persistent storage.
+     * Return a task which performs a blocking flush of the index's data corresponding to the provided
+     * base table's Memtable. This may extract any necessary data from the base table's Memtable as part of the flush.
+     *
+     * This version of the method is invoked whenever we flush the base table. If the index stores no in-memory data
+     * of its own, it is safe to only implement this method.
+     *
+     * @return task to be executed by the index manager to perform the flush.
+     */
+    public default Callable<?> getBlockingFlushTask(Memtable baseCfs)
+    {
+        return getBlockingFlushTask();
+    }
+
+    /**
+     * Return a task which performs a blocking flush of any in-memory index data to persistent storage,
+     * independent of any flush of the base table.
+     *
+     * Note that this method is only invoked outside of normal flushes: if there is no in-memory storage
+     * for this index, and it only extracts data on flush from the base table's Memtable, then it is safe to
+     * perform no work.
+     *
      * @return task to be executed by the index manager to perform the flush.
      */
     public Callable<?> getBlockingFlushTask();
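
A sketch of how a custom index implementation might use the new memtable-aware flush hook (the index internals are hypothetical; only the two getBlockingFlushTask signatures come from the interface above):

    import java.util.concurrent.Callable;

    import org.apache.cassandra.db.memtable.Memtable;
    import org.apache.cassandra.index.Index;

    public abstract class MemtableAwareIndexSketch implements Index
    {
        @Override
        public Callable<?> getBlockingFlushTask(Memtable baseMemtable)
        {
            // extract per-memtable index data while the base table flushes
            return () -> { flushIndexDataFor(baseMemtable); return null; };
        }

        @Override
        public Callable<?> getBlockingFlushTask()
        {
            // invoked outside normal flushes; nothing to do if all state is derived from the base memtable
            return () -> null;
        }

        abstract void flushIndexDataFor(Memtable memtable);
    }
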
diff --git a/src/java/org/apache/cassandra/index/SecondaryIndexManager.java b/src/java/org/apache/cassandra/index/SecondaryIndexManager.java
index e9b22ef..93ecd59 100644
--- a/src/java/org/apache/cassandra/index/SecondaryIndexManager.java
+++ b/src/java/org/apache/cassandra/index/SecondaryIndexManager.java
@@ -19,7 +19,9 @@
 
 import java.lang.reflect.Constructor;
 import java.util.*;
-import java.util.concurrent.*;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -28,36 +30,29 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
+import com.google.common.collect.*;
 import com.google.common.primitives.Longs;
 import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-
 import org.apache.commons.lang3.StringUtils;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
-import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.FutureTask;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.statements.schema.IndexTarget;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.filter.ClusteringIndexSliceFilter;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.filter.DataLimits;
 import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.db.lifecycle.SSTableSet;
 import org.apache.cassandra.db.lifecycle.View;
-import org.apache.cassandra.db.marshal.ValueAccessor;
-import org.apache.cassandra.db.partitions.*;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.index.Index.IndexBuildingSupport;
@@ -75,8 +70,9 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
-import org.apache.cassandra.utils.concurrent.Refs;
+import org.apache.cassandra.utils.concurrent.*;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.utils.ExecutorUtils.awaitTermination;
 import static org.apache.cassandra.utils.ExecutorUtils.shutdown;
 
@@ -158,16 +154,12 @@
     private final Map<String, AtomicInteger> inProgressBuilds = Maps.newConcurrentMap();
 
     // executes tasks returned by Indexer#addIndexColumn which may require index(es) to be (re)built
-    private static final ListeningExecutorService asyncExecutor = MoreExecutors.listeningDecorator(
-    new JMXEnabledThreadPoolExecutor(1,
-                                     Stage.KEEP_ALIVE_SECONDS,
-                                     TimeUnit.SECONDS,
-                                     new LinkedBlockingQueue<>(),
-                                     new NamedThreadFactory("SecondaryIndexManagement"),
-                                     "internal"));
+    private static final ExecutorPlus asyncExecutor = executorFactory()
+            .withJmxInternal()
+            .sequential("SecondaryIndexManagement");
 
     // executes all blocking tasks produced by Indexers e.g. getFlushTask, getMetadataReloadTask etc
-    private static final ListeningExecutorService blockingExecutor = MoreExecutors.newDirectExecutorService();
+    private static final ExecutorPlus blockingExecutor = ImmediateExecutor.INSTANCE;
 
     /**
      * The underlying column family containing the source data for these indexes
@@ -205,12 +197,12 @@
         Index index = indexes.get(indexDef.name);
         Callable<?> reloadTask = index.getMetadataReloadTask(indexDef);
         return reloadTask == null
-               ? Futures.immediateFuture(null)
+               ? ImmediateFuture.success(null)
                : blockingExecutor.submit(reloadTask);
     }
 
     @SuppressWarnings("unchecked")
-    private synchronized Future<?> createIndex(IndexMetadata indexDef, boolean isNewCF)
+    private synchronized Future<Void> createIndex(IndexMetadata indexDef, boolean isNewCF)
     {
         final Index index = createInstance(indexDef);
         index.register(this);
@@ -219,13 +211,15 @@
 
         markIndexesBuilding(ImmutableSet.of(index), true, isNewCF);
 
-        Callable<?> initialBuildTask = null;
+        FutureTask<?> initialBuildTask = null;
         // if the index didn't register itself, we can probably assume that no initialization needs to happen
         if (indexes.containsKey(indexDef.name))
         {
             try
             {
-                initialBuildTask = index.getInitializationTask();
+                Callable<?> call = index.getInitializationTask();
+                if (call != null)
+                    initialBuildTask = new FutureTask<>(call);
             }
             catch (Throwable t)
             {
@@ -238,27 +232,25 @@
         if (initialBuildTask == null)
         {
             markIndexBuilt(index, true);
-            return Futures.immediateFuture(null);
+            return ImmediateFuture.success(null);
         }
 
         // otherwise run the initialization task asynchronously with a callback to mark it built or failed
-        final SettableFuture initialization = SettableFuture.create();
-        Futures.addCallback(asyncExecutor.submit(initialBuildTask), new FutureCallback()
-        {
-            @Override
-            public void onFailure(Throwable t)
-            {
-                logAndMarkIndexesFailed(Collections.singleton(index), t, true);
-                initialization.setException(t);
-            }
-
-            @Override
-            public void onSuccess(Object o)
-            {
+        final Promise<Void> initialization = new AsyncPromise<>();
+        // we want to ensure we invoke this task asynchronously, so we add our callback before submission
+        // to ensure the work cannot complete before the callback is registered, which would cause it to run on this thread.
+        // This is because Keyspace.open("system") can transitively attempt to call Keyspace.open("system") again.
+        initialBuildTask.addCallback(
+            success -> {
                 markIndexBuilt(index, true);
-                initialization.set(o);
+                initialization.trySuccess(null);
+            },
+            failure -> {
+                logAndMarkIndexesFailed(Collections.singleton(index), failure, true);
+                initialization.tryFailure(failure);
             }
-        }, MoreExecutors.directExecutor());
+        );
+        asyncExecutor.execute(initialBuildTask);
 
         return initialization;
     }
@@ -267,7 +259,7 @@
      * Adds and builds a index
      *
      * @param indexDef the IndexMetadata describing the index
-     * @param isNewCF true if the index is added as part of a new table/columnfamily (i.e. loading a CF at startup), 
+     * @param isNewCF true if the index is added as part of a new table/columnfamily (i.e. loading a CF at startup),
      * false for all other cases (i.e. newly added index)
      */
     public synchronized Future<?> addIndex(IndexMetadata indexDef, boolean isNewCF)
@@ -384,7 +376,7 @@
 
         // Once we are tracking new writes, flush any memtable contents to not miss them from the sstable-based rebuild
         if (needsFlush)
-            baseCfs.forceBlockingFlush();
+            baseCfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.INDEX_BUILD_STARTED);
 
         // Now that we are tracking new writes and we haven't left untracked contents on the memtables, we are ready to
         // index the sstables
@@ -516,15 +508,15 @@
             byType.forEach((buildingSupport, groupedIndexes) ->
                            {
                                SecondaryIndexBuilder builder = buildingSupport.getIndexBuildTask(baseCfs, groupedIndexes, sstables);
-                               final SettableFuture build = SettableFuture.create();
-                               Futures.addCallback(CompactionManager.instance.submitIndexBuild(builder), new FutureCallback()
+                               final AsyncPromise<Object> build = new AsyncPromise<>();
+                               CompactionManager.instance.submitIndexBuild(builder).addCallback(new FutureCallback()
                                {
                                    @Override
                                    public void onFailure(Throwable t)
                                    {
                                        logAndMarkIndexesFailed(groupedIndexes, t, false);
                                        unbuiltIndexes.addAll(groupedIndexes);
-                                       build.setException(t);
+                                       build.tryFailure(t);
                                    }
 
                                    @Override
@@ -533,9 +525,9 @@
                                        groupedIndexes.forEach(i -> markIndexBuilt(i, isFullRebuild));
                                        logger.info("Index build of {} completed", getIndexNames(groupedIndexes));
                                        builtIndexes.addAll(groupedIndexes);
-                                       build.set(o);
+                                       build.trySuccess(o);
                                    }
-                               }, MoreExecutors.directExecutor());
+                               });
                                futures.add(build);
                            });
 
@@ -624,7 +616,7 @@
      *
      * @param indexes the index to be marked as building
      * @param isFullRebuild {@code true} if this method is invoked as a full index rebuild, {@code false} otherwise
-     * @param isNewCF {@code true} if this method is invoked when initializing a new table/columnfamily (i.e. loading a CF at startup), 
+     * @param isNewCF {@code true} if this method is invoked when initializing a new table/columnfamily (i.e. loading a CF at startup),
      * {@code false} for all other cases (i.e. newly added index)
      */
     private synchronized void markIndexesBuilding(Set<Index> indexes, boolean isFullRebuild, boolean isNewCF)
@@ -675,7 +667,7 @@
             if (writableIndexes.put(indexName, index) == null)
                 logger.info("Index [{}] became writable after successful build.", indexName);
         }
-        
+
         AtomicInteger counter = inProgressBuilds.get(indexName);
         if (counter != null)
         {
@@ -683,7 +675,7 @@
             if (counter.decrementAndGet() == 0)
             {
                 inProgressBuilds.remove(indexName);
-                if (!needsFullRebuild.contains(indexName) && DatabaseDescriptor.isDaemonInitialized())
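+                // only persist the built marker once the daemon and keyspaces are initialized, so we don't write to
+                // system tables before they are ready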
+                if (!needsFullRebuild.contains(indexName) && DatabaseDescriptor.isDaemonInitialized() && Keyspace.isInitialized())
                     SystemKeyspace.setIndexBuilt(baseCfs.keyspace.getName(), indexName);
             }
         }
@@ -786,10 +778,11 @@
     /**
      * Remove all indexes
      */
-    public void dropAllIndexes()
+    public void dropAllIndexes(boolean dropData)
     {
         markAllIndexesRemoved();
-        invalidateAllIndexesBlocking();
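+        // only invalidate the indexes (and thereby discard their data) when the caller asks for the data to be dropped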
+        if (dropData)
+            invalidateAllIndexesBlocking();
     }
 
     @VisibleForTesting
@@ -815,17 +808,6 @@
     }
 
     /**
-     * Performs a blocking flush of all custom indexes
-     */
-    public void flushAllNonCFSBackedIndexesBlocking()
-    {
-        executeAllBlocking(indexes.values()
-                                  .stream()
-                                  .filter(index -> !index.getBackingTable().isPresent()),
-                           Index::getBlockingFlushTask, null);
-    }
-
-    /**
      * Performs a blocking execution of pre-join tasks of all indexes
      */
     public void executePreJoinTasksBlocking(boolean hadBootstrap)
@@ -851,7 +833,7 @@
         {
             indexes.forEach(index ->
                             index.getBackingTable()
-                                 .map(cfs -> wait.add(cfs.forceFlush()))
+                                 .map(cfs -> wait.add(cfs.forceFlush(ColumnFamilyStore.FlushReason.INDEX_BUILD_COMPLETED)))
                                  .orElseGet(() -> nonCfsIndexes.add(index)));
         }
 
@@ -860,6 +842,18 @@
     }
 
     /**
+     * Performs a blocking flush of all custom indexes
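+     *
+     * @param baseCfsMemtable the memtable of the base table that is being flushed, passed on to each index's blocking flush task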
+     */
+    public void flushAllNonCFSBackedIndexesBlocking(Memtable baseCfsMemtable)
+    {
+        executeAllBlocking(indexes.values()
+                                  .stream()
+                                  .filter(index -> !index.getBackingTable().isPresent()),
+                           index -> index.getBlockingFlushTask(baseCfsMemtable),
+                           null);
+    }
+
+    /**
      * @return all indexes which are marked as built and ready to use
      */
     public List<String> getBuiltIndexNames()
@@ -889,19 +883,34 @@
         return !indexes.isEmpty();
     }
 
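+    /**
+     * Convenience overload of {@link #indexPartition(DecoratedKey, Set, int, RegularAndStaticColumns)} that indexes
+     * all regular and static columns of the base table.
+     */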
+    public void indexPartition(DecoratedKey key, Set<Index> indexes, int pageSize)
+    {
+        indexPartition(key, indexes, pageSize, baseCfs.metadata().regularAndStaticColumns());
+    }
+
     /**
      * When building an index against existing data in sstables, add the given partition to the index
+     *
+     * @param key the key for the partition being indexed
+     * @param indexes the indexes that must be updated
+     * @param pageSize the number of {@link Unfiltered} objects to process in a single page
+     * @param columns the columns indexed by at least one of the supplied indexes
      */
-    public void indexPartition(DecoratedKey key, Set<Index> indexes, int pageSize)
+    public void indexPartition(DecoratedKey key, Set<Index> indexes, int pageSize, RegularAndStaticColumns columns)
     {
         if (logger.isTraceEnabled())
             logger.trace("Indexing partition {}", baseCfs.metadata().partitionKeyType.getString(key.getKey()));
 
         if (!indexes.isEmpty())
         {
-            SinglePartitionReadCommand cmd = SinglePartitionReadCommand.fullPartitionRead(baseCfs.metadata(),
-                                                                                          FBUtilities.nowInSeconds(),
-                                                                                          key);
+            SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(baseCfs.metadata(),
+                                                                               FBUtilities.nowInSeconds(),
+                                                                               ColumnFilter.selection(columns),
+                                                                               RowFilter.NONE,
+                                                                               DataLimits.NONE,
+                                                                               key,
+                                                                               new ClusteringIndexSliceFilter(Slices.ALL, false));
+
             int nowInSec = cmd.nowInSec();
             boolean readStatic = false;
 
@@ -1190,7 +1199,7 @@
     {
         if (!hasIndexes())
             return UpdateTransaction.NO_OP;
-        
+
         ArrayList<Index.Indexer> idxrs = new ArrayList<>();
         for (Index i : writableIndexes.values())
         {
@@ -1198,7 +1207,7 @@
             if (idxr != null)
                 idxrs.add(idxr);
         }
-        
+
         if (idxrs.size() == 0)
             return UpdateTransaction.NO_OP;
         else
@@ -1507,17 +1516,17 @@
         }
     }
 
-    private void executeBlocking(Callable<?> task, FutureCallback<Object> callback)
+    private void executeBlocking(Callable<?> task, FutureCallback callback)
     {
         if (null != task)
         {
-            ListenableFuture<?> f = blockingExecutor.submit(task);
-            if (callback != null) Futures.addCallback(f, callback, MoreExecutors.directExecutor());
+            Future<?> f = blockingExecutor.submit(task);
+            if (callback != null) f.addCallback(callback);
             FBUtilities.waitOnFuture(f);
         }
     }
 
-    private void executeAllBlocking(Stream<Index> indexers, Function<Index, Callable<?>> function, FutureCallback<Object> callback)
+    private void executeAllBlocking(Stream<Index> indexers, Function<Index, Callable<?>> function, FutureCallback callback)
     {
         if (function == null)
         {
@@ -1531,8 +1540,8 @@
                              Callable<?> task = function.apply(indexer);
                              if (null != task)
                              {
-                                 ListenableFuture<?> f = blockingExecutor.submit(task);
-                                 if (callback != null) Futures.addCallback(f, callback, MoreExecutors.directExecutor());
+                                 Future<?> f = blockingExecutor.submit(task);
+                                 if (callback != null) f.addCallback(callback);
                                  waitFor.add(f);
                              }
                          });
diff --git a/src/java/org/apache/cassandra/index/internal/CassandraIndex.java b/src/java/org/apache/cassandra/index/internal/CassandraIndex.java
index 2561040..0aac15d 100644
--- a/src/java/org/apache/cassandra/index/internal/CassandraIndex.java
+++ b/src/java/org/apache/cassandra/index/internal/CassandraIndex.java
@@ -184,7 +184,7 @@
     public Callable<Void> getBlockingFlushTask()
     {
         return () -> {
-            indexCfs.forceBlockingFlush();
+            indexCfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.INDEX_TABLE_FLUSH);
             return null;
         };
     }
@@ -659,7 +659,7 @@
         CompactionManager.instance.interruptCompactionForCFs(cfss, (sstable) -> true, true);
         CompactionManager.instance.waitForCessation(cfss, (sstable) -> true);
         Keyspace.writeOrder.awaitNewBarrier();
-        indexCfs.forceBlockingFlush();
+        indexCfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.INDEX_REMOVED);
         indexCfs.readOrdering.awaitNewBarrier();
         indexCfs.invalidate();
     }
@@ -685,7 +685,7 @@
     @SuppressWarnings("resource")
     private void buildBlocking()
     {
-        baseCfs.forceBlockingFlush();
+        baseCfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.INDEX_BUILD_STARTED);
 
         try (ColumnFamilyStore.RefViewFragment viewFragment = baseCfs.selectAndReference(View.selectFunction(SSTableSet.CANONICAL));
              Refs<SSTableReader> sstables = viewFragment.refs)
@@ -709,7 +709,7 @@
                                                                          ImmutableSet.copyOf(sstables));
             Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
             FBUtilities.waitOnFuture(future);
-            indexCfs.forceBlockingFlush();
+            indexCfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.INDEX_BUILD_COMPLETED);
         }
         logger.info("Index build of {} complete", metadata.name);
     }
diff --git a/src/java/org/apache/cassandra/index/internal/CollatedViewIndexBuilder.java b/src/java/org/apache/cassandra/index/internal/CollatedViewIndexBuilder.java
index 3c005c4..07bdc42 100644
--- a/src/java/org/apache/cassandra/index/internal/CollatedViewIndexBuilder.java
+++ b/src/java/org/apache/cassandra/index/internal/CollatedViewIndexBuilder.java
@@ -19,10 +19,10 @@
 
 import java.util.Collection;
 import java.util.Set;
-import java.util.UUID;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.RegularAndStaticColumns;
 import org.apache.cassandra.db.compaction.CompactionInfo;
 import org.apache.cassandra.db.compaction.CompactionInterruptedException;
 import org.apache.cassandra.db.compaction.OperationType;
@@ -30,7 +30,10 @@
 import org.apache.cassandra.index.SecondaryIndexBuilder;
 import org.apache.cassandra.io.sstable.ReducingKeyIterator;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * Manages building an entire index from column family data. Runs on to compaction manager.
@@ -40,7 +43,7 @@
     private final ColumnFamilyStore cfs;
     private final Set<Index> indexers;
     private final ReducingKeyIterator iter;
-    private final UUID compactionId;
+    private final TimeUUID compactionId;
     private final Collection<SSTableReader> sstables;
 
     public CollatedViewIndexBuilder(ColumnFamilyStore cfs, Set<Index> indexers, ReducingKeyIterator iter, Collection<SSTableReader> sstables)
@@ -48,18 +51,18 @@
         this.cfs = cfs;
         this.indexers = indexers;
         this.iter = iter;
-        this.compactionId = UUIDGen.getTimeUUID();
+        this.compactionId = nextTimeUUID();
         this.sstables = sstables;
     }
 
     public CompactionInfo getCompactionInfo()
     {
         return new CompactionInfo(cfs.metadata(),
-                OperationType.INDEX_BUILD,
-                iter.getBytesRead(),
-                iter.getTotalBytes(),
-                compactionId,
-                sstables);
+                                  OperationType.INDEX_BUILD,
+                                  iter.getBytesRead(),
+                                  iter.getTotalBytes(),
+                                  compactionId,
+                                  sstables);
     }
 
     public void build()
@@ -67,12 +70,14 @@
         try
         {
             int pageSize = cfs.indexManager.calculateIndexingPageSize();
+            RegularAndStaticColumns targetPartitionColumns = extractIndexedColumns();
+
             while (iter.hasNext())
             {
                 if (isStopRequested())
                     throw new CompactionInterruptedException(getCompactionInfo());
                 DecoratedKey key = iter.next();
-                cfs.indexManager.indexPartition(key, indexers, pageSize);
+                cfs.indexManager.indexPartition(key, indexers, pageSize, targetPartitionColumns);
             }
         }
         finally
@@ -80,4 +85,30 @@
             iter.close();
         }
     }
+
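+    /**
+     * Computes the columns that must be read to build the supplied indexes: the union of the columns the indexes
+     * depend on, or all of the table's regular and static columns if any index does not depend on specific columns.
+     */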
+    private RegularAndStaticColumns extractIndexedColumns()
+    {
+        RegularAndStaticColumns.Builder builder = RegularAndStaticColumns.builder();
+
+        for (Index index : indexers)
+        {
+            boolean isPartitionIndex = true;
+
+            for (ColumnMetadata column : cfs.metadata().regularAndStaticColumns())
+            {
+                if (index.dependsOn(column))
+                {
+                    builder.add(column);
+                    isPartitionIndex = false;
+                }
+            }
+
+            // if any index declares no dependency on any column, it is a full partition index
+            // so we can use the base partition columns as the input source
+            if (isPartitionIndex)
+                return cfs.metadata().regularAndStaticColumns();
+        }
+
+        return builder.build();
+    }
 }
diff --git a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
index 815b881..457dee13 100644
--- a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
@@ -24,7 +24,6 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.ClusteringIndexNamesFilter;
-import org.apache.cassandra.db.filter.ClusteringIndexSliceFilter;
 import org.apache.cassandra.db.filter.DataLimits;
 import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
@@ -34,7 +33,6 @@
 import org.apache.cassandra.index.internal.CassandraIndexSearcher;
 import org.apache.cassandra.index.internal.IndexEntry;
 import org.apache.cassandra.utils.btree.BTreeSet;
-import org.apache.cassandra.utils.concurrent.OpOrder;
 
 
 public class CompositesSearcher extends CassandraIndexSearcher
diff --git a/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java b/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java
index b044bbe..4c8e75d 100644
--- a/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java
+++ b/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java
@@ -86,4 +86,4 @@
                 || !cell.isLive(nowInSec)
                 || compare(indexValue, cell) != 0);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java b/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java
index 2114d42..9e865d9 100644
--- a/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java
+++ b/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java
@@ -160,4 +160,4 @@
             return iterator;
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/sasi/SASIIndex.java b/src/java/org/apache/cassandra/index/sasi/SASIIndex.java
index b1998bc..1e86bc6 100644
--- a/src/java/org/apache/cassandra/index/sasi/SASIIndex.java
+++ b/src/java/org/apache/cassandra/index/sasi/SASIIndex.java
@@ -72,9 +72,7 @@
                                                        Set<Index> indexes,
                                                        Collection<SSTableReader> sstablesToRebuild)
         {
-            NavigableMap<SSTableReader, Map<ColumnMetadata, ColumnIndex>> sstables = new TreeMap<>((a, b) -> {
-                return Integer.compare(a.descriptor.generation, b.descriptor.generation);
-            });
+            NavigableMap<SSTableReader, Map<ColumnMetadata, ColumnIndex>> sstables = new TreeMap<>(SSTableReader.idComparator);
 
             indexes.stream()
                    .filter((i) -> i instanceof SASIIndex)
@@ -113,8 +111,7 @@
         Tracker tracker = baseCfs.getTracker();
         tracker.subscribe(this);
 
-        SortedMap<SSTableReader, Map<ColumnMetadata, ColumnIndex>> toRebuild = new TreeMap<>((a, b)
-                                                -> Integer.compare(a.descriptor.generation, b.descriptor.generation));
+        SortedMap<SSTableReader, Map<ColumnMetadata, ColumnIndex>> toRebuild = new TreeMap<>(SSTableReader.idComparator);
 
         for (SSTableReader sstable : index.init(tracker.getView().liveSSTables()))
         {
@@ -294,7 +291,7 @@
 
             public void adjustMemtableSize(long additionalSpace, OpOrder.Group opGroup)
             {
-                baseCfs.getTracker().getView().getCurrentMemtable().getAllocator().onHeap().allocate(additionalSpace, opGroup);
+                baseCfs.getTracker().getView().getCurrentMemtable().markExtraOnHeapUsed(additionalSpace, opGroup);
             }
         };
     }
diff --git a/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java b/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java
index bb42dc2..57a3f51 100644
--- a/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java
+++ b/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java
@@ -20,10 +20,10 @@
  */
 package org.apache.cassandra.index.sasi;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.*;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -42,12 +42,14 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 class SASIIndexBuilder extends SecondaryIndexBuilder
 {
     private final ColumnFamilyStore cfs;
-    private final UUID compactionId = UUIDGen.getTimeUUID();
+    private final TimeUUID compactionId = nextTimeUUID();
 
     private final SortedMap<SSTableReader, Map<ColumnMetadata, ColumnIndex>> sstables;
 
diff --git a/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java b/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java
index c67c39c..d756737 100644
--- a/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java
+++ b/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.index.sasi;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -33,6 +32,7 @@
 import org.apache.cassandra.index.sasi.utils.RangeIterator;
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.concurrent.Ref;
 
diff --git a/src/java/org/apache/cassandra/index/sasi/TermIterator.java b/src/java/org/apache/cassandra/index/sasi/TermIterator.java
index 85f81b0..d65b386 100644
--- a/src/java/org/apache/cassandra/index/sasi/TermIterator.java
+++ b/src/java/org/apache/cassandra/index/sasi/TermIterator.java
@@ -20,25 +20,27 @@
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
 import io.netty.util.concurrent.FastThreadLocal;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.index.sasi.disk.OnDiskIndexBuilder;
 import org.apache.cassandra.index.sasi.disk.Token;
 import org.apache.cassandra.index.sasi.plan.Expression;
 import org.apache.cassandra.index.sasi.utils.RangeUnionIterator;
 import org.apache.cassandra.index.sasi.utils.RangeIterator;
 import org.apache.cassandra.io.util.FileUtils;
 
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.Uninterruptibles;
-
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static java.lang.String.format;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.index.sasi.disk.OnDiskIndexBuilder.Mode.CONTAINS;
+import static org.apache.cassandra.index.sasi.plan.Expression.Op.PREFIX;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
+
 public class TermIterator extends RangeIterator<Long, Token>
 {
     private static final Logger logger = LoggerFactory.getLogger(TermIterator.class);
@@ -53,16 +55,8 @@
             logger.info("Search Concurrency Factor is set to {} for {}", concurrencyFactor, currentThread);
 
             return (concurrencyFactor <= 1)
-                    ? MoreExecutors.newDirectExecutorService()
-                    : Executors.newFixedThreadPool(concurrencyFactor, new ThreadFactory()
-            {
-                public final AtomicInteger count = new AtomicInteger();
-
-                public Thread newThread(Runnable task)
-                {
-                    return NamedThreadFactory.createThread(task, currentThread + "-SEARCH-" + count.incrementAndGet(), true);
-                }
-            });
+                    ? ImmediateExecutor.INSTANCE
+                    : executorFactory().pooled(currentThread + "-SEARCH-", concurrencyFactor);
         }
     };
 
@@ -99,14 +93,14 @@
 
         try
         {
-            final CountDownLatch latch = new CountDownLatch(perSSTableIndexes.size());
+            final CountDownLatch latch = newCountDownLatch(perSSTableIndexes.size());
             final ExecutorService searchExecutor = SEARCH_EXECUTOR.get();
 
             for (final SSTableIndex index : perSSTableIndexes)
             {
-                if (e.getOp() == Expression.Op.PREFIX &&
-                    index.mode() == OnDiskIndexBuilder.Mode.CONTAINS && !index.hasMarkedPartials())
-                    throw new UnsupportedOperationException(String.format("The index %s has not yet been upgraded " +
+                if (e.getOp() == PREFIX &&
+                    index.mode() == CONTAINS && !index.hasMarkedPartials())
+                    throw new UnsupportedOperationException(format("The index %s has not yet been upgraded " +
                                                                           "to support prefix queries in CONTAINS mode. " +
                                                                           "Wait for compaction or rebuild the index.",
                                                                           index.getPath()));
@@ -114,7 +108,7 @@
 
                 if (!index.reference())
                 {
-                    latch.countDown();
+                    latch.decrement();
                     continue;
                 }
 
@@ -142,16 +136,16 @@
                         releaseIndex(referencedIndexes, index);
 
                         if (logger.isDebugEnabled())
-                            logger.debug(String.format("Failed search an index %s, skipping.", index.getPath()), e1);
+                            logger.debug(format("Failed to search index %s, skipping.", index.getPath()), e1);
                     }
                     finally
                     {
-                        latch.countDown();
+                        latch.decrement();
                     }
                 });
             }
 
-            Uninterruptibles.awaitUninterruptibly(latch);
+            latch.awaitUninterruptibly();
 
             // checkpoint right away after all indexes complete search because we might have crossed the quota
             e.checkpoint();
diff --git a/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java b/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java
index 05dfedc..b7f297b 100644
--- a/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java
+++ b/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java
@@ -108,4 +108,4 @@
     {
         return VALID_ANALYZABLE_TYPES.containsKey(validator);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StemmerFactory.java b/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StemmerFactory.java
index d278c28..457876a 100644
--- a/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StemmerFactory.java
+++ b/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StemmerFactory.java
@@ -22,8 +22,7 @@
 import java.util.Locale;
 import java.util.Map;
 
-import com.google.common.util.concurrent.MoreExecutors;
-
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.tartarus.snowball.SnowballStemmer;
 import org.tartarus.snowball.ext.*;
 
@@ -42,7 +41,7 @@
 {
     private static final Logger logger = LoggerFactory.getLogger(StemmerFactory.class);
     private static final LoadingCache<Class, Constructor<?>> STEMMER_CONSTRUCTOR_CACHE = Caffeine.newBuilder()
-            .executor(MoreExecutors.directExecutor())
+            .executor(ImmediateExecutor.INSTANCE)
             .build(new CacheLoader<Class, Constructor<?>>()
             {
                 public Constructor<?> load(Class aClass) throws Exception
diff --git a/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java b/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java
index 1548a6a..b85a36f 100644
--- a/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java
+++ b/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.index.sasi.analyzer.filter;
 
 import java.io.BufferedReader;
-import java.io.File;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
@@ -28,11 +27,11 @@
 import java.util.Set;
 import java.util.concurrent.CompletionException;
 
-import com.google.common.util.concurrent.MoreExecutors;
-
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.LoadingCache;
 
+import org.apache.cassandra.concurrent.ImmediateExecutor;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,13 +44,13 @@
 
     private static final String DEFAULT_RESOURCE_EXT = "_ST.txt";
     private static final String DEFAULT_RESOURCE_PREFIX = StopWordFactory.class.getPackage()
-            .getName().replace(".", File.separator);
+            .getName().replace(".", File.pathSeparator());
     private static final Set<String> SUPPORTED_LANGUAGES = new HashSet<>(
             Arrays.asList("ar","bg","cs","de","en","es","fi","fr","hi","hu","it",
             "pl","pt","ro","ru","sv"));
 
     private static final LoadingCache<String, Set<String>> STOP_WORDS_CACHE = Caffeine.newBuilder()
-            .executor(MoreExecutors.directExecutor())
+            .executor(ImmediateExecutor.INSTANCE)
             .build(StopWordFactory::getStopWordsFromResource);
 
     public static Set<String> getStopWordsForLanguage(Locale locale)
@@ -74,7 +73,7 @@
     private static Set<String> getStopWordsFromResource(String language)
     {
         Set<String> stopWords = new HashSet<>();
-        String resourceName = DEFAULT_RESOURCE_PREFIX + File.separator + language + DEFAULT_RESOURCE_EXT;
+        String resourceName = DEFAULT_RESOURCE_PREFIX + File.pathSeparator() + language + DEFAULT_RESOURCE_EXT;
         try (InputStream is = StopWordFactory.class.getClassLoader().getResourceAsStream(resourceName);
              BufferedReader r = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8)))
         {
diff --git a/src/java/org/apache/cassandra/index/sasi/conf/ColumnIndex.java b/src/java/org/apache/cassandra/index/sasi/conf/ColumnIndex.java
index 4c9c59e..81b776d 100644
--- a/src/java/org/apache/cassandra/index/sasi/conf/ColumnIndex.java
+++ b/src/java/org/apache/cassandra/index/sasi/conf/ColumnIndex.java
@@ -31,7 +31,7 @@
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.cql3.Operator;
 import org.apache.cassandra.db.DecoratedKey;
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.db.marshal.UTF8Type;
@@ -223,10 +223,10 @@
 
         Op operator = Op.valueOf(op);
         return !(isTokenized && operator == Op.EQ) // EQ is only applicable to non-tokenized indexes
+               && operator != Op.IN // IN operator is not supported
                && !(isTokenized && mode.mode == OnDiskIndexBuilder.Mode.CONTAINS && operator == Op.PREFIX) // PREFIX not supported on tokenized CONTAINS mode indexes
                && !(isLiteral() && operator == Op.RANGE) // RANGE only applicable to indexes non-literal indexes
                && mode.supports(operator); // for all other cases let's refer to index itself
-
     }
 
     public static ByteBuffer getValueOf(ColumnMetadata column, Row row, int nowInSecs)
diff --git a/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java b/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java
index 99516b8..bf2293f 100644
--- a/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java
+++ b/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.index.sasi.conf;
 
-import java.io.File;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
@@ -29,6 +28,7 @@
 import org.apache.cassandra.index.sasi.SSTableIndex;
 import org.apache.cassandra.index.sasi.conf.view.View;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.utils.Pair;
 
 import org.slf4j.Logger;
@@ -181,7 +181,7 @@
             }
             catch (Throwable t)
             {
-                logger.error("Can't open index file at " + indexFile.getAbsolutePath() + ", skipping.", t);
+                logger.error("Can't open index file at " + indexFile.absolutePath() + ", skipping.", t);
                 if (index != null)
                     index.release();
             }
diff --git a/src/java/org/apache/cassandra/index/sasi/conf/IndexMode.java b/src/java/org/apache/cassandra/index/sasi/conf/IndexMode.java
index 875d2f7..7d4f5c9 100644
--- a/src/java/org/apache/cassandra/index/sasi/conf/IndexMode.java
+++ b/src/java/org/apache/cassandra/index/sasi/conf/IndexMode.java
@@ -190,7 +190,7 @@
 
         if (maxMemBytes > 100L * 1073741824)
         {
-            logger.error("{} configured as {} is above 100GB, reverting to default 1GB", INDEX_MAX_FLUSH_MEMORY_OPTION, maxMemBytes);
+            logger.error("{} configured as {} is above 100GiB, reverting to default 1GB", INDEX_MAX_FLUSH_MEMORY_OPTION, maxMemBytes);
             maxMemBytes = DEFAULT_MAX_MEM_BYTES;
         }
         return new IndexMode(mode, isLiteral, isAnalyzed, analyzerClass, maxMemBytes);
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndex.java b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndex.java
index 4d43cd9..e438079 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndex.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndex.java
@@ -19,6 +19,7 @@
 
 import java.io.*;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.*;
 import java.util.stream.Collectors;
 
@@ -33,6 +34,8 @@
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.util.ChannelProxy;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
@@ -121,13 +124,11 @@
         keyFetcher = keyReader;
 
         comparator = cmp;
-        indexPath = index.getAbsolutePath();
+        indexPath = index.absolutePath();
 
-        RandomAccessFile backingFile = null;
-        try
+
+        try (FileInputStreamPlus backingFile = new FileInputStreamPlus(index))
         {
-            backingFile = new RandomAccessFile(index, "r");
-
             descriptor = new Descriptor(backingFile.readUTF());
 
             termSize = OnDiskIndexBuilder.TermSize.of(backingFile.readShort());
@@ -141,32 +142,29 @@
             mode = OnDiskIndexBuilder.Mode.mode(backingFile.readUTF());
             hasMarkedPartials = backingFile.readBoolean();
 
-            indexSize = backingFile.length();
-            indexFile = new MappedBuffer(new ChannelProxy(indexPath, backingFile.getChannel()));
-
-            // start of the levels
-            indexFile.position(indexFile.getLong(indexSize - 8));
-
-            int numLevels = indexFile.getInt();
-            levels = new PointerLevel[numLevels];
-            for (int i = 0; i < levels.length; i++)
-            {
-                int blockCount = indexFile.getInt();
-                levels[i] = new PointerLevel(indexFile.position(), blockCount);
-                indexFile.position(indexFile.position() + blockCount * 8);
-            }
-
-            int blockCount = indexFile.getInt();
-            dataLevel = new DataLevel(indexFile.position(), blockCount);
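+            // the header fields were read via the stream above; open a separate channel to memory-map the whole index file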
+            FileChannel channel = index.newReadChannel();
+            indexSize = channel.size();
+            indexFile = new MappedBuffer(new ChannelProxy(indexPath, channel));
         }
         catch (IOException e)
         {
             throw new FSReadError(e, index);
         }
-        finally
+
+        // start of the levels
+        indexFile.position(indexFile.getLong(indexSize - 8));
+
+        int numLevels = indexFile.getInt();
+        levels = new PointerLevel[numLevels];
+        for (int i = 0; i < levels.length; i++)
         {
-            FileUtils.closeQuietly(backingFile);
+            int blockCount = indexFile.getInt();
+            levels[i] = new PointerLevel(indexFile.position(), blockCount);
+            indexFile.position(indexFile.position() + blockCount * 8);
         }
+
+        int blockCount = indexFile.getInt();
+        dataLevel = new DataLevel(indexFile.position(), blockCount);
     }
 
     public boolean hasMarkedPartials()
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java
index 0298539..a6faa04 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.index.sasi.disk;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
@@ -247,15 +246,7 @@
         // no terms means there is nothing to build
         if (terms.isEmpty())
         {
-            try
-            {
-                file.createNewFile();
-            }
-            catch (IOException e)
-            {
-                throw new FSWriteError(e, file);
-            }
-
+            file.createFileIfNotExists();
             return false;
         }
 
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java
index 0af4ba2..fb5e9b9 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java
@@ -17,16 +17,14 @@
  */
 package org.apache.cassandra.index.sasi.disk;
 
-import java.io.File;
 import java.nio.ByteBuffer;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.*;
 
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.compaction.OperationType;
@@ -43,36 +41,34 @@
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.Uninterruptibles;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
+
 public class PerSSTableIndexWriter implements SSTableFlushObserver
 {
     private static final Logger logger = LoggerFactory.getLogger(PerSSTableIndexWriter.class);
 
     private static final int POOL_SIZE = 8;
-    private static final ThreadPoolExecutor INDEX_FLUSHER_MEMTABLE;
-    private static final ThreadPoolExecutor INDEX_FLUSHER_GENERAL;
+    private static final ExecutorPlus INDEX_FLUSHER_MEMTABLE;
+    private static final ExecutorPlus INDEX_FLUSHER_GENERAL;
 
     static
     {
-        INDEX_FLUSHER_GENERAL = new JMXEnabledThreadPoolExecutor(POOL_SIZE, POOL_SIZE, 1, TimeUnit.MINUTES,
-                                                                 new LinkedBlockingQueue<>(),
-                                                                 new NamedThreadFactory("SASI-General"),
-                                                                 "internal");
-        INDEX_FLUSHER_GENERAL.allowCoreThreadTimeOut(true);
+        INDEX_FLUSHER_GENERAL = executorFactory().withJmxInternal()
+                                                 .pooled("SASI-General", POOL_SIZE);
 
-        INDEX_FLUSHER_MEMTABLE = new JMXEnabledThreadPoolExecutor(POOL_SIZE, POOL_SIZE, 1, TimeUnit.MINUTES,
-                                                                  new LinkedBlockingQueue<>(),
-                                                                  new NamedThreadFactory("SASI-Memtable"),
-                                                                  "internal");
-        INDEX_FLUSHER_MEMTABLE.allowCoreThreadTimeOut(true);
+        INDEX_FLUSHER_MEMTABLE = executorFactory().withJmxInternal()
+                                                  .pooled("SASI-Memtable", POOL_SIZE);
     }
 
     private final int nowInSec = FBUtilities.nowInSeconds();
@@ -139,11 +135,11 @@
 
         try
         {
-            CountDownLatch latch = new CountDownLatch(indexes.size());
+            CountDownLatch latch = newCountDownLatch(indexes.size());
             for (Index index : indexes.values())
                 index.complete(latch);
 
-            Uninterruptibles.awaitUninterruptibly(latch);
+            latch.awaitUninterruptibly();
         }
         finally
         {
@@ -251,7 +247,7 @@
             final String segmentFile = filename(isFinal);
 
             return () -> {
-                long start = System.nanoTime();
+                long start = nanoTime();
 
                 try
                 {
@@ -266,7 +262,7 @@
                 finally
                 {
                     if (!isFinal)
-                        logger.info("Flushed index segment {}, took {} ms.", segmentFile, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+                        logger.info("Flushed index segment {}, took {} ms.", segmentFile, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
                 }
             };
         }
@@ -276,7 +272,7 @@
             logger.info("Scheduling index flush to {}", outputFile);
 
             getExecutor().submit((Runnable) () -> {
-                long start1 = System.nanoTime();
+                long start1 = nanoTime();
 
                 OnDiskIndex[] parts = new OnDiskIndex[segments.size() + 1];
 
@@ -294,7 +290,7 @@
                     {
                         @SuppressWarnings("resource")
                         OnDiskIndex last = scheduleSegmentFlush(false).call();
-                        segments.add(Futures.immediateFuture(last));
+                        segments.add(ImmediateFuture.success(last));
                     }
 
                     int index = 0;
@@ -324,7 +320,7 @@
                 }
                 finally
                 {
-                    logger.info("Index flush to {} took {} ms.", outputFile, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start1));
+                    logger.info("Index flush to {} took {} ms.", outputFile, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start1));
 
                     for (int segment = 0; segment < segmentNumber; segment++)
                     {
@@ -337,7 +333,7 @@
                         FileUtils.delete(outputFile + "_" + segment);
                     }
 
-                    latch.countDown();
+                    latch.decrement();
                 }
             });
         }
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java b/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java
index e510cdd..3a401ca 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java
@@ -520,4 +520,4 @@
             return index < offsets.length ? keyFetcher.apply(offsets[index++]) : endOfData();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java b/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java
index 29cecc8..01a536c 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java
@@ -75,4 +75,4 @@
 
     int serializedSize();
     void write(DataOutputPlus out) throws IOException;
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java b/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java
index 0f681b7..d60914d 100644
--- a/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java
+++ b/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java
@@ -126,4 +126,4 @@
             return keys.iterator();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/sasi/plan/Expression.java b/src/java/org/apache/cassandra/index/sasi/plan/Expression.java
index 8de45e8..6c3b9b8 100644
--- a/src/java/org/apache/cassandra/index/sasi/plan/Expression.java
+++ b/src/java/org/apache/cassandra/index/sasi/plan/Expression.java
@@ -47,7 +47,7 @@
 
     public enum Op
     {
-        EQ, MATCH, PREFIX, SUFFIX, CONTAINS, NOT_EQ, RANGE;
+        EQ, MATCH, PREFIX, SUFFIX, CONTAINS, NOT_EQ, RANGE, IN;
 
         public static Op valueOf(Operator operator)
         {
@@ -56,6 +56,9 @@
                 case EQ:
                     return EQ;
 
+                case IN:
+                    return IN;
+
                 case NEQ:
                     return NOT_EQ;
 
diff --git a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java
index db16c52..60538e1 100644
--- a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java
+++ b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java
@@ -49,6 +49,8 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class QueryController
 {
     private final long executionQuota;
@@ -65,7 +67,7 @@
         this.command = command;
         this.range = command.dataRange();
         this.executionQuota = TimeUnit.MILLISECONDS.toNanos(timeQuotaMs);
-        this.executionStart = System.nanoTime();
+        this.executionStart = nanoTime();
     }
 
     public TableMetadata metadata()
@@ -154,7 +156,7 @@
 
     public void checkpoint()
     {
-	long executionTime = (System.nanoTime() - executionStart);
+        long executionTime = (nanoTime() - executionStart);
 
         if (executionTime >= executionQuota)
             throw new TimeQuotaExceededException(
diff --git a/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java b/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java
index 81e535d..cc327bc 100644
--- a/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java
+++ b/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java
@@ -81,4 +81,4 @@
     {
         return term.compareTo(comparator, o.get().getTerm());
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java b/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java
index dde3c8a..7c86b97 100644
--- a/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java
+++ b/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java
@@ -80,4 +80,4 @@
      * Note: Not all operations support {@link Decision#REMOVE}.
      */
     Decision select(Map.Entry<? extends K, ? extends V> entry);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java b/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java
index 9187894..a36af98 100644
--- a/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java
+++ b/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java
@@ -1258,4 +1258,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/FSDiskFullWriteError.java b/src/java/org/apache/cassandra/io/FSDiskFullWriteError.java
index ebb07e2..abf0b6c 100644
--- a/src/java/org/apache/cassandra/io/FSDiskFullWriteError.java
+++ b/src/java/org/apache/cassandra/io/FSDiskFullWriteError.java
@@ -28,10 +28,4 @@
                                             mutationSize,
                                             keyspace)));
     }
-
-    @Override
-    public String toString()
-    {
-        return "FSDiskFullWriteError";
-    }
 }
diff --git a/src/java/org/apache/cassandra/io/FSError.java b/src/java/org/apache/cassandra/io/FSError.java
index e09bac7..4c06d9c 100644
--- a/src/java/org/apache/cassandra/io/FSError.java
+++ b/src/java/org/apache/cassandra/io/FSError.java
@@ -17,17 +17,38 @@
  */
 package org.apache.cassandra.io;
 
-import java.io.File;
 import java.io.IOError;
+import java.nio.file.Path;
+
+import org.apache.cassandra.io.util.File;
 
 public abstract class FSError extends IOError
 {
-    public final File path;
+    final String message;
+    public final String path;
 
     public FSError(Throwable cause, File path)
     {
+        this(null, cause, path);
+    }
+
+    public FSError(Throwable cause, Path path)
+    {
+        this(null, cause, path);
+    }
+
+    public FSError(String message, Throwable cause, File path)
+    {
         super(cause);
-        this.path = path;
+        this.message = message;
+        this.path = path.toString();
+    }
+
+    public FSError(String message, Throwable cause, Path path)
+    {
+        super(cause);
+        this.message = message;
+        this.path = path.toString();
     }
 
     /**
@@ -45,4 +66,10 @@
 
         return null;
     }
+
+    @Override
+    public String toString()
+    {
+        return getClass().getSimpleName() + (message != null ? ' ' + message : "") + (path != null ? " in " + path : "");
+    }
 }
diff --git a/src/java/org/apache/cassandra/io/FSErrorHandler.java b/src/java/org/apache/cassandra/io/FSErrorHandler.java
index 081ec0b..b7d2836 100644
--- a/src/java/org/apache/cassandra/io/FSErrorHandler.java
+++ b/src/java/org/apache/cassandra/io/FSErrorHandler.java
@@ -27,4 +27,5 @@
 {
     void handleCorruptSSTable(CorruptSSTableException e);
     void handleFSError(FSError e);
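+    /** Invoked for filesystem errors that occur during startup; the default implementation ignores them. */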
+    default void handleStartupFSError(Throwable t) {}
 }
diff --git a/src/java/org/apache/cassandra/io/FSNoDiskAvailableForWriteError.java b/src/java/org/apache/cassandra/io/FSNoDiskAvailableForWriteError.java
index 14dcd38..415f204 100644
--- a/src/java/org/apache/cassandra/io/FSNoDiskAvailableForWriteError.java
+++ b/src/java/org/apache/cassandra/io/FSNoDiskAvailableForWriteError.java
@@ -30,10 +30,4 @@
         super(new IOException(String.format("The data directories for the %s keyspace have been marked as unwritable",
                                             keyspace)));
     }
-
-    @Override
-    public String toString()
-    {
-        return "FSNoDiskAvailableForWriteError";
-    }
 }
diff --git a/src/java/org/apache/cassandra/io/FSReadError.java b/src/java/org/apache/cassandra/io/FSReadError.java
index c557fc5..ac15534 100644
--- a/src/java/org/apache/cassandra/io/FSReadError.java
+++ b/src/java/org/apache/cassandra/io/FSReadError.java
@@ -17,10 +17,18 @@
  */
 package org.apache.cassandra.io;
 
-import java.io.File;
+
+import java.nio.file.Path;
+
+import org.apache.cassandra.io.util.File;
 
 public class FSReadError extends FSError
 {
+    public FSReadError(Throwable cause, Path path)
+    {
+        super(cause, path);
+    }
+
     public FSReadError(Throwable cause, File path)
     {
         super(cause, path);
@@ -31,9 +39,23 @@
         this(cause, new File(path));
     }
 
-    @Override
-    public String toString()
+    public FSReadError(String message, Throwable cause, Path path)
     {
-        return "FSReadError in " + path;
+        super(message, cause, path);
+    }
+
+    public FSReadError(String message, Throwable cause, File path)
+    {
+        super(message, cause, path);
+    }
+
+    public FSReadError(String message, Throwable cause, String path)
+    {
+        this(message, cause, new File(path));
+    }
+
+    public FSReadError(String message, Throwable cause)
+    {
+        this(message, cause, new File(""));
     }
 }
diff --git a/src/java/org/apache/cassandra/io/FSWriteError.java b/src/java/org/apache/cassandra/io/FSWriteError.java
index b419086..81f00bd 100644
--- a/src/java/org/apache/cassandra/io/FSWriteError.java
+++ b/src/java/org/apache/cassandra/io/FSWriteError.java
@@ -17,10 +17,18 @@
  */
 package org.apache.cassandra.io;
 
-import java.io.File;
+
+import java.nio.file.Path;
+
+import org.apache.cassandra.io.util.File;
 
 public class FSWriteError extends FSError
 {
+    public FSWriteError(Throwable cause, Path path)
+    {
+        super(cause, path);
+    }
+
     public FSWriteError(Throwable cause, File path)
     {
         super(cause, path);
@@ -36,9 +44,23 @@
         this(cause, new File(""));
     }
 
-    @Override
-    public String toString()
+    public FSWriteError(String message, Throwable cause, Path path)
     {
-        return "FSWriteError in " + path;
+        super(message, cause, path);
+    }
+
+    public FSWriteError(String message, Throwable cause, File path)
+    {
+        super(message, cause, path);
+    }
+
+    public FSWriteError(String message, Throwable cause, String path)
+    {
+        this(message, cause, new File(path));
+    }
+
+    public FSWriteError(String message, Throwable cause)
+    {
+        this(message, cause, new File(""));
     }
 }
diff --git a/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java b/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
index 2190824..024e4ef 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
@@ -19,7 +19,6 @@
 
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index 45cf7af..5af9c92 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -17,21 +17,15 @@
  */
 package org.apache.cassandra.io.compress;
 
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.io.BufferedOutputStream;
+import java.nio.file.NoSuchFileException;
 import java.io.DataInput;
-import java.io.DataInputStream;
 import java.io.DataOutput;
-import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
+
+import org.apache.cassandra.io.util.*;
 import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.Collection;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.SortedSet;
@@ -50,12 +44,7 @@
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.io.util.Memory;
-import org.apache.cassandra.io.util.SafeMemory;
 import org.apache.cassandra.schema.CompressionParams;
-import org.apache.cassandra.utils.SyncUtil;
 import org.apache.cassandra.utils.concurrent.Transactional;
 import org.apache.cassandra.utils.concurrent.Ref;
 
@@ -106,7 +95,7 @@
     {
         this.indexFilePath = indexFilePath;
 
-        try (DataInputStream stream = new DataInputStream(Files.newInputStream(Paths.get(indexFilePath))))
+        try (FileInputStreamPlus stream = new File(indexFilePath).newInputStream())
         {
             String compressorName = stream.readUTF();
             int optionCount = stream.readInt();
@@ -134,7 +123,7 @@
             compressedFileLength = compressedLength;
             chunkOffsets = readChunkOffsets(stream);
         }
-        catch (FileNotFoundException e)
+        catch (FileNotFoundException | NoSuchFileException e)
         {
             throw new RuntimeException(e);
         }
@@ -412,17 +401,16 @@
             }
 
             // flush the data to disk
-            try (FileOutputStream fos = new FileOutputStream(filePath);
-                 DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)))
+            try (FileOutputStreamPlus out = new FileOutputStreamPlus(filePath))
             {
                 writeHeader(out, dataLength, count);
                 for (int i = 0; i < count; i++)
                     out.writeLong(offsets.getLong(i * 8L));
 
                 out.flush();
-                SyncUtil.sync(fos);
+                out.sync();
             }
-            catch (FileNotFoundException fnfe)
+            catch (FileNotFoundException | NoSuchFileException fnfe)
             {
                 throw Throwables.propagate(fnfe);
             }
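The hunks above replace the java.io stream types with the org.apache.cassandra.io.util wrappers and widen the catches to cover java.nio's NoSuchFileException. A hedged sketch of the same write-then-sync pattern, assuming a hypothetical offsets file and helper method:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.nio.file.NoSuchFileException;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;

    static void writeOffsets(String filePath, long[] offsets) throws IOException
    {
        try (FileOutputStreamPlus out = new FileOutputStreamPlus(filePath))
        {
            for (long offset : offsets)
                out.writeLong(offset);
            out.flush();
            out.sync();   // replaces the old SyncUtil.sync(fos) on a raw FileOutputStream
        }
        catch (FileNotFoundException | NoSuchFileException e)
        {
            // nio-backed streams surface NoSuchFileException where io-backed ones threw FileNotFoundException
            throw new RuntimeException(e);
        }
    }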
diff --git a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
index 4eaf1fe..49ceb76 100644
--- a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
@@ -17,20 +17,21 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
-import java.io.FileFilter;
+
 import java.io.IOException;
 import java.io.Closeable;
 import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Stream;
 
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.rows.EncodingStats;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.service.ActiveRepairService;
 
@@ -43,7 +44,7 @@
     protected final TableMetadataRef metadata;
     protected final RegularAndStaticColumns columns;
     protected SSTableFormat.Type formatType = SSTableFormat.Type.current();
-    protected static AtomicInteger generation = new AtomicInteger(0);
+    protected static final AtomicReference<SSTableId> id = new AtomicReference<>(SSTableIdFactory.instance.defaultBuilder().generator(Stream.empty()).get());
     protected boolean makeRangeAware = false;
 
     protected AbstractSSTableSimpleWriter(File directory, TableMetadataRef metadata, RegularAndStaticColumns columns)
@@ -63,8 +64,7 @@
         this.makeRangeAware = makeRangeAware;
     }
 
-
-    protected SSTableTxnWriter createWriter()
+    protected SSTableTxnWriter createWriter() throws IOException
     {
         SerializationHeader header = new SerializationHeader(true, metadata.get(), columns, EncodingStats.NO_STATS);
 
@@ -82,38 +82,29 @@
                                        Collections.emptySet());
     }
 
-    private static Descriptor createDescriptor(File directory, final String keyspace, final String columnFamily, final SSTableFormat.Type fmt)
+    private static Descriptor createDescriptor(File directory, final String keyspace, final String columnFamily, final SSTableFormat.Type fmt) throws IOException
     {
-        int maxGen = getNextGeneration(directory, columnFamily);
-        return new Descriptor(directory, keyspace, columnFamily, maxGen + 1, fmt);
+        SSTableId nextGen = getNextId(directory, columnFamily);
+        return new Descriptor(directory, keyspace, columnFamily, nextGen, fmt);
     }
 
-    private static int getNextGeneration(File directory, final String columnFamily)
+    private static SSTableId getNextId(File directory, final String columnFamily) throws IOException
     {
-        final Set<Descriptor> existing = new HashSet<>();
-        directory.listFiles(new FileFilter()
+        while (true)
         {
-            public boolean accept(File file)
+            try (Stream<Path> existingPaths = Files.list(directory.toPath()))
             {
-                Descriptor desc = SSTable.tryDescriptorFromFilename(file);
-                if (desc == null)
-                    return false;
+                Stream<SSTableId> existingIds = existingPaths.map(File::new)
+                                                             .map(SSTable::tryDescriptorFromFilename)
+                                                             .filter(d -> d != null && d.cfname.equals(columnFamily))
+                                                             .map(d -> d.id);
 
-                if (desc.cfname.equals(columnFamily))
-                    existing.add(desc);
-
-                return false;
-            }
-        });
-        int maxGen = generation.getAndIncrement();
-        for (Descriptor desc : existing)
-        {
-            while (desc.generation > maxGen)
-            {
-                maxGen = generation.getAndIncrement();
+                SSTableId lastId = id.get();
+                SSTableId newId = SSTableIdFactory.instance.defaultBuilder().generator(Stream.concat(existingIds, Stream.of(lastId))).get();
+                if (id.compareAndSet(lastId, newId))
+                    return newId;
             }
         }
-        return maxGen;
     }
 
     PartitionUpdate.Builder getUpdateFor(ByteBuffer key) throws IOException
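The rewritten getNextId() above replaces the AtomicInteger generation counter with a compare-and-set retry loop over an AtomicReference, so concurrent writers never hand out the same identifier. A simplified sketch of that pattern in plain Java, with a monotonically increasing long standing in for SSTableId (illustrative only, not the patch's API):

    import java.util.concurrent.atomic.AtomicReference;

    static final AtomicReference<Long> lastId = new AtomicReference<>(0L);

    static long nextId(long maxExistingOnDisk)
    {
        while (true)
        {
            Long last = lastId.get();
            long candidate = Math.max(maxExistingOnDisk, last) + 1;  // never reuse an id already seen
            if (lastId.compareAndSet(last, candidate))               // retry if another thread won the race
                return candidate;
        }
    }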
diff --git a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
index 0b8dbae..6d54561 100644
--- a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.io.sstable;
 
 import java.io.Closeable;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -26,10 +25,14 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.SortedSet;
+import java.util.NavigableSet;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 import org.apache.cassandra.cql3.statements.schema.CreateTypeStatement;
@@ -41,20 +44,23 @@
 import org.apache.cassandra.cql3.functions.types.TypeCodec;
 import org.apache.cassandra.cql3.functions.types.UserType;
 import org.apache.cassandra.cql3.statements.ModificationStatement;
-import org.apache.cassandra.cql3.statements.UpdateStatement;
 import org.apache.cassandra.db.Clustering;
-import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.Slices;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.*;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Utility to write SSTables.
  * <p>
@@ -70,7 +76,7 @@
  *   String insert = "INSERT INTO myKs.myTable (k, v1, v2, v3) VALUES (?, ?, ?, ?)";
  *
  *   // Creates a new writer. You need to provide at least the directory where to write the created sstable,
- *   // the schema for the sstable to write and a (prepared) insert statement to use. If you do not use the
+ *   // the schema for the sstable to write and a (prepared) modification statement to use. If you do not use the
  *   // default partitioner (Murmur3Partitioner), you will also need to provide the partitioner in use, see
  *   // CQLSSTableWriter.Builder for more details on the available options.
  *   CQLSSTableWriter writer = CQLSSTableWriter.builder()
@@ -99,6 +105,7 @@
 
     static
     {
+        CassandraRelevantProperties.FORCE_LOAD_LOCAL_KEYSPACES.setBoolean(true);
         DatabaseDescriptor.clientInitialization(false);
         // Partitioner is not set in client mode.
         if (DatabaseDescriptor.getPartitioner() == null)
@@ -106,14 +113,14 @@
     }
 
     private final AbstractSSTableSimpleWriter writer;
-    private final UpdateStatement insert;
+    private final ModificationStatement modificationStatement;
     private final List<ColumnSpecification> boundNames;
     private final List<TypeCodec> typeCodecs;
 
-    private CQLSSTableWriter(AbstractSSTableSimpleWriter writer, UpdateStatement insert, List<ColumnSpecification> boundNames)
+    private CQLSSTableWriter(AbstractSSTableSimpleWriter writer, ModificationStatement modificationStatement, List<ColumnSpecification> boundNames)
     {
         this.writer = writer;
-        this.insert = insert;
+        this.modificationStatement = modificationStatement;
         this.boundNames = boundNames;
         this.typeCodecs = boundNames.stream().map(bn ->  UDHelper.codecFor(UDHelper.driverType(bn.type)))
                                              .collect(Collectors.toList());
@@ -135,7 +142,7 @@
      * This is a shortcut for {@code addRow(Arrays.asList(values))}.
      *
      * @param values the row values (corresponding to the bind variables of the
-     * insertion statement used when creating by this writer).
+     * modification statement used when creating this writer).
      * @return this writer.
      */
     public CQLSSTableWriter addRow(Object... values)
@@ -156,7 +163,7 @@
      * {@link #rawAddRow} instead.
      *
      * @param values the row values (corresponding to the bind variables of the
-     * insertion statement used when creating by this writer).
+     * modification statement used when creating this writer).
      * @return this writer.
      */
     public CQLSSTableWriter addRow(List<Object> values)
@@ -179,7 +186,7 @@
      * <p>
      * This is equivalent to the other addRow methods, but takes a map whose
      * keys are the names of the columns to add instead of taking a list of the
-     * values in the order of the insert statement used during construction of
+     * values in the order of the modification statement used during construction of
      * this write.
      * <p>
      * Please note that the column names in the map keys must be in lowercase unless
@@ -190,7 +197,7 @@
      * @param values a map of colum name to column values representing the new
      * row to add. Note that if a column is not part of the map, it's value will
      * be {@code null}. If the map contains keys that does not correspond to one
-     * of the column of the insert statement used when creating this writer, the
+     * of the columns of the modification statement used when creating this writer,
      * the corresponding value is ignored.
      * @return this writer.
      */
@@ -212,7 +219,7 @@
      * Adds a new row to the writer given already serialized values.
      *
      * @param values the row values (corresponding to the bind variables of the
-     * insertion statement used when creating by this writer) as binary.
+     * modification statement used when creating this writer) as binary.
      * @return this writer.
      */
     public CQLSSTableWriter rawAddRow(ByteBuffer... values)
@@ -227,7 +234,7 @@
      * This is a shortcut for {@code rawAddRow(Arrays.asList(values))}.
      *
      * @param values the row values (corresponding to the bind variables of the
-     * insertion statement used when creating by this writer) as binary.
+     * modification statement used when creating this writer) as binary.
      * @return this writer.
      */
     public CQLSSTableWriter rawAddRow(List<ByteBuffer> values)
@@ -237,26 +244,41 @@
             throw new InvalidRequestException(String.format("Invalid number of arguments, expecting %d values but got %d", boundNames.size(), values.size()));
 
         QueryOptions options = QueryOptions.forInternalCalls(null, values);
-        List<ByteBuffer> keys = insert.buildPartitionKeyNames(options);
-        SortedSet<Clustering<?>> clusterings = insert.createClustering(options);
+        ClientState state = ClientState.forInternalCalls();
+        List<ByteBuffer> keys = modificationStatement.buildPartitionKeyNames(options, state);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         // Note that we asks indexes to not validate values (the last 'false' arg below) because that triggers a 'Keyspace.open'
         // and that forces a lot of initialization that we don't want.
-        UpdateParameters params = new UpdateParameters(insert.metadata,
-                                                       insert.updatedColumns(),
+        UpdateParameters params = new UpdateParameters(modificationStatement.metadata,
+                                                       modificationStatement.updatedColumns(),
+                                                       ClientState.forInternalCalls(),
                                                        options,
-                                                       insert.getTimestamp(TimeUnit.MILLISECONDS.toMicros(now), options),
-                                                       (int) TimeUnit.MILLISECONDS.toSeconds(now),
-                                                       insert.getTimeToLive(options),
+                                                       modificationStatement.getTimestamp(TimeUnit.MILLISECONDS.toMicros(now), options),
+                                                       options.getNowInSec((int) TimeUnit.MILLISECONDS.toSeconds(now)),
+                                                       modificationStatement.getTimeToLive(options),
                                                        Collections.emptyMap());
 
         try
         {
-            for (ByteBuffer key : keys)
+            if (modificationStatement.hasSlices())
+            {
+                Slices slices = modificationStatement.createSlices(options);
+
+                for (ByteBuffer key : keys)
+                {
+                    for (Slice slice : slices)
+                        modificationStatement.addUpdateForKey(writer.getUpdateFor(key), slice, params);
+                }
+            }
+            else
             {
-                for (Clustering<?> clustering : clusterings)
-                    insert.addUpdateForKey(writer.getUpdateFor(key), clustering, params);
+                NavigableSet<Clustering<?>> clusterings = modificationStatement.createClustering(options, state);
+
+                for (ByteBuffer key : keys)
+                {
+                    for (Clustering clustering : clusterings)
+                        modificationStatement.addUpdateForKey(writer.getUpdateFor(key), clustering, params);
+                }
             }
             return this;
         }
@@ -273,13 +295,13 @@
      * <p>
      * This is equivalent to the other rawAddRow methods, but takes a map whose
      * keys are the names of the columns to add instead of taking a list of the
-     * values in the order of the insert statement used during construction of
+     * values in the order of the modification statement used during construction of
      * this write.
      *
      * @param values a map of colum name to column values representing the new
      * row to add. Note that if a column is not part of the map, it's value will
      * be {@code null}. If the map contains keys that does not correspond to one
-     * of the column of the insert statement used when creating this writer, the
+     * of the columns of the modification statement used when creating this writer,
      * the corresponding value is ignored.
      * @return this writer.
      */
@@ -305,7 +327,7 @@
      */
     public UserType getUDType(String dataType)
     {
-        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(insert.keyspace());
+        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(modificationStatement.keyspace());
         org.apache.cassandra.db.marshal.UserType userType = ksm.types.getNullable(ByteBufferUtil.bytes(dataType));
         return (UserType) UDHelper.driverType(userType);
     }
@@ -348,11 +370,11 @@
 
         private CreateTableStatement.Raw schemaStatement;
         private final List<CreateTypeStatement.Raw> typeStatements;
-        private ModificationStatement.Parsed insertStatement;
+        private ModificationStatement.Parsed modificationStatement;
         private IPartitioner partitioner;
 
         private boolean sorted = false;
-        private long bufferSizeInMB = 128;
+        private long bufferSizeInMiB = 128;
 
         protected Builder() {
             this.typeStatements = new ArrayList<>();
@@ -387,7 +409,7 @@
         {
             if (!directory.exists())
                 throw new IllegalArgumentException(directory + " doesn't exists");
-            if (!directory.canWrite())
+            if (!directory.isWritable())
                 throw new IllegalArgumentException(directory + " exists but is not writable");
 
             this.directory = directory;
@@ -437,24 +459,26 @@
         }
 
         /**
-         * The INSERT or UPDATE statement defining the order of the values to add for a given CQL row.
+         * The INSERT, UPDATE, or DELETE statement defining the order of the values to add for a given CQL row.
          * <p>
-         * Please note that the provided INSERT statement <b>must</b> use a fully-qualified
+         * Please note that the provided statement <b>must</b> use a fully-qualified
          * table name, one that include the keyspace name. Moreover, said statement must use
          * bind variables since these variables will be bound to values by the resulting writer.
          * <p>
          * This is a mandatory option.
          *
-         * @param insert an insertion statement that defines the order
+         * @param modificationStatement an insert, update, or delete statement that defines the order
          * of column values to use.
          * @return this builder.
          *
-         * @throws IllegalArgumentException if {@code insertStatement} is not a valid insertion
+         * @throws IllegalArgumentException if {@code modificationStatement} is not a valid insert, update, or delete
          * statement, does not have a fully-qualified table name or have no bind variables.
          */
-        public Builder using(String insert)
+        public Builder using(String modificationStatement)
         {
-            this.insertStatement = QueryProcessor.parseStatement(insert, ModificationStatement.Parsed.class, "INSERT/UPDATE");
+            this.modificationStatement = QueryProcessor.parseStatement(modificationStatement,
+                                                                       ModificationStatement.Parsed.class,
+                                                                       "INSERT/UPDATE/DELETE");
             return this;
         }
 
@@ -465,16 +489,36 @@
          * a new SSTable. This correspond roughly to the data size that will have the created
          * sstable.
          * <p>
-         * The default is 128MB, which should be reasonable for a 1GB heap. If you experience
+         * The default is 128MiB, which should be reasonable for a 1GiB heap. If you experience
          * OOM while using the writer, you should lower this value.
          *
-         * @param size the size to use in MB.
+         * @param size the size to use in MiB.
          * @return this builder.
          */
+        public Builder withBufferSizeInMiB(int size)
+        {
+            this.bufferSizeInMiB = size;
+            return this;
+        }
+
+        /**
+         * This method is deprecated in favor of the new {@link #withBufferSizeInMiB(int)} method.
+         * The size of the buffer to use.
+         * <p>
+         * This defines how much data will be buffered before being written as
+         * a new SSTable. This corresponds roughly to the data size that the created
+         * sstable will have.
+         * <p>
+         * The default is 128MiB, which should be reasonable for a 1GiB heap. If you experience
+         * OOM while using the writer, you should lower this value.
+         *
+         * @param size the size to use in MiB.
+         * @return this builder.
+         */
+        @Deprecated
         public Builder withBufferSizeInMB(int size)
         {
-            this.bufferSizeInMB = size;
-            return this;
+            return withBufferSizeInMiB(size);
         }
 
         /**
@@ -482,7 +526,7 @@
          * <p>
          * If this option is used, the resulting writer will expect rows to be
          * added in SSTable sorted order (and an exception will be thrown if that
-         * is not the case during insertion). The SSTable sorted order means that
+         * is not the case during modification). The SSTable sorted order means that
          * rows are added such that their partition key respect the partitioner
          * order.
          * <p>
@@ -490,7 +534,7 @@
          * the rows in order, which is rarely the case. If you can provide the
          * rows in order however, using this sorted might be more efficient.
          * <p>
-         * Note that if used, some option like withBufferSizeInMB will be ignored.
+         * Note that if used, some options such as withBufferSizeInMiB will be ignored.
          *
          * @return this builder.
          */
@@ -507,27 +551,23 @@
                 throw new IllegalStateException("No ouptut directory specified, you should provide a directory with inDirectory()");
             if (schemaStatement == null)
                 throw new IllegalStateException("Missing schema, you should provide the schema for the SSTable to create with forTable()");
-            if (insertStatement == null)
-                throw new IllegalStateException("No insert statement specified, you should provide an insert statement through using()");
+            if (modificationStatement == null)
+                throw new IllegalStateException("No modification (INSERT/UPDATE/DELETE) statement specified, you should provide a modification statement through using()");
 
+            Preconditions.checkState(Sets.difference(SchemaConstants.LOCAL_SYSTEM_KEYSPACE_NAMES, Schema.instance.getKeyspaces()).isEmpty(),
+                                     "Local keyspaces were not loaded. If this is running as a client, please make sure to add %s=true system property.",
+                                     CassandraRelevantProperties.FORCE_LOAD_LOCAL_KEYSPACES.getKey());
             synchronized (CQLSSTableWriter.class)
             {
-                if (Schema.instance.getKeyspaceMetadata(SchemaConstants.SCHEMA_KEYSPACE_NAME) == null)
-                    Schema.instance.load(Schema.getSystemKeyspaceMetadata());
-                if (Schema.instance.getKeyspaceMetadata(SchemaConstants.SYSTEM_KEYSPACE_NAME) == null)
-                    Schema.instance.load(SystemKeyspace.metadata());
 
                 String keyspaceName = schemaStatement.keyspace();
 
-                if (Schema.instance.getKeyspaceMetadata(keyspaceName) == null)
-                {
-                    Schema.instance.load(KeyspaceMetadata.create(keyspaceName,
-                                                                 KeyspaceParams.simple(1),
-                                                                 Tables.none(),
-                                                                 Views.none(),
-                                                                 Types.none(),
-                                                                 Functions.none()));
-                }
+                Schema.instance.transform(SchemaTransformations.addKeyspace(KeyspaceMetadata.create(keyspaceName,
+                                                                                                           KeyspaceParams.simple(1),
+                                                                                                           Tables.none(),
+                                                                                                           Views.none(),
+                                                                                                           Types.none(),
+                                                                                                           Functions.none()), true));
 
                 KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(keyspaceName);
 
@@ -535,21 +575,22 @@
                 if (tableMetadata == null)
                 {
                     Types types = createTypes(keyspaceName);
+                    Schema.instance.transform(SchemaTransformations.addTypes(types, true));
                     tableMetadata = createTable(types);
-                    Schema.instance.load(ksm.withSwapped(ksm.tables.with(tableMetadata)).withSwapped(types));
+                    Schema.instance.transform(SchemaTransformations.addTable(tableMetadata, true));
                 }
 
-                UpdateStatement preparedInsert = prepareInsert();
+                ModificationStatement preparedModificationStatement = prepareModificationStatement();
 
                 TableMetadataRef ref = TableMetadataRef.forOfflineTools(tableMetadata);
                 AbstractSSTableSimpleWriter writer = sorted
-                                                   ? new SSTableSimpleWriter(directory, ref, preparedInsert.updatedColumns())
-                                                   : new SSTableSimpleUnsortedWriter(directory, ref, preparedInsert.updatedColumns(), bufferSizeInMB);
+                                                     ? new SSTableSimpleWriter(directory, ref, preparedModificationStatement.updatedColumns())
+                                                     : new SSTableSimpleUnsortedWriter(directory, ref, preparedModificationStatement.updatedColumns(), bufferSizeInMiB);
 
                 if (formatType != null)
                     writer.setSSTableFormatType(formatType);
 
-                return new CQLSSTableWriter(writer, preparedInsert, preparedInsert.getBindVariables());
+                return new CQLSSTableWriter(writer, preparedModificationStatement, preparedModificationStatement.getBindVariables());
             }
         }
 
@@ -580,24 +621,24 @@
         }
 
         /**
-         * Prepares insert statement for writing data to SSTable
+         * Prepares modification statement for writing data to SSTable
          *
-         * @return prepared Insert statement and it's bound names
+         * @return prepared modification statement and its bound names
          */
-        private UpdateStatement prepareInsert()
+        private ModificationStatement prepareModificationStatement()
         {
             ClientState state = ClientState.forInternalCalls();
-            UpdateStatement insert = (UpdateStatement) insertStatement.prepare(state);
-            insert.validate(state);
+            ModificationStatement preparedModificationStatement = modificationStatement.prepare(state);
+            preparedModificationStatement.validate(state);
 
-            if (insert.hasConditions())
+            if (preparedModificationStatement.hasConditions())
                 throw new IllegalArgumentException("Conditional statements are not supported");
-            if (insert.isCounter())
-                throw new IllegalArgumentException("Counter update statements are not supported");
-            if (insert.getBindVariables().isEmpty())
-                throw new IllegalArgumentException("Provided insert statement has no bind variables");
+            if (preparedModificationStatement.isCounter())
+                throw new IllegalArgumentException("Counter modification statements are not supported");
+            if (preparedModificationStatement.getBindVariables().isEmpty())
+                throw new IllegalArgumentException("Provided preparedModificationStatement statement has no bind variables");
 
-            return insert;
+            return preparedModificationStatement;
         }
     }
 }
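With the changes above, using() accepts UPDATE and DELETE statements in addition to INSERT. A hedged usage sketch, in the style of the class javadoc example (keyspace, table, directory and values are illustrative; the statement must be fully qualified and use bind variables, and exception handling is omitted):

    String schema = "CREATE TABLE myKs.myTable (k int PRIMARY KEY, v text)";
    String update = "UPDATE myKs.myTable SET v = ? WHERE k = ?";

    CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                              .inDirectory("/tmp/myKs/myTable")
                                              .forTable(schema)
                                              .using(update)
                                              .withBufferSizeInMiB(128)
                                              .build();
    writer.addRow("hello", 1);   // bind values in statement order: v, then k
    writer.close();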
diff --git a/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java b/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java
index 93be2ee..991a91d 100644
--- a/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java
+++ b/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java
@@ -17,7 +17,8 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
+
+import org.apache.cassandra.io.util.File;
 
 public class CorruptSSTableException extends RuntimeException
 {
diff --git a/src/java/org/apache/cassandra/io/sstable/Descriptor.java b/src/java/org/apache/cassandra/io/sstable/Descriptor.java
index 85f7ef6..75d5185 100644
--- a/src/java/org/apache/cassandra/io/sstable/Descriptor.java
+++ b/src/java/org/apache/cassandra/io/sstable/Descriptor.java
@@ -17,9 +17,6 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
-import java.io.IOError;
-import java.io.IOException;
 import java.util.*;
 import java.util.regex.Pattern;
 
@@ -32,14 +29,15 @@
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.sstable.metadata.IMetadataSerializer;
 import org.apache.cassandra.io.sstable.metadata.MetadataSerializer;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.utils.Pair;
-import org.apache.cassandra.utils.UUIDGen;
 
 import static org.apache.cassandra.io.sstable.Component.separator;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * A SSTable is described by the keyspace and column family it contains data
- * for, a generation (where higher generations contain more recent data) and
+ * for, an id (a generation, where higher generations contain more recent data) and
  * an alphabetic version string.
  *
  * A descriptor can be marked as temporary, which influences generated filenames.
@@ -51,7 +49,9 @@
 
     public static String TMP_EXT = ".tmp";
 
-    private static final Splitter filenameSplitter = Splitter.on('-');
+    public static final char FILENAME_SEPARATOR = '-';
+
+    private static final Splitter filenameSplitter = Splitter.on(FILENAME_SEPARATOR);
 
     /** canonicalized path to the directory where SSTable resides */
     public final File directory;
@@ -59,7 +59,7 @@
     public final Version version;
     public final String ksname;
     public final String cfname;
-    public final int generation;
+    public final SSTableId id;
     public final SSTableFormat.Type formatType;
     private final int hashCode;
 
@@ -67,53 +67,37 @@
      * A descriptor that assumes CURRENT_VERSION.
      */
     @VisibleForTesting
-    public Descriptor(File directory, String ksname, String cfname, int generation)
+    public Descriptor(File directory, String ksname, String cfname, SSTableId id)
     {
-        this(SSTableFormat.Type.current().info.getLatestVersion(), directory, ksname, cfname, generation, SSTableFormat.Type.current());
+        this(SSTableFormat.Type.current().info.getLatestVersion(), directory, ksname, cfname, id, SSTableFormat.Type.current());
     }
 
     /**
      * Constructor for sstable writers only.
      */
-    public Descriptor(File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType)
+    public Descriptor(File directory, String ksname, String cfname, SSTableId id, SSTableFormat.Type formatType)
     {
-        this(formatType.info.getLatestVersion(), directory, ksname, cfname, generation, formatType);
+        this(formatType.info.getLatestVersion(), directory, ksname, cfname, id, formatType);
     }
 
     @VisibleForTesting
-    public Descriptor(String version, File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType)
+    public Descriptor(String version, File directory, String ksname, String cfname, SSTableId id, SSTableFormat.Type formatType)
     {
-        this(formatType.info.getVersion(version), directory, ksname, cfname, generation, formatType);
+        this(formatType.info.getVersion(version), directory, ksname, cfname, id, formatType);
     }
 
-    public Descriptor(Version version, File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType)
+    public Descriptor(Version version, File directory, String ksname, String cfname, SSTableId id, SSTableFormat.Type formatType)
     {
         assert version != null && directory != null && ksname != null && cfname != null && formatType.info.getLatestVersion().getClass().equals(version.getClass());
         this.version = version;
-        try
-        {
-            this.directory = directory.getCanonicalFile();
-        }
-        catch (IOException e)
-        {
-            throw new IOError(e);
-        }
+        this.directory = directory.toCanonical();
         this.ksname = ksname;
         this.cfname = cfname;
-        this.generation = generation;
+        this.id = id;
         this.formatType = formatType;
 
-        hashCode = Objects.hashCode(version, this.directory, generation, ksname, cfname, formatType);
-    }
-
-    public Descriptor withGeneration(int newGeneration)
-    {
-        return new Descriptor(version, directory, ksname, cfname, newGeneration, formatType);
-    }
-
-    public Descriptor withFormatType(SSTableFormat.Type newType)
-    {
-        return new Descriptor(newType.info.getLatestVersion(), directory, ksname, cfname, generation, newType);
+        // directory is unnecessary for hashCode, and for simulator consistency we do not include it
+        hashCode = Objects.hashCode(version, id, ksname, cfname, formatType);
     }
 
     public String tmpFilenameFor(Component component)
@@ -128,7 +112,7 @@
     {
         // Use UUID to handle concurrent streamings on the same sstable.
         // TMP_EXT allows temp file to be removed by {@link ColumnFamilyStore#scrubDataDirectories}
-        return String.format("%s.%s%s", filenameFor(component), UUIDGen.getTimeUUID(), TMP_EXT);
+        return String.format("%s.%s%s", filenameFor(component), nextTimeUUID(), TMP_EXT);
     }
 
     public String filenameFor(Component component)
@@ -136,10 +120,15 @@
         return baseFilename() + separator + component.name();
     }
 
+    public File fileFor(Component component)
+    {
+        return new File(filenameFor(component));
+    }
+
     public String baseFilename()
     {
         StringBuilder buff = new StringBuilder();
-        buff.append(directory).append(File.separatorChar);
+        buff.append(directory).append(File.pathSeparator());
         appendFileName(buff);
         return buff.toString();
     }
@@ -147,7 +136,7 @@
     private void appendFileName(StringBuilder buff)
     {
         buff.append(version).append(separator);
-        buff.append(generation);
+        buff.append(id.toString());
         buff.append(separator).append(formatType.name);
     }
 
@@ -156,7 +145,7 @@
         final StringBuilder buff = new StringBuilder();
         if (Directories.isSecondaryIndexFolder(directory))
         {
-            buff.append(directory.getName()).append(File.separator);
+            buff.append(directory.name()).append(File.pathSeparator());
         }
 
         appendFileName(buff);
@@ -172,7 +161,7 @@
     /** Return any temporary files found in the directory */
     public List<File> getTemporaryFiles()
     {
-        File[] tmpFiles = directory.listFiles((dir, name) ->
+        File[] tmpFiles = directory.tryList((dir, name) ->
                                               name.endsWith(Descriptor.TMP_EXT));
 
         List<File> ret = new ArrayList<>(tmpFiles.length);
@@ -184,7 +173,7 @@
 
     public static boolean isValidFile(File file)
     {
-        String filename = file.getName();
+        String filename = file.name();
         return filename.endsWith(".db") && !LEGACY_TMP_REGEX.matcher(filename).matches();
     }
 
@@ -242,9 +231,9 @@
         // We need to extract the keyspace and table names from the parent directories, so make sure we deal with the
         // absolute path.
         if (!file.isAbsolute())
-            file = file.getAbsoluteFile();
+            file = file.toAbsolute();
 
-        String name = file.getName();
+        String name = file.name();
         List<String> tokens = filenameSplitter.splitToList(name);
         int size = tokens.size();
 
@@ -266,14 +255,14 @@
         if (!Version.validate(versionString))
             throw invalidSSTable(name, "invalid version %s", versionString);
 
-        int generation;
+        SSTableId id;
         try
         {
-            generation = Integer.parseInt(tokens.get(1));
+            id = SSTableIdFactory.instance.fromString(tokens.get(1));
         }
-        catch (NumberFormatException e)
+        catch (RuntimeException e)
         {
-            throw invalidSSTable(name, "the 'generation' part of the name doesn't parse as a number");
+            throw invalidSSTable(name, "the 'id' part (%s) of the name doesn't parse as a valid unique identifier", tokens.get(1));
         }
 
         String formatString = tokens.get(2);
@@ -282,7 +271,7 @@
         {
             format = SSTableFormat.Type.validate(formatString);
         }
-        catch (IllegalArgumentException e)
+        catch (RuntimeException e)
         {
             throw invalidSSTable(name, "unknown 'format' part (%s)", formatString);
         }
@@ -300,30 +289,30 @@
         String indexName = "";
         if (Directories.isSecondaryIndexFolder(tableDir))
         {
-            indexName = tableDir.getName();
+            indexName = tableDir.name();
             tableDir = parentOf(name, tableDir);
         }
 
         // Then it can be a backup or a snapshot
-        if (tableDir.getName().equals(Directories.BACKUPS_SUBDIR) && tableDir.getParentFile().getName().contains("-"))
-            tableDir = tableDir.getParentFile();
+        if (tableDir.name().equals(Directories.BACKUPS_SUBDIR) && tableDir.parent().name().contains("-"))
+            tableDir = tableDir.parent();
         else
         {
             File keyspaceOrSnapshotDir = parentOf(name, tableDir);
-            if (keyspaceOrSnapshotDir.getName().equals(Directories.SNAPSHOT_SUBDIR)
-                && parentOf(name, keyspaceOrSnapshotDir).getName().contains("-"))
+            if (keyspaceOrSnapshotDir.name().equals(Directories.SNAPSHOT_SUBDIR)
+                && parentOf(name, keyspaceOrSnapshotDir).name().contains("-"))
                 tableDir = parentOf(name, keyspaceOrSnapshotDir);
         }
 
-        String table = tableDir.getName().split("-")[0] + indexName;
-        String keyspace = parentOf(name, tableDir).getName();
+        String table = tableDir.name().split("-")[0] + indexName;
+        String keyspace = parentOf(name, tableDir).name();
 
-        return Pair.create(new Descriptor(version, directory, keyspace, table, generation, format), component);
+        return Pair.create(new Descriptor(version, directory, keyspace, table, id, format), component);
     }
 
     private static File parentOf(String name, File file)
     {
-        File parent = file.getParentFile();
+        File parent = file.parent();
         if (parent == null)
             throw invalidSSTable(name, "cannot extract keyspace and table name; make sure the sstable is in the proper sub-directories");
         return parent;
@@ -362,7 +351,7 @@
             return false;
         Descriptor that = (Descriptor)o;
         return that.directory.equals(this.directory)
-                       && that.generation == this.generation
+                       && that.id.equals(this.id)
                        && that.ksname.equals(this.ksname)
                        && that.cfname.equals(this.cfname)
                        && that.version.equals(this.version)
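The Descriptor changes above replace the int generation with an SSTableId parsed via SSTableIdFactory.instance.fromString(). A small sketch of building a descriptor and deriving a component file under the new scheme (keyspace, table and directory are hypothetical; "3" is assumed to parse as a legacy sequence-based id):

    import org.apache.cassandra.io.sstable.Component;
    import org.apache.cassandra.io.sstable.Descriptor;
    import org.apache.cassandra.io.sstable.SSTableId;
    import org.apache.cassandra.io.sstable.SSTableIdFactory;
    import org.apache.cassandra.io.util.File;

    File dir = new File("/var/lib/cassandra/data/myKs/myTable");
    SSTableId id = SSTableIdFactory.instance.fromString("3");
    Descriptor desc = new Descriptor(dir, "myKs", "myTable", id);
    File dataFile = desc.fileFor(Component.DATA);   // resolves the -Data.db path under the id-based naming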
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java
index e24436d..fa0fb2c 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java
@@ -27,7 +27,6 @@
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.ByteArrayAccessor;
 import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -36,7 +35,7 @@
 
 /**
  * {@code IndexInfo} is embedded in the indexed version of {@link RowIndexEntry}.
- * Each instance roughly covers a range of {@link org.apache.cassandra.config.Config#column_index_size_in_kb column_index_size_in_kb} kB
+ * Each instance roughly covers a range of {@link org.apache.cassandra.config.Config#column_index_size column_index_size} KiB
  * and contains the first and last clustering value (or slice bound), its offset in the data file and width in the data file.
  * <p>
  * Each {@code IndexInfo} object is serialized as follows.
@@ -59,7 +58,7 @@
  */
 public class IndexInfo
 {
-    private static final long EMPTY_SIZE = ObjectSizes.measure(new IndexInfo(null, null, 0, 0, null));
+    public static final long EMPTY_SIZE = ObjectSizes.measure(new IndexInfo(null, null, 0, 0, null));
 
     public final long offset;
     public final long width;
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryBuilder.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryBuilder.java
index cb6fcc0..75cca84 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryBuilder.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryBuilder.java
@@ -211,7 +211,7 @@
             else
             {
                 // we cannot fully sample this sstable due to too much memory in the index summary, so let's tell the user
-                logger.error("Memory capacity of index summary exceeded (2GB), index summary will not cover full sstable, " +
+                logger.error("Memory capacity of index summary exceeded (2GiB), index summary will not cover full sstable, " +
                              "you should increase min_sampling_level");
             }
         }
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
index 396801a..b11ad2b 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
@@ -30,10 +30,12 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
+
+import com.codahale.metrics.Timer;
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -51,6 +53,7 @@
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.WrappedRunnable;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 /**
  * Manages the fixed-size memory pool for index summaries, periodically resizing them
  * in order to give more memory to hot sstables and less memory to cold sstables.
@@ -63,7 +66,7 @@
 
     private long memoryPoolBytes;
 
-    private final DebuggableScheduledThreadPoolExecutor executor;
+    private final ScheduledExecutorPlus executor;
 
     // our next scheduled resizing run
     private ScheduledFuture future;
@@ -76,14 +79,14 @@
 
     private IndexSummaryManager()
     {
-        executor = new DebuggableScheduledThreadPoolExecutor(1, "IndexSummaryManager", Thread.MIN_PRIORITY);
+        executor = executorFactory().scheduled(false, "IndexSummaryManager", Thread.MIN_PRIORITY);
 
-        long indexSummarySizeInMB = DatabaseDescriptor.getIndexSummaryCapacityInMB();
+        long indexSummarySizeInMB = DatabaseDescriptor.getIndexSummaryCapacityInMiB();
         int interval = DatabaseDescriptor.getIndexSummaryResizeIntervalInMinutes();
         logger.info("Initializing index summary manager with a memory pool size of {} MB and a resize interval of {} minutes",
                     indexSummarySizeInMB, interval);
 
-        setMemoryPoolCapacityInMB(DatabaseDescriptor.getIndexSummaryCapacityInMB());
+        setMemoryPoolCapacityInMB(DatabaseDescriptor.getIndexSummaryCapacityInMiB());
         setResizeIntervalInMinutes(DatabaseDescriptor.getIndexSummaryResizeIntervalInMinutes());
     }
 
@@ -229,7 +232,7 @@
         Pair<Long, Map<TableId, LifecycleTransaction>> redistributionTransactionInfo = getRestributionTransactions();
         Map<TableId, LifecycleTransaction> transactions = redistributionTransactionInfo.right;
         long nonRedistributingOffHeapSize = redistributionTransactionInfo.left;
-        try
+        try (Timer.Context ctx = CompactionManager.instance.getMetrics().indexSummaryRedistributionTime.time())
         {
             redistributeSummaries(new IndexSummaryRedistribution(transactions,
                                                                  nonRedistributingOffHeapSize,
@@ -277,6 +280,11 @@
     @VisibleForTesting
     public void shutdownAndWait(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
     {
+        if (future != null)
+        {
+            future.cancel(false);
+            future = null;
+        }
         ExecutorUtils.shutdownAndWait(timeout, unit, executor);
     }
 }
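The change above times each redistribution run by opening a Codahale Timer.Context in try-with-resources. A generic sketch of that pattern, assuming a hypothetical registry and task rather than the patch's CompactionManager metric:

    import com.codahale.metrics.MetricRegistry;
    import com.codahale.metrics.Timer;

    static final MetricRegistry registry = new MetricRegistry();
    static final Timer redistributionTime = registry.timer("IndexSummaryRedistributionTime");

    static void timedRun(Runnable task)
    {
        try (Timer.Context ctx = redistributionTime.time())
        {
            task.run();   // the elapsed time is recorded when ctx is closed
        }
    }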
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java
index 3382350..9ba3d40 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java
@@ -27,7 +27,7 @@
 
     /**
      * Returns the current actual off-heap memory usage of the index summaries for all non-compacting sstables.
-     * @return The amount of memory used in MB.
+     * @return The amount of memory used in MiB.
      */
     public double getMemoryPoolSizeInMB();
 
@@ -41,5 +41,10 @@
     public void redistributeSummaries() throws IOException;
 
     public int getResizeIntervalInMinutes();
+
+    /**
+     * Set resizeIntervalInMinutes = -1 to disable resizing; this is the equivalent of index_summary_resize_interval
+     * being set to null in cassandra.yaml.
+     */
     public void setResizeIntervalInMinutes(int resizeIntervalInMinutes);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java
index 90a8621..8bbe709 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java
@@ -25,7 +25,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -43,9 +43,12 @@
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Refs;
 
 import static org.apache.cassandra.io.sstable.Downsampling.BASE_SAMPLING_LEVEL;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class IndexSummaryRedistribution extends CompactionInfo.Holder
 {
@@ -61,7 +64,7 @@
     private final Map<TableId, LifecycleTransaction> transactions;
     private final long nonRedistributingOffHeapSize;
     private final long memoryPoolBytes;
-    private final UUID compactionId;
+    private final TimeUUID compactionId;
     private volatile long remainingSpace;
 
     /**
@@ -75,11 +78,12 @@
         this.transactions = transactions;
         this.nonRedistributingOffHeapSize = nonRedistributingOffHeapSize;
         this.memoryPoolBytes = memoryPoolBytes;
-        this.compactionId = UUID.randomUUID();
+        this.compactionId = nextTimeUUID();
     }
 
     public List<SSTableReader> redistributeSummaries() throws IOException
     {
+        long start = nanoTime();
         logger.info("Redistributing index summaries");
         List<SSTableReader> redistribute = new ArrayList<>();
         for (LifecycleTransaction txn : transactions.values())
@@ -91,7 +95,7 @@
         for (SSTableReader sstable : redistribute)
             total += sstable.getIndexSummaryOffHeapSize();
 
-        logger.trace("Beginning redistribution of index summaries for {} sstables with memory pool size {} MB; current spaced used is {} MB",
+        logger.info("Beginning redistribution of index summaries for {} sstables with memory pool size {} MiB; current spaced used is {} MiB",
                      redistribute.size(), memoryPoolBytes / 1024L / 1024L, total / 1024.0 / 1024.0);
 
         final Map<SSTableReader, Double> readRates = new HashMap<>(redistribute.size());
@@ -116,7 +120,7 @@
 
         long remainingBytes = memoryPoolBytes - nonRedistributingOffHeapSize;
 
-        logger.trace("Index summaries for compacting SSTables are using {} MB of space",
+        logger.trace("Index summaries for compacting SSTables are using {} MiB of space",
                      (memoryPoolBytes - remainingBytes) / 1024.0 / 1024.0);
         List<SSTableReader> newSSTables;
         try (Refs<SSTableReader> refs = Refs.ref(sstablesByHotness))
@@ -129,9 +133,9 @@
         total = nonRedistributingOffHeapSize;
         for (SSTableReader sstable : newSSTables)
             total += sstable.getIndexSummaryOffHeapSize();
-        if (logger.isTraceEnabled())
-            logger.trace("Completed resizing of index summaries; current approximate memory used: {}",
-                     FBUtilities.prettyPrintMemory(total));
+
+        logger.info("Completed resizing of index summaries; current approximate memory used: {} MiB, time spent: {}ms",
+                    total / 1024.0 / 1024.0, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
 
         return newSSTables;
     }
@@ -243,6 +247,7 @@
         }
 
         // downsample first, then upsample
+        logger.info("index summaries: downsample: {}, force resample: {}, upsample: {}, force upsample: {}", toDownsample.size(), forceResample.size(), toUpsample.size(), forceUpsample.size());
         toDownsample.addAll(forceResample);
         toDownsample.addAll(toUpsample);
         toDownsample.addAll(forceUpsample);
diff --git a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
index 1a5792c..ceacf87 100644
--- a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
+++ b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -26,6 +25,7 @@
 import org.apache.cassandra.db.RowIndexEntry;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.AbstractIterator;
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTable.java b/src/java/org/apache/cassandra/io/sstable/SSTable.java
index 2a0d3f7..81030c2 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTable.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTable.java
@@ -17,17 +17,24 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.*;
+
+import java.io.FileNotFoundException;
+import java.io.IOError;
+import java.io.IOException;
+import java.io.PrintWriter;
 import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.util.*;
 import java.util.concurrent.CopyOnWriteArraySet;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
 import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
-import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,14 +46,17 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.DiskOptimizationStrategy;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.memory.HeapCloner;
 
+import static org.apache.cassandra.io.util.File.WriteMode.APPEND;
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
 
@@ -95,6 +105,12 @@
         this.optimizationStrategy = Objects.requireNonNull(optimizationStrategy);
     }
 
+    @VisibleForTesting
+    public Set<Component> getComponents()
+    {
+        return ImmutableSet.copyOf(components);
+    }
+
     /**
      * We use a ReferenceQueue to manage deleting files that have been compacted
      * and for which no more SSTable references exist.  But this is not guaranteed
@@ -233,7 +249,7 @@
             {
                 return readTOC(desc);
             }
-            catch (FileNotFoundException e)
+            catch (FileNotFoundException | NoSuchFileException e)
             {
                 Set<Component> components = discoverComponentsFor(desc);
                 if (components.isEmpty())
@@ -267,7 +283,7 @@
     /** @return An estimate of the number of keys contained in the given index file. */
     public static long estimateRowsFromIndex(RandomAccessReader ifile, Descriptor descriptor) throws IOException
     {
-        // collect sizes for the first 10000 keys, or first 10 megabytes of data
+        // collect sizes for the first 10000 keys, or the first 10 MB (10,000,000 bytes) of data
         final int SAMPLES_CAP = 10000, BYTES_CAP = (int)Math.min(10000000, ifile.length());
         int keys = 0;
         while (ifile.getFilePointer() < BYTES_CAP && keys < SAMPLES_CAP)
@@ -317,7 +333,7 @@
     protected static Set<Component> readTOC(Descriptor descriptor, boolean skipMissing) throws IOException
     {
         File tocFile = new File(descriptor.filenameFor(Component.TOC));
-        List<String> componentNames = Files.readLines(tocFile, Charset.defaultCharset());
+        List<String> componentNames = Files.readAllLines(tocFile.toPath());
         Set<Component> components = Sets.newHashSetWithExpectedSize(componentNames.size());
         for (String componentName : componentNames)
         {
@@ -336,13 +352,13 @@
     protected static void appendTOC(Descriptor descriptor, Collection<Component> components)
     {
         File tocFile = new File(descriptor.filenameFor(Component.TOC));
-        try (FileOutputStream fos = new FileOutputStream(tocFile);
-             PrintWriter w = new PrintWriter(fos))
+        try (FileOutputStreamPlus out = tocFile.newOutputStream(APPEND);
+             PrintWriter w = new PrintWriter(out))
         {
             for (Component component : components)
                 w.println(component.name);
             w.flush();
-            fos.getFD().sync();
+            out.sync();
         }
         catch (IOException e)
         {
@@ -368,7 +384,7 @@
         return AbstractBounds.bounds(first.getToken(), true, last.getToken(), true);
     }
 
-    public static void validateRepairedMetadata(long repairedAt, UUID pendingRepair, boolean isTransient)
+    public static void validateRepairedMetadata(long repairedAt, TimeUUID pendingRepair, boolean isTransient)
     {
         Preconditions.checkArgument((pendingRepair == NO_PENDING_REPAIR) || (repairedAt == UNREPAIRED_SSTABLE),
                                     "pendingRepair cannot be set on a repaired sstable");
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java b/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java
index 3577259..f78200a 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
@@ -36,6 +35,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -298,7 +298,17 @@
     {
         Stream.of(path)
               .flatMap(SSTableHeaderFix::maybeExpandDirectory)
-              .filter(p -> Descriptor.fromFilenameWithComponent(p.toFile()).right.type == Component.Type.DATA)
+              .filter(p -> {
+                  try
+                  {
+                      return Descriptor.fromFilenameWithComponent(new File(p)).right.type == Component.Type.DATA;
+                  }
+                  catch (IllegalArgumentException t)
+                  {
+                      logger.info("Couldn't parse filename {}, ignoring", p);
+                      return false;
+                  }
+              })
               .map(Path::toString)
               .map(Descriptor::fromFilename)
               .forEach(descriptors::add);
@@ -888,26 +898,26 @@
         private void scanDataDirectory(Directories.DataDirectory dataDirectory)
         {
             info.accept(String.format("Scanning data directory %s", dataDirectory.location));
-            File[] ksDirs = dataDirectory.location.listFiles();
+            File[] ksDirs = dataDirectory.location.tryList();
             if (ksDirs == null)
                 return;
             for (File ksDir : ksDirs)
             {
-                if (!ksDir.isDirectory() || !ksDir.canRead())
+                if (!ksDir.isDirectory() || !ksDir.isReadable())
                     continue;
 
-                String name = ksDir.getName();
+                String name = ksDir.name();
 
                 // silently ignore all system keyspaces
                 if (SchemaConstants.isLocalSystemKeyspace(name) || SchemaConstants.isReplicatedSystemKeyspace(name))
                     continue;
 
-                File[] tabDirs = ksDir.listFiles();
+                File[] tabDirs = ksDir.tryList();
                 if (tabDirs == null)
                     continue;
                 for (File tabDir : tabDirs)
                 {
-                    if (!tabDir.isDirectory() || !tabDir.canRead())
+                    if (!tabDir.isDirectory() || !tabDir.isReadable())
                         continue;
 
                     processFileOrDirectory(tabDir.toPath());
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableId.java b/src/java/org/apache/cassandra/io/sstable/SSTableId.java
new file mode 100644
index 0000000..a3d95dd
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableId.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.sstable;
+
+import java.nio.ByteBuffer;
+import java.util.function.Supplier;
+import java.util.stream.Stream;
+
+import org.apache.cassandra.io.util.File;
+
+/**
+ * Represents a unique identifier in the sstable descriptor filename.
+ * This guarantees the uniqueness of each sstable file for a given table on a single node. However, new implementations
+ * should ensure uniqueness across the entire cluster. The legacy implementation, which does not satisfy cluster-wide
+ * uniqueness, will be deprecated and eventually removed.
+ * <p>
+ * A new implementation must adhere to the following invariants:
+ * - Must be locally sortable - that is, the comparison must reflect the comparison of generation times of identifiers
+ * generated on the same node
+ * - String representation must *not* include the {@link Descriptor#FILENAME_SEPARATOR} character, see {@link Descriptor#fromFilenameWithComponent(File)}
+ * - Must be case-insensitive because sstables can be stored on a case-insensitive file system
+ * <p>
+ */
+public interface SSTableId
+{
+    /**
+     * Creates a byte representation of the identifier that can be parsed by
+     * {@link Builder#fromBytes(ByteBuffer)}
+     */
+    ByteBuffer asBytes();
+
+    /**
+     * Creates a string representation of the identifier that can be parsed by
+     * {@link Builder#fromString(String)}
+     * <p>
+     * Must not contain any {@link Descriptor#FILENAME_SEPARATOR} character as it is used in the Descriptor;
+     * see {@link Descriptor#fromFilenameWithComponent(File)}
+     */
+    @Override
+    String toString();
+
+    /**
+     * Builder that creates instances of a particular implementation of {@link SSTableId}.
+     */
+    interface Builder<T extends SSTableId>
+    {
+        /**
+         * Creates a new generator of identifiers. Each supplied value must differ from all previously generated
+         * values and from all of the provided existing identifiers.
+         */
+        Supplier<T> generator(Stream<SSTableId> existingIdentifiers);
+
+        boolean isUniqueIdentifier(String str);
+
+        boolean isUniqueIdentifier(ByteBuffer bytes);
+
+        /**
+         * Creates an identifier instance from its string representation
+         *
+         * @param str string representation as returned by {@link SSTableId#toString()}
+         * @throws IllegalArgumentException when the provided string is not a valid string representation of the identifier
+         */
+        T fromString(String str) throws IllegalArgumentException;
+
+        /**
+         * Creates an identifier instance from its binary representation
+         * <p>
+         * The method expects the identifier to be encoded in all remaining bytes of the buffer and does not advance the
+         * buffer's position.
+         *
+         * @param bytes binary representation as returned by {@link #asBytes()}
+         * @throws IllegalArgumentException when the provided bytes are not a valid binary representation of the identifier
+         */
+        T fromBytes(ByteBuffer bytes) throws IllegalArgumentException;
+    }
+}
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableIdFactory.java b/src/java/org/apache/cassandra/io/sstable/SSTableIdFactory.java
new file mode 100644
index 0000000..d5b3276
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableIdFactory.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.sstable;
+
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+import java.util.stream.Stream;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+public class SSTableIdFactory
+{
+    public static final SSTableIdFactory instance = new SSTableIdFactory();
+
+    /**
+     * Constructs an instance of {@link SSTableId} from the given string representation.
+     * It finds the right builder by checking whether the given string is a representation of the corresponding identifier
+     * type using the {@link SSTableId.Builder#isUniqueIdentifier(String)} method.
+     *
+     * @throws IllegalArgumentException when the provided string representation does not match any identifier type
+     */
+    public SSTableId fromString(String str) throws IllegalArgumentException
+    {
+        return Stream.of(UUIDBasedSSTableId.Builder.instance, SequenceBasedSSTableId.Builder.instance)
+                     .filter(b -> b.isUniqueIdentifier(str))
+                     .findFirst()
+                     .map(b -> b.fromString(str))
+                     .orElseThrow(() -> new IllegalArgumentException("String '" + str + "' does not match any SSTable identifier format"));
+    }
+
+    /**
+     * Constructs an instance of {@link SSTableId} from the given bytes.
+     * It finds the right builder by checking whether the given buffer is a representation of the corresponding identifier
+     * type using the {@link SSTableId.Builder#isUniqueIdentifier(ByteBuffer)} method.
+     *
+     * The method expects the identifier to be encoded in all remaining bytes of the buffer and does not advance the
+     * buffer's position.
+     *
+     * @throws IllegalArgumentException when the provided binary representation does not match any identifier type
+     */
+    public SSTableId fromBytes(ByteBuffer bytes)
+    {
+        return Stream.of(UUIDBasedSSTableId.Builder.instance, SequenceBasedSSTableId.Builder.instance)
+                     .filter(b -> b.isUniqueIdentifier(bytes))
+                     .findFirst()
+                     .map(b -> b.fromBytes(bytes))
+                     .orElseThrow(() -> new IllegalArgumentException("Byte buffer of length " + bytes.remaining() + " does not match any SSTable identifier format"));
+    }
+
+    /**
+     * Returns the default identifier builder.
+     */
+    @SuppressWarnings("unchecked")
+    public SSTableId.Builder<SSTableId> defaultBuilder()
+    {
+        SSTableId.Builder<? extends SSTableId> builder = DatabaseDescriptor.isUUIDSSTableIdentifiersEnabled()
+                                                         ? UUIDBasedSSTableId.Builder.instance
+                                                         : SequenceBasedSSTableId.Builder.instance;
+        return (SSTableId.Builder<SSTableId>) builder;
+    }
+
+    /**
+     * Compares sstable identifiers so that a UUID-based identifier is always greater than a sequence-based identifier
+     */
+    public final static Comparator<SSTableId> COMPARATOR = Comparator.nullsFirst((id1, id2) -> {
+        if (id1 instanceof UUIDBasedSSTableId)
+        {
+            UUIDBasedSSTableId uuidId1 = (UUIDBasedSSTableId) id1;
+            return (id2 instanceof UUIDBasedSSTableId) ? uuidId1.compareTo((UUIDBasedSSTableId) id2) : 1;
+        }
+        else if (id1 instanceof SequenceBasedSSTableId)
+        {
+            SequenceBasedSSTableId seqId1 = (SequenceBasedSSTableId) id1;
+            return (id2 instanceof SequenceBasedSSTableId) ? seqId1.compareTo((SequenceBasedSSTableId) id2) : -1;
+        }
+        else
+        {
+            throw new AssertionError("Unsupported comparison between " + id1.getClass().getName() + " and " + id2.getClass().getName());
+        }
+    });
+}
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
index 1e980fe..7ddbe72 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
@@ -17,13 +17,13 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.util.*;
 
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 
 import org.apache.cassandra.db.streaming.CassandraOutgoingFile;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.io.FSError;
 import org.apache.cassandra.schema.TableMetadataRef;
@@ -38,6 +38,8 @@
 
 import org.apache.cassandra.utils.concurrent.Ref;
 
+import static org.apache.cassandra.streaming.StreamingChannel.Factory.Global.streamingFactory;
+
 /**
  * Cassandra SSTable bulk loader.
  * Load an externally created sstable into a cluster.
@@ -62,7 +64,7 @@
     public SSTableLoader(File directory, Client client, OutputHandler outputHandler, int connectionsPerHost, String targetKeyspace)
     {
         this.directory = directory;
-        this.keyspace = targetKeyspace != null ? targetKeyspace : directory.getParentFile().getName();
+        this.keyspace = targetKeyspace != null ? targetKeyspace : directory.parent().name();
         this.client = client;
         this.outputHandler = outputHandler;
         this.connectionsPerHost = connectionsPerHost;
@@ -76,8 +78,8 @@
         LifecycleTransaction.getFiles(directory.toPath(),
                                       (file, type) ->
                                       {
-                                          File dir = file.getParentFile();
-                                          String name = file.getName();
+                                          File dir = file.parent();
+                                          String name = file.name();
 
                                           if (type != Directories.FileType.FINAL)
                                           {
@@ -97,7 +99,6 @@
                                           }
 
                                           TableMetadataRef metadata = client.getTableMetadata(desc.cfname);
-
                                           if (metadata == null)
                                           {
                                               outputHandler.output(String.format("Skipping file %s: table %s.%s doesn't exist", name, keyspace, desc.cfname));
@@ -270,9 +271,9 @@
          *
          * @return StreamConnectionFactory to use
          */
-        public StreamConnectionFactory getConnectionFactory()
+        public StreamingChannel.Factory getConnectionFactory()
         {
-            return new DefaultConnectionFactory();
+            return streamingFactory();
         }
 
         /**
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java
index a3d5ae9..e394bbd 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java
@@ -36,7 +36,7 @@
 import org.apache.cassandra.utils.concurrent.Transactional;
 
 /**
- * Wraps one or more writers as output for rewriting one or more readers: every sstable_preemptive_open_interval_in_mb
+ * Wraps one or more writers as output for rewriting one or more readers: every sstable_preemptive_open_interval
  * we look in the summary we're collecting for the latest writer for the penultimate key that we know to have been fully
  * flushed to the index file, and then double check that the key is fully present in the flushed data file.
  * Then we move the starts of each reader forwards to that point, replace them in the Tracker, and attach a runnable
@@ -62,7 +62,7 @@
     private final ILifecycleTransaction transaction; // the readers we are rewriting (updated as they are replaced)
     private final List<SSTableReader> preparedForCommit = new ArrayList<>();
 
-    private long currentlyOpenedEarlyAt; // the position (in MB) in the target file we last (re)opened at
+    private long currentlyOpenedEarlyAt; // the position (in MiB) in the target file we last (re)opened at
 
     private final List<SSTableWriter> writers = new ArrayList<>();
     private final boolean keepOriginals; // true if we do not want to obsolete the originals
@@ -106,7 +106,7 @@
 
     private static long calculateOpenInterval(boolean shouldOpenEarly)
     {
-        long interval = DatabaseDescriptor.getSSTablePreemptiveOpenIntervalInMB() * (1L << 20);
+        long interval = DatabaseDescriptor.getSSTablePreemptiveOpenIntervalInMiB() * (1L << 20);
         if (disableEarlyOpeningForTests || !shouldOpenEarly || interval < 0)
             interval = Long.MAX_VALUE;
         return interval;
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
index 7ac2ebc..3b1ed1b 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
@@ -17,25 +17,29 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Throwables;
 
 import io.netty.util.concurrent.FastThreadLocalThread;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.SerializationHeader;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.db.rows.SerializationHelper;
 import org.apache.cassandra.db.rows.UnfilteredSerializer;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 
 /**
  * A SSTable writer that doesn't assume rows are in sorted order.
@@ -59,7 +63,7 @@
     private final SerializationHeader header;
     private final SerializationHelper helper;
 
-    private final BlockingQueue<Buffer> writeQueue = new SynchronousQueue<Buffer>();
+    private final BlockingQueue<Buffer> writeQueue = newBlockingQueue(0);
     private final DiskWriter diskWriter = new DiskWriter();
 
     SSTableSimpleUnsortedWriter(File directory, TableMetadataRef metadata, RegularAndStaticColumns columns, long bufferSizeInMB)
@@ -165,7 +169,7 @@
             }
             catch (InterruptedException e)
             {
-                throw new RuntimeException(e);
+                throw new UncheckedInterruptedException(e);
             }
         }
     }
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
index 530a03b..c9356a3 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
@@ -17,13 +17,13 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.io.IOException;
 
 import com.google.common.base.Throwables;
 
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableMetadataRef;
 
 /**
@@ -48,7 +48,7 @@
         super(directory, metadata, columns);
     }
 
-    private SSTableTxnWriter getOrCreateWriter()
+    private SSTableTxnWriter getOrCreateWriter() throws IOException
     {
         if (writer == null)
             writer = createWriter();
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java
index cfb1365..83c5542 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java
@@ -20,7 +20,6 @@
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.UUID;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -34,6 +33,7 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
 /**
@@ -99,7 +99,7 @@
     }
 
     @SuppressWarnings("resource") // log and writer closed during doPostCleanup
-    public static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor descriptor, long keyCount, long repairedAt, UUID pendingRepair, boolean isTransient, int sstableLevel, SerializationHeader header)
+    public static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, int sstableLevel, SerializationHeader header)
     {
         LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
         SSTableMultiWriter writer = cfs.createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, sstableLevel, header, txn);
@@ -111,7 +111,7 @@
     public static SSTableTxnWriter createRangeAware(TableMetadataRef metadata,
                                                     long keyCount,
                                                     long repairedAt,
-                                                    UUID pendingRepair,
+                                                    TimeUUID pendingRepair,
                                                     boolean isTransient,
                                                     SSTableFormat.Type type,
                                                     int sstableLevel,
@@ -140,7 +140,7 @@
                                           Descriptor descriptor,
                                           long keyCount,
                                           long repairedAt,
-                                          UUID pendingRepair,
+                                          TimeUUID pendingRepair,
                                           boolean isTransient,
                                           int sstableLevel,
                                           SerializationHeader header,
@@ -153,7 +153,7 @@
         return new SSTableTxnWriter(txn, writer);
     }
 
-    public static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor desc, long keyCount, long repairedAt, UUID pendingRepair, boolean isTransient, SerializationHeader header)
+    public static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor desc, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, SerializationHeader header)
     {
         return create(cfs, desc, keyCount, repairedAt, pendingRepair, isTransient, 0, header);
     }
diff --git a/src/java/org/apache/cassandra/io/sstable/SequenceBasedSSTableId.java b/src/java/org/apache/cassandra/io/sstable/SequenceBasedSSTableId.java
new file mode 100644
index 0000000..acb91f8
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/sstable/SequenceBasedSSTableId.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.sstable;
+
+import java.nio.ByteBuffer;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Generation identifier based on a sequence of integers.
+ * This has been the standard implementation in C* since inception.
+ */
+public class SequenceBasedSSTableId implements SSTableId, Comparable<SequenceBasedSSTableId>
+{
+    public final int generation;
+
+    public SequenceBasedSSTableId(final int generation)
+    {
+        assert generation >= 0;
+
+        this.generation = generation;
+    }
+
+    @Override
+    public int compareTo(SequenceBasedSSTableId o)
+    {
+        if (o == null)
+            return 1;
+        else if (o == this)
+            return 0;
+
+        return Integer.compare(this.generation, o.generation);
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass())
+            return false;
+        SequenceBasedSSTableId that = (SequenceBasedSSTableId) o;
+        return generation == that.generation;
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(generation);
+    }
+
+    @Override
+    public ByteBuffer asBytes()
+    {
+        ByteBuffer bytes = ByteBuffer.allocate(Integer.BYTES);
+        bytes.putInt(0, generation);
+        return bytes;
+    }
+
+    @Override
+    public String toString()
+    {
+        return String.valueOf(generation);
+    }
+
+    public static class Builder implements SSTableId.Builder<SequenceBasedSSTableId>
+    {
+        public final static Builder instance = new Builder();
+
+        private final static Pattern PATTERN = Pattern.compile("\\d+");
+
+        /**
+         * Generates a sequential number to represent an sstable identifier. The first generated identifier will be
+         * one greater than the largest generation number found across the provided existing identifiers.
+         */
+        @Override
+        public Supplier<SequenceBasedSSTableId> generator(Stream<SSTableId> existingIdentifiers)
+        {
+            int value = existingIdentifiers.filter(SequenceBasedSSTableId.class::isInstance)
+                                           .map(SequenceBasedSSTableId.class::cast)
+                                           .mapToInt(id -> id.generation)
+                                           .max()
+                                           .orElse(0);
+
+            AtomicInteger fileIndexGenerator = new AtomicInteger(value);
+            return () -> new SequenceBasedSSTableId(fileIndexGenerator.incrementAndGet());
+        }
+
+        @Override
+        public boolean isUniqueIdentifier(String str)
+        {
+            return str != null && !str.isEmpty() && str.length() <= 10 && PATTERN.matcher(str).matches();
+        }
+
+        @Override
+        public boolean isUniqueIdentifier(ByteBuffer bytes)
+        {
+            return bytes != null && bytes.remaining() == Integer.BYTES && bytes.getInt(0) >= 0;
+        }
+
+        @Override
+        public SequenceBasedSSTableId fromString(String token) throws IllegalArgumentException
+        {
+            return new SequenceBasedSSTableId(Integer.parseInt(token));
+        }
+
+        @Override
+        public SequenceBasedSSTableId fromBytes(ByteBuffer bytes) throws IllegalArgumentException
+        {
+            Preconditions.checkArgument(bytes.remaining() == Integer.BYTES, "Buffer does not have a valid number of bytes remaining. Expecting: %s but was: %s", Integer.BYTES, bytes.remaining());
+            return new SequenceBasedSSTableId(bytes.getInt(0));
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java
index a84f07e..ec28528 100644
--- a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java
@@ -19,7 +19,6 @@
 
 import java.util.Collection;
 import java.util.Collections;
-import java.util.UUID;
 
 import org.apache.cassandra.db.RowIndexEntry;
 import org.apache.cassandra.db.SerializationHeader;
@@ -31,6 +30,7 @@
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class SimpleSSTableMultiWriter implements SSTableMultiWriter
 {
@@ -110,7 +110,7 @@
     public static SSTableMultiWriter create(Descriptor descriptor,
                                             long keyCount,
                                             long repairedAt,
-                                            UUID pendingRepair,
+                                            TimeUUID pendingRepair,
                                             boolean isTransient,
                                             TableMetadataRef metadata,
                                             MetadataCollector metadataCollector,
diff --git a/src/java/org/apache/cassandra/io/sstable/SnapshotDeletingTask.java b/src/java/org/apache/cassandra/io/sstable/SnapshotDeletingTask.java
deleted file mode 100644
index d23c488..0000000
--- a/src/java/org/apache/cassandra/io/sstable/SnapshotDeletingTask.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.io.sstable;
-
-import java.io.File;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.concurrent.ScheduledExecutors;
-import org.apache.cassandra.db.WindowsFailedSnapshotTracker;
-import org.apache.cassandra.io.FSWriteError;
-import org.apache.cassandra.io.util.FileUtils;
-
-public class SnapshotDeletingTask implements Runnable
-{
-    private static final Logger logger = LoggerFactory.getLogger(SnapshotDeletingTask.class);
-
-    public final File path;
-    private static final Queue<Runnable> failedTasks = new ConcurrentLinkedQueue<>();
-
-    public static void addFailedSnapshot(File path)
-    {
-        logger.warn("Failed to delete snapshot [{}]. Will retry after further sstable deletions. Folder will be deleted on JVM shutdown or next node restart on crash.", path);
-        WindowsFailedSnapshotTracker.handleFailedSnapshot(path);
-        failedTasks.add(new SnapshotDeletingTask(path));
-    }
-
-    private SnapshotDeletingTask(File path)
-    {
-        this.path = path;
-    }
-
-    public void run()
-    {
-        try
-        {
-            FileUtils.deleteRecursive(path);
-            logger.info("Successfully deleted snapshot {}.", path);
-        }
-        catch (FSWriteError e)
-        {
-            failedTasks.add(this);
-        }
-    }
-
-    /**
-     * Retry all failed deletions.
-     */
-    public static void rescheduleFailedTasks()
-    {
-        Runnable task;
-        while ( null != (task = failedTasks.poll()))
-            ScheduledExecutors.nonPeriodicTasks.submit(task);
-    }
-
-    @VisibleForTesting
-    public static int pendingDeletionCount()
-    {
-        return failedTasks.size();
-    }
-}
diff --git a/src/java/org/apache/cassandra/io/sstable/UUIDBasedSSTableId.java b/src/java/org/apache/cassandra/io/sstable/UUIDBasedSSTableId.java
new file mode 100644
index 0000000..9cec587
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/sstable/UUIDBasedSSTableId.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.sstable;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.Objects;
+import java.util.function.Supplier;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
+import javax.annotation.Nonnull;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.utils.TimeUUID;
+
+/**
+ * SSTable generation identifiers that can be stored across nodes in one directory/bucket
+ * <p>
+ * Uses UUID v1 (time-based) identifiers
+ */
+public final class UUIDBasedSSTableId implements SSTableId, Comparable<UUIDBasedSSTableId>
+{
+    public final static int STRING_LEN = 28;
+    public final static int BYTES_LEN = 16;
+
+    private final TimeUUID uuid;
+    private final String repr;
+
+    public UUIDBasedSSTableId(TimeUUID uuid)
+    {
+        this.uuid = uuid;
+        this.repr = asString();
+    }
+
+    @Override
+    public ByteBuffer asBytes()
+    {
+        return ByteBuffer.allocate(16)
+                         .order(ByteOrder.BIG_ENDIAN)
+                         .putLong(0, uuid.uuidTimestamp())
+                         .putLong(Long.BYTES, uuid.lsb());
+    }
+
+    private String asString()
+    {
+        long ts = uuid.uuidTimestamp();
+        long nanoPart = ts % 10_000_000;
+        ts = ts / 10_000_000;
+        long seconds = ts % 86_400;
+        ts = ts / 86_400;
+
+        return String.format("%4s_%4s_%5s%13s",
+                             Long.toString(ts, 36),
+                             Long.toString(seconds, 36),
+                             Long.toString(nanoPart, 36),
+                             Long.toUnsignedString(uuid.lsb(), 36)).replace(' ', '0');
+    }
+
+    @Override
+    public String toString()
+    {
+        return repr;
+    }
+
+    @Override
+    public int compareTo(UUIDBasedSSTableId o)
+    {
+        if (o == null)
+            return 1;
+        else if (o == this)
+            return 0;
+
+        return uuid.compareTo(o.uuid);
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass())
+            return false;
+        UUIDBasedSSTableId that = (UUIDBasedSSTableId) o;
+        return uuid.equals(that.uuid);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(uuid);
+    }
+
+    public static class Builder implements SSTableId.Builder<UUIDBasedSSTableId>
+    {
+        public static final Builder instance = new Builder();
+        private final static Pattern PATTERN = Pattern.compile("([0-9a-z]{4})_([0-9a-z]{4})_([0-9a-z]{5})([0-9a-z]{13})", Pattern.CASE_INSENSITIVE);
+
+        /**
+         * Creates a new UUID based identifiers generator.
+         *
+         * @param existingIdentifiers not used by UUID based generator
+         */
+        @Override
+        public Supplier<UUIDBasedSSTableId> generator(Stream<SSTableId> existingIdentifiers)
+        {
+            return () -> new UUIDBasedSSTableId(TimeUUID.Generator.nextTimeUUID());
+        }
+
+        @Override
+        public boolean isUniqueIdentifier(String str)
+        {
+            return str != null && str.length() == STRING_LEN && PATTERN.matcher(str).matches();
+        }
+
+        @Override
+        public boolean isUniqueIdentifier(ByteBuffer bytes)
+        {
+            return bytes != null && bytes.remaining() == BYTES_LEN;
+        }
+
+        @Override
+        public UUIDBasedSSTableId fromString(@Nonnull String s) throws IllegalArgumentException
+        {
+            Matcher m = PATTERN.matcher(s);
+            if (!m.matches())
+                throw new IllegalArgumentException("String '" + s + "' is not a valid UUID based sstable identifier");
+
+            long dayPart = Long.parseLong(m.group(1), 36);
+            long secondPart = Long.parseLong(m.group(2), 36);
+            long nanoPart = Long.parseLong(m.group(3), 36);
+            long ts = (dayPart * 86_400 + secondPart) * 10_000_000 + nanoPart;
+            long randomPart = Long.parseUnsignedLong(m.group(4), 36);
+
+            TimeUUID uuid = new TimeUUID(ts, randomPart);
+            return new UUIDBasedSSTableId(uuid);
+        }
+
+        @Override
+        public UUIDBasedSSTableId fromBytes(@Nonnull ByteBuffer bytes) throws IllegalArgumentException
+        {
+            Preconditions.checkArgument(bytes.remaining() == UUIDBasedSSTableId.BYTES_LEN, "Buffer does not have a valid number of bytes remaining. Expecting: %s but was: %s", UUIDBasedSSTableId.BYTES_LEN, bytes.remaining());
+            bytes = bytes.order() == ByteOrder.BIG_ENDIAN ? bytes : bytes.duplicate().order(ByteOrder.BIG_ENDIAN);
+            TimeUUID uuid = new TimeUUID(bytes.getLong(0), bytes.getLong(Long.BYTES));
+            return new UUIDBasedSSTableId(uuid);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java
index 43b1957..e9b15b1 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.UUID;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -35,6 +34,7 @@
 import org.apache.cassandra.io.sstable.SSTableMultiWriter;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class RangeAwareSSTableWriter implements SSTableMultiWriter
 {
@@ -43,7 +43,7 @@
     private final int sstableLevel;
     private final long estimatedKeys;
     private final long repairedAt;
-    private final UUID pendingRepair;
+    private final TimeUUID pendingRepair;
     private final boolean isTransient;
     private final SSTableFormat.Type format;
     private final SerializationHeader header;
@@ -54,7 +54,7 @@
     private final List<SSTableReader> finishedReaders = new ArrayList<>();
     private SSTableMultiWriter currentWriter = null;
 
-    public RangeAwareSSTableWriter(ColumnFamilyStore cfs, long estimatedKeys, long repairedAt, UUID pendingRepair, boolean isTransient, SSTableFormat.Type format, int sstableLevel, long totalSize, LifecycleNewTracker lifecycleNewTracker, SerializationHeader header) throws IOException
+    public RangeAwareSSTableWriter(ColumnFamilyStore cfs, long estimatedKeys, long repairedAt, TimeUUID pendingRepair, boolean isTransient, SSTableFormat.Type format, int sstableLevel, long totalSize, LifecycleNewTracker lifecycleNewTracker, SerializationHeader header) throws IOException
     {
         DiskBoundaries db = cfs.getDiskBoundaries();
         directories = db.directories;
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableFlushObserver.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableFlushObserver.java
index f0b6bac..569925e 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableFlushObserver.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableFlushObserver.java
@@ -52,4 +52,4 @@
      * Called when all data is written to the file and it's ready to be finished up.
      */
     void complete();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
index 45297ef..f26cf65 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
@@ -17,9 +17,11 @@
  */
 package org.apache.cassandra.io.sstable.format;
 
-import java.io.*;
+
+import java.io.IOException;
 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
+import java.nio.file.NoSuchFileException;
 import java.util.*;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -30,6 +32,10 @@
 import com.google.common.collect.Ordering;
 import com.google.common.primitives.Longs;
 import com.google.common.util.concurrent.RateLimiter;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.db.rows.UnfilteredSource;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,8 +45,7 @@
 
 import org.apache.cassandra.cache.InstrumentingCache;
 import org.apache.cassandra.cache.KeyCacheKey;
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
@@ -69,12 +74,11 @@
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.utils.*;
-import org.apache.cassandra.utils.concurrent.OpOrder;
-import org.apache.cassandra.utils.concurrent.Ref;
-import org.apache.cassandra.utils.concurrent.SelfRefCounted;
-import org.apache.cassandra.utils.BloomFilterSerializer;
+import org.apache.cassandra.utils.concurrent.*;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.db.Directories.SECONDARY_INDEX_NAME_SEPARATOR;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 
 /**
  * An SSTableReader can be constructed in a number of places, but typically is either
@@ -135,21 +139,25 @@
  *
  * TODO: fill in details about Tracker and lifecycle interactions for tools, and for compaction strategies
  */
-public abstract class SSTableReader extends SSTable implements SelfRefCounted<SSTableReader>
+public abstract class SSTableReader extends SSTable implements UnfilteredSource, SelfRefCounted<SSTableReader>
 {
     private static final Logger logger = LoggerFactory.getLogger(SSTableReader.class);
 
-    private static final ScheduledThreadPoolExecutor syncExecutor = initSyncExecutor();
-    private static ScheduledThreadPoolExecutor initSyncExecutor()
+    private static final boolean TRACK_ACTIVITY = CassandraRelevantProperties.DISABLE_SSTABLE_ACTIVITY_TRACKING.getBoolean();
+
+    private static final ScheduledExecutorPlus syncExecutor = initSyncExecutor();
+    private static ScheduledExecutorPlus initSyncExecutor()
     {
         if (DatabaseDescriptor.isClientOrToolInitialized())
             return null;
 
         // Do NOT start this thread pool in client mode
 
-        ScheduledThreadPoolExecutor syncExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("read-hotness-tracker"));
+        ScheduledExecutorPlus syncExecutor = executorFactory().scheduled("read-hotness-tracker");
         // Immediately remove readMeter sync task when cancelled.
-        syncExecutor.setRemoveOnCancelPolicy(true);
+        // TODO: should we set this by default on all scheduled executors?
+        if (syncExecutor instanceof ScheduledThreadPoolExecutor)
+            ((ScheduledThreadPoolExecutor)syncExecutor).setRemoveOnCancelPolicy(true);
         return syncExecutor;
     }
     private static final RateLimiter meterSyncThrottle = RateLimiter.create(100.0);
@@ -162,7 +170,8 @@
 
     public static final Comparator<SSTableReader> sstableComparator = (o1, o2) -> o1.first.compareTo(o2.first);
 
-    public static final Comparator<SSTableReader> generationReverseComparator = (o1, o2) -> -Integer.compare(o1.descriptor.generation, o2.descriptor.generation);
+    public static final Comparator<SSTableReader> idComparator = Comparator.comparing(t -> t.descriptor.id, SSTableIdFactory.COMPARATOR);
+    public static final Comparator<SSTableReader> idReverseComparator = idComparator.reversed();
 
     public static final Ordering<SSTableReader> sstableOrdering = Ordering.from(sstableComparator);
 
@@ -175,7 +184,7 @@
     };
 
     /**
-     * maxDataAge is a timestamp in local server time (e.g. System.currentTimeMilli) which represents an upper bound
+     * maxDataAge is a timestamp in local server time (e.g. Global.currentTimeMillis()) which represents an upper bound
      * to the newest piece of data stored in the sstable. In other words, this sstable does not contain items created
      * later than maxDataAge.
      *
@@ -525,46 +534,52 @@
     public static Collection<SSTableReader> openAll(Set<Map.Entry<Descriptor, Set<Component>>> entries,
                                                     final TableMetadataRef metadata)
     {
-        final Collection<SSTableReader> sstables = new LinkedBlockingQueue<>();
+        final Collection<SSTableReader> sstables = newBlockingQueue();
 
-        ExecutorService executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("SSTableBatchOpen", FBUtilities.getAvailableProcessors());
-        for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
+        ExecutorPlus executor = executorFactory().pooled("SSTableBatchOpen", FBUtilities.getAvailableProcessors());
+        try
         {
-            Runnable runnable = new Runnable()
+            for (final Map.Entry<Descriptor, Set<Component>> entry : entries)
             {
-                public void run()
+                Runnable runnable = new Runnable()
                 {
-                    SSTableReader sstable;
-                    try
+                    public void run()
                     {
-                        sstable = open(entry.getKey(), entry.getValue(), metadata);
+                        SSTableReader sstable;
+                        try
+                        {
+                            sstable = open(entry.getKey(), entry.getValue(), metadata);
+                        }
+                        catch (CorruptSSTableException ex)
+                        {
+                            JVMStabilityInspector.inspectThrowable(ex);
+                            logger.error("Corrupt sstable {}; skipping table", entry, ex);
+                            return;
+                        }
+                        catch (FSError ex)
+                        {
+                            JVMStabilityInspector.inspectThrowable(ex);
+                            logger.error("Cannot read sstable {}; file system error, skipping table", entry, ex);
+                            return;
+                        }
+                        sstables.add(sstable);
                     }
-                    catch (CorruptSSTableException ex)
-                    {
-                        JVMStabilityInspector.inspectThrowable(ex);
-                        logger.error("Corrupt sstable {}; skipping table", entry, ex);
-                        return;
-                    }
-                    catch (FSError ex)
-                    {
-                        JVMStabilityInspector.inspectThrowable(ex);
-                        logger.error("Cannot read sstable {}; file system error, skipping table", entry, ex);
-                        return;
-                    }
-                    sstables.add(sstable);
-                }
-            };
-            executor.submit(runnable);
+                };
+                executor.submit(runnable);
+            }
+        }
+        finally
+        {
+            executor.shutdown();
         }
 
-        executor.shutdown();
         try
         {
             executor.awaitTermination(7, TimeUnit.DAYS);
         }
         catch (InterruptedException e)
         {
-            throw new AssertionError(e);
+            throw new UncheckedInterruptedException(e);
         }
 
         return sstables;
@@ -613,7 +628,7 @@
                 if (expectedComponents.contains(Component.COMPRESSION_INFO) && !actualComponents.contains(Component.COMPRESSION_INFO))
                 {
                     String compressionInfoFileName = descriptor.filenameFor(Component.COMPRESSION_INFO);
-                    throw new CorruptSSTableException(new FileNotFoundException(compressionInfoFileName), compressionInfoFileName);
+                    throw new CorruptSSTableException(new NoSuchFileException(compressionInfoFileName), compressionInfoFileName);
                 }
             }
             catch (IOException e)
@@ -719,7 +734,7 @@
         if (summariesFile.exists())
             FileUtils.deleteWithConfirm(summariesFile);
 
-        try (DataOutputStreamPlus oStream = new BufferedDataOutputStreamPlus(new FileOutputStream(summariesFile)))
+        try (DataOutputStreamPlus oStream = new FileOutputStreamPlus(summariesFile))
         {
             IndexSummary.serializer.serialize(summary, oStream);
             ByteBufferUtil.writeWithLength(first.getKey(), oStream);
@@ -727,7 +742,7 @@
         }
         catch (IOException e)
         {
-            logger.trace("Cannot save SSTable Summary: ", e);
+            logger.error("Cannot save SSTable Summary: ", e);
 
             // corrupted hence delete it and let it load it now.
             if (summariesFile.exists())
@@ -738,7 +753,7 @@
     public static void saveBloomFilter(Descriptor descriptor, IFilter filter)
     {
         File filterFile = new File(descriptor.filenameFor(Component.FILTER));
-        try (DataOutputStreamPlus stream = new BufferedDataOutputStreamPlus(new FileOutputStream(filterFile)))
+        try (DataOutputStreamPlus stream = new FileOutputStreamPlus(filterFile))
         {
             BloomFilterSerializer.serialize((BloomFilter) filter, stream);
             stream.flush();
@@ -1405,13 +1420,7 @@
                                                  boolean permitMatchPastLast,
                                                  SSTableReadsListener listener);
 
-    public abstract UnfilteredRowIterator iterator(DecoratedKey key,
-                                                   Slices slices,
-                                                   ColumnFilter selectedColumns,
-                                                   boolean reversed,
-                                                   SSTableReadsListener listener);
-
-    public abstract UnfilteredRowIterator iterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed);
+    public abstract UnfilteredRowIterator rowIterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed);
 
     public abstract UnfilteredRowIterator simpleIterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, boolean tombstoneOnly);
 
@@ -1572,14 +1581,6 @@
      */
     public abstract ISSTableScanner getScanner(Iterator<AbstractBounds<PartitionPosition>> rangeIterator);
 
-    /**
-     * @param columns the columns to return.
-     * @param dataRange filter to use when reading the columns
-     * @param listener a listener used to handle internal read events
-     * @return A Scanner for seeking over the rows of the SSTable.
-     */
-    public abstract ISSTableScanner getScanner(ColumnFilter columns, DataRange dataRange, SSTableReadsListener listener);
-
     public FileDataInput getFileDataInput(long position)
     {
         return dfile.createReader(position);
@@ -1620,7 +1621,7 @@
                 continue;
             if (null != limiter)
                 limiter.acquire();
-            File targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
+            File targetLink = new File(snapshotDirectoryPath, sourceFile.name());
             FileUtils.createHardLink(sourceFile, targetLink);
         }
     }
@@ -1655,7 +1656,7 @@
         return sstableMetadata.pendingRepair != ActiveRepairService.NO_PENDING_REPAIR;
     }
 
-    public UUID getPendingRepair()
+    public TimeUUID getPendingRepair()
     {
         return sstableMetadata.pendingRepair;
     }
@@ -1848,7 +1849,7 @@
     /**
      * Mutate sstable repair metadata with a lock to avoid racing with entire-sstable-streaming and then reload sstable metadata
      */
-    public void mutateRepairedAndReload(long newRepairedAt, UUID newPendingRepair, boolean isTransient) throws IOException
+    public void mutateRepairedAndReload(long newRepairedAt, TimeUUID newPendingRepair, boolean isTransient) throws IOException
     {
         synchronized (tidy.global)
         {
@@ -1968,7 +1969,7 @@
 
     void setup(boolean trackHotness)
     {
-        tidy.setup(this, trackHotness);
+        tidy.setup(this, TRACK_ACTIVITY && trackHotness);
         this.readMeter = tidy.global.readMeter;
     }
 
@@ -1989,6 +1990,13 @@
 
     }
 
+    public boolean maybePresent(DecoratedKey key)
+    {
+        // if we don't have a bloom filter (bf_fp_chance = 1.0 or the filter file is missing),
+        // we check the index file instead.
+        return bf instanceof AlwaysPresentFilter && getPosition(key, Operator.EQ, false) != null || bf.isPresent(key);
+    }
+
     /**
      * One instance per SSTableReader we create.
      *
@@ -2084,6 +2092,12 @@
                     if (logger.isTraceEnabled())
                         logger.trace("Async instance tidier for {}, completed", descriptor);
                 }
+
+                @Override
+                public String toString()
+                {
+                    return "Tidy " + descriptor.ksname + '.' + descriptor.cfname + '-' + descriptor.id;
+                }
             });
         }
 
@@ -2137,14 +2151,14 @@
             // Don't track read rates for tables in the system keyspace and don't bother trying to load or persist
             // the read meter when in client mode.
             // Also, do not track read rates when running in client or tools mode (syncExecutor isn't available in these modes)
-            if (SchemaConstants.isLocalSystemKeyspace(desc.ksname) || DatabaseDescriptor.isClientOrToolInitialized())
+            if (!TRACK_ACTIVITY || SchemaConstants.isLocalSystemKeyspace(desc.ksname) || DatabaseDescriptor.isClientOrToolInitialized())
             {
                 readMeter = null;
                 readMeterSyncFuture = NULL;
                 return;
             }
 
-            readMeter = SystemKeyspace.getSSTableReadMeter(desc.ksname, desc.cfname, desc.generation);
+            readMeter = SystemKeyspace.getSSTableReadMeter(desc.ksname, desc.cfname, desc.id);
             // sync the average read rate to system.sstable_activity every five minutes, starting one minute from now
             readMeterSyncFuture = new WeakReference<>(syncExecutor.scheduleAtFixedRate(new Runnable()
             {
@@ -2153,7 +2167,7 @@
                     if (obsoletion == null)
                     {
                         meterSyncThrottle.acquire();
-                        SystemKeyspace.persistSSTableReadMeter(desc.ksname, desc.cfname, desc.generation, readMeter);
+                        SystemKeyspace.persistSSTableReadMeter(desc.ksname, desc.cfname, desc.id, readMeter);
                     }
                 }
             }, 1, 5, TimeUnit.MINUTES));
@@ -2191,18 +2205,27 @@
         public static Ref<GlobalTidy> get(SSTableReader sstable)
         {
             Descriptor descriptor = sstable.descriptor;
-            Ref<GlobalTidy> refc = lookup.get(descriptor);
-            if (refc != null)
-                return refc.ref();
-            final GlobalTidy tidy = new GlobalTidy(sstable);
-            refc = new Ref<>(tidy, tidy);
-            Ref<?> ex = lookup.putIfAbsent(descriptor, refc);
-            if (ex != null)
+
+            while (true)
             {
-                refc.close();
-                throw new AssertionError();
+                Ref<GlobalTidy> ref = lookup.get(descriptor);
+                if (ref == null)
+                {
+                    final GlobalTidy tidy = new GlobalTidy(sstable);
+                    ref = new Ref<>(tidy, tidy);
+                    Ref<GlobalTidy> ex = lookup.putIfAbsent(descriptor, ref);
+                    if (ex == null)
+                        return ref;
+                    ref = ex;
+                }
+
+                Ref<GlobalTidy> newRef = ref.tryRef();
+                if (newRef != null)
+                    return newRef;
+
+                // raced with tidy
+                lookup.remove(descriptor, ref);
             }
-            return refc;
         }
     }
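
A note on the rewritten GlobalTidy.get() above: instead of asserting when putIfAbsent loses a race, it now loops. If the map already holds an entry whose references have all been released (tryRef() returns null), the stale entry is removed with the two-argument remove() and the lookup is retried. A minimal, self-contained sketch of the same get-or-create-with-refcount pattern, using an illustrative Handle type rather than Cassandra's Ref/GlobalTidy:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch only: simplified ref-counted handles in a shared registry.
final class HandleRegistry<K>
{
    static final class Handle
    {
        private final AtomicInteger refs = new AtomicInteger(1); // creator holds the first reference

        /** Acquire another reference, unless the count has already dropped to zero. */
        boolean tryRef()
        {
            int current;
            do
            {
                current = refs.get();
                if (current == 0)
                    return false; // already tidied; the caller must retry with a fresh handle
            }
            while (!refs.compareAndSet(current, current + 1));
            return true;
        }

        void release()
        {
            refs.decrementAndGet();
        }
    }

    private final ConcurrentHashMap<K, Handle> lookup = new ConcurrentHashMap<>();

    Handle get(K key)
    {
        while (true)
        {
            Handle handle = lookup.get(key);
            if (handle == null)
            {
                Handle fresh = new Handle();
                Handle existing = lookup.putIfAbsent(key, fresh);
                if (existing == null)
                    return fresh;   // we published the new handle
                handle = existing;  // somebody else beat us to it
            }

            if (handle.tryRef())
                return handle;

            // raced with a release-to-zero: drop the stale mapping and try again
            lookup.remove(key, handle);
        }
    }
}
```

The two-argument remove() only deletes the mapping if it still points at the stale handle, so a replacement published by another thread is never clobbered.
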
 
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java
index 8fe1def..6ca74f0 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java
@@ -28,7 +28,9 @@
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.io.sstable.metadata.ValidationMetadata;
 import org.apache.cassandra.io.util.DiskOptimizationStrategy;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileHandle;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.schema.TableMetadata;
@@ -37,16 +39,17 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.BufferedInputStream;
 import java.io.DataInputStream;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
-import java.nio.file.Paths;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.cassandra.io.sstable.format.SSTableReader.OpenReason.NORMAL;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public abstract class SSTableReaderBuilder
 {
     private static final Logger logger = LoggerFactory.getLogger(SSTableReaderBuilder.class);
@@ -125,7 +128,7 @@
         if (!summariesFile.exists())
         {
             if (logger.isDebugEnabled())
-                logger.debug("SSTable Summary File {} does not exist", summariesFile.getAbsolutePath());
+                logger.debug("SSTable Summary File {} does not exist", summariesFile.absolutePath());
             return;
         }
 
@@ -144,7 +147,7 @@
         {
             if (summary != null)
                 summary.close();
-            logger.trace("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
+            logger.trace("Cannot deserialize SSTable Summary File {}: {}", summariesFile.path(), e.getMessage());
             // corrupted; delete it and fall back to creating a new summary
             FileUtils.closeQuietly(iStream);
             // delete it and fall back to creating a new summary
@@ -233,7 +236,7 @@
      */
     IFilter loadBloomFilter() throws IOException
     {
-        try (DataInputStream stream = new DataInputStream(new BufferedInputStream(Files.newInputStream(Paths.get(descriptor.filenameFor(Component.FILTER))))))
+        try (FileInputStreamPlus stream = new File(descriptor.filenameFor(Component.FILTER)).newInputStream())
         {
             return BloomFilterSerializer.deserialize(stream, descriptor.version.hasOldBfFormat());
         }
@@ -270,7 +273,7 @@
                         StatsMetadata statsMetadata,
                         SerializationHeader header)
         {
-            super(descriptor, metadataRef, System.currentTimeMillis(), components, statsMetadata, SSTableReader.OpenReason.NORMAL, header);
+            super(descriptor, metadataRef, currentTimeMillis(), components, statsMetadata, NORMAL, header);
         }
 
         @Override
@@ -338,7 +341,7 @@
                        StatsMetadata statsMetadata,
                        SerializationHeader header)
         {
-            super(descriptor, metadataRef, System.currentTimeMillis(), components, statsMetadata, SSTableReader.OpenReason.NORMAL, header);
+            super(descriptor, metadataRef, currentTimeMillis(), components, statsMetadata, NORMAL, header);
             this.validationMetadata = validationMetadata;
             this.isOffline = isOffline;
         }
@@ -353,9 +356,9 @@
             try
             {
                 // load index and filter
-                long start = System.nanoTime();
+                long start = nanoTime();
                 load(validationMetadata, isOffline, components, DatabaseDescriptor.getDiskOptimizationStrategy(), statsMetadata);
-                logger.trace("INDEX LOAD TIME for {}: {} ms.", descriptor, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+                logger.trace("INDEX LOAD TIME for {}: {} ms.", descriptor, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
             }
             catch (IOException t)
             {
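
The switch from System.currentTimeMillis()/System.nanoTime() to the Clock.Global static imports above routes every time read through a single indirection point, which is what lets a test or deterministic simulation substitute its own time source. A rough sketch of that idea under made-up names (this is not Cassandra's actual Clock API):

```java
// Hypothetical pluggable clock; mirrors the idea behind the Clock.Global accessors, not their real API.
public final class GlobalClock
{
    public interface Clock
    {
        long nanoTime();
        long currentTimeMillis();
    }

    private static volatile Clock instance = new Clock()
    {
        public long nanoTime()          { return System.nanoTime(); }
        public long currentTimeMillis() { return System.currentTimeMillis(); }
    };

    public static long nanoTime()          { return instance.nanoTime(); }
    public static long currentTimeMillis() { return instance.currentTimeMillis(); }

    /** Tests or a simulation can install a controlled time source. */
    public static void install(Clock clock) { instance = clock; }

    private GlobalClock() {}
}
```
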
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
index 43c50c5..f82a7c2 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
@@ -25,10 +25,16 @@
 import com.google.common.collect.Sets;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.DeletionPurger;
 import org.apache.cassandra.db.RowIndexEntry;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
+import org.apache.cassandra.db.rows.ComplexColumnData;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.Unfiltered;
 import org.apache.cassandra.db.rows.UnfilteredRowIterator;
 import org.apache.cassandra.index.Index;
 import org.apache.cassandra.io.FSWriteError;
@@ -40,9 +46,13 @@
 import org.apache.cassandra.io.sstable.metadata.MetadataType;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
 /**
@@ -54,7 +64,7 @@
 public abstract class SSTableWriter extends SSTable implements Transactional
 {
     protected long repairedAt;
-    protected UUID pendingRepair;
+    protected TimeUUID pendingRepair;
     protected boolean isTransient;
     protected long maxDataAge = -1;
     protected final long keyCount;
@@ -77,7 +87,7 @@
     protected SSTableWriter(Descriptor descriptor,
                             long keyCount,
                             long repairedAt,
-                            UUID pendingRepair,
+                            TimeUUID pendingRepair,
                             boolean isTransient,
                             TableMetadataRef metadata,
                             MetadataCollector metadataCollector,
@@ -98,7 +108,7 @@
     public static SSTableWriter create(Descriptor descriptor,
                                        Long keyCount,
                                        Long repairedAt,
-                                       UUID pendingRepair,
+                                       TimeUUID pendingRepair,
                                        boolean isTransient,
                                        TableMetadataRef metadata,
                                        MetadataCollector metadataCollector,
@@ -113,7 +123,7 @@
     public static SSTableWriter create(Descriptor descriptor,
                                        long keyCount,
                                        long repairedAt,
-                                       UUID pendingRepair,
+                                       TimeUUID pendingRepair,
                                        boolean isTransient,
                                        int sstableLevel,
                                        SerializationHeader header,
@@ -128,7 +138,7 @@
                                        Descriptor descriptor,
                                        long keyCount,
                                        long repairedAt,
-                                       UUID pendingRepair,
+                                       TimeUUID pendingRepair,
                                        boolean isTransient,
                                        int sstableLevel,
                                        SerializationHeader header,
@@ -143,7 +153,7 @@
     public static SSTableWriter create(Descriptor descriptor,
                                        long keyCount,
                                        long repairedAt,
-                                       UUID pendingRepair,
+                                       TimeUUID pendingRepair,
                                        boolean isTransient,
                                        SerializationHeader header,
                                        Collection<Index> indexes,
@@ -372,12 +382,25 @@
         FileUtils.createHardLinkWithoutConfirm(tmpdesc.filenameFor(Component.SUMMARY), newdesc.filenameFor(Component.SUMMARY));
     }
 
+    /**
+     * Parameters for calculating the expected size of an sstable. Exposed on memtable flush sets (i.e. collected
+     * subsets of a memtable that will be written to sstables).
+     */
+    public interface SSTableSizeParameters
+    {
+        long partitionCount();
+        long partitionKeysSize();
+        long dataSize();
+    }
+
     public static abstract class Factory
     {
+        public abstract long estimateSize(SSTableSizeParameters parameters);
+
         public abstract SSTableWriter open(Descriptor descriptor,
                                            long keyCount,
                                            long repairedAt,
-                                           UUID pendingRepair,
+                                           TimeUUID pendingRepair,
                                            boolean isTransient,
                                            TableMetadataRef metadata,
                                            MetadataCollector metadataCollector,
@@ -385,4 +408,43 @@
                                            Collection<SSTableFlushObserver> observers,
                                            LifecycleNewTracker lifecycleNewTracker);
     }
+
+    public static void guardCollectionSize(TableMetadata metadata, DecoratedKey partitionKey, Unfiltered unfiltered)
+    {
+        if (!Guardrails.collectionSize.enabled() && !Guardrails.itemsPerCollection.enabled())
+            return;
+
+        if (!unfiltered.isRow() || SchemaConstants.isSystemKeyspace(metadata.keyspace))
+            return;
+
+        Row row = (Row) unfiltered;
+        for (ColumnMetadata column : row.columns())
+        {
+            if (!column.type.isCollection() || !column.type.isMultiCell())
+                continue;
+
+            ComplexColumnData cells = row.getComplexColumnData(column);
+            if (cells == null)
+                continue;
+
+            ComplexColumnData liveCells = cells.purge(DeletionPurger.PURGE_ALL, FBUtilities.nowInSeconds());
+            if (liveCells == null)
+                continue;
+
+            int cellsSize = liveCells.dataSize();
+            int cellsCount = liveCells.cellsCount();
+
+            if (!Guardrails.collectionSize.triggersOn(cellsSize, null) &&
+                !Guardrails.itemsPerCollection.triggersOn(cellsCount, null))
+                continue;
+
+            String keyString = metadata.primaryKeyAsCQLLiteral(partitionKey.getKey(), row.clustering());
+            String msg = String.format("%s in row %s in table %s",
+                                       column.name.toString(),
+                                       keyString,
+                                       metadata);
+            Guardrails.collectionSize.guard(cellsSize, msg, true, null);
+            Guardrails.itemsPerCollection.guard(cellsCount, msg, true, null);
+        }
+    }
 }
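
The new SSTableSizeParameters interface only carries three totals, so a flush path can hand them to the format's writer factory with a small adapter. A hedged sketch with made-up numbers (estimateFlushSize and the literals are illustrative; only the interface and Factory.estimateSize come from the change above):

```java
// Illustrative only: a flush path could expose its totals through the new interface
// and ask the format's writer factory for an on-disk size estimate.
static long estimateFlushSize(SSTableWriter.Factory factory)
{
    SSTableWriter.SSTableSizeParameters params = new SSTableWriter.SSTableSizeParameters()
    {
        public long partitionCount()    { return 10_000L; }   // made-up numbers
        public long partitionKeysSize() { return 10L << 20; } // ~10 MiB of serialized keys
        public long dataSize()          { return 100L << 20; }// ~100 MiB of row data
    };
    return factory.estimateSize(params);
}
```
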
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java
index ff0d791..c84782f 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.io.sstable.format.big;
 
 import java.util.Collection;
-import java.util.UUID;
 
 import org.apache.cassandra.io.sstable.SSTable;
 import org.apache.cassandra.schema.TableMetadata;
@@ -30,6 +29,7 @@
 import org.apache.cassandra.io.sstable.format.*;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Legacy bigtable format
@@ -79,10 +79,19 @@
     static class WriterFactory extends SSTableWriter.Factory
     {
         @Override
+        public long estimateSize(SSTableWriter.SSTableSizeParameters parameters)
+        {
+            return (long) ((parameters.partitionKeysSize() // index entries
+                            + parameters.partitionKeysSize() // keys in data file
+                            + parameters.dataSize()) // data
+                           * 1.2); // bloom filter and row index overhead
+        }
+
+        @Override
         public SSTableWriter open(Descriptor descriptor,
                                   long keyCount,
                                   long repairedAt,
-                                  UUID pendingRepair,
+                                  TimeUUID pendingRepair,
                                   boolean isTransient,
                                   TableMetadataRef metadata,
                                   MetadataCollector metadataCollector,
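
BigFormat's estimate counts the partition-key bytes twice (index entries plus the copies in the data file), adds the row data, and pads the sum by 20% for the bloom filter and row index overhead. With the illustrative numbers from the sketch above, 10 MiB of keys and 100 MiB of data give (10 + 10 + 100) x 1.2 = 144 MiB. A standalone check of that arithmetic:

```java
public class EstimateSizeExample
{
    // Mirrors BigFormat.WriterFactory.estimateSize: (keys + keys + data) * 1.2
    static long estimate(long partitionKeysSize, long dataSize)
    {
        return (long) ((partitionKeysSize + partitionKeysSize + dataSize) * 1.2);
    }

    public static void main(String[] args)
    {
        long keys = 10L << 20;   // ~10 MiB of partition keys
        long data = 100L << 20;  // ~100 MiB of row data
        System.out.println(estimate(keys, data)); // prints 150994944, roughly 144 MiB
    }
}
```
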
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java
index dc13031..e0edd7a 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java
@@ -57,18 +57,18 @@
         super(builder);
     }
 
-    public UnfilteredRowIterator iterator(DecoratedKey key,
-                                          Slices slices,
-                                          ColumnFilter selectedColumns,
-                                          boolean reversed,
-                                          SSTableReadsListener listener)
+    public UnfilteredRowIterator rowIterator(DecoratedKey key,
+                                             Slices slices,
+                                             ColumnFilter selectedColumns,
+                                             boolean reversed,
+                                             SSTableReadsListener listener)
     {
         RowIndexEntry rie = getPosition(key, SSTableReader.Operator.EQ, listener);
-        return iterator(null, key, rie, slices, selectedColumns, reversed);
+        return rowIterator(null, key, rie, slices, selectedColumns, reversed);
     }
 
     @SuppressWarnings("resource")
-    public UnfilteredRowIterator iterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed)
+    public UnfilteredRowIterator rowIterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed)
     {
         if (indexEntry == null)
             return UnfilteredRowIterators.noRowsIterator(metadata(), key, Rows.EMPTY_STATIC_ROW, DeletionTime.LIVE, reversed);
@@ -78,7 +78,7 @@
     }
 
     @Override
-    public ISSTableScanner getScanner(ColumnFilter columns, DataRange dataRange, SSTableReadsListener listener)
+    public ISSTableScanner partitionIterator(ColumnFilter columns, DataRange dataRange, SSTableReadsListener listener)
     {
         return BigTableScanner.getScanner(this, columns, dataRange, listener);
     }
@@ -152,7 +152,7 @@
             if (!bf.isPresent((DecoratedKey)key))
             {
                 listener.onSSTableSkipped(this, SkippingReason.BLOOM_FILTER);
-                Tracing.trace("Bloom filter allows skipping sstable {}", descriptor.generation);
+                Tracing.trace("Bloom filter allows skipping sstable {}", descriptor.id);
                 bloomFilterTracker.addTrueNegative();
                 return null;
             }
@@ -168,7 +168,7 @@
                 // we do not need to track a "true positive" for the Bloom Filter here because it has already been
                 // tracked inside the getCachedPosition method
                 listener.onSSTableSelected(this, cachedPosition, SelectionReason.KEY_CACHE_HIT);
-                Tracing.trace("Key cache hit for sstable {}", descriptor.generation);
+                Tracing.trace("Key cache hit for sstable {}", descriptor.id);
                 return cachedPosition;
             }
         }
@@ -197,7 +197,7 @@
             if (op == Operator.EQ && updateCacheAndStats)
                 bloomFilterTracker.addFalsePositive();
             listener.onSSTableSkipped(this, SkippingReason.MIN_MAX_KEYS);
-            Tracing.trace("Check against min and max keys allows skipping sstable {}", descriptor.generation);
+            Tracing.trace("Check against min and max keys allows skipping sstable {}", descriptor.id);
             return null;
         }
 
@@ -244,7 +244,7 @@
                         if (op == SSTableReader.Operator.EQ && updateCacheAndStats)
                             bloomFilterTracker.addFalsePositive();
                         listener.onSSTableSkipped(this, SkippingReason.PARTITION_INDEX_LOOKUP);
-                        Tracing.trace("Partition index lookup allows skipping sstable {}", descriptor.generation);
+                        Tracing.trace("Partition index lookup allows skipping sstable {}", descriptor.id);
                         return null;
                     }
                 }
@@ -275,7 +275,7 @@
                     if (op == Operator.EQ && updateCacheAndStats)
                         bloomFilterTracker.addTruePositive();
                     listener.onSSTableSelected(this, indexEntry, SelectionReason.INDEX_ENTRY_FOUND);
-                    Tracing.trace("Partition index with {} entries found for sstable {}", indexEntry.columnsIndexCount(), descriptor.generation);
+                    Tracing.trace("Partition index with {} entries found for sstable {}", indexEntry.columnsIndexCount(), descriptor.id);
                     return indexEntry;
                 }
 
@@ -291,7 +291,7 @@
         if (op == SSTableReader.Operator.EQ && updateCacheAndStats)
             bloomFilterTracker.addFalsePositive();
         listener.onSSTableSkipped(this, SkippingReason.INDEX_ENTRY_NOT_FOUND);
-        Tracing.trace("Partition index lookup complete (bloom filter false positive) for sstable {}", descriptor.generation);
+        Tracing.trace("Partition index lookup complete (bloom filter false positive) for sstable {}", descriptor.id);
         return null;
     }
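
Each early return in getPosition() above now reports why the sstable was skipped or selected before tracing the descriptor id. A sketch of a listener that tallies those events; the package and the three-argument onSSTableSelected signature are assumed from the call sites above, and the real interface may declare further default methods:

```java
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.LongAdder;

import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.io.sstable.format.SSTableReadsListener;

// Sketch: tally how often reads skip or select sstables, keyed by reason.
// Callback shapes are assumed from the call sites in the hunks above.
public class CountingReadsListener implements SSTableReadsListener
{
    private final Map<SkippingReason, LongAdder> skips = new EnumMap<>(SkippingReason.class);
    private final Map<SelectionReason, LongAdder> selections = new EnumMap<>(SelectionReason.class);

    public CountingReadsListener()
    {
        for (SkippingReason r : SkippingReason.values())
            skips.put(r, new LongAdder());
        for (SelectionReason r : SelectionReason.values())
            selections.put(r, new LongAdder());
    }

    @Override
    public void onSSTableSkipped(SSTableReader sstable, SkippingReason reason)
    {
        skips.get(reason).increment();
    }

    @Override
    public void onSSTableSelected(SSTableReader sstable, RowIndexEntry<?> indexEntry, SelectionReason reason)
    {
        selections.get(reason).increment();
    }

    public long skipped(SkippingReason reason)
    {
        return skips.get(reason).sum();
    }
}
```
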
 
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScanner.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScanner.java
index 20105cd..235b9b1 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScanner.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScanner.java
@@ -365,7 +365,7 @@
                             }
 
                             ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(partitionKey());
-                            return sstable.iterator(dfile, partitionKey(), currentEntry, filter.getSlices(BigTableScanner.this.metadata()), columns, filter.isReversed());
+                            return sstable.rowIterator(dfile, partitionKey(), currentEntry, filter.getSlices(BigTableScanner.this.metadata()), columns, filter.isReversed());
                         }
                         catch (CorruptSSTableException | IOException e)
                         {
@@ -441,5 +441,10 @@
         {
             return null;
         }
+
+        public int getMinLocalDeletionTime()
+        {
+            return DeletionTime.LIVE.localDeletionTime();
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
index eeb9153..e8dff32 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
@@ -17,13 +17,16 @@
  */
 package org.apache.cassandra.io.sstable.format.big;
 
-import java.io.*;
+
+import java.io.IOException;
 import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
 import java.util.*;
+import java.util.stream.Stream;
 
 import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,8 +51,11 @@
 import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.utils.*;
+import org.apache.cassandra.utils.concurrent.SharedCloseableImpl;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class BigTableWriter extends SSTableWriter
 {
     private static final Logger logger = LoggerFactory.getLogger(BigTableWriter.class);
@@ -65,13 +71,13 @@
 
     private final SequentialWriterOption writerOption = SequentialWriterOption.newBuilder()
                                                         .trickleFsync(DatabaseDescriptor.getTrickleFsync())
-                                                        .trickleFsyncByteInterval(DatabaseDescriptor.getTrickleFsyncIntervalInKb() * 1024)
+                                                        .trickleFsyncByteInterval(DatabaseDescriptor.getTrickleFsyncIntervalInKiB() * 1024)
                                                         .build();
 
     public BigTableWriter(Descriptor descriptor,
                           long keyCount,
                           long repairedAt,
-                          UUID pendingRepair,
+                          TimeUUID pendingRepair,
                           boolean isTransient,
                           TableMetadataRef metadata,
                           MetadataCollector metadataCollector, 
@@ -231,6 +237,7 @@
             long endPosition = dataFile.position();
             long rowSize = endPosition - startPosition;
             maybeLogLargePartitionWarning(key, rowSize);
+            maybeLogManyTombstonesWarning(key, metadataCollector.totalTombstones);
             metadataCollector.addPartitionSizeInBytes(rowSize);
             afterAppend(key, endPosition, entry, columnIndexWriter.buffer());
             return entry;
@@ -259,6 +266,15 @@
         }
     }
 
+    private void maybeLogManyTombstonesWarning(DecoratedKey key, int tombstoneCount)
+    {
+        if (tombstoneCount > DatabaseDescriptor.getCompactionTombstoneWarningThreshold())
+        {
+            String keyString = metadata().partitionKeyType.getString(key.getKey());
+            logger.warn("Writing {} tombstones to {}/{}:{} in sstable {}", tombstoneCount, metadata.keyspace, metadata.name, keyString, getFilename());
+        }
+    }
+
     private static class StatsCollector extends Transformation
     {
         private final MetadataCollector collector;
@@ -324,32 +340,50 @@
         if (boundary == null)
             return null;
 
-        StatsMetadata stats = statsMetadata();
-        assert boundary.indexLength > 0 && boundary.dataLength > 0;
-        // open the reader early
-        IndexSummary indexSummary = iwriter.summary.build(metadata().partitioner, boundary);
-        long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
-        int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
-        FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
-        if (compression)
-            dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
-        int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
-        FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
-        invalidateCacheAtBoundary(dfile);
-        SSTableReader sstable = SSTableReader.internalOpen(descriptor,
-                                                           components, metadata,
-                                                           ifile, dfile,
-                                                           indexSummary,
-                                                           iwriter.bf.sharedCopy(), 
-                                                           maxDataAge, 
-                                                           stats, 
-                                                           SSTableReader.OpenReason.EARLY, 
-                                                           header);
+        IndexSummary indexSummary = null;
+        FileHandle ifile = null;
+        FileHandle dfile = null;
+        SSTableReader sstable = null;
 
-        // now it's open, find the ACTUAL last readable key (i.e. for which the data file has also been flushed)
-        sstable.first = getMinimalKey(first);
-        sstable.last = getMinimalKey(boundary.lastKey);
-        return sstable;
+        try
+        {
+            StatsMetadata stats = statsMetadata();
+            assert boundary.indexLength > 0 && boundary.dataLength > 0;
+            // open the reader early
+            indexSummary = iwriter.summary.build(metadata().partitioner, boundary);
+            long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
+            int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
+            ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
+            if (compression)
+                dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
+            int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
+            dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
+            invalidateCacheAtBoundary(dfile);
+            sstable = SSTableReader.internalOpen(descriptor,
+                                                 components, metadata,
+                                                 ifile, dfile,
+                                                 indexSummary,
+                                                 iwriter.bf.sharedCopy(),
+                                                 maxDataAge,
+                                                 stats,
+                                                 SSTableReader.OpenReason.EARLY,
+                                                 header);
+
+            // now it's open, find the ACTUAL last readable key (i.e. for which the data file has also been flushed)
+            sstable.first = getMinimalKey(first);
+            sstable.last = getMinimalKey(boundary.lastKey);
+            return sstable;
+        }
+        catch (Throwable t)
+        {
+            JVMStabilityInspector.inspectThrowable(t);
+            // If we successfully created our sstable, we can rely on its InstanceTidier to clean things up for us
+            if (sstable != null)
+                sstable.selfRef().release();
+            else
+                Stream.of(indexSummary, ifile, dfile).filter(Objects::nonNull).forEach(SharedCloseableImpl::close);
+            throw t;
+        }
     }
 
     void invalidateCacheAtBoundary(FileHandle dfile)
@@ -374,33 +408,51 @@
     private SSTableReader openFinal(SSTableReader.OpenReason openReason)
     {
         if (maxDataAge < 0)
-            maxDataAge = System.currentTimeMillis();
+            maxDataAge = currentTimeMillis();
 
-        StatsMetadata stats = statsMetadata();
-        // finalize in-memory state for the reader
-        IndexSummary indexSummary = iwriter.summary.build(metadata().partitioner);
-        long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
-        int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
-        int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
-        FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
-        if (compression)
-            dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(0));
-        FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete();
-        invalidateCacheAtBoundary(dfile);
-        SSTableReader sstable = SSTableReader.internalOpen(descriptor,
-                                                           components,
-                                                           metadata,
-                                                           ifile,
-                                                           dfile,
-                                                           indexSummary,
-                                                           iwriter.bf.sharedCopy(),
-                                                           maxDataAge,
-                                                           stats,
-                                                           openReason,
-                                                           header);
-        sstable.first = getMinimalKey(first);
-        sstable.last = getMinimalKey(last);
-        return sstable;
+        IndexSummary indexSummary = null;
+        FileHandle ifile = null;
+        FileHandle dfile = null;
+        SSTableReader sstable = null;
+
+        try
+        {
+            StatsMetadata stats = statsMetadata();
+            // finalize in-memory state for the reader
+            indexSummary = iwriter.summary.build(metadata().partitioner);
+            long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
+            int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
+            int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
+            ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
+            if (compression)
+                dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(0));
+            dfile = dbuilder.bufferSize(dataBufferSize).complete();
+            invalidateCacheAtBoundary(dfile);
+            sstable = SSTableReader.internalOpen(descriptor,
+                                                 components,
+                                                 metadata,
+                                                 ifile,
+                                                 dfile,
+                                                 indexSummary,
+                                                 iwriter.bf.sharedCopy(),
+                                                 maxDataAge,
+                                                 stats,
+                                                 openReason,
+                                                 header);
+            sstable.first = getMinimalKey(first);
+            sstable.last = getMinimalKey(last);
+            return sstable;
+        }
+        catch (Throwable t)
+        {
+            JVMStabilityInspector.inspectThrowable(t);
+            // If we successfully created our sstable, we can rely on its InstanceTidier to clean things up for us
+            if (sstable != null)
+                sstable.selfRef().release();
+            else
+                Stream.of(indexSummary, ifile, dfile).filter(Objects::nonNull).forEach(SharedCloseableImpl::close);
+            throw t;
+        }
     }
 
     protected SSTableWriter.TransactionalProxy txnProxy()
@@ -458,7 +510,7 @@
         }
         catch (IOException e)
         {
-            throw new FSWriteError(e, file.getPath());
+            throw new FSWriteError(e, file.path());
         }
     }
 
@@ -535,13 +587,12 @@
             if (components.contains(Component.FILTER))
             {
                 String path = descriptor.filenameFor(Component.FILTER);
-                try (FileOutputStream fos = new FileOutputStream(path);
-                     DataOutputStreamPlus stream = new BufferedDataOutputStreamPlus(fos))
+                try (FileOutputStreamPlus stream = new FileOutputStreamPlus(path))
                 {
                     // bloom filter
                     BloomFilterSerializer.serialize((BloomFilter) bf, stream);
                     stream.flush();
-                    SyncUtil.sync(fos);
+                    stream.sync();
                 }
                 catch (IOException e)
                 {
@@ -587,7 +638,7 @@
 
         protected Throwable doAbort(Throwable accumulate)
         {
-            return indexFile.abort(accumulate);
+            return summary.close(indexFile.abort(accumulate));
         }
 
         @Override
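
openEarly() and openFinal() above now share the same failure path: once the reader has been constructed its tidier owns the summary and both file handles, so releasing the reader's self-ref is enough; before that point, whichever resources already exist are closed individually. A generic sketch of that pattern with plain AutoCloseables (the types and method names here are illustrative, not the Cassandra ones):

```java
import java.util.Objects;
import java.util.function.BiFunction;
import java.util.function.Supplier;
import java.util.stream.Stream;

// Sketch of "build several closeables, hand them to an owner, clean up on failure".
final class OpenWithCleanup
{
    static <R extends AutoCloseable> R open(Supplier<? extends AutoCloseable> makeIndex,
                                            Supplier<? extends AutoCloseable> makeData,
                                            BiFunction<AutoCloseable, AutoCloseable, R> assemble)
    {
        AutoCloseable index = null;
        AutoCloseable data = null;
        R reader = null;
        try
        {
            index = makeIndex.get();
            data = makeData.get();
            reader = assemble.apply(index, data);  // from here on the reader owns both resources
            return reader;
        }
        catch (Throwable t)
        {
            if (reader != null)
                closeQuietly(reader);              // the owner tears down everything it took
            else
                Stream.of(index, data).filter(Objects::nonNull).forEach(OpenWithCleanup::closeQuietly);
            throw t;                               // precise rethrow: only unchecked exceptions can reach here
        }
    }

    private static void closeQuietly(AutoCloseable c)
    {
        try { c.close(); } catch (Exception ignored) { }
    }
}
```
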
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java
index 2564e96..f640349 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java
@@ -18,8 +18,8 @@
 package org.apache.cassandra.io.sstable.format.big;
 
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
 import java.util.Collection;
 import java.util.EnumMap;
 import java.util.Map;
@@ -28,6 +28,7 @@
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -202,7 +203,7 @@
             writer.close();
     }
 
-    public void writeComponent(Component.Type type, DataInputPlus in, long size)
+    public void writeComponent(Component.Type type, DataInputPlus in, long size) throws ClosedChannelException
     {
         logger.info("Writing component {} to {} length {}", type, componentWriters.get(type).getPath(), prettyPrintMemory(size));
 
@@ -212,7 +213,7 @@
             write(in, size, componentWriters.get(type));
     }
 
-    private void write(AsyncStreamingInputPlus in, long size, SequentialWriter writer)
+    private void write(AsyncStreamingInputPlus in, long size, SequentialWriter writer) throws ClosedChannelException
     {
         logger.info("Block Writing component to {} length {}", writer.getPath(), prettyPrintMemory(size));
 
@@ -221,14 +222,20 @@
             in.consume(writer::writeDirectlyToChannel, size);
             writer.sync();
         }
-        // FIXME: handle ACIP exceptions properly
-        catch (EOFException | AsyncStreamingInputPlus.InputTimeoutException e)
+        catch (EOFException e)
         {
             in.close();
         }
+        catch (ClosedChannelException e)
+        {
+            // FSWriteError triggers the disk failure policy, which is not what we want for a connection issue,
+            // so rethrow and let the error handling logic higher up deal with it
+            // see CASSANDRA-17116
+            throw e;
+        }
         catch (IOException e)
         {
             throw new FSWriteError(e, writer.getPath());
         }
     }
-}
\ No newline at end of file
+}
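
ClosedChannelException is a subclass of IOException, so its dedicated catch clause above has to come before the broad IOException handler that wraps failures in FSWriteError; otherwise a dropped connection would trip the disk failure policy. A small standalone illustration of that ordering (plain JDK types, not the Cassandra classes):

```java
import java.io.IOException;
import java.nio.channels.ClosedChannelException;

public class CatchOrdering
{
    static void copy(boolean simulateClosedChannel) throws ClosedChannelException
    {
        try
        {
            if (simulateClosedChannel)
                throw new ClosedChannelException();   // peer went away mid-stream
            throw new IOException("disk is unhappy"); // a genuine local I/O failure
        }
        catch (ClosedChannelException e)
        {
            // transport problem: rethrow so the caller handles the broken connection,
            // without invoking any "disk failed" policy
            throw e;
        }
        catch (IOException e)
        {
            // local write failure: in Cassandra this becomes an FSWriteError
            throw new RuntimeException("FSWriteError stand-in", e);
        }
    }

    public static void main(String[] args) throws Exception
    {
        try { copy(true); }  catch (ClosedChannelException e) { System.out.println("connection error surfaced"); }
        try { copy(false); } catch (RuntimeException e)       { System.out.println("disk failure path: " + e.getMessage()); }
    }
}
```
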
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/IMetadataSerializer.java b/src/java/org/apache/cassandra/io/sstable/metadata/IMetadataSerializer.java
index fc1ce42..ff1e604 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/IMetadataSerializer.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/IMetadataSerializer.java
@@ -20,13 +20,13 @@
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.Map;
-import java.util.UUID;
 import java.util.function.UnaryOperator;
 
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Interface for SSTable metadata serializer
@@ -94,7 +94,7 @@
      * NOTE: mutating stats metadata of a live sstable will race with entire-sstable-streaming, please use
      * {@link SSTableReader#mutateLevelAndReload} instead on live sstable.
      */
-    public void mutateRepairMetadata(Descriptor descriptor, long newRepairedAt, UUID newPendingRepair, boolean isTransient) throws IOException;
+    public void mutateRepairMetadata(Descriptor descriptor, long newRepairedAt, TimeUUID newPendingRepair, boolean isTransient) throws IOException;
 
     /**
      * Replace the sstable metadata file ({@code -Statistics.db}) with the given components.
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
old mode 100755
new mode 100644
index be824ef..1375331
--- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
@@ -39,7 +39,9 @@
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.EstimatedHistogram;
+import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MurmurHash;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.streamhist.TombstoneHistogram;
 import org.apache.cassandra.utils.streamhist.StreamingTombstoneHistogramBuilder;
 
@@ -105,6 +107,7 @@
     protected boolean hasLegacyCounterShards = false;
     protected long totalColumnsSet;
     protected long totalRows;
+    public int totalTombstones;
 
     /**
      * Default cardinality estimation method is to use HyperLogLog++.
@@ -114,6 +117,7 @@
      */
     protected ICardinality cardinality = new HyperLogLogPlus(13, 25);
     private final ClusteringComparator comparator;
+    private final int nowInSec = FBUtilities.nowInSeconds();
 
     private final UUID originatingHostId;
 
@@ -149,6 +153,7 @@
     {
         long hashed = MurmurHash.hash2_64(key, key.position(), key.remaining(), 0);
         cardinality.offerHashed(hashed);
+        totalTombstones = 0;
         return this;
     }
 
@@ -182,6 +187,8 @@
         updateTimestamp(newInfo.timestamp());
         updateTTL(newInfo.ttl());
         updateLocalDeletionTime(newInfo.localExpirationTime());
+        if (!newInfo.isLive(nowInSec))
+            updateTombstoneCount();
     }
 
     public void update(Cell<?> cell)
@@ -189,6 +196,8 @@
         updateTimestamp(cell.timestamp());
         updateTTL(cell.ttl());
         updateLocalDeletionTime(cell.localDeletionTime());
+        if (!cell.isLive(nowInSec))
+            updateTombstoneCount();
     }
 
     public void update(DeletionTime dt)
@@ -197,6 +206,7 @@
         {
             updateTimestamp(dt.markedForDeleteAt());
             updateLocalDeletionTime(dt.localDeletionTime());
+            updateTombstoneCount();
         }
     }
 
@@ -218,6 +228,11 @@
             estimatedTombstoneDropTime.update(newLocalDeletionTime);
     }
 
+    private void updateTombstoneCount()
+    {
+        ++totalTombstones;
+    }
+
     private void updateTTL(int newTTL)
     {
         ttlTracker.update(newTTL);
@@ -247,7 +262,7 @@
         this.hasLegacyCounterShards = this.hasLegacyCounterShards || hasLegacyCounterShards;
     }
 
-    public Map<MetadataType, MetadataComponent> finalizeMetadata(String partitioner, double bloomFilterFPChance, long repairedAt, UUID pendingRepair, boolean isTransient, SerializationHeader header)
+    public Map<MetadataType, MetadataComponent> finalizeMetadata(String partitioner, double bloomFilterFPChance, long repairedAt, TimeUUID pendingRepair, boolean isTransient, SerializationHeader header)
     {
         Preconditions.checkState((minClustering == null && maxClustering == null)
                                  || comparator.compare(maxClustering, minClustering) >= 0);
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java
index 042103e..f13daa4 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java
@@ -17,7 +17,10 @@
  */
 package org.apache.cassandra.io.sstable.metadata;
 
-import java.io.*;
+import org.apache.cassandra.io.util.*;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.util.*;
 import java.util.function.UnaryOperator;
 import java.util.zip.CRC32;
@@ -33,15 +36,7 @@
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.Version;
-import org.apache.cassandra.io.util.DataInputBuffer;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.io.util.FileDataInput;
-import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
-import org.apache.cassandra.io.util.RandomAccessReader;
-import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.utils.FBUtilities.updateChecksumInt;
 
@@ -242,7 +237,7 @@
     }
 
     @Override
-    public void mutateRepairMetadata(Descriptor descriptor, long newRepairedAt, UUID newPendingRepair, boolean isTransient) throws IOException
+    public void mutateRepairMetadata(Descriptor descriptor, long newRepairedAt, TimeUUID newPendingRepair, boolean isTransient) throws IOException
     {
         if (logger.isTraceEnabled())
             logger.trace("Mutating {} to repairedAt time {} and pendingRepair {}",
@@ -263,7 +258,7 @@
     public void rewriteSSTableMetadata(Descriptor descriptor, Map<MetadataType, MetadataComponent> currentComponents) throws IOException
     {
         String filePath = descriptor.tmpFilenameFor(Component.STATS);
-        try (DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(filePath)))
+        try (DataOutputStreamPlus out = new FileOutputStreamPlus(filePath))
         {
             serialize(currentComponents, out, descriptor.version);
             out.flush();
@@ -273,10 +268,6 @@
             Throwables.throwIfInstanceOf(e, FileNotFoundException.class);
             throw new FSWriteError(e, filePath);
         }
-        // we cant move a file on top of another file in windows:
-        if (FBUtilities.isWindows)
-            FileUtils.delete(descriptor.filenameFor(Component.STATS));
         FileUtils.renameWithConfirm(filePath, descriptor.filenameFor(Component.STATS));
-
     }
 }
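
rewriteSSTableMetadata() keeps its write-to-temporary-then-rename shape: the new Statistics component is serialized to descriptor.tmpFilenameFor(Component.STATS), flushed, and only then moved over the live file, and with the Windows special case removed the rename is unconditional. A generic sketch of the same replace-in-one-step pattern using plain NIO rather than the Cassandra File/FileUtils API:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public final class ReplaceAtomically
{
    // Write the new contents next to the target, then move them into place in one step.
    static void rewrite(Path target, byte[] newContents) throws IOException
    {
        Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
        Files.write(tmp, newContents);
        Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
    }

    public static void main(String[] args) throws IOException
    {
        Path stats = Files.createTempFile("example-Statistics", ".db");
        rewrite(stats, "new component bytes".getBytes(StandardCharsets.UTF_8));
        System.out.println(Files.readString(stats));
    }
}
```
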
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java
old mode 100755
new mode 100644
index 69ca455..ee5505f
--- a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java
@@ -36,12 +36,11 @@
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.EstimatedHistogram;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.streamhist.TombstoneHistogram;
 import org.apache.cassandra.utils.UUIDSerializer;
-import org.apache.cassandra.utils.UUIDSerializer;
 
 /**
  * SSTable metadata that always stay on heap.
@@ -70,7 +69,7 @@
     public final long totalColumnsSet;
     public final long totalRows;
     public final UUID originatingHostId;
-    public final UUID pendingRepair;
+    public final TimeUUID pendingRepair;
     public final boolean isTransient;
     // just holds the current encoding stats to avoid allocating - it is not serialized
     public final EncodingStats encodingStats;
@@ -94,7 +93,7 @@
                          long totalColumnsSet,
                          long totalRows,
                          UUID originatingHostId,
-                         UUID pendingRepair,
+                         TimeUUID pendingRepair,
                          boolean isTransient)
     {
         this.estimatedPartitionSize = estimatedPartitionSize;
@@ -175,7 +174,7 @@
                                  isTransient);
     }
 
-    public StatsMetadata mutateRepairedMetadata(long newRepairedAt, UUID newPendingRepair, boolean newIsTransient)
+    public StatsMetadata mutateRepairedMetadata(long newRepairedAt, TimeUUID newPendingRepair, boolean newIsTransient)
     {
         return new StatsMetadata(estimatedPartitionSize,
                                  estimatedCellPerPartitionCount,
@@ -290,7 +289,7 @@
             {
                 size += 1;
                 if (component.pendingRepair != null)
-                    size += UUIDSerializer.serializer.serializedSize(component.pendingRepair, 0);
+                    size += TimeUUID.sizeInBytes();
             }
 
             if (version.hasIsTransient())
@@ -344,7 +343,7 @@
                 if (component.pendingRepair != null)
                 {
                     out.writeByte(1);
-                    UUIDSerializer.serializer.serialize(component.pendingRepair, out, 0);
+                    component.pendingRepair.serialize(out);
                 }
                 else
                 {
@@ -441,10 +440,10 @@
             else
                 commitLogIntervals = new IntervalSet<CommitLogPosition>(commitLogLowerBound, commitLogUpperBound);
 
-            UUID pendingRepair = null;
+            TimeUUID pendingRepair = null;
             if (version.hasPendingRepair() && in.readByte() != 0)
             {
-                pendingRepair = UUIDSerializer.serializer.deserialize(in, 0);
+                pendingRepair = TimeUUID.deserialize(in);
             }
 
             boolean isTransient = version.hasIsTransient() && in.readBoolean();
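
pendingRepair is framed as a presence byte followed by a fixed-size payload, and serializedSize mirrors that: one byte always, plus TimeUUID.sizeInBytes() when the field is set. A dependency-free sketch of the same optional-field framing, using a plain 16-byte UUID as a stand-in for TimeUUID:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.UUID;

public class OptionalFieldFraming
{
    // presence byte + fixed-size payload, as StatsMetadata does for pendingRepair
    static void writeOptional(DataOutputStream out, UUID value) throws IOException
    {
        if (value != null)
        {
            out.writeByte(1);
            out.writeLong(value.getMostSignificantBits());
            out.writeLong(value.getLeastSignificantBits());
        }
        else
        {
            out.writeByte(0);
        }
    }

    static UUID readOptional(DataInputStream in) throws IOException
    {
        if (in.readByte() == 0)
            return null;
        return new UUID(in.readLong(), in.readLong());
    }

    public static void main(String[] args) throws IOException
    {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeOptional(new DataOutputStream(bytes), UUID.randomUUID());
        UUID roundTripped = readOptional(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(roundTripped);
        // serialized size: 1 (presence) + 16 (payload) bytes when present, 1 byte when absent
    }
}
```
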
diff --git a/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java b/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java
index 7962c0f..a8b61dd 100644
--- a/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java
+++ b/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java
@@ -58,4 +58,4 @@
     {
         return 0; // Only valid for compressed files.
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java b/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java
index 7d1e91d..4e9bbb5 100644
--- a/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java
+++ b/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java
@@ -17,19 +17,17 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.nio.channels.WritableByteChannel;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 import net.nicoulaj.compilecommand.annotations.DontInline;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.utils.FastByteOperations;
-import org.apache.cassandra.utils.memory.MemoryUtil;
 
 /**
  * An implementation of the DataOutputStreamPlus interface using a ByteBuffer to stage writes
@@ -43,26 +41,6 @@
 
     protected ByteBuffer buffer;
 
-    public BufferedDataOutputStreamPlus(RandomAccessFile ras)
-    {
-        this(ras.getChannel());
-    }
-
-    public BufferedDataOutputStreamPlus(RandomAccessFile ras, int bufferSize)
-    {
-        this(ras.getChannel(), bufferSize);
-    }
-
-    public BufferedDataOutputStreamPlus(FileOutputStream fos)
-    {
-        this(fos.getChannel());
-    }
-
-    public BufferedDataOutputStreamPlus(FileOutputStream fos, int bufferSize)
-    {
-        this(fos.getChannel(), bufferSize);
-    }
-
     public BufferedDataOutputStreamPlus(WritableByteChannel wbc)
     {
         this(wbc, DEFAULT_BUFFER_SIZE);
@@ -75,7 +53,8 @@
         Preconditions.checkArgument(bufferSize >= 8, "Buffer size must be large enough to accommodate a long/double");
     }
 
-    protected BufferedDataOutputStreamPlus(WritableByteChannel channel, ByteBuffer buffer)
+    @VisibleForTesting
+    public BufferedDataOutputStreamPlus(WritableByteChannel channel, ByteBuffer buffer)
     {
         super(channel);
         this.buffer = buffer;
@@ -169,6 +148,21 @@
     }
 
     @Override
+    public void writeBytes(long register, int bytes) throws IOException
+    {
+        if (buffer.remaining() < Long.BYTES)
+        {
+            super.writeBytes(register, bytes);
+        }
+        else
+        {
+            int pos = buffer.position();
+            buffer.putLong(pos, register);
+            buffer.position(pos + bytes);
+        }
+    }
+
+    @Override
     public void writeShort(int v) throws IOException
     {
         writeChar(v);
@@ -266,6 +260,9 @@
     @Override
     public void close() throws IOException
     {
+        if (buffer == null)
+            return;
+
         doFlush(0);
         channel.close();
         FileUtils.clean(buffer);
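
The new writeBytes(long register, int bytes) above leans on the absolute putLong(index, value) overload: all eight bytes of the register are stored at the current index, but the position only advances by `bytes`, so the surplus bytes sit past the logical end and get overwritten by the next write. The remaining() check falls back to the superclass path whenever there is not a full eight bytes of space, which is what makes the unconditional eight-byte store safe. Which end of the register is kept depends on the buffer's byte order; a standalone demonstration with the default big-endian order, where the high-order bytes are the ones committed:

```java
import java.nio.ByteBuffer;

public class PartialLongWrite
{
    // Write only the first `bytes` bytes of `register`, assuming at least 8 bytes of space.
    static void writeBytes(ByteBuffer buffer, long register, int bytes)
    {
        int pos = buffer.position();
        buffer.putLong(pos, register);    // absolute put: stores 8 bytes, position unchanged
        buffer.position(pos + bytes);     // commit only the first `bytes` of them
    }

    public static void main(String[] args)
    {
        ByteBuffer buffer = ByteBuffer.allocate(16); // big-endian by default
        writeBytes(buffer, 0xAABBCCDD_00000000L, 4); // keeps AA BB CC DD
        buffer.put((byte) 0x11);                     // overwrites what would have been the 5th byte
        buffer.flip();
        while (buffer.hasRemaining())
            System.out.printf("%02X ", buffer.get());
        System.out.println();                        // AA BB CC DD 11
    }
}
```
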
diff --git a/src/java/org/apache/cassandra/io/util/ChannelProxy.java b/src/java/org/apache/cassandra/io/util/ChannelProxy.java
index 9ff46b7..717def7 100644
--- a/src/java/org/apache/cassandra/io/util/ChannelProxy.java
+++ b/src/java/org/apache/cassandra/io/util/ChannelProxy.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.MappedByteBuffer;
@@ -63,7 +62,7 @@
 
     public ChannelProxy(File file)
     {
-        this(file.getPath(), openChannel(file));
+        this(file.path(), openChannel(file));
     }
 
     public ChannelProxy(String filePath, FileChannel channel)
diff --git a/src/java/org/apache/cassandra/io/util/ChecksumWriter.java b/src/java/org/apache/cassandra/io/util/ChecksumWriter.java
index 50aaccb..194602c 100644
--- a/src/java/org/apache/cassandra/io/util/ChecksumWriter.java
+++ b/src/java/org/apache/cassandra/io/util/ChecksumWriter.java
@@ -18,7 +18,9 @@
 
 package org.apache.cassandra.io.util;
 
-import java.io.*;
+import java.io.DataOutput;
+import java.io.IOError;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.zip.CRC32;
@@ -88,12 +90,11 @@
 
     public void writeFullChecksum(@Nonnull File digestFile)
     {
-        try (FileOutputStream fos = new FileOutputStream(digestFile);
-             DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)))
+        try (FileOutputStreamPlus fos = new FileOutputStreamPlus(digestFile))
         {
-            out.write(String.valueOf(fullChecksum.getValue()).getBytes(StandardCharsets.UTF_8));
-            out.flush();
-            fos.getFD().sync();
+            fos.write(String.valueOf(fullChecksum.getValue()).getBytes(StandardCharsets.UTF_8));
+            fos.flush();
+            fos.getChannel().force(true);
         }
         catch (IOException e)
         {
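
The rewritten writeFullChecksum keeps the same durability guarantee as the old getFD().sync(): flush the stream, then force both data and metadata through the channel. A plain-NIO sketch of that write-then-force sequence (illustrative only; the patch itself goes through FileOutputStreamPlus):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    public class DurableWriteSketch
    {
        // Write a small payload and force data + metadata to disk before closing.
        static void writeDurably(Path path, String payload) throws IOException
        {
            try (FileChannel channel = FileChannel.open(path,
                                                        StandardOpenOption.CREATE,
                                                        StandardOpenOption.TRUNCATE_EXISTING,
                                                        StandardOpenOption.WRITE))
            {
                ByteBuffer buffer = ByteBuffer.wrap(payload.getBytes(StandardCharsets.UTF_8));
                while (buffer.hasRemaining())
                    channel.write(buffer);
                channel.force(true); // true = also flush file metadata, like FileDescriptor.sync()
            }
        }
    }
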
diff --git a/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java b/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java
index 2e59e3b..62927f5 100644
--- a/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java
+++ b/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
 
 import org.apache.cassandra.utils.ChecksumType;
@@ -32,7 +31,7 @@
         {
             DataIntegrityMetadata.ChecksumValidator validator = new DataIntegrityMetadata.ChecksumValidator(ChecksumType.CRC32,
                                                                                                             RandomAccessReader.open(crcFile),
-                                                                                                            file.getPath());
+                                                                                                            file.path());
             Rebufferer rebufferer = new ChecksummedRebufferer(channel, validator);
             return new RandomAccessReader.RandomAccessReaderWithOwnChannel(rebufferer);
         }
diff --git a/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java b/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java
index f89e7cc..fa8fad7 100644
--- a/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java
+++ b/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.Optional;
 
diff --git a/src/java/org/apache/cassandra/io/util/ChunkReader.java b/src/java/org/apache/cassandra/io/util/ChunkReader.java
index 1d3439e..33bf792 100644
--- a/src/java/org/apache/cassandra/io/util/ChunkReader.java
+++ b/src/java/org/apache/cassandra/io/util/ChunkReader.java
@@ -48,4 +48,4 @@
      * This is not guaranteed to be fulfilled.
      */
     BufferType preferredBufferType();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/DataInputPlus.java b/src/java/org/apache/cassandra/io/util/DataInputPlus.java
index 7c29ee1..bda8461 100644
--- a/src/java/org/apache/cassandra/io/util/DataInputPlus.java
+++ b/src/java/org/apache/cassandra/io/util/DataInputPlus.java
@@ -19,11 +19,15 @@
 
 import java.io.*;
 
+import org.apache.cassandra.utils.Shared;
 import org.apache.cassandra.utils.vint.VIntCoding;
 
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
 /**
  * Extension to DataInput that provides support for reading varints
  */
+@Shared(scope = SIMULATION)
 public interface DataInputPlus extends DataInput
 {
     default long readVInt() throws IOException
@@ -60,6 +64,8 @@
 
     /**
      * Wrapper around an InputStream that provides no buffering but can decode varints
+     *
+     * TODO: probably shouldn't use DataInputStream as a parent
      */
     public class DataInputStreamPlus extends DataInputStream implements DataInputPlus
     {
diff --git a/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java b/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java
index 277b359..65d4e58 100644
--- a/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java
+++ b/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.io.util;
 
 import java.io.Closeable;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.zip.CheckedInputStream;
diff --git a/src/java/org/apache/cassandra/io/util/DataOutputPlus.java b/src/java/org/apache/cassandra/io/util/DataOutputPlus.java
index b94d097..205dab7 100644
--- a/src/java/org/apache/cassandra/io/util/DataOutputPlus.java
+++ b/src/java/org/apache/cassandra/io/util/DataOutputPlus.java
@@ -21,18 +21,22 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import org.apache.cassandra.utils.Shared;
 import org.apache.cassandra.utils.vint.VIntCoding;
 
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
 /**
  * Extension to DataOutput that provides for writing ByteBuffer and Memory, potentially with an efficient
  * implementation that is zero copy or at least has reduced bounds checking overhead.
  */
+@Shared(scope = SIMULATION)
 public interface DataOutputPlus extends DataOutput
 {
     // write the buffer without modifying its position
     void write(ByteBuffer buffer) throws IOException;
 
-    default void write(Memory memory, long offset, long length) throws IOException
+    default void write(ReadableMemory memory, long offset, long length) throws IOException
     {
         for (ByteBuffer buffer : memory.asByteBuffers(offset, length))
             write(buffer);
@@ -56,6 +60,54 @@
     }
 
     /**
+     * An efficient way to write the {@code bytes} most significant bytes of a long
+     *
+     * @param register - the long value whose most significant bytes are to be written
+     * @param bytes - the number of bytes of the register to write. Valid values are between 0 and 8 inclusive (0 writes nothing).
+     * @throws IOException
+     */
+    default void writeBytes(long register, int bytes) throws IOException
+    {
+        switch (bytes)
+        {
+            case 0:
+                break;
+            case 1:
+                writeByte((int)(register >>> 56));
+                break;
+            case 2:
+                writeShort((int)(register >> 48));
+                break;
+            case 3:
+                writeShort((int)(register >> 48));
+                writeByte((int)(register >> 40));
+                break;
+            case 4:
+                writeInt((int)(register >> 32));
+                break;
+            case 5:
+                writeInt((int)(register >> 32));
+                writeByte((int)(register >> 24));
+                break;
+            case 6:
+                writeInt((int)(register >> 32));
+                writeShort((int)(register >> 16));
+                break;
+            case 7:
+                writeInt((int)(register >> 32));
+                writeShort((int)(register >> 16));
+                writeByte((int)(register >> 8));
+                break;
+            case 8:
+                writeLong(register);
+                break;
+            default:
+                throw new IllegalArgumentException();
+        }
+
+    }
+
+    /**
      * Returns the current position of the underlying target like a file-pointer
     * or the position within a buffer. Not every implementation may support this
      * functionality. Whether or not this functionality is supported can be checked
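
The default writeBytes above expects the value in the most significant bits of the register and emits it big-endian, most significant byte first. A small, self-contained illustration of that calling convention (a simplified byte-at-a-time equivalent of the switch, not the actual default method; names are illustrative):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class WriteBytesConventionSketch
    {
        // Emit the `bytes` most significant bytes of `register`, big-endian.
        static void writeBytes(DataOutputStream out, long register, int bytes) throws IOException
        {
            for (int i = 0; i < bytes; i++)
                out.writeByte((int) (register >>> (56 - 8 * i)));
        }

        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            long length = 0x0A0B0CL;                 // a 3-byte value
            long register = length << (64 - 3 * 8);  // pack it into the top 24 bits
            writeBytes(new DataOutputStream(baos), register, 3);
            for (byte b : baos.toByteArray())
                System.out.printf("%02x ", b);       // prints: 0a 0b 0c
        }
    }
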
diff --git a/src/java/org/apache/cassandra/io/util/File.java b/src/java/org/apache/cassandra/io/util/File.java
new file mode 100644
index 0000000..1f48707
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/File.java
@@ -0,0 +1,775 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.net.URI;
+import java.nio.channels.FileChannel;
+import java.nio.file.*; // checkstyle: permit this import
+import java.util.Objects;
+import java.util.function.BiPredicate;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+
+import javax.annotation.Nullable;
+
+import com.google.common.util.concurrent.RateLimiter;
+
+import net.openhft.chronicle.core.util.ThrowingFunction;
+import org.apache.cassandra.io.FSWriteError;
+
+import static org.apache.cassandra.io.util.PathUtils.filename;
+import static org.apache.cassandra.utils.Throwables.maybeFail;
+
+/**
+ * A thin wrapper around java.nio.file.Path to provide more ergonomic functionality.
+ *
+ * TODO codebase probably should not use tryList, as unexpected exceptions are hidden;
+ *      probably want to introduce e.g. listIfExists
+ */
+public class File implements Comparable<File>
+{
+    private static FileSystem filesystem = FileSystems.getDefault();
+
+    public enum WriteMode { OVERWRITE, APPEND }
+
+    public static String pathSeparator()
+    {
+        return filesystem.getSeparator();
+    }
+
+    @Nullable final Path path; // nullable to support the concept of an empty path, which resolves to the working directory if converted to an absolute path
+
+    /**
+     * Construct a File representing the child {@code child} of {@code parent}
+     */
+    public File(String parent, String child)
+    {
+        this(parent.isEmpty() ? null : filesystem.getPath(parent), child);
+    }
+
+    /**
+     * Construct a File representing the child {@code child} of {@code parent}
+     */
+    public File(File parent, String child)
+    {
+        this(parent.path, child);
+    }
+
+    /**
+     * Construct a File representing the child {@code child} of {@code parent}
+     */
+    public File(Path parent, String child)
+    {
+        // if "empty abstract path" (a la java.io.File) is provided, we should behave as though resolving relative path
+        if (child.startsWith(pathSeparator()))
+            child = child.substring(pathSeparator().length());
+        this.path = parent == null ? filesystem.getPath(child) : parent.resolve(child);
+    }
+
+    /**
+     * Construct a File representing the provided {@code path}
+     */
+    public File(String path)
+    {
+        this(path.isEmpty() ? null : filesystem.getPath(path));
+    }
+
+    /**
+     * Create a File equivalent to the java.io.File provided
+     */
+    public File(java.io.File file)
+    {
+        this(file.getPath().isEmpty() ? null : file.toPath());
+    }
+
+    /**
+     * Construct a File representing the child {@code child} of {@code parent}
+     */
+    public File(java.io.File parent, String child)
+    {
+        this(new File(parent), child);
+    }
+
+    /**
+     * Convenience constructor equivalent to {@code new File(Paths.get(path))}
+     */
+    public File(URI path)
+    {
+        this(Paths.get(path)); //TODO unsafe if uri is file:// as it uses default file system and not File.filesystem
+        if (!path.isAbsolute() || path.isOpaque()) throw new IllegalArgumentException();
+    }
+
+    /**
+     * @param path the path to wrap
+     */
+    public File(Path path)
+    {
+        if (path != null && path.getFileSystem() != filesystem)
+            throw new IllegalArgumentException("Incompatible file system");
+
+        this.path = path;
+    }
+
+
+    public static Path getPath(String first, String... more)
+    {
+        return filesystem.getPath(first, more);
+    }
+
+    /**
+     * Try to delete the file, returning true iff it was deleted by us. Does not ordinarily throw exceptions.
+     */
+    public boolean tryDelete()
+    {
+        return path != null && PathUtils.tryDelete(path);
+    }
+
+    /**
+     * This file will be deleted, and any exceptions encountered merged with {@code accumulate} to the return value
+     */
+    public Throwable delete(Throwable accumulate)
+    {
+        return delete(accumulate, null);
+    }
+
+    /**
+     * This file will be deleted, obeying the provided rate limiter.
+     * Any exceptions encountered will be merged with {@code accumulate} to the return value
+     */
+    public Throwable delete(Throwable accumulate, RateLimiter rateLimiter)
+    {
+        return PathUtils.delete(toPathForWrite(), accumulate, rateLimiter);
+    }
+
+    /**
+     * This file will be deleted, with any failures being reported with an FSError
+     * @throws FSWriteError if it cannot be deleted
+     */
+    public void delete()
+    {
+        maybeFail(delete(null, null));
+    }
+
+    /**
+     * This file will be deleted if it exists, with any failures being reported with an FSError
+     * @throws FSWriteError if it cannot be deleted
+     */
+    public void deleteIfExists()
+    {
+        if (path != null)
+            PathUtils.deleteIfExists(path);
+    }
+
+    /**
+     * This file will be deleted, obeying the provided rate limiter.
+     * @throws FSWriteError if it cannot be deleted
+     */
+    public void delete(RateLimiter rateLimiter)
+    {
+        maybeFail(delete(null, rateLimiter));
+    }
+
+    /**
+     * Deletes this path and all files and subdirectories beneath it, obeying the provided rate limiter.
+     * @throws FSWriteError if any part of the tree cannot be deleted
+     */
+    public void deleteRecursive(RateLimiter rateLimiter)
+    {
+        PathUtils.deleteRecursive(toPathForWrite(), rateLimiter);
+    }
+
+    /**
+     * Deletes this path and all files and subdirectories beneath it.
+     * @throws FSWriteError if any part of the tree cannot be deleted
+     */
+    public void deleteRecursive()
+    {
+        PathUtils.deleteRecursive(toPathForWrite());
+    }
+
+    /**
+     * Try to delete the file on process exit.
+     */
+    public void deleteOnExit()
+    {
+        if (path != null) PathUtils.deleteOnExit(path);
+    }
+
+    /**
+     * This file will be deleted on clean shutdown; if it is a directory, its entire contents
+     * <i>at the time of shutdown</i> will be deleted
+     */
+    public void deleteRecursiveOnExit()
+    {
+        if (path != null)
+            PathUtils.deleteRecursiveOnExit(path);
+    }
+
+    /**
+     * Try to rename the file atomically, if the system supports it.
+     * @return true iff successful, false if it fails for any reason.
+     */
+    public boolean tryMove(File to)
+    {
+        return path != null && PathUtils.tryRename(path, to.path);
+    }
+
+    /**
+     * Atomically (if supported) rename/move this file to {@code to}
+     * @throws FSWriteError if the file cannot be moved
+     */
+    public void move(File to)
+    {
+        PathUtils.rename(toPathForRead(), to.toPathForWrite());
+    }
+
+    /**
+     * @return the length of the file if it exists and if we can read it; 0 otherwise.
+     */
+    public long length()
+    {
+        return path == null ? 0L : PathUtils.tryGetLength(path);
+    }
+
+    /**
+     * @return the last modified time in millis of the path if it exists and we can read it; 0 otherwise.
+     */
+    public long lastModified()
+    {
+        return path == null ? 0L : PathUtils.tryGetLastModified(path);
+    }
+
+    /**
+     * Try to set the last modified time in millis of the path
+     * @return true if it exists and we can write it; false otherwise.
+     */
+    public boolean trySetLastModified(long value)
+    {
+        return path != null && PathUtils.trySetLastModified(path, value);
+    }
+
+    /**
+     * Try to set whether the path is readable by its owner
+     * @return true if it exists and we can write it; false otherwise.
+     */
+    public boolean trySetReadable(boolean value)
+    {
+        return path != null && PathUtils.trySetReadable(path, value);
+    }
+
+    /**
+     * Try to set whether the path is writable by its owner
+     * @return true if it exists and we can write it; false otherwise.
+     */
+    public boolean trySetWritable(boolean value)
+    {
+        return path != null && PathUtils.trySetWritable(path, value);
+    }
+
+    /**
+     * Try to set whether the path is executable by its owner
+     * @return true if it exists and we can write it; false otherwise.
+     */
+    public boolean trySetExecutable(boolean value)
+    {
+        return path != null && PathUtils.trySetExecutable(path, value);
+    }
+
+    /**
+     * @return true if the path exists, false if it does not, or we cannot determine due to some exception
+     */
+    public boolean exists()
+    {
+        return path != null && PathUtils.exists(path);
+    }
+
+    /**
+     * @return true if the path refers to a directory
+     */
+    public boolean isDirectory()
+    {
+        return path != null && PathUtils.isDirectory(path);
+    }
+
+    /**
+     * @return true if the path refers to a regular file
+     */
+    public boolean isFile()
+    {
+        return path != null && PathUtils.isFile(path);
+    }
+
+    /**
+     * @return true if the path can be read by us
+     */
+    public boolean isReadable()
+    {
+        return path != null && Files.isReadable(path);
+    }
+
+    /**
+     * @return true if the path can be written by us
+     */
+    public boolean isWritable()
+    {
+        return path != null && Files.isWritable(path);
+    }
+
+    /**
+     * @return true if the path can be executed by us
+     */
+    public boolean isExecutable()
+    {
+        return path != null && Files.isExecutable(path);
+    }
+
+    /**
+     * Try to create a new regular file at this path.
+     * @return true if successful, false if it already exists
+     */
+    public boolean createFileIfNotExists()
+    {
+        return PathUtils.createFileIfNotExists(toPathForWrite());
+    }
+
+    public boolean createDirectoriesIfNotExists()
+    {
+        return PathUtils.createDirectoriesIfNotExists(toPathForWrite());
+    }
+
+    /**
+     * Try to create a directory at this path.
+     * @return true if a new directory was created at this path, and false otherwise.
+     */
+    public boolean tryCreateDirectory()
+    {
+        return path != null && PathUtils.tryCreateDirectory(path);
+    }
+
+    /**
+     * Try to create a directory at this path, creating any parent directories as necessary.
+     * @return true if a new directory was created at this path, and false otherwise.
+     */
+    public boolean tryCreateDirectories()
+    {
+        return path != null && PathUtils.tryCreateDirectories(path);
+    }
+
+    /**
+     * @return the parent file, or null if none
+     */
+    public File parent()
+    {
+        if (path == null) return null;
+        Path parent = path.getParent();
+        if (parent == null) return null;
+        return new File(parent);
+    }
+
+    /**
+     * @return the parent file's path, or null if none
+     */
+    public String parentPath()
+    {
+        File parent = parent();
+        return parent == null ? null : parent.toString();
+    }
+
+    /**
+     * @return true if the path has no relative path elements
+     */
+    public boolean isAbsolute()
+    {
+        return path != null && path.isAbsolute();
+    }
+
+    public boolean isAncestorOf(File child)
+    {
+        return PathUtils.isContained(toPath(), child.toPath());
+    }
+
+    /**
+     * @return a File that represents the same path as this File with any relative path elements resolved.
+     *         If this is the empty File, returns the working directory.
+     */
+    public File toAbsolute()
+    {
+        return new File(toPath().toAbsolutePath());
+    }
+
+    /** {@link #toAbsolute} */
+    public String absolutePath()
+    {
+        return toPath().toAbsolutePath().toString();
+    }
+
+    /**
+     * @return a File that represents the same path as this File with any relative path elements and links resolved.
+     *         If this is the empty File, returns the working directory.
+     */
+    public File toCanonical()
+    {
+        Path canonical = PathUtils.toCanonicalPath(toPath());
+        return canonical == path ? this : new File(canonical);
+    }
+
+    /** {@link #toCanonical} */
+    public String canonicalPath()
+    {
+        return toCanonical().toString();
+    }
+
+    /**
+     * @return the last path element for this file
+     */
+    public String name()
+    {
+        return path == null ? "" : filename(path);
+    }
+
+    public void forEach(Consumer<File> forEach)
+    {
+        PathUtils.forEach(path, path -> forEach.accept(new File(path)));
+    }
+
+    public void forEachRecursive(Consumer<File> forEach)
+    {
+        PathUtils.forEachRecursive(path, path -> forEach.accept(new File(path)));
+    }
+
+    private static <V> ThrowingFunction<IOException, V, RuntimeException> nulls() { return ignore -> null; }
+    private static <V> ThrowingFunction<IOException, V, IOException> rethrow()
+    {
+        return fail -> {
+            if (fail == null) throw new FileNotFoundException();
+            throw fail;
+        };
+    }
+    private static <V> ThrowingFunction<IOException, V, UncheckedIOException> unchecked()
+    {
+        return fail -> {
+            if (fail == null) fail = new FileNotFoundException();
+            throw new UncheckedIOException(fail);
+        };
+    }
+
+
+    /**
+     * @return if a directory, the names of the files within; null otherwise
+     */
+    public String[] tryListNames()
+    {
+        return tryListNames(nulls());
+    }
+
+    /**
+     * @return if a directory, the names of the files within, filtered by the provided predicate; null otherwise
+     */
+    public String[] tryListNames(BiPredicate<File, String> filter)
+    {
+        return tryListNames(filter, nulls());
+    }
+
+    /**
+     * @return if a directory, the files within; null otherwise
+     */
+    public File[] tryList()
+    {
+        return tryList(nulls());
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; null otherwise
+     */
+    public File[] tryList(Predicate<File> filter)
+    {
+        return tryList(filter, nulls());
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; null otherwise
+     */
+    public File[] tryList(BiPredicate<File, String> filter)
+    {
+        return tryList(filter, nulls());
+    }
+
+    /**
+     * @return if a directory, the names of the files within; otherwise an IOException is thrown
+     */
+    public String[] listNames() throws IOException
+    {
+        return tryListNames(rethrow());
+    }
+
+    /**
+     * @return if a directory, the names of the files within, filtered by the provided predicate; otherwise an IOException is thrown
+     */
+    public String[] listNames(BiPredicate<File, String> filter) throws IOException
+    {
+        return tryListNames(filter, rethrow());
+    }
+
+    /**
+     * @return if a directory, the files within; otherwise an IOException is thrown
+     */
+    public File[] list() throws IOException
+    {
+        return tryList(rethrow());
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; otherwise an IOException is thrown
+     */
+    public File[] list(Predicate<File> filter) throws IOException
+    {
+        return tryList(filter, rethrow());
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; otherwise an IOException is thrown
+     */
+    public File[] list(BiPredicate<File, String> filter) throws IOException
+    {
+        return tryList(filter, rethrow());
+    }
+
+    /**
+     * @return if a directory, the names of the files within; otherwise an UncheckedIOException is thrown
+     */
+    public String[] listNamesUnchecked() throws UncheckedIOException
+    {
+        return tryListNames(unchecked());
+    }
+
+    /**
+     * @return if a directory, the names of the files within, filtered by the provided predicate; otherwise an UncheckedIOException is thrown
+     */
+    public String[] listNamesUnchecked(BiPredicate<File, String> filter) throws UncheckedIOException
+    {
+        return tryListNames(filter, unchecked());
+    }
+
+    /**
+     * @return if a directory, the files within; otherwise an UncheckedIOException is thrown
+     */
+    public File[] listUnchecked() throws UncheckedIOException
+    {
+        return tryList(unchecked());
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; otherwise an UncheckedIOException is thrown
+     */
+    public File[] listUnchecked(Predicate<File> filter) throws UncheckedIOException
+    {
+        return tryList(filter, unchecked());
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; otherwise an UncheckedIOException is thrown
+     */
+    public File[] listUnchecked(BiPredicate<File, String> filter) throws UncheckedIOException
+    {
+        return tryList(filter, unchecked());
+    }
+
+    /**
+     * @return if a directory, the names of the files within; null otherwise
+     */
+    public <T extends Throwable> String[] tryListNames(ThrowingFunction<IOException, String[], T> orElse) throws T
+    {
+        return tryListNames(path, Function.identity(), orElse);
+    }
+
+    /**
+     * @return if a directory, the names of the files within, filtered by the provided predicate; null otherwise
+     */
+    public <T extends Throwable> String[] tryListNames(BiPredicate<File, String> filter, ThrowingFunction<IOException, String[], T> orElse) throws T
+    {
+        return tryList(path, stream -> stream.map(PathUtils::filename).filter(filename -> filter.test(this, filename)), String[]::new, orElse);
+    }
+
+    /**
+     * @return if a directory, the files within; null otherwise
+     */
+    private <T extends Throwable> File[] tryList(ThrowingFunction<IOException, File[], T> orElse) throws T
+    {
+        return tryList(path, Function.identity(), orElse);
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; null otherwise
+     */
+    private <T extends Throwable> File[] tryList(Predicate<File> filter, ThrowingFunction<IOException, File[], T> orElse) throws T
+    {
+        return tryList(path, stream -> stream.filter(filter), orElse);
+    }
+
+    /**
+     * @return if a directory, the files within, filtered by the provided predicate; null otherwise
+     */
+    private <T extends Throwable> File[] tryList(BiPredicate<File, String> filter, ThrowingFunction<IOException, File[], T> orElse) throws T
+    {
+        return tryList(path, stream -> stream.filter(file -> filter.test(this, file.name())), orElse);
+    }
+
+    private static <T extends Throwable> String[] tryListNames(Path path, Function<Stream<File>, Stream<File>> toFiles, ThrowingFunction<IOException, String[], T> orElse) throws T
+    {
+        if (path == null)
+            return orElse.apply(null);
+        return PathUtils.tryList(path, stream -> toFiles.apply(stream.map(File::new)).map(File::name), String[]::new, orElse);
+    }
+
+    private static <T extends Throwable, V> V[] tryList(Path path, Function<Stream<Path>, Stream<V>> transformation, IntFunction<V[]> constructor, ThrowingFunction<IOException, V[], T> orElse) throws T
+    {
+        if (path == null)
+            return orElse.apply(null);
+        return PathUtils.tryList(path, transformation, constructor, orElse);
+    }
+
+    private static <T extends Throwable> File[] tryList(Path path, Function<Stream<File>, Stream<File>> toFiles, ThrowingFunction<IOException, File[], T> orElse) throws T
+    {
+        if (path == null)
+            return orElse.apply(null);
+        return PathUtils.tryList(path, stream -> toFiles.apply(stream.map(File::new)), File[]::new, orElse);
+    }
+
+    /**
+     * @return the path of this file
+     */
+    public String path()
+    {
+        return toString();
+    }
+
+    /**
+     * @return the {@link Path} of this file
+     */
+    public Path toPath()
+    {
+        return path == null ? filesystem.getPath("") : path;
+    }
+
+    /**
+     * @return the path of this file
+     */
+    @Override
+    public String toString()
+    {
+        return path == null ? "" : path.toString();
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return path == null ? 0 : path.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        return obj instanceof File && Objects.equals(path, ((File) obj).path);
+    }
+
+    @Override
+    public int compareTo(File that)
+    {
+        if (this.path == null || that.path == null)
+            return this.path == null && that.path == null ? 0 : this.path == null ? -1 : 1;
+        return this.path.compareTo(that.path);
+    }
+
+    public java.io.File toJavaIOFile()
+    {
+        return path == null ? new java.io.File("") // checkstyle: permit this instantiation
+                            : path.toFile(); // checkstyle: permit this invocation
+    }
+
+    /**
+     * @return a new {@link FileChannel} for reading
+     */
+    public FileChannel newReadChannel() throws NoSuchFileException
+    {
+        return PathUtils.newReadChannel(toPathForRead());
+    }
+
+    /**
+     * @return a new {@link FileChannel} for reading or writing; file will be created if it doesn't exist
+     */
+    public FileChannel newReadWriteChannel() throws NoSuchFileException
+    {
+        return PathUtils.newReadWriteChannel(toPathForRead());
+    }
+
+    /**
+     * @param mode whether the channel overwrites or appends to the underlying file
+     * @return a new {@link FileChannel} for writing; file will be created if it doesn't exist
+     */
+    public FileChannel newWriteChannel(WriteMode mode) throws NoSuchFileException
+    {
+        switch (mode)
+        {
+            default: throw new AssertionError();
+            case APPEND: return PathUtils.newWriteAppendChannel(toPathForWrite());
+            case OVERWRITE: return PathUtils.newWriteOverwriteChannel(toPathForWrite());
+        }
+    }
+
+    public FileWriter newWriter(WriteMode mode) throws IOException
+    {
+        return new FileWriter(this, mode);
+    }
+
+    public FileOutputStreamPlus newOutputStream(WriteMode mode) throws NoSuchFileException
+    {
+        return new FileOutputStreamPlus(this, mode);
+    }
+
+    public FileInputStreamPlus newInputStream() throws NoSuchFileException
+    {
+        return new FileInputStreamPlus(this);
+    }
+
+    private Path toPathForWrite()
+    {
+        if (path == null)
+            throw new IllegalStateException("Cannot write to an empty path");
+        return path;
+    }
+
+    private Path toPathForRead()
+    {
+        if (path == null)
+            throw new IllegalStateException("Cannot read from an empty path");
+        return path;
+    }
+
+    public static void unsafeSetFilesystem(FileSystem fs)
+    {
+        filesystem = fs;
+    }
+}
+
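
A short usage sketch of the new wrapper, restricted to methods introduced above; the paths and class name are illustrative only:

    import org.apache.cassandra.io.util.File;

    public class FileWrapperSketch
    {
        public static void main(String[] args)
        {
            File dir = new File(System.getProperty("java.io.tmpdir"), "file-wrapper-demo");
            if (dir.tryCreateDirectories())
                System.out.println("created " + dir.path());

            File child = new File(dir, "example.txt");
            System.out.println(child.name() + " exists: " + child.exists());

            // try* methods return null / false instead of throwing
            File[] regularFiles = dir.tryList(File::isFile);
            System.out.println(regularFiles == null ? "not a directory"
                                                    : regularFiles.length + " regular files");

            dir.deleteRecursive();
        }
    }
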
diff --git a/src/java/org/apache/cassandra/io/util/FileHandle.java b/src/java/org/apache/cassandra/io/util/FileHandle.java
index 6d3ae7c..6bab460 100644
--- a/src/java/org/apache/cassandra/io/util/FileHandle.java
+++ b/src/java/org/apache/cassandra/io/util/FileHandle.java
@@ -148,8 +148,16 @@
     public FileDataInput createReader(long position)
     {
         RandomAccessReader reader = createReader();
-        reader.seek(position);
-        return reader;
+        try
+        {
+            reader.seek(position);
+            return reader;
+        }
+        catch (Throwable t)
+        {
+            try { reader.close(); } catch (Throwable t2) { t.addSuppressed(t2); }
+            throw t;
+        }
     }
 
     /**
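
The seek-failure handling above follows the usual close-on-failure idiom: if setup of an already-open resource throws, close it and attach any secondary failure as a suppressed exception. A generic sketch of the same idiom (the helper and its names are hypothetical, not part of this patch):

    import java.io.Closeable;
    import java.io.IOException;

    public final class CloseOnFailureSketch
    {
        interface ThrowingConsumer<T> { void accept(T value) throws IOException; }

        // Run `init` against an already-open resource; on any failure, close the
        // resource and keep the close failure as a suppressed exception.
        static <T extends Closeable> T initOrClose(T resource, ThrowingConsumer<T> init) throws IOException
        {
            try
            {
                init.accept(resource);
                return resource;
            }
            catch (Throwable t)
            {
                try { resource.close(); } catch (Throwable t2) { t.addSuppressed(t2); }
                throw t;
            }
        }
    }
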
diff --git a/src/java/org/apache/cassandra/io/util/FileInputStreamPlus.java b/src/java/org/apache/cassandra/io/util/FileInputStreamPlus.java
new file mode 100644
index 0000000..79e8438
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/FileInputStreamPlus.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+
+public class FileInputStreamPlus extends RebufferingInputStream
+{
+    final FileChannel channel;
+
+    public FileInputStreamPlus(String file) throws NoSuchFileException
+    {
+        this(new File(file));
+    }
+
+    public FileInputStreamPlus(File file) throws NoSuchFileException
+    {
+        this(file.newReadChannel());
+    }
+
+    public FileInputStreamPlus(Path path) throws NoSuchFileException
+    {
+        this(PathUtils.newReadChannel(path));
+    }
+
+    public FileInputStreamPlus(Path path, int bufferSize) throws NoSuchFileException
+    {
+        this(PathUtils.newReadChannel(path), bufferSize);
+    }
+
+    private FileInputStreamPlus(FileChannel channel)
+    {
+        this(channel, 1 << 14);
+    }
+
+    private FileInputStreamPlus(FileChannel channel, int bufferSize)
+    {
+        super(ByteBuffer.allocateDirect(bufferSize));
+        this.channel = channel;
+        this.buffer.limit(0);
+    }
+
+    @Override
+    protected void reBuffer() throws IOException
+    {
+        buffer.clear();
+        channel.read(buffer);
+        buffer.flip();
+    }
+
+    public FileChannel getChannel()
+    {
+        return channel;
+    }
+
+    @Override
+    public void close() throws IOException
+    {
+        try
+        {
+            super.close();
+        }
+        finally
+        {
+            try
+            {
+                FileUtils.clean(buffer);
+            }
+            finally
+            {
+                channel.close();
+            }
+        }
+    }
+}
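
A brief usage sketch of the new stream; readInt/readFully come from the DataInputPlus surface it inherits via RebufferingInputStream, and the length-prefixed layout here is just an assumption for the example:

    import java.io.IOException;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileInputStreamPlus;

    public class ReadPayloadSketch
    {
        // Assumes the file holds a 4-byte big-endian length followed by that many bytes.
        static byte[] readPayload(File file) throws IOException
        {
            try (FileInputStreamPlus in = new FileInputStreamPlus(file))
            {
                int length = in.readInt();
                byte[] payload = new byte[length];
                in.readFully(payload);
                return payload;
            }
        }
    }
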
diff --git a/src/java/org/apache/cassandra/io/util/FileOutputStreamPlus.java b/src/java/org/apache/cassandra/io/util/FileOutputStreamPlus.java
new file mode 100644
index 0000000..0cfd3e3
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/FileOutputStreamPlus.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+
+import static org.apache.cassandra.io.util.File.WriteMode.OVERWRITE;
+
+public class FileOutputStreamPlus extends BufferedDataOutputStreamPlus
+{
+    public FileOutputStreamPlus(String path) throws NoSuchFileException
+    {
+        this(path, OVERWRITE);
+    }
+
+    public FileOutputStreamPlus(String path, File.WriteMode mode) throws NoSuchFileException
+    {
+        this(new File(path), mode);
+    }
+
+    public FileOutputStreamPlus(File file) throws NoSuchFileException
+    {
+        this(file, OVERWRITE);
+    }
+
+    public FileOutputStreamPlus(File file, File.WriteMode mode) throws NoSuchFileException
+    {
+        super(file.newWriteChannel(mode));
+    }
+
+    public FileOutputStreamPlus(Path path) throws NoSuchFileException
+    {
+        this(path, OVERWRITE);
+    }
+
+    public FileOutputStreamPlus(Path path, File.WriteMode mode) throws NoSuchFileException
+    {
+        this(new File(path), mode);
+    }
+
+    public void sync() throws IOException
+    {
+        ((FileChannel)channel).force(true);
+    }
+
+    public FileChannel getChannel()
+    {
+        return (FileChannel) channel;
+    }
+}
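
A matching write-side sketch, using only methods visible above plus the standard OutputStream surface; sync() forces the underlying channel before close (class and method names in the sketch are illustrative):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;

    public class WritePayloadSketch
    {
        // Overwrite the file with a small payload and force it to disk before closing.
        static void writeDurably(File file, String payload) throws IOException
        {
            try (FileOutputStreamPlus out = new FileOutputStreamPlus(file))
            {
                out.write(payload.getBytes(StandardCharsets.UTF_8));
                out.flush();
                out.sync();
            }
        }
    }
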
diff --git a/src/java/org/apache/cassandra/io/util/FileReader.java b/src/java/org/apache/cassandra/io/util/FileReader.java
new file mode 100644
index 0000000..86d0388
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/FileReader.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+
+public class FileReader extends InputStreamReader
+{
+    @SuppressWarnings("resource") // FISP is closed by ISR::close
+    public FileReader(String file) throws IOException
+    {
+        super(new FileInputStreamPlus(file));
+    }
+
+    @SuppressWarnings("resource") // FISP is closed by ISR::close
+    public FileReader(File file) throws IOException
+    {
+        super(new FileInputStreamPlus(file));
+    }
+}
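
A small usage sketch; closing the BufferedReader closes the FileReader, which in turn closes the underlying FileInputStreamPlus (names here are illustrative):

    import java.io.BufferedReader;
    import java.io.IOException;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileReader;

    public class CountLinesSketch
    {
        static long countLines(File file) throws IOException
        {
            try (BufferedReader reader = new BufferedReader(new FileReader(file)))
            {
                return reader.lines().count();
            }
        }
    }
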
diff --git a/src/java/org/apache/cassandra/io/util/FileUtils.java b/src/java/org/apache/cassandra/io/util/FileUtils.java
index 2aba2f7..01f1f18 100644
--- a/src/java/org/apache/cassandra/io/util/FileUtils.java
+++ b/src/java/org/apache/cassandra/io/util/FileUtils.java
@@ -17,7 +17,10 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.*;
+import java.io.BufferedWriter;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
 import java.lang.invoke.MethodHandle;
 import java.lang.invoke.MethodHandles;
 import java.lang.reflect.Method;
@@ -27,18 +30,22 @@
 import java.nio.channels.FileChannel;
 import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
-import java.nio.file.*;
+import java.nio.file.DirectoryNotEmptyException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.StandardOpenOption;
 import java.nio.file.attribute.BasicFileAttributes;
-import java.nio.file.attribute.FileAttributeView;
-import java.nio.file.attribute.FileStoreAttributeView;
 import java.text.DecimalFormat;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashSet;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Optional;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
@@ -50,43 +57,34 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.io.FSError;
 import org.apache.cassandra.io.FSErrorHandler;
-import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
-import org.apache.cassandra.utils.NoSpamLogger;
 import org.apache.cassandra.utils.SyncUtil;
 
 import static com.google.common.base.Throwables.propagate;
 import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_IO_TMPDIR;
-import static org.apache.cassandra.config.CassandraRelevantProperties.USE_NIX_RECURSIVE_DELETE;
 import static org.apache.cassandra.utils.Throwables.maybeFail;
-import static org.apache.cassandra.utils.Throwables.merge;
 
 public final class FileUtils
 {
     public static final Charset CHARSET = StandardCharsets.UTF_8;
 
     private static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
-    private static final NoSpamLogger nospam1m = NoSpamLogger.getLogger(logger, 1, TimeUnit.MINUTES);
 
-    public static final long ONE_KB = 1024;
-    public static final long ONE_MB = 1024 * ONE_KB;
-    public static final long ONE_GB = 1024 * ONE_MB;
-    public static final long ONE_TB = 1024 * ONE_GB;
+    public static final long ONE_KIB = 1024;
+    public static final long ONE_MIB = 1024 * ONE_KIB;
+    public static final long ONE_GIB = 1024 * ONE_MIB;
+    public static final long ONE_TIB = 1024 * ONE_GIB;
 
     private static final DecimalFormat df = new DecimalFormat("#.##");
     private static final AtomicReference<Optional<FSErrorHandler>> fsErrorHandler = new AtomicReference<>(Optional.empty());
 
-    private static Class clsDirectBuffer;
-    private static MethodHandle mhDirectBufferCleaner;
-    private static MethodHandle mhCleanerClean;
+    private static final Class clsDirectBuffer;
+    private static final MethodHandle mhDirectBufferCleaner;
+    private static final MethodHandle mhCleanerClean;
 
     static
     {
@@ -126,39 +124,32 @@
     }
 
     /**
-     * Pretty much like {@link File#createTempFile(String, String, File)}, but with
+     * Pretty much like {@link java.io.File#createTempFile(String, String, java.io.File)}, but with
      * the guarantee that the "random" part of the generated file name between
      * {@code prefix} and {@code suffix} is a positive, increasing {@code long} value.
      */
     public static File createTempFile(String prefix, String suffix, File directory)
     {
-        try
-        {
-            // Do not use java.io.File.createTempFile(), because some tests rely on the
-            // behavior that the "random" part in the temp file name is a positive 'long'.
-            // However, at least since Java 9 the code to generate the "random" part
-            // uses an _unsigned_ random long generated like this:
-            // Long.toUnsignedString(new java.util.Random.nextLong())
+        // Do not use java.io.File.createTempFile(), because some tests rely on the
+        // behavior that the "random" part in the temp file name is a positive 'long'.
+        // However, at least since Java 9 the code to generate the "random" part
+        // uses an _unsigned_ random long generated like this:
+        // Long.toUnsignedString(new java.util.Random.nextLong())
 
-            while (true)
-            {
-                // The contract of File.createTempFile() says, that it must not return
-                // the same file name again. We do that here in a very simple way,
-                // that probably doesn't cover all edge cases. Just rely on system
-                // wall clock and return strictly increasing values from that.
-                long num = tempFileNum.getAndIncrement();
-
-                // We have a positive long here, which is safe to use for example
-                // for CommitLogTest.
-                String fileName = prefix + Long.toString(num) + suffix;
-                File candidate = new File(directory, fileName);
-                if (candidate.createNewFile())
-                    return candidate;
-            }
-        }
-        catch (IOException e)
+        while (true)
         {
-            throw new FSWriteError(e, directory);
+            // The contract of File.createTempFile() says, that it must not return
+            // the same file name again. We do that here in a very simple way,
+            // that probably doesn't cover all edge cases. Just rely on system
+            // wall clock and return strictly increasing values from that.
+            long num = tempFileNum.getAndIncrement();
+
+            // We have a positive long here, which is safe to use for example
+            // for CommitLogTest.
+            String fileName = prefix + num + suffix;
+            File candidate = new File(directory, fileName);
+            if (candidate.createFileIfNotExists())
+                return candidate;
         }
     }
 
@@ -196,6 +187,11 @@
         }
     }
 
+    public static void createHardLinkWithConfirm(String from, String to)
+    {
+        createHardLinkWithConfirm(new File(from), new File(to));
+    }
+
     public static void createHardLinkWithConfirm(File from, File to)
     {
         try
@@ -212,11 +208,6 @@
         }
     }
 
-    public static void createHardLinkWithConfirm(String from, String to)
-    {
-        createHardLinkWithConfirm(new File(from), new File(to));
-    }
-
     public static void createHardLinkWithoutConfirm(String from, String to)
     {
         try
@@ -230,62 +221,11 @@
         }
     }
 
-    public static Throwable deleteWithConfirm(String filePath, Throwable accumulate)
-    {
-        return deleteWithConfirm(new File(filePath), accumulate, null);
-    }
-
-    public static Throwable deleteWithConfirm(File file, Throwable accumulate)
-    {
-        return deleteWithConfirm(file, accumulate, null);
-    }
-    
-    public static Throwable deleteWithConfirm(File file, Throwable accumulate, RateLimiter rateLimiter)
-    {
-        try
-        {
-            if (rateLimiter != null)
-            {
-                double throttled = rateLimiter.acquire();
-                if (throttled > 0.0)
-                    nospam1m.warn("Throttling file deletion: waited {} seconds to delete {}", throttled, file);
-            }
-            Files.delete(file.toPath());
-        }
-        catch (Throwable t)
-        {
-            try
-            {
-                throw new FSWriteError(t, file);
-            }
-            catch (Throwable t2)
-            {
-                accumulate = merge(accumulate, t2);
-            }
-        }
-        return accumulate;
-    }
-
-    public static void deleteWithConfirm(String file)
-    {
-        deleteWithConfirm(new File(file));
-    }
-
-    public static void deleteWithConfirm(File file)
-    {
-        maybeFail(deleteWithConfirm(file, null, null));
-    }
-
-    public static void deleteWithConfirmWithThrottle(File file, RateLimiter rateLimiter)
-    {
-        maybeFail(deleteWithConfirm(file, null, rateLimiter));
-    }
-
     public static void copyWithOutConfirm(String from, String to)
     {
         try
         {
-            Files.copy(Paths.get(from), Paths.get(to));
+            Files.copy(File.getPath(from), File.getPath(to));
         }
         catch (IOException e)
         {
@@ -303,7 +243,7 @@
     {
         assert from.exists();
         if (logger.isTraceEnabled())
-            logger.trace("Copying {} to {}", from.getPath(), to.getPath());
+            logger.trace("Copying {} to {}", from.path(), to.path());
 
         try
         {
@@ -315,74 +255,16 @@
         }
     }
 
-    public static void renameWithOutConfirm(String from, String to)
-    {
-        try
-        {
-            atomicMoveWithFallback(new File(from).toPath(), new File(to).toPath());
-        }
-        catch (IOException e)
-        {
-            if (logger.isTraceEnabled())
-                logger.trace("Could not move file "+from+" to "+to, e);
-        }
-    }
-
-    public static void renameWithConfirm(String from, String to)
-    {
-        renameWithConfirm(new File(from), new File(to));
-    }
-
-    public static void renameWithConfirm(File from, File to)
-    {
-        assert from.exists();
-        if (logger.isTraceEnabled())
-            logger.trace("Renaming {} to {}", from.getPath(), to.getPath());
-        // this is not FSWE because usually when we see it it's because we didn't close the file before renaming it,
-        // and Windows is picky about that.
-        try
-        {
-            atomicMoveWithFallback(from.toPath(), to.toPath());
-        }
-        catch (IOException e)
-        {
-            throw new RuntimeException(String.format("Failed to rename %s to %s", from.getPath(), to.getPath()), e);
-        }
-    }
-
-    /**
-     * Move a file atomically, if it fails, it falls back to a non-atomic operation
-     * @param from
-     * @param to
-     * @throws IOException
-     */
-    private static void atomicMoveWithFallback(Path from, Path to) throws IOException
-    {
-        try
-        {
-            Files.move(from, to, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
-        }
-        catch (AtomicMoveNotSupportedException e)
-        {
-            logger.trace("Could not do an atomic move", e);
-            Files.move(from, to, StandardCopyOption.REPLACE_EXISTING);
-        }
-
-    }
-
     public static void truncate(String path, long size)
     {
-        try(FileChannel channel = FileChannel.open(Paths.get(path), StandardOpenOption.READ, StandardOpenOption.WRITE))
+        File file = new File(path);
+        try (FileChannel channel = file.newReadWriteChannel())
         {
             channel.truncate(size);
         }
-        catch (NoSuchFileException | FileNotFoundException nfe)
-        {
-            throw new RuntimeException(nfe);
-        }
         catch (IOException e)
         {
-            throw new FSWriteError(e, path);
+            throw PathUtils.propagateUnchecked(e, file.toPath(), true);
         }
     }
 
@@ -455,49 +337,18 @@
 
     public static String getCanonicalPath(String filename)
     {
-        try
-        {
-            return new File(filename).getCanonicalPath();
-        }
-        catch (IOException e)
-        {
-            throw new FSReadError(e, filename);
-        }
+        return new File(filename).canonicalPath();
     }
 
     public static String getCanonicalPath(File file)
     {
-        try
-        {
-            return file.getCanonicalPath();
-        }
-        catch (IOException e)
-        {
-            throw new FSReadError(e, file);
-        }
+        return file.canonicalPath();
     }
 
     /** Return true if file is contained in folder */
     public static boolean isContained(File folder, File file)
     {
-        Path folderPath = Paths.get(getCanonicalPath(folder));
-        Path filePath = Paths.get(getCanonicalPath(file));
-
-        return filePath.startsWith(folderPath);
-    }
-
-    /** Convert absolute path into a path relative to the base path */
-    public static String getRelativePath(String basePath, String path)
-    {
-        try
-        {
-            return Paths.get(basePath).relativize(Paths.get(path)).toString();
-        }
-        catch(Exception ex)
-        {
-            String absDataPath = FileUtils.getCanonicalPath(basePath);
-            return Paths.get(absDataPath).relativize(Paths.get(path)).toString();
-        }
+        return folder.isAncestorOf(file);
     }
 
     public static void clean(ByteBuffer buffer)
@@ -528,52 +379,6 @@
         }
     }
 
-    public static void createDirectory(String directory)
-    {
-        createDirectory(new File(directory));
-    }
-
-    public static void createDirectory(File directory)
-    {
-        if (!directory.exists())
-        {
-            if (!directory.mkdirs())
-                throw new FSWriteError(new IOException("Failed to mkdirs " + directory), directory);
-        }
-    }
-
-    public static boolean delete(String file)
-    {
-        if (!StorageService.instance.isDaemonSetupCompleted())
-            logger.info("Deleting file during startup: {}", file);
-
-        File f = new File(file);
-        return f.delete();
-    }
-
-    public static void delete(File... files)
-    {
-        for ( File file : files )
-        {
-            if (!StorageService.instance.isDaemonSetupCompleted())
-                logger.info("Deleting file during startup: {}", file);
-
-            file.delete();
-        }
-    }
-
-    public static void deleteAsync(final String file)
-    {
-        Runnable runnable = new Runnable()
-        {
-            public void run()
-            {
-                deleteWithConfirm(new File(file));
-            }
-        };
-        ScheduledExecutors.nonPeriodicTasks.execute(runnable);
-    }
-
     public static long parseFileSize(String value)
     {
         long result;
@@ -584,22 +389,22 @@
         }
         if (value.endsWith(" TiB"))
         {
-            result = Math.round(Double.valueOf(value.replace(" TiB", "")) * ONE_TB);
+            result = Math.round(Double.valueOf(value.replace(" TiB", "")) * ONE_TIB);
             return result;
         }
         else if (value.endsWith(" GiB"))
         {
-            result = Math.round(Double.valueOf(value.replace(" GiB", "")) * ONE_GB);
+            result = Math.round(Double.valueOf(value.replace(" GiB", "")) * ONE_GIB);
             return result;
         }
         else if (value.endsWith(" KiB"))
         {
-            result = Math.round(Double.valueOf(value.replace(" KiB", "")) * ONE_KB);
+            result = Math.round(Double.valueOf(value.replace(" KiB", "")) * ONE_KIB);
             return result;
         }
         else if (value.endsWith(" MiB"))
         {
-            result = Math.round(Double.valueOf(value.replace(" MiB", "")) * ONE_MB);
+            result = Math.round(Double.valueOf(value.replace(" MiB", "")) * ONE_MIB);
             return result;
         }
         else if (value.endsWith(" bytes"))
@@ -616,27 +421,27 @@
     public static String stringifyFileSize(double value)
     {
         double d;
-        if ( value >= ONE_TB )
+        if (value >= ONE_TIB)
         {
-            d = value / ONE_TB;
+            d = value / ONE_TIB;
             String val = df.format(d);
             return val + " TiB";
         }
-        else if ( value >= ONE_GB )
+        else if (value >= ONE_GIB)
         {
-            d = value / ONE_GB;
+            d = value / ONE_GIB;
             String val = df.format(d);
             return val + " GiB";
         }
-        else if ( value >= ONE_MB )
+        else if (value >= ONE_MIB)
         {
-            d = value / ONE_MB;
+            d = value / ONE_MIB;
             String val = df.format(d);
             return val + " MiB";
         }
-        else if ( value >= ONE_KB )
+        else if (value >= ONE_KIB)
         {
-            d = value / ONE_KB;
+            d = value / ONE_KIB;
             String val = df.format(d);
             return val + " KiB";
         }
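
A quick worked example of the binary-unit constants the two methods above now share; the values and class name are illustrative only:

    public class FileSizeUnitsSketch
    {
        static final long ONE_KIB = 1024;
        static final long ONE_MIB = 1024 * ONE_KIB;
        static final long ONE_GIB = 1024 * ONE_MIB;

        public static void main(String[] args)
        {
            // parseFileSize("2.5 GiB") -> Math.round(2.5 * ONE_GIB)
            System.out.println(Math.round(2.5 * ONE_GIB)); // 2684354560
            // stringifyFileSize(1536) -> 1536 / ONE_KIB = 1.5 -> "1.5 KiB"
            System.out.println(1536.0 / ONE_KIB);          // 1.5
        }
    }
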
@@ -647,134 +452,6 @@
         }
     }
 
-    /**
-     * Deletes all files and subdirectories under "dir".
-     * @param dir Directory to be deleted
-     * @throws FSWriteError if any part of the tree cannot be deleted
-     */
-    public static void deleteRecursiveWithThrottle(File dir, RateLimiter rateLimiter)
-    {
-        if (dir.isDirectory())
-        {
-            String[] children = dir.list();
-            for (String child : children)
-                deleteRecursiveWithThrottle(new File(dir, child), rateLimiter);
-        }
-
-        // The directory is now empty so now it can be smoked
-        deleteWithConfirmWithThrottle(dir, rateLimiter);
-    }
-
-
-    /**
-     * Deletes the specified directory after having deleted its content.
-     *
-     * @param dir Directory to be deleted
-     * @throws FSWriteError if any part of the tree cannot be deleted
-     */
-    public static void deleteRecursive(File dir)
-    {
-        if (USE_NIX_RECURSIVE_DELETE.getBoolean() && dir.toPath().getFileSystem() == FileSystems.getDefault())
-        {
-            deleteRecursiveUsingNixCommand(dir.toPath(), false);
-            return;
-        }
-
-        deleteChildrenRecursive(dir);
-
-        // The directory is now empty, so now it can be smoked
-        deleteWithConfirm(dir);
-    }
-
-    /**
-     * Deletes all files and subdirectories under "dir".
-     *
-     * @param dir Directory to be deleted
-     * @throws FSWriteError if any part of the tree cannot be deleted
-     */
-    public static void deleteChildrenRecursive(File dir)
-    {
-        if (dir.isDirectory())
-        {
-            String[] children = dir.list();
-            if (children.length == 0)
-                return;
-
-            if (USE_NIX_RECURSIVE_DELETE.getBoolean() && dir.toPath().getFileSystem() == FileSystems.getDefault())
-            {
-                for (String child : children)
-                    deleteRecursiveUsingNixCommand(dir.toPath().resolve(child), false);
-            }
-            else
-            {
-                for (String child : children)
-                    deleteRecursive(new File(dir, child));
-            }
-        }
-    }
-
-    /**
-     * Uses unix `rm -r` to delete a directory recursively.
-     * This method can be much faster than deleting files and directories recursively by traversing them with Java.
-     * Though, we use it only for tests because it provides less information about the problem when something goes wrong.
-     *
-     * @param path        path to be deleted
-     * @param quietly     if quietly, additional `-f` flag is added to the `rm` command so that it will not complain in case
-     *                    the provided path is missing
-     */
-    private static void deleteRecursiveUsingNixCommand(Path path, boolean quietly)
-    {
-        String[] cmd = new String[]{ "rm",
-                                     quietly ? "-drf" : "-dr",
-                                     path.toAbsolutePath().toString() };
-
-        try
-        {
-            Process p = Runtime.getRuntime().exec(cmd);
-            int result = p.waitFor();
-
-            String out, err;
-            try (BufferedReader outReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
-                 BufferedReader errReader = new BufferedReader(new InputStreamReader(p.getErrorStream())))
-            {
-                out = outReader.lines().collect(Collectors.joining("\n"));
-                err = errReader.lines().collect(Collectors.joining("\n"));
-            }
-
-            if (result != 0 && Files.exists(path))
-            {
-                logger.error("{} returned:\nstdout:\n{}\n\nstderr:\n{}", Arrays.toString(cmd), out, err);
-                throw new IOException(String.format("%s returned non-zero exit code: %d%nstdout:%n%s%n%nstderr:%n%s", Arrays.toString(cmd), result, out, err));
-            }
-        }
-        catch (IOException e)
-        {
-            throw new FSWriteError(e, path.toString());
-        }
-        catch (InterruptedException e)
-        {
-            Thread.currentThread().interrupt();
-            throw new FSWriteError(e, path.toString());
-        }
-    }
-
-    /**
-     * Schedules deletion of all file and subdirectories under "dir" on JVM shutdown.
-     * @param dir Directory to be deleted
-     */
-    public static void deleteRecursiveOnExit(File dir)
-    {
-        if (dir.isDirectory())
-        {
-            String[] children = dir.list();
-            for (String child : children)
-                deleteRecursiveOnExit(new File(dir, child));
-        }
-
-        logger.trace("Scheduling deferred deletion of file: {}", dir);
-        dir.deleteOnExit();
-    }
-
     public static void handleCorruptSSTable(CorruptSSTableException e)
     {
         fsErrorHandler.get().ifPresent(handler -> handler.handleCorruptSSTable(e));
@@ -785,6 +462,11 @@
         fsErrorHandler.get().ifPresent(handler -> handler.handleFSError(e));
     }
 
+    public static void handleStartupFSError(Throwable t)
+    {
+        fsErrorHandler.get().ifPresent(handler -> handler.handleStartupFSError(t));
+    }
+
     /**
      * handleFSErrorAndPropagate will invoke the disk failure policy error handler,
      * which may or may not stop the daemon or transports. However, if we don't exit,
@@ -806,6 +488,9 @@
      */
     public static long folderSize(File folder)
     {
+        if (!folder.exists())
+            return 0;
+
         final long [] sizeArr = {0L};
         try
         {
@@ -817,6 +502,15 @@
                     sizeArr[0] += attrs.size();
                     return FileVisitResult.CONTINUE;
                 }
+
+                @Override
+                public FileVisitResult visitFileFailed(Path path, IOException e) throws IOException
+                {
+                    if (e instanceof NoSuchFileException)
+                        return FileVisitResult.CONTINUE;
+                    else
+                        throw e;
+                }
             });
         }
         catch (IOException e)
@@ -826,41 +520,6 @@
         return sizeArr[0];
     }
 
-    public static void copyTo(DataInput in, OutputStream out, int length) throws IOException
-    {
-        byte[] buffer = new byte[64 * 1024];
-        int copiedBytes = 0;
-
-        while (copiedBytes + buffer.length < length)
-        {
-            in.readFully(buffer);
-            out.write(buffer);
-            copiedBytes += buffer.length;
-        }
-
-        if (copiedBytes < length)
-        {
-            int left = length - copiedBytes;
-            in.readFully(buffer, 0, left);
-            out.write(buffer, 0, left);
-        }
-    }
-
-    public static boolean isSubDirectory(File parent, File child) throws IOException
-    {
-        parent = parent.getCanonicalFile();
-        child = child.getCanonicalFile();
-
-        File toCheck = child;
-        while (toCheck != null)
-        {
-            if (parent.equals(toCheck))
-                return true;
-            toCheck = toCheck.getParentFile();
-        }
-        return false;
-    }
-
     public static void append(File file, String ... lines)
     {
         if (file.exists())
@@ -894,7 +553,10 @@
      */
     public static void write(File file, List<String> lines, StandardOpenOption ... options)
     {
-        Set<StandardOpenOption> optionsSet = new HashSet<>(Arrays.asList(options));
+        Set<StandardOpenOption> optionsSet = EnumSet.noneOf(StandardOpenOption.class);
+        for (StandardOpenOption option : options)
+            optionsSet.add(option);
+
         //Emulate the old FileSystemProvider.newOutputStream behavior for open options.
         if (optionsSet.isEmpty())
         {
@@ -953,70 +615,109 @@
         fsErrorHandler.getAndSet(Optional.ofNullable(handler));
     }
 
-    /**
-     * Returns the size of the specified partition.
-     * <p>This method handles large file system by returning {@code Long.MAX_VALUE} if the  size overflow.
-     * See <a href='https://bugs.openjdk.java.net/browse/JDK-8179320'>JDK-8179320</a> for more information.</p>
-     *
-     * @param file the partition
-     * @return the size, in bytes, of the partition or {@code 0L} if the abstract pathname does not name a partition
-     */
-    public static long getTotalSpace(File file)
+    @Deprecated
+    public static void createDirectory(String directory)
     {
-        return handleLargeFileSystem(file.getTotalSpace());
+        createDirectory(new File(directory));
+    }
+
+    @Deprecated
+    public static void createDirectory(File directory)
+    {
+        PathUtils.createDirectoriesIfNotExists(directory.toPath());
+    }
+
+    @Deprecated
+    public static boolean delete(String file)
+    {
+        return new File(file).tryDelete();
+    }
+
+    @Deprecated
+    public static void delete(File... files)
+    {
+        for (File file : files)
+            file.tryDelete();
     }
 
     /**
-     * Returns the number of unallocated bytes on the specified partition.
-     * <p>This method handles large file system by returning {@code Long.MAX_VALUE} if the  number of unallocated bytes
-     * overflow. See <a href='https://bugs.openjdk.java.net/browse/JDK-8179320'>JDK-8179320</a> for more information</p>
-     *
-     * @param file the partition
-     * @return the number of unallocated bytes on the partition or {@code 0L}
-     * if the abstract pathname does not name a partition.
+     * Deletes all files and subdirectories under "dir".
+     * @param dir Directory to be deleted
+     * @throws FSWriteError if any part of the tree cannot be deleted
      */
-    public static long getFreeSpace(File file)
+    @Deprecated
+    public static void deleteRecursiveWithThrottle(File dir, RateLimiter rateLimiter)
     {
-        return handleLargeFileSystem(file.getFreeSpace());
+        dir.deleteRecursive(rateLimiter);
     }
 
     /**
-     * Returns the number of available bytes on the specified partition.
-     * <p>This method handles large file system by returning {@code Long.MAX_VALUE} if the  number of available bytes
-     * overflow. See <a href='https://bugs.openjdk.java.net/browse/JDK-8179320'>JDK-8179320</a> for more information</p>
-     *
-     * @param file the partition
-     * @return the number of available bytes on the partition or {@code 0L}
-     * if the abstract pathname does not name a partition.
+     * Deletes all files and subdirectories under "dir".
+     * @param dir Directory to be deleted
+     * @throws FSWriteError if any part of the tree cannot be deleted
      */
-    public static long getUsableSpace(File file)
+    @Deprecated
+    public static void deleteRecursive(File dir)
     {
-        return handleLargeFileSystem(file.getUsableSpace());
+        dir.deleteRecursive();
     }
 
     /**
-     * Returns the {@link FileStore} representing the file store where a file
-     * is located. This {@link FileStore} handles large file system by returning {@code Long.MAX_VALUE}
-     * from {@code FileStore#getTotalSpace()}, {@code FileStore#getUnallocatedSpace()} and {@code FileStore#getUsableSpace()}
-     * it the value is bigger than {@code Long.MAX_VALUE}. See <a href='https://bugs.openjdk.java.net/browse/JDK-8162520'>JDK-8162520</a>
-     * for more information.
-     *
-     * @param path the path to the file
-     * @return the file store where the file is stored
+     * Schedules deletion of all files and subdirectories under "dir" on JVM shutdown.
+     * @param dir Directory to be deleted
      */
-    public static FileStore getFileStore(Path path) throws IOException
+    @Deprecated
+    public static void deleteRecursiveOnExit(File dir)
     {
-        return new SafeFileStore(Files.getFileStore(path));
+        dir.deleteRecursiveOnExit();
     }
 
-    /**
-     * Handle large file system by returning {@code Long.MAX_VALUE} when the size overflows.
-     * @param size returned by the Java's FileStore methods
-     * @return the size or {@code Long.MAX_VALUE} if the size was bigger than {@code Long.MAX_VALUE}
-     */
-    private static long handleLargeFileSystem(long size)
+    @Deprecated
+    public static boolean isSubDirectory(File parent, File child)
     {
-        return size < 0 ? Long.MAX_VALUE : size;
+        return parent.isAncestorOf(child);
+    }
+
+    @Deprecated
+    public static Throwable deleteWithConfirm(File file, Throwable accumulate)
+    {
+        return file.delete(accumulate, null);
+    }
+
+    @Deprecated
+    public static Throwable deleteWithConfirm(File file, Throwable accumulate, RateLimiter rateLimiter)
+    {
+        return file.delete(accumulate, rateLimiter);
+    }
+
+    @Deprecated
+    public static void deleteWithConfirm(String file)
+    {
+        deleteWithConfirm(new File(file));
+    }
+
+    @Deprecated
+    public static void deleteWithConfirm(File file)
+    {
+        file.delete();
+    }
+
+    @Deprecated
+    public static void renameWithOutConfirm(String from, String to)
+    {
+        new File(from).tryMove(new File(to));
+    }
+
+    @Deprecated
+    public static void renameWithConfirm(String from, String to)
+    {
+        renameWithConfirm(new File(from), new File(to));
+    }
+
+    @Deprecated
+    public static void renameWithConfirm(File from, File to)
+    {
+        from.move(to);
     }
 
     /**
@@ -1027,88 +728,6 @@
     }
 
     /**
-     * FileStore decorator used to safely handle large file system.
-     *
-     * <p>Java's FileStore methods (getTotalSpace/getUnallocatedSpace/getUsableSpace) are limited to reporting bytes as
-     * signed long (2^63-1), if the filesystem is any bigger, then the size overflows. {@code SafeFileStore} will
-     * return {@code Long.MAX_VALUE} if the size overflow.</p>
-     *
-     * @see <a href="https://bugs.openjdk.java.net/browse/JDK-8162520">JDK-8162520</a>.
-     */
-    private static final class SafeFileStore extends FileStore
-    {
-        /**
-         * The decorated {@code FileStore}
-         */
-        private final FileStore fileStore;
-
-        public SafeFileStore(FileStore fileStore)
-        {
-            this.fileStore = fileStore;
-        }
-
-        @Override
-        public String name()
-        {
-            return fileStore.name();
-        }
-
-        @Override
-        public String type()
-        {
-            return fileStore.type();
-        }
-
-        @Override
-        public boolean isReadOnly()
-        {
-            return fileStore.isReadOnly();
-        }
-
-        @Override
-        public long getTotalSpace() throws IOException
-        {
-            return handleLargeFileSystem(fileStore.getTotalSpace());
-        }
-
-        @Override
-        public long getUsableSpace() throws IOException
-        {
-            return handleLargeFileSystem(fileStore.getUsableSpace());
-        }
-
-        @Override
-        public long getUnallocatedSpace() throws IOException
-        {
-            return handleLargeFileSystem(fileStore.getUnallocatedSpace());
-        }
-
-        @Override
-        public boolean supportsFileAttributeView(Class<? extends FileAttributeView> type)
-        {
-            return fileStore.supportsFileAttributeView(type);
-        }
-
-        @Override
-        public boolean supportsFileAttributeView(String name)
-        {
-            return fileStore.supportsFileAttributeView(name);
-        }
-
-        @Override
-        public <V extends FileStoreAttributeView> V getFileStoreAttributeView(Class<V> type)
-        {
-            return fileStore.getFileStoreAttributeView(type);
-        }
-
-        @Override
-        public Object getAttribute(String attribute) throws IOException
-        {
-            return fileStore.getAttribute(attribute);
-        }
-    }
-
-    /**
      * Moves the contents of a directory to another directory.
      * <p>Once a file has been copied to the target directory it will be deleted from the source directory.
      * If a file already exists in the target directory a warning will be logged and the file will not
@@ -1125,9 +744,9 @@
         {
             Files.createDirectories(target);
 
-            for (File f : source.toFile().listFiles())
+            for (File f : new File(source).tryList())
             {
-                String fileName = f.getName();
+                String fileName = f.name();
                 moveRecursively(source.resolve(fileName), target.resolve(fileName));
             }
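The deprecated shims above delegate directly to the new File wrapper (org.apache.cassandra.io.util.File), so migrating a call site is mechanical. A minimal sketch under that assumption, using only the replacement calls the shims themselves invoke; the paths are hypothetical:

    File dir = new File("/var/lib/cassandra/data/ks/tbl");
    dir.deleteRecursive();                                        // was FileUtils.deleteRecursive(dir)
    boolean contained = dir.isAncestorOf(new File("/var/lib/cassandra/data/ks/tbl/snapshots")); // was isSubDirectory(parent, child)
    new File("/tmp/a").move(new File("/tmp/b"));                  // was FileUtils.renameWithConfirm(from, to)
    new File("/tmp/c").tryDelete();                               // was FileUtils.delete("/tmp/c")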
 
diff --git a/src/java/org/apache/cassandra/io/util/FileWriter.java b/src/java/org/apache/cassandra/io/util/FileWriter.java
new file mode 100644
index 0000000..bbfb595
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/FileWriter.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+
+import org.apache.cassandra.io.util.File.WriteMode;
+
+public class FileWriter extends OutputStreamWriter
+{
+    @SuppressWarnings("resource") // FOSP is closed by OSW::close
+    public FileWriter(File file) throws IOException
+    {
+        super(new FileOutputStreamPlus(file));
+    }
+
+    @SuppressWarnings("resource") // FOSP is closed by OSW::close
+    public FileWriter(File file, WriteMode mode) throws IOException
+    {
+        super(new FileOutputStreamPlus(file, mode));
+    }
+}
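FileWriter is a thin convenience over FileOutputStreamPlus, so usage mirrors java.io.FileWriter. A minimal sketch, assuming the single-argument constructor creates or truncates the target file (the precise semantics are defined by File.WriteMode and FileOutputStreamPlus, neither of which is shown in this hunk):

    try (FileWriter writer = new FileWriter(new File("example.txt")))  // hypothetical path
    {
        writer.write("hello");  // inherited from OutputStreamWriter; the wrapped stream is closed by close()
    }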
diff --git a/src/java/org/apache/cassandra/io/util/Memory.java b/src/java/org/apache/cassandra/io/util/Memory.java
index eaa6e91..7fd4225 100644
--- a/src/java/org/apache/cassandra/io/util/Memory.java
+++ b/src/java/org/apache/cassandra/io/util/Memory.java
@@ -32,7 +32,7 @@
 /**
  * An off-heap region of memory that must be manually free'd when no longer needed.
  */
-public class Memory implements AutoCloseable
+public class Memory implements AutoCloseable, ReadableMemory
 {
     private static final Unsafe unsafe;
     static
@@ -68,7 +68,7 @@
         // we permit a 0 peer iff size is zero, since such an allocation makes no sense, and an allocator would be
         // justified in returning a null pointer (and permitted to do so: http://www.cplusplus.com/reference/cstdlib/malloc)
         if (peer == 0)
-            throw new OutOfMemoryError();
+            throw new OutOfMemoryError(); // checkstyle: permit this instantiation
     }
 
     // create a memory object that references the exact same memory location as the one provided.
diff --git a/src/java/org/apache/cassandra/io/util/PathUtils.java b/src/java/org/apache/cassandra/io/util/PathUtils.java
new file mode 100644
index 0000000..4b3efdb
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/PathUtils.java
@@ -0,0 +1,865 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.util;
+
+import java.io.*;
+import java.nio.channels.FileChannel;
+import java.nio.file.*;
+import java.nio.file.attribute.*;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.function.*;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import javax.annotation.Nullable;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.RateLimiter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.openhft.chronicle.core.util.ThrowingFunction;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.io.FSError;
+import org.apache.cassandra.io.FSReadError;
+import org.apache.cassandra.io.FSWriteError;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.NoSpamLogger;
+
+import static java.nio.file.StandardOpenOption.*;
+import static java.util.Collections.unmodifiableSet;
+import static org.apache.cassandra.config.CassandraRelevantProperties.USE_NIX_RECURSIVE_DELETE;
+import static org.apache.cassandra.utils.Throwables.merge;
+
+/**
+ * Vernacular: tryX means return false or 0L on any failure; XIfNotY means propagate any exceptions besides those caused by Y
+ *
+ * This class tries to apply uniform IOException handling, and does not propagate IOException except for NoSuchFileException.
+ * Any harmless/application error exceptions are propagated as UncheckedIOException, and anything else as an FSReadError or FSWriteError.
+ * Semantically this is a little incoherent throughout the codebase, as we intercept IOException haphazardly and treat
+ * it inconsistently - we should ideally migrate to using {@link #propagate(IOException, Path, boolean)} et al globally.
+ */
+public final class PathUtils
+{
+    private static final boolean consistentDirectoryListings = CassandraRelevantProperties.DETERMINISM_CONSISTENT_DIRECTORY_LISTINGS.getBoolean();
+
+    private static final Set<StandardOpenOption> READ_OPTIONS = unmodifiableSet(EnumSet.of(READ));
+    private static final Set<StandardOpenOption> WRITE_OPTIONS = unmodifiableSet(EnumSet.of(WRITE, CREATE, TRUNCATE_EXISTING));
+    private static final Set<StandardOpenOption> WRITE_APPEND_OPTIONS = unmodifiableSet(EnumSet.of(WRITE, CREATE, APPEND));
+    private static final Set<StandardOpenOption> READ_WRITE_OPTIONS = unmodifiableSet(EnumSet.of(READ, WRITE, CREATE));
+    private static final FileAttribute<?>[] NO_ATTRIBUTES = new FileAttribute[0];
+
+    private static final Logger logger = LoggerFactory.getLogger(PathUtils.class);
+    private static final NoSpamLogger nospam1m = NoSpamLogger.getLogger(logger, 1, TimeUnit.MINUTES);
+
+    private static Consumer<Path> onDeletion = path -> {
+        if (StorageService.instance.isDaemonSetupCompleted())
+            setDeletionListener(ignore -> {});
+        else
+            logger.trace("Deleting file during startup: {}", path);
+    };
+
+    public static FileChannel newReadChannel(Path path) throws NoSuchFileException
+    {
+        return newFileChannel(path, READ_OPTIONS);
+    }
+
+    public static FileChannel newReadWriteChannel(Path path) throws NoSuchFileException
+    {
+        return newFileChannel(path, READ_WRITE_OPTIONS);
+    }
+
+    public static FileChannel newWriteOverwriteChannel(Path path) throws NoSuchFileException
+    {
+        return newFileChannel(path, WRITE_OPTIONS);
+    }
+
+    public static FileChannel newWriteAppendChannel(Path path) throws NoSuchFileException
+    {
+        return newFileChannel(path, WRITE_APPEND_OPTIONS);
+    }
+
+    private static FileChannel newFileChannel(Path path, Set<StandardOpenOption> options) throws NoSuchFileException
+    {
+        try
+        {
+            return FileChannel.open(path, options, PathUtils.NO_ATTRIBUTES);
+        }
+        catch (IOException e)
+        {
+            throw propagateUncheckedOrNoSuchFileException(e, path, options.contains(WRITE));
+        }
+    }
+
+    public static void setDeletionListener(Consumer<Path> newOnDeletion)
+    {
+        onDeletion = newOnDeletion;
+    }
+
+    public static String filename(Path path)
+    {
+        return path.getFileName().toString();
+    }
+
+    public static <T> T[] list(Path path, Function<Stream<Path>, Stream<T>> transform, IntFunction<T[]> arrayFactory)
+    {
+        try (Stream<Path> stream = Files.list(path))
+        {
+            return transform.apply(consistentDirectoryListings ? stream.sorted() : stream)
+                    .toArray(arrayFactory);
+        }
+        catch (NoSuchFileException e)
+        {
+            return null;
+        }
+        catch (IOException e)
+        {
+            throw propagateUnchecked(e, path, false);
+        }
+    }
+
+    public static <T extends Throwable, V> V[] tryList(Path path, Function<Stream<Path>, Stream<V>> transform, IntFunction<V[]> arrayFactory, ThrowingFunction<IOException, V[], T> orElse) throws T
+    {
+        try (Stream<Path> stream = Files.list(path))
+        {
+            return transform.apply(consistentDirectoryListings ? stream.sorted() : stream)
+                    .toArray(arrayFactory);
+        }
+        catch (IOException e)
+        {
+            return orElse.apply(e);
+        }
+    }
+
+    public static void forEach(Path path, Consumer<Path> forEach)
+    {
+        try (Stream<Path> stream = Files.list(path))
+        {
+            (consistentDirectoryListings ? stream.sorted() : stream).forEach(forEach);
+        }
+        catch (IOException e)
+        {
+            throw propagateUnchecked(e, path, false);
+        }
+    }
+
+    public static void forEachRecursive(Path path, Consumer<Path> forEach)
+    {
+        Consumer<Path> forEachRecursive = new Consumer<Path>()
+        {
+            @Override
+            public void accept(Path child)
+            {
+                forEach.accept(child);
+                forEach(child, this);
+            }
+        };
+        forEach(path, forEachRecursive);
+    }
+
+    public static long tryGetLength(Path path)
+    {
+        return tryOnPath(path, Files::size);
+    }
+
+    public static long tryGetLastModified(Path path)
+    {
+        return tryOnPath(path, p -> Files.getLastModifiedTime(p).toMillis());
+    }
+
+    public static boolean trySetLastModified(Path path, long lastModified)
+    {
+        try
+        {
+            Files.setLastModifiedTime(path, FileTime.fromMillis(lastModified));
+            return true;
+        }
+        catch (IOException e)
+        {
+            return false;
+        }
+    }
+
+    public static boolean trySetReadable(Path path, boolean readable)
+    {
+        return trySet(path, PosixFilePermission.OWNER_READ, readable);
+    }
+
+    public static boolean trySetWritable(Path path, boolean writeable)
+    {
+        return trySet(path, PosixFilePermission.OWNER_WRITE, writeable);
+    }
+
+    public static boolean trySetExecutable(Path path, boolean executable)
+    {
+        return trySet(path, PosixFilePermission.OWNER_EXECUTE, executable);
+    }
+
+    public static boolean trySet(Path path, PosixFilePermission permission, boolean set)
+    {
+        try
+        {
+            PosixFileAttributeView view = path.getFileSystem().provider().getFileAttributeView(path, PosixFileAttributeView.class);
+            PosixFileAttributes attributes = view.readAttributes();
+            Set<PosixFilePermission> permissions = attributes.permissions();
+            if (set == permissions.contains(permission))
+                return true;
+            if (set) permissions.add(permission);
+            else permissions.remove(permission);
+            view.setPermissions(permissions);
+            return true;
+        }
+        catch (IOException e)
+        {
+            return false;
+        }
+    }
+
+    public static Throwable delete(Path file, Throwable accumulate)
+    {
+        try
+        {
+            delete(file);
+        }
+        catch (FSError t)
+        {
+            accumulate = merge(accumulate, t);
+        }
+        return accumulate;
+    }
+
+    public static void delete(Path file)
+    {
+        try
+        {
+            Files.delete(file);
+            onDeletion.accept(file);
+        }
+        catch (IOException e)
+        {
+            throw propagateUnchecked(e, file, true);
+        }
+    }
+
+    public static void deleteIfExists(Path file)
+    {
+        try
+        {
+            Files.delete(file);
+            onDeletion.accept(file);
+        }
+        catch (IOException e)
+        {
+            if (e instanceof FileNotFoundException | e instanceof NoSuchFileException)
+                return;
+
+            throw propagateUnchecked(e, file, true);
+        }
+    }
+
+    public static boolean tryDelete(Path file)
+    {
+        try
+        {
+            Files.delete(file);
+            onDeletion.accept(file);
+            return true;
+        }
+        catch (IOException e)
+        {
+            return false;
+        }
+    }
+
+    public static void delete(Path file, @Nullable RateLimiter rateLimiter)
+    {
+        if (rateLimiter != null)
+        {
+            double throttled = rateLimiter.acquire();
+            if (throttled > 0.0)
+                nospam1m.warn("Throttling file deletion: waited {} seconds to delete {}", throttled, file);
+        }
+        delete(file);
+    }
+
+    public static Throwable delete(Path file, Throwable accumulate, @Nullable RateLimiter rateLimiter)
+    {
+        try
+        {
+            delete(file, rateLimiter);
+        }
+        catch (Throwable t)
+        {
+            accumulate = merge(accumulate, t);
+        }
+        return accumulate;
+    }
+
+    /**
+     * Uses unix `rm -r` to delete a directory recursively.
+     * Note that it will trigger the {@link #onDeletion} listener only for the provided path and will not call it for
+     * any nested path. This method can be much faster than deleting files and directories recursively by traversing
+     * them with Java. However, we use it only for tests because it provides less information about the problem when
+     * something goes wrong.
+     *
+     * @param path    path to be deleted
+     * @param quietly if true, an additional `-f` flag is added to the `rm` command so that it will not complain in
+     *                case the provided path is missing
+     */
+    private static void deleteRecursiveUsingNixCommand(Path path, boolean quietly)
+    {
+        String [] cmd = new String[]{ "rm", quietly ? "-rdf" : "-rd", path.toAbsolutePath().toString() };
+        try
+        {
+            if (!quietly && !Files.exists(path))
+                throw new NoSuchFileException(path.toString());
+
+            Process p = Runtime.getRuntime().exec(cmd);
+            int result = p.waitFor();
+
+            String out, err;
+            try (BufferedReader outReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
+                 BufferedReader errReader = new BufferedReader(new InputStreamReader(p.getErrorStream())))
+            {
+                out = outReader.lines().collect(Collectors.joining("\n"));
+                err = errReader.lines().collect(Collectors.joining("\n"));
+            }
+
+            if (result != 0 && Files.exists(path))
+            {
+                logger.error("{} returned:\nstdout:\n{}\n\nstderr:\n{}", Arrays.toString(cmd), out, err);
+                throw new IOException(String.format("%s returned non-zero exit code: %d%nstdout:%n%s%n%nstderr:%n%s", Arrays.toString(cmd), result, out, err));
+            }
+
+            onDeletion.accept(path);
+        }
+        catch (IOException e)
+        {
+            throw propagateUnchecked(e, path, true);
+        }
+        catch (InterruptedException e)
+        {
+            Thread.currentThread().interrupt();
+            throw new FSWriteError(e, path);
+        }
+    }
+
+    /**
+     * Deletes all files and subdirectories under "path".
+     * @param path file to be deleted
+     * @throws FSWriteError if any part of the tree cannot be deleted
+     */
+    public static void deleteRecursive(Path path)
+    {
+        if (USE_NIX_RECURSIVE_DELETE.getBoolean() && path.getFileSystem() == FileSystems.getDefault())
+        {
+            deleteRecursiveUsingNixCommand(path, false);
+            return;
+        }
+
+        if (isDirectory(path))
+            forEach(path, PathUtils::deleteRecursive);
+
+        // The directory is now empty, so now it can be smoked
+        delete(path);
+    }
+
+    /**
+     * Deletes all files and subdirectories under "path".
+     * @param path file to be deleted
+     * @throws FSWriteError if any part of the tree cannot be deleted
+     */
+    public static void deleteRecursive(Path path, RateLimiter rateLimiter)
+    {
+        if (USE_NIX_RECURSIVE_DELETE.getBoolean() && path.getFileSystem() == FileSystems.getDefault())
+        {
+            deleteRecursiveUsingNixCommand(path, false);
+            return;
+        }
+
+        deleteRecursive(path, rateLimiter, p -> deleteRecursive(p, rateLimiter));
+    }
+
+    /**
+     * Deletes all files and subdirectories under "path".
+     * @param path file to be deleted
+     * @throws FSWriteError if any part of the tree cannot be deleted
+     */
+    private static void deleteRecursive(Path path, RateLimiter rateLimiter, Consumer<Path> deleteRecursive)
+    {
+        if (isDirectory(path))
+            forEach(path, deleteRecursive);
+
+        // The directory is now empty so now it can be smoked
+        delete(path, rateLimiter);
+    }
+
+    /**
+     * Schedules deletion of all files and subdirectories under "dir" on JVM shutdown.
+     * @param dir Directory to be deleted
+     */
+    public synchronized static void deleteRecursiveOnExit(Path dir)
+    {
+        ON_EXIT.add(dir, true);
+    }
+
+    /**
+     * Schedules deletion of the file only on JVM shutdown.
+     * @param file File to be deleted
+     */
+    public synchronized static void deleteOnExit(Path file)
+    {
+        ON_EXIT.add(file, false);
+    }
+
+    public static boolean tryRename(Path from, Path to)
+    {
+        logger.trace("Renaming {} to {}", from, to);
+        try
+        {
+            atomicMoveWithFallback(from, to);
+            return true;
+        }
+        catch (IOException e)
+        {
+            logger.trace("Could not move file {} to {}", from, to, e);
+            return false;
+        }
+    }
+
+    public static void rename(Path from, Path to)
+    {
+        logger.trace("Renaming {} to {}", from, to);
+        try
+        {
+            atomicMoveWithFallback(from, to);
+        }
+        catch (IOException e)
+        {
+            logger.trace("Could not move file {} to {}", from, to, e);
+
+            // TODO: try to decide if this is a read or a write; for now, we have assumed write
+            throw propagateUnchecked(String.format("Failed to rename %s to %s", from, to), e, to, true);
+        }
+    }
+
+    /**
+     * Move a file atomically, if it fails, it falls back to a non-atomic operation
+     */
+    private static void atomicMoveWithFallback(Path from, Path to) throws IOException
+    {
+        try
+        {
+            Files.move(from, to, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
+        }
+        catch (AtomicMoveNotSupportedException e)
+        {
+            logger.trace("Could not do an atomic move", e);
+            Files.move(from, to, StandardCopyOption.REPLACE_EXISTING);
+        }
+    }
+
+    // true if can determine exists, false if any exception occurs
+    public static boolean exists(Path path)
+    {
+        return Files.exists(path);
+    }
+
+    // true if can determine is a directory, false if any exception occurs
+    public static boolean isDirectory(Path path)
+    {
+        return Files.isDirectory(path);
+    }
+
+    // true if can determine is a regular file, false if any exception occurs
+    public static boolean isFile(Path path)
+    {
+        return Files.isRegularFile(path);
+    }
+
+    /**
+     * @param path create file if not exists
+     * @throws IOError if cannot perform the operation
+     * @return true if a new file was created
+     */
+    public static boolean createFileIfNotExists(Path path)
+    {
+        return ifNotExists(path, Files::createFile);
+    }
+
+    /**
+     * @param path create directory if not exists
+     * @throws IOError if cannot perform the operation
+     * @return true if a new directory was created
+     */
+    public static boolean createDirectoryIfNotExists(Path path)
+    {
+        return ifNotExists(path, Files::createDirectory);
+    }
+
+    /**
+     * @param path create directory (and parents) if not exists
+     * @throws IOError if cannot perform the operation
+     * @return true if a new directory was created
+     */
+    public static boolean createDirectoriesIfNotExists(Path path)
+    {
+        return ifNotExists(path, Files::createDirectories);
+    }
+
+    /**
+     * @param path create directory if not exists and action can be performed
+     * @return true if a new directory was created, false otherwise (for any reason)
+     */
+    public static boolean tryCreateDirectory(Path path)
+    {
+        return tryConsume(path, Files::createDirectory);
+    }
+
+    /**
+     * @param path create directory (and parents) if not exists and action can be performed
+     * @return true if the new directory was created, false otherwise (for any reason)
+     */
+    public static boolean tryCreateDirectories(Path path)
+    {
+        if (exists(path))
+            return false;
+
+        tryCreateDirectories(path.toAbsolutePath().getParent());
+        return tryCreateDirectory(path);
+    }
+
+    /**
+     * @return file if exists, otherwise nearest parent that exists; null if nothing in path exists
+     */
+    public static Path findExistingAncestor(Path file)
+    {
+        if (!file.equals(file.normalize()))
+            throw new IllegalArgumentException("Must be invoked on a path without redundant elements");
+
+        Path parent = file;
+        while (parent != null && !Files.exists(parent))
+            parent = parent.getParent();
+        return parent;
+    }
+
+    /**
+     * 1) Convert to an absolute path without redundant path elements;
+ * 2) If the file exists, resolve any links to the underlying file;
+     * 3) If the file does not exist, find the first ancestor that does and resolve the path from there
+     */
+    public static Path toCanonicalPath(Path file)
+    {
+        Preconditions.checkNotNull(file);
+
+        file = file.toAbsolutePath().normalize();
+        Path parent = findExistingAncestor(file);
+
+        if (parent == null)
+            return file;
+        if (parent == file)
+            return toRealPath(file);
+        return toRealPath(parent).resolve(parent.relativize(file));
+    }
+
+    private static Path toRealPath(Path path)
+    {
+        try
+        {
+            return path.toRealPath();
+        }
+        catch (IOException e)
+        {
+            throw propagateUnchecked(e, path, false);
+        }
+    }
+
+    /**
+     * Return true if file's canonical path is contained in folder's canonical path.
+     *
+     * Propagates any exceptions encountered finding canonical paths.
+     */
+    public static boolean isContained(Path folder, Path file)
+    {
+        Path realFolder = toCanonicalPath(folder), realFile = toCanonicalPath(file);
+        return realFile.startsWith(realFolder);
+    }
+
+    @VisibleForTesting
+    public static void runOnExitThreadsAndClear()
+    {
+        DeleteOnExit.runOnExitThreadsAndClear();
+    }
+
+    public static void clearOnExitThreads()
+    {
+        DeleteOnExit.clearOnExitThreads();
+    }
+
+
+    private static final class DeleteOnExit implements Runnable
+    {
+        private boolean isRegistered;
+        private final Set<Path> deleteRecursivelyOnExit = new HashSet<>();
+        private final Set<Path> deleteOnExit = new HashSet<>();
+
+        private static List<Thread> onExitThreads = new ArrayList<>();
+
+        private static void runOnExitThreadsAndClear()
+        {
+            List<Thread> toRun;
+            synchronized (onExitThreads)
+            {
+                toRun = new ArrayList<>(onExitThreads);
+                onExitThreads.clear();
+            }
+            Runtime runtime = Runtime.getRuntime();
+            toRun.forEach(onExitThread -> {
+                try
+                {
+                    runtime.removeShutdownHook(onExitThread);
+                    //noinspection CallToThreadRun
+                    onExitThread.run();
+                }
+                catch (Exception ex)
+                {
+                    logger.warn("Exception thrown when cleaning up files to delete on exit, continuing.", ex);
+                }
+            });
+        }
+
+        private static void clearOnExitThreads()
+        {
+            synchronized (onExitThreads)
+            {
+                Runtime runtime = Runtime.getRuntime();
+                onExitThreads.forEach(runtime::removeShutdownHook);
+                onExitThreads.clear();
+            }
+        }
+
+        DeleteOnExit()
+        {
+            final Thread onExitThread = new Thread(this); // checkstyle: permit this instantiation
+            synchronized (onExitThreads)
+            {
+                onExitThreads.add(onExitThread);
+            }
+            Runtime.getRuntime().addShutdownHook(onExitThread);
+        }
+
+        synchronized void add(Path path, boolean recursive)
+        {
+            if (!isRegistered)
+            {
+                isRegistered = true;
+            }
+            logger.trace("Scheduling deferred {}deletion of file: {}", recursive ? "recursive " : "", path);
+            (recursive ? deleteRecursivelyOnExit : deleteOnExit).add(path);
+        }
+
+        public void run()
+        {
+            for (Path path : deleteOnExit)
+            {
+                try
+                {
+                    if (exists(path))
+                        delete(path);
+                }
+                catch (Throwable t)
+                {
+                    logger.warn("Failed to delete {} on exit", path, t);
+                }
+            }
+            for (Path path : deleteRecursivelyOnExit)
+            {
+                try
+                {
+                    if (exists(path))
+                        deleteRecursive(path);
+                }
+                catch (Throwable t)
+                {
+                    logger.warn("Failed to delete {} on exit", path, t);
+                }
+            }
+        }
+    }
+    private static final DeleteOnExit ON_EXIT = new DeleteOnExit();
+
+    public interface IOConsumer { void accept(Path path) throws IOException; }
+    public interface IOToLongFunction<V> { long apply(V path) throws IOException; }
+
+    private static boolean ifNotExists(Path path, IOConsumer consumer)
+    {
+        try
+        {
+            consumer.accept(path);
+            return true;
+        }
+        catch (FileAlreadyExistsException fae)
+        {
+            return false;
+        }
+        catch (IOException e)
+        {
+            throw propagateUnchecked(e, path, true);
+        }
+    }
+
+    private static boolean tryConsume(Path path, IOConsumer function)
+    {
+        try
+        {
+            function.accept(path);
+            return true;
+        }
+        catch (IOException e)
+        {
+            return false;
+        }
+    }
+
+    private static long tryOnPath(Path path, IOToLongFunction<Path> function)
+    {
+        try
+        {
+            return function.apply(path);
+        }
+        catch (IOException e)
+        {
+            return 0L;
+        }
+    }
+
+    private static long tryOnFileStore(Path path, IOToLongFunction<FileStore> function)
+    {
+        return tryOnFileStore(path, function, ignore -> {});
+    }
+
+    private static long tryOnFileStore(Path path, IOToLongFunction<FileStore> function, Consumer<IOException> orElse)
+    {
+        try
+        {
+            Path ancestor = findExistingAncestor(path.toAbsolutePath().normalize());
+            if (ancestor == null)
+            {
+                orElse.accept(new NoSuchFileException(path.toString()));
+                return 0L;
+            }
+            return function.apply(Files.getFileStore(ancestor));
+        }
+        catch (IOException e)
+        {
+            orElse.accept(e);
+            return 0L;
+        }
+    }
+
+    /**
+     * Returns the number of bytes (determined by the provided function) on the specified partition.
+     * <p>This method handles large file systems by returning {@code Long.MAX_VALUE} if the number of available bytes
+     * overflows. See <a href='https://bugs.openjdk.java.net/browse/JDK-8179320'>JDK-8179320</a> for more information.</p>
+     *
+     * @param path the partition (or a file within it)
+     */
+    public static long tryGetSpace(Path path, IOToLongFunction<FileStore> getSpace)
+    {
+        return handleLargeFileSystem(tryOnFileStore(path, getSpace));
+    }
+
+    public static long tryGetSpace(Path path, IOToLongFunction<FileStore> getSpace, Consumer<IOException> orElse)
+    {
+        return handleLargeFileSystem(tryOnFileStore(path, getSpace, orElse));
+    }
+
+    /**
+     * Handles large file systems by returning {@code Long.MAX_VALUE} when the size overflows.
+     * @param size the size returned by Java's FileStore methods
+     * @return the size or {@code Long.MAX_VALUE} if the size was bigger than {@code Long.MAX_VALUE}
+     */
+    private static long handleLargeFileSystem(long size)
+    {
+        return size < 0 ? Long.MAX_VALUE : size;
+    }
+
+    /**
+     * Private constructor as the class contains only static methods.
+     */
+    private PathUtils()
+    {
+    }
+
+    /**
+     * propagate an IOException as an FSWriteError, FSReadError or UncheckedIOException
+     */
+    public static RuntimeException propagateUnchecked(IOException ioe, Path path, boolean write)
+    {
+        return propagateUnchecked(null, ioe, path, write);
+    }
+
+    /**
+     * propagate an IOException as an FSWriteError, FSReadError or UncheckedIOException
+     */
+    public static RuntimeException propagateUnchecked(String message, IOException ioe, Path path, boolean write)
+    {
+        if (ioe instanceof FileAlreadyExistsException
+            || ioe instanceof NoSuchFileException
+            || ioe instanceof AtomicMoveNotSupportedException
+            || ioe instanceof java.nio.file.DirectoryNotEmptyException
+            || ioe instanceof java.nio.file.FileSystemLoopException
+            || ioe instanceof java.nio.file.NotDirectoryException
+            || ioe instanceof java.nio.file.NotLinkException)
+            throw new UncheckedIOException(message, ioe);
+
+        if (write) throw new FSWriteError(message, ioe, path);
+        else throw new FSReadError(message, ioe, path);
+    }
+
+    /**
+     * propagate an IOException as an FSWriteError, FSReadError or UncheckedIOException - except for NoSuchFileException
+     */
+    public static NoSuchFileException propagateUncheckedOrNoSuchFileException(IOException ioe, Path path, boolean write) throws NoSuchFileException
+    {
+        if (ioe instanceof NoSuchFileException)
+            throw (NoSuchFileException) ioe;
+
+        throw propagateUnchecked(ioe, path, write);
+    }
+
+    /**
+     * propagate an IOException either as itself or an FSWriteError or FSReadError
+     */
+    public static <E extends IOException> E propagate(E ioe, Path path, boolean write) throws E
+    {
+        if (ioe instanceof FileAlreadyExistsException
+            || ioe instanceof NoSuchFileException
+            || ioe instanceof AtomicMoveNotSupportedException
+            || ioe instanceof java.nio.file.DirectoryNotEmptyException
+            || ioe instanceof java.nio.file.FileSystemLoopException
+            || ioe instanceof java.nio.file.NotDirectoryException
+            || ioe instanceof java.nio.file.NotLinkException)
+            throw ioe;
+
+        if (write) throw new FSWriteError(ioe, path);
+        else throw new FSReadError(ioe, path);
+    }
+}
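To make the vernacular in the class javadoc concrete, a minimal usage sketch (java.nio.file.Path, Paths and FileStore imports assumed; the path is hypothetical): methods prefixed with try return 0L or false instead of throwing, while the plain variants propagate failures as FSReadError, FSWriteError or UncheckedIOException.

    Path dir = Paths.get("/tmp/cassandra-example");
    PathUtils.createDirectoriesIfNotExists(dir);                          // throws FSWriteError/UncheckedIOException on failure
    long usable = PathUtils.tryGetSpace(dir, FileStore::getUsableSpace);  // 0L on failure, Long.MAX_VALUE on overflow
    long missing = PathUtils.tryGetLength(dir.resolve("no-such-file"));   // 0L, no exception
    PathUtils.deleteRecursive(dir);                                       // FSWriteError if anything cannot be deleted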
diff --git a/src/java/org/apache/cassandra/io/util/RandomAccessReader.java b/src/java/org/apache/cassandra/io/util/RandomAccessReader.java
index 33d0127..1a18813 100644
--- a/src/java/org/apache/cassandra/io/util/RandomAccessReader.java
+++ b/src/java/org/apache/cassandra/io/util/RandomAccessReader.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteOrder;
 
diff --git a/src/java/org/apache/cassandra/io/util/ReadableMemory.java b/src/java/org/apache/cassandra/io/util/ReadableMemory.java
new file mode 100644
index 0000000..ccb717d
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/ReadableMemory.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface ReadableMemory
+{
+    ByteBuffer[] asByteBuffers(long offset, long length);
+}
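Memory now also exposes its contents through this ReadableMemory view (see the interface change above), presumably so the simulator can reference it across class loaders given the @Shared(scope = SIMULATION) annotation. A small sketch, assuming Memory.allocate(long) is the existing off-heap factory (it is not part of this diff) and java.nio.ByteBuffer is imported:

    try (Memory memory = Memory.allocate(16))                 // Memory implements AutoCloseable and, now, ReadableMemory
    {
        ReadableMemory readable = memory;
        ByteBuffer[] views = readable.asByteBuffers(0, 16);   // ByteBuffer views of the off-heap region
    }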
diff --git a/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java b/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java
index 3ddb143..5d38e80 100644
--- a/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java
+++ b/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java
@@ -33,4 +33,4 @@
      * Needed for tests. Returns the table's CRC check chance, which is only set for compressed tables.
      */
     double getCrcCheckChance();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/Rebufferer.java b/src/java/org/apache/cassandra/io/util/Rebufferer.java
index 2fc7ffa..9920de9 100644
--- a/src/java/org/apache/cassandra/io/util/Rebufferer.java
+++ b/src/java/org/apache/cassandra/io/util/Rebufferer.java
@@ -81,4 +81,4 @@
             // nothing to do
         }
     };
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/RewindableDataInput.java b/src/java/org/apache/cassandra/io/util/RewindableDataInput.java
index c202f60..0a0eee4 100644
--- a/src/java/org/apache/cassandra/io/util/RewindableDataInput.java
+++ b/src/java/org/apache/cassandra/io/util/RewindableDataInput.java
@@ -27,4 +27,4 @@
     void reset(DataPosition mark) throws IOException;
 
     long bytesPastMark(DataPosition mark);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java b/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java
deleted file mode 100644
index a1842bc..0000000
--- a/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.io.util;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.RandomAccessFile;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.cassandra.utils.Throwables.maybeFail;
-import static org.apache.cassandra.utils.Throwables.merge;
-
-/**
- * Adds mark/reset functionality to another input stream by caching read bytes to a memory buffer and
- * spilling to disk if necessary.
- *
- * When the stream is marked via {@link #mark()} or {@link #mark(int)}, up to
- * <code>maxMemBufferSize</code> will be cached in memory (heap). If more than
- * <code>maxMemBufferSize</code> bytes are read while the stream is marked, the
- * following bytes are cached on the <code>spillFile</code> for up to <code>maxDiskBufferSize</code>.
- *
- * Please note that successive calls to {@link #mark()} and {@link #reset()} will write
- * sequentially to the same <code>spillFile</code> until <code>maxDiskBufferSize</code> is reached.
- * At this point, if less than <code>maxDiskBufferSize</code> bytes are currently cached on the
- * <code>spillFile</code>, the remaining bytes are written to the beginning of the file,
- * treating the <code>spillFile</code> as a circular buffer.
- *
- * If more than <code>maxMemBufferSize + maxDiskBufferSize</code> are cached while the stream is marked,
- * the following {@link #reset()} invocation will throw a {@link IllegalStateException}.
- *
- */
-public class RewindableDataInputStreamPlus extends FilterInputStream implements RewindableDataInput, Closeable
-{
-    private boolean marked = false;
-    private boolean exhausted = false;
-    private AtomicBoolean closed = new AtomicBoolean(false);
-
-    protected int memAvailable = 0;
-    protected int diskTailAvailable = 0;
-    protected int diskHeadAvailable = 0;
-
-    private final File spillFile;
-    private final int initialMemBufferSize;
-    private final int maxMemBufferSize;
-    private final int maxDiskBufferSize;
-
-    private volatile byte memBuffer[];
-    private int memBufferSize;
-    private RandomAccessFile spillBuffer;
-
-    private final DataInputPlus dataReader;
-
-    public RewindableDataInputStreamPlus(InputStream in, int initialMemBufferSize, int maxMemBufferSize,
-                                         File spillFile, int maxDiskBufferSize)
-    {
-        super(in);
-        dataReader = new DataInputStreamPlus(this);
-        this.initialMemBufferSize = initialMemBufferSize;
-        this.maxMemBufferSize = maxMemBufferSize;
-        this.spillFile = spillFile;
-        this.maxDiskBufferSize = maxDiskBufferSize;
-    }
-
-    /* RewindableDataInput methods */
-
-    /**
-     * Marks the current position of a stream to return to this position later via the {@link #reset(DataPosition)} method.
-     * @return An empty @link{DataPosition} object
-     */
-    public DataPosition mark()
-    {
-        mark(0);
-        return new RewindableDataInputPlusMark();
-    }
-
-    /**
-     * Rewinds to the previously marked position via the {@link #mark()} method.
-     * @param mark it's not possible to return to a custom position, so this parameter is ignored.
-     * @throws IOException if an error ocurs while resetting
-     */
-    public void reset(DataPosition mark) throws IOException
-    {
-        reset();
-    }
-
-    public long bytesPastMark(DataPosition mark)
-    {
-        return maxMemBufferSize - memAvailable + (diskTailAvailable == -1? 0 : maxDiskBufferSize - diskHeadAvailable - diskTailAvailable);
-    }
-
-
-    protected static class RewindableDataInputPlusMark implements DataPosition
-    {
-    }
-
-    /* InputStream methods */
-
-    public boolean markSupported()
-    {
-        return true;
-    }
-
-    /**
-     * Marks the current position of a stream to return to this position
-     * later via the {@link #reset()} method.
-     * @param readlimit the maximum amount of bytes to cache
-     */
-    public synchronized void mark(int readlimit)
-    {
-        if (marked)
-            throw new IllegalStateException("Cannot mark already marked stream.");
-
-        if (memAvailable > 0 || diskHeadAvailable > 0 || diskTailAvailable > 0)
-            throw new IllegalStateException("Can only mark stream after reading previously marked data.");
-
-        marked = true;
-        memAvailable = maxMemBufferSize;
-        diskHeadAvailable = -1;
-        diskTailAvailable = -1;
-    }
-
-    public synchronized void reset() throws IOException
-    {
-        if (!marked)
-            throw new IOException("Must call mark() before calling reset().");
-
-        if (exhausted)
-            throw new IOException(String.format("Read more than capacity: %d bytes.", maxMemBufferSize + maxDiskBufferSize));
-
-        memAvailable = maxMemBufferSize - memAvailable;
-        memBufferSize = memAvailable;
-
-        if (diskTailAvailable == -1)
-        {
-            diskHeadAvailable = 0;
-            diskTailAvailable = 0;
-        }
-        else
-        {
-            int initialPos = diskTailAvailable > 0 ? 0 : (int)getIfNotClosed(spillBuffer).getFilePointer();
-            int diskMarkpos = initialPos + diskHeadAvailable;
-            getIfNotClosed(spillBuffer).seek(diskMarkpos);
-
-            diskHeadAvailable = diskMarkpos - diskHeadAvailable;
-            diskTailAvailable = (maxDiskBufferSize - diskTailAvailable) - diskMarkpos;
-        }
-
-        marked = false;
-    }
-
-    public int available() throws IOException
-    {
-
-        return super.available() + (marked? 0 : memAvailable + diskHeadAvailable + diskTailAvailable);
-    }
-
-    public int read() throws IOException
-    {
-        int read = readOne();
-        if (read == -1)
-            return read;
-
-        if (marked)
-        {
-            //mark exhausted
-            if (isExhausted(1))
-            {
-                exhausted = true;
-                return read;
-            }
-
-            writeOne(read);
-        }
-
-        return read;
-    }
-
-    public int read(byte[] b, int off, int len) throws IOException
-    {
-        int readBytes = readMulti(b, off, len);
-        if (readBytes == -1)
-            return readBytes;
-
-        if (marked)
-        {
-            //check we have space on buffer
-            if (isExhausted(readBytes))
-            {
-                exhausted = true;
-                return readBytes;
-            }
-
-            writeMulti(b, off, readBytes);
-        }
-
-        return readBytes;
-    }
-
-    private void maybeCreateDiskBuffer() throws IOException
-    {
-        if (spillBuffer == null)
-        {
-            if (!spillFile.getParentFile().exists())
-                spillFile.getParentFile().mkdirs();
-            spillFile.createNewFile();
-
-            this.spillBuffer = new RandomAccessFile(spillFile, "rw");
-        }
-    }
-
-
-    private int readOne() throws IOException
-    {
-        if (!marked)
-        {
-            if (memAvailable > 0)
-            {
-                int pos = memBufferSize - memAvailable;
-                memAvailable--;
-                return getIfNotClosed(memBuffer)[pos] & 0xff;
-            }
-
-            if (diskTailAvailable > 0 || diskHeadAvailable > 0)
-            {
-                int read = getIfNotClosed(spillBuffer).read();
-                if (diskTailAvailable > 0)
-                    diskTailAvailable--;
-                else if (diskHeadAvailable > 0)
-                    diskHeadAvailable++;
-                if (diskTailAvailable == 0)
-                    spillBuffer.seek(0);
-                return read;
-            }
-        }
-
-        return getIfNotClosed(in).read();
-    }
-
-    private boolean isExhausted(int readBytes)
-    {
-        return exhausted || readBytes > memAvailable + (long)(diskTailAvailable == -1? maxDiskBufferSize : diskTailAvailable + diskHeadAvailable);
-    }
-
-    private int readMulti(byte[] b, int off, int len) throws IOException
-    {
-        int readBytes = 0;
-        if (!marked)
-        {
-            if (memAvailable > 0)
-            {
-                readBytes += memAvailable < len ? memAvailable : len;
-                int pos = memBufferSize - memAvailable;
-                System.arraycopy(memBuffer, pos, b, off, readBytes);
-                memAvailable -= readBytes;
-                off += readBytes;
-                len -= readBytes;
-            }
-            if (len > 0 && diskTailAvailable > 0)
-            {
-                int readFromTail = diskTailAvailable < len? diskTailAvailable : len;
-                readFromTail = getIfNotClosed(spillBuffer).read(b, off, readFromTail);
-                readBytes += readFromTail;
-                diskTailAvailable -= readFromTail;
-                off += readFromTail;
-                len -= readFromTail;
-                if (diskTailAvailable == 0)
-                    spillBuffer.seek(0);
-            }
-            if (len > 0 && diskHeadAvailable > 0)
-            {
-                int readFromHead = diskHeadAvailable < len? diskHeadAvailable : len;
-                readFromHead = getIfNotClosed(spillBuffer).read(b, off, readFromHead);
-                readBytes += readFromHead;
-                diskHeadAvailable -= readFromHead;
-                off += readFromHead;
-                len -= readFromHead;
-            }
-        }
-
-        if (len > 0)
-            readBytes += getIfNotClosed(in).read(b, off, len);
-
-        return readBytes;
-    }
-
-    private void writeMulti(byte[] b, int off, int len) throws IOException
-    {
-        if (memAvailable > 0)
-        {
-            if (memBuffer == null)
-                memBuffer = new byte[initialMemBufferSize];
-            int pos = maxMemBufferSize - memAvailable;
-            int memWritten = memAvailable < len? memAvailable : len;
-            if (pos + memWritten >= getIfNotClosed(memBuffer).length)
-                growMemBuffer(pos, memWritten);
-            System.arraycopy(b, off, memBuffer, pos, memWritten);
-            off += memWritten;
-            len -= memWritten;
-            memAvailable -= memWritten;
-        }
-
-        if (len > 0)
-        {
-            if (diskTailAvailable == -1)
-            {
-                maybeCreateDiskBuffer();
-                diskHeadAvailable = (int)spillBuffer.getFilePointer();
-                diskTailAvailable = maxDiskBufferSize - diskHeadAvailable;
-            }
-
-            if (len > 0 && diskTailAvailable > 0)
-            {
-                int diskTailWritten = diskTailAvailable < len? diskTailAvailable : len;
-                getIfNotClosed(spillBuffer).write(b, off, diskTailWritten);
-                off += diskTailWritten;
-                len -= diskTailWritten;
-                diskTailAvailable -= diskTailWritten;
-                if (diskTailAvailable == 0)
-                    spillBuffer.seek(0);
-            }
-
-            if (len > 0 && diskTailAvailable > 0)
-            {
-                int diskHeadWritten = diskHeadAvailable < len? diskHeadAvailable : len;
-                getIfNotClosed(spillBuffer).write(b, off, diskHeadWritten);
-            }
-        }
-    }
-
-    private void writeOne(int value) throws IOException
-    {
-        if (memAvailable > 0)
-        {
-            if (memBuffer == null)
-                memBuffer = new byte[initialMemBufferSize];
-            int pos = maxMemBufferSize - memAvailable;
-            if (pos == getIfNotClosed(memBuffer).length)
-                growMemBuffer(pos, 1);
-            getIfNotClosed(memBuffer)[pos] = (byte)value;
-            memAvailable--;
-            return;
-        }
-
-        if (diskTailAvailable == -1)
-        {
-            maybeCreateDiskBuffer();
-            diskHeadAvailable = (int)spillBuffer.getFilePointer();
-            diskTailAvailable = maxDiskBufferSize - diskHeadAvailable;
-        }
-
-        if (diskTailAvailable > 0 || diskHeadAvailable > 0)
-        {
-            getIfNotClosed(spillBuffer).write(value);
-            if (diskTailAvailable > 0)
-                diskTailAvailable--;
-            else if (diskHeadAvailable > 0)
-                diskHeadAvailable--;
-            if (diskTailAvailable == 0)
-                spillBuffer.seek(0);
-            return;
-        }
-    }
-
-    public int read(byte[] b) throws IOException
-    {
-        return read(b, 0, b.length);
-    }
-
-    private void growMemBuffer(int pos, int writeSize)
-    {
-        int newSize = Math.min(2 * (pos + writeSize), maxMemBufferSize);
-        byte newBuffer[] = new byte[newSize];
-        System.arraycopy(memBuffer, 0, newBuffer, 0, pos);
-        memBuffer = newBuffer;
-    }
-
-    public long skip(long n) throws IOException
-    {
-        long skipped = 0;
-
-        if (marked)
-        {
-            //if marked, we need to cache skipped bytes
-            while (n-- > 0 && read() != -1)
-            {
-                skipped++;
-            }
-            return skipped;
-        }
-
-        if (memAvailable > 0)
-        {
-            skipped += memAvailable < n ? memAvailable : n;
-            memAvailable -= skipped;
-            n -= skipped;
-        }
-        if (n > 0 && diskTailAvailable > 0)
-        {
-            int skipFromTail = diskTailAvailable < n? diskTailAvailable : (int)n;
-            getIfNotClosed(spillBuffer).skipBytes(skipFromTail);
-            diskTailAvailable -= skipFromTail;
-            skipped += skipFromTail;
-            n -= skipFromTail;
-            if (diskTailAvailable == 0)
-                spillBuffer.seek(0);
-        }
-        if (n > 0 && diskHeadAvailable > 0)
-        {
-            int skipFromHead = diskHeadAvailable < n? diskHeadAvailable : (int)n;
-            getIfNotClosed(spillBuffer).skipBytes(skipFromHead);
-            diskHeadAvailable -= skipFromHead;
-            skipped += skipFromHead;
-            n -= skipFromHead;
-        }
-
-        if (n > 0)
-            skipped += getIfNotClosed(in).skip(n);
-
-        return skipped;
-    }
-
-    private <T> T getIfNotClosed(T in) throws IOException
-    {
-        if (closed.get())
-            throw new IOException("Stream closed");
-        return in;
-    }
-
-    public void close() throws IOException
-    {
-        close(true);
-    }
-
-    public void close(boolean closeUnderlying) throws IOException
-    {
-        if (closed.compareAndSet(false, true))
-        {
-            Throwable fail = null;
-            if (closeUnderlying)
-            {
-                try
-                {
-                    super.close();
-                }
-                catch (IOException e)
-                {
-                    fail = merge(fail, e);
-                }
-            }
-            try
-            {
-                if (spillBuffer != null)
-                {
-                    this.spillBuffer.close();
-                    this.spillBuffer = null;
-                }
-            } catch (IOException e)
-            {
-                fail = merge(fail, e);
-            }
-            try
-            {
-                if (spillFile.exists())
-                {
-                    spillFile.delete();
-                }
-            }
-            catch (Throwable e)
-            {
-                fail = merge(fail, e);
-            }
-            maybeFail(fail, IOException.class);
-        }
-    }
-
-    /* DataInputPlus methods */
-
-    public void readFully(byte[] b) throws IOException
-    {
-        dataReader.readFully(b);
-    }
-
-    public void readFully(byte[] b, int off, int len) throws IOException
-    {
-        dataReader.readFully(b, off, len);
-    }
-
-    public int skipBytes(int n) throws IOException
-    {
-        return dataReader.skipBytes(n);
-    }
-
-    public boolean readBoolean() throws IOException
-    {
-        return dataReader.readBoolean();
-    }
-
-    public byte readByte() throws IOException
-    {
-        return dataReader.readByte();
-    }
-
-    public int readUnsignedByte() throws IOException
-    {
-        return dataReader.readUnsignedByte();
-    }
-
-    public short readShort() throws IOException
-    {
-        return dataReader.readShort();
-    }
-
-    public int readUnsignedShort() throws IOException
-    {
-        return dataReader.readUnsignedShort();
-    }
-
-    public char readChar() throws IOException
-    {
-        return dataReader.readChar();
-    }
-
-    public int readInt() throws IOException
-    {
-        return dataReader.readInt();
-    }
-
-    public long readLong() throws IOException
-    {
-        return dataReader.readLong();
-    }
-
-    public float readFloat() throws IOException
-    {
-        return dataReader.readFloat();
-    }
-
-    public double readDouble() throws IOException
-    {
-        return dataReader.readDouble();
-    }
-
-    public String readLine() throws IOException
-    {
-        return dataReader.readLine();
-    }
-
-    public String readUTF() throws IOException
-    {
-        return dataReader.readUTF();
-    }
-}
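The file removed above implemented a rewindable input stream that caches reads in memory and spills the overflow to a circular on-disk buffer while marked. For readers tracing what is being dropped, the following is a minimal usage sketch of that mark()/reset() API; it is illustrative only, the buffer sizes and spill path are invented, and the import packages are assumed from the surrounding io.util diffs.

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import org.apache.cassandra.io.util.DataPosition;
import org.apache.cassandra.io.util.RewindableDataInputStreamPlus;

public class RewindableSketch
{
    public static void main(String[] args) throws IOException
    {
        byte[] payload = { 1, 2, 3, 4, 5 };
        File spill = new File("/tmp/rewindable-spill.bin");   // hypothetical spill location

        try (RewindableDataInputStreamPlus in =
                 new RewindableDataInputStreamPlus(new ByteArrayInputStream(payload),
                                                   16,     // initial in-memory buffer
                                                   32,     // max in-memory buffer
                                                   spill,  // overflow beyond memory goes here
                                                   1024))  // max on-disk buffer
        {
            DataPosition mark = in.mark();   // start caching everything read from here on
            int first = in.read();           // read from the wrapped stream and cache it
            in.reset(mark);                  // rewind to the marked position
            assert in.read() == first;       // the cached byte is served again
        }
    }
}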
diff --git a/src/java/org/apache/cassandra/io/util/SequentialWriter.java b/src/java/org/apache/cassandra/io/util/SequentialWriter.java
index 9ad944b..431ece3 100644
--- a/src/java/org/apache/cassandra/io/util/SequentialWriter.java
+++ b/src/java/org/apache/cassandra/io/util/SequentialWriter.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
@@ -114,7 +113,7 @@
                 FileChannel channel = FileChannel.open(file.toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
                 try
                 {
-                    SyncUtil.trySyncDir(file.getParentFile());
+                    SyncUtil.trySyncDir(file.parent());
                 }
                 catch (Throwable t)
                 {
@@ -163,7 +162,7 @@
         this.strictFlushing = strictFlushing;
         this.fchannel = (FileChannel)channel;
 
-        this.filePath = file.getAbsolutePath();
+        this.filePath = file.absolutePath();
 
         this.option = option;
     }
diff --git a/src/java/org/apache/cassandra/io/util/SequentialWriterOption.java b/src/java/org/apache/cassandra/io/util/SequentialWriterOption.java
index 61f375b..49093c1 100644
--- a/src/java/org/apache/cassandra/io/util/SequentialWriterOption.java
+++ b/src/java/org/apache/cassandra/io/util/SequentialWriterOption.java
@@ -32,14 +32,15 @@
      * Default write option.
      *
      * <ul>
-     *   <li>buffer size: 64 KB
+     *   <li>buffer size: 64 KiB
      *   <li>buffer type: on heap
      *   <li>trickle fsync: false
-     *   <li>trickle fsync byte interval: 10 MB
+     *   <li>trickle fsync byte interval: 10 MiB
      *   <li>finish on close: false
      * </ul>
      */
     public static final SequentialWriterOption DEFAULT = SequentialWriterOption.newBuilder().build();
+    public static final SequentialWriterOption FINISH_ON_CLOSE = SequentialWriterOption.newBuilder().finishOnClose(true).build();
 
     private final int bufferSize;
     private final BufferType bufferType;
@@ -108,7 +109,7 @@
         private BufferType bufferType = BufferType.ON_HEAP;
         /* default: no trickle fsync */
         private boolean trickleFsync = false;
-        /* default tricle fsync byte interval: 10MB */
+        /* default trickle fsync byte interval: 10MiB */
         private int trickleFsyncByteInterval = 10 * 1024 * 1024;
         private boolean finishOnClose = false;
 
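For clarity on the constant added above: FINISH_ON_CLOSE is simply the default option with finishOnClose flipped, so the two forms in this sketch are interchangeable. The sketch is illustrative only and not part of the patch.

import org.apache.cassandra.io.util.SequentialWriterOption;

public class WriterOptionSketch
{
    public static void main(String[] args)
    {
        // Explicit form: the documented defaults (64 KiB on-heap buffer, no trickle fsync) plus finish-on-close
        SequentialWriterOption viaBuilder = SequentialWriterOption.newBuilder()
                                                                  .finishOnClose(true)
                                                                  .build();

        // Pre-built shorthand introduced by this change
        SequentialWriterOption viaConstant = SequentialWriterOption.FINISH_ON_CLOSE;

        // Either can be handed to a SequentialWriter; the constant only saves the builder call.
        System.out.println(viaBuilder + " / " + viaConstant);
    }
}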
diff --git a/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java b/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java
index bc1a529..05fdb6b 100644
--- a/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java
+++ b/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java
@@ -69,4 +69,4 @@
                              bufferSize,
                              fileLength());
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/UnbufferedDataOutputStreamPlus.java b/src/java/org/apache/cassandra/io/util/UnbufferedDataOutputStreamPlus.java
index 3d83212..a90477f 100644
--- a/src/java/org/apache/cassandra/io/util/UnbufferedDataOutputStreamPlus.java
+++ b/src/java/org/apache/cassandra/io/util/UnbufferedDataOutputStreamPlus.java
@@ -25,8 +25,6 @@
 
 import org.apache.cassandra.utils.memory.MemoryUtil;
 
-import com.google.common.base.Function;
-
 /**
  * Base class for DataOutput implementations that does not have an optimized implementations of Plus methods
  * and does no buffering.
diff --git a/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java b/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java
index 2ec555c..cc21062 100644
--- a/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java
+++ b/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java
@@ -23,16 +23,21 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
 
+import java.util.AbstractList;
 import java.util.AbstractMap;
 import java.util.AbstractSet;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Objects;
+import java.util.RandomAccess;
 import java.util.Set;
+import java.util.Spliterator;
 import java.util.function.BiConsumer;
 import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.function.Supplier;
@@ -43,6 +48,9 @@
  * A collection like class for Replica objects. Since the Replica class contains inetaddress, range, and
  * transient replication status, basic contains and remove methods can be ambiguous. Replicas forces you
  * to be explicit about what you're checking the container for, or removing from it.
+ *
+ * TODO: there's nothing about this collection that's unique to Replicas, and the implementation
+ *       could make a useful general purpose immutable list<->set
  */
 public abstract class AbstractReplicaCollection<C extends AbstractReplicaCollection<C>> implements ReplicaCollection<C>
 {
@@ -67,8 +75,10 @@
      * A simple list with no comodification checks and immutability by default (only append permitted, and only one initial copy)
      * this permits us to reduce the amount of garbage generated, by not wrapping iterators or unnecessarily copying
      * and reduces the amount of indirection necessary, as well as ensuring monomorphic callsites
+     *
+     * TODO flatten into AbstractReplicaCollection?
      */
-    protected static class ReplicaList implements Iterable<Replica>
+    protected final static class ReplicaList implements Iterable<Replica>
     {
         private static final Replica[] EMPTY = new Replica[0];
         Replica[] contents;
@@ -123,7 +133,7 @@
             return new ReplicaList(contents, this.begin + begin, end - begin);
         }
 
-        public ReplicaList sorted(Comparator<Replica> comparator)
+        public ReplicaList sorted(Comparator<? super Replica> comparator)
         {
             Replica[] copy = Arrays.copyOfRange(contents, begin, begin + size);
             Arrays.sort(copy, comparator);
@@ -135,6 +145,37 @@
             return Arrays.stream(contents, begin, begin + size);
         }
 
+        @Override
+        public void forEach(Consumer<? super Replica> forEach)
+        {
+            for (int i = begin, end = begin + size ; i < end ; ++i)
+                forEach.accept(contents[i]);
+        }
+
+        /** see {@link ReplicaCollection#count(Predicate)}*/
+        public int count(Predicate<? super Replica> test)
+        {
+            int count = 0;
+            for (int i = begin, end = i + size ; i < end ; ++i)
+                if (test.test(contents[i]))
+                    ++count;
+            return count;
+        }
+
+        public final boolean anyMatch(Predicate<? super Replica> predicate)
+        {
+            for (int i = begin, end = i + size ; i < end ; ++i)
+                if (predicate.test(contents[i]))
+                    return true;
+            return false;
+        }
+
+        @Override
+        public Spliterator<Replica> spliterator()
+        {
+            return Arrays.spliterator(contents, begin, begin + size);
+        }
+
         // we implement our own iterator, because it is trivial to do so, and in monomorphic call sites
         // will compile down to almost optimal indexed for loop
         @Override
@@ -161,7 +202,7 @@
 
         // we implement our own iterator, because it is trivial to do so, and in monomorphic call sites
         // will compile down to almost optimal indexed for loop
-        public <K> Iterator<K> transformIterator(Function<Replica, K> function)
+        public <K> Iterator<K> transformIterator(Function<? super Replica, ? extends K> function)
         {
             return new Iterator<K>()
             {
@@ -184,7 +225,7 @@
         // we implement our own iterator, because it is trivial to do so, and in monomorphic call sites
         // will compile down to almost optimal indexed for loop
         // in this case, especially, it is impactful versus Iterables.limit(Iterables.filter())
-        private Iterator<Replica> filterIterator(Predicate<Replica> predicate, int limit)
+        private Iterator<Replica> filterIterator(Predicate<? super Replica> predicate, int limit)
         {
             return new Iterator<Replica>()
             {
@@ -216,6 +257,12 @@
             };
         }
 
+        protected <T> void forEach(Function<? super Replica, T> function, Consumer<? super T> action)
+        {
+            for (int i = begin, end = begin + size ; i < end ; ++i)
+                action.accept(function.apply(contents[i]));
+        }
+
         @VisibleForTesting
         public boolean equals(Object to)
         {
@@ -261,6 +308,12 @@
             public boolean contains(Object o) { return containsKey(o); }
             @Override
             public Iterator<K> iterator() { return list.transformIterator(toKey); }
+
+            @Override
+            public void forEach(Consumer<? super K> action)
+            {
+                list.forEach(toKey, action);
+            }
         }
 
         class EntrySet extends AbstractImmutableSet<Entry<K, Replica>>
@@ -316,7 +369,7 @@
         public boolean containsKey(Object key)
         {
             Preconditions.checkNotNull(key);
-            return get((K)key) != null;
+            return get(key) != null;
         }
 
         public Replica get(Object key)
@@ -366,6 +419,35 @@
         }
     }
 
+    static class AsList<T> extends AbstractList<T> implements RandomAccess
+    {
+        final Function<Replica, T> view;
+        final ReplicaList list;
+
+        AsList(Function<Replica, T> view, ReplicaList list)
+        {
+            this.view = view;
+            this.list = list;
+        }
+
+        public final T get(int index)
+        {
+            return view.apply(list.get(index));
+        }
+
+        public final int size()
+        {
+            return list.size;
+        }
+
+        @Override
+        public final void forEach(Consumer<? super T> forEach)
+        {
+            list.forEach(view, forEach);
+        }
+    }
+
+
     protected final ReplicaList list;
     AbstractReplicaCollection(ReplicaList list)
     {
@@ -396,24 +478,30 @@
         return snapshot(subList);
     }
 
-    /** see {@link ReplicaCollection#count(Predicate)}*/
-    public int count(Predicate<Replica> predicate)
+    public final <T> List<T> asList(Function<Replica, T> view)
     {
-        int count = 0;
-        for (int i = 0 ; i < list.size() ; ++i)
-            if (predicate.test(list.get(i)))
-                ++count;
-        return count;
+        return new AsList<>(view, list);
+    }
+
+    /** see {@link ReplicaCollection#count(Predicate)}*/
+    public final int count(Predicate<? super Replica> test)
+    {
+        return list.count(test);
+    }
+
+    public final boolean anyMatch(Predicate<? super Replica> test)
+    {
+        return list.anyMatch(test);
     }
 
     /** see {@link ReplicaCollection#filter(Predicate)}*/
-    public final C filter(Predicate<Replica> predicate)
+    public final C filter(Predicate<? super Replica> predicate)
     {
         return filter(predicate, Integer.MAX_VALUE);
     }
 
     /** see {@link ReplicaCollection#filter(Predicate, int)}*/
-    public final C filter(Predicate<Replica> predicate, int limit)
+    public final C filter(Predicate<? super Replica> predicate, int limit)
     {
         if (isEmpty())
             return snapshot();
@@ -457,19 +545,19 @@
     }
 
     /** see {@link ReplicaCollection#filterLazily(Predicate)}*/
-    public final Iterable<Replica> filterLazily(Predicate<Replica> predicate)
+    public final Iterable<Replica> filterLazily(Predicate<? super Replica> predicate)
     {
         return filterLazily(predicate, Integer.MAX_VALUE);
     }
 
     /** see {@link ReplicaCollection#filterLazily(Predicate,int)}*/
-    public final Iterable<Replica> filterLazily(Predicate<Replica> predicate, int limit)
+    public final Iterable<Replica> filterLazily(Predicate<? super Replica> predicate, int limit)
     {
         return () -> list.filterIterator(predicate, limit);
     }
 
     /** see {@link ReplicaCollection#sorted(Comparator)}*/
-    public final C sorted(Comparator<Replica> comparator)
+    public final C sorted(Comparator<? super Replica> comparator)
     {
         return snapshot(list.sorted(comparator));
     }
@@ -494,6 +582,11 @@
         return list.iterator();
     }
 
+    public final void forEach(Consumer<? super Replica> forEach)
+    {
+        list.forEach(forEach);
+    }
+
     public final Stream<Replica> stream() { return list.stream(); }
 
     /**
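A short usage sketch of the collection helpers introduced above (illustrative only, not part of the patch): count, anyMatch and forEach now accept widened functional types and iterate without allocating an iterator. EndpointsForToken stands in for any concrete replica collection; Replica::isFull and Replica::isTransient are existing accessors.

import org.apache.cassandra.locator.EndpointsForToken;
import org.apache.cassandra.locator.Replica;

final class ReplicaCollectionSketch
{
    static void summarise(EndpointsForToken replicas)
    {
        int full = replicas.count(Replica::isFull);                      // Predicate<? super Replica> is now accepted
        boolean anyTransient = replicas.anyMatch(Replica::isTransient);  // short-circuits on the first match
        replicas.forEach(r -> System.out.println(r + " -> " + r.endpoint()));
        System.out.println(full + " full replicas, transient present: " + anyTransient);
    }
}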
diff --git a/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java b/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
index 909a7f6..c233fc0 100644
--- a/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
+++ b/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
@@ -24,6 +24,7 @@
 import java.util.Collections;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
 
 import com.google.common.base.Preconditions;
 
@@ -32,6 +33,7 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.WriteType;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.RingPosition;
@@ -39,6 +41,7 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.locator.ReplicaCollection.Builder.Conflict;
 import org.apache.cassandra.service.AbstractWriteResponseHandler;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.DatacenterSyncWriteResponseHandler;
 import org.apache.cassandra.service.DatacenterWriteResponseHandler;
 import org.apache.cassandra.service.WriteResponseHandler;
@@ -128,17 +131,20 @@
      */
     public abstract EndpointsForRange calculateNaturalReplicas(Token searchToken, TokenMetadata tokenMetadata);
 
-    public <T> AbstractWriteResponseHandler<T> getWriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan,
+    public <T> AbstractWriteResponseHandler<T> getWriteResponseHandler(ReplicaPlan.ForWrite replicaPlan,
                                                                        Runnable callback,
                                                                        WriteType writeType,
+                                                                       Supplier<Mutation> hintOnFailure,
                                                                        long queryStartNanoTime)
     {
-        return getWriteResponseHandler(replicaPlan, callback, writeType, queryStartNanoTime, DatabaseDescriptor.getIdealConsistencyLevel());
+        return getWriteResponseHandler(replicaPlan, callback, writeType, hintOnFailure,
+                                       queryStartNanoTime, DatabaseDescriptor.getIdealConsistencyLevel());
     }
 
-    public <T> AbstractWriteResponseHandler<T> getWriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan,
+    public <T> AbstractWriteResponseHandler<T> getWriteResponseHandler(ReplicaPlan.ForWrite replicaPlan,
                                                                        Runnable callback,
                                                                        WriteType writeType,
+                                                                       Supplier<Mutation> hintOnFailure,
                                                                        long queryStartNanoTime,
                                                                        ConsistencyLevel idealConsistencyLevel)
     {
@@ -146,15 +152,15 @@
         if (replicaPlan.consistencyLevel().isDatacenterLocal())
         {
             // block for in this context will be localnodes block.
-            resultResponseHandler = new DatacenterWriteResponseHandler<T>(replicaPlan, callback, writeType, queryStartNanoTime);
+            resultResponseHandler = new DatacenterWriteResponseHandler<T>(replicaPlan, callback, writeType, hintOnFailure, queryStartNanoTime);
         }
         else if (replicaPlan.consistencyLevel() == ConsistencyLevel.EACH_QUORUM && (this instanceof NetworkTopologyStrategy))
         {
-            resultResponseHandler = new DatacenterSyncWriteResponseHandler<T>(replicaPlan, callback, writeType, queryStartNanoTime);
+            resultResponseHandler = new DatacenterSyncWriteResponseHandler<T>(replicaPlan, callback, writeType, hintOnFailure, queryStartNanoTime);
         }
         else
         {
-            resultResponseHandler = new WriteResponseHandler<T>(replicaPlan, callback, writeType, queryStartNanoTime);
+            resultResponseHandler = new WriteResponseHandler<T>(replicaPlan, callback, writeType, hintOnFailure, queryStartNanoTime);
         }
 
         //Check if tracking the ideal consistency level is configured
@@ -173,6 +179,7 @@
                 AbstractWriteResponseHandler<T> idealHandler = getWriteResponseHandler(replicaPlan.withConsistencyLevel(idealConsistencyLevel),
                                                                                        callback,
                                                                                        writeType,
+                                                                                       hintOnFailure,
                                                                                        queryStartNanoTime,
                                                                                        idealConsistencyLevel);
                 resultResponseHandler.setIdealCLResponseHandler(idealHandler);
@@ -280,7 +287,17 @@
 
     public abstract void validateOptions() throws ConfigurationException;
 
-    public abstract void maybeWarnOnOptions();
+    @Deprecated // use #maybeWarnOnOptions(ClientState) instead
+    public void maybeWarnOnOptions()
+    {
+        // nothing to do here
+    }
+
+    public void maybeWarnOnOptions(ClientState state)
+    {
+        maybeWarnOnOptions();
+    }
+
 
     /*
      * The options recognized by the strategy.
@@ -378,12 +395,13 @@
                                                    Class<? extends AbstractReplicationStrategy> strategyClass,
                                                    TokenMetadata tokenMetadata,
                                                    IEndpointSnitch snitch,
-                                                   Map<String, String> strategyOptions) throws ConfigurationException
+                                                   Map<String, String> strategyOptions,
+                                                   ClientState state) throws ConfigurationException
     {
         AbstractReplicationStrategy strategy = createInternal(keyspaceName, strategyClass, tokenMetadata, snitch, strategyOptions);
         strategy.validateExpectedOptions();
         strategy.validateOptions();
-        strategy.maybeWarnOnOptions();
+        strategy.maybeWarnOnOptions(state);
         if (strategy.hasTransientReplicas() && !DatabaseDescriptor.isTransientReplicationEnabled())
         {
             throw new ConfigurationException("Transient replication is disabled. Enable in cassandra.yaml to use.");
@@ -415,6 +433,7 @@
         try
         {
             ReplicationFactor rf = ReplicationFactor.fromString(s);
+            
             if (rf.hasTransientReplicas())
             {
                 if (DatabaseDescriptor.getNumTokens() > 1)
diff --git a/src/java/org/apache/cassandra/locator/CloudstackSnitch.java b/src/java/org/apache/cassandra/locator/CloudstackSnitch.java
index be6d3c4..d857953 100644
--- a/src/java/org/apache/cassandra/locator/CloudstackSnitch.java
+++ b/src/java/org/apache/cassandra/locator/CloudstackSnitch.java
@@ -20,9 +20,7 @@
 import java.io.DataInputStream;
 import java.io.BufferedInputStream;
 import java.io.BufferedReader;
-import java.io.FileReader;
 import java.io.IOException;
-import java.io.File;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URI;
@@ -31,6 +29,8 @@
 import java.util.regex.Pattern;
 import java.util.regex.Matcher;
 
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.cassandra.db.SystemKeyspace;
diff --git a/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java b/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
index 976e1da..2248248 100644
--- a/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
+++ b/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
@@ -271,7 +271,7 @@
 
     private void updateScores() // this is expensive
     {
-        if (!StorageService.instance.isGossipActive())
+        if (!StorageService.instance.isInitialized())
             return;
         if (!registered)
         {
@@ -320,7 +320,7 @@
 
     public Map<InetAddress, Double> getScores()
     {
-        return scores.entrySet().stream().collect(Collectors.toMap(address -> address.getKey().address, Map.Entry::getValue));
+        return scores.entrySet().stream().collect(Collectors.toMap(address -> address.getKey().getAddress(), Map.Entry::getValue));
     }
 
     public Map<String, Double> getScoresWithPort()
diff --git a/src/java/org/apache/cassandra/locator/Ec2MultiRegionSnitch.java b/src/java/org/apache/cassandra/locator/Ec2MultiRegionSnitch.java
index f9de755..45c387d 100644
--- a/src/java/org/apache/cassandra/locator/Ec2MultiRegionSnitch.java
+++ b/src/java/org/apache/cassandra/locator/Ec2MultiRegionSnitch.java
@@ -73,7 +73,7 @@
             throw new RuntimeException(e);
         }
         Gossiper.instance.addLocalApplicationState(ApplicationState.INTERNAL_ADDRESS_AND_PORT, StorageService.instance.valueFactory.internalAddressAndPort(address));
-        Gossiper.instance.addLocalApplicationState(ApplicationState.INTERNAL_IP, StorageService.instance.valueFactory.internalIP(address.address));
+        Gossiper.instance.addLocalApplicationState(ApplicationState.INTERNAL_IP, StorageService.instance.valueFactory.internalIP(address.getAddress()));
         Gossiper.instance.register(new ReconnectableSnitchHelper(this, ec2region, true));
     }
 }
diff --git a/src/java/org/apache/cassandra/locator/Endpoints.java b/src/java/org/apache/cassandra/locator/Endpoints.java
index c1a9282..1561db2 100644
--- a/src/java/org/apache/cassandra/locator/Endpoints.java
+++ b/src/java/org/apache/cassandra/locator/Endpoints.java
@@ -21,15 +21,12 @@
 import org.apache.cassandra.locator.ReplicaCollection.Builder.Conflict;
 import org.apache.cassandra.utils.FBUtilities;
 
-import java.util.AbstractList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 
-import com.google.common.collect.Lists;
-
 /**
  * A collection of Endpoints for a given ring position.  This will typically reside in a ReplicaLayout,
  * representing some subset of the endpoints for the Token or Range
@@ -56,20 +53,14 @@
         return byEndpoint().keySet();
     }
 
+    public InetAddressAndPort endpoint(int i)
+    {
+        return get(i).endpoint();
+    }
+
     public List<InetAddressAndPort> endpointList()
     {
-        return new AbstractList<InetAddressAndPort>()
-        {
-            public InetAddressAndPort get(int index)
-            {
-                return list.get(index).endpoint();
-            }
-
-            public int size()
-            {
-                return list.size;
-            }
-        };
+        return asList(Replica::endpoint);
     }
 
     public Map<InetAddressAndPort, Replica> byEndpoint()
@@ -89,6 +80,11 @@
                         replica);
     }
 
+    public boolean contains(InetAddressAndPort endpoint)
+    {
+        return endpoint != null && byEndpoint().containsKey(endpoint);
+    }
+
     public E withoutSelf()
     {
         InetAddressAndPort self = FBUtilities.getBroadcastAddressAndPort();
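Illustrative sketch (not part of the patch) of the reworked Endpoints helpers above: endpointList() is now the asList(Replica::endpoint) view, and endpoint(i) and contains(...) avoid building intermediate collections at call sites. The peer argument is hypothetical.

import java.util.List;
import org.apache.cassandra.locator.EndpointsForToken;
import org.apache.cassandra.locator.InetAddressAndPort;

final class EndpointsSketch
{
    static void describe(EndpointsForToken replicas, InetAddressAndPort peer)
    {
        List<InetAddressAndPort> endpoints = replicas.endpointList();            // lazy view, nothing is copied
        boolean known = replicas.contains(peer);                                 // null-safe byEndpoint() lookup
        InetAddressAndPort first = replicas.isEmpty() ? null : replicas.endpoint(0);
        System.out.println(endpoints.size() + " endpoints, first=" + first + ", contains peer: " + known);
    }
}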
diff --git a/src/java/org/apache/cassandra/locator/EndpointsByReplica.java b/src/java/org/apache/cassandra/locator/EndpointsByReplica.java
index 72d8751..9590842 100644
--- a/src/java/org/apache/cassandra/locator/EndpointsByReplica.java
+++ b/src/java/org/apache/cassandra/locator/EndpointsByReplica.java
@@ -23,8 +23,6 @@
 import com.google.common.collect.Maps;
 import org.apache.cassandra.locator.ReplicaCollection.Builder.Conflict;
 
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 
 public class EndpointsByReplica extends ReplicaMultimap<Replica, EndpointsForRange>
diff --git a/src/java/org/apache/cassandra/locator/EndpointsForToken.java b/src/java/org/apache/cassandra/locator/EndpointsForToken.java
index c709988..70cd763 100644
--- a/src/java/org/apache/cassandra/locator/EndpointsForToken.java
+++ b/src/java/org/apache/cassandra/locator/EndpointsForToken.java
@@ -19,7 +19,11 @@
 package org.apache.cassandra.locator;
 
 import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -67,6 +71,11 @@
         return new EndpointsForToken(token, newList, byEndpoint);
     }
 
+    public Replica lookup(InetAddressAndPort endpoint)
+    {
+        return byEndpoint().get(endpoint);
+    }
+
     public static class Builder extends EndpointsForToken implements ReplicaCollection.Builder<EndpointsForToken>
     {
         boolean built;
@@ -146,4 +155,34 @@
         if (replicas.isEmpty()) return empty(token);
         return builder(token, replicas.size()).addAll(replicas).build();
     }
+
+    public static EndpointsForToken natural(Keyspace keyspace, Token token)
+    {
+        return keyspace.getReplicationStrategy().getNaturalReplicasForToken(token);
+    }
+
+    public static EndpointsForToken natural(AbstractReplicationStrategy replicationStrategy, Token token)
+    {
+        return replicationStrategy.getNaturalReplicasForToken(token);
+    }
+
+    public static EndpointsForToken natural(TableMetadata table, Token token)
+    {
+        return natural(Keyspace.open(table.keyspace), token);
+    }
+
+    public static EndpointsForToken pending(TableMetadata table, Token token)
+    {
+        return pending(table.keyspace, token);
+    }
+
+    public static EndpointsForToken pending(Keyspace keyspace, Token token)
+    {
+        return pending(keyspace.getName(), token);
+    }
+
+    public static EndpointsForToken pending(String keyspace, Token token)
+    {
+        return StorageService.instance.getTokenMetadata().pendingEndpointsForToken(token, keyspace);
+    }
 }
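A minimal sketch (illustrative only, not part of the patch) of the static factories added above; the keyspace name is hypothetical.

import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.EndpointsForToken;

final class TokenReplicaLookupSketch
{
    static void show(Token token)
    {
        Keyspace ks = Keyspace.open("my_keyspace");                        // hypothetical keyspace
        EndpointsForToken natural = EndpointsForToken.natural(ks, token);  // from the keyspace's replication strategy
        EndpointsForToken pending = EndpointsForToken.pending(ks, token);  // from TokenMetadata's pending ranges
        System.out.println(natural.size() + " natural, " + pending.size() + " pending replicas");
    }
}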
diff --git a/src/java/org/apache/cassandra/locator/IEndpointSnitch.java b/src/java/org/apache/cassandra/locator/IEndpointSnitch.java
index 381a642..0120391 100644
--- a/src/java/org/apache/cassandra/locator/IEndpointSnitch.java
+++ b/src/java/org/apache/cassandra/locator/IEndpointSnitch.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.locator;
 
+import java.net.InetSocketAddress;
 import java.util.Set;
 
 import org.apache.cassandra.utils.FBUtilities;
@@ -55,6 +56,11 @@
         return getDatacenter(FBUtilities.getBroadcastAddressAndPort());
     }
 
+    default String getDatacenter(InetSocketAddress endpoint)
+    {
+        return getDatacenter(InetAddressAndPort.getByAddress(endpoint));
+    }
+
     default public String getDatacenter(Replica replica)
     {
         return getDatacenter(replica.endpoint());
diff --git a/src/java/org/apache/cassandra/locator/InOurDc.java b/src/java/org/apache/cassandra/locator/InOurDc.java
new file mode 100644
index 0000000..34e8ef8
--- /dev/null
+++ b/src/java/org/apache/cassandra/locator/InOurDc.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.locator;
+
+import java.util.function.Predicate;
+
+import static org.apache.cassandra.config.DatabaseDescriptor.getEndpointSnitch;
+import static org.apache.cassandra.config.DatabaseDescriptor.getLocalDataCenter;
+
+public class InOurDc
+{
+    private static ReplicaTester replicas;
+    private static EndpointTester endpoints;
+
+    final String dc;
+    final IEndpointSnitch snitch;
+
+    private InOurDc(String dc, IEndpointSnitch snitch)
+    {
+        this.dc = dc;
+        this.snitch = snitch;
+    }
+
+    boolean stale()
+    {
+        return dc != getLocalDataCenter()
+                || snitch != getEndpointSnitch()
+                // this final clause checks if somehow the snitch/localDc have got out of whack;
+                // presently this is possible but very unlikely, and the check also helps
+                // resolve races on these global fields
+                || !dc.equals(snitch.getLocalDatacenter());
+    }
+
+    private static final class ReplicaTester extends InOurDc implements Predicate<Replica>
+    {
+        private ReplicaTester(String dc, IEndpointSnitch snitch)
+        {
+            super(dc, snitch);
+        }
+
+        @Override
+        public boolean test(Replica replica)
+        {
+            return dc.equals(snitch.getDatacenter(replica.endpoint()));
+        }
+    }
+
+    private static final class EndpointTester extends InOurDc implements Predicate<InetAddressAndPort>
+    {
+        private EndpointTester(String dc, IEndpointSnitch snitch)
+        {
+            super(dc, snitch);
+        }
+
+        @Override
+        public boolean test(InetAddressAndPort endpoint)
+        {
+            return dc.equals(snitch.getDatacenter(endpoint));
+        }
+    }
+
+    public static Predicate<Replica> replicas()
+    {
+        ReplicaTester cur = replicas;
+        if (cur == null || cur.stale())
+            replicas = cur = new ReplicaTester(getLocalDataCenter(), getEndpointSnitch());
+        return cur;
+    }
+
+    public static Predicate<InetAddressAndPort> endpoints()
+    {
+        EndpointTester cur = endpoints;
+        if (cur == null || cur.stale())
+            endpoints = cur = new EndpointTester(getLocalDataCenter(), getEndpointSnitch());
+        return cur;
+    }
+
+    public static boolean isInOurDc(Replica replica)
+    {
+        return replicas().test(replica);
+    }
+
+    public static boolean isInOurDc(InetAddressAndPort endpoint)
+    {
+        return endpoints().test(endpoint);
+    }
+
+}
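A usage sketch (illustrative only, not part of the patch) for the renamed InOurDc helpers: the cached predicates plug straight into the widened filter(...) signature, and isInOurDc(...) wraps the same predicate for one-off checks.

import org.apache.cassandra.locator.EndpointsForToken;
import org.apache.cassandra.locator.InOurDc;
import org.apache.cassandra.locator.Replica;

final class LocalDcSketch
{
    static EndpointsForToken localOnly(EndpointsForToken replicas)
    {
        return replicas.filter(InOurDc.replicas());   // predicate instance is reused until the snitch or local DC changes
    }

    static boolean isLocal(Replica replica)
    {
        return InOurDc.isInOurDc(replica);            // convenience wrapper over the same cached predicate
    }
}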
diff --git a/src/java/org/apache/cassandra/locator/InOurDcTester.java b/src/java/org/apache/cassandra/locator/InOurDcTester.java
deleted file mode 100644
index 514c7ef..0000000
--- a/src/java/org/apache/cassandra/locator/InOurDcTester.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.locator;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.utils.FBUtilities;
-import java.util.function.Predicate;
-
-public class InOurDcTester
-{
-    private static ReplicaTester replicas;
-    private static EndpointTester endpoints;
-
-    final String dc;
-    final IEndpointSnitch snitch;
-
-    private InOurDcTester(String dc, IEndpointSnitch snitch)
-    {
-        this.dc = dc;
-        this.snitch = snitch;
-    }
-
-    boolean stale()
-    {
-        return dc != DatabaseDescriptor.getLocalDataCenter()
-                || snitch != DatabaseDescriptor.getEndpointSnitch()
-                // this final clause checks if somehow the snitch/localDc have got out of whack;
-                // presently, this is possible but very unlikely, but this check will also help
-                // resolve races on these global fields as well
-                || !dc.equals(snitch.getLocalDatacenter());
-    }
-
-    private static final class ReplicaTester extends InOurDcTester implements Predicate<Replica>
-    {
-        private ReplicaTester(String dc, IEndpointSnitch snitch)
-        {
-            super(dc, snitch);
-        }
-
-        @Override
-        public boolean test(Replica replica)
-        {
-            return dc.equals(snitch.getDatacenter(replica.endpoint()));
-        }
-    }
-
-    private static final class EndpointTester extends InOurDcTester implements Predicate<InetAddressAndPort>
-    {
-        private EndpointTester(String dc, IEndpointSnitch snitch)
-        {
-            super(dc, snitch);
-        }
-
-        @Override
-        public boolean test(InetAddressAndPort endpoint)
-        {
-            return dc.equals(snitch.getDatacenter(endpoint));
-        }
-    }
-
-    public static Predicate<Replica> replicas()
-    {
-        ReplicaTester cur = replicas;
-        if (cur == null || cur.stale())
-            replicas = cur = new ReplicaTester(DatabaseDescriptor.getLocalDataCenter(), DatabaseDescriptor.getEndpointSnitch());
-        return cur;
-    }
-
-    public static Predicate<InetAddressAndPort> endpoints()
-    {
-        EndpointTester cur = endpoints;
-        if (cur == null || cur.stale())
-            endpoints = cur = new EndpointTester(DatabaseDescriptor.getLocalDataCenter(), DatabaseDescriptor.getEndpointSnitch());
-        return cur;
-    }
-
-}
diff --git a/src/java/org/apache/cassandra/locator/InetAddressAndPort.java b/src/java/org/apache/cassandra/locator/InetAddressAndPort.java
index 6e67a23..e6a920b 100644
--- a/src/java/org/apache/cassandra/locator/InetAddressAndPort.java
+++ b/src/java/org/apache/cassandra/locator/InetAddressAndPort.java
@@ -22,6 +22,7 @@
 import java.net.Inet4Address;
 import java.net.Inet6Address;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.util.regex.Pattern;
@@ -51,7 +52,7 @@
  *
  */
 @SuppressWarnings("UnstableApiUsage")
-public final class InetAddressAndPort implements Comparable<InetAddressAndPort>, Serializable
+public final class InetAddressAndPort extends InetSocketAddress implements Comparable<InetAddressAndPort>, Serializable
 {
     private static final long serialVersionUID = 0;
 
@@ -61,23 +62,20 @@
     //to always override the defaults.
     static volatile int defaultPort = 7000;
 
-    public final InetAddress address;
     public final byte[] addressBytes;
-    public final int port;
 
     private InetAddressAndPort(InetAddress address, byte[] addressBytes, int port)
     {
+        super(address, port);
         Preconditions.checkNotNull(address);
         Preconditions.checkNotNull(addressBytes);
         validatePortRange(port);
-        this.address = address;
-        this.port = port;
         this.addressBytes = addressBytes;
     }
 
     public InetAddressAndPort withPort(int port)
     {
-        return new InetAddressAndPort(address, addressBytes, port);
+        return new InetAddressAndPort(getAddress(), addressBytes, port);
     }
 
     private static void validatePortRange(int port)
@@ -89,26 +87,6 @@
     }
 
     @Override
-    public boolean equals(Object o)
-    {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        InetAddressAndPort that = (InetAddressAndPort) o;
-
-        if (port != that.port) return false;
-        return address.equals(that.address);
-    }
-
-    @Override
-    public int hashCode()
-    {
-        int result = address.hashCode();
-        result = 31 * result + port;
-        return result;
-    }
-
-    @Override
     public int compareTo(InetAddressAndPort o)
     {
         int retval = FastByteOperations.compareUnsigned(addressBytes, 0, addressBytes.length, o.addressBytes, 0, o.addressBytes.length);
@@ -117,7 +95,7 @@
             return retval;
         }
 
-        return Integer.compare(port, o.port);
+        return Integer.compare(getPort(), o.getPort());
     }
 
     public String getHostAddressAndPort()
@@ -141,31 +119,56 @@
 
     public String getHostAddress(boolean withPort)
     {
+        return hostAddress(this, withPort);
+    }
+
+    public String getHostName(boolean withPort)
+    {
+        return withPort ? String.format("%s:%s", getHostName(), getPort()) : getHostName();
+    }
+
+    public static String hostAddressAndPort(InetSocketAddress address)
+    {
+        return hostAddress(address, true);
+    }
+
+    public static String hostAddress(InetSocketAddress address, boolean withPort)
+    {
         if (withPort)
         {
-            return HostAndPort.fromParts(address.getHostAddress(), port).toString();
+            return HostAndPort.fromParts(address.getAddress().getHostAddress(), address.getPort()).toString();
         }
         else
         {
-            return address.getHostAddress();
+            return address.getAddress().getHostAddress();
         }
     }
 
     @Override
     public String toString()
     {
-        return toString(true);
+        return toString(this);
     }
 
     public String toString(boolean withPort)
     {
+        return toString(this, withPort);
+    }
+
+    public static String toString(InetSocketAddress address)
+    {
+        return toString(address, true);
+    }
+
+    public static String toString(InetSocketAddress address, boolean withPort)
+    {
         if (withPort)
         {
-            return toString(address, port);
+            return toString(address.getAddress(), address.getPort());
         }
         else
         {
-            return address.toString();
+            return address.getAddress().toString();
         }
     }
 
@@ -237,6 +240,13 @@
         return getByAddressOverrideDefaults(address, null);
     }
 
+    public static InetAddressAndPort getByAddress(InetSocketAddress address)
+    {
+        if (address instanceof InetAddressAndPort)
+            return (InetAddressAndPort) address;
+        return new InetAddressAndPort(address.getAddress(), address.getAddress().getAddress(), address.getPort());
+    }
+
     public static InetAddressAndPort getByAddressOverrideDefaults(InetAddress address, Integer port)
     {
         if (port == null)
@@ -296,18 +306,27 @@
 
         public void serialize(InetAddressAndPort endpoint, DataOutputPlus out, int version) throws IOException
         {
-            byte[] buf = endpoint.addressBytes;
+            serialize(endpoint.addressBytes, endpoint.getPort(), out, version);
+        }
 
+        public void serialize(InetSocketAddress endpoint, DataOutputPlus out, int version) throws IOException
+        {
+            byte[] address = endpoint instanceof InetAddressAndPort ? ((InetAddressAndPort) endpoint).addressBytes : endpoint.getAddress().getAddress();
+            serialize(address, endpoint.getPort(), out, version);
+        }
+
+        void serialize(byte[] address, int port, DataOutputPlus out, int version) throws IOException
+        {
             if (version >= MessagingService.VERSION_40)
             {
-                out.writeByte(buf.length + 2);
-                out.write(buf);
-                out.writeShort(endpoint.port);
+                out.writeByte(address.length + 2);
+                out.write(address);
+                out.writeShort(port);
             }
             else
             {
-                out.writeByte(buf.length);
-                out.write(buf);
+                out.writeByte(address.length);
+                out.write(address);
             }
         }
 
@@ -366,19 +385,24 @@
 
         public long serializedSize(InetAddressAndPort from, int version)
         {
+            return serializedSize((InetSocketAddress) from, version);
+        }
+
+        public long serializedSize(InetSocketAddress from, int version)
+        {
             //4.0 includes a port number
             if (version >= MessagingService.VERSION_40)
             {
-                if (from.address instanceof Inet4Address)
+                if (from.getAddress() instanceof Inet4Address)
                     return 1 + 4 + 2;
-                assert from.address instanceof Inet6Address;
+                assert from.getAddress() instanceof Inet6Address;
                 return 1 + 16 + 2;
             }
             else
             {
-                if (from.address instanceof Inet4Address)
+                if (from.getAddress() instanceof Inet4Address)
                     return 1 + 4;
-                assert from.address instanceof Inet6Address;
+                assert from.getAddress() instanceof Inet6Address;
                 return 1 + 16;
             }
         }
@@ -400,7 +424,7 @@
             {
                 out.writeByte(buf.length + 2);
                 out.write(buf);
-                out.writeShort(endpoint.port);
+                out.writeShort(endpoint.getPort());
             }
             else
             {
@@ -413,16 +437,16 @@
             //4.0 includes a port number
             if (version >= MessagingService.VERSION_40)
             {
-                if (from.address instanceof Inet4Address)
+                if (from.getAddress() instanceof Inet4Address)
                     return 1 + 4 + 2;
-                assert from.address instanceof Inet6Address;
+                assert from.getAddress() instanceof Inet6Address;
                 return 1 + 16 + 2;
             }
             else
             {
-                if (from.address instanceof Inet4Address)
+                if (from.getAddress() instanceof Inet4Address)
                     return 4;
-                assert from.address instanceof Inet6Address;
+                assert from.getAddress() instanceof Inet6Address;
                 return 16;
             }
         }
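A short sketch (illustrative only, not part of the patch) of what the InetSocketAddress changes above enable: conversion from a plain socket address without copying when it is already an InetAddressAndPort, plus the static string helpers that now accept either type. The peer argument is hypothetical.

import java.net.InetSocketAddress;
import org.apache.cassandra.locator.InetAddressAndPort;

final class AddressSketch
{
    static void show(InetSocketAddress peer)
    {
        InetAddressAndPort addr = InetAddressAndPort.getByAddress(peer);   // returns peer unchanged if already converted
        String full = InetAddressAndPort.toString(peer);                   // same formatting as the instance toString(), with port
        String hostPort = InetAddressAndPort.hostAddressAndPort(peer);     // "address:port" via HostAndPort
        System.out.println(addr + " / " + full + " / " + hostPort);
    }
}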
diff --git a/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java b/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
index 2b4a0ea..1d39bbe 100644
--- a/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
+++ b/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
@@ -20,6 +20,8 @@
 import java.util.*;
 import java.util.Map.Entry;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.locator.ReplicaCollection.Builder.Conflict;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -30,6 +32,7 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.TokenMetadata.Topology;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
@@ -38,6 +41,7 @@
 import com.google.common.collect.ImmutableMultimap;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Multimaps;
+import com.google.common.collect.Sets;
 
 /**
  * <p>
@@ -261,11 +265,20 @@
      * the "replication_factor" options out into the known datacenters. It is called via reflection from
      * {@link AbstractReplicationStrategy#prepareReplicationStrategyOptions(Class, Map, Map)}.
      *
-     * @param options The proposed strategy options that will be potentially mutated
+     * @param options The proposed strategy options that will be potentially mutated. If empty, replication_factor will
+     *                be added either from previousOptions if one exists, or from default_keyspace_rf configuration.
      * @param previousOptions Any previous strategy options in the case of an ALTER statement
      */
     protected static void prepareOptions(Map<String, String> options, Map<String, String> previousOptions)
     {
+        // add replication_factor only if there is no explicit mention of DCs. Otherwise, non-mentioned DCs will be added with default RF
+        if (options.isEmpty())
+        {
+            String rf = previousOptions.containsKey(REPLICATION_FACTOR) ? previousOptions.get(REPLICATION_FACTOR)
+                                                                        : Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF());
+            options.putIfAbsent(REPLICATION_FACTOR, rf);
+        }
+
         String replication = options.remove(REPLICATION_FACTOR);
 
         if (replication == null && options.size() == 0)
@@ -302,6 +315,15 @@
 
         // Validate the data center names
         super.validateExpectedOptions();
+
+        if (keyspaceName.equalsIgnoreCase(SchemaConstants.AUTH_KEYSPACE_NAME))
+        {
+            Set<String> differenceSet = Sets.difference((Set<String>) recognizedOptions(), configOptions.keySet());
+            if (!differenceSet.isEmpty())
+            {
+                throw new ConfigurationException("Following datacenters have active nodes and must be present in replication options for keyspace " + SchemaConstants.AUTH_KEYSPACE_NAME + ": " + differenceSet.toString());
+            }
+        }
     }
 
     @Override
@@ -317,7 +339,7 @@
     }
 
     @Override
-    public void maybeWarnOnOptions()
+    public void maybeWarnOnOptions(ClientState state)
     {
         if (!SchemaConstants.isSystemKeyspace(keyspaceName))
         {
@@ -327,6 +349,7 @@
 
                 String dc = e.getKey();
                 ReplicationFactor rf = getReplicationFactor(dc);
+                Guardrails.minimumReplicationFactor.guard(rf.fullReplicas, keyspaceName, false, state);
                 int nodeCount = dcsNodes.get(dc).size();
                 // nodeCount==0 on many tests
                 if (rf.fullReplicas > nodeCount && nodeCount != 0)
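Editor's note: the prepareOptions change above seeds replication_factor only when no datacenters are named, preferring the previous options over the configured default_keyspace_rf. A minimal standalone sketch of that defaulting rule, with illustrative names:

    import java.util.HashMap;
    import java.util.Map;

    final class DefaultRfSketch
    {
        static final String REPLICATION_FACTOR = "replication_factor";

        // Mirrors the patched prepareOptions: only when no DCs are mentioned explicitly
        // is replication_factor seeded, from previousOptions if present, otherwise from
        // the configured default keyspace RF (default_keyspace_rf in the patch).
        static void seedReplicationFactor(Map<String, String> options,
                                          Map<String, String> previousOptions,
                                          int defaultKeyspaceRf)
        {
            if (options.isEmpty())
            {
                String rf = previousOptions.containsKey(REPLICATION_FACTOR)
                            ? previousOptions.get(REPLICATION_FACTOR)
                            : Integer.toString(defaultKeyspaceRf);
                options.putIfAbsent(REPLICATION_FACTOR, rf);
            }
        }

        public static void main(String[] args)
        {
            Map<String, String> options = new HashMap<>();
            seedReplicationFactor(options, Map.of(), 3);
            System.out.println(options); // {replication_factor=3}
        }
    }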
diff --git a/src/java/org/apache/cassandra/locator/RackInferringSnitch.java b/src/java/org/apache/cassandra/locator/RackInferringSnitch.java
index 6ae10cc..3429ad1 100644
--- a/src/java/org/apache/cassandra/locator/RackInferringSnitch.java
+++ b/src/java/org/apache/cassandra/locator/RackInferringSnitch.java
@@ -25,11 +25,11 @@
 {
     public String getRack(InetAddressAndPort endpoint)
     {
-        return Integer.toString(endpoint.address.getAddress()[2] & 0xFF, 10);
+        return Integer.toString(endpoint.getAddress().getAddress()[2] & 0xFF, 10);
     }
 
     public String getDatacenter(InetAddressAndPort endpoint)
     {
-        return Integer.toString(endpoint.address.getAddress()[1] & 0xFF, 10);
+        return Integer.toString(endpoint.getAddress().getAddress()[1] & 0xFF, 10);
     }
 }
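Editor's note: for reference, a self-contained sketch of the octet-based inference the snitch performs - the second octet of the endpoint's IP names the datacenter and the third names the rack. Names below are illustrative.

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    final class OctetSnitchSketch
    {
        // Second octet -> datacenter, third octet -> rack, as in RackInferringSnitch.
        static String datacenter(InetAddress address) { return Integer.toString(address.getAddress()[1] & 0xFF); }
        static String rack(InetAddress address)       { return Integer.toString(address.getAddress()[2] & 0xFF); }

        public static void main(String[] args) throws UnknownHostException
        {
            InetAddress node = InetAddress.getByName("10.20.30.40");
            System.out.println(datacenter(node) + " / " + rack(node)); // 20 / 30
        }
    }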
diff --git a/src/java/org/apache/cassandra/locator/RangesByEndpoint.java b/src/java/org/apache/cassandra/locator/RangesByEndpoint.java
index 1a71141..023d7ee 100644
--- a/src/java/org/apache/cassandra/locator/RangesByEndpoint.java
+++ b/src/java/org/apache/cassandra/locator/RangesByEndpoint.java
@@ -22,8 +22,6 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 
 public class RangesByEndpoint extends ReplicaMultimap<InetAddressAndPort, RangesAtEndpoint>
@@ -55,4 +53,4 @@
         }
     }
 
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
index 7bb38cb..d8ca9e4 100644
--- a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
+++ b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
@@ -30,8 +30,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.cassandra.net.ConnectionType.SMALL_MESSAGES;
-
 /**
  * Sidekick helper for snitches that want to reconnect from one IP addr for a node to another.
  * Typically, this is for situations like EC2 where a node will have a public address and a private address,
diff --git a/src/java/org/apache/cassandra/locator/ReplicaCollection.java b/src/java/org/apache/cassandra/locator/ReplicaCollection.java
index ec671d5..b679b50 100644
--- a/src/java/org/apache/cassandra/locator/ReplicaCollection.java
+++ b/src/java/org/apache/cassandra/locator/ReplicaCollection.java
@@ -62,14 +62,14 @@
     /**
      * @return the number of replicas that match the predicate
      */
-    public abstract int count(Predicate<Replica> predicate);
+    public abstract int count(Predicate<? super Replica> predicate);
 
     /**
      * @return a *eagerly constructed* copy of this collection containing the Replica that match the provided predicate.
      * An effort will be made to either return ourself, or a subList, where possible.
      * It is guaranteed that no changes to any upstream Builder will affect the state of the result.
      */
-    public abstract C filter(Predicate<Replica> predicate);
+    public abstract C filter(Predicate<? super Replica> predicate);
 
     /**
      * @return a *eagerly constructed* copy of this collection containing the Replica that match the provided predicate.
@@ -77,18 +77,18 @@
      * It is guaranteed that no changes to any upstream Builder will affect the state of the result.
      * Only the first maxSize items will be returned.
      */
-    public abstract C filter(Predicate<Replica> predicate, int maxSize);
+    public abstract C filter(Predicate<? super Replica> predicate, int maxSize);
 
     /**
      * @return a *lazily constructed* Iterable over this collection, containing the Replica that match the provided predicate.
      */
-    public abstract Iterable<Replica> filterLazily(Predicate<Replica> predicate);
+    public abstract Iterable<Replica> filterLazily(Predicate<? super Replica> predicate);
 
     /**
      * @return a *lazily constructed* Iterable over this collection, containing the Replica that match the provided predicate.
      * Only the first maxSize matching items will be returned.
      */
-    public abstract Iterable<Replica> filterLazily(Predicate<Replica> predicate, int maxSize);
+    public abstract Iterable<Replica> filterLazily(Predicate<? super Replica> predicate, int maxSize);
 
     /**
      * @return an *eagerly constructed* copy of this collection containing the Replica at positions [start..end);
@@ -101,7 +101,7 @@
      * @return an *eagerly constructed* copy of this collection containing the Replica re-ordered according to this comparator
      * It is guaranteed that no changes to any upstream Builder will affect the state of the result.
      */
-    public abstract C sorted(Comparator<Replica> comparator);
+    public abstract C sorted(Comparator<? super Replica> comparator);
 
     public abstract Iterator<Replica> iterator();
     public abstract Stream<Replica> stream();
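Editor's note: widening the signatures above from Predicate&lt;Replica&gt; to Predicate&lt;? super Replica&gt; follows the usual PECS guideline - a predicate written against a supertype is now accepted directly. A generic illustration with placeholder types, not the Cassandra ones:

    import java.util.List;
    import java.util.Objects;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    final class ContravariantPredicateSketch
    {
        // With Predicate<? super T>, a predicate over Object (or any supertype of T)
        // can be reused without wrapping - the motivation for the signature change.
        static <T> List<T> filter(List<T> items, Predicate<? super T> predicate)
        {
            return items.stream().filter(predicate).collect(Collectors.toList());
        }

        public static void main(String[] args)
        {
            Predicate<Object> nonNull = Objects::nonNull;
            System.out.println(filter(List.of("a", "b"), nonNull)); // [a, b]
        }
    }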
diff --git a/src/java/org/apache/cassandra/locator/ReplicaLayout.java b/src/java/org/apache/cassandra/locator/ReplicaLayout.java
index ff81732..351e837 100644
--- a/src/java/org/apache/cassandra/locator/ReplicaLayout.java
+++ b/src/java/org/apache/cassandra/locator/ReplicaLayout.java
@@ -25,7 +25,6 @@
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.gms.FailureDetector;
-import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 
 import java.util.Set;
@@ -169,13 +168,14 @@
         @Override
         public Token token() { return natural().token(); }
 
-        public ReplicaLayout.ForTokenWrite filter(Predicate<Replica> filter)
+        public ForTokenWrite filter(Predicate<Replica> filter)
         {
             EndpointsForToken filtered = all().filter(filter);
             // AbstractReplicaCollection.filter returns itself if all elements match the filter
             if (filtered == all()) return this;
+            if (pending().isEmpty()) return new ForTokenWrite(replicationStrategy(), filtered, pending(), filtered);
             // unique by endpoint, so can for efficiency filter only on endpoint
-            return new ReplicaLayout.ForTokenWrite(
+            return new ForTokenWrite(
                     replicationStrategy(),
                     natural().keep(filtered.endpoints()),
                     pending().keep(filtered.endpoints()),
@@ -206,8 +206,8 @@
         // TODO: these should be cached, not the natural replicas
         // TODO: race condition to fetch these. implications??
         AbstractReplicationStrategy replicationStrategy = keyspace.getReplicationStrategy();
-        EndpointsForToken natural = replicationStrategy.getNaturalReplicasForToken(token);
-        EndpointsForToken pending = StorageService.instance.getTokenMetadata().pendingEndpointsForToken(token, keyspace.getName());
+        EndpointsForToken natural = EndpointsForToken.natural(replicationStrategy, token);
+        EndpointsForToken pending = EndpointsForToken.pending(keyspace, token);
         return forTokenWrite(replicationStrategy, natural, pending);
     }
 
@@ -325,7 +325,7 @@
      * @return the read layout for a token - this includes only live natural replicas, i.e. those that are not pending
      * and not marked down by the failure detector. these are reverse sorted by the badness score of the configured snitch
      */
-    static ReplicaLayout.ForTokenRead forTokenReadLiveSorted(AbstractReplicationStrategy replicationStrategy, Token token)
+    public static ReplicaLayout.ForTokenRead forTokenReadLiveSorted(AbstractReplicationStrategy replicationStrategy, Token token)
     {
         EndpointsForToken replicas = replicationStrategy.getNaturalReplicasForToken(token);
         replicas = DatabaseDescriptor.getEndpointSnitch().sortedByProximity(FBUtilities.getBroadcastAddressAndPort(), replicas);
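Editor's note: the patched ForTokenWrite.filter above returns the same instance when nothing was filtered out, and when there are no pending replicas it reuses the filtered set for both natural and all, skipping the extra endpoint-keyed filtering. A rough standalone sketch of that reuse pattern with placeholder types:

    import java.util.List;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    final class FilterReuseSketch
    {
        record Layout(List<String> natural, List<String> pending, List<String> all) {}

        // Mirrors the short-circuits in the patched filter(): keep the original layout
        // when nothing was removed, and avoid re-filtering when pending is empty.
        static Layout filter(Layout layout, Predicate<String> keep)
        {
            List<String> filtered = layout.all().stream().filter(keep).collect(Collectors.toList());
            if (filtered.size() == layout.all().size())
                return layout;                                        // nothing filtered out
            if (layout.pending().isEmpty())
                return new Layout(filtered, layout.pending(), filtered);
            List<String> natural = layout.natural().stream().filter(filtered::contains).collect(Collectors.toList());
            List<String> pending = layout.pending().stream().filter(filtered::contains).collect(Collectors.toList());
            return new Layout(natural, pending, filtered);
        }
    }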
diff --git a/src/java/org/apache/cassandra/locator/ReplicaPlan.java b/src/java/org/apache/cassandra/locator/ReplicaPlan.java
index 51cab13..3bb3ec0 100644
--- a/src/java/org/apache/cassandra/locator/ReplicaPlan.java
+++ b/src/java/org/apache/cassandra/locator/ReplicaPlan.java
@@ -25,69 +25,89 @@
 import org.apache.cassandra.dht.AbstractBounds;
 
 import java.util.function.Predicate;
+import java.util.function.Supplier;
 
-public abstract class ReplicaPlan<E extends Endpoints<E>>
+public interface ReplicaPlan<E extends Endpoints<E>, P extends ReplicaPlan<E, P>>
 {
-    protected final Keyspace keyspace;
-    protected final ConsistencyLevel consistencyLevel;
-    // The snapshot of the replication strategy when instantiating.
-    // It could be different than the one fetched from Keyspace later, e.g. RS altered during the query.
-    // Use the snapshot to calculate {@code blockFor} in order to have a consistent view of RS for the query.
-    protected final AbstractReplicationStrategy replicationStrategy;
+    Keyspace keyspace();
+    AbstractReplicationStrategy replicationStrategy();
+    ConsistencyLevel consistencyLevel();
 
-    // all nodes we will contact via any mechanism, including hints
-    // i.e., for:
-    //  - reads, only live natural replicas
-    //      ==> live.natural().subList(0, blockFor + initial speculate)
-    //  - writes, includes all full, and any pending replicas, (and only any necessary transient ones to make up the difference)
-    //      ==> liveAndDown.natural().filter(isFull) ++ liveAndDown.pending() ++ live.natural.filter(isTransient, req)
-    //  - paxos, includes all live replicas (natural+pending), for this DC if SERIAL_LOCAL
-    //      ==> live.all()  (if consistencyLevel.isDCLocal(), then .filter(consistencyLevel.isLocal))
-    private final E contacts;
+    E contacts();
 
-    ReplicaPlan(Keyspace keyspace, AbstractReplicationStrategy replicationStrategy, ConsistencyLevel consistencyLevel, E contacts)
+    Replica lookup(InetAddressAndPort endpoint);
+    P withContacts(E contacts);
+
+    interface ForRead<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>> extends ReplicaPlan<E, P>
     {
-        assert contacts != null;
-        this.keyspace = keyspace;
-        this.replicationStrategy = replicationStrategy;
-        this.consistencyLevel = consistencyLevel;
-        this.contacts = contacts;
+        int readQuorum();
+        E readCandidates();
+
+        default Replica firstUncontactedCandidate(Predicate<Replica> extraPredicate)
+        {
+            return Iterables.tryFind(readCandidates(), r -> extraPredicate.test(r) && !contacts().contains(r)).orNull();
+        }
     }
 
-    public abstract int blockFor();
+    abstract class AbstractReplicaPlan<E extends Endpoints<E>, P extends ReplicaPlan<E, P>> implements ReplicaPlan<E, P>
+    {
+        protected final Keyspace keyspace;
+        protected final ConsistencyLevel consistencyLevel;
+        // The snapshot of the replication strategy when instantiating.
+        // It could be different than the one fetched from Keyspace later, e.g. RS altered during the query.
+        // Use the snapshot to calculate {@code blockFor} in order to have a consistent view of RS for the query.
+        protected final AbstractReplicationStrategy replicationStrategy;
 
-    public E contacts() { return contacts; }
+        // all nodes we will contact via any mechanism, including hints
+        // i.e., for:
+        //  - reads, only live natural replicas
+        //      ==> live.natural().subList(0, blockFor + initial speculate)
+        //  - writes, includes all full, and any pending replicas, (and only any necessary transient ones to make up the difference)
+        //      ==> liveAndDown.natural().filter(isFull) ++ liveAndDown.pending() ++ live.natural.filter(isTransient, req)
+        //  - paxos, includes all live replicas (natural+pending), for this DC if SERIAL_LOCAL
+        //      ==> live.all()  (if consistencyLevel.isDCLocal(), then .filter(consistencyLevel.isLocal))
+        private final E contacts;
 
-    // TODO: should this semantically return true if we contain the endpoint, not the exact replica?
-    public boolean contacts(Replica replica) { return contacts.contains(replica); }
-    public Keyspace keyspace() { return keyspace; }
-    public AbstractReplicationStrategy replicationStrategy() { return replicationStrategy; }
-    public ConsistencyLevel consistencyLevel() { return consistencyLevel; }
+        AbstractReplicaPlan(Keyspace keyspace, AbstractReplicationStrategy replicationStrategy, ConsistencyLevel consistencyLevel, E contacts)
+        {
+            assert contacts != null;
+            this.keyspace = keyspace;
+            this.replicationStrategy = replicationStrategy;
+            this.consistencyLevel = consistencyLevel;
+            this.contacts = contacts;
+        }
 
-    public static abstract class ForRead<E extends Endpoints<E>> extends ReplicaPlan<E>
+        public E contacts() { return contacts; }
+
+        public Keyspace keyspace() { return keyspace; }
+        public AbstractReplicationStrategy replicationStrategy() { return replicationStrategy; }
+        public ConsistencyLevel consistencyLevel() { return consistencyLevel; }
+    }
+
+    public static abstract class AbstractForRead<E extends Endpoints<E>, P extends ForRead<E, P>> extends AbstractReplicaPlan<E, P> implements ForRead<E, P>
     {
         // all nodes we *could* contacts; typically all natural replicas that are believed to be alive
         // we will consult this collection to find uncontacted nodes we might contact if we doubt we will meet consistency level
-        private final E candidates;
+        final E candidates;
 
-        ForRead(Keyspace keyspace, AbstractReplicationStrategy replicationStrategy, ConsistencyLevel consistencyLevel, E candidates, E contacts)
+        AbstractForRead(Keyspace keyspace, AbstractReplicationStrategy replicationStrategy, ConsistencyLevel consistencyLevel, E candidates, E contacts)
         {
             super(keyspace, replicationStrategy, consistencyLevel, contacts);
             this.candidates = candidates;
         }
 
-        public int blockFor() { return consistencyLevel.blockFor(replicationStrategy); }
+        public int readQuorum() { return consistencyLevel.blockFor(replicationStrategy); }
 
-        public E candidates() { return candidates; }
+        public E readCandidates() { return candidates; }
 
         public Replica firstUncontactedCandidate(Predicate<Replica> extraPredicate)
         {
-            return Iterables.tryFind(candidates(), r -> extraPredicate.test(r) && !contacts(r)).orNull();
+            return Iterables.tryFind(readCandidates(), r -> extraPredicate.test(r) && !contacts().contains(r)).orNull();
         }
 
         public Replica lookup(InetAddressAndPort endpoint)
         {
-            return candidates().byEndpoint().get(endpoint);
+            return readCandidates().byEndpoint().get(endpoint);
         }
 
         public String toString()
@@ -96,7 +116,7 @@
         }
     }
 
-    public static class ForTokenRead extends ForRead<EndpointsForToken>
+    public static class ForTokenRead extends AbstractForRead<EndpointsForToken, ForTokenRead>
     {
         public ForTokenRead(Keyspace keyspace,
                             AbstractReplicationStrategy replicationStrategy,
@@ -107,13 +127,13 @@
             super(keyspace, replicationStrategy, consistencyLevel, candidates, contacts);
         }
 
-        ForTokenRead withContact(EndpointsForToken newContact)
+        public ForTokenRead withContacts(EndpointsForToken newContact)
         {
-            return new ForTokenRead(keyspace, replicationStrategy, consistencyLevel, candidates(), newContact);
+            return new ForTokenRead(keyspace, replicationStrategy, consistencyLevel, candidates, newContact);
         }
     }
 
-    public static class ForRangeRead extends ForRead<EndpointsForRange>
+    public static class ForRangeRead extends AbstractForRead<EndpointsForRange, ForRangeRead>
     {
         final AbstractBounds<PartitionPosition> range;
         final int vnodeCount;
@@ -138,20 +158,20 @@
          */
         public int vnodeCount() { return vnodeCount; }
 
-        ForRangeRead withContact(EndpointsForRange newContact)
+        public ForRangeRead withContacts(EndpointsForRange newContact)
         {
-            return new ForRangeRead(keyspace, replicationStrategy, consistencyLevel, range, candidates(), newContact, vnodeCount);
+            return new ForRangeRead(keyspace, replicationStrategy, consistencyLevel, range, readCandidates(), newContact, vnodeCount);
         }
     }
 
-    public static abstract class ForWrite<E extends Endpoints<E>> extends ReplicaPlan<E>
+    public static class ForWrite extends AbstractReplicaPlan<EndpointsForToken, ForWrite>
     {
         // TODO: this is only needed because of poor isolation of concerns elsewhere - we can remove it soon, and will do so in a follow-up patch
-        final E pending;
-        final E liveAndDown;
-        final E live;
+        final EndpointsForToken pending;
+        final EndpointsForToken liveAndDown;
+        final EndpointsForToken live;
 
-        ForWrite(Keyspace keyspace, AbstractReplicationStrategy replicationStrategy, ConsistencyLevel consistencyLevel, E pending, E liveAndDown, E live, E contact)
+        public ForWrite(Keyspace keyspace, AbstractReplicationStrategy replicationStrategy, ConsistencyLevel consistencyLevel, EndpointsForToken pending, EndpointsForToken liveAndDown, EndpointsForToken live, EndpointsForToken contact)
         {
             super(keyspace, replicationStrategy, consistencyLevel, contact);
             this.pending = pending;
@@ -159,46 +179,43 @@
             this.live = live;
         }
 
-        public int blockFor() { return consistencyLevel.blockForWrite(replicationStrategy, pending()); }
+        public int writeQuorum() { return consistencyLevel.blockForWrite(replicationStrategy, pending()); }
 
         /** Replicas that a region of the ring is moving to; not yet ready to serve reads, but should receive writes */
-        public E pending() { return pending; }
+        public EndpointsForToken pending() { return pending; }
+
         /** Replicas that can participate in the write - this always includes all nodes (pending and natural) in all DCs, except for paxos LOCAL_QUORUM (which is local DC only) */
-        public E liveAndDown() { return liveAndDown; }
+        public EndpointsForToken liveAndDown() { return liveAndDown; }
+
         /** The live replicas present in liveAndDown, usually derived from FailureDetector.isReplicaAlive */
-        public E live() { return live; }
+        public EndpointsForToken live() { return live; }
+
         /** Calculate which live endpoints we could have contacted, but chose not to */
-        public E liveUncontacted() { return live().filter(r -> !contacts(r)); }
+        public EndpointsForToken liveUncontacted() { return live().filter(r -> !contacts().contains(r)); }
+
         /** Test liveness, consistent with the upfront analysis done for this operation (i.e. test membership of live()) */
         public boolean isAlive(Replica replica) { return live.endpoints().contains(replica.endpoint()); }
+
         public Replica lookup(InetAddressAndPort endpoint)
         {
             return liveAndDown().byEndpoint().get(endpoint);
         }
 
+        private ForWrite copy(ConsistencyLevel newConsistencyLevel, EndpointsForToken newContact)
+        {
+            return new ForWrite(keyspace, replicationStrategy, newConsistencyLevel, pending(), liveAndDown(), live(), newContact);
+        }
+
+        ForWrite withConsistencyLevel(ConsistencyLevel newConsistencylevel) { return copy(newConsistencylevel, contacts()); }
+        public ForWrite withContacts(EndpointsForToken newContact) { return copy(consistencyLevel, newContact); }
+
         public String toString()
         {
             return "ReplicaPlan.ForWrite [ CL: " + consistencyLevel + " keyspace: " + keyspace + " liveAndDown: " + liveAndDown + " live: " + live + " contacts: " + contacts() +  " ]";
         }
     }
 
-    public static class ForTokenWrite extends ForWrite<EndpointsForToken>
-    {
-        public ForTokenWrite(Keyspace keyspace, AbstractReplicationStrategy replicationStrategy, ConsistencyLevel consistencyLevel, EndpointsForToken pending, EndpointsForToken liveAndDown, EndpointsForToken live, EndpointsForToken contact)
-        {
-            super(keyspace, replicationStrategy, consistencyLevel, pending, liveAndDown, live, contact);
-        }
-
-        private ReplicaPlan.ForTokenWrite copy(ConsistencyLevel newConsistencyLevel, EndpointsForToken newContact)
-        {
-            return new ReplicaPlan.ForTokenWrite(keyspace, replicationStrategy, newConsistencyLevel, pending(), liveAndDown(), live(), newContact);
-        }
-
-        ForTokenWrite withConsistencyLevel(ConsistencyLevel newConsistencylevel) { return copy(newConsistencylevel, contacts()); }
-        public ForTokenWrite withContact(EndpointsForToken newContact) { return copy(consistencyLevel, newContact); }
-    }
-
-    public static class ForPaxosWrite extends ForWrite<EndpointsForToken>
+    public static class ForPaxosWrite extends ForWrite
     {
         final int requiredParticipants;
 
@@ -219,8 +236,11 @@
      * the constructor should be visible by the normal process of sharing data between threads (i.e. executors, etc)
      * and any updates will either be seen or not seen, perhaps not promptly, but certainly not incompletely.
      * The contained ReplicaPlan has only final member properties, so it cannot be seen partially initialised.
+     *
+     * TODO: there's no reason this couldn't be achieved instead by a ReplicaPlan with mutable contacts,
+     *       simplifying the hierarchy
      */
-    public interface Shared<E extends Endpoints<E>, P extends ReplicaPlan<E>>
+    public interface Shared<E extends Endpoints<E>, P extends ReplicaPlan<E, P>> extends Supplier<P>
     {
         /**
          * add the provided replica to this shared plan, by updating the internal reference
@@ -230,29 +250,22 @@
          * get the shared replica plan, non-volatile (so maybe stale) but no risk of partially initialised
          */
         public P get();
-        /**
-         * get the shared replica plan, non-volatile (so maybe stale) but no risk of partially initialised,
-         * but replace its 'contacts' with those provided
-         */
-        public abstract P getWithContacts(E endpoints);
     }
 
     public static class SharedForTokenRead implements Shared<EndpointsForToken, ForTokenRead>
     {
         private ForTokenRead replicaPlan;
         SharedForTokenRead(ForTokenRead replicaPlan) { this.replicaPlan = replicaPlan; }
-        public void addToContacts(Replica replica) { replicaPlan = replicaPlan.withContact(Endpoints.append(replicaPlan.contacts(), replica)); }
+        public void addToContacts(Replica replica) { replicaPlan = replicaPlan.withContacts(Endpoints.append(replicaPlan.contacts(), replica)); }
         public ForTokenRead get() { return replicaPlan; }
-        public ForTokenRead getWithContacts(EndpointsForToken newContact) { return replicaPlan.withContact(newContact); }
     }
 
     public static class SharedForRangeRead implements Shared<EndpointsForRange, ForRangeRead>
     {
         private ForRangeRead replicaPlan;
         SharedForRangeRead(ForRangeRead replicaPlan) { this.replicaPlan = replicaPlan; }
-        public void addToContacts(Replica replica) { replicaPlan = replicaPlan.withContact(Endpoints.append(replicaPlan.contacts(), replica)); }
+        public void addToContacts(Replica replica) { replicaPlan = replicaPlan.withContacts(Endpoints.append(replicaPlan.contacts(), replica)); }
         public ForRangeRead get() { return replicaPlan; }
-        public ForRangeRead getWithContacts(EndpointsForRange newContact) { return replicaPlan.withContact(newContact); }
     }
 
     public static SharedForTokenRead shared(ForTokenRead replicaPlan) { return new SharedForTokenRead(replicaPlan); }
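Editor's note: since Shared now extends Supplier&lt;P&gt;, a shared plan can be handed to any code that only needs a Supplier of the current plan, which is what let getWithContacts be dropped above. A rough usage sketch of that holder pattern, with placeholder types rather than the Cassandra ones:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Supplier;

    final class SharedPlanSketch
    {
        // Stand-in for the immutable plan; in the patch this is a ReplicaPlan subtype.
        record Plan(List<String> contacts) {}

        // Mirrors ReplicaPlan.Shared: a non-volatile mutable reference to an immutable
        // plan, now usable anywhere a plain Supplier<Plan> is expected.
        static final class SharedPlan implements Supplier<Plan>
        {
            private Plan plan;
            SharedPlan(Plan plan) { this.plan = plan; }
            void addContact(String endpoint)
            {
                List<String> contacts = new ArrayList<>(plan.contacts());
                contacts.add(endpoint);
                plan = new Plan(contacts);
            }
            public Plan get() { return plan; }
        }

        public static void main(String[] args)
        {
            SharedPlan shared = new SharedPlan(new Plan(List.of("10.0.0.1")));
            shared.addContact("10.0.0.2");
            Supplier<Plan> asSupplier = shared;     // usable as a plain Supplier now
            System.out.println(asSupplier.get().contacts());
        }
    }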
diff --git a/src/java/org/apache/cassandra/locator/ReplicaPlans.java b/src/java/org/apache/cassandra/locator/ReplicaPlans.java
index 67b89e5..c39862a 100644
--- a/src/java/org/apache/cassandra/locator/ReplicaPlans.java
+++ b/src/java/org/apache/cassandra/locator/ReplicaPlans.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.locator;
 
 import com.carrotsearch.hppc.ObjectIntHashMap;
-import com.carrotsearch.hppc.cursors.ObjectIntCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ArrayListMultimap;
@@ -134,7 +133,7 @@
                     if (logger.isTraceEnabled())
                     {
                         logger.trace(String.format("Local replicas %s are insufficient to satisfy LOCAL_QUORUM requirement of %d live replicas and %d full replicas in '%s'",
-                                allLive.filter(InOurDcTester.replicas()), blockFor, blockForFullReplicas, DatabaseDescriptor.getLocalDataCenter()));
+                                                   allLive.filter(InOurDc.replicas()), blockFor, blockForFullReplicas, DatabaseDescriptor.getLocalDataCenter()));
                     }
                     throw UnavailableException.create(consistencyLevel, blockFor, blockForFullReplicas, localLive.allReplicas(), localLive.fullReplicas());
                 }
@@ -176,23 +175,23 @@
     /**
      * Construct a ReplicaPlan for writing to exactly one node, with CL.ONE. This node is *assumed* to be alive.
      */
-    public static ReplicaPlan.ForTokenWrite forSingleReplicaWrite(Keyspace keyspace, Token token, Replica replica)
+    public static ReplicaPlan.ForWrite forSingleReplicaWrite(Keyspace keyspace, Token token, Replica replica)
     {
         EndpointsForToken one = EndpointsForToken.of(token, replica);
         EndpointsForToken empty = EndpointsForToken.empty(token);
-        return new ReplicaPlan.ForTokenWrite(keyspace, keyspace.getReplicationStrategy(), ConsistencyLevel.ONE, empty, one, one, one);
+        return new ReplicaPlan.ForWrite(keyspace, keyspace.getReplicationStrategy(), ConsistencyLevel.ONE, empty, one, one, one);
     }
 
     /**
      * A forwarding counter write is always sent to a single owning coordinator for the range, by the original coordinator
      * (if it is not itself an owner)
      */
-    public static ReplicaPlan.ForTokenWrite forForwardingCounterWrite(Keyspace keyspace, Token token, Replica replica)
+    public static ReplicaPlan.ForWrite forForwardingCounterWrite(Keyspace keyspace, Token token, Replica replica)
     {
         return forSingleReplicaWrite(keyspace, token, replica);
     }
 
-    public static ReplicaPlan.ForTokenWrite forLocalBatchlogWrite()
+    public static ReplicaPlan.ForWrite forLocalBatchlogWrite()
     {
         Token token = DatabaseDescriptor.getPartitioner().getMinimumToken();
         Keyspace systemKeypsace = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME);
@@ -212,7 +211,7 @@
      *
      * @param isAny if batch consistency level is ANY, in which case a local node will be picked
      */
-    public static ReplicaPlan.ForTokenWrite forBatchlogWrite(boolean isAny) throws UnavailableException
+    public static ReplicaPlan.ForWrite forBatchlogWrite(boolean isAny) throws UnavailableException
     {
         // A single case we write not for range or token, but multiple mutations to many tokens
         Token token = DatabaseDescriptor.getPartitioner().getMinimumToken();
@@ -318,41 +317,41 @@
         return result;
     }
 
-    public static ReplicaPlan.ForTokenWrite forReadRepair(Token token, ReplicaPlan.ForRead<?> readPlan) throws UnavailableException
+    public static ReplicaPlan.ForWrite forReadRepair(Token token, ReplicaPlan<?, ?> readPlan) throws UnavailableException
     {
-        return forWrite(readPlan.keyspace, readPlan.consistencyLevel, token, writeReadRepair(readPlan));
+        return forWrite(readPlan.keyspace(), readPlan.consistencyLevel(), token, writeReadRepair(readPlan));
     }
 
-    public static ReplicaPlan.ForTokenWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, Token token, Selector selector) throws UnavailableException
+    public static ReplicaPlan.ForWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, Token token, Selector selector) throws UnavailableException
     {
         return forWrite(keyspace, consistencyLevel, ReplicaLayout.forTokenWriteLiveAndDown(keyspace, token), selector);
     }
 
     @VisibleForTesting
-    public static ReplicaPlan.ForTokenWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, EndpointsForToken natural, EndpointsForToken pending, Predicate<Replica> isAlive, Selector selector) throws UnavailableException
+    public static ReplicaPlan.ForWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, EndpointsForToken natural, EndpointsForToken pending, Predicate<Replica> isAlive, Selector selector) throws UnavailableException
     {
         return forWrite(keyspace, consistencyLevel, ReplicaLayout.forTokenWrite(keyspace.getReplicationStrategy(), natural, pending), isAlive, selector);
     }
 
-    public static ReplicaPlan.ForTokenWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, ReplicaLayout.ForTokenWrite liveAndDown, Selector selector) throws UnavailableException
+    public static ReplicaPlan.ForWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, ReplicaLayout.ForTokenWrite liveAndDown, Selector selector) throws UnavailableException
     {
         return forWrite(keyspace, consistencyLevel, liveAndDown, FailureDetector.isReplicaAlive, selector);
     }
 
-    private static ReplicaPlan.ForTokenWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, ReplicaLayout.ForTokenWrite liveAndDown, Predicate<Replica> isAlive, Selector selector) throws UnavailableException
+    private static ReplicaPlan.ForWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, ReplicaLayout.ForTokenWrite liveAndDown, Predicate<Replica> isAlive, Selector selector) throws UnavailableException
     {
         ReplicaLayout.ForTokenWrite live = liveAndDown.filter(isAlive);
         return forWrite(keyspace, consistencyLevel, liveAndDown, live, selector);
     }
 
-    public static ReplicaPlan.ForTokenWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, ReplicaLayout.ForTokenWrite liveAndDown, ReplicaLayout.ForTokenWrite live, Selector selector) throws UnavailableException
+    public static ReplicaPlan.ForWrite forWrite(Keyspace keyspace, ConsistencyLevel consistencyLevel, ReplicaLayout.ForTokenWrite liveAndDown, ReplicaLayout.ForTokenWrite live, Selector selector) throws UnavailableException
     {
         assert liveAndDown.replicationStrategy() == live.replicationStrategy()
                : "ReplicaLayout liveAndDown and live should be derived from the same replication strategy.";
         AbstractReplicationStrategy replicationStrategy = liveAndDown.replicationStrategy();
         EndpointsForToken contacts = selector.select(consistencyLevel, liveAndDown, live);
         assureSufficientLiveReplicasForWrite(replicationStrategy, consistencyLevel, live.all(), liveAndDown.pending());
-        return new ReplicaPlan.ForTokenWrite(keyspace, replicationStrategy, consistencyLevel, liveAndDown.pending(), liveAndDown.all(), live.all(), contacts);
+        return new ReplicaPlan.ForWrite(keyspace, replicationStrategy, consistencyLevel, liveAndDown.pending(), liveAndDown.all(), live.all(), contacts);
     }
 
     public interface Selector
@@ -435,7 +434,7 @@
      * the minimal number of nodes to meet the consistency level, and prefer nodes we contacted on read to minimise
      * data transfer.
      */
-    public static Selector writeReadRepair(ReplicaPlan.ForRead<?> readPlan)
+    public static Selector writeReadRepair(ReplicaPlan<?, ?> readPlan)
     {
         return new Selector()
         {
@@ -498,7 +497,7 @@
         {
             // TODO: we should cleanup our semantics here, as we're filtering ALL nodes to localDC which is unexpected for ReplicaPlan
             // Restrict natural and pending to node in the local DC only
-            liveAndDown = liveAndDown.filter(InOurDcTester.replicas());
+            liveAndDown = liveAndDown.filter(InOurDc.replicas());
         }
 
         ReplicaLayout.ForTokenWrite live = liveAndDown.filter(FailureDetector.isReplicaAlive);
@@ -527,7 +526,7 @@
     private static <E extends Endpoints<E>> E candidatesForRead(ConsistencyLevel consistencyLevel, E liveNaturalReplicas)
     {
         return consistencyLevel.isDatacenterLocal()
-                ? liveNaturalReplicas.filter(InOurDcTester.replicas())
+                ? liveNaturalReplicas.filter(InOurDc.replicas())
                 : liveNaturalReplicas;
     }
 
@@ -622,7 +621,7 @@
     {
         // TODO: should we be asserting that the ranges are adjacent?
         AbstractBounds<PartitionPosition> newRange = left.range().withNewRight(right.range().right);
-        EndpointsForRange mergedCandidates = left.candidates().keep(right.candidates().endpoints());
+        EndpointsForRange mergedCandidates = left.readCandidates().keep(right.readCandidates().endpoints());
         AbstractReplicationStrategy replicationStrategy = keyspace.getReplicationStrategy();
 
         // Check if there are enough shared endpoints for the merge to be possible.
diff --git a/src/java/org/apache/cassandra/locator/Replicas.java b/src/java/org/apache/cassandra/locator/Replicas.java
index 1b299cf..c53815c 100644
--- a/src/java/org/apache/cassandra/locator/Replicas.java
+++ b/src/java/org/apache/cassandra/locator/Replicas.java
@@ -78,7 +78,7 @@
     public static ReplicaCount countInOurDc(ReplicaCollection<?> replicas)
     {
         ReplicaCount count = new ReplicaCount();
-        Predicate<Replica> inOurDc = InOurDcTester.replicas();
+        Predicate<Replica> inOurDc = InOurDc.replicas();
         for (Replica replica : replicas)
             if (inOurDc.test(replica))
                 count.increment(replica);
diff --git a/src/java/org/apache/cassandra/locator/SimpleStrategy.java b/src/java/org/apache/cassandra/locator/SimpleStrategy.java
index 928ac97..e5b9210 100644
--- a/src/java/org/apache/cassandra/locator/SimpleStrategy.java
+++ b/src/java/org/apache/cassandra/locator/SimpleStrategy.java
@@ -26,10 +26,13 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.service.StorageService;
 
@@ -99,12 +102,13 @@
     }
 
     @Override
-    public void maybeWarnOnOptions()
+    public void maybeWarnOnOptions(ClientState state)
     {
         if (!SchemaConstants.isSystemKeyspace(keyspaceName))
         {
             int nodeCount = StorageService.instance.getHostIdToEndpoint().size();
             // nodeCount==0 on many tests
+            Guardrails.minimumReplicationFactor.guard(rf.fullReplicas, keyspaceName, false, state);
             if (rf.fullReplicas > nodeCount && nodeCount != 0)
             {
                 String msg = "Your replication factor " + rf.fullReplicas
@@ -123,4 +127,13 @@
     {
         return Collections.singleton(REPLICATION_FACTOR);
     }
+
+    protected static void prepareOptions(Map<String, String> options, Map<String, String> previousOptions)
+    {
+        // When altering from NTS to SS, previousOptions could have multiple different RFs for different data centers - so we
+        // will instead default to DefaultRF configuration if RF is not mentioned with the alter statement
+        String rf = previousOptions.containsKey(REPLICATION_FACTOR) ? previousOptions.get(REPLICATION_FACTOR)
+                                                                    : Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF());
+        options.putIfAbsent(REPLICATION_FACTOR, rf);
+    }
 }
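Editor's note: maybeWarnOnOptions above still warns when the requested RF exceeds the live node count (and now also routes the value through the minimum-RF guardrail, skipping nodeCount == 0 because many tests run without a ring). A minimal standalone sketch of that check, with illustrative names and message text:

    final class RfWarningSketch
    {
        // Mirrors the warning condition in maybeWarnOnOptions: flag an RF higher than
        // the number of known nodes, but skip the check when no nodes are registered.
        static String maybeWarn(int fullReplicas, int nodeCount, String keyspace)
        {
            if (nodeCount != 0 && fullReplicas > nodeCount)
                return "Your replication factor " + fullReplicas + " for keyspace " + keyspace
                       + " is higher than the number of nodes " + nodeCount;
            return null;
        }

        public static void main(String[] args)
        {
            System.out.println(maybeWarn(3, 1, "ks")); // warning text
            System.out.println(maybeWarn(3, 0, "ks")); // null - skipped when no ring
        }
    }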
diff --git a/src/java/org/apache/cassandra/locator/TokenMetadata.java b/src/java/org/apache/cassandra/locator/TokenMetadata.java
index 92551ac..6b0ea7d 100644
--- a/src/java/org/apache/cassandra/locator/TokenMetadata.java
+++ b/src/java/org/apache/cassandra/locator/TokenMetadata.java
@@ -31,6 +31,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.*;
+
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,6 +50,7 @@
 import org.apache.cassandra.utils.SortedBiMultiValMap;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.LINE_SEPARATOR;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class TokenMetadata
 {
@@ -189,8 +191,7 @@
     public void updateNormalTokens(Collection<Token> tokens, InetAddressAndPort endpoint)
     {
         Multimap<InetAddressAndPort, Token> endpointTokens = HashMultimap.create();
-        for (Token token : tokens)
-            endpointTokens.put(endpoint, token);
+        endpointTokens.putAll(endpoint, tokens);
         updateNormalTokens(endpointTokens);
     }
 
@@ -257,23 +258,7 @@
         lock.writeLock().lock();
         try
         {
-            InetAddressAndPort storedEp = endpointToHostIdMap.inverse().get(hostId);
-            if (storedEp != null)
-            {
-                if (!storedEp.equals(endpoint) && (FailureDetector.instance.isAlive(storedEp)))
-                {
-                    throw new RuntimeException(String.format("Host ID collision between active endpoint %s and %s (id=%s)",
-                                                             storedEp,
-                                                             endpoint,
-                                                             hostId));
-                }
-            }
-
-            UUID storedId = endpointToHostIdMap.get(endpoint);
-            if ((storedId != null) && (!storedId.equals(hostId)))
-                logger.warn("Changing {}'s host ID from {} to {}", endpoint, storedId, hostId);
-
-            endpointToHostIdMap.forcePut(endpoint, hostId);
+            updateEndpointToHostIdMap(hostId, endpoint);
         }
         finally
         {
@@ -282,6 +267,44 @@
 
     }
 
+    public void updateHostIds(Map<UUID, InetAddressAndPort> hostIdToEndpointMap)
+    {
+        lock.writeLock().lock();
+        try
+        {
+            for (Map.Entry<UUID, InetAddressAndPort> entry : hostIdToEndpointMap.entrySet())
+            {
+                updateEndpointToHostIdMap(entry.getKey(), entry.getValue());
+            }
+        }
+        finally
+        {
+            lock.writeLock().unlock();
+        }
+
+    }
+    
+    private void updateEndpointToHostIdMap(UUID hostId, InetAddressAndPort endpoint)
+    {
+        InetAddressAndPort storedEp = endpointToHostIdMap.inverse().get(hostId);
+        if (storedEp != null)
+        {
+            if (!storedEp.equals(endpoint) && (FailureDetector.instance.isAlive(storedEp)))
+            {
+                throw new RuntimeException(String.format("Host ID collision between active endpoint %s and %s (id=%s)",
+                                                         storedEp,
+                                                         endpoint,
+                                                         hostId));
+            }
+        }
+
+        UUID storedId = endpointToHostIdMap.get(endpoint);
+        if ((storedId != null) && (!storedId.equals(hostId)))
+            logger.warn("Changing {}'s host ID from {} to {}", endpoint, storedId, hostId);
+
+        endpointToHostIdMap.forcePut(endpoint, hostId);
+    }
+
     /** Return the unique host ID for an end-point. */
     public UUID getHostId(InetAddressAndPort endpoint)
     {
@@ -482,7 +505,7 @@
         try
         {
             bootstrapTokens.removeValue(endpoint);
-            tokenToEndpointMap.removeValue(endpoint);
+
             topology = topology.unbuild().removeEndpoint(endpoint).build();
             leavingEndpoints.remove(endpoint);
             if (replacementToOriginal.remove(endpoint) != null)
@@ -490,8 +513,12 @@
                 logger.debug("Node {} failed during replace.", endpoint);
             }
             endpointToHostIdMap.remove(endpoint);
-            sortedTokens = sortTokens();
-            invalidateCachedRingsUnsafe();
+            Collection<Token> removedTokens = tokenToEndpointMap.removeValue(endpoint);
+            if (removedTokens != null && !removedTokens.isEmpty())
+            {
+                sortedTokens = sortTokens();
+                invalidateCachedRingsUnsafe();
+            }
         }
         finally
         {
@@ -575,7 +602,7 @@
         lock.readLock().lock();
         try
         {
-            assert isMember(endpoint); // don't want to return nulls
+            assert isMember(endpoint): String.format("Unable to get tokens for %s; it is not a member", endpoint); // don't want to return nulls
             return new ArrayList<>(tokenToEndpointMap.inverse().get(endpoint));
         }
         finally
@@ -838,51 +865,17 @@
     public void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName)
     {
         // avoid race between both branches - do not use a lock here as this will block any other unrelated operations!
-        long startedAt = System.currentTimeMillis();
+        long startedAt = currentTimeMillis();
         synchronized (pendingRanges)
         {
             TokenMetadataDiagnostics.pendingRangeCalculationStarted(this, keyspaceName);
 
-            // create clone of current state
-            BiMultiValMap<Token, InetAddressAndPort> bootstrapTokensClone;
-            Set<InetAddressAndPort> leavingEndpointsClone;
-            Set<Pair<Token, InetAddressAndPort>> movingEndpointsClone;
-            TokenMetadata metadata;
+            unsafeCalculatePendingRanges(strategy, keyspaceName);
 
-            lock.readLock().lock();
-            try
-            {
-
-                if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && movingEndpoints.isEmpty())
-                {
-                    if (logger.isTraceEnabled())
-                        logger.trace("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", keyspaceName);
-                    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && movingEndpoints.isEmpty())
-                    {
-                        if (logger.isTraceEnabled())
-                            logger.trace("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", keyspaceName);
-                        pendingRanges.put(keyspaceName, new PendingRangeMaps());
-
-                        return;
-                    }
-                }
-
-                bootstrapTokensClone  = new BiMultiValMap<>(this.bootstrapTokens);
-                leavingEndpointsClone = new HashSet<>(this.leavingEndpoints);
-                movingEndpointsClone = new HashSet<>(this.movingEndpoints);
-                metadata = this.cloneOnlyTokenMap();
-            }
-            finally
-            {
-                lock.readLock().unlock();
-            }
-
-            pendingRanges.put(keyspaceName, calculatePendingRanges(strategy, metadata, bootstrapTokensClone,
-                                                                   leavingEndpointsClone, movingEndpointsClone));
             if (logger.isDebugEnabled())
                 logger.debug("Starting pending range calculation for {}", keyspaceName);
 
-            long took = System.currentTimeMillis() - startedAt;
+            long took = currentTimeMillis() - startedAt;
 
             if (logger.isDebugEnabled())
                 logger.debug("Pending range calculation for {} completed (took: {}ms)", keyspaceName, took);
@@ -891,6 +884,46 @@
         }
     }
 
+    public void unsafeCalculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName)
+    {
+        // create clone of current state
+        BiMultiValMap<Token, InetAddressAndPort> bootstrapTokensClone;
+        Set<InetAddressAndPort> leavingEndpointsClone;
+        Set<Pair<Token, InetAddressAndPort>> movingEndpointsClone;
+        TokenMetadata metadata;
+
+        lock.readLock().lock();
+        try
+        {
+
+            if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && movingEndpoints.isEmpty())
+            {
+                if (logger.isTraceEnabled())
+                    logger.trace("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", keyspaceName);
+                if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && movingEndpoints.isEmpty())
+                {
+                    if (logger.isTraceEnabled())
+                        logger.trace("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", keyspaceName);
+                    pendingRanges.put(keyspaceName, new PendingRangeMaps());
+
+                    return;
+                }
+            }
+
+            bootstrapTokensClone  = new BiMultiValMap<>(this.bootstrapTokens);
+            leavingEndpointsClone = new HashSet<>(this.leavingEndpoints);
+            movingEndpointsClone = new HashSet<>(this.movingEndpoints);
+            metadata = this.cloneOnlyTokenMap();
+        }
+        finally
+        {
+            lock.readLock().unlock();
+        }
+
+        pendingRanges.put(keyspaceName, calculatePendingRanges(strategy, metadata, bootstrapTokensClone,
+                                                               leavingEndpointsClone, movingEndpointsClone));
+    }
+
     /**
      * @see TokenMetadata#calculatePendingRanges(AbstractReplicationStrategy, String)
      */
@@ -1366,7 +1399,7 @@
      */
     public Topology getTopology()
     {
-        assert this != StorageService.instance.getTokenMetadata();
+        assert !DatabaseDescriptor.isDaemonInitialized() || this != StorageService.instance.getTokenMetadata();
         return topology;
     }
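Editor's note: the new updateHostIds above applies a whole host-ID map under a single write lock instead of locking per endpoint. A minimal standalone sketch of that lock-once pattern, using placeholder types instead of the Cassandra ones:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.UUID;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class BulkHostIdUpdateSketch
    {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        private final Map<String, UUID> endpointToHostId = new HashMap<>();

        // Mirrors the intent of TokenMetadata.updateHostIds(..): acquire the write lock
        // once and apply every entry, rather than taking the lock for each endpoint.
        void updateHostIds(Map<UUID, String> hostIdToEndpoint)
        {
            lock.writeLock().lock();
            try
            {
                for (Map.Entry<UUID, String> entry : hostIdToEndpoint.entrySet())
                    endpointToHostId.put(entry.getValue(), entry.getKey());
            }
            finally
            {
                lock.writeLock().unlock();
            }
        }
    }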
 
diff --git a/src/java/org/apache/cassandra/metrics/CQLMetrics.java b/src/java/org/apache/cassandra/metrics/CQLMetrics.java
index 1020e92..ce91333 100644
--- a/src/java/org/apache/cassandra/metrics/CQLMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/CQLMetrics.java
@@ -32,6 +32,8 @@
     public final Counter preparedStatementsExecuted;
     public final Counter preparedStatementsEvicted;
 
+    public final Counter useStatementsExecuted;
+
     public final Gauge<Integer> preparedStatementsCount;
     public final Gauge<Double> preparedStatementsRatio;
 
@@ -41,13 +43,9 @@
         preparedStatementsExecuted = Metrics.counter(factory.createMetricName("PreparedStatementsExecuted"));
         preparedStatementsEvicted = Metrics.counter(factory.createMetricName("PreparedStatementsEvicted"));
 
-        preparedStatementsCount = Metrics.register(factory.createMetricName("PreparedStatementsCount"), new Gauge<Integer>()
-        {
-            public Integer getValue()
-            {
-                return QueryProcessor.preparedStatementsCount();
-            }
-        });
+        useStatementsExecuted = Metrics.counter(factory.createMetricName("UseStatementsExecuted"));
+
+        preparedStatementsCount = Metrics.register(factory.createMetricName("PreparedStatementsCount"), QueryProcessor::preparedStatementsCount);
         preparedStatementsRatio = Metrics.register(factory.createMetricName("PreparedStatementsRatio"), new RatioGauge()
         {
             public Ratio getRatio()
diff --git a/src/java/org/apache/cassandra/metrics/CassandraMetricsRegistry.java b/src/java/org/apache/cassandra/metrics/CassandraMetricsRegistry.java
index 1ae2455..37c37e3 100644
--- a/src/java/org/apache/cassandra/metrics/CassandraMetricsRegistry.java
+++ b/src/java/org/apache/cassandra/metrics/CassandraMetricsRegistry.java
@@ -45,6 +45,7 @@
     private final Map<String, ThreadPoolMetrics> threadPoolMetrics = new ConcurrentHashMap<>();
 
     private final MBeanWrapper mBeanServer = MBeanWrapper.instance;
+    public final static TimeUnit DEFAULT_TIMER_UNIT = TimeUnit.MICROSECONDS;
 
     private CassandraMetricsRegistry()
     {
@@ -98,19 +99,49 @@
 
     public Timer timer(MetricName name)
     {
-        Timer timer = register(name, new Timer(new DecayingEstimatedHistogramReservoir()));
-        registerMBean(timer, name.getMBeanName());
+        return timer(name, DEFAULT_TIMER_UNIT);
+    }
 
+    public SnapshottingTimer timer(MetricName name, MetricName alias)
+    {
+        return timer(name, alias, DEFAULT_TIMER_UNIT);
+    }
+
+    public SnapshottingTimer timer(MetricName name, TimeUnit durationUnit)
+    {
+        SnapshottingTimer timer = register(name, new SnapshottingTimer(CassandraMetricsRegistry.createReservoir(durationUnit)));
+        registerMBean(timer, name.getMBeanName());
         return timer;
     }
 
-    public Timer timer(MetricName name, MetricName alias)
+    public SnapshottingTimer timer(MetricName name, MetricName alias, TimeUnit durationUnit)
     {
-        Timer timer = timer(name);
+        SnapshottingTimer timer = timer(name, durationUnit);
         registerAlias(name, alias);
         return timer;
     }
 
+    public static SnapshottingReservoir createReservoir(TimeUnit durationUnit)
+    {
+        SnapshottingReservoir reservoir;
+        if (durationUnit != TimeUnit.NANOSECONDS)
+        {
+            SnapshottingReservoir underlying = new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION,
+                                                                           DecayingEstimatedHistogramReservoir.LOW_BUCKET_COUNT,
+                                                                           DecayingEstimatedHistogramReservoir.DEFAULT_STRIPE_COUNT);
+            // fewer buckets should suffice if timer is not based on nanos
+            reservoir = new ScalingReservoir(underlying,
+                                             // timer update values in nanos.
+                                             v -> durationUnit.convert(v, TimeUnit.NANOSECONDS));
+        }
+        else
+        {
+            // Use more buckets if timer is created with nanos resolution.
+            reservoir = new DecayingEstimatedHistogramReservoir();
+        }
+        return reservoir;
+    }
+
     public <T extends Metric> T register(MetricName name, T metric)
     {
         try
@@ -196,7 +227,7 @@
         else if (metric instanceof Histogram)
             mbean = new JmxHistogram((Histogram) metric, name);
         else if (metric instanceof Timer)
-            mbean = new JmxTimer((Timer) metric, name, TimeUnit.SECONDS, TimeUnit.MICROSECONDS);
+            mbean = new JmxTimer((Timer) metric, name, TimeUnit.SECONDS, DEFAULT_TIMER_UNIT);
         else if (metric instanceof Metered)
             mbean = new JmxMeter((Metered) metric, name, TimeUnit.SECONDS);
         else
@@ -532,7 +563,6 @@
     static class JmxTimer extends JmxMeter implements JmxTimerMBean
     {
         private final Timer metric;
-        private final double durationFactor;
         private final String durationUnit;
         private long[] last = null;
 
@@ -543,68 +573,67 @@
         {
             super(metric, objectName, rateUnit);
             this.metric = metric;
-            this.durationFactor = 1.0 / durationUnit.toNanos(1);
             this.durationUnit = durationUnit.toString().toLowerCase(Locale.US);
         }
 
         @Override
         public double get50thPercentile()
         {
-            return metric.getSnapshot().getMedian() * durationFactor;
+            return metric.getSnapshot().getMedian();
         }
 
         @Override
         public double getMin()
         {
-            return metric.getSnapshot().getMin() * durationFactor;
+            return metric.getSnapshot().getMin();
         }
 
         @Override
         public double getMax()
         {
-            return metric.getSnapshot().getMax() * durationFactor;
+            return metric.getSnapshot().getMax();
         }
 
         @Override
         public double getMean()
         {
-            return metric.getSnapshot().getMean() * durationFactor;
+            return metric.getSnapshot().getMean();
         }
 
         @Override
         public double getStdDev()
         {
-            return metric.getSnapshot().getStdDev() * durationFactor;
+            return metric.getSnapshot().getStdDev();
         }
 
         @Override
         public double get75thPercentile()
         {
-            return metric.getSnapshot().get75thPercentile() * durationFactor;
+            return metric.getSnapshot().get75thPercentile();
         }
 
         @Override
         public double get95thPercentile()
         {
-            return metric.getSnapshot().get95thPercentile() * durationFactor;
+            return metric.getSnapshot().get95thPercentile();
         }
 
         @Override
         public double get98thPercentile()
         {
-            return metric.getSnapshot().get98thPercentile() * durationFactor;
+            return metric.getSnapshot().get98thPercentile();
         }
 
         @Override
         public double get99thPercentile()
         {
-            return metric.getSnapshot().get99thPercentile() * durationFactor;
+            return metric.getSnapshot().get99thPercentile();
         }
 
         @Override
         public double get999thPercentile()
         {
-            return metric.getSnapshot().get999thPercentile() * durationFactor;
+            return metric.getSnapshot().get999thPercentile();
         }
 
         @Override
diff --git a/src/java/org/apache/cassandra/metrics/ClientMetrics.java b/src/java/org/apache/cassandra/metrics/ClientMetrics.java
index 29ae693..4e3eb4f 100644
--- a/src/java/org/apache/cassandra/metrics/ClientMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/ClientMetrics.java
@@ -46,6 +46,7 @@
     private Gauge<Integer> pausedConnectionsGauge;
     
     private Meter requestDiscarded;
+    private Meter requestDispatched;
 
     private Meter protocolException;
     private Meter unknownException;
@@ -68,6 +69,7 @@
     public void unpauseConnection() { pausedConnections.decrementAndGet(); }
 
     public void markRequestDiscarded() { requestDiscarded.mark(); }
+    public void markRequestDispatched() { requestDispatched.mark(); }
 
     public List<ConnectedClient> allConnectedClients()
     {
@@ -119,6 +121,7 @@
         pausedConnections = new AtomicInteger();
         pausedConnectionsGauge = registerGauge("PausedConnections", pausedConnections::get);
         requestDiscarded = registerMeter("RequestDiscarded");
+        requestDispatched = registerMeter("RequestDispatched");
 
         protocolException = registerMeter("ProtocolException");
         unknownException = registerMeter("UnknownException");
diff --git a/src/java/org/apache/cassandra/metrics/ClientRequestMetrics.java b/src/java/org/apache/cassandra/metrics/ClientRequestMetrics.java
index e3a6970..4080870 100644
--- a/src/java/org/apache/cassandra/metrics/ClientRequestMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/ClientRequestMetrics.java
@@ -22,6 +22,9 @@
 
 
 import com.codahale.metrics.Meter;
+import org.apache.cassandra.exceptions.ReadAbortException;
+import org.apache.cassandra.exceptions.ReadSizeAbortException;
+import org.apache.cassandra.exceptions.TombstoneAbortException;
 
 import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
 
@@ -31,6 +34,11 @@
     public final Meter timeouts;
     public final Meter unavailables;
     public final Meter failures;
+    public final Meter aborts;
+    public final Meter tombstoneAborts;
+    public final Meter readSizeAborts;
+    public final Meter localRequests;
+    public final Meter remoteRequests;
 
     public ClientRequestMetrics(String scope)
     {
@@ -39,6 +47,26 @@
         timeouts = Metrics.meter(factory.createMetricName("Timeouts"));
         unavailables = Metrics.meter(factory.createMetricName("Unavailables"));
         failures = Metrics.meter(factory.createMetricName("Failures"));
+        aborts = Metrics.meter(factory.createMetricName("Aborts"));
+        tombstoneAborts = Metrics.meter(factory.createMetricName("TombstoneAborts"));
+        readSizeAborts = Metrics.meter(factory.createMetricName("ReadSizeAborts"));
+        localRequests = Metrics.meter(factory.createMetricName("LocalRequests"));
+        remoteRequests = Metrics.meter(factory.createMetricName("RemoteRequests"));
+    }
+
+    public void markAbort(Throwable cause)
+    {
+        aborts.mark();
+        if (!(cause instanceof ReadAbortException))
+            return;
+        if (cause instanceof TombstoneAbortException)
+        {
+            tombstoneAborts.mark();
+        }
+        else if (cause instanceof ReadSizeAbortException)
+        {
+            readSizeAborts.mark();
+        }
     }
 
     public void release()
@@ -47,5 +75,10 @@
         Metrics.remove(factory.createMetricName("Timeouts"));
         Metrics.remove(factory.createMetricName("Unavailables"));
         Metrics.remove(factory.createMetricName("Failures"));
+        Metrics.remove(factory.createMetricName("Aborts"));
+        Metrics.remove(factory.createMetricName("TombstoneAborts"));
+        Metrics.remove(factory.createMetricName("ReadSizeAborts"));
+        Metrics.remove(factory.createMetricName("LocalRequests"));
+        Metrics.remove(factory.createMetricName("RemoteRequests"));
     }
 }
diff --git a/src/java/org/apache/cassandra/metrics/ClientRequestsMetricsHolder.java b/src/java/org/apache/cassandra/metrics/ClientRequestsMetricsHolder.java
index 05ab338..26f2913 100644
--- a/src/java/org/apache/cassandra/metrics/ClientRequestsMetricsHolder.java
+++ b/src/java/org/apache/cassandra/metrics/ClientRequestsMetricsHolder.java
@@ -30,8 +30,8 @@
     public static final CASClientRequestMetrics casReadMetrics = new CASClientRequestMetrics("CASRead");
     public static final ViewWriteMetrics viewWriteMetrics = new ViewWriteMetrics("ViewWrite");
 
-    private static final Map<ConsistencyLevel, ClientRequestMetrics> readMetricsMap = new EnumMap<>(ConsistencyLevel.class);
-    private static final Map<ConsistencyLevel, ClientWriteRequestMetrics> writeMetricsMap = new EnumMap<>(ConsistencyLevel.class);
+    public static final Map<ConsistencyLevel, ClientRequestMetrics> readMetricsMap = new EnumMap<>(ConsistencyLevel.class);
+    public static final Map<ConsistencyLevel, ClientWriteRequestMetrics> writeMetricsMap = new EnumMap<>(ConsistencyLevel.class);
 
     static
     {
diff --git a/src/java/org/apache/cassandra/metrics/CommitLogMetrics.java b/src/java/org/apache/cassandra/metrics/CommitLogMetrics.java
index a3302bc..cb53575 100644
--- a/src/java/org/apache/cassandra/metrics/CommitLogMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/CommitLogMetrics.java
@@ -42,6 +42,8 @@
     public final Timer waitingOnSegmentAllocation;
     /** The time spent waiting on CL sync; for Periodic this is only occurs when the sync is lagging its sync interval */
     public final Timer waitingOnCommit;
+    /** Time spent actually flushing the contents of a buffer to disk */
+    public final Timer waitingOnFlush;
     /** Number and rate of oversized mutations */
     public final Meter oversizedMutations;
 
@@ -49,6 +51,7 @@
     {
         waitingOnSegmentAllocation = Metrics.timer(factory.createMetricName("WaitingOnSegmentAllocation"));
         waitingOnCommit = Metrics.timer(factory.createMetricName("WaitingOnCommit"));
+        waitingOnFlush = Metrics.timer(factory.createMetricName("WaitingOnFlush"));
         oversizedMutations = Metrics.meter(factory.createMetricName("OverSizedMutations"));
     }
 
diff --git a/src/java/org/apache/cassandra/metrics/CompactionMetrics.java b/src/java/org/apache/cassandra/metrics/CompactionMetrics.java
index 46e5940..0fe1ec7 100644
--- a/src/java/org/apache/cassandra/metrics/CompactionMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/CompactionMetrics.java
@@ -18,18 +18,17 @@
 package org.apache.cassandra.metrics;
 
 import java.util.*;
-import java.util.concurrent.ThreadPoolExecutor;
 
 import com.codahale.metrics.Counter;
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Meter;
 
+import com.codahale.metrics.Timer;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.compaction.ActiveCompactions;
 import org.apache.cassandra.db.compaction.CompactionInfo;
 import org.apache.cassandra.db.compaction.CompactionManager;
-import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
 
@@ -53,7 +52,8 @@
     public final Meter totalCompactionsCompleted;
     /** Total number of bytes compacted since server [re]start */
     public final Counter bytesCompacted;
-
+    /** Time spent redistributing index summaries */
+    public final Timer indexSummaryRedistributionTime;
 
     /** Total number of compactions that have had sstables drop out of them */
     public final Counter compactionsReduced;
@@ -64,7 +64,7 @@
     /** Total number of compactions which have outright failed due to lack of disk space */
     public final Counter compactionsAborted;
 
-    public CompactionMetrics(final ThreadPoolExecutor... collectors)
+    public CompactionMetrics(final ExecutorPlus... collectors)
     {
         pendingTasks = Metrics.register(factory.createMetricName("PendingTasks"), new Gauge<Integer>()
         {
@@ -139,7 +139,7 @@
             public Long getValue()
             {
                 long completedTasks = 0;
-                for (ThreadPoolExecutor collector : collectors)
+                for (ExecutorPlus collector : collectors)
                     completedTasks += collector.getCompletedTaskCount();
                 return completedTasks;
             }
@@ -151,5 +151,6 @@
         compactionsReduced = Metrics.counter(factory.createMetricName("CompactionsReduced"));
         sstablesDropppedFromCompactions = Metrics.counter(factory.createMetricName("SSTablesDroppedFromCompaction"));
         compactionsAborted = Metrics.counter(factory.createMetricName("CompactionsAborted"));
+        indexSummaryRedistributionTime = Metrics.timer(factory.createMetricName("IndexSummaryRedistributionTime"));
     }
 }
diff --git a/src/java/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoir.java b/src/java/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoir.java
index 6dd1687..308023e 100644
--- a/src/java/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoir.java
+++ b/src/java/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoir.java
@@ -22,17 +22,23 @@
 import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
 import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
 import java.util.Objects;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLongArray;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Ints;
 
-import com.codahale.metrics.Clock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.codahale.metrics.Reservoir;
 import com.codahale.metrics.Snapshot;
 import org.apache.cassandra.utils.EstimatedHistogram;
+import org.apache.cassandra.utils.MonotonicClock;
+import org.apache.cassandra.utils.NoSpamLogger;
 
 import static java.lang.Math.max;
 import static java.lang.Math.min;
@@ -76,13 +82,15 @@
  *   <li>[3]: https://github.com/dropwizard/metrics/blob/v3.1.2/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java</li>
  * </ul>
  */
-public class DecayingEstimatedHistogramReservoir implements Reservoir
+public class DecayingEstimatedHistogramReservoir implements SnapshottingReservoir
 {
-
+    private static final Logger logger = LoggerFactory.getLogger(DecayingEstimatedHistogramReservoir.class);
+    private static final NoSpamLogger noSpamLogger = NoSpamLogger.getLogger(logger, 5L, TimeUnit.MINUTES);
     /**
      * The default number of decayingBuckets. Use this bucket count to reduce memory allocation for bucket offsets.
      */
     public static final int DEFAULT_BUCKET_COUNT = 164;
+    public static final int LOW_BUCKET_COUNT = 127;
     public static final int DEFAULT_STRIPE_COUNT = Integer.parseInt(System.getProperty("cassandra.dehr_stripe_count", "2"));
     public static final int MAX_BUCKET_COUNT = 237;
     public static final boolean DEFAULT_ZERO_CONSIDERATION = false;
@@ -146,21 +154,20 @@
 
     public static final long HALF_TIME_IN_S = 60L;
     public static final double MEAN_LIFETIME_IN_S = HALF_TIME_IN_S / Math.log(2.0);
-    public static final long LANDMARK_RESET_INTERVAL_IN_MS = 30L * 60L * 1000L;
+    public static final long LANDMARK_RESET_INTERVAL_IN_NS = TimeUnit.MINUTES.toNanos(30L);
 
     private final AtomicBoolean rescaling = new AtomicBoolean(false);
     private volatile long decayLandmark;
 
     // Wrapper around System.nanoTime() to simplify unit testing.
-    private final Clock clock;
-
+    private final MonotonicClock clock;
 
     /**
      * Construct a decaying histogram with default number of buckets and without considering zeroes.
      */
     public DecayingEstimatedHistogramReservoir()
     {
-        this(DEFAULT_ZERO_CONSIDERATION, DEFAULT_BUCKET_COUNT, DEFAULT_STRIPE_COUNT, Clock.defaultClock());
+        this(DEFAULT_ZERO_CONSIDERATION, DEFAULT_BUCKET_COUNT, DEFAULT_STRIPE_COUNT, MonotonicClock.Global.approxTime);
     }
 
     /**
@@ -171,7 +178,7 @@
      */
     public DecayingEstimatedHistogramReservoir(boolean considerZeroes)
     {
-        this(considerZeroes, DEFAULT_BUCKET_COUNT, DEFAULT_STRIPE_COUNT, Clock.defaultClock());
+        this(considerZeroes, DEFAULT_BUCKET_COUNT, DEFAULT_STRIPE_COUNT, MonotonicClock.Global.approxTime);
     }
 
     /**
@@ -183,17 +190,17 @@
      */
     public DecayingEstimatedHistogramReservoir(boolean considerZeroes, int bucketCount, int stripes)
     {
-        this(considerZeroes, bucketCount, stripes, Clock.defaultClock());
+        this(considerZeroes, bucketCount, stripes, MonotonicClock.Global.approxTime);
     }
 
     @VisibleForTesting
-    public DecayingEstimatedHistogramReservoir(Clock clock)
+    public DecayingEstimatedHistogramReservoir(MonotonicClock clock)
     {
         this(DEFAULT_ZERO_CONSIDERATION, DEFAULT_BUCKET_COUNT, DEFAULT_STRIPE_COUNT, clock);
     }
 
     @VisibleForTesting
-    DecayingEstimatedHistogramReservoir(boolean considerZeroes, int bucketCount, int stripes, Clock clock)
+    DecayingEstimatedHistogramReservoir(boolean considerZeroes, int bucketCount, int stripes, MonotonicClock clock)
     {
         assert bucketCount <= MAX_BUCKET_COUNT : "bucket count cannot exceed: " + MAX_BUCKET_COUNT;
 
@@ -217,7 +224,7 @@
         decayingBuckets = new AtomicLongArray((bucketOffsets.length + 1) * nStripes);
         buckets = new AtomicLongArray((bucketOffsets.length + 1) * nStripes);
         this.clock = clock;
-        decayLandmark = clock.getTime();
+        decayLandmark = clock.now();
         int distributionPrime = 1;
         for (int prime : DISTRIBUTION_PRIMES)
         {
@@ -237,7 +244,7 @@
      */
     public void update(long value)
     {
-        long now = clock.getTime();
+        long now = clock.now();
         rescaleIfNeeded(now);
 
         int index = findIndex(bucketOffsets, value);
@@ -282,7 +289,7 @@
 
     private double forwardDecayWeight(long now)
     {
-        return Math.exp(((now - decayLandmark) / 1000.0) / MEAN_LIFETIME_IN_S);
+        return Math.exp(TimeUnit.NANOSECONDS.toSeconds(now - decayLandmark) / MEAN_LIFETIME_IN_S);
     }
 
     /**
@@ -311,12 +318,20 @@
      *
      * @return the snapshot
      */
+    @Override
     public Snapshot getSnapshot()
     {
         rescaleIfNeeded();
         return new EstimatedHistogramReservoirSnapshot(this);
     }
 
+    @Override
+    public Snapshot getPercentileSnapshot()
+    {
+        rescaleIfNeeded();
+        return new DecayingBucketsOnlySnapshot(this);
+    }
+
     /**
      * @return true if this histogram has overflowed -- that is, a value larger than our largest bucket could bound was added
      */
@@ -344,7 +359,7 @@
 
     private void rescaleIfNeeded()
     {
-        rescaleIfNeeded(clock.getTime());
+        rescaleIfNeeded(clock.now());
     }
 
     private void rescaleIfNeeded(long now)
@@ -379,7 +394,7 @@
 
     private boolean needRescale(long now)
     {
-        return (now - decayLandmark) > LANDMARK_RESET_INTERVAL_IN_MS;
+        return (now - decayLandmark) > LANDMARK_RESET_INTERVAL_IN_NS;
     }
 
     @VisibleForTesting
@@ -429,42 +444,16 @@
 
     }
 
-    /**
-     * Represents a snapshot of the decaying histogram.
-     *
-     * The decaying buckets are copied into a snapshot array to give a consistent view for all getters. However, the
-     * copy is made without a write-lock and so other threads may change the buckets while the array is copied,
-     * probably causign a slight skew up in the quantiles and mean values.
-     *
-     * The decaying buckets will be used for quantile calculations and mean values, but the non decaying buckets will be
-     * exposed for calls to {@link Snapshot#getValues()}.
-     */
-    static class EstimatedHistogramReservoirSnapshot extends Snapshot
+    private static abstract class AbstractSnapshot extends Snapshot
     {
-        private final long[] decayingBuckets;
-        private final long[] values;
-        private long count;
-        private long snapshotLandmark;
-        private long[] bucketOffsets;
-        private DecayingEstimatedHistogramReservoir reservoir;
+        protected final long[] decayingBuckets;
+        protected final long[] bucketOffsets;
 
-        public EstimatedHistogramReservoirSnapshot(DecayingEstimatedHistogramReservoir reservoir)
+        AbstractSnapshot(DecayingEstimatedHistogramReservoir reservoir)
         {
-            final int length = reservoir.size();
-            final double rescaleFactor = reservoir.forwardDecayWeight(reservoir.clock.getTime());
-
+            int length = reservoir.size();
             this.decayingBuckets = new long[length];
-            this.values = new long[length];
-            this.snapshotLandmark = reservoir.decayLandmark;
             this.bucketOffsets = reservoir.bucketOffsets; // No need to copy, these are immutable
-
-            for (int i = 0; i < length; i++)
-            {
-                this.decayingBuckets[i] = Math.round(reservoir.bucketValue(i, true) / rescaleFactor);
-                this.values[i] = reservoir.bucketValue(i, false);
-            }
-            this.count = count();
-            this.reservoir = reservoir;
         }
 
         /**
@@ -474,6 +463,7 @@
          * @return estimated value at given quantile
          * @throws IllegalStateException in case the histogram overflowed
          */
+        @Override
         public double getValue(double quantile)
         {
             assert quantile >= 0 && quantile <= 1.0;
@@ -481,7 +471,10 @@
             final int lastBucket = decayingBuckets.length - 1;
 
             if (decayingBuckets[lastBucket] > 0)
-                throw new IllegalStateException("Unable to compute when histogram overflowed");
+            {
+                // Log the overflow (rate-limited, with stack trace) instead of failing; fall through to a best-effort estimate.
+                try { throw new IllegalStateException("EstimatedHistogram overflow: " + Arrays.toString(decayingBuckets)); }
+                catch (IllegalStateException e) { noSpamLogger.warn("", e); }
+            }
 
             final long qcount = (long) Math.ceil(count() * quantile);
             if (qcount == 0)
@@ -498,48 +491,11 @@
         }
 
         /**
-         * Will return a snapshot of the non-decaying buckets.
-         *
-         * The values returned will not be consistent with the quantile and mean values. The caller must be aware of the
-         * offsets created by {@link EstimatedHistogram#getBucketOffsets()} to make use of the values returned.
-         *
-         * @return a snapshot of the non-decaying buckets.
-         */
-        public long[] getValues()
-        {
-            return values;
-        }
-
-        /**
-         * @see {@link Snapshot#size()}
-         * @return
-         */
-        public int size()
-        {
-            return Ints.saturatedCast(count);
-        }
-
-        @VisibleForTesting
-        public long getSnapshotLandmark()
-        {
-            return snapshotLandmark;
-        }
-
-        @VisibleForTesting
-        public Range getBucketingRangeForValue(long value)
-        {
-            int index = findIndex(bucketOffsets, value);
-            long max = bucketOffsets[index];
-            long min = index == 0 ? 0 : 1 + bucketOffsets[index - 1];
-            return new Range(min, max);
-        }
-
-        /**
          * Return the number of registered values taking forward decay into account.
          *
          * @return the sum of all bucket values
          */
-        private long count()
+        protected long count()
         {
             long sum = 0L;
             for (int i = 0; i < decayingBuckets.length; i++)
@@ -556,6 +512,7 @@
          * @return the largest value that could have been added to this reservoir, or Long.MAX_VALUE if the reservoir
          * overflowed
          */
+        @Override
         public long getMax()
         {
             final int lastBucket = decayingBuckets.length - 1;
@@ -577,6 +534,7 @@
          * @return the mean histogram value (average of bucket offsets, weighted by count)
          * @throws IllegalStateException if any values were greater than the largest bucket threshold
          */
+        @Override
         public double getMean()
         {
             final int lastBucket = decayingBuckets.length - 1;
@@ -604,6 +562,7 @@
          *
          * @return the smallest value that could have been added to this reservoir
          */
+        @Override
         public long getMin()
         {
             for (int i = 0; i < decayingBuckets.length; i++)
@@ -622,6 +581,7 @@
          *
          * @return an estimate of the standard deviation
          */
+        @Override
         public double getStdDev()
         {
             final int lastBucket = decayingBuckets.length - 1;
@@ -651,6 +611,7 @@
             }
         }
 
+        @Override
         public void dump(OutputStream output)
         {
             try (PrintWriter out = new PrintWriter(new OutputStreamWriter(output, StandardCharsets.UTF_8)))
@@ -663,6 +624,77 @@
                 }
             }
         }
+    }
+
+    /**
+     * Represents a snapshot of the decaying histogram.
+     *
+     * The decaying buckets are copied into a snapshot array to give a consistent view for all getters. However, the
+     * copy is made without a write-lock, so other threads may change the buckets while the array is copied,
+     * which may slightly skew the quantiles and mean values upward.
+     *
+     * The decaying buckets are used for quantile calculations and mean values, but the non-decaying buckets are
+     * exposed for calls to {@link Snapshot#getValues()}.
+     */
+    static class EstimatedHistogramReservoirSnapshot extends AbstractSnapshot
+    {
+        private final long[] values;
+        private long count;
+        private long snapshotLandmark;
+        private final DecayingEstimatedHistogramReservoir reservoir;
+
+        public EstimatedHistogramReservoirSnapshot(DecayingEstimatedHistogramReservoir reservoir)
+        {
+            super(reservoir);
+            
+            int length = reservoir.size();
+            double rescaleFactor = reservoir.forwardDecayWeight(reservoir.clock.now());
+
+            this.values = new long[length];
+            this.snapshotLandmark = reservoir.decayLandmark;
+
+            for (int i = 0; i < length; i++)
+            {
+                this.decayingBuckets[i] = Math.round(reservoir.bucketValue(i, true) / rescaleFactor);
+                this.values[i] = reservoir.bucketValue(i, false);
+            }
+            this.count = count();
+            this.reservoir = reservoir;
+        }
+
+        /**
+         * Will return a snapshot of the non-decaying buckets.
+         *
+         * The values returned will not be consistent with the quantile and mean values. The caller must be aware of the
+         * offsets created by {@link EstimatedHistogram#getBucketOffsets()} to make use of the values returned.
+         *
+         * @return a snapshot of the non-decaying buckets.
+         */
+        public long[] getValues()
+        {
+            return values;
+        }
+
+        @Override
+        public int size()
+        {
+            return Ints.saturatedCast(count);
+        }
+
+        @VisibleForTesting
+        public long getSnapshotLandmark()
+        {
+            return snapshotLandmark;
+        }
+
+        @VisibleForTesting
+        public Range getBucketingRangeForValue(long value)
+        {
+            int index = findIndex(bucketOffsets, value);
+            long max = bucketOffsets[index];
+            long min = index == 0 ? 0 : 1 + bucketOffsets[index - 1];
+            return new Range(min, max);
+        }
 
         /**
          * Adds another DecayingEstimatedHistogramReservoir's Snapshot to this one. Both reservoirs must have same bucket definitions. This will rescale both snapshots if needed.
@@ -722,12 +754,51 @@
             }
         }
 
-        public void rebaseReservoir() 
+        public void rebaseReservoir()
         {
             this.reservoir.rebase(this);
         }
     }
 
+    /**
+     * Like {@link EstimatedHistogramReservoirSnapshot}, represents a snapshot of a given histogram reservoir.
+     *
+     * Unlike {@link EstimatedHistogramReservoirSnapshot}, it only copies and supports operations based on the
+     * decaying buckets from the source reservoir (e.g. percentiles, min and max). It also does not support
+     * snapshot merging or rebasing on the source reservoir.
+     */
+    private static class DecayingBucketsOnlySnapshot extends AbstractSnapshot
+    {
+        private final long count;
+
+        public DecayingBucketsOnlySnapshot(DecayingEstimatedHistogramReservoir reservoir)
+        {
+            super(reservoir);
+
+            int length = reservoir.size();
+            double rescaleFactor = reservoir.forwardDecayWeight(reservoir.clock.now());
+
+            for (int i = 0; i < length; i++)
+            {
+                this.decayingBuckets[i] = Math.round(reservoir.bucketValue(i, true) / rescaleFactor);
+            }
+
+            this.count = count();
+        }
+
+        @Override
+        public long[] getValues()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int size()
+        {
+            return Ints.saturatedCast(count);
+        }
+    }
+
     static class Range
     {
         public final long min;
@@ -752,5 +823,11 @@
         {
             return Objects.hash(min, max);
         }
+
+        @Override
+        public String toString()
+        {
+            return "[" + min + ',' + max + ']';
+        }
     }
 }
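
For reference, the landmark and decay arithmetic above now runs in nanoseconds: the forward-decay weight is exp(elapsedSeconds / MEAN_LIFETIME_IN_S), so it doubles every HALF_TIME_IN_S (60 s) since the landmark. A minimal, illustrative sketch of that arithmetic (the class name is hypothetical; the constants are the ones defined in the file above):

    import java.util.concurrent.TimeUnit;

    public class DecayWeightSketch
    {
        public static void main(String[] args)
        {
            double meanLifetimeInS = 60L / Math.log(2.0);                 // HALF_TIME_IN_S / ln(2)
            long elapsedNanos = TimeUnit.SECONDS.toNanos(60);             // one half-time after the landmark
            double weight = Math.exp(TimeUnit.NANOSECONDS.toSeconds(elapsedNanos) / meanLifetimeInS);
            System.out.println(weight);                                   // ~2.0: later samples carry double weight
        }
    }
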
diff --git a/src/java/org/apache/cassandra/metrics/DenylistMetrics.java b/src/java/org/apache/cassandra/metrics/DenylistMetrics.java
new file mode 100644
index 0000000..0372787
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/DenylistMetrics.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import com.codahale.metrics.Meter;
+
+import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
+
+public class DenylistMetrics
+{
+    private final Meter writesRejected;
+    private final Meter readsRejected;
+    private final Meter rangeReadsRejected;
+    private final Meter totalRequestsRejected;
+
+    public DenylistMetrics()
+    {
+        final MetricNameFactory factory = new DefaultNameFactory("StorageProxy", "PartitionDenylist");
+        writesRejected = Metrics.meter(factory.createMetricName("WriteRejected"));
+        readsRejected = Metrics.meter(factory.createMetricName("ReadRejected"));
+        rangeReadsRejected = Metrics.meter(factory.createMetricName("RangeReadRejected"));
+        totalRequestsRejected = Metrics.meter(factory.createMetricName("TotalRejected"));
+    }
+
+    public void incrementWritesRejected()
+    {
+        writesRejected.mark();
+        totalRequestsRejected.mark();
+    }
+
+    public void incrementReadsRejected()
+    {
+        readsRejected.mark();
+        totalRequestsRejected.mark();
+    }
+
+    public void incrementRangeReadsRejected()
+    {
+        rangeReadsRejected.mark();
+        totalRequestsRejected.mark();
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/metrics/HintedHandoffMetrics.java b/src/java/org/apache/cassandra/metrics/HintedHandoffMetrics.java
index 0261e8e..bc87218 100644
--- a/src/java/org/apache/cassandra/metrics/HintedHandoffMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/HintedHandoffMetrics.java
@@ -19,19 +19,19 @@
 
 import java.util.Map.Entry;
 
-import com.google.common.util.concurrent.MoreExecutors;
-
 import com.codahale.metrics.Counter;
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.LoadingCache;
 
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.utils.UUIDGen;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * Metrics for {@link org.apache.cassandra.hints.HintsService}.
@@ -44,12 +44,12 @@
 
     /** Total number of hints which are not stored, This is not a cache. */
     private final LoadingCache<InetAddressAndPort, DifferencingCounter> notStored = Caffeine.newBuilder()
-                                                                                            .executor(MoreExecutors.directExecutor())
+                                                                                            .executor(ImmediateExecutor.INSTANCE)
                                                                                             .build(DifferencingCounter::new);
 
     /** Total number of hints that have been created, This is not a cache. */
     private final LoadingCache<InetAddressAndPort, Counter> createdHintCounts = Caffeine.newBuilder()
-                                                                                        .executor(MoreExecutors.directExecutor())
+                                                                                        .executor(ImmediateExecutor.INSTANCE)
                                                                                         .build(address -> Metrics.counter(factory.createMetricName("Hints_created-" + address.toString().replace(':', '.'))));
 
     public void incrCreatedHints(InetAddressAndPort address)
@@ -70,7 +70,7 @@
             if (difference == 0)
                 continue;
             logger.warn("{} has {} dropped hints, because node is down past configured hint window.", entry.getKey(), difference);
-            SystemKeyspace.updateHintsDropped(entry.getKey(), UUIDGen.getTimeUUID(), (int) difference);
+            SystemKeyspace.updateHintsDropped(entry.getKey(), nextTimeUUID(), (int) difference);
         }
     }
 
diff --git a/src/java/org/apache/cassandra/metrics/HintsServiceMetrics.java b/src/java/org/apache/cassandra/metrics/HintsServiceMetrics.java
index 424f502..bcff389 100644
--- a/src/java/org/apache/cassandra/metrics/HintsServiceMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/HintsServiceMetrics.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.metrics;
 
-import com.google.common.util.concurrent.MoreExecutors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -25,6 +24,7 @@
 import com.codahale.metrics.Meter;
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.LoadingCache;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.locator.InetAddressAndPort;
 
 import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
@@ -47,7 +47,7 @@
 
     /** Histograms per-endpoint of hint delivery delays, This is not a cache. */
     private static final LoadingCache<InetAddressAndPort, Histogram> delayByEndpoint = Caffeine.newBuilder()
-                                                                                               .executor(MoreExecutors.directExecutor())
+                                                                                               .executor(ImmediateExecutor.INSTANCE)
                                                                                                .build(address -> Metrics.histogram(factory.createMetricName("Hint_delays-"+address.toString().replace(':', '.')), false));
 
     public static void updateDelayMetrics(InetAddressAndPort endpoint, long delay)
diff --git a/src/java/org/apache/cassandra/metrics/KeyspaceMetrics.java b/src/java/org/apache/cassandra/metrics/KeyspaceMetrics.java
index dadbe47..776027e 100644
--- a/src/java/org/apache/cassandra/metrics/KeyspaceMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/KeyspaceMetrics.java
@@ -153,6 +153,21 @@
     public final Histogram repairedDataTrackingOverreadRows;
     public final Timer repairedDataTrackingOverreadTime;
 
+    public final Meter clientTombstoneWarnings;
+    public final Meter clientTombstoneAborts;
+
+    public final Meter coordinatorReadSizeWarnings;
+    public final Meter coordinatorReadSizeAborts;
+    public final Histogram coordinatorReadSize;
+
+    public final Meter localReadSizeWarnings;
+    public final Meter localReadSizeAborts;
+    public final Histogram localReadSize;
+
+    public final Meter rowIndexSizeWarnings;
+    public final Meter rowIndexSizeAborts;
+    public final Histogram rowIndexSize;
+
     public final MetricNameFactory factory;
     private Keyspace keyspace;
 
@@ -235,6 +250,21 @@
 
         repairedDataTrackingOverreadRows = createKeyspaceHistogram("RepairedDataTrackingOverreadRows", false);
         repairedDataTrackingOverreadTime = createKeyspaceTimer("RepairedDataTrackingOverreadTime");
+
+        clientTombstoneWarnings = createKeyspaceMeter("ClientTombstoneWarnings");
+        clientTombstoneAborts = createKeyspaceMeter("ClientTombstoneAborts");
+
+        coordinatorReadSizeWarnings = createKeyspaceMeter("CoordinatorReadSizeWarnings");
+        coordinatorReadSizeAborts = createKeyspaceMeter("CoordinatorReadSizeAborts");
+        coordinatorReadSize = createKeyspaceHistogram("CoordinatorReadSize", false);
+
+        localReadSizeWarnings = createKeyspaceMeter("LocalReadSizeWarnings");
+        localReadSizeAborts = createKeyspaceMeter("LocalReadSizeAborts");
+        localReadSize = createKeyspaceHistogram("LocalReadSize", false);
+
+        rowIndexSizeWarnings = createKeyspaceMeter("RowIndexSizeWarnings");
+        rowIndexSizeAborts = createKeyspaceMeter("RowIndexSizeAborts");
+        rowIndexSize = createKeyspaceHistogram("RowIndexSize", false);
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/metrics/LatencyMetrics.java b/src/java/org/apache/cassandra/metrics/LatencyMetrics.java
index bf0bc1f..af8ad71 100644
--- a/src/java/org/apache/cassandra/metrics/LatencyMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/LatencyMetrics.java
@@ -89,7 +89,7 @@
         this.aliasFactory = aliasFactory;
         this.namePrefix = namePrefix;
 
-        LatencyMetricsTimer timer = new LatencyMetrics.LatencyMetricsTimer(new DecayingEstimatedHistogramReservoir());
+        LatencyMetricsTimer timer = new LatencyMetrics.LatencyMetricsTimer(CassandraMetricsRegistry.createReservoir(TimeUnit.MICROSECONDS));
         Counter counter = new LatencyMetricsCounter();
 
         if (aliasFactory == null)
diff --git a/src/java/org/apache/cassandra/metrics/MessagingMetrics.java b/src/java/org/apache/cassandra/metrics/MessagingMetrics.java
index 4948af6..bef6d08 100644
--- a/src/java/org/apache/cassandra/metrics/MessagingMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/MessagingMetrics.java
@@ -41,6 +41,7 @@
 import org.apache.cassandra.utils.StatusLogger;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.metrics.CassandraMetricsRegistry.DEFAULT_TIMER_UNIT;
 import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
 
 /**
@@ -209,8 +210,8 @@
                                       LOG_DROPPED_INTERVAL_IN_MS,
                                       droppedInternal,
                                       droppedCrossNode,
-                                      TimeUnit.NANOSECONDS.toMillis((long) droppedForVerb.metrics.internalDroppedLatency.getSnapshot().getMean()),
-                                      TimeUnit.NANOSECONDS.toMillis((long) droppedForVerb.metrics.crossNodeDroppedLatency.getSnapshot().getMean())));
+                                      DEFAULT_TIMER_UNIT.toMillis((long) droppedForVerb.metrics.internalDroppedLatency.getSnapshot().getMean()),
+                                      DEFAULT_TIMER_UNIT.toMillis((long) droppedForVerb.metrics.crossNodeDroppedLatency.getSnapshot().getMean())));
                 ++count;
             }
         }
diff --git a/src/java/org/apache/cassandra/metrics/PaxosMetrics.java b/src/java/org/apache/cassandra/metrics/PaxosMetrics.java
new file mode 100644
index 0000000..62c62d8
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/PaxosMetrics.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import com.codahale.metrics.Counter;
+
+import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
+
+public class PaxosMetrics
+{
+    private static final MetricNameFactory factory = new DefaultNameFactory("Paxos");
+    public static final Counter linearizabilityViolations = Metrics.counter(factory.createMetricName("LinearizabilityViolations"));
+    public static void initialize() {} // No-op; calling it forces class initialization so the counter above is registered.
+}
diff --git a/src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java b/src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java
index 989e09f..0319e26 100644
--- a/src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java
@@ -35,6 +35,7 @@
     public static final Meter repairedBackground = Metrics.meter(factory.createMetricName("RepairedBackground"));
     @Deprecated
     public static final Meter attempted = Metrics.meter(factory.createMetricName("Attempted"));
+    public static final Meter timedOut = Metrics.meter(factory.createMetricName("RepairTimedOut"));
 
     // Incremented when additional requests were sent during blocking read repair due to unavailable or slow nodes
     public static final Meter speculatedRead = Metrics.meter(factory.createMetricName("SpeculatedRead"));
diff --git a/src/java/org/apache/cassandra/metrics/Sampler.java b/src/java/org/apache/cassandra/metrics/Sampler.java
index cfe3f3b..b3d0f21 100644
--- a/src/java/org/apache/cassandra/metrics/Sampler.java
+++ b/src/java/org/apache/cassandra/metrics/Sampler.java
@@ -19,18 +19,19 @@
 
 import java.io.Serializable;
 import java.util.List;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.MonotonicClock;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
 public abstract class Sampler<T>
 {
     public enum SamplerType
@@ -39,22 +40,15 @@
     }
 
     @VisibleForTesting
-    MonotonicClock clock = MonotonicClock.approxTime;
+    MonotonicClock clock = MonotonicClock.Global.approxTime;
 
     @VisibleForTesting
-    static final ThreadPoolExecutor samplerExecutor = new JMXEnabledThreadPoolExecutor(1, 1,
-            TimeUnit.SECONDS,
-            new ArrayBlockingQueue<Runnable>(1000),
-            new NamedThreadFactory("Sampler"),
-            "internal");
-
-    static
-    {
-        samplerExecutor.setRejectedExecutionHandler((runnable, executor) ->
-        {
-            MessagingService.instance().metrics.recordSelfDroppedMessage(Verb._SAMPLE);
-        });
-    }
+    static final ExecutorPlus samplerExecutor = executorFactory()
+            .withJmxInternal()
+            .configureSequential("Sampler")
+            .withQueueLimit(1000)
+            .withRejectedExecutionHandler((runnable, executor) -> MessagingService.instance().metrics.recordSelfDroppedMessage(Verb._SAMPLE))
+            .build();
 
     public void addSample(final T item, final int value)
     {
@@ -94,4 +88,9 @@
             return "Sample [value=" + value + ", count=" + count + ", error=" + error + "]";
         }
     }
+
+    public static void shutdownNowAndWait(long time, TimeUnit units) throws InterruptedException, TimeoutException
+    {
+        ExecutorUtils.shutdownNowAndWait(time, units, samplerExecutor);
+    }
 }
diff --git a/src/java/org/apache/cassandra/metrics/ScalingReservoir.java b/src/java/org/apache/cassandra/metrics/ScalingReservoir.java
new file mode 100644
index 0000000..7d81ffd
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/ScalingReservoir.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.metrics;
+
+import com.codahale.metrics.Snapshot;
+
+/**
+ * A reservoir that applies a {@link ScaleFunction} to each value before recording it in the delegate reservoir.
+ */
+public class ScalingReservoir implements SnapshottingReservoir
+{
+    private final SnapshottingReservoir delegate;
+    private final ScaleFunction scaleFunc;
+
+    public ScalingReservoir(SnapshottingReservoir reservoir, ScaleFunction scaleFunc)
+    {
+        this.delegate = reservoir;
+        this.scaleFunc = scaleFunc;
+    }
+
+    @Override
+    public int size()
+    {
+        return delegate.size();
+    }
+
+    @Override
+    public void update(long value)
+    {
+        delegate.update(scaleFunc.apply(value));
+    }
+
+    @Override
+    public Snapshot getSnapshot()
+    {
+        return delegate.getSnapshot();
+    }
+
+    @Override
+    public Snapshot getPercentileSnapshot()
+    {
+        return delegate.getPercentileSnapshot();
+    }
+
+    /**
+     * Scale the input value.
+     *
+     * Not using {@linkplain java.util.function.Function<Long, Long>} to avoid auto-boxing.
+     */
+    @FunctionalInterface
+    public static interface ScaleFunction
+    {
+        long apply(long value);
+    }
+}
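
An illustrative usage sketch of ScalingReservoir (not part of the patch; the constructor arguments are assumed to mirror the createReservoir change in CassandraMetricsRegistry above): wrap a coarser-bucketed reservoir and convert nanosecond updates to the target unit up front, so snapshots need no post-hoc scaling.

    import java.util.concurrent.TimeUnit;
    import org.apache.cassandra.metrics.DecayingEstimatedHistogramReservoir;
    import org.apache.cassandra.metrics.ScalingReservoir;
    import org.apache.cassandra.metrics.SnapshottingReservoir;

    public class ScalingReservoirSketch
    {
        public static void main(String[] args)
        {
            SnapshottingReservoir underlying =
                new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION,
                                                        DecayingEstimatedHistogramReservoir.LOW_BUCKET_COUNT,
                                                        DecayingEstimatedHistogramReservoir.DEFAULT_STRIPE_COUNT);
            // Timer updates arrive in nanoseconds; record them as microseconds.
            SnapshottingReservoir micros =
                new ScalingReservoir(underlying, v -> TimeUnit.MICROSECONDS.convert(v, TimeUnit.NANOSECONDS));
            micros.update(TimeUnit.MILLISECONDS.toNanos(3));                     // recorded as 3000 µs
            System.out.println(micros.getPercentileSnapshot().getMedian());      // reported in microseconds
        }
    }
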
diff --git a/src/java/org/apache/cassandra/metrics/SnapshottingReservoir.java b/src/java/org/apache/cassandra/metrics/SnapshottingReservoir.java
new file mode 100644
index 0000000..1b6c968
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/SnapshottingReservoir.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import com.codahale.metrics.Reservoir;
+import com.codahale.metrics.Snapshot;
+
+public interface SnapshottingReservoir extends Reservoir
+{
+    Snapshot getPercentileSnapshot();
+}
diff --git a/src/java/org/apache/cassandra/metrics/SnapshottingTimer.java b/src/java/org/apache/cassandra/metrics/SnapshottingTimer.java
new file mode 100644
index 0000000..3536492
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/SnapshottingTimer.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import com.codahale.metrics.Clock;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
+
+public class SnapshottingTimer extends Timer
+{
+    private final SnapshottingReservoir reservoir;
+    
+    public SnapshottingTimer(SnapshottingReservoir reservoir)
+    {
+        this(reservoir, Clock.defaultClock());
+    }
+
+    public SnapshottingTimer(SnapshottingReservoir reservoir, Clock clock)
+    {
+        super(reservoir, clock);
+        this.reservoir = reservoir;
+    }
+
+    public Snapshot getPercentileSnapshot()
+    {
+        return reservoir.getPercentileSnapshot();
+    }
+}
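
A hedged sketch of how the pieces combine (assumed wiring, consistent with the JmxTimer change earlier in this commit, and assuming createReservoir returns a SnapshottingReservoir as both of its branches produce): because values are scaled into the timer's duration unit at update time, a SnapshottingTimer's snapshot can be exposed over JMX without the removed durationFactor multiplication.

    package org.apache.cassandra.metrics; // same package, since createReservoir's declared visibility is not shown in this diff

    import java.util.concurrent.TimeUnit;

    public class SnapshottingTimerSketch
    {
        public static void main(String[] args)
        {
            SnapshottingReservoir reservoir = CassandraMetricsRegistry.createReservoir(TimeUnit.MICROSECONDS);
            SnapshottingTimer timer = new SnapshottingTimer(reservoir);
            timer.update(500_000, TimeUnit.NANOSECONDS);                      // Timer passes nanos to the reservoir, stored as 500 µs
            System.out.println(timer.getPercentileSnapshot().getMedian());    // already in microseconds
        }
    }
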
diff --git a/src/java/org/apache/cassandra/metrics/StreamingMetrics.java b/src/java/org/apache/cassandra/metrics/StreamingMetrics.java
index 54df233..e38b605 100644
--- a/src/java/org/apache/cassandra/metrics/StreamingMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/StreamingMetrics.java
@@ -47,6 +47,8 @@
     public final Counter outgoingBytes;
     /* Measures the time taken for processing the incoming stream message after being deserialized, including the time to flush to disk. */
     public final Timer incomingProcessTime;
+    private final Counter entireSSTablesStreamedIn;
+    private final Counter partialSSTablesStreamedIn;
 
     public static StreamingMetrics get(InetAddressAndPort ip)
     {
@@ -79,5 +81,13 @@
         incomingBytes = Metrics.counter(factory.createMetricName("IncomingBytes"));
         outgoingBytes= Metrics.counter(factory.createMetricName("OutgoingBytes"));
         incomingProcessTime = Metrics.timer(factory.createMetricName("IncomingProcessTime"));
+
+        entireSSTablesStreamedIn = Metrics.counter(factory.createMetricName("EntireSSTablesStreamedIn"));
+        partialSSTablesStreamedIn = Metrics.counter(factory.createMetricName("PartialSSTablesStreamedIn"));
+    }
+
+    public void countStreamedIn(boolean isEntireSSTable)
+    {
+        (isEntireSSTable ? entireSSTablesStreamedIn : partialSSTablesStreamedIn).inc();
     }
 }
diff --git a/src/java/org/apache/cassandra/metrics/TableMetrics.java b/src/java/org/apache/cassandra/metrics/TableMetrics.java
index 09f41a1..5e7ab78 100644
--- a/src/java/org/apache/cassandra/metrics/TableMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/TableMetrics.java
@@ -17,7 +17,9 @@
  */
 package org.apache.cassandra.metrics;
 
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
 import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 import java.nio.ByteBuffer;
 import java.util.*;
@@ -37,7 +39,7 @@
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.lifecycle.SSTableSet;
 import org.apache.cassandra.db.lifecycle.View;
 import org.apache.cassandra.index.SecondaryIndexManager;
@@ -205,9 +207,9 @@
     /** ratio of how much we anticompact vs how much we could mutate the repair status*/
     public final Gauge<Double> mutatedAnticompactionGauge;
 
-    public final Timer coordinatorReadLatency;
+    public final SnapshottingTimer coordinatorReadLatency;
     public final Timer coordinatorScanLatency;
-    public final Timer coordinatorWriteLatency;
+    public final SnapshottingTimer coordinatorWriteLatency;
 
     /** Time spent waiting for free memtable space, either on- or off-heap */
     public final Histogram waitingOnFreeMemtableSpace;
@@ -259,11 +261,26 @@
     /** When sampler activated, will track the slowest local reads **/
     public final Sampler<String> topLocalReadQueryTime;
 
+    public final TableMeter clientTombstoneWarnings;
+    public final TableMeter clientTombstoneAborts;
+
+    public final TableMeter coordinatorReadSizeWarnings;
+    public final TableMeter coordinatorReadSizeAborts;
+    public final TableHistogram coordinatorReadSize;
+
+    public final TableMeter localReadSizeWarnings;
+    public final TableMeter localReadSizeAborts;
+    public final TableHistogram localReadSize;
+
+    public final TableMeter rowIndexSizeWarnings;
+    public final TableMeter rowIndexSizeAborts;
+    public final TableHistogram rowIndexSize;
+
     private static Pair<Long, Long> totalNonSystemTablesSize(Predicate<SSTableReader> predicate)
     {
         long total = 0;
         long filtered = 0;
-        for (String keyspace : Schema.instance.getNonSystemKeyspaces())
+        for (String keyspace : Schema.instance.getNonSystemKeyspaces().names())
         {
 
             Keyspace k = Schema.instance.getKeyspaceInstance(keyspace);
@@ -376,11 +393,16 @@
      *
      * @param cfs ColumnFamilyStore to measure metrics
      */
-    public TableMetrics(final ColumnFamilyStore cfs)
+    public TableMetrics(final ColumnFamilyStore cfs, ReleasableMetric memtableMetrics)
     {
         factory = new TableMetricNameFactory(cfs, "Table");
         aliasFactory = new TableMetricNameFactory(cfs, "ColumnFamily");
 
+        if (memtableMetrics != null)
+        {
+            all.add(memtableMetrics);
+        }
+
         samplers = new EnumMap<>(SamplerType.class);
         topReadPartitionFrequency = new FrequencySampler<ByteBuffer>()
         {
@@ -425,16 +447,16 @@
         samplers.put(SamplerType.LOCAL_READ_TIME, topLocalReadQueryTime);
 
         memtableColumnsCount = createTableGauge("MemtableColumnsCount", 
-                                                () -> cfs.getTracker().getView().getCurrentMemtable().getOperations());
+                                                () -> cfs.getTracker().getView().getCurrentMemtable().operationCount());
 
         // MemtableOnHeapSize naming deprecated in 4.0
         memtableOnHeapDataSize = createTableGaugeWithDeprecation("MemtableOnHeapDataSize", "MemtableOnHeapSize", 
-                                                                 () -> cfs.getTracker().getView().getCurrentMemtable().getAllocator().onHeap().owns(), 
+                                                                 () -> Memtable.getMemoryUsage(cfs.getTracker().getView().getCurrentMemtable()).ownsOnHeap,
                                                                  new GlobalTableGauge("MemtableOnHeapDataSize"));
 
         // MemtableOffHeapSize naming deprecated in 4.0
         memtableOffHeapDataSize = createTableGaugeWithDeprecation("MemtableOffHeapDataSize", "MemtableOffHeapSize", 
-                                                                  () -> cfs.getTracker().getView().getCurrentMemtable().getAllocator().offHeap().owns(), 
+                                                                  () -> Memtable.getMemoryUsage(cfs.getTracker().getView().getCurrentMemtable()).ownsOffHeap,
                                                                   new GlobalTableGauge("MemtableOnHeapDataSize"));
         
         memtableLiveDataSize = createTableGauge("MemtableLiveDataSize", 
@@ -445,10 +467,7 @@
         {
             public Long getValue()
             {
-                long size = 0;
-                for (ColumnFamilyStore cfs2 : cfs.concatWithIndexes())
-                    size += cfs2.getTracker().getView().getCurrentMemtable().getAllocator().onHeap().owns();
-                return size;
+                return getMemoryUsageWithIndexes(cfs).ownsOnHeap;
             }
         }, new GlobalTableGauge("AllMemtablesOnHeapDataSize"));
 
@@ -457,10 +476,7 @@
         {
             public Long getValue()
             {
-                long size = 0;
-                for (ColumnFamilyStore cfs2 : cfs.concatWithIndexes())
-                    size += cfs2.getTracker().getView().getCurrentMemtable().getAllocator().offHeap().owns();
-                return size;
+                return getMemoryUsageWithIndexes(cfs).ownsOffHeap;
             }
         }, new GlobalTableGauge("AllMemtablesOffHeapDataSize"));
         allMemtablesLiveDataSize = createTableGauge("AllMemtablesLiveDataSize", new Gauge<Long>()
@@ -809,10 +825,10 @@
         speculativeRetries = createTableCounter("SpeculativeRetries");
         speculativeFailedRetries = createTableCounter("SpeculativeFailedRetries");
         speculativeInsufficientReplicas = createTableCounter("SpeculativeInsufficientReplicas");
-        speculativeSampleLatencyNanos = createTableGauge("SpeculativeSampleLatencyNanos", () -> cfs.sampleReadLatencyNanos);
+        speculativeSampleLatencyNanos = createTableGauge("SpeculativeSampleLatencyNanos", () -> MICROSECONDS.toNanos(cfs.sampleReadLatencyMicros));
 
         additionalWrites = createTableCounter("AdditionalWrites");
-        additionalWriteLatencyNanos = createTableGauge("AdditionalWriteLatencyNanos", () -> cfs.additionalWriteLatencyNanos);
+        additionalWriteLatencyNanos = createTableGauge("AdditionalWriteLatencyNanos", () -> MICROSECONDS.toNanos(cfs.additionalWriteLatencyMicros));
 
         keyCacheHitRate = createTableGauge("KeyCacheHitRate", "KeyCacheHitRate", new RatioGauge()
         {
@@ -913,6 +929,30 @@
             }
             return cnt;
         });
+
+        clientTombstoneWarnings = createTableMeter("ClientTombstoneWarnings", cfs.keyspace.metric.clientTombstoneWarnings);
+        clientTombstoneAborts = createTableMeter("ClientTombstoneAborts", cfs.keyspace.metric.clientTombstoneAborts);
+
+        coordinatorReadSizeWarnings = createTableMeter("CoordinatorReadSizeWarnings", cfs.keyspace.metric.coordinatorReadSizeWarnings);
+        coordinatorReadSizeAborts = createTableMeter("CoordinatorReadSizeAborts", cfs.keyspace.metric.coordinatorReadSizeAborts);
+        coordinatorReadSize = createTableHistogram("CoordinatorReadSize", cfs.keyspace.metric.coordinatorReadSize, false);
+
+        localReadSizeWarnings = createTableMeter("LocalReadSizeWarnings", cfs.keyspace.metric.localReadSizeWarnings);
+        localReadSizeAborts = createTableMeter("LocalReadSizeAborts", cfs.keyspace.metric.localReadSizeAborts);
+        localReadSize = createTableHistogram("LocalReadSize", cfs.keyspace.metric.localReadSize, false);
+
+        rowIndexSizeWarnings = createTableMeter("RowIndexSizeWarnings", cfs.keyspace.metric.rowIndexSizeWarnings);
+        rowIndexSizeAborts = createTableMeter("RowIndexSizeAborts", cfs.keyspace.metric.rowIndexSizeAborts);
+        rowIndexSize = createTableHistogram("RowIndexSize", cfs.keyspace.metric.rowIndexSize, false);
+    }
+
+    private Memtable.MemoryUsage getMemoryUsageWithIndexes(ColumnFamilyStore cfs)
+    {
+        Memtable.MemoryUsage usage = Memtable.newMemoryUsage();
+        cfs.getTracker().getView().getCurrentMemtable().addMemoryUsageTo(usage);
+        for (ColumnFamilyStore indexCfs : cfs.indexManager.getAllIndexColumnFamilyStores())
+            indexCfs.getTracker().getView().getCurrentMemtable().addMemoryUsageTo(usage);
+        return usage;
     }
 
     public void updateSSTableIterated(int count)
@@ -1106,9 +1146,9 @@
         return new TableTimer(cfTimer, keyspaceTimer, global);
     }
 
-    protected Timer createTableTimer(String name)
+    protected SnapshottingTimer createTableTimer(String name)
     {
-        Timer tableTimer = Metrics.timer(factory.createMetricName(name), aliasFactory.createMetricName(name));
+        SnapshottingTimer tableTimer = Metrics.timer(factory.createMetricName(name), aliasFactory.createMetricName(name));
         register(name, name, tableTimer);
         return tableTimer;
     }
@@ -1261,12 +1301,12 @@
             private Context(Timer [] all)
             {
                 this.all = all;
-                start = System.nanoTime();
+                start = nanoTime();
             }
 
             public void close()
             {
-                long duration = System.nanoTime() - start;
+                long duration = nanoTime() - start;
                 for (Timer t : all)
                     t.update(duration, TimeUnit.NANOSECONDS);
             }
diff --git a/src/java/org/apache/cassandra/metrics/ThreadPoolMetrics.java b/src/java/org/apache/cassandra/metrics/ThreadPoolMetrics.java
index 3ba984a..62ef177 100644
--- a/src/java/org/apache/cassandra/metrics/ThreadPoolMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/ThreadPoolMetrics.java
@@ -21,7 +21,7 @@
 
 import com.codahale.metrics.Counter;
 import com.codahale.metrics.Gauge;
-import org.apache.cassandra.concurrent.LocalAwareExecutorService;
+import org.apache.cassandra.concurrent.ResizableThreadPool;
 import org.apache.cassandra.metrics.CassandraMetricsRegistry.MetricName;
 
 import static java.lang.String.format;
@@ -75,7 +75,7 @@
      * @param path Type of thread pool
      * @param poolName Name of thread pool to identify metrics
      */
-    public ThreadPoolMetrics(LocalAwareExecutorService executor, String path, String poolName)
+    public ThreadPoolMetrics(ResizableThreadPool executor, String path, String poolName)
     {
         this.path = path;
         this.poolName = poolName;
diff --git a/src/java/org/apache/cassandra/metrics/TopPartitionTracker.java b/src/java/org/apache/cassandra/metrics/TopPartitionTracker.java
new file mode 100644
index 0000000..56a860a
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/TopPartitionTracker.java
@@ -0,0 +1,407 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import java.io.Closeable;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.RangeTombstoneMarker;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.transform.Transformation;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.sstable.SSTable;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
+
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
+/**
+ * Tracks top partitions, currently by size and by tombstone count.
+ *
+ * Collects during full and preview (-vd) repairs, since those are the reads that cover the full partition.
+ *
+ * Note that since we can run subrange repairs there may be windows where the top partitions are not correct -
+ * for example, assume we track the top 2 partitions for this node:
+ *
+ * tokens with size:
+ * (a, 100); (b, 40); (c, 10); (d, 100); (e, 50); (f, 10)
+ * - top2: a, d
+ * now a is deleted and we run a repair for keys [a, c]
+ * - top2: b, d
+ * and when we repair [d, f]
+ * - top2: d, e
+ *
+ */
+public class TopPartitionTracker implements Closeable
+{
+    private final static String SIZES = "SIZES";
+    private final static String TOMBSTONES = "TOMBSTONES";
+
+    private final AtomicReference<TopHolder> topSizes = new AtomicReference<>();
+    private final AtomicReference<TopHolder> topTombstones = new AtomicReference<>();
+    private final TableMetadata metadata;
+    private final Future<?> scheduledSave;
+    private long lastTombstoneSave = 0;
+    private long lastSizeSave = 0;
+
+    public TopPartitionTracker(TableMetadata metadata)
+    {
+        this.metadata = metadata;
+        topSizes.set(new TopHolder(SystemKeyspace.getTopPartitions(metadata, SIZES),
+                                   DatabaseDescriptor.getMaxTopSizePartitionCount(),
+                                   DatabaseDescriptor.getMinTrackedPartitionSizeInBytes().toBytes()));
+        topTombstones.set(new TopHolder(SystemKeyspace.getTopPartitions(metadata, TOMBSTONES),
+                                        DatabaseDescriptor.getMaxTopTombstonePartitionCount(),
+                                        DatabaseDescriptor.getMinTrackedPartitionTombstoneCount()));
+        scheduledSave = ScheduledExecutors.optionalTasks.scheduleAtFixedRate(this::save, 60, 60, TimeUnit.MINUTES);
+    }
+
+    public void close()
+    {
+        scheduledSave.cancel(true);
+    }
+
+    @VisibleForTesting
+    public void save()
+    {
+        TopHolder sizes = topSizes.get();
+        if (!sizes.top.isEmpty() && sizes.lastUpdate > lastSizeSave)
+        {
+            SystemKeyspace.saveTopPartitions(metadata, SIZES, sizes.top, sizes.lastUpdate);
+            lastSizeSave = sizes.lastUpdate;
+        }
+
+        TopHolder tombstones = topTombstones.get();
+        if (!tombstones.top.isEmpty() && tombstones.lastUpdate > lastTombstoneSave)
+        {
+            SystemKeyspace.saveTopPartitions(metadata, TOMBSTONES, tombstones.top, tombstones.lastUpdate);
+            lastTombstoneSave = tombstones.lastUpdate;
+        }
+    }
+
+    public void merge(Collector collector)
+    {
+        while (true)
+        {
+            TopHolder cur = topSizes.get();
+            TopHolder newSizes = cur.merge(collector.sizes, StorageService.instance.getLocalReplicas(metadata.keyspace).ranges());
+            if (topSizes.compareAndSet(cur, newSizes))
+                break;
+        }
+
+        while (true)
+        {
+            TopHolder cur = topTombstones.get();
+            TopHolder newTombstones = cur.merge(collector.tombstones, StorageService.instance.getLocalReplicas(metadata.keyspace).ranges());
+            if (topTombstones.compareAndSet(cur, newTombstones))
+                break;
+        }
+    }
+
+    @Override
+    public String toString()
+    {
+        return "TopPartitionTracker:\n" +
+               "topSizes:\n" + topSizes.get() + '\n' +
+               "topTombstones:\n" + topTombstones.get() + '\n';
+    }
+
+    public Map<String, Long> getTopTombstonePartitionMap()
+    {
+        return topTombstones.get().toMap(metadata);
+    }
+
+    public Map<String, Long> getTopSizePartitionMap()
+    {
+        return topSizes.get().toMap(metadata);
+    }
+
+    @VisibleForTesting
+    public TopHolder topSizes()
+    {
+        return topSizes.get();
+    }
+
+    @VisibleForTesting
+    public TopHolder topTombstones()
+    {
+        return topTombstones.get();
+    }
+
+    public static class Collector
+    {
+        private final TopHolder tombstones;
+        private final TopHolder sizes;
+
+        public Collector(Collection<Range<Token>> ranges)
+        {
+            this.tombstones = new TopHolder(DatabaseDescriptor.getMaxTopTombstonePartitionCount(),
+                                            DatabaseDescriptor.getMinTrackedPartitionTombstoneCount(),
+                                            ranges);
+            this.sizes = new TopHolder(DatabaseDescriptor.getMaxTopSizePartitionCount(),
+                                       DatabaseDescriptor.getMinTrackedPartitionSizeInBytes().toBytes(),
+                                       ranges);
+        }
+
+        public void trackTombstoneCount(DecoratedKey key, long count)
+        {
+            tombstones.track(key, count);
+        }
+
+        public void trackPartitionSize(DecoratedKey key, long size)
+        {
+            sizes.track(key, size);
+        }
+
+        public String toString()
+        {
+            return "tombstones:\n"+tombstones+"\nsizes:\n"+sizes;
+        }
+    }
+
+    public static class TopHolder
+    {
+        public final NavigableSet<TopPartition> top;
+        private final int maxTopPartitionCount;
+        private final long minTrackedValue;
+        private final Collection<Range<Token>> ranges;
+        private long currentMinValue = Long.MAX_VALUE;
+        public final long lastUpdate;
+
+        private TopHolder(int maxTopPartitionCount, long minTrackedValue, Collection<Range<Token>> ranges)
+        {
+            this(maxTopPartitionCount, minTrackedValue, new TreeSet<>(), ranges, 0);
+        }
+
+        private TopHolder(int maxTopPartitionCount, long minTrackedValue, NavigableSet<TopPartition> top, Collection<Range<Token>> ranges, long lastUpdate)
+        {
+            this.maxTopPartitionCount = maxTopPartitionCount;
+            this.minTrackedValue = minTrackedValue;
+            this.top = top;
+            this.ranges = ranges;
+            this.lastUpdate = lastUpdate;
+        }
+
+        private TopHolder(StoredTopPartitions storedTopPartitions,
+                          int maxTopPartitionCount,
+                          long minTrackedValue)
+        {
+            this.maxTopPartitionCount = maxTopPartitionCount;
+            this.minTrackedValue = minTrackedValue;
+            top = new TreeSet<>();
+            this.ranges = null;
+            this.lastUpdate = storedTopPartitions.lastUpdated;
+
+            for (TopPartition topPartition : storedTopPartitions.topPartitions)
+                track(topPartition);
+        }
+
+        public void track(DecoratedKey key, long value)
+        {
+            if (value < minTrackedValue)
+                return;
+
+            if (top.size() < maxTopPartitionCount || value > currentMinValue)
+                track(new TopPartition(SSTable.getMinimalKey(key), value));
+        }
+
+        private void track(TopPartition tp)
+        {
+            top.add(tp);
+            while (top.size() > maxTopPartitionCount)
+            {
+                top.pollLast();
+                currentMinValue = top.last().value;
+            }
+            currentMinValue = Math.min(tp.value, currentMinValue);
+        }
+
+        /**
+         * Merges any pre-existing top partitions into the ones we just collected, if they lie outside of the
+         * collected ranges.
+         *
+         * This means that if a large partition is deleted it will disappear from the top partitions.
+         *
+         * @param holder the newly collected holder - this will get copied and any existing token outside of the collected ranges will get added to the copy
+         * @param ownedRanges the ranges this node owns - any existing token outside of these ranges will get dropped
+         */
+        public TopHolder merge(TopHolder holder, Collection<Range<Token>> ownedRanges)
+        {
+            TopHolder mergedHolder = holder.cloneForMerging(currentTimeMillis());
+            for (TopPartition existingTop : top)
+            {
+                if (!Range.isInRanges(existingTop.key.getToken(), mergedHolder.ranges) &&
+                    (ownedRanges.isEmpty() || Range.isInRanges(existingTop.key.getToken(), ownedRanges))) // make sure we drop any tokens that we don't own anymore
+                    mergedHolder.track(existingTop);
+            }
+            return mergedHolder;
+        }
+
+        private TopHolder cloneForMerging(long lastUpdate)
+        {
+            return new TopHolder(maxTopPartitionCount, minTrackedValue, new TreeSet<>(top), ranges, lastUpdate);
+        }
+
+        public String toString()
+        {
+            int i = 0;
+            Iterator<TopPartition> it = top.iterator();
+            StringBuilder sb = new StringBuilder();
+            while (it.hasNext())
+            {
+                i++;
+                sb.append(i).append(':').append(it.next()).append(System.lineSeparator());
+            }
+            return sb.toString();
+        }
+
+        public Map<String, Long> toMap(TableMetadata metadata)
+        {
+            Map<String, Long> topPartitionsMap = new LinkedHashMap<>();
+            for (TopPartitionTracker.TopPartition topPartition : top)
+            {
+                String key = metadata.partitionKeyType.getString(topPartition.key.getKey());
+                topPartitionsMap.put(key, topPartition.value);
+            }
+            return topPartitionsMap;
+        }
+    }
+
+    private static final Comparator<TopPartition> comparator = (o1, o2) -> {
+        int cmp = -Long.compare(o1.value, o2.value);
+        if (cmp != 0) return cmp;
+        return o1.key.compareTo(o2.key);
+    };
+
+    public static class TopPartition implements Comparable<TopPartition>
+    {
+        public final DecoratedKey key;
+        public final long value;
+
+        public TopPartition(DecoratedKey key, long value)
+        {
+            this.key = key;
+            this.value = value;
+        }
+
+        @Override
+        public int compareTo(TopPartition o)
+        {
+            return comparator.compare(this, o);
+        }
+
+        @Override
+        public String toString()
+        {
+            return "TopPartition{" +
+                   "key=" + key +
+                   ", value=" + value +
+                   '}';
+        }
+    }
+
+    public static class TombstoneCounter extends Transformation<UnfilteredRowIterator>
+    {
+        private final TopPartitionTracker.Collector collector;
+        private final int nowInSec;
+        private long tombstoneCount = 0;
+        private DecoratedKey key = null;
+
+        public TombstoneCounter(TopPartitionTracker.Collector collector, int nowInSec)
+        {
+            this.collector = collector;
+            this.nowInSec = nowInSec;
+        }
+
+        @Override
+        public Row applyToRow(Row row)
+        {
+            if (!row.deletion().isLive())
+                tombstoneCount++;
+            if (row.hasDeletion(nowInSec))
+            {
+                for (Cell<?> c : row.cells())
+                    if (c.isTombstone())
+                        tombstoneCount++;
+            }
+            return row;
+        }
+
+        @Override
+        public RangeTombstoneMarker applyToMarker(RangeTombstoneMarker marker)
+        {
+            tombstoneCount++;
+            return marker;
+        }
+
+        @Override
+        protected UnfilteredRowIterator applyToPartition(UnfilteredRowIterator partition)
+        {
+            reset(partition.partitionKey());
+            if (!partition.partitionLevelDeletion().isLive())
+                tombstoneCount++;
+            return Transformation.apply(partition, this);
+        }
+
+        private void reset(DecoratedKey key)
+        {
+            tombstoneCount = 0;
+            this.key = key;
+        }
+
+        @Override
+        public void onPartitionClose()
+        {
+            collector.trackTombstoneCount(key, tombstoneCount);
+        }
+    }
+
+    public static class StoredTopPartitions
+    {
+        public static StoredTopPartitions EMPTY = new StoredTopPartitions(Collections.emptyList(), 0);
+        public final List<TopPartition> topPartitions;
+        public final long lastUpdated;
+
+        public StoredTopPartitions(List<TopPartition> topPartitions, long lastUpdated)
+        {
+            this.topPartitions = topPartitions;
+            this.lastUpdated = lastUpdated;
+        }
+    }
+}
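Reviewer note: a minimal sketch of how the new tracker is meant to be fed, using only the API added in this file. The actual call sites live in the repair/validation code and are not part of this diff, so metadata, repairedRanges, partitionKey and the size/count values below are illustrative.

    // Hypothetical wiring, for illustration only.
    TopPartitionTracker tracker = new TopPartitionTracker(metadata);                      // one per table
    TopPartitionTracker.Collector collector = new TopPartitionTracker.Collector(repairedRanges);

    // While reading each partition during a full or preview (-vd) repair:
    collector.trackPartitionSize(partitionKey, partitionSizeInBytes);
    collector.trackTombstoneCount(partitionKey, tombstoneCount);                          // or via TombstoneCounter

    // Afterwards, publish the collected ranges; entries outside repairedRanges are kept,
    // entries inside are replaced by what was just collected.
    tracker.merge(collector);
    Map<String, Long> topBySize = tracker.getTopSizePartitionMap();                       // partition key -> bytes
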
diff --git a/src/java/org/apache/cassandra/net/AbstractMessageHandler.java b/src/java/org/apache/cassandra/net/AbstractMessageHandler.java
index d709729..e2cf68d 100644
--- a/src/java/org/apache/cassandra/net/AbstractMessageHandler.java
+++ b/src/java/org/apache/cassandra/net/AbstractMessageHandler.java
@@ -22,7 +22,6 @@
 import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 import java.util.concurrent.atomic.AtomicLongFieldUpdater;
 
@@ -34,18 +33,18 @@
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelInboundHandlerAdapter;
 import io.netty.channel.EventLoop;
+import org.apache.cassandra.metrics.ClientMetrics;
 import org.apache.cassandra.net.FrameDecoder.CorruptFrame;
 import org.apache.cassandra.net.FrameDecoder.Frame;
 import org.apache.cassandra.net.FrameDecoder.FrameProcessor;
 import org.apache.cassandra.net.FrameDecoder.IntactFrame;
 import org.apache.cassandra.net.Message.Header;
 import org.apache.cassandra.net.ResourceLimits.Limit;
-import org.apache.cassandra.utils.NoSpamLogger;
 
 import static java.lang.Math.max;
 import static java.lang.Math.min;
 import static org.apache.cassandra.net.Crc.InvalidCrc;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 /**
  * Core logic for handling inbound message deserialization and execution (in tandem with {@link FrameDecoder}).
@@ -98,8 +97,8 @@
  * the untouched frames to the correct thread pool for the verb to be deserialized there and immediately processed.
  *
  * See {@link LargeMessage} and subclasses for concrete {@link AbstractMessageHandler} implementations for details
- * of the large-message accumulating state-machine, and {@link ProcessMessage} and its inheritors for the differences
- *in execution.
+ * of the large-message accumulating state-machine, and {@link InboundMessageHandler.ProcessMessage} and its inheritors 
+ * for the differences in execution.
  *
  * # Flow control (backpressure)
  *
@@ -135,8 +134,7 @@
 public abstract class AbstractMessageHandler extends ChannelInboundHandlerAdapter implements FrameProcessor
 {
     private static final Logger logger = LoggerFactory.getLogger(AbstractMessageHandler.class);
-    private static final NoSpamLogger noSpamLogger = NoSpamLogger.getLogger(logger, 1L, TimeUnit.SECONDS);
-
+    
     protected final FrameDecoder decoder;
 
     protected final Channel channel;
@@ -304,12 +302,17 @@
         try
         {
             /*
-             * Process up to one message using supplied overriden reserves - one of them pre-allocated,
-             * and guaranteed to be enough for one message - then, if no obstacles enountered, reactivate
+             * Process up to one message using supplied overridden reserves - one of them pre-allocated,
+             * and guaranteed to be enough for one message - then, if no obstacles encountered, reactivate
              * the frame decoder using normal reserve capacities.
              */
             if (processUpToOneMessage(endpointReserve, globalReserve))
+            {
                 decoder.reactivate();
+
+                if (decoder.isActive())
+                    ClientMetrics.instance.unpauseConnection();
+            }
         }
         catch (Throwable t)
         {
@@ -321,7 +324,7 @@
 
     // return true if the handler should be reactivated - if no new hurdles were encountered,
     // like running out of the other kind of reserve capacity
-    private boolean processUpToOneMessage(Limit endpointReserve, Limit globalReserve) throws IOException
+    protected boolean processUpToOneMessage(Limit endpointReserve, Limit globalReserve) throws IOException
     {
         UpToOneMessageFrameProcessor processor = new UpToOneMessageFrameProcessor(endpointReserve, globalReserve);
         decoder.processBacklog(processor);
diff --git a/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java
index 163981c..983db3e 100644
--- a/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java
+++ b/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java
@@ -92,7 +92,7 @@
      * <p>
      * If this method returns normally, the ChannelPromise MUST be writtenAndFlushed, or else completed exceptionally.
      */
-    protected ChannelPromise beginFlush(int byteCount, int lowWaterMark, int highWaterMark) throws IOException
+    protected ChannelPromise beginFlush(long byteCount, long lowWaterMark, long highWaterMark) throws IOException
     {
         waitForSpace(byteCount, lowWaterMark, highWaterMark);
 
@@ -125,18 +125,18 @@
      *
      * If we currently have lowWaterMark or fewer bytes flushing, we are good to go.
      * If our new write will not take us over our highWaterMark, we are good to go.
-     * Otherwise we wait until either of these conditions are met.
+     * Otherwise, we wait until either of these conditions are met.
      *
      * This may only be invoked by the writer thread, never by the eventLoop.
      *
      * @throws IOException if a prior asynchronous flush failed
      */
-    private void waitForSpace(int bytesToWrite, int lowWaterMark, int highWaterMark) throws IOException
+    private void waitForSpace(long bytesToWrite, long lowWaterMark, long highWaterMark) throws IOException
     {
         // decide when we would be willing to carry on writing
         // we are always writable if we have lowWaterMark or fewer bytes, no matter how many bytes we are flushing
         // our callers should not be supplying more than (highWaterMark - lowWaterMark) bytes, but we must work correctly if they do
-        int wakeUpWhenFlushing = highWaterMark - bytesToWrite;
+        long wakeUpWhenFlushing = highWaterMark - bytesToWrite;
         waitUntilFlushed(max(lowWaterMark, wakeUpWhenFlushing), lowWaterMark);
         flushing += bytesToWrite;
     }
@@ -147,7 +147,7 @@
      *
      * This may only be invoked by the writer thread, never by the eventLoop.
      */
-    void waitUntilFlushed(int wakeUpWhenExcessBytesWritten, int signalWhenExcessBytesWritten) throws IOException
+    void waitUntilFlushed(long wakeUpWhenExcessBytesWritten, long signalWhenExcessBytesWritten) throws IOException
     {
         // we assume that we are happy to wake up at least as early as we will be signalled; otherwise we will never exit
         assert signalWhenExcessBytesWritten <= wakeUpWhenExcessBytesWritten;
@@ -265,4 +265,4 @@
         throw new UnsupportedOperationException();
     }
 
-}
\ No newline at end of file
+}
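Reviewer note: the int-to-long change on the water marks matters because the unthrottled zero-copy path further below flushes a whole file region in a single beginFlush(length, 0, length) call, and SSTable components can exceed 2 GiB. A tiny illustration of the truncation the long parameters avoid (values are made up):

    long length = 5L * 1024 * 1024 * 1024;      // a 5 GiB file region flushed in one go
    int asInt = (int) length;                   // 1073741824 -- silently truncated with int water marks
    long wakeUpWhenFlushing = length - length;  // highWaterMark - bytesToWrite, as computed in waitForSpace()
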
diff --git a/src/java/org/apache/cassandra/net/AsyncChannelPromise.java b/src/java/org/apache/cassandra/net/AsyncChannelPromise.java
index d2c9d0b..00253fa 100644
--- a/src/java/org/apache/cassandra/net/AsyncChannelPromise.java
+++ b/src/java/org/apache/cassandra/net/AsyncChannelPromise.java
@@ -22,15 +22,16 @@
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelPromise;
-import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Future; // checkstyle: permit this import
 import io.netty.util.concurrent.GenericFutureListener;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
 
 /**
  * See {@link AsyncPromise} and {@link io.netty.channel.ChannelPromise}
  *
  * This class is all boiler plate, just ensuring we return ourselves and invoke the correct Promise method.
  */
-public class AsyncChannelPromise extends AsyncPromise<Void> implements ChannelPromise
+public class AsyncChannelPromise extends AsyncPromise.WithExecutor<Void> implements ChannelPromise
 {
     private final Channel channel;
 
@@ -92,7 +93,7 @@
         return setSuccess(null);
     }
 
-    public ChannelPromise setSuccess(Void v)
+    public AsyncChannelPromise setSuccess(Void v)
     {
         super.setSuccess(v);
         return this;
@@ -103,58 +104,56 @@
         return trySuccess(null);
     }
 
-    public ChannelPromise setFailure(Throwable throwable)
+    public AsyncChannelPromise setFailure(Throwable throwable)
     {
         super.setFailure(throwable);
         return this;
     }
 
-    public ChannelPromise sync() throws InterruptedException
+    public AsyncChannelPromise sync() throws InterruptedException
     {
         super.sync();
         return this;
     }
 
-    public ChannelPromise syncUninterruptibly()
+    public AsyncChannelPromise syncUninterruptibly()
     {
         super.syncUninterruptibly();
         return this;
     }
 
-    public ChannelPromise await() throws InterruptedException
+    public AsyncChannelPromise await() throws InterruptedException
     {
         super.await();
         return this;
     }
 
-    public ChannelPromise awaitUninterruptibly()
+    public AsyncChannelPromise awaitUninterruptibly()
     {
         super.awaitUninterruptibly();
         return this;
     }
 
-    public ChannelPromise addListener(GenericFutureListener<? extends Future<? super Void>> listener)
+    public AsyncChannelPromise addListener(GenericFutureListener<? extends Future<? super Void>> listener)
     {
         super.addListener(listener);
         return this;
     }
 
-    public ChannelPromise addListeners(GenericFutureListener<? extends Future<? super Void>>... listeners)
+    public AsyncChannelPromise addListeners(GenericFutureListener<? extends Future<? super Void>>... listeners)
     {
         super.addListeners(listeners);
         return this;
     }
 
-    public ChannelPromise removeListener(GenericFutureListener<? extends Future<? super Void>> listener)
+    public AsyncChannelPromise removeListener(GenericFutureListener<? extends Future<? super Void>> listener)
     {
-        super.removeListener(listener);
-        return this;
+        throw new UnsupportedOperationException();
     }
 
-    public ChannelPromise removeListeners(GenericFutureListener<? extends Future<? super Void>>... listeners)
+    public AsyncChannelPromise removeListeners(GenericFutureListener<? extends Future<? super Void>>... listeners)
     {
-        super.removeListeners(listeners);
-        return this;
+        throw new UnsupportedOperationException();
     }
 
     public ChannelPromise unvoid()
diff --git a/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java
index 8ef0a8f..e1bcfed 100644
--- a/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java
+++ b/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java
@@ -128,4 +128,4 @@
             buffer = null;
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/net/AsyncOneResponse.java b/src/java/org/apache/cassandra/net/AsyncOneResponse.java
index ba83c84..f3c42cd 100644
--- a/src/java/org/apache/cassandra/net/AsyncOneResponse.java
+++ b/src/java/org/apache/cassandra/net/AsyncOneResponse.java
@@ -19,7 +19,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
-import io.netty.util.concurrent.ImmediateEventExecutor;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
 
 /**
  * A callback specialized for returning a value from a single target; that is, this is for messages
@@ -27,11 +27,6 @@
  */
 public class AsyncOneResponse<T> extends AsyncPromise<T> implements RequestCallback<T>
 {
-    public AsyncOneResponse()
-    {
-        super(ImmediateEventExecutor.INSTANCE);
-    }
-
     public void onResponse(Message<T> response)
     {
         setSuccess(response.payload);
diff --git a/src/java/org/apache/cassandra/net/AsyncPromise.java b/src/java/org/apache/cassandra/net/AsyncPromise.java
deleted file mode 100644
index 36bc304..0000000
--- a/src/java/org/apache/cassandra/net/AsyncPromise.java
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.net;
-
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
-import io.netty.util.concurrent.Promise;
-import io.netty.util.internal.PlatformDependent;
-import io.netty.util.internal.ThrowableUtil;
-import org.apache.cassandra.utils.concurrent.WaitQueue;
-
-import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater.*;
-
-/**
- * Netty's DefaultPromise uses a mutex to coordinate notifiers AND waiters between the eventLoop and the other threads.
- * Since we register cross-thread listeners, this has the potential to block internode messaging for an unknown
- * number of threads for an unknown period of time, if we are unlucky with the scheduler (which will certainly
- * happen, just with some unknown but low periodicity)
- *
- * At the same time, we manage some other efficiencies:
- *  - We save some space when registering listeners, especially if there is only one listener, as we perform no
- *    extra allocations in this case.
- *  - We permit efficient initial state declaration, avoiding unnecessary CAS or lock acquisitions when mutating
- *    a Promise we are ourselves constructing (and can easily add more; only those we use have been added)
- *
- * We can also make some guarantees about our behaviour here, although we primarily mirror Netty.
- * Specifically, we can guarantee that notifiers are always invoked in the order they are added (which may be true
- * for netty, but was unclear and is not declared).  This is useful for ensuring the correctness of some of our
- * behaviours in OutboundConnection without having to jump through extra hoops.
- *
- * The implementation loosely follows that of Netty's DefaultPromise, with some slight changes; notably that we have
- * no synchronisation on our listeners, instead using a CoW list that is cleared each time we notify listeners.
- *
- * We handle special values slightly differently.  We do not use a special value for null, instead using
- * a special value to indicate the result has not been set yet.  This means that once isSuccess() holds,
- * the result must be a correctly typed object (modulo generics pitfalls).
- * All special values are also instances of FailureHolder, which simplifies a number of the logical conditions.
- *
- * @param <V>
- */
-public class AsyncPromise<V> implements Promise<V>
-{
-    private static final Logger logger = LoggerFactory.getLogger(AsyncPromise.class);
-
-    private final EventExecutor executor;
-    private volatile Object result;
-    private volatile GenericFutureListener<? extends Future<? super V>> listeners;
-    private volatile WaitQueue waiting;
-    private static final AtomicReferenceFieldUpdater<AsyncPromise, Object> resultUpdater = newUpdater(AsyncPromise.class, Object.class, "result");
-    private static final AtomicReferenceFieldUpdater<AsyncPromise, GenericFutureListener> listenersUpdater = newUpdater(AsyncPromise.class, GenericFutureListener.class, "listeners");
-    private static final AtomicReferenceFieldUpdater<AsyncPromise, WaitQueue> waitingUpdater = newUpdater(AsyncPromise.class, WaitQueue.class, "waiting");
-
-    private static final FailureHolder UNSET = new FailureHolder(null);
-    private static final FailureHolder UNCANCELLABLE = new FailureHolder(null);
-    private static final FailureHolder CANCELLED = new FailureHolder(ThrowableUtil.unknownStackTrace(new CancellationException(), AsyncPromise.class, "cancel(...)"));
-
-    private static final DeferredGenericFutureListener NOTIFYING = future -> {};
-    private static interface DeferredGenericFutureListener<F extends Future<?>> extends GenericFutureListener<F> {}
-
-    private static final class FailureHolder
-    {
-        final Throwable cause;
-        private FailureHolder(Throwable cause)
-        {
-            this.cause = cause;
-        }
-    }
-
-    public AsyncPromise(EventExecutor executor)
-    {
-        this(executor, UNSET);
-    }
-
-    private AsyncPromise(EventExecutor executor, FailureHolder initialState)
-    {
-        this.executor = executor;
-        this.result = initialState;
-    }
-
-    public AsyncPromise(EventExecutor executor, GenericFutureListener<? extends Future<? super V>> listener)
-    {
-        this(executor);
-        this.listeners = listener;
-    }
-
-    AsyncPromise(EventExecutor executor, FailureHolder initialState, GenericFutureListener<? extends Future<? super V>> listener)
-    {
-        this(executor, initialState);
-        this.listeners = listener;
-    }
-
-    public static <V> AsyncPromise<V> uncancellable(EventExecutor executor)
-    {
-        return new AsyncPromise<>(executor, UNCANCELLABLE);
-    }
-
-    public static <V> AsyncPromise<V> uncancellable(EventExecutor executor, GenericFutureListener<? extends Future<? super V>> listener)
-    {
-        return new AsyncPromise<>(executor, UNCANCELLABLE);
-    }
-
-    public Promise<V> setSuccess(V v)
-    {
-        if (!trySuccess(v))
-            throw new IllegalStateException("complete already: " + this);
-        return this;
-    }
-
-    public Promise<V> setFailure(Throwable throwable)
-    {
-        if (!tryFailure(throwable))
-            throw new IllegalStateException("complete already: " + this);
-        return this;
-    }
-
-    public boolean trySuccess(V v)
-    {
-        return trySet(v);
-    }
-
-    public boolean tryFailure(Throwable throwable)
-    {
-        return trySet(new FailureHolder(throwable));
-    }
-
-    public boolean setUncancellable()
-    {
-        if (trySet(UNCANCELLABLE))
-            return true;
-        return result == UNCANCELLABLE;
-    }
-
-    public boolean cancel(boolean b)
-    {
-        return trySet(CANCELLED);
-    }
-
-    /**
-     * Shared implementation of various promise completion methods.
-     * Updates the result if it is possible to do so, returning success/failure.
-     *
-     * If the promise is UNSET the new value will succeed;
-     *          if it is UNCANCELLABLE it will succeed only if the new value is not CANCELLED
-     *          otherwise it will fail, as isDone() is implied
-     *
-     * If the update succeeds, and the new state implies isDone(), any listeners and waiters will be notified
-     */
-    private boolean trySet(Object v)
-    {
-        while (true)
-        {
-            Object current = result;
-            if (isDone(current) || (current == UNCANCELLABLE && v == CANCELLED))
-                return false;
-            if (resultUpdater.compareAndSet(this, current, v))
-            {
-                if (v != UNCANCELLABLE)
-                {
-                    notifyListeners();
-                    notifyWaiters();
-                }
-                return true;
-            }
-        }
-    }
-
-    public boolean isSuccess()
-    {
-        return isSuccess(result);
-    }
-
-    private static boolean isSuccess(Object result)
-    {
-        return !(result instanceof FailureHolder);
-    }
-
-    public boolean isCancelled()
-    {
-        return isCancelled(result);
-    }
-
-    private static boolean isCancelled(Object result)
-    {
-        return result == CANCELLED;
-    }
-
-    public boolean isDone()
-    {
-        return isDone(result);
-    }
-
-    private static boolean isDone(Object result)
-    {
-        return result != UNSET && result != UNCANCELLABLE;
-    }
-
-    public boolean isCancellable()
-    {
-        Object result = this.result;
-        return result == UNSET;
-    }
-
-    public Throwable cause()
-    {
-        Object result = this.result;
-        if (result instanceof FailureHolder)
-            return ((FailureHolder) result).cause;
-        return null;
-    }
-
-    /**
-     * if isSuccess(), returns the value, otherwise returns null
-     */
-    @SuppressWarnings("unchecked")
-    public V getNow()
-    {
-        Object result = this.result;
-        if (isSuccess(result))
-            return (V) result;
-        return null;
-    }
-
-    public V get() throws InterruptedException, ExecutionException
-    {
-        await();
-        return getWhenDone();
-    }
-
-    public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
-    {
-        if (!await(timeout, unit))
-            throw new TimeoutException();
-        return getWhenDone();
-    }
-
-    /**
-     * Shared implementation of get() after suitable await(); assumes isDone(), and returns
-     * either the success result or throws the suitable exception under failure
-     */
-    @SuppressWarnings("unchecked")
-    private V getWhenDone() throws ExecutionException
-    {
-        Object result = this.result;
-        if (isSuccess(result))
-            return (V) result;
-        if (result == CANCELLED)
-            throw new CancellationException();
-        throw new ExecutionException(((FailureHolder) result).cause);
-    }
-
-    /**
-     * waits for completion; in case of failure rethrows the original exception without a new wrapping exception
-     * so may cause problems for reporting stack traces
-     */
-    public Promise<V> sync() throws InterruptedException
-    {
-        await();
-        rethrowIfFailed();
-        return this;
-    }
-
-    /**
-     * waits for completion; in case of failure rethrows the original exception without a new wrapping exception
-     * so may cause problems for reporting stack traces
-     */
-    public Promise<V> syncUninterruptibly()
-    {
-        awaitUninterruptibly();
-        rethrowIfFailed();
-        return this;
-    }
-
-    private void rethrowIfFailed()
-    {
-        Throwable cause = this.cause();
-        if (cause != null)
-        {
-            PlatformDependent.throwException(cause);
-        }
-    }
-
-    public Promise<V> addListener(GenericFutureListener<? extends Future<? super V>> listener)
-    {
-        listenersUpdater.accumulateAndGet(this, listener, AsyncPromise::appendListener);
-        if (isDone())
-            notifyListeners();
-        return this;
-    }
-
-    public Promise<V> addListeners(GenericFutureListener<? extends Future<? super V>> ... listeners)
-    {
-        // this could be more efficient if we cared, but we do not
-        return addListener(future -> {
-            for (GenericFutureListener<? extends Future<? super V>> listener : listeners)
-                AsyncPromise.invokeListener((GenericFutureListener<Future<? super V>>)listener, future);
-        });
-    }
-
-    public Promise<V> removeListener(GenericFutureListener<? extends Future<? super V>> listener)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    public Promise<V> removeListeners(GenericFutureListener<? extends Future<? super V>> ... listeners)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    @SuppressWarnings("unchecked")
-    private void notifyListeners()
-    {
-        if (!executor.inEventLoop())
-        {
-            // submit this method, to guarantee we invoke in the submitted order
-            executor.execute(this::notifyListeners);
-            return;
-        }
-
-        if (listeners == null || listeners instanceof DeferredGenericFutureListener<?>)
-            return; // either no listeners, or we are already notifying listeners, so we'll get to the new one when ready
-
-        // first run our notifiers
-        while (true)
-        {
-            GenericFutureListener listeners = listenersUpdater.getAndSet(this, NOTIFYING);
-            if (listeners != null)
-                invokeListener(listeners, this);
-
-            if (listenersUpdater.compareAndSet(this, NOTIFYING, null))
-                return;
-        }
-    }
-
-    private static <F extends Future<?>> void invokeListener(GenericFutureListener<F> listener, F future)
-    {
-        try
-        {
-            listener.operationComplete(future);
-        }
-        catch (Throwable t)
-        {
-            logger.error("Failed to invoke listener {} to {}", listener, future, t);
-        }
-    }
-
-    private static <F extends Future<?>> GenericFutureListener<F> appendListener(GenericFutureListener<F> prevListener, GenericFutureListener<F> newListener)
-    {
-        GenericFutureListener<F> result = newListener;
-
-        if (prevListener != null && prevListener != NOTIFYING)
-        {
-            result = future -> {
-                invokeListener(prevListener, future);
-                // we will wrap the outer invocation with invokeListener, so no need to do it here too
-                newListener.operationComplete(future);
-            };
-        }
-
-        if (prevListener instanceof DeferredGenericFutureListener<?>)
-        {
-            GenericFutureListener<F> wrap = result;
-            result = (DeferredGenericFutureListener<F>) wrap::operationComplete;
-        }
-
-        return result;
-    }
-
-    public Promise<V> await() throws InterruptedException
-    {
-        await(0L, (signal, nanos) -> { signal.await(); return true; } );
-        return this;
-    }
-
-    public Promise<V> awaitUninterruptibly()
-    {
-        await(0L, (signal, nanos) -> { signal.awaitUninterruptibly(); return true; } );
-        return this;
-    }
-
-    public boolean await(long timeout, TimeUnit unit) throws InterruptedException
-    {
-        return await(unit.toNanos(timeout),
-                     (signal, nanos) -> signal.awaitUntil(nanos + System.nanoTime()));
-    }
-
-    public boolean await(long timeoutMillis) throws InterruptedException
-    {
-        return await(timeoutMillis, TimeUnit.MILLISECONDS);
-    }
-
-    public boolean awaitUninterruptibly(long timeout, TimeUnit unit)
-    {
-        return await(unit.toNanos(timeout),
-                     (signal, nanos) -> signal.awaitUntilUninterruptibly(nanos + System.nanoTime()));
-    }
-
-    public boolean awaitUninterruptibly(long timeoutMillis)
-    {
-        return awaitUninterruptibly(timeoutMillis, TimeUnit.MILLISECONDS);
-    }
-
-    interface Awaiter<T extends Throwable>
-    {
-        boolean await(WaitQueue.Signal value, long nanos) throws T;
-    }
-
-    /**
-     * A clean way to implement each variant of await using lambdas; we permit a nanos parameter
-     * so that we can implement this without any unnecessary lambda allocations, although not
-     * all implementations need the nanos parameter (i.e. those that wait indefinitely)
-     */
-    private <T extends Throwable> boolean await(long nanos, Awaiter<T> awaiter) throws T
-    {
-        if (isDone())
-            return true;
-
-        WaitQueue.Signal await = registerToWait();
-        if (null != await)
-            return awaiter.await(await, nanos);
-
-        return true;
-    }
-
-    /**
-     * Register a signal that will be notified when the promise is completed;
-     * if the promise becomes completed before this signal is registered, null is returned
-     */
-    private WaitQueue.Signal registerToWait()
-    {
-        WaitQueue waiting = this.waiting;
-        if (waiting == null && !waitingUpdater.compareAndSet(this, null, waiting = new WaitQueue()))
-            waiting = this.waiting;
-        assert waiting != null;
-
-        WaitQueue.Signal signal = waiting.register();
-        if (!isDone())
-            return signal;
-        signal.cancel();
-        return null;
-    }
-
-    private void notifyWaiters()
-    {
-        WaitQueue waiting = this.waiting;
-        if (waiting != null)
-            waiting.signalAll();
-    }
-
-    public String toString()
-    {
-        Object result = this.result;
-        if (isSuccess(result))
-            return "(success: " + result + ')';
-        if (result == UNCANCELLABLE)
-            return "(uncancellable)";
-        if (result == CANCELLED)
-            return "(cancelled)";
-        if (isDone(result))
-            return "(failure: " + ((FailureHolder) result).cause + ')';
-        return "(incomplete)";
-    }
-}
diff --git a/src/java/org/apache/cassandra/net/AsyncStreamingInputPlus.java b/src/java/org/apache/cassandra/net/AsyncStreamingInputPlus.java
index 84fb8ac..ee18a87 100644
--- a/src/java/org/apache/cassandra/net/AsyncStreamingInputPlus.java
+++ b/src/java/org/apache/cassandra/net/AsyncStreamingInputPlus.java
@@ -17,12 +17,10 @@
  */
 package org.apache.cassandra.net;
 
-import java.io.EOFException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Ints;
@@ -32,16 +30,15 @@
 import io.netty.buffer.Unpooled;
 import io.netty.channel.Channel;
 import org.apache.cassandra.io.util.RebufferingInputStream;
+import org.apache.cassandra.streaming.StreamingDataInputPlus;
 
-// TODO: rewrite
-public class AsyncStreamingInputPlus extends RebufferingInputStream
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
+
+/*
+ * This class expects a single producer (Netty event loop) and single consumer thread (StreamingDeserializerTask).
+ */
+public class AsyncStreamingInputPlus extends RebufferingInputStream implements StreamingDataInputPlus
 {
-    public static class InputTimeoutException extends IOException
-    {
-    }
-
-    private static final long DEFAULT_REBUFFER_BLOCK_IN_MILLIS = TimeUnit.MINUTES.toMillis(3);
-
     private final Channel channel;
 
     /**
@@ -51,22 +48,15 @@
 
     private final BlockingQueue<ByteBuf> queue;
 
-    private final long rebufferTimeoutNanos;
-
-    private volatile boolean isClosed;
+    private boolean isProducerClosed = false;
+    private boolean isConsumerClosed = false;
 
     public AsyncStreamingInputPlus(Channel channel)
     {
-        this(channel, DEFAULT_REBUFFER_BLOCK_IN_MILLIS, TimeUnit.MILLISECONDS);
-    }
-
-    AsyncStreamingInputPlus(Channel channel, long rebufferTimeout, TimeUnit rebufferTimeoutUnit)
-    {
         super(Unpooled.EMPTY_BUFFER.nioBuffer());
         currentBuf = Unpooled.EMPTY_BUFFER;
 
-        queue = new LinkedBlockingQueue<>();
-        rebufferTimeoutNanos = rebufferTimeoutUnit.toNanos(rebufferTimeout);
+        queue = newBlockingQueue();
 
         this.channel = channel;
         channel.config().setAutoRead(false);
@@ -79,18 +69,11 @@
      */
     public boolean append(ByteBuf buf) throws IllegalStateException
     {
-        if (isClosed) return false;
+        if (isProducerClosed)
+            return false; // buf should be released in NettyStreamingChannel.channelRead
 
         queue.add(buf);
 
-        /*
-         * it's possible for append() to race with close(), so we need to ensure
-         * that the bytebuf gets released in that scenario
-         */
-        if (isClosed)
-            while ((buf = queue.poll()) != null)
-                buf.release();
-
         return true;
     }
 
@@ -99,16 +82,17 @@
      *
      * Release open buffers and poll the {@link #queue} for more data.
      * <p>
-     * This is best, and more or less expected, to be invoked on a consuming thread (not the event loop)
-     * becasue if we block on the queue we can't fill it on the event loop (as that's where the buffers are coming from).
+     * This is invoked on a consuming thread (not the event loop)
+     * because if we block on the queue we can't fill it on the event loop (as that's where the buffers are coming from).
      *
-     * @throws EOFException when no further reading from this instance should occur. Implies this instance is closed.
-     * @throws InputTimeoutException when no new buffers arrive for reading before
-     * the {@link #rebufferTimeoutNanos} elapses while blocking. It's then not safe to reuse this instance again.
+     * @throws ClosedChannelException when no further reading from this instance should occur. Implies this instance is closed.
      */
     @Override
-    protected void reBuffer() throws EOFException, InputTimeoutException
+    protected void reBuffer() throws ClosedChannelException
     {
+        if (isConsumerClosed)
+            throw new ClosedChannelException();
+
         if (queue.isEmpty())
             channel.read();
 
@@ -117,21 +101,24 @@
         buffer = null;
 
         ByteBuf next = null;
-        try
+        do
         {
-            next = queue.poll(rebufferTimeoutNanos, TimeUnit.NANOSECONDS);
-        }
-        catch (InterruptedException ie)
+            try
+            {
+                next = queue.take(); // rely on sentinel being sent to terminate this loop
+            }
+            catch (InterruptedException ie)
+            {
+                // ignore interruptions, retry and rely on being shut down by requestClosure
+            }
+        } while (next == null);
+
+        if (next == Unpooled.EMPTY_BUFFER) // the indicator that the input is closed
         {
-            // nop
+            isConsumerClosed = true;
+            throw new ClosedChannelException();
         }
 
-        if (null == next)
-            throw new InputTimeoutException();
-
-        if (next == Unpooled.EMPTY_BUFFER) // Unpooled.EMPTY_BUFFER is the indicator that the input is closed
-            throw new EOFException();
-
         currentBuf = next;
         buffer = next.nioBuffer();
     }
@@ -183,17 +170,9 @@
         return Ints.checkedCast(count);
     }
 
-    // TODO:JEB add docs
-    // TL;DR if there's no Bufs open anywhere here, issue a channle read to try and grab data.
-    public void maybeIssueRead()
-    {
-        if (isEmpty())
-            channel.read();
-    }
-
     public boolean isEmpty()
     {
-        return queue.isEmpty() && (buffer == null || !buffer.hasRemaining());
+        return isConsumerClosed || (queue.isEmpty() && (buffer == null || !buffer.hasRemaining()));
     }
 
     /**
@@ -204,9 +183,11 @@
     @Override
     public void close()
     {
-        if (isClosed)
+        if (isConsumerClosed)
             return;
 
+        isConsumerClosed = true;
+
         if (currentBuf != null)
         {
             currentBuf.release();
@@ -218,19 +199,16 @@
         {
             try
             {
-                ByteBuf buf = queue.poll(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
+                ByteBuf buf = queue.take();
                 if (buf == Unpooled.EMPTY_BUFFER)
                     break;
-                else
-                    buf.release();
+                buf.release();
             }
             catch (InterruptedException e)
             {
-                //
+                // ignore and rely on requestClose having been called
             }
         }
-
-        isClosed = true;
     }
 
     /**
@@ -240,7 +218,11 @@
      */
     public void requestClosure()
     {
-        queue.add(Unpooled.EMPTY_BUFFER);
+        if (!isProducerClosed)
+        {
+            queue.add(Unpooled.EMPTY_BUFFER);
+            isProducerClosed = true;
+        }
     }
 
     // TODO: let's remove this like we did for AsyncChannelOutputPlus
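Reviewer note: the rewrite replaces the rebuffer timeout with a close sentinel (Unpooled.EMPTY_BUFFER). A minimal sketch of the intended single-producer/single-consumer protocol, using only methods kept or added above; process(...) is illustrative and the enclosing method is assumed to declare throws IOException.

    // Producer side, on the Netty event loop:
    in.append(payloadBuf);     // hand a decoded frame to the consumer; returns false once closed
    in.requestClosure();       // enqueue the EMPTY_BUFFER sentinel so the consumer's take() terminates

    // Consumer side, on the stream deserialization thread:
    try
    {
        while (true)
            process(in.readByte());            // reBuffer() blocks on queue.take() when drained
    }
    catch (ClosedChannelException e)           // thrown by reBuffer() once the sentinel is consumed
    {
        in.close();                            // releases any buffers still queued
    }
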
diff --git a/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java
index 3a9c075..915e8a3 100644
--- a/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java
+++ b/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java
@@ -29,13 +29,14 @@
 
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelPromise;
+import io.netty.channel.DefaultFileRegion;
 import io.netty.channel.FileRegion;
 import io.netty.channel.WriteBufferWaterMark;
 import io.netty.handler.ssl.SslHandler;
 import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.net.SharedDefaultFileRegion.SharedFileChannel;
-import org.apache.cassandra.streaming.StreamManager.StreamRateLimiter;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.utils.memory.BufferPool;
 import org.apache.cassandra.utils.memory.BufferPools;
 
@@ -50,7 +51,7 @@
  * The correctness of this class depends on the ChannelPromise we create against a Channel always being completed,
  * which appears to be a guarantee provided by Netty so long as the event loop is running.
  */
-public class AsyncStreamingOutputPlus extends AsyncChannelOutputPlus
+public class AsyncStreamingOutputPlus extends AsyncChannelOutputPlus implements StreamingDataOutputPlus
 {
     private static final Logger logger = LoggerFactory.getLogger(AsyncStreamingOutputPlus.class);
 
@@ -97,28 +98,6 @@
         return flushed() + buffer.position();
     }
 
-    public interface BufferSupplier
-    {
-        /**
-         * Request a buffer with at least the given capacity.
-         * This method may only be invoked once, and the lifetime of buffer it returns will be managed
-         * by the AsyncChannelOutputPlus it was created for.
-         */
-        ByteBuffer get(int capacity) throws IOException;
-    }
-
-    public interface Write
-    {
-        /**
-         * Write to a buffer, and flush its contents to the channel.
-         * <p>
-         * The lifetime of the buffer will be managed by the AsyncChannelOutputPlus you issue this Write to.
-         * If the method exits successfully, the contents of the buffer will be written to the channel, otherwise
-         * the buffer will be cleaned and the exception propagated to the caller.
-         */
-        void write(BufferSupplier supplier) throws IOException;
-    }
-
     /**
      * Provide a lambda that can request a buffer of suitable size, then fill the buffer and have
      * that buffer written and flushed to the underlying channel, without having to handle buffer
@@ -126,7 +105,7 @@
      * <p>
      * Any exception thrown by the Write will be propagated to the caller, after any buffer is cleaned up.
      */
-    public int writeToChannel(Write write, StreamRateLimiter limiter) throws IOException
+    public int writeToChannel(Write write, RateLimiter limiter) throws IOException
     {
         doFlush(0);
         class Holder
@@ -168,17 +147,17 @@
     /**
      * Writes all data in file channel to stream: <br>
      * * For zero-copy-streaming, 1MiB at a time, with at most 2MiB in flight at once. <br>
-     * * For streaming with SSL, 64kb at a time, with at most 32+64kb (default low water mark + batch size) in flight. <br>
+     * * For streaming with SSL, 64KiB at a time, with at most 32+64KiB (default low water mark + batch size) in flight. <br>
      * <p>
      * This method takes ownership of the provided {@link FileChannel}.
      * <p>
      * WARNING: this method blocks only for permission to write to the netty channel; it exits before
      * the {@link FileRegion}(zero-copy) or {@link ByteBuffer}(ssl) is flushed to the network.
      */
-    public long writeFileToChannel(FileChannel file, StreamRateLimiter limiter) throws IOException
+    public long writeFileToChannel(FileChannel file, RateLimiter limiter) throws IOException
     {
         if (channel.pipeline().get(SslHandler.class) != null)
-            // each batch is loaded into ByteBuffer, 64kb is more BufferPool friendly.
+            // each batch is loaded into ByteBuffer, 64KiB is more BufferPool friendly.
             return writeFileToChannel(file, limiter, 1 << 16);
         else
             // write files in 1MiB chunks, since there may be blocking work performed to fetch it from disk,
@@ -187,7 +166,7 @@
     }
 
     @VisibleForTesting
-    long writeFileToChannel(FileChannel fc, StreamRateLimiter limiter, int batchSize) throws IOException
+    long writeFileToChannel(FileChannel fc, RateLimiter limiter, int batchSize) throws IOException
     {
         final long length = fc.size();
         long bytesTransferred = 0;
@@ -224,7 +203,29 @@
     }
 
     @VisibleForTesting
-    long writeFileToChannelZeroCopy(FileChannel file, StreamRateLimiter limiter, int batchSize, int lowWaterMark, int highWaterMark) throws IOException
+    long writeFileToChannelZeroCopy(FileChannel file, RateLimiter limiter, int batchSize, int lowWaterMark, int highWaterMark) throws IOException
+    {
+        if (!limiter.isRateLimited())
+            return writeFileToChannelZeroCopyUnthrottled(file);
+        else
+            return writeFileToChannelZeroCopyThrottled(file, limiter, batchSize, lowWaterMark, highWaterMark);
+    }
+
+    private long writeFileToChannelZeroCopyUnthrottled(FileChannel file) throws IOException
+    {
+        final long length = file.size();
+
+        if (logger.isTraceEnabled())
+            logger.trace("Writing {} bytes", length);
+
+        ChannelPromise promise = beginFlush(length, 0, length);
+        final DefaultFileRegion defaultFileRegion = new DefaultFileRegion(file, 0, length);
+        channel.writeAndFlush(defaultFileRegion, promise);
+
+        return length;
+    }
+
+    private long writeFileToChannelZeroCopyThrottled(FileChannel file, RateLimiter limiter, int batchSize, int lowWaterMark, int highWaterMark) throws IOException
     {
         final long length = file.size();
         long bytesTransferred = 0;
@@ -232,9 +233,10 @@
         final SharedFileChannel sharedFile = SharedDefaultFileRegion.share(file);
         try
         {
+            int toWrite;
             while (bytesTransferred < length)
             {
-                int toWrite = (int) min(batchSize, length - bytesTransferred);
+                toWrite = (int) min(batchSize, length - bytesTransferred);
 
                 limiter.acquire(toWrite);
                 ChannelPromise promise = beginFlush(toWrite, lowWaterMark, highWaterMark);
@@ -267,4 +269,4 @@
             buffer = null;
         }
     }
-}
\ No newline at end of file
+}
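
Reviewer note: a minimal caller-side sketch of the reworked writeToChannel contract above. This is illustrative only and not part of the patch; the nested StreamingDataOutputPlus.RateLimiter type and the caller-side buffer flip are assumptions based on how existing stream writers use this API.

    // Sketch: push a small, already-serialized header through writeToChannel.
    // Assumes java.nio.ByteBuffer, java.io.IOException and the streaming types above are imported.
    static int writeHeader(AsyncStreamingOutputPlus out, byte[] header, StreamingDataOutputPlus.RateLimiter limiter) throws IOException
    {
        return out.writeToChannel(supplier -> {
            ByteBuffer buffer = supplier.get(header.length); // lifetime of the buffer is managed by the output plus
            buffer.put(header);
            buffer.flip();                                   // contents are flushed to the channel once the lambda returns
        }, limiter);
    }
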
diff --git a/src/java/org/apache/cassandra/net/Crc.java b/src/java/org/apache/cassandra/net/Crc.java
index 9cd6edd..8f63e51 100644
--- a/src/java/org/apache/cassandra/net/Crc.java
+++ b/src/java/org/apache/cassandra/net/Crc.java
@@ -45,7 +45,7 @@
         }
     }
 
-    static CRC32 crc32()
+    public static CRC32 crc32()
     {
         CRC32 crc = crc32.get();
         crc.reset();
diff --git a/src/java/org/apache/cassandra/net/FrameDecoder.java b/src/java/org/apache/cassandra/net/FrameDecoder.java
index 64e30ef..4cfbf6d 100644
--- a/src/java/org/apache/cassandra/net/FrameDecoder.java
+++ b/src/java/org/apache/cassandra/net/FrameDecoder.java
@@ -191,6 +191,14 @@
     abstract void addLastTo(ChannelPipeline pipeline);
 
     /**
+     * @return true if we are actively decoding and processing frames
+     */
+    public boolean isActive()
+    {
+        return isActive;
+    }
+
+    /**
      * For use by InboundMessageHandler (or other upstream handlers) that want to start receiving frames.
      */
     public void activate(FrameProcessor processor)
@@ -208,7 +216,7 @@
      * For use by InboundMessageHandler (or other upstream handlers) that want to resume
      * receiving frames after previously indicating that processing should be paused.
      */
-    void reactivate() throws IOException
+    public void reactivate() throws IOException
     {
         if (isActive)
             throw new IllegalStateException("Tried to reactivate an already active FrameDecoder");
@@ -282,7 +290,8 @@
     {
         decode(frames, bytes);
 
-        if (isActive) isActive = deliver(processor);
+        if (isActive)
+            isActive = deliver(processor);
     }
 
     @Override
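
Reviewer note: a hedged sketch of the pause/resume cycle that the now-public isActive()/reactivate() take part in. The FrameProcessor shape (a per-frame callback whose return value keeps the decoder active) follows the surrounding class; hasCapacity() and consume() are hypothetical stand-ins for the real upstream handler's logic.

    // Sketch: pause frame delivery under backpressure, resume later from the event loop.
    FrameDecoder.FrameProcessor processor = frame ->
    {
        if (!hasCapacity())      // hypothetical capacity check
            return false;        // returning false deactivates the decoder until reactivate() is called
        consume(frame);          // hypothetical frame consumption
        return true;
    };
    decoder.activate(processor); // start receiving frames
    // ... later, once capacity is available again, on the channel's event loop:
    if (!decoder.isActive())
        decoder.reactivate();
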
diff --git a/src/java/org/apache/cassandra/net/FrameDecoderCrc.java b/src/java/org/apache/cassandra/net/FrameDecoderCrc.java
index 238a890..2a54f5f 100644
--- a/src/java/org/apache/cassandra/net/FrameDecoderCrc.java
+++ b/src/java/org/apache/cassandra/net/FrameDecoderCrc.java
@@ -25,7 +25,6 @@
 import io.netty.channel.ChannelPipeline;
 
 import static org.apache.cassandra.net.Crc.*;
-import static org.apache.cassandra.net.Crc.updateCrc32;
 
 /**
  * Framing format that protects integrity of data in movement with CRCs (of both header and payload).
diff --git a/src/java/org/apache/cassandra/net/FrameEncoderCrc.java b/src/java/org/apache/cassandra/net/FrameEncoderCrc.java
index 1d16868..3646248 100644
--- a/src/java/org/apache/cassandra/net/FrameEncoderCrc.java
+++ b/src/java/org/apache/cassandra/net/FrameEncoderCrc.java
@@ -23,7 +23,6 @@
 
 import io.netty.buffer.ByteBuf;
 import io.netty.channel.ChannelHandler;
-import org.apache.cassandra.utils.memory.BufferPool;
 
 import static org.apache.cassandra.net.Crc.*;
 
diff --git a/src/java/org/apache/cassandra/net/FrameEncoderLegacyLZ4.java b/src/java/org/apache/cassandra/net/FrameEncoderLegacyLZ4.java
index 000fab7..fd8b36b 100644
--- a/src/java/org/apache/cassandra/net/FrameEncoderLegacyLZ4.java
+++ b/src/java/org/apache/cassandra/net/FrameEncoderLegacyLZ4.java
@@ -27,7 +27,6 @@
 import net.jpountz.xxhash.XXHashFactory;
 import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.memory.BufferPool;
 
 import static java.lang.Integer.reverseBytes;
 import static java.lang.Math.min;
diff --git a/src/java/org/apache/cassandra/net/FrameEncoderUnprotected.java b/src/java/org/apache/cassandra/net/FrameEncoderUnprotected.java
index 3d10acf..8ea36c6 100644
--- a/src/java/org/apache/cassandra/net/FrameEncoderUnprotected.java
+++ b/src/java/org/apache/cassandra/net/FrameEncoderUnprotected.java
@@ -21,7 +21,6 @@
 
 import io.netty.buffer.ByteBuf;
 import io.netty.channel.ChannelHandler;
-import org.apache.cassandra.utils.memory.BufferPool;
 
 import static org.apache.cassandra.net.FrameEncoderCrc.HEADER_LENGTH;
 import static org.apache.cassandra.net.FrameEncoderCrc.writeHeader;
diff --git a/src/java/org/apache/cassandra/net/FutureCombiner.java b/src/java/org/apache/cassandra/net/FutureCombiner.java
deleted file mode 100644
index dd094bd..0000000
--- a/src/java/org/apache/cassandra/net/FutureCombiner.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.net;
-
-import java.util.Collection;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
-import io.netty.util.concurrent.GlobalEventExecutor;
-import io.netty.util.concurrent.Promise;
-
-/**
- * Netty's PromiseCombiner is not threadsafe, and we combine futures from multiple event executors.
- *
- * This class groups a number of Future into a single logical Future, by registering a listener to each that
- * decrements a shared counter; if any of them fail, the FutureCombiner is completed with the first cause,
- * but in all scenario only completes when all underlying future have completed (exceptionally or otherwise)
- *
- * This Future is always uncancellable.
- *
- * We extend FutureDelegate, and simply provide it an uncancellable Promise that will be completed by the listeners
- * registered to the input futures.
- */
-class FutureCombiner extends FutureDelegate<Void>
-{
-    private volatile boolean failed;
-
-    private volatile Throwable firstCause;
-    private static final AtomicReferenceFieldUpdater<FutureCombiner, Throwable> firstCauseUpdater =
-        AtomicReferenceFieldUpdater.newUpdater(FutureCombiner.class, Throwable.class, "firstCause");
-
-    private volatile int waitingOn;
-    private static final AtomicIntegerFieldUpdater<FutureCombiner> waitingOnUpdater =
-        AtomicIntegerFieldUpdater.newUpdater(FutureCombiner.class, "waitingOn");
-
-    FutureCombiner(Collection<? extends Future<?>> combine)
-    {
-        this(AsyncPromise.uncancellable(GlobalEventExecutor.INSTANCE), combine);
-    }
-
-    private FutureCombiner(Promise<Void> combined, Collection<? extends Future<?>> combine)
-    {
-        super(combined);
-
-        if (0 == (waitingOn = combine.size()))
-            combined.trySuccess(null);
-
-        GenericFutureListener<? extends Future<Object>> listener = result ->
-        {
-            if (!result.isSuccess())
-            {
-                firstCauseUpdater.compareAndSet(this, null, result.cause());
-                failed = true;
-            }
-
-            if (0 == waitingOnUpdater.decrementAndGet(this))
-            {
-                if (failed)
-                    combined.tryFailure(firstCause);
-                else
-                    combined.trySuccess(null);
-            }
-        };
-
-        for (Future<?> future : combine)
-            future.addListener(listener);
-    }
-}
diff --git a/src/java/org/apache/cassandra/net/FutureDelegate.java b/src/java/org/apache/cassandra/net/FutureDelegate.java
index f04a432..b46fa7c 100644
--- a/src/java/org/apache/cassandra/net/FutureDelegate.java
+++ b/src/java/org/apache/cassandra/net/FutureDelegate.java
@@ -21,7 +21,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Future; // checkstyle: permit this import
 import io.netty.util.concurrent.GenericFutureListener;
 
 /**
@@ -29,7 +29,7 @@
  *
  * See {@link FutureCombiner} and {@link FutureResult}
  */
-class FutureDelegate<V> implements Future<V>
+public class FutureDelegate<V> implements Future<V>
 {
     final Future<V> delegate;
 
@@ -53,26 +53,6 @@
         return delegate.cause();
     }
 
-    public Future<V> addListener(GenericFutureListener<? extends Future<? super V>> genericFutureListener)
-    {
-        return delegate.addListener(genericFutureListener);
-    }
-
-    public Future<V> addListeners(GenericFutureListener<? extends Future<? super V>>... genericFutureListeners)
-    {
-        return delegate.addListeners(genericFutureListeners);
-    }
-
-    public Future<V> removeListener(GenericFutureListener<? extends Future<? super V>> genericFutureListener)
-    {
-        return delegate.removeListener(genericFutureListener);
-    }
-
-    public Future<V> removeListeners(GenericFutureListener<? extends Future<? super V>>... genericFutureListeners)
-    {
-        return delegate.removeListeners(genericFutureListeners);
-    }
-
     public Future<V> sync() throws InterruptedException
     {
         return delegate.sync();
@@ -142,4 +122,28 @@
     {
         return delegate.get(timeout, unit);
     }
+
+    @Override
+    public io.netty.util.concurrent.Future<V> addListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> genericFutureListener)
+    {
+        return delegate.addListener(genericFutureListener);
+    }
+
+    @Override
+    public io.netty.util.concurrent.Future<V> addListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... genericFutureListeners)
+    {
+        return delegate.addListeners(genericFutureListeners);
+    }
+
+    @Override
+    public io.netty.util.concurrent.Future<V> removeListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> genericFutureListener)
+    {
+        return delegate.removeListener(genericFutureListener);
+    }
+
+    @Override
+    public io.netty.util.concurrent.Future<V> removeListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... genericFutureListeners)
+    {
+        return delegate.removeListeners(genericFutureListeners);
+    }
 }
diff --git a/src/java/org/apache/cassandra/net/FutureResult.java b/src/java/org/apache/cassandra/net/FutureResult.java
index 8d43dbe..e2648c5 100644
--- a/src/java/org/apache/cassandra/net/FutureResult.java
+++ b/src/java/org/apache/cassandra/net/FutureResult.java
@@ -17,14 +17,13 @@
  */
 package org.apache.cassandra.net;
 
-import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
 
 /**
  * An abstraction for yielding a result performed by an asynchronous task,
- * for whom we may wish to offer cancellation,
- * but no other access to the underlying task
+ * for which we may wish to offer cancellation, but no other access to the underlying task
  */
-class FutureResult<V> extends FutureDelegate<V>
+public class FutureResult<V> extends FutureDelegate<V>
 {
     private final Future<?> tryCancel;
 
@@ -32,7 +31,7 @@
      * @param result the Future that will be completed by {@link #cancel}
      * @param cancel the Future that is performing the work, and to whom any cancellation attempts will be proxied
      */
-    FutureResult(Future<V> result, Future<?> cancel)
+    public FutureResult(Future<V> result, Future<?> cancel)
     {
         super(result);
         this.tryCancel = cancel;
diff --git a/src/java/org/apache/cassandra/net/GlobalBufferPoolAllocator.java b/src/java/org/apache/cassandra/net/GlobalBufferPoolAllocator.java
index 16fd5c6..f559a63 100644
--- a/src/java/org/apache/cassandra/net/GlobalBufferPoolAllocator.java
+++ b/src/java/org/apache/cassandra/net/GlobalBufferPoolAllocator.java
@@ -34,7 +34,7 @@
         super();
     }
 
-    static ByteBuf wrap(ByteBuffer buffer)
+    public static ByteBuf wrap(ByteBuffer buffer)
     {
         return new Wrapped(instance, buffer, buffer.capacity());
     }
diff --git a/src/java/org/apache/cassandra/net/HandshakeProtocol.java b/src/java/org/apache/cassandra/net/HandshakeProtocol.java
index bfdcc2c..a82c115 100644
--- a/src/java/org/apache/cassandra/net/HandshakeProtocol.java
+++ b/src/java/org/apache/cassandra/net/HandshakeProtocol.java
@@ -40,7 +40,6 @@
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
 import static org.apache.cassandra.net.Message.validateLegacyProtocolMagic;
 import static org.apache.cassandra.net.Crc.*;
-import static org.apache.cassandra.net.Crc.computeCrc32;
 import static org.apache.cassandra.net.OutboundConnectionSettings.*;
 
 /**
@@ -53,7 +52,8 @@
  * it will simply disconnect and reconnect with a more appropriate version. But if the version is acceptable, the connection
  * initiator sends the third message of the protocol, after which it considers the connection ready.
  */
-class HandshakeProtocol
+@VisibleForTesting
+public class HandshakeProtocol
 {
     static final long TIMEOUT_MILLIS = 3 * DatabaseDescriptor.getRpcTimeout(MILLISECONDS);
 
diff --git a/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java b/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java
index 4c31adf..807d026 100644
--- a/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java
+++ b/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java
@@ -21,6 +21,7 @@
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 import java.util.List;
+import java.util.NoSuchElementException;
 import java.util.concurrent.Future;
 import java.util.function.Consumer;
 
@@ -34,6 +35,7 @@
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelFutureListener;
 import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
 import io.netty.channel.ChannelInitializer;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.ChannelPipeline;
@@ -44,20 +46,22 @@
 import io.netty.handler.logging.LoggingHandler;
 import io.netty.handler.ssl.SslContext;
 import io.netty.handler.ssl.SslHandler;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.OutboundConnectionSettings.Framing;
+import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.security.SSLFactory;
-import org.apache.cassandra.streaming.async.StreamingInboundHandler;
+import org.apache.cassandra.streaming.StreamDeserializingTask;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.async.NettyStreamingChannel;
 import org.apache.cassandra.utils.memory.BufferPools;
 
 import static java.lang.Math.*;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.net.MessagingService.*;
-import static org.apache.cassandra.net.MessagingService.VERSION_40;
-import static org.apache.cassandra.net.MessagingService.current_version;
-import static org.apache.cassandra.net.MessagingService.minimum_version;
 import static org.apache.cassandra.net.SocketFactory.WIRETRACE;
 import static org.apache.cassandra.net.SocketFactory.newSslHandler;
 
@@ -67,6 +71,8 @@
 
     private static class Initializer extends ChannelInitializer<SocketChannel>
     {
+        private static final String PIPELINE_INTERNODE_ERROR_EXCLUSIONS = "Internode Error Exclusions";
+
         private final InboundConnectionSettings settings;
         private final ChannelGroup channelGroup;
         private final Consumer<ChannelPipeline> pipelineInjector;
@@ -82,6 +88,9 @@
         @Override
         public void initChannel(SocketChannel channel) throws Exception
         {
+            // if any of the handlers added below fail, they will send the error to the "head", so this handler needs to be added first
+            channel.pipeline().addFirst(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, new InternodeErrorExclusionsHandler());
+
             channelGroup.add(channel);
 
             channel.config().setOption(ChannelOption.ALLOCATOR, GlobalBufferPoolAllocator.instance);
@@ -99,14 +108,14 @@
             {
                 case UNENCRYPTED:
                     // Handler checks for SSL connection attempts and cleanly rejects them if encryption is disabled
-                    pipeline.addFirst("rejectssl", new RejectSslHandler());
+                    pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, "rejectssl", new RejectSslHandler());
                     break;
                 case OPTIONAL:
-                    pipeline.addFirst("ssl", new OptionalSslHandler(settings.encryption));
+                    pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, "ssl", new OptionalSslHandler(settings.encryption));
                     break;
                 case ENCRYPTED:
                     SslHandler sslHandler = getSslHandler("creating", channel, settings.encryption);
-                    pipeline.addFirst("ssl", sslHandler);
+                    pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, "ssl", sslHandler);
                     break;
             }
 
@@ -114,7 +123,20 @@
                 pipeline.addLast("logger", new LoggingHandler(LogLevel.INFO));
 
             channel.pipeline().addLast("handshake", new Handler(settings));
+        }
+    }
 
+    private static class InternodeErrorExclusionsHandler extends ChannelInboundHandlerAdapter
+    {
+        @Override
+        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception
+        {
+            if (DatabaseDescriptor.getInternodeErrorReportingExclusions().contains(ctx.channel().remoteAddress()))
+            {
+                logger.debug("Excluding internode exception for {}; address contained in internode_error_reporting_exclusions", ctx.channel().remoteAddress(), cause);
+                return;
+            }
+            super.exceptionCaught(ctx, cause);
         }
     }
 
@@ -138,7 +160,7 @@
             bootstrap.childOption(ChannelOption.SO_RCVBUF, socketReceiveBufferSizeInBytes);
 
         InetAddressAndPort bind = initializer.settings.bindAddress;
-        ChannelFuture channelFuture = bootstrap.bind(new InetSocketAddress(bind.address, bind.port));
+        ChannelFuture channelFuture = bootstrap.bind(new InetSocketAddress(bind.getAddress(), bind.getPort()));
 
         if (!channelFuture.awaitUninterruptibly().isSuccess())
         {
@@ -156,9 +178,7 @@
                 throw new ConfigurationException(bind + " is in use by another process.  Change listen_address:storage_port " +
                                                  "in cassandra.yaml to values that do not conflict with other services");
             }
-            // looking at the jdk source, solaris/windows bind failue messages both use the phrase "cannot assign requested address".
-            // windows message uses "Cannot" (with a capital 'C'), and solaris (a/k/a *nux) doe not. hence we search for "annot" <sigh>
-            else if (causeString.contains("annot assign requested address"))
+            else if (causeString.contains("cannot assign requested address"))
             {
                 throw new ConfigurationException("Unable to bind to address " + bind
                                                  + ". Set listen_address in cassandra.yaml to an interface you can bind to, e.g., your private IP address on EC2");
@@ -213,20 +233,30 @@
                 failHandshake(ctx);
             }, HandshakeProtocol.TIMEOUT_MILLIS, MILLISECONDS);
 
-            authenticate(ctx.channel().remoteAddress());
+            if (!authenticate(ctx.channel().remoteAddress()))
+            {
+                failHandshake(ctx);
+            }
         }
 
-        private void authenticate(SocketAddress socketAddress) throws IOException
+        private boolean authenticate(SocketAddress socketAddress) throws IOException
         {
             if (socketAddress.getClass().getSimpleName().equals("EmbeddedSocketAddress"))
-                return;
+                return true;
 
             if (!(socketAddress instanceof InetSocketAddress))
                 throw new IOException(String.format("Unexpected SocketAddress type: %s, %s", socketAddress.getClass(), socketAddress));
 
             InetSocketAddress addr = (InetSocketAddress)socketAddress;
             if (!settings.authenticate(addr.getAddress(), addr.getPort()))
-                throw new IOException("Authentication failure for inbound connection from peer " + addr);
+            {
+                // Log at info level as anything that can reach the inbound port could hit this
+                // and trigger a lot of noise.  Failed outbound connections to known cluster endpoints
+                // still fail with an ERROR message and exception to alert operators that aren't watching logs closely.
+                logger.info("Authenticate rejected inbound internode connection from {}", addr);
+                return false;
+            }
+            return true;
         }
 
         @Override
@@ -250,6 +280,7 @@
                 logger.warn("peer {} attempted to establish an unencrypted connection (broadcast address {})",
                             ctx.channel().remoteAddress(), initiate.from);
                 failHandshake(ctx);
+                return;
             }
 
             if (initiate.acceptVersions != null)
@@ -275,11 +306,13 @@
                 {
                     logger.info("peer {} only supports messaging versions higher ({}) than this node supports ({})", ctx.channel().remoteAddress(), initiate.acceptVersions.min, current_version);
                     failHandshake(ctx);
+                    return;
                 }
                 else if (initiate.acceptVersions.max < accept.min)
                 {
                     logger.info("peer {} only supports messaging versions lower ({}) than this node supports ({})", ctx.channel().remoteAddress(), initiate.acceptVersions.max, minimum_version);
                     failHandshake(ctx);
+                    return;
                 }
                 else
                 {
@@ -302,6 +335,7 @@
                     {
                         logger.warn("Received stream using protocol version {} (my version {}). Terminating connection", version, settings.acceptStreaming.max);
                         failHandshake(ctx);
+                        return;
                     }
                     setupStreamingPipeline(initiate.from, ctx);
                 }
@@ -357,14 +391,22 @@
 
         private void exceptionCaught(Channel channel, Throwable cause)
         {
-            logger.error("Failed to properly handshake with peer {}. Closing the channel.", channel.remoteAddress(), cause);
+            final SocketAddress remoteAddress = channel.remoteAddress();
+            boolean reportingExclusion = DatabaseDescriptor.getInternodeErrorReportingExclusions().contains(remoteAddress);
+
+            if (reportingExclusion)
+                logger.debug("Excluding internode exception for {}; address contained in internode_error_reporting_exclusions", remoteAddress, cause);
+            else
+                logger.error("Failed to properly handshake with peer {}. Closing the channel.", remoteAddress, cause);
+
             try
             {
                 failHandshake(channel);
             }
             catch (Throwable t)
             {
-                logger.error("Unexpected exception in {}.exceptionCaught", this.getClass().getSimpleName(), t);
+                if (!reportingExclusion)
+                    logger.error("Unexpected exception in {}.exceptionCaught", this.getClass().getSimpleName(), t);
             }
         }
 
@@ -375,9 +417,24 @@
 
         private void failHandshake(Channel channel)
         {
-            channel.close();
+            // Cancel the handshake timeout as early as possible, since the timeout task itself calls this method
             if (handshakeTimeout != null)
                 handshakeTimeout.cancel(true);
+
+            // prevent further decoding of buffered data by removing this handler before closing
+            // otherwise the pending bytes will be decoded again on close, throwing further exceptions.
+            try
+            {
+                channel.pipeline().remove(this);
+            }
+            catch (NoSuchElementException ex)
+            {
+                // possible race with the handshake timeout firing and removing this handler already
+            }
+            finally
+            {
+                channel.close();
+            }
         }
 
         private void setupStreamingPipeline(InetAddressAndPort from, ChannelHandlerContext ctx)
@@ -395,7 +452,15 @@
             }
 
             BufferPools.forNetworking().setRecycleWhenFreeForCurrentThread(false);
-            pipeline.replace(this, "streamInbound", new StreamingInboundHandler(from, current_version, null));
+
+            // we can't infer the type of streaming connection at this point,
+            // so we use CONTROL unconditionally; it's ugly but does what we want
+            // (establishes an AsyncStreamingInputPlus)
+            NettyStreamingChannel streamingChannel =
+                new NettyStreamingChannel(current_version, channel, StreamingChannel.Kind.CONTROL);
+            pipeline.replace(this, "streamInbound", streamingChannel);
+            executorFactory().startThread(String.format("Stream-Deserializer-%s-%s", from, channel.id()),
+                                          new StreamDeserializingTask(null, streamingChannel, current_version));
 
             logger.info("{} streaming connection established, version = {}, framing = {}, encryption = {}",
                         SocketFactory.channelId(from,
@@ -469,14 +534,22 @@
 
             pipeline.addLast("deserialize", handler);
 
-            pipeline.remove(this);
+            try
+            {
+                pipeline.remove(this);
+            }
+            catch (NoSuchElementException ex)
+            {
+                // possible race with the handshake timeout firing and removing this handler already
+            }
         }
     }
 
     private static SslHandler getSslHandler(String description, Channel channel, EncryptionOptions.ServerEncryptionOptions encryptionOptions) throws IOException
     {
-        final boolean buildTrustStore = true;
-        SslContext sslContext = SSLFactory.getOrCreateSslContext(encryptionOptions, buildTrustStore, SSLFactory.SocketType.SERVER);
+        final boolean verifyPeerCertificate = true;
+        SslContext sslContext = SSLFactory.getOrCreateSslContext(encryptionOptions, verifyPeerCertificate,
+                                                                 ISslContextFactory.SocketType.SERVER);
         InetSocketAddress peer = encryptionOptions.require_endpoint_verification ? (InetSocketAddress) channel.remoteAddress() : null;
         SslHandler sslHandler = newSslHandler(channel, sslContext, peer);
         logger.trace("{} inbound netty SslContext: context={}, engine={}", description, sslContext.getClass().getName(), sslHandler.engine().getClass().getName());
diff --git a/src/java/org/apache/cassandra/net/InboundConnectionSettings.java b/src/java/org/apache/cassandra/net/InboundConnectionSettings.java
index 00def4f..2eab9bc 100644
--- a/src/java/org/apache/cassandra/net/InboundConnectionSettings.java
+++ b/src/java/org/apache/cassandra/net/InboundConnectionSettings.java
@@ -73,7 +73,7 @@
 
     public boolean authenticate(InetAddressAndPort endpoint)
     {
-        return authenticator.authenticate(endpoint.address, endpoint.port);
+        return authenticator.authenticate(endpoint.getAddress(), endpoint.getPort());
     }
 
     public boolean authenticate(InetAddress address, int port)
@@ -84,7 +84,7 @@
     public String toString()
     {
         return format("address: (%s), nic: %s, encryption: %s",
-                      bindAddress, FBUtilities.getNetworkInterface(bindAddress.address), SocketFactory.encryptionOptionsSummary(encryption));
+                      bindAddress, FBUtilities.getNetworkInterface(bindAddress.getAddress()), SocketFactory.encryptionOptionsSummary(encryption));
     }
 
     public InboundConnectionSettings withAuthenticator(IInternodeAuthenticator authenticator)
@@ -168,9 +168,9 @@
     public InboundConnectionSettings withDefaults()
     {
         // this is for the socket that can be plain, only ssl, or optional plain/ssl
-        if (bindAddress.port != DatabaseDescriptor.getStoragePort() && bindAddress.port != DatabaseDescriptor.getSSLStoragePort())
+        if (bindAddress.getPort() != DatabaseDescriptor.getStoragePort() && bindAddress.getPort() != DatabaseDescriptor.getSSLStoragePort())
             throw new ConfigurationException(format("Local endpoint port %d doesn't match YAML configured port %d or legacy SSL port %d",
-                                                    bindAddress.port, DatabaseDescriptor.getStoragePort(), DatabaseDescriptor.getSSLStoragePort()));
+                                                    bindAddress.getPort(), DatabaseDescriptor.getStoragePort(), DatabaseDescriptor.getSSLStoragePort()));
 
         IInternodeAuthenticator authenticator = this.authenticator;
         ServerEncryptionOptions encryption = this.encryption;
diff --git a/src/java/org/apache/cassandra/net/InboundMessageHandler.java b/src/java/org/apache/cassandra/net/InboundMessageHandler.java
index 64d0a8c..e12fcec 100644
--- a/src/java/org/apache/cassandra/net/InboundMessageHandler.java
+++ b/src/java/org/apache/cassandra/net/InboundMessageHandler.java
@@ -34,8 +34,6 @@
 import org.apache.cassandra.io.util.DataInputBuffer;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.Message.Header;
-import org.apache.cassandra.net.FrameDecoder.Frame;
-import org.apache.cassandra.net.FrameDecoder.FrameProcessor;
 import org.apache.cassandra.net.FrameDecoder.IntactFrame;
 import org.apache.cassandra.net.FrameDecoder.CorruptFrame;
 import org.apache.cassandra.net.ResourceLimits.Limit;
@@ -45,7 +43,7 @@
 import org.apache.cassandra.utils.NoSpamLogger;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 /**
  * Implementation of {@link AbstractMessageHandler} for processing internode messages from peers.
@@ -398,7 +396,7 @@
         if (state != null) state.trace("{} message received from {}", header.verb, header.from);
 
         callbacks.onDispatched(task.size(), header);
-        header.verb.stage.execute(task, ExecutorLocals.create(state));
+        header.verb.stage.execute(ExecutorLocals.create(state), task);
     }
 
     private abstract class ProcessMessage implements Runnable
diff --git a/src/java/org/apache/cassandra/net/InboundMessageHandlers.java b/src/java/org/apache/cassandra/net/InboundMessageHandlers.java
index a706557..c7b9463 100644
--- a/src/java/org/apache/cassandra/net/InboundMessageHandlers.java
+++ b/src/java/org/apache/cassandra/net/InboundMessageHandlers.java
@@ -32,7 +32,7 @@
 import org.apache.cassandra.net.Message.Header;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 /**
  * An aggregation of {@link InboundMessageHandler}s for all connections from a peer.
diff --git a/src/java/org/apache/cassandra/net/InboundSockets.java b/src/java/org/apache/cassandra/net/InboundSockets.java
index fc57224..58cd88e 100644
--- a/src/java/org/apache/cassandra/net/InboundSockets.java
+++ b/src/java/org/apache/cassandra/net/InboundSockets.java
@@ -31,13 +31,15 @@
 import io.netty.channel.group.ChannelGroup;
 import io.netty.channel.group.DefaultChannelGroup;
 import io.netty.util.concurrent.DefaultEventExecutor;
-import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
 import io.netty.util.concurrent.GlobalEventExecutor;
 import io.netty.util.concurrent.PromiseNotifier;
 import io.netty.util.concurrent.SucceededFuture;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
 
 class InboundSockets
 {
@@ -97,15 +99,28 @@
                     throw new IllegalStateException();
                 binding = InboundConnectionInitiator.bind(settings, connections, pipelineInjector);
             }
-
-            return binding.addListener(ignore -> {
+            // isOpen is defined as "listen.isOpen", but listen is only set AFTER the binding future completes;
+            // to make sure the returned future does not complete until listen has been set, we need a new
+            // promise here that replicates "Future.map" behavior.
+            AsyncChannelPromise promise = new AsyncChannelPromise(binding.channel());
+            binding.addListener(f -> {
+                if (!f.isSuccess())
+                {
+                    synchronized (this)
+                    {
+                        binding = null;
+                    }
+                    promise.setFailure(f.cause());
+                    return;
+                }
                 synchronized (this)
                 {
-                    if (binding.isSuccess())
-                        listen = binding.channel();
+                    listen = binding.channel();
                     binding = null;
                 }
+                promise.setSuccess(null);
             });
+            return promise;
         }
 
         /**
@@ -125,7 +140,7 @@
                 if (listen != null)
                     closing.add(listen.close());
                 closing.add(connections.close());
-                new FutureCombiner(closing)
+                FutureCombiner.nettySuccessListener(closing)
                        .addListener(future -> {
                            executor.shutdownGracefully();
                            shutdownExecutors.accept(executor);
@@ -202,7 +217,7 @@
         InboundConnectionSettings       settings = template.withDefaults();
         InboundConnectionSettings legacySettings = template.withLegacySslStoragePortDefaults();
 
-        if (settings.encryption.enable_legacy_ssl_storage_port)
+        if (settings.encryption.legacy_ssl_storage_port_enabled)
         {
             out.add(new InboundSocket(legacySettings));
 
@@ -224,7 +239,7 @@
         for (InboundSocket socket : sockets)
             opening.add(socket.open(pipelineInjector));
 
-        return new FutureCombiner(opening);
+        return FutureCombiner.nettySuccessListener(opening);
     }
 
     public Future<Void> open()
@@ -232,7 +247,7 @@
         List<Future<Void>> opening = new ArrayList<>();
         for (InboundSocket socket : sockets)
             opening.add(socket.open());
-        return new FutureCombiner(opening);
+        return FutureCombiner.nettySuccessListener(opening);
     }
 
     public boolean isListening()
@@ -248,7 +263,7 @@
         List<Future<Void>> closing = new ArrayList<>();
         for (InboundSocket address : sockets)
             closing.add(address.close(shutdownExecutors));
-        return new FutureCombiner(closing);
+        return FutureCombiner.nettySuccessListener(closing);
     }
     public Future<Void> close()
     {
diff --git a/src/java/org/apache/cassandra/net/Message.java b/src/java/org/apache/cassandra/net/Message.java
index bc176bd..09e4ba3 100644
--- a/src/java/org/apache/cassandra/net/Message.java
+++ b/src/java/org/apache/cassandra/net/Message.java
@@ -23,7 +23,6 @@
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -45,9 +44,9 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.tracing.Tracing.TraceType;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MonotonicClockTranslation;
 import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
@@ -59,7 +58,8 @@
 import static org.apache.cassandra.net.MessagingService.VERSION_30;
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
 import static org.apache.cassandra.net.MessagingService.instance;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 import static org.apache.cassandra.utils.vint.VIntCoding.computeUnsignedVIntSize;
 import static org.apache.cassandra.utils.vint.VIntCoding.getUnsignedVInt;
 import static org.apache.cassandra.utils.vint.VIntCoding.skipUnsignedVInt;
@@ -78,7 +78,7 @@
     public final Header header;
     public final T payload;
 
-    private Message(Header header, T payload)
+    Message(Header header, T payload)
     {
         this.header = header;
         this.payload = payload;
@@ -93,7 +93,7 @@
     /** Whether the message has crossed the node boundary, that is whether it originated from another node. */
     public boolean isCrossNode()
     {
-        return !from().equals(FBUtilities.getBroadcastAddressAndPort());
+        return !from().equals(getBroadcastAddressAndPort());
     }
 
     /**
@@ -146,6 +146,11 @@
         return header.callBackOnFailure();
     }
 
+    public boolean trackWarnings()
+    {
+        return header.trackWarnings();
+    }
+
     /** See CASSANDRA-14145 */
     public boolean trackRepairedData()
     {
@@ -167,7 +172,7 @@
     }
 
     @Nullable
-    public UUID traceSession()
+    public TimeUUID traceSession()
     {
         return header.traceSession();
     }
@@ -198,6 +203,11 @@
         return outWithParam(nextId(), verb, payload, null, null);
     }
 
+    public static <T> Message<T> synthetic(InetAddressAndPort from, Verb verb, T payload)
+    {
+        return new Message<>(new Header(-1, verb, from, -1, -1, 0, NO_PARAMS), payload);
+    }
+
     public static <T> Message<T> out(Verb verb, T payload, long expiresAtNanos)
     {
         return outWithParam(nextId(), verb, expiresAtNanos, payload, 0, null, null);
@@ -215,6 +225,7 @@
         return outWithParam(nextId(), verb, 0, payload, flag2.addTo(flag1.addTo(0)), null, null);
     }
 
+    @VisibleForTesting
     static <T> Message<T> outWithParam(long id, Verb verb, T payload, ParamType paramType, Object paramValue)
     {
         return outWithParam(id, verb, 0, payload, paramType, paramValue);
@@ -227,10 +238,14 @@
 
     private static <T> Message<T> outWithParam(long id, Verb verb, long expiresAtNanos, T payload, int flags, ParamType paramType, Object paramValue)
     {
+        return withParam(getBroadcastAddressAndPort(), id, verb, expiresAtNanos, payload, flags, paramType, paramValue);
+    }
+
+    private static <T> Message<T> withParam(InetAddressAndPort from, long id, Verb verb, long expiresAtNanos, T payload, int flags, ParamType paramType, Object paramValue)
+    {
         if (payload == null)
             throw new IllegalArgumentException();
 
-        InetAddressAndPort from = FBUtilities.getBroadcastAddressAndPort();
         long createdAtNanos = approxTime.now();
         if (expiresAtNanos == 0)
             expiresAtNanos = verb.expiresAtNanos(createdAtNanos);
@@ -244,6 +259,18 @@
         return outWithParam(0, verb, payload, null, null);
     }
 
+    /**
+     * Used by the {@code MultiRangeReadCommand} to split multi-range responses from a replica
+     * into single-range responses.
+     */
+    public static <T> Message<T> remoteResponse(InetAddressAndPort from, Verb verb, T payload)
+    {
+        assert verb.isResponse();
+        long createdAtNanos = approxTime.now();
+        long expiresAtNanos = verb.expiresAtNanos(createdAtNanos);
+        return new Message<>(new Header(0, verb, from, createdAtNanos, expiresAtNanos, 0, NO_PARAMS), payload);
+    }
+
     /** Builds a response Message with provided payload, and all the right fields inferred from request Message */
     public <T> Message<T> responseWith(T payload)
     {
@@ -267,6 +294,11 @@
         return outWithParam(id, Verb.FAILURE_RSP, expiresAtNanos, reason, null, null);
     }
 
+    public <V> Message<V> withPayload(V newPayload)
+    {
+        return new Message<>(header, newPayload);
+    }
+
     Message<T> withCallBackOnFailure()
     {
         return new Message<>(header.withFlag(MessageFlag.CALL_BACK_ON_FAILURE), payload);
@@ -277,6 +309,23 @@
         return new Message<>(header.withParam(ParamType.FORWARD_TO, peers), payload);
     }
 
+    public Message<T> withFlag(MessageFlag flag)
+    {
+        return new Message<>(header.withFlag(flag), payload);
+    }
+
+    public Message<T> withParam(ParamType type, Object value)
+    {
+        return new Message<>(header.withParam(type, value), payload);
+    }
+
+    public Message<T> withParams(Map<ParamType, Object> values)
+    {
+        if (values == null || values.isEmpty())
+            return this;
+        return new Message<>(header.withParams(values), payload);
+    }
+
     private static final EnumMap<ParamType, Object> NO_PARAMS = new EnumMap<>(ParamType.class);
 
     private static Map<ParamType, Object> buildParams(ParamType type, Object value)
@@ -305,6 +354,16 @@
         return params;
     }
 
+    private static Map<ParamType, Object> addParams(Map<ParamType, Object> params, Map<ParamType, Object> values)
+    {
+        if (values == null || values.isEmpty())
+            return params;
+
+        params = new EnumMap<>(params);
+        params.putAll(values);
+        return params;
+    }
+
     /*
      * id generation
      */
@@ -393,6 +452,11 @@
             return new Header(id, verb, from, createdAtNanos, expiresAtNanos, flags, addParam(params, type, value));
         }
 
+        Header withParams(Map<ParamType, Object> values)
+        {
+            return new Header(id, verb, from, createdAtNanos, expiresAtNanos, flags, addParams(params, values));
+        }
+
         boolean callBackOnFailure()
         {
             return MessageFlag.CALL_BACK_ON_FAILURE.isIn(flags);
@@ -403,6 +467,11 @@
             return MessageFlag.TRACK_REPAIRED_DATA.isIn(flags);
         }
 
+        boolean trackWarnings()
+        {
+            return MessageFlag.TRACK_WARNINGS.isIn(flags);
+        }
+
         @Nullable
         ForwardingInfo forwardTo()
         {
@@ -412,13 +481,15 @@
         @Nullable
         InetAddressAndPort respondTo()
         {
-            return (InetAddressAndPort) params.get(ParamType.RESPOND_TO);
+            InetAddressAndPort respondTo = (InetAddressAndPort) params.get(ParamType.RESPOND_TO);
+            if (respondTo == null) respondTo = from;
+            return respondTo;
         }
 
         @Nullable
-        public UUID traceSession()
+        public TimeUUID traceSession()
         {
-            return (UUID) params.get(ParamType.TRACE_SESSION);
+            return (TimeUUID) params.get(ParamType.TRACE_SESSION);
         }
 
         @Nullable
@@ -427,6 +498,11 @@
             return (TraceType) params.getOrDefault(ParamType.TRACE_TYPE, TraceType.QUERY);
         }
 
+        public Map<ParamType, Object> params()
+        {
+            return Collections.unmodifiableMap(params);
+        }
+
         @Nullable
         public Map<String,byte[]> customParams()
         {
@@ -521,7 +597,7 @@
             if (expiresAtNanos == 0 && verb != null && createdAtNanos != 0)
                 expiresAtNanos = verb.expiresAtNanos(createdAtNanos);
             if (!this.verb.isResponse() && from == null) // default to sending from self if we're a request verb
-                from = FBUtilities.getBroadcastAddressAndPort();
+                from = getBroadcastAddressAndPort();
             return this;
         }
 
@@ -782,7 +858,7 @@
         {
             serializeHeaderPost40(message.header, out, version);
             out.writeUnsignedVInt(message.payloadSize(version));
-            message.getPayloadSerializer().serialize(message.payload, out, version);
+            message.verb().serializer().serialize(message.payload, out, version);
         }
 
         private <T> Message<T> deserializePost40(DataInputPlus in, InetAddressAndPort peer, int version) throws IOException
diff --git a/src/java/org/apache/cassandra/net/MessageFlag.java b/src/java/org/apache/cassandra/net/MessageFlag.java
index c74784d..441b06b 100644
--- a/src/java/org/apache/cassandra/net/MessageFlag.java
+++ b/src/java/org/apache/cassandra/net/MessageFlag.java
@@ -27,7 +27,9 @@
     /** a failure response should be sent back in case of failure */
     CALL_BACK_ON_FAILURE (0),
     /** track repaired data - see CASSANDRA-14145 */
-    TRACK_REPAIRED_DATA  (1);
+    TRACK_REPAIRED_DATA  (1),
+    /** allow creating warnings or aborting queries based on the query - see CASSANDRA-16850 */
+    TRACK_WARNINGS       (2);
 
     private final int id;
 
diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java
index cd124ad..bc39056 100644
--- a/src/java/org/apache/cassandra/net/MessagingService.java
+++ b/src/java/org/apache/cassandra/net/MessagingService.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.net;
 
+import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.List;
@@ -26,10 +27,12 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -37,6 +40,7 @@
 import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.metrics.MessagingMetrics;
 import org.apache.cassandra.service.AbstractWriteResponseHandler;
 import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.FBUtilities;
@@ -45,6 +49,7 @@
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.concurrent.Stage.MUTATION;
 import static org.apache.cassandra.config.CassandraRelevantProperties.NON_GRACEFUL_SHUTDOWN;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.Throwables.maybeFail;
 
 /**
@@ -195,7 +200,7 @@
  * implemented in {@link org.apache.cassandra.db.virtual.InternodeInboundTable} and
  * {@link org.apache.cassandra.db.virtual.InternodeOutboundTable} respectively.
  */
-public final class MessagingService extends MessagingServiceMBeanImpl
+public class MessagingService extends MessagingServiceMBeanImpl
 {
     private static final Logger logger = LoggerFactory.getLogger(MessagingService.class);
 
@@ -203,6 +208,7 @@
     public static final int VERSION_30 = 10;
     public static final int VERSION_3014 = 11;
     public static final int VERSION_40 = 12;
+    public static final int VERSION_41 = 13;
     public static final int minimum_version = VERSION_30;
     public static final int current_version = VERSION_40;
     static AcceptVersions accept_messaging = new AcceptVersions(minimum_version, current_version);
@@ -259,10 +265,65 @@
     @VisibleForTesting
     MessagingService(boolean testOnly)
     {
-        super(testOnly);
+        this(testOnly, new EndpointMessagingVersions(), new MessagingMetrics());
+    }
+
+    @VisibleForTesting
+    MessagingService(boolean testOnly, EndpointMessagingVersions versions, MessagingMetrics metrics)
+    {
+        super(testOnly, versions, metrics);
         OutboundConnections.scheduleUnusedConnectionMonitoring(this, ScheduledExecutors.scheduledTasks, 1L, TimeUnit.HOURS);
     }
 
+    public <T> org.apache.cassandra.utils.concurrent.Future<Message<T>> sendWithResult(Message message, InetAddressAndPort to)
+    {
+        AsyncPromise<Message<T>> promise = new AsyncPromise<>();
+        MessagingService.instance().sendWithCallback(message, to, new RequestCallback<T>()
+        {
+            @Override
+            public void onResponse(Message<T> msg)
+            {
+                promise.trySuccess(msg);
+            }
+
+            @Override
+            public void onFailure(InetAddressAndPort from, RequestFailureReason failureReason)
+            {
+                promise.tryFailure(new FailureResponseException(from, failureReason));
+            }
+
+            @Override
+            public boolean invokeOnFailure()
+            {
+                return true;
+            }
+        });
+        return promise;
+    }
+
+    public static class FailureResponseException extends IOException
+    {
+        private final InetAddressAndPort from;
+        private final RequestFailureReason failureReason;
+
+        public FailureResponseException(InetAddressAndPort from, RequestFailureReason failureReason)
+        {
+            super(String.format("Failure from %s: %s", from, failureReason.name()));
+            this.from = from;
+            this.failureReason = failureReason;
+        }
+
+        public InetAddressAndPort from()
+        {
+            return from;
+        }
+
+        public RequestFailureReason failureReason()
+        {
+            return failureReason;
+        }
+    }
+
     /**
      * Send a non-mutation message to a given endpoint. This method specifies a callback
      * which is invoked with the actual response.
@@ -296,10 +357,10 @@
      * @param handler callback interface which is used to pass the responses or
      *                suggest that a timeout occurred to the invoker of the send().
      */
-    public void sendWriteWithCallback(Message message, Replica to, AbstractWriteResponseHandler<?> handler, boolean allowHints)
+    public void sendWriteWithCallback(Message message, Replica to, AbstractWriteResponseHandler<?> handler)
     {
         assert message.callBackOnFailure();
-        callbacks.addWithExpiration(handler, message, to, handler.consistencyLevel(), allowHints);
+        callbacks.addWithExpiration(handler, message, to);
         send(message, to.endpoint(), null);
     }
 
@@ -315,6 +376,23 @@
         send(message, to, null);
     }
 
+    /**
+     * Send a response for the given request message back to its respond-to address. This method
+     * adheres to the fire and forget style of messaging.
+     *
+     * @param response the payload of the response to send
+     * @param message  the request message being responded to
+     */
+    public <V> void respond(V response, Message<?> message)
+    {
+        send(message.responseWith(response), message.respondTo());
+    }
+
+    public <V> void respondWithFailure(RequestFailureReason reason, Message<?> message)
+    {
+        send(Message.failureResponse(message.id(), message.expiresAtNanos(), reason), message.respondTo());
+    }
+
     public void send(Message message, InetAddressAndPort to, ConnectionType specifyConnection)
     {
         if (logger.isTraceEnabled())
@@ -462,8 +540,8 @@
             for (OutboundConnections pool : channelManagers.values())
                 closing.add(pool.close(true));
 
-            long deadline = System.nanoTime() + units.toNanos(timeout);
-            maybeFail(() -> new FutureCombiner(closing).get(timeout, units),
+            long deadline = nanoTime() + units.toNanos(timeout);
+            maybeFail(() -> FutureCombiner.nettySuccessListener(closing).get(timeout, units),
                       () -> {
                           List<ExecutorService> inboundExecutors = new ArrayList<>();
                           inboundSockets.close(synchronizedList(inboundExecutors)::add).get();
@@ -486,8 +564,8 @@
             for (OutboundConnections pool : channelManagers.values())
                 closing.add(pool.close(false));
 
-            long deadline = System.nanoTime() + units.toNanos(timeout);
-            maybeFail(() -> new FutureCombiner(closing).get(timeout, units),
+            long deadline = nanoTime() + units.toNanos(timeout);
+            maybeFail(() -> FutureCombiner.nettySuccessListener(closing).get(timeout, units),
                       () -> {
                           if (shutdownExecutors)
                               shutdownExecutors(deadline);
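
The two helpers added to MessagingService above (sendWithResult and respond/respondWithFailure) can be exercised roughly as follows. This is an illustrative sketch only: the ECHO_REQ/NoPayload pairing, the peer argument, and the hypothetical helper class are assumptions for the example, not code from this changeset.

    import java.util.concurrent.ExecutionException;
    import org.apache.cassandra.locator.InetAddressAndPort;
    import org.apache.cassandra.net.Message;
    import org.apache.cassandra.net.MessagingService;
    import org.apache.cassandra.net.NoPayload;
    import org.apache.cassandra.net.Verb;

    // Hypothetical helper class, for illustration only.
    final class MessagingExamples
    {
        // Request side: send a message and wait on the returned future for the reply.
        static void pingPeer(InetAddressAndPort peer) throws InterruptedException
        {
            Message<NoPayload> request = Message.out(Verb.ECHO_REQ, NoPayload.noPayload);
            try
            {
                Message<NoPayload> reply = MessagingService.instance().<NoPayload>sendWithResult(request, peer).get();
                System.out.println("received " + reply.verb() + " from " + reply.from());
            }
            catch (ExecutionException e)
            {
                // a failure response from the peer surfaces as MessagingService.FailureResponseException
                System.out.println("request to " + peer + " failed: " + e.getCause().getMessage());
            }
        }

        // Handler side: acknowledge an inbound request with fire-and-forget semantics.
        static void ackRequest(Message<?> inbound)
        {
            MessagingService.instance().respond(NoPayload.noPayload, inbound);
        }
    }
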
diff --git a/src/java/org/apache/cassandra/net/MessagingServiceMBeanImpl.java b/src/java/org/apache/cassandra/net/MessagingServiceMBeanImpl.java
index bea2b8c..b77fa83 100644
--- a/src/java/org/apache/cassandra/net/MessagingServiceMBeanImpl.java
+++ b/src/java/org/apache/cassandra/net/MessagingServiceMBeanImpl.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.net.UnknownHostException;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -42,11 +41,13 @@
     public final ConcurrentMap<InetAddressAndPort, OutboundConnections> channelManagers = new ConcurrentHashMap<>();
     public final ConcurrentMap<InetAddressAndPort, InboundMessageHandlers> messageHandlers = new ConcurrentHashMap<>();
 
-    public final EndpointMessagingVersions versions = new EndpointMessagingVersions();
-    public final MessagingMetrics metrics = new MessagingMetrics();
+    public final EndpointMessagingVersions versions;
+    public final MessagingMetrics metrics;
 
-    MessagingServiceMBeanImpl(boolean testOnly)
+    public MessagingServiceMBeanImpl(boolean testOnly, EndpointMessagingVersions versions, MessagingMetrics metrics)
     {
+        this.versions = versions;
+        this.metrics = metrics;
         if (!testOnly)
         {
             MBeanWrapper.instance.registerMBean(this, MBEAN_NAME);
diff --git a/src/java/org/apache/cassandra/net/OutboundConnection.java b/src/java/org/apache/cassandra/net/OutboundConnection.java
index 82eb6ce..821521b 100644
--- a/src/java/org/apache/cassandra/net/OutboundConnection.java
+++ b/src/java/org/apache/cassandra/net/OutboundConnection.java
@@ -23,7 +23,6 @@
 import java.net.InetSocketAddress;
 import java.nio.channels.ClosedChannelException;
 import java.util.Objects;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -34,7 +33,9 @@
 import javax.annotation.Nullable;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.Uninterruptibles;
+
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,8 +45,8 @@
 import io.netty.channel.ChannelInboundHandlerAdapter;
 import io.netty.channel.EventLoop;
 import io.netty.channel.unix.Errors;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.Promise;
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
+import io.netty.util.concurrent.Promise; //checkstyle: permit this import
 import io.netty.util.concurrent.PromiseNotifier;
 import io.netty.util.concurrent.SucceededFuture;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -55,6 +56,7 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static java.lang.Math.max;
 import static java.lang.Math.min;
@@ -66,8 +68,9 @@
 import static org.apache.cassandra.net.ResourceLimits.Outcome.*;
 import static org.apache.cassandra.net.SocketFactory.*;
 import static org.apache.cassandra.utils.FBUtilities.prettyPrintMemory;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 import static org.apache.cassandra.utils.Throwables.isCausedBy;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
 
 /**
  * Represents a connection type to a peer, and handles the state transitions on the connection and the netty {@link Channel}.
@@ -1031,7 +1034,7 @@
                 }
                 catch (InterruptedException e)
                 {
-                    throw new RuntimeException(e);
+                    throw new UncheckedInterruptedException(e);
                 }
             });
         }
@@ -1097,7 +1100,7 @@
 
                 if (hasPending())
                 {
-                    Promise<Result<MessagingSuccess>> result = new AsyncPromise<>(eventLoop);
+                    Promise<Result<MessagingSuccess>> result = AsyncPromise.withExecutor(eventLoop);
                     state = new Connecting(state.disconnected(), result, eventLoop.schedule(() -> attempt(result), max(100, retryRateMillis), MILLISECONDS));
                     retryRateMillis = min(1000, retryRateMillis * 2);
                 }
@@ -1196,7 +1199,7 @@
                  * is made before the endpointToVersion table is initially constructed or out
                  * of date (e.g. if outbound connections are established for gossip
                  * as a result of an inbound connection) and can result in the wrong outbound
-                 * port being selected if configured with enable_legacy_ssl_storage_port=true.
+                 * port being selected if configured with legacy_ssl_storage_port_enabled=true.
                  */
                 int knownMessagingVersion = messagingVersion();
                 if (knownMessagingVersion != messagingVersion)
@@ -1226,7 +1229,7 @@
 
             Future<Result<MessagingSuccess>> initiate()
             {
-                Promise<Result<MessagingSuccess>> result = new AsyncPromise<>(eventLoop);
+                Promise<Result<MessagingSuccess>> result = AsyncPromise.withExecutor(eventLoop);
                 state = new Connecting(state.disconnected(), result);
                 attempt(result);
                 return result;
@@ -1514,13 +1517,12 @@
 
         Runnable clearQueue = () ->
         {
-            CountDownLatch done = new CountDownLatch(1);
+            CountDownLatch done = newCountDownLatch(1);
             queue.runEventually(withLock -> {
                 withLock.consume(this::onClosed);
-                done.countDown();
+                done.decrement();
             });
-            //noinspection UnstableApiUsage
-            Uninterruptibles.awaitUninterruptibly(done);
+            done.awaitUninterruptibly();
         };
 
         if (flushQueue)
diff --git a/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java b/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
index f1fa6b7..a187068 100644
--- a/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
+++ b/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
@@ -24,6 +24,10 @@
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
+import io.netty.util.concurrent.Promise; //checkstyle: permit this import
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,14 +49,11 @@
 import io.netty.handler.ssl.SslClosedEngineException;
 import io.netty.handler.ssl.SslContext;
 import io.netty.handler.ssl.SslHandler;
-import io.netty.util.concurrent.FailedFuture;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.Promise;
 import io.netty.util.concurrent.ScheduledFuture;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.HandshakeProtocol.Initiate;
 import org.apache.cassandra.net.OutboundConnectionInitiator.Result.MessagingSuccess;
 import org.apache.cassandra.net.OutboundConnectionInitiator.Result.StreamingSuccess;
+import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.security.SSLFactory;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.memory.BufferPools;
@@ -107,7 +108,7 @@
      */
     public static Future<Result<StreamingSuccess>> initiateStreaming(EventLoop eventLoop, OutboundConnectionSettings settings, int requestMessagingVersion)
     {
-        return new OutboundConnectionInitiator<StreamingSuccess>(STREAMING, settings, requestMessagingVersion, new AsyncPromise<>(eventLoop))
+        return new OutboundConnectionInitiator<StreamingSuccess>(STREAMING, settings, requestMessagingVersion, AsyncPromise.withExecutor(eventLoop))
                .initiate(eventLoop);
     }
 
@@ -133,13 +134,13 @@
         {
             // interrupt other connections, so they must attempt to re-authenticate
             MessagingService.instance().interruptOutbound(settings.to);
-            return new FailedFuture<>(eventLoop, new IOException("authentication failed to " + settings.connectToId()));
+            return ImmediateFuture.failure(new IOException("authentication failed to " + settings.connectToId()));
         }
 
         // this is a bit ugly, but is the easiest way to ensure that if we timeout we can propagate a suitable error message
         // and still guarantee that, if on timing out we raced with success, the successfully created channel is handled
         AtomicBoolean timedout = new AtomicBoolean();
-        Future<Void> bootstrap = createBootstrap(eventLoop)
+        io.netty.util.concurrent.Future<Void> bootstrap = createBootstrap(eventLoop)
                                  .connect()
                                  .addListener(future -> {
                                      eventLoop.execute(() -> {
@@ -187,7 +188,7 @@
             bootstrap.option(ChannelOption.SO_SNDBUF, settings.socketSendBufferSizeInBytes);
 
         InetAddressAndPort remoteAddress = settings.connectTo;
-        bootstrap.remoteAddress(new InetSocketAddress(remoteAddress.address, remoteAddress.port));
+        bootstrap.remoteAddress(new InetSocketAddress(remoteAddress.getAddress(), remoteAddress.getPort()));
         return bootstrap;
     }
 
@@ -201,10 +202,11 @@
             if (settings.withEncryption())
             {
                 // check if we should actually encrypt this connection
-                SslContext sslContext = SSLFactory.getOrCreateSslContext(settings.encryption, true, SSLFactory.SocketType.CLIENT);
+                SslContext sslContext = SSLFactory.getOrCreateSslContext(settings.encryption, true,
+                                                                         ISslContextFactory.SocketType.CLIENT);
                 // for some reason channel.remoteAddress() will return null
                 InetAddressAndPort address = settings.to;
-                InetSocketAddress peer = settings.encryption.require_endpoint_verification ? new InetSocketAddress(address.address, address.port) : null;
+                InetSocketAddress peer = settings.encryption.require_endpoint_verification ? new InetSocketAddress(address.getAddress(), address.getPort()) : null;
                 SslHandler sslHandler = newSslHandler(channel, sslContext, peer);
                 logger.trace("creating outbound netty SslContext: context={}, engine={}", sslContext.getClass().getName(), sslHandler.engine().getClass().getName());
                 pipeline.addFirst("ssl", sslHandler);
diff --git a/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java b/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
index d9038ee..5b246e3 100644
--- a/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
+++ b/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
@@ -159,7 +159,7 @@
 
     public boolean authenticate()
     {
-        return authenticator.authenticate(to.address, to.port);
+        return authenticator.authenticate(to.getAddress(), to.getPort());
     }
 
     public boolean withEncryption()
diff --git a/src/java/org/apache/cassandra/net/OutboundConnections.java b/src/java/org/apache/cassandra/net/OutboundConnections.java
index 3f607d1..ad87ec5 100644
--- a/src/java/org/apache/cassandra/net/OutboundConnections.java
+++ b/src/java/org/apache/cassandra/net/OutboundConnections.java
@@ -27,22 +27,30 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.carrotsearch.hppc.ObjectObjectHashMap;
-import io.netty.util.concurrent.Future;
-import org.apache.cassandra.config.Config;
-import org.apache.cassandra.gms.Gossiper;
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.metrics.InternodeOutboundMetrics;
 import org.apache.cassandra.utils.NoSpamLogger;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
+import static java.lang.Integer.getInteger;
+import static java.lang.Math.max;
+import static org.apache.cassandra.config.Config.PROPERTY_PREFIX;
+import static org.apache.cassandra.gms.Gossiper.instance;
+import static org.apache.cassandra.net.FrameEncoderCrc.HEADER_AND_TRAILER_LENGTH;
+import static org.apache.cassandra.net.LegacyLZ4Constants.HEADER_LENGTH;
 import static org.apache.cassandra.net.MessagingService.current_version;
 import static org.apache.cassandra.net.ConnectionType.URGENT_MESSAGES;
 import static org.apache.cassandra.net.ConnectionType.LARGE_MESSAGES;
 import static org.apache.cassandra.net.ConnectionType.SMALL_MESSAGES;
+import static org.apache.cassandra.net.ResourceLimits.*;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
 /**
  * Groups a set of outbound connections to a given peer, and routes outgoing messages to the appropriate connection
@@ -54,12 +62,12 @@
     private static final Logger logger = LoggerFactory.getLogger(OutboundConnections.class);
 
     @VisibleForTesting
-    public static final int LARGE_MESSAGE_THRESHOLD = Integer.getInteger(Config.PROPERTY_PREFIX + "otcp_large_message_threshold", 1024 * 64)
-    - Math.max(Math.max(LegacyLZ4Constants.HEADER_LENGTH, FrameEncoderCrc.HEADER_AND_TRAILER_LENGTH), FrameEncoderLZ4.HEADER_AND_TRAILER_LENGTH);
+    public static final int LARGE_MESSAGE_THRESHOLD = getInteger(PROPERTY_PREFIX + "otcp_large_message_threshold", 1024 * 64)
+    - max(max(HEADER_LENGTH, HEADER_AND_TRAILER_LENGTH), FrameEncoderLZ4.HEADER_AND_TRAILER_LENGTH);
 
-    private final SimpleCondition metricsReady = new SimpleCondition();
+    private final Condition metricsReady = newOneTimeCondition();
     private volatile InternodeOutboundMetrics metrics;
-    private final ResourceLimits.Limit reserveCapacity;
+    private final Limit reserveCapacity;
 
     private OutboundConnectionSettings template;
     public final OutboundConnection small;
@@ -69,8 +77,8 @@
     private OutboundConnections(OutboundConnectionSettings template)
     {
         this.template = template = template.withDefaultReserveLimits();
-        reserveCapacity = new ResourceLimits.Concurrent(template.applicationSendQueueReserveEndpointCapacityInBytes);
-        ResourceLimits.EndpointAndGlobal reserveCapacityInBytes = new ResourceLimits.EndpointAndGlobal(reserveCapacity, template.applicationSendQueueReserveGlobalCapacityInBytes);
+        reserveCapacity = new Concurrent(template.applicationSendQueueReserveEndpointCapacityInBytes);
+        EndpointAndGlobal reserveCapacityInBytes = new EndpointAndGlobal(reserveCapacity, template.applicationSendQueueReserveGlobalCapacityInBytes);
         this.small = new OutboundConnection(SMALL_MESSAGES, template, reserveCapacityInBytes);
         this.large = new OutboundConnection(LARGE_MESSAGES, template, reserveCapacityInBytes);
         this.urgent = new OutboundConnection(URGENT_MESSAGES, template, reserveCapacityInBytes);
@@ -117,7 +125,7 @@
     synchronized Future<Void> reconnectWithNewIp(InetAddressAndPort addr)
     {
         template = template.withConnectTo(addr);
-        return new FutureCombiner(
+        return FutureCombiner.nettySuccessListener(
             apply(c -> c.reconnectWith(template))
         );
     }
@@ -131,7 +139,7 @@
     {
         // immediately release our metrics, so that if we need to re-open immediately we can safely register a new one
         releaseMetrics();
-        return new FutureCombiner(
+        return FutureCombiner.nettySuccessListener(
             apply(c -> c.scheduleClose(time, unit, flushQueues))
         );
     }
@@ -145,7 +153,7 @@
     {
         // immediately release our metrics, so that if we need to re-open immediately we can safely register a new one
         releaseMetrics();
-        return new FutureCombiner(
+        return FutureCombiner.nettySuccessListener(
             apply(c -> c.close(flushQueues))
         );
     }
@@ -158,7 +166,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
 
         if (metrics != null)
@@ -299,7 +307,7 @@
                     continue;
 
                 if (cur.small == prev.small && cur.large == prev.large && cur.urgent == prev.urgent
-                    && !Gossiper.instance.isKnownEndpoint(connections.template.to))
+                    && !instance.isKnownEndpoint(connections.template.to))
                 {
                     logger.info("Closing outbound connections to {}, as inactive and not known by Gossiper",
                                 connections.template.to);
diff --git a/src/java/org/apache/cassandra/net/OutboundMessageQueue.java b/src/java/org/apache/cassandra/net/OutboundMessageQueue.java
index d7360a0..8280055 100644
--- a/src/java/org/apache/cassandra/net/OutboundMessageQueue.java
+++ b/src/java/org/apache/cassandra/net/OutboundMessageQueue.java
@@ -17,10 +17,8 @@
  */
 package org.apache.cassandra.net;
 
-import java.util.Collections;
 import java.util.IdentityHashMap;
 import java.util.Set;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLongFieldUpdater;
 import java.util.concurrent.atomic.AtomicReference;
@@ -28,13 +26,17 @@
 import java.util.function.Consumer;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.Uninterruptibles;
+
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.utils.MonotonicClock;
 
+import static java.lang.Long.MAX_VALUE;
 import static java.lang.Math.min;
+import static java.util.Collections.newSetFromMap;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
 
 /**
  * A composite queue holding messages to be delivered by an {@link OutboundConnection}.
@@ -437,8 +439,8 @@
 
     private class RemoveRunner extends AtomicReference<Remove> implements Runnable
     {
-        final CountDownLatch done = new CountDownLatch(1);
-        final Set<Message<?>> removed = Collections.newSetFromMap(new IdentityHashMap<>());
+        final CountDownLatch done = newCountDownLatch(1);
+        final Set<Message<?>> removed = newSetFromMap(new IdentityHashMap<>());
 
         RemoveRunner() { super(new Remove(null, null)); }
 
@@ -449,7 +451,7 @@
 
         public void run()
         {
-            Set<Message<?>> remove = Collections.newSetFromMap(new IdentityHashMap<>());
+            Set<Message<?>> remove = newSetFromMap(new IdentityHashMap<>());
             removeRunner = null;
             Remove undo = getAndSet(null);
             while (undo.message != null)
@@ -460,7 +462,7 @@
 
             class Remover implements PrunableArrayQueue.Pruner<Message<?>>
             {
-                private long earliestExpiresAt = Long.MAX_VALUE;
+                private long earliestExpiresAt = MAX_VALUE;
 
                 @Override
                 public boolean shouldPrune(Message<?> message)
@@ -488,7 +490,7 @@
             long nowNanos = clock.now();
             maybeUpdateNextExpirationDeadline(nowNanos, maybeUpdateEarliestExpiresAt(nowNanos, remover.earliestExpiresAt));
 
-            done.countDown();
+            done.decrement();
         }
     }
 
@@ -517,8 +519,7 @@
             }
         }
 
-        //noinspection UnstableApiUsage
-        Uninterruptibles.awaitUninterruptibly(runner.done);
+        runner.done.awaitUninterruptibly();
         return runner.removed.contains(remove);
     }
 
diff --git a/src/java/org/apache/cassandra/net/ParamType.java b/src/java/org/apache/cassandra/net/ParamType.java
index 007605b..37f4bf8 100644
--- a/src/java/org/apache/cassandra/net/ParamType.java
+++ b/src/java/org/apache/cassandra/net/ParamType.java
@@ -19,13 +19,14 @@
 
 import java.util.HashMap;
 import java.util.Map;
-
 import javax.annotation.Nullable;
 
 import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.tracing.Tracing;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.Int32Serializer;
+import org.apache.cassandra.utils.Int64Serializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static java.lang.Math.max;
 
@@ -51,12 +52,19 @@
     @Deprecated
     FAILURE_CALLBACK    (4, "CAL_BAC",       LegacyFlag.serializer),
 
-    TRACE_SESSION       (5, "TraceSession",  UUIDSerializer.serializer),
+    TRACE_SESSION       (5, "TraceSession",  TimeUUID.Serializer.instance),
     TRACE_TYPE          (6, "TraceType",     Tracing.traceTypeSerializer),
 
     @Deprecated
     TRACK_REPAIRED_DATA (7, "TrackRepaired", LegacyFlag.serializer),
 
+    TOMBSTONE_FAIL(8, "TSF", Int32Serializer.serializer),
+    TOMBSTONE_WARNING(9, "TSW", Int32Serializer.serializer),
+    LOCAL_READ_SIZE_FAIL(10, "LRSF", Int64Serializer.serializer),
+    LOCAL_READ_SIZE_WARN(11, "LRSW", Int64Serializer.serializer),
+    ROW_INDEX_READ_SIZE_FAIL(12, "RIRSF", Int64Serializer.serializer),
+    ROW_INDEX_READ_SIZE_WARN(13, "RIRSW", Int64Serializer.serializer),
+
     CUSTOM_MAP          (14, "CUSTOM",       CustomParamsSerializer.serializer);
 
     final int id;
diff --git a/src/java/org/apache/cassandra/net/RequestCallback.java b/src/java/org/apache/cassandra/net/RequestCallback.java
index 5bbe011..bd14cae 100644
--- a/src/java/org/apache/cassandra/net/RequestCallback.java
+++ b/src/java/org/apache/cassandra/net/RequestCallback.java
@@ -41,6 +41,12 @@
     }
 
     /**
+     * Returns true if the callback handles failure reporting - in which case the remote host will be asked to
+     * report failures to us in the event of a problem processing the request.
+     *
+     * TODO: this is an error-prone method; we should be handling failures everywhere,
+     *       so we should probably just start doing that and remove this method
+     *
      * @return true if the callback should be invoked on failure
      */
     default boolean invokeOnFailure()
@@ -56,4 +62,5 @@
     {
         return false;
     }
+
 }
diff --git a/src/java/org/apache/cassandra/net/RequestCallbackWithFailure.java b/src/java/org/apache/cassandra/net/RequestCallbackWithFailure.java
new file mode 100644
index 0000000..685797a
--- /dev/null
+++ b/src/java/org/apache/cassandra/net/RequestCallbackWithFailure.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.net;
+
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+public interface RequestCallbackWithFailure<T> extends RequestCallback<T>
+{
+    /**
+     * Called when there is an exception on the remote node or a timeout occurs
+     */
+    void onFailure(InetAddressAndPort from, RequestFailureReason failureReason);
+
+    /**
+     * @return true if the callback should be invoked on failure
+     */
+    default boolean invokeOnFailure()
+    {
+        return true;
+    }
+}
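
For completeness, a minimal sketch of implementing the new interface (the AckCounter class and its bookkeeping are invented for illustration); because invokeOnFailure() defaults to true here, the remote peer will be asked to report processing failures back rather than leaving the caller to time out.

    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.cassandra.exceptions.RequestFailureReason;
    import org.apache.cassandra.locator.InetAddressAndPort;
    import org.apache.cassandra.net.Message;
    import org.apache.cassandra.net.NoPayload;
    import org.apache.cassandra.net.RequestCallbackWithFailure;

    // Hypothetical callback that counts acks and records failure reasons reported by peers.
    final class AckCounter implements RequestCallbackWithFailure<NoPayload>
    {
        final AtomicInteger acks = new AtomicInteger();

        @Override
        public void onResponse(Message<NoPayload> msg)
        {
            acks.incrementAndGet();
        }

        @Override
        public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
        {
            System.out.println(from + " reported " + reason);
        }
    }

It is registered the same way as any other callback, e.g. MessagingService.instance().sendWithCallback(request, peer, new AckCounter()).
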
diff --git a/src/java/org/apache/cassandra/net/RequestCallbacks.java b/src/java/org/apache/cassandra/net/RequestCallbacks.java
index c102ee1..663126f 100644
--- a/src/java/org/apache/cassandra/net/RequestCallbacks.java
+++ b/src/java/org/apache/cassandra/net/RequestCallbacks.java
@@ -20,35 +20,32 @@
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeoutException;
 
 import javax.annotation.Nullable;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.io.IVersionedAsymmetricSerializer;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.metrics.InternodeOutboundMetrics;
 import org.apache.cassandra.service.AbstractWriteResponseHandler;
-import org.apache.cassandra.service.StorageProxy;
-import org.apache.cassandra.service.paxos.Commit;
-import org.apache.cassandra.utils.FBUtilities;
 
 import static java.lang.String.format;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.concurrent.ExecutorFactory.SimulatorSemantics.DISCARD;
 import static org.apache.cassandra.concurrent.Stage.INTERNAL_RESPONSE;
-import static org.apache.cassandra.utils.MonotonicClock.preciseTime;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.preciseTime;
 
 /**
  * An expiring map of request callbacks.
@@ -64,14 +61,14 @@
     private static final Logger logger = LoggerFactory.getLogger(RequestCallbacks.class);
 
     private final MessagingService messagingService;
-    private final ScheduledExecutorService executor = new DebuggableScheduledThreadPoolExecutor("Callback-Map-Reaper");
+    private final ScheduledExecutorPlus executor = executorFactory().scheduled("Callback-Map-Reaper", DISCARD);
     private final ConcurrentMap<CallbackKey, CallbackInfo> callbacks = new ConcurrentHashMap<>();
 
     RequestCallbacks(MessagingService messagingService)
     {
         this.messagingService = messagingService;
 
-        long expirationInterval = DatabaseDescriptor.getMinRpcTimeout(NANOSECONDS) / 2;
+        long expirationInterval = defaultExpirationInterval();
         executor.scheduleWithFixedDelay(this::expire, expirationInterval, expirationInterval, NANOSECONDS);
     }
 
@@ -88,7 +85,8 @@
      * Remove and return the {@link CallbackInfo} associated with given id and peer, if known.
      */
     @Nullable
-    CallbackInfo remove(long id, InetAddressAndPort peer)
+    @VisibleForTesting
+    public CallbackInfo remove(long id, InetAddressAndPort peer)
     {
         return callbacks.remove(key(id, peer));
     }
@@ -96,23 +94,18 @@
     /**
      * Register the provided {@link RequestCallback}, inferring expiry and id from the provided {@link Message}.
      */
-    void addWithExpiration(RequestCallback cb, Message message, InetAddressAndPort to)
+    public void addWithExpiration(RequestCallback<?> cb, Message<?> message, InetAddressAndPort to)
     {
-        // mutations need to call the overload with a ConsistencyLevel
-        assert message.verb() != Verb.MUTATION_REQ && message.verb() != Verb.COUNTER_MUTATION_REQ && message.verb() != Verb.PAXOS_COMMIT_REQ;
+        // mutations need to call the overload below that takes an AbstractWriteResponseHandler
+        assert message.verb() != Verb.MUTATION_REQ && message.verb() != Verb.COUNTER_MUTATION_REQ;
         CallbackInfo previous = callbacks.put(key(message.id(), to), new CallbackInfo(message, to, cb));
         assert previous == null : format("Callback already exists for id %d/%s! (%s)", message.id(), to, previous);
     }
 
-    // FIXME: shouldn't need a special overload for writes; hinting should be part of AbstractWriteResponseHandler
-    public void addWithExpiration(AbstractWriteResponseHandler<?> cb,
-                                  Message<?> message,
-                                  Replica to,
-                                  ConsistencyLevel consistencyLevel,
-                                  boolean allowHints)
+    public void addWithExpiration(AbstractWriteResponseHandler<?> cb, Message<?> message, Replica to)
     {
         assert message.verb() == Verb.MUTATION_REQ || message.verb() == Verb.COUNTER_MUTATION_REQ || message.verb() == Verb.PAXOS_COMMIT_REQ;
-        CallbackInfo previous = callbacks.put(key(message.id(), to.endpoint()), new WriteCallbackInfo(message, to, cb, consistencyLevel, allowHints));
+        CallbackInfo previous = callbacks.put(key(message.id(), to.endpoint()), new CallbackInfo(message, to.endpoint(), cb));
         assert previous == null : format("Callback already exists for id %d/%s! (%s)", message.id(), to.endpoint(), previous);
     }
 
@@ -169,14 +162,6 @@
 
         if (info.invokeOnFailure())
             INTERNAL_RESPONSE.submit(() -> info.callback.onFailure(info.peer, RequestFailureReason.TIMEOUT));
-
-        // FIXME: this has never belonged here, should be part of onFailure() in AbstractWriteResponseHandler
-        if (info.shouldHint())
-        {
-            WriteCallbackInfo writeCallbackInfo = ((WriteCallbackInfo) info);
-            Mutation mutation = writeCallbackInfo.mutation();
-            StorageProxy.submitHint(mutation, writeCallbackInfo.getReplica(), null);
-        }
     }
 
     void shutdownNow(boolean expireCallbacks)
@@ -199,7 +184,7 @@
     {
         if (!executor.isTerminated())
         {
-            long wait = deadlineNanos - System.nanoTime();
+            long wait = deadlineNanos - nanoTime();
             if (wait <= 0 || !executor.awaitTermination(wait, NANOSECONDS))
                 throw new TimeoutException();
         }
@@ -249,13 +234,14 @@
         }
     }
 
-    static class CallbackInfo
+    @VisibleForTesting
+    public static class CallbackInfo
     {
         final long createdAtNanos;
         final long expiresAtNanos;
 
         final InetAddressAndPort peer;
-        final RequestCallback callback;
+        public final RequestCallback callback;
 
         @Deprecated // for 3.0 compatibility purposes only
         public final Verb responseVerb;
@@ -279,11 +265,6 @@
             return atNano > expiresAtNanos;
         }
 
-        boolean shouldHint()
-        {
-            return false;
-        }
-
         boolean invokeOnFailure()
         {
             return callback.invokeOnFailure();
@@ -295,53 +276,6 @@
         }
     }
 
-    // FIXME: shouldn't need a specialized container for write callbacks; hinting should be part of
-    //        AbstractWriteResponseHandler implementation.
-    static class WriteCallbackInfo extends CallbackInfo
-    {
-        // either a Mutation, or a Paxos Commit (MessageOut)
-        private final Object mutation;
-        private final Replica replica;
-
-        @VisibleForTesting
-        WriteCallbackInfo(Message message, Replica replica, RequestCallback<?> callback, ConsistencyLevel consistencyLevel, boolean allowHints)
-        {
-            super(message, replica.endpoint(), callback);
-            this.mutation = shouldHint(allowHints, message, consistencyLevel) ? message.payload : null;
-            //Local writes shouldn't go through messaging service (https://issues.apache.org/jira/browse/CASSANDRA-10477)
-            //noinspection AssertWithSideEffects
-            assert !peer.equals(FBUtilities.getBroadcastAddressAndPort());
-            this.replica = replica;
-        }
-
-        public boolean shouldHint()
-        {
-            return mutation != null && StorageProxy.shouldHint(replica);
-        }
-
-        public Replica getReplica()
-        {
-            return replica;
-        }
-
-        public Mutation mutation()
-        {
-            return getMutation(mutation);
-        }
-
-        private static Mutation getMutation(Object object)
-        {
-            assert object instanceof Commit || object instanceof Mutation : object;
-            return object instanceof Commit ? ((Commit) object).makeMutation()
-                                            : (Mutation) object;
-        }
-
-        private static boolean shouldHint(boolean allowHints, Message sentMessage, ConsistencyLevel consistencyLevel)
-        {
-            return allowHints && sentMessage.verb() != Verb.COUNTER_MUTATION_REQ && consistencyLevel != ConsistencyLevel.ANY;
-        }
-    }
-
     @Override
     public void onOverloaded(Message<?> message, InetAddressAndPort peer)
     {
@@ -375,4 +309,9 @@
         if (null != forwardTo)
             forwardTo.forEach(this::removeAndExpire);
     }
+
+    public static long defaultExpirationInterval()
+    {
+        return DatabaseDescriptor.getMinRpcTimeout(NANOSECONDS) / 2;
+    }
 }
diff --git a/src/java/org/apache/cassandra/net/ResourceLimits.java b/src/java/org/apache/cassandra/net/ResourceLimits.java
index 8899040..cfddfc3 100644
--- a/src/java/org/apache/cassandra/net/ResourceLimits.java
+++ b/src/java/org/apache/cassandra/net/ResourceLimits.java
@@ -41,7 +41,7 @@
          * Sets the total amount of permits represented by this {@link Limit} - the capacity
          *
          * If the old limit has been reached and the new limit is large enough to allow for more
-         * permits to be aqcuired, subsequent calls to {@link #allocate(long)} or {@link #tryAllocate(long)}
+         * permits to be acquired, subsequent calls to {@link #allocate(long)} or {@link #tryAllocate(long)}
          * will succeed.
          *
          * If the new limit is lower than the current amount of allocated permits then subsequent calls
@@ -163,7 +163,7 @@
                 // possible it would require synchronizing the closing of all outbound connections
                 // and reinitializing the Concurrent limit before reopening.  For such an unlikely path
                 // (previously this was an assert), it is safer to terminate the JVM and have something external
-                // restart and get back to a known good state rather than intermittendly crashing on any of
+                // restart and get back to a known good state rather than intermittently crashing on any of
                 // the connections sharing this limit.
                 throw new UnrecoverableIllegalStateException(
                     "Internode messaging byte limits that are shared between connections is invalid (using="+using+")");
diff --git a/src/java/org/apache/cassandra/net/ResponseVerbHandler.java b/src/java/org/apache/cassandra/net/ResponseVerbHandler.java
index 369e5f4..1cee468 100644
--- a/src/java/org/apache/cassandra/net/ResponseVerbHandler.java
+++ b/src/java/org/apache/cassandra/net/ResponseVerbHandler.java
@@ -24,7 +24,7 @@
 import org.apache.cassandra.tracing.Tracing;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 class ResponseVerbHandler implements IVerbHandler
 {
diff --git a/src/java/org/apache/cassandra/net/SocketFactory.java b/src/java/org/apache/cassandra/net/SocketFactory.java
index 9d2f6ad..33fff6b 100644
--- a/src/java/org/apache/cassandra/net/SocketFactory.java
+++ b/src/java/org/apache/cassandra/net/SocketFactory.java
@@ -24,7 +24,6 @@
 import java.nio.channels.spi.SelectorProvider;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeoutException;
 import javax.annotation.Nullable;
@@ -73,6 +72,7 @@
 import static io.netty.channel.unix.Errors.ERRNO_ECONNRESET_NEGATIVE;
 import static io.netty.channel.unix.Errors.ERROR_ECONNREFUSED_NEGATIVE;
 import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.utils.Throwables.isCausedBy;
 
 /**
@@ -178,7 +178,7 @@
     private final EventLoopGroup defaultGroup;
     // we need a separate EventLoopGroup for outbound streaming because sendFile is blocking
     private final EventLoopGroup outboundStreamingGroup;
-    final ExecutorService synchronousWorkExecutor = Executors.newCachedThreadPool(new NamedThreadFactory("Messaging-SynchronousWork"));
+    final ExecutorService synchronousWorkExecutor = executorFactory().pooled("Messaging-SynchronousWork", Integer.MAX_VALUE);
 
     SocketFactory()
     {
@@ -319,7 +319,7 @@
     static String addressId(InetAddressAndPort address, InetSocketAddress realAddress)
     {
         String str = address.toString();
-        if (!address.address.equals(realAddress.getAddress()) || address.port != realAddress.getPort())
+        if (!address.getAddress().equals(realAddress.getAddress()) || address.getPort() != realAddress.getPort())
             str += '(' + InetAddressAndPort.toString(realAddress.getAddress(), realAddress.getPort()) + ')';
         return str;
     }
diff --git a/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java b/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
index 8bc1e5d..0197a6b 100644
--- a/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
+++ b/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
@@ -17,36 +17,40 @@
  */
 package org.apache.cassandra.net;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
-import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.SetMultimap;
-import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.gms.ApplicationState;
 import org.apache.cassandra.gms.EndpointState;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
-import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static java.util.stream.Collectors.groupingBy;
+import static java.util.stream.Collectors.mapping;
+import static java.util.stream.Collectors.toList;
 import static org.apache.cassandra.net.Verb.PING_REQ;
 import static org.apache.cassandra.net.ConnectionType.LARGE_MESSAGES;
 import static org.apache.cassandra.net.ConnectionType.SMALL_MESSAGES;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
 
 public class StartupClusterConnectivityChecker
 {
@@ -117,15 +121,17 @@
                         TimeUnit.NANOSECONDS.toSeconds(timeoutNanos));
         }
 
-        AckMap acks = new AckMap(3);
+        // The threshold is 3 because for each peer we want to have 3 acks:
+        // one for the small message connection, one for the large message connection, and one for the alive event from gossip.
+        AckMap acks = new AckMap(3, peers);
         Map<String, CountDownLatch> dcToRemainingPeers = new HashMap<>(datacenterToPeers.size());
         for (String datacenter: datacenterToPeers.keys())
         {
             dcToRemainingPeers.put(datacenter,
-                                   new CountDownLatch(Math.max(datacenterToPeers.get(datacenter).size() - 1, 0)));
+                                   newCountDownLatch(Math.max(datacenterToPeers.get(datacenter).size() - 1, 0)));
         }
 
-        long startNanos = System.nanoTime();
+        long startNanos = nanoTime();
 
         // set up a listener to react to new nodes becoming alive (in gossip), and account for all the nodes that are already alive
         Set<InetAddressAndPort> alivePeers = Collections.newSetFromMap(new ConcurrentHashMap<>());
@@ -143,33 +149,40 @@
                 String datacenter = peerToDatacenter.get(peer);
                 // We have to check because we might only have the local DC in the map
                 if (dcToRemainingPeers.containsKey(datacenter))
-                    dcToRemainingPeers.get(datacenter).countDown();
+                    dcToRemainingPeers.get(datacenter).decrement();
             }
         }
 
         boolean succeeded = true;
         for (CountDownLatch countDownLatch : dcToRemainingPeers.values())
         {
-            long remainingNanos = Math.max(1, timeoutNanos - (System.nanoTime() - startNanos));
+            long remainingNanos = Math.max(1, timeoutNanos - (nanoTime() - startNanos));
             //noinspection UnstableApiUsage
-            succeeded &= Uninterruptibles.awaitUninterruptibly(countDownLatch, remainingNanos, TimeUnit.NANOSECONDS);
+            succeeded &= countDownLatch.awaitUninterruptibly(remainingNanos, TimeUnit.NANOSECONDS);
         }
 
         Gossiper.instance.unregister(listener);
 
-        Map<String, Long> numDown = dcToRemainingPeers.entrySet().stream()
-                                                      .collect(Collectors.toMap(Map.Entry::getKey,
-                                                                                e -> e.getValue().getCount()));
-
         if (succeeded)
         {
             logger.info("Ensured sufficient healthy connections with {} after {} milliseconds",
-                        numDown.keySet(), TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
+                        dcToRemainingPeers.keySet(), TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNanos));
         }
         else
         {
+            // dc -> missing peer host addresses
+            Map<String, List<String>> peersDown = acks.getMissingPeers().stream()
+                                                      .collect(groupingBy(peer -> {
+                                                                              String dc = peerToDatacenter.get(peer);
+                                                                              if (dc != null)
+                                                                                  return dc;
+                                                                              return StringUtils.defaultString(getDatacenterSource.apply(peer), "unknown");
+                                                                          },
+                                                                          mapping(InetAddressAndPort::getHostAddressAndPort,
+                                                                                  toList())));
             logger.warn("Timed out after {} milliseconds, was waiting for remaining peers to connect: {}",
-                        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos), numDown);
+                        TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNanos),
+                        peersDown);
         }
 
         return succeeded;
@@ -188,7 +201,7 @@
                 String datacenter = getDatacenter.apply(msg.from());
                 // We have to check because we might only have the local DC in the map
                 if (dcToRemainingPeers.containsKey(datacenter))
-                    dcToRemainingPeers.get(datacenter).countDown();
+                    dcToRemainingPeers.get(datacenter).decrement();
             }
         };
 
@@ -227,7 +240,7 @@
             {
                 String datacenter = getDatacenter.apply(endpoint);
                 if (dcToRemainingPeers.containsKey(datacenter))
-                    dcToRemainingPeers.get(datacenter).countDown();
+                    dcToRemainingPeers.get(datacenter).decrement();
             }
         }
     }
@@ -237,15 +250,37 @@
         private final int threshold;
         private final Map<InetAddressAndPort, AtomicInteger> acks;
 
-        AckMap(int threshold)
+        AckMap(int threshold, Iterable<InetAddressAndPort> initialPeers)
         {
             this.threshold = threshold;
             acks = new ConcurrentHashMap<>();
+            for (InetAddressAndPort peer : initialPeers)
+                initOrGetCounter(peer);
         }
 
         boolean incrementAndCheck(InetAddressAndPort address)
         {
-            return acks.computeIfAbsent(address, addr -> new AtomicInteger(0)).incrementAndGet() == threshold;
+            return initOrGetCounter(address).incrementAndGet() == threshold;
+        }
+
+        /**
+         * Get a list of peers that have not fully ack'd, i.e. have not reached the ack threshold
+         */
+        List<InetAddressAndPort> getMissingPeers()
+        {
+            List<InetAddressAndPort> missingPeers = new ArrayList<>();
+            for (Map.Entry<InetAddressAndPort, AtomicInteger> entry : acks.entrySet())
+            {
+                if (entry.getValue().get() < threshold)
+                    missingPeers.add(entry.getKey());
+            }
+            return missingPeers;
+        }
+
+        // initialize the counter for the peer if absent, otherwise return the existing one
+        private AtomicInteger initOrGetCounter(InetAddressAndPort address)
+        {
+            return acks.computeIfAbsent(address, addr -> new AtomicInteger(0));
         }
     }
 }
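
To make the threshold-of-3 accounting concrete, a conceptual walk-through (the peer variable and dcOf() helper are invented for the example); whichever of the three signals arrives last trips the threshold and decrements that peer's datacenter latch.

    // Conceptually, inside StartupClusterConnectivityChecker:
    AckMap acks = new AckMap(3, peers);           // every known peer starts with a zeroed counter

    acks.incrementAndCheck(peer);                 // ack on the SMALL_MESSAGES connection  -> false
    acks.incrementAndCheck(peer);                 // ack on the LARGE_MESSAGES connection  -> false
    if (acks.incrementAndCheck(peer))             // gossip marked the peer alive          -> true
        dcToRemainingPeers.get(dcOf(peer)).decrement();

    // If the overall timeout fires first, acks.getMissingPeers() lists every peer still
    // below the threshold, which is what the warning message groups by datacenter.
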
diff --git a/src/java/org/apache/cassandra/net/Verb.java b/src/java/org/apache/cassandra/net/Verb.java
index 9d8b76d..d50a187 100644
--- a/src/java/org/apache/cassandra/net/Verb.java
+++ b/src/java/org/apache/cassandra/net/Verb.java
@@ -71,28 +71,39 @@
 import org.apache.cassandra.repair.messages.SyncRequest;
 import org.apache.cassandra.repair.messages.ValidationResponse;
 import org.apache.cassandra.repair.messages.ValidationRequest;
+import org.apache.cassandra.schema.SchemaMutationsSerializer;
 import org.apache.cassandra.schema.SchemaPullVerbHandler;
 import org.apache.cassandra.schema.SchemaPushVerbHandler;
 import org.apache.cassandra.schema.SchemaVersionVerbHandler;
+import org.apache.cassandra.service.paxos.PaxosCommit;
+import org.apache.cassandra.service.paxos.PaxosCommitAndPrepare;
+import org.apache.cassandra.service.paxos.PaxosPrepare;
+import org.apache.cassandra.service.paxos.PaxosPrepareRefresh;
+import org.apache.cassandra.service.paxos.PaxosPropose;
+import org.apache.cassandra.service.paxos.PaxosRepair;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupHistory;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupRequest;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupResponse;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupComplete;
+import org.apache.cassandra.service.paxos.cleanup.PaxosStartPrepareCleanup;
+import org.apache.cassandra.service.paxos.cleanup.PaxosFinishPrepareCleanup;
 import org.apache.cassandra.utils.BooleanSerializer;
 import org.apache.cassandra.service.EchoVerbHandler;
 import org.apache.cassandra.service.SnapshotVerbHandler;
 import org.apache.cassandra.service.paxos.Commit;
-import org.apache.cassandra.service.paxos.CommitVerbHandler;
+import org.apache.cassandra.service.paxos.Commit.Agreed;
 import org.apache.cassandra.service.paxos.PrepareResponse;
-import org.apache.cassandra.service.paxos.PrepareVerbHandler;
-import org.apache.cassandra.service.paxos.ProposeVerbHandler;
+import org.apache.cassandra.service.paxos.v1.PrepareVerbHandler;
+import org.apache.cassandra.service.paxos.v1.ProposeVerbHandler;
 import org.apache.cassandra.streaming.ReplicationDoneVerbHandler;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.UUIDSerializer;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static org.apache.cassandra.concurrent.Stage.*;
-import static org.apache.cassandra.concurrent.Stage.INTERNAL_RESPONSE;
-import static org.apache.cassandra.concurrent.Stage.MISC;
 import static org.apache.cassandra.net.VerbTimeouts.*;
 import static org.apache.cassandra.net.Verb.Kind.*;
 import static org.apache.cassandra.net.Verb.Priority.*;
-import static org.apache.cassandra.schema.MigrationManager.MigrationsSerializer;
 
 /**
  * Note that priorities except P0 are presently unused.  P0 corresponds to urgent, i.e. what used to be the "Gossip" connection.
@@ -108,14 +119,14 @@
     BATCH_STORE_RSP        (65,  P1, writeTimeout,    REQUEST_RESPONSE,  () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
     BATCH_STORE_REQ        (5,   P3, writeTimeout,    MUTATION,          () -> Batch.serializer,                     () -> BatchStoreVerbHandler.instance,      BATCH_STORE_RSP     ),
     BATCH_REMOVE_RSP       (66,  P1, writeTimeout,    REQUEST_RESPONSE,  () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
-    BATCH_REMOVE_REQ       (6,   P3, writeTimeout,    MUTATION,          () -> UUIDSerializer.serializer,            () -> BatchRemoveVerbHandler.instance,     BATCH_REMOVE_RSP    ),
+    BATCH_REMOVE_REQ       (6,   P3, writeTimeout,    MUTATION,          () -> TimeUUID.Serializer.instance,         () -> BatchRemoveVerbHandler.instance,     BATCH_REMOVE_RSP    ),
 
     PAXOS_PREPARE_RSP      (93,  P2, writeTimeout,    REQUEST_RESPONSE,  () -> PrepareResponse.serializer,           () -> ResponseVerbHandler.instance                             ),
     PAXOS_PREPARE_REQ      (33,  P2, writeTimeout,    MUTATION,          () -> Commit.serializer,                    () -> PrepareVerbHandler.instance,         PAXOS_PREPARE_RSP   ),
     PAXOS_PROPOSE_RSP      (94,  P2, writeTimeout,    REQUEST_RESPONSE,  () -> BooleanSerializer.serializer,         () -> ResponseVerbHandler.instance                             ),
     PAXOS_PROPOSE_REQ      (34,  P2, writeTimeout,    MUTATION,          () -> Commit.serializer,                    () -> ProposeVerbHandler.instance,         PAXOS_PROPOSE_RSP   ),
     PAXOS_COMMIT_RSP       (95,  P2, writeTimeout,    REQUEST_RESPONSE,  () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
-    PAXOS_COMMIT_REQ       (35,  P2, writeTimeout,    MUTATION,          () -> Commit.serializer,                    () -> CommitVerbHandler.instance,          PAXOS_COMMIT_RSP    ),
+    PAXOS_COMMIT_REQ       (35,  P2, writeTimeout,    MUTATION,          () -> Agreed.serializer,                    () -> PaxosCommit.requestHandler,          PAXOS_COMMIT_RSP    ),
 
     TRUNCATE_RSP           (79,  P0, truncateTimeout, REQUEST_RESPONSE,  () -> TruncateResponse.serializer,          () -> ResponseVerbHandler.instance                             ),
     TRUNCATE_REQ           (19,  P0, truncateTimeout, MUTATION,          () -> TruncateRequest.serializer,           () -> TruncateVerbHandler.instance,        TRUNCATE_RSP        ),
@@ -140,41 +151,63 @@
 
     // P1 because messages can be arbitrarily large or aren't crucial
     SCHEMA_PUSH_RSP        (98,  P1, rpcTimeout,      MIGRATION,         () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
-    SCHEMA_PUSH_REQ        (18,  P1, rpcTimeout,      MIGRATION,         () -> MigrationsSerializer.instance,        () -> SchemaPushVerbHandler.instance,      SCHEMA_PUSH_RSP     ),
-    SCHEMA_PULL_RSP        (88,  P1, rpcTimeout,      MIGRATION,         () -> MigrationsSerializer.instance,        () -> ResponseVerbHandler.instance                             ),
+    SCHEMA_PUSH_REQ        (18,  P1, rpcTimeout,      MIGRATION,         () -> SchemaMutationsSerializer.instance,   () -> SchemaPushVerbHandler.instance,      SCHEMA_PUSH_RSP     ),
+    SCHEMA_PULL_RSP        (88,  P1, rpcTimeout,      MIGRATION,         () -> SchemaMutationsSerializer.instance,   () -> ResponseVerbHandler.instance                             ),
     SCHEMA_PULL_REQ        (28,  P1, rpcTimeout,      MIGRATION,         () -> NoPayload.serializer,                 () -> SchemaPullVerbHandler.instance,      SCHEMA_PULL_RSP     ),
     SCHEMA_VERSION_RSP     (80,  P1, rpcTimeout,      MIGRATION,         () -> UUIDSerializer.serializer,            () -> ResponseVerbHandler.instance                             ),
     SCHEMA_VERSION_REQ     (20,  P1, rpcTimeout,      MIGRATION,         () -> NoPayload.serializer,                 () -> SchemaVersionVerbHandler.instance,   SCHEMA_VERSION_RSP  ),
 
     // repair; mostly doesn't use callbacks and sends responses as their own request messages, with matching sessions by uuid; should eventually harmonize and make idiomatic
-    REPAIR_RSP             (100, P1, repairMsgTimeout,REQUEST_RESPONSE,  () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
-    VALIDATION_RSP         (102, P1, longTimeout     ,ANTI_ENTROPY,      () -> ValidationResponse.serializer,        () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    VALIDATION_REQ         (101, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> ValidationRequest.serializer,         () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    SYNC_RSP               (104, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> SyncResponse.serializer,              () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    SYNC_REQ               (103, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> SyncRequest.serializer,               () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    PREPARE_MSG            (105, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> PrepareMessage.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    SNAPSHOT_MSG           (106, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> SnapshotMessage.serializer,           () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    CLEANUP_MSG            (107, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> CleanupMessage.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    PREPARE_CONSISTENT_RSP (109, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> PrepareConsistentResponse.serializer, () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    PREPARE_CONSISTENT_REQ (108, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> PrepareConsistentRequest.serializer,  () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    FINALIZE_PROPOSE_MSG   (110, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> FinalizePropose.serializer,           () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    FINALIZE_PROMISE_MSG   (111, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> FinalizePromise.serializer,           () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    FINALIZE_COMMIT_MSG    (112, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> FinalizeCommit.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    FAILED_SESSION_MSG     (113, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> FailSession.serializer,               () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    STATUS_RSP             (115, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> StatusResponse.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
-    STATUS_REQ             (114, P1, repairMsgTimeout,ANTI_ENTROPY,      () -> StatusRequest.serializer,             () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    REPAIR_RSP             (100, P1, repairTimeout,   REQUEST_RESPONSE,  () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
+    VALIDATION_RSP         (102, P1, longTimeout,     ANTI_ENTROPY,      () -> ValidationResponse.serializer,        () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    VALIDATION_REQ         (101, P1, repairTimeout,   ANTI_ENTROPY,      () -> ValidationRequest.serializer,         () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    SYNC_RSP               (104, P1, repairTimeout,   ANTI_ENTROPY,      () -> SyncResponse.serializer,              () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    SYNC_REQ               (103, P1, repairTimeout,   ANTI_ENTROPY,      () -> SyncRequest.serializer,               () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    PREPARE_MSG            (105, P1, repairTimeout,   ANTI_ENTROPY,      () -> PrepareMessage.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    SNAPSHOT_MSG           (106, P1, repairTimeout,   ANTI_ENTROPY,      () -> SnapshotMessage.serializer,           () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    CLEANUP_MSG            (107, P1, repairTimeout,   ANTI_ENTROPY,      () -> CleanupMessage.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    PREPARE_CONSISTENT_RSP (109, P1, repairTimeout,   ANTI_ENTROPY,      () -> PrepareConsistentResponse.serializer, () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    PREPARE_CONSISTENT_REQ (108, P1, repairTimeout,   ANTI_ENTROPY,      () -> PrepareConsistentRequest.serializer,  () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    FINALIZE_PROPOSE_MSG   (110, P1, repairTimeout,   ANTI_ENTROPY,      () -> FinalizePropose.serializer,           () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    FINALIZE_PROMISE_MSG   (111, P1, repairTimeout,   ANTI_ENTROPY,      () -> FinalizePromise.serializer,           () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    FINALIZE_COMMIT_MSG    (112, P1, repairTimeout,   ANTI_ENTROPY,      () -> FinalizeCommit.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    FAILED_SESSION_MSG     (113, P1, repairTimeout,   ANTI_ENTROPY,      () -> FailSession.serializer,               () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    STATUS_RSP             (115, P1, repairTimeout,   ANTI_ENTROPY,      () -> StatusResponse.serializer,            () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
+    STATUS_REQ             (114, P1, repairTimeout,   ANTI_ENTROPY,      () -> StatusRequest.serializer,             () -> RepairMessageVerbHandler.instance,   REPAIR_RSP          ),
 
     REPLICATION_DONE_RSP   (82,  P0, rpcTimeout,      MISC,              () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
     REPLICATION_DONE_REQ   (22,  P0, rpcTimeout,      MISC,              () -> NoPayload.serializer,                 () -> ReplicationDoneVerbHandler.instance, REPLICATION_DONE_RSP),
     SNAPSHOT_RSP           (87,  P0, rpcTimeout,      MISC,              () -> NoPayload.serializer,                 () -> ResponseVerbHandler.instance                             ),
     SNAPSHOT_REQ           (27,  P0, rpcTimeout,      MISC,              () -> SnapshotCommand.serializer,           () -> SnapshotVerbHandler.instance,        SNAPSHOT_RSP        ),
 
+    PAXOS2_COMMIT_REMOTE_REQ         (38, P2, writeTimeout,  MUTATION,          () -> Mutation.serializer,                     () -> MutationVerbHandler.instance,                          MUTATION_RSP                     ),
+    PAXOS2_COMMIT_REMOTE_RSP         (39, P2, writeTimeout,  REQUEST_RESPONSE,  () -> NoPayload.serializer,                    () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_PREPARE_RSP               (50, P2, writeTimeout,  REQUEST_RESPONSE,  () -> PaxosPrepare.responseSerializer,         () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_PREPARE_REQ               (40, P2, writeTimeout,  MUTATION,          () -> PaxosPrepare.requestSerializer,          () -> PaxosPrepare.requestHandler,                           PAXOS2_PREPARE_RSP               ),
+    PAXOS2_PREPARE_REFRESH_RSP       (51, P2, writeTimeout,  REQUEST_RESPONSE,  () -> PaxosPrepareRefresh.responseSerializer,  () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_PREPARE_REFRESH_REQ       (41, P2, writeTimeout,  MUTATION,          () -> PaxosPrepareRefresh.requestSerializer,   () -> PaxosPrepareRefresh.requestHandler,                    PAXOS2_PREPARE_REFRESH_RSP       ),
+    PAXOS2_PROPOSE_RSP               (52, P2, writeTimeout,  REQUEST_RESPONSE,  () -> PaxosPropose.responseSerializer,         () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_PROPOSE_REQ               (42, P2, writeTimeout,  MUTATION,          () -> PaxosPropose.requestSerializer,          () -> PaxosPropose.requestHandler,                           PAXOS2_PROPOSE_RSP               ),
+    PAXOS2_COMMIT_AND_PREPARE_RSP    (53, P2, writeTimeout,  REQUEST_RESPONSE,  () -> PaxosPrepare.responseSerializer,         () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_COMMIT_AND_PREPARE_REQ    (43, P2, writeTimeout,  MUTATION,          () -> PaxosCommitAndPrepare.requestSerializer, () -> PaxosCommitAndPrepare.requestHandler,                  PAXOS2_COMMIT_AND_PREPARE_RSP    ),
+    PAXOS2_REPAIR_RSP                (54, P2, writeTimeout,  PAXOS_REPAIR,      () -> PaxosRepair.responseSerializer,          () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_REPAIR_REQ                (44, P2, writeTimeout,  PAXOS_REPAIR,      () -> PaxosRepair.requestSerializer,           () -> PaxosRepair.requestHandler,                            PAXOS2_REPAIR_RSP                ),
+    PAXOS2_CLEANUP_START_PREPARE_RSP (55, P2, repairTimeout, PAXOS_REPAIR,      () -> PaxosCleanupHistory.serializer,          () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_CLEANUP_START_PREPARE_REQ (45, P2, repairTimeout, PAXOS_REPAIR,      () -> PaxosStartPrepareCleanup.serializer,     () -> PaxosStartPrepareCleanup.verbHandler,                  PAXOS2_CLEANUP_START_PREPARE_RSP ),
+    PAXOS2_CLEANUP_RSP               (56, P2, repairTimeout, PAXOS_REPAIR,      () -> NoPayload.serializer,                    () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_CLEANUP_REQ               (46, P2, repairTimeout, PAXOS_REPAIR,      () -> PaxosCleanupRequest.serializer,          () -> PaxosCleanupRequest.verbHandler,                       PAXOS2_CLEANUP_RSP               ),
+    PAXOS2_CLEANUP_RSP2              (57, P2, repairTimeout, PAXOS_REPAIR,      () -> PaxosCleanupResponse.serializer,         () -> PaxosCleanupResponse.verbHandler                                                        ),
+    PAXOS2_CLEANUP_FINISH_PREPARE_RSP(58, P2, repairTimeout, PAXOS_REPAIR,      () -> NoPayload.serializer,                    () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_CLEANUP_FINISH_PREPARE_REQ(47, P2, repairTimeout, IMMEDIATE,         () -> PaxosCleanupHistory.serializer,          () -> PaxosFinishPrepareCleanup.verbHandler,                 PAXOS2_CLEANUP_FINISH_PREPARE_RSP),
+    PAXOS2_CLEANUP_COMPLETE_RSP      (59, P2, repairTimeout, PAXOS_REPAIR,      () -> NoPayload.serializer,                    () -> ResponseVerbHandler.instance                                                            ),
+    PAXOS2_CLEANUP_COMPLETE_REQ      (48, P2, repairTimeout, PAXOS_REPAIR,      () -> PaxosCleanupComplete.serializer,         () -> PaxosCleanupComplete.verbHandler,                      PAXOS2_CLEANUP_COMPLETE_RSP      ),
+
     // generic failure response
     FAILURE_RSP            (99,  P0, noTimeout,       REQUEST_RESPONSE,  () -> RequestFailureReason.serializer,      () -> ResponseVerbHandler.instance                             ),
 
     // dummy verbs
     _TRACE                 (30,  P1, rpcTimeout,      TRACING,           () -> NoPayload.serializer,                 () -> null                                                     ),
-    _SAMPLE                (42,  P1, rpcTimeout,      INTERNAL_RESPONSE, () -> NoPayload.serializer,                 () -> null                                                     ),
+    _SAMPLE                (49,  P1, rpcTimeout,      INTERNAL_RESPONSE, () -> NoPayload.serializer,                 () -> null                                                     ),
     _TEST_1                (10,  P0, writeTimeout,    IMMEDIATE,         () -> NoPayload.serializer,                 () -> null                                                     ),
     _TEST_2                (11,  P1, rpcTimeout,      IMMEDIATE,         () -> NoPayload.serializer,                 () -> null                                                     ),
 
@@ -188,7 +221,6 @@
     // CUSTOM VERBS
     UNUSED_CUSTOM_VERB     (CUSTOM,
                             0,   P1, rpcTimeout,      INTERNAL_RESPONSE, () -> null,                                 () -> null                                                     ),
-
     ;
 
     public static final List<Verb> VERBS = ImmutableList.copyOf(Verb.values());
@@ -227,7 +259,7 @@
     private final Supplier<? extends IVersionedAsymmetricSerializer<?, ?>> serializer;
     private final Supplier<? extends IVerbHandler<?>> handler;
 
-    final Verb responseVerb;
+    public final Verb responseVerb;
 
     private final ToLongFunction<TimeUnit> expiration;
 
@@ -447,8 +479,8 @@
     static final ToLongFunction<TimeUnit> rangeTimeout    = DatabaseDescriptor::getRangeRpcTimeout;
     static final ToLongFunction<TimeUnit> counterTimeout  = DatabaseDescriptor::getCounterWriteRpcTimeout;
     static final ToLongFunction<TimeUnit> truncateTimeout = DatabaseDescriptor::getTruncateRpcTimeout;
+    static final ToLongFunction<TimeUnit> repairTimeout   = DatabaseDescriptor::getRepairRpcTimeout;
     static final ToLongFunction<TimeUnit> pingTimeout     = DatabaseDescriptor::getPingTimeout;
     static final ToLongFunction<TimeUnit> longTimeout     = units -> Math.max(DatabaseDescriptor.getRpcTimeout(units), units.convert(5L, TimeUnit.MINUTES));
-    static final ToLongFunction<TimeUnit> noTimeout       = units -> { throw new IllegalStateException(); };
-    static final ToLongFunction<TimeUnit> repairMsgTimeout= DatabaseDescriptor::getRepairRpcTimeout;
-}
\ No newline at end of file
+    static final ToLongFunction<TimeUnit> noTimeout       = units -> {  throw new IllegalStateException(); };
+}
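
For orientation on the Verb changes above: every *_REQ constant is declared together with the *_RSP verb it expects in reply (the `responseVerb` field this patch makes public), and its expiry is resolved lazily through a `ToLongFunction<TimeUnit>` such as the renamed `repairTimeout` (`DatabaseDescriptor::getRepairRpcTimeout`). Below is a minimal, self-contained sketch of that request/response-plus-lazy-timeout pattern; `MiniVerb`, `expiresAfter` and the 30-second value are invented for illustration and are not the real Cassandra API.

```java
import java.util.concurrent.TimeUnit;
import java.util.function.ToLongFunction;

// Hypothetical stand-in for the verb table: each request verb carries a lazily
// resolved timeout plus, optionally, the verb its response arrives on.
enum MiniVerb
{
    REPAIR_RSP(null, unit -> unit.convert(30, TimeUnit.SECONDS)),
    REPAIR_REQ(REPAIR_RSP, unit -> unit.convert(30, TimeUnit.SECONDS));

    final MiniVerb responseVerb;               // made public on the real enum by this patch
    final ToLongFunction<TimeUnit> expiration; // evaluated per call, not captured at enum load time

    MiniVerb(MiniVerb responseVerb, ToLongFunction<TimeUnit> expiration)
    {
        this.responseVerb = responseVerb;
        this.expiration = expiration;
    }

    long expiresAfter(TimeUnit unit)
    {
        return expiration.applyAsLong(unit);
    }
}

class MiniVerbDemo
{
    public static void main(String[] args)
    {
        System.out.println(MiniVerb.REPAIR_REQ.responseVerb);                        // REPAIR_RSP
        System.out.println(MiniVerb.REPAIR_REQ.expiresAfter(TimeUnit.MILLISECONDS)); // 30000
    }
}
```

Resolving the timeout through a function on each call, rather than capturing a constant when the enum loads, means a message picks up whatever timeout DatabaseDescriptor reports at the moment it is built.
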
diff --git a/src/java/org/apache/cassandra/notifications/MemtableDiscardedNotification.java b/src/java/org/apache/cassandra/notifications/MemtableDiscardedNotification.java
index 778cad0..849b2f6 100644
--- a/src/java/org/apache/cassandra/notifications/MemtableDiscardedNotification.java
+++ b/src/java/org/apache/cassandra/notifications/MemtableDiscardedNotification.java
@@ -17,7 +17,7 @@
  */
 package org.apache.cassandra.notifications;
 
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 
 public class MemtableDiscardedNotification implements INotification
 {
diff --git a/src/java/org/apache/cassandra/notifications/MemtableRenewedNotification.java b/src/java/org/apache/cassandra/notifications/MemtableRenewedNotification.java
index 4c7e6c5..776c9da 100644
--- a/src/java/org/apache/cassandra/notifications/MemtableRenewedNotification.java
+++ b/src/java/org/apache/cassandra/notifications/MemtableRenewedNotification.java
@@ -17,7 +17,7 @@
  */
 package org.apache.cassandra.notifications;
 
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 
 public class MemtableRenewedNotification implements INotification
 {
diff --git a/src/java/org/apache/cassandra/notifications/MemtableSwitchedNotification.java b/src/java/org/apache/cassandra/notifications/MemtableSwitchedNotification.java
index 946de4e..b1737be 100644
--- a/src/java/org/apache/cassandra/notifications/MemtableSwitchedNotification.java
+++ b/src/java/org/apache/cassandra/notifications/MemtableSwitchedNotification.java
@@ -17,7 +17,7 @@
  */
 package org.apache.cassandra.notifications;
 
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 
 public class MemtableSwitchedNotification implements INotification
 {
diff --git a/src/java/org/apache/cassandra/notifications/SSTableAddedNotification.java b/src/java/org/apache/cassandra/notifications/SSTableAddedNotification.java
index 9c95a18..857af69 100644
--- a/src/java/org/apache/cassandra/notifications/SSTableAddedNotification.java
+++ b/src/java/org/apache/cassandra/notifications/SSTableAddedNotification.java
@@ -21,7 +21,7 @@
 
 import javax.annotation.Nullable;
 
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 
 /**
diff --git a/src/java/org/apache/cassandra/repair/AbstractRepairTask.java b/src/java/org/apache/cassandra/repair/AbstractRepairTask.java
new file mode 100644
index 0000000..d2a6f1a
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/AbstractRepairTask.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Objects;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+
+public abstract class AbstractRepairTask implements RepairTask
+{
+    protected static final Logger logger = LoggerFactory.getLogger(AbstractRepairTask.class);
+
+    protected final RepairOption options;
+    protected final String keyspace;
+    protected final RepairNotifier notifier;
+
+    protected AbstractRepairTask(RepairOption options, String keyspace, RepairNotifier notifier)
+    {
+        this.options = Objects.requireNonNull(options);
+        this.keyspace = Objects.requireNonNull(keyspace);
+        this.notifier = Objects.requireNonNull(notifier);
+    }
+
+    private List<RepairSession> submitRepairSessions(TimeUUID parentSession,
+                                                     boolean isIncremental,
+                                                     ExecutorPlus executor,
+                                                     List<CommonRange> commonRanges,
+                                                     String... cfnames)
+    {
+        List<RepairSession> futures = new ArrayList<>(options.getRanges().size());
+
+        for (CommonRange commonRange : commonRanges)
+        {
+            logger.info("Starting RepairSession for {}", commonRange);
+            RepairSession session = ActiveRepairService.instance.submitRepairSession(parentSession,
+                                                                                     commonRange,
+                                                                                     keyspace,
+                                                                                     options.getParallelism(),
+                                                                                     isIncremental,
+                                                                                     options.isPullRepair(),
+                                                                                     options.getPreviewKind(),
+                                                                                     options.optimiseStreams(),
+                                                                                     options.repairPaxos(),
+                                                                                     options.paxosOnly(),
+                                                                                     executor,
+                                                                                     cfnames);
+            if (session == null)
+                continue;
+            session.addCallback(new RepairSessionCallback(session));
+            futures.add(session);
+        }
+        return futures;
+    }
+
+    protected Future<CoordinatedRepairResult> runRepair(TimeUUID parentSession,
+                                                        boolean isIncremental,
+                                                        ExecutorPlus executor,
+                                                        List<CommonRange> commonRanges,
+                                                        String... cfnames)
+    {
+        List<RepairSession> allSessions = submitRepairSessions(parentSession, isIncremental, executor, commonRanges, cfnames);
+        List<Collection<Range<Token>>> ranges = Lists.transform(allSessions, RepairSession::ranges);
+        Future<List<RepairSessionResult>> f = FutureCombiner.successfulOf(allSessions);
+        return f.map(results -> {
+            logger.debug("Repair result: {}", results);
+            return CoordinatedRepairResult.create(ranges, results);
+        });
+    }
+
+    private class RepairSessionCallback implements FutureCallback<RepairSessionResult>
+    {
+        private final RepairSession session;
+
+        public RepairSessionCallback(RepairSession session)
+        {
+            this.session = session;
+        }
+
+        public void onSuccess(RepairSessionResult result)
+        {
+            String message = String.format("Repair session %s for range %s finished", session.getId(),
+                                           session.ranges().toString());
+            notifier.notifyProgress(message);
+        }
+
+        public void onFailure(Throwable t)
+        {
+            String message = String.format("Repair session %s for range %s failed with error %s",
+                                           session.getId(), session.ranges().toString(), t.getMessage());
+            notifier.notifyError(new RuntimeException(message, t));
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/AsymmetricRemoteSyncTask.java b/src/java/org/apache/cassandra/repair/AsymmetricRemoteSyncTask.java
index 9762c9f..c050f2c 100644
--- a/src/java/org/apache/cassandra/repair/AsymmetricRemoteSyncTask.java
+++ b/src/java/org/apache/cassandra/repair/AsymmetricRemoteSyncTask.java
@@ -57,11 +57,11 @@
     {
         if (success)
         {
-            set(stat.withSummaries(summaries));
+            trySuccess(stat.withSummaries(summaries));
         }
         else
         {
-            setException(new RepairException(desc, previewKind, String.format("Sync failed between %s and %s", nodePair.coordinator, nodePair.peer)));
+            tryFailure(RepairException.warn(desc, previewKind, String.format("Sync failed between %s and %s", nodePair.coordinator, nodePair.peer)));
         }
     }
 
diff --git a/src/java/org/apache/cassandra/repair/CoordinatedRepairResult.java b/src/java/org/apache/cassandra/repair/CoordinatedRepairResult.java
new file mode 100644
index 0000000..9593acc
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/CoordinatedRepairResult.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import javax.annotation.Nullable;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+
+public class CoordinatedRepairResult
+{
+    public final Collection<Range<Token>> successfulRanges;
+    public final Collection<Range<Token>> failedRanges;
+    public final Collection<Range<Token>> skippedRanges;
+    public final Optional<List<RepairSessionResult>> results;
+
+    public CoordinatedRepairResult(Collection<Range<Token>> successfulRanges,
+                                   Collection<Range<Token>> failedRanges,
+                                   Collection<Range<Token>> skippedRanges,
+                                   List<RepairSessionResult> results)
+    {
+        this.successfulRanges = successfulRanges != null ? ImmutableList.copyOf(successfulRanges) : Collections.emptyList();
+        this.failedRanges = failedRanges != null ? ImmutableList.copyOf(failedRanges) : Collections.emptyList();
+        this.skippedRanges = skippedRanges != null ? ImmutableList.copyOf(skippedRanges) : Collections.emptyList();
+        this.results = Optional.ofNullable(results);
+    }
+
+    public static CoordinatedRepairResult create(List<Collection<Range<Token>>> ranges, List<RepairSessionResult> results)
+    {
+        if (results == null || results.isEmpty())
+            // something went wrong; assume all sessions failed
+            return failed(ranges);
+
+        assert ranges.size() == results.size() : String.format("range size %d != results size %d; ranges: %s, results: %s", ranges.size(), results.size(), ranges, results);
+        Collection<Range<Token>> successfulRanges = new ArrayList<>();
+        Collection<Range<Token>> failedRanges = new ArrayList<>();
+        Collection<Range<Token>> skippedRanges = new ArrayList<>();
+        int index = 0;
+        for (RepairSessionResult sessionResult : results)
+        {
+            if (sessionResult != null)
+            {
+                // don't record successful repair if we had to skip ranges
+                Collection<Range<Token>> replicas = sessionResult.skippedReplicas ? skippedRanges : successfulRanges;
+                replicas.addAll(sessionResult.ranges);
+            }
+            else
+            {
+                // FutureCombiner.successfulOf doesn't keep track of the original, but maintains order, so
+                // can fetch the original session
+                failedRanges.addAll(Objects.requireNonNull(ranges.get(index)));
+            }
+            index++;
+        }
+        return new CoordinatedRepairResult(successfulRanges, failedRanges, skippedRanges, results);
+    }
+
+    private static CoordinatedRepairResult failed(@Nullable List<Collection<Range<Token>>> ranges)
+    {
+        Collection<Range<Token>> failedRanges = new ArrayList<>(ranges == null ? 0 : ranges.size());
+        if (ranges != null)
+            ranges.forEach(failedRanges::addAll);
+        return new CoordinatedRepairResult(null, failedRanges, null, null);
+    }
+
+    /**
+     * Utility method for tests to produce a success result; intended only as syntactic sugar for tests, since all
+     * results must be present, else an error is thrown.
+     */
+    @VisibleForTesting
+    public static CoordinatedRepairResult success(List<RepairSessionResult> results)
+    {
+        assert results != null && results.stream().allMatch(a -> a != null) : String.format("results was null or had a null (failed) result: %s", results);
+        List<Collection<Range<Token>>> ranges = Lists.transform(results, a -> a.ranges);
+        return create(ranges, results);
+    }
+
+    public boolean hasFailed()
+    {
+        return !failedRanges.isEmpty();
+    }
+}
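
The `create` factory above leans on `FutureCombiner.successfulOf` keeping results in the same order as the submitted sessions, so a `null` at index i can be mapped back to `ranges.get(i)` and recorded as failed. Here is a small, self-contained sketch of that positional pairing; the `PositionalPairingSketch` class and its placeholder `String`/`Object` types are invented stand-ins for `Range<Token>` and `RepairSessionResult`.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical analogue of CoordinatedRepairResult.create: results keep the order of the
// inputs they were produced from, and a null result marks the session at that index as failed.
class PositionalPairingSketch
{
    static List<String> failedInputs(List<String> inputs, List<Object> results)
    {
        List<String> failed = new ArrayList<>();
        for (int i = 0; i < results.size(); i++)
        {
            if (results.get(i) == null)     // a failed future leaves a null slot
                failed.add(inputs.get(i));  // order is preserved, so index i is the matching input
        }
        return failed;
    }

    public static void main(String[] args)
    {
        List<String> ranges = Arrays.asList("(0,100]", "(100,200]", "(200,300]");
        List<Object> results = Arrays.asList(new Object(), null, new Object());
        System.out.println(failedInputs(ranges, results));   // [(100,200]]
    }
}
```
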
diff --git a/src/java/org/apache/cassandra/repair/IncrementalRepairTask.java b/src/java/org/apache/cassandra/repair/IncrementalRepairTask.java
new file mode 100644
index 0000000..af1a234
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/IncrementalRepairTask.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair;
+
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableSet;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.consistent.CoordinatorSession;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
+
+public class IncrementalRepairTask extends AbstractRepairTask
+{
+    private final TimeUUID parentSession;
+    private final RepairRunnable.NeighborsAndRanges neighborsAndRanges;
+    private final String[] cfnames;
+
+    protected IncrementalRepairTask(RepairOption options,
+                                    String keyspace,
+                                    RepairNotifier notifier,
+                                    TimeUUID parentSession,
+                                    RepairRunnable.NeighborsAndRanges neighborsAndRanges,
+                                    String[] cfnames)
+    {
+        super(options, keyspace, notifier);
+        this.parentSession = parentSession;
+        this.neighborsAndRanges = neighborsAndRanges;
+        this.cfnames = cfnames;
+    }
+
+    @Override
+    public String name()
+    {
+        return "Repair";
+    }
+
+    @Override
+    public Future<CoordinatedRepairResult> performUnsafe(ExecutorPlus executor) throws Exception
+    {
+        // the local node also needs to be included in the set of participants, since coordinator sessions aren't persisted
+        Set<InetAddressAndPort> allParticipants = ImmutableSet.<InetAddressAndPort>builder()
+                                                  .addAll(neighborsAndRanges.participants)
+                                                  .add(FBUtilities.getBroadcastAddressAndPort())
+                                                  .build();
+        // Not necessary to include self for filtering. The common ranges only contain neighbor node endpoints.
+        List<CommonRange> allRanges = neighborsAndRanges.filterCommonRanges(keyspace, cfnames);
+
+        CoordinatorSession coordinatorSession = ActiveRepairService.instance.consistent.coordinated.registerSession(parentSession, allParticipants, neighborsAndRanges.shouldExcludeDeadParticipants);
+
+        return coordinatorSession.execute(() -> runRepair(parentSession, true, executor, allRanges, cfnames));
+
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/KeyspaceRepairManager.java b/src/java/org/apache/cassandra/repair/KeyspaceRepairManager.java
index 0739f10..9042ebe 100644
--- a/src/java/org/apache/cassandra/repair/KeyspaceRepairManager.java
+++ b/src/java/org/apache/cassandra/repair/KeyspaceRepairManager.java
@@ -19,14 +19,14 @@
 package org.apache.cassandra.repair;
 
 import java.util.Collection;
-import java.util.UUID;
+import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.function.BooleanSupplier;
 
-import com.google.common.util.concurrent.ListenableFuture;
-
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.locator.RangesAtEndpoint;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
 
 /**
  * Keyspace level hook for repair.
@@ -38,9 +38,9 @@
      * been notified that the repair session has been completed, the data associated with the given session id must
      * not be combined with repaired or unrepaired data, or data from other repair sessions.
      */
-    ListenableFuture prepareIncrementalRepair(UUID sessionID,
-                                              Collection<ColumnFamilyStore> tables,
-                                              RangesAtEndpoint tokenRanges,
-                                              ExecutorService executor,
-                                              BooleanSupplier isCancelled);
+    Future<List<Void>> prepareIncrementalRepair(TimeUUID sessionID,
+                                                Collection<ColumnFamilyStore> tables,
+                                                RangesAtEndpoint tokenRanges,
+                                                ExecutorService executor,
+                                                BooleanSupplier isCancelled);
 }
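
To make the reshaped contract concrete, here is a hypothetical no-op implementer of the method shown above, assuming `prepareIncrementalRepair` is the interface's only abstract method and using `ImmediateFuture.success` as it appears elsewhere in this patch. A real implementation flushes and isolates the sstables for the session before completing the future; this sketch only illustrates the new `TimeUUID`/`Future<List<Void>>` signature.

```java
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.function.BooleanSupplier;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.locator.RangesAtEndpoint;
import org.apache.cassandra.repair.KeyspaceRepairManager;
import org.apache.cassandra.utils.TimeUUID;
import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.ImmediateFuture;

// Hypothetical no-op implementation, only to show the new Future/TimeUUID-based contract.
public class NoOpKeyspaceRepairManager implements KeyspaceRepairManager
{
    @Override
    public Future<List<Void>> prepareIncrementalRepair(TimeUUID sessionID,
                                                       Collection<ColumnFamilyStore> tables,
                                                       RangesAtEndpoint tokenRanges,
                                                       ExecutorService executor,
                                                       BooleanSupplier isCancelled)
    {
        // Nothing to isolate in this sketch; report immediate completion.
        return ImmediateFuture.success(Collections.emptyList());
    }
}
```
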
diff --git a/src/java/org/apache/cassandra/repair/LocalSyncTask.java b/src/java/org/apache/cassandra/repair/LocalSyncTask.java
index 9931575..28a6acc 100644
--- a/src/java/org/apache/cassandra/repair/LocalSyncTask.java
+++ b/src/java/org/apache/cassandra/repair/LocalSyncTask.java
@@ -19,8 +19,6 @@
 
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -43,6 +41,9 @@
 import org.apache.cassandra.tracing.TraceState;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Promise;
 
 /**
  * LocalSyncTask performs streaming between local(coordinator) node and remote replica.
@@ -53,7 +54,7 @@
 
     private static final Logger logger = LoggerFactory.getLogger(LocalSyncTask.class);
 
-    private final UUID pendingRepair;
+    private final TimeUUID pendingRepair;
 
     @VisibleForTesting
     public final boolean requestRanges;
@@ -61,10 +62,10 @@
     public final boolean transferRanges;
 
     private final AtomicBoolean active = new AtomicBoolean(true);
-    private final CompletableFuture<StreamPlan> planFuture = new CompletableFuture<>();
+    private final Promise<StreamPlan> planPromise = new AsyncPromise<>();
 
     public LocalSyncTask(RepairJobDesc desc, InetAddressAndPort local, InetAddressAndPort remote,
-                         List<Range<Token>> diff, UUID pendingRepair,
+                         List<Range<Token>> diff, TimeUUID pendingRepair,
                          boolean requestRanges, boolean transferRanges, PreviewKind previewKind)
     {
         super(desc, local, remote, diff, previewKind);
@@ -119,7 +120,7 @@
 
             StreamPlan plan = createStreamPlan();
             plan.execute();
-            planFuture.complete(plan);
+            planPromise.setSuccess(plan);
         }
     }
 
@@ -165,7 +166,7 @@
                                            status, desc.sessionId, nodePair.coordinator, nodePair.peer, desc.columnFamily);
             logger.info("{} {}", previewKind.logPrefix(desc.sessionId), message);
             Tracing.traceRepair(message);
-            set(result.hasAbortedSession() ? stat : stat.withSummaries(result.createSummaries()));
+            trySuccess(result.hasAbortedSession() ? stat : stat.withSummaries(result.createSummaries()));
             finished();
         }
     }
@@ -175,7 +176,7 @@
     {
         if (active.compareAndSet(true, false))
         {
-            setException(t);
+            tryFailure(t);
             finished();
         }
     }
@@ -194,7 +195,7 @@
     @Override
     public void abort()
     {
-        planFuture.whenComplete((plan, cause) ->
+        planPromise.addCallback((plan, cause) ->
         {
             assert plan != null : "StreamPlan future should never be completed exceptionally";
             plan.getCoordinator().getAllStreamSessions().forEach(StreamSession::abort);
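
The `CompletableFuture` to `Promise`/`AsyncPromise` switch above keeps the same hand-off shape: the task publishes the `StreamPlan` once it exists, and `abort()` registers a callback that runs when the promise completes. A minimal sketch of that hand-off, using only the `new AsyncPromise<>()`, `setSuccess` and `addCallback((value, cause) -> ...)` calls visible in this diff; the `String` payload and `PromiseHandoffSketch` class are placeholders.

```java
import org.apache.cassandra.utils.concurrent.AsyncPromise;
import org.apache.cassandra.utils.concurrent.Promise;

// Sketch of the producer/consumer hand-off LocalSyncTask uses, with a String standing in
// for the StreamPlan. Assumes setSuccess/addCallback behave as used in the patch above.
public class PromiseHandoffSketch
{
    public static void main(String[] args)
    {
        Promise<String> planPromise = new AsyncPromise<>();

        // Consumer side (abort() in LocalSyncTask): runs once the promise completes.
        planPromise.addCallback((plan, cause) -> {
            if (cause == null)
                System.out.println("aborting stream sessions of " + plan);
        });

        // Producer side: publish the plan once it has been created and executed.
        planPromise.setSuccess("stream-plan");
    }
}
```
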
diff --git a/src/java/org/apache/cassandra/repair/NoSuchRepairSessionException.java b/src/java/org/apache/cassandra/repair/NoSuchRepairSessionException.java
new file mode 100644
index 0000000..8a86f1a
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/NoSuchRepairSessionException.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.repair;
+
+import org.apache.cassandra.utils.TimeUUID;
+
+public class NoSuchRepairSessionException extends Exception
+{
+    public NoSuchRepairSessionException(TimeUUID parentSessionId)
+    {
+        super(String.format("Parent repair session with id = %s does not exist.", parentSessionId));
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/NormalRepairTask.java b/src/java/org/apache/cassandra/repair/NormalRepairTask.java
new file mode 100644
index 0000000..56a03f8
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/NormalRepairTask.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair;
+
+import java.util.List;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
+
+public class NormalRepairTask extends AbstractRepairTask
+{
+    private final TimeUUID parentSession;
+    private final List<CommonRange> commonRanges;
+    private final String[] cfnames;
+
+    protected NormalRepairTask(RepairOption options,
+                               String keyspace,
+                               RepairNotifier notifier,
+                               TimeUUID parentSession,
+                               List<CommonRange> commonRanges,
+                               String[] cfnames)
+    {
+        super(options, keyspace, notifier);
+        this.parentSession = parentSession;
+        this.commonRanges = commonRanges;
+        this.cfnames = cfnames;
+    }
+
+    @Override
+    public String name()
+    {
+        return "Repair";
+    }
+
+    @Override
+    public Future<CoordinatedRepairResult> performUnsafe(ExecutorPlus executor)
+    {
+        return runRepair(parentSession, false, executor, commonRanges, cfnames);
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/PreviewRepairTask.java b/src/java/org/apache/cassandra/repair/PreviewRepairTask.java
new file mode 100644
index 0000000..7ce7d1f
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/PreviewRepairTask.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.metrics.RepairMetrics;
+import org.apache.cassandra.repair.consistent.SyncStatSummary;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.DiagnosticSnapshotService;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
+
+public class PreviewRepairTask extends AbstractRepairTask
+{
+    private final TimeUUID parentSession;
+    private final List<CommonRange> commonRanges;
+    private final String[] cfnames;
+    private volatile String successMessage = name() + " completed successfully";
+
+    protected PreviewRepairTask(RepairOption options, String keyspace, RepairNotifier notifier, TimeUUID parentSession, List<CommonRange> commonRanges, String[] cfnames)
+    {
+        super(options, keyspace, notifier);
+        this.parentSession = parentSession;
+        this.commonRanges = commonRanges;
+        this.cfnames = cfnames;
+    }
+
+    @Override
+    public String name()
+    {
+        return "Repair preview";
+    }
+
+    @Override
+    public String successMessage()
+    {
+        return successMessage;
+    }
+
+    @Override
+    public Future<CoordinatedRepairResult> performUnsafe(ExecutorPlus executor)
+    {
+        Future<CoordinatedRepairResult> f = runRepair(parentSession, false, executor, commonRanges, cfnames);
+        return f.map(result -> {
+            if (result.hasFailed())
+                return result;
+
+            PreviewKind previewKind = options.getPreviewKind();
+            Preconditions.checkState(previewKind != PreviewKind.NONE, "Preview is NONE");
+            SyncStatSummary summary = new SyncStatSummary(true);
+            summary.consumeSessionResults(result.results);
+
+            final String message;
+            if (summary.isEmpty())
+            {
+                message = previewKind == PreviewKind.REPAIRED ? "Repaired data is in sync" : "Previewed data was in sync";
+            }
+            else
+            {
+                message = (previewKind == PreviewKind.REPAIRED ? "Repaired data is inconsistent\n" : "Preview complete\n") + summary;
+                RepairMetrics.previewFailures.inc();
+                if (previewKind == PreviewKind.REPAIRED)
+                    maybeSnapshotReplicas(parentSession, keyspace, result.results.get()); // we know it's present as the summary used it
+            }
+            successMessage += "; " + message;
+            notifier.notification(message);
+
+            return result;
+        });
+    }
+
+    private void maybeSnapshotReplicas(TimeUUID parentSession, String keyspace, List<RepairSessionResult> results)
+    {
+        if (!DatabaseDescriptor.snapshotOnRepairedDataMismatch())
+            return;
+
+        try
+        {
+            Set<String> mismatchingTables = new HashSet<>();
+            Set<InetAddressAndPort> nodes = new HashSet<>();
+            for (RepairSessionResult sessionResult : results)
+            {
+                for (RepairResult repairResult : emptyIfNull(sessionResult.repairJobResults))
+                {
+                    for (SyncStat stat : emptyIfNull(repairResult.stats))
+                    {
+                        if (stat.numberOfDifferences > 0)
+                            mismatchingTables.add(repairResult.desc.columnFamily);
+                        // snapshot all replicas, even if they don't have any differences
+                        nodes.add(stat.nodes.coordinator);
+                        nodes.add(stat.nodes.peer);
+                    }
+                }
+            }
+
+            String snapshotName = DiagnosticSnapshotService.getSnapshotName(DiagnosticSnapshotService.REPAIRED_DATA_MISMATCH_SNAPSHOT_PREFIX);
+            for (String table : mismatchingTables)
+            {
+                // we can just check snapshot existence locally since the repair coordinator is always a replica (unlike in the read case)
+                if (!Keyspace.open(keyspace).getColumnFamilyStore(table).snapshotExists(snapshotName))
+                {
+                    logger.info("{} Snapshotting {}.{} for preview repair mismatch with tag {} on instances {}",
+                                options.getPreviewKind().logPrefix(parentSession),
+                                keyspace, table, snapshotName, nodes);
+                    DiagnosticSnapshotService.repairedDataMismatch(Keyspace.open(keyspace).getColumnFamilyStore(table).metadata(), nodes);
+                }
+                else
+                {
+                    logger.info("{} Not snapshotting {}.{} - snapshot {} exists",
+                                options.getPreviewKind().logPrefix(parentSession),
+                                keyspace, table, snapshotName);
+                }
+            }
+        }
+        catch (Exception e)
+        {
+            logger.error("{} Failed snapshotting replicas", options.getPreviewKind().logPrefix(parentSession), e);
+        }
+    }
+
+    private static <T> Iterable<T> emptyIfNull(Iterable<T> iter)
+    {
+        if (iter == null)
+            return Collections.emptyList();
+        return iter;
+    }
+}
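
`PreviewRepairTask.performUnsafe` post-processes the repair outcome by chaining `map` on Cassandra's own `Future`, the same style `RepairJob` below adopts in place of Guava's `Futures.transform`/`transformAsync`. A tiny sketch of that chaining, assuming `ImmediateFuture.success`, `Future#map` and `Future#flatMap` behave as they are used in this patch; `FutureChainingSketch` and the `Integer` payload are placeholders.

```java
import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.ImmediateFuture;

// Minimal sketch of the map/flatMap chaining style this patch moves to.
public class FutureChainingSketch
{
    public static void main(String[] args) throws Exception
    {
        Future<Integer> repairResult = ImmediateFuture.success(2);

        // map: synchronous post-processing of the result (as in PreviewRepairTask.performUnsafe)
        Future<String> summary = repairResult.map(differences ->
            differences == 0 ? "data was in sync" : differences + " mismatching ranges");

        // flatMap: chain a dependent asynchronous step (as RepairJob does after paxos repair)
        Future<String> reported = summary.flatMap(message -> ImmediateFuture.success("preview: " + message));

        System.out.println(reported.get());   // preview: 2 mismatching ranges
    }
}
```
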
diff --git a/src/java/org/apache/cassandra/repair/RepairJob.java b/src/java/org/apache/cassandra/repair/RepairJob.java
index a9ac6af..aba8bd8 100644
--- a/src/java/org/apache/cassandra/repair/RepairJob.java
+++ b/src/java/org/apache/cassandra/repair/RepairJob.java
@@ -23,10 +23,16 @@
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.*;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.repair.state.JobState;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -38,41 +44,55 @@
 import org.apache.cassandra.repair.asymmetric.PreferedNodeFilter;
 import org.apache.cassandra.repair.asymmetric.ReduceHelper;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanup;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MerkleTrees;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+
+import static org.apache.cassandra.config.DatabaseDescriptor.paxosRepairEnabled;
+import static org.apache.cassandra.service.paxos.Paxos.useV2;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * RepairJob runs repair on given ColumnFamily.
  */
-public class RepairJob extends AbstractFuture<RepairResult> implements Runnable
+public class RepairJob extends AsyncFuture<RepairResult> implements Runnable
 {
     private static final Logger logger = LoggerFactory.getLogger(RepairJob.class);
 
-    private final RepairSession session;
+    public final JobState state;
     private final RepairJobDesc desc;
+    private final RepairSession session;
     private final RepairParallelism parallelismDegree;
-    private final ListeningExecutorService taskExecutor;
-    
-    private final List<SyncTask> syncTasks = new CopyOnWriteArrayList<>();
+    private final ExecutorPlus taskExecutor;
+
+    @VisibleForTesting
+    final List<ValidationTask> validationTasks = new CopyOnWriteArrayList<>();
+
+    @VisibleForTesting
+    final List<SyncTask> syncTasks = new CopyOnWriteArrayList<>();
 
     /**
      * Create repair job to run on specific columnfamily
-     *
-     * @param session RepairSession that this RepairJob belongs
+     *  @param session RepairSession that this RepairJob belongs to
      * @param columnFamily name of the ColumnFamily to repair
      */
     public RepairJob(RepairSession session, String columnFamily)
     {
         this.session = session;
-        this.desc = new RepairJobDesc(session.parentRepairSession, session.getId(), session.keyspace, columnFamily, session.commonRange.ranges);
         this.taskExecutor = session.taskExecutor;
         this.parallelismDegree = session.parallelismDegree;
+        this.desc = new RepairJobDesc(session.state.parentRepairSession, session.getId(), session.state.keyspace, columnFamily, session.state.commonRange.ranges);
+        this.state = new JobState(desc, session.state.commonRange.endpoints);
     }
 
     public int getNowInSeconds()
@@ -94,74 +114,114 @@
      * This sets up necessary task and runs them on given {@code taskExecutor}.
      * After submitting all tasks, waits until validation with replica completes.
      */
-    @SuppressWarnings("UnstableApiUsage")
     public void run()
     {
+        state.phase.start();
         Keyspace ks = Keyspace.open(desc.keyspace);
         ColumnFamilyStore cfs = ks.getColumnFamilyStore(desc.columnFamily);
         cfs.metric.repairsStarted.inc();
-        List<InetAddressAndPort> allEndpoints = new ArrayList<>(session.commonRange.endpoints);
+        List<InetAddressAndPort> allEndpoints = new ArrayList<>(session.state.commonRange.endpoints);
         allEndpoints.add(FBUtilities.getBroadcastAddressAndPort());
 
-        ListenableFuture<List<TreeResponse>> validations;
+        Future<List<TreeResponse>> treeResponses;
+        Future<Void> paxosRepair;
+        if (paxosRepairEnabled() && ((useV2() && session.repairPaxos) || session.paxosOnly))
+        {
+            logger.info("{} {}.{} starting paxos repair", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
+            TableMetadata metadata = Schema.instance.getTableMetadata(desc.keyspace, desc.columnFamily);
+            paxosRepair = PaxosCleanup.cleanup(allEndpoints, metadata, desc.ranges, session.state.commonRange.hasSkippedReplicas, taskExecutor);
+        }
+        else
+        {
+            logger.info("{} {}.{} not running paxos repair", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
+            paxosRepair = ImmediateFuture.success(null);
+        }
+
+        if (session.paxosOnly)
+        {
+            paxosRepair.addCallback(new FutureCallback<Void>()
+            {
+                public void onSuccess(Void v)
+                {
+                    logger.info("{} {}.{} paxos repair completed", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
+                    trySuccess(new RepairResult(desc, Collections.emptyList()));
+                }
+
+                /**
+                 * Paxos repair failures are handled here
+                 */
+                public void onFailure(Throwable t)
+                {
+                    logger.warn("{} {}.{} paxos repair failed", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
+                    tryFailure(t);
+                }
+            }, taskExecutor);
+            return;
+        }
+
         // Create a snapshot at all nodes unless we're using pure parallel repairs
         if (parallelismDegree != RepairParallelism.PARALLEL)
         {
-            ListenableFuture<List<InetAddressAndPort>> allSnapshotTasks;
+            Future<?> allSnapshotTasks;
             if (session.isIncremental)
             {
                 // consistent repair does it's own "snapshotting"
-                allSnapshotTasks = Futures.immediateFuture(allEndpoints);
+                allSnapshotTasks = paxosRepair.map(input -> allEndpoints);
             }
             else
             {
                 // Request snapshot to all replica
-                List<ListenableFuture<InetAddressAndPort>> snapshotTasks = new ArrayList<>(allEndpoints.size());
-                for (InetAddressAndPort endpoint : allEndpoints)
-                {
-                    SnapshotTask snapshotTask = new SnapshotTask(desc, endpoint);
-                    snapshotTasks.add(snapshotTask);
-                    taskExecutor.execute(snapshotTask);
-                }
-                allSnapshotTasks = Futures.allAsList(snapshotTasks);
+                allSnapshotTasks = paxosRepair.flatMap(input -> {
+                    List<Future<InetAddressAndPort>> snapshotTasks = new ArrayList<>(allEndpoints.size());
+                    state.phase.snapshotsSubmitted();
+                    for (InetAddressAndPort endpoint : allEndpoints)
+                    {
+                        SnapshotTask snapshotTask = new SnapshotTask(desc, endpoint);
+                        snapshotTasks.add(snapshotTask);
+                        taskExecutor.execute(snapshotTask);
+                    }
+                    return FutureCombiner.allOf(snapshotTasks).map(a -> {
+                        state.phase.snapshotsCompleted();
+                        return a;
+                    });
+                });
             }
 
             // When all snapshot complete, send validation requests
-            validations = Futures.transformAsync(allSnapshotTasks, new AsyncFunction<List<InetAddressAndPort>, List<TreeResponse>>()
-            {
-                public ListenableFuture<List<TreeResponse>> apply(List<InetAddressAndPort> endpoints)
-                {
-                    if (parallelismDegree == RepairParallelism.SEQUENTIAL)
-                        return sendSequentialValidationRequest(endpoints);
-                    else
-                        return sendDCAwareValidationRequest(endpoints);
-                }
-            }, taskExecutor);
+            treeResponses = allSnapshotTasks.flatMap(endpoints -> {
+                if (parallelismDegree == RepairParallelism.SEQUENTIAL)
+                    return sendSequentialValidationRequest(allEndpoints);
+                else
+                    return sendDCAwareValidationRequest(allEndpoints);
+                }, taskExecutor);
         }
         else
         {
             // If not sequential, just send validation request to all replica
-            validations = sendValidationRequest(allEndpoints);
+            treeResponses = paxosRepair.flatMap(input -> sendValidationRequest(allEndpoints));
         }
+        treeResponses = treeResponses.map(a -> {
+            state.phase.validationCompleted();
+            return a;
+        });
 
         // When all validations complete, submit sync tasks
-        ListenableFuture<List<SyncStat>> syncResults = Futures.transformAsync(validations,
-                                                                              session.optimiseStreams && !session.pullRepair ? this::optimisedSyncing : this::standardSyncing,
-                                                                              taskExecutor);
+        Future<List<SyncStat>> syncResults = treeResponses.flatMap(session.optimiseStreams && !session.pullRepair ? this::optimisedSyncing : this::standardSyncing, taskExecutor);
 
         // When all sync complete, set the final result
-        Futures.addCallback(syncResults, new FutureCallback<List<SyncStat>>()
+        syncResults.addCallback(new FutureCallback<List<SyncStat>>()
         {
             @Override
             public void onSuccess(List<SyncStat> stats)
             {
+                state.phase.success();
                 if (!session.previewKind.isPreview())
                 {
                     logger.info("{} {}.{} is fully synced", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
                     SystemDistributedKeyspace.successfulRepairJob(session.getId(), desc.keyspace, desc.columnFamily);
                 }
                 cfs.metric.repairsCompleted.inc();
-                set(new RepairResult(desc, stats));
+                trySuccess(new RepairResult(desc, stats));
             }
 
             /**
@@ -170,6 +230,9 @@
             @Override
             public void onFailure(Throwable t)
             {
+                state.phase.fail(t);
+                // Make sure all validation tasks have cleaned up the off-heap Merkle trees they might contain.
+                validationTasks.forEach(ValidationTask::abort);
                 syncTasks.forEach(SyncTask::abort);
 
                 if (!session.previewKind.isPreview())
@@ -178,18 +241,21 @@
                     SystemDistributedKeyspace.failedRepairJob(session.getId(), desc.keyspace, desc.columnFamily, t);
                 }
                 cfs.metric.repairsCompleted.inc();
-                setException(t);
+                tryFailure(t instanceof NoSuchRepairSessionExceptionWrapper
+                           ? ((NoSuchRepairSessionExceptionWrapper) t).wrapped
+                           : t);
             }
         }, taskExecutor);
     }
 
     private boolean isTransient(InetAddressAndPort ep)
     {
-        return session.commonRange.transEndpoints.contains(ep);
+        return session.state.commonRange.transEndpoints.contains(ep);
     }
 
-    private ListenableFuture<List<SyncStat>> standardSyncing(List<TreeResponse> trees)
+    private Future<List<SyncStat>> standardSyncing(List<TreeResponse> trees)
     {
+        state.phase.streamSubmitted();
         List<SyncTask> syncTasks = createStandardSyncTasks(desc,
                                                            trees,
                                                            FBUtilities.getLocalAddressAndPort(),
@@ -208,7 +274,7 @@
                                                   boolean pullRepair,
                                                   PreviewKind previewKind)
     {
-        long startedAt = System.currentTimeMillis();
+        long startedAt = currentTimeMillis();
         List<SyncTask> syncTasks = new ArrayList<>();
         // We need to difference all trees one against another
         for (int i = 0; i < trees.size() - 1; ++i)
@@ -263,12 +329,13 @@
         }
         trees.get(trees.size() - 1).trees.release();
         logger.info("Created {} sync tasks based on {} merkle tree responses for {} (took: {}ms)",
-                    syncTasks.size(), trees.size(), desc.parentSessionId, System.currentTimeMillis() - startedAt);
+                    syncTasks.size(), trees.size(), desc.parentSessionId, currentTimeMillis() - startedAt);
         return syncTasks;
     }
 
-    private ListenableFuture<List<SyncStat>> optimisedSyncing(List<TreeResponse> trees)
+    private Future<List<SyncStat>> optimisedSyncing(List<TreeResponse> trees)
     {
+        state.phase.streamSubmitted();
         List<SyncTask> syncTasks = createOptimisedSyncingSyncTasks(desc,
                                                                    trees,
                                                                    FBUtilities.getLocalAddressAndPort(),
@@ -280,22 +347,42 @@
         return executeTasks(syncTasks);
     }
 
-    @SuppressWarnings("UnstableApiUsage")
     @VisibleForTesting
-    ListenableFuture<List<SyncStat>> executeTasks(List<SyncTask> tasks)
+    Future<List<SyncStat>> executeTasks(List<SyncTask> tasks)
     {
-        // this throws if the parent session has failed
-        ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId);
-        syncTasks.addAll(tasks);
-
-        for (SyncTask task : tasks)
+        try
         {
-            if (!task.isLocal())
-                session.trackSyncCompletion(Pair.create(desc, task.nodePair()), (CompletableRemoteSyncTask) task);
-            taskExecutor.submit(task);
-        }
+            ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId);
+            syncTasks.addAll(tasks);
 
-        return Futures.allAsList(tasks);
+            for (SyncTask task : tasks)
+            {
+                if (!task.isLocal())
+                    session.trackSyncCompletion(Pair.create(desc, task.nodePair()), (CompletableRemoteSyncTask) task);
+                taskExecutor.execute(task);
+            }
+
+            return FutureCombiner.allOf(tasks);
+        }
+        catch (NoSuchRepairSessionException e)
+        {
+            return ImmediateFuture.failure(new NoSuchRepairSessionExceptionWrapper(e));
+        }
+    }
+
+    // Provided so we can throw NoSuchRepairSessionException from executeTasks without
+    // making it unchecked. Required because executeTasks is called from standardSyncing/
+    // optimisedSyncing, which are passed as a Function to transform merkle tree responses
+    // and so cannot throw checked exceptions. The wrapper is unwrapped in the onFailure
+    // callback of that transformation so as not to pollute the checked usage of
+    // NoSuchRepairSessionException in the rest of the codebase.
+    private static class NoSuchRepairSessionExceptionWrapper extends RuntimeException
+    {
+        private final NoSuchRepairSessionException wrapped;
+        private NoSuchRepairSessionExceptionWrapper(NoSuchRepairSessionException wrapped)
+        {
+            this.wrapped = wrapped;
+        }
     }
 
     static List<SyncTask> createOptimisedSyncingSyncTasks(RepairJobDesc desc,
@@ -306,7 +393,7 @@
                                                           boolean isIncremental,
                                                           PreviewKind previewKind)
     {
-        long startedAt = System.currentTimeMillis();
+        long startedAt = currentTimeMillis();
         List<SyncTask> syncTasks = new ArrayList<>();
         // We need to difference all trees one against another
         DifferenceHolder diffHolder = new DifferenceHolder(trees);
@@ -357,7 +444,7 @@
             }
         }
         logger.info("Created {} optimised sync tasks based on {} merkle tree responses for {} (took: {}ms)",
-                    syncTasks.size(), trees.size(), desc.parentSessionId, System.currentTimeMillis() - startedAt);
+                    syncTasks.size(), trees.size(), desc.parentSessionId, currentTimeMillis() - startedAt);
         logger.trace("Optimised sync tasks for {}: {}", desc.parentSessionId, syncTasks);
         return syncTasks;
     }
@@ -373,38 +460,39 @@
      * @param endpoints Endpoint addresses to send validation request
      * @return Future that can get all {@link TreeResponse} from replica, if all validation succeed.
      */
-    @SuppressWarnings("UnstableApiUsage")
-    private ListenableFuture<List<TreeResponse>> sendValidationRequest(Collection<InetAddressAndPort> endpoints)
+    private Future<List<TreeResponse>> sendValidationRequest(Collection<InetAddressAndPort> endpoints)
     {
+        state.phase.validationSubmitted();
         String message = String.format("Requesting merkle trees for %s (to %s)", desc.columnFamily, endpoints);
         logger.info("{} {}", session.previewKind.logPrefix(desc.sessionId), message);
         Tracing.traceRepair(message);
         int nowInSec = getNowInSeconds();
-        List<ListenableFuture<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
+        List<Future<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
         for (InetAddressAndPort endpoint : endpoints)
         {
-            ValidationTask task = new ValidationTask(desc, endpoint, nowInSec, session.previewKind);
+            ValidationTask task = newValidationTask(endpoint, nowInSec);
             tasks.add(task);
             session.trackValidationCompletion(Pair.create(desc, endpoint), task);
             taskExecutor.execute(task);
         }
-        return Futures.allAsList(tasks);
+        return FutureCombiner.allOf(tasks);
     }
 
     /**
      * Creates {@link ValidationTask} and submits them to the task executor so that tasks run sequentially.
      */
-    private ListenableFuture<List<TreeResponse>> sendSequentialValidationRequest(Collection<InetAddressAndPort> endpoints)
+    private Future<List<TreeResponse>> sendSequentialValidationRequest(Collection<InetAddressAndPort> endpoints)
     {
+        state.phase.validationSubmitted();
         String message = String.format("Requesting merkle trees for %s (to %s)", desc.columnFamily, endpoints);
         logger.info("{} {}", session.previewKind.logPrefix(desc.sessionId), message);
         Tracing.traceRepair(message);
         int nowInSec = getNowInSeconds();
-        List<ListenableFuture<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
+        List<Future<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
 
         Queue<InetAddressAndPort> requests = new LinkedList<>(endpoints);
         InetAddressAndPort address = requests.poll();
-        ValidationTask firstTask = new ValidationTask(desc, address, nowInSec, session.previewKind);
+        ValidationTask firstTask = newValidationTask(address, nowInSec);
         logger.info("{} Validating {}", session.previewKind.logPrefix(desc.sessionId), address);
         session.trackValidationCompletion(Pair.create(desc, address), firstTask);
         tasks.add(firstTask);
@@ -412,9 +500,9 @@
         while (requests.size() > 0)
         {
             final InetAddressAndPort nextAddress = requests.poll();
-            final ValidationTask nextTask = new ValidationTask(desc, nextAddress, nowInSec, session.previewKind);
+            final ValidationTask nextTask = newValidationTask(nextAddress, nowInSec);
             tasks.add(nextTask);
-            Futures.addCallback(currentTask, new FutureCallback<TreeResponse>()
+            currentTask.addCallback(new FutureCallback<TreeResponse>()
             {
                 public void onSuccess(TreeResponse result)
                 {
@@ -425,24 +513,25 @@
 
                 // failure is handled at root of job chain
                 public void onFailure(Throwable t) {}
-            }, MoreExecutors.directExecutor());
+            });
             currentTask = nextTask;
         }
         // start running tasks
         taskExecutor.execute(firstTask);
-        return Futures.allAsList(tasks);
+        return FutureCombiner.allOf(tasks);
     }
 
     /**
      * Creates {@link ValidationTask} and submits them to the task executor so that tasks run sequentially within each dc.
      */
-    private ListenableFuture<List<TreeResponse>> sendDCAwareValidationRequest(Collection<InetAddressAndPort> endpoints)
+    private Future<List<TreeResponse>> sendDCAwareValidationRequest(Collection<InetAddressAndPort> endpoints)
     {
+        state.phase.validationSubmitted();
         String message = String.format("Requesting merkle trees for %s (to %s)", desc.columnFamily, endpoints);
         logger.info("{} {}", session.previewKind.logPrefix(desc.sessionId), message);
         Tracing.traceRepair(message);
         int nowInSec = getNowInSeconds();
-        List<ListenableFuture<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
+        List<Future<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
 
         Map<String, Queue<InetAddressAndPort>> requestsByDatacenter = new HashMap<>();
         for (InetAddressAndPort endpoint : endpoints)
@@ -461,7 +550,7 @@
         {
             Queue<InetAddressAndPort> requests = entry.getValue();
             InetAddressAndPort address = requests.poll();
-            ValidationTask firstTask = new ValidationTask(desc, address, nowInSec, session.previewKind);
+            ValidationTask firstTask = newValidationTask(address, nowInSec);
             logger.info("{} Validating {}", session.previewKind.logPrefix(session.getId()), address);
             session.trackValidationCompletion(Pair.create(desc, address), firstTask);
             tasks.add(firstTask);
@@ -469,9 +558,9 @@
             while (requests.size() > 0)
             {
                 final InetAddressAndPort nextAddress = requests.poll();
-                final ValidationTask nextTask = new ValidationTask(desc, nextAddress, nowInSec, session.previewKind);
+                final ValidationTask nextTask = newValidationTask(nextAddress, nowInSec);
                 tasks.add(nextTask);
-                Futures.addCallback(currentTask, new FutureCallback<TreeResponse>()
+                currentTask.addCallback(new FutureCallback<TreeResponse>()
                 {
                     public void onSuccess(TreeResponse result)
                     {
@@ -482,12 +571,19 @@
 
                     // failure is handled at root of job chain
                     public void onFailure(Throwable t) {}
-                }, MoreExecutors.directExecutor());
+                });
                 currentTask = nextTask;
             }
             // start running tasks
             taskExecutor.execute(firstTask);
         }
-        return Futures.allAsList(tasks);
+        return FutureCombiner.allOf(tasks);
     }
-}
\ No newline at end of file
+
+    private ValidationTask newValidationTask(InetAddressAndPort endpoint, int nowInSec)
+    {
+        ValidationTask task = new ValidationTask(desc, endpoint, nowInSec, session.previewKind);
+        validationTasks.add(task);
+        return task;
+    }
+}
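
The NoSuchRepairSessionExceptionWrapper above exists only to tunnel a checked exception through a future transformation that cannot declare it, and to unwrap it again in the job's onFailure callback. Below is a minimal standalone sketch of that same wrap/unwrap pattern, using the JDK's CompletableFuture instead of Cassandra's Future so it compiles on its own; every class and method name in it is illustrative, not part of this patch.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionException;

    // Standalone illustration of the wrap/unwrap pattern described above.
    public class CheckedWrapperSketch
    {
        // stands in for NoSuchRepairSessionException
        static class NoSuchSessionException extends Exception {}

        // unchecked carrier so the exception can escape a lambda that cannot throw checked exceptions
        static class Wrapper extends RuntimeException
        {
            final NoSuchSessionException wrapped;
            Wrapper(NoSuchSessionException wrapped) { this.wrapped = wrapped; }
        }

        // stands in for executeTasks: fails the future with the wrapper instead of throwing
        static CompletableFuture<String> executeTasks(boolean sessionExists)
        {
            if (!sessionExists)
                return CompletableFuture.failedFuture(new Wrapper(new NoSuchSessionException()));
            return CompletableFuture.completedFuture("synced");
        }

        public static void main(String[] args)
        {
            executeTasks(false).whenComplete((result, failure) -> {
                if (failure == null)
                    return;
                // unwrap at the end of the chain so callers see the original checked exception
                Throwable cause = failure instanceof CompletionException ? failure.getCause() : failure;
                Throwable reported = cause instanceof Wrapper ? ((Wrapper) cause).wrapped : cause;
                System.out.println("repair job failed with: " + reported.getClass().getSimpleName());
            });
        }
    }

The failure surfaced to the callback is the original checked exception, not the carrier, which is the point of unwrapping in onFailure.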
diff --git a/src/java/org/apache/cassandra/repair/RepairJobDesc.java b/src/java/org/apache/cassandra/repair/RepairJobDesc.java
index 4aaf655..c6b1898 100644
--- a/src/java/org/apache/cassandra/repair/RepairJobDesc.java
+++ b/src/java/org/apache/cassandra/repair/RepairJobDesc.java
@@ -18,12 +18,15 @@
 package org.apache.cassandra.repair;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.UUID;
 
 import com.google.common.base.Objects;
 
+import org.apache.commons.lang3.ArrayUtils;
+
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.dht.IPartitioner;
@@ -32,9 +35,11 @@
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.streaming.PreviewKind;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.cassandra.utils.ByteBufferUtil.getArray;
 
 /**
  * RepairJobDesc is used from various repair processes to distinguish one RepairJob to another.
@@ -45,15 +50,15 @@
 {
     public static final IVersionedSerializer<RepairJobDesc> serializer = new RepairJobDescSerializer();
 
-    public final UUID parentSessionId;
+    public final TimeUUID parentSessionId;
     /** RepairSession id */
-    public final UUID sessionId;
+    public final TimeUUID sessionId;
     public final String keyspace;
     public final String columnFamily;
     /** repairing range  */
     public final Collection<Range<Token>> ranges;
 
-    public RepairJobDesc(UUID parentSessionId, UUID sessionId, String keyspace, String columnFamily, Collection<Range<Token>> ranges)
+    public RepairJobDesc(TimeUUID parentSessionId, TimeUUID sessionId, String keyspace, String columnFamily, Collection<Range<Token>> ranges)
     {
         this.parentSessionId = parentSessionId;
         this.sessionId = sessionId;
@@ -62,6 +67,16 @@
         this.ranges = ranges;
     }
 
+    public UUID determanisticId()
+    {
+        byte[] bytes = getArray(bytes(parentSessionId));
+        bytes = ArrayUtils.addAll(bytes, getArray(bytes(sessionId)));
+        bytes = ArrayUtils.addAll(bytes, keyspace.getBytes(StandardCharsets.UTF_8));
+        bytes = ArrayUtils.addAll(bytes, columnFamily.getBytes(StandardCharsets.UTF_8));
+        bytes = ArrayUtils.addAll(bytes, ranges.toString().getBytes(StandardCharsets.UTF_8));
+        return UUID.nameUUIDFromBytes(bytes);
+    }
+
     @Override
     public String toString()
     {
@@ -102,9 +117,9 @@
         {
             out.writeBoolean(desc.parentSessionId != null);
             if (desc.parentSessionId != null)
-                UUIDSerializer.serializer.serialize(desc.parentSessionId, out, version);
+                desc.parentSessionId.serialize(out);
 
-            UUIDSerializer.serializer.serialize(desc.sessionId, out, version);
+            desc.sessionId.serialize(out);
             out.writeUTF(desc.keyspace);
             out.writeUTF(desc.columnFamily);
             IPartitioner.validate(desc.ranges);
@@ -115,10 +130,10 @@
 
         public RepairJobDesc deserialize(DataInputPlus in, int version) throws IOException
         {
-            UUID parentSessionId = null;
+            TimeUUID parentSessionId = null;
             if (in.readBoolean())
-                parentSessionId = UUIDSerializer.serializer.deserialize(in, version);
-            UUID sessionId = UUIDSerializer.serializer.deserialize(in, version);
+                parentSessionId = TimeUUID.deserialize(in);
+            TimeUUID sessionId = TimeUUID.deserialize(in);
             String keyspace = in.readUTF();
             String columnFamily = in.readUTF();
 
@@ -140,8 +155,8 @@
         {
             int size = TypeSizes.sizeof(desc.parentSessionId != null);
             if (desc.parentSessionId != null)
-                size += UUIDSerializer.serializer.serializedSize(desc.parentSessionId, version);
-            size += UUIDSerializer.serializer.serializedSize(desc.sessionId, version);
+                size += TimeUUID.sizeInBytes();
+            size += TimeUUID.sizeInBytes();
             size += TypeSizes.sizeof(desc.keyspace);
             size += TypeSizes.sizeof(desc.columnFamily);
             size += TypeSizes.sizeof(desc.ranges.size());
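
The new determanisticId() above derives a name-based UUID from the descriptor's fields, so the same RepairJobDesc always maps to the same id. A simplified, self-contained sketch of that idea follows; the field names and the '\0' separator are illustrative (the real method concatenates the raw serialized bytes with ArrayUtils.addAll).

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    // Same inputs always hash to the same name-based UUID.
    public class DeterministicIdSketch
    {
        static UUID idFor(String keyspace, String table, String ranges)
        {
            byte[] bytes = (keyspace + '\0' + table + '\0' + ranges).getBytes(StandardCharsets.UTF_8);
            return UUID.nameUUIDFromBytes(bytes); // type 3 (MD5 name-based) UUID
        }

        public static void main(String[] args)
        {
            UUID first = idFor("ks", "tbl", "[(-9223372036854775808,0]]");
            UUID second = idFor("ks", "tbl", "[(-9223372036854775808,0]]");
            System.out.println(first.equals(second)); // true: deterministic for identical descriptors
        }
    }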
diff --git a/src/java/org/apache/cassandra/repair/RepairMessageVerbHandler.java b/src/java/org/apache/cassandra/repair/RepairMessageVerbHandler.java
index 7488f2e..58612f7 100644
--- a/src/java/org/apache/cassandra/repair/RepairMessageVerbHandler.java
+++ b/src/java/org/apache/cassandra/repair/RepairMessageVerbHandler.java
@@ -28,9 +28,12 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.repair.messages.*;
+import org.apache.cassandra.repair.state.ParticipateState;
+import org.apache.cassandra.repair.state.ValidationState;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.net.Verb.VALIDATION_RSP;
 
@@ -45,12 +48,12 @@
 
     private static final Logger logger = LoggerFactory.getLogger(RepairMessageVerbHandler.class);
 
-    private boolean isIncremental(UUID sessionID)
+    private boolean isIncremental(TimeUUID sessionID)
     {
         return ActiveRepairService.instance.consistent.local.isSessionInProgress(sessionID);
     }
 
-    private PreviewKind previewKind(UUID sessionID)
+    private PreviewKind previewKind(TimeUUID sessionID) throws NoSuchRepairSessionException
     {
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
         return prs != null ? prs.previewKind : PreviewKind.NONE;
@@ -65,12 +68,19 @@
             switch (message.verb())
             {
                 case PREPARE_MSG:
+                {
                     PrepareMessage prepareMessage = (PrepareMessage) message.payload;
                     logger.debug("Preparing, {}", prepareMessage);
-
+                    ParticipateState state = new ParticipateState(message.from(), prepareMessage);
+                    if (!ActiveRepairService.instance.register(state))
+                    {
+                        logger.debug("Duplicate prepare message found for {}", state.id);
+                        return;
+                    }
                     if (!ActiveRepairService.verifyCompactionsPendingThreshold(prepareMessage.parentRepairSession, prepareMessage.previewKind))
                     {
                         // error is logged in verifyCompactionsPendingThreshold
+                        state.phase.fail("Too many pending compactions");
                         sendFailureResponse(message);
                         return;
                     }
@@ -81,8 +91,10 @@
                         ColumnFamilyStore columnFamilyStore = ColumnFamilyStore.getIfExists(tableId);
                         if (columnFamilyStore == null)
                         {
-                            logErrorAndSendFailureResponse(String.format("Table with id %s was dropped during prepare phase of repair",
-                                                                         tableId), message);
+                            String reason = String.format("Table with id %s was dropped during prepare phase of repair",
+                                                          tableId);
+                            state.phase.fail(reason);
+                            logErrorAndSendFailureResponse(reason, message);
                             return;
                         }
                         columnFamilyStores.add(columnFamilyStore);
@@ -92,19 +104,29 @@
                                                                              columnFamilyStores,
                                                                              prepareMessage.ranges,
                                                                              prepareMessage.isIncremental,
-                                                                             prepareMessage.timestamp,
+                                                                             prepareMessage.repairedAt,
                                                                              prepareMessage.isGlobal,
                                                                              prepareMessage.previewKind);
                     MessagingService.instance().send(message.emptyResponse(), message.from());
+                }
                     break;
 
                 case SNAPSHOT_MSG:
+                {
                     logger.debug("Snapshotting {}", desc);
+                    ParticipateState state = ActiveRepairService.instance.participate(desc.parentSessionId);
+                    if (state == null)
+                    {
+                        logErrorAndSendFailureResponse("Unknown repair " + desc.parentSessionId, message);
+                        return;
+                    }
                     final ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(desc.keyspace, desc.columnFamily);
                     if (cfs == null)
                     {
-                        logErrorAndSendFailureResponse(String.format("Table %s.%s was dropped during snapshot phase of repair %s",
-                                                                     desc.keyspace, desc.columnFamily, desc.parentSessionId), message);
+                        String reason = String.format("Table %s.%s was dropped during snapshot phase of repair %s",
+                                                      desc.keyspace, desc.columnFamily, desc.parentSessionId);
+                        state.phase.fail(reason);
+                        logErrorAndSendFailureResponse(reason, message);
                         return;
                     }
 
@@ -121,30 +143,70 @@
                     }
                     logger.debug("Enqueuing response to snapshot request {} to {}", desc.sessionId, message.from());
                     MessagingService.instance().send(message.emptyResponse(), message.from());
+                }
                     break;
 
                 case VALIDATION_REQ:
+                {
                     // notify initiator that the message has been received, allowing this method to take as long as it needs to
                     MessagingService.instance().send(message.emptyResponse(), message.from());
                     ValidationRequest validationRequest = (ValidationRequest) message.payload;
                     logger.debug("Validating {}", validationRequest);
-                    // trigger read-only compaction
-                    ColumnFamilyStore store = ColumnFamilyStore.getIfExists(desc.keyspace, desc.columnFamily);
-                    if (store == null)
+
+                    ParticipateState participate = ActiveRepairService.instance.participate(desc.parentSessionId);
+                    if (participate == null)
                     {
-                        logger.error("Table {}.{} was dropped during snapshot phase of repair {}",
-                                     desc.keyspace, desc.columnFamily, desc.parentSessionId);
-                        MessagingService.instance().send(Message.out(VALIDATION_RSP, new ValidationResponse(desc)), message.from());
+                        logErrorAndSendFailureResponse("Unknown repair " + desc.parentSessionId, message);
                         return;
                     }
 
-                    ActiveRepairService.instance.consistent.local.maybeSetRepairing(desc.parentSessionId);
-                    Validator validator = new Validator(desc, message.from(), validationRequest.nowInSec,
-                                                        isIncremental(desc.parentSessionId), previewKind(desc.parentSessionId));
-                    ValidationManager.instance.submitValidation(store, validator);
+                    ValidationState vState = new ValidationState(desc, message.from());
+                    if (!participate.register(vState))
+                    {
+                        logger.debug("Duplicate validation message found for parent={}, validation={}", participate.id, vState.id);
+                        return;
+                    }
+                    try
+                    {
+                        // trigger read-only compaction
+                        ColumnFamilyStore store = ColumnFamilyStore.getIfExists(desc.keyspace, desc.columnFamily);
+                        if (store == null)
+                        {
+                            logger.error("Table {}.{} was dropped during validation phase of repair {}",
+                                         desc.keyspace, desc.columnFamily, desc.parentSessionId);
+                            vState.phase.fail(String.format("Table %s.%s was dropped", desc.keyspace, desc.columnFamily));
+                            MessagingService.instance().send(Message.out(VALIDATION_RSP, new ValidationResponse(desc)), message.from());
+                            return;
+                        }
+
+                        ActiveRepairService.instance.consistent.local.maybeSetRepairing(desc.parentSessionId);
+                        PreviewKind previewKind;
+                        try
+                        {
+                            previewKind = previewKind(desc.parentSessionId);
+                        }
+                        catch (NoSuchRepairSessionException e)
+                        {
+                            logger.warn("Parent repair session {} has been removed, failing repair", desc.parentSessionId);
+                            vState.phase.fail(e);
+                            MessagingService.instance().send(Message.out(VALIDATION_RSP, new ValidationResponse(desc)), message.from());
+                            return;
+                        }
+
+                        Validator validator = new Validator(vState, validationRequest.nowInSec,
+                                                            isIncremental(desc.parentSessionId), previewKind);
+                        ValidationManager.instance.submitValidation(store, validator);
+                    }
+                    catch (Throwable t)
+                    {
+                        vState.phase.fail(t);
+                        throw t;
+                    }
+                }
                     break;
 
                 case SYNC_REQ:
+                {
                     // notify initiator that the message has been received, allowing this method to take as long as it needs to
                     MessagingService.instance().send(message.emptyResponse(), message.from());
                     // forwarded sync request
@@ -159,13 +221,19 @@
                                                                        request.previewKind,
                                                                        request.asymmetric);
                     task.run();
+                }
                     break;
 
                 case CLEANUP_MSG:
+                {
                     logger.debug("cleaning up repair");
                     CleanupMessage cleanup = (CleanupMessage) message.payload;
+                    ParticipateState state = ActiveRepairService.instance.participate(cleanup.parentRepairSession);
+                    if (state != null)
+                        state.phase.success("Cleanup message received");
                     ActiveRepairService.instance.removeParentRepairSession(cleanup.parentRepairSession);
                     MessagingService.instance().send(message.emptyResponse(), message.from());
+                }
                     break;
 
                 case PREPARE_CONSISTENT_REQ:
@@ -211,7 +279,12 @@
         {
             logger.error("Got error, removing parent repair session");
             if (desc != null && desc.parentSessionId != null)
+            {
+                ParticipateState participate = ActiveRepairService.instance.participate(desc.parentSessionId);
+                if (participate != null)
+                    participate.phase.fail(e);
                 ActiveRepairService.instance.removeParentRepairSession(desc.parentSessionId);
+            }
             throw new RuntimeException(e);
         }
     }
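
Several of the verb-handler branches above now follow the same "register the state, drop the message if it was already registered" idiom (PREPARE_MSG via ActiveRepairService.instance.register, VALIDATION_REQ via participate.register). A rough sketch of that duplicate-suppression pattern is below, assuming a simple putIfAbsent-backed registry; the types here are placeholders, not Cassandra's ParticipateState API.

    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Register once, ignore duplicates: only the first message for a repair id is handled.
    public class ParticipateRegistrySketch
    {
        private static final ConcurrentMap<UUID, Object> participants = new ConcurrentHashMap<>();

        /** Returns true only for the first registration of a given repair id. */
        static boolean register(UUID repairId, Object state)
        {
            return participants.putIfAbsent(repairId, state) == null;
        }

        public static void main(String[] args)
        {
            UUID repairId = UUID.randomUUID();
            System.out.println(register(repairId, new Object())); // true: first prepare message is handled
            System.out.println(register(repairId, new Object())); // false: duplicate is logged and dropped
        }
    }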
diff --git a/src/java/org/apache/cassandra/repair/RepairNotifier.java b/src/java/org/apache/cassandra/repair/RepairNotifier.java
new file mode 100644
index 0000000..977bc4e
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/RepairNotifier.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair;
+
+public interface RepairNotifier
+{
+    void notification(String message);
+    void notifyError(Throwable error);
+    void notifyProgress(String message);
+}
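
RepairNotifier pulls the notification surface out of RepairRunnable so repair tasks can report progress through the same three calls. As an illustration only (not part of the patch), a trivial implementation that forwards everything to slf4j might look like the following, assuming the Cassandra classes are on the classpath.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.cassandra.repair.RepairNotifier;

    // Minimal RepairNotifier that just logs each notification.
    public class LoggingRepairNotifier implements RepairNotifier
    {
        private static final Logger logger = LoggerFactory.getLogger(LoggingRepairNotifier.class);

        @Override
        public void notification(String message)
        {
            logger.info(message);
        }

        @Override
        public void notifyError(Throwable error)
        {
            logger.error("repair failed", error);
        }

        @Override
        public void notifyProgress(String message)
        {
            logger.info("progress: {}", message);
        }
    }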
diff --git a/src/java/org/apache/cassandra/repair/RepairRunnable.java b/src/java/org/apache/cassandra/repair/RepairRunnable.java
index 48a7e36..7607b04 100644
--- a/src/java/org/apache/cassandra/repair/RepairRunnable.java
+++ b/src/java/org/apache/cassandra/repair/RepairRunnable.java
@@ -22,15 +22,11 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -41,92 +37,81 @@
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
+
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.codahale.metrics.Timer;
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.metrics.RepairMetrics;
-import org.apache.cassandra.gms.FailureDetector;
-import org.apache.cassandra.repair.consistent.SyncStatSummary;
-import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.cql3.statements.SelectStatement;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RepairException;
+import org.apache.cassandra.gms.FailureDetector;
 import org.apache.cassandra.locator.EndpointsForRange;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.metrics.StorageMetrics;
-import org.apache.cassandra.repair.consistent.CoordinatorSession;
 import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.repair.state.CoordinatorState;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.ActiveRepairService.ParentRepairStatus;
 import org.apache.cassandra.service.ClientState;
-import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.tracing.TraceKeyspace;
 import org.apache.cassandra.tracing.TraceState;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.transport.messages.ResultMessage;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.DiagnosticSnapshotService;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.Throwables;
 import org.apache.cassandra.utils.WrappedRunnable;
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressEventNotifier;
 import org.apache.cassandra.utils.progress.ProgressEventType;
 import org.apache.cassandra.utils.progress.ProgressListener;
 
-public class RepairRunnable implements Runnable, ProgressEventNotifier
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.repair.state.AbstractState.COMPLETE;
+import static org.apache.cassandra.repair.state.AbstractState.INIT;
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public class RepairRunnable implements Runnable, ProgressEventNotifier, RepairNotifier
 {
     private static final Logger logger = LoggerFactory.getLogger(RepairRunnable.class);
 
+    private static final AtomicInteger THREAD_COUNTER = new AtomicInteger(1);
+
+    public final CoordinatorState state;
     private final StorageService storageService;
-    private final int cmd;
-    private final RepairOption options;
-    private final String keyspace;
 
     private final String tag;
-    private final AtomicInteger progressCounter = new AtomicInteger();
-    private final int totalProgress;
-
-    private final long creationTimeMillis = System.currentTimeMillis();
-    private final UUID parentSession = UUIDGen.getTimeUUID();
 
     private final List<ProgressListener> listeners = new ArrayList<>();
-
-    private static final AtomicInteger threadCounter = new AtomicInteger(1);
     private final AtomicReference<Throwable> firstError = new AtomicReference<>(null);
 
     private TraceState traceState;
 
     public RepairRunnable(StorageService storageService, int cmd, RepairOption options, String keyspace)
     {
+        this.state = new CoordinatorState(cmd, keyspace, options);
         this.storageService = storageService;
-        this.cmd = cmd;
-        this.options = options;
-        this.keyspace = keyspace;
 
         this.tag = "repair:" + cmd;
-        // get valid column families, calculate neighbors, validation, prepare for repair + number of ranges to repair
-        this.totalProgress = 4 + options.getRanges().size();
+        ActiveRepairService.instance.register(state);
     }
 
     @Override
@@ -150,42 +135,63 @@
         }
     }
 
+    @Override
     public void notification(String msg)
     {
         logger.info(msg);
-        fireProgressEvent(new ProgressEvent(ProgressEventType.NOTIFICATION, progressCounter.get(), totalProgress, msg));
+        fireProgressEvent(jmxEvent(ProgressEventType.NOTIFICATION, msg));
     }
 
-    private void skip(String msg)
-    {
-        notification("Repair " + parentSession + " skipped: " + msg);
-        success(msg);
-    }
-
-    private void success(String msg)
-    {
-        fireProgressEvent(new ProgressEvent(ProgressEventType.SUCCESS, progressCounter.get(), totalProgress, msg));
-        ActiveRepairService.instance.recordRepairStatus(cmd, ActiveRepairService.ParentRepairStatus.COMPLETED,
-                                                        ImmutableList.of(msg));
-        complete(null);
-    }
-
+    @Override
     public void notifyError(Throwable error)
     {
         // exception should be ignored
         if (error instanceof SomeRepairFailedException)
             return;
-        logger.error("Repair {} failed:", parentSession, error);
+
+        if (Throwables.anyCauseMatches(error, RepairException::shouldWarn))
+        {
+            logger.warn("Repair {} aborted: {}", state.id, error.getMessage());
+            if (logger.isDebugEnabled())
+                logger.debug("Repair {} aborted: ", state.id, error);
+        }
+        else
+        {
+            logger.error("Repair {} failed:", state.id, error);
+        }
 
         StorageMetrics.repairExceptions.inc();
-        String errorMessage = String.format("Repair command #%d failed with error %s", cmd, error.getMessage());
-        fireProgressEvent(new ProgressEvent(ProgressEventType.ERROR, progressCounter.get(), totalProgress, errorMessage));
+        String errorMessage = String.format("Repair command #%d failed with error %s", state.cmd, error.getMessage());
+        fireProgressEvent(jmxEvent(ProgressEventType.ERROR, errorMessage));
         firstError.compareAndSet(null, error);
 
         // since this can fail, update table only after updating in-memory and notification state
         maybeStoreParentRepairFailure(error);
     }
 
+    @Override
+    public void notifyProgress(String message)
+    {
+        logger.info(message);
+        fireProgressEvent(jmxEvent(ProgressEventType.PROGRESS, message));
+    }
+
+    private void skip(String msg)
+    {
+        state.phase.skip(msg);
+        notification("Repair " + state.id + " skipped: " + msg);
+        success(msg);
+    }
+
+    private void success(String msg)
+    {
+        state.phase.success(msg);
+        fireProgressEvent(jmxEvent(ProgressEventType.SUCCESS, msg));
+        ActiveRepairService.instance.recordRepairStatus(state.cmd, ActiveRepairService.ParentRepairStatus.COMPLETED,
+                                                        ImmutableList.of(msg));
+        complete(null);
+    }
+
     private void fail(String reason)
     {
         if (reason == null)
@@ -193,11 +199,12 @@
             Throwable error = firstError.get();
             reason = error != null ? error.getMessage() : "Some repair failed";
         }
-        String completionMessage = String.format("Repair command #%d finished with error", cmd);
+        state.phase.fail(reason);
+        String completionMessage = String.format("Repair command #%d finished with error", state.cmd);
 
         // Note we rely on the first message being the reason for the failure
         // when inspecting this state from RepairRunner.queryForCompletedRepair
-        ActiveRepairService.instance.recordRepairStatus(cmd, ParentRepairStatus.FAILED,
+        ActiveRepairService.instance.recordRepairStatus(state.cmd, ParentRepairStatus.FAILED,
                                                         ImmutableList.of(reason, completionMessage));
 
         complete(completionMessage);
@@ -205,23 +212,23 @@
 
     private void complete(String msg)
     {
-        long durationMillis = System.currentTimeMillis() - creationTimeMillis;
+        long durationMillis = state.getDurationMillis();
         if (msg == null)
         {
             String duration = DurationFormatUtils.formatDurationWords(durationMillis, true, true);
-            msg = String.format("Repair command #%d finished in %s", cmd, duration);
+            msg = String.format("Repair command #%d finished in %s", state.cmd, duration);
         }
 
-        fireProgressEvent(new ProgressEvent(ProgressEventType.COMPLETE, progressCounter.get(), totalProgress, msg));
-        logger.info(options.getPreviewKind().logPrefix(parentSession) + msg);
+        fireProgressEvent(jmxEvent(ProgressEventType.COMPLETE, msg));
+        logger.info(state.options.getPreviewKind().logPrefix(state.id) + msg);
 
-        ActiveRepairService.instance.removeParentRepairSession(parentSession);
+        ActiveRepairService.instance.removeParentRepairSession(state.id);
         TraceState localState = traceState;
-        if (options.isTraced() && localState != null)
+        if (state.options.isTraced() && localState != null)
         {
             for (ProgressListener listener : listeners)
                 localState.removeProgressListener(listener);
-            // Because DebuggableThreadPoolExecutor#afterExecute and this callback
+            // Because ExecutorPlus#afterExecute and this callback
             // run in a nondeterministic order (within the same thread), the
             // TraceState may have been nulled out at this point. The TraceState
             // should be traceState, so just set it without bothering to check if it
@@ -231,7 +238,7 @@
             Tracing.instance.stopSession();
         }
 
-        Keyspace.open(keyspace).metric.repairTime.update(durationMillis, TimeUnit.MILLISECONDS);
+        Keyspace.open(state.keyspace).metric.repairTime.update(durationMillis, TimeUnit.MILLISECONDS);
     }
 
     public void run()
@@ -253,16 +260,19 @@
 
     private void runMayThrow() throws Exception
     {
-        ActiveRepairService.instance.recordRepairStatus(cmd, ParentRepairStatus.IN_PROGRESS, ImmutableList.of());
+        state.phase.setup();
+        ActiveRepairService.instance.recordRepairStatus(state.cmd, ParentRepairStatus.IN_PROGRESS, ImmutableList.of());
 
         List<ColumnFamilyStore> columnFamilies = getColumnFamilies();
         String[] cfnames = columnFamilies.stream().map(cfs -> cfs.name).toArray(String[]::new);
 
         this.traceState = maybeCreateTraceState(columnFamilies);
-
         notifyStarting();
-
         NeighborsAndRanges neighborsAndRanges = getNeighborsAndRanges();
+        // We test that the start JMX notification is seen before we compute neighbors and ranges,
+        // but state (vtable) tracking relies on getNeighborsAndRanges to know where repair is running.
+        // JMX start != state start; it's possible we fail in getNeighborsAndRanges and state start is never reached
+        state.phase.start(columnFamilies, neighborsAndRanges);
 
         maybeStoreParentRepairStart(cfnames);
 
@@ -273,85 +283,81 @@
 
     private List<ColumnFamilyStore> getColumnFamilies() throws IOException
     {
-        String[] columnFamilies = options.getColumnFamilies().toArray(new String[options.getColumnFamilies().size()]);
-        Iterable<ColumnFamilyStore> validColumnFamilies = storageService.getValidColumnFamilies(false, false, keyspace, columnFamilies);
-        progressCounter.incrementAndGet();
+        String[] columnFamilies = state.options.getColumnFamilies().toArray(new String[state.options.getColumnFamilies().size()]);
+        Iterable<ColumnFamilyStore> validColumnFamilies = storageService.getValidColumnFamilies(false, false, state.keyspace, columnFamilies);
 
         if (Iterables.isEmpty(validColumnFamilies))
-            throw new SkipRepairException(String.format("%s Empty keyspace, skipping repair: %s", parentSession, keyspace));
+            throw new SkipRepairException(String.format("%s Empty keyspace, skipping repair: %s", state.id, state.keyspace));
         return Lists.newArrayList(validColumnFamilies);
     }
 
     private TraceState maybeCreateTraceState(Iterable<ColumnFamilyStore> columnFamilyStores)
     {
-        if (!options.isTraced())
+        if (!state.options.isTraced())
             return null;
 
         StringBuilder cfsb = new StringBuilder();
         for (ColumnFamilyStore cfs : columnFamilyStores)
             cfsb.append(", ").append(cfs.keyspace.getName()).append(".").append(cfs.name);
 
-        UUID sessionId = Tracing.instance.newSession(Tracing.TraceType.REPAIR);
-        TraceState traceState = Tracing.instance.begin("repair", ImmutableMap.of("keyspace", keyspace, "columnFamilies",
+        TimeUUID sessionId = Tracing.instance.newSession(Tracing.TraceType.REPAIR);
+        TraceState traceState = Tracing.instance.begin("repair", ImmutableMap.of("keyspace", state.keyspace, "columnFamilies",
                                                                                  cfsb.substring(2)));
         traceState.enableActivityNotification(tag);
         for (ProgressListener listener : listeners)
             traceState.addProgressListener(listener);
-        Thread queryThread = createQueryThread(cmd, sessionId);
+        Thread queryThread = createQueryThread(sessionId);
         queryThread.setName("RepairTracePolling");
-        queryThread.start();
         return traceState;
     }
 
     private void notifyStarting()
     {
-        String message = String.format("Starting repair command #%d (%s), repairing keyspace %s with %s", cmd, parentSession, keyspace,
-                                       options);
+        String message = String.format("Starting repair command #%d (%s), repairing keyspace %s with %s", state.cmd, state.id, state.keyspace,
+                                       state.options);
         logger.info(message);
         Tracing.traceRepair(message);
-        fireProgressEvent(new ProgressEvent(ProgressEventType.START, 0, 100, message));
+        fireProgressEvent(jmxEvent(ProgressEventType.START, message));
     }
 
-    private NeighborsAndRanges getNeighborsAndRanges()
+    private NeighborsAndRanges getNeighborsAndRanges() throws RepairException
     {
         Set<InetAddressAndPort> allNeighbors = new HashSet<>();
         List<CommonRange> commonRanges = new ArrayList<>();
 
         //pre-calculate output of getLocalReplicas and pass it to getNeighbors to increase performance and prevent
         //calculation multiple times
-        Iterable<Range<Token>> keyspaceLocalRanges = storageService.getLocalReplicas(keyspace).ranges();
+        Iterable<Range<Token>> keyspaceLocalRanges = storageService.getLocalReplicas(state.keyspace).ranges();
 
-        for (Range<Token> range : options.getRanges())
+        for (Range<Token> range : state.options.getRanges())
         {
-            EndpointsForRange neighbors = ActiveRepairService.getNeighbors(keyspace, keyspaceLocalRanges, range,
-                                                                           options.getDataCenters(),
-                                                                           options.getHosts());
+            EndpointsForRange neighbors = ActiveRepairService.getNeighbors(state.keyspace, keyspaceLocalRanges, range,
+                                                                           state.options.getDataCenters(),
+                                                                           state.options.getHosts());
             if (neighbors.isEmpty())
             {
-                if (options.ignoreUnreplicatedKeyspaces())
+                if (state.options.ignoreUnreplicatedKeyspaces())
                 {
-                    logger.info("{} Found no neighbors for range {} for {} - ignoring since repairing with --ignore-unreplicated-keyspaces", parentSession, range, keyspace);
+                    logger.info("{} Found no neighbors for range {} for {} - ignoring since repairing with --ignore-unreplicated-keyspaces", state.id, range, state.keyspace);
                     continue;
                 }
                 else
                 {
-                    throw new RuntimeException(String.format("Nothing to repair for %s in %s - aborting", range, keyspace));
+                    throw RepairException.warn(String.format("Nothing to repair for %s in %s - aborting", range, state.keyspace));
                 }
             }
             addRangeToNeighbors(commonRanges, range, neighbors);
             allNeighbors.addAll(neighbors.endpoints());
         }
 
-        if (options.ignoreUnreplicatedKeyspaces() && allNeighbors.isEmpty())
+        if (state.options.ignoreUnreplicatedKeyspaces() && allNeighbors.isEmpty())
         {
             throw new SkipRepairException(String.format("Nothing to repair for %s in %s - unreplicated keyspace is ignored since repair was called with --ignore-unreplicated-keyspaces",
-                                                        options.getRanges(),
-                                                        keyspace));
+                                                        state.options.getRanges(),
+                                                        state.keyspace));
         }
 
-        progressCounter.incrementAndGet();
-
-        boolean shouldExcludeDeadParticipants = options.isForcedRepair();
+        boolean shouldExcludeDeadParticipants = state.options.isForcedRepair();
 
         if (shouldExcludeDeadParticipants)
         {
@@ -364,389 +370,93 @@
 
     private void maybeStoreParentRepairStart(String[] cfnames)
     {
-        if (!options.isPreview())
+        if (!state.options.isPreview())
         {
-            SystemDistributedKeyspace.startParentRepair(parentSession, keyspace, cfnames, options);
+            SystemDistributedKeyspace.startParentRepair(state.id, state.keyspace, cfnames, state.options);
         }
     }
 
     private void maybeStoreParentRepairSuccess(Collection<Range<Token>> successfulRanges)
     {
-        if (!options.isPreview())
+        if (!state.options.isPreview())
         {
-            SystemDistributedKeyspace.successfulParentRepair(parentSession, successfulRanges);
+            SystemDistributedKeyspace.successfulParentRepair(state.id, successfulRanges);
         }
     }
 
     private void maybeStoreParentRepairFailure(Throwable error)
     {
-        if (!options.isPreview())
+        if (!state.options.isPreview())
         {
-            SystemDistributedKeyspace.failParentRepair(parentSession, error);
+            SystemDistributedKeyspace.failParentRepair(state.id, error);
         }
     }
 
     private void prepare(List<ColumnFamilyStore> columnFamilies, Set<InetAddressAndPort> allNeighbors, boolean force)
     {
-        try (Timer.Context ignore = Keyspace.open(keyspace).metric.repairPrepareTime.time())
+        state.phase.prepareStart();
+        try (Timer.Context ignore = Keyspace.open(state.keyspace).metric.repairPrepareTime.time())
         {
-            ActiveRepairService.instance.prepareForRepair(parentSession, FBUtilities.getBroadcastAddressAndPort(), allNeighbors, options, force, columnFamilies);
-            progressCounter.incrementAndGet();
+            ActiveRepairService.instance.prepareForRepair(state.id, FBUtilities.getBroadcastAddressAndPort(), allNeighbors, state.options, force, columnFamilies);
         }
+        state.phase.prepareComplete();
     }
 
     private void repair(String[] cfnames, NeighborsAndRanges neighborsAndRanges)
     {
-        if (options.isPreview())
+        RepairTask task;
+        if (state.options.isPreview())
         {
-            previewRepair(parentSession,
-                          creationTimeMillis,
-                          neighborsAndRanges.filterCommonRanges(keyspace, cfnames),
-                          neighborsAndRanges.participants,
-                          cfnames);
+            task = new PreviewRepairTask(state.options, state.keyspace, this, state.id, neighborsAndRanges.filterCommonRanges(state.keyspace, cfnames), cfnames);
         }
-        else if (options.isIncremental())
+        else if (state.options.isIncremental())
         {
-            incrementalRepair(parentSession,
-                              creationTimeMillis,
-                              traceState,
-                              neighborsAndRanges,
-                              neighborsAndRanges.participants,
-                              cfnames);
+            task = new IncrementalRepairTask(state.options, state.keyspace, this, state.id, neighborsAndRanges, cfnames);
         }
         else
         {
-            normalRepair(parentSession,
-                         creationTimeMillis,
-                         traceState,
-                         neighborsAndRanges.filterCommonRanges(keyspace, cfnames),
-                         neighborsAndRanges.participants,
-                         cfnames);
+            task = new NormalRepairTask(state.options, state.keyspace, this, state.id, neighborsAndRanges.filterCommonRanges(state.keyspace, cfnames), cfnames);
         }
-    }
 
-    private void normalRepair(UUID parentSession,
-                              long startTime,
-                              TraceState traceState,
-                              List<CommonRange> commonRanges,
-                              Set<InetAddressAndPort> preparedEndpoints,
-                              String... cfnames)
-    {
-
-        // Set up RepairJob executor for this repair command.
-        ListeningExecutorService executor = createExecutor();
-
-        // Setting the repairedAt time to UNREPAIRED_SSTABLE causes the repairedAt times to be preserved across streamed sstables
-        final ListenableFuture<List<RepairSessionResult>> allSessions = submitRepairSessions(parentSession, false, executor, commonRanges, cfnames);
-
-        // After all repair sessions completes(successful or not),
-        // run anticompaction if necessary and send finish notice back to client
-        final Collection<Range<Token>> successfulRanges = new ArrayList<>();
-        final AtomicBoolean hasFailure = new AtomicBoolean();
-        ListenableFuture repairResult = Futures.transformAsync(allSessions, new AsyncFunction<List<RepairSessionResult>, Object>()
-        {
-            @SuppressWarnings("unchecked")
-            public ListenableFuture apply(List<RepairSessionResult> results)
+        ExecutorPlus executor = createExecutor();
+        state.phase.repairSubmitted();
+        Future<CoordinatedRepairResult> f = task.perform(executor);
+        f.addCallback((result, failure) -> {
+            state.phase.repairCompleted();
+            try
             {
-                logger.debug("Repair result: {}", results);
-                // filter out null(=failed) results and get successful ranges
-                for (RepairSessionResult sessionResult : results)
+                if (failure != null)
                 {
-                    if (sessionResult != null)
-                    {
-                        // don't record successful repair if we had to skip ranges
-                        if (!sessionResult.skippedReplicas)
-                        {
-                            successfulRanges.addAll(sessionResult.ranges);
-                        }
-                    }
-                    else
-                    {
-                        hasFailure.compareAndSet(false, true);
-                    }
-                }
-                return Futures.immediateFuture(null);
-            }
-        }, MoreExecutors.directExecutor());
-        Futures.addCallback(repairResult,
-                            new RepairCompleteCallback(parentSession,
-                                                       successfulRanges,
-                                                       preparedEndpoints,
-                                                       startTime,
-                                                       traceState,
-                                                       hasFailure,
-                                                       executor),
-                            MoreExecutors.directExecutor());
-    }
-
-    private void incrementalRepair(UUID parentSession,
-                                   long startTime,
-                                   TraceState traceState,
-                                   NeighborsAndRanges neighborsAndRanges,
-                                   Set<InetAddressAndPort> preparedEndpoints,
-                                   String... cfnames)
-    {
-        // the local node also needs to be included in the set of participants, since coordinator sessions aren't persisted
-        Set<InetAddressAndPort> allParticipants = ImmutableSet.<InetAddressAndPort>builder()
-                                                  .addAll(neighborsAndRanges.participants)
-                                                  .add(FBUtilities.getBroadcastAddressAndPort())
-                                                  .build();
-        // Not necessary to include self for filtering. The common ranges only contains neighbhor node endpoints.
-        List<CommonRange> allRanges = neighborsAndRanges.filterCommonRanges(keyspace, cfnames);
-
-        CoordinatorSession coordinatorSession = ActiveRepairService.instance.consistent.coordinated.registerSession(parentSession, allParticipants, neighborsAndRanges.shouldExcludeDeadParticipants);
-        ListeningExecutorService executor = createExecutor();
-        AtomicBoolean hasFailure = new AtomicBoolean(false);
-        ListenableFuture repairResult = coordinatorSession.execute(() -> submitRepairSessions(parentSession, true, executor, allRanges, cfnames),
-                                                                   hasFailure);
-        Collection<Range<Token>> ranges = new HashSet<>();
-        for (Collection<Range<Token>> range : Iterables.transform(allRanges, cr -> cr.ranges))
-        {
-            ranges.addAll(range);
-        }
-        Futures.addCallback(repairResult,
-                            new RepairCompleteCallback(parentSession, ranges, preparedEndpoints, startTime, traceState, hasFailure, executor),
-                            MoreExecutors.directExecutor());
-    }
-
-    private void previewRepair(UUID parentSession,
-                               long startTime,
-                               List<CommonRange> commonRanges,
-                               Set<InetAddressAndPort> preparedEndpoints,
-                               String... cfnames)
-    {
-
-        logger.debug("Starting preview repair for {}", parentSession);
-        // Set up RepairJob executor for this repair command.
-        ListeningExecutorService executor = createExecutor();
-
-        final ListenableFuture<List<RepairSessionResult>> allSessions = submitRepairSessions(parentSession, false, executor, commonRanges, cfnames);
-
-        Futures.addCallback(allSessions, new FutureCallback<List<RepairSessionResult>>()
-        {
-            public void onSuccess(List<RepairSessionResult> results)
-            {
-                try
-                {
-                    if (results == null || results.stream().anyMatch(s -> s == null))
-                    {
-                        // something failed
-                        fail(null);
-                        return;
-                    }
-                    PreviewKind previewKind = options.getPreviewKind();
-                    Preconditions.checkState(previewKind != PreviewKind.NONE, "Preview is NONE");
-                    SyncStatSummary summary = new SyncStatSummary(true);
-                    summary.consumeSessionResults(results);
-
-                    final String message;
-                    if (summary.isEmpty())
-                    {
-                        message = previewKind == PreviewKind.REPAIRED ? "Repaired data is in sync" : "Previewed data was in sync";
-                    }
-                    else
-                    {
-                        message = (previewKind == PreviewKind.REPAIRED ? "Repaired data is inconsistent\n" : "Preview complete\n") + summary.toString();
-                        RepairMetrics.previewFailures.inc();
-                        if (previewKind == PreviewKind.REPAIRED)
-                            maybeSnapshotReplicas(parentSession, keyspace, results);
-                    }
-                    notification(message);
-
-                    success("Repair preview completed successfully");
-                    ActiveRepairService.instance.cleanUp(parentSession, preparedEndpoints);
-                }
-                catch (Throwable t)
-                {
-                    logger.error("Error completing preview repair", t);
-                    onFailure(t);
-                }
-                finally
-                {
-                    executor.shutdown();
-                }
-            }
-
-            public void onFailure(Throwable t)
-            {
-                notifyError(t);
-                fail("Error completing preview repair: " + t.getMessage());
-                executor.shutdown();
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    private void maybeSnapshotReplicas(UUID parentSession, String keyspace, List<RepairSessionResult> results)
-    {
-        if (!DatabaseDescriptor.snapshotOnRepairedDataMismatch())
-            return;
-
-        try
-        {
-            Set<String> mismatchingTables = new HashSet<>();
-            Set<InetAddressAndPort> nodes = new HashSet<>();
-            for (RepairSessionResult sessionResult : results)
-            {
-                for (RepairResult repairResult : emptyIfNull(sessionResult.repairJobResults))
-                {
-                    for (SyncStat stat : emptyIfNull(repairResult.stats))
-                    {
-                        if (stat.numberOfDifferences > 0)
-                            mismatchingTables.add(repairResult.desc.columnFamily);
-                        // snapshot all replicas, even if they don't have any differences
-                        nodes.add(stat.nodes.coordinator);
-                        nodes.add(stat.nodes.peer);
-                    }
-                }
-            }
-
-            String snapshotName = DiagnosticSnapshotService.getSnapshotName(DiagnosticSnapshotService.REPAIRED_DATA_MISMATCH_SNAPSHOT_PREFIX);
-            for (String table : mismatchingTables)
-            {
-                // we can just check snapshot existence locally since the repair coordinator is always a replica (unlike in the read case)
-                if (!Keyspace.open(keyspace).getColumnFamilyStore(table).snapshotExists(snapshotName))
-                {
-                    logger.info("{} Snapshotting {}.{} for preview repair mismatch with tag {} on instances {}",
-                                options.getPreviewKind().logPrefix(parentSession),
-                                keyspace, table, snapshotName, nodes);
-                    DiagnosticSnapshotService.repairedDataMismatch(Keyspace.open(keyspace).getColumnFamilyStore(table).metadata(), nodes);
+                    notifyError(failure);
+                    fail(failure.getMessage());
                 }
                 else
                 {
-                    logger.info("{} Not snapshotting {}.{} - snapshot {} exists",
-                                options.getPreviewKind().logPrefix(parentSession),
-                                keyspace, table, snapshotName);
+                    maybeStoreParentRepairSuccess(result.successfulRanges);
+                    if (result.hasFailed())
+                    {
+                        fail(null);
+                    }
+                    else
+                    {
+                        success(task.successMessage());
+                        ActiveRepairService.instance.cleanUp(state.id, neighborsAndRanges.participants);
+                    }
                 }
             }
-        }
-        catch (Exception e)
-        {
-            logger.error("{} Failed snapshotting replicas", options.getPreviewKind().logPrefix(parentSession), e);
-        }
-    }
-
-    private static <T> Iterable<T> emptyIfNull(Iterable<T> iter)
-    {
-        if (iter == null)
-            return Collections.emptyList();
-        return iter;
-    }
-
-    private ListenableFuture<List<RepairSessionResult>> submitRepairSessions(UUID parentSession,
-                                                                             boolean isIncremental,
-                                                                             ListeningExecutorService executor,
-                                                                             List<CommonRange> commonRanges,
-                                                                             String... cfnames)
-    {
-        List<ListenableFuture<RepairSessionResult>> futures = new ArrayList<>(options.getRanges().size());
-
-        for (CommonRange commonRange : commonRanges)
-        {
-            logger.info("Starting RepairSession for {}", commonRange);
-            RepairSession session = ActiveRepairService.instance.submitRepairSession(parentSession,
-                                                                                     commonRange,
-                                                                                     keyspace,
-                                                                                     options.getParallelism(),
-                                                                                     isIncremental,
-                                                                                     options.isPullRepair(),
-                                                                                     options.getPreviewKind(),
-                                                                                     options.optimiseStreams(),
-                                                                                     executor,
-                                                                                     cfnames);
-            if (session == null)
-                continue;
-            Futures.addCallback(session, new RepairSessionCallback(session), MoreExecutors.directExecutor());
-            futures.add(session);
-        }
-        return Futures.successfulAsList(futures);
-    }
-
-    private ListeningExecutorService createExecutor()
-    {
-        return MoreExecutors.listeningDecorator(new JMXEnabledThreadPoolExecutor(options.getJobThreads(),
-                                                                                 Integer.MAX_VALUE,
-                                                                                 TimeUnit.SECONDS,
-                                                                                 new LinkedBlockingQueue<>(),
-                                                                                 new NamedThreadFactory("Repair#" + cmd),
-                                                                                 "internal"));
-    }
-
-    private class RepairSessionCallback implements FutureCallback<RepairSessionResult>
-    {
-        private final RepairSession session;
-
-        public RepairSessionCallback(RepairSession session)
-        {
-            this.session = session;
-        }
-
-        public void onSuccess(RepairSessionResult result)
-        {
-            String message = String.format("Repair session %s for range %s finished", session.getId(),
-                                           session.ranges().toString());
-            logger.info(message);
-            fireProgressEvent(new ProgressEvent(ProgressEventType.PROGRESS,
-                                                progressCounter.incrementAndGet(),
-                                                totalProgress,
-                                                message));
-        }
-
-        public void onFailure(Throwable t)
-        {
-            String message = String.format("Repair session %s for range %s failed with error %s",
-                                           session.getId(), session.ranges().toString(), t.getMessage());
-            notifyError(new RuntimeException(message, t));
-        }
-    }
-
-    private class RepairCompleteCallback implements FutureCallback<Object>
-    {
-        final UUID parentSession;
-        final Collection<Range<Token>> successfulRanges;
-        final Set<InetAddressAndPort> preparedEndpoints;
-        final long startTime;
-        final TraceState traceState;
-        final AtomicBoolean hasFailure;
-        final ExecutorService executor;
-
-        public RepairCompleteCallback(UUID parentSession,
-                                      Collection<Range<Token>> successfulRanges,
-                                      Set<InetAddressAndPort> preparedEndpoints,
-                                      long startTime,
-                                      TraceState traceState,
-                                      AtomicBoolean hasFailure,
-                                      ExecutorService executor)
-        {
-            this.parentSession = parentSession;
-            this.successfulRanges = successfulRanges;
-            this.preparedEndpoints = preparedEndpoints;
-            this.startTime = startTime;
-            this.traceState = traceState;
-            this.hasFailure = hasFailure;
-            this.executor = executor;
-        }
-
-        public void onSuccess(Object result)
-        {
-            maybeStoreParentRepairSuccess(successfulRanges);
-            if (hasFailure.get())
+            finally
             {
-                fail(null);
+                executor.shutdown();
             }
-            else
-            {
-                success("Repair completed successfully");
-                ActiveRepairService.instance.cleanUp(parentSession, preparedEndpoints);
-            }
-            executor.shutdown();
-        }
+        });
+    }
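
The rewritten repair() funnels all three repair flavours through one RepairTask and a single completion callback that reports the outcome and always shuts the executor down. A JDK-only sketch of that shape, using CompletableFuture.whenComplete in place of Cassandra's Future.addCallback; every name below is an illustrative stand-in:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class CallbackShapeSketch
    {
        public static void main(String[] args)
        {
            ExecutorService executor = Executors.newFixedThreadPool(2);
            CompletableFuture<String> result =
                CompletableFuture.supplyAsync(() -> "repaired 42 ranges", executor);

            result.whenComplete((value, failure) -> {
                try
                {
                    if (failure != null)
                        System.err.println("repair failed: " + failure.getMessage());
                    else
                        System.out.println("repair succeeded: " + value);
                }
                finally
                {
                    executor.shutdown(); // always released, success or failure
                }
            });
        }
    }
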
 
-        public void onFailure(Throwable t)
-        {
-            notifyError(t);
-            fail(t.getMessage());
-            executor.shutdown();
-        }
+    private ExecutorPlus createExecutor()
+    {
+        return executorFactory()
+                .localAware()
+                .withJmxInternal()
+                .pooled("Repair#" + state.cmd, state.options.getJobThreads());
     }
 
     private static void addRangeToNeighbors(List<CommonRange> neighborRangeList, Range<Token> range, EndpointsForRange neighbors)
@@ -768,9 +478,9 @@
         neighborRangeList.add(new CommonRange(endpoints, transEndpoints, ranges));
     }
 
-    private Thread createQueryThread(final int cmd, final UUID sessionId)
+    private Thread createQueryThread(final TimeUUID sessionId)
     {
-        return NamedThreadFactory.createThread(new WrappedRunnable()
+        return executorFactory().startThread("Repair-Runnable-" + THREAD_COUNTER.incrementAndGet(), new WrappedRunnable()
         {
             // Query events within a time interval that overlaps the last by one second. Ignore duplicates. Ignore local traces.
             // Wake up upon local trace activity. Query when notified of trace activity with a timeout that doubles every two timeouts.
@@ -784,14 +494,14 @@
                 String query = String.format(format, SchemaConstants.TRACE_KEYSPACE_NAME, TraceKeyspace.EVENTS);
                 SelectStatement statement = (SelectStatement) QueryProcessor.parseStatement(query).prepare(ClientState.forInternalCalls());
 
-                ByteBuffer sessionIdBytes = ByteBufferUtil.bytes(sessionId);
+                ByteBuffer sessionIdBytes = sessionId.toBytes();
                 InetAddressAndPort source = FBUtilities.getBroadcastAddressAndPort();
 
                 HashSet<UUID>[] seen = new HashSet[]{ new HashSet<>(), new HashSet<>() };
                 int si = 0;
                 UUID uuid;
 
-                long tlast = System.currentTimeMillis(), tcur;
+                long tlast = currentTimeMillis(), tcur;
 
                 TraceState.Status status;
                 long minWaitMillis = 125;
@@ -811,12 +521,12 @@
                         timeout = minWaitMillis;
                         shouldDouble = false;
                     }
-                    ByteBuffer tminBytes = ByteBufferUtil.bytes(UUIDGen.minTimeUUID(tlast - 1000));
-                    ByteBuffer tmaxBytes = ByteBufferUtil.bytes(UUIDGen.maxTimeUUID(tcur = System.currentTimeMillis()));
+                    ByteBuffer tminBytes = TimeUUID.minAtUnixMillis(tlast - 1000).toBytes();
+                    ByteBuffer tmaxBytes = TimeUUID.maxAtUnixMillis(tcur = currentTimeMillis()).toBytes();
                     QueryOptions options = QueryOptions.forInternalCalls(ConsistencyLevel.ONE, Lists.newArrayList(sessionIdBytes,
                                                                                                                   tminBytes,
                                                                                                                   tmaxBytes));
-                    ResultMessage.Rows rows = statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+                    ResultMessage.Rows rows = statement.execute(forInternalCalls(), options, nanoTime());
                     UntypedResultSet result = UntypedResultSet.create(rows.result);
 
                     for (UntypedResultSet.Row r : result)
@@ -840,7 +550,14 @@
                     seen[si].clear();
                 }
             }
-        }, "Repair-Runnable-" + threadCounter.incrementAndGet());
+        });
+    }
+
+    private ProgressEvent jmxEvent(ProgressEventType type, String msg)
+    {
+        int length = CoordinatorState.State.values().length + 1; // +1 to include completed state
+        int currentState = state.getCurrentState();
+        return new ProgressEvent(type, currentState == INIT ? 0 : currentState == COMPLETE ? length : currentState, length, msg);
     }
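
jmxEvent derives JMX progress from the coordinator state machine rather than the old per-session counter: the reported count is the current state's position out of the number of states plus one, the extra slot standing for completion. A simplified sketch with a hypothetical phase enum:

    public class StateProgressSketch
    {
        enum Phase { SETUP, PREPARE, REPAIR } // hypothetical coordinator phases

        static String progress(Phase current, boolean complete)
        {
            int total = Phase.values().length + 1; // +1 for the completed state
            int count = complete ? total : current.ordinal();
            return count + "/" + total;
        }

        public static void main(String[] args)
        {
            System.out.println(progress(Phase.SETUP, false));  // 0/4
            System.out.println(progress(Phase.REPAIR, false)); // 2/4
            System.out.println(progress(Phase.REPAIR, true));  // 4/4
        }
    }
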
 
     private static final class SkipRepairException extends RuntimeException
@@ -851,13 +568,13 @@
         }
     }
 
-    static final class NeighborsAndRanges
+    public static final class NeighborsAndRanges
     {
-        private final boolean shouldExcludeDeadParticipants;
-        private final Set<InetAddressAndPort> participants;
-        private final List<CommonRange> commonRanges;
+        final boolean shouldExcludeDeadParticipants;
+        public final Set<InetAddressAndPort> participants;
+        public final List<CommonRange> commonRanges;
 
-        NeighborsAndRanges(boolean shouldExcludeDeadParticipants, Set<InetAddressAndPort> participants, List<CommonRange> commonRanges)
+        public NeighborsAndRanges(boolean shouldExcludeDeadParticipants, Set<InetAddressAndPort> participants, List<CommonRange> commonRanges)
         {
             this.shouldExcludeDeadParticipants = shouldExcludeDeadParticipants;
             this.participants = participants;
@@ -869,7 +586,7 @@
          * and excludes ranges left without any participants
          * When not in the force mode, no-op.
          */
-        List<CommonRange> filterCommonRanges(String keyspace, String[] tableNames)
+        public List<CommonRange> filterCommonRanges(String keyspace, String[] tableNames)
         {
             if (!shouldExcludeDeadParticipants)
             {
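
filterCommonRanges, per its javadoc, only does work in force mode: endpoints outside the surviving participants are dropped from each common range, and ranges left without any participants are excluded. A JDK-only sketch of that filtering, with plain collections standing in for CommonRange and InetAddressAndPort:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class ForceFilterSketch
    {
        // rangeLabel -> endpoints participating in that range
        static Map<String, Set<String>> filter(boolean force, Set<String> live, Map<String, Set<String>> ranges)
        {
            if (!force)
                return ranges; // not in force mode: no-op, as the javadoc states

            Map<String, Set<String>> filtered = new HashMap<>();
            for (Map.Entry<String, Set<String>> e : ranges.entrySet())
            {
                Set<String> alive = e.getValue().stream().filter(live::contains).collect(Collectors.toSet());
                if (!alive.isEmpty()) // ranges left without any participants are excluded
                    filtered.put(e.getKey(), alive);
            }
            return filtered;
        }

        public static void main(String[] args)
        {
            Map<String, Set<String>> ranges = Map.of("(0,100]", Set.of("10.0.0.1", "10.0.0.2"),
                                                     "(100,200]", Set.of("10.0.0.3"));
            System.out.println(filter(true, Set.of("10.0.0.1"), ranges));
        }
    }
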
diff --git a/src/java/org/apache/cassandra/repair/RepairSession.java b/src/java/org/apache/cassandra/repair/RepairSession.java
index 692cdcf..6fb455b 100644
--- a/src/java/org/apache/cassandra/repair/RepairSession.java
+++ b/src/java/org/apache/cassandra/repair/RepairSession.java
@@ -18,7 +18,13 @@
 package org.apache.cassandra.repair;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
@@ -27,20 +33,26 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.*;
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.repair.state.SessionState;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RepairException;
 import org.apache.cassandra.gms.*;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.repair.consistent.ConsistentSession;
 import org.apache.cassandra.repair.consistent.LocalSession;
 import org.apache.cassandra.repair.consistent.LocalSessions;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.SessionSummary;
@@ -48,6 +60,8 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MerkleTrees;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.concurrent.Future;
 
 /**
  * Coordinates the (active) repair of a list of non overlapping token ranges.
@@ -56,8 +70,11 @@
  * of column families. For each of the column family to repair, RepairSession
  * creates a {@link RepairJob} that handles the repair of that CF.
  *
- * A given RepairJob has the 2 main phases:
+ * A given RepairJob has three main phases:
  * <ol>
+ *   <li>
+ *     Paxos repair: unfinished paxos operations in the range/keyspace/table are first completed
+ *   </li>
  *   <li>Validation phase: the job requests merkle trees from each of the replicas involved
  *      ({@link org.apache.cassandra.repair.ValidationTask}) and waits until all trees are received (in
  *      validationComplete()).
@@ -84,24 +101,21 @@
  * Similarly, if a job is sequential, it will handle one SymmetricSyncTask at a time, but will handle
  * all of them in parallel otherwise.
  */
-public class RepairSession extends AbstractFuture<RepairSessionResult> implements IEndpointStateChangeSubscriber,
+public class RepairSession extends AsyncFuture<RepairSessionResult> implements IEndpointStateChangeSubscriber,
                                                                                   IFailureDetectionEventListener,
                                                                                   LocalSessions.Listener
 {
     private static final Logger logger = LoggerFactory.getLogger(RepairSession.class);
 
-    public final UUID parentRepairSession;
-    /** Repair session ID */
-    private final UUID id;
-    public final String keyspace;
-    private final String[] cfnames;
+    public final SessionState state;
     public final RepairParallelism parallelismDegree;
     public final boolean pullRepair;
 
     /** Range to repair */
-    public final CommonRange commonRange;
     public final boolean isIncremental;
     public final PreviewKind previewKind;
+    public final boolean repairPaxos;
+    public final boolean paxosOnly;
 
     private final AtomicBoolean isFailed = new AtomicBoolean(false);
 
@@ -111,7 +125,7 @@
     private final ConcurrentMap<Pair<RepairJobDesc, SyncNodePair>, CompletableRemoteSyncTask> syncingTasks = new ConcurrentHashMap<>();
 
     // Tasks(snapshot, validate request, differencing, ...) are run on taskExecutor
-    public final ListeningExecutorService taskExecutor;
+    public final ExecutorPlus taskExecutor;
     public final boolean optimiseStreams;
 
     private volatile boolean terminated = false;
@@ -119,15 +133,15 @@
     /**
      * Create new repair session.
      * @param parentRepairSession the parent session's id
-     * @param id this sessions id
      * @param commonRange ranges to repair
      * @param keyspace name of keyspace
      * @param parallelismDegree specifies the degree of parallelism when calculating the merkle trees
      * @param pullRepair true if the repair should be one way (from remote host to this host and only applicable between two hosts--see RepairOption)
+     * @param repairPaxos true if incomplete paxos operations should be completed as part of repair
+     * @param paxosOnly true if we should only complete paxos operations, not run a normal repair
      * @param cfnames names of columnfamilies
      */
-    public RepairSession(UUID parentRepairSession,
-                         UUID id,
+    public RepairSession(TimeUUID parentRepairSession,
                          CommonRange commonRange,
                          String keyspace,
                          RepairParallelism parallelismDegree,
@@ -135,41 +149,40 @@
                          boolean pullRepair,
                          PreviewKind previewKind,
                          boolean optimiseStreams,
+                         boolean repairPaxos,
+                         boolean paxosOnly,
                          String... cfnames)
     {
+        this.repairPaxos = repairPaxos;
+        this.paxosOnly = paxosOnly;
         assert cfnames.length > 0 : "Repairing no column families seems pointless, doesn't it";
-
-        this.parentRepairSession = parentRepairSession;
-        this.id = id;
+        this.state = new SessionState(parentRepairSession, keyspace, cfnames, commonRange);
         this.parallelismDegree = parallelismDegree;
-        this.keyspace = keyspace;
-        this.cfnames = cfnames;
-        this.commonRange = commonRange;
         this.isIncremental = isIncremental;
         this.previewKind = previewKind;
         this.pullRepair = pullRepair;
         this.optimiseStreams = optimiseStreams;
-        this.taskExecutor = MoreExecutors.listeningDecorator(createExecutor());
+        this.taskExecutor = createExecutor();
     }
 
-    protected DebuggableThreadPoolExecutor createExecutor()
+    protected ExecutorPlus createExecutor()
     {
-        return DebuggableThreadPoolExecutor.createCachedThreadpoolWithMaxSize("RepairJobTask");
+        return ExecutorFactory.Global.executorFactory().pooled("RepairJobTask", Integer.MAX_VALUE);
     }
 
-    public UUID getId()
+    public TimeUUID getId()
     {
-        return id;
+        return state.id;
     }
 
     public Collection<Range<Token>> ranges()
     {
-        return commonRange.ranges;
+        return state.commonRange.ranges;
     }
 
     public Collection<InetAddressAndPort> endpoints()
     {
-        return commonRange.endpoints;
+        return state.commonRange.endpoints;
     }
 
     public void trackValidationCompletion(Pair<RepairJobDesc, InetAddressAndPort> key, ValidationTask task)
@@ -194,7 +207,12 @@
         ValidationTask task = validating.remove(Pair.create(desc, endpoint));
         if (task == null)
         {
-            assert terminated;
+            assert terminated : "The repair session should be terminated if the validation we're completing no longer exists.";
+            
+            // The trees may be off-heap, and will therefore need to be released.
+            if (trees != null)
+                trees.release();
+            
             return;
         }
 
@@ -235,7 +253,7 @@
     {
         StringBuilder sb = new StringBuilder();
         sb.append(FBUtilities.getBroadcastAddressAndPort());
-        for (InetAddressAndPort ep : commonRange.endpoints)
+        for (InetAddressAndPort ep : state.commonRange.endpoints)
             sb.append(", ").append(ep);
         return sb.toString();
     }
@@ -248,68 +266,74 @@
      *
      * @param executor Executor to run validation
      */
-    @SuppressWarnings("UnstableApiUsage")
-    public void start(ListeningExecutorService executor)
+    public void start(ExecutorPlus executor)
     {
+        state.phase.start();
         String message;
         if (terminated)
             return;
 
         logger.info("{} parentSessionId = {}: new session: will sync {} on range {} for {}.{}",
-                    previewKind.logPrefix(getId()), parentRepairSession, repairedNodes(), commonRange, keyspace, Arrays.toString(cfnames));
-        Tracing.traceRepair("Syncing range {}", commonRange);
-        if (!previewKind.isPreview())
+                    previewKind.logPrefix(getId()), state.parentRepairSession, repairedNodes(), state.commonRange, state.keyspace, Arrays.toString(state.cfnames));
+        Tracing.traceRepair("Syncing range {}", state.commonRange);
+        if (!previewKind.isPreview() && !paxosOnly)
         {
-            SystemDistributedKeyspace.startRepairs(getId(), parentRepairSession, keyspace, cfnames, commonRange);
+            SystemDistributedKeyspace.startRepairs(getId(), state.parentRepairSession, state.keyspace, state.cfnames, state.commonRange);
         }
 
-        if (commonRange.endpoints.isEmpty())
+        if (state.commonRange.endpoints.isEmpty())
         {
-            logger.info("{} {}", previewKind.logPrefix(getId()), message = String.format("No neighbors to repair with on range %s: session completed", commonRange));
+            logger.info("{} {}", previewKind.logPrefix(getId()), message = String.format("No neighbors to repair with on range %s: session completed", state.commonRange));
+            state.phase.skip(message);
             Tracing.traceRepair(message);
-            set(new RepairSessionResult(id, keyspace, commonRange.ranges, Lists.<RepairResult>newArrayList(), commonRange.hasSkippedReplicas));
+            trySuccess(new RepairSessionResult(state.id, state.keyspace, state.commonRange.ranges, Lists.<RepairResult>newArrayList(), state.commonRange.hasSkippedReplicas));
             if (!previewKind.isPreview())
             {
-                SystemDistributedKeyspace.failRepairs(getId(), keyspace, cfnames, new RuntimeException(message));
+                SystemDistributedKeyspace.failRepairs(getId(), state.keyspace, state.cfnames, new RuntimeException(message));
             }
             return;
         }
 
         // Checking all nodes are live
-        for (InetAddressAndPort endpoint : commonRange.endpoints)
+        for (InetAddressAndPort endpoint : state.commonRange.endpoints)
         {
-            if (!FailureDetector.instance.isAlive(endpoint) && !commonRange.hasSkippedReplicas)
+            if (!FailureDetector.instance.isAlive(endpoint) && !state.commonRange.hasSkippedReplicas)
             {
                 message = String.format("Cannot proceed on repair because a neighbor (%s) is dead: session failed", endpoint);
+                state.phase.fail(message);
                 logger.error("{} {}", previewKind.logPrefix(getId()), message);
                 Exception e = new IOException(message);
-                setException(e);
+                tryFailure(e);
                 if (!previewKind.isPreview())
                 {
-                    SystemDistributedKeyspace.failRepairs(getId(), keyspace, cfnames, e);
+                    SystemDistributedKeyspace.failRepairs(getId(), state.keyspace, state.cfnames, e);
                 }
                 return;
             }
         }
 
         // Create and submit RepairJob for each ColumnFamily
-        List<ListenableFuture<RepairResult>> jobs = new ArrayList<>(cfnames.length);
-        for (String cfname : cfnames)
+        state.phase.jobsSubmitted();
+        List<Future<RepairResult>> jobs = new ArrayList<>(state.cfnames.length);
+        for (String cfname : state.cfnames)
         {
             RepairJob job = new RepairJob(this, cfname);
+            state.register(job.state);
             executor.execute(job);
             jobs.add(job);
         }
 
         // When all RepairJobs are done without error, cleanup and set the final result
-        Futures.addCallback(Futures.allAsList(jobs), new FutureCallback<List<RepairResult>>()
+        FBUtilities.allOf(jobs).addCallback(new FutureCallback<List<RepairResult>>()
         {
             public void onSuccess(List<RepairResult> results)
             {
+                state.phase.success();
                 // this repair session is completed
                 logger.info("{} {}", previewKind.logPrefix(getId()), "Session completed successfully");
-                Tracing.traceRepair("Completed sync of range {}", commonRange);
-                set(new RepairSessionResult(id, keyspace, commonRange.ranges, results, commonRange.hasSkippedReplicas));
+                Tracing.traceRepair("Completed sync of range {}", state.commonRange);
+                trySuccess(new RepairSessionResult(state.id, state.keyspace, state.commonRange.ranges, results, state.commonRange.hasSkippedReplicas));
+
                 taskExecutor.shutdown();
                 // mark this session as terminated
                 terminate();
@@ -318,7 +342,12 @@
 
             public void onFailure(Throwable t)
             {
-                logger.error("{} Session completed with the following error", previewKind.logPrefix(getId()), t);
+                state.phase.fail(t);
+                String msg = "{} Session completed with the following error";
+                if (Throwables.anyCauseMatches(t, RepairException::shouldWarn))
+                    logger.warn(msg+ ": {}", previewKind.logPrefix(getId()), t.getMessage());
+                else
+                    logger.error(msg, previewKind.logPrefix(getId()), t);
                 Tracing.traceRepair("Session completed with the following error: {}", t);
                 forceShutdown(t);
             }
@@ -339,7 +368,7 @@
      */
     public void forceShutdown(Throwable reason)
     {
-        setException(reason);
+        tryFailure(reason);
         taskExecutor.shutdown();
         terminate();
         awaitTaskExecutorTermination();
@@ -372,7 +401,7 @@
 
     public void convict(InetAddressAndPort endpoint, double phi)
     {
-        if (!commonRange.endpoints.contains(endpoint))
+        if (!state.commonRange.endpoints.contains(endpoint))
             return;
 
         // We want a higher confidence in the failure detection than usual because failing a repair wrongly has a high cost.
@@ -401,8 +430,8 @@
             {
                 if (range.intersects(ranges()))
                 {
-                    logger.error("{} An intersecting incremental repair with session id = {} finished, preview repair might not be accurate", previewKind.logPrefix(getId()), session.sessionID);
-                    forceShutdown(new Exception("An incremental repair with session id "+session.sessionID+" finished during this preview repair runtime"));
+                    logger.warn("{} An intersecting incremental repair with session id = {} finished, preview repair might not be accurate", previewKind.logPrefix(getId()), session.sessionID);
+                    forceShutdown(RepairException.warn("An incremental repair with session id "+session.sessionID+" finished during this preview repair runtime"));
                     return;
                 }
             }
@@ -411,10 +440,10 @@
 
     private boolean includesTables(Set<TableId> tableIds)
     {
-        Keyspace ks = Keyspace.open(keyspace);
+        Keyspace ks = Keyspace.open(state.keyspace);
         if (ks != null)
         {
-            for (String table : cfnames)
+            for (String table : state.cfnames)
             {
                 ColumnFamilyStore cfs = ks.getColumnFamilyStore(table);
                 if (tableIds.contains(cfs.metadata.id))
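
The session failure handler in start() now downgrades expected repair failures to warnings by checking the failure's cause chain with Throwables.anyCauseMatches(t, RepairException::shouldWarn). A JDK-only sketch of that cause-walking check; the helper and the predicate below are illustrative stand-ins:

    import java.util.function.Predicate;

    public class CauseChainSketch
    {
        // Walks the cause chain and reports whether any throwable in it matches.
        static boolean anyCauseMatches(Throwable t, Predicate<Throwable> predicate)
        {
            for (Throwable cur = t; cur != null; cur = cur.getCause())
                if (predicate.test(cur))
                    return true;
            return false;
        }

        public static void main(String[] args)
        {
            Throwable failure = new RuntimeException("session failed",
                                                     new IllegalStateException("expected repair condition"));

            if (anyCauseMatches(failure, c -> c instanceof IllegalStateException))
                System.out.println("WARN: " + failure.getMessage()); // expected failure: keep the log quiet
            else
                System.err.println("ERROR: " + failure);             // unexpected failure: log loudly
        }
    }
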
diff --git a/src/java/org/apache/cassandra/repair/RepairSessionResult.java b/src/java/org/apache/cassandra/repair/RepairSessionResult.java
index 491ab2f..6217ef4 100644
--- a/src/java/org/apache/cassandra/repair/RepairSessionResult.java
+++ b/src/java/org/apache/cassandra/repair/RepairSessionResult.java
@@ -18,23 +18,23 @@
 package org.apache.cassandra.repair;
 
 import java.util.Collection;
-import java.util.UUID;
 
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Repair session result
  */
 public class RepairSessionResult
 {
-    public final UUID sessionId;
+    public final TimeUUID sessionId;
     public final String keyspace;
     public final Collection<Range<Token>> ranges;
     public final Collection<RepairResult> repairJobResults;
     public final boolean skippedReplicas;
 
-    public RepairSessionResult(UUID sessionId, String keyspace, Collection<Range<Token>> ranges, Collection<RepairResult> repairJobResults, boolean skippedReplicas)
+    public RepairSessionResult(TimeUUID sessionId, String keyspace, Collection<Range<Token>> ranges, Collection<RepairResult> repairJobResults, boolean skippedReplicas)
     {
         this.sessionId = sessionId;
         this.keyspace = keyspace;
diff --git a/src/java/org/apache/cassandra/repair/RepairTask.java b/src/java/org/apache/cassandra/repair/RepairTask.java
new file mode 100644
index 0000000..dc71d6e
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/RepairTask.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+
+public interface RepairTask
+{
+    String name();
+
+    default String successMessage()
+    {
+        return name() + " completed successfully";
+    }
+
+    Future<CoordinatedRepairResult> performUnsafe(ExecutorPlus executor) throws Exception;
+
+    default Future<CoordinatedRepairResult> perform(ExecutorPlus executor)
+    {
+        try
+        {
+            return performUnsafe(executor);
+        }
+        catch (Exception | Error e)
+        {
+            JVMStabilityInspector.inspectThrowable(e);
+            return ImmediateFuture.failure(e);
+        }
+    }
+}
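
A minimal, hypothetical implementor of the new RepairTask interface (not part of the patch), showing what the default perform() buys callers: an exception thrown by performUnsafe() is passed through JVMStabilityInspector and handed back as an already-failed Future instead of propagating:

    package org.apache.cassandra.repair;

    import org.apache.cassandra.concurrent.ExecutorPlus;
    import org.apache.cassandra.utils.concurrent.Future;

    public class AlwaysFailingRepairTask implements RepairTask
    {
        public String name()
        {
            return "Always-failing repair";
        }

        public Future<CoordinatedRepairResult> performUnsafe(ExecutorPlus executor) throws Exception
        {
            // Any exception thrown here is caught by the default perform(),
            // inspected by JVMStabilityInspector, and surfaced via ImmediateFuture.failure(...).
            throw new IllegalStateException("simulated failure");
        }
    }
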
diff --git a/src/java/org/apache/cassandra/repair/SnapshotTask.java b/src/java/org/apache/cassandra/repair/SnapshotTask.java
index 46532fb5..7f5dcd7 100644
--- a/src/java/org/apache/cassandra/repair/SnapshotTask.java
+++ b/src/java/org/apache/cassandra/repair/SnapshotTask.java
@@ -19,21 +19,20 @@
 
 import java.util.concurrent.RunnableFuture;
 
-import com.google.common.util.concurrent.AbstractFuture;
-
 import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.repair.messages.SnapshotMessage;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
 
 import static org.apache.cassandra.net.Verb.SNAPSHOT_MSG;
 
 /**
  * SnapshotTask is a task that sends snapshot request.
  */
-public class SnapshotTask extends AbstractFuture<InetAddressAndPort> implements RunnableFuture<InetAddressAndPort>
+public class SnapshotTask extends AsyncFuture<InetAddressAndPort> implements RunnableFuture<InetAddressAndPort>
 {
     private final RepairJobDesc desc;
     private final InetAddressAndPort endpoint;
@@ -54,7 +53,7 @@
     /**
      * Callback for snapshot request. Run on INTERNAL_RESPONSE stage.
      */
-    static class SnapshotCallback implements RequestCallback
+    static class SnapshotCallback implements RequestCallback<InetAddressAndPort>
     {
         final SnapshotTask task;
 
@@ -71,7 +70,7 @@
         @Override
         public void onResponse(Message msg)
         {
-            task.set(task.endpoint);
+            task.trySuccess(task.endpoint);
         }
 
         @Override
@@ -83,7 +82,7 @@
         @Override
         public void onFailure(InetAddressAndPort from, RequestFailureReason failureReason)
         {
-            task.setException(new RuntimeException("Could not create snapshot at " + from));
+            task.tryFailure(new RuntimeException("Could not create snapshot at " + from + "; " + failureReason));
         }
     }
 }
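
The switch from Guava's AbstractFuture (set/setException) to AsyncFuture's trySuccess/tryFailure follows first-completion-wins naming: a later attempt to settle the future is expected to be ignored rather than fail. The JDK's CompletableFuture behaves the same way, which the sketch below demonstrates; it is an analogy, not Cassandra's AsyncFuture API:

    import java.util.concurrent.CompletableFuture;

    public class FirstCompletionWinsSketch
    {
        public static void main(String[] args) throws Exception
        {
            CompletableFuture<String> response = new CompletableFuture<>();

            // The first completion wins...
            boolean first = response.complete("snapshot taken on /10.0.0.1");
            // ...later attempts (success or failure) are ignored and just report false.
            boolean second = response.completeExceptionally(new RuntimeException("Could not create snapshot"));

            System.out.println(first + " " + second + " -> " + response.get());
            // prints: true false -> snapshot taken on /10.0.0.1
        }
    }
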
diff --git a/src/java/org/apache/cassandra/repair/SomeRepairFailedException.java b/src/java/org/apache/cassandra/repair/SomeRepairFailedException.java
index 4b077b8..13ee0ae 100644
--- a/src/java/org/apache/cassandra/repair/SomeRepairFailedException.java
+++ b/src/java/org/apache/cassandra/repair/SomeRepairFailedException.java
@@ -28,4 +28,9 @@
 public class SomeRepairFailedException extends RuntimeException
 {
     public static final SomeRepairFailedException INSTANCE = new SomeRepairFailedException();
+
+    private SomeRepairFailedException()
+    {
+        super(null, null, false, false);
+    }
 }
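
The new private constructor passes (null, null, false, false) to RuntimeException: no message, no cause, suppression disabled, and no writable stack trace — the standard recipe for a cheap, reusable marker exception such as the shared INSTANCE above. A self-contained sketch of the idiom (class name hypothetical):

    public class MarkerExceptionSketch
    {
        static final class CheapMarkerException extends RuntimeException
        {
            static final CheapMarkerException INSTANCE = new CheapMarkerException();

            private CheapMarkerException()
            {
                // message=null, cause=null, enableSuppression=false, writableStackTrace=false:
                // the singleton carries no stack trace, so it is cheap and safe to reuse.
                super(null, null, false, false);
            }
        }

        public static void main(String[] args)
        {
            System.out.println(CheapMarkerException.INSTANCE.getStackTrace().length); // 0
        }
    }
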
diff --git a/src/java/org/apache/cassandra/repair/StreamingRepairTask.java b/src/java/org/apache/cassandra/repair/StreamingRepairTask.java
index 9c6caf4..ea50a50 100644
--- a/src/java/org/apache/cassandra/repair/StreamingRepairTask.java
+++ b/src/java/org/apache/cassandra/repair/StreamingRepairTask.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.repair;
 
-import java.util.UUID;
 import java.util.Collections;
 import java.util.Collection;
 
@@ -38,11 +37,12 @@
 import org.apache.cassandra.streaming.StreamPlan;
 import org.apache.cassandra.streaming.StreamState;
 import org.apache.cassandra.streaming.StreamOperation;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static org.apache.cassandra.net.Verb.SYNC_RSP;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 
 /**
@@ -59,10 +59,10 @@
     private final InetAddressAndPort src;
     private final InetAddressAndPort dst;
     private final Collection<Range<Token>> ranges;
-    private final UUID pendingRepair;
+    private final TimeUUID pendingRepair;
     private final PreviewKind previewKind;
 
-    public StreamingRepairTask(RepairJobDesc desc, InetAddressAndPort initiator, InetAddressAndPort src, InetAddressAndPort dst, Collection<Range<Token>> ranges,  UUID pendingRepair, PreviewKind previewKind, boolean asymmetric)
+    public StreamingRepairTask(RepairJobDesc desc, InetAddressAndPort initiator, InetAddressAndPort src, InetAddressAndPort dst, Collection<Range<Token>> ranges, TimeUUID pendingRepair, PreviewKind previewKind, boolean asymmetric)
     {
         this.desc = desc;
         this.initiator = initiator;
diff --git a/src/java/org/apache/cassandra/repair/SymmetricRemoteSyncTask.java b/src/java/org/apache/cassandra/repair/SymmetricRemoteSyncTask.java
index 629f4bb..96f07f3 100644
--- a/src/java/org/apache/cassandra/repair/SymmetricRemoteSyncTask.java
+++ b/src/java/org/apache/cassandra/repair/SymmetricRemoteSyncTask.java
@@ -64,11 +64,11 @@
     {
         if (success)
         {
-            set(stat.withSummaries(summaries));
+            trySuccess(stat.withSummaries(summaries));
         }
         else
         {
-            setException(new RepairException(desc, previewKind, String.format("Sync failed between %s and %s", nodePair.coordinator, nodePair.peer)));
+            tryFailure(RepairException.warn(desc, previewKind, String.format("Sync failed between %s and %s", nodePair.coordinator, nodePair.peer)));
         }
         finished();
     }
diff --git a/src/java/org/apache/cassandra/repair/SyncTask.java b/src/java/org/apache/cassandra/repair/SyncTask.java
index 24e2068..b325eb4 100644
--- a/src/java/org/apache/cassandra/repair/SyncTask.java
+++ b/src/java/org/apache/cassandra/repair/SyncTask.java
@@ -23,8 +23,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AbstractFuture;
 
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,9 +37,10 @@
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.tracing.Tracing;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.cassandra.net.Verb.SYNC_REQ;
 
-public abstract class SyncTask extends AbstractFuture<SyncStat> implements Runnable
+public abstract class SyncTask extends AsyncFuture<SyncStat> implements Runnable
 {
     private static final Logger logger = LoggerFactory.getLogger(SyncTask.class);
 
@@ -74,7 +75,7 @@
      */
     public final void run()
     {
-        startTime = System.currentTimeMillis();
+        startTime = currentTimeMillis();
 
 
         // choose a repair method based on the significance of the difference
@@ -83,7 +84,7 @@
         {
             logger.info(String.format(format, "are consistent"));
             Tracing.traceRepair("Endpoint {} is consistent with {} for {}", nodePair.coordinator, nodePair.peer, desc.columnFamily);
-            set(stat);
+            trySuccess(stat);
             return;
         }
 
@@ -101,7 +102,7 @@
     protected void finished()
     {
         if (startTime != Long.MIN_VALUE)
-            Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily).metric.repairSyncTime.update(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
+            Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily).metric.repairSyncTime.update(currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
     }
 
     public void abort() {}
@@ -111,6 +112,6 @@
         RepairMessage.sendMessageWithFailureCB(request,
                                                SYNC_REQ,
                                                to,
-                                               this::setException);
+                                               this::tryFailure);
     }
 }
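
SyncTask only records repairSyncTime in finished() when run() actually started the clock, using a Long.MIN_VALUE sentinel as the guard. A JDK-only sketch of that pattern, with System.currentTimeMillis standing in for Cassandra's Clock.Global:

    public class GuardedTimingSketch
    {
        private long startTime = Long.MIN_VALUE;

        void run()
        {
            startTime = System.currentTimeMillis();
            // ... do the work ...
        }

        void finished()
        {
            // Only record a duration if run() was actually invoked.
            if (startTime != Long.MIN_VALUE)
                System.out.println("elapsed ms: " + (System.currentTimeMillis() - startTime));
        }

        public static void main(String[] args)
        {
            GuardedTimingSketch task = new GuardedTimingSketch();
            task.finished(); // prints nothing: never started
            task.run();
            task.finished(); // prints the elapsed time
        }
    }
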
diff --git a/src/java/org/apache/cassandra/repair/SystemDistributedKeyspace.java b/src/java/org/apache/cassandra/repair/SystemDistributedKeyspace.java
deleted file mode 100644
index a3f8774..0000000
--- a/src/java/org/apache/cassandra/repair/SystemDistributedKeyspace.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.repair;
-
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.dht.Range;
-import org.apache.cassandra.dht.Token;
-import org.apache.cassandra.gms.Gossiper;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.repair.messages.RepairOption;
-import org.apache.cassandra.schema.CompactionParams;
-import org.apache.cassandra.schema.KeyspaceMetadata;
-import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.schema.Tables;
-import org.apache.cassandra.utils.FBUtilities;
-
-import static java.lang.String.format;
-
-import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
-
-public final class SystemDistributedKeyspace
-{
-    private SystemDistributedKeyspace()
-    {
-    }
-
-    private static final Logger logger = LoggerFactory.getLogger(SystemDistributedKeyspace.class);
-
-    /**
-     * Generation is used as a timestamp for automatic table creation on startup.
-     * If you make any changes to the tables below, make sure to increment the
-     * generation and document your change here.
-     *
-     * gen 0: original definition in 2.2
-     * gen 1: (pre-)add options column to parent_repair_history in 3.0, 3.11
-     * gen 2: (pre-)add coordinator_port and participants_v2 columns to repair_history in 3.0, 3.11, 4.0
-     * gen 3: gc_grace_seconds raised from 0 to 10 days in CASSANDRA-12954 in 3.11.0
-     * gen 4: compression chunk length reduced to 16KiB, memtable_flush_period_in_ms now unset on all tables in 4.0
-     * gen 5: add ttl and TWCS to repair_history tables
-     */
-    public static final long GENERATION = 5;
-
-    public static final String REPAIR_HISTORY = "repair_history";
-
-    public static final String PARENT_REPAIR_HISTORY = "parent_repair_history";
-
-    public static final String VIEW_BUILD_STATUS = "view_build_status";
-
-    private static final TableMetadata RepairHistory =
-        parse(REPAIR_HISTORY,
-                "Repair history",
-                "CREATE TABLE %s ("
-                     + "keyspace_name text,"
-                     + "columnfamily_name text,"
-                     + "id timeuuid,"
-                     + "parent_id timeuuid,"
-                     + "range_begin text,"
-                     + "range_end text,"
-                     + "coordinator inet,"
-                     + "coordinator_port int,"
-                     + "participants set<inet>,"
-                     + "participants_v2 set<text>,"
-                     + "exception_message text,"
-                     + "exception_stacktrace text,"
-                     + "status text,"
-                     + "started_at timestamp,"
-                     + "finished_at timestamp,"
-                     + "PRIMARY KEY ((keyspace_name, columnfamily_name), id))")
-        .defaultTimeToLive((int) TimeUnit.DAYS.toSeconds(30))
-        .compaction(CompactionParams.twcs(ImmutableMap.of("compaction_window_unit","DAYS",
-                                                          "compaction_window_size","1")))
-        .build();
-
-    private static final TableMetadata ParentRepairHistory =
-        parse(PARENT_REPAIR_HISTORY,
-                "Repair history",
-                "CREATE TABLE %s ("
-                     + "parent_id timeuuid,"
-                     + "keyspace_name text,"
-                     + "columnfamily_names set<text>,"
-                     + "started_at timestamp,"
-                     + "finished_at timestamp,"
-                     + "exception_message text,"
-                     + "exception_stacktrace text,"
-                     + "requested_ranges set<text>,"
-                     + "successful_ranges set<text>,"
-                     + "options map<text, text>,"
-                     + "PRIMARY KEY (parent_id))")
-        .defaultTimeToLive((int) TimeUnit.DAYS.toSeconds(30))
-        .compaction(CompactionParams.twcs(ImmutableMap.of("compaction_window_unit","DAYS",
-                                                          "compaction_window_size","1")))
-        .build();
-
-    private static final TableMetadata ViewBuildStatus =
-        parse(VIEW_BUILD_STATUS,
-            "Materialized View build status",
-            "CREATE TABLE %s ("
-                     + "keyspace_name text,"
-                     + "view_name text,"
-                     + "host_id uuid,"
-                     + "status text,"
-                     + "PRIMARY KEY ((keyspace_name, view_name), host_id))").build();
-
-    private static TableMetadata.Builder parse(String table, String description, String cql)
-    {
-        return CreateTableStatement.parse(format(cql, table), SchemaConstants.DISTRIBUTED_KEYSPACE_NAME)
-                                   .id(TableId.forSystemTable(SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, table))
-                                   .comment(description);
-    }
-
-    public static KeyspaceMetadata metadata()
-    {
-        return KeyspaceMetadata.create(SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, KeyspaceParams.simple(3), Tables.of(RepairHistory, ParentRepairHistory, ViewBuildStatus));
-    }
-
-    public static void startParentRepair(UUID parent_id, String keyspaceName, String[] cfnames, RepairOption options)
-    {
-        Collection<Range<Token>> ranges = options.getRanges();
-        String query = "INSERT INTO %s.%s (parent_id, keyspace_name, columnfamily_names, requested_ranges, started_at,          options)"+
-                                 " VALUES (%s,        '%s',          { '%s' },           { '%s' },          toTimestamp(now()), { %s })";
-        String fmtQry = format(query,
-                                      SchemaConstants.DISTRIBUTED_KEYSPACE_NAME,
-                                      PARENT_REPAIR_HISTORY,
-                                      parent_id.toString(),
-                                      keyspaceName,
-                                      Joiner.on("','").join(cfnames),
-                                      Joiner.on("','").join(ranges),
-                                      toCQLMap(options.asMap(), RepairOption.RANGES_KEY, RepairOption.COLUMNFAMILIES_KEY));
-        processSilent(fmtQry);
-    }
-
-    private static String toCQLMap(Map<String, String> options, String ... ignore)
-    {
-        Set<String> toIgnore = Sets.newHashSet(ignore);
-        StringBuilder map = new StringBuilder();
-        boolean first = true;
-        for (Map.Entry<String, String> entry : options.entrySet())
-        {
-            if (!toIgnore.contains(entry.getKey()))
-            {
-                if (!first)
-                    map.append(',');
-                first = false;
-                map.append(format("'%s': '%s'", entry.getKey(), entry.getValue()));
-            }
-        }
-        return map.toString();
-    }
-
-    public static void failParentRepair(UUID parent_id, Throwable t)
-    {
-        String query = "UPDATE %s.%s SET finished_at = toTimestamp(now()), exception_message=?, exception_stacktrace=? WHERE parent_id=%s";
-
-        StringWriter sw = new StringWriter();
-        PrintWriter pw = new PrintWriter(sw);
-        t.printStackTrace(pw);
-        String fmtQuery = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, PARENT_REPAIR_HISTORY, parent_id.toString());
-        String message = t.getMessage();
-        processSilent(fmtQuery, message != null ? message : "", sw.toString());
-    }
-
-    public static void successfulParentRepair(UUID parent_id, Collection<Range<Token>> successfulRanges)
-    {
-        String query = "UPDATE %s.%s SET finished_at = toTimestamp(now()), successful_ranges = {'%s'} WHERE parent_id=%s";
-        String fmtQuery = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, PARENT_REPAIR_HISTORY, Joiner.on("','").join(successfulRanges), parent_id.toString());
-        processSilent(fmtQuery);
-    }
-
-    public static void startRepairs(UUID id, UUID parent_id, String keyspaceName, String[] cfnames, CommonRange commonRange)
-    {
-        //Don't record repair history if an upgrade is in progress as version 3 nodes generates errors
-        //due to schema differences
-        boolean includeNewColumns = !Gossiper.instance.hasMajorVersion3Nodes();
-
-        InetAddressAndPort coordinator = FBUtilities.getBroadcastAddressAndPort();
-        Set<String> participants = Sets.newHashSet();
-        Set<String> participants_v2 = Sets.newHashSet();
-
-        for (InetAddressAndPort endpoint : commonRange.endpoints)
-        {
-            participants.add(endpoint.getHostAddress(false));
-            participants_v2.add(endpoint.getHostAddressAndPort());
-        }
-
-        String query =
-                "INSERT INTO %s.%s (keyspace_name, columnfamily_name, id, parent_id, range_begin, range_end, coordinator, coordinator_port, participants, participants_v2, status, started_at) " +
-                        "VALUES (   '%s',          '%s',              %s, %s,        '%s',        '%s',      '%s',        %d,               { '%s' },     { '%s' },        '%s',   toTimestamp(now()))";
-        String queryWithoutNewColumns =
-                "INSERT INTO %s.%s (keyspace_name, columnfamily_name, id, parent_id, range_begin, range_end, coordinator, participants, status, started_at) " +
-                        "VALUES (   '%s',          '%s',              %s, %s,        '%s',        '%s',      '%s',               { '%s' },        '%s',   toTimestamp(now()))";
-
-        for (String cfname : cfnames)
-        {
-            for (Range<Token> range : commonRange.ranges)
-            {
-                String fmtQry;
-                if (includeNewColumns)
-                {
-                    fmtQry = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
-                                    keyspaceName,
-                                    cfname,
-                                    id.toString(),
-                                    parent_id.toString(),
-                                    range.left.toString(),
-                                    range.right.toString(),
-                                    coordinator.getHostAddress(false),
-                                    coordinator.port,
-                                    Joiner.on("', '").join(participants),
-                                    Joiner.on("', '").join(participants_v2),
-                                    RepairState.STARTED.toString());
-                }
-                else
-                {
-                    fmtQry = format(queryWithoutNewColumns, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
-                                    keyspaceName,
-                                    cfname,
-                                    id.toString(),
-                                    parent_id.toString(),
-                                    range.left.toString(),
-                                    range.right.toString(),
-                                    coordinator.getHostAddress(false),
-                                    Joiner.on("', '").join(participants),
-                                    RepairState.STARTED.toString());
-                }
-                processSilent(fmtQry);
-            }
-        }
-    }
-
-    public static void failRepairs(UUID id, String keyspaceName, String[] cfnames, Throwable t)
-    {
-        for (String cfname : cfnames)
-            failedRepairJob(id, keyspaceName, cfname, t);
-    }
-
-    public static void successfulRepairJob(UUID id, String keyspaceName, String cfname)
-    {
-        String query = "UPDATE %s.%s SET status = '%s', finished_at = toTimestamp(now()) WHERE keyspace_name = '%s' AND columnfamily_name = '%s' AND id = %s";
-        String fmtQuery = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
-                                        RepairState.SUCCESS.toString(),
-                                        keyspaceName,
-                                        cfname,
-                                        id.toString());
-        processSilent(fmtQuery);
-    }
-
-    public static void failedRepairJob(UUID id, String keyspaceName, String cfname, Throwable t)
-    {
-        String query = "UPDATE %s.%s SET status = '%s', finished_at = toTimestamp(now()), exception_message=?, exception_stacktrace=? WHERE keyspace_name = '%s' AND columnfamily_name = '%s' AND id = %s";
-        StringWriter sw = new StringWriter();
-        PrintWriter pw = new PrintWriter(sw);
-        t.printStackTrace(pw);
-        String fmtQry = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
-                                      RepairState.FAILED.toString(),
-                                      keyspaceName,
-                                      cfname,
-                                      id.toString());
-        processSilent(fmtQry, t.getMessage(), sw.toString());
-    }
-
-    public static void startViewBuild(String keyspace, String view, UUID hostId)
-    {
-        String query = "INSERT INTO %s.%s (keyspace_name, view_name, host_id, status) VALUES (?, ?, ?, ?)";
-        QueryProcessor.process(format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS),
-                               ConsistencyLevel.ONE,
-                               Lists.newArrayList(bytes(keyspace),
-                                                  bytes(view),
-                                                  bytes(hostId),
-                                                  bytes(BuildStatus.STARTED.toString())));
-    }
-
-    public static void successfulViewBuild(String keyspace, String view, UUID hostId)
-    {
-        String query = "UPDATE %s.%s SET status = ? WHERE keyspace_name = ? AND view_name = ? AND host_id = ?";
-        QueryProcessor.process(format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS),
-                               ConsistencyLevel.ONE,
-                               Lists.newArrayList(bytes(BuildStatus.SUCCESS.toString()),
-                                                  bytes(keyspace),
-                                                  bytes(view),
-                                                  bytes(hostId)));
-    }
-
-    public static Map<UUID, String> viewStatus(String keyspace, String view)
-    {
-        String query = "SELECT host_id, status FROM %s.%s WHERE keyspace_name = ? AND view_name = ?";
-        UntypedResultSet results;
-        try
-        {
-            results = QueryProcessor.execute(format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS),
-                                             ConsistencyLevel.ONE,
-                                             keyspace,
-                                             view);
-        }
-        catch (Exception e)
-        {
-            return Collections.emptyMap();
-        }
-
-
-        Map<UUID, String> status = new HashMap<>();
-        for (UntypedResultSet.Row row : results)
-        {
-            status.put(row.getUUID("host_id"), row.getString("status"));
-        }
-        return status;
-    }
-
-    public static void setViewRemoved(String keyspaceName, String viewName)
-    {
-        String buildReq = "DELETE FROM %s.%s WHERE keyspace_name = ? AND view_name = ?";
-        QueryProcessor.executeInternal(format(buildReq, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS), keyspaceName, viewName);
-        forceBlockingFlush(VIEW_BUILD_STATUS);
-    }
-
-    private static void processSilent(String fmtQry, String... values)
-    {
-        try
-        {
-            List<ByteBuffer> valueList = new ArrayList<>(values.length);
-            for (String v : values)
-            {
-                valueList.add(bytes(v));
-            }
-            QueryProcessor.process(fmtQry, ConsistencyLevel.ANY, valueList);
-        }
-        catch (Throwable t)
-        {
-            logger.error("Error executing query "+fmtQry, t);
-        }
-    }
-
-    public static void forceBlockingFlush(String table)
-    {
-        if (!DatabaseDescriptor.isUnsafeSystem())
-            FBUtilities.waitOnFuture(Keyspace.open(SchemaConstants.DISTRIBUTED_KEYSPACE_NAME).getColumnFamilyStore(table).forceFlush());
-    }
-
-    private enum RepairState
-    {
-        STARTED, SUCCESS, FAILED
-    }
-
-    private enum BuildStatus
-    {
-        UNKNOWN, STARTED, SUCCESS
-    }
-}
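Editor's note: the removed SystemDistributedKeyspace helpers built CQL literals by hand; for instance, toCQLMap rendered a Java map as a CQL map literal while skipping keys tracked in dedicated columns. A minimal standalone sketch of that string-building approach (class and method names here are illustrative, not part of the patch):

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

// Illustrative only: mirrors the removed toCQLMap() helper, which rendered a
// Java map as a CQL map literal while excluding keys stored elsewhere.
public class CqlMapLiteralSketch
{
    static String toCqlMap(Map<String, String> options, Set<String> ignore)
    {
        StringBuilder sb = new StringBuilder();
        boolean first = true;
        for (Map.Entry<String, String> e : options.entrySet())
        {
            if (ignore.contains(e.getKey()))
                continue;
            if (!first)
                sb.append(", ");
            first = false;
            sb.append(String.format("'%s': '%s'", e.getKey(), e.getValue()));
        }
        return sb.toString();
    }

    public static void main(String[] args)
    {
        Map<String, String> opts = new LinkedHashMap<>();
        opts.put("parallelism", "parallel");
        opts.put("incremental", "false");
        opts.put("ranges", "(0,100]"); // internal key, excluded below
        // Prints: 'parallelism': 'parallel', 'incremental': 'false'
        System.out.println(toCqlMap(opts, Set.of("ranges")));
    }
}
```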
diff --git a/src/java/org/apache/cassandra/repair/TableRepairManager.java b/src/java/org/apache/cassandra/repair/TableRepairManager.java
index b72af1d..622374a 100644
--- a/src/java/org/apache/cassandra/repair/TableRepairManager.java
+++ b/src/java/org/apache/cassandra/repair/TableRepairManager.java
@@ -20,12 +20,13 @@
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.metrics.TopPartitionTracker;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Table level hook for repair
@@ -34,10 +35,10 @@
 {
     /**
      * Return a validation iterator for the given parameters. If isIncremental is true, the iterator must only include
-     * data previously isolated for repair with the given parentId. nowInSec should determine whether tombstones shouldn
+     * data previously isolated for repair with the given parentId. nowInSec should determine whether tombstones should
      * be purged or not.
      */
-    ValidationPartitionIterator getValidationIterator(Collection<Range<Token>> ranges, UUID parentId, UUID sessionID, boolean isIncremental, int nowInSec) throws IOException;
+    ValidationPartitionIterator getValidationIterator(Collection<Range<Token>> ranges, TimeUUID parentId, TimeUUID sessionID, boolean isIncremental, int nowInSec, TopPartitionTracker.Collector topPartitionCollector) throws IOException, NoSuchRepairSessionException;
 
     /**
      * Begin execution of the given validation callable. Which thread pool a validation should run in is an implementation detail.
@@ -54,7 +55,7 @@
      * time. If the repairedAt time is zero, the data for the given session should be demoted back to unrepaired. Otherwise,
      * it should be promoted to repaired with the given repaired time.
      */
-    void incrementalSessionCompleted(UUID sessionID);
+    void incrementalSessionCompleted(TimeUUID sessionID);
 
     /**
      * For snapshot repairs. A snapshot of the current data for the given ranges should be taken with the given name.
diff --git a/src/java/org/apache/cassandra/repair/ValidationManager.java b/src/java/org/apache/cassandra/repair/ValidationManager.java
index eb6ec96..a6c9fa9 100644
--- a/src/java/org/apache/cassandra/repair/ValidationManager.java
+++ b/src/java/org/apache/cassandra/repair/ValidationManager.java
@@ -30,14 +30,19 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
 import org.apache.cassandra.db.rows.UnfilteredRowIterator;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.metrics.TopPartitionTracker;
+import org.apache.cassandra.repair.state.ValidationState;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MerkleTree;
 import org.apache.cassandra.utils.MerkleTrees;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class ValidationManager
 {
     private static final Logger logger = LoggerFactory.getLogger(ValidationManager.class);
@@ -48,13 +53,13 @@
 
     private static MerkleTrees createMerkleTrees(ValidationPartitionIterator validationIterator, Collection<Range<Token>> ranges, ColumnFamilyStore cfs)
     {
-        MerkleTrees tree = new MerkleTrees(cfs.getPartitioner());
+        MerkleTrees trees = new MerkleTrees(cfs.getPartitioner());
         long allPartitions = validationIterator.estimatedPartitions();
         Map<Range<Token>, Long> rangePartitionCounts = validationIterator.getRangePartitionCounts();
 
         // The repair coordinator must hold RF trees in memory at once, so a given validation compaction can only
         // use 1 / RF of the allowed space.
-        long availableBytes = (DatabaseDescriptor.getRepairSessionSpaceInMegabytes() * 1048576) /
+        long availableBytes = (DatabaseDescriptor.getRepairSessionSpaceInMiB() * 1048576) /
                               cfs.keyspace.getReplicationStrategy().getReplicationFactor().allReplicas;
 
         for (Range<Token> range : ranges)
@@ -72,21 +77,21 @@
                            : 0;
             // determine tree depth from number of partitions, capping at max tree depth (CASSANDRA-5263)
             int depth = numPartitions > 0 ? (int) Math.min(Math.ceil(Math.log(numPartitions) / Math.log(2)), maxDepth) : 0;
-            tree.addMerkleTree((int) Math.pow(2, depth), range);
+            trees.addMerkleTree((int) Math.pow(2, depth), range);
         }
         if (logger.isDebugEnabled())
         {
             // MT serialize may take time
-            logger.debug("Created {} merkle trees with merkle trees size {}, {} partitions, {} bytes", tree.ranges().size(), tree.size(), allPartitions, MerkleTrees.serializer.serializedSize(tree, 0));
+            logger.debug("Created {} merkle trees with merkle trees size {}, {} partitions, {} bytes", trees.ranges().size(), trees.size(), allPartitions, MerkleTrees.serializer.serializedSize(trees, 0));
         }
 
-        return tree;
+        return trees;
     }
 
-    private static ValidationPartitionIterator getValidationIterator(TableRepairManager repairManager, Validator validator) throws IOException
+    private static ValidationPartitionIterator getValidationIterator(TableRepairManager repairManager, Validator validator, TopPartitionTracker.Collector topPartitionCollector) throws IOException, NoSuchRepairSessionException
     {
         RepairJobDesc desc = validator.desc;
-        return repairManager.getValidationIterator(desc.ranges, desc.parentSessionId, desc.sessionId, validator.isIncremental, validator.nowInSec);
+        return repairManager.getValidationIterator(desc.ranges, desc.parentSessionId, desc.sessionId, validator.isIncremental, validator.nowInSec, topPartitionCollector);
     }
 
     /**
@@ -94,60 +99,81 @@
      * but without writing the merge result
      */
     @SuppressWarnings("resource")
-    private void doValidation(ColumnFamilyStore cfs, Validator validator) throws IOException
+    private void doValidation(ColumnFamilyStore cfs, Validator validator) throws IOException, NoSuchRepairSessionException
     {
         // this isn't meant to be race-proof, because it's not -- it won't cause bugs for a CFS to be dropped
         // mid-validation, or to attempt to validate a droped CFS.  this is just a best effort to avoid useless work,
         // particularly in the scenario where a validation is submitted before the drop, and there are compactions
         // started prior to the drop keeping some sstables alive.  Since validationCompaction can run
         // concurrently with other compactions, it would otherwise go ahead and scan those again.
+        ValidationState state = validator.state;
         if (!cfs.isValid())
+        {
+            state.phase.skip(String.format("Table %s is not valid", cfs));
             return;
+        }
+
+        TopPartitionTracker.Collector topPartitionCollector = null;
+        if (cfs.topPartitions != null && DatabaseDescriptor.topPartitionsEnabled() && isTopPartitionSupported(validator))
+            topPartitionCollector = new TopPartitionTracker.Collector(validator.desc.ranges);
 
         // Create Merkle trees suitable to hold estimated partitions for the given ranges.
         // We blindly assume that a partition is evenly distributed on all sstables for now.
-        long start = System.nanoTime();
-        long partitionCount = 0;
-        long estimatedTotalBytes = 0;
-        try (ValidationPartitionIterator vi = getValidationIterator(cfs.getRepairManager(), validator))
+        long start = nanoTime();
+        try (ValidationPartitionIterator vi = getValidationIterator(cfs.getRepairManager(), validator, topPartitionCollector))
         {
-            MerkleTrees tree = createMerkleTrees(vi, validator.desc.ranges, cfs);
-            try
+            state.phase.start(vi.estimatedPartitions(), vi.getEstimatedBytes());
+            MerkleTrees trees = createMerkleTrees(vi, validator.desc.ranges, cfs);
+            // validate the CF as we iterate over it
+            validator.prepare(cfs, trees, topPartitionCollector);
+            while (vi.hasNext())
             {
-                // validate the CF as we iterate over it
-                validator.prepare(cfs, tree);
-                while (vi.hasNext())
+                try (UnfilteredRowIterator partition = vi.next())
                 {
-                    try (UnfilteredRowIterator partition = vi.next())
-                    {
-                        validator.add(partition);
-                        partitionCount++;
-                    }
+                    validator.add(partition);
+                    state.partitionsProcessed++;
+                    state.bytesRead = vi.getBytesRead();
+                    if (state.partitionsProcessed % 1024 == 0) // update every so often
+                        state.updated();
                 }
-                validator.complete();
             }
-            finally
-            {
-                estimatedTotalBytes = vi.getEstimatedBytes();
-                partitionCount = vi.estimatedPartitions();
-            }
+            validator.complete();
         }
         finally
         {
-            cfs.metric.bytesValidated.update(estimatedTotalBytes);
-            cfs.metric.partitionsValidated.update(partitionCount);
+            cfs.metric.bytesValidated.update(state.estimatedTotalBytes);
+            cfs.metric.partitionsValidated.update(state.partitionsProcessed);
+            if (topPartitionCollector != null)
+                cfs.topPartitions.merge(topPartitionCollector);
         }
         if (logger.isDebugEnabled())
         {
-            long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+            long duration = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
             logger.debug("Validation of {} partitions (~{}) finished in {} msec, for {}",
-                         partitionCount,
-                         FBUtilities.prettyPrintMemory(estimatedTotalBytes),
+                         state.partitionsProcessed,
+                         FBUtilities.prettyPrintMemory(state.estimatedTotalBytes),
                          duration,
                          validator.desc);
         }
     }
 
+    private static boolean isTopPartitionSupported(Validator validator)
+    {
+        // supported: --validate, --full, --full --preview
+        switch (validator.getPreviewKind())
+        {
+            case NONE:
+                return !validator.isIncremental;
+            case ALL:
+            case REPAIRED:
+                return true;
+            case UNREPAIRED:
+                return false;
+            default:
+                throw new AssertionError("Unknown preview kind: " + validator.getPreviewKind());
+        }
+    }
+
     /**
      * Does not mutate data, so is not scheduled.
      */
@@ -161,15 +187,15 @@
                 {
                     doValidation(cfs, validator);
                 }
-                catch (PreviewRepairConflictWithIncrementalRepairException e)
+                catch (PreviewRepairConflictWithIncrementalRepairException | NoSuchRepairSessionException | CompactionInterruptedException e)
                 {
-                    validator.fail();
+                    validator.fail(e);
                     logger.warn(e.getMessage());
                 }
                 catch (Throwable e)
                 {
                     // we need to inform the remote end of our failure, otherwise it will hang on repair forever
-                    validator.fail();
+                    validator.fail(e);
                     logger.error("Validation failed.", e);
                     throw e;
                 }
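Editor's note: createMerkleTrees sizes each range's tree from the estimated partition count, capping the depth so that the coordinator's RF trees fit the per-session memory budget. A small sketch of that arithmetic under stated assumptions; the derivation of maxDepth from the byte budget is not fully visible in this hunk, so the bytes-per-hash figure below is illustrative only:

```java
// Illustrative sketch of the depth math used when sizing Merkle trees for validation.
// Assumption: each leaf costs roughly 'bytesPerHash' bytes; the real maxDepth
// derivation in ValidationManager is not fully shown in this hunk.
public class MerkleDepthSketch
{
    static int depthFor(long numPartitions, int maxDepth)
    {
        // depth grows with log2 of the partition estimate, capped at maxDepth (CASSANDRA-5263)
        return numPartitions > 0
               ? (int) Math.min(Math.ceil(Math.log(numPartitions) / Math.log(2)), maxDepth)
               : 0;
    }

    public static void main(String[] args)
    {
        long repairSessionSpaceMiB = 256;  // cf. DatabaseDescriptor.getRepairSessionSpaceInMiB()
        int allReplicas = 3;               // the coordinator must hold RF trees at once
        long availableBytes = (repairSessionSpaceMiB * 1048576) / allReplicas;

        long bytesPerHash = 48;            // assumption, for illustration only
        int maxDepth = (int) (Math.log(availableBytes / bytesPerHash) / Math.log(2));

        long estimatedPartitions = 5_000_000L;
        int depth = depthFor(estimatedPartitions, maxDepth);
        System.out.printf("maxDepth=%d depth=%d leaves=%.0f%n", maxDepth, depth, Math.pow(2, depth));
    }
}
```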
diff --git a/src/java/org/apache/cassandra/repair/ValidationPartitionIterator.java b/src/java/org/apache/cassandra/repair/ValidationPartitionIterator.java
index ccfae41..a8f457d 100644
--- a/src/java/org/apache/cassandra/repair/ValidationPartitionIterator.java
+++ b/src/java/org/apache/cassandra/repair/ValidationPartitionIterator.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.repair;
 
-import java.io.IOException;
 import java.util.Map;
 
 import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
@@ -29,5 +28,6 @@
 {
     public abstract long getEstimatedBytes();
     public abstract long estimatedPartitions();
+    public abstract long getBytesRead();
     public abstract Map<Range<Token>, Long> getRangePartitionCounts();
 }
diff --git a/src/java/org/apache/cassandra/repair/ValidationTask.java b/src/java/org/apache/cassandra/repair/ValidationTask.java
index b4aef24..2ad1761 100644
--- a/src/java/org/apache/cassandra/repair/ValidationTask.java
+++ b/src/java/org/apache/cassandra/repair/ValidationTask.java
@@ -17,7 +17,7 @@
  */
 package org.apache.cassandra.repair;
 
-import com.google.common.util.concurrent.AbstractFuture;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.cassandra.exceptions.RepairException;
 import org.apache.cassandra.locator.InetAddressAndPort;
@@ -25,6 +25,7 @@
 import org.apache.cassandra.repair.messages.ValidationRequest;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.MerkleTrees;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
 
 import static org.apache.cassandra.net.Verb.VALIDATION_REQ;
 
@@ -32,12 +33,14 @@
  * ValidationTask sends {@link ValidationRequest} to a replica.
  * When a replica sends back message, task completes.
  */
-public class ValidationTask extends AbstractFuture<TreeResponse> implements Runnable
+public class ValidationTask extends AsyncFuture<TreeResponse> implements Runnable
 {
     private final RepairJobDesc desc;
     private final InetAddressAndPort endpoint;
     private final int nowInSec;
     private final PreviewKind previewKind;
+    
+    private boolean active = true;
 
     public ValidationTask(RepairJobDesc desc, InetAddressAndPort endpoint, int nowInSec, PreviewKind previewKind)
     {
@@ -55,7 +58,7 @@
         RepairMessage.sendMessageWithFailureCB(new ValidationRequest(desc, nowInSec),
                                                VALIDATION_REQ,
                                                endpoint,
-                                               this::setException);
+                                               this::tryFailure);
     }
 
     /**
@@ -63,15 +66,60 @@
      *
      * @param trees MerkleTrees that is sent from replica. Null if validation failed on replica node.
      */
-    public void treesReceived(MerkleTrees trees)
+    public synchronized void treesReceived(MerkleTrees trees)
     {
         if (trees == null)
         {
-            setException(new RepairException(desc, previewKind, "Validation failed in " + endpoint));
+            active = false;
+            tryFailure(RepairException.warn(desc, previewKind, "Validation failed in " + endpoint));
+        }
+        else if (active)
+        {
+            trySuccess(new TreeResponse(endpoint, trees));
         }
         else
         {
-            set(new TreeResponse(endpoint, trees));
+            // If the task has already been aborted, just release the possibly off-heap trees and move along.
+            trees.release();
+            trySuccess(null);
         }
     }
+
+    /**
+     * Release any trees already received by this task, and place it in a state where any trees

+     * received subsequently will be properly discarded.
+     */
+    public synchronized void abort()
+    {
+        if (active) 
+        {
+            if (isDone())
+            {
+                try 
+                {
+                    // If we're done, this should return immediately.
+                    TreeResponse response = get();
+                    
+                    if (response.trees != null)
+                        response.trees.release();
+                } 
+                catch (InterruptedException e) 
+                {
+                    // Restore the interrupt.
+                    Thread.currentThread().interrupt();
+                } 
+                catch (ExecutionException e) 
+                {
+                    // Do nothing here. If an exception was set, there were no trees to release.
+                }
+            }
+            
+            active = false;
+        }
+    }
+    
+    public synchronized boolean isActive()
+    {
+        return active;
+    }
 }
diff --git a/src/java/org/apache/cassandra/repair/Validator.java b/src/java/org/apache/cassandra/repair/Validator.java
index 2f71729..abbfb4f 100644
--- a/src/java/org/apache/cassandra/repair/Validator.java
+++ b/src/java/org/apache/cassandra/repair/Validator.java
@@ -17,19 +17,11 @@
  */
 package org.apache.cassandra.repair;
 
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.hash.Funnel;
-import com.google.common.hash.HashCode;
-import com.google.common.hash.HashFunction;
-import com.google.common.hash.Hasher;
-import com.google.common.hash.Hashing;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,9 +34,11 @@
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.metrics.TopPartitionTracker;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.repair.messages.ValidationResponse;
+import org.apache.cassandra.repair.state.ValidationState;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.tracing.Tracing;
@@ -84,21 +78,24 @@
     private DecoratedKey lastKey;
 
     private final PreviewKind previewKind;
+    public final ValidationState state;
+    public TopPartitionTracker.Collector topPartitionCollector;
 
-    public Validator(RepairJobDesc desc, InetAddressAndPort initiator, int nowInSec, PreviewKind previewKind)
+    public Validator(ValidationState state, int nowInSec, PreviewKind previewKind)
     {
-        this(desc, initiator, nowInSec, false, false, previewKind);
+        this(state, nowInSec, false, false, previewKind);
     }
 
-    public Validator(RepairJobDesc desc, InetAddressAndPort initiator, int nowInSec, boolean isIncremental, PreviewKind previewKind)
+    public Validator(ValidationState state, int nowInSec, boolean isIncremental, PreviewKind previewKind)
     {
-        this(desc, initiator, nowInSec, false, isIncremental, previewKind);
+        this(state, nowInSec, false, isIncremental, previewKind);
     }
 
-    public Validator(RepairJobDesc desc, InetAddressAndPort initiator, int nowInSec, boolean evenTreeDistribution, boolean isIncremental, PreviewKind previewKind)
+    public Validator(ValidationState state, int nowInSec, boolean evenTreeDistribution, boolean isIncremental, PreviewKind previewKind)
     {
-        this.desc = desc;
-        this.initiator = initiator;
+        this.state = state;
+        this.desc = state.desc;
+        this.initiator = state.initiator;
         this.nowInSec = nowInSec;
         this.isIncremental = isIncremental;
         this.previewKind = previewKind;
@@ -108,21 +105,22 @@
         this.evenTreeDistribution = evenTreeDistribution;
     }
 
-    public void prepare(ColumnFamilyStore cfs, MerkleTrees tree)
+    public void prepare(ColumnFamilyStore cfs, MerkleTrees trees, TopPartitionTracker.Collector topPartitionCollector)
     {
-        this.trees = tree;
+        this.trees = trees;
+        this.topPartitionCollector = topPartitionCollector;
 
-        if (!tree.partitioner().preservesOrder() || evenTreeDistribution)
+        if (!trees.partitioner().preservesOrder() || evenTreeDistribution)
         {
-            // You can't beat an even tree distribution for md5
-            tree.init();
+            // You can't beat an even tree distribution for md5
+            trees.init();
         }
         else
         {
             List<DecoratedKey> keys = new ArrayList<>();
             Random random = new Random();
 
-            for (Range<Token> range : tree.ranges())
+            for (Range<Token> range : trees.ranges())
             {
                 for (DecoratedKey sample : cfs.keySamples(range))
                 {
@@ -132,8 +130,8 @@
 
                 if (keys.isEmpty())
                 {
-                    // use an even tree distribution
-                    tree.init(range);
+                    // use an even tree distribution
+                    trees.init(range);
                 }
                 else
                 {
@@ -142,15 +140,15 @@
                     while (true)
                     {
                         DecoratedKey dk = keys.get(random.nextInt(numKeys));
-                        if (!tree.split(dk.getToken()))
+                        if (!trees.split(dk.getToken()))
                             break;
                     }
                     keys.clear();
                 }
             }
         }
-        logger.debug("Prepared AEService trees of size {} for {}", trees.size(), desc);
-        ranges = tree.rangeIterator();
+        logger.debug("Prepared AEService trees of size {} for {}", this.trees.size(), desc);
+        ranges = trees.rangeIterator();
     }
 
     /**
@@ -182,6 +180,8 @@
         RowHash rowHash = rowHash(partition);
         if (rowHash != null)
         {
+            if (topPartitionCollector != null)
+                topPartitionCollector.trackPartitionSize(partition.partitionKey(), rowHash.size);
             range.addHash(rowHash);
         }
     }
@@ -224,6 +224,7 @@
             trees.logRowSizePerLeaf(logger);
         }
 
+        state.phase.sendingTrees();
         Stage.ANTI_ENTROPY.execute(this);
     }
 
@@ -232,9 +233,9 @@
      * This sends RepairStatus to inform the initiator that the validation has failed.
      * The actual reason for failure should be looked up in the log of the host calling this function.
      */
-    public void fail()
+    public void fail(Throwable e)
     {
-        logger.error("Failed creating a merkle tree for {}, {} (see log for details)", desc, initiator);
+        state.phase.fail(e);
         respond(new ValidationResponse(desc));
     }
 
@@ -254,9 +255,15 @@
             Tracing.traceRepair("Local completed merkle tree for {} for {}.{}", initiator, desc.keyspace, desc.columnFamily);
 
         }
+        state.phase.success();
         respond(new ValidationResponse(desc, trees));
     }
 
+    public PreviewKind getPreviewKind()
+    {
+        return previewKind;
+    }
+
     private boolean initiatorIsRemote()
     {
         return !FBUtilities.getBroadcastAddressAndPort().equals(initiator);
@@ -273,7 +280,7 @@
         /*
          * For local initiators, DO NOT send the message to self over loopback. This is a wasted ser/de loop
          * and a ton of garbage. Instead, move the trees off heap and invoke message handler. We could do it
-         * directly, since this method will only be called from {@code Stage.ENTI_ENTROPY}, but we do instead
+         * directly, since this method will only be called from {@code Stage.ANTI_ENTROPY}, but we do instead
          * execute a {@code Runnable} on the stage - in case that assumption ever changes by accident.
          */
         Stage.ANTI_ENTROPY.execute(() ->
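Editor's note: the respond() comment above explains why a locally initiated validation hands its trees to the local handler on the ANTI_ENTROPY stage instead of sending a message to itself over loopback. A generic sketch of that dispatch decision, with all names illustrative (not Cassandra's API):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

// Illustrative only: when the initiator is this node, hand the payload to the local
// handler on the same stage rather than paying the ser/de cost of messaging ourselves.
public class LocalDispatchSketch
{
    private final ExecutorService antiEntropyStage = Executors.newSingleThreadExecutor();

    public <T> void respond(T payload, boolean initiatorIsRemote,
                            Consumer<T> networkSend, Consumer<T> localHandler)
    {
        if (initiatorIsRemote)
        {
            networkSend.accept(payload); // normal serialization + network path
            return;
        }
        // Still go through the stage (not a direct call) in case the calling-thread
        // assumption ever changes, mirroring the comment in Validator.respond().
        antiEntropyStage.execute(() -> localHandler.accept(payload));
    }
}
```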
diff --git a/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java b/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java
index 8b123a7..76ff5aa 100644
--- a/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java
+++ b/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java
@@ -99,4 +99,4 @@
                "perHostDifferences=" + perHostDifferences +
                '}';
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java b/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java
index e8ca85d..b2622ef 100644
--- a/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java
+++ b/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java
@@ -25,4 +25,4 @@
 public interface PreferedNodeFilter
 {
     public Set<InetAddressAndPort> apply(InetAddressAndPort streamingNode, Set<InetAddressAndPort> toStream);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/repair/asymmetric/RangeDenormalizer.java b/src/java/org/apache/cassandra/repair/asymmetric/RangeDenormalizer.java
index 2a29871..ee69f51 100644
--- a/src/java/org/apache/cassandra/repair/asymmetric/RangeDenormalizer.java
+++ b/src/java/org/apache/cassandra/repair/asymmetric/RangeDenormalizer.java
@@ -20,7 +20,6 @@
 
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 
diff --git a/src/java/org/apache/cassandra/repair/consistent/ConsistentSession.java b/src/java/org/apache/cassandra/repair/consistent/ConsistentSession.java
index e4d8ff0..86ecfb7 100644
--- a/src/java/org/apache/cassandra/repair/consistent/ConsistentSession.java
+++ b/src/java/org/apache/cassandra/repair/consistent/ConsistentSession.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.repair.consistent;
 
-import java.net.InetAddress;
 import java.util.Collection;
 import java.util.EnumMap;
 import java.util.List;
@@ -48,6 +47,7 @@
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.tools.nodetool.RepairAdmin;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Base class for consistent Local and Coordinator sessions
@@ -56,7 +56,7 @@
  * There are 4 stages to a consistent incremental repair.
  *
  * <h1>Repair prepare</h1>
- *  First, the normal {@link ActiveRepairService#prepareForRepair(UUID, InetAddressAndPort, Set, RepairOption, boolean, List)} stuff
+ *  First, the normal {@link ActiveRepairService#prepareForRepair(TimeUUID, InetAddressAndPort, Set, RepairOption, boolean, List)} stuff
  *  happens, which sends out {@link PrepareMessage} and creates a {@link ActiveRepairService.ParentRepairSession}
  *  on the coordinator and each of the neighbors.
  *
@@ -187,7 +187,7 @@
     }
 
     private volatile State state;
-    public final UUID sessionID;
+    public final TimeUUID sessionID;
     public final InetAddressAndPort coordinator;
     public final ImmutableSet<TableId> tableIds;
     public final long repairedAt;
@@ -265,7 +265,7 @@
     abstract static class AbstractBuilder
     {
         private State state;
-        private UUID sessionID;
+        private TimeUUID sessionID;
         private InetAddressAndPort coordinator;
         private Set<TableId> ids;
         private long repairedAt;
@@ -277,7 +277,7 @@
             this.state = state;
         }
 
-        void withSessionID(UUID sessionID)
+        void withSessionID(TimeUUID sessionID)
         {
             this.sessionID = sessionID;
         }
diff --git a/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java b/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java
index 5ddac3f..88f3002 100644
--- a/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java
+++ b/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java
@@ -19,33 +19,29 @@
 package org.apache.cassandra.repair.consistent;
 
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
-import javax.annotation.Nullable;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
+
+import org.apache.cassandra.concurrent.ImmediateExecutor;
+import org.apache.cassandra.repair.CoordinatedRepairResult;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Future;
+
 import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.exceptions.RepairException;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.Verb;
-import org.apache.cassandra.repair.RepairSessionResult;
 import org.apache.cassandra.repair.SomeRepairFailedException;
 import org.apache.cassandra.repair.messages.FailSession;
 import org.apache.cassandra.repair.messages.FinalizeCommit;
@@ -53,6 +49,9 @@
 import org.apache.cassandra.repair.messages.PrepareConsistentRequest;
 import org.apache.cassandra.repair.messages.RepairMessage;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Coordinator side logic and state of a consistent repair session. Like {@link ActiveRepairService.ParentRepairSession},
@@ -64,8 +63,8 @@
     private static final Logger logger = LoggerFactory.getLogger(CoordinatorSession.class);
 
     private final Map<InetAddressAndPort, State> participantStates = new HashMap<>();
-    private final SettableFuture<Boolean> prepareFuture = SettableFuture.create();
-    private final SettableFuture<Boolean> finalizeProposeFuture = SettableFuture.create();
+    private final AsyncPromise<Void> prepareFuture = AsyncPromise.uncancellable();
+    private final AsyncPromise<Void> finalizeProposeFuture = AsyncPromise.uncancellable();
 
     private volatile long sessionStart = Long.MIN_VALUE;
     private volatile long repairStart = Long.MIN_VALUE;
@@ -148,7 +147,7 @@
         MessagingService.instance().send(message, destination);
     }
 
-    public ListenableFuture<Boolean> prepare()
+    public Future<Void> prepare()
     {
         Preconditions.checkArgument(allStates(State.PREPARING));
 
@@ -188,12 +187,11 @@
         if (getState() == State.PREPARED)
         {
             logger.info("Incremental repair session {} successfully prepared.", sessionID);
-            prepareFuture.set(true);
+            prepareFuture.trySuccess(null);
         }
         else
         {
             fail();
-            prepareFuture.set(false);
         }
     }
 
@@ -202,7 +200,7 @@
         setAll(State.REPAIRING);
     }
 
-    public synchronized ListenableFuture<Boolean> finalizePropose()
+    public synchronized Future<Void> finalizePropose()
     {
         Preconditions.checkArgument(allStates(State.REPAIRING));
         logger.info("Proposing finalization of repair session {}", sessionID);
@@ -224,7 +222,6 @@
         {
             logger.warn("Finalization proposal of session {} rejected by {}. Aborting session", sessionID, participant);
             fail();
-            finalizeProposeFuture.set(false);
         }
         else
         {
@@ -233,7 +230,7 @@
             if (getState() == State.FINALIZE_PROMISED)
             {
                 logger.info("Finalization proposal for repair session {} accepted by all participants.", sessionID);
-                finalizeProposeFuture.set(true);
+                finalizeProposeFuture.trySuccess(null);
             }
         }
     }
@@ -279,8 +276,8 @@
         setAll(State.FAILED);
 
         String exceptionMsg = String.format("Incremental repair session %s has failed", sessionID);
-        finalizeProposeFuture.setException(new RuntimeException(exceptionMsg));
-        prepareFuture.setException(new RuntimeException(exceptionMsg));
+        finalizeProposeFuture.tryFailure(RepairException.warn(exceptionMsg));
+        prepareFuture.tryFailure(RepairException.warn(exceptionMsg));
     }
 
     private static String formatDuration(long then, long now)
@@ -296,111 +293,51 @@
     /**
      * Runs the asynchronous consistent repair session. Actual repair sessions are scheduled via a submitter to make unit testing easier
      */
-    public ListenableFuture execute(Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSubmitter, AtomicBoolean hasFailure)
+    public Future<CoordinatedRepairResult> execute(Supplier<Future<CoordinatedRepairResult>> sessionSubmitter)
     {
         logger.info("Beginning coordination of incremental repair session {}", sessionID);
 
-        sessionStart = System.currentTimeMillis();
-        ListenableFuture<Boolean> prepareResult = prepare();
+        sessionStart = currentTimeMillis();
+        Future<Void> prepareResult = prepare();
 
         // run repair sessions normally
-        ListenableFuture<List<RepairSessionResult>> repairSessionResults = Futures.transformAsync(prepareResult, new AsyncFunction<Boolean, List<RepairSessionResult>>()
-        {
-            public ListenableFuture<List<RepairSessionResult>> apply(Boolean success) throws Exception
+        Future<CoordinatedRepairResult> repairSessionResults = prepareResult.flatMap(ignore -> {
+            repairStart = currentTimeMillis();
+            if (logger.isDebugEnabled())
+                logger.debug("Incremental repair {} prepare phase completed in {}", sessionID, formatDuration(sessionStart, repairStart));
+            setRepairing();
+            return sessionSubmitter.get();
+        });
+
+        // if any session failed, then fail the future
+        Future<CoordinatedRepairResult> onlySuccessSessionResults = repairSessionResults.flatMap(result -> {
+            finalizeStart = currentTimeMillis();
+            if (result.hasFailed())
             {
-                if (success)
-                {
-                    repairStart = System.currentTimeMillis();
-                    if (logger.isDebugEnabled())
-                    {
-                        logger.debug("Incremental repair {} prepare phase completed in {}", sessionID, formatDuration(sessionStart, repairStart));
-                    }
-                    setRepairing();
-                    return sessionSubmitter.get();
-                }
-                else
-                {
-                    return Futures.immediateFuture(null);
-                }
-
+                if (logger.isDebugEnabled())
+                    logger.debug("Incremental repair {} validation/stream phase completed in {}", sessionID, formatDuration(repairStart, finalizeStart));
+                return ImmediateFuture.failure(SomeRepairFailedException.INSTANCE);
             }
-        }, MoreExecutors.directExecutor());
+            return ImmediateFuture.success(result);
+        });
 
-        // mark propose finalization
-        ListenableFuture<Boolean> proposeFuture = Futures.transformAsync(repairSessionResults, new AsyncFunction<List<RepairSessionResult>, Boolean>()
-        {
-            public ListenableFuture<Boolean> apply(List<RepairSessionResult> results) throws Exception
+        // mark propose finalization and commit
+        Future<CoordinatedRepairResult> proposeFuture = onlySuccessSessionResults.flatMap(results -> finalizePropose().map(ignore -> {
+            if (logger.isDebugEnabled())
+                logger.debug("Incremental repair {} finalization phase completed in {}", sessionID, formatDuration(finalizeStart, currentTimeMillis()));
+            finalizeCommit();
+            if (logger.isDebugEnabled())
+                logger.debug("Incremental repair {} phase completed in {}", sessionID, formatDuration(sessionStart, currentTimeMillis()));
+            return results;
+        }));
+
+        return proposeFuture.addCallback((ignore, failure) -> {
+            if (failure != null)
             {
-                if (results == null || results.isEmpty() || Iterables.any(results, r -> r == null))
-                {
-                    finalizeStart = System.currentTimeMillis();
-                    if (logger.isDebugEnabled())
-                    {
-                        logger.debug("Incremental repair {} validation/stream phase completed in {}", sessionID, formatDuration(repairStart, finalizeStart));
-
-                    }
-                    return Futures.immediateFailedFuture(SomeRepairFailedException.INSTANCE);
-                }
-                else
-                {
-                    return finalizePropose();
-                }
+                if (logger.isDebugEnabled())
+                    logger.debug("Incremental repair {} phase failed in {}", sessionID, formatDuration(sessionStart, currentTimeMillis()));
+                fail();
             }
-        }, MoreExecutors.directExecutor());
-
-        // return execution result as set by following callback
-        SettableFuture<Boolean> resultFuture = SettableFuture.create();
-
-        // commit repaired data
-        Futures.addCallback(proposeFuture, new FutureCallback<Boolean>()
-        {
-            public void onSuccess(@Nullable Boolean result)
-            {
-                try
-                {
-                    if (result != null && result)
-                    {
-                        if (logger.isDebugEnabled())
-                        {
-                            logger.debug("Incremental repair {} finalization phase completed in {}", sessionID, formatDuration(finalizeStart, System.currentTimeMillis()));
-                        }
-                        finalizeCommit();
-                        if (logger.isDebugEnabled())
-                        {
-                            logger.debug("Incremental repair {} phase completed in {}", sessionID, formatDuration(sessionStart, System.currentTimeMillis()));
-                        }
-                    }
-                    else
-                    {
-                        hasFailure.set(true);
-                        fail();
-                    }
-                    resultFuture.set(result);
-                }
-                catch (Exception e)
-                {
-                    resultFuture.setException(e);
-                }
-            }
-
-            public void onFailure(Throwable t)
-            {
-                try
-                {
-                    if (logger.isDebugEnabled())
-                    {
-                        logger.debug("Incremental repair {} phase failed in {}", sessionID, formatDuration(sessionStart, System.currentTimeMillis()));
-                    }
-                    hasFailure.set(true);
-                    fail();
-                }
-                finally
-                {
-                    resultFuture.setException(t);
-                }
-            }
-        }, MoreExecutors.directExecutor());
-
-        return resultFuture;
+        }, ImmediateExecutor.INSTANCE);
     }
 }
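Editor's note: the execute() rewrite above replaces nested Guava callbacks with flatMap/map/addCallback chaining on Cassandra's internal Future. A rough JDK analogy of the same control flow, using CompletableFuture as a stand-in and placeholder methods for prepare, session execution, and finalization:

```java
import java.util.concurrent.CompletableFuture;

// Rough analogy of the rewritten CoordinatorSession.execute() control flow:
//   prepare -> run repair sessions -> fail fast on any failure -> finalize -> commit,
// with a single terminal callback that marks the session failed on any error.
public class RepairChainSketch
{
    static CompletableFuture<Void> prepare()          { return CompletableFuture.completedFuture(null); }
    static CompletableFuture<Boolean> runSessions()   { return CompletableFuture.completedFuture(true); }
    static CompletableFuture<Void> finalizePropose()  { return CompletableFuture.completedFuture(null); }
    static void finalizeCommit()                      { System.out.println("committed"); }
    static void fail()                                { System.out.println("session failed"); }

    public static void main(String[] args)
    {
        CompletableFuture<Boolean> result =
            prepare()
            .thenCompose(ignore -> runSessions())                        // ~ flatMap
            .thenCompose(ok -> ok                                        // fail fast if any session failed
                               ? CompletableFuture.completedFuture(ok)
                               : CompletableFuture.<Boolean>failedFuture(new RuntimeException("some repair failed")))
            .thenCompose(ok -> finalizePropose().thenApply(ignore -> {   // ~ flatMap + map
                finalizeCommit();
                return ok;
            }))
            .whenComplete((ok, failure) -> {                             // ~ addCallback
                if (failure != null)
                    fail();
            });

        result.join();
    }
}
```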
diff --git a/src/java/org/apache/cassandra/repair/consistent/CoordinatorSessions.java b/src/java/org/apache/cassandra/repair/consistent/CoordinatorSessions.java
index b87a2c0..328c16e 100644
--- a/src/java/org/apache/cassandra/repair/consistent/CoordinatorSessions.java
+++ b/src/java/org/apache/cassandra/repair/consistent/CoordinatorSessions.java
@@ -21,7 +21,6 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.base.Preconditions;
 
@@ -30,20 +29,22 @@
 import org.apache.cassandra.repair.messages.FinalizePromise;
 import org.apache.cassandra.repair.messages.PrepareConsistentResponse;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Container for all consistent repair sessions a node is coordinating
  */
 public class CoordinatorSessions
 {
-    private final Map<UUID, CoordinatorSession> sessions = new HashMap<>();
+    private final Map<TimeUUID, CoordinatorSession> sessions = new HashMap<>();
 
     protected CoordinatorSession buildSession(CoordinatorSession.Builder builder)
     {
         return new CoordinatorSession(builder);
     }
 
-    public synchronized CoordinatorSession registerSession(UUID sessionId, Set<InetAddressAndPort> participants, boolean isForced)
+    public synchronized CoordinatorSession registerSession(TimeUUID sessionId, Set<InetAddressAndPort> participants, boolean isForced) throws NoSuchRepairSessionException
     {
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionId);
 
@@ -66,7 +67,7 @@
         return session;
     }
 
-    public synchronized CoordinatorSession getSession(UUID sessionId)
+    public synchronized CoordinatorSession getSession(TimeUUID sessionId)
     {
         return sessions.get(sessionId);
     }
diff --git a/src/java/org/apache/cassandra/repair/consistent/LocalSessionInfo.java b/src/java/org/apache/cassandra/repair/consistent/LocalSessionInfo.java
index f1f927b..fa2835e 100644
--- a/src/java/org/apache/cassandra/repair/consistent/LocalSessionInfo.java
+++ b/src/java/org/apache/cassandra/repair/consistent/LocalSessionInfo.java
@@ -62,7 +62,7 @@
         m.put(STARTED, Integer.toString(session.getStartedAt()));
         m.put(LAST_UPDATE, Integer.toString(session.getLastUpdate()));
         m.put(COORDINATOR, session.coordinator.toString());
-        m.put(PARTICIPANTS, Joiner.on(',').join(Iterables.transform(session.participants.stream().map(peer -> peer.address).collect(Collectors.toList()), InetAddress::getHostAddress)));
+        m.put(PARTICIPANTS, Joiner.on(',').join(Iterables.transform(session.participants.stream().map(peer -> peer.getAddress()).collect(Collectors.toList()), InetAddress::getHostAddress)));
         m.put(PARTICIPANTS_WP, Joiner.on(',').join(Iterables.transform(session.participants, InetAddressAndPort::getHostAddressAndPort)));
         m.put(TABLES, Joiner.on(',').join(Iterables.transform(session.tableIds, LocalSessionInfo::tableString)));
 
diff --git a/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java b/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java
index cfb90ef..ed2bf0b 100644
--- a/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java
+++ b/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java
@@ -33,7 +33,6 @@
 import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArraySet;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.function.BooleanSupplier;
 import java.util.function.Predicate;
@@ -50,10 +49,8 @@
 import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
 import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
 
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -93,9 +90,14 @@
 import org.apache.cassandra.repair.messages.StatusResponse;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.net.Verb.FAILED_SESSION_MSG;
 import static org.apache.cassandra.net.Verb.FINALIZE_PROMISE_MSG;
 import static org.apache.cassandra.net.Verb.PREPARE_CONSISTENT_RSP;
@@ -153,7 +155,7 @@
     private final String keyspace = SchemaConstants.SYSTEM_KEYSPACE_NAME;
     private final String table = SystemKeyspace.REPAIRS;
     private boolean started = false;
-    private volatile ImmutableMap<UUID, LocalSession> sessions = ImmutableMap.of();
+    private volatile ImmutableMap<TimeUUID, LocalSession> sessions = ImmutableMap.of();
     private volatile ImmutableMap<TableId, RepairedState> repairedStates = ImmutableMap.of();
 
     @VisibleForTesting
@@ -273,10 +275,10 @@
         PendingStat.Builder finalized = new PendingStat.Builder();
         PendingStat.Builder failed = new PendingStat.Builder();
 
-        Map<UUID, PendingStat> stats = cfs.getPendingRepairStats();
-        for (Map.Entry<UUID, PendingStat> entry : stats.entrySet())
+        Map<TimeUUID, PendingStat> stats = cfs.getPendingRepairStats();
+        for (Map.Entry<TimeUUID, PendingStat> entry : stats.entrySet())
         {
-            UUID sessionID = entry.getKey();
+            TimeUUID sessionID = entry.getKey();
             PendingStat stat = entry.getValue();
             Verify.verify(sessionID.equals(Iterables.getOnlyElement(stat.sessions)));
 
@@ -310,7 +312,7 @@
                                                                    && Range.intersects(ls.ranges, ranges));
 
         ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(tid);
-        Set<UUID> sessionIds = Sets.newHashSet(Iterables.transform(candidates, s -> s.sessionID));
+        Set<TimeUUID> sessionIds = Sets.newHashSet(Iterables.transform(candidates, s -> s.sessionID));
 
 
         return cfs.releaseRepairData(sessionIds, force);
@@ -320,7 +322,7 @@
      * hook for operators to cancel sessions, cancelling from a non-coordinator is an error, unless
      * force is set to true. Messages are sent out to other participants, but we don't wait for a response
      */
-    public void cancelSession(UUID sessionID, boolean force)
+    public void cancelSession(TimeUUID sessionID, boolean force)
     {
         logger.info("Cancelling local repair session {}", sessionID);
         LocalSession session = getSession(sessionID);
@@ -346,7 +348,7 @@
         Preconditions.checkArgument(!started, "LocalSessions.start can only be called once");
         Preconditions.checkArgument(sessions.isEmpty(), "No sessions should be added before start");
         UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(String.format("SELECT * FROM %s.%s", keyspace, table), 1000);
-        Map<UUID, LocalSession> loadedSessions = new HashMap<>();
+        Map<TimeUUID, LocalSession> loadedSessions = new HashMap<>();
         Map<TableId, List<RepairedState.Level>> initialLevels = new HashMap<>();
         for (UntypedResultSet.Row row : rows)
         {
@@ -363,9 +365,9 @@
             }
             catch (IllegalArgumentException | NullPointerException e)
             {
-                logger.warn("Unable to load malformed repair session {}, removing", row.has("parent_id") ? row.getUUID("parent_id") : null);
+                logger.warn("Unable to load malformed repair session {}, removing", row.has("parent_id") ? row.getTimeUUID("parent_id") : null);
                 if (row.has("parent_id"))
-                    deleteRow(row.getUUID("parent_id"));
+                    deleteRow(row.getTimeUUID("parent_id"));
             }
         }
         for (Map.Entry<TableId, List<RepairedState.Level>> entry : initialLevels.entrySet())
@@ -545,9 +547,9 @@
                                        Date.from(Instant.ofEpochSecond(session.getLastUpdate())),
                                        Date.from(Instant.ofEpochMilli(session.repairedAt)),
                                        session.getState().ordinal(),
-                                       session.coordinator.address,
-                                       session.coordinator.port,
-                                       session.participants.stream().map(participant -> participant.address).collect(Collectors.toSet()),
+                                       session.coordinator.getAddress(),
+                                       session.coordinator.getPort(),
+                                       session.participants.stream().map(participant -> participant.getAddress()).collect(Collectors.toSet()),
                                        session.participants.stream().map(participant -> participant.getHostAddressAndPort()).collect(Collectors.toSet()),
                                        serializeRanges(session.ranges),
                                        tableIdToUuid(session.tableIds));
@@ -564,7 +566,7 @@
     {
         LocalSession.Builder builder = LocalSession.builder();
         builder.withState(ConsistentSession.State.valueOf(row.getInt("state")));
-        builder.withSessionID(row.getUUID("parent_id"));
+        builder.withSessionID(row.getTimeUUID("parent_id"));
         InetAddressAndPort coordinator = InetAddressAndPort.getByAddressOverrideDefaults(
             row.getInetAddress("coordinator"),
             row.getInt("coordinator_port"));
@@ -592,7 +594,7 @@
         return buildSession(builder);
     }
 
-    private void deleteRow(UUID sessionID)
+    private void deleteRow(TimeUUID sessionID)
     {
         String query = "DELETE FROM %s.%s WHERE parent_id=?";
         QueryProcessor.executeInternal(String.format(query, keyspace, table), sessionID);
@@ -602,14 +604,14 @@
     {
         TableId tid = Schema.instance.getTableMetadata(keyspace, table).id;
         ColumnFamilyStore cfm = Schema.instance.getColumnFamilyStoreInstance(tid);
-        cfm.forceBlockingFlush();
+        cfm.forceBlockingFlush(ColumnFamilyStore.FlushReason.INTERNALLY_FORCED);
     }
 
     /**
      * Loads a session directly from the table. Should be used for testing only
      */
     @VisibleForTesting
-    LocalSession loadUnsafe(UUID sessionId)
+    LocalSession loadUnsafe(TimeUUID sessionId)
     {
         String query = "SELECT * FROM %s.%s WHERE parent_id=?";
         UntypedResultSet result = QueryProcessor.executeInternal(String.format(query, keyspace, table), sessionId);
@@ -626,7 +628,7 @@
         return new LocalSession(builder);
     }
 
-    public LocalSession getSession(UUID sessionID)
+    public LocalSession getSession(TimeUUID sessionID)
     {
         return sessions.get(sessionID);
     }
@@ -643,22 +645,22 @@
         Preconditions.checkArgument(!sessions.containsKey(session.sessionID),
                                     "LocalSession %s already exists", session.sessionID);
         Preconditions.checkArgument(started, "sessions cannot be added before LocalSessions is started");
-        sessions = ImmutableMap.<UUID, LocalSession>builder()
+        sessions = ImmutableMap.<TimeUUID, LocalSession>builder()
                                .putAll(sessions)
                                .put(session.sessionID, session)
                                .build();
     }
 
-    private synchronized void removeSession(UUID sessionID)
+    private synchronized void removeSession(TimeUUID sessionID)
     {
         Preconditions.checkArgument(sessionID != null);
-        Map<UUID, LocalSession> temp = new HashMap<>(sessions);
+        Map<TimeUUID, LocalSession> temp = new HashMap<>(sessions);
         temp.remove(sessionID);
         sessions = ImmutableMap.copyOf(temp);
     }
 
     @VisibleForTesting
-    LocalSession createSessionUnsafe(UUID sessionId, ActiveRepairService.ParentRepairSession prs, Set<InetAddressAndPort> peers)
+    LocalSession createSessionUnsafe(TimeUUID sessionId, ActiveRepairService.ParentRepairSession prs, Set<InetAddressAndPort> peers)
     {
         LocalSession.Builder builder = LocalSession.builder();
         builder.withState(ConsistentSession.State.PREPARING);
@@ -677,7 +679,7 @@
         return buildSession(builder);
     }
 
-    protected ActiveRepairService.ParentRepairSession getParentRepairSession(UUID sessionID)
+    protected ActiveRepairService.ParentRepairSession getParentRepairSession(TimeUUID sessionID) throws NoSuchRepairSessionException
     {
         return ActiveRepairService.instance.getParentRepairSession(sessionID);
     }
@@ -711,12 +713,12 @@
         }
     }
 
-    public void failSession(UUID sessionID)
+    public void failSession(TimeUUID sessionID)
     {
         failSession(sessionID, true);
     }
 
-    public void failSession(UUID sessionID, boolean sendMessage)
+    public void failSession(TimeUUID sessionID, boolean sendMessage)
     {
         failSession(getSession(sessionID), sendMessage);
     }
@@ -745,7 +747,7 @@
         }
     }
 
-    public synchronized void deleteSession(UUID sessionID)
+    public synchronized void deleteSession(TimeUUID sessionID)
     {
         logger.info("Deleting local repair session {}", sessionID);
         LocalSession session = getSession(sessionID);
@@ -756,12 +758,12 @@
     }
 
     @VisibleForTesting
-    ListenableFuture prepareSession(KeyspaceRepairManager repairManager,
-                                    UUID sessionID,
-                                    Collection<ColumnFamilyStore> tables,
-                                    RangesAtEndpoint tokenRanges,
-                                    ExecutorService executor,
-                                    BooleanSupplier isCancelled)
+    Future<List<Void>> prepareSession(KeyspaceRepairManager repairManager,
+                                      TimeUUID sessionID,
+                                      Collection<ColumnFamilyStore> tables,
+                                      RangesAtEndpoint tokenRanges,
+                                      ExecutorService executor,
+                                      BooleanSupplier isCancelled)
     {
         return repairManager.prepareIncrementalRepair(sessionID, tables, tokenRanges, executor, isCancelled);
     }
@@ -799,7 +801,7 @@
     public void handlePrepareMessage(InetAddressAndPort from, PrepareConsistentRequest request)
     {
         logger.trace("received {} from {}", request, from);
-        UUID sessionID = request.parentSession;
+        TimeUUID sessionID = request.parentSession;
         InetAddressAndPort coordinator = request.coordinator;
         Set<InetAddressAndPort> peers = request.participants;
 
@@ -819,23 +821,22 @@
         putSessionUnsafe(session);
         logger.info("Beginning local incremental repair session {}", session);
 
-        ExecutorService executor = Executors.newFixedThreadPool(parentSession.getColumnFamilyStores().size());
+        ExecutorService executor = executorFactory().pooled("Repair-" + sessionID, parentSession.getColumnFamilyStores().size());
 
         KeyspaceRepairManager repairManager = parentSession.getKeyspace().getRepairManager();
         RangesAtEndpoint tokenRanges = filterLocalRanges(parentSession.getKeyspace().getName(), parentSession.getRanges());
-        ListenableFuture repairPreparation = prepareSession(repairManager, sessionID, parentSession.getColumnFamilyStores(),
-                                                            tokenRanges, executor, () -> session.getState() != PREPARING);
+        Future<List<Void>> repairPreparation = prepareSession(repairManager, sessionID, parentSession.getColumnFamilyStores(),
+                                                          tokenRanges, executor, () -> session.getState() != PREPARING);
 
-        Futures.addCallback(repairPreparation, new FutureCallback<Object>()
+        repairPreparation.addCallback(new FutureCallback<List<Void>>()
         {
-            public void onSuccess(@Nullable Object result)
+            public void onSuccess(@Nullable List<Void> result)
             {
                 try
                 {
                     logger.info("Prepare phase for incremental repair session {} completed", sessionID);
                     if (!prepareSessionExceptFailed(session))
                         logger.info("Session {} failed before anticompaction completed", sessionID);
-
                     Message<PrepareConsistentResponse> message =
                         Message.out(PREPARE_CONSISTENT_RSP,
                                     new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), session.getState() != FAILED));
@@ -851,7 +852,12 @@
             {
                 try
                 {
-                    logger.error("Prepare phase for incremental repair session {} failed", sessionID, t);
+                    if (Throwables.anyCauseMatches(t, (throwable) -> throwable instanceof CompactionInterruptedException))
+                        logger.info("Anticompaction interrupted for session {}: {}", sessionID, t.getMessage());
+                    else if (Throwables.anyCauseMatches(t, (throwable) -> throwable instanceof NoSuchRepairSessionException))
+                        logger.warn("No such repair session: {}", sessionID);
+                    else
+                        logger.error("Prepare phase for incremental repair session {} failed", sessionID, t);
                     sendMessage(coordinator,
                                 Message.out(PREPARE_CONSISTENT_RSP,
                                             new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), false)));
@@ -862,7 +868,7 @@
                     executor.shutdown();
                 }
             }
-        }, MoreExecutors.directExecutor());
+        });
     }
 
     /**
@@ -884,7 +890,7 @@
         }
     }
 
-    public void maybeSetRepairing(UUID sessionID)
+    public void maybeSetRepairing(TimeUUID sessionID)
     {
         LocalSession session = getSession(sessionID);
         if (session != null && session.getState() != REPAIRING)
@@ -897,7 +903,7 @@
     public void handleFinalizeProposeMessage(InetAddressAndPort from, FinalizePropose propose)
     {
         logger.trace("received {} from {}", propose, from);
-        UUID sessionID = propose.sessionID;
+        TimeUUID sessionID = propose.sessionID;
         LocalSession session = getSession(sessionID);
         if (session == null)
         {
@@ -952,7 +958,7 @@
     public void handleFinalizeCommitMessage(InetAddressAndPort from, FinalizeCommit commit)
     {
         logger.trace("received {} from {}", commit, from);
-        UUID sessionID = commit.sessionID;
+        TimeUUID sessionID = commit.sessionID;
         LocalSession session = getSession(sessionID);
         if (session == null)
         {
@@ -987,7 +993,7 @@
     public void handleStatusRequest(InetAddressAndPort from, StatusRequest request)
     {
         logger.trace("received {} from {}", request, from);
-        UUID sessionID = request.sessionID;
+        TimeUUID sessionID = request.sessionID;
         LocalSession session = getSession(sessionID);
         if (session == null)
         {
@@ -1004,7 +1010,7 @@
     public void handleStatusResponse(InetAddressAndPort from, StatusResponse response)
     {
         logger.trace("received {} from {}", response, from);
-        UUID sessionID = response.sessionID;
+        TimeUUID sessionID = response.sessionID;
         LocalSession session = getSession(sessionID);
         if (session == null)
         {
@@ -1028,7 +1034,7 @@
     /**
      * determines if a local session exists, and if it's not finalized or failed
      */
-    public boolean isSessionInProgress(UUID sessionID)
+    public boolean isSessionInProgress(TimeUUID sessionID)
     {
         LocalSession session = getSession(sessionID);
         return session != null && session.getState() != FINALIZED && session.getState() != FAILED;
@@ -1037,7 +1043,7 @@
     /**
      * determines if a local session exists, and if it's in the finalized state
      */
-    public boolean isSessionFinalized(UUID sessionID)
+    public boolean isSessionFinalized(TimeUUID sessionID)
     {
         LocalSession session = getSession(sessionID);
         return session != null && session.getState() == FINALIZED;
@@ -1046,7 +1052,7 @@
     /**
      * determines if a local session exists
      */
-    public boolean sessionExists(UUID sessionID)
+    public boolean sessionExists(TimeUUID sessionID)
     {
         return getSession(sessionID) != null;
     }
@@ -1066,7 +1072,7 @@
      * Returns the repairedAt time for a session which is unknown, failed, or finalized
      * calling this for a session which is in progress throws an exception
      */
-    public long getFinalSessionRepairedAt(UUID sessionID)
+    public long getFinalSessionRepairedAt(TimeUUID sessionID)
     {
         LocalSession session = getSession(sessionID);
         if (session == null || session.getState() == FAILED)
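
The reworked onFailure handler above downgrades two expected outcomes, an interrupted anticompaction and a missing parent repair session, from error to info/warn logging by inspecting the cause chain. A minimal, JDK-only sketch of that cause-chain test, assuming Throwables.anyCauseMatches behaves like the helper below:

    import java.util.function.Predicate;

    public final class CauseMatcher
    {
        private CauseMatcher() {}

        // True if the throwable itself or any transitive cause matches the predicate.
        static boolean anyCauseMatches(Throwable t, Predicate<Throwable> predicate)
        {
            for (Throwable cause = t; cause != null; cause = cause.getCause())
            {
                if (predicate.test(cause))
                    return true;
            }
            return false;
        }

        public static void main(String[] args)
        {
            Throwable failure = new RuntimeException("prepare failed",
                                                     new InterruptedException("anticompaction stopped"));
            // Treated as an expected interruption rather than a hard error.
            System.out.println(anyCauseMatches(failure, c -> c instanceof InterruptedException)
                               ? "interrupted" : "error");
        }
    }
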
diff --git a/src/java/org/apache/cassandra/repair/consistent/SyncStatSummary.java b/src/java/org/apache/cassandra/repair/consistent/SyncStatSummary.java
index f8e1bfb..3d21702 100644
--- a/src/java/org/apache/cassandra/repair/consistent/SyncStatSummary.java
+++ b/src/java/org/apache/cassandra/repair/consistent/SyncStatSummary.java
@@ -18,15 +18,16 @@
 
 package org.apache.cassandra.repair.consistent;
 
+import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 
 import com.google.common.collect.Lists;
 
-import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.repair.RepairResult;
 import org.apache.cassandra.repair.RepairSessionResult;
 import org.apache.cassandra.repair.SyncStat;
@@ -44,14 +45,14 @@
 
     private static class Session
     {
-        final InetAddressAndPort src;
-        final InetAddressAndPort dst;
+        final InetSocketAddress src;
+        final InetSocketAddress dst;
 
         int files = 0;
         long bytes = 0;
         long ranges = 0;
 
-        Session(InetAddressAndPort src, InetAddressAndPort dst)
+        Session(InetSocketAddress src, InetSocketAddress dst)
         {
             this.src = src;
             this.dst = dst;
@@ -86,7 +87,7 @@
         int ranges = -1;
         boolean totalsCalculated = false;
 
-        final Map<Pair<InetAddressAndPort, InetAddressAndPort>, Session> sessions = new HashMap<>();
+        final Map<Pair<InetSocketAddress, InetSocketAddress>, Session> sessions = new HashMap<>();
 
         Table(String keyspace, String table)
         {
@@ -94,9 +95,9 @@
             this.table = table;
         }
 
-        Session getOrCreate(InetAddressAndPort from, InetAddressAndPort to)
+        Session getOrCreate(InetSocketAddress from, InetSocketAddress to)
         {
-            Pair<InetAddressAndPort, InetAddressAndPort> k = Pair.create(from, to);
+            Pair<InetSocketAddress, InetSocketAddress> k = Pair.create(from, to);
             if (!sessions.containsKey(k))
             {
                 sessions.put(k, new Session(from, to));
@@ -178,11 +179,11 @@
         summaries.get(cf).consumeStats(result.stats);
     }
 
-    public void consumeSessionResults(List<RepairSessionResult> results)
+    public void consumeSessionResults(Optional<List<RepairSessionResult>> results)
     {
-        if (results != null)
+        if (results.isPresent())
         {
-            filter(results, Objects::nonNull).forEach(r -> filter(r.repairJobResults, Objects::nonNull).forEach(this::consumeRepairResult));
+            filter(results.get(), Objects::nonNull).forEach(r -> filter(r.repairJobResults, Objects::nonNull).forEach(this::consumeRepairResult));
         }
     }
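
consumeSessionResults now takes Optional<List<RepairSessionResult>> instead of a nullable list, so the absent case is explicit at the call site. A small JDK-only sketch of the same pattern; the String element type is a stand-in, not the real RepairSessionResult:

    import java.util.List;
    import java.util.Objects;
    import java.util.Optional;

    public final class OptionalResultsDemo
    {
        // Mirrors the new signature: absence is modelled with Optional instead of null.
        static void consume(Optional<List<String>> results)
        {
            if (results.isPresent())
                results.get().stream().filter(Objects::nonNull).forEach(System.out::println);
        }

        public static void main(String[] args)
        {
            consume(Optional.empty());                     // nothing to do, no null check required
            consume(Optional.of(List.of("session-1 ok"))); // prints the single result
        }
    }
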
 
diff --git a/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java b/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java
index f984411..2d21deb 100644
--- a/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java
+++ b/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java
@@ -22,7 +22,6 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 
 import javax.management.openmbean.ArrayType;
 import javax.management.openmbean.CompositeData;
@@ -37,6 +36,7 @@
 import com.google.common.collect.Sets;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class CleanupSummary
 {
@@ -64,10 +64,10 @@
     public final String keyspace;
     public final String table;
 
-    public final Set<UUID> successful;
-    public final Set<UUID> unsuccessful;
+    public final Set<TimeUUID> successful;
+    public final Set<TimeUUID> unsuccessful;
 
-    public CleanupSummary(String keyspace, String table, Set<UUID> successful, Set<UUID> unsuccessful)
+    public CleanupSummary(String keyspace, String table, Set<TimeUUID> successful, Set<TimeUUID> unsuccessful)
     {
         this.keyspace = keyspace;
         this.table = table;
@@ -75,7 +75,7 @@
         this.unsuccessful = unsuccessful;
     }
 
-    public CleanupSummary(ColumnFamilyStore cfs, Set<UUID> successful, Set<UUID> unsuccessful)
+    public CleanupSummary(ColumnFamilyStore cfs, Set<TimeUUID> successful, Set<TimeUUID> unsuccessful)
     {
         this(cfs.keyspace.getName(), cfs.name, successful, unsuccessful);
     }
@@ -85,30 +85,30 @@
         Preconditions.checkArgument(l.keyspace.equals(r.keyspace));
         Preconditions.checkArgument(l.table.equals(r.table));
 
-        Set<UUID> unsuccessful = new HashSet<>(l.unsuccessful);
+        Set<TimeUUID> unsuccessful = new HashSet<>(l.unsuccessful);
         unsuccessful.addAll(r.unsuccessful);
 
-        Set<UUID> successful = new HashSet<>(l.successful);
+        Set<TimeUUID> successful = new HashSet<>(l.successful);
         successful.addAll(r.successful);
         successful.removeAll(unsuccessful);
 
         return new CleanupSummary(l.keyspace, l.table, successful, unsuccessful);
     }
 
-    private static String[] uuids2Strings(Set<UUID> uuids)
+    private static String[] uuids2Strings(Set<TimeUUID> uuids)
     {
         String[] strings = new String[uuids.size()];
         int idx = 0;
-        for (UUID uuid : uuids)
+        for (TimeUUID uuid : uuids)
             strings[idx++] = uuid.toString();
         return strings;
     }
 
-    private static Set<UUID> strings2Uuids(String[] strings)
+    private static Set<TimeUUID> strings2Uuids(String[] strings)
     {
-        Set<UUID> uuids = Sets.newHashSetWithExpectedSize(strings.length);
+        Set<TimeUUID> uuids = Sets.newHashSetWithExpectedSize(strings.length);
         for (String string : strings)
-            uuids.add(UUID.fromString(string));
+            uuids.add(TimeUUID.fromString(string));
 
         return uuids;
     }
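
CleanupSummary round-trips its session IDs through String[] because the JMX open types used here cannot carry TimeUUID values directly. A JDK-only sketch of that round trip, with java.util.UUID standing in for TimeUUID:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.UUID;

    public final class IdRoundTrip
    {
        static String[] toStrings(Set<UUID> ids)
        {
            String[] out = new String[ids.size()];
            int idx = 0;
            for (UUID id : ids)
                out[idx++] = id.toString();
            return out;
        }

        static Set<UUID> fromStrings(String[] strings)
        {
            Set<UUID> ids = new HashSet<>(strings.length);
            for (String s : strings)
                ids.add(UUID.fromString(s));
            return ids;
        }

        public static void main(String[] args)
        {
            Set<UUID> original = new HashSet<>();
            original.add(UUID.randomUUID());
            // The round trip preserves the set, which the CompositeData encoding relies on.
            System.out.println(fromStrings(toStrings(original)).equals(original)); // true
        }
    }
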
diff --git a/src/java/org/apache/cassandra/repair/consistent/admin/PendingStat.java b/src/java/org/apache/cassandra/repair/consistent/admin/PendingStat.java
index 0c4424e..b7d4fea 100644
--- a/src/java/org/apache/cassandra/repair/consistent/admin/PendingStat.java
+++ b/src/java/org/apache/cassandra/repair/consistent/admin/PendingStat.java
@@ -23,7 +23,6 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import javax.management.openmbean.ArrayType;
 import javax.management.openmbean.CompositeData;
 import javax.management.openmbean.CompositeDataSupport;
@@ -32,13 +31,12 @@
 import javax.management.openmbean.OpenType;
 import javax.management.openmbean.SimpleType;
 
-import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
-import com.google.common.collect.Iterables;
 
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class PendingStat
 {
@@ -65,9 +63,9 @@
 
     public final long dataSize;
     public final int numSSTables;
-    public final Set<UUID> sessions;
+    public final Set<TimeUUID> sessions;
 
-    public PendingStat(long dataSize, int numSSTables, Set<UUID> sessions)
+    public PendingStat(long dataSize, int numSSTables, Set<TimeUUID> sessions)
     {
         this.dataSize = dataSize;
         this.numSSTables = numSSTables;
@@ -86,7 +84,7 @@
         values.put(COMPOSITE_NAMES[1], numSSTables);
         String[] sessionIds = new String[sessions.size()];
         int idx = 0;
-        for (UUID session : sessions)
+        for (TimeUUID session : sessions)
             sessionIds[idx++] = session.toString();
         values.put(COMPOSITE_NAMES[2], sessionIds);
 
@@ -104,9 +102,9 @@
     {
         Preconditions.checkArgument(cd.getCompositeType().equals(COMPOSITE_TYPE));
         Object[] values = cd.getAll(COMPOSITE_NAMES);
-        Set<UUID> sessions = new HashSet<>();
+        Set<TimeUUID> sessions = new HashSet<>();
         for (String session : (String[]) values[2])
-            sessions.add(UUID.fromString(session));
+            sessions.add(TimeUUID.fromString(session));
         return new PendingStat((long) values[0], (int) values[1], sessions);
     }
 
@@ -114,11 +112,11 @@
     {
         public long dataSize = 0;
         public int numSSTables = 0;
-        public Set<UUID> sessions = new HashSet<>();
+        public Set<TimeUUID> sessions = new HashSet<>();
 
         public Builder addSSTable(SSTableReader sstable)
         {
-            UUID sessionID = sstable.getPendingRepair();
+            TimeUUID sessionID = sstable.getPendingRepair();
             if (sessionID == null)
                 return this;
             dataSize += sstable.onDiskLength();
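
PendingStat.Builder accumulates on-disk size, an sstable count, and the set of pending-repair session IDs, skipping sstables that have no pending repair. A JDK-only sketch of that accumulation, with a minimal stand-in for the SSTableReader fields it reads:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.UUID;

    public final class PendingStatBuilderDemo
    {
        // Minimal stand-in for the two SSTableReader properties addSSTable reads.
        static final class SSTable
        {
            final long onDiskLength;
            final UUID pendingRepair; // null when the sstable is not part of a pending repair

            SSTable(long onDiskLength, UUID pendingRepair)
            {
                this.onDiskLength = onDiskLength;
                this.pendingRepair = pendingRepair;
            }
        }

        static final class Builder
        {
            long dataSize = 0;
            int numSSTables = 0;
            final Set<UUID> sessions = new HashSet<>();

            Builder addSSTable(SSTable sstable)
            {
                if (sstable.pendingRepair == null)
                    return this; // not pending for any session; ignore
                dataSize += sstable.onDiskLength;
                numSSTables++;
                sessions.add(sstable.pendingRepair);
                return this;
            }
        }

        public static void main(String[] args)
        {
            UUID session = UUID.randomUUID();
            Builder b = new Builder().addSSTable(new SSTable(100, session))
                                     .addSSTable(new SSTable(50, null)) // skipped
                                     .addSSTable(new SSTable(200, session));
            // Prints: 300 bytes in 2 sstables for 1 session(s)
            System.out.println(b.dataSize + " bytes in " + b.numSSTables + " sstables for "
                               + b.sessions.size() + " session(s)");
        }
    }
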
diff --git a/src/java/org/apache/cassandra/repair/consistent/admin/SchemaArgsParser.java b/src/java/org/apache/cassandra/repair/consistent/admin/SchemaArgsParser.java
index 67c0244..bfcc8cd 100644
--- a/src/java/org/apache/cassandra/repair/consistent/admin/SchemaArgsParser.java
+++ b/src/java/org/apache/cassandra/repair/consistent/admin/SchemaArgsParser.java
@@ -73,7 +73,7 @@
         if (schemaArgs.isEmpty())
         {
             // iterate over everything
-            Iterator<String> ksNames = Schema.instance.getNonLocalStrategyKeyspaces().iterator();
+            Iterator<String> ksNames = Schema.instance.getNonLocalStrategyKeyspaces().names().iterator();
 
             return new AbstractIterator<ColumnFamilyStore>()
             {
diff --git a/src/java/org/apache/cassandra/repair/messages/CleanupMessage.java b/src/java/org/apache/cassandra/repair/messages/CleanupMessage.java
index 5ec7fc6..0e1a74f 100644
--- a/src/java/org/apache/cassandra/repair/messages/CleanupMessage.java
+++ b/src/java/org/apache/cassandra/repair/messages/CleanupMessage.java
@@ -19,12 +19,11 @@
 
 import java.io.IOException;
 import java.util.Objects;
-import java.util.UUID;
 
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Message to cleanup repair resources on replica nodes.
@@ -33,9 +32,9 @@
  */
 public class CleanupMessage extends RepairMessage
 {
-    public final UUID parentRepairSession;
+    public final TimeUUID parentRepairSession;
 
-    public CleanupMessage(UUID parentRepairSession)
+    public CleanupMessage(TimeUUID parentRepairSession)
     {
         super(null);
         this.parentRepairSession = parentRepairSession;
@@ -60,18 +59,18 @@
     {
         public void serialize(CleanupMessage message, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(message.parentRepairSession, out, version);
+            message.parentRepairSession.serialize(out);
         }
 
         public CleanupMessage deserialize(DataInputPlus in, int version) throws IOException
         {
-            UUID parentRepairSession = UUIDSerializer.serializer.deserialize(in, version);
+            TimeUUID parentRepairSession = TimeUUID.deserialize(in);
             return new CleanupMessage(parentRepairSession);
         }
 
         public long serializedSize(CleanupMessage message, int version)
         {
-            return UUIDSerializer.serializer.serializedSize(message.parentRepairSession, version);
+            return TimeUUID.sizeInBytes();
         }
     };
 }
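
CleanupMessage, like the other repair messages below, drops the versioned UUIDSerializer in favour of TimeUUID's fixed-width serialize/deserialize and the constant TimeUUID.sizeInBytes(). A JDK-only sketch of an equivalent fixed 16-byte wire format, using java.util.UUID's two longs as a stand-in:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.UUID;

    public final class FixedWidthIdCodec
    {
        static final int SIZE_IN_BYTES = 16; // two longs, analogous to TimeUUID.sizeInBytes()

        static void serialize(UUID id, DataOutputStream out) throws IOException
        {
            out.writeLong(id.getMostSignificantBits());
            out.writeLong(id.getLeastSignificantBits());
        }

        static UUID deserialize(DataInputStream in) throws IOException
        {
            return new UUID(in.readLong(), in.readLong());
        }

        public static void main(String[] args) throws IOException
        {
            UUID id = UUID.randomUUID();
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            serialize(id, new DataOutputStream(buffer));
            // A fixed-width encoding lets serializedSize() be a constant instead of value-dependent.
            System.out.println(buffer.size() == SIZE_IN_BYTES); // true
            System.out.println(deserialize(new DataInputStream(
                new ByteArrayInputStream(buffer.toByteArray()))).equals(id)); // true
        }
    }
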
diff --git a/src/java/org/apache/cassandra/repair/messages/FailSession.java b/src/java/org/apache/cassandra/repair/messages/FailSession.java
index b8c7ad3..f6826ee 100644
--- a/src/java/org/apache/cassandra/repair/messages/FailSession.java
+++ b/src/java/org/apache/cassandra/repair/messages/FailSession.java
@@ -19,18 +19,17 @@
 package org.apache.cassandra.repair.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class FailSession extends RepairMessage
 {
-    public final UUID sessionID;
+    public final TimeUUID sessionID;
 
-    public FailSession(UUID sessionID)
+    public FailSession(TimeUUID sessionID)
     {
         super(null);
         assert sessionID != null;
@@ -56,17 +55,17 @@
     {
         public void serialize(FailSession msg, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(msg.sessionID, out, version);
+            msg.sessionID.serialize(out);
         }
 
         public FailSession deserialize(DataInputPlus in, int version) throws IOException
         {
-            return new FailSession(UUIDSerializer.serializer.deserialize(in, version));
+            return new FailSession(TimeUUID.deserialize(in));
         }
 
         public long serializedSize(FailSession msg, int version)
         {
-            return UUIDSerializer.serializer.serializedSize(msg.sessionID, version);
+            return TimeUUID.sizeInBytes();
         }
     };
 }
diff --git a/src/java/org/apache/cassandra/repair/messages/FinalizeCommit.java b/src/java/org/apache/cassandra/repair/messages/FinalizeCommit.java
index bb5cca7..ca4e6d8 100644
--- a/src/java/org/apache/cassandra/repair/messages/FinalizeCommit.java
+++ b/src/java/org/apache/cassandra/repair/messages/FinalizeCommit.java
@@ -19,18 +19,17 @@
 package org.apache.cassandra.repair.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class FinalizeCommit extends RepairMessage
 {
-    public final UUID sessionID;
+    public final TimeUUID sessionID;
 
-    public FinalizeCommit(UUID sessionID)
+    public FinalizeCommit(TimeUUID sessionID)
     {
         super(null);
         assert sessionID != null;
@@ -63,17 +62,17 @@
     {
         public void serialize(FinalizeCommit msg, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(msg.sessionID, out, version);
+            msg.sessionID.serialize(out);
         }
 
         public FinalizeCommit deserialize(DataInputPlus in, int version) throws IOException
         {
-            return new FinalizeCommit(UUIDSerializer.serializer.deserialize(in, version));
+            return new FinalizeCommit(TimeUUID.deserialize(in));
         }
 
         public long serializedSize(FinalizeCommit msg, int version)
         {
-            return UUIDSerializer.serializer.serializedSize(msg.sessionID, version);
+            return TimeUUID.sizeInBytes();
         }
     };
 }
diff --git a/src/java/org/apache/cassandra/repair/messages/FinalizePromise.java b/src/java/org/apache/cassandra/repair/messages/FinalizePromise.java
index cfdc07c..c45f46d 100644
--- a/src/java/org/apache/cassandra/repair/messages/FinalizePromise.java
+++ b/src/java/org/apache/cassandra/repair/messages/FinalizePromise.java
@@ -19,24 +19,23 @@
 package org.apache.cassandra.repair.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
 
 public class FinalizePromise extends RepairMessage
 {
-    public final UUID sessionID;
+    public final TimeUUID sessionID;
     public final InetAddressAndPort participant;
     public final boolean promised;
 
-    public FinalizePromise(UUID sessionID, InetAddressAndPort participant, boolean promised)
+    public FinalizePromise(TimeUUID sessionID, InetAddressAndPort participant, boolean promised)
     {
         super(null);
         assert sessionID != null;
@@ -70,21 +69,21 @@
     {
         public void serialize(FinalizePromise msg, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(msg.sessionID, out, version);
+            msg.sessionID.serialize(out);
             inetAddressAndPortSerializer.serialize(msg.participant, out, version);
             out.writeBoolean(msg.promised);
         }
 
         public FinalizePromise deserialize(DataInputPlus in, int version) throws IOException
         {
-            return new FinalizePromise(UUIDSerializer.serializer.deserialize(in, version),
+            return new FinalizePromise(TimeUUID.deserialize(in),
                                        inetAddressAndPortSerializer.deserialize(in, version),
                                        in.readBoolean());
         }
 
         public long serializedSize(FinalizePromise msg, int version)
         {
-            long size = UUIDSerializer.serializer.serializedSize(msg.sessionID, version);
+            long size = TimeUUID.sizeInBytes();
             size += inetAddressAndPortSerializer.serializedSize(msg.participant, version);
             size += TypeSizes.sizeof(msg.promised);
             return size;
diff --git a/src/java/org/apache/cassandra/repair/messages/FinalizePropose.java b/src/java/org/apache/cassandra/repair/messages/FinalizePropose.java
index c21dd78..b3c4bfd 100644
--- a/src/java/org/apache/cassandra/repair/messages/FinalizePropose.java
+++ b/src/java/org/apache/cassandra/repair/messages/FinalizePropose.java
@@ -19,18 +19,17 @@
 package org.apache.cassandra.repair.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class FinalizePropose extends RepairMessage
 {
-    public final UUID sessionID;
+    public final TimeUUID sessionID;
 
-    public FinalizePropose(UUID sessionID)
+    public FinalizePropose(TimeUUID sessionID)
     {
         super(null);
         assert sessionID != null;
@@ -63,17 +62,17 @@
     {
         public void serialize(FinalizePropose msg, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(msg.sessionID, out, version);
+            msg.sessionID.serialize(out);
         }
 
         public FinalizePropose deserialize(DataInputPlus in, int version) throws IOException
         {
-            return new FinalizePropose(UUIDSerializer.serializer.deserialize(in, version));
+            return new FinalizePropose(TimeUUID.deserialize(in));
         }
 
         public long serializedSize(FinalizePropose msg, int version)
         {
-            return UUIDSerializer.serializer.serializedSize(msg.sessionID, version);
+            return TimeUUID.sizeInBytes();
         }
     };
 }
diff --git a/src/java/org/apache/cassandra/repair/messages/PrepareConsistentRequest.java b/src/java/org/apache/cassandra/repair/messages/PrepareConsistentRequest.java
index c1be082..9ac1461 100644
--- a/src/java/org/apache/cassandra/repair/messages/PrepareConsistentRequest.java
+++ b/src/java/org/apache/cassandra/repair/messages/PrepareConsistentRequest.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.ImmutableSet;
 
@@ -30,17 +29,17 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
 
 public class PrepareConsistentRequest extends RepairMessage
 {
-    public final UUID parentSession;
+    public final TimeUUID parentSession;
     public final InetAddressAndPort coordinator;
     public final Set<InetAddressAndPort> participants;
 
-    public PrepareConsistentRequest(UUID parentSession, InetAddressAndPort coordinator, Set<InetAddressAndPort> participants)
+    public PrepareConsistentRequest(TimeUUID parentSession, InetAddressAndPort coordinator, Set<InetAddressAndPort> participants)
     {
         super(null);
         assert parentSession != null;
@@ -84,7 +83,7 @@
     {
         public void serialize(PrepareConsistentRequest request, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(request.parentSession, out, version);
+            request.parentSession.serialize(out);
             inetAddressAndPortSerializer.serialize(request.coordinator, out, version);
             out.writeInt(request.participants.size());
             for (InetAddressAndPort peer : request.participants)
@@ -95,7 +94,7 @@
 
         public PrepareConsistentRequest deserialize(DataInputPlus in, int version) throws IOException
         {
-            UUID sessionId = UUIDSerializer.serializer.deserialize(in, version);
+            TimeUUID sessionId = TimeUUID.deserialize(in);
             InetAddressAndPort coordinator = inetAddressAndPortSerializer.deserialize(in, version);
             int numPeers = in.readInt();
             Set<InetAddressAndPort> peers = new HashSet<>(numPeers);
@@ -109,7 +108,7 @@
 
         public long serializedSize(PrepareConsistentRequest request, int version)
         {
-            long size = UUIDSerializer.serializer.serializedSize(request.parentSession, version);
+            long size = TimeUUID.sizeInBytes();
             size += inetAddressAndPortSerializer.serializedSize(request.coordinator, version);
             size += TypeSizes.sizeof(request.participants.size());
             for (InetAddressAndPort peer : request.participants)
diff --git a/src/java/org/apache/cassandra/repair/messages/PrepareConsistentResponse.java b/src/java/org/apache/cassandra/repair/messages/PrepareConsistentResponse.java
index 00de77d..a422d7f 100644
--- a/src/java/org/apache/cassandra/repair/messages/PrepareConsistentResponse.java
+++ b/src/java/org/apache/cassandra/repair/messages/PrepareConsistentResponse.java
@@ -19,24 +19,23 @@
 package org.apache.cassandra.repair.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
 
 public class PrepareConsistentResponse extends RepairMessage
 {
-    public final UUID parentSession;
+    public final TimeUUID parentSession;
     public final InetAddressAndPort participant;
     public final boolean success;
 
-    public PrepareConsistentResponse(UUID parentSession, InetAddressAndPort participant, boolean success)
+    public PrepareConsistentResponse(TimeUUID parentSession, InetAddressAndPort participant, boolean success)
     {
         super(null);
         assert parentSession != null;
@@ -70,21 +69,21 @@
     {
         public void serialize(PrepareConsistentResponse response, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(response.parentSession, out, version);
+            response.parentSession.serialize(out);
             inetAddressAndPortSerializer.serialize(response.participant, out, version);
             out.writeBoolean(response.success);
         }
 
         public PrepareConsistentResponse deserialize(DataInputPlus in, int version) throws IOException
         {
-            return new PrepareConsistentResponse(UUIDSerializer.serializer.deserialize(in, version),
+            return new PrepareConsistentResponse(TimeUUID.deserialize(in),
                                                  inetAddressAndPortSerializer.deserialize(in, version),
                                                  in.readBoolean());
         }
 
         public long serializedSize(PrepareConsistentResponse response, int version)
         {
-            long size = UUIDSerializer.serializer.serializedSize(response.parentSession, version);
+            long size = TimeUUID.sizeInBytes();
             size += inetAddressAndPortSerializer.serializedSize(response.participant, version);
             size += TypeSizes.sizeof(response.success);
             return size;
diff --git a/src/java/org/apache/cassandra/repair/messages/PrepareMessage.java b/src/java/org/apache/cassandra/repair/messages/PrepareMessage.java
index 7c8db93..7064227 100644
--- a/src/java/org/apache/cassandra/repair/messages/PrepareMessage.java
+++ b/src/java/org/apache/cassandra/repair/messages/PrepareMessage.java
@@ -22,7 +22,6 @@
 import java.util.Collection;
 import java.util.List;
 import java.util.Objects;
-import java.util.UUID;
 
 import com.google.common.base.Preconditions;
 
@@ -36,7 +35,7 @@
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.PreviewKind;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 
 public class PrepareMessage extends RepairMessage
@@ -44,20 +43,20 @@
     public final List<TableId> tableIds;
     public final Collection<Range<Token>> ranges;
 
-    public final UUID parentRepairSession;
+    public final TimeUUID parentRepairSession;
     public final boolean isIncremental;
-    public final long timestamp;
+    public final long repairedAt;
     public final boolean isGlobal;
     public final PreviewKind previewKind;
 
-    public PrepareMessage(UUID parentRepairSession, List<TableId> tableIds, Collection<Range<Token>> ranges, boolean isIncremental, long timestamp, boolean isGlobal, PreviewKind previewKind)
+    public PrepareMessage(TimeUUID parentRepairSession, List<TableId> tableIds, Collection<Range<Token>> ranges, boolean isIncremental, long repairedAt, boolean isGlobal, PreviewKind previewKind)
     {
         super(null);
         this.parentRepairSession = parentRepairSession;
         this.tableIds = tableIds;
         this.ranges = ranges;
         this.isIncremental = isIncremental;
-        this.timestamp = timestamp;
+        this.repairedAt = repairedAt;
         this.isGlobal = isGlobal;
         this.previewKind = previewKind;
     }
@@ -72,7 +71,7 @@
                isIncremental == other.isIncremental &&
                isGlobal == other.isGlobal &&
                previewKind == other.previewKind &&
-               timestamp == other.timestamp &&
+               repairedAt == other.repairedAt &&
                tableIds.equals(other.tableIds) &&
                ranges.equals(other.ranges);
     }
@@ -80,7 +79,7 @@
     @Override
     public int hashCode()
     {
-        return Objects.hash(parentRepairSession, isGlobal, previewKind, isIncremental, timestamp, tableIds, ranges);
+        return Objects.hash(parentRepairSession, isGlobal, previewKind, isIncremental, repairedAt, tableIds, ranges);
     }
 
     private static final String MIXED_MODE_ERROR = "Some nodes involved in repair are on an incompatible major version. " +
@@ -95,7 +94,7 @@
             out.writeInt(message.tableIds.size());
             for (TableId tableId : message.tableIds)
                 tableId.serialize(out);
-            UUIDSerializer.serializer.serialize(message.parentRepairSession, out, version);
+            message.parentRepairSession.serialize(out);
             out.writeInt(message.ranges.size());
             for (Range<Token> r : message.ranges)
             {
@@ -103,7 +102,7 @@
                 Range.tokenSerializer.serialize(r, out, version);
             }
             out.writeBoolean(message.isIncremental);
-            out.writeLong(message.timestamp);
+            out.writeLong(message.repairedAt);
             out.writeBoolean(message.isGlobal);
             out.writeInt(message.previewKind.getSerializationVal());
         }
@@ -116,7 +115,7 @@
             List<TableId> tableIds = new ArrayList<>(tableIdCount);
             for (int i = 0; i < tableIdCount; i++)
                 tableIds.add(TableId.deserialize(in));
-            UUID parentRepairSession = UUIDSerializer.serializer.deserialize(in, version);
+            TimeUUID parentRepairSession = TimeUUID.deserialize(in);
             int rangeCount = in.readInt();
             List<Range<Token>> ranges = new ArrayList<>(rangeCount);
             for (int i = 0; i < rangeCount; i++)
@@ -134,12 +133,12 @@
             size = TypeSizes.sizeof(message.tableIds.size());
             for (TableId tableId : message.tableIds)
                 size += tableId.serializedSize();
-            size += UUIDSerializer.serializer.serializedSize(message.parentRepairSession, version);
+            size += TimeUUID.sizeInBytes();
             size += TypeSizes.sizeof(message.ranges.size());
             for (Range<Token> r : message.ranges)
                 size += Range.tokenSerializer.serializedSize(r, version);
             size += TypeSizes.sizeof(message.isIncremental);
-            size += TypeSizes.sizeof(message.timestamp);
+            size += TypeSizes.sizeof(message.repairedAt);
             size += TypeSizes.sizeof(message.isGlobal);
             size += TypeSizes.sizeof(message.previewKind.getSerializationVal());
             return size;
@@ -154,7 +153,7 @@
                ", ranges=" + ranges +
                ", parentRepairSession=" + parentRepairSession +
                ", isIncremental=" + isIncremental +
-               ", timestamp=" + timestamp +
+               ", timestamp=" + repairedAt +
                ", isGlobal=" + isGlobal +
                '}';
     }
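
serializedSize here mixes the constant TimeUUID contribution with per-element sizes for the table IDs and ranges; the invariant to preserve is that it returns exactly the number of bytes serialize writes. A JDK-only sketch of that invariant for a fixed 16-byte ID plus a variable list of longs (the token serialization is simplified to a plain long):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.List;
    import java.util.UUID;

    public final class SerializedSizeDemo
    {
        static void serialize(UUID id, List<Long> tokens, DataOutputStream out) throws IOException
        {
            out.writeLong(id.getMostSignificantBits());
            out.writeLong(id.getLeastSignificantBits());
            out.writeInt(tokens.size());
            for (long token : tokens)
                out.writeLong(token);
        }

        static long serializedSize(List<Long> tokens)
        {
            long size = 16;                            // fixed-width id, like TimeUUID.sizeInBytes()
            size += Integer.BYTES;                     // element count
            size += (long) tokens.size() * Long.BYTES; // per-element payload
            return size;
        }

        public static void main(String[] args) throws IOException
        {
            List<Long> tokens = List.of(1L, 2L, 3L);
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            serialize(UUID.randomUUID(), tokens, new DataOutputStream(buffer));
            // serializedSize must agree byte-for-byte with what serialize actually wrote.
            System.out.println(buffer.size() == serializedSize(tokens)); // true
        }
    }
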
diff --git a/src/java/org/apache/cassandra/repair/messages/RepairMessage.java b/src/java/org/apache/cassandra/repair/messages/RepairMessage.java
index 165911d..00ce888 100644
--- a/src/java/org/apache/cassandra/repair/messages/RepairMessage.java
+++ b/src/java/org/apache/cassandra/repair/messages/RepairMessage.java
@@ -17,8 +17,6 @@
  */
 package org.apache.cassandra.repair.messages;
 
-import java.util.UUID;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -31,7 +29,9 @@
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.repair.RepairJobDesc;
+import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.net.MessageFlag.CALL_BACK_ON_FAILURE;
 
@@ -78,7 +78,7 @@
                 logger.error("[#{}] {} failed on {}: {}", request.desc.parentSessionId, verb, from, failureReason);
 
                 if (supportsTimeouts(from, request.desc.parentSessionId))
-                    failureCallback.onFailure(new RepairException(request.desc, String.format("Got %s failure from %s: %s", verb, from, failureReason)));
+                    failureCallback.onFailure(RepairException.error(request.desc, PreviewKind.NONE, String.format("Got %s failure from %s: %s", verb, from, failureReason)));
             }
         };
 
@@ -87,7 +87,7 @@
                                                      callback);
     }
 
-    private static boolean supportsTimeouts(InetAddressAndPort from, UUID parentSessionId)
+    private static boolean supportsTimeouts(InetAddressAndPort from, TimeUUID parentSessionId)
     {
         CassandraVersion remoteVersion = Gossiper.instance.getReleaseVersion(from);
         if (remoteVersion != null && remoteVersion.compareTo(SUPPORTS_TIMEOUTS) >= 0)
diff --git a/src/java/org/apache/cassandra/repair/messages/RepairOption.java b/src/java/org/apache/cassandra/repair/messages/RepairOption.java
index c2e2c18..6bb7fdb 100644
--- a/src/java/org/apache/cassandra/repair/messages/RepairOption.java
+++ b/src/java/org/apache/cassandra/repair/messages/RepairOption.java
@@ -20,17 +20,16 @@
 import java.util.*;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.repair.RepairParallelism;
-import org.apache.cassandra.utils.FBUtilities;
 
 /**
  * Repair options.
@@ -52,6 +51,8 @@
     public static final String PREVIEW = "previewKind";
     public static final String OPTIMISE_STREAMS_KEY = "optimiseStreams";
     public static final String IGNORE_UNREPLICATED_KS = "ignoreUnreplicatedKeyspaces";
+    public static final String REPAIR_PAXOS_KEY = "repairPaxos";
+    public static final String PAXOS_ONLY_KEY = "paxosOnly";
 
     // we don't want to push nodes too much for repair
     public static final int MAX_JOB_THREADS = 4;
@@ -181,6 +182,14 @@
         boolean force = Boolean.parseBoolean(options.get(FORCE_REPAIR_KEY));
         boolean pullRepair = Boolean.parseBoolean(options.get(PULL_REPAIR_KEY));
         boolean ignoreUnreplicatedKeyspaces = Boolean.parseBoolean(options.get(IGNORE_UNREPLICATED_KS));
+        boolean repairPaxos = Boolean.parseBoolean(options.get(REPAIR_PAXOS_KEY));
+        boolean paxosOnly = Boolean.parseBoolean(options.get(PAXOS_ONLY_KEY));
+
+        if (previewKind != PreviewKind.NONE)
+        {
+            Preconditions.checkArgument(!repairPaxos, "repairPaxos must be set to false for preview repairs");
+            Preconditions.checkArgument(!paxosOnly, "paxosOnly must be set to false for preview repairs");
+        }
 
         int jobThreads = 1;
         if (options.containsKey(JOB_THREADS_KEY))
@@ -197,7 +206,7 @@
 
         boolean asymmetricSyncing = Boolean.parseBoolean(options.get(OPTIMISE_STREAMS_KEY));
 
-        RepairOption option = new RepairOption(parallelism, primaryRange, incremental, trace, jobThreads, ranges, !ranges.isEmpty(), pullRepair, force, previewKind, asymmetricSyncing, ignoreUnreplicatedKeyspaces);
+        RepairOption option = new RepairOption(parallelism, primaryRange, incremental, trace, jobThreads, ranges, !ranges.isEmpty(), pullRepair, force, previewKind, asymmetricSyncing, ignoreUnreplicatedKeyspaces, repairPaxos, paxosOnly);
 
         // data centers
         String dataCentersStr = options.get(DATACENTERS_KEY);
@@ -277,24 +286,18 @@
     private final PreviewKind previewKind;
     private final boolean optimiseStreams;
     private final boolean ignoreUnreplicatedKeyspaces;
+    private final boolean repairPaxos;
+    private final boolean paxosOnly;
 
     private final Collection<String> columnFamilies = new HashSet<>();
     private final Collection<String> dataCenters = new HashSet<>();
     private final Collection<String> hosts = new HashSet<>();
     private final Collection<Range<Token>> ranges = new HashSet<>();
 
-    public RepairOption(RepairParallelism parallelism, boolean primaryRange, boolean incremental, boolean trace, int jobThreads, Collection<Range<Token>> ranges, boolean isSubrangeRepair, boolean pullRepair, boolean forceRepair, PreviewKind previewKind, boolean optimiseStreams, boolean ignoreUnreplicatedKeyspaces)
+    public RepairOption(RepairParallelism parallelism, boolean primaryRange, boolean incremental, boolean trace, int jobThreads, Collection<Range<Token>> ranges, boolean isSubrangeRepair, boolean pullRepair, boolean forceRepair, PreviewKind previewKind, boolean optimiseStreams, boolean ignoreUnreplicatedKeyspaces, boolean repairPaxos, boolean paxosOnly)
     {
-        if (FBUtilities.isWindows &&
-            (DatabaseDescriptor.getDiskAccessMode() != Config.DiskAccessMode.standard || DatabaseDescriptor.getIndexAccessMode() != Config.DiskAccessMode.standard) &&
-            parallelism == RepairParallelism.SEQUENTIAL)
-        {
-            logger.warn("Sequential repair disabled when memory-mapped I/O is configured on Windows. Reverting to parallel.");
-            this.parallelism = RepairParallelism.PARALLEL;
-        }
-        else
-            this.parallelism = parallelism;
 
+        this.parallelism = parallelism;
         this.primaryRange = primaryRange;
         this.incremental = incremental;
         this.trace = trace;
@@ -306,6 +309,8 @@
         this.previewKind = previewKind;
         this.optimiseStreams = optimiseStreams;
         this.ignoreUnreplicatedKeyspaces = ignoreUnreplicatedKeyspaces;
+        this.repairPaxos = repairPaxos;
+        this.paxosOnly = paxosOnly;
     }
 
     public RepairParallelism getParallelism()
@@ -413,6 +418,16 @@
         return ignoreUnreplicatedKeyspaces;
     }
 
+    public boolean repairPaxos()
+    {
+        return repairPaxos;
+    }
+
+    public boolean paxosOnly()
+    {
+        return paxosOnly;
+    }
+
     @Override
     public String toString()
     {
@@ -430,6 +445,8 @@
                ", force repair: " + forceRepair +
                ", optimise streams: "+ optimiseStreams() +
                ", ignore unreplicated keyspaces: "+ ignoreUnreplicatedKeyspaces +
+               ", repairPaxos: " + repairPaxos +
+               ", paxosOnly: " + paxosOnly +
                ')';
     }
 
@@ -450,6 +467,8 @@
         options.put(FORCE_REPAIR_KEY, Boolean.toString(forceRepair));
         options.put(PREVIEW, previewKind.toString());
         options.put(OPTIMISE_STREAMS_KEY, Boolean.toString(optimiseStreams));
+        options.put(REPAIR_PAXOS_KEY, Boolean.toString(repairPaxos));
+        options.put(PAXOS_ONLY_KEY, Boolean.toString(paxosOnly));
         return options;
     }
 }
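
parse() now reads repairPaxos and paxosOnly and rejects both for preview repairs. A minimal sketch of that validation, using plain strings in place of the real PreviewKind enum and option constants:

    import java.util.Map;

    public final class RepairOptionParseDemo
    {
        // Mirrors the parse() logic above: booleans default to false when absent,
        // and paxos repair cannot be combined with a preview run.
        static void validate(Map<String, String> options)
        {
            boolean repairPaxos = Boolean.parseBoolean(options.get("repairPaxos"));
            boolean paxosOnly = Boolean.parseBoolean(options.get("paxosOnly"));
            boolean isPreview = !"NONE".equals(options.getOrDefault("previewKind", "NONE"));

            if (isPreview && (repairPaxos || paxosOnly))
                throw new IllegalArgumentException("repairPaxos/paxosOnly must be false for preview repairs");
        }

        public static void main(String[] args)
        {
            validate(Map.of("previewKind", "NONE", "repairPaxos", "true")); // accepted
            try
            {
                validate(Map.of("previewKind", "ALL", "paxosOnly", "true"));
            }
            catch (IllegalArgumentException e)
            {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }
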
diff --git a/src/java/org/apache/cassandra/repair/messages/StatusRequest.java b/src/java/org/apache/cassandra/repair/messages/StatusRequest.java
index 09354e6..ec325ad 100644
--- a/src/java/org/apache/cassandra/repair/messages/StatusRequest.java
+++ b/src/java/org/apache/cassandra/repair/messages/StatusRequest.java
@@ -19,18 +19,17 @@
 package org.apache.cassandra.repair.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class StatusRequest extends RepairMessage
 {
-    public final UUID sessionID;
+    public final TimeUUID sessionID;
 
-    public StatusRequest(UUID sessionID)
+    public StatusRequest(TimeUUID sessionID)
     {
         super(null);
         this.sessionID = sessionID;
@@ -62,17 +61,17 @@
     {
         public void serialize(StatusRequest msg, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(msg.sessionID, out, version);
+            msg.sessionID.serialize(out);
         }
 
         public StatusRequest deserialize(DataInputPlus in, int version) throws IOException
         {
-            return new StatusRequest(UUIDSerializer.serializer.deserialize(in, version));
+            return new StatusRequest(TimeUUID.deserialize(in));
         }
 
         public long serializedSize(StatusRequest msg, int version)
         {
-            return UUIDSerializer.serializer.serializedSize(msg.sessionID, version);
+            return TimeUUID.sizeInBytes();
         }
     };
 }
diff --git a/src/java/org/apache/cassandra/repair/messages/StatusResponse.java b/src/java/org/apache/cassandra/repair/messages/StatusResponse.java
index e62d337..7e7d6e0 100644
--- a/src/java/org/apache/cassandra/repair/messages/StatusResponse.java
+++ b/src/java/org/apache/cassandra/repair/messages/StatusResponse.java
@@ -19,21 +19,20 @@
 package org.apache.cassandra.repair.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.repair.consistent.ConsistentSession;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class StatusResponse extends RepairMessage
 {
-    public final UUID sessionID;
+    public final TimeUUID sessionID;
     public final ConsistentSession.State state;
 
-    public StatusResponse(UUID sessionID, ConsistentSession.State state)
+    public StatusResponse(TimeUUID sessionID, ConsistentSession.State state)
     {
         super(null);
         assert sessionID != null;
@@ -72,19 +71,19 @@
     {
         public void serialize(StatusResponse msg, DataOutputPlus out, int version) throws IOException
         {
-            UUIDSerializer.serializer.serialize(msg.sessionID, out, version);
+            msg.sessionID.serialize(out);
             out.writeInt(msg.state.ordinal());
         }
 
         public StatusResponse deserialize(DataInputPlus in, int version) throws IOException
         {
-            return new StatusResponse(UUIDSerializer.serializer.deserialize(in, version),
+            return new StatusResponse(TimeUUID.deserialize(in),
                                       ConsistentSession.State.valueOf(in.readInt()));
         }
 
         public long serializedSize(StatusResponse msg, int version)
         {
-            return UUIDSerializer.serializer.serializedSize(msg.sessionID, version)
+            return TimeUUID.sizeInBytes()
                    + TypeSizes.sizeof(msg.state.ordinal());
         }
     };
diff --git a/src/java/org/apache/cassandra/repair/state/AbstractCompletable.java b/src/java/org/apache/cassandra/repair/state/AbstractCompletable.java
new file mode 100644
index 0000000..20e59d6
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/AbstractCompletable.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.base.Throwables;
+
+import org.apache.cassandra.utils.Clock;
+
+public class AbstractCompletable<I> implements Completable<I>
+{
+    private final long creationTimeMillis = Clock.Global.currentTimeMillis(); // used to convert from nanos to millis
+    private final long creationTimeNanos = Clock.Global.nanoTime();
+    private final AtomicReference<Result> result = new AtomicReference<>(null);
+    public final I id;
+    protected volatile long lastUpdatedAtNs;
+
+    public AbstractCompletable(I id)
+    {
+        this.id = id;
+    }
+
+    @Override
+    public I getId()
+    {
+        return id;
+    }
+
+    @Override
+    public long getInitializedAtMillis()
+    {
+        return nanosToMillis(creationTimeNanos);
+    }
+
+    @Override
+    public long getInitializedAtNanos()
+    {
+        return creationTimeNanos;
+    }
+
+    @Override
+    public long getLastUpdatedAtMillis()
+    {
+        return nanosToMillis(lastUpdatedAtNs);
+    }
+
+    @Override
+    public long getLastUpdatedAtNanos()
+    {
+        return lastUpdatedAtNs;
+    }
+
+    @Override
+    public Result getResult()
+    {
+        return result.get();
+    }
+
+    public void updated()
+    {
+        lastUpdatedAtNs = Clock.Global.nanoTime();
+    }
+
+    protected boolean tryResult(Result result)
+    {
+        if (!this.result.compareAndSet(null, result))
+            return false;
+        onComplete();
+        lastUpdatedAtNs = Clock.Global.nanoTime();
+        return true;
+    }
+
+    protected void onComplete() {}
+
+    protected long nanosToMillis(long nanos)
+    {
+        // nanos - creationTimeNanos = delta since init
+        return creationTimeMillis + TimeUnit.NANOSECONDS.toMillis(nanos - creationTimeNanos);
+    }
+
+    protected class BaseSkipPhase extends BasePhase
+    {
+        public void skip(String msg)
+        {
+            tryResult(Result.skip(msg));
+        }
+    }
+
+    protected class BasePhase
+    {
+        public void success()
+        {
+            tryResult(Result.success());
+        }
+
+        public void success(String msg)
+        {
+            tryResult(Result.success(msg));
+        }
+
+        public void fail(Throwable e)
+        {
+            fail(e == null ? null : Throwables.getStackTraceAsString(e));
+        }
+
+        public void fail(String failureCause)
+        {
+            tryResult(Result.fail(failureCause));
+        }
+    }
+}
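For orientation, a minimal sketch of how AbstractCompletable is meant to be subclassed (illustrative only; ExampleTask and its Phase are hypothetical names, not part of this change). It shows that completion is one-shot, because tryResult uses a compare-and-set on the result reference, and that millisecond timestamps are derived by anchoring the monotonic nanosecond clock at creation time.

package org.apache.cassandra.repair.state;

public class ExampleTask extends AbstractCompletable<String>
{
    // expose the protected completion machinery the same way ParticipateState does
    public final Phase phase = new Phase();

    public ExampleTask(String id)
    {
        super(id);
    }

    public final class Phase extends BasePhase {}

    public static void main(String[] args)
    {
        ExampleTask task = new ExampleTask("example");
        task.phase.success("done");   // first result wins: CAS on the result reference
        task.phase.fail("ignored");   // no-op, the task is already complete
        assert task.isComplete();
        assert "done".equals(task.getSuccessMessage());
        // wall-clock time = creationTimeMillis + (lastUpdatedAtNs - creationTimeNanos)
        long completedAtMillis = task.getLastUpdatedAtMillis();
        System.out.println(completedAtMillis);
    }
}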
diff --git a/src/java/org/apache/cassandra/repair/state/AbstractState.java b/src/java/org/apache/cassandra/repair/state/AbstractState.java
new file mode 100644
index 0000000..f9b0a93
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/AbstractState.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.EnumMap;
+
+import org.apache.cassandra.utils.Clock;
+
+public abstract class AbstractState<T extends Enum<T>, I> extends AbstractCompletable<I> implements State<T, I>
+{
+    public static final int INIT = -1;
+    public static final int COMPLETE = -2;
+
+    private final Class<T> klass;
+    protected final long[] stateTimesNanos;
+    protected int currentState = INIT;
+
+    public AbstractState(I id, Class<T> klass)
+    {
+        super(id);
+        this.klass = klass;
+        this.stateTimesNanos = new long[klass.getEnumConstants().length];
+    }
+
+    @Override
+    public T getStatus()
+    {
+        int current = currentState;
+        if (current < 0) // init or complete
+            return null;
+        return klass.getEnumConstants()[current];
+    }
+
+    public int getCurrentState()
+    {
+        return currentState;
+    }
+
+    @Override
+    public EnumMap<T, Long> getStateTimesMillis()
+    {
+        long[] millis = getStateTimesMillisArray();
+        EnumMap<T, Long> map = new EnumMap<>(klass);
+        for (int i = 0; i < millis.length; i++)
+        {
+            long ms = millis[i];
+            if (ms != 0)
+                map.put(klass.getEnumConstants()[i], ms);
+        }
+        return map;
+    }
+
+    @Override
+    protected void onComplete()
+    {
+        currentState = COMPLETE;
+    }
+
+    private long[] getStateTimesMillisArray()
+    {
+        long[] millis = new long[stateTimesNanos.length];
+        for (int i = 0; i < millis.length; i++)
+        {
+            long value = stateTimesNanos[i];
+            if (value != 0)
+                millis[i] = nanosToMillis(value);
+        }
+        return millis;
+    }
+
+    protected void updateState(T state)
+    {
+        int currentState = this.currentState;
+        if (currentState >= state.ordinal())
+            throw new IllegalStateException("State went backwards; current=" + klass.getEnumConstants()[currentState] + ", desired=" + state);
+        long now = Clock.Global.nanoTime();
+        stateTimesNanos[this.currentState = state.ordinal()] = now;
+        lastUpdatedAtNs = now;
+    }
+}
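A companion sketch for AbstractState (ExampleState is likewise hypothetical): updateState only moves forward through the enum, records a nanosecond timestamp per state, and getStateTimesMillis returns those timestamps converted to wall-clock millis.

package org.apache.cassandra.repair.state;

public class ExampleState extends AbstractState<ExampleState.State, String>
{
    public enum State { START, WORK, DONE }

    public ExampleState(String id)
    {
        super(id, State.class);
    }

    public static void main(String[] args)
    {
        ExampleState s = new ExampleState("example");
        s.updateState(State.START);                   // records nanoTime for START
        s.updateState(State.WORK);
        // s.updateState(State.START);                // would throw IllegalStateException: state went backwards
        System.out.println(s.getStatus());            // WORK
        System.out.println(s.getStateTimesMillis());  // {START=<millis>, WORK=<millis>}
    }
}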
diff --git a/src/java/org/apache/cassandra/repair/state/Completable.java b/src/java/org/apache/cassandra/repair/state/Completable.java
new file mode 100644
index 0000000..61813c6
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/Completable.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.utils.Clock;
+
+public interface Completable<I>
+{
+    I getId();
+
+    long getInitializedAtMillis();
+
+    long getInitializedAtNanos();
+
+    long getLastUpdatedAtMillis();
+
+    long getLastUpdatedAtNanos();
+
+    default long getDurationMillis()
+    {
+        long endNanos = getLastUpdatedAtNanos();
+        if (!isComplete())
+            endNanos = Clock.Global.nanoTime();
+        return TimeUnit.NANOSECONDS.toMillis(endNanos - getInitializedAtNanos());
+    }
+
+    Result getResult();
+
+    default boolean isComplete()
+    {
+        return getResult() != null;
+    }
+
+    default String getFailureCause()
+    {
+        Result r = getResult();
+        if (r == null || r.kind == Result.Kind.SUCCESS)
+            return null;
+        return r.message;
+    }
+
+    default String getSuccessMessage()
+    {
+        Result r = getResult();
+        if (r == null || r.kind != Result.Kind.SUCCESS)
+            return null;
+        return r.message;
+    }
+
+    class Result
+    {
+        public enum Kind
+        {SUCCESS, SKIPPED, FAILURE}
+
+        public final Result.Kind kind;
+        public final String message;
+
+        private Result(Result.Kind kind, String message)
+        {
+            this.kind = kind;
+            this.message = message;
+        }
+
+        protected static Result success()
+        {
+            return new Result(Result.Kind.SUCCESS, null);
+        }
+
+        protected static Result success(String msg)
+        {
+            return new Result(Result.Kind.SUCCESS, msg);
+        }
+
+        protected static Result skip(String msg)
+        {
+            return new Result(Result.Kind.SKIPPED, msg);
+        }
+
+        protected static Result fail(String msg)
+        {
+            return new Result(Result.Kind.FAILURE, msg);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/state/CoordinatorState.java b/src/java/org/apache/cassandra/repair/state/CoordinatorState.java
new file mode 100644
index 0000000..74ca3e3
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/CoordinatorState.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.CommonRange;
+import org.apache.cassandra.repair.RepairRunnable;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+public class CoordinatorState extends AbstractState<CoordinatorState.State, TimeUUID>
+{
+    public enum State
+    {
+        SETUP, START,
+        PREPARE_START, PREPARE_COMPLETE,
+        REPAIR_START, REPAIR_COMPLETE
+    }
+
+    public final int cmd;
+    public final String keyspace;
+    public final RepairOption options;
+    private final ConcurrentMap<TimeUUID, SessionState> sessions = new ConcurrentHashMap<>();
+
+    private List<ColumnFamilyStore> columnFamilies = null;
+    private RepairRunnable.NeighborsAndRanges neighborsAndRanges = null;
+
+    // API that separates the calls driving phase changes from the calls reading the state

+    public final Phase phase = new Phase();
+
+    public CoordinatorState(int cmd, String keyspace, RepairOption options)
+    {
+        super(nextTimeUUID(), State.class);
+        this.cmd = cmd;
+        this.keyspace = Objects.requireNonNull(keyspace);
+        this.options = Objects.requireNonNull(options);
+    }
+
+    public Collection<SessionState> getSessions()
+    {
+        return sessions.values();
+    }
+
+    public Set<TimeUUID> getSessionIds()
+    {
+        return sessions.keySet();
+    }
+
+    public void register(SessionState state)
+    {
+        sessions.put(state.id, state);
+    }
+
+    public List<ColumnFamilyStore> getColumnFamilies()
+    {
+        return columnFamilies;
+    }
+
+    public String[] getColumnFamilyNames()
+    {
+        if (columnFamilies == null)
+            return null;
+        return columnFamilies.stream().map(ColumnFamilyStore::getTableName).toArray(String[]::new);
+    }
+
+    public RepairRunnable.NeighborsAndRanges getNeighborsAndRanges()
+    {
+        return neighborsAndRanges;
+    }
+
+    public Set<InetAddressAndPort> getParticipants()
+    {
+        if (neighborsAndRanges == null)
+            return null;
+        return neighborsAndRanges.participants;
+    }
+
+    public List<CommonRange> getCommonRanges()
+    {
+        if (neighborsAndRanges == null)
+            return null;
+        return neighborsAndRanges.commonRanges;
+    }
+
+    public List<CommonRange> getFilteredCommonRanges()
+    {
+        if (neighborsAndRanges == null)
+            return null;
+        return neighborsAndRanges.filterCommonRanges(keyspace, getColumnFamilyNames());
+    }
+
+    public final class Phase extends BaseSkipPhase
+    {
+        public void setup()
+        {
+            updateState(State.SETUP);
+        }
+
+        public void start(List<ColumnFamilyStore> columnFamilies, RepairRunnable.NeighborsAndRanges neighborsAndRanges)
+        {
+            CoordinatorState.this.columnFamilies = Objects.requireNonNull(columnFamilies);
+            CoordinatorState.this.neighborsAndRanges = Objects.requireNonNull(neighborsAndRanges);
+            updateState(State.START);
+        }
+
+        public void prepareStart()
+        {
+            updateState(State.PREPARE_START);
+        }
+
+        public void prepareComplete()
+        {
+            updateState(State.PREPARE_COMPLETE);
+        }
+
+        public void repairSubmitted()
+        {
+            updateState(State.REPAIR_START);
+        }
+
+        public void repairCompleted()
+        {
+            updateState(State.REPAIR_COMPLETE);
+        }
+    }
+}
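A sketch of how the coordinator-side Phase API is intended to be used (the helper class below is hypothetical, and a fully constructed CoordinatorState plus its collaborators are taken as given): RepairRunnable drives the lifecycle only through coordinator.phase.*(), while monitoring code reads the state through the getters.

package org.apache.cassandra.repair.state;

import java.util.EnumMap;
import java.util.List;
import java.util.Set;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.repair.RepairRunnable;

public final class CoordinatorStateExample
{
    static void exampleLifecycle(CoordinatorState coordinator,
                                 List<ColumnFamilyStore> tables,
                                 RepairRunnable.NeighborsAndRanges neighborsAndRanges)
    {
        coordinator.phase.setup();                            // SETUP
        coordinator.phase.start(tables, neighborsAndRanges);  // START: also captures tables and participants
        coordinator.phase.prepareStart();                     // PREPARE_START
        coordinator.phase.prepareComplete();                  // PREPARE_COMPLETE
        coordinator.phase.repairSubmitted();                  // REPAIR_START
        coordinator.phase.repairCompleted();                  // REPAIR_COMPLETE

        // monitoring code only reads, it never drives transitions
        Set<InetAddressAndPort> participants = coordinator.getParticipants();
        EnumMap<CoordinatorState.State, Long> stateTimes = coordinator.getStateTimesMillis();
        System.out.println(participants + " " + stateTimes);
    }
}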
diff --git a/src/java/org/apache/cassandra/repair/state/JobState.java b/src/java/org/apache/cassandra/repair/state/JobState.java
new file mode 100644
index 0000000..f8d42c3
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/JobState.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.Set;
+import java.util.UUID;
+
+import com.google.common.collect.ImmutableSet;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.RepairJobDesc;
+
+public class JobState extends AbstractState<JobState.State, UUID>
+{
+    public enum State
+    {
+        START,
+        SNAPSHOT_START, SNAPSHOT_COMPLETE,
+        VALIDATION_START, VALIDATION_COMPLETE,
+        STREAM_START
+    }
+
+    public final RepairJobDesc desc;
+    private final ImmutableSet<InetAddressAndPort> endpoints;
+
+    public final Phase phase = new Phase();
+
+    public JobState(RepairJobDesc desc, ImmutableSet<InetAddressAndPort> endpoints)
+    {
+        super(desc.determanisticId(), State.class);
+        this.desc = desc;
+        this.endpoints = endpoints;
+    }
+
+    public Set<InetAddressAndPort> getParticipants()
+    {
+        return endpoints;
+    }
+
+    public final class Phase extends BasePhase
+    {
+        public void start()
+        {
+            updateState(State.START);
+        }
+
+        public void snapshotsSubmitted()
+        {
+            updateState(State.SNAPSHOT_START);
+        }
+
+        public void snapshotsCompleted()
+        {
+            updateState(State.SNAPSHOT_COMPLETE);
+        }
+
+        public void validationSubmitted()
+        {
+            updateState(State.VALIDATION_START);
+        }
+
+        public void validationCompleted()
+        {
+            updateState(State.VALIDATION_COMPLETE);
+        }
+
+        public void streamSubmitted()
+        {
+            updateState(State.STREAM_START);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/state/ParticipateState.java b/src/java/org/apache/cassandra/repair/state/ParticipateState.java
new file mode 100644
index 0000000..c1faa4e
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/ParticipateState.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.messages.PrepareMessage;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.TimeUUID;
+
+public class ParticipateState extends AbstractCompletable<TimeUUID>
+{
+    public final InetAddressAndPort initiator;
+    public final List<TableId> tableIds;
+    public final Collection<Range<Token>> ranges;
+    public final boolean incremental;
+    public final long repairedAt;
+    public final boolean global;
+    public final PreviewKind previewKind;
+
+    private final ConcurrentMap<UUID, ValidationState> validations = new ConcurrentHashMap<>();
+
+    public final Phase phase = new Phase();
+
+    public ParticipateState(InetAddressAndPort initiator, PrepareMessage msg)
+    {
+        super(msg.parentRepairSession);
+        this.initiator = initiator;
+        this.tableIds = msg.tableIds;
+        this.ranges = msg.ranges;
+        this.incremental = msg.isIncremental;
+        this.repairedAt = msg.repairedAt;
+        this.global = msg.isGlobal;
+        this.previewKind = msg.previewKind;
+    }
+
+    public boolean register(ValidationState state)
+    {
+        ValidationState current = validations.putIfAbsent(state.id, state);
+        return current == null;
+    }
+
+    public Collection<ValidationState> validations()
+    {
+        return validations.values();
+    }
+
+    public Collection<UUID> validationIds()
+    {
+        return validations.keySet();
+    }
+
+    public class Phase extends BasePhase
+    {
+
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/state/SessionState.java b/src/java/org/apache/cassandra/repair/state/SessionState.java
new file mode 100644
index 0000000..8e8e648
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/SessionState.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.Collection;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.CommonRange;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+public class SessionState extends AbstractState<SessionState.State, TimeUUID>
+{
+    public enum State
+    {
+        START, JOBS_START
+    }
+
+    public final TimeUUID parentRepairSession;
+    public final String keyspace;
+    public final String[] cfnames;
+    public final CommonRange commonRange;
+    private final ConcurrentMap<UUID, JobState> jobs = new ConcurrentHashMap<>();
+
+    public final Phase phase = new Phase();
+
+    public SessionState(TimeUUID parentRepairSession, String keyspace, String[] cfnames, CommonRange commonRange)
+    {
+        super(nextTimeUUID(), State.class);
+        this.parentRepairSession = parentRepairSession;
+        this.keyspace = keyspace;
+        this.cfnames = cfnames;
+        this.commonRange = commonRange;
+    }
+
+    public Collection<JobState> getJobs()
+    {
+        return jobs.values();
+    }
+
+    public JobState getJob(UUID id)
+    {
+        return jobs.get(id);
+    }
+
+    public Set<UUID> getJobIds()
+    {
+        return jobs.keySet();
+    }
+
+    public Set<InetAddressAndPort> getParticipants()
+    {
+        return commonRange.endpoints;
+    }
+
+    public void register(JobState state)
+    {
+        jobs.put(state.id, state);
+    }
+
+    public final class Phase extends BaseSkipPhase
+    {
+        public void start()
+        {
+            updateState(State.START);
+        }
+
+        public void jobsSubmitted()
+        {
+            updateState(State.JOBS_START);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/repair/state/State.java b/src/java/org/apache/cassandra/repair/state/State.java
new file mode 100644
index 0000000..bfb9a13
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/State.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.EnumMap;
+
+public interface State<T extends Enum<T>, I> extends Completable<I>
+{
+    T getStatus();
+
+    EnumMap<T, Long> getStateTimesMillis();
+}
diff --git a/src/java/org/apache/cassandra/repair/state/ValidationState.java b/src/java/org/apache/cassandra/repair/state/ValidationState.java
new file mode 100644
index 0000000..ff2ca53
--- /dev/null
+++ b/src/java/org/apache/cassandra/repair/state/ValidationState.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.repair.state;
+
+import java.util.UUID;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.RepairJobDesc;
+
+public class ValidationState extends AbstractState<ValidationState.State, UUID>
+{
+    public enum State
+    { START, SENDING_TREES }
+
+    public final Phase phase = new Phase();
+    public final RepairJobDesc desc;
+    public final InetAddressAndPort initiator;
+    public long estimatedPartitions;
+    public long estimatedTotalBytes;
+    public long partitionsProcessed;
+    public long bytesRead;
+
+    public ValidationState(RepairJobDesc desc, InetAddressAndPort initiator)
+    {
+        super(desc.determanisticId(), State.class);
+        this.desc = desc;
+        this.initiator = initiator;
+    }
+
+    public float getProgress()
+    {
+        int currentState = this.currentState;
+        if (currentState == INIT)
+            return 0.0F;
+        if (currentState == COMPLETE)
+            return 1.0F;
+        if (estimatedPartitions == 0) // mostly to avoid / 0
+            return 0.0f;
+        return Math.min(0.99F, partitionsProcessed / (float) estimatedPartitions);
+    }
+
+    public final class Phase extends BaseSkipPhase
+    {
+        public void start(long estimatedPartitions, long estimatedTotalBytes)
+        {
+            updateState(State.START);
+            ValidationState.this.estimatedPartitions = estimatedPartitions;
+            ValidationState.this.estimatedTotalBytes = estimatedTotalBytes;
+        }
+
+        public void sendingTrees()
+        {
+            updateState(State.SENDING_TREES);
+        }
+    }
+}
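A small sketch of the progress calculation above (hypothetical helper; desc and initiator are assumed to come from the validation request): with 1,000 estimated partitions and 250 processed, getProgress() reports 0.25, stays clamped at 0.99 while the validation runs, and returns 1.0 once a result is recorded.

package org.apache.cassandra.repair.state;

import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.repair.RepairJobDesc;

final class ValidationProgressExample
{
    static void example(RepairJobDesc desc, InetAddressAndPort initiator)
    {
        ValidationState validation = new ValidationState(desc, initiator);
        validation.phase.start(1_000, 4_000_000);   // estimatedPartitions, estimatedTotalBytes
        validation.partitionsProcessed = 250;
        float running = validation.getProgress();   // 250 / 1000 = 0.25, never above 0.99 while incomplete
        validation.phase.success();
        float done = validation.getProgress();      // 1.0 once the validation has a result
        System.out.println(running + " -> " + done);
    }
}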
diff --git a/src/java/org/apache/cassandra/schema/ColumnMetadata.java b/src/java/org/apache/cassandra/schema/ColumnMetadata.java
index d48ca06..fdbd166 100644
--- a/src/java/org/apache/cassandra/schema/ColumnMetadata.java
+++ b/src/java/org/apache/cassandra/schema/ColumnMetadata.java
@@ -33,7 +33,6 @@
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.serializers.MarshalException;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.github.jamm.Unmetered;
 
 @Unmetered
@@ -447,9 +446,9 @@
             return "";
 
         StringBuilder sb = new StringBuilder();
-        sb.append(defs.next().name);
+        sb.append(defs.next().name.toCQLString());
         while (defs.hasNext())
-            sb.append(", ").append(defs.next().name);
+            sb.append(", ").append(defs.next().name.toCQLString());
         return sb.toString();
     }
 
diff --git a/src/java/org/apache/cassandra/schema/CompressionParams.java b/src/java/org/apache/cassandra/schema/CompressionParams.java
index 53760a9..be79ec6 100644
--- a/src/java/org/apache/cassandra/schema/CompressionParams.java
+++ b/src/java/org/apache/cassandra/schema/CompressionParams.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -35,6 +34,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.config.ParameterizedClass;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -66,11 +66,13 @@
     public static final String ENABLED = "enabled";
     public static final String MIN_COMPRESS_RATIO = "min_compress_ratio";
 
-    public static final CompressionParams DEFAULT = new CompressionParams(LZ4Compressor.create(Collections.<String, String>emptyMap()),
-                                                                          DEFAULT_CHUNK_LENGTH,
-                                                                          calcMaxCompressedLength(DEFAULT_CHUNK_LENGTH, DEFAULT_MIN_COMPRESS_RATIO),
-                                                                          DEFAULT_MIN_COMPRESS_RATIO,
-                                                                          Collections.emptyMap());
+    public static final CompressionParams DEFAULT = !CassandraRelevantProperties.DETERMINISM_SSTABLE_COMPRESSION_DEFAULT.getBoolean()
+                                                    ? noCompression()
+                                                    : new CompressionParams(LZ4Compressor.create(Collections.emptyMap()),
+                                                                            DEFAULT_CHUNK_LENGTH,
+                                                                            calcMaxCompressedLength(DEFAULT_CHUNK_LENGTH, DEFAULT_MIN_COMPRESS_RATIO),
+                                                                            DEFAULT_MIN_COMPRESS_RATIO,
+                                                                            Collections.emptyMap());
 
     public static final CompressionParams NOOP = new CompressionParams(NoopCompressor.create(Collections.emptyMap()),
                                                                        // 4 KiB is often the underlying disk block size
@@ -368,7 +370,7 @@
     }
 
     /**
-     * Parse the chunk length (in KB) and returns it as bytes.
+     * Parses the chunk length (in KiB) and returns it as bytes.
      *
      * @param chLengthKB the length of the chunk to parse
      * @return the chunk length in bytes
diff --git a/src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandler.java b/src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandler.java
new file mode 100644
index 0000000..0f0c3e9
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandler.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.time.Duration;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.BiConsumer;
+import java.util.stream.Collectors;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Awaitable;
+
+import static org.apache.cassandra.schema.MigrationCoordinator.MAX_OUTSTANDING_VERSION_REQUESTS;
+
+public class DefaultSchemaUpdateHandler implements SchemaUpdateHandler, IEndpointStateChangeSubscriber
+{
+    private static final Logger logger = LoggerFactory.getLogger(DefaultSchemaUpdateHandler.class);
+
+    @VisibleForTesting
+    final MigrationCoordinator migrationCoordinator;
+
+    private final boolean requireSchemas;
+    private final BiConsumer<SchemaTransformationResult, Boolean> updateCallback;
+    private volatile DistributedSchema schema = DistributedSchema.EMPTY;
+
+    private volatile AsyncPromise<Void> requestedReset;
+
+    private MigrationCoordinator createMigrationCoordinator(MessagingService messagingService)
+    {
+        return new MigrationCoordinator(messagingService,
+                                        Stage.MIGRATION.executor(),
+                                        ScheduledExecutors.scheduledTasks,
+                                        MAX_OUTSTANDING_VERSION_REQUESTS,
+                                        Gossiper.instance,
+                                        this::getSchemaVersionForCoordinator,
+                                        this::applyMutationsFromCoordinator);
+    }
+
+    public DefaultSchemaUpdateHandler(BiConsumer<SchemaTransformationResult, Boolean> updateCallback)
+    {
+        this(null, MessagingService.instance(), !CassandraRelevantProperties.BOOTSTRAP_SKIP_SCHEMA_CHECK.getBoolean(), updateCallback);
+    }
+
+    public DefaultSchemaUpdateHandler(MigrationCoordinator migrationCoordinator,
+                                      MessagingService messagingService,
+                                      boolean requireSchemas,
+                                      BiConsumer<SchemaTransformationResult, Boolean> updateCallback)
+    {
+        this.requireSchemas = requireSchemas;
+        this.updateCallback = updateCallback;
+        this.migrationCoordinator = migrationCoordinator == null ? createMigrationCoordinator(messagingService) : migrationCoordinator;
+        Gossiper.instance.register(this);
+        SchemaPushVerbHandler.instance.register(msg -> {
+            synchronized (this)
+            {
+                if (requestedReset == null)
+                    applyMutations(msg.payload);
+            }
+        });
+        SchemaPullVerbHandler.instance.register(msg -> {
+            try
+            {
+                messagingService.send(msg.responseWith(getSchemaMutations()), msg.from());
+            }
+            catch (RuntimeException ex)
+            {
+                logger.error("Failed to send schema mutations to " + msg.from(), ex);
+            }
+        });
+    }
+
+    public synchronized void start()
+    {
+        if (StorageService.instance.isReplacing())
+            onRemove(DatabaseDescriptor.getReplaceAddress());
+
+        SchemaKeyspace.saveSystemKeyspacesSchema();
+
+        migrationCoordinator.start();
+    }
+
+    @Override
+    public boolean waitUntilReady(Duration timeout)
+    {
+        logger.debug("Waiting for schema to be ready (max {})", timeout);
+        boolean schemasReceived = migrationCoordinator.awaitSchemaRequests(timeout.toMillis());
+
+        if (schemasReceived)
+            return true;
+
+        logger.warn("There are nodes in the cluster with a different schema version than us, from which we did not merge schemas: " +
+                    "our version: ({}), outstanding versions -> endpoints: {}. Use -D{}=true to ignore this, " +
+                    "-D{}=<ep1[,epN]> to skip specific endpoints, or -D{}=<ver1[,verN]> to skip specific schema versions",
+                    Schema.instance.getVersion(),
+                    migrationCoordinator.outstandingVersions(),
+                    CassandraRelevantProperties.BOOTSTRAP_SKIP_SCHEMA_CHECK.getKey(),
+                    CassandraRelevantProperties.IGNORED_SCHEMA_CHECK_ENDPOINTS.getKey(),
+                    CassandraRelevantProperties.IGNORED_SCHEMA_CHECK_VERSIONS.getKey());
+
+        if (requireSchemas)
+        {
+            logger.error("Didn't receive schemas for all known versions within {}. Use -D{}=true to skip this check.",
+                         timeout, CassandraRelevantProperties.BOOTSTRAP_SKIP_SCHEMA_CHECK.getKey());
+
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public void onRemove(InetAddressAndPort endpoint)
+    {
+        migrationCoordinator.removeAndIgnoreEndpoint(endpoint);
+    }
+
+    @Override
+    public void onChange(InetAddressAndPort endpoint, ApplicationState state, VersionedValue value)
+    {
+        if (state == ApplicationState.SCHEMA)
+        {
+            EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
+            if (epState != null && !Gossiper.instance.isDeadState(epState) && StorageService.instance.getTokenMetadata().isMember(endpoint))
+            {
+                migrationCoordinator.reportEndpointVersion(endpoint, UUID.fromString(value.value));
+            }
+        }
+    }
+
+    @Override
+    public void onJoin(InetAddressAndPort endpoint, EndpointState epState)
+    {
+        // no-op
+    }
+
+    @Override
+    public void beforeChange(InetAddressAndPort endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue)
+    {
+        // no-op
+    }
+
+    @Override
+    public void onAlive(InetAddressAndPort endpoint, EndpointState state)
+    {
+        // no-op
+    }
+
+    @Override
+    public void onDead(InetAddressAndPort endpoint, EndpointState state)
+    {
+        // no-op
+    }
+
+    @Override
+    public void onRestart(InetAddressAndPort endpoint, EndpointState state)
+    {
+        // no-op
+    }
+
+    private synchronized SchemaTransformationResult applyMutations(Collection<Mutation> schemaMutations)
+    {
+        // fetch the current state of schema for the affected keyspaces only
+        DistributedSchema before = schema;
+
+        // apply the schema mutations
+        SchemaKeyspace.applyChanges(schemaMutations);
+
+        // only compare the keyspaces affected by this set of schema mutations
+        Set<String> affectedKeyspaces = SchemaKeyspace.affectedKeyspaces(schemaMutations);
+
+        // fetch the new versions of the altered keyspaces
+        Keyspaces updatedKeyspaces = SchemaKeyspace.fetchKeyspaces(affectedKeyspaces);
+        Set<String> removedKeyspaces = affectedKeyspaces.stream().filter(ks -> !updatedKeyspaces.containsKeyspace(ks)).collect(Collectors.toSet());
+        Keyspaces afterKeyspaces = before.getKeyspaces().withAddedOrReplaced(updatedKeyspaces).without(removedKeyspaces);
+
+        Keyspaces.KeyspacesDiff diff = Keyspaces.diff(before.getKeyspaces(), afterKeyspaces);
+        UUID version = SchemaKeyspace.calculateSchemaDigest();
+        DistributedSchema after = new DistributedSchema(afterKeyspaces, version);
+        SchemaTransformationResult update = new SchemaTransformationResult(before, after, diff);
+
+        logger.info("Applying schema change due to received mutations: {}", update);
+        updateSchema(update, false);
+        return update;
+    }
+
+    @Override
+    public synchronized SchemaTransformationResult apply(SchemaTransformation transformation, boolean local)
+    {
+        DistributedSchema before = schema;
+        Keyspaces afterKeyspaces = transformation.apply(before.getKeyspaces());
+        Keyspaces.KeyspacesDiff diff = Keyspaces.diff(before.getKeyspaces(), afterKeyspaces);
+
+        if (diff.isEmpty())
+            return new SchemaTransformationResult(before, before, diff);
+
+        Collection<Mutation> mutations = SchemaKeyspace.convertSchemaDiffToMutations(diff, transformation.fixedTimestampMicros().orElse(FBUtilities.timestampMicros()));
+        SchemaKeyspace.applyChanges(mutations);
+
+        DistributedSchema after = new DistributedSchema(afterKeyspaces, SchemaKeyspace.calculateSchemaDigest());
+        SchemaTransformationResult update = new SchemaTransformationResult(before, after, diff);
+
+        updateSchema(update, local);
+        if (!local)
+        {
+            migrationCoordinator.executor.submit(() -> {
+                Pair<Set<InetAddressAndPort>, Set<InetAddressAndPort>> endpoints = migrationCoordinator.pushSchemaMutations(mutations);
+                SchemaAnnouncementDiagnostics.schemaTransformationAnnounced(endpoints.left, endpoints.right, transformation);
+            });
+        }
+
+        return update;
+    }
+
+    private void updateSchema(SchemaTransformationResult update, boolean local)
+    {
+        if (!update.diff.isEmpty())
+        {
+            this.schema = update.after;
+            logger.debug("Schema updated: {}", update);
+            updateCallback.accept(update, true);
+            if (!local)
+            {
+                migrationCoordinator.announce(update.after.getVersion());
+            }
+        }
+        else
+        {
+            logger.debug("Schema update is empty - skipping");
+        }
+    }
+
+    private synchronized void reload()
+    {
+        DistributedSchema before = this.schema;
+        DistributedSchema after = new DistributedSchema(SchemaKeyspace.fetchNonSystemKeyspaces(), SchemaKeyspace.calculateSchemaDigest());
+        Keyspaces.KeyspacesDiff diff = Keyspaces.diff(before.getKeyspaces(), after.getKeyspaces());
+        SchemaTransformationResult update = new SchemaTransformationResult(before, after, diff);
+
+        updateSchema(update, false);
+    }
+
+    @Override
+    public void reset(boolean local)
+    {
+        if (local)
+        {
+            reload();
+        }
+        else
+        {
+            migrationCoordinator.reset();
+            if (!migrationCoordinator.awaitSchemaRequests(CassandraRelevantProperties.MIGRATION_DELAY.getLong()))
+            {
+                logger.error("Timeout exceeded when waiting for schema from other nodes");
+            }
+        }
+    }
+
+    /**
+     * When clear is called, the update handler flags that a clear was requested. From then on the migration
+     * coordinator reports an empty schema version and applies whatever it receives from other nodes. When the first
+     * mutations from another node arrive, the handler first truncates the local schema tables and then applies those
+     * mutations. The flag is reset afterwards.
+     * <p>
+     * This way the clear is postponed until we have actually fetched a schema that can be used as a replacement.
+     * Otherwise nothing happens: the flag is simply reset after the timeout and exceptions are thrown to the caller.
+     *
+     * @return an {@link Awaitable} that is signalled once the cleared schema has been replaced by mutations fetched
+     *         from another node
+     */
+    @Override
+    public Awaitable clear()
+    {
+        synchronized (this)
+        {
+            if (requestedReset == null)
+            {
+                requestedReset = new AsyncPromise<>();
+                migrationCoordinator.reset();
+            }
+            return requestedReset;
+        }
+    }
+
+    private UUID getSchemaVersionForCoordinator()
+    {
+        if (requestedReset != null)
+            return SchemaConstants.emptyVersion;
+        else
+            return schema.getVersion();
+    }
+
+    private synchronized void applyMutationsFromCoordinator(InetAddressAndPort from, Collection<Mutation> mutations)
+    {
+        if (requestedReset != null && !mutations.isEmpty())
+        {
+            schema = DistributedSchema.EMPTY;
+            SchemaKeyspace.truncate();
+            requestedReset.setSuccess(null);
+            requestedReset = null;
+        }
+        applyMutations(mutations);
+    }
+
+    private synchronized Collection<Mutation> getSchemaMutations()
+    {
+        if (requestedReset != null)
+            return Collections.emptyList();
+        else
+            return SchemaKeyspace.convertSchemaToMutations();
+    }
+
+    public Map<UUID, Set<InetAddressAndPort>> getOutstandingSchemaVersions()
+    {
+        return migrationCoordinator.outstandingVersions();
+    }
+}
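A sketch of the deferred clear described in the javadoc of clear() (the helper below is hypothetical, and the blocking call is simply one of Awaitable's await variants): the returned Awaitable is only signalled once a non-empty batch of schema mutations has been pulled from a peer and applied over the truncated local schema tables.

package org.apache.cassandra.schema;

import org.apache.cassandra.utils.concurrent.Awaitable;

final class DeferredClearExample
{
    static void resetLocalSchema(SchemaUpdateHandler handler) throws InterruptedException
    {
        Awaitable cleared = handler.clear();  // only flags the reset; the local schema tables stay intact for now
        cleared.await();                      // signalled after replacement mutations from a peer have been applied
    }
}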
diff --git a/src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandlerFactory.java b/src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandlerFactory.java
new file mode 100644
index 0000000..2303663
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandlerFactory.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.function.BiConsumer;
+
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
+
+public class DefaultSchemaUpdateHandlerFactory implements SchemaUpdateHandlerFactory
+{
+    public static final SchemaUpdateHandlerFactory instance = new DefaultSchemaUpdateHandlerFactory();
+
+    @Override
+    public SchemaUpdateHandler getSchemaUpdateHandler(boolean online, BiConsumer<SchemaTransformationResult, Boolean> updateSchemaCallback)
+    {
+        return online
+               ? new DefaultSchemaUpdateHandler(updateSchemaCallback)
+               : new OfflineSchemaUpdateHandler(updateSchemaCallback);
+    }
+}
diff --git a/src/java/org/apache/cassandra/schema/Diff.java b/src/java/org/apache/cassandra/schema/Diff.java
index 7112c85..dd26c7b 100644
--- a/src/java/org/apache/cassandra/schema/Diff.java
+++ b/src/java/org/apache/cassandra/schema/Diff.java
@@ -61,4 +61,15 @@
             return String.format("%s -> %s (%s)", before, after, kind);
         }
     }
+
+    @Override
+    public String toString()
+    {
+        return "Diff{" +
+               "created=" + created +
+               ", dropped=" + dropped +
+               ", altered=" + altered +
+               '}';
+    }
+
 }
diff --git a/src/java/org/apache/cassandra/schema/DistributedSchema.java b/src/java/org/apache/cassandra/schema/DistributedSchema.java
new file mode 100644
index 0000000..4fed9bb
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/DistributedSchema.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.Objects;
+import java.util.UUID;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Immutable snapshot of the current schema along with its version.
+ */
+public class DistributedSchema
+{
+    public static final DistributedSchema EMPTY = new DistributedSchema(Keyspaces.none(), SchemaConstants.emptyVersion);
+
+    private final Keyspaces keyspaces;
+    private final UUID version;
+
+    public DistributedSchema(Keyspaces keyspaces, UUID version)
+    {
+        Objects.requireNonNull(keyspaces);
+        Objects.requireNonNull(version);
+        this.keyspaces = keyspaces;
+        this.version = version;
+        validate();
+    }
+
+    public Keyspaces getKeyspaces()
+    {
+        return keyspaces;
+    }
+
+    public boolean isEmpty()
+    {
+        return SchemaConstants.emptyVersion.equals(version);
+    }
+
+    public UUID getVersion()
+    {
+        return version;
+    }
+
+    /**
+     * Converts the given schema version to a string. Returns {@code "unknown"} if {@code version} is {@code null},
+     * or {@code "(empty)"} if {@code version} refers to the {@link SchemaConstants#emptyVersion empty} schema.
+     */
+    public static String schemaVersionToString(UUID version)
+    {
+        return version == null
+               ? "unknown"
+               : SchemaConstants.emptyVersion.equals(version)
+                 ? "(empty)"
+                 : version.toString();
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        DistributedSchema schema = (DistributedSchema) o;
+        return keyspaces.equals(schema.keyspaces) && version.equals(schema.version);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(keyspaces, version);
+    }
+
+    private void validate()
+    {
+        keyspaces.forEach(ksm -> {
+            ksm.tables.forEach(tm -> Preconditions.checkArgument(tm.keyspace.equals(ksm.name), "Table %s metadata points to keyspace %s while defined in keyspace %s", tm.name, tm.keyspace, ksm.name));
+            ksm.views.forEach(vm -> Preconditions.checkArgument(vm.keyspace().equals(ksm.name), "View %s metadata points to keyspace %s while defined in keyspace %s", vm.name(), vm.keyspace(), ksm.name));
+            ksm.types.forEach(ut -> Preconditions.checkArgument(ut.keyspace.equals(ksm.name), "Type %s points to keyspace %s while defined in keyspace %s", ut.name, ut.keyspace, ksm.name));
+            ksm.functions.forEach(f -> Preconditions.checkArgument(f.name().keyspace.equals(ksm.name), "Function %s points to keyspace %s while defined in keyspace %s", f.name().name, f.name().keyspace, ksm.name));
+        });
+    }
+}
diff --git a/src/java/org/apache/cassandra/schema/Indexes.java b/src/java/org/apache/cassandra/schema/Indexes.java
index a83be4b..2e95779 100644
--- a/src/java/org/apache/cassandra/schema/Indexes.java
+++ b/src/java/org/apache/cassandra/schema/Indexes.java
@@ -21,7 +21,6 @@
 import java.util.stream.Stream;
 
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
 
 import static java.lang.String.format;
 
diff --git a/src/java/org/apache/cassandra/schema/KeyspaceMetadata.java b/src/java/org/apache/cassandra/schema/KeyspaceMetadata.java
index a029168..6d85391 100644
--- a/src/java/org/apache/cassandra/schema/KeyspaceMetadata.java
+++ b/src/java/org/apache/cassandra/schema/KeyspaceMetadata.java
@@ -119,6 +119,11 @@
         return new KeyspaceMetadata(name, kind, params, tables, views, types, functions);
     }
 
+    public KeyspaceMetadata empty()
+    {
+        return new KeyspaceMetadata(this.name, this.kind, this.params, Tables.none(), Views.none(), Types.none(), Functions.none());
+    }
+
     public boolean isVirtual()
     {
         return kind == Kind.VIRTUAL;
@@ -303,7 +308,7 @@
                                                     name));
         }
 
-        params.validate(name);
+        params.validate(name, null);
 
         tablesAndViews().forEach(TableMetadata::validate);
 
@@ -391,5 +396,19 @@
 
             return Optional.of(new KeyspaceDiff(before, after, tables, views, types, udfs, udas));
         }
+
+        @Override
+        public String toString()
+        {
+            return "KeyspaceDiff{" +
+                   "before=" + before +
+                   ", after=" + after +
+                   ", tables=" + tables +
+                   ", views=" + views +
+                   ", types=" + types +
+                   ", udfs=" + udfs +
+                   ", udas=" + udas +
+                   '}';
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/schema/KeyspaceParams.java b/src/java/org/apache/cassandra/schema/KeyspaceParams.java
index cc46474..539993e 100644
--- a/src/java/org/apache/cassandra/schema/KeyspaceParams.java
+++ b/src/java/org/apache/cassandra/schema/KeyspaceParams.java
@@ -23,6 +23,8 @@
 import com.google.common.base.MoreObjects;
 import com.google.common.base.Objects;
 
+import org.apache.cassandra.service.ClientState;
+
 /**
  * An immutable class representing keyspace parameters (durability and replication).
  */
@@ -89,9 +91,9 @@
         return new KeyspaceParams(true, ReplicationParams.nts(args));
     }
 
-    public void validate(String name)
+    public void validate(String name, ClientState state)
     {
-        replication.validate(name);
+        replication.validate(name, state);
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/schema/Keyspaces.java b/src/java/org/apache/cassandra/schema/Keyspaces.java
index 1938d93..e9bd92c 100644
--- a/src/java/org/apache/cassandra/schema/Keyspaces.java
+++ b/src/java/org/apache/cassandra/schema/Keyspaces.java
@@ -17,9 +17,9 @@
  */
 package org.apache.cassandra.schema;
 
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.Optional;
-import java.util.Set;
 import java.util.function.Predicate;
 import java.util.stream.Stream;
 
@@ -67,7 +67,7 @@
         return keyspaces.values().stream();
     }
 
-    public Set<String> names()
+    public ImmutableSet<String> names()
     {
         return keyspaces.keySet();
     }
@@ -124,6 +124,11 @@
         return filter(k -> k != keyspace);
     }
 
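+    /**
+     * Returns a new {@link Keyspaces} with every keyspace whose name appears in the provided collection removed.
+     */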
+    public Keyspaces without(Collection<String> names)
+    {
+        return filter(k -> !names.contains(k.name));
+    }
+
     public Keyspaces withAddedOrUpdated(KeyspaceMetadata keyspace)
     {
         return builder().add(Iterables.filter(this, k -> !k.name.equals(keyspace.name)))
@@ -131,6 +136,37 @@
                         .build();
     }
 
+    /**
+     * Returns a new {@link Keyspaces} equivalent to this one, but with the provided keyspace metadata either added (if
+     * this {@link Keyspaces} does not have that keyspace), or replaced by the provided definition.
+     *
+     * <p>Note that if this contains the provided keyspace, its pre-existing definition is discarded and completely
+     * replaced with the newly provided one. See {@link #withAddedOrUpdated(KeyspaceMetadata)} if you wish the provided
+     * definition to be "merged" with the existing one instead.
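+     *
+     * <p>For illustration only, a hedged usage sketch (the keyspace name is hypothetical):
+     * <pre>{@code
+     * Keyspaces updated = keyspaces.withAddedOrReplaced(KeyspaceMetadata.create("ks1", KeyspaceParams.simple(1)));
+     * // any pre-existing definition of "ks1" (its tables, views, types and functions) is discarded
+     * }</pre>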
+     *
+     * @param keyspace the keyspace metadata to add, or replace the existing definition with.
+     * @return the newly created object.
+     */
+    public Keyspaces withAddedOrReplaced(KeyspaceMetadata keyspace)
+    {
+        return builder().add(Iterables.filter(this, k -> !k.name.equals(keyspace.name)))
+                        .add(keyspace)
+                        .build();
+    }
+
+    /**
+     * Calls {@link #withAddedOrReplaced(KeyspaceMetadata)} on all the keyspaces of the provided {@link Keyspaces}.
+     *
+     * @param keyspaces the keyspaces to add, or replace if existing.
+     * @return the newly created object.
+     */
+    public Keyspaces withAddedOrReplaced(Keyspaces keyspaces)
+    {
+        return builder().add(Iterables.filter(this, k -> !keyspaces.containsKeyspace(k.name)))
+                        .add(keyspaces)
+                        .build();
+    }
+
     public void validate()
     {
         keyspaces.values().forEach(KeyspaceMetadata::validate);
@@ -154,6 +190,11 @@
         return keyspaces.values().toString();
     }
 
+    public int size()
+    {
+        return keyspaces.size();
+    }
+
     public static final class Builder
     {
         private final ImmutableMap.Builder<String, KeyspaceMetadata> keyspaces = new ImmutableMap.Builder<>();
@@ -192,14 +233,14 @@
         }
     }
 
-    static KeyspacesDiff diff(Keyspaces before, Keyspaces after)
+    public static KeyspacesDiff diff(Keyspaces before, Keyspaces after)
     {
         return KeyspacesDiff.diff(before, after);
     }
 
     public static final class KeyspacesDiff
     {
-        static final KeyspacesDiff NONE = new KeyspacesDiff(Keyspaces.none(), Keyspaces.none(), ImmutableList.of());
+        public static final KeyspacesDiff NONE = new KeyspacesDiff(Keyspaces.none(), Keyspaces.none(), ImmutableList.of());
 
         public final Keyspaces created;
         public final Keyspaces dropped;
@@ -235,5 +276,15 @@
         {
             return created.isEmpty() && dropped.isEmpty() && altered.isEmpty();
         }
+
+        @Override
+        public String toString()
+        {
+            return "KeyspacesDiff{" +
+                   "created=" + created +
+                   ", dropped=" + dropped +
+                   ", altered=" + altered +
+                   '}';
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/schema/MemtableParams.java b/src/java/org/apache/cassandra/schema/MemtableParams.java
new file mode 100644
index 0000000..3470b7a
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/MemtableParams.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.InheritingClass;
+import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.memtable.SkipListMemtableFactory;
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+/**
+ * Memtable types and options are specified with these parameters. Memtable classes must either contain a static
+ * {@code FACTORY} field (if they take no arguments other than class), or implement a
+ * {@code factory(Map<String, String>)} method.
+ *
+ * The latter should consume any further options (using {@code map.remove}).
+ *
+ * See Memtable_API.md for further details on the configuration and usage of memtable implementations.
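+ *
+ * For illustration only, a hedged sketch of that contract (the class and option names are hypothetical):
+ * <pre>{@code
+ * public class ShardedMemtable  // hypothetical memtable implementation
+ * {
+ *     // either expose a static factory field (no options beyond the class itself)...
+ *     public static final Memtable.Factory FACTORY = new ShardedMemtableFactory();
+ *
+ *     // ...or a factory method that consumes its own options from the map it is given
+ *     public static Memtable.Factory factory(Map<String, String> options)
+ *     {
+ *         String shards = options.remove("shards");
+ *         return new ShardedMemtableFactory(shards);
+ *     }
+ * }
+ * }</pre>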
+ */
+public final class MemtableParams
+{
+    private final Memtable.Factory factory;
+    private final String configurationKey;
+
+    private MemtableParams(Memtable.Factory factory, String configurationKey)
+    {
+        this.configurationKey = configurationKey;
+        this.factory = factory;
+    }
+
+    public String configurationKey()
+    {
+        return configurationKey;
+    }
+
+    public Memtable.Factory factory()
+    {
+        return factory;
+    }
+
+    @Override
+    public String toString()
+    {
+        return configurationKey;
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof MemtableParams))
+            return false;
+
+        MemtableParams c = (MemtableParams) o;
+
+        return Objects.equal(configurationKey, c.configurationKey);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return configurationKey.hashCode();
+    }
+
+    private static final String DEFAULT_CONFIGURATION_KEY = "default";
+    private static final Memtable.Factory DEFAULT_MEMTABLE_FACTORY = SkipListMemtableFactory.INSTANCE;
+    private static final ParameterizedClass DEFAULT_CONFIGURATION = SkipListMemtableFactory.CONFIGURATION;
+    private static final Map<String, ParameterizedClass>
+        CONFIGURATION_DEFINITIONS = expandDefinitions(DatabaseDescriptor.getMemtableConfigurations());
+    private static final Map<String, MemtableParams> CONFIGURATIONS = new HashMap<>();
+    public static final MemtableParams DEFAULT = get(null);
+
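+    /**
+     * Returns the memtable parameters registered under the given configuration key, or the "default" configuration
+     * when the key is {@code null}, parsing and caching the definition on first access.
+     *
+     * @throws ConfigurationException if no configuration with the given key is defined
+     */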
+    public static MemtableParams get(String key)
+    {
+        if (key == null)
+            key = DEFAULT_CONFIGURATION_KEY;
+
+        synchronized (CONFIGURATIONS)
+        {
+            return CONFIGURATIONS.computeIfAbsent(key, MemtableParams::parseConfiguration);
+        }
+    }
+
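+    /**
+     * Like {@link #get(String)}, but if the requested configuration is invalid or missing it logs the problem and
+     * falls back to the default factory while keeping the requested key, so that the schema does not diverge
+     * between nodes.
+     */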
+    public static MemtableParams getWithFallback(String key)
+    {
+        try
+        {
+            return get(key);
+        }
+        catch (ConfigurationException e)
+        {
+            LoggerFactory.getLogger(MemtableParams.class).error("Invalid memtable configuration \"" + key + "\" in schema. " +
+                                                                "Falling back to default to avoid schema mismatch.\n" +
+                                                                "Please ensure the correct definition is given in cassandra.yaml.",
+                                                                e);
+            return new MemtableParams(DEFAULT.factory(), key);
+        }
+    }
+
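+    /**
+     * Resolves each {@link InheritingClass} definition against the configurations declared before it, so that one
+     * configuration can inherit the class and parameters of another, and ensures a "default" entry is present.
+     */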
+    @VisibleForTesting
+    static Map<String, ParameterizedClass> expandDefinitions(Map<String, InheritingClass> memtableConfigurations)
+    {
+        if (memtableConfigurations == null)
+            return ImmutableMap.of(DEFAULT_CONFIGURATION_KEY, DEFAULT_CONFIGURATION);
+
+        LinkedHashMap<String, ParameterizedClass> configs = new LinkedHashMap<>(memtableConfigurations.size() + 1);
+
+        // If default is not overridden, add an entry first so that other configurations can inherit from it.
+        // If it is, process it at its point of definition, so that the default can inherit from another configuration.
+        if (!memtableConfigurations.containsKey(DEFAULT_CONFIGURATION_KEY))
+            configs.put(DEFAULT_CONFIGURATION_KEY, DEFAULT_CONFIGURATION);
+
+        for (Map.Entry<String, InheritingClass> en : memtableConfigurations.entrySet())
+            configs.put(en.getKey(), en.getValue().resolve(configs));
+
+        return ImmutableMap.copyOf(configs);
+    }
+
+    private static MemtableParams parseConfiguration(String configurationKey)
+    {
+        ParameterizedClass definition = CONFIGURATION_DEFINITIONS.get(configurationKey);
+
+        if (definition == null)
+            throw new ConfigurationException("Memtable configuration \"" + configurationKey + "\" not found.");
+        return new MemtableParams(getMemtableFactory(definition), configurationKey);
+    }
+
+
+    private static Memtable.Factory getMemtableFactory(ParameterizedClass options)
+    {
+        // Special-case this so that we don't initialize the memtable class for tests that need to delay that initialization.
+        if (options == DEFAULT_CONFIGURATION)
+            return DEFAULT_MEMTABLE_FACTORY;
+
+        String className = options.class_name;
+        if (className == null || className.isEmpty())
+            throw new ConfigurationException("The 'class_name' option must be specified.");
+
+        className = className.contains(".") ? className : "org.apache.cassandra.db.memtable." + className;
+        try
+        {
+            Memtable.Factory factory;
+            Class<?> clazz = Class.forName(className);
+            final Map<String, String> parametersCopy = options.parameters != null
+                                                       ? new HashMap<>(options.parameters)
+                                                       : new HashMap<>();
+            try
+            {
+                Method factoryMethod = clazz.getDeclaredMethod("factory", Map.class);
+                factory = (Memtable.Factory) factoryMethod.invoke(null, parametersCopy);
+            }
+            catch (NoSuchMethodException e)
+            {
+                // continue with FACTORY field
+                Field factoryField = clazz.getDeclaredField("FACTORY");
+                factory = (Memtable.Factory) factoryField.get(null);
+            }
+            if (!parametersCopy.isEmpty())
+                throw new ConfigurationException("Memtable class " + className + " does not accept any further parameters, but " +
+                                                 parametersCopy + " were given.");
+            return factory;
+        }
+        catch (NoSuchFieldException | ClassNotFoundException | IllegalAccessException | InvocationTargetException | ClassCastException e)
+        {
+            if (e.getCause() instanceof ConfigurationException)
+                throw (ConfigurationException) e.getCause();
+            throw new ConfigurationException("Could not create memtable factory for class " + options, e);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/schema/MigrationCoordinator.java b/src/java/org/apache/cassandra/schema/MigrationCoordinator.java
index 04e6625..61ef4c8 100644
--- a/src/java/org/apache/cassandra/schema/MigrationCoordinator.java
+++ b/src/java/org/apache/cassandra/schema/MigrationCoordinator.java
@@ -20,6 +20,7 @@
 
 import java.lang.management.ManagementFactory;
 import java.net.UnknownHostException;
+import java.time.Duration;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -29,32 +30,34 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.UUID;
-import java.util.WeakHashMap;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
 import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
 import java.util.function.LongSupplier;
+import java.util.function.Supplier;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Futures;
+import org.apache.cassandra.utils.NoSpamLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.FutureTask;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
-import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.gms.ApplicationState;
 import org.apache.cassandra.gms.EndpointState;
-import org.apache.cassandra.gms.FailureDetector;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.locator.InetAddressAndPort;
@@ -63,12 +66,21 @@
 import org.apache.cassandra.net.NoPayload;
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.Simulate;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.IGNORED_SCHEMA_CHECK_ENDPOINTS;
 import static org.apache.cassandra.config.CassandraRelevantProperties.IGNORED_SCHEMA_CHECK_VERSIONS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.SCHEMA_PULL_INTERVAL_MS;
+import static org.apache.cassandra.net.Verb.SCHEMA_PUSH_REQ;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.Simulate.With.MONITORS;
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
 
 /**
 * Migration coordinator is responsible for tracking schema versions on various nodes and, if needed, synchronizing the
@@ -78,12 +90,16 @@
  * In particular the Migration Coordinator keeps track of all schema versions reported from each node in the cluster.
  * As long as a certain version is advertised by some node, it is being tracked. As long as a version is tracked,
  * the migration coordinator tries to fetch it by its periodic job.
+ *
+ * It works in close cooperation with {@link DefaultSchemaUpdateHandler} which is responsible for maintaining local
+ * schema metadata stored in {@link SchemaKeyspace}.
  */
+@Simulate(with = MONITORS)
 public class MigrationCoordinator
 {
     private static final Logger logger = LoggerFactory.getLogger(MigrationCoordinator.class);
     private static final NoSpamLogger noSpamLogger = NoSpamLogger.getLogger(MigrationCoordinator.logger, 1, TimeUnit.MINUTES);
-    private static final Future<Void> FINISHED_FUTURE = Futures.immediateFuture(null);
+    private static final Future<Void> FINISHED_FUTURE = ImmediateFuture.success(null);
 
     private static LongSupplier getUptimeFn = () -> ManagementFactory.getRuntimeMXBean().getUptime();
 
@@ -93,21 +109,8 @@
         getUptimeFn = supplier;
     }
 
-
     private static final int MIGRATION_DELAY_IN_MS = CassandraRelevantProperties.MIGRATION_DELAY.getInt();
-    private static final int MAX_OUTSTANDING_VERSION_REQUESTS = 3;
-
-    public static final MigrationCoordinator instance = new MigrationCoordinator();
-
-    /**
-     * @see CassandraRelevantProperties#SCHEMA_PULL_BACKOFF_DELAY_MS
-     */
-    private static final long BACKOFF_DELAY_MS = CassandraRelevantProperties.SCHEMA_PULL_BACKOFF_DELAY_MS.getInt();
-
-    /**
-     * Holds the timestamps in ms for last pull request attempts.
-     */
-    private final WeakHashMap<InetAddressAndPort, Long> lastPullAttemptTimestamps = new WeakHashMap<>();
+    public static final int MAX_OUTSTANDING_VERSION_REQUESTS = 3;
 
     private static ImmutableSet<UUID> getIgnoredVersions()
     {
@@ -169,7 +172,7 @@
         /**
          * Threads waiting for schema synchronization are waiting until this object is signalled
          */
-        private final WaitQueue waitQueue = new WaitQueue();
+        private final WaitQueue waitQueue = newWaitQueue();
 
         /**
          * Whether this schema version has been received
@@ -215,30 +218,52 @@
 
     private final Map<UUID, VersionInfo> versionInfo = new HashMap<>();
     private final Map<InetAddressAndPort, UUID> endpointVersions = new HashMap<>();
-    private final AtomicInteger inflightTasks = new AtomicInteger();
     private final Set<InetAddressAndPort> ignoredEndpoints = getIgnoredEndpoints();
+    private final ScheduledExecutorService periodicCheckExecutor;
+    private final MessagingService messagingService;
+    private final AtomicReference<ScheduledFuture<?>> periodicPullTask = new AtomicReference<>();
+    private final int maxOutstandingVersionRequests;
+    private final Gossiper gossiper;
+    private final Supplier<UUID> schemaVersion;
+    private final BiConsumer<InetAddressAndPort, Collection<Mutation>> schemaUpdateCallback;
 
-    public void start()
-    {
-        int interval = CassandraRelevantProperties.SCHEMA_PULL_INTERVAL_MS.getInt();
-        ScheduledExecutors.scheduledTasks.scheduleWithFixedDelay(this::pullUnreceivedSchemaVersions, interval, interval, TimeUnit.MILLISECONDS);
-    }
+    final ExecutorPlus executor;
 
     /**
-     * Resets the migration coordinator by notifying all waiting threads and removing all the existing version info.
+     * Creates, but does not start, a migration coordinator instance.
+     * @param messagingService      messaging service instance used to communicate with other nodes for pulling schema
+     *                              and pushing changes
+     * @param periodicCheckExecutor executor on which the periodic checks are scheduled
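+     * @param executor              executor on which schema pull tasks are run
+     * @param maxOutstandingVersionRequests maximum number of concurrent pull requests per tracked schema version
+     * @param gossiper              gossiper used to learn about live members and their endpoint states
+     * @param schemaVersionSupplier supplier of the current local schema version
+     * @param schemaUpdateCallback  callback invoked with the schema mutations received from a remote endpoint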
      */
-    public synchronized void reset()
+    MigrationCoordinator(MessagingService messagingService,
+                         ExecutorPlus executor,
+                         ScheduledExecutorService periodicCheckExecutor,
+                         int maxOutstandingVersionRequests,
+                         Gossiper gossiper,
+                         Supplier<UUID> schemaVersionSupplier,
+                         BiConsumer<InetAddressAndPort, Collection<Mutation>> schemaUpdateCallback)
     {
-        logger.info("Resetting migration coordinator...");
-
-        // clear all the managed information
-        this.endpointVersions.clear();
-        clearVersionsInfo();
+        this.messagingService = messagingService;
+        this.executor = executor;
+        this.periodicCheckExecutor = periodicCheckExecutor;
+        this.maxOutstandingVersionRequests = maxOutstandingVersionRequests;
+        this.gossiper = gossiper;
+        this.schemaVersion = schemaVersionSupplier;
+        this.schemaUpdateCallback = schemaUpdateCallback;
     }
 
-    synchronized List<Future<Void>> pullUnreceivedSchemaVersions()
+    void start()
     {
-        List<Future<Void>> futures = new ArrayList<>();
+        logger.info("Starting migration coordinator and scheduling pulling schema versions every {}", Duration.ofMillis(SCHEMA_PULL_INTERVAL_MS.getLong()));
+        announce(schemaVersion.get());
+        periodicPullTask.updateAndGet(curTask -> curTask == null
+                                                 ? periodicCheckExecutor.scheduleWithFixedDelay(this::pullUnreceivedSchemaVersions, SCHEMA_PULL_INTERVAL_MS.getLong(), SCHEMA_PULL_INTERVAL_MS.getLong(), TimeUnit.MILLISECONDS)
+                                                 : curTask);
+    }
+
+    private synchronized void pullUnreceivedSchemaVersions()
+    {
+        logger.debug("Pulling unreceived schema versions...");
         for (VersionInfo info : versionInfo.values())
         {
             if (info.wasReceived() || info.outstandingRequests.size() > 0)
@@ -247,15 +272,11 @@
                 continue;
             }
 
-            Future<Void> future = maybePullSchema(info);
-            if (future != null && future != FINISHED_FUTURE)
-                futures.add(future);
+            maybePullSchema(info);
         }
-
-        return futures;
     }
 
-    synchronized Future<Void> maybePullSchema(VersionInfo info)
+    private synchronized Future<Void> maybePullSchema(VersionInfo info)
     {
         if (info.endpoints.isEmpty() || info.wasReceived() || !shouldPullSchema(info.version))
         {
@@ -263,13 +284,13 @@
             return FINISHED_FUTURE;
         }
 
-        if (info.outstandingRequests.size() >= getMaxOutstandingVersionRequests())
+        if (info.outstandingRequests.size() >= maxOutstandingVersionRequests)
         {
-            logger.trace("Not pulling schema {} because the number of outstanding requests has been exceeded ({} >= {})", info.version, info.outstandingRequests.size(), getMaxOutstandingVersionRequests());
+            logger.trace("Not pulling schema {} because the number of outstanding requests has been exceeded ({} >= {})", info.version, info.outstandingRequests.size(), maxOutstandingVersionRequests);
             return FINISHED_FUTURE;
         }
 
-        for (int i=0, isize=info.requestQueue.size(); i<isize; i++)
+        for (int i = 0, isize = info.requestQueue.size(); i < isize; i++)
         {
             InetAddressAndPort endpoint = info.requestQueue.remove();
             if (!info.endpoints.contains(endpoint))
@@ -291,10 +312,10 @@
         }
 
         // no suitable endpoints were found, check again in a minute, the periodic task will pick it up
-        return null;
+        return FINISHED_FUTURE;
     }
 
-    public synchronized Map<UUID, Set<InetAddressAndPort>> outstandingVersions()
+    synchronized Map<UUID, Set<InetAddressAndPort>> outstandingVersions()
     {
         HashMap<UUID, Set<InetAddressAndPort>> map = new HashMap<>();
         for (VersionInfo info : versionInfo.values())
@@ -304,33 +325,21 @@
     }
 
     @VisibleForTesting
-    protected VersionInfo getVersionInfoUnsafe(UUID version)
+    VersionInfo getVersionInfoUnsafe(UUID version)
     {
         return versionInfo.get(version);
     }
 
-    @VisibleForTesting
-    protected int getMaxOutstandingVersionRequests()
+    private boolean shouldPullSchema(UUID version)
     {
-        return MAX_OUTSTANDING_VERSION_REQUESTS;
-    }
-
-    @VisibleForTesting
-    protected boolean isAlive(InetAddressAndPort endpoint)
-    {
-        return FailureDetector.instance.isAlive(endpoint);
-    }
-
-    @VisibleForTesting
-    protected boolean shouldPullSchema(UUID version)
-    {
-        if (Schema.instance.getVersion() == null)
+        UUID localSchemaVersion = schemaVersion.get();
+        if (localSchemaVersion == null)
         {
             logger.debug("Not pulling schema {} because the local schama version is not known yet", version);
             return false;
         }
 
-        if (Schema.instance.isSameVersion(version))
+        if (localSchemaVersion.equals(version))
         {
             logger.debug("Not pulling schema {} because it is the same as the local schema", version);
             return false;
@@ -339,15 +348,7 @@
         return true;
     }
 
-    // Since 3.0.14 protocol contains only a CASSANDRA-13004 bugfix, it is safe to accept schema changes
-    // from both 3.0 and 3.0.14.
-    private static boolean is30Compatible(int version)
-    {
-        return version == MessagingService.current_version || version == MessagingService.VERSION_3014;
-    }
-
-    @VisibleForTesting
-    protected boolean shouldPullFromEndpoint(InetAddressAndPort endpoint)
+    private boolean shouldPullFromEndpoint(InetAddressAndPort endpoint)
     {
         if (endpoint.equals(FBUtilities.getBroadcastAddressAndPort()))
         {
@@ -355,7 +356,7 @@
             return false;
         }
 
-        EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
+        EndpointState state = gossiper.getEndpointStateForEndpoint(endpoint);
         if (state == null)
         {
             logger.trace("Not pulling schema from endpoint {} because its state is unknown", endpoint);
@@ -375,19 +376,19 @@
             return false;
         }
 
-        if (!MessagingService.instance().versions.knows(endpoint))
+        if (!messagingService.versions.knows(endpoint))
         {
             logger.debug("Not pulling schema from {} because their messaging version is unknown", endpoint);
             return false;
         }
 
-        if (MessagingService.instance().versions.getRaw(endpoint) != MessagingService.current_version)
+        if (messagingService.versions.getRaw(endpoint) != MessagingService.current_version)
         {
             logger.debug("Not pulling schema from {} because their schema format is incompatible", endpoint);
             return false;
         }
 
-        if (Gossiper.instance.isGossipOnlyMember(endpoint))
+        if (gossiper.isGossipOnlyMember(endpoint))
         {
             logger.debug("Not pulling schema from {} because it's a gossip only member", endpoint);
             return false;
@@ -395,39 +396,33 @@
         return true;
     }
 
-    @VisibleForTesting
-    protected boolean shouldPullImmediately(InetAddressAndPort endpoint, UUID version)
+    private boolean shouldPullImmediately(InetAddressAndPort endpoint, UUID version)
     {
-        if (Schema.instance.isEmpty() || getUptimeFn.getAsLong() < MIGRATION_DELAY_IN_MS)
+        UUID localSchemaVersion = schemaVersion.get();
+        if (SchemaConstants.emptyVersion.equals(localSchemaVersion) || getUptimeFn.getAsLong() < MIGRATION_DELAY_IN_MS)
         {
             // If we think we may be bootstrapping or have recently started, submit MigrationTask immediately
             logger.debug("Immediately submitting migration task for {}, " +
                          "schema versions: local={}, remote={}",
                          endpoint,
-                         Schema.schemaVersionToString(Schema.instance.getVersion()),
-                         Schema.schemaVersionToString(version));
+                         DistributedSchema.schemaVersionToString(localSchemaVersion),
+                         DistributedSchema.schemaVersionToString(version));
             return true;
         }
         return false;
     }
 
-    @VisibleForTesting
-    protected boolean isLocalVersion(UUID version)
-    {
-        return Schema.instance.isSameVersion(version);
-    }
-
     /**
      * If a previous schema update brought our version the same as the incoming schema, don't apply it
      */
-    synchronized boolean shouldApplySchemaFor(VersionInfo info)
+    private synchronized boolean shouldApplySchemaFor(VersionInfo info)
     {
         if (info.wasReceived())
             return false;
-        return !isLocalVersion(info.version);
+        return !Objects.equals(schemaVersion.get(), info.version);
     }
 
-    public synchronized Future<Void> reportEndpointVersion(InetAddressAndPort endpoint, UUID version)
+    synchronized Future<Void> reportEndpointVersion(InetAddressAndPort endpoint, UUID version)
     {
         logger.debug("Reported schema {} at endpoint {}", version, endpoint);
         if (ignoredEndpoints.contains(endpoint) || IGNORED_VERSIONS.contains(version))
@@ -446,7 +441,7 @@
         }
 
         VersionInfo info = versionInfo.computeIfAbsent(version, VersionInfo::new);
-        if (isLocalVersion(version))
+        if (Objects.equals(schemaVersion.get(), version))
         {
             info.markReceived();
             logger.trace("Schema {} from {} has been marked as recevied because it is equal the local schema", version, endpoint);
@@ -464,19 +459,6 @@
         return maybePullSchema(info);
     }
 
-    public Future<Void> reportEndpointVersion(InetAddressAndPort endpoint, EndpointState state)
-    {
-        if (state == null)
-            return FINISHED_FUTURE;
-
-        UUID version = state.getSchemaVersion();
-
-        if (version == null)
-            return FINISHED_FUTURE;
-
-        return reportEndpointVersion(endpoint, version);
-    }
-
     private synchronized void removeEndpointFromVersion(InetAddressAndPort endpoint, UUID version)
     {
         if (version == null)
@@ -497,21 +479,55 @@
         }
     }
 
-    /**
-     * Remove all version info and signal all the waiting entities.
-     */
-    private synchronized void clearVersionsInfo()
+    private void clearVersionsInfo()
     {
         Iterator<Map.Entry<UUID, VersionInfo>> it = versionInfo.entrySet().iterator();
         while (it.hasNext())
         {
             Map.Entry<UUID, VersionInfo> entry = it.next();
             it.remove();
-            entry.getValue().waitQueue.signalAll();
+            entry.getValue().waitQueue.signal();
         }
     }
 
-    public synchronized void removeAndIgnoreEndpoint(InetAddressAndPort endpoint)
+    private void reportCurrentSchemaVersionOnEndpoint(InetAddressAndPort endpoint)
+    {
+        if (FBUtilities.getBroadcastAddressAndPort().equals(endpoint))
+        {
+            reportEndpointVersion(endpoint, schemaVersion.get());
+        }
+        else
+        {
+            EndpointState state = gossiper.getEndpointStateForEndpoint(endpoint);
+            if (state != null)
+            {
+                UUID v = state.getSchemaVersion();
+                if (v != null)
+                {
+                    reportEndpointVersion(endpoint, v);
+                }
+            }
+        }
+    }
+
+    /**
+     * Resets the migration coordinator by notifying all waiting threads and removing all the existing version info.
+     * Then, it is populated with the information about schema versions on different endpoints provided by Gossiper.
+     * Each version is marked as unreceived so the migration coordinator will start pulling schemas from other nodes.
+     */
+    synchronized void reset()
+    {
+        logger.info("Resetting migration coordinator...");
+
+        // clear all the managed information
+        this.endpointVersions.clear();
+        clearVersionsInfo();
+
+        // now report again the versions we are aware of
+        gossiper.getLiveMembers().forEach(this::reportCurrentSchemaVersionOnEndpoint);
+    }
+
+    synchronized void removeAndIgnoreEndpoint(InetAddressAndPort endpoint)
     {
         logger.debug("Removing and ignoring endpoint {}", endpoint);
         Preconditions.checkArgument(endpoint != null);
@@ -526,24 +542,14 @@
         }
     }
 
-    Future<Void> scheduleSchemaPull(InetAddressAndPort endpoint, VersionInfo info)
+    private Future<Void> scheduleSchemaPull(InetAddressAndPort endpoint, VersionInfo info)
     {
-        FutureTask<Void> task = new FutureTask<>(() -> pullSchema(new Callback(endpoint, info)), null);
+        FutureTask<Void> task = new FutureTask<>(() -> pullSchema(endpoint, new Callback(endpoint, info)));
+
         if (shouldPullImmediately(endpoint, info.version))
         {
-            long nextAttempt = lastPullAttemptTimestamps.getOrDefault(endpoint, 0L) + BACKOFF_DELAY_MS;
-            long now = System.currentTimeMillis();
-            if (nextAttempt <= now)
-            {
-                logger.debug("Pulling {} immediately from {}", info, endpoint);
-                submitToMigrationIfNotShutdown(task);
-            }
-            else
-            {
-                long delay = nextAttempt - now;
-                logger.debug("Previous pull of {} from {} failed. Postponing next attempt for {}ms", info, endpoint, delay);
-                ScheduledExecutors.nonPeriodicTasks.schedule(() -> submitToMigrationIfNotShutdown(task), delay, TimeUnit.MILLISECONDS);
-            }
+            logger.debug("Pulling {} immediately from {}", info, endpoint);
+            submitToMigrationIfNotShutdown(task);
         }
         else
         {
@@ -554,22 +560,29 @@
         return task;
     }
 
-    private static Future<?> submitToMigrationIfNotShutdown(Runnable task)
+    void announce(UUID schemaVersion)
+    {
+        if (gossiper.isEnabled())
+            gossiper.addLocalApplicationState(ApplicationState.SCHEMA, StorageService.instance.valueFactory.schema(schemaVersion));
+        SchemaDiagnostics.versionAnnounced(Schema.instance);
+    }
+
+    private Future<?> submitToMigrationIfNotShutdown(Runnable task)
     {
         boolean skipped = false;
         try
         {
-            if (Stage.MIGRATION.executor().isShutdown() || Stage.MIGRATION.executor().isTerminated())
+            if (executor.isShutdown() || executor.isTerminated())
             {
                 skipped = true;
-                return null;
+                return ImmediateFuture.success(null);
             }
-            return Stage.MIGRATION.submit(task);
+            return executor.submit(task);
         }
         catch (RejectedExecutionException ex)
         {
             skipped = true;
-            return null;
+            return ImmediateFuture.success(null);
         }
         finally
         {
@@ -580,13 +593,7 @@
         }
     }
 
-    @VisibleForTesting
-    protected void mergeSchemaFrom(InetAddressAndPort endpoint, Collection<Mutation> mutations)
-    {
-        Schema.instance.mergeAndAnnounceVersion(mutations);
-    }
-
-    class Callback implements RequestCallback<Collection<Mutation>>
+    private class Callback implements RequestCallback<Collection<Mutation>>
     {
         final InetAddressAndPort endpoint;
         final VersionInfo info;
@@ -620,7 +627,7 @@
                 {
                     try
                     {
-                        mergeSchemaFrom(endpoint, mutations);
+                        schemaUpdateCallback.accept(endpoint, mutations);
                     }
                     catch (Exception e)
                     {
@@ -638,79 +645,65 @@
         }
     }
 
-    private void pullSchema(Callback callback)
+    private void pullSchema(InetAddressAndPort endpoint, RequestCallback<Collection<Mutation>> callback)
     {
-        lastPullAttemptTimestamps.put(callback.endpoint, System.currentTimeMillis());
-
-        if (!isAlive(callback.endpoint))
+        if (!gossiper.isAlive(endpoint))
         {
-            noSpamLogger.warn("Can't send schema pull request: node {} is down.", callback.endpoint);
-            callback.fail();
+            noSpamLogger.warn("Can't send schema pull request: node {} is down.", endpoint);
+            callback.onFailure(endpoint, RequestFailureReason.UNKNOWN);
             return;
         }
 
         // There is a chance that quite some time could have passed between now and the MM#maybeScheduleSchemaPull(),
         // potentially enough for the endpoint node to restart - which is an issue if it does restart upgraded, with
         // a higher major.
-        if (!shouldPullFromEndpoint(callback.endpoint))
+        if (!shouldPullFromEndpoint(endpoint))
         {
-            logger.info("Skipped sending a migration request: node {} has a higher major version now.", callback.endpoint);
-            callback.fail();
+            logger.info("Skipped sending a migration request: node {} has a higher major version now.", endpoint);
+            callback.onFailure(endpoint, RequestFailureReason.UNKNOWN);
             return;
         }
 
-        logger.debug("Requesting schema from {}", callback.endpoint);
-        sendMigrationMessage(callback);
+        logger.debug("Requesting schema from {}", endpoint);
+        sendMigrationMessage(endpoint, callback);
     }
 
-    protected void sendMigrationMessage(Callback callback)
+    private void sendMigrationMessage(InetAddressAndPort endpoint, RequestCallback<Collection<Mutation>> callback)
     {
-        inflightTasks.getAndIncrement();
-        Message message = Message.out(Verb.SCHEMA_PULL_REQ, NoPayload.noPayload);
-        logger.info("Sending schema pull request to {}", callback.endpoint);
-        MessagingService.instance().sendWithCallback(message, callback.endpoint, callback);
+        Message<NoPayload> message = Message.out(Verb.SCHEMA_PULL_REQ, NoPayload.noPayload);
+        logger.info("Sending schema pull request to {}", endpoint);
+        messagingService.sendWithCallback(message, endpoint, callback);
     }
 
     private synchronized Future<Void> pullComplete(InetAddressAndPort endpoint, VersionInfo info, boolean wasSuccessful)
     {
-        inflightTasks.decrementAndGet();
         if (wasSuccessful)
-        {
             info.markReceived();
-            lastPullAttemptTimestamps.remove(endpoint);
-        }
 
         info.outstandingRequests.remove(endpoint);
         info.requestQueue.add(endpoint);
         return maybePullSchema(info);
     }
 
-    public int getInflightTasks()
-    {
-        return inflightTasks.get();
-    }
-
     /**
      * Wait until we've received schema responses for all versions we're aware of
      * @param waitMillis
      * @return true if response for all schemas were received, false if we timed out waiting
      */
-    public boolean awaitSchemaRequests(long waitMillis)
+    boolean awaitSchemaRequests(long waitMillis)
     {
         if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
             Gossiper.waitToSettle();
 
         if (versionInfo.isEmpty())
-        {
             logger.debug("Nothing in versionInfo - so no schemas to wait for");
-        }
 
-        WaitQueue.Signal signal = null;
+        List<WaitQueue.Signal> signalList = null;
         try
         {
             synchronized (this)
             {
-                List<WaitQueue.Signal> signalList = new ArrayList<>(versionInfo.size());
+                signalList = new ArrayList<>(versionInfo.size());
                 for (VersionInfo version : versionInfo.values())
                 {
                     if (version.wasReceived())
@@ -721,22 +714,47 @@
 
                 if (signalList.isEmpty())
                     return true;
-
-                WaitQueue.Signal[] signals = new WaitQueue.Signal[signalList.size()];
-                signalList.toArray(signals);
-                signal = WaitQueue.all(signals);
             }
 
-            return signal.awaitUntil(System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(waitMillis));
-        }
-        catch (InterruptedException e)
-        {
-            throw new RuntimeException(e);
+            long deadline = nanoTime() + TimeUnit.MILLISECONDS.toNanos(waitMillis);
+            return signalList.stream().allMatch(signal -> signal.awaitUntilUninterruptibly(deadline));
         }
         finally
         {
-            if (signal != null)
-                signal.cancel();
+            if (signalList != null)
+                signalList.forEach(WaitQueue.Signal::cancel);
         }
     }
+
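+    /**
+     * Pushes the provided schema mutations to all live members eligible for a push (nodes other than the local one,
+     * with a known and matching messaging version).
+     *
+     * @return a pair of the endpoints the mutations were sent to and the endpoints that were skipped
+     */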
+    Pair<Set<InetAddressAndPort>, Set<InetAddressAndPort>> pushSchemaMutations(Collection<Mutation> schemaMutations)
+    {
+        logger.debug("Pushing schema mutations: {}", schemaMutations);
+        Set<InetAddressAndPort> schemaDestinationEndpoints = new HashSet<>();
+        Set<InetAddressAndPort> schemaEndpointsIgnored = new HashSet<>();
+        Message<Collection<Mutation>> message = Message.out(SCHEMA_PUSH_REQ, schemaMutations);
+        for (InetAddressAndPort endpoint : gossiper.getLiveMembers())
+        {
+            if (shouldPushSchemaTo(endpoint))
+            {
+                logger.debug("Pushing schema mutations to {}: {}", endpoint, schemaMutations);
+                messagingService.send(message, endpoint);
+                schemaDestinationEndpoints.add(endpoint);
+            }
+            else
+            {
+                schemaEndpointsIgnored.add(endpoint);
+            }
+        }
+
+        return Pair.create(schemaDestinationEndpoints, schemaEndpointsIgnored);
+    }
+
+    private boolean shouldPushSchemaTo(InetAddressAndPort endpoint)
+    {
+        // only push schema to nodes with known and equal versions
+        return !endpoint.equals(FBUtilities.getBroadcastAddressAndPort())
+               && messagingService.versions.knows(endpoint)
+               && messagingService.versions.getRaw(endpoint) == MessagingService.current_version;
+    }
+
 }
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/schema/MigrationManager.java b/src/java/org/apache/cassandra/schema/MigrationManager.java
deleted file mode 100644
index 600de9a..0000000
--- a/src/java/org/apache/cassandra/schema/MigrationManager.java
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.schema;
-
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.*;
-import java.lang.management.ManagementFactory;
-import java.util.function.LongSupplier;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.Futures;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.exceptions.AlreadyExistsException;
-import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.gms.*;
-import org.apache.cassandra.io.IVersionedSerializer;
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
-import org.apache.cassandra.utils.FBUtilities;
-
-import static org.apache.cassandra.concurrent.Stage.MIGRATION;
-import static org.apache.cassandra.net.Verb.SCHEMA_PUSH_REQ;
-
-public class MigrationManager
-{
-    private static final Logger logger = LoggerFactory.getLogger(MigrationManager.class);
-
-    public static final MigrationManager instance = new MigrationManager();
-
-    private MigrationManager() {}
-
-    private static boolean shouldPushSchemaTo(InetAddressAndPort endpoint)
-    {
-        // only push schema to nodes with known and equal versions
-        return !endpoint.equals(FBUtilities.getBroadcastAddressAndPort())
-               && MessagingService.instance().versions.knows(endpoint)
-               && MessagingService.instance().versions.getRaw(endpoint) == MessagingService.current_version;
-    }
-
-    public static void announceNewKeyspace(KeyspaceMetadata ksm) throws ConfigurationException
-    {
-        announceNewKeyspace(ksm, false);
-    }
-
-    public static void announceNewKeyspace(KeyspaceMetadata ksm, boolean announceLocally) throws ConfigurationException
-    {
-        announceNewKeyspace(ksm, FBUtilities.timestampMicros(), announceLocally);
-    }
-
-    public static void announceNewKeyspace(KeyspaceMetadata ksm, long timestamp, boolean announceLocally) throws ConfigurationException
-    {
-        ksm.validate();
-
-        if (Schema.instance.getKeyspaceMetadata(ksm.name) != null)
-            throw new AlreadyExistsException(ksm.name);
-
-        logger.info("Create new Keyspace: {}", ksm);
-        announce(SchemaKeyspace.makeCreateKeyspaceMutation(ksm, timestamp), announceLocally);
-    }
-
-    public static void announceNewTable(TableMetadata cfm)
-    {
-        announceNewTable(cfm, true, FBUtilities.timestampMicros());
-    }
-
-    private static void announceNewTable(TableMetadata cfm, boolean throwOnDuplicate, long timestamp)
-    {
-        cfm.validate();
-
-        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(cfm.keyspace);
-        if (ksm == null)
-            throw new ConfigurationException(String.format("Cannot add table '%s' to non existing keyspace '%s'.", cfm.name, cfm.keyspace));
-        // If we have a table or a view which has the same name, we can't add a new one
-        else if (throwOnDuplicate && ksm.getTableOrViewNullable(cfm.name) != null)
-            throw new AlreadyExistsException(cfm.keyspace, cfm.name);
-
-        logger.info("Create new table: {}", cfm);
-        announce(SchemaKeyspace.makeCreateTableMutation(ksm, cfm, timestamp), false);
-    }
-
-    static void announceKeyspaceUpdate(KeyspaceMetadata ksm)
-    {
-        ksm.validate();
-
-        KeyspaceMetadata oldKsm = Schema.instance.getKeyspaceMetadata(ksm.name);
-        if (oldKsm == null)
-            throw new ConfigurationException(String.format("Cannot update non existing keyspace '%s'.", ksm.name));
-
-        logger.info("Update Keyspace '{}' From {} To {}", ksm.name, oldKsm, ksm);
-        announce(SchemaKeyspace.makeCreateKeyspaceMutation(ksm.name, ksm.params, FBUtilities.timestampMicros()), false);
-    }
-
-    public static void announceTableUpdate(TableMetadata tm)
-    {
-        announceTableUpdate(tm, false);
-    }
-
-    public static void announceTableUpdate(TableMetadata updated, boolean announceLocally)
-    {
-        updated.validate();
-
-        TableMetadata current = Schema.instance.getTableMetadata(updated.keyspace, updated.name);
-        if (current == null)
-            throw new ConfigurationException(String.format("Cannot update non existing table '%s' in keyspace '%s'.", updated.name, updated.keyspace));
-        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(current.keyspace);
-
-        updated.validateCompatibility(current);
-
-        long timestamp = FBUtilities.timestampMicros();
-
-        logger.info("Update table '{}/{}' From {} To {}", current.keyspace, current.name, current, updated);
-        Mutation.SimpleBuilder builder = SchemaKeyspace.makeUpdateTableMutation(ksm, current, updated, timestamp);
-
-        announce(builder, announceLocally);
-    }
-
-    static void announceKeyspaceDrop(String ksName)
-    {
-        KeyspaceMetadata oldKsm = Schema.instance.getKeyspaceMetadata(ksName);
-        if (oldKsm == null)
-            throw new ConfigurationException(String.format("Cannot drop non existing keyspace '%s'.", ksName));
-
-        logger.info("Drop Keyspace '{}'", oldKsm.name);
-        announce(SchemaKeyspace.makeDropKeyspaceMutation(oldKsm, FBUtilities.timestampMicros()), false);
-    }
-
-    public static void announceTableDrop(String ksName, String cfName, boolean announceLocally)
-    {
-        TableMetadata tm = Schema.instance.getTableMetadata(ksName, cfName);
-        if (tm == null)
-            throw new ConfigurationException(String.format("Cannot drop non existing table '%s' in keyspace '%s'.", cfName, ksName));
-        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(ksName);
-
-        logger.info("Drop table '{}/{}'", tm.keyspace, tm.name);
-        announce(SchemaKeyspace.makeDropTableMutation(ksm, tm, FBUtilities.timestampMicros()), announceLocally);
-    }
-
-    /**
-     * actively announce a new version to active hosts via rpc
-     * @param schema The schema mutation to be applied
-     */
-    private static void announce(Mutation.SimpleBuilder schema, boolean announceLocally)
-    {
-        List<Mutation> mutations = Collections.singletonList(schema.build());
-
-        if (announceLocally)
-            Schema.instance.merge(mutations);
-        else
-            announce(mutations);
-    }
-
-    public static void announce(Mutation change)
-    {
-        announce(Collections.singleton(change));
-    }
-
-    public static void announce(Collection<Mutation> schema)
-    {
-        Future<?> f = announceWithoutPush(schema);
-
-        Set<InetAddressAndPort> schemaDestinationEndpoints = new HashSet<>();
-        Set<InetAddressAndPort> schemaEndpointsIgnored = new HashSet<>();
-        Message<Collection<Mutation>> message = Message.out(SCHEMA_PUSH_REQ, schema);
-        for (InetAddressAndPort endpoint : Gossiper.instance.getLiveMembers())
-        {
-            if (shouldPushSchemaTo(endpoint))
-            {
-                MessagingService.instance().send(message, endpoint);
-                schemaDestinationEndpoints.add(endpoint);
-            }
-            else
-            {
-                schemaEndpointsIgnored.add(endpoint);
-            }
-        }
-
-        SchemaAnnouncementDiagnostics.schemaMutationsAnnounced(schemaDestinationEndpoints, schemaEndpointsIgnored);
-        FBUtilities.waitOnFuture(f);
-    }
-
-    public static Future<?> announceWithoutPush(Collection<Mutation> schema)
-    {
-        return MIGRATION.submit(() -> Schema.instance.mergeAndAnnounceVersion(schema));
-    }
-
-    public static KeyspacesDiff announce(SchemaTransformation transformation, boolean locally)
-    {
-        long now = FBUtilities.timestampMicros();
-
-        Future<Schema.TransformationResult> future =
-            MIGRATION.submit(() -> Schema.instance.transform(transformation, locally, now));
-
-        Schema.TransformationResult result = Futures.getUnchecked(future);
-        if (!result.success)
-            throw result.exception;
-
-        if (locally || result.diff.isEmpty())
-            return result.diff;
-
-        Set<InetAddressAndPort> schemaDestinationEndpoints = new HashSet<>();
-        Set<InetAddressAndPort> schemaEndpointsIgnored = new HashSet<>();
-        Message<Collection<Mutation>> message = Message.out(SCHEMA_PUSH_REQ, result.mutations);
-        for (InetAddressAndPort endpoint : Gossiper.instance.getLiveMembers())
-        {
-            if (shouldPushSchemaTo(endpoint))
-            {
-                MessagingService.instance().send(message, endpoint);
-                schemaDestinationEndpoints.add(endpoint);
-            }
-            else
-            {
-                schemaEndpointsIgnored.add(endpoint);
-            }
-        }
-
-        SchemaAnnouncementDiagnostics.schemaTransformationAnnounced(schemaDestinationEndpoints, schemaEndpointsIgnored,
-                                                                    transformation);
-
-        return result.diff;
-    }
-
-    /**
-     * Clear all locally stored schema information and reset schema to initial state.
-     * Called by user (via JMX) who wants to get rid of schema disagreement.
-     */
-    public static void resetLocalSchema()
-    {
-        logger.info("Starting local schema reset...");
-
-        logger.debug("Truncating schema tables...");
-
-        SchemaMigrationDiagnostics.resetLocalSchema();
-
-        Schema.instance.truncateSchemaKeyspace();
-
-        logger.debug("Clearing local schema keyspace definitions...");
-
-        Schema.instance.clear();
-
-        // clean the all version information from the MigrationCoordinator
-        MigrationCoordinator.instance.reset();
-
-        // now report again the versions we are aware of
-        Set<InetAddressAndPort> liveEndpoints = Gossiper.instance.getLiveMembers();
-        liveEndpoints.remove(FBUtilities.getBroadcastAddressAndPort());
-
-        // force migration if there are nodes around
-        for (InetAddressAndPort node : liveEndpoints)
-        {
-            EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(node);
-            Future<Void> pull = MigrationCoordinator.instance.reportEndpointVersion(node, state);
-            if (pull != null)
-                FBUtilities.waitOnFuture(pull);
-        }
-
-        logger.info("Local schema reset is complete.");
-    }
-
-    /**
-     * We have a set of non-local, distributed system keyspaces, e.g. system_traces, system_auth, etc.
-     * (see {@link SchemaConstants#REPLICATED_SYSTEM_KEYSPACE_NAMES}), that need to be created on cluster initialisation,
-     * and later evolved on major upgrades (sometimes minor too). This method compares the current known definitions
-     * of the tables (if the keyspace exists) to the expected, most modern ones expected by the running version of C*;
-     * if any changes have been detected, a schema Mutation will be created which, when applied, should make
-     * cluster's view of that keyspace aligned with the expected modern definition.
-     *
-     * @param keyspace   the expected modern definition of the keyspace
-     * @param generation timestamp to use for the table changes in the schema mutation
-     *
-     * @return empty Optional if the current definition is up to date, or an Optional with the Mutation that would
-     *         bring the schema in line with the expected definition.
-     */
-    public static Optional<Mutation> evolveSystemKeyspace(KeyspaceMetadata keyspace, long generation)
-    {
-        Mutation.SimpleBuilder builder = null;
-
-        KeyspaceMetadata definedKeyspace = Schema.instance.getKeyspaceMetadata(keyspace.name);
-        Tables definedTables = null == definedKeyspace ? Tables.none() : definedKeyspace.tables;
-
-        for (TableMetadata table : keyspace.tables)
-        {
-            if (table.equals(definedTables.getNullable(table.name)))
-                continue;
-
-            if (null == builder)
-            {
-                // for the keyspace definition itself (name, replication, durability) always use generation 0;
-                // this ensures that any changes made to replication by the user will never be overwritten.
-                builder = SchemaKeyspace.makeCreateKeyspaceMutation(keyspace.name, keyspace.params, 0);
-
-                // now set the timestamp to generation, so the tables have the expected timestamp
-                builder.timestamp(generation);
-            }
-
-            // for table definitions always use the provided generation; these tables, unlike their containing
-            // keyspaces, are *NOT* meant to be altered by the user; if their definitions need to change,
-            // the schema must be updated in code, and the appropriate generation must be bumped.
-            SchemaKeyspace.addTableToSchemaMutation(table, true, builder);
-        }
-
-        return builder == null ? Optional.empty() : Optional.of(builder.build());
-    }
-
-    public static class MigrationsSerializer implements IVersionedSerializer<Collection<Mutation>>
-    {
-        public static MigrationsSerializer instance = new MigrationsSerializer();
-
-        public void serialize(Collection<Mutation> schema, DataOutputPlus out, int version) throws IOException
-        {
-            out.writeInt(schema.size());
-            for (Mutation mutation : schema)
-                Mutation.serializer.serialize(mutation, out, version);
-        }
-
-        public Collection<Mutation> deserialize(DataInputPlus in, int version) throws IOException
-        {
-            int count = in.readInt();
-            Collection<Mutation> schema = new ArrayList<>(count);
-
-            for (int i = 0; i < count; i++)
-                schema.add(Mutation.serializer.deserialize(in, version));
-
-            return schema;
-        }
-
-        public long serializedSize(Collection<Mutation> schema, int version)
-        {
-            int size = TypeSizes.sizeof(schema.size());
-            for (Mutation mutation : schema)
-                size += mutation.serializedSize(version);
-            return size;
-        }
-    }
-}
diff --git a/src/java/org/apache/cassandra/schema/OfflineSchemaUpdateHandler.java b/src/java/org/apache/cassandra/schema/OfflineSchemaUpdateHandler.java
new file mode 100644
index 0000000..16694b1
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/OfflineSchemaUpdateHandler.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.time.Duration;
+import java.util.UUID;
+import java.util.function.BiConsumer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
+import org.apache.cassandra.utils.ByteArrayUtil;
+import org.apache.cassandra.utils.concurrent.Awaitable;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+
+/**
+ * Update handler which works only in memory. It does not load or save the schema anywhere. It is used in client mode
+ * applications.
+ */
+public class OfflineSchemaUpdateHandler implements SchemaUpdateHandler
+{
+    private static final Logger logger = LoggerFactory.getLogger(OfflineSchemaUpdateHandler.class);
+
+    private final BiConsumer<SchemaTransformationResult, Boolean> updateCallback;
+
+    private volatile DistributedSchema schema = DistributedSchema.EMPTY;
+
+    public OfflineSchemaUpdateHandler(BiConsumer<SchemaTransformationResult, Boolean> updateCallback)
+    {
+        this.updateCallback = updateCallback;
+    }
+
+    @Override
+    public void start()
+    {
+        // no-op
+    }
+
+    @Override
+    public boolean waitUntilReady(Duration timeout)
+    {
+        return true;
+    }
+
+    @Override
+    public synchronized SchemaTransformationResult apply(SchemaTransformation transformation, boolean local)
+    {
+        DistributedSchema before = schema;
+        Keyspaces afterKeyspaces = transformation.apply(before.getKeyspaces());
+        Keyspaces.KeyspacesDiff diff = Keyspaces.diff(before.getKeyspaces(), afterKeyspaces);
+
+        if (diff.isEmpty())
+            return new SchemaTransformationResult(before, before, diff);
+
+        DistributedSchema after = new DistributedSchema(afterKeyspaces, UUID.nameUUIDFromBytes(ByteArrayUtil.bytes(afterKeyspaces.hashCode())));
+        SchemaTransformationResult update = new SchemaTransformationResult(before, after, diff);
+        this.schema = after;
+        logger.debug("Schema updated: {}", update);
+        updateCallback.accept(update, true);
+
+        return update;
+    }
+
+    @Override
+    public void reset(boolean local)
+    {
+        if (!local)
+            throw new UnsupportedOperationException();
+
+        apply(ignored -> SchemaKeyspace.fetchNonSystemKeyspaces(), local);
+    }
+
+    @Override
+    public synchronized Awaitable clear()
+    {
+        this.schema = DistributedSchema.EMPTY;
+        return ImmediateFuture.success(true);
+    }
+}
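A minimal sketch of how the in-memory handler above might be driven (hypothetical usage, not part of the patch). It assumes SchemaTransformation's single abstract method maps the current Keyspaces to the desired Keyspaces, as the apply(...) implementation above suggests; an identity transformation yields an empty diff, so neither the schema nor its version changes.

    import org.apache.cassandra.schema.OfflineSchemaUpdateHandler;
    import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;

    public class OfflineSchemaSketch
    {
        public static void main(String[] args)
        {
            // The callback receives every applied transformation result; the Boolean flag's
            // meaning is defined by the callback contract and is simply printed here.
            OfflineSchemaUpdateHandler handler = new OfflineSchemaUpdateHandler(
                (update, flag) -> System.out.println("schema changed: " + update + " (flag=" + flag + ")"));

            // An identity transformation: the resulting diff is empty, so the handler keeps the
            // existing DistributedSchema and, per the apply(...) code above, skips the callback.
            SchemaTransformationResult unchanged = handler.apply(keyspaces -> keyspaces, false);
            System.out.println(unchanged);
        }
    }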
diff --git a/src/java/org/apache/cassandra/schema/PartitionDenylist.java b/src/java/org/apache/cassandra/schema/PartitionDenylist.java
new file mode 100644
index 0000000..53fcb5e
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/PartitionDenylist.java
@@ -0,0 +1,535 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSortedSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.github.benmanes.caffeine.cache.CacheLoader;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import com.github.benmanes.caffeine.cache.LoadingCache;
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.reads.range.RangeCommands;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.NoSpamLogger;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.cql3.QueryProcessor.process;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
+/**
+ * PartitionDenylist uses the system_distributed.partition_denylist table to maintain a list of denylisted partition keys
+ * for each keyspace/table.
+ *
+ * Keys can be entered manually into the partition_denylist table or via the JMX operation StorageProxyMBean.denylistKey
+ *
+ * The denylist is stored as one CQL partition per denylisted table, and the denylisted keys are clustering values in that partition. The denylisted
+ * keys for each table are cached in memory, and reloaded from the partition_denylist table every 10 minutes (default) or when the
+ * StorageProxyMBean.loadPartitionDenylist is called via JMX.
+ *
+ * Concurrency of the cache is provided by the concurrency semantics of the Caffeine LoadingCache. All values (DenylistEntry) are
+ * immutable collections of keys/tokens which are replaced wholesale when the cache is refreshed from the denylist table.
+ *
+ * The CL for the denylist is used on initial node load as well as on timer-instigated cache refreshes. A JMX call by the
+ * operator to load the denylist cache will warn on CL unavailability but proceed with the denylist load. This gives
+ * operators flexibility in the face of a degraded cluster state and still lets them mutate the denylist
+ * cache and bring it up if there are partitions they need to block on startup.
+ *
+ * Notably, in the current design it's possible for a *cache-expiration-instigated* reload of a table to end up violating
+ * the contract on the total number of denylisted keys allowed, in the case where the table initially loads with fewer keys
+ * than the per-table limit allows due to global constraint enforcement on the initial load. The load and reload functions
+ * simply enforce the *per-table* limit without regard to what that entails at the global key level. While we
+ * could track the constrained state and count in DenylistEntry, for now the complexity doesn't seem to justify the
+ * protection against that edge case. Enforcement, along with error messaging about count violations, takes place on a
+ * user-instigated full reload, so this only applies to situations in which someone adds a key and doesn't
+ * actively tell the cache to fully reload to take that key into consideration, which one could reasonably expect to be
+ * an antipattern.
+ */
+public class PartitionDenylist
+{
+    private static final Logger logger = LoggerFactory.getLogger(PartitionDenylist.class);
+    private static final NoSpamLogger AVAILABILITY_LOGGER = NoSpamLogger.getLogger(logger, 1, TimeUnit.MINUTES);
+
+    private final ExecutorService executor = executorFactory().pooled("DenylistCache", 2);
+
+    /** We effectively don't use our initial empty cache to denylist until the {@link #load()} call which will replace it */
+    private volatile LoadingCache<TableId, DenylistEntry> denylist = buildEmptyCache();
+
+    /** Denylist entry is never mutated once constructed, only replaced with a new entry when the cache is refreshed */
+    private static class DenylistEntry
+    {
+        public final ImmutableSet<ByteBuffer> keys;
+        public final ImmutableSortedSet<Token> tokens;
+
+        public DenylistEntry()
+        {
+            keys = ImmutableSet.of();
+            tokens = ImmutableSortedSet.of();
+        }
+
+        public DenylistEntry(final ImmutableSet<ByteBuffer> keys, final ImmutableSortedSet<Token> tokens)
+        {
+            this.keys = keys;
+            this.tokens = tokens;
+        }
+    }
+
+    /** synchronized on this */
+    private int loadAttempts = 0;
+
+    /** synchronized on this */
+    private int loadSuccesses = 0;
+
+    public synchronized int getLoadAttempts()
+    {
+        return loadAttempts;
+    }
+    public synchronized int getLoadSuccesses()
+    {
+        return loadSuccesses;
+    }
+
+    /**
+     * Performs initial load of the partition denylist. Should be called at startup and only loads if the operation
+     * is expected to succeed.  If it is not possible to load at call time, a timer is set to retry.
+     */
+    public void initialLoad()
+    {
+        if (!DatabaseDescriptor.getPartitionDenylistEnabled())
+            return;
+
+        synchronized (this)
+        {
+            loadAttempts++;
+        }
+
+        // Check if there are sufficient nodes to attempt reading all the denylist partitions before issuing the query.
+        // The pre-check prevents guaranteed range-slice unavailability errors from being recorded and triggering an alert. Nodes may still change
+        // state between the check and the query, but it should significantly reduce the alert volume.
+        String retryReason = "Insufficient nodes";
+        try
+        {
+            if (checkDenylistNodeAvailability())
+            {
+                load();
+                return;
+            }
+        }
+        catch (Throwable tr)
+        {
+            logger.error("Failed to load partition denylist", tr);
+            retryReason = "Exception";
+        }
+
+        // This path will also be taken on failures other than UnavailableException,
+        // but it seems like a good idea to retry anyway.
+        int retryInSeconds = DatabaseDescriptor.getDenylistInitialLoadRetrySeconds();
+        logger.info("{} while loading partition denylist cache. Scheduled retry in {} seconds.", retryReason, retryInSeconds);
+        ScheduledExecutors.optionalTasks.schedule(this::initialLoad, retryInSeconds, TimeUnit.SECONDS);
+    }
+
+    private boolean checkDenylistNodeAvailability()
+    {
+        boolean sufficientNodes = RangeCommands.sufficientLiveNodesForSelectStar(SystemDistributedKeyspace.PartitionDenylistTable,
+                                                                                 DatabaseDescriptor.getDenylistConsistencyLevel());
+        if (!sufficientNodes)
+        {
+            AVAILABILITY_LOGGER.warn("Attempting to load denylist and not enough nodes are available for a {} refresh. Reload the denylist when unavailable nodes are recovered to ensure your denylist remains in sync.",
+                                     DatabaseDescriptor.getDenylistConsistencyLevel());
+        }
+        return sufficientNodes;
+    }
+
+    /** Helper method: we need to build the cache both on initial init and on reload of cache contents and params */
+    private LoadingCache<TableId, DenylistEntry> buildEmptyCache()
+    {
+        // We rely on details of .refreshAfterWrite to reload this async in the background when it's hit:
+        // https://github.com/ben-manes/caffeine/wiki/Refresh
+        return Caffeine.newBuilder()
+                       .refreshAfterWrite(DatabaseDescriptor.getDenylistRefreshSeconds(), TimeUnit.SECONDS)
+                       .executor(executor)
+                       .build(new CacheLoader<TableId, DenylistEntry>()
+                       {
+                           @Override
+                           public DenylistEntry load(final TableId tid)
+                           {
+                               // We load whether or not the CL-required number of nodes is available, as the
+                               // alternative is an empty denylist. This allows operators to intervene in the event they need to deny or
+                               // undeny a specific partition key around a node recovery.
+                               checkDenylistNodeAvailability();
+                               return getDenylistForTableFromCQL(tid);
+                           }
+
+                           // The synchronous reload method defaults to being wrapped with a supplyAsync in CacheLoader.asyncReload
+                           @Override
+                           public DenylistEntry reload(final TableId tid, final DenylistEntry oldValue)
+                           {
+                               // Only process when we can hit the user specified CL for the denylist consistency on a timer prompted reload
+                               if (checkDenylistNodeAvailability())
+                               {
+                                   final DenylistEntry newEntry = getDenylistForTableFromCQL(tid);
+                                   if (newEntry != null)
+                                       return newEntry;
+                               }
+                               if (oldValue != null)
+                                   return oldValue;
+                               return new DenylistEntry();
+                           }
+                       });
+    }
+
+    /**
+     * We need to build an entirely new cache to accommodate deleting items from the denylist and potentially shrinking
+     * the maximum allowable size of the list. We do not serve queries out of this denylist until it is populated,
+     * so as not to introduce a window during which a partially populated cache would allow reads of denylisted partitions.
+     */
+    public void load()
+    {
+        final long start = currentTimeMillis();
+
+        final Map<TableId, DenylistEntry> allDenylists = getDenylistForAllTablesFromCQL();
+
+        // On initial load we have the slight overhead of GC'ing our initial empty cache
+        LoadingCache<TableId, DenylistEntry> newDenylist = buildEmptyCache();
+        newDenylist.putAll(allDenylists);
+
+        synchronized (this)
+        {
+            loadSuccesses++;
+        }
+        denylist = newDenylist;
+        logger.info("Loaded partition denylist cache in {}ms", currentTimeMillis() - start);
+    }
+
+    /**
+     * We expect the caller to confirm that we are working with a valid keyspace and table. Further, we expect the usage
+     * pattern of this to be one-off, key by key, not a bulk process, so we reload the entire table's denylist entry
+     * on an addition or removal.
+     */
+    public boolean addKeyToDenylist(final String keyspace, final String table, final ByteBuffer key)
+    {
+        if (!canDenylistKeyspace(keyspace))
+            return false;
+
+        final String insert = String.format("INSERT INTO system_distributed.partition_denylist (ks_name, table_name, key) VALUES ('%s', '%s', 0x%s)",
+                                            keyspace, table, ByteBufferUtil.bytesToHex(key));
+
+        try
+        {
+            process(insert, DatabaseDescriptor.getDenylistConsistencyLevel());
+            return refreshTableDenylist(keyspace, table);
+        }
+        catch (final RequestExecutionException e)
+        {
+            logger.error("Failed to denylist key [{}] in {}/{}", ByteBufferUtil.bytesToHex(key), keyspace, table, e);
+        }
+        return false;
+    }
+
+    /**
+     * We expect the caller to confirm that we are working with a valid keyspace and table.
+     */
+    public boolean removeKeyFromDenylist(final String keyspace, final String table, final ByteBuffer key)
+    {
+        final String delete = String.format("DELETE FROM system_distributed.partition_denylist " +
+                                            "WHERE ks_name = '%s' " +
+                                            "AND table_name = '%s' " +
+                                            "AND key = 0x%s",
+                                            keyspace, table, ByteBufferUtil.bytesToHex(key));
+
+        try
+        {
+            process(delete, DatabaseDescriptor.getDenylistConsistencyLevel());
+            return refreshTableDenylist(keyspace, table);
+        }
+        catch (final RequestExecutionException e)
+        {
+            logger.error("Failed to remove key from denylist: [{}] in {}/{}", ByteBufferUtil.bytesToHex(key), keyspace, table, e);
+        }
+        return false;
+    }
+
+    /**
+     * We disallow denylisting partitions in certain critical keyspaces to prevent users from making their clusters
+     * inoperable.
+     */
+    private boolean canDenylistKeyspace(final String keyspace)
+    {
+        return !SchemaConstants.DISTRIBUTED_KEYSPACE_NAME.equals(keyspace) &&
+               !SchemaConstants.SYSTEM_KEYSPACE_NAME.equals(keyspace) &&
+               !SchemaConstants.TRACE_KEYSPACE_NAME.equals(keyspace) &&
+               !SchemaConstants.VIRTUAL_SCHEMA.equals(keyspace) &&
+               !SchemaConstants.VIRTUAL_VIEWS.equals(keyspace) &&
+               !SchemaConstants.AUTH_KEYSPACE_NAME.equals(keyspace);
+    }
+
+    public boolean isKeyPermitted(final String keyspace, final String table, final ByteBuffer key)
+    {
+        return isKeyPermitted(getTableId(keyspace, table), key);
+    }
+
+    public boolean isKeyPermitted(final TableId tid, final ByteBuffer key)
+    {
+        final TableMetadata tmd = Schema.instance.getTableMetadata(tid);
+
+        // We have a few quick state checks to get out of the way first; this is a hot path so we want to do these first if possible.
+        if (!DatabaseDescriptor.getPartitionDenylistEnabled() || tid == null || tmd == null || !canDenylistKeyspace(tmd.keyspace))
+            return true;
+
+        try
+        {
+            // If we don't have an entry for this table id, nothing in it is denylisted.
+            DenylistEntry entry = denylist.get(tid);
+            if (entry == null)
+                return true;
+            return !entry.keys.contains(key);
+        }
+        catch (final Exception e)
+        {
+            // In the event of an error accessing or populating the cache, assume it's not denylisted
+            logAccessFailure(tid, e);
+            return true;
+        }
+    }
+
+    private void logAccessFailure(final TableId tid, Throwable e)
+    {
+        final TableMetadata tmd = Schema.instance.getTableMetadata(tid);
+        if (tmd == null)
+            logger.debug("Failed to access partition denylist cache for unknown table id {}", tid.toString(), e);
+        else
+            logger.debug("Failed to access partition denylist cache for {}/{}", tmd.keyspace, tmd.name, e);
+    }
+
+    /**
+     * @return number of denylisted keys in range
+     */
+    public int getDeniedKeysInRangeCount(final String keyspace, final String table, final AbstractBounds<PartitionPosition> range)
+    {
+        return getDeniedKeysInRangeCount(getTableId(keyspace, table), range);
+    }
+
+    /**
+     * @return number of denylisted keys in range
+     */
+    public int getDeniedKeysInRangeCount(final TableId tid, final AbstractBounds<PartitionPosition> range)
+    {
+        final TableMetadata tmd = Schema.instance.getTableMetadata(tid);
+        if (!DatabaseDescriptor.getPartitionDenylistEnabled() || tid == null || tmd == null || !canDenylistKeyspace(tmd.keyspace))
+            return 0;
+
+        try
+        {
+            final DenylistEntry denylistEntry = denylist.get(tid);
+            if (denylistEntry == null || denylistEntry.tokens.size() == 0)
+                return 0;
+            final Token startToken = range.left.getToken();
+            final Token endToken = range.right.getToken();
+
+            // Normal case
+            if (startToken.compareTo(endToken) <= 0 || endToken.isMinimum())
+            {
+                NavigableSet<Token> subSet = denylistEntry.tokens.tailSet(startToken, PartitionPosition.Kind.MIN_BOUND == range.left.kind());
+                if (!endToken.isMinimum())
+                    subSet = subSet.headSet(endToken, PartitionPosition.Kind.MAX_BOUND == range.right.kind());
+                return subSet.size();
+            }
+
+            // Wrap around case
+            return denylistEntry.tokens.tailSet(startToken, PartitionPosition.Kind.MIN_BOUND == range.left.kind()).size()
+                   + denylistEntry.tokens.headSet(endToken, PartitionPosition.Kind.MAX_BOUND == range.right.kind()).size();
+        }
+        catch (final Exception e)
+        {
+            logAccessFailure(tid, e);
+            return 0;
+        }
+    }
+
+    /**
+     * Get up to the configured per-table limit of denylisted keys
+     */
+    private DenylistEntry getDenylistForTableFromCQL(final TableId tid)
+    {
+        return getDenylistForTableFromCQL(tid, DatabaseDescriptor.getDenylistMaxKeysPerTable());
+    }
+
+    /**
+     * Attempts to reload the DenylistEntry data from CQL for the given TableId and key count.
+     * @return null if the table is unknown (preserving the old cached value), an empty denylist if no data is found
+     *         or the read fails, otherwise the new denylist entry
+     */
+    private DenylistEntry getDenylistForTableFromCQL(final TableId tid, int limit)
+    {
+        final TableMetadata tmd = Schema.instance.getTableMetadata(tid);
+        if (tmd == null)
+            return null;
+
+        // We attempt to query just over our allowable max keys in order to check whether we have configured data beyond that limit and alert the user if so
+        final String readDenylist = String.format("SELECT * FROM %s.%s WHERE ks_name='%s' AND table_name='%s' LIMIT %d",
+                                                  SchemaConstants.DISTRIBUTED_KEYSPACE_NAME,
+                                                  SystemDistributedKeyspace.PARTITION_DENYLIST_TABLE,
+                                                  tmd.keyspace,
+                                                  tmd.name,
+                                                  limit + 1);
+
+        try
+        {
+            final UntypedResultSet results = process(readDenylist, DatabaseDescriptor.getDenylistConsistencyLevel());
+
+            // If there's no data in CQL we want to return an empty DenylistEntry so we don't continue using the old value in the cache
+            if (results == null || results.isEmpty())
+                return new DenylistEntry();
+
+            if (results.size() > limit)
+            {
+                // If our limit is < the standard per-table limit, we know we're at a global violation because the request limit was already constrained by the global cap.
+                boolean globalLimit = limit != DatabaseDescriptor.getDenylistMaxKeysPerTable();
+                String violationType = globalLimit ? "global" : "per-table";
+                int errorLimit = globalLimit ? DatabaseDescriptor.getDenylistMaxKeysTotal() : limit;
+                logger.error("Partition denylist for {}/{} has exceeded the {} allowance of ({}). Remaining keys were ignored; " +
+                             "please reduce the total number of keys denied or increase the denylist_max_keys_per_table param in " +
+                             "cassandra.yaml to avoid inconsistency in denied partitions across nodes.",
+                             tmd.keyspace,
+                             tmd.name,
+                             violationType,
+                             errorLimit);
+            }
+
+            final Set<ByteBuffer> keys = new HashSet<>();
+            final NavigableSet<Token> tokens = new TreeSet<>();
+
+            int processed = 0;
+            for (final UntypedResultSet.Row row : results)
+            {
+                final ByteBuffer key = row.getBlob("key");
+                keys.add(key);
+                tokens.add(StorageService.instance.getTokenMetadata().partitioner.getToken(key));
+
+                processed++;
+                if (processed >= limit)
+                    break;
+            }
+            return new DenylistEntry(ImmutableSet.copyOf(keys), ImmutableSortedSet.copyOf(tokens));
+        }
+        catch (final RequestExecutionException e)
+        {
+            logger.error("Error reading partition_denylist table for {}/{}. Returning empty denylist.", tmd.keyspace, tmd.name, e);
+            return new DenylistEntry();
+        }
+    }
+
+    /**
+     * This method relies on {@link #getDenylistForTableFromCQL(TableId, int)} to pull a limited number of keys
+     * on a per-table basis from CQL to load into the cache. We need to respect both the global maximum key limit
+     * and the per-table limit.
+     * @return non-null mapping of TableId to DenylistEntry
+     */
+    private Map<TableId, DenylistEntry> getDenylistForAllTablesFromCQL()
+    {
+        // While we warn the user in this case, we continue with the reload anyway.
+        checkDenylistNodeAvailability();
+
+        final String allDeniedTables = String.format("SELECT DISTINCT ks_name, table_name FROM %s.%s",
+                                                     SchemaConstants.DISTRIBUTED_KEYSPACE_NAME,
+                                                     SystemDistributedKeyspace.PARTITION_DENYLIST_TABLE);
+        try
+        {
+            final UntypedResultSet deniedTableResults = process(allDeniedTables, DatabaseDescriptor.getDenylistConsistencyLevel());
+            if (deniedTableResults == null || deniedTableResults.isEmpty())
+                return Collections.emptyMap();
+
+            int totalProcessed = 0;
+            final Map<TableId, DenylistEntry> results = new HashMap<>();
+            for (final UntypedResultSet.Row row : deniedTableResults)
+            {
+                final String ks = row.getString("ks_name");
+                final String table = row.getString("table_name");
+                final TableId tid = getTableId(ks, table);
+                if (DatabaseDescriptor.getDenylistMaxKeysTotal() - totalProcessed <= 0)
+                {
+                    logger.error("Hit limit on allowable denylisted keys in total. Processed {} total entries. Not adding all entries to denylist for {}/{}." +
+                                 " Remove denylist entries in system_distributed.{} or increase your denylist_max_keys_total param in cassandra.yaml.",
+                                 totalProcessed,
+                                 ks,
+                                 table,
+                                 SystemDistributedKeyspace.PARTITION_DENYLIST_TABLE);
+                    results.put(tid, new DenylistEntry());
+                }
+                else
+                {
+                    // Determine whether we can take up to the per-table max, or only a subset when nearing the global max.
+                    int allowedTableRecords = Math.min(DatabaseDescriptor.getDenylistMaxKeysPerTable(), DatabaseDescriptor.getDenylistMaxKeysTotal() - totalProcessed);
+                    DenylistEntry tableDenylist = getDenylistForTableFromCQL(tid, allowedTableRecords);
+                    if (tableDenylist != null)
+                        totalProcessed += tableDenylist.keys.size();
+                    results.put(tid, tableDenylist);
+                }
+            }
+            return results;
+        }
+        catch (final RequestExecutionException e)
+        {
+            logger.error("Error reading full partition denylist from "
+                         + SchemaConstants.DISTRIBUTED_KEYSPACE_NAME + "." + SystemDistributedKeyspace.PARTITION_DENYLIST_TABLE +
+                         ". Partition Denylisting will be compromised. Exception: " + e);
+            return Collections.emptyMap();
+        }
+    }
+
+    private boolean refreshTableDenylist(String keyspace, String table)
+    {
+        checkDenylistNodeAvailability();
+        final TableId tid = getTableId(keyspace, table);
+        if (tid == null)
+        {
+            logger.warn("Got denylist mutation for unknown ks/cf: {}/{}. Skipping refresh.", keyspace, table);
+            return false;
+        }
+
+        DenylistEntry newEntry = getDenylistForTableFromCQL(tid);
+        denylist.put(tid, newEntry);
+        return true;
+    }
+
+    private TableId getTableId(final String keyspace, final String table)
+    {
+        TableMetadata tmd = Schema.instance.getTableMetadata(keyspace, table);
+        return tmd == null ? null : tmd.id;
+    }
+}
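The refresh behaviour described in the class comment relies on Caffeine's refreshAfterWrite semantics: reads keep being served from the existing entry while a reload runs asynchronously, and the reload can fall back to the previous value when the source is unavailable. A stripped-down sketch of just that pattern, with placeholder names and a String key standing in for TableId (illustrative only, not the patch's API):

    import java.util.concurrent.TimeUnit;
    import com.github.benmanes.caffeine.cache.CacheLoader;
    import com.github.benmanes.caffeine.cache.Caffeine;
    import com.github.benmanes.caffeine.cache.LoadingCache;

    public class RefreshFallbackSketch
    {
        public static void main(String[] args)
        {
            LoadingCache<String, String> cache = Caffeine.newBuilder()
                .refreshAfterWrite(10, TimeUnit.MINUTES)   // stale entries are refreshed asynchronously on access
                .build(new CacheLoader<String, String>()
                {
                    @Override
                    public String load(String key)
                    {
                        // Initial load: always attempt the (possibly failing) source read.
                        return readFromSource(key);
                    }

                    @Override
                    public String reload(String key, String oldValue)
                    {
                        // Refresh: keep the previous value if the source is unavailable.
                        try { return readFromSource(key); }
                        catch (RuntimeException e) { return oldValue; }
                    }
                });

            System.out.println(cache.get("example"));
        }

        private static String readFromSource(String key)
        {
            return "value-for-" + key;   // stand-in for the CQL read performed by the real class
        }
    }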
diff --git a/src/java/org/apache/cassandra/schema/ReplicationParams.java b/src/java/org/apache/cassandra/schema/ReplicationParams.java
index 048b4ed..2998aa5 100644
--- a/src/java/org/apache/cassandra/schema/ReplicationParams.java
+++ b/src/java/org/apache/cassandra/schema/ReplicationParams.java
@@ -27,6 +27,7 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CqlBuilder;
 import org.apache.cassandra.locator.*;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.StorageService;
 
 public final class ReplicationParams
@@ -70,25 +71,25 @@
         return new ReplicationParams(NetworkTopologyStrategy.class, options);
     }
 
-    public void validate(String name)
+    public void validate(String name, ClientState state)
     {
         // Attempt to instantiate the ARS, which will throw a ConfigurationException if the options aren't valid.
         TokenMetadata tmd = StorageService.instance.getTokenMetadata();
         IEndpointSnitch eps = DatabaseDescriptor.getEndpointSnitch();
-        AbstractReplicationStrategy.validateReplicationStrategy(name, klass, tmd, eps, options);
+        AbstractReplicationStrategy.validateReplicationStrategy(name, klass, tmd, eps, options, state);
     }
 
     public static ReplicationParams fromMap(Map<String, String> map) {
         return fromMapWithDefaults(map, new HashMap<>());
     }
 
-    public static ReplicationParams fromMapWithDefaults(Map<String, String> map, Map<String, String> defaults)
+    public static ReplicationParams fromMapWithDefaults(Map<String, String> map, Map<String, String> previousOptions)
     {
         Map<String, String> options = new HashMap<>(map);
         String className = options.remove(CLASS);
 
         Class<? extends AbstractReplicationStrategy> klass = AbstractReplicationStrategy.getClass(className);
-        AbstractReplicationStrategy.prepareReplicationStrategyOptions(klass, options, defaults);
+        AbstractReplicationStrategy.prepareReplicationStrategyOptions(klass, options, previousOptions);
 
         return new ReplicationParams(klass, options);
     }
diff --git a/src/java/org/apache/cassandra/schema/Schema.java b/src/java/org/apache/cassandra/schema/Schema.java
index 20687ae..0dba167 100644
--- a/src/java/org/apache/cassandra/schema/Schema.java
+++ b/src/java/org/apache/cassandra/schema/Schema.java
@@ -17,122 +17,134 @@
  */
 package org.apache.cassandra.schema;
 
-import java.net.UnknownHostException;
+import java.time.Duration;
 import java.util.*;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.stream.Collectors;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
 
-import com.google.common.collect.ImmutableList;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.MapDifference;
-import com.google.common.collect.Sets;
+import org.apache.commons.lang3.ObjectUtils;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.cql3.functions.*;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
-import org.apache.cassandra.gms.ApplicationState;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.LocalStrategy;
 import org.apache.cassandra.schema.KeyspaceMetadata.KeyspaceDiff;
 import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
 import org.apache.cassandra.service.PendingRangeCalculatorService;
 import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.Pair;
-import org.cliffc.high_scale_lib.NonBlockingHashMap;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.Awaitable;
+import org.apache.cassandra.utils.concurrent.LoadingMap;
 
-import static java.lang.String.format;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import static com.google.common.collect.Iterables.size;
+import static java.lang.String.format;
+import static org.apache.cassandra.config.DatabaseDescriptor.isDaemonInitialized;
+import static org.apache.cassandra.config.DatabaseDescriptor.isToolInitialized;
 
-public final class Schema implements SchemaProvider
+/**
+ * Manages shared schema, keyspace instances and table metadata refs. Provides methods to initialize, modify and query
+ * both the shared and local schema, as well as to register listeners.
+ * <p>
+ * This class should be the only entity used to query and manage the schema. Internal details should not be accessed in
+ * production code (and ideally not in test code either).
+ * <p>
+ * TL;DR: All modifications are made using the implementation of {@link SchemaUpdateHandler} obtained from the provided
+ * factory. After each modification, the internally managed table metadata refs and keyspace instances are updated and
+ * notifications are sent to the registered listeners.
+ * When a schema change is applied by the update handler (regardless of whether it was initiated locally or received
+ * from another node), the registered callback is executed, performing the remaining updates to table metadata refs and
+ * keyspace instances (see {@link #mergeAndUpdateVersion(SchemaTransformationResult, boolean)}).
+ */
+public class Schema implements SchemaProvider
 {
+    private static final Logger logger = LoggerFactory.getLogger(Schema.class);
+
     public static final Schema instance = new Schema();
 
-    private volatile Keyspaces keyspaces = Keyspaces.none();
+    private volatile Keyspaces distributedKeyspaces = Keyspaces.none();
+    private volatile Keyspaces distributedAndLocalKeyspaces;
 
-    // UUID -> mutable metadata ref map. We have to update these in place every time a table changes.
-    private final Map<TableId, TableMetadataRef> metadataRefs = new NonBlockingHashMap<>();
+    private final Keyspaces localKeyspaces;
 
-    // (keyspace name, index name) -> mutable metadata ref map. We have to update these in place every time an index changes.
-    private final Map<Pair<String, String>, TableMetadataRef> indexMetadataRefs = new NonBlockingHashMap<>();
+    private volatile TableMetadataRefCache tableMetadataRefCache = TableMetadataRefCache.EMPTY;
 
     // Keyspace objects, one per keyspace. Only one instance should ever exist for any given keyspace.
-    private final Map<String, Keyspace> keyspaceInstances = new NonBlockingHashMap<>();
+    // We use a LoadingMap because we need atomic initialization with at-most-once semantics for the
+    // loadFunction. Although this seems like a valid case for using ConcurrentHashMap.computeIfAbsent,
+    // we should not use it because we have no knowledge about the loadFunction; in fact, the load function may
+    // make nested calls to maybeAddKeyspaceInstance, possibly from different threads and in a blocking manner.
+    // This may lead to a deadlock. The documentation of ConcurrentHashMap says that manipulating other keys inside
+    // the lambda passed to computeIfAbsent is prohibited.
+    private final LoadingMap<String, Keyspace> keyspaceInstances = new LoadingMap<>();
 
-    private volatile UUID version;
+    private volatile UUID version = SchemaConstants.emptyVersion;
 
-    private final List<SchemaChangeListener> changeListeners = new CopyOnWriteArrayList<>();
+    private final SchemaChangeNotifier schemaChangeNotifier = new SchemaChangeNotifier();
+
+    public final SchemaUpdateHandler updateHandler;
+
+    private final boolean online;
 
     /**
      * Initialize empty schema object and load the hardcoded system tables
      */
     private Schema()
     {
-        if (DatabaseDescriptor.isDaemonInitialized() || DatabaseDescriptor.isToolInitialized())
-        {
-            load(SchemaKeyspace.metadata());
-            load(SystemKeyspace.metadata());
-        }
+        this.online = isDaemonInitialized();
+        this.localKeyspaces = (CassandraRelevantProperties.FORCE_LOAD_LOCAL_KEYSPACES.getBoolean() || isDaemonInitialized() || isToolInitialized())
+                              ? Keyspaces.of(SchemaKeyspace.metadata(), SystemKeyspace.metadata())
+                              : Keyspaces.none();
+        this.distributedAndLocalKeyspaces = this.localKeyspaces;
+
+        this.localKeyspaces.forEach(this::loadNew);
+        this.updateHandler = SchemaUpdateHandlerFactoryProvider.instance.get().getSchemaUpdateHandler(online, this::mergeAndUpdateVersion);
+    }
+
+    @VisibleForTesting
+    public Schema(boolean online, Keyspaces localKeyspaces, SchemaUpdateHandler updateHandler)
+    {
+        this.online = online;
+        this.localKeyspaces = localKeyspaces;
+        this.distributedAndLocalKeyspaces = this.localKeyspaces;
+        this.updateHandler = updateHandler;
+    }
+
+    public void startSync()
+    {
+        logger.debug("Starting update handler");
+        updateHandler.start();
+    }
+
+    public boolean waitUntilReady(Duration timeout)
+    {
+        logger.debug("Waiting for update handler to be ready...");
+        return updateHandler.waitUntilReady(timeout);
     }
 
     /**
-     * Add entries to system_schema.* for the hardcoded system keyspaces
-     * 
-     * See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes
-     */
-    public synchronized void saveSystemKeyspace()
-    {
-        SchemaKeyspace.saveSystemKeyspacesSchema();
-    }
-
-    /**
-     * See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes
-     */
-    public synchronized void truncateSchemaKeyspace()
-    {
-        SchemaKeyspace.truncate();
-    }
-
-    /**
-     * See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes
-     */
-    public synchronized Collection<Mutation> schemaKeyspaceAsMutations()
-    {
-        return SchemaKeyspace.convertSchemaToMutations();
-    }
-
-    public static KeyspaceMetadata getSystemKeyspaceMetadata()
-    {
-        return SchemaKeyspace.metadata();
-    }
-
-    /**
-     * load keyspace (keyspace) definitions, but do not initialize the keyspace instances.
-     * Schema version may be updated as the result.
+     * Load keyspace definitions from local storage, see {@link SchemaUpdateHandler#reset(boolean)}.
      */
     public void loadFromDisk()
     {
-        loadFromDisk(true);
-    }
-
-    /**
-     * Load schema definitions from disk.
-     *
-     * @param updateVersion true if schema version needs to be updated
-     */
-    public void loadFromDisk(boolean updateVersion)
-    {
-        SchemaDiagnostics.schemataLoading(this);
-        SchemaKeyspace.fetchNonSystemKeyspaces().forEach(this::load);
-        if (updateVersion)
-            updateVersion();
-        SchemaDiagnostics.schemataLoaded(this);
+        SchemaDiagnostics.schemaLoading(this);
+        updateHandler.reset(true);
+        SchemaDiagnostics.schemaLoaded(this);
     }
 
     /**
@@ -140,31 +152,28 @@
      *
      * @param ksm The metadata about keyspace
      */
-    synchronized public void load(KeyspaceMetadata ksm)
+    private synchronized void load(KeyspaceMetadata ksm)
     {
-        KeyspaceMetadata previous = keyspaces.getNullable(ksm.name);
+        Preconditions.checkArgument(!SchemaConstants.isLocalSystemKeyspace(ksm.name));
+        KeyspaceMetadata previous = distributedKeyspaces.getNullable(ksm.name);
 
         if (previous == null)
             loadNew(ksm);
         else
             reload(previous, ksm);
 
-        keyspaces = keyspaces.withAddedOrUpdated(ksm);
+        distributedKeyspaces = distributedKeyspaces.withAddedOrUpdated(ksm);
+        distributedAndLocalKeyspaces = distributedAndLocalKeyspaces.withAddedOrUpdated(ksm);
     }
 
-    private void loadNew(KeyspaceMetadata ksm)
+    private synchronized void loadNew(KeyspaceMetadata ksm)
     {
-        ksm.tablesAndViews()
-           .forEach(metadata -> metadataRefs.put(metadata.id, new TableMetadataRef(metadata)));
-
-        ksm.tables
-           .indexTables()
-           .forEach((name, metadata) -> indexMetadataRefs.put(Pair.create(ksm.name, name), new TableMetadataRef(metadata)));
+        this.tableMetadataRefCache = tableMetadataRefCache.withNewRefs(ksm);
 
         SchemaDiagnostics.metadataInitialized(this, ksm);
     }
 
-    private void reload(KeyspaceMetadata previous, KeyspaceMetadata updated)
+    private synchronized void reload(KeyspaceMetadata previous, KeyspaceMetadata updated)
     {
         Keyspace keyspace = getKeyspaceInstance(updated.name);
         if (null != keyspace)
@@ -175,44 +184,20 @@
 
         MapDifference<String, TableMetadata> indexesDiff = previous.tables.indexesDiff(updated.tables);
 
-        // clean up after removed entries
-        tablesDiff.dropped.forEach(table -> metadataRefs.remove(table.id));
-        viewsDiff.dropped.forEach(view -> metadataRefs.remove(view.metadata.id));
-
-        indexesDiff.entriesOnlyOnLeft()
-                   .values()
-                   .forEach(indexTable -> indexMetadataRefs.remove(Pair.create(indexTable.keyspace, indexTable.indexName().get())));
-
-        // load up new entries
-        tablesDiff.created.forEach(table -> metadataRefs.put(table.id, new TableMetadataRef(table)));
-        viewsDiff.created.forEach(view -> metadataRefs.put(view.metadata.id, new TableMetadataRef(view.metadata)));
-
-        indexesDiff.entriesOnlyOnRight()
-                   .values()
-                   .forEach(indexTable -> indexMetadataRefs.put(Pair.create(indexTable.keyspace, indexTable.indexName().get()), new TableMetadataRef(indexTable)));
-
-        // refresh refs to updated ones
-        tablesDiff.altered.forEach(diff -> metadataRefs.get(diff.after.id).set(diff.after));
-        viewsDiff.altered.forEach(diff -> metadataRefs.get(diff.after.metadata.id).set(diff.after.metadata));
-
-        indexesDiff.entriesDiffering()
-                   .values()
-                   .stream()
-                   .map(MapDifference.ValueDifference::rightValue)
-                   .forEach(indexTable -> indexMetadataRefs.get(Pair.create(indexTable.keyspace, indexTable.indexName().get())).set(indexTable));
+        this.tableMetadataRefCache = tableMetadataRefCache.withUpdatedRefs(previous, updated);
 
         SchemaDiagnostics.metadataReloaded(this, previous, updated, tablesDiff, viewsDiff, indexesDiff);
     }
 
     public void registerListener(SchemaChangeListener listener)
     {
-        changeListeners.add(listener);
+        schemaChangeNotifier.registerListener(listener);
     }
 
     @SuppressWarnings("unused")
     public void unregisterListener(SchemaChangeListener listener)
     {
-        changeListeners.remove(listener);
+        schemaChangeNotifier.unregisterListener(listener);
     }
 
     /**
@@ -225,7 +210,7 @@
     @Override
     public Keyspace getKeyspaceInstance(String keyspaceName)
     {
-        return keyspaceInstances.get(keyspaceName);
+        return keyspaceInstances.getIfReady(keyspaceName);
     }
 
     public ColumnFamilyStore getColumnFamilyStoreInstance(TableId id)
@@ -239,39 +224,49 @@
             return null;
 
         return instance.hasColumnFamilyStore(metadata.id)
-             ? instance.getColumnFamilyStore(metadata.id)
-             : null;
+               ? instance.getColumnFamilyStore(metadata.id)
+               : null;
     }
 
-    /**
-     * Store given Keyspace instance to the schema
-     *
-     * @param keyspace The Keyspace instance to store
-     *
-     * @throws IllegalArgumentException if Keyspace is already stored
-     */
     @Override
-    public void storeKeyspaceInstance(Keyspace keyspace)
+    public Keyspace maybeAddKeyspaceInstance(String keyspaceName, Supplier<Keyspace> loadFunction)
     {
-        if (keyspaceInstances.putIfAbsent(keyspace.getName(), keyspace) != null)
-            throw new IllegalArgumentException(String.format("Keyspace %s was already initialized.", keyspace.getName()));
+        return keyspaceInstances.blockingLoadIfAbsent(keyspaceName, loadFunction);
+    }
+
+    private Keyspace maybeRemoveKeyspaceInstance(String keyspaceName, Consumer<Keyspace> unloadFunction)
+    {
+        try
+        {
+            return keyspaceInstances.blockingUnloadIfPresent(keyspaceName, unloadFunction);
+        }
+        catch (LoadingMap.UnloadExecutionException e)
+        {
+            throw new AssertionError("Failed to unload the keyspace " + keyspaceName, e);
+        }
+    }
+
+    public Keyspaces distributedAndLocalKeyspaces()
+    {
+        return distributedAndLocalKeyspaces;
+    }
+
+    public Keyspaces distributedKeyspaces()
+    {
+        return distributedKeyspaces;
     }
 
     /**
-     * Remove keyspace from schema
-     *
-     * @param keyspaceName The name of the keyspace to remove
-     *
-     * @return removed keyspace instance or null if it wasn't found
+     * Compute the largest gc grace seconds amongst all the tables
+     * @return the largest gcgs.
      */
-    public Keyspace removeKeyspaceInstance(String keyspaceName)
+    public int largestGcgs()
     {
-        return keyspaceInstances.remove(keyspaceName);
-    }
-
-    public Keyspaces snapshot()
-    {
-        return keyspaces;
+        return distributedAndLocalKeyspaces().stream()
+                                             .flatMap(ksm -> ksm.tables.stream())
+                                             .mapToInt(tm -> tm.params.gcGraceSeconds)
+                                             .max()
+                                             .orElse(Integer.MIN_VALUE);
     }
 
     /**
@@ -279,30 +274,25 @@
      *
      * @param ksm The keyspace definition to remove
      */
-    synchronized void unload(KeyspaceMetadata ksm)
+    private synchronized void unload(KeyspaceMetadata ksm)
     {
-        keyspaces = keyspaces.without(ksm.name);
+        distributedKeyspaces = distributedKeyspaces.without(ksm.name);
+        distributedAndLocalKeyspaces = distributedAndLocalKeyspaces.without(ksm.name);
 
-        ksm.tablesAndViews()
-           .forEach(t -> metadataRefs.remove(t.id));
-
-        ksm.tables
-           .indexTables()
-           .keySet()
-           .forEach(name -> indexMetadataRefs.remove(Pair.create(ksm.name, name)));
+        this.tableMetadataRefCache = tableMetadataRefCache.withRemovedRefs(ksm);
 
         SchemaDiagnostics.metadataRemoved(this, ksm);
     }
 
     public int getNumberOfTables()
     {
-        return keyspaces.stream().mapToInt(k -> size(k.tablesAndViews())).sum();
+        return distributedAndLocalKeyspaces().stream().mapToInt(k -> size(k.tablesAndViews())).sum();
     }
 
     public ViewMetadata getView(String keyspaceName, String viewName)
     {
         assert keyspaceName != null;
-        KeyspaceMetadata ksm = keyspaces.getNullable(keyspaceName);
+        KeyspaceMetadata ksm = distributedAndLocalKeyspaces().getNullable(keyspaceName);
         return (ksm == null) ? null : ksm.views.getNullable(viewName);
     }
 
@@ -317,65 +307,64 @@
     public KeyspaceMetadata getKeyspaceMetadata(String keyspaceName)
     {
         assert keyspaceName != null;
-        KeyspaceMetadata keyspace = keyspaces.getNullable(keyspaceName);
+        KeyspaceMetadata keyspace = distributedAndLocalKeyspaces().getNullable(keyspaceName);
         return null != keyspace ? keyspace : VirtualKeyspaceRegistry.instance.getKeyspaceMetadataNullable(keyspaceName);
     }
 
-    private Set<String> getNonSystemKeyspacesSet()
+    /**
+     * Returns all non-local keyspaces, that is, all but {@link SchemaConstants#LOCAL_SYSTEM_KEYSPACE_NAMES}
+     * or virtual keyspaces.
+     * @deprecated use {@link #distributedKeyspaces()}
+     */
+    @Deprecated
+    public Keyspaces getNonSystemKeyspaces()
     {
-        return Sets.difference(keyspaces.names(), SchemaConstants.LOCAL_SYSTEM_KEYSPACE_NAMES);
+        return distributedKeyspaces;
     }
 
     /**
-     * @return collection of the non-system keyspaces (note that this count as system only the
-     * non replicated keyspaces, so keyspace like system_traces which are replicated are actually
-     * returned. See getUserKeyspace() below if you don't want those)
+     * Returns all non-local keyspaces whose replication strategy is not {@link LocalStrategy}.
      */
-    public ImmutableList<String> getNonSystemKeyspaces()
+    public Keyspaces getNonLocalStrategyKeyspaces()
     {
-        return ImmutableList.copyOf(getNonSystemKeyspacesSet());
+        return distributedKeyspaces.filter(keyspace -> keyspace.params.replication.klass != LocalStrategy.class);
     }
 
     /**
-     * @return a collection of keyspaces that do not use LocalStrategy for replication
+     * Returns user keyspaces, that is all but {@link SchemaConstants#LOCAL_SYSTEM_KEYSPACE_NAMES},
+     * {@link SchemaConstants#REPLICATED_SYSTEM_KEYSPACE_NAMES} or virtual keyspaces.
      */
-    public List<String> getNonLocalStrategyKeyspaces()
+    public Keyspaces getUserKeyspaces()
     {
-        return keyspaces.stream()
-                        .filter(keyspace -> keyspace.params.replication.klass != LocalStrategy.class)
-                        .map(keyspace -> keyspace.name)
-                        .collect(Collectors.toList());
-    }
-
-    /**
-     * @return collection of the user defined keyspaces
-     */
-    public List<String> getUserKeyspaces()
-    {
-        return ImmutableList.copyOf(Sets.difference(getNonSystemKeyspacesSet(), SchemaConstants.REPLICATED_SYSTEM_KEYSPACE_NAMES));
+        return distributedKeyspaces.without(SchemaConstants.REPLICATED_SYSTEM_KEYSPACE_NAMES);
     }
 
     /**
      * Get metadata about keyspace inner ColumnFamilies
      *
      * @param keyspaceName The name of the keyspace
-     *
      * @return metadata about ColumnFamilies that belong to the given keyspace
      */
     public Iterable<TableMetadata> getTablesAndViews(String keyspaceName)
     {
-        assert keyspaceName != null;
-        KeyspaceMetadata ksm = keyspaces.getNullable(keyspaceName);
-        assert ksm != null;
+        Preconditions.checkNotNull(keyspaceName);
+        KeyspaceMetadata ksm = ObjectUtils.getFirstNonNull(() -> distributedKeyspaces.getNullable(keyspaceName),
+                                                           () -> localKeyspaces.getNullable(keyspaceName));
+        Preconditions.checkNotNull(ksm, "Keyspace %s not found", keyspaceName);
         return ksm.tablesAndViews();
     }
 
     /**
      * @return collection of the all keyspace names registered in the system (system and non-system)
      */
-    public Set<String> getKeyspaces()
+    public ImmutableSet<String> getKeyspaces()
     {
-        return keyspaces.names();
+        return distributedAndLocalKeyspaces().names();
+    }
+
+    public Keyspaces getLocalKeyspaces()
+    {
+        return localKeyspaces;
     }
 
     /* TableMetadata/Ref query/control methods */
@@ -390,33 +379,24 @@
     @Override
     public TableMetadataRef getTableMetadataRef(String keyspace, String table)
     {
-        TableMetadata tm = getTableMetadata(keyspace, table);
-        return tm == null
-             ? null
-             : metadataRefs.get(tm.id);
+        return tableMetadataRefCache.getTableMetadataRef(keyspace, table);
     }
 
     public TableMetadataRef getIndexTableMetadataRef(String keyspace, String index)
     {
-        return indexMetadataRefs.get(Pair.create(keyspace, index));
-    }
-
-    Map<Pair<String, String>, TableMetadataRef> getIndexTableMetadataRefs()
-    {
-        return indexMetadataRefs;
+        return tableMetadataRefCache.getIndexTableMetadataRef(keyspace, index);
     }
 
     /**
      * Get Table metadata by its identifier
      *
      * @param id table or view identifier
-     *
      * @return metadata about Table or View
      */
     @Override
     public TableMetadataRef getTableMetadataRef(TableId id)
     {
-        return metadataRefs.get(id);
+        return tableMetadataRefCache.getTableMetadataRef(id);
     }
 
     @Override
@@ -425,19 +405,13 @@
         return getTableMetadataRef(descriptor.ksname, descriptor.cfname);
     }
 
-    Map<TableId, TableMetadataRef> getTableMetadataRefs()
-    {
-        return metadataRefs;
-    }
-
     /**
      * Given a keyspace name and table name, get the table
      * meta data. If the keyspace name or table name is not valid
      * this function returns null.
      *
      * @param keyspace The keyspace name
-     * @param table The table name
-     *
+     * @param table    The table name
      * @return TableMetadata object or null if it wasn't found
      */
     public TableMetadata getTableMetadata(String keyspace, String table)
@@ -447,15 +421,16 @@
 
         KeyspaceMetadata ksm = getKeyspaceMetadata(keyspace);
         return ksm == null
-             ? null
-             : ksm.getTableOrViewNullable(table);
+               ? null
+               : ksm.getTableOrViewNullable(table);
     }
 
     @Override
     public TableMetadata getTableMetadata(TableId id)
     {
-        TableMetadata table = keyspaces.getTableOrViewNullable(id);
-        return null != table ? table : VirtualKeyspaceRegistry.instance.getTableMetadataNullable(id);
+        return ObjectUtils.getFirstNonNull(() -> distributedKeyspaces.getTableOrViewNullable(id),
+                                           () -> localKeyspaces.getTableOrViewNullable(id),
+                                           () -> VirtualKeyspaceRegistry.instance.getTableMetadataNullable(id));
     }
 
     public TableMetadata validateTable(String keyspaceName, String tableName)
@@ -495,14 +470,14 @@
 
         KeyspaceMetadata ksm = getKeyspaceMetadata(name.keyspace);
         return ksm == null
-             ? Collections.emptyList()
-             : ksm.functions.get(name);
+               ? Collections.emptyList()
+               : ksm.functions.get(name);
     }
 
     /**
      * Find the function with the specified name
      *
-     * @param name fully qualified function name
+     * @param name     fully qualified function name
      * @param argTypes function argument types
      * @return an empty {@link Optional} if the keyspace or the function name are not found;
      *         a non-empty optional of {@link Function} otherwise
@@ -514,8 +489,8 @@
 
         KeyspaceMetadata ksm = getKeyspaceMetadata(name.keyspace);
         return ksm == null
-             ? Optional.empty()
-             : ksm.functions.find(name, argTypes);
+               ? Optional.empty()
+               : ksm.functions.find(name, argTypes);
     }
 
     /* Version control */
@@ -547,198 +522,158 @@
     /**
      * Read schema from system keyspace and calculate MD5 digest of every row, resulting digest
      * will be converted into UUID which would act as content-based version of the schema.
-     * 
+     *
      * See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes
      */
-    public synchronized void updateVersion()
+    private synchronized void updateVersion(UUID version)
     {
-        version = SchemaKeyspace.calculateSchemaDigest();
-        SystemKeyspace.updateSchemaVersion(version);
+        this.version = version;
         SchemaDiagnostics.versionUpdated(this);
     }
 
-    /*
-     * Like updateVersion, but also announces via gossip
-     */
-    public void updateVersionAndAnnounce()
-    {
-        updateVersion();
-        passiveAnnounceVersion();
-    }
-
     /**
-     * Announce my version passively over gossip.
-     * Used to notify nodes as they arrive in the cluster.
+     * When we receive {@link SchemaTransformationResult} in a callback invocation, the transformation result includes
+     * pre-transformation and post-transformation schema metadata and versions, and a diff between them. Basically
+     * we expect that the local image of the schema metadata ({@link #distributedKeyspaces}) and version ({@link #version})
+     * are the same as pre-transformation. However, that might not always be true, because some changes might not have
+     * been applied completely due to errors. This method emits a warning in such a case and recalculates the diff so
+     * that it contains the changes between the local schema image ({@link #distributedKeyspaces}) and the
+     * post-transformation schema. That recalculation allows the subsequent updates in the callback to recover the schema.
+     *
+     * @param result the incoming transformation result
+     * @return recalculated transformation result if needed, otherwise the provided incoming result
      */
-    private void passiveAnnounceVersion()
+    private synchronized SchemaTransformationResult localDiff(SchemaTransformationResult result)
     {
-        Gossiper.instance.addLocalApplicationState(ApplicationState.SCHEMA, StorageService.instance.valueFactory.schema(version));
-        SchemaDiagnostics.versionAnnounced(this);
-    }
+        Keyspaces localBefore = distributedKeyspaces;
+        UUID localVersion = version;
+        boolean needNewDiff = false;
 
-    /**
-     * Clear all KS/CF metadata and reset version.
-     */
-    public synchronized void clear()
-    {
-        getNonSystemKeyspaces().forEach(k -> unload(getKeyspaceMetadata(k)));
-        updateVersionAndAnnounce();
-        SchemaDiagnostics.schemataCleared(this);
+        if (!Objects.equals(localBefore, result.before.getKeyspaces()))
+        {
+            logger.info("Schema was different to what we expected: {}", Keyspaces.diff(result.before.getKeyspaces(), localBefore));
+            needNewDiff = true;
+        }
+
+        if (!Objects.equals(localVersion, result.before.getVersion()))
+        {
+            logger.info("Schema version was different to what we expected: {} != {}", result.before.getVersion(), localVersion);
+            needNewDiff = true;
+        }
+
+        if (needNewDiff)
+            return new SchemaTransformationResult(new DistributedSchema(localBefore, localVersion),
+                                                  result.after,
+                                                  Keyspaces.diff(localBefore, result.after.getKeyspaces()));
+
+        return result;
     }
 
     /*
      * Reload schema from local disk. Useful if a user made changes to schema tables by hand, or has suspicion that
      * in-memory representation got out of sync somehow with what's on disk.
      */
-    public synchronized void reloadSchemaAndAnnounceVersion()
+    public void reloadSchemaAndAnnounceVersion()
     {
-        Keyspaces before = keyspaces.filter(k -> !SchemaConstants.isLocalSystemKeyspace(k.name));
-        Keyspaces after = SchemaKeyspace.fetchNonSystemKeyspaces();
-        merge(Keyspaces.diff(before, after));
-        updateVersionAndAnnounce();
+        updateHandler.reset(true);
     }
 
     /**
      * Merge remote schema in form of mutations with local and mutate ks/cf metadata objects
      * (which also involves fs operations on add/drop ks/cf)
      *
-     * @param mutations the schema changes to apply
-     *
      * @throws ConfigurationException If one of metadata attributes has invalid value
      */
-    public synchronized void mergeAndAnnounceVersion(Collection<Mutation> mutations)
+    @VisibleForTesting
+    public synchronized void mergeAndUpdateVersion(SchemaTransformationResult result, boolean dropData)
     {
-        merge(mutations);
-        updateVersionAndAnnounce();
+        result = localDiff(result);
+        schemaChangeNotifier.notifyPreChanges(result);
+        merge(result.diff, dropData);
+        updateVersion(result.after.getVersion());
+        if (online)
+            SystemKeyspace.updateSchemaVersion(result.after.getVersion());
+    }
+
+    public SchemaTransformationResult transform(SchemaTransformation transformation)
+    {
+        return transform(transformation, false);
+    }
+
+    public SchemaTransformationResult transform(SchemaTransformation transformation, boolean local)
+    {
+        return updateHandler.apply(transformation, local);
     }
 
     /**
-     * See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes
+     * Clear all locally stored schema information and fetch schema from another node.
+     * Called by a user (via JMX) who wants to get rid of schema disagreement.
      */
-    public synchronized TransformationResult transform(SchemaTransformation transformation, boolean locally, long now) throws UnknownHostException
+    public void resetLocalSchema()
     {
-        KeyspacesDiff diff;
+        logger.debug("Clearing local schema...");
+
+        if (Gossiper.instance.getLiveMembers().stream().allMatch(ep -> FBUtilities.getBroadcastAddressAndPort().equals(ep)))
+            throw new InvalidRequestException("Cannot reset local schema when there are no other live nodes");
+
+        Awaitable clearCompletion = updateHandler.clear();
         try
         {
-            Keyspaces before = keyspaces;
-            Keyspaces after = transformation.apply(before);
-            diff = Keyspaces.diff(before, after);
+            if (!clearCompletion.await(StorageService.SCHEMA_DELAY_MILLIS, TimeUnit.MILLISECONDS))
+            {
+                throw new RuntimeException("Schema reset failed - no schema received from other nodes");
+            }
         }
-        catch (RuntimeException e)
+        catch (InterruptedException e)
         {
-            return new TransformationResult(e);
+            Thread.currentThread().interrupt();
+            throw new RuntimeException("Failed to reset schema - the thread has been interrupted");
         }
-
-        if (diff.isEmpty())
-            return new TransformationResult(diff, Collections.emptyList());
-
-        Collection<Mutation> mutations = SchemaKeyspace.convertSchemaDiffToMutations(diff, now);
-        SchemaKeyspace.applyChanges(mutations);
-
-        merge(diff);
-        updateVersion();
-        if (!locally)
-            passiveAnnounceVersion();
-
-        return new TransformationResult(diff, mutations);
+        SchemaDiagnostics.schemaCleared(this);
+        logger.info("Local schema reset completed");
     }
 
-    public static final class TransformationResult
+    private void merge(KeyspacesDiff diff, boolean removeData)
     {
-        public final boolean success;
-        public final RuntimeException exception;
-        public final KeyspacesDiff diff;
-        public final Collection<Mutation> mutations;
-
-        private TransformationResult(boolean success, RuntimeException exception, KeyspacesDiff diff, Collection<Mutation> mutations)
-        {
-            this.success = success;
-            this.exception = exception;
-            this.diff = diff;
-            this.mutations = mutations;
-        }
-
-        TransformationResult(RuntimeException exception)
-        {
-            this(false, exception, null, null);
-        }
-
-        TransformationResult(KeyspacesDiff diff, Collection<Mutation> mutations)
-        {
-            this(true, null, diff, mutations);
-        }
-    }
-
-    /**
-     * See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes
-     */
-    synchronized void merge(Collection<Mutation> mutations)
-    {
-        // only compare the keyspaces affected by this set of schema mutations
-        Set<String> affectedKeyspaces = SchemaKeyspace.affectedKeyspaces(mutations);
-
-        // fetch the current state of schema for the affected keyspaces only
-        Keyspaces before = keyspaces.filter(k -> affectedKeyspaces.contains(k.name));
-
-        // apply the schema mutations
-        SchemaKeyspace.applyChanges(mutations);
-
-        // apply the schema mutations and fetch the new versions of the altered keyspaces
-        Keyspaces after = SchemaKeyspace.fetchKeyspaces(affectedKeyspaces);
-
-        merge(Keyspaces.diff(before, after));
-    }
-
-    private void merge(KeyspacesDiff diff)
-    {
-        diff.dropped.forEach(this::dropKeyspace);
+        diff.dropped.forEach(keyspace -> dropKeyspace(keyspace, removeData));
         diff.created.forEach(this::createKeyspace);
-        diff.altered.forEach(this::alterKeyspace);
+        diff.altered.forEach(delta -> alterKeyspace(delta, removeData));
     }
 
-    private void alterKeyspace(KeyspaceDiff delta)
+    private void alterKeyspace(KeyspaceDiff delta, boolean dropData)
     {
         SchemaDiagnostics.keyspaceAltering(this, delta);
 
-        // drop tables and views
-        delta.views.dropped.forEach(this::dropView);
-        delta.tables.dropped.forEach(this::dropTable);
+        boolean initialized = Keyspace.isInitialized();
+
+        Keyspace keyspace = initialized ? getKeyspaceInstance(delta.before.name) : null;
+        if (initialized)
+        {
+            assert keyspace != null;
+            assert delta.before.name.equals(delta.after.name);
+
+            // drop tables and views
+            delta.views.dropped.forEach(v -> dropView(keyspace, v, dropData));
+            delta.tables.dropped.forEach(t -> dropTable(keyspace, t, dropData));
+        }
 
         load(delta.after);
 
-        // add tables and views
-        delta.tables.created.forEach(this::createTable);
-        delta.views.created.forEach(this::createView);
+        if (initialized)
+        {
+            // add tables and views
+            delta.tables.created.forEach(t -> createTable(keyspace, t));
+            delta.views.created.forEach(v -> createView(keyspace, v));
 
-        // update tables and views
-        delta.tables.altered.forEach(diff -> alterTable(diff.after));
-        delta.views.altered.forEach(diff -> alterView(diff.after));
+            // update tables and views
+            delta.tables.altered.forEach(diff -> alterTable(keyspace, diff.after));
+            delta.views.altered.forEach(diff -> alterView(keyspace, diff.after));
 
-        // deal with all added, and altered views
-        Keyspace.open(delta.after.name).viewManager.reload(true);
+            // deal with all added, and altered views
+            Keyspace.open(delta.after.name, this, true).viewManager.reload(true);
+        }
 
-        // notify on everything dropped
-        delta.udas.dropped.forEach(uda -> notifyDropAggregate((UDAggregate) uda));
-        delta.udfs.dropped.forEach(udf -> notifyDropFunction((UDFunction) udf));
-        delta.views.dropped.forEach(this::notifyDropView);
-        delta.tables.dropped.forEach(this::notifyDropTable);
-        delta.types.dropped.forEach(this::notifyDropType);
-
-        // notify on everything created
-        delta.types.created.forEach(this::notifyCreateType);
-        delta.tables.created.forEach(this::notifyCreateTable);
-        delta.views.created.forEach(this::notifyCreateView);
-        delta.udfs.created.forEach(udf -> notifyCreateFunction((UDFunction) udf));
-        delta.udas.created.forEach(uda -> notifyCreateAggregate((UDAggregate) uda));
-
-        // notify on everything altered
-        if (!delta.before.params.equals(delta.after.params))
-            notifyAlterKeyspace(delta.before, delta.after);
-        delta.types.altered.forEach(diff -> notifyAlterType(diff.before, diff.after));
-        delta.tables.altered.forEach(diff -> notifyAlterTable(diff.before, diff.after));
-        delta.views.altered.forEach(diff -> notifyAlterView(diff.before, diff.after));
-        delta.udfs.altered.forEach(diff -> notifyAlterFunction(diff.before, diff.after));
-        delta.udas.altered.forEach(diff -> notifyAlterAggregate(diff.before, diff.after));
+        schemaChangeNotifier.notifyKeyspaceAltered(delta, dropData);
         SchemaDiagnostics.keyspaceAltered(this, delta);
     }
 
@@ -746,192 +681,100 @@
     {
         SchemaDiagnostics.keyspaceCreating(this, keyspace);
         load(keyspace);
-        Keyspace.open(keyspace.name);
+        if (Keyspace.isInitialized())
+        {
+            Keyspace.open(keyspace.name, this, true);
+        }
 
-        notifyCreateKeyspace(keyspace);
-        keyspace.types.forEach(this::notifyCreateType);
-        keyspace.tables.forEach(this::notifyCreateTable);
-        keyspace.views.forEach(this::notifyCreateView);
-        keyspace.functions.udfs().forEach(this::notifyCreateFunction);
-        keyspace.functions.udas().forEach(this::notifyCreateAggregate);
+        schemaChangeNotifier.notifyKeyspaceCreated(keyspace);
         SchemaDiagnostics.keyspaceCreated(this, keyspace);
 
         // If keyspace has been added, we need to recalculate pending ranges to make sure
         // we send mutations to the correct set of bootstrapping nodes. Refer CASSANDRA-15433.
-        if (keyspace.params.replication.klass != LocalStrategy.class)
+        if (keyspace.params.replication.klass != LocalStrategy.class && Keyspace.isInitialized())
         {
-            PendingRangeCalculatorService.calculatePendingRanges(Keyspace.open(keyspace.name).getReplicationStrategy(), keyspace.name);
+            PendingRangeCalculatorService.calculatePendingRanges(Keyspace.open(keyspace.name, this, true).getReplicationStrategy(), keyspace.name);
         }
     }
 
-    private void dropKeyspace(KeyspaceMetadata keyspace)
+    private void dropKeyspace(KeyspaceMetadata keyspaceMetadata, boolean dropData)
     {
-        SchemaDiagnostics.keyspaceDroping(this, keyspace);
-        keyspace.views.forEach(this::dropView);
-        keyspace.tables.forEach(this::dropTable);
+        SchemaDiagnostics.keyspaceDropping(this, keyspaceMetadata);
 
-        // remove the keyspace from the static instances.
-        Keyspace.clear(keyspace.name);
-        unload(keyspace);
-        Keyspace.writeOrder.awaitNewBarrier();
+        boolean initialized = Keyspace.isInitialized();
+        Keyspace keyspace = initialized ? Keyspace.open(keyspaceMetadata.name, this, false) : null;
+        if (initialized)
+        {
+            if (keyspace == null)
+                return;
 
-        keyspace.functions.udas().forEach(this::notifyDropAggregate);
-        keyspace.functions.udfs().forEach(this::notifyDropFunction);
-        keyspace.views.forEach(this::notifyDropView);
-        keyspace.tables.forEach(this::notifyDropTable);
-        keyspace.types.forEach(this::notifyDropType);
-        notifyDropKeyspace(keyspace);
-        SchemaDiagnostics.keyspaceDroped(this, keyspace);
+            keyspaceMetadata.views.forEach(v -> dropView(keyspace, v, dropData));
+            keyspaceMetadata.tables.forEach(t -> dropTable(keyspace, t, dropData));
+
+            // remove the keyspace from the static instances
+            Keyspace unloadedKeyspace = maybeRemoveKeyspaceInstance(keyspaceMetadata.name, ks -> {
+                ks.unload(dropData);
+                unload(keyspaceMetadata);
+            });
+            assert unloadedKeyspace == keyspace;
+
+            Keyspace.writeOrder.awaitNewBarrier();
+        }
+        else
+        {
+            unload(keyspaceMetadata);
+        }
+
+        schemaChangeNotifier.notifyKeyspaceDropped(keyspaceMetadata, dropData);
+        SchemaDiagnostics.keyspaceDropped(this, keyspaceMetadata);
     }
 
-    private void dropView(ViewMetadata metadata)
+    private void dropView(Keyspace keyspace, ViewMetadata metadata, boolean dropData)
     {
-        Keyspace.open(metadata.keyspace()).viewManager.dropView(metadata.name());
-        dropTable(metadata.metadata);
+        keyspace.viewManager.dropView(metadata.name());
+        dropTable(keyspace, metadata.metadata, dropData);
     }
 
-    private void dropTable(TableMetadata metadata)
+    private void dropTable(Keyspace keyspace, TableMetadata metadata, boolean dropData)
     {
         SchemaDiagnostics.tableDropping(this, metadata);
-        ColumnFamilyStore cfs = Keyspace.open(metadata.keyspace).getColumnFamilyStore(metadata.name);
-        assert cfs != null;
-        // make sure all the indexes are dropped, or else.
-        cfs.indexManager.markAllIndexesRemoved();
-        CompactionManager.instance.interruptCompactionFor(Collections.singleton(metadata), (sstable) -> true, true);
-        if (DatabaseDescriptor.isAutoSnapshot())
-            cfs.snapshot(Keyspace.getTimestampedSnapshotNameWithPrefix(cfs.name, ColumnFamilyStore.SNAPSHOT_DROP_PREFIX));
-        CommitLog.instance.forceRecycleAllSegments(Collections.singleton(metadata.id));
-        Keyspace.open(metadata.keyspace).dropCf(metadata.id);
+        keyspace.dropCf(metadata.id, dropData);
         SchemaDiagnostics.tableDropped(this, metadata);
     }
 
-    private void createTable(TableMetadata table)
+    private void createTable(Keyspace keyspace, TableMetadata table)
     {
         SchemaDiagnostics.tableCreating(this, table);
-        Keyspace.open(table.keyspace).initCf(metadataRefs.get(table.id), true);
+        keyspace.initCf(tableMetadataRefCache.getTableMetadataRef(table.id), true);
         SchemaDiagnostics.tableCreated(this, table);
     }
 
-    private void createView(ViewMetadata view)
+    private void createView(Keyspace keyspace, ViewMetadata view)
     {
-        Keyspace.open(view.keyspace()).initCf(metadataRefs.get(view.metadata.id), true);
+        SchemaDiagnostics.tableCreating(this, view.metadata);
+        keyspace.initCf(tableMetadataRefCache.getTableMetadataRef(view.metadata.id), true);
+        SchemaDiagnostics.tableCreated(this, view.metadata);
     }
 
-    private void alterTable(TableMetadata updated)
+    private void alterTable(Keyspace keyspace, TableMetadata updated)
     {
         SchemaDiagnostics.tableAltering(this, updated);
-        Keyspace.open(updated.keyspace).getColumnFamilyStore(updated.name).reload();
+        keyspace.getColumnFamilyStore(updated.name).reload();
         SchemaDiagnostics.tableAltered(this, updated);
     }
 
-    private void alterView(ViewMetadata updated)
+    private void alterView(Keyspace keyspace, ViewMetadata updated)
     {
-        Keyspace.open(updated.keyspace()).getColumnFamilyStore(updated.name()).reload();
+        SchemaDiagnostics.tableAltering(this, updated.metadata);
+        keyspace.getColumnFamilyStore(updated.name()).reload();
+        SchemaDiagnostics.tableAltered(this, updated.metadata);
     }
 
-    private void notifyCreateKeyspace(KeyspaceMetadata ksm)
+    public Map<UUID, Set<InetAddressAndPort>> getOutstandingSchemaVersions()
     {
-        changeListeners.forEach(l -> l.onCreateKeyspace(ksm.name));
+        return updateHandler instanceof DefaultSchemaUpdateHandler
+               ? ((DefaultSchemaUpdateHandler) updateHandler).getOutstandingSchemaVersions()
+               : Collections.emptyMap();
     }
 
-    private void notifyCreateTable(TableMetadata metadata)
-    {
-        changeListeners.forEach(l -> l.onCreateTable(metadata.keyspace, metadata.name));
-    }
-
-    private void notifyCreateView(ViewMetadata view)
-    {
-        changeListeners.forEach(l -> l.onCreateView(view.keyspace(), view.name()));
-    }
-
-    private void notifyCreateType(UserType ut)
-    {
-        changeListeners.forEach(l -> l.onCreateType(ut.keyspace, ut.getNameAsString()));
-    }
-
-    private void notifyCreateFunction(UDFunction udf)
-    {
-        changeListeners.forEach(l -> l.onCreateFunction(udf.name().keyspace, udf.name().name, udf.argTypes()));
-    }
-
-    private void notifyCreateAggregate(UDAggregate udf)
-    {
-        changeListeners.forEach(l -> l.onCreateAggregate(udf.name().keyspace, udf.name().name, udf.argTypes()));
-    }
-
-    private void notifyAlterKeyspace(KeyspaceMetadata before, KeyspaceMetadata after)
-    {
-        changeListeners.forEach(l -> l.onAlterKeyspace(after.name));
-    }
-
-    private void notifyAlterTable(TableMetadata before, TableMetadata after)
-    {
-        boolean changeAffectedPreparedStatements = before.changeAffectsPreparedStatements(after);
-        changeListeners.forEach(l -> l.onAlterTable(after.keyspace, after.name, changeAffectedPreparedStatements));
-    }
-
-    private void notifyAlterView(ViewMetadata before, ViewMetadata after)
-    {
-        boolean changeAffectedPreparedStatements = before.metadata.changeAffectsPreparedStatements(after.metadata);
-        changeListeners.forEach(l ->l.onAlterView(after.keyspace(), after.name(), changeAffectedPreparedStatements));
-    }
-
-    private void notifyAlterType(UserType before, UserType after)
-    {
-        changeListeners.forEach(l -> l.onAlterType(after.keyspace, after.getNameAsString()));
-    }
-
-    private void notifyAlterFunction(UDFunction before, UDFunction after)
-    {
-        changeListeners.forEach(l -> l.onAlterFunction(after.name().keyspace, after.name().name, after.argTypes()));
-    }
-
-    private void notifyAlterAggregate(UDAggregate before, UDAggregate after)
-    {
-        changeListeners.forEach(l -> l.onAlterAggregate(after.name().keyspace, after.name().name, after.argTypes()));
-    }
-
-    private void notifyDropKeyspace(KeyspaceMetadata ksm)
-    {
-        changeListeners.forEach(l -> l.onDropKeyspace(ksm.name));
-    }
-
-    private void notifyDropTable(TableMetadata metadata)
-    {
-        changeListeners.forEach(l -> l.onDropTable(metadata.keyspace, metadata.name));
-    }
-
-    private void notifyDropView(ViewMetadata view)
-    {
-        changeListeners.forEach(l -> l.onDropView(view.keyspace(), view.name()));
-    }
-
-    private void notifyDropType(UserType ut)
-    {
-        changeListeners.forEach(l -> l.onDropType(ut.keyspace, ut.getNameAsString()));
-    }
-
-    private void notifyDropFunction(UDFunction udf)
-    {
-        changeListeners.forEach(l -> l.onDropFunction(udf.name().keyspace, udf.name().name, udf.argTypes()));
-    }
-
-    private void notifyDropAggregate(UDAggregate udf)
-    {
-        changeListeners.forEach(l -> l.onDropAggregate(udf.name().keyspace, udf.name().name, udf.argTypes()));
-    }
-
-
-    /**
-     * Converts the given schema version to a string. Returns {@code unknown}, if {@code version} is {@code null}
-     * or {@code "(empty)"}, if {@code version} refers to an {@link SchemaConstants#emptyVersion empty) schema.
-     */
-    public static String schemaVersionToString(UUID version)
-    {
-        return version == null
-               ? "unknown"
-               : SchemaConstants.emptyVersion.equals(version)
-                 ? "(empty)"
-                 : version.toString();
-    }
 }
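A minimal sketch of the new transformation entry point, for review context only (not part of the patch). It assumes SchemaTransformation in this branch is still the single-method Keyspaces -> Keyspaces function visible in the removed transform() code; the "demo_ks" name and the logger are illustrative:

    // Hypothetical transformation: drop a keyspace named "demo_ks" from the schema image.
    SchemaTransformation dropDemo = keyspaces -> keyspaces.filter(ks -> !ks.name.equals("demo_ks"));

    // Schema delegates to its SchemaUpdateHandler; the result carries the before/after
    // schema and the diff between them, as consumed by mergeAndUpdateVersion above.
    SchemaTransformationResult result = Schema.instance.transform(dropDemo);
    if (!result.diff.isEmpty())
        logger.info("Schema changed: {} -> {}", result.before.getVersion(), result.after.getVersion());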
diff --git a/src/java/org/apache/cassandra/schema/SchemaAnnouncementEvent.java b/src/java/org/apache/cassandra/schema/SchemaAnnouncementEvent.java
index 4e0bd68..ea6b9b6 100644
--- a/src/java/org/apache/cassandra/schema/SchemaAnnouncementEvent.java
+++ b/src/java/org/apache/cassandra/schema/SchemaAnnouncementEvent.java
@@ -78,12 +78,12 @@
         HashMap<String, Serializable> ret = new HashMap<>();
         if (schemaDestinationEndpoints != null)
         {
-            Set<String> eps = schemaDestinationEndpoints.stream().map(InetAddressAndPort::toString).collect(Collectors.toSet());
+            Set<String> eps = schemaDestinationEndpoints.stream().map(Object::toString).collect(Collectors.toSet());
             ret.put("endpointDestinations", new HashSet<>(eps));
         }
         if (schemaEndpointsIgnored != null)
         {
-            Set<String> eps = schemaEndpointsIgnored.stream().map(InetAddressAndPort::toString).collect(Collectors.toSet());
+            Set<String> eps = schemaEndpointsIgnored.stream().map(Object::toString).collect(Collectors.toSet());
             ret.put("endpointIgnored", new HashSet<>(eps));
         }
         if (statement != null)
diff --git a/src/java/org/apache/cassandra/schema/SchemaChangeListener.java b/src/java/org/apache/cassandra/schema/SchemaChangeListener.java
index 4390309..55fa25f 100644
--- a/src/java/org/apache/cassandra/schema/SchemaChangeListener.java
+++ b/src/java/org/apache/cassandra/schema/SchemaChangeListener.java
@@ -17,86 +17,94 @@
  */
 package org.apache.cassandra.schema;
 
-import java.util.List;
+import org.apache.cassandra.cql3.functions.UDAggregate;
+import org.apache.cassandra.cql3.functions.UDFunction;
+import org.apache.cassandra.db.marshal.UserType;
 
-import org.apache.cassandra.db.marshal.AbstractType;
-
-public abstract class SchemaChangeListener
+public interface SchemaChangeListener
 {
-    public void onCreateKeyspace(String keyspace)
+    default void onCreateKeyspace(KeyspaceMetadata keyspace)
     {
     }
 
-    public void onCreateTable(String keyspace, String table)
+    default void onCreateTable(TableMetadata table)
     {
     }
 
-    public void onCreateView(String keyspace, String view)
+    default void onCreateView(ViewMetadata view)
     {
-        onCreateTable(keyspace, view);
+        onCreateTable(view.metadata);
     }
 
-    public void onCreateType(String keyspace, String type)
+    default void onCreateType(UserType type)
     {
     }
 
-    public void onCreateFunction(String keyspace, String function, List<AbstractType<?>> argumentTypes)
+    default void onCreateFunction(UDFunction function)
     {
     }
 
-    public void onCreateAggregate(String keyspace, String aggregate, List<AbstractType<?>> argumentTypes)
+    default void onCreateAggregate(UDAggregate aggregate)
     {
     }
 
-    public void onAlterKeyspace(String keyspace)
+    default void onAlterKeyspace(KeyspaceMetadata before, KeyspaceMetadata after)
+    {
+    }
+
+    default void onPreAlterTable(TableMetadata before, TableMetadata after)
     {
     }
 
     // the boolean flag indicates whether the change that triggered this event may have a substantive
     // impact on statements using the column family.
-    public void onAlterTable(String keyspace, String table, boolean affectsStatements)
+    default void onAlterTable(TableMetadata before, TableMetadata after, boolean affectStatements)
     {
     }
 
-    public void onAlterView(String keyspace, String view, boolean affectsStataments)
-    {
-        onAlterTable(keyspace, view, affectsStataments);
-    }
-
-    public void onAlterType(String keyspace, String type)
+    default void onPreAlterView(ViewMetadata before, ViewMetadata after)
     {
     }
 
-    public void onAlterFunction(String keyspace, String function, List<AbstractType<?>> argumentTypes)
+    default void onAlterView(ViewMetadata before, ViewMetadata after, boolean affectStatements)
+    {
+        onAlterTable(before.metadata, after.metadata, affectStatements);
+    }
+
+    default void onAlterType(UserType before, UserType after)
     {
     }
 
-    public void onAlterAggregate(String keyspace, String aggregate, List<AbstractType<?>> argumentTypes)
+    default void onAlterFunction(UDFunction before, UDFunction after)
     {
     }
 
-    public void onDropKeyspace(String keyspace)
+    default void onAlterAggregate(UDAggregate before, UDAggregate after)
     {
     }
 
-    public void onDropTable(String keyspace, String table)
+    default void onDropKeyspace(KeyspaceMetadata keyspace, boolean dropData)
     {
     }
 
-    public void onDropView(String keyspace, String view)
-    {
-        onDropTable(keyspace, view);
-    }
-
-    public void onDropType(String keyspace, String type)
+    default void onDropTable(TableMetadata table, boolean dropData)
     {
     }
 
-    public void onDropFunction(String keyspace, String function, List<AbstractType<?>> argumentTypes)
+    default void onDropView(ViewMetadata view, boolean dropData)
+    {
+        onDropTable(view.metadata, dropData);
+    }
+
+    default void onDropType(UserType type)
     {
     }
 
-    public void onDropAggregate(String keyspace, String aggregate, List<AbstractType<?>> argumentTypes)
+    default void onDropFunction(UDFunction function)
+    {
+    }
+
+    default void onDropAggregate(UDAggregate aggregate)
     {
     }
 }
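Since SchemaChangeListener is now an interface whose callbacks default to no-ops and receive full metadata objects, a listener only overrides the events it cares about. A minimal sketch (the class name and println output are illustrative; the overridden signatures are the ones declared above):

    public class LoggingSchemaListener implements SchemaChangeListener
    {
        @Override
        public void onCreateTable(TableMetadata table)
        {
            // TableMetadata carries keyspace/name directly, so no keyspace/table string pair is needed any more.
            System.out.println("created " + table.keyspace + '.' + table.name);
        }

        @Override
        public void onDropTable(TableMetadata table, boolean dropData)
        {
            System.out.println("dropped " + table.keyspace + '.' + table.name + " (dropData=" + dropData + ')');
        }
    }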
diff --git a/src/java/org/apache/cassandra/schema/SchemaChangeNotifier.java b/src/java/org/apache/cassandra/schema/SchemaChangeNotifier.java
new file mode 100644
index 0000000..c4537e1
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SchemaChangeNotifier.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import org.apache.cassandra.cql3.functions.UDAggregate;
+import org.apache.cassandra.cql3.functions.UDFunction;
+import org.apache.cassandra.db.marshal.UserType;
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
+
+/**
+ * Registers schema change listeners and sends them notifications. The interface of this class takes only high-level
+ * keyspace metadata changes; it iterates over all keyspace elements and distributes the appropriate notifications about
+ * changes to those elements (tables, views, types, functions).
+ */
+public class SchemaChangeNotifier
+{
+    private final List<SchemaChangeListener> changeListeners = new CopyOnWriteArrayList<>();
+
+    public void registerListener(SchemaChangeListener listener)
+    {
+        changeListeners.add(listener);
+    }
+
+    @SuppressWarnings("unused")
+    public void unregisterListener(SchemaChangeListener listener)
+    {
+        changeListeners.remove(listener);
+    }
+
+    public void notifyKeyspaceCreated(KeyspaceMetadata keyspace)
+    {
+        notifyCreateKeyspace(keyspace);
+        keyspace.types.forEach(this::notifyCreateType);
+        keyspace.tables.forEach(this::notifyCreateTable);
+        keyspace.views.forEach(this::notifyCreateView);
+        keyspace.functions.udfs().forEach(this::notifyCreateFunction);
+        keyspace.functions.udas().forEach(this::notifyCreateAggregate);
+    }
+
+    public void notifyKeyspaceAltered(KeyspaceMetadata.KeyspaceDiff delta, boolean dropData)
+    {
+        // notify on everything dropped
+        delta.udas.dropped.forEach(uda -> notifyDropAggregate((UDAggregate) uda));
+        delta.udfs.dropped.forEach(udf -> notifyDropFunction((UDFunction) udf));
+        delta.views.dropped.forEach(view -> notifyDropView(view, dropData));
+        delta.tables.dropped.forEach(metadata -> notifyDropTable(metadata, dropData));
+        delta.types.dropped.forEach(this::notifyDropType);
+
+        // notify on everything created
+        delta.types.created.forEach(this::notifyCreateType);
+        delta.tables.created.forEach(this::notifyCreateTable);
+        delta.views.created.forEach(this::notifyCreateView);
+        delta.udfs.created.forEach(udf -> notifyCreateFunction((UDFunction) udf));
+        delta.udas.created.forEach(uda -> notifyCreateAggregate((UDAggregate) uda));
+
+        // notify on everything altered
+        if (!delta.before.params.equals(delta.after.params))
+            notifyAlterKeyspace(delta.before, delta.after);
+        delta.types.altered.forEach(diff -> notifyAlterType(diff.before, diff.after));
+        delta.tables.altered.forEach(diff -> notifyAlterTable(diff.before, diff.after));
+        delta.views.altered.forEach(diff -> notifyAlterView(diff.before, diff.after));
+        delta.udfs.altered.forEach(diff -> notifyAlterFunction(diff.before, diff.after));
+        delta.udas.altered.forEach(diff -> notifyAlterAggregate(diff.before, diff.after));
+    }
+
+    public void notifyKeyspaceDropped(KeyspaceMetadata keyspace, boolean dropData)
+    {
+        keyspace.functions.udas().forEach(this::notifyDropAggregate);
+        keyspace.functions.udfs().forEach(this::notifyDropFunction);
+        keyspace.views.forEach(view -> notifyDropView(view, dropData));
+        keyspace.tables.forEach(metadata -> notifyDropTable(metadata, dropData));
+        keyspace.types.forEach(this::notifyDropType);
+        notifyDropKeyspace(keyspace, dropData);
+    }
+
+    public void notifyPreChanges(SchemaTransformationResult transformationResult)
+    {
+        transformationResult.diff.altered.forEach(this::notifyPreAlterKeyspace);
+    }
+
+    private void notifyPreAlterKeyspace(KeyspaceMetadata.KeyspaceDiff keyspaceDiff)
+    {
+        keyspaceDiff.tables.altered.forEach(this::notifyPreAlterTable);
+        keyspaceDiff.views.altered.forEach(this::notifyPreAlterView);
+    }
+
+    private void notifyPreAlterTable(Diff.Altered<TableMetadata> altered)
+    {
+        changeListeners.forEach(l -> l.onPreAlterTable(altered.before, altered.after));
+    }
+
+    private void notifyPreAlterView(Diff.Altered<ViewMetadata> altered)
+    {
+        changeListeners.forEach(l -> l.onPreAlterView(altered.before, altered.after));
+    }
+
+    private void notifyCreateKeyspace(KeyspaceMetadata ksm)
+    {
+        changeListeners.forEach(l -> l.onCreateKeyspace(ksm));
+    }
+
+    private void notifyCreateTable(TableMetadata metadata)
+    {
+        changeListeners.forEach(l -> l.onCreateTable(metadata));
+    }
+
+    private void notifyCreateView(ViewMetadata view)
+    {
+        changeListeners.forEach(l -> l.onCreateView(view));
+    }
+
+    private void notifyCreateType(UserType ut)
+    {
+        changeListeners.forEach(l -> l.onCreateType(ut));
+    }
+
+    private void notifyCreateFunction(UDFunction udf)
+    {
+        changeListeners.forEach(l -> l.onCreateFunction(udf));
+    }
+
+    private void notifyCreateAggregate(UDAggregate udf)
+    {
+        changeListeners.forEach(l -> l.onCreateAggregate(udf));
+    }
+
+    private void notifyAlterKeyspace(KeyspaceMetadata before, KeyspaceMetadata after)
+    {
+        changeListeners.forEach(l -> l.onAlterKeyspace(before, after));
+    }
+
+    private void notifyAlterTable(TableMetadata before, TableMetadata after)
+    {
+        boolean changeAffectedPreparedStatements = before.changeAffectsPreparedStatements(after);
+        changeListeners.forEach(l -> l.onAlterTable(before, after, changeAffectedPreparedStatements));
+    }
+
+    private void notifyAlterView(ViewMetadata before, ViewMetadata after)
+    {
+        boolean changeAffectedPreparedStatements = before.metadata.changeAffectsPreparedStatements(after.metadata);
+        changeListeners.forEach(l -> l.onAlterView(before, after, changeAffectedPreparedStatements));
+    }
+
+    private void notifyAlterType(UserType before, UserType after)
+    {
+        changeListeners.forEach(l -> l.onAlterType(before, after));
+    }
+
+    private void notifyAlterFunction(UDFunction before, UDFunction after)
+    {
+        changeListeners.forEach(l -> l.onAlterFunction(before, after));
+    }
+
+    private void notifyAlterAggregate(UDAggregate before, UDAggregate after)
+    {
+        changeListeners.forEach(l -> l.onAlterAggregate(before, after));
+    }
+
+    private void notifyDropKeyspace(KeyspaceMetadata ksm, boolean dropData)
+    {
+        changeListeners.forEach(l -> l.onDropKeyspace(ksm, dropData));
+    }
+
+    private void notifyDropTable(TableMetadata metadata, boolean dropData)
+    {
+        changeListeners.forEach(l -> l.onDropTable(metadata, dropData));
+    }
+
+    private void notifyDropView(ViewMetadata view, boolean dropData)
+    {
+        changeListeners.forEach(l -> l.onDropView(view, dropData));
+    }
+
+    private void notifyDropType(UserType ut)
+    {
+        changeListeners.forEach(l -> l.onDropType(ut));
+    }
+
+    private void notifyDropFunction(UDFunction udf)
+    {
+        changeListeners.forEach(l -> l.onDropFunction(udf));
+    }
+
+    private void notifyDropAggregate(UDAggregate udf)
+    {
+        changeListeners.forEach(l -> l.onDropAggregate(udf));
+    }
+}
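A short usage sketch for the new notifier, for review context only (in the real code path Schema owns its schemaChangeNotifier instance, so constructing one directly, reusing the LoggingSchemaListener sketch above, and the keyspaceMetadata variable are all assumptions):

    SchemaChangeNotifier notifier = new SchemaChangeNotifier();
    notifier.registerListener(new LoggingSchemaListener());

    // A single keyspace-level event is fanned out to the per-element callbacks:
    // onCreateKeyspace, then onCreateType/onCreateTable/onCreateView/onCreateFunction/onCreateAggregate.
    notifier.notifyKeyspaceCreated(keyspaceMetadata);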
diff --git a/src/java/org/apache/cassandra/schema/SchemaConstants.java b/src/java/org/apache/cassandra/schema/SchemaConstants.java
index 7b6b7de..888ea4a 100644
--- a/src/java/org/apache/cassandra/schema/SchemaConstants.java
+++ b/src/java/org/apache/cassandra/schema/SchemaConstants.java
@@ -25,9 +25,13 @@
 import java.util.regex.Pattern;
 
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
 
 import org.apache.cassandra.db.Digest;
 
+/**
+ * When adding new String keyspace names here, double-check whether they also need to be added to PartitionDenylist.canDenylistKeyspace.
+ */
 public final class SchemaConstants
 {
     public static final Pattern PATTERN_WORD_CHARS = Pattern.compile("\\w+");
@@ -47,14 +51,18 @@
     public static final Set<String> LOCAL_SYSTEM_KEYSPACE_NAMES =
         ImmutableSet.of(SYSTEM_KEYSPACE_NAME, SCHEMA_KEYSPACE_NAME);
 
+    /* virtual table system keyspace names */
+    public static final Set<String> VIRTUAL_SYSTEM_KEYSPACE_NAMES =
+        ImmutableSet.of(VIRTUAL_VIEWS, VIRTUAL_SCHEMA);
+
     /* replicate system keyspace names (the ones with a "true" replication strategy) */
     public static final Set<String> REPLICATED_SYSTEM_KEYSPACE_NAMES =
         ImmutableSet.of(TRACE_KEYSPACE_NAME, AUTH_KEYSPACE_NAME, DISTRIBUTED_KEYSPACE_NAME);
     /**
-     * longest permissible KS or CF name.  Our main concern is that filename not be more than 255 characters;
-     * the filename will contain both the KS and CF names. Since non-schema-name components only take up
-     * ~64 characters, we could allow longer names than this, but on Windows, the entire path should be not greater than
-     * 255 characters, so a lower limit here helps avoid problems.  See CASSANDRA-4110.
+     * The longest permissible KS or CF name.
+     *
+     * Before CASSANDRA-16956, we kept the entire path under 255 characters to support Windows; with CASSANDRA-16956
+     * implemented, that limit no longer applies.
      */
     public static final int NAME_LENGTH = 48;
 
@@ -78,7 +86,7 @@
      */
     public static boolean isLocalSystemKeyspace(String keyspaceName)
     {
-        return LOCAL_SYSTEM_KEYSPACE_NAMES.contains(keyspaceName.toLowerCase());
+        return LOCAL_SYSTEM_KEYSPACE_NAMES.contains(keyspaceName.toLowerCase()) || isVirtualSystemKeyspace(keyspaceName);
     }
 
     /**
@@ -95,7 +103,7 @@
      */
     public static boolean isVirtualSystemKeyspace(String keyspaceName)
     {
-        return VIRTUAL_SCHEMA.equals(keyspaceName.toLowerCase()) || VIRTUAL_VIEWS.equals(keyspaceName.toLowerCase());
+        return VIRTUAL_SYSTEM_KEYSPACE_NAMES.contains(keyspaceName.toLowerCase());
     }
 
     /**
@@ -104,8 +112,16 @@
      */
     public static boolean isSystemKeyspace(String keyspaceName)
     {
-        return isLocalSystemKeyspace(keyspaceName)
-                || isReplicatedSystemKeyspace(keyspaceName)
-                || isVirtualSystemKeyspace(keyspaceName);
+        return isLocalSystemKeyspace(keyspaceName) // this includes vtables
+                || isReplicatedSystemKeyspace(keyspaceName);
+    }
+
+    /**
+     * Returns the set of all system keyspaces
+     * @return all system keyspaces
+     */
+    public static Set<String> getSystemKeyspaces()
+    {
+        return Sets.union(Sets.union(LOCAL_SYSTEM_KEYSPACE_NAMES, REPLICATED_SYSTEM_KEYSPACE_NAMES), VIRTUAL_SYSTEM_KEYSPACE_NAMES);
     }
 }
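A small sketch of the behaviour introduced above, for review context only (it uses only the constants and predicates defined in SchemaConstants; the assertions restate the new union-based checks rather than adding behaviour):

    // Virtual system keyspaces are now treated as local system keyspaces as well.
    assert SchemaConstants.isLocalSystemKeyspace(SchemaConstants.VIRTUAL_VIEWS);

    // Replicated system keyspaces are still covered by isSystemKeyspace(...).
    assert SchemaConstants.isSystemKeyspace(SchemaConstants.AUTH_KEYSPACE_NAME);

    // getSystemKeyspaces() is the union of the local, replicated and virtual name sets.
    assert SchemaConstants.getSystemKeyspaces().containsAll(SchemaConstants.VIRTUAL_SYSTEM_KEYSPACE_NAMES);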
diff --git a/src/java/org/apache/cassandra/schema/SchemaDiagnostics.java b/src/java/org/apache/cassandra/schema/SchemaDiagnostics.java
index 12b8409..2924303 100644
--- a/src/java/org/apache/cassandra/schema/SchemaDiagnostics.java
+++ b/src/java/org/apache/cassandra/schema/SchemaDiagnostics.java
@@ -86,28 +86,28 @@
                                             delta.before, delta, null, null, null, null));
     }
 
-    static void keyspaceDroping(Schema schema, KeyspaceMetadata keyspace)
+    static void keyspaceDropping(Schema schema, KeyspaceMetadata keyspace)
     {
         if (isEnabled(SchemaEventType.KS_DROPPING))
             service.publish(new SchemaEvent(SchemaEventType.KS_DROPPING, schema, keyspace,
                                             null, null, null, null, null, null));
     }
 
-    static void keyspaceDroped(Schema schema, KeyspaceMetadata keyspace)
+    static void keyspaceDropped(Schema schema, KeyspaceMetadata keyspace)
     {
         if (isEnabled(SchemaEventType.KS_DROPPED))
             service.publish(new SchemaEvent(SchemaEventType.KS_DROPPED, schema, keyspace,
                                             null, null, null, null, null, null));
     }
 
-    static void schemataLoading(Schema schema)
+    static void schemaLoading(Schema schema)
     {
         if (isEnabled(SchemaEventType.SCHEMATA_LOADING))
             service.publish(new SchemaEvent(SchemaEventType.SCHEMATA_LOADING, schema, null,
                                             null, null, null, null, null, null));
     }
 
-    static void schemataLoaded(Schema schema)
+    static void schemaLoaded(Schema schema)
     {
         if (isEnabled(SchemaEventType.SCHEMATA_LOADED))
             service.publish(new SchemaEvent(SchemaEventType.SCHEMATA_LOADED, schema, null,
@@ -121,7 +121,7 @@
                                             null, null, null, null, null, null));
     }
 
-    static void schemataCleared(Schema schema)
+    static void schemaCleared(Schema schema)
     {
         if (isEnabled(SchemaEventType.SCHEMATA_CLEARED))
             service.publish(new SchemaEvent(SchemaEventType.SCHEMATA_CLEARED, schema, null,
diff --git a/src/java/org/apache/cassandra/schema/SchemaEvent.java b/src/java/org/apache/cassandra/schema/SchemaEvent.java
index d163a11..5703fe2 100644
--- a/src/java/org/apache/cassandra/schema/SchemaEvent.java
+++ b/src/java/org/apache/cassandra/schema/SchemaEvent.java
@@ -21,30 +21,30 @@
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
-import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
 import javax.annotation.Nullable;
 
-import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.MapDifference;
 
 import org.apache.cassandra.diag.DiagnosticEvent;
-import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.Collectors3;
 
 public final class SchemaEvent extends DiagnosticEvent
 {
     private final SchemaEventType type;
 
-    private final HashSet<String> keyspaces;
-    private final HashMap<String, String> indexTables;
-    private final HashMap<String, String> tables;
-    private final ArrayList<String> nonSystemKeyspaces;
-    private final ArrayList<String> userKeyspaces;
+    private final ImmutableCollection<String> keyspaces;
+    private final ImmutableMap<String, String> indexTables;
+    private final ImmutableCollection<String> tables;
+    private final ImmutableCollection<String> nonSystemKeyspaces;
+    private final ImmutableCollection<String> userKeyspaces;
     private final int numberOfTables;
     private final UUID version;
 
@@ -61,7 +61,7 @@
     @Nullable
     private final Views.ViewsDiff viewsDiff;
     @Nullable
-    private final MapDifference<String,TableMetadata> indexesDiff;
+    private final MapDifference<String, TableMetadata> indexesDiff;
 
     public enum SchemaEventType
     {
@@ -90,7 +90,7 @@
     SchemaEvent(SchemaEventType type, Schema schema, @Nullable KeyspaceMetadata ksUpdate,
                 @Nullable KeyspaceMetadata previous, @Nullable KeyspaceMetadata.KeyspaceDiff ksDiff,
                 @Nullable TableMetadata tableUpdate, @Nullable Tables.TablesDiff tablesDiff,
-                @Nullable Views.ViewsDiff viewsDiff, @Nullable MapDifference<String,TableMetadata> indexesDiff)
+                @Nullable Views.ViewsDiff viewsDiff, @Nullable MapDifference<String, TableMetadata> indexesDiff)
     {
         this.type = type;
         this.ksUpdate = ksUpdate;
@@ -101,27 +101,21 @@
         this.viewsDiff = viewsDiff;
         this.indexesDiff = indexesDiff;
 
-        this.keyspaces = new HashSet<>(schema.getKeyspaces());
-        this.nonSystemKeyspaces = new ArrayList<>(schema.getNonSystemKeyspaces());
-        this.userKeyspaces = new ArrayList<>(schema.getUserKeyspaces());
+        this.keyspaces = schema.distributedAndLocalKeyspaces().names();
+        this.nonSystemKeyspaces = schema.distributedKeyspaces().names();
+        this.userKeyspaces = schema.getUserKeyspaces().names();
         this.numberOfTables = schema.getNumberOfTables();
         this.version = schema.getVersion();
 
-        Map<Pair<String, String>, TableMetadataRef> indexTableMetadataRefs = schema.getIndexTableMetadataRefs();
-        Map<String, String> indexTables = indexTableMetadataRefs.entrySet().stream()
-                                                                .collect(Collectors.toMap(e -> e.getKey().left + ',' +
-                                                                                               e.getKey().right,
-                                                                                          e -> e.getValue().id.toHexString() + ',' +
-                                                                                               e.getValue().keyspace + ',' +
-                                                                                               e.getValue().name));
-        this.indexTables = new HashMap<>(indexTables);
-        Map<TableId, TableMetadataRef> tableMetadataRefs = schema.getTableMetadataRefs();
-        Map<String, String> tables = tableMetadataRefs.entrySet().stream()
-                                                      .collect(Collectors.toMap(e -> e.getKey().toHexString(),
-                                                                                e -> e.getValue().id.toHexString() + ',' +
-                                                                                     e.getValue().keyspace + ',' +
-                                                                                     e.getValue().name));
-        this.tables = new HashMap<>(tables);
+        this.indexTables = schema.distributedKeyspaces().stream()
+                                 .flatMap(ks -> ks.tables.indexTables().entrySet().stream())
+                                 .collect(Collectors3.toImmutableMap(e -> String.format("%s,%s", e.getValue().keyspace, e.getKey()),
+                                                                     e -> String.format("%s,%s,%s", e.getValue().id.toHexString(), e.getValue().keyspace, e.getValue().name)));
+
+        this.tables = schema.distributedKeyspaces().stream()
+                            .flatMap(ks -> StreamSupport.stream(ks.tablesAndViews().spliterator(), false))
+                            .map(e -> String.format("%s,%s,%s", e.id.toHexString(), e.keyspace, e.name))
+                            .collect(Collectors3.toImmutableList());
     }
 
     public SchemaEventType getType()
@@ -225,6 +219,7 @@
         ret.put("caching", repr(params.caching));
         ret.put("compaction", repr(params.compaction));
         ret.put("compression", repr(params.compression));
+        ret.put("memtable", repr(params.memtable));
         if (params.speculativeRetry != null) ret.put("speculativeRetry", params.speculativeRetry.kind().name());
         return ret;
     }
@@ -253,6 +248,11 @@
         return ret;
     }
 
+    private String repr(MemtableParams params)
+    {
+        return params.configurationKey();
+    }
+
     private HashMap<String, Serializable> repr(IndexMetadata index)
     {
         HashMap<String, Serializable> ret = new HashMap<>();
diff --git a/src/java/org/apache/cassandra/schema/SchemaKeyspace.java b/src/java/org/apache/cassandra/schema/SchemaKeyspace.java
index a6fd028..3f223dd 100644
--- a/src/java/org/apache/cassandra/schema/SchemaKeyspace.java
+++ b/src/java/org/apache/cassandra/schema/SchemaKeyspace.java
@@ -49,6 +49,7 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Simulate;
 
 import static java.lang.String.format;
 
@@ -58,14 +59,16 @@
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
 import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal;
 import static org.apache.cassandra.schema.SchemaKeyspaceTables.*;
+import static org.apache.cassandra.utils.Simulate.With.GLOBAL_CLOCK;
 
 /**
  * system_schema.* tables and methods for manipulating them.
  * 
- * Please notice this class is _not_ thread safe. It should be accessed through {@link org.apache.cassandra.schema.Schema}. See CASSANDRA-16856/16996
+ * Please note this class is _not_ thread-safe; all methods which read or update the data in the schema keyspace
+ * should be invoked only from the {@link SchemaUpdateHandler} implementation, within synchronized blocks.
  */
 @NotThreadSafe
-final class SchemaKeyspace
+public final class SchemaKeyspace
 {
     private SchemaKeyspace()
     {
@@ -80,7 +83,7 @@
      * The tables to which we added the cdc column. This is used in {@link #makeUpdateForSchema} below to make sure we skip that
      * column is cdc is disabled as the columns breaks pre-cdc to post-cdc upgrades (typically, 3.0 -> 3.X).
      */
-    private static final Set<String> TABLES_WITH_CDC_ADDED = ImmutableSet.of(TABLES, VIEWS);
+    private static final Set<String> TABLES_WITH_CDC_ADDED = ImmutableSet.of(SchemaKeyspaceTables.TABLES, SchemaKeyspaceTables.VIEWS);
 
     private static final TableMetadata Keyspaces =
         parse(KEYSPACES,
@@ -102,6 +105,7 @@
               + "comment text,"
               + "compaction frozen<map<text, text>>,"
               + "compression frozen<map<text, text>>,"
+              + "memtable text,"
               + "crc_check_chance double,"
               + "dclocal_read_repair_chance double," // no longer used, left for drivers' sake
               + "default_time_to_live int,"
@@ -169,6 +173,7 @@
               + "comment text,"
               + "compaction frozen<map<text, text>>,"
               + "compression frozen<map<text, text>>,"
+              + "memtable text,"
               + "crc_check_chance double,"
               + "dclocal_read_repair_chance double," // no longer used, left for drivers' sake
               + "default_time_to_live int,"
@@ -248,7 +253,7 @@
                                    .build();
     }
 
-    static KeyspaceMetadata metadata()
+    public static KeyspaceMetadata metadata()
     {
         return KeyspaceMetadata.create(SchemaConstants.SCHEMA_KEYSPACE_NAME, KeyspaceParams.local(), org.apache.cassandra.schema.Tables.of(ALL_TABLE_METADATA));
     }
@@ -257,32 +262,32 @@
     {
         Map<String, Mutation> mutations = new HashMap<>();
 
-        diff.created.forEach(k -> mutations.put(k.name, makeCreateKeyspaceMutation(k, timestamp).build()));
         diff.dropped.forEach(k -> mutations.put(k.name, makeDropKeyspaceMutation(k, timestamp).build()));
+        diff.created.forEach(k -> mutations.put(k.name, makeCreateKeyspaceMutation(k, timestamp).build()));
         diff.altered.forEach(kd ->
         {
             KeyspaceMetadata ks = kd.after;
 
             Mutation.SimpleBuilder builder = makeCreateKeyspaceMutation(ks.name, ks.params, timestamp);
 
-            kd.types.created.forEach(t -> addTypeToSchemaMutation(t, builder));
             kd.types.dropped.forEach(t -> addDropTypeToSchemaMutation(t, builder));
+            kd.types.created.forEach(t -> addTypeToSchemaMutation(t, builder));
             kd.types.altered(Difference.SHALLOW).forEach(td -> addTypeToSchemaMutation(td.after, builder));
 
-            kd.tables.created.forEach(t -> addTableToSchemaMutation(t, true, builder));
             kd.tables.dropped.forEach(t -> addDropTableToSchemaMutation(t, builder));
+            kd.tables.created.forEach(t -> addTableToSchemaMutation(t, true, builder));
             kd.tables.altered(Difference.SHALLOW).forEach(td -> addAlterTableToSchemaMutation(td.before, td.after, builder));
 
-            kd.views.created.forEach(v -> addViewToSchemaMutation(v, true, builder));
             kd.views.dropped.forEach(v -> addDropViewToSchemaMutation(v, builder));
+            kd.views.created.forEach(v -> addViewToSchemaMutation(v, true, builder));
             kd.views.altered(Difference.SHALLOW).forEach(vd -> addAlterViewToSchemaMutation(vd.before, vd.after, builder));
 
-            kd.udfs.created.forEach(f -> addFunctionToSchemaMutation((UDFunction) f, builder));
             kd.udfs.dropped.forEach(f -> addDropFunctionToSchemaMutation((UDFunction) f, builder));
+            kd.udfs.created.forEach(f -> addFunctionToSchemaMutation((UDFunction) f, builder));
             kd.udfs.altered(Difference.SHALLOW).forEach(fd -> addFunctionToSchemaMutation(fd.after, builder));
 
-            kd.udas.created.forEach(a -> addAggregateToSchemaMutation((UDAggregate) a, builder));
             kd.udas.dropped.forEach(a -> addDropAggregateToSchemaMutation((UDAggregate) a, builder));
+            kd.udas.created.forEach(a -> addAggregateToSchemaMutation((UDAggregate) a, builder));
             kd.udas.altered(Difference.SHALLOW).forEach(ad -> addAggregateToSchemaMutation(ad.after, builder));
 
             mutations.put(ks.name, builder.build());
@@ -294,6 +299,7 @@
     /**
      * Add entries to system_schema.* for the hardcoded system keyspaces
      */
+    @Simulate(with = GLOBAL_CLOCK)
     static void saveSystemKeyspacesSchema()
     {
         KeyspaceMetadata system = Schema.instance.getKeyspaceMetadata(SchemaConstants.SYSTEM_KEYSPACE_NAME);
@@ -316,20 +322,21 @@
 
     static void truncate()
     {
+        logger.debug("Truncating schema tables...");
         ALL.reverse().forEach(table -> getSchemaCFS(table).truncateBlocking());
     }
 
     private static void flush()
     {
         if (!DatabaseDescriptor.isUnsafeSystem())
-            ALL.forEach(table -> FBUtilities.waitOnFuture(getSchemaCFS(table).forceFlush()));
+            ALL.forEach(table -> FBUtilities.waitOnFuture(getSchemaCFS(table).forceFlush(ColumnFamilyStore.FlushReason.INTERNALLY_FORCED)));
     }
 
     /**
      * Read schema from system keyspace and calculate MD5 digest of every row, resulting digest
      * will be converted into UUID which would act as content-based version of the schema.
      */
-    static UUID calculateSchemaDigest()
+    public static UUID calculateSchemaDigest()
     {
         Digest digest = Digest.forSchema();
         for (String table : ALL)
@@ -395,7 +402,7 @@
 
                     DecoratedKey key = partition.partitionKey();
                     Mutation.PartitionUpdateCollector puCollector = mutationMap.computeIfAbsent(key, k -> new Mutation.PartitionUpdateCollector(SchemaConstants.SCHEMA_KEYSPACE_NAME, key));
-                    puCollector.add(makeUpdateForSchema(partition, cmd.columnFilter()));
+                    puCollector.add(makeUpdateForSchema(partition, cmd.columnFilter()).withOnlyPresentColumns());
                 }
             }
         }
@@ -438,10 +445,10 @@
     @SuppressWarnings("unchecked")
     private static DecoratedKey decorate(TableMetadata metadata, Object value)
     {
-        return metadata.partitioner.decorateKey(((AbstractType) metadata.partitionKeyType).decompose(value));
+        return metadata.partitioner.decorateKey(metadata.partitionKeyType.decomposeUntyped(value));
     }
 
-    static Mutation.SimpleBuilder makeCreateKeyspaceMutation(String name, KeyspaceParams params, long timestamp)
+    private static Mutation.SimpleBuilder makeCreateKeyspaceMutation(String name, KeyspaceParams params, long timestamp)
     {
         Mutation.SimpleBuilder builder = Mutation.simpleBuilder(Keyspaces.keyspace, decorate(Keyspaces, name))
                                                  .timestamp(timestamp);
@@ -454,6 +461,7 @@
         return builder;
     }
 
+    @VisibleForTesting
     static Mutation.SimpleBuilder makeCreateKeyspaceMutation(KeyspaceMetadata keyspace, long timestamp)
     {
         Mutation.SimpleBuilder builder = makeCreateKeyspaceMutation(keyspace.name, keyspace.params, timestamp);
@@ -467,7 +475,7 @@
         return builder;
     }
 
-    static Mutation.SimpleBuilder makeDropKeyspaceMutation(KeyspaceMetadata keyspace, long timestamp)
+    private static Mutation.SimpleBuilder makeDropKeyspaceMutation(KeyspaceMetadata keyspace, long timestamp)
     {
         Mutation.SimpleBuilder builder = Mutation.simpleBuilder(SchemaConstants.SCHEMA_KEYSPACE_NAME, decorate(Keyspaces, keyspace.name))
                                                  .timestamp(timestamp);
@@ -491,6 +499,7 @@
         builder.update(Types).row(type.name).delete();
     }
 
+    @VisibleForTesting
     static Mutation.SimpleBuilder makeCreateTableMutation(KeyspaceMetadata keyspace, TableMetadata table, long timestamp)
     {
         // Include the serialized keyspace in case the target node missed a CREATE KEYSPACE migration (see CASSANDRA-5631).
@@ -499,10 +508,11 @@
         return builder;
     }
 
-    static void addTableToSchemaMutation(TableMetadata table, boolean withColumnsAndTriggers, Mutation.SimpleBuilder builder)
+    private static void addTableToSchemaMutation(TableMetadata table, boolean withColumnsAndTriggers, Mutation.SimpleBuilder builder)
     {
         Row.SimpleBuilder rowBuilder = builder.update(Tables)
                                               .row(table.name)
+                                              .deletePrevious()
                                               .add("id", table.id.asUUID())
                                               .add("flags", TableMetadata.Flag.toStringSet(table.flags));
 
@@ -548,6 +558,11 @@
         // node sends table schema to a < 3.8 versioned node with an unknown column.
         if (DatabaseDescriptor.isCDCEnabled())
             builder.add("cdc", params.cdc);
+
+        // As above, only add the memtable column if the table uses a non-default memtable configuration to avoid RTE
+        // in mixed operation with pre-4.1 versioned nodes during upgrades.
+        if (params.memtable != MemtableParams.DEFAULT)
+            builder.add("memtable", params.memtable.configurationKey());
     }
 
     private static void addAlterTableToSchemaMutation(TableMetadata oldTable, TableMetadata newTable, Mutation.SimpleBuilder builder)
@@ -605,6 +620,7 @@
             addUpdatedIndexToSchemaMutation(newTable, diff.rightValue(), builder);
     }
 
+    @VisibleForTesting
     static Mutation.SimpleBuilder makeUpdateTableMutation(KeyspaceMetadata keyspace,
                                                           TableMetadata oldTable,
                                                           TableMetadata newTable,
@@ -637,14 +653,6 @@
         return Maps.difference(beforeMap, afterMap);
     }
 
-    static Mutation.SimpleBuilder makeDropTableMutation(KeyspaceMetadata keyspace, TableMetadata table, long timestamp)
-    {
-        // Include the serialized keyspace in case the target node missed a CREATE KEYSPACE migration (see CASSANDRA-5631).
-        Mutation.SimpleBuilder builder = makeCreateKeyspaceMutation(keyspace.name, keyspace.params, timestamp);
-        addDropTableToSchemaMutation(table, builder);
-        return builder;
-    }
-
     private static void addDropTableToSchemaMutation(TableMetadata table, Mutation.SimpleBuilder builder)
     {
         builder.update(Tables).row(table.name).delete();
@@ -714,6 +722,7 @@
         TableMetadata table = view.metadata;
         Row.SimpleBuilder rowBuilder = builder.update(Views)
                                               .row(view.name())
+                                              .deletePrevious()
                                               .add("include_all_columns", view.includeAllColumns)
                                               .add("base_table_id", view.baseTableId.asUUID())
                                               .add("base_table_name", view.baseTableName)
@@ -830,8 +839,7 @@
     /*
      * Fetching schema
      */
-
-    static Keyspaces fetchNonSystemKeyspaces()
+    public static Keyspaces fetchNonSystemKeyspaces()
     {
         return fetchKeyspacesWithout(SchemaConstants.LOCAL_SYSTEM_KEYSPACE_NAMES);
     }
@@ -943,6 +951,7 @@
                             .build();
     }
 
+    @VisibleForTesting
     static TableParams createTableParamsFromRow(UntypedResultSet.Row row)
     {
         return TableParams.builder()
@@ -951,6 +960,9 @@
                           .comment(row.getString("comment"))
                           .compaction(CompactionParams.fromMap(row.getFrozenTextMap("compaction")))
                           .compression(CompressionParams.fromMap(row.getFrozenTextMap("compression")))
+                          .memtable(MemtableParams.getWithFallback(row.has("memtable")
+                                                                   ? row.getString("memtable")
+                                                                   : null)) // memtable column was introduced in 4.1
                           .defaultTimeToLive(row.getInt("default_time_to_live"))
                           .extensions(row.getFrozenMap("extensions", UTF8Type.instance, BytesType.instance))
                           .gcGraceSeconds(row.getInt("gc_grace_seconds"))
@@ -983,6 +995,7 @@
         return columns;
     }
 
+    @VisibleForTesting
     static ColumnMetadata createColumnFromRow(UntypedResultSet.Row row, Types types)
     {
         String keyspace = row.getString("keyspace_name");
@@ -1029,7 +1042,7 @@
                                  ? ColumnMetadata.Kind.valueOf(row.getString("kind").toUpperCase())
                                  : ColumnMetadata.Kind.REGULAR;
         assert kind == ColumnMetadata.Kind.REGULAR || kind == ColumnMetadata.Kind.STATIC
-            : "Unexpected dropped column kind: " + kind.toString();
+            : "Unexpected dropped column kind: " + kind;
 
         ColumnMetadata column = new ColumnMetadata(keyspace, table, ColumnIdentifier.getInterned(name, true), type, ColumnMetadata.NO_POSITION, kind);
         long droppedTime = TimeUnit.MILLISECONDS.toMicros(row.getLong("dropped_time"));
@@ -1144,9 +1157,9 @@
 
         List<AbstractType<?>> argTypes = new ArrayList<>();
         for (String type : row.getFrozenList("argument_types", UTF8Type.instance))
-            argTypes.add(CQLTypeParser.parse(ksName, type, types));
+            argTypes.add(CQLTypeParser.parse(ksName, type, types).udfType());
 
-        AbstractType<?> returnType = CQLTypeParser.parse(ksName, row.getString("return_type"), types);
+        AbstractType<?> returnType = CQLTypeParser.parse(ksName, row.getString("return_type"), types).udfType();
 
         String language = row.getString("language");
         String body = row.getString("body");
@@ -1206,10 +1219,10 @@
         List<AbstractType<?>> argTypes =
             row.getFrozenList("argument_types", UTF8Type.instance)
                .stream()
-               .map(t -> CQLTypeParser.parse(ksName, t, types))
+               .map(t -> CQLTypeParser.parse(ksName, t, types).udfType())
                .collect(toList());
 
-        AbstractType<?> returnType = CQLTypeParser.parse(ksName, row.getString("return_type"), types);
+        AbstractType<?> returnType = CQLTypeParser.parse(ksName, row.getString("return_type"), types).udfType();
 
         FunctionName stateFunc = new FunctionName(ksName, (row.getString("state_func")));
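
For illustration, the new "memtable" table option is handled in an upgrade-safe way on both paths above: it is only written for tables with a non-default memtable configuration, and reads tolerate its absence. A minimal sketch of the read-side fallback, using the same calls as the patch (the helper name is hypothetical, not part of the change):

// Hypothetical helper mirroring the createTableParamsFromRow() change above: rows written by
// pre-4.1 nodes have no "memtable" column, so a missing value falls back to the default configuration.
static MemtableParams memtableFromSchemaRow(UntypedResultSet.Row row)
{
    String configurationKey = row.has("memtable") ? row.getString("memtable") : null; // null for pre-4.1 rows
    return MemtableParams.getWithFallback(configurationKey);
}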
 
diff --git a/src/java/org/apache/cassandra/schema/SchemaKeyspaceTables.java b/src/java/org/apache/cassandra/schema/SchemaKeyspaceTables.java
index b6e825d..c00a4f7 100644
--- a/src/java/org/apache/cassandra/schema/SchemaKeyspaceTables.java
+++ b/src/java/org/apache/cassandra/schema/SchemaKeyspaceTables.java
@@ -19,19 +19,19 @@
 
 import com.google.common.collect.ImmutableList;
 
-public final class SchemaKeyspaceTables
+public class SchemaKeyspaceTables
 {
-    public static final String INDEXES = "indexes";
-    public static final String AGGREGATES = "aggregates";
-    public static final String FUNCTIONS = "functions";
-    public static final String TYPES = "types";
-    public static final String VIEWS = "views";
-    public static final String TRIGGERS = "triggers";
-    public static final String DROPPED_COLUMNS = "dropped_columns";
-    public static final String COLUMNS = "columns";
-    public static final String TABLES = "tables";
     public static final String KEYSPACES = "keyspaces";
-
+    public static final String TABLES = "tables";
+    public static final String COLUMNS = "columns";
+    public static final String DROPPED_COLUMNS = "dropped_columns";
+    public static final String TRIGGERS = "triggers";
+    public static final String VIEWS = "views";
+    public static final String TYPES = "types";
+    public static final String FUNCTIONS = "functions";
+    public static final String AGGREGATES = "aggregates";
+    public static final String INDEXES = "indexes";
+
     /**
      * The order in this list matters.
      *
@@ -56,7 +56,4 @@
                                                                      VIEWS,
                                                                      KEYSPACES);
 
-    private SchemaKeyspaceTables()
-    {
-    }
 }
diff --git a/src/java/org/apache/cassandra/schema/SchemaMigrationDiagnostics.java b/src/java/org/apache/cassandra/schema/SchemaMigrationDiagnostics.java
deleted file mode 100644
index 62f1768..0000000
--- a/src/java/org/apache/cassandra/schema/SchemaMigrationDiagnostics.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.schema;
-
-import java.util.UUID;
-
-import org.apache.cassandra.diag.DiagnosticEventService;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.schema.SchemaMigrationEvent.MigrationManagerEventType;
-
-final class SchemaMigrationDiagnostics
-{
-    private static final DiagnosticEventService service = DiagnosticEventService.instance();
-
-    private SchemaMigrationDiagnostics()
-    {
-    }
-
-    static void unknownLocalSchemaVersion(InetAddressAndPort endpoint, UUID theirVersion)
-    {
-        if (isEnabled(MigrationManagerEventType.UNKNOWN_LOCAL_SCHEMA_VERSION))
-            service.publish(new SchemaMigrationEvent(MigrationManagerEventType.UNKNOWN_LOCAL_SCHEMA_VERSION, endpoint,
-                                                     theirVersion));
-    }
-
-    static void versionMatch(InetAddressAndPort endpoint, UUID theirVersion)
-    {
-        if (isEnabled(MigrationManagerEventType.VERSION_MATCH))
-            service.publish(new SchemaMigrationEvent(MigrationManagerEventType.VERSION_MATCH, endpoint, theirVersion));
-    }
-
-    static void skipPull(InetAddressAndPort endpoint, UUID theirVersion)
-    {
-        if (isEnabled(MigrationManagerEventType.SKIP_PULL))
-            service.publish(new SchemaMigrationEvent(MigrationManagerEventType.SKIP_PULL, endpoint, theirVersion));
-    }
-
-    static void resetLocalSchema()
-    {
-        if (isEnabled(MigrationManagerEventType.RESET_LOCAL_SCHEMA))
-            service.publish(new SchemaMigrationEvent(MigrationManagerEventType.RESET_LOCAL_SCHEMA, null, null));
-    }
-
-    static void taskCreated(InetAddressAndPort endpoint)
-    {
-        if (isEnabled(MigrationManagerEventType.TASK_CREATED))
-            service.publish(new SchemaMigrationEvent(MigrationManagerEventType.TASK_CREATED, endpoint, null));
-    }
-
-    static void taskSendAborted(InetAddressAndPort endpoint)
-    {
-        if (isEnabled(MigrationManagerEventType.TASK_SEND_ABORTED))
-            service.publish(new SchemaMigrationEvent(MigrationManagerEventType.TASK_SEND_ABORTED, endpoint, null));
-    }
-
-    static void taskRequestSend(InetAddressAndPort endpoint)
-    {
-        if (isEnabled(MigrationManagerEventType.TASK_REQUEST_SEND))
-            service.publish(new SchemaMigrationEvent(MigrationManagerEventType.TASK_REQUEST_SEND,
-                                                     endpoint, null));
-    }
-
-    private static boolean isEnabled(MigrationManagerEventType type)
-    {
-        return service.isEnabled(SchemaMigrationEvent.class, type);
-    }
-}
diff --git a/src/java/org/apache/cassandra/schema/SchemaMigrationEvent.java b/src/java/org/apache/cassandra/schema/SchemaMigrationEvent.java
deleted file mode 100644
index a984804..0000000
--- a/src/java/org/apache/cassandra/schema/SchemaMigrationEvent.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.schema;
-
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Queue;
-import java.util.UUID;
-import java.util.concurrent.CountDownLatch;
-
-import javax.annotation.Nullable;
-
-import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.diag.DiagnosticEvent;
-import org.apache.cassandra.gms.FailureDetector;
-import org.apache.cassandra.gms.Gossiper;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.MessagingService;
-
-/**
- * Internal events emitted by {@link MigrationManager}.
- */
-final class SchemaMigrationEvent extends DiagnosticEvent
-{
-    private final MigrationManagerEventType type;
-    @Nullable
-    private final InetAddressAndPort endpoint;
-    @Nullable
-    private final UUID endpointSchemaVersion;
-    private final UUID localSchemaVersion;
-    private final Integer localMessagingVersion;
-    private final SystemKeyspace.BootstrapState bootstrapState;
-    private final Integer inflightTaskCount;
-    @Nullable
-    private Integer endpointMessagingVersion;
-    @Nullable
-    private Boolean endpointGossipOnlyMember;
-    @Nullable
-    private Boolean isAlive;
-
-    enum MigrationManagerEventType
-    {
-        UNKNOWN_LOCAL_SCHEMA_VERSION,
-        VERSION_MATCH,
-        SKIP_PULL,
-        RESET_LOCAL_SCHEMA,
-        TASK_CREATED,
-        TASK_SEND_ABORTED,
-        TASK_REQUEST_SEND
-    }
-
-    SchemaMigrationEvent(MigrationManagerEventType type,
-                         @Nullable InetAddressAndPort endpoint, @Nullable UUID endpointSchemaVersion)
-    {
-        this.type = type;
-        this.endpoint = endpoint;
-        this.endpointSchemaVersion = endpointSchemaVersion;
-
-        localSchemaVersion = Schema.instance.getVersion();
-        localMessagingVersion = MessagingService.current_version;
-
-        inflightTaskCount = MigrationCoordinator.instance.getInflightTasks();
-
-        this.bootstrapState = SystemKeyspace.getBootstrapState();
-
-        if (endpoint == null) return;
-
-        if (MessagingService.instance().versions.knows(endpoint))
-            endpointMessagingVersion = MessagingService.instance().versions.getRaw(endpoint);
-
-        endpointGossipOnlyMember = Gossiper.instance.isGossipOnlyMember(endpoint);
-        this.isAlive = FailureDetector.instance.isAlive(endpoint);
-    }
-
-    public Enum<?> getType()
-    {
-        return type;
-    }
-
-    public Map<String, Serializable> toMap()
-    {
-        HashMap<String, Serializable> ret = new HashMap<>();
-        if (endpoint != null) ret.put("endpoint", endpoint.getHostAddressAndPort());
-        ret.put("endpointSchemaVersion", Schema.schemaVersionToString(endpointSchemaVersion));
-        ret.put("localSchemaVersion", Schema.schemaVersionToString(localSchemaVersion));
-        if (endpointMessagingVersion != null) ret.put("endpointMessagingVersion", endpointMessagingVersion);
-        if (localMessagingVersion != null) ret.put("localMessagingVersion", localMessagingVersion);
-        if (endpointGossipOnlyMember != null) ret.put("endpointGossipOnlyMember", endpointGossipOnlyMember);
-        if (isAlive != null) ret.put("endpointIsAlive", isAlive);
-        if (bootstrapState != null) ret.put("bootstrapState", bootstrapState.name());
-        if (inflightTaskCount != null) ret.put("inflightTaskCount", inflightTaskCount);
-        return ret;
-    }
-}
diff --git a/src/java/org/apache/cassandra/schema/SchemaMutationsSerializer.java b/src/java/org/apache/cassandra/schema/SchemaMutationsSerializer.java
new file mode 100644
index 0000000..ba65c0d
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SchemaMutationsSerializer.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
+public class SchemaMutationsSerializer implements IVersionedSerializer<Collection<Mutation>>
+{
+    public static final SchemaMutationsSerializer instance = new SchemaMutationsSerializer();
+
+    public void serialize(Collection<Mutation> schema, DataOutputPlus out, int version) throws IOException
+    {
+        out.writeInt(schema.size());
+        for (Mutation mutation : schema)
+            Mutation.serializer.serialize(mutation, out, version);
+    }
+
+    public Collection<Mutation> deserialize(DataInputPlus in, int version) throws IOException
+    {
+        int count = in.readInt();
+        Collection<Mutation> schema = new ArrayList<>(count);
+
+        for (int i = 0; i < count; i++)
+            schema.add(Mutation.serializer.deserialize(in, version));
+
+        return schema;
+    }
+
+    public long serializedSize(Collection<Mutation> schema, int version)
+    {
+        int size = TypeSizes.sizeof(schema.size());
+        for (Mutation mutation : schema)
+            size += mutation.serializedSize(version);
+        return size;
+    }
+}
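
The new serializer simply length-prefixes the collection and delegates to the standard Mutation serializer. A hedged round-trip sketch, not part of the patch; DataOutputBuffer and DataInputBuffer are assumed to be the usual Cassandra I/O buffers:

// Serialize a collection of schema mutations and read it back at the same messaging version.
static Collection<Mutation> roundTrip(Collection<Mutation> schema, int messagingVersion) throws IOException
{
    try (DataOutputBuffer out = new DataOutputBuffer())
    {
        SchemaMutationsSerializer.instance.serialize(schema, out, messagingVersion);
        try (DataInputBuffer in = new DataInputBuffer(out.buffer(), false))
        {
            return SchemaMutationsSerializer.instance.deserialize(in, messagingVersion);
        }
    }
}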
diff --git a/src/java/org/apache/cassandra/schema/SchemaProvider.java b/src/java/org/apache/cassandra/schema/SchemaProvider.java
index 07324ed..cbad42e 100644
--- a/src/java/org/apache/cassandra/schema/SchemaProvider.java
+++ b/src/java/org/apache/cassandra/schema/SchemaProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.cassandra.schema;
 
+import java.util.function.Supplier;
 import javax.annotation.Nullable;
 
 import org.apache.cassandra.db.Keyspace;
@@ -29,7 +30,7 @@
     @Nullable
     Keyspace getKeyspaceInstance(String keyspaceName);
 
-    void storeKeyspaceInstance(Keyspace keyspace);
+    Keyspace maybeAddKeyspaceInstance(String keyspaceName, Supplier<Keyspace> loadFunction);
 
     @Nullable
     KeyspaceMetadata getKeyspaceMetadata(String keyspaceName);
diff --git a/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java b/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java
index 16ee968..6589075 100644
--- a/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java
+++ b/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java
@@ -17,15 +17,15 @@
  */
 package org.apache.cassandra.schema;
 
-import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.Consumer;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.net.IVerbHandler;
 import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.NoPayload;
 
 /**
@@ -38,10 +38,20 @@
 
     private static final Logger logger = LoggerFactory.getLogger(SchemaPullVerbHandler.class);
 
+    private final List<Consumer<Message<NoPayload>>> handlers = new CopyOnWriteArrayList<>();
+
+    public void register(Consumer<Message<NoPayload>> handler)
+    {
+        handlers.add(handler);
+    }
+
     public void doVerb(Message<NoPayload> message)
     {
         logger.trace("Received schema pull request from {}", message.from());
-        Message<Collection<Mutation>> response = message.responseWith(Schema.instance.schemaKeyspaceAsMutations());
-        MessagingService.instance().send(response, message.from());
+        List<Consumer<Message<NoPayload>>> handlers = this.handlers;
+        if (handlers.isEmpty())
+            throw new UnsupportedOperationException("There is no handler registered for schema pull verb");
+
+        handlers.forEach(h -> h.accept(message));
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/schema/SchemaPushVerbHandler.java b/src/java/org/apache/cassandra/schema/SchemaPushVerbHandler.java
index f2f0faf..4f2325a 100644
--- a/src/java/org/apache/cassandra/schema/SchemaPushVerbHandler.java
+++ b/src/java/org/apache/cassandra/schema/SchemaPushVerbHandler.java
@@ -18,11 +18,13 @@
 package org.apache.cassandra.schema;
 
 import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.Consumer;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.net.IVerbHandler;
 import org.apache.cassandra.net.Message;
@@ -39,11 +41,22 @@
 
     private static final Logger logger = LoggerFactory.getLogger(SchemaPushVerbHandler.class);
 
+    private final List<Consumer<Message<Collection<Mutation>>>> handlers = new CopyOnWriteArrayList<>();
+
+    public void register(Consumer<Message<Collection<Mutation>>> handler)
+    {
+        handlers.add(handler);
+    }
+
     public void doVerb(final Message<Collection<Mutation>> message)
     {
         logger.trace("Received schema push request from {}", message.from());
-
         SchemaAnnouncementDiagnostics.schemataMutationsReceived(message.from());
-        Stage.MIGRATION.submit(() -> Schema.instance.mergeAndAnnounceVersion(message.payload));
+
+        List<Consumer<Message<Collection<Mutation>>>> handlers = this.handlers;
+        if (handlers.isEmpty())
+            throw new UnsupportedOperationException("There is no handler registered for schema push verb");
+
+        handlers.forEach(h -> h.accept(message));
     }
 }
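
With this change the push verb handler only dispatches to registered consumers instead of merging the schema itself. A hedged sketch of registering one; the static instance field is assumed and the consumer body is illustrative:

// Register a consumer for incoming schema push messages; the real merge logic belongs to the
// configured SchemaUpdateHandler, which is expected to register itself during startup.
SchemaPushVerbHandler.instance.register(msg -> {
    // msg.payload is the Collection<Mutation> pushed by the peer, msg.from() identifies the sender.
});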
diff --git a/src/java/org/apache/cassandra/schema/SchemaTransformation.java b/src/java/org/apache/cassandra/schema/SchemaTransformation.java
index e2290a3..2b020a0 100644
--- a/src/java/org/apache/cassandra/schema/SchemaTransformation.java
+++ b/src/java/org/apache/cassandra/schema/SchemaTransformation.java
@@ -17,17 +17,51 @@
  */
 package org.apache.cassandra.schema;
 
-import java.net.UnknownHostException;
+import java.util.Optional;
 
 public interface SchemaTransformation
 {
     /**
      * Apply a statement transformation to a schema snapshot.
-     *
-     * Implementing methods should be side-effect free.
+     * <p>
+     * Implementing methods should be side-effect free (outside of throwing exceptions if the transformation cannot
+     * be successfully applied to the provided schema).
      *
      * @param schema Keyspaces to base the transformation on
      * @return Keyspaces transformed by the statement
      */
-    Keyspaces apply(Keyspaces schema) throws UnknownHostException;
+    Keyspaces apply(Keyspaces schema);
+
+    /**
+     * If the transformation should be applied with a certain timestamp, this method should be overridden. This is used
+     * by {@link SchemaTransformations#updateSystemKeyspace(KeyspaceMetadata, long)} when we need to set the fixed
+     * timestamp in order to preserve user settings.
+     */
+    default Optional<Long> fixedTimestampMicros()
+    {
+        return Optional.empty();
+    }
+
+    /**
+     * The result of applying (on this node) a given schema transformation.
+     */
+    class SchemaTransformationResult
+    {
+        public final DistributedSchema before;
+        public final DistributedSchema after;
+        public final Keyspaces.KeyspacesDiff diff;
+
+        public SchemaTransformationResult(DistributedSchema before, DistributedSchema after, Keyspaces.KeyspacesDiff diff)
+        {
+            this.before = before;
+            this.after = after;
+            this.diff = diff;
+        }
+
+        @Override
+        public String toString()
+        {
+            return String.format("SchemaTransformationResult{%s --> %s, diff=%s}", before.getVersion(), after.getVersion(), diff);
+        }
+    }
 }
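
Since apply(Keyspaces) is now the interface's only abstract method (and no longer throws), simple transformations can be written as lambdas. A minimal sketch with an illustrative keyspace name:

// A side-effect-free transformation: set SimpleStrategy RF=3 on one keyspace if it exists.
SchemaTransformation bumpReplication = schema ->
{
    KeyspaceMetadata ks = schema.getNullable("example_ks"); // illustrative keyspace name
    if (ks == null)
        return schema; // nothing to transform
    return schema.withAddedOrUpdated(ks.withSwapped(KeyspaceParams.simple(3)));
};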
diff --git a/src/java/org/apache/cassandra/schema/SchemaTransformations.java b/src/java/org/apache/cassandra/schema/SchemaTransformations.java
new file mode 100644
index 0000000..124f9a6
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SchemaTransformations.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.Optional;
+
+import org.apache.cassandra.db.marshal.UserType;
+import org.apache.cassandra.exceptions.AlreadyExistsException;
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
+
+/**
+ * Factory and utility methods to create simple schema transformations.
+ */
+public class SchemaTransformations
+{
+    /**
+     * Creates a schema transformation that adds the provided keyspace.
+     *
+     * @param keyspace       the keyspace to add.
+     * @param ignoreIfExists if {@code true}, the transformation is a no-op if a keyspace of the same name as
+     *                       {@code keyspace} already exists in the schema the transformation is applied on. Otherwise,
+     *                       the transformation throws an {@link AlreadyExistsException} in that case.
+     * @return the created transformation.
+     */
+    public static SchemaTransformation addKeyspace(KeyspaceMetadata keyspace, boolean ignoreIfExists)
+    {
+        return schema ->
+        {
+            KeyspaceMetadata existing = schema.getNullable(keyspace.name);
+            if (existing != null)
+            {
+                if (ignoreIfExists)
+                    return schema;
+
+                throw new AlreadyExistsException(keyspace.name);
+            }
+
+            return schema.withAddedOrUpdated(keyspace);
+        };
+    }
+
+    /**
+     * Creates a schema transformation that adds the provided table.
+     *
+     * @param table          the table to add.
+     * @param ignoreIfExists if {@code true}, the transformation is a no-op if a table of the same name as
+     *                       {@code table} already exists in the schema the transformation is applied on. Otherwise,
+     *                       the transformation throws an {@link AlreadyExistsException} in that case.
+     * @return the created transformation.
+     */
+    public static SchemaTransformation addTable(TableMetadata table, boolean ignoreIfExists)
+    {
+        return schema ->
+        {
+            KeyspaceMetadata keyspace = schema.getNullable(table.keyspace);
+            if (keyspace == null)
+                throw invalidRequest("Keyspace '%s' doesn't exist", table.keyspace);
+
+            if (keyspace.hasTable(table.name))
+            {
+                if (ignoreIfExists)
+                    return schema;
+
+                throw new AlreadyExistsException(table.keyspace, table.name);
+            }
+
+            table.validate();
+
+            return schema.withAddedOrUpdated(keyspace.withSwapped(keyspace.tables.with(table)));
+        };
+    }
+
+    public static SchemaTransformation addTypes(Types toAdd, boolean ignoreIfExists)
+    {
+        return schema ->
+        {
+            if (toAdd.isEmpty())
+                return schema;
+
+            String keyspaceName = toAdd.iterator().next().keyspace;
+            KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
+            if (null == keyspace)
+                throw invalidRequest("Keyspace '%s' doesn't exist", keyspaceName);
+
+            Types types = keyspace.types;
+            for (UserType type : toAdd)
+            {
+                if (types.containsType(type.name))
+                {
+                    if (ignoreIfExists)
+                        continue;
+
+                    throw new ConfigurationException("Type " + type + " already exists in " + keyspaceName);
+                }
+
+                types = types.with(type);
+            }
+            return schema.withAddedOrReplaced(keyspace.withSwapped(types));
+        };
+    }
+
+    /**
+     * Creates a schema transformation that adds the provided view.
+     *
+     * @param view           the view to add.
+     * @param ignoreIfExists if {@code true}, the transformation is a no-op if a view of the same name as
+     *                       {@code view} already exists in the schema the transformation is applied on. Otherwise,
+     *                       the transformation throws an {@link AlreadyExistsException} in that case.
+     * @return the created transformation.
+     */
+    public static SchemaTransformation addView(ViewMetadata view, boolean ignoreIfExists)
+    {
+        return schema ->
+        {
+            KeyspaceMetadata keyspace = schema.getNullable(view.keyspace());
+            if (keyspace == null)
+                throw invalidRequest("Cannot add view to non-existent keyspace '%s'", view.keyspace());
+
+            if (keyspace.hasView(view.name()))
+            {
+                if (ignoreIfExists)
+                    return schema;
+
+                throw new AlreadyExistsException(view.keyspace(), view.name());
+            }
+
+            return schema.withAddedOrUpdated(keyspace.withSwapped(keyspace.views.with(view)));
+        };
+    }
+
+    /**
+     * We have a set of non-local, distributed system keyspaces, e.g. system_traces, system_auth, etc.
+     * (see {@link SchemaConstants#REPLICATED_SYSTEM_KEYSPACE_NAMES}), that need to be created on cluster initialisation,
+     * and later evolved on major upgrades (sometimes minor too). This method compares the current known definitions
+     * of the tables (if the keyspace exists) to the most modern ones expected by the running version of C*.
+     * If any changes are detected, the schema transformation returned by this method aligns the cluster's view of
+     * that keyspace with the expected modern definition.
+     *
+     * @param keyspace   the metadata of the keyspace as it should be after application.
+     * @param generation timestamp to use for the table changes in the schema mutation
+     * @return the transformation.
+     */
+    public static SchemaTransformation updateSystemKeyspace(KeyspaceMetadata keyspace, long generation)
+    {
+        return new SchemaTransformation()
+        {
+            @Override
+            public Optional<Long> fixedTimestampMicros()
+            {
+                return Optional.of(generation);
+            }
+
+            @Override
+            public Keyspaces apply(Keyspaces schema)
+            {
+                KeyspaceMetadata updatedKeyspace = keyspace;
+                KeyspaceMetadata curKeyspace = schema.getNullable(keyspace.name);
+                if (curKeyspace != null)
+                {
+                    // If the keyspace already exists, we preserve whatever parameters it has.
+                    updatedKeyspace = updatedKeyspace.withSwapped(curKeyspace.params);
+
+                    for (TableMetadata curTable : curKeyspace.tables)
+                    {
+                        TableMetadata desiredTable = updatedKeyspace.tables.getNullable(curTable.name);
+                        if (desiredTable == null)
+                        {
+                            // preserve existing tables which are missing in the new keyspace definition
+                            updatedKeyspace = updatedKeyspace.withSwapped(updatedKeyspace.tables.with(curTable));
+                        }
+                        else
+                        {
+                            updatedKeyspace = updatedKeyspace.withSwapped(updatedKeyspace.tables.without(desiredTable));
+
+                            TableMetadata.Builder updatedBuilder = desiredTable.unbuild();
+
+                            for (ColumnMetadata column : curTable.regularAndStaticColumns())
+                            {
+                                if (!desiredTable.regularAndStaticColumns().contains(column))
+                                    updatedBuilder.addColumn(column);
+                            }
+
+                            updatedKeyspace = updatedKeyspace.withSwapped(updatedKeyspace.tables.with(updatedBuilder.build()));
+                        }
+                    }
+                }
+                return schema.withAddedOrReplaced(updatedKeyspace);
+            }
+        };
+    }
+
+}
\ No newline at end of file
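
For illustration, a sketch of how these factories compose when bootstrapping a built-in keyspace; the input Keyspaces snapshot is whatever the caller is transforming, and applying the result through the update handler is outside this snippet:

// Ensure the distributed system keyspace and its denylist table exist, tolerating prior creation.
static Keyspaces ensureDistributedSchema(Keyspaces current)
{
    Keyspaces withKeyspace = SchemaTransformations.addKeyspace(SystemDistributedKeyspace.metadata(), true).apply(current);
    return SchemaTransformations.addTable(SystemDistributedKeyspace.PartitionDenylistTable, true).apply(withKeyspace);
}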
diff --git a/src/java/org/apache/cassandra/schema/SchemaUpdateHandler.java b/src/java/org/apache/cassandra/schema/SchemaUpdateHandler.java
new file mode 100644
index 0000000..f6711f3
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SchemaUpdateHandler.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.time.Duration;
+
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
+import org.apache.cassandra.utils.concurrent.Awaitable;
+
+/**
+ * Schema update handler is responsible for maintaining the shared schema and synchronizing it with other nodes in
+ * the cluster, which means pushing and pulling changes, as well as tracking the current version in the cluster.
+ * <p/>
+ * The interface has been extracted to abstract out that functionality. It allows for various implementations like
+ * Gossip-based (the default), ETCD, offline, etc., and makes it easier to mock in unit tests.
+ */
+public interface SchemaUpdateHandler
+{
+    /**
+     * Starts actively synchronizing schema with the rest of the cluster. It is called at the very beginning of
+     * node startup. It is not expected to block; to await startup completion, use
+     * {@link #waitUntilReady(Duration)}.
+     */
+    void start();
+
+    /**
+     * Waits until the schema update handler is ready and returns the result. If the method returns {@code false} it
+     * means that readiness could not be achieved within the specified period of time. The method can be used just to
+     * check whether the schema is ready by passing {@link Duration#ZERO} as the timeout, in which case it returns immediately.
+     *
+     * @param timeout the maximum time to wait for schema readiness
+     * @return whether readiness is achieved
+     */
+    boolean waitUntilReady(Duration timeout);
+
+    /**
+     * Applies schema transformation in the underlying storage and synchronizes with other nodes.
+     *
+     * @param transformation schema transformation to be performed
+     * @param local          if true, the caller does not require synchronizing schema with other nodes - in practice local is
+     *                       used only in some tests
+     * @return transformation result
+     */
+    SchemaTransformationResult apply(SchemaTransformation transformation, boolean local);
+
+    /**
+     * Resets the schema either by reloading data from the local storage or from the other nodes. Once the schema is
+     * refreshed, the callbacks provided in the factory method are executed, and the updated schema version is announced.
+     *
+     * @param local whether we should reset with locally stored schema or fetch the schema from other nodes
+     */
+    void reset(boolean local);
+
+    /**
+     * Marks the local schema to be cleared and refreshed. After this method is called, the update handler tries to obtain
+     * a fresh schema definition from a remote source. Once the schema definition is received, the local schema is
+     * replaced (instead of being merged, which is what usually happens when an update is received).
+     * <p/>
+     * The returned awaitable is fulfilled when the schema is received and applied.
+     */
+    Awaitable clear();
+}
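
The waitUntilReady contract doubles as a non-blocking readiness probe; a minimal usage sketch (obtaining the handler instance is left to the caller):

// Returns immediately: Duration.ZERO just reports whether the schema is already ready.
static boolean isSchemaReady(SchemaUpdateHandler handler)
{
    return handler.waitUntilReady(java.time.Duration.ZERO);
}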
diff --git a/src/java/org/apache/cassandra/schema/SchemaUpdateHandlerFactory.java b/src/java/org/apache/cassandra/schema/SchemaUpdateHandlerFactory.java
new file mode 100644
index 0000000..f324a5d
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SchemaUpdateHandlerFactory.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.function.BiConsumer;
+
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
+
+public interface SchemaUpdateHandlerFactory
+{
+    /**
+     * A factory which provides the appropriate schema update handler. The actual implementation may be different for
+     * different run modes (client, tool, daemon).
+     *
+     * @param online               whether schema update handler should work online and be aware of the other nodes (when in daemon mode)
+     * @param updateSchemaCallback callback which will be called right after the shared schema is updated
+     */
+    SchemaUpdateHandler getSchemaUpdateHandler(boolean online, BiConsumer<SchemaTransformationResult, Boolean> updateSchemaCallback);
+}
diff --git a/src/java/org/apache/cassandra/schema/SchemaUpdateHandlerFactoryProvider.java b/src/java/org/apache/cassandra/schema/SchemaUpdateHandlerFactoryProvider.java
new file mode 100644
index 0000000..9411a92
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SchemaUpdateHandlerFactoryProvider.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import javax.inject.Provider;
+
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.utils.FBUtilities;
+
+/**
+ * Provides the instance of the SchemaUpdateHandler factory pointed to by the {@link #SUH_FACTORY_CLASS_PROPERTY} system property.
+ * If the property is not defined, the default {@link DefaultSchemaUpdateHandlerFactory} instance is returned.
+ */
+public class SchemaUpdateHandlerFactoryProvider implements Provider<SchemaUpdateHandlerFactory>
+{
+    public static final String SUH_FACTORY_CLASS_PROPERTY = "cassandra.schema.update_handler_factory.class";
+
+    public final static SchemaUpdateHandlerFactoryProvider instance = new SchemaUpdateHandlerFactoryProvider();
+
+    @Override
+    public SchemaUpdateHandlerFactory get()
+    {
+        String suhFactoryClassName = StringUtils.trimToNull(System.getProperty(SUH_FACTORY_CLASS_PROPERTY));
+        if (suhFactoryClassName == null)
+        {
+            return DefaultSchemaUpdateHandlerFactory.instance;
+        }
+        else
+        {
+            Class<SchemaUpdateHandlerFactory> suhFactoryClass = FBUtilities.classForName(suhFactoryClassName, "schema update handler factory");
+            try
+            {
+                return suhFactoryClass.newInstance();
+            }
+            catch (InstantiationException | IllegalAccessException ex)
+            {
+                throw new ConfigurationException(String.format("Failed to initialize schema update handler factory class %s defined in %s system property.",
+                                                               suhFactoryClassName, SUH_FACTORY_CLASS_PROPERTY), ex);
+            }
+        }
+    }
+}
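
A hedged sketch of plugging in an alternative factory through the new system property; the class name is hypothetical and must implement SchemaUpdateHandlerFactory with a public no-arg constructor, since it is instantiated via newInstance():

// Must be set before the provider is first consulted (e.g. via a -D JVM option or early in startup).
System.setProperty(SchemaUpdateHandlerFactoryProvider.SUH_FACTORY_CLASS_PROPERTY,
                   "com.example.EtcdSchemaUpdateHandlerFactory"); // hypothetical implementation
SchemaUpdateHandlerFactory factory = SchemaUpdateHandlerFactoryProvider.instance.get();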
diff --git a/src/java/org/apache/cassandra/schema/SystemDistributedKeyspace.java b/src/java/org/apache/cassandra/schema/SystemDistributedKeyspace.java
new file mode 100644
index 0000000..dc40093
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SystemDistributedKeyspace.java
@@ -0,0 +1,409 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.CommonRange;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static java.lang.String.format;
+
+import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+
+public final class SystemDistributedKeyspace
+{
+    private SystemDistributedKeyspace()
+    {
+    }
+
+    public static final String NAME = "system_distributed";
+
+    private static final int DEFAULT_RF = CassandraRelevantProperties.SYSTEM_DISTRIBUTED_DEFAULT_RF.getInt();
+    private static final Logger logger = LoggerFactory.getLogger(SystemDistributedKeyspace.class);
+
+    /**
+     * Generation is used as a timestamp for automatic table creation on startup.
+     * If you make any changes to the tables below, make sure to increment the
+     * generation and document your change here.
+     *
+     * gen 0: original definition in 2.2
+     * gen 1: (pre-)add options column to parent_repair_history in 3.0, 3.11
+     * gen 2: (pre-)add coordinator_port and participants_v2 columns to repair_history in 3.0, 3.11, 4.0
+     * gen 3: gc_grace_seconds raised from 0 to 10 days in CASSANDRA-12954 in 3.11.0
+     * gen 4: compression chunk length reduced to 16KiB, memtable_flush_period_in_ms now unset on all tables in 4.0
+     * gen 5: add ttl and TWCS to repair_history tables
+     * gen 6: add denylist table
+     */
+    public static final long GENERATION = 6;
+
+    public static final String REPAIR_HISTORY = "repair_history";
+
+    public static final String PARENT_REPAIR_HISTORY = "parent_repair_history";
+
+    public static final String VIEW_BUILD_STATUS = "view_build_status";
+
+    public static final String PARTITION_DENYLIST_TABLE = "partition_denylist";
+
+    private static final TableMetadata RepairHistory =
+        parse(REPAIR_HISTORY,
+                "Repair history",
+                "CREATE TABLE %s ("
+                     + "keyspace_name text,"
+                     + "columnfamily_name text,"
+                     + "id timeuuid,"
+                     + "parent_id timeuuid,"
+                     + "range_begin text,"
+                     + "range_end text,"
+                     + "coordinator inet,"
+                     + "coordinator_port int,"
+                     + "participants set<inet>,"
+                     + "participants_v2 set<text>,"
+                     + "exception_message text,"
+                     + "exception_stacktrace text,"
+                     + "status text,"
+                     + "started_at timestamp,"
+                     + "finished_at timestamp,"
+                     + "PRIMARY KEY ((keyspace_name, columnfamily_name), id))")
+        .defaultTimeToLive((int) TimeUnit.DAYS.toSeconds(30))
+        .compaction(CompactionParams.twcs(ImmutableMap.of("compaction_window_unit","DAYS",
+                                                          "compaction_window_size","1")))
+        .build();
+
+    private static final TableMetadata ParentRepairHistory =
+        parse(PARENT_REPAIR_HISTORY,
+                "Repair history",
+                "CREATE TABLE %s ("
+                     + "parent_id timeuuid,"
+                     + "keyspace_name text,"
+                     + "columnfamily_names set<text>,"
+                     + "started_at timestamp,"
+                     + "finished_at timestamp,"
+                     + "exception_message text,"
+                     + "exception_stacktrace text,"
+                     + "requested_ranges set<text>,"
+                     + "successful_ranges set<text>,"
+                     + "options map<text, text>,"
+                     + "PRIMARY KEY (parent_id))")
+        .defaultTimeToLive((int) TimeUnit.DAYS.toSeconds(30))
+        .compaction(CompactionParams.twcs(ImmutableMap.of("compaction_window_unit","DAYS",
+                                                          "compaction_window_size","1")))
+        .build();
+
+    private static final TableMetadata ViewBuildStatus =
+        parse(VIEW_BUILD_STATUS,
+            "Materialized View build status",
+            "CREATE TABLE %s ("
+                     + "keyspace_name text,"
+                     + "view_name text,"
+                     + "host_id uuid,"
+                     + "status text,"
+                     + "PRIMARY KEY ((keyspace_name, view_name), host_id))").build();
+
+    public static final TableMetadata PartitionDenylistTable =
+    parse(PARTITION_DENYLIST_TABLE,
+          "Partition keys which have been denied access",
+          "CREATE TABLE %s ("
+          + "ks_name text,"
+          + "table_name text,"
+          + "key blob,"
+          + "PRIMARY KEY ((ks_name, table_name), key))")
+    .build();
+
+    private static TableMetadata.Builder parse(String table, String description, String cql)
+    {
+        return CreateTableStatement.parse(format(cql, table), SchemaConstants.DISTRIBUTED_KEYSPACE_NAME)
+                                   .id(TableId.forSystemTable(SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, table))
+                                   .comment(description);
+    }
+
+    public static KeyspaceMetadata metadata()
+    {
+        return KeyspaceMetadata.create(SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, KeyspaceParams.simple(Math.max(DEFAULT_RF, DatabaseDescriptor.getDefaultKeyspaceRF())), Tables.of(RepairHistory, ParentRepairHistory, ViewBuildStatus, PartitionDenylistTable));
+    }
+
+    public static void startParentRepair(TimeUUID parent_id, String keyspaceName, String[] cfnames, RepairOption options)
+    {
+        Collection<Range<Token>> ranges = options.getRanges();
+        String query = "INSERT INTO %s.%s (parent_id, keyspace_name, columnfamily_names, requested_ranges, started_at,          options)"+
+                                 " VALUES (%s,        '%s',          { '%s' },           { '%s' },          toTimestamp(now()), { %s })";
+        String fmtQry = format(query,
+                                      SchemaConstants.DISTRIBUTED_KEYSPACE_NAME,
+                                      PARENT_REPAIR_HISTORY,
+                                      parent_id.toString(),
+                                      keyspaceName,
+                                      Joiner.on("','").join(cfnames),
+                                      Joiner.on("','").join(ranges),
+                                      toCQLMap(options.asMap(), RepairOption.RANGES_KEY, RepairOption.COLUMNFAMILIES_KEY));
+        processSilent(fmtQry);
+    }
+
+    private static String toCQLMap(Map<String, String> options, String ... ignore)
+    {
+        Set<String> toIgnore = Sets.newHashSet(ignore);
+        StringBuilder map = new StringBuilder();
+        boolean first = true;
+        for (Map.Entry<String, String> entry : options.entrySet())
+        {
+            if (!toIgnore.contains(entry.getKey()))
+            {
+                if (!first)
+                    map.append(',');
+                first = false;
+                map.append(format("'%s': '%s'", entry.getKey(), entry.getValue()));
+            }
+        }
+        return map.toString();
+    }
+
+    public static void failParentRepair(TimeUUID parent_id, Throwable t)
+    {
+        String query = "UPDATE %s.%s SET finished_at = toTimestamp(now()), exception_message=?, exception_stacktrace=? WHERE parent_id=%s";
+
+        StringWriter sw = new StringWriter();
+        PrintWriter pw = new PrintWriter(sw);
+        t.printStackTrace(pw);
+        String fmtQuery = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, PARENT_REPAIR_HISTORY, parent_id.toString());
+        String message = t.getMessage();
+        processSilent(fmtQuery, message != null ? message : "", sw.toString());
+    }
+
+    public static void successfulParentRepair(TimeUUID parent_id, Collection<Range<Token>> successfulRanges)
+    {
+        String query = "UPDATE %s.%s SET finished_at = toTimestamp(now()), successful_ranges = {'%s'} WHERE parent_id=%s";
+        String fmtQuery = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, PARENT_REPAIR_HISTORY, Joiner.on("','").join(successfulRanges), parent_id.toString());
+        processSilent(fmtQuery);
+    }
+
+    public static void startRepairs(TimeUUID id, TimeUUID parent_id, String keyspaceName, String[] cfnames, CommonRange commonRange)
+    {
+        // Don't record repair history if an upgrade is in progress, as version 3 nodes generate errors
+        // due to schema differences
+        boolean includeNewColumns = !Gossiper.instance.hasMajorVersion3Nodes();
+
+        InetAddressAndPort coordinator = FBUtilities.getBroadcastAddressAndPort();
+        Set<String> participants = Sets.newHashSet();
+        Set<String> participants_v2 = Sets.newHashSet();
+
+        for (InetAddressAndPort endpoint : commonRange.endpoints)
+        {
+            participants.add(endpoint.getHostAddress(false));
+            participants_v2.add(endpoint.getHostAddressAndPort());
+        }
+
+        String query =
+                "INSERT INTO %s.%s (keyspace_name, columnfamily_name, id, parent_id, range_begin, range_end, coordinator, coordinator_port, participants, participants_v2, status, started_at) " +
+                        "VALUES (   '%s',          '%s',              %s, %s,        '%s',        '%s',      '%s',        %d,               { '%s' },     { '%s' },        '%s',   toTimestamp(now()))";
+        String queryWithoutNewColumns =
+                "INSERT INTO %s.%s (keyspace_name, columnfamily_name, id, parent_id, range_begin, range_end, coordinator, participants, status, started_at) " +
+                        "VALUES (   '%s',          '%s',              %s, %s,        '%s',        '%s',      '%s',               { '%s' },        '%s',   toTimestamp(now()))";
+
+        for (String cfname : cfnames)
+        {
+            for (Range<Token> range : commonRange.ranges)
+            {
+                String fmtQry;
+                if (includeNewColumns)
+                {
+                    fmtQry = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
+                                    keyspaceName,
+                                    cfname,
+                                    id.toString(),
+                                    parent_id.toString(),
+                                    range.left.toString(),
+                                    range.right.toString(),
+                                    coordinator.getHostAddress(false),
+                                    coordinator.getPort(),
+                                    Joiner.on("', '").join(participants),
+                                    Joiner.on("', '").join(participants_v2),
+                                    RepairState.STARTED.toString());
+                }
+                else
+                {
+                    fmtQry = format(queryWithoutNewColumns, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
+                                    keyspaceName,
+                                    cfname,
+                                    id.toString(),
+                                    parent_id.toString(),
+                                    range.left.toString(),
+                                    range.right.toString(),
+                                    coordinator.getHostAddress(false),
+                                    Joiner.on("', '").join(participants),
+                                    RepairState.STARTED.toString());
+                }
+                processSilent(fmtQry);
+            }
+        }
+    }
+
+    public static void failRepairs(TimeUUID id, String keyspaceName, String[] cfnames, Throwable t)
+    {
+        for (String cfname : cfnames)
+            failedRepairJob(id, keyspaceName, cfname, t);
+    }
+
+    public static void successfulRepairJob(TimeUUID id, String keyspaceName, String cfname)
+    {
+        String query = "UPDATE %s.%s SET status = '%s', finished_at = toTimestamp(now()) WHERE keyspace_name = '%s' AND columnfamily_name = '%s' AND id = %s";
+        String fmtQuery = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
+                                        RepairState.SUCCESS.toString(),
+                                        keyspaceName,
+                                        cfname,
+                                        id.toString());
+        processSilent(fmtQuery);
+    }
+
+    public static void failedRepairJob(TimeUUID id, String keyspaceName, String cfname, Throwable t)
+    {
+        String query = "UPDATE %s.%s SET status = '%s', finished_at = toTimestamp(now()), exception_message=?, exception_stacktrace=? WHERE keyspace_name = '%s' AND columnfamily_name = '%s' AND id = %s";
+        StringWriter sw = new StringWriter();
+        PrintWriter pw = new PrintWriter(sw);
+        t.printStackTrace(pw);
+        String fmtQry = format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, REPAIR_HISTORY,
+                                      RepairState.FAILED.toString(),
+                                      keyspaceName,
+                                      cfname,
+                                      id.toString());
+        String message = t.getMessage();
+        if (message == null)
+            message = t.getClass().getName();
+        processSilent(fmtQry, message, sw.toString());
+    }
+
+    public static void startViewBuild(String keyspace, String view, UUID hostId)
+    {
+        String query = "INSERT INTO %s.%s (keyspace_name, view_name, host_id, status) VALUES (?, ?, ?, ?)";
+        QueryProcessor.process(format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS),
+                               ConsistencyLevel.ONE,
+                               Lists.newArrayList(bytes(keyspace),
+                                                  bytes(view),
+                                                  bytes(hostId),
+                                                  bytes(BuildStatus.STARTED.toString())));
+    }
+
+    public static void successfulViewBuild(String keyspace, String view, UUID hostId)
+    {
+        String query = "UPDATE %s.%s SET status = ? WHERE keyspace_name = ? AND view_name = ? AND host_id = ?";
+        QueryProcessor.process(format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS),
+                               ConsistencyLevel.ONE,
+                               Lists.newArrayList(bytes(BuildStatus.SUCCESS.toString()),
+                                                  bytes(keyspace),
+                                                  bytes(view),
+                                                  bytes(hostId)));
+    }
+
+    public static Map<UUID, String> viewStatus(String keyspace, String view)
+    {
+        String query = "SELECT host_id, status FROM %s.%s WHERE keyspace_name = ? AND view_name = ?";
+        UntypedResultSet results;
+        try
+        {
+            results = QueryProcessor.execute(format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS),
+                                             ConsistencyLevel.ONE,
+                                             keyspace,
+                                             view);
+        }
+        catch (Exception e)
+        {
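+            // Best effort: if the build status table cannot be read, report no known per-host status.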
+            return Collections.emptyMap();
+        }
+
+
+        Map<UUID, String> status = new HashMap<>();
+        for (UntypedResultSet.Row row : results)
+        {
+            status.put(row.getUUID("host_id"), row.getString("status"));
+        }
+        return status;
+    }
+
+    public static void setViewRemoved(String keyspaceName, String viewName)
+    {
+        String buildReq = "DELETE FROM %s.%s WHERE keyspace_name = ? AND view_name = ?";
+        QueryProcessor.executeInternal(format(buildReq, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS), keyspaceName, viewName);
+        forceBlockingFlush(VIEW_BUILD_STATUS, ColumnFamilyStore.FlushReason.INTERNALLY_FORCED);
+    }
+
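+    /** Executes the formatted query at {@code ConsistencyLevel.ANY}, logging (rather than propagating) any failure. */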
+    private static void processSilent(String fmtQry, String... values)
+    {
+        try
+        {
+            List<ByteBuffer> valueList = new ArrayList<>(values.length);
+            for (String v : values)
+            {
+                valueList.add(bytes(v));
+            }
+            QueryProcessor.process(fmtQry, ConsistencyLevel.ANY, valueList);
+        }
+        catch (Throwable t)
+        {
+            logger.error("Error executing query "+fmtQry, t);
+        }
+    }
+
+    public static void forceBlockingFlush(String table, ColumnFamilyStore.FlushReason reason)
+    {
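+        // Flushing is skipped entirely when DatabaseDescriptor.isUnsafeSystem() is set (durability intentionally traded away).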
+        if (!DatabaseDescriptor.isUnsafeSystem())
+            FBUtilities.waitOnFuture(Keyspace.open(SchemaConstants.DISTRIBUTED_KEYSPACE_NAME)
+                                             .getColumnFamilyStore(table)
+                                             .forceFlush(reason));
+    }
+
+    private enum RepairState
+    {
+        STARTED, SUCCESS, FAILED
+    }
+
+    private enum BuildStatus
+    {
+        UNKNOWN, STARTED, SUCCESS
+    }
+}
diff --git a/src/java/org/apache/cassandra/schema/TableId.java b/src/java/org/apache/cassandra/schema/TableId.java
index 695147f..fd47a47 100644
--- a/src/java/org/apache/cassandra/schema/TableId.java
+++ b/src/java/org/apache/cassandra/schema/TableId.java
@@ -25,7 +25,10 @@
 import org.apache.commons.lang3.ArrayUtils;
 
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
 
 /**
  * The unique identifier of a table.
@@ -35,6 +38,7 @@
  */
 public class TableId
 {
+    // TODO: should this be a TimeUUID?
     private final UUID id;
 
     private TableId(UUID id)
@@ -47,9 +51,10 @@
         return new TableId(id);
     }
 
+    // TODO: should we be using UUID.randomUUID()?
     public static TableId generate()
     {
-        return new TableId(UUIDGen.getTimeUUID());
+        return new TableId(nextTimeUUID().asUUID());
     }
 
     public static TableId fromString(String idString)
@@ -67,8 +72,13 @@
      */
     public static TableId forSystemTable(String keyspace, String table)
     {
-        assert SchemaConstants.isLocalSystemKeyspace(keyspace) || SchemaConstants.isReplicatedSystemKeyspace(keyspace);
-        return new TableId(UUID.nameUUIDFromBytes(ArrayUtils.addAll(keyspace.getBytes(), table.getBytes())));
+        assert SchemaConstants.isSystemKeyspace(keyspace) : String.format("Table %s.%s is not a system table; only keyspaces allowed are %s", keyspace, table, SchemaConstants.getSystemKeyspaces());
+        return unsafeDeterministic(keyspace, table);
+    }
+
+    public static TableId unsafeDeterministic(String keyspace, String table)
+    {
+        return new TableId(UUID.nameUUIDFromBytes(ArrayUtils.addAll(keyspace.getBytes(UTF_8), table.getBytes(UTF_8))));
     }
 
     public String toHexString()
diff --git a/src/java/org/apache/cassandra/schema/TableMetadata.java b/src/java/org/apache/cassandra/schema/TableMetadata.java
index 36f2382..2e9d507 100644
--- a/src/java/org/apache/cassandra/schema/TableMetadata.java
+++ b/src/java/org/apache/cassandra/schema/TableMetadata.java
@@ -40,6 +40,7 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.service.reads.SpeculativeRetryPolicy;
+import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.AbstractIterator;
 import org.github.jamm.Unmetered;
 
@@ -739,7 +740,12 @@
                 partitioner = DatabaseDescriptor.getPartitioner();
 
             if (id == null)
-                id = TableId.generate();
+            {
+                // make sure vtables use deterministic ids so they can be referenced in cross-node calls
+                // see CASSANDRA-17295
+                if (DatabaseDescriptor.useDeterministicTableID() || kind == Kind.VIRTUAL) id = TableId.unsafeDeterministic(keyspace, name);
+                else id = TableId.generate();
+            }
 
             if (Flag.isCQLTable(flags))
                 return new TableMetadata(this);
@@ -861,6 +867,13 @@
             return this;
         }
 
+        public Builder memtable(MemtableParams val)
+        {
+            params.memtable(val);
+            return this;
+        }
+
+
         public Builder isCounter(boolean val)
         {
             return flag(Flag.COUNTER, val);
@@ -984,6 +997,11 @@
             return columns.values();
         }
 
+        public int numColumns()
+        {
+            return columns.size();
+        }
+
         public Set<String> columnNames()
         {
             return columns.values().stream().map(c -> c.name.toString()).collect(toSet());
@@ -1141,17 +1159,17 @@
     }
 
     public String toCqlString(boolean includeDroppedColumns,
-                              boolean internals,
+                              boolean withInternals,
                               boolean ifNotExists)
     {
         CqlBuilder builder = new CqlBuilder(2048);
-        appendCqlTo(builder, includeDroppedColumns, internals, ifNotExists);
+        appendCqlTo(builder, includeDroppedColumns, withInternals, ifNotExists);
         return builder.toString();
     }
 
     public void appendCqlTo(CqlBuilder builder,
                             boolean includeDroppedColumns,
-                            boolean internals,
+                            boolean withInternals,
                             boolean ifNotExists)
     {
         assert !isView();
@@ -1190,7 +1208,7 @@
         builder.append(" WITH ")
                .increaseIndent();
 
-        appendTableOptions(builder, internals);
+        appendTableOptions(builder, withInternals);
 
         builder.decreaseIndent();
 
@@ -1273,9 +1291,9 @@
                .newLine();
     }
 
-    void appendTableOptions(CqlBuilder builder, boolean internals)
+    void appendTableOptions(CqlBuilder builder, boolean withInternals)
     {
-        if (internals)
+        if (withInternals)
             builder.append("ID = ")
                    .append(id.toString())
                    .newLine()
@@ -1332,6 +1350,66 @@
         }
     }
 
+    /**
+     * Returns a string representation of a partition key in a CQL-friendly format.
+     *
+     * For non-composite types it returns the result of {@link org.apache.cassandra.cql3.CQL3Type#toCQLLiteral}
+     * applied to the partition key.
+     * For composite types it applies {@link org.apache.cassandra.cql3.CQL3Type#toCQLLiteral} to each subkey and
+     * combines the results into a tuple.
+     *
+     * @param partitionKey a partition key
+     * @return CQL-like string representation of a partition key
+     */
+    public String partitionKeyAsCQLLiteral(ByteBuffer partitionKey)
+    {
+        return primaryKeyAsCQLLiteral(partitionKey, Clustering.EMPTY);
+    }
+
+    /**
+     * Returns a string representation of a primary key in a CQL-friendly format.
+     *
+     * @param partitionKey the partition key part of the primary key
+     * @param clustering the clustering key part of the primary key
+     * @return a CQL-like string representation of the specified primary key
+     */
+    public String primaryKeyAsCQLLiteral(ByteBuffer partitionKey, Clustering<?> clustering)
+    {
+        int clusteringSize = clustering.size();
+
+        String[] literals;
+        int i = 0;
+
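+        // Composite partition keys contribute one literal per component; simple keys contribute a single literal.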
+        if (partitionKeyType instanceof CompositeType)
+        {
+            List<AbstractType<?>> components = partitionKeyType.getComponents();
+            int size = components.size();
+            literals = new String[size + clusteringSize];
+            ByteBuffer[] values = ((CompositeType) partitionKeyType).split(partitionKey);
+            for (i = 0; i < size; i++)
+            {
+                literals[i] = asCQLLiteral(components.get(i), values[i]);
+            }
+        }
+        else
+        {
+            literals = new String[1 + clusteringSize];
+            literals[i++] = asCQLLiteral(partitionKeyType, partitionKey);
+        }
+
+        for (int j = 0; j < clusteringSize; j++)
+        {
+            literals[i++] = asCQLLiteral(clusteringColumns().get(j).type, clustering.bufferAt(j));
+        }
+
+        return i == 1 ? literals[0] : "(" + String.join(", ", literals) + ")";
+    }
+
+    private static String asCQLLiteral(AbstractType<?> type, ByteBuffer value)
+    {
+        return type.asCQL3Type().toCQLLiteral(value, ProtocolVersion.CURRENT);
+    }
+
     public static class CompactTableMetadata extends TableMetadata
     {
 
diff --git a/src/java/org/apache/cassandra/schema/TableMetadataRef.java b/src/java/org/apache/cassandra/schema/TableMetadataRef.java
index 3325510..dc4ff1d 100644
--- a/src/java/org/apache/cassandra/schema/TableMetadataRef.java
+++ b/src/java/org/apache/cassandra/schema/TableMetadataRef.java
@@ -96,4 +96,4 @@
     {
         return get().toString();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/schema/TableMetadataRefCache.java b/src/java/org/apache/cassandra/schema/TableMetadataRefCache.java
new file mode 100644
index 0000000..d947d1d
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/TableMetadataRefCache.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.Collections;
+import java.util.Map;
+
+import com.google.common.collect.MapDifference;
+import com.google.common.collect.Maps;
+
+import org.apache.cassandra.utils.Pair;
+
+/**
+ * Manages the cached {@link TableMetadataRef} objects, which hold the references to {@link TableMetadata} objects.
+ * <p>
+ * The purpose of {@link TableMetadataRef} is that the reference to {@link TableMetadataRef} remains unchanged when
+ * the metadata of the table changes. {@link TableMetadata} is immutable, so when it changes, we only switch
+ * the reference inside the existing {@link TableMetadataRef} object.
+ */
+class TableMetadataRefCache
+{
+    public final static TableMetadataRefCache EMPTY = new TableMetadataRefCache(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
+
+    // TableId -> mutable metadata ref map. We have to update these in place every time a table changes.
+    private final Map<TableId, TableMetadataRef> metadataRefs;
+
+    // keyspace and table names -> mutable metadata ref map.
+    private final Map<Pair<String, String>, TableMetadataRef> metadataRefsByName;
+
+    // (keyspace name, index name) -> mutable metadata ref map. We have to update these in place every time an index changes.
+    private final Map<Pair<String, String>, TableMetadataRef> indexMetadataRefs;
+
+    public TableMetadataRefCache(Map<TableId, TableMetadataRef> metadataRefs,
+                                 Map<Pair<String, String>, TableMetadataRef> metadataRefsByName,
+                                 Map<Pair<String, String>, TableMetadataRef> indexMetadataRefs)
+    {
+        this.metadataRefs = Collections.unmodifiableMap(metadataRefs);
+        this.metadataRefsByName = Collections.unmodifiableMap(metadataRefsByName);
+        this.indexMetadataRefs = Collections.unmodifiableMap(indexMetadataRefs);
+    }
+
+    /**
+     * Returns a copy of this cache with the {@link TableMetadataRef} objects for the provided keyspace added to {@link #metadataRefs} and
+     * {@link #indexMetadataRefs}, assuming the keyspace is new (in the sense of not being tracked yet).
+     */
+    TableMetadataRefCache withNewRefs(KeyspaceMetadata ksm)
+    {
+        return withUpdatedRefs(ksm.empty(), ksm);
+    }
+
+    /**
+     * Returns a copy of this cache with the {@link TableMetadataRef} objects in {@link #metadataRefs} and {@link #indexMetadataRefs}
+     * updated for an existing keyspace, given its previous and new definitions.
+     * <p>
+     * Note that {@link TableMetadataRef} are not duplicated and table metadata is altered in the existing refs.
+     */
+    TableMetadataRefCache withUpdatedRefs(KeyspaceMetadata previous, KeyspaceMetadata updated)
+    {
+        Tables.TablesDiff tablesDiff = Tables.diff(previous.tables, updated.tables);
+        Views.ViewsDiff viewsDiff = Views.diff(previous.views, updated.views);
+
+        MapDifference<String, TableMetadata> indexesDiff = previous.tables.indexesDiff(updated.tables);
+
+        boolean hasCreatedOrDroppedTablesOrViews = tablesDiff.created.size() > 0 || tablesDiff.dropped.size() > 0 || viewsDiff.created.size() > 0 || viewsDiff.dropped.size() > 0;
+        boolean hasCreatedOrDroppedIndexes = !indexesDiff.entriesOnlyOnRight().isEmpty() || !indexesDiff.entriesOnlyOnLeft().isEmpty();
+
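+        // Copy the maps only when entries are added or removed; for altered tables/views/indexes the existing
+        // refs are updated in place below, so the current maps can be reused as-is.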
+        Map<TableId, TableMetadataRef> metadataRefs = hasCreatedOrDroppedTablesOrViews ? Maps.newHashMap(this.metadataRefs) : this.metadataRefs;
+        Map<Pair<String, String>, TableMetadataRef> metadataRefsByName = hasCreatedOrDroppedTablesOrViews ? Maps.newHashMap(this.metadataRefsByName) : this.metadataRefsByName;
+        Map<Pair<String, String>, TableMetadataRef> indexMetadataRefs = hasCreatedOrDroppedIndexes ? Maps.newHashMap(this.indexMetadataRefs) : this.indexMetadataRefs;
+
+        // clean up after removed entries
+        tablesDiff.dropped.forEach(ref -> removeRef(metadataRefs, metadataRefsByName, ref));
+        viewsDiff.dropped.forEach(view -> removeRef(metadataRefs, metadataRefsByName, view.metadata));
+        indexesDiff.entriesOnlyOnLeft()
+                   .values()
+                   .forEach(indexTable -> indexMetadataRefs.remove(Pair.create(indexTable.keyspace, indexTable.indexName().get())));
+
+        // load up new entries
+        tablesDiff.created.forEach(table -> putRef(metadataRefs, metadataRefsByName, new TableMetadataRef(table)));
+        viewsDiff.created.forEach(view -> putRef(metadataRefs, metadataRefsByName, new TableMetadataRef(view.metadata)));
+        indexesDiff.entriesOnlyOnRight()
+                   .values()
+                   .forEach(indexTable -> indexMetadataRefs.put(Pair.create(indexTable.keyspace, indexTable.indexName().get()), new TableMetadataRef(indexTable)));
+
+        // refresh refs to updated ones
+        tablesDiff.altered.forEach(diff -> metadataRefs.get(diff.after.id).set(diff.after));
+        viewsDiff.altered.forEach(diff -> metadataRefs.get(diff.after.metadata.id).set(diff.after.metadata));
+        indexesDiff.entriesDiffering()
+                   .values()
+                   .stream()
+                   .map(MapDifference.ValueDifference::rightValue)
+                   .forEach(indexTable -> indexMetadataRefs.get(Pair.create(indexTable.keyspace, indexTable.indexName().get())).set(indexTable));
+
+        return new TableMetadataRefCache(metadataRefs, metadataRefsByName, indexMetadataRefs);
+    }
+
+    private void putRef(Map<TableId, TableMetadataRef> metadataRefs,
+                        Map<Pair<String, String>, TableMetadataRef> metadataRefsByName,
+                        TableMetadataRef ref)
+    {
+        metadataRefs.put(ref.id, ref);
+        metadataRefsByName.put(Pair.create(ref.keyspace, ref.name), ref);
+    }
+
+    private void removeRef(Map<TableId, TableMetadataRef> metadataRefs,
+                           Map<Pair<String, String>, TableMetadataRef> metadataRefsByName,
+                           TableMetadata tm)
+    {
+        metadataRefs.remove(tm.id);
+        metadataRefsByName.remove(Pair.create(tm.keyspace, tm.name));
+    }
+
+    /**
+     * Returns a copy of this cache with the {@link TableMetadataRef} objects for the provided (dropped) keyspace
+     * removed from {@link #metadataRefs} and {@link #indexMetadataRefs}.
+     */
+    TableMetadataRefCache withRemovedRefs(KeyspaceMetadata ksm)
+    {
+        return withUpdatedRefs(ksm, ksm.empty());
+    }
+
+    public TableMetadataRef getTableMetadataRef(TableId id)
+    {
+        return metadataRefs.get(id);
+    }
+
+    public TableMetadataRef getTableMetadataRef(String keyspace, String table)
+    {
+        return metadataRefsByName.get(Pair.create(keyspace, table));
+    }
+
+    public TableMetadataRef getIndexTableMetadataRef(String keyspace, String index)
+    {
+        return indexMetadataRefs.get(Pair.create(keyspace, index));
+    }
+}
diff --git a/src/java/org/apache/cassandra/schema/TableParams.java b/src/java/org/apache/cassandra/schema/TableParams.java
index 62db8f7..440729c 100644
--- a/src/java/org/apache/cassandra/schema/TableParams.java
+++ b/src/java/org/apache/cassandra/schema/TableParams.java
@@ -46,6 +46,7 @@
         COMMENT,
         COMPACTION,
         COMPRESSION,
+        MEMTABLE,
         DEFAULT_TIME_TO_LIVE,
         EXTENSIONS,
         GC_GRACE_SECONDS,
@@ -78,6 +79,7 @@
     public final CachingParams caching;
     public final CompactionParams compaction;
     public final CompressionParams compression;
+    public final MemtableParams memtable;
     public final ImmutableMap<String, ByteBuffer> extensions;
     public final boolean cdc;
     public final ReadRepairStrategy readRepair;
@@ -99,6 +101,7 @@
         caching = builder.caching;
         compaction = builder.compaction;
         compression = builder.compression;
+        memtable = builder.memtable;
         extensions = builder.extensions;
         cdc = builder.cdc;
         readRepair = builder.readRepair;
@@ -116,6 +119,7 @@
                             .comment(params.comment)
                             .compaction(params.compaction)
                             .compression(params.compression)
+                            .memtable(params.memtable)
                             .crcCheckChance(params.crcCheckChance)
                             .defaultTimeToLive(params.defaultTimeToLive)
                             .gcGraceSeconds(params.gcGraceSeconds)
@@ -178,6 +182,9 @@
 
         if (memtableFlushPeriodInMs < 0)
             fail("%s must be greater than or equal to 0 (got %s)", Option.MEMTABLE_FLUSH_PERIOD_IN_MS, memtableFlushPeriodInMs);
+
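+        // CDC is built on top of the commit log, so a memtable configuration that skips the commit log cannot support CDC.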
+        if (cdc && memtable.factory().writesShouldSkipCommitLog())
+            fail("CDC cannot work if writes skip the commit log. Check your memtable configuration.");
     }
 
     private static void fail(String format, Object... args)
@@ -208,6 +215,7 @@
             && caching.equals(p.caching)
             && compaction.equals(p.compaction)
             && compression.equals(p.compression)
+            && memtable.equals(p.memtable)
             && extensions.equals(p.extensions)
             && cdc == p.cdc
             && readRepair == p.readRepair;
@@ -228,6 +236,7 @@
                                 caching,
                                 compaction,
                                 compression,
+                                memtable,
                                 extensions,
                                 cdc,
                                 readRepair);
@@ -249,6 +258,7 @@
                           .add(Option.CACHING.toString(), caching)
                           .add(Option.COMPACTION.toString(), compaction)
                           .add(Option.COMPRESSION.toString(), compression)
+                          .add(Option.MEMTABLE.toString(), memtable)
                           .add(Option.EXTENSIONS.toString(), extensions)
                           .add(Option.CDC.toString(), cdc)
                           .add(Option.READ_REPAIR.toString(), readRepair)
@@ -272,6 +282,8 @@
                .newLine()
                .append("AND compression = ").append(compression.asMap())
                .newLine()
+               .append("AND memtable = ").appendWithSingleQuotes(memtable.configurationKey())
+               .newLine()
                .append("AND crc_check_chance = ").append(crcCheckChance)
                .newLine();
 
@@ -315,6 +327,7 @@
         private CachingParams caching = CachingParams.DEFAULT;
         private CompactionParams compaction = CompactionParams.DEFAULT;
         private CompressionParams compression = CompressionParams.DEFAULT;
+        private MemtableParams memtable = MemtableParams.DEFAULT;
         private ImmutableMap<String, ByteBuffer> extensions = ImmutableMap.of();
         private boolean cdc;
         private ReadRepairStrategy readRepair = ReadRepairStrategy.BLOCKING;
@@ -400,6 +413,12 @@
             return this;
         }
 
+        public Builder memtable(MemtableParams val)
+        {
+            memtable = val;
+            return this;
+        }
+
         public Builder compression(CompressionParams val)
         {
             compression = val;
diff --git a/src/java/org/apache/cassandra/schema/Types.java b/src/java/org/apache/cassandra/schema/Types.java
index 76694cc..0d264c4 100644
--- a/src/java/org/apache/cassandra/schema/Types.java
+++ b/src/java/org/apache/cassandra/schema/Types.java
@@ -109,6 +109,11 @@
         return Iterables.filter(types.values(), t -> t.referencesUserType(name) && !t.name.equals(name));
     }
 
+    public boolean isEmpty()
+    {
+        return types.isEmpty();
+    }
+
     /**
      * Get the type with the specified name
      *
diff --git a/src/java/org/apache/cassandra/schema/Views.java b/src/java/org/apache/cassandra/schema/Views.java
index f926c07..15d13f3 100644
--- a/src/java/org/apache/cassandra/schema/Views.java
+++ b/src/java/org/apache/cassandra/schema/Views.java
@@ -252,4 +252,4 @@
             return new ViewsDiff(created, dropped, altered.build());
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/security/AbstractSslContextFactory.java b/src/java/org/apache/cassandra/security/AbstractSslContextFactory.java
new file mode 100644
index 0000000..c2ef851
--- /dev/null
+++ b/src/java/org/apache/cassandra/security/AbstractSslContextFactory.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLException;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.TrustManagerFactory;
+
+import com.google.common.collect.ImmutableList;
+
+import io.netty.handler.ssl.CipherSuiteFilter;
+import io.netty.handler.ssl.ClientAuth;
+import io.netty.handler.ssl.OpenSsl;
+import io.netty.handler.ssl.SslContext;
+import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.handler.ssl.SslProvider;
+import org.apache.cassandra.config.Config;
+
+/**
+ * Abstract class implementing {@code ISslContextFactory} to provide most of the functionality that any
+ * implementation might need. It does not assume any file-based credentials for keys/certs, so it is a good base
+ * for any implementation that only needs to customize how keys/certs are loaded.
+ * <p>
+ * {@code CAUTION:} While this is an extremely useful abstraction, please be careful if you need to modify this class
+ * given possible custom implementations out there!
+ *
+ * @see DefaultSslContextFactory
+ */
+abstract public class AbstractSslContextFactory implements ISslContextFactory
+{
+    /*
+    This list is substituted in configurations that have explicitly specified the original "TLS" default,
+    by extracting it from the default "TLS" SSL Context instance
+     */
+    static protected final List<String> TLS_PROTOCOL_SUBSTITUTION = SSLFactory.tlsInstanceProtocolSubstitution();
+
+    protected boolean openSslIsAvailable;
+
+    protected final Map<String, Object> parameters;
+    protected final List<String> cipher_suites;
+    protected final String protocol;
+    protected final List<String> accepted_protocols;
+    protected final String algorithm;
+    protected final String store_type;
+    protected final boolean require_client_auth;
+    protected final boolean require_endpoint_verification;
+    /*
+    ServerEncryptionOptions does not use the enabled flag at all; instead it uses the existing
+    internode_encryption option. So we keep this protected and expose it through isEnabled,
+    so users of ServerEncryptionOptions can't accidentally use this when they should use isEnabled.
+    Long term we need to refactor ClientEncryptionOptions and ServerEncryptionOptions to be separate
+    classes so we can choose appropriate configuration for each.
+    See CASSANDRA-15262 and CASSANDRA-15146
+     */
+    protected Boolean enabled;
+    protected Boolean optional;
+
+    /* For test only */
+    protected AbstractSslContextFactory()
+    {
+        parameters = new HashMap<>();
+        cipher_suites = null;
+        protocol = null;
+        accepted_protocols = null;
+        algorithm = null;
+        store_type = "JKS";
+        require_client_auth = false;
+        require_endpoint_verification = false;
+        enabled = null;
+        optional = null;
+        deriveIfOpenSslAvailable();
+    }
+
+    protected AbstractSslContextFactory(Map<String, Object> parameters)
+    {
+        this.parameters = parameters;
+        cipher_suites = getStringList("cipher_suites");
+        protocol = getString("protocol");
+        accepted_protocols = getStringList("accepted_protocols");
+        algorithm = getString("algorithm");
+        store_type = getString("store_type", "JKS");
+        require_client_auth = getBoolean("require_client_auth", false);
+        require_endpoint_verification = getBoolean("require_endpoint_verification", false);
+        enabled = getBoolean("enabled");
+        optional = getBoolean("optional");
+        deriveIfOpenSslAvailable();
+    }
+
+    /**
+     * Derives whether {@code OpenSSL} is available. It allows in-jvm dtests to disable tcnative openssl support by
+     * setting the {@code cassandra.disable_tcactive_openssl} system property to {@code true}. Otherwise, loading it
+     * creates a circular reference that prevents the instance class loader from being garbage collected.
+     */
+    protected void deriveIfOpenSslAvailable()
+    {
+        if (Boolean.getBoolean(Config.PROPERTY_PREFIX + "disable_tcactive_openssl"))
+            openSslIsAvailable = false;
+        else
+            openSslIsAvailable = OpenSsl.isAvailable();
+    }
+
+    protected String getString(String key, String defaultValue)
+    {
+        return parameters.get(key) == null ? defaultValue : (String) parameters.get(key);
+    }
+
+    protected String getString(String key)
+    {
+        return (String) parameters.get(key);
+    }
+
+    protected List<String> getStringList(String key)
+    {
+        return (List<String>) parameters.get(key);
+    }
+
+    protected Boolean getBoolean(String key, boolean defaultValue)
+    {
+        return parameters.get(key) == null ? defaultValue : (Boolean) parameters.get(key);
+    }
+
+    protected Boolean getBoolean(String key)
+    {
+        return (Boolean) this.parameters.get(key);
+    }
+
+    @Override
+    public SSLContext createJSSESslContext(boolean verifyPeerCertificate) throws SSLException
+    {
+        TrustManager[] trustManagers = null;
+        if (verifyPeerCertificate)
+            trustManagers = buildTrustManagerFactory().getTrustManagers();
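+        // leaving trustManagers null makes SSLContext.init() fall back to the JDK's default trust managers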
+
+        KeyManagerFactory kmf = buildKeyManagerFactory();
+
+        try
+        {
+            SSLContext ctx = SSLContext.getInstance("TLS");
+            ctx.init(kmf.getKeyManagers(), trustManagers, null);
+            return ctx;
+        }
+        catch (Exception e)
+        {
+            throw new SSLException("Error creating/initializing the SSL Context", e);
+        }
+    }
+
+    @Override
+    public SslContext createNettySslContext(boolean verifyPeerCertificate, SocketType socketType,
+                                            CipherSuiteFilter cipherFilter) throws SSLException
+    {
+        /*
+            There is a case where the netty/openssl combo might not support using KeyManagerFactory. Specifically,
+            I've seen this with the netty-tcnative dynamic openssl implementation. Using the netty-tcnative
+            static-boringssl works fine with KeyManagerFactory. If we want to support all of the netty-tcnative
+            options, we would need to fall back to passing in a file reference for both a x509 and PKCS#8 private
+            key file in PEM format (see {@link SslContextBuilder#forServer(File, File, String)}). However, we are
+            not supporting that now to keep the config/yaml API simple.
+         */
+        KeyManagerFactory kmf = buildKeyManagerFactory();
+        SslContextBuilder builder;
+        if (socketType == SocketType.SERVER)
+        {
+            builder = SslContextBuilder.forServer(kmf).clientAuth(this.require_client_auth ? ClientAuth.REQUIRE :
+                                                                  ClientAuth.NONE);
+        }
+        else
+        {
+            builder = SslContextBuilder.forClient().keyManager(kmf);
+        }
+
+        builder.sslProvider(getSslProvider()).protocols(getAcceptedProtocols());
+
+        // only set the cipher suites if the operator has explicitly configured values for it; else, use the default
+        // for each ssl implementation (jdk or openssl)
+        if (cipher_suites != null && !cipher_suites.isEmpty())
+            builder.ciphers(cipher_suites, cipherFilter);
+
+        if (verifyPeerCertificate)
+            builder.trustManager(buildTrustManagerFactory());
+
+        return builder.build();
+    }
+
+    /**
+     * Combine the pre-4.0 protocol field with the accepted_protocols list, substituting a list of
+     * explicit protocols for the previous catchall default of "TLS"
+     *
+     * @return list of protocol names suitable for passing to SslContextBuilder.protocols, or null to use the default
+     */
+    @Override
+    public List<String> getAcceptedProtocols()
+    {
+        if (accepted_protocols == null)
+        {
+            if (protocol == null)
+            {
+                return null;
+            }
+            // TLS is accepted by SSLContext.getInstance as a shorthand for "give me an engine that
+            // can speak some TLS protocols".  It is not supported by SSLEngine.setAcceptedProtocols,
+            // so substitute if the user hasn't provided an accepted protocol configuration
+            else if (protocol.equalsIgnoreCase("TLS"))
+            {
+                return TLS_PROTOCOL_SUBSTITUTION;
+            }
+            else // the user was trying to limit to a single specific protocol, so try that
+            {
+                return ImmutableList.of(protocol);
+            }
+        }
+
+        if (protocol != null && !protocol.equalsIgnoreCase("TLS") &&
+            accepted_protocols.stream().noneMatch(ap -> ap.equalsIgnoreCase(protocol)))
+        {
+            // If the user provided a non-generic default protocol, append it to accepted_protocols - they wanted
+            // it after all.
+            return ImmutableList.<String>builder().addAll(accepted_protocols).add(protocol).build();
+        }
+        else
+        {
+            return accepted_protocols;
+        }
+    }
+
+    @Override
+    public List<String> getCipherSuites()
+    {
+        return cipher_suites;
+    }
+
+    /**
+     * Returns {@link SslProvider} to be used to build Netty's SslContext.
+     *
+     * @return appropriate SslProvider
+     */
+    protected SslProvider getSslProvider()
+    {
+        return openSslIsAvailable ? SslProvider.OPENSSL : SslProvider.JDK;
+    }
+
+    abstract protected KeyManagerFactory buildKeyManagerFactory() throws SSLException;
+
+    abstract protected TrustManagerFactory buildTrustManagerFactory() throws SSLException;
+}
diff --git a/src/java/org/apache/cassandra/security/CipherFactory.java b/src/java/org/apache/cassandra/security/CipherFactory.java
index 3c13629..4674fd1 100644
--- a/src/java/org/apache/cassandra/security/CipherFactory.java
+++ b/src/java/org/apache/cassandra/security/CipherFactory.java
@@ -31,7 +31,6 @@
 import javax.crypto.spec.IvParameterSpec;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.MoreExecutors;
 
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.LoadingCache;
@@ -40,6 +39,7 @@
 import org.slf4j.LoggerFactory;
 
 import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.config.TransparentDataEncryptionOptions;
 
 /**
@@ -81,7 +81,7 @@
 
         cache = Caffeine.newBuilder() // by default cache is unbounded
                 .maximumSize(64) // a value large enough that we should never even get close (so nothing gets evicted)
-                .executor(MoreExecutors.directExecutor())
+                .executor(ImmediateExecutor.INSTANCE)
                 .removalListener((key, value, cause) ->
                 {
                     // maybe reload the key? (to avoid the reload being on the user's dime)
diff --git a/src/java/org/apache/cassandra/security/DefaultSslContextFactory.java b/src/java/org/apache/cassandra/security/DefaultSslContextFactory.java
new file mode 100644
index 0000000..92c88c5
--- /dev/null
+++ b/src/java/org/apache/cassandra/security/DefaultSslContextFactory.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.util.Map;
+
+/**
+ * Cassandra's default implementation class for the configuration key {@code ssl_context_factory}. It uses
+ * file based keystores.
+ */
+public final class DefaultSslContextFactory extends FileBasedSslContextFactory
+{
+    public DefaultSslContextFactory(Map<String, Object> parameters)
+    {
+        super(parameters);
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/security/DisableSslContextFactory.java b/src/java/org/apache/cassandra/security/DisableSslContextFactory.java
new file mode 100644
index 0000000..9dab062
--- /dev/null
+++ b/src/java/org/apache/cassandra/security/DisableSslContextFactory.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLException;
+import javax.net.ssl.TrustManagerFactory;
+
+public class DisableSslContextFactory extends AbstractSslContextFactory
+{
+    @Override
+    protected KeyManagerFactory buildKeyManagerFactory() throws SSLException
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    protected TrustManagerFactory buildTrustManagerFactory() throws SSLException
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean hasKeystore()
+    {
+        return false;
+    }
+
+    @Override
+    public void initHotReloading() throws SSLException
+    {
+    }
+
+    @Override
+    public boolean shouldReload()
+    {
+        return false;
+    }
+}
diff --git a/src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java b/src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java
new file mode 100644
index 0000000..9876eb4
--- /dev/null
+++ b/src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.cert.X509Certificate;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLException;
+import javax.net.ssl.TrustManagerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.utils.Clock;
+
+/**
+ * Abstract implementation of {@link ISslContextFactory} using file-based, standard keystore formats, with the ability
+ * to hot-reload the files upon changes (detected by the {@code last modified timestamp}).
+ * <p>
+ * {@code CAUTION:} While this is a useful abstraction, please be careful if you need to modify this class
+ * given possible custom implementations out there!
+ */
+abstract public class FileBasedSslContextFactory extends AbstractSslContextFactory
+{
+    private static final Logger logger = LoggerFactory.getLogger(FileBasedSslContextFactory.class);
+
+    @VisibleForTesting
+    protected volatile boolean checkedExpiry = false;
+
+    /**
+     * List of files that trigger hot reloading of SSL certificates
+     */
+    protected volatile List<HotReloadableFile> hotReloadableFiles = new ArrayList<>();
+
+    protected String keystore;
+    protected String keystore_password;
+    protected String truststore;
+    protected String truststore_password;
+
+    public FileBasedSslContextFactory()
+    {
+        keystore = "conf/.keystore";
+        keystore_password = "cassandra";
+        truststore = "conf/.truststore";
+        truststore_password = "cassandra";
+    }
+
+    public FileBasedSslContextFactory(Map<String, Object> parameters)
+    {
+        super(parameters);
+        keystore = getString("keystore");
+        keystore_password = getString("keystore_password");
+        truststore = getString("truststore");
+        truststore_password = getString("truststore_password");
+    }
+
+    @Override
+    public boolean shouldReload()
+    {
+        return hotReloadableFiles.stream().anyMatch(HotReloadableFile::shouldReload);
+    }
+
+    @Override
+    public boolean hasKeystore()
+    {
+        return keystore != null && new File(keystore).exists();
+    }
+
+    private boolean hasTruststore()
+    {
+        return truststore != null && new File(truststore).exists();
+    }
+
+    @Override
+    public synchronized void initHotReloading()
+    {
+        boolean hasKeystore = hasKeystore();
+        boolean hasTruststore = hasTruststore();
+
+        if (hasKeystore || hasTruststore)
+        {
+            List<HotReloadableFile> fileList = new ArrayList<>();
+            if (hasKeystore)
+            {
+                fileList.add(new HotReloadableFile(keystore));
+            }
+            if (hasTruststore)
+            {
+                fileList.add(new HotReloadableFile(truststore));
+            }
+            hotReloadableFiles = fileList;
+        }
+    }
+
+    /**
+     * Validates the given keystore password.
+     *
+     * @param password           the keystore password to validate
+     * @throws IllegalArgumentException if the {@code password} is empty as per the definition of {@link StringUtils#isEmpty(CharSequence)}
+     */
+    protected void validatePassword(String password)
+    {
+        boolean keystorePasswordEmpty = StringUtils.isEmpty(password);
+        if (keystorePasswordEmpty)
+        {
+            throw new IllegalArgumentException("'keystore_password' must be specified");
+        }
+    }
+
+    /**
+     * Builds the required KeyManagerFactory from the file based keystore. It also checks each private key's certificate
+     * for expiry and logs a {@code warning} for each expired certificate.
+     *
+     * @return KeyManagerFactory built from the file based keystore.
+     * @throws SSLException if any issues encountered during the build process
+     */
+    @Override
+    protected KeyManagerFactory buildKeyManagerFactory() throws SSLException
+    {
+        /*
+         * Validation of the password is delayed until this point to allow nullable keystore passwords
+         * for other use-cases (CASSANDRA-18124).
+         */
+        validatePassword(keystore_password);
+
+        try (InputStream ksf = Files.newInputStream(File.getPath(keystore)))
+        {
+            final String algorithm = this.algorithm == null ? KeyManagerFactory.getDefaultAlgorithm() : this.algorithm;
+            KeyManagerFactory kmf = KeyManagerFactory.getInstance(algorithm);
+            KeyStore ks = KeyStore.getInstance(store_type);
+            ks.load(ksf, keystore_password.toCharArray());
+            if (!checkedExpiry)
+            {
+                checkExpiredCerts(ks);
+                checkedExpiry = true;
+            }
+            kmf.init(ks, keystore_password.toCharArray());
+            return kmf;
+        }
+        catch (Exception e)
+        {
+            throw new SSLException("failed to build key manager store for secure connections", e);
+        }
+    }
+
+    /**
+     * Builds TrustManagerFactory from the file based truststore.
+     *
+     * @return TrustManagerFactory from the file based truststore
+     * @throws SSLException if any issues encountered during the build process
+     */
+    @Override
+    protected TrustManagerFactory buildTrustManagerFactory() throws SSLException
+    {
+        try (InputStream tsf = Files.newInputStream(File.getPath(truststore)))
+        {
+            final String algorithm = this.algorithm == null ? TrustManagerFactory.getDefaultAlgorithm() : this.algorithm;
+            TrustManagerFactory tmf = TrustManagerFactory.getInstance(algorithm);
+            KeyStore ts = KeyStore.getInstance(store_type);
+
+            final char[] truststorePassword = StringUtils.isEmpty(truststore_password) ? null : truststore_password.toCharArray();
+            ts.load(tsf, truststorePassword);
+            tmf.init(ts);
+            return tmf;
+        }
+        catch (Exception e)
+        {
+            throw new SSLException("failed to build trust manager store for secure connections", e);
+        }
+    }
+
+    protected boolean checkExpiredCerts(KeyStore ks) throws KeyStoreException
+    {
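+        // Scans every X.509 entry in the keystore and warns for each certificate whose notAfter date has passed.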
+        boolean hasExpiredCerts = false;
+        final Date now = new Date(Clock.Global.currentTimeMillis());
+        for (Enumeration<String> aliases = ks.aliases(); aliases.hasMoreElements(); )
+        {
+            String alias = aliases.nextElement();
+            if (ks.getCertificate(alias).getType().equals("X.509"))
+            {
+                Date expires = ((X509Certificate) ks.getCertificate(alias)).getNotAfter();
+                if (expires.before(now))
+                {
+                    hasExpiredCerts = true;
+                    logger.warn("Certificate for {} expired on {}", alias, expires);
+                }
+            }
+        }
+        return hasExpiredCerts;
+    }
+
+    /**
+     * Helper class for hot reloading SSL Contexts
+     */
+    protected static class HotReloadableFile
+    {
+        private final File file;
+        private volatile long lastModTime;
+
+        HotReloadableFile(String path)
+        {
+            file = new File(path);
+            lastModTime = file.lastModified();
+        }
+
+        boolean shouldReload()
+        {
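+            // Note: lastModTime is updated as a side effect, so a given modification triggers a reload only once.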
+            long curModTime = file.lastModified();
+            boolean result = curModTime != lastModTime;
+            lastModTime = curModTime;
+            return result;
+        }
+
+        @Override
+        public String toString()
+        {
+            return "HotReloadableFile{" +
+                   "file=" + file +
+                   ", lastModTime=" + lastModTime +
+                   '}';
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/security/ISslContextFactory.java b/src/java/org/apache/cassandra/security/ISslContextFactory.java
new file mode 100644
index 0000000..579c95e
--- /dev/null
+++ b/src/java/org/apache/cassandra/security/ISslContextFactory.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.util.List;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLException;
+
+import io.netty.handler.ssl.CipherSuiteFilter;
+import io.netty.handler.ssl.SslContext;
+
+/**
+ * The purpose of this interface is to provide a pluggable mechanism for creating custom JSSE and Netty SSLContext
+ * objects. Use the Cassandra configuration key {@code ssl_context_factory} under {@code
+ * client_encryption_options}/{@code server_encryption_options} and provide a custom class name implementing this
+ * interface, along with parameters, to plug in your own way of loading the SSLContext.
+ * <p>
+ * Implementations of this interface must have a constructor with an argument of type {@code Map<String,Object>} so
+ * that custom parameters needed by the implementation can be passed from the yaml configuration. Common SSL
+ * configurations like {@code protocol, algorithm, cipher_suites, accepted_protocols, require_client_auth,
+ * require_endpoint_verification, enabled, optional} are also passed in that map by Cassandra.
+ * <p>
+ * Since Cassandra internally uses the JSSE SSLContext for certain use-cases in addition to Netty, this interface
+ * has methods for both.
+ * <p>
+ * Below is an example of how to configure a custom implementation with parameters
+ * <pre>
+ * ssl_context_factory:
+ *       class_name: org.apache.cassandra.security.YourSslContextFactoryImpl
+ *       parameters:
+ *         key1: "value1"
+ *         key2: "value2"
+ *         key3: "value3"
+ * </pre>
+ */
+public interface ISslContextFactory
+{
+    /**
+     * Creates JSSE SSLContext.
+     *
+     * @param verifyPeerCertificate {@code true} if SSL peer's certificate needs to be verified; {@code false} otherwise
+     * @return JSSE's {@link SSLContext}
+     * @throws SSLException in case the SSL context creation fails for some reason
+     */
+    SSLContext createJSSESslContext(boolean verifyPeerCertificate) throws SSLException;
+
+    /**
+     * Creates Netty's SslContext object.
+     *
+     * @param verifyPeerCertificate {@code true} if SSL peer's certificate needs to be verified; {@code false} otherwise
+     * @param socketType            {@link SocketType} for Netty's Inbound or Outbound channels
+     * @param cipherFilter          to allow Netty's cipher suite filtering, e.g.
+     *                              {@link io.netty.handler.ssl.SslContextBuilder#ciphers(Iterable, CipherSuiteFilter)}
+     * @return Netty's {@link SslContext}
+     * @throws SSLException in case the SSL context creation fails for some reason
+     */
+    SslContext createNettySslContext(boolean verifyPeerCertificate, SocketType socketType,
+                                     CipherSuiteFilter cipherFilter) throws SSLException;
+
+    /**
+     * Initializes hot reloading of the security keys/certs. The implementation must guarantee this to be thread safe.
+     *
+     * @throws SSLException
+     */
+    void initHotReloading() throws SSLException;
+
+    /**
+     * Returns whether any changes require the SSL context returned by this factory to be reloaded.
+     * This is called by Cassandra's periodic polling for potential changes that would reload the SSL context.
+     * However, only new connections established after the reload will use the reloaded SSL context.
+     *
+     * @return {@code true} if the SSL context needs to be reloaded; {@code false} otherwise
+     */
+    boolean shouldReload();
+
+    /**
+     * Returns whether this factory uses a private keystore.
+     *
+     * @return {@code true} by default unless the implementation overrides this
+     */
+    default boolean hasKeystore()
+    {
+        return true;
+    }
+
+    /**
+     * Returns the prepared list of accepted protocols.
+     *
+     * @return list of protocol names suitable for passing to Netty's SslContextBuilder.protocols, or null to use the
+     * default
+     */
+    List<String> getAcceptedProtocols();
+
+    /**
+     * Returns the list of cipher suites supported by the implementation.
+     *
+     * @return List of supported cipher suites
+     */
+    List<String> getCipherSuites();
+
+    /**
+     * Indicates if the process holds the inbound/listening (Server) end of the socket or the outbound side (Client).
+     */
+    enum SocketType
+    {
+        SERVER, CLIENT;
+    }
+}
\ No newline at end of file
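
To make the constructor contract above concrete, here is a minimal, hedged skeleton of a custom factory. YourSslContextFactoryImpl is the placeholder name from the interface Javadoc, not a class shipped with Cassandra, and the method bodies are illustrative stubs only.

    package org.apache.cassandra.security;

    import java.util.List;
    import java.util.Map;
    import javax.net.ssl.SSLContext;
    import javax.net.ssl.SSLException;

    import io.netty.handler.ssl.CipherSuiteFilter;
    import io.netty.handler.ssl.SslContext;
    import io.netty.handler.ssl.SslContextBuilder;

    public class YourSslContextFactoryImpl implements ISslContextFactory
    {
        private final Map<String, Object> parameters;

        // Cassandra passes the 'parameters' section of ssl_context_factory (plus
        // the common SSL options) through this Map<String, Object> constructor.
        public YourSslContextFactoryImpl(Map<String, Object> parameters)
        {
            this.parameters = parameters;
        }

        @Override
        public SSLContext createJSSESslContext(boolean verifyPeerCertificate) throws SSLException
        {
            try
            {
                // Stub: a real factory builds the context from its own key material.
                return SSLContext.getDefault();
            }
            catch (Exception e)
            {
                throw new SSLException("failed to create JSSE SSLContext", e);
            }
        }

        @Override
        public SslContext createNettySslContext(boolean verifyPeerCertificate, SocketType socketType,
                                                CipherSuiteFilter cipherFilter) throws SSLException
        {
            if (socketType == SocketType.SERVER)
                throw new SSLException("server key material is omitted in this sketch");
            // Stub: a real factory would also wire in trust/key managers and ciphers.
            return SslContextBuilder.forClient().build();
        }

        @Override
        public void initHotReloading() throws SSLException
        {
            // no-op in this sketch
        }

        @Override
        public boolean shouldReload()
        {
            return false;
        }

        @Override
        public List<String> getAcceptedProtocols()
        {
            return null; // null means "use the default", per the interface contract
        }

        @Override
        public List<String> getCipherSuites()
        {
            return null;
        }
    }
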
diff --git a/src/java/org/apache/cassandra/security/JKSKeyProvider.java b/src/java/org/apache/cassandra/security/JKSKeyProvider.java
index cea7b23..2fddf5e 100644
--- a/src/java/org/apache/cassandra/security/JKSKeyProvider.java
+++ b/src/java/org/apache/cassandra/security/JKSKeyProvider.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.security;
 
 import java.nio.file.Files;
-import java.nio.file.Paths;
 import java.io.InputStream;
 import java.io.IOException;
 import java.security.Key;
@@ -28,6 +27,7 @@
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.TransparentDataEncryptionOptions;
+import org.apache.cassandra.io.util.File;
 
 /**
  * A {@code KeyProvider} that retrieves keys from a java keystore.
@@ -48,7 +48,7 @@
     {
         this.options = options;
         logger.info("initializing keystore from file {}", options.get(PROP_KEYSTORE));
-        try (InputStream inputStream = Files.newInputStream(Paths.get(options.get(PROP_KEYSTORE))))
+        try (InputStream inputStream = Files.newInputStream(File.getPath(options.get(PROP_KEYSTORE))))
         {
             store = KeyStore.getInstance(options.get(PROP_KEYSTORE_TYPE));
             store.load(inputStream, options.get(PROP_KEYSTORE_PW).toCharArray());
diff --git a/src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java b/src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java
new file mode 100644
index 0000000..d62aef5
--- /dev/null
+++ b/src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java
@@ -0,0 +1,366 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.security.GeneralSecurityException;
+import java.security.KeyStore;
+import java.security.PrivateKey;
+import java.security.cert.Certificate;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLException;
+import javax.net.ssl.TrustManagerFactory;
+
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.io.util.File;
+
+
+/**
+ * SslContextFactory for the <a href="">PEM standard</a> encoded PKCS#8 private keys and X509 certificates/public-keys.
+ * It parses the key material based on the standard defined in the <a href="https://datatracker.ietf.org/doc/html/rfc7468">RFC 7468</a>.
+ * It creates <a href="https://datatracker.ietf.org/doc/html/rfc5208">PKCS#8</a> based private key and X509 certificate(s)
+ * for the public key to build the required keystore and the truststore managers that are used for the SSL context creation.
+ * Internally it builds Java {@link KeyStore} with <a href="https://datatracker.ietf.org/doc/html/rfc7292">PKCS#12</a> <a href="https://docs.oracle.com/en/java/javase/11/docs/specs/security/standard-names.html#keystore-types">store type</a>
+ * to be used for keystore and the truststore managers.
+ * <p>
+ * This factory also supports 'hot reloading' of the key material, the same way as defined by {@link FileBasedSslContextFactory},
+ * <b>if it is file based</b>. This factory ignores the existing 'store_type' configuration used for other file based store
+ * types like JKS.
+ * <p>
+ * You can configure this factory either with inline PEM data or with files containing the required PEM data, as
+ * shown below.
+ *
+ * <b>Configuration: PEM keys/certs defined inline (mind the spaces in the YAML!)</b>
+ * <pre>
+ *     client/server_encryption_options:
+ *      ssl_context_factory:
+ *         class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+ *         parameters:
+ *             private_key: |
+ *              -----BEGIN ENCRYPTED PRIVATE KEY----- OR -----BEGIN PRIVATE KEY-----
+ *              <your base64 encoded private key>
+ *              -----END ENCRYPTED PRIVATE KEY----- OR -----END PRIVATE KEY-----
+ *              -----BEGIN CERTIFICATE-----
+ *              <your base64 encoded certificate chain>
+ *              -----END CERTIFICATE-----
+ *
+ *             private_key_password: "<your password if the private key is encrypted with a password>"
+ *
+ *             trusted_certificates: |
+ *               -----BEGIN CERTIFICATE-----
+ *               <your base64 encoded certificate>
+ *               -----END CERTIFICATE-----
+ * </pre>
+ *
+ * <b>Configuration: PEM keys/certs defined in files</b>
+ * <pre>
+ *     client/server_encryption_options:
+ *      ssl_context_factory:
+ *         class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+ *      keystore: <file path to the keystore file in the PEM format with the private key and the certificate chain>
+ *      keystore_password: "<your password if the private key is encrypted with a password>"
+ *      truststore: <file path to the truststore file in the PEM format>
+ * </pre>
+ */
+public final class PEMBasedSslContextFactory extends FileBasedSslContextFactory
+{
+    public static final String DEFAULT_TARGET_STORETYPE = "PKCS12";
+    private static final Logger logger = LoggerFactory.getLogger(PEMBasedSslContextFactory.class);
+    private String pemEncodedKey;
+    private String keyPassword;
+    private String pemEncodedCertificates;
+    private boolean maybeFileBasedPrivateKey;
+    private boolean maybeFileBasedTrustedCertificates;
+
+    public PEMBasedSslContextFactory()
+    {
+    }
+
+    public PEMBasedSslContextFactory(Map<String, Object> parameters)
+    {
+        super(parameters);
+        pemEncodedKey = getString(ConfigKey.ENCODED_KEY.getKeyName());
+        keyPassword = getString(ConfigKey.KEY_PASSWORD.getKeyName());
+        if (StringUtils.isEmpty(keyPassword))
+        {
+            keyPassword = keystore_password;
+        }
+        else if (!StringUtils.isEmpty(keystore_password) && !keyPassword.equals(keystore_password))
+        {
+            throw new IllegalArgumentException("'keystore_password' and 'private_key_password' are both configured " +
+                                               "and the values do not match");
+        }
+
+        if (!StringUtils.isEmpty(truststore_password))
+        {
+            logger.warn("PEM based truststore should not use a password. Ignoring the value given in the " +
+                        "'truststore_password' configuration.");
+        }
+
+        pemEncodedCertificates = getString(ConfigKey.ENCODED_CERTIFICATES.getKeyName());
+
+        maybeFileBasedPrivateKey = StringUtils.isEmpty(pemEncodedKey);
+        maybeFileBasedTrustedCertificates = StringUtils.isEmpty(pemEncodedCertificates);
+
+        enforceSinglePrivateKeySource();
+        enforceSingleTrustedCertificatesSource();
+    }
+
+    /**
+     * Decides whether this factory has a keystore defined, i.e. key material specified either in files or inline in the configuration.
+     *
+     * @return {@code true} if there is a keystore defined; {@code false} otherwise
+     */
+    @Override
+    public boolean hasKeystore()
+    {
+        return maybeFileBasedPrivateKey ? keystoreFileExists() :
+               !StringUtils.isEmpty(pemEncodedKey);
+    }
+
+    /**
+     * Checks if the keystore file exists.
+     *
+     * @return {@code true} if keystore file exists; {@code false} otherwise
+     */
+    private boolean keystoreFileExists()
+    {
+        return keystore != null && new File(keystore).exists();
+    }
+
+    /**
+     * Decides whether this factory has a truststore defined, i.e. key material specified either in files or inline in
+     * the configuration.
+     *
+     * @return {@code true} if there is a truststore defined; {@code false} otherwise
+     */
+    private boolean hasTruststore()
+    {
+        return maybeFileBasedTrustedCertificates ? truststoreFileExists() :
+               !StringUtils.isEmpty(pemEncodedCertificates);
+    }
+
+    /**
+     * Checks if the truststore file exists.
+     *
+     * @return {@code true} if truststore file exists; {@code false} otherwise
+     */
+    private boolean truststoreFileExists()
+    {
+        return truststore != null && new File(truststore).exists();
+    }
+
+    /**
+     * This enables 'hot' reloading of the key/trust stores based on the last updated timestamps if they are file based.
+     */
+    @Override
+    public synchronized void initHotReloading()
+    {
+        List<HotReloadableFile> fileList = new ArrayList<>();
+        if (maybeFileBasedPrivateKey && hasKeystore())
+        {
+            fileList.add(new HotReloadableFile(keystore));
+        }
+        if (maybeFileBasedTrustedCertificates && hasTruststore())
+        {
+            fileList.add(new HotReloadableFile(truststore));
+        }
+        if (!fileList.isEmpty())
+        {
+            hotReloadableFiles = fileList;
+        }
+    }
+
+    /**
+     * Builds the required KeyManagerFactory from the PEM based keystore. It also checks the expiry of the private
+     * key's certificate(s) and logs a {@code warning} for each expired certificate.
+     *
+     * @return KeyManagerFactory built from the PEM based keystore.
+     * @throws SSLException if any issues encountered during the build process
+     */
+    @Override
+    protected KeyManagerFactory buildKeyManagerFactory() throws SSLException
+    {
+        try
+        {
+            if (hasKeystore())
+            {
+                if (maybeFileBasedPrivateKey)
+                {
+                    pemEncodedKey = readPEMFile(keystore); // read PEM from the file
+                }
+
+                KeyManagerFactory kmf = KeyManagerFactory.getInstance(
+                algorithm == null ? KeyManagerFactory.getDefaultAlgorithm() : algorithm);
+                KeyStore ks = buildKeyStore();
+                if (!checkedExpiry)
+                {
+                    checkExpiredCerts(ks);
+                    checkedExpiry = true;
+                }
+                kmf.init(ks, keyPassword != null ? keyPassword.toCharArray() : null);
+                return kmf;
+            }
+            else
+            {
+                throw new SSLException("Must provide keystore or private_key in configuration for PEMBasedSSlContextFactory");
+            }
+        }
+        catch (Exception e)
+        {
+            throw new SSLException("Failed to build key manager store for secure connections", e);
+        }
+    }
+
+    /**
+     * Builds TrustManagerFactory from the PEM based truststore.
+     *
+     * @return TrustManagerFactory from the PEM based truststore
+     * @throws SSLException if any issues encountered during the build process
+     */
+    @Override
+    protected TrustManagerFactory buildTrustManagerFactory() throws SSLException
+    {
+        try
+        {
+            if (hasTruststore())
+            {
+                if (maybeFileBasedTrustedCertificates)
+                {
+                    pemEncodedCertificates = readPEMFile(truststore); // read PEM from the file
+                }
+
+                TrustManagerFactory tmf = TrustManagerFactory.getInstance(
+                algorithm == null ? TrustManagerFactory.getDefaultAlgorithm() : algorithm);
+                KeyStore ts = buildTrustStore();
+                tmf.init(ts);
+                return tmf;
+            }
+            else
+            {
+                throw new SSLException("Must provide truststore or trusted_certificates in configuration for " +
+                                       "PEMBasedSSlContextFactory");
+            }
+        }
+        catch (Exception e)
+        {
+            throw new SSLException("Failed to build trust manager store for secure connections", e);
+        }
+    }
+
+    private String readPEMFile(String file) throws IOException
+    {
+        return new String(Files.readAllBytes(File.getPath(file)));
+    }
+
+    /**
+     * Builds a KeyStore of type {@link #DEFAULT_TARGET_STORETYPE} out of the PEM formatted private key material.
+     * It uses {@code cassandra-ssl-keystore} as the alias for the created key-entry.
+     */
+    private KeyStore buildKeyStore() throws GeneralSecurityException, IOException
+    {
+        char[] keyPasswordArray = keyPassword != null ? keyPassword.toCharArray() : null;
+        PrivateKey privateKey = PEMReader.extractPrivateKey(pemEncodedKey, keyPassword);
+        Certificate[] certChainArray = PEMReader.extractCertificates(pemEncodedKey);
+        if (certChainArray == null || certChainArray.length == 0)
+        {
+            throw new SSLException("Could not read any certificates for the certChain for the private key");
+        }
+
+        KeyStore keyStore = KeyStore.getInstance(DEFAULT_TARGET_STORETYPE);
+        keyStore.load(null, null);
+        keyStore.setKeyEntry("cassandra-ssl-keystore", privateKey, keyPasswordArray, certChainArray);
+        return keyStore;
+    }
+
+    /**
+     * Builds a KeyStore of type {@link #DEFAULT_TARGET_STORETYPE} out of the PEM formatted certificates/public-key
+     * material.
+     * <p>
+     * It uses {@code cassandra-ssl-trusted-cert-<numeric-id>} as the alias for the created certificate-entry.
+     */
+    private KeyStore buildTrustStore() throws GeneralSecurityException, IOException
+    {
+        Certificate[] certChainArray = PEMReader.extractCertificates(pemEncodedCertificates);
+        if (certChainArray == null || certChainArray.length == 0)
+        {
+            throw new SSLException("Could not read any certificates from the given PEM");
+        }
+
+        KeyStore keyStore = KeyStore.getInstance(DEFAULT_TARGET_STORETYPE);
+        keyStore.load(null, null);
+        for (int i = 0; i < certChainArray.length; i++)
+        {
+            keyStore.setCertificateEntry("cassandra-ssl-trusted-cert-" + (i + 1), certChainArray[i]);
+        }
+        return keyStore;
+    }
+
+    /**
+     * Enforces that the configuration specifies a single source for loading private keys - either {@code keystore}
+     * (the actual file must exist) or {@code private_key}, not both.
+     */
+    private void enforceSinglePrivateKeySource()
+    {
+        if (keystoreFileExists() && !StringUtils.isEmpty(pemEncodedKey))
+        {
+            throw new IllegalArgumentException("Configuration must specify value for either keystore or private_key, " +
+                                               "not both for PEMBasedSSlContextFactory");
+        }
+    }
+
+    /**
+     * Enforces that the configuration specifies a single source for loading trusted certificates - either {@code
+     * truststore} (actual file must exist) or {@code trusted_certificates}, not both.
+     */
+    private void enforceSingleTrustedCertificatesSource()
+    {
+        if (truststoreFileExists() && !StringUtils.isEmpty(pemEncodedCertificates))
+        {
+            throw new IllegalArgumentException("Configuration must specify value for either truststore or " +
+                                               "trusted_certificates, not both for PEMBasedSSlContextFactory");
+        }
+    }
+
+    public enum ConfigKey
+    {
+        ENCODED_KEY("private_key"),
+        KEY_PASSWORD("private_key_password"),
+        ENCODED_CERTIFICATES("trusted_certificates");
+
+        final String keyName;
+
+        ConfigKey(String keyName)
+        {
+            this.keyName = keyName;
+        }
+
+        String getKeyName()
+        {
+            return keyName;
+        }
+    }
+}
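
A hedged usage sketch of the inline-PEM path above. Direct construction is for illustration only: in Cassandra these parameters arrive via the ssl_context_factory yaml shown in the class Javadoc, the helper class name is hypothetical, and the PEM strings are placeholders, not valid key material.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.cassandra.security.PEMBasedSslContextFactory;

    // Hypothetical helper, not part of this patch.
    public class InlinePemFactoryExample
    {
        public static PEMBasedSslContextFactory build()
        {
            // Keys match PEMBasedSslContextFactory.ConfigKey; values are placeholders.
            Map<String, Object> params = new HashMap<>();
            params.put("private_key",
                       "-----BEGIN ENCRYPTED PRIVATE KEY-----\n<base64 key>\n-----END ENCRYPTED PRIVATE KEY-----\n" +
                       "-----BEGIN CERTIFICATE-----\n<base64 cert chain>\n-----END CERTIFICATE-----\n");
            params.put("private_key_password", "cassandra");
            params.put("trusted_certificates",
                       "-----BEGIN CERTIFICATE-----\n<base64 cert>\n-----END CERTIFICATE-----\n");
            return new PEMBasedSslContextFactory(params);
        }
    }
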
diff --git a/src/java/org/apache/cassandra/security/PEMReader.java b/src/java/org/apache/cassandra/security/PEMReader.java
new file mode 100644
index 0000000..aaeeab8
--- /dev/null
+++ b/src/java/org/apache/cassandra/security/PEMReader.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.security.AlgorithmParameters;
+import java.security.GeneralSecurityException;
+import java.security.Key;
+import java.security.KeyFactory;
+import java.security.PrivateKey;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509Certificate;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import javax.crypto.BadPaddingException;
+import javax.crypto.Cipher;
+import javax.crypto.EncryptedPrivateKeyInfo;
+import javax.crypto.SecretKeyFactory;
+import javax.crypto.spec.PBEKeySpec;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static java.util.regex.Pattern.CASE_INSENSITIVE;
+
+/**
+ * This is a helper class to read private keys and X509 certificates encoded based on <a href="https://datatracker.ietf.org/doc/html/rfc1421">PEM (RFC 1421)</a>
+ * format. It can read Password Based Encrypted (PBE henceforth) private keys as well as non-encrypted private keys
+ * along with the X509 certificates/cert-chain based on the textual encoding defined in the <a href="https://datatracker.ietf.org/doc/html/rfc7468">RFC 7468</a>
+ * <p>
+ * The input private key must be in PKCS#8 format.
+ * <p>
+ * It returns PKCS#8 formatted private key and X509 certificates.
+ */
+public final class PEMReader
+{
+    /**
+     * The private key must use one of these algorithms for this reader to parse it successfully.
+     * Currently, the supported algorithms are
+     * <pre>
+     *     RSA, DSA or EC
+     * </pre>
+     * The first one to be evaluated is RSA, being the most common for private keys.
+     */
+    public static final Set<String> SUPPORTED_PRIVATE_KEY_ALGORITHMS = ImmutableSet.of("RSA", "DSA", "EC");
+    private static final Logger logger = LoggerFactory.getLogger(PEMReader.class);
+    private static final Pattern CERT_PATTERN = Pattern.compile("-+BEGIN\\s+.*CERTIFICATE[^-]*-+(?:\\s|\\r|\\n)+([a-z0-9+/=\\r\\n]+)-+END\\s+.*CERTIFICATE[^-]*-+", CASE_INSENSITIVE);
+    private static final Pattern KEY_PATTERN = Pattern.compile("-+BEGIN\\s+.*PRIVATE\\s+KEY[^-]*-+(?:\\s|\\r|\\n)+([a-z0-9+/=\\r\\n]+)-+END\\s+.*PRIVATE\\s+KEY[^-]*-+", CASE_INSENSITIVE);
+
+    /**
+     * Extracts the private key from the given PEM content, assuming it is not password based encrypted (PBE).
+     *
+     * @param unencryptedPEMKey private key stored as PEM content
+     * @return {@link PrivateKey} upon successful reading of the private key
+     * @throws IOException              in case PEM reading fails
+     * @throws GeneralSecurityException in case any issue encountered while reading the private key
+     */
+    public static PrivateKey extractPrivateKey(String unencryptedPEMKey) throws IOException, GeneralSecurityException
+    {
+        return extractPrivateKey(unencryptedPEMKey, null);
+    }
+
+    /**
+     * Extracts the private key from Password Based Encrypted (PBE) PEM content.
+     *
+     * @param pemKey      PBE private key stored as PEM content
+     * @param keyPassword password to be used for the private key decryption
+     * @return {@link PrivateKey} upon successful reading of the private key
+     * @throws IOException              in case PEM reading fails
+     * @throws GeneralSecurityException in case any issue encountered while reading the private key
+     */
+    public static PrivateKey extractPrivateKey(String pemKey, String keyPassword) throws IOException,
+                                                                                         GeneralSecurityException
+    {
+        PKCS8EncodedKeySpec keySpec;
+        String base64EncodedKey = extractBase64EncodedKey(pemKey);
+        byte[] derKeyBytes = decodeBase64(base64EncodedKey);
+
+        if (!StringUtils.isEmpty(keyPassword))
+        {
+            logger.debug("Encrypted key's length: {}, key's password length: {}",
+                         derKeyBytes.length, keyPassword.length());
+
+            EncryptedPrivateKeyInfo epki = new EncryptedPrivateKeyInfo(derKeyBytes);
+            logger.debug("Encrypted private key info's algorithm name: {}", epki.getAlgName());
+
+            AlgorithmParameters params = epki.getAlgParameters();
+            PBEKeySpec pbeKeySpec = new PBEKeySpec(keyPassword.toCharArray());
+            Key encryptionKey = SecretKeyFactory.getInstance(epki.getAlgName()).generateSecret(pbeKeySpec);
+            pbeKeySpec.clearPassword();
+            logger.debug("Key algorithm: {}, key format: {}", encryptionKey.getAlgorithm(), encryptionKey.getFormat());
+
+            Cipher cipher = Cipher.getInstance(epki.getAlgName());
+            cipher.init(Cipher.DECRYPT_MODE, encryptionKey, params);
+            byte[] rawKeyBytes;
+            try
+            {
+                rawKeyBytes = cipher.doFinal(epki.getEncryptedData());
+            }
+            catch (BadPaddingException e)
+            {
+                throw new GeneralSecurityException("Failed to decrypt the private key data. Either the password " +
+                                                   "provided for the key is wrong or the private key data is " +
+                                                   "corrupted. msg=" + e.getMessage(), e);
+            }
+            logger.debug("Decrypted private key's length: {}", rawKeyBytes.length);
+
+            keySpec = new PKCS8EncodedKeySpec(rawKeyBytes);
+        }
+        else
+        {
+            logger.debug("Key length: {}", derKeyBytes.length);
+            keySpec = new PKCS8EncodedKeySpec(derKeyBytes);
+        }
+
+        PrivateKey privateKey = null;
+
+        /*
+         * Ideally we could inspect the OID (Object Identifier) of the private key with an ASN.1 parser and identify
+         * the actual algorithm. Doing that would require an additional library like BouncyCastle. In its absence,
+         * the brute-force approach below works: try each of the supported private key algorithms, given that there
+         * are only three major algorithms to verify against.
+         */
+        for (String privateKeyAlgorithm : SUPPORTED_PRIVATE_KEY_ALGORITHMS)
+        {
+            try
+            {
+                privateKey = KeyFactory.getInstance(privateKeyAlgorithm).generatePrivate(keySpec);
+                logger.info("Parsing for the private key finished with {} algorithm.", privateKeyAlgorithm);
+                return privateKey;
+            }
+            catch (Exception e)
+            {
+                logger.debug("Failed to parse the private key with {} algorithm. Will try the other supported " +
+                             "algorithms.", privateKeyAlgorithm);
+            }
+        }
+        throw new GeneralSecurityException("The given private key could not be parsed with any of the supported " +
+                                           "algorithms. Please see PEMReader#SUPPORTED_PRIVATE_KEY_ALGORITHMS.");
+    }
+
+    /**
+     * Extracts the certificates/cert-chain from the PEM content.
+     *
+     * @param pemCerts certificates/cert-chain stored as PEM content
+     * @return X509 certificate list
+     * @throws GeneralSecurityException in case any issue encountered while reading the certificates
+     */
+    public static Certificate[] extractCertificates(String pemCerts) throws GeneralSecurityException
+    {
+        List<Certificate> certificateList = new ArrayList<>();
+        List<String> base64EncodedCerts = extractBase64EncodedCerts(pemCerts);
+        for (String base64EncodedCertificate : base64EncodedCerts)
+        {
+            certificateList.add(generateCertificate(base64EncodedCertificate));
+        }
+        Certificate[] certificates = certificateList.toArray(new Certificate[0]);
+        return certificates;
+    }
+
+    /**
+     * Generates the X509 certificate object given the base64 encoded PEM content.
+     *
+     * @param base64Certificate base64 encoded PEM content for the certificate
+     * @return X509 certificate
+     * @throws GeneralSecurityException in case any issue encountered while reading the certificate
+     */
+    private static Certificate generateCertificate(String base64Certificate) throws GeneralSecurityException
+    {
+        byte[] decodedCertificateBytes = decodeBase64(base64Certificate);
+        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
+        X509Certificate certificate =
+        (X509Certificate) certificateFactory.generateCertificate(new ByteArrayInputStream(decodedCertificateBytes));
+        logCertificateDetails(certificate);
+        return certificate;
+    }
+
+    /**
+     * Logs X509 certificate details at {@code INFO} level for debugging purposes.
+     * Namely, it prints the Subject DN, Issuer DN, certificate serial number and the certificate expiry date, which
+     * can be very valuable for debugging certificate related issues.
+     *
+     * @param certificate certificate to log
+     */
+    private static void logCertificateDetails(X509Certificate certificate)
+    {
+        assert certificate != null;
+        logger.info("*********** Certificate Details *****************");
+        logger.info("Subject DN: {}", certificate.getSubjectDN());
+        logger.info("Issuer DN: {}", certificate.getIssuerDN());
+        logger.info("Serial Number: {}", certificate.getSerialNumber());
+        logger.info("Expiry: {}", certificate.getNotAfter());
+    }
+
+    /**
+     * Parses the PEM formatted private key based on the standard pattern specified by the <a href="https://datatracker.ietf.org/doc/html/rfc7468#section-11">RFC 7468</a>.
+     *
+     * @param pemKey private key stored as PEM content
+     * @return base64 string contained within the encapsulation boundaries defined by the above RFC
+     * @throws GeneralSecurityException in case any issue encountered while parsing the key
+     */
+    private static String extractBase64EncodedKey(String pemKey) throws GeneralSecurityException
+    {
+        Matcher matcher = KEY_PATTERN.matcher(pemKey);
+        if (matcher.find())
+        {
+            return matcher.group(1).replaceAll("\\s", "");
+        }
+        else
+        {
+            throw new GeneralSecurityException("Invalid private key format");
+        }
+    }
+
+    /**
+     * Parses the PEM formatted certificate/public-key based on the standard pattern specified by the
+     * <a href="https://datatracker.ietf.org/doc/html/rfc7468#section-13">RFC 7468</a>.
+     *
+     * @param pemCerts certificate/public-key stored as PEM content
+     * @return list of base64 encoded certificates within the encapsulation boundaries defined by the above RFC
+     * @throws GeneralSecurityException in case any issue encountered parsing the certificate
+     */
+    private static List<String> extractBase64EncodedCerts(String pemCerts) throws GeneralSecurityException
+    {
+        List<String> certificateList = new ArrayList<>();
+        Matcher matcher = CERT_PATTERN.matcher(pemCerts);
+        if (!matcher.find())
+        {
+            throw new GeneralSecurityException("Invalid certificate format");
+        }
+
+        for (int start = 0; matcher.find(start); start = matcher.end())
+        {
+            String certificate = matcher.group(1).replaceAll("\\s", "");
+            certificateList.add(certificate);
+        }
+        return certificateList;
+    }
+
+    /**
+     * Decodes given input in Base64 format.
+     *
+     * @param base64Input input to be decoded
+     * @return byte[] containing decoded bytes
+     * @throws GeneralSecurityException in case it fails to decode the given base64 input
+     */
+    private static byte[] decodeBase64(String base64Input) throws GeneralSecurityException
+    {
+        try
+        {
+            return Base64.getDecoder().decode(base64Input);
+        }
+        catch (IllegalArgumentException e)
+        {
+            throw new GeneralSecurityException("Failed to decode given base64 input. msg=" + e.getMessage(), e);
+        }
+    }
+}
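
A short, hedged demo of the two public entry points above. The demo class is hypothetical and the PEM strings are placeholders; a real caller would pass actual RFC 7468 content.

    package org.apache.cassandra.security;

    import java.security.PrivateKey;
    import java.security.cert.Certificate;

    // Hypothetical demo class, not part of this patch.
    public class PEMReaderDemo
    {
        public static void main(String[] args) throws Exception
        {
            String pemKey = "-----BEGIN ENCRYPTED PRIVATE KEY-----\n<base64 key>\n-----END ENCRYPTED PRIVATE KEY-----";
            String pemCerts = "-----BEGIN CERTIFICATE-----\n<base64 cert>\n-----END CERTIFICATE-----";

            // PBE variant: the password decrypts the PKCS#8 EncryptedPrivateKeyInfo.
            PrivateKey key = PEMReader.extractPrivateKey(pemKey, "key-password");

            // Each encapsulated CERTIFICATE block becomes one X509 certificate.
            Certificate[] chain = PEMReader.extractCertificates(pemCerts);

            System.out.println(key.getAlgorithm() + ", " + chain.length + " certificate(s)");
        }
    }
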
diff --git a/src/java/org/apache/cassandra/security/SSLFactory.java b/src/java/org/apache/cassandra/security/SSLFactory.java
index 22f0a9d..e06da1f 100644
--- a/src/java/org/apache/cassandra/security/SSLFactory.java
+++ b/src/java/org/apache/cassandra/security/SSLFactory.java
@@ -18,48 +18,33 @@
 package org.apache.cassandra.security;
 
 
-import java.io.File;
 import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.security.KeyStore;
-import java.security.cert.X509Certificate;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Date;
-import java.util.Enumeration;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
-import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLEngine;
 import javax.net.ssl.SSLParameters;
 import javax.net.ssl.SSLSocket;
-import javax.net.ssl.TrustManager;
-import javax.net.ssl.TrustManagerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import io.netty.buffer.ByteBufAllocator;
 import io.netty.handler.ssl.CipherSuiteFilter;
-import io.netty.handler.ssl.ClientAuth;
 import io.netty.handler.ssl.OpenSsl;
 import io.netty.handler.ssl.SslContext;
-import io.netty.handler.ssl.SslContextBuilder;
-import io.netty.handler.ssl.SslProvider;
 import io.netty.util.ReferenceCountUtil;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.security.ISslContextFactory.SocketType;
 
 /**
  * A Factory for providing and setting up client {@link SSLSocket}s. Also provides
@@ -73,18 +58,6 @@
 {
     private static final Logger logger = LoggerFactory.getLogger(SSLFactory.class);
 
-    /**
-     * Indicates if the process holds the inbound/listening end of the socket ({@link SocketType#SERVER})), or the
-     * outbound side ({@link SocketType#CLIENT}).
-     */
-    public enum SocketType
-    {
-        SERVER, CLIENT
-    }
-
-    @VisibleForTesting
-    static volatile boolean checkedExpiry = false;
-
     // Isolate calls to OpenSsl.isAvailable to allow in-jvm dtests to disable tcnative openssl
     // support.  It creates a circular reference that prevents the instance class loader from being
     // garbage collected.
@@ -111,11 +84,6 @@
     private static final ConcurrentHashMap<CacheKey, SslContext> cachedSslContexts = new ConcurrentHashMap<>();
 
     /**
-     * List of files that trigger hot reloading of SSL certificates
-     */
-    private static volatile List<HotReloadableFile> hotReloadableFiles = ImmutableList.of();
-
-    /**
      * Default initial delay for hot reloading
      */
     public static final int DEFAULT_HOT_RELOAD_INITIAL_DELAY_SEC = 600;
@@ -130,38 +98,6 @@
      */
     private static boolean isHotReloadingInitialized = false;
 
-    /**
-     * Helper class for hot reloading SSL Contexts
-     */
-    private static class HotReloadableFile
-    {
-        private final File file;
-        private volatile long lastModTime;
-
-        HotReloadableFile(String path)
-        {
-            file = new File(path);
-            lastModTime = file.lastModified();
-        }
-
-        boolean shouldReload()
-        {
-            long curModTime = file.lastModified();
-            boolean result = curModTime != lastModTime;
-            lastModTime = curModTime;
-            return result;
-        }
-
-        @Override
-        public String toString()
-        {
-            return "HotReloadableFile{" +
-                       "file=" + file +
-                       ", lastModTime=" + lastModTime +
-                       '}';
-        }
-    }
-
     /** Provides the list of protocols that would have been supported if "TLS" was selected as the
      * protocol before the change for CASSANDRA-13325 that expects explicit protocol versions.
      * @return list of enabled protocol names
@@ -185,100 +121,25 @@
     /**
      * Create a JSSE {@link SSLContext}.
      */
-    public static SSLContext createSSLContext(EncryptionOptions options, boolean buildTruststore) throws IOException
+    public static SSLContext createSSLContext(EncryptionOptions options, boolean verifyPeerCertificate) throws IOException
     {
-        TrustManager[] trustManagers = null;
-        if (buildTruststore)
-            trustManagers = buildTrustManagerFactory(options).getTrustManagers();
-
-        KeyManagerFactory kmf = buildKeyManagerFactory(options);
-
-        try
-        {
-            SSLContext ctx = SSLContext.getInstance("TLS");
-            ctx.init(kmf.getKeyManagers(), trustManagers, null);
-            return ctx;
-        }
-        catch (Exception e)
-        {
-            throw new IOException("Error creating/initializing the SSL Context", e);
-        }
-    }
-
-    static TrustManagerFactory buildTrustManagerFactory(EncryptionOptions options) throws IOException
-    {
-        try (InputStream tsf = Files.newInputStream(Paths.get(options.truststore)))
-        {
-            TrustManagerFactory tmf = TrustManagerFactory.getInstance(
-            options.algorithm == null ? TrustManagerFactory.getDefaultAlgorithm() : options.algorithm);
-            KeyStore ts = KeyStore.getInstance(options.store_type);
-            ts.load(tsf, options.truststore_password.toCharArray());
-            tmf.init(ts);
-            return tmf;
-        }
-        catch (Exception e)
-        {
-            throw new IOException("failed to build trust manager store for secure connections", e);
-        }
-    }
-
-    static KeyManagerFactory buildKeyManagerFactory(EncryptionOptions options) throws IOException
-    {
-        try (InputStream ksf = Files.newInputStream(Paths.get(options.keystore)))
-        {
-            KeyManagerFactory kmf = KeyManagerFactory.getInstance(
-            options.algorithm == null ? KeyManagerFactory.getDefaultAlgorithm() : options.algorithm);
-            KeyStore ks = KeyStore.getInstance(options.store_type);
-            ks.load(ksf, options.keystore_password.toCharArray());
-            if (!checkedExpiry)
-            {
-                for (Enumeration<String> aliases = ks.aliases(); aliases.hasMoreElements(); )
-                {
-                    String alias = aliases.nextElement();
-                    if (ks.getCertificate(alias).getType().equals("X.509"))
-                    {
-                        Date expires = ((X509Certificate) ks.getCertificate(alias)).getNotAfter();
-                        if (expires.before(new Date()))
-                            logger.warn("Certificate for {} expired on {}", alias, expires);
-                    }
-                }
-                checkedExpiry = true;
-            }
-            kmf.init(ks, options.keystore_password.toCharArray());
-            return kmf;
-        }
-        catch (Exception e)
-        {
-            throw new IOException("failed to build key manager store for secure connections", e);
-        }
+        return options.sslContextFactoryInstance.createJSSESslContext(verifyPeerCertificate);
     }
 
     /**
      * get a netty {@link SslContext} instance
      */
-    public static SslContext getOrCreateSslContext(EncryptionOptions options, boolean buildTruststore,
+    public static SslContext getOrCreateSslContext(EncryptionOptions options, boolean verifyPeerCertificate,
                                                    SocketType socketType) throws IOException
     {
-        return getOrCreateSslContext(options, buildTruststore, socketType, openSslIsAvailable());
-    }
-
-    /**
-     * Get a netty {@link SslContext} instance.
-     */
-    @VisibleForTesting
-    static SslContext getOrCreateSslContext(EncryptionOptions options,
-                                            boolean buildTruststore,
-                                            SocketType socketType,
-                                            boolean useOpenSsl) throws IOException
-    {
-        CacheKey key = new CacheKey(options, socketType, useOpenSsl);
+        CacheKey key = new CacheKey(options, socketType);
         SslContext sslContext;
 
         sslContext = cachedSslContexts.get(key);
         if (sslContext != null)
             return sslContext;
 
-        sslContext = createNettySslContext(options, buildTruststore, socketType, useOpenSsl);
+        sslContext = createNettySslContext(options, verifyPeerCertificate, socketType);
 
         SslContext previous = cachedSslContexts.putIfAbsent(key, sslContext);
         if (previous == null)
@@ -291,52 +152,21 @@
     /**
      * Create a Netty {@link SslContext}
      */
-    static SslContext createNettySslContext(EncryptionOptions options, boolean buildTruststore,
-                                            SocketType socketType, boolean useOpenSsl) throws IOException
+    static SslContext createNettySslContext(EncryptionOptions options, boolean verifyPeerCertificate,
+                                            SocketType socketType) throws IOException
     {
-        return createNettySslContext(options, buildTruststore, socketType, useOpenSsl,
+        return createNettySslContext(options, verifyPeerCertificate, socketType,
                                      LoggingCipherSuiteFilter.QUIET_FILTER);
     }
 
     /**
      * Create a Netty {@link SslContext} with a supplied cipherFilter
      */
-    static SslContext createNettySslContext(EncryptionOptions options, boolean buildTruststore,
-                                            SocketType socketType, boolean useOpenSsl, CipherSuiteFilter cipherFilter) throws IOException
+    static SslContext createNettySslContext(EncryptionOptions options, boolean verifyPeerCertificate,
+                                            SocketType socketType, CipherSuiteFilter cipherFilter) throws IOException
     {
-        /*
-            There is a case where the netty/openssl combo might not support using KeyManagerFactory. specifically,
-            I've seen this with the netty-tcnative dynamic openssl implementation. using the netty-tcnative static-boringssl
-            works fine with KeyManagerFactory. If we want to support all of the netty-tcnative options, we would need
-            to fall back to passing in a file reference for both a x509 and PKCS#8 private key file in PEM format (see
-            {@link SslContextBuilder#forServer(File, File, String)}). However, we are not supporting that now to keep
-            the config/yaml API simple.
-         */
-        KeyManagerFactory kmf = buildKeyManagerFactory(options);
-        SslContextBuilder builder;
-        if (socketType == SocketType.SERVER)
-        {
-            builder = SslContextBuilder.forServer(kmf);
-            builder.clientAuth(options.require_client_auth ? ClientAuth.REQUIRE : ClientAuth.NONE);
-        }
-        else
-        {
-            builder = SslContextBuilder.forClient().keyManager(kmf);
-        }
-
-        builder.sslProvider(useOpenSsl ? SslProvider.OPENSSL : SslProvider.JDK);
-
-        builder.protocols(options.acceptedProtocols());
-
-        // only set the cipher suites if the opertor has explicity configured values for it; else, use the default
-        // for each ssl implemention (jdk or openssl)
-        if (options.cipher_suites != null && !options.cipher_suites.isEmpty())
-            builder.ciphers(options.cipher_suites, cipherFilter);
-
-        if (buildTruststore)
-            builder.trustManager(buildTrustManagerFactory(options));
-
-        return builder.build();
+        return options.sslContextFactoryInstance.createNettySslContext(verifyPeerCertificate, socketType,
+                                                                       cipherFilter);
     }
 
     /**
@@ -351,21 +181,58 @@
         if (!isHotReloadingInitialized)
             throw new IllegalStateException("Hot reloading functionality has not been initialized.");
 
-        logger.debug("Checking whether certificates have been updated {}", hotReloadableFiles);
+        logger.debug("Checking whether certificates have been updated for server {} and client {}",
+                     serverOpts.sslContextFactoryInstance.getClass().getName(), clientOpts.sslContextFactoryInstance.getClass().getName());
 
-        if (hotReloadableFiles.stream().anyMatch(HotReloadableFile::shouldReload))
+        if (serverOpts != null)
         {
-            logger.info("SSL certificates have been updated. Reseting the ssl contexts for new connections.");
-            try
+            checkCertFilesForHotReloading(serverOpts, "server_encryption_options", true);
+        }
+        if (clientOpts != null)
+        {
+            checkCertFilesForHotReloading(clientOpts, "client_encryption_options", clientOpts.require_client_auth);
+        }
+    }
+
+    private static void checkCertFilesForHotReloading(EncryptionOptions options, String contextDescription,
+                                                      boolean verifyPeerCertificate)
+    {
+        try
+        {
+            if (options.sslContextFactoryInstance.shouldReload())
             {
-                validateSslCerts(serverOpts, clientOpts);
-                cachedSslContexts.clear();
-            }
-            catch(Exception e)
-            {
-                logger.error("Failed to hot reload the SSL Certificates! Please check the certificate files.", e);
+                logger.info("SSL certificates have been updated for {}. Resetting the ssl contexts for new " +
+                            "connections.", options.getClass().getName());
+                validateSslContext(contextDescription, options, verifyPeerCertificate, false);
+                clearSslContextCache(options);
             }
         }
+        catch(Exception e)
+        {
+            logger.error("Failed to hot reload the SSL Certificates! Please check the certificate files.", e);
+        }
+    }
+
+    /**
+     * This clears the cache of Netty's SslContext objects for Client and Server sockets. It is made publicly
+     * available so that any {@link ISslContextFactory} implementation can call it to invalidate the SslContext cache
+     * in special scenarios.
+     * This should be used with caution, since the purpose of this cache is to save the costly creation of Netty's
+     * SslContext objects, and calling this essentially results in re-creating them.
+     */
+    public static void clearSslContextCache()
+    {
+        cachedSslContexts.clear();
+    }
+
+    private static void clearSslContextCache(EncryptionOptions options)
+    {
+        cachedSslContexts.forEachKey(1, cacheKey -> {
+            if (cacheKey.encryptionOptions.equals(options))
+            {
+                cachedSslContexts.remove(cacheKey);
+            }
+        });
     }
 
     /**
@@ -383,22 +250,14 @@
 
         logger.debug("Initializing hot reloading SSLContext");
 
-        List<HotReloadableFile> fileList = new ArrayList<>();
-
-        if (serverOpts != null && serverOpts.tlsEncryptionPolicy() != EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED)
-        {
-            fileList.add(new HotReloadableFile(serverOpts.keystore));
-            fileList.add(new HotReloadableFile(serverOpts.truststore));
+        if ( serverOpts != null && serverOpts.tlsEncryptionPolicy() != EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED) {
+            serverOpts.sslContextFactoryInstance.initHotReloading();
         }
 
-        if (clientOpts != null && clientOpts.tlsEncryptionPolicy() != EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED)
-        {
-            fileList.add(new HotReloadableFile(clientOpts.keystore));
-            fileList.add(new HotReloadableFile(clientOpts.truststore));
+        if ( clientOpts != null && clientOpts.tlsEncryptionPolicy() != EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED) {
+            clientOpts.sslContextFactoryInstance.initHotReloading();
         }
 
-        hotReloadableFiles = ImmutableList.copyOf(fileList);
-
         if (!isHotReloadingInitialized)
         {
             ScheduledExecutors.scheduledTasks
@@ -485,7 +344,7 @@
         return !string.equals("SSLv2Hello");
     }
 
-    public static void validateSslContext(String contextDescription, EncryptionOptions options, boolean buildTrustStore, boolean logProtocolAndCiphers) throws IOException
+    public static void validateSslContext(String contextDescription, EncryptionOptions options, boolean verifyPeerCertificate, boolean logProtocolAndCiphers) throws IOException
     {
         if (options != null && options.tlsEncryptionPolicy() != EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED)
         {
@@ -493,7 +352,7 @@
             {
                 CipherSuiteFilter loggingCipherSuiteFilter = logProtocolAndCiphers ? new LoggingCipherSuiteFilter(contextDescription)
                                                                                    : LoggingCipherSuiteFilter.QUIET_FILTER;
-                SslContext serverSslContext = createNettySslContext(options, buildTrustStore, SocketType.SERVER, openSslIsAvailable(), loggingCipherSuiteFilter);
+                SslContext serverSslContext = createNettySslContext(options, verifyPeerCertificate, SocketType.SERVER, loggingCipherSuiteFilter);
                 try
                 {
                     SSLEngine engine = serverSslContext.newEngine(ByteBufAllocator.DEFAULT);
@@ -538,7 +397,7 @@
                 }
 
                 // Make sure it is possible to build the client context too
-                SslContext clientSslContext = createNettySslContext(options, buildTrustStore, SocketType.CLIENT, openSslIsAvailable());
+                SslContext clientSslContext = createNettySslContext(options, verifyPeerCertificate, SocketType.CLIENT);
                 ReferenceCountUtil.release(clientSslContext);
             }
             catch (Exception e)
@@ -561,13 +420,11 @@
     {
         private final EncryptionOptions encryptionOptions;
         private final SocketType socketType;
-        private final boolean useOpenSSL;
 
-        public CacheKey(EncryptionOptions encryptionOptions, SocketType socketType, boolean useOpenSSL)
+        public CacheKey(EncryptionOptions encryptionOptions, SocketType socketType)
         {
             this.encryptionOptions = encryptionOptions;
             this.socketType = socketType;
-            this.useOpenSSL = useOpenSSL;
         }
 
         public boolean equals(Object o)
@@ -576,7 +433,6 @@
             if (o == null || getClass() != o.getClass()) return false;
             CacheKey cacheKey = (CacheKey) o;
             return (socketType == cacheKey.socketType &&
-                    useOpenSSL == cacheKey.useOpenSSL &&
                     Objects.equals(encryptionOptions, cacheKey.encryptionOptions));
         }
 
@@ -585,7 +441,6 @@
             int result = 0;
             result += 31 * socketType.hashCode();
             result += 31 * encryptionOptions.hashCode();
-            result += 31 * Boolean.hashCode(useOpenSSL);
             return result;
         }
     }
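
With the delegation above, callers no longer pass an openssl flag. Below is a hedged sketch of obtaining a cached client-side context; the helper class is hypothetical, and only SSLFactory.getOrCreateSslContext, SocketType and EncryptionOptions come from the surrounding code.

    import java.io.IOException;

    import io.netty.handler.ssl.SslContext;
    import org.apache.cassandra.config.EncryptionOptions;
    import org.apache.cassandra.security.ISslContextFactory.SocketType;
    import org.apache.cassandra.security.SSLFactory;

    // Hypothetical helper: 'options' would normally come from the configured
    // client/server encryption options.
    final class SslContexts
    {
        static SslContext clientContext(EncryptionOptions options) throws IOException
        {
            // The result is cached per (EncryptionOptions, SocketType) and reused
            // until hot reloading clears the cache for these options.
            return SSLFactory.getOrCreateSslContext(options, options.require_client_auth, SocketType.CLIENT);
        }
    }
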
diff --git a/src/java/org/apache/cassandra/serializers/ListSerializer.java b/src/java/org/apache/cassandra/serializers/ListSerializer.java
index 35c3141..7a2f634 100644
--- a/src/java/org/apache/cassandra/serializers/ListSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/ListSerializer.java
@@ -23,6 +23,7 @@
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.function.Predicate;
 
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -132,6 +133,41 @@
         }
     }
 
+    public boolean anyMatch(ByteBuffer serializedList, Predicate<ByteBuffer> predicate)
+    {
+        return anyMatch(serializedList, ByteBufferAccessor.instance, predicate);
+    }
+
+    public <V> boolean anyMatch(V input, ValueAccessor<V> accessor, Predicate<V> predicate)
+    {
+        try
+        {
+            int s = readCollectionSize(input, accessor, ProtocolVersion.V3);
+            int offset = sizeOfCollectionSize(s, ProtocolVersion.V3);
+
+            for (int i = 0; i < s; i++)
+            {
+                int size = accessor.getInt(input, offset);
+                if (size < 0)
+                    continue;
+
+                offset += TypeSizes.INT_SIZE;
+
+                V value = accessor.slice(input, offset, size);
+
+                if (predicate.test(value))
+                    return true;
+
+                offset += size;
+            }
+            return false;
+        }
+        catch (BufferUnderflowException e)
+        {
+            throw new MarshalException("Not enough bytes to read a list");
+        }
+    }
+
     /**
      * Returns the element at the given index in a list.
      * @param input a serialized list
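
A hedged example of the new anyMatch helper added in the hunk above. It assumes the existing ListSerializer.getInstance and Int32Serializer APIs, and checks a serialized list for a value without materialising the whole list.

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    import org.apache.cassandra.serializers.Int32Serializer;
    import org.apache.cassandra.serializers.ListSerializer;

    // Hypothetical demo, not part of this patch.
    public class AnyMatchDemo
    {
        public static void main(String[] args)
        {
            ListSerializer<Integer> lists = ListSerializer.getInstance(Int32Serializer.instance);
            ByteBuffer serialized = lists.serialize(Arrays.asList(1, 2, 42));

            // Each element buffer is handed to the predicate without copying the rest of the list.
            boolean contains42 = lists.anyMatch(serialized, e -> Int32Serializer.instance.deserialize(e) == 42);
            System.out.println(contains42);
        }
    }
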
diff --git a/src/java/org/apache/cassandra/serializers/MapSerializer.java b/src/java/org/apache/cassandra/serializers/MapSerializer.java
index 9c0a001..2e363b4 100644
--- a/src/java/org/apache/cassandra/serializers/MapSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/MapSerializer.java
@@ -24,7 +24,6 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.ValueComparators;
diff --git a/src/java/org/apache/cassandra/serializers/SetSerializer.java b/src/java/org/apache/cassandra/serializers/SetSerializer.java
index bd5e018..bf565be 100644
--- a/src/java/org/apache/cassandra/serializers/SetSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/SetSerializer.java
@@ -24,7 +24,6 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.ValueComparators;
 import org.apache.cassandra.db.marshal.ValueAccessor;
diff --git a/src/java/org/apache/cassandra/serializers/TimeUUIDSerializer.java b/src/java/org/apache/cassandra/serializers/TimeUUIDSerializer.java
deleted file mode 100644
index 9cde1dd..0000000
--- a/src/java/org/apache/cassandra/serializers/TimeUUIDSerializer.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.serializers;
-
-import org.apache.cassandra.db.marshal.ValueAccessor;
-
-public class TimeUUIDSerializer extends UUIDSerializer
-{
-    public static final TimeUUIDSerializer instance = new TimeUUIDSerializer();
-
-    public <V> void validate(V value, ValueAccessor<V> accessor) throws MarshalException
-    {
-        super.validate(value, accessor);
-        // Super class only validates the Time UUID
-        // version is bits 4-7 of byte 6.
-        if (!accessor.isEmpty(value))
-        {
-            if ((accessor.getByte(value, 6) & 0xf0) != 0x10)
-                throw new MarshalException("Invalid version for TimeUUID type.");
-        }
-    }
-}
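
For reference on the version check in the deleted serializer above: byte 6 of a serialized UUID carries the version in its high nibble, and 0x10 is version 1 (time-based). A JDK-only illustration:

    import java.nio.ByteBuffer;
    import java.util.UUID;

    public class UuidVersionNibble
    {
        public static void main(String[] args)
        {
            UUID random = UUID.randomUUID();                   // a version 4 UUID
            ByteBuffer bytes = ByteBuffer.allocate(16)
                                         .putLong(random.getMostSignificantBits())
                                         .putLong(random.getLeastSignificantBits());
            int highNibble = bytes.get(6) & 0xf0;              // version lives in bits 4-7 of byte 6
            System.out.println(random.version());              // 4
            System.out.println(highNibble == 0x40);            // true
            System.out.println(highNibble == 0x10);            // false: not a time-based UUID
        }
    }
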
diff --git a/src/java/org/apache/cassandra/serializers/TimestampSerializer.java b/src/java/org/apache/cassandra/serializers/TimestampSerializer.java
index 21cac66..71782e5 100644
--- a/src/java/org/apache/cassandra/serializers/TimestampSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/TimestampSerializer.java
@@ -35,6 +35,8 @@
 import java.util.TimeZone;
 import java.util.regex.Pattern;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 
 public class TimestampSerializer extends TypeSerializer<Date>
 {
@@ -140,7 +142,7 @@
     public static long dateStringToTimestamp(String source) throws MarshalException
     {
         if (source.equalsIgnoreCase("now"))
-            return System.currentTimeMillis();
+            return currentTimeMillis();
 
         // Milliseconds since epoch?
         if (timestampPattern.matcher(source).matches())
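
The change above swaps System.currentTimeMillis() for the statically imported Clock.Global.currentTimeMillis(). A generic sketch of the idea behind such an indirection (this Clock-like class is illustrative only, not Cassandra's actual Clock API): time is read through a swappable supplier so tests can pin or advance it.

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.LongSupplier;

    public class PluggableClock
    {
        // Swappable time source; production code keeps the System default.
        private static volatile LongSupplier millis = System::currentTimeMillis;

        public static long currentTimeMillis() { return millis.getAsLong(); }

        public static void main(String[] args)
        {
            AtomicLong fake = new AtomicLong(1_000L);
            millis = fake::get;                       // a test pins the clock
            System.out.println(currentTimeMillis());  // 1000
            fake.addAndGet(500);
            System.out.println(currentTimeMillis());  // 1500
        }
    }
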
diff --git a/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java b/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java
index 7128277..ce28266 100644
--- a/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java
@@ -22,16 +22,20 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import java.util.stream.Collectors;
+import java.util.function.Supplier;
+
+import javax.annotation.Nullable;
 
 import org.apache.cassandra.db.ConsistencyLevel;
 
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.locator.EndpointsForToken;
 import org.apache.cassandra.locator.ReplicaPlan;
+import org.apache.cassandra.locator.ReplicaPlan.ForWrite;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.IMutation;
 import org.apache.cassandra.db.WriteType;
@@ -41,28 +45,41 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Message;
-import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
+import static java.lang.Long.MAX_VALUE;
+import static java.lang.Math.min;
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static java.util.stream.Collectors.toList;
+import static org.apache.cassandra.config.DatabaseDescriptor.getCounterWriteRpcTimeout;
+import static org.apache.cassandra.config.DatabaseDescriptor.getWriteRpcTimeout;
+import static org.apache.cassandra.db.WriteType.COUNTER;
+import static org.apache.cassandra.schema.Schema.instance;
+import static org.apache.cassandra.service.StorageProxy.WritePerformer;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 import static org.apache.cassandra.locator.Replicas.countInOurDc;
 
+
 public abstract class AbstractWriteResponseHandler<T> implements RequestCallback<T>
 {
     protected static final Logger logger = LoggerFactory.getLogger(AbstractWriteResponseHandler.class);
 
     //Count down until all responses and expirations have occurred before deciding whether the ideal CL was reached.
     private AtomicInteger responsesAndExpirations;
-    private final SimpleCondition condition = new SimpleCondition();
-    protected final ReplicaPlan.ForTokenWrite replicaPlan;
+    private final Condition condition = newOneTimeCondition();
+    protected final ReplicaPlan.ForWrite replicaPlan;
 
     protected final Runnable callback;
     protected final WriteType writeType;
-    private static final AtomicIntegerFieldUpdater<AbstractWriteResponseHandler> failuresUpdater
-    = AtomicIntegerFieldUpdater.newUpdater(AbstractWriteResponseHandler.class, "failures");
+    private static final AtomicIntegerFieldUpdater<AbstractWriteResponseHandler> failuresUpdater =
+        AtomicIntegerFieldUpdater.newUpdater(AbstractWriteResponseHandler.class, "failures");
     private volatile int failures = 0;
     private final Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint;
     private final long queryStartNanoTime;
+    private @Nullable final Supplier<Mutation> hintOnFailure;
 
     /**
       * Delegate to another WriteResponseHandler or possibly this one to track if the ideal consistency level was reached.
@@ -79,16 +96,16 @@
 
     /**
      * @param callback           A callback to be called when the write is successful.
+     * @param hintOnFailure a supplier of the mutation to submit as a hint when a replica reports a failure
      * @param queryStartNanoTime
      */
-    protected AbstractWriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan,
-                                           Runnable callback,
-                                           WriteType writeType,
-                                           long queryStartNanoTime)
+    protected AbstractWriteResponseHandler(ForWrite replicaPlan, Runnable callback, WriteType writeType,
+                                           Supplier<Mutation> hintOnFailure, long queryStartNanoTime)
     {
         this.replicaPlan = replicaPlan;
         this.callback = callback;
         this.writeType = writeType;
+        this.hintOnFailure = hintOnFailure;
         this.failureReasonByEndpoint = new ConcurrentHashMap<>();
         this.queryStartNanoTime = queryStartNanoTime;
     }
@@ -102,9 +119,9 @@
         {
             success = condition.await(timeoutNanos, NANOSECONDS);
         }
-        catch (InterruptedException ex)
+        catch (InterruptedException e)
         {
-            throw new AssertionError(ex);
+            throw new UncheckedInterruptedException(e);
         }
 
         if (!success)
@@ -127,10 +144,10 @@
 
     public final long currentTimeoutNanos()
     {
-        long requestTimeout = writeType == WriteType.COUNTER
-                              ? DatabaseDescriptor.getCounterWriteRpcTimeout(NANOSECONDS)
-                              : DatabaseDescriptor.getWriteRpcTimeout(NANOSECONDS);
-        return requestTimeout - (System.nanoTime() - queryStartNanoTime);
+        long requestTimeout = writeType == COUNTER
+                              ? getCounterWriteRpcTimeout(NANOSECONDS)
+                              : getWriteRpcTimeout(NANOSECONDS);
+        return requestTimeout - (nanoTime() - queryStartNanoTime);
     }
 
     /**
@@ -198,7 +215,7 @@
     {
         // During bootstrap, we have to include the pending endpoints or we may fail the consistency level
         // guarantees (see #833)
-        return replicaPlan.blockFor();
+        return replicaPlan.writeQuorum();
     }
 
     /**
@@ -264,6 +281,9 @@
 
         if (blockFor() + n > candidateReplicaCount())
             signal();
+
+        if (hintOnFailure != null && StorageProxy.shouldHint(replicaPlan.lookup(from)))
+            StorageProxy.submitHint(hintOnFailure.get(), replicaPlan.lookup(from), null);
     }
 
     @Override
@@ -284,13 +304,13 @@
         {
             // The condition being signaled is a valid proxy for the CL being achieved
             // Only mark it as failed if the requested CL was achieved.
-            if (!condition.isSignaled() && requestedCLAchieved)
+            if (!condition.isSignalled() && requestedCLAchieved)
             {
                 replicaPlan.keyspace().metric.writeFailedIdealCL.inc();
             }
             else
             {
-                replicaPlan.keyspace().metric.idealCLWriteLatency.addNano(System.nanoTime() - queryStartNanoTime);
+                replicaPlan.keyspace().metric.idealCLWriteLatency.addNano(nanoTime() - queryStartNanoTime);
             }
         }
     }
@@ -298,38 +318,38 @@
     /**
      * Cheap Quorum backup.  If we failed to reach quorum with our initial (full) nodes, reach out to other nodes.
      */
-    public void maybeTryAdditionalReplicas(IMutation mutation, StorageProxy.WritePerformer writePerformer, String localDC)
+    public void maybeTryAdditionalReplicas(IMutation mutation, WritePerformer writePerformer, String localDC)
     {
         EndpointsForToken uncontacted = replicaPlan.liveUncontacted();
         if (uncontacted.isEmpty())
             return;
 
-        long timeout = Long.MAX_VALUE;
+        long timeout = MAX_VALUE;
         List<ColumnFamilyStore> cfs = mutation.getTableIds().stream()
-                                              .map(Schema.instance::getColumnFamilyStoreInstance)
-                                              .collect(Collectors.toList());
+                                              .map(instance::getColumnFamilyStoreInstance)
+                                              .collect(toList());
         for (ColumnFamilyStore cf : cfs)
-            timeout = Math.min(timeout, cf.additionalWriteLatencyNanos);
+            timeout = min(timeout, cf.additionalWriteLatencyMicros);
 
         // no latency information, or we're overloaded
-        if (timeout > mutation.getTimeout(NANOSECONDS))
+        if (timeout > mutation.getTimeout(MICROSECONDS))
             return;
 
         try
         {
-            if (!condition.await(timeout, NANOSECONDS))
+            if (!condition.await(timeout, MICROSECONDS))
             {
                 for (ColumnFamilyStore cf : cfs)
                     cf.metric.additionalWrites.inc();
 
-                writePerformer.apply(mutation, replicaPlan.withContact(uncontacted),
+                writePerformer.apply(mutation, replicaPlan.withContacts(uncontacted),
                                      (AbstractWriteResponseHandler<IMutation>) this,
                                      localDC);
             }
         }
-        catch (InterruptedException ex)
+        catch (InterruptedException e)
         {
-            throw new AssertionError(ex);
+            throw new UncheckedInterruptedException(e);
         }
     }
 }
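
maybeTryAdditionalReplicas above implements a speculative backup write: wait on the completion condition for roughly the table's observed additional-write latency (now tracked in microseconds) and, only if the condition has not been signalled, send the mutation to the live replicas that were not contacted initially. A standalone, JDK-only sketch of that shape (all names are illustrative):

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.function.Consumer;

    public class SpeculativeWriteSketch
    {
        static void maybeTryAdditionalReplicas(CountDownLatch done,
                                               long extraLatencyMicros,
                                               long mutationTimeoutMicros,
                                               List<String> uncontacted,
                                               Consumer<List<String>> sendToReplicas) throws InterruptedException
        {
            if (uncontacted.isEmpty())
                return;
            // No useful latency signal, or we are already near the timeout: do not speculate.
            if (extraLatencyMicros > mutationTimeoutMicros)
                return;
            // Wait for the originally contacted replicas; fan out only if they are late.
            if (!done.await(extraLatencyMicros, TimeUnit.MICROSECONDS))
                sendToReplicas.accept(uncontacted);
        }

        public static void main(String[] args) throws InterruptedException
        {
            CountDownLatch done = new CountDownLatch(1);      // never signalled in this demo
            maybeTryAdditionalReplicas(done, 1_000, 2_000_000,
                                       List.of("replica-3"),
                                       extras -> System.out.println("speculating to " + extras));
        }
    }
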
diff --git a/src/java/org/apache/cassandra/service/ActiveRepairService.java b/src/java/org/apache/cassandra/service/ActiveRepairService.java
index e3ec218..a0716b1 100644
--- a/src/java/org/apache/cassandra/service/ActiveRepairService.java
+++ b/src/java/org/apache/cassandra/service/ActiveRepairService.java
@@ -20,9 +20,15 @@
 import java.io.IOException;
 import java.net.UnknownHostException;
 import java.util.*;
-import java.util.concurrent.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import javax.management.openmbean.CompositeData;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
@@ -32,22 +38,31 @@
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
 
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DurationSpec;
 import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.locator.EndpointsByRange;
 import org.apache.cassandra.locator.EndpointsForRange;
+import org.apache.cassandra.utils.ExecutorUtils;
+import org.apache.cassandra.repair.state.CoordinatorState;
+import org.apache.cassandra.repair.state.ParticipateState;
+import org.apache.cassandra.repair.state.ValidationState;
+import org.apache.cassandra.utils.Simulate;
+import org.apache.cassandra.locator.EndpointsForToken;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
-import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -70,6 +85,9 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.repair.CommonRange;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
+import org.apache.cassandra.service.paxos.PaxosRepair;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanup;
 import org.apache.cassandra.repair.RepairJobDesc;
 import org.apache.cassandra.repair.RepairParallelism;
 import org.apache.cassandra.repair.RepairSession;
@@ -87,15 +105,27 @@
 import org.apache.cassandra.repair.messages.SyncResponse;
 import org.apache.cassandra.repair.messages.ValidationResponse;
 import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MBeanWrapper;
+import org.apache.cassandra.utils.MerkleTrees;
 import org.apache.cassandra.utils.Pair;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 
 import static com.google.common.collect.Iterables.concat;
 import static com.google.common.collect.Iterables.transform;
+import static java.util.Collections.synchronizedSet;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.config.Config.RepairCommandPoolFullStrategy.reject;
+import static org.apache.cassandra.config.DatabaseDescriptor.*;
+import static org.apache.cassandra.net.Message.out;
 import static org.apache.cassandra.net.Verb.PREPARE_MSG;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Simulate.With.MONITORS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
 
 /**
  * ActiveRepairService is the starting point for manual "active" repairs.
@@ -111,9 +141,9 @@
  * The creation of a repair session is done through the submitRepairSession that
  * returns a future on the completion of that session.
  */
+@Simulate(with = MONITORS)
 public class ActiveRepairService implements IEndpointStateChangeSubscriber, IFailureDetectionEventListener, ActiveRepairServiceMBean
 {
-
     public enum ParentRepairStatus
     {
         IN_PROGRESS, COMPLETED, FAILED
@@ -134,14 +164,20 @@
     public static final ActiveRepairService instance = new ActiveRepairService(FailureDetector.instance, Gossiper.instance);
 
     public static final long UNREPAIRED_SSTABLE = 0;
-    public static final UUID NO_PENDING_REPAIR = null;
+    public static final TimeUUID NO_PENDING_REPAIR = null;
 
     /**
      * A map of active coordinator session.
      */
-    private final ConcurrentMap<UUID, RepairSession> sessions = new ConcurrentHashMap<>();
+    private final ConcurrentMap<TimeUUID, RepairSession> sessions = new ConcurrentHashMap<>();
 
-    private final ConcurrentMap<UUID, ParentRepairSession> parentRepairSessions = new ConcurrentHashMap<>();
+    private final ConcurrentMap<TimeUUID, ParentRepairSession> parentRepairSessions = new ConcurrentHashMap<>();
+    // map of top level repair id (parent repair id) -> state
+    private final Cache<TimeUUID, CoordinatorState> repairs;
+    // map of top level repair id (parent repair id) -> participate state
+    private final Cache<TimeUUID, ParticipateState> participates;
+
+    private volatile ScheduledFuture<?> irCleanup;
 
     static
     {
@@ -150,46 +186,23 @@
 
     public static class RepairCommandExecutorHandle
     {
-        private static final ThreadPoolExecutor repairCommandExecutor =
-            initializeExecutor(DatabaseDescriptor.getRepairCommandPoolSize(),
-                               DatabaseDescriptor.getRepairCommandPoolFullStrategy());
+        private static final ExecutorPlus repairCommandExecutor = initializeExecutor(getRepairCommandPoolSize(), getRepairCommandPoolFullStrategy());
     }
 
     @VisibleForTesting
-    static ThreadPoolExecutor initializeExecutor(int maxPoolSize, Config.RepairCommandPoolFullStrategy strategy)
+    static ExecutorPlus initializeExecutor(int maxPoolSize, Config.RepairCommandPoolFullStrategy strategy)
     {
-        int corePoolSize = 1;
-        BlockingQueue<Runnable> queue;
-        if (strategy == Config.RepairCommandPoolFullStrategy.reject)
-        {
-            // new threads will be created on demand up to max pool
-            // size so we can leave corePoolSize at 1 to start with
-            queue = new SynchronousQueue<>();
-        }
-        else
-        {
-            // new threads are only created if > corePoolSize threads are running
-            // and the queue is full, so set corePoolSize to the desired max as the
-            // queue will _never_ be full. Idle core threads will eventually time
-            // out and may be re-created if/when subsequent tasks are submitted.
-            corePoolSize = maxPoolSize;
-            queue = new LinkedBlockingQueue<>();
-        }
-
-        ThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(corePoolSize,
-                                                                       maxPoolSize,
-                                                                       1,
-                                                                       TimeUnit.HOURS,
-                                                                       queue,
-                                                                       new NamedThreadFactory("Repair-Task"),
-                                                                       "internal",
-                                                                       new ThreadPoolExecutor.AbortPolicy());
-        // allow idle core threads to be terminated
-        executor.allowCoreThreadTimeOut(true);
-        return executor;
+        return executorFactory()
+               .localAware()       // we do trace repair sessions, and seem to rely on local aware propagation (though could do with refactoring)
+               .withJmxInternal()
+               .configurePooled("Repair-Task", maxPoolSize)
+               .withKeepAlive(1, TimeUnit.HOURS)
+               .withQueueLimit(strategy == reject ? 0 : Integer.MAX_VALUE)
+               .withRejectedExecutionHandler(new ThreadPoolExecutor.AbortPolicy())
+               .build();
     }
 
-    public static ThreadPoolExecutor repairCommandExecutor()
+    public static ExecutorPlus repairCommandExecutor()
     {
         return RepairCommandExecutorHandle.repairCommandExecutor;
     }
@@ -198,10 +211,9 @@
     private final Gossiper gossiper;
     private final Cache<Integer, Pair<ParentRepairStatus, List<String>>> repairStatusByCmd;
 
-    public final DebuggableThreadPoolExecutor snapshotExecutor = DebuggableThreadPoolExecutor.createWithMaximumPoolSize("RepairSnapshotExecutor",
-                                                                                                                        1,
-                                                                                                                        1,
-                                                                                                                        TimeUnit.HOURS);
+    public final ExecutorPlus snapshotExecutor = executorFactory().configurePooled("RepairSnapshotExecutor", 1)
+                                                                  .withKeepAlive(1, TimeUnit.HOURS)
+                                                                  .build();
 
     public ActiveRepairService(IFailureDetector failureDetector, Gossiper gossiper)
     {
@@ -217,19 +229,42 @@
                                              .maximumSize(Long.getLong("cassandra.parent_repair_status_cache_size", 100_000))
                                              .build();
 
+        DurationSpec.LongNanosecondsBound duration = getRepairStateExpires();
+        int numElements = getRepairStateSize();
+        logger.info("Storing repair state for {} or for {} elements", duration, numElements);
+        repairs = CacheBuilder.newBuilder()
+                              .expireAfterWrite(duration.quantity(), duration.unit())
+                              .maximumSize(numElements)
+                              .build();
+        participates = CacheBuilder.newBuilder()
+                                   .expireAfterWrite(duration.quantity(), duration.unit())
+                                   .maximumSize(numElements)
+                                   .build();
+
         MBeanWrapper.instance.registerMBean(this, MBEAN_NAME);
     }
 
     public void start()
     {
         consistent.local.start();
-        ScheduledExecutors.optionalTasks.scheduleAtFixedRate(consistent.local::cleanup, 0,
-                                                             LocalSessions.CLEANUP_INTERVAL,
-                                                             TimeUnit.SECONDS);
+        this.irCleanup = ScheduledExecutors.optionalTasks.scheduleAtFixedRate(consistent.local::cleanup, 0,
+                                                                              LocalSessions.CLEANUP_INTERVAL,
+                                                                              TimeUnit.SECONDS);
+    }
+
+    @VisibleForTesting
+    public void clearLocalRepairState()
+    {
+        // .cleanUp() doesn't clear, it looks to only run gc on things that could be removed... this method should remove all state
+        repairs.asMap().clear();
+        participates.asMap().clear();
     }
 
     public void stop()
     {
+        ScheduledFuture<?> irCleanup = this.irCleanup;
+        if (irCleanup != null)
+            irCleanup.cancel(false);
         consistent.local.stop();
     }
 
@@ -243,20 +278,32 @@
     @Override
     public void failSession(String session, boolean force)
     {
-        UUID sessionID = UUID.fromString(session);
+        TimeUUID sessionID = TimeUUID.fromString(session);
         consistent.local.cancelSession(sessionID, force);
     }
 
-    @Override
+    @Deprecated
     public void setRepairSessionSpaceInMegabytes(int sizeInMegabytes)
     {
-        DatabaseDescriptor.setRepairSessionSpaceInMegabytes(sizeInMegabytes);
+        DatabaseDescriptor.setRepairSessionSpaceInMiB(sizeInMegabytes);
+    }
+
+    @Deprecated
+    public int getRepairSessionSpaceInMegabytes()
+    {
+        return DatabaseDescriptor.getRepairSessionSpaceInMiB();
     }
 
     @Override
-    public int getRepairSessionSpaceInMegabytes()
+    public void setRepairSessionSpaceInMebibytes(int sizeInMebibytes)
     {
-        return DatabaseDescriptor.getRepairSessionSpaceInMegabytes();
+        DatabaseDescriptor.setRepairSessionSpaceInMiB(sizeInMebibytes);
+    }
+
+    @Override
+    public int getRepairSessionSpaceInMebibytes()
+    {
+        return DatabaseDescriptor.getRepairSessionSpaceInMiB();
     }
 
     public List<CompositeData> getRepairStats(List<String> schemaArgs, String rangeString)
@@ -329,7 +376,7 @@
      *
      * @return Future for asynchronous call or null if there is no need to repair
      */
-    public RepairSession submitRepairSession(UUID parentRepairSession,
+    public RepairSession submitRepairSession(TimeUUID parentRepairSession,
                                              CommonRange range,
                                              String keyspace,
                                              RepairParallelism parallelismDegree,
@@ -337,18 +384,24 @@
                                              boolean pullRepair,
                                              PreviewKind previewKind,
                                              boolean optimiseStreams,
-                                             ListeningExecutorService executor,
+                                             boolean repairPaxos,
+                                             boolean paxosOnly,
+                                             ExecutorPlus executor,
                                              String... cfnames)
     {
+        if (repairPaxos && previewKind != PreviewKind.NONE)
+            throw new IllegalArgumentException("cannot repair paxos in a preview repair");
+
         if (range.endpoints.isEmpty())
             return null;
 
         if (cfnames.length == 0)
             return null;
 
-        final RepairSession session = new RepairSession(parentRepairSession, UUIDGen.getTimeUUID(), range, keyspace,
+        final RepairSession session = new RepairSession(parentRepairSession, range, keyspace,
                                                         parallelismDegree, isIncremental, pullRepair,
-                                                        previewKind, optimiseStreams, cfnames);
+                                                        previewKind, optimiseStreams, repairPaxos, paxosOnly, cfnames);
+        repairs.getIfPresent(parentRepairSession).register(session.state);
 
         sessions.put(session.getId(), session);
         // register listeners
@@ -358,17 +411,10 @@
             LocalSessions.registerListener(session);
 
         // remove session at completion
-        session.addListener(new Runnable()
-        {
-            /**
-             * When repair finished, do clean up
-             */
-            public void run()
-            {
-                sessions.remove(session.getId());
-                LocalSessions.unregisterListener(session);
-            }
-        }, MoreExecutors.directExecutor());
+        session.addListener(() -> {
+            sessions.remove(session.getId());
+            LocalSessions.unregisterListener(session);
+        });
         session.start(executor);
         return session;
     }
@@ -383,7 +429,7 @@
         DatabaseDescriptor.useOffheapMerkleTrees(value);
     }
 
-    private <T extends AbstractFuture &
+    private <T extends Future &
                IEndpointStateChangeSubscriber &
                IFailureDetectionEventListener> void registerOnFdAndGossip(final T task)
     {
@@ -401,7 +447,7 @@
                 failureDetector.unregisterFailureDetectionEventListener(task);
                 gossiper.unregister(task);
             }
-        }, MoreExecutors.directExecutor());
+        });
     }
 
     public synchronized void terminateSessions()
@@ -420,7 +466,8 @@
     }
 
 
-    Pair<ParentRepairStatus, List<String>> getRepairStatus(Integer cmd)
+    @VisibleForTesting
+    public Pair<ParentRepairStatus, List<String>> getRepairStatus(Integer cmd)
     {
         return repairStatusByCmd.getIfPresent(cmd);
     }
@@ -428,16 +475,15 @@
     /**
      * Return all of the neighbors with whom we share the provided range.
      *
-     * @param keyspaceName keyspace to repair
+     * @param keyspaceName        keyspace to repair
      * @param keyspaceLocalRanges local-range for given keyspaceName
-     * @param toRepair token to repair
-     * @param dataCenters the data centers to involve in the repair
-     *
+     * @param toRepair            token to repair
+     * @param dataCenters         the data centers to involve in the repair
      * @return neighbors with whom we share the provided range
      */
     public static EndpointsForRange getNeighbors(String keyspaceName, Iterable<Range<Token>> keyspaceLocalRanges,
-                                          Range<Token> toRepair, Collection<String> dataCenters,
-                                          Collection<String> hosts)
+                                                 Range<Token> toRepair, Collection<String> dataCenters,
+                                                 Collection<String> hosts)
     {
         StorageService ss = StorageService.instance;
         EndpointsByRange replicaSets = ss.getRangeToAddressMap(keyspaceName);
@@ -514,17 +560,17 @@
         // we only want to set repairedAt for incremental repairs including all replicas for a token range. For non-global incremental repairs, full repairs, the UNREPAIRED_SSTABLE value will prevent
         // sstables from being promoted to repaired or preserve the repairedAt/pendingRepair values, respectively. For forced repairs, repairedAt time is only set to UNREPAIRED_SSTABLE if we actually
         // end up skipping replicas
-        if (options.isIncremental() && options.isGlobal() && ! force)
+        if (options.isIncremental() && options.isGlobal() && !force)
         {
-            return System.currentTimeMillis();
+            return currentTimeMillis();
         }
         else
         {
-            return  ActiveRepairService.UNREPAIRED_SSTABLE;
+            return ActiveRepairService.UNREPAIRED_SSTABLE;
         }
     }
 
-    public static boolean verifyCompactionsPendingThreshold(UUID parentRepairSession, PreviewKind previewKind)
+    public static boolean verifyCompactionsPendingThreshold(TimeUUID parentRepairSession, PreviewKind previewKind)
     {
         // Snapshot values so failure message is consistent with decision
         int pendingCompactions = CompactionManager.instance.getPendingTasks();
@@ -532,28 +578,29 @@
         if (pendingCompactions > pendingThreshold)
         {
             logger.error("[{}] Rejecting incoming repair, pending compactions ({}) above threshold ({})",
-                          previewKind.logPrefix(parentRepairSession), pendingCompactions, pendingThreshold);
+                         previewKind.logPrefix(parentRepairSession), pendingCompactions, pendingThreshold);
             return false;
         }
         return true;
     }
 
-    public UUID prepareForRepair(UUID parentRepairSession, InetAddressAndPort coordinator, Set<InetAddressAndPort> endpoints, RepairOption options, boolean isForcedRepair, List<ColumnFamilyStore> columnFamilyStores)
+    public TimeUUID prepareForRepair(TimeUUID parentRepairSession, InetAddressAndPort coordinator, Set<InetAddressAndPort> endpoints, RepairOption options, boolean isForcedRepair, List<ColumnFamilyStore> columnFamilyStores)
     {
         if (!verifyCompactionsPendingThreshold(parentRepairSession, options.getPreviewKind()))
             failRepair(parentRepairSession, "Rejecting incoming repair, pending compactions above threshold"); // failRepair throws exception
 
         long repairedAt = getRepairedAt(options, isForcedRepair);
         registerParentRepairSession(parentRepairSession, coordinator, columnFamilyStores, options.getRanges(), options.isIncremental(), repairedAt, options.isGlobal(), options.getPreviewKind());
-        final CountDownLatch prepareLatch = new CountDownLatch(endpoints.size());
+        final CountDownLatch prepareLatch = newCountDownLatch(endpoints.size());
         final AtomicBoolean status = new AtomicBoolean(true);
-        final Set<String> failedNodes = Collections.synchronizedSet(new HashSet<String>());
+        final Set<String> failedNodes = synchronizedSet(new HashSet<String>());
+        final AtomicInteger timeouts = new AtomicInteger(0);
         RequestCallback callback = new RequestCallback()
         {
             @Override
             public void onResponse(Message msg)
             {
-                prepareLatch.countDown();
+                prepareLatch.decrement();
             }
 
             @Override
@@ -561,7 +608,9 @@
             {
                 status.set(false);
                 failedNodes.add(from.toString());
-                prepareLatch.countDown();
+                if (failureReason == RequestFailureReason.TIMEOUT)
+                    timeouts.incrementAndGet();
+                prepareLatch.decrement();
             }
 
             @Override
@@ -575,12 +624,13 @@
         for (ColumnFamilyStore cfs : columnFamilyStores)
             tableIds.add(cfs.metadata.id);
 
+        PrepareMessage message = new PrepareMessage(parentRepairSession, tableIds, options.getRanges(), options.isIncremental(), repairedAt, options.isGlobal(), options.getPreviewKind());
+        register(new ParticipateState(FBUtilities.getBroadcastAddressAndPort(), message));
         for (InetAddressAndPort neighbour : endpoints)
         {
             if (FailureDetector.instance.isAlive(neighbour))
             {
-                PrepareMessage message = new PrepareMessage(parentRepairSession, tableIds, options.getRanges(), options.isIncremental(), repairedAt, options.isGlobal(), options.getPreviewKind());
-                Message<RepairMessage> msg = Message.out(PREPARE_MSG, message);
+                Message<RepairMessage> msg = out(PREPARE_MSG, message);
                 MessagingService.instance().sendWithCallback(msg, neighbour, callback);
             }
             else
@@ -589,19 +639,18 @@
                 // remaining ones go down, we still want to fail so we don't create repair sessions that can't complete
                 if (isForcedRepair && !options.isIncremental())
                 {
-                    prepareLatch.countDown();
+                    prepareLatch.decrement();
                 }
                 else
                 {
                     // bailout early to avoid potentially waiting for a long time.
                     failRepair(parentRepairSession, "Endpoint not alive: " + neighbour);
                 }
-
             }
         }
         try
         {
-            if (!prepareLatch.await(DatabaseDescriptor.getRpcTimeout(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS))
+            if (!prepareLatch.await(getRpcTimeout(MILLISECONDS), MILLISECONDS) || timeouts.get() > 0)
                 failRepair(parentRepairSession, "Did not get replies from all endpoints.");
         }
         catch (InterruptedException e)
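
The prepare fan-out above decrements one latch per endpoint callback and now also counts timeout failures separately, so the prepare fails when any endpoint timed out even if every callback eventually arrived. A JDK-only sketch of that accounting (simulated callbacks stand in for the messaging layer):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class PrepareFanOutSketch
    {
        public static void main(String[] args) throws InterruptedException
        {
            int endpoints = 3;
            CountDownLatch prepareLatch = new CountDownLatch(endpoints);
            Set<String> failedNodes = ConcurrentHashMap.newKeySet();
            AtomicInteger timeouts = new AtomicInteger();

            // Simulated callbacks: two successes and one timeout-style failure.
            prepareLatch.countDown();
            prepareLatch.countDown();
            failedNodes.add("10.0.0.3");
            timeouts.incrementAndGet();
            prepareLatch.countDown();

            // Fail if replies did not arrive in time *or* if any endpoint reported a
            // timeout, even though the latch itself reached zero.
            boolean ok = prepareLatch.await(1, TimeUnit.SECONDS) && timeouts.get() == 0;
            System.out.println(ok ? "prepared" : "failed: " + failedNodes);
        }
    }
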
@@ -622,7 +671,7 @@
      * endpoint's cache.
      * This method does not throw an exception in case of a messaging failure.
      */
-    public void cleanUp(UUID parentRepairSession, Set<InetAddressAndPort> endpoints)
+    public void cleanUp(TimeUUID parentRepairSession, Set<InetAddressAndPort> endpoints)
     {
         for (InetAddressAndPort endpoint : endpoints)
         {
@@ -645,8 +694,8 @@
                         public void onFailure(InetAddressAndPort from, RequestFailureReason failureReason)
                         {
                             logger.debug("Failed to clean up parent repair session {} on {}. The uncleaned sessions will " +
-                                    "be removed on a node restart. This should not be a problem unless you see thousands " +
-                                    "of messages like this.", parentRepairSession, endpoint);
+                                         "be removed on a node restart. This should not be a problem unless you see thousands " +
+                                         "of messages like this.", parentRepairSession, endpoint);
                         }
                     };
 
@@ -658,14 +707,21 @@
                 logger.warn("Failed to send a clean up message to {}", endpoint, exc);
             }
         }
+        ParticipateState state = participate(parentRepairSession);
+        if (state != null)
+            state.phase.success("Cleanup message received");
     }
 
-    private void failRepair(UUID parentRepairSession, String errorMsg) {
+    private void failRepair(TimeUUID parentRepairSession, String errorMsg)
+    {
+        ParticipateState state = participate(parentRepairSession);
+        if (state != null)
+            state.phase.fail(errorMsg);
         removeParentRepairSession(parentRepairSession);
         throw new RuntimeException(errorMsg);
     }
 
-    public synchronized void registerParentRepairSession(UUID parentRepairSession, InetAddressAndPort coordinator, List<ColumnFamilyStore> columnFamilyStores, Collection<Range<Token>> ranges, boolean isIncremental, long repairedAt, boolean isGlobal, PreviewKind previewKind)
+    public synchronized void registerParentRepairSession(TimeUUID parentRepairSession, InetAddressAndPort coordinator, List<ColumnFamilyStore> columnFamilyStores, Collection<Range<Token>> ranges, boolean isIncremental, long repairedAt, boolean isGlobal, PreviewKind previewKind)
     {
         assert isIncremental || repairedAt == ActiveRepairService.UNREPAIRED_SSTABLE;
         if (!registeredForEndpointChanges)
@@ -681,27 +737,34 @@
         }
     }
 
-    public ParentRepairSession getParentRepairSession(UUID parentSessionId)
+    /**
+     * We assume when calling this method that a parent session for the provided identifier
+     * exists, and that session is still in progress. When it doesn't, that should mean either
+     * {@link #abort(Predicate, String)} or {@link #failRepair(TimeUUID, String)} have removed it.
+     *
+     * @param parentSessionId an identifier for an active parent repair session
+     * @return the {@link ParentRepairSession} associated with the provided identifier
+     * @throws NoSuchRepairSessionException if the provided identifier does not map to an active parent session
+     */
+    public ParentRepairSession getParentRepairSession(TimeUUID parentSessionId) throws NoSuchRepairSessionException
     {
         ParentRepairSession session = parentRepairSessions.get(parentSessionId);
-        // this can happen if a node thinks that the coordinator was down, but that coordinator got back before noticing
-        // that it was down itself.
         if (session == null)
-            throw new RuntimeException("Parent repair session with id = " + parentSessionId + " has failed.");
+            throw new NoSuchRepairSessionException(parentSessionId);
 
         return session;
     }
 
     /**
      * called when the repair session is done - either failed or anticompaction has completed
-     *
+     * <p>
      * clears out any snapshots created by this repair
      *
-     * @param parentSessionId id of parent session
-     * @return parent session of given id or null if there is not such
-     * @see org.apache.cassandra.db.repair.CassandraTableRepairManager#snapshot(String, Collection, boolean) 
+     * @param parentSessionId an identifier for an active parent repair session
+     * @return the {@link ParentRepairSession} associated with the provided identifier
+     * @see org.apache.cassandra.db.repair.CassandraTableRepairManager#snapshot(String, Collection, boolean)
      */
-    public synchronized ParentRepairSession removeParentRepairSession(UUID parentSessionId)
+    public synchronized ParentRepairSession removeParentRepairSession(TimeUUID parentSessionId)
     {
         String snapshotName = parentSessionId.toString();
         ParentRepairSession session = parentRepairSessions.remove(parentSessionId);
@@ -715,13 +778,13 @@
                             session.columnFamilyStores.values()
                                                       .stream()
                                                       .map(cfs -> cfs.metadata().toString()).collect(Collectors.joining(", ")));
-                long startNanos = System.nanoTime();
+                long startNanos = nanoTime();
                 for (ColumnFamilyStore cfs : session.columnFamilyStores.values())
                 {
                     if (cfs.snapshotExists(snapshotName))
                         cfs.clearSnapshot(snapshotName);
                 }
-                logger.info("[repair #{}] Cleared snapshots in {}ms", parentSessionId, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
+                logger.info("[repair #{}] Cleared snapshots in {}ms", parentSessionId, TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNanos));
             });
         }
         return session;
@@ -729,19 +792,35 @@
 
     public void handleMessage(Message<? extends RepairMessage> message)
     {
-        RepairJobDesc desc = message.payload.desc;
+        RepairMessage payload = message.payload;
+        RepairJobDesc desc = payload.desc;
         RepairSession session = sessions.get(desc.sessionId);
+
         if (session == null)
+        {
+            if (payload instanceof ValidationResponse)
+            {
+                // The trees may be off-heap, and will therefore need to be released.
+                ValidationResponse validation = (ValidationResponse) payload;
+                MerkleTrees trees = validation.trees;
+
+                // The response from a failed validation won't have any trees.
+                if (trees != null)
+                    trees.release();
+            }
+
             return;
+        }
+
         switch (message.verb())
         {
             case VALIDATION_RSP:
-                ValidationResponse validation = (ValidationResponse) message.payload;
+                ValidationResponse validation = (ValidationResponse) payload;
                 session.validationComplete(desc, message.from(), validation.trees);
                 break;
             case SYNC_RSP:
                 // one of replica is synced.
-                SyncResponse sync = (SyncResponse) message.payload;
+                SyncResponse sync = (SyncResponse) payload;
                 session.syncComplete(desc, sync.nodes, sync.success, sync.summaries);
                 break;
             default:
@@ -815,10 +894,10 @@
         public String toString()
         {
             return "ParentRepairSession{" +
-                    "columnFamilyStores=" + columnFamilyStores +
-                    ", ranges=" + ranges +
-                    ", repairedAt=" + repairedAt +
-                    '}';
+                   "columnFamilyStores=" + columnFamilyStores +
+                   ", ranges=" + ranges +
+                   ", repairedAt=" + repairedAt +
+                   '}';
         }
 
         public void setHasSnapshots()
@@ -831,11 +910,25 @@
     If the coordinator node dies we should remove the parent repair session from the other nodes.
     This uses the same notifications as we get in RepairSession
      */
-    public void onJoin(InetAddressAndPort endpoint, EndpointState epState) {}
-    public void beforeChange(InetAddressAndPort endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue) {}
-    public void onChange(InetAddressAndPort endpoint, ApplicationState state, VersionedValue value) {}
-    public void onAlive(InetAddressAndPort endpoint, EndpointState state) {}
-    public void onDead(InetAddressAndPort endpoint, EndpointState state) {}
+    public void onJoin(InetAddressAndPort endpoint, EndpointState epState)
+    {
+    }
+
+    public void beforeChange(InetAddressAndPort endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue)
+    {
+    }
+
+    public void onChange(InetAddressAndPort endpoint, ApplicationState state, VersionedValue value)
+    {
+    }
+
+    public void onAlive(InetAddressAndPort endpoint, EndpointState state)
+    {
+    }
+
+    public void onDead(InetAddressAndPort endpoint, EndpointState state)
+    {
+    }
 
     public void onRemove(InetAddressAndPort endpoint)
     {
@@ -849,7 +942,7 @@
 
     /**
      * Something has happened to a remote node - if that node is a coordinator, we mark the parent repair session id as failed.
-     *
+     * <p>
      * The fail marker is kept in the map for 24h to make sure that if the coordinator does not agree
      * that the repair failed, we need to fail the entire repair session
      *
@@ -880,8 +973,8 @@
      */
     public void abort(Predicate<ParentRepairSession> predicate, String message)
     {
-        Set<UUID> parentSessionsToRemove = new HashSet<>();
-        for (Map.Entry<UUID, ParentRepairSession> repairSessionEntry : parentRepairSessions.entrySet())
+        Set<TimeUUID> parentSessionsToRemove = new HashSet<>();
+        for (Map.Entry<TimeUUID, ParentRepairSession> repairSessionEntry : parentRepairSessions.entrySet())
         {
             if (predicate.test(repairSessionEntry.getValue()))
                 parentSessionsToRemove.add(repairSessionEntry.getKey());
@@ -904,4 +997,126 @@
     {
         return sessions.size();
     }
+
+    public Future<?> repairPaxosForTopologyChange(String ksName, Collection<Range<Token>> ranges, String reason)
+    {
+        if (!paxosRepairEnabled())
+        {
+            logger.warn("Not running paxos repair for topology change because paxos repair has been disabled");
+            return ImmediateFuture.success(null);
+        }
+
+        if (ranges.isEmpty())
+        {
+            logger.warn("Not running paxos repair for topology change because there are no ranges to repair");
+            return ImmediateFuture.success(null);
+        }
+        List<TableMetadata> tables = Lists.newArrayList(Schema.instance.getKeyspaceMetadata(ksName).tables);
+        List<Future<Void>> futures = new ArrayList<>(ranges.size() * tables.size());
+        Keyspace keyspace = Keyspace.open(ksName);
+        AbstractReplicationStrategy replication = keyspace.getReplicationStrategy();
+        for (Range<Token> range: ranges)
+        {
+            for (TableMetadata table : tables)
+            {
+                Set<InetAddressAndPort> endpoints = replication.getNaturalReplicas(range.right).filter(FailureDetector.isReplicaAlive).endpoints();
+                if (!PaxosRepair.hasSufficientLiveNodesForTopologyChange(keyspace, range, endpoints))
+                {
+                    Set<InetAddressAndPort> downEndpoints = replication.getNaturalReplicas(range.right).filter(e -> !endpoints.contains(e)).endpoints();
+                    downEndpoints.removeAll(endpoints);
+
+                    throw new RuntimeException(String.format("Insufficient live nodes to repair paxos for %s in %s for %s.\n" +
+                                                             "There must be enough live nodes to satisfy EACH_QUORUM, but the following nodes are down: %s\n" +
+                                                             "This check can be skipped by setting either the yaml property skip_paxos_repair_on_topology_change or " +
+                                                             "the system property cassandra.skip_paxos_repair_on_topology_change to false. The jmx property " +
+                                                             "StorageService.SkipPaxosRepairOnTopologyChange can also be set to false to temporarily disable without " +
+                                                             "restarting the node\n" +
+                                                             "Individual keyspaces can be skipped with the yaml property skip_paxos_repair_on_topology_change_keyspaces, the " +
+                                                             "system property cassandra.skip_paxos_repair_on_topology_change_keyspaces, or temporarily with the jmx " +
+                                                             "property StorageService.SkipPaxosRepairOnTopologyChangeKeyspaces\n" +
+                                                             "Skipping this check can lead to paxos correctness issues",
+                                                             range, ksName, reason, downEndpoints));
+                }
+                EndpointsForToken pending = StorageService.instance.getTokenMetadata().pendingEndpointsForToken(range.right, ksName);
+                if (pending.size() > 1 && !Boolean.getBoolean("cassandra.paxos_repair_allow_multiple_pending_unsafe"))
+                {
+                    throw new RuntimeException(String.format("Cannot begin paxos auto repair for %s in %s.%s, multiple pending endpoints exist for range (%s). " +
+                                                             "Set -Dcassandra.paxos_repair_allow_multiple_pending_unsafe=true to skip this check",
+                                                             range, table.keyspace, table.name, pending));
+
+                }
+                Future<Void> future = PaxosCleanup.cleanup(endpoints, table, Collections.singleton(range), false, repairCommandExecutor());
+                futures.add(future);
+            }
+        }
+
+        return FutureCombiner.allOf(futures);
+    }
+
+    public int getPaxosRepairParallelism()
+    {
+        return DatabaseDescriptor.getPaxosRepairParallelism();
+    }
+
+    public void setPaxosRepairParallelism(int v)
+    {
+        DatabaseDescriptor.setPaxosRepairParallelism(v);
+    }
+
+    public void shutdownNowAndWait(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
+    {
+        ExecutorUtils.shutdownNowAndWait(timeout, unit, snapshotExecutor);
+    }
+
+    public Collection<CoordinatorState> coordinators()
+    {
+        return repairs.asMap().values();
+    }
+
+    public CoordinatorState coordinator(TimeUUID id)
+    {
+        return repairs.getIfPresent(id);
+    }
+
+    public void register(CoordinatorState state)
+    {
+        repairs.put(state.id, state);
+    }
+
+    public boolean register(ParticipateState state)
+    {
+        synchronized (participates)
+        {
+            ParticipateState current = participates.getIfPresent(state.id);
+            if (current != null)
+                return false;
+            participates.put(state.id, state);
+        }
+        return true;
+    }
+
+    public Collection<ParticipateState> participates()
+    {
+        return participates.asMap().values();
+    }
+
+    public ParticipateState participate(TimeUUID id)
+    {
+        return participates.getIfPresent(id);
+    }
+
+    public Collection<ValidationState> validations()
+    {
+        return participates.asMap().values().stream().flatMap(p -> p.validations().stream()).collect(Collectors.toList());
+    }
+
+    public ValidationState validation(UUID id)
+    {
+        for (ValidationState state : validations())
+        {
+            if (state.id.equals(id))
+                return state;
+        }
+        return null;
+    }
 }
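
repairPaxosForTopologyChange above collects one cleanup future per (range, table) pair and joins them through FutureCombiner.allOf. A JDK-only analogue of that fan-out and combine shape, with CompletableFuture standing in for Cassandra's Future types:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    public class FanOutCombineSketch
    {
        public static void main(String[] args)
        {
            List<String> ranges = List.of("(0,100]", "(100,200]");
            List<String> tables = List.of("ks.t1", "ks.t2");

            List<CompletableFuture<Void>> futures = new ArrayList<>(ranges.size() * tables.size());
            for (String range : ranges)
                for (String table : tables)
                    futures.add(CompletableFuture.runAsync(
                        () -> System.out.println("cleanup " + table + " " + range)));

            // A single future that completes once every per-(range, table) task has finished.
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
            System.out.println("all paxos cleanups finished");
        }
    }
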
diff --git a/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java b/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java
index b68cb6f..009ad56 100644
--- a/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java
+++ b/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java
@@ -29,9 +29,14 @@
     public List<Map<String, String>> getSessions(boolean all, String rangesStr);
     public void failSession(String session, boolean force);
 
+    @Deprecated
     public void setRepairSessionSpaceInMegabytes(int sizeInMegabytes);
+    @Deprecated
     public int getRepairSessionSpaceInMegabytes();
 
+    public void setRepairSessionSpaceInMebibytes(int sizeInMebibytes);
+    public int getRepairSessionSpaceInMebibytes();
+
     public boolean getUseOffheapMerkleTrees();
     public void setUseOffheapMerkleTrees(boolean value);
 
@@ -50,4 +55,6 @@
      * @return current size of the internal cache holding {@link ActiveRepairService.ParentRepairSession} instances
      */
     int parentRepairSessionsCount();
+    public int getPaxosRepairParallelism();
+    public void setPaxosRepairParallelism(int v);
 }
diff --git a/src/java/org/apache/cassandra/service/BatchlogResponseHandler.java b/src/java/org/apache/cassandra/service/BatchlogResponseHandler.java
index 155f42d..c1c828f 100644
--- a/src/java/org/apache/cassandra/service/BatchlogResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/BatchlogResponseHandler.java
@@ -36,7 +36,7 @@
 
     public BatchlogResponseHandler(AbstractWriteResponseHandler<T> wrapped, int requiredBeforeFinish, BatchlogCleanup cleanup, long queryStartNanoTime)
     {
-        super(wrapped.replicaPlan, wrapped.callback, wrapped.writeType, queryStartNanoTime);
+        super(wrapped.replicaPlan, wrapped.callback, wrapped.writeType, null, queryStartNanoTime);
         this.wrapped = wrapped;
         this.requiredBeforeFinish = requiredBeforeFinish;
         this.cleanup = cleanup;
diff --git a/src/java/org/apache/cassandra/service/CASRequest.java b/src/java/org/apache/cassandra/service/CASRequest.java
index 88fb9bd..19966c8 100644
--- a/src/java/org/apache/cassandra/service/CASRequest.java
+++ b/src/java/org/apache/cassandra/service/CASRequest.java
@@ -17,10 +17,11 @@
  */
 package org.apache.cassandra.service;
 
-import org.apache.cassandra.db.SinglePartitionReadQuery;
+import org.apache.cassandra.db.SinglePartitionReadCommand;
 import org.apache.cassandra.db.partitions.FilteredPartition;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.service.paxos.Ballot;
 
 /**
  * Abstract the conditions and updates for a CAS operation.
@@ -30,7 +31,7 @@
     /**
      * The command to use to fetch the value to compare for the CAS.
      */
-    public SinglePartitionReadQuery readCommand(int nowInSec);
+    public SinglePartitionReadCommand readCommand(int nowInSec);
 
     /**
      * Returns whether the provided CF, that represents the values fetched using the
@@ -42,5 +43,5 @@
      * The updates to perform of a CAS success. The values fetched using the readFilter()
      * are passed as argument.
      */
-    public PartitionUpdate makeUpdates(FilteredPartition current) throws InvalidRequestException;
+    public PartitionUpdate makeUpdates(FilteredPartition current, ClientState clientState, Ballot ballot) throws InvalidRequestException;
 }
diff --git a/src/java/org/apache/cassandra/service/CacheService.java b/src/java/org/apache/cassandra/service/CacheService.java
index 0a281ad..9c23e71 100644
--- a/src/java/org/apache/cassandra/service/CacheService.java
+++ b/src/java/org/apache/cassandra/service/CacheService.java
@@ -19,17 +19,10 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-import com.google.common.util.concurrent.Futures;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -45,6 +38,9 @@
 import org.apache.cassandra.db.partitions.CachedBTreePartition;
 import org.apache.cassandra.db.partitions.CachedPartition;
 import org.apache.cassandra.db.rows.*;
+import org.apache.cassandra.io.sstable.SSTableId;
+import org.apache.cassandra.io.sstable.SSTableIdFactory;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
@@ -54,6 +50,8 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 
 public class CacheService implements CacheServiceMBean
 {
@@ -100,9 +98,9 @@
      */
     private AutoSavingCache<KeyCacheKey, RowIndexEntry> initKeyCache()
     {
-        logger.info("Initializing key cache with capacity of {} MBs.", DatabaseDescriptor.getKeyCacheSizeInMB());
+        logger.info("Initializing key cache with capacity of {} MiBs.", DatabaseDescriptor.getKeyCacheSizeInMiB());
 
-        long keyCacheInMemoryCapacity = DatabaseDescriptor.getKeyCacheSizeInMB() * 1024 * 1024;
+        long keyCacheInMemoryCapacity = DatabaseDescriptor.getKeyCacheSizeInMiB() * 1024 * 1024;
 
         // as values are constant size we can use singleton weigher
         // where 48 = 40 bytes (average size of the key) + 8 bytes (size of value)
@@ -122,10 +120,10 @@
      */
     private AutoSavingCache<RowCacheKey, IRowCacheEntry> initRowCache()
     {
-        logger.info("Initializing row cache with capacity of {} MBs", DatabaseDescriptor.getRowCacheSizeInMB());
+        logger.info("Initializing row cache with capacity of {} MiBs", DatabaseDescriptor.getRowCacheSizeInMiB());
 
         CacheProvider<RowCacheKey, IRowCacheEntry> cacheProvider;
-        String cacheProviderClassName = DatabaseDescriptor.getRowCacheSizeInMB() > 0
+        String cacheProviderClassName = DatabaseDescriptor.getRowCacheSizeInMiB() > 0
                                         ? DatabaseDescriptor.getRowCacheClassName() : "org.apache.cassandra.cache.NopCacheProvider";
         try
         {
@@ -151,9 +149,9 @@
 
     private AutoSavingCache<CounterCacheKey, ClockAndCount> initCounterCache()
     {
-        logger.info("Initializing counter cache with capacity of {} MBs", DatabaseDescriptor.getCounterCacheSizeInMB());
+        logger.info("Initializing counter cache with capacity of {} MiBs", DatabaseDescriptor.getCounterCacheSizeInMiB());
 
-        long capacity = DatabaseDescriptor.getCounterCacheSizeInMB() * 1024 * 1024;
+        long capacity = DatabaseDescriptor.getCounterCacheSizeInMiB() * 1024 * 1024;
 
         AutoSavingCache<CounterCacheKey, ClockAndCount> cache =
             new AutoSavingCache<>(CaffeineCache.create(capacity),
@@ -419,7 +417,7 @@
         // For column families with many SSTables the linear nature of getSSTables slowed down KeyCache loading
         // by orders of magnitude. So we cache the sstables once and rely on cleanupAfterDeserialize to cleanup any
         // cached state we may have accumulated during the load.
-        Map<Pair<String, String>, Map<Integer, SSTableReader>> cachedSSTableReaders = new ConcurrentHashMap<>();
+        Map<Pair<String, String>, Map<SSTableId, SSTableReader>> cachedSSTableReaders = new ConcurrentHashMap<>();
 
         public void serialize(KeyCacheKey key, DataOutputPlus out, ColumnFamilyStore cfs) throws IOException
         {
@@ -431,7 +429,15 @@
             tableMetadata.id.serialize(out);
             out.writeUTF(tableMetadata.indexName().orElse(""));
             ByteArrayUtil.writeWithLength(key.key, out);
-            out.writeInt(key.desc.generation);
+            if (key.desc.id instanceof SequenceBasedSSTableId)
+            {
+                out.writeInt(((SequenceBasedSSTableId) key.desc.id).generation);
+            }
+            else
+            {
+                out.writeInt(Integer.MIN_VALUE); // backwards compatibility for "int based generation only"
+                ByteBufferUtil.writeWithShortLength(key.desc.id.asBytes(), out);
+            }
             out.writeBoolean(true);
 
             SerializationHeader header = new SerializationHeader(false, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS);
@@ -452,23 +458,26 @@
             }
             ByteBuffer key = ByteBufferUtil.read(input, keyLength);
             int generation = input.readInt();
+            SSTableId generationId = generation == Integer.MIN_VALUE
+                                                   ? SSTableIdFactory.instance.fromBytes(ByteBufferUtil.readWithShortLength(input))
+                                                   : new SequenceBasedSSTableId(generation); // Backwards compatibility for "int based generation sstables"
             input.readBoolean(); // backwards compatibility for "promoted indexes" boolean
             SSTableReader reader = null;
             if (!skipEntry)
             {
                 Pair<String, String> qualifiedName = Pair.create(cfs.metadata.keyspace, cfs.metadata.name);
-                Map<Integer, SSTableReader> generationToSSTableReader = cachedSSTableReaders.get(qualifiedName);
+                Map<SSTableId, SSTableReader> generationToSSTableReader = cachedSSTableReaders.get(qualifiedName);
                 if (generationToSSTableReader == null)
                 {
                     generationToSSTableReader = new HashMap<>(cfs.getLiveSSTables().size());
                     for (SSTableReader ssTableReader : cfs.getSSTables(SSTableSet.CANONICAL))
                     {
-                        generationToSSTableReader.put(ssTableReader.descriptor.generation, ssTableReader);
+                        generationToSSTableReader.put(ssTableReader.descriptor.id, ssTableReader);
                     }
 
                     cachedSSTableReaders.putIfAbsent(qualifiedName, generationToSSTableReader);
                 }
-                reader = generationToSSTableReader.get(generation);
+                reader = generationToSSTableReader.get(generationId);
             }
 
             if (skipEntry || reader == null)
@@ -485,7 +494,7 @@
                                                                                                                 reader.descriptor.version,
                                                                                                                 reader.header);
             RowIndexEntry<?> entry = indexSerializer.deserializeForCache(input);
-            return Futures.immediateFuture(Pair.create(new KeyCacheKey(cfs.metadata(), reader.descriptor, key), entry));
+            return ImmediateFuture.success(Pair.create(new KeyCacheKey(cfs.metadata(), reader.descriptor, key), entry));
         }
 
         public void cleanupAfterDeserialize()
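
The key-cache hunks above change the saved-cache entry format: a sequence-based SSTable id is still written as a plain int, while any other id kind is flagged with an Integer.MIN_VALUE sentinel followed by a short-length-prefixed byte blob. A minimal, self-contained sketch of that scheme using only JDK streams (class and method names here are illustrative, not Cassandra APIs):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class SSTableIdCodecSketch
{
    // Write path: keep the legacy int slot so old readers still parse the entry.
    static void writeLegacy(DataOutputStream out, int generation) throws IOException
    {
        out.writeInt(generation);                  // sequence-based id, old format
    }

    static void writeNewStyle(DataOutputStream out, byte[] idBytes) throws IOException
    {
        out.writeInt(Integer.MIN_VALUE);           // sentinel: a new-style id follows
        out.writeShort(idBytes.length);            // short length prefix, as in the hunk
        out.write(idBytes);
    }

    // Read path: mirror of the ternary in deserialize() above.
    static Object read(DataInputStream in) throws IOException
    {
        int generation = in.readInt();
        if (generation != Integer.MIN_VALUE)
            return generation;                     // legacy, sequence-based id
        byte[] id = new byte[in.readUnsignedShort()];
        in.readFully(id);
        return id;                                 // opaque new-style id bytes
    }
}
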
diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java
index 3e8277c..f1bca66 100644
--- a/src/java/org/apache/cassandra/service/CassandraDaemon.java
+++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.service;
 
-import java.io.File;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryPoolMXBean;
@@ -26,7 +25,6 @@
 import java.net.UnknownHostException;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -37,8 +35,8 @@
 import javax.management.remote.JMXConnectorServer;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.collect.ImmutableList;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -52,6 +50,7 @@
 import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
 
 import org.apache.cassandra.audit.AuditLogManager;
+import org.apache.cassandra.auth.AuthCacheService;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.QueryProcessor;
@@ -59,8 +58,7 @@
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SizeEstimatesRecorder;
 import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.db.SystemKeyspaceMigrator40;
-import org.apache.cassandra.db.WindowsFailedSnapshotTracker;
+import org.apache.cassandra.db.SystemKeyspaceMigrator41;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.virtual.SystemViewsKeyspace;
 import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
@@ -68,27 +66,27 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.StartupException;
 import org.apache.cassandra.gms.Gossiper;
-import org.apache.cassandra.io.FSError;
-import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.SSTableHeaderFix;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.metrics.CassandraMetricsRegistry;
 import org.apache.cassandra.metrics.DefaultNameFactory;
-import org.apache.cassandra.metrics.StorageMetrics;
 import org.apache.cassandra.net.StartupClusterConnectivityChecker;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.security.ThreadAwareSecurityManager;
-import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.streaming.StreamManager;
+import org.apache.cassandra.service.paxos.PaxosState;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JMXServerUtils;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.Mx4jTool;
 import org.apache.cassandra.utils.NativeLibrary;
-import org.apache.cassandra.utils.WindowsTimer;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static org.apache.cassandra.config.CassandraRelevantProperties.CASSANDRA_FOREGROUND;
@@ -217,7 +215,7 @@
     public CassandraDaemon(boolean runManaged)
     {
         this.runManaged = runManaged;
-        this.startupChecks = new StartupChecks().withDefaultTests();
+        this.startupChecks = new StartupChecks().withDefaultTests().withTest(new FileSystemOwnershipCheck());
         this.setupCompleted = false;
     }
 
@@ -243,10 +241,6 @@
             exitOrFail(StartupException.ERR_WRONG_DISK_STATE, e.getMessage(), e);
         }
 
-        // Delete any failed snapshot deletions on Windows - see CASSANDRA-9658
-        if (FBUtilities.isWindows)
-            WindowsFailedSnapshotTracker.deleteOldSnapshots();
-
         maybeInitJmx();
 
         Mx4jTool.maybeLoad();
@@ -274,9 +268,9 @@
         // This should be the first write to SystemKeyspace (CASSANDRA-11742)
         SystemKeyspace.persistLocalMetadata();
 
-        Thread.setDefaultUncaughtExceptionHandler(CassandraDaemon::uncaughtException);
+        Thread.setDefaultUncaughtExceptionHandler(JVMStabilityInspector::uncaughtException);
 
-        SystemKeyspaceMigrator40.migrate();
+        SystemKeyspaceMigrator41.migrate();
 
         // Populate token metadata before flushing, for token-aware sstable partitioning (#6696)
         StorageService.instance.populateTokenMetadata();
@@ -333,7 +327,6 @@
             }
         }
 
-
         try
         {
             loadRowAndKeyCacheAsync().get();
@@ -355,6 +348,9 @@
         }
 
         // Replay any CommitLogSegments found on disk
+        PaxosState.initializeTrackers();
+
+        // replay the log if necessary
         try
         {
             CommitLog.instance.recoverSegmentsOnDisk();
@@ -367,7 +363,14 @@
         // Re-populate token metadata after commit log recover (new peers might be loaded onto system keyspace #10293)
         StorageService.instance.populateTokenMetadata();
 
-        SystemKeyspace.finishStartup();
+        try
+        {
+            PaxosState.maybeRebuildUncommittedState();
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException(e);
+        }
 
         // Clean up system.size_estimates entries left lying around from missed keyspace drops (CASSANDRA-14905)
         StorageService.instance.cleanupSizeEstimates();
@@ -379,6 +382,7 @@
             ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SizeEstimatesRecorder.instance, 30, sizeRecorderInterval, TimeUnit.SECONDS);
 
         ActiveRepairService.instance.start();
+        StreamManager.instance.start();
 
         // Prepared statements
         QueryProcessor.instance.preloadPreparedStatements();
@@ -435,7 +439,7 @@
             logger.debug("Completed submission of build tasks for any materialized views defined at startup");
         };
 
-        ScheduledExecutors.optionalTasks.schedule(viewRebuild, StorageService.RING_DELAY, TimeUnit.MILLISECONDS);
+        ScheduledExecutors.optionalTasks.schedule(viewRebuild, StorageService.RING_DELAY_MILLIS, TimeUnit.MILLISECONDS);
 
         if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
             Gossiper.waitToSettle();
@@ -479,6 +483,15 @@
 
         initializeClientTransports();
 
+        // Ensure every cache you want pre-warmed during startup has been registered before this call; be wary of adding
+        // further initialization between this point and completeSetup().
+        if (DatabaseDescriptor.getAuthCacheWarmingEnabled())
+            AuthCacheService.instance.warmCaches();
+        else
+            logger.info("Prewarming of auth caches is disabled");
+
+        PaxosState.startAutoRepairs();
+
         completeSetup();
     }
 
@@ -486,7 +499,7 @@
     {
         try
         {
-            startupChecks.verify();
+            startupChecks.verify(DatabaseDescriptor.getStartupChecksOptions());
         }
         catch (StartupException e)
         {
@@ -515,7 +528,7 @@
         //     the system keyspace location configured by the user (upgrade to 4.0)
         //  3) The system data are stored in the first data location and need to be moved to
         //     the system keyspace location configured by the user (system_data_file_directory has been configured)
-        Path target = Paths.get(DatabaseDescriptor.getLocalSystemKeyspacesDataFileLocations()[0]);
+        Path target = File.getPath(DatabaseDescriptor.getLocalSystemKeyspacesDataFileLocations()[0]);
 
         String[] nonLocalSystemKeyspacesFileLocations = DatabaseDescriptor.getNonLocalSystemKeyspacesDataFileLocations();
         String[] sources = DatabaseDescriptor.useSpecificLocationForLocalSystemData() ? nonLocalSystemKeyspacesFileLocations
@@ -525,7 +538,7 @@
 
         for (String source : sources)
         {
-            Path dataFileLocation = Paths.get(source);
+            Path dataFileLocation = File.getPath(source);
 
             if (!Files.exists(dataFileLocation))
                 continue;
@@ -574,33 +587,18 @@
             nativeTransportService = new NativeTransportService();
     }
 
-    @VisibleForTesting
-    public static void uncaughtException(Thread t, Throwable e)
-    {
-        StorageMetrics.uncaughtExceptions.inc();
-        logger.error("Exception in thread {}", t, e);
-        Tracing.trace("Exception in thread {}", t, e);
-        for (Throwable e2 = e; e2 != null; e2 = e2.getCause())
-        {
-            // make sure error gets logged exactly once.
-            if (e2 != e && (e2 instanceof FSError || e2 instanceof CorruptSSTableException))
-                logger.error("Exception in thread {}", t, e2);
-        }
-        JVMStabilityInspector.inspectThrowable(e);
-    }
-
     /*
      * Asynchronously load the row and key cache in one off threads and return a compound future of the result.
      * Error handling is pushed into the cache load since cache loads are allowed to fail and are handled by logging.
      */
-    private ListenableFuture<?> loadRowAndKeyCacheAsync()
+    private Future<?> loadRowAndKeyCacheAsync()
     {
-        final ListenableFuture<Integer> keyCacheLoad = CacheService.instance.keyCache.loadSavedAsync();
+        final Future<Integer> keyCacheLoad = CacheService.instance.keyCache.loadSavedAsync();
 
-        final ListenableFuture<Integer> rowCacheLoad = CacheService.instance.rowCache.loadSavedAsync();
+        final Future<Integer> rowCacheLoad = CacheService.instance.rowCache.loadSavedAsync();
 
         @SuppressWarnings("unchecked")
-        ListenableFuture<List<Integer>> retval = Futures.successfulAsList(keyCacheLoad, rowCacheLoad);
+        Future<List<Integer>> retval = FutureCombiner.allOf(ImmutableList.of(keyCacheLoad, rowCacheLoad));
 
         return retval;
     }
@@ -710,11 +708,6 @@
         destroyClientTransports();
         StorageService.instance.setRpcReady(false);
 
-        // On windows, we need to stop the entire system as prunsrv doesn't have the jsvc hooks
-        // We rely on the shutdown hook to drain the node
-        if (FBUtilities.isWindows)
-            System.exit(0);
-
         if (jmxServer != null)
         {
             try
@@ -755,13 +748,6 @@
 
             registerNativeAccess();
 
-            if (FBUtilities.isWindows)
-            {
-                // We need to adjust the system timer on windows from the default 15ms down to the minimum of 1ms as this
-                // impacts timer intervals, thread scheduling, driver interrupts, etc.
-                WindowsTimer.startTimerPeriod(DatabaseDescriptor.getWindowsTimerInterval());
-            }
-
             setup();
 
             String pidFile = CASSANDRA_PID_FILE.getString();
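
For the loadRowAndKeyCacheAsync() change above (Guava's ListenableFuture replaced by Cassandra's own Future plus FutureCombiner.allOf), the shape of the pattern can be sketched with plain JDK futures. This is only an analogy, not the project's API, and it relies on the assumption stated in the original comment that each cache load handles its own errors and completes normally:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

final class CacheLoadSketch
{
    // Start both loads concurrently, return one future that completes when both do.
    static CompletableFuture<List<Integer>> loadRowAndKeyCacheAsync(CompletableFuture<Integer> keyCacheLoad,
                                                                    CompletableFuture<Integer> rowCacheLoad)
    {
        return CompletableFuture.allOf(keyCacheLoad, rowCacheLoad)
                                .thenApply(done -> Arrays.asList(keyCacheLoad.join(), rowCacheLoad.join()));
    }
}
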
diff --git a/src/java/org/apache/cassandra/service/ClientState.java b/src/java/org/apache/cassandra/service/ClientState.java
index f76e7e3..9e35c7f 100644
--- a/src/java/org/apache/cassandra/service/ClientState.java
+++ b/src/java/org/apache/cassandra/service/ClientState.java
@@ -22,10 +22,17 @@
 import java.net.SocketAddress;
 import java.util.Arrays;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,6 +58,8 @@
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MD5Digest;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * State related to a client connection.
  */
@@ -119,6 +128,9 @@
     // Driver String for the client
     private volatile String driverName;
     private volatile String driverVersion;
+    
+    // Options provided by the client
+    private volatile Map<String,String> clientOptions;
 
     // The biggest timestamp that was returned by getTimestamp/assigned to a query. This is global to ensure that the
     // timestamp assigned are strictly monotonic on a node, which is likely what user expect intuitively (more likely,
@@ -126,6 +138,14 @@
     // is unrealistic expectation, doing it node-wise is easy).
     private static final AtomicLong lastTimestampMicros = new AtomicLong(0);
 
+    @VisibleForTesting
+    public static void resetLastTimestamp(long nowMillis)
+    {
+        long nowMicros = TimeUnit.MILLISECONDS.toMicros(nowMillis);
+        if (lastTimestampMicros.get() > nowMicros)
+            lastTimestampMicros.set(nowMicros);
+    }
+
     /**
      * Construct a new, empty ClientState for internal calls.
      */
@@ -151,6 +171,7 @@
         this.keyspace = source.keyspace;
         this.driverName = source.driverName;
         this.driverVersion = source.driverVersion;
+        this.clientOptions = source.clientOptions;
     }
 
     /**
@@ -200,7 +221,7 @@
     {
         while (true)
         {
-            long current = System.currentTimeMillis() * 1000;
+            long current = currentTimeMillis() * 1000;
             long last = lastTimestampMicros.get();
             long tstamp = last >= current ? last + 1 : current;
             if (lastTimestampMicros.compareAndSet(last, tstamp))
@@ -241,7 +262,7 @@
      * with a clock in the future compared to the local one), we use the last proposal timestamp plus 1, ensuring
      * progress.
      *
-     * @param minTimestampToUse the max timestamp of the last proposal accepted by replica having responded
+     * @param minUnixMicros the max timestamp of the last proposal accepted by a replica that has responded
      * to the prepare phase of the paxos round this is for. In practice, that's the minimum timestamp this method
      * may return.
      * @return a timestamp suitable for a Paxos proposal (using the reasoning described above). Note that
@@ -250,22 +271,27 @@
      * it may be returned multiple times). Note that we still ensure Paxos "ballot" are unique (for different
      * proposal) by (securely) randomizing the non-timestamp part of the UUID.
      */
-    public long getTimestampForPaxos(long minTimestampToUse)
+    public static long getTimestampForPaxos(long minUnixMicros)
     {
         while (true)
         {
-            long current = Math.max(System.currentTimeMillis() * 1000, minTimestampToUse);
+            long current = Math.max(currentTimeMillis() * 1000, minUnixMicros);
             long last = lastTimestampMicros.get();
             long tstamp = last >= current ? last + 1 : current;
             // Note that if we ended up picking minTimestampMicrosToUse (it was "in the future"), we don't
             // want to change the local clock, otherwise a single node in the future could corrupt the clock
             // of all nodes and for all inserts (since non-paxos inserts also use lastTimestampMicros).
             // See CASSANDRA-11991
-            if (tstamp == minTimestampToUse || lastTimestampMicros.compareAndSet(last, tstamp))
+            if (tstamp == minUnixMicros || lastTimestampMicros.compareAndSet(last, tstamp))
                 return tstamp;
         }
     }
 
+    public static long getLastTimestampMicros()
+    {
+        return lastTimestampMicros.get();
+    }
+
     public Optional<String> getDriverName()
     {
         return Optional.ofNullable(driverName);
@@ -276,6 +302,11 @@
         return Optional.ofNullable(driverVersion);
     }
 
+    public Optional<Map<String,String>> getClientOptions()
+    {
+        return Optional.ofNullable(clientOptions);
+    }
+
     public void setDriverName(String driverName)
     {
         this.driverName = driverName;
@@ -285,6 +316,11 @@
     {
         this.driverVersion = driverVersion;
     }
+    
+    public void setClientOptions(Map<String,String> clientOptions)
+    {
+        this.clientOptions = ImmutableMap.copyOf(clientOptions);
+    }
 
     public static QueryHandler getCQLQueryHandler()
     {
@@ -358,6 +394,11 @@
         ensurePermission(keyspace, perm, DataResource.keyspace(keyspace));
     }
 
+    public void ensureAllTablesPermission(String keyspace, Permission perm)
+    {
+        ensurePermission(keyspace, perm, DataResource.allTables(keyspace));
+    }
+
     public void ensureTablePermission(String keyspace, String table, Permission perm)
     {
         ensurePermission(keyspace, perm, DataResource.table(keyspace, table));
@@ -425,7 +466,11 @@
 
     private void ensurePermissionOnResourceChain(Permission perm, IResource resource)
     {
-        for (IResource r : Resources.chain(resource))
+        List<? extends IResource> resources = Resources.chain(resource);
+        if (DatabaseDescriptor.getAuthFromRoot())
+            resources = Lists.reverse(resources);
+
+        for (IResource r : resources)
             if (authorize(r).contains(perm))
                 return;
 
@@ -475,9 +520,37 @@
             throw new UnauthorizedException("You have to be logged in and not anonymous to perform this request");
     }
 
+    /**
+     * Checks if this user is an ordinary user (not a super or system user).
+     *
+     * @return {@code true} if this user is an ordinary user, {@code false} otherwise.
+     */
+    public boolean isOrdinaryUser()
+    {
+        return !isSuper() && !isSystem();
+    }
+
+    /**
+     * Checks if this user is a super user.
+     */
+    public boolean isSuper()
+    {
+        return !DatabaseDescriptor.getAuthenticator().requireAuthentication() || (user != null && user.isSuper());
+    }
+
+    /**
+     * Checks if the user is the system user.
+     *
+     * @return {@code true} if this user is the system user, {@code false} otherwise.
+     */
+    public boolean isSystem()
+    {
+        return isInternal;
+    }
+
     public void ensureIsSuperuser(String message)
     {
-        if (DatabaseDescriptor.getAuthenticator().requireAuthentication() && (user == null || !user.isSuper()))
+        if (!isSuper())
             throw new UnauthorizedException(message);
     }
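
The getTimestamp()/getTimestampForPaxos() hunks above both allocate strictly monotonic microsecond timestamps from a shared counter. A self-contained sketch of that allocation loop (names illustrative; it mirrors the CAS logic in the diff, including the rule of not advancing the shared clock when only the caller-supplied minimum forced a "future" value):

import java.util.concurrent.atomic.AtomicLong;

final class MonotonicMicrosSketch
{
    private static final AtomicLong lastMicros = new AtomicLong(0);

    // Returns max(wall clock in micros, minUnixMicros, last + 1), keeping the shared
    // counter strictly increasing across threads via compare-and-set.
    static long nextForPaxos(long minUnixMicros)
    {
        while (true)
        {
            long current = Math.max(System.currentTimeMillis() * 1000, minUnixMicros);
            long last = lastMicros.get();
            long tstamp = last >= current ? last + 1 : current;
            // If we only picked minUnixMicros (a value "from the future"), don't
            // advance the shared clock used by ordinary, non-Paxos writes.
            if (tstamp == minUnixMicros || lastMicros.compareAndSet(last, tstamp))
                return tstamp;
        }
    }
}
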
 
diff --git a/src/java/org/apache/cassandra/service/ClientWarn.java b/src/java/org/apache/cassandra/service/ClientWarn.java
index 5a6a878..6e2d3fc 100644
--- a/src/java/org/apache/cassandra/service/ClientWarn.java
+++ b/src/java/org/apache/cassandra/service/ClientWarn.java
@@ -20,14 +20,13 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import io.netty.util.concurrent.FastThreadLocal;
-import org.apache.cassandra.concurrent.ExecutorLocal;
+import org.apache.cassandra.concurrent.ExecutorLocals;
 import org.apache.cassandra.utils.FBUtilities;
 
-public class ClientWarn implements ExecutorLocal<ClientWarn.State>
+@SuppressWarnings("resource")
+public class ClientWarn extends ExecutorLocals.Impl
 {
     private static final String TRUNCATED = " [truncated]";
-    private static final FastThreadLocal<State> warnLocal = new FastThreadLocal<>();
     public static ClientWarn instance = new ClientWarn();
 
     private ClientWarn()
@@ -36,29 +35,30 @@
 
     public State get()
     {
-        return warnLocal.get();
+        return ExecutorLocals.current().clientWarnState;
     }
 
     public void set(State value)
     {
-        warnLocal.set(value);
+        ExecutorLocals current = ExecutorLocals.current();
+        ExecutorLocals.Impl.set(current.traceState, value);
     }
 
     public void warn(String text)
     {
-        State state = warnLocal.get();
+        State state = get();
         if (state != null)
             state.add(text);
     }
 
     public void captureWarnings()
     {
-        warnLocal.set(new State());
+        set(new State());
     }
 
     public List<String> getWarnings()
     {
-        State state = warnLocal.get();
+        State state = get();
         if (state == null || state.warnings.isEmpty())
             return null;
         return state.warnings;
@@ -66,7 +66,7 @@
 
     public void resetWarnings()
     {
-        warnLocal.remove();
+        set(null);
     }
 
     public static class State
@@ -85,6 +85,5 @@
                    ? warning.substring(0, FBUtilities.MAX_UNSIGNED_SHORT - TRUNCATED.length()) + TRUNCATED
                    : warning;
         }
-
     }
 }
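
The ClientWarn rewrite above moves the captured-warnings state from a Netty FastThreadLocal into Cassandra's ExecutorLocals so it can travel with tasks handed to other executor threads. Conceptually the lifecycle it exposes is unchanged; a plain-ThreadLocal sketch of that lifecycle (illustrative only, it does not model the cross-executor propagation):

import java.util.ArrayList;
import java.util.List;

final class ClientWarnSketch
{
    private static final ThreadLocal<List<String>> WARNINGS = new ThreadLocal<>();

    // Begin capturing warnings for the current request.
    static void captureWarnings()
    {
        WARNINGS.set(new ArrayList<>());
    }

    // Record a warning only if capture is active.
    static void warn(String text)
    {
        List<String> w = WARNINGS.get();
        if (w != null)
            w.add(text);
    }

    // Return accumulated warnings, or null if none were captured.
    static List<String> getWarnings()
    {
        List<String> w = WARNINGS.get();
        return (w == null || w.isEmpty()) ? null : w;
    }

    static void resetWarnings()
    {
        WARNINGS.remove();
    }
}
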
diff --git a/src/java/org/apache/cassandra/service/DataResurrectionCheck.java b/src/java/org/apache/cassandra/service/DataResurrectionCheck.java
new file mode 100644
index 0000000..2c3b035
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/DataResurrectionCheck.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.StartupChecksOptions;
+import org.apache.cassandra.exceptions.StartupException;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.SchemaKeyspace;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Pair;
+
+import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.stream.Collectors.joining;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toSet;
+import static org.apache.cassandra.exceptions.StartupException.ERR_WRONG_DISK_STATE;
+import static org.apache.cassandra.exceptions.StartupException.ERR_WRONG_MACHINE_STATE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
+public class DataResurrectionCheck implements StartupCheck
+{
+    private static final Logger LOGGER = LoggerFactory.getLogger(DataResurrectionCheck.class);
+
+    public static final String HEARTBEAT_FILE_CONFIG_PROPERTY = "heartbeat_file";
+    public static final String EXCLUDED_KEYSPACES_CONFIG_PROPERTY = "excluded_keyspaces";
+    public static final String EXCLUDED_TABLES_CONFIG_PROPERTY = "excluded_tables";
+
+    public static final String DEFAULT_HEARTBEAT_FILE = "cassandra-heartbeat";
+
+    @JsonIgnoreProperties(ignoreUnknown = true)
+    public static class Heartbeat
+    {
+        @JsonProperty("last_heartbeat")
+        public final Instant lastHeartbeat;
+
+        /** Needed for Jackson serialization. */
+        @SuppressWarnings("unused")
+        private Heartbeat() {
+            this.lastHeartbeat = null;
+        }
+
+        public Heartbeat(Instant lastHeartbeat)
+        {
+            this.lastHeartbeat = lastHeartbeat;
+        }
+
+        public void serializeToJsonFile(File outputFile) throws IOException
+        {
+            FBUtilities.serializeToJsonFile(this, outputFile);
+        }
+
+        public static Heartbeat deserializeFromJsonFile(File file) throws IOException
+        {
+            return FBUtilities.deserializeFromJsonFile(Heartbeat.class, file);
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Heartbeat manifest = (Heartbeat) o;
+            return Objects.equals(lastHeartbeat, manifest.lastHeartbeat);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hash(lastHeartbeat);
+        }
+    }
+
+    @VisibleForTesting
+    static class TableGCPeriod
+    {
+        String table;
+        int gcPeriod;
+
+        TableGCPeriod(String table, int gcPeriod)
+        {
+            this.table = table;
+            this.gcPeriod = gcPeriod;
+        }
+    }
+
+    static File getHeartbeatFile(Map<String, Object> config)
+    {
+        String heartbeatFileConfigValue = (String) config.get(HEARTBEAT_FILE_CONFIG_PROPERTY);
+        File heartbeatFile;
+
+        if (heartbeatFileConfigValue != null)
+        {
+            heartbeatFile = new File(heartbeatFileConfigValue);
+        }
+        else
+        {
+            String[] dataFileLocations = DatabaseDescriptor.getLocalSystemKeyspacesDataFileLocations();
+            assert dataFileLocations.length != 0;
+            heartbeatFile = new File(dataFileLocations[0], DEFAULT_HEARTBEAT_FILE);
+        }
+
+        LOGGER.trace("Resolved heartbeat file for data resurrection check: " + heartbeatFile);
+
+        return heartbeatFile;
+    }
+
+    @Override
+    public StartupChecks.StartupCheckType getStartupCheckType()
+    {
+        return StartupChecks.StartupCheckType.check_data_resurrection;
+    }
+
+    @Override
+    public void execute(StartupChecksOptions options) throws StartupException
+    {
+        if (options.isDisabled(getStartupCheckType()))
+            return;
+
+        Map<String, Object> config = options.getConfig(StartupChecks.StartupCheckType.check_data_resurrection);
+        File heartbeatFile = getHeartbeatFile(config);
+
+        if (!heartbeatFile.exists())
+        {
+            LOGGER.debug("Heartbeat file {} not found! Skipping heartbeat startup check.", heartbeatFile.absolutePath());
+            return;
+        }
+
+        Heartbeat heartbeat;
+
+        try
+        {
+            heartbeat = Heartbeat.deserializeFromJsonFile(heartbeatFile);
+        }
+        catch (IOException ex)
+        {
+            throw new StartupException(ERR_WRONG_DISK_STATE, "Failed to deserialize heartbeat file " + heartbeatFile);
+        }
+
+        if (heartbeat.lastHeartbeat == null)
+            return;
+
+        long heartbeatMillis = heartbeat.lastHeartbeat.toEpochMilli();
+
+        List<Pair<String, String>> violations = new ArrayList<>();
+
+        Set<String> excludedKeyspaces = getExcludedKeyspaces(config);
+        Set<Pair<String, String>> excludedTables = getExcludedTables(config);
+
+        long currentTimeMillis = currentTimeMillis();
+
+        for (String keyspace : getKeyspaces())
+        {
+            if (excludedKeyspaces.contains(keyspace))
+                continue;
+
+            for (TableGCPeriod userTable : getTablesGcPeriods(keyspace))
+            {
+                if (excludedTables.contains(Pair.create(keyspace, userTable.table)))
+                    continue;
+
+                long gcGraceMillis = ((long) userTable.gcPeriod) * 1000;
+                if (heartbeatMillis + gcGraceMillis < currentTimeMillis)
+                    violations.add(Pair.create(keyspace, userTable.table));
+            }
+        }
+
+        if (!violations.isEmpty())
+        {
+            String invalidTables = violations.stream()
+                                             .map(p -> format("%s.%s", p.left, p.right))
+                                             .collect(joining(","));
+
+            String exceptionMessage = format("There are tables whose gc_grace_seconds is shorter than the time " +
+                                             "elapsed since this Cassandra node was last known to be up, based " +
+                                             "on its heartbeat %s with timestamp %s. The node will not start, " +
+                                             "as doing so would likely introduce data consistency " +
+                                             "issues (zombies etc). Please resolve these issues manually, " +
+                                             "then remove the heartbeat file and start the node again. Invalid tables: %s",
+                                             heartbeatFile, heartbeat.lastHeartbeat, invalidTables);
+
+            throw new StartupException(ERR_WRONG_MACHINE_STATE, exceptionMessage);
+        }
+    }
+
+    @Override
+    public void postAction(StartupChecksOptions options)
+    {
+        // Schedule heartbeating after all checks have passed, not as part of the check,
+        // as checks that run after it might still fail while we would already be heartbeating.
+        if (options.isEnabled(StartupChecks.StartupCheckType.check_data_resurrection))
+        {
+            Map<String, Object> config = options.getConfig(StartupChecks.StartupCheckType.check_data_resurrection);
+            File heartbeatFile = DataResurrectionCheck.getHeartbeatFile(config);
+
+            ScheduledExecutors.scheduledTasks.scheduleAtFixedRate(() ->
+            {
+                Heartbeat heartbeat = new Heartbeat(Instant.ofEpochMilli(Clock.Global.currentTimeMillis()));
+                try
+                {
+                    heartbeatFile.parent().createDirectoriesIfNotExists();
+                    DataResurrectionCheck.LOGGER.trace("writing heartbeat to file " + heartbeatFile);
+                    heartbeat.serializeToJsonFile(heartbeatFile);
+                }
+                catch (IOException ex)
+                {
+                    DataResurrectionCheck.LOGGER.error("Unable to serialize heartbeat to " + heartbeatFile, ex);
+                }
+            }, 0, CassandraRelevantProperties.CHECK_DATA_RESURRECTION_HEARTBEAT_PERIOD.getInt(), MILLISECONDS);
+        }
+    }
+
+    @VisibleForTesting
+    public Set<String> getExcludedKeyspaces(Map<String, Object> config)
+    {
+        String excludedKeyspacesConfigValue = (String) config.get(EXCLUDED_KEYSPACES_CONFIG_PROPERTY);
+
+        if (excludedKeyspacesConfigValue == null)
+            return Collections.emptySet();
+        else
+            return Arrays.stream(excludedKeyspacesConfigValue.trim().split(","))
+                         .map(String::trim)
+                         .collect(toSet());
+    }
+
+    @VisibleForTesting
+    public Set<Pair<String, String>> getExcludedTables(Map<String, Object> config)
+    {
+        String excludedKeyspacesConfigValue = (String) config.get(EXCLUDED_TABLES_CONFIG_PROPERTY);
+
+        if (excludedKeyspacesConfigValue == null)
+            return Collections.emptySet();
+
+        Set<Pair<String, String>> pairs = new HashSet<>();
+
+        for (String keyspaceTable : excludedKeyspacesConfigValue.trim().split(","))
+        {
+            String[] pair = keyspaceTable.trim().split("\\.");
+            if (pair.length != 2)
+                continue;
+
+            pairs.add(Pair.create(pair[0].trim(), pair[1].trim()));
+        }
+
+        return pairs;
+    }
+
+    @VisibleForTesting
+    List<String> getKeyspaces()
+    {
+        return SchemaKeyspace.fetchNonSystemKeyspaces()
+                             .stream()
+                             .map(keyspaceMetadata -> keyspaceMetadata.name)
+                             .collect(toList());
+    }
+
+    @VisibleForTesting
+    List<TableGCPeriod> getTablesGcPeriods(String userKeyspace)
+    {
+        Optional<KeyspaceMetadata> keyspaceMetadata = SchemaKeyspace.fetchNonSystemKeyspaces().get(userKeyspace);
+        if (!keyspaceMetadata.isPresent())
+            return Collections.emptyList();
+
+        KeyspaceMetadata ksmd = keyspaceMetadata.get();
+        return ksmd.tables.stream()
+                          .filter(tmd -> tmd.params.gcGraceSeconds > 0)
+                          .map(tmd -> new TableGCPeriod(tmd.name, tmd.params.gcGraceSeconds)).collect(toList());
+    }
+}
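
The new DataResurrectionCheck boils down to a single comparison per table: if the node has been down (measured from its last recorded heartbeat) for longer than that table's gc_grace_seconds, tombstones may already have been purged elsewhere and starting the node could resurrect deleted data. A hedged, standalone sketch of that predicate (names illustrative):

import java.time.Duration;
import java.time.Instant;

final class ResurrectionPredicateSketch
{
    // True when the downtime since the last heartbeat exceeds the table's gc_grace_seconds.
    static boolean violates(Instant lastHeartbeat, int gcGraceSeconds, Instant now)
    {
        return lastHeartbeat.plus(Duration.ofSeconds(gcGraceSeconds)).isBefore(now);
    }

    public static void main(String[] args)
    {
        Instant now = Instant.now();
        // Node last alive 11 days ago, table keeps tombstones for 10 days: refuse to start.
        System.out.println(violates(now.minus(Duration.ofDays(11)), 10 * 86400, now)); // true
    }
}
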
diff --git a/src/java/org/apache/cassandra/service/DatacenterSyncWriteResponseHandler.java b/src/java/org/apache/cassandra/service/DatacenterSyncWriteResponseHandler.java
index 65cf3cc..2e26bb9 100644
--- a/src/java/org/apache/cassandra/service/DatacenterSyncWriteResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/DatacenterSyncWriteResponseHandler.java
@@ -20,8 +20,10 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.NetworkTopologyStrategy;
 import org.apache.cassandra.locator.Replica;
@@ -40,13 +42,14 @@
     private final Map<String, AtomicInteger> responses = new HashMap<String, AtomicInteger>();
     private final AtomicInteger acks = new AtomicInteger(0);
 
-    public DatacenterSyncWriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan,
+    public DatacenterSyncWriteResponseHandler(ReplicaPlan.ForWrite replicaPlan,
                                               Runnable callback,
                                               WriteType writeType,
+                                              Supplier<Mutation> hintOnFailure,
                                               long queryStartNanoTime)
     {
         // Responses are managed by the map, so make it 1 for the superclass.
-        super(replicaPlan, callback, writeType, queryStartNanoTime);
+        super(replicaPlan, callback, writeType, hintOnFailure, queryStartNanoTime);
         assert replicaPlan.consistencyLevel() == ConsistencyLevel.EACH_QUORUM;
 
         if (replicaPlan.replicationStrategy() instanceof NetworkTopologyStrategy)
diff --git a/src/java/org/apache/cassandra/service/DatacenterWriteResponseHandler.java b/src/java/org/apache/cassandra/service/DatacenterWriteResponseHandler.java
index a9583a3..4920a54 100644
--- a/src/java/org/apache/cassandra/service/DatacenterWriteResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/DatacenterWriteResponseHandler.java
@@ -17,27 +17,30 @@
  */
 package org.apache.cassandra.service;
 
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.WriteType;
-import org.apache.cassandra.locator.InOurDcTester;
+import org.apache.cassandra.locator.InOurDc;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.ReplicaPlan;
 import org.apache.cassandra.net.Message;
 
 import java.util.function.Predicate;
+import java.util.function.Supplier;
 
 /**
  * This class blocks for a quorum of responses _in the local datacenter only_ (CL.LOCAL_QUORUM).
  */
 public class DatacenterWriteResponseHandler<T> extends WriteResponseHandler<T>
 {
-    private final Predicate<InetAddressAndPort> waitingFor = InOurDcTester.endpoints();
+    private final Predicate<InetAddressAndPort> waitingFor = InOurDc.endpoints();
 
-    public DatacenterWriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan,
+    public DatacenterWriteResponseHandler(ReplicaPlan.ForWrite replicaPlan,
                                           Runnable callback,
                                           WriteType writeType,
+                                          Supplier<Mutation> hintOnFailure,
                                           long queryStartNanoTime)
     {
-        super(replicaPlan, callback, writeType, queryStartNanoTime);
+        super(replicaPlan, callback, writeType, hintOnFailure, queryStartNanoTime);
         assert replicaPlan.consistencyLevel().isDatacenterLocal();
     }
 
diff --git a/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java b/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java
index ec8d8ac..8b18294 100644
--- a/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java
+++ b/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java
@@ -18,10 +18,12 @@
 
 package org.apache.cassandra.service;
 
-import java.io.File;
+
 import java.util.Set;
 
 import com.google.common.collect.ImmutableSet;
+
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -84,10 +86,10 @@
                 }
 
                 // for both read and write errors mark the path as unwritable.
-                DisallowedDirectories.maybeMarkUnwritable(e.path);
+                DisallowedDirectories.maybeMarkUnwritable(new File(e.path));
                 if (e instanceof FSReadError && shouldMaybeRemoveData(e))
                 {
-                    File directory = DisallowedDirectories.maybeMarkUnreadable(e.path);
+                    File directory = DisallowedDirectories.maybeMarkUnreadable(new File(e.path));
                     if (directory != null)
                         Keyspace.removeUnreadableSSTables(directory);
                 }
@@ -116,7 +118,8 @@
         return true;
     }
 
-    private static void handleStartupFSError(Throwable t)
+    @Override
+    public void handleStartupFSError(Throwable t)
     {
         switch (DatabaseDescriptor.getDiskFailurePolicy())
         {
diff --git a/src/java/org/apache/cassandra/service/EchoVerbHandler.java b/src/java/org/apache/cassandra/service/EchoVerbHandler.java
index 77fe4ab..228808d 100644
--- a/src/java/org/apache/cassandra/service/EchoVerbHandler.java
+++ b/src/java/org/apache/cassandra/service/EchoVerbHandler.java
@@ -19,7 +19,6 @@
  * under the License.
  *
  */
-import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.net.IVerbHandler;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
diff --git a/src/java/org/apache/cassandra/service/FailureRecordingCallback.java b/src/java/org/apache/cassandra/service/FailureRecordingCallback.java
new file mode 100644
index 0000000..c4ca8e2
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/FailureRecordingCallback.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.RequestCallbackWithFailure;
+import org.apache.cassandra.utils.concurrent.IntrusiveStack;
+
+import static org.apache.cassandra.exceptions.RequestFailureReason.TIMEOUT;
+
+public abstract class FailureRecordingCallback<T> implements RequestCallbackWithFailure<T>
+{
+    public static class FailureResponses extends IntrusiveStack<FailureResponses> implements Map.Entry<InetAddressAndPort, RequestFailureReason>
+    {
+        final InetAddressAndPort from;
+        final RequestFailureReason reason;
+
+        public FailureResponses(InetAddressAndPort from, RequestFailureReason reason)
+        {
+            this.from = from;
+            this.reason = reason;
+        }
+
+        @Override
+        public InetAddressAndPort getKey()
+        {
+            return from;
+        }
+
+        @Override
+        public RequestFailureReason getValue()
+        {
+            return reason;
+        }
+
+        @Override
+        public RequestFailureReason setValue(RequestFailureReason value)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public static <O> void push(AtomicReferenceFieldUpdater<O, FailureResponses> headUpdater, O owner, InetAddressAndPort from, RequestFailureReason reason)
+        {
+            push(headUpdater, owner, new FailureResponses(from, reason));
+        }
+
+        public static <O> void pushExclusive(AtomicReferenceFieldUpdater<O, FailureResponses> headUpdater, O owner, InetAddressAndPort from, RequestFailureReason reason)
+        {
+            pushExclusive(headUpdater, owner, new FailureResponses(from, reason));
+        }
+
+        public static FailureResponses pushExclusive(FailureResponses head, InetAddressAndPort from, RequestFailureReason reason)
+        {
+            return IntrusiveStack.pushExclusive(head, new FailureResponses(from, reason));
+        }
+
+        public static int size(FailureResponses head)
+        {
+            return IntrusiveStack.size(head);
+        }
+
+        public static Iterator<FailureResponses> iterator(FailureResponses head)
+        {
+            return IntrusiveStack.iterator(head);
+        }
+
+        public static int failureCount(FailureResponses head)
+        {
+            return (int)IntrusiveStack.accumulate(head, (f, v) -> f.reason == TIMEOUT ? v : v + 1, 0);
+        }
+    }
+
+    public static class AsMap extends AbstractMap<InetAddressAndPort, RequestFailureReason>
+    {
+        final FailureResponses head;
+        int size = -1;
+
+        AsMap(FailureResponses head)
+        {
+            this.head = head;
+        }
+
+        @Override
+        public Set<Map.Entry<InetAddressAndPort, RequestFailureReason>> entrySet()
+        {
+            return new AbstractSet<Map.Entry<InetAddressAndPort, RequestFailureReason>>()
+            {
+                @Override
+                public Iterator<Map.Entry<InetAddressAndPort, RequestFailureReason>> iterator()
+                {
+                    return (Iterator<Map.Entry<InetAddressAndPort, RequestFailureReason>>)
+                            (Iterator<?>) FailureResponses.iterator(head);
+                }
+
+                @Override
+                public int size()
+                {
+                    if (size < 0)
+                        size = FailureResponses.size(head);
+                    return size;
+                }
+            };
+        }
+
+        public int failureCount()
+        {
+            return FailureResponses.failureCount(head);
+        }
+    }
+
+    private volatile FailureResponses failureResponses;
+    private static final AtomicReferenceFieldUpdater<FailureRecordingCallback, FailureResponses> responsesUpdater = AtomicReferenceFieldUpdater.newUpdater(FailureRecordingCallback.class, FailureResponses.class, "failureResponses");
+
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason failureReason)
+    {
+        FailureResponses.push(responsesUpdater, this, from, failureReason);
+    }
+
+    protected void onFailureWithMutex(InetAddressAndPort from, RequestFailureReason failureReason)
+    {
+        FailureResponses.pushExclusive(responsesUpdater, this, from, failureReason);
+    }
+
+    protected AsMap failureReasonsAsMap()
+    {
+        return new AsMap(failureResponses);
+    }
+}
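
FailureRecordingCallback above records failure responses without locks: each (replica, reason) pair becomes a node pushed onto an intrusive singly linked stack via an AtomicReferenceFieldUpdater CAS. A simplified sketch of the same push pattern using a plain AtomicReference head (the real class makes the map entry itself the stack node and uses a field updater to avoid the extra wrapper allocation):

import java.util.concurrent.atomic.AtomicReference;

final class FailureStackSketch<K, V>
{
    private static final class Node<A, B>
    {
        final A from;
        final B reason;
        final Node<A, B> next;

        Node(A from, B reason, Node<A, B> next) { this.from = from; this.reason = reason; this.next = next; }
    }

    private final AtomicReference<Node<K, V>> head = new AtomicReference<>();

    // Lock-free push: prepend a new node, retrying the CAS if another thread won the race.
    void push(K from, V reason)
    {
        while (true)
        {
            Node<K, V> prev = head.get();
            Node<K, V> node = new Node<>(from, reason, prev);
            if (head.compareAndSet(prev, node))
                return;
        }
    }

    // Walk the list to count recorded failures (as FailureResponses.size() does above).
    int size()
    {
        int n = 0;
        for (Node<K, V> cur = head.get(); cur != null; cur = cur.next)
            n++;
        return n;
    }
}
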
diff --git a/src/java/org/apache/cassandra/service/FileSystemOwnershipCheck.java b/src/java/org/apache/cassandra/service/FileSystemOwnershipCheck.java
new file mode 100644
index 0000000..3d69c9e
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/FileSystemOwnershipCheck.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import java.io.BufferedReader;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.StartupChecksOptions;
+import org.apache.cassandra.exceptions.StartupException;
+import org.apache.cassandra.io.util.File;
+
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.check_filesystem_ownership;
+
+/**
+ * Ownership markers on disk are compatible with the Java properties file format.
+ * (https://docs.oracle.com/javase/8/docs/api/java/util/Properties.html#load-java.io.Reader-)
+ *
+ * This simple formatting is intended to enable them to be created either
+ * manually or by automated tooling using minimal standard tools (editor/shell
+ * builtins/etc).
+ * The only mandatory property is version, which must be parseable as an int
+ * and upon which the further set of required properties will depend.
+ *
+ * In version 1, two further property values are required:
+ * - volume_count
+ *   to be parsed as an int representing the number of mounted volumes where
+ *   a marker file is expected.
+ * - ownership_token
+ *   must contain a non-empty token string that can be compared to one
+ *   derived from system properties. For version 1, this is simply the cluster name.
+ *
+ * For this check to succeed, as of version 1:
+ * - There must be a single properties file found in the fs tree for each
+ *   target directory.
+ * - Every file found must contain the mandatory version property with the
+ *   literal value '1'.
+ * - The value of the ownership_token property in each file must match the
+ *   cluster name.
+ * - The value of the volume_count property must be an int which matches
+ *   the number of distinct marker files found when traversing the filesystem.
+ *
+ * In overridden implementations, you will need to override {@link #constructTokenFromProperties(Map)}
+ * and add the related *_PROPERTY values you will want the system to check on startup to confirm ownership.
+ */
+public class FileSystemOwnershipCheck implements StartupCheck
+{
+    private static final Logger logger = LoggerFactory.getLogger(FileSystemOwnershipCheck.class);
+
+    public static final String FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN = "CassandraOwnershipToken";
+    public static final String DEFAULT_FS_OWNERSHIP_FILENAME = ".cassandra_fs_ownership";
+
+    // Ownership file properties
+    static final String VERSION                                 = "version";
+    static final String VOLUME_COUNT                            = "volume_count";
+    static final String TOKEN                                   = "ownership_token";
+
+    // Error strings
+    static final String ERROR_PREFIX                            = "FS ownership check failed; ";
+    static final String MISSING_PROPERTY                        = "property '%s' required for fs ownership check not supplied";
+    static final String NO_OWNERSHIP_FILE                       = "no file found in tree for %s";
+    static final String MULTIPLE_OWNERSHIP_FILES                = "multiple files found in tree for %s";
+    static final String INCONSISTENT_FILES_FOUND                = "inconsistent ownership files found on disk: %s";
+    static final String INVALID_FILE_COUNT                      = "number of ownership files found doesn't match expected";
+    static final String MISMATCHING_TOKEN                       = "token found on disk does not match supplied";
+    static final String UNSUPPORTED_VERSION                     = "unsupported version '%s' in ownership file";
+    static final String INVALID_PROPERTY_VALUE                  = "invalid or missing value for property '%s'";
+    static final String READ_EXCEPTION                          = "error when checking for fs ownership file";
+
+    private final Supplier<Iterable<String>> dirs;
+
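+    // By default, check every data file location plus the commitlog, saved caches and hints directories.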
+    FileSystemOwnershipCheck()
+    {
+        this(() -> Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()),
+                                    Arrays.asList(DatabaseDescriptor.getCommitLogLocation(),
+                                                  DatabaseDescriptor.getSavedCachesLocation(),
+                                                  DatabaseDescriptor.getHintsDirectory().absolutePath())));
+    }
+
+    @VisibleForTesting
+    FileSystemOwnershipCheck(Supplier<Iterable<String>> dirs)
+    {
+        this.dirs = dirs;
+    }
+
+    @Override
+    public StartupChecks.StartupCheckType getStartupCheckType()
+    {
+        return check_filesystem_ownership;
+    }
+
+    @Override
+    public void execute(StartupChecksOptions options) throws StartupException
+    {
+        if (!isEnabled(options))
+        {
+            logger.info("Filesystem ownership check is not enabled.");
+            return;
+        }
+
+        Map<String, Object> config = options.getConfig(getStartupCheckType());
+
+        String expectedToken = constructTokenFromProperties(config);
+        String tokenFilename = getFsOwnershipFilename(config);
+        Map<String, Integer> foundPerTargetDir = new HashMap<>();
+        Map<Path, Properties> foundProperties = new HashMap<>();
+
+        // Step 1: Traverse the filesystem from each target dir upward, looking for marker files
+        for (String dataDir : dirs.get())
+        {
+            logger.info("Checking for fs ownership details in file hierarchy for {}", dataDir);
+            int foundFiles = 0;
+            Path dir = File.getPath(dataDir).normalize();
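+            // Walk from the data directory up to the filesystem root; a marker found at any
+            // ancestor (e.g. the mount point) counts towards this target directory.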
+            do
+            {
+                File tokenFile = resolve(dir, tokenFilename);
+                if (tokenFile.exists())
+                {
+                    foundFiles++;
+                    if (!foundProperties.containsKey(tokenFile.toPath().toAbsolutePath()))
+                    {
+                        try (BufferedReader reader = Files.newBufferedReader(tokenFile.toPath()))
+                        {
+                            Properties props = new Properties();
+                            props.load(reader);
+                            foundProperties.put(tokenFile.toPath().toAbsolutePath(), props);
+                        }
+                        catch (Exception e)
+                        {
+                            logger.error("Error reading fs ownership file from disk", e);
+                            throw exception(READ_EXCEPTION);
+                        }
+                    }
+                }
+                dir = dir.getParent();
+            } while (dir != null);
+
+            foundPerTargetDir.put(dataDir, foundFiles);
+        }
+
+        // If a marker file couldn't be found for every target directory, error.
+        if (foundPerTargetDir.containsValue(0))
+        {
+            throw exception(String.format(NO_OWNERSHIP_FILE, foundPerTargetDir.entrySet()
+                                                                              .stream()
+                                                                              .filter(e -> e.getValue() == 0)
+                                                                              .map(Map.Entry::getKey)
+                                                                              .collect(Collectors.joining("', '", "'", "'"))));
+        }
+
+        // If more than one marker file was found in the tree for any target directory, error
+        Set<String> multipleTokens = foundPerTargetDir.entrySet()
+                                                      .stream()
+                                                      .filter(e -> e.getValue() > 1)
+                                                      .map(Map.Entry::getKey)
+                                                      .collect(Collectors.toSet());
+        if (!multipleTokens.isEmpty())
+            throw exception(String.format(MULTIPLE_OWNERSHIP_FILES, String.join(",", multipleTokens)));
+
+        // Step 2: assert that the content of each file is identical
+        assert !foundProperties.isEmpty();
+        Multimap<Integer, Path> byHash = HashMultimap.create();
+        foundProperties.forEach((key, value) -> byHash.put(value.hashCode(), key));
+        if (byHash.keySet().size() > 1)
+        {
+            // Group identical files to highlight where the mismatches are; worst case,
+            // they're all unique, but even then the number of individual files should be low
+            throw exception(String.format(INCONSISTENT_FILES_FOUND,
+                                          byHash.keySet()
+                                                .stream()
+                                                .map(hash -> byHash.get(hash)
+                                                                   .stream()
+                                                                   .map(Path::toString)
+                                                                   .sorted()
+                                                                   .collect(Collectors.joining("', '", "['", "']")))
+                                                .sorted()
+                                                .collect(Collectors.joining(", "))));
+        }
+
+        // Step 3: validate the content of the properties from disk
+        // Currently, only version 1 is supported which requires:
+        //   volume_count       that matches the number of unique files we found
+        //   ownership_token    that matches the one constructed from system props
+        Properties fromDisk = foundProperties.entrySet().iterator().next().getValue();
+        int version = getIntProperty(fromDisk, VERSION);
+        if (version != 1)
+            throw exception(String.format(UNSUPPORTED_VERSION, version));
+
+        int volumeCount = getIntProperty(fromDisk, VOLUME_COUNT);
+        if (volumeCount != foundProperties.size())
+            throw exception(INVALID_FILE_COUNT);
+
+        String token = getRequiredProperty(fromDisk, TOKEN);
+        if (!expectedToken.equals(token))
+            throw exception(MISMATCHING_TOKEN);
+
+        logger.info("Successfully verified fs ownership");
+    }
+
+    /** In version 1, we check and return the ownership token. Extend this for custom ownership hierarchies. */
+    protected String constructTokenFromProperties(Map<String, Object> config) throws StartupException
+    {
+        String cluster = getOwnershipToken(config);
+        if (null == cluster || cluster.isEmpty())
+            throw exception(String.format(MISSING_PROPERTY, FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN));
+        return cluster;
+    }
+
+    private int getIntProperty(Properties props, String key) throws StartupException
+    {
+        String val = getRequiredProperty(props, key);
+        try
+        {
+            return Integer.parseInt(val);
+        }
+        catch (NumberFormatException e)
+        {
+            throw exception(String.format(INVALID_PROPERTY_VALUE, key));
+        }
+    }
+
+    private String getRequiredProperty(Properties props, String key) throws StartupException
+    {
+        String s = props.getProperty(key);
+        if (null == s || s.isEmpty())
+            throw exception(String.format(INVALID_PROPERTY_VALUE, key));
+        return s;
+    }
+
+    private File resolve(Path dir, String filename) throws StartupException
+    {
+        try
+        {
+            return new File(dir.resolve(filename));
+        }
+        catch (Exception e)
+        {
+            logger.error("Encountered error resolving path ownership file {} relative to dir {}", filename, dir);
+            throw exception(READ_EXCEPTION);
+        }
+    }
+
+    private StartupException exception(String message)
+    {
+        return new StartupException(StartupException.ERR_WRONG_DISK_STATE, ERROR_PREFIX + message);
+    }
+
+    public boolean isEnabled(StartupChecksOptions options)
+    {
+        boolean enabledFromYaml = options.isEnabled(getStartupCheckType());
+        return CassandraRelevantProperties.FILE_SYSTEM_CHECK_ENABLE.getBoolean(enabledFromYaml);
+    }
+
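+    /**
+     * Resolution order: the (deprecated) system property if set, then the 'ownership_filename'
+     * key of this check's configuration, then the built-in default filename.
+     */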
+    public String getFsOwnershipFilename(Map<String, Object> config)
+    {
+        if (CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_FILENAME.isPresent())
+        {
+            logger.warn(String.format("Cassandra system property flag %s is deprecated and you should " +
+                                      "use startup check configuration in cassandra.yaml",
+                                      CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_FILENAME.getKey()));
+            return CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_FILENAME.getString();
+        }
+        else
+        {
+            Object fsOwnershipFilename = config.get("ownership_filename");
+            return fsOwnershipFilename == null
+                   ? CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_FILENAME.getDefaultValue()
+                   : (String) fsOwnershipFilename;
+        }
+    }
+
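+    /**
+     * Resolution order mirrors {@link #getFsOwnershipFilename(Map)}: the (deprecated) system
+     * property if set, then the 'ownership_token' key of this check's configuration, then the default.
+     */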
+    public String getOwnershipToken(Map<String, Object> config)
+    {
+        if (CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.isPresent())
+        {
+            logger.warn(String.format("Cassandra system property flag %s is deprecated and you should " +
+                                      "use startup check configuration in cassandra.yaml",
+                                      CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getKey()));
+            return CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getString();
+        }
+        else
+        {
+            Object ownershipToken = config.get("ownership_token");
+            return ownershipToken == null
+                   ? CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getDefaultValue()
+                   : (String) ownershipToken;
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/service/GCInspector.java b/src/java/org/apache/cassandra/service/GCInspector.java
index 0d42e72..c290d9e 100644
--- a/src/java/org/apache/cassandra/service/GCInspector.java
+++ b/src/java/org/apache/cassandra/service/GCInspector.java
@@ -47,6 +47,8 @@
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.StatusLogger;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class GCInspector implements NotificationListener, GCInspectorMXBean
 {
     public static final String MBEAN_NAME = "org.apache.cassandra.service:type=GCInspector";
@@ -108,7 +110,7 @@
         State()
         {
             count = maxRealTimeElapsed = sumSquaresRealTimeElapsed = totalRealTimeElapsed = totalBytesReclaimed = 0;
-            startNanos = System.nanoTime();
+            startNanos = nanoTime();
         }
     }
 
@@ -313,7 +315,7 @@
     {
         State state = getTotalSinceLastCheck();
         double[] r = new double[7];
-        r[0] = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - state.startNanos);
+        r[0] = TimeUnit.NANOSECONDS.toMillis(nanoTime() - state.startNanos);
         r[1] = state.maxRealTimeElapsed;
         r[2] = state.totalRealTimeElapsed;
         r[3] = state.sumSquaresRealTimeElapsed;
diff --git a/src/java/org/apache/cassandra/service/NativeTransportService.java b/src/java/org/apache/cassandra/service/NativeTransportService.java
index 7556f81..f131d74 100644
--- a/src/java/org/apache/cassandra/service/NativeTransportService.java
+++ b/src/java/org/apache/cassandra/service/NativeTransportService.java
@@ -149,7 +149,8 @@
         servers = Collections.emptyList();
 
         // shutdown executors used by netty for native transport server
-        workerGroup.shutdownGracefully(3, 5, TimeUnit.SECONDS).awaitUninterruptibly();
+        if (workerGroup != null)
+            workerGroup.shutdownGracefully(3, 5, TimeUnit.SECONDS).awaitUninterruptibly();
 
         Dispatcher.shutdown();
     }
diff --git a/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java b/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java
index 1c6b183..837bdd8 100644
--- a/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java
+++ b/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java
@@ -18,104 +18,71 @@
 
 package org.apache.cassandra.service;
 
-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
-import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.locator.AbstractReplicationStrategy;
-import org.apache.cassandra.utils.ExecutorUtils;
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.List;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.cassandra.concurrent.SequentialExecutorPlus;
+import org.apache.cassandra.concurrent.SequentialExecutorPlus.AtLeastOnceTrigger;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.locator.AbstractReplicationStrategy;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.utils.ExecutorUtils;
 
-import com.google.common.annotations.VisibleForTesting;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class PendingRangeCalculatorService
 {
     public static final PendingRangeCalculatorService instance = new PendingRangeCalculatorService();
 
-    private static Logger logger = LoggerFactory.getLogger(PendingRangeCalculatorService.class);
+    private static final Logger logger = LoggerFactory.getLogger(PendingRangeCalculatorService.class);
 
     // the executor will only run a single range calculation at a time while keeping at most one task queued in order
     // to trigger an update only after the most recent state change and not for each update individually
-    private final JMXEnabledThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(1, Integer.MAX_VALUE, TimeUnit.SECONDS,
-            new LinkedBlockingQueue<>(1), new NamedThreadFactory("PendingRangeCalculator"), "internal");
+    private final SequentialExecutorPlus executor = executorFactory()
+            .withJmxInternal()
+            .configureSequential("PendingRangeCalculator")
+            .withRejectedExecutionHandler((r, e) -> {})  // silently handle rejected tasks, this::update takes care of bookkeeping
+            .build();
 
-    private AtomicInteger updateJobs = new AtomicInteger(0);
+    private final AtLeastOnceTrigger update = executor.atLeastOnceTrigger(() -> {
+        PendingRangeCalculatorServiceDiagnostics.taskStarted(1);
+        long start = currentTimeMillis();
+        Collection<String> keyspaces = Schema.instance.getNonLocalStrategyKeyspaces().names();
+        for (String keyspaceName : keyspaces)
+            calculatePendingRanges(Keyspace.open(keyspaceName).getReplicationStrategy(), keyspaceName);
+        if (logger.isTraceEnabled())
+            logger.trace("Finished PendingRangeTask for {} keyspaces in {}ms", keyspaces.size(), currentTimeMillis() - start);
+        PendingRangeCalculatorServiceDiagnostics.taskFinished();
+    });
 
     public PendingRangeCalculatorService()
     {
-        executor.setRejectedExecutionHandler((r, e) ->
-            {
-                PendingRangeCalculatorServiceDiagnostics.taskRejected(instance, updateJobs);
-                PendingRangeCalculatorService.instance.finishUpdate();
-            }
-        );
-    }
-
-    private static class PendingRangeTask implements Runnable
-    {
-        private final AtomicInteger updateJobs;
-
-        PendingRangeTask(AtomicInteger updateJobs)
-        {
-            this.updateJobs = updateJobs;
-        }
-
-        public void run()
-        {
-            try
-            {
-                PendingRangeCalculatorServiceDiagnostics.taskStarted(instance, updateJobs);
-                long start = System.currentTimeMillis();
-                List<String> keyspaces = Schema.instance.getNonLocalStrategyKeyspaces();
-                for (String keyspaceName : keyspaces)
-                    calculatePendingRanges(Keyspace.open(keyspaceName).getReplicationStrategy(), keyspaceName);
-                if (logger.isTraceEnabled())
-                    logger.trace("Finished PendingRangeTask for {} keyspaces in {}ms", keyspaces.size(), System.currentTimeMillis() - start);
-                PendingRangeCalculatorServiceDiagnostics.taskFinished(instance, updateJobs);
-            }
-            finally
-            {
-                PendingRangeCalculatorService.instance.finishUpdate();
-            }
-        }
-    }
-
-    private void finishUpdate()
-    {
-        int jobs = updateJobs.decrementAndGet();
-        PendingRangeCalculatorServiceDiagnostics.taskCountChanged(instance, jobs);
     }
 
     public void update()
     {
-        int jobs = updateJobs.incrementAndGet();
-        PendingRangeCalculatorServiceDiagnostics.taskCountChanged(instance, jobs);
-        executor.execute(new PendingRangeTask(updateJobs));
+        boolean success = update.trigger();
+        if (!success) PendingRangeCalculatorServiceDiagnostics.taskRejected(1);
+        else PendingRangeCalculatorServiceDiagnostics.taskCountChanged(1);
     }
 
     public void blockUntilFinished()
     {
-        // We want to be sure the job we're blocking for is actually finished and we can't trust the TPE's active job count
-        while (updateJobs.get() > 0)
-        {
-            try
-            {
-                Thread.sleep(100);
-            }
-            catch (InterruptedException e)
-            {
-                throw new RuntimeException(e);
-            }
-        }
+        update.sync();
     }
 
 
+    public void executeWhenFinished(Runnable runnable)
+    {
+        update.runAfter(runnable);
+    }
+
     // public & static for testing purposes
     public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName)
     {
@@ -127,4 +94,5 @@
     {
         ExecutorUtils.shutdownNowAndWait(timeout, unit, executor);
     }
+
 }
diff --git a/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceDiagnostics.java b/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceDiagnostics.java
index ec09e3f..2b677d1 100644
--- a/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceDiagnostics.java
+++ b/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceDiagnostics.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.service;
 
-import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.cassandra.diag.DiagnosticEventService;
 import org.apache.cassandra.service.PendingRangeCalculatorServiceEvent.PendingRangeCalculatorServiceEventType;
 
@@ -34,35 +32,30 @@
     {
     }
     
-    static void taskStarted(PendingRangeCalculatorService calculatorService, AtomicInteger taskCount)
+    static void taskStarted(int taskCount)
     {
         if (isEnabled(PendingRangeCalculatorServiceEventType.TASK_STARTED))
             service.publish(new PendingRangeCalculatorServiceEvent(PendingRangeCalculatorServiceEventType.TASK_STARTED,
-                                                                   calculatorService,
-                                                                   taskCount.get()));
+                                                                   taskCount));
     }
 
-    static void taskFinished(PendingRangeCalculatorService calculatorService, AtomicInteger taskCount)
+    static void taskFinished()
     {
         if (isEnabled(PendingRangeCalculatorServiceEventType.TASK_FINISHED_SUCCESSFULLY))
-            service.publish(new PendingRangeCalculatorServiceEvent(PendingRangeCalculatorServiceEventType.TASK_FINISHED_SUCCESSFULLY,
-                                                                   calculatorService,
-                                                                   taskCount.get()));
+            service.publish(new PendingRangeCalculatorServiceEvent(PendingRangeCalculatorServiceEventType.TASK_FINISHED_SUCCESSFULLY));
     }
 
-    static void taskRejected(PendingRangeCalculatorService calculatorService, AtomicInteger taskCount)
+    static void taskRejected(int taskCount)
     {
         if (isEnabled(PendingRangeCalculatorServiceEventType.TASK_EXECUTION_REJECTED))
             service.publish(new PendingRangeCalculatorServiceEvent(PendingRangeCalculatorServiceEventType.TASK_EXECUTION_REJECTED,
-                                                                   calculatorService,
-                                                                   taskCount.get()));
+                                                                   taskCount));
     }
 
-    static void taskCountChanged(PendingRangeCalculatorService calculatorService, int taskCount)
+    static void taskCountChanged(int taskCount)
     {
         if (isEnabled(PendingRangeCalculatorServiceEventType.TASK_COUNT_CHANGED))
             service.publish(new PendingRangeCalculatorServiceEvent(PendingRangeCalculatorServiceEventType.TASK_COUNT_CHANGED,
-                                                                   calculatorService,
                                                                    taskCount));
     }
 
diff --git a/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceEvent.java b/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceEvent.java
index 3024149..f255173 100644
--- a/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceEvent.java
+++ b/src/java/org/apache/cassandra/service/PendingRangeCalculatorServiceEvent.java
@@ -19,7 +19,9 @@
 package org.apache.cassandra.service;
 
 import java.io.Serializable;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.cassandra.diag.DiagnosticEvent;
 
@@ -29,7 +31,6 @@
 final class PendingRangeCalculatorServiceEvent extends DiagnosticEvent
 {
     private final PendingRangeCalculatorServiceEventType type;
-    private final PendingRangeCalculatorService source;
     private final int taskCount;
 
     public enum PendingRangeCalculatorServiceEventType
@@ -40,12 +41,15 @@
         TASK_COUNT_CHANGED
     }
 
+    PendingRangeCalculatorServiceEvent(PendingRangeCalculatorServiceEventType type)
+    {
+        this(type, -1);
+    }
+
     PendingRangeCalculatorServiceEvent(PendingRangeCalculatorServiceEventType type,
-                                       PendingRangeCalculatorService service,
                                        int taskCount)
     {
         this.type = type;
-        this.source = service;
         this.taskCount = taskCount;
     }
 
@@ -59,9 +63,11 @@
         return type;
     }
 
-    public HashMap<String, Serializable> toMap()
+    public Map<String, Serializable> toMap()
     {
         // be extra defensive against nulls and bugs
+        if (taskCount < 0)
+            return Collections.emptyMap();
         HashMap<String, Serializable> ret = new HashMap<>();
         ret.put("taskCount", taskCount);
         return ret;
diff --git a/src/java/org/apache/cassandra/service/QueryState.java b/src/java/org/apache/cassandra/service/QueryState.java
index adb13b5..b2275f2 100644
--- a/src/java/org/apache/cassandra/service/QueryState.java
+++ b/src/java/org/apache/cassandra/service/QueryState.java
@@ -19,7 +19,6 @@
 
 import java.net.InetAddress;
 
-import org.apache.cassandra.transport.ClientStat;
 import org.apache.cassandra.utils.FBUtilities;
 
 /**
diff --git a/src/java/org/apache/cassandra/service/SnapshotVerbHandler.java b/src/java/org/apache/cassandra/service/SnapshotVerbHandler.java
index 1309d6e..850c982 100644
--- a/src/java/org/apache/cassandra/service/SnapshotVerbHandler.java
+++ b/src/java/org/apache/cassandra/service/SnapshotVerbHandler.java
@@ -17,16 +17,11 @@
  */
 package org.apache.cassandra.service;
 
-import java.util.concurrent.Executor;
-import java.util.concurrent.Executors;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.SnapshotCommand;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.IVerbHandler;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
diff --git a/src/java/org/apache/cassandra/service/StartupCheck.java b/src/java/org/apache/cassandra/service/StartupCheck.java
index 649f13c..331b381 100644
--- a/src/java/org/apache/cassandra/service/StartupCheck.java
+++ b/src/java/org/apache/cassandra/service/StartupCheck.java
@@ -17,14 +17,16 @@
  */
 package org.apache.cassandra.service;
 
+import org.apache.cassandra.config.StartupChecksOptions;
 import org.apache.cassandra.exceptions.StartupException;
+import org.apache.cassandra.service.StartupChecks.StartupCheckType;
 
 /**
  * A test to determine if the system is in a valid state to start up.
  * Some implementations may not actually halt startup, but provide
  * information or advice on tuning and non-fatal environmental issues (e.g. like
  * checking for and warning about suboptimal JVM settings).
- * Other checks may indicate that they system is not in a correct state to be started.
+ * Other checks may indicate that the system is not in a correct state to be started.
  * Examples include missing or unaccessible data directories, unreadable sstables and
  * misconfiguration of cluster_name in cassandra.yaml.
  *
@@ -39,8 +41,27 @@
      * test should log a message regarding the reason for the failure and
      * ideally the steps required to remedy the problem.
      *
+     * @param startupChecksOptions all options from descriptor
      * @throws org.apache.cassandra.exceptions.StartupException if the test determines
      * that the environement or system is not in a safe state to startup
      */
-    void execute() throws StartupException;
+    void execute(StartupChecksOptions startupChecksOptions) throws StartupException;
+
+    /**
+     * @return the type of this startup check, used for configuration retrieval
+     */
+    default StartupCheckType getStartupCheckType()
+    {
+        return StartupCheckType.non_configurable_check;
+    }
+
+    /**
+     * Post-hook after all startup checks succeeded.
+     *
+     * @param options startup check options from descriptor
+     */
+    default void postAction(StartupChecksOptions options)
+    {
+    }
 }
diff --git a/src/java/org/apache/cassandra/service/StartupChecks.java b/src/java/org/apache/cassandra/service/StartupChecks.java
index dadb0c5..0aacc02 100644
--- a/src/java/org/apache/cassandra/service/StartupChecks.java
+++ b/src/java/org/apache/cassandra/service/StartupChecks.java
@@ -18,13 +18,25 @@
 package org.apache.cassandra.service;
 
 import java.io.BufferedReader;
-import java.io.File;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.RuntimeMXBean;
-import java.nio.file.*;
+import java.nio.file.FileStore;
+import java.nio.file.FileVisitResult;
+import java.nio.file.FileVisitor;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
 import java.nio.file.attribute.BasicFileAttributes;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -32,34 +44,39 @@
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
+import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import net.jpountz.lz4.LZ4Factory;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.config.StartupChecksOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.StartupException;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.UUIDBasedSSTableId;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.utils.NativeLibrary;
+import org.apache.cassandra.io.util.PathUtils;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JavaUtils;
-import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.NativeLibrary;
 import org.apache.cassandra.utils.SigarLibrary;
 
-import static java.lang.String.format;
 import static org.apache.cassandra.config.CassandraRelevantProperties.COM_SUN_MANAGEMENT_JMXREMOTE_PORT;
 import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_VERSION;
 import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_VM_NAME;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Verifies that the system and environment is in a fit state to be started.
@@ -82,6 +99,28 @@
  */
 public class StartupChecks
 {
+    public enum StartupCheckType
+    {
+        // non-configurable check is always enabled for execution
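+        // the remaining checks can be toggled via the startup check configuration (see StartupChecksOptions);
+        // those constructed with 'true' are disabled unless explicitly enabled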
+        non_configurable_check,
+        check_filesystem_ownership(true),
+        check_dc,
+        check_rack,
+        check_data_resurrection(true);
+
+        public final boolean disabledByDefault;
+
+        StartupCheckType()
+        {
+            this(false);
+        }
+
+        StartupCheckType(boolean disabledByDefault)
+        {
+            this.disabledByDefault = disabledByDefault;
+        }
+    }
+
     private static final Logger logger = LoggerFactory.getLogger(StartupChecks.class);
     // List of checks to run before starting up. If any test reports failure, startup will be halted.
     private final List<StartupCheck> preFlightChecks = new ArrayList<>();
@@ -98,12 +137,14 @@
                                                                       checkNativeLibraryInitialization,
                                                                       initSigarLibrary,
                                                                       checkMaxMapCount,
+                                                                      checkReadAheadKbSetting,
                                                                       checkDataDirs,
                                                                       checkSSTablesFormat,
                                                                       checkSystemKeyspaceState,
                                                                       checkDatacenter,
                                                                       checkRack,
-                                                                      checkLegacyAuthTables);
+                                                                      checkLegacyAuthTables,
+                                                                      new DataResurrectionCheck());
 
     public StartupChecks withDefaultTests()
     {
@@ -125,20 +166,35 @@
      * Run the configured tests and return a report detailing the results.
      * @throws org.apache.cassandra.exceptions.StartupException if any test determines that the
      * system is not in an valid state to startup
+     * @param options options to pass to respective checks for their configuration
      */
-    public void verify() throws StartupException
+    public void verify(StartupChecksOptions options) throws StartupException
     {
         for (StartupCheck test : preFlightChecks)
-            test.execute();
+            test.execute(options);
+
+        for (StartupCheck test : preFlightChecks)
+        {
+            try
+            {
+                test.postAction(options);
+            }
+            catch (Throwable t)
+            {
+                logger.warn("Failed to run startup check post-action on " + test.getStartupCheckType());
+            }
+        }
     }
 
     public static final StartupCheck checkJemalloc = new StartupCheck()
     {
-        public void execute()
+        @Override
+        public void execute(StartupChecksOptions options)
         {
-            if (FBUtilities.isWindows)
+            if (options.isDisabled(getStartupCheckType()))
                 return;
-            String jemalloc = System.getProperty("cassandra.libjemalloc");
+
+            String jemalloc = CassandraRelevantProperties.LIBJEMALLOC.getString();
             if (jemalloc == null)
                 logger.warn("jemalloc shared library could not be preloaded to speed up memory allocations");
             else if ("-".equals(jemalloc))
@@ -148,14 +204,21 @@
         }
     };
 
-    public static final StartupCheck checkLz4Native = () -> {
-        try
+    public static final StartupCheck checkLz4Native = new StartupCheck()
+    {
+        @Override
+        public void execute(StartupChecksOptions options)
         {
-            LZ4Factory.nativeInstance(); // make sure native loads
-        }
-        catch (AssertionError | LinkageError e)
-        {
-            logger.warn("lz4-java was unable to load native libraries; this will lower the performance of lz4 (network/sstables/etc.): {}", Throwables.getRootCause(e).getMessage());
+            if (options.isDisabled(getStartupCheckType()))
+                return;
+            try
+            {
+                LZ4Factory.nativeInstance(); // make sure native loads
+            }
+            catch (AssertionError | LinkageError e)
+            {
+                logger.warn("lz4-java was unable to load native libraries; this will lower the performance of lz4 (network/sstables/etc.): {}", Throwables.getRootCause(e).getMessage());
+            }
         }
     };
 
@@ -167,9 +230,13 @@
          * We use this to ensure the system clock is at least somewhat correct at startup.
          */
         private static final long EARLIEST_LAUNCH_DATE = 1215820800000L;
-        public void execute() throws StartupException
+
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
         {
-            long now = System.currentTimeMillis();
+            if (options.isDisabled(getStartupCheckType()))
+                return;
+            long now = currentTimeMillis();
             if (now < EARLIEST_LAUNCH_DATE)
                 throw new StartupException(StartupException.ERR_WRONG_MACHINE_STATE,
                                            String.format("current machine time is %s, but that is seemingly incorrect. exiting now.",
@@ -179,13 +246,16 @@
 
     public static final StartupCheck checkJMXPorts = new StartupCheck()
     {
-        public void execute()
+        @Override
+        public void execute(StartupChecksOptions options)
         {
-            String jmxPort = System.getProperty("cassandra.jmx.remote.port");
+            if (options.isDisabled(getStartupCheckType()))
+                return;
+            String jmxPort = CassandraRelevantProperties.CASSANDRA_JMX_REMOTE_PORT.getString();
             if (jmxPort == null)
             {
                 logger.warn("JMX is not enabled to receive remote connections. Please see cassandra-env.sh for more info.");
-                jmxPort = System.getProperty("cassandra.jmx.local.port");
+                jmxPort = CassandraRelevantProperties.CASSANDRA_JMX_LOCAL_PORT.getString();
                 if (jmxPort == null)
                     logger.error("cassandra.jmx.local.port missing from cassandra-env.sh, unable to start local JMX service.");
             }
@@ -198,8 +268,11 @@
 
     public static final StartupCheck checkJMXProperties = new StartupCheck()
     {
-        public void execute()
+        @Override
+        public void execute(StartupChecksOptions options)
         {
+            if (options.isDisabled(getStartupCheckType()))
+                return;
             if (COM_SUN_MANAGEMENT_JMXREMOTE_PORT.isPresent())
             {
                 logger.warn("Use of com.sun.management.jmxremote.port at startup is deprecated. " +
@@ -210,8 +283,11 @@
 
     public static final StartupCheck inspectJvmOptions = new StartupCheck()
     {
-        public void execute()
+        @Override
+        public void execute(StartupChecksOptions options)
         {
+            if (options.isDisabled(getStartupCheckType()))
+                return;
             // log warnings for different kinds of sub-optimal JVMs.  tldr use 64-bit Oracle >= 1.6u32
             if (!DatabaseDescriptor.hasLargeAddressSpace())
                 logger.warn("32bit JVM detected.  It is recommended to run Cassandra on a 64bit JVM for better performance.");
@@ -269,8 +345,11 @@
 
     public static final StartupCheck checkNativeLibraryInitialization = new StartupCheck()
     {
-        public void execute() throws StartupException
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
         {
+            if (options.isDisabled(getStartupCheckType()))
+                return;
             // Fail-fast if the native library could not be linked.
             if (!NativeLibrary.isAvailable())
                 throw new StartupException(StartupException.ERR_WRONG_MACHINE_STATE, "The native library could not be initialized properly. ");
@@ -279,12 +358,95 @@
 
     public static final StartupCheck initSigarLibrary = new StartupCheck()
     {
-        public void execute()
+        @Override
+        public void execute(StartupChecksOptions options)
         {
+            if (options.isDisabled(getStartupCheckType()))
+                return;
             SigarLibrary.instance.warnIfRunningInDegradedMode();
         }
     };
 
+    public static final StartupCheck checkReadAheadKbSetting = new StartupCheck()
+    {
+        // This value is in KB.
+        private static final long MAX_RECOMMENDED_READ_AHEAD_KB_SETTING = 128;
+
+        /**
+         * Gets the block device system path (e.g. /dev/sda) for each of the
+         * data directories defined in the cassandra config (cassandra.yaml).
+         * @param dataDirectories list of data directories from cassandra.yaml
+         * @return map of block device path to data directory
+         */
+        private Map<String, String> getBlockDevices(String[] dataDirectories) {
+            Map<String, String> blockDevices = new HashMap<String, String>();
+
+            for (String dataDirectory : dataDirectories)
+            {
+                try
+                {
+                    Path p = File.getPath(dataDirectory);
+                    FileStore fs = Files.getFileStore(p);
+
+                    String blockDirectory = fs.name();
+                    if (StringUtils.isNotEmpty(blockDirectory))
+                    {
+                        blockDevices.put(blockDirectory, dataDirectory);
+                    }
+                }
+                catch (IOException e)
+                {
+                    logger.warn("IO exception while reading file {}.", dataDirectory, e);
+                }
+            }
+            return blockDevices;
+        }
+
+        @Override
+        public void execute(StartupChecksOptions options)
+        {
+            if (options.isDisabled(getStartupCheckType()) || !FBUtilities.isLinux)
+                return;
+
+            String[] dataDirectories = DatabaseDescriptor.getRawConfig().data_file_directories;
+            Map<String, String> blockDevices = getBlockDevices(dataDirectories);
+
+            for (Map.Entry<String, String> entry: blockDevices.entrySet())
+            {
+                String blockDeviceDirectory = entry.getKey();
+                String dataDirectory = entry.getValue();
+                try
+                {
+                    Path readAheadKBPath = StartupChecks.getReadAheadKBPath(blockDeviceDirectory);
+
+                    if (readAheadKBPath == null || Files.notExists(readAheadKBPath))
+                    {
+                        logger.debug("No 'read_ahead_kb' setting found for device {} of data directory {}.", blockDeviceDirectory, dataDirectory);
+                        continue;
+                    }
+
+                    final List<String> data = Files.readAllLines(readAheadKBPath);
+                    if (data.isEmpty())
+                        continue;
+
+                    int readAheadKbSetting = Integer.parseInt(data.get(0));
+
+                    if (readAheadKbSetting > MAX_RECOMMENDED_READ_AHEAD_KB_SETTING)
+                    {
+                        logger.warn("Detected high '{}' setting of {} for device '{}' of data directory '{}'. It is " +
+                                    "recommended to set this value to 8KB (or lower) on SSDs or 64KB (or lower) on HDDs " +
+                                    "to prevent excessive IO usage and page cache churn on read-intensive workloads.",
+                                    readAheadKBPath, readAheadKbSetting, blockDeviceDirectory, dataDirectory);
+                    }
+                }
+                catch (final IOException e)
+                {
+                    logger.warn("IO exception while reading file {}.", blockDeviceDirectory, e);
+                }
+            }
+        }
+    };
+
     public static final StartupCheck checkMaxMapCount = new StartupCheck()
     {
         private final long EXPECTED_MAX_MAP_COUNT = 1048575;
@@ -292,7 +454,7 @@
 
         private long getMaxMapCount()
         {
-            final Path path = Paths.get(MAX_MAP_COUNT_PATH);
+            final Path path = File.getPath(MAX_MAP_COUNT_PATH);
             try (final BufferedReader bufferedReader = Files.newBufferedReader(path))
             {
                 final String data = bufferedReader.readLine();
@@ -315,9 +477,10 @@
             return -1;
         }
 
-        public void execute()
+        @Override
+        public void execute(StartupChecksOptions options)
         {
-            if (!FBUtilities.isLinux)
+            if (options.isDisabled(getStartupCheckType()) || !FBUtilities.isLinux)
                 return;
 
             if (DatabaseDescriptor.getDiskAccessMode() == Config.DiskAccessMode.standard &&
@@ -332,42 +495,51 @@
         }
     };
 
-    public static final StartupCheck checkDataDirs = () ->
+    public static final StartupCheck checkDataDirs = new StartupCheck()
     {
-        // check all directories(data, commitlog, saved cache) for existence and permission
-        Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()),
-                                                 Arrays.asList(DatabaseDescriptor.getCommitLogLocation(),
-                                                               DatabaseDescriptor.getSavedCachesLocation(),
-                                                               DatabaseDescriptor.getHintsDirectory().getAbsolutePath()));
-
-        for (String dataDir : dirs)
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
         {
-            logger.debug("Checking directory {}", dataDir);
-            File dir = new File(dataDir);
-
-            // check that directories exist.
-            if (!dir.exists())
+            if (options.isDisabled(getStartupCheckType()))
+                return;
+            // check all directories(data, commitlog, saved cache) for existence and permission
+            Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()),
+                                                     Arrays.asList(DatabaseDescriptor.getCommitLogLocation(),
+                                                                   DatabaseDescriptor.getSavedCachesLocation(),
+                                                                   DatabaseDescriptor.getHintsDirectory().absolutePath()));
+            for (String dataDir : dirs)
             {
-                logger.warn("Directory {} doesn't exist", dataDir);
-                // if they don't, failing their creation, stop cassandra.
-                if (!dir.mkdirs())
-                    throw new StartupException(StartupException.ERR_WRONG_DISK_STATE,
-                                               "Has no permission to create directory "+ dataDir);
-            }
+                logger.debug("Checking directory {}", dataDir);
+                File dir = new File(dataDir);
 
-            // if directories exist verify their permissions
-            if (!Directories.verifyFullPermissions(dir, dataDir))
-                throw new StartupException(StartupException.ERR_WRONG_DISK_STATE,
-                                           "Insufficient permissions on directory " + dataDir);
+                // check that directories exist.
+                if (!dir.exists())
+                {
+                    logger.warn("Directory {} doesn't exist", dataDir);
+                    // if they don't, failing their creation, stop cassandra.
+                    if (!dir.tryCreateDirectories())
+                        throw new StartupException(StartupException.ERR_WRONG_DISK_STATE,
+                                                   "Has no permission to create directory "+ dataDir);
+                }
+
+                // if directories exist verify their permissions
+                if (!Directories.verifyFullPermissions(dir, dataDir))
+                    throw new StartupException(StartupException.ERR_WRONG_DISK_STATE,
+                                               "Insufficient permissions on directory " + dataDir);
+            }
         }
     };
 
     public static final StartupCheck checkSSTablesFormat = new StartupCheck()
     {
-        public void execute() throws StartupException
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
         {
+            if (options.isDisabled(getStartupCheckType()))
+                return;
             final Set<String> invalid = new HashSet<>();
             final Set<String> nonSSTablePaths = new HashSet<>();
+            final List<String> withIllegalGenId = new ArrayList<>();
             nonSSTablePaths.add(FileUtils.getCanonicalPath(DatabaseDescriptor.getCommitLogLocation()));
             nonSSTablePaths.add(FileUtils.getCanonicalPath(DatabaseDescriptor.getSavedCachesLocation()));
             nonSSTablePaths.add(FileUtils.getCanonicalPath(DatabaseDescriptor.getHintsDirectory()));
@@ -376,14 +548,18 @@
             {
                 public FileVisitResult visitFile(Path path, BasicFileAttributes attrs)
                 {
-                    File file = path.toFile();
+                    File file = new File(path);
                     if (!Descriptor.isValidFile(file))
                         return FileVisitResult.CONTINUE;
 
                     try
                     {
-                        if (!Descriptor.fromFilename(file).isCompatible())
+                        Descriptor desc = Descriptor.fromFilename(file);
+                        if (!desc.isCompatible())
                             invalid.add(file.toString());
+
+                        if (!DatabaseDescriptor.isUUIDSSTableIdentifiersEnabled() && desc.id instanceof UUIDBasedSSTableId)
+                            withIllegalGenId.add(file.toString());
                     }
                     catch (Exception e)
                     {
@@ -397,7 +573,7 @@
                     String name = dir.getFileName().toString();
                     return (name.equals(Directories.SNAPSHOT_SUBDIR)
                             || name.equals(Directories.BACKUPS_SUBDIR)
-                            || nonSSTablePaths.contains(dir.toFile().getCanonicalPath()))
+                            || nonSSTablePaths.contains(PathUtils.toCanonicalPath(dir).toString()))
                            ? FileVisitResult.SKIP_SUBTREE
                            : FileVisitResult.CONTINUE;
                 }
@@ -407,7 +583,7 @@
             {
                 try
                 {
-                    Files.walkFileTree(Paths.get(dataDir), sstableVisitor);
+                    Files.walkFileTree(new File(dataDir).toPath(), sstableVisitor);
                 }
                 catch (IOException e)
                 {
@@ -423,13 +599,25 @@
                                                          "upgradesstables",
                                                          Joiner.on(",").join(invalid)));
 
+            if (!withIllegalGenId.isEmpty())
+                throw new StartupException(StartupException.ERR_WRONG_CONFIG,
+                                           "UUID sstable identifiers are disabled but some sstables have been " +
+                                           "created with UUID identifiers. You have to either delete those " +
+                                           "sstables or enable UUID based sstable identifers in cassandra.yaml " +
+                                           "(uuid_sstable_identifiers_enabled). The list of affected sstables is: " +
+                                           Joiner.on(", ").join(withIllegalGenId) + ". If you decide to delete sstables, " +
+                                           "and have that data replicated over other healthy nodes, those will be brought" +
+                                           "back during repair");
         }
     };
 
     public static final StartupCheck checkSystemKeyspaceState = new StartupCheck()
     {
-        public void execute() throws StartupException
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
         {
+            if (options.isDisabled(getStartupCheckType()))
+                return;
             // check the system keyspace to keep user from shooting self in foot by changing partitioner, cluster name, etc.
             // we do a one-off scrub of the system keyspace first; we can't load the list of the rest of the keyspaces,
             // until system keyspace is opened.
@@ -450,9 +638,18 @@
 
     public static final StartupCheck checkDatacenter = new StartupCheck()
     {
-        public void execute() throws StartupException
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
         {
-            if (!Boolean.getBoolean("cassandra.ignore_dc"))
+            boolean enabled = options.isEnabled(getStartupCheckType());
+            if (CassandraRelevantProperties.IGNORE_DC.isPresent())
+            {
+                logger.warn(String.format("Cassandra system property flag %s is deprecated and you should " +
+                                          "use startup check configuration in cassandra.yaml",
+                                          CassandraRelevantProperties.IGNORE_DC.getKey()));
+                enabled = !Boolean.getBoolean(CassandraRelevantProperties.IGNORE_DC.getKey());
+            }
+            if (enabled)
             {
                 String storedDc = SystemKeyspace.getDatacenter();
                 if (storedDc != null)
@@ -468,13 +665,28 @@
                 }
             }
         }
+
+        @Override
+        public StartupCheckType getStartupCheckType()
+        {
+            return StartupCheckType.check_dc;
+        }
     };
 
     public static final StartupCheck checkRack = new StartupCheck()
     {
-        public void execute() throws StartupException
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
         {
-            if (!Boolean.getBoolean("cassandra.ignore_rack"))
+            boolean enabled = options.isEnabled(getStartupCheckType());
+            if (CassandraRelevantProperties.IGNORE_RACK.isPresent())
+            {
+                logger.warn(String.format("Cassandra system property flag %s is deprecated and you should " +
+                                          "use startup check configuration in cassandra.yaml",
+                                          CassandraRelevantProperties.IGNORE_RACK.getKey()));
+                enabled = !Boolean.getBoolean(CassandraRelevantProperties.IGNORE_RACK.getKey());
+            }
+            if (enabled)
             {
                 String storedRack = SystemKeyspace.getRack();
                 if (storedRack != null)
@@ -490,16 +702,54 @@
                 }
             }
         }
+
+        @Override
+        public StartupCheckType getStartupCheckType()
+        {
+            return StartupCheckType.check_rack;
+        }
     };
 
-    public static final StartupCheck checkLegacyAuthTables = () ->
+    public static final StartupCheck checkLegacyAuthTables = new StartupCheck()
     {
-        Optional<String> errMsg = checkLegacyAuthTablesMessage();
-        if (errMsg.isPresent())
-            throw new StartupException(StartupException.ERR_WRONG_CONFIG, errMsg.get());
+        @Override
+        public void execute(StartupChecksOptions options) throws StartupException
+        {
+            if (options.isDisabled(getStartupCheckType()))
+                return;
+            Optional<String> errMsg = checkLegacyAuthTablesMessage();
+            if (errMsg.isPresent())
+                throw new StartupException(StartupException.ERR_WRONG_CONFIG, errMsg.get());
+        }
     };
 
     @VisibleForTesting
+    public static Path getReadAheadKBPath(String blockDirectoryPath)
+    {
+        Path readAheadKBPath = null;
+
+        final String READ_AHEAD_KB_SETTING_PATH = "/sys/block/%s/queue/read_ahead_kb";
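+        // e.g. a block device reported as "/dev/sda1" maps to "/sys/block/sda/queue/read_ahead_kb"
+        // (trailing partition digits are stripped from the device name)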
+        try
+        {
+            String[] blockDirComponents = blockDirectoryPath.split("/");
+            if (blockDirComponents.length >= 2 && blockDirComponents[1].equals("dev"))
+            {
+                String deviceName = blockDirComponents[2].replaceAll("[0-9]*$", "");
+                if (StringUtils.isNotEmpty(deviceName))
+                {
+                    readAheadKBPath = File.getPath(String.format(READ_AHEAD_KB_SETTING_PATH, deviceName));
+                }
+            }
+        }
+        catch (Exception e)
+        {
+            logger.error("Error retrieving device path for {}.", blockDirectoryPath);
+        }
+
+        return readAheadKBPath;
+    }
+
+    @VisibleForTesting
     static Optional<String> checkLegacyAuthTablesMessage()
     {
         List<String> existing = new ArrayList<>(SchemaConstants.LEGACY_AUTH_TABLES).stream().filter((legacyAuthTable) ->
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index c783caf..2a0ccab 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -30,38 +30,47 @@
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Supplier;
+import java.util.function.Function;
+import java.util.stream.Collectors;
 
 import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheLoader;
 import com.google.common.collect.Iterables;
-import com.google.common.primitives.Ints;
 import com.google.common.util.concurrent.Uninterruptibles;
-import org.apache.commons.lang3.StringUtils;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.service.paxos.*;
+import org.apache.cassandra.service.paxos.Paxos;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.batchlog.Batch;
 import org.apache.cassandra.batchlog.BatchlogManager;
 import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.CounterMutation;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.IMutation;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.MessageParams;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.PartitionRangeReadCommand;
 import org.apache.cassandra.db.ReadCommand;
 import org.apache.cassandra.db.ReadExecutionController;
 import org.apache.cassandra.db.ReadResponse;
+import org.apache.cassandra.db.RejectException;
 import org.apache.cassandra.db.SinglePartitionReadCommand;
 import org.apache.cassandra.db.TruncateRequest;
 import org.apache.cassandra.db.WriteType;
@@ -79,6 +88,7 @@
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.IsBootstrappingException;
 import org.apache.cassandra.exceptions.OverloadedException;
+import org.apache.cassandra.exceptions.ReadAbortException;
 import org.apache.cassandra.exceptions.ReadFailureException;
 import org.apache.cassandra.exceptions.ReadTimeoutException;
 import org.apache.cassandra.exceptions.RequestFailureException;
@@ -101,6 +111,7 @@
 import org.apache.cassandra.locator.ReplicaPlans;
 import org.apache.cassandra.locator.Replicas;
 import org.apache.cassandra.metrics.CASClientRequestMetrics;
+import org.apache.cassandra.metrics.DenylistMetrics;
 import org.apache.cassandra.metrics.ReadRepairMetrics;
 import org.apache.cassandra.metrics.StorageMetrics;
 import org.apache.cassandra.net.ForwardingInfo;
@@ -109,27 +120,32 @@
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.schema.PartitionDenylist;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.service.paxos.Commit;
-import org.apache.cassandra.service.paxos.PaxosState;
-import org.apache.cassandra.service.paxos.PrepareCallback;
-import org.apache.cassandra.service.paxos.ProposeCallback;
+import org.apache.cassandra.service.paxos.v1.PrepareCallback;
+import org.apache.cassandra.service.paxos.v1.ProposeCallback;
 import org.apache.cassandra.service.reads.AbstractReadExecutor;
 import org.apache.cassandra.service.reads.ReadCallback;
 import org.apache.cassandra.service.reads.range.RangeCommands;
 import org.apache.cassandra.service.reads.repair.ReadRepair;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.triggers.TriggerExecutor;
+import org.apache.cassandra.utils.Clock;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.MonotonicClock;
+import org.apache.cassandra.utils.NoSpamLogger;
 import org.apache.cassandra.utils.Pair;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
+import static com.google.common.collect.Iterables.concat;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.db.ConsistencyLevel.SERIAL;
+import static org.apache.cassandra.net.Message.out;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casReadMetrics;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casWriteMetrics;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.readMetrics;
@@ -138,15 +154,18 @@
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.writeMetrics;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.writeMetricsForLevel;
 import static org.apache.cassandra.net.NoPayload.noPayload;
-import static org.apache.cassandra.net.Verb.BATCH_STORE_REQ;
-import static org.apache.cassandra.net.Verb.MUTATION_REQ;
-import static org.apache.cassandra.net.Verb.PAXOS_COMMIT_REQ;
-import static org.apache.cassandra.net.Verb.PAXOS_PREPARE_REQ;
-import static org.apache.cassandra.net.Verb.PAXOS_PROPOSE_REQ;
-import static org.apache.cassandra.net.Verb.TRUNCATE_REQ;
+import static org.apache.cassandra.net.Verb.*;
 import static org.apache.cassandra.service.BatchlogResponseHandler.BatchlogCleanup;
-import static org.apache.cassandra.service.paxos.PrepareVerbHandler.doPrepare;
-import static org.apache.cassandra.service.paxos.ProposeVerbHandler.doPropose;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.GLOBAL;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.LOCAL;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.nextBallot;
+import static org.apache.cassandra.service.paxos.v1.PrepareVerbHandler.doPrepare;
+import static org.apache.cassandra.service.paxos.v1.ProposeVerbHandler.doPropose;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
+import static org.apache.commons.lang3.StringUtils.join;
 
 public class StorageProxy implements StorageProxyMBean
 {
@@ -155,6 +174,8 @@
 
     public static final String UNREACHABLE = "UNREACHABLE";
 
+    private static final int FAILURE_LOGGING_INTERVAL_SECONDS = CassandraRelevantProperties.FAILURE_LOGGING_INTERVAL_SECONDS.getInt();
+
     private static final WritePerformer standardWritePerformer;
     private static final WritePerformer counterWritePerformer;
     private static final WritePerformer counterWriteOnCoordinatorPerformer;
@@ -170,9 +191,11 @@
         }
     };
 
-    private static final String DISABLE_SERIAL_READ_LINEARIZABILITY_KEY = "cassandra.unsafe.disable-serial-reads-linearizability";
-    private static final boolean disableSerialReadLinearizability =
-        Boolean.parseBoolean(System.getProperty(DISABLE_SERIAL_READ_LINEARIZABILITY_KEY, "false"));
+    private static final DenylistMetrics denylistMetrics = new DenylistMetrics();
+
+    private static final PartitionDenylist partitionDenylist = new PartitionDenylist();
+
+    private volatile long logBlockingReadRepairAttemptsUntilNanos = Long.MIN_VALUE;
 
     private StorageProxy()
     {
@@ -199,7 +222,7 @@
         {
             EndpointsForToken selected = targets.contacts().withoutSelf();
             Replicas.temporaryAssertFull(selected); // TODO CASSANDRA-14548
-            counterWriteTask(mutation, targets.withContact(selected), responseHandler, localDataCenter).run();
+            counterWriteTask(mutation, targets.withContacts(selected), responseHandler, localDataCenter).run();
         };
 
         counterWriteOnCoordinatorPerformer = (mutation, targets, responseHandler, localDataCenter) ->
@@ -207,20 +230,20 @@
             EndpointsForToken selected = targets.contacts().withoutSelf();
             Replicas.temporaryAssertFull(selected); // TODO CASSANDRA-14548
             Stage.COUNTER_MUTATION.executor()
-                                  .execute(counterWriteTask(mutation, targets.withContact(selected), responseHandler, localDataCenter));
+                                  .execute(counterWriteTask(mutation, targets.withContacts(selected), responseHandler, localDataCenter));
         };
 
 
         ReadRepairMetrics.init();
 
-        if (disableSerialReadLinearizability)
+        if (!Paxos.isLinearizable())
         {
-            logger.warn("This node was started with -D{}. SERIAL (and LOCAL_SERIAL) reads coordinated by this node " +
-                        "will not offer linearizability (see CASSANDRA-12126 for details on what this mean) with " +
-                        "respect to other SERIAL operations. Please note that, with this flag, SERIAL reads will be " +
-                        "slower than QUORUM reads, yet offer no more guarantee. This flag should only be used in " +
+            logger.warn("This node was started with paxos variant {}. SERIAL (and LOCAL_SERIAL) reads coordinated by this node " +
+                        "will not offer linearizability (see CASSANDRA-12126 for details on what this means) with " +
+                        "respect to other SERIAL operations. Please note that with this variant, SERIAL reads will be " +
+                        "slower than QUORUM reads, yet offer no additional guarantees. This flag should only be used in " +
                         "the restricted case of upgrading from a pre-CASSANDRA-12126 version, and only if you " +
-                        "understand the tradeoff.", DISABLE_SERIAL_READ_LINEARIZABILITY_KEY);
+                        "understand the tradeoff.", Paxos.getPaxosVariant());
         }
     }
 
@@ -271,17 +294,40 @@
                                   CASRequest request,
                                   ConsistencyLevel consistencyForPaxos,
                                   ConsistencyLevel consistencyForCommit,
-                                  ClientState state,
+                                  ClientState clientState,
                                   int nowInSeconds,
                                   long queryStartNanoTime)
     throws UnavailableException, IsBootstrappingException, RequestFailureException, RequestTimeoutException, InvalidRequestException, CasWriteUnknownResultException
     {
-        final long startTimeForMetrics = System.nanoTime();
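+        // fail fast if the partition key is denylisted for writes, before doing any Paxos work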
+        if (DatabaseDescriptor.getPartitionDenylistEnabled() && DatabaseDescriptor.getDenylistWritesEnabled() && !partitionDenylist.isKeyPermitted(keyspaceName, cfName, key.getKey()))
+        {
+            denylistMetrics.incrementWritesRejected();
+            throw new InvalidRequestException(String.format("Unable to CAS write to denylisted partition [0x%s] in %s/%s",
+                                                            key.toString(), keyspaceName, cfName));
+        }
+
+        return Paxos.useV2()
+                ? Paxos.cas(key, request, consistencyForPaxos, consistencyForCommit, clientState)
+                : legacyCas(keyspaceName, cfName, key, request, consistencyForPaxos, consistencyForCommit, clientState, nowInSeconds, queryStartNanoTime);
+    }
+
+    public static RowIterator legacyCas(String keyspaceName,
+                                        String cfName,
+                                        DecoratedKey key,
+                                        CASRequest request,
+                                        ConsistencyLevel consistencyForPaxos,
+                                        ConsistencyLevel consistencyForCommit,
+                                        ClientState clientState,
+                                        int nowInSeconds,
+                                        long queryStartNanoTime)
+    throws UnavailableException, IsBootstrappingException, RequestFailureException, RequestTimeoutException, InvalidRequestException
+    {
+        final long startTimeForMetrics = nanoTime();
         try
         {
             TableMetadata metadata = Schema.instance.validateTable(keyspaceName, cfName);
 
-            Supplier<Pair<PartitionUpdate, RowIterator>> updateProposer = () ->
+            Function<Ballot, Pair<PartitionUpdate, RowIterator>> updateProposer = ballot ->
             {
                 // read the current values and check they validate the conditions
                 Tracing.trace("Reading existing values for CAS precondition");
@@ -302,7 +348,7 @@
                 }
 
                 // Create the desired updates
-                PartitionUpdate updates = request.makeUpdates(current);
+                PartitionUpdate updates = request.makeUpdates(current, clientState, ballot);
 
                 long size = updates.dataSize();
                 casWriteMetrics.mutationSize.update(size);
@@ -325,7 +371,6 @@
                            consistencyForPaxos,
                            consistencyForCommit,
                            consistencyForCommit,
-                           state,
                            queryStartNanoTime,
                            casWriteMetrics,
                            updateProposer);
@@ -348,6 +393,12 @@
             writeMetricsForLevel(consistencyForPaxos).timeouts.mark();
             throw e;
         }
+        catch (ReadAbortException e)
+        {
+            casWriteMetrics.markAbort(e);
+            writeMetricsForLevel(consistencyForPaxos).markAbort(e);
+            throw e;
+        }
         catch (WriteFailureException | ReadFailureException e)
         {
             casWriteMetrics.failures.mark();
@@ -362,7 +413,7 @@
         }
         finally
         {
-            final long latency = System.nanoTime() - startTimeForMetrics;
+            final long latency = nanoTime() - startTimeForMetrics;
             casWriteMetrics.addNano(latency);
             writeMetricsForLevel(consistencyForPaxos).addNano(latency);
         }
@@ -398,7 +449,6 @@
      *     {@link ConsistencyLevel#LOCAL_SERIAL}).
      * @param consistencyForReplayCommits the consistency for the commit phase of "replayed" in-progress operations.
      * @param consistencyForCommit the consistency for the commit phase of _this_ operation update.
-     * @param state the client state.
      * @param queryStartNanoTime the nano time for the start of the query this is part of. This is the base time for
      *     timeouts.
      * @param casMetrics the metrics to update for this operation.
@@ -414,10 +464,9 @@
                                        ConsistencyLevel consistencyForPaxos,
                                        ConsistencyLevel consistencyForReplayCommits,
                                        ConsistencyLevel consistencyForCommit,
-                                       ClientState state,
                                        long queryStartNanoTime,
                                        CASClientRequestMetrics casMetrics,
-                                       Supplier<Pair<PartitionUpdate, RowIterator>> createUpdateProposal)
+                                       Function<Ballot, Pair<PartitionUpdate, RowIterator>> createUpdateProposal)
     throws UnavailableException, IsBootstrappingException, RequestFailureException, RequestTimeoutException, InvalidRequestException
     {
         int contentions = 0;
@@ -430,7 +479,7 @@
             consistencyForCommit.validateForCasCommit(latestRs);
 
             long timeoutNanos = DatabaseDescriptor.getCasContentionTimeout(NANOSECONDS);
-            while (System.nanoTime() - queryStartNanoTime < timeoutNanos)
+            while (nanoTime() - queryStartNanoTime < timeoutNanos)
             {
                 // for simplicity, we'll do a single liveness check at the start of each attempt
                 ReplicaPlan.ForPaxosWrite replicaPlan = ReplicaPlans.forPaxos(keyspace, key, consistencyForPaxos);
@@ -441,13 +490,12 @@
                                                                     replicaPlan,
                                                                     consistencyForPaxos,
                                                                     consistencyForReplayCommits,
-                                                                    casMetrics,
-                                                                    state);
+                                                                    casMetrics);
 
-                final UUID ballot = pair.ballot;
+                final Ballot ballot = pair.ballot;
                 contentions += pair.contentions;
 
-                Pair<PartitionUpdate, RowIterator> proposalPair = createUpdateProposal.get();
+                Pair<PartitionUpdate, RowIterator> proposalPair = createUpdateProposal.apply(ballot);
                 // See method javadoc: null here is code for "stop here and return null".
                 if (proposalPair == null)
                     return null;
@@ -472,6 +520,7 @@
 
                 Tracing.trace("Paxos proposal not accepted (pre-empted by a higher ballot)");
                 contentions++;
+
                 Uninterruptibles.sleepUninterruptibly(ThreadLocalRandom.current().nextInt(100), TimeUnit.MILLISECONDS);
                 // continue to retry
             }
@@ -509,25 +558,23 @@
                                                                 ReplicaPlan.ForPaxosWrite paxosPlan,
                                                                 ConsistencyLevel consistencyForPaxos,
                                                                 ConsistencyLevel consistencyForCommit,
-                                                                CASClientRequestMetrics casMetrics,
-                                                                ClientState state)
+                                                                CASClientRequestMetrics casMetrics)
     throws WriteTimeoutException, WriteFailureException
     {
         long timeoutNanos = DatabaseDescriptor.getCasContentionTimeout(NANOSECONDS);
 
         PrepareCallback summary = null;
         int contentions = 0;
-        while (System.nanoTime() - queryStartNanoTime < timeoutNanos)
+        while (nanoTime() - queryStartNanoTime < timeoutNanos)
         {
             // We want a timestamp that is guaranteed to be unique for that node (so that the ballot is globally unique), but if we've got a prepare rejected
             // already we also want to make sure we pick a timestamp that has a chance to be promised, i.e. one that is greater than the most recently known
             // in progress (#5667). Lastly, we don't want to use a timestamp that is older than the last one assigned by ClientState or operations may appear
             // out-of-order (#7801).
-            long minTimestampMicrosToUse = summary == null ? Long.MIN_VALUE : 1 + UUIDGen.microsTimestamp(summary.mostRecentInProgressCommit.ballot);
-            long ballotMicros = state.getTimestampForPaxos(minTimestampMicrosToUse);
+            long minTimestampMicrosToUse = summary == null ? Long.MIN_VALUE : 1 + summary.mostRecentInProgressCommit.ballot.unixMicros();
             // Note that the chosen timestamp is not guaranteed to be unique if two proposals are being handled concurrently by the same coordinator. But we still
             // need ballots to be unique for each proposal, which is why the ballot itself is generated via nextBallot rather than derived directly from the timestamp.
-            UUID ballot = UUIDGen.getRandomTimeUUIDFromMicros(ballotMicros);
+            Ballot ballot = nextBallot(minTimestampMicrosToUse, consistencyForPaxos == SERIAL ? GLOBAL : LOCAL);
 
             // prepare
             try
@@ -588,8 +635,7 @@
                 // https://issues.apache.org/jira/browse/CASSANDRA-5062?focusedCommentId=13619810&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13619810)
                 // Since we waited for quorum nodes, if some of them haven't seen the last commit (which may just be a timing issue, but may also
                 // mean we lost messages), we pro-actively "repair" those nodes, and retry.
-                int nowInSec = Ints.checkedCast(TimeUnit.MICROSECONDS.toSeconds(ballotMicros));
-                Iterable<InetAddressAndPort> missingMRC = summary.replicasMissingMostRecentCommit(metadata, nowInSec);
+                Iterable<InetAddressAndPort> missingMRC = summary.replicasMissingMostRecentCommit();
                 if (Iterables.size(missingMRC) > 0)
                 {
                     Tracing.trace("Repairing replicas that missed the most recent commit");
@@ -628,10 +674,14 @@
     {
         PrepareCallback callback = new PrepareCallback(toPrepare.update.partitionKey(), toPrepare.update.metadata(), replicaPlan.requiredParticipants(), replicaPlan.consistencyLevel(), queryStartNanoTime);
         Message<Commit> message = Message.out(PAXOS_PREPARE_REQ, toPrepare);
+
+        boolean hasLocalRequest = false;
+
         for (Replica replica: replicaPlan.contacts())
         {
             if (replica.isSelf())
             {
+                hasLocalRequest = true;
                 PAXOS_PREPARE_REQ.stage.execute(() -> {
                     try
                     {
@@ -648,6 +698,12 @@
                 MessagingService.instance().sendWithCallback(message, replica.endpoint(), callback);
             }
         }
+
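+        // a prepare that includes this node counts as a local request for the write metrics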
+        if (hasLocalRequest)
+            writeMetrics.localRequests.mark();
+        else
+            writeMetrics.remoteRequests.mark();
+
         callback.await();
         return callback;
     }
@@ -703,11 +759,11 @@
 
         AbstractWriteResponseHandler<Commit> responseHandler = null;
         // NOTE: this ReplicaPlan is a lie, this usage of ReplicaPlan could do with being clarified - the selected() collection is essentially (I think) never used
-        ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, tk, ReplicaPlans.writeAll);
+        ReplicaPlan.ForWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, tk, ReplicaPlans.writeAll);
         if (shouldBlock)
         {
             AbstractReplicationStrategy rs = replicaPlan.replicationStrategy();
-            responseHandler = rs.getWriteResponseHandler(replicaPlan, null, WriteType.SIMPLE, queryStartNanoTime);
+            responseHandler = rs.getWriteResponseHandler(replicaPlan, null, WriteType.SIMPLE, proposal::makeMutation, queryStartNanoTime);
         }
 
         Message<Commit> message = Message.outWithFlag(PAXOS_COMMIT_REQ, proposal, MessageFlag.CALL_BACK_ON_FAILURE);
@@ -723,7 +779,7 @@
                     if (replica.isSelf())
                         commitPaxosLocal(replica, message, responseHandler);
                     else
-                        MessagingService.instance().sendWriteWithCallback(message, replica, responseHandler, allowHints && shouldHint(replica));
+                        MessagingService.instance().sendWriteWithCallback(message, replica, responseHandler);
                 }
                 else
                 {
@@ -760,7 +816,7 @@
             {
                 try
                 {
-                    PaxosState.commit(message.payload);
+                    PaxosState.commitDirect(message.payload);
                     if (responseHandler != null)
                         responseHandler.onResponse(null);
                 }
@@ -788,7 +844,7 @@
      *
      * @param mutations the mutations to be applied across the replicas
      * @param consistencyLevel the consistency level for the operation
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static void mutate(List<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws UnavailableException, OverloadedException, WriteTimeoutException, WriteFailureException
@@ -796,7 +852,7 @@
         Tracing.trace("Determining replicas for mutation");
         final String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
         List<AbstractWriteResponseHandler<IMutation>> responseHandlers = new ArrayList<>(mutations.size());
         WriteType plainWriteType = mutations.size() <= 1 ? WriteType.SIMPLE : WriteType.UNLOGGED_BATCH;
@@ -864,7 +920,7 @@
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
             writeMetrics.addNano(latency);
             writeMetricsForLevel(consistencyLevel).addNano(latency);
             updateCoordinatorWriteLatencyTableMetric(mutations, latency);
@@ -920,7 +976,7 @@
      * @param mutations the mutations to be applied across the replicas
      * @param writeCommitLog if commitlog should be written
      * @param baseComplete time from epoch in ms that the local base mutation was(or will be) completed
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static void mutateMV(ByteBuffer dataKey, Collection<Mutation> mutations, boolean writeCommitLog, AtomicLong baseComplete, long queryStartNanoTime)
     throws UnavailableException, OverloadedException, WriteTimeoutException
@@ -928,13 +984,13 @@
         Tracing.trace("Determining replicas for mutation");
         final String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
 
         try
         {
             // if we haven't joined the ring, write everything to batchlog because paired replicas may be stale
-            final UUID batchUUID = UUIDGen.getTimeUUID();
+            final TimeUUID batchUUID = nextTimeUUID();
 
             if (StorageService.instance.isStarting() || StorageService.instance.isJoining() || StorageService.instance.isMoving())
             {
@@ -951,7 +1007,7 @@
                 ConsistencyLevel consistencyLevel = ConsistencyLevel.ONE;
 
                 //Since the base -> view replication is 1:1 we only need to store the BL locally
-                ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forLocalBatchlogWrite();
+                ReplicaPlan.ForWrite replicaPlan = ReplicaPlans.forLocalBatchlogWrite();
                 BatchlogCleanup cleanup = new BatchlogCleanup(mutations.size(),
                                                               () -> asyncRemoveFromBatchlog(replicaPlan, batchUUID));
 
@@ -1023,7 +1079,7 @@
         }
         finally
         {
-            viewWriteMetrics.addNano(System.nanoTime() - startTime);
+            viewWriteMetrics.addNano(nanoTime() - startTime);
         }
     }
 
@@ -1034,6 +1090,25 @@
                                           long queryStartNanoTime)
     throws WriteTimeoutException, WriteFailureException, UnavailableException, OverloadedException, InvalidRequestException
     {
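+        // reject the whole batch up front if any mutation targets a denylisted partition key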
+        if (DatabaseDescriptor.getPartitionDenylistEnabled() && DatabaseDescriptor.getDenylistWritesEnabled())
+        {
+            for (final IMutation mutation : mutations)
+            {
+                for (final TableId tid : mutation.getTableIds())
+                {
+                    if (!partitionDenylist.isKeyPermitted(tid, mutation.key().getKey()))
+                    {
+                        denylistMetrics.incrementWritesRejected();
+                        // While Schema.instance.getTableMetadata() can return a null value, in this case the isKeyPermitted
+                        // call above ensures that we cannot have a null associated tid at this point.
+                        final TableMetadata tmd = Schema.instance.getTableMetadata(tid);
+                        throw new InvalidRequestException(String.format("Unable to write to denylisted partition [0x%s] in %s/%s",
+                                                                        mutation.key().toString(), tmd.keyspace, tmd.name));
+                    }
+                }
+            }
+        }
+
         Collection<Mutation> augmented = TriggerExecutor.instance.execute(mutations);
 
         boolean updatesView = Keyspace.open(mutations.iterator().next().getKeyspaceName())
@@ -1064,7 +1139,7 @@
      * @param mutations the Mutations to be applied across the replicas
      * @param consistency_level the consistency level for the operation
      * @param requireQuorumForRemove at least a quorum of nodes will see update before deleting batchlog
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static void mutateAtomically(Collection<Mutation> mutations,
                                         ConsistencyLevel consistency_level,
@@ -1073,7 +1148,7 @@
     throws UnavailableException, OverloadedException, WriteTimeoutException
     {
         Tracing.trace("Determining replicas for atomic batch");
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
         List<WriteResponseHandlerWrapper> wrappers = new ArrayList<>(mutations.size());
 
@@ -1096,9 +1171,9 @@
                     batchConsistencyLevel = consistency_level;
             }
 
-            ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forBatchlogWrite(batchConsistencyLevel == ConsistencyLevel.ANY);
+            ReplicaPlan.ForWrite replicaPlan = ReplicaPlans.forBatchlogWrite(batchConsistencyLevel == ConsistencyLevel.ANY);
 
-            final UUID batchUUID = UUIDGen.getTimeUUID();
+            final TimeUUID batchUUID = nextTimeUUID();
             BatchlogCleanup cleanup = new BatchlogCleanup(mutations.size(),
                                                           () -> asyncRemoveFromBatchlog(replicaPlan, batchUUID));
 
@@ -1144,7 +1219,7 @@
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
             writeMetrics.addNano(latency);
             writeMetricsForLevel(consistency_level).addNano(latency);
             updateCoordinatorWriteLatencyTableMetric(mutations, latency);
@@ -1173,11 +1248,12 @@
         }
     }
 
-    private static void syncWriteToBatchlog(Collection<Mutation> mutations, ReplicaPlan.ForTokenWrite replicaPlan, UUID uuid, long queryStartNanoTime)
+    private static void syncWriteToBatchlog(Collection<Mutation> mutations, ReplicaPlan.ForWrite replicaPlan, TimeUUID uuid, long queryStartNanoTime)
     throws WriteTimeoutException, WriteFailureException
     {
         WriteResponseHandler<?> handler = new WriteResponseHandler(replicaPlan,
                                                                    WriteType.BATCH_LOG,
+                                                                   null,
                                                                    queryStartNanoTime);
 
         Batch batch = Batch.createLocal(uuid, FBUtilities.timestampMicros(), mutations);
@@ -1194,9 +1270,9 @@
         handler.get();
     }
 
-    private static void asyncRemoveFromBatchlog(ReplicaPlan.ForTokenWrite replicaPlan, UUID uuid)
+    private static void asyncRemoveFromBatchlog(ReplicaPlan.ForWrite replicaPlan, TimeUUID uuid)
     {
-        Message<UUID> message = Message.out(Verb.BATCH_REMOVE_REQ, uuid);
+        Message<TimeUUID> message = Message.out(Verb.BATCH_REMOVE_REQ, uuid);
         for (Replica target : replicaPlan.contacts())
         {
             if (logger.isTraceEnabled())
@@ -1214,7 +1290,7 @@
         for (WriteResponseHandlerWrapper wrapper : wrappers)
         {
             Replicas.temporaryAssertFull(wrapper.handler.replicaPlan.liveAndDown());  // TODO: CASSANDRA-14549
-            ReplicaPlan.ForTokenWrite replicas = wrapper.handler.replicaPlan.withContact(wrapper.handler.replicaPlan.liveAndDown());
+            ReplicaPlan.ForWrite replicas = wrapper.handler.replicaPlan.withContacts(wrapper.handler.replicaPlan.liveAndDown());
 
             try
             {
@@ -1236,7 +1312,7 @@
         {
             EndpointsForToken sendTo = wrapper.handler.replicaPlan.liveAndDown();
             Replicas.temporaryAssertFull(sendTo); // TODO: CASSANDRA-14549
-            sendToHintedReplicas(wrapper.mutation, wrapper.handler.replicaPlan.withContact(sendTo), wrapper.handler, localDataCenter, stage);
+            sendToHintedReplicas(wrapper.mutation, wrapper.handler.replicaPlan.withContacts(sendTo), wrapper.handler, localDataCenter, stage);
         }
 
         for (WriteResponseHandlerWrapper wrapper : wrappers)
@@ -1255,7 +1331,7 @@
      * given the list of write endpoints (either standardWritePerformer for
      * standard writes or counterWritePerformer for counter writes).
      * @param callback an optional callback to be run if and when the write is
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static AbstractWriteResponseHandler<IMutation> performWrite(IMutation mutation,
                                                                        ConsistencyLevel consistencyLevel,
@@ -1269,9 +1345,15 @@
         Keyspace keyspace = Keyspace.open(keyspaceName);
         Token tk = mutation.key().getToken();
 
-        ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, tk, ReplicaPlans.writeNormal);
+        ReplicaPlan.ForWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, tk, ReplicaPlans.writeNormal);
+
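+        // track whether this coordinator is itself one of the replicas for the write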
+        if (replicaPlan.lookup(FBUtilities.getBroadcastAddressAndPort()) != null)
+            writeMetrics.localRequests.mark();
+        else
+            writeMetrics.remoteRequests.mark();
+
         AbstractReplicationStrategy rs = replicaPlan.replicationStrategy();
-        AbstractWriteResponseHandler<IMutation> responseHandler = rs.getWriteResponseHandler(replicaPlan, callback, writeType, queryStartNanoTime);
+        AbstractWriteResponseHandler<IMutation> responseHandler = rs.getWriteResponseHandler(replicaPlan, callback, writeType, mutation.hintOnFailure(), queryStartNanoTime);
 
         performer.apply(mutation, replicaPlan, responseHandler, localDataCenter);
         return responseHandler;
@@ -1288,9 +1370,15 @@
         Keyspace keyspace = Keyspace.open(mutation.getKeyspaceName());
         Token tk = mutation.key().getToken();
 
-        ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, tk, ReplicaPlans.writeNormal);
+        ReplicaPlan.ForWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, tk, ReplicaPlans.writeNormal);
+
+        if (replicaPlan.lookup(FBUtilities.getBroadcastAddressAndPort()) != null)
+            writeMetrics.localRequests.mark();
+        else
+            writeMetrics.remoteRequests.mark();
+
         AbstractReplicationStrategy rs = replicaPlan.replicationStrategy();
-        AbstractWriteResponseHandler<IMutation> writeHandler = rs.getWriteResponseHandler(replicaPlan,null, writeType, queryStartNanoTime);
+        AbstractWriteResponseHandler<IMutation> writeHandler = rs.getWriteResponseHandler(replicaPlan, null, writeType, mutation, queryStartNanoTime);
         BatchlogResponseHandler<IMutation> batchHandler = new BatchlogResponseHandler<>(writeHandler, batchConsistencyLevel.blockFor(rs), cleanup, queryStartNanoTime);
         return new WriteResponseHandlerWrapper(batchHandler, mutation);
     }
@@ -1309,12 +1397,12 @@
                                                                             long queryStartNanoTime)
     {
         Keyspace keyspace = Keyspace.open(mutation.getKeyspaceName());
-        ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, liveAndDown, ReplicaPlans.writeAll);
+        ReplicaPlan.ForWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, liveAndDown, ReplicaPlans.writeAll);
         AbstractReplicationStrategy replicationStrategy = replicaPlan.replicationStrategy();
         AbstractWriteResponseHandler<IMutation> writeHandler = replicationStrategy.getWriteResponseHandler(replicaPlan, () -> {
-            long delay = Math.max(0, System.currentTimeMillis() - baseComplete.get());
+            long delay = Math.max(0, currentTimeMillis() - baseComplete.get());
             viewWriteMetrics.viewWriteLatency.update(delay, MILLISECONDS);
-        }, writeType, queryStartNanoTime);
+        }, writeType, mutation, queryStartNanoTime);
         BatchlogResponseHandler<IMutation> batchHandler = new ViewWriteMetricsWrapped(writeHandler, batchConsistencyLevel.blockFor(replicationStrategy), cleanup, queryStartNanoTime);
         return new WriteResponseHandlerWrapper(batchHandler, mutation);
     }
@@ -1350,7 +1438,7 @@
      * @throws OverloadedException if the hints cannot be written/enqueued
      */
     public static void sendToHintedReplicas(final Mutation mutation,
-                                            ReplicaPlan.ForTokenWrite plan,
+                                            ReplicaPlan.ForWrite plan,
                                             AbstractWriteResponseHandler<IMutation> responseHandler,
                                             String localDataCenter,
                                             Stage stage)
@@ -1441,7 +1529,7 @@
         if (localDc != null)
         {
             for (Replica destination : localDc)
-                MessagingService.instance().sendWriteWithCallback(message, destination, responseHandler, true);
+                MessagingService.instance().sendWriteWithCallback(message, destination, responseHandler);
         }
         if (dcGroups != null)
         {
@@ -1483,7 +1571,7 @@
 
             for (Replica replica : forwardToReplicas)
             {
-                MessagingService.instance().callbacks.addWithExpiration(handler, message, replica, handler.replicaPlan.consistencyLevel(), true);
+                MessagingService.instance().callbacks.addWithExpiration(handler, message, replica);
                 logger.trace("Adding FWD message to {}@{}", message.id(), replica);
             }
 
@@ -1498,7 +1586,7 @@
             target = targets.get(0);
         }
 
-        MessagingService.instance().sendWriteWithCallback(message, target, handler, true);
+        MessagingService.instance().sendWriteWithCallback(message, target, handler);
         logger.trace("Sending message to {}@{}", message.id(), target);
     }
 
@@ -1585,13 +1673,18 @@
             // we build this ONLY to perform the sufficiency check that happens on construction
             ReplicaPlans.forWrite(keyspace, cm.consistency(), tk, ReplicaPlans.writeAll);
 
+            // This host isn't a replica, so mark the request as being remote. If this host is a 
+            // replica, applyCounterMutationOnCoordinator() in the branch above will call performWrite(), and 
+            // there we'll mark a local request against the metrics.
+            writeMetrics.remoteRequests.mark();
+
             // Forward the actual update to the chosen leader replica
             AbstractWriteResponseHandler<IMutation> responseHandler = new WriteResponseHandler<>(ReplicaPlans.forForwardingCounterWrite(keyspace, tk, replica),
-                                                                                                 WriteType.COUNTER, queryStartNanoTime);
+                                                                                                 WriteType.COUNTER, null, queryStartNanoTime);
 
             Tracing.trace("Enqueuing counter update to {}", replica);
             Message message = Message.outWithFlag(Verb.COUNTER_MUTATION_REQ, cm, MessageFlag.CALL_BACK_ON_FAILURE);
-            MessagingService.instance().sendWriteWithCallback(message, replica, responseHandler, false);
+            MessagingService.instance().sendWriteWithCallback(message, replica, responseHandler);
             return responseHandler;
         }
     }
@@ -1660,7 +1753,7 @@
     }
 
     private static Runnable counterWriteTask(final IMutation mutation,
-                                             final ReplicaPlan.ForTokenWrite replicaPlan,
+                                             final ReplicaPlan.ForWrite replicaPlan,
                                              final AbstractWriteResponseHandler<IMutation> responseHandler,
                                              final String localDataCenter)
     {
@@ -1689,50 +1782,58 @@
     public static RowIterator readOne(SinglePartitionReadCommand command, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws UnavailableException, IsBootstrappingException, ReadFailureException, ReadTimeoutException, InvalidRequestException
     {
-        return readOne(command, consistencyLevel, null, queryStartNanoTime);
-    }
-
-    public static RowIterator readOne(SinglePartitionReadCommand command, ConsistencyLevel consistencyLevel, ClientState state, long queryStartNanoTime)
-    throws UnavailableException, IsBootstrappingException, ReadFailureException, ReadTimeoutException, InvalidRequestException
-    {
-        return PartitionIterators.getOnlyElement(read(SinglePartitionReadCommand.Group.one(command), consistencyLevel, state, queryStartNanoTime), command);
-    }
-
-    public static PartitionIterator read(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
-    throws UnavailableException, IsBootstrappingException, ReadFailureException, ReadTimeoutException, InvalidRequestException
-    {
-        // When using serial CL, the ClientState should be provided
-        assert !consistencyLevel.isSerialConsistency();
-        return read(group, consistencyLevel, null, queryStartNanoTime);
+        return PartitionIterators.getOnlyElement(read(SinglePartitionReadCommand.Group.one(command), consistencyLevel, queryStartNanoTime), command);
     }
 
     /**
      * Performs the actual reading of a row out of the StorageService, fetching
      * a specific set of column names from a given column family.
      */
-    public static PartitionIterator read(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, ClientState state, long queryStartNanoTime)
+    public static PartitionIterator read(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws UnavailableException, IsBootstrappingException, ReadFailureException, ReadTimeoutException, InvalidRequestException
     {
         if (StorageService.instance.isBootstrapMode() && !systemKeyspaceQuery(group.queries))
         {
             readMetrics.unavailables.mark();
             readMetricsForLevel(consistencyLevel).unavailables.mark();
-            throw new IsBootstrappingException();
+            IsBootstrappingException exception = new IsBootstrappingException();
+            logRequestException(exception, group.queries);
+            throw exception;
+        }
+
+        if (DatabaseDescriptor.getPartitionDenylistEnabled() && DatabaseDescriptor.getDenylistReadsEnabled())
+        {
+            for (SinglePartitionReadCommand command : group.queries)
+            {
+                if (!partitionDenylist.isKeyPermitted(command.metadata().id, command.partitionKey().getKey()))
+                {
+                    denylistMetrics.incrementReadsRejected();
+                    throw new InvalidRequestException(String.format("Unable to read denylisted partition [0x%s] in %s/%s",
+                                                                    command.partitionKey().toString(), command.metadata().keyspace, command.metadata().name));
+                }
+            }
         }
 
         return consistencyLevel.isSerialConsistency()
-             ? readWithPaxos(group, consistencyLevel, state, queryStartNanoTime)
+             ? readWithPaxos(group, consistencyLevel, queryStartNanoTime)
              : readRegular(group, consistencyLevel, queryStartNanoTime);
     }
 
-    private static PartitionIterator readWithPaxos(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, ClientState state, long queryStartNanoTime)
+    private static PartitionIterator readWithPaxos(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws InvalidRequestException, UnavailableException, ReadFailureException, ReadTimeoutException
     {
-        assert state != null;
+        return Paxos.useV2()
+                ? Paxos.read(group, consistencyLevel)
+                : legacyReadWithPaxos(group, consistencyLevel, queryStartNanoTime);
+    }
+
+    private static PartitionIterator legacyReadWithPaxos(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
+    throws InvalidRequestException, UnavailableException, ReadFailureException, ReadTimeoutException
+    {
         if (group.queries.size() > 1)
             throw new InvalidRequestException("SERIAL/LOCAL_SERIAL consistency may only be requested for one partition at a time");
 
-        long start = System.nanoTime();
+        long start = nanoTime();
         SinglePartitionReadCommand command = group.queries.get(0);
         TableMetadata metadata = command.metadata();
         DecoratedKey key = command.partitionKey();
@@ -1750,10 +1851,10 @@
             {
                 // Commit an empty update to make sure all in-progress updates that should be finished first are, _and_
                 // that no other in-progress update can get resurrected.
-                Supplier<Pair<PartitionUpdate, RowIterator>> updateProposer =
-                    disableSerialReadLinearizability
-                    ? () -> null
-                    : () -> Pair.create(PartitionUpdate.emptyUpdate(metadata, key), null);
+                Function<Ballot, Pair<PartitionUpdate, RowIterator>> updateProposer =
+                    !Paxos.isLinearizable()
+                    ? ballot -> null
+                    : ballot -> Pair.create(PartitionUpdate.emptyUpdate(metadata, key), null);
                 // When replaying, we commit at quorum/local quorum, as we want to be sure the following read (done at
                 // quorum/local_quorum) sees any replayed updates. Our own update is however empty, and those don't even
                 // get committed due to an optimization described in doPaxos/beginAndRepairPaxos, so the commit
@@ -1763,7 +1864,6 @@
                         consistencyLevel,
                         consistencyForReplayCommitsOrFetch,
                         ConsistencyLevel.ANY,
-                        state,
                         start,
                         casReadMetrics,
                         updateProposer);
@@ -1784,6 +1884,7 @@
             readMetrics.unavailables.mark();
             casReadMetrics.unavailables.mark();
             readMetricsForLevel(consistencyLevel).unavailables.mark();
+            logRequestException(e, group.queries);
             throw e;
         }
         catch (ReadTimeoutException e)
@@ -1791,6 +1892,14 @@
             readMetrics.timeouts.mark();
             casReadMetrics.timeouts.mark();
             readMetricsForLevel(consistencyLevel).timeouts.mark();
+            logRequestException(e, group.queries);
+            throw e;
+        }
+        catch (ReadAbortException e)
+        {
+            readMetrics.markAbort(e);
+            casReadMetrics.markAbort(e);
+            readMetricsForLevel(consistencyLevel).markAbort(e);
             throw e;
         }
         catch (ReadFailureException e)
@@ -1802,7 +1911,7 @@
         }
         finally
         {
-            long latency = System.nanoTime() - start;
+            long latency = nanoTime() - start;
             readMetrics.addNano(latency);
             casReadMetrics.addNano(latency);
             readMetricsForLevel(consistencyLevel).addNano(latency);
@@ -1816,7 +1925,7 @@
     private static PartitionIterator readRegular(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws UnavailableException, ReadFailureException, ReadTimeoutException
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         try
         {
             PartitionIterator result = fetchRows(group.queries, consistencyLevel, queryStartNanoTime);
@@ -1833,12 +1942,19 @@
         {
             readMetrics.unavailables.mark();
             readMetricsForLevel(consistencyLevel).unavailables.mark();
+            logRequestException(e, group.queries);
             throw e;
         }
         catch (ReadTimeoutException e)
         {
             readMetrics.timeouts.mark();
             readMetricsForLevel(consistencyLevel).timeouts.mark();
+            logRequestException(e, group.queries);
+            throw e;
+        }
+        catch (ReadAbortException e)
+        {
+            recordReadRegularAbort(consistencyLevel, e);
             throw e;
         }
         catch (ReadFailureException e)
@@ -1849,7 +1965,7 @@
         }
         finally
         {
-            long latency = System.nanoTime() - start;
+            long latency = nanoTime() - start;
             readMetrics.addNano(latency);
             readMetricsForLevel(consistencyLevel).addNano(latency);
             // TODO avoid giving every command the same latency number. Can fix this in CASSANDRA-5329
@@ -1858,6 +1974,12 @@
         }
     }
 
+    public static void recordReadRegularAbort(ConsistencyLevel consistencyLevel, Throwable cause)
+    {
+        readMetrics.markAbort(cause);
+        readMetricsForLevel(consistencyLevel).markAbort(cause);
+    }
+
     public static PartitionIterator concatAndBlockOnRepair(List<PartitionIterator> iterators, List<ReadRepair<?, ?>> repairs)
     {
         PartitionIterator concatenated = PartitionIterators.concat(iterators);
@@ -1909,6 +2031,11 @@
         for (int i=0; i<cmdCount; i++)
         {
             reads[i] = AbstractReadExecutor.getReadExecutor(commands.get(i), consistencyLevel, queryStartNanoTime);
+
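+            // mark the request as local or remote for the read metrics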
+            if (reads[i].hasLocalRead())
+                readMetrics.localRequests.mark();
+            else
+                readMetrics.remoteRequests.mark();
         }
 
         // sends a data request to the closest replica, and a digest request to the others. If we have a speculating
@@ -1927,9 +2054,10 @@
 
         // wait for enough responses to meet the consistency level. If there's a digest mismatch, begin the read
         // repair process by sending full data reads to all replicas we received responses from.
+        boolean logBlockingRepairAttempts = instance.isLoggingReadRepairs();
         for (int i=0; i<cmdCount; i++)
         {
-            reads[i].awaitResponses();
+            reads[i].awaitResponses(logBlockingRepairAttempts);
         }
 
         // read repair - if it looks like we may not receive enough full data responses to meet CL, send
@@ -1982,6 +2110,9 @@
         {
             try
             {
+                MessageParams.reset();
+
+                boolean readRejected = false;
                 command.setMonitoringTime(approxCreationTimeNanos, false, verb.expiresAfterNanos(), DatabaseDescriptor.getSlowQueryTimeout(NANOSECONDS));
 
                 ReadResponse response;
@@ -1990,6 +2121,14 @@
                 {
                     response = command.createResponse(iterator, controller.getRepairedDataInfo());
                 }
+                catch (RejectException e)
+                {
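+                    // a rejected read is surfaced as a failure unless the client is tracking warnings, in which case an empty response is returned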
+                    if (!command.isTrackingWarnings())
+                        throw e;
+                    
+                    response = command.createEmptyResponse();
+                    readRejected = true;
+                }
 
                 if (command.complete())
                 {
@@ -1997,11 +2136,12 @@
                 }
                 else
                 {
-                    MessagingService.instance().metrics.recordSelfDroppedMessage(verb, MonotonicClock.approxTime.now() - approxCreationTimeNanos, NANOSECONDS);
+                    MessagingService.instance().metrics.recordSelfDroppedMessage(verb, MonotonicClock.Global.approxTime.now() - approxCreationTimeNanos, NANOSECONDS);
                     handler.onFailure(FBUtilities.getBroadcastAddressAndPort(), RequestFailureReason.UNKNOWN);
                 }
 
-                MessagingService.instance().latencySubscribers.add(FBUtilities.getBroadcastAddressAndPort(), MonotonicClock.approxTime.now() - approxCreationTimeNanos, NANOSECONDS);
+                if (!readRejected)
+                    MessagingService.instance().latencySubscribers.add(FBUtilities.getBroadcastAddressAndPort(), MonotonicClock.Global.approxTime.now() - approxCreationTimeNanos, NANOSECONDS);
             }
             catch (Throwable t)
             {
@@ -2023,6 +2163,18 @@
                                                   ConsistencyLevel consistencyLevel,
                                                   long queryStartNanoTime)
     {
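+        // reject the range read up front if it would cover any denylisted partition keys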
+        if (DatabaseDescriptor.getPartitionDenylistEnabled() && DatabaseDescriptor.getDenylistRangeReadsEnabled())
+        {
+            final int denylisted = partitionDenylist.getDeniedKeysInRangeCount(command.metadata().id, command.dataRange().keyRange());
+            if (denylisted > 0)
+            {
+                denylistMetrics.incrementRangeReadsRejected();
+                String tokens = command.loggableTokens();
+                throw new InvalidRequestException(String.format("Attempted to read a range containing %d denylisted keys in %s/%s." +
+                                                                " Range read: %s", denylisted, command.metadata().keyspace, command.metadata().name,
+                                                                tokens));
+            }
+        }
         return RangeCommands.partitions(command, consistencyLevel, queryStartNanoTime);
     }
 
@@ -2046,16 +2198,16 @@
         final String myVersion = Schema.instance.getVersion().toString();
         final Map<InetAddressAndPort, UUID> versions = new ConcurrentHashMap<>();
         final Set<InetAddressAndPort> liveHosts = Gossiper.instance.getLiveMembers();
-        final CountDownLatch latch = new CountDownLatch(liveHosts.size());
+        final CountDownLatch latch = newCountDownLatch(liveHosts.size());
 
         RequestCallback<UUID> cb = message ->
         {
             // record the response from the remote node.
             versions.put(message.from(), message.payload);
-            latch.countDown();
+            latch.decrement();
         };
         // an empty message acts as a request to the SchemaVersionVerbHandler.
-        Message message = Message.out(Verb.SCHEMA_VERSION_REQ, noPayload);
+        Message message = out(SCHEMA_VERSION_REQ, noPayload);
         for (InetAddressAndPort endpoint : liveHosts)
             MessagingService.instance().sendWithCallback(message, endpoint, cb);
 
@@ -2064,14 +2216,14 @@
             // wait for as long as possible. timeout-1s if possible.
             latch.await(DatabaseDescriptor.getRpcTimeout(NANOSECONDS), NANOSECONDS);
         }
-        catch (InterruptedException ex)
+        catch (InterruptedException e)
         {
-            throw new AssertionError("This latch shouldn't have been interrupted.");
+            throw new UncheckedInterruptedException(e);
         }
 
         // maps versions to hosts that are on that version.
         Map<String, List<String>> results = new HashMap<String, List<String>>();
-        Iterable<InetAddressAndPort> allHosts = Iterables.concat(Gossiper.instance.getLiveMembers(), Gossiper.instance.getUnreachableMembers());
+        Iterable<InetAddressAndPort> allHosts = concat(Gossiper.instance.getLiveMembers(), Gossiper.instance.getUnreachableMembers());
         for (InetAddressAndPort host : allHosts)
         {
             UUID version = versions.get(host);
@@ -2087,7 +2239,7 @@
 
         // we're done: the results map is ready to return to the client.  the rest is just debug logging:
         if (results.get(UNREACHABLE) != null)
-            logger.debug("Hosts not in agreement. Didn't get a response from everybody: {}", StringUtils.join(results.get(UNREACHABLE), ","));
+            logger.debug("Hosts not in agreement. Didn't get a response from everybody: {}", join(results.get(UNREACHABLE), ","));
         for (Map.Entry<String, List<String>> entry : results.entrySet())
         {
             // check for version disagreement. log the hosts that don't agree.
@@ -2143,11 +2295,40 @@
         DatabaseDescriptor.setMaxHintWindow(ms);
     }
 
+    public int getMaxHintsSizePerHostInMiB()
+    {
+        return DatabaseDescriptor.getMaxHintsSizePerHostInMiB();
+    }
+
+    public void setMaxHintsSizePerHostInMiB(int value)
+    {
+        DatabaseDescriptor.setMaxHintsSizePerHostInMiB(value);
+    }
+
     public static boolean shouldHint(Replica replica)
     {
-        if (!DatabaseDescriptor.hintedHandoffEnabled())
-            return false;
-        if (replica.isTransient() || replica.isSelf())
+        return shouldHint(replica, true);
+    }
+
+    /**
+     * Determines whether a hint should be stored for the given replica.
+     * It rejects early if any of the following conditions is met:
+     * - Hints are disabled entirely or for the datacenter the replica belongs to
+     * - The replica is transient or is the local node
+     * - The replica is no longer part of the ring
+     * - The hint window has expired
+     * - The hints have reached the size limit for the node
+     * Otherwise, the hint is permitted.
+     *
+     * @param replica the replica for the hint
+     * @param tryEnablePersistentWindow true to take hint_window_persistent_enabled into account; false to ignore it
+     * @return true to permit the hint, false to reject it
+     */
+    public static boolean shouldHint(Replica replica, boolean tryEnablePersistentWindow)
+    {
+        if (!DatabaseDescriptor.hintedHandoffEnabled()
+            || replica.isTransient()
+            || replica.isSelf())
             return false;
 
         Set<String> disabledDCs = DatabaseDescriptor.hintedHandoffDisabledDCs();
@@ -2160,13 +2341,46 @@
                 return false;
             }
         }
-        boolean hintWindowExpired = Gossiper.instance.getEndpointDowntime(replica.endpoint()) > DatabaseDescriptor.getMaxHintWindow();
+
+        InetAddressAndPort endpoint = replica.endpoint();
+        int maxHintWindow = DatabaseDescriptor.getMaxHintWindow();
+        long endpointDowntime = Gossiper.instance.getEndpointDowntime(endpoint);
+        boolean hintWindowExpired = endpointDowntime > maxHintWindow;
+
+        UUID hostIdForEndpoint = StorageService.instance.getHostIdForEndpoint(endpoint);
+        if (hostIdForEndpoint == null)
+        {
+            Tracing.trace("Discarding hint for endpoint not part of ring: {}", endpoint);
+            return false;
+        }
+
+        // if the persistent hint window is enabled, hintWindowExpired may be recomputed from the timestamp of the earliest stored hint
+        if (tryEnablePersistentWindow && !hintWindowExpired && DatabaseDescriptor.hintWindowPersistentEnabled())
+        {
+            long earliestHint = HintsService.instance.getEarliestHintForHost(hostIdForEndpoint);
+            hintWindowExpired = Clock.Global.currentTimeMillis() - maxHintWindow > earliestHint;
+            if (hintWindowExpired)
+                Tracing.trace("Not hinting {} for which there is the earliest hint stored at {}", replica, earliestHint);
+        }
+
         if (hintWindowExpired)
         {
-            HintsService.instance.metrics.incrPastWindow(replica.endpoint());
-            Tracing.trace("Not hinting {} which has been down {} ms", replica, Gossiper.instance.getEndpointDowntime(replica.endpoint()));
+            HintsService.instance.metrics.incrPastWindow(endpoint);
+            Tracing.trace("Not hinting {} which has been down {} ms", endpoint, endpointDowntime);
+            return false;
         }
-        return !hintWindowExpired;
+
+        long maxHintsSize = DatabaseDescriptor.getMaxHintsSizePerHost();
+        long actualTotalHintsSize = HintsService.instance.getTotalHintsSize(hostIdForEndpoint);
+        boolean hasHintsReachedMaxSize = maxHintsSize > 0 && actualTotalHintsSize > maxHintsSize;
+        if (hasHintsReachedMaxSize)
+        {
+            Tracing.trace("Not hinting {} which has reached to the max hints size {} bytes on disk. The actual hints size on disk: {}",
+                          endpoint, maxHintsSize, actualTotalHintsSize);
+            return false;
+        }
+
+        return true;
     }
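Editor's note: a standalone sketch of the hint admission decision implemented by shouldHint() above. All types here are illustrative stand-ins rather than Cassandra classes, and the 3-hour window is only the commonly cited default, not read from any configuration.

public class ShouldHintSketch
{
    static final class NodeState
    {
        boolean hintsEnabled = true;
        boolean transientOrSelf = false;
        boolean inRing = true;
        long downtimeMillis = 0;
        long maxHintWindowMillis = 3 * 60 * 60 * 1000L; // assumed 3h window for illustration
        long totalHintsSizeBytes = 0;
        long maxHintsSizeBytes = 0;                     // 0 means no per-host size cap
    }

    static boolean shouldHint(NodeState s)
    {
        if (!s.hintsEnabled || s.transientOrSelf)
            return false;                                          // disabled, transient, or self
        if (!s.inRing)
            return false;                                          // endpoint no longer part of the ring
        if (s.downtimeMillis > s.maxHintWindowMillis)
            return false;                                          // hint window expired
        if (s.maxHintsSizeBytes > 0 && s.totalHintsSizeBytes > s.maxHintsSizeBytes)
            return false;                                          // per-host hint size cap reached
        return true;
    }

    public static void main(String[] args)
    {
        NodeState s = new NodeState();
        s.downtimeMillis = 4 * 60 * 60 * 1000L; // down for 4h, longer than the 3h window
        System.out.println(shouldHint(s));      // prints false
    }
}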
 
     /**
@@ -2225,7 +2439,7 @@
     public interface WritePerformer
     {
         public void apply(IMutation mutation,
-                          ReplicaPlan.ForTokenWrite targets,
+                          ReplicaPlan.ForWrite targets,
                           AbstractWriteResponseHandler<IMutation> responseHandler,
                           String localDataCenter) throws OverloadedException;
     }
@@ -2258,13 +2472,13 @@
 
         public DroppableRunnable(Verb verb)
         {
-            this.approxCreationTimeNanos = MonotonicClock.approxTime.now();
+            this.approxCreationTimeNanos = MonotonicClock.Global.approxTime.now();
             this.verb = verb;
         }
 
         public final void run()
         {
-            long approxCurrentTimeNanos = MonotonicClock.approxTime.now();
+            long approxCurrentTimeNanos = MonotonicClock.Global.approxTime.now();
             long expirationTimeNanos = verb.expiresAtNanos(approxCreationTimeNanos);
             if (approxCurrentTimeNanos > expirationTimeNanos)
             {
@@ -2291,7 +2505,7 @@
      */
     private static abstract class LocalMutationRunnable implements Runnable
     {
-        private final long approxCreationTimeNanos = MonotonicClock.approxTime.now();
+        private final long approxCreationTimeNanos = MonotonicClock.Global.approxTime.now();
 
         private final Replica localReplica;
 
@@ -2303,7 +2517,7 @@
         public final void run()
         {
             final Verb verb = verb();
-            long nowNanos = MonotonicClock.approxTime.now();
+            long nowNanos = MonotonicClock.Global.approxTime.now();
             long expirationTimeNanos = verb.expiresAtNanos(approxCreationTimeNanos);
             if (nowNanos > expirationTimeNanos)
             {
@@ -2335,6 +2549,17 @@
         abstract protected void runMayThrow() throws Exception;
     }
 
+    public static void logRequestException(Exception exception, Collection<? extends ReadCommand> commands)
+    {
+        NoSpamLogger.log(logger, NoSpamLogger.Level.INFO, FAILURE_LOGGING_INTERVAL_SECONDS, TimeUnit.SECONDS,
+                         "\"{}\" while executing {}",
+                         () -> new Object[]
+                               {
+                                   exception.getMessage(),
+                                   commands.stream().map(ReadCommand::toCQLString).collect(Collectors.joining("; "))
+                               });
+    }
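Editor's note: logRequestException() above relies on NoSpamLogger to keep repeated read failures from flooding the log. The toy stand-in below shows the rate-limiting idea only; it is not Cassandra's NoSpamLogger and its API is invented for illustration.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class NoSpamSketch
{
    private final Map<String, Long> lastLoggedNanos = new ConcurrentHashMap<>();
    private final long intervalNanos;

    NoSpamSketch(long intervalSeconds)
    {
        this.intervalNanos = intervalSeconds * 1_000_000_000L;
    }

    void log(String key, String message)
    {
        long now = System.nanoTime();
        Long last = lastLoggedNanos.get(key);
        if (last == null || now - last >= intervalNanos)
        {
            lastLoggedNanos.put(key, now);
            System.out.println(message); // a real implementation would delegate to a Logger
        }
    }

    public static void main(String[] args)
    {
        NoSpamSketch log = new NoSpamSketch(10);
        for (int i = 0; i < 5; i++)
            log.log("read-failure", "\"timeout\" while executing SELECT ..."); // printed only once
    }
}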
+
     /**
      * HintRunnable will decrease totalHintsInProgress and targetHints when finished.
      * It is the caller's responsibility to increment them initially.
@@ -2435,7 +2660,7 @@
                         logger.debug("Discarding hint for endpoint not part of ring: {}", target);
                 }
                 logger.trace("Adding hints for {}", validTargets);
-                HintsService.instance.write(hostIds, Hint.create(mutation, System.currentTimeMillis()));
+                HintsService.instance.write(hostIds, Hint.create(mutation, currentTimeMillis()));
                 validTargets.forEach(HintsService.instance.metrics::incrCreatedHints);
                 // Notify the handler only for CL == ANY
                 if (responseHandler != null && responseHandler.replicaPlan.consistencyLevel() == ConsistencyLevel.ANY)
@@ -2498,6 +2723,11 @@
         return ReadRepairMetrics.repairedBackground.getCount();
     }
 
+    public long getReadRepairRepairTimedOut()
+    {
+        return ReadRepairMetrics.timedOut.getCount();
+    }
+
     public int getNumberOfTables()
     {
         return Schema.instance.getNumberOfTables();
@@ -2598,10 +2828,10 @@
 
     static class PaxosBallotAndContention
     {
-        final UUID ballot;
+        final Ballot ballot;
         final int contentions;
 
-        PaxosBallotAndContention(UUID ballot, int contentions)
+        PaxosBallotAndContention(Ballot ballot, int contentions)
         {
             this.ballot = ballot;
             this.contentions = contentions;
@@ -2678,4 +2908,180 @@
     {
         DatabaseDescriptor.setCheckForDuplicateRowsDuringCompaction(false);
     }
+
+    public void initialLoadPartitionDenylist()
+    {
+        partitionDenylist.initialLoad();
+    }
+
+    @Override
+    public void loadPartitionDenylist()
+    {
+        partitionDenylist.load();
+    }
+
+    @Override
+    public int getPartitionDenylistLoadAttempts()
+    {
+        return partitionDenylist.getLoadAttempts();
+    }
+
+    @Override
+    public int getPartitionDenylistLoadSuccesses()
+    {
+        return partitionDenylist.getLoadSuccesses();
+    }
+
+    @Override
+    public void setEnablePartitionDenylist(boolean enabled)
+    {
+        DatabaseDescriptor.setPartitionDenylistEnabled(enabled);
+    }
+
+    @Override
+    public void setEnableDenylistWrites(boolean enabled)
+    {
+        DatabaseDescriptor.setDenylistWritesEnabled(enabled);
+    }
+
+    @Override
+    public void setEnableDenylistReads(boolean enabled)
+    {
+        DatabaseDescriptor.setDenylistReadsEnabled(enabled);
+    }
+
+    @Override
+    public void setEnableDenylistRangeReads(boolean enabled)
+    {
+        DatabaseDescriptor.setDenylistRangeReadsEnabled(enabled);
+    }
+
+    @Override
+    public void setDenylistMaxKeysPerTable(int value)
+    {
+        DatabaseDescriptor.setDenylistMaxKeysPerTable(value);
+    }
+
+    @Override
+    public void setDenylistMaxKeysTotal(int value)
+    {
+        DatabaseDescriptor.setDenylistMaxKeysTotal(value);
+    }
+
+    /**
+     * Actively denies read and write access to the provided Partition Key
+     * @param keyspace Name of keyspace containing the PK you wish to deny access to
+     * @param table Name of table containing the PK you wish to deny access to
+     * @param partitionKeyAsString String representation of the PK you want to deny access to
+     * @return true if successfully added, false if failure
+     */
+    @Override
+    public boolean denylistKey(String keyspace, String table, String partitionKeyAsString)
+    {
+        if (!Schema.instance.getKeyspaces().contains(keyspace))
+            return false;
+
+        final ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(keyspace, table);
+        if (cfs == null)
+            return false;
+
+        final ByteBuffer bytes = cfs.metadata.get().partitionKeyType.fromString(partitionKeyAsString);
+        return partitionDenylist.addKeyToDenylist(keyspace, table, bytes);
+    }
+
+    /**
+     * Attempts to remove the provided pk from the ks + table deny list
+     * @param keyspace Keyspace containing the pk to remove the denylist entry for
+     * @param table Table containing the pk to remove denylist entry for
+     * @param partitionKeyAsString String representation of the PK you want to re-allow access to
+     * @return true if found and removed, false if not
+     */
+    @Override
+    public boolean removeDenylistKey(String keyspace, String table, String partitionKeyAsString)
+    {
+        if (!Schema.instance.getKeyspaces().contains(keyspace))
+            return false;
+
+        final ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(keyspace, table);
+        if (cfs == null)
+            return false;
+
+        final ByteBuffer bytes = cfs.metadata.get().partitionKeyType.fromString(partitionKeyAsString);
+        return partitionDenylist.removeKeyFromDenylist(keyspace, table, bytes);
+    }
+
+    /**
+     * A simple check for operators to determine what the denylisted value for a pk is on a node
+     */
+    public boolean isKeyDenylisted(String keyspace, String table, String partitionKeyAsString)
+    {
+        if (!Schema.instance.getKeyspaces().contains(keyspace))
+            return false;
+
+        final ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(keyspace, table);
+        if (cfs == null)
+            return false;
+
+        final ByteBuffer bytes = cfs.metadata.get().partitionKeyType.fromString(partitionKeyAsString);
+        return !partitionDenylist.isKeyPermitted(keyspace, table, bytes);
+    }
+
+    @Override
+    public void logBlockingReadRepairAttemptsForNSeconds(int seconds)
+    {
+        logBlockingReadRepairAttemptsUntilNanos = nanoTime() + TimeUnit.SECONDS.toNanos(seconds);
+    }
+
+    @Override
+    public boolean isLoggingReadRepairs()
+    {
+        return nanoTime() <= StorageProxy.instance.logBlockingReadRepairAttemptsUntilNanos;
+    }
+
+    @Override
+    public void setPaxosVariant(String variant)
+    {
+        Preconditions.checkNotNull(variant);
+        Paxos.setPaxosVariant(Config.PaxosVariant.valueOf(variant));
+    }
+
+    @Override
+    public String getPaxosVariant()
+    {
+        return Paxos.getPaxosVariant().toString();
+    }
+
+    @Override
+    public boolean getUseStatementsEnabled()
+    {
+        return DatabaseDescriptor.getUseStatementsEnabled();
+    }
+
+    @Override
+    public void setUseStatementsEnabled(boolean enabled)
+    {
+        DatabaseDescriptor.setUseStatementsEnabled(enabled);
+    }
+
+    public void setPaxosContentionStrategy(String spec)
+    {
+        ContentionStrategy.setStrategy(spec);
+    }
+
+    public String getPaxosContentionStrategy()
+    {
+        return ContentionStrategy.getStrategySpec();
+    }
+
+    @Override
+    public void setPaxosCoordinatorLockingDisabled(boolean disabled)
+    {
+        PaxosState.setDisableCoordinatorLocking(disabled);
+    }
+
+    @Override
+    public boolean getPaxosCoordinatorLockingDisabled()
+    {
+        return PaxosState.getDisableCoordinatorLocking();
+    }
 }
diff --git a/src/java/org/apache/cassandra/service/StorageProxyMBean.java b/src/java/org/apache/cassandra/service/StorageProxyMBean.java
index e3cde4b..546143d 100644
--- a/src/java/org/apache/cassandra/service/StorageProxyMBean.java
+++ b/src/java/org/apache/cassandra/service/StorageProxyMBean.java
@@ -32,6 +32,8 @@
     public Set<String> getHintedHandoffDisabledDCs();
     public int getMaxHintWindow();
     public void setMaxHintWindow(int ms);
+    public int getMaxHintsSizePerHostInMiB();
+    public void setMaxHintsSizePerHostInMiB(int value);
     public int getMaxHintsInProgress();
     public void setMaxHintsInProgress(int qs);
     public int getHintsInProgress();
@@ -59,9 +61,24 @@
     public long getReadRepairAttempted();
     public long getReadRepairRepairedBlocking();
     public long getReadRepairRepairedBackground();
+    public long getReadRepairRepairTimedOut();
 
     @Deprecated
     public int getOtcBacklogExpirationInterval();
+
+    public void loadPartitionDenylist();
+    public int getPartitionDenylistLoadAttempts();
+    public int getPartitionDenylistLoadSuccesses();
+    public void setEnablePartitionDenylist(boolean enabled);
+    public void setEnableDenylistWrites(boolean enabled);
+    public void setEnableDenylistReads(boolean enabled);
+    public void setEnableDenylistRangeReads(boolean enabled);
+    public boolean denylistKey(String keyspace, String table, String partitionKeyAsString);
+    public boolean removeDenylistKey(String keyspace, String table, String partitionKeyAsString);
+    public void setDenylistMaxKeysPerTable(int value);
+    public void setDenylistMaxKeysTotal(int value);
+    public boolean isKeyDenylisted(String keyspace, String table, String partitionKeyAsString);
+
     @Deprecated
     public void setOtcBacklogExpirationInterval(int intervalInMillis);
 
@@ -74,6 +91,9 @@
     public String getIdealConsistencyLevel();
     public String setIdealConsistencyLevel(String cl);
 
+    public void logBlockingReadRepairAttemptsForNSeconds(int seconds);
+    public boolean isLoggingReadRepairs();
+
     /**
      * Tracking and reporting of variances in the repaired data set across replicas at read time
      */
@@ -103,4 +123,16 @@
     boolean getCheckForDuplicateRowsDuringCompaction();
     void enableCheckForDuplicateRowsDuringCompaction();
     void disableCheckForDuplicateRowsDuringCompaction();
+
+    void setPaxosVariant(String variant);
+    String getPaxosVariant();
+
+    boolean getUseStatementsEnabled();
+    void setUseStatementsEnabled(boolean enabled);
+
+    void setPaxosContentionStrategy(String variant);
+    String getPaxosContentionStrategy();
+
+    void setPaxosCoordinatorLockingDisabled(boolean disabled);
+    boolean getPaxosCoordinatorLockingDisabled();
 }
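Editor's note: the new denylist operations declared in StorageProxyMBean can be driven from any JMX client. Below is a minimal sketch using the standard javax.management API; the object name org.apache.cassandra.db:type=StorageProxy and the port 7199 are the conventional defaults and are assumptions to be adjusted per deployment, and the keyspace/table/key values are placeholders.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class DenylistJmxExample
{
    public static void main(String[] args) throws Exception
    {
        // Assumes a locally reachable, unauthenticated JMX endpoint.
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try
        {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            ObjectName name = new ObjectName("org.apache.cassandra.db:type=StorageProxy");
            String[] signature = { String.class.getName(), String.class.getName(), String.class.getName() };

            // Operations declared in the MBean interface above.
            Object added = mbs.invoke(name, "denylistKey", new Object[]{ "ks", "tbl", "bad-key" }, signature);
            Object denied = mbs.invoke(name, "isKeyDenylisted", new Object[]{ "ks", "tbl", "bad-key" }, signature);
            System.out.println("added=" + added + ", denylisted=" + denied);
        }
        finally
        {
            connector.close();
        }
    }
}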
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index b53467e..ed10f04 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -17,15 +17,16 @@
  */
 package org.apache.cassandra.service;
 
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
-import java.io.File;
 import java.io.IOError;
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
-import java.nio.file.Paths;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -48,13 +49,12 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 import java.util.regex.MatchResult;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
@@ -68,11 +68,13 @@
 import javax.management.openmbean.TabularDataSupport;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
 import com.google.common.base.Predicates;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -80,9 +82,6 @@
 import com.google.common.collect.Ordering;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.RateLimiter;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.commons.lang3.StringUtils;
@@ -91,23 +90,27 @@
 
 import org.apache.cassandra.audit.AuditLogManager;
 import org.apache.cassandra.audit.AuditLogOptions;
+import org.apache.cassandra.auth.AuthCacheService;
 import org.apache.cassandra.auth.AuthKeyspace;
 import org.apache.cassandra.auth.AuthSchemaChangeListener;
 import org.apache.cassandra.batchlog.BatchlogManager;
 import org.apache.cassandra.concurrent.ExecutorLocals;
+import org.apache.cassandra.concurrent.FutureTask;
+import org.apache.cassandra.concurrent.FutureTaskWithResources;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.Config.PaxosStatePurging;
+import org.apache.cassandra.config.DataStorageSpec;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.config.DurationSpec;
 import org.apache.cassandra.cql3.QueryHandler;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
-import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.SizeEstimatesRecorder;
 import org.apache.cassandra.db.SnapshotDetailsTabularData;
 import org.apache.cassandra.db.SystemKeyspace;
@@ -142,7 +145,9 @@
 import org.apache.cassandra.io.sstable.SSTableLoader;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.VersionAndType;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.PathUtils;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.locator.DynamicEndpointSnitch;
 import org.apache.cassandra.locator.EndpointsByRange;
@@ -165,18 +170,28 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.repair.RepairRunnable;
-import org.apache.cassandra.repair.SystemDistributedKeyspace;
 import org.apache.cassandra.repair.messages.RepairOption;
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
 import org.apache.cassandra.schema.KeyspaceMetadata;
-import org.apache.cassandra.schema.MigrationCoordinator;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.Keyspaces;
 import org.apache.cassandra.schema.ReplicationParams;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.SchemaTransformations;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
+import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.schema.ViewMetadata;
+import org.apache.cassandra.service.disk.usage.DiskUsageBroadcaster;
+import org.apache.cassandra.service.paxos.Paxos;
+import org.apache.cassandra.service.paxos.PaxosCommit;
+import org.apache.cassandra.service.paxos.PaxosRepair;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupLocalCoordinator;
+import org.apache.cassandra.service.paxos.cleanup.PaxosTableRepairs;
+import org.apache.cassandra.service.snapshot.SnapshotManager;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.streaming.StreamManager;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.streaming.StreamPlan;
@@ -185,6 +200,7 @@
 import org.apache.cassandra.tracing.TraceKeyspace;
 import org.apache.cassandra.transport.ClientResourceLimits;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MBeanWrapper;
@@ -192,8 +208,11 @@
 import org.apache.cassandra.utils.OutputHandler;
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.Throwables;
-import org.apache.cassandra.utils.WindowsTimer;
 import org.apache.cassandra.utils.WrappedRunnable;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 import org.apache.cassandra.utils.logging.LoggingSupportFactory;
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressEventType;
@@ -208,6 +227,7 @@
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
 import static java.util.stream.Collectors.toList;
 import static java.util.stream.Collectors.toMap;
 import static org.apache.cassandra.config.CassandraRelevantProperties.BOOTSTRAP_SCHEMA_DELAY_MS;
@@ -218,8 +238,12 @@
 import static org.apache.cassandra.index.SecondaryIndexManager.isIndexColumnFamily;
 import static org.apache.cassandra.net.NoPayload.noPayload;
 import static org.apache.cassandra.net.Verb.REPLICATION_DONE_REQ;
-import static org.apache.cassandra.schema.MigrationManager.evolveSystemKeyspace;
+import static org.apache.cassandra.service.ActiveRepairService.ParentRepairStatus;
+import static org.apache.cassandra.service.ActiveRepairService.repairCommandExecutor;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.apache.cassandra.utils.FBUtilities.now;
 
 /**
  * This abstraction contains the token/identifier of this node
@@ -232,7 +256,7 @@
     private static final Logger logger = LoggerFactory.getLogger(StorageService.class);
 
     public static final int INDEFINITE = -1;
-    public static final int RING_DELAY = getRingDelay(); // delay after which we assume ring has stablized
+    public static final int RING_DELAY_MILLIS = getRingDelay(); // delay after which we assume the ring has stabilized
     public static final int SCHEMA_DELAY_MILLIS = getSchemaDelay();
 
     private static final boolean REQUIRE_SCHEMAS = !BOOTSTRAP_SKIP_SCHEMA_CHECK.getBoolean();
@@ -241,7 +265,7 @@
 
     private static int getRingDelay()
     {
-        String newdelay = System.getProperty("cassandra.ring_delay_ms");
+        String newdelay = CassandraRelevantProperties.RING_DELAY.getString();
         if (newdelay != null)
         {
             logger.info("Overriding RING_DELAY to {}ms", newdelay);
@@ -277,6 +301,8 @@
     private final List<Runnable> preShutdownHooks = new ArrayList<>();
     private final List<Runnable> postShutdownHooks = new ArrayList<>();
 
+    private final SnapshotManager snapshotManager = new SnapshotManager();
+
     public static final StorageService instance = new StorageService();
 
     @Deprecated
@@ -305,6 +331,16 @@
                 .getAddressReplicas(FBUtilities.getBroadcastAddressAndPort());
     }
 
+    public List<Range<Token>> getLocalRanges(String ks)
+    {
+        InetAddressAndPort broadcastAddress = FBUtilities.getBroadcastAddressAndPort();
+        Keyspace keyspace = Keyspace.open(ks);
+        List<Range<Token>> ranges = new ArrayList<>();
+        for (Replica r : keyspace.getReplicationStrategy().getAddressReplicas(broadcastAddress))
+            ranges.add(r.range());
+        return ranges;
+    }
+
     public List<Range<Token>> getLocalAndPendingRanges(String ks)
     {
         InetAddressAndPort broadcastAddress = FBUtilities.getBroadcastAddressAndPort();
@@ -351,7 +387,7 @@
     /* the probability for tracing any particular request, 0 disables tracing and 1 enables for all */
     private double traceProbability = 0.0;
 
-    private static enum Mode { STARTING, NORMAL, JOINING, LEAVING, DECOMMISSIONED, MOVING, DRAINING, DRAINED }
+    private enum Mode { STARTING, NORMAL, JOINING, LEAVING, DECOMMISSIONED, MOVING, DRAINING, DRAINED }
     private volatile Mode operationMode = Mode.STARTING;
 
     /* Used for tracking drain progress */
@@ -398,6 +434,7 @@
         setGossipTokens(localTokens);
         tokenMetadata.updateNormalTokens(tokens, FBUtilities.getBroadcastAddressAndPort());
         setMode(Mode.NORMAL, false);
+        invalidateLocalRanges();
     }
 
     public void setGossipTokens(Collection<Token> tokens)
@@ -476,7 +513,7 @@
                 setGossipTokens(tokens);
 
             Gossiper.instance.forceNewerGeneration();
-            Gossiper.instance.start((int) (System.currentTimeMillis() / 1000));
+            Gossiper.instance.start((int) (currentTimeMillis() / 1000));
             gossipActive = true;
         }
     }
@@ -588,9 +625,7 @@
 
     public boolean isDaemonSetupCompleted()
     {
-        return daemon == null
-               ? false
-               : daemon.setupCompleted();
+        return daemon != null && daemon.setupCompleted();
     }
 
     public void stopDaemon()
@@ -809,17 +844,22 @@
         initialized = true;
         gossipActive = true;
         Gossiper.instance.register(this);
-        Gossiper.instance.start((int) (System.currentTimeMillis() / 1000)); // needed for node-ring gathering.
+        Gossiper.instance.start((int) (currentTimeMillis() / 1000)); // needed for node-ring gathering.
         Gossiper.instance.addLocalApplicationState(ApplicationState.NET_VERSION, valueFactory.networkVersion());
         MessagingService.instance().listen();
     }
 
     public synchronized void initServer() throws ConfigurationException
     {
-        initServer(RING_DELAY);
+        initServer(SCHEMA_DELAY_MILLIS, RING_DELAY_MILLIS);
     }
 
-    public synchronized void initServer(int delay) throws ConfigurationException
+    public synchronized void initServer(int schemaAndRingDelayMillis) throws ConfigurationException
+    {
+        initServer(schemaAndRingDelayMillis, RING_DELAY_MILLIS);
+    }
+
+    public synchronized void initServer(int schemaTimeoutMillis, int ringTimeoutMillis) throws ConfigurationException
     {
         logger.info("Cassandra version: {}", FBUtilities.getReleaseVersionString());
         logger.info("CQL version: {}", QueryProcessor.CQL_VERSION);
@@ -853,11 +893,18 @@
             public void runMayThrow() throws InterruptedException, ExecutionException, IOException
             {
                 drain(true);
-
-                if (FBUtilities.isWindows)
-                    WindowsTimer.endTimerPeriod(DatabaseDescriptor.getWindowsTimerInterval());
-
-                LoggingSupportFactory.getLoggingSupport().onShutdown();
+                try
+                {
+                    ExecutorUtils.shutdownNowAndWait(1, MINUTES, ScheduledExecutors.scheduledFastTasks);
+                }
+                catch (Throwable t)
+                {
+                    logger.warn("Unable to terminate fast tasks within 1 minute.", t);
+                }
+                finally
+                {
+                    LoggingSupportFactory.getLoggingSupport().onShutdown();
+                }
             }
         }, "StorageServiceShutdownHook");
         Runtime.getRuntime().addShutdownHook(drainOnShutdown);
@@ -886,7 +933,7 @@
 
         if (joinRing)
         {
-            joinTokenRing(delay);
+            joinTokenRing(schemaTimeoutMillis, ringTimeoutMillis);
         }
         else
         {
@@ -931,21 +978,28 @@
             SystemKeyspace.removeEndpoint(FBUtilities.getBroadcastAddressAndPort());
 
         Map<InetAddressAndPort, UUID> loadedHostIds = SystemKeyspace.loadHostIds();
+        Map<UUID, InetAddressAndPort> hostIdToEndpointMap = new HashMap<>();
         for (InetAddressAndPort ep : loadedTokens.keySet())
         {
-            tokenMetadata.updateNormalTokens(loadedTokens.get(ep), ep);
-            if (loadedHostIds.containsKey(ep))
-                tokenMetadata.updateHostId(loadedHostIds.get(ep), ep);
+            UUID hostId = loadedHostIds.get(ep);
+            if (hostId != null)
+                hostIdToEndpointMap.put(hostId, ep);
         }
+        tokenMetadata.updateNormalTokens(loadedTokens);
+        tokenMetadata.updateHostIds(hostIdToEndpointMap);
     }
 
-    private boolean isReplacing()
+    public boolean isReplacing()
     {
+        if (replacing)
+            return true;
+
         if (System.getProperty("cassandra.replace_address_first_boot", null) != null && SystemKeyspace.bootstrapComplete())
         {
             logger.info("Replace address on first boot requested; this node is already bootstrapped");
             return false;
         }
+
         return DatabaseDescriptor.getReplaceAddress() != null;
     }
 
@@ -954,11 +1008,10 @@
      */
     public void removeShutdownHook()
     {
+        PathUtils.clearOnExitThreads();
+
         if (drainOnShutdown != null)
             Runtime.getRuntime().removeShutdownHook(drainOnShutdown);
-
-        if (FBUtilities.isWindows)
-            WindowsTimer.endTimerPeriod(DatabaseDescriptor.getWindowsTimerInterval());
     }
 
     private boolean shouldBootstrap()
@@ -973,7 +1026,6 @@
 
     private void prepareToJoin() throws ConfigurationException
     {
-        MigrationCoordinator.instance.start();
         if (!joined)
         {
             Map<ApplicationState, VersionedValue> appStates = new EnumMap<>(ApplicationState.class);
@@ -1011,13 +1063,12 @@
                 {
                     //only go into hibernate state if replacing the same address (CASSANDRA-8523)
                     logger.warn("Writes will not be forwarded to this node during replacement because it has the same address as " +
-                                "the node to be replaced ({}). If the previous node has been down for longer than max_hint_window_in_ms, " +
+                                "the node to be replaced ({}). If the previous node has been down for longer than max_hint_window, " +
                                 "repair must be run after the replacement process in order to make this node consistent.",
                                 DatabaseDescriptor.getReplaceAddress());
                     appStates.put(ApplicationState.STATUS_WITH_PORT, valueFactory.hibernate(true));
                     appStates.put(ApplicationState.STATUS, valueFactory.hibernate(true));
                 }
-                MigrationCoordinator.instance.removeAndIgnoreEndpoint(DatabaseDescriptor.getReplaceAddress());
             }
             else
             {
@@ -1063,55 +1114,43 @@
 
             // gossip snitch infos (local DC and rack)
             gossipSnitchInfo();
-            // gossip Schema.emptyVersion forcing immediate check for schema updates (see MigrationManager#maybeScheduleSchemaPull)
-            Schema.instance.updateVersionAndAnnounce(); // Ensure we know our own actual Schema UUID in preparation for updates
+            Schema.instance.startSync();
             LoadBroadcaster.instance.startBroadcasting();
+            DiskUsageBroadcaster.instance.startBroadcasting();
             HintsService.instance.startDispatch();
             BatchlogManager.instance.start();
+            startSnapshotManager();
         }
     }
 
-    public void waitForSchema(long delay)
+    @VisibleForTesting
+    public void startSnapshotManager()
     {
-        // first sleep the delay to make sure we see all our peers
-        for (long i = 0; i < delay; i += 1000)
-        {
-            // if we see schema, we can proceed to the next check directly
-            if (!Schema.instance.isEmpty())
-            {
-                logger.debug("current schema version: {}", Schema.instance.getVersion());
-                break;
-            }
+        snapshotManager.start();
+    }
+
+    public void waitForSchema(long schemaTimeoutMillis, long ringTimeoutMillis)
+    {
+        Instant deadline = FBUtilities.now().plus(java.time.Duration.ofMillis(ringTimeoutMillis));
+
+        while (Schema.instance.isEmpty() && FBUtilities.now().isBefore(deadline))
             Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
-        }
 
-        boolean schemasReceived = MigrationCoordinator.instance.awaitSchemaRequests(SCHEMA_DELAY_MILLIS);
-
-        if (schemasReceived)
-            return;
-
-        logger.warn(String.format("There are nodes in the cluster with a different schema version than us we did not merged schemas from, " +
-                                  "our version : (%s), outstanding versions -> endpoints : %s. Use -Dcassandra.skip_schema_check=true " +
-                                  "to ignore this, -Dcassandra.skip_schema_check_for_endpoints=<ep1[,epN]> to skip specific endpoints," +
-                                  "or -Dcassandra.skip_schema_check_for_versions=<ver1[,verN]> to skip specific schema versions",
-                                  Schema.instance.getVersion(),
-                                  MigrationCoordinator.instance.outstandingVersions()));
-
-        if (REQUIRE_SCHEMAS)
-            throw new RuntimeException("Didn't receive schemas for all known versions within the timeout. " +
-                                       "Use -Dcassandra.skip_schema_check=true to skip this check.");
+        if (!Schema.instance.waitUntilReady(java.time.Duration.ofMillis(schemaTimeoutMillis)))
+            throw new IllegalStateException("Could not achieve schema readiness in " + java.time.Duration.ofMillis(schemaTimeoutMillis));
     }
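Editor's note: the rewritten waitForSchema() polls until a deadline derived from the ring timeout and then requires the schema to become ready within the schema timeout. A minimal sketch of that deadline-bounded polling pattern follows; the readiness predicate and poll interval are illustrative, not Cassandra code.

import java.time.Duration;
import java.time.Instant;
import java.util.function.BooleanSupplier;

public class DeadlineWaitSketch
{
    static boolean waitUntil(BooleanSupplier ready, Duration timeout, Duration pollInterval)
            throws InterruptedException
    {
        Instant deadline = Instant.now().plus(timeout);
        while (!ready.getAsBoolean() && Instant.now().isBefore(deadline))
            Thread.sleep(pollInterval.toMillis());
        return ready.getAsBoolean();
    }

    public static void main(String[] args) throws InterruptedException
    {
        long start = System.nanoTime();
        // Toy condition that becomes "ready" after roughly two seconds.
        boolean ok = waitUntil(() -> System.nanoTime() - start > 2_000_000_000L,
                               Duration.ofSeconds(5), Duration.ofMillis(200));
        System.out.println("ready=" + ok);
    }
}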
 
-    private void joinTokenRing(long schemaTimeoutMillis) throws ConfigurationException
+    private void joinTokenRing(long schemaTimeoutMillis, long ringTimeoutMillis) throws ConfigurationException
     {
-        joinTokenRing(!isSurveyMode, shouldBootstrap(), schemaTimeoutMillis, INDEFINITE);
+        joinTokenRing(!isSurveyMode, shouldBootstrap(), schemaTimeoutMillis, INDEFINITE, ringTimeoutMillis);
     }
 
     @VisibleForTesting
     public void joinTokenRing(boolean finishJoiningRing,
                               boolean shouldBootstrap,
                               long schemaTimeoutMillis,
-                              long bootstrapTimeoutMillis) throws ConfigurationException
+                              long bootstrapTimeoutMillis,
+                              long ringTimeoutMillis) throws ConfigurationException
     {
         joined = true;
 
@@ -1142,7 +1181,7 @@
 
         if (shouldBootstrap)
         {
-            current.addAll(prepareForBootstrap(schemaTimeoutMillis));
+            current.addAll(prepareForBootstrap(schemaTimeoutMillis, ringTimeoutMillis));
             dataAvailable = bootstrap(bootstrapTokens, bootstrapTimeoutMillis);
         }
         else
@@ -1150,7 +1189,7 @@
             bootstrapTokens = SystemKeyspace.getSavedTokens();
             if (bootstrapTokens.isEmpty())
             {
-                bootstrapTokens = BootStrapper.getBootstrapTokens(tokenMetadata, FBUtilities.getBroadcastAddressAndPort(), schemaTimeoutMillis);
+                bootstrapTokens = BootStrapper.getBootstrapTokens(tokenMetadata, FBUtilities.getBroadcastAddressAndPort(), schemaTimeoutMillis, ringTimeoutMillis);
             }
             else
             {
@@ -1181,6 +1220,8 @@
             {
                 logger.warn("Some data streaming failed. Use nodetool to check bootstrap state and resume. For more, see `nodetool help bootstrap`. {}", SystemKeyspace.getBootstrapState());
             }
+
+            StorageProxy.instance.initialLoadPartitionDenylist();
         }
         else
         {
@@ -1219,7 +1260,7 @@
             logger.info("Joining ring by operator request");
             try
             {
-                joinTokenRing(0);
+                joinTokenRing(SCHEMA_DELAY_MILLIS, 0);
                 doAuthSetup(false);
             }
             catch (ConfigurationException e)
@@ -1254,7 +1295,7 @@
     private void executePreJoinTasks(boolean bootstrap)
     {
         StreamSupport.stream(ColumnFamilyStore.all().spliterator(), false)
-                .filter(cfs -> Schema.instance.getUserKeyspaces().contains(cfs.keyspace.getName()))
+                .filter(cfs -> Schema.instance.getUserKeyspaces().names().contains(cfs.keyspace.getName()))
                 .forEach(cfs -> cfs.indexManager.executePreJoinTasksBlocking(bootstrap));
     }
 
@@ -1277,14 +1318,14 @@
         {
             if (setUpSchema)
             {
-                Optional<Mutation> mutation = evolveSystemKeyspace(AuthKeyspace.metadata(), AuthKeyspace.GENERATION);
-                mutation.ifPresent(value -> FBUtilities.waitOnFuture(MigrationManager.announceWithoutPush(Collections.singleton(value))));
+                Schema.instance.transform(SchemaTransformations.updateSystemKeyspace(AuthKeyspace.metadata(), AuthKeyspace.GENERATION));
             }
 
             DatabaseDescriptor.getRoleManager().setup();
             DatabaseDescriptor.getAuthenticator().setup();
             DatabaseDescriptor.getAuthorizer().setup();
             DatabaseDescriptor.getNetworkAuthorizer().setup();
+            AuthCacheService.initializeAndRegisterCaches();
             Schema.instance.registerListener(new AuthSchemaChangeListener());
             authSetupComplete = true;
         }
@@ -1305,14 +1346,9 @@
     @VisibleForTesting
     public void setUpDistributedSystemKeyspaces()
     {
-        Collection<Mutation> changes = new ArrayList<>(3);
-
-        evolveSystemKeyspace(            TraceKeyspace.metadata(),             TraceKeyspace.GENERATION).ifPresent(changes::add);
-        evolveSystemKeyspace(SystemDistributedKeyspace.metadata(), SystemDistributedKeyspace.GENERATION).ifPresent(changes::add);
-        evolveSystemKeyspace(             AuthKeyspace.metadata(),              AuthKeyspace.GENERATION).ifPresent(changes::add);
-
-        if (!changes.isEmpty())
-            FBUtilities.waitOnFuture(MigrationManager.announceWithoutPush(changes));
+        Schema.instance.transform(SchemaTransformations.updateSystemKeyspace(TraceKeyspace.metadata(), TraceKeyspace.GENERATION));
+        Schema.instance.transform(SchemaTransformations.updateSystemKeyspace(SystemDistributedKeyspace.metadata(), SystemDistributedKeyspace.GENERATION));
+        Schema.instance.transform(SchemaTransformations.updateSystemKeyspace(AuthKeyspace.metadata(), AuthKeyspace.GENERATION));
     }
 
     public boolean isJoined()
@@ -1345,12 +1381,6 @@
                                                                      sourceDc, String.join(",", availableDCs)));
                 }
             }
-
-            // check the arguments
-            if (keyspace == null && tokens != null)
-            {
-                throw new IllegalArgumentException("Cannot specify tokens without keyspace.");
-            }
         }
         catch (Throwable ex)
         {
@@ -1358,12 +1388,20 @@
             throw ex;
         }
 
-        logger.info("rebuild from dc: {}, {}, {}", sourceDc == null ? "(any dc)" : sourceDc,
-                    keyspace == null ? "(All keyspaces)" : keyspace,
-                    tokens == null ? "(All tokens)" : tokens);
-
         try
         {
+            // check the arguments
+            if (keyspace == null && tokens != null)
+            {
+                throw new IllegalArgumentException("Cannot specify tokens without keyspace.");
+            }
+
+            logger.info("rebuild from dc: {}, {}, {}", sourceDc == null ? "(any dc)" : sourceDc,
+                        keyspace == null ? "(All keyspaces)" : keyspace,
+                        tokens == null ? "(All tokens)" : tokens);
+
+            repairPaxosForTopologyChange("rebuild");
+
             RangeStreamer streamer = new RangeStreamer(tokenMetadata,
                                                        null,
                                                        FBUtilities.getBroadcastAddressAndPort(),
@@ -1378,7 +1416,7 @@
 
             if (keyspace == null)
             {
-                for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+                for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
                     streamer.addRanges(keyspaceName, getLocalReplicas(keyspaceName));
             }
             else if (tokens == null)
@@ -1457,7 +1495,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException("Interrupted while waiting on rebuild streaming");
+            throw new UncheckedInterruptedException(e);
         }
         catch (ExecutionException e)
         {
@@ -1583,52 +1621,166 @@
         return DatabaseDescriptor.getTruncateRpcTimeout(MILLISECONDS);
     }
 
+    @Deprecated
     public void setStreamThroughputMbPerSec(int value)
     {
-        int oldValue = DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSec();
-        DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(value);
-        StreamManager.StreamRateLimiter.updateThroughput();
-        logger.info("setstreamthroughput: throttle set to {} Mb/s (was {} Mb/s)", value, oldValue);
+        setStreamThroughputMbitPerSec(value);
     }
 
+    public void setStreamThroughputMbitPerSec(int value)
+    {
+        double oldValue = DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSecAsDouble();
+        DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(value);
+        StreamManager.StreamRateLimiter.updateThroughput();
+        logger.info("setstreamthroughput: throttle set to {}{} megabits per second (was approximately {} megabits per second)",
+                    value, value <= 0 ? " (unlimited)" : "", oldValue);
+    }
+
+    public void setStreamThroughputMebibytesPerSec(int value)
+    {
+        double oldValue = DatabaseDescriptor.getStreamThroughputOutboundMebibytesPerSec();
+        DatabaseDescriptor.setStreamThroughputOutboundMebibytesPerSecAsInt(value);
+        StreamManager.StreamRateLimiter.updateThroughput();
+        logger.info("setstreamthroughput: throttle set to {}{} MiB/s (was {} MiB/s)", value, value <= 0 ? " (unlimited)" : "", oldValue);
+    }
+
+    public double getStreamThroughputMebibytesPerSecAsDouble()
+    {
+        return DatabaseDescriptor.getStreamThroughputOutboundMebibytesPerSec();
+    }
+
+    public int getStreamThroughputMebibytesPerSec()
+    {
+        return DatabaseDescriptor.getStreamThroughputOutboundMebibytesPerSecAsInt();
+    }
+
+    @Deprecated
     public int getStreamThroughputMbPerSec()
     {
+        return getStreamThroughputMbitPerSec();
+    }
+
+    @Deprecated
+    public int getStreamThroughputMbitPerSec()
+    {
         return DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSec();
     }
 
-    public void setInterDCStreamThroughputMbPerSec(int value)
+    public double getStreamThroughputMbitPerSecAsDouble()
     {
-        int oldValue = DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSec();
-        DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(value);
-        StreamManager.StreamRateLimiter.updateInterDCThroughput();
-        logger.info("setinterdcstreamthroughput: throttle set to {} Mb/s (was {} Mb/s)", value, oldValue);
+        return DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSecAsDouble();
     }
 
+    public void setEntireSSTableStreamThroughputMebibytesPerSec(int value)
+    {
+        double oldValue = DatabaseDescriptor.getEntireSSTableStreamThroughputOutboundMebibytesPerSec();
+        DatabaseDescriptor.setEntireSSTableStreamThroughputOutboundMebibytesPerSec(value);
+        StreamManager.StreamRateLimiter.updateEntireSSTableThroughput();
+        logger.info("setstreamthroughput (entire SSTable): throttle set to {}{} MiB/s (was {} MiB/s)",
+                    value, value <= 0 ? " (unlimited)" : "", oldValue);
+    }
+
+    public double getEntireSSTableStreamThroughputMebibytesPerSecAsDouble()
+    {
+        return DatabaseDescriptor.getEntireSSTableStreamThroughputOutboundMebibytesPerSec();
+    }
+
+    @Deprecated
+    public void setInterDCStreamThroughputMbPerSec(int value)
+    {
+        setInterDCStreamThroughputMbitPerSec(value);
+    }
+
+    public void setInterDCStreamThroughputMbitPerSec(int value)
+    {
+        double oldValue = DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSecAsDouble();
+        DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(value);
+        StreamManager.StreamRateLimiter.updateInterDCThroughput();
+        logger.info("setinterdcstreamthroughput: throttle set to {}{} megabits per second (was {} megabits per second)", value, value <= 0 ? " (unlimited)" : "", oldValue);
+    }
+
+    @Deprecated
     public int getInterDCStreamThroughputMbPerSec()
     {
+        return getInterDCStreamThroughputMbitPerSec();
+    }
+
+    @Deprecated
+    public int getInterDCStreamThroughputMbitPerSec()
+    {
         return DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSec();
     }
 
+    public double getInterDCStreamThroughputMbitPerSecAsDouble()
+    {
+        return DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSecAsDouble();
+    }
 
+    public void setInterDCStreamThroughputMebibytesPerSec(int value)
+    {
+        double oldValue = DatabaseDescriptor.getInterDCStreamThroughputOutboundMebibytesPerSec();
+        DatabaseDescriptor.setInterDCStreamThroughputOutboundMebibytesPerSecAsInt(value);
+        StreamManager.StreamRateLimiter.updateInterDCThroughput();
+        logger.info("setinterdcstreamthroughput: throttle set to {}{} MiB/s (was {} MiB/s)", value, value <= 0 ? " (unlimited)" : "", oldValue);
+    }
+
+    public int getInterDCStreamThroughputMebibytesPerSec()
+    {
+        return DatabaseDescriptor.getInterDCStreamThroughputOutboundMebibytesPerSecAsInt();
+    }
+
+    public double getInterDCStreamThroughputMebibytesPerSecAsDouble()
+    {
+        return DatabaseDescriptor.getInterDCStreamThroughputOutboundMebibytesPerSec();
+    }
+
+    public void setEntireSSTableInterDCStreamThroughputMebibytesPerSec(int value)
+    {
+        double oldValue = DatabaseDescriptor.getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec();
+        DatabaseDescriptor.setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(value);
+        StreamManager.StreamRateLimiter.updateEntireSSTableInterDCThroughput();
+        logger.info("setinterdcstreamthroughput (entire SSTable): throttle set to {}{} MiB/s (was {} MiB/s)", value, value <= 0 ? " (unlimited)" : "", oldValue);
+    }
+
+    public double getEntireSSTableInterDCStreamThroughputMebibytesPerSecAsDouble()
+    {
+        return DatabaseDescriptor.getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec();
+    }
+
+    public double getCompactionThroughtputMibPerSecAsDouble()
+    {
+        return DatabaseDescriptor.getCompactionThroughputMebibytesPerSec();
+    }
+
+    public long getCompactionThroughtputBytesPerSec()
+    {
+        return (long)DatabaseDescriptor.getCompactionThroughputBytesPerSec();
+    }
+
+    @Deprecated
     public int getCompactionThroughputMbPerSec()
     {
-        return DatabaseDescriptor.getCompactionThroughputMbPerSec();
+        return DatabaseDescriptor.getCompactionThroughputMebibytesPerSecAsInt();
     }
 
     public void setCompactionThroughputMbPerSec(int value)
     {
-        DatabaseDescriptor.setCompactionThroughputMbPerSec(value);
-        CompactionManager.instance.setRate(value);
+        double oldValue = DatabaseDescriptor.getCompactionThroughputMebibytesPerSec();
+        DatabaseDescriptor.setCompactionThroughputMebibytesPerSec(value);
+        double valueInBytes = value * 1024.0 * 1024.0;
+        CompactionManager.instance.setRateInBytes(valueInBytes);
+        logger.info("compactionthroughput: throttle set to {} mebibytes per second (was {} mebibytes per second)",
+                    value, oldValue);
     }
 
     public int getBatchlogReplayThrottleInKB()
     {
-        return DatabaseDescriptor.getBatchlogReplayThrottleInKB();
+        return DatabaseDescriptor.getBatchlogReplayThrottleInKiB();
     }
 
     public void setBatchlogReplayThrottleInKB(int throttleInKB)
     {
-        DatabaseDescriptor.setBatchlogReplayThrottleInKB(throttleInKB);
+        DatabaseDescriptor.setBatchlogReplayThrottleInKiB(throttleInKB);
         BatchlogManager.instance.setRate(throttleInKB);
     }
 
@@ -1744,7 +1896,7 @@
     }
 
     @VisibleForTesting
-    public Collection<InetAddressAndPort> prepareForBootstrap(long schemaDelay)
+    public Collection<InetAddressAndPort> prepareForBootstrap(long schemaTimeoutMillis, long ringTimeoutMillis)
     {
         Set<InetAddressAndPort> collisions = new HashSet<>();
         if (SystemKeyspace.bootstrapInProgress())
@@ -1752,7 +1904,7 @@
         else
             SystemKeyspace.setBootstrapState(SystemKeyspace.BootstrapState.IN_PROGRESS);
         setMode(Mode.JOINING, "waiting for ring information", true);
-        waitForSchema(schemaDelay);
+        waitForSchema(schemaTimeoutMillis, ringTimeoutMillis);
         setMode(Mode.JOINING, "schema complete, ready to bootstrap", true);
         setMode(Mode.JOINING, "waiting for pending range calculation", true);
         PendingRangeCalculatorService.instance.blockUntilFinished();
@@ -1782,7 +1934,7 @@
                 throw new UnsupportedOperationException(s);
             }
             setMode(Mode.JOINING, "getting bootstrap token", true);
-            bootstrapTokens = BootStrapper.getBootstrapTokens(tokenMetadata, FBUtilities.getBroadcastAddressAndPort(), schemaDelay);
+            bootstrapTokens = BootStrapper.getBootstrapTokens(tokenMetadata, FBUtilities.getBroadcastAddressAndPort(), schemaTimeoutMillis, ringTimeoutMillis);
         }
         else
         {
@@ -1796,7 +1948,7 @@
                 }
                 catch (InterruptedException e)
                 {
-                    throw new AssertionError(e);
+                    throw new UncheckedInterruptedException(e);
                 }
 
                 // check for operator errors...
@@ -1805,8 +1957,8 @@
                     InetAddressAndPort existing = tokenMetadata.getEndpoint(token);
                     if (existing != null)
                     {
-                        long nanoDelay = schemaDelay * 1000000L;
-                        if (Gossiper.instance.getEndpointStateForEndpoint(existing).getUpdateTimestamp() > (System.nanoTime() - nanoDelay))
+                        long nanoDelay = ringTimeoutMillis * 1000000L;
+                        if (Gossiper.instance.getEndpointStateForEndpoint(existing).getUpdateTimestamp() > (nanoTime() - nanoDelay))
                             throw new UnsupportedOperationException("Cannot replace a live node... ");
                         collisions.add(existing);
                     }
@@ -1820,11 +1972,11 @@
             {
                 try
                 {
-                    Thread.sleep(RING_DELAY);
+                    Thread.sleep(RING_DELAY_MILLIS);
                 }
                 catch (InterruptedException e)
                 {
-                    throw new AssertionError(e);
+                    throw new UncheckedInterruptedException(e);
                 }
 
             }
@@ -1856,12 +2008,12 @@
             states.add(Pair.create(ApplicationState.STATUS_WITH_PORT, replacing?
                                                             valueFactory.bootReplacingWithPort(DatabaseDescriptor.getReplaceAddress()) :
                                                             valueFactory.bootstrapping(tokens)));
-            states.add(Pair.create(ApplicationState.STATUS, replacing?
-                                                            valueFactory.bootReplacing(DatabaseDescriptor.getReplaceAddress().address) :
+            states.add(Pair.create(ApplicationState.STATUS, replacing ?
+                                                            valueFactory.bootReplacing(DatabaseDescriptor.getReplaceAddress().getAddress()) :
                                                             valueFactory.bootstrapping(tokens)));
             Gossiper.instance.addLocalApplicationStates(states);
-            setMode(Mode.JOINING, "sleeping " + RING_DELAY + " ms for pending range setup", true);
-            Uninterruptibles.sleepUninterruptibly(RING_DELAY, MILLISECONDS);
+            setMode(Mode.JOINING, "sleeping " + RING_DELAY_MILLIS + " ms for pending range setup", true);
+            Uninterruptibles.sleepUninterruptibly(RING_DELAY_MILLIS, MILLISECONDS);
         }
         else
         {
@@ -1879,7 +2031,8 @@
         }
 
         // Force disk boundary invalidation now that local tokens are set
-        invalidateDiskBoundaries();
+        invalidateLocalRanges();
+        repairPaxosForTopologyChange("bootstrap");
 
         Future<StreamState> bootstrapStream = startBootstrap(tokens);
         try
@@ -1901,13 +2054,18 @@
 
     public Future<StreamState> startBootstrap(Collection<Token> tokens)
     {
+        return startBootstrap(tokens, replacing);
+    }
+
+    public Future<StreamState> startBootstrap(Collection<Token> tokens, boolean replacing)
+    {
         setMode(Mode.JOINING, "Starting to bootstrap...", true);
         BootStrapper bootstrapper = new BootStrapper(FBUtilities.getBroadcastAddressAndPort(), tokens, tokenMetadata);
         bootstrapper.addProgressListener(progressSupport);
         return bootstrapper.bootstrap(streamStateStore, useStrictConsistency && !replacing); // handles token update
     }
 
-    private void invalidateDiskBoundaries()
+    private void invalidateLocalRanges()
     {
         for (Keyspace keyspace : Keyspace.all())
         {
@@ -1915,7 +2073,7 @@
             {
                 for (final ColumnFamilyStore store : cfs.concatWithIndexes())
                 {
-                    store.invalidateDiskBoundaries();
+                    store.invalidateLocalRanges();
                 }
             }
         }
@@ -1925,7 +2083,7 @@
      * All MVs have been created during bootstrap, so mark them as built
      */
     private void markViewsAsBuilt() {
-        for (String keyspace : Schema.instance.getUserKeyspaces())
+        for (String keyspace : Schema.instance.getUserKeyspaces().names())
         {
             for (ViewMetadata view: Schema.instance.getKeyspaceMetadata(keyspace).views)
                 SystemKeyspace.finishViewBuildStatus(view.keyspace(), view.name());
@@ -1951,8 +2109,8 @@
             // already bootstrapped ranges are filtered during bootstrap
             BootStrapper bootstrapper = new BootStrapper(FBUtilities.getBroadcastAddressAndPort(), tokens, tokenMetadata);
             bootstrapper.addProgressListener(progressSupport);
-            ListenableFuture<StreamState> bootstrapStream = bootstrapper.bootstrap(streamStateStore, useStrictConsistency && !replacing); // handles token update
-            Futures.addCallback(bootstrapStream, new FutureCallback<StreamState>()
+            Future<StreamState> bootstrapStream = bootstrapper.bootstrap(streamStateStore, useStrictConsistency && !replacing); // handles token update
+            bootstrapStream.addCallback(new FutureCallback<StreamState>()
             {
                 @Override
                 public void onSuccess(StreamState streamState)
@@ -2000,7 +2158,7 @@
                     progressSupport.progress("bootstrap", new ProgressEvent(ProgressEventType.ERROR, 1, 1, message));
                     progressSupport.progress("bootstrap", new ProgressEvent(ProgressEventType.COMPLETE, 1, 1, "Resume bootstrap complete"));
                 }
-            }, MoreExecutors.directExecutor());
+            });
             return true;
         }
         else
@@ -2155,7 +2313,7 @@
         // some people just want to get a visual representation of things. Allow null and set it to the first
         // non-system keyspace.
         if (keyspace == null)
-            keyspace = Schema.instance.getNonLocalStrategyKeyspaces().get(0);
+            keyspace = Schema.instance.getNonLocalStrategyKeyspaces().iterator().next().name;
 
         Map<List<String>, List<String>> map = new HashMap<>();
         for (Map.Entry<Range<Token>, EndpointsForRange> entry : tokenMetadata.getPendingRangesMM(keyspace).asMap().entrySet())
@@ -2209,7 +2367,7 @@
         // some people just want to get a visual representation of things. Allow null and set it to the first
         // non-system keyspace.
         if (keyspace == null)
-            keyspace = Schema.instance.getNonLocalStrategyKeyspaces().get(0);
+            keyspace = Schema.instance.getNonLocalStrategyKeyspaces().iterator().next().name;
 
         List<Range<Token>> ranges = getAllRanges(sortedTokens);
         return constructRangeToEndpointMap(keyspace, ranges);
@@ -2333,7 +2491,7 @@
             return id;
         // this condition is to prevent accessing the tables when the node is not started yet, and in particular,
         // when it is not going to be started at all (e.g. when running some unit tests or client tools).
-        else if (CommitLog.instance.isStarted())
+        else if ((DatabaseDescriptor.isDaemonInitialized() || DatabaseDescriptor.isToolInitialized()) && CommitLog.instance.isStarted())
             return SystemKeyspace.getLocalHostId();
 
         return null;
@@ -2518,7 +2676,6 @@
                         break;
                     case SCHEMA:
                         SystemKeyspace.updatePeerInfo(endpoint, "schema_version", UUID.fromString(value.value));
-                        MigrationCoordinator.instance.reportEndpointVersion(endpoint, UUID.fromString(value.value));
                         break;
                     case HOST_ID:
                         SystemKeyspace.updatePeerInfo(endpoint, "host_id", UUID.fromString(value.value));
@@ -2602,8 +2759,8 @@
                     try
                     {
                         InetAddressAndPort address = InetAddressAndPort.getByName(entry.getValue().value);
-                        native_address = address.address;
-                        native_port = address.port;
+                        native_address = address.getAddress();
+                        native_port = address.getPort();
                     }
                     catch (UnknownHostException e)
                     {
@@ -2841,7 +2998,9 @@
                 tokensToUpdateInMetadata.add(token);
                 tokensToUpdateInSystemKeyspace.add(token);
             }
-            else if (Gossiper.instance.compareEndpointStartup(endpoint, currentOwner) > 0)
+            // Note: in test scenarios, there may not be any delta between the heartbeat generations of the old
+            // and new nodes, so we first check whether the new endpoint is marked as a replacement for the old.
+            else if (endpoint.equals(tokenMetadata.getReplacementNode(currentOwner).orElse(null)) || Gossiper.instance.compareEndpointStartup(endpoint, currentOwner) > 0)
             {
                 tokensToUpdateInMetadata.add(token);
                 tokensToUpdateInSystemKeyspace.add(token);
@@ -2870,6 +3029,9 @@
         }
         if (!tokensToUpdateInSystemKeyspace.isEmpty())
             SystemKeyspace.updateTokens(endpoint, tokensToUpdateInSystemKeyspace);
+
+        // Tokens changed, the local range ownership probably changed too.
+        invalidateLocalRanges();
     }
 
     @VisibleForTesting
@@ -2990,6 +3152,8 @@
         if (isMoving || operationMode == Mode.MOVING)
         {
             tokenMetadata.removeFromMoving(endpoint);
+            // The above may change the local ownership.
+            invalidateLocalRanges();
             notifyMoved(endpoint);
         }
         else if (!isMember) // prior to this, the node was not a member
@@ -3146,7 +3310,6 @@
     private void removeEndpoint(InetAddressAndPort endpoint)
     {
         Gossiper.runInGossipStageBlocking(() -> Gossiper.instance.removeEndpoint(endpoint));
-        MigrationCoordinator.instance.removeAndIgnoreEndpoint(endpoint);
         SystemKeyspace.removeEndpoint(endpoint);
     }
 
@@ -3308,7 +3471,7 @@
 
         InetAddressAndPort myAddress = FBUtilities.getBroadcastAddressAndPort();
 
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             logger.debug("Restoring replica count for keyspace {}", keyspaceName);
             EndpointsByReplica changedReplicas = getChangedReplicasForLeaving(keyspaceName, endpoint, tokenMetadata, Keyspace.open(keyspaceName).getReplicationStrategy());
@@ -3351,7 +3514,7 @@
             });
         });
         StreamResultFuture future = stream.execute();
-        Futures.addCallback(future, new FutureCallback<StreamState>()
+        future.addCallback(new FutureCallback<StreamState>()
         {
             public void onSuccess(StreamState finalState)
             {
@@ -3364,7 +3527,7 @@
                 // We still want to send the notification
                 sendReplicationNotification(notifyEndpoint);
             }
-        }, MoreExecutors.directExecutor());
+        });
     }
 
     /**
@@ -3460,13 +3623,13 @@
             statusValue = epState.getApplicationState(statusState);
         }
         if (statusValue != null)
-            onChange(endpoint, statusState, statusValue);
+            Gossiper.instance.doOnChangeNotifications(endpoint, statusState, statusValue);
 
         for (Map.Entry<ApplicationState, VersionedValue> entry : epState.states())
         {
             if (entry.getKey() == ApplicationState.STATUS_WITH_PORT || entry.getKey() == ApplicationState.STATUS)
                 continue;
-            onChange(endpoint, entry.getKey(), entry.getValue());
+            Gossiper.instance.doOnChangeNotifications(endpoint, entry.getKey(), entry.getValue());
         }
     }
 
@@ -3614,7 +3777,7 @@
 
         for (Pair<Token, InetAddressAndPort> node : tokenMetadata.getMovingEndpoints())
         {
-            endpoints.add(node.right.address.getHostAddress());
+            endpoints.add(node.right.getAddress().getHostAddress());
         }
 
         return endpoints;
@@ -3820,12 +3983,34 @@
         return upgradeSSTables(keyspaceName, excludeCurrentVersion, 0, tableNames);
     }
 
-    public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
+    public int upgradeSSTables(String keyspaceName,
+                               final boolean skipIfCurrentVersion,
+                               final long skipIfNewerThanTimestamp,
+                               int jobs,
+                               String... tableNames) throws IOException, ExecutionException, InterruptedException
+    {
+        return rewriteSSTables(keyspaceName, skipIfCurrentVersion, skipIfNewerThanTimestamp, false, jobs, tableNames);
+    }
+
+    public int recompressSSTables(String keyspaceName,
+                                  int jobs,
+                                  String... tableNames) throws IOException, ExecutionException, InterruptedException
+    {
+        return rewriteSSTables(keyspaceName, false, Long.MAX_VALUE, true, jobs, tableNames);
+    }
+
+
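+    // Common implementation behind upgradeSSTables() and recompressSSTables(): rewrites the sstables of
+    // the selected tables, optionally skipping sstables that are already on the current version, that are
+    // newer than skipIfNewerThanTimestamp, or whose compression already matches, depending on the flags.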
+    public int rewriteSSTables(String keyspaceName,
+                               final boolean skipIfCurrentVersion,
+                               final long skipIfNewerThanTimestamp,
+                               final boolean skipIfCompressionMatches,
+                               int jobs,
+                               String... tableNames) throws IOException, ExecutionException, InterruptedException
     {
         CompactionManager.AllSSTableOpStatus status = CompactionManager.AllSSTableOpStatus.SUCCESSFUL;
         for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, true, keyspaceName, tableNames))
         {
-            CompactionManager.AllSSTableOpStatus oneStatus = cfStore.sstablesRewrite(excludeCurrentVersion, jobs);
+            CompactionManager.AllSSTableOpStatus oneStatus = cfStore.sstablesRewrite(skipIfCurrentVersion, skipIfNewerThanTimestamp, skipIfCompressionMatches, jobs);
             if (oneStatus != CompactionManager.AllSSTableOpStatus.SUCCESSFUL)
                 status = oneStatus;
         }
@@ -3897,15 +4082,22 @@
     @Override
     public void takeSnapshot(String tag, Map<String, String> options, String... entities) throws IOException
     {
-        boolean skipFlush = Boolean.parseBoolean(options.getOrDefault("skipFlush", "false"));
+        DurationSpec.IntSecondsBound ttl = options.containsKey("ttl") ? new DurationSpec.IntSecondsBound(options.get("ttl")) : null;
+        if (ttl != null)
+        {
+            int minAllowedTtlSecs = CassandraRelevantProperties.SNAPSHOT_MIN_ALLOWED_TTL_SECONDS.getInt();
+            if (ttl.toSeconds() < minAllowedTtlSecs)
+                throw new IllegalArgumentException(String.format("ttl for snapshot must be at least %d seconds", minAllowedTtlSecs));
+        }
 
+        boolean skipFlush = Boolean.parseBoolean(options.getOrDefault("skipFlush", "false"));
         if (entities != null && entities.length > 0 && entities[0].contains("."))
         {
-            takeMultipleTableSnapshot(tag, skipFlush, entities);
+            takeMultipleTableSnapshot(tag, skipFlush, ttl, entities);
         }
         else
         {
-            takeSnapshot(tag, skipFlush, entities);
+            takeSnapshot(tag, skipFlush, ttl, entities);
         }
     }
 
@@ -3923,9 +4115,10 @@
     public void takeTableSnapshot(String keyspaceName, String tableName, String tag)
             throws IOException
     {
-        takeMultipleTableSnapshot(tag, false, keyspaceName + "." + tableName);
+        takeMultipleTableSnapshot(tag, false, null, keyspaceName + "." + tableName);
     }
 
+    @Override
     public void forceKeyspaceCompactionForTokenRange(String keyspaceName, String startToken, String endToken, String... tableNames) throws IOException, ExecutionException, InterruptedException
     {
         Collection<Range<Token>> tokenRanges = createRepairRangeFrom(startToken, endToken);
@@ -3936,6 +4129,30 @@
         }
     }
 
+    @Override
+    public void forceKeyspaceCompactionForPartitionKey(String keyspaceName, String partitionKey, String... tableNames) throws IOException, ExecutionException, InterruptedException
+    {
+        // validate that the key parses before attempting compaction
+        for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, false, keyspaceName, tableNames))
+        {
+            try
+            {
+                getKeyFromPartition(keyspaceName, cfStore.name, partitionKey);
+            }
+            catch (Exception e)
+            {
+                // JMX cannot handle exceptions defined outside of java.* and javax.*, so it is safer to rewrite the exception
+                IllegalArgumentException exception = new IllegalArgumentException(String.format("Unable to parse partition key '%s' for table %s; %s", partitionKey, cfStore.metadata, e.getMessage()));
+                exception.setStackTrace(e.getStackTrace());
+                throw exception;
+            }
+        }
+        for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, false, keyspaceName, tableNames))
+        {
+            cfStore.forceCompactionForKey(getKeyFromPartition(keyspaceName, cfStore.name, partitionKey));
+        }
+    }
+
     /**
      * Takes the snapshot for the given keyspaces. A snapshot name must be specified.
      *
@@ -3944,7 +4161,7 @@
      */
     public void takeSnapshot(String tag, String... keyspaceNames) throws IOException
     {
-        takeSnapshot(tag, false, keyspaceNames);
+        takeSnapshot(tag, false, null, keyspaceNames);
     }
 
     /**
@@ -3958,7 +4175,7 @@
     public void takeMultipleTableSnapshot(String tag, String... tableList)
             throws IOException
     {
-        takeMultipleTableSnapshot(tag, false, tableList);
+        takeMultipleTableSnapshot(tag, false, null, tableList);
     }
 
     /**
@@ -3968,7 +4185,7 @@
      * @param skipFlush Skip blocking flush of memtable
      * @param keyspaceNames the names of the keyspaces to snapshot; empty means "all."
      */
-    private void takeSnapshot(String tag, boolean skipFlush, String... keyspaceNames) throws IOException
+    private void takeSnapshot(String tag, boolean skipFlush, DurationSpec.IntSecondsBound ttl, String... keyspaceNames) throws IOException
     {
         if (operationMode == Mode.JOINING)
             throw new IOException("Cannot snapshot until bootstrap completes");
@@ -3995,9 +4212,12 @@
 
 
         RateLimiter snapshotRateLimiter = DatabaseDescriptor.getSnapshotRateLimiter();
+        Instant creationTime = now();
 
         for (Keyspace keyspace : keyspaces)
-            keyspace.snapshot(tag, null, skipFlush, snapshotRateLimiter);
+        {
+            keyspace.snapshot(tag, null, skipFlush, ttl, snapshotRateLimiter, creationTime);
+        }
     }
 
     /**
@@ -4011,7 +4231,7 @@
      * @param tableList
      *            list of tables from different keyspace in the form of ks1.cf1 ks2.cf2
      */
-    private void takeMultipleTableSnapshot(String tag, boolean skipFlush, String... tableList)
+    private void takeMultipleTableSnapshot(String tag, boolean skipFlush, DurationSpec.IntSecondsBound ttl, String... tableList)
             throws IOException
     {
         Map<Keyspace, List<String>> keyspaceColumnfamily = new HashMap<Keyspace, List<String>>();
@@ -4058,13 +4278,13 @@
         }
 
         RateLimiter snapshotRateLimiter = DatabaseDescriptor.getSnapshotRateLimiter();
+        Instant creationTime = now();
 
         for (Entry<Keyspace, List<String>> entry : keyspaceColumnfamily.entrySet())
         {
             for (String table : entry.getValue())
-                entry.getKey().snapshot(tag, table, skipFlush, snapshotRateLimiter);
+                entry.getKey().snapshot(tag, table, skipFlush, ttl, snapshotRateLimiter, creationTime);
         }
-
     }
 
     private void verifyKeyspaceIsValid(String keyspaceName)
@@ -4094,7 +4314,7 @@
         Set<String> keyspaces = new HashSet<>();
         for (String dataDir : DatabaseDescriptor.getAllDataFileLocations())
         {
-            for(String keyspaceDir : new File(dataDir).list())
+            for(String keyspaceDir : new File(dataDir).tryListNames())
             {
                 // Only add a ks if it has been specified as a param, assuming params were actually provided.
                 if (keyspaceNames.length > 0 && !Arrays.asList(keyspaceNames).contains(keyspaceDir))
@@ -4110,27 +4330,34 @@
             logger.debug("Cleared out snapshot directories");
     }
 
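+    /**
+     * Returns snapshot details grouped by snapshot tag. When the options map contains "no_ttl" set to
+     * "true", expiring snapshots (see TableSnapshot#isExpiring) are omitted from the result.
+     */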
+    public Map<String, TabularData> getSnapshotDetails(Map<String, String> options)
+    {
+        boolean skipExpiring = options != null && Boolean.parseBoolean(options.getOrDefault("no_ttl", "false"));
+
+        Map<String, TabularData> snapshotMap = new HashMap<>();
+
+        for (TableSnapshot snapshot : snapshotManager.loadSnapshots())
+        {
+            if (skipExpiring && snapshot.isExpiring())
+                continue;
+
+            TabularDataSupport data = (TabularDataSupport) snapshotMap.get(snapshot.getTag());
+            if (data == null)
+            {
+                data = new TabularDataSupport(SnapshotDetailsTabularData.TABULAR_TYPE);
+                snapshotMap.put(snapshot.getTag(), data);
+            }
+
+            SnapshotDetailsTabularData.from(snapshot, data);
+        }
+
+        return snapshotMap;
+    }
+
+    @Deprecated
     public Map<String, TabularData> getSnapshotDetails()
     {
-        Map<String, TabularData> snapshotMap = new HashMap<>();
-        for (Keyspace keyspace : Keyspace.all())
-        {
-            for (ColumnFamilyStore cfStore : keyspace.getColumnFamilyStores())
-            {
-                for (Map.Entry<String, Directories.SnapshotSizeDetails> snapshotDetail : cfStore.getSnapshotDetails().entrySet())
-                {
-                    TabularDataSupport data = (TabularDataSupport)snapshotMap.get(snapshotDetail.getKey());
-                    if (data == null)
-                    {
-                        data = new TabularDataSupport(SnapshotDetailsTabularData.TABULAR_TYPE);
-                        snapshotMap.put(snapshotDetail.getKey(), data);
-                    }
-
-                    SnapshotDetailsTabularData.from(snapshotDetail.getKey(), keyspace.getName(), cfStore.getTableName(), snapshotDetail, data);
-                }
-            }
-        }
-        return snapshotMap;
+        return getSnapshotDetails(ImmutableMap.of());
     }
 
     public long trueSnapshotsSize()
@@ -4196,7 +4423,21 @@
         for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, false, keyspaceName, tableNames))
         {
             logger.debug("Forcing flush on keyspace {}, CF {}", keyspaceName, cfStore.name);
-            cfStore.forceBlockingFlush();
+            cfStore.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
+        }
+    }
+
+    /**
+     * Flush all memtables for the given keyspace, recording the supplied reason for the flush.
+     * @param keyspaceName name of the keyspace whose column families should be flushed
+     * @param reason the reason the flush was requested
+     * @throws IOException
+     */
+    public void forceKeyspaceFlush(String keyspaceName, ColumnFamilyStore.FlushReason reason) throws IOException
+    {
+        for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, false, keyspaceName))
+        {
+            logger.debug("Forcing flush on keyspace {}, CF {}", keyspaceName, cfStore.name);
+            cfStore.forceBlockingFlush(reason);
         }
     }
 
@@ -4208,6 +4449,11 @@
     public Pair<Integer, Future<?>> repair(String keyspace, Map<String, String> repairSpec, List<ProgressListener> listeners)
     {
         RepairOption option = RepairOption.parse(repairSpec, tokenMetadata.partitioner);
+        return repair(keyspace, option, listeners);
+    }
+
+    public Pair<Integer, Future<?>> repair(String keyspace, RepairOption option, List<ProgressListener> listeners)
+    {
         // if ranges are not specified
         if (option.getRanges().isEmpty())
         {
@@ -4228,10 +4474,10 @@
             }
         }
         if (option.getRanges().isEmpty() || Keyspace.open(keyspace).getReplicationStrategy().getReplicationFactor().allReplicas < 2)
-            return Pair.create(0, Futures.immediateFuture(null));
+            return Pair.create(0, ImmediateFuture.success(null));
 
         int cmd = nextRepairCommand.incrementAndGet();
-        return Pair.create(cmd, ActiveRepairService.repairCommandExecutor().submit(createRepairTask(cmd, keyspace, option, listeners)));
+        return Pair.create(cmd, repairCommandExecutor().submit(createRepairTask(cmd, keyspace, option, listeners)));
     }
 
     /**
@@ -4297,21 +4543,99 @@
             task.addProgressListener(listener);
 
         if (options.isTraced())
+            return new FutureTaskWithResources<>(() -> ExecutorLocals::clear, task);
+        return new FutureTask<>(task);
+    }
+
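+    // Runs a single blocking paxos repair attempt. An interrupt is treated as fatal, while execution
+    // failures are rethrown as RuntimeException so that repairPaxosForTopologyChange() can retry them.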
+    private void tryRepairPaxosForTopologyChange(String reason)
+    {
+        try
         {
-            Runnable r = () ->
-            {
-                try
-                {
-                    task.run();
-                }
-                finally
-                {
-                    ExecutorLocals.set(null);
-                }
-            };
-            return new FutureTask<>(r, null);
+            startRepairPaxosForTopologyChange(reason).get();
         }
-        return new FutureTask<>(task, null);
+        catch (InterruptedException e)
+        {
+            logger.error("Error during paxos repair", e);
+            throw new AssertionError(e);
+        }
+        catch (ExecutionException e)
+        {
+            logger.error("Error during paxos repair", e);
+            throw new RuntimeException(e);
+        }
+    }
+
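+    // Repairs paxos state before a topology change unless skip_paxos_repair_on_topology_change is set or
+    // the v2 paxos variant is not in use. Failed attempts are retried with a linearly increasing delay;
+    // the retry count and base delay default to 10 and 10s and can be tuned via the system properties
+    // cassandra.paxos_repair_on_topology_change_retries and cassandra.paxos_repair_on_topology_change_retry_delay_seconds.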
+    private void repairPaxosForTopologyChange(String reason)
+    {
+        if (getSkipPaxosRepairOnTopologyChange() || !Paxos.useV2())
+        {
+            logger.info("skipping paxos repair for {}. skip_paxos_repair_on_topology_change is set, or v2 paxos variant is not being used", reason);
+            return;
+        }
+
+        logger.info("repairing paxos for {}", reason);
+
+        int retries = 0;
+        int maxRetries = Integer.getInteger("cassandra.paxos_repair_on_topology_change_retries", 10);
+        int delaySec = Integer.getInteger("cassandra.paxos_repair_on_topology_change_retry_delay_seconds", 10);
+
+        boolean completed = false;
+        while (!completed)
+        {
+            try
+            {
+                tryRepairPaxosForTopologyChange(reason);
+                completed = true;
+            }
+            catch (Exception e)
+            {
+                if (retries >= maxRetries)
+                    throw e;
+
+                retries++;
+                int sleep = delaySec * retries;
+                logger.info("Sleeping {} seconds before retrying paxos repair...", sleep);
+                Uninterruptibles.sleepUninterruptibly(sleep, TimeUnit.SECONDS);
+                logger.info("Retrying paxos repair for {}. Retry {}/{}", reason, retries, maxRetries);
+            }
+        }
+
+        logger.info("paxos repair for {} complete", reason);
+    }
+
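+    // Starts paxos repair for every non-local-strategy keyspace, skipping replicated system keyspaces and
+    // any keyspace listed in DatabaseDescriptor.skipPaxosRepairOnTopologyChangeKeyspaces(); the returned
+    // future completes once all per-keyspace repairs have finished.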
+    @VisibleForTesting
+    public Future<?> startRepairPaxosForTopologyChange(String reason)
+    {
+        logger.info("repairing paxos for {}", reason);
+
+        List<Future<?>> futures = new ArrayList<>();
+
+        Keyspaces keyspaces = Schema.instance.getNonLocalStrategyKeyspaces();
+        for (String ksName : keyspaces.names())
+        {
+            if (SchemaConstants.REPLICATED_SYSTEM_KEYSPACE_NAMES.contains(ksName))
+                continue;
+
+            if (DatabaseDescriptor.skipPaxosRepairOnTopologyChangeKeyspaces().contains(ksName))
+                continue;
+
+            List<Range<Token>> ranges = getLocalAndPendingRanges(ksName);
+            futures.add(ActiveRepairService.instance.repairPaxosForTopologyChange(ksName, ranges, reason));
+        }
+
+        return FutureCombiner.allOf(futures);
+    }
+
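+    // Schedules a local paxos cleanup of the given table over this node's local and pending ranges,
+    // returning an already-successful future if the table no longer exists.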
+    public Future<?> autoRepairPaxos(TableId tableId)
+    {
+        TableMetadata table = Schema.instance.getTableMetadata(tableId);
+        if (table == null)
+            return ImmediateFuture.success(null);
+
+        List<Range<Token>> ranges = getLocalAndPendingRanges(table.keyspace);
+        PaxosCleanupLocalCoordinator coordinator = PaxosCleanupLocalCoordinator.createForAutoRepair(tableId, ranges);
+        ScheduledExecutors.optionalTasks.submit(coordinator::start);
+        return coordinator;
     }
 
     public void forceTerminateAllRepairSessions()
@@ -4322,7 +4646,7 @@
     @Nullable
     public List<String> getParentRepairStatus(int cmd)
     {
-        Pair<ActiveRepairService.ParentRepairStatus, List<String>> pair = ActiveRepairService.instance.getRepairStatus(cmd);
+        Pair<ParentRepairStatus, List<String>> pair = ActiveRepairService.instance.getRepairStatus(cmd);
         return pair == null ? null :
                ImmutableList.<String>builder().add(pair.left.name()).addAll(pair.right).build();
     }
@@ -4469,7 +4793,7 @@
     {
         EndpointsForToken replicas = getNaturalReplicasForToken(keyspaceName, cf, key);
         List<InetAddress> inetList = new ArrayList<>(replicas.size());
-        replicas.forEach(r -> inetList.add(r.endpoint().address));
+        replicas.forEach(r -> inetList.add(r.endpoint().getAddress()));
         return inetList;
     }
 
@@ -4483,7 +4807,7 @@
     {
         EndpointsForToken replicas = getNaturalReplicasForToken(keyspaceName, key);
         List<InetAddress> inetList = new ArrayList<>(replicas.size());
-        replicas.forEach(r -> inetList.add(r.endpoint().address));
+        replicas.forEach(r -> inetList.add(r.endpoint().getAddress()));
         return inetList;
     }
 
@@ -4495,6 +4819,22 @@
 
     public EndpointsForToken getNaturalReplicasForToken(String keyspaceName, String cf, String key)
     {
+        return getNaturalReplicasForToken(keyspaceName, partitionKeyToBytes(keyspaceName, cf, key));
+    }
+
+    public EndpointsForToken getNaturalReplicasForToken(String keyspaceName, ByteBuffer key)
+    {
+        Token token = tokenMetadata.partitioner.getToken(key);
+        return Keyspace.open(keyspaceName).getReplicationStrategy().getNaturalReplicasForToken(token);
+    }
+
+    public DecoratedKey getKeyFromPartition(String keyspaceName, String table, String partitionKey)
+    {
+        return tokenMetadata.partitioner.decorateKey(partitionKeyToBytes(keyspaceName, table, partitionKey));
+    }
+
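+    // Serializes a textual partition key using the table's partition key type, validating that both the
+    // keyspace and the table exist.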
+    private static ByteBuffer partitionKeyToBytes(String keyspaceName, String cf, String key)
+    {
         KeyspaceMetadata ksMetaData = Schema.instance.getKeyspaceMetadata(keyspaceName);
         if (ksMetaData == null)
             throw new IllegalArgumentException("Unknown keyspace '" + keyspaceName + "'");
@@ -4503,13 +4843,13 @@
         if (metadata == null)
             throw new IllegalArgumentException("Unknown table '" + cf + "' in keyspace '" + keyspaceName + "'");
 
-        return getNaturalReplicasForToken(keyspaceName, metadata.partitionKeyType.fromString(key));
+        return metadata.partitionKeyType.fromString(key);
     }
 
-    public EndpointsForToken getNaturalReplicasForToken(String keyspaceName, ByteBuffer key)
+    @Override
+    public String getToken(String keyspaceName, String table, String key)
     {
-        Token token = tokenMetadata.partitioner.getToken(key);
-        return Keyspace.open(keyspaceName).getReplicationStrategy().getNaturalReplicasForToken(token);
+        return tokenMetadata.partitioner.getToken(partitionKeyToBytes(keyspaceName, table, key)).toString();
     }
 
     public void setLoggingLevel(String classQualifier, String rawLevel) throws Exception
@@ -4621,7 +4961,7 @@
             if (operationMode != Mode.LEAVING) // If we're already decommissioning there is no point checking RF/pending ranges
             {
                 int rf, numNodes;
-                for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+                for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
                 {
                     if (!force)
                     {
@@ -4650,7 +4990,7 @@
             }
 
             startLeaving();
-            long timeout = Math.max(RING_DELAY, BatchlogManager.instance.getBatchlogTimeout());
+            long timeout = Math.max(RING_DELAY_MILLIS, BatchlogManager.instance.getBatchlogTimeout());
             setMode(Mode.LEAVING, "sleeping " + timeout + " ms for batch processing and pending range setup", true);
             Thread.sleep(timeout);
 
@@ -4679,7 +5019,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException("Node interrupted while decommissioning");
+            throw new UncheckedInterruptedException(e);
         }
         catch (ExecutionException e)
         {
@@ -4700,16 +5040,16 @@
 
         Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS_WITH_PORT, valueFactory.left(getLocalTokens(),Gossiper.computeExpireTime()));
         Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS, valueFactory.left(getLocalTokens(),Gossiper.computeExpireTime()));
-        int delay = Math.max(RING_DELAY, Gossiper.intervalInMillis * 2);
+        int delay = Math.max(RING_DELAY_MILLIS, Gossiper.intervalInMillis * 2);
         logger.info("Announcing that I have left the ring for {}ms", delay);
         Uninterruptibles.sleepUninterruptibly(delay, MILLISECONDS);
     }
 
-    private void unbootstrap(Runnable onFinish) throws ExecutionException, InterruptedException
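+    // Computes the ranges this node has to stream to other replicas when it leaves the ring and returns
+    // a supplier that starts the actual streaming; unbootstrap() invokes the supplier only after paxos
+    // repair and batchlog replay have been initiated.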
+    public Supplier<Future<StreamState>> prepareUnbootstrapStreaming()
     {
         Map<String, EndpointsByReplica> rangesToStream = new HashMap<>();
 
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             EndpointsByReplica rangesMM = getChangedReplicasForLeaving(keyspaceName, FBUtilities.getBroadcastAddressAndPort(), tokenMetadata, Keyspace.open(keyspaceName).getReplicationStrategy());
 
@@ -4719,11 +5059,19 @@
             rangesToStream.put(keyspaceName, rangesMM);
         }
 
+        return () -> streamRanges(rangesToStream);
+    }
+
+    private void unbootstrap(Runnable onFinish) throws ExecutionException, InterruptedException
+    {
+        Supplier<Future<StreamState>> startStreaming = prepareUnbootstrapStreaming();
+
         setMode(Mode.LEAVING, "replaying batch log and streaming data to other nodes", true);
 
+        repairPaxosForTopologyChange("decommission");
         // Start with BatchLog replay, which may create hints but no writes since this is no longer a valid endpoint.
         Future<?> batchlogReplay = BatchlogManager.instance.startBatchlogReplay();
-        Future<StreamState> streamSuccess = streamRanges(rangesToStream);
+        Future<StreamState> streamSuccess = startStreaming.get();
 
         // Wait for batch log to complete before streaming hints.
         logger.debug("waiting for batch log processing.");
@@ -4815,7 +5163,7 @@
             throw new UnsupportedOperationException("This node has more than one token and cannot be moved thusly.");
         }
 
-        List<String> keyspacesToProcess = Schema.instance.getNonLocalStrategyKeyspaces();
+        List<String> keyspacesToProcess = ImmutableList.copyOf(Schema.instance.getNonLocalStrategyKeyspaces().names());
 
         PendingRangeCalculatorService.instance.blockUntilFinished();
         // checking if data is moving to this node
@@ -4830,12 +5178,13 @@
         Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS, valueFactory.moving(newToken));
         setMode(Mode.MOVING, String.format("Moving %s from %s to %s.", localAddress, getLocalTokens().iterator().next(), newToken), true);
 
-        setMode(Mode.MOVING, String.format("Sleeping %s ms before start streaming/fetching ranges", RING_DELAY), true);
-        Uninterruptibles.sleepUninterruptibly(RING_DELAY, MILLISECONDS);
+        setMode(Mode.MOVING, String.format("Sleeping %s ms before start streaming/fetching ranges", RING_DELAY_MILLIS), true);
+        Uninterruptibles.sleepUninterruptibly(RING_DELAY_MILLIS, MILLISECONDS);
 
         RangeRelocator relocator = new RangeRelocator(Collections.singleton(newToken), keyspacesToProcess, tokenMetadata);
         relocator.calculateToFromStreams();
 
+        repairPaxosForTopologyChange("move");
         if (relocator.streamsNeeded())
         {
             setMode(Mode.MOVING, "fetching new ranges and streaming old ranges", true);
@@ -4843,7 +5192,11 @@
             {
                 relocator.stream().get();
             }
-            catch (ExecutionException | InterruptedException e)
+            catch (InterruptedException e)
+            {
+                throw new UncheckedInterruptedException(e);
+            }
+            catch (ExecutionException e)
             {
-                throw new RuntimeException("Interrupted while waiting for stream/fetch ranges to finish: " + e.getMessage());
+                throw new RuntimeException("Error while waiting for stream/fetch ranges to finish: " + e.getMessage());
             }
@@ -4957,7 +5310,7 @@
         Collection<Token> tokens = tokenMetadata.getTokens(endpoint);
 
         // Find the endpoints that are going to become responsible for data
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             // if the replication factor is 1 the data is lost so we shouldn't wait for confirmation
             if (Keyspace.open(keyspaceName).getReplicationStrategy().getReplicationFactor().allReplicas == 1)
@@ -5052,6 +5405,11 @@
         return operationMode == Mode.NORMAL;
     }
 
+    public boolean isDecommissioned()
+    {
+        return operationMode == Mode.DECOMMISSIONED;
+    }
+
     public String getDrainProgress()
     {
         return String.format("Drained %s/%s ColumnFamilies", remainingCFs, totalCFs);
@@ -5095,6 +5453,7 @@
                 logger.error("Batchlog manager timed out shutting down", t);
             }
 
+            snapshotManager.stop();
             HintsService.instance.pauseDispatch();
 
             if (daemon != null)
@@ -5143,7 +5502,7 @@
             for (Keyspace keyspace : Keyspace.nonSystem())
             {
                 for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores())
-                    flushes.add(cfs.forceFlush());
+                    flushes.add(cfs.forceFlush(ColumnFamilyStore.FlushReason.DRAIN));
             }
             // wait for the flushes.
             // TODO this is a godawful way to track progress, since they flush in parallel.  a long one could
@@ -5175,10 +5534,11 @@
             for (Keyspace keyspace : Keyspace.system())
             {
                 for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores())
-                    flushes.add(cfs.forceFlush());
+                    flushes.add(cfs.forceFlush(ColumnFamilyStore.FlushReason.DRAIN));
             }
             FBUtilities.waitOnFutures(flushes);
 
+            SnapshotManager.shutdownAndWait(1L, MINUTES);
             HintsService.instance.shutdownBlocking();
 
             // Interrupt ongoing compactions and shutdown CM to prevent further compactions.
@@ -5191,12 +5551,21 @@
             CommitLog.instance.shutdownBlocking();
 
             // wait for miscellaneous tasks like sstable and commitlog segment deletion
-            ScheduledExecutors.nonPeriodicTasks.shutdown();
-            if (!ScheduledExecutors.nonPeriodicTasks.awaitTermination(1, MINUTES))
-                logger.warn("Unable to terminate non-periodic tasks within 1 minute.");
-
             ColumnFamilyStore.shutdownPostFlushExecutor();
-            setMode(Mode.DRAINED, !isFinalShutdown);
+
+            try
+            {
+                // we are not shutting down ScheduledExecutors#scheduledFastTasks to be still able to progress time
+                // fast-tasks executor is shut down in StorageService's shutdown hook added to Runtime
+                ExecutorUtils.shutdownNowAndWait(1, MINUTES,
+                                                 ScheduledExecutors.nonPeriodicTasks,
+                                                 ScheduledExecutors.scheduledTasks,
+                                                 ScheduledExecutors.optionalTasks);
+            }
+            finally
+            {
+                setMode(Mode.DRAINED, !isFinalShutdown);
+            }
         }
         catch (Throwable t)
         {
@@ -5324,10 +5693,10 @@
         {
             InetAddressAndPort endpoint = tokenMetadata.getEndpoint(entry.getKey());
             Float tokenOwnership = entry.getValue();
-            if (nodeMap.containsKey(endpoint.address))
-                nodeMap.put(endpoint.address, nodeMap.get(endpoint.address) + tokenOwnership);
+            if (nodeMap.containsKey(endpoint.getAddress()))
+                nodeMap.put(endpoint.getAddress(), nodeMap.get(endpoint.getAddress()) + tokenOwnership);
             else
-                nodeMap.put(endpoint.address, tokenOwnership);
+                nodeMap.put(endpoint.getAddress(), tokenOwnership);
         }
         return nodeMap;
     }
@@ -5374,11 +5743,11 @@
         }
         else
         {
-            List<String> userKeyspaces = Schema.instance.getUserKeyspaces();
+            Collection<String> userKeyspaces = Schema.instance.getUserKeyspaces().names();
 
             if (userKeyspaces.size() > 0)
             {
-                keyspace = userKeyspaces.get(0);
+                keyspace = userKeyspaces.iterator().next();
                 AbstractReplicationStrategy replicationStrategy = Schema.instance.getKeyspaceInstance(keyspace).getReplicationStrategy();
                 for (String keyspaceName : userKeyspaces)
                 {
@@ -5431,7 +5800,7 @@
     {
         LinkedHashMap<InetAddressAndPort, Float> result = getEffectiveOwnership(keyspace);
         LinkedHashMap<InetAddress, Float> asInets = new LinkedHashMap<>();
-        result.entrySet().stream().forEachOrdered(entry -> asInets.put(entry.getKey().address, entry.getValue()));
+        result.entrySet().stream().forEachOrdered(entry -> asInets.put(entry.getKey().getAddress(), entry.getValue()));
         return asInets;
     }
 
@@ -5445,19 +5814,17 @@
 
     public List<String> getKeyspaces()
     {
-        List<String> keyspaceNamesList = new ArrayList<>(Schema.instance.getKeyspaces());
-        return Collections.unmodifiableList(keyspaceNamesList);
+        return Lists.newArrayList(Schema.instance.distributedAndLocalKeyspaces().names());
     }
 
     public List<String> getNonSystemKeyspaces()
     {
-        List<String> nonKeyspaceNamesList = new ArrayList<>(Schema.instance.getNonSystemKeyspaces());
-        return Collections.unmodifiableList(nonKeyspaceNamesList);
+        return Lists.newArrayList(Schema.instance.distributedKeyspaces().names());
     }
 
     public List<String> getNonLocalStrategyKeyspaces()
     {
-        return Collections.unmodifiableList(Schema.instance.getNonLocalStrategyKeyspaces());
+        return Lists.newArrayList(Schema.instance.getNonLocalStrategyKeyspaces().names());
     }
 
     public Map<String, String> getViewBuildStatuses(String keyspace, String view, boolean withPort)
@@ -5776,9 +6143,9 @@
         ColumnFamilyStore.rebuildSecondaryIndex(ksName, cfName, indices);
     }
 
-    public void resetLocalSchema()
+    public void resetLocalSchema() throws IOException
     {
-        MigrationManager.resetLocalSchema();
+        Schema.instance.resetLocalSchema();
     }
 
     public void reloadLocalSchema()
@@ -5839,14 +6206,16 @@
         return DatabaseDescriptor.getPartitionerName();
     }
 
+    /** A negative value disables preemptive opening. */
     public void setSSTablePreemptiveOpenIntervalInMB(int intervalInMB)
     {
-        DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMB(intervalInMB);
+        DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMiB(intervalInMB);
     }
 
+    /** May return a negative value, which means preemptive opening is disabled. */
     public int getSSTablePreemptiveOpenIntervalInMB()
     {
-        return DatabaseDescriptor.getSSTablePreemptiveOpenIntervalInMB();
+        return DatabaseDescriptor.getSSTablePreemptiveOpenIntervalInMiB();
     }
 
     public boolean getMigrateKeycacheOnCompaction()
@@ -5903,37 +6272,49 @@
         logger.info("updated replica_filtering_protection.cached_rows_fail_threshold to {}", threshold);
     }
 
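+    // JMX accessors for the column_index_size setting, expressed in KiB; the setter logs both the
+    // previous and the new value.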
+    public int getColumnIndexSizeInKiB()
+    {
+        return DatabaseDescriptor.getColumnIndexSizeInKiB();
+    }
+
+    public void setColumnIndexSize(int columnIndexSizeInKB)
+    {
+        int oldValueInKiB = DatabaseDescriptor.getColumnIndexSizeInKiB();
+        DatabaseDescriptor.setColumnIndexSize(columnIndexSizeInKB);
+        logger.info("Updated column_index_size to {} KiB (was {} KiB)", columnIndexSizeInKB, oldValueInKiB);
+    }
+
     public int getColumnIndexCacheSize()
     {
-        return DatabaseDescriptor.getColumnIndexCacheSizeInKB();
+        return DatabaseDescriptor.getColumnIndexCacheSizeInKiB();
     }
 
     public void setColumnIndexCacheSize(int cacheSizeInKB)
     {
         DatabaseDescriptor.setColumnIndexCacheSize(cacheSizeInKB);
-        logger.info("Updated column_index_cache_size_in_kb to {}", cacheSizeInKB);
+        logger.info("Updated column_index_cache_size to {}", cacheSizeInKB);
     }
 
     public int getBatchSizeFailureThreshold()
     {
-        return DatabaseDescriptor.getBatchSizeFailThresholdInKB();
+        return DatabaseDescriptor.getBatchSizeFailThresholdInKiB();
     }
 
     public void setBatchSizeFailureThreshold(int threshold)
     {
-        DatabaseDescriptor.setBatchSizeFailThresholdInKB(threshold);
-        logger.info("updated batch_size_fail_threshold_in_kb to {}", threshold);
+        DatabaseDescriptor.setBatchSizeFailThresholdInKiB(threshold);
+        logger.info("updated batch_size_fail_threshold to {}", threshold);
     }
 
     public int getBatchSizeWarnThreshold()
     {
-        return DatabaseDescriptor.getBatchSizeWarnThresholdInKB();
+        return DatabaseDescriptor.getBatchSizeWarnThresholdInKiB();
     }
 
     public void setBatchSizeWarnThreshold(int threshold)
     {
-        DatabaseDescriptor.setBatchSizeWarnThresholdInKB(threshold);
-        logger.info("Updated batch_size_warn_threshold_in_kb to {}", threshold);
+        DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(threshold);
+        logger.info("Updated batch_size_warn_threshold to {}", threshold);
     }
 
     public int getInitialRangeTombstoneListAllocationSize()
@@ -5973,8 +6354,8 @@
 
     public void setHintedHandoffThrottleInKB(int throttleInKB)
     {
-        DatabaseDescriptor.setHintedHandoffThrottleInKB(throttleInKB);
-        logger.info("updated hinted_handoff_throttle_in_kb to {}", throttleInKB);
+        DatabaseDescriptor.setHintedHandoffThrottleInKiB(throttleInKB);
+        logger.info("updated hinted_handoff_throttle to {} KiB", throttleInKB);
     }
 
     @Override
@@ -5989,38 +6370,57 @@
         logger.info("Auditlog is disabled");
     }
 
+    @Deprecated
     public void enableAuditLog(String loggerName, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
                                String includedUsers, String excludedUsers) throws ConfigurationException, IllegalStateException
     {
-        enableAuditLog(loggerName, Collections.emptyMap(), includedKeyspaces, excludedKeyspaces, includedCategories, excludedCategories, includedUsers, excludedUsers);
+        enableAuditLog(loggerName, Collections.emptyMap(), includedKeyspaces, excludedKeyspaces, includedCategories, excludedCategories, includedUsers, excludedUsers,
+                       Integer.MIN_VALUE, null, null, Long.MIN_VALUE, Integer.MIN_VALUE, null);
     }
 
+    public void enableAuditLog(String loggerName, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
+                               String includedUsers, String excludedUsers, Integer maxArchiveRetries, Boolean block, String rollCycle,
+                               Long maxLogSize, Integer maxQueueWeight, String archiveCommand) throws IllegalStateException
+    {
+        enableAuditLog(loggerName, Collections.emptyMap(), includedKeyspaces, excludedKeyspaces, includedCategories, excludedCategories, includedUsers, excludedUsers,
+                       maxArchiveRetries, block, rollCycle, maxLogSize, maxQueueWeight, archiveCommand);
+    }
+
+    @Deprecated
     public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
                                String includedUsers, String excludedUsers) throws ConfigurationException, IllegalStateException
     {
-        loggerName = loggerName != null ? loggerName : DatabaseDescriptor.getAuditLoggingOptions().logger.class_name;
+        enableAuditLog(loggerName, parameters, includedKeyspaces, excludedKeyspaces, includedCategories, excludedCategories, includedUsers, excludedUsers,
+                       Integer.MIN_VALUE, null, null, Long.MIN_VALUE, Integer.MIN_VALUE, null);
+    }
 
-        Preconditions.checkNotNull(loggerName, "cassandra.yaml did not have logger in audit_logging_option and not set as parameter");
-        Preconditions.checkState(FBUtilities.isAuditLoggerClassExists(loggerName), "Unable to find AuditLogger class: "+loggerName);
+    public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
+                               String includedUsers, String excludedUsers, Integer maxArchiveRetries, Boolean block, String rollCycle,
+                               Long maxLogSize, Integer maxQueueWeight, String archiveCommand) throws IllegalStateException
+    {
+        AuditLogOptions auditOptions = DatabaseDescriptor.getAuditLoggingOptions();
+        if (archiveCommand != null && !auditOptions.allow_nodetool_archive_command)
+            throw new ConfigurationException("Can't enable audit log archiving via nodetool unless audit_logging_options.allow_nodetool_archive_command is set to true");
 
-        AuditLogOptions auditLogOptions = new AuditLogOptions();
-        auditLogOptions.enabled = true;
-        auditLogOptions.logger = new ParameterizedClass(loggerName, parameters);
-        auditLogOptions.included_keyspaces = includedKeyspaces != null ? includedKeyspaces : DatabaseDescriptor.getAuditLoggingOptions().included_keyspaces;
-        auditLogOptions.excluded_keyspaces = excludedKeyspaces != null ? excludedKeyspaces : DatabaseDescriptor.getAuditLoggingOptions().excluded_keyspaces;
-        auditLogOptions.included_categories = includedCategories != null ? includedCategories : DatabaseDescriptor.getAuditLoggingOptions().included_categories;
-        auditLogOptions.excluded_categories = excludedCategories != null ? excludedCategories : DatabaseDescriptor.getAuditLoggingOptions().excluded_categories;
-        auditLogOptions.included_users = includedUsers != null ? includedUsers : DatabaseDescriptor.getAuditLoggingOptions().included_users;
-        auditLogOptions.excluded_users = excludedUsers != null ? excludedUsers : DatabaseDescriptor.getAuditLoggingOptions().excluded_users;
+        final AuditLogOptions options = new AuditLogOptions.Builder(auditOptions)
+                                        .withEnabled(true)
+                                        .withLogger(loggerName, parameters)
+                                        .withIncludedKeyspaces(includedKeyspaces)
+                                        .withExcludedKeyspaces(excludedKeyspaces)
+                                        .withIncludedCategories(includedCategories)
+                                        .withExcludedCategories(excludedCategories)
+                                        .withIncludedUsers(includedUsers)
+                                        .withExcludedUsers(excludedUsers)
+                                        .withMaxArchiveRetries(maxArchiveRetries)
+                                        .withBlock(block)
+                                        .withRollCycle(rollCycle)
+                                        .withMaxLogSize(maxLogSize)
+                                        .withMaxQueueWeight(maxQueueWeight)
+                                        .withArchiveCommand(archiveCommand)
+                                        .build();
 
-        AuditLogManager.instance.enable(auditLogOptions);
-
-        logger.info("AuditLog is enabled with logger: [{}], included_keyspaces: [{}], excluded_keyspaces: [{}], " +
-                    "included_categories: [{}], excluded_categories: [{}], included_users: [{}], "
-                    + "excluded_users: [{}], archive_command: [{}]", auditLogOptions.logger, auditLogOptions.included_keyspaces, auditLogOptions.excluded_keyspaces,
-                    auditLogOptions.included_categories, auditLogOptions.excluded_categories, auditLogOptions.included_users, auditLogOptions.excluded_users,
-                    auditLogOptions.archive_command);
-
+        AuditLogManager.instance.enable(options);
+        logger.info("AuditLog is enabled with configuration: {}", options);
     }
 
     public boolean isAuditLogEnabled()
@@ -6063,6 +6463,30 @@
         ClientResourceLimits.setEndpointLimit(newLimit);
     }
 
+    @Override
+    public int getNativeTransportMaxRequestsPerSecond()
+    {
+        return ClientResourceLimits.getNativeTransportMaxRequestsPerSecond();
+    }
+
+    @Override
+    public void setNativeTransportMaxRequestsPerSecond(int newPerSecond)
+    {
+        ClientResourceLimits.setNativeTransportMaxRequestsPerSecond(newPerSecond);
+    }
+
+    @Override
+    public void setNativeTransportRateLimitingEnabled(boolean enabled)
+    {
+        DatabaseDescriptor.setNativeTransportRateLimitingEnabled(enabled);
+    }
+
+    @Override
+    public boolean getNativeTransportRateLimitingEnabled()
+    {
+        return DatabaseDescriptor.getNativeTransportRateLimitingEnabled();
+    }
+
     @VisibleForTesting
     public void shutdownServer()
     {
@@ -6087,7 +6511,7 @@
         maxArchiveRetries = maxArchiveRetries != Integer.MIN_VALUE ? maxArchiveRetries : fqlOptions.max_archive_retries;
 
         Preconditions.checkNotNull(path, "cassandra.yaml did not set log_dir and not set as parameter");
-        FullQueryLogger.instance.enableWithoutClean(Paths.get(path), rollCycle, blocking, maxQueueWeight, maxLogSize, archiveCommand, maxArchiveRetries);
+        FullQueryLogger.instance.enableWithoutClean(File.getPath(path), rollCycle, blocking, maxQueueWeight, maxLogSize, archiveCommand, maxArchiveRetries);
     }
 
     @Override
@@ -6117,17 +6541,17 @@
     @Override
     public Map<String, Set<InetAddress>> getOutstandingSchemaVersions()
     {
-        Map<UUID, Set<InetAddressAndPort>> outstanding = MigrationCoordinator.instance.outstandingVersions();
+        Map<UUID, Set<InetAddressAndPort>> outstanding = Schema.instance.getOutstandingSchemaVersions();
         return outstanding.entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(),
-                                                                        e -> e.getValue().stream().map(i -> i.address).collect(Collectors.toSet())));
+                                                                        e -> e.getValue().stream().map(InetSocketAddress::getAddress).collect(Collectors.toSet())));
     }
 
     @Override
     public Map<String, Set<String>> getOutstandingSchemaVersionsWithPort()
     {
-        Map<UUID, Set<InetAddressAndPort>> outstanding = MigrationCoordinator.instance.outstandingVersions();
+        Map<UUID, Set<InetAddressAndPort>> outstanding = Schema.instance.getOutstandingSchemaVersions();
         return outstanding.entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(),
-                                                                        e -> e.getValue().stream().map(InetAddressAndPort::toString).collect(Collectors.toSet())));
+                                                                        e -> e.getValue().stream().map(Object::toString).collect(Collectors.toSet())));
     }
 
     public boolean autoOptimiseIncRepairStreams()
@@ -6160,11 +6584,13 @@
         DatabaseDescriptor.setAutoOptimisePreviewRepairStreams(enabled);
     }
 
+    @Deprecated
     public int getTableCountWarnThreshold()
     {
         return DatabaseDescriptor.tableCountWarnThreshold();
     }
 
+    @Deprecated
     public void setTableCountWarnThreshold(int value)
     {
         if (value < 0)
@@ -6173,11 +6599,13 @@
         DatabaseDescriptor.setTableCountWarnThreshold(value);
     }
 
+    @Deprecated
     public int getKeyspaceCountWarnThreshold()
     {
         return DatabaseDescriptor.keyspaceCountWarnThreshold();
     }
 
+    @Deprecated
     public void setKeyspaceCountWarnThreshold(int value)
     {
         if (value < 0)
@@ -6186,6 +6614,258 @@
         DatabaseDescriptor.setKeyspaceCountWarnThreshold(value);
     }
 
+    public void setCompactionTombstoneWarningThreshold(int count)
+    {
+        if (count < 0)
+            throw new IllegalStateException("compaction tombstone warning threshold needs to be >= 0, not " + count);
+        logger.info("Setting compaction_tombstone_warning_threshold to {}", count);
+        DatabaseDescriptor.setCompactionTombstoneWarningThreshold(count);
+    }
+
+    public int getCompactionTombstoneWarningThreshold()
+    {
+        return DatabaseDescriptor.getCompactionTombstoneWarningThreshold();
+    }
+
+    public void addSnapshot(TableSnapshot snapshot)
+    {
+        snapshotManager.addSnapshot(snapshot);
+    }
+
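+    // The read-threshold accessors below exchange thresholds as DataStorageSpec strings; a null value
+    // means the corresponding threshold is not set.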
+    @Override
+    public boolean getReadThresholdsEnabled()
+    {
+        return DatabaseDescriptor.getReadThresholdsEnabled();
+    }
+
+    @Override
+    public void setReadThresholdsEnabled(boolean value)
+    {
+        DatabaseDescriptor.setReadThresholdsEnabled(value);
+    }
+
+    @Override
+    public String getCoordinatorLargeReadWarnThreshold()
+    {
+        return toString(DatabaseDescriptor.getCoordinatorReadSizeWarnThreshold());
+    }
+
+    @Override
+    public void setCoordinatorLargeReadWarnThreshold(String threshold)
+    {
+        DatabaseDescriptor.setCoordinatorReadSizeWarnThreshold(parseDataStorageSpec(threshold));
+    }
+
+    @Override
+    public String getCoordinatorLargeReadAbortThreshold()
+    {
+        return toString(DatabaseDescriptor.getCoordinatorReadSizeFailThreshold());
+    }
+
+    @Override
+    public void setCoordinatorLargeReadAbortThreshold(String threshold)
+    {
+        DatabaseDescriptor.setCoordinatorReadSizeFailThreshold(parseDataStorageSpec(threshold));
+    }
+
+    @Override
+    public String getLocalReadTooLargeWarnThreshold()
+    {
+        return toString(DatabaseDescriptor.getLocalReadSizeWarnThreshold());
+    }
+
+    @Override
+    public void setLocalReadTooLargeWarnThreshold(String threshold)
+    {
+        DatabaseDescriptor.setLocalReadSizeWarnThreshold(parseDataStorageSpec(threshold));
+    }
+
+    @Override
+    public String getLocalReadTooLargeAbortThreshold()
+    {
+        return toString(DatabaseDescriptor.getLocalReadSizeFailThreshold());
+    }
+
+    @Override
+    public void setLocalReadTooLargeAbortThreshold(String threshold)
+    {
+        DatabaseDescriptor.setLocalReadSizeFailThreshold(parseDataStorageSpec(threshold));
+    }
+
+    @Override
+    public String getRowIndexReadSizeWarnThreshold()
+    {
+        return toString(DatabaseDescriptor.getRowIndexReadSizeWarnThreshold());
+    }
+
+    @Override
+    public void setRowIndexReadSizeWarnThreshold(String threshold)
+    {
+        DatabaseDescriptor.setRowIndexReadSizeWarnThreshold(parseDataStorageSpec(threshold));
+    }
+
+    @Override
+    public String getRowIndexReadSizeAbortThreshold()
+    {
+        return toString(DatabaseDescriptor.getRowIndexReadSizeFailThreshold());
+    }
+
+    @Override
+    public void setRowIndexReadSizeAbortThreshold(String threshold)
+    {
+        DatabaseDescriptor.setRowIndexReadSizeFailThreshold(parseDataStorageSpec(threshold));
+    }
+
+    private static String toString(DataStorageSpec value)
+    {
+        return value == null ? null : value.toString();
+    }
+
+    public void setDefaultKeyspaceReplicationFactor(int value)
+    {
+        DatabaseDescriptor.setDefaultKeyspaceRF(value);
+        logger.info("set default keyspace rf to {}", value);
+    }
+
+    private static DataStorageSpec.LongBytesBound parseDataStorageSpec(String threshold)
+    {
+        return threshold == null
+               ? null
+               : new DataStorageSpec.LongBytesBound(threshold);
+    }
+
+    public int getDefaultKeyspaceReplicationFactor()
+    {
+        return DatabaseDescriptor.getDefaultKeyspaceRF();
+    }
+
+    public boolean getSkipPaxosRepairOnTopologyChange()
+    {
+        return DatabaseDescriptor.skipPaxosRepairOnTopologyChange();
+    }
+
+    public void setSkipPaxosRepairOnTopologyChange(boolean v)
+    {
+        DatabaseDescriptor.setSkipPaxosRepairOnTopologyChange(v);
+        logger.info("paxos skip topology change repair {} via jmx", v ? "enabled" : "disabled");
+    }
+
+    public String getSkipPaxosRepairOnTopologyChangeKeyspaces()
+    {
+        return Joiner.on(',').join(DatabaseDescriptor.skipPaxosRepairOnTopologyChangeKeyspaces());
+    }
+
+    public void setSkipPaxosRepairOnTopologyChangeKeyspaces(String v)
+    {
+        DatabaseDescriptor.setSkipPaxosRepairOnTopologyChangeKeyspaces(v);
+        logger.info("paxos skip topology change repair keyspaces set to  {} via jmx", v);
+    }
+
+    public boolean getPaxosAutoRepairsEnabled()
+    {
+        return PaxosState.uncommittedTracker().isAutoRepairsEnabled();
+    }
+
+    public void setPaxosAutoRepairsEnabled(boolean enabled)
+    {
+        PaxosState.uncommittedTracker().setAutoRepairsEnabled(enabled);
+        logger.info("paxos auto repairs {} via jmx", enabled ? "enabled" : "disabled");
+    }
+
+    public boolean getPaxosStateFlushEnabled()
+    {
+        return PaxosState.uncommittedTracker().isStateFlushEnabled();
+    }
+
+    public void setPaxosStateFlushEnabled(boolean enabled)
+    {
+        PaxosState.uncommittedTracker().setStateFlushEnabled(enabled);
+        logger.info("paxos state flush {} via jmx", enabled ? "enabled" : "disabled");
+    }
+
+    public List<String> getPaxosAutoRepairTables()
+    {
+        Set<TableId> tableIds = PaxosState.uncommittedTracker().tableIds();
+        List<String> tables = new ArrayList<>(tableIds.size());
+        for (TableId tableId : tableIds)
+        {
+            TableMetadata table = Schema.instance.getTableMetadata(tableId);
+            if (table == null)
+                continue;
+            tables.add(table.keyspace + '.' + table.name);
+        }
+        return tables;
+    }
+
+    public long getPaxosPurgeGraceSeconds()
+    {
+        return DatabaseDescriptor.getPaxosPurgeGrace(SECONDS);
+    }
+
+    public void setPaxosPurgeGraceSeconds(long v)
+    {
+        DatabaseDescriptor.setPaxosPurgeGrace(v);
+        logger.info("paxos purging grace seconds set to {} via jmx", v);
+    }
+
+    public String getPaxosOnLinearizabilityViolations()
+    {
+        return DatabaseDescriptor.paxosOnLinearizabilityViolations().toString();
+    }
+
+    public void setPaxosOnLinearizabilityViolations(String v)
+    {
+        DatabaseDescriptor.setPaxosOnLinearizabilityViolations(Config.PaxosOnLinearizabilityViolation.valueOf(v));
+        logger.info("paxos on linearizability violations {} via jmx", v);
+    }
+
+    public String getPaxosStatePurging()
+    {
+        return DatabaseDescriptor.paxosStatePurging().name();
+    }
+
+    public void setPaxosStatePurging(String v)
+    {
+        DatabaseDescriptor.setPaxosStatePurging(PaxosStatePurging.valueOf(v));
+        logger.info("paxos state purging {} via jmx", v);
+    }
+
+    public boolean getPaxosRepairEnabled()
+    {
+        return DatabaseDescriptor.paxosRepairEnabled();
+    }
+
+    public void setPaxosRepairEnabled(boolean enabled)
+    {
+        DatabaseDescriptor.setPaxosRepairEnabled(enabled);
+        logger.info("paxos repair {} via jmx", enabled ? "enabled" : "disabled");
+    }
+
+    public boolean getPaxosDcLocalCommitEnabled()
+    {
+        return PaxosCommit.getEnableDcLocalCommit();
+    }
+
+    public void setPaxosDcLocalCommitEnabled(boolean enabled)
+    {
+        PaxosCommit.setEnableDcLocalCommit(enabled);
+        logger.info("paxos dc local commit {} via jmx", enabled ? "enabled" : "disabled");
+    }
+
+    public String getPaxosBallotLowBound(String ksName, String tblName, String key)
+    {
+        Keyspace keyspace = Keyspace.open(ksName);
+        if (keyspace == null)
+            throw new IllegalArgumentException("Unknown keyspace '" + ksName + "'");
+
+        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(tblName);
+        if (cfs == null)
+            throw new IllegalArgumentException("Unknown table '" + tblName + "' in keyspace '" + ksName + "'");
+
+        TableMetadata table = cfs.metadata.get();
+        DecoratedKey dk = table.partitioner.decorateKey(table.partitionKeyType.fromString(key));
+        return cfs.getPaxosRepairHistory().ballotForToken(dk.getToken()).toString();
+    }
+
     public Long getRepairRpcTimeout()
     {
         return DatabaseDescriptor.getRepairRpcTimeout(MILLISECONDS);
@@ -6194,8 +6874,83 @@
     public void setRepairRpcTimeout(Long timeoutInMillis)
     {
         Preconditions.checkState(timeoutInMillis > 0);
-        DatabaseDescriptor.setRepairRpcTimeout(timeoutInMillis, MILLISECONDS);
+        DatabaseDescriptor.setRepairRpcTimeout(timeoutInMillis);
         logger.info("RepairRpcTimeout set to {}ms via JMX", timeoutInMillis);
     }
+    public void evictHungRepairs()
+    {
+        logger.info("StorageService#clearPaxosRateLimiters called via jmx");
+        Paxos.evictHungRepairs();
+    }
 
+    public void clearPaxosRepairs()
+    {
+        logger.info("StorageService#clearPaxosRepairs called via jmx");
+        PaxosTableRepairs.clearRepairs();
+    }
+
+    public void setSkipPaxosRepairCompatibilityCheck(boolean v)
+    {
+        PaxosRepair.setSkipPaxosRepairCompatibilityCheck(v);
+        logger.info("SkipPaxosRepairCompatibilityCheck set to {} via jmx", v);
+    }
+
+    public boolean getSkipPaxosRepairCompatibilityCheck()
+    {
+        return PaxosRepair.getSkipPaxosRepairCompatibilityCheck();
+    }
+
+    @Override
+    public boolean topPartitionsEnabled()
+    {
+        return DatabaseDescriptor.topPartitionsEnabled();
+    }
+
+    @Override
+    public int getMaxTopSizePartitionCount()
+    {
+        return DatabaseDescriptor.getMaxTopSizePartitionCount();
+    }
+
+    @Override
+    public void setMaxTopSizePartitionCount(int value)
+    {
+        DatabaseDescriptor.setMaxTopSizePartitionCount(value);
+    }
+
+    @Override
+    public int getMaxTopTombstonePartitionCount()
+    {
+        return DatabaseDescriptor.getMaxTopTombstonePartitionCount();
+    }
+
+    @Override
+    public void setMaxTopTombstonePartitionCount(int value)
+    {
+        DatabaseDescriptor.setMaxTopTombstonePartitionCount(value);
+    }
+
+    @Override
+    public String getMinTrackedPartitionSize()
+    {
+        return DatabaseDescriptor.getMinTrackedPartitionSizeInBytes().toString();
+    }
+
+    @Override
+    public void setMinTrackedPartitionSize(String value)
+    {
+        DatabaseDescriptor.setMinTrackedPartitionSizeInBytes(parseDataStorageSpec(value));
+    }
+
+    @Override
+    public long getMinTrackedPartitionTombstoneCount()
+    {
+        return DatabaseDescriptor.getMinTrackedPartitionTombstoneCount();
+    }
+
+    @Override
+    public void setMinTrackedPartitionTombstoneCount(long value)
+    {
+        DatabaseDescriptor.setMinTrackedPartitionTombstoneCount(value);
+    }
 }
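
The size-threshold setters added above accept human-readable sizes and store them as DataStorageSpec.LongBytesBound (null meaning "unset"), while the getters format the bound back to a string. A minimal sketch of that round trip, assuming only the DataStorageSpec API already used in this patch and that its toString() prints the spec form:

import org.apache.cassandra.config.DataStorageSpec;

public class ThresholdRoundTrip
{
    public static void main(String[] args)
    {
        // mirrors parseDataStorageSpec(): a null/absent threshold disables the check
        DataStorageSpec.LongBytesBound warn = new DataStorageSpec.LongBytesBound("1MiB");

        System.out.println(warn.toBytes()); // 1048576, the byte value reads are compared against
        System.out.println(warn);           // spec form, as toString(DataStorageSpec) returns it to JMX clients
    }
}
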
diff --git a/src/java/org/apache/cassandra/service/StorageServiceMBean.java b/src/java/org/apache/cassandra/service/StorageServiceMBean.java
index c61e45e..ac2ff68 100644
--- a/src/java/org/apache/cassandra/service/StorageServiceMBean.java
+++ b/src/java/org/apache/cassandra/service/StorageServiceMBean.java
@@ -35,7 +35,7 @@
 
 import org.apache.cassandra.db.ColumnFamilyStoreMBean;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.utils.BreaksJMX;
 
 public interface StorageServiceMBean extends NotificationEmitter
 {
@@ -271,12 +271,21 @@
     public void clearSnapshot(String tag, String... keyspaceNames) throws IOException;
 
     /**
-     *  Get the details of all the snapshot
+     * Get the details of all the snapshots
      * @return A map of snapshotName to all its details in Tabular form.
      */
+    @Deprecated
     public Map<String, TabularData> getSnapshotDetails();
 
     /**
+     * Get the details of all the snapshots
+     *
+     * @param options map of options used for filtering of snapshots
+     * @return A map of snapshotName to all its details in Tabular form.
+     */
+    public Map<String, TabularData> getSnapshotDetails(Map<String, String> options);
+
+    /**
      * Get the true size taken by all snapshots across all keyspaces.
      * @return True size taken by all the snapshots.
      */
@@ -328,6 +337,11 @@
     public void forceKeyspaceCompactionForTokenRange(String keyspaceName, String startToken, String endToken, String... tableNames) throws IOException, ExecutionException, InterruptedException;
 
     /**
+     * Forces major compactions for the range represented by the partition key
+     */
+    public void forceKeyspaceCompactionForPartitionKey(String keyspaceName, String partitionKey, String... tableNames) throws IOException, ExecutionException, InterruptedException;
+
+    /**
      * Trigger a cleanup of keys on a single keyspace
      */
     @Deprecated
@@ -364,7 +378,14 @@
      */
     @Deprecated
     public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... tableNames) throws IOException, ExecutionException, InterruptedException;
-    public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException;
+    @Deprecated
+    default int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
+    {
+        return upgradeSSTables(keyspaceName, excludeCurrentVersion, Long.MAX_VALUE, jobs, tableNames);
+    }
+
+    public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, long maxSSTableTimestamp, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException;
+    public int recompressSSTables(String keyspaceName, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException;
 
     /**
      * Rewrites all sstables from the given tables to remove deleted data.
@@ -527,12 +548,12 @@
     public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException;
 
     /*
-      Update dynamic_snitch_update_interval_in_ms
+      Update dynamic_snitch_update_interval in ms
      */
     public void setDynamicUpdateInterval(int dynamicUpdateInterval);
 
     /*
-      Get dynamic_snitch_update_interval_in_ms
+      Get dynamic_snitch_update_interval in ms
      */
     public int getDynamicUpdateInterval();
 
@@ -562,7 +583,10 @@
     public void setNativeTransportMaxConcurrentRequestsInBytes(long newLimit);
     public long getNativeTransportMaxConcurrentRequestsInBytesPerIp();
     public void setNativeTransportMaxConcurrentRequestsInBytesPerIp(long newLimit);
-
+    public int getNativeTransportMaxRequestsPerSecond();
+    public void setNativeTransportMaxRequestsPerSecond(int newPerSecond);
+    public void setNativeTransportRateLimitingEnabled(boolean enabled);
+    public boolean getNativeTransportRateLimitingEnabled();
 
     // allows a node that have been started without joining the ring to join it
     public void joinRing() throws IOException;
@@ -606,12 +630,72 @@
     public void setTruncateRpcTimeout(long value);
     public long getTruncateRpcTimeout();
 
+    public void setStreamThroughputMbitPerSec(int value);
+    /**
+     * @return stream_throughput_outbound in megabits
+     * @deprecated Use getStreamThroughputMbitPerSecAsDouble instead as this one will provide a rounded value
+     */
+    @Deprecated
+    public int getStreamThroughputMbitPerSec();
+    public double getStreamThroughputMbitPerSecAsDouble();
+
+    @Deprecated
     public void setStreamThroughputMbPerSec(int value);
+
+    /**
+     * @return stream_throughput_outbound in MiB
+     * @deprecated Use getStreamThroughputMebibytesPerSecAsDouble instead as this one will provide a rounded value
+     */
+    @Deprecated
     public int getStreamThroughputMbPerSec();
+    public void setStreamThroughputMebibytesPerSec(int value);
+    /**
+     * Returns stream_throughput_outbound rounded; for a precise value, use getStreamThroughputMebibytesPerSecAsDouble
+     * @return stream_throughput_outbound in MiB
+     */
+    public int getStreamThroughputMebibytesPerSec();
+    public double getStreamThroughputMebibytesPerSecAsDouble();
 
+    public void setInterDCStreamThroughputMbitPerSec(int value);
+
+    /**
+     * @return inter_dc_stream_throughput_outbound in megabits
+     * @deprecated Use getInterDCStreamThroughputMbitPerSecAsDouble instead as this one will provide a rounded value
+     */
+    @Deprecated
+    public int getInterDCStreamThroughputMbitPerSec();
+    public double getInterDCStreamThroughputMbitPerSecAsDouble();
+
+    @Deprecated
     public void setInterDCStreamThroughputMbPerSec(int value);
-    public int getInterDCStreamThroughputMbPerSec();
 
+    /**
+     * @return inter_dc_stream_throughput_outbound in MiB
+     * @deprecated Use getInterDCStreamThroughputMebibytesPerSecAsDouble instead as this one will provide a rounded value
+     */
+    @Deprecated
+    public int getInterDCStreamThroughputMbPerSec();
+    public void setInterDCStreamThroughputMebibytesPerSec(int value);
+    /**
+     * Returns inter_dc_stream_throughput_outbound rounded; for a precise value, use getInterDCStreamThroughputMebibytesPerSecAsDouble
+     * @return inter_dc_stream_throughput_outbound in MiB
+     */
+    public int getInterDCStreamThroughputMebibytesPerSec();
+    public double getInterDCStreamThroughputMebibytesPerSecAsDouble();
+
+    public void setEntireSSTableStreamThroughputMebibytesPerSec(int value);
+    public double getEntireSSTableStreamThroughputMebibytesPerSecAsDouble();
+
+    public void setEntireSSTableInterDCStreamThroughputMebibytesPerSec(int value);
+    public double getEntireSSTableInterDCStreamThroughputMebibytesPerSecAsDouble();
+
+    public double getCompactionThroughtputMibPerSecAsDouble();
+    public long getCompactionThroughtputBytesPerSec();
+    /**
+     * @return compaction_throughput in MiB
+     * @deprecated Use getCompactionThroughtputMibPerSecAsDouble instead as this one will provide a rounded value
+     */
+    @Deprecated
     public int getCompactionThroughputMbPerSec();
     public void setCompactionThroughputMbPerSec(int value);
 
@@ -749,6 +833,11 @@
     /** Sets the number of rows cached at the coordinator before filtering/index queries fail outright. */
     public void setCachedReplicaRowsFailThreshold(int threshold);
 
+    /** Returns the granularity of the collation index of rows within a partition **/
+    public int getColumnIndexSizeInKiB();
+    /** Sets the granularity of the collation index of rows within a partition **/
+    public void setColumnIndexSize(int columnIndexSizeInKB);
+
     /** Returns the threshold for skipping the column index when caching partition info **/
     public int getColumnIndexCacheSize();
     /** Sets the threshold for skipping the column index when caching partition info **/
@@ -764,7 +853,7 @@
     /** Sets the threshold for warning queries due to a large batch size */
     public void setBatchSizeWarnThreshold(int batchSizeDebugThreshold);
 
-    /** Sets the hinted handoff throttle in kb per second, per delivery thread. */
+    /** Sets the hinted handoff throttle in KiB per second, per delivery thread. */
     public void setHintedHandoffThrottleInKB(int throttleInKB);
 
     /**
@@ -796,8 +885,24 @@
     /** Clears the history of clients that have connected in the past **/
     void clearConnectionHistory();
     public void disableAuditLog();
-    public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories, String includedUsers, String excludedUsers) throws ConfigurationException;
-    public void enableAuditLog(String loggerName, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories, String includedUsers, String excludedUsers) throws ConfigurationException;
+    public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
+                               String includedUsers, String excludedUsers, Integer maxArchiveRetries, Boolean block, String rollCycle,
+                               Long maxLogSize, Integer maxQueueWeight, String archiveCommand) throws IllegalStateException;
+
+    @BreaksJMX("This API was exposed as throwing ConfigurationException, removing is binary compatible but not source; see https://docs.oracle.com/javase/specs/jls/se7/html/jls-13.html")
+    @Deprecated
+    public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
+                               String includedUsers, String excludedUsers) throws ConfigurationException, IllegalStateException;
+
+    @BreaksJMX("This API was exposed as throwing ConfigurationException, removing is binary compatible but not source; see https://docs.oracle.com/javase/specs/jls/se7/html/jls-13.html")
+    @Deprecated
+    public void enableAuditLog(String loggerName, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
+                               String includedUsers, String excludedUsers) throws ConfigurationException, IllegalStateException;
+
+    public void enableAuditLog(String loggerName, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
+                               String includedUsers, String excludedUsers, Integer maxArchiveRetries, Boolean block, String rollCycle,
+                               Long maxLogSize, Integer maxQueueWeight, String archiveCommand) throws IllegalStateException;
+
     public boolean isAuditLogEnabled();
     public String getCorruptedTombstoneStrategy();
     public void setCorruptedTombstoneStrategy(String strategy);
@@ -858,11 +963,87 @@
     public boolean autoOptimisePreviewRepairStreams();
     public void setAutoOptimisePreviewRepairStreams(boolean enabled);
 
+    // warning thresholds will be replaced by equivalent guardrails
+    @Deprecated
     int getTableCountWarnThreshold();
+    @Deprecated
     void setTableCountWarnThreshold(int value);
+    @Deprecated
     int getKeyspaceCountWarnThreshold();
+    @Deprecated
     void setKeyspaceCountWarnThreshold(int value);
 
+    public void setCompactionTombstoneWarningThreshold(int count);
+    public int getCompactionTombstoneWarningThreshold();
+
+    public boolean getReadThresholdsEnabled();
+    public void setReadThresholdsEnabled(boolean value);
+
+    public String getCoordinatorLargeReadWarnThreshold();
+    public void setCoordinatorLargeReadWarnThreshold(String threshold);
+    public String getCoordinatorLargeReadAbortThreshold();
+    public void setCoordinatorLargeReadAbortThreshold(String threshold);
+
+    public String getLocalReadTooLargeWarnThreshold();
+    public void setLocalReadTooLargeWarnThreshold(String value);
+    public String getLocalReadTooLargeAbortThreshold();
+    public void setLocalReadTooLargeAbortThreshold(String value);
+
+    public String getRowIndexReadSizeWarnThreshold();
+    public void setRowIndexReadSizeWarnThreshold(String value);
+    public String getRowIndexReadSizeAbortThreshold();
+    public void setRowIndexReadSizeAbortThreshold(String value);
+
+    public void setDefaultKeyspaceReplicationFactor(int value);
+    public int getDefaultKeyspaceReplicationFactor();
+
+    boolean getSkipPaxosRepairOnTopologyChange();
+    void setSkipPaxosRepairOnTopologyChange(boolean v);
+
+    String getSkipPaxosRepairOnTopologyChangeKeyspaces();
+    void setSkipPaxosRepairOnTopologyChangeKeyspaces(String v);
+
+    boolean getPaxosAutoRepairsEnabled();
+    void setPaxosAutoRepairsEnabled(boolean enabled);
+
+    boolean getPaxosStateFlushEnabled();
+    void setPaxosStateFlushEnabled(boolean enabled);
+
+    List<String> getPaxosAutoRepairTables();
+
+    long getPaxosPurgeGraceSeconds();
+    void setPaxosPurgeGraceSeconds(long v);
+
+    String getPaxosOnLinearizabilityViolations();
+    void setPaxosOnLinearizabilityViolations(String v);
+
+    String getPaxosStatePurging();
+    void setPaxosStatePurging(String v);
+
+    boolean getPaxosRepairEnabled();
+    void setPaxosRepairEnabled(boolean v);
+
+    boolean getPaxosDcLocalCommitEnabled();
+    void setPaxosDcLocalCommitEnabled(boolean v);
+
+    String getPaxosBallotLowBound(String keyspace, String table, String key);
+
     public Long getRepairRpcTimeout();
     public void setRepairRpcTimeout(Long timeoutInMillis);
+
+    public void evictHungRepairs();
+    public void clearPaxosRepairs();
+    public void setSkipPaxosRepairCompatibilityCheck(boolean v);
+    public boolean getSkipPaxosRepairCompatibilityCheck();
+
+    String getToken(String keyspaceName, String table, String partitionKey);
+    public boolean topPartitionsEnabled();
+    public int getMaxTopSizePartitionCount();
+    public void setMaxTopSizePartitionCount(int value);
+    public int getMaxTopTombstonePartitionCount();
+    public void setMaxTopTombstonePartitionCount(int value);
+    public String getMinTrackedPartitionSize();
+    public void setMinTrackedPartitionSize(String value);
+    public long getMinTrackedPartitionTombstoneCount();
+    public void setMinTrackedPartitionTombstoneCount(long value);
 }
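
For illustration, a hedged sketch of driving the new MBean operations from a JMX client. The connection URL, port 7199, and the object name "org.apache.cassandra.db:type=StorageService" are assumptions based on Cassandra's usual JMX defaults, not part of this patch:

import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import org.apache.cassandra.service.StorageServiceMBean;

public class SetReadThresholds
{
    public static void main(String[] args) throws Exception
    {
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url))
        {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            ObjectName name = new ObjectName("org.apache.cassandra.db:type=StorageService");
            StorageServiceMBean ss = JMX.newMBeanProxy(mbs, name, StorageServiceMBean.class);

            // read thresholds are DataStorageSpec strings; null disables a check
            ss.setReadThresholdsEnabled(true);
            ss.setCoordinatorLargeReadWarnThreshold("1MiB");
            ss.setCoordinatorLargeReadAbortThreshold("4MiB");

            // the *AsDouble getters return the precise, unrounded throughput
            double streamMiB = ss.getStreamThroughputMebibytesPerSecAsDouble();
            System.out.println("stream_throughput_outbound = " + streamMiB + " MiB/s");
        }
    }
}
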
diff --git a/src/java/org/apache/cassandra/service/TruncateResponseHandler.java b/src/java/org/apache/cassandra/service/TruncateResponseHandler.java
index 60e8d0b..984ba5a 100644
--- a/src/java/org/apache/cassandra/service/TruncateResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/TruncateResponseHandler.java
@@ -21,24 +21,28 @@
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.TruncateResponse;
 import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.exceptions.TruncateException;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Message;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.config.DatabaseDescriptor.getTruncateRpcTimeout;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
 public class TruncateResponseHandler implements RequestCallback<TruncateResponse>
 {
     protected static final Logger logger = LoggerFactory.getLogger(TruncateResponseHandler.class);
-    protected final SimpleCondition condition = new SimpleCondition();
+    protected final Condition condition = newOneTimeCondition();
     private final int responseCount;
     protected final AtomicInteger responses = new AtomicInteger(0);
     private final long start;
@@ -51,20 +55,20 @@
         assert 1 <= responseCount: "invalid response count " + responseCount;
 
         this.responseCount = responseCount;
-        start = System.nanoTime();
+        start = nanoTime();
     }
 
     public void get() throws TimeoutException
     {
-        long timeoutNanos = DatabaseDescriptor.getTruncateRpcTimeout(NANOSECONDS) - (System.nanoTime() - start);
+        long timeoutNanos = getTruncateRpcTimeout(NANOSECONDS) - (nanoTime() - start);
         boolean completedInTime;
         try
         {
             completedInTime = condition.await(timeoutNanos, NANOSECONDS); // TODO truncate needs a much longer timeout
         }
-        catch (InterruptedException ex)
+        catch (InterruptedException e)
         {
-            throw new AssertionError(ex);
+            throw new UncheckedInterruptedException(e);
         }
 
         if (!completedInTime)
@@ -90,7 +94,7 @@
     public void onFailure(InetAddressAndPort from, RequestFailureReason failureReason)
     {
         // If the truncation hasn't succeeded on some replica, abort and indicate this back to the client.
-        truncateFailingReplica = from.address;
+        truncateFailingReplica = from.getAddress();
         condition.signalAll();
     }
 
diff --git a/src/java/org/apache/cassandra/service/WriteResponseHandler.java b/src/java/org/apache/cassandra/service/WriteResponseHandler.java
index 94f5a80..6fe9e52 100644
--- a/src/java/org/apache/cassandra/service/WriteResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/WriteResponseHandler.java
@@ -18,7 +18,9 @@
 package org.apache.cassandra.service;
 
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.function.Supplier;
 
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.locator.ReplicaPlan;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,18 +39,19 @@
     private static final AtomicIntegerFieldUpdater<WriteResponseHandler> responsesUpdater
             = AtomicIntegerFieldUpdater.newUpdater(WriteResponseHandler.class, "responses");
 
-    public WriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan,
+    public WriteResponseHandler(ReplicaPlan.ForWrite replicaPlan,
                                 Runnable callback,
                                 WriteType writeType,
+                                Supplier<Mutation> hintOnFailure,
                                 long queryStartNanoTime)
     {
-        super(replicaPlan, callback, writeType, queryStartNanoTime);
+        super(replicaPlan, callback, writeType, hintOnFailure, queryStartNanoTime);
         responses = blockFor();
     }
 
-    public WriteResponseHandler(ReplicaPlan.ForTokenWrite replicaPlan, WriteType writeType, long queryStartNanoTime)
+    public WriteResponseHandler(ReplicaPlan.ForWrite replicaPlan, WriteType writeType, Supplier<Mutation> hintOnFailure, long queryStartNanoTime)
     {
-        this(replicaPlan, null, writeType, queryStartNanoTime);
+        this(replicaPlan, null, writeType, hintOnFailure, queryStartNanoTime);
     }
 
     public void onResponse(Message<T> m)
diff --git a/src/java/org/apache/cassandra/service/disk/usage/DiskUsageBroadcaster.java b/src/java/org/apache/cassandra/service/disk/usage/DiskUsageBroadcaster.java
new file mode 100644
index 0000000..4504ac7
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/disk/usage/DiskUsageBroadcaster.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.disk.usage;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.NoSpamLogger;
+
+/**
+ * Starts {@link DiskUsageMonitor} to monitor local disk usage state and broadcast new state via Gossip.
+ * At the same time, it caches cluster's disk usage state received via Gossip.
+ */
+public class DiskUsageBroadcaster implements IEndpointStateChangeSubscriber
+{
+    private static final Logger logger = LoggerFactory.getLogger(DiskUsageBroadcaster.class);
+    private static final NoSpamLogger noSpamLogger = NoSpamLogger.getLogger(logger, 10, TimeUnit.MINUTES);
+
+    public static final DiskUsageBroadcaster instance = new DiskUsageBroadcaster(DiskUsageMonitor.instance);
+
+    private final DiskUsageMonitor monitor;
+    private final ConcurrentMap<InetAddressAndPort, DiskUsageState> usageInfo = new ConcurrentHashMap<>();
+    private volatile boolean hasStuffedOrFullNode = false;
+
+    @VisibleForTesting
+    public DiskUsageBroadcaster(DiskUsageMonitor monitor)
+    {
+        this.monitor = monitor;
+        Gossiper.instance.register(this);
+    }
+
+    /**
+     * @return {@code true} if any node in the cluster is STUFFED OR FULL
+     */
+    public boolean hasStuffedOrFullNode()
+    {
+        return hasStuffedOrFullNode;
+    }
+
+    /**
+     * @return {@code true} if given node's disk usage is FULL
+     */
+    public boolean isFull(InetAddressAndPort endpoint)
+    {
+        return state(endpoint).isFull();
+    }
+
+    /**
+     * @return {@code true} if given node's disk usage is STUFFED
+     */
+    public boolean isStuffed(InetAddressAndPort endpoint)
+    {
+        return state(endpoint).isStuffed();
+    }
+
+    @VisibleForTesting
+    public DiskUsageState state(InetAddressAndPort endpoint)
+    {
+        return usageInfo.getOrDefault(endpoint, DiskUsageState.NOT_AVAILABLE);
+    }
+
+    public void startBroadcasting()
+    {
+        monitor.start(newState -> {
+
+            if (logger.isTraceEnabled())
+                logger.trace("Disseminating disk usage info: {}", newState);
+
+            Gossiper.instance.addLocalApplicationState(ApplicationState.DISK_USAGE,
+                                                       StorageService.instance.valueFactory.diskUsage(newState.name()));
+        });
+    }
+
+    @Override
+    public void onChange(InetAddressAndPort endpoint, ApplicationState state, VersionedValue value)
+    {
+        if (state != ApplicationState.DISK_USAGE)
+            return;
+
+        DiskUsageState usageState = DiskUsageState.NOT_AVAILABLE;
+        try
+        {
+            usageState = DiskUsageState.valueOf(value.value);
+        }
+        catch (IllegalArgumentException e)
+        {
+            noSpamLogger.warn(String.format("Found unknown DiskUsageState: %s. Using default state %s instead.",
+                                            value.value, usageState));
+        }
+        usageInfo.put(endpoint, usageState);
+
+        hasStuffedOrFullNode = usageState.isStuffedOrFull() || computeHasStuffedOrFullNode();
+    }
+
+    private boolean computeHasStuffedOrFullNode()
+    {
+        for (DiskUsageState replicaState : usageInfo.values())
+        {
+            if (replicaState.isStuffedOrFull())
+            {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    public void onJoin(InetAddressAndPort endpoint, EndpointState epState)
+    {
+        updateDiskUsage(endpoint, epState);
+    }
+
+    @Override
+    public void beforeChange(InetAddressAndPort endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue)
+    {
+        // nothing to do here
+    }
+
+    @Override
+    public void onAlive(InetAddressAndPort endpoint, EndpointState state)
+    {
+        updateDiskUsage(endpoint, state);
+    }
+
+    @Override
+    public void onDead(InetAddressAndPort endpoint, EndpointState state)
+    {
+        // do nothing, as we don't care about dead nodes
+    }
+
+    @Override
+    public void onRestart(InetAddressAndPort endpoint, EndpointState state)
+    {
+        updateDiskUsage(endpoint, state);
+    }
+
+    @Override
+    public void onRemove(InetAddressAndPort endpoint)
+    {
+        usageInfo.remove(endpoint);
+        hasStuffedOrFullNode = usageInfo.values().stream().anyMatch(DiskUsageState::isStuffedOrFull);
+    }
+
+    private void updateDiskUsage(InetAddressAndPort endpoint, EndpointState state)
+    {
+        VersionedValue localValue = state.getApplicationState(ApplicationState.DISK_USAGE);
+
+        if (localValue != null)
+        {
+            onChange(endpoint, ApplicationState.DISK_USAGE, localValue);
+        }
+    }
+}
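
A small sketch of how a caller might consult the gossip-fed cache maintained by the class above; the peer address is hypothetical, InetAddressAndPort.getByName is assumed from org.apache.cassandra.locator, and touching DiskUsageBroadcaster.instance presumes a running node (it registers with Gossiper):

import java.net.UnknownHostException;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.service.disk.usage.DiskUsageBroadcaster;
import org.apache.cassandra.service.disk.usage.DiskUsageState;

public class DiskUsageCheck
{
    public static void check() throws UnknownHostException
    {
        DiskUsageBroadcaster broadcaster = DiskUsageBroadcaster.instance;

        // cheap cluster-wide flag maintained from gossip DISK_USAGE updates
        if (broadcaster.hasStuffedOrFullNode())
        {
            InetAddressAndPort peer = InetAddressAndPort.getByName("127.0.0.1:7000");
            DiskUsageState state = broadcaster.state(peer); // NOT_AVAILABLE if never seen
            if (state.isFull())
                System.out.println(peer + " has exceeded the data disk usage fail threshold");
        }
    }
}
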
diff --git a/src/java/org/apache/cassandra/service/disk/usage/DiskUsageMonitor.java b/src/java/org/apache/cassandra/service/disk/usage/DiskUsageMonitor.java
new file mode 100644
index 0000000..fc0bb3a
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/disk/usage/DiskUsageMonitor.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.disk.usage;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.math.RoundingMode;
+import java.nio.file.FileStore;
+import java.nio.file.Files;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.db.guardrails.Guardrails;
+import org.apache.cassandra.db.guardrails.GuardrailsConfig;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.io.util.FileUtils;
+
+/**
+ * Schedules a periodic task to monitor local disk usage and notifies {@link DiskUsageBroadcaster} when the local state changes.
+ */
+public class DiskUsageMonitor
+{
+    private static final Logger logger = LoggerFactory.getLogger(DiskUsageMonitor.class);
+
+    public static DiskUsageMonitor instance = new DiskUsageMonitor();
+
+    private final Supplier<GuardrailsConfig> guardrailsConfigSupplier = () -> Guardrails.CONFIG_PROVIDER.getOrCreate(null);
+    private final Supplier<Multimap<FileStore, Directories.DataDirectory>> dataDirectoriesSupplier;
+
+    private volatile DiskUsageState localState = DiskUsageState.NOT_AVAILABLE;
+
+    @VisibleForTesting
+    public DiskUsageMonitor()
+    {
+        this.dataDirectoriesSupplier = DiskUsageMonitor::dataDirectoriesGroupedByFileStore;
+    }
+
+    @VisibleForTesting
+    public DiskUsageMonitor(Supplier<Multimap<FileStore, Directories.DataDirectory>> dataDirectoriesSupplier)
+    {
+        this.dataDirectoriesSupplier = dataDirectoriesSupplier;
+    }
+
+    /**
+     * Starts monitoring local disk usage and calls the notifier when the local disk usage state changes.
+     */
+    public void start(Consumer<DiskUsageState> notifier)
+    {
+        // start the scheduler regardless of whether the guardrail is enabled, so we can enable it later without a restart
+        ScheduledExecutors.scheduledTasks.scheduleAtFixedRate(() -> {
+
+            if (!Guardrails.localDataDiskUsage.enabled(null))
+                return;
+
+            updateLocalState(getDiskUsage(), notifier);
+        }, 0, CassandraRelevantProperties.DISK_USAGE_MONITOR_INTERVAL_MS.getLong(), TimeUnit.MILLISECONDS);
+    }
+
+    @VisibleForTesting
+    public void updateLocalState(double usageRatio, Consumer<DiskUsageState> notifier)
+    {
+        double percentage = usageRatio * 100;
+        long percentageCeiling = (long) Math.ceil(percentage);
+
+        DiskUsageState state = getState(percentageCeiling);
+
+        Guardrails.localDataDiskUsage.guard(percentageCeiling, state.toString(), false, null);
+
+        // if state remains unchanged, no need to notify peers
+        if (state == localState)
+            return;
+
+        localState = state;
+        notifier.accept(state);
+    }
+
+    /**
+     * @return local node disk usage state
+     */
+    @VisibleForTesting
+    public DiskUsageState state()
+    {
+        return localState;
+    }
+
+    /**
+     * @return The current disk usage ratio (including all memtable sizes). This is the ratio between the space taken by
+     * all the data directories and the sum of that same space and the free space available on disk. The space
+     * taken by the data directories is the actual space on disk plus the size of the memtables.
+     * Memtables are included in that calculation because they are expected to eventually be flushed to disk.
+     */
+    @VisibleForTesting
+    public double getDiskUsage()
+    {
+        // using BigInteger to handle large file systems
+        BigInteger used = BigInteger.ZERO; // space used by data directories
+        BigInteger usable = BigInteger.ZERO; // free space on disks
+
+        for (Map.Entry<FileStore, Collection<Directories.DataDirectory>> e : dataDirectoriesSupplier.get().asMap().entrySet())
+        {
+            usable = usable.add(BigInteger.valueOf(usableSpace(e.getKey())));
+
+            for (Directories.DataDirectory dir : e.getValue())
+                used = used.add(BigInteger.valueOf(dir.getRawSize()));
+        }
+
+        // The total disk size for data directories is the space that is actually used by those directories plus the
+        // free space on disk that might be used for storing those directories in the future.
+        BigInteger total = used.add(usable);
+
+        // That total space can be limited by the config property data_disk_usage_max_disk_size.
+        DataStorageSpec.LongBytesBound diskUsageMaxSize = guardrailsConfigSupplier.get().getDataDiskUsageMaxDiskSize();
+        if (diskUsageMaxSize != null)
+            total = total.min(BigInteger.valueOf(diskUsageMaxSize.toBytes()));
+
+        // Add memtables size to the amount of used space because those memtables will be flushed to data directories.
+        used = used.add(BigInteger.valueOf(getAllMemtableSize()));
+
+        if (logger.isTraceEnabled())
+            logger.trace("Disk Usage Guardrail: current disk usage = {}, total disk usage = {}.",
+                         FileUtils.stringifyFileSize(used.doubleValue()),
+                         FileUtils.stringifyFileSize(total.doubleValue()));
+
+        return new BigDecimal(used).divide(new BigDecimal(total), 5, RoundingMode.UP).doubleValue();
+    }
+
+    @VisibleForTesting
+    public long getAllMemtableSize()
+    {
+        long size = 0;
+
+        for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
+        {
+            for (Memtable memtable : cfs.getTracker().getView().getAllMemtables())
+            {
+                size += memtable.getLiveDataSize();
+            }
+        }
+
+        return size;
+    }
+
+    @VisibleForTesting
+    public DiskUsageState getState(long usagePercentage)
+    {
+        if (!Guardrails.localDataDiskUsage.enabled())
+            return DiskUsageState.NOT_AVAILABLE;
+
+        if (Guardrails.localDataDiskUsage.failsOn(usagePercentage, null))
+            return DiskUsageState.FULL;
+
+        if (Guardrails.localDataDiskUsage.warnsOn(usagePercentage, null))
+            return DiskUsageState.STUFFED;
+
+        return DiskUsageState.SPACIOUS;
+    }
+
+    private static Multimap<FileStore, Directories.DataDirectory> dataDirectoriesGroupedByFileStore()
+    {
+        Multimap<FileStore, Directories.DataDirectory> directories = HashMultimap.create();
+        try
+        {
+            for (Directories.DataDirectory dir : Directories.dataDirectories.getAllDirectories())
+            {
+                FileStore store = Files.getFileStore(dir.location.toPath());
+                directories.put(store, dir);
+            }
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException("Cannot get data directories grouped by file store", e);
+        }
+        return directories;
+    }
+
+    public static long totalDiskSpace()
+    {
+        BigInteger size = dataDirectoriesGroupedByFileStore().keys()
+                                                             .stream()
+                                                             .map(DiskUsageMonitor::totalSpace)
+                                                             .map(BigInteger::valueOf)
+                                                             .reduce(BigInteger.ZERO, BigInteger::add);
+
+        return size.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) >= 0
+               ? Long.MAX_VALUE
+               : size.longValue();
+    }
+
+    public static long totalSpace(FileStore store)
+    {
+        try
+        {
+            long size = store.getTotalSpace();
+            return size < 0 ? Long.MAX_VALUE : size;
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException("Cannot get total space of file store", e);
+        }
+    }
+
+    public static long usableSpace(FileStore store)
+    {
+        try
+        {
+            long size = store.getUsableSpace();
+            return size < 0 ? Long.MAX_VALUE : size;
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException("Cannot get usable size of file store", e);
+        }
+    }
+}
+
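
To make the ratio described in the getDiskUsage() javadoc concrete, a self-contained example with made-up sizes (400 GiB used, 600 GiB free, 2 GiB of memtables), following the same BigInteger/BigDecimal arithmetic as the method above:

import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;

public class DiskUsageRatioExample
{
    public static void main(String[] args)
    {
        BigInteger used = BigInteger.valueOf(400L << 30);    // 400 GiB on disk in data directories (hypothetical)
        BigInteger usable = BigInteger.valueOf(600L << 30);  // 600 GiB free on the same file stores
        BigInteger memtables = BigInteger.valueOf(2L << 30); // 2 GiB of unflushed memtables

        BigInteger total = used.add(usable);                 // capacity available to the data directories
        used = used.add(memtables);                          // memtables will eventually be flushed to disk

        double ratio = new BigDecimal(used).divide(new BigDecimal(total), 5, RoundingMode.UP).doubleValue();
        System.out.println(ratio); // 0.402; updateLocalState() then guards on the ceiling percentage, 41
    }
}
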
diff --git a/src/java/org/apache/cassandra/service/disk/usage/DiskUsageState.java b/src/java/org/apache/cassandra/service/disk/usage/DiskUsageState.java
new file mode 100644
index 0000000..9a46251
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/disk/usage/DiskUsageState.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.disk.usage;
+
+import org.apache.cassandra.db.guardrails.GuardrailsConfig;
+
+public enum DiskUsageState
+{
+    /** Either disk usage guardrail is not enabled or gossip state is not ready. */
+    NOT_AVAILABLE("Not Available"),
+
+    /**
+     * Disk usage is below both {@link GuardrailsConfig#getDataDiskUsagePercentageWarnThreshold()} and
+     * {@link GuardrailsConfig#getDataDiskUsagePercentageFailThreshold()}.
+     */
+    SPACIOUS("Spacious"),
+
+    /**
+     * Disk usage exceeds {@link GuardrailsConfig#getDataDiskUsagePercentageWarnThreshold()} but is below
+     * {@link GuardrailsConfig#getDataDiskUsagePercentageFailThreshold()}.
+     */
+    STUFFED("Stuffed"),
+
+    /** Disk usage exceeds {@link GuardrailsConfig#getDataDiskUsagePercentageFailThreshold()}. */
+    FULL("Full");
+
+    private final String msg;
+
+    DiskUsageState(String msg)
+    {
+        this.msg = msg;
+    }
+
+    public boolean isFull()
+    {
+        return this == FULL;
+    }
+
+    public boolean isStuffed()
+    {
+        return this == STUFFED;
+    }
+
+    public boolean isStuffedOrFull()
+    {
+        return isFull() || isStuffed();
+    }
+
+    @Override
+    public String toString()
+    {
+        return msg;
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java b/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java
index dbc4fc0..bf29ff6 100644
--- a/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java
@@ -29,6 +29,8 @@
 import org.apache.cassandra.db.rows.RowIterator;
 import org.apache.cassandra.service.ClientState;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * {@code QueryPager} that takes care of fetching the pages for aggregation queries.
  * <p>
@@ -71,9 +73,9 @@
     public PartitionIterator fetchPageInternal(int pageSize, ReadExecutionController executionController)
     {
         if (limits.isGroupByLimit())
-            return new GroupByPartitionIterator(pageSize, executionController, System.nanoTime());
+            return new GroupByPartitionIterator(pageSize, executionController, nanoTime());
 
-        return new AggregationPartitionIterator(pageSize, executionController, System.nanoTime());
+        return new AggregationPartitionIterator(pageSize, executionController, nanoTime());
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
index ca16967..bc2c79e 100644
--- a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
+++ b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
@@ -30,6 +30,8 @@
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.service.ClientState;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Pager over a list of SinglePartitionReadQuery.
  *
@@ -158,7 +160,7 @@
     public PartitionIterator fetchPageInternal(int pageSize, ReadExecutionController executionController) throws RequestValidationException, RequestExecutionException
     {
         int toQuery = Math.min(remaining, pageSize);
-        return new PagersIterator(toQuery, null, null, executionController, System.nanoTime());
+        return new PagersIterator(toQuery, null, null, executionController, nanoTime());
     }
 
     private class PagersIterator extends AbstractIterator<RowIterator> implements PartitionIterator
@@ -230,4 +232,4 @@
     {
         return remaining;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/service/pager/PagingState.java b/src/java/org/apache/cassandra/service/pager/PagingState.java
index 2c2b08b..6e1a52f 100644
--- a/src/java/org/apache/cassandra/service/pager/PagingState.java
+++ b/src/java/org/apache/cassandra/service/pager/PagingState.java
@@ -26,7 +26,6 @@
 
 import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.ByteArrayAccessor;
 import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.CompositeType;
diff --git a/src/java/org/apache/cassandra/service/paxos/AbstractPaxosCallback.java b/src/java/org/apache/cassandra/service/paxos/AbstractPaxosCallback.java
deleted file mode 100644
index ab24f50..0000000
--- a/src/java/org/apache/cassandra/service/paxos/AbstractPaxosCallback.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-package org.apache.cassandra.service.paxos;
-
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.db.WriteType;
-import org.apache.cassandra.exceptions.WriteTimeoutException;
-import org.apache.cassandra.net.RequestCallback;
-
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
-
-public abstract class AbstractPaxosCallback<T> implements RequestCallback<T>
-{
-    protected final CountDownLatch latch;
-    protected final int targets;
-    private final ConsistencyLevel consistency;
-    private final long queryStartNanoTime;
-
-    public AbstractPaxosCallback(int targets, ConsistencyLevel consistency, long queryStartNanoTime)
-    {
-        this.targets = targets;
-        this.consistency = consistency;
-        latch = new CountDownLatch(targets);
-        this.queryStartNanoTime = queryStartNanoTime;
-    }
-
-    public int getResponseCount()
-    {
-        return (int) (targets - latch.getCount());
-    }
-
-    public void await() throws WriteTimeoutException
-    {
-        try
-        {
-            long timeout = DatabaseDescriptor.getWriteRpcTimeout(NANOSECONDS) - (System.nanoTime() - queryStartNanoTime);
-            if (!latch.await(timeout, NANOSECONDS))
-                throw new WriteTimeoutException(WriteType.CAS, consistency, getResponseCount(), targets);
-        }
-        catch (InterruptedException ex)
-        {
-            throw new AssertionError("This latch shouldn't have been interrupted.");
-        }
-    }
-}
diff --git a/src/java/org/apache/cassandra/service/paxos/AbstractPaxosRepair.java b/src/java/org/apache/cassandra/service/paxos/AbstractPaxosRepair.java
new file mode 100644
index 0000000..05c67da
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/AbstractPaxosRepair.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Consumer;
+
+import com.google.common.base.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.utils.Throwables;
+
+import static org.apache.cassandra.service.paxos.AbstractPaxosRepair.Result.Outcome;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public abstract class AbstractPaxosRepair
+{
+    private static final Logger logger = LoggerFactory.getLogger(AbstractPaxosRepair.class);
+
+    public static class State {}
+
+    public interface StateUpdater<S, I, T extends Throwable>
+    {
+        State apply(S in, I param) throws T;
+    }
+
+    public interface Listener
+    {
+        void onComplete(AbstractPaxosRepair repair, Result result);
+    }
+
+    abstract class ConsumerState<T> extends State implements Consumer<T>
+    {
+        public void accept(T input)
+        {
+            updateState(this, input, ConsumerState::execute);
+        }
+
+        abstract State execute(T input) throws Throwable;
+    }
+
+    public static class Result extends State
+    {
+        enum Outcome { DONE, CANCELLED, FAILURE }
+
+        final Outcome outcome;
+
+        public Result(Outcome outcome)
+        {
+            this.outcome = outcome;
+        }
+
+        public String toString()
+        {
+            return outcome.toString();
+        }
+
+        public boolean wasSuccessful()
+        {
+            return outcome == Outcome.DONE;
+        }
+    }
+
+    static boolean isResult(State state)
+    {
+        return state instanceof Result;
+    }
+
+    public static final Result DONE = new Result(Outcome.DONE);
+
+    public static final Result CANCELLED = new Result(Outcome.CANCELLED);
+
+    public static final class Failure extends Result
+    {
+        public final Throwable failure;
+
+        Failure(Throwable failure)
+        {
+            super(Outcome.FAILURE);
+            this.failure = failure;
+        }
+
+        public String toString()
+        {
+            StringWriter sw = new StringWriter();
+            PrintWriter pw = new PrintWriter(sw);
+            if (failure != null)
+                failure.printStackTrace(pw);
+            return outcome.toString() + ": " + sw;
+        }
+
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Failure failure1 = (Failure) o;
+            return Objects.equals(failure, failure1.failure);
+        }
+
+        public int hashCode()
+        {
+            return Objects.hash(failure);
+        }
+    }
+
+    private final DecoratedKey partitionKey;
+    private final Ballot incompleteBallot;
+    private List<Listener> listeners = null;
+    private volatile State state;
+    private volatile long startedNanos = Long.MIN_VALUE;
+
+    public AbstractPaxosRepair(DecoratedKey partitionKey, Ballot incompleteBallot)
+    {
+        this.partitionKey = partitionKey;
+        this.incompleteBallot = incompleteBallot;
+    }
+
+    public State state()
+    {
+        return state;
+    }
+
+    public long startedNanos()
+    {
+        return startedNanos;
+    }
+
+    public boolean isStarted()
+    {
+        return startedNanos != Long.MIN_VALUE;
+    }
+
+    public boolean isComplete()
+    {
+        return isResult(state);
+    }
+
+    public Ballot incompleteBallot()
+    {
+        return incompleteBallot;
+    }
+
+    /**
+     * Add a listener to this repair or, if the repair has already completed, invoke the listener immediately with the result.
+     */
+    public AbstractPaxosRepair addListener(Listener listener)
+    {
+        Result result = null;
+        synchronized (this)
+        {
+            if (isResult(state))
+            {
+                result = (Result) state;
+            }
+            else
+            {
+                if (listeners == null)
+                    listeners = new ArrayList<>();
+
+                listeners.add(listener);
+            }
+        }
+
+        if (result != null)
+            listener.onComplete(this, result);
+
+        return this;
+    }
+
+    public AbstractPaxosRepair addListener(Consumer<Result> listener)
+    {
+        return addListener(((repair, result) -> listener.accept(result)));
+    }
+
+    public final DecoratedKey partitionKey()
+    {
+        return partitionKey;
+    }
+
+    public State restart(State state) { return restart(state, Long.MIN_VALUE); }
+    public abstract State restart(State state, long waitUntil);
+
+    public final synchronized AbstractPaxosRepair start()
+    {
+        updateState(null, null, (state, i2) -> {
+            Preconditions.checkState(!isStarted());
+            startedNanos = Math.max(Long.MIN_VALUE + 1, nanoTime());
+            return restart(state);
+        });
+        return this;
+    }
+
+    public final void cancel()
+    {
+        set(CANCELLED);
+    }
+
+    public final void cancelUnexceptionally()
+    {
+        try
+        {
+            cancel();
+        }
+        catch (Throwable t)
+        {
+            logger.error("Exception cancelling paxos repair", t);
+        }
+    }
+
+    public final synchronized Result await() throws InterruptedException
+    {
+        while (!isResult(state))
+            wait();
+        return (Result) state;
+    }
+
+    protected void set(Result result)
+    {
+        updateState(state(), null, (i1, i2) -> result);
+    }
+
+    protected <S extends State, I, T extends Throwable> void updateState(S expect, I param, StateUpdater<S, I, T> transform)
+    {
+        Result result = null;
+        List<Listener> listeners = null;
+        synchronized (this)
+        {
+            State next;
+            try
+            {
+                if (state != expect)
+                    return;
+
+                state = next = transform.apply(expect, param);
+            }
+            catch (Throwable t)
+            {
+                state = next = new Failure(t);
+            }
+
+            if (isResult(next))
+            {
+                notifyAll();
+                result = (Result) next;
+                listeners = this.listeners;
+                this.listeners = null;
+            }
+        }
+
+        if (result != null && listeners != null)
+        {
+            Throwable t = null;
+            for (int i=0, mi=listeners.size(); i<mi; i++)
+            {
+                try
+                {
+                    listeners.get(i).onComplete(this, result);
+                }
+                catch (Throwable throwable)
+                {
+                    t = Throwables.merge(t, throwable);
+                }
+            }
+            Throwables.maybeFail(t);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/Ballot.java b/src/java/org/apache/cassandra/service/paxos/Ballot.java
new file mode 100644
index 0000000..1787e34
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/Ballot.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import org.apache.cassandra.db.marshal.ValueAccessor;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+import static org.apache.cassandra.utils.ByteArrayUtil.getLong;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public class Ballot extends TimeUUID
+{
+    public static final long serialVersionUID = 1L;
+
+    public enum Flag
+    {
+        NONE, LOCAL, GLOBAL;
+        static final Flag[] FLAGS = values();
+    }
+
+    private static final Ballot epoch = atUnixMicrosWithLsb(0, 0, NONE);
+
+    public static Ballot none()
+    {
+        return epoch;
+    }
+
+    private Ballot(long rawTimestamp, long lsb)
+    {
+        super(rawTimestamp, lsb);
+    }
+
+    public boolean equals(Object that)
+    {
+        if (that == null) return false;
+        if (that == this) return true;
+        if (that.getClass() != Ballot.class) return false;
+        return super.equals((TimeUUID) that);
+    }
+
+    public static Ballot atUnixMicrosWithLsb(long unixMicros, long uniqueLsb, Flag flag)
+    {
+        return new Ballot(unixMicrosToRawTimestamp(unixMicros) + flag.ordinal(), uniqueLsb);
+    }
+
+    public static Ballot fromUuid(UUID uuid)
+    {
+        return fromBytes(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
+    }
+
+    public static Ballot fromBytes(long msb, long lsb)
+    {
+        return new Ballot(msbToRawTimestamp(msb), lsb);
+    }
+
+    public static Ballot fromString(String uuidString)
+    {
+        return fromUuid(UUID.fromString(uuidString));
+    }
+
+    public static Ballot deserialize(byte[] bytes)
+    {
+        if (bytes.length == 0)
+            return null;
+        return fromBytes(getLong(bytes, 0), getLong(bytes, 8));
+    }
+
+    public static Ballot deserialize(ByteBuffer buffer)
+    {
+        if (!buffer.hasRemaining())
+            return null;
+        return fromBytes(buffer.getLong(buffer.position()), buffer.getLong(buffer.position() + 8));
+    }
+
+    public static Ballot deserialize(DataInputPlus in) throws IOException
+    {
+        long msb = in.readLong();
+        long lsb = in.readLong();
+        return fromBytes(msb, lsb);
+    }
+
+    public Flag flag()
+    {
+        int i = (int)(uuidTimestamp() % 10);
+        if (i < Flag.FLAGS.length)
+            return Flag.FLAGS[i];
+        return NONE;
+    }
+
+
+    public static class Serializer extends AbstractSerializer<Ballot> implements IVersionedSerializer<Ballot>
+    {
+        public static final Serializer instance = new Serializer();
+
+        public <V> Ballot deserialize(V value, ValueAccessor<V> accessor)
+        {
+            return accessor.isEmpty(value) ? null : accessor.toBallot(value);
+        }
+
+        public Class<Ballot> getType()
+        {
+            return Ballot.class;
+        }
+
+        @Override
+        public void serialize(Ballot t, DataOutputPlus out, int version) throws IOException
+        {
+            t.serialize(out);
+        }
+
+        @Override
+        public Ballot deserialize(DataInputPlus in, int version) throws IOException
+        {
+            return Ballot.deserialize(in);
+        }
+
+        @Override
+        public long serializedSize(Ballot t, int version)
+        {
+            return sizeInBytes();
+        }
+    }
+
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/BallotGenerator.java b/src/java/org/apache/cassandra/service/paxos/BallotGenerator.java
new file mode 100644
index 0000000..f746c4e
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/BallotGenerator.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.security.SecureRandom;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.paxos.Ballot.Flag;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.service.paxos.Ballot.atUnixMicrosWithLsb;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface BallotGenerator
+{
+    static class Default implements BallotGenerator
+    {
+        private static final SecureRandom secureRandom = new SecureRandom();
+
+        public Ballot atUnixMicros(long unixMicros, Flag flag)
+        {
+            return atUnixMicrosWithLsb(unixMicros, secureRandom.nextLong(), flag);
+        }
+
+        public Ballot next(long minUnixMicros, Flag flag)
+        {
+            long unixMicros = ClientState.getTimestampForPaxos(minUnixMicros);
+            return atUnixMicros(unixMicros, flag);
+        }
+
+        public Ballot stale(long fromInMicros, long toInMicros, Flag flag)
+        {
+            long unixMicros = ThreadLocalRandom.current().nextLong(fromInMicros, toInMicros);
+            return atUnixMicros(unixMicros, flag);
+        }
+
+        public long next(long minTimestamp)
+        {
+            return ClientState.getTimestampForPaxos(minTimestamp);
+        }
+
+        public long prevUnixMicros()
+        {
+            return ClientState.getLastTimestampMicros();
+        }
+    }
+
+    static class Global
+    {
+        private static BallotGenerator instance = new Default();
+        public static Ballot atUnixMicros(long unixMicros, Flag flag) { return instance.atUnixMicros(unixMicros, flag); }
+        public static Ballot nextBallot(Flag flag) { return instance.next(Long.MIN_VALUE, flag); }
+        public static Ballot nextBallot(long minUnixMicros, Flag flag) { return instance.next(minUnixMicros, flag); }
+        public static Ballot staleBallot(long fromUnixMicros, long toUnixMicros, Flag flag) { return instance.stale(fromUnixMicros, toUnixMicros, flag); }
+        public static long prevUnixMicros() { return instance.prevUnixMicros(); }
+
+        public static void unsafeSet(BallotGenerator newInstance)
+        {
+            instance = newInstance;
+        }
+    }
+
+    Ballot atUnixMicros(long unixMicros, Flag flag);
+    Ballot next(long minUnixMicros, Flag flag);
+    Ballot stale(long fromUnixMicros, long toUnixMicros, Flag flag);
+    long prevUnixMicros();
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/service/paxos/Commit.java b/src/java/org/apache/cassandra/service/paxos/Commit.java
index 05fa595..88ab5ea 100644
--- a/src/java/org/apache/cassandra/service/paxos/Commit.java
+++ b/src/java/org/apache/cassandra/service/paxos/Commit.java
@@ -22,28 +22,235 @@
 
 
 import java.io.IOException;
-import java.util.UUID;
+import java.util.function.BiFunction;
+
+import javax.annotation.Nullable;
 
 import com.google.common.base.Objects;
 
-import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.utils.UUIDGen;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.schema.TableMetadata;
+
+import static org.apache.cassandra.db.SystemKeyspace.*;
+import static org.apache.cassandra.service.paxos.Commit.CompareResult.AFTER;
+import static org.apache.cassandra.service.paxos.Commit.CompareResult.BEFORE;
+import static org.apache.cassandra.service.paxos.Commit.CompareResult.IS_REPROPOSAL;
+import static org.apache.cassandra.service.paxos.Commit.CompareResult.WAS_REPROPOSED_BY;
+import static org.apache.cassandra.service.paxos.Commit.CompareResult.SAME;
+import static org.apache.cassandra.utils.FBUtilities.nowInSeconds;
 
 public class Commit
 {
-    public static final CommitSerializer serializer = new CommitSerializer();
+    enum CompareResult { SAME, BEFORE, AFTER, IS_REPROPOSAL, WAS_REPROPOSED_BY }
 
-    public final UUID ballot;
+    public static final CommitSerializer<Commit> serializer = new CommitSerializer<>(Commit::new);
+
+    public static class Proposal extends Commit
+    {
+        public static final CommitSerializer<Proposal> serializer = new CommitSerializer<>(Proposal::new);
+
+        public Proposal(Ballot ballot, PartitionUpdate update)
+        {
+            super(ballot, update);
+        }
+
+        public String toString()
+        {
+            return toString("Proposal");
+        }
+
+        public static Proposal of(Ballot ballot, PartitionUpdate update)
+        {
+            update = withTimestamp(update, ballot.unixMicros());
+            return new Proposal(ballot, update);
+        }
+
+        public static Proposal empty(Ballot ballot, DecoratedKey partitionKey, TableMetadata metadata)
+        {
+            return new Proposal(ballot, PartitionUpdate.emptyUpdate(metadata, partitionKey));
+        }
+
+        public Accepted accepted()
+        {
+            return new Accepted(ballot, update);
+        }
+
+        public Agreed agreed()
+        {
+            return new Agreed(ballot, update);
+        }
+    }
+
+    public static class Accepted extends Proposal
+    {
+        public static final CommitSerializer<Accepted> serializer = new CommitSerializer<>(Accepted::new);
+
+        public static Accepted none(DecoratedKey partitionKey, TableMetadata metadata)
+        {
+            return new Accepted(Ballot.none(), PartitionUpdate.emptyUpdate(metadata, partitionKey));
+        }
+
+        public Accepted(Ballot ballot, PartitionUpdate update)
+        {
+            super(ballot, update);
+        }
+
+        public Accepted(Commit commit)
+        {
+            super(commit.ballot, commit.update);
+        }
+
+        Committed committed()
+        {
+            return new Committed(ballot, update);
+        }
+
+        boolean isExpired(int nowInSec)
+        {
+            return false;
+        }
+
+        public String toString()
+        {
+            return toString("Accepted");
+        }
+
+        /**
+         * Like {@link #latest(Commit, Commit)} but also takes into account deletion time
+         */
+        public static Accepted latestAccepted(Accepted a, Accepted b)
+        {
+            int c = compare(a, b);
+            if (c != 0)
+                return c > 0 ? a : b;
+            return a instanceof AcceptedWithTTL ? ((AcceptedWithTTL)a).lastDeleted(b) : a;
+        }
+    }
+
+    public static class AcceptedWithTTL extends Accepted
+    {
+        public static AcceptedWithTTL withDefaultTTL(Commit copy)
+        {
+            return new AcceptedWithTTL(copy, nowInSeconds() + legacyPaxosTtlSec(copy.update.metadata()));
+        }
+
+        public final int localDeletionTime;
+
+        public AcceptedWithTTL(Commit copy, int localDeletionTime)
+        {
+            super(copy);
+            this.localDeletionTime = localDeletionTime;
+        }
+
+        public AcceptedWithTTL(Ballot ballot, PartitionUpdate update, int localDeletionTime)
+        {
+            super(ballot, update);
+            this.localDeletionTime = localDeletionTime;
+        }
+
+        boolean isExpired(int nowInSec)
+        {
+            return nowInSec >= localDeletionTime;
+        }
+
+        Accepted lastDeleted(Accepted b)
+        {
+            return b instanceof AcceptedWithTTL && localDeletionTime >= ((AcceptedWithTTL) b).localDeletionTime
+                   ? this : b;
+        }
+    }
+
+    // might prefer to call this Commit, but would mean refactoring more legacy code
+    public static class Agreed extends Accepted
+    {
+        public static final CommitSerializer<Agreed> serializer = new CommitSerializer<>(Agreed::new);
+
+        public Agreed(Ballot ballot, PartitionUpdate update)
+        {
+            super(ballot, update);
+        }
+
+        public Agreed(Commit copy)
+        {
+            super(copy);
+        }
+    }
+
+    public static class Committed extends Agreed
+    {
+        public static final CommitSerializer<Committed> serializer = new CommitSerializer<>(Committed::new);
+
+        public static Committed none(DecoratedKey partitionKey, TableMetadata metadata)
+        {
+            return new Committed(Ballot.none(), PartitionUpdate.emptyUpdate(metadata, partitionKey));
+        }
+
+        public Committed(Ballot ballot, PartitionUpdate update)
+        {
+            super(ballot, update);
+        }
+
+        public Committed(Commit copy)
+        {
+            super(copy);
+        }
+
+        public String toString()
+        {
+            return toString("Committed");
+        }
+
+        public static Committed latestCommitted(Committed a, Committed b)
+        {
+            int c = compare(a, b);
+            if (c != 0)
+                return c > 0 ? a : b;
+            return a instanceof CommittedWithTTL ? ((CommittedWithTTL)a).lastDeleted(b) : a;
+        }
+    }
+
+    public static class CommittedWithTTL extends Committed
+    {
+        public static CommittedWithTTL withDefaultTTL(Commit copy)
+        {
+            return new CommittedWithTTL(copy, nowInSeconds() + legacyPaxosTtlSec(copy.update.metadata()));
+        }
+
+        public final int localDeletionTime;
+
+        public CommittedWithTTL(Ballot ballot, PartitionUpdate update, int localDeletionTime)
+        {
+            super(ballot, update);
+            this.localDeletionTime = localDeletionTime;
+        }
+
+        public CommittedWithTTL(Commit copy, int localDeletionTime)
+        {
+            super(copy);
+            this.localDeletionTime = localDeletionTime;
+        }
+
+        boolean isExpired(int nowInSec)
+        {
+            return nowInSec >= localDeletionTime;
+        }
+
+        Committed lastDeleted(Committed b)
+        {
+            return b instanceof CommittedWithTTL && localDeletionTime >= ((CommittedWithTTL) b).localDeletionTime
+                   ? this : b;
+        }
+    }
+
+    public final Ballot ballot;
     public final PartitionUpdate update;
 
-    public Commit(UUID ballot, PartitionUpdate update)
+    public Commit(Ballot ballot, PartitionUpdate update)
     {
         assert ballot != null;
         assert update != null;
@@ -52,36 +259,51 @@
         this.update = update;
     }
 
-    public static Commit newPrepare(DecoratedKey key, TableMetadata metadata, UUID ballot)
+    public static Commit newPrepare(DecoratedKey partitionKey, TableMetadata metadata, Ballot ballot)
     {
-        return new Commit(ballot, PartitionUpdate.emptyUpdate(metadata, key));
+        return new Commit(ballot, PartitionUpdate.emptyUpdate(metadata, partitionKey));
     }
 
-    public static Commit newProposal(UUID ballot, PartitionUpdate update)
+    public static Commit emptyCommit(DecoratedKey partitionKey, TableMetadata metadata)
     {
-        PartitionUpdate withNewTimestamp = new PartitionUpdate.Builder(update, 0).updateAllTimestamp(UUIDGen.microsTimestamp(ballot)).build();
-        return new Commit(ballot, withNewTimestamp);
+        return new Commit(Ballot.none(), PartitionUpdate.emptyUpdate(metadata, partitionKey));
     }
 
-    public static Commit emptyCommit(DecoratedKey key, TableMetadata metadata)
+    @Deprecated
+    public static Commit newProposal(Ballot ballot, PartitionUpdate update)
     {
-        return new Commit(UUIDGen.minTimeUUID(0), PartitionUpdate.emptyUpdate(metadata, key));
+        update = withTimestamp(update, ballot.unixMicros());
+        return new Commit(ballot, update);
     }
 
     public boolean isAfter(Commit other)
     {
-        return ballot.timestamp() > other.ballot.timestamp();
+        return other == null || ballot.uuidTimestamp() > other.ballot.uuidTimestamp();
     }
 
-    public boolean hasBallot(UUID ballot)
+    public boolean isSameOrAfter(@Nullable Ballot otherBallot)
+    {
+        return otherBallot == null || otherBallot.equals(ballot) || ballot.uuidTimestamp() > otherBallot.uuidTimestamp();
+    }
+
+    public boolean isAfter(@Nullable Ballot otherBallot)
+    {
+        return otherBallot == null || ballot.uuidTimestamp() > otherBallot.uuidTimestamp();
+    }
+
+    public boolean isBefore(@Nullable Ballot otherBallot)
+    {
+        return otherBallot != null && ballot.uuidTimestamp() < otherBallot.uuidTimestamp();
+    }
+
+    public boolean hasBallot(Ballot ballot)
     {
         return this.ballot.equals(ballot);
     }
 
-    /** Whether this is an empty commit, that is one with no updates. */
-    public boolean isEmpty()
+    public boolean hasSameBallot(Commit other)
     {
-        return update.isEmpty();
+        return this.ballot.equals(other.ballot);
     }
 
     public Mutation makeMutation()
@@ -109,28 +331,170 @@
     @Override
     public String toString()
     {
-        return String.format("Commit(%s, %s)", ballot, update);
+        return toString("Commit");
     }
 
-    public static class CommitSerializer implements IVersionedSerializer<Commit>
+    public String toString(String kind)
     {
-        public void serialize(Commit commit, DataOutputPlus out, int version) throws IOException
+        return String.format("%s(%d:%s, %d:%s)", kind, ballot.uuidTimestamp(), ballot, update.stats().minTimestamp, update.toString(false));
+    }
+
+    /**
+     * We can witness reproposals of the latest successful commit; we detect this by comparing the timestamp of
+     * the update with our ballot: if they are the same, we are not a reproposal. If the update's timestamp instead
+     * matches either the ballot timestamp or the update timestamp of the latest committed proposal, then we are
+     * reproposing it and can instead simply commit it.
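+     *
+     * <p>Illustrative sketch (hypothetical ballots b1, b2 and update u): when b1 first proposes u, u's timestamp
+     * equals b1's timestamp; if a later ballot b2 re-proposes the same u, u's timestamp still matches b1's rather
+     * than b2's, so the newer commit is recognised as a reproposal of the older one.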
+     */
+    public boolean isReproposalOf(Commit older)
+    {
+        return isReproposal(older, older.ballot.uuidTimestamp(), this, this.ballot.uuidTimestamp());
+    }
+
+    private boolean isReproposal(Commit older, long ballotOfOlder, Commit newer, long ballotOfNewer)
+    {
+        // NOTE: it would in theory be possible to just check
+        // newer.update.stats().minTimestamp == older.update.stats().minTimestamp
+        // however this could be brittle if, for some reason, those timestamps don't get updated;
+        // the logic below is fail-safe: if minTimestamp is not set, we treat the commit as not a reproposal,
+        // which is the safer way to get it wrong.
+
+        // the timestamp of a mutation stays unchanged as we repropose it, so the timestamp of the mutation
+        // is the timestamp of the ballot that originally proposed it
+        long originalBallotOfNewer = newer.update.stats().minTimestamp;
+
+        // so, if the mutation and ballot timestamps match, this is not a reproposal but a first proposal
+        if (ballotOfNewer == originalBallotOfNewer)
+            return false;
+
+        // otherwise, if the original proposing ballot matches the older proposal's ballot, it is reproposing it
+        if (originalBallotOfNewer == ballotOfOlder)
+            return true;
+
+        // otherwise, it could be that both are reproposals, so just check both for the "original" ballot timestamp
+        return originalBallotOfNewer == older.update.stats().minTimestamp;
+    }
+
+    public CompareResult compareWith(Commit that)
+    {
+        long thisBallot = this.ballot.uuidTimestamp();
+        long thatBallot = that.ballot.uuidTimestamp();
+        // by the time we reach proposal and commit, timestamps are unique so we can assert identity
+        if (thisBallot == thatBallot)
+            return SAME;
+
+        if (thisBallot < thatBallot)
+            return isReproposal(this, thisBallot, that, thatBallot) ? WAS_REPROPOSED_BY : BEFORE;
+        else
+            return isReproposal(that, thatBallot, this, thisBallot) ? IS_REPROPOSAL : AFTER;
+    }
+
+    private static int compare(@Nullable Commit a, @Nullable Commit b)
+    {
+        if (a == null) return 1;
+        if (b == null) return -1;
+        return Long.compare(a.ballot.uuidTimestamp(), b.ballot.uuidTimestamp());
+    }
+
+    /**
+     * @return testIsAfter.isAfter(testIsBefore), with non-null > null
+     */
+    public static boolean isAfter(@Nullable Commit testIsAfter, @Nullable Commit testIsBefore)
+    {
+        return testIsAfter != null && testIsAfter.isAfter(testIsBefore);
+    }
+
+    /**
+     * @return testIsAfter.isAfter(testIsBefore), with non-null > null
+     */
+    public static boolean isAfter(@Nullable Ballot testIsAfter, @Nullable Commit testIsBefore)
+    {
+        return testIsAfter != null && (testIsBefore == null || testIsAfter.uuidTimestamp() > testIsBefore.ballot.uuidTimestamp());
+    }
+
+    /**
+     * @return testIsAfter.isAfter(testIsBefore), with non-null > null
+     */
+    public static boolean isAfter(@Nullable Commit testIsAfter, @Nullable Ballot testIsBefore)
+    {
+        return testIsAfter != null && (testIsBefore == null || testIsAfter.ballot.uuidTimestamp() > testIsBefore.uuidTimestamp());
+    }
+
+    /**
+     * @return testIsAfter.isAfter(testIsBefore), with non-null > null
+     */
+    public static boolean isAfter(@Nullable Ballot testIsAfter, @Nullable Ballot testIsBefore)
+    {
+        return testIsAfter != null && (testIsBefore == null || testIsAfter.uuidTimestamp() > testIsBefore.uuidTimestamp());
+    }
+
+    /**
+     * the latest of two commits, or the first commit if their ballots have equal timestamps
+     */
+    public static <C extends Commit> C latest(@Nullable C a, @Nullable C b)
+    {
+        return (a == null | b == null) ? (a == null ? b : a) : a.ballot.uuidTimestamp() >= b.ballot.uuidTimestamp() ? a : b;
+    }
+
+    /**
+     * the latest of a commit's ballot and a ballot, or the commit's ballot if their timestamps are equal
+     */
+    public static Ballot latest(@Nullable Commit a, @Nullable Ballot b)
+    {
+        return (a == null | b == null) ? (a == null ? b : a.ballot) : a.ballot.uuidTimestamp() >= b.uuidTimestamp() ? a.ballot : b;
+    }
+
+    /**
+     * the latest of two ballots, or the first ballot if equal timestamps
+     */
+    public static Ballot latest(@Nullable Ballot a, @Nullable Ballot b)
+    {
+        return (a == null | b == null) ? (a == null ? b : a) : a.uuidTimestamp() >= b.uuidTimestamp() ? a : b;
+    }
+
+    /**
+     * unequal ballots with same timestamp
+     */
+    public static boolean timestampsClash(@Nullable Commit a, @Nullable Ballot b)
+    {
+        return a != null && b != null && !a.ballot.equals(b) && a.ballot.uuidTimestamp() == b.uuidTimestamp();
+    }
+
+    public static boolean timestampsClash(@Nullable Ballot a, @Nullable Ballot b)
+    {
+        return a != null && b != null && !a.equals(b) && a.uuidTimestamp() == b.uuidTimestamp();
+    }
+
+    private static PartitionUpdate withTimestamp(PartitionUpdate update, long timestamp)
+    {
+        return new PartitionUpdate.Builder(update, 0).updateAllTimestamp(timestamp).build();
+    }
+
+    public static class CommitSerializer<T extends Commit> implements IVersionedSerializer<T>
+    {
+        final BiFunction<Ballot, PartitionUpdate, T> constructor;
+        public CommitSerializer(BiFunction<Ballot, PartitionUpdate, T> constructor)
         {
-            UUIDSerializer.serializer.serialize(commit.ballot, out, version);
+            this.constructor = constructor;
+        }
+
+        public void serialize(T commit, DataOutputPlus out, int version) throws IOException
+        {
+            commit.ballot.serialize(out);
             PartitionUpdate.serializer.serialize(commit.update, out, version);
         }
 
-        public Commit deserialize(DataInputPlus in, int version) throws IOException
+        public T deserialize(DataInputPlus in, int version) throws IOException
         {
-            UUID ballot = UUIDSerializer.serializer.deserialize(in, version);
+            Ballot ballot = Ballot.deserialize(in);
             PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, DeserializationHelper.Flag.LOCAL);
-            return new Commit(ballot, update);
+            return constructor.apply(ballot, update);
         }
 
-        public long serializedSize(Commit commit, int version)
+        public long serializedSize(T commit, int version)
         {
-            return UUIDSerializer.serializer.serializedSize(commit.ballot, version)
-                 + PartitionUpdate.serializer.serializedSize(commit.update, version);
+            return Ballot.sizeInBytes()
+                   + PartitionUpdate.serializer.serializedSize(commit.update, version);
         }
     }
+
 }
diff --git a/src/java/org/apache/cassandra/service/paxos/CommitVerbHandler.java b/src/java/org/apache/cassandra/service/paxos/CommitVerbHandler.java
deleted file mode 100644
index 12466dd..0000000
--- a/src/java/org/apache/cassandra/service/paxos/CommitVerbHandler.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-package org.apache.cassandra.service.paxos;
-
-import org.apache.cassandra.net.IVerbHandler;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.tracing.Tracing;
-
-public class CommitVerbHandler implements IVerbHandler<Commit>
-{
-    public static final CommitVerbHandler instance = new CommitVerbHandler();
-
-    public void doVerb(Message<Commit> message)
-    {
-        PaxosState.commit(message.payload);
-
-        Tracing.trace("Enqueuing acknowledge to {}", message.from());
-        MessagingService.instance().send(message.emptyResponse(), message.from());
-    }
-}
diff --git a/src/java/org/apache/cassandra/service/paxos/ContentionStrategy.java b/src/java/org/apache/cassandra/service/paxos/ContentionStrategy.java
new file mode 100644
index 0000000..7f38567
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/ContentionStrategy.java
@@ -0,0 +1,651 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+
+import com.codahale.metrics.Snapshot;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.NoSpamLogger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.DoubleSupplier;
+import java.util.function.LongBinaryOperator;
+import java.util.function.Supplier;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static java.lang.Double.parseDouble;
+import static java.lang.Integer.parseInt;
+import static java.lang.Math.*;
+import static java.util.Arrays.stream;
+import static java.util.concurrent.TimeUnit.*;
+import static org.apache.cassandra.config.DatabaseDescriptor.*;
+import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casReadMetrics;
+import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casWriteMetrics;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.Clock.waitUntil;
+
+/**
+ * <p>A strategy for making back-off decisions for Paxos operations that fail to make progress because of other paxos operations.
+ * The strategy is defined by four factors: <ul>
+ * <li> {@link #min}
+ * <li> {@link #max}
+ * <li> {@link #minDelta}
+ * <li> {@link #waitRandomizer}
+ * </ul>
+ *
+ * <p>The first three represent time periods, and may be defined dynamically based on a simple calculation over: <ul>
+ * <li> {@code pX()} recent experienced latency distribution for successful operations,
+ *                 e.g. {@code p50(rw)} the maximum of read and write median latencies,
+ *                      {@code p999(r)} the 99.9th percentile of read latencies
+ * <li> {@code attempts} the number of failed attempts made by the operation so far
+ * <li> {@code constant} a user provided floating point constant
+ * </ul>
+ *
+ * <p>Their calculation may take any of these forms
+ * <li> constant            {@code $constant$[mu]s}
+ * <li> dynamic constant    {@code pX() * constant}
+ * <li> dynamic linear      {@code pX() * constant * attempts}
+ * <li> dynamic exponential {@code pX() * constant ^ attempts}
+ *
+ * <p>Furthermore, the dynamic calculations can be bounded with a min/max, like so:
+ *  {@code min[mu]s <= dynamic expr <= max[mu]s}
+ *
+ * e.g.
+ * <li> {@code 10ms <= p50(rw)*0.66}
+ * <li> {@code 10ms <= p95(rw)*1.8^attempts <= 100ms}
+ * <li> {@code 5ms <= p50(rw)*0.5}
+ *
+ * <p>These calculations are put together to construct a range from which we draw a random number.
+ * The period we wait for {@code X} will be drawn so that {@code min <= X < max}.
+ *
+ * <p>With the constraint that {@code max} must be {@code minDelta} greater than {@code min},
+ * but no greater than its expression-defined maximum. {@code max} will be increased up until
+ * this point, after which {@code min} will be decreased until this gap is imposed.
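+ *
+ * <p>Illustrative sketch with hypothetical values: if {@code min} evaluates to 8ms, {@code max} to 10ms and
+ * {@code minDelta} to 5ms, {@code max} is first raised towards 13ms; if its expression-defined maximum is 11ms,
+ * {@code max} stops there and {@code min} is lowered towards 6ms (subject to its own expression-defined bounds)
+ * to restore the 5ms gap.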
+ *
+ * <p>The {@link #waitRandomizer} property specifies the manner in which a random value is drawn from the range.
+ * It is defined using one of the following specifiers:
+ * <li> uniform
+ * <li> exp($power$) or exponential($power$)
+ * <li> qexp($power$) or qexponential($power$) or quantizedexponential($power$)
+ *
+ * The uniform specifier is self-explanatory, selecting all values in the range with equal probability.
+ * The exponential specifier draws values towards the end of the range with higher probability, raising
+ * a floating point number in the range [0..1.0) to the power provided, and translating the resulting value
+ * to a uniform value in the range.
+ * The quantized exponential specifier partitions the range into {@code attempts} buckets, then applies the pure
+ * exponential approach to draw values from [0..attempts), before drawing a uniform value from the corresponding bucket
+ *
+ * <p>Finally, there is also a {@link #traceAfterAttempts} property that permits initiating tracing of operations
+ * that experience a certain minimum number of failed paxos rounds due to contention. A setting of 0 or 1 will initiate
+ * a trace session after the first failed ballot.
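+ *
+ * <p>As an illustrative sketch (the bounds shown are simply the defaults above, and the trace threshold is an
+ * arbitrary choice), a complete specification might read:
+ * {@code min=0 <= p50(rw)*0.66,max=10ms <= p95(rw)*1.8^attempts <= 100ms,delta=5ms <= p50(rw)*0.5,random=qexp(1.5),trace=2}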
+ */
+public class ContentionStrategy
+{
+    private static final Logger logger = LoggerFactory.getLogger(ContentionStrategy.class);
+
+    private static final Pattern BOUND = Pattern.compile(
+                "(?<const>0|[0-9]+[mu]s)" +
+                "|((?<min>0|[0-9]+[mu]s) *<= *)?" +
+                    "(p(?<perc>[0-9]+)\\((?<rw>r|w|rw|wr)\\)|(?<constbase>0|[0-9]+[mu]s))" +
+                    "\\s*([*]\\s*(?<mod>[0-9.]+)?\\s*(?<modkind>[*^]\\s*attempts)?)?" +
+                "( *<= *(?<max>0|[0-9]+[mu]s))?");
+    private static final Pattern TIME = Pattern.compile(
+                "0|([0-9]+)ms|([0-9]+)us");
+    private static final Pattern RANDOMIZER = Pattern.compile(
+                "uniform|exp(onential)?[(](?<exp>[0-9.]+)[)]|q(uantized)?exp(onential)?[(](?<qexp>[0-9.]+)[)]");
+    private static final String DEFAULT_WAIT_RANDOMIZER = "qexp(1.5)"; // quantized exponential distribution over the wait range, biased towards the upper bound
+    private static final String DEFAULT_MIN = "0 <= p50(rw)*0.66"; // at least 0ms, and at least 66% of median latency
+    private static final String DEFAULT_MAX = "10ms <= p95(rw)*1.8^attempts <= 100ms"; // p95 latency with exponential back-off at rate of 1.8^attempts
+    private static final String DEFAULT_MIN_DELTA = "5ms <= p50(rw)*0.5"; // at least 5ms, and at least 50% of median latency
+
+    private static volatile ContentionStrategy current;
+
+    // Factories can be useful for testing purposes, to supply custom implementations of selectors and modifiers.
+    final static LatencySelectorFactory selectors = new LatencySelectorFactory(){};
+    final static LatencyModifierFactory modifiers = new LatencyModifierFactory(){};
+    final static WaitRandomizerFactory randomizers = new WaitRandomizerFactory(){};
+
+    static
+    {
+        current = new ContentionStrategy(defaultWaitRandomizer(), defaultMinWait(), defaultMaxWait(), defaultMinDelta(), Integer.MAX_VALUE);
+    }
+
+    static interface LatencyModifierFactory
+    {
+        default LatencyModifier identity() { return (l, a) -> l; }
+        default LatencyModifier multiply(double constant) { return (l, a) -> saturatedCast(l * constant); }
+        default LatencyModifier multiplyByAttempts(double multiply) { return (l, a) -> saturatedCast(l * multiply * a); }
+        default LatencyModifier multiplyByAttemptsExp(double base) { return (l, a) -> saturatedCast(l * pow(base, a)); }
+    }
+
+    static interface LatencySupplier
+    {
+        abstract long get(double percentile);
+    }
+
+    static interface LatencySelector
+    {
+        abstract long select(LatencySupplier readLatencyHistogram, LatencySupplier writeLatencyHistogram);
+    }
+
+    static interface LatencySelectorFactory
+    {
+        default LatencySelector constant(long latency) { return (read, write) -> latency; }
+        default LatencySelector read(double percentile) { return (read, write) -> read.get(percentile); }
+        default LatencySelector write(double percentile) { return (read, write) -> write.get(percentile); }
+        default LatencySelector maxReadWrite(double percentile) { return (read, write) -> max(read.get(percentile), write.get(percentile)); }
+    }
+
+    static interface LatencyModifier
+    {
+        long modify(long latency, int attempts);
+    }
+
+    static interface WaitRandomizer
+    {
+        abstract long wait(long min, long max, int attempts);
+    }
+
+    static interface WaitRandomizerFactory
+    {
+        default LongBinaryOperator uniformLongSupplier() { return (min, max) -> ThreadLocalRandom.current().nextLong(min, max); } // DO NOT USE METHOD HANDLES (want to fetch afresh each time)
+        default DoubleSupplier uniformDoubleSupplier() { return () -> ThreadLocalRandom.current().nextDouble(); }
+        
+        default WaitRandomizer uniform() { return new Uniform(uniformLongSupplier()); }
+        default WaitRandomizer exponential(double power) { return new Exponential(uniformLongSupplier(), uniformDoubleSupplier(), power); }
+        default WaitRandomizer quantizedExponential(double power) { return new QuantizedExponential(uniformLongSupplier(), uniformDoubleSupplier(), power); }
+
+        static class Uniform implements WaitRandomizer
+        {
+            final LongBinaryOperator uniformLong;
+
+            public Uniform(LongBinaryOperator uniformLong)
+            {
+                this.uniformLong = uniformLong;
+            }
+
+            @Override
+            public long wait(long min, long max, int attempts)
+            {
+                return uniformLong.applyAsLong(min, max);
+            }
+        }
+
+        static abstract class AbstractExponential implements WaitRandomizer
+        {
+            final LongBinaryOperator uniformLong;
+            final DoubleSupplier uniformDouble;
+            final double power;
+
+            public AbstractExponential(LongBinaryOperator uniformLong, DoubleSupplier uniformDouble, double power)
+            {
+                this.uniformLong = uniformLong;
+                this.uniformDouble = uniformDouble;
+                this.power = power;
+            }
+        }
+
+        static class Exponential extends AbstractExponential
+        {
+            public Exponential(LongBinaryOperator uniformLong, DoubleSupplier uniformDouble, double power)
+            {
+                super(uniformLong, uniformDouble, power);
+            }
+
+            @Override
+            public long wait(long min, long max, int attempts)
+            {
+                if (attempts == 1)
+                    return uniformLong.applyAsLong(min, max);
+
+                double p = uniformDouble.getAsDouble();
+                long delta = max - min;
+                delta *= Math.pow(p, power);
+                return max - delta;
+            }
+        }
+
+        static class QuantizedExponential extends AbstractExponential
+        {
+            public QuantizedExponential(LongBinaryOperator uniformLong, DoubleSupplier uniformDouble, double power)
+            {
+                super(uniformLong, uniformDouble, power);
+            }
+
+            @Override
+            public long wait(long min, long max, int attempts)
+            {
+                long quanta = (max - min) / attempts;
+                if (attempts == 1 || quanta == 0)
+                    return uniformLong.applyAsLong(min, max);
+
+                double p = uniformDouble.getAsDouble();
+                int base = (int) (attempts * Math.pow(p, power));
+                return max - ThreadLocalRandom.current().nextLong(quanta * base, quanta * (base + 1));
+            }
+        }
+    }
+
+    static class SnapshotAndTime
+    {
+        final long validUntil;
+        final Snapshot snapshot;
+
+        SnapshotAndTime(long validUntil, Snapshot snapshot)
+        {
+            this.validUntil = validUntil;
+            this.snapshot = snapshot;
+        }
+    }
+
+    static class TimeLimitedLatencySupplier extends AtomicReference<SnapshotAndTime> implements LatencySupplier
+    {
+        final Supplier<Snapshot> snapshotSupplier;
+        final long validForNanos;
+
+        TimeLimitedLatencySupplier(Supplier<Snapshot> snapshotSupplier, long time, TimeUnit units)
+        {
+            this.snapshotSupplier = snapshotSupplier;
+            this.validForNanos = units.toNanos(time);
+        }
+
+        private Snapshot getSnapshot()
+        {
+            long now = nanoTime();
+
+            SnapshotAndTime cur = get();
+            if (cur != null && cur.validUntil > now)
+                return cur.snapshot;
+
+            Snapshot newSnapshot = snapshotSupplier.get();
+            SnapshotAndTime next = new SnapshotAndTime(now + validForNanos, newSnapshot);
+            if (compareAndSet(cur, next))
+                return next.snapshot;
+
+            return accumulateAndGet(next, (a, b) -> a.validUntil > b.validUntil ? a : b).snapshot;
+        }
+
+        @Override
+        public long get(double percentile)
+        {
+            return (long)getSnapshot().getValue(percentile);
+        }
+    }
+
+    static class Bound
+    {
+        final long min, max, onFailure;
+        final LatencyModifier modifier;
+        final LatencySelector selector;
+        final LatencySupplier reads, writes;
+
+        Bound(long min, long max, long onFailure, LatencyModifier modifier, LatencySelector selector)
+        {
+            Preconditions.checkArgument(min<=max, "min (%s) must be less than or equal to max (%s)", min, max);
+            this.min = min;
+            this.max = max;
+            this.onFailure = onFailure;
+            this.modifier = modifier;
+            this.selector = selector;
+            this.reads = new TimeLimitedLatencySupplier(casReadMetrics.latency::getSnapshot, 10L, SECONDS);
+            this.writes = new TimeLimitedLatencySupplier(casWriteMetrics.latency::getSnapshot, 10L, SECONDS);
+        }
+
+        long get(int attempts)
+        {
+            try
+            {
+                long base = selector.select(reads, writes);
+                return max(min, min(max, modifier.modify(base, attempts)));
+            }
+            catch (Throwable t)
+            {
+                NoSpamLogger.getLogger(logger, 1L, MINUTES).info("", t);
+                return onFailure;
+            }
+        }
+
+        public String toString()
+        {
+            return "Bound{" +
+                   "min=" + min +
+                   ", max=" + max +
+                   ", onFailure=" + onFailure +
+                   ", modifier=" + modifier +
+                   ", selector=" + selector +
+                   '}';
+        }
+    }
+
+    final WaitRandomizer waitRandomizer;
+    final Bound min, max, minDelta;
+    final int traceAfterAttempts;
+
+    public ContentionStrategy(String waitRandomizer, String min, String max, String minDelta, int traceAfterAttempts)
+    {
+        this.waitRandomizer = parseWaitRandomizer(waitRandomizer);
+        this.min = parseBound(min, true);
+        this.max = parseBound(max, false);
+        this.minDelta = parseBound(minDelta, true);
+        this.traceAfterAttempts = traceAfterAttempts;
+    }
+
+    public enum Type
+    {
+        READ("Contended Paxos Read"), WRITE("Contended Paxos Write"), REPAIR("Contended Paxos Repair");
+
+        final String traceTitle;
+        final String lowercase;
+
+        Type(String traceTitle)
+        {
+            this.traceTitle = traceTitle;
+            this.lowercase = name().toLowerCase();
+        }
+    }
+
+    long computeWaitUntilForContention(int attempts, TableMetadata table, DecoratedKey partitionKey, ConsistencyLevel consistency, Type type)
+    {
+        if (attempts >= traceAfterAttempts && !Tracing.isTracing())
+        {
+            Tracing.instance.newSession(Tracing.TraceType.QUERY);
+            Tracing.instance.begin(type.traceTitle,
+                                   ImmutableMap.of(
+                                       "keyspace", table.keyspace,
+                                       "table", table.name,
+                                       "partitionKey", table.partitionKeyType.getString(partitionKey.getKey()),
+                                       "consistency", consistency.name(),
+                                       "kind", type.lowercase
+                                   ));
+
+            logger.info("Tracing contended paxos {} for key {} on {}.{} with trace id {}",
+                        type.lowercase,
+                        ByteBufferUtil.bytesToHex(partitionKey.getKey()),
+                        table.keyspace, table.name,
+                        Tracing.instance.getSessionId());
+        }
+
+        long minWaitMicros = min.get(attempts);
+        long maxWaitMicros = max.get(attempts);
+        long minDeltaMicros = minDelta.get(attempts);
+
+        if (minWaitMicros + minDeltaMicros > maxWaitMicros)
+        {
+            maxWaitMicros = minWaitMicros + minDeltaMicros;
+            if (maxWaitMicros > this.max.max)
+            {
+                maxWaitMicros = this.max.max;
+                minWaitMicros = max(this.min.min, min(this.min.max, maxWaitMicros - minDeltaMicros));
+            }
+        }
+
+        long wait = waitRandomizer.wait(minWaitMicros, maxWaitMicros, attempts);
+        return nanoTime() + MICROSECONDS.toNanos(wait);
+    }
+
+    boolean doWaitForContention(long deadline, int attempts, TableMetadata table, DecoratedKey partitionKey, ConsistencyLevel consistency, Type type)
+    {
+        long until = computeWaitUntilForContention(attempts, table, partitionKey, consistency, type);
+        if (until >= deadline)
+            return false;
+
+        try
+        {
+            waitUntil(until);
+        }
+        catch (InterruptedException e)
+        {
+            Thread.currentThread().interrupt();
+            return false;
+        }
+        return true;
+    }
+
+    static boolean waitForContention(long deadline, int attempts, TableMetadata table, DecoratedKey partitionKey, ConsistencyLevel consistency, Type type)
+    {
+        return current.doWaitForContention(deadline, attempts, table, partitionKey, consistency, type);
+    }
+
+    static long waitUntilForContention(int attempts, TableMetadata table, DecoratedKey partitionKey, ConsistencyLevel consistency, Type type)
+    {
+        return current.computeWaitUntilForContention(attempts, table, partitionKey, consistency, type);
+    }
+
+    static class ParsedStrategy
+    {
+        final String waitRandomizer, min, max, minDelta;
+        final ContentionStrategy strategy;
+
+        ParsedStrategy(String waitRandomizer, String min, String max, String minDelta, ContentionStrategy strategy)
+        {
+            this.waitRandomizer = waitRandomizer;
+            this.min = min;
+            this.max = max;
+            this.minDelta = minDelta;
+            this.strategy = strategy;
+        }
+    }
+
+    @VisibleForTesting
+    static ParsedStrategy parseStrategy(String spec)
+    {
+        String[] args = spec.split(",");
+        String waitRandomizer = find(args, "random");
+        String min = find(args, "min");
+        String max = find(args, "max");
+        String minDelta = find(args, "delta");
+        String trace = find(args, "trace");
+
+        if (waitRandomizer == null) waitRandomizer = defaultWaitRandomizer();
+        if (min == null) min = defaultMinWait();
+        if (max == null) max = defaultMaxWait();
+        if (minDelta == null) minDelta = defaultMinDelta();
+        int traceAfterAttempts = trace == null ? current.traceAfterAttempts: Integer.parseInt(trace);
+
+        ContentionStrategy strategy = new ContentionStrategy(waitRandomizer, min, max, minDelta, traceAfterAttempts);
+        return new ParsedStrategy(waitRandomizer, min, max, minDelta, strategy);
+    }
+
+
+    public static void setStrategy(String spec)
+    {
+        ParsedStrategy parsed = parseStrategy(spec);
+        current = parsed.strategy;
+        setPaxosContentionWaitRandomizer(parsed.waitRandomizer);
+        setPaxosContentionMinWait(parsed.min);
+        setPaxosContentionMaxWait(parsed.max);
+        setPaxosContentionMinDelta(parsed.minDelta);
+    }
+
+    public static String getStrategySpec()
+    {
+        return "min=" + defaultMinWait()
+                + ",max=" + defaultMaxWait()
+                + ",delta=" + defaultMinDelta()
+                + ",random=" + defaultWaitRandomizer()
+                + ",trace=" + current.traceAfterAttempts;
+    }
+
+    private static String find(String[] args, String param)
+    {
+        return stream(args).filter(s -> s.startsWith(param + '='))
+                .map(s -> s.substring(param.length() + 1))
+                .findFirst().orElse(null);
+    }
+
+    private static LatencySelector parseLatencySelector(Matcher m, LatencySelectorFactory selectors)
+    {
+        String perc = m.group("perc");
+        if (perc == null)
+            return selectors.constant(parseInMicros(m.group("constbase")));
+
+        double percentile = parseDouble("0." + perc);
+        String rw = m.group("rw");
+        if (rw.length() == 2)
+            return selectors.maxReadWrite(percentile);
+        else if ("r".equals(rw))
+            return selectors.read(percentile);
+        else
+            return selectors.write(percentile);
+    }
+
+    private static LatencyModifier parseLatencyModifier(Matcher m, LatencyModifierFactory modifiers)
+    {
+        String mod = m.group("mod");
+        if (mod == null)
+            return modifiers.identity();
+
+        double modifier = parseDouble(mod);
+
+        String modkind = m.group("modkind");
+        if (modkind == null)
+            return modifiers.multiply(modifier);
+
+        if (modkind.startsWith("*"))
+            return modifiers.multiplyByAttempts(modifier);
+        else if (modkind.startsWith("^"))
+            return modifiers.multiplyByAttemptsExp(modifier);
+        else
+            throw new IllegalArgumentException("Unrecognised attempt modifier: " + modkind);
+    }
+
+    static long saturatedCast(double v)
+    {
+        if (v > Long.MAX_VALUE)
+            return Long.MAX_VALUE;
+        return (long) v;
+    }
+
+    static WaitRandomizer parseWaitRandomizer(String input)
+    {
+        return parseWaitRandomizer(input, randomizers);
+    }
+
+    static WaitRandomizer parseWaitRandomizer(String input, WaitRandomizerFactory randomizers)
+    {
+        Matcher m = RANDOMIZER.matcher(input);
+        if (!m.matches())
+            throw new IllegalArgumentException(input + " does not match" + RANDOMIZER);
+
+        String exp;
+        exp = m.group("exp");
+        if (exp != null)
+            return randomizers.exponential(Double.parseDouble(exp));
+        exp = m.group("qexp");
+        if (exp != null)
+            return randomizers.quantizedExponential(Double.parseDouble(exp));
+        return randomizers.uniform();
+    }
+
+    static Bound parseBound(String input, boolean isMin)
+    {
+        return parseBound(input, isMin, selectors, modifiers);
+    }
+
+    @VisibleForTesting
+    static Bound parseBound(String input, boolean isMin, LatencySelectorFactory selectors, LatencyModifierFactory modifiers)
+    {
+        Matcher m = BOUND.matcher(input);
+        if (!m.matches())
+            throw new IllegalArgumentException(input + " does not match " + BOUND);
+
+        String maybeConst = m.group("const");
+        if (maybeConst != null)
+        {
+            long v = parseInMicros(maybeConst);
+            return new Bound(v, v, v, modifiers.identity(), selectors.constant(v));
+        }
+
+        long min = parseInMicros(m.group("min"), 0);
+        long max = parseInMicros(m.group("max"), maxQueryTimeoutMicros() / 2);
+        return new Bound(min, max, isMin ? min : max, parseLatencyModifier(m, modifiers), parseLatencySelector(m, selectors));
+    }
+
+    private static long parseInMicros(String input, long orElse)
+    {
+        if (input == null)
+            return orElse;
+
+        return parseInMicros(input);
+    }
+
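+    // A match on TIME group 1 is interpreted as milliseconds (scaled to microseconds), group 2 as microseconds.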
+    private static long parseInMicros(String input)
+    {
+        Matcher m = TIME.matcher(input);
+        if (!m.matches())
+            throw new IllegalArgumentException(input + " does not match " + TIME);
+
+        String text;
+        if (null != (text = m.group(1)))
+            return parseInt(text) * 1000;
+        else if (null != (text = m.group(2)))
+            return parseInt(text);
+        else
+            return 0;
+    }
+
+    @VisibleForTesting
+    static String defaultWaitRandomizer()
+    {
+        return orElse(DatabaseDescriptor::getPaxosContentionWaitRandomizer, DEFAULT_WAIT_RANDOMIZER);
+    }
+
+    @VisibleForTesting
+    static String defaultMinWait()
+    {
+        return orElse(DatabaseDescriptor::getPaxosContentionMinWait, DEFAULT_MIN);
+    }
+
+    @VisibleForTesting
+    static String defaultMaxWait()
+    {
+        return orElse(DatabaseDescriptor::getPaxosContentionMaxWait, DEFAULT_MAX);
+    }
+
+    @VisibleForTesting
+    static String defaultMinDelta()
+    {
+        return orElse(DatabaseDescriptor::getPaxosContentionMinDelta, DEFAULT_MIN_DELTA);
+    }
+
+    @VisibleForTesting
+    static long maxQueryTimeoutMicros()
+    {
+        return max(max(getCasContentionTimeout(MICROSECONDS), getWriteRpcTimeout(MICROSECONDS)), getReadRpcTimeout(MICROSECONDS));
+    }
+
+    private static String orElse(Supplier<String> get, String orElse)
+    {
+        String result = get.get();
+        return result != null ? result : orElse;
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/Paxos.java b/src/java/org/apache/cassandra/service/paxos/Paxos.java
new file mode 100644
index 0000000..bf5f90e
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/Paxos.java
@@ -0,0 +1,1255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import javax.annotation.Nullable;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Maps;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.codahale.metrics.Meter;
+import org.apache.cassandra.exceptions.CasWriteTimeoutException;
+import org.apache.cassandra.exceptions.ExceptionCode;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.locator.AbstractReplicationStrategy;
+import org.apache.cassandra.locator.EndpointsForToken;
+import org.apache.cassandra.locator.InOurDc;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.ReplicaLayout;
+import org.apache.cassandra.locator.ReplicaLayout.ForTokenWrite;
+import org.apache.cassandra.locator.ReplicaPlan.ForRead;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.SinglePartitionReadCommand;
+import org.apache.cassandra.db.WriteType;
+import org.apache.cassandra.db.partitions.FilteredPartition;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.db.partitions.PartitionIterators;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.exceptions.IsBootstrappingException;
+import org.apache.cassandra.exceptions.ReadFailureException;
+import org.apache.cassandra.exceptions.ReadTimeoutException;
+import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.apache.cassandra.exceptions.RequestFailureException;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.exceptions.RequestTimeoutException;
+import org.apache.cassandra.exceptions.UnavailableException;
+import org.apache.cassandra.exceptions.WriteFailureException;
+import org.apache.cassandra.exceptions.WriteTimeoutException;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.metrics.ClientRequestMetrics;
+import org.apache.cassandra.service.CASRequest;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.FailureRecordingCallback.AsMap;
+import org.apache.cassandra.service.paxos.Commit.Proposal;
+import org.apache.cassandra.service.reads.DataResolver;
+import org.apache.cassandra.service.reads.repair.NoopReadRepair;
+import org.apache.cassandra.service.paxos.cleanup.PaxosTableRepairs;
+import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.triggers.TriggerExecutor;
+import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.CollectionSerializer;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.service.paxos.PaxosPrepare.FoundIncompleteAccepted;
+import org.apache.cassandra.service.paxos.PaxosPrepare.FoundIncompleteCommitted;
+import org.apache.cassandra.utils.NoSpamLogger;
+
+import static java.util.Collections.emptyMap;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.config.Config.PaxosVariant.v2_without_linearizable_reads_or_rejected_writes;
+import static org.apache.cassandra.db.Keyspace.openAndGetStore;
+import static org.apache.cassandra.exceptions.RequestFailureReason.TIMEOUT;
+import static org.apache.cassandra.gms.ApplicationState.RELEASE_VERSION;
+import static org.apache.cassandra.config.DatabaseDescriptor.*;
+import static org.apache.cassandra.db.ConsistencyLevel.*;
+import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
+import static org.apache.cassandra.locator.ReplicaLayout.forTokenWriteLiveAndDown;
+import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casReadMetrics;
+import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casWriteMetrics;
+import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.readMetrics;
+import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.readMetricsMap;
+import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.writeMetricsMap;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.GLOBAL;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.LOCAL;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.nextBallot;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.staleBallot;
+import static org.apache.cassandra.service.paxos.ContentionStrategy.*;
+import static org.apache.cassandra.service.paxos.ContentionStrategy.Type.READ;
+import static org.apache.cassandra.service.paxos.ContentionStrategy.Type.WRITE;
+import static org.apache.cassandra.service.paxos.PaxosCommit.commit;
+import static org.apache.cassandra.service.paxos.PaxosCommitAndPrepare.commitAndPrepare;
+import static org.apache.cassandra.service.paxos.PaxosPrepare.prepare;
+import static org.apache.cassandra.service.paxos.PaxosPropose.propose;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.CollectionSerializer.newHashSet;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.apache.cassandra.utils.NoSpamLogger.Level.WARN;
+
+/**
+ * <p>This class serves as an entry-point to Cassandra's implementation of Paxos Consensus.
+ * Note that Cassandra does not utilise the distinguished proposer (Multi Paxos) optimisation;
+ * each operation executes its own instance of Paxos Consensus. Instead Cassandra employs
+ * various optimisations to reduce the overhead of operations. This may lead to higher throughput
+ * and lower overhead read operations, at the expense of contention during mixed or write-heavy workloads.
+ *
+ * Firstly, note that we do not follow Lamport's formulation, instead following the more common approach in
+ * literature (see e.g. Dr. Heidi Howard's dissertation) of permitting any acceptor to vote on a proposal,
+ * not only those who issued a promise.
+ *
+ * <h2>No Commit of Empty Proposals</h2>
+ * <p>If a proposal is empty, there can be no effect to the state, so once this empty proposal has poisoned any earlier
+ * proposal it is safe to stop processing. An empty proposal effectively scrubs the instance of consensus being
+ * performed once it has reached a quorum, as no earlier incomplete proposal (that may perhaps have reached a minority)
+ * may now be completed.
+ *
+ * <h2>Fast Read / Failed Write</h2>
+ * <p>This optimisation relies on every voter having no incomplete promises, i.e. their commit register must be greater
+ * than or equal to their promise and proposal registers (or there must be such an empty proposal).
+ * Since the operation we are performing must invalidate any nascent operation that has reached a minority, and will
+ * itself be invalidated by any newer write it might race with, we are only concerned about operations that might be
+ * in-flight and incomplete. If we reach a quorum without any incomplete proposal, we prevent any incomplete proposal
+ * that might have come before us from being committed, and so are correctly ordered.
+ *
+ * <p>NOTE: we could likely weaken this further, permitting a fast operation if we witness a stale incomplete operation
+ * on one or more of the replicas, so long as we witness _some_ response that had knowledge of that operation's decision;
+ * however, we might waste more time performing optimistic reads (which we skip if we witness any in-progress promise).
+ *
+ * <h2>Read Commutativity Optimisation</h2>
+ * <p>We separate read and write promises into distinct registers. Since reads are commutative they do not need to be
+ * ordered with respect to each other, so read promises consult only the write promise register to find competing
+ * operations, whereas writes consult both read and write registers. This permits better utilisation of the Fast Read
+ * optimisation, permitting arbitrarily many fast reads to execute concurrently.
+ *
+ * <p>A read will use its promise to finish any in-progress write it encounters; note that it is safe for multiple
+ * reads to attempt this simultaneously. If a write operation has not reached a quorum of promises then it has no effect,
+ * so while some read operations may attempt to complete it and others may not, the operation will only be invalidated
+ * and these actions will be equivalent. If the write had reached a quorum of promises then every read will attempt
+ * to complete the write. At the accept phase, only the most recent read promise will be accepted so whether the write
+ * proposal had reached a quorum or not, a consistent outcome will result.
+ *
+ * <h2>Reproposal Avoidance</h2>
+ * <p>It can occur that two (or more) commands begin competing to re-propose the same incomplete command even after it
+ * has already committed - this can occur when an in progress command that has reached the commit condition (but not yet
+ * committed) is encountered by a promise, so that it is re-proposed. If the original coordinator does not fail this
+ * original command will be committed normally, but the re-proposal can take on a life of its own, and become contended
+ * and re-proposed indefinitely. By having reproposals use the original proposal ballot's timestamp we spot this situation
+ * and consider re-proposals of a command we have seen committed to be (in effect) empty proposals.
+ *
+ * <h2>Durability of Asynchronous Commit</h2>
+ * To permit asynchronous commit (and also because we should) we ensure commits are durable once a proposal has been
+ * accepted by a majority.
+ *
+ * Replicas track commands that have *locally* been witnessed but not committed. They may clear this log by performing
+ * a round of Paxos Repair for each key in the log (which is simply a round of Paxos that tries not to interfere with
+ * future rounds of Paxos, while aiming to complete any earlier incomplete round).
+ *
+ * By selecting some quorum of replicas for a range to perform this operation on, once successful we guarantee that
+ * any transaction that had previously been accepted by a majority has been committed, and any transaction that had been
+ * previously witnessed by a majority has been either committed or invalidated.
+ *
+ * To ensure durability across range movements, once a joining node becomes pending such a coordinated paxos repair
+ * is performed prior to bootstrap, so that commands initiated before the node joined will either be bootstrapped
+ * or completed by paxos repair and so committed to a majority that includes the new node in its calculations;
+ * commands initiated afterwards will include it anyway, since the node is pending.
+ *
+ * Finally, for greater guarantees across range movements despite the uncertainty of gossip, paxos operations validate
+ * ring information with each other while seeking a quorum of promises. Any inconsistency is resolved by synchronising
+ * gossip state between the coordinator and the peers in question.
+ *
+ * <h2>Clearing of Paxos State</h2>
+ * Coordinated paxos repairs as described above are preceded by a preparation step that determines a ballot below
+ * which we agree to reject new promises. By deciding and disseminating this point prior to performing a coordinated
+ * paxos repair, once complete we have ensured that all commands with a lower ballot are either committed or invalidated,
+ * and so we are then able to disseminate this ballot as a bound below which we may expunge all data for the range.
+ *
+ * For consistency of execution, coordinators seek this latter ballot bound from each replica and, using the maximum of
+ * these, ignore all data received that is associated with ballots lower than this bound.
+ */
+public class Paxos
+{
+    private static final Logger logger = LoggerFactory.getLogger(Paxos.class);
+
+    private static volatile Config.PaxosVariant PAXOS_VARIANT = DatabaseDescriptor.getPaxosVariant();
+    private static final CassandraVersion MODERN_PAXOS_RELEASE = new CassandraVersion(System.getProperty("cassandra.paxos.modern_release", "4.1"));
+    static final boolean LOG_TTL_LINEARIZABILITY_VIOLATIONS = Boolean.parseBoolean(System.getProperty("cassandra.paxos.log_ttl_linearizability_violations", "true"));
+
+    static class Electorate implements Iterable<InetAddressAndPort>
+    {
+        static final Serializer serializer = new Serializer();
+
+        // all replicas, including pending, but without those in a remote DC if consistency is local
+        final Collection<InetAddressAndPort> natural;
+
+        // pending subset of electorate
+        final Collection<InetAddressAndPort> pending;
+
+        public Electorate(Collection<InetAddressAndPort> natural, Collection<InetAddressAndPort> pending)
+        {
+            this.natural = natural;
+            this.pending = pending;
+        }
+
+        public int size()
+        {
+            return natural.size() + pending.size();
+        }
+
+        @Override
+        public Iterator<InetAddressAndPort> iterator()
+        {
+            return Iterators.concat(natural.iterator(), pending.iterator());
+        }
+
+        static Electorate get(TableMetadata table, DecoratedKey key, ConsistencyLevel consistency)
+        {
+            return get(consistency, forTokenWriteLiveAndDown(Keyspace.open(table.keyspace), key.getToken()));
+        }
+
+        static Electorate get(ConsistencyLevel consistency, ForTokenWrite all)
+        {
+            ForTokenWrite electorate = all;
+            if (consistency == LOCAL_SERIAL)
+                electorate = all.filter(InOurDc.replicas());
+
+            return new Electorate(electorate.natural().endpointList(), electorate.pending().endpointList());
+        }
+
+        boolean hasPending()
+        {
+            return !pending.isEmpty();
+        }
+
+        boolean isPending(InetAddressAndPort endpoint)
+        {
+            return hasPending() && pending.contains(endpoint);
+        }
+
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Electorate that = (Electorate) o;
+            return natural.equals(that.natural) && pending.equals(that.pending);
+        }
+
+        public int hashCode()
+        {
+            return Objects.hash(natural, pending);
+        }
+
+        public String toString()
+        {
+            return "{" + natural + ", " + pending + '}';
+        }
+
+        static class Serializer implements IVersionedSerializer<Electorate>
+        {
+            public void serialize(Electorate electorate, DataOutputPlus out, int version) throws IOException
+            {
+                CollectionSerializer.serializeCollection(inetAddressAndPortSerializer, electorate.natural, out, version);
+                CollectionSerializer.serializeCollection(inetAddressAndPortSerializer, electorate.pending, out, version);
+            }
+
+            public Electorate deserialize(DataInputPlus in, int version) throws IOException
+            {
+                Set<InetAddressAndPort> endpoints = CollectionSerializer.deserializeCollection(inetAddressAndPortSerializer, newHashSet(), in, version);
+                Set<InetAddressAndPort> pending = CollectionSerializer.deserializeCollection(inetAddressAndPortSerializer, newHashSet(), in, version);
+                return new Electorate(endpoints, pending);
+            }
+
+            public long serializedSize(Electorate electorate, int version)
+            {
+                return CollectionSerializer.serializedSizeCollection(inetAddressAndPortSerializer, electorate.natural, version) +
+                       CollectionSerializer.serializedSizeCollection(inetAddressAndPortSerializer, electorate.pending, version);
+            }
+        }
+    }
+
+    /**
+     * Encapsulates the peers we will talk to for this operation.
+     */
+    static class Participants implements ForRead<EndpointsForToken, Participants>, Supplier<Participants>
+    {
+        final Keyspace keyspace;
+
+        final AbstractReplicationStrategy replicationStrategy;
+
+        /**
+         * SERIAL or LOCAL_SERIAL
+         */
+        final ConsistencyLevel consistencyForConsensus;
+
+        /**
+         * Those members that vote for {@link #consistencyForConsensus}
+         */
+        final Electorate electorate;
+
+        /**
+         * Those members of {@link #electorate} that we will 'poll' for their vote
+         * i.e. {@link #electorate} with down nodes removed
+         */
+
+        private final EndpointsForToken electorateNatural;
+        final EndpointsForToken electorateLive;
+
+        final EndpointsForToken all;
+        final EndpointsForToken allLive;
+        final EndpointsForToken allDown;
+        final EndpointsForToken pending;
+
+        /**
+         * The number of responses we require to reach desired consistency from members of {@code contact}
+         */
+        final int sizeOfConsensusQuorum;
+
+        /**
+         * The number of read responses we require to reach desired consistency from members of {@code contact}
+         * Note that this should always be met if {@link #sizeOfConsensusQuorum} is met, but we supply it separately
+         * for corroboration.
+         */
+        final int sizeOfReadQuorum;
+
+        Participants(Keyspace keyspace, ConsistencyLevel consistencyForConsensus, ReplicaLayout.ForTokenWrite all, ReplicaLayout.ForTokenWrite electorate, EndpointsForToken live)
+        {
+            this.keyspace = keyspace;
+            this.replicationStrategy = all.replicationStrategy();
+            this.consistencyForConsensus = consistencyForConsensus;
+            this.all = all.all();
+            this.pending = all.pending();
+            this.allDown = all.all() == live ? EndpointsForToken.empty(all.token()) : all.all().without(live.endpoints());
+            this.electorate = new Electorate(electorate.natural().endpointList(), electorate.pending().endpointList());
+            this.electorateNatural = electorate.natural();
+            this.electorateLive = electorate.all() == live ? live : electorate.all().keep(live.endpoints());
+            this.allLive = live;
+            this.sizeOfReadQuorum = electorate.natural().size() / 2 + 1;
+            this.sizeOfConsensusQuorum = sizeOfReadQuorum + electorate.pending().size();
+        }
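+
+        // Worked example (illustrative): with a natural electorate of three replicas and one pending replica,
+        // sizeOfReadQuorum = 3/2 + 1 = 2 and sizeOfConsensusQuorum = 2 + 1 = 3, i.e. a majority of the natural
+        // electorate plus one additional response per pending replica.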
+
+        @Override
+        public int readQuorum()
+        {
+            return sizeOfReadQuorum;
+        }
+
+        @Override
+        public EndpointsForToken readCandidates()
+        {
+            // Note: we could probably return electorateLive here and save a reference, but it's not strictly correct
+            return electorateNatural;
+        }
+
+        static Participants get(TableMetadata table, Token token, ConsistencyLevel consistencyForConsensus)
+        {
+            Keyspace keyspace = Keyspace.open(table.keyspace);
+            ReplicaLayout.ForTokenWrite all = forTokenWriteLiveAndDown(keyspace, token);
+            ReplicaLayout.ForTokenWrite electorate = consistencyForConsensus.isDatacenterLocal()
+                                                     ? all.filter(InOurDc.replicas()) : all;
+
+            EndpointsForToken live = all.all().filter(FailureDetector.isReplicaAlive);
+
+            return new Participants(keyspace, consistencyForConsensus, all, electorate, live);
+        }
+
+        static Participants get(TableMetadata cfm, DecoratedKey key, ConsistencyLevel consistency)
+        {
+            return get(cfm, key.getToken(), consistency);
+        }
+
+        int sizeOfPoll()
+        {
+            return electorateLive.size();
+        }
+
+        InetAddressAndPort voter(int i)
+        {
+            return electorateLive.endpoint(i);
+        }
+
+        void assureSufficientLiveNodes(boolean isWrite) throws UnavailableException
+        {
+            if (sizeOfConsensusQuorum > sizeOfPoll())
+            {
+                mark(isWrite, m -> m.unavailables, consistencyForConsensus);
+                throw new UnavailableException("Cannot achieve consistency level " + consistencyForConsensus, consistencyForConsensus, sizeOfConsensusQuorum, sizeOfPoll());
+            }
+        }
+
+        void assureSufficientLiveNodesForRepair() throws UnavailableException
+        {
+            if (sizeOfConsensusQuorum > sizeOfPoll())
+            {
+                throw UnavailableException.create(consistencyForConsensus, sizeOfConsensusQuorum, sizeOfPoll());
+            }
+        }
+
+        int requiredFor(ConsistencyLevel consistency)
+        {
+            if (consistency == Paxos.nonSerial(consistencyForConsensus))
+                return sizeOfConsensusQuorum;
+
+            return consistency.blockForWrite(replicationStrategy(), pending);
+        }
+
+        public boolean hasOldParticipants()
+        {
+            return electorateLive.anyMatch(Paxos::isOldParticipant);
+        }
+
+        @Override
+        public Participants get()
+        {
+            return this;
+        }
+
+        @Override
+        public Keyspace keyspace()
+        {
+            return keyspace;
+        }
+
+        @Override
+        public AbstractReplicationStrategy replicationStrategy()
+        {
+            return replicationStrategy;
+        }
+
+        @Override
+        public ConsistencyLevel consistencyLevel()
+        {
+            return nonSerial(consistencyForConsensus);
+        }
+
+        @Override
+        public EndpointsForToken contacts()
+        {
+            return electorateLive;
+        }
+
+        @Override
+        public Replica lookup(InetAddressAndPort endpoint)
+        {
+            return all.lookup(endpoint);
+        }
+
+        @Override
+        public Participants withContacts(EndpointsForToken newContacts)
+        {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    /**
+     * Encapsulates information about a failure to reach Success, either because of explicit failure responses
+     * or insufficient responses (in which case the status is not final)
+     */
+    static class MaybeFailure
+    {
+        final boolean isFailure;
+        final String serverError;
+        final int contacted;
+        final int required;
+        final int successes;
+        final Map<InetAddressAndPort, RequestFailureReason> failures;
+
+        static MaybeFailure noResponses(Participants contacted)
+        {
+            return new MaybeFailure(false, contacted.sizeOfPoll(), contacted.sizeOfConsensusQuorum, 0, emptyMap());
+        }
+
+        MaybeFailure(Participants contacted, int successes, AsMap failures)
+        {
+            this(contacted.sizeOfPoll() - failures.failureCount() < contacted.sizeOfConsensusQuorum, contacted.sizeOfPoll(), contacted.sizeOfConsensusQuorum, successes, failures);
+        }
+
+        MaybeFailure(int contacted, int required, int successes, AsMap failures)
+        {
+            this(contacted - failures.failureCount() < required, contacted, required, successes, failures);
+        }
+
+        MaybeFailure(boolean isFailure, int contacted, int required, int successes, Map<InetAddressAndPort, RequestFailureReason> failures)
+        {
+            this(isFailure, null, contacted, required, successes, failures);
+        }
+
+        MaybeFailure(boolean isFailure, String serverError, int contacted, int required, int successes, Map<InetAddressAndPort, RequestFailureReason> failures)
+        {
+            this.isFailure = isFailure;
+            this.serverError = serverError;
+            this.contacted = contacted;
+            this.required = required;
+            this.successes = successes;
+            this.failures = failures;
+        }
+
+        private static int failureCount(Map<InetAddressAndPort, RequestFailureReason> failures)
+        {
+            int count = 0;
+            for (RequestFailureReason reason : failures.values())
+                count += reason != TIMEOUT ? 1 : 0;
+            return count;
+        }
+
+        /**
+         * update relevant counters and throw the relevant exception
+         */
+        RequestExecutionException markAndThrowAsTimeoutOrFailure(boolean isWrite, ConsistencyLevel consistency, int failedAttemptsDueToContention)
+        {
+            if (isFailure)
+            {
+                mark(isWrite, m -> m.failures, consistency);
+                throw serverError != null ? new RequestFailureException(ExceptionCode.SERVER_ERROR, serverError, consistency, successes, required, failures)
+                                          : isWrite
+                                            ? new WriteFailureException(consistency, successes, required, WriteType.CAS, failures)
+                                            : new ReadFailureException(consistency, successes, required, false, failures);
+            }
+            else
+            {
+                mark(isWrite, m -> m.timeouts, consistency);
+                throw isWrite
+                        ? new CasWriteTimeoutException(WriteType.CAS, consistency, successes, required, failedAttemptsDueToContention)
+                        : new ReadTimeoutException(consistency, successes, required, false);
+            }
+        }
+
+        public String toString()
+        {
+            return (isFailure ? "Failure(" : "Timeout(") + successes + ',' + failures + ')';
+        }
+    }
+
+    public interface Async<Result>
+    {
+        Result awaitUntil(long until);
+    }
+
+    /**
+     * Apply the updates supplied by {@code request} if and only if the current values in the row for {@code key}
+     * match the conditions it provides.  The algorithm is "raw" Paxos: that is, Paxos
+     * minus leader election -- any node in the cluster may propose changes for any partition.
+     *
+     * The Paxos electorate consists only of the replicas for the partition key.
+     * We expect performance to be reasonable, but CAS is still intended to be used
+     * "when you really need it," not for all your updates.
+     *
+     * There are three phases to Paxos:
+     *  1. Prepare: the coordinator generates a ballot (Ballot in our case) and asks replicas to
+     *     - promise not to accept updates from older ballots and
+     *     - tell us about the latest ballots it has already _promised_, _accepted_, or _committed_
+     *     - read the necessary data to evaluate our CAS condition
+     *
+     *  2. Propose: if a majority of replicas reply, the coordinator asks replicas to accept the value of the
+     *     highest proposal ballot it heard about, or a new value if no in-progress proposals were reported.
+     *  3. Commit (Learn): if a majority of replicas acknowledge the accept request, we can commit the new
+     *     value.
+     *
+     *  The commit procedure is not covered in "Paxos Made Simple," and only briefly mentioned in "Paxos Made Live,"
+     *  so here is our approach:
+     *   3a. The coordinator sends a commit message to all replicas with the ballot and value.
+     *   3b. Because of 1-2, this will be the highest-seen commit ballot.  The replicas will note that,
+     *       and send it with subsequent promise replies.  This allows us to discard acceptance records
+     *       for successfully committed replicas, without allowing incomplete proposals to commit erroneously
+     *       later on.
+     *
+     *  Note that since we are performing a CAS rather than a simple update, when nodes respond positively to
+     *  Prepare, they include a read response of the committed values that will be reconciled on the coordinator
+     *  and checked against the CAS precondition between the prepare and accept phases. This gives us a slightly
+     *  longer window for another coordinator to come along and trump our own promise with a newer one but
+     *  is otherwise safe.
+     *
+     *  Any successful prepare phase yielding a read that rejects the condition must be followed by the proposal of
+     *  an empty update, to ensure the evaluation of the condition is linearized with respect to other reads and writes.
+     *
+     * @param key the row key for the row to CAS
+     * @param request the conditions for the CAS to apply as well as the update to perform if the conditions hold.
+     * @param consistencyForConsensus the consistency for the paxos prepare and propose round. This can only be either SERIAL or LOCAL_SERIAL.
+     * @param consistencyForCommit the consistency for write done during the commit phase. This can be anything, except SERIAL or LOCAL_SERIAL.
+     *
+     * @return null if the operation succeeds in updating the row, or the current values corresponding to conditions.
+     * (since, if the CAS doesn't succeed, it means the current values do not match the conditions).
+     */
+    public static RowIterator cas(DecoratedKey key,
+                                  CASRequest request,
+                                  ConsistencyLevel consistencyForConsensus,
+                                  ConsistencyLevel consistencyForCommit,
+                                  ClientState clientState)
+            throws UnavailableException, IsBootstrappingException, RequestFailureException, RequestTimeoutException, InvalidRequestException
+    {
+        final long start = nanoTime();
+        final long proposeDeadline = start + getCasContentionTimeout(NANOSECONDS);
+        final long commitDeadline = Math.max(proposeDeadline, start + getWriteRpcTimeout(NANOSECONDS));
+        return cas(key, request, consistencyForConsensus, consistencyForCommit, clientState, start, proposeDeadline, commitDeadline);
+    }
+    public static RowIterator cas(DecoratedKey key,
+                                  CASRequest request,
+                                  ConsistencyLevel consistencyForConsensus,
+                                  ConsistencyLevel consistencyForCommit,
+                                  ClientState clientState,
+                                  long proposeDeadline,
+                                  long commitDeadline
+                                  )
+            throws UnavailableException, IsBootstrappingException, RequestFailureException, RequestTimeoutException, InvalidRequestException
+    {
+        return cas(key, request, consistencyForConsensus, consistencyForCommit, clientState, nanoTime(), proposeDeadline, commitDeadline);
+    }
+    private static RowIterator cas(DecoratedKey partitionKey,
+                                  CASRequest request,
+                                  ConsistencyLevel consistencyForConsensus,
+                                  ConsistencyLevel consistencyForCommit,
+                                  ClientState clientState,
+                                  long start,
+                                  long proposeDeadline,
+                                  long commitDeadline
+                                  )
+            throws UnavailableException, IsBootstrappingException, RequestFailureException, RequestTimeoutException, InvalidRequestException
+    {
+        SinglePartitionReadCommand readCommand = request.readCommand(FBUtilities.nowInSeconds());
+        TableMetadata metadata = readCommand.metadata();
+
+        consistencyForConsensus.validateForCas();
+        consistencyForCommit.validateForCasCommit(Keyspace.open(metadata.keyspace).getReplicationStrategy());
+
+        Ballot minimumBallot = null;
+        int failedAttemptsDueToContention = 0;
+        try (PaxosOperationLock lock = PaxosState.lock(partitionKey, metadata, proposeDeadline, consistencyForConsensus, true))
+        {
+            Paxos.Async<PaxosCommit.Status> commit = null;
+            done: while (true)
+            {
+                // read the current values and check they validate the conditions
+                Tracing.trace("Reading existing values for CAS precondition");
+
+                BeginResult begin = begin(proposeDeadline, readCommand, consistencyForConsensus,
+                        true, minimumBallot, failedAttemptsDueToContention);
+                Ballot ballot = begin.ballot;
+                Participants participants = begin.participants;
+                failedAttemptsDueToContention = begin.failedAttemptsDueToContention;
+
+                FilteredPartition current;
+                try (RowIterator iter = PartitionIterators.getOnlyElement(begin.readResponse, readCommand))
+                {
+                    current = FilteredPartition.create(iter);
+                }
+
+                Proposal proposal;
+                boolean conditionMet = request.appliesTo(current);
+                if (!conditionMet)
+                {
+                    if (getPaxosVariant() == v2_without_linearizable_reads_or_rejected_writes)
+                    {
+                        Tracing.trace("CAS precondition rejected", current);
+                        casWriteMetrics.conditionNotMet.inc();
+                        return current.rowIterator();
+                    }
+
+                    // If we failed to meet our condition, it does not mean we can do nothing: if we do not propose
+                    // anything that is accepted by a quorum, it is possible for our !conditionMet state
+                    // to not be serialized wrt other operations.
+                    // If a later read encounters an "in progress" write that did not reach a majority,
+                    // but that would have permitted conditionMet had it done so (and hence we evidently did not witness),
+                    // that operation will complete the in-progress proposal before continuing, so that this and future
+                    // reads will perceive conditionMet without any intervening modification from the time at which we
+                    // assured a conditional write that !conditionMet.
+                    // So our evaluation is only serialized if we invalidate any in progress operations by proposing an empty update
+                    // See also CASSANDRA-12126
+                    if (begin.isLinearizableRead)
+                    {
+                        Tracing.trace("CAS precondition does not match current values {}; read is already linearizable; aborting", current);
+                        return conditionNotMet(current);
+                    }
+
+                    Tracing.trace("CAS precondition does not match current values {}; proposing empty update", current);
+                    proposal = Proposal.empty(ballot, partitionKey, metadata);
+                }
+                else if (begin.isPromised)
+                {
+                    // finish the paxos round w/ the desired updates
+                    // TODO "turn null updates into delete?" - what does this TODO even mean?
+                    PartitionUpdate updates = request.makeUpdates(current, clientState, begin.ballot);
+
+                    // Apply triggers to cas updates. A consideration here is that
+                    // triggers emit Mutations, and so a given trigger implementation
+                    // may generate mutations for partitions other than the one this
+                    // paxos round is scoped for. In this case, TriggerExecutor will
+                    // validate that the generated mutations are targeted at the same
+                    // partition as the initial updates and reject (via an
+                    // InvalidRequestException) any which aren't.
+                    updates = TriggerExecutor.instance.execute(updates);
+
+                    proposal = Proposal.of(ballot, updates);
+                    Tracing.trace("CAS precondition is met; proposing client-requested updates for {}", ballot);
+                }
+                else
+                {
+                    // must retry, as only achieved read success in begin
+                    Tracing.trace("CAS precondition is met, but ballot stale for proposal; retrying", current);
+                    continue;
+                }
+
+                PaxosPropose.Status propose = propose(proposal, participants, conditionMet).awaitUntil(proposeDeadline);
+                switch (propose.outcome)
+                {
+                    default: throw new IllegalStateException();
+
+                    case MAYBE_FAILURE:
+                        throw propose.maybeFailure().markAndThrowAsTimeoutOrFailure(true, consistencyForConsensus, failedAttemptsDueToContention);
+
+                    case SUCCESS:
+                    {
+                        if (!conditionMet)
+                            return conditionNotMet(current);
+
+                        // no need to commit a no-op; either it
+                        //   1) reached a majority, in which case it was agreed, had no effect and we can do nothing; or
+                        //   2) did not reach a majority, was not agreed, and was not user visible as a result so we can ignore it
+                        if (!proposal.update.isEmpty())
+                            commit = commit(proposal.agreed(), participants, consistencyForConsensus, consistencyForCommit, true);
+
+                        break done;
+                    }
+
+                    case SUPERSEDED:
+                    {
+                        switch (propose.superseded().hadSideEffects)
+                        {
+                            default: throw new IllegalStateException();
+
+                            case MAYBE:
+                                // We don't know if our update has been applied, as the competing ballot may have completed
+                                // our proposal.  We yield our uncertainty to the caller via timeout exception.
+                                // TODO: should return more useful result to client, and should also avoid this situation where possible
+                                throw new MaybeFailure(false, participants.sizeOfPoll(), participants.sizeOfConsensusQuorum, 0, emptyMap())
+                                        .markAndThrowAsTimeoutOrFailure(true, consistencyForConsensus, failedAttemptsDueToContention);
+
+                            case NO:
+                                minimumBallot = propose.superseded().by;
+                                // We have been superseded without our proposal being accepted by anyone, so we can safely retry
+                                Tracing.trace("Paxos proposal not accepted (pre-empted by a higher ballot)");
+                                if (!waitForContention(proposeDeadline, ++failedAttemptsDueToContention, metadata, partitionKey, consistencyForConsensus, WRITE))
+                                    throw MaybeFailure.noResponses(participants).markAndThrowAsTimeoutOrFailure(true, consistencyForConsensus, failedAttemptsDueToContention);
+                        }
+                    }
+                }
+                // continue to retry
+            }
+
+            if (commit != null)
+            {
+                PaxosCommit.Status result = commit.awaitUntil(commitDeadline);
+                if (!result.isSuccess())
+                    throw result.maybeFailure().markAndThrowAsTimeoutOrFailure(true, consistencyForCommit, failedAttemptsDueToContention);
+            }
+            Tracing.trace("CAS successful");
+            return null;
+
+        }
+        finally
+        {
+            final long latency = nanoTime() - start;
+
+            if (failedAttemptsDueToContention > 0)
+            {
+                casWriteMetrics.contention.update(failedAttemptsDueToContention);
+                openAndGetStore(metadata).metric.topCasPartitionContention.addSample(partitionKey.getKey(), failedAttemptsDueToContention);
+            }
+
+            casWriteMetrics.addNano(latency);
+            writeMetricsMap.get(consistencyForConsensus).addNano(latency);
+        }
+    }
+
+    private static RowIterator conditionNotMet(FilteredPartition read)
+    {
+        Tracing.trace("CAS precondition rejected", read);
+        casWriteMetrics.conditionNotMet.inc();
+        return read.rowIterator();
+    }
+
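+    /**
+     * Perform a SERIAL/LOCAL_SERIAL read of a single partition via a paxos round, completing any
+     * in-progress writes witnessed along the way. Groups containing more than one query are rejected.
+     */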
+    public static PartitionIterator read(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyForConsensus)
+            throws InvalidRequestException, UnavailableException, ReadFailureException, ReadTimeoutException
+    {
+        long start = nanoTime();
+        long deadline = start + DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS);
+        return read(group, consistencyForConsensus, start, deadline);
+    }
+
+    public static PartitionIterator read(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyForConsensus, long deadline)
+            throws InvalidRequestException, UnavailableException, ReadFailureException, ReadTimeoutException
+    {
+        return read(group, consistencyForConsensus, nanoTime(), deadline);
+    }
+
+    private static PartitionIterator read(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyForConsensus, long start, long deadline)
+            throws InvalidRequestException, UnavailableException, ReadFailureException, ReadTimeoutException
+    {
+        if (group.queries.size() > 1)
+            throw new InvalidRequestException("SERIAL/LOCAL_SERIAL consistency may only be requested for one partition at a time");
+
+        int failedAttemptsDueToContention = 0;
+        Ballot minimumBallot = null;
+        SinglePartitionReadCommand read = group.queries.get(0);
+        try (PaxosOperationLock lock = PaxosState.lock(read.partitionKey(), read.metadata(), deadline, consistencyForConsensus, false))
+        {
+            while (true)
+            {
+                // does the work of applying in-progress writes; throws UAE or timeout if it can't
+                final BeginResult begin = begin(deadline, read, consistencyForConsensus, false, minimumBallot, failedAttemptsDueToContention);
+                failedAttemptsDueToContention = begin.failedAttemptsDueToContention;
+
+                switch (PAXOS_VARIANT)
+                {
+                    default: throw new AssertionError();
+
+                    case v2_without_linearizable_reads_or_rejected_writes:
+                    case v2_without_linearizable_reads:
+                        return begin.readResponse;
+
+                    case v2:
+                        // no need to submit an empty proposal, as the promise will be treated as complete for future optimistic reads
+                        if (begin.isLinearizableRead)
+                            return begin.readResponse;
+                }
+
+                Proposal proposal = Proposal.empty(begin.ballot, read.partitionKey(), read.metadata());
+                PaxosPropose.Status propose = propose(proposal, begin.participants, false).awaitUntil(deadline);
+                switch (propose.outcome)
+                {
+                    default: throw new IllegalStateException();
+
+                    case MAYBE_FAILURE:
+                        throw propose.maybeFailure().markAndThrowAsTimeoutOrFailure(false, consistencyForConsensus, failedAttemptsDueToContention);
+
+                    case SUCCESS:
+                        return begin.readResponse;
+
+                    case SUPERSEDED:
+                        switch (propose.superseded().hadSideEffects)
+                        {
+                            default: throw new IllegalStateException();
+
+                            case MAYBE:
+                                // We don't know if our update has been applied, as the competing ballot may have completed
+                                // our proposal.  We yield our uncertainty to the caller via timeout exception.
+                                // TODO: should return more useful result to client, and should also avoid this situation where possible
+                                throw new MaybeFailure(false, begin.participants.sizeOfPoll(), begin.participants.sizeOfConsensusQuorum, 0, emptyMap())
+                                      .markAndThrowAsTimeoutOrFailure(true, consistencyForConsensus, failedAttemptsDueToContention);
+
+                            case NO:
+                                minimumBallot = propose.superseded().by;
+                                // We have been superseded without our proposal being accepted by anyone, so we can safely retry
+                                Tracing.trace("Paxos proposal not accepted (pre-empted by a higher ballot)");
+                                if (!waitForContention(deadline, ++failedAttemptsDueToContention, group.metadata(), group.queries.get(0).partitionKey(), consistencyForConsensus, READ))
+                                    throw MaybeFailure.noResponses(begin.participants).markAndThrowAsTimeoutOrFailure(true, consistencyForConsensus, failedAttemptsDueToContention);
+                        }
+                }
+            }
+        }
+        finally
+        {
+            long latency = nanoTime() - start;
+            readMetrics.addNano(latency);
+            casReadMetrics.addNano(latency);
+            readMetricsMap.get(consistencyForConsensus).addNano(latency);
+            TableMetadata table = read.metadata();
+            Keyspace.open(table.keyspace).getColumnFamilyStore(table.name).metric.coordinatorReadLatency.update(latency, TimeUnit.NANOSECONDS);
+            if (failedAttemptsDueToContention > 0)
+                casReadMetrics.contention.update(failedAttemptsDueToContention);
+        }
+    }
+
+    static class BeginResult
+    {
+        final Ballot ballot;
+        final Participants participants;
+        final int failedAttemptsDueToContention;
+        final PartitionIterator readResponse;
+        final boolean isLinearizableRead;
+        final boolean isPromised;
+        final Ballot retryWithAtLeast;
+
+        public BeginResult(Ballot ballot, Participants participants, int failedAttemptsDueToContention, PartitionIterator readResponse, boolean isLinearizableRead, boolean isPromised, Ballot retryWithAtLeast)
+        {
+            assert isPromised || isLinearizableRead;
+            this.ballot = ballot;
+            this.participants = participants;
+            this.failedAttemptsDueToContention = failedAttemptsDueToContention;
+            this.readResponse = readResponse;
+            this.isLinearizableRead = isLinearizableRead;
+            this.isPromised = isPromised;
+            this.retryWithAtLeast = retryWithAtLeast;
+        }
+    }
+
+    /**
+     * Begin a Paxos operation by seeking promises from our electorate to be completed with proposals by our caller; and:
+     *
+     *  - Completing any in-progress proposals witnessed, that are not known to have reached the commit phase
+     *  - Completing any in-progress commits witnessed, that are not known to have reached a quorum of the electorate
+     *  - Retrying and backing-off under contention
+     *  - Detecting electorate mismatches with our peers and retrying to avoid non-overlapping
+     *    electorates agreeing on operations
+     *  - Returning a resolved read response, and knowledge of whether it is linearizable to read without proposing an empty update
+     *
+     * Optimisations:
+     *    - If the promises report an incomplete commit (but have been able to witness it in a read response)
+     *      we will submit the commit to those nodes that have not witnessed while waiting for those that have,
+     *      returning as soon as a quorum is known to have witnessed the commit
+     *    - If we witness an in-progress commit to complete, we batch the commit together with a new prepare
+     *      restarting our operation.
+     *    - If we witness an in-progress proposal to complete, after successfully proposing it we batch its
+     *      commit together with a new prepare restarting our operation.
+     *
+     * @return a {@link BeginResult} carrying the ballot obtained, the participants contacted, the resolved
+     * read response, and whether that read is already linearizable without proposing an empty update.
+     */
+    @SuppressWarnings("resource")
+    private static BeginResult begin(long deadline,
+                                     SinglePartitionReadCommand query,
+                                     ConsistencyLevel consistencyForConsensus,
+                                     final boolean isWrite,
+                                     Ballot minimumBallot,
+                                     int failedAttemptsDueToContention)
+            throws WriteTimeoutException, WriteFailureException, ReadTimeoutException, ReadFailureException
+    {
+        boolean acceptEarlyReadPermission = !isWrite; // if we're reading, begin by assuming a read permission is sufficient
+        Participants initialParticipants = Participants.get(query.metadata(), query.partitionKey(), consistencyForConsensus);
+        initialParticipants.assureSufficientLiveNodes(isWrite);
+        PaxosPrepare preparing = prepare(minimumBallot, initialParticipants, query, isWrite, acceptEarlyReadPermission);
+        while (true)
+        {
+            // prepare
+            PaxosPrepare retry = null;
+            PaxosPrepare.Status prepare = preparing.awaitUntil(deadline);
+            boolean isPromised = false;
+            retry: switch (prepare.outcome)
+            {
+                default: throw new IllegalStateException();
+
+                case FOUND_INCOMPLETE_COMMITTED:
+                {
+                    FoundIncompleteCommitted incomplete = prepare.incompleteCommitted();
+                    Tracing.trace("Repairing replicas that missed the most recent commit");
+                    retry = commitAndPrepare(incomplete.committed, incomplete.participants, query, isWrite, acceptEarlyReadPermission);
+                    break;
+                }
+                case FOUND_INCOMPLETE_ACCEPTED:
+                {
+                    FoundIncompleteAccepted inProgress = prepare.incompleteAccepted();
+                    Tracing.trace("Finishing incomplete paxos round {}", inProgress.accepted);
+                    if (isWrite)
+                        casWriteMetrics.unfinishedCommit.inc();
+                    else
+                        casReadMetrics.unfinishedCommit.inc();
+
+                    // we DO NOT need to change the timestamp of this commit - either we or somebody else will finish it
+                    // and the original timestamp is correctly linearised. By not updating the timestamp we leave enough
+                    // information for nodes to avoid competing to re-propose the same proposal; if an in progress accept
+                    // is equal to the latest commit (even if the ballots aren't) we're done and can abort earlier,
+                    // and in fact it's possible for a CAS to sometimes determine if side effects occurred by reading
+                    // the underlying data and not witnessing the timestamp of its ballot (or any newer for the relevant data).
+                    Proposal repropose = new Proposal(inProgress.ballot, inProgress.accepted.update);
+                    PaxosPropose.Status proposeResult = propose(repropose, inProgress.participants, false).awaitUntil(deadline);
+                    switch (proposeResult.outcome)
+                    {
+                        default: throw new IllegalStateException();
+
+                        case MAYBE_FAILURE:
+                            throw proposeResult.maybeFailure().markAndThrowAsTimeoutOrFailure(isWrite, consistencyForConsensus, failedAttemptsDueToContention);
+
+                        case SUCCESS:
+                            retry = commitAndPrepare(repropose.agreed(), inProgress.participants, query, isWrite, acceptEarlyReadPermission);
+                            break retry;
+
+                        case SUPERSEDED:
+                            // since we are proposing a previous value that was maybe superseded by us before completion
+                            // we don't need to test the side effects, as we just want to start again, and fall through
+                            // to the superseded section below
+                            prepare = new PaxosPrepare.Superseded(proposeResult.superseded().by, inProgress.participants);
+
+                    }
+                }
+
+                case SUPERSEDED:
+                {
+                    Tracing.trace("Some replicas have already promised a higher ballot than ours; aborting");
+                    // sleep a random amount to give the other proposer a chance to finish
+                    if (!waitForContention(deadline, ++failedAttemptsDueToContention, query.metadata(), query.partitionKey(), consistencyForConsensus, isWrite ? WRITE : READ))
+                        throw MaybeFailure.noResponses(prepare.participants).markAndThrowAsTimeoutOrFailure(true, consistencyForConsensus, failedAttemptsDueToContention);
+                    retry = prepare(prepare.retryWithAtLeast(), prepare.participants, query, isWrite, acceptEarlyReadPermission);
+                    break;
+                }
+                case PROMISED: isPromised = true;
+                case READ_PERMITTED:
+                {
+                    // We have received a quorum of promises (or read permissions) that have all witnessed the commit of the prior paxos
+                    // round's proposal (if any).
+                    PaxosPrepare.Success success = prepare.success();
+
+                    DataResolver<?, ?> resolver = new DataResolver(query, success.participants, NoopReadRepair.instance, query.creationTimeNanos());
+                    for (int i = 0 ; i < success.responses.size() ; ++i)
+                        resolver.preprocess(success.responses.get(i));
+
+                    class WasRun implements Runnable { boolean v; public void run() { v = true; } }
+                    WasRun hadShortRead = new WasRun();
+                    PartitionIterator result = resolver.resolve(hadShortRead);
+
+                    if (!isPromised && hadShortRead.v)
+                    {
+                        // we need to propose an empty update to linearize our short read, but only had read success
+                        // since we may continue to perform short reads, we ask our prepare not to accept an early
+                        // read permission, when a promise may yet be obtained
+                        // TODO: increase read size each time this happens?
+                        acceptEarlyReadPermission = false;
+                        break;
+                    }
+
+                    return new BeginResult(success.ballot, success.participants, failedAttemptsDueToContention, result, !hadShortRead.v && success.isReadSafe, isPromised, success.supersededBy);
+                }
+
+                case MAYBE_FAILURE:
+                    throw prepare.maybeFailure().markAndThrowAsTimeoutOrFailure(isWrite, consistencyForConsensus, failedAttemptsDueToContention);
+
+                case ELECTORATE_MISMATCH:
+                    Participants participants = Participants.get(query.metadata(), query.partitionKey(), consistencyForConsensus);
+                    participants.assureSufficientLiveNodes(isWrite);
+                    retry = prepare(participants, query, isWrite, acceptEarlyReadPermission);
+                    break;
+
+            }
+
+            if (retry == null)
+            {
+                Tracing.trace("Some replicas have already promised a higher ballot than ours; retrying");
+                // sleep a random amount to give the other proposer a chance to finish
+                if (!waitForContention(deadline, ++failedAttemptsDueToContention, query.metadata(), query.partitionKey(), consistencyForConsensus, isWrite ? WRITE : READ))
+                    throw MaybeFailure.noResponses(prepare.participants).markAndThrowAsTimeoutOrFailure(true, consistencyForConsensus, failedAttemptsDueToContention);
+                retry = prepare(prepare.retryWithAtLeast(), prepare.participants, query, isWrite, acceptEarlyReadPermission);
+            }
+
+            preparing = retry;
+        }
+    }
+
+    public static boolean isInRangeAndShouldProcess(InetAddressAndPort from, DecoratedKey key, TableMetadata table, boolean includesRead)
+    {
+        Keyspace keyspace = Keyspace.open(table.keyspace);
+        return (includesRead ? EndpointsForToken.natural(keyspace, key.getToken())
+                             : ReplicaLayout.forTokenWriteLiveAndDown(keyspace, key.getToken()).all()
+        ).contains(getBroadcastAddressAndPort());
+    }
+
+    static ConsistencyLevel nonSerial(ConsistencyLevel serial)
+    {
+        switch (serial)
+        {
+            default: throw new IllegalStateException();
+            case SERIAL: return QUORUM;
+            case LOCAL_SERIAL: return LOCAL_QUORUM;
+        }
+    }
+
+    private static void mark(boolean isWrite, Function<ClientRequestMetrics, Meter> toMark, ConsistencyLevel consistency)
+    {
+        if (isWrite)
+        {
+            toMark.apply(casWriteMetrics).mark();
+            toMark.apply(writeMetricsMap.get(consistency)).mark();
+        }
+        else
+        {
+            toMark.apply(casReadMetrics).mark();
+            toMark.apply(readMetricsMap.get(consistency)).mark();
+        }
+    }
+
+    public static Ballot newBallot(@Nullable Ballot minimumBallot, ConsistencyLevel consistency)
+    {
+        // We want a timestamp that is guaranteed to be unique for that node (so that the ballot is globally unique), but if we've got a prepare rejected
+        // already we also want to make sure we pick a timestamp that has a chance to be promised, i.e. one that is greater than the most recently known
+        // in progress (#5667). Lastly, we don't want to use a timestamp that is older than the last one assigned by ClientState or operations may appear
+        // out-of-order (#7801).
+        long minTimestampMicros = minimumBallot == null ? Long.MIN_VALUE : 1 + minimumBallot.unixMicros();
+        // Note that ballotMicros is not guaranteed to be unique if two proposals are being handled concurrently by the same coordinator. But we still
+        // need ballots to be unique for each proposal so we have to use getRandomTimeUUIDFromMicros.
+        return nextBallot(minTimestampMicros, flag(consistency));
+    }
+
+    static Ballot staleBallotNewerThan(Ballot than, ConsistencyLevel consistency)
+    {
+        long minTimestampMicros = 1 + than.unixMicros();
+        long maxTimestampMicros = BallotGenerator.Global.prevUnixMicros();
+        maxTimestampMicros -= Math.min((maxTimestampMicros - minTimestampMicros) / 2, SECONDS.toMicros(5L));
+        if (maxTimestampMicros <= minTimestampMicros)
+            return nextBallot(minTimestampMicros, flag(consistency));
+
+        return staleBallot(minTimestampMicros, maxTimestampMicros, flag(consistency));
+    }
+
+    /**
+     * Create a ballot uuid with the consistency level encoded in the timestamp.
+     *
+     * UUIDGen.getRandomTimeUUIDFromMicros timestamps are always a multiple of 10, so we add a 1 or 2 to indicate
+     * the consistency level of the operation. This should have no effect in practice (except preferring a serial
+     * operation over a local serial if there's a timestamp collision), but lets us avoid adding CL to the paxos
+     * table and messages, which should make backcompat easier if a different solution is committed upstream.
+     */
+    public static Ballot ballotForConsistency(long whenInMicros, ConsistencyLevel consistency)
+    {
+        Preconditions.checkArgument(consistency.isSerialConsistency());
+        return nextBallot(whenInMicros, flag(consistency));
+    }
+
+    private static Ballot.Flag flag(ConsistencyLevel consistency)
+    {
+        return consistency == SERIAL ? GLOBAL : LOCAL;
+    }
+
+    public static ConsistencyLevel consistency(Ballot ballot)
+    {
+        switch (ballot.flag())
+        {
+            default: return null;
+            case LOCAL: return LOCAL_SERIAL;
+            case GLOBAL: return SERIAL;
+        }
+    }
+
+    static Map<InetAddressAndPort, EndpointState> verifyElectorate(Electorate remoteElectorate, Electorate localElectorate)
+    {
+        // verify electorates; if they differ, send back gossip info for superset of two participant sets
+        if (remoteElectorate.equals(localElectorate))
+            return emptyMap();
+
+        Map<InetAddressAndPort, EndpointState> endpoints = Maps.newHashMapWithExpectedSize(remoteElectorate.size() + localElectorate.size());
+        for (InetAddressAndPort host : remoteElectorate)
+        {
+            EndpointState endpoint = Gossiper.instance.copyEndpointStateForEndpoint(host);
+            if (endpoint == null)
+            {
+                NoSpamLogger.log(logger, WARN, 1, TimeUnit.MINUTES, "Remote electorate {} could not be found in Gossip", host);
+                continue;
+            }
+            endpoints.put(host, endpoint);
+        }
+        for (InetAddressAndPort host : localElectorate)
+        {
+            EndpointState endpoint = Gossiper.instance.copyEndpointStateForEndpoint(host);
+            if (endpoint == null)
+            {
+                NoSpamLogger.log(logger, WARN, 1, TimeUnit.MINUTES, "Local electorate {} could not be found in Gossip", host);
+                continue;
+            }
+            endpoints.putIfAbsent(host, endpoint);
+        }
+
+        return endpoints;
+    }
+
+    public static boolean useV2()
+    {
+        switch (PAXOS_VARIANT)
+        {
+            case v2_without_linearizable_reads_or_rejected_writes:
+            case v2_without_linearizable_reads:
+            case v2:
+                return true;
+            case v1:
+            case v1_without_linearizable_reads_or_rejected_writes:
+                return false;
+            default:
+                throw new AssertionError();
+        }
+    }
+
+    public static boolean isLinearizable()
+    {
+        switch (PAXOS_VARIANT)
+        {
+            case v2:
+            case v1:
+                return true;
+            case v2_without_linearizable_reads_or_rejected_writes:
+            case v2_without_linearizable_reads:
+            case v1_without_linearizable_reads_or_rejected_writes:
+                return false;
+            default:
+                throw new AssertionError();
+        }
+    }
+
+    public static void setPaxosVariant(Config.PaxosVariant paxosVariant)
+    {
+        Preconditions.checkNotNull(paxosVariant);
+        PAXOS_VARIANT = paxosVariant;
+        DatabaseDescriptor.setPaxosVariant(paxosVariant);
+    }
+
+    public static Config.PaxosVariant getPaxosVariant()
+    {
+        return PAXOS_VARIANT;
+    }
+
+    static boolean isOldParticipant(Replica replica)
+    {
+        String version = Gossiper.instance.getForEndpoint(replica.endpoint(), RELEASE_VERSION);
+        if (version == null)
+            return false;
+
+        try
+        {
+            return new CassandraVersion(version).compareTo(MODERN_PAXOS_RELEASE) < 0;
+        }
+        catch (Throwable t)
+        {
+            return false;
+        }
+    }
+
+    public static void evictHungRepairs()
+    {
+        PaxosTableRepairs.evictHungRepairs();
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/Paxos.md b/src/java/org/apache/cassandra/service/paxos/Paxos.md
new file mode 100644
index 0000000..e8f1991
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/Paxos.md
@@ -0,0 +1,345 @@
+# Light-weight transactions and Paxos algorithm
+
+Our implementation of light-weight transactions (LWT) is loosely based on the classic Paxos single-decision algorithm.
+It contains a range of modifications that make it different from a direct application of a sequence of independent
+decisions.
+
+Below we will describe the basic process (used by both V1 and V2 implementations) with the actions that each participant
+takes and sketch a proof why the approach is safe, and then talk about various later improvements that do not change the
+correctness of the basic scheme.
+
+A key consideration in the design was the fact that we apply LWTs independently to the partitions of a table, and the
+additional infrastructure (such as improved failure detection, client-side routing, etc.) that would be necessary to
+efficiently utilise a stable leader (i.e. Multi-Paxos). Instead of solving this, we prefer to better support independent
+writes with a leaderless scheme, where each coordinator attempts to drive its own operations to completion. Under
+contention this risks repeated clashes between coordinators attempting to write to the same partition, in which case
+we use randomized exponential backoff to achieve progress.
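+
+The backoff itself can be any randomized exponential scheme. A minimal sketch follows, with hypothetical constants and
+method names; it is not the coordinator's actual contention-wait implementation.
+
+```java
+import java.util.concurrent.ThreadLocalRandom;
+
+final class ContentionBackoff
+{
+    // Hypothetical bounds; the real coordinator derives its wait times differently.
+    private static final long BASE_WAIT_MICROS = 100;
+    private static final long MAX_WAIT_MICROS = 100_000;
+
+    /** Sleep for a random, exponentially growing duration; return false if the deadline would be exceeded. */
+    static boolean backoff(long deadlineNanos, int failedAttempts) throws InterruptedException
+    {
+        long capMicros = Math.min(MAX_WAIT_MICROS, BASE_WAIT_MICROS << Math.min(failedAttempts, 10));
+        long waitMicros = ThreadLocalRandom.current().nextLong(1, capMicros + 1);
+        if (System.nanoTime() + waitMicros * 1000 > deadlineNanos)
+            return false;
+        Thread.sleep(waitMicros / 1000, (int) ((waitMicros % 1000) * 1000));
+        return true;
+    }
+}
+```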
+
+The descriptions and proofs below assume no non-LWT modifications to the partition. Any other write (regardless of the
+requested consistency level) may reach a minority of replicas and leave the partition in an inconsistent state where the
+following operations can go different ways depending on which set of replicas respond to a quorum read. In particular,
+if two compare-and-set (CAS) operations with the same condition are executed with no intervening operations, such a
+state would make it possible for the first to fail and the second to succeed.
+
+## Basic scheme
+
+For each partition with LWT modification, we maintain the following registers:
+
+- The current `promised` ballot number.
+- The latest `accepted` proposal together with its ballot number and a `committed` flag.
+
+We define an ordering on the latter by ballot number and committed flag, where `committed` true is interpreted as
+greater on equal ballot numbers. Two proposals are equal when their ballot numbers and committed flag match.
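+
+A minimal sketch of these registers, with hypothetical names and a `long` standing in for the real ballot type (the
+actual storage layout differs, as described under "Insubstantial differences with the actual code" below):
+
+```java
+// Hypothetical shape of the per-partition registers described above.
+final class PaxosRegisters
+{
+    static final class Accepted implements Comparable<Accepted>
+    {
+        final long ballot;
+        final boolean committed;
+        final Object update;   // the proposed partition update
+
+        Accepted(long ballot, boolean committed, Object update)
+        {
+            this.ballot = ballot;
+            this.committed = committed;
+            this.update = update;
+        }
+
+        // Ordered by ballot; committed == true is greater on equal ballot numbers.
+        public int compareTo(Accepted that)
+        {
+            int c = Long.compare(this.ballot, that.ballot);
+            return c != 0 ? c : Boolean.compare(this.committed, that.committed);
+        }
+    }
+
+    long promised;       // greatest promised ballot
+    Accepted accepted;   // latest accepted proposal, or null if none
+}
+```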
+
+The basic scheme includes the following stages:
+
+1. A coordinator selects a fresh ballot number (based on time and made unique by including a source node identifier).
+2. The coordinator sends a `prepare` message to all replicas responsible for the targeted partition with the given
+   ballot number and a request to read the data required by the operation.
+3. By accepting the message, a replica declares that it will not accept any `prepare` or `propose` message with smaller
+   ballot number:
+    1. If the replica's `promised` register is greater than the number included in the message, the replica rejects it,
+       sending back its current `promised` number in a `rejected` message.
+    2. Otherwise, the replica stores the ballot number from the message in its `promised` register and replies with a
+       `promise` message which includes the contents of the current `accepted` register together with the requested data
+       from the database.
+4. On receipt of `promise`s from a quorum of nodes, the coordinator compiles the "most recently accepted" (`MRA`) value as
+   the greatest among the accepted values in the promises and then:
+    1. If the `MRA` is not null and its committed flag is not true, there is an in-progress Paxos session that needs to be
+       completed and committed. The coordinator reproposes the value with the new ballot (i.e. follows the steps from (iv)
+       below with a proposed value equal to the `MRA`'s) and then restarts the process.
+    2. If the `MRA` is not null, its committed flag is true and it is not a match for the `accepted` value of a quorum of
+       promises, the coordinator sends a `commit` message to all replicas whose value did not match and awaits responses
+       until they form a quorum of replicas with a matching `accepted` value.
+    3. The coordinator then creates a proposal, a partition update which is the result of applying the operation, using
+       a read result obtained by resolving the quorum of read responses; the partition update has a timestamp that
+       corresponds to the proposal's ballot number, and is empty if the operation was a read or the CAS failed.
+    4. It then sends the proposal as a `propose` message with the value and the current ballot number to all replicas.
+5. A replica accepts the proposal if it has not promised that it will not do so:
+    1. If its `promised` ballot number is not higher than the proposal's, it sets its `accepted` register to the
+       proposal with its ballot number and a false `committed` flag, updates its `promised` register to the proposal's
+       ballot and sends back an `accepted` message.
+    2. Otherwise it rejects the proposal by sending the current `promised` number in a `rejected` message.
+6. On receipt of `accepted` messages from a quorum of nodes, the Paxos session has reached a decision. The coordinator
+   completes it by sending `commit` messages to all replicas, attaching the proposal value with its ballot number.
+    1. It can return completion without waiting for receipt of any commit messages.
+7. A replica accepts a commit unconditionally, by applying the attached partition update. If the commit's ballot number
+   is greater than the replica's `accepted` ballot number, it sets its `accepted` register to the message's value and
+   ballot with true `committed` flag, and the `promised` register to its ballot number.
+8. If at any stage of the process that requires a quorum the quorum is not reached, the coordinator backs off and then
+   restarts the process from the beginning using a fresh ballot that is greater than the ballot contained in any
+   `rejected` message received.
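+
+The replica-side behaviour in steps 3, 5 and 7 amounts to a small state machine over these registers. A minimal sketch
+with hypothetical names and `long` ballots; the real handlers also return the accepted register and the requested read
+data with each promise:
+
+```java
+// Hypothetical replica-side handlers for steps 3, 5 and 7; -1 means "not rejected".
+final class PaxosReplicaSketch
+{
+    long promised;                 // greatest promised ballot
+    long acceptedBallot = -1;      // the accepted register, flattened into three fields
+    Object acceptedUpdate;
+    boolean committed;
+
+    /** Step 3: promise, or reject by returning the current promised ballot. */
+    synchronized long prepare(long ballot)
+    {
+        if (promised > ballot)
+            return promised;       // rejected
+        promised = ballot;
+        return -1;                 // promised; the reply also carries the accepted register and read data
+    }
+
+    /** Step 5: accept unless a higher ballot has been promised. */
+    synchronized long propose(long ballot, Object update)
+    {
+        if (promised > ballot)
+            return promised;       // rejected
+        promised = ballot;
+        acceptedBallot = ballot; acceptedUpdate = update; committed = false;
+        return -1;                 // accepted
+    }
+
+    /** Step 7: commit unconditionally; only newer ballots advance the accepted register. */
+    synchronized void commit(long ballot, Object update)
+    {
+        applyToTable(update);
+        if (ballot > acceptedBallot)
+        {
+            acceptedBallot = ballot; acceptedUpdate = update; committed = true;
+            promised = Math.max(promised, ballot);
+        }
+    }
+
+    private void applyToTable(Object update) { /* hypothetical stand-in for applying the partition update */ }
+}
+```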
+
+We can't directly map multi-instance classic Paxos onto this, but we can follow its correctness proofs to ensure the
+following propositions:
+
+1. Once a value (possibly empty) has been decided (i.e. accepted by a majority), no earlier proposal can be reproposed.
+2. Once a value has been decided, in any further round (i.e. action taken by any proposer) it will either be accepted
+   and/or committed, or it will have been committed in an earlier round and will be witnessed by at least one member of
+   the quorum.
+
+Suppose the value V was decided in the round with ballot number E0.
+
+Proposition 1 is true because new proposals can only be made after a successful promise round with ballot number E > E0.
+To be successful, it needs a quorum which must intersect in at least one replica with E0's decision quorum. Since that
+replica accepted that proposal, it cannot have made a promise on E before that and must thus return an accepted value
+whose ballot number is at least E0. This is true because both `propose` and `commit` actions can only replace the
+`accepted` value with one with a higher or equal ballot number.
+
+Proposition 2 can be proven true by induction on the following invariant: for any successful ballot number E >= E0,
+either:
+
+1. For all quorums of replicas, commit V with some ballot number G < E, G >= E0 was witnessed by some replica in the
+   quorum.
+2. For all quorums of replicas, the `accepted` value with the highest ballot number among the replicas in the quorum is
+   V with some ballot number G where G <= E, G >= E0.
+
+For round E == E0 the invariant is true because all quorums contain a replica that accepted V with ballot E0, and E0 is
+the newest ballot number.
+
+Suppose the invariant is true for some round E and examine the behaviour of the algorithm on F = succ(E).
+
+- If 1. was true for E, it remains true for F.
+- Otherwise 2. was true for E and:
+    - If the promise pass did not reach a quorum, no accepted values change and hence 2. is still true.
+    - If the promise reached a quorum, the collected `MRA` is V with ballot G.
+        - If the `MRA`'s committed flag is not true, the value V is reproposed with ballot F.
+            - If the proposal reaches no node, no accepted values change and hence 2. is still true.
+            - If the proposal reaches a minority of nodes, any quorum that includes one of these nodes will have V with
+              ballot F as their highest `accepted` value. All other quorums will not have changed and still have V as their
+              highest accepted value with an earlier ballot >= E0. In any case, 2. is still true.
+            - If the proposal reaches a majority of nodes, all quorums have V with ballot F as the highest `accepted` value
+              and thus satisfy 2.
+            - Any `commit` message that is issued after a majority can only change the accepted value's `committed` flag --
+              all quorums will still satisfy 2.
+        - If the `MRA`'s committed flag is true but it is not matched in all responses, a `commit` with this `MRA` is sent to
+          all replicas. By the reasoning above, 2. is still true regardless how many of them (if any) are received and
+          processed.
+        - If the committed `MRA` matches in all responses (initially or because of commits issued in the last step), then 1.
+          is true for G <= E < F regardless of any further action taken by the coordinator.
+
+Proposition 1 ensures that we can only commit one value in any concurrent operation. Proposition 2 means that any
+accepted proposal must have started its promise after the previous value was committed to at least one replica in any
+quorum, and hence must be able to see its effects in its read responses. This is also true for every other value that
+was accepted in any previous ballot.
+
+Note that each commit may modify separate parts of the partition or overwrite previous values. Each of these updates may
+be witnessed by a different replica, but they must all be witnessed in the responses the coordinator receives prior to
+making a proposal or completing a read. By virtue of having their timestamps reflect ballot order, the read resolution
+process can correctly restore the combined state.
+
+As writes are only done in the `commit` stage after a value has been accepted, no undecided writes can be reflected in
+read responses.
+
+## Insubstantial differences with the actual code
+
+Instead of using a unique node identifier as part of the ballot number, we generate a 64-bit random integer. This has an
+extremely low chance of collision that can be assumed to be immaterial.
+
+Instead of storing an `accepted` proposal with a committed flag, for historical reasons the actual implementation
+separately stores the latest `accepted` and `committed` values. The coordinator computes most recent values for both
+after receiving promises, and acts on the higher of the two. In other words, instead of checking the `committed` flag on
+the most recently accepted, it checks whether the `committed` value has a higher ballot than the `accepted` one.
+
+When accepting a proposal (which is conditional on having no newer promise or accepted/committed proposal), it stores it
+in the `accepted` register. When accepting a commit (which is unconditional), it replaces the `commit` register only if
+the commit has a newer ballot. It will clear the `accepted` value to null if that value does not have a newer ballot.
+
+Additionally, the `promised` value is not adjusted with accepted proposals and commits. Instead, whenever the code
+decides whether to promise or accept, it collects the maximum of the promised, accepted and committed ballots.
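+
+A minimal sketch of this difference, with hypothetical names: the effective bound checked by a replica is the maximum
+of the three ballots, and the coordinator decides "most recently accepted" by comparing the two registers, with the
+committed one winning ties:
+
+```java
+// Hypothetical sketch: accepted and committed stored separately, as in the actual code.
+final class SplitRegistersSketch
+{
+    long promised;         // latest explicitly promised ballot
+    long acceptedBallot;   // ballot of the latest accepted (possibly uncommitted) proposal, or Long.MIN_VALUE
+    long committedBallot;  // ballot of the latest committed proposal, or Long.MIN_VALUE
+
+    // The bound a new prepare or propose is checked against, since promised alone is not kept up to date.
+    long latestWitnessedBallot()
+    {
+        return Math.max(promised, Math.max(acceptedBallot, committedBallot));
+    }
+
+    // Coordinator side: instead of checking a committed flag, compare the two ballots; committed wins ties.
+    boolean mostRecentIsCommitted()
+    {
+        return committedBallot >= acceptedBallot;
+    }
+}
+```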
+
+Version 2 of the Paxos implementation performs reads as described here, by combining them with the `prepare`/`promise`
+messages. Version 1 runs quorum reads separately after receiving a promise; the read cannot complete if any write
+reaches consensus after that promise and, if successful, it will in turn invalidate any proposals that it may fail to
+see.
+
+These differences do not materially change the logic, but make correctness a little harder to prove directly.
+
+## Skipping commit for empty proposals (Versions 1 and 2)
+
+Since empty proposals make no modifications to the database state, it is not necessary to commit them.
+
+More precisely, in the basic scheme above the coordinator's preparation phase can treat an empty partition update found
+as the most-recently accepted value the same way it would treat a null `MRA`, skipping phases i and ii. In other
+words, we can change 4(i) to:
+
+4.
+    1. If the `MRA` is neither null nor empty, and its committed flag is not true, there is an in-progress Paxos session
+       that needs to be completed and committed. The coordinator reproposes the value with the new ballot (i.e. follows
+       the steps below with a proposed value equal to the `MRA`'s) and then restarts the process.
+
+With this modified step Proposition 1 is still true, as is Proposition 2 restricted to committing and witnessing
+non-empty proposals. Their combination still ensures that no update made concurrently with any operation (including a
+read or a failing CAS) can resurface after that operation is decided, and that any operation will see the results of
+applying any previous one.
+
+## Skipping proposal for reads (Version 2 only)
+
+To ensure correct ordering of reads and unsuccessful CAS operations, the algorithm above forces invalidation of any
+concurrent operation by issuing an empty update. It is possible, however, to recognize if a concurrent operation may
+have started at the time a `promise` is given. If no such operation is present, the read may proceed without issuing an
+empty update.
+
+To recognize this, the current `promised` value is returned with `promise` messages. During the proposal generation
+phase, the coordinator compiles the maximum returned `promised` number and compares it against the `MRA`'s. If higher, a
+concurrent operation is in place and all reads must issue an empty update to ensure proper ordering. If not, the empty
+update may be skipped.
+
+In other words, step 3(ii) changes to the following:
+
+3.
+    2. Otherwise, the replica stores the ballot number from the message in its `promised` register and replies with a
+       `promise` message which includes the contents of the current `accepted` and previous `promised` registers together
+       with the requested data from the database.
+
+and a new step is inserted before 4(iv) (which becomes 4(v)):
+
+4.
+    4. If the proposal is empty (i.e. the operation was a read or the CAS failed), the coordinator checks the maximum of
+       the quorum's `promised` values against the `MRA`'s ballot number. If that maximum isn't higher, the operation
+       completes.
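+
+The added check is a single comparison on the coordinator; a minimal sketch with hypothetical names:
+
+```java
+// Hypothetical: may an empty proposal (read or failed CAS) complete without the propose phase?
+final class NoProposalReadCheck
+{
+    static boolean canSkipEmptyProposal(long maxPromisedInQuorum, long mraBallot)
+    {
+        // If no replica in the quorum has promised a ballot newer than the MRA's, no write can have
+        // been started concurrently with this operation, so there is nothing to invalidate.
+        return maxPromisedInQuorum <= mraBallot;
+    }
+}
+```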
+
+Since we do not change issued proposals or make new ones, Proposition 1 is still in force. For Proposition 2 we must
+consider the possibility of a no-propose read missing an update with an earlier ballot number that was decided on
+concurrently. The difference in the new scheme is that this read will not invalidate an incomplete concurrent write, and
+thus an undecided entry could be decided after the read executes. However, to propose a new entry, a coordinator must
+first obtain a quorum of promises using a ballot number greater than the last committed or empty value's. Given such a
+promise and a read executing with higher ballot, at least one of the reader's quorum replicas must return its ballot
+number or higher in the `promised` field (otherwise the read's promise will have executed before the write's and the
+preparation would have been rejected). As a result, the coordinator will see a concurrent operation (either in a
+non-committed `MRA`, or a `promised` value higher than a committed or empty `MRA`) and will proceed to issue an invalidating
+empty proposal.
+
+## Concurrent reads (Version 2 only)
+
+As stated, the above optimization is only useful once per successful proposal, because a read executed in this way does
+not complete a Paxos round, and will be treated as concurrent with any operation started after it. To improve this, we can use the fact
+that reads do not affect other reads, i.e. they are commutative operations and the order of execution of a set of reads
+with no concurrent writes is not significant, and separately store and issue read-only and write promises.
+
+More precisely, `prepare` messages are augmented with an `isWrite` flag, and an additional register called
+`promisedWrite` is maintained. The latter is updated when a promise is given for a `prepare` message with a true
+`isWrite` field, and is returned with all `promise` messages (in addition to `promised` as above). When a promise is
+requested with a ballot number lower than the current `promised` but higher than `promisedWrite`, the replica does not
+reject the request, but issues a "read-only promise" (note that this can be a normal `promise` message, recognized by
+`promised` being greater than the coordinator-issued ballot number), which cannot be used for making any proposal.
+
+The condition for making a no-proposal read is that the maximum returned `promisedWrite` number is not higher than the
+`MRA`'s (i.e. concurrent reads are permitted, but not concurrent writes). Provided that this is the case, the coordinator
+can use read-only promises to execute no-proposal reads and failing CASs. If the condition does not hold, it must restart
+the process, treating the read-only promises as rejections.
+
+Steps 2, 3, 4 and 8 are changed to accommodate this. The modified algorithm becomes:
+
+1. A coordinator selects a fresh ballot number (based on time and made unique by including a source node identifier).
+2. The coordinator sends a `prepare` message to all replicas responsible for the targeted partition with the given
+   ballot number, `isWrite` set to true if the operation is a CAS and false if it is a read, and a request to read the
+   data required by the operation.
+3. By accepting the message, a replica declares that it will not accept any `propose` message or issue write promises
+   with smaller ballot number:
+    1. If the replica's `promisedWrite` register is greater than the number included in the message, the replica rejects
+       it, sending back its current `promised` number in a `rejected` message.
+    2. A `read-only` flag is initialized to false.
+    3. If the `promised` register contains a lower value than the one supplied by the message, the `promised` register
+       is updated. Otherwise, the `read-only` flag is set to true.
+    4. If the message's `isWrite` flag is true and `read-only` is still false, the `promisedWrite` register is updated
+       to the passed ballot number. Otherwise, `read-only` is set to true.
+    5. The replica replies with a `promise` message which includes the `read-only` flag, the contents of the current
+       `accepted` and previous `promised` and `promisedWrite` registers together with the requested data from the
+       database.
+4. On receipt of `promise`s from a quorum of nodes, the coordinator compiles the "most recently accepted" (`MRA`) value as
+   the greatest among the accepted values in the promises and then:
+    1. If the `MRA` is neither null nor empty, and its committed flag is not true, there is an in-progress Paxos session that
+       needs to be completed and committed. The coordinator prepares a reproposal of the value with the new ballot,
+       continuing with step (v) below, and then restarts the process.
+    2. If the `MRA` is not null, its committed flag is true and it is not a match for the `accepted` value of a quorum of
+       promises, the coordinator sends a `commit` message to all replicas whose value did not match and awaits responses
+       until they form a quorum of replicas with a matching `accepted` value.
+    3. The coordinator then creates a proposal, a partition update which is the result of applying the operation, using
+       a read result obtained by resolving the quorum of read responses; the partition update is empty if the operation
+       was a read or the CAS failed.
+    4. If the proposal is empty (i.e. the operation was a read or the CAS failed), the coordinator checks the maximum of
+       the quorum's `promisedWrite` values against the `MRA`'s ballot number. If that maximum isn't higher, the operation
+       completes.
+    5. If there was no quorum of promises with false `read-only` flag, the coordinator restarts the process (step 8).
+    6. Otherwise, it sends the proposal as a `propose` message with the value and the current ballot number to all
+       replicas.
+5. A replica accepts the proposal if it has not promised that it will not do so:
+    1. If its `promised` ballot number is not higher than the proposal's, it sets its `accepted` register to the
+       proposal with its ballot number and a false `committed` flag, updates its `promised` register to the proposal's
+       ballot and sends back an `accepted` message.
+    2. Otherwise, it rejects the proposal by sending the current `promised` number in a `rejected` message.
+6. On receipt of `accepted` messages from a quorum of nodes, the Paxos session has reached a decision. The coordinator
+   completes it by sending `commit` messages to all replicas, attaching the proposal value and its ballot number.
+    1. It can return completion without waiting for receipt of any commit messages.
+7. A replica accepts a commit unconditionally, by applying the attached partition update. If the commit's ballot number
+   is greater than the replica's `accepted` ballot number, it sets its `accepted` register to the message's value and
+   ballot with true `committed` flag, and the `promised` register to its ballot number.
+8. If at any stage of the process that requires a quorum the quorum is not reached, the coordinator backs off and then
+   restarts the process from the beginning using a fresh ballot that is greater than the `promised` ballot contained in
+   any `rejected` or `promise` message received.
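+
+The replica-side promise handling in step 3 of this modified algorithm can be sketched as follows, with hypothetical
+names and `long` ballots; the real reply also carries the accepted register, the prior `promised`/`promisedWrite`
+values and the read data:
+
+```java
+// Hypothetical replica handling for a prepare with read-only promises (step 3 above).
+final class ReadWritePromiseSketch
+{
+    long promised;        // greatest promised ballot, read or write
+    long promisedWrite;   // greatest ballot promised to a write prepare
+
+    static final class PromiseResult
+    {
+        final boolean rejected;
+        final boolean readOnly;
+        PromiseResult(boolean rejected, boolean readOnly) { this.rejected = rejected; this.readOnly = readOnly; }
+    }
+
+    synchronized PromiseResult prepare(long ballot, boolean isWrite)
+    {
+        // 3.i: reject only if a newer write promise exists
+        if (promisedWrite > ballot)
+            return new PromiseResult(true, false);
+
+        // 3.ii-3.iii: grant a full promise if the ballot is newest, otherwise a read-only one
+        boolean readOnly = false;
+        if (promised < ballot)
+            promised = ballot;
+        else
+            readOnly = true;
+
+        // 3.iv: only a write prepare holding a full promise advances promisedWrite
+        if (isWrite && !readOnly)
+            promisedWrite = ballot;
+        else
+            readOnly = true;
+
+        // 3.v: the reply would also include the registers and the requested data
+        return new PromiseResult(false, readOnly);
+    }
+}
+```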
+
+With respect to any operation that issues a proposal, this algorithm fully matches the earlier version. For operations
+that do not (including all operations executing with a read-only promise), it allows multiple such operations to execute
+concurrently as long as no write promise quorum has been reached after the last commit. The reasoning of the previous
+section is still valid and proves that no older proposal can be agreed on after a no-proposal read.
+
+## Paxos system table expiration (Version 1 only)
+
+The Paxos state registers used by the algorithm are persisted in the Paxos system table. For every partition with LWTs,
+this table will contain an entry specifying the current values of all registers (promised, promisedWrite, accepted,
+committed). Because this information is per-partition, there is a high chance that this table will quickly become very
+large if LWTs are used with many independent partitions.
+
+To make sure the overhead of the Paxos system table remains limited, Version 1 of the Cassandra Paxos implementation
+specifies a time-to-live (TTL) for all writes. That is, after a certain period of time with no LWT to a partition, the
+replica will forget the partition's Paxos state.
+
+If this data expires, any in-progress operations may fail to be brought to completion. With the algorithm as described
+above, one of the effects of this is that some writes that are reported complete may fail to ever be committed on a
+majority of replicas, or even on any replica (if e.g. connection with the replicas is lost before commits are sent, and
+the TTL expires before any new LWT operation on the partition is initiated).
+
+To avoid this problem, Version 1 of the implementation only reports success on writes after the commit stage has reached
+a requested consistency level. This solves the problem of reporting success, but only guarantees that LWT writes behave
+like non-LWT ones: a write may still reach a minority of nodes and leave the partition in an inconsistent state, which
+permits linearizability guarantees to be violated.
+
+## Paxos repair (Version 2 only)
+
+In the second version of the implementation the Paxos system table does not expire. Instead, clearing up state is
+performed by a "Paxos repair" process which actively processes unfinished Paxos sessions and only deletes state that is
+known to have been brought to completion (i.e. successful majority commit).
+
+The repair process starts with taking a snapshot of the uncommitted operations in the Paxos system table. It then takes
+their ballots' maximum, which is then distributed to all nodes to be used as a lower bound on all promises, i.e.
+replicas stop accepting messages with earlier ballots. The process then proceeds to perform the equivalent of an empty
+read on all partitions in the snapshot. Upon completion, it can guarantee that all proposals with a ballot lower than
+the bound have been completed, i.e. either accepted and committed or superseded in a majority of nodes.
+
+What this means is that no LWT with an earlier ballot can be incomplete, and thus we no longer need any earlier state in the
+Paxos system table. The actual data deletion happens on compaction, where we drop all data that has lower ballots than
+what we know to have been repaired.
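+
+A minimal outline of this repair flow, with hypothetical names that do not correspond to the real classes:
+
+```java
+// Hypothetical outline of the Paxos repair flow described above.
+final class PaxosRepairSketch
+{
+    static final class UncommittedEntry
+    {
+        final long ballot;
+        final Object partitionKey;
+        UncommittedEntry(long ballot, Object partitionKey) { this.ballot = ballot; this.partitionKey = partitionKey; }
+    }
+
+    void repairTable(Iterable<UncommittedEntry> snapshot)
+    {
+        // 1. take the maximum ballot of the snapshotted uncommitted operations
+        long lowBound = Long.MIN_VALUE;
+        for (UncommittedEntry e : snapshot)
+            lowBound = Math.max(lowBound, e.ballot);
+
+        // 2. distribute the bound; replicas stop accepting messages with ballots at or below it
+        broadcastLowBound(lowBound);
+
+        // 3. drive every snapshotted partition to completion with the equivalent of an empty read
+        for (UncommittedEntry e : snapshot)
+            performEmptyRead(e.partitionKey);
+
+        // 4. compaction may now purge Paxos state whose ballots are below the repaired bound
+    }
+
+    private void broadcastLowBound(long ballot) { /* hypothetical stand-in */ }
+    private void performEmptyRead(Object partitionKey) { /* hypothetical stand-in */ }
+}
+```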
+
+## Correctness in the presence of range movements (Version 2 only)
+
+The crucial requirement for any Paxos-like scheme to work is to only make progress when we are guaranteed that all
+earlier decision points have at least one representative among the replicas that reply to a message (in other words,
+that all quorums intersect). When the node set is fixed this is achieved by requiring that a quorum contains more than
+half the replicas for the given partition.
+
+Range movements (i.e. joining or leaving nodes), however, can change the set of replicas that are responsible for a
+partition. In the extreme case, after multiple range movements it is possible to have a completely different set of
+replicas responsible for the partition (i.e. no quorum can exist that contains a replica for all earlier transactions).
+To deal with this problem, we must ensure that:
+
+- While operations in an incomplete state are ongoing, a quorum is formed in such a way that it contains a majority for
+  the current replica set _as well as_ for the replica set before the operation was started (as well as any intermediate
+  set, if it is possible to perform multiple range movements in parallel).
+- By the time we transition fully to a new set of replicas responsible for a partition, we have completed moving all
+  committed mutations from any source replica to its replacement.
+
+The Paxos repair process above gives us a way to complete ongoing operations and a point in time when we can assume that
+all earlier LWT operations are complete. In combination with streaming, which moves all committed data to the new
+replica, this means that from the point at which both have completed we can safely use the new replica in quorums in
+place of the source.
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosCommit.java b/src/java/org/apache/cassandra/service/paxos/PaxosCommit.java
new file mode 100644
index 0000000..4321fc9
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosCommit.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.util.concurrent.atomic.AtomicLongFieldUpdater;
+import java.util.function.Consumer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.locator.EndpointsForToken;
+import org.apache.cassandra.locator.InOurDc;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.NoPayload;
+import org.apache.cassandra.service.paxos.Paxos.Participants;
+import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.utils.concurrent.ConditionAsConsumer;
+
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.exceptions.RequestFailureReason.NODE_DOWN;
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.net.Verb.PAXOS2_COMMIT_REMOTE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_COMMIT_REQ;
+import static org.apache.cassandra.service.StorageProxy.shouldHint;
+import static org.apache.cassandra.service.StorageProxy.submitHint;
+import static org.apache.cassandra.service.paxos.Commit.*;
+import static org.apache.cassandra.utils.concurrent.ConditionAsConsumer.newConditionAsConsumer;
+
+// Does not support EACH_QUORUM, as no such thing as EACH_SERIAL
+public class PaxosCommit<OnDone extends Consumer<? super PaxosCommit.Status>> extends PaxosRequestCallback<NoPayload>
+{
+    public static final RequestHandler requestHandler = new RequestHandler();
+    private static final Logger logger = LoggerFactory.getLogger(PaxosCommit.class);
+
+    private static volatile boolean ENABLE_DC_LOCAL_COMMIT = Boolean.parseBoolean(System.getProperty("cassandra.enable_dc_local_commit", "true"));
+
+    public static boolean getEnableDcLocalCommit()
+    {
+        return ENABLE_DC_LOCAL_COMMIT;
+    }
+
+    public static void setEnableDcLocalCommit(boolean enableDcLocalCommit)
+    {
+        ENABLE_DC_LOCAL_COMMIT = enableDcLocalCommit;
+    }
+
+    /**
+     * Represents the current status of a commit action: it is a status rather than a result,
+     * as the result may be unknown without sufficient responses (though in most cases it is a final status).
+     */
+    static class Status
+    {
+        private final Paxos.MaybeFailure maybeFailure;
+
+        Status(Paxos.MaybeFailure maybeFailure)
+        {
+            this.maybeFailure = maybeFailure;
+        }
+
+        boolean isSuccess() { return maybeFailure == null; }
+        Paxos.MaybeFailure maybeFailure() { return maybeFailure; }
+
+        public String toString() { return maybeFailure == null ? "Success" : maybeFailure.toString(); }
+    }
+
+    private static final Status success = new Status(null);
+
+    private static final AtomicLongFieldUpdater<PaxosCommit> responsesUpdater = AtomicLongFieldUpdater.newUpdater(PaxosCommit.class, "responses");
+
+    final Agreed commit;
+    final boolean allowHints;
+    final ConsistencyLevel consistencyForConsensus;
+    final ConsistencyLevel consistencyForCommit;
+
+    final EndpointsForToken replicas;
+    final int required;
+    final OnDone onDone;
+
+    /**
+     * packs two 32-bit integers;
+     * bit 00-31: accepts
+     * bit 32-63: failures/timeouts
+     * 
+     * {@link #accepts} 
+     * {@link #failures}
+     */
+    private volatile long responses;
+
+    public PaxosCommit(Agreed commit, boolean allowHints, ConsistencyLevel consistencyForConsensus, ConsistencyLevel consistencyForCommit, Participants participants, OnDone onDone)
+    {
+        this.commit = commit;
+        this.allowHints = allowHints;
+        this.consistencyForConsensus = consistencyForConsensus;
+        this.consistencyForCommit = consistencyForCommit;
+        this.replicas = participants.all;
+        this.onDone = onDone;
+        this.required = participants.requiredFor(consistencyForCommit);
+        if (required == 0)
+            onDone.accept(status());
+    }
+
+    /**
+     * Submit the proposal for commit with all replicas, and wait synchronously until at most {@code deadline} for the result
+     */
+    static Paxos.Async<Status> commit(Agreed commit, Participants participants, ConsistencyLevel consistencyForConsensus, ConsistencyLevel consistencyForCommit, @Deprecated boolean allowHints)
+    {
+        // to avoid unnecessary object allocations we extend PaxosCommit to implement Paxos.Async
+        class Async extends PaxosCommit<ConditionAsConsumer<Status>> implements Paxos.Async<Status>
+        {
+            private Async(Agreed commit, boolean allowHints, ConsistencyLevel consistencyForConsensus, ConsistencyLevel consistencyForCommit, Participants participants)
+            {
+                super(commit, allowHints, consistencyForConsensus, consistencyForCommit, participants, newConditionAsConsumer());
+            }
+
+            public Status awaitUntil(long deadline)
+            {
+                try
+                {
+                    onDone.awaitUntil(deadline);
+                }
+                catch (InterruptedException e)
+                {
+                    Thread.currentThread().interrupt();
+                    return new Status(new Paxos.MaybeFailure(true, replicas.size(), required, 0, emptyMap()));
+                }
+
+                return status();
+            }
+        }
+
+        Async async = new Async(commit, allowHints, consistencyForConsensus, consistencyForCommit, participants);
+        async.start(participants, false);
+        return async;
+    }
+
+    /**
+     * Submit the proposal for commit with all replicas, invoking {@code onDone} once the outcome is known
+     */
+    static <T extends Consumer<Status>> T commit(Agreed commit, Participants participants, ConsistencyLevel consistencyForConsensus, ConsistencyLevel consistencyForCommit, @Deprecated boolean allowHints, T onDone)
+    {
+        new PaxosCommit<>(commit, allowHints, consistencyForConsensus, consistencyForCommit, participants, onDone)
+                .start(participants, true);
+        return onDone;
+    }
+
+    /**
+     * Send commit messages to peers (or self)
+     */
+    void start(Participants participants, boolean async)
+    {
+        boolean executeOnSelf = false;
+        Message<Agreed> commitMessage = Message.out(PAXOS_COMMIT_REQ, commit);
+        Message<Mutation> mutationMessage = ENABLE_DC_LOCAL_COMMIT && consistencyForConsensus.isDatacenterLocal()
+                                            ? Message.out(PAXOS2_COMMIT_REMOTE_REQ, commit.makeMutation()) : null;
+
+        for (int i = 0, mi = participants.allLive.size(); i < mi ; ++i)
+            executeOnSelf |= isSelfOrSend(commitMessage, mutationMessage, participants.allLive.endpoint(i));
+
+        for (int i = 0, mi = participants.allDown.size(); i < mi ; ++i)
+            onFailure(participants.allDown.endpoint(i), NODE_DOWN);
+
+        if (executeOnSelf)
+        {
+            ExecutorPlus executor = PAXOS_COMMIT_REQ.stage.executor();
+            if (async) executor.execute(this::executeOnSelf);
+            else executor.maybeExecuteImmediately(this::executeOnSelf);
+        }
+    }
+
+    /**
+     * If the destination is this node return true; otherwise send the appropriate commit message and return false
+     */
+    private boolean isSelfOrSend(Message<Agreed> commitMessage, Message<Mutation> mutationMessage, InetAddressAndPort destination)
+    {
+        if (shouldExecuteOnSelf(destination))
+            return true;
+
+        // don't send commits to remote dcs for local_serial operations
+        if (mutationMessage != null && !isInLocalDc(destination))
+            MessagingService.instance().sendWithCallback(mutationMessage, destination, this);
+        else
+            MessagingService.instance().sendWithCallback(commitMessage, destination, this);
+
+        return false;
+    }
+
+    private static boolean isInLocalDc(InetAddressAndPort destination)
+    {
+        return DatabaseDescriptor.getLocalDataCenter().equals(DatabaseDescriptor.getEndpointSnitch().getDatacenter(destination));
+    }
+
+    /**
+     * Record a failure or timeout, and maybe submit a hint to {@code from}
+     */
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        if (logger.isTraceEnabled())
+            logger.trace("{} {} from {}", commit, reason, from);
+
+        response(false, from);
+        Replica replica = replicas.lookup(from);
+
+        if (allowHints && shouldHint(replica))
+            submitHint(commit.makeMutation(), replica, null);
+    }
+
+    /**
+     * Record a success response
+     */
+    public void onResponse(Message<NoPayload> response)
+    {
+        logger.trace("{} Success from {}", commit, response.from());
+
+        response(true, response.from());
+    }
+
+    /**
+     * Execute locally and record response
+     */
+    public void executeOnSelf()
+    {
+        executeOnSelf(commit, RequestHandler::execute);
+    }
+
+    @Override
+    public void onResponse(NoPayload response, InetAddressAndPort from)
+    {
+        response(response != null, from);
+    }
+
+    /**
+     * Record a failure or success response if {@code from} contributes to our consistency.
+     * If we have reached a final outcome of the commit, run {@code onDone}.
+     */
+    private void response(boolean success, InetAddressAndPort from)
+    {
+        if (consistencyForCommit.isDatacenterLocal() && !InOurDc.endpoints().test(from))
+            return;
+
+        long responses = responsesUpdater.addAndGet(this, success ? 0x1L : 0x100000000L);
+        // next two clauses mutually exclusive to ensure we only invoke onDone once, when either failed or succeeded
+        if (accepts(responses) == required) // if we have received _precisely_ the required accepts, we have succeeded
+            onDone.accept(status());
+        else if (replicas.size() - failures(responses) == required - 1) // if we are _unable_ to receive the required accepts, we have failed
+            onDone.accept(status());
+    }
+
+    /**
+     * @return the Status as of now, which may be final or may indicate we have not received sufficient responses
+     */
+    Status status()
+    {
+        long responses = this.responses;
+        if (isSuccessful(responses))
+            return success;
+
+        return new Status(new Paxos.MaybeFailure(replicas.size(), required, accepts(responses), failureReasonsAsMap()));
+    }
+
+    private boolean isSuccessful(long responses)
+    {
+        return accepts(responses) >= required;
+    }
+
+    private static int accepts(long responses)
+    {
+        return (int) (responses & 0xffffffffL);
+    }
+
+    private static int failures(long responses)
+    {
+        return (int) (responses >>> 32);
+    }
+
+    public static class RequestHandler implements IVerbHandler<Agreed>
+    {
+        @Override
+        public void doVerb(Message<Agreed> message)
+        {
+            NoPayload response = execute(message.payload, message.from());
+            // NOTE: for correctness, this must be our last action, so that we cannot throw an error and send both a response and a failure response
+            if (response == null)
+                MessagingService.instance().respondWithFailure(UNKNOWN, message);
+            else
+                MessagingService.instance().respond(response, message);
+        }
+
+        private static NoPayload execute(Agreed agreed, InetAddressAndPort from)
+        {
+            if (!Paxos.isInRangeAndShouldProcess(from, agreed.update.partitionKey(), agreed.update.metadata(), false))
+                return null;
+
+            PaxosState.commitDirect(agreed);
+            Tracing.trace("Enqueuing acknowledge to {}", from);
+            return NoPayload.noPayload;
+        }
+    }
+
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosCommitAndPrepare.java b/src/java/org/apache/cassandra/service/paxos/PaxosCommitAndPrepare.java
new file mode 100644
index 0000000..7a9dba7
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosCommitAndPrepare.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.IOException;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.SinglePartitionReadCommand;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Commit.Agreed;
+import org.apache.cassandra.tracing.Tracing;
+
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.net.Verb.PAXOS2_COMMIT_AND_PREPARE_REQ;
+import static org.apache.cassandra.service.paxos.Paxos.newBallot;
+import static org.apache.cassandra.service.paxos.PaxosPrepare.start;
+
+public class PaxosCommitAndPrepare
+{
+    public static final RequestSerializer requestSerializer = new RequestSerializer();
+    public static final RequestHandler requestHandler = new RequestHandler();
+
+    static PaxosPrepare commitAndPrepare(Agreed commit, Paxos.Participants participants, SinglePartitionReadCommand readCommand, boolean isWrite, boolean acceptEarlyReadSuccess)
+    {
+        Ballot ballot = newBallot(commit.ballot, participants.consistencyForConsensus);
+        Request request = new Request(commit, ballot, participants.electorate, readCommand, isWrite);
+        PaxosPrepare prepare = new PaxosPrepare(participants, request, acceptEarlyReadSuccess, null);
+
+        Tracing.trace("Committing {}; Preparing {}", commit.ballot, ballot);
+        Message<Request> message = Message.out(PAXOS2_COMMIT_AND_PREPARE_REQ, request);
+//                .permitsArtificialDelay(participants.consistencyForConsensus);
+        start(prepare, participants, message, RequestHandler::execute);
+        return prepare;
+    }
+
+    private static class Request extends PaxosPrepare.AbstractRequest<Request>
+    {
+        final Agreed commit;
+
+        Request(Agreed commit, Ballot ballot, Paxos.Electorate electorate, SinglePartitionReadCommand read, boolean isWrite)
+        {
+            super(ballot, electorate, read, isWrite);
+            this.commit = commit;
+        }
+
+        private Request(Agreed commit, Ballot ballot, Paxos.Electorate electorate, DecoratedKey partitionKey, TableMetadata table, boolean isWrite)
+        {
+            super(ballot, electorate, partitionKey, table, isWrite);
+            this.commit = commit;
+        }
+
+        Request withoutRead()
+        {
+            return new Request(commit, ballot, electorate, partitionKey, table, isForWrite);
+        }
+
+        public String toString()
+        {
+            return commit.toString("CommitAndPrepare(") + ", " + Ballot.toString(ballot) + ')';
+        }
+    }
+
+    public static class RequestSerializer extends PaxosPrepare.AbstractRequestSerializer<Request, Agreed>
+    {
+        Request construct(Agreed param, Ballot ballot, Paxos.Electorate electorate, SinglePartitionReadCommand read, boolean isWrite)
+        {
+            return new Request(param, ballot, electorate, read, isWrite);
+        }
+
+        Request construct(Agreed param, Ballot ballot, Paxos.Electorate electorate, DecoratedKey partitionKey, TableMetadata table, boolean isWrite)
+        {
+            return new Request(param, ballot, electorate, partitionKey, table, isWrite);
+        }
+
+        @Override
+        public void serialize(Request request, DataOutputPlus out, int version) throws IOException
+        {
+            Agreed.serializer.serialize(request.commit, out, version);
+            super.serialize(request, out, version);
+        }
+
+        @Override
+        public Request deserialize(DataInputPlus in, int version) throws IOException
+        {
+            Agreed committed = Agreed.serializer.deserialize(in, version);
+            return deserialize(committed, in, version);
+        }
+
+        @Override
+        public long serializedSize(Request request, int version)
+        {
+            return Agreed.serializer.serializedSize(request.commit, version)
+                    + super.serializedSize(request, version);
+        }
+    }
+
+    public static class RequestHandler implements IVerbHandler<Request>
+    {
+        @Override
+        public void doVerb(Message<Request> message)
+        {
+            PaxosPrepare.Response response = execute(message.payload, message.from());
+            if (response == null)
+                MessagingService.instance().respondWithFailure(UNKNOWN, message);
+            else
+                MessagingService.instance().respond(response, message);
+        }
+
+        private static PaxosPrepare.Response execute(Request request, InetAddressAndPort from)
+        {
+            Agreed commit = request.commit;
+            if (!Paxos.isInRangeAndShouldProcess(from, commit.update.partitionKey(), commit.update.metadata(), request.read != null))
+                return null;
+
+            try (PaxosState state = PaxosState.get(commit))
+            {
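+                // apply the piggybacked commit to local paxos state first, so the prepare response reflects it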
+                state.commit(commit);
+                return PaxosPrepare.RequestHandler.execute(request, state);
+            }
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosOperationLock.java b/src/java/org/apache/cassandra/service/paxos/PaxosOperationLock.java
new file mode 100644
index 0000000..b9f01e8
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosOperationLock.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+public interface PaxosOperationLock extends AutoCloseable
+{
+    @Override
+    public void close();
+
+    static PaxosOperationLock noOp()
+    {
+        return () -> {};
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosPrepare.java b/src/java/org/apache/cassandra/service/paxos/PaxosPrepare.java
new file mode 100644
index 0000000..a7f58e1
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosPrepare.java
@@ -0,0 +1,1253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.exceptions.UnavailableException;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.metrics.PaxosMetrics;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.PendingRangeCalculatorService;
+import org.apache.cassandra.service.paxos.PaxosPrepare.Status.Outcome;
+import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.utils.vint.VIntCoding;
+
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_RSP;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+import static org.apache.cassandra.service.paxos.Commit.*;
+import static org.apache.cassandra.service.paxos.Commit.CompareResult.WAS_REPROPOSED_BY;
+import static org.apache.cassandra.service.paxos.Paxos.*;
+import static org.apache.cassandra.service.paxos.PaxosPrepare.Status.Outcome.*;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.service.paxos.PaxosState.*;
+import static org.apache.cassandra.service.paxos.PaxosState.MaybePromise.Outcome.*;
+import static org.apache.cassandra.utils.CollectionSerializer.deserializeMap;
+import static org.apache.cassandra.utils.CollectionSerializer.newHashMap;
+import static org.apache.cassandra.utils.CollectionSerializer.serializeMap;
+import static org.apache.cassandra.utils.CollectionSerializer.serializedSizeMap;
+import static org.apache.cassandra.utils.concurrent.Awaitable.SyncAwaitable.waitUntil;
+
+/**
+ * Perform one paxos "prepare" attempt, with various optimisations.
+ *
+ * The prepare step entails asking for a quorum of nodes to promise to accept our later proposal. It can
+ * yield one of five logical answers:
+ *
+ *   1) Success         - we have received a quorum of promises, and we know that a quorum of nodes
+ *                        witnessed the prior round's commit (if any)
+ *   2) Timeout         - we have not received enough responses at all before our deadline passed
+ *   3) Failure         - we have received too many explicit failures to succeed
+ *   4) Superseded      - we have been informed of a later ballot that has been promised
+ *   5) FoundInProgress - we have been informed of an earlier promise that has been accepted
+ *
+ * Success hinges on two distinct criteria being met, as the quorum of promises may not guarantee a quorum of
+ * witnesses of the prior round's commit.  We track this separately by recording those nodes that have witnessed
+ * the prior round's commit.  On receiving a quorum of promises, we submit the prior round's commit to any promisers
+ * that had not witnessed it, while continuing to wait for responses to our original request: as soon as we hear of
+ * a quorum that have witnessed it, either by our refresh request or by responses to the original request, we yield Success.
+ *
+ * Success is also accompanied by a quorum of read responses, avoiding another round-trip to obtain this result.
+ *
+ * This operation may be started either with a solo Prepare command, or with a prefixed Commit command.
+ * If we are completing an in-progress round we previously discovered, we save another round-trip by committing and
+ * preparing simultaneously.
+ */
+public class PaxosPrepare extends PaxosRequestCallback<PaxosPrepare.Response> implements PaxosPrepareRefresh.Callbacks, Paxos.Async<PaxosPrepare.Status>
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosPrepare.class);
+
+    private static Runnable onLinearizabilityViolation;
+
+    public static final RequestHandler requestHandler = new RequestHandler();
+    public static final RequestSerializer requestSerializer = new RequestSerializer();
+    public static final ResponseSerializer responseSerializer = new ResponseSerializer();
+
+    /**
+     * Represents the current status of a prepare action: it is a status rather than a result,
+     * as the result may be unknown without sufficient responses (though in most cases it is a final status).
+     */
+    static class Status
+    {
+        enum Outcome { READ_PERMITTED, PROMISED, SUPERSEDED, FOUND_INCOMPLETE_ACCEPTED, FOUND_INCOMPLETE_COMMITTED, MAYBE_FAILURE, ELECTORATE_MISMATCH }
+
+        final Outcome outcome;
+        final Participants participants;
+
+        Status(Outcome outcome, Participants participants)
+        {
+            this.outcome = outcome;
+            this.participants = participants;
+        }
+        @Nullable
+        Ballot retryWithAtLeast()
+        {
+            switch (outcome)
+            {
+                case READ_PERMITTED: return ((Success) this).supersededBy;
+                case SUPERSEDED: return ((Superseded) this).by;
+                default: return null;
+            }
+        }
+        Success success() { return (Success) this; }
+        FoundIncompleteAccepted incompleteAccepted() { return (FoundIncompleteAccepted) this; }
+        FoundIncompleteCommitted incompleteCommitted() { return (FoundIncompleteCommitted) this; }
+        Paxos.MaybeFailure maybeFailure() { return ((MaybeFailure) this).info; }
+    }
+
+    static class Success extends WithRequestedBallot
+    {
+        final List<Message<ReadResponse>> responses;
+        final boolean isReadSafe; // read responses constitute a linearizable read (though short read protection would invalidate that)
+        final @Nullable Ballot supersededBy; // if known and READ_SUCCESS
+
+        Success(Outcome outcome, Ballot ballot, Participants participants, List<Message<ReadResponse>> responses, boolean isReadSafe, @Nullable Ballot supersededBy)
+        {
+            super(outcome, participants, ballot);
+            this.responses = responses;
+            this.isReadSafe = isReadSafe;
+            this.supersededBy = supersededBy;
+        }
+
+        static Success read(Ballot ballot, Participants participants, List<Message<ReadResponse>> responses, @Nullable Ballot supersededBy)
+        {
+            return new Success(Outcome.READ_PERMITTED, ballot, participants, responses, true, supersededBy);
+        }
+
+        static Success readOrWrite(Ballot ballot, Participants participants, List<Message<ReadResponse>> responses, boolean isReadConsistent)
+        {
+            return new Success(Outcome.PROMISED, ballot, participants, responses, isReadConsistent, null);
+        }
+
+        public String toString() { return "Success(" + ballot + ", " + participants.electorate + ')'; }
+    }
+
+    /**
+     * The ballot we sought promises for has been superseded by another proposer's
+     *
+     * Note: we extend this for Success, so that supersededBy() can be called for ReadSuccess
+     */
+    static class Superseded extends Status
+    {
+        final Ballot by;
+
+        Superseded(Ballot by, Participants participants)
+        {
+            super(SUPERSEDED, participants);
+            this.by = by;
+        }
+
+        public String toString() { return "Superseded(" + by + ')'; }
+    }
+
+    static class WithRequestedBallot extends Status
+    {
+        final Ballot ballot;
+
+        WithRequestedBallot(Outcome outcome, Participants participants, Ballot ballot)
+        {
+            super(outcome, participants);
+            this.ballot = ballot;
+        }
+    }
+
+    static class FoundIncomplete extends WithRequestedBallot
+    {
+        private FoundIncomplete(Outcome outcome, Participants participants, Ballot promisedBallot)
+        {
+            super(outcome, participants, promisedBallot);
+        }
+    }
+
+    /**
+     * We have been informed of a promise made by one of the replicas we contacted, that was not accepted by all replicas
+     * (though may have been accepted by a majority; we don't know).
+     * In this case we cannot readily know if we have prevented this proposal from being completed, so we attempt
+     * to finish it ourselves (unfortunately leaving the proposer to timeout, given the current semantics)
+     * TODO: we should consider waiting for more responses in case we encounter any successful commit, or a majority
+     *       of acceptors?
+     */
+    static class FoundIncompleteAccepted extends FoundIncomplete
+    {
+        final Accepted accepted;
+
+        private FoundIncompleteAccepted(Ballot promisedBallot, Participants participants, Accepted accepted)
+        {
+            super(FOUND_INCOMPLETE_ACCEPTED, participants, promisedBallot);
+            this.accepted = accepted;
+        }
+
+        public String toString()
+        {
+            return "FoundIncomplete" + accepted;
+        }
+    }
+
+    /**
+     * We have been informed of a proposal that was accepted by a majority, but that we do not know to have been
+     * committed to a majority, and we failed to read from a single natural replica that had witnessed this
+     * commit when we performed the read.
+     * Since this is an edge case, we simply start again, to keep the control flow easier to understand;
+     * the commit should be committed to a majority as part of our re-prepare.
+     */
+    static class FoundIncompleteCommitted extends FoundIncomplete
+    {
+        final Committed committed;
+
+        private FoundIncompleteCommitted(Ballot promisedBallot, Participants participants, Committed committed)
+        {
+            super(FOUND_INCOMPLETE_COMMITTED, participants, promisedBallot);
+            this.committed = committed;
+        }
+
+        public String toString()
+        {
+            return "FoundIncomplete" + committed;
+        }
+    }
+
+    static class MaybeFailure extends Status
+    {
+        final Paxos.MaybeFailure info;
+        private MaybeFailure(Paxos.MaybeFailure info, Participants participants)
+        {
+            super(MAYBE_FAILURE, participants);
+            this.info = info;
+        }
+
+        public String toString() { return info.toString(); }
+    }
+
+    static class ElectorateMismatch extends WithRequestedBallot
+    {
+        private ElectorateMismatch(Participants participants, Ballot ballot)
+        {
+            super(ELECTORATE_MISMATCH, participants, ballot);
+        }
+    }
+
+    private final boolean acceptEarlyReadPermission;
+    private final AbstractRequest<?> request;
+    private Ballot supersededBy; // cannot be promised, as a newer promise has been made
+    private Accepted latestAccepted; // the latest latestAcceptedButNotCommitted response we have received (which may still have been committed elsewhere)
+    private Committed latestCommitted; // latest actually committed proposal
+
+    private final Participants participants;
+
+    private final List<Message<ReadResponse>> readResponses;
+    private boolean haveReadResponseWithLatest;
+    private boolean haveQuorumOfPermissions; // permissions => SUCCESS or READ_SUCCESS
+    private List<InetAddressAndPort> withLatest; // promised and have latest commit
+    private List<InetAddressAndPort> needLatest; // promised without having witnessed latest commit, nor yet been refreshed by us
+    private int failures; // failed either on initial request or on refresh
+    private boolean hasProposalStability = true; // no successful modifying proposal could have raced with us and not been seen
+    private boolean hasOnlyPromises = true;
+    private long maxLowBound;
+
+    private Status outcome;
+    private final Consumer<Status> onDone;
+
+    private PaxosPrepareRefresh refreshStaleParticipants;
+    private boolean linearizabilityViolationDetected = false;
+
+    PaxosPrepare(Participants participants, AbstractRequest<?> request, boolean acceptEarlyReadPermission, Consumer<Status> onDone)
+    {
+        this.acceptEarlyReadPermission = acceptEarlyReadPermission;
+        assert participants.sizeOfConsensusQuorum > 0;
+        this.participants = participants;
+        this.request = request;
+        this.readResponses = new ArrayList<>(participants.sizeOfConsensusQuorum);
+        this.withLatest = new ArrayList<>(participants.sizeOfConsensusQuorum);
+        this.latestAccepted = this.latestCommitted = Committed.none(request.partitionKey, request.table);
+        this.onDone = onDone;
+    }
+
+    private boolean hasInProgressProposal()
+    {
+        // no need to commit a no-op; either it
+        //   1) reached a majority, in which case it was agreed, had no effect and we can do nothing; or
+        //   2) did not reach a majority, was not agreed, and was not user visible as a result so we can ignore it
+        if (latestAccepted.update.isEmpty())
+            return false;
+
+        // If we aren't newer than latestCommitted, then we're done
+        if (!latestAccepted.isAfter(latestCommitted))
+            return false;
+
+        if (latestAccepted.ballot.uuidTimestamp() <= maxLowBound)
+            return false;
+
+        // We can be a re-proposal of latestCommitted, in which case we do not need to re-propose it
+        return !latestAccepted.isReproposalOf(latestCommitted);
+    }
+
+    static PaxosPrepare prepare(Participants participants, SinglePartitionReadCommand readCommand, boolean isWrite, boolean acceptEarlyReadPermission) throws UnavailableException
+    {
+        return prepare(null, participants, readCommand, isWrite, acceptEarlyReadPermission);
+    }
+
+    static PaxosPrepare prepare(Ballot minimumBallot, Participants participants, SinglePartitionReadCommand readCommand, boolean isWrite, boolean acceptEarlyReadPermission) throws UnavailableException
+    {
+        return prepareWithBallot(newBallot(minimumBallot, participants.consistencyForConsensus), participants, readCommand, isWrite, acceptEarlyReadPermission);
+    }
+
+    static PaxosPrepare prepareWithBallot(Ballot ballot, Participants participants, SinglePartitionReadCommand readCommand, boolean isWrite, boolean acceptEarlyReadPermission)
+    {
+        Tracing.trace("Preparing {} with read", ballot);
+        Request request = new Request(ballot, participants.electorate, readCommand, isWrite);
+        return prepareWithBallotInternal(participants, request, acceptEarlyReadPermission, null);
+    }
+
+    @SuppressWarnings("SameParameterValue")
+    static <T extends Consumer<Status>> T prepareWithBallot(Ballot ballot, Participants participants, DecoratedKey partitionKey, TableMetadata table, boolean isWrite, boolean acceptEarlyReadPermission, T onDone)
+    {
+        Tracing.trace("Preparing {}", ballot);
+        prepareWithBallotInternal(participants, new Request(ballot, participants.electorate, partitionKey, table, isWrite), acceptEarlyReadPermission, onDone);
+        return onDone;
+    }
+
+    private static PaxosPrepare prepareWithBallotInternal(Participants participants, Request request, boolean acceptEarlyReadPermission, Consumer<Status> onDone)
+    {
+        PaxosPrepare prepare = new PaxosPrepare(participants, request, acceptEarlyReadPermission, onDone);
+        Message<Request> message = Message.out(PAXOS2_PREPARE_REQ, request);
+        start(prepare, participants, message, RequestHandler::execute);
+        return prepare;
+    }
+
+    /**
+     * Submit the message to our peers, and submit it for local execution if relevant
+     */
+    static <R extends AbstractRequest<R>> void start(PaxosPrepare prepare, Participants participants, Message<R> send, BiFunction<R, InetAddressAndPort, Response> selfHandler)
+    {
+        boolean executeOnSelf = false;
+        for (int i = 0, size = participants.sizeOfPoll() ; i < size ; ++i)
+        {
+            InetAddressAndPort destination = participants.voter(i);
+            boolean isPending = participants.electorate.isPending(destination);
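+            // pending endpoints are not yet read replicas, so they receive the request without its read command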
+            logger.trace("{} to {}", send.payload, destination);
+            if (shouldExecuteOnSelf(destination))
+                executeOnSelf = true;
+            else
+                MessagingService.instance().sendWithCallback(isPending ? withoutRead(send) : send, destination, prepare);
+        }
+
+        if (executeOnSelf)
+            send.verb().stage.execute(() -> prepare.executeOnSelf(send.payload, selfHandler));
+    }
+
+    // TODO: extend Sync?
+    public synchronized Status awaitUntil(long deadline)
+    {
+        try
+        {
+            //noinspection StatementWithEmptyBody
+            while (!isDone() && waitUntil(this, deadline)) {}
+
+            if (!isDone())
+                signalDone(MAYBE_FAILURE);
+
+            return outcome;
+        }
+        catch (InterruptedException e)
+        {
+            // can only normally be interrupted if the system is shutting down; should rethrow as a write failure but propagate the interrupt
+            Thread.currentThread().interrupt();
+            return new MaybeFailure(new Paxos.MaybeFailure(true, participants.sizeOfPoll(), participants.sizeOfConsensusQuorum, 0, emptyMap()), participants);
+        }
+    }
+
+    private boolean isDone()
+    {
+        return outcome != null;
+    }
+
+    private int withLatest()
+    {
+        return withLatest.size();
+    }
+
+    private int needLatest()
+    {
+        return needLatest == null ? 0 : needLatest.size();
+    }
+
+    private static boolean needsGossipUpdate(Map<InetAddressAndPort, EndpointState> gossipInfo)
+    {
+        if (gossipInfo.isEmpty())
+            return false;
+
+        for (Map.Entry<InetAddressAndPort, EndpointState> entry : gossipInfo.entrySet())
+        {
+            EndpointState remote = entry.getValue();
+            if (remote == null)
+                continue;
+            EndpointState local = Gossiper.instance.getEndpointStateForEndpoint(entry.getKey());
+            if (local == null || local.isSupersededBy(remote))
+                return true;
+        }
+
+        return false;
+    }
+
+    public synchronized void onResponse(Response response, InetAddressAndPort from)
+    {
+        if (logger.isTraceEnabled())
+            logger.trace("{} for {} from {}", response, request.ballot, from);
+
+        if (isDone())
+        {
+            maybeCheckForLinearizabilityViolation(response, from);
+            return;
+        }
+
+        if (response.isRejected())
+        {
+            Rejected rejected = response.rejected();
+            supersededBy = rejected.supersededBy;
+            signalDone(SUPERSEDED);
+            return;
+        }
+
+        Permitted permitted = response.permitted();
+        if (permitted.gossipInfo.isEmpty())
+            // we agree about the electorate, so can simply accept the promise/permission
+            permitted(permitted, from);
+        else if (!needsGossipUpdate(permitted.gossipInfo))
+            // our gossip is up-to-date, but our original electorate could have been built with stale gossip, so verify it
+            permittedOrTerminateIfElectorateMismatch(permitted, from);
+        else
+            // otherwise our beliefs about the ring potentially diverge, so update gossip with the peer's information
+            Stage.GOSSIP.executor().execute(() -> {
+                Gossiper.instance.notifyFailureDetector(permitted.gossipInfo);
+                Gossiper.instance.applyStateLocally(permitted.gossipInfo);
+
+                // TODO: We should also wait for schema pulls/pushes, however this would be quite an involved change to MigrationManager
+                //       (which currently drops some migration tasks on the floor).
+                //       Note it would be fine for us to fail to complete the migration task and simply treat this response as a failure/timeout.
+
+                // once any pending ranges have been calculated, refresh our Participants list and submit the promise
+                PendingRangeCalculatorService.instance.executeWhenFinished(() -> permittedOrTerminateIfElectorateMismatch(permitted, from));
+            });
+    }
+
+    private synchronized void permittedOrTerminateIfElectorateMismatch(Permitted permitted, InetAddressAndPort from)
+    {
+        if (isDone()) // this execution is asynchronous wrt promise arrival, so must recheck done status
+            return;
+
+        // if the electorate has changed, finish so we can retry with the updated view of the ring
+        if (!Electorate.get(request.table, request.partitionKey, consistency(request.ballot)).equals(participants.electorate))
+        {
+            signalDone(ELECTORATE_MISMATCH);
+            return;
+        }
+
+        // otherwise continue as normal
+        permitted(permitted, from);
+    }
+
+    private void permitted(Permitted permitted, InetAddressAndPort from)
+    {
+        if (permitted.outcome != PROMISE)
+        {
+            hasOnlyPromises = false;
+            if (supersededBy == null)
+                supersededBy = permitted.supersededBy;
+        }
+
+        if (permitted.lowBound > maxLowBound)
+            maxLowBound = permitted.lowBound;
+
+        if (!haveQuorumOfPermissions)
+        {
+            CompareResult compareLatest = permitted.latestCommitted.compareWith(latestCommitted);
+            switch (compareLatest)
+            {
+                default: throw new IllegalStateException();
+                case IS_REPROPOSAL:
+                    latestCommitted = permitted.latestCommitted;
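+                    // intentional fall-through: a re-proposal carries the same update, so the responder also counts as having the latest commit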
+                case WAS_REPROPOSED_BY:
+                case SAME:
+                    withLatest.add(from);
+                    haveReadResponseWithLatest |= permitted.readResponse != null;
+                    break;
+                case BEFORE:
+                    if (needLatest == null)
+                        needLatest = new ArrayList<>(participants.sizeOfPoll() - withLatest.size());
+                    needLatest.add(from);
+                    break;
+                case AFTER:
+                    // move with->need
+                    if (!withLatest.isEmpty())
+                    {
+                        if (needLatest == null)
+                        {
+                            needLatest = withLatest;
+                            withLatest = new ArrayList<>(Math.min(participants.sizeOfPoll() - needLatest.size(), participants.sizeOfConsensusQuorum));
+                        }
+                        else
+                        {
+                            needLatest.addAll(withLatest);
+                            withLatest.clear();
+                        }
+                    }
+
+                    withLatest.add(from);
+                    haveReadResponseWithLatest = permitted.readResponse != null;
+                    latestCommitted = permitted.latestCommitted;
+            }
+
+            if (isAfter(permitted.latestAcceptedButNotCommitted, latestAccepted))
+                latestAccepted = permitted.latestAcceptedButNotCommitted;
+
+            if (permitted.readResponse != null)
+            {
+                hasProposalStability &= permitted.hadProposalStability;
+                addReadResponse(permitted.readResponse, from);
+            }
+        }
+        else
+        {
+            switch (permitted.latestCommitted.compareWith(latestCommitted))
+            {
+                default: throw new IllegalStateException();
+                case SAME:
+                case IS_REPROPOSAL:
+                case WAS_REPROPOSED_BY:
+                    withLatest.add(from);
+                    break;
+
+                case AFTER:
+                    if (maybeCheckForLinearizabilityViolation(permitted, from))
+                        return;
+                    // witnessing a future commit doesn't imply having seen the prior one, so fall through and add to the refresh list
+
+                case BEFORE:
+                    if (needLatest == null)
+                        needLatest = new ArrayList<>(participants.sizeOfPoll() - withLatest.size());
+                    needLatest.add(from);
+            }
+        }
+
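+        // we have a quorum of permissions once the responders that promised (with or without the latest commit) reach the consensus quorum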
+        haveQuorumOfPermissions |= withLatest() + needLatest() >= participants.sizeOfConsensusQuorum;
+        if (haveQuorumOfPermissions)
+        {
+            if (request.read != null && readResponses.size() < participants.sizeOfReadQuorum)
+                throw new IllegalStateException("Insufficient read responses: " + readResponses + "; need " + participants.sizeOfReadQuorum);
+
+            if (!hasOnlyPromises && !hasProposalStability)
+                signalDone(SUPERSEDED);
+
+            // We must be certain to have witnessed a quorum of responses before completing any in-progress proposal
+            // else we may complete a stale proposal that did not reach a quorum (and may do so in preference
+            // to a different in progress proposal that did reach a quorum).
+
+            // We should also be sure to return any in progress proposal in preference to any incompletely committed
+            // earlier commits (since, while we should encounter it next round, any commit that is incomplete in the
+            // presence of an incomplete proposal can be ignored, as either the proposal is a re-proposal of the same
+            // commit or the commit has already reached a quorum)
+            else if (hasInProgressProposal())
+                signalDone(hasOnlyPromises ? FOUND_INCOMPLETE_ACCEPTED : SUPERSEDED);
+
+            else if (withLatest() >= participants.sizeOfConsensusQuorum)
+                signalDone(hasOnlyPromises ? PROMISED : READ_PERMITTED);
+
+            // otherwise if we have any read response with the latest commit,
+            // try to simply ensure it has been persisted to a consensus group
+            else if (haveReadResponseWithLatest)
+            {
+                refreshStaleParticipants();
+                // if an optimistic read is possible, and we are performing a read,
+                // we can safely answer immediately without waiting for the refresh
+                if (hasProposalStability && acceptEarlyReadPermission)
+                    signalDone(Outcome.READ_PERMITTED);
+            }
+
+            // otherwise we need to run our reads again anyway,
+            // and the chance of receiving another response with latest may be slim.
+            // so we just start again
+            else
+                signalDone(FOUND_INCOMPLETE_COMMITTED);
+        }
+    }
+
+    private boolean maybeCheckForLinearizabilityViolation(Response response, InetAddressAndPort from)
+    {
+        if (!response.isPromised() || !haveQuorumOfPermissions || !hasOnlyPromises)
+            return false;
+
+        Permitted permitted = response.permitted();
+        if (permitted.latestCommitted.compareWith(latestCommitted) == CompareResult.AFTER)
+            return checkForLinearizabilityViolation(permitted, from);
+        return false;
+    }
+
+    private static boolean isRunningLegacyPaxos()
+    {
+        switch (getPaxosVariant())
+        {
+            case v1:
+            case v1_without_linearizable_reads_or_rejected_writes:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    private Ballot getLowBoundForKey()
+    {
+        ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(request.table.id);
+        return cfs != null ? cfs.getPaxosRepairLowBound(request.partitionKey) : Ballot.none();
+    }
+
+    /**
+     * The linearizability check is incompatible with legacy paxos due to at least 2 issues:
+     *  1. The prepare phase doesn't evaluate accepted/committed ballots when promising ballots (excluding legacy_fixed)
+     *  2. Commits made at LOCAL_SERIAL are sent to all DCs
+     * Both issues will trigger linearizability violations, but are fixed by paxos repair. So we shouldn't do
+     * linearizability checks unless we're running v2 paxos and have had at least one paxos repair covering this
+     * operation's key.
+     */
+    private boolean isCompatibleWithLinearizabilityCheck()
+    {
+        if (isRunningLegacyPaxos())
+            return false;
+
+        return getLowBoundForKey() != Ballot.none();
+    }
+
+    private boolean checkForLinearizabilityViolation(Permitted permitted, InetAddressAndPort from)
+    {
+        if (!isCompatibleWithLinearizabilityCheck())
+            return false;
+
+        if (linearizabilityViolationDetected)
+            return false;
+        // if we witness a newer commit AND have been accepted (promised), something has gone wrong, except:
+
+        // if we have raced with an ongoing commit, having missed all of them initially
+        if (permitted.latestCommitted.hasSameBallot(latestAccepted))
+            return false;
+
+        // or in the case that we have an empty proposal accepted, since that will not be committed
+        // in theory in this case we could now restart refreshStaleParticipants, but this would
+        // unnecessarily complicate the logic so instead we accept that we will unnecessarily re-propose
+        if (latestAccepted != null && latestAccepted.update.isEmpty() && latestAccepted.isAfter(permitted.latestCommitted))
+            return false;
+
+        // or in the case that both are older than the most recent repair low bound, in which case a topology change
+        // could have occurred that means not all paxos state tables know of the accept/commit, though it is persistent;
+        // in theory in this case we could ignore this entirely and call ourselves done
+        // TODO: consider this more; is it possible we cause problems by reproposing an old accept?
+        //  shouldn't be, as any newer accept that reaches a quorum will supersede
+        if (permitted.latestCommitted.ballot.uuidTimestamp() <= maxLowBound)
+            return false;
+
+        // if the latest commit ballot doesn't have an encoded consistency level, it's from a legacy paxos operation.
+        // Legacy paxos operations would send commits to all replicas for LOCAL_SERIAL operations, which look like
+        // linearizability violations from datacenters the operation wasn't run in, so we ignore them here.
+        if (permitted.latestCommitted.ballot.flag() == NONE)
+            return false;
+
+        // If we discovered an incomplete proposal, it could have since completed successfully
+        if (latestAccepted != null && outcome.outcome == FOUND_INCOMPLETE_ACCEPTED)
+        {
+            switch (permitted.latestCommitted.compareWith(latestAccepted))
+            {
+                case WAS_REPROPOSED_BY:
+                case SAME:
+                    return false;
+            }
+        }
+
+        long gcGraceMicros = TimeUnit.SECONDS.toMicros(permitted.latestCommitted.update.metadata().params.gcGraceSeconds);
+        // paxos repair uses stale ballots, so comparing against request.ballot time will not completely prevent false
+        // positives, since compaction may have removed paxos metadata on some nodes and not others. It's also possible
+        // clock skew has placed the ballot to repair in the future, so we use now or the ballot, whichever is higher.
+        long maxNowMicros = Math.max(currentTimeMillis() * 1000, request.ballot.unixMicros());
+        long ageMicros = maxNowMicros - permitted.latestCommitted.ballot.unixMicros();
+
+        String modifier = "";
+        boolean isTtlViolation;
+        if (isTtlViolation = (ageMicros >= gcGraceMicros))
+        {
+            if (participants.hasOldParticipants())
+                modifier = " (older than legacy TTL expiry with at least one legacy participant)";
+            else
+                modifier = " (older than legacy TTL expiry)";
+        }
+        String message = String.format("Linearizability violation%s: %s witnessed %s of latest %s (withLatest: %s, readResponses: %s, maxLowBound: %s, status: %s); %s promised with latest %s",
+                                       modifier, request.ballot, consistency(request.ballot), latestCommitted,
+                                       withLatest, readResponses
+                        .stream()
+                        .map(Message::from)
+                        .map(Object::toString)
+                        .collect(Collectors.joining(", ", "[", "]")),
+                                       maxLowBound, outcome, from, permitted.latestCommitted);
+
+        PaxosMetrics.linearizabilityViolations.inc();
+        linearizabilityViolationDetected = true;
+
+        try
+        {
+            switch (DatabaseDescriptor.paxosOnLinearizabilityViolations())
+            {
+                default: throw new AssertionError();
+                case fail:
+                    signalDone(new MaybeFailure(new Paxos.MaybeFailure(true, "A linearizability violation was detected", participants.sizeOfPoll(), participants.sizeOfConsensusQuorum, withLatest() + needLatest(), Collections.emptyMap()), participants));
+                    return true;
+                case log:
+                    if (isTtlViolation && LOG_TTL_LINEARIZABILITY_VIOLATIONS) logger.warn(message);
+                    else logger.error(message);
+                    return false;
+                case ignore:
+                    return false;
+            }
+        }
+        finally
+        {
+            Runnable run = onLinearizabilityViolation;
+            if (run != null)
+                run.run();
+        }
+    }
+
+    /**
+     * Save a read response from a node that we know to have witnessed the most recent commit
+     *
+     * Must be invoked while owning lock
+     */
+    private void addReadResponse(ReadResponse response, InetAddressAndPort from)
+    {
+        readResponses.add(Message.synthetic(from, PAXOS2_PREPARE_RSP, response));
+    }
+
+    @Override
+    public synchronized void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        if (logger.isTraceEnabled())
+            logger.trace("{} {} failure from {}", request, reason, from);
+
+        if (isDone())
+            return;
+
+        super.onFailureWithMutex(from, reason);
+        ++failures;
+
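+        // once enough failures have accrued that a consensus quorum can no longer respond, give up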
+        if (failures + participants.sizeOfConsensusQuorum == 1 + participants.sizeOfPoll())
+            signalDone(MAYBE_FAILURE);
+    }
+
+    private void signalDone(Outcome kindOfOutcome)
+    {
+        signalDone(toStatus(kindOfOutcome));
+    }
+
+    private void signalDone(Status status)
+    {
+        if (isDone())
+            throw new IllegalStateException();
+
+        this.outcome = status;
+        if (onDone != null)
+            onDone.accept(outcome);
+        notifyAll();
+    }
+
+    private Status toStatus(Outcome outcome)
+    {
+        switch (outcome)
+        {
+            case ELECTORATE_MISMATCH:
+                return new ElectorateMismatch(participants, request.ballot);
+            case SUPERSEDED:
+                return new Superseded(supersededBy, participants);
+            case FOUND_INCOMPLETE_ACCEPTED:
+                return new FoundIncompleteAccepted(request.ballot, participants, latestAccepted);
+            case FOUND_INCOMPLETE_COMMITTED:
+                return new FoundIncompleteCommitted(request.ballot, participants, latestCommitted);
+            case PROMISED:
+                return Success.readOrWrite(request.ballot, participants, readResponses, hasProposalStability);
+            case READ_PERMITTED:
+                if (!hasProposalStability)
+                    throw new IllegalStateException();
+                return Success.read(request.ballot, participants, readResponses, supersededBy);
+            case MAYBE_FAILURE:
+                return new MaybeFailure(new Paxos.MaybeFailure(participants, withLatest(), failureReasonsAsMap()), participants);
+            default:
+                throw new IllegalStateException();
+        }
+    }
+
+    /**
+     * See {@link PaxosPrepareRefresh}
+     *
+     * Must be invoked while owning lock
+     */
+    private void refreshStaleParticipants()
+    {
+        if (refreshStaleParticipants == null)
+            refreshStaleParticipants = new PaxosPrepareRefresh(request.ballot, participants, latestCommitted, this);
+
+        refreshStaleParticipants.refresh(needLatest);
+        needLatest.clear();
+    }
+
+    @Override
+    public void onRefreshFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        onFailure(from, reason);
+    }
+
+    public synchronized void onRefreshSuccess(Ballot isSupersededBy, InetAddressAndPort from)
+    {
+        if (logger.isTraceEnabled())
+            logger.trace("Refresh {} from {}", isSupersededBy == null ? "Success" : "SupersededBy(" + isSupersededBy + ')', from);
+
+        if (isDone())
+            return;
+
+        if (isSupersededBy != null)
+        {
+            supersededBy = isSupersededBy;
+            if (hasProposalStability) signalDone(Outcome.READ_PERMITTED);
+            else signalDone(SUPERSEDED);
+        }
+        else
+        {
+            withLatest.add(from);
+            if (withLatest.size() >= participants.sizeOfConsensusQuorum)
+                signalDone(hasOnlyPromises ? Outcome.PROMISED : Outcome.READ_PERMITTED);
+        }
+    }
+
+    static abstract class AbstractRequest<R extends AbstractRequest<R>>
+    {
+        final Ballot ballot;
+        final Electorate electorate;
+        final SinglePartitionReadCommand read;
+        final boolean isForWrite;
+        final DecoratedKey partitionKey;
+        final TableMetadata table;
+
+        AbstractRequest(Ballot ballot, Electorate electorate, SinglePartitionReadCommand read, boolean isForWrite)
+        {
+            this.ballot = ballot;
+            this.electorate = electorate;
+            this.read = read;
+            this.isForWrite = isForWrite;
+            this.partitionKey = read.partitionKey();
+            this.table = read.metadata();
+        }
+
+        AbstractRequest(Ballot ballot, Electorate electorate, DecoratedKey partitionKey, TableMetadata table, boolean isForWrite)
+        {
+            this.ballot = ballot;
+            this.electorate = electorate;
+            this.partitionKey = partitionKey;
+            this.table = table;
+            this.read = null;
+            this.isForWrite = isForWrite;
+        }
+
+        abstract R withoutRead();
+
+        public String toString()
+        {
+            return "Prepare(" + ballot + ')';
+        }
+    }
+
+    static class Request extends AbstractRequest<Request>
+    {
+        Request(Ballot ballot, Electorate electorate, SinglePartitionReadCommand read, boolean isWrite)
+        {
+            super(ballot, electorate, read, isWrite);
+        }
+
+        private Request(Ballot ballot, Electorate electorate, DecoratedKey partitionKey, TableMetadata table, boolean isWrite)
+        {
+            super(ballot, electorate, partitionKey, table, isWrite);
+        }
+
+        Request withoutRead()
+        {
+            return read == null ? this : new Request(ballot, electorate, partitionKey, table, isForWrite);
+        }
+
+        public String toString()
+        {
+            return "Prepare(" + ballot + ')';
+        }
+    }
+
+    static class Response
+    {
+        final MaybePromise.Outcome outcome;
+
+        Response(MaybePromise.Outcome outcome)
+        {
+            this.outcome = outcome;
+        }
+        Permitted permitted() { return (Permitted) this; }
+        Rejected rejected() { return (Rejected) this; }
+
+        public boolean isRejected()
+        {
+            return outcome == REJECT;
+        }
+
+        public boolean isPromised()
+        {
+            return outcome == PROMISE;
+        }
+    }
+
+    static class Permitted extends Response
+    {
+        final long lowBound;
+        // a proposal that has been accepted but not committed, i.e. must be null or > latestCommit
+        @Nullable final Accepted latestAcceptedButNotCommitted;
+        final Committed latestCommitted;
+        @Nullable final ReadResponse readResponse;
+        // latestAcceptedButNotCommitted and latestCommitted were the same before and after the read occurred, and no incomplete promise was witnessed
+        final boolean hadProposalStability;
+        final Map<InetAddressAndPort, EndpointState> gossipInfo;
+        @Nullable final Ballot supersededBy;
+
+        Permitted(MaybePromise.Outcome outcome, long lowBound, @Nullable Accepted latestAcceptedButNotCommitted, Committed latestCommitted, @Nullable ReadResponse readResponse, boolean hadProposalStability, Map<InetAddressAndPort, EndpointState> gossipInfo, @Nullable Ballot supersededBy)
+        {
+            super(outcome);
+            this.lowBound = lowBound;
+            this.latestAcceptedButNotCommitted = latestAcceptedButNotCommitted;
+            this.latestCommitted = latestCommitted;
+            this.hadProposalStability = hadProposalStability;
+            this.readResponse = readResponse;
+            this.gossipInfo = gossipInfo;
+            this.supersededBy = supersededBy;
+        }
+
+        @Override
+        public String toString()
+        {
+            return "Promise(" + latestAcceptedButNotCommitted + ", " + latestCommitted + ", " + hadProposalStability + ", " + gossipInfo + ')';
+        }
+    }
+
+    static class Rejected extends Response
+    {
+        final Ballot supersededBy;
+
+        Rejected(Ballot supersededBy)
+        {
+            super(REJECT);
+            this.supersededBy = supersededBy;
+        }
+
+        @Override
+        public String toString()
+        {
+            return "RejectPromise(supersededBy=" + supersededBy + ')';
+        }
+    }
+
+    public static class RequestHandler implements IVerbHandler<Request>
+    {
+        @Override
+        public void doVerb(Message<Request> message)
+        {
+            Response response = execute(message.payload, message.from());
+            if (response == null)
+                MessagingService.instance().respondWithFailure(UNKNOWN, message);
+            else
+                MessagingService.instance().respond(response, message);
+        }
+
+        static Response execute(AbstractRequest<?> request, InetAddressAndPort from)
+        {
+            if (!isInRangeAndShouldProcess(from, request.partitionKey, request.table, request.read != null))
+                return null;
+
+            long start = nanoTime();
+            try (PaxosState state = get(request.partitionKey, request.table))
+            {
+                return execute(request, state);
+            }
+            finally
+            {
+                Keyspace.openAndGetStore(request.table).metric.casPrepare.addNano(nanoTime() - start);
+            }
+        }
+
+        static Response execute(AbstractRequest<?> request, PaxosState state)
+        {
+            MaybePromise result = state.promiseIfNewer(request.ballot, request.isForWrite);
+            switch (result.outcome)
+            {
+                case PROMISE:
+                case PERMIT_READ:
+                    // verify electorates; if they differ, send back gossip info for superset of two participant sets
+                    Map<InetAddressAndPort, EndpointState> gossipInfo = verifyElectorate(request.electorate, Electorate.get(request.table, request.partitionKey, consistency(request.ballot)));
+                    ReadResponse readResponse = null;
+
+                    // Check we cannot race with a proposal, i.e. that we have not made a promise that
+                    // could be in the process of making a proposal. If a majority of nodes have made no such promise
+                    // then either we must have witnessed it (since it must have been committed), or the proposal
+                    // will now be rejected by our promises.
+
+                    // This is logically complicated a bit by reading from a subset of the consensus group when there are
+                    // pending nodes; however, electorate verification will cause us to retry if the pending status changes
+                    // during execution; otherwise if the most recent commit we witnessed wasn't witnessed by a read response
+                    // we will abort and retry, and we must witness it by the above argument.
+
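+                    // an accepted but empty proposal that is newer than the latest commit is treated as the most recent commit, since it has no visible effect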
+                    Ballot mostRecentCommit = result.before.accepted != null
+                                              && result.before.accepted.ballot.compareTo(result.before.committed.ballot) > 0
+                                              && result.before.accepted.update.isEmpty()
+                                              ? result.before.accepted.ballot : result.before.committed.ballot;
+
+                    boolean hasProposalStability = mostRecentCommit.equals(result.before.promisedWrite)
+                                                   || mostRecentCommit.compareTo(result.before.promisedWrite) > 0;
+
+                    if (request.read != null)
+                    {
+                        try (ReadExecutionController executionController = request.read.executionController();
+                             UnfilteredPartitionIterator iterator = request.read.executeLocally(executionController))
+                        {
+                            readResponse = request.read.createResponse(iterator, executionController.getRepairedDataInfo());
+                        }
+
+                        if (hasProposalStability)
+                        {
+                            Snapshot now = state.current(request.ballot);
+                            hasProposalStability = now.promisedWrite == result.after.promisedWrite
+                                    && now.committed == result.after.committed
+                                    && now.accepted == result.after.accepted;
+                        }
+                    }
+
+                    Ballot supersededBy = result.outcome == PROMISE ? null : result.after.latestWitnessedOrLowBound();
+                    Accepted acceptedButNotCommitted = result.after.accepted;
+                    Committed committed = result.after.committed;
+
+                    ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(request.table.id);
+                    long lowBound = cfs.getPaxosRepairLowBound(request.partitionKey).uuidTimestamp();
+                    return new Permitted(result.outcome, lowBound, acceptedButNotCommitted, committed, readResponse, hasProposalStability, gossipInfo, supersededBy);
+
+                case REJECT:
+                    return new Rejected(result.supersededBy());
+
+                default:
+                    throw new IllegalStateException();
+            }
+        }
+    }
+
+    static abstract class AbstractRequestSerializer<R extends AbstractRequest<R>, T> implements IVersionedSerializer<R>
+    {
+        abstract R construct(T param, Ballot ballot, Electorate electorate, SinglePartitionReadCommand read, boolean isWrite);
+        abstract R construct(T param, Ballot ballot, Electorate electorate, DecoratedKey partitionKey, TableMetadata table, boolean isWrite);
+
+        @Override
+        public void serialize(R request, DataOutputPlus out, int version) throws IOException
+        {
+            request.ballot.serialize(out);
+            Electorate.serializer.serialize(request.electorate, out, version);
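+            // flags byte: bit 0 set => a read command follows; bit 1 set => the request is NOT for a write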
+            out.writeByte((request.read != null ? 1 : 0) | (request.isForWrite ? 0 : 2));
+            if (request.read != null)
+            {
+
+                ReadCommand.serializer.serialize(request.read, out, version);
+            }
+            else
+            {
+                request.table.id.serialize(out);
+                DecoratedKey.serializer.serialize(request.partitionKey, out, version);
+            }
+        }
+
+        public R deserialize(T param, DataInputPlus in, int version) throws IOException
+        {
+            Ballot ballot = Ballot.deserialize(in);
+            Electorate electorate = Electorate.serializer.deserialize(in, version);
+            byte flag = in.readByte();
+            if ((flag & 1) != 0)
+            {
+                SinglePartitionReadCommand readCommand = (SinglePartitionReadCommand) ReadCommand.serializer.deserialize(in, version);
+                return construct(param, ballot, electorate, readCommand, (flag & 2) == 0);
+            }
+            else
+            {
+                TableMetadata table = Schema.instance.getExistingTableMetadata(TableId.deserialize(in));
+                DecoratedKey partitionKey = (DecoratedKey) DecoratedKey.serializer.deserialize(in, table.partitioner, version);
+                return construct(param, ballot, electorate, partitionKey, table, (flag & 2) == 0);
+            }
+        }
+
+        @Override
+        public long serializedSize(R request, int version)
+        {
+            return Ballot.sizeInBytes()
+                   + Electorate.serializer.serializedSize(request.electorate, version)
+                   + 1 + (request.read != null
+                        ? ReadCommand.serializer.serializedSize(request.read, version)
+                        : request.table.id.serializedSize()
+                            + DecoratedKey.serializer.serializedSize(request.partitionKey, version));
+        }
+    }
+
+    public static class RequestSerializer extends AbstractRequestSerializer<Request, Object>
+    {
+        Request construct(Object ignore, Ballot ballot, Electorate electorate, SinglePartitionReadCommand read, boolean isWrite)
+        {
+            return new Request(ballot, electorate, read, isWrite);
+        }
+
+        Request construct(Object ignore, Ballot ballot, Electorate electorate, DecoratedKey partitionKey, TableMetadata table, boolean isWrite)
+        {
+            return new Request(ballot, electorate, partitionKey, table, isWrite);
+        }
+
+        public Request deserialize(DataInputPlus in, int version) throws IOException
+        {
+            return deserialize(null, in, version);
+        }
+    }
+
+    public static class ResponseSerializer implements IVersionedSerializer<Response>
+    {
+        public void serialize(Response response, DataOutputPlus out, int version) throws IOException
+        {
+            if (response.isRejected())
+            {
+                out.writeByte(0);
+                Rejected rejected = (Rejected) response;
+                rejected.supersededBy.serialize(out);
+            }
+            else
+            {
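+                // flags byte: bit 0 => permitted (a 0 byte alone means rejected), bit 1 => accepted-but-not-committed present,
+                // bit 2 => read response present, bit 3 => had proposal stability, bit 4 => outcome is PERMIT_READ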
+                Permitted promised = (Permitted) response;
+                out.writeByte(1
+                        | (promised.latestAcceptedButNotCommitted != null ? 2  : 0)
+                        | (promised.readResponse != null                  ? 4  : 0)
+                        | (promised.hadProposalStability                  ? 8  : 0)
+                        | (promised.outcome == PERMIT_READ ? 16 : 0)
+                );
+                out.writeUnsignedVInt(promised.lowBound);
+                if (promised.latestAcceptedButNotCommitted != null)
+                    Accepted.serializer.serialize(promised.latestAcceptedButNotCommitted, out, version);
+                Committed.serializer.serialize(promised.latestCommitted, out, version);
+                if (promised.readResponse != null)
+                    ReadResponse.serializer.serialize(promised.readResponse, out, version);
+                serializeMap(inetAddressAndPortSerializer, EndpointState.nullableSerializer, promised.gossipInfo, out, version);
+                if (promised.outcome == PERMIT_READ)
+                    promised.supersededBy.serialize(out);
+            }
+        }
+
+        public Response deserialize(DataInputPlus in, int version) throws IOException
+        {
+            byte flags = in.readByte();
+            if (flags == 0)
+            {
+                Ballot supersededBy = Ballot.deserialize(in);
+                return new Rejected(supersededBy);
+            }
+            else
+            {
+                long lowBound = in.readUnsignedVInt();
+                Accepted acceptedNotCommitted = (flags & 2) != 0 ? Accepted.serializer.deserialize(in, version) : null;
+                Committed committed = Committed.serializer.deserialize(in, version);
+                ReadResponse readResponse = (flags & 4) != 0 ? ReadResponse.serializer.deserialize(in, version) : null;
+                Map<InetAddressAndPort, EndpointState> gossipInfo = deserializeMap(inetAddressAndPortSerializer, EndpointState.nullableSerializer, newHashMap(), in, version);
+                MaybePromise.Outcome outcome = (flags & 16) != 0 ? PERMIT_READ : PROMISE;
+                boolean hasProposalStability = (flags & 8) != 0;
+                Ballot supersededBy = null;
+                if (outcome == PERMIT_READ)
+                    supersededBy = Ballot.deserialize(in);
+                return new Permitted(outcome, lowBound, acceptedNotCommitted, committed, readResponse, hasProposalStability, gossipInfo, supersededBy);
+            }
+        }
+
+        public long serializedSize(Response response, int version)
+        {
+            if (response.isRejected())
+            {
+                return 1 + Ballot.sizeInBytes();
+            }
+            else
+            {
+                Permitted permitted = (Permitted) response;
+                return 1
+                        + VIntCoding.computeUnsignedVIntSize(permitted.lowBound)
+                        + (permitted.latestAcceptedButNotCommitted == null ? 0 : Accepted.serializer.serializedSize(permitted.latestAcceptedButNotCommitted, version))
+                        + Committed.serializer.serializedSize(permitted.latestCommitted, version)
+                        + (permitted.readResponse == null ? 0 : ReadResponse.serializer.serializedSize(permitted.readResponse, version))
+                        + serializedSizeMap(inetAddressAndPortSerializer, EndpointState.nullableSerializer, permitted.gossipInfo, version)
+                        + (permitted.outcome == PERMIT_READ ? Ballot.sizeInBytes() : 0);
+            }
+        }
+    }
+
+    static <R extends AbstractRequest<R>> Message<R> withoutRead(Message<R> send)
+    {
+        if (send.payload.read == null)
+            return send;
+
+        return send.withPayload(send.payload.withoutRead());
+    }
+
+    public static void setOnLinearizabilityViolation(Runnable runnable)
+    {
+        assert onLinearizabilityViolation == null || runnable == null;
+        onLinearizabilityViolation = runnable;
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosPrepareRefresh.java b/src/java/org/apache/cassandra/service/paxos/PaxosPrepareRefresh.java
new file mode 100644
index 0000000..19aff74
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosPrepareRefresh.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.exceptions.WriteTimeoutException;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.RequestCallbackWithFailure;
+import org.apache.cassandra.service.paxos.Commit.Agreed;
+import org.apache.cassandra.service.paxos.Commit.Committed;
+import org.apache.cassandra.tracing.Tracing;
+
+import static org.apache.cassandra.exceptions.RequestFailureReason.TIMEOUT;
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REFRESH_REQ;
+import static org.apache.cassandra.service.paxos.Commit.isAfter;
+import static org.apache.cassandra.service.paxos.PaxosRequestCallback.shouldExecuteOnSelf;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.apache.cassandra.utils.NullableSerializer.deserializeNullable;
+import static org.apache.cassandra.utils.NullableSerializer.serializeNullable;
+import static org.apache.cassandra.utils.NullableSerializer.serializedSizeNullable;
+
+/**
+ * Nodes that have promised in response to our prepare may be missing the latestCommit, meaning we cannot be sure the
+ * prior round has been committed to the necessary quorum of participants, so that it will be visible to future quorums.
+ *
+ * To resolve this problem, we submit the latest commit we have seen, and wait for confirmation before continuing
+ * (verifying that we are still promised in the process).
+ */
+public class PaxosPrepareRefresh implements RequestCallbackWithFailure<PaxosPrepareRefresh.Response>
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosPrepareRefresh.class);
+
+    public static final RequestHandler requestHandler = new RequestHandler();
+    public static final RequestSerializer requestSerializer = new RequestSerializer();
+    public static final ResponseSerializer responseSerializer = new ResponseSerializer();
+
+    interface Callbacks
+    {
+        void onRefreshFailure(InetAddressAndPort from, RequestFailureReason reason);
+        void onRefreshSuccess(Ballot isSupersededBy, InetAddressAndPort from);
+    }
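+
+    // In outline: refresh(..) pushes the latest Committed to the given peers (or applies it locally via
+    // executeOnSelf), and each peer replies with the ballot that superseded our promise, or null if the promise
+    // still holds; replies and failures are reported back through the Callbacks above.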
+
+    private final Message<Request> send;
+    private final Callbacks callbacks;
+
+    public PaxosPrepareRefresh(Ballot prepared, Paxos.Participants participants, Committed latestCommitted, Callbacks callbacks)
+    {
+        this.callbacks = callbacks;
+        this.send = Message.out(PAXOS2_PREPARE_REFRESH_REQ, new Request(prepared, latestCommitted));
+    }
+
+    void refresh(List<InetAddressAndPort> refresh)
+    {
+        boolean executeOnSelf = false;
+        for (int i = 0, size = refresh.size(); i < size ; ++i)
+        {
+            InetAddressAndPort destination = refresh.get(i);
+
+            if (logger.isTraceEnabled())
+                logger.trace("Refresh {} and Confirm {} to {}", send.payload.missingCommit, Ballot.toString(send.payload.promised, "Promise"), destination);
+
+            if (Tracing.isTracing())
+                Tracing.trace("Refresh {} and Confirm {} to {}", send.payload.missingCommit.ballot, send.payload.promised, destination);
+
+            if (shouldExecuteOnSelf(destination))
+                executeOnSelf = true;
+            else
+                MessagingService.instance().sendWithCallback(send, destination, this);
+        }
+
+        if (executeOnSelf)
+            PAXOS2_PREPARE_REFRESH_REQ.stage.execute(this::executeOnSelf);
+    }
+
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        callbacks.onRefreshFailure(from, reason);
+    }
+
+    @Override
+    public void onResponse(Message<Response> message)
+    {
+        onResponse(message.payload, message.from());
+    }
+
+    private void executeOnSelf()
+    {
+        Response response;
+        try
+        {
+            response = RequestHandler.execute(send.payload, getBroadcastAddressAndPort());
+            if (response == null)
+                return;
+        }
+        catch (Exception ex)
+        {
+            RequestFailureReason reason = UNKNOWN;
+            if (ex instanceof WriteTimeoutException) reason = TIMEOUT;
+            else logger.error("Failed to apply paxos refresh-prepare locally", ex);
+
+            onFailure(getBroadcastAddressAndPort(), reason);
+            return;
+        }
+        onResponse(response, getBroadcastAddressAndPort());
+    }
+
+    private void onResponse(Response response, InetAddressAndPort from)
+    {
+        callbacks.onRefreshSuccess(response.isSupersededBy, from);
+    }
+
+    private static class Request
+    {
+        final Ballot promised;
+        final Committed missingCommit;
+
+        Request(Ballot promised, Committed missingCommit)
+        {
+            this.promised = promised;
+            this.missingCommit = missingCommit;
+        }
+    }
+
+    static class Response
+    {
+        final Ballot isSupersededBy;
+        Response(Ballot isSupersededBy)
+        {
+            this.isSupersededBy = isSupersededBy;
+        }
+    }
+
+    public static class RequestHandler implements IVerbHandler<Request>
+    {
+        @Override
+        public void doVerb(Message<Request> message)
+        {
+            Response response = execute(message.payload, message.from());
+            if (response == null)
+                MessagingService.instance().respondWithFailure(UNKNOWN, message);
+            else
+                MessagingService.instance().respond(response, message);
+        }
+
+        public static Response execute(Request request, InetAddressAndPort from)
+        {
+            Agreed commit = request.missingCommit;
+
+            if (!Paxos.isInRangeAndShouldProcess(from, commit.update.partitionKey(), commit.update.metadata(), false))
+                return null;
+
+            try (PaxosState state = PaxosState.get(commit))
+            {
+                state.commit(commit);
+                Ballot latest = state.current(request.promised).latestWitnessedOrLowBound();
+                if (isAfter(latest, request.promised))
+                {
+                    Tracing.trace("Promise {} rescinded; latest is now {}", request.promised, latest);
+                    return new Response(latest);
+                }
+                else
+                {
+                    Tracing.trace("Promise confirmed for ballot {}", request.promised);
+                    return new Response(null);
+                }
+            }
+        }
+    }
+
+    public static class RequestSerializer implements IVersionedSerializer<Request>
+    {
+        @Override
+        public void serialize(Request request, DataOutputPlus out, int version) throws IOException
+        {
+            request.promised.serialize(out);
+            Committed.serializer.serialize(request.missingCommit, out, version);
+        }
+
+        @Override
+        public Request deserialize(DataInputPlus in, int version) throws IOException
+        {
+            Ballot promise = Ballot.deserialize(in);
+            Committed missingCommit = Committed.serializer.deserialize(in, version);
+            return new Request(promise, missingCommit);
+        }
+
+        @Override
+        public long serializedSize(Request request, int version)
+        {
+            return Ballot.sizeInBytes()
+                   + Committed.serializer.serializedSize(request.missingCommit, version);
+        }
+    }
+
+    public static class ResponseSerializer implements IVersionedSerializer<Response>
+    {
+        public void serialize(Response response, DataOutputPlus out, int version) throws IOException
+        {
+            serializeNullable(Ballot.Serializer.instance, response.isSupersededBy, out, version);
+        }
+
+        public Response deserialize(DataInputPlus in, int version) throws IOException
+        {
+            Ballot isSupersededBy = deserializeNullable(Ballot.Serializer.instance, in, version);
+            return new Response(isSupersededBy);
+        }
+
+        public long serializedSize(Response response, int version)
+        {
+            return serializedSizeNullable(Ballot.Serializer.instance, response.isSupersededBy, version);
+        }
+    }
+
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosPropose.java b/src/java/org/apache/cassandra/service/paxos/PaxosPropose.java
new file mode 100644
index 0000000..57d3459
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosPropose.java
@@ -0,0 +1,479 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLongFieldUpdater;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.Consumer;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.service.paxos.Commit.Proposal;
+import org.apache.cassandra.utils.concurrent.ConditionAsConsumer;
+
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.net.Verb.PAXOS2_PROPOSE_REQ;
+import static org.apache.cassandra.service.paxos.PaxosPropose.Superseded.SideEffects.NO;
+import static org.apache.cassandra.service.paxos.PaxosPropose.Superseded.SideEffects.MAYBE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.ConditionAsConsumer.newConditionAsConsumer;
+
+/**
+ * In waitForNoSideEffect mode, we will not return failure to the caller until
+ * we have received a complete set of refusal responses, or at least one accept,
+ * indicating (respectively) that we have had no side effect, or that we cannot
+ * know whether our proposal produced a side effect.
+ */
+public class PaxosPropose<OnDone extends Consumer<? super PaxosPropose.Status>> extends PaxosRequestCallback<PaxosPropose.Response>
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosPropose.class);
+
+    public static final RequestHandler requestHandler = new RequestHandler();
+    public static final RequestSerializer requestSerializer = new RequestSerializer();
+    public static final ResponseSerializer responseSerializer = new ResponseSerializer();
+
+    /**
+     * Represents the current status of a propose action: it is a status rather than a result,
+     * as the result may be unknown without sufficient responses (though in most cases it is the final status).
+     */
+    static class Status
+    {
+        enum Outcome { SUCCESS, SUPERSEDED, MAYBE_FAILURE }
+        final Outcome outcome;
+
+        Status(Outcome outcome)
+        {
+            this.outcome = outcome;
+        }
+        Superseded superseded() { return (Superseded) this; }
+        Paxos.MaybeFailure maybeFailure() { return ((MaybeFailure) this).info; }
+        public String toString() { return "Success"; }
+    }
+
+    static class Superseded extends Status
+    {
+        enum SideEffects { NO, MAYBE }
+        final Ballot by;
+        final SideEffects hadSideEffects;
+        Superseded(Ballot by, SideEffects hadSideEffects)
+        {
+            super(Outcome.SUPERSEDED);
+            this.by = by;
+            this.hadSideEffects = hadSideEffects;
+        }
+
+        public String toString() { return "Superseded(" + by + ',' + hadSideEffects + ')'; }
+    }
+
+    private static class MaybeFailure extends Status
+    {
+        final Paxos.MaybeFailure info;
+        MaybeFailure(Paxos.MaybeFailure info)
+        {
+            super(Outcome.MAYBE_FAILURE);
+            this.info = info;
+        }
+
+        public String toString() { return info.toString(); }
+    }
+
+    private static final Status success = new Status(Status.Outcome.SUCCESS);
+
+    private static final AtomicLongFieldUpdater<PaxosPropose> responsesUpdater = AtomicLongFieldUpdater.newUpdater(PaxosPropose.class, "responses");
+    private static final AtomicReferenceFieldUpdater<PaxosPropose, Ballot> supersededByUpdater = AtomicReferenceFieldUpdater.newUpdater(PaxosPropose.class, Ballot.class, "supersededBy");
+
+    @VisibleForTesting public static final long ACCEPT_INCREMENT = 1;
+    private static final int  REFUSAL_SHIFT = 21;
+    @VisibleForTesting public static final long REFUSAL_INCREMENT = 1L << REFUSAL_SHIFT;
+    private static final int  FAILURE_SHIFT = 42;
+    @VisibleForTesting public static final long FAILURE_INCREMENT = 1L << FAILURE_SHIFT;
+    private static final long MASK = (1L << REFUSAL_SHIFT) - 1L;
+
+    private final Proposal proposal;
+    /** Wait until we know if we may have had side effects */
+    private final boolean waitForNoSideEffect;
+    /** Number of contacted nodes */
+    final int participants;
+    /** Number of accepts required */
+    final int required;
+    /** Invoke on reaching a terminal status */
+    final OnDone onDone;
+
+    /**
+     * bit 0-20:  accepts
+     * bit 21-41: refusals
+     * bit 42-62: failures (timeouts/errors)
+     * bit 63:    ambiguous signal bit (i.e. state transitions that cannot be certain to signal uniquely flip this bit to claim signal responsibility)
+     *
+     * {@link #accepts}
+     * {@link #refusals}
+     * {@link #failures}
+     * {@link #notAccepts} (timeouts/errors+refusals)
+     */
+    private volatile long responses;
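+    // For illustration of the packing above: a state of 3 accepts, 1 refusal and 2 failures is encoded as
+    //   3L | (1L << REFUSAL_SHIFT) | (2L << FAILURE_SHIFT)
+    // and decoded again by accepts()/refusals()/failures() below, each masking 21 bits via MASK.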
+
+    /** The newest superseding ballot from a refusal; only returned to the caller if we fail to reach a quorum */
+    private volatile Ballot supersededBy;
+
+    private PaxosPropose(Proposal proposal, int participants, int required, boolean waitForNoSideEffect, OnDone onDone)
+    {
+        this.proposal = proposal;
+        assert required > 0;
+        this.waitForNoSideEffect = waitForNoSideEffect;
+        this.participants = participants;
+        this.required = required;
+        this.onDone = onDone;
+    }
+
+    /**
+     * Submit the proposal for commit with all replicas, and return an object that can be waited on synchronously for the result,
+     * or for the present status if the time elapses without a final result being reached.
+     * @param waitForNoSideEffect if true, on failure we will wait until we can say with certainty there are no side effects
+     *                            or until we know we will never be able to determine this with certainty
+     */
+    static Paxos.Async<Status> propose(Proposal proposal, Paxos.Participants participants, boolean waitForNoSideEffect)
+    {
+        if (waitForNoSideEffect && proposal.update.isEmpty())
+            waitForNoSideEffect = false; // by definition this has no "side effects" (besides linearizing the operation)
+
+        // to avoid unnecessary object allocations we extend PaxosPropose to implement Paxos.Async
+        class Async extends PaxosPropose<ConditionAsConsumer<Status>> implements Paxos.Async<Status>
+        {
+            private Async(Proposal proposal, int participants, int required, boolean waitForNoSideEffect)
+            {
+                super(proposal, participants, required, waitForNoSideEffect, newConditionAsConsumer());
+            }
+
+            public Status awaitUntil(long deadline)
+            {
+                try
+                {
+                    onDone.awaitUntil(deadline);
+                }
+                catch (InterruptedException e)
+                {
+                    Thread.currentThread().interrupt();
+                    return new MaybeFailure(new Paxos.MaybeFailure(true, participants, required, 0, emptyMap()));
+                }
+
+                return status();
+            }
+        }
+
+        Async propose = new Async(proposal, participants.sizeOfPoll(), participants.sizeOfConsensusQuorum, waitForNoSideEffect);
+        propose.start(participants);
+        return propose;
+    }
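+
+    // Usage sketch (hypothetical caller): the returned Async may be awaited with a deadline, e.g.
+    //   Paxos.Async<Status> async = propose(proposal, participants, true);
+    //   Status status = async.awaitUntil(deadline);
+    //   if (status.outcome == Status.Outcome.SUCCESS) { /* accepted by a quorum */ }
+    // Any other outcome is either Superseded (carrying the winning ballot) or a MaybeFailure snapshot.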
+
+    static <T extends Consumer<Status>> T propose(Proposal proposal, Paxos.Participants participants, boolean waitForNoSideEffect, T onDone)
+    {
+        if (waitForNoSideEffect && proposal.update.isEmpty())
+            waitForNoSideEffect = false; // by definition this has no "side effects" (besides linearizing the operation)
+
+        PaxosPropose<?> propose = new PaxosPropose<>(proposal, participants.sizeOfPoll(), participants.sizeOfConsensusQuorum, waitForNoSideEffect, onDone);
+        propose.start(participants);
+        return onDone;
+    }
+
+    void start(Paxos.Participants participants)
+    {
+        Message<Request> message = Message.out(PAXOS2_PROPOSE_REQ, new Request(proposal));
+
+        boolean executeOnSelf = false;
+        for (int i = 0, size = participants.sizeOfPoll(); i < size ; ++i)
+        {
+            InetAddressAndPort destination = participants.voter(i);
+            logger.trace("{} to {}", proposal, destination);
+            if (shouldExecuteOnSelf(destination)) executeOnSelf = true;
+            else MessagingService.instance().sendWithCallback(message, destination, this);
+        }
+
+        if (executeOnSelf)
+            PAXOS2_PROPOSE_REQ.stage.execute(() -> executeOnSelf(proposal));
+    }
+
+    /**
+     * @return the result as of now; unless the result is definitive, it is only a snapshot of the present incomplete status
+     */
+    Status status()
+    {
+        long responses = this.responses;
+
+        if (isSuccessful(responses))
+            return success;
+
+        if (!canSucceed(responses) && supersededBy != null)
+        {
+            Superseded.SideEffects sideEffects = hasNoSideEffects(responses) ? NO : MAYBE;
+            return new Superseded(supersededBy, sideEffects);
+        }
+
+        return new MaybeFailure(new Paxos.MaybeFailure(participants, required, accepts(responses), failureReasonsAsMap()));
+    }
+
+    private void executeOnSelf(Proposal proposal)
+    {
+        executeOnSelf(proposal, RequestHandler::execute);
+    }
+
+    public void onResponse(Response response, InetAddressAndPort from)
+    {
+        if (logger.isTraceEnabled())
+            logger.trace("{} for {} from {}", response, proposal, from);
+
+        Ballot supersededBy = response.supersededBy;
+        if (supersededBy != null)
+            supersededByUpdater.accumulateAndGet(this, supersededBy, (a, b) -> a == null ? b : b.uuidTimestamp() > a.uuidTimestamp() ? b : a);
+
+        long increment = supersededBy == null
+                ? ACCEPT_INCREMENT
+                : REFUSAL_INCREMENT;
+
+        update(increment);
+    }
+
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        if (logger.isTraceEnabled())
+            logger.trace("{} {} failure from {}", proposal, reason, from);
+
+        super.onFailure(from, reason);
+        update(FAILURE_INCREMENT);
+    }
+
+    private void update(long increment)
+    {
+        long responses = responsesUpdater.addAndGet(this, increment);
+        if (shouldSignal(responses))
+            signalDone();
+    }
+
+    // returns true at most once for a given PaxosPropose, so we do not propagate a signal more than once
+    private boolean shouldSignal(long responses)
+    {
+        return shouldSignal(responses, required, participants, waitForNoSideEffect, responsesUpdater, this);
+    }
+
+    @VisibleForTesting
+    public static <T> boolean shouldSignal(long responses, int required, int participants, boolean waitForNoSideEffect, AtomicLongFieldUpdater<T> responsesUpdater, T update)
+    {
+        if (responses <= 0L) // already signalled via ambiguous signal bit
+            return false;
+
+        if (!isSuccessful(responses, required))
+        {
+            if (canSucceed(responses, required, participants))
+                return false;
+
+            if (waitForNoSideEffect && !hasPossibleSideEffects(responses))
+                return hasNoSideEffects(responses, participants);
+        }
+
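+        // Flipping the sign bit claims signalling responsibility exactly once: the first caller to set it observes
+        // the previous non-negative value and returns true, while later callers (and the responses <= 0 fast path
+        // above) observe a negative value and return false.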
+        return responsesUpdater.getAndUpdate(update, x -> x | Long.MIN_VALUE) >= 0L;
+    }
+
+    private void signalDone()
+    {
+        if (onDone != null)
+            onDone.accept(status());
+    }
+
+    private boolean isSuccessful(long responses)
+    {
+        return isSuccessful(responses, required);
+    }
+
+    private static boolean isSuccessful(long responses, int required)
+    {
+        return accepts(responses) >= required;
+    }
+
+    private boolean canSucceed(long responses)
+    {
+        return canSucceed(responses, required, participants);
+    }
+
+    private static boolean canSucceed(long responses, int required, int participants)
+    {
+        return refusals(responses) == 0 && required <= participants - failures(responses);
+    }
+
+    // Note: this is only reliable if !failFast
+    private boolean hasNoSideEffects(long responses)
+    {
+        return hasNoSideEffects(responses, participants);
+    }
+
+    private static boolean hasNoSideEffects(long responses, int participants)
+    {
+        return refusals(responses) == participants;
+    }
+
+    private static boolean hasPossibleSideEffects(long responses)
+    {
+        return accepts(responses) + failures(responses) > 0;
+    }
+
+    /** {@link #responses} */
+    private static int accepts(long responses)
+    {
+        return (int) (responses & MASK);
+    }
+
+    /** {@link #responses} */
+    private static int notAccepts(long responses)
+    {
+        return failures(responses) + refusals(responses);
+    }
+
+    /** {@link #responses} */
+    private static int refusals(long responses)
+    {
+        return (int) ((responses >>> REFUSAL_SHIFT) & MASK);
+    }
+
+    /** {@link #responses} */
+    private static int failures(long responses)
+    {
+        return (int) ((responses >>> FAILURE_SHIFT) & MASK);
+    }
+
+    /**
+     * A Proposal to submit to another node
+     */
+    static class Request
+    {
+        final Proposal proposal;
+        Request(Proposal proposal)
+        {
+            this.proposal = proposal;
+        }
+
+        public String toString()
+        {
+            return proposal.toString("Propose");
+        }
+    }
+
+    /**
+     * The response to a proposal, indicating success (if {@code supersededBy == null}),
+     * or failure, alongside the ballot that beat us.
+     */
+    static class Response
+    {
+        final Ballot supersededBy;
+        Response(Ballot supersededBy)
+        {
+            this.supersededBy = supersededBy;
+        }
+        public String toString() { return supersededBy == null ? "Accept" : "RejectProposal(supersededBy=" + supersededBy + ')'; }
+    }
+
+    /**
+     * The proposal request handler, i.e. it receives a proposal from a peer and responds with either accept or reject
+     */
+    public static class RequestHandler implements IVerbHandler<Request>
+    {
+        @Override
+        public void doVerb(Message<Request> message)
+        {
+            Response response = execute(message.payload.proposal, message.from());
+            if (response == null)
+                MessagingService.instance().respondWithFailure(UNKNOWN, message);
+            else
+                MessagingService.instance().respond(response, message);
+        }
+
+        public static Response execute(Proposal proposal, InetAddressAndPort from)
+        {
+            if (!Paxos.isInRangeAndShouldProcess(from, proposal.update.partitionKey(), proposal.update.metadata(), false))
+                return null;
+
+            long start = nanoTime();
+            try (PaxosState state = PaxosState.get(proposal))
+            {
+                return new Response(state.acceptIfLatest(proposal));
+            }
+            finally
+            {
+                Keyspace.openAndGetStore(proposal.update.metadata()).metric.casPropose.addNano(nanoTime() - start);
+            }
+        }
+    }
+
+    public static class RequestSerializer implements IVersionedSerializer<Request>
+    {
+        @Override
+        public void serialize(Request request, DataOutputPlus out, int version) throws IOException
+        {
+            Proposal.serializer.serialize(request.proposal, out, version);
+        }
+
+        @Override
+        public Request deserialize(DataInputPlus in, int version) throws IOException
+        {
+            Proposal propose = Proposal.serializer.deserialize(in, version);
+            return new Request(propose);
+        }
+
+        @Override
+        public long serializedSize(Request request, int version)
+        {
+            return Proposal.serializer.serializedSize(request.proposal, version);
+        }
+    }
+
+    public static class ResponseSerializer implements IVersionedSerializer<Response>
+    {
+        public void serialize(Response response, DataOutputPlus out, int version) throws IOException
+        {
+            out.writeBoolean(response.supersededBy != null);
+            if (response.supersededBy != null)
+                response.supersededBy.serialize(out);
+        }
+
+        public Response deserialize(DataInputPlus in, int version) throws IOException
+        {
+            boolean isSuperseded = in.readBoolean();
+            return isSuperseded ? new Response(Ballot.deserialize(in)) : new Response(null);
+        }
+
+        public long serializedSize(Response response, int version)
+        {
+            return response.supersededBy != null
+                    ? TypeSizes.sizeof(true) + Ballot.sizeInBytes()
+                    : TypeSizes.sizeof(false);
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosRepair.java b/src/java/org/apache/cassandra/service/paxos/PaxosRepair.java
new file mode 100644
index 0000000..d88323c
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosRepair.java
@@ -0,0 +1,706 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Function;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.exceptions.UnavailableException;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.RequestCallbackWithFailure;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.ExecutorUtils;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.MonotonicClock;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.config.CassandraRelevantProperties.PAXOS_REPAIR_RETRY_TIMEOUT_IN_MS;
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.net.Verb.PAXOS2_REPAIR_REQ;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.service.paxos.Commit.*;
+import static org.apache.cassandra.service.paxos.ContentionStrategy.Type.REPAIR;
+import static org.apache.cassandra.service.paxos.ContentionStrategy.waitUntilForContention;
+import static org.apache.cassandra.service.paxos.Paxos.*;
+import static org.apache.cassandra.service.paxos.PaxosPrepare.*;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.NullableSerializer.deserializeNullable;
+import static org.apache.cassandra.utils.NullableSerializer.serializeNullable;
+import static org.apache.cassandra.utils.NullableSerializer.serializedSizeNullable;
+
+/**
+ * Facility to finish any in-progress paxos transaction, and ensure that a quorum of nodes agree on the most recent operation.
+ * Semantically, we simply ensure that any side effects that were "decided" before repair was initiated have been committed
+ * to a quorum of nodes.
+ * This means:
+ *   - any prepare that has _possibly_ reached a quorum of nodes will be invalidated
+ *   - any proposal that has been accepted by at least one node, but not known to be committed to any, will be proposed again
+ *   - any proposal that has been committed to at least one node, but not committed to all, will be committed to a quorum
+ *
+ * Note that once started, this continues to try to repair any ongoing operations for the partition up to 4 times.
+ * In a functioning cluster this should always be possible, but during a network partition this might cause the repair
+ * to fail.
+ *
+ * Requirements for correctness:
+ * - If performed during a range movement, we depend on a quorum (of the new topology) having been informed of the new
+ *   topology _prior_ to initiating this repair, and on this node having been a member of a quorum of nodes verifying
+ *   their topology is up-to-date.
+ *    - If a quorum of nodes is unaware of the new topology prior to initiating repair, an operation could simply occur
+ *      after repair completes that permits a linearization failure, such as with CASSANDRA-15745.
+ * - Paxos prepare rounds must also verify the topology being used with their peers
+ *    - If prepare rounds do not verify their topology, a node that is not a member of the quorum who have agreed
+ *      the latest topology could still perform an operation without being aware of the topology change, and permit a
+ *      linearization failure, such as with CASSANDRA-15745.
+ *
+ * With these issues addressed elsewhere, our algorithm is fairly simple.
+ * In brief:
+ *   1) Query all replicas for any promises or proposals they have witnessed that have not been committed,
+ *      and their most recent commit. Wait for a quorum of responses.
+ *   2) If this is the first time we have queried other nodes, we take note of the most recent ballot we see;
+ *      if this is not the first time we have queried other nodes, and a ballot newer than the one we previously
+ *      recorded has since been committed, we terminate (somebody else has done the work for us).
+ *   3) If we see an in-progress operation that is very recent, we wait for it to complete and try again
+ *   4) If we see a previously accepted operation, we attempt to complete it, or
+ *      if we see a prepare with no proposal, we propose an empty update to invalidate it;
+ *      otherwise we have nothing to do, as there is no operation that can have produced a side-effect before we began.
+ *   5) We prepare a paxos round to agree the new commit using a higher ballot than the one witnessed,
+ *      but a lower than one we would propose a new operation with. This permits newer operations to "beat" us so
+ *      that we do not interfere with normal paxos operations.
+ *   6) If we are "beaten" we start again (without delay, as (2) manages delays where necessary)
+ */
+public class PaxosRepair extends AbstractPaxosRepair
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosRepair.class);
+
+    public static final RequestSerializer requestSerializer = new RequestSerializer();
+    public static final ResponseSerializer responseSerializer = new ResponseSerializer();
+    public static final RequestHandler requestHandler = new RequestHandler();
+    private static final long RETRY_TIMEOUT_NANOS = getRetryTimeoutNanos();
+
+    private static final ScheduledExecutorPlus RETRIES = executorFactory().scheduled("PaxosRepairRetries");
+
+    private static long getRetryTimeoutNanos()
+    {
+        long retryMillis = PAXOS_REPAIR_RETRY_TIMEOUT_IN_MS.getLong();
+        return TimeUnit.MILLISECONDS.toNanos(retryMillis);
+    }
+
+    private final TableMetadata table;
+    private final ConsistencyLevel paxosConsistency;
+    private Participants participants;
+
+    private Ballot successCriteria;
+    private Ballot prevSupersededBy;
+    private int attempts;
+
+    public String toString()
+    {
+        return "PaxosRepair{" +
+               "key=" + partitionKey() +
+               ", table=" + table.toString() +
+               ", consistency=" + paxosConsistency +
+               ", participants=" + participants.electorate +
+               ", state=" + state() +
+               ", startedMillis=" + MonotonicClock.Global.approxTime.translate().toMillisSinceEpoch(startedNanos()) +
+               ", started=" + isStarted() +
+               '}';
+    }
+
+    /**
+     * Waiting for responses to PAXOS_REPAIR messages.
+     *
+     * This state may be entered multiple times; every time we fail for any reason, we restart from this state
+     */
+    private class Querying extends State implements RequestCallbackWithFailure<Response>, Runnable
+    {
+        private int successes;
+        private int failures;
+
+        private Ballot latestWitnessed;
+        private @Nullable Accepted latestAccepted;
+        private Committed latestCommitted;
+        private Ballot oldestCommitted;
+        private Ballot clashingPromise;
+
+        @Override
+        public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+        {
+            updateState(this, null, (i1, i2) -> i1.onFailure());
+        }
+
+        @Override
+        public void onResponse(Message<Response> msg)
+        {
+            logger.trace("PaxosRepair {} from {}", msg.payload, msg.from());
+            updateState(this, msg, Querying::onResponseInternal);
+        }
+
+        private State onFailure()
+        {
+            if (++failures + participants.sizeOfConsensusQuorum > participants.sizeOfPoll())
+                return retry(this);
+            return this;
+        }
+
+        private State onResponseInternal(Message<Response> msg)
+        {
+            latestWitnessed = latest(latestWitnessed, msg.payload.latestWitnessedOrLowBound);
+            latestAccepted = latest(latestAccepted, msg.payload.acceptedButNotCommitted);
+            latestCommitted = latest(latestCommitted, msg.payload.committed);
+            if (oldestCommitted == null || isAfter(oldestCommitted, msg.payload.committed))
+                oldestCommitted = msg.payload.committed.ballot;
+
+            if (isAfter(latestWitnessed, clashingPromise))
+                clashingPromise = null;
+            if (timestampsClash(latestAccepted, msg.payload.latestWitnessedOrLowBound))
+                clashingPromise = msg.payload.latestWitnessedOrLowBound;
+            if (timestampsClash(latestAccepted, latestWitnessed))
+                clashingPromise = latestWitnessed;
+
+            // once we receive the requisite number, we can simply proceed, and ignore future responses
+            if (++successes == participants.sizeOfConsensusQuorum)
+                return execute();
+
+            return this;
+        }
+
+        private State execute()
+        {
+            // if we have a timestamp clash, always prefer the accepted ballot
+            latestWitnessed = latest(latestAccepted, latestWitnessed);
+            Ballot latestPreviouslyWitnessed = latest(successCriteria, prevSupersededBy);
+
+            // Save as success criteria the latest promise seen by our first round; if we ever see anything newer
+            // than this committed, we know at least one paxos round has completed since we started, which is all we
+            // need. So, to avoid looping indefinitely in competition with others, we store this ballot for future
+            // retries so we can terminate based on other proposers' work
+            if (successCriteria == null || timestampsClash(successCriteria, latestWitnessed))
+            {
+                if (logger.isTraceEnabled())
+                    logger.trace("PaxosRepair of {} setting success criteria to {}", partitionKey(), Ballot.toString(latestWitnessed));
+
+                successCriteria = latestWitnessed;
+            }
+
+            boolean hasCommittedSuccessCriteria = isAfter(latestCommitted, successCriteria) || latestCommitted.hasBallot(successCriteria);
+            boolean isPromisedButNotAccepted    = isAfter(latestWitnessed, latestAccepted); // not necessarily promised - may be lowBound
+            boolean isAcceptedButNotCommitted   = isAfter(latestAccepted, latestCommitted);
+            boolean reproposalMayBeRejected     = clashingPromise != null || !isAfter(latestWitnessed, latestPreviouslyWitnessed);
+
+            if (hasCommittedSuccessCriteria)
+            {
+                if (logger.isTraceEnabled())
+                    logger.trace("PaxosRepair witnessed {} newer than success criteria {} (oldest: {})", latestCommitted, Ballot.toString(successCriteria), Ballot.toString(oldestCommitted));
+
+                // we have a new enough commit, but it might not have reached enough participants; make sure it has before terminating
+                // note: we could send to only those we know haven't witnessed it, but this is a rare operation so a small amount of redundant work is fine
+                return oldestCommitted.equals(latestCommitted.ballot)
+                        ? DONE
+                        : PaxosCommit.commit(latestCommitted, participants, paxosConsistency, commitConsistency(), true,
+                                             new CommittingRepair());
+            }
+            else if (isAcceptedButNotCommitted && !isPromisedButNotAccepted && !reproposalMayBeRejected)
+            {
+                if (logger.isTraceEnabled())
+                    logger.trace("PaxosRepair of {} completing {}", partitionKey(), latestAccepted);
+                // We need to complete this in-progress accepted proposal, which may not have been seen by a majority
+                // However, since we have not sought any promises, we can simply complete the existing proposal
+                // since this is an idempotent operation - both us and the original proposer (and others) can
+                // all do it at the same time without incident
+
+                // If ballots with same timestamp have been both accepted and rejected by different nodes,
+                // to avoid a livelock we simply try to poison, knowing we will fail but use a new ballot
+                // (note there are alternative approaches but this is conservative)
+
+                return PaxosPropose.propose(latestAccepted, participants, false,
+                        new ProposingRepair(latestAccepted));
+            }
+            else if (isAcceptedButNotCommitted || isPromisedButNotAccepted || latestWitnessed.compareTo(latestPreviouslyWitnessed) < 0)
+            {
+                Ballot ballot = staleBallotNewerThan(latest(latestWitnessed, latestPreviouslyWitnessed), paxosConsistency);
+                // We need to propose a no-op > latestPromised, to ensure we don't later discover
+                // that latestPromised had already been accepted (by a minority) and repair it
+                // This means starting a new ballot, but we choose one that is likely to lose a contention battle,
+                // since this operation is not urgent and we can piggy-back on other paxos operations
+                if (logger.isTraceEnabled())
+                    logger.trace("PaxosRepair of {} found incomplete promise or proposal; preparing stale ballot {}", partitionKey(), Ballot.toString(ballot));
+
+                return prepareWithBallot(ballot, participants, partitionKey(), table, false, false,
+                        new PoisonProposals());
+            }
+            else
+            {
+                logger.error("PaxosRepair illegal state latestWitnessed={}, latestAcceptedButNotCommitted={}, latestCommitted={}, oldestCommitted={}", latestWitnessed, latestAccepted, latestCommitted, oldestCommitted);
+                throw new IllegalStateException(); // should be logically impossible
+            }
+        }
+
+        public void run()
+        {
+            Message<Request> message = Message.out(PAXOS2_REPAIR_REQ, new Request(partitionKey(), table));
+            for (int i = 0, size = participants.sizeOfPoll(); i < size ; ++i)
+                MessagingService.instance().sendWithCallback(message, participants.voter(i), this);
+        }
+    }
+
+    /**
+     * We found either an incomplete promise or proposal, so we need to start a new paxos round to complete them
+     */
+    private class PoisonProposals extends ConsumerState<Status>
+    {
+        @Override
+        public State execute(Status input) throws Throwable
+        {
+            switch (input.outcome)
+            {
+                case MAYBE_FAILURE:
+                    return retry(this);
+
+                case READ_PERMITTED:
+                case SUPERSEDED:
+                    prevSupersededBy = latest(prevSupersededBy, input.retryWithAtLeast());
+                    return retry(this);
+
+                case FOUND_INCOMPLETE_ACCEPTED:
+                {
+                    // finish the in-progress proposal
+                    // cannot simply restart, as our latest promise is newer than the proposal
+                    // so we require a promise before we decide which proposal to complete
+                    // (else an "earlier" operation can sneak in and invalidate us while we're proposing
+                    // with a newer ballot)
+                    FoundIncompleteAccepted incomplete = input.incompleteAccepted();
+                    Proposal propose = new Proposal(incomplete.ballot, incomplete.accepted.update);
+                    logger.trace("PaxosRepair of {} found incomplete {}", partitionKey(), incomplete.accepted);
+                    return PaxosPropose.propose(propose, participants, false,
+                            new ProposingRepair(propose)); // we don't know if we're done, so we must restart
+                }
+
+                case FOUND_INCOMPLETE_COMMITTED:
+                {
+                    // finish the in-progress commit
+                    FoundIncompleteCommitted incomplete = input.incompleteCommitted();
+                    logger.trace("PaxosRepair of {} found in progress {}", partitionKey(), incomplete.committed);
+                    return PaxosCommit.commit(incomplete.committed, participants, paxosConsistency, commitConsistency(), true,
+                                              new CommitAndRestart()); // we don't know if we're done, so we must restart
+                }
+
+                case PROMISED:
+                {
+                    // propose the empty ballot
+                    logger.trace("PaxosRepair of {} submitting empty proposal", partitionKey());
+                    Proposal proposal = Proposal.empty(input.success().ballot, partitionKey(), table);
+                    return PaxosPropose.propose(proposal, participants, false,
+                            new ProposingRepair(proposal));
+                }
+
+                default:
+                    throw new IllegalStateException();
+            }
+        }
+    }
+
+    private class ProposingRepair extends ConsumerState<PaxosPropose.Status>
+    {
+        final Proposal proposal;
+        private ProposingRepair(Proposal proposal)
+        {
+            this.proposal = proposal;
+        }
+
+        @Override
+        public State execute(PaxosPropose.Status input)
+        {
+            switch (input.outcome)
+            {
+                case MAYBE_FAILURE:
+                    return retry(this);
+
+                case SUPERSEDED:
+                    if (isAfter(input.superseded().by, prevSupersededBy))
+                        prevSupersededBy = input.superseded().by;
+                    return retry(this);
+
+                case SUCCESS:
+                    if (proposal.update.isEmpty())
+                    {
+                        logger.trace("PaxosRepair of {} complete after successful empty proposal", partitionKey());
+                        return DONE;
+                    }
+
+                    logger.trace("PaxosRepair of {} committing successful proposal {}", partitionKey(), proposal);
+                    return PaxosCommit.commit(proposal.agreed(), participants, paxosConsistency, commitConsistency(), true,
+                                              new CommittingRepair());
+
+                default:
+                    throw new IllegalStateException();
+            }
+        }
+    }
+
+    private class CommittingRepair extends ConsumerState<PaxosCommit.Status>
+    {
+        @Override
+        public State execute(PaxosCommit.Status input)
+        {
+            logger.trace("PaxosRepair of {} {}", partitionKey(), input);
+            return input.isSuccess() ? DONE : retry(this);
+        }
+    }
+
+    private class CommitAndRestart extends ConsumerState<PaxosCommit.Status>
+    {
+        @Override
+        public State execute(PaxosCommit.Status input)
+        {
+            return restart(this);
+        }
+    }
+
+    private PaxosRepair(DecoratedKey partitionKey, Ballot incompleteBallot, TableMetadata table, ConsistencyLevel paxosConsistency)
+    {
+        super(partitionKey, incompleteBallot);
+        // TODO: move precondition into super ctor
+        Preconditions.checkArgument(paxosConsistency.isSerialConsistency());
+        this.table = table;
+        this.paxosConsistency = paxosConsistency;
+        this.successCriteria = incompleteBallot;
+    }
+
+    public static PaxosRepair create(ConsistencyLevel consistency, DecoratedKey partitionKey, Ballot incompleteBallot, TableMetadata table)
+    {
+        return new PaxosRepair(partitionKey, incompleteBallot, table, consistency);
+    }
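+
+    // Rough flow sketch (the start/await plumbing is inherited from AbstractPaxosRepair): a repair built via
+    // create(..) begins in the Querying state once started, then moves through PoisonProposals / ProposingRepair /
+    // CommittingRepair as needed, retrying via retry()/restart() until it reaches DONE or a Failure.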
+
+    private State retry(State state)
+    {
+        Preconditions.checkState(isStarted());
+        if (isResult(state))
+            return state;
+
+        return restart(state, waitUntilForContention(++attempts, table, partitionKey(), paxosConsistency, REPAIR));
+    }
+
+    @Override
+    public State restart(State state, long waitUntil)
+    {
+        if (isResult(state))
+            return state;
+
+        participants = Participants.get(table, partitionKey(), paxosConsistency);
+
+        if (waitUntil > Long.MIN_VALUE && waitUntil - startedNanos() > RETRY_TIMEOUT_NANOS)
+            return new Failure(null);
+
+        try
+        {
+            participants.assureSufficientLiveNodesForRepair();
+        }
+        catch (UnavailableException e)
+        {
+            return new Failure(e);
+        }
+
+        Querying querying = new Querying();
+        long now;
+        if (waitUntil == Long.MIN_VALUE || waitUntil - (now = nanoTime()) < 0) querying.run();
+        else RETRIES.schedule(querying, waitUntil - now, NANOSECONDS);
+
+        return querying;
+    }
+
+    private ConsistencyLevel commitConsistency()
+    {
+        Preconditions.checkState(paxosConsistency.isSerialConsistency());
+        return paxosConsistency.isDatacenterLocal() ? ConsistencyLevel.LOCAL_QUORUM : ConsistencyLevel.QUORUM;
+    }
+
+    static class Request
+    {
+        final DecoratedKey partitionKey;
+        final TableMetadata table;
+        Request(DecoratedKey partitionKey, TableMetadata table)
+        {
+            this.partitionKey = partitionKey;
+            this.table = table;
+        }
+    }
+
+    /**
+     * The response to a repair query: the latest ballot witnessed (or low bound), any accepted-but-not-committed
+     * proposal, and the most recent commit known to the replica.
+     */
+    static class Response
+    {
+        @Nonnull final Ballot latestWitnessedOrLowBound;
+        @Nullable final Accepted acceptedButNotCommitted;
+        @Nonnull final Committed committed;
+
+        Response(Ballot latestWitnessedOrLowBound, @Nullable Accepted acceptedButNotCommitted, Committed committed)
+        {
+            this.latestWitnessedOrLowBound = latestWitnessedOrLowBound;
+            this.acceptedButNotCommitted = acceptedButNotCommitted;
+            this.committed = committed;
+        }
+
+        public String toString()
+        {
+            return String.format("Response(%s, %s, %s)", latestWitnessedOrLowBound, acceptedButNotCommitted, committed);
+        }
+    }
+
+    private static Map<String, Set<InetAddressAndPort>> mapToDc(Collection<InetAddressAndPort> endpoints, Function<InetAddressAndPort, String> dcFunc)
+    {
+        Map<String, Set<InetAddressAndPort>> map = new HashMap<>();
+        endpoints.forEach(e -> map.computeIfAbsent(dcFunc.apply(e), k -> new HashSet<>()).add(e));
+        return map;
+    }
+
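+    // true if a simple majority is live, or (when a strict quorum is not required) if at most one node is dead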
+    private static boolean hasQuorumOrSingleDead(Collection<InetAddressAndPort> all, Collection<InetAddressAndPort> live, boolean requireQuorum)
+    {
+        Preconditions.checkArgument(all.size() >= live.size());
+        return live.size() >= (all.size() / 2) + 1 || (!requireQuorum && live.size() >= all.size() - 1);
+    }
+
+    @VisibleForTesting
+    static boolean hasSufficientLiveNodesForTopologyChange(Collection<InetAddressAndPort> allEndpoints, Collection<InetAddressAndPort> liveEndpoints, Function<InetAddressAndPort, String> dcFunc, boolean onlyQuorumRequired, boolean strictQuorum)
+    {
+
+        Map<String, Set<InetAddressAndPort>> allDcMap = mapToDc(allEndpoints, dcFunc);
+        Map<String, Set<InetAddressAndPort>> liveDcMap = mapToDc(liveEndpoints, dcFunc);
+
+        if (!hasQuorumOrSingleDead(allEndpoints, liveEndpoints, strictQuorum))
+            return false;
+
+        if (onlyQuorumRequired)
+            return true;
+
+        for (Map.Entry<String, Set<InetAddressAndPort>> entry : allDcMap.entrySet())
+        {
+            Set<InetAddressAndPort> all = entry.getValue();
+            Set<InetAddressAndPort> live = liveDcMap.getOrDefault(entry.getKey(), Collections.emptySet());
+            if (!hasQuorumOrSingleDead(all, live, strictQuorum))
+                return false;
+        }
+        return true;
+    }
+
+    /**
+     * Checks whether we have enough live nodes to perform a paxos repair for a topology change. Generally, this means we need
+     * enough live participants to reach EACH_QUORUM, with a few exceptions. The EACH_QUORUM requirement is meant to support
+     * workloads using either SERIAL or LOCAL_SERIAL.
+     *
+     * If paxos_topology_repair_strict_each_quorum is set to false (the default), we accept either a quorum or n-1 live nodes,
+     * both in the cluster as a whole and per dc. If paxos_topology_repair_no_dc_checks is true, we only check the live nodes
+     * in the cluster and do not do any per-dc checks.
+     */
+    public static boolean hasSufficientLiveNodesForTopologyChange(Keyspace keyspace, Range<Token> range, Collection<InetAddressAndPort> liveEndpoints)
+    {
+        return hasSufficientLiveNodesForTopologyChange(keyspace.getReplicationStrategy().getNaturalReplicasForToken(range.right).endpoints(),
+                                                       liveEndpoints,
+                                                       DatabaseDescriptor.getEndpointSnitch()::getDatacenter,
+                                                       DatabaseDescriptor.paxoTopologyRepairNoDcChecks(),
+                                                       DatabaseDescriptor.paxoTopologyRepairStrictEachQuorum());
+    }
+
+    /**
+     * The repair request handler: receives a repair request from a peer and responds with the latest
+     * promised, accepted and committed state it has witnessed for the partition.
+     */
+    public static class RequestHandler implements IVerbHandler<PaxosRepair.Request>
+    {
+        @Override
+        public void doVerb(Message<PaxosRepair.Request> message)
+        {
+            PaxosRepair.Request request = message.payload;
+            if (!isInRangeAndShouldProcess(message.from(), request.partitionKey, request.table, false))
+            {
+                MessagingService.instance().respondWithFailure(UNKNOWN, message);
+                return;
+            }
+
+            Ballot latestWitnessed;
+            Accepted acceptedButNotCommitted;
+            Committed committed;
+            int nowInSec = FBUtilities.nowInSeconds();
+            try (PaxosState state = PaxosState.get(request.partitionKey, request.table))
+            {
+                PaxosState.Snapshot snapshot = state.current(nowInSec);
+                latestWitnessed = snapshot.latestWitnessedOrLowBound();
+                acceptedButNotCommitted = snapshot.accepted;
+                committed = snapshot.committed;
+            }
+
+            Response response = new Response(latestWitnessed, acceptedButNotCommitted, committed);
+            MessagingService.instance().respond(response, message);
+        }
+    }
+
+    public static class RequestSerializer implements IVersionedSerializer<Request>
+    {
+        @Override
+        public void serialize(Request request, DataOutputPlus out, int version) throws IOException
+        {
+            request.table.id.serialize(out);
+            DecoratedKey.serializer.serialize(request.partitionKey, out, version);
+        }
+
+        @Override
+        public Request deserialize(DataInputPlus in, int version) throws IOException
+        {
+            TableMetadata table = Schema.instance.getExistingTableMetadata(TableId.deserialize(in));
+            DecoratedKey partitionKey = (DecoratedKey) DecoratedKey.serializer.deserialize(in, table.partitioner, version);
+            return new Request(partitionKey, table);
+        }
+
+        @Override
+        public long serializedSize(Request request, int version)
+        {
+            return request.table.id.serializedSize()
+                   + DecoratedKey.serializer.serializedSize(request.partitionKey, version);
+        }
+    }
+
+    public static class ResponseSerializer implements IVersionedSerializer<Response>
+    {
+        public void serialize(Response response, DataOutputPlus out, int version) throws IOException
+        {
+            response.latestWitnessedOrLowBound.serialize(out);
+            serializeNullable(Accepted.serializer, response.acceptedButNotCommitted, out, version);
+            Committed.serializer.serialize(response.committed, out, version);
+        }
+
+        public Response deserialize(DataInputPlus in, int version) throws IOException
+        {
+            Ballot latestWitnessed = Ballot.deserialize(in);
+            Accepted acceptedButNotCommitted = deserializeNullable(Accepted.serializer, in, version);
+            Committed committed = Committed.serializer.deserialize(in, version);
+            return new Response(latestWitnessed, acceptedButNotCommitted, committed);
+        }
+
+        public long serializedSize(Response response, int version)
+        {
+            return Ballot.sizeInBytes()
+                   + serializedSizeNullable(Accepted.serializer, response.acceptedButNotCommitted, version)
+                   + Committed.serializer.serializedSize(response.committed, version);
+        }
+    }
+
+    private static volatile boolean SKIP_VERSION_VALIDATION = Boolean.getBoolean("cassandra.skip_paxos_repair_version_validation");
+
+    public static void setSkipPaxosRepairCompatibilityCheck(boolean v)
+    {
+        SKIP_VERSION_VALIDATION = v;
+    }
+
+    public static boolean getSkipPaxosRepairCompatibilityCheck()
+    {
+        return SKIP_VERSION_VALIDATION;
+    }
+
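+    // paxos repair requires support from every participant, so these helpers verify (via gossiped release
+    // versions) that all peers are on a new enough release, unless validation is explicitly skipped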
+    static boolean validateVersionCompatibility(CassandraVersion version)
+    {
+        if (SKIP_VERSION_VALIDATION)
+            return true;
+
+        if (version == null)
+            return false;
+
+        // require a release newer than 4.0, i.e. 4.1 or later
+        return (version.major == 4 && version.minor > 0) || version.major > 4;
+    }
+
+    static String getPeerVersion(InetAddressAndPort peer)
+    {
+        EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(peer);
+        if (epState == null)
+            return null;
+
+        VersionedValue value = epState.getApplicationState(ApplicationState.RELEASE_VERSION);
+        if (value == null)
+            return null;
+
+        try
+        {
+            return value.value;
+        }
+        catch (IllegalArgumentException e)
+        {
+            return null;
+        }
+    }
+
+    static boolean validatePeerCompatibility(Replica peer)
+    {
+        String versionString = getPeerVersion(peer.endpoint());
+        CassandraVersion version = versionString != null ? new CassandraVersion(versionString) : null;
+        boolean result = validateVersionCompatibility(version);
+        if (!result)
+            logger.info("PaxosRepair isn't supported by {} on version {}", peer, versionString);
+        return result;
+    }
+
+    static boolean validatePeerCompatibility(TableMetadata table, Range<Token> range)
+    {
+        Participants participants = Participants.get(table, range.right, ConsistencyLevel.SERIAL);
+        return Iterables.all(participants.all, PaxosRepair::validatePeerCompatibility);
+    }
+
+    public static boolean validatePeerCompatibility(TableMetadata table, Collection<Range<Token>> ranges)
+    {
+        return Iterables.all(ranges, range -> validatePeerCompatibility(table, range));
+    }
+
+    public static void shutdownAndWait(long timeout, TimeUnit units) throws InterruptedException, TimeoutException
+    {
+        ExecutorUtils.shutdownAndWait(timeout, units, RETRIES);
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java b/src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java
new file mode 100644
index 0000000..1627fdb
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java
@@ -0,0 +1,479 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.TupleType;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
+import static java.lang.Math.min;
+import static org.apache.cassandra.service.paxos.Commit.isAfter;
+import static org.apache.cassandra.service.paxos.Commit.latest;
+
+public class PaxosRepairHistory
+{
+    public static final PaxosRepairHistory EMPTY = new PaxosRepairHistory(new Token[0], new Ballot[] { Ballot.none() });
+    private static final Token.TokenFactory TOKEN_FACTORY = DatabaseDescriptor.getPartitioner().getTokenFactory();
+    private static final Token MIN_TOKEN = DatabaseDescriptor.getPartitioner().getMinimumToken();
+    private static final TupleType TYPE = new TupleType(ImmutableList.of(BytesType.instance, BytesType.instance));
+
+    /**
+     * The following two fields represent the mapping of ranges to ballot lower bounds, for example:
+     *
+     *   ballotLowBound           = [ none(), b2, none(), b4, none() ]
+     *   tokenInclusiveUpperBound = [ t1, t2, t3, t4 ]
+     *
+     * Correspond to the following token bounds:
+     *
+     *   (MIN_VALUE, t1] => none()
+     *   (t1, t2]        => b2
+     *   (t2, t3]        => none()
+     *   (t3, t4]        => b4
+     *   (t4, MAX_VALUE) => none()
+     */
+
+    private final Token[] tokenInclusiveUpperBound;
+    private final Ballot[] ballotLowBound; // always one longer than tokenInclusiveUpperBound, to cover values up to "MAX_VALUE" (which for some partitioners doesn't exist, as the token space is unbounded)
+
+    PaxosRepairHistory(Token[] tokenInclusiveUpperBound, Ballot[] ballotLowBound)
+    {
+        assert ballotLowBound.length == tokenInclusiveUpperBound.length + 1;
+        this.tokenInclusiveUpperBound = tokenInclusiveUpperBound;
+        this.ballotLowBound = ballotLowBound;
+    }
+
+    public Ballot maxLowBound()
+    {
+        Ballot maxBallot = Ballot.none();
+        for (Ballot lowBound : ballotLowBound)
+        {
+            maxBallot = Commit.latest(maxBallot, lowBound);
+        }
+        return maxBallot;
+    }
+
+    public String toString()
+    {
+        return "PaxosRepairHistory{" +
+                IntStream.range(0, ballotLowBound.length)
+                        .filter(i -> !Ballot.none().equals(ballotLowBound[i]))
+                        .mapToObj(i -> range(i) + "=" + ballotLowBound[i])
+                        .collect(Collectors.joining(", ")) + '}';
+    }
+
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        PaxosRepairHistory that = (PaxosRepairHistory) o;
+        return Arrays.equals(ballotLowBound, that.ballotLowBound) && Arrays.equals(tokenInclusiveUpperBound, that.tokenInclusiveUpperBound);
+    }
+
+    public int hashCode()
+    {
+        return Arrays.hashCode(ballotLowBound);
+    }
+
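+    // a token is matched to the range whose inclusive upper bound is the first one >= the token; the trailing
+    // ballotLowBound entry covers everything above the last recorded upper bound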
+    public Ballot ballotForToken(Token token)
+    {
+        int idx = Arrays.binarySearch(tokenInclusiveUpperBound, token);
+        if (idx < 0) idx = -1 - idx;
+        return ballotLowBound[idx];
+    }
+
+    private Ballot ballotForIndex(int idx)
+    {
+        if (idx < 0 || idx > size())
+            throw new IndexOutOfBoundsException();
+
+        return ballotLowBound[idx];
+    }
+
+    private int indexForToken(Token token)
+    {
+        int idx = Arrays.binarySearch(tokenInclusiveUpperBound, token);
+        if (idx < 0) idx = -1 - idx;
+        return idx;
+    }
+
+    private boolean contains(int idx, Token token)
+    {
+        if (idx < 0 || idx > size())
+            throw new IndexOutOfBoundsException();
+
+        return  (idx == 0      || tokenInclusiveUpperBound[idx - 1].compareTo(token) <  0)
+             && (idx == size() || tokenInclusiveUpperBound[idx    ].compareTo(token) >= 0);
+    }
+
+    public int size()
+    {
+        return tokenInclusiveUpperBound.length;
+    }
+
+    private RangeIterator rangeIterator()
+    {
+        return new RangeIterator();
+    }
+
+    private Range<Token> range(int i)
+    {
+        return new Range<>(tokenExclusiveLowerBound(i), tokenInclusiveUpperBound(i));
+    }
+
+    public Searcher searcher()
+    {
+        return new Searcher();
+    }
+
+    private Token tokenExclusiveLowerBound(int i)
+    {
+        return i == 0 ? MIN_TOKEN : tokenInclusiveUpperBound[i - 1];
+    }
+
+    private Token tokenInclusiveUpperBound(int i)
+    {
+        return i == tokenInclusiveUpperBound.length ? MIN_TOKEN : tokenInclusiveUpperBound[i];
+    }
+
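+    // serialized as a list of (token, ballot) tuples; the final tuple uses the minimum token as a
+    // sentinel upper bound for the trailing, unbounded range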
+    public List<ByteBuffer> toTupleBufferList()
+    {
+        List<ByteBuffer> tuples = new ArrayList<>(size() + 1);
+        for (int i = 0 ; i < 1 + size() ; ++i)
+            tuples.add(TupleType.buildValue(new ByteBuffer[] { TOKEN_FACTORY.toByteArray(tokenInclusiveUpperBound(i)), ballotLowBound[i].toBytes() }));
+        return tuples;
+    }
+
+    public static PaxosRepairHistory fromTupleBufferList(List<ByteBuffer> tuples)
+    {
+        Token[] tokenInclusiveUpperBounds = new Token[tuples.size() - 1];
+        Ballot[] ballotLowBounds = new Ballot[tuples.size()];
+        for (int i = 0 ; i < tuples.size() ; ++i)
+        {
+            ByteBuffer[] split = TYPE.split(tuples.get(i));
+            if (i < tokenInclusiveUpperBounds.length)
+                tokenInclusiveUpperBounds[i] = TOKEN_FACTORY.fromByteArray(split[0]);
+            ballotLowBounds[i] = Ballot.deserialize(split[1]);
+        }
+
+        return new PaxosRepairHistory(tokenInclusiveUpperBounds, ballotLowBounds);
+    }
+
+    // append the item to the given list, modifying the underlying list
+    // if the item makes previous entries redundant
+
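+    /**
+     * Merges two histories, keeping for every token range the latest ballot low bound recorded by either input.
+     */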
+    public static PaxosRepairHistory merge(PaxosRepairHistory historyLeft, PaxosRepairHistory historyRight)
+    {
+        if (historyLeft == null)
+            return historyRight;
+
+        if (historyRight == null)
+            return historyLeft;
+
+        Builder builder = new Builder(historyLeft.size() + historyRight.size());
+
+        RangeIterator left = historyLeft.rangeIterator();
+        RangeIterator right = historyRight.rangeIterator();
+        while (left.hasUpperBound() && right.hasUpperBound())
+        {
+            int cmp = left.tokenInclusiveUpperBound().compareTo(right.tokenInclusiveUpperBound());
+
+            Ballot ballot = latest(left.ballotLowBound(), right.ballotLowBound());
+            if (cmp == 0)
+            {
+                builder.append(left.tokenInclusiveUpperBound(), ballot);
+                left.next();
+                right.next();
+            }
+            else
+            {
+                RangeIterator firstIter = cmp < 0 ? left : right;
+                builder.append(firstIter.tokenInclusiveUpperBound(), ballot);
+                firstIter.next();
+            }
+        }
+
+        while (left.hasUpperBound())
+        {
+            builder.append(left.tokenInclusiveUpperBound(), latest(left.ballotLowBound(), right.ballotLowBound()));
+            left.next();
+        }
+
+        while (right.hasUpperBound())
+        {
+            builder.append(right.tokenInclusiveUpperBound(), latest(left.ballotLowBound(), right.ballotLowBound()));
+            right.next();
+        }
+
+        builder.appendLast(latest(left.ballotLowBound(), right.ballotLowBound()));
+        return builder.build();
+    }
+
+    @VisibleForTesting
+    public static PaxosRepairHistory add(PaxosRepairHistory existing, Collection<Range<Token>> ranges, Ballot ballot)
+    {
+        ranges = Range.normalize(ranges);
+        Builder builder = new Builder(ranges.size() * 2);
+        for (Range<Token> range : ranges)
+        {
+            // don't add a point for an opening min token, since it
+            // effectively leaves the bottom of the range unbounded
+            builder.appendMaybeMin(range.left, Ballot.none());
+            builder.appendMaybeMax(range.right, ballot);
+        }
+
+        return merge(existing, builder.build());
+    }
+
+    /**
+     * returns a copy of this PaxosRepairHistory limited to the ranges supplied, with all other ranges reporting Ballot.none()
+     */
+    @VisibleForTesting
+    static PaxosRepairHistory trim(PaxosRepairHistory existing, Collection<Range<Token>> ranges)
+    {
+        Builder builder = new Builder(existing.size());
+
+        ranges = Range.normalize(ranges);
+        for (Range<Token> select : ranges)
+        {
+            RangeIterator intersects = existing.intersects(select);
+            while (intersects.hasNext())
+            {
+                if (Ballot.none().equals(intersects.ballotLowBound()))
+                {
+                    intersects.next();
+                    continue;
+                }
+
+                Token exclusiveLowerBound = maxExclusiveLowerBound(select.left, intersects.tokenExclusiveLowerBound());
+                Token inclusiveUpperBound = minInclusiveUpperBound(select.right, intersects.tokenInclusiveUpperBound());
+                assert exclusiveLowerBound.compareTo(inclusiveUpperBound) < 0 || inclusiveUpperBound.isMinimum();
+
+                builder.appendMaybeMin(exclusiveLowerBound, Ballot.none());
+                builder.appendMaybeMax(inclusiveUpperBound, intersects.ballotLowBound());
+                intersects.next();
+            }
+        }
+
+        return builder.build();
+    }
+
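+    // returns an iterator over the entries whose ranges intersect the given (already unwrapped) token range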
+    RangeIterator intersects(Range<Token> unwrapped)
+    {
+        int from = Arrays.binarySearch(tokenInclusiveUpperBound, unwrapped.left);
+        if (from < 0) from = -1 - from; else ++from;
+        int to = unwrapped.right.isMinimum() ? ballotLowBound.length - 1 : Arrays.binarySearch(tokenInclusiveUpperBound, unwrapped.right);
+        if (to < 0) to = -1 - to;
+        return new RangeIterator(from, min(1 + to, ballotLowBound.length));
+    }
+
+    private static Token maxExclusiveLowerBound(Token a, Token b)
+    {
+        return a.compareTo(b) < 0 ? b : a;
+    }
+
+    private static Token minInclusiveUpperBound(Token a, Token b)
+    {
+        if (!a.isMinimum() && !b.isMinimum()) return a.compareTo(b) <= 0 ? a : b;
+        else if (!a.isMinimum()) return a;
+        else if (!b.isMinimum()) return b;
+        else return a;
+    }
+
+    public static final IVersionedSerializer<PaxosRepairHistory> serializer = new IVersionedSerializer<PaxosRepairHistory>()
+    {
+        public void serialize(PaxosRepairHistory history, DataOutputPlus out, int version) throws IOException
+        {
+            out.writeUnsignedVInt(history.size());
+            for (int i = 0; i < history.size() ; ++i)
+            {
+                Token.serializer.serialize(history.tokenInclusiveUpperBound[i], out, version);
+                history.ballotLowBound[i].serialize(out);
+            }
+            history.ballotLowBound[history.size()].serialize(out);
+        }
+
+        public PaxosRepairHistory deserialize(DataInputPlus in, int version) throws IOException
+        {
+            int size = (int) in.readUnsignedVInt();
+            Token[] tokenInclusiveUpperBounds = new Token[size];
+            Ballot[] ballotLowBounds = new Ballot[size + 1];
+            for (int i = 0; i < size; i++)
+            {
+                tokenInclusiveUpperBounds[i] = Token.serializer.deserialize(in, DatabaseDescriptor.getPartitioner(), version);
+                ballotLowBounds[i] = Ballot.deserialize(in);
+            }
+            ballotLowBounds[size] = Ballot.deserialize(in);
+            return new PaxosRepairHistory(tokenInclusiveUpperBounds, ballotLowBounds);
+        }
+
+        public long serializedSize(PaxosRepairHistory history, int version)
+        {
+            long size = TypeSizes.sizeofUnsignedVInt(history.size());
+            for (int i = 0; i < history.size() ; ++i)
+            {
+                size += Token.serializer.serializedSize(history.tokenInclusiveUpperBound[i], version);
+                size += Ballot.sizeInBytes();
+            }
+            size += Ballot.sizeInBytes();
+            return size;
+        }
+    };
+
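+    /**
+     * Caches the index of the last match so that repeated lookups for nearby tokens (e.g. iterating keys in
+     * token order) can skip the binary search when the token falls into the same slot.
+     */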
+    public class Searcher
+    {
+        int idx = -1;
+
+        public Ballot ballotForToken(Token token)
+        {
+            if (idx < 0 || !contains(idx, token))
+                idx = indexForToken(token);
+            return ballotForIndex(idx);
+        }
+    }
+
+    class RangeIterator
+    {
+        final int end;
+        int i;
+
+        RangeIterator()
+        {
+            this.end = ballotLowBound.length;
+        }
+
+        RangeIterator(int from, int to)
+        {
+            this.i = from;
+            this.end = to;
+        }
+
+        boolean hasNext()
+        {
+            return i < end;
+        }
+
+        boolean hasUpperBound()
+        {
+            return i < tokenInclusiveUpperBound.length;
+        }
+
+        void next()
+        {
+            ++i;
+        }
+
+        Token tokenExclusiveLowerBound()
+        {
+            return PaxosRepairHistory.this.tokenExclusiveLowerBound(i);
+        }
+
+        Token tokenInclusiveUpperBound()
+        {
+            return PaxosRepairHistory.this.tokenInclusiveUpperBound(i);
+        }
+
+        Ballot ballotLowBound()
+        {
+            return ballotLowBound[i];
+        }
+    }
+
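+    /**
+     * Accumulates (inclusive token upper bound, ballot low bound) pairs in token order, coalescing adjacent
+     * entries that share a token or ballot so the resulting history stays minimal.
+     */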
+    static class Builder
+    {
+        final List<Token> tokenInclusiveUpperBounds;
+        final List<Ballot> ballotLowBounds;
+
+        Builder(int capacity)
+        {
+            this.tokenInclusiveUpperBounds = new ArrayList<>(capacity);
+            this.ballotLowBounds = new ArrayList<>(capacity + 1);
+        }
+
+        void appendMaybeMin(Token inclusiveLowBound, Ballot ballotLowBound)
+        {
+            if (inclusiveLowBound.isMinimum())
+                assert ballotLowBound.equals(Ballot.none()) && ballotLowBounds.isEmpty();
+            else
+                append(inclusiveLowBound, ballotLowBound);
+        }
+
+        void appendMaybeMax(Token inclusiveLowBound, Ballot ballotLowBound)
+        {
+            if (inclusiveLowBound.isMinimum())
+                appendLast(ballotLowBound);
+            else
+                append(inclusiveLowBound, ballotLowBound);
+        }
+
+        void append(Token inclusiveLowBound, Ballot ballotLowBound)
+        {
+            int tailIdx = tokenInclusiveUpperBounds.size() - 1;
+
+            assert tokenInclusiveUpperBounds.size() == ballotLowBounds.size();
+            assert tailIdx < 0 || inclusiveLowBound.compareTo(tokenInclusiveUpperBounds.get(tailIdx)) >= 0;
+
+            boolean sameAsTailToken = tailIdx >= 0 && inclusiveLowBound.equals(tokenInclusiveUpperBounds.get(tailIdx));
+            boolean sameAsTailBallot = tailIdx >= 0 && ballotLowBound.equals(ballotLowBounds.get(tailIdx));
+            if (sameAsTailToken || sameAsTailBallot)
+            {
+                if (sameAsTailBallot)
+                    tokenInclusiveUpperBounds.set(tailIdx, inclusiveLowBound);
+                else if (isAfter(ballotLowBound, ballotLowBounds.get(tailIdx)))
+                    ballotLowBounds.set(tailIdx, ballotLowBound);
+            }
+            else
+            {
+                tokenInclusiveUpperBounds.add(inclusiveLowBound);
+                ballotLowBounds.add(ballotLowBound);
+            }
+        }
+
+        void appendLast(Ballot ballotLowBound)
+        {
+            assert ballotLowBounds.size() == tokenInclusiveUpperBounds.size();
+            int tailIdx = tokenInclusiveUpperBounds.size() - 1;
+            if (!ballotLowBounds.isEmpty() && ballotLowBound.equals(ballotLowBounds.get(tailIdx)))
+                tokenInclusiveUpperBounds.remove(tailIdx);
+            else
+                ballotLowBounds.add(ballotLowBound);
+        }
+
+        PaxosRepairHistory build()
+        {
+            if (tokenInclusiveUpperBounds.size() == ballotLowBounds.size())
+                ballotLowBounds.add(Ballot.none());
+            return new PaxosRepairHistory(tokenInclusiveUpperBounds.toArray(new Token[0]), ballotLowBounds.toArray(new Ballot[0]));
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosRequestCallback.java b/src/java/org/apache/cassandra/service/paxos/PaxosRequestCallback.java
new file mode 100644
index 0000000..282aeb2
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosRequestCallback.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.util.function.BiFunction;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.exceptions.WriteTimeoutException;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.service.FailureRecordingCallback;
+
+import static org.apache.cassandra.exceptions.RequestFailureReason.TIMEOUT;
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+
+public abstract class PaxosRequestCallback<T> extends FailureRecordingCallback<T>
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosRequestCallback.class);
+    private static final boolean USE_SELF_EXECUTION = CassandraRelevantProperties.PAXOS_EXECUTE_ON_SELF.getBoolean();
+
+    protected abstract void onResponse(T response, InetAddressAndPort from);
+
+    @Override
+    public void onResponse(Message<T> message)
+    {
+        onResponse(message.payload, message.from());
+    }
+
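+    // applies the verb handler logic directly on this node instead of sending a message to ourselves,
+    // translating any exception into the equivalent failure response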
+    protected <I> void executeOnSelf(I parameter, BiFunction<I, InetAddressAndPort, T> execute)
+    {
+        T response;
+        try
+        {
+            response = execute.apply(parameter, getBroadcastAddressAndPort());
+            if (response == null)
+                return;
+        }
+        catch (Exception ex)
+        {
+            RequestFailureReason reason = UNKNOWN;
+            if (ex instanceof WriteTimeoutException) reason = TIMEOUT;
+            else logger.error("Failed to apply {} locally", parameter, ex);
+
+            onFailure(getBroadcastAddressAndPort(), reason);
+            return;
+        }
+
+        onResponse(response, getBroadcastAddressAndPort());
+    }
+
+    static boolean shouldExecuteOnSelf(InetAddressAndPort replica)
+    {
+        return USE_SELF_EXECUTION && replica.equals(getBroadcastAddressAndPort());
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosState.java b/src/java/org/apache/cassandra/service/paxos/PaxosState.java
index 6e02435..e802cd0 100644
--- a/src/java/org/apache/cassandra/service/paxos/PaxosState.java
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosState.java
@@ -1,5 +1,5 @@
 /*
- *
+ * 
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -7,155 +7,816 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
+ * 
  *   http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- *
+ * 
  */
 package org.apache.cassandra.service.paxos;
 
-import java.util.concurrent.locks.Lock;
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
 
-import com.google.common.util.concurrent.Striped;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 
-import org.apache.cassandra.schema.TableMetadata;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Ints;
+
+import com.github.benmanes.caffeine.cache.Caffeine;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.metrics.PaxosMetrics;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.exceptions.ReadTimeoutException;
+import org.apache.cassandra.exceptions.RequestTimeoutException;
+import org.apache.cassandra.exceptions.WriteTimeoutException;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosBallotTracker;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosStateTracker;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTracker;
 import org.apache.cassandra.tracing.Tracing;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.Nemesis;
 
-public class PaxosState
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.config.Config.PaxosStatePurging.gc_grace;
+import static org.apache.cassandra.config.Config.PaxosStatePurging.legacy;
+import static org.apache.cassandra.config.DatabaseDescriptor.paxosStatePurging;
+import static org.apache.cassandra.service.paxos.Commit.*;
+import static org.apache.cassandra.service.paxos.PaxosState.MaybePromise.Outcome.*;
+import static org.apache.cassandra.service.paxos.Commit.Accepted.latestAccepted;
+import static org.apache.cassandra.service.paxos.Commit.Committed.latestCommitted;
+import static org.apache.cassandra.service.paxos.Commit.isAfter;
+
+/**
+ * We save the result of each operation to memory before persisting it to disk; however, an operation that
+ * performs an update does not return a result to the coordinator until that result has been fully persisted.
+ */
+public class PaxosState implements PaxosOperationLock
 {
-    private static final Striped<Lock> LOCKS = Striped.lazyWeakLock(DatabaseDescriptor.getConcurrentWriters() * 1024);
+    private static volatile boolean DISABLE_COORDINATOR_LOCKING = Boolean.getBoolean("cassandra.paxos.disable_coordinator_locking");
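+    // ACTIVE holds the reference-counted states currently in use; when the last referent closes, the latest
+    // Snapshot is stashed in the size-bounded RECENT cache so a later access can avoid re-reading from disk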
+    public static final ConcurrentHashMap<Key, PaxosState> ACTIVE = new ConcurrentHashMap<>();
+    public static final Map<Key, Snapshot> RECENT = Caffeine.newBuilder()
+                                                            .maximumWeight(DatabaseDescriptor.getPaxosCacheSizeInMiB() << 20)
+                                                            .<Key, Snapshot>weigher((k, v) -> Ints.saturatedCast((v.accepted != null ? v.accepted.update.unsharedHeapSize() : 0L) + v.committed.update.unsharedHeapSize()))
+                                                            .executor(ImmediateExecutor.INSTANCE)
+                                                            .build().asMap();
 
-    private final Commit promised;
-    private final Commit accepted;
-    private final Commit mostRecentCommit;
-
-    public PaxosState(DecoratedKey key, TableMetadata metadata)
+    private static class TrackerHandle
     {
-        this(Commit.emptyCommit(key, metadata), Commit.emptyCommit(key, metadata), Commit.emptyCommit(key, metadata));
-    }
+        static final PaxosStateTracker tracker;
 
-    public PaxosState(Commit promised, Commit accepted, Commit mostRecentCommit)
-    {
-        assert promised.update.partitionKey().equals(accepted.update.partitionKey()) && accepted.update.partitionKey().equals(mostRecentCommit.update.partitionKey());
-        assert promised.update.metadata().id.equals(accepted.update.metadata().id) && accepted.update.metadata().id.equals(mostRecentCommit.update.metadata().id);
-
-        this.promised = promised;
-        this.accepted = accepted;
-        this.mostRecentCommit = mostRecentCommit;
-    }
-
-    public static PrepareResponse prepare(Commit toPrepare)
-    {
-        long start = System.nanoTime();
-        try
+        static
         {
-            Lock lock = LOCKS.get(toPrepare.update.partitionKey());
-            lock.lock();
             try
             {
-                // When preparing, we need to use the same time as "now" (that's the time we use to decide if something
-                // is expired or not) accross nodes otherwise we may have a window where a Most Recent Commit shows up
-                // on some replica and not others during a new proposal (in StorageProxy.beginAndRepairPaxos()), and no
-                // amount of re-submit will fix this (because the node on which the commit has expired will have a
-                // tombstone that hides any re-submit). See CASSANDRA-12043 for details.
-                int nowInSec = UUIDGen.unixTimestampInSec(toPrepare.ballot);
-                PaxosState state = SystemKeyspace.loadPaxosState(toPrepare.update.partitionKey(), toPrepare.update.metadata(), nowInSec);
-                if (toPrepare.isAfter(state.promised))
-                {
-                    Tracing.trace("Promising ballot {}", toPrepare.ballot);
-                    SystemKeyspace.savePaxosPromise(toPrepare);
-                    return new PrepareResponse(true, state.accepted, state.mostRecentCommit);
-                }
-                else
-                {
-                    Tracing.trace("Promise rejected; {} is not sufficiently newer than {}", toPrepare, state.promised);
-                    // return the currently promised ballot (not the last accepted one) so the coordinator can make sure it uses newer ballot next time (#5667)
-                    return new PrepareResponse(false, state.promised, state.mostRecentCommit);
-                }
+                tracker = PaxosStateTracker.create(Directories.dataDirectories);
             }
-            finally
+            catch (IOException e)
             {
-                lock.unlock();
+                throw new RuntimeException(e);
             }
         }
-        finally
-        {
-            Keyspace.open(toPrepare.update.metadata().keyspace).getColumnFamilyStore(toPrepare.update.metadata().id).metric.casPrepare.addNano(System.nanoTime() - start);
-        }
-
-    }
-
-    public static Boolean propose(Commit proposal)
-    {
-        long start = System.nanoTime();
-        try
-        {
-            Lock lock = LOCKS.get(proposal.update.partitionKey());
-            lock.lock();
-            try
-            {
-                int nowInSec = UUIDGen.unixTimestampInSec(proposal.ballot);
-                PaxosState state = SystemKeyspace.loadPaxosState(proposal.update.partitionKey(), proposal.update.metadata(), nowInSec);
-                if (proposal.hasBallot(state.promised.ballot) || proposal.isAfter(state.promised))
-                {
-                    Tracing.trace("Accepting proposal {}", proposal);
-                    SystemKeyspace.savePaxosProposal(proposal);
-                    return true;
-                }
-                else
-                {
-                    Tracing.trace("Rejecting proposal for {} because inProgress is now {}", proposal, state.promised);
-                    return false;
-                }
-            }
-            finally
-            {
-                lock.unlock();
-            }
-        }
-        finally
-        {
-            Keyspace.open(proposal.update.metadata().keyspace).getColumnFamilyStore(proposal.update.metadata().id).metric.casPropose.addNano(System.nanoTime() - start);
-        }
     }
 
-    public static void commit(Commit proposal)
+    public static void setDisableCoordinatorLocking(boolean disable)
     {
-        long start = System.nanoTime();
+        DISABLE_COORDINATOR_LOCKING = disable;
+    }
+
+    public static boolean getDisableCoordinatorLocking()
+    {
+        return DISABLE_COORDINATOR_LOCKING;
+    }
+
+    public static PaxosUncommittedTracker uncommittedTracker()
+    {
+        return TrackerHandle.tracker.uncommitted();
+    }
+
+    public static PaxosBallotTracker ballotTracker()
+    {
+        return TrackerHandle.tracker.ballots();
+    }
+
+    public static void initializeTrackers()
+    {
+        Preconditions.checkState(TrackerHandle.tracker != null);
+        PaxosMetrics.initialize();
+    }
+
+    public static void maybeRebuildUncommittedState() throws IOException
+    {
+        TrackerHandle.tracker.maybeRebuild();
+    }
+
+    public static void startAutoRepairs()
+    {
+        TrackerHandle.tracker.uncommitted().startAutoRepairs();
+    }
+
+    public static class Key
+    {
+        final DecoratedKey partitionKey;
+        final TableMetadata metadata;
+
+        public Key(DecoratedKey partitionKey, TableMetadata metadata)
+        {
+            this.partitionKey = partitionKey;
+            this.metadata = metadata;
+        }
+
+        public int hashCode()
+        {
+            return partitionKey.hashCode() * 31 + metadata.id.hashCode();
+        }
+
+        public boolean equals(Object that)
+        {
+            return that instanceof Key && equals((Key) that);
+        }
+
+        public boolean equals(Key that)
+        {
+            return this.partitionKey.equals(that.partitionKey)
+                    && this.metadata.id.equals(that.metadata.id);
+        }
+    }
+
+    public static class Snapshot
+    {
+        public final @Nonnull  Ballot promised;
+        public final @Nonnull  Ballot promisedWrite; // <= promised
+        public final @Nullable Accepted  accepted; // if already committed, this will be null
+        public final @Nonnull  Committed committed;
+
+        public Snapshot(@Nonnull Ballot promised, @Nonnull Ballot promisedWrite, @Nullable Accepted accepted, @Nonnull Committed committed)
+        {
+            assert isAfter(promised, promisedWrite) || promised == promisedWrite;
+            assert accepted == null || accepted.update.partitionKey().equals(committed.update.partitionKey());
+            assert accepted == null || accepted.update.metadata().id.equals(committed.update.metadata().id);
+            assert accepted == null || committed.isBefore(accepted.ballot);
+
+            this.promised = promised;
+            this.promisedWrite = promisedWrite;
+            this.accepted = accepted;
+            this.committed = committed;
+        }
+
+        public @Nonnull Ballot latestWitnessedOrLowBound(Ballot latestWriteOrLowBound)
+        {
+            return promised == promisedWrite ? latestWriteOrLowBound : latest(promised, latestWriteOrLowBound);
+        }
+
+        public @Nonnull Ballot latestWitnessedOrLowBound()
+        {
+            // warn: if proposal has same timestamp as promised, we should prefer accepted
+            // since (if different) it reached a quorum of promises; this means providing it as first argument
+            Ballot latest;
+            latest = latest(accepted, committed).ballot;
+            latest = latest(latest, promised);
+            latest = latest(latest, ballotTracker().getLowBound());
+            return latest;
+        }
+
+        public @Nonnull Ballot latestWriteOrLowBound()
+        {
+            // warn: if proposal has same timestamp as promised, we should prefer accepted
+            // since (if different) it reached a quorum of promises; this means providing it as first argument
+            Ballot latest = accepted != null && !accepted.update.isEmpty() ? accepted.ballot : null;
+            latest = latest(latest, committed.ballot);
+            latest = latest(latest, promisedWrite);
+            latest = latest(latest, ballotTracker().getLowBound());
+            return latest;
+        }
+
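+        // merges two snapshots, preferring the latest committed/accepted/promised state from either; if one
+        // side is an UnsafeSnapshot (committed-only), only its committed value is taken into account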
+        public static Snapshot merge(Snapshot a, Snapshot b)
+        {
+            if (a == null || b == null)
+                return a == null ? b : a;
+
+            Committed committed = latestCommitted(a.committed, b.committed);
+            if (a instanceof UnsafeSnapshot && b instanceof UnsafeSnapshot)
+                return new UnsafeSnapshot(committed);
+
+            Accepted accepted;
+            Ballot promised, promisedWrite;
+            if (a instanceof UnsafeSnapshot || b instanceof UnsafeSnapshot)
+            {
+                if (a instanceof UnsafeSnapshot)
+                    a = b; // we already have the winning Committed saved above, so just want the full snapshot (if either)
+
+                if (committed == a.committed)
+                    return a;
+
+                promised = a.promised;
+                promisedWrite = a.promisedWrite;
+                accepted = isAfter(a.accepted, committed) ? a.accepted : null;
+            }
+            else
+            {
+                accepted = latestAccepted(a.accepted, b.accepted);
+                accepted = isAfter(accepted, committed) ? accepted : null;
+                promised = latest(a.promised, b.promised);
+                promisedWrite = latest(a.promisedWrite, b.promisedWrite);
+            }
+
+            return new Snapshot(promised, promisedWrite, accepted, committed);
+        }
+
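+        // drops accepted/committed state that has expired at nowInSec (and, with gc_grace purging, state whose
+        // ballot is older than the table's gc grace period)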
+        Snapshot removeExpired(int nowInSec)
+        {
+            boolean isAcceptedExpired = accepted != null && accepted.isExpired(nowInSec);
+            boolean isCommittedExpired = committed.isExpired(nowInSec);
+
+            if (paxosStatePurging() == gc_grace)
+            {
+                long expireOlderThan = SECONDS.toMicros(nowInSec - committed.update.metadata().params.gcGraceSeconds);
+                isAcceptedExpired |= accepted != null && accepted.ballot.unixMicros() < expireOlderThan;
+                isCommittedExpired |= committed.ballot.unixMicros() < expireOlderThan;
+            }
+
+            if (!isAcceptedExpired && !isCommittedExpired)
+                return this;
+
+            return new Snapshot(promised, promisedWrite,
+                                isAcceptedExpired ? null : accepted,
+                                isCommittedExpired
+                                    ? Committed.none(committed.update.partitionKey(), committed.update.metadata())
+                                    : committed);
+        }
+    }
+
+    // used to permit recording Committed outcomes without waiting for initial read
+    public static class UnsafeSnapshot extends Snapshot
+    {
+        public UnsafeSnapshot(@Nonnull Committed committed)
+        {
+            super(Ballot.none(), Ballot.none(), null, committed);
+        }
+
+        public UnsafeSnapshot(@Nonnull Commit committed)
+        {
+            this(new Committed(committed.ballot, committed.update));
+        }
+    }
+
+    @VisibleForTesting
+    public static class MaybePromise
+    {
+        public enum Outcome { REJECT, PERMIT_READ, PROMISE }
+
+        final Snapshot before;
+        final Snapshot after;
+        final Ballot supersededBy;
+        final Outcome outcome;
+
+        MaybePromise(Snapshot before, Snapshot after, Ballot supersededBy, Outcome outcome)
+        {
+            this.before = before;
+            this.after = after;
+            this.supersededBy = supersededBy;
+            this.outcome = outcome;
+        }
+
+        static MaybePromise promise(Snapshot before, Snapshot after)
+        {
+            return new MaybePromise(before, after, null, PROMISE);
+        }
+
+        static MaybePromise permitRead(Snapshot before, Ballot supersededBy)
+        {
+            return new MaybePromise(before, before, supersededBy, PERMIT_READ);
+        }
+
+        static MaybePromise reject(Snapshot snapshot, Ballot supersededBy)
+        {
+            return new MaybePromise(snapshot, snapshot, supersededBy, REJECT);
+        }
+
+        public Outcome outcome()
+        {
+            return outcome;
+        }
+
+        public Ballot supersededBy()
+        {
+            return supersededBy;
+        }
+    }
+
+    @Nemesis private static final AtomicReferenceFieldUpdater<PaxosState, Snapshot> currentUpdater = AtomicReferenceFieldUpdater.newUpdater(PaxosState.class, Snapshot.class, "current");
+
+    final Key key;
+    private int active; // current number of active referents (once drops to zero, we remove the global entry)
+    @Nemesis private volatile Snapshot current;
+    @Nemesis private volatile Thread lockedBy;
+    @Nemesis private volatile int waiting;
+
+    private static final AtomicReferenceFieldUpdater<PaxosState, Thread> lockedByUpdater = AtomicReferenceFieldUpdater.newUpdater(PaxosState.class, Thread.class, "lockedBy");
+
+    private PaxosState(Key key, Snapshot current)
+    {
+        this.key = key;
+        this.current = current;
+    }
+
+    @VisibleForTesting
+    public static PaxosState get(Commit commit)
+    {
+        return get(commit.update.partitionKey(), commit.update.metadata());
+    }
+
+    public static PaxosState get(DecoratedKey partitionKey, TableMetadata table)
+    {
+        // TODO would be nice to refactor verb handlers to support re-submitting to executor if waiting for another thread to read state
+        return getUnsafe(partitionKey, table).maybeLoad();
+    }
+
+    // returns the existing state (from ACTIVE or the RECENT cache) or null if there is none; unlike getUnsafe
+    // it never creates a fresh state, so callers only piggy-back on state that others already have in use
+    private static PaxosState tryGetUnsafe(DecoratedKey partitionKey, TableMetadata metadata)
+    {
+        return ACTIVE.compute(new Key(partitionKey, metadata), (key, cur) -> {
+            if (cur == null)
+            {
+                Snapshot saved = RECENT.remove(key);
+                if (saved != null)
+                    //noinspection resource
+                    cur = new PaxosState(key, saved);
+            }
+            if (cur != null)
+                ++cur.active;
+            return cur;
+        });
+    }
+
+    private static PaxosState getUnsafe(DecoratedKey partitionKey, TableMetadata metadata)
+    {
+        return ACTIVE.compute(new Key(partitionKey, metadata), (key, cur) -> {
+            if (cur == null)
+            {
+                //noinspection resource
+                cur = new PaxosState(key, RECENT.remove(key));
+            }
+            ++cur.active;
+            return cur;
+        });
+    }
+
+    private static PaxosState getUnsafe(Commit commit)
+    {
+        return getUnsafe(commit.update.partitionKey(), commit.update.metadata());
+    }
+
+    // don't increment the total count, as we are only using this for locking purposes when coordinating
+    @VisibleForTesting
+    public static PaxosOperationLock lock(DecoratedKey partitionKey, TableMetadata metadata, long deadline, ConsistencyLevel consistencyForConsensus, boolean isWrite) throws RequestTimeoutException
+    {
+        if (DISABLE_COORDINATOR_LOCKING)
+            return PaxosOperationLock.noOp();
+
+        PaxosState lock = ACTIVE.compute(new Key(partitionKey, metadata), (key, cur) -> {
+            if (cur == null)
+                cur = new PaxosState(key, RECENT.remove(key));
+            ++cur.active;
+            return cur;
+        });
+
         try
         {
-            // There is no guarantee we will see commits in the right order, because messages
-            // can get delayed, so a proposal can be older than our current most recent ballot/commit.
-            // Committing it is however always safe due to column timestamps, so always do it. However,
-            // if our current in-progress ballot is strictly greater than the proposal one, we shouldn't
-            // erase the in-progress update.
+            if (!lock.lock(deadline))
+                throw throwTimeout(metadata, consistencyForConsensus, isWrite);
+            return lock;
+        }
+        catch (Throwable t)
+        {
+            lock.close();
+            throw t;
+        }
+    }
+    
+    private static RequestTimeoutException throwTimeout(TableMetadata metadata, ConsistencyLevel consistencyForConsensus, boolean isWrite)
+    {
+        int blockFor = consistencyForConsensus.blockFor(Keyspace.open(metadata.keyspace).getReplicationStrategy());
+        throw isWrite
+                ? new WriteTimeoutException(WriteType.CAS, consistencyForConsensus, 0, blockFor)
+                : new ReadTimeoutException(consistencyForConsensus, 0, blockFor, false);
+    }
+
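+    // lazily loads the full persisted state: if we only have an UnsafeSnapshot (committed-only) or nothing at
+    // all, read system.paxos under the object monitor and merge the result into the in-memory snapshot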
+    private PaxosState maybeLoad()
+    {
+        try
+        {
+            Snapshot current = this.current;
+            if (current == null || current instanceof UnsafeSnapshot)
+            {
+                synchronized (this)
+                {
+                    current = this.current;
+                    if (current == null || current instanceof UnsafeSnapshot)
+                    {
+                        Snapshot snapshot = SystemKeyspace.loadPaxosState(key.partitionKey, key.metadata, 0);
+                        currentUpdater.accumulateAndGet(this, snapshot, Snapshot::merge);
+                    }
+                }
+            }
+        }
+        catch (Throwable t)
+        {
+            try { close(); } catch (Throwable t2) { t.addSuppressed(t2); }
+            throw t;
+        }
+
+        return this;
+    }
+
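+    // acquires the per-partition mutual exclusion used by coordinators: try a CAS on lockedBy first, then
+    // park on the monitor until the lock frees up or the deadline passes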
+    private boolean lock(long deadline)
+    {
+        try
+        {
+            Thread thread = Thread.currentThread();
+            if (lockedByUpdater.compareAndSet(this, null, thread))
+                return true;
+
+            synchronized (this)
+            {
+                waiting++;
+
+                try
+                {
+                    while (true)
+                    {
+                        if (lockedByUpdater.compareAndSet(this, null, thread))
+                            return true;
+
+                        while (lockedBy != null)
+                        {
+                            long now = nanoTime();
+                            if (now >= deadline)
+                                return false;
+
+                            wait(1 + ((deadline - now) - 1) / 1000000);
+                        }
+                    }
+                }
+                finally
+                {
+                    waiting--;
+                }
+            }
+        }
+        catch (InterruptedException e)
+        {
+            Thread.currentThread().interrupt();
+            return false;
+        }
+    }
+
+    private void maybeUnlock()
+    {
+        // no visibility requirements, as if we hold the lock it was last updated by us
+        if (lockedBy == null)
+            return;
+
+        Thread thread = Thread.currentThread();
+
+        if (lockedBy == thread)
+        {
+            lockedBy = null;
+            if (waiting > 0)
+            {
+                synchronized (this)
+                {
+                    notify();
+                }
+            }
+        }
+    }
+
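+    // releases this referent: when the active count drops to zero the entry is removed from ACTIVE and, if we
+    // hold a fully loaded Snapshot, it is stashed in the RECENT cache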
+    public void close()
+    {
+        maybeUnlock();
+        ACTIVE.compute(key, (key, cur) ->
+        {
+            assert cur != null;
+            if (--cur.active > 0)
+                return cur;
+
+            Snapshot stash = cur.current;
+            if (stash != null && stash.getClass() == Snapshot.class)
+                RECENT.put(key, stash);
+            return null;
+        });
+    }
+
+    Snapshot current(Ballot ballot)
+    {
+        return current((int)ballot.unix(SECONDS));
+    }
+
+    Snapshot current(int nowInSec)
+    {
+        // CASSANDRA-12043 is not an issue for v2, as we perform Commit+Prepare and PrepareRefresh
+        // which are able to make progress whether or not the old commit is shadowed by the TTL (since they
+        // depend only on the write being successful, not the data being read again later).
+        // However, we still use nowInSec to guard reads to ensure we do not log any linearizability violations
+        // due to discrepancies in gc grace handling
+
+        Snapshot current = this.current;
+        if (current == null || current.getClass() != Snapshot.class)
+            throw new IllegalStateException();
+        return current.removeExpired(nowInSec);
+    }
+
+    @VisibleForTesting
+    public Snapshot currentSnapshot()
+    {
+        return current;
+    }
+
+    @VisibleForTesting
+    public void updateStateUnsafe(Function<Snapshot, Snapshot> f)
+    {
+        current = f.apply(current);
+    }
+
+    /**
+     * Record the requested ballot as promised if it is newer than our current promise; otherwise do nothing.
+     * @return a PromiseResult containing the before and after state for this operation
+     */
+    public MaybePromise promiseIfNewer(Ballot ballot, boolean isWrite)
+    {
+        Snapshot before, after;
+        while (true)
+        {
+            Snapshot realBefore = current;
+            before = realBefore.removeExpired((int)ballot.unix(SECONDS));
+            Ballot latestWriteOrLowBound = before.latestWriteOrLowBound();
+            Ballot latest = before.latestWitnessedOrLowBound(latestWriteOrLowBound);
+            if (isAfter(ballot, latest))
+            {
+                // construct the replacement from the unexpired view, so the CAS expected value is the reference
+                // actually stored in current; the expired view is what we report back to the caller
+                Snapshot realAfter = new Snapshot(ballot, isWrite ? ballot : realBefore.promisedWrite, realBefore.accepted, realBefore.committed);
+                after = new Snapshot(ballot, realAfter.promisedWrite, before.accepted, before.committed);
+                if (currentUpdater.compareAndSet(this, realBefore, realAfter))
+                    break;
+            }
+            else if (isAfter(ballot, latestWriteOrLowBound))
+            {
+                Tracing.trace("Permitting only read by ballot {}", ballot);
+                return MaybePromise.permitRead(before, latest);
+            }
+            else
+            {
+                Tracing.trace("Promise rejected; {} older than {}", ballot, latest);
+                return MaybePromise.reject(before, latest);
+            }
+        }
+
+        // It doesn't matter if a later operation witnesses this before it's persisted,
+        // as it can only lead to rejecting a promise which leaves no persistent state
+        // (and it's anyway safe to arbitrarily reject promises)
+        if (isWrite)
+        {
+            Tracing.trace("Promising read/write ballot {}", ballot);
+            SystemKeyspace.savePaxosWritePromise(key.partitionKey, key.metadata, ballot);
+        }
+        else
+        {
+            Tracing.trace("Promising read ballot {}", ballot);
+            SystemKeyspace.savePaxosReadPromise(key.partitionKey, key.metadata, ballot);
+        }
+        return MaybePromise.promise(before, after);
+    }
+
+    /**
+     * Record an acceptance of the proposal if there is no newer promise; otherwise inform the caller of the newer ballot.
+     * @return null if the proposal was accepted (or already committed with the same ballot), otherwise the newer ballot that supersedes it
+     */
+    public Ballot acceptIfLatest(Proposal proposal)
+    {
+        if (paxosStatePurging() == legacy && !(proposal instanceof AcceptedWithTTL))
+            proposal = AcceptedWithTTL.withDefaultTTL(proposal);
+
+        // state.promised can be null, because it is invalidated by committed;
+        // we may also have accepted a newer proposal than we promised, so we confirm that we are the absolute newest
+        // (or that we have the exact same ballot as our promise, which is the typical case)
+        Snapshot before, after;
+        while (true)
+        {
+            Snapshot realBefore = current;
+            before = realBefore.removeExpired((int)proposal.ballot.unix(SECONDS));
+            Ballot latest = before.latestWitnessedOrLowBound();
+            if (!proposal.isSameOrAfter(latest))
+            {
+                Tracing.trace("Rejecting proposal {}; latest is now {}", proposal.ballot, latest);
+                return latest;
+            }
+
+            if (proposal.hasSameBallot(before.committed)) // TODO: consider not answering
+                return null; // no need to save anything, or indeed answer at all
+
+            after = new Snapshot(realBefore.promised, realBefore.promisedWrite, proposal.accepted(), realBefore.committed);
+            if (currentUpdater.compareAndSet(this, realBefore, after))
+                break;
+        }
+
+        // It is more worrisome to permit witnessing an accepted proposal before we have persisted it,
+        // because this has more tangible effects on the recipient, but again it is safe: either it is
+        //  - witnessed to reject (which is always safe, as it prevents rather than creates an outcome); or
+        //  - witnessed as an in-progress proposal, in which case, for there to be any effect on the state,
+        //    the proposal must be re-proposed on its own terms and persisted by the re-proposer,
+        //    so it remains a non-issue
+        Tracing.trace("Accepting proposal {}", proposal);
+        SystemKeyspace.savePaxosProposal(proposal);
+        return null;
+    }
+
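+    // Applies an agreed commit while this PaxosState is held open: applyCommit persists the base-table mutation
+    // and the paxos commit first, then the post-commit hook merges it into our in-memory snapshot.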
+    public void commit(Agreed commit)
+    {
+        applyCommit(commit, this, (apply, to) ->
+            currentUpdater.accumulateAndGet(to, new UnsafeSnapshot(apply), Snapshot::merge)
+        );
+    }
+
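+    // Variant for callers that do not already hold a PaxosState: durable effects are identical, but the in-memory
+    // snapshot is only updated if an entry for the key happens to be cached (tryGetUnsafe returning non-null).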
+    public static void commitDirect(Commit commit)
+    {
+        applyCommit(commit, null, (apply, ignore) -> {
+            try (PaxosState state = tryGetUnsafe(apply.update.partitionKey(), apply.update.metadata()))
+            {
+                if (state != null)
+                    currentUpdater.accumulateAndGet(state, new UnsafeSnapshot(apply), Snapshot::merge);
+            }
+        });
+    }
+
+    private static void applyCommit(Commit commit, PaxosState state, BiConsumer<Commit, PaxosState> postCommit)
+    {
+        if (paxosStatePurging() == legacy && !(commit instanceof CommittedWithTTL))
+            commit = CommittedWithTTL.withDefaultTTL(commit);
+
+        long start = nanoTime();
+        try
+        {
+            // TODO: run Paxos Repair before truncate so we can excise this
             // The table may have been truncated since the proposal was initiated. In that case, we
             // don't want to perform the mutation and potentially resurrect truncated data
-            if (UUIDGen.unixTimestamp(proposal.ballot) >= SystemKeyspace.getTruncatedAt(proposal.update.metadata().id))
+            if (commit.ballot.unixMicros() >= SystemKeyspace.getTruncatedAt(commit.update.metadata().id))
             {
-                Tracing.trace("Committing proposal {}", proposal);
-                Mutation mutation = proposal.makeMutation();
+                Tracing.trace("Committing proposal {}", commit);
+                Mutation mutation = commit.makeMutation();
                 Keyspace.open(mutation.getKeyspaceName()).apply(mutation, true);
             }
             else
             {
-                Tracing.trace("Not committing proposal {} as ballot timestamp predates last truncation time", proposal);
+                Tracing.trace("Not committing proposal {} as ballot timestamp predates last truncation time", commit);
             }
-            // We don't need to lock, we're just blindly updating
-            SystemKeyspace.savePaxosCommit(proposal);
+
+            // for commits we save to disk first, because we can; even here though it is safe to permit later events to
+            // witness the state before it is persisted. The only tricky situation is that we use the witnessing of
+            // a quorum of nodes having witnessed the latest commit to decide if we need to disseminate a commit
+            // again before proceeding with any new operation, but in this case we have already persisted the relevant
+            // information, namely the base table mutation.  So this fact is persistent, even if knowledge of this fact
+            // is not (and if this is lost, it may only lead to a future operation unnecessarily committing again)
+            SystemKeyspace.savePaxosCommit(commit);
+            postCommit.accept(commit, state);
         }
         finally
         {
-            Keyspace.open(proposal.update.metadata().keyspace).getColumnFamilyStore(proposal.update.metadata().id).metric.casCommit.addNano(System.nanoTime() - start);
+            Keyspace.openAndGetStore(commit.update.metadata()).metric.casCommit.addNano(nanoTime() - start);
         }
     }
+
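+    // The legacy* entry points appear to back the original (pre-v2) Paxos prepare/propose verbs for mixed-version
+    // operation: they serialize on the per-key monitor and lazily load state via maybeLoad() rather than using the
+    // promise/accept paths above.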
+    public static PrepareResponse legacyPrepare(Commit toPrepare)
+    {
+        long start = nanoTime();
+        try (PaxosState unsafeState = getUnsafe(toPrepare))
+        {
+            synchronized (unsafeState.key)
+            {
+                unsafeState.maybeLoad();
+                assert unsafeState.current != null;
+
+                while (true)
+                {
+                    // ignore nowInSec when merging as this can only be an issue during the transition period, so the unbounded
+                    // problem of CASSANDRA-12043 is not an issue
+                    Snapshot realBefore = unsafeState.current;
+                    Snapshot before = realBefore.removeExpired((int)toPrepare.ballot.unix(SECONDS));
+                    Ballot latest = before.latestWitnessedOrLowBound();
+                    if (toPrepare.isAfter(latest))
+                    {
+                        Snapshot after = new Snapshot(toPrepare.ballot, toPrepare.ballot, realBefore.accepted, realBefore.committed);
+                        if (currentUpdater.compareAndSet(unsafeState, realBefore, after))
+                        {
+                            Tracing.trace("Promising ballot {}", toPrepare.ballot);
+                            DecoratedKey partitionKey = toPrepare.update.partitionKey();
+                            TableMetadata metadata = toPrepare.update.metadata();
+                            SystemKeyspace.savePaxosWritePromise(partitionKey, metadata, toPrepare.ballot);
+                            return new PrepareResponse(true, before.accepted == null ? Accepted.none(partitionKey, metadata) : before.accepted, before.committed);
+                        }
+                    }
+                    else
+                    {
+                        Tracing.trace("Promise rejected; {} is not sufficiently newer than {}", toPrepare, before.promised);
+                        // return the currently promised ballot (not the last accepted one) so the coordinator can make sure it uses newer ballot next time (#5667)
+                        return new PrepareResponse(false, new Commit(before.promised, toPrepare.update), before.committed);
+                    }
+                }
+            }
+        }
+        finally
+        {
+            Keyspace.openAndGetStore(toPrepare.update.metadata()).metric.casPrepare.addNano(nanoTime() - start);
+        }
+    }
+
+    public static Boolean legacyPropose(Commit proposal)
+    {
+        if (paxosStatePurging() == legacy && !(proposal instanceof AcceptedWithTTL))
+            proposal = AcceptedWithTTL.withDefaultTTL(proposal);
+
+        long start = nanoTime();
+        try (PaxosState unsafeState = getUnsafe(proposal))
+        {
+            synchronized (unsafeState.key)
+            {
+                unsafeState.maybeLoad();
+                assert unsafeState.current != null;
+
+                while (true)
+                {
+                    Snapshot realBefore = unsafeState.current;
+                    Snapshot before = realBefore.removeExpired((int)proposal.ballot.unix(SECONDS));
+                    boolean accept = proposal.isSameOrAfter(before.latestWitnessedOrLowBound());
+                    if (accept)
+                    {
+                        if (proposal.hasSameBallot(before.committed) ||
+                            currentUpdater.compareAndSet(unsafeState, realBefore,
+                                                         new Snapshot(realBefore.promised, realBefore.promisedWrite,
+                                                                      new Accepted(proposal), realBefore.committed)))
+                        {
+                            Tracing.trace("Accepting proposal {}", proposal);
+                            SystemKeyspace.savePaxosProposal(proposal);
+                            return true;
+                        }
+                    }
+                    else
+                    {
+                        Tracing.trace("Rejecting proposal for {} because inProgress is now {}", proposal, before.promised);
+                        return false;
+                    }
+                }
+            }
+        }
+        finally
+        {
+            Keyspace.openAndGetStore(proposal.update.metadata()).metric.casPropose.addNano(nanoTime() - start);
+        }
+    }
+
+    public static void unsafeReset()
+    {
+        ACTIVE.clear();
+        RECENT.clear();
+        ballotTracker().truncate();
+    }
+
+    @SuppressWarnings("resource")
+    public static Snapshot unsafeGetIfPresent(DecoratedKey partitionKey, TableMetadata metadata)
+    {
+        Key key = new Key(partitionKey, metadata);
+        PaxosState cur = ACTIVE.get(key);
+        if (cur != null) return cur.current;
+        return RECENT.get(key);
+    }
 }
diff --git a/src/java/org/apache/cassandra/service/paxos/PrepareCallback.java b/src/java/org/apache/cassandra/service/paxos/PrepareCallback.java
deleted file mode 100644
index 93941e9..0000000
--- a/src/java/org/apache/cassandra/service/paxos/PrepareCallback.java
+++ /dev/null
@@ -1,108 +0,0 @@
-package org.apache.cassandra.service.paxos;
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
-
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.db.DecoratedKey;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.utils.UUIDGen;
-
-public class PrepareCallback extends AbstractPaxosCallback<PrepareResponse>
-{
-    private static final Logger logger = LoggerFactory.getLogger(PrepareCallback.class);
-
-    public boolean promised = true;
-    public Commit mostRecentCommit;
-    public Commit mostRecentInProgressCommit;
-
-    private final Map<InetAddressAndPort, Commit> commitsByReplica = new ConcurrentHashMap<>();
-
-    public PrepareCallback(DecoratedKey key, TableMetadata metadata, int targets, ConsistencyLevel consistency, long queryStartNanoTime)
-    {
-        super(targets, consistency, queryStartNanoTime);
-        // need to inject the right key in the empty commit so comparing with empty commits in the response works as expected
-        mostRecentCommit = Commit.emptyCommit(key, metadata);
-        mostRecentInProgressCommit = Commit.emptyCommit(key, metadata);
-    }
-
-    public synchronized void onResponse(Message<PrepareResponse> message)
-    {
-        PrepareResponse response = message.payload;
-        logger.trace("Prepare response {} from {}", response, message.from());
-
-        // We set the mostRecentInProgressCommit even if we're not promised as, in that case, the ballot of that commit
-        // will be used to avoid generating a ballot that has not chance to win on retry (think clock skew).
-        if (response.inProgressCommit.isAfter(mostRecentInProgressCommit))
-            mostRecentInProgressCommit = response.inProgressCommit;
-
-        if (!response.promised)
-        {
-            promised = false;
-            while (latch.getCount() > 0)
-                latch.countDown();
-            return;
-        }
-
-        commitsByReplica.put(message.from(), response.mostRecentCommit);
-        if (response.mostRecentCommit.isAfter(mostRecentCommit))
-            mostRecentCommit = response.mostRecentCommit;
-
-        latch.countDown();
-    }
-
-    public Iterable<InetAddressAndPort> replicasMissingMostRecentCommit(TableMetadata metadata, int nowInSec)
-    {
-        // In general, we need every replicas that have answered to the prepare (a quorum) to agree on the MRC (see
-        // coment in StorageProxy.beginAndRepairPaxos(), but basically we need to make sure at least a quorum of nodes
-        // have learn a commit before commit a new one otherwise that previous commit is not guaranteed to have reach a
-        // quorum and further commit may proceed on incomplete information).
-        // However, if that commit is too hold, it may have been expired from some of the replicas paxos table (we don't
-        // keep the paxos state forever or that could grow unchecked), and we could end up in some infinite loop as
-        // explained on CASSANDRA-12043. To avoid that, we ignore a MRC that is too old, i.e. older than the TTL we set
-        // on paxos tables. For such old commit, we rely on hints and repair to ensure the commit has indeed be
-        // propagated to all nodes.
-        long paxosTtlSec = SystemKeyspace.paxosTtlSec(metadata);
-        if (UUIDGen.unixTimestampInSec(mostRecentCommit.ballot) + paxosTtlSec < nowInSec)
-            return Collections.emptySet();
-
-        return Iterables.filter(commitsByReplica.keySet(), new Predicate<InetAddressAndPort>()
-        {
-            public boolean apply(InetAddressAndPort inetAddress)
-            {
-                return (!commitsByReplica.get(inetAddress).ballot.equals(mostRecentCommit.ballot));
-            }
-        });
-    }
-}
diff --git a/src/java/org/apache/cassandra/service/paxos/PrepareVerbHandler.java b/src/java/org/apache/cassandra/service/paxos/PrepareVerbHandler.java
deleted file mode 100644
index 157630f..0000000
--- a/src/java/org/apache/cassandra/service/paxos/PrepareVerbHandler.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package org.apache.cassandra.service.paxos;
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-import org.apache.cassandra.net.IVerbHandler;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-
-public class PrepareVerbHandler implements IVerbHandler<Commit>
-{
-    public static PrepareVerbHandler instance = new PrepareVerbHandler();
-
-    public static PrepareResponse doPrepare(Commit toPrepare)
-    {
-        return PaxosState.prepare(toPrepare);
-    }
-
-    public void doVerb(Message<Commit> message)
-    {
-        Message<PrepareResponse> reply = message.responseWith(doPrepare(message.payload));
-        MessagingService.instance().send(reply, message.from());
-    }
-}
diff --git a/src/java/org/apache/cassandra/service/paxos/ProposeCallback.java b/src/java/org/apache/cassandra/service/paxos/ProposeCallback.java
deleted file mode 100644
index 7e755a0..0000000
--- a/src/java/org/apache/cassandra/service/paxos/ProposeCallback.java
+++ /dev/null
@@ -1,93 +0,0 @@
-package org.apache.cassandra.service.paxos;
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.net.Message;
-
-/**
- * ProposeCallback has two modes of operation, controlled by the failFast parameter.
- *
- * In failFast mode, we will return a failure as soon as a majority of nodes reject
- * the proposal. This is used when replaying a proposal from an earlier leader.
- *
- * Otherwise, we wait for either all replicas to respond or until we achieve
- * the desired quorum. We continue to wait for all replicas even after we know we cannot succeed
- * because we need to know if no node at all have accepted or if at least one has.
- * In the former case, a proposer is guaranteed no-one will
- * replay its value; in the latter we don't, so we must timeout in case another
- * leader replays it before we can; see CASSANDRA-6013
- */
-public class ProposeCallback extends AbstractPaxosCallback<Boolean>
-{
-    private static final Logger logger = LoggerFactory.getLogger(ProposeCallback.class);
-
-    private final AtomicInteger accepts = new AtomicInteger(0);
-    private final int requiredAccepts;
-    private final boolean failFast;
-
-    public ProposeCallback(int totalTargets, int requiredTargets, boolean failFast, ConsistencyLevel consistency, long queryStartNanoTime)
-    {
-        super(totalTargets, consistency, queryStartNanoTime);
-        this.requiredAccepts = requiredTargets;
-        this.failFast = failFast;
-    }
-
-    public void onResponse(Message<Boolean> msg)
-    {
-        logger.trace("Propose response {} from {}", msg.payload, msg.from());
-
-        if (msg.payload)
-            accepts.incrementAndGet();
-
-        latch.countDown();
-
-        if (isSuccessful() || (failFast && (latch.getCount() + accepts.get() < requiredAccepts)))
-        {
-            while (latch.getCount() > 0)
-                latch.countDown();
-        }
-    }
-
-    public int getAcceptCount()
-    {
-        return accepts.get();
-    }
-
-    public boolean isSuccessful()
-    {
-        return accepts.get() >= requiredAccepts;
-    }
-
-    // Note: this is only reliable if !failFast
-    public boolean isFullyRefused()
-    {
-        // We need to check the latch first to avoid racing with a late arrival
-        // between the latch check and the accepts one
-        return latch.getCount() == 0 && accepts.get() == 0;
-    }
-}
diff --git a/src/java/org/apache/cassandra/service/paxos/ProposeVerbHandler.java b/src/java/org/apache/cassandra/service/paxos/ProposeVerbHandler.java
deleted file mode 100644
index 5a20b67..0000000
--- a/src/java/org/apache/cassandra/service/paxos/ProposeVerbHandler.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package org.apache.cassandra.service.paxos;
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-import org.apache.cassandra.net.IVerbHandler;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-
-public class ProposeVerbHandler implements IVerbHandler<Commit>
-{
-    public static final ProposeVerbHandler instance = new ProposeVerbHandler();
-
-    public static Boolean doPropose(Commit proposal)
-    {
-        return PaxosState.propose(proposal);
-    }
-
-    public void doVerb(Message<Commit> message)
-    {
-        Message<Boolean> reply = message.responseWith(doPropose(message.payload));
-        MessagingService.instance().send(reply, message.from());
-    }
-}
diff --git a/src/java/org/apache/cassandra/service/paxos/TablePaxosRepairHistory.java b/src/java/org/apache/cassandra/service/paxos/TablePaxosRepairHistory.java
new file mode 100644
index 0000000..81e0b14
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/TablePaxosRepairHistory.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.util.Collection;
+
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+
+public class TablePaxosRepairHistory
+{
+    private final String keyspace;
+    private final String table;
+    private volatile PaxosRepairHistory history;
+
+    private TablePaxosRepairHistory(String keyspace, String table, PaxosRepairHistory history)
+    {
+        this.keyspace = keyspace;
+        this.table = table;
+        this.history = history;
+    }
+
+    public static TablePaxosRepairHistory load(String keyspace, String table)
+    {
+        return new TablePaxosRepairHistory(keyspace, table, SystemKeyspace.loadPaxosRepairHistory(keyspace, table));
+    }
+
+    public Ballot getBallotForToken(Token token)
+    {
+        return history.ballotForToken(token);
+    }
+
+    private void updatePaxosRepairTable(PaxosRepairHistory update, boolean flush)
+    {
+        SystemKeyspace.savePaxosRepairHistory(keyspace, table, update, flush);
+    }
+
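+    // Writers (add/merge) are serialized on the instance monitor and persist the updated history before publishing
+    // it through the volatile field, so lock-free readers (getBallotForToken/getHistory) never observe a history
+    // that has not been written to the system table.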
+    public synchronized void add(Collection<Range<Token>> ranges, Ballot ballot, boolean flush)
+    {
+        PaxosRepairHistory update = PaxosRepairHistory.add(history, ranges, ballot);
+        updatePaxosRepairTable(update, flush);
+        history = update;
+    }
+
+    public synchronized void merge(PaxosRepairHistory toMerge, boolean flush)
+    {
+        PaxosRepairHistory update = PaxosRepairHistory.merge(history, toMerge);
+        if (!update.equals(history))
+            updatePaxosRepairTable(update, flush);
+        history = update;
+    }
+
+    public PaxosRepairHistory getHistory()
+    {
+        return history;
+    }
+
+    public PaxosRepairHistory getHistoryForRanges(Collection<Range<Token>> ranges)
+    {
+        return PaxosRepairHistory.trim(history, ranges);
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanup.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanup.java
new file mode 100644
index 0000000..6eb1ebd
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanup.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.function.Consumer;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.RangesAtEndpoint;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+import org.apache.cassandra.utils.concurrent.Future;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.config.DatabaseDescriptor.getCasContentionTimeout;
+import static org.apache.cassandra.config.DatabaseDescriptor.getWriteRpcTimeout;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+
+public class PaxosCleanup extends AsyncFuture<Void> implements Runnable
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosCleanup.class);
+
+    private final Collection<InetAddressAndPort> endpoints;
+    private final TableMetadata table;
+    private final Collection<Range<Token>> ranges;
+    private final boolean skippedReplicas;
+    private final Executor executor;
+
+    // references kept for debugging
+    private PaxosStartPrepareCleanup startPrepare;
+    private PaxosFinishPrepareCleanup finishPrepare;
+    private PaxosCleanupSession session;
+    private PaxosCleanupComplete complete;
+
+    public PaxosCleanup(Collection<InetAddressAndPort> endpoints, TableMetadata table, Collection<Range<Token>> ranges, boolean skippedReplicas, Executor executor)
+    {
+        this.endpoints = endpoints;
+        this.table = table;
+        this.ranges = ranges;
+        this.skippedReplicas = skippedReplicas;
+        this.executor = executor;
+    }
+
+    private <T> void addCallback(Future<T> future, Consumer<T> onComplete)
+    {
+        future.addCallback(onComplete, this::tryFailure);
+    }
+
+    public static PaxosCleanup cleanup(Collection<InetAddressAndPort> endpoints, TableMetadata table, Collection<Range<Token>> ranges, boolean skippedReplicas, Executor executor)
+    {
+        PaxosCleanup cleanup = new PaxosCleanup(endpoints, table, ranges, skippedReplicas, executor);
+        executor.execute(cleanup);
+        return cleanup;
+    }
+
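+    // run() starts a chained pipeline over the futures above: startPrepare collects each endpoint's repair history
+    // (and a high bound ballot), finishPrepare runs after a short delay (the smaller of the CAS contention and write
+    // RPC timeouts), the cleanup session then repairs uncommitted keys, and finish publishes that ballot as the new
+    // low bound via PaxosCleanupComplete.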
+    public void run()
+    {
+        EndpointState localEpState = Gossiper.instance.getEndpointStateForEndpoint(getBroadcastAddressAndPort());
+        startPrepare = PaxosStartPrepareCleanup.prepare(table.id, endpoints, localEpState, ranges);
+        addCallback(startPrepare, this::finishPrepare);
+    }
+
+    private void finishPrepare(PaxosCleanupHistory result)
+    {
+        ScheduledExecutors.nonPeriodicTasks.schedule(() -> {
+            finishPrepare = PaxosFinishPrepareCleanup.finish(endpoints, result);
+            addCallback(finishPrepare, (v) -> startSession(result.highBound));
+        }, Math.min(getCasContentionTimeout(MILLISECONDS), getWriteRpcTimeout(MILLISECONDS)), MILLISECONDS);
+    }
+
+    private void startSession(Ballot lowBound)
+    {
+        session = new PaxosCleanupSession(endpoints, table.id, ranges);
+        addCallback(session, (v) -> finish(lowBound));
+        executor.execute(session);
+    }
+
+    private void finish(Ballot lowBound)
+    {
+        complete = new PaxosCleanupComplete(endpoints, table.id, ranges, lowBound, skippedReplicas);
+        addCallback(complete, this::trySuccess);
+        executor.execute(complete);
+    }
+
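+    // Pending ranges are folded into the local ranges below so that a node in the process of acquiring a range
+    // does not reject cleanup requests for data it is about to own.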
+    private static boolean isOutOfRange(String ksName, Collection<Range<Token>> repairRanges)
+    {
+        Keyspace keyspace = Keyspace.open(ksName);
+        List<Range<Token>> localRanges = Range.normalize(keyspace.getReplicationStrategy()
+                                                                 .getAddressReplicas()
+                                                                 .get(FBUtilities.getBroadcastAddressAndPort())
+                                                                 .ranges());
+
+        RangesAtEndpoint pendingRanges = StorageService.instance.getTokenMetadata().getPendingRanges(ksName, FBUtilities.getBroadcastAddressAndPort());
+        if (!pendingRanges.isEmpty())
+        {
+            localRanges.addAll(pendingRanges.ranges());
+            localRanges = Range.normalize(localRanges);
+        }
+
+        for (Range<Token> repairRange : Range.normalize(repairRanges))
+        {
+            if (!Iterables.any(localRanges, localRange -> localRange.contains(repairRange)))
+                return true;
+        }
+        return false;
+    }
+
+    static boolean isInRangeAndShouldProcess(Collection<Range<Token>> ranges, TableId tableId)
+    {
+        TableMetadata metadata = Schema.instance.getTableMetadata(tableId);
+
+        Keyspace keyspace = Keyspace.open(metadata.keyspace);
+        Preconditions.checkNotNull(keyspace);
+
+        if (!isOutOfRange(metadata.keyspace, ranges))
+            return true;
+
+        logger.warn("Out of range PaxosCleanup request for {}: {}", metadata, ranges);
+        return false;
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupComplete.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupComplete.java
new file mode 100644
index 0000000..0196e9c
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupComplete.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.*;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+
+import static org.apache.cassandra.config.DatabaseDescriptor.getPartitioner;
+import static org.apache.cassandra.net.NoPayload.noPayload;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_COMPLETE_REQ;
+
+public class PaxosCleanupComplete extends AsyncFuture<Void> implements RequestCallbackWithFailure<Void>, Runnable
+{
+    private final Set<InetAddressAndPort> waitingResponse;
+    final TableId tableId;
+    final Collection<Range<Token>> ranges;
+    final Ballot lowBound;
+    final boolean skippedReplicas;
+
+    PaxosCleanupComplete(Collection<InetAddressAndPort> endpoints, TableId tableId, Collection<Range<Token>> ranges, Ballot lowBound, boolean skippedReplicas)
+    {
+        this.waitingResponse = new HashSet<>(endpoints);
+        this.tableId = tableId;
+        this.ranges = ranges;
+        this.lowBound = lowBound;
+        this.skippedReplicas = skippedReplicas;
+    }
+
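+    // When replicas were skipped, an "empty" completion (Ballot.none() and no ranges) is sent instead, so
+    // recipients still acknowledge the message but should not advance their paxos repair low bound for this table.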
+    public synchronized void run()
+    {
+        Request request = !skippedReplicas ? new Request(tableId, lowBound, ranges)
+                                           : new Request(tableId, Ballot.none(), Collections.emptyList());
+        Message<Request> message = Message.out(PAXOS2_CLEANUP_COMPLETE_REQ, request);
+        for (InetAddressAndPort endpoint : waitingResponse)
+            MessagingService.instance().sendWithCallback(message, endpoint, this);
+    }
+
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        tryFailure(new PaxosCleanupException("Timed out waiting on response from " + from));
+    }
+
+    @Override
+    public synchronized void onResponse(Message<Void> msg)
+    {
+        if (isDone())
+            return;
+
+        if (!waitingResponse.remove(msg.from()))
+            throw new IllegalArgumentException("Received unexpected response from " + msg.from());
+
+        if (waitingResponse.isEmpty())
+            trySuccess(null);
+    }
+
+    static class Request
+    {
+        final TableId tableId;
+        final Ballot lowBound;
+        final Collection<Range<Token>> ranges;
+
+        Request(TableId tableId, Ballot lowBound, Collection<Range<Token>> ranges)
+        {
+            this.tableId = tableId;
+            this.ranges = ranges;
+            this.lowBound = lowBound;
+        }
+    }
+
+    public static final IVersionedSerializer<Request> serializer = new IVersionedSerializer<Request>()
+    {
+        public void serialize(Request request, DataOutputPlus out, int version) throws IOException
+        {
+            request.tableId.serialize(out);
+            request.lowBound.serialize(out);
+            out.writeInt(request.ranges.size());
+            for (Range<Token> rt : request.ranges)
+                AbstractBounds.tokenSerializer.serialize(rt, out, version);
+        }
+
+        public Request deserialize(DataInputPlus in, int version) throws IOException
+        {
+            TableId tableId = TableId.deserialize(in);
+            Ballot lowBound = Ballot.deserialize(in);
+            int numRanges = in.readInt();
+            List<Range<Token>> ranges = new ArrayList<>();
+            for (int i = 0; i < numRanges; i++)
+            {
+                Range<Token> range = (Range<Token>) AbstractBounds.tokenSerializer.deserialize(in, getPartitioner(), version);
+                ranges.add(range);
+            }
+            return new Request(tableId, lowBound, ranges);
+        }
+
+        public long serializedSize(Request request, int version)
+        {
+            long size = request.tableId.serializedSize();
+            size += Ballot.sizeInBytes();
+            size += TypeSizes.sizeof(request.ranges.size());
+            for (Range<Token> range : request.ranges)
+                size += AbstractBounds.tokenSerializer.serializedSize(range, version);
+            return size;
+        }
+    };
+
+    public static final IVerbHandler<Request> verbHandler = (in) -> {
+        ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(in.payload.tableId);
+        cfs.onPaxosRepairComplete(in.payload.ranges, in.payload.lowBound);
+        MessagingService.instance().respond(noPayload, in);
+    };
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupException.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupException.java
new file mode 100644
index 0000000..9eb8fbe
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+class PaxosCleanupException extends RuntimeException
+{
+    PaxosCleanupException(String message)
+    {
+        super(message);
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupHistory.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupHistory.java
new file mode 100644
index 0000000..70b4099
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupHistory.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.io.IOException;
+
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+
+public class PaxosCleanupHistory
+{
+    final TableId tableId;
+    final Ballot highBound;
+    final PaxosRepairHistory history;
+
+    public PaxosCleanupHistory(TableId tableId, Ballot highBound, PaxosRepairHistory history)
+    {
+        this.tableId = tableId;
+        this.highBound = highBound;
+        this.history = history;
+    }
+
+    public static final IVersionedSerializer<PaxosCleanupHistory> serializer = new IVersionedSerializer<PaxosCleanupHistory>()
+    {
+        public void serialize(PaxosCleanupHistory message, DataOutputPlus out, int version) throws IOException
+        {
+            message.tableId.serialize(out);
+            message.highBound.serialize(out);
+            PaxosRepairHistory.serializer.serialize(message.history, out, version);
+        }
+
+        public PaxosCleanupHistory deserialize(DataInputPlus in, int version) throws IOException
+        {
+            TableId tableId = TableId.deserialize(in);
+            Ballot highBound = Ballot.deserialize(in);
+            PaxosRepairHistory history = PaxosRepairHistory.serializer.deserialize(in, version);
+            return new PaxosCleanupHistory(tableId, highBound, history);
+        }
+
+        public long serializedSize(PaxosCleanupHistory message, int version)
+        {
+            long size = message.tableId.serializedSize();
+            size += Ballot.sizeInBytes();
+            size += PaxosRepairHistory.serializer.serializedSize(message.history, version);
+            return size;
+        }
+    };
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupLocalCoordinator.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupLocalCoordinator.java
new file mode 100644
index 0000000..7b88276
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupLocalCoordinator.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.AbstractPaxosRepair;
+import org.apache.cassandra.service.paxos.PaxosRepair;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.service.paxos.uncommitted.UncommittedPaxosKey;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+
+import static org.apache.cassandra.service.paxos.cleanup.PaxosCleanupSession.TIMEOUT_NANOS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public class PaxosCleanupLocalCoordinator extends AsyncFuture<PaxosCleanupResponse>
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosCleanupLocalCoordinator.class);
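+    // All-zero session id used for locally initiated cleanups (createForAutoRepair) that are not driven by a
+    // remote PaxosCleanupSession.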
+    private static final UUID INTERNAL_SESSION = new UUID(0, 0);
+
+    private final UUID session;
+    private final TableId tableId;
+    private final TableMetadata table;
+    private final Collection<Range<Token>> ranges;
+    private final CloseableIterator<UncommittedPaxosKey> uncommittedIter;
+    private int count = 0;
+    private final long deadline;
+
+    private final Map<DecoratedKey, AbstractPaxosRepair> inflight = new ConcurrentHashMap<>();
+    private final PaxosTableRepairs tableRepairs;
+
+    private PaxosCleanupLocalCoordinator(UUID session, TableId tableId, Collection<Range<Token>> ranges, CloseableIterator<UncommittedPaxosKey> uncommittedIter)
+    {
+        this.session = session;
+        this.tableId = tableId;
+        this.table = Schema.instance.getTableMetadata(tableId);
+        this.ranges = ranges;
+        this.uncommittedIter = uncommittedIter;
+        this.tableRepairs = PaxosTableRepairs.getForTable(tableId);
+        this.deadline = TIMEOUT_NANOS + nanoTime();
+    }
+
+    public synchronized void start()
+    {
+        if (table == null)
+        {
+            fail("Unknown tableId: " + tableId);
+            return;
+        }
+
+        if (!PaxosRepair.validatePeerCompatibility(table, ranges))
+        {
+            fail("Unsupported peer versions for " + tableId + ' ' + ranges.toString());
+            return;
+        }
+
+        logger.info("Completing uncommitted paxos instances for {} on ranges {} for session {}", table, ranges, session);
+
+        scheduleKeyRepairsOrFinish();
+    }
+
+    @SuppressWarnings("resource")
+    public static PaxosCleanupLocalCoordinator create(PaxosCleanupRequest request)
+    {
+        CloseableIterator<UncommittedPaxosKey> iterator = PaxosState.uncommittedTracker().uncommittedKeyIterator(request.tableId, request.ranges);
+        return new PaxosCleanupLocalCoordinator(request.session, request.tableId, request.ranges, iterator);
+    }
+
+    @SuppressWarnings("resource")
+    public static PaxosCleanupLocalCoordinator createForAutoRepair(TableId tableId, Collection<Range<Token>> ranges)
+    {
+        CloseableIterator<UncommittedPaxosKey> iterator = PaxosState.uncommittedTracker().uncommittedKeyIterator(tableId, ranges);
+        return new PaxosCleanupLocalCoordinator(INTERNAL_SESSION, tableId, ranges, iterator);
+    }
+
+    /**
+     * Schedule as many key repairs as we can, up to the parallelism limit. If no repairs are scheduled and
+     * none are in flight when the iterator is exhausted, the session will be finished
+     */
+    private void scheduleKeyRepairsOrFinish()
+    {
+        int parallelism = DatabaseDescriptor.getPaxosRepairParallelism();
+        Preconditions.checkArgument(parallelism > 0);
+        if (inflight.size() < parallelism)
+        {
+            if (nanoTime() - deadline >= 0)
+            {
+                fail("timeout");
+                return;
+            }
+
+            while (inflight.size() < parallelism && uncommittedIter.hasNext())
+                repairKey(uncommittedIter.next());
+
+        }
+
+        if (inflight.isEmpty())
+            finish();
+    }
+
+    private boolean repairKey(UncommittedPaxosKey uncommitted)
+    {
+        logger.trace("repairing {}", uncommitted);
+        Preconditions.checkState(!inflight.containsKey(uncommitted.getKey()));
+        ConsistencyLevel consistency = uncommitted.getConsistencyLevel();
+
+        // we don't know the consistency of this operation, presumably because it originated
+        // before we started tracking paxos cl, so we don't attempt to repair it
+        if (consistency == null)
+            return false;
+
+        inflight.put(uncommitted.getKey(), tableRepairs.startOrGetOrQueue(uncommitted.getKey(), uncommitted.ballot(), uncommitted.getConsistencyLevel(), table, result -> {
+            if (result.wasSuccessful())
+                onKeyFinish(uncommitted.getKey());
+            else
+                onKeyFailure(result.toString());
+        }));
+        return true;
+    }
+
+    private synchronized void onKeyFinish(DecoratedKey key)
+    {
+        if (!inflight.containsKey(key))
+            return;
+        logger.trace("finished repairing {}", key);
+        inflight.remove(key);
+        count++;
+
+        scheduleKeyRepairsOrFinish();
+    }
+
+    private void complete(PaxosCleanupResponse response)
+    {
+        uncommittedIter.close();
+        trySuccess(response);
+    }
+
+    private void onKeyFailure(String reason)
+    {
+        // not synchronized to avoid deadlock with callback we register on start
+        inflight.values().forEach(AbstractPaxosRepair::cancel);
+        fail(reason);
+    }
+
+    private synchronized void fail(String reason)
+    {
+        logger.info("Failing paxos cleanup session {} for {} on ranges {}. Reason: {}", session, table, ranges, reason);
+        complete(PaxosCleanupResponse.failed(session, reason));
+    }
+
+    private void finish()
+    {
+        logger.info("Completed {} uncommitted paxos instances for {} on ranges {} for session {}", count, table, ranges, session);
+        complete(PaxosCleanupResponse.success(session));
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupRequest.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupRequest.java
new file mode 100644
index 0000000..4db457f
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupRequest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import javax.annotation.Nullable;
+
+import com.google.common.util.concurrent.FutureCallback;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.utils.UUIDSerializer;
+
+import static org.apache.cassandra.net.MessagingService.instance;
+import static org.apache.cassandra.net.NoPayload.noPayload;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_RSP2;
+
+// TODO: send the high bound as a minimum commit point, so later repairs can terminate early if a later commit has been witnessed
+public class PaxosCleanupRequest
+{
+    public final UUID session;
+    public final TableId tableId;
+    public final Collection<Range<Token>> ranges;
+
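+    // A null or empty range collection is interpreted as "the whole ring", expressed as the wrap-around
+    // (minToken, minToken] range.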
+    static Collection<Range<Token>> rangesOrMin(Collection<Range<Token>> ranges)
+    {
+        if (ranges != null && !ranges.isEmpty())
+            return ranges;
+
+        Token min = DatabaseDescriptor.getPartitioner().getMinimumToken();
+        return Collections.singleton(new Range<>(min, min));
+    }
+
+    public PaxosCleanupRequest(UUID session, TableId tableId, Collection<Range<Token>> ranges)
+    {
+        this.session = session;
+        this.tableId = tableId;
+        this.ranges = rangesOrMin(ranges);
+    }
+
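+    // The handler responds twice: an immediate empty ack so the remote session knows the request was received and
+    // started, and later a separate PAXOS2_CLEANUP_RSP2 message carrying the local coordinator's success or failure.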
+    public static final IVerbHandler<PaxosCleanupRequest> verbHandler = in -> {
+        PaxosCleanupRequest request = in.payload;
+
+        if (!PaxosCleanup.isInRangeAndShouldProcess(request.ranges, request.tableId))
+        {
+            String msg = String.format("Rejecting cleanup request %s from %s. Some ranges are not replicated (%s)",
+                                       request.session, in.from(), request.ranges);
+            Message<PaxosCleanupResponse> response = Message.out(PAXOS2_CLEANUP_RSP2, PaxosCleanupResponse.failed(request.session, msg));
+            instance().send(response, in.respondTo());
+            return;
+        }
+
+        PaxosCleanupLocalCoordinator coordinator = PaxosCleanupLocalCoordinator.create(request);
+
+        coordinator.addCallback(new FutureCallback<PaxosCleanupResponse>()
+        {
+            public void onSuccess(@Nullable PaxosCleanupResponse finished)
+            {
+                Message<PaxosCleanupResponse> response = Message.out(PAXOS2_CLEANUP_RSP2, coordinator.getNow());
+                instance().send(response, in.respondTo());
+            }
+
+            public void onFailure(Throwable throwable)
+            {
+                Message<PaxosCleanupResponse> response = Message.out(PAXOS2_CLEANUP_RSP2, PaxosCleanupResponse.failed(request.session, throwable.getMessage()));
+                instance().send(response, in.respondTo());
+            }
+        });
+
+        // ack the request so the coordinator knows we've started
+        instance().respond(noPayload, in);
+
+        coordinator.start();
+    };
+
+    public static final IVersionedSerializer<PaxosCleanupRequest> serializer = new IVersionedSerializer<PaxosCleanupRequest>()
+    {
+        public void serialize(PaxosCleanupRequest completer, DataOutputPlus out, int version) throws IOException
+        {
+            UUIDSerializer.serializer.serialize(completer.session, out, version);
+            completer.tableId.serialize(out);
+            out.writeInt(completer.ranges.size());
+            for (Range<Token> range: completer.ranges)
+                AbstractBounds.tokenSerializer.serialize(range, out, version);
+        }
+
+        public PaxosCleanupRequest deserialize(DataInputPlus in, int version) throws IOException
+        {
+            UUID session = UUIDSerializer.serializer.deserialize(in, version);
+            TableId tableId = TableId.deserialize(in);
+
+            int numRanges = in.readInt();
+            List<Range<Token>> ranges = new ArrayList<>(numRanges);
+            for (int i = 0; i < numRanges; i++)
+            {
+                ranges.add((Range<Token>) AbstractBounds.tokenSerializer.deserialize(in, DatabaseDescriptor.getPartitioner(), version));
+            }
+            return new PaxosCleanupRequest(session, tableId, ranges);
+        }
+
+        public long serializedSize(PaxosCleanupRequest completer, int version)
+        {
+            long size = UUIDSerializer.serializer.serializedSize(completer.session, version);
+            size += completer.tableId.serializedSize();
+            size += TypeSizes.sizeof(completer.ranges.size());
+            for (Range<Token> range: completer.ranges)
+                size += AbstractBounds.tokenSerializer.serializedSize(range, version);
+            return size;
+        }
+    };
+}
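Note how rangesOrMin() normalises a missing or empty range collection to a single full-ring (min, min] range, so the serializer and verb handler never see an empty collection. The following is only a hedged, test-style sketch of a round trip through the versioned serializer; the class name is illustrative, and it assumes a test environment where DatabaseDescriptor has been initialised (the deserializer needs the partitioner) and where DataOutputBuffer/DataInputBuffer serve as the usual in-memory buffers.

import java.util.Collections;
import java.util.UUID;

import org.apache.cassandra.io.util.DataInputBuffer;
import org.apache.cassandra.io.util.DataOutputBuffer;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupRequest;

public class PaxosCleanupRequestSerializationSketch
{
    public static void main(String[] args) throws Exception
    {
        // an empty range collection is normalised to a single full-ring (min, min] range
        PaxosCleanupRequest request = new PaxosCleanupRequest(UUID.randomUUID(),
                                                              TableId.generate(),
                                                              Collections.emptyList());

        int version = MessagingService.current_version;
        try (DataOutputBuffer out = new DataOutputBuffer())
        {
            PaxosCleanupRequest.serializer.serialize(request, out, version);
            // the declared size should match the bytes actually written
            assert PaxosCleanupRequest.serializer.serializedSize(request, version) == out.getLength();

            try (DataInputBuffer in = new DataInputBuffer(out.buffer(), false))
            {
                PaxosCleanupRequest roundTripped = PaxosCleanupRequest.serializer.deserialize(in, version);
                assert roundTripped.ranges.size() == 1; // the substituted full-ring range survives the trip
            }
        }
    }
}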
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupResponse.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupResponse.java
new file mode 100644
index 0000000..1c90162
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupResponse.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.utils.UUIDSerializer;
+
+public class PaxosCleanupResponse
+{
+    public final UUID session;
+    public final boolean wasSuccessful;
+    public final String message;
+
+    public PaxosCleanupResponse(UUID session, boolean wasSuccessful, @Nullable String message)
+    {
+        this.session = session;
+        this.wasSuccessful = wasSuccessful;
+        this.message = message;
+    }
+
+    public static PaxosCleanupResponse success(UUID session)
+    {
+        return new PaxosCleanupResponse(session, true, null);
+    }
+
+    public static PaxosCleanupResponse failed(UUID session, String message)
+    {
+        return new PaxosCleanupResponse(session, false, message);
+    }
+
+    public static final IVerbHandler<PaxosCleanupResponse> verbHandler = (message) -> PaxosCleanupSession.finishSession(message.from(), message.payload);
+
+    public static final IVersionedSerializer<PaxosCleanupResponse> serializer = new IVersionedSerializer<PaxosCleanupResponse>()
+    {
+        public void serialize(PaxosCleanupResponse finished, DataOutputPlus out, int version) throws IOException
+        {
+            UUIDSerializer.serializer.serialize(finished.session, out, version);
+            out.writeBoolean(finished.wasSuccessful);
+            out.writeBoolean(finished.message != null);
+            if (finished.message != null)
+                out.writeUTF(finished.message);
+        }
+
+        public PaxosCleanupResponse deserialize(DataInputPlus in, int version) throws IOException
+        {
+            UUID session = UUIDSerializer.serializer.deserialize(in, version);
+            boolean success = in.readBoolean();
+            String message = in.readBoolean() ? in.readUTF() : null;
+            return new PaxosCleanupResponse(session, success, message);
+        }
+
+        public long serializedSize(PaxosCleanupResponse finished, int version)
+        {
+            long size = UUIDSerializer.serializer.serializedSize(finished.session, version);
+            size += TypeSizes.sizeof(finished.wasSuccessful);
+            size += TypeSizes.sizeof(finished.message != null);
+            if (finished.message != null)
+                size += TypeSizes.sizeof(finished.message);
+            return size;
+        }
+    };
+}
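The serializer writes a presence flag before the optional failure message, so a success response carries no UTF payload at all. A hedged, test-style sketch of exercising both paths follows; the class name is illustrative and the same in-memory buffer assumptions as above apply.

import java.util.UUID;

import org.apache.cassandra.io.util.DataInputBuffer;
import org.apache.cassandra.io.util.DataOutputBuffer;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupResponse;

public class PaxosCleanupResponseSerializationSketch
{
    static PaxosCleanupResponse roundTrip(PaxosCleanupResponse response) throws Exception
    {
        int version = MessagingService.current_version;
        try (DataOutputBuffer out = new DataOutputBuffer())
        {
            PaxosCleanupResponse.serializer.serialize(response, out, version);
            try (DataInputBuffer in = new DataInputBuffer(out.buffer(), false))
            {
                return PaxosCleanupResponse.serializer.deserialize(in, version);
            }
        }
    }

    public static void main(String[] args) throws Exception
    {
        UUID session = UUID.randomUUID();
        // success: presence flag is false, no message written
        assert roundTrip(PaxosCleanupResponse.success(session)).message == null;
        // failure: presence flag is true, followed by the UTF-encoded message
        assert "ranges not replicated".equals(roundTrip(PaxosCleanupResponse.failed(session, "ranges not replicated")).message);
    }
}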
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupSession.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupSession.java
new file mode 100644
index 0000000..3d765ea
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosCleanupSession.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.lang.ref.WeakReference;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
+import org.apache.cassandra.gms.IFailureDetectionEventListener;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.RequestCallbackWithFailure;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_REQ;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public class PaxosCleanupSession extends AsyncFuture<Void> implements Runnable,
+                                                                      IEndpointStateChangeSubscriber,
+                                                                      IFailureDetectionEventListener,
+                                                                      RequestCallbackWithFailure<Void>
+{
+    private static final Map<UUID, PaxosCleanupSession> sessions = new ConcurrentHashMap<>();
+
+    static final long TIMEOUT_NANOS;
+    static
+    {
+        long timeoutSeconds = Integer.getInteger("cassandra.paxos_cleanup_session_timeout_seconds", (int) TimeUnit.HOURS.toSeconds(2));
+        TIMEOUT_NANOS = TimeUnit.SECONDS.toNanos(timeoutSeconds);
+    }
+
+    private static class TimeoutTask implements Runnable
+    {
+        private final WeakReference<PaxosCleanupSession> ref;
+
+        TimeoutTask(PaxosCleanupSession session)
+        {
+            this.ref = new WeakReference<>(session);
+        }
+
+        @Override
+        public void run()
+        {
+            PaxosCleanupSession session = ref.get();
+            if (session == null || session.isDone())
+                return;
+
+            long remaining = session.lastMessageSentNanos + TIMEOUT_NANOS - nanoTime();
+            if (remaining > 0)
+                schedule(remaining);
+            else
+                session.fail(String.format("Paxos cleanup session %s timed out", session.session));
+        }
+
+        ScheduledFuture<?> schedule(long delayNanos)
+        {
+            return ScheduledExecutors.scheduledTasks.scheduleTimeoutWithDelay(this, delayNanos, TimeUnit.NANOSECONDS);
+        }
+
+        private static ScheduledFuture<?> schedule(PaxosCleanupSession session)
+        {
+            return new TimeoutTask(session).schedule(TIMEOUT_NANOS);
+        }
+    }
+
+    private final UUID session = UUID.randomUUID();
+    private final TableId tableId;
+    private final Collection<Range<Token>> ranges;
+    private final Queue<InetAddressAndPort> pendingCleanups = new ConcurrentLinkedQueue<>();
+    private InetAddressAndPort inProgress = null;
+    private volatile long lastMessageSentNanos = nanoTime();
+    private ScheduledFuture<?> timeout;
+
+    PaxosCleanupSession(Collection<InetAddressAndPort> endpoints, TableId tableId, Collection<Range<Token>> ranges)
+    {
+        this.tableId = tableId;
+        this.ranges = ranges;
+
+        pendingCleanups.addAll(endpoints);
+    }
+
+    private static void setSession(PaxosCleanupSession session)
+    {
+        Preconditions.checkState(!sessions.containsKey(session.session));
+        sessions.put(session.session, session);
+    }
+
+    private static void removeSession(PaxosCleanupSession session)
+    {
+        Preconditions.checkState(sessions.containsKey(session.session));
+        sessions.remove(session.session);
+    }
+
+    @Override
+    public void run()
+    {
+        setSession(this);
+        startNextOrFinish();
+        if (!isDone())
+            timeout = TimeoutTask.schedule(this);
+    }
+
+    private void startCleanup(InetAddressAndPort endpoint)
+    {
+        lastMessageSentNanos = nanoTime();
+        PaxosCleanupRequest completer = new PaxosCleanupRequest(session, tableId, ranges);
+        Message<PaxosCleanupRequest> msg = Message.out(PAXOS2_CLEANUP_REQ, completer);
+        MessagingService.instance().sendWithCallback(msg, endpoint, this);
+    }
+
+    private synchronized void startNextOrFinish()
+    {
+        InetAddressAndPort endpoint = pendingCleanups.poll();
+
+        if (endpoint == null)
+            Preconditions.checkState(inProgress == null, "Unable to complete paxos cleanup session %s, still waiting on %s", session, inProgress);
+        else
+            Preconditions.checkState(inProgress == null, "Unable to start paxos cleanup on %s for %s, still waiting on response from %s", endpoint, session, inProgress);
+
+        inProgress = endpoint;
+
+        if (endpoint != null)
+        {
+            startCleanup(endpoint);
+        }
+        else
+        {
+            removeSession(this);
+            trySuccess(null);
+            if (timeout != null)
+                timeout.cancel(true);
+        }
+    }
+
+    private synchronized void fail(String message)
+    {
+        if (isDone())
+            return;
+        removeSession(this);
+        tryFailure(new PaxosCleanupException(message));
+        if (timeout != null)
+            timeout.cancel(true);
+    }
+
+    private synchronized void finish(InetAddressAndPort from, PaxosCleanupResponse finished)
+    {
+        Preconditions.checkArgument(from.equals(inProgress), "Received unexpected cleanup complete response from %s for session %s. Expected %s", from, session, inProgress);
+        inProgress = null;
+
+        if (finished.wasSuccessful)
+        {
+            startNextOrFinish();
+        }
+        else
+        {
+            fail(String.format("Paxos cleanup session %s failed on %s with message: %s", session, from, finished.message));
+        }
+    }
+
+    public static void finishSession(InetAddressAndPort from, PaxosCleanupResponse response)
+    {
+        PaxosCleanupSession session = sessions.get(response.session);
+        if (session != null)
+            session.finish(from, response);
+    }
+
+    private synchronized void maybeKillSession(InetAddressAndPort unavailable, String reason)
+    {
+        // don't fail if we've already completed the cleanup for the unavailable endpoint;
+        // if the outage affects availability, the ongoing sessions will fail on their own
+        if (!pendingCleanups.contains(unavailable))
+            return;
+
+        fail(String.format("Paxos cleanup session %s failed after %s %s", session, unavailable, reason));
+    }
+
+    @Override
+    public void onJoin(InetAddressAndPort endpoint, EndpointState epState)
+    {
+
+    }
+
+    @Override
+    public void beforeChange(InetAddressAndPort endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue)
+    {
+
+    }
+
+    @Override
+    public void onChange(InetAddressAndPort endpoint, ApplicationState state, VersionedValue value)
+    {
+
+    }
+
+    @Override
+    public void onAlive(InetAddressAndPort endpoint, EndpointState state)
+    {
+
+    }
+
+    @Override
+    public void onDead(InetAddressAndPort endpoint, EndpointState state)
+    {
+        maybeKillSession(endpoint, "marked dead");
+    }
+
+    @Override
+    public void onRemove(InetAddressAndPort endpoint)
+    {
+        maybeKillSession(endpoint, "removed from ring");
+    }
+
+    @Override
+    public void onRestart(InetAddressAndPort endpoint, EndpointState state)
+    {
+        maybeKillSession(endpoint, "restarted");
+    }
+
+    @Override
+    public void convict(InetAddressAndPort ep, double phi)
+    {
+        maybeKillSession(ep, "convicted by failure detector");
+    }
+
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        fail(from.toString() + ' ' + reason + " for cleanup request for paxos cleanup session " + session);
+    }
+
+    @Override
+    public void onResponse(Message<Void> msg)
+    {
+        // noop, we're only interested in failures
+    }
+}
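The session works through its endpoints strictly one at a time, and TimeoutTask only fails it when no message has been sent within the window configured by the cassandra.paxos_cleanup_session_timeout_seconds system property (two hours by default); otherwise it reschedules itself for the remaining time. A simplified standalone illustration of that reschedule-or-fail decision (not Cassandra code, class name illustrative):

import java.util.concurrent.TimeUnit;

public class CleanupTimeoutSketch
{
    // same default as PaxosCleanupSession: override with -Dcassandra.paxos_cleanup_session_timeout_seconds=...
    static final long TIMEOUT_NANOS =
        TimeUnit.SECONDS.toNanos(Integer.getInteger("cassandra.paxos_cleanup_session_timeout_seconds",
                                                    (int) TimeUnit.HOURS.toSeconds(2)));

    /** @return nanos to wait before re-checking, or -1 if the session should be failed now */
    static long rescheduleOrFail(long lastMessageSentNanos, long nowNanos)
    {
        long remaining = lastMessageSentNanos + TIMEOUT_NANOS - nowNanos;
        return remaining > 0 ? remaining : -1;
    }

    public static void main(String[] args)
    {
        long now = System.nanoTime();
        System.out.println(rescheduleOrFail(now, now));                     // full window remaining
        System.out.println(rescheduleOrFail(now - TIMEOUT_NANOS - 1, now)); // -1: timed out
    }
}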
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosFinishPrepareCleanup.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosFinishPrepareCleanup.java
new file mode 100644
index 0000000..92d8d35
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosFinishPrepareCleanup.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.io.FSWriteError;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.*;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+import org.apache.cassandra.utils.concurrent.IntrusiveStack;
+
+import static org.apache.cassandra.exceptions.RequestFailureReason.UNKNOWN;
+import static org.apache.cassandra.net.NoPayload.noPayload;
+
+public class PaxosFinishPrepareCleanup extends AsyncFuture<Void> implements RequestCallbackWithFailure<Void>
+{
+    private final Set<InetAddressAndPort> waitingResponse;
+
+    PaxosFinishPrepareCleanup(Collection<InetAddressAndPort> endpoints)
+    {
+        this.waitingResponse = new HashSet<>(endpoints);
+    }
+
+    public static PaxosFinishPrepareCleanup finish(Collection<InetAddressAndPort> endpoints, PaxosCleanupHistory result)
+    {
+        PaxosFinishPrepareCleanup callback = new PaxosFinishPrepareCleanup(endpoints);
+        synchronized (callback)
+        {
+            Message<PaxosCleanupHistory> message = Message.out(Verb.PAXOS2_CLEANUP_FINISH_PREPARE_REQ, result);
+            for (InetAddressAndPort endpoint : endpoints)
+                MessagingService.instance().sendWithCallback(message, endpoint, callback);
+        }
+        return callback;
+    }
+
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        tryFailure(new PaxosCleanupException(reason + " failure response from " + from));
+    }
+
+    public synchronized void onResponse(Message<Void> msg)
+    {
+        if (isDone())
+            return;
+
+        if (!waitingResponse.remove(msg.from()))
+            throw new IllegalArgumentException("Received unexpected response from " + msg.from());
+
+        if (waitingResponse.isEmpty())
+            trySuccess(null);
+    }
+
+    static class PendingCleanup extends IntrusiveStack<PendingCleanup>
+    {
+        private static final AtomicReference<PendingCleanup> pendingCleanup = new AtomicReference<>();
+        private static final Runnable CLEANUP = () -> {
+            PendingCleanup list = pendingCleanup.getAndSet(null);
+            if (list == null)
+                return;
+
+            Ballot highBound = Ballot.none();
+            for (PendingCleanup pending : IntrusiveStack.iterable(list))
+            {
+                PaxosCleanupHistory cleanupHistory = pending.message.payload;
+                if (cleanupHistory.highBound.compareTo(highBound) > 0)
+                    highBound = cleanupHistory.highBound;
+            }
+            try
+            {
+                try
+                {
+                    PaxosState.ballotTracker().updateLowBound(highBound);
+                }
+                catch (IOException e)
+                {
+                    throw new FSWriteError(e);
+                }
+            }
+            catch (Throwable t)
+            {
+                for (PendingCleanup pending : IntrusiveStack.iterable(list))
+                    MessagingService.instance().respondWithFailure(UNKNOWN, pending.message);
+                throw t;
+            }
+
+            Set<PendingCleanup> failed = null;
+            Throwable fail = null;
+            for (PendingCleanup pending : IntrusiveStack.iterable(list))
+            {
+                try
+                {
+                    Schema.instance.getColumnFamilyStoreInstance(pending.message.payload.tableId)
+                                   .syncPaxosRepairHistory(pending.message.payload.history, false);
+                }
+                catch (Throwable t)
+                {
+                    fail = Throwables.merge(fail, t);
+                    if (failed == null)
+                        failed = Collections.newSetFromMap(new IdentityHashMap<>());
+                    failed.add(pending);
+                    MessagingService.instance().respondWithFailure(UNKNOWN, pending.message);
+                }
+            }
+
+            try
+            {
+                SystemKeyspace.flushPaxosRepairHistory();
+                for (PendingCleanup pending : IntrusiveStack.iterable(list))
+                {
+                    if (failed == null || !failed.contains(pending))
+                        MessagingService.instance().respond(noPayload, pending.message);
+                }
+            }
+            catch (Throwable t)
+            {
+                fail = Throwables.merge(fail, t);
+                for (PendingCleanup pending : IntrusiveStack.iterable(list))
+                {
+                    if (failed == null || !failed.contains(pending))
+                        MessagingService.instance().respondWithFailure(UNKNOWN, pending.message);
+                }
+            }
+            Throwables.maybeFail(fail);
+        };
+
+        final Message<PaxosCleanupHistory> message;
+        PendingCleanup(Message<PaxosCleanupHistory> message)
+        {
+            this.message = message;
+        }
+
+        public static void add(Message<PaxosCleanupHistory> message)
+        {
+            PendingCleanup next = new PendingCleanup(message);
+            PendingCleanup prev = IntrusiveStack.push(AtomicReference::get, AtomicReference::compareAndSet, pendingCleanup, next);
+            if (prev == null)
+                Stage.MISC.execute(CLEANUP);
+        }
+    }
+
+    public static final IVerbHandler<PaxosCleanupHistory> verbHandler = PendingCleanup::add;
+}
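PendingCleanup coalesces concurrent finish-prepare requests: each one is pushed onto a lock-free IntrusiveStack, and only the push that turns the stack non-empty schedules the single drain task on Stage.MISC, which then answers every queued request in one pass. Below is a standalone sketch of that "first pusher schedules the drain" pattern using a plain linked node instead of IntrusiveStack; it is an illustration, not the Cassandra implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class CoalescingDrainSketch<T>
{
    private static final class Node<T>
    {
        final T value;
        Node<T> next;
        Node(T value) { this.value = value; }
    }

    private final AtomicReference<Node<T>> head = new AtomicReference<>();
    private final ExecutorService executor = Executors.newSingleThreadExecutor();
    private final Consumer<List<T>> drain;

    public CoalescingDrainSketch(Consumer<List<T>> drain)
    {
        this.drain = drain;
    }

    public void add(T value)
    {
        Node<T> node = new Node<>(value);
        Node<T> prev;
        do
        {
            prev = head.get();
            node.next = prev;
        }
        while (!head.compareAndSet(prev, node));

        // only the push that turned the stack non-empty schedules the drain,
        // mirroring PendingCleanup.add scheduling CLEANUP on Stage.MISC
        if (prev == null)
            executor.execute(this::drainAll);
    }

    private void drainAll()
    {
        List<T> batch = new ArrayList<>();
        for (Node<T> n = head.getAndSet(null); n != null; n = n.next)
            batch.add(n.value);
        drain.accept(batch); // process the whole batch in one pass, like CLEANUP does
    }
}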
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosStartPrepareCleanup.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosStartPrepareCleanup.java
new file mode 100644
index 0000000..9f30692
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosStartPrepareCleanup.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.*;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.service.PendingRangeCalculatorService;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_START_PREPARE_REQ;
+import static org.apache.cassandra.service.paxos.Paxos.newBallot;
+import static org.apache.cassandra.service.paxos.PaxosState.ballotTracker;
+
+/**
+ * Determines the highest ballot we should attempt to repair
+ */
+public class PaxosStartPrepareCleanup extends AsyncFuture<PaxosCleanupHistory> implements RequestCallbackWithFailure<PaxosCleanupHistory>
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosStartPrepareCleanup.class);
+
+    public static final RequestSerializer serializer = new RequestSerializer();
+
+    private final TableId table;
+
+    private final Set<InetAddressAndPort> waitingResponse;
+    private Ballot maxBallot = null;
+    private PaxosRepairHistory history = null;
+
+    PaxosStartPrepareCleanup(TableId table, Collection<InetAddressAndPort> endpoints)
+    {
+        this.table = table;
+        this.waitingResponse = new HashSet<>(endpoints);
+    }
+
+    /**
+     * We run paxos repair as part of topology changes, so we include the local endpoint state in the paxos repair
+     * prepare message to prevent racing with gossip dissemination and guarantee that every repair participant is aware
+     * of the pending ring change during repair.
+     */
+    public static PaxosStartPrepareCleanup prepare(TableId tableId, Collection<InetAddressAndPort> endpoints, EndpointState localEpState, Collection<Range<Token>> ranges)
+    {
+        PaxosStartPrepareCleanup callback = new PaxosStartPrepareCleanup(tableId, endpoints);
+        synchronized (callback)
+        {
+            Message<Request> message = Message.out(PAXOS2_CLEANUP_START_PREPARE_REQ, new Request(tableId, localEpState, ranges));
+            for (InetAddressAndPort endpoint : endpoints)
+                MessagingService.instance().sendWithCallback(message, endpoint, callback);
+        }
+        return callback;
+    }
+
+    @Override
+    public void onFailure(InetAddressAndPort from, RequestFailureReason reason)
+    {
+        tryFailure(new PaxosCleanupException("Received " + reason + " failure response from " + from));
+    }
+
+    public synchronized void onResponse(Message<PaxosCleanupHistory> msg)
+    {
+        if (isDone())
+            return;
+
+        if (!waitingResponse.remove(msg.from()))
+            throw new IllegalArgumentException("Received unexpected response from " + msg.from());
+
+        if (Commit.isAfter(msg.payload.highBound, maxBallot))
+            maxBallot = msg.payload.highBound;
+
+        history = PaxosRepairHistory.merge(history, msg.payload.history);
+
+        if (waitingResponse.isEmpty())
+            trySuccess(new PaxosCleanupHistory(table, maxBallot, history));
+    }
+
+    private static void maybeUpdateTopology(InetAddressAndPort endpoint, EndpointState remote)
+    {
+        EndpointState local = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
+        if (local == null || local.isSupersededBy(remote))
+        {
+            logger.trace("updating endpoint info for {} with {}", endpoint, remote);
+            Map<InetAddressAndPort, EndpointState> states = Collections.singletonMap(endpoint, remote);
+
+            Gossiper.runInGossipStageBlocking(() -> {
+                Gossiper.instance.notifyFailureDetector(states);
+                Gossiper.instance.applyStateLocally(states);
+            });
+            // TODO: We should also wait for schema pulls/pushes, however this would be quite an involved change to MigrationManager
+            //       (which currently drops some migration tasks on the floor).
+            //       Note it would be fine for us to fail to complete the migration task and simply treat this response as a failure/timeout.
+        }
+        // even if we have the latest gossip info, wait until pending range calculations are complete
+        PendingRangeCalculatorService.instance.blockUntilFinished();
+    }
+
+    public static class Request
+    {
+        final TableId tableId;
+        final EndpointState epState;
+        final Collection<Range<Token>> ranges;
+
+        public Request(TableId tableId, EndpointState epState, Collection<Range<Token>> ranges)
+        {
+            this.tableId = tableId;
+            this.epState = epState;
+            this.ranges = ranges;
+        }
+    }
+
+    public static class RequestSerializer implements IVersionedSerializer<Request>
+    {
+        public void serialize(Request request, DataOutputPlus out, int version) throws IOException
+        {
+            request.tableId.serialize(out);
+            EndpointState.serializer.serialize(request.epState, out, version);
+            out.writeInt(request.ranges.size());
+            for (Range<Token> rt : request.ranges)
+                AbstractBounds.tokenSerializer.serialize(rt, out, version);
+        }
+
+        public Request deserialize(DataInputPlus in, int version) throws IOException
+        {
+            TableId tableId = TableId.deserialize(in);
+            EndpointState epState = EndpointState.serializer.deserialize(in, version);
+
+            int numRanges = in.readInt();
+            List<Range<Token>> ranges = new ArrayList<>();
+            for (int i = 0; i < numRanges; i++)
+            {
+                Range<Token> range = (Range<Token>) AbstractBounds.tokenSerializer.deserialize(in, DatabaseDescriptor.getPartitioner(), version);
+                ranges.add(range);
+            }
+            return new Request(tableId, epState, ranges);
+        }
+
+        public long serializedSize(Request request, int version)
+        {
+            long size = request.tableId.serializedSize();
+            size += EndpointState.serializer.serializedSize(request.epState, version);
+            size += TypeSizes.sizeof(request.ranges.size());
+            for (Range<Token> range : request.ranges)
+                size += AbstractBounds.tokenSerializer.serializedSize(range, version);
+            return size;
+        }
+    }
+
+    public static final IVerbHandler<Request> verbHandler = in -> {
+        ColumnFamilyStore table = Schema.instance.getColumnFamilyStoreInstance(in.payload.tableId);
+        maybeUpdateTopology(in.from(), in.payload.epState);
+        Ballot highBound = newBallot(ballotTracker().getHighBound(), ConsistencyLevel.SERIAL);
+        PaxosRepairHistory history = table.getPaxosRepairHistoryForRanges(in.payload.ranges);
+        Message<PaxosCleanupHistory> out = in.responseWith(new PaxosCleanupHistory(table.metadata.id, highBound, history));
+        MessagingService.instance().send(out, in.respondTo());
+    };
+}
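Together with PaxosFinishPrepareCleanup above, this forms a two-phase prepare: gather each participant's high ballot and repair history, merge them, then have every participant persist the merged history and raise its low bound. The actual orchestration lives in PaxosCleanup (not shown in this hunk); the following is only a hedged sketch of how the two public entry points chain, with the endpoints, local endpoint state, and ranges supplied by whatever repair coordinator is driving the process.

import java.util.Collection;

import com.google.common.util.concurrent.FutureCallback;

import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.gms.EndpointState;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupHistory;
import org.apache.cassandra.service.paxos.cleanup.PaxosFinishPrepareCleanup;
import org.apache.cassandra.service.paxos.cleanup.PaxosStartPrepareCleanup;

public class PreparePhaseSketch
{
    // placeholder arguments; a real coordinator supplies these from the repair it is running
    static void prepareThenFinish(TableId tableId,
                                  Collection<InetAddressAndPort> endpoints,
                                  EndpointState localEpState,
                                  Collection<Range<Token>> ranges)
    {
        PaxosStartPrepareCleanup.prepare(tableId, endpoints, localEpState, ranges)
                                .addCallback(new FutureCallback<PaxosCleanupHistory>()
        {
            public void onSuccess(PaxosCleanupHistory cleanupHistory)
            {
                // second phase: every endpoint persists the merged repair history
                // and raises its paxos low bound to the agreed high ballot
                PaxosFinishPrepareCleanup.finish(endpoints, cleanupHistory);
            }

            public void onFailure(Throwable t)
            {
                // a real coordinator would surface this to the repair session
            }
        });
    }
}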
diff --git a/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosTableRepairs.java b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosTableRepairs.java
new file mode 100644
index 0000000..6da4e0b
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/cleanup/PaxosTableRepairs.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import java.util.ArrayDeque;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.AbstractPaxosRepair;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.PaxosRepair;
+import org.apache.cassandra.utils.NoSpamLogger;
+
+import static org.apache.cassandra.service.paxos.Commit.isAfter;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+/**
+ * Coordinates repairs on a given key to prevent multiple repairs being scheduled for a single key
+ */
+public class PaxosTableRepairs implements AbstractPaxosRepair.Listener
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosTableRepairs.class);
+
+    static class KeyRepair
+    {
+        private final DecoratedKey key;
+
+        private final ArrayDeque<AbstractPaxosRepair> queued = new ArrayDeque<>();
+
+        private KeyRepair(DecoratedKey key)
+        {
+            this.key = key;
+        }
+
+        void onFirst(Predicate<AbstractPaxosRepair> predicate, Consumer<AbstractPaxosRepair> consumer, boolean removeBeforeAction)
+        {
+            while (!queued.isEmpty())
+            {
+                AbstractPaxosRepair repair = queued.peek();
+                if (repair.isComplete())
+                {
+                    queued.remove();
+                    continue;
+                }
+
+                if (predicate.test(repair))
+                {
+                    if (removeBeforeAction)
+                        queued.remove();
+                    consumer.accept(repair);
+                }
+                return;
+            }
+        }
+
+        void clear()
+        {
+            while (!queued.isEmpty())
+                queued.remove().cancelUnexceptionally();
+        }
+
+        AbstractPaxosRepair startOrGetOrQueue(PaxosTableRepairs tableRepairs, DecoratedKey key, Ballot incompleteBallot, ConsistencyLevel consistency, TableMetadata table, Consumer<PaxosRepair.Result> onComplete)
+        {
+            Preconditions.checkArgument(this.key.equals(key));
+
+            if (!queued.isEmpty() && !isAfter(incompleteBallot, queued.peekLast().incompleteBallot()))
+            {
+                queued.peekLast().addListener(onComplete);
+                return queued.peekLast();
+            }
+
+            AbstractPaxosRepair repair = tableRepairs.createRepair(key, incompleteBallot, consistency, table);
+
+            repair.addListener(tableRepairs);
+            repair.addListener(onComplete);
+
+            queued.add(repair);
+            maybeScheduleNext();
+            return repair;
+        }
+
+        @VisibleForTesting
+        AbstractPaxosRepair activeRepair()
+        {
+            return queued.peek();
+        }
+
+        @VisibleForTesting
+        boolean queueContains(AbstractPaxosRepair repair)
+        {
+            return queued.contains(repair);
+        }
+
+        void maybeScheduleNext()
+        {
+            onFirst(repair -> !repair.isStarted(), AbstractPaxosRepair::start, false);
+        }
+
+        void complete(AbstractPaxosRepair repair)
+        {
+            queued.remove(repair);
+            maybeScheduleNext();
+        }
+
+        int pending()
+        {
+            return queued.size();
+        }
+
+        boolean isEmpty()
+        {
+            return queued.isEmpty();
+        }
+    }
+
+    private final Map<DecoratedKey, KeyRepair> keyRepairs = new ConcurrentHashMap<>();
+
+    @VisibleForTesting
+    KeyRepair getKeyRepairUnsafe(DecoratedKey key)
+    {
+        return keyRepairs.get(key);
+    }
+
+    synchronized AbstractPaxosRepair startOrGetOrQueue(DecoratedKey key, Ballot incompleteBallot, ConsistencyLevel consistency, TableMetadata table, Consumer<PaxosRepair.Result> onComplete)
+    {
+        KeyRepair keyRepair = keyRepairs.computeIfAbsent(key, KeyRepair::new);
+        return keyRepair.startOrGetOrQueue(this, key, incompleteBallot, consistency, table, onComplete);
+    }
+
+    public synchronized void onComplete(AbstractPaxosRepair repair, AbstractPaxosRepair.Result result)
+    {
+        KeyRepair keyRepair = keyRepairs.get(repair.partitionKey());
+        if (keyRepair == null)
+        {
+            NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES,
+                             "onComplete callback fired for nonexistent KeyRepair");
+            return;
+        }
+
+        keyRepair.complete(repair);
+        if (keyRepair.queueContains(repair))
+            NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES,
+                             "repair not removed after call to onComplete");
+
+        if (keyRepair.isEmpty())
+            keyRepairs.remove(repair.partitionKey());
+    }
+
+    synchronized void evictHungRepairs(long activeSinceNanos)
+    {
+        Predicate<AbstractPaxosRepair> timeoutPredicate = repair -> repair.startedNanos() - activeSinceNanos < 0;
+        for (KeyRepair repair : keyRepairs.values())
+        {
+            if (repair.isEmpty())
+                NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES,
+                                 "inactive KeyRepair found; post-repair cleanup/scheduling isn't working properly");
+            repair.onFirst(timeoutPredicate, r -> {
+                logger.warn("cancelling timed out paxos repair: {}", r);
+                r.cancelUnexceptionally();
+            }, true);
+            repair.maybeScheduleNext();
+            if (repair.isEmpty())
+                keyRepairs.remove(repair.key);
+        }
+    }
+
+    synchronized void clear()
+    {
+        for (KeyRepair repair : keyRepairs.values())
+            repair.clear();
+        keyRepairs.clear();
+    }
+
+    @VisibleForTesting
+    synchronized boolean hasActiveRepairs(DecoratedKey key)
+    {
+        return keyRepairs.containsKey(key);
+    }
+
+    AbstractPaxosRepair createRepair(DecoratedKey key, Ballot incompleteBallot, ConsistencyLevel consistency, TableMetadata table)
+    {
+        return PaxosRepair.create(consistency, key, incompleteBallot, table);
+    }
+
+    private static final ConcurrentMap<TableId, PaxosTableRepairs> tableRepairsMap = new ConcurrentHashMap<>();
+
+    static PaxosTableRepairs getForTable(TableId tableId)
+    {
+        return tableRepairsMap.computeIfAbsent(tableId, k -> new PaxosTableRepairs());
+    }
+
+    public static void evictHungRepairs()
+    {
+        long deadline = nanoTime() - TimeUnit.MINUTES.toNanos(5);
+        for (PaxosTableRepairs repairs : tableRepairsMap.values())
+            repairs.evictHungRepairs(deadline);
+    }
+
+    public static void clearRepairs()
+    {
+        for (PaxosTableRepairs repairs : tableRepairsMap.values())
+            repairs.clear();
+    }
+
+}
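The eviction predicate in evictHungRepairs compares monotonic clock readings by subtraction (startedNanos() - activeSinceNanos < 0) rather than with a direct less-than, which is the wrap-safe way to order System.nanoTime()-style values. A minimal standalone illustration (class name illustrative):

public class NanoComparisonSketch
{
    // overflow-safe ordering of two nanoTime() samples
    static boolean startedBefore(long startedNanos, long deadlineNanos)
    {
        return startedNanos - deadlineNanos < 0;
    }

    public static void main(String[] args)
    {
        long now = System.nanoTime();
        System.out.println(startedBefore(now - 1_000_000L, now)); // true: started before the deadline
        System.out.println(startedBefore(now + 1_000_000L, now)); // false: started after it
    }
}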
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTracker.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTracker.java
new file mode 100644
index 0000000..41314a2e
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTracker.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.zip.CRC32;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.service.ClientState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.RandomAccessReader;
+import org.apache.cassandra.io.util.SequentialWriter;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+
+import static org.apache.cassandra.io.util.SequentialWriterOption.FINISH_ON_CLOSE;
+import static org.apache.cassandra.net.Crc.crc32;
+
+/**
+ * Tracks the highest paxos ballot we've seen, and the lowest ballot we can accept.
+ *
+ * During paxos repair, the coordinator gets the highest ballot seen by each participant. At the end of repair, that
+ * high ballot is set as the new low bound. Combined with paxos repair during topology changes, this eliminates the
+ * possibility of new nodes accepting ballots that are before the most recently accepted ballot for a key.
+ */
+public class PaxosBallotTracker
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosBallotTracker.class);
+
+    private static final int FILE_VERSION = 0;
+    static final String FNAME = "ballot.meta";
+    private static final String TMP_FNAME = FNAME + ".tmp";
+
+    private final File directory;
+    private final AtomicReference<Ballot> highBound;
+    private volatile Ballot lowBound;
+
+    private PaxosBallotTracker(File directory, Ballot highBound, Ballot lowBound)
+    {
+        Preconditions.checkNotNull(lowBound);
+        Preconditions.checkNotNull(highBound);
+        this.directory = directory;
+        this.highBound = new AtomicReference<>(highBound);
+        this.lowBound = lowBound;
+    }
+
+    private static void serializeBallot(SequentialWriter writer, CRC32 crc, Ballot ballot) throws IOException
+    {
+        ByteBuffer bytes = ballot.toBytes();
+        writer.write(bytes);
+        crc.update(bytes);
+    }
+
+    private static Ballot deserializeBallot(RandomAccessReader reader, CRC32 crc, byte[] bytes) throws IOException
+    {
+        reader.readFully(bytes);
+        crc.update(bytes);
+        return Ballot.deserialize(ByteBuffer.wrap(bytes));
+    }
+
+    public static void truncate(File directory) throws IOException
+    {
+        logger.info("truncating paxos ballot metadata in {}", directory);
+        deleteIfExists(new File(directory, TMP_FNAME));
+        deleteIfExists(new File(directory, FNAME));
+    }
+
+    public static PaxosBallotTracker load(File directory) throws IOException
+    {
+        deleteIfExists(new File(directory, TMP_FNAME));
+
+        File file = new File(directory, FNAME);
+        if (!file.exists())
+            return new PaxosBallotTracker(directory, Ballot.none(), Ballot.none());
+
+        try (RandomAccessReader reader = RandomAccessReader.open(file))
+        {
+            int version = reader.readInt();
+            if (version != FILE_VERSION)
+                throw new IOException("Unsupported ballot file version: " + version);
+
+            byte[] bytes = new byte[16];
+            CRC32 crc = crc32();
+            Ballot highBallot = deserializeBallot(reader, crc, bytes);
+            Ballot lowBallot = deserializeBallot(reader, crc, bytes);
+            int checksum = Integer.reverseBytes(reader.readInt());
+            if (!reader.isEOF() || (int) crc.getValue() != checksum)
+                throw new IOException("Ballot file corrupted");
+
+            return new PaxosBallotTracker(directory, highBallot, lowBallot);
+        }
+    }
+
+    private static void deleteIfExists(File file)
+    {
+        if (file.exists())
+            file.delete();
+    }
+
+    public synchronized void flush() throws IOException
+    {
+        File file = new File(directory, TMP_FNAME);
+        deleteIfExists(file);
+
+        try (SequentialWriter writer = new SequentialWriter(file, FINISH_ON_CLOSE))
+        {
+            CRC32 crc = crc32();
+            writer.writeInt(FILE_VERSION);
+            serializeBallot(writer, crc, getHighBound());
+            serializeBallot(writer, crc, getLowBound());
+            writer.writeInt(Integer.reverseBytes((int) crc.getValue()));
+        }
+        file.move(new File(directory, FNAME));
+    }
+
+    public synchronized void truncate()
+    {
+        deleteIfExists(new File(directory, TMP_FNAME));
+        deleteIfExists(new File(directory, FNAME));
+        highBound.set(Ballot.none());
+        lowBound = Ballot.none();
+    }
+
+    private void updateHighBound(Ballot current, Ballot next)
+    {
+        while (Commit.isAfter(next, current) && !highBound.compareAndSet(current, next))
+            current = highBound.get();
+    }
+
+    void updateHighBound(Ballot next)
+    {
+        updateHighBound(highBound.get(), next);
+    }
+
+    public void onUpdate(Row row)
+    {
+        Ballot current = highBound.get();
+        Ballot next = PaxosRows.getHighBallot(row, current);
+        if (current == next)
+            return;
+
+        updateHighBound(current, next);
+    }
+
+    @VisibleForTesting
+    void updateHighBoundUnsafe(Ballot expected, Ballot update)
+    {
+        highBound.compareAndSet(expected, update);
+    }
+
+    public File getDirectory()
+    {
+        return directory;
+    }
+
+    public synchronized void updateLowBound(Ballot update) throws IOException
+    {
+        if (!Commit.isAfter(update, lowBound))
+        {
+            logger.debug("Not updating lower bound with earlier or equal ballot from {} to {}", lowBound, update);
+            return;
+        }
+
+        logger.debug("Updating lower bound from {} to {}", lowBound, update);
+        ClientState.getTimestampForPaxos(lowBound.unixMicros());
+        lowBound = update;
+        flush();
+    }
+
+    public Ballot getHighBound()
+    {
+        return highBound.get();
+    }
+
+    /**
+     * @return a unique ballot that has never been proposed, below which we will reject all proposals
+     */
+    public Ballot getLowBound()
+    {
+        return lowBound;
+    }
+}
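On disk the tracker is a small ballot.meta file: a version int, the 16-byte high ballot, the 16-byte low ballot, and a byte-reversed CRC32 over the two ballots, written to a .tmp file and then moved into place. A hedged, test-style sketch of loading it and raising the low bound follows; the directory path is a placeholder, the class name is illustrative, and it assumes an initialised test environment (updateLowBound also touches ClientState). load() falls back to Ballot.none() bounds when the file is absent.

import org.apache.cassandra.io.util.File;
import org.apache.cassandra.service.paxos.Ballot;
import org.apache.cassandra.service.paxos.uncommitted.PaxosBallotTracker;

public class BallotTrackerSketch
{
    public static void main(String[] args) throws Exception
    {
        File dir = new File("/tmp/paxos-ballot-test"); // placeholder directory that may contain ballot.meta
        PaxosBallotTracker tracker = PaxosBallotTracker.load(dir);

        Ballot high = tracker.getHighBound(); // highest ballot witnessed locally
        Ballot low = tracker.getLowBound();   // proposals below this are rejected

        // raising the low bound persists it immediately via flush()
        if (high.compareTo(low) > 0)
            tracker.updateLowBound(high);
    }
}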
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosKeyState.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosKeyState.java
new file mode 100644
index 0000000..c55269a
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosKeyState.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Objects;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.primitives.Longs;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.MergeIterator;
+
+public class PaxosKeyState implements UncommittedPaxosKey
+{
+    static final Comparator<PaxosKeyState> KEY_COMPARATOR = Comparator.comparing(o -> o.key);
+    static final Comparator<PaxosKeyState> BALLOT_COMPARATOR = (o1, o2) -> Longs.compare(o1.ballot.uuidTimestamp(), o2.ballot.uuidTimestamp());
+
+    final TableId tableId;
+    final DecoratedKey key;
+    final Ballot ballot;
+    final boolean committed;
+
+    public PaxosKeyState(TableId tableId, DecoratedKey key, Ballot ballot, boolean committed)
+    {
+        Preconditions.checkNotNull(tableId);
+        Preconditions.checkNotNull(ballot);
+        this.tableId = tableId;
+        this.key = key;
+        this.ballot = ballot;
+        this.committed = committed;
+    }
+
+    public DecoratedKey getKey()
+    {
+        return key;
+    }
+
+    public ConsistencyLevel getConsistencyLevel()
+    {
+        switch (ballot.flag())
+        {
+            default: throw new IllegalStateException();
+            case GLOBAL: return ConsistencyLevel.SERIAL;
+            case LOCAL: return ConsistencyLevel.LOCAL_SERIAL;
+            case NONE: return null;
+        }
+    }
+
+    @Override
+    public Ballot ballot()
+    {
+        return ballot;
+    }
+
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        PaxosKeyState that = (PaxosKeyState) o;
+        return committed == that.committed &&
+               Objects.equals(key, that.key) &&
+               Objects.equals(ballot, that.ballot);
+    }
+
+    public int hashCode()
+    {
+        return Objects.hash(key, ballot, committed);
+    }
+
+    public String toString()
+    {
+        return "PaxosKeyState{" +
+               "tableId=" + tableId +
+               ", key=" + key +
+               ", ballot=" + ballot +
+               ", committed=" + committed +
+               '}';
+    }
+
+    static PaxosKeyState merge(PaxosKeyState left, PaxosKeyState right)
+    {
+        if (left == null)
+            return right;
+
+        if (right == null)
+            return left;
+
+        int cmp = BALLOT_COMPARATOR.compare(left, right);
+
+        // prefer committed operations if the ballots are the same so they can be filtered out later
+        if (cmp == 0)
+            return left.committed ? left : right;
+        else
+            return cmp > 0 ? left : right;
+    }
+
+    static class Reducer extends MergeIterator.Reducer<PaxosKeyState, PaxosKeyState>
+    {
+        private PaxosKeyState mostRecent = null;
+
+        public void reduce(int idx, PaxosKeyState current)
+        {
+            mostRecent = merge(mostRecent, current);
+        }
+
+        protected PaxosKeyState getReduced()
+        {
+            return mostRecent;
+        }
+
+        protected void onKeyChange()
+        {
+            super.onKeyChange();
+            mostRecent = null;
+        }
+    }
+
+    public static CloseableIterator<PaxosKeyState> mergeUncommitted(CloseableIterator<PaxosKeyState>... iterators)
+    {
+        return MergeIterator.get(Lists.newArrayList(iterators), PaxosKeyState.KEY_COMPARATOR, new Reducer());
+    }
+
+    public static CloseableIterator<UncommittedPaxosKey> toUncommittedInfo(CloseableIterator<PaxosKeyState> iter)
+    {
+        Iterator<PaxosKeyState> filtered = Iterators.filter(iter, k -> !k.committed);
+        return new CloseableIterator<UncommittedPaxosKey>()
+        {
+            public void close()
+            {
+                iter.close();
+            }
+
+            public boolean hasNext()
+            {
+                return filtered.hasNext();
+            }
+
+            public UncommittedPaxosKey next()
+            {
+                return filtered.next();
+            }
+        };
+    }
+}
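merge() keeps the state with the higher ballot timestamp and, on a tie, prefers the committed side so that toUncommittedInfo() can filter it out downstream. A hedged, test-style sketch of that preference is below; it has to live in the same package because merge() and the fields are package-private, the class name is illustrative, and the key/ballot arguments are placeholders supplied by the caller.

package org.apache.cassandra.service.paxos.uncommitted;

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.service.paxos.Ballot;

public class PaxosKeyStateMergeSketch
{
    static PaxosKeyState preferCommitted(TableId tableId, DecoratedKey key, Ballot ballot)
    {
        PaxosKeyState committed = new PaxosKeyState(tableId, key, ballot, true);
        PaxosKeyState inProgress = new PaxosKeyState(tableId, key, ballot, false);

        // identical ballots: the committed state wins so it can be filtered out later;
        // otherwise the state with the higher ballot timestamp would be kept
        PaxosKeyState merged = PaxosKeyState.merge(committed, inProgress);
        assert merged.committed;
        return merged;
    }
}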
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosRows.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosRows.java
new file mode 100644
index 0000000..8bdbdf7
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosRows.java
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import javax.annotation.Nullable;
+
+import com.google.common.collect.Lists;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.TimeUUIDType;
+import org.apache.cassandra.db.marshal.UUIDType;
+import org.apache.cassandra.db.marshal.ValueAccessor;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.DeserializationHelper;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.exceptions.UnknownTableException;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.Commit.Accepted;
+import org.apache.cassandra.service.paxos.Commit.AcceptedWithTTL;
+import org.apache.cassandra.service.paxos.Commit.Committed;
+import org.apache.cassandra.service.paxos.Commit.CommittedWithTTL;
+import org.apache.cassandra.utils.AbstractIterator;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.JVMStabilityInspector;
+
+import static org.apache.cassandra.db.partitions.PartitionUpdate.PartitionUpdateSerializer.*;
+import static org.apache.cassandra.service.paxos.Commit.isAfter;
+import static org.apache.cassandra.service.paxos.Commit.latest;
+
+@SuppressWarnings({ "unchecked", "rawtypes" })
+public class PaxosRows
+{
+    private static final ColumnMetadata WRITE_PROMISE = paxosColumn("in_progress_ballot", TimeUUIDType.instance);
+    private static final ColumnMetadata READ_PROMISE = paxosColumn("in_progress_read_ballot", TimeUUIDType.instance);
+    private static final ColumnMetadata PROPOSAL = paxosColumn("proposal_ballot", TimeUUIDType.instance);
+    private static final ColumnMetadata PROPOSAL_UPDATE = paxosColumn("proposal", BytesType.instance);
+    private static final ColumnMetadata PROPOSAL_VERSION = paxosColumn("proposal_version", Int32Type.instance);
+    private static final ColumnMetadata COMMIT = paxosColumn("most_recent_commit_at", TimeUUIDType.instance);
+    private static final ColumnMetadata COMMIT_UPDATE = paxosColumn("most_recent_commit", BytesType.instance);
+    private static final ColumnMetadata COMMIT_VERSION = paxosColumn("most_recent_commit_version", Int32Type.instance);
+
+    private PaxosRows() {}
+
+    private static ColumnMetadata paxosColumn(String name, AbstractType<?> type)
+    {
+        return ColumnMetadata.regularColumn(SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.PAXOS, name, type);
+    }
+
+    public static Ballot getPromise(Row row)
+    {
+        return getBallot(row, READ_PROMISE, Ballot.none());
+    }
+
+    public static Ballot getWritePromise(Row row)
+    {
+        return getBallot(row, WRITE_PROMISE, Ballot.none());
+    }
+
+    public static Accepted getAccepted(Row row)
+    {
+        Cell ballotCell = row.getCell(PROPOSAL);
+        if (ballotCell == null)
+            return null;
+
+        Ballot ballot = ballotCell.accessor().toBallot(ballotCell.value());
+        int version = getInt(row, PROPOSAL_VERSION, MessagingService.VERSION_30);
+        PartitionUpdate update = getUpdate(row, PROPOSAL_UPDATE, version);
+        return ballotCell.isExpiring()
+               ? new AcceptedWithTTL(ballot, update, ballotCell.localDeletionTime())
+               : new Accepted(ballot, update);
+    }
+
+    public static Committed getCommitted(TableMetadata metadata, DecoratedKey partitionKey, Row row)
+    {
+        Cell ballotCell = row.getCell(COMMIT);
+        if (ballotCell == null)
+            return Committed.none(partitionKey, metadata);
+
+        Ballot ballot = ballotCell.accessor().toBallot(ballotCell.value());
+        int version = getInt(row, COMMIT_VERSION, MessagingService.VERSION_30);
+        PartitionUpdate update = getUpdate(row, COMMIT_UPDATE, version);
+        return ballotCell.isExpiring()
+               ? new CommittedWithTTL(ballot, update, ballotCell.localDeletionTime())
+               : new Committed(ballot, update);
+    }
+
+    public static TableId getTableId(Row row)
+    {
+        return TableId.fromUUID(UUIDType.instance.compose(row.clustering().get(0), (ValueAccessor)row.clustering().accessor()));
+    }
+
+    public static UUID getTableUuid(Row row)
+    {
+        return UUIDType.instance.compose(row.clustering().get(0), (ValueAccessor)row.clustering().accessor());
+    }
+
+    private static int getInt(Row row, ColumnMetadata cmeta, @SuppressWarnings("SameParameterValue") int ifNull)
+    {
+        Cell cell = row.getCell(cmeta);
+        if (cell == null)
+            return ifNull;
+        return Int32Type.instance.compose(cell.value(), cell.accessor());
+    }
+
+    private static PartitionUpdate getUpdate(Row row, ColumnMetadata cmeta, int version)
+    {
+        Cell cell = row.getCell(cmeta);
+        if (cell == null)
+            throw new IllegalStateException();
+        try
+        {
+            return PartitionUpdate.fromBytes(cell.buffer(), version);
+        }
+        catch (RuntimeException e)
+        {
+            // Two legacy behaviors, not deleting proposal_version along with proposal and proposal_ballot on commit and
+            // accepting proposals younger than the most recent commit, can combine with the right sequence of tombstone
+            // purging and retention over a few compactions to leave 3.x format proposals without a proposal version
+            // value, causing deserialization to fail when looking up the table. So here we detect that and attempt to
+            // deserialize with the current version.
+            if (e.getCause() instanceof UnknownTableException && version == MessagingService.VERSION_30)
+                return PartitionUpdate.fromBytes(cell.buffer(), MessagingService.current_version);
+
+            throw e;
+        }
+    }
+
+    private static Ballot getBallot(Row row, ColumnMetadata cmeta)
+    {
+        return getBallot(row, cmeta, null);
+    }
+
+    private static Ballot getBallot(Row row, ColumnMetadata cmeta, Ballot ifNull)
+    {
+        Cell cell = row.getCell(cmeta);
+        if (cell == null)
+            return ifNull;
+        return cell.accessor().toBallot(cell.value());
+    }
+
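+    // A proposal is treated as empty when its version or update columns are missing, or when the serialized update
+    // itself is empty; empty proposals have nothing left to complete, so getCommitState counts them as committed.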
+    private static boolean proposalIsEmpty(Row row, DecoratedKey key)
+    {
+        try
+        {
+            Cell proposalVersionCell = row.getCell(PROPOSAL_VERSION);
+            if (proposalVersionCell == null)
+                return true;
+            Integer proposalVersion = Int32Type.instance.compose(proposalVersionCell.value(), proposalVersionCell.accessor());
+            if (proposalVersion == null)
+                return true;
+
+            Cell proposal = row.getCell(PROPOSAL_UPDATE);
+            if (proposal == null)
+                return true;
+
+            ByteBuffer proposalValue = proposal.buffer();
+            if (!proposalValue.hasRemaining())
+                return true;
+
+            return isEmpty(proposalValue, DeserializationHelper.Flag.LOCAL, key);
+        }
+        catch (IOException e)
+        {
+            JVMStabilityInspector.inspectThrowable(e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static long getTimestamp(Row row, ColumnMetadata cmeta)
+    {
+        Cell cell = row.getCell(cmeta);
+        if (cell == null || cell.valueSize() == 0)
+            return Long.MIN_VALUE;
+        return cell.timestamp();
+    }
+
+    static PaxosKeyState getCommitState(DecoratedKey key, Row row, TableId targetTableId)
+    {
+        if (row == null)
+            return null;
+
+        UUID tableUuid = getTableUuid(row);
+        if (targetTableId != null && !targetTableId.asUUID().equals(tableUuid))
+            return null;
+
+        Ballot promise = latest(getBallot(row, WRITE_PROMISE), getBallot(row, READ_PROMISE));
+        Ballot proposal = getBallot(row, PROPOSAL);
+        Ballot commit = getBallot(row, COMMIT);
+
+        Ballot inProgress = null;
+        Ballot committed = null;
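+        // Classify this key's most recent paxos activity: a promise newer than both the proposal and the commit may
+        // be an in-progress operation; a proposal at least as recent as the promise and newer than the commit is in
+        // progress unless its update is empty (nothing left to complete), in which case it counts as committed;
+        // otherwise the most recent commit wins.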
+        if (isAfter(promise, proposal))
+        {
+            if (isAfter(promise, commit))
+                inProgress = promise;
+            else
+                committed = commit;
+        }
+        else if (isAfter(proposal, commit))
+        {
+            if (proposalIsEmpty(row, key))
+                committed = proposal;
+            else
+                inProgress = proposal;
+        }
+        else
+        {
+            committed = commit;
+        }
+
+        TableId tableId = TableId.fromUUID(tableUuid);
+        return inProgress != null ?
+               new PaxosKeyState(tableId, key, inProgress, false) :
+               new PaxosKeyState(tableId, key, committed, true);
+    }
+
+    private static class PaxosMemtableToKeyStateIterator extends AbstractIterator<PaxosKeyState> implements CloseableIterator<PaxosKeyState>
+    {
+        private final UnfilteredPartitionIterator partitions;
+        private UnfilteredRowIterator partition;
+        private final @Nullable TableId filterByTableId; // if unset, return records for all tables
+
+        private PaxosMemtableToKeyStateIterator(UnfilteredPartitionIterator partitions, TableId filterByTableId)
+        {
+            this.partitions = partitions;
+            this.filterByTableId = filterByTableId;
+        }
+
+        protected PaxosKeyState computeNext()
+        {
+            while (true)
+            {
+                if (partition != null && partition.hasNext())
+                {
+                    PaxosKeyState commitState = PaxosRows.getCommitState(partition.partitionKey(),
+                                                                         (Row) partition.next(),
+                                                                         filterByTableId);
+                    if (commitState == null)
+                        continue;
+
+                    return commitState;
+                }
+                else if (partition != null)
+                {
+                    partition.close();
+                    partition = null;
+                }
+
+                if (partitions.hasNext())
+                {
+                    partition = partitions.next();
+                }
+                else
+                {
+                    partitions.close();
+                    return endOfData();
+                }
+            }
+        }
+
+        public void close()
+        {
+            if (partition != null)
+                partition.close();
+            partitions.close();
+        }
+    }
+
+    static CloseableIterator<PaxosKeyState> toIterator(UnfilteredPartitionIterator partitions, TableId filterByTableId, boolean materializeLazily)
+    {
+        CloseableIterator<PaxosKeyState> iter = new PaxosMemtableToKeyStateIterator(partitions, filterByTableId);
+        if (materializeLazily)
+            return iter;
+
+        try
+        {
+            // eagerly materialize key states for repairs so we're not referencing memtables for the entire repair
+            return CloseableIterator.wrap(Lists.newArrayList(iter).iterator());
+        }
+        finally
+        {
+            iter.close();
+        }
+    }
+
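+    /**
+     * Returns the ballot from whichever of the promise, proposal and commit columns has the highest cell timestamp,
+     * or {@code current} if none of them exceeds the current ballot's timestamp.
+     */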
+    public static Ballot getHighBallot(Row row, Ballot current)
+    {
+        long maxUnixMicros = current != null ? current.unixMicros() : Long.MIN_VALUE;
+        ColumnMetadata maxCol = null;
+
+        long inProgressRead = getTimestamp(row, READ_PROMISE);
+        if (inProgressRead > maxUnixMicros)
+        {
+            maxUnixMicros = inProgressRead;
+            maxCol = READ_PROMISE;
+        }
+
+        long inProgressWrite = getTimestamp(row, WRITE_PROMISE);
+        if (inProgressWrite > maxUnixMicros)
+        {
+            maxUnixMicros = inProgressWrite;
+            maxCol = WRITE_PROMISE;
+        }
+
+        long proposal = getTimestamp(row, PROPOSAL);
+        if (proposal > maxUnixMicros)
+        {
+            maxUnixMicros = proposal;
+            maxCol = PROPOSAL;
+        }
+
+        long commit = getTimestamp(row, COMMIT);
+        if (commit > maxUnixMicros)
+            maxCol = COMMIT;
+
+        return maxCol == null ? current : getBallot(row, maxCol);
+    }
+
+    public static boolean hasBallotBeforeOrEqualTo(Row row, Ballot ballot)
+    {
+        return !Commit.isAfter(ballot, getBallot(row, WRITE_PROMISE))
+            && !Commit.isAfter(ballot, getBallot(row, READ_PROMISE))
+            && !Commit.isAfter(ballot, getBallot(row, PROPOSAL))
+            && !Commit.isAfter(ballot, getBallot(row, COMMIT));
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTracker.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTracker.java
new file mode 100644
index 0000000..d3594b3
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTracker.java
@@ -0,0 +1,322 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.statements.SelectStatement;
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.ReadExecutionController;
+import org.apache.cassandra.db.ReadQuery;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.ListType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.db.SystemKeyspace.PAXOS_REPAIR_HISTORY;
+import static org.apache.cassandra.schema.SchemaConstants.SYSTEM_KEYSPACE_NAME;
+
+/**
+ * Tracks uncommitted paxos operations and the paxos ballot high/low bounds
+ */
+public class PaxosStateTracker
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosStateTracker.class);
+
+    // when starting with no data, skip rebuilding uncommitted data from the paxos table
+    static final String SKIP_REBUILD_PROP = "cassandra.skip_paxos_state_rebuild";
+    static final String FORCE_REBUILD_PROP = "cassandra.force_paxos_state_rebuild";
+    static final String TRUNCATE_BALLOT_METADATA_PROP = "cassandra.truncate_ballot_metadata";
+
+    private static boolean skipRebuild()
+    {
+        return Boolean.getBoolean(SKIP_REBUILD_PROP);
+    }
+
+    private static boolean forceRebuild()
+    {
+        return Boolean.getBoolean(FORCE_REBUILD_PROP);
+    }
+
+    private static boolean truncateBallotMetadata()
+    {
+        return Boolean.getBoolean(TRUNCATE_BALLOT_METADATA_PROP);
+    }
+
+    private static final String DIRECTORY = "system/_paxos_repair_state";
+
+    private final PaxosUncommittedTracker uncommitted;
+    private final PaxosBallotTracker ballots;
+    private boolean rebuildNeeded;
+
+    public PaxosStateTracker(PaxosUncommittedTracker uncommitted, PaxosBallotTracker ballots, boolean rebuildNeeded)
+    {
+        this.uncommitted = uncommitted;
+        this.ballots = ballots;
+        this.rebuildNeeded = rebuildNeeded;
+    }
+
+    public boolean isRebuildNeeded()
+    {
+        return rebuildNeeded;
+    }
+
+    static File stateDirectory(File dataDirectory)
+    {
+        return new File(dataDirectory, DIRECTORY);
+    }
+
+    public static PaxosStateTracker create(File[] directories) throws IOException
+    {
+        File stateDirectory = null;
+        boolean hasExistingData = false;
+
+        for (File directory : directories)
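+        // Look for an existing paxos repair state directory across the data directories; the presence of the ballot
+        // tracker file marks a directory that has been used before.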
+        {
+            File candidate = stateDirectory(directory);
+            if (candidate.exists() && new File(candidate, PaxosBallotTracker.FNAME).exists())
+            {
+                Preconditions.checkState(!hasExistingData,
+                                         "Multiple paxos repair metadata directories found (%s, %s), remove the older directory and restart.",
+                                         stateDirectory, candidate);
+                hasExistingData = true;
+                stateDirectory = candidate;
+            }
+        }
+
+        if (stateDirectory == null)
+            stateDirectory = stateDirectory(directories[0]);
+
+        boolean rebuildNeeded = !hasExistingData || forceRebuild();
+
+        if (truncateBallotMetadata() && !rebuildNeeded)
+            logger.warn("{} was set, but {} was not and no rebuild is required. Ballot data will not be truncated",
+                        TRUNCATE_BALLOT_METADATA_PROP, FORCE_REBUILD_PROP);
+
+        if (rebuildNeeded)
+        {
+            if (stateDirectory.exists())
+            {
+                PaxosUncommittedTracker.truncate(stateDirectory);
+                if (truncateBallotMetadata())
+                    PaxosBallotTracker.truncate(stateDirectory);
+            }
+            else
+            {
+                stateDirectory.createDirectoriesIfNotExists();
+            }
+        }
+
+        PaxosUncommittedTracker uncommitted = PaxosUncommittedTracker.load(stateDirectory);
+        PaxosBallotTracker ballots = PaxosBallotTracker.load(stateDirectory);
+
+        if (!rebuildNeeded)
+            uncommitted.start();
+
+        return new PaxosStateTracker(uncommitted, ballots, rebuildNeeded);
+    }
+
+    public static PaxosStateTracker create(Directories.DataDirectories dataDirectories) throws IOException
+    {
+        return create(dataDirectories.getAllDirectories().stream().map(d -> d.location).toArray(File[]::new));
+    }
+
+    @SuppressWarnings("resource")
+    private void rebuildUncommittedData() throws IOException
+    {
+        logger.info("Beginning uncommitted paxos data rebuild. Set -Dcassandra.skip_paxos_state_rebuild=true and restart to skip");
+
+        String queryStr = "SELECT * FROM " + SYSTEM_KEYSPACE_NAME + '.' + SystemKeyspace.PAXOS;
+        SelectStatement stmt = (SelectStatement) QueryProcessor.parseStatement(queryStr).prepare(ClientState.forInternalCalls());
+        ReadQuery query = stmt.getQuery(QueryOptions.DEFAULT, FBUtilities.nowInSeconds());
+        try (ReadExecutionController controller = query.executionController();
+             PartitionIterator partitions = query.executeInternal(controller);
+             PaxosKeyStateRowsIterator rows = new PaxosKeyStateRowsIterator(partitions))
+        {
+            uncommitted.rebuild(rows);
+        }
+    }
+
+    class PaxosKeyStateRowsIterator implements CloseableIterator<PaxosKeyState>
+    {
+        // note: the partitions iterator is not closed here; the caller that supplied it is responsible for closing it
+        final PartitionIterator partitions;
+
+        RowIterator partition = null;
+        PaxosKeyState next = null;
+
+        PaxosKeyStateRowsIterator(PartitionIterator partitions)
+        {
+            this.partitions = partitions;
+        }
+
+        @Override
+        public boolean hasNext()
+        {
+            if (next != null)
+                return true;
+
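+            // Walk the paxos partitions, feeding every key's ballot into the ballot high bound but only surfacing
+            // uncommitted operations to the rebuild.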
+            while (true)
+            {
+                if (partition != null && partition.hasNext())
+                {
+                    PaxosKeyState commitState = PaxosRows.getCommitState(partition.partitionKey(), partition.next(), null);
+                    if (commitState == null)
+                        continue;
+                    ballots.updateHighBound(commitState.ballot);
+                    if (!commitState.committed)
+                    {
+                        next = commitState;
+                        return true;
+                    }
+                }
+                else
+                {
+                    if (partition != null)
+                    {
+                        partition.close();
+                        partition = null;
+                    }
+
+                    if (!partitions.hasNext())
+                        return false;
+
+                    partition = partitions.next();
+                }
+            }
+        }
+
+        @Override
+        public PaxosKeyState next()
+        {
+            if (next == null && !hasNext())
+                throw new NoSuchElementException();
+            PaxosKeyState next = this.next;
+            this.next = null;
+            return next;
+        }
+
+        @Override
+        public void close()
+        {
+            if (partition != null)
+            {
+                partition.close();
+                partition = null;
+            }
+        }
+    }
+
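+    // Reads system.paxos_repair_history and records the latest low bound seen across all tables that still exist,
+    // skipping keyspaces and tables that have since been dropped.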
+    private void updateLowBoundFromRepairHistory() throws IOException
+    {
+        String queryStr = "SELECT * FROM " + SYSTEM_KEYSPACE_NAME + '.' + PAXOS_REPAIR_HISTORY;
+        SelectStatement stmt = (SelectStatement) QueryProcessor.parseStatement(queryStr).prepare(ClientState.forInternalCalls());
+        ReadQuery query = stmt.getQuery(QueryOptions.DEFAULT, FBUtilities.nowInSeconds());
+
+        Ballot lowBound = null;
+        ListType<ByteBuffer> listType = ListType.getInstance(BytesType.instance, false);
+        ColumnMetadata pointsColumn = ColumnMetadata.regularColumn(SYSTEM_KEYSPACE_NAME, PAXOS_REPAIR_HISTORY, "points", listType);
+        try (ReadExecutionController controller = query.executionController(); PartitionIterator partitions = query.executeInternal(controller))
+        {
+            while (partitions.hasNext())
+            {
+                try (RowIterator partition = partitions.next())
+                {
+                    String keyspaceName = UTF8Type.instance.compose(partition.partitionKey().getKey());
+                    if (Schema.instance.getKeyspaceMetadata(keyspaceName) == null)
+                        continue;
+
+                    Keyspace.open(keyspaceName);
+                    while (partition.hasNext())
+                    {
+                        Row row = partition.next();
+                        Clustering clustering = row.clustering();
+                        String tableName = UTF8Type.instance.compose(clustering.get(0), clustering.accessor());
+                        if (Schema.instance.getTableMetadata(keyspaceName, tableName) == null)
+                            continue;
+
+                        Cell pointsCell = row.getCell(pointsColumn);
+                        List<ByteBuffer> points = listType.compose(pointsCell.value(), pointsCell.accessor());
+                        PaxosRepairHistory history = PaxosRepairHistory.fromTupleBufferList(points);
+                        lowBound = Commit.latest(lowBound, history.maxLowBound());
+                    }
+                }
+            }
+        }
+        ballots.updateLowBound(lowBound);
+    }
+
+    public void maybeRebuild() throws IOException
+    {
+        if (!rebuildNeeded)
+            return;
+
+        if (truncateBallotMetadata())
+        {
+            logger.info("Truncating {}.{}", SYSTEM_KEYSPACE_NAME, PAXOS_REPAIR_HISTORY);
+            Keyspace.open(SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(PAXOS_REPAIR_HISTORY).truncateBlocking();
+        }
+
+        if (!skipRebuild())
+        {
+            rebuildUncommittedData();
+
+            if (!truncateBallotMetadata()) // no point doing this if we just truncated the repair history table
+                updateLowBoundFromRepairHistory();
+            logger.info("Uncommitted paxos data rebuild completed");
+        }
+        uncommitted.start();
+        ballots.flush();   // explicitly flush since a missing ballot file on startup indicates a rebuild is needed
+        rebuildNeeded = false;
+    }
+
+    public PaxosUncommittedTracker uncommitted()
+    {
+        return uncommitted;
+    }
+
+    public PaxosBallotTracker ballots()
+    {
+        return ballots;
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedIndex.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedIndex.java
new file mode 100644
index 0000000..5e3b540
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedIndex.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.util.*;
+import java.util.concurrent.Callable;
+import java.util.function.BiFunction;
+import java.util.stream.Collectors;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.Callables;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.Operator;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.filter.RowFilter;
+import org.apache.cassandra.db.lifecycle.View;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.partitions.*;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.index.Index;
+import org.apache.cassandra.index.IndexRegistry;
+import org.apache.cassandra.index.transactions.IndexTransaction;
+import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.IndexMetadata;
+import org.apache.cassandra.schema.Indexes;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.utils.CloseableIterator;
+
+import static java.util.Collections.*;
+import static org.apache.cassandra.schema.SchemaConstants.SYSTEM_KEYSPACE_NAME;
+import static org.apache.cassandra.service.paxos.PaxosState.ballotTracker;
+import static org.apache.cassandra.service.paxos.PaxosState.uncommittedTracker;
+
+/**
+ * A 2i implementation made specifically for system.paxos that listens for changes to paxos state by interpreting
+ * mutations against system.paxos and updates the uncommitted tracker accordingly.
+ *
+ * No read expressions are supported by the index.
+ *
+ * This is implemented as a 2i so it can piggyback on the commit log and paxos table flushes, avoiding the need to
+ * implement (and debug) a parallel log/flush system for the tracker. It also means we don't have to worry about the
+ * tracker becoming out of sync with the paxos table due to failures or edge cases outside of the PaxosTableState
+ * class itself.
+ */
+public class PaxosUncommittedIndex implements Index, PaxosUncommittedTracker.UpdateSupplier
+{
+    public final ColumnFamilyStore baseCfs;
+    protected IndexMetadata metadata;
+
+    private static final DataRange FULL_RANGE = DataRange.allData(DatabaseDescriptor.getPartitioner());
+    private final ColumnFilter memtableColumnFilter;
+
+    public PaxosUncommittedIndex(ColumnFamilyStore baseTable, IndexMetadata metadata)
+    {
+        Preconditions.checkState(baseTable.metadata.keyspace.equals(SYSTEM_KEYSPACE_NAME));
+        Preconditions.checkState(baseTable.metadata.name.equals(SystemKeyspace.PAXOS));
+
+        this.baseCfs = baseTable;
+        this.metadata = metadata;
+
+        this.memtableColumnFilter = ColumnFilter.all(baseTable.metadata.get());
+        PaxosUncommittedTracker.unsafSetUpdateSupplier(this);
+    }
+
+    public static IndexMetadata indexMetadata()
+    {
+        Map<String, String> options = new HashMap<>();
+        options.put("class_name", PaxosUncommittedIndex.class.getName());
+        options.put("target", "");
+        return IndexMetadata.fromSchemaMetadata("PaxosUncommittedIndex", IndexMetadata.Kind.CUSTOM, options);
+    }
+
+    public static Indexes indexes()
+    {
+        return Indexes.builder().add(indexMetadata()).build();
+    }
+
+    public Callable<?> getInitializationTask()
+    {
+        return Callables.returning(null);
+    }
+
+    public IndexMetadata getIndexMetadata()
+    {
+        return metadata;
+    }
+
+    public Callable<?> getMetadataReloadTask(IndexMetadata indexMetadata)
+    {
+        return Callables.returning(null);
+    }
+
+    public void register(IndexRegistry registry)
+    {
+        registry.registerIndex(this);
+    }
+
+    public Optional<ColumnFamilyStore> getBackingTable()
+    {
+        return Optional.empty();
+    }
+
+    private CloseableIterator<PaxosKeyState> getPaxosUpdates(List<UnfilteredPartitionIterator> iterators, TableId filterByTableId, boolean materializeLazily)
+    {
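+        // Repair queries filter by table and materialize results eagerly; flush iterators carry no table filter and
+        // are consumed lazily, so the two arguments must always move together.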
+        Preconditions.checkArgument((filterByTableId == null) == materializeLazily);
+
+        return PaxosRows.toIterator(UnfilteredPartitionIterators.merge(iterators, UnfilteredPartitionIterators.MergeListener.NOOP), filterByTableId, materializeLazily);
+    }
+
+    public CloseableIterator<PaxosKeyState> repairIterator(TableId tableId, Collection<Range<Token>> ranges)
+    {
+        Preconditions.checkNotNull(tableId);
+
+        View view = baseCfs.getTracker().getView();
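+        // Include flushing memtables alongside the live ones so paxos state that is mid-flush remains visible to the
+        // repair iterator.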
+        List<Memtable> memtables = view.flushingMemtables.isEmpty()
+                                   ? view.liveMemtables
+                                   : ImmutableList.<Memtable>builder().addAll(view.flushingMemtables).addAll(view.liveMemtables).build();
+
+        List<DataRange> dataRanges = ranges.stream().map(DataRange::forTokenRange).collect(Collectors.toList());
+        List<UnfilteredPartitionIterator> iters = new ArrayList<>(memtables.size() * ranges.size());
+
+        for (int j=0, jsize=dataRanges.size(); j<jsize; j++)
+        {
+            for (int i=0, isize=memtables.size(); i<isize; i++)
+                iters.add(memtables.get(i).partitionIterator(memtableColumnFilter, dataRanges.get(j), SSTableReadsListener.NOOP_LISTENER));
+        }
+
+        return getPaxosUpdates(iters, tableId, false);
+    }
+
+    public CloseableIterator<PaxosKeyState> flushIterator(Memtable flushing)
+    {
+        List<UnfilteredPartitionIterator> iters = singletonList(flushing.partitionIterator(memtableColumnFilter, FULL_RANGE, SSTableReadsListener.NOOP_LISTENER));
+        return getPaxosUpdates(iters, null, true);
+    }
+
+    public Callable<?> getBlockingFlushTask()
+    {
+        return (Callable<Object>) () -> {
+            ballotTracker().flush();
+            return null;
+        };
+    }
+
+    public Callable<?> getBlockingFlushTask(Memtable paxos)
+    {
+        return (Callable<Object>) () -> {
+            uncommittedTracker().flushUpdates(paxos);
+            ballotTracker().flush();
+            return null;
+        };
+    }
+
+    public Callable<?> getInvalidateTask()
+    {
+        return (Callable<Object>) () -> {
+            uncommittedTracker().truncate();
+            ballotTracker().truncate();
+            return null;
+        };
+    }
+
+    public Callable<?> getTruncateTask(long truncatedAt)
+    {
+        return (Callable<Object>) () -> {
+            uncommittedTracker().truncate();
+            ballotTracker().truncate();
+            return null;
+        };
+    }
+
+    public boolean shouldBuildBlocking()
+    {
+        return false;
+    }
+
+    public boolean dependsOn(ColumnMetadata column)
+    {
+        return false;
+    }
+
+    public boolean supportsExpression(ColumnMetadata column, Operator operator)
+    {
+        // should prevent this from ever being used
+        return false;
+    }
+
+    public AbstractType<?> customExpressionValueType()
+    {
+        return null;
+    }
+
+    public RowFilter getPostIndexQueryFilter(RowFilter filter)
+    {
+        return null;
+    }
+
+    public long getEstimatedResultRows()
+    {
+        return 0;
+    }
+
+    public void validate(PartitionUpdate update) throws InvalidRequestException
+    {
+
+    }
+
+    public Indexer indexerFor(DecoratedKey key, RegularAndStaticColumns columns, int nowInSec, WriteContext ctx, IndexTransaction.Type transactionType)
+    {
+        return indexer;
+    }
+
+    public BiFunction<PartitionIterator, ReadCommand, PartitionIterator> postProcessorFor(ReadCommand command)
+    {
+        return null;
+    }
+
+    public Searcher searcherFor(ReadCommand command)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    private final Indexer indexer = new Indexer()
+    {
+        public void begin() {}
+        public void partitionDelete(DeletionTime deletionTime) {}
+        public void rangeTombstone(RangeTombstone tombstone) {}
+
+        public void insertRow(Row row)
+        {
+            ballotTracker().onUpdate(row);
+        }
+
+        public void updateRow(Row oldRowData, Row newRowData)
+        {
+            ballotTracker().onUpdate(newRowData);
+        }
+
+        public void removeRow(Row row) {}
+        public void finish() {}
+    };
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTracker.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTracker.java
new file mode 100644
index 0000000..7712c79
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTracker.java
@@ -0,0 +1,377 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.cleanup.PaxosTableRepairs;
+import org.apache.cassandra.utils.CloseableIterator;
+
+import static org.apache.cassandra.config.DatabaseDescriptor.paxosRepairEnabled;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosKeyState.mergeUncommitted;
+
+/**
+ * Tracks uncommitted paxos operations to enable operation completion as part of repair by returning an iterator of
+ * partition keys with uncommitted paxos operations (and their consistency levels) for a given table and token range(s).
+ *
+ * There are two parts to the uncommitted states it tracks: operations flushed to disk, and updates still in memory.
+ * This class handles merging these two sources for queries and for merging states as part of flush. In practice, the
+ * in-memory updates are the contents of the system.paxos memtables, although this has been generalized into an
+ * "UpdateSupplier" interface to accommodate testing.
+ */
+public class PaxosUncommittedTracker
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosUncommittedTracker.class);
+    private static final Range<Token> FULL_RANGE = new Range<>(DatabaseDescriptor.getPartitioner().getMinimumToken(),
+                                                               DatabaseDescriptor.getPartitioner().getMinimumToken());
+
+    private static volatile UpdateSupplier updateSupplier;
+
+    private volatile boolean autoRepairsEnabled = !Boolean.getBoolean("cassandra.disable_paxos_auto_repairs");
+    private volatile boolean stateFlushEnabled = !Boolean.getBoolean("cassandra.disable_paxos_state_flush");
+
+    private boolean started = false;
+    private boolean autoRepairStarted = false;
+
+    private final Set<TableId> autoRepairTableIds = Sets.newConcurrentHashSet();
+
+    public interface UpdateSupplier
+    {
+        CloseableIterator<PaxosKeyState> repairIterator(TableId tableId, Collection<Range<Token>> ranges);
+        CloseableIterator<PaxosKeyState> flushIterator(Memtable paxos);
+    }
+
+    private final File dataDirectory;
+    private volatile ImmutableMap<TableId, UncommittedTableData> tableStates;
+
+    public PaxosUncommittedTracker(File dataDirectory, ImmutableMap<TableId, UncommittedTableData> tableStates)
+    {
+        this.dataDirectory = dataDirectory;
+        this.tableStates = tableStates;
+    }
+
+    public PaxosUncommittedTracker(File dataDirectory)
+    {
+        this(dataDirectory, ImmutableMap.of());
+    }
+
+    public File getDirectory()
+    {
+        return dataDirectory;
+    }
+
+    public static void truncate(File dataDirectory)
+    {
+        logger.info("truncating paxos uncommitted metadata in {}", dataDirectory);
+        for (File file : dataDirectory.tryList())
+        {
+            if (file.name().equals(PaxosBallotTracker.FNAME))
+                continue;
+
+            if (file.isDirectory())
+                FileUtils.deleteRecursive(file);
+            else
+                FileUtils.deleteWithConfirm(file);
+        }
+    }
+
+    public static PaxosUncommittedTracker load(File dataDirectory)
+    {
+        ImmutableMap.Builder<TableId, UncommittedTableData> builder = ImmutableMap.builder();
+        for (TableId tableId : UncommittedTableData.listTableIds(dataDirectory))
+        {
+            builder.put(tableId, UncommittedTableData.load(dataDirectory, tableId));
+        }
+
+        return new PaxosUncommittedTracker(dataDirectory, builder.build());
+    }
+
+    @VisibleForTesting
+    UncommittedTableData getOrCreateTableState(TableId tableId)
+    {
+        UncommittedTableData state = tableStates.get(tableId);
+        if (state == null)
+        {
+            synchronized (this)
+            {
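+                // Re-check under the lock, then publish a new immutable map that includes the newly loaded state.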
+                state = tableStates.get(tableId);
+                if (state != null)
+                    return state;
+
+                state = UncommittedTableData.load(dataDirectory, tableId);
+                tableStates = ImmutableMap.<TableId, UncommittedTableData>builder()
+                              .putAll(tableStates).put(tableId, state)
+                              .build();
+            }
+        }
+        return state;
+    }
+
+    synchronized void flushUpdates(Memtable paxos) throws IOException
+    {
+        if (!stateFlushEnabled || !started)
+            return;
+
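+        // Drain the flushing memtable's paxos updates into one flush writer per table, aborting every writer if
+        // anything fails part way through.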
+        Map<TableId, UncommittedTableData.FlushWriter> flushWriters = new HashMap<>();
+        try (CloseableIterator<PaxosKeyState> iterator = updateSupplier.flushIterator(paxos))
+        {
+            while (iterator.hasNext())
+            {
+                PaxosKeyState next = iterator.next();
+                UncommittedTableData.FlushWriter writer = flushWriters.get(next.tableId);
+                if (writer == null)
+                {
+                    writer = getOrCreateTableState(next.tableId).flushWriter();
+                    flushWriters.put(next.tableId, writer);
+                }
+                writer.append(next);
+            }
+        }
+        catch (Throwable t)
+        {
+            for (UncommittedTableData.FlushWriter writer : flushWriters.values())
+                t = writer.abort(t);
+            throw new IOException(t);
+        }
+
+        for (UncommittedTableData.FlushWriter writer : flushWriters.values())
+            writer.finish();
+    }
+
+    @VisibleForTesting
+    UncommittedTableData getTableState(TableId tableId)
+    {
+        return tableStates.get(tableId);
+    }
+
+    @SuppressWarnings("resource")
+    public CloseableIterator<UncommittedPaxosKey> uncommittedKeyIterator(TableId tableId, Collection<Range<Token>> ranges)
+    {
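+        // Merge in-memory paxos updates with any on-disk key state files for this table, closing whichever iterators
+        // were opened if constructing the merge fails.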
+        ranges = (ranges == null || ranges.isEmpty()) ? Collections.singleton(FULL_RANGE) : Range.normalize(ranges);
+        CloseableIterator<PaxosKeyState> updates = updateSupplier.repairIterator(tableId, ranges);
+
+        try
+        {
+            UncommittedTableData state = tableStates.get(tableId);
+            if (state == null)
+                return PaxosKeyState.toUncommittedInfo(updates);
+
+            CloseableIterator<PaxosKeyState> fileIter = state.iterator(ranges);
+            try
+            {
+                @SuppressWarnings("unchecked") CloseableIterator<PaxosKeyState> merged = mergeUncommitted(updates, fileIter);
+
+                return PaxosKeyState.toUncommittedInfo(merged);
+            }
+            catch (Throwable t)
+            {
+                fileIter.close();
+                throw t;
+            }
+        }
+        catch (Throwable t)
+        {
+            updates.close();
+            throw t;
+        }
+    }
+
+    synchronized void truncate()
+    {
+        logger.info("truncating paxos uncommitted info");
+        tableStates.values().forEach(UncommittedTableData::truncate);
+        tableStates = ImmutableMap.of();
+    }
+
+    public synchronized void start()
+    {
+        if (started)
+            return;
+
+        logger.info("enabling PaxosUncommittedTracker");
+        started = true;
+    }
+
+    public synchronized void rebuild(Iterator<PaxosKeyState> iterator) throws IOException
+    {
+        Preconditions.checkState(!started);
+        truncate();
+
+        Map<TableId, UncommittedTableData.FlushWriter> flushWriters = new HashMap<>();
+        try
+        {
+            while (iterator.hasNext())
+            {
+                PaxosKeyState next = iterator.next();
+                UncommittedTableData.FlushWriter writer = flushWriters.get(next.tableId);
+                if (writer == null)
+                {
+                    writer = getOrCreateTableState(next.tableId).rebuildWriter();
+                    flushWriters.put(next.tableId, writer);
+                }
+                writer.append(next);
+            }
+            for (UncommittedTableData.FlushWriter writer : flushWriters.values())
+                writer.finish();
+        }
+        catch (Throwable t)
+        {
+            for (UncommittedTableData.FlushWriter writer : flushWriters.values())
+                t = writer.abort(t);
+            throw new IOException(t);
+        }
+
+        start();
+    }
+
+    synchronized void consolidateFiles()
+    {
+        tableStates.values().forEach(UncommittedTableData::maybeScheduleMerge);
+    }
+
+    synchronized void schedulePaxosAutoRepairs()
+    {
+        if (!paxosRepairEnabled() || !autoRepairsEnabled)
+            return;
+
+        for (UncommittedTableData tableData : tableStates.values())
+        {
+            if (tableData.numFiles() == 0)
+                continue;
+
+            if (SchemaConstants.REPLICATED_SYSTEM_KEYSPACE_NAMES.contains(tableData.keyspace()))
+                continue;
+
+            TableId tableId = tableData.tableId();
+            if (Schema.instance.getTableMetadata(tableId) == null)
+                continue;
+
+            logger.debug("Starting paxos auto repair for {}.{}", tableData.keyspace(), tableData.table());
+
+            if (!autoRepairTableIds.add(tableId))
+            {
+                logger.debug("Skipping paxos auto repair for {}.{}, another auto repair is already in progress", tableData.keyspace(), tableData.table());
+                continue;
+            }
+
+            StorageService.instance.autoRepairPaxos(tableId).addCallback((success, failure) -> {
+                if (failure != null) logger.error("Paxos auto repair for {}.{} failed", tableData.keyspace(), tableData.table(), failure);
+                else logger.debug("Paxos auto repair for {}.{} completed", tableData.keyspace(), tableData.table());
+                autoRepairTableIds.remove(tableId);
+            });
+        }
+    }
+
+    private static void runAndLogException(String desc, Runnable runnable)
+    {
+        try
+        {
+            runnable.run();
+        }
+        catch (Throwable t)
+        {
+            logger.error("Unhandled exception running " + desc, t);
+        }
+    }
+
+    void maintenance()
+    {
+        runAndLogException("file consolidation", this::consolidateFiles);
+        runAndLogException("schedule auto repairs", this::schedulePaxosAutoRepairs);
+        runAndLogException("evict hung repairs", PaxosTableRepairs::evictHungRepairs);
+    }
+
+    public synchronized void startAutoRepairs()
+    {
+        if (autoRepairStarted)
+            return;
+        int seconds = Integer.getInteger("cassandra.auto_repair_frequency_seconds", (int) TimeUnit.MINUTES.toSeconds(5));
+        ScheduledExecutors.scheduledTasks.scheduleAtFixedRate(this::maintenance, seconds, seconds, TimeUnit.SECONDS);
+        autoRepairStarted = true;
+    }
+
+    @VisibleForTesting
+    public boolean hasInflightAutoRepairs()
+    {
+        return !autoRepairTableIds.isEmpty();
+    }
+
+    public boolean isAutoRepairsEnabled()
+    {
+        return autoRepairsEnabled;
+    }
+
+    public void setAutoRepairsEnabled(boolean autoRepairsEnabled)
+    {
+        this.autoRepairsEnabled = autoRepairsEnabled;
+    }
+
+    public boolean isStateFlushEnabled()
+    {
+        return stateFlushEnabled;
+    }
+
+    public void setStateFlushEnabled(boolean enabled)
+    {
+        this.stateFlushEnabled = enabled;
+    }
+
+    public Set<TableId> tableIds()
+    {
+        return tableStates.keySet();
+    }
+
+    public static UpdateSupplier unsafGetUpdateSupplier()
+    {
+        return updateSupplier;
+    }
+
+    public static void unsafSetUpdateSupplier(UpdateSupplier updateSupplier)
+    {
+        Preconditions.checkArgument(updateSupplier != null);
+        PaxosUncommittedTracker.updateSupplier = updateSupplier;
+    }
+
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedDataFile.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedDataFile.java
new file mode 100644
index 0000000..b2a5004
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedDataFile.java
@@ -0,0 +1,383 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.UUID;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.PeekingIterator;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.FSReadError;
+import org.apache.cassandra.io.util.ChecksummedRandomAccessReader;
+import org.apache.cassandra.io.util.ChecksummedSequentialWriter;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.RandomAccessReader;
+import org.apache.cassandra.io.util.SequentialWriter;
+import org.apache.cassandra.io.util.SequentialWriterOption;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.AbstractIterator;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.Throwables;
+
+public class UncommittedDataFile
+{
+    static final String EXTENSION = "paxos";
+    static final String TMP_SUFFIX = ".tmp";
+    private static final int VERSION = 0;
+    final TableId tableId;
+    private final File file;
+    private final File crcFile;
+    private final long generation;
+    private int activeReaders = 0;
+    private boolean markedDeleted = false;
+
+    private UncommittedDataFile(TableId tableId, File file, File crcFile, long generation)
+    {
+        this.tableId = tableId;
+        this.file = file;
+        this.crcFile = crcFile;
+        this.generation = generation;
+    }
+
+    public static UncommittedDataFile create(TableId tableId, File file, File crcFile, long generation)
+    {
+        return new UncommittedDataFile(tableId, file, crcFile, generation);
+    }
+
+    static Writer writer(File directory, String keyspace, String table, TableId tableId, long generation) throws IOException
+    {
+        return new Writer(directory, keyspace, table, tableId, generation);
+    }
+
+    static Set<TableId> listTableIds(File directory)
+    {
+        Pattern pattern = Pattern.compile(".*-([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})-(\\d+)\\." + EXTENSION + '$');
+        Set<TableId> tableIds = new HashSet<>();
+        for (String fname : directory.listNamesUnchecked())
+        {
+            Matcher matcher = pattern.matcher(fname);
+            if (matcher.matches())
+                tableIds.add(TableId.fromUUID(UUID.fromString(matcher.group(1))));
+        }
+        return tableIds;
+    }
+
+    static Pattern fileRegexFor(TableId tableId)
+    {
+        return Pattern.compile(".*-" + tableId.toString() + "-(\\d+)\\." + EXTENSION + ".*");
+    }
+
+    static boolean isTmpFile(String fname)
+    {
+        return fname.endsWith(TMP_SUFFIX);
+    }
+
+    static boolean isCrcFile(String fname)
+    {
+        return fname.endsWith(".crc");
+    }
+
+    static String fileName(String keyspace, String table, TableId tableId, long generation)
+    {
+        return String.format("%s-%s-%s-%s.%s", keyspace, table, tableId, generation, EXTENSION);
+    }
+
+    static String crcName(String fname)
+    {
+        return fname + ".crc";
+    }
+
+    synchronized void markDeleted()
+    {
+        markedDeleted = true;
+        maybeDelete();
+    }
+
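+    // The data and crc files are only removed once the file has been marked deleted and no iterators still hold it open.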
+    private void maybeDelete()
+    {
+        if (markedDeleted && activeReaders == 0)
+        {
+            file.delete();
+            crcFile.delete();
+        }
+    }
+
+    private synchronized void onIteratorClose()
+    {
+        activeReaders--;
+        maybeDelete();
+    }
+
+    @VisibleForTesting
+    File file()
+    {
+        return file;
+    }
+
+    @VisibleForTesting
+    int getActiveReaders()
+    {
+        return activeReaders;
+    }
+
+    @VisibleForTesting
+    boolean isMarkedDeleted()
+    {
+        return markedDeleted;
+    }
+
+    long generation()
+    {
+        return generation;
+    }
+
+    /**
+     * Return an iterator of the file contents for the given token ranges. Token ranges
+     * must be normalized
+     */
+    synchronized CloseableIterator<PaxosKeyState> iterator(Collection<Range<Token>> ranges)
+    {
+        Preconditions.checkArgument(Iterables.elementsEqual(Range.normalize(ranges), ranges));
+        if (markedDeleted)
+            return null;
+        activeReaders++;
+        return new KeyCommitStateIterator(ranges);
+    }
+
+    private interface PeekingKeyCommitIterator extends CloseableIterator<PaxosKeyState>, PeekingIterator<PaxosKeyState>
+    {
+        static final PeekingKeyCommitIterator EMPTY = new PeekingKeyCommitIterator()
+        {
+            public PaxosKeyState peek() { throw new NoSuchElementException(); }
+            public void remove() { throw new NoSuchElementException(); }
+            public void close() { }
+            public boolean hasNext() { return false; }
+            public PaxosKeyState next() { throw new NoSuchElementException(); }
+        };
+    }
+
+    static class Writer
+    {
+        final File directory;
+        final String keyspace;
+        final String table;
+        final TableId tableId;
+        long generation;
+
+        private final File file;
+        private final File crcFile;
+        final SequentialWriter writer;
+        DecoratedKey lastKey = null;
+
+        private String fileName(long generation)
+        {
+            return UncommittedDataFile.fileName(keyspace, table, tableId, generation);
+        }
+
+        private String crcName(long generation)
+        {
+            return UncommittedDataFile.crcName(fileName(generation));
+        }
+
+        Writer(File directory, String keyspace, String table, TableId tableId, long generation) throws IOException
+        {
+            this.directory = directory;
+            this.keyspace = keyspace;
+            this.table = table;
+            this.tableId = tableId;
+            this.generation = generation;
+
+            directory.createDirectoriesIfNotExists();
+
+            this.file = new File(this.directory, fileName(generation) + TMP_SUFFIX);
+            this.crcFile = new File(this.directory, crcName(generation) + TMP_SUFFIX);
+            this.writer = new ChecksummedSequentialWriter(file, crcFile, null, SequentialWriterOption.DEFAULT);
+            this.writer.writeInt(VERSION);
+        }
+
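+        // Entries must be appended in partition key order; each record is a short-length-prefixed key followed by the
+        // ballot and a committed flag.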
+        void append(PaxosKeyState state) throws IOException
+        {
+            if (lastKey != null)
+                Preconditions.checkArgument(state.key.compareTo(lastKey) > 0);
+            lastKey = state.key;
+            ByteBufferUtil.writeWithShortLength(state.key.getKey(), writer);
+            state.ballot.serialize(writer);
+            writer.writeBoolean(state.committed);
+        }
+
+        Throwable abort(Throwable accumulate)
+        {
+            return writer.abort(accumulate);
+        }
+
+        UncommittedDataFile finish()
+        {
+            writer.finish();
+            File finalCrc = new File(directory, crcName(generation));
+            File finalData = new File(directory, fileName(generation));
+            try
+            {
+                crcFile.move(finalCrc);
+                file.move(finalData);
+                return new UncommittedDataFile(tableId, finalData, finalCrc, generation);
+            }
+            catch (Throwable e)
+            {
+                Throwable merged = e;
+                for (File f : new File[]{crcFile, finalCrc, file, finalData})
+                {
+                    try
+                    {
+                        if (f.exists())
+                            Files.delete(f.toPath());
+                    }
+                    catch (Throwable t)
+                    {
+                        merged = Throwables.merge(merged, t);
+                    }
+                }
+
+                if (merged != e)
+                    throw new RuntimeException(merged);
+                throw e;
+            }
+        }
+    }
+
+    class KeyCommitStateIterator extends AbstractIterator<PaxosKeyState> implements PeekingKeyCommitIterator
+    {
+        private final Iterator<Range<Token>> rangeIterator;
+        private final RandomAccessReader reader;
+
+        private Range<PartitionPosition> currentRange;
+
+        KeyCommitStateIterator(Collection<Range<Token>> ranges)
+        {
+            this.rangeIterator = ranges.iterator();
+            try
+            {
+                this.reader = ChecksummedRandomAccessReader.open(file, crcFile);
+            }
+            catch (IOException e)
+            {
+                throw new FSReadError(e, file);
+            }
+            validateVersion(this.reader);
+
+            Preconditions.checkArgument(rangeIterator.hasNext());
+            currentRange = convertRange(rangeIterator.next());
+        }
+
+        private Range<PartitionPosition> convertRange(Range<Token> tokenRange)
+        {
+            return new Range<>(tokenRange.left.maxKeyBound(), tokenRange.right.maxKeyBound());
+        }
+
+        private void validateVersion(RandomAccessReader reader)
+        {
+            try
+            {
+                int version = reader.readInt();
+                Preconditions.checkArgument(version == VERSION, "unsupported file version: %s", version);
+            }
+            catch (IOException e)
+            {
+                throw new FSReadError(e, file);
+            }
+        }
+
+        PaxosKeyState createKeyState(DecoratedKey key, RandomAccessReader reader) throws IOException
+        {
+            return new PaxosKeyState(tableId, key,
+                                     Ballot.deserialize(reader),
+                                     reader.readBoolean());
+        }
+
+        /**
+         * skip any bytes after the key
+         */
+        void skipEntryRemainder(RandomAccessReader reader) throws IOException
+        {
+            reader.skipBytes((int) Ballot.sizeInBytes());
+            reader.readBoolean();
+        }
+
+        protected synchronized PaxosKeyState computeNext()
+        {
+            try
+            {
+                nextKey:
+                while (!reader.isEOF())
+                {
+                    DecoratedKey key = DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.readWithShortLength(reader));
+
+                    while (!currentRange.contains(key))
+                    {
+                        // if this falls before our current target range, just keep going
+                        if (currentRange.left.compareTo(key) >= 0)
+                        {
+                            skipEntryRemainder(reader);
+                            continue nextKey;
+                        }
+
+                        // otherwise check against subsequent ranges and end iteration if there are none
+                        if (!rangeIterator.hasNext())
+                            return endOfData();
+
+                        currentRange = convertRange(rangeIterator.next());
+                    }
+
+                    return createKeyState(key, reader);
+                }
+                return endOfData();
+            }
+            catch (IOException e)
+            {
+                throw new FSReadError(e, file);
+            }
+        }
+
+        public void close()
+        {
+            synchronized (this)
+            {
+                reader.close();
+            }
+            onIteratorClose();
+        }
+    }
+}
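
A minimal usage sketch of the iterator contract above, not part of the patch itself: ranges must already be normalized with Range.normalize, and a file that has been marked deleted returns null rather than an iterator. The dataFile, ksName and consume names below are placeholders.

    // Hypothetical caller, mirroring the pattern used by UncommittedTableData.merge further down.
    List<Range<Token>> raw = StorageService.instance.getLocalAndPendingRanges(ksName);
    Collection<Range<Token>> normalized = Range.normalize(raw);            // sorted, non-overlapping ranges
    CloseableIterator<PaxosKeyState> iter = dataFile.iterator(normalized);
    if (iter != null)                                                      // null means the file was already marked deleted
    {
        try
        {
            while (iter.hasNext())
                consume(iter.next());                                      // hypothetical consumer
        }
        finally
        {
            iter.close();                                                  // decrements activeReaders and may delete the file
        }
    }
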
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedPaxosKey.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedPaxosKey.java
new file mode 100644
index 0000000..c78f94d
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedPaxosKey.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.service.paxos.Ballot;
+
+public interface UncommittedPaxosKey
+{
+    DecoratedKey getKey();
+    ConsistencyLevel getConsistencyLevel();
+    Ballot ballot();
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableData.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableData.java
new file mode 100644
index 0000000..dd47e40
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableData.java
@@ -0,0 +1,616 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.PeekingIterator;
+import com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.SchemaElement;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.FSReadError;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+import org.apache.cassandra.utils.AbstractIterator;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.ExecutorUtils;
+import org.apache.cassandra.utils.MergeIterator;
+import org.apache.cassandra.utils.Throwables;
+
+import static com.google.common.collect.Iterables.elementsEqual;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.service.paxos.uncommitted.UncommittedDataFile.isCrcFile;
+import static org.apache.cassandra.service.paxos.uncommitted.UncommittedDataFile.isTmpFile;
+import static org.apache.cassandra.service.paxos.uncommitted.UncommittedDataFile.writer;
+
+/**
+ * Tracks the uncommitted paxos key state files for a single table. On memtable flush, uncommitted paxos key states are written out as a new generation file; this class coordinates those flushes, merges older generations together, and marks superseded files for deletion.
+ */
+public class UncommittedTableData
+{
+    private static final Logger logger = LoggerFactory.getLogger(UncommittedTableData.class);
+    private static final Collection<Range<Token>> FULL_RANGE;
+
+    static
+    {
+        Token min = DatabaseDescriptor.getPartitioner().getMinimumToken();
+        FULL_RANGE = Collections.singleton(new Range<>(min, min));
+    }
+
+    private static final SchemaElement UNKNOWN_TABLE = TableMetadata.minimal("UNKNOWN", "UNKNOWN");
+    private static final ExecutorPlus executor = executorFactory().sequential("PaxosUncommittedMerge");
+
+    public interface FlushWriter
+    {
+        void append(PaxosKeyState commitState) throws IOException;
+
+        void finish();
+
+        Throwable abort(Throwable accumulate);
+
+        default void appendAll(Iterable<PaxosKeyState> states) throws IOException
+        {
+            for (PaxosKeyState state : states)
+                append(state);
+        }
+    }
+
+    private static class FilteringIterator extends AbstractIterator<PaxosKeyState> implements CloseableIterator<PaxosKeyState>
+    {
+        private final CloseableIterator<PaxosKeyState> wrapped;
+        private final PeekingIterator<PaxosKeyState> peeking;
+        private final PeekingIterator<Range<Token>> rangeIterator;
+        private final PaxosRepairHistory.Searcher historySearcher;
+
+        FilteringIterator(CloseableIterator<PaxosKeyState> wrapped, List<Range<Token>> ranges, PaxosRepairHistory history)
+        {
+            this.wrapped = wrapped;
+            this.peeking = Iterators.peekingIterator(wrapped);
+            this.rangeIterator = Iterators.peekingIterator(Range.normalize(ranges).iterator());
+            this.historySearcher = history.searcher();
+        }
+
+        protected PaxosKeyState computeNext()
+        {
+            while (true)
+            {
+                if (!peeking.hasNext() || !rangeIterator.hasNext())
+                    return endOfData();
+
+                Range<Token> range = rangeIterator.peek();
+
+                Token token = peeking.peek().key.getToken();
+                if (!range.contains(token))
+                {
+                    if (range.right.compareTo(token) < 0)
+                        rangeIterator.next();
+                    else
+                        peeking.next();
+                    continue;
+                }
+
+                PaxosKeyState next = peeking.next();
+
+                Ballot lowBound = historySearcher.ballotForToken(token);
+                if (Commit.isAfter(lowBound, next.ballot))
+                    continue;
+
+                return next;
+            }
+        }
+
+        public void close()
+        {
+            wrapped.close();
+        }
+    }
+
+    static abstract class FilterFactory
+    {
+        abstract List<Range<Token>> getReplicatedRanges();
+        abstract PaxosRepairHistory getPaxosRepairHistory();
+
+        CloseableIterator<PaxosKeyState> filter(CloseableIterator<PaxosKeyState> iterator)
+        {
+            return new FilteringIterator(iterator, getReplicatedRanges(), getPaxosRepairHistory());
+        }
+    }
+
+    private static class CFSFilterFactory extends FilterFactory
+    {
+        private final TableId tableId;
+
+        /**
+         * @param tableId must refer to a known CFS
+         */
+        CFSFilterFactory(TableId tableId)
+        {
+            this.tableId = tableId;
+        }
+
+        List<Range<Token>> getReplicatedRanges()
+        {
+            if (tableId == null)
+                return Range.normalize(FULL_RANGE);
+
+            ColumnFamilyStore table = Schema.instance.getColumnFamilyStoreInstance(tableId);
+            if (table == null)
+                return Range.normalize(FULL_RANGE);
+
+            String ksName = table.keyspace.getName();
+            List<Range<Token>> ranges = StorageService.instance.getLocalAndPendingRanges(ksName);
+
+            // don't filter anything if we're not aware of any locally replicated ranges
+            if (ranges.isEmpty())
+                return Range.normalize(FULL_RANGE);
+
+            return Range.normalize(ranges);
+        }
+
+        PaxosRepairHistory getPaxosRepairHistory()
+        {
+            ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(tableId);
+            if (cfs == null)
+                return PaxosRepairHistory.EMPTY;
+
+            return cfs.getPaxosRepairHistory();
+        }
+    }
+
+    static class Data
+    {
+        final ImmutableSet<UncommittedDataFile> files;
+
+        Data(ImmutableSet<UncommittedDataFile> files)
+        {
+            this.files = files;
+        }
+
+        Data withFile(UncommittedDataFile file)
+        {
+            return new Data(ImmutableSet.<UncommittedDataFile>builder().addAll(files).add(file).build());
+        }
+
+        void truncate()
+        {
+            for (UncommittedDataFile file : files)
+                file.markDeleted();
+        }
+    }
+
+    private static class Reducer extends MergeIterator.Reducer<PaxosKeyState, PaxosKeyState>
+    {
+        PaxosKeyState merged = null;
+
+        public void reduce(int idx, PaxosKeyState current)
+        {
+            merged = PaxosKeyState.merge(merged, current);
+        }
+
+        protected PaxosKeyState getReduced()
+        {
+            return merged;
+        }
+
+        protected void onKeyChange()
+        {
+            merged = null;
+        }
+    }
+
+    @SuppressWarnings("resource")
+    private static CloseableIterator<PaxosKeyState> merge(Collection<UncommittedDataFile> files, Collection<Range<Token>> ranges)
+    {
+        List<CloseableIterator<PaxosKeyState>> iterators = new ArrayList<>(files.size());
+        try
+        {
+            for (UncommittedDataFile file : files)
+            {
+                CloseableIterator<PaxosKeyState> iterator = file.iterator(ranges);
+                if (iterator == null) continue;
+                iterators.add(iterator);
+            }
+            return MergeIterator.get(iterators, PaxosKeyState.KEY_COMPARATOR, new Reducer());
+        }
+        catch (Throwable t)
+        {
+            Throwables.close(t, iterators);
+            throw t;
+        }
+    }
+
+    class Merge implements Runnable
+    {
+        final int generation;
+        boolean isScheduled = false;
+
+        Merge(int generation)
+        {
+            this.generation = generation;
+        }
+
+        public void run()
+        {
+            try
+            {
+                Preconditions.checkState(!dependsOnActiveFlushes());
+                Data current = data;
+                SchemaElement name = tableName(tableId);
+                UncommittedDataFile.Writer writer = writer(directory, name.elementKeyspace(), name.elementName(), tableId, generation);
+                Set<UncommittedDataFile> files = Sets.newHashSet(Iterables.filter(current.files, u -> u.generation() < generation));
+                logger.info("merging {} paxos uncommitted files into a new generation {} file for {}.{}", files.size(), generation, keyspace(), table());
+                try (CloseableIterator<PaxosKeyState> iterator = filterFactory.filter(merge(files, FULL_RANGE)))
+                {
+                    while (iterator.hasNext())
+                    {
+                        PaxosKeyState next = iterator.next();
+
+                        if (next.committed)
+                            continue;
+
+                        writer.append(next);
+                    }
+                    mergeComplete(this, writer.finish());
+                }
+            }
+            catch (IOException e)
+            {
+                throw new IOError(e);
+            }
+        }
+
+        void maybeSchedule()
+        {
+            if (isScheduled)
+                return;
+
+            if (dependsOnActiveFlushes())
+                return;
+
+            executor.submit(merge);
+            merge.isScheduled = true;
+        }
+
+        boolean dependsOnActiveFlushes()
+        {
+            return !activeFlushes.headSet(generation).isEmpty();
+        }
+    }
+
+    private final File directory;
+    private final TableId tableId;
+    private final FilterFactory filterFactory;
+
+    private volatile Data data;
+    private volatile Merge merge;
+    private volatile boolean rebuilding = false;
+
+    private int nextGeneration;
+    private final NavigableSet<Integer> activeFlushes = new ConcurrentSkipListSet<>();
+
+    private UncommittedTableData(File directory, TableId tableId, FilterFactory filterFactory, Data data)
+    {
+        this.directory = directory;
+        this.tableId = tableId;
+        this.filterFactory = filterFactory;
+        this.data = data;
+        this.nextGeneration = 1 + (int) data.files.stream().mapToLong(UncommittedDataFile::generation).max().orElse(-1);
+    }
+
+    static UncommittedTableData load(File directory, TableId tableId, FilterFactory flushFilterFactory)
+    {
+        Preconditions.checkArgument(directory.exists());
+        Preconditions.checkArgument(directory.isDirectory());
+        Preconditions.checkNotNull(tableId);
+
+        String[] fnames = directory.tryListNames();
+        Preconditions.checkArgument(fnames != null);
+
+        Pattern pattern = UncommittedDataFile.fileRegexFor(tableId);
+        Set<Long> generations = new HashSet<>();
+        List<UncommittedDataFile> files = new ArrayList<>();
+        for (String fname : fnames)
+        {
+            Matcher matcher = pattern.matcher(fname);
+            if (!matcher.matches())
+                continue;
+
+            File file = new File(directory, fname);
+            if (isTmpFile(fname))
+            {
+                logger.info("deleting left over uncommitted paxos temp file {} for tableId {}", file, tableId);
+                file.delete();
+                continue;
+            }
+
+            if (isCrcFile(fname))
+                continue;
+
+            File crcFile = new File(directory, UncommittedDataFile.crcName(fname));
+            if (!crcFile.exists())
+                throw new FSReadError(new IOException(String.format("%s does not have a corresponding crc file", file)), crcFile);
+            long generation = Long.parseLong(matcher.group(1));
+            files.add(UncommittedDataFile.create(tableId, file, crcFile, generation));
+            generations.add(generation);
+        }
+
+        // cleanup orphaned crc files
+        for (String fname : fnames)
+        {
+            if (!isCrcFile(fname))
+                continue;
+
+            Matcher matcher = pattern.matcher(fname);
+            if (!matcher.matches())
+                continue;
+
+            long generation = Long.parseLong(matcher.group(1));
+            if (!generations.contains(generation))
+            {
+                File file = new File(directory, fname);
+                logger.info("deleting left over uncommitted paxos crc file {} for tableId {}", file, tableId);
+                file.delete();
+            }
+        }
+
+        return new UncommittedTableData(directory, tableId, flushFilterFactory, new Data(ImmutableSet.copyOf(files)));
+    }
+
+    static UncommittedTableData load(File directory, TableId tableId)
+    {
+        return load(directory, tableId, new CFSFilterFactory(tableId));
+    }
+
+    static Set<TableId> listTableIds(File directory)
+    {
+        Preconditions.checkArgument(directory.isDirectory());
+        return UncommittedDataFile.listTableIds(directory);
+    }
+
+    private static SchemaElement tableName(TableId tableId)
+    {
+        TableMetadata name = Schema.instance.getTableMetadata(tableId);
+        return name != null ? name : UNKNOWN_TABLE;
+    }
+
+    int numFiles()
+    {
+        return data.files.size();
+    }
+
+    TableId tableId()
+    {
+        return tableId;
+    }
+
+    public String keyspace()
+    {
+        return tableName(tableId).elementKeyspace();
+    }
+
+    public String table()
+    {
+        return tableName(tableId).elementName();
+    }
+
+    /**
+     * Return an iterator of the file contents for the given token ranges. Token ranges
+     * must be normalized
+     */
+    synchronized CloseableIterator<PaxosKeyState> iterator(Collection<Range<Token>> ranges)
+    {
+        // we don't wait for pending flushes because flushing memtable data is added in PaxosUncommittedIndex
+        Preconditions.checkArgument(elementsEqual(Range.normalize(ranges), ranges));
+        return filterFactory.filter(merge(data.files, ranges));
+    }
+
+    private void flushTerminated(int generation)
+    {
+        activeFlushes.remove(generation);
+        if (merge != null)
+            merge.maybeSchedule();
+    }
+
+    private synchronized void flushSuccess(int generation, UncommittedDataFile newFile)
+    {
+        assert newFile == null || generation == newFile.generation();
+        if (newFile != null)
+            data = data.withFile(newFile);
+        flushTerminated(generation);
+    }
+
+    private synchronized void flushAborted(int generation)
+    {
+        flushTerminated(generation);
+    }
+
+    private synchronized void mergeComplete(Merge merge, UncommittedDataFile newFile)
+    {
+        Preconditions.checkArgument(this.merge == merge);
+        ImmutableSet.Builder<UncommittedDataFile> files = ImmutableSet.builder();
+        files.add(newFile);
+        for (UncommittedDataFile file : data.files)
+        {
+            if (file.generation() > merge.generation)
+                files.add(file);
+            else
+                file.markDeleted();
+        }
+
+        data = new Data(files.build());
+        this.merge = null;
+        logger.info("paxos uncommitted merge completed for {}.{}, new generation {} file added", keyspace(), table(), newFile.generation());
+    }
+
+    synchronized FlushWriter flushWriter() throws IOException
+    {
+        int generation = nextGeneration++;
+        UncommittedDataFile.Writer writer = writer(directory, keyspace(), table(), tableId, generation);
+        activeFlushes.add(generation);
+        logger.info("flushing generation {} uncommitted paxos file for {}.{}", generation, keyspace(), table());
+
+        return new FlushWriter()
+        {
+            public void append(PaxosKeyState commitState) throws IOException
+            {
+                writer.append(commitState);
+            }
+
+            public void finish()
+            {
+                flushSuccess(generation, writer.finish());
+            }
+
+            public Throwable abort(Throwable accumulate)
+            {
+                accumulate = writer.abort(accumulate);
+                flushAborted(generation);
+                return accumulate;
+            }
+        };
+    }
+
+    private synchronized void rebuildComplete(UncommittedDataFile file)
+    {
+        Preconditions.checkState(rebuilding);
+        Preconditions.checkState(!hasInProgressIO());
+        Preconditions.checkState(data.files.isEmpty());
+
+        data = new Data(ImmutableSet.of(file));
+        logger.info("paxos rebuild completed for {}.{}", keyspace(), table());
+        rebuilding = false;
+    }
+
+    synchronized FlushWriter rebuildWriter() throws IOException
+    {
+        Preconditions.checkState(!rebuilding);
+        Preconditions.checkState(nextGeneration == 0);
+        Preconditions.checkState(!hasInProgressIO());
+        rebuilding = true;
+        int generation = nextGeneration++;
+        UncommittedDataFile.Writer writer = writer(directory, keyspace(), table(), tableId, generation);
+
+        return new FlushWriter()
+        {
+            public void append(PaxosKeyState commitState) throws IOException
+            {
+                if (commitState.committed)
+                    return;
+
+                writer.append(commitState);
+            }
+
+            public void finish()
+            {
+                rebuildComplete(writer.finish());
+            }
+
+            public Throwable abort(Throwable accumulate)
+            {
+                accumulate = writer.abort(accumulate);
+                logger.info("paxos rebuild aborted for {}.{}", keyspace(), table());
+                rebuilding = false;
+                return accumulate;
+            }
+        };
+    }
+
+    synchronized void maybeScheduleMerge()
+    {
+        if (data.files.size() < 2 || merge != null)
+            return;
+
+        logger.info("Scheduling uncommitted paxos data merge task for {}.{}", keyspace(), table());
+        createMergeTask().maybeSchedule();
+    }
+
+    @VisibleForTesting
+    synchronized Merge createMergeTask()
+    {
+        Preconditions.checkState(merge == null);
+        merge = new Merge(nextGeneration++);
+        return merge;
+    }
+
+    synchronized boolean hasInProgressIO()
+    {
+        return merge != null || !activeFlushes.isEmpty();
+    }
+
+    void truncate()
+    {
+        logger.info("truncating uncommitted paxos data for {}.{}", keyspace(), table());
+        data.truncate();
+        data = new Data(ImmutableSet.of());
+    }
+
+    @VisibleForTesting
+    Data data()
+    {
+        return data;
+    }
+
+    @VisibleForTesting
+    long nextGeneration()
+    {
+        return nextGeneration;
+    }
+
+    @VisibleForTesting
+    Merge currentMerge()
+    {
+        return merge;
+    }
+
+    public static void shutdownAndWait(long timeout, TimeUnit units) throws InterruptedException, TimeoutException
+    {
+        ExecutorUtils.shutdownAndWait(timeout, units, executor);
+    }
+}
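
A hedged sketch of driving the FlushWriter protocol above during a memtable flush; tableData and statesInKeyOrder are assumptions for illustration, not names from this patch. States must be appended in strictly increasing DecoratedKey order (matching the precondition in UncommittedDataFile.Writer.append), and either finish() or abort() must be reached so the generation is removed from activeFlushes and any pending merge can run.

    // Hypothetical flush driver, not part of the patch.
    void flushUncommitted(UncommittedTableData tableData, Iterable<PaxosKeyState> statesInKeyOrder) throws IOException
    {
        UncommittedTableData.FlushWriter writer = tableData.flushWriter();
        try
        {
            writer.appendAll(statesInKeyOrder);  // keys must arrive in strictly increasing order
            writer.finish();                     // registers the new generation file and unblocks waiting merges
        }
        catch (Throwable t)
        {
            // abort() accumulates any failure from the underlying writer and clears the generation from activeFlushes
            throw new RuntimeException(writer.abort(t));
        }
    }
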
diff --git a/src/java/org/apache/cassandra/service/paxos/v1/AbstractPaxosCallback.java b/src/java/org/apache/cassandra/service/paxos/v1/AbstractPaxosCallback.java
new file mode 100644
index 0000000..e6ef1da
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/v1/AbstractPaxosCallback.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.paxos.v1;
+
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.WriteType;
+import org.apache.cassandra.exceptions.WriteTimeoutException;
+import org.apache.cassandra.net.RequestCallback;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
+
+public abstract class AbstractPaxosCallback<T> implements RequestCallback<T>
+{
+    protected final CountDownLatch latch;
+    protected final int targets;
+    private final ConsistencyLevel consistency;
+    private final long queryStartNanoTime;
+
+    public AbstractPaxosCallback(int targets, ConsistencyLevel consistency, long queryStartNanoTime)
+    {
+        this.targets = targets;
+        this.consistency = consistency;
+        latch = newCountDownLatch(targets);
+        this.queryStartNanoTime = queryStartNanoTime;
+    }
+
+    public int getResponseCount()
+    {
+        return targets - latch.count();
+    }
+
+    public void await() throws WriteTimeoutException
+    {
+        try
+        {
+            long timeout = DatabaseDescriptor.getWriteRpcTimeout(NANOSECONDS) - (nanoTime() - queryStartNanoTime);
+            if (!latch.await(timeout, NANOSECONDS))
+                throw new WriteTimeoutException(WriteType.CAS, consistency, getResponseCount(), targets);
+        }
+        catch (InterruptedException e)
+        {
+            throw new UncheckedInterruptedException(e);
+        }
+    }
+}
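
To make the timeout arithmetic in await() concrete: with, say, a 2000 ms write RPC timeout and 500 ms already elapsed since queryStartNanoTime, the latch is given roughly 1500 ms (expressed in nanoseconds) before a WriteTimeoutException carrying the current response count is thrown.
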
diff --git a/src/java/org/apache/cassandra/service/paxos/v1/PrepareCallback.java b/src/java/org/apache/cassandra/service/paxos/v1/PrepareCallback.java
new file mode 100644
index 0000000..a6afc3a
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/v1/PrepareCallback.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.v1;
+
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.google.common.collect.Iterables;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PrepareResponse;
+
+public class PrepareCallback extends AbstractPaxosCallback<PrepareResponse>
+{
+    private static final Logger logger = LoggerFactory.getLogger(PrepareCallback.class);
+
+    public boolean promised = true;
+    public Commit mostRecentCommit;
+    public Commit mostRecentInProgressCommit;
+
+    private final Map<InetAddressAndPort, Commit> commitsByReplica = new ConcurrentHashMap<>();
+
+    public PrepareCallback(DecoratedKey key, TableMetadata metadata, int targets, ConsistencyLevel consistency, long queryStartNanoTime)
+    {
+        super(targets, consistency, queryStartNanoTime);
+        // need to inject the right key in the empty commit so comparing with empty commits in the response works as expected
+        mostRecentCommit = Commit.emptyCommit(key, metadata);
+        mostRecentInProgressCommit = Commit.emptyCommit(key, metadata);
+    }
+
+    public synchronized void onResponse(Message<PrepareResponse> message)
+    {
+        PrepareResponse response = message.payload;
+        logger.trace("Prepare response {} from {}", response, message.from());
+
+        // We set the mostRecentInProgressCommit even if we're not promised because, in that case, the ballot of that commit
+        // will be used to avoid generating a ballot that has no chance of winning on retry (think clock skew).
+        if (response.inProgressCommit.isAfter(mostRecentInProgressCommit))
+            mostRecentInProgressCommit = response.inProgressCommit;
+
+        if (!response.promised)
+        {
+            promised = false;
+            while (latch.count() > 0)
+                latch.decrement();
+            return;
+        }
+
+        commitsByReplica.put(message.from(), response.mostRecentCommit);
+        if (response.mostRecentCommit.isAfter(mostRecentCommit))
+            mostRecentCommit = response.mostRecentCommit;
+
+        latch.decrement();
+    }
+
+    public Iterable<InetAddressAndPort> replicasMissingMostRecentCommit()
+    {
+        return Iterables.filter(commitsByReplica.keySet(), inetAddress -> (!commitsByReplica.get(inetAddress).ballot.equals(mostRecentCommit.ballot)));
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/v1/PrepareVerbHandler.java b/src/java/org/apache/cassandra/service/paxos/v1/PrepareVerbHandler.java
new file mode 100644
index 0000000..2ebcfeb
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/v1/PrepareVerbHandler.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.v1;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.service.paxos.PrepareResponse;
+
+public class PrepareVerbHandler implements IVerbHandler<Commit>
+{
+    public static PrepareVerbHandler instance = new PrepareVerbHandler();
+
+    public static PrepareResponse doPrepare(Commit toPrepare)
+    {
+        return PaxosState.legacyPrepare(toPrepare);
+    }
+
+    public void doVerb(Message<Commit> message)
+    {
+        Message<PrepareResponse> reply = message.responseWith(doPrepare(message.payload));
+        MessagingService.instance().send(reply, message.from());
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/paxos/v1/ProposeCallback.java b/src/java/org/apache/cassandra/service/paxos/v1/ProposeCallback.java
new file mode 100644
index 0000000..1c975cb
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/v1/ProposeCallback.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.v1;
+
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.utils.Nemesis;
+
+/**
+ * ProposeCallback has two modes of operation, controlled by the failFast parameter.
+ *
+ * In failFast mode, we will return a failure as soon as a majority of nodes reject
+ * the proposal. This is used when replaying a proposal from an earlier leader.
+ *
+ * Otherwise, we wait for either all replicas to respond or until we achieve
+ * the desired quorum. We continue to wait for all replicas even after we know we cannot succeed
+ * because we need to know whether no node at all has accepted or at least one has.
+ * In the former case, a proposer is guaranteed that no-one will
+ * replay its value; in the latter case we have no such guarantee, so we must time out in case another
+ * leader replays it before we can; see CASSANDRA-6013.
+ */
+public class ProposeCallback extends AbstractPaxosCallback<Boolean>
+{
+    private static final Logger logger = LoggerFactory.getLogger(ProposeCallback.class);
+
+    @Nemesis private final AtomicInteger accepts = new AtomicInteger(0);
+    private final int requiredAccepts;
+    private final boolean failFast;
+
+    public ProposeCallback(int totalTargets, int requiredTargets, boolean failFast, ConsistencyLevel consistency, long queryStartNanoTime)
+    {
+        super(totalTargets, consistency, queryStartNanoTime);
+        this.requiredAccepts = requiredTargets;
+        this.failFast = failFast;
+    }
+
+    public void onResponse(Message<Boolean> msg)
+    {
+        logger.trace("Propose response {} from {}", msg.payload, msg.from());
+
+        if (msg.payload)
+            accepts.incrementAndGet();
+
+        latch.decrement();
+
+        if (isSuccessful() || (failFast && (latch.count() + accepts.get() < requiredAccepts)))
+        {
+            while (latch.count() > 0)
+                latch.decrement();
+        }
+    }
+
+    public int getAcceptCount()
+    {
+        return accepts.get();
+    }
+
+    public boolean isSuccessful()
+    {
+        return accepts.get() >= requiredAccepts;
+    }
+
+    // Note: this is only reliable if !failFast
+    public boolean isFullyRefused()
+    {
+        // We need to check the latch first to avoid racing with a late arrival
+        // between the latch check and the accepts one
+        return latch.count() == 0 && accepts.get() == 0;
+    }
+}
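
A worked example of the early-release condition in onResponse: with totalTargets = 3 and requiredAccepts = 2, two accepts make isSuccessful() true and drain the latch immediately; in failFast mode, two rejections leave latch.count() = 1 and accepts = 0, so 1 + 0 < 2 also drains the latch early, since the proposal can no longer reach the required quorum.
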
diff --git a/src/java/org/apache/cassandra/service/paxos/v1/ProposeVerbHandler.java b/src/java/org/apache/cassandra/service/paxos/v1/ProposeVerbHandler.java
new file mode 100644
index 0000000..54c8c67
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/paxos/v1/ProposeVerbHandler.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.v1;
+import org.apache.cassandra.net.IVerbHandler;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PaxosState;
+
+public class ProposeVerbHandler implements IVerbHandler<Commit>
+{
+    public static final ProposeVerbHandler instance = new ProposeVerbHandler();
+
+    public static Boolean doPropose(Commit proposal)
+    {
+        return PaxosState.legacyPropose(proposal);
+    }
+
+    public void doVerb(Message<Commit> message)
+    {
+        Boolean response = doPropose(message.payload);
+        Message<Boolean> reply = message.responseWith(response);
+        MessagingService.instance().send(reply, message.from());
+    }
+}
diff --git a/src/java/org/apache/cassandra/service/reads/AbstractReadExecutor.java b/src/java/org/apache/cassandra/service/reads/AbstractReadExecutor.java
index fd1b372..1613283 100644
--- a/src/java/org/apache/cassandra/service/reads/AbstractReadExecutor.java
+++ b/src/java/org/apache/cassandra/service/reads/AbstractReadExecutor.java
@@ -45,9 +45,10 @@
 import org.apache.cassandra.service.reads.repair.ReadRepair;
 import org.apache.cassandra.tracing.TraceState;
 import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.utils.FBUtilities;
 
 import static com.google.common.collect.Iterables.all;
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
 
 /**
  * Sends a read request to the replicas needed to satisfy a given ConsistencyLevel.
@@ -195,7 +196,7 @@
 
         // There are simply no extra replicas to speculate.
         // Handle this separately so it can record failed attempts to speculate due to lack of replicas
-        if (replicaPlan.contacts().size() == replicaPlan.candidates().size())
+        if (replicaPlan.contacts().size() == replicaPlan.readCandidates().size())
         {
             boolean recordFailedSpeculation = consistencyLevel != ConsistencyLevel.ALL;
             return new NeverSpeculatingReadExecutor(cfs, command, replicaPlan, queryStartNanoTime, recordFailedSpeculation);
@@ -207,6 +208,11 @@
             return new SpeculatingReadExecutor(cfs, command, replicaPlan, queryStartNanoTime);
     }
 
+    public boolean hasLocalRead()
+    {
+        return replicaPlan().lookup(FBUtilities.getBroadcastAddressAndPort()) != null;
+    }
+
     /**
      *  Returns true if speculation should occur and if it should then block until it is time to
      *  send the speculative reads
@@ -214,10 +220,16 @@
     boolean shouldSpeculateAndMaybeWait()
     {
         // no latency information, or we're overloaded
-        if (cfs.sampleReadLatencyNanos > command.getTimeout(NANOSECONDS))
+        if (cfs.sampleReadLatencyMicros > command.getTimeout(MICROSECONDS))
+        {
+            if (logger.isTraceEnabled())
+                logger.trace("Decided not to speculate as {} > {}", cfs.sampleReadLatencyMicros, command.getTimeout(MICROSECONDS));
             return false;
+        }
 
-        return !handler.await(cfs.sampleReadLatencyNanos, NANOSECONDS);
+        if (logger.isTraceEnabled())
+            logger.trace("Awaiting {} microseconds before speculating", cfs.sampleReadLatencyMicros);
+        return !handler.await(cfs.sampleReadLatencyMicros, MICROSECONDS);
     }
 
     ReplicaPlan.ForTokenRead replicaPlan()
@@ -263,7 +275,7 @@
             // We're hitting additional targets for read repair (??).  Since our "extra" replica is the least-
             // preferred by the snitch, we do an extra data read to start with against a replica more
             // likely to respond; better to let RR fail than the entire query.
-            super(cfs, command, replicaPlan, replicaPlan.blockFor() < replicaPlan.contacts().size() ? 2 : 1, queryStartNanoTime);
+            super(cfs, command, replicaPlan, replicaPlan.readQuorum() < replicaPlan.contacts().size() ? 2 : 1, queryStartNanoTime);
         }
 
         public void maybeTryAdditionalReplicas()
@@ -357,13 +369,18 @@
     public void setResult(PartitionIterator result)
     {
         Preconditions.checkState(this.result == null, "Result can only be set once");
-        this.result = DuplicateRowChecker.duringRead(result, this.replicaPlan.get().candidates().endpointList());
+        this.result = DuplicateRowChecker.duringRead(result, this.replicaPlan.get().readCandidates().endpointList());
+    }
+
+    public void awaitResponses() throws ReadTimeoutException
+    {
+        awaitResponses(false);
     }
 
     /**
      * Wait for the CL to be satisfied by responses
      */
-    public void awaitResponses() throws ReadTimeoutException
+    public void awaitResponses(boolean logBlockingReadRepairAttempt) throws ReadTimeoutException
     {
         try
         {
@@ -391,6 +408,13 @@
         {
             Tracing.trace("Digest mismatch: Mismatch for key {}", getKey());
             readRepair.startRepair(digestResolver, this::setResult);
+            if (logBlockingReadRepairAttempt)
+            {
+                logger.info("Blocking Read Repair triggered for query [{}] at CL.{} with endpoints {}",
+                            command.toCQLString(),
+                            replicaPlan().consistencyLevel(),
+                            replicaPlan().contacts());
+            }
         }
     }
 
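
As a concrete reading of the revised shouldSpeculateAndMaybeWait logic: if the sampled read latency is, say, 8000 µs while the command timeout is 5000 µs, speculation is skipped (and now traced); otherwise the handler is awaited for up to the sampled latency, and a speculative read is sent only if responses have not arrived by then.
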
diff --git a/src/java/org/apache/cassandra/service/reads/AlwaysSpeculativeRetryPolicy.java b/src/java/org/apache/cassandra/service/reads/AlwaysSpeculativeRetryPolicy.java
index a6092fb..506378e 100644
--- a/src/java/org/apache/cassandra/service/reads/AlwaysSpeculativeRetryPolicy.java
+++ b/src/java/org/apache/cassandra/service/reads/AlwaysSpeculativeRetryPolicy.java
@@ -19,7 +19,7 @@
 
 import com.google.common.base.Objects;
 
-import com.codahale.metrics.Snapshot;
+import org.apache.cassandra.metrics.SnapshottingTimer;
 
 public class AlwaysSpeculativeRetryPolicy implements SpeculativeRetryPolicy
 {
@@ -30,7 +30,7 @@
     }
 
     @Override
-    public long calculateThreshold(Snapshot latency, long existingValue)
+    public long calculateThreshold(SnapshottingTimer latency, long existingValue)
     {
         return 0;
     }
diff --git a/src/java/org/apache/cassandra/service/reads/DataResolver.java b/src/java/org/apache/cassandra/service/reads/DataResolver.java
index 6abb2ad..e7e4952 100644
--- a/src/java/org/apache/cassandra/service/reads/DataResolver.java
+++ b/src/java/org/apache/cassandra/service/reads/DataResolver.java
@@ -21,8 +21,11 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.function.Supplier;
 import java.util.function.UnaryOperator;
 
+import javax.annotation.Nullable;
+
 import com.google.common.base.Joiner;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -50,24 +53,25 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.schema.IndexMetadata;
 import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.reads.repair.NoopReadRepair;
 import org.apache.cassandra.service.reads.repair.ReadRepair;
 import org.apache.cassandra.service.reads.repair.RepairedDataTracker;
 import org.apache.cassandra.service.reads.repair.RepairedDataVerifier;
 
 import static com.google.common.collect.Iterables.*;
 
-public class DataResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>> extends ResponseResolver<E, P>
+public class DataResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>> extends ResponseResolver<E, P>
 {
     private final boolean enforceStrictLiveness;
     private final ReadRepair<E, P> readRepair;
     private final boolean trackRepairedStatus;
 
-    public DataResolver(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, ReadRepair<E, P> readRepair, long queryStartNanoTime)
+    public DataResolver(ReadCommand command, Supplier<? extends P> replicaPlan, ReadRepair<E, P> readRepair, long queryStartNanoTime)
     {
         this(command, replicaPlan, readRepair, queryStartNanoTime, false);
     }
 
-    public DataResolver(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, ReadRepair<E, P> readRepair, long queryStartNanoTime, boolean trackRepairedStatus)
+    public DataResolver(ReadCommand command, Supplier<? extends P> replicaPlan, ReadRepair<E, P> readRepair, long queryStartNanoTime, boolean trackRepairedStatus)
     {
         super(command, replicaPlan, queryStartNanoTime);
         this.enforceStrictLiveness = command.metadata().enforceStrictLiveness();
@@ -88,12 +92,17 @@
 
     public PartitionIterator resolve()
     {
+        return resolve(null);
+    }
+
+    public PartitionIterator resolve(@Nullable Runnable runOnShortRead)
+    {
         // We could get more responses while this method runs, which is ok (we're happy to ignore any response not here
         // at the beginning of this method), so grab the response count once and use that through the method.
         Collection<Message<ReadResponse>> messages = responses.snapshot();
         assert !any(messages, msg -> msg.payload.isDigestResponse());
 
-        E replicas = replicaPlan().candidates().select(transform(messages, Message::from), false);
+        E replicas = replicaPlan().readCandidates().select(transform(messages, Message::from), false);
 
         // If requested, inspect each response for a digest of the replica's repaired data set
         RepairedDataTracker repairedDataTracker = trackRepairedStatus
@@ -115,7 +124,7 @@
         {
             ResolveContext context = new ResolveContext(replicas);
             return resolveWithReadRepair(context,
-                                         i -> shortReadProtectedResponse(i, context),
+                                         i -> shortReadProtectedResponse(i, context, runOnShortRead),
                                          UnaryOperator.identity(),
                                          repairedDataTracker);
         }
@@ -181,13 +190,13 @@
         UnfilteredPartitionIterator getResponse(int i);
     }
 
-    private UnfilteredPartitionIterator shortReadProtectedResponse(int i, ResolveContext context)
+    private UnfilteredPartitionIterator shortReadProtectedResponse(int i, ResolveContext context, @Nullable Runnable onShortRead)
     {
         UnfilteredPartitionIterator originalResponse = responses.get(i).payload.makeIterator(command);
 
         return context.needShortReadProtection()
                ? ShortReadProtection.extend(context.replicas.get(i),
-                                            () -> responses.clearUnsafe(i),
+                                            () -> { responses.clearUnsafe(i); if (onShortRead != null) onShortRead.run(); },
                                             originalResponse,
                                             command,
                                             context.mergedResultCounter,
@@ -202,9 +211,9 @@
                                                     RepairedDataTracker repairedDataTracker)
     {
         UnfilteredPartitionIterators.MergeListener listener = null;
-        if (context.needsReadRepair())
+        if (context.needsReadRepair() && readRepair != NoopReadRepair.instance)
         {
-            P sources = replicaPlan.getWithContacts(context.replicas);
+            P sources = replicaPlan.get().withContacts(context.replicas);
             listener = wrapMergeListener(readRepair.getMergeListener(sources), sources, repairedDataTracker);
         }
 
@@ -244,7 +253,7 @@
 
         PartitionIterator firstPhasePartitions = resolveInternal(firstPhaseContext,
                                                                  rfp.mergeController(),
-                                                                 i -> shortReadProtectedResponse(i, firstPhaseContext),
+                                                                 i -> shortReadProtectedResponse(i, firstPhaseContext, null),
                                                                  UnaryOperator.identity());
 
         PartitionIterator completedPartitions = resolveWithReadRepair(secondPhaseContext,
@@ -285,6 +294,7 @@
         Filter filter = new Filter(command.nowInSec(), command.metadata().enforceStrictLiveness());
         FilteredPartitions filtered = FilteredPartitions.filter(merged, filter);
         PartitionIterator counted = Transformation.apply(preCountFilter.apply(filtered), context.mergedResultCounter);
+
         return Transformation.apply(counted, new EmptyPartitionsDiscarder());
     }
 
diff --git a/src/java/org/apache/cassandra/service/reads/DigestResolver.java b/src/java/org/apache/cassandra/service/reads/DigestResolver.java
index 475c8c2..f799374 100644
--- a/src/java/org/apache/cassandra/service/reads/DigestResolver.java
+++ b/src/java/org/apache/cassandra/service/reads/DigestResolver.java
@@ -38,8 +38,9 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static com.google.common.collect.Iterables.any;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
-public class DigestResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>> extends ResponseResolver<E, P>
+public class DigestResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>> extends ResponseResolver<E, P>
 {
     private volatile Message<ReadResponse> dataResponse;
 
@@ -102,7 +103,7 @@
 
     public boolean responsesMatch()
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         // validate digests against each other; return false immediately on mismatch.
         ByteBuffer digest = null;
@@ -126,7 +127,7 @@
         }
 
         if (logger.isTraceEnabled())
-            logger.trace("responsesMatch: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+            logger.trace("responsesMatch: {} ms.", TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
 
         return true;
     }
diff --git a/src/java/org/apache/cassandra/service/reads/FixedSpeculativeRetryPolicy.java b/src/java/org/apache/cassandra/service/reads/FixedSpeculativeRetryPolicy.java
index 9bbeb12..7338b2c 100644
--- a/src/java/org/apache/cassandra/service/reads/FixedSpeculativeRetryPolicy.java
+++ b/src/java/org/apache/cassandra/service/reads/FixedSpeculativeRetryPolicy.java
@@ -23,8 +23,8 @@
 
 import com.google.common.base.Objects;
 
-import com.codahale.metrics.Snapshot;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.metrics.SnapshottingTimer;
 import org.apache.cassandra.schema.TableParams;
 
 public class FixedSpeculativeRetryPolicy implements SpeculativeRetryPolicy
@@ -39,9 +39,9 @@
     }
 
     @Override
-    public long calculateThreshold(Snapshot latency, long existingValue)
+    public long calculateThreshold(SnapshottingTimer latency, long existingValue)
     {
-        return TimeUnit.MILLISECONDS.toNanos(speculateAtMilliseconds);
+        return TimeUnit.MILLISECONDS.toMicros(speculateAtMilliseconds);
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/service/reads/HybridSpeculativeRetryPolicy.java b/src/java/org/apache/cassandra/service/reads/HybridSpeculativeRetryPolicy.java
index 8228c45..fedec23 100644
--- a/src/java/org/apache/cassandra/service/reads/HybridSpeculativeRetryPolicy.java
+++ b/src/java/org/apache/cassandra/service/reads/HybridSpeculativeRetryPolicy.java
@@ -23,8 +23,8 @@
 import com.google.common.base.Objects;
 
 import com.codahale.metrics.Snapshot;
-import com.codahale.metrics.Timer;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.metrics.SnapshottingTimer;
 import org.apache.cassandra.schema.TableParams;
 
 public class HybridSpeculativeRetryPolicy implements SpeculativeRetryPolicy
@@ -57,11 +57,15 @@
     }
 
     @Override
-    public long calculateThreshold(Snapshot latency, long existingValue)
+    public long calculateThreshold(SnapshottingTimer latency, long existingValue)
     {
-        if (latency.size() <= 0)
+        Snapshot snapshot = latency.getPercentileSnapshot();
+        
+        if (snapshot.size() <= 0)
             return existingValue;
-        return function.call(percentilePolicy.calculateThreshold(latency, existingValue), fixedPolicy.calculateThreshold(latency, existingValue));
+        
+        return function.call(percentilePolicy.calculateThreshold(snapshot, existingValue), 
+                             fixedPolicy.calculateThreshold(null, existingValue));
     }
 
     @Override
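
Hedged sketch (names invented, not part of the patch): a HYBRID policy such as MIN(99p, 50ms) simply combines the percentile-derived and fixed thresholds, all in microseconds once the snapshot has been taken from the timer.

    public class HybridThresholdSketch
    {
        enum Function { MIN, MAX }

        static long hybrid(Function fn, long percentileMicros, long fixedMicros)
        {
            return fn == Function.MIN ? Math.min(percentileMicros, fixedMicros)
                                      : Math.max(percentileMicros, fixedMicros);
        }

        public static void main(String[] args)
        {
            long p99   = 42_000;   // pretend p99 latency in microseconds
            long fixed = 50_000;   // 50ms fixed bound in microseconds
            System.out.println("MIN(99p, 50ms) -> " + hybrid(Function.MIN, p99, fixed) + " us");
        }
    }
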
diff --git a/src/java/org/apache/cassandra/service/reads/NeverSpeculativeRetryPolicy.java b/src/java/org/apache/cassandra/service/reads/NeverSpeculativeRetryPolicy.java
index 1211142..5ed7585 100644
--- a/src/java/org/apache/cassandra/service/reads/NeverSpeculativeRetryPolicy.java
+++ b/src/java/org/apache/cassandra/service/reads/NeverSpeculativeRetryPolicy.java
@@ -19,7 +19,7 @@
 
 import com.google.common.base.Objects;
 
-import com.codahale.metrics.Snapshot;
+import org.apache.cassandra.metrics.SnapshottingTimer;
 
 public class NeverSpeculativeRetryPolicy implements SpeculativeRetryPolicy
 {
@@ -30,7 +30,7 @@
     }
 
     @Override
-    public long calculateThreshold(Snapshot latency, long existingValue)
+    public long calculateThreshold(SnapshottingTimer latency, long existingValue)
     {
         return Long.MAX_VALUE;
     }
diff --git a/src/java/org/apache/cassandra/service/reads/PercentileSpeculativeRetryPolicy.java b/src/java/org/apache/cassandra/service/reads/PercentileSpeculativeRetryPolicy.java
index ffd473e..a084a0f 100644
--- a/src/java/org/apache/cassandra/service/reads/PercentileSpeculativeRetryPolicy.java
+++ b/src/java/org/apache/cassandra/service/reads/PercentileSpeculativeRetryPolicy.java
@@ -27,6 +27,7 @@
 
 import com.codahale.metrics.Snapshot;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.metrics.SnapshottingTimer;
 import org.apache.cassandra.schema.TableParams;
 
 public class PercentileSpeculativeRetryPolicy implements SpeculativeRetryPolicy
@@ -47,11 +48,17 @@
     }
 
     @Override
-    public long calculateThreshold(Snapshot latency, long existingValue)
+    public long calculateThreshold(SnapshottingTimer latency, long existingValue)
     {
-        if (latency.size() <= 0)
+        return calculateThreshold(latency.getPercentileSnapshot(), existingValue);
+    }
+
+    public long calculateThreshold(Snapshot snapshot, long existingValue)
+    {
+        if (snapshot.size() <= 0)
             return existingValue;
-        return (long) latency.getValue(percentile / 100);
+        // the latency snapshot comes from a timer recording in microseconds, so the percentile value can be returned as-is
+        return (long) snapshot.getValue(percentile / 100);
     }
 
     @Override
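
A runnable sketch of the percentile lookup above (not part of the patch), using a plain codahale Histogram as a stand-in for Cassandra's SnapshottingTimer; it assumes metrics-core on the classpath and records values in microseconds so the percentile can be used directly as the speculation threshold.

    import com.codahale.metrics.Histogram;
    import com.codahale.metrics.Snapshot;
    import com.codahale.metrics.UniformReservoir;

    public class PercentileThresholdSketch
    {
        public static void main(String[] args)
        {
            Histogram coordinatorLatencies = new Histogram(new UniformReservoir());
            for (int i = 1; i <= 100; i++)
                coordinatorLatencies.update(i * 1_000L);          // 1ms..100ms, recorded in microseconds

            double percentile = 99.0;                             // as configured, e.g. speculative_retry = '99p'
            Snapshot snapshot = coordinatorLatencies.getSnapshot();
            long thresholdMicros = snapshot.size() <= 0 ? Long.MAX_VALUE
                                                        : (long) snapshot.getValue(percentile / 100);
            System.out.println("speculate after ~" + thresholdMicros + " us");
        }
    }
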
diff --git a/src/java/org/apache/cassandra/service/reads/ReadCallback.java b/src/java/org/apache/cassandra/service/reads/ReadCallback.java
index 91d9370..e69e6bd 100644
--- a/src/java/org/apache/cassandra/service/reads/ReadCallback.java
+++ b/src/java/org/apache/cassandra/service/reads/ReadCallback.java
@@ -21,9 +21,12 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.MessageParams;
 import org.apache.cassandra.locator.ReplicaPlan;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -35,20 +38,29 @@
 import org.apache.cassandra.exceptions.RequestFailureReason;
 import org.apache.cassandra.locator.Endpoints;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.ParamType;
+import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.service.reads.thresholds.CoordinatorWarnings;
+import org.apache.cassandra.service.reads.thresholds.WarningContext;
+import org.apache.cassandra.service.reads.thresholds.WarningsSnapshot;
 import org.apache.cassandra.tracing.Tracing;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater;
+import static org.apache.cassandra.tracing.Tracing.isTracing;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
-public class ReadCallback<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>> implements RequestCallback<ReadResponse>
+public class ReadCallback<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>> implements RequestCallback<ReadResponse>
 {
-    protected static final Logger logger = LoggerFactory.getLogger( ReadCallback.class );
+    protected static final Logger logger = LoggerFactory.getLogger(ReadCallback.class);
 
     public final ResponseResolver<E, P> resolver;
-    final SimpleCondition condition = new SimpleCondition();
+    final Condition condition = newOneTimeCondition();
     private final long queryStartNanoTime;
     final int blockFor; // TODO: move to replica plan as well?
     // this uses a plain reference, but is initialised before handoff to any other threads; the later updates
@@ -56,9 +68,12 @@
     final ReplicaPlan.Shared<E, P> replicaPlan;
     private final ReadCommand command;
     private static final AtomicIntegerFieldUpdater<ReadCallback> failuresUpdater
-            = AtomicIntegerFieldUpdater.newUpdater(ReadCallback.class, "failures");
+            = newUpdater(ReadCallback.class, "failures");
     private volatile int failures = 0;
     private final Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint;
+    private volatile WarningContext warningContext;
+    private static final AtomicReferenceFieldUpdater<ReadCallback, WarningContext> warningsUpdater
+        = AtomicReferenceFieldUpdater.newUpdater(ReadCallback.class, WarningContext.class, "warningContext");
 
     public ReadCallback(ResponseResolver<E, P> resolver, ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
     {
@@ -66,7 +81,7 @@
         this.resolver = resolver;
         this.queryStartNanoTime = queryStartNanoTime;
         this.replicaPlan = replicaPlan;
-        this.blockFor = replicaPlan.get().blockFor();
+        this.blockFor = replicaPlan.get().readQuorum();
         this.failureReasonByEndpoint = new ConcurrentHashMap<>();
         // we don't support read repair (or rapid read protection) for range scans yet (CASSANDRA-6897)
         assert !(command instanceof PartitionRangeReadCommand) || blockFor >= replicaPlan().contacts().size();
@@ -82,14 +97,14 @@
 
     public boolean await(long timePastStart, TimeUnit unit)
     {
-        long time = unit.toNanos(timePastStart) - (System.nanoTime() - queryStartNanoTime);
+        long time = unit.toNanos(timePastStart) - (nanoTime() - queryStartNanoTime);
         try
         {
-            return condition.await(time, TimeUnit.NANOSECONDS);
+            return condition.await(time, NANOSECONDS);
         }
-        catch (InterruptedException ex)
+        catch (InterruptedException e)
         {
-            throw new AssertionError(ex);
+            throw new UncheckedInterruptedException(e);
         }
     }
 
@@ -105,10 +120,22 @@
          */
         int received = resolver.responses.size();
         boolean failed = failures > 0 && (blockFor > received || !resolver.isDataPresent());
+        WarningContext warnings = warningContext;
+        // save the snapshot so abort state is not changed between now and when mayAbort gets called
+        WarningsSnapshot snapshot = null;
+        if (warnings != null)
+        {
+            snapshot = warnings.snapshot();
+            // an empty snapshot is possible due to a race between waiting and responding:
+            // the network thread creates the WarningContext to update metrics while we are reading it and still see it as empty;
+            // this is most likely after a timeout or for a speculative response
+            if (!snapshot.isEmpty())
+                CoordinatorWarnings.update(command, snapshot);
+        }
         if (signaled && !failed)
             return;
 
-        if (Tracing.isTracing())
+        if (isTracing())
         {
             String gotData = received > 0 ? (resolver.isDataPresent() ? " (including data)" : " (only digests)") : "";
             Tracing.trace("{}; received {} of {} responses{}", failed ? "Failed" : "Timed out", received, blockFor, gotData);
@@ -119,6 +146,9 @@
             logger.debug("{}; received {} of {} responses{}", failed ? "Failed" : "Timed out", received, blockFor, gotData);
         }
 
+        if (snapshot != null)
+            snapshot.maybeAbort(command, replicaPlan().consistencyLevel(), received, blockFor, resolver.isDataPresent(), failureReasonByEndpoint);
+
         // Same as for writes, see AbstractWriteResponseHandler
         throw failed
             ? new ReadFailureException(replicaPlan().consistencyLevel(), received, blockFor, resolver.isDataPresent(), failureReasonByEndpoint)
@@ -134,6 +164,17 @@
     public void onResponse(Message<ReadResponse> message)
     {
         assertWaitingFor(message.from());
+        Map<ParamType, Object> params = message.header.params();
+        InetAddressAndPort from = message.from();
+        if (WarningContext.isSupported(params.keySet()))
+        {
+            RequestFailureReason reason = getWarningContext().updateCounters(params, from);
+            if (reason != null)
+            {
+                onFailure(message.from(), reason);
+                return;
+            }
+        }
         resolver.preprocess(message);
 
         /*
@@ -146,14 +187,28 @@
             condition.signalAll();
     }
 
+    private WarningContext getWarningContext()
+    {
+        WarningContext current;
+        do
+        {
+            current = warningContext;
+            if (current != null)
+                return current;
+
+            current = new WarningContext();
+        } while (!warningsUpdater.compareAndSet(this, null, current));
+        return current;
+    }
+
     public void response(ReadResponse result)
     {
         Verb kind = command.isRangeRequest() ? Verb.RANGE_RSP : Verb.READ_RSP;
         Message<ReadResponse> message = Message.internalResponse(kind, result);
+        message = MessageParams.addToMessage(message);
         onResponse(message);
     }
 
-
     @Override
     public boolean trackLatencyForSnitch()
     {
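
Sketch (not part of the patch, names invented) of the lazy-init pattern getWarningContext uses above: allocate the context only when a response actually carries warning params, and let a CAS through an AtomicReferenceFieldUpdater decide the winner if two network threads race.

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    public class LazyWarningContextSketch
    {
        static final class Context {}

        private volatile Context context;                                   // only allocated on first use
        private static final AtomicReferenceFieldUpdater<LazyWarningContextSketch, Context> UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(LazyWarningContextSketch.class, Context.class, "context");

        Context getContext()
        {
            Context current;
            do
            {
                current = context;
                if (current != null)
                    return current;                                          // another thread already installed one
                current = new Context();
            }
            while (!UPDATER.compareAndSet(this, null, current));             // only one racing thread wins
            return current;
        }

        public static void main(String[] args)
        {
            LazyWarningContextSketch callback = new LazyWarningContextSketch();
            System.out.println(callback.getContext() == callback.getContext()); // true: same instance every time
        }
    }
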
diff --git a/src/java/org/apache/cassandra/service/reads/ResponseResolver.java b/src/java/org/apache/cassandra/service/reads/ResponseResolver.java
index 6ae19ac..02e565d 100644
--- a/src/java/org/apache/cassandra/service/reads/ResponseResolver.java
+++ b/src/java/org/apache/cassandra/service/reads/ResponseResolver.java
@@ -17,6 +17,8 @@
  */
 package org.apache.cassandra.service.reads;
 
+import java.util.function.Supplier;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -27,22 +29,23 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.utils.concurrent.Accumulator;
 
-public abstract class ResponseResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+public abstract class ResponseResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
 {
     protected static final Logger logger = LoggerFactory.getLogger(ResponseResolver.class);
 
     protected final ReadCommand command;
-    protected final ReplicaPlan.Shared<E, P> replicaPlan;
+    // TODO: this doesn't need to be a full ReplicaPlan; just a replica collection
+    protected final Supplier<? extends P> replicaPlan;
 
     // Accumulator gives us non-blocking thread-safety with optimal algorithmic constraints
     protected final Accumulator<Message<ReadResponse>> responses;
     protected final long queryStartNanoTime;
 
-    public ResponseResolver(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
+    public ResponseResolver(ReadCommand command, Supplier<? extends P> replicaPlan, long queryStartNanoTime)
     {
         this.command = command;
         this.replicaPlan = replicaPlan;
-        this.responses = new Accumulator<>(replicaPlan.get().candidates().size());
+        this.responses = new Accumulator<>(replicaPlan.get().readCandidates().size());
         this.queryStartNanoTime = queryStartNanoTime;
     }
 
diff --git a/src/java/org/apache/cassandra/service/reads/ShortReadPartitionsProtection.java b/src/java/org/apache/cassandra/service/reads/ShortReadPartitionsProtection.java
index 51043c3..f49c9fb 100644
--- a/src/java/org/apache/cassandra/service/reads/ShortReadPartitionsProtection.java
+++ b/src/java/org/apache/cassandra/service/reads/ShortReadPartitionsProtection.java
@@ -174,7 +174,7 @@
         return executeReadCommand(cmd.withUpdatedLimitsAndDataRange(newLimits, newDataRange), ReplicaPlan.shared(replicaPlan));
     }
 
-    private <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+    private <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
     UnfilteredPartitionIterator executeReadCommand(ReadCommand cmd, ReplicaPlan.Shared<E, P> replicaPlan)
     {
         DataResolver<E, P> resolver = new DataResolver<>(cmd, replicaPlan, (NoopReadRepair<E, P>)NoopReadRepair.instance, queryStartNanoTime);
diff --git a/src/java/org/apache/cassandra/service/reads/ShortReadProtection.java b/src/java/org/apache/cassandra/service/reads/ShortReadProtection.java
index a1bdc0e..8bf877f 100644
--- a/src/java/org/apache/cassandra/service/reads/ShortReadProtection.java
+++ b/src/java/org/apache/cassandra/service/reads/ShortReadProtection.java
@@ -18,15 +18,12 @@
 
 package org.apache.cassandra.service.reads;
 
-import java.net.InetAddress;
-
 
 import org.apache.cassandra.db.ReadCommand;
 import org.apache.cassandra.db.filter.DataLimits;
 import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
 import org.apache.cassandra.db.transform.MorePartitions;
 import org.apache.cassandra.db.transform.Transformation;
-import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 
 /**
diff --git a/src/java/org/apache/cassandra/service/reads/SpeculativeRetryPolicy.java b/src/java/org/apache/cassandra/service/reads/SpeculativeRetryPolicy.java
index e09ff51..1164076 100644
--- a/src/java/org/apache/cassandra/service/reads/SpeculativeRetryPolicy.java
+++ b/src/java/org/apache/cassandra/service/reads/SpeculativeRetryPolicy.java
@@ -17,8 +17,8 @@
  */
 package org.apache.cassandra.service.reads;
 
-import com.codahale.metrics.Snapshot;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.metrics.SnapshottingTimer;
 import org.apache.cassandra.schema.TableParams;
 
 public interface SpeculativeRetryPolicy
@@ -28,7 +28,14 @@
         NEVER, FIXED, PERCENTILE, HYBRID, ALWAYS
     }
 
-    long calculateThreshold(Snapshot latency, long existingValue);
+    /**
+     * Calculate the delay in microseconds after which speculation takes place
+     *
+     * @param latency       timer of coordinator latencies, whose snapshot is in microseconds
+     * @param existingValue existing speculation threshold (in microseconds)
+     * @return speculation delay (in microseconds).
+     */
+    long calculateThreshold(SnapshottingTimer latency, long existingValue);
 
     Kind kind();
 
diff --git a/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java b/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java
index 38014e2..b353b4b 100644
--- a/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java
+++ b/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java
@@ -19,6 +19,7 @@
 package org.apache.cassandra.service.reads.range;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
@@ -34,6 +35,7 @@
 import org.apache.cassandra.db.filter.DataLimits;
 import org.apache.cassandra.db.partitions.PartitionIterator;
 import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.exceptions.ReadAbortException;
 import org.apache.cassandra.exceptions.ReadFailureException;
 import org.apache.cassandra.exceptions.ReadTimeoutException;
 import org.apache.cassandra.exceptions.UnavailableException;
@@ -51,7 +53,10 @@
 import org.apache.cassandra.utils.AbstractIterator;
 import org.apache.cassandra.utils.CloseableIterator;
 
-class RangeCommandIterator extends AbstractIterator<RowIterator> implements PartitionIterator
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+@VisibleForTesting
+public class RangeCommandIterator extends AbstractIterator<RowIterator> implements PartitionIterator
 {
     private static final Logger logger = LoggerFactory.getLogger(RangeCommandIterator.class);
 
@@ -89,7 +94,7 @@
         this.totalRangeCount = totalRangeCount;
         this.queryStartNanoTime = queryStartNanoTime;
 
-        startTime = System.nanoTime();
+        startTime = nanoTime();
         enforceStrictLiveness = command.metadata().enforceStrictLiveness();
     }
 
@@ -122,11 +127,18 @@
         catch (UnavailableException e)
         {
             rangeMetrics.unavailables.mark();
+            StorageProxy.logRequestException(e, Collections.singleton(command));
             throw e;
         }
         catch (ReadTimeoutException e)
         {
             rangeMetrics.timeouts.mark();
+            StorageProxy.logRequestException(e, Collections.singleton(command));
+            throw e;
+        }
+        catch (ReadAbortException e)
+        {
+            rangeMetrics.markAbort(e);
             throw e;
         }
         catch (ReadFailureException e)
@@ -256,7 +268,7 @@
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
             rangeMetrics.addNano(latency);
             Keyspace.openAndGetStore(command.metadata()).metric.coordinatorScanLatency.update(latency, TimeUnit.NANOSECONDS);
         }
diff --git a/src/java/org/apache/cassandra/service/reads/range/RangeCommands.java b/src/java/org/apache/cassandra/service/reads/range/RangeCommands.java
index 3452a35..5b656d7 100644
--- a/src/java/org/apache/cassandra/service/reads/range/RangeCommands.java
+++ b/src/java/org/apache/cassandra/service/reads/range/RangeCommands.java
@@ -24,10 +24,14 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DataRange;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.PartitionRangeReadCommand;
 import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.exceptions.UnavailableException;
 import org.apache.cassandra.index.Index;
+import org.apache.cassandra.locator.ReplicaPlans;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.FBUtilities;
 import org.assertj.core.util.VisibleForTesting;
@@ -114,4 +118,26 @@
         return (maxExpectedResults / DatabaseDescriptor.getNumTokens())
                / keyspace.getReplicationStrategy().getReplicationFactor().allReplicas;
     }
+
+    /**
+     * Added specifically to check for sufficient nodes live to serve partition denylist queries
+     */
+    public static boolean sufficientLiveNodesForSelectStar(TableMetadata metadata, ConsistencyLevel consistency)
+    {
+        try
+        {
+            Keyspace keyspace = Keyspace.open(metadata.keyspace);
+            ReplicaPlanIterator rangeIterator = new ReplicaPlanIterator(DataRange.allData(metadata.partitioner).keyRange(),
+                                                                        keyspace, consistency);
+
+            // Called for the side effect of running assureSufficientLiveReplicasForRead.
+            // Deliberately called with an invalid vnode count in case it is used elsewhere in the future.
+            rangeIterator.forEachRemaining(r -> ReplicaPlans.forRangeRead(keyspace, consistency, r.range(), -1));
+            return true;
+        }
+        catch (UnavailableException e)
+        {
+            return false;
+        }
+    }
 }
diff --git a/src/java/org/apache/cassandra/service/reads/repair/AbstractReadRepair.java b/src/java/org/apache/cassandra/service/reads/repair/AbstractReadRepair.java
index 086766e..28f94fd 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/AbstractReadRepair.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/AbstractReadRepair.java
@@ -23,6 +23,9 @@
 import com.codahale.metrics.Meter;
 import com.google.common.base.Preconditions;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -44,10 +47,13 @@
 import org.apache.cassandra.service.reads.ReadCallback;
 import org.apache.cassandra.tracing.Tracing;
 
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
 
-public abstract class AbstractReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>> implements ReadRepair<E, P>
+public abstract class AbstractReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
+        implements ReadRepair<E, P>
 {
+    protected static final Logger logger = LoggerFactory.getLogger(AbstractReadRepair.class);
+
     protected final ReadCommand command;
     protected final long queryStartNanoTime;
     protected final ReplicaPlan.Shared<E, P> replicaPlan;
@@ -55,7 +61,7 @@
 
     private volatile DigestRepair<E, P> digestRepair = null;
 
-    private static class DigestRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+    private static class DigestRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
     {
         private final DataResolver<E, P> dataResolver;
         private final ReadCallback<E, P> readCallback;
@@ -153,7 +159,17 @@
         if (repair == null)
             return;
 
-        repair.readCallback.awaitResults();
+        try
+        {
+            repair.readCallback.awaitResults();
+        }
+        catch (ReadTimeoutException e)
+        {
+            ReadRepairMetrics.timedOut.mark();
+            if (logger.isDebugEnabled())
+                logger.debug("Timed out merging read repair responses", e);
+            throw e;
+        }
         repair.resultConsumer.accept(digestRepair.dataResolver.resolve());
     }
 
@@ -163,7 +179,7 @@
         ConsistencyLevel speculativeCL = consistency.isDatacenterLocal() ? ConsistencyLevel.LOCAL_QUORUM : ConsistencyLevel.QUORUM;
         return  consistency != ConsistencyLevel.EACH_QUORUM
                 && consistency.satisfies(speculativeCL, replicaPlan.get().replicationStrategy())
-                && cfs.sampleReadLatencyNanos <= command.getTimeout(NANOSECONDS);
+                && cfs.sampleReadLatencyMicros <= command.getTimeout(MICROSECONDS);
     }
 
     public void maybeSendAdditionalReads()
@@ -174,7 +190,7 @@
         if (repair == null)
             return;
 
-        if (shouldSpeculate() && !repair.readCallback.await(cfs.sampleReadLatencyNanos, NANOSECONDS))
+        if (shouldSpeculate() && !repair.readCallback.await(cfs.sampleReadLatencyMicros, MICROSECONDS))
         {
             Replica uncontacted = replicaPlan().firstUncontactedCandidate(replica -> true);
             if (uncontacted == null)
diff --git a/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java b/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java
index edcf14d..c8b0e29 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java
@@ -21,7 +21,9 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
+
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;
 
@@ -30,7 +32,6 @@
 import com.google.common.base.Predicates;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.AbstractFuture;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.ConsistencyLevel;
@@ -38,46 +39,48 @@
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.locator.Endpoints;
 import org.apache.cassandra.locator.EndpointsForToken;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.locator.ReplicaPlan;
 import org.apache.cassandra.locator.Replicas;
-import org.apache.cassandra.locator.InOurDcTester;
+import org.apache.cassandra.locator.InOurDc;
 import org.apache.cassandra.metrics.ReadRepairMetrics;
 import org.apache.cassandra.net.RequestCallback;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.tracing.Tracing;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static org.apache.cassandra.net.Verb.*;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
 
 public class BlockingPartitionRepair
-        extends AbstractFuture<Object> implements RequestCallback<Object>
+        extends AsyncFuture<Object> implements RequestCallback<Object>
 {
     private final DecoratedKey key;
-    private final ReplicaPlan.ForTokenWrite writePlan;
+    private final ReplicaPlan.ForWrite writePlan;
     private final Map<Replica, Mutation> pendingRepairs;
     private final CountDownLatch latch;
     private final Predicate<InetAddressAndPort> shouldBlockOn;
 
     private volatile long mutationsSentTime;
 
-    public BlockingPartitionRepair(DecoratedKey key, Map<Replica, Mutation> repairs, ReplicaPlan.ForTokenWrite writePlan)
+    public BlockingPartitionRepair(DecoratedKey key, Map<Replica, Mutation> repairs, ReplicaPlan.ForWrite writePlan)
     {
         this(key, repairs, writePlan,
-             writePlan.consistencyLevel().isDatacenterLocal() ? InOurDcTester.endpoints() : Predicates.alwaysTrue());
+             writePlan.consistencyLevel().isDatacenterLocal() ? InOurDc.endpoints() : Predicates.alwaysTrue());
     }
-    public BlockingPartitionRepair(DecoratedKey key, Map<Replica, Mutation> repairs, ReplicaPlan.ForTokenWrite writePlan, Predicate<InetAddressAndPort> shouldBlockOn)
+    public BlockingPartitionRepair(DecoratedKey key, Map<Replica, Mutation> repairs, ReplicaPlan.ForWrite writePlan, Predicate<InetAddressAndPort> shouldBlockOn)
     {
         this.key = key;
         this.pendingRepairs = new ConcurrentHashMap<>(repairs);
         this.writePlan = writePlan;
         this.shouldBlockOn = shouldBlockOn;
 
-        int blockFor = writePlan.blockFor();
+        int blockFor = writePlan.writeQuorum();
         // here we remove empty repair mutations from the block for total, since
         // we're not sending them mutations
         for (Replica participant : writePlan.contacts())
@@ -93,18 +96,18 @@
         // empty mutations. If we'd also speculated on either of the read stages, the number
         // of empty mutations would be greater than blockFor, causing the latch ctor to throw
         // an illegal argument exception due to a negative start value. So here we clamp it 0
-        latch = new CountDownLatch(Math.max(blockFor, 0));
+        latch = newCountDownLatch(Math.max(blockFor, 0));
     }
 
     int blockFor()
     {
-        return writePlan.blockFor();
+        return writePlan.writeQuorum();
     }
 
     @VisibleForTesting
     int waitingOn()
     {
-        return (int) latch.getCount();
+        return (int) latch.count();
     }
 
     @VisibleForTesting
@@ -113,7 +116,7 @@
         if (shouldBlockOn.test(from))
         {
             pendingRepairs.remove(writePlan.lookup(from));
-            latch.countDown();
+            latch.decrement();
         }
     }
 
@@ -146,7 +149,7 @@
 
     public void sendInitialRepairs()
     {
-        mutationsSentTime = System.nanoTime();
+        mutationsSentTime = nanoTime();
         Replicas.assertFull(pendingRepairs.keySet());
 
         for (Map.Entry<Replica, Mutation> entry: pendingRepairs.entrySet())
@@ -177,14 +180,14 @@
     public boolean awaitRepairsUntil(long timeoutAt, TimeUnit timeUnit)
     {
         long timeoutAtNanos = timeUnit.toNanos(timeoutAt);
-        long remaining = timeoutAtNanos - System.nanoTime();
+        long remaining = timeoutAtNanos - nanoTime();
         try
         {
             return latch.await(remaining, TimeUnit.NANOSECONDS);
         }
         catch (InterruptedException e)
         {
-            throw new AssertionError(e);
+            throw new UncheckedInterruptedException(e);
         }
     }
 
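
Sketch (not part of the patch) of the clamping noted in the latch construction above: when more empty repair mutations than blockFor are subtracted (e.g. after speculation), the remaining count would be negative and the latch constructor would throw, so it is clamped to 0 and await() then returns immediately.

    import java.util.concurrent.CountDownLatch;

    public class LatchClampSketch
    {
        public static void main(String[] args) throws InterruptedException
        {
            int blockFor = 2;
            int emptyRepairs = 3;                       // more empty mutations than blockFor
            int remaining = blockFor - emptyRepairs;    // -1: new CountDownLatch(-1) would throw IllegalArgumentException
            CountDownLatch latch = new CountDownLatch(Math.max(remaining, 0));
            latch.await();                              // returns immediately when the count is 0
            System.out.println("nothing to block on");
        }
    }
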
diff --git a/src/java/org/apache/cassandra/service/reads/repair/BlockingReadRepair.java b/src/java/org/apache/cassandra/service/reads/repair/BlockingReadRepair.java
index fdc8b50..9143a47 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/BlockingReadRepair.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/BlockingReadRepair.java
@@ -21,7 +21,6 @@
 import java.util.Map;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.cassandra.db.DecoratedKey;
 import org.slf4j.Logger;
@@ -39,6 +38,7 @@
 import org.apache.cassandra.metrics.ReadRepairMetrics;
 import org.apache.cassandra.tracing.Tracing;
 
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 /**
@@ -46,7 +46,7 @@
  *  updates have been written to nodes needing correction. Breaks write
  *  atomicity in some situations
  */
-public class BlockingReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+public class BlockingReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
         extends AbstractReadRepair<E, P>
 {
     private static final Logger logger = LoggerFactory.getLogger(BlockingReadRepair.class);
@@ -74,7 +74,7 @@
     {
         for (BlockingPartitionRepair repair: repairs)
         {
-            repair.maybeSendAdditionalWrites(cfs.additionalWriteLatencyNanos, TimeUnit.NANOSECONDS);
+            repair.maybeSendAdditionalWrites(cfs.additionalWriteLatencyMicros, MICROSECONDS);
         }
     }
 
@@ -106,7 +106,7 @@
     }
 
     @Override
-    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForTokenWrite writePlan)
+    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForWrite writePlan)
     {
         BlockingPartitionRepair blockingRepair = new BlockingPartitionRepair(partitionKey, mutations, writePlan);
         blockingRepair.sendInitialRepairs();
diff --git a/src/java/org/apache/cassandra/service/reads/repair/NoopReadRepair.java b/src/java/org/apache/cassandra/service/reads/repair/NoopReadRepair.java
index b65f3fc..5cf72b3 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/NoopReadRepair.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/NoopReadRepair.java
@@ -34,7 +34,7 @@
 /**
  * Bypasses the read repair path for short read protection and testing
  */
-public class NoopReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>> implements ReadRepair<E, P>
+public class NoopReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>> implements ReadRepair<E, P>
 {
     public static final NoopReadRepair instance = new NoopReadRepair();
 
@@ -75,7 +75,7 @@
     }
 
     @Override
-    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForTokenWrite writePlan)
+    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForWrite writePlan)
     {
 
     }
diff --git a/src/java/org/apache/cassandra/service/reads/repair/PartitionIteratorMergeListener.java b/src/java/org/apache/cassandra/service/reads/repair/PartitionIteratorMergeListener.java
index 7247704..f77bd4d 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/PartitionIteratorMergeListener.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/PartitionIteratorMergeListener.java
@@ -21,7 +21,6 @@
 import java.util.List;
 
 import org.apache.cassandra.db.Columns;
-import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.ReadCommand;
 import org.apache.cassandra.db.RegularAndStaticColumns;
@@ -34,11 +33,11 @@
 public class PartitionIteratorMergeListener<E extends Endpoints<E>>
         implements UnfilteredPartitionIterators.MergeListener
 {
-    private final ReplicaPlan.ForRead<E> replicaPlan;
+    private final ReplicaPlan.ForRead<E, ?> replicaPlan;
     private final ReadCommand command;
     private final ReadRepair readRepair;
 
-    public PartitionIteratorMergeListener(ReplicaPlan.ForRead<E> replicaPlan, ReadCommand command, ReadRepair readRepair)
+    public PartitionIteratorMergeListener(ReplicaPlan.ForRead<E, ?> replicaPlan, ReadCommand command, ReadRepair readRepair)
     {
         this.replicaPlan = replicaPlan;
         this.command = command;
diff --git a/src/java/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepair.java b/src/java/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepair.java
index d9293fb..72a1298 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepair.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepair.java
@@ -34,7 +34,7 @@
  * Only performs the collection of data responses and reconciliation of them, doesn't send repair mutations
  * to replicas. This preserves write atomicity, but doesn't provide monotonic quorum reads
  */
-public class ReadOnlyReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+public class ReadOnlyReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
         extends AbstractReadRepair<E, P>
 {
     ReadOnlyReadRepair(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
@@ -61,7 +61,7 @@
     }
 
     @Override
-    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForTokenWrite writePlan)
+    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForWrite writePlan)
     {
         throw new UnsupportedOperationException("ReadOnlyReadRepair shouldn't be trying to repair partitions");
     }
diff --git a/src/java/org/apache/cassandra/service/reads/repair/ReadRepair.java b/src/java/org/apache/cassandra/service/reads/repair/ReadRepair.java
index 4747651..a6a9be2 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/ReadRepair.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/ReadRepair.java
@@ -19,7 +19,6 @@
 
 import java.util.Map;
 import java.util.function.Consumer;
-import java.util.function.Supplier;
 
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.locator.Endpoints;
@@ -33,15 +32,15 @@
 import org.apache.cassandra.locator.ReplicaPlan;
 import org.apache.cassandra.service.reads.DigestResolver;
 
-public interface ReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+public interface ReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
 {
     public interface Factory
     {
-        <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+        <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
         ReadRepair<E, P> create(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime);
     }
 
-    static <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+    static <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
     ReadRepair<E, P> create(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
     {
         return command.metadata().params.readRepair.create(command, replicaPlan, queryStartNanoTime);
@@ -93,5 +92,5 @@
      * Repairs a partition _after_ receiving data responses. This method receives replica list, since
      * we will block repair only on the replicas that have responded.
      */
-    void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForTokenWrite writePlan);
+    void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForWrite writePlan);
 }
diff --git a/src/java/org/apache/cassandra/service/reads/repair/ReadRepairDiagnostics.java b/src/java/org/apache/cassandra/service/reads/repair/ReadRepairDiagnostics.java
index b9167bd..0443c3f 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/ReadRepairDiagnostics.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/ReadRepairDiagnostics.java
@@ -25,7 +25,6 @@
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.diag.DiagnosticEventService;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.locator.ReplicaLayout;
 import org.apache.cassandra.locator.ReplicaPlan;
 import org.apache.cassandra.service.reads.DigestResolver;
 import org.apache.cassandra.service.reads.repair.PartitionRepairEvent.PartitionRepairEventType;
@@ -39,22 +38,22 @@
     {
     }
 
-    static void startRepair(AbstractReadRepair readRepair, ReplicaPlan.ForRead<?> fullPlan, DigestResolver digestResolver)
+    static void startRepair(AbstractReadRepair readRepair, ReplicaPlan.ForRead<?, ?> fullPlan, DigestResolver digestResolver)
     {
         if (service.isEnabled(ReadRepairEvent.class, ReadRepairEventType.START_REPAIR))
             service.publish(new ReadRepairEvent(ReadRepairEventType.START_REPAIR,
                                                 readRepair,
                                                 fullPlan.contacts().endpoints(),
-                                                fullPlan.candidates().endpoints(), digestResolver));
+                                                fullPlan.readCandidates().endpoints(), digestResolver));
     }
 
     static void speculatedRead(AbstractReadRepair readRepair, InetAddressAndPort endpoint,
-                               ReplicaPlan.ForRead<?> fullPlan)
+                               ReplicaPlan.ForRead<?, ?> fullPlan)
     {
         if (service.isEnabled(ReadRepairEvent.class, ReadRepairEventType.SPECULATED_READ))
             service.publish(new ReadRepairEvent(ReadRepairEventType.SPECULATED_READ,
                                                 readRepair, Collections.singletonList(endpoint),
-                                                Lists.newArrayList(fullPlan.candidates().endpoints()), null));
+                                                Lists.newArrayList(fullPlan.readCandidates().endpoints()), null));
     }
 
     static void sendInitialRepair(BlockingPartitionRepair partitionRepair, InetAddressAndPort destination, Mutation mutation)
diff --git a/src/java/org/apache/cassandra/service/reads/repair/ReadRepairEvent.java b/src/java/org/apache/cassandra/service/reads/repair/ReadRepairEvent.java
index 5cec802..a30efa1 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/ReadRepairEvent.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/ReadRepairEvent.java
@@ -22,7 +22,6 @@
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -90,7 +89,7 @@
         ret.put("consistency", consistency.name());
         ret.put("speculativeRetry", speculativeRetry.name());
 
-        Set<String> eps = destinations.stream().map(InetAddressAndPort::toString).collect(Collectors.toSet());
+        Set<String> eps = destinations.stream().map(Object::toString).collect(Collectors.toSet());
         ret.put("endpointDestinations", new HashSet<>(eps));
 
         if (digestsByEndpoint != null)
@@ -107,7 +106,7 @@
         }
         if (allEndpoints != null)
         {
-            eps = allEndpoints.stream().map(InetAddressAndPort::toString).collect(Collectors.toSet());
+            eps = allEndpoints.stream().map(Object::toString).collect(Collectors.toSet());
             ret.put("allEndpoints", new HashSet<>(eps));
         }
         return ret;
diff --git a/src/java/org/apache/cassandra/service/reads/repair/ReadRepairStrategy.java b/src/java/org/apache/cassandra/service/reads/repair/ReadRepairStrategy.java
index 7a4b795..0d9caad 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/ReadRepairStrategy.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/ReadRepairStrategy.java
@@ -20,14 +20,13 @@
 
 import org.apache.cassandra.db.ReadCommand;
 import org.apache.cassandra.locator.Endpoints;
-import org.apache.cassandra.locator.ReplicaLayout;
 import org.apache.cassandra.locator.ReplicaPlan;
 
 public enum ReadRepairStrategy implements ReadRepair.Factory
 {
     NONE
     {
-        public <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+        public <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
         ReadRepair<E, P> create(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
         {
             return new ReadOnlyReadRepair<>(command, replicaPlan, queryStartNanoTime);
@@ -36,7 +35,7 @@
 
     BLOCKING
     {
-        public <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+        public <E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
         ReadRepair<E, P> create(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
         {
             return new BlockingReadRepair<>(command, replicaPlan, queryStartNanoTime);
diff --git a/src/java/org/apache/cassandra/service/reads/repair/RepairedDataVerifier.java b/src/java/org/apache/cassandra/service/reads/repair/RepairedDataVerifier.java
index d1cff11..c5a0f95 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/RepairedDataVerifier.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/RepairedDataVerifier.java
@@ -18,11 +18,7 @@
 
 package org.apache.cassandra.service.reads.repair;
 
-import java.time.LocalDate;
-import java.time.format.DateTimeFormatter;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -30,14 +26,7 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.ReadCommand;
-import org.apache.cassandra.db.SnapshotCommand;
-import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.metrics.TableMetrics;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.net.Verb;
-import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.service.SnapshotVerbHandler;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.DiagnosticSnapshotService;
 import org.apache.cassandra.utils.NoSpamLogger;
diff --git a/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java b/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java
index 38d077a..079080a 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java
@@ -68,8 +68,8 @@
     private final PartitionUpdate.Builder[] repairs;
     private final Row.Builder[] currentRows;
     private final RowDiffListener diffListener;
-    private final ReplicaPlan.ForRead<E> readPlan;
-    private final ReplicaPlan.ForTokenWrite writePlan;
+    private final ReplicaPlan.ForRead<E, ?> readPlan;
+    private final ReplicaPlan.ForWrite writePlan;
 
     // The partition level deletion for the merge row.
     private DeletionTime partitionLevelDeletion;
@@ -82,7 +82,7 @@
 
     private final ReadRepair readRepair;
 
-    public RowIteratorMergeListener(DecoratedKey partitionKey, RegularAndStaticColumns columns, boolean isReversed, ReplicaPlan.ForRead<E> readPlan, ReadCommand command, ReadRepair readRepair)
+    public RowIteratorMergeListener(DecoratedKey partitionKey, RegularAndStaticColumns columns, boolean isReversed, ReplicaPlan.ForRead<E, ?> readPlan, ReadCommand command, ReadRepair readRepair)
     {
         this.partitionKey = partitionKey;
         this.columns = columns;
diff --git a/src/java/org/apache/cassandra/service/reads/thresholds/CoordinatorWarnings.java b/src/java/org/apache/cassandra/service/reads/thresholds/CoordinatorWarnings.java
new file mode 100644
index 0000000..ff5bd78
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/reads/thresholds/CoordinatorWarnings.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.reads.thresholds;
+
+import java.util.AbstractMap;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.ReadCommand;
+import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.service.ClientWarn;
+
+public class CoordinatorWarnings
+{
+    private static final Logger logger = LoggerFactory.getLogger(CoordinatorWarnings.class);
+    private static final boolean ENABLE_DEFENSIVE_CHECKS = Boolean.getBoolean("cassandra.reads.thresholds.coordinator.defensive_checks_enabled");
+
+    // when .init() is called, STATE is set to INIT; this lazily allocates the real map only once warnings are generated
+    private static final Map<ReadCommand, WarningsSnapshot> INIT = Collections.emptyMap();
+    private static final FastThreadLocal<Map<ReadCommand, WarningsSnapshot>> STATE = new FastThreadLocal<>();
+
+    private CoordinatorWarnings() {}
+
+    public static void init()
+    {
+        logger.trace("CoordinatorTrackWarnings.init()");
+        if (STATE.get() != null)
+        {
+            if (ENABLE_DEFENSIVE_CHECKS)
+                throw new AssertionError("CoordinatorTrackWarnings.init called while state is not null: " + STATE.get());
+            return;
+        }
+        STATE.set(INIT);
+    }
+
+    public static void reset()
+    {
+        logger.trace("CoordinatorTrackWarnings.reset()");
+        STATE.remove();
+    }
+
+    public static void update(ReadCommand cmd, WarningsSnapshot snapshot)
+    {
+        logger.trace("CoordinatorTrackWarnings.update({}, {})", cmd.metadata(), snapshot);
+        Map<ReadCommand, WarningsSnapshot> map = mutable();
+        WarningsSnapshot previous = map.get(cmd);
+        WarningsSnapshot update = WarningsSnapshot.merge(previous, snapshot);
+        if (update == null) // null happens when the merge had null input or EMPTY input... remove the command from the map
+            map.remove(cmd);
+        else
+            map.put(cmd, update);
+    }
+
+    public static void done()
+    {
+        Map<ReadCommand, WarningsSnapshot> map = readonly();
+        logger.trace("CoordinatorTrackWarnings.done() with state {}", map);
+        map.forEach((command, merged) -> {
+            ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(command.metadata().id);
+            // race condition when dropping tables, also happens in unit tests as Schema may be bypassed
+            if (cfs == null)
+                return;
+
+            String cql = command.toCQLString();
+            String loggableTokens = command.loggableTokens();
+            recordAborts(merged.tombstones, cql, loggableTokens, cfs.metric.clientTombstoneAborts, WarningsSnapshot::tombstoneAbortMessage);
+            recordWarnings(merged.tombstones, cql, loggableTokens, cfs.metric.clientTombstoneWarnings, WarningsSnapshot::tombstoneWarnMessage);
+
+            recordAborts(merged.localReadSize, cql, loggableTokens, cfs.metric.localReadSizeAborts, WarningsSnapshot::localReadSizeAbortMessage);
+            recordWarnings(merged.localReadSize, cql, loggableTokens, cfs.metric.localReadSizeWarnings, WarningsSnapshot::localReadSizeWarnMessage);
+
+            recordAborts(merged.rowIndexReadSize, cql, loggableTokens, cfs.metric.rowIndexSizeAborts, WarningsSnapshot::rowIndexReadSizeAbortMessage);
+            recordWarnings(merged.rowIndexReadSize, cql, loggableTokens, cfs.metric.rowIndexSizeWarnings, WarningsSnapshot::rowIndexSizeWarnMessage);
+        });
+
+        // reset the state to block from double publishing
+        clearState();
+    }
+
+    private static Map<ReadCommand, WarningsSnapshot> mutable()
+    {
+        Map<ReadCommand, WarningsSnapshot> map = STATE.get();
+        if (map == null)
+        {
+            if (ENABLE_DEFENSIVE_CHECKS)
+                throw new AssertionError("CoordinatorTrackWarnings.mutable calling without calling .init() first");
+            // set map to an "ignore" map; dropping all mutations
+            // since init was not called, it isn't clear that the state will be cleaned up, so avoid populating
+            map = IgnoreMap.get();
+        }
+        else if (map == INIT)
+        {
+            map = new HashMap<>();
+            STATE.set(map);
+        }
+        return map;
+    }
+
+    private static Map<ReadCommand, WarningsSnapshot> readonly()
+    {
+        Map<ReadCommand, WarningsSnapshot> map = STATE.get();
+        if (map == null)
+        {
+            if (ENABLE_DEFENSIVE_CHECKS)
+                throw new AssertionError("CoordinatorTrackWarnings.readonly calling without calling .init() first");
+            // since init was not called, it isn't clear that the state will be cleaned up, so avoid populating
+            map = Collections.emptyMap();
+        }
+        return map;
+    }
+
+    private static void clearState()
+    {
+        Map<ReadCommand, WarningsSnapshot> map = STATE.get();
+        if (map == null || map == INIT)
+            return;
+        // map is mutable, so set to INIT
+        STATE.set(INIT);
+    }
+
+    // utility interface to let callers use static functions
+    @FunctionalInterface
+    private interface ToString
+    {
+        String apply(int count, long value, String cql);
+    }
+
+    private static void recordAborts(WarningsSnapshot.Warnings counter, String cql, String loggableTokens, TableMetrics.TableMeter metric, ToString toString)
+    {
+        if (!counter.aborts.instances.isEmpty())
+        {
+            String msg = toString.apply(counter.aborts.instances.size(), counter.aborts.maxValue, cql);
+            ClientWarn.instance.warn(msg + " with " + loggableTokens);
+            logger.warn(msg);
+            metric.mark();
+        }
+    }
+
+    private static void recordWarnings(WarningsSnapshot.Warnings counter, String cql, String loggableTokens, TableMetrics.TableMeter metric, ToString toString)
+    {
+        if (!counter.warnings.instances.isEmpty())
+        {
+            String msg = toString.apply(counter.warnings.instances.size(), counter.warnings.maxValue, cql);
+            ClientWarn.instance.warn(msg + " with " + loggableTokens);
+            logger.warn(msg);
+            metric.mark();
+        }
+    }
+
+    /**
+     * Utility class to create an immutable map which does not fail on mutation but instead ignores it.
+     */
+    private static final class IgnoreMap extends AbstractMap<Object, Object>
+    {
+        private static final IgnoreMap INSTANCE = new IgnoreMap();
+
+        private static <K, V> Map<K, V> get()
+        {
+            return (Map<K, V>) INSTANCE;
+        }
+
+        @Override
+        public Object put(Object key, Object value)
+        {
+            return null;
+        }
+
+        @Override
+        public Set<Entry<Object, Object>> entrySet()
+        {
+            return Collections.emptySet();
+        }
+    }
+}
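
Minimal sketch (not part of the patch) of the per-thread state machine used above, with a plain ThreadLocal standing in for Netty's FastThreadLocal and String keys standing in for ReadCommand: the state goes null -> INIT sentinel -> HashMap, so the mutable map is only allocated once a warning is actually recorded, and updates are silently dropped when init() was never called on the thread.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class ThreadLocalWarningsSketch
    {
        private static final Map<String, Long> INIT = Collections.emptyMap();          // sentinel: init() called, nothing recorded yet
        private static final ThreadLocal<Map<String, Long>> STATE = new ThreadLocal<>();

        static void init() { if (STATE.get() == null) STATE.set(INIT); }

        static void update(String key, long value)
        {
            Map<String, Long> map = STATE.get();
            if (map == null)
                return;                                                                 // init() never called on this thread: drop silently
            if (map == INIT)
            {
                map = new HashMap<>();                                                  // first real warning: allocate the mutable map
                STATE.set(map);
            }
            map.merge(key, value, Math::max);
        }

        static void done()
        {
            Map<String, Long> map = STATE.get();
            if (map != null && map != INIT)
                map.forEach((k, v) -> System.out.println(k + " -> " + v));              // stand-in for metric/ClientWarn publishing
            STATE.remove();
        }

        public static void main(String[] args)
        {
            init();
            update("tombstones", 1200);
            update("tombstones", 3000);
            done();                                                                      // prints: tombstones -> 3000
        }
    }
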
diff --git a/src/java/org/apache/cassandra/service/reads/thresholds/WarnAbortCounter.java b/src/java/org/apache/cassandra/service/reads/thresholds/WarnAbortCounter.java
new file mode 100644
index 0000000..37a64b8
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/reads/thresholds/WarnAbortCounter.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.reads.thresholds;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+public class WarnAbortCounter
+{
+    final Set<InetAddressAndPort> warnings = Collections.newSetFromMap(new ConcurrentHashMap<>());
+    // the highest number reported by a node's warning
+    final AtomicLong maxWarningValue = new AtomicLong();
+
+    final Set<InetAddressAndPort> aborts = Collections.newSetFromMap(new ConcurrentHashMap<>());
+    // the highest number reported by a node's rejection.
+    final AtomicLong maxAbortsValue = new AtomicLong();
+
+    void addWarning(InetAddressAndPort from, long value)
+    {
+        maxWarningValue.accumulateAndGet(value, Math::max);
+        // call add last so concurrent reads see empty even if values > 0; if done in different order then
+        // size=1 could have values == 0
+        warnings.add(from);
+    }
+
+    void addAbort(InetAddressAndPort from, long value)
+    {
+        maxAbortsValue.accumulateAndGet(value, Math::max);
+        // call add last so concurrent reads see an empty set even when the value is > 0; if done in the
+        // other order, a reader could see size == 1 while the value is still 0
+        aborts.add(from);
+    }
+
+    public WarningsSnapshot.Warnings snapshot()
+    {
+        return WarningsSnapshot.Warnings.create(WarningsSnapshot.Counter.create(warnings, maxWarningValue), WarningsSnapshot.Counter.create(aborts, maxAbortsValue));
+    }
+}
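
The ordering comments in addWarning/addAbort above encode a small publish/consume discipline: the writer publishes the value before the member, and a reader that consults the member set first can therefore never observe a member without its value. A standalone sketch of that discipline with JDK types only (names are illustrative):

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    final class OrderedCounterSketch
    {
        private final Set<String> members = Collections.newSetFromMap(new ConcurrentHashMap<>());
        private final AtomicLong maxValue = new AtomicLong();

        void record(String from, long value)
        {
            maxValue.accumulateAndGet(value, Math::max); // 1. publish the value
            members.add(from);                           // 2. then publish the member
        }

        long maxIfAnyMember()
        {
            if (members.isEmpty())  // 1. read the members first
                return 0;
            return maxValue.get();  // 2. then the value; non-zero whenever a member reported value > 0
        }
    }
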
diff --git a/src/java/org/apache/cassandra/service/reads/thresholds/WarningContext.java b/src/java/org/apache/cassandra/service/reads/thresholds/WarningContext.java
new file mode 100644
index 0000000..1303f9b
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/reads/thresholds/WarningContext.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.reads.thresholds;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.ParamType;
+
+public class WarningContext
+{
+    private static EnumSet<ParamType> SUPPORTED = EnumSet.of(ParamType.TOMBSTONE_WARNING, ParamType.TOMBSTONE_FAIL,
+                                                             ParamType.LOCAL_READ_SIZE_WARN, ParamType.LOCAL_READ_SIZE_FAIL,
+                                                             ParamType.ROW_INDEX_READ_SIZE_WARN, ParamType.ROW_INDEX_READ_SIZE_FAIL);
+
+    final WarnAbortCounter tombstones = new WarnAbortCounter();
+    final WarnAbortCounter localReadSize = new WarnAbortCounter();
+    final WarnAbortCounter rowIndexReadSize = new WarnAbortCounter();
+
+    public static boolean isSupported(Set<ParamType> keys)
+    {
+        return !Collections.disjoint(keys, SUPPORTED);
+    }
+
+    public RequestFailureReason updateCounters(Map<ParamType, Object> params, InetAddressAndPort from)
+    {
+        for (Map.Entry<ParamType, Object> entry : params.entrySet())
+        {
+            WarnAbortCounter counter = null;
+            RequestFailureReason reason = null;
+            switch (entry.getKey())
+            {
+                case ROW_INDEX_READ_SIZE_FAIL:
+                    reason = RequestFailureReason.READ_SIZE;
+                case ROW_INDEX_READ_SIZE_WARN:
+                    counter = rowIndexReadSize;
+                    break;
+                case LOCAL_READ_SIZE_FAIL:
+                    reason = RequestFailureReason.READ_SIZE;
+                case LOCAL_READ_SIZE_WARN:
+                    counter = localReadSize;
+                    break;
+                case TOMBSTONE_FAIL:
+                    reason = RequestFailureReason.READ_TOO_MANY_TOMBSTONES;
+                case TOMBSTONE_WARNING:
+                    counter = tombstones;
+                    break;
+            }
+            if (reason != null)
+            {
+                counter.addAbort(from, ((Number) entry.getValue()).longValue());
+                return reason;
+            }
+            if (counter != null)
+                counter.addWarning(from, ((Number) entry.getValue()).longValue());
+        }
+        return null;
+    }
+
+    public WarningsSnapshot snapshot()
+    {
+        return WarningsSnapshot.create(tombstones.snapshot(), localReadSize.snapshot(), rowIndexReadSize.snapshot());
+    }
+}
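
A hedged sketch of how a coordinator-side caller might drive WarningContext with the response params handled by updateCounters above. The helper class and its wiring are illustrative only, not the actual coordinator code; note that a *_FAIL param yields a failure reason (recorded as an abort) while a *_WARN param only bumps the warning counter.

    import java.util.Map;

    import org.apache.cassandra.exceptions.RequestFailureReason;
    import org.apache.cassandra.locator.InetAddressAndPort;
    import org.apache.cassandra.net.ParamType;
    import org.apache.cassandra.service.reads.thresholds.WarningContext;
    import org.apache.cassandra.service.reads.thresholds.WarningsSnapshot;

    final class WarningContextUsageSketch
    {
        static WarningsSnapshot consume(WarningContext ctx, Map<ParamType, Object> params, InetAddressAndPort from)
        {
            if (WarningContext.isSupported(params.keySet()))
            {
                RequestFailureReason reason = ctx.updateCounters(params, from);
                if (reason != null)
                {
                    // a *_FAIL param was present: the caller would treat this response as a failure
                }
            }
            return ctx.snapshot(); // immutable view, merged later across responses
        }
    }
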
diff --git a/src/java/org/apache/cassandra/service/reads/thresholds/WarningsSnapshot.java b/src/java/org/apache/cassandra/service/reads/thresholds/WarningsSnapshot.java
new file mode 100644
index 0000000..fddb1e1
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/reads/thresholds/WarningsSnapshot.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.reads.thresholds;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.ReadCommand;
+import org.apache.cassandra.exceptions.ReadSizeAbortException;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.exceptions.TombstoneAbortException;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+public class WarningsSnapshot
+{
+    private static final WarningsSnapshot EMPTY = new WarningsSnapshot(Warnings.EMPTY, Warnings.EMPTY, Warnings.EMPTY);
+
+    public final Warnings tombstones, localReadSize, rowIndexReadSize;
+
+    private WarningsSnapshot(Warnings tombstones, Warnings localReadSize, Warnings rowIndexReadSize)
+    {
+        this.tombstones = tombstones;
+        this.localReadSize = localReadSize;
+        this.rowIndexReadSize = rowIndexReadSize;
+    }
+
+    public static WarningsSnapshot empty()
+    {
+        return EMPTY;
+    }
+
+    public static WarningsSnapshot create(Warnings tombstones, Warnings localReadSize, Warnings rowIndexTooLarge)
+    {
+        if (tombstones == localReadSize && tombstones == rowIndexTooLarge && tombstones == Warnings.EMPTY)
+            return EMPTY;
+        return new WarningsSnapshot(tombstones, localReadSize, rowIndexTooLarge);
+    }
+
+    public static WarningsSnapshot merge(WarningsSnapshot... values)
+    {
+        if (values == null || values.length == 0)
+            return null;
+
+        WarningsSnapshot accum = EMPTY;
+        for (WarningsSnapshot a : values)
+            accum = accum.merge(a);
+        return accum == EMPTY ? null : accum;
+    }
+
+    public boolean isEmpty()
+    {
+        return this == EMPTY;
+    }
+
+    public boolean isDefined()
+    {
+        return this != EMPTY;
+    }
+
+    @VisibleForTesting
+    WarningsSnapshot merge(WarningsSnapshot other)
+    {
+        if (other == null || other == EMPTY)
+            return this;
+        return WarningsSnapshot.create(tombstones.merge(other.tombstones), localReadSize.merge(other.localReadSize), rowIndexReadSize.merge(other.rowIndexReadSize));
+    }
+
+    public void maybeAbort(ReadCommand command, ConsistencyLevel cl, int received, int blockFor, boolean isDataPresent, Map<InetAddressAndPort, RequestFailureReason> failureReasonByEndpoint)
+    {
+        if (!tombstones.aborts.instances.isEmpty())
+            throw new TombstoneAbortException(tombstones.aborts.instances.size(), tombstones.aborts.maxValue, command.toCQLString(), isDataPresent,
+                                              cl, received, blockFor, failureReasonByEndpoint);
+
+        if (!localReadSize.aborts.instances.isEmpty())
+            throw new ReadSizeAbortException(localReadSizeAbortMessage(localReadSize.aborts.instances.size(), localReadSize.aborts.maxValue, command.toCQLString()),
+                                             cl, received, blockFor, isDataPresent, failureReasonByEndpoint);
+
+        if (!rowIndexReadSize.aborts.instances.isEmpty())
+            throw new ReadSizeAbortException(rowIndexReadSizeAbortMessage(rowIndexReadSize.aborts.instances.size(), rowIndexReadSize.aborts.maxValue, command.toCQLString()),
+                                             cl, received, blockFor, isDataPresent, failureReasonByEndpoint);
+    }
+
+    @VisibleForTesting
+    public static String tombstoneAbortMessage(int nodes, long tombstones, String cql)
+    {
+        return String.format("%s nodes scanned over %s tombstones and aborted the query %s (see tombstone_failure_threshold)", nodes, tombstones, cql);
+    }
+
+    @VisibleForTesting
+    public static String tombstoneWarnMessage(int nodes, long tombstones, String cql)
+    {
+        return String.format("%s nodes scanned up to %s tombstones and issued tombstone warnings for query %s  (see tombstone_warn_threshold)", nodes, tombstones, cql);
+    }
+
+    @VisibleForTesting
+    public static String localReadSizeAbortMessage(long nodes, long bytes, String cql)
+    {
+        return String.format("%s nodes loaded over %s bytes and aborted the query %s (see local_read_size_fail_threshold)", nodes, bytes, cql);
+    }
+
+    @VisibleForTesting
+    public static String localReadSizeWarnMessage(int nodes, long bytes, String cql)
+    {
+        return String.format("%s nodes loaded over %s bytes and issued local read size warnings for query %s  (see local_read_size_warn_threshold)", nodes, bytes, cql);
+    }
+
+    @VisibleForTesting
+    public static String rowIndexReadSizeAbortMessage(long nodes, long bytes, String cql)
+    {
+        return String.format("%s nodes loaded over %s bytes in RowIndexEntry and aborted the query %s (see row_index_size_fail_threshold)", nodes, bytes, cql);
+    }
+
+    @VisibleForTesting
+    public static String rowIndexSizeWarnMessage(int nodes, long bytes, String cql)
+    {
+        return String.format("%s nodes loaded over %s bytes in RowIndexEntry and issued warnings for query %s  (see row_index_size_warn_threshold)", nodes, bytes, cql);
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        WarningsSnapshot that = (WarningsSnapshot) o;
+        return Objects.equals(tombstones, that.tombstones) && Objects.equals(localReadSize, that.localReadSize) && Objects.equals(rowIndexReadSize, that.rowIndexReadSize);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(tombstones, localReadSize, rowIndexReadSize);
+    }
+
+    @Override
+    public String toString()
+    {
+        return "(tombstones=" + tombstones + ", localReadSize=" + localReadSize + ", rowIndexTooLarge=" + rowIndexReadSize + ')';
+    }
+
+    public static final class Warnings
+    {
+        private static final Warnings EMPTY = new Warnings(Counter.EMPTY, Counter.EMPTY);
+
+        public final Counter warnings;
+        public final Counter aborts;
+
+        private Warnings(Counter warnings, Counter aborts)
+        {
+            this.warnings = warnings;
+            this.aborts = aborts;
+        }
+
+        public static Warnings create(Counter warnings, Counter aborts)
+        {
+            if (warnings == Counter.EMPTY && aborts == Counter.EMPTY)
+                return EMPTY;
+            return new Warnings(warnings, aborts);
+        }
+
+        public Warnings merge(Warnings other)
+        {
+            if (other == EMPTY)
+                return this;
+            return Warnings.create(warnings.merge(other.warnings), aborts.merge(other.aborts));
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Warnings warnings1 = (Warnings) o;
+            return Objects.equals(warnings, warnings1.warnings) && Objects.equals(aborts, warnings1.aborts);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hash(warnings, aborts);
+        }
+
+        @Override
+        public String toString()
+        {
+            return "(warnings=" + warnings + ", aborts=" + aborts + ')';
+        }
+    }
+
+    public static final class Counter
+    {
+        private static final Counter EMPTY = new Counter(ImmutableSet.of(), 0);
+
+        public final ImmutableSet<InetAddressAndPort> instances;
+        public final long maxValue;
+
+        @VisibleForTesting
+        Counter(ImmutableSet<InetAddressAndPort> instances, long maxValue)
+        {
+            this.instances = instances;
+            this.maxValue = maxValue;
+        }
+
+        @VisibleForTesting
+        static Counter empty()
+        {
+            return EMPTY;
+        }
+
+        public static Counter create(Set<InetAddressAndPort> instances, AtomicLong maxValue)
+        {
+            ImmutableSet<InetAddressAndPort> copy = ImmutableSet.copyOf(instances);
+            // if instances is empty, ignore the value
+            // writes and reads are concurrent (write = networking callback, read = coordinator thread), so there is
+            // an edge case where instances is empty while maxValue > 0; this happens because the value is updated before the instance set
+            // we write: value then instance
+            // we read: instance then value
+            if (copy.isEmpty())
+                return EMPTY;
+            return new Counter(copy, maxValue.get());
+        }
+
+        public Counter merge(Counter other)
+        {
+            if (other == EMPTY)
+                return this;
+            ImmutableSet<InetAddressAndPort> copy = ImmutableSet.<InetAddressAndPort>builder()
+                                                    .addAll(instances)
+                                                    .addAll(other.instances)
+                                                    .build();
+            // since other is NOT empty, the output cannot be empty, so skip the create method
+            return new Counter(copy, Math.max(maxValue, other.maxValue));
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Counter counter = (Counter) o;
+            return maxValue == counter.maxValue && Objects.equals(instances, counter.instances);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hash(instances, maxValue);
+        }
+
+        @Override
+        public String toString()
+        {
+            return "(" + instances + ", " + maxValue + ')';
+        }
+    }
+
+    @VisibleForTesting
+    static Builder builder()
+    {
+        return new Builder();
+    }
+
+    @VisibleForTesting
+    public static final class Builder
+    {
+        private WarningsSnapshot snapshot = empty();
+
+        public Builder tombstonesWarning(ImmutableSet<InetAddressAndPort> instances, long maxValue)
+        {
+            return tombstonesWarning(new Counter(Objects.requireNonNull(instances), maxValue));
+        }
+
+        public Builder tombstonesWarning(Counter counter)
+        {
+            Objects.requireNonNull(counter);
+            snapshot = snapshot.merge(new WarningsSnapshot(new Warnings(counter, Counter.EMPTY), Warnings.EMPTY, Warnings.EMPTY));
+            return this;
+        }
+
+        public Builder tombstonesAbort(ImmutableSet<InetAddressAndPort> instances, long maxValue)
+        {
+            return tombstonesAbort(new Counter(Objects.requireNonNull(instances), maxValue));
+        }
+
+        public Builder tombstonesAbort(Counter counter)
+        {
+            Objects.requireNonNull(counter);
+            snapshot = snapshot.merge(new WarningsSnapshot(new Warnings(Counter.EMPTY, counter), Warnings.EMPTY, Warnings.EMPTY));
+            return this;
+        }
+
+        public Builder localReadSizeWarning(ImmutableSet<InetAddressAndPort> instances, long maxValue)
+        {
+            return localReadSizeWarning(new Counter(Objects.requireNonNull(instances), maxValue));
+        }
+
+        public Builder localReadSizeWarning(Counter counter)
+        {
+            Objects.requireNonNull(counter);
+            snapshot = snapshot.merge(new WarningsSnapshot(Warnings.EMPTY, new Warnings(counter, Counter.EMPTY), Warnings.EMPTY));
+            return this;
+        }
+
+        public Builder localReadSizeAbort(ImmutableSet<InetAddressAndPort> instances, long maxValue)
+        {
+            return localReadSizeAbort(new Counter(Objects.requireNonNull(instances), maxValue));
+        }
+
+        public Builder localReadSizeAbort(Counter counter)
+        {
+            Objects.requireNonNull(counter);
+            snapshot = snapshot.merge(new WarningsSnapshot(Warnings.EMPTY, new Warnings(Counter.EMPTY, counter), Warnings.EMPTY));
+            return this;
+        }
+
+        public Builder rowIndexSizeWarning(Counter counter)
+        {
+            Objects.requireNonNull(counter);
+            snapshot = snapshot.merge(new WarningsSnapshot(Warnings.EMPTY, Warnings.EMPTY, new Warnings(counter, Counter.EMPTY)));
+            return this;
+        }
+
+        public Builder rowIndexSizeAbort(Counter counter)
+        {
+            Objects.requireNonNull(counter);
+            snapshot = snapshot.merge(new WarningsSnapshot(Warnings.EMPTY, Warnings.EMPTY, new Warnings(Counter.EMPTY, counter)));
+            return this;
+        }
+
+        public WarningsSnapshot build()
+        {
+            return snapshot;
+        }
+    }
+}
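
For orientation, a test-style sketch of the Builder and merge() semantics above. builder() is package-private, so this assumes the same package; the addresses and values are placeholders.

    package org.apache.cassandra.service.reads.thresholds;

    import com.google.common.collect.ImmutableSet;

    import org.apache.cassandra.locator.InetAddressAndPort;

    final class WarningsSnapshotBuilderSketch
    {
        static WarningsSnapshot example() throws Exception
        {
            InetAddressAndPort node1 = InetAddressAndPort.getByName("127.0.0.1");
            InetAddressAndPort node2 = InetAddressAndPort.getByName("127.0.0.2");

            WarningsSnapshot a = WarningsSnapshot.builder()
                                                 .tombstonesWarning(ImmutableSet.of(node1), 1000)
                                                 .build();
            WarningsSnapshot b = WarningsSnapshot.builder()
                                                 .tombstonesAbort(ImmutableSet.of(node2), 5000)
                                                 .build();

            // merge() unions the instance sets and keeps the max reported value per counter;
            // it returns null when every input is empty.
            return WarningsSnapshot.merge(a, b);
        }
    }
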
diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java
new file mode 100644
index 0000000..523e1e5
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.snapshot;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.io.util.File;
+
+import static org.apache.cassandra.db.Directories.SNAPSHOT_SUBDIR;
+import static org.apache.cassandra.service.snapshot.TableSnapshot.buildSnapshotId;
+
+/**
+ * Loads snapshot metadata from data directories
+ */
+public class SnapshotLoader
+{
+    private static final Logger logger = LoggerFactory.getLogger(SnapshotLoader.class);
+
+    static final Pattern SNAPSHOT_DIR_PATTERN = Pattern.compile("(?<keyspace>\\w+)/(?<tableName>\\w+)-(?<tableId>[0-9a-f]{32})/snapshots/(?<tag>.+)$");
+
+    private final Collection<Path> dataDirectories;
+
+    public SnapshotLoader()
+    {
+        this(DatabaseDescriptor.getAllDataFileLocations());
+    }
+
+    public SnapshotLoader(String[] dataDirectories)
+    {
+        this(Arrays.stream(dataDirectories).map(File::getPath).collect(Collectors.toList()));
+    }
+
+    public SnapshotLoader(Collection<Path> dataDirs)
+    {
+        this.dataDirectories = dataDirs;
+    }
+
+    @VisibleForTesting
+    static class Visitor extends SimpleFileVisitor<Path>
+    {
+        private static final Pattern UUID_PATTERN = Pattern.compile("([0-9a-f]{8})([0-9a-f]{4})([0-9a-f]{4})([0-9a-f]{4})([0-9a-f]+)");
+        private final Map<String, TableSnapshot.Builder> snapshots;
+
+        public Visitor(Map<String, TableSnapshot.Builder> snapshots)
+        {
+            this.snapshots = snapshots;
+        }
+
+        @Override
+        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException
+        {
+            // Cassandra can remove files while we are traversing the tree,
+            // for example when SSTables are compacted during the walk.
+            // SnapshotLoader only cares about SSTables in snapshot directories, which are never compacted,
+            // but the walk also crosses regular table directories, so files may disappear under us there.
+            // If the listing fails with a NoSuchFileException, we simply skip
+            // that file and continue with the walk.
+            if (exc instanceof NoSuchFileException)
+                return FileVisitResult.CONTINUE;
+            else
+                throw exc;
+        }
+
+        @Override
+        public FileVisitResult preVisitDirectory(Path subdir, BasicFileAttributes attrs)
+        {
+            // see CASSANDRA-18359
+            if (subdir.getParent() == null || subdir.getParent().getFileName() == null)
+                return FileVisitResult.CONTINUE;
+
+            if (subdir.getParent().getFileName().toString().equals(SNAPSHOT_SUBDIR))
+            {
+                logger.trace("Processing directory " + subdir);
+                Matcher snapshotDirMatcher = SNAPSHOT_DIR_PATTERN.matcher(subdir.toString());
+                if (snapshotDirMatcher.find())
+                {
+                    try
+                    {
+                        loadSnapshotFromDir(snapshotDirMatcher, subdir);
+                    }
+                    catch (Throwable e)
+                    {
+                        logger.warn("Could not load snapshot from {}.", subdir, e);
+                    }
+                }
+                return FileVisitResult.SKIP_SUBTREE;
+            }
+
+            return subdir.getFileName().toString().equals(Directories.BACKUPS_SUBDIR)
+                   ? FileVisitResult.SKIP_SUBTREE
+                   : FileVisitResult.CONTINUE;
+        }
+
+        /**
+         * Given a UUID string without dashes (e.g. c7e513243f0711ec9bbc0242ac130002),
+         * return the corresponding UUID object (e.g. c7e51324-3f07-11ec-9bbc-0242ac130002).
+         */
+        static UUID parseUUID(String uuidWithoutDashes) throws IllegalArgumentException
+        {
+            assert uuidWithoutDashes.length() == 32 && !uuidWithoutDashes.contains("-");
+            String dashedUUID = UUID_PATTERN.matcher(uuidWithoutDashes).replaceFirst("$1-$2-$3-$4-$5");
+            return UUID.fromString(dashedUUID);
+        }
+
+        private void loadSnapshotFromDir(Matcher snapshotDirMatcher, Path snapshotDir)
+        {
+            String keyspaceName = snapshotDirMatcher.group("keyspace");
+            String tableName = snapshotDirMatcher.group("tableName");
+            UUID tableId = parseUUID(snapshotDirMatcher.group("tableId"));
+            String tag = snapshotDirMatcher.group("tag");
+            String snapshotId = buildSnapshotId(keyspaceName, tableName, tableId, tag);
+            TableSnapshot.Builder builder = snapshots.computeIfAbsent(snapshotId, k -> new TableSnapshot.Builder(keyspaceName, tableName, tableId, tag));
+            builder.addSnapshotDir(new File(snapshotDir));
+        }
+    }
+
+    public Set<TableSnapshot> loadSnapshots()
+    {
+        Map<String, TableSnapshot.Builder> snapshots = new HashMap<>();
+        Visitor visitor = new Visitor(snapshots);
+
+        for (Path dataDir : dataDirectories)
+        {
+            try
+            {
+                if (new File(dataDir).exists())
+                    Files.walkFileTree(dataDir, Collections.emptySet(), 5, visitor);
+                else
+                    logger.debug("Skipping non-existing data directory {}", dataDir);
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(String.format("Error while loading snapshots from %s", dataDir), e);
+            }
+        }
+
+        return snapshots.values().stream().map(TableSnapshot.Builder::build).collect(Collectors.toSet());
+    }
+}
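
A standalone illustration of the dash re-insertion performed by Visitor.parseUUID above, using the same regex and an example table id; the class name is illustrative.

    import java.util.UUID;
    import java.util.regex.Pattern;

    final class DashlessUuidSketch
    {
        // 8-4-4-4-12 hex groups are split back out of the 32-character table id
        private static final Pattern UUID_PATTERN =
            Pattern.compile("([0-9a-f]{8})([0-9a-f]{4})([0-9a-f]{4})([0-9a-f]{4})([0-9a-f]+)");

        static UUID parse(String uuidWithoutDashes)
        {
            String dashed = UUID_PATTERN.matcher(uuidWithoutDashes).replaceFirst("$1-$2-$3-$4-$5");
            return UUID.fromString(dashed);
        }

        public static void main(String[] args)
        {
            // e.g. the id embedded in a table directory named "tbl-e03faca0813211eca100c705ea09b5ef"
            System.out.println(parse("e03faca0813211eca100c705ea09b5ef"));
            // -> e03faca0-8132-11ec-a100-c705ea09b5ef
        }
    }
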
diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java
new file mode 100644
index 0000000..8e630c7
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.snapshot;
+
+
+import java.util.Collection;
+import java.util.PriorityQueue;
+import java.util.Set;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Directories;
+
+import java.util.concurrent.TimeoutException;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.utils.ExecutorUtils;
+
+import static java.util.Comparator.comparing;
+import static java.util.stream.Collectors.toList;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.FBUtilities.now;
+
+public class SnapshotManager
+{
+    private static final ScheduledExecutorPlus executor = executorFactory().scheduled(false, "SnapshotCleanup");
+
+    private static final Logger logger = LoggerFactory.getLogger(SnapshotManager.class);
+
+    private final long initialDelaySeconds;
+    private final long cleanupPeriodSeconds;
+    private final SnapshotLoader snapshotLoader;
+
+    @VisibleForTesting
+    protected volatile ScheduledFuture<?> cleanupTaskFuture;
+
+    /**
+     * Expiring snapshots ordered by expiration date, so that {@link #clearExpiredSnapshots()} only
+     * needs to iterate over the snapshots that are actually due for removal.
+     */
+    private final PriorityQueue<TableSnapshot> expiringSnapshots = new PriorityQueue<>(comparing(TableSnapshot::getExpiresAt));
+
+    public SnapshotManager()
+    {
+        this(CassandraRelevantProperties.SNAPSHOT_CLEANUP_INITIAL_DELAY_SECONDS.getInt(),
+             CassandraRelevantProperties.SNAPSHOT_CLEANUP_PERIOD_SECONDS.getInt());
+    }
+
+    @VisibleForTesting
+    protected SnapshotManager(long initialDelaySeconds, long cleanupPeriodSeconds)
+    {
+        this.initialDelaySeconds = initialDelaySeconds;
+        this.cleanupPeriodSeconds = cleanupPeriodSeconds;
+        snapshotLoader = new SnapshotLoader(DatabaseDescriptor.getAllDataFileLocations());
+    }
+
+    public Collection<TableSnapshot> getExpiringSnapshots()
+    {
+        return expiringSnapshots;
+    }
+
+    public synchronized void start()
+    {
+        addSnapshots(loadSnapshots());
+        resumeSnapshotCleanup();
+    }
+
+    public synchronized void stop() throws InterruptedException, TimeoutException
+    {
+        expiringSnapshots.clear();
+        if (cleanupTaskFuture != null)
+        {
+            cleanupTaskFuture.cancel(false);
+            cleanupTaskFuture = null;
+        }
+    }
+
+    public synchronized void addSnapshot(TableSnapshot snapshot)
+    {
+        // We currently only care about expiring snapshots
+        if (snapshot.isExpiring())
+        {
+            logger.debug("Adding expiring snapshot {}", snapshot);
+            expiringSnapshots.add(snapshot);
+        }
+    }
+
+    public synchronized Set<TableSnapshot> loadSnapshots()
+    {
+        return snapshotLoader.loadSnapshots();
+    }
+
+    @VisibleForTesting
+    protected synchronized void addSnapshots(Collection<TableSnapshot> snapshots)
+    {
+        logger.debug("Adding snapshots: {}.", Joiner.on(", ").join(snapshots.stream().map(TableSnapshot::getId).collect(toList())));
+        snapshots.forEach(this::addSnapshot);
+    }
+
+    // TODO: Support pausing snapshot cleanup
+    @VisibleForTesting
+    synchronized void resumeSnapshotCleanup()
+    {
+        if (cleanupTaskFuture == null)
+        {
+            logger.info("Scheduling expired snapshot cleanup with initialDelaySeconds={} and cleanupPeriodSeconds={}",
+                        initialDelaySeconds, cleanupPeriodSeconds);
+            cleanupTaskFuture = executor.scheduleWithFixedDelay(this::clearExpiredSnapshots, initialDelaySeconds,
+                                                                cleanupPeriodSeconds, TimeUnit.SECONDS);
+        }
+    }
+
+    @VisibleForTesting
+    protected synchronized void clearExpiredSnapshots()
+    {
+        TableSnapshot expiredSnapshot;
+        while ((expiredSnapshot = expiringSnapshots.peek()) != null)
+        {
+            if (!expiredSnapshot.isExpired(now()))
+                break; // the earliest expiring snapshot is not expired yet, so there are no more expired snapshots to remove
+
+            logger.debug("Removing expired snapshot {}.", expiredSnapshot);
+            clearSnapshot(expiredSnapshot);
+        }
+    }
+
+    public synchronized void clearSnapshot(TableSnapshot snapshot)
+    {
+        for (File snapshotDir : snapshot.getDirectories())
+            Directories.removeSnapshotDirectory(DatabaseDescriptor.getSnapshotRateLimiter(), snapshotDir);
+
+        expiringSnapshots.remove(snapshot);
+    }
+
+    @VisibleForTesting
+    public static void shutdownAndWait(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
+    {
+        ExecutorUtils.shutdownNowAndWait(timeout, unit, executor);
+    }
+}
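
The clearExpiredSnapshots() loop above relies on the priority queue being ordered by expiration time, so it can stop at the first entry that has not expired yet. A minimal JDK-only sketch of that pattern (names are illustrative; the real code also deletes the snapshot directories via clearSnapshot):

    import java.time.Instant;
    import java.util.PriorityQueue;

    import static java.util.Comparator.comparing;

    final class ExpiryQueueSketch
    {
        static final class Entry
        {
            final String name;
            final Instant expiresAt;
            Entry(String name, Instant expiresAt) { this.name = name; this.expiresAt = expiresAt; }
        }

        private final PriorityQueue<Entry> queue = new PriorityQueue<>(comparing((Entry e) -> e.expiresAt));

        void add(String name, Instant expiresAt)
        {
            queue.add(new Entry(name, expiresAt));
        }

        void removeExpired(Instant now)
        {
            Entry head;
            while ((head = queue.peek()) != null)
            {
                if (head.expiresAt.isAfter(now))
                    break;      // the earliest entry is still valid, so everything behind it is too
                queue.poll();   // drop the expired entry
            }
        }
    }
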
diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
new file mode 100644
index 0000000..ba840ef
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.snapshot;
+
+import java.io.*;
+import java.time.Instant;
+import java.util.List;
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.cassandra.config.DurationSpec;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.utils.FBUtilities;
+
+// Only serialize fields
+@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY,
+                getterVisibility = JsonAutoDetect.Visibility.NONE,
+                setterVisibility = JsonAutoDetect.Visibility.NONE)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class SnapshotManifest
+{
+    @JsonProperty("files")
+    public final List<String> files;
+
+    @JsonProperty("created_at")
+    public final Instant createdAt;
+
+    @JsonProperty("expires_at")
+    public final Instant expiresAt;
+
+    /** needed by Jackson for deserialization */
+    @SuppressWarnings("unused")
+    private SnapshotManifest() {
+        this.files = null;
+        this.createdAt = null;
+        this.expiresAt = null;
+    }
+
+    public SnapshotManifest(List<String> files, DurationSpec.IntSecondsBound ttl, Instant creationTime)
+    {
+        this.files = files;
+        this.createdAt = creationTime;
+        this.expiresAt = ttl == null ? null : createdAt.plusSeconds(ttl.toSeconds());
+    }
+
+    public List<String> getFiles()
+    {
+        return files;
+    }
+
+    public Instant getCreatedAt()
+    {
+        return createdAt;
+    }
+
+    public Instant getExpiresAt()
+    {
+        return expiresAt;
+    }
+
+    public void serializeToJsonFile(File outputFile) throws IOException
+    {
+        FBUtilities.serializeToJsonFile(this, outputFile);
+    }
+
+    public static SnapshotManifest deserializeFromJsonFile(File file) throws IOException
+    {
+        return FBUtilities.deserializeFromJsonFile(SnapshotManifest.class, file);
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        SnapshotManifest manifest = (SnapshotManifest) o;
+        return Objects.equals(files, manifest.files) && Objects.equals(createdAt, manifest.createdAt) && Objects.equals(expiresAt, manifest.expiresAt);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(files, createdAt, expiresAt);
+    }
+}
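
SnapshotManifest derives expires_at as created_at plus the TTL in seconds, and the expiry check elsewhere is a plain Instant comparison. A tiny worked example of that arithmetic (the values are made up):

    import java.time.Instant;

    final class SnapshotTtlSketch
    {
        public static void main(String[] args)
        {
            Instant createdAt = Instant.parse("2022-01-01T00:00:00Z");
            long ttlSeconds = 86400;                                // e.g. a one-day TTL

            Instant expiresAt = createdAt.plusSeconds(ttlSeconds);  // 2022-01-02T00:00:00Z
            Instant now = Instant.parse("2022-01-02T00:00:01Z");

            boolean expired = expiresAt.compareTo(now) < 0;         // mirrors TableSnapshot.isExpired(now)
            System.out.println(expiresAt + " expired=" + expired);  // expired=true
        }
    }
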
diff --git a/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
new file mode 100644
index 0000000..0cfcfea
--- /dev/null
+++ b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.snapshot;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.DirectorySizeCalculator;
+
+public class TableSnapshot
+{
+    private static final Logger logger = LoggerFactory.getLogger(TableSnapshot.class);
+
+    private final String keyspaceName;
+    private final String tableName;
+    private final UUID tableId;
+    private final String tag;
+
+    private final Instant createdAt;
+    private final Instant expiresAt;
+
+    private final Set<File> snapshotDirs;
+
+    public TableSnapshot(String keyspaceName, String tableName, UUID tableId,
+                         String tag, Instant createdAt, Instant expiresAt,
+                         Set<File> snapshotDirs)
+    {
+        this.keyspaceName = keyspaceName;
+        this.tableName = tableName;
+        this.tableId = tableId;
+        this.tag = tag;
+        this.createdAt = createdAt;
+        this.expiresAt = expiresAt;
+        this.snapshotDirs = snapshotDirs;
+    }
+
+    /**
+     * Unique identifier of a snapshot. Used
+     * only to deduplicate snapshots internally,
+     * not exposed externally.
+     *
+     * Format: "$ks:$table_name:$table_id:$tag"
+     */
+    public String getId()
+    {
+        return buildSnapshotId(keyspaceName, tableName, tableId, tag);
+    }
+
+    public String getKeyspaceName()
+    {
+        return keyspaceName;
+    }
+
+    public String getTableName()
+    {
+        return tableName;
+    }
+
+    public String getTag()
+    {
+        return tag;
+    }
+
+    public Instant getCreatedAt()
+    {
+        if (createdAt == null)
+        {
+            long minCreation = snapshotDirs.stream().mapToLong(File::lastModified).min().orElse(0);
+            if (minCreation != 0)
+            {
+                return Instant.ofEpochMilli(minCreation);
+            }
+        }
+        return createdAt;
+    }
+
+    public Instant getExpiresAt()
+    {
+        return expiresAt;
+    }
+
+    public boolean isExpired(Instant now)
+    {
+        if (createdAt == null || expiresAt == null)
+        {
+            return false;
+        }
+
+        return expiresAt.compareTo(now) < 0;
+    }
+
+    public boolean exists()
+    {
+        return snapshotDirs.stream().anyMatch(File::exists);
+    }
+
+    public boolean isExpiring()
+    {
+        return expiresAt != null;
+    }
+
+    public long computeSizeOnDiskBytes()
+    {
+        return snapshotDirs.stream().mapToLong(FileUtils::folderSize).sum();
+    }
+
+    public long computeTrueSizeBytes()
+    {
+        DirectorySizeCalculator visitor = new SnapshotTrueSizeCalculator();
+
+        for (File snapshotDir : snapshotDirs)
+        {
+            try
+            {
+                Files.walkFileTree(snapshotDir.toPath(), visitor);
+            }
+            catch (IOException e)
+            {
+                logger.error("Could not calculate the size of {}.", snapshotDir, e);
+            }
+        }
+
+        return visitor.getAllocatedSize();
+    }
+
+    public Collection<File> getDirectories()
+    {
+        return snapshotDirs;
+    }
+
+    public Optional<File> getManifestFile()
+    {
+        for (File snapshotDir : snapshotDirs)
+        {
+            File manifestFile = Directories.getSnapshotManifestFile(snapshotDir);
+            if (manifestFile.exists())
+            {
+                return Optional.of(manifestFile);
+            }
+        }
+        return Optional.empty();
+    }
+
+    public Optional<File> getSchemaFile()
+    {
+        for (File snapshotDir : snapshotDirs)
+        {
+            File schemaFile = Directories.getSnapshotSchemaFile(snapshotDir);
+            if (schemaFile.exists())
+            {
+                return Optional.of(schemaFile);
+            }
+        }
+        return Optional.empty();
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        TableSnapshot snapshot = (TableSnapshot) o;
+        return Objects.equals(keyspaceName, snapshot.keyspaceName) && Objects.equals(tableName, snapshot.tableName) &&
+               Objects.equals(tableId, snapshot.tableId) && Objects.equals(tag, snapshot.tag) &&
+               Objects.equals(createdAt, snapshot.createdAt) && Objects.equals(expiresAt, snapshot.expiresAt) &&
+               Objects.equals(snapshotDirs, snapshot.snapshotDirs);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(keyspaceName, tableName, tableId, tag, createdAt, expiresAt, snapshotDirs);
+    }
+
+    @Override
+    public String toString()
+    {
+        return "TableSnapshot{" +
+               "keyspaceName='" + keyspaceName + '\'' +
+               ", tableName='" + tableName + '\'' +
+               ", tableId=" + tableId +
+               ", tag='" + tag + '\'' +
+               ", createdAt=" + createdAt +
+               ", expiresAt=" + expiresAt +
+               ", snapshotDirs=" + snapshotDirs +
+               '}';
+    }
+
+    static class Builder {
+        private final String keyspaceName;
+        private final String tableName;
+        private final UUID tableId;
+        private final String tag;
+
+        private Instant createdAt = null;
+        private Instant expiresAt = null;
+
+        private final Set<File> snapshotDirs = new HashSet<>();
+
+        Builder(String keyspaceName, String tableName, UUID tableId, String tag)
+        {
+            this.keyspaceName = keyspaceName;
+            this.tableName = tableName;
+            this.tag = tag;
+            this.tableId = tableId;
+        }
+
+        void addSnapshotDir(File snapshotDir)
+        {
+            snapshotDirs.add(snapshotDir);
+            File manifestFile = new File(snapshotDir, "manifest.json");
+            if (manifestFile.exists() && createdAt == null && expiresAt == null) {
+                loadTimestampsFromManifest(manifestFile);
+            }
+        }
+
+        private void loadTimestampsFromManifest(File manifestFile)
+        {
+            try
+            {
+                logger.trace("Loading snapshot manifest from {}", manifestFile);
+                SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile);
+                createdAt = manifest.createdAt;
+                expiresAt = manifest.expiresAt;
+            }
+            catch (IOException e)
+            {
+                logger.warn("Cannot read manifest file {} of snapshot {}.", manifestFile, tag, e);
+            }
+        }
+
+        TableSnapshot build()
+        {
+            return new TableSnapshot(keyspaceName, tableName, tableId, tag, createdAt, expiresAt, snapshotDirs);
+        }
+    }
+
+    protected static String buildSnapshotId(String keyspaceName, String tableName, UUID tableId, String tag)
+    {
+        return String.format("%s:%s:%s:%s", keyspaceName, tableName, tableId, tag);
+    }
+
+    public static class SnapshotTrueSizeCalculator extends DirectorySizeCalculator
+    {
+        /**
+         * Snapshots are composed of hard-linked sstables. The true snapshot size should only include
+         * snapshot files that no longer have a corresponding "live" sstable file.
+         */
+        @Override
+        public boolean isAcceptable(Path snapshotFilePath)
+        {
+            return !getLiveFileFromSnapshotFile(snapshotFilePath).exists();
+        }
+    }
+
+    /**
+     * Returns the corresponding live file for a given snapshot file.
+     *
+     * Example:
+     *  - Base table:
+     *    - Snapshot file: ~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/snapshots/1643481737850/me-1-big-Data.db
+     *    - Live file: ~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/me-1-big-Data.db
+     *  - Secondary index:
+     *    - Snapshot file: ~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/snapshots/1643481737850/.tbl_val_idx/me-1-big-Summary.db
+     *    - Live file: ~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/.tbl_val_idx/me-1-big-Summary.db
+     *
+     */
+    static File getLiveFileFromSnapshotFile(Path snapshotFilePath)
+    {
+        // Snapshot directory structure format is {data_dir}/snapshots/{snapshot_name}/{snapshot_file}
+        Path liveDir = snapshotFilePath.getParent().getParent().getParent();
+        if (Directories.isSecondaryIndexFolder(snapshotFilePath.getParent()))
+        {
+            // Snapshot file structure format is {data_dir}/snapshots/{snapshot_name}/.{index}/{sstable-component}.db
+            liveDir = File.getPath(liveDir.getParent().toString(), snapshotFilePath.getParent().getFileName().toString());
+        }
+        return new File(liveDir.toString(), snapshotFilePath.getFileName().toString());
+    }
+}
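
getLiveFileFromSnapshotFile() above is pure path arithmetic: the live sstable sits three directory levels above the snapshot file, with the ".index" component re-attached for secondary indexes. A JDK-only sketch of the same mapping, fed with the paths from the javadoc (class and helper names are illustrative):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    final class SnapshotToLiveSketch
    {
        static Path liveFileFor(Path snapshotFile, boolean secondaryIndex)
        {
            // {table_dir}/snapshots/{tag}/[.{index}/]{component}.db -> strip the "snapshots/{tag}" part
            Path liveDir = snapshotFile.getParent().getParent().getParent();
            if (secondaryIndex)
                liveDir = liveDir.getParent().resolve(snapshotFile.getParent().getFileName());
            return liveDir.resolve(snapshotFile.getFileName());
        }

        public static void main(String[] args)
        {
            Path table = Paths.get("data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef");
            System.out.println(liveFileFor(table.resolve("snapshots/1643481737850/me-1-big-Data.db"), false));
            // -> data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/me-1-big-Data.db
            System.out.println(liveFileFor(table.resolve("snapshots/1643481737850/.tbl_val_idx/me-1-big-Summary.db"), true));
            // -> data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/.tbl_val_idx/me-1-big-Summary.db
        }
    }
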
diff --git a/src/java/org/apache/cassandra/streaming/DefaultConnectionFactory.java b/src/java/org/apache/cassandra/streaming/DefaultConnectionFactory.java
deleted file mode 100644
index 5f2163f..0000000
--- a/src/java/org/apache/cassandra/streaming/DefaultConnectionFactory.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.streaming;
-
-import java.io.IOException;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoop;
-import io.netty.util.concurrent.Future;
-import org.apache.cassandra.net.ConnectionCategory;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.net.OutboundConnectionInitiator.Result;
-import org.apache.cassandra.net.OutboundConnectionInitiator.Result.StreamingSuccess;
-import org.apache.cassandra.net.OutboundConnectionSettings;
-
-import static org.apache.cassandra.net.OutboundConnectionInitiator.initiateStreaming;
-
-public class DefaultConnectionFactory implements StreamConnectionFactory
-{
-    @VisibleForTesting
-    public static int MAX_CONNECT_ATTEMPTS = 3;
-
-    @Override
-    public Channel createConnection(OutboundConnectionSettings template, int messagingVersion) throws IOException
-    {
-        EventLoop eventLoop = MessagingService.instance().socketFactory.outboundStreamingGroup().next();
-
-        int attempts = 0;
-        while (true)
-        {
-            Future<Result<StreamingSuccess>> result = initiateStreaming(eventLoop, template.withDefaults(ConnectionCategory.STREAMING), messagingVersion);
-            result.awaitUninterruptibly(); // initiate has its own timeout, so this is "guaranteed" to return relatively promptly
-            if (result.isSuccess())
-                return result.getNow().success().channel;
-
-            if (++attempts == MAX_CONNECT_ATTEMPTS)
-                throw new IOException("failed to connect to " + template.to + " for streaming data", result.cause());
-        }
-    }
-}
diff --git a/src/java/org/apache/cassandra/streaming/IncomingStream.java b/src/java/org/apache/cassandra/streaming/IncomingStream.java
index 0733249..25ab626 100644
--- a/src/java/org/apache/cassandra/streaming/IncomingStream.java
+++ b/src/java/org/apache/cassandra/streaming/IncomingStream.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.streaming;
 
-import java.io.IOException;
-
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.schema.TableId;
 
@@ -37,7 +35,7 @@
     /**
      * Read in the stream data.
      */
-    void read(DataInputPlus inputPlus, int version) throws IOException;
+    void read(DataInputPlus inputPlus, int version) throws Throwable;
 
     String getName();
     long getSize();
diff --git a/src/java/org/apache/cassandra/streaming/OutgoingStream.java b/src/java/org/apache/cassandra/streaming/OutgoingStream.java
index 546462d..cc42ab6 100644
--- a/src/java/org/apache/cassandra/streaming/OutgoingStream.java
+++ b/src/java/org/apache/cassandra/streaming/OutgoingStream.java
@@ -19,10 +19,9 @@
 package org.apache.cassandra.streaming;
 
 import java.io.IOException;
-import java.util.UUID;
 
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Some subset of data to be streamed. Implementations handle writing out their data via the write method.
@@ -36,7 +35,7 @@
     /**
      * Write the streams data into the socket
      */
-    void write(StreamSession session, DataOutputStreamPlus output, int version) throws IOException;
+    void write(StreamSession session, StreamingDataOutputPlus output, int version) throws IOException;
 
     /**
      * Release any resources held by the stream
@@ -44,7 +43,7 @@
     void finish();
 
     long getRepairedAt();
-    UUID getPendingRepair();
+    TimeUUID getPendingRepair();
 
     String getName();
 
diff --git a/src/java/org/apache/cassandra/streaming/PreviewKind.java b/src/java/org/apache/cassandra/streaming/PreviewKind.java
index 3d0bff9..8d7ad8a 100644
--- a/src/java/org/apache/cassandra/streaming/PreviewKind.java
+++ b/src/java/org/apache/cassandra/streaming/PreviewKind.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.streaming;
 
-import java.util.UUID;
-
 import com.google.common.base.Predicate;
 import com.google.common.base.Predicates;
 
@@ -29,6 +27,7 @@
 import org.apache.cassandra.repair.consistent.ConsistentSession;
 import org.apache.cassandra.repair.consistent.LocalSession;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 public enum PreviewKind
 {
@@ -69,7 +68,7 @@
         return isPreview() ? "preview repair" : "repair";
     }
 
-    public String logPrefix(UUID sessionId)
+    public String logPrefix(TimeUUID sessionId)
     {
         return '[' + logPrefix() + " #" + sessionId.toString() + ']';
     }
diff --git a/src/java/org/apache/cassandra/streaming/ProgressInfo.java b/src/java/org/apache/cassandra/streaming/ProgressInfo.java
index 2b306f8..2ed78ac 100644
--- a/src/java/org/apache/cassandra/streaming/ProgressInfo.java
+++ b/src/java/org/apache/cassandra/streaming/ProgressInfo.java
@@ -54,9 +54,11 @@
     public final String fileName;
     public final Direction direction;
     public final long currentBytes;
+    public final long deltaBytes; // change from previous ProgressInfo
     public final long totalBytes;
 
-    public ProgressInfo(InetAddressAndPort peer, int sessionIndex, String fileName, Direction direction, long currentBytes, long totalBytes)
+    public ProgressInfo(InetAddressAndPort peer, int sessionIndex, String fileName, Direction direction,
+                        long currentBytes, long deltaBytes, long totalBytes)
     {
         assert totalBytes > 0;
 
@@ -65,6 +67,7 @@
         this.fileName = fileName;
         this.direction = direction;
         this.currentBytes = currentBytes;
+        this.deltaBytes = deltaBytes;
         this.totalBytes = totalBytes;
     }
 
diff --git a/src/java/org/apache/cassandra/streaming/SessionInfo.java b/src/java/org/apache/cassandra/streaming/SessionInfo.java
index c77e90d..d95d85b 100644
--- a/src/java/org/apache/cassandra/streaming/SessionInfo.java
+++ b/src/java/org/apache/cassandra/streaming/SessionInfo.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.streaming;
 
 import java.io.Serializable;
+import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -26,7 +27,6 @@
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 
-import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
 
 /**
@@ -34,9 +34,9 @@
  */
 public final class SessionInfo implements Serializable
 {
-    public final InetAddressAndPort peer;
+    public final InetSocketAddress peer;
     public final int sessionIndex;
-    public final InetAddressAndPort connecting;
+    public final InetSocketAddress connecting;
     /** Immutable collection of receiving summaries */
     public final Collection<StreamSummary> receivingSummaries;
     /** Immutable collection of sending summaries*/
@@ -44,12 +44,12 @@
     /** Current session state */
     public final StreamSession.State state;
 
-    private final Map<String, ProgressInfo> receivingFiles;
-    private final Map<String, ProgressInfo> sendingFiles;
+    private final Map<String, ProgressInfo> receivingFiles = new ConcurrentHashMap<>();
+    private final Map<String, ProgressInfo> sendingFiles = new ConcurrentHashMap<>();
 
-    public SessionInfo(InetAddressAndPort peer,
+    public SessionInfo(InetSocketAddress peer,
                        int sessionIndex,
-                       InetAddressAndPort connecting,
+                       InetSocketAddress connecting,
                        Collection<StreamSummary> receivingSummaries,
                        Collection<StreamSummary> sendingSummaries,
                        StreamSession.State state)
@@ -59,11 +59,14 @@
         this.connecting = connecting;
         this.receivingSummaries = ImmutableSet.copyOf(receivingSummaries);
         this.sendingSummaries = ImmutableSet.copyOf(sendingSummaries);
-        this.receivingFiles = new ConcurrentHashMap<>();
-        this.sendingFiles = new ConcurrentHashMap<>();
         this.state = state;
     }
 
+    public SessionInfo(SessionInfo other)
+    {
+        this(other.peer, other.sessionIndex, other.connecting, other.receivingSummaries, other.sendingSummaries, other.state);
+    }
+
     public boolean isFailed()
     {
         return state == StreamSession.State.FAILED;
diff --git a/src/java/org/apache/cassandra/streaming/SessionSummary.java b/src/java/org/apache/cassandra/streaming/SessionSummary.java
index 5b168a0..9588e49 100644
--- a/src/java/org/apache/cassandra/streaming/SessionSummary.java
+++ b/src/java/org/apache/cassandra/streaming/SessionSummary.java
@@ -19,6 +19,7 @@
 package org.apache.cassandra.streaming;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -28,20 +29,19 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.locator.InetAddressAndPort.Serializer;
 
 import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
 
 public class SessionSummary
 {
-    public final InetAddressAndPort coordinator;
-    public final InetAddressAndPort peer;
+    public final InetSocketAddress coordinator;
+    public final InetSocketAddress peer;
     /** Immutable collection of receiving summaries */
     public final Collection<StreamSummary> receivingSummaries;
     /** Immutable collection of sending summaries*/
     public final Collection<StreamSummary> sendingSummaries;
 
-    public SessionSummary(InetAddressAndPort coordinator, InetAddressAndPort peer,
+    public SessionSummary(InetSocketAddress coordinator, InetSocketAddress peer,
                           Collection<StreamSummary> receivingSummaries,
                           Collection<StreamSummary> sendingSummaries)
     {
diff --git a/src/java/org/apache/cassandra/streaming/StreamConnectionFactory.java b/src/java/org/apache/cassandra/streaming/StreamConnectionFactory.java
index 97bb452..95208e4 100644
--- a/src/java/org/apache/cassandra/streaming/StreamConnectionFactory.java
+++ b/src/java/org/apache/cassandra/streaming/StreamConnectionFactory.java
@@ -26,18 +26,4 @@
 public interface StreamConnectionFactory
 {
     Channel createConnection(OutboundConnectionSettings template, int messagingVersion) throws IOException;
-
-    /** Provide way to disable getPreferredIP() for tools without access to the system keyspace
-     * <p> 
-     * CASSANDRA-17663 moves calls to SystemKeyspace.getPreferredIP() outside of any threads
-     * that are regularly interrupted.  However the streaming subsystem is also used
-     * by the bulk loader tool, which does not have direct access to the local tables
-     * and uses the client metadata/queries to retrieve it.
-     *
-     * @return true if SystemKeyspace.getPreferredIP() should be used when connecting
-     */
-    default boolean supportsPreferredIp()
-    {
-        return true;
-    }
 }
diff --git a/src/java/org/apache/cassandra/streaming/StreamCoordinator.java b/src/java/org/apache/cassandra/streaming/StreamCoordinator.java
index 8bcf54e..ad292517 100644
--- a/src/java/org/apache/cassandra/streaming/StreamCoordinator.java
+++ b/src/java/org/apache/cassandra/streaming/StreamCoordinator.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.streaming;
 
+import java.net.InetSocketAddress;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 
@@ -25,6 +26,9 @@
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.net.MessagingService.current_version;
 
 
 /**
@@ -40,17 +44,17 @@
 
     private final boolean connectSequentially;
 
-    private final Map<InetAddressAndPort, HostStreamingData> peerSessions = new ConcurrentHashMap<>();
+    private final Map<InetSocketAddress, HostStreamingData> peerSessions = new ConcurrentHashMap<>();
     private final StreamOperation streamOperation;
     private final int connectionsPerHost;
     private final boolean follower;
-    private StreamConnectionFactory factory;
+    private StreamingChannel.Factory factory;
     private Iterator<StreamSession> sessionsToConnect = null;
-    private final UUID pendingRepair;
+    private final TimeUUID pendingRepair;
     private final PreviewKind previewKind;
 
-    public StreamCoordinator(StreamOperation streamOperation, int connectionsPerHost, StreamConnectionFactory factory,
-                             boolean follower, boolean connectSequentially, UUID pendingRepair, PreviewKind previewKind)
+    public StreamCoordinator(StreamOperation streamOperation, int connectionsPerHost, StreamingChannel.Factory factory,
+                             boolean follower, boolean connectSequentially, TimeUUID pendingRepair, PreviewKind previewKind)
     {
         this.streamOperation = streamOperation;
         this.connectionsPerHost = connectionsPerHost;
@@ -61,7 +65,7 @@
         this.previewKind = previewKind;
     }
 
-    public void setConnectionFactory(StreamConnectionFactory factory)
+    public void setConnectionFactory(StreamingChannel.Factory factory)
     {
         this.factory = factory;
     }
@@ -148,19 +152,19 @@
             logger.debug("Finished connecting all sessions");
     }
 
-    public synchronized Set<InetAddressAndPort> getPeers()
+    public synchronized Set<InetSocketAddress> getPeers()
     {
         return new HashSet<>(peerSessions.keySet());
     }
 
-    public synchronized StreamSession getOrCreateNextSession(InetAddressAndPort peer)
+    public synchronized StreamSession getOrCreateOutboundSession(InetAddressAndPort peer)
     {
-        return getOrCreateHostData(peer).getOrCreateNextSession(peer);
+        return getOrCreateHostData(peer).getOrCreateOutboundSession(peer);
     }
 
-    public synchronized StreamSession getOrCreateSessionById(InetAddressAndPort peer, int id)
+    public synchronized StreamSession getOrCreateInboundSession(InetAddressAndPort from, StreamingChannel channel, int messagingVersion, int id)
     {
-        return getOrCreateHostData(peer).getOrCreateSessionById(peer, id);
+        return getOrCreateHostData(from).getOrCreateInboundSession(from, channel, messagingVersion, id);
     }
 
     public StreamSession getSessionById(InetAddressAndPort peer, int id)
@@ -199,13 +203,13 @@
 
             for (Collection<OutgoingStream> bucket : buckets)
             {
-                StreamSession session = sessionList.getOrCreateNextSession(to);
+                StreamSession session = sessionList.getOrCreateOutboundSession(to);
                 session.addTransferStreams(bucket);
             }
         }
         else
         {
-            StreamSession session = sessionList.getOrCreateNextSession(to);
+            StreamSession session = sessionList.getOrCreateOutboundSession(to);
             session.addTransferStreams(streams);
         }
     }
@@ -242,7 +246,7 @@
         return data;
     }
 
-    private HostStreamingData getOrCreateHostData(InetAddressAndPort peer)
+    private HostStreamingData getOrCreateHostData(InetSocketAddress peer)
     {
         HostStreamingData data = peerSessions.get(peer);
         if (data == null)
@@ -253,7 +257,7 @@
         return data;
     }
 
-    public UUID getPendingRepair()
+    public TimeUUID getPendingRepair()
     {
         return pendingRepair;
     }
@@ -281,12 +285,12 @@
             return false;
         }
 
-        public StreamSession getOrCreateNextSession(InetAddressAndPort peer)
+        public StreamSession getOrCreateOutboundSession(InetAddressAndPort peer)
         {
             // create
             if (streamSessions.size() < connectionsPerHost)
             {
-                StreamSession session = new StreamSession(streamOperation, peer, factory, isFollower(), streamSessions.size(),
+                StreamSession session = new StreamSession(streamOperation, peer, factory, null, current_version, isFollower(), streamSessions.size(),
                                                           pendingRepair, previewKind);
                 streamSessions.put(++lastReturned, session);
                 sessionInfos.put(lastReturned, session.getSessionInfo());
@@ -315,12 +319,12 @@
             return Collections.unmodifiableCollection(streamSessions.values());
         }
 
-        public StreamSession getOrCreateSessionById(InetAddressAndPort peer, int id)
+        public StreamSession getOrCreateInboundSession(InetAddressAndPort from, StreamingChannel channel, int messagingVersion, int id)
         {
             StreamSession session = streamSessions.get(id);
             if (session == null)
             {
-                session = new StreamSession(streamOperation, peer, factory, isFollower(), id, pendingRepair, previewKind);
+                session = new StreamSession(streamOperation, from, factory, channel, messagingVersion, isFollower(), id, pendingRepair, previewKind);
                 streamSessions.put(id, session);
                 sessionInfos.put(id, session.getSessionInfo());
             }
diff --git a/src/java/org/apache/cassandra/streaming/StreamDeserializingTask.java b/src/java/org/apache/cassandra/streaming/StreamDeserializingTask.java
new file mode 100644
index 0000000..3785249
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/StreamDeserializingTask.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.streaming.messages.KeepAliveMessage;
+import org.apache.cassandra.streaming.messages.StreamMessage;
+import org.apache.cassandra.utils.JVMStabilityInspector;
+
+import static org.apache.cassandra.streaming.StreamSession.createLogTag;
+
+/**
+ * The task that performs the actual deserialization.
+ */
+public class StreamDeserializingTask implements Runnable
+{
+    private static final Logger logger = LoggerFactory.getLogger(StreamDeserializingTask.class);
+
+    private final StreamingChannel channel;
+    private final int messagingVersion;
+    @VisibleForTesting
+    protected StreamSession session;
+
+    public StreamDeserializingTask(StreamSession session, StreamingChannel channel, int messagingVersion)
+    {
+        this.session = session;
+        this.channel = channel;
+        this.messagingVersion = messagingVersion;
+    }
+
+    @Override
+    public void run()
+    {
+        @SuppressWarnings("resource") // closed in finally
+        StreamingDataInputPlus input = channel.in();
+        try
+        {
+            StreamMessage message;
+            while (null != (message = StreamMessage.deserialize(input, messagingVersion)))
+            {
+                // keep-alives don't necessarily need to be tied to a session (they could arrive before or after
+                // the session lifecycle, due to races); just log that we received the message and carry on
+                if (message instanceof KeepAliveMessage)
+                {
+                    if (logger.isDebugEnabled())
+                        logger.debug("{} Received {}", createLogTag(session, channel), message);
+                    continue;
+                }
+
+                if (session == null)
+                    session = deriveSession(message);
+
+                if (logger.isDebugEnabled())
+                    logger.debug("{} Received {}", createLogTag(session, channel), message);
+
+                session.messageReceived(message);
+            }
+        }
+        catch (Throwable t)
+        {
+            JVMStabilityInspector.inspectThrowable(t);
+            if (session != null)
+            {
+                session.onError(t);
+            }
+            else if (t instanceof StreamReceiveException)
+            {
+                ((StreamReceiveException)t).session.onError(t.getCause());
+            }
+            else
+            {
+                logger.error("{} stream operation from {} failed", createLogTag(session, channel), InetAddressAndPort.toString(channel.peer(), true), t);
+            }
+        }
+        finally
+        {
+            channel.close();
+            input.close();
+        }
+    }
+
+    @VisibleForTesting
+    public StreamSession deriveSession(StreamMessage message)
+    {
+        // StreamInitMessage starts a new channel here, but IncomingStreamMessage needs a session
+        // to be established a priori
+        StreamSession streamSession = message.getOrCreateAndAttachInboundSession(channel, messagingVersion);
+
+        // Attach this channel to the session: this only happens upon receiving the first init message as a follower;
+        // in all other cases, no new control channel will be added, as the proper control channel will already be attached.
+        streamSession.attachInbound(channel);
+        return streamSession;
+    }
+}
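
For illustration only (not part of the patch), a minimal standalone sketch of the read-until-null loop that StreamDeserializingTask implements: keep-alive messages are logged and skipped, and the session is bound lazily from the first real message. The types below are simplified stand-ins, not the Cassandra classes.

    import java.util.Iterator;

    // Simplified stand-in for the streaming message types (an assumption, not a Cassandra API).
    interface Message { boolean isKeepAlive(); }

    final class DeserializingLoopSketch
    {
        private Object session; // bound lazily from the first non-keep-alive message

        void run(Iterator<Message> input)
        {
            while (input.hasNext())
            {
                Message message = input.next();
                if (message.isKeepAlive())
                {
                    System.out.println("Received keep-alive; not tied to any session");
                    continue; // keep-alives are logged and skipped
                }
                if (session == null)
                    session = deriveSession(message); // stands in for getOrCreateAndAttachInboundSession(...)
                System.out.println("Dispatching " + message + " to " + session);
            }
        }

        private Object deriveSession(Message first)
        {
            return new Object(); // placeholder session
        }
    }
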
diff --git a/src/java/org/apache/cassandra/streaming/StreamEvent.java b/src/java/org/apache/cassandra/streaming/StreamEvent.java
index 7ecd081..ff83a19 100644
--- a/src/java/org/apache/cassandra/streaming/StreamEvent.java
+++ b/src/java/org/apache/cassandra/streaming/StreamEvent.java
@@ -20,13 +20,13 @@
 import java.util.Collections;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.ImmutableSet;
 
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.utils.TimeUUID;
 
 public abstract class StreamEvent
 {
@@ -38,9 +38,9 @@
     }
 
     public final Type eventType;
-    public final UUID planId;
+    public final TimeUUID planId;
 
-    protected StreamEvent(Type eventType, UUID planId)
+    protected StreamEvent(Type eventType, TimeUUID planId)
     {
         this.eventType = eventType;
         this.planId = planId;
@@ -71,7 +71,7 @@
     {
         public final ProgressInfo progress;
 
-        public ProgressEvent(UUID planId, ProgressInfo progress)
+        public ProgressEvent(TimeUUID planId, ProgressInfo progress)
         {
             super(Type.FILE_PROGRESS, planId);
             this.progress = progress;
@@ -87,11 +87,13 @@
     public static class SessionPreparedEvent extends StreamEvent
     {
         public final SessionInfo session;
+        public final StreamSession.PrepareDirection prepareDirection;
 
-        public SessionPreparedEvent(UUID planId, SessionInfo session)
+        public SessionPreparedEvent(TimeUUID planId, SessionInfo session, StreamSession.PrepareDirection prepareDirection)
         {
             super(Type.STREAM_PREPARED, planId);
             this.session = session;
+            this.prepareDirection = prepareDirection;
         }
     }
 }
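
SessionPreparedEvent now also carries the PrepareDirection added to StreamSession. A hedged sketch of how a listener might branch on it, using simplified stand-in types; the exact semantics of SEND versus ACK are defined by StreamSession, not by this sketch.

    // Mirrors the shape of StreamSession.PrepareDirection for illustration only.
    enum PrepareDirection { SEND, ACK }

    final class PreparedEventHandlerSketch
    {
        void onSessionPrepared(Object planId, PrepareDirection direction)
        {
            // A listener can now tell which side of the prepare exchange produced the event.
            switch (direction)
            {
                case SEND:
                    System.out.println(planId + " prepared (prepare sent)");
                    break;
                case ACK:
                    System.out.println(planId + " prepared (prepare acknowledged)");
                    break;
            }
        }
    }
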
diff --git a/src/java/org/apache/cassandra/streaming/StreamManager.java b/src/java/org/apache/cassandra/streaming/StreamManager.java
index 849f7b5..408b6f4 100644
--- a/src/java/org/apache/cassandra/streaming/StreamManager.java
+++ b/src/java/org/apache/cassandra/streaming/StreamManager.java
@@ -17,9 +17,10 @@
  */
 package org.apache.cassandra.streaming;
 
+import java.util.Collection;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
+import java.util.concurrent.CopyOnWriteArrayList;
 import javax.management.ListenerNotFoundException;
 import javax.management.MBeanNotificationInfo;
 import javax.management.NotificationFilter;
@@ -28,29 +29,37 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.RateLimiter;
-
 import org.cliffc.high_scale_lib.NonBlockingHashMap;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.DurationSpec;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.streaming.management.StreamEventJMXNotifier;
 import org.apache.cassandra.streaming.management.StreamStateCompositeData;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * StreamManager manages currently running {@link StreamResultFuture}s and provides status of all operation invoked.
  *
- * All stream operation should be created through this class to track streaming status and progress.
+ * All stream operations should be created through this class to track streaming status and progress.
  */
 public class StreamManager implements StreamManagerMBean
 {
+    private static final Logger logger = LoggerFactory.getLogger(StreamManager.class);
+
     public static final StreamManager instance = new StreamManager();
 
     /**
      * Gets streaming rate limiter.
-     * When stream_throughput_outbound_megabits_per_sec is 0, this returns rate limiter
+     * When stream_throughput_outbound is 0, this returns rate limiter
      * with the rate of Double.MAX_VALUE bytes per second.
      * Rate unit is bytes per sec.
      *
@@ -58,25 +67,60 @@
      */
     public static StreamRateLimiter getRateLimiter(InetAddressAndPort peer)
     {
-        return new StreamRateLimiter(peer);
+        return new StreamRateLimiter(peer,
+                                     StreamRateLimiter.LIMITER,
+                                     StreamRateLimiter.INTER_DC_LIMITER,
+                                     DatabaseDescriptor.getStreamThroughputOutboundBytesPerSec(),
+                                     DatabaseDescriptor.getInterDCStreamThroughputOutboundBytesPerSec());
     }
 
-    public static class StreamRateLimiter
+    /**
+     * Get streaming rate limiter for entire SSTable operations.
+     * When {@code entire_sstable_stream_throughput_outbound}
+     * is less than or equal to {@code 0}, this returns a rate limiter with the
+     * rate of {@link Double#MAX_VALUE} bytes per second.
+     * Rate unit is bytes per sec.
+     *
+     * @param peer the peer location
+     * @return {@link StreamRateLimiter} with entire SSTable rate limit set based on peer location
+     */
+    public static StreamRateLimiter getEntireSSTableRateLimiter(InetAddressAndPort peer)
     {
-        public static final double BYTES_PER_MEGABIT = (1000 * 1000) / 8.0;
-        private static final RateLimiter limiter = RateLimiter.create(calculateRateInBytes());
-        private static final RateLimiter interDCLimiter = RateLimiter.create(calculateInterDCRateInBytes());
-        private final boolean isLocalDC;
+        return new StreamRateLimiter(peer,
+                                     StreamRateLimiter.ENTIRE_SSTABLE_LIMITER,
+                                     StreamRateLimiter.ENTIRE_SSTABLE_INTER_DC_LIMITER,
+                                     DatabaseDescriptor.getEntireSSTableStreamThroughputOutboundBytesPerSec(),
+                                     DatabaseDescriptor.getEntireSSTableInterDCStreamThroughputOutboundBytesPerSec());
+    }
 
-        public StreamRateLimiter(InetAddressAndPort peer)
+    public static class StreamRateLimiter implements StreamingDataOutputPlus.RateLimiter
+    {
+        public static final double BYTES_PER_MEBIBYTE = 1024.0 * 1024.0;
+        private static final RateLimiter LIMITER = RateLimiter.create(calculateRateInBytes());
+        private static final RateLimiter INTER_DC_LIMITER = RateLimiter.create(calculateInterDCRateInBytes());
+        private static final RateLimiter ENTIRE_SSTABLE_LIMITER = RateLimiter.create(calculateEntireSSTableRateInBytes());
+        private static final RateLimiter ENTIRE_SSTABLE_INTER_DC_LIMITER = RateLimiter.create(calculateEntireSSTableInterDCRateInBytes());
+
+        private final RateLimiter limiter;
+        private final RateLimiter interDCLimiter;
+        private final boolean isLocalDC;
+        private final double throughput;
+        private final double interDCThroughput;
+
+        private StreamRateLimiter(InetAddressAndPort peer, RateLimiter limiter, RateLimiter interDCLimiter, double throughput, double interDCThroughput)
         {
+            this.limiter = limiter;
+            this.interDCLimiter = interDCLimiter;
+            this.throughput = throughput;
+            this.interDCThroughput = interDCThroughput;
             if (DatabaseDescriptor.getLocalDataCenter() != null && DatabaseDescriptor.getEndpointSnitch() != null)
                 isLocalDC = DatabaseDescriptor.getLocalDataCenter().equals(
-                            DatabaseDescriptor.getEndpointSnitch().getDatacenter(peer));
+                DatabaseDescriptor.getEndpointSnitch().getDatacenter(peer));
             else
                 isLocalDC = true;
         }
 
+        @Override
         public void acquire(int toTransfer)
         {
             limiter.acquire(toTransfer);
@@ -84,52 +128,180 @@
                 interDCLimiter.acquire(toTransfer);
         }
 
+        @Override
+        public boolean isRateLimited()
+        {
+            // Rate limiting is enabled when throughput is greater than 0.
+            // If the peer is not local, also check whether inter-DC rate limiting is enabled.
+            return throughput > 0 || (!isLocalDC && interDCThroughput > 0);
+        }
+
         public static void updateThroughput()
         {
-            limiter.setRate(calculateRateInBytes());
+            LIMITER.setRate(calculateRateInBytes());
         }
 
         public static void updateInterDCThroughput()
         {
-            interDCLimiter.setRate(calculateInterDCRateInBytes());
+            INTER_DC_LIMITER.setRate(calculateInterDCRateInBytes());
+        }
+
+        public static void updateEntireSSTableThroughput()
+        {
+            ENTIRE_SSTABLE_LIMITER.setRate(calculateEntireSSTableRateInBytes());
+        }
+
+        public static void updateEntireSSTableInterDCThroughput()
+        {
+            ENTIRE_SSTABLE_INTER_DC_LIMITER.setRate(calculateEntireSSTableInterDCRateInBytes());
         }
 
         private static double calculateRateInBytes()
         {
-            return DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSec() > 0
-                   ? DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSec() * BYTES_PER_MEGABIT
-                   : Double.MAX_VALUE; // if throughput is set to 0 or negative value, throttling is disabled
+            double throughput = DatabaseDescriptor.getStreamThroughputOutboundBytesPerSec();
+            return calculateEffectiveRateInBytes(throughput);
         }
 
         private static double calculateInterDCRateInBytes()
         {
-            return DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSec() > 0
-                   ? DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSec() * BYTES_PER_MEGABIT
-                   : Double.MAX_VALUE; // if throughput is set to 0 or negative value, throttling is disabled
+            double throughput = DatabaseDescriptor.getInterDCStreamThroughputOutboundBytesPerSec();
+            return calculateEffectiveRateInBytes(throughput);
+        }
+
+        private static double calculateEntireSSTableRateInBytes()
+        {
+            double throughput = DatabaseDescriptor.getEntireSSTableStreamThroughputOutboundBytesPerSec();
+            return calculateEffectiveRateInBytes(throughput);
+        }
+
+        private static double calculateEntireSSTableInterDCRateInBytes()
+        {
+            double throughput = DatabaseDescriptor.getEntireSSTableInterDCStreamThroughputOutboundBytesPerSec();
+            return calculateEffectiveRateInBytes(throughput);
         }
 
         @VisibleForTesting
         public static double getRateLimiterRateInBytes()
         {
-            return limiter.getRate();
+            return LIMITER.getRate();
         }
 
         @VisibleForTesting
         public static double getInterDCRateLimiterRateInBytes()
         {
-            return interDCLimiter.getRate();
+            return INTER_DC_LIMITER.getRate();
+        }
+
+        @VisibleForTesting
+        public static double getEntireSSTableRateLimiterRateInBytes()
+        {
+            return ENTIRE_SSTABLE_LIMITER.getRate();
+        }
+
+        @VisibleForTesting
+        public static double getEntireSSTableInterDCRateLimiterRateInBytes()
+        {
+            return ENTIRE_SSTABLE_INTER_DC_LIMITER.getRate();
+        }
+
+        private static double calculateEffectiveRateInBytes(double throughput)
+        {
+            // if throughput is set to 0, throttling is disabled
+            return throughput > 0
+                   ? throughput
+                   : Double.MAX_VALUE;
         }
     }
 
     private final StreamEventJMXNotifier notifier = new StreamEventJMXNotifier();
+    private final CopyOnWriteArrayList<StreamListener> listeners = new CopyOnWriteArrayList<>();
 
     /*
      * Currently running streams. Removed after completion/failure.
      * We manage them in two different maps to distinguish initiated ones from
      * receiving ones within the same JVM.
      */
-    private final Map<UUID, StreamResultFuture> initiatorStreams = new NonBlockingHashMap<>();
-    private final Map<UUID, StreamResultFuture> followerStreams = new NonBlockingHashMap<>();
+    private final Map<TimeUUID, StreamResultFuture> initiatorStreams = new NonBlockingHashMap<>();
+    private final Map<TimeUUID, StreamResultFuture> followerStreams = new NonBlockingHashMap<>();
+
+    private final Cache<TimeUUID, StreamingState> states;
+    private final StreamListener listener = new StreamListener()
+    {
+        @Override
+        public void onRegister(StreamResultFuture result)
+        {
+            if (!DatabaseDescriptor.getStreamingStatsEnabled())
+                return;
+            // reason for synchronized rather than states.get is to detect duplicates
+            // streaming shouldn't be producing duplicates as that would imply a planId collision
+            synchronized (states)
+            {
+                StreamingState previous = states.getIfPresent(result.planId);
+                if (previous == null)
+                {
+                    StreamingState state = new StreamingState(result);
+                    states.put(state.id(), state);
+                    state.phase.start();
+                    result.addEventListener(state);
+                }
+                else
+                {
+                    logger.warn("Duplicate streaming states detected for id {}", result.planId);
+                }
+            }
+        }
+    };
+
+    public StreamManager()
+    {
+        DurationSpec.LongNanosecondsBound duration = DatabaseDescriptor.getStreamingStateExpires();
+        long sizeBytes = DatabaseDescriptor.getStreamingStateSize().toBytes();
+        long numElements = sizeBytes / StreamingState.ELEMENT_SIZE;
+        logger.info("Storing streaming state for {} or for {} elements", duration, numElements);
+        states = CacheBuilder.newBuilder()
+                             .expireAfterWrite(duration.quantity(), duration.unit())
+                             .maximumSize(numElements)
+                             .build();
+    }
+
+    public void start()
+    {
+        addListener(listener);
+    }
+
+    public void stop()
+    {
+        removeListener(listener);
+    }
+
+    public Collection<StreamingState> getStreamingStates()
+    {
+        return states.asMap().values();
+    }
+
+    public StreamingState getStreamingState(TimeUUID id)
+    {
+        return states.getIfPresent(id);
+    }
+
+    @VisibleForTesting
+    public void putStreamingState(StreamingState state)
+    {
+        synchronized (states)
+        {
+            StreamingState previous = states.getIfPresent(state.id());
+            if (previous != null)
+                throw new AssertionError("StreamPlan id " + state.id() + " already exists");
+            states.put(state.id(), state);
+        }
+    }
+
+    @VisibleForTesting
+    public void clearStates()
+    {
+        // states.cleanUp() doesn't clear; it appears to only evict entries that are already eligible for removal. This method should remove all state.
+        states.asMap().clear();
+    }
 
     public Set<CompositeData> getCurrentStreams()
     {
@@ -142,42 +314,104 @@
         }));
     }
 
+    @Override
+    public boolean getStreamingStatsEnabled()
+    {
+        return DatabaseDescriptor.getStreamingStatsEnabled();
+    }
+
+    @Override
+    public void setStreamingStatsEnabled(boolean streamingStatsEnabled)
+    {
+        DatabaseDescriptor.setStreamingStatsEnabled(streamingStatsEnabled);
+    }
+
+    @Override
+    public String getStreamingSlowEventsLogTimeout()
+    {
+        return DatabaseDescriptor.getStreamingSlowEventsLogTimeout().toString();
+    }
+
+    @Override
+    public void setStreamingSlowEventsLogTimeout(String value)
+    {
+        DatabaseDescriptor.setStreamingSlowEventsLogTimeout(value);
+    }
+
     public void registerInitiator(final StreamResultFuture result)
     {
         result.addEventListener(notifier);
         // Make sure we remove the stream on completion (whether successful or not)
-        result.addListener(new Runnable()
-        {
-            public void run()
-            {
-                initiatorStreams.remove(result.planId);
-            }
-        }, MoreExecutors.directExecutor());
+        result.addListener(() -> initiatorStreams.remove(result.planId));
 
         initiatorStreams.put(result.planId, result);
+        notifySafeOnRegister(result);
     }
 
     public StreamResultFuture registerFollower(final StreamResultFuture result)
     {
         result.addEventListener(notifier);
         // Make sure we remove the stream on completion (whether successful or not)
-        result.addListener(new Runnable()
-        {
-            public void run()
-            {
-                followerStreams.remove(result.planId);
-            }
-        }, MoreExecutors.directExecutor());
+        result.addListener(() -> followerStreams.remove(result.planId));
 
         StreamResultFuture previous = followerStreams.putIfAbsent(result.planId, result);
-        return previous ==  null ? result : previous;
+        if (previous == null)
+        {
+            notifySafeOnRegister(result);
+            return result;
+        }
+        return previous;
     }
 
-    public StreamResultFuture getReceivingStream(UUID planId)
+    @VisibleForTesting
+    public void putInitiatorStream(StreamResultFuture future)
+    {
+        StreamResultFuture current = initiatorStreams.putIfAbsent(future.planId, future);
+        assert current == null: "Duplicate initiator stream for " + future.planId;
+    }
+
+    @VisibleForTesting
+    public void putFollowerStream(StreamResultFuture future)
+    {
+        StreamResultFuture current = followerStreams.putIfAbsent(future.planId, future);
+        assert current == null: "Duplicate follower stream for " + future.planId;
+    }
+
+    public void addListener(StreamListener listener)
+    {
+        listeners.add(listener);
+    }
+
+    public void removeListener(StreamListener listener)
+    {
+        listeners.remove(listener);
+    }
+
+    private void notifySafeOnRegister(StreamResultFuture result)
+    {
+        for (StreamListener l : listeners)
+        {
+            try
+            {
+                l.onRegister(result);
+            }
+            catch (Throwable t)
+            {
+                logger.warn("Failed to notify stream listener of new Initiator/Follower", t);
+            }
+        }
+    }
+
+    public StreamResultFuture getReceivingStream(TimeUUID planId)
     {
         return followerStreams.get(planId);
     }
 
+    public StreamResultFuture getInitiatorStream(TimeUUID planId)
+    {
+        return initiatorStreams.get(planId);
+    }
+
     public void addNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback)
     {
         notifier.addNotificationListener(listener, filter, handback);
@@ -198,13 +432,13 @@
         return notifier.getNotificationInfo();
     }
 
-    public StreamSession findSession(InetAddressAndPort peer, UUID planId, int sessionIndex, boolean searchInitiatorSessions)
+    public StreamSession findSession(InetAddressAndPort peer, TimeUUID planId, int sessionIndex, boolean searchInitiatorSessions)
     {
-        Map<UUID, StreamResultFuture> streams = searchInitiatorSessions ? initiatorStreams : followerStreams;
+        Map<TimeUUID, StreamResultFuture> streams = searchInitiatorSessions ? initiatorStreams : followerStreams;
         return findSession(streams, peer, planId, sessionIndex);
     }
 
-    private StreamSession findSession(Map<UUID, StreamResultFuture> streams, InetAddressAndPort peer, UUID planId, int sessionIndex)
+    private StreamSession findSession(Map<TimeUUID, StreamResultFuture> streams, InetAddressAndPort peer, TimeUUID planId, int sessionIndex)
     {
         StreamResultFuture streamResultFuture = streams.get(planId);
         if (streamResultFuture == null)
@@ -212,4 +446,9 @@
 
         return streamResultFuture.getSession(peer, sessionIndex);
     }
+
+    public interface StreamListener
+    {
+        default void onRegister(StreamResultFuture result) {}
+    }
 }
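
The throttling rule used throughout StreamRateLimiter is simple enough to show in isolation: any configured throughput of 0 or less maps to Double.MAX_VALUE, which Guava's RateLimiter treats as effectively unlimited. A minimal standalone sketch (not the patched class):

    // Sketch of the effective-rate rule: throughput <= 0 disables throttling.
    final class EffectiveRateSketch
    {
        static double calculateEffectiveRateInBytes(double throughputBytesPerSec)
        {
            return throughputBytesPerSec > 0 ? throughputBytesPerSec : Double.MAX_VALUE;
        }

        public static void main(String[] args)
        {
            System.out.println(calculateEffectiveRateInBytes(16 * 1024 * 1024)); // 16 MiB/s -> limited
            System.out.println(calculateEffectiveRateInBytes(0));                // 0 -> effectively unlimited
        }
    }
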
diff --git a/src/java/org/apache/cassandra/streaming/StreamManagerMBean.java b/src/java/org/apache/cassandra/streaming/StreamManagerMBean.java
index f329596..e49c059 100644
--- a/src/java/org/apache/cassandra/streaming/StreamManagerMBean.java
+++ b/src/java/org/apache/cassandra/streaming/StreamManagerMBean.java
@@ -29,4 +29,24 @@
      * Returns the current state of all ongoing streams.
      */
     Set<CompositeData> getCurrentStreams();
+
+    /**
+     * @return whether the streaming virtual table should collect stats while streaming is running
+     */
+    boolean getStreamingStatsEnabled();
+
+    /**
+     * enable/disable collection of streaming stats while streaming is running.
+     */
+    void setStreamingStatsEnabled(boolean streamingStatsEnabled);
+
+    /**
+     * @return current timeout for streaming slow events log
+     */
+    String getStreamingSlowEventsLogTimeout();
+
+    /**
+     * Sets the timeout for the streaming slow events log
+     */
+    void setStreamingSlowEventsLogTimeout(String value);
 }
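
The new attributes should be reachable over JMX like any other MBean attribute. A hedged sketch using only the standard javax.management API; the object name and port below are assumptions based on Cassandra defaults, and the attribute names follow the getter/setter pairs added to StreamManagerMBean.

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    class StreamingStatsJmxSketch
    {
        public static void main(String[] args) throws Exception
        {
            // Assumed defaults: local node, JMX port 7199, StreamManager registered under org.apache.cassandra.net.
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
            try (JMXConnector connector = JMXConnectorFactory.connect(url))
            {
                MBeanServerConnection mbs = connector.getMBeanServerConnection();
                ObjectName name = new ObjectName("org.apache.cassandra.net:type=StreamManager");

                System.out.println("stats enabled: " + mbs.getAttribute(name, "StreamingStatsEnabled"));
                System.out.println("slow log timeout: " + mbs.getAttribute(name, "StreamingSlowEventsLogTimeout"));
            }
        }
    }
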
diff --git a/src/java/org/apache/cassandra/streaming/StreamPlan.java b/src/java/org/apache/cassandra/streaming/StreamPlan.java
index 60845fa..9e79a5d 100644
--- a/src/java/org/apache/cassandra/streaming/StreamPlan.java
+++ b/src/java/org/apache/cassandra/streaming/StreamPlan.java
@@ -24,10 +24,12 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.locator.Replica;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static com.google.common.collect.Iterables.all;
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
+import static org.apache.cassandra.streaming.StreamingChannel.Factory.Global.streamingFactory;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * {@link StreamPlan} is a helper class that builds StreamOperation of given configuration.
@@ -37,7 +39,7 @@
 public class StreamPlan
 {
     private static final String[] EMPTY_COLUMN_FAMILIES = new String[0];
-    private final UUID planId = UUIDGen.getTimeUUID();
+    private final TimeUUID planId = nextTimeUUID();
     private final StreamOperation streamOperation;
     private final List<StreamEventHandler> handlers = new ArrayList<>();
     private final StreamCoordinator coordinator;
@@ -60,10 +62,10 @@
     }
 
     public StreamPlan(StreamOperation streamOperation, int connectionsPerHost,
-                      boolean connectSequentially, UUID pendingRepair, PreviewKind previewKind)
+                      boolean connectSequentially, TimeUUID pendingRepair, PreviewKind previewKind)
     {
         this.streamOperation = streamOperation;
-        this.coordinator = new StreamCoordinator(streamOperation, connectionsPerHost, new DefaultConnectionFactory(),
+        this.coordinator = new StreamCoordinator(streamOperation, connectionsPerHost, streamingFactory(),
                                                  false, connectSequentially, pendingRepair, previewKind);
     }
 
@@ -107,7 +109,7 @@
         assert all(fullRanges, Replica::isSelf) || RangesAtEndpoint.isDummyList(fullRanges) : fullRanges.toString();
         assert all(transientRanges, Replica::isSelf) || RangesAtEndpoint.isDummyList(transientRanges) : transientRanges.toString();
 
-        StreamSession session = coordinator.getOrCreateNextSession(from);
+        StreamSession session = coordinator.getOrCreateOutboundSession(from);
         session.addStreamRequest(keyspace, fullRanges, transientRanges, Arrays.asList(columnFamilies));
         return this;
     }
@@ -123,7 +125,7 @@
      */
     public StreamPlan transferRanges(InetAddressAndPort to, String keyspace, RangesAtEndpoint replicas, String... columnFamilies)
     {
-        StreamSession session = coordinator.getOrCreateNextSession(to);
+        StreamSession session = coordinator.getOrCreateOutboundSession(to);
         session.addTransferRanges(keyspace, replicas, Arrays.asList(columnFamilies), flushBeforeTransfer);
         return this;
     }
@@ -155,7 +157,7 @@
      * @param factory StreamConnectionFactory to use
      * @return self
      */
-    public StreamPlan connectionFactory(StreamConnectionFactory factory)
+    public StreamPlan connectionFactory(StreamingChannel.Factory factory)
     {
         this.coordinator.setConnectionFactory(factory);
         return this;
@@ -192,7 +194,7 @@
         return this;
     }
 
-    public UUID getPendingRepair()
+    public TimeUUID getPendingRepair()
     {
         return coordinator.getPendingRepair();
     }
diff --git a/src/java/org/apache/cassandra/streaming/StreamReceiveException.java b/src/java/org/apache/cassandra/streaming/StreamReceiveException.java
index 54b365a..c564182 100644
--- a/src/java/org/apache/cassandra/streaming/StreamReceiveException.java
+++ b/src/java/org/apache/cassandra/streaming/StreamReceiveException.java
@@ -18,7 +18,9 @@
 
 package org.apache.cassandra.streaming;
 
-public class StreamReceiveException extends RuntimeException
+import java.io.IOException;
+
+public class StreamReceiveException extends IOException
 {
     public final StreamSession session;
 
diff --git a/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java b/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java
index d127edb..002e182 100644
--- a/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java
+++ b/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.streaming;
 
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
@@ -26,11 +25,11 @@
 import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.utils.ExecutorUtils.awaitTermination;
 import static org.apache.cassandra.utils.ExecutorUtils.shutdown;
 
@@ -41,7 +40,7 @@
 {
     private static final Logger logger = LoggerFactory.getLogger(StreamReceiveTask.class);
 
-    private static final ExecutorService executor = Executors.newCachedThreadPool(new NamedThreadFactory("StreamReceiveTask"));
+    private static final ExecutorService executor = executorFactory().pooled("StreamReceiveTask", Integer.MAX_VALUE);
 
     private final StreamReceiver receiver;
 
diff --git a/src/java/org/apache/cassandra/streaming/StreamResultFuture.java b/src/java/org/apache/cassandra/streaming/StreamResultFuture.java
index 3b8ffdc..b43203d 100644
--- a/src/java/org/apache/cassandra/streaming/StreamResultFuture.java
+++ b/src/java/org/apache/cassandra/streaming/StreamResultFuture.java
@@ -17,20 +17,26 @@
  */
 package org.apache.cassandra.streaming;
 
+import java.time.Duration;
 import java.util.Collection;
-import java.util.UUID;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.TimeUnit;
 
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import io.netty.channel.Channel;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.streaming.StreamingChannel.Factory.Global.streamingFactory;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * A future on the result ({@link StreamState}) of a streaming plan.
  *
@@ -44,14 +50,15 @@
  * You can attach {@link StreamEventHandler} to this object to listen on {@link StreamEvent}s to
  * track progress of the streaming.
  */
-public final class StreamResultFuture extends AbstractFuture<StreamState>
+public final class StreamResultFuture extends AsyncFuture<StreamState>
 {
     private static final Logger logger = LoggerFactory.getLogger(StreamResultFuture.class);
 
-    public final UUID planId;
+    public final TimeUUID planId;
     public final StreamOperation streamOperation;
     private final StreamCoordinator coordinator;
     private final Collection<StreamEventHandler> eventListeners = new ConcurrentLinkedQueue<>();
+    private final long slowEventsLogTimeoutNanos = DatabaseDescriptor.getStreamingSlowEventsLogTimeout().toNanoseconds();
 
     /**
      * Create new StreamResult of given {@code planId} and streamOperation.
@@ -61,7 +68,7 @@
      * @param planId Stream plan ID
      * @param streamOperation Stream streamOperation
      */
-    private StreamResultFuture(UUID planId, StreamOperation streamOperation, StreamCoordinator coordinator)
+    public StreamResultFuture(TimeUUID planId, StreamOperation streamOperation, StreamCoordinator coordinator)
     {
         this.planId = planId;
         this.streamOperation = streamOperation;
@@ -69,15 +76,16 @@
 
         // if there is no session to listen to, we immediately set result for returning
         if (!coordinator.isFollower() && !coordinator.hasActiveSessions())
-            set(getCurrentState());
+            trySuccess(getCurrentState());
     }
 
-    private StreamResultFuture(UUID planId, StreamOperation streamOperation, UUID pendingRepair, PreviewKind previewKind)
+    @VisibleForTesting
+    public StreamResultFuture(TimeUUID planId, StreamOperation streamOperation, TimeUUID pendingRepair, PreviewKind previewKind)
     {
-        this(planId, streamOperation, new StreamCoordinator(streamOperation, 0, new DefaultConnectionFactory(), true, false, pendingRepair, previewKind));
+        this(planId, streamOperation, new StreamCoordinator(streamOperation, 0, streamingFactory(), true, false, pendingRepair, previewKind));
     }
 
-    public static StreamResultFuture createInitiator(UUID planId, StreamOperation streamOperation, Collection<StreamEventHandler> listeners,
+    public static StreamResultFuture createInitiator(TimeUUID planId, StreamOperation streamOperation, Collection<StreamEventHandler> listeners,
                                                      StreamCoordinator coordinator)
     {
         StreamResultFuture future = createAndRegisterInitiator(planId, streamOperation, coordinator);
@@ -101,31 +109,31 @@
     }
 
     public static synchronized StreamResultFuture createFollower(int sessionIndex,
-                                                                 UUID planId,
+                                                                 TimeUUID planId,
                                                                  StreamOperation streamOperation,
                                                                  InetAddressAndPort from,
-                                                                 Channel channel,
-                                                                 UUID pendingRepair,
+                                                                 StreamingChannel channel,
+                                                                 int messagingVersion,
+                                                                 TimeUUID pendingRepair,
                                                                  PreviewKind previewKind)
     {
         StreamResultFuture future = StreamManager.instance.getReceivingStream(planId);
         if (future == null)
         {
-            logger.info("[Stream #{} ID#{}] Creating new streaming plan for {} from {} channel.remote {} channel.local {}" +
-                        " channel.id {}", planId, sessionIndex, streamOperation.getDescription(),
-                        from, channel.remoteAddress(), channel.localAddress(), channel.id());
+            logger.info("[Stream #{} ID#{}] Creating new streaming plan for {} from {} {}", planId, sessionIndex, streamOperation.getDescription(),
+                        from, channel.description());
 
             // The main reason we create a StreamResultFuture on the receiving side is for JMX exposure.
             future = new StreamResultFuture(planId, streamOperation, pendingRepair, previewKind);
             StreamManager.instance.registerFollower(future);
         }
-        future.attachConnection(from, sessionIndex);
-        logger.info("[Stream #{}, ID#{}] Received streaming plan for {} from {} channel.remote {} channel.local {} channel.id {}",
-                    planId, sessionIndex, streamOperation.getDescription(), from, channel.remoteAddress(), channel.localAddress(), channel.id());
+        future.initInbound(from, channel, messagingVersion, sessionIndex);
+        logger.info("[Stream #{}, ID#{}] Received streaming plan for {} from {} {}",
+                    planId, sessionIndex, streamOperation.getDescription(), from, channel.description());
         return future;
     }
 
-    private static StreamResultFuture createAndRegisterInitiator(UUID planId, StreamOperation streamOperation, StreamCoordinator coordinator)
+    private static StreamResultFuture createAndRegisterInitiator(TimeUUID planId, StreamOperation streamOperation, StreamCoordinator coordinator)
     {
         StreamResultFuture future = new StreamResultFuture(planId, streamOperation, coordinator);
         StreamManager.instance.registerInitiator(future);
@@ -137,16 +145,16 @@
         return coordinator;
     }
 
-    private void attachConnection(InetAddressAndPort from, int sessionIndex)
+    private void initInbound(InetAddressAndPort from, StreamingChannel channel, int messagingVersion, int sessionIndex)
     {
-        StreamSession session = coordinator.getOrCreateSessionById(from, sessionIndex);
+        StreamSession session = coordinator.getOrCreateInboundSession(from, channel, messagingVersion, sessionIndex);
         session.init(this);
     }
 
     @SuppressWarnings("UnstableApiUsage")
     public void addEventListener(StreamEventHandler listener)
     {
-        Futures.addCallback(this, listener, MoreExecutors.directExecutor());
+        addCallback(listener);
         eventListeners.add(listener);
     }
 
@@ -173,7 +181,7 @@
         return planId.hashCode();
     }
 
-    void handleSessionPrepared(StreamSession session)
+    void handleSessionPrepared(StreamSession session, StreamSession.PrepareDirection prepareDirection)
     {
         SessionInfo sessionInfo = session.getSessionInfo();
         logger.info("[Stream #{} ID#{}] Prepare completed. Receiving {} files({}), sending {} files({})",
@@ -183,14 +191,14 @@
                               FBUtilities.prettyPrintMemory(sessionInfo.getTotalSizeToReceive()),
                               sessionInfo.getTotalFilesToSend(),
                               FBUtilities.prettyPrintMemory(sessionInfo.getTotalSizeToSend()));
-        StreamEvent.SessionPreparedEvent event = new StreamEvent.SessionPreparedEvent(planId, sessionInfo);
+        StreamEvent.SessionPreparedEvent event = new StreamEvent.SessionPreparedEvent(planId, sessionInfo, prepareDirection);
         coordinator.addSessionInfo(sessionInfo);
         fireStreamEvent(event);
     }
 
     void handleSessionComplete(StreamSession session)
     {
-        logger.info("[Stream #{}] Session with {} is complete", session.planId(), session.peer);
+        logger.info("[Stream #{}] Session with {} is {}", session.planId(), session.peer, session.state().name().toLowerCase());
         fireStreamEvent(new StreamEvent.SessionCompleteEvent(session));
         SessionInfo sessionInfo = session.getSessionInfo();
         coordinator.addSessionInfo(sessionInfo);
@@ -206,8 +214,22 @@
     synchronized void fireStreamEvent(StreamEvent event)
     {
         // delegate to listener
+        long startNanos = nanoTime();
         for (StreamEventHandler listener : eventListeners)
-            listener.handleStreamEvent(event);
+        {
+            try
+            {
+                listener.handleStreamEvent(event);
+            }
+            catch (Throwable t)
+            {
+                logger.warn("Unexpected exception in listener while calling handleStreamEvent", t);
+            }
+        }
+        long totalNanos = nanoTime() - startNanos;
+        if (totalNanos > slowEventsLogTimeoutNanos)
+            NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES, "Handling streaming events took longer than {}; took {}",
+                             () -> new Object[] { Duration.ofNanos(slowEventsLogTimeoutNanos), Duration.ofNanos(totalNanos)});
     }
 
     private synchronized void maybeComplete()
@@ -218,17 +240,17 @@
             if (finalState.hasFailedSession())
             {
                 logger.warn("[Stream #{}] Stream failed", planId);
-                setException(new StreamException(finalState, "Stream failed"));
+                tryFailure(new StreamException(finalState, "Stream failed"));
             }
             else if (finalState.hasAbortedSession())
             {
                 logger.info("[Stream #{}] Stream aborted", planId);
-                set(finalState);
+                trySuccess(finalState);
             }
             else
             {
                 logger.info("[Stream #{}] All sessions completed", planId);
-                set(finalState);
+                trySuccess(finalState);
             }
         }
     }
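
The patched fireStreamEvent isolates each listener (one failing listener no longer aborts the others) and flags dispatch that exceeds the configured slow-events timeout. A standalone sketch of that guard pattern, assuming plain System.nanoTime() and stderr in place of NoSpamLogger:

    import java.time.Duration;
    import java.util.List;
    import java.util.function.Consumer;

    // Sketch: isolate listeners and warn when total dispatch time exceeds a budget.
    final class SlowListenerGuardSketch
    {
        static <E> void fire(E event, List<Consumer<E>> listeners, long slowTimeoutNanos)
        {
            long start = System.nanoTime();
            for (Consumer<E> listener : listeners)
            {
                try
                {
                    listener.accept(event); // one failing listener must not break the others
                }
                catch (Throwable t)
                {
                    System.err.println("listener failed: " + t);
                }
            }
            long total = System.nanoTime() - start;
            if (total > slowTimeoutNanos)
                System.err.println("listeners took " + Duration.ofNanos(total) + " (budget " + Duration.ofNanos(slowTimeoutNanos) + ")");
        }
    }
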
diff --git a/src/java/org/apache/cassandra/streaming/StreamSession.java b/src/java/org/apache/cassandra/streaming/StreamSession.java
index a2e479c..811717f 100644
--- a/src/java/org/apache/cassandra/streaming/StreamSession.java
+++ b/src/java/org/apache/cassandra/streaming/StreamSession.java
@@ -19,21 +19,36 @@
 
 import java.io.EOFException;
 import java.net.SocketTimeoutException;
-import java.util.*;
-import java.util.concurrent.*;
+import java.nio.channels.ClosedChannelException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.Nullable;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.*;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
+import io.netty.channel.Channel;
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelId;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.dht.Range;
@@ -42,16 +57,17 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.metrics.StreamingMetrics;
-import org.apache.cassandra.net.OutboundConnectionSettings;
 import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.streaming.async.NettyStreamingMessageSender;
+import org.apache.cassandra.streaming.async.StreamingMultiplexedChannel;
 import org.apache.cassandra.streaming.messages.*;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NoSpamLogger;
 
 import static com.google.common.collect.Iterables.all;
-import static org.apache.cassandra.net.MessagingService.current_version;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.locator.InetAddressAndPort.hostAddressAndPort;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
 
 /**
  * Handles the streaming a one or more streams to and from a specific remote node.
@@ -85,7 +101,7 @@
  * 3. Streaming phase
  *
  *   (a) The streaming phase is started at each node by calling {@link StreamSession#startStreamingFiles(boolean)}.
- *       This will send, sequentially on each outbound streaming connection (see {@link NettyStreamingMessageSender}),
+ *       This will send, sequentially on each outbound streaming connection (see {@link StreamingMultiplexedChannel}),
  *       an {@link OutgoingStreamMessage} for each stream in each of the {@link StreamTransferTask}.
  *       Each {@link OutgoingStreamMessage} consists of a {@link StreamMessageHeader} that contains metadata about
  *       the stream, followed by the stream content itself. Once all the files for a {@link StreamTransferTask} are sent,
@@ -126,12 +142,14 @@
  *
  * All messages which derive from {@link StreamMessage} are sent by the standard internode messaging
  * (via {@link org.apache.cassandra.net.MessagingService}, while the actual files themselves are sent by a special
- * "streaming" connection type. See {@link NettyStreamingMessageSender} for details. Because of the asynchronous
+ * "streaming" connection type. See {@link StreamingMultiplexedChannel} for details. Because of the asynchronous
  */
 public class StreamSession implements IEndpointStateChangeSubscriber
 {
     private static final Logger logger = LoggerFactory.getLogger(StreamSession.class);
 
+    public enum PrepareDirection { SEND, ACK }
+
     // for test purpose to record received message and state transition
     public volatile static MessageStateSink sink = MessageStateSink.NONE;
 
@@ -143,7 +161,6 @@
      * Each {@code StreamSession} is identified by this InetAddressAndPort which is broadcast address of the node streaming.
      */
     public final InetAddressAndPort peer;
-    private final OutboundConnectionSettings template;
 
     private final int index;
 
@@ -162,9 +179,10 @@
     final Map<String, Set<Range<Token>>> transferredRangesPerKeyspace = new HashMap<>();
 
     private final boolean isFollower;
-    private final NettyStreamingMessageSender messageSender;
+    private final StreamingMultiplexedChannel channel;
     // contains both inbound and outbound channels
-    private final ConcurrentMap<ChannelId, Channel> channels = new ConcurrentHashMap<>();
+    private final ConcurrentMap<Object, StreamingChannel> inbound = new ConcurrentHashMap<>();
+    private final ConcurrentMap<Object, StreamingChannel> outbound = new ConcurrentHashMap<>();
 
     // "maybeCompleted()" should be executed at most once. Because it can be executed asynchronously by IO
     // threads(serialization/deserialization) and stream messaging processing thread, causing connection closed before
@@ -172,7 +190,7 @@
     private boolean maybeCompleted = false;
     private Future<?> closeFuture;
 
-    private final UUID pendingRepair;
+    private final TimeUUID pendingRepair;
     private final PreviewKind previewKind;
 
 /**
@@ -226,21 +244,18 @@
     /**
      * Create new streaming session with the peer.
      */
-    public StreamSession(StreamOperation streamOperation, InetAddressAndPort peer, StreamConnectionFactory factory,
-                         boolean isFollower, int index, UUID pendingRepair, PreviewKind previewKind)
+    public StreamSession(StreamOperation streamOperation, InetAddressAndPort peer, StreamingChannel.Factory factory, @Nullable StreamingChannel controlChannel, int messagingVersion,
+                         boolean isFollower, int index, TimeUUID pendingRepair, PreviewKind previewKind)
     {
         this.streamOperation = streamOperation;
         this.peer = peer;
-        this.template = new OutboundConnectionSettings(peer);
         this.isFollower = isFollower;
         this.index = index;
 
-        this.messageSender = new NettyStreamingMessageSender(this, template, factory, current_version, previewKind.isPreview());
+        this.channel = new StreamingMultiplexedChannel(this, factory, peer, controlChannel, messagingVersion);
         this.metrics = StreamingMetrics.get(peer);
         this.pendingRepair = pendingRepair;
         this.previewKind = previewKind;
-
-        logger.debug("Creating stream session to {} as {}", template, isFollower ? "follower" : "initiator");
     }
 
     public boolean isFollower()
@@ -248,7 +263,7 @@
         return isFollower;
     }
 
-    public UUID planId()
+    public TimeUUID planId()
     {
         return streamResult == null ? null : streamResult.planId;
     }
@@ -268,7 +283,7 @@
         return streamOperation;
     }
 
-    public UUID getPendingRepair()
+    public TimeUUID getPendingRepair()
     {
         return pendingRepair;
     }
@@ -305,18 +320,19 @@
      * Attach a channel to this session upon receiving the first inbound message.
      *
      * @param channel The channel to attach.
-     * @param isControlChannel If the channel is the one to send control messages to.
      * @return False if the channel was already attached, true otherwise.
      */
-    public synchronized boolean attachInbound(Channel channel, boolean isControlChannel)
+    public synchronized boolean attachInbound(StreamingChannel channel)
     {
         failIfFinished();
 
-        if (!messageSender.hasControlChannel() && isControlChannel)
-            messageSender.injectControlMessageChannel(channel);
-
-        channel.closeFuture().addListener(ignored -> onChannelClose(channel));
-        return channels.putIfAbsent(channel.id(), channel) == null;
+        boolean attached = inbound.putIfAbsent(channel.id(), channel) == null;
+        if (attached)
+            channel.onClose(() -> {
+                if (null != inbound.remove(channel.id()) && inbound.isEmpty())
+                    this.channel.close();
+            });
+        return attached;
     }
 
     /**
@@ -325,22 +341,14 @@
      * @param channel The channel to attach.
      * @return False if the channel was already attached, true otherwise.
      */
-    public synchronized boolean attachOutbound(Channel channel)
+    public synchronized boolean attachOutbound(StreamingChannel channel)
     {
         failIfFinished();
 
-        channel.closeFuture().addListener(ignored -> onChannelClose(channel));
-        return channels.putIfAbsent(channel.id(), channel) == null;
-    }
-
-    /**
-     * On channel closing, if no channels are left just close the message sender; this must be closed last to ensure
-     * keep alive messages are sent until the very end of the streaming session.
-     */
-    private void onChannelClose(Channel channel)
-    {
-        if (channels.remove(channel.id()) != null && channels.isEmpty())
-            messageSender.close();
+        boolean attached = outbound.putIfAbsent(channel.id(), channel) == null;
+        if (attached)
+            channel.onClose(() -> outbound.remove(channel.id()));
+        return attached;
     }
 
     /**
@@ -358,9 +366,17 @@
         try
         {
             logger.info("[Stream #{}] Starting streaming to {}{}", planId(),
-                                                                   peer,
-                                                                   template.connectTo == null ? "" : " through " + template.connectTo);
-            messageSender.initialize();
+                        hostAddressAndPort(channel.peer()),
+                        channel.connectedTo().equals(channel.peer()) ? "" : " through " + hostAddressAndPort(channel.connectedTo()));
+
+            StreamInitMessage message = new StreamInitMessage(getBroadcastAddressAndPort(),
+                                                              sessionIndex(),
+                                                              planId(),
+                                                              streamOperation(),
+                                                              getPendingRepair(),
+                                                              getPreviewKind());
+
+            channel.sendControlMessage(message).sync();
             onInitializationComplete();
         }
         catch (Exception e)
@@ -443,7 +459,7 @@
     }
 
     @VisibleForTesting
-    public List<OutgoingStream> getOutgoingStreamsForRanges(RangesAtEndpoint replicas, Collection<ColumnFamilyStore> stores, UUID pendingRepair, PreviewKind previewKind)
+    public List<OutgoingStream> getOutgoingStreamsForRanges(RangesAtEndpoint replicas, Collection<ColumnFamilyStore> stores, TimeUUID pendingRepair, PreviewKind previewKind)
     {
         List<OutgoingStream> streams = new ArrayList<>();
         try
@@ -499,13 +515,14 @@
         // due to failure, channels should always be closed regardless, even if this is not the initiator.
         if (!isFollower || state != State.COMPLETE)
         {
-            logger.debug("[Stream #{}] Will close attached channels {}", planId(), channels);
-            channels.values().forEach(channel -> futures.add(channel.close()));
+            logger.debug("[Stream #{}] Will close attached inbound {} and outbound {} channels", planId(), inbound, outbound);
+            inbound.values().forEach(channel -> futures.add(channel.close()));
+            outbound.values().forEach(channel -> futures.add(channel.close()));
         }
 
         sink.onClose(peer);
         streamResult.handleSessionComplete(this);
-        closeFuture = FBUtilities.allOf(futures);
+        closeFuture = FutureCombiner.allOf(futures);
 
         return closeFuture;
     }
@@ -530,8 +547,8 @@
      */
     public void state(State newState)
     {
-        if (logger.isTraceEnabled())
-            logger.trace("[Stream #{}] Changing session state from {} to {}", planId(), state, newState);
+        if (logger.isDebugEnabled())
+            logger.debug("[Stream #{}] Changing session state from {} to {}", planId(), state, newState);
 
         sink.recordState(peer, newState);
         state = newState;
@@ -545,9 +562,9 @@
         return state;
     }
 
-    public NettyStreamingMessageSender getMessageSender()
+    public StreamingMultiplexedChannel getChannel()
     {
-        return messageSender;
+        return channel;
     }
 
     /**
@@ -621,7 +638,7 @@
             prepare.summaries.add(task.getSummary());
         }
 
-        messageSender.sendMessage(prepare);
+        channel.sendControlMessage(prepare).syncUninterruptibly();
     }
 
     /**
@@ -629,11 +646,12 @@
      * after completion or because the peer was down, otherwise sends a {@link SessionFailedMessage} and closes
      * the session as {@link State#FAILED}.
      */
-    public synchronized Future<?> onError(Throwable e)
+    public Future<?> onError(Throwable e)
     {
-        boolean isEofException = e instanceof EOFException;
+        boolean isEofException = e instanceof EOFException || e instanceof ClosedChannelException;
         if (isEofException)
         {
+            State state = this.state;
             if (state.finalState)
             {
                 logger.debug("[Stream #{}] Socket closed after session completed with state {}", planId(), state);
@@ -653,10 +671,10 @@
 
         logError(e);
 
-        if (messageSender.connected())
+        if (channel.connected())
         {
-            state(State.FAILED); // make sure subsequent error handling sees the session in a final state
-            messageSender.sendMessage(new SessionFailedMessage());
+            state(State.FAILED); // make sure subsequent error handling sees the session in a final state 
+            channel.sendControlMessage(new SessionFailedMessage()).awaitUninterruptibly();
         }
 
         return closeSession(State.FAILED);
@@ -666,18 +684,18 @@
     {
         if (e instanceof SocketTimeoutException)
         {
-            logger.error("[Stream #{}] Did not receive response from peer {}{} for {} secs. Is peer down? " +
-                         "If not, maybe try increasing streaming_keep_alive_period_in_secs.", planId(),
-                         peer.getHostAddressAndPort(),
-                         template.connectTo == null ? "" : " through " + template.connectTo.getHostAddressAndPort(),
-                         2 * DatabaseDescriptor.getStreamingKeepAlivePeriod(),
+            logger.error("[Stream #{}] Timeout from peer {}{}. Is peer down? " +
+                         "If not, and earlier failure detection is required, enable (or lower) streaming_keep_alive_period.",
+                         planId(),
+                         hostAddressAndPort(channel.peer()),
+                         channel.peer().equals(channel.connectedTo()) ? "" : " through " + hostAddressAndPort(channel.connectedTo()),
                          e);
         }
         else
         {
             logger.error("[Stream #{}] Streaming error occurred on session with peer {}{}", planId(),
-                         peer.getHostAddressAndPort(),
-                         template.connectTo == null ? "" : " through " + template.connectTo.getHostAddressAndPort(),
+                         hostAddressAndPort(channel.peer()),
+                         channel.peer().equals(channel.connectedTo()) ? "" : " through " + hostAddressAndPort(channel.connectedTo()),
                          e);
         }
     }
@@ -701,6 +719,11 @@
         });
     }
 
+    public void countStreamedIn(boolean isEntireSSTable)
+    {
+        metrics.countStreamedIn(isEntireSSTable);
+    }
+
     /**
      * Finish preparing the session. This method is blocking (memtables are flushed in {@link #addTransferRanges}),
      * so the logic should not execute on the main IO thread (read: netty event loop).
@@ -716,9 +739,16 @@
         if (!peer.equals(FBUtilities.getBroadcastAddressAndPort()))
             for (StreamTransferTask task : transfers.values())
                 prepareSynAck.summaries.add(task.getSummary());
-        messageSender.sendMessage(prepareSynAck);
 
-        streamResult.handleSessionPrepared(this);
+        streamResult.handleSessionPrepared(this, PrepareDirection.SEND);
+        // After sending the message the initiator may close the channel, which causes a ClosedChannelException
+        // in the buffer logic; that exception reaches onError, which fails the session unless its state is already
+        // final.  To avoid a race between sending the message and setting the state, update the state first
+        // (without closing the channel).
+        // See CASSANDRA-17116
+        if (isPreview())
+            state(State.COMPLETE);
+        channel.sendControlMessage(prepareSynAck).syncUninterruptibly();
 
         if (isPreview())
             completePreview();
@@ -735,20 +765,20 @@
 
             // only send the (final) ACK if we are expecting the peer to send this node (the initiator) some files
             if (!isPreview())
-                messageSender.sendMessage(new PrepareAckMessage());
+                channel.sendControlMessage(new PrepareAckMessage()).syncUninterruptibly();
         }
 
         if (isPreview())
             completePreview();
         else
-            startStreamingFiles(true);
+            startStreamingFiles(PrepareDirection.ACK);
     }
 
     private void prepareAck(PrepareAckMessage msg)
     {
         if (isPreview())
             throw new RuntimeException(String.format("[Stream #%s] Cannot receive PrepareAckMessage for preview session", planId()));
-        startStreamingFiles(true);
+        startStreamingFiles(PrepareDirection.ACK);
     }
 
     /**
@@ -792,16 +822,16 @@
         StreamingMetrics.totalIncomingBytes.inc(headerSize);
         metrics.incomingBytes.inc(headerSize);
         // send back file received message
-        messageSender.sendMessage(new ReceivedMessage(message.header.tableId, message.header.sequenceNumber));
+        channel.sendControlMessage(new ReceivedMessage(message.header.tableId, message.header.sequenceNumber)).syncUninterruptibly();
         StreamHook.instance.reportIncomingStream(message.header.tableId, message.stream, this, message.header.sequenceNumber);
-        long receivedStartNanos = System.nanoTime();
+        long receivedStartNanos = nanoTime();
         try
         {
             receivers.get(message.header.tableId).received(message.stream);
         }
         finally
         {
-            long latencyNanos = System.nanoTime() - receivedStartNanos;
+            long latencyNanos = nanoTime() - receivedStartNanos;
             metrics.incomingProcessTime.update(latencyNanos, TimeUnit.NANOSECONDS);
             long latencyMs = TimeUnit.NANOSECONDS.toMillis(latencyNanos);
             int timeout = DatabaseDescriptor.getInternodeStreamingTcpUserTimeoutInMS();
@@ -811,15 +841,19 @@
                                  "The time taken ({} ms) for processing the incoming stream message ({})" +
                                  " exceeded internode streaming TCP user timeout ({} ms).\n" +
                                  "The streaming connection might be closed due to tcp user timeout.\n" +
-                                 "Try to increase the internode_streaming_tcp_user_timeout_in_ms" +
+                                 "Try to increase the internode_streaming_tcp_user_timeout" +
                                  " or set it to 0 to use system defaults.",
                                  latencyMs, message, timeout);
         }
     }
 
-    public void progress(String filename, ProgressInfo.Direction direction, long bytes, long total)
+    public void progress(String filename, ProgressInfo.Direction direction, long bytes, long delta, long total)
     {
-        ProgressInfo progress = new ProgressInfo(peer, index, filename, direction, bytes, total);
+        if (delta < 0)
+            NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES,
+                             "[id={}, key={{}, {}, {}}] Stream event reported a negative delta ({})",
+                             planId(), peer, filename, direction, delta);
+        ProgressInfo progress = new ProgressInfo(peer, index, filename, direction, bytes, delta, total);
         streamResult.handleProgress(progress);
     }
 
@@ -835,14 +869,11 @@
     {
         logger.debug("[Stream #{}] handling Complete message, state = {}", planId(), state);
 
-        if (!isFollower)
+        if (!isFollower) // initiator
         {
-            if (state == State.WAIT_COMPLETE)
-                closeSession(State.COMPLETE);
-            else
-                state(State.WAIT_COMPLETE);
+            initiatorCompleteOrWait();
         }
-        else
+        else // follower
         {
             // pre-4.0 nodes should not be connected via streaming, see {@link MessagingService#accept_streaming}
             throw new IllegalStateException(String.format("[Stream #%s] Complete message can be only received by the initiator!", planId()));
@@ -862,22 +893,35 @@
             return true;
 
         maybeCompleted = true;
-        if (!isFollower)
+        if (!isFollower) // initiator
         {
-            if (state == State.WAIT_COMPLETE)
-                closeSession(State.COMPLETE);
-            else
-                state(State.WAIT_COMPLETE);
+            initiatorCompleteOrWait();
         }
-        else
+        else // follower
         {
-            messageSender.sendMessage(new CompleteMessage());
+            // After sending the message the initiator may close the channel, which causes a ClosedChannelException
+            // in the buffer logic; that exception reaches onError, which fails the session unless its state is already
+            // final.  To avoid a race between sending the message and setting the state, update the state first
+            // (without closing the channel).
+            // See CASSANDRA-17116
+            state(State.COMPLETE);
+            channel.sendControlMessage(new CompleteMessage()).syncUninterruptibly();
             closeSession(State.COMPLETE);
         }
 
         return true;
     }
 
+    private void initiatorCompleteOrWait()
+    {
+        // This is called when coordination completes AND when the COMPLETE message is seen; it is possible that the
+        // COMPLETE message is seen first!
+        if (state == State.WAIT_COMPLETE)
+            closeSession(State.COMPLETE);
+        else
+            state(State.WAIT_COMPLETE);
+    }
+
     /**
      * Call back on receiving {@code StreamMessage.Type.SESSION_FAILED} message.
      */
@@ -888,6 +932,15 @@
     }
 
     /**
+     * Call back when the stream session has timed out, e.g. while waiting for the peer to acknowledge a stream.
+     */
+    public synchronized void sessionTimeout()
+    {
+        logger.error("[Stream #{}] timeout with {}.", planId(), peer.toString());
+        closeSession(State.FAILED);
+    }
+
+    /**
      * @return Current snapshot of this session info.
      */
     public SessionInfo getSessionInfo()
@@ -898,8 +951,7 @@
         List<StreamSummary> transferSummaries = Lists.newArrayList();
         for (StreamTask transfer : transfers.values())
             transferSummaries.add(transfer.getSummary());
-        // TODO: the connectTo treatment here is peculiar, and needs thinking about - since the connection factory can change it
-        return new SessionInfo(peer, index, template.connectTo == null ? peer : template.connectTo, receivingSummaries, transferSummaries, state);
+        return new SessionInfo(channel.peer(), index, channel.connectedTo(), receivingSummaries, transferSummaries, state);
     }
 
     public synchronized void taskCompleted(StreamReceiveTask completedTask)
@@ -950,7 +1002,7 @@
     {
         List<Future<?>> flushes = new ArrayList<>();
         for (ColumnFamilyStore cfs : stores)
-            flushes.add(cfs.forceFlush());
+            flushes.add(cfs.forceFlush(ColumnFamilyStore.FlushReason.STREAMING));
         FBUtilities.waitOnFutures(flushes);
     }
 
@@ -962,10 +1014,10 @@
             receivers.put(summary.tableId, new StreamReceiveTask(this, summary.tableId, summary.files, summary.totalSize));
     }
 
-    private void startStreamingFiles(boolean notifyPrepared)
+    private void startStreamingFiles(@Nullable PrepareDirection prepareDirection)
     {
-        if (notifyPrepared)
-            streamResult.handleSessionPrepared(this);
+        if (prepareDirection != null)
+            streamResult.handleSessionPrepared(this, prepareDirection);
 
         state(State.STREAMING);
 
@@ -978,7 +1030,8 @@
                 {
                     // pass the session planId/index to the OFM (which is only set at init(), after the transfers have already been created)
                     ofm.header.addSessionInfo(this);
-                    messageSender.sendMessage(ofm);
+                    // do not sync here as this does disk access
+                    channel.sendControlMessage(ofm);
                 }
             }
             else
@@ -1040,6 +1093,36 @@
         public void onClose(InetAddressAndPort from);
     }
 
+    public static String createLogTag(StreamSession session)
+    {
+        return createLogTag(session, (Object) null);
+    }
+
+    public static String createLogTag(StreamSession session, StreamingChannel channel)
+    {
+        return createLogTag(session, channel == null ? null : channel.id());
+    }
+
+    public static String createLogTag(StreamSession session, Channel channel)
+    {
+        return createLogTag(session, channel == null ? null : channel.id());
+    }
+
+    public static String createLogTag(StreamSession session, Object channelId)
+    {
+        StringBuilder sb = new StringBuilder(64);
+        sb.append("[Stream");
+
+        if (session != null)
+            sb.append(" #").append(session.planId());
+
+        if (channelId != null)
+            sb.append(" channel: ").append(channelId);
+
+        sb.append(']');
+        return sb.toString();
+    }
+
     public synchronized void abort()
     {
         if (state.isFinalState())
@@ -1050,8 +1133,8 @@
 
         logger.info("[Stream #{}] Aborting stream session with peer {}...", planId(), peer);
 
-        if (getMessageSender().connected())
-            getMessageSender().sendMessage(new SessionFailedMessage());
+        if (channel.connected())
+            channel.sendControlMessage(new SessionFailedMessage());
 
         try
         {
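The inbound/outbound maps above replace the single channels map and the old onChannelClose() callback: each attached channel removes itself from its map when it closes, and the multiplexed control channel is torn down once the last inbound channel is gone. Below is a minimal sketch of that bookkeeping, using hypothetical trimmed-down stand-ins (MiniChannel, ChannelBookkeeping) rather than the real StreamingChannel/StreamingMultiplexedChannel types.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Hypothetical stand-ins, used only to illustrate the attach/close bookkeeping shown above.
    interface MiniChannel
    {
        Object id();
        void onClose(Runnable runOnClose);
        void close();
    }

    class ChannelBookkeeping
    {
        private final ConcurrentMap<Object, MiniChannel> inbound = new ConcurrentHashMap<>();
        private final ConcurrentMap<Object, MiniChannel> outbound = new ConcurrentHashMap<>();
        private final MiniChannel control; // analogous to StreamingMultiplexedChannel

        ChannelBookkeeping(MiniChannel control)
        {
            this.control = control;
        }

        synchronized boolean attachInbound(MiniChannel channel)
        {
            boolean attached = inbound.putIfAbsent(channel.id(), channel) == null;
            if (attached)
                channel.onClose(() -> {
                    // when the last inbound channel closes, tear down the control channel too
                    if (null != inbound.remove(channel.id()) && inbound.isEmpty())
                        control.close();
                });
            return attached;
        }

        synchronized boolean attachOutbound(MiniChannel channel)
        {
            boolean attached = outbound.putIfAbsent(channel.id(), channel) == null;
            if (attached)
                channel.onClose(() -> outbound.remove(channel.id()));
            return attached;
        }
    }

Keeping the two directions in separate maps lets the session close the control path only when inbound traffic has fully drained, while outbound channels can come and go without affecting it.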
diff --git a/src/java/org/apache/cassandra/streaming/StreamState.java b/src/java/org/apache/cassandra/streaming/StreamState.java
index 1b7c042..88eb76d 100644
--- a/src/java/org/apache/cassandra/streaming/StreamState.java
+++ b/src/java/org/apache/cassandra/streaming/StreamState.java
@@ -20,21 +20,22 @@
 import java.io.Serializable;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
+import org.apache.cassandra.utils.TimeUUID;
+
 /**
  * Current snapshot of streaming progress.
  */
 public class StreamState implements Serializable
 {
-    public final UUID planId;
+    public final TimeUUID planId;
     public final StreamOperation streamOperation;
     public final Set<SessionInfo> sessions;
 
-    public StreamState(UUID planId, StreamOperation streamOperation, Set<SessionInfo> sessions)
+    public StreamState(TimeUUID planId, StreamOperation streamOperation, Set<SessionInfo> sessions)
     {
         this.planId = planId;
         this.sessions = sessions;
diff --git a/src/java/org/apache/cassandra/streaming/StreamTransferTask.java b/src/java/org/apache/cassandra/streaming/StreamTransferTask.java
index 1fbd540..45fbcc6 100644
--- a/src/java/org/apache/cassandra/streaming/StreamTransferTask.java
+++ b/src/java/org/apache/cassandra/streaming/StreamTransferTask.java
@@ -22,7 +22,6 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -33,12 +32,12 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.messages.OutgoingStreamMessage;
+import org.apache.cassandra.utils.ExecutorUtils;
 
-import static org.apache.cassandra.utils.ExecutorUtils.awaitTermination;
-import static org.apache.cassandra.utils.ExecutorUtils.shutdown;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 
 /**
  * StreamTransferTask sends streams for a given table
@@ -46,7 +45,7 @@
 public class StreamTransferTask extends StreamTask
 {
     private static final Logger logger = LoggerFactory.getLogger(StreamTransferTask.class);
-    private static final ScheduledThreadPoolExecutor timeoutExecutor = createTimeoutExecutor();
+    private static final ScheduledExecutorPlus timeoutExecutor = executorFactory().scheduled(false, "StreamingTransferTaskTimeouts");
 
     private final AtomicInteger sequenceNumber = new AtomicInteger(0);
     private boolean aborted = false;
@@ -91,7 +90,7 @@
             if (stream != null)
                 stream.complete();
 
-            logger.debug("recevied sequenceNumber {}, remaining files {}", sequenceNumber, streams.keySet());
+            logger.debug("received sequenceNumber {}, remaining files {}", sequenceNumber, streams.keySet());
             signalComplete = streams.isEmpty();
         }
 
@@ -100,6 +99,26 @@
             session.taskCompleted(this);
     }
 
+    /**
+     * Timed out waiting for the ACK of the stream at {@code sequenceNumber}.
+     *
+     * @param sequenceNumber sequence number of stream
+     */
+    public void timeout(int sequenceNumber)
+    {
+        synchronized (this)
+        {
+            timeoutTasks.remove(sequenceNumber);
+            OutgoingStreamMessage stream = streams.remove(sequenceNumber);
+            if (stream == null) return;
+            stream.complete();
+
+            logger.debug("timeout sequenceNumber {}, remaining files {}", sequenceNumber, streams.keySet());
+        }
+
+        session.sessionTimeout();
+    }
+
     public synchronized void abort()
     {
         if (aborted)
@@ -169,35 +188,15 @@
         if (!streams.containsKey(sequenceNumber))
             return null;
 
-        ScheduledFuture<?> future = timeoutExecutor.schedule(new Runnable()
-        {
-            public void run()
-            {
-                synchronized (StreamTransferTask.this)
-                {
-                    // remove so we don't cancel ourselves
-                    timeoutTasks.remove(sequenceNumber);
-                    StreamTransferTask.this.complete(sequenceNumber);
-                }
-            }
-        }, time, unit);
-
+        ScheduledFuture<?> future = timeoutExecutor.scheduleTimeoutWithDelay(() -> StreamTransferTask.this.timeout(sequenceNumber), time, unit);
         ScheduledFuture<?> prev = timeoutTasks.put(sequenceNumber, future);
         assert prev == null;
         return future;
     }
 
-    private static ScheduledThreadPoolExecutor createTimeoutExecutor()
-    {
-        ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("StreamingTransferTaskTimeouts"));
-        executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-        return executor;
-    }
-
     @VisibleForTesting
     public static void shutdownAndWait(long timeout, TimeUnit units) throws InterruptedException, TimeoutException
     {
-        shutdown(timeoutExecutor);
-        awaitTermination(timeout, units, timeoutExecutor);
+        ExecutorUtils.shutdownAndWait(timeout, units, timeoutExecutor);
     }
 }
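The scheduleTimeout() rewrite above swaps the anonymous Runnable (which used to call complete() on expiry) for a lambda invoking the new timeout() method, which in turn fails the session via sessionTimeout(). A rough equivalent of the scheduling pattern using only java.util.concurrent, with ScheduledExecutorPlus.scheduleTimeoutWithDelay assumed here to behave like a plain schedule call:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    // Illustrative only: schedules a per-sequence-number timeout and cancels it when the ACK arrives.
    class AckTimeouts
    {
        private final ScheduledExecutorService timeoutExecutor =
            Executors.newSingleThreadScheduledExecutor();
        private final Map<Integer, ScheduledFuture<?>> timeoutTasks = new ConcurrentHashMap<>();

        ScheduledFuture<?> scheduleTimeout(int sequenceNumber, long time, TimeUnit unit, Runnable onTimeout)
        {
            ScheduledFuture<?> future = timeoutExecutor.schedule(() -> {
                timeoutTasks.remove(sequenceNumber); // remove first so the timeout does not cancel itself
                onTimeout.run();                     // e.g. StreamTransferTask.timeout(sequenceNumber)
            }, time, unit);
            ScheduledFuture<?> prev = timeoutTasks.put(sequenceNumber, future);
            assert prev == null;
            return future;
        }

        void ackReceived(int sequenceNumber)
        {
            ScheduledFuture<?> timeout = timeoutTasks.remove(sequenceNumber);
            if (timeout != null)
                timeout.cancel(false); // ACK arrived in time, drop the pending timeout
        }
    }

Removing the entry from the task map before running the timeout action mirrors the original "remove so we don't cancel ourselves" guard.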
diff --git a/src/java/org/apache/cassandra/streaming/StreamingChannel.java b/src/java/org/apache/cassandra/streaming/StreamingChannel.java
new file mode 100644
index 0000000..6b623b8
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/StreamingChannel.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.function.IntFunction;
+
+import io.netty.util.concurrent.Future; //checkstyle: permit this import
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface StreamingChannel
+{
+    public interface Factory
+    {
+        public static class Global
+        {
+            private static StreamingChannel.Factory FACTORY = new NettyStreamingConnectionFactory();
+            public static StreamingChannel.Factory streamingFactory()
+            {
+                return FACTORY;
+            }
+
+            public static void unsafeSet(StreamingChannel.Factory factory)
+            {
+                FACTORY = factory;
+            }
+        }
+
+        StreamingChannel create(InetSocketAddress to, int messagingVersion, Kind kind) throws IOException;
+
+        default StreamingChannel create(InetSocketAddress to,
+                                        InetSocketAddress preferred,
+                                        int messagingVersion,
+                                        StreamingChannel.Kind kind) throws IOException
+        {
+            // Implementations can decide whether or not to do something with the preferred address.
+            return create(to, messagingVersion, kind);
+        }
+
+        /** Provides a way to disable getPreferredIP() for tools without access to the system keyspace.
+         *
+         * CASSANDRA-17663 moves calls to SystemKeyspace.getPreferredIP() outside of any threads
+         * that are regularly interrupted.  However, the streaming subsystem is also used
+         * by the bulk loader tool, which does not have direct access to the local tables
+         * and uses the client metadata/queries to retrieve it.
+         *
+         * @return true if SystemKeyspace.getPreferredIP() should be used when connecting
+         */
+        default boolean supportsPreferredIp()
+        {
+            return true;
+        }
+    }
+
+    public enum Kind { CONTROL, FILE }
+
+    public interface Send
+    {
+        void send(IntFunction<StreamingDataOutputPlus> outSupplier) throws IOException;
+    }
+
+    Object id();
+    String description();
+
+    InetSocketAddress peer();
+    InetSocketAddress connectedTo();
+    boolean connected();
+
+    StreamingDataInputPlus in();
+
+    /**
+     * Until the returned output is closed, {@link #send(Send)} cannot be invoked.
+     */
+    StreamingDataOutputPlus acquireOut();
+    Future<?> send(Send send) throws IOException;
+
+    Future<?> close();
+    void onClose(Runnable runOnClose);
+}
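As a rough usage sketch of the new interface, a caller obtains the pluggable factory from Factory.Global and asks it for a control channel; the peer address and messaging version in the snippet are placeholders, not values taken from this patch.

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.cassandra.streaming.StreamingChannel;

    public final class StreamingChannelUsageSketch
    {
        // Opens a CONTROL channel to the given peer via the globally configured factory.
        // Callers are responsible for closing the returned channel (close() returns a Future).
        static StreamingChannel openControlChannel(InetSocketAddress peer, int messagingVersion) throws IOException
        {
            StreamingChannel.Factory factory = StreamingChannel.Factory.Global.streamingFactory();
            return factory.create(peer, messagingVersion, StreamingChannel.Kind.CONTROL);
        }
    }

Tests and simulations can swap out the default Netty-backed implementation through Factory.Global.unsafeSet(...), which is why the factory is looked up rather than constructed directly.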
diff --git a/src/java/org/apache/cassandra/streaming/StreamingDataInputPlus.java b/src/java/org/apache/cassandra/streaming/StreamingDataInputPlus.java
new file mode 100644
index 0000000..0cfcc0d
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/StreamingDataInputPlus.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming;
+
+import java.io.Closeable;
+
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface StreamingDataInputPlus extends DataInputPlus, Closeable
+{
+    @Override
+    void close();
+}
diff --git a/src/java/org/apache/cassandra/streaming/StreamingDataOutputPlus.java b/src/java/org/apache/cassandra/streaming/StreamingDataOutputPlus.java
new file mode 100644
index 0000000..d845497
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/StreamingDataOutputPlus.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import io.netty.channel.FileRegion;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface StreamingDataOutputPlus extends DataOutputPlus, Closeable
+{
+    interface BufferSupplier
+    {
+        /**
+         * Request a buffer with at least the given capacity.
+         * This method may only be invoked once, and the lifetime of the buffer it returns will be managed
+         * by the AsyncChannelOutputPlus it was created for.
+         */
+        ByteBuffer get(int capacity) throws IOException;
+    }
+
+    interface Write
+    {
+        /**
+         * Write to a buffer, and flush its contents to the channel.
+         * <p>
+         * The lifetime of the buffer will be managed by the AsyncChannelOutputPlus you issue this Write to.
+         * If the method exits successfully, the contents of the buffer will be written to the channel, otherwise
+         * the buffer will be cleaned and the exception propagated to the caller.
+         */
+        void write(BufferSupplier supplier) throws IOException;
+    }
+
+    interface RateLimiter
+    {
+        void acquire(int bytes);
+
+        boolean isRateLimited();
+    }
+
+    /**
+     * Provide a lambda that can request a buffer of suitable size, then fill the buffer and have
+     * that buffer written and flushed to the underlying channel, without having to handle buffer
+     * allocation, lifetime or cleanup, including in case of exceptions.
+     * <p>
+     * Any exception thrown by the Write will be propagated to the caller, after any buffer is cleaned up.
+     */
+    int writeToChannel(Write write, RateLimiter limiter) throws IOException;
+
+    /**
+     * Writes all data in file channel to stream: <br>
+     * * For zero-copy-streaming, 1MiB at a time, with at most 2MiB in flight at once. <br>
+     * * For streaming with SSL, 64KiB at a time, with at most 32+64KiB (default low water mark + batch size) in flight. <br>
+     * <p>
+     * This method takes ownership of the provided {@link FileChannel}.
+     * <p>
+     * WARNING: this method blocks only for permission to write to the netty channel; it exits before
+     * the {@link FileRegion}(zero-copy) or {@link ByteBuffer}(ssl) is flushed to the network.
+     */
+    long writeFileToChannel(FileChannel file, RateLimiter limiter) throws IOException;
+
+    default void flush() throws IOException {}
+}
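The RateLimiter hook above only requires acquire(bytes) and isRateLimited(); as an illustration (not necessarily how the internode streaming code wires it up), a Guava-backed adapter could look like:

    import org.apache.cassandra.streaming.StreamingDataOutputPlus;

    // Sketch: adapt a Guava RateLimiter to the StreamingDataOutputPlus.RateLimiter hook.
    public class GuavaStreamRateLimiter implements StreamingDataOutputPlus.RateLimiter
    {
        private final com.google.common.util.concurrent.RateLimiter delegate;

        public GuavaStreamRateLimiter(double bytesPerSecond)
        {
            this.delegate = com.google.common.util.concurrent.RateLimiter.create(bytesPerSecond);
        }

        @Override
        public void acquire(int bytes)
        {
            if (bytes > 0)
                delegate.acquire(bytes); // blocks until enough permits (bytes) are available
        }

        @Override
        public boolean isRateLimited()
        {
            return true;
        }
    }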
diff --git a/src/java/org/apache/cassandra/streaming/StreamingDataOutputPlusFixed.java b/src/java/org/apache/cassandra/streaming/StreamingDataOutputPlusFixed.java
new file mode 100644
index 0000000..2159090
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/StreamingDataOutputPlusFixed.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import org.apache.cassandra.io.util.DataOutputBufferFixed;
+
+public class StreamingDataOutputPlusFixed extends DataOutputBufferFixed implements StreamingDataOutputPlus
+{
+    public StreamingDataOutputPlusFixed(ByteBuffer buffer)
+    {
+        super(buffer);
+    }
+
+    @Override
+    public int writeToChannel(Write write, RateLimiter limiter) throws IOException
+    {
+        int position = buffer.position();
+        write.write(size -> buffer);
+        return buffer.position() - position;
+    }
+
+    @Override
+    public long writeFileToChannel(FileChannel file, RateLimiter limiter) throws IOException
+    {
+        long count = 0;
+        long tmp;
+        while (0 <= (tmp = file.read(buffer))) count += tmp;
+        return count;
+    }
+}
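The fixed variant hands its backing buffer straight to the Write callback and reports how many bytes the callback produced. A small illustrative use follows; the null limiter is ignored by this implementation.

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.cassandra.streaming.StreamingDataOutputPlusFixed;

    public class StreamingDataOutputPlusFixedExample
    {
        public static void main(String[] args) throws IOException
        {
            ByteBuffer backing = ByteBuffer.allocate(16);
            try (StreamingDataOutputPlusFixed out = new StreamingDataOutputPlusFixed(backing))
            {
                // The Write callback asks the supplier for a buffer and fills it; with the fixed
                // implementation the supplier always returns the backing buffer, so 'written' is 8.
                int written = out.writeToChannel(buffers -> buffers.get(Long.BYTES).putLong(42L), null);
                assert written == Long.BYTES;
            }
        }
    }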
diff --git a/src/java/org/apache/cassandra/streaming/StreamingMessageSender.java b/src/java/org/apache/cassandra/streaming/StreamingMessageSender.java
deleted file mode 100644
index 96e7626..0000000
--- a/src/java/org/apache/cassandra/streaming/StreamingMessageSender.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.streaming;
-
-import java.io.IOException;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.streaming.messages.StreamMessage;
-
-public interface StreamingMessageSender
-{
-    void initialize() throws IOException;
-
-    void sendMessage(StreamMessage message) throws IOException;
-
-    boolean connected();
-
-    void close();
-}
diff --git a/src/java/org/apache/cassandra/streaming/StreamingState.java b/src/java/org/apache/cassandra/streaming/StreamingState.java
new file mode 100644
index 0000000..c2eed1e
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/StreamingState.java
@@ -0,0 +1,361 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.streaming;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.net.InetSocketAddress;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.concurrent.GuardedBy;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Throwables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.virtual.SimpleDataSet;
+import org.apache.cassandra.tools.nodetool.formatter.TableBuilder;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.ObjectSizes;
+import org.apache.cassandra.utils.TimeUUID;
+import org.checkerframework.checker.nullness.qual.Nullable;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+public class StreamingState implements StreamEventHandler
+{
+    private static final Logger logger = LoggerFactory.getLogger(StreamingState.class);
+
+    public static final long ELEMENT_SIZE = ObjectSizes.measureDeep(new StreamingState(nextTimeUUID(), StreamOperation.OTHER, false));
+
+    public enum Status
+    {INIT, START, SUCCESS, FAILURE}
+
+    private final long createdAtMillis = Clock.Global.currentTimeMillis();
+
+    private final TimeUUID id;
+    private final boolean follower;
+    private final StreamOperation operation;
+    private final Set<InetSocketAddress> peers = Collections.newSetFromMap(new ConcurrentHashMap<>());
+    @GuardedBy("this")
+    private final Sessions sessions = new Sessions();
+
+    private Status status;
+    private String completeMessage = null;
+
+    private final long[] stateTimesNanos;
+    private volatile long lastUpdatedAtNanos;
+
+    // API for state changes
+    public final Phase phase = new Phase();
+
+    public StreamingState(StreamResultFuture result)
+    {
+        this(result.planId, result.streamOperation, result.getCoordinator().isFollower());
+    }
+
+    private StreamingState(TimeUUID planId, StreamOperation streamOperation, boolean follower)
+    {
+        this.id = planId;
+        this.operation = streamOperation;
+        this.follower = follower;
+        this.stateTimesNanos = new long[Status.values().length];
+        updateState(Status.INIT);
+    }
+
+    public TimeUUID id()
+    {
+        return id;
+    }
+
+    public boolean follower()
+    {
+        return follower;
+    }
+
+    public StreamOperation operation()
+    {
+        return operation;
+    }
+
+    public Set<InetSocketAddress> peers()
+    {
+        return this.peers;
+    }
+
+    public Status status()
+    {
+        return status;
+    }
+
+    public Sessions sessions()
+    {
+        return sessions;
+    }
+
+    public boolean isComplete()
+    {
+        switch (status)
+        {
+            case SUCCESS:
+            case FAILURE:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    @VisibleForTesting
+    public StreamResultFuture future()
+    {
+        if (follower)
+            return StreamManager.instance.getReceivingStream(id);
+        else
+            return StreamManager.instance.getInitiatorStream(id);
+    }
+
+    public float progress()
+    {
+        switch (status)
+        {
+            case INIT:
+                return 0;
+            case START:
+                return Math.min(0.99f, sessions().progress().floatValue());
+            case SUCCESS:
+            case FAILURE:
+                return 1;
+            default:
+                throw new AssertionError("unknown state: " + status);
+        }
+    }
+
+    public EnumMap<Status, Long> stateTimesMillis()
+    {
+        EnumMap<Status, Long> map = new EnumMap<>(Status.class);
+        for (int i = 0; i < stateTimesNanos.length; i++)
+        {
+            long nanos = stateTimesNanos[i];
+            if (nanos != 0)
+                map.put(Status.values()[i], nanosToMillis(nanos));
+        }
+        return map;
+    }
+
+    public long durationMillis()
+    {
+        long endNanos = lastUpdatedAtNanos;
+        if (!isComplete())
+            endNanos = Clock.Global.nanoTime();
+        return TimeUnit.NANOSECONDS.toMillis(endNanos - stateTimesNanos[0]);
+    }
+
+    public long lastUpdatedAtMillis()
+    {
+        return nanosToMillis(lastUpdatedAtNanos);
+    }
+
+    public long lastUpdatedAtNanos()
+    {
+        return lastUpdatedAtNanos;
+    }
+
+    public String failureCause()
+    {
+        if (status == Status.FAILURE)
+            return completeMessage;
+        return null;
+    }
+
+    public String successMessage()
+    {
+        if (status == Status.SUCCESS)
+            return completeMessage;
+        return null;
+    }
+
+    @Override
+    public String toString()
+    {
+        TableBuilder table = new TableBuilder();
+        table.add("id", id.toString());
+        table.add("status", status().name().toLowerCase());
+        table.add("progress", (progress() * 100) + "%");
+        table.add("duration_ms", Long.toString(durationMillis()));
+        table.add("last_updated_ms", Long.toString(lastUpdatedAtMillis()));
+        table.add("failure_cause", failureCause());
+        table.add("success_message", successMessage());
+        for (Map.Entry<Status, Long> e : stateTimesMillis().entrySet())
+            table.add("status_" + e.getKey().name().toLowerCase() + "_ms", e.getValue().toString());
+        return table.toString();
+    }
+
+    @Override
+    public synchronized void handleStreamEvent(StreamEvent event)
+    {
+        try
+        {
+            switch (event.eventType)
+            {
+                case STREAM_PREPARED:
+                    streamPrepared((StreamEvent.SessionPreparedEvent) event);
+                    break;
+                case STREAM_COMPLETE:
+                    // currently not keeping track of this state, so ignore
+                    break;
+                case FILE_PROGRESS:
+                    streamProgress((StreamEvent.ProgressEvent) event);
+                    break;
+                default:
+                    logger.warn("Unknown stream event type: {}", event.eventType);
+            }
+        }
+        catch (Throwable t)
+        {
+            logger.warn("Unexpected exception handling stream event", t);
+        }
+        lastUpdatedAtNanos = Clock.Global.nanoTime();
+    }
+
+    private void streamPrepared(StreamEvent.SessionPreparedEvent event)
+    {
+        SessionInfo session = event.session;
+        peers.add(session.peer);
+        // only update stats on ACK to avoid duplication
+        if (event.prepareDirection != StreamSession.PrepareDirection.ACK)
+            return;
+        sessions.bytesToReceive += session.getTotalSizeToReceive();
+        sessions.bytesToSend += session.getTotalSizeToSend();
+
+        sessions.filesToReceive += session.getTotalFilesToReceive();
+        sessions.filesToSend += session.getTotalFilesToSend();
+    }
+
+    private void streamProgress(StreamEvent.ProgressEvent event)
+    {
+        ProgressInfo info = event.progress;
+
+        if (info.direction == ProgressInfo.Direction.IN)
+        {
+            // receiving
+            sessions.bytesReceived += info.deltaBytes;
+            if (info.isCompleted())
+                sessions.filesReceived++;
+        }
+        else
+        {
+            // sending
+            sessions.bytesSent += info.deltaBytes;
+            if (info.isCompleted())
+                sessions.filesSent++;
+        }
+    }
+
+    @Override
+    public synchronized void onSuccess(@Nullable StreamState state)
+    {
+        updateState(Status.SUCCESS);
+    }
+
+    @Override
+    public synchronized void onFailure(Throwable throwable)
+    {
+        completeMessage = Throwables.getStackTraceAsString(throwable);
+        updateState(Status.FAILURE);
+    }
+
+    private synchronized void updateState(Status state)
+    {
+        this.status = state;
+        long now = Clock.Global.nanoTime();
+        stateTimesNanos[state.ordinal()] = now;
+        lastUpdatedAtNanos = now;
+    }
+
+    private long nanosToMillis(long nanos)
+    {
+        // nanos - stateTimesNanos[INIT] = delta since init; adding it to createdAtMillis recovers wall-clock millis
+        return createdAtMillis + TimeUnit.NANOSECONDS.toMillis(nanos - stateTimesNanos[0]);
+    }
+
+    public class Phase
+    {
+        public void start()
+        {
+            updateState(Status.START);
+        }
+    }
+
+    public static class Sessions
+    {
+        public long bytesToReceive, bytesReceived;
+        public long bytesToSend, bytesSent;
+        public long filesToReceive, filesReceived;
+        public long filesToSend, filesSent;
+
+        public static String columns()
+        {
+            return "  bytes_to_receive bigint, \n" +
+                   "  bytes_received bigint, \n" +
+                   "  bytes_to_send bigint, \n" +
+                   "  bytes_sent bigint, \n" +
+                   "  files_to_receive bigint, \n" +
+                   "  files_received bigint, \n" +
+                   "  files_to_send bigint, \n" +
+                   "  files_sent bigint, \n";
+        }
+
+        public boolean isEmpty()
+        {
+            return bytesToReceive == 0 && bytesToSend == 0 && filesToReceive == 0 && filesToSend == 0;
+        }
+
+        public BigDecimal progress()
+        {
+            return div(bytesSent + bytesReceived, bytesToSend + bytesToReceive);
+        }
+
+        private static BigDecimal div(long a, long b)
+        {
+            // not "correct", but it's what you would do if this happened...
+            if (b == 0)
+                return BigDecimal.ZERO;
+            return BigDecimal.valueOf(a).divide(BigDecimal.valueOf(b), 4, RoundingMode.HALF_UP);
+        }
+
+        public void update(SimpleDataSet ds)
+        {
+            if (isEmpty())
+                return;
+            ds.column("bytes_to_receive", bytesToReceive)
+              .column("bytes_received", bytesReceived)
+              .column("bytes_to_send", bytesToSend)
+              .column("bytes_sent", bytesSent)
+              .column("files_to_receive", filesToReceive)
+              .column("files_received", filesReceived)
+              .column("files_to_send", filesToSend)
+              .column("files_sent", filesSent);
+        }
+    }
+}
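The bookkeeping above reduces to two small formulas: progress is (bytesSent + bytesReceived) / (bytesToSend + bytesToReceive) rounded to four decimal places (and clamped to 0.99 while the plan is still running), and per-status wall-clock times are recovered as createdAtMillis plus the nano delta since INIT. A self-contained sketch of the same arithmetic, not the class itself:

    import java.math.BigDecimal;
    import java.math.RoundingMode;
    import java.util.concurrent.TimeUnit;

    // Standalone illustration of the StreamingState arithmetic above.
    public class StreamingStateMath
    {
        static BigDecimal progress(long bytesSent, long bytesReceived, long bytesToSend, long bytesToReceive)
        {
            long done = bytesSent + bytesReceived;
            long total = bytesToSend + bytesToReceive;
            if (total == 0)
                return BigDecimal.ZERO; // same convention as Sessions.div()
            return BigDecimal.valueOf(done).divide(BigDecimal.valueOf(total), 4, RoundingMode.HALF_UP);
        }

        static long nanosToMillis(long createdAtMillis, long initNanos, long eventNanos)
        {
            // map a monotonic nano timestamp onto wall-clock millis relative to creation time
            return createdAtMillis + TimeUnit.NANOSECONDS.toMillis(eventNanos - initNanos);
        }

        public static void main(String[] args)
        {
            // e.g. 300 MiB transferred out of 1 GiB total -> 0.2930
            System.out.println(progress(100L << 20, 200L << 20, 512L << 20, 512L << 20));
            System.out.println(nanosToMillis(1_700_000_000_000L, 0L, TimeUnit.SECONDS.toNanos(5)));
        }
    }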
diff --git a/src/java/org/apache/cassandra/streaming/TableStreamManager.java b/src/java/org/apache/cassandra/streaming/TableStreamManager.java
index d97fabc..208dc34 100644
--- a/src/java/org/apache/cassandra/streaming/TableStreamManager.java
+++ b/src/java/org/apache/cassandra/streaming/TableStreamManager.java
@@ -19,10 +19,10 @@
 package org.apache.cassandra.streaming;
 
 import java.util.Collection;
-import java.util.UUID;
 
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.streaming.messages.StreamMessageHeader;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * The main streaming hook for a storage implementation.
@@ -51,6 +51,6 @@
      */
     Collection<OutgoingStream> createOutgoingStreams(StreamSession session,
                                                      RangesAtEndpoint replicas,
-                                                     UUID pendingRepair,
+                                                     TimeUUID pendingRepair,
                                                      PreviewKind previewKind);
 }
diff --git a/src/java/org/apache/cassandra/streaming/async/NettyStreamingChannel.java b/src/java/org/apache/cassandra/streaming/async/NettyStreamingChannel.java
new file mode 100644
index 0000000..70b1c37
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/async/NettyStreamingChannel.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming.async;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.IntFunction;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.util.AttributeKey;
+import io.netty.util.ReferenceCountUtil;
+import org.apache.cassandra.net.AsyncChannelPromise;
+import org.apache.cassandra.net.AsyncStreamingInputPlus;
+import org.apache.cassandra.net.AsyncStreamingOutputPlus;
+import org.apache.cassandra.net.GlobalBufferPoolAllocator;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.StreamingDataInputPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlusFixed;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+
+import static io.netty.util.AttributeKey.valueOf;
+import static java.lang.Boolean.FALSE;
+
+public class NettyStreamingChannel extends ChannelInboundHandlerAdapter implements StreamingChannel
+{
+    private static final Logger logger = LoggerFactory.getLogger(NettyStreamingChannel.class);
+    private static volatile boolean trackInboundHandlers = false;
+    private static Collection<NettyStreamingChannel> inboundHandlers;
+
+    @VisibleForTesting
+    static final AttributeKey<Boolean> TRANSFERRING_FILE_ATTR = valueOf("transferringFile");
+    final int messagingVersion;
+    final Channel channel;
+
+    /**
+     * A collection of {@link ByteBuf}s that are yet to be processed. Incoming buffers are first dropped into this
+     * structure, and then consumed.
+     * <p>
+     * For thread safety, this structure's resources are released on the consuming thread
+     * (via {@link AsyncStreamingInputPlus#close()}),
+     * but the producing side calls {@link AsyncStreamingInputPlus#requestClosure()} to notify the input that it should close.
+     */
+    @VisibleForTesting
+    final AsyncStreamingInputPlus in;
+
+    private volatile boolean closed;
+
+    public NettyStreamingChannel(int messagingVersion, Channel channel, Kind kind)
+    {
+        this.messagingVersion = messagingVersion;
+        this.channel = channel;
+        channel.attr(TRANSFERRING_FILE_ATTR).set(FALSE);
+        if (kind == Kind.CONTROL)
+        {
+            if (trackInboundHandlers)
+                inboundHandlers.add(this);
+            in = new AsyncStreamingInputPlus(channel);
+        }
+        else in = null;
+    }
+
+    @Override
+    public Object id()
+    {
+        return channel.id();
+    }
+
+    @Override
+    public String description()
+    {
+        return "channel.remote " + channel.remoteAddress() +
+               " channel.local " + channel.localAddress() +
+               " channel.id " + channel.id();
+    }
+
+    @Override
+    public InetSocketAddress peer()
+    {
+        return (InetSocketAddress) channel.remoteAddress();
+    }
+
+    @Override
+    public InetSocketAddress connectedTo()
+    {
+        return peer();
+    }
+
+    @Override
+    public boolean connected()
+    {
+        return channel.isOpen();
+    }
+
+    public StreamingDataInputPlus in()
+    {
+        return in;
+    }
+
+    public StreamingDataOutputPlus acquireOut()
+    {
+        if (!channel.attr(TRANSFERRING_FILE_ATTR).compareAndSet(false, true))
+            throw new IllegalStateException("channel's transferring state is currently set to true; refusing to start a new stream");
+
+        return new AsyncStreamingOutputPlus(channel)
+        {
+            @Override
+            public void close() throws IOException
+            {
+                try
+                {
+                    super.close();
+                }
+                finally
+                {
+                    NettyStreamingChannel.this.channel.attr(TRANSFERRING_FILE_ATTR).set(FALSE);
+                }
+            }
+        };
+    }
+
+    public Future<?> send(Send send)
+    {
+        class Factory implements IntFunction<StreamingDataOutputPlus>
+        {
+            ByteBuf buf;
+            ByteBuffer buffer;
+
+            @Override
+            public StreamingDataOutputPlus apply(int size)
+            {
+                buf = GlobalBufferPoolAllocator.instance.buffer(size);
+                buffer = buf.nioBuffer(buf.writerIndex(), size);
+                return new StreamingDataOutputPlusFixed(buffer);
+            }
+        }
+
+        Factory factory = new Factory();
+        try
+        {
+            send.send(factory);
+            ByteBuf buf = factory.buf;
+            ByteBuffer buffer = factory.buffer;
+            try
+            {
+                assert buffer.position() == buffer.limit();
+                buf.writerIndex(buffer.position());
+                AsyncChannelPromise promise = new AsyncChannelPromise(channel);
+                channel.writeAndFlush(buf, promise);
+                return promise;
+            }
+            catch (Throwable t)
+            {
+                buf.release();
+                throw t;
+            }
+        }
+        catch (Throwable t)
+        {
+            return ImmediateFuture.failure(t);
+        }
+    }
+
+    @Override
+    public synchronized io.netty.util.concurrent.Future<?> close()
+    {
+        if (closed)
+            return channel.closeFuture();
+
+        closed = true;
+        if (in != null)
+        {
+            in.requestClosure();
+            if (trackInboundHandlers)
+                inboundHandlers.remove(this);
+        }
+
+        return channel.close();
+    }
+
+    @Override
+    public void onClose(Runnable runOnClose)
+    {
+        channel.closeFuture().addListener(ignore -> runOnClose.run());
+    }
+
+    @Override
+    public void channelRead(ChannelHandlerContext ctx, Object message)
+    {
+        if (closed || !(message instanceof ByteBuf) || !in.append((ByteBuf) message))
+            ReferenceCountUtil.release(message);
+    }
+
+    @Override
+    public void channelInactive(ChannelHandlerContext ctx)
+    {
+        close();
+        ctx.fireChannelInactive();
+    }
+
+    @Override
+    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
+    {
+        if (cause instanceof IOException)
+            logger.trace("connection problem while streaming", cause);
+        else
+            logger.warn("exception occurred while in processing streaming data", cause);
+        close();
+    }
+
+    /** Shutdown for in-JVM tests. For any other usage, tracking of active inbound streaming handlers
+     *  should be revisited first and in-JVM shutdown refactored with it.
+     *  This does not prevent new inbound handlers being added after shutdown, nor is it thread-safe
+     *  around new inbound handlers being opened during shutdown.
+     */
+    @VisibleForTesting
+    public static void shutdown()
+    {
+        assert trackInboundHandlers : "in-JVM tests require tracking of inbound streaming handlers";
+
+        inboundHandlers.forEach(NettyStreamingChannel::close);
+        inboundHandlers.clear();
+    }
+
+    public static void trackInboundHandlers()
+    {
+        inboundHandlers = Collections.newSetFromMap(new ConcurrentHashMap<>());
+        trackInboundHandlers = true;
+    }
+
+}
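Illustrative sketch (not part of this change), showing how the two output paths above are meant to be used, assuming an already-connected StreamingChannel; the wrapper class, method names, 'channel' and 'messageSize' are placeholders for this example only. Control messages go through send(), which serializes into a fixed-size buffer allocated by the channel; bulk stream data goes through acquireOut(), which takes exclusive ownership of the channel via TRANSFERRING_FILE_ATTR.

import java.io.IOException;

import io.netty.util.concurrent.Future;
import org.apache.cassandra.streaming.StreamingChannel;
import org.apache.cassandra.streaming.StreamingDataOutputPlus;

// Minimal sketch only; class and method names are illustrative placeholders.
class StreamingChannelUsageSketch
{
    // Small control payload: the channel allocates exactly 'messageSize' bytes
    // and flushes the buffer once the lambda returns.
    static Future<?> sendControl(StreamingChannel channel, int messageSize)
    {
        return channel.send(outSupplier -> {
            try (StreamingDataOutputPlus out = outSupplier.apply(messageSize))
            {
                // serialize a small control message into 'out' here
            }
        });
    }

    // Bulk data: acquireOut() marks the channel as transferring and throws
    // IllegalStateException if another transfer is already in progress on it.
    static void sendBulk(StreamingChannel channel) throws IOException
    {
        try (StreamingDataOutputPlus out = channel.acquireOut())
        {
            // write the stream data into 'out' here
        }
    }
}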
diff --git a/src/java/org/apache/cassandra/streaming/async/NettyStreamingConnectionFactory.java b/src/java/org/apache/cassandra/streaming/async/NettyStreamingConnectionFactory.java
new file mode 100644
index 0000000..6a57e39
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/async/NettyStreamingConnectionFactory.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming.async;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelPipeline;
+import io.netty.channel.EventLoop;
+import io.netty.util.concurrent.Future; // checkstyle: permit this import
+import org.apache.cassandra.net.ConnectionCategory;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.OutboundConnectionInitiator.Result;
+import org.apache.cassandra.net.OutboundConnectionInitiator.Result.StreamingSuccess;
+import org.apache.cassandra.net.OutboundConnectionSettings;
+import org.apache.cassandra.streaming.StreamingChannel;
+
+import static org.apache.cassandra.locator.InetAddressAndPort.getByAddress;
+import static org.apache.cassandra.net.OutboundConnectionInitiator.initiateStreaming;
+
+public class NettyStreamingConnectionFactory implements StreamingChannel.Factory
+{
+    @VisibleForTesting
+    public static int MAX_CONNECT_ATTEMPTS = 3;
+
+    public static NettyStreamingChannel connect(OutboundConnectionSettings template, int messagingVersion, StreamingChannel.Kind kind) throws IOException
+    {
+        EventLoop eventLoop = MessagingService.instance().socketFactory.outboundStreamingGroup().next();
+
+        int attempts = 0;
+        while (true)
+        {
+            Future<Result<StreamingSuccess>> result = initiateStreaming(eventLoop, template.withDefaults(ConnectionCategory.STREAMING), messagingVersion);
+            result.awaitUninterruptibly(); // initiate has its own timeout, so this is "guaranteed" to return relatively promptly
+            if (result.isSuccess())
+            {
+                Channel channel = result.getNow().success().channel;
+                NettyStreamingChannel streamingChannel = new NettyStreamingChannel(messagingVersion, channel, kind);
+                if (kind == StreamingChannel.Kind.CONTROL)
+                {
+                    ChannelPipeline pipeline = channel.pipeline();
+                    pipeline.addLast("stream", streamingChannel);
+                }
+                return streamingChannel;
+            }
+
+            if (++attempts == MAX_CONNECT_ATTEMPTS)
+                throw new IOException("failed to connect to " + template.to + " for streaming data", result.cause());
+        }
+    }
+
+    @Override
+    public StreamingChannel create(InetSocketAddress to, int messagingVersion, StreamingChannel.Kind kind) throws IOException
+    {
+        return connect(new OutboundConnectionSettings(getByAddress(to)), messagingVersion, kind);
+    }
+
+    @Override
+    public StreamingChannel create(InetSocketAddress to,
+                                   InetSocketAddress preferred,
+                                   int messagingVersion,
+                                   StreamingChannel.Kind kind) throws IOException
+    {
+        return connect(new OutboundConnectionSettings(getByAddress(to), getByAddress(preferred)), messagingVersion, kind);
+    }
+}
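Illustrative sketch (not part of this change) of obtaining a control channel through the factory above; the wrapper class and 'peer' are placeholders for this example, and MessagingService.current_version stands in for whatever messaging version the caller has negotiated.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.streaming.StreamingChannel;
import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;

// Minimal sketch only; class and method names are illustrative placeholders.
class ConnectionFactoryUsageSketch
{
    static StreamingChannel openControlChannel(InetSocketAddress peer) throws IOException
    {
        StreamingChannel.Factory factory = new NettyStreamingConnectionFactory();
        // CONTROL channels also get the NettyStreamingChannel added to the pipeline
        // as the "stream" inbound handler, so replies from the peer can be read back.
        return factory.create(peer, MessagingService.current_version, StreamingChannel.Kind.CONTROL);
    }
}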
diff --git a/src/java/org/apache/cassandra/streaming/async/NettyStreamingMessageSender.java b/src/java/org/apache/cassandra/streaming/async/NettyStreamingMessageSender.java
deleted file mode 100644
index 9f8f476..0000000
--- a/src/java/org/apache/cassandra/streaming/async/NettyStreamingMessageSender.java
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.streaming.async;
-
-import java.io.IOError;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.ClosedByInterruptException;
-import java.util.Collection;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Throwables;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelFuture;
-import io.netty.channel.ChannelPipeline;
-import io.netty.util.AttributeKey;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
-import org.apache.cassandra.config.Config;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.io.util.DataOutputBufferFixed;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.net.AsyncChannelPromise;
-import org.apache.cassandra.net.OutboundConnectionSettings;
-import org.apache.cassandra.net.AsyncStreamingOutputPlus;
-import org.apache.cassandra.streaming.StreamConnectionFactory;
-import org.apache.cassandra.streaming.StreamSession;
-import org.apache.cassandra.streaming.StreamingMessageSender;
-import org.apache.cassandra.streaming.messages.IncomingStreamMessage;
-import org.apache.cassandra.streaming.messages.KeepAliveMessage;
-import org.apache.cassandra.streaming.messages.OutgoingStreamMessage;
-import org.apache.cassandra.streaming.messages.StreamInitMessage;
-import org.apache.cassandra.streaming.messages.StreamMessage;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.JVMStabilityInspector;
-
-/**
- * Responsible for sending {@link StreamMessage}s to a given peer. We manage an array of netty {@link Channel}s
- * for sending {@link OutgoingStreamMessage} instances; all other {@link StreamMessage} types are sent via
- * a special control channel. The reason for this is to treat those messages carefully and not let them get stuck
- * behind a stream transfer.
- *
- * One of the challenges when sending streams is we might need to delay shipping the stream if:
- *
- * - we've exceeded our network I/O use due to rate limiting (at the cassandra level)
- * - the receiver isn't keeping up, which causes the local TCP socket buffer to not empty, which causes epoll writes to not
- * move any bytes to the socket, which causes buffers to stick around in user-land (a/k/a cassandra) memory.
- *
- * When those conditions occur, it's easy enough to reschedule processing the stream once the resources pick up
- * (we acquire the permits from the rate limiter, or the socket drains). However, we need to ensure that
- * no other messages are submitted to the same channel while the current stream is still being processed.
- */
-public class NettyStreamingMessageSender implements StreamingMessageSender
-{
-    private static final Logger logger = LoggerFactory.getLogger(NettyStreamingMessageSender.class);
-
-    private static final int DEFAULT_MAX_PARALLEL_TRANSFERS = FBUtilities.getAvailableProcessors();
-    private static final int MAX_PARALLEL_TRANSFERS = Integer.parseInt(System.getProperty(Config.PROPERTY_PREFIX + "streaming.session.parallelTransfers", Integer.toString(DEFAULT_MAX_PARALLEL_TRANSFERS)));
-
-    private static final long DEFAULT_CLOSE_WAIT_IN_MILLIS = TimeUnit.MINUTES.toMillis(5);
-
-    // a simple mechansim for allowing a degree of fairnes across multiple sessions
-    private static final Semaphore fileTransferSemaphore = new Semaphore(DEFAULT_MAX_PARALLEL_TRANSFERS, true);
-
-    private final StreamSession session;
-    private final boolean isPreview;
-    private final int streamingVersion;
-    private final OutboundConnectionSettings template;
-    private final StreamConnectionFactory factory;
-
-    private volatile boolean closed;
-
-    /**
-     * A special {@link Channel} for sending non-stream streaming messages, basically anything that isn't an
-     * {@link OutgoingStreamMessage} (or an {@link IncomingStreamMessage}, but a node doesn't send that, it's only received).
-     */
-    private volatile Channel controlMessageChannel;
-
-    // note: this really doesn't need to be a LBQ, just something that's thread safe
-    private final Collection<ScheduledFuture<?>> channelKeepAlives = new LinkedBlockingQueue<>();
-
-    private final ThreadPoolExecutor fileTransferExecutor;
-
-    /**
-     * A mapping of each {@link #fileTransferExecutor} thread to a channel that can be written to (on that thread).
-     */
-    private final ConcurrentMap<Thread, Channel> threadToChannelMap = new ConcurrentHashMap<>();
-
-    /**
-     * A netty channel attribute used to indicate if a channel is currently transferring a stream. This is primarily used
-     * to indicate to the {@link KeepAliveTask} if it is safe to send a {@link KeepAliveMessage}, as sending the
-     * (application level) keep-alive in the middle of a stream would be bad news.
-     */
-    @VisibleForTesting
-    static final AttributeKey<Boolean> TRANSFERRING_FILE_ATTR = AttributeKey.valueOf("transferringFile");
-
-    public NettyStreamingMessageSender(StreamSession session, OutboundConnectionSettings template, StreamConnectionFactory factory, int streamingVersion, boolean isPreview)
-    {
-        this.session = session;
-        this.streamingVersion = streamingVersion;
-        this.template = template;
-        this.factory = factory;
-        this.isPreview = isPreview;
-
-        String name = session.peer.toString().replace(':', '.');
-        fileTransferExecutor = new DebuggableThreadPoolExecutor(1, MAX_PARALLEL_TRANSFERS, 1L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
-                                                                new NamedThreadFactory("NettyStreaming-Outbound-" + name));
-        fileTransferExecutor.allowCoreThreadTimeOut(true);
-    }
-
-    @Override
-    public void initialize()
-    {
-        StreamInitMessage message = new StreamInitMessage(FBUtilities.getBroadcastAddressAndPort(),
-                                                          session.sessionIndex(),
-                                                          session.planId(),
-                                                          session.streamOperation(),
-                                                          session.getPendingRepair(),
-                                                          session.getPreviewKind());
-        sendMessage(message);
-    }
-
-    public boolean hasControlChannel()
-    {
-        return controlMessageChannel != null;
-    }
-
-    /**
-     * Used by follower to setup control message channel created by initiator
-     */
-    public void injectControlMessageChannel(Channel channel)
-    {
-        this.controlMessageChannel = channel;
-        channel.attr(TRANSFERRING_FILE_ATTR).set(Boolean.FALSE);
-        scheduleKeepAliveTask(channel);
-    }
-
-    /**
-     * Used by initiator to setup control message channel connecting to follower
-     */
-    private void setupControlMessageChannel(OutboundConnectionSettings templateWithConnectTo) throws IOException
-    {
-        if (controlMessageChannel == null)
-        {
-            /*
-             * Inbound handlers are needed:
-             *  a) for initiator's control channel(the first outbound channel) to receive follower's message.
-             *  b) for streaming receiver (note: both initiator and follower can receive streaming files) to reveive files,
-             *     in {@link Handler#setupStreamingPipeline}
-             */
-            controlMessageChannel = createChannel(true, templateWithConnectTo);
-            scheduleKeepAliveTask(controlMessageChannel);
-        }
-    }
-
-    private void scheduleKeepAliveTask(Channel channel)
-    {
-        int keepAlivePeriod = DatabaseDescriptor.getStreamingKeepAlivePeriod();
-        if (logger.isDebugEnabled())
-            logger.debug("{} Scheduling keep-alive task with {}s period.", createLogTag(session, channel), keepAlivePeriod);
-
-        KeepAliveTask task = new KeepAliveTask(channel, session);
-        ScheduledFuture<?> scheduledFuture = channel.eventLoop().scheduleAtFixedRate(task, 0, keepAlivePeriod, TimeUnit.SECONDS);
-        channelKeepAlives.add(scheduledFuture);
-        task.future = scheduledFuture;
-    }
-    
-    private Channel createChannel(boolean isInboundHandlerNeeded, OutboundConnectionSettings templateWithConnectTo) throws IOException
-    {
-        Channel channel = factory.createConnection(templateWithConnectTo, streamingVersion);
-        session.attachOutbound(channel);
-
-        if (isInboundHandlerNeeded)
-        {
-            ChannelPipeline pipeline = channel.pipeline();
-            pipeline.addLast("stream", new StreamingInboundHandler(template.to, streamingVersion, session));
-        }
-        channel.attr(TRANSFERRING_FILE_ATTR).set(Boolean.FALSE);
-        logger.debug("Creating channel id {} local {} remote {}", channel.id(), channel.localAddress(), channel.remoteAddress());
-        return channel;
-    }
-
-    static String createLogTag(StreamSession session, Channel channel)
-    {
-        StringBuilder sb = new StringBuilder(64);
-        sb.append("[Stream");
-
-        if (session != null)
-                sb.append(" #").append(session.planId());
-
-        if (channel != null)
-                sb.append(" channel: ").append(channel.id());
-
-        sb.append(']');
-        return sb.toString();
-    }
-
-    @Override
-    public void sendMessage(StreamMessage message)
-    {
-        if (closed)
-            throw new RuntimeException("stream has been closed, cannot send " + message);
-
-        if (message instanceof OutgoingStreamMessage)
-        {
-            if (isPreview)
-                throw new RuntimeException("Cannot send stream data messages for preview streaming sessions");
-            if (logger.isDebugEnabled())
-                logger.debug("{} Sending {}", createLogTag(session, null), message);
-
-            // Supply a preferred IP up-front to avoid trying to get it in the executor thread, which can be interrupted.
-            OutboundConnectionSettings templateWithConnectTo = factory.supportsPreferredIp() ? template.withConnectTo(template.connectTo()) : template;
-            fileTransferExecutor.submit(new FileStreamTask((OutgoingStreamMessage) message, templateWithConnectTo));
-            return;
-        }
-
-        try
-        {
-            setupControlMessageChannel(template);
-            sendControlMessage(controlMessageChannel, message, future -> onControlMessageComplete(future, message));
-        }
-        catch (Exception e)
-        {
-            close();
-            session.onError(e);
-        }
-    }
-
-    private void sendControlMessage(Channel channel, StreamMessage message, GenericFutureListener listener) throws IOException
-    {
-        if (logger.isDebugEnabled())
-            logger.debug("{} Sending {}", createLogTag(session, channel), message);
-
-        // we anticipate that the control messages are rather small, so allocating a ByteBuf shouldn't  blow out of memory.
-        long messageSize = StreamMessage.serializedSize(message, streamingVersion);
-        if (messageSize > 1 << 30)
-        {
-            throw new IllegalStateException(String.format("%s something is seriously wrong with the calculated stream control message's size: %d bytes, type is %s",
-                                                          createLogTag(session, channel), messageSize, message.type));
-        }
-
-        // as control messages are (expected to be) small, we can simply allocate a ByteBuf here, wrap it, and send via the channel
-        ByteBuf buf = channel.alloc().directBuffer((int) messageSize, (int) messageSize);
-        ByteBuffer nioBuf = buf.nioBuffer(0, (int) messageSize);
-        @SuppressWarnings("resource")
-        DataOutputBufferFixed out = new DataOutputBufferFixed(nioBuf);
-        StreamMessage.serialize(message, out, streamingVersion, session);
-        assert nioBuf.position() == nioBuf.limit();
-        buf.writerIndex(nioBuf.position());
-
-        AsyncChannelPromise.writeAndFlush(channel, buf, listener);
-    }
-
-    /**
-     * Decides what to do after a {@link StreamMessage} is processed.
-     *
-     * Note: this is called from the netty event loop.
-     *
-     * @return null if the message was processed sucessfully; else, a {@link java.util.concurrent.Future} to indicate
-     * the status of aborting any remaining tasks in the session.
-     */
-    java.util.concurrent.Future onControlMessageComplete(Future<?> future, StreamMessage msg)
-    {
-        ChannelFuture channelFuture = (ChannelFuture)future;
-        Throwable cause = future.cause();
-        if (cause == null)
-            return null;
-
-        Channel channel = channelFuture.channel();
-        logger.error("{} failed to send a stream message/data to peer {}: msg = {}",
-                     createLogTag(session, channel), template.to, msg, future.cause());
-
-        // StreamSession will invoke close(), but we have to mark this sender as closed so the session doesn't try
-        // to send any failure messages
-        return session.onError(cause);
-    }
-
-    class FileStreamTask implements Runnable
-    {
-        /**
-         * Time interval, in minutes, to wait between logging a message indicating that we're waiting on a semaphore
-         * permit to become available.
-         */
-        private static final int SEMAPHORE_UNAVAILABLE_LOG_INTERVAL = 3;
-
-        /**
-         * Even though we expect only an {@link OutgoingStreamMessage} at runtime, the type here is {@link StreamMessage}
-         * to facilitate simpler testing.
-         */
-        private final StreamMessage msg;
-
-        private final OutboundConnectionSettings templateWithConnectTo;
-
-        FileStreamTask(OutgoingStreamMessage ofm, OutboundConnectionSettings templateWithConnectTo)
-        {
-            this.msg = ofm;
-            this.templateWithConnectTo = templateWithConnectTo;
-        }
-
-        /**
-         * For testing purposes
-         */
-        FileStreamTask(StreamMessage msg)
-        {
-            this.msg = msg;
-            this.templateWithConnectTo = null;
-        }
-
-        @Override
-        public void run()
-        {
-            if (!acquirePermit(SEMAPHORE_UNAVAILABLE_LOG_INTERVAL))
-                return;
-
-            Channel channel = null;
-            try
-            {
-                channel = getOrCreateChannel(templateWithConnectTo);
-                if (!channel.attr(TRANSFERRING_FILE_ATTR).compareAndSet(false, true))
-                    throw new IllegalStateException("channel's transferring state is currently set to true. refusing to start new stream");
-
-                // close the DataOutputStreamPlus as we're done with it - but don't close the channel
-                try (DataOutputStreamPlus outPlus = new AsyncStreamingOutputPlus(channel))
-                {
-                    StreamMessage.serialize(msg, outPlus, streamingVersion, session);
-                }
-                finally
-                {
-                    channel.attr(TRANSFERRING_FILE_ATTR).set(Boolean.FALSE);
-                }
-            }
-            catch (Exception e)
-            {
-                session.onError(e);
-            }
-            catch (Throwable t)
-            {
-                if (closed && Throwables.getRootCause(t) instanceof ClosedByInterruptException && fileTransferExecutor.isShutdown())
-                {
-                    logger.debug("{} Streaming channel was closed due to the executor pool being shutdown", createLogTag(session, channel));
-                }
-                else
-                {
-                    JVMStabilityInspector.inspectThrowable(t);
-                    if (!session.state().isFinalState())
-                        session.onError(t);
-                }
-            }
-            finally
-            {
-                fileTransferSemaphore.release();
-            }
-        }
-
-        boolean acquirePermit(int logInterval)
-        {
-            long logIntervalNanos = TimeUnit.MINUTES.toNanos(logInterval);
-            long timeOfLastLogging = System.nanoTime();
-            while (true)
-            {
-                if (closed)
-                    return false;
-                try
-                {
-                    if (fileTransferSemaphore.tryAcquire(1, TimeUnit.SECONDS))
-                        return true;
-
-                    // log a helpful message to operators in case they are wondering why a given session might not be making progress.
-                    long now = System.nanoTime();
-                    if (now - timeOfLastLogging > logIntervalNanos)
-                    {
-                        timeOfLastLogging = now;
-                        OutgoingStreamMessage ofm = (OutgoingStreamMessage)msg;
-
-                        if (logger.isInfoEnabled())
-                            logger.info("{} waiting to acquire a permit to begin streaming {}. This message logs every {} minutes",
-                                        createLogTag(session, null), ofm.getName(), logInterval);
-                    }
-                }
-                catch (InterruptedException ie)
-                {
-                    //ignore
-                }
-            }
-        }
-
-        private Channel getOrCreateChannel(OutboundConnectionSettings templateWithConnectTo)
-        {
-            Thread currentThread = Thread.currentThread();
-            try
-            {
-                Channel channel = threadToChannelMap.get(currentThread);
-                if (channel != null)
-                    return channel;
-
-                channel = createChannel(false, templateWithConnectTo);
-                threadToChannelMap.put(currentThread, channel);
-                return channel;
-            }
-            catch (Exception e)
-            {
-                throw new IOError(e);
-            }
-        }
-
-        private void onError(Throwable t)
-        {
-            try
-            {
-                session.onError(t).get(DEFAULT_CLOSE_WAIT_IN_MILLIS, TimeUnit.MILLISECONDS);
-            }
-            catch (Exception e)
-            {
-                // nop - let the Throwable param be the main failure point here, and let session handle it
-            }
-        }
-
-        /**
-         * For testing purposes
-         */
-        void injectChannel(Channel channel)
-        {
-            Thread currentThread = Thread.currentThread();
-            if (threadToChannelMap.get(currentThread) != null)
-                throw new IllegalStateException("previous channel already set");
-
-            threadToChannelMap.put(currentThread, channel);
-        }
-
-        /**
-         * For testing purposes
-         */
-        void unsetChannel()
-        {
-            threadToChannelMap.remove(Thread.currentThread());
-        }
-    }
-
-    /**
-     * Periodically sends the {@link KeepAliveMessage}.
-     *
-     * NOTE: this task, and the callback function {@link #keepAliveListener(Future)} is executed in the netty event loop.
-     */
-    class KeepAliveTask implements Runnable
-    {
-        private final Channel channel;
-        private final StreamSession session;
-
-        /**
-         * A reference to the scheduled task for this instance so that it may be cancelled.
-         */
-        ScheduledFuture<?> future;
-
-        KeepAliveTask(Channel channel, StreamSession session)
-        {
-            this.channel = channel;
-            this.session = session;
-        }
-
-        public void run()
-        {
-            // if the channel has been closed, cancel the scheduled task and return
-            if (!channel.isOpen() || closed)
-            {
-                future.cancel(false);
-                return;
-            }
-
-            // if the channel is currently processing streaming, skip this execution. As this task executes
-            // on the event loop, even if there is a race with a FileStreamTask which changes the channel attribute
-            // after we check it, the FileStreamTask cannot send out any bytes as this KeepAliveTask is executing
-            // on the event loop (and FileStreamTask publishes it's buffer to the channel, consumed after we're done here).
-            if (channel.attr(TRANSFERRING_FILE_ATTR).get())
-                return;
-
-            try
-            {
-                if (logger.isTraceEnabled())
-                    logger.trace("{} Sending keep-alive to {}.", createLogTag(session, channel), session.peer);
-                sendControlMessage(channel, new KeepAliveMessage(), this::keepAliveListener);
-            }
-            catch (IOException ioe)
-            {
-                future.cancel(false);
-            }
-        }
-
-        private void keepAliveListener(Future<? super Void> future)
-        {
-            if (future.isSuccess() || future.isCancelled())
-                return;
-
-            if (logger.isDebugEnabled())
-                logger.debug("{} Could not send keep-alive message (perhaps stream session is finished?).",
-                             createLogTag(session, channel), future.cause());
-        }
-    }
-
-    /**
-     * For testing purposes only.
-     */
-    public void setClosed()
-    {
-        closed = true;
-    }
-
-    void setControlMessageChannel(Channel channel)
-    {
-        controlMessageChannel = channel;
-    }
-
-    int semaphoreAvailablePermits()
-    {
-        return fileTransferSemaphore.availablePermits();
-    }
-
-    @Override
-    public boolean connected()
-    {
-        return !closed && (controlMessageChannel == null || controlMessageChannel.isOpen());
-    }
-
-    @Override
-    public void close()
-    {
-        if (closed)
-            return;
-
-        closed = true;
-        if (logger.isDebugEnabled())
-            logger.debug("{} Closing stream connection channels on {}", createLogTag(session, null), template.to);
-        for (ScheduledFuture<?> future : channelKeepAlives)
-            future.cancel(false);
-        channelKeepAlives.clear();
-
-        threadToChannelMap.clear();
-        fileTransferExecutor.shutdownNow();
-    }
-}
diff --git a/src/java/org/apache/cassandra/streaming/async/StreamCompressionSerializer.java b/src/java/org/apache/cassandra/streaming/async/StreamCompressionSerializer.java
index fc1bde2..f7d8101 100644
--- a/src/java/org/apache/cassandra/streaming/async/StreamCompressionSerializer.java
+++ b/src/java/org/apache/cassandra/streaming/async/StreamCompressionSerializer.java
@@ -27,7 +27,7 @@
 import net.jpountz.lz4.LZ4Compressor;
 import net.jpountz.lz4.LZ4SafeDecompressor;
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.net.AsyncStreamingOutputPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 
 import static org.apache.cassandra.net.MessagingService.current_version;
 
@@ -54,7 +54,7 @@
      */
     private static final int HEADER_LENGTH = 8;
 
-    public static AsyncStreamingOutputPlus.Write serialize(LZ4Compressor compressor, ByteBuffer in, int version)
+    public static StreamingDataOutputPlus.Write serialize(LZ4Compressor compressor, ByteBuffer in, int version)
     {
         assert version == current_version;
         return bufferSupplier -> {
diff --git a/src/java/org/apache/cassandra/streaming/async/StreamingInboundHandler.java b/src/java/org/apache/cassandra/streaming/async/StreamingInboundHandler.java
deleted file mode 100644
index 3b9c172..0000000
--- a/src/java/org/apache/cassandra/streaming/async/StreamingInboundHandler.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.streaming.async;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import javax.annotation.Nullable;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.Uninterruptibles;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelInboundHandlerAdapter;
-import io.netty.util.ReferenceCountUtil;
-import io.netty.util.concurrent.FastThreadLocalThread;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.AsyncStreamingInputPlus;
-import org.apache.cassandra.streaming.StreamReceiveException;
-import org.apache.cassandra.streaming.StreamSession;
-import org.apache.cassandra.streaming.messages.KeepAliveMessage;
-import org.apache.cassandra.streaming.messages.StreamInitMessage;
-import org.apache.cassandra.streaming.messages.StreamMessage;
-import org.apache.cassandra.utils.JVMStabilityInspector;
-
-import static org.apache.cassandra.streaming.async.NettyStreamingMessageSender.createLogTag;
-
-/**
- * Handles the inbound side of streaming messages and stream data. From the incoming data, we derserialize the message
- * including the actual stream data itself. Because the reading and deserialization of streams is a blocking affair,
- * we can't block the netty event loop. Thus we have a background thread perform all the blocking deserialization.
- */
-public class StreamingInboundHandler extends ChannelInboundHandlerAdapter
-{
-    private static final Logger logger = LoggerFactory.getLogger(StreamingInboundHandler.class);
-    private static volatile boolean trackInboundHandlers = false;
-    private static Collection<StreamingInboundHandler> inboundHandlers;
-    private final InetAddressAndPort remoteAddress;
-    private final int protocolVersion;
-
-    private final StreamSession session;
-
-    /**
-     * A collection of {@link ByteBuf}s that are yet to be processed. Incoming buffers are first dropped into this
-     * structure, and then consumed.
-     * <p>
-     * For thread safety, this structure's resources are released on the consuming thread
-     * (via {@link AsyncStreamingInputPlus#close()},
-     * but the producing side calls {@link AsyncStreamingInputPlus#requestClosure()} to notify the input that is should close.
-     */
-    private AsyncStreamingInputPlus buffers;
-
-    private volatile boolean closed;
-
-    public StreamingInboundHandler(InetAddressAndPort remoteAddress, int protocolVersion, @Nullable StreamSession session)
-    {
-        this.remoteAddress = remoteAddress;
-        this.protocolVersion = protocolVersion;
-        this.session = session;
-        if (trackInboundHandlers)
-            inboundHandlers.add(this);
-    }
-
-    @Override
-    @SuppressWarnings("resource")
-    public void handlerAdded(ChannelHandlerContext ctx)
-    {
-        buffers = new AsyncStreamingInputPlus(ctx.channel());
-        Thread blockingIOThread = new FastThreadLocalThread(new StreamDeserializingTask(session, ctx.channel()),
-                                                            String.format("Stream-Deserializer-%s-%s", remoteAddress.toString(), ctx.channel().id()));
-        blockingIOThread.setDaemon(true);
-        blockingIOThread.start();
-    }
-
-    @Override
-    public void channelRead(ChannelHandlerContext ctx, Object message)
-    {
-        if (closed || !(message instanceof ByteBuf) || !buffers.append((ByteBuf) message))
-            ReferenceCountUtil.release(message);
-    }
-
-    @Override
-    public void channelInactive(ChannelHandlerContext ctx)
-    {
-        close();
-        ctx.fireChannelInactive();
-    }
-
-    void close()
-    {
-        closed = true;
-        buffers.requestClosure();
-        if (trackInboundHandlers)
-            inboundHandlers.remove(this);
-    }
-
-    @Override
-    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
-    {
-        if (cause instanceof IOException)
-            logger.trace("connection problem while streaming", cause);
-        else
-            logger.warn("exception occurred while in processing streaming data", cause);
-        close();
-    }
-
-    /**
-     * For testing only!!
-     */
-    void setPendingBuffers(AsyncStreamingInputPlus bufChannel)
-    {
-        this.buffers = bufChannel;
-    }
-
-    /**
-     * The task that performs the actual deserialization.
-     */
-    class StreamDeserializingTask implements Runnable
-    {
-        private final Channel channel;
-
-        @VisibleForTesting
-        StreamSession session;
-
-        StreamDeserializingTask(StreamSession session, Channel channel)
-        {
-            this.session = session;
-            this.channel = channel;
-        }
-
-        @Override
-        public void run()
-        {
-            try
-            {
-                while (true)
-                {
-                    buffers.maybeIssueRead();
-
-                    // do a check of available bytes and possibly sleep some amount of time (then continue).
-                    // this way we can break out of run() sanely or we end up blocking indefintely in StreamMessage.deserialize()
-                    while (buffers.isEmpty())
-                    {
-                        if (closed)
-                            return;
-
-                        Uninterruptibles.sleepUninterruptibly(400, TimeUnit.MILLISECONDS);
-                    }
-
-                    StreamMessage message = StreamMessage.deserialize(buffers, protocolVersion);
-
-                    // keep-alives don't necessarily need to be tied to a session (they could be arrive before or after
-                    // wrt session lifecycle, due to races), just log that we received the message and carry on
-                    if (message instanceof KeepAliveMessage)
-                    {
-                        if (logger.isDebugEnabled())
-                            logger.debug("{} Received {}", createLogTag(session, channel), message);
-                        continue;
-                    }
-
-                    if (session == null)
-                        session = deriveSession(message);
-
-                    if (logger.isDebugEnabled())
-                        logger.debug("{} Received {}", createLogTag(session, channel), message);
-
-                    session.messageReceived(message);
-                }
-            }
-            catch (Throwable t)
-            {
-                JVMStabilityInspector.inspectThrowable(t);
-                if (session != null)
-                {
-                    session.onError(t);
-                }
-                else if (t instanceof StreamReceiveException)
-                {
-                    ((StreamReceiveException)t).session.onError(t);
-                }
-                else
-                {
-                    logger.error("{} stream operation from {} failed", createLogTag(session, channel), remoteAddress, t);
-                }
-            }
-            finally
-            {
-                channel.close();
-                closed = true;
-
-                if (buffers != null)
-                {
-                    // request closure again as the original request could have raced with receiving a
-                    // message and been consumed in the message receive loop above.  Otherweise
-                    // buffers could hang indefinitely on the queue.poll.
-                    buffers.requestClosure();
-                    buffers.close();
-                }
-            }
-        }
-
-        StreamSession deriveSession(StreamMessage message)
-        {
-            // StreamInitMessage starts a new channel here, but IncomingStreamMessage needs a session
-            // to be established a priori
-            StreamSession streamSession = message.getOrCreateSession(channel);
-
-            // Attach this channel to the session: this only happens upon receiving the first init message as a follower;
-            // in all other cases, no new control channel will be added, as the proper control channel will be already attached.
-            streamSession.attachInbound(channel, message instanceof StreamInitMessage);
-            return streamSession;
-        }
-    }
-
-    /** Shutdown for in-JVM tests. For any other usage, tracking of active inbound streaming handlers
-     *  should be revisted first and in-JVM shutdown refactored with it.
-     *  This does not prevent new inbound handlers being added after shutdown, nor is not thread-safe
-     *  around new inbound handlers being opened during shutdown.
-      */
-    @VisibleForTesting
-    public static void shutdown()
-    {
-        assert trackInboundHandlers : "in-JVM tests required tracking of inbound streaming handlers";
-
-        inboundHandlers.forEach(StreamingInboundHandler::close);
-        inboundHandlers.clear();
-    }
-
-    public static void trackInboundHandlers()
-    {
-        inboundHandlers = Collections.newSetFromMap(new ConcurrentHashMap<>());
-        trackInboundHandlers = true;
-    }
-}
diff --git a/src/java/org/apache/cassandra/streaming/async/StreamingMultiplexedChannel.java b/src/java/org/apache/cassandra/streaming/async/StreamingMultiplexedChannel.java
new file mode 100644
index 0000000..560fee9
--- /dev/null
+++ b/src/java/org/apache/cassandra/streaming/async/StreamingMultiplexedChannel.java
@@ -0,0 +1,521 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming.async;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.channels.ClosedByInterruptException;
+import java.util.Collection;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.Future; // checkstyle: permit this import
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.streaming.StreamDeserializingTask;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
+import org.apache.cassandra.streaming.StreamSession;
+import org.apache.cassandra.streaming.messages.IncomingStreamMessage;
+import org.apache.cassandra.streaming.messages.KeepAliveMessage;
+import org.apache.cassandra.streaming.messages.OutgoingStreamMessage;
+import org.apache.cassandra.streaming.messages.StreamMessage;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+import org.apache.cassandra.utils.concurrent.Semaphore;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static com.google.common.base.Throwables.getRootCause;
+import static java.lang.Integer.parseInt;
+import static java.lang.String.format;
+import static java.lang.System.getProperty;
+import static java.lang.Thread.currentThread;
+import static java.util.concurrent.TimeUnit.*;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.config.Config.PROPERTY_PREFIX;
+import static org.apache.cassandra.streaming.StreamSession.createLogTag;
+import static org.apache.cassandra.streaming.messages.StreamMessage.serialize;
+import static org.apache.cassandra.streaming.messages.StreamMessage.serializedSize;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.FBUtilities.getAvailableProcessors;
+import static org.apache.cassandra.utils.JVMStabilityInspector.inspectThrowable;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
+import static org.apache.cassandra.utils.concurrent.Semaphore.newFairSemaphore;
+
+/**
+ * Responsible for sending {@link StreamMessage}s to a given peer. We manage an array of netty {@link Channel}s
+ * for sending {@link OutgoingStreamMessage} instances; all other {@link StreamMessage} types are sent via
+ * a special control channel. The reason for this is to treat those messages carefully and not let them get stuck
+ * behind a stream transfer.
+ *
+ * One of the challenges when sending streams is we might need to delay shipping the stream if:
+ *
+ * - we've exceeded our network I/O use due to rate limiting (at the cassandra level)
+ * - the receiver isn't keeping up, which causes the local TCP socket buffer to not empty, which causes epoll writes to not
+ * move any bytes to the socket, which causes buffers to stick around in user-land (a/k/a cassandra) memory.
+ *
+ * When those conditions occur, it's easy enough to reschedule processing the stream once the resources pick up
+ * (we acquire the permits from the rate limiter, or the socket drains). However, we need to ensure that
+ * no other messages are submitted to the same channel while the current stream is still being processed.
+ */
+public class StreamingMultiplexedChannel
+{
+    private static final Logger logger = LoggerFactory.getLogger(StreamingMultiplexedChannel.class);
+
+    private static final int DEFAULT_MAX_PARALLEL_TRANSFERS = getAvailableProcessors();
+    private static final int MAX_PARALLEL_TRANSFERS = parseInt(getProperty(PROPERTY_PREFIX + "streaming.session.parallelTransfers", Integer.toString(DEFAULT_MAX_PARALLEL_TRANSFERS)));
+
+    // a simple mechanism for allowing a degree of fairness across multiple sessions
+    private static final Semaphore fileTransferSemaphore = newFairSemaphore(DEFAULT_MAX_PARALLEL_TRANSFERS);
+
+    private final StreamingChannel.Factory factory;
+    private final InetAddressAndPort to;
+    private final StreamSession session;
+    private final int messagingVersion;
+
+    private volatile boolean closed;
+
+    /**
+     * A special {@link StreamingChannel} for sending non-stream streaming messages, basically anything that isn't an
+     * {@link OutgoingStreamMessage} (or an {@link IncomingStreamMessage}, but a node doesn't send that, it's only received).
+     */
+    private volatile StreamingChannel controlChannel;
+
+    // note: this really doesn't need to be a LBQ, just something that's thread safe
+    private final Collection<ScheduledFuture<?>> channelKeepAlives = newBlockingQueue();
+
+    private final ExecutorPlus fileTransferExecutor;
+
+    /**
+     * A mapping of each {@link #fileTransferExecutor} thread to a channel that can be written to (on that thread).
+     */
+    private final ConcurrentMap<Thread, StreamingChannel> threadToChannelMap = new ConcurrentHashMap<>();
+
+    public StreamingMultiplexedChannel(StreamSession session, StreamingChannel.Factory factory, InetAddressAndPort to, @Nullable StreamingChannel controlChannel, int messagingVersion)
+    {
+        this.session = session;
+        this.factory = factory;
+        this.to = to;
+        this.messagingVersion = messagingVersion;
+        this.controlChannel = controlChannel;
+
+        String name = session.peer.toString().replace(':', '.');
+        fileTransferExecutor = executorFactory()
+                .configurePooled("NettyStreaming-Outbound-" + name, MAX_PARALLEL_TRANSFERS)
+                .withKeepAlive(1L, SECONDS).build();
+    }
+
+
+
+    public InetAddressAndPort peer()
+    {
+        return to;
+    }
+
+    public InetSocketAddress connectedTo()
+    {
+        return controlChannel == null ? to : controlChannel.connectedTo();
+    }
+
+    /**
+     * Used by initiator to setup control message channel connecting to follower
+     */
+    private void setupControlMessageChannel() throws IOException
+    {
+        if (controlChannel == null)
+        {
+            /*
+             * Inbound handlers are needed:
+             *  a) for the initiator's control channel (the first outbound channel) to receive the follower's messages.
+             *  b) for the streaming receiver (note: both initiator and follower can receive streaming files) to receive files,
+             *     in {@link Handler#setupStreamingPipeline}
+             */
+            controlChannel = createControlChannel();
+        }
+    }
+
+    private StreamingChannel createControlChannel() throws IOException
+    {
+        logger.debug("Creating stream session to {} as {}", to, session.isFollower() ? "follower" : "initiator");
+
+        StreamingChannel channel = factory.create(to, messagingVersion, StreamingChannel.Kind.CONTROL);
+        executorFactory().startThread(String.format("Stream-Deserializer-%s-%s", to.toString(), channel.id()),
+                                      new StreamDeserializingTask(session, channel, messagingVersion));
+
+        session.attachInbound(channel);
+        session.attachOutbound(channel);
+
+        scheduleKeepAliveTask(channel);
+
+        logger.debug("Creating control {}", channel.description());
+        return channel;
+    }
+    
+    private StreamingChannel createFileChannel(InetAddressAndPort connectTo) throws IOException
+    {
+        logger.debug("Creating stream session to {} as {}", to, session.isFollower() ? "follower" : "initiator");
+
+        StreamingChannel channel = factory.create(to, connectTo, messagingVersion, StreamingChannel.Kind.FILE);
+        session.attachOutbound(channel);
+
+        logger.debug("Creating file {}", channel.description());
+        return channel;
+    }
+
+    public Future<?> sendControlMessage(StreamMessage message)
+    {
+        try
+        {
+            setupControlMessageChannel();
+            return sendMessage(controlChannel, message);
+        }
+        catch (Exception e)
+        {
+            close();
+            session.onError(e);
+            return ImmediateFuture.failure(e);
+        }
+    }
+
+    public Future<?> sendMessage(StreamingChannel channel, StreamMessage message)
+    {
+        if (closed)
+            throw new RuntimeException("stream has been closed, cannot send " + message);
+
+        if (message instanceof OutgoingStreamMessage)
+        {
+            if (session.isPreview())
+                throw new RuntimeException("Cannot send stream data messages for preview streaming sessions");
+            if (logger.isDebugEnabled())
+                logger.debug("{} Sending {}", createLogTag(session), message);
+
+            InetAddressAndPort connectTo = factory.supportsPreferredIp() ? SystemKeyspace.getPreferredIP(to) : to;
+            return fileTransferExecutor.submit(new FileStreamTask((OutgoingStreamMessage) message, connectTo));
+        }
+
+        try
+        {
+            Future<?> promise = channel.send(outSupplier -> {
+                // we anticipate that the control messages are rather small, so allocating a ByteBuf shouldn't risk running out of memory.
+                long messageSize = serializedSize(message, messagingVersion);
+                if (messageSize > 1 << 30)
+                {
+                    throw new IllegalStateException(format("%s something is seriously wrong with the calculated stream control message's size: %d bytes, type is %s",
+                                                           createLogTag(session, controlChannel.id()), messageSize, message.type));
+                }
+                try (StreamingDataOutputPlus out = outSupplier.apply((int) messageSize))
+                {
+                    StreamMessage.serialize(message, out, messagingVersion, session);
+                }
+            });
+            promise.addListener(future -> onMessageComplete(future, message));
+            return promise;
+        }
+        catch (Exception e)
+        {
+            close();
+            session.onError(e);
+            return ImmediateFuture.failure(e);
+        }
+    }
+
+    /**
+     * Decides what to do after a {@link StreamMessage} is processed.
+     *
+     * Note: this is called from the netty event loop.
+     *
+     * @return null if the message was processed successfully; else, a {@link java.util.concurrent.Future} to indicate
+     * the status of aborting any remaining tasks in the session.
+     */
+    Future<?> onMessageComplete(Future<?> future, StreamMessage msg)
+    {
+        Throwable cause = future.cause();
+        if (cause == null)
+            return null;
+
+        Channel channel = future instanceof ChannelFuture ? ((ChannelFuture)future).channel() : null;
+        logger.error("{} failed to send a stream message/data to peer {}: msg = {}",
+                     createLogTag(session, channel), to, msg, future.cause());
+
+        // StreamSession will invoke close(), but we have to mark this sender as closed so the session doesn't try
+        // to send any failure messages
+        return session.onError(cause);
+    }
+
+    class FileStreamTask implements Runnable
+    {
+        /**
+         * Time interval, in minutes, to wait between logging a message indicating that we're waiting on a semaphore
+         * permit to become available.
+         */
+        private static final int SEMAPHORE_UNAVAILABLE_LOG_INTERVAL = 3;
+
+        /**
+         * Even though we expect only an {@link OutgoingStreamMessage} at runtime, the type here is {@link StreamMessage}
+         * to facilitate simpler testing.
+         */
+        private final StreamMessage msg;
+
+        private final InetAddressAndPort connectTo;
+
+        FileStreamTask(OutgoingStreamMessage ofm, InetAddressAndPort connectTo)
+        {
+            this.msg = ofm;
+            this.connectTo = connectTo;
+        }
+
+        /**
+         * For testing purposes
+         */
+        FileStreamTask(StreamMessage msg)
+        {
+            this.msg = msg;
+            this.connectTo = null;
+        }
+
+        @Override
+        public void run()
+        {
+            if (!acquirePermit(SEMAPHORE_UNAVAILABLE_LOG_INTERVAL))
+                return;
+
+            StreamingChannel channel = null;
+            try
+            {
+                channel = getOrCreateFileChannel(connectTo);
+
+                // close the StreamingDataOutputPlus as we're done with it - but don't close the channel
+                try (StreamingDataOutputPlus out = channel.acquireOut())
+                {
+                    serialize(msg, out, messagingVersion, session);
+                }
+            }
+            catch (Exception e)
+            {
+                session.onError(e);
+            }
+            catch (Throwable t)
+            {
+                if (closed && getRootCause(t) instanceof ClosedByInterruptException && fileTransferExecutor.isShutdown())
+                {
+                    logger.debug("{} Streaming channel was closed due to the executor pool being shutdown", createLogTag(session, channel));
+                }
+                else
+                {
+                    inspectThrowable(t);
+                    if (!session.state().isFinalState())
+                        session.onError(t);
+                }
+            }
+            finally
+            {
+                fileTransferSemaphore.release(1);
+            }
+        }
+
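+        /**
+         * Blocks until a file-transfer permit is acquired or the sender is closed, logging a progress message at
+         * most once per {@code logInterval} minutes while waiting.
+         *
+         * @return true if a permit was acquired, false if the sender was closed while waiting
+         */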
+        boolean acquirePermit(int logInterval)
+        {
+            long logIntervalNanos = MINUTES.toNanos(logInterval);
+            long timeOfLastLogging = nanoTime();
+            while (true)
+            {
+                if (closed)
+                    return false;
+                try
+                {
+                    if (fileTransferSemaphore.tryAcquire(1, 1, SECONDS))
+                        return true;
+
+                    // log a helpful message to operators in case they are wondering why a given session might not be making progress.
+                    long now = nanoTime();
+                    if (now - timeOfLastLogging > logIntervalNanos)
+                    {
+                        timeOfLastLogging = now;
+                        OutgoingStreamMessage ofm = (OutgoingStreamMessage)msg;
+
+                        if (logger.isInfoEnabled())
+                            logger.info("{} waiting to acquire a permit to begin streaming {}. This message logs every {} minutes",
+                                        createLogTag(session), ofm.getName(), logInterval);
+                    }
+                }
+                catch (InterruptedException e)
+                {
+                    throw new UncheckedInterruptedException(e);
+                }
+            }
+        }
+
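+        /**
+         * Returns the file channel cached for the current transfer thread, creating and caching a new one if this
+         * thread has not streamed a file yet.
+         */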
+        private StreamingChannel getOrCreateFileChannel(InetAddressAndPort connectTo)
+        {
+            Thread currentThread = currentThread();
+            try
+            {
+                StreamingChannel channel = threadToChannelMap.get(currentThread);
+                if (channel != null)
+                    return channel;
+
+                channel = createFileChannel(connectTo);
+                threadToChannelMap.put(currentThread, channel);
+                return channel;
+            }
+            catch (Exception e)
+            {
+                throw new IOError(e);
+            }
+        }
+
+        /**
+         * For testing purposes
+         */
+        void injectChannel(StreamingChannel channel)
+        {
+            Thread currentThread = currentThread();
+            if (threadToChannelMap.get(currentThread) != null)
+                throw new IllegalStateException("previous channel already set");
+
+            threadToChannelMap.put(currentThread, channel);
+        }
+
+        /**
+         * For testing purposes
+         */
+        void unsetChannel()
+        {
+            threadToChannelMap.remove(currentThread());
+        }
+    }
+
+    /**
+     * Periodically sends the {@link KeepAliveMessage}.
+     * <p>
+     * NOTE: this task and its callback are executed on the netty event loop.
+     */
+    class KeepAliveTask implements Runnable
+    {
+        private final StreamingChannel channel;
+
+        /**
+         * A reference to the scheduled task for this instance so that it may be cancelled.
+         */
+        ScheduledFuture<?> future;
+
+        KeepAliveTask(StreamingChannel channel)
+        {
+            this.channel = channel;
+        }
+
+        @Override
+        public void run()
+        {
+            // if the channel has been closed, cancel the scheduled task and return
+            if (!channel.connected() || closed)
+            {
+                if (null != future)
+                    future.cancel(false);
+                return;
+            }
+
+            if (logger.isTraceEnabled())
+                logger.trace("{} Sending keep-alive to {}.", createLogTag(session, channel), session.peer);
+
+            sendControlMessage(new KeepAliveMessage()).addListener(f ->
+            {
+                if (f.isSuccess() || f.isCancelled())
+                    return;
+
+                if (logger.isDebugEnabled())
+                    logger.debug("{} Could not send keep-alive message (perhaps stream session is finished?).",
+                                 createLogTag(session, channel), f.cause());
+            });
+        }
+    }
+
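+    /**
+     * Schedules a {@link KeepAliveTask} on the channel's event loop. Only Netty-backed channels are supported, and
+     * nothing is scheduled when the configured keep-alive period is not positive.
+     */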
+    private void scheduleKeepAliveTask(StreamingChannel channel)
+    {
+        if (!(channel instanceof NettyStreamingChannel))
+            return;
+
+        int keepAlivePeriod = DatabaseDescriptor.getStreamingKeepAlivePeriod();
+        if (keepAlivePeriod <= 0)
+            return;
+
+        if (logger.isDebugEnabled())
+            logger.debug("{} Scheduling keep-alive task with {}s period.", createLogTag(session, channel), keepAlivePeriod);
+
+        KeepAliveTask task = new KeepAliveTask(channel);
+        ScheduledFuture<?> scheduledFuture =
+            ((NettyStreamingChannel)channel).channel
+                                            .eventLoop()
+                                            .scheduleAtFixedRate(task, keepAlivePeriod, keepAlivePeriod, TimeUnit.SECONDS);
+        task.future = scheduledFuture;
+        channelKeepAlives.add(scheduledFuture);
+    }
+
+    /**
+     * For testing purposes only.
+     */
+    public void setClosed()
+    {
+        closed = true;
+    }
+
+    void setControlChannel(NettyStreamingChannel channel)
+    {
+        controlChannel = channel;
+    }
+
+    int semaphoreAvailablePermits()
+    {
+        return fileTransferSemaphore.permits();
+    }
+
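+    /** @return true while this sender is open and the control channel, if already established, is still connected */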
+    public boolean connected()
+    {
+        return !closed && (controlChannel == null || controlChannel.connected());
+    }
+
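+    /**
+     * Closes this sender: cancels any scheduled keep-alive tasks, closes the per-thread file channels and shuts down
+     * the file transfer executor. Subsequent calls are no-ops.
+     */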
+    public void close()
+    {
+        if (closed)
+            return;
+
+        closed = true;
+        if (logger.isDebugEnabled())
+            logger.debug("{} Closing stream connection channels on {}", createLogTag(session), to);
+        for (ScheduledFuture<?> future : channelKeepAlives)
+            future.cancel(false);
+        channelKeepAlives.clear();
+
+        threadToChannelMap.values().forEach(StreamingChannel::close);
+        threadToChannelMap.clear();
+        fileTransferExecutor.shutdownNow();
+    }
+}
diff --git a/src/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java b/src/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java
index a1fa19f..72ab844 100644
--- a/src/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java
+++ b/src/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java
@@ -20,13 +20,13 @@
 import java.net.UnknownHostException;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.UUID;
 import javax.management.openmbean.*;
 
 import com.google.common.base.Throwables;
 
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.streaming.ProgressInfo;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class ProgressInfoCompositeData
 {
@@ -72,12 +72,14 @@
         }
     }
 
-    public static CompositeData toCompositeData(UUID planId, ProgressInfo progressInfo)
+    public static CompositeData toCompositeData(TimeUUID planId, ProgressInfo progressInfo)
     {
+        // Delta is not returned because its impact on backwards compatibility was unclear; it may be safe to expose.
+        // See CASSANDRA-18110.
         Map<String, Object> valueMap = new HashMap<>();
         valueMap.put(ITEM_NAMES[0], planId.toString());
-        valueMap.put(ITEM_NAMES[1], progressInfo.peer.address.getHostAddress());
-        valueMap.put(ITEM_NAMES[2], progressInfo.peer.port);
+        valueMap.put(ITEM_NAMES[1], progressInfo.peer.getAddress().getHostAddress());
+        valueMap.put(ITEM_NAMES[2], progressInfo.peer.getPort());
         valueMap.put(ITEM_NAMES[3], progressInfo.sessionIndex);
         valueMap.put(ITEM_NAMES[4], progressInfo.fileName);
         valueMap.put(ITEM_NAMES[5], progressInfo.direction.name());
@@ -103,6 +105,7 @@
                                     (String) values[4],
                                     ProgressInfo.Direction.valueOf((String)values[5]),
                                     (long) values[6],
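+                                    // delta is not exposed via the composite data (CASSANDRA-18110), so the current bytes value is reused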
+                                    (long) values[6],
                                     (long) values[7]);
         }
         catch (UnknownHostException e)
diff --git a/src/java/org/apache/cassandra/streaming/management/SessionCompleteEventCompositeData.java b/src/java/org/apache/cassandra/streaming/management/SessionCompleteEventCompositeData.java
index 1c0d8c5..665b4cd 100644
--- a/src/java/org/apache/cassandra/streaming/management/SessionCompleteEventCompositeData.java
+++ b/src/java/org/apache/cassandra/streaming/management/SessionCompleteEventCompositeData.java
@@ -61,8 +61,8 @@
     {
         Map<String, Object> valueMap = new HashMap<>();
         valueMap.put(ITEM_NAMES[0], event.planId.toString());
-        valueMap.put(ITEM_NAMES[1], event.peer.address.getHostAddress());
-        valueMap.put(ITEM_NAMES[2], event.peer.port);
+        valueMap.put(ITEM_NAMES[1], event.peer.getAddress().getHostAddress());
+        valueMap.put(ITEM_NAMES[2], event.peer.getPort());
         valueMap.put(ITEM_NAMES[3], event.success);
         try
         {
diff --git a/src/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java b/src/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java
index d20eaf5..f39b321 100644
--- a/src/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java
+++ b/src/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java
@@ -31,6 +31,7 @@
 import org.apache.cassandra.streaming.SessionInfo;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.streaming.StreamSummary;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class SessionInfoCompositeData
 {
@@ -86,14 +87,14 @@
         }
     }
 
-    public static CompositeData toCompositeData(final UUID planId, SessionInfo sessionInfo)
+    public static CompositeData toCompositeData(final TimeUUID planId, SessionInfo sessionInfo)
     {
         Map<String, Object> valueMap = new HashMap<>();
         valueMap.put(ITEM_NAMES[0], planId.toString());
-        valueMap.put(ITEM_NAMES[1], sessionInfo.peer.address.getHostAddress());
-        valueMap.put(ITEM_NAMES[2], sessionInfo.peer.port);
-        valueMap.put(ITEM_NAMES[3], sessionInfo.connecting.address.getHostAddress());
-        valueMap.put(ITEM_NAMES[4], sessionInfo.connecting.port);
+        valueMap.put(ITEM_NAMES[1], sessionInfo.peer.getAddress().getHostAddress());
+        valueMap.put(ITEM_NAMES[2], sessionInfo.peer.getPort());
+        valueMap.put(ITEM_NAMES[3], sessionInfo.connecting.getAddress().getHostAddress());
+        valueMap.put(ITEM_NAMES[4], sessionInfo.connecting.getPort());
         Function<StreamSummary, CompositeData> fromStreamSummary = new Function<StreamSummary, CompositeData>()
         {
             public CompositeData apply(StreamSummary input)
diff --git a/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java b/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java
index 3e12c2a..b8c7487 100644
--- a/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java
+++ b/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java
@@ -23,6 +23,8 @@
 
 import org.apache.cassandra.streaming.*;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  */
 public class StreamEventJMXNotifier extends NotificationBroadcasterSupport implements StreamEventHandler
@@ -53,14 +55,14 @@
                 break;
             case FILE_PROGRESS:
                 ProgressInfo progress = ((StreamEvent.ProgressEvent) event).progress;
-                long current = System.currentTimeMillis();
+                long current = currentTimeMillis();
                 if (current - progressLastSent >= PROGRESS_NOTIFICATION_INTERVAL || progress.isCompleted())
                 {
                     notif = new Notification(StreamEvent.ProgressEvent.class.getCanonicalName(),
                                              StreamManagerMBean.OBJECT_NAME,
                                              seq.getAndIncrement());
                     notif.setUserData(ProgressInfoCompositeData.toCompositeData(event.planId, progress));
-                    progressLastSent = System.currentTimeMillis();
+                    progressLastSent = currentTimeMillis();
                 }
                 else
                 {
@@ -85,7 +87,7 @@
         Notification notif = new Notification(StreamEvent.class.getCanonicalName() + ".failure",
                                               StreamManagerMBean.OBJECT_NAME,
                                               seq.getAndIncrement());
-        notif.setUserData(t.fillInStackTrace().toString());
+        notif.setUserData(t.toString());
         sendNotification(notif);
     }
 }
diff --git a/src/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java b/src/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java
index de88762..5ee4f32 100644
--- a/src/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java
+++ b/src/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java
@@ -29,6 +29,7 @@
 import org.apache.cassandra.streaming.SessionInfo;
 import org.apache.cassandra.streaming.StreamState;
 import org.apache.cassandra.streaming.StreamOperation;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  */
@@ -121,7 +122,7 @@
     {
         assert cd.getCompositeType().equals(COMPOSITE_TYPE);
         Object[] values = cd.getAll(ITEM_NAMES);
-        UUID planId = UUID.fromString((String) values[0]);
+        TimeUUID planId = TimeUUID.fromString((String) values[0]);
         String typeString = (String) values[1];
         Set<SessionInfo> sessions = Sets.newHashSet(Iterables.transform(Arrays.asList((CompositeData[]) values[2]),
                                                                         new Function<CompositeData, SessionInfo>()
diff --git a/src/java/org/apache/cassandra/streaming/messages/CompleteMessage.java b/src/java/org/apache/cassandra/streaming/messages/CompleteMessage.java
index 83d95e0..bf35266 100644
--- a/src/java/org/apache/cassandra/streaming/messages/CompleteMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/CompleteMessage.java
@@ -18,7 +18,7 @@
 package org.apache.cassandra.streaming.messages;
 
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 
 public class CompleteMessage extends StreamMessage
@@ -30,7 +30,7 @@
             return new CompleteMessage();
         }
 
-        public void serialize(CompleteMessage message, DataOutputStreamPlus out, int version, StreamSession session) {}
+        public void serialize(CompleteMessage message, StreamingDataOutputPlus out, int version, StreamSession session) {}
 
         public long serializedSize(CompleteMessage message, int version)
         {
diff --git a/src/java/org/apache/cassandra/streaming/messages/IncomingStreamMessage.java b/src/java/org/apache/cassandra/streaming/messages/IncomingStreamMessage.java
index e268747..ff1e61f 100644
--- a/src/java/org/apache/cassandra/streaming/messages/IncomingStreamMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/IncomingStreamMessage.java
@@ -20,23 +20,20 @@
 import java.io.IOException;
 import java.util.Objects;
 
-import io.netty.channel.Channel;
-
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.io.util.DataInputPlus;
 
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.streaming.IncomingStream;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamManager;
 import org.apache.cassandra.streaming.StreamReceiveException;
 import org.apache.cassandra.streaming.StreamSession;
-import org.apache.cassandra.utils.JVMStabilityInspector;
 
 public class IncomingStreamMessage extends StreamMessage
 {
     public static Serializer<IncomingStreamMessage> serializer = new Serializer<IncomingStreamMessage>()
     {
-        @SuppressWarnings("resource")
         public IncomingStreamMessage deserialize(DataInputPlus input, int version) throws IOException
         {
             StreamMessageHeader header = StreamMessageHeader.serializer.deserialize(input, version);
@@ -56,12 +53,14 @@
             }
             catch (Throwable t)
             {
-                JVMStabilityInspector.inspectThrowable(t);
+                if (t instanceof StreamReceiveException)
+                    throw (StreamReceiveException) t;
+                // make sure to wrap so the caller always has access to the session to call onError
                 throw new StreamReceiveException(session, t);
             }
         }
 
-        public void serialize(IncomingStreamMessage message, DataOutputStreamPlus out, int version, StreamSession session)
+        public void serialize(IncomingStreamMessage message, StreamingDataOutputPlus out, int version, StreamSession session)
         {
             throw new UnsupportedOperationException("Not allowed to call serialize on an incoming stream");
         }
@@ -83,8 +82,9 @@
     }
 
     @Override
-    public StreamSession getOrCreateSession(Channel channel)
+    public StreamSession getOrCreateAndAttachInboundSession(StreamingChannel channel, int messagingVersion)
     {
+        stream.session().attachInbound(channel);
         return stream.session();
     }
 
diff --git a/src/java/org/apache/cassandra/streaming/messages/KeepAliveMessage.java b/src/java/org/apache/cassandra/streaming/messages/KeepAliveMessage.java
index 5352b3b..a09cfca 100644
--- a/src/java/org/apache/cassandra/streaming/messages/KeepAliveMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/KeepAliveMessage.java
@@ -15,40 +15,40 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.cassandra.streaming.messages;
 
-import java.io.IOException;
-
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 
 public class KeepAliveMessage extends StreamMessage
 {
-    public static Serializer<KeepAliveMessage> serializer = new Serializer<KeepAliveMessage>()
-    {
-        public KeepAliveMessage deserialize(DataInputPlus in, int version) throws IOException
-        {
-            return new KeepAliveMessage();
-        }
-
-        public void serialize(KeepAliveMessage message, DataOutputStreamPlus out, int version, StreamSession session)
-        {}
-
-        public long serializedSize(KeepAliveMessage message, int version)
-        {
-            return 0;
-        }
-    };
 
     public KeepAliveMessage()
     {
         super(Type.KEEP_ALIVE);
     }
 
+    @Override
     public String toString()
     {
         return "keep-alive";
     }
+
+    public static Serializer<KeepAliveMessage> serializer = new Serializer<KeepAliveMessage>()
+    {
+        public KeepAliveMessage deserialize(DataInputPlus in, int version)
+        {
+            return new KeepAliveMessage();
+        }
+
+        public void serialize(KeepAliveMessage message, StreamingDataOutputPlus out, int version, StreamSession session)
+        {
+        }
+
+        public long serializedSize(KeepAliveMessage message, int version)
+        {
+            return 0;
+        }
+    };
 }
diff --git a/src/java/org/apache/cassandra/streaming/messages/OutgoingStreamMessage.java b/src/java/org/apache/cassandra/streaming/messages/OutgoingStreamMessage.java
index 702e806..4128ddb 100644
--- a/src/java/org/apache/cassandra/streaming/messages/OutgoingStreamMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/OutgoingStreamMessage.java
@@ -22,9 +22,9 @@
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.OutgoingStream;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -37,7 +37,7 @@
             throw new UnsupportedOperationException("Not allowed to call deserialize on an outgoing stream");
         }
 
-        public void serialize(OutgoingStreamMessage message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+        public void serialize(OutgoingStreamMessage message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
         {
             message.startTransfer();
             try
@@ -77,7 +77,7 @@
                                               stream.getPendingRepair());
     }
 
-    public synchronized void serialize(DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+    public synchronized void serialize(StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
     {
         if (completed)
         {
diff --git a/src/java/org/apache/cassandra/streaming/messages/PrepareAckMessage.java b/src/java/org/apache/cassandra/streaming/messages/PrepareAckMessage.java
index 97fdff7..479ef34 100644
--- a/src/java/org/apache/cassandra/streaming/messages/PrepareAckMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/PrepareAckMessage.java
@@ -21,14 +21,14 @@
 import java.io.IOException;
 
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 
 public class PrepareAckMessage extends StreamMessage
 {
     public static Serializer<PrepareAckMessage> serializer = new Serializer<PrepareAckMessage>()
     {
-        public void serialize(PrepareAckMessage message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+        public void serialize(PrepareAckMessage message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
         {
             //nop
         }
diff --git a/src/java/org/apache/cassandra/streaming/messages/PrepareSynAckMessage.java b/src/java/org/apache/cassandra/streaming/messages/PrepareSynAckMessage.java
index 4e5e8fb..9d97de6 100644
--- a/src/java/org/apache/cassandra/streaming/messages/PrepareSynAckMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/PrepareSynAckMessage.java
@@ -23,7 +23,7 @@
 import java.util.Collection;
 
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.streaming.StreamSummary;
 
@@ -31,7 +31,7 @@
 {
     public static Serializer<PrepareSynAckMessage> serializer = new Serializer<PrepareSynAckMessage>()
     {
-        public void serialize(PrepareSynAckMessage message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+        public void serialize(PrepareSynAckMessage message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
         {
             out.writeInt(message.summaries.size());
             for (StreamSummary summary : message.summaries)
diff --git a/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java b/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java
index e378af7..1160033 100644
--- a/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java
@@ -22,7 +22,7 @@
 import java.util.Collection;
 
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamRequest;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.streaming.StreamSummary;
@@ -55,7 +55,7 @@
             return size;
         }
 
-        public void serialize(PrepareSynMessage message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+        public void serialize(PrepareSynMessage message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
         {
             // requests
             out.writeInt(message.requests.size());
diff --git a/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java b/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java
index ff2cdec..134e2cf 100644
--- a/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java
@@ -20,8 +20,8 @@
 import java.io.*;
 
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 
 public class ReceivedMessage extends StreamMessage
@@ -34,7 +34,7 @@
             return new ReceivedMessage(TableId.deserialize(input), input.readInt());
         }
 
-        public void serialize(ReceivedMessage message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+        public void serialize(ReceivedMessage message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
         {
             message.tableId.serialize(out);
             out.writeInt(message.sequenceNumber);
diff --git a/src/java/org/apache/cassandra/streaming/messages/SessionFailedMessage.java b/src/java/org/apache/cassandra/streaming/messages/SessionFailedMessage.java
index ca10bcc..f09b643 100644
--- a/src/java/org/apache/cassandra/streaming/messages/SessionFailedMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/SessionFailedMessage.java
@@ -18,7 +18,7 @@
 package org.apache.cassandra.streaming.messages;
 
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 
 public class SessionFailedMessage extends StreamMessage
@@ -30,7 +30,7 @@
             return new SessionFailedMessage();
         }
 
-        public void serialize(SessionFailedMessage message, DataOutputStreamPlus out, int version, StreamSession session) {}
+        public void serialize(SessionFailedMessage message, StreamingDataOutputPlus out, int version, StreamSession session) {}
 
         public long serializedSize(SessionFailedMessage message, int version)
         {
diff --git a/src/java/org/apache/cassandra/streaming/messages/StreamInitMessage.java b/src/java/org/apache/cassandra/streaming/messages/StreamInitMessage.java
index 0d6ef47..889c732 100644
--- a/src/java/org/apache/cassandra/streaming/messages/StreamInitMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/StreamInitMessage.java
@@ -18,20 +18,17 @@
 package org.apache.cassandra.streaming.messages;
 
 import java.io.IOException;
-import java.util.UUID;
-
-import io.netty.channel.Channel;
 
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.StreamResultFuture;
 import org.apache.cassandra.streaming.StreamSession;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
 
@@ -45,14 +42,14 @@
 
     public final InetAddressAndPort from;
     public final int sessionIndex;
-    public final UUID planId;
+    public final TimeUUID planId;
     public final StreamOperation streamOperation;
 
-    public final UUID pendingRepair;
+    public final TimeUUID pendingRepair;
     public final PreviewKind previewKind;
 
-    public StreamInitMessage(InetAddressAndPort from, int sessionIndex, UUID planId, StreamOperation streamOperation,
-                             UUID pendingRepair, PreviewKind previewKind)
+    public StreamInitMessage(InetAddressAndPort from, int sessionIndex, TimeUUID planId, StreamOperation streamOperation,
+                             TimeUUID pendingRepair, PreviewKind previewKind)
     {
         super(Type.STREAM_INIT);
         this.from = from;
@@ -64,10 +61,12 @@
     }
 
     @Override
-    public StreamSession getOrCreateSession(Channel channel)
+    public StreamSession getOrCreateAndAttachInboundSession(StreamingChannel channel, int messagingVersion)
     {
-        return StreamResultFuture.createFollower(sessionIndex, planId, streamOperation, from, channel, pendingRepair, previewKind)
+        StreamSession session = StreamResultFuture.createFollower(sessionIndex, planId, streamOperation, from, channel, messagingVersion, pendingRepair, previewKind)
                                  .getSession(from, sessionIndex);
+        session.attachInbound(channel);
+        return session;
     }
 
     @Override
@@ -81,18 +80,16 @@
 
     private static class StreamInitMessageSerializer implements Serializer<StreamInitMessage>
     {
-        public void serialize(StreamInitMessage message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+        public void serialize(StreamInitMessage message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
         {
             inetAddressAndPortSerializer.serialize(message.from, out, version);
             out.writeInt(message.sessionIndex);
-            UUIDSerializer.serializer.serialize(message.planId, out, MessagingService.current_version);
+            message.planId.serialize(out);
             out.writeUTF(message.streamOperation.getDescription());
 
             out.writeBoolean(message.pendingRepair != null);
             if (message.pendingRepair != null)
-            {
-                UUIDSerializer.serializer.serialize(message.pendingRepair, out, MessagingService.current_version);
-            }
+                message.pendingRepair.serialize(out);
             out.writeInt(message.previewKind.getSerializationVal());
         }
 
@@ -100,10 +97,10 @@
         {
             InetAddressAndPort from = inetAddressAndPortSerializer.deserialize(in, version);
             int sessionIndex = in.readInt();
-            UUID planId = UUIDSerializer.serializer.deserialize(in, MessagingService.current_version);
+            TimeUUID planId = TimeUUID.deserialize(in);
             String description = in.readUTF();
 
-            UUID pendingRepair = in.readBoolean() ? UUIDSerializer.serializer.deserialize(in, version) : null;
+            TimeUUID pendingRepair = in.readBoolean() ? TimeUUID.deserialize(in) : null;
             PreviewKind previewKind = PreviewKind.deserialize(in.readInt());
             return new StreamInitMessage(from, sessionIndex, planId, StreamOperation.fromString(description),
                                          pendingRepair, previewKind);
@@ -113,13 +110,11 @@
         {
             long size = inetAddressAndPortSerializer.serializedSize(message.from, version);
             size += TypeSizes.sizeof(message.sessionIndex);
-            size += UUIDSerializer.serializer.serializedSize(message.planId, MessagingService.current_version);
+            size += TimeUUID.sizeInBytes();
             size += TypeSizes.sizeof(message.streamOperation.getDescription());
             size += TypeSizes.sizeof(message.pendingRepair != null);
             if (message.pendingRepair != null)
-            {
-                size += UUIDSerializer.serializer.serializedSize(message.pendingRepair, MessagingService.current_version);
-            }
+                size += TimeUUID.sizeInBytes();
             size += TypeSizes.sizeof(message.previewKind.getSerializationVal());
 
             return size;
diff --git a/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java b/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java
index e3f805e..db393a5 100644
--- a/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java
@@ -21,14 +21,11 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import io.netty.channel.Channel;
-
 import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
 import org.apache.cassandra.streaming.StreamSession;
 
-import static java.lang.Math.max;
-
 /**
  * StreamMessage is an abstract base class that every messages in streaming protocol inherit.
  *
@@ -36,7 +33,7 @@
  */
 public abstract class StreamMessage
 {
-    public static void serialize(StreamMessage message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException
+    public static void serialize(StreamMessage message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException
     {
         out.writeByte(message.type.id);
         message.type.outSerializer.serialize(message, out, version, session);
@@ -57,7 +54,7 @@
     public static interface Serializer<V extends StreamMessage>
     {
         V deserialize(DataInputPlus in, int version) throws IOException;
-        void serialize(V message, DataOutputStreamPlus out, int version, StreamSession session) throws IOException;
+        void serialize(V message, StreamingDataOutputPlus out, int version, StreamSession session) throws IOException;
         long serializedSize(V message, int version) throws IOException;
     }
 
@@ -138,7 +135,7 @@
      * Get or create a {@link StreamSession} based on this stream message data: not all stream messages support this,
      * so the default implementation just throws an exception.
      */
-    public StreamSession getOrCreateSession(Channel channel)
+    public StreamSession getOrCreateAndAttachInboundSession(StreamingChannel channel, int messagingVersion)
     {
         throw new UnsupportedOperationException("Not supported by streaming messages of type: " + this.getClass());
     }
diff --git a/src/java/org/apache/cassandra/streaming/messages/StreamMessageHeader.java b/src/java/org/apache/cassandra/streaming/messages/StreamMessageHeader.java
index 30afbb8..ec9d8e8 100644
--- a/src/java/org/apache/cassandra/streaming/messages/StreamMessageHeader.java
+++ b/src/java/org/apache/cassandra/streaming/messages/StreamMessageHeader.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.streaming.messages;
 
 import java.io.IOException;
-import java.util.UUID;
 
 import com.google.common.base.Objects;
 
@@ -26,10 +25,9 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.StreamSession;
-import org.apache.cassandra.utils.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer;
 
@@ -41,23 +39,23 @@
     public static FileMessageHeaderSerializer serializer = new FileMessageHeaderSerializer();
 
     public final TableId tableId;
-    public UUID planId;
+    public TimeUUID planId;
     // it tells us if the file was sent by a follower stream session
     public final boolean sendByFollower;
     public int sessionIndex;
     public final int sequenceNumber;
     public final long repairedAt;
-    public final UUID pendingRepair;
+    public final TimeUUID pendingRepair;
     public final InetAddressAndPort sender;
 
     public StreamMessageHeader(TableId tableId,
                                InetAddressAndPort sender,
-                               UUID planId,
+                               TimeUUID planId,
                                boolean sendByFollower,
                                int sessionIndex,
                                int sequenceNumber,
                                long repairedAt,
-                               UUID pendingRepair)
+                               TimeUUID pendingRepair)
     {
         this.tableId = tableId;
         this.sender = sender;
@@ -111,7 +109,7 @@
         {
             header.tableId.serialize(out);
             inetAddressAndPortSerializer.serialize(header.sender, out, version);
-            UUIDSerializer.serializer.serialize(header.planId, out, version);
+            header.planId.serialize(out);
             out.writeBoolean(header.sendByFollower);
             out.writeInt(header.sessionIndex);
             out.writeInt(header.sequenceNumber);
@@ -119,7 +117,7 @@
             out.writeBoolean(header.pendingRepair != null);
             if (header.pendingRepair != null)
             {
-                UUIDSerializer.serializer.serialize(header.pendingRepair, out, version);
+                header.pendingRepair.serialize(out);
             }
         }
 
@@ -127,12 +125,12 @@
         {
             TableId tableId = TableId.deserialize(in);
             InetAddressAndPort sender = inetAddressAndPortSerializer.deserialize(in, version);
-            UUID planId = UUIDSerializer.serializer.deserialize(in, MessagingService.current_version);
+            TimeUUID planId = TimeUUID.deserialize(in);
             boolean sendByFollower = in.readBoolean();
             int sessionIndex = in.readInt();
             int sequenceNumber = in.readInt();
             long repairedAt = in.readLong();
-            UUID pendingRepair = in.readBoolean() ? UUIDSerializer.serializer.deserialize(in, version) : null;
+            TimeUUID pendingRepair = in.readBoolean() ? TimeUUID.deserialize(in) : null;
 
             return new StreamMessageHeader(tableId, sender, planId, sendByFollower, sessionIndex, sequenceNumber, repairedAt, pendingRepair);
         }
@@ -141,13 +139,13 @@
         {
             long size = header.tableId.serializedSize();
             size += inetAddressAndPortSerializer.serializedSize(header.sender, version);
-            size += UUIDSerializer.serializer.serializedSize(header.planId, version);
+            size += TimeUUID.sizeInBytes();
             size += TypeSizes.sizeof(header.sendByFollower);
             size += TypeSizes.sizeof(header.sessionIndex);
             size += TypeSizes.sizeof(header.sequenceNumber);
             size += TypeSizes.sizeof(header.repairedAt);
             size += TypeSizes.sizeof(header.pendingRepair != null);
-            size += header.pendingRepair != null ? UUIDSerializer.serializer.serializedSize(header.pendingRepair, version) : 0;
+            size += header.pendingRepair != null ? TimeUUID.sizeInBytes() : 0;
 
             return size;
         }
diff --git a/src/java/org/apache/cassandra/tools/AuditLogViewer.java b/src/java/org/apache/cassandra/tools/AuditLogViewer.java
index dd0e839..f226aa2 100644
--- a/src/java/org/apache/cassandra/tools/AuditLogViewer.java
+++ b/src/java/org/apache/cassandra/tools/AuditLogViewer.java
@@ -17,12 +17,12 @@
  */
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.util.Arrays;
 import java.util.List;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -76,7 +76,7 @@
         Pauser pauser = Pauser.millis(100);
         List<ExcerptTailer> tailers = pathList.stream()
                                               .distinct()
-                                              .map(path -> SingleChronicleQueueBuilder.single(new File(path)).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build())
+                                              .map(path -> SingleChronicleQueueBuilder.single(new File(path).toJavaIOFile()).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build())
                                               .map(SingleChronicleQueue::createTailer)
                                               .collect(Collectors.toList());
         boolean hadWork = true;
diff --git a/src/java/org/apache/cassandra/tools/BootstrapMonitor.java b/src/java/org/apache/cassandra/tools/BootstrapMonitor.java
index 9719192..4d58638 100644
--- a/src/java/org/apache/cassandra/tools/BootstrapMonitor.java
+++ b/src/java/org/apache/cassandra/tools/BootstrapMonitor.java
@@ -20,18 +20,20 @@
 import java.io.IOException;
 import java.io.PrintStream;
 import java.text.SimpleDateFormat;
-import java.util.concurrent.locks.Condition;
 
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressEventType;
 import org.apache.cassandra.utils.progress.jmx.JMXNotificationProgressListener;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+
 public class BootstrapMonitor extends JMXNotificationProgressListener
 {
     private final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
     private final PrintStream out;
-    private final Condition condition = new SimpleCondition();
+    private final Condition condition = newOneTimeCondition();
 
     public BootstrapMonitor(PrintStream out)
     {
@@ -74,7 +76,7 @@
     public void progress(String tag, ProgressEvent event)
     {
         ProgressEventType type = event.getType();
-        String message = String.format("[%s] %s", format.format(System.currentTimeMillis()), event.getMessage());
+        String message = String.format("[%s] %s", format.format(currentTimeMillis()), event.getMessage());
         if (type == ProgressEventType.PROGRESS)
         {
             message = message + " (progress: " + (int)event.getProgressPercentage() + "%)";
diff --git a/src/java/org/apache/cassandra/tools/BulkLoadConnectionFactory.java b/src/java/org/apache/cassandra/tools/BulkLoadConnectionFactory.java
index 5b61fab..eef0ef4 100644
--- a/src/java/org/apache/cassandra/tools/BulkLoadConnectionFactory.java
+++ b/src/java/org/apache/cassandra/tools/BulkLoadConnectionFactory.java
@@ -19,14 +19,17 @@
 package org.apache.cassandra.tools;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 
-import io.netty.channel.Channel;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.net.OutboundConnectionSettings;
-import org.apache.cassandra.streaming.DefaultConnectionFactory;
-import org.apache.cassandra.streaming.StreamConnectionFactory;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
+import org.apache.cassandra.streaming.async.NettyStreamingChannel;
 
-public class BulkLoadConnectionFactory extends DefaultConnectionFactory implements StreamConnectionFactory
+import static org.apache.cassandra.locator.InetAddressAndPort.getByAddress;
+
+public class BulkLoadConnectionFactory extends NettyStreamingConnectionFactory
 {
     private final int storagePort;
     private final EncryptionOptions.ServerEncryptionOptions encryptionOptions;
@@ -37,7 +40,26 @@
         this.encryptionOptions = encryptionOptions;
     }
 
-    public Channel createConnection(OutboundConnectionSettings template, int messagingVersion) throws IOException
+    @Override
+    public NettyStreamingChannel create(InetSocketAddress to, int messagingVersion, StreamingChannel.Kind kind) throws IOException
+    {
+        OutboundConnectionSettings template = new OutboundConnectionSettings(getByAddress(to));
+        return create(template, messagingVersion, kind);
+    }
+
+    @Override
+    public StreamingChannel create(InetSocketAddress to,
+                                   InetSocketAddress preferred,
+                                   int messagingVersion,
+                                   StreamingChannel.Kind kind) throws IOException
+    {
+        // The preferred address is always overwritten in create(). This method override only exists so we can avoid
+        // falling back to the NettyStreamingConnectionFactory implementation.
+        OutboundConnectionSettings template = new OutboundConnectionSettings(getByAddress(to), getByAddress(preferred));
+        return create(template, messagingVersion, kind);
+    }
+
+    private NettyStreamingChannel create(OutboundConnectionSettings template, int messagingVersion, StreamingChannel.Kind kind) throws IOException
     {
         // storage port can handle both encrypted and unencrypted traffic from 4.0
         // so from sstableloader's point of view we can use just storage port for both cases
@@ -47,9 +69,8 @@
         if (encryptionOptions != null && encryptionOptions.internode_encryption != EncryptionOptions.ServerEncryptionOptions.InternodeEncryption.none)
             template = template.withEncryption(encryptionOptions);
 
-        return super.createConnection(template, messagingVersion);
+        return connect(template, messagingVersion, kind);
     }
-
     @Override
     public boolean supportsPreferredIp()
     {
diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java
index f68c74d..ebdd072 100644
--- a/src/java/org/apache/cassandra/tools/BulkLoader.java
+++ b/src/java/org/apache/cassandra/tools/BulkLoader.java
@@ -23,29 +23,36 @@
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLEngine;
 
-import com.datastax.driver.core.AuthProvider;
-import com.datastax.driver.core.RemoteEndpointAwareJdkSSLOptions;
-import com.datastax.driver.core.SSLOptions;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 
+import com.datastax.driver.core.AuthProvider;
+import com.datastax.driver.core.RemoteEndpointAwareJdkSSLOptions;
+import com.datastax.driver.core.SSLOptions;
 import com.datastax.shaded.netty.channel.socket.SocketChannel;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.io.sstable.SSTableLoader;
-import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.security.SSLFactory;
-import org.apache.cassandra.streaming.*;
+import org.apache.cassandra.streaming.ProgressInfo;
+import org.apache.cassandra.streaming.SessionInfo;
+import org.apache.cassandra.streaming.StreamEvent;
+import org.apache.cassandra.streaming.StreamEventHandler;
+import org.apache.cassandra.streaming.StreamResultFuture;
+import org.apache.cassandra.streaming.StreamState;
+import org.apache.cassandra.streaming.StreamingChannel;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NativeSSTableLoaderClient;
 import org.apache.cassandra.utils.OutputHandler;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class BulkLoader
 {
-    public static void main(String args[]) throws BulkLoadException
+    public static void main(String[] args) throws BulkLoadException
     {
         LoaderOptions options = LoaderOptions.builder().parseArgs(args).build();
         load(options);
@@ -56,7 +63,7 @@
         DatabaseDescriptor.toolInitialization();
         OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
         SSTableLoader loader = new SSTableLoader(
-                options.directory.getAbsoluteFile(),
+                options.directory.toAbsolute(),
                 new ExternalClient(
                         options.hosts,
                         options.storagePort,
@@ -66,9 +73,11 @@
                         handler,
                         options.connectionsPerHost,
                         options.targetKeyspace);
-        DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(options.throttle);
-        DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(options.interDcThrottle);
-        StreamResultFuture future = null;
+        DatabaseDescriptor.setStreamThroughputOutboundBytesPerSec(options.throttleBytes);
+        DatabaseDescriptor.setInterDCStreamThroughputOutboundBytesPerSec(options.interDcThrottleBytes);
+        DatabaseDescriptor.setEntireSSTableStreamThroughputOutboundMebibytesPerSec(options.entireSSTableThrottleMebibytes);
+        DatabaseDescriptor.setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(options.entireSSTableInterDcThrottleMebibytes);
+        StreamResultFuture future;
 
         ProgressIndicator indicator = new ProgressIndicator();
         try
@@ -119,18 +128,18 @@
     // Return true when everything is at 100%
     static class ProgressIndicator implements StreamEventHandler
     {
-        private long start;
+        private final long start;
         private long lastProgress;
         private long lastTime;
 
         private long peak = 0;
         private int totalFiles = 0;
 
-        private final Multimap<InetAddressAndPort, SessionInfo> sessionsByHost = HashMultimap.create();
+        private final Multimap<InetSocketAddress, SessionInfo> sessionsByHost = HashMultimap.create();
 
         public ProgressIndicator()
         {
-            start = lastTime = System.nanoTime();
+            start = lastTime = nanoTime();
         }
 
         public void onSuccess(StreamState finalState)
@@ -156,7 +165,7 @@
                     progressInfo = ((StreamEvent.ProgressEvent) event).progress;
                 }
 
-                long time = System.nanoTime();
+                long time = nanoTime();
                 long deltaTime = time - lastTime;
 
                 StringBuilder sb = new StringBuilder();
@@ -167,7 +176,7 @@
 
                 boolean updateTotalFiles = totalFiles == 0;
                 // recalculate progress across all sessions in all hosts and display
-                for (InetAddressAndPort peer : sessionsByHost.keySet())
+                for (InetSocketAddress peer : sessionsByHost.keySet())
                 {
                     sb.append("[").append(peer).append("]");
 
@@ -218,7 +227,7 @@
                 }
                 sb.append(" (avg: ").append(FBUtilities.prettyPrintMemoryPerSecond(totalProgress, time - start)).append(")");
 
-                System.out.println(sb.toString());
+                System.out.println(sb);
             }
         }
 
@@ -229,7 +238,7 @@
 
         private void printSummary(int connectionsPerHost)
         {
-            long end = System.nanoTime();
+            long end = nanoTime();
             long durationMS = ((end - start) / (1000000));
 
             StringBuilder sb = new StringBuilder();
@@ -240,14 +249,14 @@
             sb.append(String.format("   %-24s: %-10s%n", "Total duration ", durationMS + " ms"));
             sb.append(String.format("   %-24s: %-10s%n", "Average transfer rate ", FBUtilities.prettyPrintMemoryPerSecond(lastProgress, end - start)));
             sb.append(String.format("   %-24s: %-10s%n", "Peak transfer rate ",  FBUtilities.prettyPrintMemoryPerSecond(peak)));
-            System.out.println(sb.toString());
+            System.out.println(sb);
         }
     }
 
     private static SSLOptions buildSSLOptions(EncryptionOptions clientEncryptionOptions)
     {
 
-        if (!clientEncryptionOptions.isEnabled())
+        if (!clientEncryptionOptions.getEnabled())
         {
             return null;
         }
@@ -297,7 +306,7 @@
         }
 
         @Override
-        public StreamConnectionFactory getConnectionFactory()
+        public StreamingChannel.Factory getConnectionFactory()
         {
             return new BulkLoadConnectionFactory(serverEncOptions, storagePort);
         }
diff --git a/src/java/org/apache/cassandra/tools/GenerateTokens.java b/src/java/org/apache/cassandra/tools/GenerateTokens.java
index c03a4d0..a6888d7 100644
--- a/src/java/org/apache/cassandra/tools/GenerateTokens.java
+++ b/src/java/org/apache/cassandra/tools/GenerateTokens.java
@@ -21,7 +21,6 @@
 import java.util.Arrays;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Option;
diff --git a/src/java/org/apache/cassandra/tools/HashPassword.java b/src/java/org/apache/cassandra/tools/HashPassword.java
new file mode 100644
index 0000000..ddc0a72
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/HashPassword.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.cassandra.io.util.File;
+import org.mindrot.jbcrypt.BCrypt;
+
+public class HashPassword
+{
+    private static final String LOGROUNDS_OPTION = "logrounds";
+    private static final String HELP_OPTION = "help";
+    private static final String ENV_VAR = "environment-var";
+    private static final String PLAIN = "plain";
+    private static final String INPUT = "input";
+
+    private static final int LOGROUNDS_DEFAULT = 10;
+    private static final int MIN_PASS_LENGTH = 4;
+
+    public static void main(String[] args)
+    {
+        try
+        {
+            Options options = getOptions();
+            CommandLine cmd = parseCommandLine(args, options);
+
+            String password = null;
+            if (cmd.hasOption(ENV_VAR))
+            {
+                password = System.getenv(cmd.getOptionValue(ENV_VAR));
+                if (password == null)
+                {
+                    System.err.println(String.format("Environment variable '%s' is undefined.", cmd.getOptionValue(ENV_VAR)));
+                    System.exit(1);
+                }
+            }
+            else if (cmd.hasOption(PLAIN))
+            {
+                password = cmd.getOptionValue(PLAIN);
+            }
+            else if (cmd.hasOption(INPUT))
+            {
+                String input = cmd.getOptionValue(INPUT);
+                byte[] fileInput = null;
+                if ("-".equals(input))
+                {
+                    ByteArrayOutputStream os = new ByteArrayOutputStream();
+                    int rd;
+                    while ((rd = System.in.read()) != -1)
+                        os.write(rd);
+                    fileInput = os.toByteArray();
+                }
+                else
+                {
+                    try
+                    {
+                        Path file = File.getPath(input);
+                        fileInput = Files.readAllBytes(file);
+                    }
+                    catch (IOException e)
+                    {
+                        System.err.printf("Failed to read from '%s': %s%n", input, e);
+                        System.exit(1);
+                    }
+                }
+                password = new String(fileInput, StandardCharsets.UTF_8);
+            }
+            else
+            {
+                System.err.println(String.format("One of the options --%s, --%s or --%s must be used.",
+                                                 ENV_VAR, PLAIN, INPUT));
+                printUsage(options);
+                System.exit(1);
+            }
+
+            if (password.chars().anyMatch(i -> i < 32))
+                System.err.println("WARNING: The provided plain text password contains non-printable characters (ASCII<32).");
+
+            if (password.length() < MIN_PASS_LENGTH)
+                System.err.println("WARNING: The provided password is very short, probably too short to be secure.");
+
+            int logRounds = cmd.hasOption(LOGROUNDS_OPTION) ? Integer.parseInt(cmd.getOptionValue(LOGROUNDS_OPTION)) : LOGROUNDS_DEFAULT;
+            if (logRounds < 4 || logRounds > 30)
+            {
+                System.err.println(String.format("Bad value for --%s %d. " +
+                                                 "Please use a value between 4 and 30, inclusive",
+                        LOGROUNDS_OPTION, logRounds));
+                System.exit(1);
+            }
+
+            // The number of hash rounds is in fact 2^logRounds.
+            if (logRounds > 16)
+                System.err.println(String.format("WARNING: Using a high number of hash rounds, as configured via '--%s %d', " +
+                                                 "will consume a lot of CPU and likely cause timeouts. Note that the parameter defines the " +
+                                                 "logarithmic number of rounds: %d becomes 2^%d = %d rounds",
+                        LOGROUNDS_OPTION, logRounds,
+                        logRounds, logRounds, 1 << logRounds));
+
+            if (password.getBytes().length > 72)
+                System.err.println(String.format("WARNING: The provided password has a length of %d bytes, but the underlying hash/crypt algorithm " +
+                        "(bcrypt) can only compare up to 72 bytes. The password will be accepted and work, but only compared up to 72 bytes.",
+                        password.getBytes().length));
+
+            String hashed = escape(hashpw(password, logRounds));
+            System.out.print(hashed);
+            System.out.flush();
+        }
+        catch (Exception e)
+        {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    private static CommandLine parseCommandLine(String[] args, Options options) throws ParseException
+    {
+        CommandLineParser parser = new GnuParser();
+
+        CommandLine cmd = parser.parse(options, args, false);
+
+        if (cmd.hasOption(HELP_OPTION))
+        {
+            printUsage(options);
+            System.exit(0);
+        }
+        return cmd;
+    }
+
+    private static Options getOptions()
+    {
+        Options options = new Options();
+        options.addOption("h", HELP_OPTION, false, "Display this help message");
+        options.addOption("r", LOGROUNDS_OPTION, true, "Number of hash rounds (default: " + LOGROUNDS_DEFAULT + ").");
+        OptionGroup group = new OptionGroup();
+        group.addOption(new Option("e", ENV_VAR, true,
+                                   "Use value of the specified environment variable as the password"));
+        group.addOption(new Option("p", PLAIN, true,
+                                   "Argument is the plain text password"));
+        group.addOption(new Option("i", INPUT, true,
+                                   "Input is a file (or - for stdin) to read the password from. " +
+                                   "Make sure that the whole input including newlines is considered. " +
+                                   "For example, the shell command 'echo -n foobar | hash_password -i -' will " +
+                                   "work as intended and just hash 'foobar'."));
+        options.addOptionGroup(group);
+        return options;
+    }
+
+    private static String hashpw(String password, int rounds)
+    {
+        return BCrypt.hashpw(password, BCrypt.gensalt(rounds));
+    }
+
+    private static String escape(String name)
+    {
+        return StringUtils.replace(name, "'", "''");
+    }
+
+    public static void printUsage(Options options)
+    {
+        String usage = "hash_password [options]";
+        String header = "--\n" +
+                        "Hashes a plain text password and prints the hashed password.\n" +
+                        "Options are:";
+        new HelpFormatter().printHelp(usage, header, options, "");
+    }
+}
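For reference, the output of the new tool is a standard bcrypt hash, so it can be verified with the same jbcrypt library the tool itself uses. A minimal sketch, assuming jbcrypt is on the classpath; the class name and sample passwords are illustrative only:

    import org.mindrot.jbcrypt.BCrypt;

    public class HashPasswordExample
    {
        public static void main(String[] args)
        {
            // Hash with 2^10 rounds, matching the tool's LOGROUNDS_DEFAULT of 10.
            String hash = BCrypt.hashpw("s3cr3tpass", BCrypt.gensalt(10));

            // A stored hash can later be checked against a plain text candidate.
            System.out.println(BCrypt.checkpw("s3cr3tpass", hash)); // true
            System.out.println(BCrypt.checkpw("wrong-pass", hash)); // false
        }
    }

As the tool's own warning notes, bcrypt only compares the first 72 bytes of the password.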
diff --git a/src/java/org/apache/cassandra/tools/JMXTool.java b/src/java/org/apache/cassandra/tools/JMXTool.java
index e917179..d054716 100644
--- a/src/java/org/apache/cassandra/tools/JMXTool.java
+++ b/src/java/org/apache/cassandra/tools/JMXTool.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.tools;
 
-import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -71,6 +69,8 @@
 import io.airlift.airline.Help;
 import io.airlift.airline.HelpOption;
 import io.airlift.airline.Option;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.yaml.snakeyaml.TypeDescription;
 import org.yaml.snakeyaml.Yaml;
 import org.yaml.snakeyaml.constructor.Constructor;
@@ -229,8 +229,8 @@
             Preconditions.checkArgument(files.size() == 2, "files requires 2 arguments but given %s", files);
             Map<String, Info> left;
             Map<String, Info> right;
-            try (FileInputStream leftStream = new FileInputStream(files.get(0));
-                 FileInputStream rightStream = new FileInputStream(files.get(1)))
+            try (FileInputStreamPlus leftStream = new FileInputStreamPlus(files.get(0));
+                 FileInputStreamPlus rightStream = new FileInputStreamPlus(files.get(1)))
             {
                 left = format.load(leftStream);
                 right = format.load(rightStream);
diff --git a/src/java/org/apache/cassandra/tools/JsonTransformer.java b/src/java/org/apache/cassandra/tools/JsonTransformer.java
index 341512c..ed0830d 100644
--- a/src/java/org/apache/cassandra/tools/JsonTransformer.java
+++ b/src/java/org/apache/cassandra/tools/JsonTransformer.java
@@ -58,6 +58,8 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public final class JsonTransformer
 {
 
@@ -282,7 +284,7 @@
                     json.writeFieldName("expires_at");
                     json.writeString(dateString(TimeUnit.SECONDS, liveInfo.localExpirationTime()));
                     json.writeFieldName("expired");
-                    json.writeBoolean(liveInfo.localExpirationTime() < (System.currentTimeMillis() / 1000));
+                    json.writeBoolean(liveInfo.localExpirationTime() < (currentTimeMillis() / 1000));
                 }
                 json.writeEndObject();
                 objectIndenter.setCompact(false);
@@ -497,7 +499,7 @@
                 json.writeFieldName("expires_at");
                 json.writeString(dateString(TimeUnit.SECONDS, cell.localDeletionTime()));
                 json.writeFieldName("expired");
-                json.writeBoolean(!cell.isLive((int) (System.currentTimeMillis() / 1000)));
+                json.writeBoolean(!cell.isLive((int) (currentTimeMillis() / 1000)));
             }
             json.writeEndObject();
             objectIndenter.setCompact(false);
diff --git a/src/java/org/apache/cassandra/tools/LoaderOptions.java b/src/java/org/apache/cassandra/tools/LoaderOptions.java
index 22729fc..7729955 100644
--- a/src/java/org/apache/cassandra/tools/LoaderOptions.java
+++ b/src/java/org/apache/cassandra/tools/LoaderOptions.java
@@ -20,28 +20,40 @@
  */
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
-import java.net.*;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.UnknownHostException;
 import java.util.HashSet;
 import java.util.Set;
 
 import com.google.common.base.Throwables;
 import com.google.common.net.HostAndPort;
-
-import org.apache.cassandra.config.*;
-import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.tools.BulkLoader.CmdLineOptions;
-
-import com.datastax.driver.core.AuthProvider;
-import com.datastax.driver.core.PlainTextAuthProvider;
-import org.apache.commons.cli.*;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.datastax.driver.core.AuthProvider;
+import com.datastax.driver.core.PlainTextAuthProvider;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DataRateSpec;
+import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.config.YamlConfigurationLoader;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.tools.BulkLoader.CmdLineOptions;
+
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
+
 public class LoaderOptions
 {
     private static final Logger logger = LoggerFactory.getLogger(LoaderOptions.class);
@@ -60,8 +72,25 @@
     public static final String IGNORE_NODES_OPTION = "ignore";
     public static final String CONNECTIONS_PER_HOST = "connections-per-host";
     public static final String CONFIG_PATH = "conf-path";
+
+    /**
+     * Throttle defined in megabits per second. CASSANDRA-10637 introduced a builder, which is the preferred way to
+     * provide options instead of using these constant fields.
+     * @deprecated Use {@code throttle-mib} instead
+     */
+    @Deprecated
     public static final String THROTTLE_MBITS = "throttle";
+    public static final String THROTTLE_MEBIBYTES = "throttle-mib";
+    /**
+     * Inter-datacenter throttle defined in megabits per second. CASSANDRA-10637 introduced a builder, which is the
+     * preferred way to provide options instead of using these constant fields.
+     * @deprecated Use {@code inter-dc-throttle-mib} instead
+     */
+    @Deprecated
     public static final String INTER_DC_THROTTLE_MBITS = "inter-dc-throttle";
+    public static final String INTER_DC_THROTTLE_MEBIBYTES = "inter-dc-throttle-mib";
+    public static final String ENTIRE_SSTABLE_THROTTLE_MEBIBYTES = "entire-sstable-throttle-mib";
+    public static final String ENTIRE_SSTABLE_INTER_DC_THROTTLE_MEBIBYTES = "entire-sstable-inter-dc-throttle-mib";
     public static final String TOOL_NAME = "sstableloader";
     public static final String TARGET_KEYSPACE = "target-keyspace";
 
@@ -83,8 +112,10 @@
     public final String user;
     public final String passwd;
     public final AuthProvider authProvider;
-    public final int throttle;
-    public final int interDcThrottle;
+    public final long throttleBytes;
+    public final long interDcThrottleBytes;
+    public final int entireSSTableThrottleMebibytes;
+    public final int entireSSTableInterDcThrottleMebibytes;
     public final int storagePort;
     public final int sslStoragePort;
     public final EncryptionOptions clientEncOptions;
@@ -104,8 +135,10 @@
         user = builder.user;
         passwd = builder.passwd;
         authProvider = builder.authProvider;
-        throttle = builder.throttle;
-        interDcThrottle = builder.interDcThrottle;
+        throttleBytes = builder.throttleBytes;
+        interDcThrottleBytes = builder.interDcThrottleBytes;
+        entireSSTableThrottleMebibytes = builder.entireSSTableThrottleMebibytes;
+        entireSSTableInterDcThrottleMebibytes = builder.entireSSTableInterDcThrottleMebibytes;
         storagePort = builder.storagePort;
         sslStoragePort = builder.sslStoragePort;
         clientEncOptions = builder.clientEncOptions;
@@ -127,8 +160,11 @@
         String passwd;
         String authProviderName;
         AuthProvider authProvider;
-        int throttle = 0;
-        int interDcThrottle = 0;
+        long throttleBytes = 0;
+        long interDcThrottleBytes = 0;
+        int entireSSTableThrottleMebibytes = 0;
+        int entireSSTableInterDcThrottleMebibytes = 0;
+
         int storagePort;
         int sslStoragePort;
         EncryptionOptions clientEncOptions = new EncryptionOptions();
@@ -216,15 +252,60 @@
             return this;
         }
 
-        public Builder throttle(int throttle)
+        public Builder throttleMebibytes(int throttleMebibytes)
         {
-            this.throttle = throttle;
+            this.throttleBytes = (long) MEBIBYTES_PER_SECOND.toBytesPerSecond(throttleMebibytes);
             return this;
         }
 
+        @Deprecated
+        public Builder throttle(int throttleMegabits)
+        {
+            this.throttleBytes = (long) DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(throttleMegabits).toBytesPerSecond();
+            return this;
+        }
+
+        public Builder interDcThrottleMebibytes(int interDcThrottleMebibytes)
+        {
+            this.interDcThrottleBytes = (long) MEBIBYTES_PER_SECOND.toBytesPerSecond(interDcThrottleMebibytes);
+            return this;
+        }
+
+        public Builder interDcThrottleMegabits(int interDcThrottleMegabits)
+        {
+            this.interDcThrottleBytes = (long) DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(interDcThrottleMegabits).toBytesPerSecond();
+            return this;
+        }
+
+        @Deprecated
         public Builder interDcThrottle(int interDcThrottle)
         {
-            this.interDcThrottle = interDcThrottle;
+            return interDcThrottleMegabits(interDcThrottle);
+        }
+
+        public Builder entireSSTableThrottleMebibytes(int entireSSTableThrottleMebibytes)
+        {
+            this.entireSSTableThrottleMebibytes = entireSSTableThrottleMebibytes;
+            return this;
+        }
+
+        @Deprecated
+        public Builder entireSSTableThrottle(int entireSSTableThrottle)
+        {
+            this.entireSSTableThrottleMebibytes = entireSSTableThrottle;
+            return this;
+        }
+
+        public Builder entireSSTableInterDcThrottleMebibytes(int entireSSTableInterDcThrottleMebibytes)
+        {
+            this.entireSSTableInterDcThrottleMebibytes = entireSSTableInterDcThrottleMebibytes;
+            return this;
+        }
+
+        @Deprecated
+        public Builder entireSSTableInterDcThrottle(int entireSSTableInterDcThrottle)
+        {
+            this.entireSSTableInterDcThrottleMebibytes = entireSSTableInterDcThrottle;
             return this;
         }
 
@@ -381,14 +462,37 @@
                     {
                         errorMsg("Config file not found", options);
                     }
-                    config = new YamlConfigurationLoader().loadConfig(configFile.toURI().toURL());
+                    config = new YamlConfigurationLoader().loadConfig(configFile.toPath().toUri().toURL());
+
+                    // The two checks below are needed to match the pre-CASSANDRA-15234 upper bound for those parameters, which were still defined in megabits per second
+                    if (config.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+                    {
+                        throw new ConfigurationException("stream_throughput_outbound: " + config.stream_throughput_outbound.toString() + " is too large", false);
+                    }
+
+                    if (config.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+                    {
+                        throw new ConfigurationException("inter_dc_stream_throughput_outbound: " + config.inter_dc_stream_throughput_outbound.toString() + " is too large", false);
+                    }
+
+                    if (config.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+                    {
+                        throw new ConfigurationException("entire_sstable_stream_throughput_outbound: " + config.entire_sstable_stream_throughput_outbound.toString() + " is too large", false);
+                    }
+
+                    if (config.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+                    {
+                        throw new ConfigurationException("entire_sstable_inter_dc_stream_throughput_outbound: " + config.entire_sstable_inter_dc_stream_throughput_outbound.toString() + " is too large", false);
+                    }
                 }
                 else
                 {
                     config = new Config();
                     // unthrottle stream by default
-                    config.stream_throughput_outbound_megabits_per_sec = 0;
-                    config.inter_dc_stream_throughput_outbound_megabits_per_sec = 0;
+                    config.stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(0);
+                    config.inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(0);
+                    config.entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(0);
+                    config.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(0);
                 }
 
                 if (cmd.hasOption(STORAGE_PORT_OPTION))
@@ -416,11 +520,12 @@
                     connectionsPerHost = Integer.parseInt(cmd.getOptionValue(CONNECTIONS_PER_HOST));
                 }
 
+                throttleBytes = config.stream_throughput_outbound.toBytesPerSecondAsInt();
+
                 if (cmd.hasOption(SSL_STORAGE_PORT_OPTION))
                     logger.info("ssl storage port is deprecated and not used, all communication goes though storage port " +
                                 "which is able to handle encrypted communication too.");
 
-                throttle = config.stream_throughput_outbound_megabits_per_sec;
                 // Copy the encryption options and apply the config so that argument parsing can access isEnabled.
                 clientEncOptions = config.client_encryption_options.applyConfig();
                 serverEncOptions = config.server_encryption_options;
@@ -432,7 +537,7 @@
                 }
                 else
                 {
-                    if (config.native_transport_port_ssl != null && (config.client_encryption_options.isEnabled() || clientEncOptions.isEnabled()))
+                    if (config.native_transport_port_ssl != null && (config.client_encryption_options.getEnabled() || clientEncOptions.getEnabled()))
                         nativePort = config.native_transport_port_ssl;
                     else
                         nativePort = config.native_transport_port;
@@ -460,18 +565,48 @@
                     System.exit(1);
                 }
 
+                if (cmd.hasOption(THROTTLE_MBITS) && cmd.hasOption(THROTTLE_MEBIBYTES))
+                {
+                    errorMsg(String.format("Both '%s' and '%s' were provided. Please only provide one of the two options", THROTTLE_MBITS, THROTTLE_MEBIBYTES), options);
+                }
+
+                if (cmd.hasOption(INTER_DC_THROTTLE_MBITS) && cmd.hasOption(INTER_DC_THROTTLE_MEBIBYTES))
+                {
+                    errorMsg(String.format("Both '%s' and '%s' were provided. Please only provide one of the two options", INTER_DC_THROTTLE_MBITS, INTER_DC_THROTTLE_MEBIBYTES), options);
+                }
+
                 if (cmd.hasOption(THROTTLE_MBITS))
                 {
-                    throttle = Integer.parseInt(cmd.getOptionValue(THROTTLE_MBITS));
+                    throttle(Integer.parseInt(cmd.getOptionValue(THROTTLE_MBITS)));
+                }
+
+                if (cmd.hasOption(THROTTLE_MEBIBYTES))
+                {
+                    throttleMebibytes(Integer.parseInt(cmd.getOptionValue(THROTTLE_MEBIBYTES)));
                 }
 
                 if (cmd.hasOption(INTER_DC_THROTTLE_MBITS))
                 {
-                    interDcThrottle = Integer.parseInt(cmd.getOptionValue(INTER_DC_THROTTLE_MBITS));
+                    interDcThrottleMegabits(Integer.parseInt(cmd.getOptionValue(INTER_DC_THROTTLE_MBITS)));
+                }
+
+                if (cmd.hasOption(INTER_DC_THROTTLE_MEBIBYTES))
+                {
+                    interDcThrottleMebibytes(Integer.parseInt(cmd.getOptionValue(INTER_DC_THROTTLE_MEBIBYTES)));
+                }
+
+                if (cmd.hasOption(ENTIRE_SSTABLE_THROTTLE_MEBIBYTES))
+                {
+                    entireSSTableThrottleMebibytes(Integer.parseInt(cmd.getOptionValue(ENTIRE_SSTABLE_THROTTLE_MEBIBYTES)));
+                }
+
+                if (cmd.hasOption(ENTIRE_SSTABLE_INTER_DC_THROTTLE_MEBIBYTES))
+                {
+                    entireSSTableInterDcThrottleMebibytes(Integer.parseInt(cmd.getOptionValue(ENTIRE_SSTABLE_INTER_DC_THROTTLE_MEBIBYTES)));
                 }
 
                 if (cmd.hasOption(SSL_TRUSTSTORE) || cmd.hasOption(SSL_TRUSTSTORE_PW) ||
-                            cmd.hasOption(SSL_KEYSTORE) || cmd.hasOption(SSL_KEYSTORE_PW))
+                    cmd.hasOption(SSL_KEYSTORE) || cmd.hasOption(SSL_KEYSTORE_PW))
                 {
                     clientEncOptions = clientEncOptions.withEnabled(true);
                 }
@@ -615,8 +750,12 @@
         options.addOption("p",  NATIVE_PORT_OPTION, "native transport port", "port used for native connection (default 9042)");
         options.addOption("sp",  STORAGE_PORT_OPTION, "storage port", "port used for internode communication (default 7000)");
         options.addOption("ssp",  SSL_STORAGE_PORT_OPTION, "ssl storage port", "port used for TLS internode communication (default 7001), this option is deprecated, all communication goes through storage port which handles encrypted communication as well");
-        options.addOption("t", THROTTLE_MBITS, "throttle", "throttle speed in Mbits (default unlimited)");
-        options.addOption("idct", INTER_DC_THROTTLE_MBITS, "inter-dc-throttle", "inter-datacenter throttle speed in Mbits (default unlimited)");
+        options.addOption("t", THROTTLE_MBITS, "throttle", "throttle speed in Mbps (default 0 for unlimited), this option is deprecated, use \"throttle-mib\" instead");
+        options.addOption(null, THROTTLE_MEBIBYTES, "throttle-mib", "throttle speed in MiB/s (default 0 for unlimited)");
+        options.addOption("idct", INTER_DC_THROTTLE_MBITS, "inter-dc-throttle", "inter-datacenter throttle speed in Mbps (default 0 for unlimited), this option is deprecated, use \"inter-dc-throttle-mib\" instead");
+        options.addOption(null, INTER_DC_THROTTLE_MEBIBYTES, "inter-dc-throttle-mib", "inter-datacenter throttle speed in MiB/s (default 0 for unlimited)");
+        options.addOption(null, ENTIRE_SSTABLE_THROTTLE_MEBIBYTES, "entire-sstable-throttle-mib", "entire SSTable throttle speed in MiB/s (default 0 for unlimited)");
+        options.addOption(null, ENTIRE_SSTABLE_INTER_DC_THROTTLE_MEBIBYTES, "entire-sstable-inter-dc-throttle-mib", "entire SSTable inter-datacenter throttle speed in MiB/s (default 0 for unlimited)");
         options.addOption("u", USER_OPTION, "username", "username for cassandra authentication");
         options.addOption("pw", PASSWD_OPTION, "password", "password for cassandra authentication");
         options.addOption("ap", AUTH_PROVIDER_OPTION, "auth provider", "custom AuthProvider class name for cassandra authentication");
@@ -645,7 +784,7 @@
                 "you will need to have the files Standard1-g-1-Data.db and Standard1-g-1-Index.db into a directory /path/to/Keyspace1/Standard1/.";
         String footer = System.lineSeparator() +
                 "You can provide cassandra.yaml file with -f command line option to set up streaming throughput, client and server encryption options. " +
-                "Only stream_throughput_outbound_megabits_per_sec, server_encryption_options and client_encryption_options are read from yaml. " +
+                "Only stream_throughput_outbound, server_encryption_options and client_encryption_options are read from yaml. " +
                 "You can override options read from cassandra.yaml with corresponding command line options.";
         new HelpFormatter().printHelp(usage, header, options, footer);
     }
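As a point of reference for the unit change above: the deprecated throttle options are expressed in megabits per second (decimal), the new *-mib options in mebibytes per second (binary), and both are normalised to bytes per second internally via DataRateSpec. A minimal sketch of the underlying arithmetic only, not of the DataRateSpec API:

    public class ThrottleUnits
    {
        // 1 megabit/s = 1,000,000 bits/s = 125,000 bytes/s
        static long megabitsToBytesPerSec(long megabits)
        {
            return megabits * 1_000_000L / 8;
        }

        // 1 mebibyte/s = 1024 * 1024 bytes/s = 1,048,576 bytes/s
        static long mebibytesToBytesPerSec(long mebibytes)
        {
            return mebibytes * 1024L * 1024L;
        }

        public static void main(String[] args)
        {
            System.out.println(megabitsToBytesPerSec(200)); // 25000000
            System.out.println(mebibytesToBytesPerSec(24)); // 25165824, roughly the same rate
        }
    }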
diff --git a/src/java/org/apache/cassandra/tools/NodeProbe.java b/src/java/org/apache/cassandra/tools/NodeProbe.java
index b06e0f3..4a2f43d 100644
--- a/src/java/org/apache/cassandra/tools/NodeProbe.java
+++ b/src/java/org/apache/cassandra/tools/NodeProbe.java
@@ -55,6 +55,21 @@
 import javax.management.remote.JMXServiceURL;
 import javax.rmi.ssl.SslRMIClientSocketFactory;
 
+import org.apache.cassandra.audit.AuditLogManager;
+import org.apache.cassandra.audit.AuditLogManagerMBean;
+import org.apache.cassandra.audit.AuditLogOptions;
+import org.apache.cassandra.audit.AuditLogOptionsCompositeData;
+import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.auth.AuthCache;
+import org.apache.cassandra.auth.AuthCacheMBean;
+import org.apache.cassandra.auth.NetworkPermissionsCache;
+import org.apache.cassandra.auth.NetworkPermissionsCacheMBean;
+import org.apache.cassandra.auth.PasswordAuthenticator;
+import org.apache.cassandra.auth.PermissionsCache;
+import org.apache.cassandra.auth.PermissionsCacheMBean;
+import org.apache.cassandra.auth.RolesCache;
+import org.apache.cassandra.auth.RolesCacheMBean;
+import org.apache.cassandra.auth.jmx.AuthorizationProxy;
 import org.apache.cassandra.batchlog.BatchlogManager;
 import org.apache.cassandra.batchlog.BatchlogManagerMBean;
 import org.apache.cassandra.db.ColumnFamilyStoreMBean;
@@ -133,6 +148,12 @@
     protected HintsServiceMBean hsProxy;
     protected BatchlogManagerMBean bmProxy;
     protected ActiveRepairServiceMBean arsProxy;
+    protected AuditLogManagerMBean almProxy;
+    protected PasswordAuthenticator.CredentialsCacheMBean ccProxy;
+    protected AuthorizationProxy.JmxPermissionsCacheMBean jpcProxy;
+    protected NetworkPermissionsCacheMBean npcProxy;
+    protected PermissionsCacheMBean pcProxy;
+    protected RolesCacheMBean rcProxy;
     protected Output output;
     private boolean failed;
 
@@ -245,6 +266,18 @@
             bmProxy = JMX.newMBeanProxy(mbeanServerConn, name, BatchlogManagerMBean.class);
             name = new ObjectName(ActiveRepairServiceMBean.MBEAN_NAME);
             arsProxy = JMX.newMBeanProxy(mbeanServerConn, name, ActiveRepairServiceMBean.class);
+            name = new ObjectName(AuditLogManager.MBEAN_NAME);
+            almProxy = JMX.newMBeanProxy(mbeanServerConn, name, AuditLogManagerMBean.class);
+            name = new ObjectName(AuthCache.MBEAN_NAME_BASE + PasswordAuthenticator.CredentialsCacheMBean.CACHE_NAME);
+            ccProxy = JMX.newMBeanProxy(mbeanServerConn, name, PasswordAuthenticator.CredentialsCacheMBean.class);
+            name = new ObjectName(AuthCache.MBEAN_NAME_BASE + AuthorizationProxy.JmxPermissionsCacheMBean.CACHE_NAME);
+            jpcProxy = JMX.newMBeanProxy(mbeanServerConn, name, AuthorizationProxy.JmxPermissionsCacheMBean.class);
+            name = new ObjectName(AuthCache.MBEAN_NAME_BASE + NetworkPermissionsCache.CACHE_NAME);
+            npcProxy = JMX.newMBeanProxy(mbeanServerConn, name, NetworkPermissionsCacheMBean.class);
+            name = new ObjectName(AuthCache.MBEAN_NAME_BASE + PermissionsCache.CACHE_NAME);
+            pcProxy = JMX.newMBeanProxy(mbeanServerConn, name, PermissionsCacheMBean.class);
+            name = new ObjectName(AuthCache.MBEAN_NAME_BASE + RolesCache.CACHE_NAME);
+            rcProxy = JMX.newMBeanProxy(mbeanServerConn, name, RolesCacheMBean.class);
         }
         catch (MalformedObjectNameException e)
         {
@@ -304,9 +337,9 @@
         return ssProxy.verify(extendedVerify, checkVersion, diskFailurePolicy, mutateRepairStatus, checkOwnsTokens, quick, keyspaceName, tableNames);
     }
 
-    public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
+    public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, long maxSSTableTimestamp, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
     {
-        return ssProxy.upgradeSSTables(keyspaceName, excludeCurrentVersion, jobs, tableNames);
+        return ssProxy.upgradeSSTables(keyspaceName, excludeCurrentVersion, maxSSTableTimestamp, jobs, tableNames);
     }
 
     public int garbageCollect(String tombstoneOption, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
@@ -314,6 +347,11 @@
         return ssProxy.garbageCollect(tombstoneOption, jobs, keyspaceName, tableNames);
     }
 
+    public int recompressSSTables(String keyspaceName, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
+    {
+        return ssProxy.recompressSSTables(keyspaceName, jobs, tableNames);
+    }
+
     private void checkJobs(PrintStream out, int jobs)
     {
         int compactors = ssProxy.getConcurrentCompactors();
@@ -324,64 +362,59 @@
     public void forceKeyspaceCleanup(PrintStream out, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
     {
         checkJobs(out, jobs);
-        switch (forceKeyspaceCleanup(jobs, keyspaceName, tableNames))
-        {
-            case 1:
-                failed = true;
-                out.println("Aborted cleaning up at least one table in keyspace "+keyspaceName+", check server logs for more information.");
-                break;
-            case 2:
-                failed = true;
-                out.println("Failed marking some sstables compacting in keyspace "+keyspaceName+", check server logs for more information");
-                break;
-        }
+        perform(out, keyspaceName,
+                () -> forceKeyspaceCleanup(jobs, keyspaceName, tableNames),
+                "cleaning up");
     }
 
     public void scrub(PrintStream out, boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTL, int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
     {
         checkJobs(out, jobs);
-        switch (ssProxy.scrub(disableSnapshot, skipCorrupted, checkData, reinsertOverflowedTTL, jobs, keyspaceName, tables))
-        {
-            case 1:
-                failed = true;
-                out.println("Aborted scrubbing at least one table in keyspace "+keyspaceName+", check server logs for more information.");
-                break;
-            case 2:
-                failed = true;
-                out.println("Failed marking some sstables compacting in keyspace "+keyspaceName+", check server logs for more information");
-                break;
-        }
+        perform(out, keyspaceName,
+                () -> scrub(disableSnapshot, skipCorrupted, checkData, reinsertOverflowedTTL, jobs, keyspaceName, tables),
+                "scrubbing");
     }
 
     public void verify(PrintStream out, boolean extendedVerify, boolean checkVersion, boolean diskFailurePolicy, boolean mutateRepairStatus, boolean checkOwnsTokens, boolean quick, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
     {
-        switch (verify(extendedVerify, checkVersion, diskFailurePolicy, mutateRepairStatus, checkOwnsTokens, quick, keyspaceName, tableNames))
-        {
-            case 1:
-                failed = true;
-                out.println("Aborted verifying at least one table in keyspace "+keyspaceName+", check server logs for more information.");
-                break;
-            case 2:
-                failed = true;
-                out.println("Failed marking some sstables compacting in keyspace "+keyspaceName+", check server logs for more information");
-                break;
-        }
+        perform(out, keyspaceName,
+                () -> verify(extendedVerify, checkVersion, diskFailurePolicy, mutateRepairStatus, checkOwnsTokens, quick, keyspaceName, tableNames),
+                "verifying");
     }
 
-
-    public void upgradeSSTables(PrintStream out, String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
+    public void recompressSSTables(PrintStream out, String keyspaceName, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
     {
         checkJobs(out, jobs);
-        switch (upgradeSSTables(keyspaceName, excludeCurrentVersion, jobs, tableNames))
+        perform(out, keyspaceName,
+                () -> recompressSSTables(keyspaceName, jobs, tableNames),
+                "recompressing sstables");
+    }
+
+    public void upgradeSSTables(PrintStream out, String keyspaceName, boolean excludeCurrentVersion, long maxSSTableTimestamp, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
+    {
+        checkJobs(out, jobs);
+        perform(out, keyspaceName,
+                () -> upgradeSSTables(keyspaceName, excludeCurrentVersion, maxSSTableTimestamp, jobs, tableNames),
+                "upgrading sstables");
+    }
+
+    private interface Job
+    {
+        int perform() throws IOException, ExecutionException, InterruptedException;
+    }
+
+    private void perform(PrintStream out, String ks, Job job, String jobName) throws IOException, ExecutionException, InterruptedException
+    {
+        switch (job.perform())
         {
             case 1:
-                failed = true;
-                out.println("Aborted upgrading sstables for at least one table in keyspace " + keyspaceName + ", check server logs for more information.");
+                failed = true;
+                out.printf("Aborted %s for at least one table in keyspace %s, check server logs for more information.\n",
+                           jobName, ks);
                 break;
             case 2:
                 failed = true;
-                out.println("Failed marking some sstables compacting in keyspace "+keyspaceName+", check server logs for more information");
-                break;
+                out.printf("Failed marking some sstables compacting in keyspace %s, check server logs for more information.\n",
+                           ks);
         }
     }
 
@@ -422,6 +455,11 @@
         ssProxy.forceKeyspaceCompactionForTokenRange(keyspaceName, startToken, endToken, tableNames);
     }
 
+    public void forceKeyspaceCompactionForPartitionKey(String keyspaceName, String partitionKey, String... tableNames) throws InterruptedException, ExecutionException, IOException
+    {
+        ssProxy.forceKeyspaceCompactionForPartitionKey(keyspaceName, partitionKey, tableNames);
+    }
+
     public void forceKeyspaceFlush(String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
     {
         ssProxy.forceKeyspaceFlush(keyspaceName, tableNames);
@@ -491,16 +529,85 @@
         cacheService.invalidateCounterCache();
     }
 
+    public void invalidateCredentialsCache()
+    {
+        ccProxy.invalidate();
+    }
+
+    public void invalidateCredentialsCache(String roleName)
+    {
+        ccProxy.invalidateCredentials(roleName);
+    }
+
+    public void invalidateJmxPermissionsCache()
+    {
+        jpcProxy.invalidate();
+    }
+
+    public void invalidateJmxPermissionsCache(String roleName)
+    {
+        jpcProxy.invalidatePermissions(roleName);
+    }
+
     public void invalidateKeyCache()
     {
         cacheService.invalidateKeyCache();
     }
 
+    public void invalidateNetworkPermissionsCache()
+    {
+        npcProxy.invalidate();
+    }
+
+    public void invalidateNetworkPermissionsCache(String roleName)
+    {
+        npcProxy.invalidateNetworkPermissions(roleName);
+    }
+
+    public void invalidatePermissionsCache()
+    {
+        pcProxy.invalidate();
+    }
+
+    public void invalidatePermissionsCache(String roleName, String resourceName)
+    {
+        pcProxy.invalidatePermissions(roleName, resourceName);
+    }
+
+    public void invalidateRolesCache()
+    {
+        rcProxy.invalidate();
+    }
+
+    public void invalidateRolesCache(String roleName)
+    {
+        rcProxy.invalidateRoles(roleName);
+    }
+
     public void invalidateRowCache()
     {
         cacheService.invalidateRowCache();
     }
 
+    public AuthCacheMBean getAuthCacheMBean(String cacheName)
+    {
+        switch (cacheName)
+        {
+            case PasswordAuthenticator.CredentialsCacheMBean.CACHE_NAME:
+                return ccProxy;
+            case AuthorizationProxy.JmxPermissionsCacheMBean.CACHE_NAME:
+                return jpcProxy;
+            case NetworkPermissionsCacheMBean.CACHE_NAME:
+                return npcProxy;
+            case PermissionsCacheMBean.CACHE_NAME:
+                return pcProxy;
+            case RolesCacheMBean.CACHE_NAME:
+                return rcProxy;
+            default:
+                throw new IllegalArgumentException("Unknown cache name: " + cacheName);
+        }
+    }
+
     public void drain() throws IOException, InterruptedException, ExecutionException
     {
         ssProxy.drain();
@@ -623,6 +730,11 @@
         }
     }
 
+    public Map<String, String> getHostIdToEndpointWithPort()
+    {
+        return ssProxy.getHostIdToEndpointWithPort();
+    }
+
     public String getLocalHostId()
     {
         return ssProxy.getLocalHostId();
@@ -722,9 +834,15 @@
         ssProxy.clearSnapshot(tag, keyspaces);
     }
 
+    public Map<String, TabularData> getSnapshotDetails(Map<String, String> options)
+    {
+        return ssProxy.getSnapshotDetails(options);
+    }
+
+    @Deprecated
     public Map<String, TabularData> getSnapshotDetails()
     {
-        return ssProxy.getSnapshotDetails();
+        return getSnapshotDetails(ImmutableMap.of());
     }
 
     public long trueSnapshotsSize()
@@ -1071,6 +1189,11 @@
         hsProxy.deleteAllHints();
     }
 
+    public List<Map<String, String>> listPendingHints()
+    {
+        return hsProxy.getPendingHints();
+    }
+
     public void refreshSizeEstimates()
     {
         try
@@ -1123,16 +1246,37 @@
         return ssProxy.isInitialized();
     }
 
+    public void setColumnIndexSize(int columnIndexSizeInKiB)
+    {
+        ssProxy.setColumnIndexSize(columnIndexSizeInKiB);
+    }
+
+    public int getColumnIndexSizeInKB()
+    {
+        return ssProxy.getColumnIndexSizeInKiB();
+    }
+
     public void setCompactionThroughput(int value)
     {
         ssProxy.setCompactionThroughputMbPerSec(value);
     }
 
+    @Deprecated
     public int getCompactionThroughput()
     {
         return ssProxy.getCompactionThroughputMbPerSec();
     }
 
+    public double getCompactionThroughputMebibytesAsDouble()
+    {
+        return ssProxy.getCompactionThroughtputMibPerSecAsDouble();
+    }
+
+    public long getCompactionThroughputBytes()
+    {
+        return ssProxy.getCompactionThroughtputBytesPerSec();
+    }
+
     public void setBatchlogReplayThrottle(int value)
     {
         ssProxy.setBatchlogReplayThrottleInKB(value);
@@ -1202,14 +1346,46 @@
         }
     }
 
+    @Deprecated
     public int getStreamThroughput()
     {
-        return ssProxy.getStreamThroughputMbPerSec();
+        return ssProxy.getStreamThroughputMbitPerSec();
     }
 
+    public double getStreamThroughputAsDouble()
+    {
+        return ssProxy.getStreamThroughputMbitPerSecAsDouble();
+    }
+
+    @Deprecated
     public int getInterDCStreamThroughput()
     {
-        return ssProxy.getInterDCStreamThroughputMbPerSec();
+        return ssProxy.getInterDCStreamThroughputMbitPerSec();
+    }
+
+    public double getInterDCStreamThroughputAsDouble()
+    {
+        return ssProxy.getInterDCStreamThroughputMbitPerSecAsDouble();
+    }
+
+    public double getStreamThroughputMibAsDouble()
+    {
+        return ssProxy.getStreamThroughputMebibytesPerSecAsDouble();
+    }
+
+    public double getInterDCStreamThroughputMibAsDouble()
+    {
+        return ssProxy.getInterDCStreamThroughputMebibytesPerSecAsDouble();
+    }
+
+    public double getEntireSSTableStreamThroughput()
+    {
+        return ssProxy.getEntireSSTableStreamThroughputMebibytesPerSecAsDouble();
+    }
+
+    public double getEntireSSTableInterDCStreamThroughput()
+    {
+        return ssProxy.getEntireSSTableInterDCStreamThroughputMebibytesPerSecAsDouble();
     }
 
     public double getTraceProbability()
@@ -1243,9 +1419,22 @@
         ssProxy.rebuildSecondaryIndex(ksName, cfName, idxNames);
     }
 
+    public Map<String, String> getSimpleStatesWithPort()
+    {
+        return fdProxy.getSimpleStatesWithPort();
+    }
+
     public String getGossipInfo(boolean withPort)
     {
-        return withPort ? fdProxy.getAllEndpointStatesWithPort() : fdProxy.getAllEndpointStates();
+        return getGossipInfo(withPort, false);
+    }
+
+    public String getGossipInfo(boolean withPort, boolean resolveIp)
+    {
+        if (resolveIp)
+            return withPort ? fdProxy.getAllEndpointStatesWithPortAndResolveIp() : fdProxy.getAllEndpointStatesWithResolveIp();
+        else
+            return withPort ? fdProxy.getAllEndpointStatesWithPort() : fdProxy.getAllEndpointStates();
     }
 
     public void stop(String string)
@@ -1302,12 +1491,32 @@
 
     public void setStreamThroughput(int value)
     {
-        ssProxy.setStreamThroughputMbPerSec(value);
+        ssProxy.setStreamThroughputMbitPerSec(value);
+    }
+
+    public void setStreamThroughputMiB(int value)
+    {
+        ssProxy.setStreamThroughputMebibytesPerSec(value);
     }
 
     public void setInterDCStreamThroughput(int value)
     {
-        ssProxy.setInterDCStreamThroughputMbPerSec(value);
+        ssProxy.setInterDCStreamThroughputMbitPerSec(value);
+    }
+
+    public void setInterDCStreamThroughputMiB(int value)
+    {
+        ssProxy.setInterDCStreamThroughputMebibytesPerSec(value);
+    }
+
+    public void setEntireSSTableStreamThroughput(int value)
+    {
+        ssProxy.setEntireSSTableStreamThroughputMebibytesPerSec(value);
+    }
+
+    public void setEntireSSTableInterDCStreamThroughput(int value)
+    {
+        ssProxy.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(value);
     }
 
     public void setTraceProbability(double value)
@@ -1859,16 +2068,26 @@
         ssProxy.disableAuditLog();
     }
 
-    public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces ,String excludedKeyspaces ,String includedCategories ,String excludedCategories ,String includedUsers ,String excludedUsers)
+    public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces, String excludedKeyspaces,
+                               String includedCategories, String excludedCategories, String includedUsers, String excludedUsers)
     {
         ssProxy.enableAuditLog(loggerName, parameters, includedKeyspaces, excludedKeyspaces, includedCategories, excludedCategories, includedUsers, excludedUsers);
     }
 
-    public void enableAuditLog(String loggerName, String includedKeyspaces ,String excludedKeyspaces ,String includedCategories ,String excludedCategories ,String includedUsers ,String excludedUsers)
+    public void enableAuditLog(String loggerName, String includedKeyspaces, String excludedKeyspaces, String includedCategories,
+                               String excludedCategories, String includedUsers, String excludedUsers)
     {
         this.enableAuditLog(loggerName, Collections.emptyMap(), includedKeyspaces, excludedKeyspaces, includedCategories, excludedCategories, includedUsers, excludedUsers);
     }
 
+    public void enableAuditLog(String loggerName, Map<String, String> parameters, String includedKeyspaces, String excludedKeyspaces, String includedCategories, String excludedCategories,
+                               String includedUsers, String excludedUsers, Integer maxArchiveRetries, Boolean block, String rollCycle,
+                               Long maxLogSize, Integer maxQueueWeight, String archiveCommand)
+    {
+        ssProxy.enableAuditLog(loggerName, parameters, includedKeyspaces, excludedKeyspaces, includedCategories, excludedCategories, includedUsers, excludedUsers,
+                               maxArchiveRetries, block, rollCycle, maxLogSize, maxQueueWeight, archiveCommand);
+    }
+
     public void enableOldProtocolVersions()
     {
         ssProxy.enableNativeTransportOldProtocolVersions();
@@ -1903,6 +2122,21 @@
     {
         return FullQueryLoggerOptionsCompositeData.fromCompositeData(ssProxy.getFullQueryLoggerOptions());
     }
+
+    public AuditLogOptions getAuditLogOptions()
+    {
+        return AuditLogOptionsCompositeData.fromCompositeData(almProxy.getAuditLogOptionsData());
+    }
+
+    public void setDefaultKeyspaceReplicationFactor(int value)
+    {
+        ssProxy.setDefaultKeyspaceReplicationFactor(value);
+    }
+
+    public int getDefaultKeyspaceReplicationFactor()
+    {
+        return ssProxy.getDefaultKeyspaceReplicationFactor();
+    }
 }
 
 class ColumnFamilyStoreMBeanIterator implements Iterator<Map.Entry<String, ColumnFamilyStoreMBean>>
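For reference, the new auth cache proxies above are reachable through the same NodeProbe entry point nodetool uses. A minimal usage sketch, assuming a locally reachable JMX endpoint without authentication; the host, port and role name are placeholders:

    import java.io.IOException;

    import org.apache.cassandra.tools.NodeProbe;

    public class InvalidateAuthCachesExample
    {
        public static void main(String[] args) throws IOException
        {
            NodeProbe probe = new NodeProbe("127.0.0.1", 7199); // placeholder JMX host and port
            try
            {
                probe.invalidateCredentialsCache();  // flush every cached credential
                probe.invalidateRolesCache("alice"); // flush a single role's entry
            }
            finally
            {
                probe.close();
            }
        }
    }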
diff --git a/src/java/org/apache/cassandra/tools/NodeTool.java b/src/java/org/apache/cassandra/tools/NodeTool.java
index b796e02..8d87c88 100644
--- a/src/java/org/apache/cassandra/tools/NodeTool.java
+++ b/src/java/org/apache/cassandra/tools/NodeTool.java
@@ -22,18 +22,18 @@
 import static com.google.common.collect.Lists.newArrayList;
 import static java.lang.Integer.parseInt;
 import static java.lang.String.format;
+import static org.apache.cassandra.io.util.File.WriteMode.APPEND;
 import static org.apache.commons.lang3.ArrayUtils.EMPTY_STRING_ARRAY;
 import static org.apache.commons.lang3.StringUtils.EMPTY;
 import static org.apache.commons.lang3.StringUtils.isEmpty;
 import static org.apache.commons.lang3.StringUtils.isNotEmpty;
 
 import java.io.Console;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileWriter;
 import java.io.FileNotFoundException;
-import java.io.FileWriter;
 import java.io.IOError;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -51,7 +51,6 @@
 import org.apache.cassandra.locator.EndpointSnitchInfoMBean;
 import org.apache.cassandra.tools.nodetool.*;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.tools.nodetool.Sjk;
 
 import com.google.common.collect.Maps;
 
@@ -93,126 +92,140 @@
     public int execute(String... args)
     {
         List<Class<? extends NodeToolCmdRunnable>> commands = newArrayList(
+                Assassinate.class,
                 CassHelp.class,
-                Info.class,
-                Ring.class,
-                NetStats.class,
-                CfStats.class,
-                TableStats.class,
                 CfHistograms.class,
-                TableHistograms.class,
+                CfStats.class,
                 Cleanup.class,
                 ClearSnapshot.class,
                 ClientStats.class,
                 Compact.class,
-                Scrub.class,
-                Verify.class,
-                Flush.class,
-                UpgradeSSTable.class,
-                GarbageCollect.class,
-                DisableAutoCompaction.class,
-                EnableAutoCompaction.class,
-                CompactionStats.class,
                 CompactionHistory.class,
+                CompactionStats.class,
+                DataPaths.class,
                 Decommission.class,
                 DescribeCluster.class,
+                DescribeRing.class,
+                DisableAuditLog.class,
+                DisableAutoCompaction.class,
+                DisableBackup.class,
                 DisableBinary.class,
-                EnableBinary.class,
-                EnableGossip.class,
-                DisableGossip.class,
-                EnableHandoff.class,
-                EnableFullQueryLog.class,
                 DisableFullQueryLog.class,
+                DisableGossip.class,
+                DisableHandoff.class,
+                DisableHintsForDC.class,
+                DisableOldProtocolVersions.class,
+                Drain.class,
+                EnableAuditLog.class,
+                EnableAutoCompaction.class,
+                EnableBackup.class,
+                EnableBinary.class,
+                EnableFullQueryLog.class,
+                EnableGossip.class,
+                EnableHandoff.class,
+                EnableHintsForDC.class,
+                EnableOldProtocolVersions.class,
+                FailureDetectorInfo.class,
+                Flush.class,
+                GarbageCollect.class,
                 GcStats.class,
+                GetAuditLog.class,
+                GetAuthCacheConfig.class,
                 GetBatchlogReplayTrottle.class,
+                GetColumnIndexSize.class,
                 GetCompactionThreshold.class,
                 GetCompactionThroughput.class,
                 GetConcurrency.class,
-                GetFullQueryLog.class,
-                GetTimeout.class,
-                GetStreamThroughput.class,
-                GetTraceProbability.class,
-                GetInterDCStreamThroughput.class,
+                GetConcurrentCompactors.class,
+                GetConcurrentViewBuilders.class,
+                GetDefaultKeyspaceRF.class,
                 GetEndpoints.class,
-                GetSeeds.class,
-                GetSSTables.class,
+                GetFullQueryLog.class,
+                GetInterDCStreamThroughput.class,
+                GetLoggingLevels.class,
                 GetMaxHintWindow.class,
+                GetSSTables.class,
+                GetSeeds.class,
+                GetSnapshotThrottle.class,
+                GetStreamThroughput.class,
+                GetTimeout.class,
+                GetTraceProbability.class,
                 GossipInfo.class,
                 Import.class,
-                InvalidateKeyCache.class,
-                InvalidateRowCache.class,
+                Info.class,
                 InvalidateCounterCache.class,
+                InvalidateCredentialsCache.class,
+                InvalidateJmxPermissionsCache.class,
+                InvalidateKeyCache.class,
+                InvalidateNetworkPermissionsCache.class,
+                InvalidatePermissionsCache.class,
+                InvalidateRolesCache.class,
+                InvalidateRowCache.class,
                 Join.class,
+                ListPendingHints.class,
+                ListSnapshots.class,
                 Move.class,
+                NetStats.class,
                 PauseHandoff.class,
-                ResumeHandoff.class,
                 ProfileLoad.class,
                 ProxyHistograms.class,
+                RangeKeySample.class,
                 Rebuild.class,
+                RebuildIndex.class,
+                RecompressSSTables.class,
                 Refresh.class,
-                RemoveNode.class,
-                Assassinate.class,
+                RefreshSizeEstimates.class,
+                ReloadLocalSchema.class,
                 ReloadSeeds.class,
-                ResetFullQueryLog.class,
+                ReloadSslCertificates.class,
+                ReloadTriggers.class,
+                RelocateSSTables.class,
+                RemoveNode.class,
                 Repair.class,
                 ReplayBatchlog.class,
-                SetCacheCapacity.class,
-                SetConcurrency.class,
-                SetHintedHandoffThrottleInKB.class,
+                ResetFullQueryLog.class,
+                ResetLocalSchema.class,
+                ResumeHandoff.class,
+                Ring.class,
+                Scrub.class,
+                SetAuthCacheConfig.class,
                 SetBatchlogReplayThrottle.class,
+                SetCacheCapacity.class,
+                SetCacheKeysToSave.class,
+                SetColumnIndexSize.class,
                 SetCompactionThreshold.class,
                 SetCompactionThroughput.class,
-                GetConcurrentCompactors.class,
-                SetConcurrentCompactors.class,
-                GetConcurrentViewBuilders.class,
-                SetConcurrentViewBuilders.class,
                 SetConcurrency.class,
-                SetTimeout.class,
-                SetStreamThroughput.class,
+                SetConcurrentCompactors.class,
+                SetConcurrentViewBuilders.class,
+                SetDefaultKeyspaceRF.class,
+                SetHintedHandoffThrottleInKB.class,
                 SetInterDCStreamThroughput.class,
-                SetTraceProbability.class,
+                SetLoggingLevel.class,
                 SetMaxHintWindow.class,
-                Snapshot.class,
-                ListSnapshots.class,
-                GetSnapshotThrottle.class,
                 SetSnapshotThrottle.class,
+                SetStreamThroughput.class,
+                SetTimeout.class,
+                SetTraceProbability.class,
+                Sjk.class,
+                Snapshot.class,
                 Status.class,
+                StatusAutoCompaction.class,
+                StatusBackup.class,
                 StatusBinary.class,
                 StatusGossip.class,
-                StatusBackup.class,
                 StatusHandoff.class,
-                StatusAutoCompaction.class,
                 Stop.class,
                 StopDaemon.class,
-                Version.class,
-                DescribeRing.class,
-                RebuildIndex.class,
-                RangeKeySample.class,
-                EnableBackup.class,
-                DisableBackup.class,
-                ResetLocalSchema.class,
-                ReloadLocalSchema.class,
-                ReloadTriggers.class,
-                SetCacheKeysToSave.class,
-                DisableHandoff.class,
-                Drain.class,
-                TruncateHints.class,
-                TpStats.class,
+                TableHistograms.class,
+                TableStats.class,
                 TopPartitions.class,
-                SetLoggingLevel.class,
-                GetLoggingLevels.class,
-                Sjk.class,
-                DisableHintsForDC.class,
-                EnableHintsForDC.class,
-                FailureDetectorInfo.class,
-                RefreshSizeEstimates.class,
-                RelocateSSTables.class,
-                ViewBuildStatus.class,
-                ReloadSslCertificates.class,
-                EnableAuditLog.class,
-                DisableAuditLog.class,
-                EnableOldProtocolVersions.class,
-                DisableOldProtocolVersions.class
+                TpStats.class,
+                TruncateHints.class,
+                UpgradeSSTable.class,
+                Verify.class,
+                Version.class,
+                ViewBuildStatus.class
         );
 
         Cli.CliBuilder<NodeToolCmdRunnable> builder = Cli.builder("nodetool");
@@ -274,7 +287,7 @@
         String cmdLine = Joiner.on(" ").skipNulls().join(args);
         cmdLine = cmdLine.replaceFirst("(?<=(-pw|--password))\\s+\\S+", " <hidden>");
 
-        try (FileWriter writer = new FileWriter(new File(FBUtilities.getToolsOutputDirectory(), HISTORYFILE), true))
+        try (FileWriter writer = new File(FBUtilities.getToolsOutputDirectory(), HISTORYFILE).newWriter(APPEND))
         {
             SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
             writer.append(sdf.format(new Date())).append(": ").append(cmdLine).append(System.lineSeparator());
@@ -329,7 +342,7 @@
         @Option(type = OptionType.GLOBAL, name = {"-pwf", "--password-file"}, description = "Path to the JMX password file")
         private String passwordFilePath = EMPTY;
 
-		@Option(type = OptionType.GLOBAL, name = { "-pp", "--print-port"}, description = "Operate in 4.0 mode with hosts disambiguated by port number", arity = 0)
+        @Option(type = OptionType.GLOBAL, name = { "-pp", "--print-port"}, description = "Operate in 4.0 mode with hosts disambiguated by port number", arity = 0)
         protected boolean printPort = false;
 
         private INodeProbeFactory nodeProbeFactory;
@@ -370,7 +383,7 @@
             String password = EMPTY;
 
             File passwordFile = new File(passwordFilePath);
-            try (Scanner scanner = new Scanner(passwordFile).useDelimiter("\\s+"))
+            try (Scanner scanner = new Scanner(passwordFile.toJavaIOFile()).useDelimiter("\\s+"))
             {
                 while (scanner.hasNextLine())
                 {
@@ -385,7 +398,8 @@
                     }
                     scanner.nextLine();
                 }
-            } catch (FileNotFoundException e)
+            }
+            catch (FileNotFoundException e)
             {
                 throw new RuntimeException(e);
             }
@@ -472,29 +486,6 @@
         }
     }
 
-    public static SortedMap<String, SetHostStat> getOwnershipByDc(NodeProbe probe, boolean resolveIp,
-                                                                  Map<String, String> tokenToEndpoint,
-                                                                  Map<InetAddress, Float> ownerships)
-    {
-        SortedMap<String, SetHostStat> ownershipByDc = Maps.newTreeMap();
-        EndpointSnitchInfoMBean epSnitchInfo = probe.getEndpointSnitchInfoProxy();
-        try
-        {
-            for (Entry<String, String> tokenAndEndPoint : tokenToEndpoint.entrySet())
-            {
-                String dc = epSnitchInfo.getDatacenter(tokenAndEndPoint.getValue());
-                if (!ownershipByDc.containsKey(dc))
-                    ownershipByDc.put(dc, new SetHostStat(resolveIp));
-                ownershipByDc.get(dc).add(tokenAndEndPoint.getKey(), tokenAndEndPoint.getValue(), ownerships);
-            }
-        }
-        catch (UnknownHostException e)
-        {
-            throw new RuntimeException(e);
-        }
-        return ownershipByDc;
-    }
-
     public static SortedMap<String, SetHostStatWithPort> getOwnershipByDcWithPort(NodeProbe probe, boolean resolveIp,
                                                                   Map<String, String> tokenToEndpoint,
                                                                   Map<String, Float> ownerships)
diff --git a/src/java/org/apache/cassandra/tools/RepairRunner.java b/src/java/org/apache/cassandra/tools/RepairRunner.java
index 593bc26..cd09c57 100644
--- a/src/java/org/apache/cassandra/tools/RepairRunner.java
+++ b/src/java/org/apache/cassandra/tools/RepairRunner.java
@@ -22,19 +22,23 @@
 import java.text.SimpleDateFormat;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Condition;
 
-import com.google.common.base.Throwables;
-
-import org.apache.cassandra.repair.messages.RepairOption;
-import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.StorageServiceMBean;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.Condition;
+
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressEventType;
 import org.apache.cassandra.utils.progress.jmx.JMXNotificationProgressListener;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.service.ActiveRepairService.ParentRepairStatus;
+import static org.apache.cassandra.service.ActiveRepairService.ParentRepairStatus.FAILED;
+import static org.apache.cassandra.service.ActiveRepairService.ParentRepairStatus.valueOf;
+import static org.apache.cassandra.tools.NodeProbe.JMX_NOTIFICATION_POLL_INTERVAL_SECONDS;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+import static org.apache.cassandra.utils.progress.ProgressEventType.*;
+
 public class RepairRunner extends JMXNotificationProgressListener
 {
     private final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
@@ -43,7 +47,7 @@
     private final StorageServiceMBean ssProxy;
     private final String keyspace;
     private final Map<String, String> options;
-    private final SimpleCondition condition = new SimpleCondition();
+    private final Condition condition = newOneTimeCondition();
 
     private int cmd;
     private volatile Exception error;
@@ -67,10 +71,10 @@
         }
         else
         {
-            while (!condition.await(NodeProbe.JMX_NOTIFICATION_POLL_INTERVAL_SECONDS, TimeUnit.SECONDS))
+            while (!condition.await(JMX_NOTIFICATION_POLL_INTERVAL_SECONDS, SECONDS))
             {
                 queryForCompletedRepair(String.format("After waiting for poll interval of %s seconds",
-                                                      NodeProbe.JMX_NOTIFICATION_POLL_INTERVAL_SECONDS));
+                                                      JMX_NOTIFICATION_POLL_INTERVAL_SECONDS));
             }
             Exception error = this.error;
             if (error == null)
@@ -123,18 +127,18 @@
     {
         ProgressEventType type = event.getType();
         String message = event.getMessage();
-        if (type == ProgressEventType.PROGRESS)
+        if (type == PROGRESS)
         {
             message = message + " (progress: " + (int) event.getProgressPercentage() + "%)";
         }
         printMessage(message);
-        if (type == ProgressEventType.ERROR)
+        if (type == ERROR)
         {
             error = new RuntimeException(String.format("Repair job has failed with the error message: %s. " +
                                                        "Check the logs on the repair participants for further details",
                                                        message));
         }
-        if (type == ProgressEventType.COMPLETE)
+        if (type == COMPLETE)
         {
             condition.signalAll();
         }
@@ -153,7 +157,7 @@
         }
         else
         {
-            ActiveRepairService.ParentRepairStatus parentRepairStatus = ActiveRepairService.ParentRepairStatus.valueOf(status.get(0));
+            ParentRepairStatus parentRepairStatus = valueOf(status.get(0));
             List<String> messages = status.subList(1, status.size());
             switch (parentRepairStatus)
             {
@@ -162,7 +166,7 @@
                     printMessage(String.format("%s %s discovered repair %s.",
                                               triggeringCondition,
                                               queriedString, parentRepairStatus.name().toLowerCase()));
-                    if (parentRepairStatus == ActiveRepairService.ParentRepairStatus.FAILED)
+                    if (parentRepairStatus == FAILED)
                     {
                         error = new IOException(messages.get(0));
                     }
@@ -189,6 +193,6 @@
 
     private void printMessage(String message)
     {
-        out.println(String.format("[%s] %s", this.format.format(System.currentTimeMillis()), message));
+        out.println(String.format("[%s] %s", this.format.format(currentTimeMillis()), message));
     }
 }
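The RepairRunner hunks above swap the concrete SimpleCondition for the Condition factory method and take timestamps from Clock.Global (which allows an alternative clock to be substituted). A minimal sketch of the resulting one-time await/signal pattern, using only calls visible in the hunks (newOneTimeCondition, await(long, TimeUnit), signalAll); the class name and the 30-second poll interval are illustrative, not part of the patch:

    import static java.util.concurrent.TimeUnit.SECONDS;
    import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
    import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;

    import org.apache.cassandra.utils.concurrent.Condition;

    class OneTimeConditionSketch
    {
        private final Condition done = newOneTimeCondition();

        void waitForCompletion() throws InterruptedException
        {
            long start = currentTimeMillis();
            // Re-check every 30 seconds until the condition has been signalled.
            while (!done.await(30, SECONDS))
                System.out.println("still waiting after " + (currentTimeMillis() - start) + " ms");
        }

        void complete()
        {
            done.signalAll(); // a one-time condition stays signalled once set
        }
    }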
diff --git a/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java b/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java
index 56c57d9..f5d24ed 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java
@@ -35,6 +35,8 @@
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * During compaction we can drop entire sstables if they only contain expired tombstones and if it is guaranteed
  * to not cover anything in other sstables. An expired sstable can be blocked from getting dropped if its newest
@@ -58,7 +60,7 @@
 
         String keyspace = args[args.length - 2];
         String columnfamily = args[args.length - 1];
-        Schema.instance.loadFromDisk(false);
+        Schema.instance.loadFromDisk();
 
         TableMetadata metadata = Schema.instance.validateTable(keyspace, columnfamily);
 
@@ -87,7 +89,7 @@
             System.exit(1);
         }
 
-        int gcBefore = (int)(System.currentTimeMillis()/1000) - metadata.params.gcGraceSeconds;
+        int gcBefore = (int)(currentTimeMillis() / 1000) - metadata.params.gcGraceSeconds;
         Multimap<SSTableReader, SSTableReader> blockers = checkForExpiredSSTableBlockers(sstables, gcBefore);
         for (SSTableReader blocker : blockers.keySet())
         {
diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java
index 5be67d7..771380b 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExport.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashSet;
@@ -26,6 +25,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.DecoratedKey;
@@ -135,7 +135,7 @@
             printUsage();
             System.exit(1);
         }
-        String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();
+        String ssTableFileName = new File(cmd.getArgs()[0]).absolutePath();
 
         if (!new File(ssTableFileName).exists())
         {
@@ -217,7 +217,6 @@
         }
         catch (IOException e)
         {
-            // throwing exception outside main with broken pipe causes windows cmd to hang
             e.printStackTrace(System.err);
         }
 
diff --git a/src/java/org/apache/cassandra/tools/SSTableLevelResetter.java b/src/java/org/apache/cassandra/tools/SSTableLevelResetter.java
index 4340e8e..9618d362 100644
--- a/src/java/org/apache/cassandra/tools/SSTableLevelResetter.java
+++ b/src/java/org/apache/cassandra/tools/SSTableLevelResetter.java
@@ -65,7 +65,7 @@
         try
         {
             // load keyspace descriptions.
-            Schema.instance.loadFromDisk(false);
+            Schema.instance.loadFromDisk();
 
             String keyspaceName = args[1];
             String columnfamily = args[2];
diff --git a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
old mode 100755
new mode 100644
index bad7b13..c4bef35
--- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
+++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.tools;
 
 import java.io.DataInputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.io.PrintWriter;
@@ -62,6 +61,7 @@
 import org.apache.cassandra.io.sstable.metadata.MetadataType;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.io.sstable.metadata.ValidationMetadata;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.tools.Util.TermHistogram;
@@ -72,6 +72,7 @@
 import static org.apache.cassandra.tools.Util.CYAN;
 import static org.apache.cassandra.tools.Util.RESET;
 import static org.apache.cassandra.tools.Util.WHITE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.commons.lang3.time.DurationFormatUtils.formatDurationWords;
 
 /**
@@ -232,9 +233,9 @@
                                     cellCount++;
                                     double percentComplete = Math.min(1.0, cellCount / totalCells);
                                     if (lastPercent != (int) (percentComplete * 100) &&
-                                        (System.currentTimeMillis() - lastPercentTime) > 1000)
+                                        (currentTimeMillis() - lastPercentTime) > 1000)
                                     {
-                                        lastPercentTime = System.currentTimeMillis();
+                                        lastPercentTime = currentTimeMillis();
                                         lastPercent = (int) (percentComplete * 100);
                                         if (color)
                                             out.printf("\r%sAnalyzing SSTable...  %s%s %s(%%%s)", BLUE, CYAN,
@@ -371,7 +372,7 @@
                 field("maxClusteringValues", Arrays.toString(maxValues));
             }
             field("Estimated droppable tombstones",
-                  stats.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000) - this.gc));
+                  stats.getEstimatedDroppableTombstoneRatio((int) (currentTimeMillis() / 1000) - this.gc));
             field("SSTable Level", stats.sstableLevel);
             field("Repaired at", stats.repairedAt, toDateString(stats.repairedAt, TimeUnit.MILLISECONDS));
             field("Originating host id", stats.originatingHostId);
@@ -544,7 +545,7 @@
             File sstable = new File(fname);
             if (sstable.exists())
             {
-                metawriter.printSStableMetadata(sstable.getAbsolutePath(), fullScan);
+                metawriter.printSStableMetadata(sstable.absolutePath(), fullScan);
             }
             else
             {
diff --git a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java
index 79fec81..e4e343f 100644
--- a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java
+++ b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
@@ -35,6 +34,7 @@
 import com.google.common.collect.SetMultimap;
 
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -91,7 +91,7 @@
         boolean dryRun = args[0].equals("--dry-run");
         String keyspace = args[args.length - 2];
         String columnfamily = args[args.length - 1];
-        Schema.instance.loadFromDisk(false);
+        Schema.instance.loadFromDisk();
 
         if (Schema.instance.getTableMetadataRef(keyspace, columnfamily) == null)
             throw new IllegalArgumentException(String.format("Unknown keyspace/table %s.%s",
diff --git a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
index 31d80fa..1289e7e 100644
--- a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
+++ b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
@@ -17,10 +17,11 @@
  */
 package org.apache.cassandra.tools;
 
-import java.io.*;
+
+import java.io.IOException;
+import java.io.PrintStream;
 import java.nio.charset.Charset;
 import java.nio.file.Files;
-import java.nio.file.Paths;
 import java.nio.file.attribute.FileTime;
 import java.util.Arrays;
 import java.util.List;
@@ -39,6 +40,8 @@
  * sstablerepairset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
  * }
  */
+import org.apache.cassandra.io.util.File;
+
 public class SSTableRepairedAtSetter
 {
     /**
@@ -69,7 +72,7 @@
         List<String> fileNames;
         if (args[2].equals("-f"))
         {
-            fileNames = Files.readAllLines(Paths.get(args[3]), Charset.defaultCharset());
+            fileNames = Files.readAllLines(File.getPath(args[3]), Charset.defaultCharset());
         }
         else
         {
diff --git a/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java b/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java
index cca48fc..4d2acd2 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java
@@ -25,10 +25,10 @@
 import org.apache.cassandra.utils.OutputHandler;
 import org.apache.commons.cli.*;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.function.BiPredicate;
 
+import org.apache.cassandra.io.util.File;
 import static org.apache.cassandra.tools.BulkLoader.CmdLineOptions;
 
 public class StandaloneSSTableUtil
@@ -48,7 +48,7 @@
         {
             // load keyspace descriptions.
             Util.initDatabaseDescriptor();
-            Schema.instance.loadFromDisk(false);
+            Schema.instance.loadFromDisk();
 
             TableMetadata metadata = Schema.instance.getTableMetadata(options.keyspaceName, options.cfName);
             if (metadata == null)
@@ -87,7 +87,7 @@
         for (File dir : directories.getCFDirectories())
         {
             for (File file : LifecycleTransaction.getFiles(dir.toPath(), getFilter(options), Directories.OnTxnErr.THROW))
-                handler.output(file.getCanonicalPath());
+                handler.output(file.canonicalPath());
         }
     }
 
diff --git a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
index bd71c64..4484b69 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
@@ -18,8 +18,6 @@
  */
 package org.apache.cassandra.tools;
 
-import java.io.File;
-import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -27,6 +25,7 @@
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -55,6 +54,8 @@
 import org.apache.cassandra.utils.OutputHandler;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class StandaloneScrubber
 {
     public static final String REINSERT_OVERFLOWED_TTL_OPTION_DESCRIPTION = "Rewrites rows with overflowed expiration date affected by CASSANDRA-14092 with " +
@@ -85,7 +86,7 @@
         try
         {
             // load keyspace descriptions.
-            Schema.instance.loadFromDisk(false);
+            Schema.instance.loadFromDisk();
 
             if (Schema.instance.getKeyspaceMetadata(options.keyspaceName) == null)
                 throw new IllegalArgumentException(String.format("Unknown keyspace %s", options.keyspaceName));
@@ -108,7 +109,7 @@
                                                                   options.keyspaceName,
                                                                   options.cfName));
 
-            String snapshotName = "pre-scrub-" + System.currentTimeMillis();
+            String snapshotName = "pre-scrub-" + currentTimeMillis();
 
             OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
             Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
@@ -125,7 +126,7 @@
                 listResult.add(Pair.create(descriptor, components));
 
                 File snapshotDirectory = Directories.getSnapshotDirectory(descriptor, snapshotName);
-                SSTableReader.createLinks(descriptor, components, snapshotDirectory.getPath());
+                SSTableReader.createLinks(descriptor, components, snapshotDirectory.path());
             }
             System.out.println(String.format("Pre-scrub sstables snapshotted into snapshot %s", snapshotName));
 
@@ -142,7 +143,7 @@
                     headerFixBuilder = headerFixBuilder.dryRun();
 
                 for (Pair<Descriptor, Set<Component>> p : listResult)
-                    headerFixBuilder.withPath(Paths.get(p.left.filenameFor(Component.DATA)));
+                    headerFixBuilder.withPath(File.getPath(p.left.filenameFor(Component.DATA)));
 
                 SSTableHeaderFix headerFix = headerFixBuilder.build();
                 try
@@ -260,7 +261,7 @@
     {
         if (strategyManager.getCompactionParams().klass().equals(LeveledCompactionStrategy.class))
         {
-            int maxSizeInMB = (int)((cfs.getCompactionStrategyManager().getMaxSSTableBytes()) / (1024L * 1024L));
+            int maxSizeInMiB = (int)((cfs.getCompactionStrategyManager().getMaxSSTableBytes()) / (1024L * 1024L));
             int fanOut = cfs.getCompactionStrategyManager().getLevelFanoutSize();
             for (AbstractStrategyHolder.GroupedSSTableContainer sstableGroup : strategyManager.groupSSTables(sstables))
             {
@@ -268,7 +269,7 @@
                 {
                     List<SSTableReader> groupSSTables = new ArrayList<>(sstableGroup.getGroup(i));
                     // creating the manifest makes sure the leveling is sane:
-                    LeveledManifest.create(cfs, maxSizeInMB, fanOut, groupSSTables);
+                    LeveledManifest.create(cfs, maxSizeInMiB, fanOut, groupSSTables);
                 }
             }
         }
diff --git a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
index e15e5bc..efadb56 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
@@ -18,10 +18,10 @@
  */
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.util.*;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.commons.cli.*;
@@ -38,6 +38,7 @@
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
 import static org.apache.cassandra.tools.BulkLoader.CmdLineOptions;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class StandaloneSplitter
 {
@@ -60,7 +61,7 @@
         try
         {
             // load keyspace descriptions.
-            Schema.instance.loadFromDisk(false);
+            Schema.instance.loadFromDisk();
 
             String ksName = null;
             String cfName = null;
@@ -115,7 +116,7 @@
             // Do not load sstables since they might be broken
             Keyspace keyspace = Keyspace.openWithoutSSTables(ksName);
             ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
-            String snapshotName = "pre-split-" + System.currentTimeMillis();
+            String snapshotName = "pre-split-" + currentTimeMillis();
 
             List<SSTableReader> sstables = new ArrayList<>();
             for (Map.Entry<Descriptor, Set<Component>> fn : parsedFilenames.entrySet())
@@ -132,7 +133,7 @@
 
                     if (options.snapshot) {
                         File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
-                        sstable.createLinks(snapshotDirectory.getPath());
+                        sstable.createLinks(snapshotDirectory.path());
                     }
 
                 }
diff --git a/src/java/org/apache/cassandra/tools/StandaloneUpgrader.java b/src/java/org/apache/cassandra/tools/StandaloneUpgrader.java
index e1b546b..a7c099c 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneUpgrader.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneUpgrader.java
@@ -57,7 +57,7 @@
         try
         {
             // load keyspace descriptions.
-            Schema.instance.loadFromDisk(false);
+            Schema.instance.loadFromDisk();
 
             if (Schema.instance.getTableMetadataRef(options.keyspace, options.cf) == null)
                 throw new IllegalArgumentException(String.format("Unknown keyspace/table %s.%s",
diff --git a/src/java/org/apache/cassandra/tools/StandaloneVerifier.java b/src/java/org/apache/cassandra/tools/StandaloneVerifier.java
index df276bd..d7554dd 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneVerifier.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneVerifier.java
@@ -62,11 +62,18 @@
     private static final String CHECK_VERSION = "check_version";
     private static final String MUTATE_REPAIR_STATUS = "mutate_repair_status";
     private static final String QUICK = "quick";
+    private static final String FORCE = "force";
     private static final String TOKEN_RANGE = "token_range";
 
     public static void main(String args[])
     {
         Options options = Options.parseArgs(args);
+        if (!options.force)
+        {
+            System.err.println("verify will not run without -f or --force. See CASSANDRA-17017 for details.");
+            Options.printUsage(Options.getCmdLineOptions());
+            System.exit(1);
+        }
         initDatabaseDescriptorForTool();
 
         System.out.println("sstableverify using the following options: " + options);
@@ -74,7 +81,7 @@
         try
         {
             // load keyspace descriptions.
-            Schema.instance.loadFromDisk(false);
+            Schema.instance.loadFromDisk();
 
             boolean hasFailed = false;
 
@@ -165,6 +172,7 @@
         public boolean checkVersion;
         public boolean mutateRepairStatus;
         public boolean quick;
+        public boolean force;
         public Collection<Range<Token>> tokens;
 
         private Options(String keyspaceName, String cfName)
@@ -190,8 +198,8 @@
                 String[] args = cmd.getArgs();
                 if (args.length != 2)
                 {
-                    String msg = args.length < 2 ? "Missing arguments" : "Too many arguments";
-                    System.err.println(msg);
+                    String prefix = args.length < 2 ? "Missing" : "Too many";
+                    System.err.println(prefix + " arguments");
                     printUsage(options);
                     System.exit(1);
                 }
@@ -207,6 +215,7 @@
                 opts.checkVersion = cmd.hasOption(CHECK_VERSION);
                 opts.mutateRepairStatus = cmd.hasOption(MUTATE_REPAIR_STATUS);
                 opts.quick = cmd.hasOption(QUICK);
+                opts.force = cmd.hasOption(FORCE);
 
                 if (cmd.hasOption(TOKEN_RANGE))
                 {
@@ -260,17 +269,24 @@
             options.addOption("c",  CHECK_VERSION,         "make sure sstables are the latest version");
             options.addOption("r",  MUTATE_REPAIR_STATUS,  "don't mutate repair status");
             options.addOption("q",  QUICK,                 "do a quick check, don't read all data");
+            options.addOption("f",  FORCE,                 "force verify - see CASSANDRA-17017");
             options.addOptionList("t", TOKEN_RANGE, "range", "long token range of the format left,right. This may be provided multiple times to define multiple different ranges");
             return options;
         }
 
         public static void printUsage(CmdLineOptions options)
         {
-            String usage = String.format("%s [options] <keyspace> <column_family>", TOOL_NAME);
+            String usage = String.format("%s [options] <keyspace> <column_family> force", TOOL_NAME);
             StringBuilder header = new StringBuilder();
             header.append("--\n");
             header.append("Verify the sstable for the provided table." );
             header.append("\n--\n");
+            header.append("NOTE: There are significant risks associated with using this tool; it likely doesn't do what " +
+                          "you expect and there are known edge cases. You must provide a -f or --force argument in " +
+                          "order to allow usage of the tool -> see CASSANDRA-9947 and CASSANDRA-17017 for known risks.\n");
+            header.append("https://issues.apache.org/jira/browse/CASSANDRA-9947\n");
+            header.append("https://issues.apache.org/jira/browse/CASSANDRA-17017");
+            header.append("\n--\n");
             header.append("Options are:");
             new HelpFormatter().printHelp(usage, header.toString(), options, "");
         }
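With this change sstableverify refuses to run unless -f/--force is supplied: an invocation such as sstableverify <keyspace> <table> prints the CASSANDRA-17017 warning and exits with status 1, whereas sstableverify -f <keyspace> <table> behaves as before (keyspace and table are placeholders; the flag comes from the hunk above).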
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java
index a075ded..2639ec8 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java
@@ -44,4 +44,4 @@
             throw new RuntimeException(e);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java
index 8fdf803..6a06fd4 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java
@@ -26,4 +26,4 @@
 @Deprecated
 public class CfHistograms extends TableHistograms
 {
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java
index 2d27ea0..42e2bc3 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java
@@ -26,4 +26,4 @@
 @Deprecated
 public class CfStats extends TableStats
 {
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ClientStats.java b/src/java/org/apache/cassandra/tools/nodetool/ClientStats.java
index b9bf45e..7ed9b9d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ClientStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ClientStats.java
@@ -44,6 +44,9 @@
     @Option(title = "clear_history", name = "--clear-history", description = "Clear the history of connected clients")
     private boolean clearConnectionHistory = false;
 
+    @Option(title = "list_connections_with_client_options", name = "--client-options", description = "Lists all connections and the client options")
+    private boolean clientOptions = false;
+
     @Override
     public void execute(NodeProbe probe)
     {
@@ -108,6 +111,32 @@
             }
         }
 
+        if (clientOptions)
+        {
+            List<Map<String, String>> clients = (List<Map<String, String>>) probe.getClientMetric("connections");
+            if (!clients.isEmpty())
+            {
+                TableBuilder table = new TableBuilder();
+                table.add("Address", "SSL", "Cipher", "Protocol", "Version", "User", "Keyspace", "Requests", "Driver-Name", "Driver-Version", "Client-Options");
+                for (Map<String, String> conn : clients)
+                {
+                    table.add(conn.get(ConnectedClient.ADDRESS),
+                              conn.get(ConnectedClient.SSL),
+                              conn.get(ConnectedClient.CIPHER),
+                              conn.get(ConnectedClient.PROTOCOL),
+                              conn.get(ConnectedClient.VERSION),
+                              conn.get(ConnectedClient.USER),
+                              conn.get(ConnectedClient.KEYSPACE),
+                              conn.get(ConnectedClient.REQUESTS),
+                              conn.get(ConnectedClient.DRIVER_NAME),
+                              conn.get(ConnectedClient.DRIVER_VERSION),
+                              conn.get(ConnectedClient.CLIENT_OPTIONS));
+                }
+                table.printTo(out);
+                out.println();
+            }
+        }
+
         Map<String, Integer> connectionsByUser = (Map<String, Integer>) probe.getClientMetric("connectedNativeClientsByUser");
         int total = connectionsByUser.values().stream().reduce(0, Integer::sum);
         out.println("Total connected clients: " + total);
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Compact.java b/src/java/org/apache/cassandra/tools/nodetool/Compact.java
index bf7f5f3..f5a83ed 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Compact.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Compact.java
@@ -47,11 +47,16 @@
     @Option(title = "end_token", name = {"-et", "--end-token"}, description = "Use -et to specify a token at which compaction range ends (inclusive)")
     private String endToken = EMPTY;
 
+    @Option(title = "partition_key", name = {"--partition"}, description = "String representation of the partition key")
+    private String partitionKey = EMPTY;
+
 
     @Override
     public void execute(NodeProbe probe)
     {
-        final boolean tokenProvided = !(startToken.isEmpty() && endToken.isEmpty());
+        final boolean startEndTokenProvided = !(startToken.isEmpty() && endToken.isEmpty());
+        final boolean partitionKeyProvided = !partitionKey.isEmpty();
+        final boolean tokenProvided = startEndTokenProvided || partitionKeyProvided;
         if (splitOutput && (userDefined || tokenProvided))
         {
             throw new RuntimeException("Invalid option combination: Can not use split-output here");
@@ -80,10 +85,14 @@
         {
             try
             {
-                if (tokenProvided)
+                if (startEndTokenProvided)
                 {
                     probe.forceKeyspaceCompactionForTokenRange(keyspace, startToken, endToken, tableNames);
                 }
+                else if (partitionKeyProvided)
+                {
+                    probe.forceKeyspaceCompactionForPartitionKey(keyspace, partitionKey, tableNames);
+                }
                 else
                 {
                     probe.forceKeyspaceCompaction(splitOutput, keyspace, tableNames);
@@ -94,4 +103,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
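nodetool compact now also accepts --partition <key>, which restricts the compaction to sstables containing that partition key via forceKeyspaceCompactionForPartitionKey. From the option checks in this hunk, --partition cannot be combined with -s/--split-output, and an explicit -st/-et token range takes precedence over the partition key.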
diff --git a/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java b/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java
index 04dcc2b..799ef56 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java
@@ -44,6 +44,11 @@
             description = "Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB")
     private boolean humanReadable = false;
 
+    @Option(title = "vtable_output",
+            name = {"-V", "--vtable"},
+            description = "Display fields matching vtable output")
+    private boolean vtableOutput = false;
+
     @Override
     public void execute(NodeProbe probe)
     {
@@ -70,17 +75,26 @@
             }
         }
         out.println();
-        reportCompactionTable(cm.getCompactions(), probe.getCompactionThroughput(), humanReadable, out);
+        reportCompactionTable(cm.getCompactions(), probe.getCompactionThroughputBytes(), humanReadable, vtableOutput, out);
     }
 
-    public static void reportCompactionTable(List<Map<String,String>> compactions, int compactionThroughput, boolean humanReadable, PrintStream out)
+    public static void reportCompactionTable(List<Map<String,String>> compactions, long compactionThroughputInBytes, boolean humanReadable, PrintStream out)
+    {
+        reportCompactionTable(compactions, compactionThroughputInBytes, humanReadable, false, out);
+    }
+
+    public static void reportCompactionTable(List<Map<String,String>> compactions, long compactionThroughputInBytes, boolean humanReadable, boolean vtableOutput, PrintStream out)
     {
         if (!compactions.isEmpty())
         {
             long remainingBytes = 0;
             TableBuilder table = new TableBuilder();
 
-            table.add("id", "compaction type", "keyspace", "table", "completed", "total", "unit", "progress");
+            if (vtableOutput)
+                table.add("keyspace", "table", "task id", "completion ratio", "kind", "progress", "sstables", "total", "unit");
+            else
+                table.add("id", "compaction type", "keyspace", "table", "completed", "total", "unit", "progress");
+
             for (Map<String, String> c : compactions)
             {
                 long total = Long.parseLong(c.get(CompactionInfo.TOTAL));
@@ -90,19 +104,24 @@
                 String columnFamily = c.get(CompactionInfo.COLUMNFAMILY);
                 String unit = c.get(CompactionInfo.UNIT);
                 boolean toFileSize = humanReadable && Unit.isFileSize(unit);
-                String completedStr = toFileSize ? FileUtils.stringifyFileSize(completed) : Long.toString(completed);
+                String[] tables = c.get(CompactionInfo.SSTABLES).split(",");
+                String progressStr = toFileSize ? FileUtils.stringifyFileSize(completed) : Long.toString(completed);
                 String totalStr = toFileSize ? FileUtils.stringifyFileSize(total) : Long.toString(total);
                 String percentComplete = total == 0 ? "n/a" : new DecimalFormat("0.00").format((double) completed / total * 100) + "%";
                 String id = c.get(CompactionInfo.COMPACTION_ID);
-                table.add(id, taskType, keyspace, columnFamily, completedStr, totalStr, unit, percentComplete);
+                if (vtableOutput)
+                    table.add(keyspace, columnFamily, id, percentComplete, taskType, progressStr, String.valueOf(tables.length), totalStr, unit);
+                else
+                    table.add(id, taskType, keyspace, columnFamily, progressStr, totalStr, unit, percentComplete);
+
                 remainingBytes += total - completed;
             }
             table.printTo(out);
 
             String remainingTime = "n/a";
-            if (compactionThroughput != 0)
+            if (compactionThroughputInBytes != 0)
             {
-                long remainingTimeInSecs = remainingBytes / (1024L * 1024L * compactionThroughput);
+                long remainingTimeInSecs = remainingBytes / compactionThroughputInBytes;
                 remainingTime = format("%dh%02dm%02ds", remainingTimeInSecs / 3600, (remainingTimeInSecs % 3600) / 60, (remainingTimeInSecs % 60));
             }
             out.printf("%25s%10s%n", "Active compaction remaining time : ", remainingTime);
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DataPaths.java b/src/java/org/apache/cassandra/tools/nodetool/DataPaths.java
new file mode 100644
index 0000000..10ae01e
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/DataPaths.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+import org.apache.cassandra.tools.nodetool.stats.DataPathsHolder;
+import org.apache.cassandra.tools.nodetool.stats.DataPathsPrinter;
+import org.apache.cassandra.tools.nodetool.stats.StatsPrinter;
+
+@Command(name = "datapaths", description = "Print all directories where data of tables are stored")
+public class DataPaths extends NodeToolCmd
+{
+    @Arguments(usage = "[<keyspace.table>...]", description = "List of table (or keyspace) names")
+    private List<String> tableNames = new ArrayList<>();
+
+    @Option(title = "format", name = {"-F", "--format"}, description = "Output format (json, yaml)")
+    private String outputFormat = "";
+
+    @Override
+    protected void execute(NodeProbe probe)
+    {
+        if (!outputFormat.isEmpty() && !"json".equals(outputFormat) && !"yaml".equals(outputFormat))
+        {
+            throw new IllegalArgumentException("arguments for -F are yaml and json only.");
+        }
+
+        DataPathsHolder holder = new DataPathsHolder(probe, tableNames);
+        StatsPrinter<DataPathsHolder> printer = DataPathsPrinter.from(outputFormat);
+        printer.print(holder, probe.output().out);
+    }
+}
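The new nodetool datapaths command prints the directories in which data for the selected tables is stored; it takes optional keyspace.table arguments and can emit json or yaml via -F, for example: nodetool datapaths -F json system_schema.tables (the keyspace/table here is only an illustration).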
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java
index 0e58687..98b6d58 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java
@@ -46,4 +46,4 @@
             throw new IllegalStateException("Unsupported operation: " + e.getMessage(), e);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java b/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java
index 35653ae..6d878a0 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java
@@ -30,4 +30,4 @@
     {
         probe.disableAuditLog();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java
index b9fc7d6..39a4c76 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java
@@ -50,4 +50,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java
index 4b0bfbe..4ee6340 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java
@@ -30,4 +30,4 @@
     {
         probe.setIncrementalBackupsEnabled(false);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java
index 463f2b0..79b9219 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java
@@ -30,4 +30,4 @@
     {
         probe.stopNativeTransport();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java
index 8820e5f..aa5d0b7 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java
@@ -30,4 +30,4 @@
     {
         probe.stopFullQueryLogger();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java
index 6f950bb..7b6c348 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java
@@ -30,4 +30,4 @@
     {
         probe.stopGossiping();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java
index d7ec35f..62465a3 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java
@@ -30,4 +30,4 @@
     {
         probe.disableHintedHandoff();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java
index d65c70b..3615a99 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java
@@ -39,4 +39,4 @@
 
         probe.disableHintsForDC(args.get(0));
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java b/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java
index 2083062..8756471 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java
@@ -30,4 +30,4 @@
     {
         probe.disableOldProtocolVersions();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Drain.java b/src/java/org/apache/cassandra/tools/nodetool/Drain.java
index eaa537a..a152057 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Drain.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Drain.java
@@ -39,4 +39,4 @@
             throw new RuntimeException("Error occurred during flushing", e);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java b/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java
index c71d210..cf9d05e 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java
@@ -18,6 +18,8 @@
 
 package org.apache.cassandra.tools.nodetool;
 
+import java.util.Collections;
+
 import io.airlift.airline.Command;
 import io.airlift.airline.Option;
 import org.apache.cassandra.tools.NodeProbe;
@@ -47,9 +49,39 @@
     @Option(title = "excluded_users", name = { "--excluded-users" }, description = "Comma separated list of users to be excluded for audit log. If not set the value from cassandra.yaml will be used")
     private String excluded_users = null;
 
+    @Option(title = "roll_cycle", name = {"--roll-cycle"}, description = "How often to roll the log file (MINUTELY, HOURLY, DAILY).")
+    private String rollCycle = null;
+
+    @Option(title = "blocking", name = {"--blocking"}, description = "If the queue is full whether to block producers or drop samples [true|false].")
+    private String blocking = null;
+
+    @Option(title = "max_queue_weight", name = {"--max-queue-weight"}, description = "Maximum number of bytes of query data to queue to disk before blocking or dropping samples.")
+    private int maxQueueWeight = Integer.MIN_VALUE;
+
+    @Option(title = "max_log_size", name = {"--max-log-size"}, description = "How many bytes of log data to store before dropping segments. Might not be respected if a log file hasn't rolled so it can be deleted.")
+    private long maxLogSize = Long.MIN_VALUE;
+
+    @Option(title = "archive_command", name = {"--archive-command"}, description = "Command that will handle archiving rolled audit log files." +
+                                                                                   " Format is \"/path/to/script.sh %path\" where %path will be replaced with the file to archive" +
+                                                                                   " Enable this by setting the audit_logging_options.allow_nodetool_archive_command: true in the config.")
+
+    private String archiveCommand = null;
+
+    @Option(title = "archive_retries", name = {"--max-archive-retries"}, description = "Max number of archive retries.")
+    private int archiveRetries = Integer.MIN_VALUE;
+
     @Override
     public void execute(NodeProbe probe)
     {
-        probe.enableAuditLog(logger, included_keyspaces, excluded_keyspaces, included_categories, excluded_categories, included_users, excluded_users);
+        Boolean bblocking = null;
+        if (blocking != null)
+        {
+            if (!blocking.equalsIgnoreCase("TRUE") && !blocking.equalsIgnoreCase("FALSE"))
+                throw new IllegalArgumentException("Invalid [" + blocking + "]. Blocking only accepts 'true' or 'false'.");
+            else
+                bblocking = Boolean.parseBoolean(blocking);
+        }
+        probe.enableAuditLog(logger, Collections.EMPTY_MAP, included_keyspaces, excluded_keyspaces, included_categories, excluded_categories, included_users, excluded_users,
+                             archiveRetries, bblocking, rollCycle, maxLogSize, maxQueueWeight, archiveCommand);
     }
-}
\ No newline at end of file
+}
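nodetool enableauditlog now exposes the same logging knobs as enablefullquerylog (--roll-cycle, --blocking, --max-queue-weight, --max-log-size, --archive-command, --max-archive-retries); per the option description, --archive-command only takes effect when audit_logging_options.allow_nodetool_archive_command is set to true in the configuration.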
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java
index 795ab13..f8b98ff 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java
@@ -50,4 +50,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java
index d1773d9..7ebad8a 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java
@@ -30,4 +30,4 @@
     {
         probe.setIncrementalBackupsEnabled(true);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java
index 506945f..2e37e6f 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java
@@ -30,4 +30,4 @@
     {
         probe.startNativeTransport();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java
index d78d5ae..511730d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java
@@ -62,4 +62,4 @@
         }
         probe.enableFullQueryLogger(path, rollCycle, bblocking, maxQueueWeight, maxLogSize, archiveCommand, archiveRetries);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java
index 900c427..3433c3e 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java
@@ -30,4 +30,4 @@
     {
         probe.startGossiping();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java
index bccf7e7..be64e12 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java
@@ -30,4 +30,4 @@
     {
         probe.enableHintedHandoff();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java b/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java
index f6d5be5..06c9f8d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java
@@ -31,4 +31,4 @@
     {
         probe.enableOldProtocolVersions();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Flush.java b/src/java/org/apache/cassandra/tools/nodetool/Flush.java
index c83e420..fb2446d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Flush.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Flush.java
@@ -49,4 +49,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetAuditLog.java b/src/java/org/apache/cassandra/tools/nodetool/GetAuditLog.java
new file mode 100644
index 0000000..1e08984
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetAuditLog.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Command;
+import org.apache.cassandra.audit.AuditLogOptions;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool;
+import org.apache.cassandra.tools.nodetool.formatter.TableBuilder;
+
+@Command(name = "getauditlog", description = "Print configuration of audit log if enabled, otherwise the configuration reflected in cassandra.yaml")
+public class GetAuditLog extends NodeTool.NodeToolCmd
+{
+    @Override
+    protected void execute(NodeProbe probe)
+    {
+        final TableBuilder tableBuilder = new TableBuilder();
+
+        tableBuilder.add("enabled", Boolean.toString(probe.getStorageService().isAuditLogEnabled()));
+
+        final AuditLogOptions options = probe.getAuditLogOptions();
+
+        tableBuilder.add("logger", options.logger.class_name);
+        tableBuilder.add("audit_logs_dir", options.audit_logs_dir);
+        tableBuilder.add("archive_command", options.archive_command);
+        tableBuilder.add("roll_cycle", options.roll_cycle);
+        tableBuilder.add("block", Boolean.toString(options.block));
+        tableBuilder.add("max_log_size", Long.toString(options.max_log_size));
+        tableBuilder.add("max_queue_weight", Integer.toString(options.max_queue_weight));
+        tableBuilder.add("max_archive_retries", Long.toString(options.max_archive_retries));
+        tableBuilder.add("included_keyspaces", options.included_keyspaces);
+        tableBuilder.add("excluded_keyspaces", options.excluded_keyspaces);
+        tableBuilder.add("included_categories", options.included_categories);
+        tableBuilder.add("excluded_categories", options.excluded_categories);
+        tableBuilder.add("included_users", options.included_users);
+        tableBuilder.add("excluded_users", options.excluded_users);
+
+        tableBuilder.printTo(probe.output().out);
+    }
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetAuthCacheConfig.java b/src/java/org/apache/cassandra/tools/nodetool/GetAuthCacheConfig.java
new file mode 100644
index 0000000..347500b
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetAuthCacheConfig.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import org.apache.cassandra.auth.AuthCacheMBean;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool;
+
+@Command(name = "getauthcacheconfig", description = "Get configuration of Auth cache")
+public class GetAuthCacheConfig extends NodeTool.NodeToolCmd
+{
+    @SuppressWarnings("unused")
+    @Option(title = "cache-name",
+            name = {"--cache-name"},
+            description = "Name of Auth cache (required)",
+            required = true)
+    private String cacheName;
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        AuthCacheMBean authCacheMBean = probe.getAuthCacheMBean(cacheName);
+
+        probe.output().out.println("Validity Period: " + authCacheMBean.getValidity());
+        probe.output().out.println("Update Interval: " + authCacheMBean.getUpdateInterval());
+        probe.output().out.println("Max Entries: " + authCacheMBean.getMaxEntries());
+        probe.output().out.println("Active Update: " + authCacheMBean.getActiveUpdate());
+    }
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetColumnIndexSize.java b/src/java/org/apache/cassandra/tools/nodetool/GetColumnIndexSize.java
new file mode 100644
index 0000000..6925932
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetColumnIndexSize.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+@Command(name = "getcolumnindexsize", description = "Print the granularity of the collation index of rows within a partition in KiB")
+public class GetColumnIndexSize extends NodeToolCmd
+{
+    @Override
+    protected void execute(NodeProbe probe)
+    {
+        probe.output().out.println("Current value for column_index_size: " + probe.getColumnIndexSizeInKB() + " KiB");
+    }
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java
index 839c78d..e71fe0a 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java
@@ -17,17 +17,34 @@
  */
 package org.apache.cassandra.tools.nodetool;
 
+import com.google.common.math.DoubleMath;
+
 import io.airlift.airline.Command;
 
+import io.airlift.airline.Option;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
-@Command(name = "getcompactionthroughput", description = "Print the MB/s throughput cap for compaction in the system")
+@Command(name = "getcompactionthroughput", description = "Print the MiB/s throughput cap for compaction in the system as a rounded number")
 public class GetCompactionThroughput extends NodeToolCmd
 {
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-d", "--precise-mib" }, description = "Print the MiB/s throughput cap for compaction in the system as a precise number (double)")
+    private boolean compactionThroughputAsDouble;
+
     @Override
     public void execute(NodeProbe probe)
     {
-        probe.output().out.println("Current compaction throughput: " + probe.getCompactionThroughput() + " MB/s");
+        double throughput = probe.getCompactionThroughputMebibytesAsDouble();
+
+        if (compactionThroughputAsDouble)
+            probe.output().out.println("Current compaction throughput: " + throughput + " MiB/s");
+        else
+        {
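+            // Without -d, only a whole-number cap is printed; a fractional cap aborts with a hint to use -d for the precise MiB/s value.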
+            if (!DoubleMath.isMathematicalInteger(throughput))
+                throw new RuntimeException("Use the -d flag to quiet this error and get the exact throughput in MiB/s");
+
+            probe.output().out.println("Current compaction throughput: " + probe.getCompactionThroughput() + " MB/s");
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetDefaultKeyspaceRF.java b/src/java/org/apache/cassandra/tools/nodetool/GetDefaultKeyspaceRF.java
new file mode 100644
index 0000000..0ba7d37
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetDefaultKeyspaceRF.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool;
+
+@Command(name = "getdefaultrf", description = "Gets default keyspace replication factor.")
+public class GetDefaultKeyspaceRF extends NodeTool.NodeToolCmd
+{
+    @Override
+    protected void execute(NodeProbe probe)
+    {
+        probe.output().out.println(probe.getDefaultKeyspaceReplicationFactor());
+    }
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/GetFullQueryLog.java
index be3aa56..1dc4c3f 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/GetFullQueryLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetFullQueryLog.java
@@ -24,7 +24,7 @@
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 import org.apache.cassandra.tools.nodetool.formatter.TableBuilder;
 
-@Command(name = "getfullquerylog", description = "print configuration of fql if enabled, otherwise the configuration reflected in cassandra.yaml")
+@Command(name = "getfullquerylog", description = "Print configuration of fql if enabled, otherwise the configuration reflected in cassandra.yaml")
 public class GetFullQueryLog extends NodeToolCmd
 {
     protected void execute(NodeProbe probe)
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java
index 554876d..50098a5 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java
@@ -17,17 +17,71 @@
  */
 package org.apache.cassandra.tools.nodetool;
 
-import io.airlift.airline.Command;
+import com.google.common.math.DoubleMath;
 
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
-@Command(name = "getinterdcstreamthroughput", description = "Print the Mb/s throughput cap for inter-datacenter streaming in the system")
+@Command(name = "getinterdcstreamthroughput", description = "Print the throughput cap for inter-datacenter streaming and entire SSTable inter-datacenter streaming in the system" +
+                                                            "in rounded megabits. For precise number, please, use option -d")
 public class GetInterDCStreamThroughput extends NodeToolCmd
 {
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-e", "--entire-sstable-throughput" }, description = "Print entire SSTable streaming throughput in MiB/s")
+    private boolean entireSSTableThroughput;
+
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-m", "--mib" }, description = "Print the throughput cap for inter-datacenter streaming in MiB/s")
+    private boolean interDCStreamThroughputMiB;
+
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-d", "--precise-mbit" }, description = "Print the throughput cap for inter-datacenter streaming in precise Mbits (double)")
+    private boolean interDCStreamThroughputDoubleMbit;
+
     @Override
     public void execute(NodeProbe probe)
     {
-        probe.output().out.println("Current inter-datacenter stream throughput: " + probe.getInterDCStreamThroughput() + " Mb/s");
+        int throughput;
+        double throughputInDouble;
+
+        if (entireSSTableThroughput)
+        {
+            if (interDCStreamThroughputDoubleMbit || interDCStreamThroughputMiB)
+                throw new IllegalArgumentException("You cannot use more than one flag with this command");
+
+            throughputInDouble = probe.getEntireSSTableInterDCStreamThroughput();
+            probe.output().out.printf("Current entire SSTable inter-datacenter stream throughput: %s%n",
+                                      throughputInDouble > 0 ? throughputInDouble + " MiB/s" : "unlimited");
+        }
+        else if (interDCStreamThroughputMiB)
+        {
+            if (interDCStreamThroughputDoubleMbit)
+                throw new IllegalArgumentException("You cannot use more than one flag with this command");
+
+            throughputInDouble = probe.getInterDCStreamThroughputMibAsDouble();
+            probe.output().out.printf("Current inter-datacenter stream throughput: %s%n",
+                                      throughputInDouble > 0 ? throughputInDouble + " MiB/s" : "unlimited");
+        }
+        else if (interDCStreamThroughputDoubleMbit)
+        {
+            throughputInDouble = probe.getInterDCStreamThroughputAsDouble();
+            probe.output().out.printf("Current inter-datacenter stream throughput: %s%n",
+                                      throughputInDouble > 0 ? throughputInDouble + " Mb/s" : "unlimited");
+        }
+        else
+        {
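+            // Default output is the rounded Mb/s cap ("unlimited" when <= 0); a fractional cap aborts with a hint to use -d.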
+            throughputInDouble = probe.getInterDCStreamThroughputAsDouble();
+            throughput = probe.getInterDCStreamThroughput();
+
+            if (throughput <= 0)
+                probe.output().out.printf("Current inter-datacenter stream throughput: unlimited%n");
+            else if (DoubleMath.isMathematicalInteger(throughputInDouble))
+                probe.output().out.printf("Current inter-datacenter stream throughput: %s%n", throughput + " Mb/s");
+            else
+                throw new RuntimeException("Use the -d flag to quiet this error and get the exact throughput in megabits/s");
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java
index 0e9bdc1..bd98d34 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java
@@ -33,4 +33,4 @@
         else
             System.out.println("Snapshot throttle is disabled");
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java
index 9014d3c..94cf298 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java
@@ -17,17 +17,70 @@
  */
 package org.apache.cassandra.tools.nodetool;
 
-import io.airlift.airline.Command;
+import com.google.common.math.DoubleMath;
 
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
-@Command(name = "getstreamthroughput", description = "Print the Mb/s throughput cap for streaming in the system")
+@Command(name = "getstreamthroughput", description = "Print the throughput cap for streaming and entire SSTable streaming in the system in rounded megabits. " +
+                                                     "For precise number, please, use option -d")
 public class GetStreamThroughput extends NodeToolCmd
 {
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-e", "--entire-sstable-throughput" }, description = "Print entire SSTable streaming throughput in MiB/s")
+    private boolean entireSSTableThroughput;
+
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-m", "--mib" }, description = "Print the throughput cap for streaming in MiB/s")
+    private boolean streamThroughputMiB;
+
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-d", "--precise-mbit" }, description = "Print the throughput cap for streaming in precise Mbits (double)")
+    private boolean streamThroughputDoubleMbit;
+
     @Override
     public void execute(NodeProbe probe)
     {
-        probe.output().out.println("Current stream throughput: " + probe.getStreamThroughput() + " Mb/s");
+        int throughput;
+        double throughputInDouble;
+
+        if (entireSSTableThroughput)
+        {
+            if (streamThroughputDoubleMbit || streamThroughputMiB)
+                throw new IllegalArgumentException("You cannot use more than one flag with this command");
+
+            throughputInDouble = probe.getEntireSSTableStreamThroughput();
+            probe.output().out.printf("Current entire SSTable stream throughput: %s%n",
+                                      throughputInDouble > 0 ? throughputInDouble + " MiB/s" : "unlimited");
+        }
+        else if (streamThroughputMiB)
+        {
+            if (streamThroughputDoubleMbit)
+                throw new IllegalArgumentException("You cannot use more than one flag with this command");
+
+            throughputInDouble = probe.getStreamThroughputMibAsDouble();
+            probe.output().out.printf("Current stream throughput: %s%n",
+                                      throughputInDouble > 0 ? throughputInDouble + " MiB/s" : "unlimited");
+        }
+        else if (streamThroughputDoubleMbit)
+        {
+            throughputInDouble = probe.getStreamThroughputAsDouble();
+            probe.output().out.printf("Current stream throughput: %s%n",
+                                      throughputInDouble > 0 ? throughputInDouble + " Mb/s" : "unlimited");
+        }
+        else
+        {
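+            // Default output is the rounded Mb/s cap ("unlimited" when <= 0); a fractional cap aborts with a hint to use -d.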
+            throughputInDouble = probe.getStreamThroughputAsDouble();
+            throughput = probe.getStreamThroughput();
+
+            if (throughput <= 0)
+                probe.output().out.printf("Current stream throughput: unlimited%n");
+            else if (DoubleMath.isMathematicalInteger(throughputInDouble))
+                probe.output().out.printf("Current stream throughput: %s%n", throughput + " Mb/s");
+            else
+                throw new RuntimeException("Use the -d flag to quiet this error and get the exact throughput in megabits/s");
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java b/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java
index 4f5f1b3..a297f76 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.tools.nodetool;
 
 import io.airlift.airline.Command;
+import io.airlift.airline.Option;
 
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
@@ -25,9 +26,12 @@
 @Command(name = "gossipinfo", description = "Shows the gossip information for the cluster")
 public class GossipInfo extends NodeToolCmd
 {
+    @Option(title = "resolve_ip", name = {"-r", "--resolve-ip"}, description = "Show node domain names instead of IPs")
+    private boolean resolveIp = false;
+
     @Override
     public void execute(NodeProbe probe)
     {
-        probe.output().out.println(probe.getGossipInfo(printPort));
+        probe.output().out.println(probe.getGossipInfo(printPort, resolveIp));
     }
 }
diff --git a/src/java/org/apache/cassandra/tools/nodetool/HostStat.java b/src/java/org/apache/cassandra/tools/nodetool/HostStat.java
index 19c0448..56c46ee 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/HostStat.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/HostStat.java
@@ -38,4 +38,4 @@
     {
         return resolveIp ? endpoint.getHostName() : endpoint.getHostAddress();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/HostStatWithPort.java b/src/java/org/apache/cassandra/tools/nodetool/HostStatWithPort.java
index 9cff725..b07a6e3 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/HostStatWithPort.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/HostStatWithPort.java
@@ -26,7 +26,7 @@
 
     public HostStatWithPort(String token, InetAddressAndPort endpoint, boolean resolveIp, Float owns)
     {
-        super(token, endpoint.address, resolveIp, owns);
+        super(token, endpoint.getAddress(), resolveIp, owns);
         this.endpointWithPort = endpoint;
     }
 
@@ -41,7 +41,7 @@
             return super.ipOrDns();
 
         return resolveIp ?
-               endpointWithPort.address.getHostName() + ':' + endpointWithPort.port :
+               endpointWithPort.getAddress().getHostName() + ':' + endpointWithPort.getPort() :
                endpointWithPort.getHostAddressAndPort();
     }
 }
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Info.java b/src/java/org/apache/cassandra/tools/nodetool/Info.java
index cf1f894..db7277e 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Info.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Info.java
@@ -178,8 +178,8 @@
     }
 
     /**
-     * Returns the total off heap memory used in MB.
-     * @return the total off heap memory used in MB.
+     * Returns the total off heap memory used in MiB.
+     * @return the total off heap memory used in MiB.
      */
     private static double getOffHeapMemoryUsed(NodeProbe probe)
     {
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java
index aef77bd..3cba8e0 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java
@@ -30,4 +30,4 @@
     {
         probe.invalidateCounterCache();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCache.java
new file mode 100644
index 0000000..0f9079f
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCache.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+@Command(name = "invalidatecredentialscache", description = "Invalidate the credentials cache")
+public class InvalidateCredentialsCache extends NodeToolCmd
+{
+    @Arguments(usage = "[<role>...]", description = "List of roles to invalidate. By default, all roles")
+    private List<String> args = new ArrayList<>();
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        if (args.isEmpty())
+        {
+            probe.invalidateCredentialsCache();
+        }
+        else
+        {
+            for (String roleName : args)
+            {
+                probe.invalidateCredentialsCache(roleName);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCache.java
new file mode 100644
index 0000000..c242b03
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCache.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+@Command(name = "invalidatejmxpermissionscache", description = "Invalidate the JMX permissions cache")
+public class InvalidateJmxPermissionsCache extends NodeToolCmd
+{
+    @Arguments(usage = "[<role>...]", description = "List of roles to invalidate. By default, all roles")
+    private List<String> args = new ArrayList<>();
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        if (args.isEmpty())
+        {
+            probe.invalidateJmxPermissionsCache();
+        }
+        else
+        {
+            for (String roleName : args)
+            {
+                probe.invalidateJmxPermissionsCache(roleName);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java
index cfe7d2f..4414b42 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java
@@ -30,4 +30,4 @@
     {
         probe.invalidateKeyCache();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCache.java
new file mode 100644
index 0000000..8b58060
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCache.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+@Command(name = "invalidatenetworkpermissionscache", description = "Invalidate the network permissions cache")
+public class InvalidateNetworkPermissionsCache extends NodeToolCmd
+{
+    @Arguments(usage = "[<role>...]", description = "List of roles to invalidate. By default, all roles")
+    private List<String> args = new ArrayList<>();
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        if (args.isEmpty())
+        {
+            probe.invalidateNetworkPermissionsCache();
+        }
+        else
+        {
+            for (String roleName : args)
+            {
+                probe.invalidateNetworkPermissionsCache(roleName);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCache.java
new file mode 100644
index 0000000..cc66c98
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCache.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang3.StringUtils;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import org.apache.cassandra.auth.DataResource;
+import org.apache.cassandra.auth.FunctionResource;
+import org.apache.cassandra.auth.JMXResource;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+@Command(name = "invalidatepermissionscache", description = "Invalidate the permissions cache")
+public class InvalidatePermissionsCache extends NodeToolCmd
+{
+    @Arguments(usage = "[<role>]", description = "A role for which permissions to specified resources need to be invalidated")
+    private List<String> args = new ArrayList<>();
+
+    // Data Resources
+    @Option(title = "all-keyspaces",
+            name = {"--all-keyspaces"},
+            description = "Invalidate permissions for 'ALL KEYSPACES'")
+    private boolean allKeyspaces;
+
+    @Option(title = "keyspace",
+            name = {"--keyspace"},
+            description = "Keyspace to invalidate permissions for")
+    private String keyspace;
+
+    @Option(title = "all-tables",
+            name = {"--all-tables"},
+            description = "Invalidate permissions for 'ALL TABLES'")
+    private boolean allTables;
+
+    @Option(title = "table",
+            name = {"--table"},
+            description = "Table to invalidate permissions for (you must specify --keyspace for using this option)")
+    private String table;
+
+    // Roles Resources
+    @Option(title = "all-roles",
+            name = {"--all-roles"},
+            description = "Invalidate permissions for 'ALL ROLES'")
+    private boolean allRoles;
+
+    @Option(title = "role",
+            name = {"--role"},
+            description = "Role to invalidate permissions for")
+    private String role;
+
+    // Functions Resources
+    @Option(title = "all-functions",
+            name = {"--all-functions"},
+            description = "Invalidate permissions for 'ALL FUNCTIONS'")
+    private boolean allFunctions;
+
+    @Option(title = "functions-in-keyspace",
+            name = {"--functions-in-keyspace"},
+            description = "Keyspace to invalidate permissions for")
+    private String functionsInKeyspace;
+
+    @Option(title = "function",
+            name = {"--function"},
+            description = "Function to invalidate permissions for (you must specify --functions-in-keyspace for using " +
+                    "this option; function format: name[arg1^..^argN], for example: foo[Int32Type^DoubleType])")
+    private String function;
+
+    // MBeans Resources
+    @Option(title = "all-mbeans",
+            name = {"--all-mbeans"},
+            description = "Invalidate permissions for 'ALL MBEANS'")
+    private boolean allMBeans;
+
+    @Option(title = "mbean",
+            name = {"--mbean"},
+            description = "MBean to invalidate permissions for")
+    private String mBean;
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        if (args.isEmpty())
+        {
+            checkArgument(!allKeyspaces && StringUtils.isEmpty(keyspace) && StringUtils.isEmpty(table)
+                    && !allRoles && StringUtils.isEmpty(role)
+                    && !allFunctions && StringUtils.isEmpty(functionsInKeyspace) && StringUtils.isEmpty(function)
+                    && !allMBeans && StringUtils.isEmpty(mBean),
+                    "No resource options allowed without a <role> being specified");
+
+            probe.invalidatePermissionsCache();
+        }
+        else
+        {
+            checkArgument(args.size() == 1,
+                    "Only a single <role> is supported / you may have a typo in the resource options spelling");
+            List<String> resourceNames = new ArrayList<>();
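+            // Translate the resource options into canonical resource names; each one is invalidated for the given role below.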
+
+            // Data Resources
+            if (allKeyspaces)
+                resourceNames.add(DataResource.root().getName());
+
+            if (allTables)
+                if (StringUtils.isNotEmpty(keyspace))
+                    resourceNames.add(DataResource.allTables(keyspace).getName());
+                else
+                    throw new IllegalArgumentException("--all-tables option should be passed along with --keyspace option");
+
+            if (StringUtils.isNotEmpty(table))
+                if (StringUtils.isNotEmpty(keyspace))
+                    resourceNames.add(DataResource.table(keyspace, table).getName());
+                else
+                    throw new IllegalArgumentException("--table option should be passed along with --keyspace option");
+
+            if (StringUtils.isNotEmpty(keyspace) && !allTables && StringUtils.isEmpty(table))
+                resourceNames.add(DataResource.keyspace(keyspace).getName());
+
+            // Roles Resources
+            if (allRoles)
+                resourceNames.add(RoleResource.root().getName());
+
+            if (StringUtils.isNotEmpty(role))
+                resourceNames.add(RoleResource.role(role).getName());
+
+            // Function Resources
+            if (allFunctions)
+                resourceNames.add(FunctionResource.root().getName());
+
+            if (StringUtils.isNotEmpty(function))
+                if (StringUtils.isNotEmpty(functionsInKeyspace))
+                    resourceNames.add(constructFunctionResource(functionsInKeyspace, function));
+                else
+                    throw new IllegalArgumentException("--function option should be passed along with --functions-in-keyspace option");
+            else
+                if (StringUtils.isNotEmpty(functionsInKeyspace))
+                    resourceNames.add(FunctionResource.keyspace(functionsInKeyspace).getName());
+
+            // MBeans Resources
+            if (allMBeans)
+                resourceNames.add(JMXResource.root().getName());
+
+            if (StringUtils.isNotEmpty(mBean))
+                resourceNames.add(JMXResource.mbean(mBean).getName());
+
+            String roleName = args.get(0);
+
+            if (resourceNames.isEmpty())
+                throw new IllegalArgumentException("No resource options specified");
+
+            for (String resourceName : resourceNames)
+                probe.invalidatePermissionsCache(roleName, resourceName);
+        }
+    }
+
+    private String constructFunctionResource(String functionsInKeyspace, String function)
+    {
+        try
+        {
+            return FunctionResource.fromName("functions/" + functionsInKeyspace + '/' + function).getName();
+        }
+        catch (ConfigurationException e)
+        {
+            throw new IllegalArgumentException("An error was encountered when looking up function definition: " + e.getMessage());
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRolesCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRolesCache.java
new file mode 100644
index 0000000..4fca5c3
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRolesCache.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+@Command(name = "invalidaterolescache", description = "Invalidate the roles cache")
+public class InvalidateRolesCache extends NodeToolCmd
+{
+
+    @Arguments(usage = "[<role>...]", description = "List of roles to invalidate. By default, all roles")
+    private List<String> args = new ArrayList<>();
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        if (args.isEmpty())
+        {
+            probe.invalidateRolesCache();
+        }
+        else
+        {
+            for (String roleName : args)
+            {
+                probe.invalidateRolesCache(roleName);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java
index 7357e27..1a10ed0 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java
@@ -30,4 +30,4 @@
     {
         probe.invalidateRowCache();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ListPendingHints.java b/src/java/org/apache/cassandra/tools/nodetool/ListPendingHints.java
new file mode 100644
index 0000000..af414d8
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/ListPendingHints.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.net.UnknownHostException;
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.util.List;
+import java.util.Map;
+
+import io.airlift.airline.Command;
+import org.apache.cassandra.hints.PendingHintsInfo;
+import org.apache.cassandra.locator.EndpointSnitchInfoMBean;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool;
+import org.apache.cassandra.tools.nodetool.formatter.TableBuilder;
+
+@Command(name = "listpendinghints", description = "Print all pending hints that this node has")
+public class ListPendingHints extends NodeTool.NodeToolCmd
+{
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        List<Map<String, String>> pendingHints = probe.listPendingHints();
+        if(pendingHints.isEmpty())
+        {
+            probe.output().out.println("This node does not have any pending hints");
+        }
+        else
+        {
+            Map<String, String> endpointMap = probe.getHostIdToEndpointWithPort();
+            Map<String, String> simpleStates = probe.getSimpleStatesWithPort();
+            EndpointSnitchInfoMBean epSnitchInfo = probe.getEndpointSnitchInfoProxy();
+
+            DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss,SSS");
+            TableBuilder tableBuilder = new TableBuilder();
+
+            tableBuilder.add("Host ID", "Address", "Rack", "DC", "Status", "Total files", "Newest", "Oldest");
+            for (Map<String, String> hintInfo : pendingHints)
+            {
+                String endpoint = hintInfo.get(PendingHintsInfo.HOST_ID);
+                String totalFiles = hintInfo.get(PendingHintsInfo.TOTAL_FILES);
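+                // Newest/oldest values are epoch-millis timestamps of the pending hints, rendered below in UTC.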
+                LocalDateTime newest = Instant.ofEpochMilli(Long.parseLong(hintInfo.get(PendingHintsInfo.NEWEST_TIMESTAMP)))
+                                              .atZone(ZoneId.of("UTC"))
+                                              .toLocalDateTime();
+                LocalDateTime oldest = Instant.ofEpochMilli(Long.parseLong(hintInfo.get(PendingHintsInfo.OLDEST_TIMESTAMP)))
+                                              .atZone(ZoneId.of("UTC"))
+                                              .toLocalDateTime();
+                String address = endpointMap.get(endpoint);
+                String rack = null;
+                String dc = null;
+                String status = null;
+                try
+                {
+                    rack = epSnitchInfo.getRack(address);
+                    dc = epSnitchInfo.getDatacenter(address);
+                    status = simpleStates.getOrDefault(InetAddressAndPort.getByName(address).toString(),
+                                                       "Unknown");
+                }
+                catch (UnknownHostException e)
+                {
+                    rack = rack != null ? rack : "Unknown";
+                    dc = dc != null ? dc : "Unknown";
+                    status = "Unknown";
+                }
+
+                tableBuilder.add(endpoint,
+                                 address,
+                                 rack,
+                                 dc,
+                                 status,
+                                 String.valueOf(totalFiles),
+                                 dtf.format(newest),
+                                 dtf.format(oldest));
+            }
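+        // The first row reports whether audit logging is currently enabled; the remaining rows reflect the effective AuditLogOptions.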
+            tableBuilder.printTo(probe.output().out);
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java b/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
index 79494cd..b70a7a9 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.tools.nodetool;
 
 import java.io.PrintStream;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -25,6 +26,7 @@
 
 import io.airlift.airline.Command;
 
+import io.airlift.airline.Option;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
@@ -33,6 +35,11 @@
 @Command(name = "listsnapshots", description = "Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication.")
 public class ListSnapshots extends NodeToolCmd
 {
+    @Option(title = "no_ttl",
+            name = { "-nt", "--no-ttl" },
+            description = "Skip snapshots with TTL")
+    private boolean noTTL = false;
+
     @Override
     public void execute(NodeProbe probe)
     {
@@ -41,7 +48,10 @@
         {
             out.println("Snapshot Details: ");
 
-            final Map<String,TabularData> snapshotDetails = probe.getSnapshotDetails();
+            Map<String, String> options = new HashMap<>();
+            options.put("no_ttl", Boolean.toString(noTTL));
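+            // The -nt/--no-ttl flag travels in this generic options map; when true, snapshots created with a TTL are skipped.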
+
+            final Map<String, TabularData> snapshotDetails = probe.getSnapshotDetails(options);
             if (snapshotDetails.isEmpty())
             {
                 out.println("There are no snapshots");
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Move.java b/src/java/org/apache/cassandra/tools/nodetool/Move.java
index 8654d25..075e008 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Move.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Move.java
@@ -43,4 +43,4 @@
             throw new RuntimeException("Error during moving node", e);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/NetStats.java b/src/java/org/apache/cassandra/tools/nodetool/NetStats.java
index cc6b7b1..aacc071 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/NetStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/NetStats.java
@@ -26,6 +26,7 @@
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.MessagingServiceMBean;
 import org.apache.cassandra.streaming.ProgressInfo;
 import org.apache.cassandra.streaming.SessionInfo;
@@ -54,11 +55,11 @@
             out.printf("%s %s%n", status.streamOperation.getDescription(), status.planId.toString());
             for (SessionInfo info : status.sessions)
             {
-                out.printf("    %s", info.peer.toString(printPort));
+                out.printf("    %s", InetAddressAndPort.toString(info.peer, printPort));
                 // print private IP when it is used
                 if (!info.peer.equals(info.connecting))
                 {
-                    out.printf(" (using %s)", info.connecting.toString(printPort));
+                    out.printf(" (using %s)", InetAddressAndPort.toString(info.connecting, printPort));
                 }
                 out.printf("%n");
                 if (!info.receivingSummaries.isEmpty())
diff --git a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java
index 4ec70d8..fde9eef 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java
@@ -30,4 +30,4 @@
     {
         probe.pauseHintsDelivery();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java
index a083cde..a16e8f2 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java
@@ -57,4 +57,4 @@
 
         probe.rebuild(sourceDataCenterName, keyspace, tokens, specificSources);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java
index 4a6b071..f7a3b6f 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java
@@ -40,4 +40,4 @@
         checkArgument(args.size() >= 3, "rebuild_index requires ks, cf and idx args");
         probe.rebuildIndex(args.get(0), args.get(1), toArray(args.subList(2, args.size()), String.class));
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/RecompressSSTables.java b/src/java/org/apache/cassandra/tools/nodetool/RecompressSSTables.java
new file mode 100644
index 0000000..78ed7ec
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/RecompressSSTables.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+@Command(name = "recompress_sstables", description = "Rewrite sstables (for the requested tables) that have compression configuration different from the current")
+public class RecompressSSTables extends NodeToolCmd
+{
+    @Arguments(usage = "[<keyspace> <tables>...]", description = "The keyspace followed by one or many tables")
+    private List<String> args = new ArrayList<>();
+
+    @Option(title = "jobs",
+            name = {"-j", "--jobs"},
+            description = "Number of sstables to recompress simultaneously, set to 0 to use all available compaction threads")
+    private int jobs = 2;
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        List<String> keyspaces = parseOptionalKeyspace(args, probe);
+        String[] tableNames = parseOptionalTables(args);
+
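+        // Recompress keyspace by keyspace; -j/--jobs caps how many sstables are rewritten concurrently (0 = all compaction threads).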
+        for (String keyspace : keyspaces)
+        {
+            try
+            {
+                probe.recompressSSTables(probe.output().out, keyspace, jobs, tableNames);
+            }
+            catch (Exception e)
+            {
+                throw new RuntimeException("Error occurred during sstable recompression", e);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java
index 6ca90fb..8727a61 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java
@@ -30,4 +30,4 @@
     {
         probe.reloadTriggers();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Repair.java b/src/java/org/apache/cassandra/tools/nodetool/Repair.java
index c4f60af..8e5aab2 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Repair.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Repair.java
@@ -97,6 +97,12 @@
     @Option(title = "optimise_streams", name = {"-os", "--optimise-streams"}, description = "Use --optimise-streams to try to reduce the number of streams we do (EXPERIMENTAL, see CASSANDRA-3200).")
     private boolean optimiseStreams = false;
 
+    @Option(title = "skip-paxos", name = {"-skip-paxos", "--skip-paxos"}, description = "If the --skip-paxos flag is included, the paxos repair step is skipped. Paxos repair is also skipped for preview repairs.")
+    private boolean skipPaxos = false;
+
+    @Option(title = "paxos-only", name = {"-paxos-only", "--paxos-only"}, description = "If the --paxos-only flag is included, no table data is repaired, only paxos operations..")
+    private boolean paxosOnly = false;
+
     @Option(title = "ignore_unreplicated_keyspaces", name = {"-iuk","--ignore-unreplicated-keyspaces"}, description = "Use --ignore-unreplicated-keyspaces to ignore keyspaces which are not replicated, otherwise the repair will fail")
     private boolean ignoreUnreplicatedKeyspaces = false;
 
@@ -152,6 +158,8 @@
             options.put(RepairOption.PREVIEW, getPreviewKind().toString());
             options.put(RepairOption.OPTIMISE_STREAMS_KEY, Boolean.toString(optimiseStreams));
             options.put(RepairOption.IGNORE_UNREPLICATED_KS, Boolean.toString(ignoreUnreplicatedKeyspaces));
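+            // Paxos repair runs only for non-preview repairs and can be skipped with --skip-paxos; --paxos-only likewise applies only to non-preview repairs.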
+            options.put(RepairOption.REPAIR_PAXOS_KEY, Boolean.toString(!skipPaxos && getPreviewKind() == PreviewKind.NONE));
+            options.put(RepairOption.PAXOS_ONLY_KEY, Boolean.toString(paxosOnly && getPreviewKind() == PreviewKind.NONE));
 
             if (!startToken.isEmpty() || !endToken.isEmpty())
             {
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java
index 786852d..d7ac301 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java
@@ -30,4 +30,4 @@
     {
         probe.resetFullQueryLogger();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java
index 708636f..62775a4 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java
@@ -38,4 +38,4 @@
             throw new RuntimeException(e);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java
index a3984f8..bda98aa 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java
@@ -30,4 +30,4 @@
     {
         probe.resumeHintsDelivery();
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetAuthCacheConfig.java b/src/java/org/apache/cassandra/tools/nodetool/SetAuthCacheConfig.java
new file mode 100644
index 0000000..8adad65
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetAuthCacheConfig.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import org.apache.cassandra.auth.AuthCacheMBean;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+@Command(name = "setauthcacheconfig", description = "Set configuration for Auth cache")
+public class SetAuthCacheConfig extends NodeToolCmd
+{
+    @SuppressWarnings("unused")
+    @Option(title = "cache-name",
+            name = {"--cache-name"},
+            description = "Name of Auth cache (required)",
+            required = true)
+    private String cacheName;
+
+    @SuppressWarnings("unused")
+    @Option(title = "validity-period",
+            name = {"--validity-period"},
+            description = "Validity period in milliseconds")
+    private Integer validityPeriod;
+
+    @SuppressWarnings("unused")
+    @Option(title = "update-interval",
+            name = {"--update-interval"},
+            description = "Update interval in milliseconds")
+    private Integer updateInterval;
+
+    @SuppressWarnings("unused")
+    @Option(title = "max-entries",
+            name = {"--max-entries"},
+            description = "Max entries")
+    private Integer maxEntries;
+
+    @SuppressWarnings("unused")
+    @Option(title = "enable-active-update",
+            name = {"--enable-active-update"},
+            description = "Enable active update")
+    private Boolean enableActiveUpdate;
+
+    @SuppressWarnings("unused")
+    @Option(title = "disable-active-update",
+            name = {"--disable-active-update"},
+            description = "Disable active update")
+    private Boolean disableActiveUpdate;
+
+    @Override
+    public void execute(NodeProbe probe)
+    {
+        Boolean activeUpdate = getActiveUpdate(enableActiveUpdate, disableActiveUpdate);
+
+        checkArgument(validityPeriod != null || updateInterval != null
+                      || maxEntries != null || activeUpdate != null,
+                      "At least one optional parameter needs to be passed");
+
+        AuthCacheMBean authCacheMBean = probe.getAuthCacheMBean(cacheName);
+
+        if (validityPeriod != null)
+        {
+            authCacheMBean.setValidity(validityPeriod);
+            probe.output().out.println("Changed Validity Period to " + validityPeriod);
+        }
+
+        if (updateInterval != null)
+        {
+            authCacheMBean.setUpdateInterval(updateInterval);
+            probe.output().out.println("Changed Update Interval to " + updateInterval);
+        }
+
+        if (maxEntries != null)
+        {
+            authCacheMBean.setMaxEntries(maxEntries);
+            probe.output().out.println("Changed Max Entries to " + maxEntries);
+        }
+
+        if (activeUpdate != null)
+        {
+            authCacheMBean.setActiveUpdate(activeUpdate);
+            probe.output().out.println("Changed Active Update to " + activeUpdate);
+        }
+    }
+
+    private Boolean getActiveUpdate(Boolean enableActiveUpdate, Boolean disableActiveUpdate)
+    {
+        if (enableActiveUpdate == null && disableActiveUpdate == null)
+            return null;
+
+        if (enableActiveUpdate != null && disableActiveUpdate != null)
+            throw new IllegalArgumentException("enable-active-update and disable-active-update cannot be used together");
+
+        return Boolean.TRUE.equals(enableActiveUpdate) ? Boolean.TRUE : Boolean.FALSE;
+    }
+}
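A sketch of how the new command could be invoked; the cache name and values are illustrative assumptions, not defaults:

    nodetool setauthcacheconfig --cache-name PermissionsCache --validity-period 30000 --update-interval 15000
    nodetool setauthcacheconfig --cache-name PermissionsCache --disable-active-update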
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java
index 65bb8f5..b96146c 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java
@@ -26,7 +26,7 @@
                                                            "This will be reduced proportionally to the number of nodes in the cluster.")
 public class SetBatchlogReplayThrottle extends NodeToolCmd
 {
-    @Arguments(title = "batchlog_replay_throttle", usage = "<value_in_kb_per_sec>", description = "Value in KB per second, 0 to disable throttling", required = true)
+    @Arguments(title = "batchlog_replay_throttle", usage = "<value_in_kb_per_sec>", description = "Value in KiB per second, 0 to disable throttling", required = true)
     private Integer batchlogReplayThrottle = null;
 
     @Override
@@ -34,4 +34,4 @@
     {
         probe.setBatchlogReplayThrottle(batchlogReplayThrottle);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java
index 461f6ae..b07eb9e 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java
@@ -42,4 +42,4 @@
         checkArgument(args.size() == 3, "setcachecapacity requires key-cache-capacity, row-cache-capacity, and counter-cache-capacity args.");
         probe.setCacheCapacities(args.get(0), args.get(1), args.get(2));
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java
index 18197e6..de9bab5 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java
@@ -42,4 +42,4 @@
         checkArgument(args.size() == 3, "setcachekeystosave requires key-cache-keys-to-save, row-cache-keys-to-save, and counter-cache-keys-to-save args.");
         probe.setCacheKeysToSave(args.get(0), args.get(1), args.get(2));
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetColumnIndexSize.java b/src/java/org/apache/cassandra/tools/nodetool/SetColumnIndexSize.java
new file mode 100644
index 0000000..fe5d8b3
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetColumnIndexSize.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
+
+@Command(name = "setcolumnindexsize", description = "Set the granularity of the collation index of rows within a partition in KiB")
+public class SetColumnIndexSize extends NodeToolCmd
+{
+    @SuppressWarnings("UnusedDeclaration")
+    @Arguments(title = "column_index_size", usage = "<value_in_kib>", description = "Value in KiB", required = true)
+    private int columnIndexSizeInKiB;
+
+    @Override
+    protected void execute(NodeProbe probe)
+    {
+        probe.setColumnIndexSize(columnIndexSizeInKiB);
+    }
+}
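Illustrative usage; the value is an arbitrary example in KiB, not a recommendation:

    nodetool setcolumnindexsize 64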
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java
index 56e558f..52bb5bc 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java
@@ -47,4 +47,4 @@
 
         probe.setCompactionThreshold(args.get(0), args.get(1), minthreshold, maxthreshold);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java
index 80e7222..a75aa13 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java
@@ -23,10 +23,10 @@
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
-@Command(name = "setcompactionthroughput", description = "Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling")
+@Command(name = "setcompactionthroughput", description = "Set the MiB/s throughput cap for compaction in the system, or 0 to disable throttling")
 public class SetCompactionThroughput extends NodeToolCmd
 {
-    @Arguments(title = "compaction_throughput", usage = "<value_in_mb>", description = "Value in MB, 0 to disable throttling", required = true)
+    @Arguments(title = "compaction_throughput", usage = "<value_in_mb>", description = "Value in MiB, 0 to disable throttling", required = true)
     private Integer compactionThroughput = null;
 
     @Override
@@ -34,4 +34,4 @@
     {
         probe.setCompactionThroughput(compactionThroughput);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetDefaultKeyspaceRF.java b/src/java/org/apache/cassandra/tools/nodetool/SetDefaultKeyspaceRF.java
new file mode 100644
index 0000000..6126bac
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetDefaultKeyspaceRF.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.NodeTool;
+
+@Command(name = "setdefaultrf", description = "Sets default keyspace replication factor.")
+public class SetDefaultKeyspaceRF extends NodeTool.NodeToolCmd
+{
+    @Arguments(title = "default_rf", usage = "<value>", description = "Default replication factor", required = true)
+    private Integer defaultRF = null;
+
+    protected void execute(NodeProbe probe)
+    {
+        probe.setDefaultKeyspaceReplicationFactor(defaultRF);
+    }
+}
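Illustrative usage, with an example replication factor:

    nodetool setdefaultrf 3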
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java
index feb945b..b7b7b61 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java
@@ -23,10 +23,10 @@
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
-@Command(name = "sethintedhandoffthrottlekb", description =  "Set hinted handoff throttle in kb per second, per delivery thread.")
+@Command(name = "sethintedhandoffthrottlekb", description =  "Set hinted handoff throttle in KiB per second, per delivery thread.")
 public class SetHintedHandoffThrottleInKB extends NodeToolCmd
 {
-    @Arguments(title = "throttle_in_kb", usage = "<value_in_kb_per_sec>", description = "Value in KB per second", required = true)
+    @Arguments(title = "throttle_in_kb", usage = "<value_in_kb_per_sec>", description = "Value in KiB per second", required = true)
     private Integer throttleInKB = null;
 
     @Override
@@ -34,4 +34,4 @@
     {
         probe.setHintedHandoffThrottleInKB(throttleInKB);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java b/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java
index c43abe1..1160876 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java
@@ -51,4 +51,4 @@
         Float owns = ownerships.get(endpoint);
         hostStats.add(new HostStat(token, endpoint, resolveIp, owns));
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java
index 1397573..29465c6 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java
@@ -19,20 +19,36 @@
 
 import io.airlift.airline.Arguments;
 import io.airlift.airline.Command;
-
+import io.airlift.airline.Option;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
-@Command(name = "setinterdcstreamthroughput", description = "Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling")
+@Command(name = "setinterdcstreamthroughput", description = "Set the throughput cap for inter-datacenter streaming and entire SSTable inter-datacenter streaming in the system, or 0 to disable throttling")
 public class SetInterDCStreamThroughput extends NodeToolCmd
 {
     @SuppressWarnings("UnusedDeclaration")
-    @Arguments(title = "inter_dc_stream_throughput", usage = "<value_in_mb>", description = "Value in Mb, 0 to disable throttling", required = true)
+    @Arguments(title = "inter_dc_stream_throughput", usage = "<value_in_mb>", description = "Value in megabits, 0 to disable throttling", required = true)
     private int interDCStreamThroughput;
 
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-e", "--entire-sstable-throughput" }, description = "Set entire SSTable streaming throughput in MiB/s")
+    private boolean setEntireSSTableThroughput;
+
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-m", "--mib" }, description = "Set streaming throughput in MiB/s")
+    private boolean interDCStreamThroughputInMebibytes;
+
     @Override
     public void execute(NodeProbe probe)
     {
-        probe.setInterDCStreamThroughput(interDCStreamThroughput);
+        if (setEntireSSTableThroughput && interDCStreamThroughputInMebibytes)
+            throw new IllegalArgumentException("You cannot use -e and -m at the same time");
+
+        if (setEntireSSTableThroughput)
+            probe.setEntireSSTableInterDCStreamThroughput(interDCStreamThroughput);
+        else if (interDCStreamThroughputInMebibytes)
+            probe.setInterDCStreamThroughputMiB(interDCStreamThroughput);
+        else
+            probe.setInterDCStreamThroughput(interDCStreamThroughput);
     }
 }
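Illustrative invocations of the extended command; the values are examples only. The bare argument is read in megabits per second, -m reads it as MiB/s, and -e throttles entire-SSTable inter-datacenter streaming in MiB/s:

    nodetool setinterdcstreamthroughput 200
    nodetool setinterdcstreamthroughput -m 25
    nodetool setinterdcstreamthroughput -e 25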
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java
index 8d9ad90..66d6283 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java
@@ -100,4 +100,4 @@
         for (String classQualifier : classQualifiers)
             probe.setLoggingLevel(classQualifier, level);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java
index a4c49b8..045ccc1 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java
@@ -33,4 +33,4 @@
     {
         probe.setSnapshotLinksPerSecond(snapshotThrottle);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
index 469ec95..210a736 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
@@ -19,20 +19,36 @@
 
 import io.airlift.airline.Arguments;
 import io.airlift.airline.Command;
-
+import io.airlift.airline.Option;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
-@Command(name = "setstreamthroughput", description = "Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling")
+@Command(name = "setstreamthroughput", description = "Set throughput cap for streaming and entire SSTable streaming in the system, or 0 to disable throttling")
 public class SetStreamThroughput extends NodeToolCmd
 {
     @SuppressWarnings("UnusedDeclaration")
-    @Arguments(title = "stream_throughput", usage = "<value_in_mb>", description = "Value in Mb, 0 to disable throttling", required = true)
+    @Arguments(title = "stream_throughput", usage = "<value_in_mb>", description = "Value in megabits, 0 to disable throttling", required = true)
     private int streamThroughput;
 
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-e", "--entire-sstable-throughput" }, description = "Set entire SSTable streaming throughput in MiB/s")
+    private boolean setEntireSSTableThroughput;
+
+    @SuppressWarnings("UnusedDeclaration")
+    @Option(name = { "-m", "--mib" }, description = "Set streaming throughput in MiB/s")
+    private boolean streamThroughputInMebibytes;
+
     @Override
     public void execute(NodeProbe probe)
     {
-        probe.setStreamThroughput(streamThroughput);
+        if (setEntireSSTableThroughput && streamThroughputInMebibytes)
+            throw new IllegalArgumentException("You cannot use -e and -m at the same time");
+
+        if (setEntireSSTableThroughput)
+            probe.setEntireSSTableStreamThroughput(streamThroughput);
+        else if (streamThroughputInMebibytes)
+            probe.setStreamThroughputMiB(streamThroughput);
+        else
+            probe.setStreamThroughput(streamThroughput);
     }
-}
\ No newline at end of file
+}
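Illustrative invocations mirroring the inter-datacenter command above; the values are examples only:

    nodetool setstreamthroughput 200
    nodetool setstreamthroughput -m 25
    nodetool setstreamthroughput -e 25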
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java
index e081980..ef9f498 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java
@@ -36,4 +36,4 @@
         checkArgument(traceProbability >= 0 && traceProbability <= 1, "Trace probability must be between 0 and 1");
         probe.setTraceProbability(traceProbability);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Sjk.java b/src/java/org/apache/cassandra/tools/nodetool/Sjk.java
index 3ad2c94..d7f7a04 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Sjk.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Sjk.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.tools.nodetool;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.lang.reflect.Field;
@@ -57,6 +56,7 @@
 import com.beust.jcommander.Parameterized;
 import io.airlift.airline.Arguments;
 import io.airlift.airline.Command;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.tools.Output;
 import org.gridkit.jvmtool.JmxConnectionInfo;
 import org.gridkit.jvmtool.cli.CommandLauncher;
@@ -464,15 +464,15 @@
             {
                 // loop through files in classpath
                 File dir = new File(packageURL.getFile());
-                String cp = dir.getCanonicalPath();
+                String cp = dir.canonicalPath();
                 File root = dir;
                 while (true)
                 {
-                    if (cp.equals(new File(root, path).getCanonicalPath()))
+                    if (cp.equals(new File(root, path).canonicalPath()))
                     {
                         break;
                     }
-                    root = root.getParentFile();
+                    root = root.parent();
                 }
                 listFiles(results, root, dir);
             }
@@ -480,10 +480,10 @@
 
         static void listFiles(List<String> names, File root, File dir)
         {
-            String rootPath = root.getAbsolutePath();
+            String rootPath = root.absolutePath();
             if (dir.exists() && dir.isDirectory())
             {
-                for (File file : dir.listFiles())
+                for (File file : dir.tryList())
                 {
                     if (file.isDirectory())
                     {
@@ -491,7 +491,7 @@
                     }
                     else
                     {
-                        String name = file.getAbsolutePath().substring(rootPath.length() + 1);
+                        String name = file.absolutePath().substring(rootPath.length() + 1);
                         name = name.replace('\\', '/');
                         names.add(name);
                     }
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java b/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
index ac485ea..52cc5df 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
@@ -17,13 +17,6 @@
  */
 package org.apache.cassandra.tools.nodetool;
 
-import static com.google.common.collect.Iterables.toArray;
-import static org.apache.commons.lang3.StringUtils.join;
-import io.airlift.airline.Arguments;
-import io.airlift.airline.Command;
-import io.airlift.airline.Option;
-
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
@@ -31,9 +24,18 @@
 import java.util.List;
 import java.util.Map;
 
+import io.airlift.airline.Arguments;
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import org.apache.cassandra.config.DurationSpec;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
 
+import static com.google.common.collect.Iterables.toArray;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.commons.lang3.StringUtils.join;
+
 @Command(name = "snapshot", description = "Take a snapshot of specified keyspaces or a snapshot of the specified table")
 public class Snapshot extends NodeToolCmd
 {
@@ -44,7 +46,7 @@
     private String table = null;
 
     @Option(title = "tag", name = {"-t", "--tag"}, description = "The name of the snapshot")
-    private String snapshotName = Long.toString(System.currentTimeMillis());
+    private String snapshotName = Long.toString(currentTimeMillis());
 
     @Option(title = "ktlist", name = { "-kt", "--kt-list", "-kc", "--kc.list" }, description = "The list of Keyspace.table to take snapshot.(you must not specify only keyspace)")
     private String ktList = null;
@@ -52,6 +54,9 @@
     @Option(title = "skip-flush", name = {"-sf", "--skip-flush"}, description = "Do not flush memtables before snapshotting (snapshot will not contain unflushed data)")
     private boolean skipFlush = false;
 
+    @Option(title = "ttl", name = {"--ttl"}, description = "Specify a TTL of created snapshot")
+    private String ttl = null;
+
     @Override
     public void execute(NodeProbe probe)
     {
@@ -64,10 +69,14 @@
 
             Map<String, String> options = new HashMap<String,String>();
             options.put("skipFlush", Boolean.toString(skipFlush));
+            if (null != ttl) {
+                DurationSpec.LongNanosecondsBound d = new DurationSpec.LongNanosecondsBound(ttl);
+                options.put("ttl", d.toString());
+            }
 
-            if (!snapshotName.isEmpty() && snapshotName.contains(File.pathSeparator))
+            if (!snapshotName.isEmpty() && snapshotName.contains(File.pathSeparator()))
             {
-                throw new IOException("Snapshot name cannot contain " + File.pathSeparatorChar);
+                throw new IOException("Snapshot name cannot contain " + File.pathSeparator());
             }
             // Create a separate path for kclist to avoid breaking of already existing scripts
             if (null != ktList && !ktList.isEmpty())
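Illustrative usage of the new --ttl option, assuming a DurationSpec-style value such as 3h; the tag and keyspace names are placeholders:

    nodetool snapshot --ttl 3h -t nightly my_keyspace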
diff --git a/src/java/org/apache/cassandra/tools/nodetool/TpStats.java b/src/java/org/apache/cassandra/tools/nodetool/TpStats.java
index 5b20b13..1d16d8d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/TpStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/TpStats.java
@@ -22,8 +22,6 @@
 import io.airlift.airline.Option;
 import org.apache.cassandra.tools.NodeProbe;
 import org.apache.cassandra.tools.NodeTool.NodeToolCmd;
-import org.apache.cassandra.tools.nodetool.stats.TpStatsHolder;
-import org.apache.cassandra.tools.nodetool.stats.TpStatsPrinter;
 import org.apache.cassandra.tools.nodetool.stats.*;
 
 
diff --git a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java
index a3a0049..2a19d3a 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java
@@ -38,4 +38,4 @@
         else
             probe.truncateHints(endpoint);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java b/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java
index ba1b6f5..cc94d8b 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java
@@ -33,9 +33,16 @@
     @Arguments(usage = "[<keyspace> <tables>...]", description = "The keyspace followed by one or many tables")
     private List<String> args = new ArrayList<>();
 
-    @Option(title = "include_all", name = {"-a", "--include-all-sstables"}, description = "Use -a to include all sstables, even those already on the current version")
+    @Option(title = "include_all",
+            name = {"-a", "--include-all-sstables"},
+            description = "Use -a to include all sstables, even those already on the current version")
     private boolean includeAll = false;
 
+    @Option(title = "max_timestamp",
+            name = {"-t", "--max-timestamp"},
+            description = "Use -t to compact only SSTables that have local creation time _older_ than the given timestamp")
+    private long maxSSTableTimestamp = Long.MAX_VALUE;
+
     @Option(title = "jobs",
             name = {"-j", "--jobs"},
             description = "Number of sstables to upgrade simultanously, set to 0 to use all available compaction threads")
@@ -51,7 +58,7 @@
         {
             try
             {
-                probe.upgradeSSTables(probe.output().out, keyspace, !includeAll, jobs, tableNames);
+                probe.upgradeSSTables(probe.output().out, keyspace, !includeAll, maxSSTableTimestamp, jobs, tableNames);
             }
             catch (Exception e)
             {
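Illustrative usage of the new -t option; <max_timestamp> stands in for a real creation-time cutoff and my_keyspace is a placeholder:

    nodetool upgradesstables -a -t <max_timestamp> my_keyspace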
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Verify.java b/src/java/org/apache/cassandra/tools/nodetool/Verify.java
index 872a124..0a610b3 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Verify.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Verify.java
@@ -44,6 +44,11 @@
             description = "Also check that all sstables are the latest version")
     private boolean checkVersion = false;
 
+    @Option(title = "override-disable",
+    name = {"-f", "--force"},
+    description = "Override disabling of verify tool - see CASSANDRA-9947 for caveats")
+    private boolean overrideDisable = false;
+
     @Option(title = "dfp",
             name = {"-d", "--dfp"},
             description = "Invoke the disk failure policy if a corrupt sstable is found")
@@ -68,6 +73,12 @@
     public void execute(NodeProbe probe)
     {
         PrintStream out = probe.output().out;
+        if (!overrideDisable)
+        {
+            out.println("verify is disabled unless a [-f|--force] override flag is provided. See CASSANDRA-9947 and CASSANDRA-17017 for details.");
+            System.exit(1);
+        }
+
         List<String> keyspaces = parseOptionalKeyspace(args, probe);
         String[] tableNames = parseOptionalTables(args);
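Illustrative usage now that the tool requires an explicit override; the keyspace and table names are placeholders:

    nodetool verify -f my_keyspace my_table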
 
diff --git a/src/java/org/apache/cassandra/tools/nodetool/formatter/TableBuilder.java b/src/java/org/apache/cassandra/tools/nodetool/formatter/TableBuilder.java
index 166ed3d..a95cb9e 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/formatter/TableBuilder.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/formatter/TableBuilder.java
@@ -18,7 +18,11 @@
 
 package org.apache.cassandra.tools.nodetool.formatter;
 
+import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
+import java.io.UncheckedIOException;
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -72,6 +76,11 @@
         this.rows.addAll(base.rows);
     }
 
+    public void add(@Nonnull List<String> row)
+    {
+        add(row.toArray(new String[0]));
+    }
+
     public void add(@Nonnull String... row)
     {
         Objects.requireNonNull(row);
@@ -116,6 +125,22 @@
         }
     }
 
+    @Override
+    public String toString()
+    {
+        ByteArrayOutputStream os = new ByteArrayOutputStream();
+        try (PrintStream stream = new PrintStream(os, true, StandardCharsets.UTF_8.displayName()))
+        {
+            printTo(stream);
+            stream.flush();
+            return os.toString(StandardCharsets.UTF_8.displayName());
+        }
+        catch (UnsupportedEncodingException e)
+        {
+            throw new UncheckedIOException(e);
+        }
+    }
+
     /**
      * Share max offsets across multiple TableBuilders
      */
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/DataPathsHolder.java b/src/java/org/apache/cassandra/tools/nodetool/stats/DataPathsHolder.java
new file mode 100644
index 0000000..9bf7bdb
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/DataPathsHolder.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool.stats;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.base.Throwables.getStackTraceAsString;
+
+import org.apache.cassandra.db.ColumnFamilyStoreMBean;
+import org.apache.cassandra.tools.NodeProbe;
+
+public class DataPathsHolder implements StatsHolder
+{
+    public final Map<String, Object> pathsHash = new HashMap<>();
+
+    public DataPathsHolder(NodeProbe probe, List<String> tableNames)
+    {
+        Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> mbeansIterator = probe.getColumnFamilyStoreMBeanProxies();
+        while (mbeansIterator.hasNext())
+        {
+            Map.Entry<String, ColumnFamilyStoreMBean> entry = mbeansIterator.next();
+            String keyspaceName = entry.getKey();
+            String tableName = entry.getValue().getTableName();
+
+            if (!(tableNames.isEmpty() ||
+                  tableNames.contains(keyspaceName + '.' + tableName) ||
+                  tableNames.contains(keyspaceName)))
+            {
+                continue;
+            }
+
+            Map<String, List<String>> ksPaths;
+            List<String> dataPaths;
+
+            try
+            {
+                dataPaths = entry.getValue().getDataPaths();
+            }
+            catch (Throwable e)
+            {
+                probe.output().err.println("Failed to get data paths for " + keyspaceName + '.' + tableName + ". Skipped.");
+                probe.output().err.println("error: " + e.getMessage());
+                probe.output().err.println("-- StackTrace --");
+                probe.output().err.println(getStackTraceAsString(e));
+                continue;
+            }
+
+            if (pathsHash.containsKey(keyspaceName))
+            {
+                ksPaths = (Map<String, List<String>>) pathsHash.get(keyspaceName);
+            }
+            else
+            {
+                ksPaths = new HashMap<>();
+                pathsHash.put(keyspaceName, ksPaths);
+            }
+            ksPaths.put(tableName, dataPaths);
+        }
+    }
+
+    @Override
+    public Map<String, Object> convert2Map()
+    {
+        return pathsHash;
+    }
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/DataPathsPrinter.java b/src/java/org/apache/cassandra/tools/nodetool/stats/DataPathsPrinter.java
new file mode 100644
index 0000000..ba2b29e
--- /dev/null
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/DataPathsPrinter.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool.stats;
+
+import java.io.PrintStream;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class DataPathsPrinter<T extends StatsHolder>
+{
+    public static StatsPrinter<DataPathsHolder> from(String format)
+    {
+        if ("json".equals(format))
+            return new StatsPrinter.JsonPrinter<>();
+        if ("yaml".equals(format))
+            return new StatsPrinter.YamlPrinter<>();
+
+        return new DefaultPrinter();
+    }
+
+    public static class DefaultPrinter implements StatsPrinter<DataPathsHolder>
+    {
+        @Override
+        public void print(DataPathsHolder data, PrintStream out)
+        {
+            Iterator<Map.Entry<String, Object>> iterator = data.pathsHash.entrySet().iterator();
+
+            while (iterator.hasNext())
+            {
+                Map.Entry<String, Object> entry = iterator.next();
+
+                out.println("Keyspace: " + entry.getKey());
+                Map<String, List<String>> ksPaths = (Map<String, List<String>>) entry.getValue();
+                for (Map.Entry<String, List<String>> table : ksPaths.entrySet())
+                {
+                    out.println("\tTable: " + table.getKey());
+                    out.println("\tPaths:");
+
+                    for (String path : table.getValue())
+                        out.println("\t\t" + path);
+
+                    out.println("");
+                }
+            }
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java
index c35e1fe..a345ce0 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java
@@ -26,4 +26,4 @@
 public interface StatsHolder
 {
     public Map<String, Object> convert2Map();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java
index dc15332..89d7705 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java
@@ -75,4 +75,4 @@
                ? totalWriteTime / writeCount / 1000
                : Double.NaN;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java
index 389efba..037227b 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java
@@ -66,4 +66,4 @@
             out.println(yaml.dump(data.convert2Map()));
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java
index 0859327..8b5090b 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java
@@ -20,6 +20,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 
 public class StatsTable
 {
@@ -69,6 +70,11 @@
     public long maximumTombstonesPerSliceLastFiveMinutes;
     public String droppedMutations;
     public List<String> sstablesInEachLevel = new ArrayList<>();
+    public List<String> sstableBytesInEachLevel = new ArrayList<>();
     public Boolean isInCorrectLocation = null; // null: option not active
     public double droppableTombstoneRatio;
+    public Map<String, String> topSizePartitions;
+    public Map<String, Long> topTombstonePartitions;
+    public String topSizePartitionsLastUpdate;
+    public String topTombstonePartitionsLastUpdate;
 }
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java b/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java
index 3cd2570..60132d1 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java
@@ -18,6 +18,8 @@
 
 package org.apache.cassandra.tools.nodetool.stats;
 
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
 import java.util.*;
 
 import com.google.common.collect.ArrayListMultimap;
@@ -120,6 +122,7 @@
         mpTable.put("sstable_count", table.sstableCount);
         mpTable.put("old_sstable_count", table.oldSSTableCount);
         mpTable.put("sstables_in_each_level", table.sstablesInEachLevel);
+        mpTable.put("sstable_bytes_in_each_level", table.sstableBytesInEachLevel);
         mpTable.put("space_used_live", table.spaceUsedLive);
         mpTable.put("space_used_total", table.spaceUsedTotal);
         mpTable.put("space_used_by_snapshots_total", table.spaceUsedBySnapshotsTotal);
@@ -165,6 +168,8 @@
         mpTable.put("dropped_mutations", table.droppedMutations);
         mpTable.put("droppable_tombstone_ratio",
                     String.format("%01.5f", table.droppableTombstoneRatio));
+        mpTable.put("top_size_partitions", table.topSizePartitions);
+        mpTable.put("top_tombstone_partitions", table.topTombstonePartitions);
         if (locationCheck)
             mpTable.put("sstables_in_correct_location", table.isInCorrectLocation);
         return mpTable;
@@ -239,6 +244,17 @@
                     }
                 }
 
+                long[] leveledSSTablesBytes = table.getPerLevelSizeBytes();
+                if (leveledSSTablesBytes != null)
+                {
+                    statsTable.isLeveledSstable = true;
+                    for (int level = 0; level < leveledSSTablesBytes.length; level++)
+                    {
+                        long size = leveledSSTablesBytes[level];
+                        statsTable.sstableBytesInEachLevel.add(format(size, humanReadable));
+                    }
+                }
+
                 if (locationCheck)
                     statsTable.isInCorrectLocation = !table.hasMisplacedSSTables();
 
@@ -350,6 +366,13 @@
                 statsTable.maximumTombstonesPerSliceLastFiveMinutes = histogram.getMax();
                 statsTable.droppedMutations = format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "DroppedMutations"), humanReadable);
                 statsTable.droppableTombstoneRatio = probe.getDroppableTombstoneRatio(keyspaceName, tableName);
+                statsTable.topSizePartitions = format(table.getTopSizePartitions(), humanReadable);
+                if (table.getTopSizePartitionsLastUpdate() != null)
+                    statsTable.topSizePartitionsLastUpdate = millisToDateString(table.getTopSizePartitionsLastUpdate());
+                statsTable.topTombstonePartitions = table.getTopTombstonePartitions();
+                if (table.getTopTombstonePartitionsLastUpdate() != null)
+                    statsTable.topTombstonePartitionsLastUpdate = millisToDateString(table.getTopTombstonePartitionsLastUpdate());
+
                 statsKeyspace.tables.add(statsTable);
             }
             keyspaces.add(statsKeyspace);
@@ -361,6 +384,22 @@
         return humanReadable ? FileUtils.stringifyFileSize(bytes) : Long.toString(bytes);
     }
 
+    private Map<String, String> format(Map<String, Long> map, boolean humanReadable)
+    {
+        LinkedHashMap<String, String> retMap = new LinkedHashMap<>();
+        for (Map.Entry<String, Long> entry : map.entrySet())
+            retMap.put(entry.getKey(), format(entry.getValue(), humanReadable));
+        return retMap;
+    }
+
+    private String millisToDateString(long millis)
+    {
+        TimeZone tz = TimeZone.getTimeZone("UTC");
+        DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
+        df.setTimeZone(tz);
+        return df.format(new Date(millis));
+    }
+
     /**
      * Sort and filter this TableStatHolder's tables as specified by its sortKey and top attributes.
      */
@@ -447,7 +486,7 @@
             return filter.get(keyspace) != null || ignoreMode;
         }
 
-        public void verifyKeyspaces(List<String> keyspaces)
+        public void verifyKeyspaces(Collection<String> keyspaces)
         {
             for (String ks : verifier.keySet())
                 if (!keyspaces.contains(ks))
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java b/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java
index d8e4d4a..7f17c21 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java
@@ -20,6 +20,7 @@
 
 import java.io.PrintStream;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -79,8 +80,12 @@
             out.println(indent + "SSTable count: " + table.sstableCount);
             out.println(indent + "Old SSTable count: " + table.oldSSTableCount);
             if (table.isLeveledSstable)
+            {
                 out.println(indent + "SSTables in each level: [" + String.join(", ",
-                                                                          table.sstablesInEachLevel) + "]");
+                                                                               table.sstablesInEachLevel) + "]");
+                out.println(indent + "SSTable bytes in each level: [" + String.join(", ",
+                                                                                    table.sstableBytesInEachLevel) + "]");
+            }
 
             out.println(indent + "Space used (live): " + table.spaceUsedLive);
             out.println(indent + "Space used (total): " + table.spaceUsedTotal);
@@ -129,6 +134,23 @@
             out.printf(indent + "Droppable tombstone ratio: %01.5f%n", table.droppableTombstoneRatio);
             if (table.isInCorrectLocation != null)
                 out.println(indent + "SSTables in correct location: " + table.isInCorrectLocation);
+            if (table.topSizePartitions != null && !table.topSizePartitions.isEmpty())
+            {
+                out.printf(indent + "Top partitions by size (last update: %s):%n", table.topSizePartitionsLastUpdate);
+                int maxWidth = Math.max(table.topSizePartitions.keySet().stream().map(String::length).max(Integer::compareTo).get() + 3, 5);
+                out.printf(indent + "  %-" + maxWidth + "s %s%n", "Key", "Size");
+                for (Map.Entry<String, String> size : table.topSizePartitions.entrySet())
+                    out.printf(indent + "  %-" + maxWidth + "s %s%n", size.getKey(), size.getValue());
+            }
+
+            if (table.topTombstonePartitions != null && !table.topTombstonePartitions.isEmpty())
+            {
+                out.printf(indent + "Top partitions by tombstone count (last update: %s):%n", table.topTombstonePartitionsLastUpdate);
+                int maxWidth = Math.max(table.topTombstonePartitions.keySet().stream().map(String::length).max(Integer::compareTo).get() + 3, 5);
+                out.printf(indent + "  %-" + maxWidth + "s %s%n", "Key", "Count");
+                for (Map.Entry<String, Long> tombstonecnt : table.topTombstonePartitions.entrySet())
+                    out.printf(indent + "  %-" + maxWidth + "s %s%n", tombstonecnt.getKey(), tombstonecnt.getValue());
+            }
             out.println("");
         }
     }
diff --git a/src/java/org/apache/cassandra/tracing/TraceKeyspace.java b/src/java/org/apache/cassandra/tracing/TraceKeyspace.java
index c2e74d8..c6ca1f2 100644
--- a/src/java/org/apache/cassandra/tracing/TraceKeyspace.java
+++ b/src/java/org/apache/cassandra/tracing/TraceKeyspace.java
@@ -21,7 +21,9 @@
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.rows.Row;
@@ -33,9 +35,9 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
 
 import static java.lang.String.format;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public final class TraceKeyspace
 {
@@ -43,6 +45,8 @@
     {
     }
 
+    private static final int DEFAULT_RF = CassandraRelevantProperties.SYSTEM_TRACES_DEFAULT_RF.getInt();
+
     /**
      * Generation is used as a timestamp for automatic table creation on startup.
      * If you make any changes to the tables below, make sure to increment the
@@ -102,7 +106,7 @@
 
     public static KeyspaceMetadata metadata()
     {
-        return KeyspaceMetadata.create(SchemaConstants.TRACE_KEYSPACE_NAME, KeyspaceParams.simple(2), Tables.of(Sessions, Events));
+        return KeyspaceMetadata.create(SchemaConstants.TRACE_KEYSPACE_NAME, KeyspaceParams.simple(Math.max(DEFAULT_RF, DatabaseDescriptor.getDefaultKeyspaceRF())), Tables.of(Sessions, Events));
     }
 
     static Mutation makeStartSessionMutation(ByteBuffer sessionId,
@@ -117,9 +121,9 @@
         Row.SimpleBuilder rb = builder.row();
         rb.ttl(ttl)
           .add("client", client)
-          .add("coordinator", FBUtilities.getBroadcastAddressAndPort().address);
+          .add("coordinator", FBUtilities.getBroadcastAddressAndPort().getAddress());
         if (!Gossiper.instance.hasMajorVersion3Nodes())
-            rb.add("coordinator_port", FBUtilities.getBroadcastAddressAndPort().port);
+            rb.add("coordinator_port", FBUtilities.getBroadcastAddressAndPort().getPort());
         rb.add("request", request)
           .add("started_at", new Date(startedAt))
           .add("command", command)
@@ -140,13 +144,13 @@
     static Mutation makeEventMutation(ByteBuffer sessionId, String message, int elapsed, String threadName, int ttl)
     {
         PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(Events, sessionId);
-        Row.SimpleBuilder rowBuilder = builder.row(UUIDGen.getTimeUUID())
+        Row.SimpleBuilder rowBuilder = builder.row(nextTimeUUID())
                                               .ttl(ttl);
 
         rowBuilder.add("activity", message)
-                  .add("source", FBUtilities.getBroadcastAddressAndPort().address);
+                  .add("source", FBUtilities.getBroadcastAddressAndPort().getAddress());
         if (!Gossiper.instance.hasMajorVersion3Nodes())
-            rowBuilder.add("source_port", FBUtilities.getBroadcastAddressAndPort().port);
+            rowBuilder.add("source_port", FBUtilities.getBroadcastAddressAndPort().getPort());
         rowBuilder.add("thread", threadName);
 
         if (elapsed >= 0)
diff --git a/src/java/org/apache/cassandra/tracing/TraceState.java b/src/java/org/apache/cassandra/tracing/TraceState.java
index 1e0813c..1713369 100644
--- a/src/java/org/apache/cassandra/tracing/TraceState.java
+++ b/src/java/org/apache/cassandra/tracing/TraceState.java
@@ -19,7 +19,6 @@
 
 import java.nio.ByteBuffer;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -28,7 +27,8 @@
 import org.slf4j.helpers.MessageFormatter;
 
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressEventNotifier;
 import org.apache.cassandra.utils.progress.ProgressListener;
@@ -39,7 +39,7 @@
  */
 public abstract class TraceState implements ProgressEventNotifier
 {
-    public final UUID sessionId;
+    public final TimeUUID sessionId;
     public final InetAddressAndPort coordinator;
     public final Stopwatch watch;
     public final ByteBuffer sessionIdBytes;
@@ -63,14 +63,14 @@
     // See CASSANDRA-7626 for more details.
     private final AtomicInteger references = new AtomicInteger(1);
 
-    protected TraceState(InetAddressAndPort coordinator, UUID sessionId, Tracing.TraceType traceType)
+    protected TraceState(InetAddressAndPort coordinator, TimeUUID sessionId, Tracing.TraceType traceType)
     {
         assert coordinator != null;
         assert sessionId != null;
 
         this.coordinator = coordinator;
         this.sessionId = sessionId;
-        sessionIdBytes = ByteBufferUtil.bytes(sessionId);
+        sessionIdBytes = sessionId.toBytes();
         this.traceType = traceType;
         this.ttl = traceType.getTTL();
         watch = Stopwatch.createStarted();
@@ -134,7 +134,7 @@
             }
             catch (InterruptedException e)
             {
-                throw new RuntimeException();
+                throw new UncheckedInterruptedException(e);
             }
         }
         if (status == Status.ACTIVE)
diff --git a/src/java/org/apache/cassandra/tracing/TraceStateImpl.java b/src/java/org/apache/cassandra/tracing/TraceStateImpl.java
index 48f193c..f8691ee 100644
--- a/src/java/org/apache/cassandra/tracing/TraceStateImpl.java
+++ b/src/java/org/apache/cassandra/tracing/TraceStateImpl.java
@@ -17,12 +17,9 @@
  */
 package org.apache.cassandra.tracing;
 
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
@@ -31,13 +28,18 @@
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.concurrent.Stage;
-import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.exceptions.OverloadedException;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.utils.JVMStabilityInspector;
-import org.apache.cassandra.utils.WrappedRunnable;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
+
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.db.ConsistencyLevel.ANY;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * ThreadLocal state for a tracing session. The presence of an instance of this class as a ThreadLocal denotes that an
@@ -53,7 +55,7 @@
 
     private final Set<Future<?>> pendingFutures = ConcurrentHashMap.newKeySet();
 
-    public TraceStateImpl(InetAddressAndPort coordinator, UUID sessionId, Tracing.TraceType traceType)
+    public TraceStateImpl(InetAddressAndPort coordinator, TimeUUID sessionId, Tracing.TraceType traceType)
     {
         super(coordinator, sessionId, traceType);
     }
@@ -82,8 +84,8 @@
                 logger.trace("Waiting for up to {} seconds for {} trace events to complete",
                              +WAIT_FOR_PENDING_EVENTS_TIMEOUT_SECS, pendingFutures.size());
 
-            CompletableFuture.allOf(pendingFutures.toArray(new CompletableFuture<?>[pendingFutures.size()]))
-                             .get(WAIT_FOR_PENDING_EVENTS_TIMEOUT_SECS, TimeUnit.SECONDS);
+            FutureCombiner.allOf(Arrays.asList(pendingFutures.toArray(new Future<?>[pendingFutures.size()])))
+                          .get(WAIT_FOR_PENDING_EVENTS_TIMEOUT_SECS, TimeUnit.SECONDS);
         }
         catch (TimeoutException ex)
         {
@@ -101,14 +103,7 @@
 
     void executeMutation(final Mutation mutation)
     {
-        CompletableFuture<Void> fut = CompletableFuture.runAsync(new WrappedRunnable()
-        {
-            protected void runMayThrow()
-            {
-                mutateWithCatch(mutation);
-            }
-        }, Stage.TRACING.executor());
-
+        Future<Void> fut = Stage.TRACING.executor().submit(() -> mutateWithCatch(mutation), null);
         boolean ret = pendingFutures.add(fut);
         if (!ret)
             logger.warn("Failed to insert pending future, tracing synchronization may not work");
@@ -118,7 +113,7 @@
     {
         try
         {
-            StorageProxy.mutate(Collections.singletonList(mutation), ConsistencyLevel.ANY, System.nanoTime());
+            StorageProxy.mutate(singletonList(mutation), ANY, nanoTime());
         }
         catch (OverloadedException e)
         {
diff --git a/src/java/org/apache/cassandra/tracing/Tracing.java b/src/java/org/apache/cassandra/tracing/Tracing.java
index 7d72224..5c820db 100644
--- a/src/java/org/apache/cassandra/tracing/Tracing.java
+++ b/src/java/org/apache/cassandra/tracing/Tracing.java
@@ -24,17 +24,14 @@
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import io.netty.util.concurrent.FastThreadLocal;
-import org.apache.cassandra.concurrent.ExecutorLocal;
+import org.apache.cassandra.concurrent.ExecutorLocals;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
@@ -43,14 +40,15 @@
 import org.apache.cassandra.net.ParamType;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * A trace session context. Able to track and store trace sessions. A session is usually a user initiated query, and may
  * have multiple local and remote events before it is completed.
  */
-public abstract class Tracing implements ExecutorLocal<TraceState>
+public abstract class Tracing extends ExecutorLocals.Impl
 {
     public static final IVersionedSerializer<TraceType> traceTypeSerializer = new IVersionedSerializer<TraceType>()
     {
@@ -105,9 +103,7 @@
 
     private final InetAddressAndPort localAddress = FBUtilities.getLocalAddressAndPort();
 
-    private final FastThreadLocal<TraceState> state = new FastThreadLocal<>();
-
-    protected final ConcurrentMap<UUID, TraceState> sessions = new ConcurrentHashMap<>();
+    protected final ConcurrentMap<TimeUUID, TraceState> sessions = new ConcurrentHashMap<>();
 
     public static final Tracing instance;
 
@@ -131,22 +127,22 @@
         instance = null != tracing ? tracing : new TracingImpl();
     }
 
-    public UUID getSessionId()
+    public TimeUUID getSessionId()
     {
         assert isTracing();
-        return state.get().sessionId;
+        return get().sessionId;
     }
 
     public TraceType getTraceType()
     {
         assert isTracing();
-        return state.get().traceType;
+        return get().traceType;
     }
 
     public int getTTL()
     {
         assert isTracing();
-        return state.get().ttl;
+        return get().ttl;
     }
 
     /**
@@ -157,29 +153,29 @@
         return instance.get() != null;
     }
 
-    public UUID newSession(Map<String,ByteBuffer> customPayload)
+    public TimeUUID newSession(Map<String,ByteBuffer> customPayload)
     {
         return newSession(
-                TimeUUIDType.instance.compose(ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes())),
+                nextTimeUUID(),
                 TraceType.QUERY,
                 customPayload);
     }
 
-    public UUID newSession(TraceType traceType)
+    public TimeUUID newSession(TraceType traceType)
     {
         return newSession(
-                TimeUUIDType.instance.compose(ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes())),
+                nextTimeUUID(),
                 traceType,
                 Collections.EMPTY_MAP);
     }
 
-    public UUID newSession(UUID sessionId, Map<String,ByteBuffer> customPayload)
+    public TimeUUID newSession(TimeUUID sessionId, Map<String,ByteBuffer> customPayload)
     {
         return newSession(sessionId, TraceType.QUERY, customPayload);
     }
 
     /** This method is intended to be overridden in tracing implementations that need access to the customPayload */
-    protected UUID newSession(UUID sessionId, TraceType traceType, Map<String,ByteBuffer> customPayload)
+    protected TimeUUID newSession(TimeUUID sessionId, TraceType traceType, Map<String,ByteBuffer> customPayload)
     {
         assert get() == null;
 
@@ -221,17 +217,19 @@
 
     public TraceState get()
     {
-        return state.get();
+        return ExecutorLocals.current().traceState;
     }
 
-    public TraceState get(UUID sessionId)
+    public TraceState get(TimeUUID sessionId)
     {
         return sessions.get(sessionId);
     }
 
-    public void set(final TraceState tls)
+    public void set(TraceState tls)
     {
-        state.set(tls);
+        @SuppressWarnings("resource")
+        ExecutorLocals current = ExecutorLocals.current();
+        ExecutorLocals.Impl.set(tls, current.clientWarnState);
     }
 
     public TraceState begin(final String request, final Map<String, String> parameters)
@@ -248,7 +246,7 @@
      */
     public TraceState initializeFromMessage(final Message.Header header)
     {
-        final UUID sessionId = header.traceSession();
+        final TimeUUID sessionId = header.traceSession();
         if (sessionId == null)
             return null;
 
@@ -278,7 +276,7 @@
     {
         try
         {
-            final UUID sessionId = message.traceSession();
+            final TimeUUID sessionId = message.traceSession();
             if (sessionId == null)
                 return;
 
@@ -289,7 +287,7 @@
             if (state == null) // session may have already finished; see CASSANDRA-5668
             {
                 TraceType traceType = message.traceType();
-                trace(ByteBuffer.wrap(UUIDGen.decompose(sessionId)), logMessage, traceType.getTTL());
+                trace(sessionId.toBytes(), logMessage, traceType.getTTL());
             }
             else
             {
@@ -313,7 +311,7 @@
         return addToMutable;
     }
 
-    protected abstract TraceState newTraceState(InetAddressAndPort coordinator, UUID sessionId, Tracing.TraceType traceType);
+    protected abstract TraceState newTraceState(InetAddressAndPort coordinator, TimeUUID sessionId, Tracing.TraceType traceType);
 
     // repair just gets a varargs method since it's so heavyweight anyway
     public static void traceRepair(String format, Object... args)
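
For illustration only: the tracing classes above now key sessions by TimeUUID rather than java.util.UUID. A small sketch of creating and serializing a session id with the API used in this patch (nextTimeUUID() and toBytes()); the class name below is hypothetical.

    import java.nio.ByteBuffer;
    import org.apache.cassandra.utils.TimeUUID;
    import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;

    public class TraceSessionIdSketch
    {
        public static void main(String[] args)
        {
            TimeUUID sessionId = nextTimeUUID();     // time-based id, replaces UUIDGen.getTimeUUID()
            ByteBuffer bytes = sessionId.toBytes();  // serialized form written with each trace event
            System.out.println(sessionId + " serializes to " + bytes.remaining() + " bytes");
        }
    }
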
diff --git a/src/java/org/apache/cassandra/tracing/TracingImpl.java b/src/java/org/apache/cassandra/tracing/TracingImpl.java
index c786fa2..1885146 100644
--- a/src/java/org/apache/cassandra/tracing/TracingImpl.java
+++ b/src/java/org/apache/cassandra/tracing/TracingImpl.java
@@ -22,12 +22,14 @@
 import java.net.InetAddress;
 import java.nio.ByteBuffer;
 import java.util.Map;
-import java.util.UUID;
 
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.WrappedRunnable;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 
 /**
  * A trace session context. Able to track and store trace sessions. A session is usually a user initiated query, and may
@@ -55,7 +57,7 @@
         final TraceStateImpl state = getStateImpl();
         assert state != null;
 
-        final long startedAt = System.currentTimeMillis();
+        final long startedAt = currentTimeMillis();
         final ByteBuffer sessionId = state.sessionIdBytes;
         final String command = state.traceType.toString();
         final int ttl = state.ttl;
@@ -93,7 +95,7 @@
     }
 
     @Override
-    protected TraceState newTraceState(InetAddressAndPort coordinator, UUID sessionId, TraceType traceType)
+    protected TraceState newTraceState(InetAddressAndPort coordinator, TimeUUID sessionId, TraceType traceType)
     {
         return new TraceStateImpl(coordinator, sessionId, traceType);
     }
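
For illustration only: TracingImpl now reads time through the Clock.Global statics instead of calling System directly. A tiny sketch of the same calls, assuming they simply delegate to the configured clock.

    import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
    import static org.apache.cassandra.utils.Clock.Global.nanoTime;

    class ClockSketch
    {
        public static void main(String[] args)
        {
            long startedAtMillis = currentTimeMillis(); // wall-clock timestamp, as used for the trace session start
            long t0 = nanoTime();                       // monotonic-style source for elapsed time
            long elapsedNanos = nanoTime() - t0;
            System.out.println(startedAtMillis + " " + elapsedNanos);
        }
    }
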
diff --git a/src/java/org/apache/cassandra/transport/CBUtil.java b/src/java/org/apache/cassandra/transport/CBUtil.java
index fd6a0ff..6cab638 100644
--- a/src/java/org/apache/cassandra/transport/CBUtil.java
+++ b/src/java/org/apache/cassandra/transport/CBUtil.java
@@ -44,6 +44,7 @@
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.UUIDGen;
 
 /**
@@ -301,11 +302,24 @@
         return UUIDGen.getUUID(buffer);
     }
 
+    public static TimeUUID readTimeUUID(ByteBuf cb)
+    {
+        long msb = cb.readLong();
+        long lsb = cb.readLong();
+        return TimeUUID.fromBytes(msb, lsb);
+    }
+
     public static void writeUUID(UUID uuid, ByteBuf cb)
     {
         cb.writeBytes(UUIDGen.decompose(uuid));
     }
 
+    public static void writeUUID(TimeUUID uuid, ByteBuf cb)
+    {
+        cb.writeLong(uuid.msb());
+        cb.writeLong(uuid.lsb());
+    }
+
     public static int sizeOfUUID(UUID uuid)
     {
         return UUID_SIZE;
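
For illustration only: the new CBUtil overloads above encode a TimeUUID as two longs. A hedged round-trip sketch using the same layout with a plain Netty buffer; only msb(), lsb() and fromBytes() are taken from this patch, the rest is hypothetical.

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;
    import org.apache.cassandra.utils.TimeUUID;
    import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;

    public class TimeUUIDCodecSketch
    {
        public static void main(String[] args)
        {
            TimeUUID original = nextTimeUUID();

            ByteBuf buf = Unpooled.buffer(16);
            buf.writeLong(original.msb());   // most significant bits first, as in writeUUID above
            buf.writeLong(original.lsb());

            TimeUUID decoded = TimeUUID.fromBytes(buf.readLong(), buf.readLong());
            System.out.println(original + " -> " + decoded);
            buf.release();
        }
    }
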
diff --git a/src/java/org/apache/cassandra/transport/CQLMessageHandler.java b/src/java/org/apache/cassandra/transport/CQLMessageHandler.java
index a9dba8b..09e9996 100644
--- a/src/java/org/apache/cassandra/transport/CQLMessageHandler.java
+++ b/src/java/org/apache/cassandra/transport/CQLMessageHandler.java
@@ -23,6 +23,8 @@
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.primitives.Ints;
+import org.apache.cassandra.transport.ClientResourceLimits.Overload;
+import org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,10 +44,9 @@
 import org.apache.cassandra.net.ShareableBytes;
 import org.apache.cassandra.transport.Flusher.FlushItem.Framed;
 import org.apache.cassandra.transport.messages.ErrorMessage;
-import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NoSpamLogger;
 
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 /**
  * Implementation of {@link AbstractMessageHandler} for processing CQL messages which comprise a {@link Message} wrapped
@@ -78,6 +79,7 @@
     private static final NoSpamLogger noSpamLogger = NoSpamLogger.getLogger(logger, 1L, TimeUnit.SECONDS);
 
     public static final int LARGE_MESSAGE_THRESHOLD = FrameEncoder.Payload.MAX_SIZE - 1;
+    public static final TimeUnit RATE_LIMITER_DELAY_UNIT = TimeUnit.NANOSECONDS;
 
     private final Envelope.Decoder envelopeDecoder;
     private final Message.Decoder<M> messageDecoder;
@@ -86,13 +88,14 @@
     private final ErrorHandler errorHandler;
     private final boolean throwOnOverload;
     private final ProtocolVersion version;
+    private final NonBlockingRateLimiter requestRateLimiter;
 
     long channelPayloadBytesInFlight;
     private int consecutiveMessageErrors = 0;
 
     interface MessageConsumer<M extends Message>
     {
-        void accept(Channel channel, M message, Dispatcher.FlushItemConverter toFlushItem);
+        void accept(Channel channel, M message, Dispatcher.FlushItemConverter toFlushItem, Overload backpressure);
     }
 
     interface ErrorHandler
@@ -129,6 +132,7 @@
         this.errorHandler       = errorHandler;
         this.throwOnOverload    = throwOnOverload;
         this.version            = version;
+        this.requestRateLimiter = resources.requestRateLimiter();
     }
 
     @Override
@@ -139,6 +143,23 @@
         return super.process(frame);
     }
 
+    /**
+     * Checks limits on bytes in flight and the request rate limiter (if enabled), then takes one of three actions:
+     * 
+     * 1.) If no limits are breached, process the request.
+     * 2.) If a limit is breached, and the connection is configured to throw on overload, throw {@link OverloadedException}.
+     * 3.) If a limit is breached, and the connection is not configured to throw, process the request, and return false
+     *     to let the {@link FrameDecoder} know it should stop processing frames.
+     *     
+     * If the connection is configured to throw {@link OverloadedException}, requests that breach the rate limit are
+     * not counted against that limit.
+     * 
+     * @return true if the {@link FrameDecoder} should continue to process incoming frames, and false if it should stop
+     *         processing them, effectively applying backpressure to clients
+     * 
+     * @throws ErrorMessage.WrappedException with an {@link OverloadedException} if overload occurs and the 
+     *         connection is configured to throw on overload
+     */
     protected boolean processOneContainedMessage(ShareableBytes bytes, Limit endpointReserve, Limit globalReserve)
     {
         ByteBuffer buf = bytes.get();
@@ -157,41 +178,108 @@
 
         // max CQL message size defaults to 256mb, so should be safe to downcast
         int messageSize = Ints.checkedCast(header.bodySizeInBytes);
+        
         if (throwOnOverload)
         {
             if (!acquireCapacity(header, endpointReserve, globalReserve))
             {
-                // discard the request and throw an exception
-                ClientMetrics.instance.markRequestDiscarded();
-                logger.trace("Discarded request of size: {}. InflightChannelRequestPayload: {}, " +
-                             "InflightEndpointRequestPayload: {}, InflightOverallRequestPayload: {}, Header: {}",
-                             messageSize,
-                             channelPayloadBytesInFlight,
-                             endpointReserve.using(),
-                             globalReserve.using(),
-                             header);
+                discardAndThrow(endpointReserve, globalReserve, buf, header, messageSize, Overload.BYTES_IN_FLIGHT);
+                return true;
+            }
 
-                handleError(new OverloadedException("Server is in overloaded state. " +
-                                                    "Cannot accept more requests at this point"), header);
-
-                // Don't stop processing incoming messages, rely on the client to apply
-                // backpressure when it receives OverloadedException
-                // but discard this message as we're responding with the overloaded error
-                incrementReceivedMessageMetrics(messageSize);
-                buf.position(buf.position() + Envelope.Header.LENGTH + messageSize);
+            if (DatabaseDescriptor.getNativeTransportRateLimitingEnabled() && !requestRateLimiter.tryReserve())
+            {
+                // We've already allocated against the bytes-in-flight limits, so release those resources.
+                release(header);
+                discardAndThrow(endpointReserve, globalReserve, buf, header, messageSize, Overload.REQUESTS);
                 return true;
             }
         }
-        else if (!acquireCapacityAndQueueOnFailure(header, endpointReserve, globalReserve))
+        else
         {
-            // set backpressure on the channel, queuing the request until we have capacity
-            ClientMetrics.instance.pauseConnection();
-            return false;
+            Overload backpressure = Overload.NONE;
+
+            if (!acquireCapacityAndQueueOnFailure(header, endpointReserve, globalReserve))
+            {
+                if (processRequestAndUpdateMetrics(bytes, header, messageSize, Overload.BYTES_IN_FLIGHT))
+                {
+                    if (decoder.isActive())
+                        ClientMetrics.instance.pauseConnection();
+                }
+
+                backpressure = Overload.BYTES_IN_FLIGHT;
+            }
+            
+            if (DatabaseDescriptor.getNativeTransportRateLimitingEnabled())
+            {
+                // Reserve a permit even if we've already triggered backpressure on bytes in flight.
+                long delay = requestRateLimiter.reserveAndGetDelay(RATE_LIMITER_DELAY_UNIT);
+                
+                if (backpressure == Overload.NONE && delay > 0)
+                {
+                    if (processRequestAndUpdateMetrics(bytes, header, messageSize, Overload.REQUESTS))
+                    {
+                        if (decoder.isActive())
+                            ClientMetrics.instance.pauseConnection();
+
+                        // Schedule a wakeup here if we process successfully. The connection should be closing otherwise.
+                        scheduleConnectionWakeupTask(delay, RATE_LIMITER_DELAY_UNIT);
+                    }
+                    
+                    backpressure = Overload.REQUESTS;
+                }
+            }
+            
+            // If we triggered backpressure, make sure the caller stops processing frames after the request completes.
+            if (backpressure != Overload.NONE)
+                return false;
         }
 
+        return processRequestAndUpdateMetrics(bytes, header, messageSize, Overload.NONE);
+    }
+
+    private boolean processRequestAndUpdateMetrics(ShareableBytes bytes, Envelope.Header header, int messageSize, Overload backpressure)
+    {
         channelPayloadBytesInFlight += messageSize;
         incrementReceivedMessageMetrics(messageSize);
-        return processRequest(composeRequest(header, bytes));
+        return processRequest(composeRequest(header, bytes), backpressure);
+    }
+
+    private void discardAndThrow(Limit endpointReserve, Limit globalReserve, 
+                                 ByteBuffer buf, Envelope.Header header, int messageSize,
+                                 Overload overload)
+    {
+        ClientMetrics.instance.markRequestDiscarded();
+        logOverload(endpointReserve, globalReserve, header, messageSize);
+
+        OverloadedException exception = buildOverloadedException(endpointReserve, globalReserve, overload);
+        handleError(exception, header);
+
+        // Don't stop processing incoming messages, as we rely on the client to apply
+        // backpressure when it receives OverloadedException, but discard this message 
+        // as we're responding with the overloaded error.
+        incrementReceivedMessageMetrics(messageSize);
+        buf.position(buf.position() + Envelope.Header.LENGTH + messageSize);
+    }
+
+    private OverloadedException buildOverloadedException(Limit endpointReserve, Limit globalReserve, Overload overload)
+    {
+        return overload == Overload.REQUESTS
+                ? new OverloadedException(String.format("Request breached global limit of %d requests/second. Server is " +
+                                                        "currently in an overloaded state and cannot accept more requests.", 
+                                                        requestRateLimiter.getRate()))
+                : new OverloadedException(String.format("Request breached limit on bytes in flight. (Endpoint: %d/%d bytes, Global: %d/%d bytes.) " +
+                                                        "Server is currently in an overloaded state and cannot accept more requests.",
+                                                        endpointReserve.using(), endpointReserve.limit(), globalReserve.using(), globalReserve.limit()));
+    }
+
+    private void logOverload(Limit endpointReserve, Limit globalReserve, Envelope.Header header, int messageSize)
+    {
+        logger.trace("Discarded request of size {} with {} bytes in flight on channel. " + 
+                     "Using {}/{} bytes of endpoint limit and {}/{} bytes of global limit. " + 
+                     "Global rate limiter: {} Header: {}",
+                     messageSize, channelPayloadBytesInFlight,
+                     endpointReserve.using(), endpointReserve.limit(), globalReserve.using(), globalReserve.limit(),
+                     requestRateLimiter, header);
     }
 
     private boolean handleProtocolException(ProtocolException exception,
@@ -251,11 +339,17 @@
 
     protected boolean processRequest(Envelope request)
     {
+        return processRequest(request, Overload.NONE);
+    }
+    
+    protected boolean processRequest(Envelope request, Overload backpressure)
+    {
         M message = null;
         try
         {
             message = messageDecoder.decode(channel, request);
-            dispatcher.accept(channel, message, this::toFlushItem);
+            dispatcher.accept(channel, message, this::toFlushItem, backpressure);
+            
             // sucessfully delivered a CQL message to the execution
             // stage, so reset the counter of consecutive errors
             consecutiveMessageErrors = 0;
@@ -293,8 +387,6 @@
      * to the client.
      * This also releases the capacity acquired for processing as
      * indicated by supplied header.
-     * @param t
-     * @param header
      */
     private void handleErrorAndRelease(Throwable t, Envelope.Header header)
     {
@@ -311,8 +403,6 @@
      * when an error occurs without any capacity being acquired.
      * Typically, this would be the result of an acquisition failure
      * if the THROW_ON_OVERLOAD option has been specified by the client.
-     * @param t
-     * @param header
      */
     private void handleError(Throwable t, Envelope.Header header)
     {
@@ -328,8 +418,6 @@
      * when an error occurs without any capacity being acquired.
      * Typically, this would be the result of an acquisition failure
      * if the THROW_ON_OVERLOAD option has been specified by the client.
-     * @param t
-     * @param streamId
      */
     private void handleError(Throwable t, int streamId)
     {
@@ -342,8 +430,6 @@
      * payload fails. This does not attempt to release any resources, as these errors
      * should only occur before any capacity acquisition is attempted (e.g. on receipt
      * of a corrupt frame, or failure to extract a CQL message from the envelope).
-     *
-     * @param t
      */
     private void handleError(Throwable t)
     {
@@ -405,8 +491,9 @@
             // max CQL message size defaults to 256mb, so should be safe to downcast
             int messageSize = Ints.checkedCast(header.bodySizeInBytes);
             receivedBytes += buf.remaining();
-
+            
             LargeMessage largeMessage = new LargeMessage(header);
+
             if (!acquireCapacity(header, endpointReserve, globalReserve))
             {
                 // In the case of large messages, never stop processing incoming frames
@@ -424,21 +511,49 @@
                 // concurrently.
                 if (throwOnOverload)
                 {
-                    // discard the request and throw an exception
+                    // Mark as overloaded so that we discard the message after consuming any subsequent frames.
                     ClientMetrics.instance.markRequestDiscarded();
-                    logger.trace("Discarded request of size: {}. InflightChannelRequestPayload: {}, " +
-                                 "InflightEndpointRequestPayload: {}, InflightOverallRequestPayload: {}, Header: {}",
-                                 messageSize,
-                                 channelPayloadBytesInFlight,
-                                 endpointReserve.using(),
-                                 globalReserve.using(),
-                                 header);
-
-                    // mark as overloaded so that discard the message
-                    // after consuming any subsequent frames
-                    largeMessage.markOverloaded();
+                    logOverload(endpointReserve, globalReserve, header, messageSize);
+                    largeMessage.markOverloaded(Overload.BYTES_IN_FLIGHT);
                 }
             }
+            else if (DatabaseDescriptor.getNativeTransportRateLimitingEnabled())
+            {
+                if (throwOnOverload)
+                {
+                    if (!requestRateLimiter.tryReserve())
+                    {
+                        ClientMetrics.instance.markRequestDiscarded();
+                        logOverload(endpointReserve, globalReserve, header, messageSize);
+                        
+                        // Mark as overloaded so that we discard the message after consuming any subsequent frames.
+                        // (i.e. Request resources we may already have acquired above will be released.)
+                        largeMessage.markOverloaded(Overload.REQUESTS);
+                        
+                        this.largeMessage = largeMessage;
+                        largeMessage.supply(frame);
+                        return true;
+                    }
+                }
+                else
+                {
+                    long delay = requestRateLimiter.reserveAndGetDelay(RATE_LIMITER_DELAY_UNIT);
+
+                    if (delay > 0)
+                    {
+                        this.largeMessage = largeMessage;
+                        largeMessage.markBackpressure(Overload.REQUESTS);
+                        largeMessage.supply(frame);
+
+                        if (decoder.isActive())
+                            ClientMetrics.instance.pauseConnection();
+
+                        scheduleConnectionWakeupTask(delay, RATE_LIMITER_DELAY_UNIT);
+                        return false;
+                    }
+                }
+            }
+            
             this.largeMessage = largeMessage;
             largeMessage.supply(frame);
             return true;
@@ -454,6 +569,31 @@
         return channel.id().asShortText();
     }
 
+    private void scheduleConnectionWakeupTask(long waitLength, TimeUnit unit)
+    {
+        channel.eventLoop().schedule(() ->
+                                     {
+                                         try
+                                         {
+                                             // We might have already reactivated via another wake task.
+                                             if (!decoder.isActive())
+                                             {
+                                                 decoder.reactivate();
+
+                                                 // Only update the relevant metric if we've actually activated.
+                                                 if (decoder.isActive())
+                                                     ClientMetrics.instance.unpauseConnection();
+                                             }
+                                         }
+                                         catch (Throwable t)
+                                         {
+                                             fatalExceptionCaught(t);
+                                         }
+                                     },
+                                     waitLength,
+                                     unit);
+    }
+
     @SuppressWarnings("BooleanMethodIsAlwaysInverted")
     private boolean acquireCapacityAndQueueOnFailure(Envelope.Header header, Limit endpointReserve, Limit globalReserve)
     {
@@ -515,7 +655,8 @@
     {
         private static final long EXPIRES_AT = Long.MAX_VALUE;
 
-        private boolean overloaded = false;
+        private Overload overload = Overload.NONE;
+        private Overload backpressure = Overload.NONE;
 
         private LargeMessage(Envelope.Header header)
         {
@@ -541,19 +682,22 @@
          * so we must ensure that subsequent frames are consumed from the channel. At that
          * point an error response is returned to the client, rather than processing the message.
          */
-        private void markOverloaded()
+        private void markOverloaded(Overload overload)
         {
-            overloaded = true;
+            this.overload = overload;
+        }
+
+        private void markBackpressure(Overload backpressure)
+        {
+            this.backpressure = backpressure;
         }
 
         protected void onComplete()
         {
-            if (overloaded)
-                handleErrorAndRelease(new OverloadedException("Server is in overloaded state. " +
-                                                              "Cannot accept more requests at this point"), header);
+            if (overload != Overload.NONE)
+                handleErrorAndRelease(buildOverloadedException(endpointReserveCapacity, globalReserveCapacity, overload), header);
             else if (!isCorrupt)
-                processRequest(assembleFrame());
-
+                processRequest(assembleFrame(), backpressure);
         }
 
         protected void abort()
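
For illustration only: the handler above distinguishes two overload responses. With THROW_ON_OVERLOAD a request is rejected via tryReserve(), which does not consume a permit on failure; otherwise a permit is always taken with reserveAndGetDelay() and frame processing is paused for the returned delay. A compressed sketch of that decision; everything except the two limiter calls is hypothetical.

    import java.util.concurrent.TimeUnit;
    import org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter;

    class OverloadModeSketch
    {
        private final NonBlockingRateLimiter limiter = new NonBlockingRateLimiter(1000); // illustrative rate

        // THROW_ON_OVERLOAD: reject immediately; a rejected request does not consume a permit.
        void admitOrFail()
        {
            if (!limiter.tryReserve())
                throw new IllegalStateException("rate limit breached"); // stands in for OverloadedException
        }

        // Backpressure mode: always take a permit, then pause reading from the channel
        // for the returned delay (0 means no pause is needed).
        long admitWithBackpressure()
        {
            return limiter.reserveAndGetDelay(TimeUnit.NANOSECONDS);
        }
    }
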
diff --git a/src/java/org/apache/cassandra/transport/ClientResourceLimits.java b/src/java/org/apache/cassandra/transport/ClientResourceLimits.java
index 17a6e59..e38cfdb 100644
--- a/src/java/org/apache/cassandra/transport/ClientResourceLimits.java
+++ b/src/java/org/apache/cassandra/transport/ClientResourceLimits.java
@@ -24,6 +24,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,6 +43,10 @@
     private static final AbstractMessageHandler.WaitQueue GLOBAL_QUEUE = AbstractMessageHandler.WaitQueue.global(GLOBAL_LIMIT);
     private static final ConcurrentMap<InetAddress, Allocator> PER_ENDPOINT_ALLOCATORS = new ConcurrentHashMap<>();
 
+    public static final NonBlockingRateLimiter GLOBAL_REQUEST_LIMITER = new NonBlockingRateLimiter(getNativeTransportMaxRequestsPerSecond());
+
+    public enum Overload { NONE, REQUESTS, BYTES_IN_FLIGHT }
+    
     public static Allocator getAllocatorForEndpoint(InetAddress endpoint)
     {
         while (true)
@@ -56,12 +61,12 @@
 
     public static long getGlobalLimit()
     {
-        return DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytes();
+        return DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightInBytes();
     }
 
     public static void setGlobalLimit(long newLimit)
     {
-        DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytes(newLimit);
+        DatabaseDescriptor.setNativeTransportConcurrentRequestDataInFlightInBytes(newLimit);
         long existingLimit = GLOBAL_LIMIT.setLimit(getGlobalLimit());
         logger.info("Changed native_max_transport_requests_in_bytes from {} to {}", existingLimit, newLimit);
     }
@@ -73,13 +78,13 @@
 
     public static long getEndpointLimit()
     {
-        return DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp();
+        return DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes();
     }
 
     public static void setEndpointLimit(long newLimit)
     {
-        long existingLimit = DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp();
-        DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytesPerIp(newLimit); // ensure new instances get the new limit
+        long existingLimit = DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes();
+        DatabaseDescriptor.setNativeTransportMaxRequestDataInFlightPerIpInBytes(newLimit); // ensure new instances get the new limit
         for (Allocator allocator : PER_ENDPOINT_ALLOCATORS.values())
             existingLimit = allocator.endpointAndGlobal.endpoint().setLimit(newLimit);
         logger.info("Changed native_max_transport_requests_in_bytes_per_ip from {} to {}", existingLimit, newLimit);
@@ -95,6 +100,19 @@
         return histogram.getSnapshot();
     }
 
+    public static int getNativeTransportMaxRequestsPerSecond()
+    {
+        return DatabaseDescriptor.getNativeTransportMaxRequestsPerSecond();
+    }
+
+    public static void setNativeTransportMaxRequestsPerSecond(int newPerSecond)
+    {
+        int existingPerSecond = getNativeTransportMaxRequestsPerSecond();
+        DatabaseDescriptor.setNativeTransportMaxRequestsPerSecond(newPerSecond);
+        GLOBAL_REQUEST_LIMITER.setRate(newPerSecond);
+        logger.info("Changed native_transport_max_requests_per_second from {} to {}", existingPerSecond, newPerSecond);
+    }
+
     /**
      * This will recompute the ip usage histo on each query of the snapshot when requested instead of trying to keep
      * a histogram up to date with each request
@@ -166,8 +184,8 @@
          *
          * @param amount number permits to allocate
          * @return outcome SUCCESS if the allocation was successful. In the case of failure,
-         * either INSUFFICIENT_GLOBAL or INSUFFICIENT_ENPOINT to indicate which reserve rejected
-         * the allocation request.
+         * either INSUFFICIENT_GLOBAL or INSUFFICIENT_ENDPOINT to indicate which 
+         * reserve rejected the allocation request.
          */
         ResourceLimits.Outcome tryAllocate(long amount)
         {
@@ -211,19 +229,18 @@
            return endpointAndGlobal.global().using();
         }
 
+        @Override
         public String toString()
         {
-            return String.format("InflightEndpointRequestPayload: %d/%d, InflightOverallRequestPayload: %d/%d",
-                                 endpointAndGlobal.endpoint().using(),
-                                 endpointAndGlobal.endpoint().limit(),
-                                 endpointAndGlobal.global().using(),
-                                 endpointAndGlobal.global().limit());
+            return String.format("Using %d/%d bytes of endpoint limit and %d/%d bytes of global limit.",
+                                 endpointAndGlobal.endpoint().using(), endpointAndGlobal.endpoint().limit(),
+                                 endpointAndGlobal.global().using(), endpointAndGlobal.global().limit());
         }
     }
 
     /**
      * Used in protocol V5 and later by the AbstractMessageHandler/CQLMessageHandler hierarchy.
-     * This hides the allocate/tryAllocate/release methods from EndpointResourceLimits and exposes
+     * This hides the allocate/tryAllocate/release methods from {@link ClientResourceLimits} and exposes
      * the endpoint and global limits, along with their corresponding
      * {@link org.apache.cassandra.net.AbstractMessageHandler.WaitQueue} directly.
      * Provided as an interface and single implementation for testing (see CQLConnectionTest)
@@ -234,9 +251,10 @@
         AbstractMessageHandler.WaitQueue globalWaitQueue();
         ResourceLimits.Limit endpointLimit();
         AbstractMessageHandler.WaitQueue endpointWaitQueue();
+        NonBlockingRateLimiter requestRateLimiter();
         void release();
 
-        static class Default implements ResourceProvider
+        class Default implements ResourceProvider
         {
             private final Allocator limits;
 
@@ -265,6 +283,11 @@
                 return limits.waitQueue;
             }
 
+            public NonBlockingRateLimiter requestRateLimiter()
+            {
+                return GLOBAL_REQUEST_LIMITER;
+            }
+            
             public void release()
             {
                 limits.release();
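
For illustration only: because GLOBAL_REQUEST_LIMITER is shared by every ResourceProvider, the setter added above updates both the stored configuration value and the live limiter, so a change takes effect for existing connections. A hedged usage sketch; the calling context below is hypothetical.

    import org.apache.cassandra.transport.ClientResourceLimits;

    class NativeTransportTuningSketch
    {
        // Hypothetical operator hook: halve the global native transport request rate at runtime.
        static void halveRequestRate()
        {
            int current = ClientResourceLimits.getNativeTransportMaxRequestsPerSecond();
            ClientResourceLimits.setNativeTransportMaxRequestsPerSecond(Math.max(1, current / 2));
        }
    }
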
diff --git a/src/java/org/apache/cassandra/transport/ConnectedClient.java b/src/java/org/apache/cassandra/transport/ConnectedClient.java
index ca100f2..a4af32f 100644
--- a/src/java/org/apache/cassandra/transport/ConnectedClient.java
+++ b/src/java/org/apache/cassandra/transport/ConnectedClient.java
@@ -18,9 +18,11 @@
 package org.apache.cassandra.transport;
 
 import java.net.InetSocketAddress;
+import java.util.Collections;
 import java.util.Map;
 import java.util.Optional;
 
+import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableMap;
 
 import io.netty.handler.ssl.SslHandler;
@@ -32,6 +34,7 @@
     public static final String ADDRESS = "address";
     public static final String USER = "user";
     public static final String VERSION = "version";
+    public static final String CLIENT_OPTIONS = "clientOptions";
     public static final String DRIVER_NAME = "driverName";
     public static final String DRIVER_VERSION = "driverVersion";
     public static final String REQUESTS = "requests";
@@ -83,6 +86,11 @@
         return state().getDriverVersion();
     }
 
+    public Optional<Map<String,String>> clientOptions()
+    {
+        return state().getClientOptions();
+    }
+
     public long requestCount()
     {
         return connection.requests.getCount();
@@ -132,6 +140,9 @@
                            .put(ADDRESS, remoteAddress().toString())
                            .put(USER, username().orElse(UNDEFINED))
                            .put(VERSION, String.valueOf(protocolVersion()))
+                           .put(CLIENT_OPTIONS, Joiner.on(", ")
+                                                      .withKeyValueSeparator("=")
+                                                      .join(clientOptions().orElse(Collections.emptyMap())))
                            .put(DRIVER_NAME, driverName().orElse(UNDEFINED))
                            .put(DRIVER_VERSION, driverVersion().orElse(UNDEFINED))
                            .put(REQUESTS, String.valueOf(requestCount()))
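
For illustration only: the CLIENT_OPTIONS entry above is rendered with Guava's MapJoiner. A self-contained sketch of the same formatting; the option names and values below are made up.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import com.google.common.base.Joiner;

    public class ClientOptionsFormatSketch
    {
        public static void main(String[] args)
        {
            Map<String, String> options = new LinkedHashMap<>();
            options.put("CQL_VERSION", "3.4.5");          // illustrative values only
            options.put("DRIVER_NAME", "example-driver");

            // Prints: CQL_VERSION=3.4.5, DRIVER_NAME=example-driver
            System.out.println(Joiner.on(", ").withKeyValueSeparator("=").join(options));
        }
    }
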
diff --git a/src/java/org/apache/cassandra/transport/Dispatcher.java b/src/java/org/apache/cassandra/transport/Dispatcher.java
index 05b55e8..da79c3d 100644
--- a/src/java/org/apache/cassandra/transport/Dispatcher.java
+++ b/src/java/org/apache/cassandra/transport/Dispatcher.java
@@ -20,29 +20,41 @@
 
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 
+import com.google.common.base.Predicate;
+import org.apache.cassandra.metrics.ClientMetrics;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import io.netty.channel.Channel;
 import io.netty.channel.EventLoop;
 import io.netty.util.AttributeKey;
-import org.apache.cassandra.concurrent.LocalAwareExecutorService;
+import org.apache.cassandra.concurrent.LocalAwareExecutorPlus;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.net.FrameEncoder;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.service.reads.thresholds.CoordinatorWarnings;
+import org.apache.cassandra.transport.ClientResourceLimits.Overload;
 import org.apache.cassandra.transport.Flusher.FlushItem;
 import org.apache.cassandra.transport.messages.ErrorMessage;
 import org.apache.cassandra.transport.messages.EventMessage;
 import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.NoSpamLogger;
 
 import static org.apache.cassandra.concurrent.SharedExecutorPool.SHARED;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class Dispatcher
 {
-    private static final LocalAwareExecutorService requestExecutor = SHARED.newExecutor(DatabaseDescriptor.getNativeTransportMaxThreads(),
-                                                                                        DatabaseDescriptor::setNativeTransportMaxThreads,
-                                                                                        "transport",
-                                                                                        "Native-Transport-Requests");
+    private static final Logger logger = LoggerFactory.getLogger(Dispatcher.class);
+    
+    private static final LocalAwareExecutorPlus requestExecutor = SHARED.newExecutor(DatabaseDescriptor.getNativeTransportMaxThreads(),
+                                                                                     DatabaseDescriptor::setNativeTransportMaxThreads,
+                                                                                     "transport",
+                                                                                     "Native-Transport-Requests");
 
     private static final ConcurrentMap<EventLoop, Flusher> flusherLookup = new ConcurrentHashMap<>();
     private final boolean useLegacyFlusher;
@@ -65,25 +77,53 @@
         this.useLegacyFlusher = useLegacyFlusher;
     }
 
-    public void dispatch(Channel channel, Message.Request request, FlushItemConverter forFlusher)
+    public void dispatch(Channel channel, Message.Request request, FlushItemConverter forFlusher, Overload backpressure)
     {
-        requestExecutor.submit(() -> processRequest(channel, request, forFlusher));
+        requestExecutor.submit(() -> processRequest(channel, request, forFlusher, backpressure));
+        ClientMetrics.instance.markRequestDispatched();
     }
 
     /**
-     * Note: this method may be executed on the netty event loop, during initial protocol negotiation
+     * Note: this method may be executed on the netty event loop, during initial protocol negotiation; the caller is
+     * responsible for cleaning up any global or thread-local state (e.g. tracing and client warnings).
      */
-    static Message.Response processRequest(ServerConnection connection, Message.Request request)
+    private static Message.Response processRequest(ServerConnection connection, Message.Request request, Overload backpressure)
     {
-        long queryStartNanoTime = System.nanoTime();
+        long queryStartNanoTime = nanoTime();
         if (connection.getVersion().isGreaterOrEqualTo(ProtocolVersion.V4))
             ClientWarn.instance.captureWarnings();
 
+        // even if ClientWarn is disabled, still setup CoordinatorTrackWarnings, as this will populate metrics and
+        // emit logs on the server; the warnings will just be ignored and not sent to the client
+        if (request.isTrackable())
+            CoordinatorWarnings.init();
+
+        if (backpressure == Overload.REQUESTS)
+        {
+            String message = String.format("Request breached global limit of %d requests/second and triggered backpressure.",
+                                           ClientResourceLimits.getNativeTransportMaxRequestsPerSecond());
+
+            NoSpamLogger.log(logger, NoSpamLogger.Level.INFO, 1, TimeUnit.MINUTES, message);
+            ClientWarn.instance.warn(message);
+        }
+        else if (backpressure == Overload.BYTES_IN_FLIGHT)
+        {
+            String message = String.format("Request breached limit(s) on bytes in flight (Endpoint: %d, Global: %d) and triggered backpressure.",
+                                           ClientResourceLimits.getEndpointLimit(), ClientResourceLimits.getGlobalLimit());
+
+            NoSpamLogger.log(logger, NoSpamLogger.Level.INFO, 1, TimeUnit.MINUTES, message);
+            ClientWarn.instance.warn(message);
+        }
+
         QueryState qstate = connection.validateNewMessage(request.type, connection.getVersion());
 
         Message.logger.trace("Received: {}, v={}", request, connection.getVersion());
         connection.requests.inc();
         Message.Response response = request.execute(qstate, queryStartNanoTime);
+
+        if (request.isTrackable())
+            CoordinatorWarnings.done();
+
         response.setStreamId(request.getStreamId());
         response.setWarnings(ClientWarn.instance.getWarnings());
         response.attach(connection);
@@ -92,33 +132,42 @@
     }
 
     /**
-     * Note: this method is not expected to execute on the netty event loop.
+     * Note: this method may be executed on the netty event loop.
      */
-    void processRequest(Channel channel, Message.Request request, FlushItemConverter forFlusher)
+    static Message.Response processRequest(Channel channel, Message.Request request, Overload backpressure)
     {
-        final Message.Response response;
-        final ServerConnection connection;
-        FlushItem<?> toFlush;
         try
         {
-            assert request.connection() instanceof ServerConnection;
-            connection = (ServerConnection) request.connection();
-            response = processRequest(connection, request);
-            toFlush = forFlusher.toFlushItem(channel, request, response);
-            Message.logger.trace("Responding: {}, v={}", response, connection.getVersion());
+            return processRequest((ServerConnection) request.connection(), request, backpressure);
         }
         catch (Throwable t)
         {
             JVMStabilityInspector.inspectThrowable(t);
-            ExceptionHandlers.UnexpectedChannelExceptionHandler handler = new ExceptionHandlers.UnexpectedChannelExceptionHandler(channel, true);
+
+            if (request.isTrackable())
+                CoordinatorWarnings.done();
+
+            Predicate<Throwable> handler = ExceptionHandlers.getUnexpectedExceptionHandler(channel, true);
             ErrorMessage error = ErrorMessage.fromException(t, handler);
             error.setStreamId(request.getStreamId());
-            toFlush = forFlusher.toFlushItem(channel, request, error);
+            error.setWarnings(ClientWarn.instance.getWarnings());
+            return error;
         }
         finally
         {
+            CoordinatorWarnings.reset();
             ClientWarn.instance.resetWarnings();
         }
+    }
+
+    /**
+     * Note: this method is not expected to execute on the netty event loop.
+     */
+    void processRequest(Channel channel, Message.Request request, FlushItemConverter forFlusher, Overload backpressure)
+    {
+        Message.Response response = processRequest(channel, request, backpressure);
+        FlushItem<?> toFlush = forFlusher.toFlushItem(channel, request, response);
+        Message.logger.trace("Responding: {}, v={}", response, request.connection().getVersion());
         flush(toFlush);
     }
 
@@ -152,7 +201,7 @@
      * for delivering events to registered clients is dependent on protocol version and the configuration
      * of the pipeline. For v5 and newer connections, the event message is encoded into an Envelope,
      * wrapped in a FlushItem and then delivered via the pipeline's flusher, in a similar way to
-     * a Response returned from {@link #processRequest(Channel, Message.Request, FlushItemConverter)}.
+     * a Response returned from {@link #processRequest(Channel, Message.Request, FlushItemConverter, Overload)}.
      * It's worth noting that events are not generally fired as a direct response to a client request,
      * so this flush item has a null request attribute. The dispatcher itself is created when the
      * pipeline is first configured during protocol negotiation and is attached to the channel for
diff --git a/src/java/org/apache/cassandra/transport/Envelope.java b/src/java/org/apache/cassandra/transport/Envelope.java
index 6b15a2a..99c6e13 100644
--- a/src/java/org/apache/cassandra/transport/Envelope.java
+++ b/src/java/org/apache/cassandra/transport/Envelope.java
@@ -353,7 +353,19 @@
             int firstByte = buffer.getByte(idx++);
             Message.Direction direction = Message.Direction.extractFromVersion(firstByte);
             int versionNum = firstByte & PROTOCOL_VERSION_MASK;
-            ProtocolVersion version = ProtocolVersion.decode(versionNum, DatabaseDescriptor.getNativeTransportAllowOlderProtocols());
+
+            ProtocolVersion version;
+            
+            try
+            {
+                version = ProtocolVersion.decode(versionNum, DatabaseDescriptor.getNativeTransportAllowOlderProtocols());
+            }
+            catch (ProtocolException e)
+            {
+                // Skip the remaining useless bytes. Otherwise the channel closing logic may try to decode again. 
+                buffer.skipBytes(readableBytes);
+                throw e;
+            }
 
             // Wait until we have the complete header
             if (readableBytes < Header.LENGTH)
diff --git a/src/java/org/apache/cassandra/transport/Event.java b/src/java/org/apache/cassandra/transport/Event.java
index c62a73f..5e8e201 100644
--- a/src/java/org/apache/cassandra/transport/Event.java
+++ b/src/java/org/apache/cassandra/transport/Event.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.transport;
 
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.Iterator;
 import java.util.List;
@@ -116,17 +115,17 @@
 
         public static TopologyChange newNode(InetAddressAndPort address)
         {
-            return new TopologyChange(Change.NEW_NODE, new InetSocketAddress(address.address, address.port));
+            return new TopologyChange(Change.NEW_NODE, new InetSocketAddress(address.getAddress(), address.getPort()));
         }
 
         public static TopologyChange removedNode(InetAddressAndPort address)
         {
-            return new TopologyChange(Change.REMOVED_NODE, new InetSocketAddress(address.address, address.port));
+            return new TopologyChange(Change.REMOVED_NODE, new InetSocketAddress(address.getAddress(), address.getPort()));
         }
 
         public static TopologyChange movedNode(InetAddressAndPort address)
         {
-            return new TopologyChange(Change.MOVED_NODE, new InetSocketAddress(address.address, address.port));
+            return new TopologyChange(Change.MOVED_NODE, new InetSocketAddress(address.getAddress(), address.getPort()));
         }
 
         // Assumes the type has already been deserialized
@@ -187,12 +186,12 @@
 
         public static StatusChange nodeUp(InetAddressAndPort address)
         {
-            return new StatusChange(Status.UP, new InetSocketAddress(address.address, address.port));
+            return new StatusChange(Status.UP, new InetSocketAddress(address.getAddress(), address.getPort()));
         }
 
         public static StatusChange nodeDown(InetAddressAndPort address)
         {
-            return new StatusChange(Status.DOWN, new InetSocketAddress(address.address, address.port));
+            return new StatusChange(Status.DOWN, new InetSocketAddress(address.getAddress(), address.getPort()));
         }
 
         // Assumes the type has already been deserialized
diff --git a/src/java/org/apache/cassandra/transport/ExceptionHandlers.java b/src/java/org/apache/cassandra/transport/ExceptionHandlers.java
index 1c5c976..0039bc0 100644
--- a/src/java/org/apache/cassandra/transport/ExceptionHandlers.java
+++ b/src/java/org/apache/cassandra/transport/ExceptionHandlers.java
@@ -19,6 +19,7 @@
 package org.apache.cassandra.transport;
 
 import java.io.IOException;
+import java.net.SocketAddress;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
@@ -33,6 +34,8 @@
 import io.netty.channel.ChannelInboundHandlerAdapter;
 import io.netty.channel.ChannelPromise;
 import io.netty.channel.unix.Errors;
+import org.apache.cassandra.exceptions.OverloadedException;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.metrics.ClientMetrics;
 import org.apache.cassandra.net.FrameEncoder;
 import org.apache.cassandra.transport.messages.ErrorMessage;
@@ -67,7 +70,7 @@
             // Provide error message to client in case channel is still open
             if (ctx.channel().isOpen())
             {
-                UnexpectedChannelExceptionHandler handler = new UnexpectedChannelExceptionHandler(ctx.channel(), false);
+                Predicate<Throwable> handler = getUnexpectedExceptionHandler(ctx.channel(), false);
                 ErrorMessage errorMessage = ErrorMessage.fromException(cause, handler);
                 Envelope response = errorMessage.encode(version);
                 FrameEncoder.Payload payload = allocator.allocate(true, CQLMessageHandler.envelopeSize(response.header));
@@ -88,27 +91,16 @@
                     JVMStabilityInspector.inspectThrowable(cause);
                 }
             }
-            if (Throwables.anyCauseMatches(cause, t -> t instanceof ProtocolException))
+            
+            if (DatabaseDescriptor.getClientErrorReportingExclusions().contains(ctx.channel().remoteAddress()))
             {
-                // if any ProtocolExceptions is not silent, then handle
-                if (Throwables.anyCauseMatches(cause, t -> t instanceof ProtocolException && !((ProtocolException) t).isSilent()))
-                {
-                    ClientMetrics.instance.markProtocolException();
-                    // since protocol exceptions are expected to be client issues, not logging stack trace
-                    // to avoid spamming the logs once a bad client shows up
-                    NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES, "Protocol exception with client networking: " + cause.getMessage());
-                }
+                // Sometimes it is desirable to ignore exceptions from specific IPs, such as when security scans
+                // are running. To avoid polluting the logs and metrics, metrics are not updated when the IP is in
+                // the exclusion list.
+                logger.debug("Excluding client exception for {}; address contained in client_error_reporting_exclusions", ctx.channel().remoteAddress(), cause);
+                return;
             }
-            else if (Throwables.anyCauseMatches(cause, t -> t instanceof Errors.NativeIoException))
-            {
-                ClientMetrics.instance.markUnknownException();
-                logger.trace("Native exception in client networking", cause);
-            }
-            else
-            {
-                ClientMetrics.instance.markUnknownException();
-                logger.warn("Unknown exception in client networking", cause);
-            }
+            logClientNetworkingExceptions(cause);
         }
 
         private static boolean isFatal(Throwable cause)
@@ -118,6 +110,49 @@
         }
     }
 
+    static void logClientNetworkingExceptions(Throwable cause)
+    {
+        if (Throwables.anyCauseMatches(cause, t -> t instanceof ProtocolException))
+        {
+            // if any ProtocolException is not silent, then handle it
+            if (Throwables.anyCauseMatches(cause, t -> t instanceof ProtocolException && !((ProtocolException) t).isSilent()))
+            {
+                ClientMetrics.instance.markProtocolException();
+                // since protocol exceptions are expected to be client issues, not logging stack trace
+                // to avoid spamming the logs once a bad client shows up
+                NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES, "Protocol exception with client networking: " + cause.getMessage());
+            }
+        }
+        else if (Throwables.anyCauseMatches(cause, t -> t instanceof OverloadedException))
+        {
+            // Once the threshold for overload is breached, it will very likely spam the logs...
+            NoSpamLogger.log(logger, NoSpamLogger.Level.INFO, 1, TimeUnit.MINUTES, cause.getMessage());
+        }
+        else if (Throwables.anyCauseMatches(cause, t -> t instanceof Errors.NativeIoException))
+        {
+            ClientMetrics.instance.markUnknownException();
+            logger.trace("Native exception in client networking", cause);
+        }
+        else
+        {
+            ClientMetrics.instance.markUnknownException();
+            logger.warn("Unknown exception in client networking", cause);
+        }
+    }
+
+    static Predicate<Throwable> getUnexpectedExceptionHandler(Channel channel, boolean alwaysLogAtError)
+    {
+        SocketAddress address = channel.remoteAddress();
+        if (DatabaseDescriptor.getClientErrorReportingExclusions().contains(address))
+        {
+            return cause -> {
+                logger.debug("Excluding client exception for {}; address contained in client_error_reporting_exclusions", address, cause);
+                return true;
+            };
+        }
+        return new UnexpectedChannelExceptionHandler(channel, alwaysLogAtError);
+    }
+
     /**
      * Include the channel info in the logged information for unexpected errors, and (if {@link #alwaysLogAtError} is
      * false then choose the log level based on the type of exception (some are clearly client issues and shouldn't be
diff --git a/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java b/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
index bc53fd2..75cb72e 100644
--- a/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
+++ b/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.cassandra.transport.ClientResourceLimits.Overload;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -147,7 +148,7 @@
                         promise = new VoidChannelPromise(ctx.channel(), false);
                     }
 
-                    final Message.Response response = Dispatcher.processRequest((ServerConnection) connection, startup);
+                    final Message.Response response = Dispatcher.processRequest(ctx.channel(), startup, Overload.NONE);
                     outbound = response.encode(inbound.header.version);
                     ctx.writeAndFlush(outbound, promise);
                     logger.trace("Configured pipeline: {}", ctx.pipeline());
diff --git a/src/java/org/apache/cassandra/transport/Message.java b/src/java/org/apache/cassandra/transport/Message.java
index 0284489..75c997e 100644
--- a/src/java/org/apache/cassandra/transport/Message.java
+++ b/src/java/org/apache/cassandra/transport/Message.java
@@ -23,7 +23,6 @@
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -37,7 +36,9 @@
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.transport.messages.*;
 import org.apache.cassandra.service.QueryState;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * A message from the CQL binary protocol.
@@ -209,24 +210,35 @@
                 throw new IllegalArgumentException();
         }
 
+        /**
+         * @return true if the execution of this {@link Request} should be recorded in a tracing session
+         */
         protected boolean isTraceable()
         {
             return false;
         }
 
+        /**
+         * @return true if warnings should be tracked and aborts enforced for resource limits on this {@link Request}
+         */
+        protected boolean isTrackable()
+        {
+            return false;
+        }
+
         protected abstract Response execute(QueryState queryState, long queryStartNanoTime, boolean traceRequest);
 
         public final Response execute(QueryState queryState, long queryStartNanoTime)
         {
             boolean shouldTrace = false;
-            UUID tracingSessionId = null;
+            TimeUUID tracingSessionId = null;
 
             if (isTraceable())
             {
                 if (isTracingRequested())
                 {
                     shouldTrace = true;
-                    tracingSessionId = UUIDGen.getTimeUUID();
+                    tracingSessionId = nextTimeUUID();
                     Tracing.instance.newSession(tracingSessionId, getCustomPayload());
                 }
                 else if (StorageService.instance.shouldTraceProbablistically())
@@ -266,7 +278,7 @@
 
     public static abstract class Response extends Message
     {
-        protected UUID tracingId;
+        protected TimeUUID tracingId;
         protected List<String> warnings;
 
         protected Response(Type type)
@@ -277,13 +289,13 @@
                 throw new IllegalArgumentException();
         }
 
-        Message setTracingId(UUID tracingId)
+        Message setTracingId(TimeUUID tracingId)
         {
             this.tracingId = tracingId;
             return this;
         }
 
-        UUID getTracingId()
+        TimeUUID getTracingId()
         {
             return tracingId;
         }
@@ -312,16 +324,23 @@
             if (this instanceof Response)
             {
                 Response message = (Response)this;
-                UUID tracingId = message.getTracingId();
+                TimeUUID tracingId = message.getTracingId();
                 Map<String, ByteBuffer> customPayload = message.getCustomPayload();
                 if (tracingId != null)
-                    messageSize += CBUtil.sizeOfUUID(tracingId);
+                    messageSize += TimeUUID.sizeInBytes();
                 List<String> warnings = message.getWarnings();
                 if (warnings != null)
                 {
+                    // if Cassandra populates warnings for a protocol version below v4, this is a bug
                     if (version.isSmallerThan(ProtocolVersion.V4))
-                        throw new ProtocolException("Must not send frame with WARNING flag for native protocol version < 4");
-                    messageSize += CBUtil.sizeOfStringList(warnings);
+                    {
+                        logger.warn("Warnings present in message with version less than v4 (it is {}); warnings={}", version, warnings);
+                        warnings = null;
+                    }
+                    else
+                    {
+                        messageSize += CBUtil.sizeOfStringList(warnings);
+                    }
                 }
                 if (customPayload != null)
                 {
@@ -398,7 +417,7 @@
             boolean isCustomPayload = inbound.header.flags.contains(Envelope.Header.Flag.CUSTOM_PAYLOAD);
             boolean hasWarning = inbound.header.flags.contains(Envelope.Header.Flag.WARNING);
 
-            UUID tracingId = isRequest || !isTracing ? null : CBUtil.readUUID(inbound.body);
+            TimeUUID tracingId = isRequest || !isTracing ? null : CBUtil.readTimeUUID(inbound.body);
             List<String> warnings = isRequest || !hasWarning ? null : CBUtil.readStringList(inbound.body);
             Map<String, ByteBuffer> customPayload = !isCustomPayload ? null : CBUtil.readBytesMap(inbound.body);
 
diff --git a/src/java/org/apache/cassandra/transport/PipelineConfigurator.java b/src/java/org/apache/cassandra/transport/PipelineConfigurator.java
index 0250f13..81ff136 100644
--- a/src/java/org/apache/cassandra/transport/PipelineConfigurator.java
+++ b/src/java/org/apache/cassandra/transport/PipelineConfigurator.java
@@ -44,6 +44,7 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.net.*;
+import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.security.SSLFactory;
 import org.apache.cassandra.transport.messages.StartupMessage;
 
@@ -163,7 +164,7 @@
                 return channel -> {
                     SslContext sslContext = SSLFactory.getOrCreateSslContext(encryptionOptions,
                                                                              encryptionOptions.require_client_auth,
-                                                                             SSLFactory.SocketType.SERVER);
+                                                                             ISslContextFactory.SocketType.SERVER);
 
                     channel.pipeline().addFirst(SSL_HANDLER, new ByteToMessageDecoder()
                     {
@@ -197,7 +198,7 @@
                 return channel -> {
                     SslContext sslContext = SSLFactory.getOrCreateSslContext(encryptionOptions,
                                                                              encryptionOptions.require_client_auth,
-                                                                             SSLFactory.SocketType.SERVER);
+                                                                             ISslContextFactory.SocketType.SERVER);
                     channel.pipeline().addFirst(SSL_HANDLER, sslContext.newHandler(channel.alloc()));
                 };
             default:
diff --git a/src/java/org/apache/cassandra/transport/PreV5Handlers.java b/src/java/org/apache/cassandra/transport/PreV5Handlers.java
index cec8edd..ea850cc 100644
--- a/src/java/org/apache/cassandra/transport/PreV5Handlers.java
+++ b/src/java/org/apache/cassandra/transport/PreV5Handlers.java
@@ -19,8 +19,11 @@
 package org.apache.cassandra.transport;
 
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
+import com.google.common.base.Predicate;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.transport.ClientResourceLimits.Overload;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,10 +42,9 @@
 import org.apache.cassandra.net.ResourceLimits;
 import org.apache.cassandra.transport.messages.ErrorMessage;
 import org.apache.cassandra.utils.JVMStabilityInspector;
-import org.apache.cassandra.utils.NoSpamLogger;
-import org.apache.cassandra.utils.Throwables;
 
-import static org.apache.cassandra.transport.Message.logger;
+import static org.apache.cassandra.transport.CQLMessageHandler.RATE_LIMITER_DELAY_UNIT;
+import static org.apache.cassandra.transport.ClientResourceLimits.GLOBAL_REQUEST_LIMITER;
 
 public class PreV5Handlers
 {
@@ -63,7 +65,9 @@
          * Note: should only be accessed while on the netty event loop.
          */
         private long channelPayloadBytesInFlight;
-        private boolean paused;
+        
+        /** The cause of the current connection pause, or {@link Overload#NONE} if it is unpaused. */
+        private Overload backpressure = Overload.NONE;
 
         LegacyDispatchHandler(Dispatcher dispatcher, ClientResourceLimits.Allocator endpointPayloadTracker)
         {
@@ -71,11 +75,12 @@
             this.endpointPayloadTracker = endpointPayloadTracker;
         }
 
-        protected void channelRead0(ChannelHandlerContext ctx, Message.Request request) throws Exception
+        protected void channelRead0(ChannelHandlerContext ctx, Message.Request request)
         {
-            // if we decide to handle this message, process it outside of the netty event loop
-            if (shouldHandleRequest(ctx, request))
-                dispatcher.dispatch(ctx.channel(), request, this::toFlushItem);
+            // The only reason we won't process this message is if checkLimits() throws an OverloadedException.
+            // (i.e. Even if backpressure is applied, the current request is allowed to finish.)
+            checkLimits(ctx, request);
+            dispatcher.dispatch(ctx.channel(), request, this::toFlushItem, backpressure);
         }
 
         // Acts as a Dispatcher.FlushItemConverter
@@ -95,76 +100,143 @@
 
             // since the request has been processed, decrement inflight payload at channel, endpoint and global levels
             channelPayloadBytesInFlight -= itemSize;
-            ResourceLimits.Outcome endpointGlobalReleaseOutcome = endpointPayloadTracker.release(itemSize);
+            boolean globalInFlightBytesBelowLimit = endpointPayloadTracker.release(itemSize) == ResourceLimits.Outcome.BELOW_LIMIT;
 
-            // now check to see if we need to reenable the channel's autoRead.
-            // If the current payload side is zero, we must reenable autoread as
-            // 1) we allow no other thread/channel to do it, and
-            // 2) there's no other events following this one (becuase we're at zero bytes in flight),
-            // so no successive to trigger the other clause in this if-block
+            // Now check to see if we need to reenable the channel's autoRead.
             //
-            // note: this path is only relevant when part of a pre-V5 pipeline, as only in this case is
+            // If the current payload bytes in flight is zero, we must reenable autoread as
+            // 1) we allow no other thread/channel to do it, and
+            // 2) there are no other events following this one (because we're at zero bytes in flight),
+            // so there is no successive event to trigger the other clause in this if-block.
+            //
+            // The only exception to this is if the global request rate limit has been breached, which means
+            // we'll have to wait until a scheduled wakeup task unpauses the connection.
+            //
+            // Note: This path is only relevant when part of a pre-V5 pipeline, as only in this case is
             // paused ever set to true. In pipelines configured for V5 or later, backpressure and control
             // over the inbound pipeline's autoread status are handled by the FrameDecoder/FrameProcessor.
             ChannelConfig config = item.channel.config();
-            if (paused && (channelPayloadBytesInFlight == 0 || endpointGlobalReleaseOutcome == ResourceLimits.Outcome.BELOW_LIMIT))
+
+            if (backpressure == Overload.BYTES_IN_FLIGHT && (channelPayloadBytesInFlight == 0 || globalInFlightBytesBelowLimit))
             {
-                paused = false;
+                unpauseConnection(config);
+            }
+        }
+
+        /**
+         * Checks limits on bytes in flight and the request rate limiter (if enabled) to determine whether to drop a
+         * request or trigger backpressure and pause the connection.
+         * <p>
+         * The check for inflight payload to potentially discard the request should have been ideally in one of the
+         * first handlers in the pipeline (Envelope.Decoder::decode()). However, in case of any exception thrown between
+         * that handler (where inflight payload is incremented) and this handler (Dispatcher::channelRead0) (where 
+         * inflight payload in decremented), inflight payload becomes erroneous. ExceptionHandler is not sufficient for 
+         * this purpose since it does not have the message envelope associated with the exception.
+         * <p>
+         * If the connection is configured to throw {@link OverloadedException}, requests that breach the rate limit are
+         * not counted against that limit.
+         * <p>
+         * Note: this method should execute on the netty event loop.
+         * 
+         * @throws ErrorMessage.WrappedException with an {@link OverloadedException} if overload occurs and the 
+         *         connection is configured to throw on overload
+         */
+        private void checkLimits(ChannelHandlerContext ctx, Message.Request request)
+        {
+            long requestSize = request.getSource().header.bodySizeInBytes;
+            
+            if (request.connection.isThrowOnOverload())
+            {
+                if (endpointPayloadTracker.tryAllocate(requestSize) != ResourceLimits.Outcome.SUCCESS)
+                {
+                    discardAndThrow(request, requestSize, Overload.BYTES_IN_FLIGHT);
+                }
+
+                if (DatabaseDescriptor.getNativeTransportRateLimitingEnabled() && !GLOBAL_REQUEST_LIMITER.tryReserve())
+                {
+                    // We've already allocated against the payload tracker here, so release those resources.
+                    endpointPayloadTracker.release(requestSize);
+                    discardAndThrow(request, requestSize, Overload.REQUESTS);
+                }
+            }
+            else
+            {
+                // Any request that gets here will be processed, so increment the channel bytes in flight.
+                channelPayloadBytesInFlight += requestSize;
+                
+                // Check for overloaded state by trying to allocate the message size from inflight payload trackers
+                if (endpointPayloadTracker.tryAllocate(requestSize) != ResourceLimits.Outcome.SUCCESS)
+                {
+                    endpointPayloadTracker.allocate(requestSize);
+                    pauseConnection(ctx);
+                    backpressure = Overload.BYTES_IN_FLIGHT;
+                }
+
+                if (DatabaseDescriptor.getNativeTransportRateLimitingEnabled())
+                {
+                    // Reserve a permit even if we've already triggered backpressure on bytes in flight.
+                    long delay = GLOBAL_REQUEST_LIMITER.reserveAndGetDelay(RATE_LIMITER_DELAY_UNIT);
+                    
+                    // If we've already triggered backpressure on bytes in flight, no further action is necessary.
+                    if (backpressure == Overload.NONE && delay > 0)
+                    {
+                        pauseConnection(ctx);
+                        
+                        // A permit isn't immediately available, so schedule an unpause for when it is.
+                        ctx.channel().eventLoop().schedule(() -> unpauseConnection(ctx.channel().config()), delay, RATE_LIMITER_DELAY_UNIT);
+                        backpressure = Overload.REQUESTS;
+                    }
+                }
+            }
+        }
+
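+        /** Disables autoRead on the channel, if it is not already disabled, and records the paused-connection metric. */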
+        private void pauseConnection(ChannelHandlerContext ctx)
+        {
+            if (ctx.channel().config().isAutoRead())
+            {
+                ctx.channel().config().setAutoRead(false);
+                ClientMetrics.instance.pauseConnection();
+            }
+        }
+
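+        /** Clears the backpressure state and, if the channel was paused, re-enables autoRead and updates the metric. */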
+        private void unpauseConnection(ChannelConfig config)
+        {
+            backpressure = Overload.NONE;
+            
+            if (!config.isAutoRead())
+            {
                 ClientMetrics.instance.unpauseConnection();
                 config.setAutoRead(true);
             }
         }
 
-        /**
-         * This check for inflight payload to potentially discard the request should have been ideally in one of the
-         * first handlers in the pipeline (Envelope.Decoder::decode()). However, incase of any exception thrown between that
-         * handler (where inflight payload is incremented) and this handler (Dispatcher::channelRead0) (where inflight
-         * payload in decremented), inflight payload becomes erroneous. ExceptionHandler is not sufficient for this
-         * purpose since it does not have the message envelope associated with the exception.
-         * <p>
-         * Note: this method should execute on the netty event loop.
-         */
-        private boolean shouldHandleRequest(ChannelHandlerContext ctx, Message.Request request)
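+        /**
+         * Marks the request as discarded, logs the discard at TRACE, and throws a wrapped {@link OverloadedException}
+         * describing whether the request rate limit or the bytes-in-flight limit was breached.
+         */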
+        private void discardAndThrow(Message.Request request, long requestSize, Overload overload)
         {
-            long requestSize = request.getSource().header.bodySizeInBytes;
+            ClientMetrics.instance.markRequestDiscarded();
 
-            // check for overloaded state by trying to allocate the message size from inflight payload trackers
-            if (endpointPayloadTracker.tryAllocate(requestSize) != ResourceLimits.Outcome.SUCCESS)
-            {
-                if (request.connection.isThrowOnOverload())
-                {
-                    // discard the request and throw an exception
-                    ClientMetrics.instance.markRequestDiscarded();
-                    logger.trace("Discarded request of size: {}. InflightChannelRequestPayload: {}, {}, Request: {}",
-                                 requestSize,
-                                 channelPayloadBytesInFlight,
-                                 endpointPayloadTracker.toString(),
-                                 request);
-                    throw ErrorMessage.wrap(new OverloadedException("Server is in overloaded state. Cannot accept more requests at this point"),
-                                            request.getSource().header.streamId);
-                }
-                else
-                {
-                    // set backpressure on the channel, and handle the request
-                    endpointPayloadTracker.allocate(requestSize);
-                    ctx.channel().config().setAutoRead(false);
-                    ClientMetrics.instance.pauseConnection();
-                    paused = true;
-                }
-            }
+            logger.trace("Discarded request of size {} with {} bytes in flight on channel. {} " + 
+                         "Global rate limiter: {} Request: {}",
+                         requestSize, channelPayloadBytesInFlight, endpointPayloadTracker,
+                         GLOBAL_REQUEST_LIMITER, request);
 
-            channelPayloadBytesInFlight += requestSize;
-            return true;
+            OverloadedException exception = overload == Overload.REQUESTS
+                    ? new OverloadedException(String.format("Request breached global limit of %d requests/second. Server is " +
+                                                            "currently in an overloaded state and cannot accept more requests.",
+                                                            GLOBAL_REQUEST_LIMITER.getRate()))
+                    : new OverloadedException(String.format("Request breached limit on bytes in flight (%s). " +
+                                                            "Server is currently in an overloaded state and cannot accept more requests.",
+                                                            endpointPayloadTracker));
+            
+            throw ErrorMessage.wrap(exception, request.getSource().header.streamId);
         }
 
-
         @Override
         public void channelInactive(ChannelHandlerContext ctx)
         {
             endpointPayloadTracker.release();
-            if (paused)
+            if (!ctx.channel().config().isAutoRead())
             {
-                paused = false;
                 ClientMetrics.instance.unpauseConnection();
             }
             ctx.fireChannelInactive();
@@ -181,7 +253,7 @@
         public static final ProtocolDecoder instance = new ProtocolDecoder();
         private ProtocolDecoder(){}
 
-        public void decode(ChannelHandlerContext ctx, Envelope source, List results)
+        public void decode(ChannelHandlerContext ctx, Envelope source, List<Object> results)
         {
             try
             {
@@ -212,7 +284,7 @@
     {
         public static final ProtocolEncoder instance = new ProtocolEncoder();
         private ProtocolEncoder(){}
-        public void encode(ChannelHandlerContext ctx, Message source, List results)
+        public void encode(ChannelHandlerContext ctx, Message source, List<Object> results)
         {
             ProtocolVersion version = getConnectionVersion(ctx);
             results.add(source.encode(version));
@@ -226,6 +298,8 @@
     @ChannelHandler.Sharable
     public static final class ExceptionHandler extends ChannelInboundHandlerAdapter
     {
+        private static final Logger logger = LoggerFactory.getLogger(ExceptionHandler.class);
+
         public static final ExceptionHandler instance = new ExceptionHandler();
         private ExceptionHandler(){}
 
@@ -235,7 +309,7 @@
             // Provide error message to client in case channel is still open
             if (ctx.channel().isOpen())
             {
-                ExceptionHandlers.UnexpectedChannelExceptionHandler handler = new ExceptionHandlers.UnexpectedChannelExceptionHandler(ctx.channel(), false);
+                Predicate<Throwable> handler = ExceptionHandlers.getUnexpectedExceptionHandler(ctx.channel(), false);
                 ErrorMessage errorMessage = ErrorMessage.fromException(cause, handler);
                 ChannelFuture future = ctx.writeAndFlush(errorMessage.encode(getConnectionVersion(ctx)));
                 // On protocol exception, close the channel as soon as the message have been sent.
@@ -244,22 +318,16 @@
                 if (isFatal(cause))
                     future.addListener((ChannelFutureListener) f -> ctx.close());
             }
-            if (Throwables.anyCauseMatches(cause, t -> t instanceof ProtocolException))
+            
+            if (DatabaseDescriptor.getClientErrorReportingExclusions().contains(ctx.channel().remoteAddress()))
             {
-                // if any ProtocolExceptions is not silent, then handle
-                if (Throwables.anyCauseMatches(cause, t -> t instanceof ProtocolException && !((ProtocolException) t).isSilent()))
-                {
-                    ClientMetrics.instance.markProtocolException();
-                    // since protocol exceptions are expected to be client issues, not logging stack trace
-                    // to avoid spamming the logs once a bad client shows up
-                    NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.MINUTES, "Protocol exception with client networking: " + cause.getMessage());
-                }
+                // Sometimes it is desirable to ignore exceptions from specific IPs, such as when security scans are
+                // running. To avoid polluting logs and metrics, metrics are not updated when the IP is in the
+                // exclusion list.
+                logger.debug("Excluding client exception for {}; address contained in client_error_reporting_exclusions", ctx.channel().remoteAddress(), cause);
+                return;
             }
-            else
-            {
-                ClientMetrics.instance.markUnknownException();
-                logger.warn("Unknown exception in client networking", cause);
-            }
+            ExceptionHandlers.logClientNetworkingExceptions(cause);
             JVMStabilityInspector.inspectThrowable(cause);
         }
 
diff --git a/src/java/org/apache/cassandra/transport/ProtocolVersion.java b/src/java/org/apache/cassandra/transport/ProtocolVersion.java
index aa02aab..fc97cc8 100644
--- a/src/java/org/apache/cassandra/transport/ProtocolVersion.java
+++ b/src/java/org/apache/cassandra/transport/ProtocolVersion.java
@@ -67,7 +67,7 @@
     final static ProtocolVersion MIN_SUPPORTED_VERSION = SUPPORTED_VERSIONS[0];
     final static ProtocolVersion MAX_SUPPORTED_VERSION = SUPPORTED_VERSIONS[SUPPORTED_VERSIONS.length - 1];
     /** These versions are sent by some clients, but are not valid Apache Cassandra versions (66, and 65 are DSE versions) */
-    private static int[] KNOWN_INVALID_VERSIONS = {66, 65};
+    private static int[] KNOWN_INVALID_VERSIONS = { 66, 65 };
 
     /** All supported versions, published as an enumset */
     public final static EnumSet<ProtocolVersion> SUPPORTED = EnumSet.copyOf(Arrays.asList(ArrayUtils.addAll(SUPPORTED_VERSIONS)));
diff --git a/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java b/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java
index f289377..3d6e900 100644
--- a/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java
+++ b/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java
@@ -26,6 +26,8 @@
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.LoadingCache;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * This class tracks the last 100 connections per protocol version
  */
@@ -47,13 +49,13 @@
         for (ProtocolVersion version : ProtocolVersion.values())
         {
             clientsByProtocolVersion.put(version, Caffeine.newBuilder().maximumSize(capacity)
-                                                          .build(key -> System.currentTimeMillis()));
+                                                          .build(key -> currentTimeMillis()));
         }
     }
 
     void addConnection(InetAddress addr, ProtocolVersion version)
     {
-        clientsByProtocolVersion.get(version).put(addr, System.currentTimeMillis());
+        clientsByProtocolVersion.get(version).put(addr, currentTimeMillis());
     }
 
     List<ClientStat> getAll()
diff --git a/src/java/org/apache/cassandra/transport/Server.java b/src/java/org/apache/cassandra/transport/Server.java
index 5c9e575..5ec6d20 100644
--- a/src/java/org/apache/cassandra/transport/Server.java
+++ b/src/java/org/apache/cassandra/transport/Server.java
@@ -41,10 +41,15 @@
 import org.apache.cassandra.auth.AuthenticatedUser;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.cql3.functions.UDAggregate;
+import org.apache.cassandra.cql3.functions.UDFunction;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaChangeListener;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.*;
 import org.apache.cassandra.transport.messages.EventMessage;
 import org.apache.cassandra.utils.FBUtilities;
@@ -354,7 +359,7 @@
         }
     }
 
-    public static class EventNotifier extends SchemaChangeListener implements IEndpointLifecycleSubscriber
+    public static class EventNotifier implements SchemaChangeListener, IEndpointLifecycleSubscriber
     {
         private ConnectionTracker connectionTracker;
 
@@ -381,7 +386,7 @@
                 // That should not happen, so log an error, but return the
                 // endpoint address since there's a good change this is right
                 logger.error("Problem retrieving RPC address for {}", endpoint, e);
-                return InetAddressAndPort.getByAddressOverrideDefaults(endpoint.address, DatabaseDescriptor.getNativeTransportPort());
+                return InetAddressAndPort.getByAddressOverrideDefaults(endpoint.getAddress(), DatabaseDescriptor.getNativeTransportPort());
             }
         }
 
@@ -407,6 +412,7 @@
             connectionTracker.send(event);
         }
 
+        @Override
         public void onJoinCluster(InetAddressAndPort endpoint)
         {
             if (!StorageService.instance.isRpcReady(endpoint))
@@ -415,16 +421,19 @@
                 onTopologyChange(endpoint, Event.TopologyChange.newNode(getNativeAddress(endpoint)));
         }
 
+        @Override
         public void onLeaveCluster(InetAddressAndPort endpoint)
         {
             onTopologyChange(endpoint, Event.TopologyChange.removedNode(getNativeAddress(endpoint)));
         }
 
+        @Override
         public void onMove(InetAddressAndPort endpoint)
         {
             onTopologyChange(endpoint, Event.TopologyChange.movedNode(getNativeAddress(endpoint)));
         }
 
+        @Override
         public void onUp(InetAddressAndPort endpoint)
         {
             if (endpointsPendingJoinedNotification.remove(endpoint))
@@ -433,6 +442,7 @@
             onStatusChange(endpoint, Event.StatusChange.nodeUp(getNativeAddress(endpoint)));
         }
 
+        @Override
         public void onDown(InetAddressAndPort endpoint)
         {
             onStatusChange(endpoint, Event.StatusChange.nodeDown(getNativeAddress(endpoint)));
@@ -466,85 +476,100 @@
             }
         }
 
-        public void onCreateKeyspace(String ksName)
+        @Override
+        public void onCreateKeyspace(KeyspaceMetadata keyspace)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, ksName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, keyspace.name));
         }
 
-        public void onCreateTable(String ksName, String cfName)
+        @Override
+        public void onCreateTable(TableMetadata table)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TABLE, ksName, cfName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TABLE, table.keyspace, table.name));
         }
 
-        public void onCreateType(String ksName, String typeName)
+        @Override
+        public void onCreateType(UserType type)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TYPE, ksName, typeName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TYPE, type.keyspace, type.getNameAsString()));
         }
 
-        public void onCreateFunction(String ksName, String functionName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onCreateFunction(UDFunction function)
         {
             send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.FUNCTION,
-                                        ksName, functionName, AbstractType.asCQLTypeStringList(argTypes)));
+                                        function.name().keyspace, function.name().name, AbstractType.asCQLTypeStringList(function.argTypes())));
         }
 
-        public void onCreateAggregate(String ksName, String aggregateName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onCreateAggregate(UDAggregate aggregate)
         {
             send(new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.AGGREGATE,
-                                        ksName, aggregateName, AbstractType.asCQLTypeStringList(argTypes)));
+                                        aggregate.name().keyspace, aggregate.name().name, AbstractType.asCQLTypeStringList(aggregate.argTypes())));
         }
 
-        public void onAlterKeyspace(String ksName)
+        @Override
+        public void onAlterKeyspace(KeyspaceMetadata before, KeyspaceMetadata after)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, ksName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, after.name));
         }
 
-        public void onAlterTable(String ksName, String cfName, boolean affectsStatements)
+        @Override
+        public void onAlterTable(TableMetadata before, TableMetadata after, boolean affectsStatements)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, ksName, cfName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, after.keyspace, after.name));
         }
 
-        public void onAlterType(String ksName, String typeName)
+        @Override
+        public void onAlterType(UserType before, UserType after)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TYPE, ksName, typeName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TYPE, after.keyspace, after.getNameAsString()));
         }
 
-        public void onAlterFunction(String ksName, String functionName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onAlterFunction(UDFunction before, UDFunction after)
         {
             send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.FUNCTION,
-                                        ksName, functionName, AbstractType.asCQLTypeStringList(argTypes)));
+                                        after.name().keyspace, after.name().name, AbstractType.asCQLTypeStringList(after.argTypes())));
         }
 
-        public void onAlterAggregate(String ksName, String aggregateName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onAlterAggregate(UDAggregate before, UDAggregate after)
         {
             send(new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.AGGREGATE,
-                                        ksName, aggregateName, AbstractType.asCQLTypeStringList(argTypes)));
+                                        after.name().keyspace, after.name().name, AbstractType.asCQLTypeStringList(after.argTypes())));
         }
 
-        public void onDropKeyspace(String ksName)
+        @Override
+        public void onDropKeyspace(KeyspaceMetadata keyspace, boolean dropData)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, ksName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, keyspace.name));
         }
 
-        public void onDropTable(String ksName, String cfName)
+        @Override
+        public void onDropTable(TableMetadata table, boolean dropData)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, Event.SchemaChange.Target.TABLE, ksName, cfName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, Event.SchemaChange.Target.TABLE, table.keyspace, table.name));
         }
 
-        public void onDropType(String ksName, String typeName)
+        @Override
+        public void onDropType(UserType type)
         {
-            send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, Event.SchemaChange.Target.TYPE, ksName, typeName));
+            send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, Event.SchemaChange.Target.TYPE, type.keyspace, type.getNameAsString()));
         }
 
-        public void onDropFunction(String ksName, String functionName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onDropFunction(UDFunction function)
         {
             send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, Event.SchemaChange.Target.FUNCTION,
-                                        ksName, functionName, AbstractType.asCQLTypeStringList(argTypes)));
+                                        function.name().keyspace, function.name().name, AbstractType.asCQLTypeStringList(function.argTypes())));
         }
 
-        public void onDropAggregate(String ksName, String aggregateName, List<AbstractType<?>> argTypes)
+        @Override
+        public void onDropAggregate(UDAggregate aggregate)
         {
             send(new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, Event.SchemaChange.Target.AGGREGATE,
-                                        ksName, aggregateName, AbstractType.asCQLTypeStringList(argTypes)));
+                                        aggregate.name().keyspace, aggregate.name().name, AbstractType.asCQLTypeStringList(aggregate.argTypes())));
         }
     }
 }
diff --git a/src/java/org/apache/cassandra/transport/SimpleClient.java b/src/java/org/apache/cassandra/transport/SimpleClient.java
index ae89e93..43bb8ad 100644
--- a/src/java/org/apache/cassandra/transport/SimpleClient.java
+++ b/src/java/org/apache/cassandra/transport/SimpleClient.java
@@ -27,6 +27,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Ints;
+import org.apache.cassandra.transport.ClientResourceLimits.Overload;
+import org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,7 +38,7 @@
 import io.netty.handler.codec.MessageToMessageDecoder;
 import io.netty.handler.codec.MessageToMessageEncoder;
 import io.netty.handler.ssl.SslContext;
-import io.netty.util.concurrent.Promise;
+import io.netty.util.concurrent.Promise; // checkstyle: permit this import
 import io.netty.util.concurrent.PromiseCombiner;
 import io.netty.util.internal.logging.InternalLoggerFactory;
 import io.netty.util.internal.logging.Slf4JLoggerFactory;
@@ -45,14 +47,23 @@
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.net.*;
+import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.security.SSLFactory;
 import org.apache.cassandra.transport.messages.*;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static org.apache.cassandra.transport.CQLMessageHandler.envelopeSize;
 import static org.apache.cassandra.transport.Flusher.MAX_FRAMED_PAYLOAD_SIZE;
+import static org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter.NO_OP_LIMITER;
+
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 
 public class SimpleClient implements Closeable
 {
+
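+    /** Maximum time to wait for each server response before failing with a timeout. */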
+    public static final int TIMEOUT_SECONDS = 10;
+
     static
     {
         InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
@@ -211,7 +222,7 @@
                     .option(ChannelOption.TCP_NODELAY, true);
 
         // Configure the pipeline factory.
-        if(encryptionOptions.isEnabled())
+        if(encryptionOptions.getEnabled())
         {
             bootstrap.handler(new SecureInitializer(largeMessageThreshold));
         }
@@ -281,7 +292,7 @@
         {
             request.attach(connection);
             lastWriteFuture = channel.writeAndFlush(Collections.singletonList(request));
-            Message.Response msg = responseHandler.responses.poll(10, TimeUnit.SECONDS);
+            Message.Response msg = responseHandler.responses.poll(TIMEOUT_SECONDS, TimeUnit.SECONDS);
             if (msg == null)
                 throw new RuntimeException("timeout");
             if (throwOnErrorResponse && msg instanceof ErrorMessage)
@@ -290,7 +301,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
     }
 
@@ -310,10 +321,10 @@
                 }
                 lastWriteFuture = channel.writeAndFlush(requests);
 
-                long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
+                long deadline = currentTimeMillis() + TimeUnit.SECONDS.toMillis(TIMEOUT_SECONDS);
                 for (int i = 0; i < requests.size(); i++)
                 {
-                    Message.Response msg = responseHandler.responses.poll(deadline - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
+                    Message.Response msg = responseHandler.responses.poll(deadline - currentTimeMillis(), TimeUnit.MILLISECONDS);
                     if (msg == null)
                         throw new RuntimeException("timeout");
                     if (msg instanceof ErrorMessage)
@@ -332,7 +343,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
     }
 
@@ -343,7 +354,7 @@
 
     public static class SimpleEventHandler implements EventHandler
     {
-        public final LinkedBlockingQueue<Event> queue = new LinkedBlockingQueue<>();
+        public final BlockingQueue<Event> queue = newBlockingQueue();
 
         public void onEvent(Event event)
         {
@@ -393,7 +404,7 @@
                 case AUTHENTICATE:
                     if (response.header.version.isGreaterOrEqualTo(ProtocolVersion.V5))
                     {
-                        configureModernPipeline(ctx, response);
+                        configureModernPipeline(ctx, response, largeMessageThreshold);
                         // consuming the message is done when setting up the pipeline
                     }
                     else
@@ -414,7 +425,7 @@
             }
         }
 
-        private void configureModernPipeline(ChannelHandlerContext ctx, Envelope response)
+        private void configureModernPipeline(ChannelHandlerContext ctx, Envelope response, int largeMessageThreshold)
         {
             logger.info("Configuring modern pipeline");
             ChannelPipeline pipeline = ctx.pipeline();
@@ -434,7 +445,7 @@
             FrameEncoder frameEncoder = frameEncoder(ctx);
             FrameEncoder.PayloadAllocator payloadAllocator = frameEncoder.allocator();
 
-            CQLMessageHandler.MessageConsumer<Message.Response> responseConsumer = (c, message, converter) -> {
+            CQLMessageHandler.MessageConsumer<Message.Response> responseConsumer = (c, message, converter, backpressured) -> {
                 responseHandler.handleResponse(c, message);
             };
 
@@ -470,6 +481,12 @@
                     return endpointQueue;
                 }
 
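+                // This client-side allocator applies no request rate limiting, so expose a no-op limiter.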
+                @Override
+                public NonBlockingRateLimiter requestRateLimiter()
+                {
+                    return NO_OP_LIMITER;
+                }
+
                 public void release()
                 {
                 }
@@ -512,7 +529,7 @@
                     Connection connection = ctx.channel().attr(Connection.attributeKey).get();
                     // The only case the connection can be null is when we send the initial STARTUP message (client side thus)
                     ProtocolVersion version = connection == null ? ProtocolVersion.CURRENT : connection.getVersion();
-                    SimpleFlusher flusher = new SimpleFlusher(frameEncoder);
+                    SimpleFlusher flusher = new SimpleFlusher(frameEncoder, largeMessageThreshold);
                     for (Message message : (List<Message>) msg)
                         flusher.enqueue(message.encode(version));
 
@@ -522,7 +539,7 @@
             pipeline.remove(this);
 
             Message.Response message = messageDecoder.decode(ctx.channel(), response);
-            responseConsumer.accept(channel, message, (ch, req, resp) -> null);
+            responseConsumer.accept(channel, message, (ch, req, resp) -> null, Overload.NONE);
         }
 
         private FrameDecoder frameDecoder(ChannelHandlerContext ctx, BufferPoolAllocator allocator)
@@ -606,7 +623,7 @@
         {
             super.initChannel(channel);
             SslContext sslContext = SSLFactory.getOrCreateSslContext(encryptionOptions, encryptionOptions.require_client_auth,
-                                                                     SSLFactory.SocketType.CLIENT);
+                                                                     ISslContextFactory.SocketType.CLIENT);
             channel.pipeline().addFirst("ssl", sslContext.newHandler(channel.alloc()));
         }
     }
@@ -639,9 +656,9 @@
                 else
                     responses.put(r);
             }
-            catch (InterruptedException ie)
+            catch (InterruptedException e)
             {
-                throw new RuntimeException(ie);
+                throw new UncheckedInterruptedException(e);
             }
         }
 
@@ -669,10 +686,17 @@
         final Queue<Envelope> outbound = new ConcurrentLinkedQueue<>();
         final FrameEncoder frameEncoder;
         private final AtomicBoolean scheduled = new AtomicBoolean(false);
+        private final int largeMessageThreshold;
+
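+        /** Allows callers to override the payload size threshold at which outbound messages are split into multiple frames. */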
+        SimpleFlusher(FrameEncoder frameEncoder, int largeMessageThreshold)
+        {
+            this.frameEncoder = frameEncoder;
+            this.largeMessageThreshold = largeMessageThreshold;
+        }
 
         SimpleFlusher(FrameEncoder frameEncoder)
         {
-            this.frameEncoder = frameEncoder;
+            this(frameEncoder, MAX_FRAMED_PAYLOAD_SIZE);
         }
 
         public void enqueue(Envelope message)
@@ -709,14 +733,14 @@
             Envelope f;
             while ((f = outbound.poll()) != null)
             {
-                if (f.header.bodySizeInBytes > MAX_FRAMED_PAYLOAD_SIZE)
+                if (f.header.bodySizeInBytes > largeMessageThreshold)
                 {
                     combiner.addAll(writeLargeMessage(ctx, f));
                 }
                 else
                 {
                     int messageSize = envelopeSize(f.header);
-                    if (bufferSize + messageSize >= MAX_FRAMED_PAYLOAD_SIZE)
+                    if (bufferSize + messageSize >= largeMessageThreshold)
                     {
                         combiner.add(flushBuffer(ctx, buffer, bufferSize));
                         buffer = new ArrayList<>();
@@ -751,9 +775,9 @@
         private FrameEncoder.Payload allocate(int size, boolean selfContained)
         {
             FrameEncoder.Payload payload = frameEncoder.allocator()
-                                                       .allocate(selfContained, Math.min(size, MAX_FRAMED_PAYLOAD_SIZE));
-            if (size >= MAX_FRAMED_PAYLOAD_SIZE)
-                payload.buffer.limit(MAX_FRAMED_PAYLOAD_SIZE);
+                                                       .allocate(selfContained, Math.min(size, largeMessageThreshold));
+            if (size >= largeMessageThreshold)
+                payload.buffer.limit(largeMessageThreshold);
 
             return payload;
         }
@@ -766,14 +790,14 @@
             boolean firstFrame = true;
             while (f.body.readableBytes() > 0 || firstFrame)
             {
-                int payloadSize = Math.min(f.body.readableBytes(), MAX_FRAMED_PAYLOAD_SIZE);
+                int payloadSize = Math.min(f.body.readableBytes(), largeMessageThreshold);
                 payload = allocate(f.body.readableBytes(), false);
 
                 buf = payload.buffer;
                 // BufferPool may give us a buffer larger than we asked for.
                 // FrameEncoder may object if buffer.remaining is >= MAX_SIZE.
-                if (payloadSize >= MAX_FRAMED_PAYLOAD_SIZE)
-                    buf.limit(MAX_FRAMED_PAYLOAD_SIZE);
+                if (payloadSize >= largeMessageThreshold)
+                    buf.limit(largeMessageThreshold);
 
                 if (firstFrame)
                 {
diff --git a/src/java/org/apache/cassandra/transport/messages/BatchMessage.java b/src/java/org/apache/cassandra/transport/messages/BatchMessage.java
index afc308a..de575ce 100644
--- a/src/java/org/apache/cassandra/transport/messages/BatchMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/BatchMessage.java
@@ -20,13 +20,9 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.ImmutableMap;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import io.netty.buffer.ByteBuf;
 import org.apache.cassandra.cql3.Attributes;
 import org.apache.cassandra.cql3.BatchQueryOptions;
@@ -49,7 +45,8 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MD5Digest;
-import org.apache.cassandra.utils.NoSpamLogger;
+
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class BatchMessage extends Message.Request
 {
@@ -166,6 +163,12 @@
     }
 
     @Override
+    protected boolean isTrackable()
+    {
+        return true;
+    }
+
+    @Override
     protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest)
     {
         List<QueryHandler.Prepared> prepared = null;
@@ -222,7 +225,7 @@
             // (and no value would be really correct, so we prefer passing a clearly wrong one).
             BatchStatement batch = new BatchStatement(batchType, VariableSpecifications.empty(), statements, Attributes.none());
 
-            long queryTime = System.currentTimeMillis();
+            long queryTime = currentTimeMillis();
             Message.Response response = handler.processBatch(batch, state, batchOptions, getCustomPayload(), queryStartNanoTime);
             if (queries != null)
                 QueryEvents.instance.notifyBatchSuccess(batchType, statements, queries, values, options, state, queryTime, response);
diff --git a/src/java/org/apache/cassandra/transport/messages/ErrorMessage.java b/src/java/org/apache/cassandra/transport/messages/ErrorMessage.java
index 6890584..5d29d3a 100644
--- a/src/java/org/apache/cassandra/transport/messages/ErrorMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/ErrorMessage.java
@@ -222,7 +222,7 @@
                         {
                             for (Map.Entry<InetAddressAndPort, RequestFailureReason> entry : rfe.failureReasonByEndpoint.entrySet())
                             {
-                                CBUtil.writeInetAddr(entry.getKey().address, dest);
+                                CBUtil.writeInetAddr(entry.getKey().getAddress(), dest);
                                 dest.writeShort(entry.getValue().code);
                             }
                         }
@@ -302,7 +302,7 @@
                         {
                             for (Map.Entry<InetAddressAndPort, RequestFailureReason> entry : rfe.failureReasonByEndpoint.entrySet())
                             {
-                                size += CBUtil.sizeOfInetAddr(entry.getKey().address);
+                                size += CBUtil.sizeOfInetAddr(entry.getKey().getAddress());
                                 size += 2; // RequestFailureReason code
                             }
                         }
diff --git a/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java b/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java
index 19d40ba..692d183 100644
--- a/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java
@@ -42,6 +42,8 @@
 import org.apache.cassandra.utils.MD5Digest;
 import org.apache.cassandra.utils.NoSpamLogger;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class ExecuteMessage extends Message.Request
 {
     private static final NoSpamLogger nospam = NoSpamLogger.getLogger(logger, 10, TimeUnit.MINUTES);
@@ -117,6 +119,12 @@
     }
 
     @Override
+    protected boolean isTrackable()
+    {
+        return true;
+    }
+
+    @Override
     protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest)
     {
         QueryHandler.Prepared prepared = null;
@@ -154,7 +162,7 @@
             // by wrapping the QueryOptions.
             QueryOptions queryOptions = QueryOptions.addColumnSpecifications(options, prepared.statement.getBindVariables());
 
-            long requestStartTime = System.currentTimeMillis();
+            long requestStartTime = currentTimeMillis();
 
             Message.Response response = handler.processPrepared(statement, state, queryOptions, getCustomPayload(), queryStartNanoTime);
 
diff --git a/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java b/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java
index 6c60f78..20861d0 100644
--- a/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java
@@ -36,6 +36,8 @@
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NoSpamLogger;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class PrepareMessage extends Message.Request
 {
     private static final Logger logger = LoggerFactory.getLogger(PrepareMessage.class);
@@ -121,7 +123,7 @@
 
             ClientState clientState = state.getClientState().cloneWithKeyspaceIfSet(keyspace);
             QueryHandler queryHandler = ClientState.getCQLQueryHandler();
-            long queryTime = System.currentTimeMillis();
+            long queryTime = currentTimeMillis();
             ResultMessage.Prepared response = queryHandler.prepare(query, clientState, getCustomPayload());
             QueryEvents.instance.notifyPrepareSuccess(() -> queryHandler.getPrepared(response.statementId), query, state, queryTime, response);
             return response;
diff --git a/src/java/org/apache/cassandra/transport/messages/QueryMessage.java b/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
index 71d7c73..9a296e4 100644
--- a/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
@@ -35,6 +35,8 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * A CQL query
  */
@@ -90,6 +92,12 @@
     }
 
     @Override
+    protected boolean isTrackable()
+    {
+        return true;
+    }
+
+    @Override
     protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest)
     {
         CQLStatement statement = null;
@@ -101,7 +109,7 @@
             if (traceRequest)
                 traceQuery(state);
 
-            long queryStartTime = System.currentTimeMillis();
+            long queryStartTime = currentTimeMillis();
 
             QueryHandler queryHandler = ClientState.getCQLQueryHandler();
             statement = queryHandler.parse(query, state, options);
diff --git a/src/java/org/apache/cassandra/transport/messages/StartupMessage.java b/src/java/org/apache/cassandra/transport/messages/StartupMessage.java
index 172768c..37afb22 100644
--- a/src/java/org/apache/cassandra/transport/messages/StartupMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/StartupMessage.java
@@ -110,6 +110,7 @@
         connection.setThrowOnOverload("1".equals(options.get(THROW_ON_OVERLOAD)));
 
         ClientState clientState = state.getClientState();
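+        // Retain the client-supplied STARTUP options on the ClientState so they remain available after connection setup.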
+        clientState.setClientOptions(options);
         String driverName = options.get(DRIVER_NAME);
         if (null != driverName)
         {
diff --git a/src/java/org/apache/cassandra/triggers/CustomClassLoader.java b/src/java/org/apache/cassandra/triggers/CustomClassLoader.java
index 6948c2d..95be219 100644
--- a/src/java/org/apache/cassandra/triggers/CustomClassLoader.java
+++ b/src/java/org/apache/cassandra/triggers/CustomClassLoader.java
@@ -19,35 +19,34 @@
  * under the License.
  *
  */
-
-
-import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.URL;
 import java.net.URLClassLoader;
+import java.nio.file.StandardCopyOption;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiPredicate;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.io.Files;
-
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.util.FileUtils;
 
+import static java.nio.file.Files.*;
+
 /**
  * Custom class loader will load the classes from the class path, CCL will load
- * the classes from the the URL first, if it cannot find the required class it
- * will let the parent class loader do the its job.
+ * the classes from the URL first, if it cannot find the required class it
+ * will let the parent class loader do its job.
  *
  * Note: If the CCL is GC'ed then the associated classes will be unloaded.
  */
 public class CustomClassLoader extends URLClassLoader
 {
     private static final Logger logger = LoggerFactory.getLogger(CustomClassLoader.class);
-    private final Map<String, Class<?>> cache = new ConcurrentHashMap<String, Class<?>>();
+    private final Map<String, Class<?>> cache = new ConcurrentHashMap<>();
     private final ClassLoader parent;
 
     public CustomClassLoader(ClassLoader parent)
@@ -69,28 +68,22 @@
     {
         if (dir == null || !dir.exists())
             return;
-        FilenameFilter filter = new FilenameFilter()
-        {
-            public boolean accept(File dir, String name)
-            {
-                return name.endsWith(".jar");
-            }
-        };
-        for (File inputJar : dir.listFiles(filter))
+        BiPredicate<File, String> filter = (ignore, name) -> name.endsWith(".jar");
+        for (File inputJar : dir.tryList(filter))
         {
             File lib = new File(FileUtils.getTempDir(), "lib");
             if (!lib.exists())
             {
-                lib.mkdir();
+                lib.tryCreateDirectory();
                 lib.deleteOnExit();
             }
             File out = FileUtils.createTempFile("cassandra-", ".jar", lib);
             out.deleteOnExit();
-            logger.info("Loading new jar {}", inputJar.getAbsolutePath());
+            logger.info("Loading new jar {}", inputJar.absolutePath());
             try
             {
-                Files.copy(inputJar, out);
-                addURL(out.toURI().toURL());
+                copy(inputJar.toPath(), out.toPath(), StandardCopyOption.REPLACE_EXISTING);
+                addURL(out.toPath().toUri().toURL());
             }
             catch (IOException ex)
             {
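
Note: CustomClassLoader now filters trigger jars with a BiPredicate and copies them through java.nio.file.Files rather than Guava. A standalone sketch of the same copy pattern using only JDK types (class and path names are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class JarStager
{
    public static void stage(Path triggerDir, Path libDir) throws IOException
    {
        Files.createDirectories(libDir);            // analogous to lib.tryCreateDirectory()
        try (Stream<Path> entries = Files.list(triggerDir))
        {
            List<Path> jars = entries.filter(p -> p.getFileName().toString().endsWith(".jar"))
                                     .collect(Collectors.toList());
            for (Path jar : jars)
            {
                Path out = Files.createTempFile(libDir, "cassandra-", ".jar");
                // REPLACE_EXISTING mirrors the copy() call in the patch
                Files.copy(jar, out, StandardCopyOption.REPLACE_EXISTING);
            }
        }
    }
}
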
diff --git a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
index 295003f..c76c6bd 100644
--- a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
+++ b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
@@ -18,7 +18,6 @@
  */
 package org.apache.cassandra.triggers;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.*;
 
@@ -33,6 +32,7 @@
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.exceptions.CassandraException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TriggerMetadata;
 import org.apache.cassandra.schema.Triggers;
@@ -44,7 +44,7 @@
     public static final TriggerExecutor instance = new TriggerExecutor();
 
     private final Map<String, ITrigger> cachedTriggers = Maps.newConcurrentMap();
-    private final ClassLoader parent = Thread.currentThread().getContextClassLoader();
+    private final ClassLoader parent = TriggerExecutor.class.getClassLoader();
     private volatile ClassLoader customClassLoader;
 
     private TriggerExecutor()
diff --git a/src/java/org/apache/cassandra/utils/ApproximateTime.java b/src/java/org/apache/cassandra/utils/ApproximateTime.java
deleted file mode 100644
index 32b6e44..0000000
--- a/src/java/org/apache/cassandra/utils/ApproximateTime.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.concurrent.ScheduledExecutors;
-import org.apache.cassandra.config.Config;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.cassandra.utils.ApproximateTime.Measurement.ALMOST_NOW;
-import static org.apache.cassandra.utils.ApproximateTime.Measurement.ALMOST_SAME_TIME;
-
-/**
- * This class provides approximate time utilities:
- *   - An imprecise nanoTime (monotonic) and currentTimeMillis (non-monotonic), that are faster than their regular counterparts
- *     They have a configured approximate precision (default of 10ms), which is the cadence they will be updated if the system is healthy
- *   - A mechanism for converting between nanoTime and currentTimeMillis measurements.
- *     These conversions may have drifted, and they offer no absolute guarantees on precision
- */
-public class ApproximateTime
-{
-    private static final Logger logger = LoggerFactory.getLogger(ApproximateTime.class);
-    private static final int ALMOST_NOW_UPDATE_INTERVAL_MS = Math.max(1, Integer.parseInt(System.getProperty(Config.PROPERTY_PREFIX + "approximate_time_precision_ms", "2")));
-    private static final String CONVERSION_UPDATE_INTERVAL_PROPERTY = Config.PROPERTY_PREFIX + "NANOTIMETOMILLIS_TIMESTAMP_UPDATE_INTERVAL";
-    private static final long ALMOST_SAME_TIME_UPDATE_INTERVAL_MS = Long.getLong(CONVERSION_UPDATE_INTERVAL_PROPERTY, 10000);
-
-    public static class AlmostSameTime
-    {
-        final long millis;
-        final long nanos;
-        final long error; // maximum error of millis measurement (in nanos)
-
-        private AlmostSameTime(long millis, long nanos, long error)
-        {
-            this.millis = millis;
-            this.nanos = nanos;
-            this.error = error;
-        }
-
-        public long toCurrentTimeMillis(long nanoTime)
-        {
-            return millis + TimeUnit.NANOSECONDS.toMillis(nanoTime - nanos);
-        }
-
-        public long toNanoTime(long currentTimeMillis)
-        {
-            return nanos + MILLISECONDS.toNanos(currentTimeMillis - millis);
-        }
-    }
-
-    public enum Measurement { ALMOST_NOW, ALMOST_SAME_TIME }
-
-    private static volatile Future<?> almostNowUpdater;
-    private static volatile Future<?> almostSameTimeUpdater;
-
-    private static volatile long almostNowMillis;
-    private static volatile long almostNowNanos;
-
-    private static volatile AlmostSameTime almostSameTime = new AlmostSameTime(0L, 0L, Long.MAX_VALUE);
-    private static double failedAlmostSameTimeUpdateModifier = 1.0;
-
-    private static final Runnable refreshAlmostNow = () -> {
-        almostNowMillis = System.currentTimeMillis();
-        almostNowNanos = System.nanoTime();
-    };
-
-    private static final Runnable refreshAlmostSameTime = () -> {
-        final int tries = 3;
-        long[] samples = new long[2 * tries + 1];
-        samples[0] = System.nanoTime();
-        for (int i = 1 ; i < samples.length ; i += 2)
-        {
-            samples[i] = System.currentTimeMillis();
-            samples[i + 1] = System.nanoTime();
-        }
-
-        int best = 1;
-        // take sample with minimum delta between calls
-        for (int i = 3 ; i < samples.length - 1 ; i += 2)
-        {
-            if ((samples[i+1] - samples[i-1]) < (samples[best+1]-samples[best-1]))
-                best = i;
-        }
-
-        long millis = samples[best];
-        long nanos = (samples[best+1] / 2) + (samples[best-1] / 2);
-        long error = (samples[best+1] / 2) - (samples[best-1] / 2);
-
-        AlmostSameTime prev = almostSameTime;
-        AlmostSameTime next = new AlmostSameTime(millis, nanos, error);
-
-        if (next.error > prev.error && next.error > prev.error * failedAlmostSameTimeUpdateModifier)
-        {
-            failedAlmostSameTimeUpdateModifier *= 1.1;
-            return;
-        }
-
-        failedAlmostSameTimeUpdateModifier = 1.0;
-        almostSameTime = next;
-    };
-
-    static
-    {
-        start(ALMOST_NOW);
-        start(ALMOST_SAME_TIME);
-    }
-
-    public static synchronized void stop(Measurement measurement)
-    {
-        switch (measurement)
-        {
-            case ALMOST_NOW:
-                almostNowUpdater.cancel(true);
-                try { almostNowUpdater.get(); } catch (Throwable t) { }
-                almostNowUpdater = null;
-                break;
-            case ALMOST_SAME_TIME:
-                almostSameTimeUpdater.cancel(true);
-                try { almostSameTimeUpdater.get(); } catch (Throwable t) { }
-                almostSameTimeUpdater = null;
-                break;
-        }
-    }
-
-    public static synchronized void start(Measurement measurement)
-    {
-        switch (measurement)
-        {
-            case ALMOST_NOW:
-                if (almostNowUpdater != null)
-                    throw new IllegalStateException("Already running");
-                refreshAlmostNow.run();
-                logger.info("Scheduling approximate time-check task with a precision of {} milliseconds", ALMOST_NOW_UPDATE_INTERVAL_MS);
-                almostNowUpdater = ScheduledExecutors.scheduledFastTasks.scheduleWithFixedDelay(refreshAlmostNow, ALMOST_NOW_UPDATE_INTERVAL_MS, ALMOST_NOW_UPDATE_INTERVAL_MS, MILLISECONDS);
-                break;
-            case ALMOST_SAME_TIME:
-                if (almostSameTimeUpdater != null)
-                    throw new IllegalStateException("Already running");
-                refreshAlmostSameTime.run();
-                logger.info("Scheduling approximate time conversion task with an interval of {} milliseconds", ALMOST_SAME_TIME_UPDATE_INTERVAL_MS);
-                almostSameTimeUpdater = ScheduledExecutors.scheduledFastTasks.scheduleWithFixedDelay(refreshAlmostSameTime, ALMOST_SAME_TIME_UPDATE_INTERVAL_MS, ALMOST_SAME_TIME_UPDATE_INTERVAL_MS, MILLISECONDS);
-                break;
-        }
-    }
-
-
-    /**
-     * Request an immediate refresh; this shouldn't generally be invoked, except perhaps by tests
-     */
-    @VisibleForTesting
-    public static synchronized void refresh(Measurement measurement)
-    {
-        stop(measurement);
-        start(measurement);
-    }
-
-    /** no guarantees about relationship to nanoTime; non-monotonic (tracks currentTimeMillis as closely as possible) */
-    public static long currentTimeMillis()
-    {
-        return almostNowMillis;
-    }
-
-    /** no guarantees about relationship to currentTimeMillis; monotonic */
-    public static long nanoTime()
-    {
-        return almostNowNanos;
-    }
-}
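
Note: the deleted conversion task anchored currentTimeMillis() to nanoTime() by taking several bracketed samples and keeping the one with the smallest nanoTime gap, i.e. the smallest error bound. A self-contained sketch of that sampling technique, reproduced outside the removed class:

public final class TimeAnchor
{
    public final long millis, nanos, errorNanos;

    private TimeAnchor(long millis, long nanos, long errorNanos)
    {
        this.millis = millis; this.nanos = nanos; this.errorNanos = errorNanos;
    }

    public static TimeAnchor sample(int tries)
    {
        long[] s = new long[2 * tries + 1];
        s[0] = System.nanoTime();
        for (int i = 1; i < s.length; i += 2)
        {
            s[i] = System.currentTimeMillis();      // millis reading bracketed by two nano readings
            s[i + 1] = System.nanoTime();
        }
        int best = 1;
        for (int i = 3; i < s.length - 1; i += 2)
            if (s[i + 1] - s[i - 1] < s[best + 1] - s[best - 1])
                best = i;                           // tightest bracket -> most precise anchor
        long nanos = s[best - 1] / 2 + s[best + 1] / 2;   // midpoint, halved first to avoid overflow
        long error = s[best + 1] / 2 - s[best - 1] / 2;
        return new TimeAnchor(s[best], nanos, error);
    }

    public long toMillis(long nanoTime)
    {
        return millis + (nanoTime - nanos) / 1_000_000L;
    }
}
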
diff --git a/src/java/org/apache/cassandra/utils/BiLongAccumulator.java b/src/java/org/apache/cassandra/utils/BiLongAccumulator.java
index 2c3d6b5..cdf2a4f 100644
--- a/src/java/org/apache/cassandra/utils/BiLongAccumulator.java
+++ b/src/java/org/apache/cassandra/utils/BiLongAccumulator.java
@@ -20,5 +20,5 @@
 
 public interface BiLongAccumulator<T, A>
 {
-    long apply(T obj, A arguemnt, long v);
+    long apply(T obj, A argument, long v);
 }
diff --git a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
index d3c08b5..3df4314 100644
--- a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
+++ b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
@@ -17,8 +17,9 @@
  */
 package org.apache.cassandra.utils;
 
-import java.io.DataInputStream;
+import java.io.DataInput;
 import java.io.IOException;
+import java.io.InputStream;
 
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.util.DataOutputPlus;
@@ -38,7 +39,7 @@
     }
 
     @SuppressWarnings("resource")
-    public static BloomFilter deserialize(DataInputStream in, boolean oldBfFormat) throws IOException
+    public static <I extends InputStream & DataInput> BloomFilter deserialize(I in, boolean oldBfFormat) throws IOException
     {
         int hashes = in.readInt();
         IBitSet bs = OffHeapBitSet.deserialize(in, oldBfFormat);
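
Note: the intersection bound <I extends InputStream & DataInput> accepts any input that is both a stream and a DataInput (for example a DataInputStream) without naming a concrete class. A small illustration of the same generic pattern; readHeader is made up for the example:

import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

public class IntersectionBoundDemo
{
    // accepts anything that is both an InputStream and a DataInput
    static <I extends InputStream & DataInput> int readHeader(I in) throws IOException
    {
        return in.readInt();
    }

    public static void main(String[] args) throws IOException
    {
        byte[] raw = { 0, 0, 0, 7 };
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(raw)))
        {
            System.out.println(readHeader(in));     // prints 7
        }
    }
}
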
diff --git a/src/java/org/apache/cassandra/utils/BreaksJMX.java b/src/java/org/apache/cassandra/utils/BreaksJMX.java
new file mode 100644
index 0000000..566ef82
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/BreaksJMX.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Annotation to have JMX-breaking APIs not trigger test failures; each usage must explain why it is ok to expose a
+ * JMX-breaking API.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ ElementType.METHOD })
+public @interface BreaksJMX
+{
+    String value();
+}
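
Note: intended usage of the annotation, sketched on a hypothetical MBean method (the method and its justification string are illustrative, not from the codebase):

import org.apache.cassandra.utils.BreaksJMX;

public class ExampleMBeanImpl
{
    @BreaksJMX("return type is only consumed by in-JVM dtests; external JMX clients never call this overload")
    public Object getInternalState()
    {
        return null;        // illustrative only
    }
}
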
diff --git a/src/java/org/apache/cassandra/utils/ByteArrayUtil.java b/src/java/org/apache/cassandra/utils/ByteArrayUtil.java
index 58229c0..8c84ee5 100644
--- a/src/java/org/apache/cassandra/utils/ByteArrayUtil.java
+++ b/src/java/org/apache/cassandra/utils/ByteArrayUtil.java
@@ -277,4 +277,4 @@
     {
         FastByteOperations.copy(src, srcPos, dst, dstPos, length);
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/ByteBufferUtil.java b/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
index d0ab6b2..ba7d1be 100644
--- a/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
+++ b/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
@@ -522,6 +522,8 @@
             return ByteBufferUtil.bytes((InetAddress) obj);
         else if (obj instanceof String)
             return ByteBufferUtil.bytes((String) obj);
+        else if (obj instanceof byte[])
+            return ByteBuffer.wrap((byte[]) obj);
         else if (obj instanceof ByteBuffer)
             return (ByteBuffer) obj;
         else
@@ -662,6 +664,11 @@
         return ByteBuffer.wrap(UUIDGen.decompose(uuid));
     }
 
+    public static ByteBuffer bytes(TimeUUID uuid)
+    {
+        return bytes(uuid.asUUID());
+    }
+
     // Returns whether {@code prefix} is a prefix of {@code value}.
     public static boolean isPrefix(ByteBuffer prefix, ByteBuffer value)
     {
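
Note: the new byte[] branch wraps raw arrays instead of rejecting them; ByteBuffer.wrap shares the backing array rather than copying it, which is worth keeping in mind at the call sites. A trivial illustration:

import java.nio.ByteBuffer;

public class WrapDemo
{
    public static void main(String[] args)
    {
        byte[] raw = { 1, 2, 3 };
        ByteBuffer wrapped = ByteBuffer.wrap(raw);  // zero-copy: buffer and array share storage
        raw[0] = 9;
        System.out.println(wrapped.get(0));         // prints 9
    }
}
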
diff --git a/src/java/org/apache/cassandra/utils/CassandraVersion.java b/src/java/org/apache/cassandra/utils/CassandraVersion.java
index 56290fb..766f0e8 100644
--- a/src/java/org/apache/cassandra/utils/CassandraVersion.java
+++ b/src/java/org/apache/cassandra/utils/CassandraVersion.java
@@ -43,17 +43,31 @@
      * note: 3rd/4th groups matches to words but only allows number and checked after regexp test.
      * this is because 3rd and the last can be identical.
      **/
-    private static final String VERSION_REGEXP = "(\\d+)\\.(\\d+)(?:\\.(\\w+))?(?:\\.(\\w+))?(\\-[-.\\w]+)?([.+][.\\w]+)?";
+    private static final String VERSION_REGEXP = "(?<major>\\d+)\\.(?<minor>\\d+)(\\.(?<patch>\\w+)(\\.(?<hotfix>\\w+))?)?(-(?<prerelease>[-.\\w]+))?([.+](?<build>[.\\w]+))?";
     private static final Pattern PATTERN_WORDS = Pattern.compile("\\w+");
     @VisibleForTesting
     static final int NO_HOTFIX = -1;
 
     private static final Pattern PATTERN = Pattern.compile(VERSION_REGEXP);
 
+    public static final CassandraVersion CASSANDRA_4_1 = new CassandraVersion("4.1").familyLowerBound.get();
     public static final CassandraVersion CASSANDRA_4_0 = new CassandraVersion("4.0").familyLowerBound.get();
     public static final CassandraVersion CASSANDRA_4_0_RC2 = new CassandraVersion(4, 0, 0, NO_HOTFIX, new String[] {"rc2"}, null);
     public static final CassandraVersion CASSANDRA_3_4 = new CassandraVersion("3.4").familyLowerBound.get();
 
+    /**
+     * Used to indicate that there was a previous version written to the legacy (pre 1.2)
+     * system.Versions table, but that we cannot read it. Suffice to say, any upgrade should
+     * proceed through 1.2.x before upgrading to the current version.
+     */
+    public static final CassandraVersion UNREADABLE_VERSION = new CassandraVersion("0.0.0-unknown");
+
+    /**
+     * Used to indicate that no previous version information was found. When encountered, we assume that
+     * Cassandra was not previously installed and we're in the process of starting a fresh node.
+     */
+    public static final CassandraVersion NULL_VERSION = new CassandraVersion("0.0.0-absent");
+
     public final int major;
     public final int minor;
     public final int patch;
@@ -90,13 +104,13 @@
 
         try
         {
-            this.major = Integer.parseInt(matcher.group(1));
-            this.minor = Integer.parseInt(matcher.group(2));
-            this.patch = matcher.group(3) != null ? Integer.parseInt(matcher.group(3)) : 0;
-            this.hotfix = matcher.group(4) != null ? Integer.parseInt(matcher.group(4)) : NO_HOTFIX;
+            this.major = intPart(matcher, "major");
+            this.minor = intPart(matcher, "minor");
+            this.patch = intPart(matcher, "patch", 0);
+            this.hotfix = intPart(matcher, "hotfix", NO_HOTFIX);
 
-            String pr = matcher.group(5);
-            String bld = matcher.group(6);
+            String pr = matcher.group("prerelease");
+            String bld = matcher.group("build");
 
             this.preRelease = pr == null || pr.isEmpty() ? null : parseIdentifiers(version, pr);
             this.build = bld == null || bld.isEmpty() ? null : parseIdentifiers(version, bld);
@@ -107,6 +121,17 @@
         }
     }
 
+    private static int intPart(Matcher matcher, String group)
+    {
+        return Integer.parseInt(matcher.group(group));
+    }
+
+    private static int intPart(Matcher matcher, String group, int orElse)
+    {
+        String value = matcher.group(group);
+        return value == null ? orElse : Integer.parseInt(value);
+    }
+
     private CassandraVersion getFamilyLowerBound()
     {
         return patch == 0 && hotfix == NO_HOTFIX && preRelease != null && preRelease.length == 0 && build == null
@@ -117,7 +142,6 @@
     private static String[] parseIdentifiers(String version, String str)
     {
         // Drop initial - or +
-        str = str.substring(1);
         String[] parts = StringUtils.split(str, ".-");
         for (String part : parts)
         {
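
Note: the rewritten regexp uses named capture groups, so the constructor reads major/minor/patch/hotfix/prerelease/build by name rather than by position. A minimal parsing sketch with the same pattern (the demo class is illustrative; the pattern string is copied from the patch):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VersionRegexDemo
{
    private static final Pattern P = Pattern.compile(
        "(?<major>\\d+)\\.(?<minor>\\d+)(\\.(?<patch>\\w+)(\\.(?<hotfix>\\w+))?)?(-(?<prerelease>[-.\\w]+))?([.+](?<build>[.\\w]+))?");

    public static void main(String[] args)
    {
        Matcher m = P.matcher("4.1.0-alpha1+build.2");
        if (m.matches())
        {
            System.out.println(m.group("major"));       // 4
            System.out.println(m.group("patch"));       // 0
            System.out.println(m.group("prerelease"));  // alpha1
            System.out.println(m.group("build"));       // build.2
        }
    }
}
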
diff --git a/src/java/org/apache/cassandra/utils/Clock.java b/src/java/org/apache/cassandra/utils/Clock.java
new file mode 100644
index 0000000..7fffefb
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Clock.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils;
+
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.CLOCK_GLOBAL;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Wrapper around time related functions that are either implemented by using the default JVM calls
+ * or by using a custom implementation for testing purposes.
+ *
+ * See {@link Global#instance} for how to use a custom implementation.
+ *
+ * Please note that {@link java.time.Clock} wasn't used, as it would not be possible to provide an
+ * implementation for {@link #nanoTime()} with the exact same properties of {@link System#nanoTime()}.
+ */
+@Shared(scope = SIMULATION)
+public interface Clock
+{
+    public static class Global
+    {
+        // something weird happens with class loading Logger that can cause a deadlock
+        private static Throwable FAILED_TO_INITIALISE;
+        private static String INITIALIZE_MESSAGE;
+
+        /**
+         * Static singleton object that will be instantiated by default with a system clock
+         * implementation. Set <code>cassandra.clock</code> system property to a FQCN to use a
+         * different implementation instead.
+         */
+        private static final Clock instance;
+
+        static
+        {
+            String classname = CLOCK_GLOBAL.getString();
+            Clock clock = new Default();
+            Throwable errorOutcome = null;
+            String outcome = null;
+            if (classname != null)
+            {
+                try
+                {
+                    outcome = "Using custom clock implementation: " + classname;
+                    clock = (Clock) Class.forName(classname).newInstance();
+                }
+                catch (Throwable t)
+                {
+                    outcome = "Failed to load clock implementation " + classname;
+                    errorOutcome = t;
+                }
+            }
+            instance = clock;
+            FAILED_TO_INITIALISE = errorOutcome;
+            INITIALIZE_MESSAGE = outcome;
+        }
+
+        public static void logInitializationOutcome(Logger logger)
+        {
+            if (FAILED_TO_INITIALISE != null)
+            {
+                logger.error(INITIALIZE_MESSAGE, FAILED_TO_INITIALISE);
+            }
+            else if (INITIALIZE_MESSAGE != null)
+            {
+                logger.debug(INITIALIZE_MESSAGE);
+            }
+            FAILED_TO_INITIALISE = null;
+            INITIALIZE_MESSAGE = null;
+        }
+
+        /**
+         * Semantically equivalent to {@link System#nanoTime()}
+         */
+        public static long nanoTime()
+        {
+            return instance.nanoTime();
+        }
+
+        /**
+         * Semantically equivalent to {@link System#currentTimeMillis()}
+         */
+        public static long currentTimeMillis()
+        {
+            return instance.currentTimeMillis();
+        }
+    }
+
+    public static class Default implements Clock
+    {
+        /**
+         * {@link System#nanoTime()}
+         */
+        public long nanoTime()
+        {
+            return System.nanoTime(); // checkstyle: permit system clock
+        }
+
+        /**
+         * {@link System#currentTimeMillis()}
+         */
+        public long currentTimeMillis()
+        {
+            return System.currentTimeMillis(); // checkstyle: permit system clock
+        }
+    }
+
+    /**
+     * Semantically equivalent to {@link System#nanoTime()}
+     */
+    public long nanoTime();
+
+    /**
+     * Semantically equivalent to {@link System#currentTimeMillis()}
+     */
+    public long currentTimeMillis();
+
+    @Intercept
+    public static void waitUntil(long deadlineNanos) throws InterruptedException
+    {
+        long waitNanos = deadlineNanos - Clock.Global.nanoTime();
+        if (waitNanos > 0)
+            TimeUnit.NANOSECONDS.sleep(waitNanos);
+    }
+}
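
Note: per the Javadoc above, a custom Clock can be plugged in by pointing the cassandra.clock property at an implementing class. A hedged sketch of what such a test clock might look like; the class name and package are hypothetical:

import java.util.concurrent.atomic.AtomicLong;

// started with -Dcassandra.clock=org.example.ManualClock (illustrative name)
public class ManualClock implements org.apache.cassandra.utils.Clock
{
    private final AtomicLong nanos = new AtomicLong();

    public long nanoTime()
    {
        return nanos.get();
    }

    public long currentTimeMillis()
    {
        return nanos.get() / 1_000_000L;
    }

    public void advance(long byNanos)
    {
        nanos.addAndGet(byNanos);   // tests drive time forward explicitly
    }
}
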
diff --git a/src/java/org/apache/cassandra/utils/Closeable.java b/src/java/org/apache/cassandra/utils/Closeable.java
new file mode 100644
index 0000000..1a8c1e9
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Closeable.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface Closeable extends java.io.Closeable
+{
+    public void close();
+}
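
Note: narrowing close() to drop the checked IOException means callers never need a try/catch just to close. A tiny self-contained illustration of the override-narrowing idea (QuietCloseable stands in for the Cassandra interface):

public class NarrowedCloseable
{
    interface QuietCloseable extends java.io.Closeable
    {
        @Override
        void close();               // overriding without "throws IOException" is legal narrowing
    }

    public static void main(String[] args)
    {
        QuietCloseable resource = () -> System.out.println("closed");
        resource.close();           // no checked exception to handle
    }
}
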
diff --git a/src/java/org/apache/cassandra/utils/CloseableIterator.java b/src/java/org/apache/cassandra/utils/CloseableIterator.java
index 57034ae..32de799 100644
--- a/src/java/org/apache/cassandra/utils/CloseableIterator.java
+++ b/src/java/org/apache/cassandra/utils/CloseableIterator.java
@@ -18,9 +18,53 @@
 package org.apache.cassandra.utils;
 
 import java.util.Iterator;
+import java.util.NoSuchElementException;
 
 // so we can instantiate anonymous classes implementing both interfaces
 public interface CloseableIterator<T> extends Iterator<T>, AutoCloseable
 {
     public void close();
+
+    public static <T> CloseableIterator<T> wrap(Iterator<T> iter)
+    {
+        return new CloseableIterator<T>()
+        {
+            public void close()
+            {
+                // noop
+            }
+
+            public boolean hasNext()
+            {
+                return iter.hasNext();
+            }
+
+            public T next()
+            {
+                return iter.next();
+            }
+        };
+    }
+
+    public static <T> CloseableIterator<T> empty()
+    {
+        return new CloseableIterator<T>()
+        {
+            public void close()
+            {
+                // noop
+            }
+
+            public boolean hasNext()
+            {
+                return false;
+            }
+
+            public T next()
+            {
+                throw new NoSuchElementException();
+            }
+        };
+    }
+
 }
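
Note: the new wrap() helper lets a plain Iterator be used wherever a CloseableIterator is required, keeping call sites uniform, e.g. in try-with-resources. A brief illustrative usage, assuming the Cassandra utils classes are on the classpath:

import java.util.Arrays;
import java.util.List;
import org.apache.cassandra.utils.CloseableIterator;

public class WrapIteratorDemo
{
    public static void main(String[] args)
    {
        List<String> names = Arrays.asList("a", "b", "c");
        // close() is a no-op for wrapped iterators, but the resource-management shape is preserved
        try (CloseableIterator<String> it = CloseableIterator.wrap(names.iterator()))
        {
            while (it.hasNext())
                System.out.println(it.next());
        }
    }
}
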
diff --git a/src/java/org/apache/cassandra/utils/CollectionSerializer.java b/src/java/org/apache/cassandra/utils/CollectionSerializer.java
new file mode 100644
index 0000000..4f8e8b0
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/CollectionSerializer.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.RandomAccess;
+import java.util.Set;
+import java.util.function.IntFunction;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
+public class CollectionSerializer
+{
+
+    public static <V> void serializeCollection(IVersionedSerializer<V> valueSerializer, Collection<V> values, DataOutputPlus out, int version) throws IOException
+    {
+        out.writeUnsignedVInt(values.size());
+        for (V value : values)
+            valueSerializer.serialize(value, out, version);
+    }
+
+    public static <V, L extends List<V> & RandomAccess> void serializeList(IVersionedSerializer<V> valueSerializer, L values, DataOutputPlus out, int version) throws IOException
+    {
+        int size = values.size();
+        out.writeUnsignedVInt(size);
+        for (int i = 0 ; i < size ; ++i)
+            valueSerializer.serialize(values.get(i), out, version);
+    }
+
+    public static <K, V> void serializeMap(IVersionedSerializer<K> keySerializer, IVersionedSerializer<V> valueSerializer, Map<K, V> map, DataOutputPlus out, int version) throws IOException
+    {
+        out.writeUnsignedVInt(map.size());
+        for (Map.Entry<K, V> e : map.entrySet())
+        {
+            keySerializer.serialize(e.getKey(), out, version);
+            valueSerializer.serialize(e.getValue(), out, version);
+        }
+    }
+
+    public static <V, C extends Collection<? super V>> C deserializeCollection(IVersionedSerializer<V> serializer, IntFunction<C> factory, DataInputPlus in, int version) throws IOException
+    {
+        int size = (int) in.readUnsignedVInt();
+        C result = factory.apply(size);
+        while (size-- > 0)
+            result.add(serializer.deserialize(in, version));
+        return result;
+    }
+
+    public static <K, V, M extends Map<K, V>> M deserializeMap(IVersionedSerializer<K> keySerializer, IVersionedSerializer<V> valueSerializer, IntFunction<M> factory, DataInputPlus in, int version) throws IOException
+    {
+        int size = (int) in.readUnsignedVInt();
+        M result = factory.apply(size);
+        while (size-- > 0)
+        {
+            K key = keySerializer.deserialize(in, version);
+            V value = valueSerializer.deserialize(in, version);
+            result.put(key, value);
+        }
+        return result;
+    }
+
+    public static <V> long serializedSizeCollection(IVersionedSerializer<V> valueSerializer, Collection<V> values, int version)
+    {
+        long size = TypeSizes.sizeofUnsignedVInt(values.size());
+        for (V value : values)
+            size += valueSerializer.serializedSize(value, version);
+        return size;
+    }
+
+    public static <V, L extends List<V> & RandomAccess> long serializedSizeList(IVersionedSerializer<V> valueSerializer, L values, int version) throws IOException
+    {
+        int items = values.size();
+        long size = TypeSizes.sizeofUnsignedVInt(items);
+        for (int i = 0 ; i < items ; ++i)
+            size += valueSerializer.serializedSize(values.get(i), version);
+        return size;
+    }
+
+
+    public static <K, V> long serializedSizeMap(IVersionedSerializer<K> keySerializer, IVersionedSerializer<V> valueSerializer, Map<K, V> map, int version)
+    {
+        long size = TypeSizes.sizeofUnsignedVInt(map.size());
+        for (Map.Entry<K, V> e : map.entrySet())
+            size += keySerializer.serializedSize(e.getKey(), version)
+                  + valueSerializer.serializedSize(e.getValue(), version);
+        return size;
+    }
+
+    public static <V> IntFunction<Set<V>> newHashSet()
+    {
+        return i -> i == 0 ? Collections.emptySet() : Sets.newHashSetWithExpectedSize(i);
+    }
+
+    public static <K, V> IntFunction<Map<K, V>> newHashMap()
+    {
+        return i -> i == 0 ? Collections.emptyMap() : Maps.newHashMapWithExpectedSize(i);
+    }
+
+}
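
Note: all of these helpers share one framing: an unsigned-vint element count followed by the entries, with deserialization pre-sizing the target collection from that count. A plain-JDK sketch of the count-prefixed framing, using a fixed-width int where the patch uses a vint:

import java.io.*;
import java.util.*;

public class CountPrefixedFraming
{
    static void writeMap(Map<String, Integer> map, DataOutput out) throws IOException
    {
        out.writeInt(map.size());                   // element count first (vint in the patch)
        for (Map.Entry<String, Integer> e : map.entrySet())
        {
            out.writeUTF(e.getKey());
            out.writeInt(e.getValue());
        }
    }

    static Map<String, Integer> readMap(DataInput in) throws IOException
    {
        int size = in.readInt();
        Map<String, Integer> result = new HashMap<>(size);   // pre-sized from the decoded count
        while (size-- > 0)
            result.put(in.readUTF(), in.readInt());
        return result;
    }

    public static void main(String[] args) throws IOException
    {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeMap(Collections.singletonMap("tokens", 256), new DataOutputStream(bytes));
        System.out.println(readMap(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
    }
}
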
diff --git a/src/java/org/apache/cassandra/utils/Collectors3.java b/src/java/org/apache/cassandra/utils/Collectors3.java
index f8f262e..c48f160 100644
--- a/src/java/org/apache/cassandra/utils/Collectors3.java
+++ b/src/java/org/apache/cassandra/utils/Collectors3.java
@@ -18,22 +18,24 @@
 
 package org.apache.cassandra.utils;
 
-import java.util.List;
-import java.util.Set;
+import java.util.Map;
+import java.util.function.Function;
 import java.util.stream.Collector;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 
 /**
  * Some extra Collector implementations.
- *
+ * <p>
  * Named Collectors3 just in case Guava ever makes a Collectors2
  */
 public class Collectors3
 {
-    private static final Collector.Characteristics[] LIST_CHARACTERISTICS = new Collector.Characteristics[] { };
-    public static <T>  Collector<T, ?, List<T>> toImmutableList()
+    private static final Collector.Characteristics[] LIST_CHARACTERISTICS = new Collector.Characteristics[]{};
+
+    public static <T> Collector<T, ?, ImmutableList<T>> toImmutableList()
     {
         return Collector.of(ImmutableList.Builder<T>::new,
                             ImmutableList.Builder<T>::add,
@@ -42,8 +44,9 @@
                             LIST_CHARACTERISTICS);
     }
 
-    private static final Collector.Characteristics[] SET_CHARACTERISTICS = new Collector.Characteristics[] { Collector.Characteristics.UNORDERED };
-    public static <T>  Collector<T, ?, Set<T>> toImmutableSet()
+    private static final Collector.Characteristics[] SET_CHARACTERISTICS = new Collector.Characteristics[]{ Collector.Characteristics.UNORDERED };
+
+    public static <T> Collector<T, ?, ImmutableSet<T>> toImmutableSet()
     {
         return Collector.of(ImmutableSet.Builder<T>::new,
                             ImmutableSet.Builder<T>::add,
@@ -51,4 +54,25 @@
                             ImmutableSet.Builder<T>::build,
                             SET_CHARACTERISTICS);
     }
+
+    private static final Collector.Characteristics[] MAP_CHARACTERISTICS = new Collector.Characteristics[]{ Collector.Characteristics.UNORDERED };
+
+    public static <K, V> Collector<Map.Entry<K, V>, ?, ImmutableMap<K, V>> toImmutableMap()
+    {
+        return Collector.of(ImmutableMap.Builder<K, V>::new,
+                            ImmutableMap.Builder<K, V>::put,
+                            (l, r) -> l.putAll(r.build()),
+                            ImmutableMap.Builder<K, V>::build,
+                            MAP_CHARACTERISTICS);
+    }
+
+    public static <T, K, V> Collector<T, ?, ImmutableMap<K, V>> toImmutableMap(Function<? super T, ? extends K> keyMapper, Function<? super T, ? extends V> valueMapper)
+    {
+        return Collector.of(ImmutableMap.Builder<K, V>::new,
+                            (b, t) -> b.put(keyMapper.apply(t), valueMapper.apply(t)),
+                            (l, r) -> l.putAll(r.build()),
+                            ImmutableMap.Builder::build,
+                            MAP_CHARACTERISTICS);
+    }
+
 }
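
Note: the added collectors gather a stream straight into a Guava ImmutableMap; the two-mapper overload mirrors java.util.stream.Collectors.toMap. A brief illustrative usage (keys must be distinct, since ImmutableMap.Builder rejects duplicates):

import java.util.stream.Stream;
import com.google.common.collect.ImmutableMap;
import org.apache.cassandra.utils.Collectors3;

public class CollectorsDemo
{
    public static void main(String[] args)
    {
        ImmutableMap<String, Integer> lengths =
            Stream.of("node1", "dc", "rack")
                  .collect(Collectors3.toImmutableMap(s -> s, String::length));
        System.out.println(lengths);    // {node1=5, dc=2, rack=4}
    }
}
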
diff --git a/src/java/org/apache/cassandra/utils/CounterId.java b/src/java/org/apache/cassandra/utils/CounterId.java
index b138aac..fc0385e 100644
--- a/src/java/org/apache/cassandra/utils/CounterId.java
+++ b/src/java/org/apache/cassandra/utils/CounterId.java
@@ -22,6 +22,8 @@
 
 import org.apache.cassandra.db.SystemKeyspace;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;
+
 public class CounterId implements Comparable<CounterId>
 {
     public static final int LENGTH = 16; // we assume a fixed length size for all CounterIds
@@ -84,7 +86,7 @@
 
     public static CounterId generate()
     {
-        return new CounterId(ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes()));
+        return new CounterId(ByteBuffer.wrap(nextTimeUUIDAsBytes()));
     }
 
     /*
diff --git a/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java b/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java
index d1f33ed..ab2d67e 100644
--- a/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java
+++ b/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils;
 
-import java.net.InetAddress;
 import java.time.LocalDate;
 import java.time.format.DateTimeFormatter;
 import java.util.concurrent.*;
@@ -29,7 +28,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.Message;
@@ -37,7 +35,9 @@
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
-import org.hsqldb.Table;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 
 /**
  * Provides a means to take snapshots when triggered by anomalous events or when the breaking of invariants is
@@ -64,7 +64,7 @@
     private static final Logger logger = LoggerFactory.getLogger(DiagnosticSnapshotService.class);
 
     public static final DiagnosticSnapshotService instance =
-        new DiagnosticSnapshotService(Executors.newSingleThreadExecutor(new NamedThreadFactory("DiagnosticSnapshot")));
+        new DiagnosticSnapshotService(executorFactory().sequential("DiagnosticSnapshot"));
 
     public static final String REPAIRED_DATA_MISMATCH_SNAPSHOT_PREFIX = "RepairedDataMismatch-";
     public static final String DUPLICATE_ROWS_DETECTED_SNAPSHOT_PREFIX = "DuplicateRows-";
@@ -119,7 +119,7 @@
 
     private void maybeTriggerSnapshot(TableMetadata metadata, String prefix, Iterable<InetAddressAndPort> endpoints)
     {
-        long now = System.nanoTime();
+        long now = nanoTime();
         AtomicLong cached = lastSnapshotTimes.computeIfAbsent(metadata.id, u -> new AtomicLong(0));
         long last = cached.get();
         long interval = Long.getLong("cassandra.diagnostic_snapshot_interval_nanos", SNAPSHOT_INTERVAL_NANOS);
diff --git a/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java b/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java
index c1fb6e0..94f9b22 100644
--- a/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java
+++ b/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.FileVisitResult;
 import java.nio.file.Path;
@@ -30,13 +29,11 @@
  */
 public class DirectorySizeCalculator extends SimpleFileVisitor<Path>
 {
-    protected volatile long size = 0;
-    protected final File path;
+    private volatile long size = 0;
 
-    public DirectorySizeCalculator(File path)
+    public DirectorySizeCalculator()
     {
         super();
-        this.path = path;
     }
 
     public boolean isAcceptable(Path file)
@@ -53,7 +50,7 @@
     }
 
     @Override
-    public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException
+    public FileVisitResult visitFileFailed(Path file, IOException exc)
     {
         return FileVisitResult.CONTINUE;
     }
@@ -62,4 +59,12 @@
     {
         return size;
     }
+
+    /**
+     * Reset the size to 0 in case the size calculator is used multiple times
+     */
+    protected void resetSize()
+    {
+        size = 0;
+    }
 }
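
Note: dropping the constructor argument and adding resetSize() allows a single visitor instance to be reused across walks. A generic sketch of that reuse pattern with a java.nio visitor; the class below is illustrative, not the Cassandra one:

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

public class ReusableSizeVisitor extends SimpleFileVisitor<Path>
{
    private long size;

    @Override
    public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
    {
        size += attrs.size();
        return FileVisitResult.CONTINUE;
    }

    public long size()  { return size; }
    public void reset() { size = 0; }       // call between walks, mirroring resetSize()

    public static long sizeOf(Path dir, ReusableSizeVisitor visitor) throws IOException
    {
        visitor.reset();
        Files.walkFileTree(dir, visitor);
        return visitor.size();
    }
}
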
diff --git a/src/java/org/apache/cassandra/utils/EstimatedHistogram.java b/src/java/org/apache/cassandra/utils/EstimatedHistogram.java
index a494b3a..198f922 100644
--- a/src/java/org/apache/cassandra/utils/EstimatedHistogram.java
+++ b/src/java/org/apache/cassandra/utils/EstimatedHistogram.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.concurrent.atomic.AtomicLongArray;
+import java.util.function.DoubleToLongFunction;
 
 import com.google.common.base.Objects;
 import org.slf4j.Logger;
@@ -30,10 +31,12 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 
-public class EstimatedHistogram
+public class EstimatedHistogram implements DoubleToLongFunction
 {
     public static final EstimatedHistogramSerializer serializer = new EstimatedHistogramSerializer();
 
+    public static final int DEFAULT_BUCKET_COUNT = 90;
+
     /**
      * The series of values to which the counts in `buckets` correspond:
      * 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc.
@@ -52,7 +55,7 @@
 
     public EstimatedHistogram()
     {
-        this(90);
+        this(DEFAULT_BUCKET_COUNT);
     }
 
     public EstimatedHistogram(int bucketCount)
@@ -382,6 +385,12 @@
         return Objects.hashCode(getBucketOffsets(), getBuckets(false));
     }
 
+    @Override
+    public long applyAsLong(double value)
+    {
+        return percentile(value);
+    }
+
     public static class EstimatedHistogramSerializer implements ISerializer<EstimatedHistogram>
     {
         private static final Logger logger = LoggerFactory.getLogger(EstimatedHistogramSerializer.class);
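
Note: implementing DoubleToLongFunction lets a histogram be handed to any API that just wants quantile lookups, without exposing EstimatedHistogram itself. A hedged sketch of such a call site; the summarize method is made up:

import java.util.function.DoubleToLongFunction;

public class PercentileReport
{
    // the caller only needs "quantile in, value out"; an EstimatedHistogram satisfies this directly
    static String summarize(DoubleToLongFunction percentiles)
    {
        return "p50=" + percentiles.applyAsLong(0.5) +
               " p99=" + percentiles.applyAsLong(0.99);
    }

    public static void main(String[] args)
    {
        // any DoubleToLongFunction works; a linear stub stands in for a real histogram here
        System.out.println(summarize(q -> (long) (q * 1000)));
    }
}
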
diff --git a/src/java/org/apache/cassandra/utils/ExecutorUtils.java b/src/java/org/apache/cassandra/utils/ExecutorUtils.java
index 21933a3..5bb841f 100644
--- a/src/java/org/apache/cassandra/utils/ExecutorUtils.java
+++ b/src/java/org/apache/cassandra/utils/ExecutorUtils.java
@@ -24,9 +24,10 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.cassandra.concurrent.InfiniteLoopExecutor;
+import org.apache.cassandra.concurrent.Shutdownable;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class ExecutorUtils
 {
@@ -66,8 +67,11 @@
                 if (interrupt) ((ExecutorService) executor).shutdownNow();
                 else ((ExecutorService) executor).shutdown();
             }
-            else if (executor instanceof InfiniteLoopExecutor)
-                ((InfiniteLoopExecutor) executor).shutdownNow();
+            else if (executor instanceof Shutdownable)
+            {
+                if (interrupt) ((Shutdownable) executor).shutdownNow();
+                else ((Shutdownable) executor).shutdown();
+            }
             else if (executor instanceof Thread)
                 ((Thread) executor).interrupt();
             else if (executor != null)
@@ -92,7 +96,7 @@
 
     public static void awaitTermination(long timeout, TimeUnit unit, Collection<?> executors) throws InterruptedException, TimeoutException
     {
-        long deadline = System.nanoTime() + unit.toNanos(timeout);
+        long deadline = nanoTime() + unit.toNanos(timeout);
         awaitTerminationUntil(deadline, executors);
     }
 
@@ -100,15 +104,15 @@
     {
         for (Object executor : executors)
         {
-            long wait = deadline - System.nanoTime();
+            long wait = deadline - nanoTime();
             if (executor instanceof ExecutorService)
             {
                 if (wait <= 0 || !((ExecutorService)executor).awaitTermination(wait, NANOSECONDS))
                     throw new TimeoutException(executor + " did not terminate on time");
             }
-            else if (executor instanceof InfiniteLoopExecutor)
+            else if (executor instanceof Shutdownable)
             {
-                if (wait <= 0 || !((InfiniteLoopExecutor)executor).awaitTermination(wait, NANOSECONDS))
+                if (wait <= 0 || !((Shutdownable)executor).awaitTermination(wait, NANOSECONDS))
                     throw new TimeoutException(executor + " did not terminate on time");
             }
             else if (executor instanceof Thread)
@@ -148,4 +152,4 @@
     {
         shutdownNowAndWait(timeout, unit, Arrays.asList(executors));
     }
-}
\ No newline at end of file
+}
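
Note: the termination helpers compute one absolute deadline from nanoTime() and spend the remaining budget across all executors in turn. A small self-contained sketch of that deadline-budget pattern:

import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DeadlineAwait
{
    static void awaitAll(long timeout, TimeUnit unit, Collection<ExecutorService> executors)
        throws InterruptedException, TimeoutException
    {
        long deadline = System.nanoTime() + unit.toNanos(timeout);  // one budget shared by all
        for (ExecutorService executor : executors)
        {
            long wait = deadline - System.nanoTime();               // whatever budget remains
            if (wait <= 0 || !executor.awaitTermination(wait, TimeUnit.NANOSECONDS))
                throw new TimeoutException(executor + " did not terminate on time");
        }
    }
}
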
diff --git a/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java b/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
index 1736ae2..02aa09d 100644
--- a/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
+++ b/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
@@ -25,6 +25,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * An implementation similar to Guava's Suppliers.memoizeWithExpiration(Supplier)
  * but allowing for memoization to be skipped.
@@ -59,7 +61,7 @@
         // the extra memory consumption and indirection are more
         // expensive than the extra volatile reads.
         long nanos = this.expirationNanos;
-        long now = System.nanoTime();
+        long now = nanoTime();
         if (nanos == 0L || now - nanos >= 0L) {
             synchronized(this) {
                 if (nanos == this.expirationNanos) {
@@ -69,8 +71,7 @@
                     else
                         return t.value();
 
-                    nanos = now + this.durationNanos;
-                    this.expirationNanos = nanos == 0L ? 1L : nanos;
+                    this.expirationNanos = now + this.durationNanos;
                     return t.value();
                 }
             }
@@ -79,7 +80,7 @@
     }
 
     @VisibleForTesting
-    public void expire()
+    public synchronized void expire()
     {
         this.expirationNanos = 0;
     }
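
Note: for context, the supplier follows the usual double-checked memoization-with-expiration pattern: a volatile expiration timestamp is read outside the lock, and recomputation plus timestamp update happen inside it. A condensed sketch of that pattern with simplified names (not the Cassandra class itself):

import java.util.function.Supplier;

public class ExpiringValue<T>
{
    private final Supplier<T> delegate;
    private final long durationNanos;
    private volatile long expirationNanos;      // 0 means "not yet computed / force recompute"
    private volatile T value;

    public ExpiringValue(Supplier<T> delegate, long durationNanos)
    {
        this.delegate = delegate;
        this.durationNanos = durationNanos;
    }

    public T get()
    {
        long nanos = expirationNanos;
        long now = System.nanoTime();
        if (nanos == 0L || now - nanos >= 0L)
        {
            synchronized (this)
            {
                if (nanos == expirationNanos)    // recheck: another thread may have refreshed already
                {
                    value = delegate.get();
                    expirationNanos = now + durationNanos;
                }
            }
        }
        return value;
    }
}
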
diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java
index 63ffc63..6d210ce 100644
--- a/src/java/org/apache/cassandra/utils/FBUtilities.java
+++ b/src/java/org/apache/cassandra/utils/FBUtilities.java
@@ -17,15 +17,26 @@
  */
 package org.apache.cassandra.utils;
 
-import java.io.*;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
 import java.lang.reflect.Field;
 import java.math.BigInteger;
 import java.net.*;
 import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.time.Instant;
 import java.util.*;
-import java.util.concurrent.*;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeoutException;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
@@ -34,16 +45,21 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
-import com.google.common.base.Strings;
-import com.google.common.util.concurrent.Uninterruptibles;
+
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
+import org.apache.cassandra.utils.concurrent.*;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.cassandra.auth.AllowAllNetworkAuthorizer;
 import org.apache.cassandra.audit.IAuditLogger;
+import org.apache.cassandra.auth.AllowAllNetworkAuthorizer;
 import org.apache.cassandra.auth.IAuthenticator;
 import org.apache.cassandra.auth.IAuthorizer;
 import org.apache.cassandra.auth.INetworkAuthorizer;
@@ -66,29 +82,34 @@
 import org.apache.cassandra.io.util.DataOutputBufferFixed;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.security.ISslContextFactory;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.LINE_SEPARATOR;
 import static org.apache.cassandra.config.CassandraRelevantProperties.USER_HOME;
+import static org.apache.cassandra.io.util.File.WriteMode.OVERWRITE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 
 public class FBUtilities
 {
+    private static final ObjectMapper jsonMapper = new ObjectMapper(new JsonFactory());
+
     static
     {
         preventIllegalAccessWarnings();
+        jsonMapper.registerModule(new JavaTimeModule());
+        jsonMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
     }
 
     private static final Logger logger = LoggerFactory.getLogger(FBUtilities.class);
-
-    private static final ObjectMapper jsonMapper = new ObjectMapper(new JsonFactory());
-
     public static final String UNKNOWN_RELEASE_VERSION = "Unknown";
 
     public static final BigInteger TWO = new BigInteger("2");
     private static final String DEFAULT_TRIGGER_DIR = "triggers";
 
     private static final String OPERATING_SYSTEM = System.getProperty("os.name").toLowerCase();
-    public static final boolean isWindows = OPERATING_SYSTEM.contains("windows");
     public static final boolean isLinux = OPERATING_SYSTEM.contains("linux");
 
     private static volatile InetAddress localInetAddress;
@@ -100,11 +121,17 @@
 
     private static volatile String previousReleaseVersionString;
 
+    private static int availableProcessors = Integer.getInteger("cassandra.available_processors", DatabaseDescriptor.getAvailableProcessors());
+
+    public static void setAvailableProcessors(int value)
+    {
+        availableProcessors = value;
+    }
+
     public static int getAvailableProcessors()
     {
-        String availableProcessors = System.getProperty("cassandra.available_processors");
-        if (!Strings.isNullOrEmpty(availableProcessors))
-            return Integer.parseInt(availableProcessors);
+        if (availableProcessors > 0)
+            return availableProcessors;
         else
             return Runtime.getRuntime().availableProcessors();
     }
@@ -226,7 +253,7 @@
      */
     public static void setBroadcastInetAddressAndPort(InetAddressAndPort addr)
     {
-        broadcastInetAddress = addr.address;
+        broadcastInetAddress = addr.getAddress();
         broadcastInetAddressAndPort = addr;
     }
 
@@ -358,7 +385,7 @@
         if (scpurl == null)
             throw new ConfigurationException("unable to locate " + filename);
 
-        return new File(scpurl.getFile()).getAbsolutePath();
+        return new File(scpurl.getFile()).absolutePath();
     }
 
     public static File cassandraTriggerDir()
@@ -426,12 +453,18 @@
     {
         // we use microsecond resolution for compatibility with other client libraries, even though
         // we can't actually get microsecond precision.
-        return System.currentTimeMillis() * 1000;
+        return currentTimeMillis() * 1000;
     }
 
     public static int nowInSeconds()
     {
-        return (int) (System.currentTimeMillis() / 1000);
+        return (int) (currentTimeMillis() / 1000);
+    }
+
+    public static Instant now()
+    {
+        long epochMilli = currentTimeMillis();
+        return Instant.ofEpochMilli(epochMilli);
     }
 
     public static <T> List<T> waitOnFutures(Iterable<? extends Future<? extends T>> futures)
@@ -451,7 +484,7 @@
     {
         long endNanos = 0;
         if (timeout > 0)
-            endNanos = System.nanoTime() + units.toNanos(timeout);
+            endNanos = nanoTime() + units.toNanos(timeout);
         List<T> results = new ArrayList<>();
         Throwable fail = null;
         for (Future<? extends T> f : futures)
@@ -464,7 +497,7 @@
                 }
                 else
                 {
-                    long waitFor = Math.max(1, endNanos - System.nanoTime());
+                    long waitFor = Math.max(1, endNanos - nanoTime());
                     results.add(f.get(waitFor, TimeUnit.NANOSECONDS));
                 }
             }
@@ -485,15 +518,15 @@
         }
         catch (ExecutionException ee)
         {
-            throw new RuntimeException(ee);
+            throw Throwables.cleaned(ee);
         }
         catch (InterruptedException ie)
         {
-            throw new AssertionError(ie);
+            throw new UncheckedInterruptedException(ie);
         }
     }
 
-    public static <T> Future<? extends T> waitOnFirstFuture(Iterable<? extends Future<? extends T>> futures)
+    public static <T, F extends Future<? extends T>> F waitOnFirstFuture(Iterable<? extends F> futures)
     {
         return waitOnFirstFuture(futures, 100);
     }
@@ -502,105 +535,49 @@
      * @param futures The futures to wait on
      * @return future that completed.
      */
-    public static <T> Future<? extends T> waitOnFirstFuture(Iterable<? extends Future<? extends T>> futures, long delay)
+    public static <T, F extends Future<? extends T>> F waitOnFirstFuture(Iterable<? extends F> futures, long delay)
     {
         while (true)
         {
-            for (Future<? extends T> f : futures)
+            Iterator<? extends F> iter = futures.iterator();
+            if (!iter.hasNext())
+                throw new IllegalArgumentException();
+
+            while (true)
             {
-                if (f.isDone())
+                F f = iter.next();
+                boolean isDone;
+                if ((isDone = f.isDone()) || !iter.hasNext())
                 {
                     try
                     {
-                        f.get();
+                        f.get(delay, TimeUnit.MILLISECONDS);
                     }
                     catch (InterruptedException e)
                     {
-                        throw new AssertionError(e);
+                        throw new UncheckedInterruptedException(e);
                     }
                     catch (ExecutionException e)
                     {
                         throw new RuntimeException(e);
                     }
+                    catch (TimeoutException e)
+                    {
+                        if (!isDone) // prevent infinite loops on bad implementations (not encountered)
+                            break;
+                    }
                     return f;
                 }
             }
-            Uninterruptibles.sleepUninterruptibly(delay, TimeUnit.MILLISECONDS);
         }
     }
 
     /**
      * Returns a new {@link Future} wrapping the given list of futures and returning a list of their results.
      */
-    public static Future<List> allOf(Collection<Future<?>> futures)
+    public static <T> org.apache.cassandra.utils.concurrent.Future<List<T>> allOf(Collection<? extends org.apache.cassandra.utils.concurrent.Future<? extends T>> futures)
     {
-        if (futures.isEmpty())
-            return CompletableFuture.completedFuture(null);
-
-        return new Future<List>()
-        {
-            @Override
-            @SuppressWarnings("unchecked")
-            public List get() throws InterruptedException, ExecutionException
-            {
-                List result = new ArrayList<>(futures.size());
-                for (Future current : futures)
-                {
-                    result.add(current.get());
-                }
-                return result;
-            }
-
-            @Override
-            @SuppressWarnings("unchecked")
-            public List get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
-            {
-                List result = new ArrayList<>(futures.size());
-                long deadline = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, unit);
-                for (Future current : futures)
-                {
-                    long remaining = deadline - System.nanoTime();
-                    if (remaining <= 0)
-                        throw new TimeoutException();
-
-                    result.add(current.get(remaining, TimeUnit.NANOSECONDS));
-                }
-                return result;
-            }
-
-            @Override
-            public boolean cancel(boolean mayInterruptIfRunning)
-            {
-                for (Future current : futures)
-                {
-                    if (!current.cancel(mayInterruptIfRunning))
-                        return false;
-                }
-                return true;
-            }
-
-            @Override
-            public boolean isCancelled()
-            {
-                for (Future current : futures)
-                {
-                    if (!current.isCancelled())
-                        return false;
-                }
-                return true;
-            }
-
-            @Override
-            public boolean isDone()
-            {
-                for (Future current : futures)
-                {
-                    if (!current.isDone())
-                        return false;
-                }
-                return true;
-            }
-        };
+        return FutureCombiner.allOf(futures);
     }
 
     /**
@@ -670,7 +647,7 @@
         }
         return FBUtilities.construct(className, "network authorizer");
     }
-    
+
     public static IAuditLogger newAuditLogger(String className, Map<String, String> parameters) throws ConfigurationException
     {
         if (!className.contains("."))
@@ -678,7 +655,7 @@
 
         try
         {
-            Class<?> auditLoggerClass = Class.forName(className);
+            Class<?> auditLoggerClass = FBUtilities.classForName(className, "Audit logger");
             return (IAuditLogger) auditLoggerClass.getConstructor(Map.class).newInstance(parameters);
         }
         catch (Exception ex)
@@ -687,20 +664,20 @@
         }
     }
 
-    public static boolean isAuditLoggerClassExists(String className)
+    public static ISslContextFactory newSslContextFactory(String className, Map<String,Object> parameters) throws ConfigurationException
     {
         if (!className.contains("."))
-            className = "org.apache.cassandra.audit." + className;
+            className = "org.apache.cassandra.security." + className;
 
         try
         {
-            FBUtilities.classForName(className, "Audit logger");
+            Class<?> sslContextFactoryClass = Class.forName(className);
+            return (ISslContextFactory) sslContextFactoryClass.getConstructor(Map.class).newInstance(parameters);
         }
-        catch (ConfigurationException e)
+        catch (Exception ex)
         {
-            return false;
+            throw new ConfigurationException("Unable to create instance of ISslContextFactory for " + className, ex);
         }
-        return true;
     }
 
     /**
@@ -866,6 +843,22 @@
         }
     }
 
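+    /**
+     * Writes {@code object} to {@code outputFile} as JSON, replacing any existing contents
+     * (the stream is opened with OVERWRITE); {@link #deserializeFromJsonFile} reads it back.
+     */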
+    public static void serializeToJsonFile(Object object, File outputFile) throws IOException
+    {
+        try (FileOutputStreamPlus out = outputFile.newOutputStream(OVERWRITE))
+        {
+            jsonMapper.writeValue((OutputStream) out, object);
+        }
+    }
+
+    public static <T> T deserializeFromJsonFile(Class<T> tClass, File file) throws IOException
+    {
+        try (FileInputStreamPlus in = file.newInputStream())
+        {
+            return jsonMapper.readValue((InputStream) in, tClass);
+        }
+    }
+
     public static String prettyPrintMemory(long size)
     {
         return prettyPrintMemory(size, false);
@@ -930,7 +923,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new AssertionError(e);
+            throw new UncheckedInterruptedException(e);
         }
     }
 
@@ -1086,7 +1079,7 @@
         }
         catch (InterruptedException e)
         {
-            throw new RuntimeException(e);
+            throw new UncheckedInterruptedException(e);
         }
     }
 
@@ -1128,4 +1121,24 @@
             // ignore
         }
     }
+
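+    /**
+     * Converts a camelCase string to snake_case; for example, {@code camelToSnake("pathToDataFile")}
+     * returns {@code "path_to_data_file"}. A leading uppercase character is lowercased without
+     * inserting a leading underscore.
+     */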
+    public static String camelToSnake(String camel)
+    {
+        StringBuilder sb = new StringBuilder();
+        for (char c : camel.toCharArray())
+        {
+            if (Character.isUpperCase(c))
+            {
+                // if first char is uppercase, then avoid adding the _ prefix
+                if (sb.length() > 0)
+                    sb.append('_');
+                sb.append(Character.toLowerCase(c));
+            }
+            else
+            {
+                sb.append(c);
+            }
+        }
+        return sb.toString();
+    }
 }
diff --git a/src/java/org/apache/cassandra/utils/GuidGenerator.java b/src/java/org/apache/cassandra/utils/GuidGenerator.java
index aa3ee5b..46843b4 100644
--- a/src/java/org/apache/cassandra/utils/GuidGenerator.java
+++ b/src/java/org/apache/cassandra/utils/GuidGenerator.java
@@ -22,6 +22,7 @@
 import java.util.Random;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_SECURITY_EGD;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class GuidGenerator
 {
@@ -90,7 +91,7 @@
 
     public static ByteBuffer guidAsBytes()
     {
-        return guidAsBytes(myRand, s_id, System.currentTimeMillis());
+        return guidAsBytes(myRand, s_id, currentTimeMillis());
     }
 
     /*
diff --git a/src/java/org/apache/cassandra/utils/HeapUtils.java b/src/java/org/apache/cassandra/utils/HeapUtils.java
index 4dd0d46..c0910d8 100644
--- a/src/java/org/apache/cassandra/utils/HeapUtils.java
+++ b/src/java/org/apache/cassandra/utils/HeapUtils.java
@@ -17,9 +17,13 @@
  */
 package org.apache.cassandra.utils;
 
-import java.io.*;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
 import java.lang.management.ManagementFactory;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.text.StrBuilder;
 
@@ -81,14 +85,8 @@
         if (javaHome == null)
             return null;
         File javaBinDirectory = new File(javaHome, "bin");
-        File[] files = javaBinDirectory.listFiles(new FilenameFilter()
-        {
-            public boolean accept(File dir, String name)
-            {
-                return name.startsWith("jcmd");
-            }
-        });
-        return ArrayUtils.isEmpty(files) ? null : files[0].getPath();
+        File[] files = javaBinDirectory.tryList((dir, name) -> name.startsWith("jcmd"));
+        return ArrayUtils.isEmpty(files) ? null : files[0].path();
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/utils/Hex.java b/src/java/org/apache/cassandra/utils/Hex.java
index 9163067..b8044b8 100644
--- a/src/java/org/apache/cassandra/utils/Hex.java
+++ b/src/java/org/apache/cassandra/utils/Hex.java
@@ -86,6 +86,23 @@
         return wrapCharArray(c);
     }
 
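+    /**
+     * Parses the substring {@code hex[start, end)} as an unsigned hexadecimal value into a long.
+     * At most 16 digits are accepted, and digits are assumed to be lowercase {@code 0-9a-f};
+     * no further validation is performed.
+     */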
+    public static long parseLong(String hex, int start, int end)
+    {
+        int len = end - start;
+        if (len > 16)
+            throw new IllegalArgumentException();
+
+        long result = 0;
+        int shift = 4 * (len - 1);
+        for (int i = start ; i < end ; ++i)
+        {
+            char c = hex.charAt(i);
+            result |= (long)(c - (c >= 'a' ? 'a' - 10 : '0')) << shift;
+            shift -= 4;
+        }
+        return result;
+    }
+
     /**
      * Create a String from a char array with zero-copy (if available), using reflection to access a package-protected constructor of String.
      * */
diff --git a/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java b/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java
index 597e5bb..bd2f70a 100644
--- a/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java
+++ b/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java
@@ -35,4 +35,4 @@
      * @throws java.util.NoSuchElementException if next() returned null
      */
     public int indexOfCurrent();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/Int32Serializer.java b/src/java/org/apache/cassandra/utils/Int32Serializer.java
new file mode 100644
index 0000000..731f5aa
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Int32Serializer.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.io.IOException;
+
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
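+/**
+ * Versioned serializer for boxed ints, written as a plain 4-byte value irrespective of version.
+ */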
+public class Int32Serializer implements IVersionedSerializer<Integer>
+{
+    public static final Int32Serializer serializer = new Int32Serializer();
+
+    public void serialize(Integer t, DataOutputPlus out, int version) throws IOException
+    {
+        out.writeInt(t);
+    }
+
+    public Integer deserialize(DataInputPlus in, int version) throws IOException
+    {
+        return in.readInt();
+    }
+
+    public long serializedSize(Integer t, int version)
+    {
+        return TypeSizes.sizeof(t.intValue());
+    }
+}
diff --git a/src/java/org/apache/cassandra/utils/Int64Serializer.java b/src/java/org/apache/cassandra/utils/Int64Serializer.java
new file mode 100644
index 0000000..3f65d60
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Int64Serializer.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.io.IOException;
+
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
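+/**
+ * Versioned serializer for boxed longs, written as a plain 8-byte value irrespective of version.
+ */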
+public class Int64Serializer implements IVersionedSerializer<Long>
+{
+    public static final Int64Serializer serializer = new Int64Serializer();
+
+    @Override
+    public void serialize(Long t, DataOutputPlus out, int version) throws IOException
+    {
+        out.writeLong(t);
+    }
+
+    @Override
+    public Long deserialize(DataInputPlus in, int version) throws IOException
+    {
+        return in.readLong();
+    }
+
+    @Override
+    public long serializedSize(Long t, int version)
+    {
+        return TypeSizes.sizeof(t);
+    }
+}
diff --git a/src/java/org/apache/cassandra/utils/Intercept.java b/src/java/org/apache/cassandra/utils/Intercept.java
new file mode 100644
index 0000000..b81947b
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Intercept.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+// a marker to indicate that the method is intercepted by the Simulator
+@Retention(RetentionPolicy.SOURCE)
+@Target({ ElementType.METHOD })
+public @interface Intercept
+{
+}
diff --git a/src/java/org/apache/cassandra/utils/IntervalTree.java b/src/java/org/apache/cassandra/utils/IntervalTree.java
index f761180..35ec614 100644
--- a/src/java/org/apache/cassandra/utils/IntervalTree.java
+++ b/src/java/org/apache/cassandra/utils/IntervalTree.java
@@ -23,7 +23,6 @@
 import java.util.*;
 
 import com.google.common.base.Joiner;
-import org.apache.cassandra.utils.AbstractIterator;
 import com.google.common.collect.Iterators;
 
 import org.slf4j.Logger;
diff --git a/src/java/org/apache/cassandra/utils/Isolated.java b/src/java/org/apache/cassandra/utils/Isolated.java
new file mode 100644
index 0000000..d132b73
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Isolated.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Tells jvm-dtest that a class should be isolated and loaded into the instance class loader.
+ *
+ * Jvm-dtest relies on classloader isolation to run multiple Cassandra instances in the same JVM; this means
+ * that some classes do not get shared (outside a blessed set of classes/packages). When the default behavior
+ * is not desirable, this annotation tells jvm-dtest to isolate the class across all class loaders.
+ *
+ * This is the opposite of {@link Shared}.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ ElementType.TYPE })
+public @interface Isolated
+{
+}
diff --git a/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java b/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java
index 4c0f972..1d3c09f 100644
--- a/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java
+++ b/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java
@@ -19,16 +19,20 @@
 
 import java.io.FileNotFoundException;
 import java.net.SocketException;
+import java.nio.file.FileSystemException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
 
 import org.apache.cassandra.exceptions.UnrecoverableIllegalStateException;
+import org.apache.cassandra.metrics.StorageMetrics;
+import org.apache.cassandra.tracing.Tracing;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -40,6 +44,7 @@
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 /**
  * Responsible for deciding whether to kill the JVM if it gets in an "unstable" state (think OOM).
@@ -57,6 +62,20 @@
 
     private JVMStabilityInspector() {}
 
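+    /**
+     * Hook intended for use as an uncaught exception handler: increments the uncaught-exception
+     * metric (if metrics are initialised), logs and traces the failure, logs any nested FSError or
+     * CorruptSSTableException once, and then applies the usual {@link #inspectThrowable} checks.
+     */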
+    public static void uncaughtException(Thread thread, Throwable t)
+    {
+        try { StorageMetrics.uncaughtExceptions.inc(); } catch (Throwable ignore) { /* might not be initialised */ }
+        logger.error("Exception in thread {}", thread, t);
+        Tracing.trace("Exception in thread {}", thread, t);
+        for (Throwable t2 = t; t2 != null; t2 = t2.getCause())
+        {
+            // make sure error gets logged exactly once.
+            if (t2 != t && (t2 instanceof FSError || t2 instanceof CorruptSSTableException))
+                logger.error("Exception in thread {}", thread, t2);
+        }
+        JVMStabilityInspector.inspectThrowable(t);
+    }
+
     /**
      * Certain Throwables and Exceptions represent "Die" conditions for the server.
      * This recursively checks the input Throwable's cause hierarchy until null.
@@ -114,49 +133,59 @@
             isUnstable = true;
         }
 
+        if (t instanceof InterruptedException)
+            throw new UncheckedInterruptedException((InterruptedException) t);
+
         if (DatabaseDescriptor.getDiskFailurePolicy() == Config.DiskFailurePolicy.die)
             if (t instanceof FSError || t instanceof CorruptSSTableException)
                 isUnstable = true;
 
-        fn.accept(t);
-
         // Check for file handle exhaustion
-        if (t instanceof FileNotFoundException || t instanceof SocketException)
+        if (t instanceof FileNotFoundException || t instanceof FileSystemException || t instanceof SocketException)
             if (t.getMessage() != null && t.getMessage().contains("Too many open files"))
                 isUnstable = true;
 
         if (isUnstable)
+        {
+            if (!StorageService.instance.isDaemonSetupCompleted())
+                FileUtils.handleStartupFSError(t);
             killer.killCurrentJVM(t);
+        }
+
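+        // The caller-supplied handler runs after the kill decision above; any error it throws is
+        // logged and swallowed rather than propagated.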
+        try
+        {
+            fn.accept(t);
+        }
+        catch (Exception | Error e)
+        {
+            logger.warn("Unexpected error while handling unexpected error", e);
+        }
 
         if (t.getCause() != null)
             inspectThrowable(t.getCause(), fn);
     }
 
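+    // OutOfMemoryError messages that already indicate a heap OOM; for these there is nothing to force.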
+    private static final Set<String> FORCE_HEAP_OOM_IGNORE_SET = ImmutableSet.of("Java heap space", "GC Overhead limit exceeded");
+
     /**
-     * Intentionally produce a heap space OOM upon seeing a Direct buffer memory OOM.
+     * Intentionally produce a heap space OOM upon seeing a non-heap memory OOM.
      * Direct buffer OOM cannot trigger JVM OOM error related options,
      * e.g. OnOutOfMemoryError, HeapDumpOnOutOfMemoryError, etc.
-     * See CASSANDRA-15214 for more details
+     * See CASSANDRA-15214 and CASSANDRA-17128 for more details
      */
     @Exclude // Exclude from just in time compilation.
     private static void forceHeapSpaceOomMaybe(OutOfMemoryError oom)
     {
-        // See the oom thrown from java.nio.Bits.reserveMemory.
-        // In jdk 13 and up, the message is "Cannot reserve XX bytes of direct buffer memory (...)"
-        // In jdk 11 and below, the message is "Direct buffer memory"
-        if ((oom.getMessage() != null && oom.getMessage().toLowerCase().contains("direct buffer memory")) ||
-            Arrays.stream(oom.getStackTrace()).anyMatch(x -> x.getClassName().equals("java.nio.Bits")
-                                                             && x.getMethodName().equals("reserveMemory")))
+        if (FORCE_HEAP_OOM_IGNORE_SET.contains(oom.getMessage()))
+            return;
+        logger.error("Force heap space OutOfMemoryError in the presence of", oom);
+        // Start to produce heap space OOM forcibly.
+        List<long[]> ignored = new ArrayList<>();
+        while (true)
         {
-            logger.error("Force heap space OutOfMemoryError in the presence of", oom);
-            // Start to produce heap space OOM forcibly.
-            List<long[]> ignored = new ArrayList<>();
-            while (true)
-            {
-                // java.util.AbstractCollection.MAX_ARRAY_SIZE is defined as Integer.MAX_VALUE - 8
-                // so Integer.MAX_VALUE / 2 should be a large enough and safe size to request.
-                ignored.add(new long[Integer.MAX_VALUE / 2]);
-            }
+            // java.util.AbstractCollection.MAX_ARRAY_SIZE is defined as Integer.MAX_VALUE - 8
+            // so Integer.MAX_VALUE / 2 should be a large enough and safe size to request.
+            ignored.add(new long[Integer.MAX_VALUE / 2]);
         }
     }
 
diff --git a/src/java/org/apache/cassandra/utils/LazyToString.java b/src/java/org/apache/cassandra/utils/LazyToString.java
new file mode 100644
index 0000000..e719445
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/LazyToString.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.util.function.Supplier;
+
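+/**
+ * Wraps a {@link Supplier} so that {@code toString()} is computed on demand, e.g. to defer
+ * expensive string construction until a log statement actually formats its arguments.
+ */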
+public interface LazyToString
+{
+    public static LazyToString lazy(Supplier<String> castAsLambda)
+    {
+        return new LazyToString()
+        {
+            @Override
+            public String toString()
+            {
+                return castAsLambda.get();
+            }
+        };
+    }
+}
diff --git a/src/java/org/apache/cassandra/utils/MerkleTrees.java b/src/java/org/apache/cassandra/utils/MerkleTrees.java
index 0043fe0..15469d3 100644
--- a/src/java/org/apache/cassandra/utils/MerkleTrees.java
+++ b/src/java/org/apache/cassandra/utils/MerkleTrees.java
@@ -145,7 +145,7 @@
     /**
      * Dereference all merkle trees and release direct memory for all off-heap trees.
      */
-    public void release()
+    public synchronized void release()
     {
         merkleTrees.values().forEach(MerkleTree::release);
         merkleTrees.clear();
@@ -381,15 +381,15 @@
     /**
      * Get the differences between the two sets of MerkleTrees.
      *
-     * @param ltree
-     * @param rtree
+     * @param ltrees
+     * @param rtrees
      * @return
      */
-    public static List<Range<Token>> difference(MerkleTrees ltree, MerkleTrees rtree)
+    public static List<Range<Token>> difference(MerkleTrees ltrees, MerkleTrees rtrees)
     {
         List<Range<Token>> differences = new ArrayList<>();
-        for (MerkleTree tree : ltree.merkleTrees.values())
-            differences.addAll(MerkleTree.difference(tree, rtree.getMerkleTree(tree.fullRange)));
+        for (MerkleTree tree : ltrees.merkleTrees.values())
+            differences.addAll(MerkleTree.difference(tree, rtrees.getMerkleTree(tree.fullRange)));
         return differences;
     }
 
diff --git a/src/java/org/apache/cassandra/utils/MonotonicClock.java b/src/java/org/apache/cassandra/utils/MonotonicClock.java
index bd69bd5..ad9ee81 100644
--- a/src/java/org/apache/cassandra/utils/MonotonicClock.java
+++ b/src/java/org/apache/cassandra/utils/MonotonicClock.java
@@ -30,25 +30,25 @@
 import org.apache.cassandra.config.Config;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.CLOCK_MONOTONIC_APPROX;
+import static org.apache.cassandra.config.CassandraRelevantProperties.CLOCK_MONOTONIC_PRECISE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
 
 /**
  * Wrapper around time related functions that are either implemented by using the default JVM calls
  * or by using a custom implementation for testing purposes.
  *
- * See {@link #preciseTime} for how to use a custom implementation.
+ * See {@link Global#preciseTime} for how to use a custom implementation.
  *
  * Please note that {@link java.time.Clock} wasn't used, as it would not be possible to provide an
  * implementation for {@link #now()} with the exact same properties of {@link System#nanoTime()}.
+ *
+ * TODO better rationalise MonotonicClock/Clock
  */
+@Shared(scope = SIMULATION)
 public interface MonotonicClock
 {
-    /**
-     * Static singleton object that will be instantiated by default with a system clock
-     * implementation. Set <code>cassandra.clock</code> system property to a FQCN to use a
-     * different implementation instead.
-     */
-    public static final MonotonicClock preciseTime = Defaults.precise();
-    public static final MonotonicClock approxTime = Defaults.approx(preciseTime);
 
     /**
      * @see System#nanoTime()
@@ -70,15 +70,21 @@
     public boolean isAfter(long instant);
     public boolean isAfter(long now, long instant);
 
-    static class Defaults
+    public static class Global
     {
         private static final Logger logger = LoggerFactory.getLogger(MonotonicClock.class);
 
+        /**
+         * Static singleton object that will be instantiated by default with a system clock
+         * implementation. Set <code>cassandra.clock</code> system property to a FQCN to use a
+         * different implementation instead.
+         */
+        public static final MonotonicClock preciseTime = precise();
+        public static final MonotonicClock approxTime = approx(preciseTime);
+
         private static MonotonicClock precise()
         {
-            String sclock = System.getProperty("cassandra.clock");
-            if (sclock == null)
-                sclock = System.getProperty("cassandra.monotonic_clock.precise");
+            String sclock = CLOCK_MONOTONIC_PRECISE.getString();
 
             if (sclock != null)
             {
@@ -98,7 +104,7 @@
 
         private static MonotonicClock approx(MonotonicClock precise)
         {
-            String sclock = System.getProperty("cassandra.monotonic_clock.approx");
+            String sclock = CLOCK_MONOTONIC_APPROX.getString();
             if (sclock != null)
             {
                 try
@@ -137,14 +143,14 @@
         private static final long UPDATE_INTERVAL_MS = Long.getLong(UPDATE_INTERVAL_PROPERTY, 10000);
 
         @VisibleForTesting
-        static class AlmostSameTime implements MonotonicClockTranslation
+        public static class AlmostSameTime implements MonotonicClockTranslation
         {
             final long millisSinceEpoch;
             final long monotonicNanos;
             final long error; // maximum error of millis measurement (in nanos)
 
             @VisibleForTesting
-            AlmostSameTime(long millisSinceEpoch, long monotonicNanos, long errorNanos)
+            public AlmostSameTime(long millisSinceEpoch, long monotonicNanos, long errorNanos)
             {
                 this.millisSinceEpoch = millisSinceEpoch;
                 this.monotonicNanos = monotonicNanos;
@@ -207,7 +213,7 @@
         {
             final int tries = 3;
             long[] samples = new long[2 * tries + 1];
-            samples[0] = System.nanoTime();
+            samples[0] = nanoTime();
             for (int i = 1 ; i < samples.length ; i += 2)
             {
                 samples[i] = millisSinceEpoch.getAsLong();
@@ -244,13 +250,13 @@
     {
         private SystemClock()
         {
-            super(System::currentTimeMillis);
+            super(Clock.Global::currentTimeMillis);
         }
 
         @Override
         public long now()
         {
-            return System.nanoTime();
+            return nanoTime();
         }
 
         @Override
diff --git a/src/java/org/apache/cassandra/utils/MonotonicClockTranslation.java b/src/java/org/apache/cassandra/utils/MonotonicClockTranslation.java
index f7f83e4..cef8bd8 100644
--- a/src/java/org/apache/cassandra/utils/MonotonicClockTranslation.java
+++ b/src/java/org/apache/cassandra/utils/MonotonicClockTranslation.java
@@ -18,6 +18,9 @@
 
 package org.apache.cassandra.utils;
 
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
 public interface MonotonicClockTranslation
 {
     /** accepts millis since epoch, returns nanoTime in the related clock */
diff --git a/src/java/org/apache/cassandra/utils/Mx4jTool.java b/src/java/org/apache/cassandra/utils/Mx4jTool.java
index 054fdcf..e0e998f 100644
--- a/src/java/org/apache/cassandra/utils/Mx4jTool.java
+++ b/src/java/org/apache/cassandra/utils/Mx4jTool.java
@@ -23,8 +23,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.config.CassandraRelevantProperties;
-
 import static org.apache.cassandra.config.CassandraRelevantProperties.MX4JADDRESS;
 import static org.apache.cassandra.config.CassandraRelevantProperties.MX4JPORT;
 
@@ -82,7 +80,7 @@
     {
         String sAddress = MX4JADDRESS.getString();
         if (StringUtils.isEmpty(sAddress))
-            sAddress = FBUtilities.getBroadcastAddressAndPort().address.getHostAddress();
+            sAddress = FBUtilities.getBroadcastAddressAndPort().getAddress().getHostAddress();
         return sAddress;
     }
 
diff --git a/src/java/org/apache/cassandra/utils/NativeLibrary.java b/src/java/org/apache/cassandra/utils/NativeLibrary.java
index e5b5da7..9348433 100644
--- a/src/java/org/apache/cassandra/utils/NativeLibrary.java
+++ b/src/java/org/apache/cassandra/utils/NativeLibrary.java
@@ -17,14 +17,14 @@
  */
 package org.apache.cassandra.utils;
 
-import java.io.File;
 import java.io.FileDescriptor;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.lang.reflect.Field;
 import java.nio.channels.FileChannel;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -32,22 +32,22 @@
 
 import org.apache.cassandra.io.FSWriteError;
 
+import static org.apache.cassandra.config.CassandraRelevantProperties.IGNORE_MISSING_NATIVE_FILE_HINTS;
 import static org.apache.cassandra.config.CassandraRelevantProperties.OS_ARCH;
 import static org.apache.cassandra.config.CassandraRelevantProperties.OS_NAME;
 import static org.apache.cassandra.utils.NativeLibrary.OSType.LINUX;
 import static org.apache.cassandra.utils.NativeLibrary.OSType.MAC;
-import static org.apache.cassandra.utils.NativeLibrary.OSType.WINDOWS;
 import static org.apache.cassandra.utils.NativeLibrary.OSType.AIX;
 
 public final class NativeLibrary
 {
     private static final Logger logger = LoggerFactory.getLogger(NativeLibrary.class);
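+    // When the IGNORE_MISSING_NATIVE_FILE_HINTS property is set, the warnings (and FSWriteErrors)
+    // for failed native calls below are suppressed.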
+    private static final boolean REQUIRE = !IGNORE_MISSING_NATIVE_FILE_HINTS.getBoolean();
 
     public enum OSType
     {
         LINUX,
         MAC,
-        WINDOWS,
         AIX,
         OTHER;
     }
@@ -96,7 +96,6 @@
         switch (osType)
         {
             case MAC: wrappedLibrary = new NativeLibraryDarwin(); break;
-            case WINDOWS: wrappedLibrary = new NativeLibraryWindows(); break;
             case LINUX:
             case AIX:
             case OTHER:
@@ -140,10 +139,8 @@
             return LINUX;
         else if (osName.contains("mac"))
             return MAC;
-        else if (osName.contains("windows"))
-            return WINDOWS;
 
-        logger.warn("the current operating system, {}, is unsupported by cassandra", osName);
+        logger.warn("the current operating system, {}, is unsupported by Cassandra", osName);
         if (osName.contains("aix"))
             return AIX;
         else
@@ -160,7 +157,8 @@
         }
         catch (NoSuchMethodError x)
         {
-            logger.warn("Obsolete version of JNA present; unable to read errno. Upgrade to JNA 3.2.7 or later");
+            if (REQUIRE)
+                logger.warn("Obsolete version of JNA present; unable to read errno. Upgrade to JNA 3.2.7 or later");
             return 0;
         }
     }
@@ -216,7 +214,7 @@
         if (!f.exists())
             return;
 
-        try (FileInputStream fis = new FileInputStream(f))
+        try (FileInputStreamPlus fis = new FileInputStreamPlus(f))
         {
             trySkipCache(getfd(fis.getChannel()), offset, len, path);
         }
@@ -292,7 +290,8 @@
             if (!(e instanceof LastErrorException))
                 throw e;
 
-            logger.warn("fcntl({}, {}, {}) failed, errno ({}).", fd, command, flags, errno(e));
+            if (REQUIRE)
+                logger.warn("fcntl({}, {}, {}) failed, errno ({}).", fd, command, flags, errno(e));
         }
 
         return result;
@@ -315,7 +314,8 @@
             if (!(e instanceof LastErrorException))
                 throw e;
 
-            logger.warn("open({}, O_RDONLY) failed, errno ({}).", path, errno(e));
+            if (REQUIRE)
+                logger.warn("open({}, O_RDONLY) failed, errno ({}).", path, errno(e));
         }
 
         return fd;
@@ -339,9 +339,12 @@
             if (!(e instanceof LastErrorException))
                 throw e;
 
-            String errMsg = String.format("fsync(%s) failed, errno (%s) %s", fd, errno(e), e.getMessage());
-            logger.warn(errMsg);
-            throw new FSWriteError(e, errMsg);
+            if (REQUIRE)
+            {
+                String errMsg = String.format("fsync(%s) failed, errno (%s) %s", fd, errno(e), e.getMessage());
+                logger.warn(errMsg);
+                throw new FSWriteError(e, errMsg);
+            }
         }
     }
 
@@ -363,9 +366,12 @@
             if (!(e instanceof LastErrorException))
                 throw e;
 
-            String errMsg = String.format("close(%d) failed, errno (%d).", fd, errno(e));
-            logger.warn(errMsg);
-            throw new FSWriteError(e, errMsg);
+            if (REQUIRE)
+            {
+                String errMsg = String.format("close(%d) failed, errno (%d).", fd, errno(e));
+                logger.warn(errMsg);
+                throw new FSWriteError(e, errMsg);
+            }
         }
     }
 
@@ -377,7 +383,8 @@
         }
         catch (IllegalArgumentException|IllegalAccessException e)
         {
-            logger.warn("Unable to read fd field from FileChannel");
+            if (REQUIRE)
+                logger.warn("Unable to read fd field from FileChannel", e);
         }
         return -1;
     }
@@ -395,8 +402,11 @@
         }
         catch (Exception e)
         {
-            JVMStabilityInspector.inspectThrowable(e);
-            logger.warn("Unable to read fd field from FileDescriptor");
+            if (REQUIRE)
+            {
+                JVMStabilityInspector.inspectThrowable(e);
+                logger.warn("Unable to read fd field from FileDescriptor", e);
+            }
         }
 
         return -1;
@@ -417,7 +427,8 @@
         }
         catch (Exception e)
         {
-            logger.info("Failed to get PID from JNA", e);
+            if (REQUIRE)
+                logger.info("Failed to get PID from JNA", e);
         }
 
         return -1;
diff --git a/src/java/org/apache/cassandra/utils/NativeLibraryDarwin.java b/src/java/org/apache/cassandra/utils/NativeLibraryDarwin.java
index 6ed18d1..c119311 100644
--- a/src/java/org/apache/cassandra/utils/NativeLibraryDarwin.java
+++ b/src/java/org/apache/cassandra/utils/NativeLibraryDarwin.java
@@ -42,6 +42,7 @@
  * @see org.apache.cassandra.utils.NativeLibraryWrapper
  * @see NativeLibrary
  */
+@Shared
 public class NativeLibraryDarwin implements NativeLibraryWrapper
 {
     private static final Logger logger = LoggerFactory.getLogger(NativeLibraryDarwin.class);
diff --git a/src/java/org/apache/cassandra/utils/NativeLibraryLinux.java b/src/java/org/apache/cassandra/utils/NativeLibraryLinux.java
index 3f21d17..9c7bb3b 100644
--- a/src/java/org/apache/cassandra/utils/NativeLibraryLinux.java
+++ b/src/java/org/apache/cassandra/utils/NativeLibraryLinux.java
@@ -42,6 +42,7 @@
  * @see org.apache.cassandra.utils.NativeLibraryWrapper
  * @see NativeLibrary
  */
+@Shared
 public class NativeLibraryLinux implements NativeLibraryWrapper
 {
     private static boolean available;
diff --git a/src/java/org/apache/cassandra/utils/NativeLibraryWindows.java b/src/java/org/apache/cassandra/utils/NativeLibraryWindows.java
deleted file mode 100644
index b8304c7..0000000
--- a/src/java/org/apache/cassandra/utils/NativeLibraryWindows.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.utils;
-
-import java.util.Collections;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.sun.jna.LastErrorException;
-import com.sun.jna.Native;
-import com.sun.jna.Pointer;
-
-/**
- * A {@code NativeLibraryWrapper} implementation for Windows.
- * <p> This implementation only offers support for the {@code callGetpid} method
- * using the Windows/Kernel32 library.</p>
- *
- * @see org.apache.cassandra.utils.NativeLibraryWrapper
- * @see NativeLibrary
- */
-public class NativeLibraryWindows implements NativeLibraryWrapper
-{
-    private static final Logger logger = LoggerFactory.getLogger(NativeLibraryWindows.class);
-
-    private static boolean available;
-
-    static
-    {
-        try
-        {
-            Native.register(com.sun.jna.NativeLibrary.getInstance("kernel32", Collections.emptyMap()));
-            available = true;
-        }
-        catch (NoClassDefFoundError e)
-        {
-            logger.warn("JNA not found. Native methods will be disabled.");
-        }
-        catch (UnsatisfiedLinkError e)
-        {
-            logger.error("Failed to link the Windows/Kernel32 library against JNA. Native methods will be unavailable.", e);
-        }
-        catch (NoSuchMethodError e)
-        {
-            logger.warn("Obsolete version of JNA present; unable to register Windows/Kernel32 library. Upgrade to JNA 3.2.7 or later");
-        }
-    }
-
-    /**
-     * Retrieves the process identifier of the calling process (<a href='https://msdn.microsoft.com/en-us/library/windows/desktop/ms683180(v=vs.85).aspx'>GetCurrentProcessId function</a>).
-     *
-     * @return the process identifier of the calling process
-     */
-    private static native long GetCurrentProcessId() throws LastErrorException;
-
-    public int callMlockall(int flags) throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    public int callMunlockall() throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    public int callFcntl(int fd, int command, long flags) throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    public int callPosixFadvise(int fd, long offset, int len, int flag) throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    public int callOpen(String path, int flags) throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    public int callFsync(int fd) throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    public int callClose(int fd) throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    public Pointer callStrerror(int errnum) throws UnsatisfiedLinkError, RuntimeException
-    {
-        throw new UnsatisfiedLinkError();
-    }
-
-    /**
-     * @return the PID of the JVM running
-     * @throws UnsatisfiedLinkError if we fail to link against Sigar
-     * @throws RuntimeException if another unexpected error is thrown by Sigar
-     */
-    public long callGetpid() throws UnsatisfiedLinkError, RuntimeException
-    {
-        return GetCurrentProcessId();
-    }
-
-    public boolean isAvailable()
-    {
-        return available;
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/NativeLibraryWrapper.java b/src/java/org/apache/cassandra/utils/NativeLibraryWrapper.java
index 879ea88..2c3d47f 100644
--- a/src/java/org/apache/cassandra/utils/NativeLibraryWrapper.java
+++ b/src/java/org/apache/cassandra/utils/NativeLibraryWrapper.java
@@ -24,7 +24,8 @@
  * An interface to implement for using OS specific native methods.
  * @see NativeLibrary
  */
-interface NativeLibraryWrapper
+@Shared
+public interface NativeLibraryWrapper
 {
     /**
      * Checks if the library has been successfully linked.
diff --git a/src/java/org/apache/cassandra/utils/Nemesis.java b/src/java/org/apache/cassandra/utils/Nemesis.java
new file mode 100644
index 0000000..840eba3
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Nemesis.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static org.apache.cassandra.utils.Nemesis.Traffic.HIGH;
+
+/**
+ * Annotate fields, particularly important volatile fields, where the system should adversarially schedule
+ * thread events around memory accesses (read or write).
+ *
+ * This can introduce significant simulation overhead, so should be used sparingly.
+ *
+ * TODO: Support @Nemesis on methods, to insert nemesis points either before or after invocations of the method
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ ElementType.FIELD, ElementType.METHOD })
+public @interface Nemesis
+{
+    enum Traffic { LOW, HIGH }
+
+    Traffic traffic() default HIGH;
+}
diff --git a/src/java/org/apache/cassandra/utils/NoSpamLogger.java b/src/java/org/apache/cassandra/utils/NoSpamLogger.java
index ac9168a..9b62e21 100644
--- a/src/java/org/apache/cassandra/utils/NoSpamLogger.java
+++ b/src/java/org/apache/cassandra/utils/NoSpamLogger.java
@@ -19,12 +19,15 @@
 
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
 
 import org.cliffc.high_scale_lib.NonBlockingHashMap;
 import org.slf4j.Logger;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import static org.apache.cassandra.utils.Clock.Global;
+
 /**
  * Logging that limits each log statement to firing based on time since the statement last fired.
  *
@@ -44,7 +47,7 @@
      */
     public enum Level
     {
-        INFO, WARN, ERROR;
+        INFO, WARN, ERROR
     }
 
     @VisibleForTesting
@@ -58,7 +61,7 @@
     {
         public long nanoTime()
         {
-            return System.nanoTime();
+            return Global.nanoTime();
         }
     };
 
@@ -82,21 +85,31 @@
             return nowNanos >= expected && compareAndSet(expected, nowNanos + minIntervalNanos);
         }
 
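+        // Supplier-based variant: the argument array is only materialised once the rate limiter has
+        // decided that the statement may fire.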
+        public boolean log(Level l, long nowNanos, Supplier<Object[]> objects)
+        {
+            if (!shouldLog(nowNanos)) return false;
+            return logNoCheck(l, objects.get());
+        }
+
         public boolean log(Level l, long nowNanos, Object... objects)
         {
             if (!shouldLog(nowNanos)) return false;
+            return logNoCheck(l, objects);
+        }
 
+        private boolean logNoCheck(Level l, Object... objects)
+        {
             switch (l)
             {
-            case INFO:
-                wrapped.info(statement, objects);
-                break;
-            case WARN:
-                wrapped.warn(statement, objects);
-                break;
-            case ERROR:
-                wrapped.error(statement, objects);
-                break;
+                case INFO:
+                    wrapped.info(statement, objects);
+                    break;
+                case WARN:
+                    wrapped.warn(statement, objects);
+                    break;
+                case ERROR:
+                    wrapped.error(statement, objects);
+                    break;
                 default:
                     throw new AssertionError();
             }
@@ -172,6 +185,23 @@
         return statement.log(level, nowNanos, objects);
     }
 
+    public static boolean log(Logger logger, Level level, long minInterval, TimeUnit unit, String message, Supplier<Object[]> objects)
+    {
+        return log(logger, level, message, minInterval, unit, CLOCK.nanoTime(), message, objects);
+    }
+
+    public static boolean log(Logger logger, Level level, String key, long minInterval, TimeUnit unit, String message, Supplier<Object[]> objects)
+    {
+        return log(logger, level, key, minInterval, unit, CLOCK.nanoTime(), message, objects);
+    }
+
+    public static boolean log(Logger logger, Level level, String key, long minInterval, TimeUnit unit, long nowNanos, String message, Supplier<Object[]> objects)
+    {
+        NoSpamLogger wrapped = getLogger(logger, minInterval, unit);
+        NoSpamLogStatement statement = wrapped.getStatement(key, message);
+        return statement.log(level, nowNanos, objects);
+    }
+
     public static NoSpamLogStatement getStatement(Logger logger, String message, long minInterval, TimeUnit unit)
     {
         NoSpamLogger wrapped = getLogger(logger, minInterval, unit);
diff --git a/src/java/org/apache/cassandra/utils/NullableSerializer.java b/src/java/org/apache/cassandra/utils/NullableSerializer.java
new file mode 100644
index 0000000..67e2d6a
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/NullableSerializer.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.io.IOException;
+
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
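+/**
+ * Helpers for serializing possibly-null values with an {@link IVersionedSerializer}: a boolean
+ * presence flag is written first, followed by the value itself when present.
+ */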
+public class NullableSerializer
+{
+
+    public static <T> void serializeNullable(IVersionedSerializer<T> serializer, T value, DataOutputPlus out, int version) throws IOException
+    {
+        out.writeBoolean(value != null);
+        if (value != null)
+            serializer.serialize(value, out, version);
+    }
+
+    public static <T> T deserializeNullable(IVersionedSerializer<T> serializer, DataInputPlus in, int version) throws IOException
+    {
+        return in.readBoolean() ? serializer.deserialize(in, version) : null;
+    }
+
+    public static <T> long serializedSizeNullable(IVersionedSerializer<T> serializer, T value, int version)
+    {
+        return value != null
+                ? TypeSizes.sizeof(true) + serializer.serializedSize(value, version)
+                : TypeSizes.sizeof(false);
+    }
+
+    public static <T> IVersionedSerializer<T> wrap(IVersionedSerializer<T> wrap)
+    {
+        return new IVersionedSerializer<T>() {
+            public void serialize(T t, DataOutputPlus out, int version) throws IOException
+            {
+                serializeNullable(wrap, t, out, version);
+            }
+
+            public T deserialize(DataInputPlus in, int version) throws IOException
+            {
+                return deserializeNullable(wrap, in, version);
+            }
+
+            public long serializedSize(T t, int version)
+            {
+                return serializedSizeNullable(wrap, t, version);
+            }
+        };
+    }
+
+}
diff --git a/src/java/org/apache/cassandra/utils/ObjectSizes.java b/src/java/org/apache/cassandra/utils/ObjectSizes.java
index 468522c..07066cf 100644
--- a/src/java/org/apache/cassandra/utils/ObjectSizes.java
+++ b/src/java/org/apache/cassandra/utils/ObjectSizes.java
@@ -29,8 +29,7 @@
  */
 public class ObjectSizes
 {
-    private static final MemoryMeter meter = new MemoryMeter().omitSharedBufferOverhead()
-                                                              .withGuessing(MemoryMeter.Guess.FALLBACK_UNSAFE)
+    private static final MemoryMeter meter = new MemoryMeter().withGuessing(MemoryMeter.Guess.FALLBACK_UNSAFE)
                                                               .ignoreKnownSingletons();
 
     private static final long EMPTY_HEAP_BUFFER_SIZE = measure(ByteBufferUtil.EMPTY_BYTE_BUFFER);
diff --git a/src/java/org/apache/cassandra/utils/RecomputingSupplier.java b/src/java/org/apache/cassandra/utils/RecomputingSupplier.java
index ba6a1ff..0554439 100644
--- a/src/java/org/apache/cassandra/utils/RecomputingSupplier.java
+++ b/src/java/org/apache/cassandra/utils/RecomputingSupplier.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils;
 
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -27,6 +26,10 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Supplier;
 
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.Promise;
+
 /**
  * Supplier that caches the last computed value until it is reset, forcing every caller of
  * {@link RecomputingSupplier#get(long, TimeUnit)} to wait until this value is computed if
@@ -38,7 +41,7 @@
 public class RecomputingSupplier<T>
 {
     private final Supplier<T> supplier;
-    private final AtomicReference<CompletableFuture<T>> cached = new AtomicReference<>(null);
+    private final AtomicReference<Future<T>> cached = new AtomicReference<>(null);
     private final AtomicBoolean workInProgress = new AtomicBoolean(false);
     private final ExecutorService executor;
 
@@ -50,7 +53,7 @@
 
     public void recompute()
     {
-        CompletableFuture<T> current = cached.get();
+        Future<T> current = cached.get();
         boolean origWip = workInProgress.get();
 
         if (origWip || (current != null && !current.isDone()))
@@ -63,14 +66,14 @@
         assert current == null || current.isDone();
 
         // The work is not in progress, and current future is done. Try to submit a new task.
-        CompletableFuture<T> lazyValue = new CompletableFuture<>();
+        Promise<T> lazyValue = new AsyncPromise<>();
         if (cached.compareAndSet(current, lazyValue))
             executor.submit(() -> doWork(lazyValue));
         else
             executor.submit(this::recompute); // Lost CAS, resubmit
     }
 
-    private void doWork(CompletableFuture<T> lazyValue)
+    private void doWork(Promise<T> lazyValue)
     {
         T value = null;
         Throwable err = null;
@@ -89,9 +92,9 @@
         }
 
         if (err == null)
-            lazyValue.complete(value);
+            lazyValue.trySuccess(value);
         else
-            lazyValue.completeExceptionally(err);
+            lazyValue.tryFailure(err);
     }
 
     private static void sanityCheck(boolean check)
@@ -101,7 +104,7 @@
 
     public T get(long timeout, TimeUnit timeUnit) throws InterruptedException, ExecutionException, TimeoutException
     {
-        CompletableFuture<T> lazyValue = cached.get();
+        Future<T> lazyValue = cached.get();
 
         // recompute was never called yet, return null.
         if (lazyValue == null)
diff --git a/src/java/org/apache/cassandra/utils/ResourceWatcher.java b/src/java/org/apache/cassandra/utils/ResourceWatcher.java
index 5e7cbdd..e8dcb85 100644
--- a/src/java/org/apache/cassandra/utils/ResourceWatcher.java
+++ b/src/java/org/apache/cassandra/utils/ResourceWatcher.java
@@ -17,9 +17,9 @@
  */
 package org.apache.cassandra.utils;
 
-import java.io.File;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
diff --git a/src/java/org/apache/cassandra/utils/Shared.java b/src/java/org/apache/cassandra/utils/Shared.java
new file mode 100644
index 0000000..e576c86
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Shared.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Tells jvm-dtest that a class should be shared across all {@link ClassLoader}s.
+ *
+ * Jvm-dtest relies on classloader isolation to run multiple Cassandra instances in the same JVM, which means
+ * that some classes do not get shared (outside a blessed set of classes/packages). When the default behavior
+ * is not desirable, this annotation tells jvm-dtest to share the class across all class loaders.
+ *
+ * This is the opposite of {@link Isolated}.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ ElementType.TYPE })
+public @interface Shared
+{
+    enum Scope { ANY, SIMULATION }
+    enum Recursive { NONE, INTERFACES /*(and enums and exceptions) */, ALL }
+    Scope[] scope() default Scope.ANY;
+    Recursive inner() default Recursive.NONE;
+    Recursive ancestors() default Recursive.NONE;
+    Recursive members() default Recursive.NONE;
+}
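
A hedged illustration of how the new annotation is meant to be applied. The interface below is hypothetical; only the annotation and its elements come from the file above.

    import org.apache.cassandra.utils.Shared;

    import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
    import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;

    // Hypothetical type: ask jvm-dtest to load this once and share it across every
    // instance ClassLoader (and, recursively, the interfaces it references).
    @Shared(scope = SIMULATION, inner = INTERFACES)
    public interface ClusterWideCallback
    {
        void onEvent(String event);
    }
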
diff --git a/src/java/org/apache/cassandra/utils/SigarLibrary.java b/src/java/org/apache/cassandra/utils/SigarLibrary.java
index 246a9c8..32987d8 100644
--- a/src/java/org/apache/cassandra/utils/SigarLibrary.java
+++ b/src/java/org/apache/cassandra/utils/SigarLibrary.java
@@ -21,6 +21,8 @@
 import org.slf4j.LoggerFactory;
 import org.slf4j.Logger;
 
+import org.apache.cassandra.config.CassandraRelevantProperties;
+
 public class SigarLibrary
 {
     private Logger logger = LoggerFactory.getLogger(SigarLibrary.class);
@@ -111,10 +113,6 @@
 
     private boolean hasAcceptableAddressSpace()
     {
-        // Check is invalid on Windows
-        if (FBUtilities.isWindows)
-            return true;
-
         try
         {
             long fileMax = sigar.getResourceLimit().getVirtualMemoryMax();
@@ -169,7 +167,7 @@
             boolean goodAddressSpace = hasAcceptableAddressSpace();
             boolean goodFileLimits = hasAcceptableFileLimits();
             boolean goodProcNumber = hasAcceptableProcNumber();
-            if (swapEnabled || !goodAddressSpace || !goodFileLimits || !goodProcNumber)
+            if (swapEnabled || !goodAddressSpace || !goodFileLimits || !goodProcNumber || CassandraRelevantProperties.TEST_IGNORE_SIGAR.getBoolean())
             {
                 logger.warn("Cassandra server running in degraded mode. Is swap disabled? : {},  Address space adequate? : {}, " +
                             " nofile limit adequate? : {}, nproc limit adequate? : {} ", !swapEnabled, goodAddressSpace,
diff --git a/src/java/org/apache/cassandra/utils/Simulate.java b/src/java/org/apache/cassandra/utils/Simulate.java
new file mode 100644
index 0000000..dd0d230
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Simulate.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+/**
+ * Enable certain features for a specific method or class.
+ *
+ * Note that presently class level annotations are not inherited by inner classes.
+ *
+ * TODO: support package level, and apply to all nested classes
+ */
+public @interface Simulate
+{
+    enum With
+    {
+        /**
+         * Calls to FBUtilities.timestampMicros() will be guaranteed globally monotonically increasing.
+         *
+         * May be annotated at the method or class level.
+         */
+        GLOBAL_CLOCK,
+
+        /**
+         * synchronized methods and blocks, and wait/notify.
+         *
+         * May be annotated at the class level.
+         */
+        MONITORS,
+
+        /**
+         * Usages of LockSupport. This defaults to ON for all classes, including system classes.
+         *
+         * May be annotated at the method or class level.
+         */
+        LOCK_SUPPORT
+    }
+
+    With[] with() default {};
+    With[] without() default {};
+}
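
An illustrative application of the annotation; the class below is hypothetical and only the annotation elements are taken from the file above.

    import org.apache.cassandra.utils.Simulate;

    import static org.apache.cassandra.utils.Simulate.With.GLOBAL_CLOCK;
    import static org.apache.cassandra.utils.Simulate.With.MONITORS;

    // Hypothetical class: opt into deterministic monitors and a globally monotonic
    // FBUtilities.timestampMicros() when running under the simulator; a no-op otherwise.
    @Simulate(with = { MONITORS, GLOBAL_CLOCK })
    public class OrderSensitiveStateSketch
    {
        // synchronized blocks and wait/notify here become simulator-controlled
    }
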
diff --git a/src/java/org/apache/cassandra/utils/SlidingTimeRate.java b/src/java/org/apache/cassandra/utils/SlidingTimeRate.java
deleted file mode 100644
index 0e00054..0000000
--- a/src/java/org/apache/cassandra/utils/SlidingTimeRate.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-/**
- * Concurrent rate computation over a sliding time window.
- *
- * Currently not used in the Cassandra 4.0 code base. If you decide to use it, please check CASSANDRA-16713.
- * There still might be a bug, flaky test to be fixed before using it again.
- */
-public class SlidingTimeRate
-{
-    private final ConcurrentSkipListMap<Long, AtomicInteger> counters = new ConcurrentSkipListMap<>();
-    private final AtomicLong lastCounterTimestamp = new AtomicLong(0);
-    private final ReadWriteLock pruneLock = new ReentrantReadWriteLock();
-    private final long sizeInMillis;
-    private final long precisionInMillis;
-    private final TimeSource timeSource;
-
-    /**
-     * Creates a sliding rate whose time window is of the given size, with the given precision and time unit.
-     * <p>
-     * The precision defines how accurate the rate computation is, as it will be computed over window size +/-
-     * precision.
-     * </p>
-     */
-    public SlidingTimeRate(TimeSource timeSource, long size, long precision, TimeUnit unit)
-    {
-        Preconditions.checkArgument(size > precision, "Size should be greater than precision.");
-        Preconditions.checkArgument(TimeUnit.MILLISECONDS.convert(precision, unit) >= 1, "Precision must be greater than or equal to 1 millisecond.");
-        this.sizeInMillis = TimeUnit.MILLISECONDS.convert(size, unit);
-        this.precisionInMillis = TimeUnit.MILLISECONDS.convert(precision, unit);
-        this.timeSource = timeSource;
-    }
-
-    /**
-     * Updates the rate.
-     */
-    public void update(int delta)
-    {
-        pruneLock.readLock().lock();
-        try
-        {
-            while (true)
-            {
-                long now = timeSource.currentTimeMillis();
-                long lastTimestamp = lastCounterTimestamp.get();
-                boolean isWithinPrecisionRange = (now - lastTimestamp) < precisionInMillis;
-                AtomicInteger lastCounter = counters.get(lastTimestamp);
-                // If there's a valid counter for the current last timestamp, and we're in the precision range,
-                // update such counter:
-                if (lastCounter != null && isWithinPrecisionRange)
-                {
-                    lastCounter.addAndGet(delta);
-
-                    break;
-                }
-                // Else if there's no counter or we're past the precision range, try to create a new counter,
-                // but only the thread updating the last timestamp will create a new counter:
-                else if (lastCounterTimestamp.compareAndSet(lastTimestamp, now))
-                {
-                    AtomicInteger existing = counters.putIfAbsent(now, new AtomicInteger(delta));
-                    if (existing != null)
-                    {
-                        existing.addAndGet(delta);
-                    }
-
-                    break;
-                }
-            }
-        }
-        finally
-        {
-            pruneLock.readLock().unlock();
-        }
-    }
-
-    /**
-     * Gets the current rate in the given time unit from the beginning of the time window to the
-     * provided point in time ago.
-     */
-    public double get(long toAgo, TimeUnit unit)
-    {
-        pruneLock.readLock().lock();
-        try
-        {
-            long toAgoInMillis = TimeUnit.MILLISECONDS.convert(toAgo, unit);
-            Preconditions.checkArgument(toAgoInMillis < sizeInMillis, "Cannot get rate in the past!");
-
-            long now = timeSource.currentTimeMillis();
-            long sum = 0;
-            ConcurrentNavigableMap<Long, AtomicInteger> tailCounters = counters
-                    .tailMap(now - sizeInMillis, true)
-                    .headMap(now - toAgoInMillis, true);
-            for (AtomicInteger i : tailCounters.values())
-            {
-                sum += i.get();
-            }
-
-            double rateInMillis = sum == 0
-                                  ? sum
-                                  : sum / (double) Math.max(1000, (now - toAgoInMillis) - tailCounters.firstKey());
-            double multiplier = TimeUnit.MILLISECONDS.convert(1, unit);
-            return rateInMillis * multiplier;
-        }
-        finally
-        {
-            pruneLock.readLock().unlock();
-        }
-    }
-
-    /**
-     * Gets the current rate in the given time unit.
-     */
-    public double get(TimeUnit unit)
-    {
-        return get(0, unit);
-    }
-
-    /**
-     * Prunes the time window of old unused updates.
-     */
-    public void prune()
-    {
-        pruneLock.writeLock().lock();
-        try
-        {
-            long now = timeSource.currentTimeMillis();
-            counters.headMap(now - sizeInMillis, false).clear();
-        }
-        finally
-        {
-            pruneLock.writeLock().unlock();
-        }
-    }
-
-    @VisibleForTesting
-    public int size()
-    {
-        return counters.values().stream().reduce(new AtomicInteger(), (v1, v2) -> {
-            v1.addAndGet(v2.get());
-            return v1;
-        }).get();
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/SyncUtil.java b/src/java/org/apache/cassandra/utils/SyncUtil.java
index 1917e8b..6055859 100644
--- a/src/java/org/apache/cassandra/utils/SyncUtil.java
+++ b/src/java/org/apache/cassandra/utils/SyncUtil.java
@@ -28,9 +28,9 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.cassandra.config.Config;
-import org.apache.cassandra.service.CassandraDaemon;
 
 import com.google.common.base.Preconditions;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -173,12 +173,6 @@
         }
     }
 
-    public static void sync(RandomAccessFile ras) throws IOException
-    {
-        Preconditions.checkNotNull(ras);
-        sync(ras.getFD());
-    }
-
     public static void sync(FileOutputStream fos) throws IOException
     {
         Preconditions.checkNotNull(fos);
@@ -198,7 +192,7 @@
         if (SKIP_SYNC)
             return;
 
-        int directoryFD = NativeLibrary.tryOpenDirectory(dir.getPath());
+        int directoryFD = NativeLibrary.tryOpenDirectory(dir.path());
         try
         {
             trySync(directoryFD);
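
A minimal sketch of the calling convention after the swap to Cassandra's File wrapper. The directory path and the trySyncDir entry point name are assumptions (only the method body is visible in the hunk above); path() replaces java.io.File#getPath() as shown.

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.utils.SyncUtil;

    // Hypothetical directory: callers now hand SyncUtil the Cassandra File wrapper.
    File dataDir = new File("/var/lib/cassandra/data");
    SyncUtil.trySyncDir(dataDir);   // internally resolves dir.path() for the native directory open
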
diff --git a/src/java/org/apache/cassandra/utils/SystemTimeSource.java b/src/java/org/apache/cassandra/utils/SystemTimeSource.java
deleted file mode 100644
index fef525e..0000000
--- a/src/java/org/apache/cassandra/utils/SystemTimeSource.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.util.concurrent.Uninterruptibles;
-
-/**
- * Time source backed by JVM clock.
- */
-public class SystemTimeSource implements TimeSource
-{
-    @Override
-    public long currentTimeMillis()
-    {
-        return System.currentTimeMillis();
-    }
-
-    @Override
-    public long nanoTime()
-    {
-        return System.nanoTime();
-    }
-
-    @Override
-    public TimeSource sleepUninterruptibly(long sleepFor, TimeUnit unit)
-    {
-        Uninterruptibles.sleepUninterruptibly(sleepFor, unit);
-        return this;
-    }
-
-    @Override
-    public TimeSource sleep(long sleepFor, TimeUnit unit) throws InterruptedException
-    {
-        TimeUnit.NANOSECONDS.sleep(TimeUnit.NANOSECONDS.convert(sleepFor, unit));
-        return this;
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/Throwables.java b/src/java/org/apache/cassandra/utils/Throwables.java
index 86c0156..8337a56 100644
--- a/src/java/org/apache/cassandra/utils/Throwables.java
+++ b/src/java/org/apache/cassandra/utils/Throwables.java
@@ -18,7 +18,7 @@
 */
 package org.apache.cassandra.utils;
 
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.util.Arrays;
@@ -31,6 +31,7 @@
 
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.FSWriteError;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
 public final class Throwables
 {
@@ -60,6 +61,8 @@
     {
         if (existingFail == null)
             return newFail;
+        if (newFail == null)
+            return existingFail;
         existingFail.addSuppressed(newFail);
         return existingFail;
     }
@@ -87,6 +90,9 @@
         if (fail instanceof RuntimeException)
             throw (RuntimeException) fail;
 
+        if (fail instanceof InterruptedException)
+            throw new UncheckedInterruptedException((InterruptedException) fail);
+
         if (checked != null && checked.isInstance(fail))
             throw checked.cast(fail);
 
@@ -148,7 +154,7 @@
     @SafeVarargs
     public static void perform(File against, FileOpType opType, DiscreteAction<? extends IOException> ... actions)
     {
-        perform(against.getPath(), opType, actions);
+        perform(against.path(), opType, actions);
     }
 
     @SafeVarargs
@@ -237,7 +243,10 @@
      */
     public static RuntimeException unchecked(Throwable t)
     {
-        return t instanceof RuntimeException ? (RuntimeException)t : new RuntimeException(t);
+        return t instanceof RuntimeException ? (RuntimeException)t :
+               t instanceof InterruptedException
+               ? new UncheckedInterruptedException((InterruptedException) t)
+               : new RuntimeException(t);
     }
 
     /**
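
A short sketch of the two behavioral changes above: merge() now tolerates a null second failure, and unchecked() maps InterruptedException onto UncheckedInterruptedException rather than a plain RuntimeException wrapper.

    import org.apache.cassandra.utils.Throwables;
    import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;

    Throwable accumulated = null;
    accumulated = Throwables.merge(accumulated, new RuntimeException("first failure"));
    accumulated = Throwables.merge(accumulated, null); // returns the existing failure; previously risked addSuppressed(null)

    RuntimeException wrapped = Throwables.unchecked(new InterruptedException("shutdown"));
    assert wrapped instanceof UncheckedInterruptedException;
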
diff --git a/src/java/org/apache/cassandra/utils/TimeSource.java b/src/java/org/apache/cassandra/utils/TimeSource.java
deleted file mode 100644
index 5d8acec..0000000
--- a/src/java/org/apache/cassandra/utils/TimeSource.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.TimeUnit;
-
-public interface TimeSource
-{
-    /**
-     *
-     * @return the current time in milliseconds
-     */
-    long currentTimeMillis();
-
-    /**
-     *
-     * @return Returns the current time value in nanoseconds.
-     *
-     * <p>This method can only be used to measure elapsed time and is
-     * not related to any other notion of system or wall-clock time.
-     */
-    long nanoTime();
-
-    /**
-     * Sleep for the given amount of time uninterruptibly.
-     *
-     * @param  sleepFor given amout.
-     * @param  unit time unit
-     * @return The time source itself after the given sleep period.
-     */
-    TimeSource sleepUninterruptibly(long sleepFor, TimeUnit unit);
-
-    /**
-     * Sleep for the given amount of time. This operation could interrupted.
-     * Hence after returning from this method, it is not guaranteed
-     * that the request amount of time has passed.
-     *
-     * @param  sleepFor given amout.
-     * @param  unit time unit
-     * @return The time source itself after the given sleep period.
-     */
-    TimeSource sleep(long sleepFor, TimeUnit unit) throws InterruptedException;
-}
diff --git a/src/java/org/apache/cassandra/utils/TimeUUID.java b/src/java/org/apache/cassandra/utils/TimeUUID.java
new file mode 100644
index 0000000..8d79096
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/TimeUUID.java
@@ -0,0 +1,574 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.nio.ByteBuffer;
+import java.security.SecureRandom;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import com.google.common.hash.Hasher;
+import com.google.common.hash.Hashing;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.marshal.ValueAccessor;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.serializers.MarshalException;
+import org.apache.cassandra.serializers.TypeSerializer;
+
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.DETERMINISM_UNSAFE_UUID_NODE;
+import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+
+@Shared(inner = INTERFACES)
+public class TimeUUID implements Serializable, Comparable<TimeUUID>
+{
+    public static final long serialVersionUID = 1L;
+
+    // A grand day! millis at 00:00:00.000 15 Oct 1582.
+    public static final long UUID_EPOCH_UNIX_MILLIS = -12219292800000L;
+    protected static final long TIMESTAMP_UUID_VERSION_IN_MSB = 0x1000L;
+    protected static final long UUID_VERSION_BITS_IN_MSB = 0xf000L;
+
+    /*
+     * The min and max possible lsb for a UUID.
+     * Note that this is not 0 and all 1's because Cassandra TimeUUIDType
+     * compares the lsb parts as a signed byte array comparison. So the min
+     * value is 8 times -128 and the max is 8 times +127.
+     *
+     * Note that we ignore the uuid variant (namely, MIN_CLOCK_SEQ_AND_NODE
+     * has variant 2 as it should, but MAX_CLOCK_SEQ_AND_NODE has variant 0).
+     * I don't think that has any practical consequence and is more robust in
+     * case someone provides a UUID with a broken variant.
+     */
+    private static final long MIN_CLOCK_SEQ_AND_NODE = 0x8080808080808080L;
+    private static final long MAX_CLOCK_SEQ_AND_NODE = 0x7f7f7f7f7f7f7f7fL;
+
+
+    final long uuidTimestamp, lsb;
+
+    public TimeUUID(long uuidTimestamp, long lsb)
+    {
+        // we don't validate that this is a true TIMEUUID to avoid problems with historical mixing of UUID with TimeUUID
+        this.uuidTimestamp = uuidTimestamp;
+        this.lsb = lsb;
+    }
+
+    public static TimeUUID atUnixMicrosWithLsb(long unixMicros, long uniqueLsb)
+    {
+        return new TimeUUID(unixMicrosToRawTimestamp(unixMicros), uniqueLsb);
+    }
+
+    public static UUID atUnixMicrosWithLsbAsUUID(long unixMicros, long uniqueLsb)
+    {
+        return new UUID(rawTimestampToMsb(unixMicrosToRawTimestamp(unixMicros)), uniqueLsb);
+    }
+
+    /**
+     * Returns the smallest possible type 1 UUID having the provided timestamp.
+     *
+     * <b>Warning:</b> this method should only be used for querying as this
+     * doesn't at all guarantee the uniqueness of the resulting UUID.
+     */
+    public static TimeUUID minAtUnixMillis(long unixMillis)
+    {
+        return new TimeUUID(unixMillisToRawTimestamp(unixMillis, 0), MIN_CLOCK_SEQ_AND_NODE);
+    }
+
+    /**
+     * Returns the largest possible type 1 UUID having the provided timestamp.
+     *
+     * <b>Warning:</b> this method should only be used for querying as this
+     * doesn't at all guarantee the uniqueness of the resulting UUID.
+     */
+    public static TimeUUID maxAtUnixMillis(long unixMillis)
+    {
+        return new TimeUUID(unixMillisToRawTimestamp(unixMillis + 1, 0) - 1, MAX_CLOCK_SEQ_AND_NODE);
+    }
+
+    public static TimeUUID fromString(String uuidString)
+    {
+        return fromUuid(UUID.fromString(uuidString));
+    }
+
+    public static TimeUUID fromUuid(UUID uuid)
+    {
+        return fromBytes(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
+    }
+
+    public static TimeUUID fromBytes(long msb, long lsb)
+    {
+        return new TimeUUID(msbToRawTimestamp(msb), lsb);
+    }
+
+    public static TimeUUID deserialize(ByteBuffer buffer)
+    {
+        return fromBytes(buffer.getLong(buffer.position()), buffer.getLong(buffer.position() + 8));
+    }
+
+    public static TimeUUID deserialize(DataInput in) throws IOException
+    {
+        long msb = in.readLong();
+        long lsb = in.readLong();
+        return fromBytes(msb, lsb);
+    }
+
+    public void serialize(DataOutput out) throws IOException
+    {
+        out.writeLong(msb());
+        out.writeLong(lsb());
+    }
+
+    public ByteBuffer toBytes()
+    {
+        return ByteBuffer.wrap(toBytes(msb(), lsb()));
+    }
+
+    public static byte[] toBytes(long msb, long lsb)
+    {
+        byte[] uuidBytes = new byte[16];
+
+        for (int i = 0; i < 8; i++)
+            uuidBytes[i] = (byte) (msb >>> 8 * (7 - i));
+
+        for (int i = 8; i < 16; i++)
+            uuidBytes[i] = (byte) (lsb >>> 8 * (15 - i));
+
+        return uuidBytes;
+    }
+
+    public static long sizeInBytes()
+    {
+        return 16;
+    }
+
+    public UUID asUUID()
+    {
+        return new UUID(rawTimestampToMsb(uuidTimestamp), lsb);
+    }
+
+    /**
+     * The Cassandra internal micros-resolution timestamp of the TimeUUID, as of unix epoch
+     */
+    public long unix(TimeUnit units)
+    {
+        return units.convert(unixMicros(), MICROSECONDS);
+    }
+
+    /**
+     * The Cassandra internal micros-resolution timestamp of the TimeUUID, as of unix epoch
+     */
+    public long unixMicros()
+    {
+        return rawTimestampToUnixMicros(uuidTimestamp);
+    }
+
+    /**
+     * The UUID-format timestamp, i.e. 10x micros-resolution, as of TimeUUID.UUID_EPOCH_UNIX_MILLIS.
+     * The tenths of a microsecond are used to store a flag value.
+     */
+    public long uuidTimestamp()
+    {
+        return uuidTimestamp & 0x0FFFFFFFFFFFFFFFL;
+    }
+
+    public long msb()
+    {
+        return rawTimestampToMsb(uuidTimestamp);
+    }
+
+    public long lsb()
+    {
+        return lsb;
+    }
+
+    public static long rawTimestampToUnixMicros(long rawTimestamp)
+    {
+        return (rawTimestamp / 10) + UUID_EPOCH_UNIX_MILLIS * 1000;
+    }
+
+    public static long unixMillisToRawTimestamp(long unixMillis, long tenthsOfAMicro)
+    {
+        return unixMillis * 10000 - (UUID_EPOCH_UNIX_MILLIS * 10000) + tenthsOfAMicro;
+    }
+
+    public static long unixMicrosToRawTimestamp(long unixMicros)
+    {
+        return unixMicros * 10 - (UUID_EPOCH_UNIX_MILLIS * 10000);
+    }
+
+    public static long msbToRawTimestamp(long msb)
+    {
+        assert (UUID_VERSION_BITS_IN_MSB & msb) == TIMESTAMP_UUID_VERSION_IN_MSB;
+        msb &= ~TIMESTAMP_UUID_VERSION_IN_MSB;
+        return   (msb &     0xFFFFL) << 48
+               | (msb & 0xFFFF0000L) << 16
+               | (msb >>> 32);
+    }
+
+    public static long rawTimestampToMsb(long rawTimestamp)
+    {
+        return TIMESTAMP_UUID_VERSION_IN_MSB
+               | (rawTimestamp >>> 48)
+               | ((rawTimestamp & 0xFFFF00000000L) >>> 16)
+               | (rawTimestamp << 32);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return (int) ((uuidTimestamp ^ (uuidTimestamp >> 32) * 31) + (lsb ^ (lsb >> 32)));
+    }
+
+    @Override
+    public boolean equals(Object that)
+    {
+        return    (that instanceof UUID && equals((UUID) that))
+               || (that instanceof TimeUUID && equals((TimeUUID) that));
+    }
+
+    public boolean equals(TimeUUID that)
+    {
+        return that != null && uuidTimestamp == that.uuidTimestamp && lsb == that.lsb;
+    }
+
+    public boolean equals(UUID that)
+    {
+        return that != null && uuidTimestamp == that.timestamp() && lsb == that.getLeastSignificantBits();
+    }
+
+    @Override
+    public String toString()
+    {
+        return asUUID().toString();
+    }
+
+    public static String toString(TimeUUID ballot)
+    {
+        return ballot == null ? "null" : ballot.uuidTimestamp() + ":" + ballot;
+    }
+
+    public static String toString(TimeUUID ballot, String kind)
+    {
+        return ballot == null ? "null" : String.format("%s(%d:%s)", kind, ballot.uuidTimestamp(), ballot);
+    }
+
+    @Override
+    public int compareTo(TimeUUID that)
+    {
+        return this.uuidTimestamp != that.uuidTimestamp
+               ? Long.compare(this.uuidTimestamp, that.uuidTimestamp)
+               : Long.compare(this.lsb, that.lsb);
+    }
+
+    protected static abstract class AbstractSerializer<T extends TimeUUID> extends TypeSerializer<T>
+    {
+        public <V> void validate(V value, ValueAccessor<V> accessor) throws MarshalException
+        {
+            if (accessor.isEmpty(value))
+                return;
+
+            if (accessor.size(value) != 16)
+                throw new MarshalException(String.format("UUID should be 16 or 0 bytes (%d)", accessor.size(value)));
+
+            if ((accessor.getByte(value, 6) & 0xf0) != 0x10)
+                throw new MarshalException(String.format("Invalid version for TimeUUID type: 0x%s", Integer.toHexString((accessor.getByte(value, 0) >> 4) & 0xf)));
+        }
+
+        public String toString(T value)
+        {
+            return value == null ? "" : value.toString();
+        }
+
+        public ByteBuffer serialize(T value)
+        {
+            if (value == null)
+                return EMPTY_BYTE_BUFFER;
+            ByteBuffer buffer = ByteBuffer.allocate(16);
+            buffer.putLong(value.msb());
+            buffer.putLong(value.lsb());
+            buffer.flip();
+            return buffer;
+        }
+    }
+
+    public static class Serializer extends AbstractSerializer<TimeUUID> implements IVersionedSerializer<TimeUUID>
+    {
+        public static final Serializer instance = new Serializer();
+
+        public <V> TimeUUID deserialize(V value, ValueAccessor<V> accessor)
+        {
+            return accessor.isEmpty(value) ? null : accessor.toTimeUUID(value);
+        }
+
+        public Class<TimeUUID> getType()
+        {
+            return TimeUUID.class;
+        }
+
+        @Override
+        public void serialize(TimeUUID t, DataOutputPlus out, int version) throws IOException
+        {
+            t.serialize(out);
+        }
+
+        @Override
+        public TimeUUID deserialize(DataInputPlus in, int version) throws IOException
+        {
+            return TimeUUID.deserialize(in);
+        }
+
+        @Override
+        public long serializedSize(TimeUUID t, int version)
+        {
+            return 16;
+        }
+    }
+
+    public static class Generator
+    {
+        private static final long clockSeqAndNode = makeClockSeqAndNode();
+
+        private static final AtomicLong lastMicros = new AtomicLong();
+
+        public static TimeUUID nextTimeUUID()
+        {
+            return atUnixMicrosWithLsb(nextUnixMicros(), clockSeqAndNode);
+        }
+
+        public static UUID nextTimeAsUUID()
+        {
+            return atUnixMicrosWithLsbAsUUID(nextUnixMicros(), clockSeqAndNode);
+        }
+
+        public static TimeUUID atUnixMillis(long unixMillis)
+        {
+            return atUnixMillis(unixMillis, 0);
+        }
+
+        public static TimeUUID atUnixMillis(long unixMillis, long tenthsOfAMicro)
+        {
+            return new TimeUUID(unixMillisToRawTimestamp(unixMillis, tenthsOfAMicro), clockSeqAndNode);
+        }
+
+        public static byte[] atUnixMillisAsBytes(long unixMillis)
+        {
+            return atUnixMillisAsBytes(unixMillis, 0);
+        }
+
+        public static byte[] atUnixMillisAsBytes(long unixMillis, long tenthsOfAMicro)
+        {
+            return toBytes(rawTimestampToMsb(unixMillisToRawTimestamp(unixMillis, tenthsOfAMicro)), clockSeqAndNode);
+        }
+
+        public static byte[] nextTimeUUIDAsBytes()
+        {
+            return toBytes(rawTimestampToMsb(unixMicrosToRawTimestamp(nextUnixMicros())), clockSeqAndNode);
+        }
+
+        // needs to return two different values even when called at the same wall-clock time.
+        // we can generate at most 10k UUIDs per ms.
+        private static long nextUnixMicros()
+        {
+            long newLastMicros;
+            while (true)
+            {
+                // Generate a candidate value for the new lastMicros
+                newLastMicros = currentTimeMillis() * 1000;
+                long originalLastNanos = lastMicros.get();
+                if (newLastMicros > originalLastNanos)
+                {
+                    //Slow path once per millisecond do a CAS
+                    if (lastMicros.compareAndSet(originalLastNanos, newLastMicros))
+                    {
+                        break;
+                    }
+                }
+                else
+                {
+                    //Fast path do an atomic increment
+                    //Or when falling behind this will move time forward past the clock if necessary
+                    newLastMicros = lastMicros.incrementAndGet();
+                    break;
+                }
+            }
+            return newLastMicros;
+        }
+
+        private static long makeClockSeqAndNode()
+        {
+            if (DETERMINISM_UNSAFE_UUID_NODE.getBoolean())
+                return FBUtilities.getBroadcastAddressAndPort().addressBytes[3];
+
+            Long specified = Long.getLong("cassandra.unsafe.timeuuidnode");
+            if (specified != null)
+                return specified
+                       ^ FBUtilities.getBroadcastAddressAndPort().addressBytes[3]
+                       ^ (FBUtilities.getBroadcastAddressAndPort().addressBytes[2] << 8);
+
+            long clock = new SecureRandom().nextLong();
+
+            long lsb = 0;
+            lsb |= 0x8000000000000000L;                 // variant (2 bits)
+            lsb |= (clock & 0x0000000000003FFFL) << 48; // clock sequence (14 bits)
+            lsb |= makeNode();                          // 6 bytes
+            return lsb;
+        }
+
+        private static long makeNode()
+        {
+            /*
+             * We don't have access to the MAC address but need to generate a node part
+             * that identifies this host as uniquely as possible.
+             * The spec says that one option is to take as many sources that identify
+             * this node as possible and hash them together. That's what we do here by
+             * gathering all the IPs of this host.
+             * Note that FBUtilities.getJustBroadcastAddress() should be enough to uniquely
+             * identify the node *in the cluster* but it triggers DatabaseDescriptor
+             * instantiation and the UUID generator is used in Stress for instance,
+             * where we don't want to require the yaml.
+             */
+            Collection<InetAddressAndPort> localAddresses = getAllLocalAddresses();
+            if (localAddresses.isEmpty())
+                throw new RuntimeException("Cannot generate the node component of the UUID because cannot retrieve any IP addresses.");
+
+            // ideally, we'd use the MAC address, but java doesn't expose that.
+            byte[] hash = hash(localAddresses);
+            long node = 0;
+            for (int i = 0; i < Math.min(6, hash.length); i++)
+                node |= (0x00000000000000ff & (long)hash[i]) << (5-i)*8;
+            assert (0xff00000000000000L & node) == 0;
+
+            // Since we don't use the mac address, the spec says that multicast
+            // bit (least significant bit of the first octet of the node ID) must be 1.
+            return node | 0x0000010000000000L;
+        }
+
+        private static byte[] hash(Collection<InetAddressAndPort> data)
+        {
+            // Identify the host.
+            Hasher hasher = Hashing.md5().newHasher();
+            for(InetAddressAndPort addr : data)
+            {
+                hasher.putBytes(addr.addressBytes);
+                hasher.putInt(addr.getPort());
+            }
+
+            // Identify the process on the load: we use both the PID and class loader hash.
+            long pid = NativeLibrary.getProcessID();
+            if (pid < 0)
+                pid = new Random(currentTimeMillis()).nextLong();
+            updateWithLong(hasher, pid);
+
+            ClassLoader loader = UUIDGen.class.getClassLoader();
+            int loaderId = loader != null ? System.identityHashCode(loader) : 0;
+            updateWithInt(hasher, loaderId);
+
+            return hasher.hash().asBytes();
+        }
+
+        private static void updateWithInt(Hasher hasher, int val)
+        {
+            hasher.putByte((byte) ((val >>> 24) & 0xFF));
+            hasher.putByte((byte) ((val >>> 16) & 0xFF));
+            hasher.putByte((byte) ((val >>>  8) & 0xFF));
+            hasher.putByte((byte) ((val >>> 0) & 0xFF));
+        }
+
+        public static void updateWithLong(Hasher hasher, long val)
+        {
+            hasher.putByte((byte) ((val >>> 56) & 0xFF));
+            hasher.putByte((byte) ((val >>> 48) & 0xFF));
+            hasher.putByte((byte) ((val >>> 40) & 0xFF));
+            hasher.putByte((byte) ((val >>> 32) & 0xFF));
+            hasher.putByte((byte) ((val >>> 24) & 0xFF));
+            hasher.putByte((byte) ((val >>> 16) & 0xFF));
+            hasher.putByte((byte) ((val >>>  8) & 0xFF));
+            hasher.putByte((byte)  ((val >>> 0) & 0xFF));
+        }
+
+        /**
+         * Helper function used exclusively by the UUID generator to collect every local address that can identify this host.
+         **/
+        public static Collection<InetAddressAndPort> getAllLocalAddresses()
+        {
+            Set<InetAddressAndPort> localAddresses = new HashSet<>();
+            try
+            {
+                Enumeration<NetworkInterface> nets = NetworkInterface.getNetworkInterfaces();
+                if (nets != null)
+                {
+                    while (nets.hasMoreElements())
+                    {
+                        Function<InetAddress, InetAddressAndPort> converter =
+                        address -> InetAddressAndPort.getByAddressOverrideDefaults(address, 0);
+                        List<InetAddressAndPort> addresses =
+                        Collections.list(nets.nextElement().getInetAddresses()).stream().map(converter).collect(Collectors.toList());
+                        localAddresses.addAll(addresses);
+                    }
+                }
+            }
+            catch (SocketException e)
+            {
+                throw new AssertionError(e);
+            }
+            if (DatabaseDescriptor.isDaemonInitialized())
+            {
+                localAddresses.add(FBUtilities.getBroadcastAddressAndPort());
+                localAddresses.add(FBUtilities.getBroadcastNativeAddressAndPort());
+                localAddresses.add(FBUtilities.getLocalAddressAndPort());
+            }
+            return localAddresses;
+        }
+    }
+}
+
+// ---Copied from UUIDGen
+// for the curious, here is how I generated START_EPOCH
+//        Calendar c = Calendar.getInstance(TimeZone.getTimeZone("GMT-0"));
+//        c.set(Calendar.YEAR, 1582);
+//        c.set(Calendar.MONTH, Calendar.OCTOBER);
+//        c.set(Calendar.DAY_OF_MONTH, 15);
+//        c.set(Calendar.HOUR_OF_DAY, 0);
+//        c.set(Calendar.MINUTE, 0);
+//        c.set(Calendar.SECOND, 0);
+//        c.set(Calendar.MILLISECOND, 0);
+//        long START_EPOCH = c.getTimeInMillis();
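
For reference, a round-trip through the new type using only methods defined above: generation, conversion to and from java.util.UUID, and recovery of the embedded unix timestamp.

    import java.util.UUID;
    import java.util.concurrent.TimeUnit;

    import org.apache.cassandra.utils.TimeUUID;

    TimeUUID id = TimeUUID.Generator.nextTimeUUID();
    UUID asUuid = id.asUUID();                       // still a version-1 UUID on the wire
    TimeUUID back = TimeUUID.fromUuid(asUuid);
    assert id.equals(back);

    long micros = id.unixMicros();                   // microseconds since the unix epoch
    long millis = id.unix(TimeUnit.MILLISECONDS);    // same instant at millisecond resolution
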
diff --git a/src/java/org/apache/cassandra/utils/UUIDGen.java b/src/java/org/apache/cassandra/utils/UUIDGen.java
index c83e292..14ab230 100644
--- a/src/java/org/apache/cassandra/utils/UUIDGen.java
+++ b/src/java/org/apache/cassandra/utils/UUIDGen.java
@@ -17,138 +17,16 @@
  */
 package org.apache.cassandra.utils;
 
-import java.net.InetAddress;
-import java.net.NetworkInterface;
-import java.net.SocketException;
 import java.nio.ByteBuffer;
-import java.security.SecureRandom;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.hash.Hasher;
-import com.google.common.hash.Hashing;
-import com.google.common.primitives.Ints;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.locator.InetAddressAndPort;
 
 /**
  * The goods are here: www.ietf.org/rfc/rfc4122.txt.
  */
 public class UUIDGen
 {
-    // A grand day! millis at 00:00:00.000 15 Oct 1582.
-    private static final long START_EPOCH = -12219292800000L;
-    private static final long clockSeqAndNode = makeClockSeqAndNode();
-
     public static final int UUID_LEN = 16;
 
-    /*
-     * The min and max possible lsb for a UUID.
-     * Note that his is not 0 and all 1's because Cassandra TimeUUIDType
-     * compares the lsb parts as a signed byte array comparison. So the min
-     * value is 8 times -128 and the max is 8 times +127.
-     *
-     * Note that we ignore the uuid variant (namely, MIN_CLOCK_SEQ_AND_NODE
-     * have variant 2 as it should, but MAX_CLOCK_SEQ_AND_NODE have variant 0).
-     * I don't think that has any practical consequence and is more robust in
-     * case someone provides a UUID with a broken variant.
-     */
-    private static final long MIN_CLOCK_SEQ_AND_NODE = 0x8080808080808080L;
-    private static final long MAX_CLOCK_SEQ_AND_NODE = 0x7f7f7f7f7f7f7f7fL;
-
-    private static final SecureRandom secureRandom = new SecureRandom();
-
-    // placement of this singleton is important.  It needs to be instantiated *AFTER* the other statics.
-    private static final UUIDGen instance = new UUIDGen();
-
-    private AtomicLong lastNanos = new AtomicLong();
-
-    private UUIDGen()
-    {
-        // make sure someone didn't whack the clockSeqAndNode by changing the order of instantiation.
-        if (clockSeqAndNode == 0) throw new RuntimeException("singleton instantiation is misplaced.");
-    }
-
-    /**
-     * Creates a type 1 UUID (time-based UUID).
-     *
-     * @return a UUID instance
-     */
-    public static UUID getTimeUUID()
-    {
-        return new UUID(instance.createTimeSafe(), clockSeqAndNode);
-    }
-
-    /**
-     * Creates a type 1 UUID (time-based UUID) with the timestamp of @param when, in milliseconds.
-     *
-     * @return a UUID instance
-     */
-    public static UUID getTimeUUID(long when)
-    {
-        return new UUID(createTime(fromUnixTimestamp(when)), clockSeqAndNode);
-    }
-
-    /**
-     * Returns a version 1 UUID using the provided timestamp and the local clock and sequence.
-     * <p>
-     * Note that this method is generally only safe to use if you can guarantee that the provided
-     * parameter is unique across calls (otherwise the returned UUID won't be unique accross calls).
-     *
-     * @param whenInMicros a unix time in microseconds.
-     * @return a new UUID {@code id} such that {@code microsTimestamp(id) == whenInMicros}. Please not that
-     * multiple calls to this method with the same value of {@code whenInMicros} will return the <b>same</b>
-     * UUID.
-     */
-    public static UUID getTimeUUIDFromMicros(long whenInMicros)
-    {
-        long whenInMillis = whenInMicros / 1000;
-        long nanos = (whenInMicros - (whenInMillis * 1000)) * 10;
-        return getTimeUUID(whenInMillis, nanos);
-    }
-
-    /**
-     * Similar to {@link #getTimeUUIDFromMicros}, but randomize (using SecureRandom) the clock and sequence.
-     * <p>
-     * If you can guarantee that the {@code whenInMicros} argument is unique (for this JVM instance) for
-     * every call, then you should prefer {@link #getTimeUUIDFromMicros} which is faster. If you can't
-     * guarantee this however, this method will ensure the returned UUID are still unique (accross calls)
-     * through randomization.
-     *
-     * @param whenInMicros a unix time in microseconds.
-     * @return a new UUID {@code id} such that {@code microsTimestamp(id) == whenInMicros}. The UUID returned
-     * by different calls will be unique even if {@code whenInMicros} is not.
-     */
-    public static UUID getRandomTimeUUIDFromMicros(long whenInMicros)
-    {
-        long whenInMillis = whenInMicros / 1000;
-        long nanos = (whenInMicros - (whenInMillis * 1000)) * 10;
-        return new UUID(createTime(fromUnixTimestamp(whenInMillis, nanos)), secureRandom.nextLong());
-    }
-
-    public static UUID getTimeUUID(long when, long nanos)
-    {
-        return new UUID(createTime(fromUnixTimestamp(when, nanos)), clockSeqAndNode);
-    }
-
-    @VisibleForTesting
-    public static UUID getTimeUUID(long when, long nanos, long clockSeqAndNode)
-    {
-        return new UUID(createTime(fromUnixTimestamp(when, nanos)), clockSeqAndNode);
-    }
-
     /** creates a type 1 uuid from raw bytes. */
     public static UUID getUUID(ByteBuffer raw)
     {
@@ -179,118 +57,6 @@
     }
 
     /**
-     * Returns a 16 byte representation of a type 1 UUID (a time-based UUID),
-     * based on the current system time.
-     *
-     * @return a type 1 UUID represented as a byte[]
-     */
-    public static byte[] getTimeUUIDBytes()
-    {
-        return createTimeUUIDBytes(instance.createTimeSafe());
-    }
-
-    /**
-     * Returns the smaller possible type 1 UUID having the provided timestamp.
-     *
-     * <b>Warning:</b> this method should only be used for querying as this
-     * doesn't at all guarantee the uniqueness of the resulting UUID.
-     */
-    public static UUID minTimeUUID(long timestamp)
-    {
-        return new UUID(createTime(fromUnixTimestamp(timestamp)), MIN_CLOCK_SEQ_AND_NODE);
-    }
-
-    /**
-     * Returns the biggest possible type 1 UUID having the provided timestamp.
-     *
-     * <b>Warning:</b> this method should only be used for querying as this
-     * doesn't at all guarantee the uniqueness of the resulting UUID.
-     */
-    public static UUID maxTimeUUID(long timestamp)
-    {
-        // unix timestamp are milliseconds precision, uuid timestamp are 100's
-        // nanoseconds precision. If we ask for the biggest uuid have unix
-        // timestamp 1ms, then we should not extend 100's nanoseconds
-        // precision by taking 10000, but rather 19999.
-        long uuidTstamp = fromUnixTimestamp(timestamp + 1) - 1;
-        return new UUID(createTime(uuidTstamp), MAX_CLOCK_SEQ_AND_NODE);
-    }
-
-    /**
-     * @param uuid
-     * @return milliseconds since Unix epoch
-     */
-    public static long unixTimestamp(UUID uuid)
-    {
-        return (uuid.timestamp() / 10000) + START_EPOCH;
-    }
-
-    /**
-     * @param uuid
-     * @return seconds since Unix epoch
-     */
-    public static int unixTimestampInSec(UUID uuid)
-    {
-        return Ints.checkedCast(TimeUnit.MILLISECONDS.toSeconds(unixTimestamp(uuid)));
-    }
-
-    /**
-     * @param uuid
-     * @return microseconds since Unix epoch
-     */
-    public static long microsTimestamp(UUID uuid)
-    {
-        return (uuid.timestamp() / 10) + START_EPOCH * 1000;
-    }
-
-    /**
-     * @param timestamp milliseconds since Unix epoch
-     * @return
-     */
-    private static long fromUnixTimestamp(long timestamp)
-    {
-        return fromUnixTimestamp(timestamp, 0L);
-    }
-
-    private static long fromUnixTimestamp(long timestamp, long nanos)
-    {
-        return ((timestamp - START_EPOCH) * 10000) + nanos;
-    }
-
-    /**
-     * Converts a 100-nanoseconds precision timestamp into the 16 byte representation
-     * of a type 1 UUID (a time-based UUID).
-     *
-     * To specify a 100-nanoseconds precision timestamp, one should provide a milliseconds timestamp and
-     * a number {@code 0 <= n < 10000} such that n*100 is the number of nanoseconds within that millisecond.
-     *
-     * <p><i><b>Warning:</b> This method is not guaranteed to return unique UUIDs; Multiple
-     * invocations using identical timestamps will result in identical UUIDs.</i></p>
-     *
-     * @return a type 1 UUID represented as a byte[]
-     */
-    public static byte[] getTimeUUIDBytes(long timeMillis, int nanos)
-    {
-        if (nanos >= 10000)
-            throw new IllegalArgumentException();
-        return createTimeUUIDBytes(instance.createTimeUnsafe(timeMillis, nanos));
-    }
-
-    private static byte[] createTimeUUIDBytes(long msb)
-    {
-        long lsb = clockSeqAndNode;
-        byte[] uuidBytes = new byte[16];
-
-        for (int i = 0; i < 8; i++)
-            uuidBytes[i] = (byte) (msb >>> 8 * (7 - i));
-
-        for (int i = 8; i < 16; i++)
-            uuidBytes[i] = (byte) (lsb >>> 8 * (7 - i));
-
-        return uuidBytes;
-    }
-
-    /**
      * Returns a milliseconds-since-epoch value for a type-1 UUID.
      *
      * @param uuid a type-1 (time-based) UUID
@@ -301,169 +67,7 @@
     {
         if (uuid.version() != 1)
             throw new IllegalArgumentException("incompatible with uuid version: "+uuid.version());
-        return (uuid.timestamp() / 10000) + START_EPOCH;
-    }
-
-    private static long makeClockSeqAndNode()
-    {
-        long clock = new SecureRandom().nextLong();
-
-        long lsb = 0;
-        lsb |= 0x8000000000000000L;                 // variant (2 bits)
-        lsb |= (clock & 0x0000000000003FFFL) << 48; // clock sequence (14 bits)
-        lsb |= makeNode();                          // 6 bytes
-        return lsb;
-    }
-
-    // needs to return two different values for the same when.
-    // we can generate at most 10k UUIDs per ms.
-    private long createTimeSafe()
-    {
-        long newLastNanos;
-        while (true)
-        {
-            //Generate a candidate value for new lastNanos
-            newLastNanos = (System.currentTimeMillis() - START_EPOCH) * 10000;
-            long originalLastNanos = lastNanos.get();
-            if (newLastNanos > originalLastNanos)
-            {
-                //Slow path once per millisecond do a CAS
-                if (lastNanos.compareAndSet(originalLastNanos, newLastNanos))
-                {
-                    break;
-                }
-            }
-            else
-            {
-                //Fast path do an atomic increment
-                //Or when falling behind this will move time forward past the clock if necessary
-                newLastNanos = lastNanos.incrementAndGet();
-                break;
-            }
-        }
-        return createTime(newLastNanos);
-    }
-
-    private long createTimeUnsafe(long when, int nanos)
-    {
-        long nanosSince = ((when - START_EPOCH) * 10000) + nanos;
-        return createTime(nanosSince);
-    }
-
-    private static long createTime(long nanosSince)
-    {
-        long msb = 0L;
-        msb |= (0x00000000ffffffffL & nanosSince) << 32;
-        msb |= (0x0000ffff00000000L & nanosSince) >>> 16;
-        msb |= (0xffff000000000000L & nanosSince) >>> 48;
-        msb |= 0x0000000000001000L; // sets the version to 1.
-        return msb;
-    }
-
-    private static long makeNode()
-    {
-       /*
-        * We don't have access to the MAC address but need to generate a node part
-        * that identify this host as uniquely as possible.
-        * The spec says that one option is to take as many source that identify
-        * this node as possible and hash them together. That's what we do here by
-        * gathering all the ip of this host.
-        * Note that FBUtilities.getJustBroadcastAddress() should be enough to uniquely
-        * identify the node *in the cluster* but it triggers DatabaseDescriptor
-        * instanciation and the UUID generator is used in Stress for instance,
-        * where we don't want to require the yaml.
-        */
-        Collection<InetAddressAndPort> localAddresses = getAllLocalAddresses();
-        if (localAddresses.isEmpty())
-            throw new RuntimeException("Cannot generate the node component of the UUID because cannot retrieve any IP addresses.");
-
-        // ideally, we'd use the MAC address, but java doesn't expose that.
-        byte[] hash = hash(localAddresses);
-        long node = 0;
-        for (int i = 0; i < Math.min(6, hash.length); i++)
-            node |= (0x00000000000000ff & (long)hash[i]) << (5-i)*8;
-        assert (0xff00000000000000L & node) == 0;
-
-        // Since we don't use the mac address, the spec says that multicast
-        // bit (least significant bit of the first octet of the node ID) must be 1.
-        return node | 0x0000010000000000L;
-    }
-
-    private static byte[] hash(Collection<InetAddressAndPort> data)
-    {
-        // Identify the host.
-        Hasher hasher = Hashing.md5().newHasher();
-        for(InetAddressAndPort addr : data)
-        {
-            hasher.putBytes(addr.addressBytes);
-            hasher.putInt(addr.port);
-        }
-
-        // Identify the process on the load: we use both the PID and class loader hash.
-        long pid = NativeLibrary.getProcessID();
-        if (pid < 0)
-            pid = new Random(System.currentTimeMillis()).nextLong();
-        updateWithLong(hasher, pid);
-
-        ClassLoader loader = UUIDGen.class.getClassLoader();
-        int loaderId = loader != null ? System.identityHashCode(loader) : 0;
-        updateWithInt(hasher, loaderId);
-
-        return hasher.hash().asBytes();
-    }
-
-    private static void updateWithInt(Hasher hasher, int val)
-    {
-        hasher.putByte((byte) ((val >>> 24) & 0xFF));
-        hasher.putByte((byte) ((val >>> 16) & 0xFF));
-        hasher.putByte((byte) ((val >>>  8) & 0xFF));
-        hasher.putByte((byte) ((val >>> 0) & 0xFF));
-    }
-
-    public static void updateWithLong(Hasher hasher, long val)
-    {
-        hasher.putByte((byte) ((val >>> 56) & 0xFF));
-        hasher.putByte((byte) ((val >>> 48) & 0xFF));
-        hasher.putByte((byte) ((val >>> 40) & 0xFF));
-        hasher.putByte((byte) ((val >>> 32) & 0xFF));
-        hasher.putByte((byte) ((val >>> 24) & 0xFF));
-        hasher.putByte((byte) ((val >>> 16) & 0xFF));
-        hasher.putByte((byte) ((val >>>  8) & 0xFF));
-        hasher.putByte((byte)  ((val >>> 0) & 0xFF));
-    }
-
-    /**
-     * Helper function used exclusively by UUIDGen to create
-     **/
-    public static Collection<InetAddressAndPort> getAllLocalAddresses()
-    {
-        Set<InetAddressAndPort> localAddresses = new HashSet<>();
-        try
-        {
-            Enumeration<NetworkInterface> nets = NetworkInterface.getNetworkInterfaces();
-            if (nets != null)
-            {
-                while (nets.hasMoreElements())
-                {
-                    Function<InetAddress, InetAddressAndPort> converter =
-                    address -> InetAddressAndPort.getByAddressOverrideDefaults(address, 0);
-                    List<InetAddressAndPort> addresses =
-                    Collections.list(nets.nextElement().getInetAddresses()).stream().map(converter).collect(Collectors.toList());
-                    localAddresses.addAll(addresses);
-                }
-            }
-        }
-        catch (SocketException e)
-        {
-            throw new AssertionError(e);
-        }
-        if (DatabaseDescriptor.isDaemonInitialized())
-        {
-            localAddresses.add(FBUtilities.getBroadcastAddressAndPort());
-            localAddresses.add(FBUtilities.getBroadcastNativeAddressAndPort());
-            localAddresses.add(FBUtilities.getLocalAddressAndPort());
-        }
-        return localAddresses;
+        return (uuid.timestamp() / 10000) + TimeUUID.UUID_EPOCH_UNIX_MILLIS;
     }
 
 }
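For reference, a self-contained sketch of the arithmetic the surviving line performs, assuming TimeUUID.UUID_EPOCH_UNIX_MILLIS holds the Unix-millisecond value of the UUID (Gregorian) epoch; the constant and class below are local stand-ins, not the patched code:

    import java.util.UUID;

    // UUID.timestamp() counts 100ns intervals since 1582-10-15 (the Gregorian/UUID
    // epoch); dividing by 10,000 gives milliseconds since that epoch, and adding
    // the (negative) Unix-millisecond value of that epoch gives Unix milliseconds.
    final class UnixMillisSketch
    {
        // Assumed to match TimeUUID.UUID_EPOCH_UNIX_MILLIS: 1582-10-15T00:00:00Z as Unix millis.
        static final long UUID_EPOCH_UNIX_MILLIS = -12219292800000L;

        static long unixMillis(UUID uuid)
        {
            return (uuid.timestamp() / 10000) + UUID_EPOCH_UNIX_MILLIS;
        }
    }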
diff --git a/src/java/org/apache/cassandra/utils/VoidSerializer.java b/src/java/org/apache/cassandra/utils/VoidSerializer.java
new file mode 100644
index 0000000..1bc1167
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/VoidSerializer.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import java.io.IOException;
+
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
+public class VoidSerializer implements IVersionedSerializer<Void>
+{
+    public static final VoidSerializer serializer = new VoidSerializer();
+    private VoidSerializer() {}
+    public void serialize(Void v, DataOutputPlus out, int version) throws IOException {}
+    public Void deserialize(DataInputPlus in, int version) throws IOException { return null; }
+    public long serializedSize(Void v, int version) { return 0; }
+}
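A minimal usage sketch for the new singleton: a verb whose payload carries no data can reuse it instead of defining its own serializer. The helper below is illustrative and assumes the surrounding Cassandra classes are on the classpath:

    import java.io.IOException;

    import org.apache.cassandra.io.util.DataOutputPlus;
    import org.apache.cassandra.utils.VoidSerializer;

    // Illustrative only: a Void payload occupies zero bytes on the wire.
    final class VoidPayloadSketch
    {
        static void writeEmptyPayload(DataOutputPlus out, int messagingVersion) throws IOException
        {
            assert VoidSerializer.serializer.serializedSize(null, messagingVersion) == 0;
            VoidSerializer.serializer.serialize(null, out, messagingVersion); // writes nothing
        }
    }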
diff --git a/src/java/org/apache/cassandra/utils/WindowsTimer.java b/src/java/org/apache/cassandra/utils/WindowsTimer.java
deleted file mode 100644
index bbd162c..0000000
--- a/src/java/org/apache/cassandra/utils/WindowsTimer.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.utils;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.sun.jna.LastErrorException;
-import com.sun.jna.Native;
-
-public final class WindowsTimer
-{
-    private static final Logger logger = LoggerFactory.getLogger(WindowsTimer.class);
-
-    static
-    {
-        try
-        {
-            Native.register("winmm");
-        }
-        catch (NoClassDefFoundError e)
-        {
-            logger.warn("JNA not found. winmm.dll cannot be registered. Performance will be negatively impacted on this node.");
-        }
-        catch (Exception e)
-        {
-            logger.error("Failed to register winmm.dll. Performance will be negatively impacted on this node.");
-        }
-    }
-
-    private static native int timeBeginPeriod(int period) throws LastErrorException;
-    private static native int timeEndPeriod(int period) throws LastErrorException;
-
-    private WindowsTimer() {}
-
-    public static void startTimerPeriod(int period)
-    {
-        if (period == 0)
-            return;
-        assert(period > 0);
-        if (timeBeginPeriod(period) != 0)
-            logger.warn("Failed to set timer to : {}. Performance will be degraded.", period);
-    }
-
-    public static void endTimerPeriod(int period)
-    {
-        if (period == 0)
-            return;
-        assert(period > 0);
-        if (timeEndPeriod(period) != 0)
-            logger.warn("Failed to end accelerated timer period. System timer will remain set to: {} ms.", period);
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/WithResources.java b/src/java/org/apache/cassandra/utils/WithResources.java
new file mode 100644
index 0000000..0c0bb92
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/WithResources.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A generic interface for encapsulating a Runnable task with related work before and after execution,
+ * using the built-in try-with-resources functionality offered by {@link Closeable}.
+ *
+ * See {@link ExecutorPlus#execute(WithResources, Runnable)}
+ */
+@Shared(scope = SIMULATION)
+public interface WithResources
+{
+    static class None implements WithResources
+    {
+        static final None INSTANCE = new None();
+        private None() {}
+        @Override
+        public Closeable get()
+        {
+            return () -> {};
+        }
+
+        @Override
+        public boolean isNoOp()
+        {
+            return true;
+        }
+    }
+
+    /**
+     * Instantiate any necessary resources
+     * @return an object that closes any instantiated resources
+     */
+    public Closeable get();
+
+    /**
+     * A convenience method to avoid unnecessary work.
+     * @return true iff this object performs no work when {@link #get()} is invoked, nor when {@link Closeable#close()}
+     *         is invoked on the object it returns.
+     */
+    default public boolean isNoOp() { return false; }
+    default public WithResources and(WithResources withResources)
+    {
+        return and(this, withResources);
+    }
+    static WithResources none() { return None.INSTANCE; }
+
+    @SuppressWarnings("resource")
+    public static WithResources and(WithResources first, WithResources second)
+    {
+        if (second.isNoOp()) return first;
+        if (first.isNoOp()) return second;
+        return () -> {
+            Closeable a = first.get();
+            try
+            {
+                Closeable b = second.get();
+                return () -> {
+                    try { a.close(); }
+                    finally { b.close(); }
+                };
+            }
+            catch (Throwable t)
+            {
+                try { a.close(); } catch (Throwable t2) { t.addSuppressed(t2); }
+                throw t;
+            }
+        };
+    }
+}
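A minimal sketch of an implementation and how it composes; the thread-local task tag is illustrative and not part of the patch, and Closeable refers to the org.apache.cassandra.utils.Closeable used by the interface above:

    import org.apache.cassandra.utils.Closeable;
    import org.apache.cassandra.utils.WithResources;

    // Illustrative: install a per-task tag before the task runs, restore the previous one after.
    final class WithTaskTag implements WithResources
    {
        private static final ThreadLocal<String> TAG = new ThreadLocal<>();
        private final String tag;

        WithTaskTag(String tag) { this.tag = tag; }

        @Override
        public Closeable get()
        {
            String previous = TAG.get();
            TAG.set(tag);                    // work performed before the task
            return () -> TAG.set(previous);  // work performed after, via try-with-resources
        }
    }

An ExecutorPlus would then run the pair as executor.execute(new WithTaskTag("repair"), task), and wrappers can be combined with new WithTaskTag("repair").and(otherResources), which short-circuits when either side isNoOp().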
diff --git a/src/java/org/apache/cassandra/utils/binlog/BinLog.java b/src/java/org/apache/cassandra/utils/binlog/BinLog.java
index b226751..43ff67e 100644
--- a/src/java/org/apache/cassandra/utils/binlog/BinLog.java
+++ b/src/java/org/apache/cassandra/utils/binlog/BinLog.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils.binlog;
 
-import java.io.File;
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -32,6 +31,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,10 +44,10 @@
 import net.openhft.chronicle.wire.WriteMarshallable;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.io.FSError;
-import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NoSpamLogger;
 import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 import org.apache.cassandra.utils.concurrent.WeightedQueue;
 
 import static java.lang.String.format;
@@ -125,7 +125,7 @@
         Preconditions.checkNotNull(path, "path was null");
         Preconditions.checkNotNull(options.roll_cycle, "roll_cycle was null");
         Preconditions.checkArgument(options.max_queue_weight > 0, "max_queue_weight must be > 0");
-        SingleChronicleQueueBuilder builder = SingleChronicleQueueBuilder.single(path.toFile());
+        SingleChronicleQueueBuilder builder = SingleChronicleQueueBuilder.single(path.toFile()); // checkstyle: permit this invocation
         builder.rollCycle(RollCycles.valueOf(options.roll_cycle));
 
         sampleQueue = new WeightedQueue<>(options.max_queue_weight);
@@ -295,7 +295,7 @@
                 }
                 catch (InterruptedException e)
                 {
-                    throw new RuntimeException(e);
+                    throw new UncheckedInterruptedException(e);
                 }
             }
             else
@@ -366,11 +366,11 @@
         public Builder path(Path path)
         {
             Preconditions.checkNotNull(path, "path was null");
-            File pathAsFile = path.toFile();
+            File pathAsFile = new File(path);
             //Exists and is a directory or can be created
             Preconditions.checkArgument(!pathAsFile.toString().isEmpty(), "you might have forgotten to specify a directory to save logs");
-            Preconditions.checkArgument((pathAsFile.exists() && pathAsFile.isDirectory()) || (!pathAsFile.exists() && pathAsFile.mkdirs()), "path exists and is not a directory or couldn't be created");
-            Preconditions.checkArgument(pathAsFile.canRead() && pathAsFile.canWrite() && pathAsFile.canExecute(), "path is not readable, writable, and executable");
+            Preconditions.checkArgument((pathAsFile.exists() && pathAsFile.isDirectory()) || (!pathAsFile.exists() && pathAsFile.tryCreateDirectories()), "path exists and is not a directory or couldn't be created");
+            Preconditions.checkArgument(pathAsFile.isReadable() && pathAsFile.isWritable() && pathAsFile.isExecutable(), "path is not readable, writable, and executable");
             this.path = path;
             return this;
         }
@@ -428,7 +428,7 @@
             }
             try
             {
-                Throwable sanitationThrowable = cleanEmptyLogFiles(path.toFile(), null);
+                Throwable sanitationThrowable = cleanEmptyLogFiles(new File(path), null);
                 if (sanitationThrowable != null)
                     throw new RuntimeException(format("Unable to clean up %s directory from empty %s files.",
                                                       path.toAbsolutePath(), SingleChronicleQueue.SUFFIX),
@@ -439,9 +439,9 @@
                 if (cleanDirectory)
                 {
                     logger.info("Cleaning directory: {} as requested", path);
-                    if (path.toFile().exists())
+                    if (new File(path).exists())
                     {
-                        Throwable error = cleanDirectory(path.toFile(), null);
+                        Throwable error = cleanDirectory(new File(path), null);
                         if (error != null)
                         {
                             throw new RuntimeException(error);
@@ -476,14 +476,14 @@
     private static Throwable cleanEmptyLogFiles(File directory, Throwable accumulate)
     {
         return cleanDirectory(directory, accumulate,
-                              (dir) -> dir.listFiles(file -> {
+                              (dir) -> dir.tryList(file -> {
                                   boolean foundEmptyCq4File = !file.isDirectory()
                                                               && file.length() == 0
-                                                              && file.getName().endsWith(SingleChronicleQueue.SUFFIX);
+                                                              && file.name().endsWith(SingleChronicleQueue.SUFFIX);
 
                                   if (foundEmptyCq4File)
                                       logger.warn("Found empty ChronicleQueue file {}. This file wil be deleted as part of BinLog initialization.",
-                                                  file.getAbsolutePath());
+                                                  file.absolutePath());
 
                                   return foundEmptyCq4File;
                               }));
@@ -491,7 +491,7 @@
 
     public static Throwable cleanDirectory(File directory, Throwable accumulate)
     {
-        return cleanDirectory(directory, accumulate, File::listFiles);
+        return cleanDirectory(directory, accumulate, File::tryList);
     }
 
     private static Throwable cleanDirectory(File directory, Throwable accumulate, Function<File, File[]> lister)
@@ -517,12 +517,12 @@
     {
         if (fileOrDirectory.isDirectory())
         {
-            File[] files = fileOrDirectory.listFiles();
+            File[] files = fileOrDirectory.tryList();
             if (files != null)
                 for (File f : files)
-                    accumulate = FileUtils.deleteWithConfirm(f, accumulate);
+                    accumulate = f.delete(accumulate, null);
         }
-        return FileUtils.deleteWithConfirm(fileOrDirectory, accumulate);
+        return fileOrDirectory.delete(accumulate, null);
     }
 
     private static Throwable checkDirectory(File directory, Throwable accumulate)
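For readers following the java.io.File to org.apache.cassandra.io.util.File migration in the BinLog.java diff above, a condensed sketch of the mappings it uses (only methods that appear in the patch are shown; the helper class is illustrative):

    import java.nio.file.Path;

    import org.apache.cassandra.io.util.File;

    // Illustrative recap of the replacements applied above.
    final class FileMigrationSketch
    {
        static void ensureUsableDirectory(Path path)
        {
            File dir = new File(path);                        // was: path.toFile()
            if (!dir.exists() && !dir.tryCreateDirectories()) // was: mkdirs()
                throw new IllegalStateException("cannot create " + dir.absolutePath()); // was: getAbsolutePath()
            if (!(dir.isReadable() && dir.isWritable() && dir.isExecutable()))          // was: canRead()/canWrite()/canExecute()
                throw new IllegalStateException(dir.name() + " has insufficient permissions"); // was: getName()
        }

        static Throwable deleteChildren(File dir, Throwable accumulate)
        {
            File[] children = dir.tryList();                     // was: listFiles()
            if (children != null)
                for (File child : children)
                    accumulate = child.delete(accumulate, null); // was: FileUtils.deleteWithConfirm(child, accumulate)
            return accumulate;
        }
    }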
diff --git a/src/java/org/apache/cassandra/utils/binlog/BinLogArchiver.java b/src/java/org/apache/cassandra/utils/binlog/BinLogArchiver.java
index 9a6f0bc..f009629 100644
--- a/src/java/org/apache/cassandra/utils/binlog/BinLogArchiver.java
+++ b/src/java/org/apache/cassandra/utils/binlog/BinLogArchiver.java
@@ -18,7 +18,7 @@
 
 package org.apache.cassandra.utils.binlog;
 
-import java.io.File;
+import java.io.File; // checkstyle: permit this import
 
 import net.openhft.chronicle.queue.impl.StoreFileListener;
 
diff --git a/src/java/org/apache/cassandra/utils/binlog/DeletingArchiver.java b/src/java/org/apache/cassandra/utils/binlog/DeletingArchiver.java
index 3bdbb8f..44bdc8e 100644
--- a/src/java/org/apache/cassandra/utils/binlog/DeletingArchiver.java
+++ b/src/java/org/apache/cassandra/utils/binlog/DeletingArchiver.java
@@ -18,7 +18,7 @@
 
 package org.apache.cassandra.utils.binlog;
 
-import java.io.File;
+import java.io.File; // checkstyle: permit this import
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
 
diff --git a/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java b/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
index e53c5b0..a6b2335 100644
--- a/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
+++ b/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
@@ -18,14 +18,13 @@
 
 package org.apache.cassandra.utils.binlog;
 
-import java.io.File;
+import java.io.File; // checkstyle: permit this import
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.concurrent.DelayQueue;
 import java.util.concurrent.Delayed;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -37,8 +36,12 @@
 import org.slf4j.LoggerFactory;
 
 import net.openhft.chronicle.queue.impl.single.SingleChronicleQueue;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Archives binary log files immediately when they are rolled using a configured archive command.
@@ -57,7 +60,7 @@
      */
     private final DelayQueue<DelayFile> archiveQueue = new DelayQueue<>();
     private final String archiveCommand;
-    private final ExecutorService executor = Executors.newSingleThreadExecutor(new NamedThreadFactory("BinLogArchiver"));
+    private final ExecutorService executor = executorFactory().sequential("BinLogArchiver");
     private final Path path;
     /**
      * for testing, to be able to make sure that the command is executed
@@ -134,7 +137,11 @@
             // and try to archive all remaining files before exiting
             archiveExisting(path);
         }
-        catch (InterruptedException | ExecutionException e)
+        catch (InterruptedException e)
+        {
+            throw new UncheckedInterruptedException(e);
+        }
+        catch (ExecutionException e)
         {
             throw new RuntimeException(e);
         }
@@ -147,7 +154,7 @@
     {
         if (path == null)
             return;
-        for (File f : path.toFile().listFiles((f) -> f.isFile() && f.getName().endsWith(SingleChronicleQueue.SUFFIX)))
+        for (File f : path.toFile().listFiles((f) -> f.isFile() && f.getName().endsWith(SingleChronicleQueue.SUFFIX))) // checkstyle: permit this invocation
         {
             try
             {
@@ -184,12 +191,12 @@
         public DelayFile(File file, long delay, TimeUnit delayUnit, int retries)
         {
             this.file = file;
-            this.delayTime = System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(delay, delayUnit);
+            this.delayTime = currentTimeMillis() + MILLISECONDS.convert(delay, delayUnit);
             this.retries = retries;
         }
         public long getDelay(TimeUnit unit)
         {
-            return unit.convert(delayTime - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
+            return unit.convert(delayTime - currentTimeMillis(), TimeUnit.MILLISECONDS);
         }
 
         public int compareTo(Delayed o)
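A self-contained sketch of the DelayQueue pattern the archiver's retry logic relies on: an element only becomes available to take() once its getDelay() reaches zero. The class below mirrors DelayFile's timing computation but uses System.currentTimeMillis() in place of Clock.Global.currentTimeMillis(); names are illustrative:

    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    import static java.util.concurrent.TimeUnit.MILLISECONDS;

    final class DelayedTask implements Delayed
    {
        final String name;
        final long readyAtMillis;

        DelayedTask(String name, long delay, TimeUnit delayUnit)
        {
            this.name = name;
            this.readyAtMillis = System.currentTimeMillis() + MILLISECONDS.convert(delay, delayUnit);
        }

        @Override
        public long getDelay(TimeUnit unit)
        {
            return unit.convert(readyAtMillis - System.currentTimeMillis(), MILLISECONDS);
        }

        @Override
        public int compareTo(Delayed o)
        {
            return Long.compare(getDelay(MILLISECONDS), o.getDelay(MILLISECONDS));
        }

        public static void main(String[] args) throws InterruptedException
        {
            DelayQueue<DelayedTask> queue = new DelayQueue<>();
            queue.add(new DelayedTask("retry-archive", 100, MILLISECONDS));
            DelayedTask ready = queue.take(); // blocks until the 100ms delay expires
            System.out.println(ready.name);
        }
    }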
diff --git a/src/java/org/apache/cassandra/utils/btree/BTree.java b/src/java/org/apache/cassandra/utils/btree/BTree.java
index 97f5cc0..a026f70 100644
--- a/src/java/org/apache/cassandra/utils/btree/BTree.java
+++ b/src/java/org/apache/cassandra/utils/btree/BTree.java
@@ -2199,6 +2199,7 @@
     {
         if (isEmpty(tree))
             return 0;
+
         long size = ObjectSizes.sizeOfArray(tree);
         if (isLeaf(tree))
             return size;
@@ -2212,6 +2213,7 @@
     {
         if (isEmpty(tree))
             return 0;
+
         return ObjectSizes.sizeOfArray(tree);
     }
 
diff --git a/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java b/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java
index a23f460..3c1991a 100644
--- a/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java
+++ b/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java
@@ -133,4 +133,4 @@
         int current = forwards ? nextPos - 1 : nextPos + 1;
         return forwards ? current - lowerBound : upperBound - current;
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/btree/NodeCursor.java b/src/java/org/apache/cassandra/utils/btree/NodeCursor.java
index e9fa89e..4c7e993 100644
--- a/src/java/org/apache/cassandra/utils/btree/NodeCursor.java
+++ b/src/java/org/apache/cassandra/utils/btree/NodeCursor.java
@@ -195,4 +195,4 @@
     {
         return (K) node[position];
     }
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/concurrent/AbstractFuture.java b/src/java/org/apache/cassandra/utils/concurrent/AbstractFuture.java
new file mode 100644
index 0000000..83cd7d3
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/AbstractFuture.java
@@ -0,0 +1,530 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import javax.annotation.Nullable;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.ListenableFuture; // checkstyle: permit this import
+
+import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.internal.ThrowableUtil;
+import org.apache.cassandra.utils.concurrent.ListenerList.CallbackBiConsumerListener;
+import org.apache.cassandra.utils.concurrent.ListenerList.CallbackLambdaListener;
+import org.apache.cassandra.utils.concurrent.ListenerList.CallbackListener;
+import org.apache.cassandra.utils.concurrent.ListenerList.CallbackListenerWithExecutor;
+import org.apache.cassandra.utils.concurrent.ListenerList.GenericFutureListenerList;
+import org.apache.cassandra.utils.concurrent.ListenerList.RunnableWithExecutor;
+import org.apache.cassandra.utils.concurrent.ListenerList.RunnableWithNotifyExecutor;
+
+import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater.newUpdater;
+import static org.apache.cassandra.utils.concurrent.ListenerList.notifyListener;
+
+/**
+ * Our default {@link Future} implementation, with all state being managed without locks (except those used by the JVM).
+ *
+ * Some implementation comments versus Netty's default promise:
+ *  - We permit efficient initial state declaration, avoiding unnecessary CAS or lock acquisitions when mutating
+ *    a Promise we are ourselves constructing (and can easily add more; only those we use have been added)
+ *  - We guarantee the order of invocation of listeners (and callbacks etc, and with respect to each other)
+ *  - We save some space when registering listeners, especially if there is only one listener, as we perform no
+ *    extra allocations in this case.
+ *  - We implement our invocation list as a concurrent stack, that is cleared on notification
+ *  - We handle special values slightly differently.
+ *    - We do not use a special value for null, instead using a special value to indicate the result has not been set.
+ *      This means that once isSuccess() holds, the result must be a correctly typed object (modulo generics pitfalls).
+ *    - All special values are also instances of FailureHolder, which simplifies a number of the logical conditions.
+ */
+@SuppressWarnings({ "rawtypes", "unchecked" })
+public abstract class AbstractFuture<V> implements Future<V>
+{
+    protected static final FailureHolder UNSET = new FailureHolder(null);
+    protected static final FailureHolder UNCANCELLABLE = new FailureHolder(null);
+    protected static final FailureHolder CANCELLED = new FailureHolder(ThrowableUtil.unknownStackTrace(new CancellationException(), AbstractFuture.class, "cancel(...)"));
+
+    static class FailureHolder
+    {
+        final Throwable cause;
+        FailureHolder(Throwable cause)
+        {
+            this.cause = cause;
+        }
+    }
+
+    private static Throwable cause(Object result)
+    {
+        return result instanceof FailureHolder ? ((FailureHolder) result).cause : null;
+    }
+    static boolean isSuccess(Object result)
+    {
+        return !(result instanceof FailureHolder);
+    }
+    static boolean isCancelled(Object result)
+    {
+        return result == CANCELLED;
+    }
+    static boolean isDone(Object result)
+    {
+        return result != UNSET && result != UNCANCELLABLE;
+    }
+
+    volatile Object result;
+    volatile ListenerList<V> listeners; // either a ListenerList or GenericFutureListener (or null)
+    static final AtomicReferenceFieldUpdater<AbstractFuture, Object> resultUpdater = newUpdater(AbstractFuture.class, Object.class, "result");
+    static final AtomicReferenceFieldUpdater<AbstractFuture, ListenerList> listenersUpdater = newUpdater(AbstractFuture.class, ListenerList.class, "listeners");
+
+    protected AbstractFuture(FailureHolder initialState)
+    {
+        // TODO: document visibility of constructor (i.e. must be safely published)
+        resultUpdater.lazySet(this, initialState);
+    }
+
+    public AbstractFuture()
+    {
+        this(UNSET);
+    }
+
+    protected AbstractFuture(V immediateSuccess)
+    {
+        resultUpdater.lazySet(this, immediateSuccess);
+    }
+
+    protected AbstractFuture(Throwable immediateFailure)
+    {
+       this(new FailureHolder(immediateFailure));
+    }
+
+    protected AbstractFuture(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        this();
+        listenersUpdater.lazySet(this, new GenericFutureListenerList(listener));
+    }
+
+    protected AbstractFuture(FailureHolder initialState, GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        this(initialState);
+        listenersUpdater.lazySet(this, new GenericFutureListenerList(listener));
+    }
+
+    public Executor notifyExecutor()
+    {
+        return null;
+    }
+
+    protected boolean trySuccess(V v)
+    {
+        return trySet(v);
+    }
+
+    protected boolean tryFailure(Throwable throwable)
+    {
+        return trySet(new FailureHolder(throwable));
+    }
+
+    protected boolean setUncancellable()
+    {
+        if (trySet(UNCANCELLABLE))
+            return true;
+        return isUncancellable();
+    }
+
+    protected boolean setUncancellableExclusive()
+    {
+        return trySet(UNCANCELLABLE);
+    }
+
+    protected boolean isUncancellable()
+    {
+        Object result = this.result;
+        return result == UNCANCELLABLE || (isDone(result) && !isCancelled(result));
+    }
+
+    public boolean cancel(boolean b)
+    {
+        return trySet(CANCELLED);
+    }
+
+    /**
+     * Shared implementation of various promise completion methods.
+     * Updates the result if it is possible to do so, returning success/failure.
+     *
+     * If the promise is UNSET the new value will succeed;
+     *          if it is UNCANCELLABLE it will succeed only if the new value is not CANCELLED;
+     *          otherwise it will fail, as isDone() is implied.
+     *
+     * If the update succeeds, and the new state implies isDone(), any listeners and waiters will be notified
+     */
+    abstract boolean trySet(Object v);
+
+    @Override
+    public boolean isSuccess()
+    {
+        return isSuccess(result);
+    }
+
+    @Override
+    public boolean isCancelled()
+    {
+        return isCancelled(result);
+    }
+
+    @Override
+    public boolean isDone()
+    {
+        return isDone(result);
+    }
+
+    @Override
+    public boolean isCancellable()
+    {
+        return result == UNSET;
+    }
+
+    @Override
+    public Throwable cause()
+    {
+        return cause(result);
+    }
+
+    /**
+     * if isSuccess(), returns the value, otherwise returns null
+     */
+    @Override
+    public V getNow()
+    {
+        Object result = this.result;
+        if (isSuccess(result))
+            return (V) result;
+        return null;
+    }
+
+    /**
+     * Shared implementation of get() after suitable await(); assumes isDone(), and returns
+     * either the success result or throws the suitable exception under failure
+     */
+    protected V getWhenDone() throws ExecutionException
+    {
+        Object result = this.result;
+        if (isSuccess(result))
+            return (V) result;
+        if (result == CANCELLED)
+            throw new CancellationException();
+        throw new ExecutionException(((FailureHolder) result).cause);
+    }
+
+    @Override
+    public V get() throws InterruptedException, ExecutionException
+    {
+        await();
+        return getWhenDone();
+    }
+
+    @Override
+    public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
+    {
+        if (!await(timeout, unit))
+            throw new TimeoutException();
+        return getWhenDone();
+    }
+
+    /**
+     * Logically append {@code newListener} to {@link #listeners}
+     * (at this stage it is a stack, so we actually prepend)
+     */
+    abstract void appendListener(ListenerList<V> newListener);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public AbstractFuture<V> addCallback(FutureCallback<? super V> callback)
+    {
+        appendListener(new CallbackListener<>(this, callback));
+        return this;
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public AbstractFuture<V> addCallback(BiConsumer<? super V, Throwable> callback)
+    {
+        appendListener(new CallbackBiConsumerListener<>(this, callback, null));
+        return this;
+    }
+
+    @Override
+    public Future<V> addCallback(BiConsumer<? super V, Throwable> callback, Executor executor)
+    {
+        appendListener(new CallbackBiConsumerListener<>(this, callback, executor));
+        return this;
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public AbstractFuture<V> addCallback(FutureCallback<? super V> callback, Executor executor)
+    {
+        Preconditions.checkNotNull(executor);
+        appendListener(new CallbackListenerWithExecutor<>(this, callback, executor));
+        return this;
+    }
+
+    /**
+     * Support more fluid version of {@link com.google.common.util.concurrent.Futures#addCallback}
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public AbstractFuture<V> addCallback(Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure)
+    {
+        appendListener(new CallbackLambdaListener<>(this, onSuccess, onFailure, null));
+        return this;
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transformAsync(ListenableFuture, AsyncFunction, Executor)} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public <T> Future<T> map(Function<? super V, ? extends T> mapper)
+    {
+        return map(mapper, null);
+    }
+
+    /**
+     * Support more fluid version of {@link com.google.common.util.concurrent.Futures#addCallback}
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public AbstractFuture<V> addCallback(Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure, Executor executor)
+    {
+        appendListener(new CallbackLambdaListener<>(this, onSuccess, onFailure, executor));
+        return this;
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transform(ListenableFuture, com.google.common.base.Function, Executor)} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    protected <T> Future<T> map(AbstractFuture<T> result, Function<? super V, ? extends T> mapper, @Nullable Executor executor)
+    {
+        addListener(() -> {
+            try
+            {
+                if (isSuccess()) result.trySet(mapper.apply(getNow()));
+                else result.tryFailure(cause());
+            }
+            catch (Throwable t)
+            {
+                result.tryFailure(t);
+                throw t;
+            }
+        }, executor);
+        return result;
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transformAsync(ListenableFuture, AsyncFunction, Executor)} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    protected <T> Future<T> flatMap(AbstractFuture<T> result, Function<? super V, ? extends Future<T>> flatMapper, @Nullable Executor executor)
+    {
+        addListener(() -> {
+            try
+            {
+                if (isSuccess()) flatMapper.apply(getNow()).addListener(propagate(result));
+                else result.tryFailure(cause());
+            }
+            catch (Throwable t)
+            {
+                result.tryFailure(t);
+                throw t;
+            }
+        }, executor);
+        return result;
+    }
+
+    /**
+     * Add a listener to be invoked once this future completes.
+     * Listeners are submitted to {@link #notifyExecutor} in the order they are added (or the specified executor
+     * in the case of {@link #addListener(Runnable, Executor)}).
+     * If {@link #notifyExecutor} is unset, they are invoked in the order they are added.
+     * The ordering holds across all variants of this method.
+     */
+    public Future<V> addListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        appendListener(new GenericFutureListenerList(listener));
+        return this;
+    }
+
+    /**
+     * Add a listener to be invoked once this future completes.
+     * Listeners are submitted to their {@code #executor} (or {@link #notifyExecutor}) in the order they are added;
+     * if {@link #notifyExecutor} is unset, they are invoked in the order they are added.
+     * The ordering holds across all variants of this method.
+     */
+    public void addListener(Runnable task, @Nullable Executor executor)
+    {
+        appendListener(new RunnableWithExecutor(task, executor));
+    }
+
+    /**
+     * Add a listener to be invoked once this future completes.
+     * Listeners are submitted to {@link #notifyExecutor} in the order they are added (or the specified executor
+     * in the case of {@link #addListener(Runnable, Executor)}).
+     * If {@link #notifyExecutor} is unset, they are invoked in the order they are added.
+     * The ordering holds across all variants of this method.
+     */
+    public void addListener(Runnable task)
+    {
+        appendListener(new RunnableWithNotifyExecutor(task));
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public Future<V> addListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... listeners)
+    {
+        // this could be more efficient if we cared, but we do not
+        return addListener(future -> {
+            for (GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener : listeners)
+                notifyListener((GenericFutureListener<io.netty.util.concurrent.Future<? super V>>)listener, future);
+        });
+    }
+
+    @Override
+    public Future<V> removeListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public Future<V> removeListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... listeners)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean await(long timeout, TimeUnit unit) throws InterruptedException
+    {
+        return Defaults.await(this, timeout, unit);
+    }
+
+    @Override
+    public boolean awaitThrowUncheckedOnInterrupt(long time, TimeUnit units) throws UncheckedInterruptedException
+    {
+        return Defaults.awaitThrowUncheckedOnInterrupt(this, time, units);
+    }
+
+    @Override
+    public boolean awaitUninterruptibly(long timeout, TimeUnit unit)
+    {
+        return Defaults.awaitUninterruptibly(this, timeout, unit);
+    }
+
+    @Override
+    public boolean awaitUntilThrowUncheckedOnInterrupt(long nanoTimeDeadline) throws UncheckedInterruptedException
+    {
+        return Defaults.awaitUntilThrowUncheckedOnInterrupt(this, nanoTimeDeadline);
+    }
+
+    @Override
+    public boolean awaitUntilUninterruptibly(long nanoTimeDeadline)
+    {
+        return Defaults.awaitUntilUninterruptibly(this, nanoTimeDeadline);
+    }
+
+    /**
+     * Wait for this future to complete {@link Awaitable#awaitUninterruptibly()}
+     */
+    @Override
+    public Future<V> awaitUninterruptibly()
+    {
+        return Defaults.awaitUninterruptibly(this);
+    }
+
+    /**
+     * Wait for this future to complete {@link Awaitable#awaitThrowUncheckedOnInterrupt()}
+     */
+    @Override
+    public Future<V> awaitThrowUncheckedOnInterrupt() throws UncheckedInterruptedException
+    {
+        return Defaults.awaitThrowUncheckedOnInterrupt(this);
+    }
+
+    public String toString()
+    {
+        String description = description();
+        String state = state();
+        return description == null ? state : (state + ' ' + description);
+    }
+
+    private String state()
+    {
+        Object result = this.result;
+        if (isSuccess(result))
+            return "(success: " + result + ')';
+        if (result == UNCANCELLABLE)
+            return "(uncancellable)";
+        if (result == CANCELLED)
+            return "(cancelled)";
+        if (isDone(result))
+            return "(failure: " + ((FailureHolder) result).cause + ')';
+        return "(incomplete)";
+    }
+
+    protected String description()
+    {
+        return null;
+    }
+
+    /**
+     * @return a listener that will propagate to {@code to} the result of the future it is invoked with
+     */
+    private static <V> GenericFutureListener<? extends Future<V>> propagate(AbstractFuture<? super V> to)
+    {
+        return from -> {
+            if (from.isSuccess()) to.trySuccess(from.getNow());
+            else to.tryFailure(from.cause());
+        };
+    }
+}
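A short sketch of the Guava-compatible callback registration described above, using the AsyncPromise implementation added later in this patch as the concrete future; the handler bodies are illustrative:

    import com.google.common.util.concurrent.FutureCallback;

    import org.apache.cassandra.utils.concurrent.AsyncPromise;
    import org.apache.cassandra.utils.concurrent.Future;

    final class CallbackSketch
    {
        static Future<String> register(AsyncPromise<String> promise)
        {
            // Callbacks (like listeners) are notified in registration order once the future completes.
            return promise.addCallback(new FutureCallback<String>()
            {
                @Override public void onSuccess(String result) { System.out.println("done: " + result); }
                @Override public void onFailure(Throwable cause) { cause.printStackTrace(); }
            });
        }
    }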
diff --git a/src/java/org/apache/cassandra/utils/concurrent/AsyncFuture.java b/src/java/org/apache/cassandra/utils/concurrent/AsyncFuture.java
new file mode 100644
index 0000000..0ef35d5
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/AsyncFuture.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.Function;
+import javax.annotation.Nullable;
+
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.ListenableFuture; // checkstyle: permit this import
+
+import io.netty.util.concurrent.GenericFutureListener;
+
+/**
+ * Our default {@link Future} implementation, with all state being managed without locks (except those used by the JVM).
+ *
+ * Some implementation comments versus Netty's default promise:
+ *  - We permit efficient initial state declaration, avoiding unnecessary CAS or lock acquisitions when mutating
+ *    a Promise we are ourselves constructing (and can easily add more; only those we use have been added)
+ *  - We guarantee the order of invocation of listeners (and callbacks etc, and with respect to each other)
+ *  - We save some space when registering listeners, especially if there is only one listener, as we perform no
+ *    extra allocations in this case.
+ *  - We implement our invocation list as a concurrent stack, that is cleared on notification
+ *  - We handle special values slightly differently.
+ *    - We do not use a special value for null, instead using a special value to indicate the result has not been set.
+ *      This means that once isSuccess() holds, the result must be a correctly typed object (modulo generics pitfalls).
+ *    - All special values are also instances of FailureHolder, which simplifies a number of the logical conditions.
+ */
+public class AsyncFuture<V> extends AbstractFuture<V>
+{
+    @SuppressWarnings({ "rawtypes" })
+    private static final AtomicReferenceFieldUpdater<AsyncFuture, WaitQueue> waitingUpdater = AtomicReferenceFieldUpdater.newUpdater(AsyncFuture.class, WaitQueue.class, "waiting");
+    @SuppressWarnings({ "unused" })
+    private volatile WaitQueue waiting;
+
+    public AsyncFuture()
+    {
+        super();
+    }
+
+    protected AsyncFuture(V immediateSuccess)
+    {
+        super(immediateSuccess);
+    }
+
+    protected AsyncFuture(Throwable immediateFailure)
+    {
+        super(immediateFailure);
+    }
+
+    protected AsyncFuture(FailureHolder initialState)
+    {
+        super(initialState);
+    }
+
+    protected AsyncFuture(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        super(listener);
+    }
+
+    protected AsyncFuture(FailureHolder initialState, GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        super(initialState, listener);
+    }
+
+    /**
+     * Shared implementation of various promise completion methods.
+     * Updates the result if it is possible to do so, returning success/failure.
+     *
+     * If the promise is UNSET the new value will succeed;
+     *          if it is UNCANCELLABLE it will succeed only if the new value is not CANCELLED;
+     *          otherwise it will fail, as isDone() is implied.
+     *
+     * If the update succeeds, and the new state implies isDone(), any listeners and waiters will be notified
+     */
+    boolean trySet(Object v)
+    {
+        while (true)
+        {
+            Object current = result;
+            if (isDone(current) || (current == UNCANCELLABLE && (v == CANCELLED || v == UNCANCELLABLE)))
+                return false;
+            if (resultUpdater.compareAndSet(this, current, v))
+            {
+                if (v != UNCANCELLABLE)
+                {
+                    ListenerList.notify(listenersUpdater, this);
+                    AsyncAwaitable.signalAll(waitingUpdater, this);
+                }
+                return true;
+            }
+        }
+    }
+
+    /**
+     * Logically append {@code newListener} to {@link #listeners}
+     * (at this stage it is a stack, so we actually prepend)
+     *
+     * @param newListener must be either a {@link ListenerList} or {@link GenericFutureListener}
+     */
+    void appendListener(ListenerList<V> newListener)
+    {
+        ListenerList.push(listenersUpdater, this, newListener);
+        if (isDone())
+            ListenerList.notify(listenersUpdater, this);
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transform} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public <T> Future<T> map(Function<? super V, ? extends T> mapper, Executor executor)
+    {
+        return map(new AsyncFuture<>(), mapper, executor);
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transformAsync(ListenableFuture, AsyncFunction, Executor)} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public <T> Future<T> flatMap(Function<? super V, ? extends Future<T>> flatMapper, @Nullable Executor executor)
+    {
+        return flatMap(new AsyncFuture<>(), flatMapper, executor);
+    }
+
+    /**
+     * Wait for this future to complete {@link Awaitable#await()}
+     */
+    @Override
+    public AsyncFuture<V> await() throws InterruptedException
+    {
+        //noinspection unchecked
+        return AsyncAwaitable.await(waitingUpdater, Future::isDone, this);
+    }
+
+    @Override
+    public boolean awaitUntil(long nanoTimeDeadline) throws InterruptedException
+    {
+        return AsyncAwaitable.awaitUntil(waitingUpdater, Future::isDone, this, nanoTimeDeadline);
+    }
+}
+
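A sketch of derived futures via map(): the mapping function runs once the source succeeds, and failures (of the source or of the function itself) propagate to the derived future. AsyncPromise, added in the next file of this patch, supplies the completable source; names are illustrative:

    import org.apache.cassandra.utils.concurrent.AsyncPromise;
    import org.apache.cassandra.utils.concurrent.Future;

    final class MapSketch
    {
        public static void main(String[] args) throws Exception
        {
            AsyncPromise<String> source = new AsyncPromise<>();
            Future<Integer> length = source.map(String::length); // derived future, not yet complete

            source.setSuccess("cassandra");
            System.out.println(length.get()); // 9
        }
    }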
diff --git a/src/java/org/apache/cassandra/utils/concurrent/AsyncPromise.java b/src/java/org/apache/cassandra/utils/concurrent/AsyncPromise.java
new file mode 100644
index 0000000..61b5818
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/AsyncPromise.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Executor;
+import java.util.function.Consumer;
+
+import com.google.common.util.concurrent.FutureCallback;
+
+import io.netty.util.concurrent.Future; // checkstyle: permit this import
+import io.netty.util.concurrent.GenericFutureListener;
+
+/**
+ * Extends {@link AsyncFuture} to implement the {@link Promise} interface.
+ */
+public class AsyncPromise<V> extends AsyncFuture<V> implements Promise<V>
+{
+    public static class WithExecutor<V> extends AsyncPromise<V>
+    {
+        final Executor notifyExecutor;
+        protected WithExecutor(Executor notifyExecutor)
+        {
+            this.notifyExecutor = notifyExecutor;
+        }
+
+        protected WithExecutor(Executor notifyExecutor, FailureHolder initialState)
+        {
+            super(initialState);
+            this.notifyExecutor = notifyExecutor;
+        }
+
+        protected WithExecutor(Executor notifyExecutor, GenericFutureListener<? extends Future<? super V>> listener)
+        {
+            super(listener);
+            this.notifyExecutor = notifyExecutor;
+        }
+
+        @Override
+        public Executor notifyExecutor()
+        {
+            return notifyExecutor;
+        }
+    }
+
+    public AsyncPromise() {}
+
+    AsyncPromise(FailureHolder initialState)
+    {
+        super(initialState);
+    }
+
+    public AsyncPromise(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        super(listener);
+    }
+
+    AsyncPromise(FailureHolder initialState, GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        super(initialState, listener);
+    }
+
+    public static <V> AsyncPromise<V> withExecutor(Executor executor)
+    {
+        return new AsyncPromise.WithExecutor<>(executor);
+    }
+
+    public static <V> AsyncPromise<V> uncancellable()
+    {
+        return new AsyncPromise<>(UNCANCELLABLE);
+    }
+
+    public static <V> AsyncPromise<V> uncancellable(Executor executor)
+    {
+        return new WithExecutor<>(executor, UNCANCELLABLE);
+    }
+
+    public static <V> AsyncPromise<V> uncancellable(GenericFutureListener<? extends Future<? super V>> listener)
+    {
+        return new AsyncPromise<>(UNCANCELLABLE, listener);
+    }
+
+    /**
+     * Complete the promise successfully if not already complete
+     * @throws IllegalStateException if already set
+     */
+    @Override
+    public Promise<V> setSuccess(V v)
+    {
+        if (!trySuccess(v))
+            throw new IllegalStateException("complete already: " + this);
+        return this;
+    }
+
+    /**
+     * Complete the promise successfully if not already complete
+     * @return true iff completed promise
+     */
+    @Override
+    public boolean trySuccess(V v)
+    {
+        return super.trySuccess(v);
+    }
+
+    /**
+     * Complete the promise abnormally if not already complete
+     * @throws IllegalStateException if already set
+     */
+    @Override
+    public Promise<V> setFailure(Throwable throwable)
+    {
+        if (!tryFailure(throwable))
+            throw new IllegalStateException("complete already: " + this);
+        return this;
+    }
+
+    /**
+     * Complete the promise abnormally if not already complete
+     * @return true iff completed promise
+     */
+    @Override
+    public boolean tryFailure(Throwable throwable)
+    {
+        return super.tryFailure(throwable);
+    }
+
+    /**
+     * Prevent a future caller from cancelling this promise
+     * @return true if the promise is now uncancellable (whether or not we did this)
+     */
+    @Override
+    public boolean setUncancellable()
+    {
+        return super.setUncancellable();
+    }
+
+    /**
+     * Prevent a future caller from cancelling this promise
+     * @return true iff this invocation set it to uncancellable, whether or not now uncancellable
+     */
+    @Override
+    public boolean setUncancellableExclusive()
+    {
+        return super.setUncancellableExclusive();
+    }
+
+    @Override
+    public boolean isUncancellable()
+    {
+        return super.isUncancellable();
+    }
+
+    /**
+     * waits for completion; in case of failure rethrows the original exception without a new wrapping exception
+     * so may cause problems for reporting stack traces
+     */
+    @Override
+    public Promise<V> sync() throws InterruptedException
+    {
+        super.sync();
+        return this;
+    }
+
+    /**
+     * waits for completion; in case of failure rethrows the original exception without a new wrapping exception
+     * so may cause problems for reporting stack traces
+     */
+    @Override
+    public Promise<V> syncUninterruptibly()
+    {
+        super.syncUninterruptibly();
+        return this;
+    }
+
+    @Override
+    public AsyncPromise<V> addListener(GenericFutureListener<? extends Future<? super V>> listener)
+    {
+        super.addListener(listener);
+        return this;
+    }
+
+    @Override
+    public AsyncPromise<V> addListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... listeners)
+    {
+        super.addListeners(listeners);
+        return this;
+    }
+
+    @Override
+    public AsyncPromise<V> removeListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public AsyncPromise<V> removeListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... listeners)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public AsyncPromise<V> addCallback(FutureCallback<? super V> callback)
+    {
+        super.addCallback(callback);
+        return this;
+    }
+
+    @Override
+    public AsyncPromise<V> addCallback(FutureCallback<? super V> callback, Executor executor)
+    {
+        super.addCallback(callback, executor);
+        return this;
+    }
+
+    @Override
+    public AsyncPromise<V> addCallback(Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure)
+    {
+        super.addCallback(onSuccess, onFailure);
+        return this;
+    }
+
+    /**
+     * Wait for this promise to complete
+     * @throws InterruptedException if interrupted
+     */
+    @Override
+    public AsyncPromise<V> await() throws InterruptedException
+    {
+        super.await();
+        return this;
+    }
+
+    /**
+     * Wait uninterruptibly for this promise to complete
+     */
+    @Override
+    public AsyncPromise<V> awaitUninterruptibly()
+    {
+        super.awaitUninterruptibly();
+        return this;
+    }
+
+    /**
+     * Wait for this promise to complete, throwing any interrupt as an UncheckedInterruptedException
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    @Override
+    public AsyncPromise<V> awaitThrowUncheckedOnInterrupt() throws UncheckedInterruptedException
+    {
+        super.awaitThrowUncheckedOnInterrupt();
+        return this;
+    }
+}
+
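Finally, a sketch of the producer/consumer split the Promise side provides: the producer completes the promise exactly once (setSuccess throws if already complete, trySuccess returns false instead), while consumers hold only the Future view; the worker thread is illustrative:

    import org.apache.cassandra.utils.concurrent.AsyncPromise;
    import org.apache.cassandra.utils.concurrent.Future;

    final class PromiseSketch
    {
        public static void main(String[] args) throws Exception
        {
            AsyncPromise<String> promise = new AsyncPromise<>();
            Future<String> future = promise; // consumers only see the read side

            new Thread(() -> promise.setSuccess("ready")).start();

            future.await();                      // blocks until the promise completes
            System.out.println(future.getNow()); // "ready"
        }
    }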
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Awaitable.java b/src/java/org/apache/cassandra/utils/concurrent/Awaitable.java
new file mode 100644
index 0000000..25bdf02
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/Awaitable.java
@@ -0,0 +1,409 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.Predicate;
+
+import net.nicoulaj.compilecommand.annotations.Inline;
+import org.apache.cassandra.utils.Shared;
+
+import org.apache.cassandra.utils.Intercept;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A generic signal consumer, supporting all of the typical patterns used in Cassandra.
+ * All of the methods defined in {@link Awaitable} may be waited on without a loop,
+ * as this interface declares that there are no spurious wake-ups.
+ */
+@Shared(scope = SIMULATION)
+public interface Awaitable
+{
+    /**
+     * Await until the deadline (in nanoTime), throwing any interrupt.
+     * No spurious wakeups.
+     * @return true if we were signalled, false if the deadline elapsed
+     * @throws InterruptedException if interrupted
+     */
+    boolean awaitUntil(long nanoTimeDeadline) throws InterruptedException;
+
+    /**
+     * Await until the deadline (in nanoTime), throwing any interrupt as an unchecked exception.
+     * No spurious wakeups.
+     * @return true if we were signalled, false if the deadline elapsed
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    boolean awaitUntilThrowUncheckedOnInterrupt(long nanoTimeDeadline) throws UncheckedInterruptedException;
+
+    /**
+     * Await until the deadline (in nanoTime), ignoring interrupts (but maintaining the interrupt flag on exit).
+     * No spurious wakeups.
+     * @return true if we were signalled, false if the deadline elapsed
+     */
+    boolean awaitUntilUninterruptibly(long nanoTimeDeadline);
+
+    /**
+     * Await for the specified period, throwing any interrupt.
+     * No spurious wakeups.
+     * @return true if we were signalled, false if the timeout elapses
+     * @throws InterruptedException if interrupted
+     */
+    boolean await(long time, TimeUnit units) throws InterruptedException;
+
+    /**
+     * Await for the specified period, throwing any interrupt as an unchecked exception.
+     * No spurious wakeups.
+     * @return true if we were signalled, false if the timeout elapses
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    boolean awaitThrowUncheckedOnInterrupt(long time, TimeUnit units) throws UncheckedInterruptedException;
+
+    /**
+     * Await for the specified period, ignoring interrupts (but maintaining the interrupt flag on exit).
+     * No spurious wakeups.
+     * @return true if we were signalled, false if the timeout elapses
+     */
+    boolean awaitUninterruptibly(long time, TimeUnit units);
+
+    /**
+     * Await indefinitely, throwing any interrupt.
+     * No spurious wakeups.
+     * @throws InterruptedException if interrupted
+     */
+    Awaitable await() throws InterruptedException;
+
+    /**
+     * Await indefinitely, throwing any interrupt as an unchecked exception.
+     * No spurious wakeups.
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    Awaitable awaitThrowUncheckedOnInterrupt() throws UncheckedInterruptedException;
+
+    /**
+     * Await indefinitely, ignoring interrupts (but maintaining the interrupt flag on exit).
+     * No spurious wakeups.
+     */
+    Awaitable awaitUninterruptibly();
+
+    // we declare the static implementation methods in a nested class, rather than as interface
+    // default methods, so that they can be loaded by different classloaders during simulation
+    class Defaults
+    {
+        public static boolean await(Awaitable await, long time, TimeUnit unit) throws InterruptedException
+        {
+            return await.awaitUntil(nanoTime() + unit.toNanos(time));
+        }
+
+        public static boolean awaitThrowUncheckedOnInterrupt(Awaitable await, long time, TimeUnit units) throws UncheckedInterruptedException
+        {
+            return awaitUntilThrowUncheckedOnInterrupt(await, nanoTime() + units.toNanos(time));
+        }
+
+        public static boolean awaitUninterruptibly(Awaitable await, long time, TimeUnit units)
+        {
+            return awaitUntilUninterruptibly(await, nanoTime() + units.toNanos(time));
+        }
+
+        public static <A extends Awaitable> A awaitThrowUncheckedOnInterrupt(A await) throws UncheckedInterruptedException
+        {
+            try
+            {
+                await.await();
+            }
+            catch (InterruptedException e)
+            {
+                throw new UncheckedInterruptedException();
+            }
+            return await;
+        }
+
+        public static boolean awaitUntilThrowUncheckedOnInterrupt(Awaitable await, long nanoTimeDeadline) throws UncheckedInterruptedException
+        {
+            try
+            {
+                return await.awaitUntil(nanoTimeDeadline);
+            }
+            catch (InterruptedException e)
+            {
+                throw new UncheckedInterruptedException();
+            }
+        }
+
+        /**
+         * {@link Awaitable#awaitUntilUninterruptibly(long)}
+         */
+        public static boolean awaitUntilUninterruptibly(Awaitable await, long nanoTimeDeadline)
+        {
+            boolean interrupted = false;
+            boolean result;
+            while (true)
+            {
+                try
+                {
+                    result = await.awaitUntil(nanoTimeDeadline);
+                    break;
+                }
+                catch (InterruptedException e)
+                {
+                    interrupted = true;
+                }
+            }
+            if (interrupted)
+                Thread.currentThread().interrupt();
+            return result;
+        }
+
+        /**
+         * {@link Awaitable#awaitUninterruptibly()}
+         */
+        public static <A extends Awaitable> A awaitUninterruptibly(A await)
+        {
+            boolean interrupted = false;
+            while (true)
+            {
+                try
+                {
+                    await.await();
+                    break;
+                }
+                catch (InterruptedException e)
+                {
+                    interrupted = true;
+                }
+            }
+            if (interrupted)
+                Thread.currentThread().interrupt();
+            return await;
+        }
+    }
+
+    abstract class AbstractAwaitable implements Awaitable
+    {
+        protected AbstractAwaitable() {}
+
+        /**
+         * {@link Awaitable#await(long, TimeUnit)}
+         */
+        @Override
+        public boolean await(long time, TimeUnit unit) throws InterruptedException
+        {
+            return Defaults.await(this, time, unit);
+        }
+
+        /**
+         * {@link Awaitable#awaitThrowUncheckedOnInterrupt(long, TimeUnit)}
+         */
+        @Override
+        public boolean awaitThrowUncheckedOnInterrupt(long time, TimeUnit units) throws UncheckedInterruptedException
+        {
+            return Defaults.awaitThrowUncheckedOnInterrupt(this, time, units);
+        }
+
+        /**
+         * {@link Awaitable#awaitUninterruptibly(long, TimeUnit)}
+         */
+        public boolean awaitUninterruptibly(long time, TimeUnit units)
+        {
+            return awaitUntilUninterruptibly(nanoTime() + units.toNanos(time));
+        }
+
+        /**
+         * {@link Awaitable#awaitThrowUncheckedOnInterrupt()}
+         */
+        public Awaitable awaitThrowUncheckedOnInterrupt() throws UncheckedInterruptedException
+        {
+            return Defaults.awaitThrowUncheckedOnInterrupt(this);
+        }
+
+        /**
+         * {@link Awaitable#awaitUntilThrowUncheckedOnInterrupt(long)}
+         */
+        public boolean awaitUntilThrowUncheckedOnInterrupt(long nanoTimeDeadline) throws UncheckedInterruptedException
+        {
+            return Defaults.awaitUntilThrowUncheckedOnInterrupt(this, nanoTimeDeadline);
+        }
+
+        /**
+         * {@link Awaitable#awaitUntilUninterruptibly(long)}
+         */
+        public boolean awaitUntilUninterruptibly(long nanoTimeDeadline)
+        {
+            return Defaults.awaitUntilUninterruptibly(this, nanoTimeDeadline);
+        }
+
+        /**
+         * {@link Awaitable#awaitUninterruptibly()}
+         */
+        public Awaitable awaitUninterruptibly()
+        {
+            return Defaults.awaitUninterruptibly(this);
+        }
+    }
+
+    /**
+     * A barebones asynchronous {@link Awaitable}.
+     * If your state is minimal, or can be updated concurrently, extend this class.
+     */
+    abstract class AsyncAwaitable extends AbstractAwaitable
+    {
+        /**
+         * Register against an internal, lazily-initialized wait queue.
+         * @return null if the awaitable is already done, otherwise a registered Signal
+         */
+        @Inline
+        private static <A extends Awaitable> WaitQueue.Signal register(AtomicReferenceFieldUpdater<A, WaitQueue> waitingUpdater, Predicate<A> isDone, A awaitable)
+        {
+            if (isDone.test(awaitable))
+                return null;
+
+            WaitQueue waiting = waitingUpdater.get(awaitable);
+            if (waiting == null)
+            {
+                if (!waitingUpdater.compareAndSet(awaitable, null, waiting = newWaitQueue()))
+                {
+                    waiting = waitingUpdater.get(awaitable);
+                    if (waiting == null)
+                    {
+                        assert isDone.test(awaitable);
+                        return null;
+                    }
+                }
+            }
+
+            WaitQueue.Signal s = waiting.register();
+            if (!isDone.test(awaitable))
+                return s;
+
+            s.cancel();
+            return null;
+        }
+
+        @Inline
+        static <A extends Awaitable> A await(AtomicReferenceFieldUpdater<A, WaitQueue> waitingUpdater, Predicate<A> isDone, A awaitable) throws InterruptedException
+        {
+            WaitQueue.Signal s = register(waitingUpdater, isDone, awaitable);
+            if (s != null)
+                s.await();
+            return awaitable;
+        }
+
+        @Inline
+        static <A extends Awaitable> boolean awaitUntil(AtomicReferenceFieldUpdater<A, WaitQueue> waitingUpdater, Predicate<A> isDone, A awaitable, long nanoTimeDeadline) throws InterruptedException
+        {
+            WaitQueue.Signal s = register(waitingUpdater, isDone, awaitable);
+            return s == null || s.awaitUntil(nanoTimeDeadline) || isDone.test(awaitable);
+        }
+
+        @Inline
+        static <A extends Awaitable> void signalAll(AtomicReferenceFieldUpdater<A, WaitQueue> waitingUpdater, A awaitable)
+        {
+            WaitQueue waiting = waitingUpdater.get(awaitable);
+            if (waiting == null)
+                return;
+
+            waiting.signalAll();
+            waitingUpdater.lazySet(awaitable, null);
+        }
+
+        private static final AtomicReferenceFieldUpdater<AsyncAwaitable, WaitQueue> waitingUpdater = AtomicReferenceFieldUpdater.newUpdater(AsyncAwaitable.class, WaitQueue.class, "waiting");
+        private volatile WaitQueue waiting;
+
+        protected AsyncAwaitable() {}
+
+        /**
+         * {@link Awaitable#await()}
+         */
+        public Awaitable await() throws InterruptedException
+        {
+            return await(waitingUpdater, AsyncAwaitable::isSignalled, this);
+        }
+
+        /**
+         * {@link Awaitable#awaitUntil(long)}
+         */
+        public boolean awaitUntil(long nanoTimeDeadline) throws InterruptedException
+        {
+            return awaitUntil(waitingUpdater, AsyncAwaitable::isSignalled, this, nanoTimeDeadline);
+        }
+
+        /**
+         * Signal any waiting threads; {@link #isSignalled()} must return {@code true} before this method is invoked.
+         */
+        protected void signal()
+        {
+            signalAll(waitingUpdater, this);
+        }
+
+        /**
+         * Return true once signalled. Unidirectional; once true, must never again be false.
+         */
+        protected abstract boolean isSignalled();
+    }
+
+    /**
+     * A barebones {@link Awaitable} that uses mutual exclusion.
+     * If your state will be updated while holding the object monitor, extend this class.
+     */
+    abstract class SyncAwaitable extends AbstractAwaitable
+    {
+        protected SyncAwaitable() {}
+
+        /**
+         * {@link Awaitable#await()}
+         */
+        public synchronized Awaitable await() throws InterruptedException
+        {
+            while (!isSignalled())
+                wait();
+            return this;
+        }
+
+        /**
+         * {@link Awaitable#awaitUntil(long)}
+         */
+        public synchronized boolean awaitUntil(long nanoTimeDeadline) throws InterruptedException
+        {
+            while (true)
+            {
+                if (isSignalled()) return true;
+                if (!waitUntil(this, nanoTimeDeadline)) return false;
+            }
+        }
+
+        /**
+         * Return true once signalled. Unidirectional; once true, must never again be false.
+         */
+        protected abstract boolean isSignalled();
+
+        @Intercept
+        public static boolean waitUntil(Object monitor, long deadlineNanos) throws InterruptedException
+        {
+            long wait = deadlineNanos - nanoTime();
+            if (wait <= 0)
+                return false;
+
+            monitor.wait((wait + 999999) / 1000000);
+            return true;
+        }
+    }
+}
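A minimal usage sketch (not part of the patch) of how a subclass might build on AsyncAwaitable: a one-shot flag whose waiters need no retry loop, since the interface guarantees no spurious wake-ups. The class and method names below are illustrative only.

```java
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.utils.concurrent.Awaitable;

// Hypothetical example type, not part of the patch
class DoneFlag extends Awaitable.AsyncAwaitable
{
    private volatile boolean done;

    void markDone()
    {
        done = true;   // publish state before waking waiters
        signal();      // wakes any threads blocked in await()/awaitUntil()
    }

    @Override
    protected boolean isSignalled()
    {
        return done;   // unidirectional: once true, never false again
    }
}

class DoneFlagExample
{
    public static void main(String[] args) throws InterruptedException
    {
        DoneFlag flag = new DoneFlag();
        new Thread(flag::markDone).start();
        boolean signalled = flag.await(1, TimeUnit.SECONDS);  // no wait loop required
        System.out.println(signalled);
    }
}
```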
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Blocker.java b/src/java/org/apache/cassandra/utils/concurrent/Blocker.java
deleted file mode 100644
index 5192e98..0000000
--- a/src/java/org/apache/cassandra/utils/concurrent/Blocker.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*    http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*/
-package org.apache.cassandra.utils.concurrent;
-
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.ReentrantLock;
-
-public class Blocker
-{
-    private final ReentrantLock lock = new ReentrantLock();
-    private final Condition unblocked = lock.newCondition();
-    private volatile boolean block = false;
-
-    public void block(boolean block)
-    {
-        this.block = block;
-        if (!block)
-        {
-            lock.lock();
-            try
-            {
-                unblocked.signalAll();
-            }
-            finally
-            {
-                lock.unlock();
-            }
-        }
-    }
-
-    public void ask()
-    {
-        if (block)
-        {
-            lock.lock();
-            try
-            {
-                while (block)
-                    unblocked.awaitUninterruptibly();
-            }
-            finally
-            {
-                lock.unlock();
-            }
-        }
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/concurrent/BlockingQueues.java b/src/java/org/apache/cassandra/utils/concurrent/BlockingQueues.java
new file mode 100644
index 0000000..4d79ed3
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/BlockingQueues.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Queue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue; // checkstyle: permit this import
+import java.util.concurrent.SynchronousQueue; // checkstyle: permit this import
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.utils.Intercept;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.concurrent.Awaitable.SyncAwaitable.waitUntil;
+
+public class BlockingQueues
+{
+    @Intercept
+    public static <T> BlockingQueue<T> newBlockingQueue()
+    {
+        return new LinkedBlockingQueue<>();
+    }
+
+    @Intercept
+    public static <T> BlockingQueue<T> newBlockingQueue(int capacity)
+    {
+        return capacity == 0 ? new SynchronousQueue<>()
+                             : new LinkedBlockingQueue<>(capacity);
+    }
+
+    public static class Sync<T> implements BlockingQueue<T>
+    {
+        final int capacity;
+        final Queue<T> wrapped;
+        public Sync(int capacity, Queue<T> wrapped)
+        {
+            this.capacity = capacity;
+            this.wrapped = wrapped;
+        }
+
+        public synchronized boolean add(T t)
+        {
+            if (!wrapped.add(t))
+                throw new IllegalStateException();
+            notify();
+            return true;
+        }
+
+        public synchronized boolean offer(T t)
+        {
+            if (wrapped.size() == capacity)
+                return false;
+            return add(t);
+        }
+
+        public synchronized T remove()
+        {
+            return poll();
+        }
+
+        public synchronized T poll()
+        {
+            if (wrapped.size() == capacity)
+                notify();
+
+            return wrapped.poll();
+        }
+
+        public synchronized T element()
+        {
+            return wrapped.element();
+        }
+
+        public synchronized T peek()
+        {
+            return wrapped.peek();
+        }
+
+        public synchronized void put(T t) throws InterruptedException
+        {
+            while (!offer(t))
+                wait();
+        }
+
+        public synchronized boolean offer(T t, long timeout, TimeUnit unit) throws InterruptedException
+        {
+            if (offer(t))
+                return true;
+
+            long deadline = nanoTime() + unit.toNanos(timeout);
+            while (true)
+            {
+                if (offer(t))
+                    return true;
+
+                if (!waitUntil(this, deadline))
+                    return false;
+            }
+        }
+
+        public synchronized T take() throws InterruptedException
+        {
+            T result;
+            while (null == (result = poll()))
+                wait();
+
+            return result;
+        }
+
+        public synchronized T poll(long timeout, TimeUnit unit) throws InterruptedException
+        {
+            T result = poll();
+            if (result != null)
+                return result;
+
+            long deadline = nanoTime() + unit.toNanos(timeout);
+            while (null == (result = poll()))
+            {
+                if (!waitUntil(this, deadline))
+                    return null;
+            }
+            return result;
+        }
+
+        public synchronized int remainingCapacity()
+        {
+            return capacity - wrapped.size();
+        }
+
+        public synchronized boolean remove(Object o)
+        {
+            if (!wrapped.remove(o))
+                return false;
+            if (wrapped.size() == capacity - 1)
+                notify();
+            return true;
+        }
+
+        public synchronized boolean containsAll(Collection<?> c)
+        {
+            return wrapped.containsAll(c);
+        }
+
+        public synchronized boolean addAll(Collection<? extends T> c)
+        {
+            c.forEach(this::add);
+            return true;
+        }
+
+        public synchronized boolean removeAll(Collection<?> c)
+        {
+            boolean result = wrapped.removeAll(c);
+            notifyAll();
+            return result;
+        }
+
+        public synchronized boolean retainAll(Collection<?> c)
+        {
+            boolean result = wrapped.retainAll(c);
+            notifyAll();
+            return result;
+        }
+
+        public synchronized void clear()
+        {
+            wrapped.clear();
+            notifyAll();
+        }
+
+        public synchronized int size()
+        {
+            return wrapped.size();
+        }
+
+        public synchronized boolean isEmpty()
+        {
+            return wrapped.isEmpty();
+        }
+
+        public synchronized boolean contains(Object o)
+        {
+            return wrapped.contains(o);
+        }
+
+        public synchronized Iterator<T> iterator()
+        {
+            Iterator<T> iter = wrapped.iterator();
+            return new Iterator<T>()
+            {
+                public boolean hasNext()
+                {
+                    synchronized (Sync.this)
+                    {
+                        return iter.hasNext();
+                    }
+                }
+
+                public T next()
+                {
+                    synchronized (Sync.this)
+                    {
+                        return iter.next();
+                    }
+                }
+            };
+        }
+
+        public synchronized Object[] toArray()
+        {
+            return wrapped.toArray();
+        }
+
+        public synchronized <T1> T1[] toArray(T1[] a)
+        {
+            return wrapped.toArray(a);
+        }
+
+        public synchronized int drainTo(Collection<? super T> c)
+        {
+            return drainTo(c, Integer.MAX_VALUE);
+        }
+
+        public synchronized int drainTo(Collection<? super T> c, int maxElements)
+        {
+            int count = 0;
+            while (count < maxElements && !isEmpty())
+            {
+                c.add(poll());
+                ++count;
+            }
+
+            return count;
+        }
+    }
+}
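A short usage sketch (not from the patch) of the Sync wrapper above: a plain ArrayDeque made blocking and bounded via object-monitor synchronization. Names are illustrative.

```java
import java.util.ArrayDeque;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.utils.concurrent.BlockingQueues;

public class SyncQueueExample
{
    public static void main(String[] args) throws InterruptedException
    {
        // bound a plain queue at capacity 2
        BlockingQueue<String> queue = new BlockingQueues.Sync<>(2, new ArrayDeque<>());

        queue.put("a");
        queue.put("b");                                                   // queue is now full
        System.out.println(queue.offer("c", 10, TimeUnit.MILLISECONDS));  // false: times out while full
        System.out.println(queue.take());                                 // "a" (FIFO order of the wrapped deque)

        // the factories route through @Intercept so the simulator can substitute implementations
        BlockingQueue<String> unbounded = BlockingQueues.newBlockingQueue();
        unbounded.add("x");
    }
}
```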
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Condition.java b/src/java/org/apache/cassandra/utils/concurrent/Condition.java
new file mode 100644
index 0000000..eb97848
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/Condition.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import org.apache.cassandra.utils.Intercept;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A simpler API than java.util.concurrent.Condition; it would be nice to extend it, but it is also nice
+ * to share an API with Future, and Netty's Future API is incompatible with java.util.concurrent.Condition.
+ *
+ * An {@link Awaitable} for explicit external signals.
+ */
+@Shared(scope = SIMULATION)
+public interface Condition extends Awaitable
+{
+    /**
+     * Returns true once signalled. Unidirectional; once true, will never again be false.
+     */
+    boolean isSignalled();
+
+    /**
+     * Signal the condition as met, and wake all waiting threads.
+     */
+    void signal();
+
+    /**
+     * Signal the condition as met, and wake all waiting threads.
+     */
+    default void signalAll() { signal(); }
+
+    /**
+     * Factory method used to capture and redirect instantiations for simulation
+     */
+    @Intercept
+    static Condition newOneTimeCondition()
+    {
+        return new Async();
+    }
+
+    /**
+     * An asynchronous {@link Condition}. Typically lower overhead than {@link Sync}.
+     */
+    public static class Async extends AsyncAwaitable implements Condition
+    {
+        private volatile boolean signaled = false;
+
+        // WARNING: if extending this class, consider simulator interactions
+        protected Async() {}
+
+        public boolean isSignalled()
+        {
+            return signaled;
+        }
+
+        public void signal()
+        {
+            signaled = true;
+            super.signal();
+        }
+    }
+
+    /**
+     * A {@link Condition} based on its object monitor.
+     * WARNING: lengthy operations performed while holding the lock may prevent timely notification of waiting threads
+     * that a deadline has passed.
+     */
+    public static class Sync extends SyncAwaitable implements Condition
+    {
+        private boolean signaled = false;
+
+        // this can be instantiated directly, as we intercept monitors directly with byte weaving
+        public Sync() {}
+
+        public synchronized boolean isSignalled()
+        {
+            return signaled;
+        }
+
+        public synchronized void signal()
+        {
+            signaled = true;
+            notifyAll();
+        }
+    }
+}
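A brief sketch (not part of the patch) of the one-shot Condition: one thread signals, another awaits with a timeout; no wait loop is needed since there are no spurious wake-ups.

```java
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.utils.concurrent.Condition;

public class ConditionExample
{
    public static void main(String[] args) throws InterruptedException
    {
        Condition ready = Condition.newOneTimeCondition();

        new Thread(() -> {
            // ... perform some work ...
            ready.signal();                         // isSignalled() is now permanently true
        }).start();

        boolean signalled = ready.await(1, TimeUnit.SECONDS);
        System.out.println(signalled);
    }
}
```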
diff --git a/src/java/org/apache/cassandra/utils/concurrent/ConditionAsConsumer.java b/src/java/org/apache/cassandra/utils/concurrent/ConditionAsConsumer.java
new file mode 100644
index 0000000..985daf7
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/ConditionAsConsumer.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.function.Consumer;
+
+public interface ConditionAsConsumer<T> extends Condition, Consumer<T>
+{
+    @Override
+    default void accept(T t) { signal(); }
+
+    public static class Async<T> extends Condition.Async implements ConditionAsConsumer<T> { }
+
+    public static <T> ConditionAsConsumer<T> newConditionAsConsumer()
+    {
+        return new Async<T>();
+    }
+}
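A sketch of how ConditionAsConsumer can bridge a callback-style API to a blocking caller; fetchAsync below is a hypothetical method used only for illustration.

```java
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import org.apache.cassandra.utils.concurrent.ConditionAsConsumer;

public class ConditionAsConsumerExample
{
    // hypothetical asynchronous API that reports completion via a Consumer
    static void fetchAsync(Consumer<String> onDone)
    {
        new Thread(() -> onDone.accept("result")).start();
    }

    public static void main(String[] args) throws InterruptedException
    {
        ConditionAsConsumer<String> done = ConditionAsConsumer.newConditionAsConsumer();
        fetchAsync(done);                              // accept(...) simply signals the condition
        System.out.println(done.await(1, TimeUnit.SECONDS));
    }
}
```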
diff --git a/src/java/org/apache/cassandra/utils/concurrent/CountDownLatch.java b/src/java/org/apache/cassandra/utils/concurrent/CountDownLatch.java
new file mode 100644
index 0000000..976abb7
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/CountDownLatch.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+
+import org.apache.cassandra.utils.Intercept;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface CountDownLatch extends Awaitable
+{
+    /**
+     * Count down by 1, signalling waiters if we have reached zero
+     */
+    void decrement();
+
+    /**
+     * @return the current count
+     */
+    int count();
+
+    /**
+     * Factory method used to capture and redirect instantiations for simulation
+     */
+    @Intercept
+    static CountDownLatch newCountDownLatch(int count)
+    {
+        return new Async(count);
+    }
+
+    static class Async extends AsyncAwaitable implements CountDownLatch
+    {
+        private static final AtomicIntegerFieldUpdater<CountDownLatch.Async> countUpdater = AtomicIntegerFieldUpdater.newUpdater(CountDownLatch.Async.class, "count");
+        private volatile int count;
+
+        // WARNING: if extending this class, consider simulator interactions
+        protected Async(int count)
+        {
+            this.count = count;
+            if (count == 0)
+                signal();
+        }
+
+        public void decrement()
+        {
+            if (countUpdater.decrementAndGet(this) == 0)
+                signal();
+        }
+
+        public int count()
+        {
+            return count;
+        }
+
+        @Override
+        protected boolean isSignalled()
+        {
+            return count <= 0;
+        }
+    }
+
+    static final class Sync extends SyncAwaitable implements CountDownLatch
+    {
+        private int count;
+
+        public Sync(int count)
+        {
+            this.count = count;
+        }
+
+        public synchronized void decrement()
+        {
+            if (count > 0 && --count == 0)
+                notifyAll();
+        }
+
+        public synchronized int count()
+        {
+            return count;
+        }
+
+        /**
+         * Not synchronized, as this is only intended for internal use by externally synchronized methods.
+         */
+        @Override
+        protected boolean isSignalled()
+        {
+            return count <= 0;
+        }
+    }
+}
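A minimal sketch (illustrative only) of the latch added above; note the method is decrement() rather than countDown() as in java.util.concurrent.

```java
import org.apache.cassandra.utils.concurrent.CountDownLatch;

public class LatchExample
{
    public static void main(String[] args) throws InterruptedException
    {
        CountDownLatch latch = CountDownLatch.newCountDownLatch(2);

        for (int i = 0; i < 2; i++)
            new Thread(latch::decrement).start();  // each worker counts down by one

        latch.await();                             // returns once count() reaches zero
        System.out.println(latch.count());         // 0
    }
}
```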
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Future.java b/src/java/org/apache/cassandra/utils/concurrent/Future.java
new file mode 100644
index 0000000..fae5d43
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/Future.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Executor;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.ListenableFuture; // checkstyle: permit this import
+
+import io.netty.util.concurrent.GenericFutureListener;
+
+import io.netty.util.internal.PlatformDependent;
+import org.apache.cassandra.utils.Shared;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A Future that integrates several different (but equivalent) APIs used within Cassandra into a single concept,
+ * integrating also with our {@link Awaitable} abstraction, to overall improve coherency and clarity in the codebase.
+ */
+@Shared(scope = SIMULATION, ancestors = INTERFACES)
+public interface Future<V> extends io.netty.util.concurrent.Future<V>, ListenableFuture<V>, Awaitable
+{
+    /**
+     * Wait indefinitely for this future to complete, throwing any interrupt
+     * @throws InterruptedException if interrupted
+     */
+    @Override
+    Future<V> await() throws InterruptedException;
+
+    /**
+     * Wait indefinitely for this future to complete
+     */
+    @Override
+    Future<V> awaitUninterruptibly();
+
+    /**
+     * Wait indefinitely for this promise to complete, throwing any interrupt as an UncheckedInterruptedException
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    @Override
+    Future<V> awaitThrowUncheckedOnInterrupt();
+
+    default void rethrowIfFailed()
+    {
+        Throwable cause = this.cause();
+        if (cause != null)
+        {
+            PlatformDependent.throwException(cause);
+        }
+    }
+
+    /**
+     * Waits for completion; in case of failure, rethrows the original exception without a new wrapping exception,
+     * which may cause problems for reporting stack traces.
+     */
+    @Override
+    default Future<V> sync() throws InterruptedException
+    {
+        await();
+        rethrowIfFailed();
+        return this;
+    }
+
+    /**
+     * Waits for completion; in case of failure, rethrows the original exception without a new wrapping exception,
+     * which may cause problems for reporting stack traces.
+     */
+    @Override
+    default Future<V> syncUninterruptibly()
+    {
+        awaitUninterruptibly();
+        rethrowIfFailed();
+        return this;
+    }
+
+    /**
+     * Waits for completion; in case of failure, rethrows the original exception without a new wrapping exception,
+     * which may cause problems for reporting stack traces.
+     */
+    default Future<V> syncThrowUncheckedOnInterrupt()
+    {
+        awaitThrowUncheckedOnInterrupt();
+        rethrowIfFailed();
+        return this;
+    }
+
+    @Deprecated
+    @Override
+    default boolean await(long l) throws InterruptedException
+    {
+        return await(l, MILLISECONDS);
+    }
+
+    @Deprecated
+    @Override
+    default boolean awaitUninterruptibly(long l)
+    {
+        return awaitUninterruptibly(l, MILLISECONDS);
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     */
+    Future<V> addCallback(BiConsumer<? super V, Throwable> callback);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     */
+    Future<V> addCallback(BiConsumer<? super V, Throwable> callback, Executor executor);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     */
+    Future<V> addCallback(FutureCallback<? super V> callback);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     */
+    Future<V> addCallback(FutureCallback<? super V> callback, Executor executor);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     */
+    Future<V> addCallback(Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#addCallback} natively
+     */
+    Future<V> addCallback(Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure, Executor executor);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transform(ListenableFuture, com.google.common.base.Function, Executor)} natively
+     */
+    default <T> Future<T> map(Function<? super V, ? extends T> mapper)
+    {
+        return map(mapper, null);
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transform(ListenableFuture, com.google.common.base.Function, Executor)} natively
+     */
+    <T> Future<T> map(Function<? super V, ? extends T> mapper, Executor executor);
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transformAsync(ListenableFuture, AsyncFunction, Executor)} natively
+     */
+    default <T> Future<T> flatMap(Function<? super V, ? extends Future<T>> flatMapper)
+    {
+        return flatMap(flatMapper, null);
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transformAsync(ListenableFuture, AsyncFunction, Executor)} natively
+     */
+    <T> Future<T> flatMap(Function<? super V, ? extends Future<T>> flatMapper, Executor executor);
+
+    /**
+     * Invoke {@code runnable} on completion, using {@code executor}.
+     *
+     * Tasks are submitted to their executors in the order they were added to this Future.
+     */
+    @Override
+    void addListener(Runnable runnable, Executor executor);
+
+    /**
+     * Invoke {@code runnable} on completion. Depending on the implementation and its configuration, this
+     * may be executed immediately by the notifying/completing thread, or asynchronously by an executor.
+     * Tasks are executed, or submitted to the executor, in the order they were added to this Future.
+     */
+    void addListener(Runnable runnable);
+
+    Executor notifyExecutor();
+
+    @Override Future<V> addListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> genericFutureListener);
+    @Override Future<V> addListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... genericFutureListeners);
+    @Override Future<V> removeListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> genericFutureListener);
+    @Override Future<V> removeListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... genericFutureListeners);
+}
+
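A sketch (not from the patch) of the combined API, assuming the AsyncPromise implementation added earlier in this patch exposes a public no-arg constructor and Netty's trySuccess:

```java
import org.apache.cassandra.utils.concurrent.AsyncPromise;
import org.apache.cassandra.utils.concurrent.Future;

public class FutureExample
{
    public static void main(String[] args) throws InterruptedException
    {
        AsyncPromise<Integer> promise = new AsyncPromise<>();     // assumption: public no-arg constructor

        Future<String> mapped = promise.map(i -> "value=" + i);   // Guava-style transform, chained natively
        mapped.addCallback(s -> System.out.println("ok: " + s),
                           t -> System.out.println("failed: " + t));

        promise.trySuccess(42);                                    // completes both futures
        mapped.await();                                            // Awaitable-style wait
    }
}
```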
diff --git a/src/java/org/apache/cassandra/utils/concurrent/FutureCombiner.java b/src/java/org/apache/cassandra/utils/concurrent/FutureCombiner.java
new file mode 100644
index 0000000..e47feff
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/FutureCombiner.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.concurrent;
+
+import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.GlobalEventExecutor;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Netty's PromiseCombiner is not threadsafe, and we combine futures from multiple event executors.
+ *
+ * This class groups a number of Futures into a single logical Future, by registering a listener to each that
+ * decrements a shared counter; if any of them fail, the FutureCombiner is completed with the first cause,
+ * but in all scenarios it only completes once all of the underlying futures have completed (exceptionally or otherwise).
+ *
+ * This Future is always uncancellable.
+ *
+ * We extend AsyncFuture, and simply provide it an uncancellable Promise that will be completed by the listeners
+ * registered to the input futures.
+ */
+public class FutureCombiner<T> extends AsyncFuture<T>
+{
+    private interface ListenerFactory<T>
+    {
+        Listener<T> create(int count, Supplier<T> onSuccess, FutureCombiner<T> complete);
+    }
+
+    /**
+     * Tracks completion; once all futures have completed, invokes {@code trySuccess(Object)} on {@link Listener#complete} with the value supplied by {@link Listener#onSuccess}.
+     * Never invokes failure on {@link Listener#complete}.
+     */
+    private static class Listener<T> extends AtomicInteger implements GenericFutureListener<io.netty.util.concurrent.Future<Object>>
+    {
+        Supplier<T> onSuccess; // non-final so we can release resources immediately when failing fast
+        final FutureCombiner<T> complete;
+
+        Listener(int count, Supplier<T> onSuccess, FutureCombiner<T> complete)
+        {
+            super(count);
+            Preconditions.checkNotNull(onSuccess);
+            this.onSuccess = onSuccess;
+            this.complete = complete;
+        }
+
+        @Override
+        public void operationComplete(io.netty.util.concurrent.Future<Object> result)
+        {
+            if (0 == decrementAndGet())
+                onCompletion();
+        }
+
+        void onCompletion()
+        {
+            complete.trySuccess(onSuccess.get());
+            onSuccess = null;
+        }
+    }
+
+    /**
+     * Tracks completion; once all futures have completed, invokes {@code trySuccess(Object)} on {@link Listener#complete} with the value supplied by {@link Listener#onSuccess}.
+     * If any future fails, immediately propagates this failure and releases associated resources.
+     */
+    private static class FailFastListener<T> extends Listener<T>
+    {
+        FailFastListener(int count, Supplier<T> onSuccess, FutureCombiner<T> complete)
+        {
+            super(count, onSuccess, complete);
+        }
+
+        @Override
+        public void operationComplete(io.netty.util.concurrent.Future<Object> result)
+        {
+            if (!result.isSuccess())
+            {
+                onSuccess = null;
+                complete.tryFailure(result.cause());
+            }
+            else
+            {
+                super.operationComplete(result);
+            }
+        }
+    }
+
+    /**
+     * Tracks completion; once all futures have completed, invokes {@code trySuccess(Object)} on {@link Listener#complete} with the value supplied by {@link Listener#onSuccess}.
+     * If any future fails we propagate this failure, but only once all have completed.
+     */
+    private static class FailSlowListener<T> extends Listener<T>
+    {
+        private static final AtomicReferenceFieldUpdater<FailSlowListener, Throwable> firstCauseUpdater =
+        AtomicReferenceFieldUpdater.newUpdater(FailSlowListener.class, Throwable.class, "firstCause");
+
+        private volatile Throwable firstCause;
+
+        FailSlowListener(int count, Supplier<T> onSuccess, FutureCombiner<T> complete)
+        {
+            super(count, onSuccess, complete);
+        }
+
+        @Override
+        void onCompletion()
+        {
+            if (onSuccess == null)
+                complete.tryFailure(firstCause);
+            else
+                super.onCompletion();
+        }
+
+        @Override
+        public void operationComplete(io.netty.util.concurrent.Future<Object> result)
+        {
+            if (!result.isSuccess())
+            {
+                onSuccess = null;
+                firstCauseUpdater.compareAndSet(FailSlowListener.this, null, result.cause());
+            }
+
+            super.operationComplete(result);
+        }
+    }
+
+    private volatile Collection<? extends io.netty.util.concurrent.Future<?>> propagateCancellation;
+
+    private FutureCombiner(Collection<? extends io.netty.util.concurrent.Future<?>> combine, Supplier<T> resultSupplier, ListenerFactory<T> listenerFactory)
+    {
+        if (combine.isEmpty())
+        {
+            trySuccess(null);
+        }
+        else
+        {
+            Listener<T> listener = listenerFactory.create(combine.size(), resultSupplier, this);
+            combine.forEach(f -> {
+                if (f.isDone()) listener.operationComplete((io.netty.util.concurrent.Future<Object>) f);
+                else f.addListener(listener);
+            });
+        }
+    }
+
+    @Override
+    protected boolean setUncancellable()
+    {
+        if (!super.setUncancellable())
+            return false;
+        propagateCancellation = null;
+        return true;
+    }
+
+    @Override
+    protected boolean setUncancellableExclusive()
+    {
+        if (!super.setUncancellableExclusive())
+            return false;
+        propagateCancellation = null;
+        return true;
+    }
+
+    @Override
+    protected boolean trySuccess(T t)
+    {
+        if (!super.trySuccess(t))
+            return false;
+        propagateCancellation = null;
+        return true;
+    }
+
+    @Override
+    protected boolean tryFailure(Throwable throwable)
+    {
+        if (!super.tryFailure(throwable))
+            return false;
+        propagateCancellation = null;
+        return true;
+    }
+
+    @Override
+    public boolean cancel(boolean b)
+    {
+        if (!super.cancel(b))
+            return false;
+        Collection<? extends io.netty.util.concurrent.Future<?>> propagate = propagateCancellation;
+        propagateCancellation = null;
+        if (propagate != null)
+            propagate.forEach(f -> f.cancel(b));
+        return true;
+    }
+
+    /**
+     * Waits for all of {@code futures} to complete, only propagating failures on completion
+     */
+    public static FutureCombiner<Void> nettySuccessListener(Collection<? extends io.netty.util.concurrent.Future<?>> futures)
+    {
+        return new FutureCombiner<Void>(futures, () -> null, FailSlowListener::new)
+        {
+            @Override
+            public Executor notifyExecutor()
+            {
+                return GlobalEventExecutor.INSTANCE;
+            }
+        };
+    }
+
+    /**
+     * Waits only until the first failure, or until all have succeeded.
+     * Returns a list of results if successful; an exception if any failed.
+     *
+     * @param futures futures to wait for completion of
+     * @return a Future containing all results of {@code futures}
+     */
+    public static <V> Future<List<V>> allOf(Collection<? extends io.netty.util.concurrent.Future<? extends V>> futures)
+    {
+        if (futures.isEmpty())
+            return ImmediateFuture.success(Collections.emptyList());
+
+        return new FutureCombiner<>(futures, () -> futures.stream().map(f -> f.getNow()).collect(Collectors.toList()), FailFastListener::new);
+    }
+
+    /**
+     * Waits for all futures to complete, returning a list containing values of all successful input futures. This
+     * emulates Guava's Futures::successfulAsList in that results will be in the same order as inputs and any
+     * non-success value (e.g. failure or cancellation) will be replaced by null.
+     * @param futures futures to wait for completion of
+     * @return a Future containing all successful results of {@code futures} and nulls for non-successful futures
+     */
+    public static <V> Future<List<V>> successfulOf(List<? extends io.netty.util.concurrent.Future<V>> futures)
+    {
+        if (futures.isEmpty())
+            return ImmediateFuture.success(Collections.emptyList());
+
+        return new FutureCombiner<>(futures,
+                                    () -> futures.stream()
+                                                 .map(f -> f.isSuccess() ? f.getNow() : null)
+                                                 .collect(Collectors.toList()),
+                                    Listener::new);
+    }
+}
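A short sketch of the two combinators, using ImmediateFuture (also added in this patch) to keep the example self-contained:

```java
import java.util.Arrays;
import java.util.List;
import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.FutureCombiner;
import org.apache.cassandra.utils.concurrent.ImmediateFuture;

public class CombinerExample
{
    public static void main(String[] args)
    {
        Future<Integer> a = ImmediateFuture.success(1);
        Future<Integer> b = ImmediateFuture.success(2);
        Future<Integer> failed = ImmediateFuture.failure(new RuntimeException("boom"));

        // fails fast on the first failure; here both succeed, so we get all results
        Future<List<Integer>> all = FutureCombiner.allOf(Arrays.asList(a, b));
        System.out.println(all.syncUninterruptibly().getNow());   // [1, 2]

        // never fails: non-successful inputs are replaced by null, like Guava's successfulAsList
        Future<List<Integer>> best = FutureCombiner.successfulOf(Arrays.asList(a, failed));
        System.out.println(best.syncUninterruptibly().getNow());  // [1, null]
    }
}
```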
diff --git a/src/java/org/apache/cassandra/utils/concurrent/ImmediateFuture.java b/src/java/org/apache/cassandra/utils/concurrent/ImmediateFuture.java
new file mode 100644
index 0000000..159d580
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/ImmediateFuture.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+public class ImmediateFuture<V> extends AsyncFuture<V>
+{
+    private ImmediateFuture(V value)
+    {
+        super(value);
+    }
+
+    private ImmediateFuture(Throwable cause)
+    {
+        super(cause);
+    }
+
+    private ImmediateFuture(FailureHolder failure)
+    {
+        super(failure);
+    }
+
+    public static <V> ImmediateFuture<V> success(V value)
+    {
+        return new ImmediateFuture<>(value);
+    }
+
+    public static <V> ImmediateFuture<V> failure(Throwable cause)
+    {
+        return new ImmediateFuture<>(cause);
+    }
+
+    public static <V> ImmediateFuture<V> cancelled()
+    {
+        return new ImmediateFuture<>(CANCELLED);
+    }
+}
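A small sketch of the pre-completed futures, useful for short-circuit paths and tests:

```java
import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.ImmediateFuture;

public class ImmediateFutureExample
{
    public static void main(String[] args)
    {
        Future<String> ok = ImmediateFuture.success("done");
        System.out.println(ok.isSuccess() + " " + ok.getNow());     // true done

        Future<String> failed = ImmediateFuture.failure(new IllegalStateException("nope"));
        System.out.println(failed.isDone() + " " + failed.cause()); // true, plus the IllegalStateException

        Future<String> cancelled = ImmediateFuture.cancelled();
        System.out.println(cancelled.isCancelled());                // expected: true
    }
}
```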
diff --git a/src/java/org/apache/cassandra/utils/concurrent/IntervalLock.java b/src/java/org/apache/cassandra/utils/concurrent/IntervalLock.java
deleted file mode 100644
index 382a2dc..0000000
--- a/src/java/org/apache/cassandra/utils/concurrent/IntervalLock.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils.concurrent;
-
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.cassandra.utils.TimeSource;
-
-/**
- * This class extends ReentrantReadWriteLock to provide a write lock that can only be acquired at provided intervals.
- */
-public class IntervalLock extends ReentrantReadWriteLock
-{
-    private final AtomicLong lastAcquire = new AtomicLong();
-    private final TimeSource timeSource;
-
-    public IntervalLock(TimeSource timeSource)
-    {
-        this.timeSource = timeSource;
-    }
-
-    /**
-     * Try acquiring a write lock if the given interval is passed since the last call to this method.
-     *
-     * @param interval In millis.
-     * @return True if acquired and locked, false otherwise.
-     */
-    public boolean tryIntervalLock(long interval)
-    {
-        long now = timeSource.currentTimeMillis();
-        boolean acquired = (now - lastAcquire.get() >= interval) && writeLock().tryLock();
-        if (acquired)
-            lastAcquire.set(now);
-
-        return acquired;
-    }
-
-    /**
-     * Release the last acquired interval lock.
-     */
-    public void releaseIntervalLock()
-    {
-        writeLock().unlock();
-    }
-
-    @VisibleForTesting
-    public long getLastIntervalAcquire()
-    {
-        return lastAcquire.get();
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/concurrent/IntrusiveStack.java b/src/java/org/apache/cassandra/utils/concurrent/IntrusiveStack.java
new file mode 100644
index 0000000..e61d565
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/IntrusiveStack.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import net.nicoulaj.compilecommand.annotations.Inline;
+import org.apache.cassandra.utils.LongAccumulator;
+
+/**
+ * An efficient stack/list that is expected to ordinarily be either empty or close to empty, and for which
+ * we need concurrent insertions but do not need to support removal - i.e. the list is semi-immutable.
+ *
+ * This is an intrusive stack, and for simplicity we treat all
+ *
+ * @param <T>
+ */
+public class IntrusiveStack<T extends IntrusiveStack<T>> implements Iterable<T>
+{
+    static class Itr<T extends IntrusiveStack<T>> implements Iterator<T>
+    {
+        private T next;
+
+        Itr(T next)
+        {
+            this.next = next;
+        }
+
+        @Override
+        public boolean hasNext()
+        {
+            return next != null;
+        }
+
+        @Override
+        public T next()
+        {
+            T result = next;
+            next = result.next;
+            return result;
+        }
+    }
+
+    T next;
+
+    @Inline
+    protected static <O, T extends IntrusiveStack<T>> T push(AtomicReferenceFieldUpdater<? super O, T> headUpdater, O owner, T prepend)
+    {
+        return push(headUpdater, owner, prepend, (prev, next) -> {
+            next.next = prev;
+            return next;
+        });
+    }
+
+    protected static <O, T extends IntrusiveStack<T>> T push(AtomicReferenceFieldUpdater<O, T> headUpdater, O owner, T prepend, BiFunction<T, T, T> combine)
+    {
+        while (true)
+        {
+            T head = headUpdater.get(owner);
+            if (headUpdater.compareAndSet(owner, head, combine.apply(head, prepend)))
+                return head;
+        }
+    }
+
+    protected interface Setter<O, T>
+    {
+        public boolean compareAndSet(O owner, T expect, T update);
+    }
+
+    @Inline
+    protected static <O, T extends IntrusiveStack<T>> T push(Function<O, T> getter, Setter<O, T> setter, O owner, T prepend)
+    {
+        return push(getter, setter, owner, prepend, (prev, next) -> {
+            next.next = prev;
+            return next;
+        });
+    }
+
+    protected static <O, T extends IntrusiveStack<T>> T push(Function<O, T> getter, Setter<O, T> setter, O owner, T prepend, BiFunction<T, T, T> combine)
+    {
+        while (true)
+        {
+            T head = getter.apply(owner);
+            if (setter.compareAndSet(owner, head, combine.apply(head, prepend)))
+                return head;
+        }
+    }
+
+    protected static <O, T extends IntrusiveStack<T>> void pushExclusive(AtomicReferenceFieldUpdater<O, T> headUpdater, O owner, T prepend, BiFunction<T, T, T> combine)
+    {
+        T head = headUpdater.get(owner);
+        headUpdater.lazySet(owner, combine.apply(head, prepend));
+    }
+
+    protected static <T extends IntrusiveStack<T>, O> void pushExclusive(AtomicReferenceFieldUpdater<O, T> headUpdater, O owner, T prepend)
+    {
+        prepend.next = headUpdater.get(owner);
+        headUpdater.lazySet(owner, prepend);
+    }
+
+    protected static <T extends IntrusiveStack<T>> T pushExclusive(T head, T prepend)
+    {
+        prepend.next = head;
+        return prepend;
+    }
+
+    protected static <T extends IntrusiveStack<T>, O> Iterable<T> iterable(AtomicReferenceFieldUpdater<O, T> headUpdater, O owner)
+    {
+        return iterable(headUpdater.get(owner));
+    }
+
+    protected static <T extends IntrusiveStack<T>> Iterable<T> iterable(T list)
+    {
+        return list == null ? () -> iterator(null) : list;
+    }
+
+    protected static <T extends IntrusiveStack<T>> Iterator<T> iterator(T list)
+    {
+        return new Itr<>(list);
+    }
+
+    protected static int size(IntrusiveStack<?> list)
+    {
+        int size = 0;
+        while (list != null)
+        {
+            ++size;
+            list = list.next;
+        }
+        return size;
+    }
+
+    protected static <T extends IntrusiveStack<T>> long accumulate(T list, LongAccumulator<T> accumulator, long initialValue)
+    {
+        long value = initialValue;
+        while (list != null)
+        {
+            value = accumulator.apply(list, value);
+            list = list.next;
+        }
+        return value;
+    }
+
+    // requires exclusive ownership (incl. with readers)
+    protected T reverse()
+    {
+        return reverse((T) this);
+    }
+
+    // requires exclusive ownership (incl. with readers)
+    protected static <T extends IntrusiveStack<T>> T reverse(T list)
+    {
+        T prev = null;
+        T cur = list;
+        while (cur != null)
+        {
+            T next = cur.next;
+            cur.next = prev;
+            prev = cur;
+            cur = next;
+        }
+        return prev;
+    }
+
+    @Override
+    public void forEach(Consumer<? super T> forEach)
+    {
+        forEach((T)this, forEach);
+    }
+
+    protected static <T extends IntrusiveStack<T>> void forEach(T list, Consumer<? super T> forEach)
+    {
+        while (list != null)
+        {
+            forEach.accept(list);
+            list = list.next;
+        }
+    }
+
+    @Override
+    public Iterator<T> iterator()
+    {
+        return new Itr<>((T) this);
+    }
+}
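
Illustrative usage of the helpers above (a sketch only; the Waiter and Owner types below are hypothetical and not part of the tree):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.apache.cassandra.utils.concurrent.IntrusiveStack;

class Waiter extends IntrusiveStack<Waiter>
{
    final Runnable task;
    Waiter(Runnable task) { this.task = task; }

    // any object with a volatile field of the element type can own the stack head
    static class Owner
    {
        volatile Waiter head;
        static final AtomicReferenceFieldUpdater<Owner, Waiter> HEAD =
            AtomicReferenceFieldUpdater.newUpdater(Owner.class, Waiter.class, "head");
    }

    static void add(Owner owner, Runnable task)
    {
        push(Owner.HEAD, owner, new Waiter(task));        // lock-free concurrent prepend
    }

    static void drainAndRun(Owner owner)
    {
        Waiter list = Owner.HEAD.getAndSet(owner, null);  // take exclusive ownership of the chain
        forEach(reverse(list), w -> w.task.run());        // reverse to run in insertion order
    }
}
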
diff --git a/src/java/org/apache/cassandra/utils/concurrent/ListenerList.java b/src/java/org/apache/cassandra/utils/concurrent/ListenerList.java
new file mode 100644
index 0000000..40b908b
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/ListenerList.java
@@ -0,0 +1,373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import javax.annotation.Nullable;
+
+import com.google.common.util.concurrent.FutureCallback;
+
+import io.netty.util.concurrent.EventExecutor;
+import io.netty.util.concurrent.GenericFutureListener;
+import net.nicoulaj.compilecommand.annotations.Inline;
+import org.apache.cassandra.concurrent.ExecutionFailure;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
+
+import static org.apache.cassandra.utils.concurrent.ListenerList.Notifying.NOTIFYING;
+
+/**
+ * Encapsulate one or more items in a linked-list that is immutable whilst shared, forming a prepend-only list (or stack).
+ * Once the list is ready to consume, exclusive ownership is taken by clearing the shared variable containing it, after
+ * which the list may be invoked using {@link #notifyExclusive(ListenerList, Future)}, which reverses the list before invoking the work it contains.
+ */
+abstract class ListenerList<V> extends IntrusiveStack<ListenerList<V>>
+{
+    abstract void notifySelf(Executor notifyExecutor, Future<V> future);
+
+    static ListenerList pushHead(ListenerList prev, ListenerList next)
+    {
+        if (prev instanceof Notifying<?>)
+        {
+            Notifying result = new Notifying();
+            result.next = next;
+            next.next = prev == NOTIFYING ? null : prev;
+            return result;
+        }
+        next.next = prev;
+        return next;
+    }
+
+    /**
+     * Logically append {@code newListener} to {@link #listeners}
+     * (at this stage it is a stack, so we actually prepend)
+     *
+     * @param newListener must be either a {@link ListenerList} or {@link GenericFutureListener}
+     */
+    @Inline
+    static <T> void push(AtomicReferenceFieldUpdater<? super T, ListenerList> updater, T in, ListenerList newListener)
+    {
+        IntrusiveStack.push(updater, in, newListener, ListenerList::pushHead);
+    }
+
+    /**
+     * Logically append {@code newListener} to {@link #listeners}
+     * (at this stage it is a stack, so we actually prepend)
+     *
+     * @param newListener must be either a {@link ListenerList} or {@link GenericFutureListener}
+     */
+    @Inline
+    static <T> void pushExclusive(AtomicReferenceFieldUpdater<? super T, ListenerList> updater, T in, ListenerList newListener)
+    {
+        IntrusiveStack.pushExclusive(updater, in, newListener, ListenerList::pushHead);
+    }
+
+    static <V, T extends Future<V>> void notify(AtomicReferenceFieldUpdater<? super T, ListenerList> updater, T in)
+    {
+        while (true)
+        {
+            ListenerList<V> listeners = updater.get(in);
+            if (listeners == null || listeners instanceof Notifying)
+                return; // either no listeners, or we are already notifying listeners, so we'll get to the new one when ready
+
+            if (updater.compareAndSet(in, listeners, NOTIFYING))
+            {
+                while (true)
+                {
+                    notifyExclusive(listeners, in);
+                    if (updater.compareAndSet(in, NOTIFYING, null))
+                        return;
+
+                    listeners = updater.getAndSet(in, NOTIFYING);
+                }
+            }
+        }
+    }
+
+    /**
+     * Requires exclusive ownership of {@code head}.
+     *
+     * Task submission occurs in the order the operations were submitted; if all of the executors
+     * are immediate or unspecified this guarantees execution order.
+     * Tasks are submitted to the executor individually, as this simplifies semantics and
+     * we anticipate few listeners in practice, and even fewer with indirect executors.
+     *
+     * @param head must be either a {@link ListenerList} or {@link GenericFutureListener}
+     */
+    static <T> void notifyExclusive(ListenerList<T> head, Future<T> future)
+    {
+        Executor notifyExecutor; {
+            Executor exec = future.notifyExecutor();
+            notifyExecutor = inExecutor(exec) ? null : exec;
+        }
+
+        head = reverse(head);
+        forEach(head, i -> i.notifySelf(notifyExecutor, future));
+    }
+
+    /**
+     * Notify {@code listener} on the invoking thread, handling any exceptions
+     */
+    static <F extends io.netty.util.concurrent.Future<?>> void notifyListener(GenericFutureListener<F> listener, F future)
+    {
+        try
+        {
+            listener.operationComplete(future);
+        }
+        catch (Throwable t)
+        {
+            // TODO: suboptimal package interdependency - move FutureTask etc here?
+            ExecutionFailure.handle(t);
+        }
+    }
+
+    /**
+     * Notify {@code listener} using {@code notifyExecutor} if set, otherwise on the invoking thread, handling any exceptions
+     */
+    static <F extends io.netty.util.concurrent.Future<?>> void notifyListener(Executor notifyExecutor, GenericFutureListener<F> listener, F future)
+    {
+        if (notifyExecutor == null) notifyListener(listener, future);
+        else safeExecute(notifyExecutor, () -> notifyListener(listener, future));
+    }
+
+    /**
+     * Notify {@code listener} using {@code notifyExecutor} if set, otherwise on the invoking thread, handling any exceptions
+     */
+    static void notifyListener(@Nullable Executor notifyExecutor, Runnable listener)
+    {
+        safeExecute(notifyExecutor, listener);
+    }
+
+    private static void safeExecute(@Nullable Executor notifyExecutor, Runnable runnable)
+    {
+        if (notifyExecutor == null)
+            notifyExecutor = ImmediateExecutor.INSTANCE;
+        try
+        {
+            notifyExecutor.execute(runnable);
+        }
+        catch (Exception | Error e)
+        {
+            // TODO: suboptimal package interdependency - move FutureTask etc here?
+            ExecutionFailure.handle(e);
+        }
+    }
+
+    /**
+     * Encapsulate a regular listener in a linked list
+     */
+    static class GenericFutureListenerList<V> extends ListenerList<V>
+    {
+        final GenericFutureListener listener;
+
+        GenericFutureListenerList(GenericFutureListener listener)
+        {
+            this.listener = listener;
+        }
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future<V> future)
+        {
+            notifyListener(notifyExecutor, listener, future);
+        }
+    }
+
+    /**
+     * Encapsulates the invocation of a callback with everything needed to submit for execution
+     * without incurring significant further overhead as a list
+     */
+    static class CallbackListener<V> extends ListenerList<V> implements Runnable
+    {
+        final Future<V> future;
+        final FutureCallback<? super V> callback;
+
+        CallbackListener(Future<V> future, FutureCallback<? super V> callback)
+        {
+            this.future = future;
+            this.callback = callback;
+        }
+
+        @Override
+        public void run()
+        {
+            if (future.isSuccess()) callback.onSuccess(future.getNow());
+            else callback.onFailure(future.cause());
+        }
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future<V> future)
+        {
+            notifyListener(notifyExecutor, this);
+        }
+    }
+
+    /**
+     * Encapsulates the invocation of a callback with everything needed to submit for execution
+     * without incurring significant further overhead as a list
+     */
+    static class CallbackBiConsumerListener<V> extends ListenerList<V> implements Runnable
+    {
+        final Future<V> future;
+        final BiConsumer<? super V, Throwable> callback;
+        final Executor executor;
+
+        CallbackBiConsumerListener(Future<V> future, BiConsumer<? super V, Throwable> callback, Executor executor)
+        {
+            this.future = future;
+            this.callback = callback;
+            this.executor = executor;
+        }
+
+        @Override
+        public void run()
+        {
+            if (future.isSuccess()) callback.accept(future.getNow(), null);
+            else callback.accept(null, future.cause());
+        }
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future<V> future)
+        {
+            notifyListener(executor == null ? notifyExecutor : executor, this);
+        }
+    }
+
+    /**
+     * Encapsulates the invocation of a callback with everything needed to submit for execution
+     * without incurring significant further overhead as a list
+     */
+    static class CallbackListenerWithExecutor<V> extends CallbackListener<V>
+    {
+        final Executor executor;
+        CallbackListenerWithExecutor(Future<V> future, FutureCallback<? super V> callback, Executor executor)
+        {
+            super(future, callback);
+            this.executor = executor;
+        }
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future<V> future)
+        {
+            notifyListener(executor, this);
+        }
+    }
+
+    /**
+     * Encapsulates the invocation of a callback with everything needed to submit for execution
+     * without incurring significant further overhead as a list
+     */
+    static class CallbackLambdaListener<V> extends ListenerList<V> implements Runnable
+    {
+        final Future<V> future;
+        final Consumer<? super V> onSuccess;
+        final Consumer<? super Throwable> onFailure;
+        final Executor executor;
+
+        CallbackLambdaListener(Future<V> future, Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure, Executor executor)
+        {
+            this.future = future;
+            this.onSuccess = onSuccess;
+            this.onFailure = onFailure;
+            this.executor = executor;
+        }
+
+        @Override
+        public void run()
+        {
+            if (future.isSuccess()) onSuccess.accept(future.getNow());
+            else onFailure.accept(future.cause());
+        }
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future future)
+        {
+            notifyListener(executor == null ? notifyExecutor : executor, this);
+        }
+    }
+
+    /**
+     * Encapsulate a task, executable on completion by {@link Future#notifyExecutor}, in a linked list for storing in
+     * {@link #listeners}, either as a listener on its own (since we need to encapsulate it anyway), or alongside
+     * other listeners in a list
+     */
+    static class RunnableWithNotifyExecutor<V> extends ListenerList<V>
+    {
+        final Runnable task;
+        RunnableWithNotifyExecutor(Runnable task)
+        {
+            this.task = task;
+        }
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future<V> future)
+        {
+            notifyListener(notifyExecutor, task);
+        }
+    }
+
+    /**
+     * Encapsulate a task executable on completion in a linked list for storing in {@link #listeners},
+     * either as a listener on its own (since we need to encapsulate it anyway), or alongside other listeners
+     * in a list
+     */
+    static class RunnableWithExecutor<V> extends ListenerList<V>
+    {
+        final Runnable task;
+        @Nullable final Executor executor;
+        RunnableWithExecutor(Runnable task, @Nullable Executor executor)
+        {
+            this.task = task;
+            this.executor = executor;
+        }
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future<V> future)
+        {
+            notifyListener(inExecutor(executor) ? null : executor, task);
+        }
+    }
+
+    /**
+     * Dummy that indicates listeners are already being notified after the future was completed,
+     * so we cannot notify them ourselves whilst maintaining the guaranteed invocation order.
+     * The invocation of the list can be left to the thread already notifying listeners.
+     */
+    static class Notifying<V> extends ListenerList<V>
+    {
+        static final Notifying NOTIFYING = new Notifying();
+
+        @Override
+        void notifySelf(Executor notifyExecutor, Future<V> future)
+        {
+        }
+    }
+
+    /**
+     * @return true iff the invoking thread is executing {@code executor}
+     */
+    static boolean inExecutor(Executor executor)
+    {
+        return (executor instanceof EventExecutor && ((EventExecutor) executor).inEventLoop())
+               || (executor instanceof ExecutorPlus && ((ExecutorPlus) executor).inExecutor());
+    }
+}
+
diff --git a/src/java/org/apache/cassandra/utils/concurrent/LoadingMap.java b/src/java/org/apache/cassandra/utils/concurrent/LoadingMap.java
new file mode 100644
index 0000000..399eb0e
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/LoadingMap.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.cliffc.high_scale_lib.NonBlockingHashMap;
+
+/**
+ * An extension of {@link NonBlockingHashMap} where all values are wrapped by {@link Future}.
+ * <p>
+ * The main purpose of this class is to provide the functionality of a concurrent hash map that can perform operations like
+ * {@link ConcurrentHashMap#computeIfAbsent(Object, Function)} and {@link ConcurrentHashMap#computeIfPresent(Object, BiFunction)}
+ * with the synchronization scope reduced to a single key - that is, unlike {@link ConcurrentHashMap}, we do not lock the
+ * whole map for the time the mapping function is running. This helps to avoid the case where we want to load/unload a value
+ * for a key K1 while loading/unloading a value for a key K2 - such a scenario is forbidden with {@link ConcurrentHashMap}
+ * and leads to a deadlock. On the other hand, {@link NonBlockingHashMap} alone does not guarantee at-most-once semantics of
+ * running the mapping function for a single key, which this class restores by wrapping values in futures.
+ *
+ * @param <K> the key type
+ * @param <V> the value type
+ */
+public class LoadingMap<K, V>
+{
+    private final NonBlockingHashMap<K, Future<V>> internalMap = new NonBlockingHashMap<>();
+
+    /**
+     * Returns a promise for the given key or null if there is nothing associated with the key.
+     * <p/>if the promise is not fulfilled, it means that there is a loading process associated with the key
+     * <p/>if the promise is fulfilled with a {@code null} value, it means that there is an unloading process associated with the key
+     * <p/>if the promise is fulfilled with a failure, it means that a loading process associated with the key failed
+     * but the exception was not propagated yet (a failed promise is eventually removed from the map)
+     */
+    @VisibleForTesting
+    Future<V> get(K key)
+    {
+        return internalMap.get(key);
+    }
+
+    /**
+     * Get a value for a given key. Returns a non-null object only if there is a successfully initialized value associated
+     * with the provided key. It returns {@code null} if there is no value for the key, or the value is being initialized
+     * or removed. It does not throw if the last attempt to initialize the value failed.
+     */
+    public V getIfReady(K key)
+    {
+        Future<V> future = internalMap.get(key);
+        return future != null ? future.getNow() : null;
+    }
+
+    /**
+     * If the value for the given key is missing, execute a load function to obtain a value and put it into the map.
+     * It is guaranteed that loading and unloading of a value for a single key are executed serially. It is also
+     * guaranteed that the load function is executed exactly once to load a value into the map (regardless of concurrent attempts).
+     * <p/>
+     * In case there is a concurrent attempt to load a value for this key, this attempt waits until the concurrent attempt
+     * is done and returns its result (if succeeded). If the concurrent attempt fails, this attempt is retried. However,
+     * if this attempt fails, it is not retried and the exception is rethrown. In case there is a concurrent attempt
+     * to unload a value for this key, this attempt waits until the concurrent attempt is done and retries loading.
+     * <p/>
+     * When the mapping function returns {@code null}, a {@link NullPointerException} is thrown. When the mapping function
+     * throws an exception, it is rethrown by this method. In both cases nothing gets added to the map.
+     * <p/>
+     * It is allowed to nest loading for a different key, though nested loading for the same key results in a deadlock.
+     */
+    public V blockingLoadIfAbsent(K key, Supplier<? extends V> loadFunction) throws RuntimeException
+    {
+        while (true)
+        {
+            Future<V> future = internalMap.get(key);
+            boolean attemptedInThisThread = false;
+            if (future == null)
+            {
+                AsyncPromise<V> newEntry = new AsyncPromise<>();
+                future = internalMap.putIfAbsent(key, newEntry);
+                if (future == null)
+                {
+                    // We managed to create an entry for the value. Now initialize it.
+                    attemptedInThisThread = true;
+                    future = newEntry;
+                    try
+                    {
+                        V v = loadFunction.get();
+                        if (v == null)
+                            throw new NullPointerException("The mapping function returned null");
+                        else
+                            newEntry.setSuccess(v);
+                    }
+                    catch (Throwable t)
+                    {
+                        newEntry.setFailure(t);
+                        // Remove future so that construction can be retried later
+                        internalMap.remove(key, future);
+                    }
+                }
+
+                // Else some other thread beat us to it, but we now have the reference to the future which we can wait for.
+            }
+
+            V v = future.awaitUninterruptibly().getNow();
+
+            if (v != null) // implies success
+                return v;
+
+            if (attemptedInThisThread)
+                // Rethrow if the failing attempt was initiated by us (failed and attemptedInThisThread)
+                future.rethrowIfFailed();
+
+            // Retry in other cases, that is, if blockingUnloadIfPresent was called in the meantime
+            // (success and getNow == null) hoping that unloading gets finished soon, and if the concurrent attempt
+            // to load entry fails
+            Thread.yield();
+        }
+    }
+
+    /**
+     * If a value for the given key is present, the unload function is run and the value is removed from the map.
+     * Similarly to {@link #blockingLoadIfAbsent(Object, Supplier)}, at-most-once semantics is guaranteed for the
+     * unload function.
+     * <p/>
+     * When the unload function fails, the value is removed from the map anyway and the failure is rethrown.
+     * <p/>
+     * When the key was not found, the method returns {@code null}.
+     *
+     * @throws UnloadExecutionException when the unloading failed to complete - this is a checked exception because
+     *                                  the value is removed from the map regardless of the result of unloading;
+     *                                  therefore if the unloading failed, the caller is responsible for handling that
+     */
+    public V blockingUnloadIfPresent(K key, Consumer<? super V> unloadFunction) throws UnloadExecutionException
+    {
+        Promise<V> droppedFuture = new AsyncPromise<V>().setSuccess(null);
+
+        Future<V> existingFuture;
+        do
+        {
+            existingFuture = internalMap.get(key);
+            if (existingFuture == null || existingFuture.isDone() && existingFuture.getNow() == null)
+                return null;
+        } while (!internalMap.replace(key, existingFuture, droppedFuture));
+
+        V v = existingFuture.awaitUninterruptibly().getNow();
+        try
+        {
+            if (v == null)
+                // which means that either the value failed to load or a concurrent attempt to unload already did the work
+                return null;
+
+            unloadFunction.accept(v);
+            return v;
+        }
+        catch (Throwable t)
+        {
+            throw new UnloadExecutionException(v, t);
+        }
+        finally
+        {
+            Future<V> future = internalMap.remove(key);
+            assert future == droppedFuture;
+        }
+    }
+
+    /**
+     * Thrown when unloading a value failed. It encapsulates the value which failed to unload.
+     */
+    public static class UnloadExecutionException extends ExecutionException
+    {
+        private final Object value;
+
+        public UnloadExecutionException(Object value, Throwable cause)
+        {
+            super(cause);
+            this.value = value;
+        }
+
+        public <T> T value()
+        {
+            return (T) value;
+        }
+    }
+}
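
Illustrative usage (a sketch only; the Session type with connect/close is a hypothetical stand-in for an expensive resource):

LoadingMap<String, Session> sessions = new LoadingMap<>();

// load (or join an in-flight load of) the value for the key; the supplier runs at most once per successful load
Session session = sessions.blockingLoadIfAbsent("node1", () -> Session.connect("node1"));

// cheap read: null if the key is absent, still loading, or currently being unloaded
Session maybe = sessions.getIfReady("node1");

// unload: the consumer runs at most once and the entry is removed even if it throws
try
{
    sessions.blockingUnloadIfPresent("node1", Session::close);
}
catch (LoadingMap.UnloadExecutionException e)
{
    Session failed = e.value(); // the value that failed to unload; the caller must clean it up
}
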
diff --git a/src/java/org/apache/cassandra/utils/concurrent/NonBlockingRateLimiter.java b/src/java/org/apache/cassandra/utils/concurrent/NonBlockingRateLimiter.java
new file mode 100644
index 0000000..2a98222
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/NonBlockingRateLimiter.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Ticker;
+
+import javax.annotation.concurrent.ThreadSafe;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.lang.Math.toIntExact;
+
+/**
+ * A rate limiter implementation that allows callers to reserve permits that may only be available 
+ * in the future, delegating to them decisions about how to schedule/delay work and whether or not
+ * to block execution to do so.
+ */
+@SuppressWarnings("UnstableApiUsage")
+@ThreadSafe
+public class NonBlockingRateLimiter
+{
+    public static final long NANOS_PER_SECOND = TimeUnit.SECONDS.toNanos(1);
+    static final long DEFAULT_BURST_NANOS = NANOS_PER_SECOND;
+
+    /** a starting time for elapsed time calculations */
+    private final long startedNanos;
+
+    /** nanoseconds from start time corresponding to the next available permit */
+    private final AtomicLong nextAvailable = new AtomicLong();
+    
+    private volatile Ticker ticker;
+    
+    private volatile int permitsPerSecond;
+    
+    /** time in nanoseconds between permits on the timeline */
+    private volatile long intervalNanos;
+
+    /**
+     * To allow the limiter to more closely adhere to the configured rate in the face of
+     * unevenly distributed permit requests, it will allow a number of permits equal to
+     * burstNanos / intervalNanos to be issued in a "burst" before reaching a steady state.
+     *
+     * Another way to think about this is that it allows us to bring forward the permits
+     * from short periods of inactivity. This is especially useful when the mechanism by which
+     * the upstream user of the limiter delays request processing contains overhead that is
+     * longer than intervalNanos in duration.
+     */
+    private final long burstNanos;
+
+    public NonBlockingRateLimiter(int permitsPerSecond)
+    {
+        this(permitsPerSecond, DEFAULT_BURST_NANOS, Ticker.systemTicker());
+    }
+
+    @VisibleForTesting
+    public NonBlockingRateLimiter(int permitsPerSecond, long burstNanos, Ticker ticker)
+    {
+        this.startedNanos = ticker.read();
+        this.burstNanos = burstNanos;
+        setRate(permitsPerSecond, ticker);
+    }
+
+    public void setRate(int permitsPerSecond)
+    {
+        setRate(permitsPerSecond, Ticker.systemTicker());
+    }
+
+    @VisibleForTesting
+    public synchronized void setRate(int permitsPerSecond, Ticker ticker)
+    {
+        Preconditions.checkArgument(permitsPerSecond > 0, "permits/second must be positive");
+        Preconditions.checkArgument(permitsPerSecond <= NANOS_PER_SECOND, "permits/second cannot be greater than " + NANOS_PER_SECOND);
+
+        this.ticker = ticker;
+        this.permitsPerSecond = permitsPerSecond;
+        intervalNanos = NANOS_PER_SECOND / permitsPerSecond;
+        nextAvailable.set(nanosElapsed());
+    }
+
+    /**
+     * @return the number of available permits per second
+     */
+    public int getRate()
+    {
+        return permitsPerSecond;
+    }
+
+    /**
+     * Reserves a single permit slot on the timeline which may not yet be available.
+     *
+     * @return time until the reserved permit will be available (or zero if it already is) in the specified units
+     */
+    public long reserveAndGetDelay(TimeUnit delayUnit)
+    {
+        long nowNanos = nanosElapsed();
+
+        for (;;)
+        {
+            long prev = nextAvailable.get();
+            long interval = this.intervalNanos;
+
+            // Push the first available permit slot up to the burst window if necessary.
+            long firstAvailable = Math.max(prev, nowNanos - burstNanos);
+
+            // Advance the configured interval starting from the bounded previous permit slot.
+            if (nextAvailable.compareAndSet(prev, firstAvailable + interval))
+                // Return the delay until the reserved slot becomes available, or zero if it already is available.
+                return delayUnit.convert(Math.max(0, firstAvailable - nowNanos), TimeUnit.NANOSECONDS);
+        }
+    }
+
+    /**
+     * Reserves a single permit slot on the timeline, but only if one is available.
+     *
+     * @return true if a permit is available, false if one is not
+     */
+    public boolean tryReserve()
+    {
+        long nowNanos = nanosElapsed();
+    
+        for (;;)
+        {
+            long prev = nextAvailable.get();
+            long interval = this.intervalNanos;
+    
+            // Push the first available permit slot up to the burst window if necessary.
+            long firstAvailable = Math.max(prev, nowNanos - burstNanos);
+            
+            // If we haven't reached the time for the first available permit, we've failed to reserve. 
+            if (nowNanos < firstAvailable)
+                return false;
+    
+            // Advance the configured interval starting from the bounded previous permit slot.
+            // If another thread has already taken the next slot, retry.
+            if (nextAvailable.compareAndSet(prev, firstAvailable + interval))
+                return true;
+        }
+    }
+
+    @VisibleForTesting
+    public long getIntervalNanos()
+    {
+        return intervalNanos;
+    }
+    
+    @VisibleForTesting
+    public long getStartedNanos()
+    {
+        return startedNanos;
+    }
+
+    private long nanosElapsed()
+    {
+        return ticker.read() - startedNanos;
+    }
+
+    public static final NonBlockingRateLimiter NO_OP_LIMITER = new NonBlockingRateLimiter(toIntExact(NANOS_PER_SECOND))
+    {
+        @Override
+        public long reserveAndGetDelay(TimeUnit delayUnit)
+        {
+            return 0;
+        }
+
+        @Override
+        public boolean tryReserve()
+        {
+            return true;
+        }
+    };
+}
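
As a worked example, at 100 permits/second intervalNanos is 10ms, and with the default one-second burst window up to 100 permits (burstNanos / intervalNanos) may be issued back-to-back after a quiet period before the steady 10ms spacing resumes. A non-blocking caller might use the limiter roughly as in this sketch (scheduler and task are hypothetical):

NonBlockingRateLimiter limiter = new NonBlockingRateLimiter(100); // 100 permits/second

long delayNanos = limiter.reserveAndGetDelay(TimeUnit.NANOSECONDS);
if (delayNanos == 0)
    task.run();                                                   // a permit was already available
else
    scheduler.schedule(task, delayNanos, TimeUnit.NANOSECONDS);   // defer the work instead of blocking

// alternatively, when shedding load is acceptable:
if (limiter.tryReserve())
    task.run();
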
diff --git a/src/java/org/apache/cassandra/utils/concurrent/NotScheduledFuture.java b/src/java/org/apache/cassandra/utils/concurrent/NotScheduledFuture.java
new file mode 100644
index 0000000..a81f9a9
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/NotScheduledFuture.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Delayed;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+public class NotScheduledFuture<T> implements ScheduledFuture<T>
+{
+    public long getDelay(TimeUnit unit)
+    {
+        return 0;
+    }
+
+    public int compareTo(Delayed o)
+    {
+        return 0;
+    }
+
+    public boolean cancel(boolean mayInterruptIfRunning)
+    {
+        return false;
+    }
+
+    public boolean isCancelled()
+    {
+        return false;
+    }
+
+    public boolean isDone()
+    {
+        return false;
+    }
+
+    public T get()
+    {
+        return null;
+    }
+
+    public T get(long timeout, TimeUnit unit)
+    {
+        return null;
+    }
+}
diff --git a/src/java/org/apache/cassandra/utils/concurrent/OpOrder.java b/src/java/org/apache/cassandra/utils/concurrent/OpOrder.java
index 863f038..7f18a0c 100644
--- a/src/java/org/apache/cassandra/utils/concurrent/OpOrder.java
+++ b/src/java/org/apache/cassandra/utils/concurrent/OpOrder.java
@@ -18,7 +18,11 @@
  */
 package org.apache.cassandra.utils.concurrent;
 
+import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
 
 /**
  * <p>A class for providing synchronization between producers and consumers that do not
@@ -113,8 +117,6 @@
      * after which all new operations will start against a new Group that will not be accepted
      * by barrier.isAfter(), and barrier.await() will return only once all operations started prior to the issue
      * have completed.
-     *
-     * @return
      */
     public Barrier newBarrier()
     {
@@ -162,10 +164,11 @@
         private final long id; // monotonically increasing id for compareTo()
         private volatile int running = 0; // number of operations currently running.  < 0 means we're expired, and the count of tasks still running is -(running + 1)
         private volatile boolean isBlocking; // indicates running operations are blocking future barriers
-        private final WaitQueue isBlockingSignal = new WaitQueue(); // signal to wait on to indicate isBlocking is true
-        private final WaitQueue waiting = new WaitQueue(); // signal to wait on for completion
+        private volatile ConcurrentLinkedQueue<WaitQueue.Signal> blocking; // signal to wait on to indicate isBlocking is true
+        private final WaitQueue waiting = newWaitQueue(); // signal to wait on for completion
 
         static final AtomicIntegerFieldUpdater<Group> runningUpdater = AtomicIntegerFieldUpdater.newUpdater(Group.class, "running");
+        static final AtomicReferenceFieldUpdater<Group, ConcurrentLinkedQueue> blockingUpdater = AtomicReferenceFieldUpdater.newUpdater(Group.class, ConcurrentLinkedQueue.class, "blocking");
 
         // constructs first instance only
         private Group()
@@ -318,21 +321,21 @@
             return isBlocking;
         }
 
-        /**
-         * register to be signalled when a barrier waiting on us is, or maybe, blocking general progress,
-         * so we should try more aggressively to progress
-         */
-        public WaitQueue.Signal isBlockingSignal()
+        public void notifyIfBlocking(WaitQueue.Signal signal)
         {
-            return isBlockingSignal.register();
+            if (blocking == null)
+                blockingUpdater.compareAndSet(this, null, new ConcurrentLinkedQueue<>());
+            blocking.add(signal);
+            if (isBlocking() && blocking.remove(signal))
+                signal.signal();
         }
 
-        /**
-         * wrap the provided signal to also be signalled if the operation gets marked blocking
-         */
-        public WaitQueue.Signal isBlockingSignal(WaitQueue.Signal signal)
+        private void markBlocking()
         {
-            return WaitQueue.any(signal, isBlockingSignal());
+            isBlocking = true;
+            ConcurrentLinkedQueue<WaitQueue.Signal> blocking = this.blocking;
+            if (blocking != null)
+                blocking.forEach(WaitQueue.Signal::signal);
         }
 
         public int compareTo(Group that)
@@ -406,21 +409,12 @@
             Group current = orderOnOrBefore;
             while (current != null)
             {
-                current.isBlocking = true;
-                current.isBlockingSignal.signalAll();
+                current.markBlocking();
                 current = current.prev;
             }
         }
 
         /**
-         * Register to be signalled once allPriorOpsAreFinished() or allPriorOpsAreFinishedOrSafe() may return true
-         */
-        public WaitQueue.Signal register()
-        {
-            return orderOnOrBefore.waiting.register();
-        }
-
-        /**
          * wait for all operations started prior to issuing the barrier to complete
          */
         public void await()
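
For context, the surrounding OpOrder API is used roughly as follows (a sketch of the existing producer/consumer pattern, not of new behaviour in this patch):

OpOrder order = new OpOrder();

// producer: all accesses to the shared resource happen inside a Group
try (OpOrder.Group op = order.start())
{
    // ... read from / append to the shared resource ...
}

// consumer: wait for every operation started before the barrier was issued, then reclaim
OpOrder.Barrier barrier = order.newBarrier();
barrier.issue();
barrier.await();
// ... the resource may now be recycled safely ...
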
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Promise.java b/src/java/org/apache/cassandra/utils/concurrent/Promise.java
new file mode 100644
index 0000000..d9e4623
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/Promise.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+
+import com.google.common.util.concurrent.FutureCallback;
+
+import io.netty.util.concurrent.GenericFutureListener;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A Promise that integrates {@link io.netty.util.concurrent.Promise} with our {@link Future} API
+ * to improve clarity and coherence in the codebase.
+ */
+@Shared(scope = SIMULATION, ancestors = INTERFACES)
+public interface Promise<V> extends io.netty.util.concurrent.Promise<V>, Future<V>
+{
+    public static <V> GenericFutureListener<? extends Future<V>> listener(FutureCallback<V> callback)
+    {
+        return future -> {
+            if (future.isSuccess()) callback.onSuccess(future.getNow());
+            else callback.onFailure(future.cause());
+        };
+    }
+
+    public static <V> GenericFutureListener<? extends Future<V>> listener(ExecutorService executor, FutureCallback<V> callback)
+    {
+        return future -> executor.execute(() -> {
+            if (future.isSuccess()) callback.onSuccess(future.getNow());
+            else callback.onFailure(future.cause());
+        });
+    }
+
+    @Override
+    Promise<V> addCallback(FutureCallback<? super V> callback);
+
+    Promise<V> addCallback(FutureCallback<? super V> callback, Executor executor);
+
+    Promise<V> addCallback(Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure);
+
+    @Override
+    Promise<V> addListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> var1);
+
+    @Override
+    Promise<V> addListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... var1);
+
+    @Override
+    Promise<V> removeListener(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> var1);
+
+    @Override
+    Promise<V> removeListeners(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>>... var1);
+
+    /**
+     * Complete the promise successfully if not already complete
+     * @throws IllegalStateException if already set
+     */
+    @Override
+    Promise<V> setSuccess(V v) throws IllegalStateException;
+
+    /**
+     * Complete the promise abnormally if not already complete
+     * @throws IllegalStateException if already set
+     */
+    @Override
+    Promise<V> setFailure(Throwable throwable) throws IllegalStateException;
+
+    /**
+     * Prevent a future caller from cancelling this promise
+     * @return true iff this invocation set it to uncancellable, whether or not now uncancellable
+     */
+    boolean setUncancellableExclusive();
+
+    /**
+     * Wait indefinitely for this promise to complete, throwing any interrupt
+     * @throws InterruptedException if interrupted
+     */
+    @Override
+    Promise<V> await() throws InterruptedException;
+
+    /**
+     * Wait indefinitely for this promise to complete
+     */
+    @Override
+    Promise<V> awaitUninterruptibly();
+
+    /**
+     * Wait indefinitely for this promise to complete, throwing any interrupt as an UncheckedInterruptedException
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    @Override
+    Promise<V> awaitThrowUncheckedOnInterrupt();
+
+    /**
+     * Waits for completion; in case of failure rethrows the original exception without wrapping it in a new exception,
+     * so the reported stack trace may not reflect the caller of this method
+     */
+    @Override
+    Promise<V> sync() throws InterruptedException;
+
+    /**
+     * Waits for completion; in case of failure rethrows the original exception without wrapping it in a new exception,
+     * so the reported stack trace may not reflect the caller of this method
+     */
+    @Override
+    Promise<V> syncUninterruptibly();
+}
+
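
A minimal sketch of the intended call pattern, using AsyncPromise (the implementation used elsewhere in this package):

Promise<String> promise = new AsyncPromise<>();

promise.addCallback(new FutureCallback<String>()
{
    public void onSuccess(String result) { /* consume the result */ }
    public void onFailure(Throwable cause) { /* handle the failure */ }
});

promise.setSuccess("value"); // completes the promise; callbacks and listeners fire exactly once
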
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Ref.java b/src/java/org/apache/cassandra/utils/concurrent/Ref.java
index 121e71d..90650cf 100644
--- a/src/java/org/apache/cassandra/utils/concurrent/Ref.java
+++ b/src/java/org/apache/cassandra/utils/concurrent/Ref.java
@@ -31,14 +31,13 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 
-import org.apache.cassandra.concurrent.InfiniteLoopExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.concurrent.Shutdownable;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.lifecycle.View;
@@ -48,12 +47,15 @@
 import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.NoSpamLogger;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.Shared;
+
 import org.cliffc.high_scale_lib.NonBlockingHashMap;
 
 import static java.util.Collections.emptyList;
 
-import static org.apache.cassandra.utils.ExecutorUtils.awaitTermination;
-import static org.apache.cassandra.utils.ExecutorUtils.shutdownNow;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe.UNSAFE;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
 import static org.apache.cassandra.utils.Throwables.maybeFail;
 import static org.apache.cassandra.utils.Throwables.merge;
 
@@ -92,6 +94,13 @@
 {
     static final Logger logger = LoggerFactory.getLogger(Ref.class);
     public static final boolean DEBUG_ENABLED = System.getProperty("cassandra.debugrefcount", "false").equalsIgnoreCase("true");
+    static OnLeak ON_LEAK;
+
+    @Shared(scope = SIMULATION)
+    public interface OnLeak
+    {
+        void onLeak(Object state);
+    }
 
     final State state;
     final T referent;
@@ -182,7 +191,7 @@
 
         private static final AtomicIntegerFieldUpdater<State> releasedUpdater = AtomicIntegerFieldUpdater.newUpdater(State.class, "released");
 
-        public State(final GlobalState globalState, Ref reference, ReferenceQueue<? super Ref> q)
+        State(final GlobalState globalState, Ref reference, ReferenceQueue<? super Ref> q)
         {
             super(reference, q);
             this.globalState = globalState;
@@ -228,6 +237,9 @@
                 logger.error("LEAK DETECTED: a reference ({}) to {} was not released before the reference was garbage collected", id, globalState);
                 if (DEBUG_ENABLED)
                     debug.log(id);
+                OnLeak onLeak = ON_LEAK;
+                if (onLeak != null)
+                    onLeak.onLeak(this);
             }
             else if (DEBUG_ENABLED)
             {
@@ -236,6 +248,12 @@
             if (fail != null)
                 logger.error("Error when closing {}", globalState, fail);
         }
+
+        @Override
+        public String toString()
+        {
+            return globalState.toString();
+        }
     }
 
     static final class Debug
@@ -358,8 +376,8 @@
     static final Set<Class<?>> concurrentIterables = Collections.newSetFromMap(new IdentityHashMap<>());
     private static final Set<GlobalState> globallyExtant = Collections.newSetFromMap(new ConcurrentHashMap<>());
     static final ReferenceQueue<Object> referenceQueue = new ReferenceQueue<>();
-    private static final InfiniteLoopExecutor EXEC = new InfiniteLoopExecutor("Reference-Reaper", Ref::reapOneReference).start();
-    static final ScheduledExecutorService STRONG_LEAK_DETECTOR = !DEBUG_ENABLED ? null : Executors.newScheduledThreadPool(1, new NamedThreadFactory("Strong-Reference-Leak-Detector"));
+    private static final Shutdownable EXEC = executorFactory().infiniteLoop("Reference-Reaper", Ref::reapOneReference, UNSAFE);
+    static final ScheduledExecutorService STRONG_LEAK_DETECTOR = !DEBUG_ENABLED ? null : executorFactory().scheduled("Strong-Reference-Leak-Detector");
     static
     {
         if (DEBUG_ENABLED)
@@ -679,7 +697,10 @@
         {
             final Set<Tidy> candidates = Collections.newSetFromMap(new IdentityHashMap<>());
             for (GlobalState state : globallyExtant)
-                candidates.add(state.tidy);
+            {
+                if (state.tidy != null)
+                    candidates.add(state.tidy);
+            }
             removeExpected(candidates);
             this.candidates.retainAll(candidates);
             if (!this.candidates.isEmpty())
@@ -707,6 +728,11 @@
         }
     }
 
+    public static void setOnLeak(OnLeak onLeak)
+    {
+        ON_LEAK = onLeak;
+    }
+
     @VisibleForTesting
     public static void shutdownReferenceReaper(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
     {
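
The new hook is installed once, e.g. by a test harness or the simulator; a minimal sketch:

AtomicInteger leakedRefs = new AtomicInteger();
Ref.setOnLeak(state -> {
    leakedRefs.incrementAndGet();                  // e.g. fail the test or simulation run afterwards
    System.err.println("LEAK DETECTED: " + state); // state.toString() delegates to the leaked GlobalState
});
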
diff --git a/src/java/org/apache/cassandra/utils/concurrent/RunnableFuture.java b/src/java/org/apache/cassandra/utils/concurrent/RunnableFuture.java
new file mode 100644
index 0000000..6ec1daa4
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/RunnableFuture.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface RunnableFuture<V> extends Future<V>, java.util.concurrent.RunnableFuture<V>
+{
+}
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Semaphore.java b/src/java/org/apache/cassandra/utils/concurrent/Semaphore.java
new file mode 100644
index 0000000..01c52c5
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/Semaphore.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.utils.Intercept;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface Semaphore
+{
+    /**
+     * @return the number of permits presently in this semaphore
+     */
+    int permits();
+
+    /**
+     * Set the number of permits in this semaphore to zero
+     * @return the number of permits that were drained
+     */
+    int drain();
+
+    /**
+     * Increase the number of available permits and signal any waiters that may be served by the release
+     */
+    void release(int permits);
+
+    /**
+     * Try to take permits, returning immediately
+     * @return true iff permits acquired
+     */
+    boolean tryAcquire(int acquire);
+
+    /**
+     * Try to take permits, waiting up to timeout
+     * @return true iff permits acquired
+     * @throws InterruptedException if interrupted
+     */
+    boolean tryAcquire(int acquire, long time, TimeUnit unit) throws InterruptedException;
+
+    /**
+     * Try to take permits, waiting until the deadline
+     * @return true iff permits acquired
+     * @throws InterruptedException if interrupted
+     */
+    boolean tryAcquireUntil(int acquire, long nanoTimeDeadline) throws InterruptedException;
+
+    /**
+     * Take permits, waiting indefinitely until available
+     * @throws InterruptedException if interrupted
+     */
+    void acquire(int acquire) throws InterruptedException;
+
+    /**
+     * Take permits, waiting indefinitely until available
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    void acquireThrowUncheckedOnInterrupt(int acquire) throws UncheckedInterruptedException;
+
+    /**
+     * Factory method used to capture and redirect instantiations for simulation
+     *
+     * Construct an unfair Semaphore initially holding the specified number of permits
+     */
+    @Intercept
+    public static Semaphore newSemaphore(int permits)
+    {
+        return new Standard(permits, false);
+    }
+
+    /**
+     * Factory method used to capture and redirect instantiations for simulation
+     *
+     * Construct a fair Semaphore initially holding the specified number of permits
+     */
+    @Intercept
+    public static Semaphore newFairSemaphore(int permits)
+    {
+        return new Standard(permits, true);
+    }
+
+    public static class Standard extends java.util.concurrent.Semaphore implements Semaphore
+    {
+        public Standard(int permits)
+        {
+            this(permits, false);
+        }
+
+        public Standard(int permits, boolean fair)
+        {
+            super(permits, fair);
+        }
+
+        /**
+         * {@link Semaphore#drain()}
+         */
+        public int drain()
+        {
+            return drainPermits();
+        }
+
+        /**
+         * Number of permits that are available to be acquired. {@link Semaphore#permits()}
+         */
+        public int permits()
+        {
+            return availablePermits();
+        }
+
+        /**
+         * Estimated number of threads waiting to acquire permits
+         */
+        public int waiting()
+        {
+            return getQueueLength();
+        }
+
+        /**
+         * {@link Semaphore#tryAcquireUntil(int, long)}
+         */
+        public boolean tryAcquireUntil(int acquire, long nanoTimeDeadline) throws InterruptedException
+        {
+            long wait = nanoTimeDeadline - System.nanoTime();
+            return tryAcquire(acquire, Math.max(0, wait), TimeUnit.NANOSECONDS);
+        }
+
+        @Override
+        public void acquireThrowUncheckedOnInterrupt(int acquire) throws UncheckedInterruptedException
+        {
+            try
+            {
+                acquire(acquire);
+            }
+            catch (InterruptedException e)
+            {
+                throw new UncheckedInterruptedException(e);
+            }
+        }
+    }
+}
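
A brief usage sketch of the interface through its factory methods (which exist so the simulator can intercept construction):

Semaphore inflight = Semaphore.newSemaphore(8); // unfair semaphore holding 8 permits

if (inflight.tryAcquire(1))
{
    try
    {
        // ... perform bounded work ...
    }
    finally
    {
        inflight.release(1);
    }
}
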
diff --git a/src/java/org/apache/cassandra/utils/concurrent/SimpleCondition.java b/src/java/org/apache/cassandra/utils/concurrent/SimpleCondition.java
deleted file mode 100644
index 61ec640..0000000
--- a/src/java/org/apache/cassandra/utils/concurrent/SimpleCondition.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils.concurrent;
-
-import java.util.Date;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import java.util.concurrent.locks.Condition;
-
-// fulfils the Condition interface without spurious wakeup problems
-// (or lost notify problems either: that is, even if you call await()
-// _after_ signal(), it will work as desired.)
-public class SimpleCondition implements Condition
-{
-    private static final AtomicReferenceFieldUpdater<SimpleCondition, WaitQueue> waitingUpdater = AtomicReferenceFieldUpdater.newUpdater(SimpleCondition.class, WaitQueue.class, "waiting");
-
-    private volatile WaitQueue waiting;
-    private volatile boolean signaled = false;
-
-    @Override
-    public void await() throws InterruptedException
-    {
-        if (isSignaled())
-            return;
-        if (waiting == null)
-            waitingUpdater.compareAndSet(this, null, new WaitQueue());
-        WaitQueue.Signal s = waiting.register();
-        if (isSignaled())
-            s.cancel();
-        else
-            s.await();
-        assert isSignaled();
-    }
-
-    public boolean await(long time, TimeUnit unit) throws InterruptedException
-    {
-        long start = System.nanoTime();
-        long until = start + unit.toNanos(time);
-        return awaitUntil(until);
-    }
-
-    public boolean awaitUntil(long deadlineNanos) throws InterruptedException
-    {
-        if (isSignaled())
-            return true;
-        if (waiting == null)
-            waitingUpdater.compareAndSet(this, null, new WaitQueue());
-        WaitQueue.Signal s = waiting.register();
-        if (isSignaled())
-        {
-            s.cancel();
-            return true;
-        }
-        return s.awaitUntil(deadlineNanos) || isSignaled();
-    }
-
-    public void signal()
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    public boolean isSignaled()
-    {
-        return signaled;
-    }
-
-    public void signalAll()
-    {
-        signaled = true;
-        if (waiting != null)
-            waiting.signalAll();
-    }
-
-    public void awaitUninterruptibly()
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public long awaitNanos(long nanosTimeout)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public boolean awaitUntil(Date deadline)
-    {
-        throw new UnsupportedOperationException();
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/concurrent/SyncFuture.java b/src/java/org/apache/cassandra/utils/concurrent/SyncFuture.java
new file mode 100644
index 0000000..2a3598a
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/SyncFuture.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Executor;
+import java.util.function.Function;
+
+import javax.annotation.Nullable;
+
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.ListenableFuture; // checkstyle: permit this import
+
+import io.netty.util.concurrent.GenericFutureListener;
+
+import static org.apache.cassandra.utils.concurrent.Awaitable.SyncAwaitable.waitUntil;
+
+/**
+ * Netty's DefaultPromise uses a mutex to coordinate notifiers AND waiters between the eventLoop and the other threads.
+ * Since we register cross-thread listeners, this has the potential to block internode messaging for an unknown
+ * number of threads for an unknown period of time, if we are unlucky with the scheduler (which will certainly
+ * happen, just with some unknown but low periodicity).
+ *
+ * At the same time, we manage some other efficiencies:
+ *  - We save some space when registering listeners, especially if there is only one listener, as we perform no
+ *    extra allocations in this case.
+ *  - We permit efficient initial state declaration, avoiding unnecessary CAS or lock acquisitions when mutating
+ *    a Promise we are ourselves constructing (and can easily add more; only those we use have been added)
+ *
+ * We can also make some guarantees about our behaviour here, although we primarily mirror Netty.
+ * Specifically, we can guarantee that notifiers are always invoked in the order they are added (which may be true
+ * for netty, but was unclear and is not declared).  This is useful for ensuring the correctness of some of our
+ * behaviours in OutboundConnection without having to jump through extra hoops.
+ *
+ * The implementation loosely follows that of Netty's DefaultPromise, with some slight changes; notably that we have
+ * no synchronisation on our listeners, instead using a CoW list that is cleared each time we notify listeners.
+ *
+ * We handle special values slightly differently.  We do not use a special value for null, instead using
+ * a special value to indicate the result has not been set yet.  This means that once isSuccess() holds,
+ * the result must be a correctly typed object (modulo generics pitfalls).
+ * All special values are also instances of FailureHolder, which simplifies a number of the logical conditions.
+ *
+ * @param <V> the type of the result value
+ */
+public class SyncFuture<V> extends AbstractFuture<V>
+{
+    protected SyncFuture()
+    {
+        super();
+    }
+
+    protected SyncFuture(V immediateSuccess)
+    {
+        super(immediateSuccess);
+    }
+
+    protected SyncFuture(Throwable immediateFailure)
+    {
+        super(immediateFailure);
+    }
+
+    protected SyncFuture(FailureHolder initialState)
+    {
+        super(initialState);
+    }
+
+    protected SyncFuture(GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        super(listener);
+    }
+
+    protected SyncFuture(FailureHolder initialState, GenericFutureListener<? extends io.netty.util.concurrent.Future<? super V>> listener)
+    {
+        super(initialState, listener);
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transform} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public <T> Future<T> map(Function<? super V, ? extends T> mapper, Executor executor)
+    {
+        return map(new SyncFuture<>(), mapper, executor);
+    }
+
+    /**
+     * Support {@link com.google.common.util.concurrent.Futures#transformAsync(ListenableFuture, AsyncFunction, Executor)} natively
+     *
+     * See {@link #addListener(GenericFutureListener)} for ordering semantics.
+     */
+    @Override
+    public <T> Future<T> flatMap(Function<? super V, ? extends Future<T>> flatMapper, @Nullable Executor executor)
+    {
+        return flatMap(new SyncFuture<>(), flatMapper, executor);
+    }
+
+    /**
+     * Shared implementation of various promise completion methods.
+     * Updates the result if it is possible to do so, returning success/failure.
+     *
+     * If the promise is UNSET the new value will succeed;
+     *          if it is UNCANCELLABLE it will succeed only if the new value is not CANCELLED
+     *          otherwise it will fail, as isDone() is implied
+     *
+     * If the update succeeds, and the new state implies isDone(), any listeners and waiters will be notified
+     */
+    synchronized boolean trySet(Object v)
+    {
+        Object current = result;
+        if (isDone(current) || (current == UNCANCELLABLE && (v == CANCELLED || v == UNCANCELLABLE)))
+            return false;
+
+        resultUpdater.lazySet(this, v);
+        if (v != UNCANCELLABLE)
+        {
+            notifyListeners();
+            notifyAll();
+        }
+        return true;
+    }
+
+    public synchronized boolean awaitUntil(long deadline) throws InterruptedException
+    {
+        if (isDone())
+            return true;
+
+        waitUntil(this, deadline);
+        return isDone();
+    }
+
+    public synchronized Future<V> await() throws InterruptedException
+    {
+        while (!isDone())
+            wait();
+        return this;
+    }
+
+    /**
+     * Logically append {@code newListener} to {@link #listeners}
+     * (at this stage it is a stack, so we actually prepend)
+     */
+    synchronized void appendListener(ListenerList<V> newListener)
+    {
+        ListenerList.pushExclusive(listenersUpdater, this, newListener);
+        if (isDone())
+            notifyListeners();
+    }
+
+    private void notifyListeners()
+    {
+        ListenerList.notifyExclusive(listeners, this);
+        listenersUpdater.lazySet(this, null);
+    }
+}
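
To make the listener-ordering and native map() support described in the class comment concrete, a hypothetical sketch (not part of this patch); it uses SyncPromise, introduced later in this same patch, as the concrete subclass because SyncFuture's constructors are protected, and assumes Runnable::run as a same-thread Executor.

import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.SyncPromise;

public class SyncFutureMapSketch
{
    public static void main(String[] args) throws Exception
    {
        SyncPromise<String> promise = new SyncPromise<>();
        // transform the eventual result without going through Guava's Futures.transform
        Future<Integer> length = promise.map(String::length, Runnable::run);
        promise.trySuccess("cassandra");
        assert length.get() == 9;     // "cassandra".length(), run with -ea
    }
}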
diff --git a/src/java/org/apache/cassandra/utils/concurrent/SyncPromise.java b/src/java/org/apache/cassandra/utils/concurrent/SyncPromise.java
new file mode 100644
index 0000000..fc5fb81
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/SyncPromise.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.Executor;
+import java.util.function.Consumer;
+
+import com.google.common.util.concurrent.FutureCallback;
+
+import io.netty.util.concurrent.Future; // checkstyle: permit this import
+import io.netty.util.concurrent.GenericFutureListener;
+
+/**
+ * Extends {@link SyncFuture} to implement the {@link Promise} interface.
+ */
+public class SyncPromise<V> extends SyncFuture<V> implements Promise<V>
+{
+    public static class WithExecutor<V> extends SyncPromise<V>
+    {
+        final Executor notifyExecutor;
+        protected WithExecutor(Executor notifyExecutor)
+        {
+            this.notifyExecutor = notifyExecutor;
+        }
+
+        protected WithExecutor(Executor notifyExecutor, FailureHolder initialState)
+        {
+            super(initialState);
+            this.notifyExecutor = notifyExecutor;
+        }
+
+        protected WithExecutor(Executor notifyExecutor, GenericFutureListener<? extends Future<? super V>> listener)
+        {
+            super(listener);
+            this.notifyExecutor = notifyExecutor;
+        }
+
+        @Override
+        public Executor notifyExecutor()
+        {
+            return notifyExecutor;
+        }
+    }
+
+    public SyncPromise() {}
+
+    SyncPromise(FailureHolder initialState)
+    {
+        super(initialState);
+    }
+
+    public SyncPromise(GenericFutureListener<? extends Future<? super V>> listener)
+    {
+        super(listener);
+    }
+
+    SyncPromise(FailureHolder initialState, GenericFutureListener<? extends Future<? super V>> listener)
+    {
+        super(initialState, listener);
+    }
+
+    public static <V> SyncPromise<V> withExecutor(Executor executor)
+    {
+        return new SyncPromise.WithExecutor<>(executor);
+    }
+
+    public static <V> SyncPromise<V> uncancellable()
+    {
+        return new SyncPromise<>(UNCANCELLABLE);
+    }
+
+    public static <V> SyncPromise<V> uncancellable(Executor executor)
+    {
+        return new WithExecutor<>(executor, UNCANCELLABLE);
+    }
+
+    public static <V> SyncPromise<V> uncancellable(GenericFutureListener<? extends Future<? super V>> listener)
+    {
+        return new SyncPromise<>(UNCANCELLABLE, listener);
+    }
+
+    /**
+     * Complete the promise successfully if not already complete
+     * @throws IllegalStateException if already set
+     */
+    @Override
+    public Promise<V> setSuccess(V v)
+    {
+        if (!trySuccess(v))
+            throw new IllegalStateException("complete already: " + this);
+        return this;
+    }
+
+    /**
+     * Complete the promise successfully if not already complete
+     * @return true iff completed promise
+     */
+    @Override
+    public boolean trySuccess(V v)
+    {
+        return super.trySuccess(v);
+    }
+
+    /**
+     * Complete the promise abnormally if not already complete
+     * @throws IllegalStateException if already set
+     */
+    @Override
+    public Promise<V> setFailure(Throwable throwable)
+    {
+        if (!tryFailure(throwable))
+            throw new IllegalStateException("complete already: " + this);
+        return this;
+    }
+
+    /**
+     * Complete the promise abnormally if not already complete
+     * @return true iff completed promise
+     */
+    @Override
+    public boolean tryFailure(Throwable throwable)
+    {
+        return super.tryFailure(throwable);
+    }
+
+    /**
+     * Prevent a future caller from cancelling this promise
+     * @return true if the promise is now uncancellable (whether or not we did this)
+     */
+    @Override
+    public boolean setUncancellable()
+    {
+        return super.setUncancellable();
+    }
+
+    /**
+     * Prevent a future caller from cancelling this promise
+     * @return true iff this invocation set it to uncancellable, whether or not now uncancellable
+     */
+    @Override
+    public boolean setUncancellableExclusive()
+    {
+        return super.setUncancellableExclusive();
+    }
+
+    @Override
+    public boolean isUncancellable()
+    {
+        return super.isUncancellable();
+    }
+
+    /**
+     * Waits for completion; in case of failure, rethrows the original exception without a new wrapping exception,
+     * so it may cause problems for reporting stack traces
+     */
+    @Override
+    public Promise<V> sync() throws InterruptedException
+    {
+        super.sync();
+        return this;
+    }
+
+    /**
+     * Waits for completion; in case of failure, rethrows the original exception without a new wrapping exception,
+     * so it may cause problems for reporting stack traces
+     */
+    @Override
+    public Promise<V> syncUninterruptibly()
+    {
+        super.syncUninterruptibly();
+        return this;
+    }
+
+    @Override
+    public SyncPromise<V> addListener(GenericFutureListener<? extends Future<? super V>> listener)
+    {
+        super.addListener(listener);
+        return this;
+    }
+
+    @Override
+    public SyncPromise<V> addListeners(GenericFutureListener<? extends Future<? super V>>... listeners)
+    {
+        super.addListeners(listeners);
+        return this;
+    }
+
+    @Override
+    public SyncPromise<V> removeListener(GenericFutureListener<? extends Future<? super V>> listener)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public SyncPromise<V> removeListeners(GenericFutureListener<? extends Future<? super V>>... listeners)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public SyncPromise<V> addCallback(FutureCallback<? super V> callback)
+    {
+        super.addCallback(callback);
+        return this;
+    }
+
+    @Override
+    public SyncPromise<V> addCallback(FutureCallback<? super V> callback, Executor executor)
+    {
+        super.addCallback(callback, executor);
+        return this;
+    }
+
+    @Override
+    public SyncPromise<V> addCallback(Consumer<? super V> onSuccess, Consumer<? super Throwable> onFailure)
+    {
+        super.addCallback(onSuccess, onFailure);
+        return this;
+    }
+
+    /**
+     * Wait for this promise to complete
+     * @throws InterruptedException if interrupted
+     */
+    @Override
+    public SyncPromise<V> await() throws InterruptedException
+    {
+        super.await();
+        return this;
+    }
+
+    /**
+     * Wait uninterruptibly for this promise to complete
+     */
+    @Override
+    public SyncPromise<V> awaitUninterruptibly()
+    {
+        super.awaitUninterruptibly();
+        return this;
+    }
+
+    /**
+     * Wait for this promise to complete, throwing any interrupt as an UncheckedInterruptedException
+     * @throws UncheckedInterruptedException if interrupted
+     */
+    @Override
+    public SyncPromise<V> awaitThrowUncheckedOnInterrupt() throws UncheckedInterruptedException
+    {
+        super.awaitThrowUncheckedOnInterrupt();
+        return this;
+    }
+}
+
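
A brief hypothetical sketch (not part of this patch) contrasting setSuccess with trySuccess and showing the uncancellable() factory; per the trySet contract above, cancellation of an UNCANCELLABLE promise is refused.

import org.apache.cassandra.utils.concurrent.SyncPromise;

public class SyncPromiseCompletionSketch
{
    public static void main(String[] args)
    {
        SyncPromise<String> p = SyncPromise.uncancellable();
        assert !p.cancel(true);         // uncancellable: cancellation is refused
        assert p.trySuccess("first");   // completes the promise
        assert !p.trySuccess("second"); // already complete -> false
        try
        {
            p.setSuccess("third");      // already complete -> IllegalStateException
            assert false;
        }
        catch (IllegalStateException expected) {}
    }
}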
diff --git a/src/java/org/apache/cassandra/utils/concurrent/Threads.java b/src/java/org/apache/cassandra/utils/concurrent/Threads.java
new file mode 100644
index 0000000..f02519a
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/Threads.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.function.BinaryOperator;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collector;
+import java.util.stream.Stream;
+
+public class Threads
+{
+    public static class StackTraceCombiner implements Collector<StackTraceElement, StringBuilder, String>, Supplier<StringBuilder>, Function<StringBuilder, String>
+    {
+        final boolean printBriefPackages;
+        final String prefix;
+        final String delimiter;
+        final String suffix;
+
+        public StackTraceCombiner(boolean printBriefPackages, String prefix, String delimiter, String suffix)
+        {
+            this.printBriefPackages = printBriefPackages;
+            this.prefix = prefix;
+            this.delimiter = delimiter;
+            this.suffix = suffix;
+        }
+
+        public Supplier<StringBuilder> supplier()
+        {
+
+            return this;
+        }
+
+        public BiConsumer<StringBuilder, StackTraceElement> accumulator()
+        {
+            return (sb, ste) ->
+            {
+                if (sb.length() > prefix.length())
+                    sb.append(delimiter);
+
+                String className = ste.getClassName();
+
+                if (printBriefPackages)
+                {
+                    int afterPrevDot = 0;
+                    while (true)
+                    {
+                        int dot = className.indexOf('.', afterPrevDot);
+                        if (dot < 0)
+                            break;
+
+                        sb.append(className.charAt(afterPrevDot));
+                        sb.append('.');
+                        afterPrevDot = dot + 1;
+                    }
+                    sb.append(className, afterPrevDot, className.length());
+                }
+                else
+                {
+                    sb.append(className);
+                }
+                sb.append('.');
+                sb.append(ste.getMethodName());
+                sb.append(':');
+                sb.append(ste.getLineNumber());
+            };
+        }
+
+        public BinaryOperator<StringBuilder> combiner()
+        {
+            return (sb1, sb2) -> sb1.append("; ").append(sb2);
+        }
+
+        public Function<StringBuilder, String> finisher()
+        {
+            return this;
+        }
+
+        public Set<Characteristics> characteristics()
+        {
+            return Collections.emptySet();
+        }
+
+        public StringBuilder get()
+        {
+            return new StringBuilder(prefix);
+        }
+
+        public String apply(StringBuilder finish)
+        {
+            finish.append(suffix);
+            return finish.toString();
+        }
+    }
+
+    public static String prettyPrintStackTrace(Thread thread, boolean printBriefPackages, String delimiter)
+    {
+        return prettyPrint(thread.getStackTrace(), printBriefPackages, delimiter);
+    }
+
+    public static String prettyPrintStackTrace(Thread thread, boolean printBriefPackages, String prefix, String delimiter, String suffix)
+    {
+        return prettyPrint(thread.getStackTrace(), printBriefPackages, prefix, delimiter, suffix);
+    }
+
+    public static String prettyPrint(StackTraceElement[] st, boolean printBriefPackages, String delimiter)
+    {
+        return prettyPrint(st, printBriefPackages, "", delimiter, "");
+    }
+
+    public static String prettyPrint(StackTraceElement[] st, boolean printBriefPackages, String prefix, String delimiter, String suffix)
+    {
+        return Stream.of(st).collect(new StackTraceCombiner(printBriefPackages, prefix, delimiter, suffix));
+    }
+
+    public static String prettyPrint(Stream<StackTraceElement> st, boolean printBriefPackages, String prefix, String delimiter, String suffix)
+    {
+        return st.collect(new StackTraceCombiner(printBriefPackages, prefix, delimiter, suffix));
+    }
+
+}
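
A hypothetical usage sketch (not part of this patch) of the new helper: rendering the current thread's stack trace on a single line, with package names abbreviated to their initials.

import org.apache.cassandra.utils.concurrent.Threads;

public class ThreadsSketch
{
    public static void main(String[] args)
    {
        // e.g. "j.l.Thread.getStackTrace:...; o.a.c.u.c.Threads.prettyPrintStackTrace:...; ThreadsSketch.main:..."
        String trace = Threads.prettyPrintStackTrace(Thread.currentThread(), true, "; ");
        System.out.println(trace);
    }
}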
diff --git a/src/java/org/apache/cassandra/utils/concurrent/UncheckedInterruptedException.java b/src/java/org/apache/cassandra/utils/concurrent/UncheckedInterruptedException.java
new file mode 100644
index 0000000..d7248e8
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/concurrent/UncheckedInterruptedException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Unchecked {@link InterruptedException}, to be thrown in places where an interrupt is unexpected
+ */
+@Shared(scope = SIMULATION)
+public class UncheckedInterruptedException extends RuntimeException
+{
+    public UncheckedInterruptedException()
+    {
+    }
+    public UncheckedInterruptedException(InterruptedException cause)
+    {
+        super(cause);
+    }
+}
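
A hypothetical sketch (not part of this patch) of the wrap-and-rethrow pattern this exception supports, for call sites that cannot declare InterruptedException.

import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;

public final class UninterruptibleCallSketch
{
    public static void sleepUninterruptibly(long millis)
    {
        try
        {
            Thread.sleep(millis);
        }
        catch (InterruptedException e)
        {
            Thread.currentThread().interrupt();           // preserve the interrupt flag
            throw new UncheckedInterruptedException(e);   // surface it as an unchecked exception
        }
    }
}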
diff --git a/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java b/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java
index 3647623..e9dcdf8 100644
--- a/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java
+++ b/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java
@@ -23,8 +23,15 @@
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 import java.util.concurrent.locks.LockSupport;
 import java.util.function.BooleanSupplier;
+import java.util.function.Consumer;
 
-import com.codahale.metrics.Timer;
+import org.apache.cassandra.utils.Intercept;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.Awaitable.AbstractAwaitable;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
 
 /**
  * <p>A relatively easy to use utility for general purpose thread signalling.</p>
@@ -67,131 +74,12 @@
  * to be met that we no longer need.
  * <p>5. This scheme is not fair</p>
  * <p>6. Only the thread that calls register() may call await()</p>
+ *
+ * TODO: this class should not be backed by CLQ (should use an intrusive linked-list with lower overhead)
  */
-public final class WaitQueue
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface WaitQueue
 {
-
-    private static final int CANCELLED = -1;
-    private static final int SIGNALLED = 1;
-    private static final int NOT_SET = 0;
-
-    private static final AtomicIntegerFieldUpdater signalledUpdater = AtomicIntegerFieldUpdater.newUpdater(RegisteredSignal.class, "state");
-
-    // the waiting signals
-    private final ConcurrentLinkedQueue<RegisteredSignal> queue = new ConcurrentLinkedQueue<>();
-
-    /**
-     * The calling thread MUST be the thread that uses the signal
-     * @return                                x
-     */
-    public Signal register()
-    {
-        RegisteredSignal signal = new RegisteredSignal();
-        queue.add(signal);
-        return signal;
-    }
-
-    /**
-     * The calling thread MUST be the thread that uses the signal.
-     * If the Signal is waited on, context.stop() will be called when the wait times out, the Signal is signalled,
-     * or the waiting thread is interrupted.
-     * @return
-     */
-    public Signal register(Timer.Context context)
-    {
-        assert context != null;
-        RegisteredSignal signal = new TimedSignal(context);
-        queue.add(signal);
-        return signal;
-    }
-
-    /**
-     * Signal one waiting thread
-     */
-    public boolean signal()
-    {
-        if (!hasWaiters())
-            return false;
-        while (true)
-        {
-            RegisteredSignal s = queue.poll();
-            if (s == null || s.signal() != null)
-                return s != null;
-        }
-    }
-
-    /**
-     * Signal all waiting threads
-     */
-    public void signalAll()
-    {
-        if (!hasWaiters())
-            return;
-
-        // to avoid a race where the condition is not met and the woken thread managed to wait on the queue before
-        // we finish signalling it all, we pick a random thread we have woken-up and hold onto it, so that if we encounter
-        // it again we know we're looping. We reselect a random thread periodically, progressively less often.
-        // the "correct" solution to this problem is to use a queue that permits snapshot iteration, but this solution is sufficient
-        int i = 0, s = 5;
-        Thread randomThread = null;
-        Iterator<RegisteredSignal> iter = queue.iterator();
-        while (iter.hasNext())
-        {
-            RegisteredSignal signal = iter.next();
-            Thread signalled = signal.signal();
-
-            if (signalled != null)
-            {
-                if (signalled == randomThread)
-                    break;
-
-                if (++i == s)
-                {
-                    randomThread = signalled;
-                    s <<= 1;
-                }
-            }
-
-            iter.remove();
-        }
-    }
-
-    private void cleanUpCancelled()
-    {
-        // TODO: attempt to remove the cancelled from the beginning only (need atomic cas of head)
-        Iterator<RegisteredSignal> iter = queue.iterator();
-        while (iter.hasNext())
-        {
-            RegisteredSignal s = iter.next();
-            if (s.isCancelled())
-                iter.remove();
-        }
-    }
-
-    public boolean hasWaiters()
-    {
-        return !queue.isEmpty();
-    }
-
-    /**
-     * Return how many threads are waiting
-     * @return
-     */
-    public int getWaiting()
-    {
-        if (!hasWaiters())
-            return 0;
-        Iterator<RegisteredSignal> iter = queue.iterator();
-        int count = 0;
-        while (iter.hasNext())
-        {
-            Signal next = iter.next();
-            if (!next.isCancelled())
-                count++;
-        }
-        return count;
-    }
-
     /**
      * A Signal is a one-time-use mechanism for a thread to wait for notification that some condition
      * state has transitioned that it may be interested in (and hence should check if it is).
@@ -207,14 +95,8 @@
      * thread that registered itself with WaitQueue(s) to obtain the underlying RegisteredSignal(s);
      * only the owning thread should use a Signal.
      */
-    public static interface Signal
+    public static interface Signal extends Condition
     {
-
-        /**
-         * @return true if signalled; once true, must be discarded by the owning thread.
-         */
-        public boolean isSignalled();
-
         /**
          * @return true if cancelled; once cancelled, must be discarded by the owning thread.
          */
@@ -238,324 +120,318 @@
          * and if signalled propagates the signal to another waiting thread
          */
         public abstract void cancel();
-
-        /**
-         * Wait, without throwing InterruptedException, until signalled. On exit isSignalled() must be true.
-         * If the thread is interrupted in the meantime, the interrupted flag will be set.
-         */
-        public void awaitUninterruptibly();
-
-        /**
-         * Wait until signalled, or throw an InterruptedException if interrupted before this happens.
-         * On normal exit isSignalled() must be true; however if InterruptedException is thrown isCancelled()
-         * will be true.
-         * @throws InterruptedException
-         */
-        public void await() throws InterruptedException;
-
-        /**
-         * Wait until signalled, or the provided time is reached, or the thread is interrupted. If signalled,
-         * isSignalled() will be true on exit, and the method will return true; if timedout, the method will return
-         * false and isCancelled() will be true; if interrupted an InterruptedException will be thrown and isCancelled()
-         * will be true.
-         * @param nanos System.nanoTime() to wait until
-         * @return true if signalled, false if timed out
-         * @throws InterruptedException
-         */
-        public boolean awaitUntil(long nanos) throws InterruptedException;
-
-        /**
-         * Wait until signalled, or the provided time is reached, or the thread is interrupted. If signalled,
-         * isSignalled() will be true on exit, and the method will return true; if timedout, the method will return
-         * false and isCancelled() will be true
-         * @param nanos System.nanoTime() to wait until
-         * @return true if signalled, false if timed out
-         */
-        public boolean awaitUntilUninterruptibly(long nanos);
     }
 
     /**
-     * An abstract signal implementation
+     * The calling thread MUST be the thread that uses the signal
      */
-    public static abstract class AbstractSignal implements Signal
+    public Signal register();
+
+    /**
+     * The calling thread MUST be the thread that uses the signal.
+     * If the Signal is waited on, {@code receiveOnDone.accept(supplyOnDone)} will be invoked when the wait
+     * times out, the Signal is signalled, or the waiting thread is interrupted.
+     */
+    public <V> Signal register(V supplyOnDone, Consumer<V> receiveOnDone);
+
+    /**
+     * Signal one waiting thread
+     */
+    public boolean signal();
+
+    /**
+     * Signal all waiting threads
+     */
+    public void signalAll();
+
+    /** getWaiting() > 0 */
+    public boolean hasWaiters();
+
+    /** Return how many threads are waiting */
+    public int getWaiting();
+
+    /**
+     * Factory method used to capture and redirect instantiations for simulation
+     */
+    @Intercept
+    public static WaitQueue newWaitQueue()
     {
-        public void awaitUninterruptibly()
+        return new Standard();
+    }
+
+    class Standard implements WaitQueue
+    {
+        private static final int CANCELLED = -1;
+        private static final int SIGNALLED = 1;
+        private static final int NOT_SET = 0;
+
+        private static final AtomicIntegerFieldUpdater<RegisteredSignal> signalledUpdater = AtomicIntegerFieldUpdater.newUpdater(RegisteredSignal.class, "state");
+
+        // the waiting signals
+        private final ConcurrentLinkedQueue<RegisteredSignal> queue = new ConcurrentLinkedQueue<>();
+
+        protected Standard() {}
+
+        /**
+         * The calling thread MUST be the thread that uses the signal
+         * @return a Signal registered with this queue
+         */
+        public Signal register()
         {
-            boolean interrupted = false;
-            while (!isSignalled())
+            RegisteredSignal signal = new RegisteredSignal();
+            queue.add(signal);
+            return signal;
+        }
+
+        /**
+         * The calling thread MUST be the thread that uses the signal.
+         * If the Signal is waited on, {@code receiveOnDone.accept(supplyOnDone)} will be invoked when the wait
+         * times out, the Signal is signalled, or the waiting thread is interrupted.
+         */
+        public <V> Signal register(V supplyOnDone, Consumer<V> receiveOnDone)
+        {
+            RegisteredSignal signal = new SignalWithListener<>(supplyOnDone, receiveOnDone);
+            queue.add(signal);
+            return signal;
+        }
+
+        /**
+         * Signal one waiting thread
+         */
+        public boolean signal()
+        {
+            while (true)
+            {
+                RegisteredSignal s = queue.poll();
+                if (s == null || s.doSignal() != null)
+                    return s != null;
+            }
+        }
+
+        /**
+         * Signal all waiting threads
+         */
+        public void signalAll()
+        {
+            if (!hasWaiters())
+                return;
+
+            // to avoid a race where the condition is not met and the woken thread managed to wait on the queue before
+            // we finish signalling it all, we pick a random thread we have woken-up and hold onto it, so that if we encounter
+            // it again we know we're looping. We reselect a random thread periodically, progressively less often.
+            // the "correct" solution to this problem is to use a queue that permits snapshot iteration, but this solution is sufficient
+            // TODO: this is only necessary because we use CLQ - which is only for historical any-NIH reasons
+            int i = 0, s = 5;
+            Thread randomThread = null;
+            Iterator<RegisteredSignal> iter = queue.iterator();
+            while (iter.hasNext())
+            {
+                RegisteredSignal signal = iter.next();
+                Thread signalled = signal.doSignal();
+
+                if (signalled != null)
+                {
+                    if (signalled == randomThread)
+                        break;
+
+                    if (++i == s)
+                    {
+                        randomThread = signalled;
+                        s <<= 1;
+                    }
+                }
+
+                iter.remove();
+            }
+        }
+
+        private void cleanUpCancelled()
+        {
+            // TODO: attempt to remove the cancelled from the beginning only (need atomic cas of head)
+            queue.removeIf(RegisteredSignal::isCancelled);
+        }
+
+        public boolean hasWaiters()
+        {
+            return !queue.isEmpty();
+        }
+
+        /**
+         * @return how many threads are waiting
+         */
+        public int getWaiting()
+        {
+            if (!hasWaiters())
+                return 0;
+            Iterator<RegisteredSignal> iter = queue.iterator();
+            int count = 0;
+            while (iter.hasNext())
+            {
+                Signal next = iter.next();
+                if (!next.isCancelled())
+                    count++;
+            }
+            return count;
+        }
+
+        /**
+         * An abstract signal implementation
+         *
+         * TODO: use intrusive linked list
+         */
+        public static abstract class AbstractSignal extends AbstractAwaitable implements Signal
+        {
+            public Signal await() throws InterruptedException
+            {
+                while (!isSignalled())
+                {
+                    checkInterrupted();
+                    LockSupport.park();
+                }
+                checkAndClear();
+                return this;
+            }
+
+            public boolean awaitUntil(long nanoTimeDeadline) throws InterruptedException
+            {
+                long now;
+                while (nanoTimeDeadline > (now = nanoTime()) && !isSignalled())
+                {
+                    checkInterrupted();
+                    long delta = nanoTimeDeadline - now;
+                    LockSupport.parkNanos(delta);
+                }
+                return checkAndClear();
+            }
+
+            private void checkInterrupted() throws InterruptedException
             {
                 if (Thread.interrupted())
-                    interrupted = true;
-                LockSupport.park();
+                {
+                    cancel();
+                    throw new InterruptedException();
+                }
             }
-            if (interrupted)
-                Thread.currentThread().interrupt();
-            checkAndClear();
-        }
-
-        public void await() throws InterruptedException
-        {
-            while (!isSignalled())
-            {
-                checkInterrupted();
-                LockSupport.park();
-            }
-            checkAndClear();
-        }
-
-        public boolean awaitUntil(long until) throws InterruptedException
-        {
-            long now;
-            while (until > (now = System.nanoTime()) && !isSignalled())
-            {
-                checkInterrupted();
-                long delta = until - now;
-                LockSupport.parkNanos(delta);
-            }
-            return checkAndClear();
-        }
-
-        public boolean awaitUntilUninterruptibly(long until)
-        {
-            long now;
-            while (until > (now = System.nanoTime()) && !isSignalled())
-            {
-                long delta = until - now;
-                LockSupport.parkNanos(delta);
-            }
-            return checkAndClear();
-        }
-
-        private void checkInterrupted() throws InterruptedException
-        {
-            if (Thread.interrupted())
-            {
-                cancel();
-                throw new InterruptedException();
-            }
-        }
-    }
-
-    /**
-     * A signal registered with this WaitQueue
-     */
-    private class RegisteredSignal extends AbstractSignal
-    {
-        private volatile Thread thread = Thread.currentThread();
-        volatile int state;
-
-        public boolean isSignalled()
-        {
-            return state == SIGNALLED;
-        }
-
-        public boolean isCancelled()
-        {
-            return state == CANCELLED;
-        }
-
-        public boolean isSet()
-        {
-            return state != NOT_SET;
-        }
-
-        private Thread signal()
-        {
-            if (!isSet() && signalledUpdater.compareAndSet(this, NOT_SET, SIGNALLED))
-            {
-                Thread thread = this.thread;
-                LockSupport.unpark(thread);
-                this.thread = null;
-                return thread;
-            }
-            return null;
-        }
-
-        public boolean checkAndClear()
-        {
-            if (!isSet() && signalledUpdater.compareAndSet(this, NOT_SET, CANCELLED))
-            {
-                thread = null;
-                cleanUpCancelled();
-                return false;
-            }
-            // must now be signalled assuming correct API usage
-            return true;
         }
 
         /**
-         * Should only be called by the registered thread. Indicates the signal can be retired,
-         * and if signalled propagates the signal to another waiting thread
+         * A signal registered with this WaitQueue
          */
-        public void cancel()
+        private class RegisteredSignal extends AbstractSignal
         {
-            if (isCancelled())
-                return;
-            if (!signalledUpdater.compareAndSet(this, NOT_SET, CANCELLED))
+            private volatile Thread thread = Thread.currentThread();
+            volatile int state;
+
+            public boolean isSignalled()
             {
-                // must already be signalled - switch to cancelled and
-                state = CANCELLED;
-                // propagate the signal
-                WaitQueue.this.signal();
+                return state == SIGNALLED;
             }
-            thread = null;
-            cleanUpCancelled();
-        }
-    }
 
-    /**
-     * A RegisteredSignal that stores a TimerContext, and stops the timer when either cancelled or
-     * finished waiting. i.e. if the timer is started when the signal is registered it tracks the
-     * time in between registering and invalidating the signal.
-     */
-    private final class TimedSignal extends RegisteredSignal
-    {
-        private final Timer.Context context;
-
-        private TimedSignal(Timer.Context context)
-        {
-            this.context = context;
-        }
-
-        @Override
-        public boolean checkAndClear()
-        {
-            context.stop();
-            return super.checkAndClear();
-        }
-
-        @Override
-        public void cancel()
-        {
-            if (!isCancelled())
+            public boolean isCancelled()
             {
-                context.stop();
-                super.cancel();
+                return state == CANCELLED;
+            }
+
+            public boolean isSet()
+            {
+                return state != NOT_SET;
+            }
+
+            private Thread doSignal()
+            {
+                if (!isSet() && signalledUpdater.compareAndSet(this, NOT_SET, SIGNALLED))
+                {
+                    Thread thread = this.thread;
+                    LockSupport.unpark(thread);
+                    this.thread = null;
+                    return thread;
+                }
+                return null;
+            }
+
+            public void signal()
+            {
+                doSignal();
+            }
+
+            public boolean checkAndClear()
+            {
+                if (!isSet() && signalledUpdater.compareAndSet(this, NOT_SET, CANCELLED))
+                {
+                    thread = null;
+                    cleanUpCancelled();
+                    return false;
+                }
+                // must now be signalled assuming correct API usage
+                return true;
+            }
+
+            /**
+             * Should only be called by the registered thread. Indicates the signal can be retired,
+             * and if signalled propagates the signal to another waiting thread
+             */
+            public void cancel()
+            {
+                if (isCancelled())
+                    return;
+                if (!signalledUpdater.compareAndSet(this, NOT_SET, CANCELLED))
+                {
+                    // must already be signalled - switch to cancelled and
+                    state = CANCELLED;
+                    // propagate the signal
+                    WaitQueue.Standard.this.signal();
+                }
+                thread = null;
+                cleanUpCancelled();
             }
         }
-    }
 
-    /**
-     * An abstract signal wrapping multiple delegate signals
-     */
-    private abstract static class MultiSignal extends AbstractSignal
-    {
-        final Signal[] signals;
-        protected MultiSignal(Signal[] signals)
+        /**
+         * A RegisteredSignal that notifies a supplied listener when either cancelled or
+         * finished waiting, i.e. the registered {@code receiveOnDone} consumer is passed
+         * {@code supplyOnDone} once the signal is invalidated.
+         */
+        private final class SignalWithListener<V> extends RegisteredSignal
         {
-            this.signals = signals;
+            private final V supplyOnDone;
+            private final Consumer<V> receiveOnDone;
+
+            private SignalWithListener(V supplyOnDone, Consumer<V> receiveOnDone)
+            {
+                this.receiveOnDone = receiveOnDone;
+                this.supplyOnDone = supplyOnDone;
+            }
+
+
+            @Override
+            public boolean checkAndClear()
+            {
+                receiveOnDone.accept(supplyOnDone);
+                return super.checkAndClear();
+            }
+
+            @Override
+            public void cancel()
+            {
+                if (!isCancelled())
+                {
+                    receiveOnDone.accept(supplyOnDone);
+                    super.cancel();
+                }
+            }
         }
-
-        public boolean isCancelled()
-        {
-            for (Signal signal : signals)
-                if (!signal.isCancelled())
-                    return false;
-            return true;
-        }
-
-        public boolean checkAndClear()
-        {
-            for (Signal signal : signals)
-                signal.checkAndClear();
-            return isSignalled();
-        }
-
-        public void cancel()
-        {
-            for (Signal signal : signals)
-                signal.cancel();
-        }
-    }
-
-    /**
-     * A Signal that wraps multiple Signals and returns when any single one of them would have returned
-     */
-    private static class AnySignal extends MultiSignal
-    {
-        protected AnySignal(Signal ... signals)
-        {
-            super(signals);
-        }
-
-        public boolean isSignalled()
-        {
-            for (Signal signal : signals)
-                if (signal.isSignalled())
-                    return true;
-            return false;
-        }
-
-        public boolean isSet()
-        {
-            for (Signal signal : signals)
-                if (signal.isSet())
-                    return true;
-            return false;
-        }
-    }
-
-    /**
-     * A Signal that wraps multiple Signals and returns when all of them would have finished returning
-     */
-    private static class AllSignal extends MultiSignal
-    {
-        protected AllSignal(Signal ... signals)
-        {
-            super(signals);
-        }
-
-        public boolean isSignalled()
-        {
-            for (Signal signal : signals)
-                if (!signal.isSignalled())
-                    return false;
-            return true;
-        }
-
-        public boolean isSet()
-        {
-            for (Signal signal : signals)
-                if (!signal.isSet())
-                    return false;
-            return true;
-        }
-    }
-
-    /**
-     * @param signals
-     * @return a signal that returns only when any of the provided signals would have returned
-     */
-    public static Signal any(Signal ... signals)
-    {
-        return new AnySignal(signals);
-    }
-
-    /**
-     * @param signals
-     * @return a signal that returns only when all provided signals would have returned
-     */
-    public static Signal all(Signal ... signals)
-    {
-        return new AllSignal(signals);
     }
 
     /**
      * Loops waiting on the supplied condition and WaitQueue and will not return until the condition is true
      */
-    public static void waitOnCondition(BooleanSupplier condition, WaitQueue queue)
+    public static void waitOnCondition(BooleanSupplier condition, WaitQueue queue) throws InterruptedException
     {
         while (!condition.getAsBoolean())
         {
             Signal s = queue.register();
-            if (!condition.getAsBoolean())
-            {
-                s.awaitUninterruptibly();
-            }
-            else
-            {
-                s.cancel();
-            }
+            if (!condition.getAsBoolean()) s.await();
+            else s.cancel();
         }
     }
 }
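
A hypothetical sketch (not part of this patch) of the canonical register-then-recheck-then-await pattern, mirroring waitOnCondition above but using the new newWaitQueue() factory directly.

import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.cassandra.utils.concurrent.WaitQueue;

public class WaitQueueSketch
{
    final WaitQueue queue = WaitQueue.newWaitQueue();
    final AtomicBoolean ready = new AtomicBoolean();

    void awaitReady() throws InterruptedException
    {
        while (!ready.get())
        {
            WaitQueue.Signal signal = queue.register();
            if (ready.get()) { signal.cancel(); return; } // condition met after registering
            signal.await();                               // parks until signal()/signalAll()
        }
    }

    void markReady()
    {
        ready.set(true);
        queue.signalAll();                                // wake every registered waiter
    }
}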
diff --git a/src/java/org/apache/cassandra/utils/concurrent/WeightedQueue.java b/src/java/org/apache/cassandra/utils/concurrent/WeightedQueue.java
index 3a6505e..b1def45 100644
--- a/src/java/org/apache/cassandra/utils/concurrent/WeightedQueue.java
+++ b/src/java/org/apache/cassandra/utils/concurrent/WeightedQueue.java
@@ -22,14 +22,15 @@
 import java.util.Iterator;
 import java.util.Objects;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
+import static org.apache.cassandra.utils.concurrent.Semaphore.newSemaphore;
+
 /**
  * Weighted queue is a wrapper around any blocking queue that turns it into a blocking weighted queue. The queue
  * will weigh each element being added and removed. Adding to the queue is blocked if adding would violate
@@ -262,7 +263,7 @@
 
     public WeightedQueue(int maxWeight)
     {
-        this(maxWeight, new LinkedBlockingQueue<T>(), NATURAL_WEIGHER);
+        this(maxWeight, newBlockingQueue(), NATURAL_WEIGHER);
     }
 
     public WeightedQueue(int maxWeight, BlockingQueue<T> queue, Weigher<T> weigher)
@@ -273,7 +274,7 @@
         this.maxWeight = maxWeight;
         this.queue = queue;
         this.weigher = weigher;
-        availableWeight = new Semaphore(maxWeight);
+        availableWeight = newSemaphore(maxWeight);
     }
 
     boolean acquireWeight(T weighable, long timeout, TimeUnit unit) throws InterruptedException
diff --git a/src/java/org/apache/cassandra/utils/memory/BufferPool.java b/src/java/org/apache/cassandra/utils/memory/BufferPool.java
index a92f637..b9009bc 100644
--- a/src/java/org/apache/cassandra/utils/memory/BufferPool.java
+++ b/src/java/org/apache/cassandra/utils/memory/BufferPool.java
@@ -38,7 +38,7 @@
 import com.google.common.annotations.VisibleForTesting;
 
 import net.nicoulaj.compilecommand.annotations.Inline;
-import org.apache.cassandra.concurrent.InfiniteLoopExecutor;
+import org.apache.cassandra.concurrent.Shutdownable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,11 +48,15 @@
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.metrics.BufferPoolMetrics;
 import org.apache.cassandra.utils.NoSpamLogger;
+import org.apache.cassandra.utils.Shared;
 import org.apache.cassandra.utils.concurrent.Ref;
 
 import static com.google.common.collect.ImmutableList.of;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe.UNSAFE;
 import static org.apache.cassandra.utils.ExecutorUtils.*;
 import static org.apache.cassandra.utils.FBUtilities.prettyPrintMemory;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
 import static org.apache.cassandra.utils.memory.MemoryUtil.isExactlyDirect;
 
 /**
@@ -129,6 +133,7 @@
     private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocateDirect(0);
 
     private volatile Debug debug = Debug.NO_OP;
+    private volatile DebugLeaks debugLeaks = DebugLeaks.NO_OP;
 
     protected final String name;
     protected final BufferPoolMetrics metrics;
@@ -174,7 +179,7 @@
     private final Set<LocalPoolRef> localPoolReferences = Collections.newSetFromMap(new ConcurrentHashMap<>());
 
     private final ReferenceQueue<Object> localPoolRefQueue = new ReferenceQueue<>();
-    private final InfiniteLoopExecutor localPoolCleaner;
+    private final Shutdownable localPoolCleaner;
 
     public BufferPool(String name, long memoryUsageThreshold, boolean recyclePartially)
     {
@@ -184,7 +189,7 @@
         this.globalPool = new GlobalPool();
         this.metrics = new BufferPoolMetrics(name, this);
         this.recyclePartially = recyclePartially;
-        this.localPoolCleaner = new InfiniteLoopExecutor("LocalPool-Cleaner-" + name, this::cleanupOneReference).start();
+        this.localPoolCleaner = executorFactory().infiniteLoop("LocalPool-Cleaner-" + name, this::cleanupOneReference, UNSAFE);
     }
 
     /**
@@ -331,10 +336,19 @@
         void recyclePartial(Chunk chunk);
     }
 
-    public void debug(Debug setDebug)
+    @Shared(scope = SIMULATION)
+    public interface DebugLeaks
     {
-        assert setDebug != null;
-        this.debug = setDebug;
+        public static DebugLeaks NO_OP = () -> {};
+        void leak();
+    }
+
+    public void debug(Debug newDebug, DebugLeaks newDebugLeaks)
+    {
+        if (newDebug != null)
+            this.debug = newDebug;
+        if (newDebugLeaks != null)
+            this.debugLeaks = newDebugLeaks;
     }
 
     interface Recycler
@@ -1069,6 +1083,7 @@
         Object obj = localPoolRefQueue.remove(100);
         if (obj instanceof LocalPoolRef)
         {
+            debugLeaks.leak();
             ((LocalPoolRef) obj).release();
             localPoolReferences.remove(obj);
         }
@@ -1578,8 +1593,7 @@
     @VisibleForTesting
     public void shutdownLocalCleaner(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException
     {
-        shutdownNow(of(localPoolCleaner));
-        awaitTermination(timeout, unit, of(localPoolCleaner));
+        shutdownAndWait(timeout, unit, of(localPoolCleaner));
     }
 
     @VisibleForTesting
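
A hypothetical sketch (not part of this patch) of wiring the new DebugLeaks hook: a caller can install a leak counter without touching the Debug facility by passing null for the first argument, which the updated debug(Debug, DebugLeaks) method leaves unchanged.

import java.util.concurrent.atomic.AtomicLong;
import org.apache.cassandra.utils.memory.BufferPool;

public class BufferPoolLeakCounterSketch
{
    public static AtomicLong installLeakCounter(BufferPool pool)
    {
        AtomicLong leaks = new AtomicLong();
        // invoked by the LocalPool-Cleaner whenever it reclaims a local pool that was never released
        pool.debug(null, leaks::incrementAndGet);
        return leaks;
    }
}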
diff --git a/src/java/org/apache/cassandra/utils/memory/BufferPools.java b/src/java/org/apache/cassandra/utils/memory/BufferPools.java
index 736e1cd..5d80881 100644
--- a/src/java/org/apache/cassandra/utils/memory/BufferPools.java
+++ b/src/java/org/apache/cassandra/utils/memory/BufferPools.java
@@ -35,13 +35,13 @@
     /**
      * Used by chunk cache to store decompressed data and buffers may be held by chunk cache for arbitrary period.
      */
-    private static final long FILE_MEMORY_USAGE_THRESHOLD = DatabaseDescriptor.getFileCacheSizeInMB() * 1024L * 1024L;
+    private static final long FILE_MEMORY_USAGE_THRESHOLD = DatabaseDescriptor.getFileCacheSizeInMiB() * 1024L * 1024L;
     private static final BufferPool CHUNK_CACHE_POOL = new BufferPool("chunk-cache", FILE_MEMORY_USAGE_THRESHOLD, true);
 
     /**
      * Used by client-server or inter-node requests, buffers should be released immediately after use.
      */
-    private static final long NETWORKING_MEMORY_USAGE_THRESHOLD = DatabaseDescriptor.getNetworkingCacheSizeInMB() * 1024L * 1024L;
+    private static final long NETWORKING_MEMORY_USAGE_THRESHOLD = DatabaseDescriptor.getNetworkingCacheSizeInMiB() * 1024L * 1024L;
     private static final BufferPool NETWORKING_POOL = new BufferPool("networking", NETWORKING_MEMORY_USAGE_THRESHOLD, false);
 
     static
diff --git a/src/java/org/apache/cassandra/utils/memory/HeapPool.java b/src/java/org/apache/cassandra/utils/memory/HeapPool.java
index b625b60..532d11f 100644
--- a/src/java/org/apache/cassandra/utils/memory/HeapPool.java
+++ b/src/java/org/apache/cassandra/utils/memory/HeapPool.java
@@ -20,18 +20,24 @@
 
 import java.nio.ByteBuffer;
 
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.utils.Shared;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 
-import com.google.common.annotations.VisibleForTesting;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
 
 public class HeapPool extends MemtablePool
 {
+    private static final EnsureOnHeap ENSURE_NOOP = new EnsureOnHeap.NoOp();
+
     public HeapPool(long maxOnHeapMemory, float cleanupThreshold, MemtableCleaner cleaner)
     {
         super(maxOnHeapMemory, 0, cleanupThreshold, cleaner);
     }
 
-    public MemtableAllocator newAllocator()
+    public MemtableAllocator newAllocator(String table)
     {
         return new Allocator(this);
     }
@@ -39,8 +45,6 @@
     @VisibleForTesting
     public static class Allocator extends MemtableBufferAllocator
     {
-        private static final EnsureOnHeap ENSURE_NOOP = new EnsureOnHeap.NoOp();
-
         @VisibleForTesting
         public Allocator(HeapPool pool)
         {
@@ -63,4 +67,80 @@
             return allocator(opGroup);
         }
     }
+
+    public static class Logged extends MemtablePool
+    {
+        @Shared(scope = SIMULATION)
+        public interface Listener
+        {
+            public void accept(long size, String table);
+        }
+
+        private static Listener onAllocated = (i, table) -> {};
+
+        class LoggedSubPool extends SubPool
+        {
+            public LoggedSubPool(long limit, float cleanThreshold)
+            {
+                super(limit, cleanThreshold);
+            }
+
+            public MemtableAllocator.SubAllocator newAllocator(String table)
+            {
+                return new MemtableAllocator.SubAllocator(this)
+                {
+                    public void allocate(long size, OpOrder.Group opGroup)
+                    {
+                        onAllocated.accept(size, table);
+                        super.allocate(size, opGroup);
+                    }
+                };
+            }
+        }
+
+        public Logged(long maxOnHeapMemory, float cleanupThreshold, MemtableCleaner cleaner)
+        {
+            super(maxOnHeapMemory, 0, cleanupThreshold, cleaner);
+        }
+
+        public MemtableAllocator newAllocator(String table)
+        {
+            return new Allocator(this, table);
+        }
+
+        private static class Allocator extends MemtableBufferAllocator
+        {
+            Allocator(Logged pool, String table)
+            {
+                super(((LoggedSubPool) pool.onHeap).newAllocator(table), ((LoggedSubPool) pool.offHeap).newAllocator(table));
+            }
+
+            public ByteBuffer allocate(int size, OpOrder.Group opGroup)
+            {
+                super.onHeap().allocate(size, opGroup);
+                return ByteBuffer.allocate(size);
+            }
+
+            @Override
+            public EnsureOnHeap ensureOnHeap()
+            {
+                return ENSURE_NOOP;
+            }
+
+            public Cloner cloner(OpOrder.Group opGroup)
+            {
+                return allocator(opGroup);
+            }
+        }
+
+        SubPool getSubPool(long limit, float cleanThreshold)
+        {
+            return new LoggedSubPool(limit, cleanThreshold);
+        }
+
+        public static void setListener(Listener listener)
+        {
+            onAllocated = listener;
+        }
+    }
 }
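
A minimal sketch (not part of the patch) of the new HeapPool.Logged pool attributing memtable allocations to tables. The limits, table name and no-op cleaner are illustrative; ImmediateFuture is assumed available as the in-tree immediate Future, and the usual test bootstrap (DatabaseDescriptor, metrics) is assumed to have run.

    import org.apache.cassandra.utils.concurrent.ImmediateFuture;
    import org.apache.cassandra.utils.memory.HeapPool;
    import org.apache.cassandra.utils.memory.MemtableAllocator;
    import org.apache.cassandra.utils.memory.MemtablePool;

    public class LoggedPoolSketch
    {
        public static void main(String[] args)
        {
            // record every SubAllocator.allocate() call together with the owning table
            HeapPool.Logged.setListener((size, table) ->
                System.out.printf("%s allocated %d bytes%n", table, size));

            MemtablePool pool = new HeapPool.Logged(1 << 20, 0.75f,
                                                    () -> ImmediateFuture.success(false)); // no-op cleaner
            MemtableAllocator allocator = pool.newAllocator("ks.tbl");
        }
    }
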
diff --git a/src/java/org/apache/cassandra/utils/memory/MemtableAllocator.java b/src/java/org/apache/cassandra/utils/memory/MemtableAllocator.java
index 42ae904..139d4a0 100644
--- a/src/java/org/apache/cassandra/utils/memory/MemtableAllocator.java
+++ b/src/java/org/apache/cassandra/utils/memory/MemtableAllocator.java
@@ -23,6 +23,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.codahale.metrics.Timer;
+
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
@@ -100,7 +102,7 @@
     }
 
     /** Mark the BB as unused, permitting it to be reclaimed */
-    public static final class SubAllocator
+    public static class SubAllocator
     {
         // the tracker we are owning memory from
         private final MemtablePool.SubPool parent;
@@ -180,19 +182,17 @@
                     allocated(size);
                     return;
                 }
-                WaitQueue.Signal signal = opGroup.isBlockingSignal(parent.hasRoom().register(parent.blockedTimerContext()));
+                WaitQueue.Signal signal = parent.hasRoom().register(parent.blockedTimerContext(), Timer.Context::stop);
+                opGroup.notifyIfBlocking(signal);
                 boolean allocated = parent.tryAllocate(size);
-                if (allocated || opGroup.isBlocking())
+                if (allocated)
                 {
                     signal.cancel();
-                    if (allocated) // if we allocated, take ownership
-                        acquired(size);
-                    else // otherwise we're blocking so we're permitted to overshoot our constraints, to just allocate without blocking
-                        allocated(size);
+                    acquired(size);
                     return;
                 }
                 else
-                    signal.awaitUninterruptibly();
+                    signal.awaitThrowUncheckedOnInterrupt();
             }
         }
 
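
The rewritten allocate() above follows the register-then-recheck idiom: the waiter registers on hasRoom before re-trying, so a release that arrives after the failed retry still signals it. Below is a self-contained sketch of that pattern under assumed names (it is not the production SubAllocator, and the AtomicLong budget stands in for the real pool accounting).

    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.cassandra.utils.concurrent.WaitQueue;

    import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;

    public class RegisterThenRecheckSketch
    {
        static final WaitQueue hasRoom = newWaitQueue();
        static final AtomicLong free = new AtomicLong(1024);

        static boolean tryAllocate(long size)
        {
            while (true)
            {
                long cur = free.get();
                if (cur < size)
                    return false;
                if (free.compareAndSet(cur, cur - size))
                    return true;
            }
        }

        static void release(long size)
        {
            free.addAndGet(size);
            hasRoom.signalAll();                                // wake anyone registered below
        }

        static void allocate(long size)
        {
            while (true)
            {
                if (tryAllocate(size))
                    return;
                WaitQueue.Signal signal = hasRoom.register();   // register *before* re-checking
                if (tryAllocate(size))                          // a concurrent release cannot be missed now
                {
                    signal.cancel();
                    return;
                }
                signal.awaitThrowUncheckedOnInterrupt();        // sleep until a release signals hasRoom
            }
        }
    }
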
diff --git a/src/java/org/apache/cassandra/utils/memory/MemtableCleaner.java b/src/java/org/apache/cassandra/utils/memory/MemtableCleaner.java
index d2cb9c5..bce0421 100644
--- a/src/java/org/apache/cassandra/utils/memory/MemtableCleaner.java
+++ b/src/java/org/apache/cassandra/utils/memory/MemtableCleaner.java
@@ -18,7 +18,7 @@
 
 package org.apache.cassandra.utils.memory;
 
-import java.util.concurrent.CompletableFuture;
+import org.apache.cassandra.utils.concurrent.Future;
 
 /**
  * The cleaner is used by {@link MemtableCleanerThread} in order to reclaim space from memtables, normally
@@ -36,5 +36,5 @@
      * The future will complete with an error if the cleaning operation encounters an error.
      *
      */
-    CompletableFuture<Boolean> clean();
+    Future<Boolean> clean();
 }
\ No newline at end of file
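
With clean() now returning Cassandra's own Future, implementations complete through the in-tree promise types rather than CompletableFuture. A minimal sketch follows; AsyncPromise and its trySuccess/tryFailure methods are assumed, and the flush work itself is elided.

    import org.apache.cassandra.utils.concurrent.AsyncPromise;
    import org.apache.cassandra.utils.memory.MemtableCleaner;

    public class CleanerSketch
    {
        // a cleaner that reports whether it managed to free any memory
        static final MemtableCleaner CLEANER = () -> {
            AsyncPromise<Boolean> promise = new AsyncPromise<>();
            // ... start the real flush and call promise.trySuccess(true/false),
            //     or promise.tryFailure(t) on error, when it completes ...
            promise.trySuccess(true);
            return promise;
        };
    }
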
diff --git a/src/java/org/apache/cassandra/utils/memory/MemtableCleanerThread.java b/src/java/org/apache/cassandra/utils/memory/MemtableCleanerThread.java
index f6fccc6..dbc23e5 100644
--- a/src/java/org/apache/cassandra/utils/memory/MemtableCleanerThread.java
+++ b/src/java/org/apache/cassandra/utils/memory/MemtableCleanerThread.java
@@ -24,18 +24,24 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.concurrent.InfiniteLoopExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.concurrent.Interruptible;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe.SAFE;
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
 /**
  * A thread that reclaims memory from a MemtablePool on demand.  The actual reclaiming work is delegated to the
  * cleaner Runnable, e.g., FlushLargestColumnFamily
  */
-public class MemtableCleanerThread<P extends MemtablePool> extends InfiniteLoopExecutor
+public class MemtableCleanerThread<P extends MemtablePool> implements Interruptible
 {
     private static final Logger logger = LoggerFactory.getLogger(MemtableCleanerThread.class);
 
-    public static class Clean<P extends MemtablePool> implements InterruptibleRunnable
+    private static class Clean<P extends MemtablePool> implements Interruptible.SimpleTask
     {
         /** This is incremented when a cleaner is invoked and decremented when a cleaner has completed */
         final AtomicInteger numPendingTasks = new AtomicInteger(0);
@@ -47,7 +53,7 @@
         final MemtableCleaner cleaner;
 
         /** signalled whenever needsCleaning() may return true */
-        final WaitQueue wait = new WaitQueue();
+        final WaitQueue wait = newWaitQueue();
 
         private Clean(P pool, MemtableCleaner cleaner)
         {
@@ -79,7 +85,7 @@
                 if (logger.isTraceEnabled())
                     logger.trace("Invoking cleaner with {} tasks pending", numPendingTasks);
 
-                cleaner.clean().handle(this::apply);
+                cleaner.clean().addCallback(this::apply);
             }
         }
 
@@ -98,14 +104,20 @@
 
             return res;
         }
+
+        public String toString()
+        {
+            return pool.toString() + ' ' + cleaner.toString();
+        }
     }
 
+    private final Interruptible executor;
     private final Runnable trigger;
     private final Clean<P> clean;
 
     private MemtableCleanerThread(Clean<P> clean)
     {
-        super(clean.pool.getClass().getSimpleName() + "Cleaner", clean);
+        this.executor = executorFactory().infiniteLoop(clean.pool.getClass().getSimpleName() + "Cleaner", clean, SAFE);
         this.trigger = clean.wait::signal;
         this.clean = clean;
     }
@@ -127,4 +139,34 @@
     {
         return clean.numPendingTasks();
     }
+
+    @Override
+    public void interrupt()
+    {
+        executor.interrupt();
+    }
+
+    @Override
+    public boolean isTerminated()
+    {
+        return executor.isTerminated();
+    }
+
+    @Override
+    public void shutdown()
+    {
+        executor.shutdown();
+    }
+
+    @Override
+    public Object shutdownNow()
+    {
+        return executor.shutdownNow();
+    }
+
+    @Override
+    public boolean awaitTermination(long timeout, TimeUnit units) throws InterruptedException
+    {
+        return executor.awaitTermination(timeout, units);
+    }
 }
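
Both this class and BufferPool above now obtain their background loop from the global ExecutorFactory rather than extending InfiniteLoopExecutor, then drive its lifecycle through the Interruptible API. A minimal sketch of the pattern (task body and names are illustrative):

    import java.util.concurrent.TimeUnit;

    import org.apache.cassandra.concurrent.Interruptible;

    import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
    import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe.SAFE;

    public class LoopSketch
    {
        public static void main(String[] args) throws InterruptedException
        {
            // one iteration of the loop; the factory runs it repeatedly on a dedicated thread
            Interruptible.SimpleTask task = LoopSketch::doOneIteration;
            Interruptible loop = executorFactory().infiniteLoop("sketch-loop", task, SAFE);

            loop.shutdownNow();                          // interrupt the loop and stop it
            loop.awaitTermination(10, TimeUnit.SECONDS);
        }

        static void doOneIteration() throws InterruptedException
        {
            Thread.sleep(100);
        }
    }
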
diff --git a/src/java/org/apache/cassandra/utils/memory/MemtablePool.java b/src/java/org/apache/cassandra/utils/memory/MemtablePool.java
index 966c560..26c4791 100644
--- a/src/java/org/apache/cassandra/utils/memory/MemtablePool.java
+++ b/src/java/org/apache/cassandra/utils/memory/MemtablePool.java
@@ -32,6 +32,8 @@
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 import org.apache.cassandra.utils.ExecutorUtils;
 
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
+
 
 /**
  * Represents an amount of memory used for a given purpose, that can be allocated to specific tasks through
@@ -48,7 +50,7 @@
     public final Timer blockedOnAllocating;
     public final Gauge<Long> numPendingTasks;
 
-    final WaitQueue hasRoom = new WaitQueue();
+    final WaitQueue hasRoom = newWaitQueue();
 
     MemtablePool(long maxOnHeapMemory, long maxOffHeapMemory, float cleanThreshold, MemtableCleaner cleaner)
     {
@@ -57,7 +59,6 @@
         this.onHeap = getSubPool(maxOnHeapMemory, cleanThreshold);
         this.offHeap = getSubPool(maxOffHeapMemory, cleanThreshold);
         this.cleaner = getCleaner(cleaner);
-        this.cleaner.start();
         DefaultNameFactory nameFactory = new DefaultNameFactory("MemtablePool");
         blockedOnAllocating = CassandraMetricsRegistry.Metrics.timer(nameFactory.createMetricName("BlockedOnAllocation"));
         numPendingTasks = CassandraMetricsRegistry.Metrics.register(nameFactory.createMetricName("PendingFlushTasks"),
@@ -80,7 +81,7 @@
         ExecutorUtils.shutdownNowAndWait(timeout, unit, cleaner);
     }
 
-    public abstract MemtableAllocator newAllocator();
+    public abstract MemtableAllocator newAllocator(String table);
 
     public boolean needsCleaning()
     {
diff --git a/src/java/org/apache/cassandra/utils/memory/NativeAllocator.java b/src/java/org/apache/cassandra/utils/memory/NativeAllocator.java
index 0505962..0d1fdd4 100644
--- a/src/java/org/apache/cassandra/utils/memory/NativeAllocator.java
+++ b/src/java/org/apache/cassandra/utils/memory/NativeAllocator.java
@@ -20,19 +20,21 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.Semaphore;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.concurrent.Semaphore;
 import org.apache.cassandra.utils.concurrent.OpOrder.Group;
 
+import static org.apache.cassandra.utils.concurrent.Semaphore.newSemaphore;
+
 /**
  * This NativeAllocator uses global slab allocation strategy
- * with slab size that scales exponentially from 8kb to 1Mb to
- * serve allocation of up to 128kb.
+ * with slab sizes that scale exponentially from 8KiB to 1MiB to
+ * serve allocations of up to 128KiB.
  * <p>
  * </p>
  * The slab allocation reduces heap fragmentation from small
@@ -207,10 +209,10 @@
     private static class RaceAllocated
     {
         final ConcurrentLinkedQueue<Region> stash = new ConcurrentLinkedQueue<>();
-        final Semaphore permits = new Semaphore(8);
+        final Semaphore permits = newSemaphore(8);
         boolean stash(Region region)
         {
-            if (!permits.tryAcquire())
+            if (!permits.tryAcquire(1))
                 return false;
             stash.add(region);
             return true;
@@ -219,7 +221,7 @@
         {
             Region next = stash.poll();
             if (next != null)
-                permits.release();
+                permits.release(1);
             return next;
         }
     }
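
The stash above now uses Cassandra's Semaphore abstraction, whose acquire and release calls always take an explicit permit count. A tiny illustrative sketch of the API difference (not the stash logic itself):

    import org.apache.cassandra.utils.concurrent.Semaphore;

    import static org.apache.cassandra.utils.concurrent.Semaphore.newSemaphore;

    public class SemaphoreSketch
    {
        public static void main(String[] args)
        {
            Semaphore permits = newSemaphore(8);
            if (permits.tryAcquire(1))      // JDK equivalent: tryAcquire()
            {
                // ... do work while holding one permit ...
                permits.release(1);         // JDK equivalent: release()
            }
        }
    }
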
diff --git a/src/java/org/apache/cassandra/utils/memory/NativePool.java b/src/java/org/apache/cassandra/utils/memory/NativePool.java
index e88b4d7..1391a37 100644
--- a/src/java/org/apache/cassandra/utils/memory/NativePool.java
+++ b/src/java/org/apache/cassandra/utils/memory/NativePool.java
@@ -26,7 +26,7 @@
     }
 
     @Override
-    public NativeAllocator newAllocator()
+    public NativeAllocator newAllocator(String table)
     {
         return new NativeAllocator(this);
     }
diff --git a/src/java/org/apache/cassandra/utils/memory/SlabAllocator.java b/src/java/org/apache/cassandra/utils/memory/SlabAllocator.java
index c5c4563..05f9927 100644
--- a/src/java/org/apache/cassandra/utils/memory/SlabAllocator.java
+++ b/src/java/org/apache/cassandra/utils/memory/SlabAllocator.java
@@ -32,8 +32,8 @@
 
 /**
 + * The SlabAllocator is a bump-the-pointer allocator that allocates
-+ * large (1MB) global regions and then doles them out to threads that
-+ * request smaller sized (up to 128kb) slices into the array.
++ * large (1MiB) global regions and then doles them out to threads that
++ * request smaller sized (up to 128KiB) slices into the array.
  * <p></p>
  * The purpose of this class is to combat heap fragmentation in long lived
  * objects: by ensuring that all allocations with similar lifetimes
diff --git a/src/java/org/apache/cassandra/utils/memory/SlabPool.java b/src/java/org/apache/cassandra/utils/memory/SlabPool.java
index 416d1dd..edaddc9 100644
--- a/src/java/org/apache/cassandra/utils/memory/SlabPool.java
+++ b/src/java/org/apache/cassandra/utils/memory/SlabPool.java
@@ -28,7 +28,7 @@
         this.allocateOnHeap = maxOffHeapMemory == 0;
     }
 
-    public MemtableAllocator newAllocator()
+    public MemtableAllocator newAllocator(String table)
     {
         return new SlabAllocator(onHeap.newAllocator(), offHeap.newAllocator(), allocateOnHeap);
     }
diff --git a/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java b/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java
index 486ec38..ae89594 100644
--- a/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java
+++ b/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java
@@ -17,10 +17,7 @@
  */
 package org.apache.cassandra.utils.obs;
 
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.IOException;
+import java.io.*;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -145,7 +142,7 @@
     }
 
     @SuppressWarnings("resource")
-    public static OffHeapBitSet deserialize(DataInputStream in, boolean oldBfFormat) throws IOException
+    public static <I extends InputStream & DataInput> OffHeapBitSet deserialize(I in, boolean oldBfFormat) throws IOException
     {
         long byteCount = in.readInt() * 8L;
         Memory memory = Memory.allocate(byteCount);
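
The deserialize signature above uses an intersection bound, so any stream that is both an InputStream and a DataInput qualifies; in particular the previous DataInputStream callers still compile. A minimal sketch (file name illustrative):

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    import org.apache.cassandra.utils.obs.OffHeapBitSet;

    public class DeserializeSketch
    {
        public static void main(String[] args) throws IOException
        {
            try (DataInputStream in = new DataInputStream(new FileInputStream("bitset.bin")))
            {
                OffHeapBitSet bits = OffHeapBitSet.deserialize(in, false);
                System.out.println("bits: " + bits);
            }
        }
    }
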
diff --git a/src/java/org/apache/cassandra/utils/progress/jmx/JMXBroadcastExecutor.java b/src/java/org/apache/cassandra/utils/progress/jmx/JMXBroadcastExecutor.java
index f28609c..c21146b 100644
--- a/src/java/org/apache/cassandra/utils/progress/jmx/JMXBroadcastExecutor.java
+++ b/src/java/org/apache/cassandra/utils/progress/jmx/JMXBroadcastExecutor.java
@@ -19,9 +19,8 @@
 package org.apache.cassandra.utils.progress.jmx;
 
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 
 /**
  * Holds a dedicated executor for JMX event handling. Events will be queued internally by ArrayNotificationBuffer,
@@ -32,6 +31,6 @@
 
     private JMXBroadcastExecutor() { }
 
-    public final static ExecutorService executor = Executors.newSingleThreadExecutor(new NamedThreadFactory("JMX"));
+    public final static ExecutorService executor = executorFactory().sequential("JMX");
 
 }
diff --git a/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java b/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java
index 12efd0d..1d5023d 100644
--- a/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java
+++ b/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java
@@ -26,6 +26,8 @@
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressListener;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * ProgressListener that translates ProgressEvent to JMX Notification message.
  */
@@ -46,7 +48,7 @@
         Notification notification = new Notification("progress",
                                                      tag,
                                                      notificationSerialNumber.getAndIncrement(),
-                                                     System.currentTimeMillis(),
+                                                     currentTimeMillis(),
                                                      event.getMessage());
         Map<String, Integer> userData = new HashMap<>();
         userData.put("type", event.getType().ordinal());
diff --git a/src/java/org/apache/cassandra/utils/streamhist/HistogramDataConsumer.java b/src/java/org/apache/cassandra/utils/streamhist/HistogramDataConsumer.java
old mode 100755
new mode 100644
diff --git a/src/java/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilder.java b/src/java/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilder.java
old mode 100755
new mode 100644
diff --git a/src/java/org/apache/cassandra/utils/streamhist/TombstoneHistogram.java b/src/java/org/apache/cassandra/utils/streamhist/TombstoneHistogram.java
old mode 100755
new mode 100644
diff --git a/src/java/org/apache/cassandra/utils/vint/VIntCoding.java b/src/java/org/apache/cassandra/utils/vint/VIntCoding.java
index c35f834..8543e6f 100644
--- a/src/java/org/apache/cassandra/utils/vint/VIntCoding.java
+++ b/src/java/org/apache/cassandra/utils/vint/VIntCoding.java
@@ -47,13 +47,13 @@
 package org.apache.cassandra.utils.vint;
 
 import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import io.netty.util.concurrent.FastThreadLocal;
 import net.nicoulaj.compilecommand.annotations.Inline;
 import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
 
 /**
  * Borrows idea from
@@ -61,6 +61,16 @@
  */
 public class VIntCoding
 {
+
+    protected static final FastThreadLocal<byte[]> encodingBuffer = new FastThreadLocal<byte[]>()
+    {
+        @Override
+        public byte[] initialValue()
+        {
+            return new byte[9];
+        }
+    };
+
     public static final int MAX_SIZE = 10;
 
     public static long readUnsignedVInt(DataInput input) throws IOException
@@ -176,26 +186,31 @@
         return Integer.numberOfLeadingZeros(~firstByte) - 24;
     }
 
-    protected static final FastThreadLocal<byte[]> encodingBuffer = new FastThreadLocal<byte[]>()
-    {
-        @Override
-        public byte[] initialValue()
-        {
-            return new byte[9];
-        }
-    };
-
     @Inline
-    public static void writeUnsignedVInt(long value, DataOutput output) throws IOException
+    public static void writeUnsignedVInt(long value, DataOutputPlus output) throws IOException
     {
         int size = VIntCoding.computeUnsignedVIntSize(value);
         if (size == 1)
         {
-            output.write((int)value);
-            return;
+            output.writeByte((int) value);
         }
-
-        output.write(VIntCoding.encodeUnsignedVInt(value, size), 0, size);
+        else if (size < 9)
+        {
+            int shift = (8 - size) << 3;
+            int extraBytes = size - 1;
+            long mask = (long)VIntCoding.encodeExtraBytesToRead(extraBytes) << 56;
+            long register = (value << shift) | mask;
+            output.writeBytes(register, size);
+        }
+        else if (size == 9)
+        {
+            output.write((byte) 0xFF);
+            output.writeLong(value);
+        }
+        else
+        {
+            throw new AssertionError();
+        }
     }
 
     @Inline
@@ -204,11 +219,47 @@
         int size = VIntCoding.computeUnsignedVIntSize(value);
         if (size == 1)
         {
-            output.put((byte) value);
-            return;
+            output.put((byte) (value));
         }
+        else if (size < 9)
+        {
+            int limit = output.limit();
+            int pos = output.position();
+            if (limit - pos >= 8)
+            {
+                int shift = (8 - size) << 3;
+                int extraBytes = size - 1;
+                long mask = (long)VIntCoding.encodeExtraBytesToRead(extraBytes) << 56;
+                long register = (value << shift) | mask;
+                output.putLong(pos, register);
+                output.position(pos + size);
+            }
+            else
+            {
+                output.put(VIntCoding.encodeUnsignedVInt(value, size), 0, size);
+            }
+        }
+        else if (size == 9)
+        {
+            output.put((byte) 0xFF);
+            output.putLong(value);
+        }
+        else
+        {
+            throw new AssertionError();
+        }
+    }
 
-        output.put(VIntCoding.encodeUnsignedVInt(value, size), 0, size);
+    @Inline
+    public static void writeVInt(long value, DataOutputPlus output) throws IOException
+    {
+        writeUnsignedVInt(encodeZigZag64(value), output);
+    }
+
+    @Inline
+    public static void writeVInt(long value, ByteBuffer output) throws IOException
+    {
+        writeUnsignedVInt(encodeZigZag64(value), output);
     }
 
     /**
@@ -219,26 +270,16 @@
     private static byte[] encodeUnsignedVInt(long value, int size)
     {
         byte[] encodingSpace = encodingBuffer.get();
-        encodeUnsignedVInt(value, size, encodingSpace);
-        return encodingSpace;
-    }
 
-    @Inline
-    private static void encodeUnsignedVInt(long value, int size, byte[] encodeInto)
-    {
         int extraBytes = size - 1;
         for (int i = extraBytes ; i >= 0; --i)
         {
-            encodeInto[i] = (byte) value;
+            encodingSpace[i] = (byte) value;
             value >>= 8;
         }
-        encodeInto[0] |= VIntCoding.encodeExtraBytesToRead(extraBytes);
-    }
+        encodingSpace[0] |= VIntCoding.encodeExtraBytesToRead(extraBytes);
 
-    @Inline
-    public static void writeVInt(long value, DataOutput output) throws IOException
-    {
-        writeUnsignedVInt(encodeZigZag64(value), output);
+        return encodingSpace;
     }
 
     /**
@@ -282,6 +323,7 @@
     public static int computeUnsignedVIntSize(final long value)
     {
         int magnitude = Long.numberOfLeadingZeros(value | 1); // | with 1 to ensure magnitude <= 63, so (63 - 1) / 7 <= 8
-        return 9 - ((magnitude - 1) / 7);
+        // the formula below is hand-picked to match the original 9 - ((magnitude - 1) / 7)
+        return (639 - magnitude * 9) >> 6;
     }
 }
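
Two of the VIntCoding changes above rely on bit tricks that are easier to see with concrete numbers. The standalone sketch below (not part of the patch) first shows how a 2-byte unsigned vint is packed into the single long register that writeBytes() emits, then checks that the rewritten computeUnsignedVIntSize() expression agrees with the original formula for every possible magnitude.

    public class VIntSketch
    {
        public static void main(String[] args)
        {
            // 1) register packing for a 2-byte encoding of value = 640 (0x280)
            long value = 640;
            int size = 2;
            int shift = (8 - size) << 3;     // 48: payload moves into the top `size` bytes
            long mask = 0x80L << 56;         // 1-extra-byte prefix (0b1000_0000) in the top byte
            long register = (value << shift) | mask;
            System.out.printf("register = %016x%n", register);  // 8280000000000000 -> bytes 0x82 0x80

            // 2) (639 - magnitude * 9) >> 6  ==  9 - ((magnitude - 1) / 7)  for magnitude in [0, 63]
            for (int magnitude = 0; magnitude <= 63; magnitude++)
            {
                int original = 9 - ((magnitude - 1) / 7);
                int rewritten = (639 - magnitude * 9) >> 6;
                if (original != rewritten)
                    throw new AssertionError("mismatch at magnitude " + magnitude);
            }
            System.out.println("size formulas agree for all magnitudes");
        }
    }
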
diff --git a/test/anttasks/org/apache/cassandra/anttasks/EchoEclipseProjectLibs.java b/test/anttasks/org/apache/cassandra/anttasks/EchoEclipseProjectLibs.java
new file mode 100644
index 0000000..c0cfb41
--- /dev/null
+++ b/test/anttasks/org/apache/cassandra/anttasks/EchoEclipseProjectLibs.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.anttasks;
+
+import org.apache.cassandra.io.util.File;
+
+import org.apache.commons.io.FilenameUtils;
+
+import org.apache.tools.ant.Project;
+import org.apache.tools.ant.Task;
+import org.apache.tools.ant.taskdefs.Echo;
+
+public class EchoEclipseProjectLibs extends Task
+{
+    public void execute()
+    {
+        Project project = getProject();
+        StringBuilder msg = buildMsg();
+
+        Echo echo = (Echo) project.createTask("echo");
+        echo.setMessage(msg.toString());
+        echo.setFile(new File(".classpath").toJavaIOFile());
+        echo.setAppend(true);
+        echo.perform();
+    }
+
+    public StringBuilder buildMsg()
+    {
+        Project project = getProject();
+        String[] jars = project.getProperty("eclipse-project-libs")
+                               .split(project.getProperty("path.separator"));
+
+        StringBuilder msg = new StringBuilder();
+        for (int i=0; i< jars.length; i++)
+        {
+            String srcJar = FilenameUtils.getBaseName(jars[i]) + "-sources.jar";
+            String srcDir = FilenameUtils.concat(project.getProperty("build.test.dir"), "sources");
+            File srcFile = new File(FilenameUtils.concat(srcDir, srcJar));
+
+            msg.append("\t<classpathentry kind=\"lib\" path=\"").append(jars[i]).append('"');
+            if (srcFile.exists())
+                msg.append(" sourcepath=\"").append(srcFile.path()).append('"');
+            msg.append("/>\n");
+        }
+
+        msg.append("</classpath>");
+
+        return msg;
+    }
+}
diff --git a/test/anttasks/org/apache/cassandra/anttasks/TestHelper.java b/test/anttasks/org/apache/cassandra/anttasks/TestHelper.java
new file mode 100644
index 0000000..be2949a
--- /dev/null
+++ b/test/anttasks/org/apache/cassandra/anttasks/TestHelper.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.anttasks;
+
+import org.apache.tools.ant.Project;
+import org.apache.tools.ant.Task;
+import org.apache.tools.ant.taskdefs.MacroInstance;
+import org.apache.tools.ant.taskdefs.Sequential;
+
+public class TestHelper extends Task
+{
+    private String property;
+
+    public void setProperty(String property)
+    {
+        this.property = property;
+    }
+
+    public void execute()
+    {
+        Project project = getProject();
+
+        String sep = project.getProperty("path.separator");
+        String[] allTestClasses = project.getProperty("all-test-classes").split(sep);
+
+        Sequential seqTask = (Sequential) project.createTask("sequential");
+        for (int i=0; i< allTestClasses.length; i++)
+        {
+            if (allTestClasses[i] == null)
+                continue;
+
+            MacroInstance task = (MacroInstance) project.createTask(property);
+            task.setDynamicAttribute("test.file.list", ' ' + allTestClasses[i]);
+            seqTask.addTask(task);
+        }
+
+        seqTask.perform();
+    }
+}
diff --git a/test/burn/org/apache/cassandra/concurrent/LongOpOrderTest.java b/test/burn/org/apache/cassandra/concurrent/LongOpOrderTest.java
index d7105df..b236fb1 100644
--- a/test/burn/org/apache/cassandra/concurrent/LongOpOrderTest.java
+++ b/test/burn/org/apache/cassandra/concurrent/LongOpOrderTest.java
@@ -37,6 +37,7 @@
 import org.apache.cassandra.utils.concurrent.OpOrder;
 
 import static org.junit.Assert.assertTrue;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 // TODO: we don't currently test SAFE functionality at all!
 // TODO: should also test markBlocking and SyncOrdered
@@ -85,13 +86,13 @@
         @Override
         public void run()
         {
-            final long until = System.currentTimeMillis() + RUNTIME;
-            long lastReport = System.currentTimeMillis();
+            final long until = currentTimeMillis() + RUNTIME;
+            long lastReport = currentTimeMillis();
             long count = 0;
             long opCount = 0;
             while (true)
             {
-                long now = System.currentTimeMillis();
+                long now = currentTimeMillis();
                 if (now > until)
                     break;
                 if (now > lastReport + REPORT_INTERVAL)
diff --git a/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java b/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java
index fd7920a..b5f5718 100644
--- a/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java
+++ b/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java
@@ -31,6 +31,8 @@
 import org.junit.Ignore;
 import org.junit.Test;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class LongSharedExecutorPoolTest
 {
 
@@ -134,11 +136,11 @@
         // (beyond max queued size), and longer operations
         for (float multiplier = 0f ; multiplier < 2.01f ; )
         {
-            if (System.nanoTime() > until)
+            if (nanoTime() > until)
             {
                 System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f, events * 0.000001f));
                 events = 0;
-                until = System.nanoTime() + intervalNanos;
+                until = nanoTime() + intervalNanos;
                 multiplier += loadIncrement;
                 System.out.println(String.format("Running for %ds with load multiplier %.1f", TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
             }
@@ -150,20 +152,20 @@
             else if (pending.size() == executorCount) timeout = pending.first().timeout;
             else timeout = (long) (Math.random() * pending.last().timeout);
 
-            while (!pending.isEmpty() && timeout > System.nanoTime())
+            while (!pending.isEmpty() && timeout > nanoTime())
             {
                 Batch first = pending.first();
                 boolean complete = false;
                 try
                 {
                     for (Result result : first.results.descendingSet())
-                        result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
+                        result.future.get(timeout - nanoTime(), TimeUnit.NANOSECONDS);
                     complete = true;
                 }
                 catch (TimeoutException e)
                 {
                 }
-                if (!complete && System.nanoTime() > first.timeout)
+                if (!complete && nanoTime() > first.timeout)
                 {
                     for (Result result : first.results)
                         if (!result.future.isDone())
@@ -190,7 +192,7 @@
             TreeSet<Result> results = new TreeSet<>();
             int count = (int) (workCount[executorIndex].sample() * multiplier);
             long targetTotalElapsed = 0;
-            long start = System.nanoTime();
+            long start = nanoTime();
             long baseTime;
             if (Math.random() > 0.5) baseTime = 2 * (long) (workTime.sample() * multiplier);
             else  baseTime = 0;
@@ -205,11 +207,11 @@
                     time = maxWorkTime;
                 targetTotalElapsed += time;
                 Future<?> future = executor.submit(new WaitTask(time));
-                results.add(new Result(future, System.nanoTime() + time));
+                results.add(new Result(future, nanoTime() + time));
             }
             long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                        + TimeUnit.MILLISECONDS.toNanos(100L);
-            long now = System.nanoTime();
+            long now = nanoTime();
             if (runs++ > executorCount && now > end)
                 throw new AssertionError();
             events += results.size();
diff --git a/test/burn/org/apache/cassandra/net/Connection.java b/test/burn/org/apache/cassandra/net/Connection.java
index c74c0ae..de5df6b 100644
--- a/test/burn/org/apache/cassandra/net/Connection.java
+++ b/test/burn/org/apache/cassandra/net/Connection.java
@@ -35,7 +35,7 @@
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
 import static org.apache.cassandra.net.MessagingService.current_version;
 import static org.apache.cassandra.utils.ExecutorUtils.runWithThreadName;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 public class Connection implements InboundMessageCallbacks, OutboundMessageCallbacks, OutboundDebugCallbacks
 {
diff --git a/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java b/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java
index eba8b65..6b0f5f5 100644
--- a/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java
+++ b/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java
@@ -43,6 +43,7 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.util.concurrent.Uninterruptibles;
 
+import org.apache.cassandra.utils.concurrent.FutureCombiner;
 import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,8 +65,9 @@
 import static java.lang.Math.min;
 import static org.apache.cassandra.net.MessagingService.current_version;
 import static org.apache.cassandra.net.ConnectionType.LARGE_MESSAGES;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
-import static org.apache.cassandra.utils.MonotonicClock.preciseTime;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.preciseTime;
 
 public class ConnectionBurnTest
 {
@@ -259,7 +261,7 @@
             Reporters reporters = new Reporters(endpoints, connections);
             try
             {
-                long deadline = System.nanoTime() + runForNanos;
+                long deadline = nanoTime() + runForNanos;
                 Verb._TEST_2.unsafeSetHandler(() -> message -> {});
                 Verb._TEST_2.unsafeSetSerializer(() -> serializer);
                 inbound.sockets.open().get();
@@ -345,7 +347,7 @@
                 executor.execute(() -> {
                     Thread.currentThread().setName("Test-Reconnect");
                     ThreadLocalRandom random = ThreadLocalRandom.current();
-                    while (deadline > System.nanoTime())
+                    while (deadline > nanoTime())
                     {
                         try
                         {
@@ -411,7 +413,7 @@
                     };
 
                     int count = 0;
-                    while (deadline > System.nanoTime())
+                    while (deadline > nanoTime())
                     {
 
                         try
@@ -465,7 +467,7 @@
                     }
                 });
 
-                while (deadline > System.nanoTime() && failed.getCount() > 0)
+                while (deadline > nanoTime() && failed.getCount() > 0)
                 {
                     reporters.update();
                     reporters.print();
@@ -481,7 +483,7 @@
                 reporters.print();
 
                 inbound.sockets.close().get();
-                new FutureCombiner(Arrays.stream(connections)
+                FutureCombiner.allOf(Arrays.stream(connections)
                                          .map(c -> c.outbound.close(false))
                                          .collect(Collectors.toList()))
                 .get();
diff --git a/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java b/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java
index 9b23041..da453fe 100644
--- a/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java
+++ b/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java
@@ -54,4 +54,4 @@
     {
         return new GlobalInboundSettings(queueCapacity, endpointReserveLimit, globalReserveLimit, template);
     }
-}
\ No newline at end of file
+}
diff --git a/test/burn/org/apache/cassandra/net/MessageGenerator.java b/test/burn/org/apache/cassandra/net/MessageGenerator.java
index 3c03689..43ea16e 100644
--- a/test/burn/org/apache/cassandra/net/MessageGenerator.java
+++ b/test/burn/org/apache/cassandra/net/MessageGenerator.java
@@ -31,7 +31,7 @@
 import sun.misc.Unsafe;
 
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 abstract class MessageGenerator
 {
diff --git a/test/burn/org/apache/cassandra/net/Reporters.java b/test/burn/org/apache/cassandra/net/Reporters.java
index 9ab4643..1f4f823 100644
--- a/test/burn/org/apache/cassandra/net/Reporters.java
+++ b/test/burn/org/apache/cassandra/net/Reporters.java
@@ -30,12 +30,14 @@
 
 import org.apache.cassandra.locator.InetAddressAndPort;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 class Reporters
 {
     final Collection<InetAddressAndPort> endpoints;
     final Connection[] connections;
     final List<Reporter> reporters;
-    final long start = System.nanoTime();
+    final long start = nanoTime();
 
     Reporters(Collection<InetAddressAndPort> endpoints, Connection[] connections)
     {
@@ -66,7 +68,7 @@
 
     void print()
     {
-        System.out.println("==" + prettyPrintElapsed(System.nanoTime() - start) + "==\n");
+        System.out.println("==" + prettyPrintElapsed(nanoTime() - start) + "==\n");
 
         for (Reporter reporter : reporters)
         {
diff --git a/test/burn/org/apache/cassandra/net/Verifier.java b/test/burn/org/apache/cassandra/net/Verifier.java
index 219e613..cd6bdc3 100644
--- a/test/burn/org/apache/cassandra/net/Verifier.java
+++ b/test/burn/org/apache/cassandra/net/Verifier.java
@@ -32,14 +32,11 @@
 import org.slf4j.LoggerFactory;
 
 import com.carrotsearch.hppc.LongObjectHashMap;
-import com.carrotsearch.hppc.predicates.LongObjectPredicate;
 import com.carrotsearch.hppc.procedures.LongObjectProcedure;
-import com.carrotsearch.hppc.procedures.LongProcedure;
 import org.apache.cassandra.net.Verifier.ExpiredMessageEvent.ExpirationType;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
 import static java.util.concurrent.TimeUnit.*;
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
 import static org.apache.cassandra.net.MessagingService.current_version;
 import static org.apache.cassandra.net.ConnectionType.LARGE_MESSAGES;
@@ -65,7 +62,9 @@
 import static org.apache.cassandra.net.Verifier.EventType.SENT_FRAME;
 import static org.apache.cassandra.net.Verifier.EventType.SERIALIZE;
 import static org.apache.cassandra.net.Verifier.ExpiredMessageEvent.ExpirationType.ON_SENT;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
 
 /**
  * This class is a single-threaded verifier monitoring a single link, with events supplied by inbound and outbound threads
@@ -1110,7 +1109,7 @@
                                 throw new IllegalStateException();
                         }
 
-                        now = System.nanoTime();
+                        now = nanoTime();
                         if (m.expiresAtNanos > now)
                         {
                             // we fix the conversion AlmostSameTime for an entire run, which should suffice to guarantee these comparisons
@@ -1281,7 +1280,7 @@
 
         // we use a concurrent skip list to permit efficient searching, even if we always append
         final ConcurrentSkipListMap<Long, Chunk> chunkList = new ConcurrentSkipListMap<>();
-        final WaitQueue writerWaiting = new WaitQueue();
+        final WaitQueue writerWaiting = newWaitQueue();
 
         volatile Chunk writerChunk = new Chunk(0);
         Chunk readerChunk = writerChunk;
@@ -1356,7 +1355,7 @@
 
         public Event await(long id, long timeout, TimeUnit unit) throws InterruptedException
         {
-            return await(id, System.nanoTime() + unit.toNanos(timeout));
+            return await(id, nanoTime() + unit.toNanos(timeout));
         }
 
         public Event await(long id, long deadlineNanos) throws InterruptedException
@@ -1370,7 +1369,7 @@
             readerWaiting = Thread.currentThread();
             while (null == (result = chunk.get(id)))
             {
-                long waitNanos = deadlineNanos - System.nanoTime();
+                long waitNanos = deadlineNanos - nanoTime();
                 if (waitNanos <= 0)
                     return null;
                 LockSupport.parkNanos(waitNanos);
diff --git a/test/burn/org/apache/cassandra/transport/BurnTestUtil.java b/test/burn/org/apache/cassandra/transport/BurnTestUtil.java
index 9610455..c8017d1 100644
--- a/test/burn/org/apache/cassandra/transport/BurnTestUtil.java
+++ b/test/burn/org/apache/cassandra/transport/BurnTestUtil.java
@@ -35,6 +35,9 @@
 import org.apache.cassandra.net.ResourceLimits;
 import org.apache.cassandra.transport.messages.QueryMessage;
 import org.apache.cassandra.transport.messages.ResultMessage;
+import org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter;
+
+import static org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter.NO_OP_LIMITER;
 
 public class BurnTestUtil
 {
@@ -143,6 +146,12 @@
                     return delegate.endpointWaitQueue();
                 }
 
+                @Override
+                public NonBlockingRateLimiter requestRateLimiter()
+                {
+                    return NO_OP_LIMITER;
+                }
+                
                 public void release()
                 {
                     delegate.release();
diff --git a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
index 37ebec1..7856b1b 100644
--- a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
+++ b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
@@ -45,6 +45,7 @@
 import static org.apache.cassandra.transport.BurnTestUtil.generateQueryMessage;
 import static org.apache.cassandra.transport.BurnTestUtil.generateQueryStatement;
 import static org.apache.cassandra.transport.BurnTestUtil.generateRows;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.assertj.core.api.Assertions.assertThat;
 
 public class DriverBurnTest extends CQLTester
@@ -62,7 +63,7 @@
             }
         };
 
-        requireNetwork((builder) -> builder.withPipelineConfigurator(configurator));
+        requireNetwork(builder -> builder.withPipelineConfigurator(configurator), builder -> {});
     }
 
     @Test
@@ -385,10 +386,10 @@
 
                         for (int j = 0; j < perThread; j++)
                         {
-                            long startNanos = System.nanoTime();
+                            long startNanos = nanoTime();
                             ResultSetFuture future = session.executeAsync(request);
                             future.addListener(() -> {
-                                long diff = System.nanoTime() - startNanos;
+                                long diff = nanoTime() - startNanos;
                                 if (measure.get())
                                 {
                                     lock.lock();
@@ -437,4 +438,4 @@
         System.out.println("99p:      " + stats.getPercentile(0.99));
     }
 }
-// TODO: test disconnecting and reconnecting constantly
\ No newline at end of file
+// TODO: test disconnecting and reconnecting constantly
diff --git a/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java b/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java
index 7e57916..2d863cf 100644
--- a/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java
+++ b/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java
@@ -212,4 +212,4 @@
         server.stop();
     }
 
-}
\ No newline at end of file
+}
diff --git a/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java b/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java
index 20b8cb3..a050245 100644
--- a/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java
+++ b/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java
@@ -23,12 +23,14 @@
 import java.util.*;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;
 
-import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import com.google.common.util.concurrent.RateLimiter;
+import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -41,15 +43,18 @@
 import org.apache.cassandra.auth.AllowAllNetworkAuthorizer;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.exceptions.OverloadedException;
 import org.apache.cassandra.metrics.ClientMetrics;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.transport.messages.QueryMessage;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.AssertUtil;
+import org.apache.cassandra.utils.Throwables;
 
 import static org.apache.cassandra.transport.BurnTestUtil.SizeCaps;
 import static org.apache.cassandra.transport.BurnTestUtil.generateQueryMessage;
 import static org.apache.cassandra.transport.BurnTestUtil.generateRows;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 @RunWith(Parameterized.class)
 public class SimpleClientPerfTest
@@ -57,7 +62,7 @@
     @Parameterized.Parameter
     public ProtocolVersion version;
 
-    @Parameterized.Parameters()
+    @Parameterized.Parameters(name="{0}")
     public static Collection<Object[]> versions()
     {
         return ProtocolVersion.SUPPORTED.stream()
@@ -90,6 +95,7 @@
         }
     }
 
+    @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"})
     @Test
     public void measureSmall() throws Throwable
     {
@@ -102,6 +108,7 @@
                  version);
     }
 
+    @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"})
     @Test
     public void measureSmallWithCompression() throws Throwable
     {
@@ -114,6 +121,7 @@
                  version);
     }
 
+    @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"})
     @Test
     public void measureLarge() throws Throwable
     {
@@ -126,6 +134,7 @@
                  version);
     }
 
+    @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"})
     @Test
     public void measureLargeWithCompression() throws Throwable
     {
@@ -138,6 +147,7 @@
                  version);
     }
 
+    @SuppressWarnings({"UnstableApiUsage", "UseOfSystemOutOrSystemErr", "ResultOfMethodCallIgnored"})
     public void perfTest(SizeCaps requestCaps, SizeCaps responseCaps, AssertUtil.ThrowingSupplier<SimpleClient> clientSupplier, ProtocolVersion version) throws Throwable
     {
         ResultMessage.Rows response = generateRows(0, responseCaps);
@@ -190,7 +200,9 @@
         AtomicBoolean measure = new AtomicBoolean(false);
         DescriptiveStatistics stats = new DescriptiveStatistics();
         Lock lock = new ReentrantLock();
-
+        RateLimiter limiter = RateLimiter.create(2000);
+        AtomicLong overloadedExceptions = new AtomicLong(0);
+        
         // TODO: exercise client -> server large messages
         for (int t = 0; t < threads; t++)
         {
@@ -203,24 +215,53 @@
                         for (int j = 0; j < 1; j++)
                             messages.add(requestMessage);
 
-                        if (measure.get())
-                        {
-                            long nanoStart = System.nanoTime();
-                            client.execute(messages);
-                            long diff = System.nanoTime() - nanoStart;
+                            if (measure.get())
+                            {
+                                try
+                                {
+                                    limiter.acquire();
+                                    long nanoStart = nanoTime();
+                                    client.execute(messages);
+                                    long elapsed = nanoTime() - nanoStart;
 
-                            lock.lock();
-                            try
-                            {
-                                stats.addValue(TimeUnit.MICROSECONDS.toMillis(diff));
+                                    lock.lock();
+                                    try
+                                    {
+                                        stats.addValue(TimeUnit.NANOSECONDS.toMicros(elapsed));
+                                    }
+                                    finally
+                                    {
+                                        lock.unlock();
+                                    }
+                                }
+                                catch (RuntimeException e)
+                                {
+                                    if (Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException))
+                                    {
+                                        overloadedExceptions.incrementAndGet();
+                                    }
+                                    else
+                                    {
+                                        throw e;
+                                    }
+                                }
                             }
-                            finally
+                            else
                             {
-                                lock.unlock();
+                                try
+                                {
+                                    limiter.acquire();
+                                    client.execute(messages); // warm-up
+                                }
+                                catch (RuntimeException e)
+                                {
+                                    // Ignore overloads during warmup...
+                                    if (!Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException))
+                                    {
+                                        throw e;
+                                    }
+                                }
                             }
-                        }
-                        else
-                            client.execute(messages); // warm-up
                     }
                 }
                 catch (Throwable e)
@@ -240,14 +281,19 @@
 
         System.out.println("requestSize = " + requestSize);
         System.out.println("responseSize = " + responseSize);
+
+        System.out.println("Latencies (in microseconds)");
+        System.out.println("Elements: " + stats.getN());
         System.out.println("Mean:     " + stats.getMean());
         System.out.println("Variance: " + stats.getVariance());
         System.out.println("Median:   " + stats.getPercentile(0.5));
         System.out.println("90p:      " + stats.getPercentile(0.90));
         System.out.println("95p:      " + stats.getPercentile(0.95));
         System.out.println("99p:      " + stats.getPercentile(0.99));
+        System.out.println("Max:      " + stats.getMax());
+        
+        System.out.println("Failed due to overload: " + overloadedExceptions.get());
 
         server.stop();
     }
 }
-
diff --git a/test/burn/org/apache/cassandra/utils/LongBTreeTest.java b/test/burn/org/apache/cassandra/utils/LongBTreeTest.java
index aaa4e53..01b4493 100644
--- a/test/burn/org/apache/cassandra/utils/LongBTreeTest.java
+++ b/test/burn/org/apache/cassandra/utils/LongBTreeTest.java
@@ -53,6 +53,7 @@
 import static java.util.Comparator.naturalOrder;
 import static java.util.Comparator.reverseOrder;
 import static org.apache.cassandra.utils.btree.BTree.iterable;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -1179,7 +1180,7 @@
     {
         args = Arrays.copyOf(args, args.length + 1);
         System.arraycopy(args, 0, args, 1, args.length - 1);
-        args[0] = System.currentTimeMillis();
+        args[0] = currentTimeMillis();
         System.out.printf("%tT: " + formatstr + "\n", args);
     }
 }
\ No newline at end of file
diff --git a/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java b/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java
index 117ee7c..caf8ec2 100644
--- a/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java
+++ b/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java
@@ -39,6 +39,7 @@
 import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.utils.DynamicList;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 /**
@@ -210,7 +211,7 @@
             this.threadCount = threadCount;
             this.duration = duration;
             this.poolSize = Math.toIntExact(poolSize);
-            until = System.nanoTime() + duration;
+            until = nanoTime() + duration;
             latch = new CountDownLatch(threadCount);
             sharedRecycle = new SPSCQueue[threadCount];
 
@@ -329,7 +330,7 @@
         logger.info("{} - testing {} threads for {}m", DATE_FORMAT.format(new Date()), threadCount, TimeUnit.NANOSECONDS.toMinutes(duration));
         logger.info("Testing BufferPool with memoryUsageThreshold={} and enabling BufferPool.DEBUG", bufferPool.memoryUsageThreshold());
         Debug debug = new Debug();
-        bufferPool.debug(debug);
+        bufferPool.debug(debug, null);
 
         TestEnvironment testEnv = new TestEnvironment(threadCount, duration, bufferPool.memoryUsageThreshold());
 
@@ -380,7 +381,7 @@
         assertEquals(0, testEnv.executorService.shutdownNow().size());
 
         logger.info("Reverting BufferPool DEBUG config");
-        bufferPool.debug(BufferPool.Debug.NO_OP);
+        bufferPool.debug(BufferPool.Debug.NO_OP, null);
 
         testEnv.assertCheckedThreadsSucceeded();
 
@@ -429,7 +430,7 @@
                         }
                         else if (!recycleFromNeighbour())
                         {
-                            if (++spinCount > 1000 && System.nanoTime() > until)
+                            if (++spinCount > 1000 && nanoTime() > until)
                                 return;
                             // otherwise, free one of our other neighbour's buffers if can; and otherwise yield
                             Thread.yield();
@@ -674,7 +675,7 @@
         {
             try
             {
-                while (System.nanoTime() < until)
+                while (nanoTime() < until)
                 {
                     checkpoint();
                     for (int i = 0 ; i < 100 ; i++)
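The Clock-related hunks in the two burn tests above swap direct System.currentTimeMillis()/System.nanoTime() calls for the statically imported helpers from org.apache.cassandra.utils.Clock.Global, so the tests read time through the pluggable clock. A minimal sketch of the same substitution pattern, using only the static imports the diff itself adds (the surrounding class and method are hypothetical, for illustration only):

// Route time reads through the pluggable clock, as the hunks above do; the class
// and method here are illustrative and not part of the patch.
import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
import static org.apache.cassandra.utils.Clock.Global.nanoTime;

final class TimedLoopExample
{
    static void runFor(long durationNanos, Runnable body)
    {
        long until = nanoTime() + durationNanos;                // instead of System.nanoTime()
        while (nanoTime() < until)
            body.run();
        System.out.printf("%tT: done%n", currentTimeMillis());  // instead of System.currentTimeMillis()
    }
}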
diff --git a/test/conf/cassandra-converters-special-cases-old-names.yaml b/test/conf/cassandra-converters-special-cases-old-names.yaml
new file mode 100644
index 0000000..b3e26d8
--- /dev/null
+++ b/test/conf/cassandra-converters-special-cases-old-names.yaml
@@ -0,0 +1,10 @@
+#
+# This test config is used to test the Converters added for configuration backward compatibility in 4.1 post
+# CASSANDRA-15234. It tests setting old Config names (CASSANDRA-17737)
+#
+sstable_preemptive_open_interval_in_mb: -1
+index_summary_resize_interval_in_minutes: -1
+cache_load_timeout_seconds: -1
+commitlog_sync_group_window_in_ms: NaN
+credentials_update_interval_in_ms: -1
+
diff --git a/test/conf/cassandra-converters-special-cases.yaml b/test/conf/cassandra-converters-special-cases.yaml
new file mode 100644
index 0000000..f3f896a
--- /dev/null
+++ b/test/conf/cassandra-converters-special-cases.yaml
@@ -0,0 +1,10 @@
+#
+# This test config is used to test the Converters added for configuration backward compatibility in 4.1 post
+# CASSANDRA-15234. It tests setting new Config names (CASSANDRA-17737)
+#
+sstable_preemptive_open_interval:
+index_summary_resize_interval:
+cache_load_timeout: 0s
+commitlog_sync_group_window: 0ms
+credentials_update_interval:
+
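The two new special-case configs above feed the converter tests: the first sets the pre-4.1 option names with their sentinel values (-1 / NaN), the second sets the corresponding new names with their null or zero equivalents. As a rough, hypothetical smoke check (not the project's actual test harness), the old-names file can be loaded with SnakeYAML and the raw values inspected before any converter runs:

// Hypothetical sketch only: dump the raw values from the old-names special-case config.
// Whether "NaN" arrives as a Double or a String depends on the YAML parser's resolvers,
// which is exactly the kind of special case the converters have to handle.
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.Map;
import org.yaml.snakeyaml.Yaml;

public class ConverterSpecialCasesDump
{
    public static void main(String[] args) throws Exception
    {
        try (InputStream in = new FileInputStream("test/conf/cassandra-converters-special-cases-old-names.yaml"))
        {
            Map<String, Object> raw = new Yaml().load(in);
            raw.forEach((name, value) ->
                System.out.printf("%s = %s (%s)%n", name, value,
                                  value == null ? "null" : value.getClass().getSimpleName()));
        }
    }
}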
diff --git a/test/conf/cassandra-murmur.yaml b/test/conf/cassandra-murmur.yaml
index 3c263a5..c0c2ae7 100644
--- a/test/conf/cassandra-murmur.yaml
+++ b/test/conf/cassandra-murmur.yaml
@@ -6,7 +6,7 @@
 memtable_allocation_type: heap_buffers
 commitlog_sync: batch
 commitlog_sync_batch_window_in_ms: 1.0
-commitlog_segment_size_in_mb: 5
+commitlog_segment_size: 5MiB
 commitlog_directory: build/test/cassandra/commitlog
 cdc_raw_directory: build/test/cassandra/cdc_raw
 cdc_enabled: false
@@ -16,7 +16,7 @@
 storage_port: 7012
 start_native_transport: true
 native_transport_port: 9042
-column_index_size_in_kb: 4
+column_index_size: 4KiB
 saved_caches_directory: build/test/cassandra/saved_caches
 data_file_directories:
     - build/test/cassandra/data
@@ -35,10 +35,10 @@
     truststore_password: cassandra
 incremental_backups: true
 concurrent_compactors: 4
-compaction_throughput_mb_per_sec: 0
+compaction_throughput: 0MiB/s
 row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-row_cache_size_in_mb: 16
-enable_user_defined_functions: true
-enable_scripted_user_defined_functions: true
-enable_sasi_indexes: true
-enable_materialized_views: true
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+sasi_indexes_enabled: true
+materialized_views_enabled: true
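The cassandra-murmur.yaml hunks above show the renames this merge applies across the test configs: size and rate options gain explicit units, and the enable_* flags become *_enabled. Collected as a plain lookup table, with the values exactly as they appear in the diffs (illustration only, not Cassandra's converter code):

// Old test-config keys -> new 4.1 keys, exactly as rewritten in the hunks above.
import java.util.Map;

final class TestConfigRenames
{
    static final Map<String, String> RENAMES = Map.of(
        "commitlog_segment_size_in_mb: 5",              "commitlog_segment_size: 5MiB",
        "column_index_size_in_kb: 4",                   "column_index_size: 4KiB",
        "compaction_throughput_mb_per_sec: 0",          "compaction_throughput: 0MiB/s",
        "row_cache_size_in_mb: 16",                     "row_cache_size: 16MiB",
        "enable_user_defined_functions: true",          "user_defined_functions_enabled: true",
        "enable_scripted_user_defined_functions: true", "scripted_user_defined_functions_enabled: true",
        "enable_sasi_indexes: true",                    "sasi_indexes_enabled: true",
        "enable_materialized_views: true",              "materialized_views_enabled: true");
}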
diff --git a/test/conf/cassandra-old.yaml b/test/conf/cassandra-old.yaml
new file mode 100644
index 0000000..86983ac
--- /dev/null
+++ b/test/conf/cassandra-old.yaml
@@ -0,0 +1,58 @@
+#
+# Warning!
+# Consider the effects on 'o.a.c.i.s.LegacySSTableTest' before changing schemas in this file.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_sync_batch_window_in_ms: 1.0
+commitlog_segment_size_in_mb: 5
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput_mb_per_sec: 64
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size_in_mb: 16
+enable_user_defined_functions: true
+enable_scripted_user_defined_functions: true
+prepared_statements_cache_size_mb:
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound_megabits_per_sec: 200000000
+enable_sasi_indexes: true
+enable_materialized_views: true
+enable_drop_compact_storage: true
+file_cache_enabled: true
+internode_send_buff_size_in_bytes: 5
+internode_recv_buff_size_in_bytes: 5
+max_hint_window_in_ms: 10800000
+cache_load_timeout_seconds: 35
diff --git a/test/conf/cassandra-pem-jks-sslcontextfactory.yaml b/test/conf/cassandra-pem-jks-sslcontextfactory.yaml
new file mode 100644
index 0000000..1f10e6c
--- /dev/null
+++ b/test/conf/cassandra-pem-jks-sslcontextfactory.yaml
@@ -0,0 +1,150 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Testing the pluggable ssl_context_factory option for client and server encryption options, with valid and
+# missing implementation classes.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_segment_size: 5MiB
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+client_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+        parameters:
+          private_key: |
+            -----BEGIN ENCRYPTED PRIVATE KEY-----
+            MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/
+            g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl
+            xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29
+            L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V
+            sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/
+            f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8
+            RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR
+            0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs
+            evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU
+            tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6
+            wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN
+            K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv
+            zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5
+            mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo
+            WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ
+            jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6
+            eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny
+            DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn
+            AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY
+            Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow
+            Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut
+            ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr
+            bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH
+            hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI
+            RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw
+            pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP
+            FujZhqbKUDbYAcqTkoQ=
+            -----END ENCRYPTED PRIVATE KEY-----
+            -----BEGIN CERTIFICATE-----
+            MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+            bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+            VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+            Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+            EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+            a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+            FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+            MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+            ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+            q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+            TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+            TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+            YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+            N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+            iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+            IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+            6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+            qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+            HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+            n3MVF9w=
+            -----END CERTIFICATE-----
+          private_key_password: "cassandra"
+          trusted_certificates: |
+            -----BEGIN CERTIFICATE-----
+            MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+            bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+            VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+            Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+            EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+            a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+            FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+            MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+            ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+            q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+            TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+            TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+            YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+            N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+            iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+            IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+            6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+            qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+            HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+            n3MVF9w=
+            -----END CERTIFICATE-----
+server_encryption_options:
+    internode_encryption: none
+    keystore: test/conf/cassandra_ssl_test.keystore
+    keystore_password: cassandra
+    truststore: test/conf/cassandra_ssl_test.truststore
+    truststore_password: cassandra
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput: 0MiB/s
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+prepared_statements_cache_size: 1MiB
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound: 24MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+file_cache_enabled: true
diff --git a/test/conf/cassandra-pem-sslcontextfactory-invalidconfiguration.yaml b/test/conf/cassandra-pem-sslcontextfactory-invalidconfiguration.yaml
new file mode 100644
index 0000000..8c7d910
--- /dev/null
+++ b/test/conf/cassandra-pem-sslcontextfactory-invalidconfiguration.yaml
@@ -0,0 +1,147 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Testing the pluggable ssl_context_factory option for client and server encryption options, with valid and
+# missing implementation classes.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_segment_size: 5MiB
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+client_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+        parameters:
+            private_key: |
+             -----BEGIN ENCRYPTED PRIVATE KEY-----
+             MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/
+             g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl
+             xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29
+             L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V
+             sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/
+             f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8
+             RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR
+             0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs
+             evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU
+             tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6
+             wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN
+             K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv
+             zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5
+             mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo
+             WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ
+             jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6
+             eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny
+             DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn
+             AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY
+             Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow
+             Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut
+             ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr
+             bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH
+             hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI
+             RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw
+             pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP
+             FujZhqbKUDbYAcqTkoQ=
+             -----END ENCRYPTED PRIVATE KEY-----
+             -----BEGIN CERTIFICATE-----
+             MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+             bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+             VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+             Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+             EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+             a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+             FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+             MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+             ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+             q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+             TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+             TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+             YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+             N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+             iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+             IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+             6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+             qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+             HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+             n3MVF9w=
+             -----END CERTIFICATE-----
+            private_key_password: "cassandra"
+            trusted_certificates: |
+              -----BEGIN CERTIFICATE-----
+              MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+              bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+              VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+              Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+              EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+              a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+              FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+              MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+              ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+              q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+              TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+              TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+              YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+              N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+              iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+              IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+              6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+              qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+              HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+              n3MVF9w=
+              -----END CERTIFICATE-----
+    keystore: test/conf/cassandra_ssl_test.keystore.pem
+    keystore_password: cassandra
+    truststore: test/conf/cassandra_ssl_test.truststore.pem
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput: 0MiB/s
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+prepared_statements_cache_size: 1MiB
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound: 24MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+file_cache_enabled: true
diff --git a/test/conf/cassandra-pem-sslcontextfactory-mismatching-passwords.yaml b/test/conf/cassandra-pem-sslcontextfactory-mismatching-passwords.yaml
new file mode 100644
index 0000000..5d64354
--- /dev/null
+++ b/test/conf/cassandra-pem-sslcontextfactory-mismatching-passwords.yaml
@@ -0,0 +1,154 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Testing the pluggable ssl_context_factory option for client and server encryption options, with valid and
+# missing implementation classes.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_segment_size: 5MiB
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+client_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+        parameters:
+            private_key: |
+             -----BEGIN ENCRYPTED PRIVATE KEY-----
+             MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/
+             g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl
+             xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29
+             L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V
+             sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/
+             f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8
+             RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR
+             0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs
+             evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU
+             tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6
+             wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN
+             K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv
+             zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5
+             mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo
+             WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ
+             jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6
+             eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny
+             DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn
+             AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY
+             Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow
+             Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut
+             ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr
+             bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH
+             hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI
+             RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw
+             pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP
+             FujZhqbKUDbYAcqTkoQ=
+             -----END ENCRYPTED PRIVATE KEY-----
+             -----BEGIN CERTIFICATE-----
+             MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+             bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+             VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+             Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+             EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+             a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+             FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+             MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+             ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+             q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+             TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+             TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+             YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+             N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+             iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+             IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+             6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+             qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+             HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+             n3MVF9w=
+             -----END CERTIFICATE-----
+            private_key_password: "cassandra"
+            trusted_certificates: |
+              -----BEGIN CERTIFICATE-----
+              MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+              bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+              VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+              Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+              EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+              a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+              FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+              MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+              ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+              q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+              TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+              TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+              YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+              N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+              iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+              IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+              6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+              qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+              HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+              n3MVF9w=
+              -----END CERTIFICATE-----
+    keystore_password: cassandra-abracadbra
+server_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+        parameters:
+          private_key_password: "cassandra-abracadbra"
+    internode_encryption: none
+    keystore: test/conf/cassandra_ssl_test.keystore.pem
+    keystore_password: cassandra
+    truststore: test/conf/cassandra_ssl_test.truststore.pem
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput: 0MiB/s
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: false
+prepared_statements_cache_size: 1MiB
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound: 24MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+file_cache_enabled: true
diff --git a/test/conf/cassandra-pem-sslcontextfactory-unencryptedkeys.yaml b/test/conf/cassandra-pem-sslcontextfactory-unencryptedkeys.yaml
new file mode 100644
index 0000000..7b08bcc
--- /dev/null
+++ b/test/conf/cassandra-pem-sslcontextfactory-unencryptedkeys.yaml
@@ -0,0 +1,148 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Testing the pluggable ssl_context_factory option for client and server encryption options, with valid and
+# missing implementation classes.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_segment_size: 5MiB
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+client_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+        parameters:
+            private_key: |
+              -----BEGIN PRIVATE KEY-----
+              MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCOSZVf8dLj1xLw
+              efqjbogbAwhwRXd3tmEfQY1zHyudJF3XR1T0Xp26BKNvAYUxxZRDNg16M3prPRZv
+              wkhDJdE9NaN+BpZPJUavRsZOfGDq/CHN8j/2PPKrn3G/b065JjV3GZjfV/Sln047
+              DeZptaSyOg7ZN7F8qjGqxEcnw+szV/wTzhnjGjZMlcbOm4jFGQAn6xUOooQCGsoB
+              9FgxKB0nvNG3xVe/2eCNfbS4DabT3Y1wfQqZ62hOa5ZS0rwT+pw3tQs3zFFY1Sfi
+              G7qYbqZrKTheWIZGojVVm6mqH4yA2ofOe5N3RsBitCwU/D0dpnaG9Wcl98nmXueM
+              B6Rk04v7AgMBAAECggEAYnxIKjrFz/JkJ5MmiszM5HV698r9YB0aqHnFIHPoykIL
+              uiCjiumantDrFsCkosixULwvI/BRwbxstTpyrheU9psT6P1CONICVPvV8ylgJAYU
+              l+ofn56cEXKxVuICSWFLDH7pM1479g+IJJQAchbKQpqxAGTuMu3SpvJolfuj5srt
+              bM7/RYhJFLwDuvHNA3ivlogMneItP03+C25aaxstM+lBuBf68+n78zMgSvt6J/6Y
+              G2TOMKnxveMlG2qu9l2lAw/2i8daG/qre08nTH7wpRx0gZLZqNpe45exkrzticzF
+              FgWYjG2K2brX21jqHroFgMhdXF7zhhRgLoIeC0BrIQKBgQDCfGfWrJESKBbVai5u
+              7wqD9nlzjv6N6FXfTDOPXO1vz5frdvtLVWbs0SMPy+NglkaZK0iqHvb9mf2of8eC
+              0D5cmewjn7WCDBQMypIMYgT912ak/BBVuGXcxb6UgD+xARfSARo2C8NG1hfprw1W
+              ad14CjS5xhFMs44HpVYhI7iPYwKBgQC7SqVG/b37vZ7CINemdvoMujLvvYXDJJM8
+              N21LqNJfVXdukdH3T0xuLnh9Z/wPHjJDMF/9+1foxSEPHijtyz5P19EilNEC/3qw
+              fI19+VZoY0mdhPtXSGzy+rbTE2v71QgwFLizSos14Gr+eNiIjF7FYccK05++K/zk
+              cd8ZA3bwiQKBgQCl+HTFBs9mpz+VMOAfW2+l3hkXPNiPUc62mNkHZ05ZNNd44jjh
+              uSf0wSUiveR08MmevQlt5K7zDQ8jVKh2QjB15gVXAVxsdtJFeDnax2trFP9LnLBz
+              9sE2/qn9INU5wK0LUlWD+dXUBbCyg+jl7cJKRqtoPldVFYYHkFlIPqup8QKBgHXv
+              hyuw1FUVDkdHzwOvn70r8q8sNHKxMVWVwWkHIZGOi+pAQGrusD4hXRX6yKnsZdIR
+              QCD6iFy25R5T64nxlYdJaxPPid3NakB/7ckJnPOWseBSwMIxhQlr/nvjmve1Kba9
+              FaEwq4B9lGIxToiNe4/nBiM3JzvlDxX67nUdzWOhAoGAdFvriyvjshSJ4JHgIY9K
+              37BVB0VKMcFV2P8fLVWO5oyRtE1bJhU4QVpQmauABU4RGSojJ3NPIVH1wxmJeYtj
+              Q3b7EZaqI6ovna2eK2qtUx4WwxhRaXTT8xueBI2lgL6sBSTGG+K69ZOzGQzG/Mfr
+              RXKInnLInFD9JD94VqmMozo=
+              -----END PRIVATE KEY-----
+              -----BEGIN CERTIFICATE-----
+              MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+              bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+              VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+              Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+              EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+              a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+              FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+              MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+              ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+              q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+              TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+              TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+              YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+              N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+              iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+              IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+              6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+              qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+              HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+              n3MVF9w=
+              -----END CERTIFICATE-----
+            trusted_certificates: |
+              -----BEGIN CERTIFICATE-----
+              MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+              bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+              VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+              Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+              EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+              a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+              FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+              MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+              ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+              q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+              TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+              TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+              YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+              N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+              iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+              IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+              6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+              qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+              HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+              n3MVF9w=
+              -----END CERTIFICATE-----
+server_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+    internode_encryption: none
+    keystore: test/conf/cassandra_ssl_test.unencrypted_keystore.pem
+    truststore: test/conf/cassandra_ssl_test.truststore.pem
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput: 0MiB/s
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: false
+prepared_statements_cache_size: 1MiB
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound: 24MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+file_cache_enabled: true
diff --git a/test/conf/cassandra-pem-sslcontextfactory.yaml b/test/conf/cassandra-pem-sslcontextfactory.yaml
new file mode 100644
index 0000000..26d0f1f
--- /dev/null
+++ b/test/conf/cassandra-pem-sslcontextfactory.yaml
@@ -0,0 +1,151 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Testing the pluggable ssl_context_factory option for client and server encryption options, with valid and
+# missing implementation classes.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_segment_size: 5MiB
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+client_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+        parameters:
+            private_key: |
+             -----BEGIN ENCRYPTED PRIVATE KEY-----
+             MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/
+             g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl
+             xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29
+             L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V
+             sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/
+             f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8
+             RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR
+             0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs
+             evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU
+             tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6
+             wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN
+             K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv
+             zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5
+             mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo
+             WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ
+             jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6
+             eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny
+             DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn
+             AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY
+             Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow
+             Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut
+             ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr
+             bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH
+             hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI
+             RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw
+             pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP
+             FujZhqbKUDbYAcqTkoQ=
+             -----END ENCRYPTED PRIVATE KEY-----
+             -----BEGIN CERTIFICATE-----
+             MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+             bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+             VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+             Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+             EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+             a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+             FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+             MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+             ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+             q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+             TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+             TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+             YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+             N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+             iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+             IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+             6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+             qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+             HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+             n3MVF9w=
+             -----END CERTIFICATE-----
+            private_key_password: "cassandra"
+            trusted_certificates: |
+              -----BEGIN CERTIFICATE-----
+              MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+              bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+              VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+              Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+              EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+              a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+              FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+              MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+              ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+              q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+              TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+              TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+              YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+              N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+              iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+              IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+              6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+              qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+              HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+              n3MVF9w=
+              -----END CERTIFICATE-----
+server_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.PEMBasedSslContextFactory
+    internode_encryption: none
+    keystore: test/conf/cassandra_ssl_test.keystore.pem
+    keystore_password: cassandra
+    truststore: test/conf/cassandra_ssl_test.truststore.pem
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput: 0MiB/s
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+prepared_statements_cache_size: 1MiB
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound: 24MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+file_cache_enabled: true
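The PEM-based configs above embed the private key and trusted certificates inline as PEM text. For orientation only, a certificate block like the trusted_certificates value can be decoded with nothing beyond the JDK; this is not the code path of org.apache.cassandra.security.PEMBasedSslContextFactory, just a demonstration of the format:

// Illustrative only: decode a PEM certificate block (like the trusted_certificates value
// above) into an X509Certificate using standard JDK classes.
import java.io.ByteArrayInputStream;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;

public final class PemCertificateDecoder
{
    public static X509Certificate decode(String pem) throws Exception
    {
        String base64 = pem.replace("-----BEGIN CERTIFICATE-----", "")
                           .replace("-----END CERTIFICATE-----", "")
                           .replaceAll("\\s", "");
        byte[] der = Base64.getDecoder().decode(base64);
        CertificateFactory factory = CertificateFactory.getInstance("X.509");
        return (X509Certificate) factory.generateCertificate(new ByteArrayInputStream(der));
    }
}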
diff --git a/test/conf/cassandra-seeds.yaml b/test/conf/cassandra-seeds.yaml
index f3279ae..1c38f8e 100644
--- a/test/conf/cassandra-seeds.yaml
+++ b/test/conf/cassandra-seeds.yaml
@@ -7,7 +7,7 @@
 memtable_allocation_type: offheap_objects
 commitlog_sync: batch
 commitlog_sync_batch_window_in_ms: 1.0
-commitlog_segment_size_in_mb: 5
+commitlog_segment_size: 5MiB
 commitlog_directory: build/test/cassandra/commitlog
 cdc_raw_directory: build/test/cassandra/cdc_raw
 cdc_enabled: false
@@ -17,7 +17,7 @@
 storage_port: 7012
 start_native_transport: true
 native_transport_port: 9042
-column_index_size_in_kb: 4
+column_index_size: 4KiB
 saved_caches_directory: build/test/cassandra/saved_caches
 data_file_directories:
     - build/test/cassandra/data
@@ -36,8 +36,8 @@
     truststore_password: cassandra
 incremental_backups: true
 concurrent_compactors: 4
-compaction_throughput_mb_per_sec: 0
+compaction_throughput: 0MiB/s
 row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-row_cache_size_in_mb: 16
-enable_user_defined_functions: true
-enable_scripted_user_defined_functions: true
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
diff --git a/test/conf/cassandra-sslcontextfactory-invalidconfiguration.yaml b/test/conf/cassandra-sslcontextfactory-invalidconfiguration.yaml
new file mode 100644
index 0000000..d3970cb
--- /dev/null
+++ b/test/conf/cassandra-sslcontextfactory-invalidconfiguration.yaml
@@ -0,0 +1,82 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Testing the pluggable ssl_context_factory option for client and server encryption options, with valid and
+# missing implementation classes.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_sync_batch_window_in_ms: 1.0
+commitlog_segment_size: 5MiB
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+client_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.DummySslContextFactoryImpl
+        parameters:
+            key1: "value1"
+            key2: "value2"
+            key3: "value3"
+            truststore: conf/.truststore
+            truststore_password: cassandra
+    keystore: conf/.keystore
+    keystore_password: cassandra
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput: 0MiB/s
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+prepared_statements_cache_size: 1MiB
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound: 23841858MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+file_cache_enabled: true
diff --git a/test/conf/cassandra-sslcontextfactory.yaml b/test/conf/cassandra-sslcontextfactory.yaml
new file mode 100644
index 0000000..fde4bfd
--- /dev/null
+++ b/test/conf/cassandra-sslcontextfactory.yaml
@@ -0,0 +1,85 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Testing the pluggable ssl_context_factory option for client and server encryption options, with valid and
+# missing implementation classes.
+#
+cluster_name: Test Cluster
+# memtable_allocation_type: heap_buffers
+memtable_allocation_type: offheap_objects
+commitlog_sync: batch
+commitlog_sync_batch_window_in_ms: 1.0
+commitlog_segment_size: 5MiB
+commitlog_directory: build/test/cassandra/commitlog
+# commitlog_compression:
+# - class_name: LZ4Compressor
+cdc_raw_directory: build/test/cassandra/cdc_raw
+cdc_enabled: false
+hints_directory: build/test/cassandra/hints
+partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
+listen_address: 127.0.0.1
+storage_port: 7012
+ssl_storage_port: 17012
+start_native_transport: true
+native_transport_port: 9042
+column_index_size: 4KiB
+saved_caches_directory: build/test/cassandra/saved_caches
+data_file_directories:
+    - build/test/cassandra/data
+disk_access_mode: mmap
+seed_provider:
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          - seeds: "127.0.0.1:7012"
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+dynamic_snitch: true
+client_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.DummySslContextFactoryImpl
+        parameters:
+            key1: "value1"
+            key2: "value2"
+            key3: "value3"
+    keystore: dummy-keystore
+server_encryption_options:
+    ssl_context_factory:
+        class_name: org.apache.cassandra.security.MissingSslContextFactoryImpl
+        parameters:
+            key1: "value1"
+            key2: "value2"
+            key3: "value3"
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+incremental_backups: true
+concurrent_compactors: 4
+compaction_throughput: 0MiB/s
+row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+prepared_statements_cache_size: 1MiB
+corrupted_tombstone_strategy: exception
+stream_entire_sstables: true
+stream_throughput_outbound: 23841858MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+file_cache_enabled: true
diff --git a/test/conf/cassandra.yaml b/test/conf/cassandra.yaml
index 017797f..89c5685 100644
--- a/test/conf/cassandra.yaml
+++ b/test/conf/cassandra.yaml
@@ -7,7 +7,7 @@
 memtable_allocation_type: offheap_objects
 commitlog_sync: batch
 commitlog_sync_batch_window_in_ms: 1.0
-commitlog_segment_size_in_mb: 5
+commitlog_segment_size: 5MiB
 commitlog_directory: build/test/cassandra/commitlog
 # commitlog_compression:
 # - class_name: LZ4Compressor
@@ -20,7 +20,7 @@
 ssl_storage_port: 17012
 start_native_transport: true
 native_transport_port: 9042
-column_index_size_in_kb: 4
+column_index_size: 4KiB
 saved_caches_directory: build/test/cassandra/saved_caches
 data_file_directories:
     - build/test/cassandra/data
@@ -39,18 +39,69 @@
     truststore_password: cassandra
 incremental_backups: true
 concurrent_compactors: 4
-compaction_throughput_mb_per_sec: 0
+compaction_throughput: 0MiB/s
 row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-row_cache_size_in_mb: 16
-enable_user_defined_functions: true
-enable_scripted_user_defined_functions: true
-prepared_statements_cache_size_mb: 1
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+prepared_statements_cache_size: 1MiB
 corrupted_tombstone_strategy: exception
 stream_entire_sstables: true
-stream_throughput_outbound_megabits_per_sec: 200000000
-enable_sasi_indexes: true
-enable_materialized_views: true
-enable_drop_compact_storage: true
+stream_throughput_outbound: 23841858MiB/s
+sasi_indexes_enabled: true
+materialized_views_enabled: true
+drop_compact_storage_enabled: true
 file_cache_enabled: true
 full_query_logging_options:
   allow_nodetool_archive_command: true
+auto_hints_cleanup_enabled: true
+
+read_thresholds_enabled: true
+coordinator_read_size_warn_threshold: 1024KiB
+coordinator_read_size_fail_threshold: 4096KiB
+local_read_size_warn_threshold: 4096KiB
+local_read_size_fail_threshold: 8192KiB
+row_index_read_size_warn_threshold: 4096KiB
+row_index_read_size_fail_threshold: 8192KiB
+
+memtable:
+    configurations:
+        skiplist:
+            inherits: default
+            class_name: SkipListMemtable
+        skiplist_sharded:
+            class_name: ShardedSkipListMemtable
+            parameters:
+                serialize_writes: false
+                shards: 4
+        skiplist_sharded_locking:
+            inherits: skiplist_sharded
+            parameters:
+                serialize_writes: true
+        skiplist_remapped:
+            inherits: skiplist
+        test_fullname:
+            inherits: default
+            class_name: org.apache.cassandra.db.memtable.TestMemtable
+        test_shortname:
+            class_name: TestMemtable
+            parameters:
+                skiplist: true  # note: YAML must interpret this as string, not a boolean
+        test_empty_class:
+            class_name: ""
+        test_missing_class:
+            parameters:
+        test_unknown_class:
+            class_name: NotExisting
+        test_invalid_param:
+            class_name: SkipListMemtable
+            parameters:
+                invalid: throw
+        test_invalid_extra_param:
+            inherits: test_shortname
+            parameters:
+                invalid: throw
+        test_invalid_factory_method:
+            class_name: org.apache.cassandra.cql3.validation.operations.CreateTest$InvalidMemtableFactoryMethod
+        test_invalid_factory_field:
+            class_name: org.apache.cassandra.cql3.validation.operations.CreateTest$InvalidMemtableFactoryField
diff --git a/test/conf/cassandra_deprecated_parameters_names.yaml b/test/conf/cassandra_deprecated_parameters_names.yaml
deleted file mode 100644
index 3258b7f..0000000
--- a/test/conf/cassandra_deprecated_parameters_names.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Warning!
-# Consider the effects on 'o.a.c.i.s.LegacySSTableTest' before changing schemas in this file.
-#
-cluster_name: Test Cluster
-# memtable_allocation_type: heap_buffers
-memtable_allocation_type: offheap_objects
-commitlog_sync: batch
-commitlog_sync_batch_window_in_ms: 1.0
-commitlog_segment_size_in_mb: 5
-commitlog_directory: build/test/cassandra/commitlog
-# commitlog_compression:
-# - class_name: LZ4Compressor
-cdc_raw_directory: build/test/cassandra/cdc_raw
-cdc_enabled: false
-hints_directory: build/test/cassandra/hints
-partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
-listen_address: 127.0.0.1
-storage_port: 7012
-ssl_storage_port: 17012
-start_native_transport: true
-native_transport_port: 9042
-column_index_size_in_kb: 4
-saved_caches_directory: build/test/cassandra/saved_caches
-data_file_directories:
-    - build/test/cassandra/data
-disk_access_mode: mmap
-seed_provider:
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          - seeds: "127.0.0.1:7012"
-endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
-dynamic_snitch: true
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-incremental_backups: true
-concurrent_compactors: 4
-compaction_throughput_mb_per_sec: 0
-row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-row_cache_size_in_mb: 16
-enable_user_defined_functions: true
-enable_scripted_user_defined_functions: true
-prepared_statements_cache_size_mb: 1
-corrupted_tombstone_strategy: exception
-stream_entire_sstables: true
-stream_throughput_outbound_megabits_per_sec: 200000000
-enable_sasi_indexes: true
-enable_materialized_views: true
-enable_drop_compact_storage: true
-file_cache_enabled: true
-internode_send_buff_size_in_bytes: 5
-internode_recv_buff_size_in_bytes: 5
diff --git a/test/conf/cassandra_encryption.yaml b/test/conf/cassandra_encryption.yaml
index 47e1312..3b8d08d 100644
--- a/test/conf/cassandra_encryption.yaml
+++ b/test/conf/cassandra_encryption.yaml
@@ -1,14 +1,14 @@
 transparent_data_encryption_options:
-    enabled: true
-    chunk_length_kb: 2
-    cipher: AES/CBC/PKCS5Padding
-    key_alias: testing:1
-    # CBC requires iv length to be 16 bytes
-    # iv_length: 16
-    key_provider: 
-      - class_name: org.apache.cassandra.security.JKSKeyProvider
-        parameters: 
-          - keystore: test/conf/cassandra.keystore
-            keystore_password: cassandra
-            store_type: JCEKS
-            key_password: cassandra
+  enabled: true
+  chunk_length_kb: 2
+  cipher: AES/CBC/PKCS5Padding
+  key_alias: testing:1
+  # CBC requires iv length to be 16 bytes
+  # iv_length: 16
+  key_provider:
+    - class_name: org.apache.cassandra.security.JKSKeyProvider
+      parameters:
+        - keystore: test/conf/cassandra.keystore
+          keystore_password: cassandra
+          store_type: JCEKS
+          key_password: cassandra
diff --git a/test/conf/cassandra_ssl_test.keystore.pem b/test/conf/cassandra_ssl_test.keystore.pem
new file mode 100644
index 0000000..ed981cc
--- /dev/null
+++ b/test/conf/cassandra_ssl_test.keystore.pem
@@ -0,0 +1,51 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/
+g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl
+xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29
+L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V
+sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/
+f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8
+RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR
+0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs
+evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU
+tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6
+wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN
+K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv
+zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5
+mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo
+WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ
+jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6
+eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny
+DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn
+AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY
+Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow
+Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut
+ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr
+bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH
+hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI
+RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw
+pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP
+FujZhqbKUDbYAcqTkoQ=
+-----END ENCRYPTED PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+n3MVF9w=
+-----END CERTIFICATE-----
diff --git a/test/conf/cassandra_ssl_test.truststore.pem b/test/conf/cassandra_ssl_test.truststore.pem
new file mode 100644
index 0000000..8806ce8
--- /dev/null
+++ b/test/conf/cassandra_ssl_test.truststore.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+n3MVF9w=
+-----END CERTIFICATE-----
diff --git a/test/conf/cassandra_ssl_test.unencrypted_keystore.pem b/test/conf/cassandra_ssl_test.unencrypted_keystore.pem
new file mode 100644
index 0000000..ce3d8e7
--- /dev/null
+++ b/test/conf/cassandra_ssl_test.unencrypted_keystore.pem
@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCOSZVf8dLj1xLw
+efqjbogbAwhwRXd3tmEfQY1zHyudJF3XR1T0Xp26BKNvAYUxxZRDNg16M3prPRZv
+wkhDJdE9NaN+BpZPJUavRsZOfGDq/CHN8j/2PPKrn3G/b065JjV3GZjfV/Sln047
+DeZptaSyOg7ZN7F8qjGqxEcnw+szV/wTzhnjGjZMlcbOm4jFGQAn6xUOooQCGsoB
+9FgxKB0nvNG3xVe/2eCNfbS4DabT3Y1wfQqZ62hOa5ZS0rwT+pw3tQs3zFFY1Sfi
+G7qYbqZrKTheWIZGojVVm6mqH4yA2ofOe5N3RsBitCwU/D0dpnaG9Wcl98nmXueM
+B6Rk04v7AgMBAAECggEAYnxIKjrFz/JkJ5MmiszM5HV698r9YB0aqHnFIHPoykIL
+uiCjiumantDrFsCkosixULwvI/BRwbxstTpyrheU9psT6P1CONICVPvV8ylgJAYU
+l+ofn56cEXKxVuICSWFLDH7pM1479g+IJJQAchbKQpqxAGTuMu3SpvJolfuj5srt
+bM7/RYhJFLwDuvHNA3ivlogMneItP03+C25aaxstM+lBuBf68+n78zMgSvt6J/6Y
+G2TOMKnxveMlG2qu9l2lAw/2i8daG/qre08nTH7wpRx0gZLZqNpe45exkrzticzF
+FgWYjG2K2brX21jqHroFgMhdXF7zhhRgLoIeC0BrIQKBgQDCfGfWrJESKBbVai5u
+7wqD9nlzjv6N6FXfTDOPXO1vz5frdvtLVWbs0SMPy+NglkaZK0iqHvb9mf2of8eC
+0D5cmewjn7WCDBQMypIMYgT912ak/BBVuGXcxb6UgD+xARfSARo2C8NG1hfprw1W
+ad14CjS5xhFMs44HpVYhI7iPYwKBgQC7SqVG/b37vZ7CINemdvoMujLvvYXDJJM8
+N21LqNJfVXdukdH3T0xuLnh9Z/wPHjJDMF/9+1foxSEPHijtyz5P19EilNEC/3qw
+fI19+VZoY0mdhPtXSGzy+rbTE2v71QgwFLizSos14Gr+eNiIjF7FYccK05++K/zk
+cd8ZA3bwiQKBgQCl+HTFBs9mpz+VMOAfW2+l3hkXPNiPUc62mNkHZ05ZNNd44jjh
+uSf0wSUiveR08MmevQlt5K7zDQ8jVKh2QjB15gVXAVxsdtJFeDnax2trFP9LnLBz
+9sE2/qn9INU5wK0LUlWD+dXUBbCyg+jl7cJKRqtoPldVFYYHkFlIPqup8QKBgHXv
+hyuw1FUVDkdHzwOvn70r8q8sNHKxMVWVwWkHIZGOi+pAQGrusD4hXRX6yKnsZdIR
+QCD6iFy25R5T64nxlYdJaxPPid3NakB/7ckJnPOWseBSwMIxhQlr/nvjmve1Kba9
+FaEwq4B9lGIxToiNe4/nBiM3JzvlDxX67nUdzWOhAoGAdFvriyvjshSJ4JHgIY9K
+37BVB0VKMcFV2P8fLVWO5oyRtE1bJhU4QVpQmauABU4RGSojJ3NPIVH1wxmJeYtj
+Q3b7EZaqI6ovna2eK2qtUx4WwxhRaXTT8xueBI2lgL6sBSTGG+K69ZOzGQzG/Mfr
+RXKInnLInFD9JD94VqmMozo=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV
+bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh
+Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx
+EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu
+a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw
+FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d
+ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy
+q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2
+TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto
+TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA
+YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD
+N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v
+iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh
+IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv
+6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG
+qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa
+HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru
+n3MVF9w=
+-----END CERTIFICATE-----
diff --git a/test/conf/cdc.yaml b/test/conf/cdc.yaml
index 8fb9427..f79930a 100644
--- a/test/conf/cdc.yaml
+++ b/test/conf/cdc.yaml
@@ -1,4 +1 @@
 cdc_enabled: true
-# Compression enabled since uncompressed + cdc isn't compatible w/Windows
-commitlog_compression:
-  - class_name: LZ4Compressor
diff --git a/test/conf/harry-generic.yaml b/test/conf/harry-generic.yaml
new file mode 100644
index 0000000..e4bfbab
--- /dev/null
+++ b/test/conf/harry-generic.yaml
@@ -0,0 +1,75 @@
+seed: 1
+
+# Default schema provider generates random schema
+schema_provider:
+  default: {}
+
+drop_schema: false
+create_schema: true
+truncate_table: false
+
+clock:
+  approximate_monotonic:
+    history_size: 7300
+    epoch_length: 1
+    epoch_time_unit: "SECONDS"
+
+system_under_test:
+  println: {}
+
+partition_descriptor_selector:
+  default:
+    window_size: 100
+    slide_after_repeats: 10
+
+clustering_descriptor_selector:
+  default:
+    modifications_per_lts:
+      type: "constant"
+      constant: 2
+    rows_per_modification:
+      type: "constant"
+      constant: 2
+    operation_kind_weights:
+      DELETE_RANGE: 1
+      DELETE_SLICE: 1
+      DELETE_ROW: 1
+      DELETE_COLUMN: 1
+      DELETE_PARTITION: 1
+      INSERT: 50
+      UPDATE: 50
+      DELETE_COLUMN_WITH_STATICS: 1
+      INSERT_WITH_STATICS: 1
+      UPDATE_WITH_STATICS: 1
+    column_mask_bitsets: null
+    max_partition_size: 100
+
+data_tracker:
+  default:
+    max_seen_lts: -1
+    max_complete_lts: -1
+
+runner:
+  sequential:
+    run_time: 60
+    run_time_unit: "MINUTES"
+    visitors:
+      - logging:
+          row_visitor:
+            mutating: {}
+      - sampler:
+          trigger_after: 100000
+          sample_partitions: 10
+      - validate_recent_partitions:
+          partition_count: 5
+          trigger_after: 10000
+          model:
+            querying_no_op_checker: {}
+      - validate_all_partitions:
+          concurrency: 5
+          trigger_after: 10000
+          model:
+            querying_no_op_checker: {}
+
+metric_reporter:
+  no_op: {}
\ No newline at end of file
diff --git a/test/conf/logback-simulator.xml b/test/conf/logback-simulator.xml
new file mode 100644
index 0000000..25c9de8
--- /dev/null
+++ b/test/conf/logback-simulator.xml
@@ -0,0 +1,59 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration debug="false" scan="true" scanPeriod="60 seconds">
+  <define name="cluster_id" class="org.apache.cassandra.distributed.impl.ClusterIDDefiner" />
+  <define name="instance_id" class="org.apache.cassandra.distributed.impl.InstanceIDDefiner" />
+
+  <!-- Shutdown hook ensures that async appender flushes -->
+  <shutdownHook class="ch.qos.logback.core.hook.DelayingShutdownHook"/>
+
+  <appender name="INSTANCEFILE" class="ch.qos.logback.core.FileAppender">
+    <file>./build/test/logs/${cassandra.testtag}/${suitename}/${cluster_id}/${instance_id}/system.log</file>
+    <encoder>
+      <pattern>%-5level [%thread] ${instance_id} %date{ISO8601} %msg%n</pattern>
+    </encoder>
+    <immediateFlush>true</immediateFlush>
+  </appender>
+
+  <appender name="STDOUT" target="System.out" class="ch.qos.logback.core.ConsoleAppender">
+    <encoder>
+      <pattern>%-5level [%thread] ${instance_id} %date{ISO8601} %F:%L - %msg%n</pattern>
+    </encoder>
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+      <level>WARN</level>
+    </filter>
+  </appender>
+
+  <logger name="org.apache.cassandra.locator.TokenMetadata" level="ERROR"/>
+  <logger name="org.apache.cassandra.net.Message" level="ERROR"/>
+  <logger name="org.reflections.Reflections" level="ERROR"/>
+  <logger name="org.apache.hadoop" level="WARN"/>
+  <logger name="org.apache.cassandra.utils.SigarLibrary" level="ERROR"/>
+  <logger name="org.apache.cassandra.utils.FBUtilities" level="ERROR"/>
+  <logger name="org.apache.cassandra.config.DatabaseDescriptor" level="ERROR"/>
+  <logger name="org.apache.cassandra.service.StartupChecks" level="ERROR"/>
+  <logger name="org.apache.cassandra.metrics.DecayingEstimatedHistogramReservoir" level="ERROR"/>
+  <logger name="io.netty.handler.ssl.SslHandler" level="WARN"/>
+
+  <root level="INFO">
+    <appender-ref ref="INSTANCEFILE" />
+    <appender-ref ref="STDOUT" />
+  </root>
+</configuration>
+
diff --git a/test/conf/unit-test-conf/test-native-port.yaml b/test/conf/unit-test-conf/test-native-port.yaml
index b46525f..c8ed929 100644
--- a/test/conf/unit-test-conf/test-native-port.yaml
+++ b/test/conf/unit-test-conf/test-native-port.yaml
@@ -7,7 +7,7 @@
 memtable_allocation_type: offheap_objects
 commitlog_sync: batch
 commitlog_sync_batch_window_in_ms: 1.0
-commitlog_segment_size_in_mb: 5
+commitlog_segment_size: 5MiB
 commitlog_directory: build/test/cassandra/commitlog
 # commitlog_compression:
 # - class_name: LZ4Compressor
@@ -20,7 +20,7 @@
 ssl_storage_port: 7011
 start_native_transport: true
 native_transport_port_ssl: 9142
-column_index_size_in_kb: 4
+column_index_size: 4KiB
 saved_caches_directory: build/test/cassandra/saved_caches
 data_file_directories:
 - build/test/cassandra/data
@@ -39,15 +39,15 @@
   truststore_password: cassandra
 incremental_backups: true
 concurrent_compactors: 4
-compaction_throughput_mb_per_sec: 0
+compaction_throughput: 0MiB/s
 row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-row_cache_size_in_mb: 16
-enable_user_defined_functions: true
-enable_scripted_user_defined_functions: true
-prepared_statements_cache_size_mb: 1
+row_cache_size: 16MiB
+user_defined_functions_enabled: true
+scripted_user_defined_functions_enabled: true
+prepared_statements_cache_size: 1MiB
 corrupted_tombstone_strategy: exception
 stream_entire_sstables: true
-stream_throughput_outbound_megabits_per_sec: 200000000
+stream_throughput_outbound: 23841858MiB/s
 
 client_encryption_options:
   enabled: true
diff --git a/test/data/config/version=3.0.0-alpha1.yml b/test/data/config/version=3.0.0-alpha1.yml
new file mode 100644
index 0000000..4c5381b
--- /dev/null
+++ b/test/data/config/version=3.0.0-alpha1.yml
@@ -0,0 +1,214 @@
+---
+trickle_fsync: "java.lang.Boolean"
+rpc_listen_backlog: "java.lang.Integer"
+max_streaming_retries: "java.lang.Integer"
+native_transport_flush_in_batches_legacy: "java.lang.Boolean"
+row_cache_save_period: "java.lang.Integer"
+rpc_address: "java.lang.String"
+disk_optimization_estimate_percentile: "java.lang.Double"
+hinted_handoff_disabled_datacenters: "java.util.Set"
+num_tokens: "java.lang.Integer"
+read_request_timeout_in_ms: "java.lang.Long"
+rpc_max_threads: "java.lang.Integer"
+enable_drop_compact_storage: "java.lang.Boolean"
+commitlog_directory: "java.lang.String"
+unlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"
+auto_bootstrap: "java.lang.Boolean"
+authorizer: "java.lang.String"
+memtable_heap_space_in_mb: "java.lang.Integer"
+index_interval: "java.lang.Integer"
+sstable_preemptive_open_interval_in_mb: "java.lang.Integer"
+broadcast_rpc_address: "java.lang.String"
+commitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"
+listen_interface_prefer_ipv6: "java.lang.Boolean"
+repair_session_max_tree_depth: "java.lang.Integer"
+request_scheduler_options:
+  throttle_limit: "java.lang.Integer"
+  default_weight: "java.lang.Integer"
+  weights: "java.util.Map"
+user_defined_function_warn_timeout: "java.lang.Long"
+request_scheduler_id: "org.apache.cassandra.config.Config.RequestSchedulerId"
+tracetype_repair_ttl: "java.lang.Integer"
+rpc_send_buff_size_in_bytes: "java.lang.Integer"
+concurrent_compactors: "java.lang.Integer"
+buffer_pool_use_heap_if_exhausted: "java.lang.Boolean"
+concurrent_materialized_view_writes: "java.lang.Integer"
+commitlog_total_space_in_mb: "java.lang.Integer"
+hints_directory: "java.lang.String"
+listen_address: "java.lang.String"
+native_transport_max_concurrent_connections_per_ip: "java.lang.Long"
+rpc_keepalive: "java.lang.Boolean"
+request_scheduler: "java.lang.String"
+rpc_interface_prefer_ipv6: "java.lang.Boolean"
+check_for_duplicate_rows_during_compaction: "java.lang.Boolean"
+request_timeout_in_ms: "java.lang.Long"
+user_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"
+disk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"
+rpc_server_type: "java.lang.String"
+concurrent_counter_writes: "java.lang.Integer"
+counter_write_request_timeout_in_ms: "java.lang.Long"
+roles_update_interval_in_ms: "java.lang.Integer"
+row_cache_size_in_mb: "java.lang.Long"
+memtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"
+trickle_fsync_interval_in_kb: "java.lang.Integer"
+cas_contention_timeout_in_ms: "java.lang.Long"
+key_cache_size_in_mb: "java.lang.Long"
+tombstone_warn_threshold: "java.lang.Integer"
+min_free_space_per_drive_in_mb: "java.lang.Integer"
+write_request_timeout_in_ms: "java.lang.Long"
+cross_node_timeout: "java.lang.Boolean"
+dynamic_snitch: "java.lang.Boolean"
+permissions_validity_in_ms: "java.lang.Integer"
+phi_convict_threshold: "java.lang.Double"
+commitlog_sync_batch_window_in_ms: "java.lang.Double"
+native_transport_max_threads: "java.lang.Integer"
+thrift_max_message_length_in_mb: "java.lang.Integer"
+disk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"
+permissions_update_interval_in_ms: "java.lang.Integer"
+tombstone_failure_threshold: "java.lang.Integer"
+authenticator: "java.lang.String"
+max_mutation_size_in_kb: "java.lang.Integer"
+cache_load_timeout_seconds: "java.lang.Integer"
+initial_token: "java.lang.String"
+batch_size_warn_threshold_in_kb: "java.lang.Integer"
+concurrent_replicates: "java.lang.Integer"
+dynamic_snitch_badness_threshold: "java.lang.Double"
+index_summary_capacity_in_mb: "java.lang.Long"
+commitlog_sync_period_in_ms: "java.lang.Integer"
+counter_cache_keys_to_save: "java.lang.Integer"
+disk_optimization_page_cross_chance: "java.lang.Double"
+listen_on_broadcast_address: "java.lang.Boolean"
+native_transport_max_concurrent_requests_in_bytes: "java.lang.Long"
+rpc_min_threads: "java.lang.Integer"
+row_cache_class_name: "java.lang.String"
+gc_warn_threshold_in_ms: "java.lang.Integer"
+disk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"
+compaction_large_partition_warning_threshold_mb: "java.lang.Integer"
+enable_user_defined_functions_threads: "java.lang.Boolean"
+hinted_handoff_throttle_in_kb: "java.lang.Integer"
+otc_backlog_expiration_interval_ms: "java.lang.Integer"
+counter_cache_save_period: "java.lang.Integer"
+otc_coalescing_enough_coalesced_messages: "java.lang.Integer"
+hints_flush_period_in_ms: "java.lang.Integer"
+role_manager: "java.lang.String"
+thrift_framed_transport_size_in_mb: "java.lang.Integer"
+server_encryption_options:
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  truststore: "java.lang.String"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  algorithm: "java.lang.String"
+max_hints_delivery_threads: "java.lang.Integer"
+column_index_size_in_kb: "java.lang.Integer"
+memtable_offheap_space_in_mb: "java.lang.Integer"
+data_file_directories: "java.util.List"
+saved_caches_directory: "java.lang.String"
+native_transport_max_frame_size_in_mb: "java.lang.Integer"
+index_summary_resize_interval_in_minutes: "java.lang.Integer"
+streaming_socket_timeout_in_ms: "java.lang.Integer"
+encryption_options:
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  truststore: "java.lang.String"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  algorithm: "java.lang.String"
+start_rpc: "java.lang.Boolean"
+enable_user_defined_functions: "java.lang.Boolean"
+max_hint_window_in_ms: "java.lang.Integer"
+gc_log_threshold_in_ms: "java.lang.Integer"
+snapshot_on_duplicate_row_detection: "java.lang.Boolean"
+seed_provider:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+check_for_duplicate_rows_during_reads: "java.lang.Boolean"
+internode_compression: "org.apache.cassandra.config.Config.InternodeCompression"
+internode_send_buff_size_in_bytes: "java.lang.Integer"
+otc_coalescing_window_us: "java.lang.Integer"
+batchlog_replay_throttle_in_kb: "java.lang.Integer"
+enable_scripted_user_defined_functions: "java.lang.Boolean"
+commitlog_compression:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+broadcast_address: "java.lang.String"
+rpc_recv_buff_size_in_bytes: "java.lang.Integer"
+enable_materialized_views: "java.lang.Boolean"
+roles_validity_in_ms: "java.lang.Integer"
+snapshot_before_compaction: "java.lang.Boolean"
+native_transport_port_ssl: "java.lang.Integer"
+allocate_tokens_for_keyspace: "java.lang.String"
+storage_port: "java.lang.Integer"
+counter_cache_size_in_mb: "java.lang.Long"
+native_transport_port: "java.lang.Integer"
+dynamic_snitch_reset_interval_in_ms: "java.lang.Integer"
+permissions_cache_max_entries: "java.lang.Integer"
+tracetype_query_ttl: "java.lang.Integer"
+stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
+rpc_port: "java.lang.Integer"
+commit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"
+concurrent_writes: "java.lang.Integer"
+range_request_timeout_in_ms: "java.lang.Long"
+dynamic_snitch_update_interval_in_ms: "java.lang.Integer"
+hints_compression:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+commitlog_periodic_queue_size: "java.lang.Integer"
+hinted_handoff_enabled: "java.lang.Boolean"
+max_value_size_in_mb: "java.lang.Integer"
+memtable_flush_writers: "java.lang.Integer"
+otc_coalescing_strategy: "java.lang.String"
+commitlog_max_compression_buffers_in_pool: "java.lang.Integer"
+roles_cache_max_entries: "java.lang.Integer"
+native_transport_max_negotiable_protocol_version: "java.lang.Integer"
+partitioner: "java.lang.String"
+internode_recv_buff_size_in_bytes: "java.lang.Integer"
+listen_interface: "java.lang.String"
+start_native_transport: "java.lang.Boolean"
+ssl_storage_port: "java.lang.Integer"
+user_defined_function_fail_timeout: "java.lang.Long"
+cluster_name: "java.lang.String"
+incremental_backups: "java.lang.Boolean"
+file_cache_size_in_mb: "java.lang.Integer"
+inter_dc_tcp_nodelay: "java.lang.Boolean"
+internode_authenticator: "java.lang.String"
+key_cache_keys_to_save: "java.lang.Integer"
+key_cache_save_period: "java.lang.Integer"
+windows_timer_interval: "java.lang.Integer"
+rpc_interface: "java.lang.String"
+commitlog_segment_size_in_mb: "java.lang.Integer"
+row_cache_keys_to_save: "java.lang.Integer"
+replica_filtering_protection:
+  cached_rows_fail_threshold: "java.lang.Integer"
+  cached_rows_warn_threshold: "java.lang.Integer"
+native_transport_max_concurrent_requests_in_bytes_per_ip: "java.lang.Long"
+native_transport_max_concurrent_connections: "java.lang.Long"
+memtable_cleanup_threshold: "java.lang.Float"
+concurrent_reads: "java.lang.Integer"
+inter_dc_stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
+truncate_request_timeout_in_ms: "java.lang.Long"
+client_encryption_options:
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  optional: "java.lang.Boolean"
+  truststore: "java.lang.String"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  enabled: "java.lang.Boolean"
+  algorithm: "java.lang.String"
+auto_snapshot: "java.lang.Boolean"
+batch_size_fail_threshold_in_kb: "java.lang.Integer"
+compaction_throughput_mb_per_sec: "java.lang.Integer"
+max_hints_file_size_in_mb: "java.lang.Integer"
+endpoint_snitch: "java.lang.String"
diff --git a/test/data/config/version=3.11.0.yml b/test/data/config/version=3.11.0.yml
new file mode 100644
index 0000000..9492cbf
--- /dev/null
+++ b/test/data/config/version=3.11.0.yml
@@ -0,0 +1,247 @@
+---
+trickle_fsync: "java.lang.Boolean"
+rpc_listen_backlog: "java.lang.Integer"
+max_streaming_retries: "java.lang.Integer"
+cdc_total_space_in_mb: "java.lang.Integer"
+native_transport_flush_in_batches_legacy: "java.lang.Boolean"
+row_cache_save_period: "java.lang.Integer"
+rpc_address: "java.lang.String"
+disk_optimization_estimate_percentile: "java.lang.Double"
+hinted_handoff_disabled_datacenters: "java.util.Set"
+cdc_enabled: "java.lang.Boolean"
+cdc_raw_directory: "java.lang.String"
+num_tokens: "java.lang.Integer"
+read_request_timeout_in_ms: "java.lang.Long"
+rpc_max_threads: "java.lang.Integer"
+enable_drop_compact_storage: "java.lang.Boolean"
+commitlog_directory: "java.lang.String"
+unlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"
+credentials_validity_in_ms: "java.lang.Integer"
+auto_bootstrap: "java.lang.Boolean"
+authorizer: "java.lang.String"
+memtable_heap_space_in_mb: "java.lang.Integer"
+index_interval: "java.lang.Integer"
+sstable_preemptive_open_interval_in_mb: "java.lang.Integer"
+broadcast_rpc_address: "java.lang.String"
+commitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"
+listen_interface_prefer_ipv6: "java.lang.Boolean"
+repair_session_max_tree_depth: "java.lang.Integer"
+request_scheduler_options:
+  throttle_limit: "java.lang.Integer"
+  default_weight: "java.lang.Integer"
+  weights: "java.util.Map"
+user_defined_function_warn_timeout: "java.lang.Long"
+request_scheduler_id: "org.apache.cassandra.config.Config.RequestSchedulerId"
+tracetype_repair_ttl: "java.lang.Integer"
+rpc_send_buff_size_in_bytes: "java.lang.Integer"
+concurrent_compactors: "java.lang.Integer"
+buffer_pool_use_heap_if_exhausted: "java.lang.Boolean"
+concurrent_materialized_view_writes: "java.lang.Integer"
+commitlog_total_space_in_mb: "java.lang.Integer"
+hints_directory: "java.lang.String"
+listen_address: "java.lang.String"
+native_transport_max_concurrent_connections_per_ip: "java.lang.Long"
+rpc_keepalive: "java.lang.Boolean"
+request_scheduler: "java.lang.String"
+allow_extra_insecure_udfs: "java.lang.Boolean"
+rpc_interface_prefer_ipv6: "java.lang.Boolean"
+check_for_duplicate_rows_during_compaction: "java.lang.Boolean"
+request_timeout_in_ms: "java.lang.Long"
+user_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"
+disk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"
+rpc_server_type: "java.lang.String"
+concurrent_counter_writes: "java.lang.Integer"
+counter_write_request_timeout_in_ms: "java.lang.Long"
+roles_update_interval_in_ms: "java.lang.Integer"
+row_cache_size_in_mb: "java.lang.Long"
+memtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"
+trickle_fsync_interval_in_kb: "java.lang.Integer"
+cas_contention_timeout_in_ms: "java.lang.Long"
+key_cache_size_in_mb: "java.lang.Long"
+tombstone_warn_threshold: "java.lang.Integer"
+column_index_cache_size_in_kb: "java.lang.Integer"
+min_free_space_per_drive_in_mb: "java.lang.Integer"
+write_request_timeout_in_ms: "java.lang.Long"
+cross_node_timeout: "java.lang.Boolean"
+dynamic_snitch: "java.lang.Boolean"
+permissions_validity_in_ms: "java.lang.Integer"
+phi_convict_threshold: "java.lang.Double"
+commitlog_sync_batch_window_in_ms: "java.lang.Double"
+native_transport_max_threads: "java.lang.Integer"
+thrift_max_message_length_in_mb: "java.lang.Integer"
+disk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"
+permissions_update_interval_in_ms: "java.lang.Integer"
+tombstone_failure_threshold: "java.lang.Integer"
+authenticator: "java.lang.String"
+max_mutation_size_in_kb: "java.lang.Integer"
+allow_insecure_udfs: "java.lang.Boolean"
+cache_load_timeout_seconds: "java.lang.Integer"
+initial_token: "java.lang.String"
+batch_size_warn_threshold_in_kb: "java.lang.Integer"
+concurrent_replicates: "java.lang.Integer"
+dynamic_snitch_badness_threshold: "java.lang.Double"
+index_summary_capacity_in_mb: "java.lang.Long"
+commitlog_sync_period_in_ms: "java.lang.Integer"
+counter_cache_keys_to_save: "java.lang.Integer"
+disk_optimization_page_cross_chance: "java.lang.Double"
+listen_on_broadcast_address: "java.lang.Boolean"
+native_transport_max_concurrent_requests_in_bytes: "java.lang.Long"
+rpc_min_threads: "java.lang.Integer"
+row_cache_class_name: "java.lang.String"
+gc_warn_threshold_in_ms: "java.lang.Integer"
+disk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"
+compaction_large_partition_warning_threshold_mb: "java.lang.Integer"
+enable_user_defined_functions_threads: "java.lang.Boolean"
+hinted_handoff_throttle_in_kb: "java.lang.Integer"
+otc_backlog_expiration_interval_ms: "java.lang.Integer"
+counter_cache_save_period: "java.lang.Integer"
+otc_coalescing_enough_coalesced_messages: "java.lang.Integer"
+slow_query_log_timeout_in_ms: "java.lang.Long"
+hints_flush_period_in_ms: "java.lang.Integer"
+role_manager: "java.lang.String"
+thrift_framed_transport_size_in_mb: "java.lang.Integer"
+server_encryption_options:
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  truststore: "java.lang.String"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  require_endpoint_verification: "java.lang.Boolean"
+  algorithm: "java.lang.String"
+max_hints_delivery_threads: "java.lang.Integer"
+column_index_size_in_kb: "java.lang.Integer"
+memtable_offheap_space_in_mb: "java.lang.Integer"
+data_file_directories: "java.util.List"
+saved_caches_directory: "java.lang.String"
+native_transport_max_frame_size_in_mb: "java.lang.Integer"
+index_summary_resize_interval_in_minutes: "java.lang.Integer"
+streaming_socket_timeout_in_ms: "java.lang.Integer"
+encryption_options:
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  truststore: "java.lang.String"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  require_endpoint_verification: "java.lang.Boolean"
+  algorithm: "java.lang.String"
+file_cache_round_up: "java.lang.Boolean"
+streaming_keep_alive_period_in_secs: "java.lang.Integer"
+start_rpc: "java.lang.Boolean"
+enable_user_defined_functions: "java.lang.Boolean"
+max_hint_window_in_ms: "java.lang.Integer"
+enable_sasi_indexes: "java.lang.Boolean"
+gc_log_threshold_in_ms: "java.lang.Integer"
+snapshot_on_duplicate_row_detection: "java.lang.Boolean"
+seed_provider:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+check_for_duplicate_rows_during_reads: "java.lang.Boolean"
+internode_compression: "org.apache.cassandra.config.Config.InternodeCompression"
+internode_send_buff_size_in_bytes: "java.lang.Integer"
+otc_coalescing_window_us: "java.lang.Integer"
+credentials_cache_max_entries: "java.lang.Integer"
+batchlog_replay_throttle_in_kb: "java.lang.Integer"
+enable_scripted_user_defined_functions: "java.lang.Boolean"
+commitlog_compression:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+broadcast_address: "java.lang.String"
+rpc_recv_buff_size_in_bytes: "java.lang.Integer"
+credentials_update_interval_in_ms: "java.lang.Integer"
+enable_materialized_views: "java.lang.Boolean"
+roles_validity_in_ms: "java.lang.Integer"
+snapshot_before_compaction: "java.lang.Boolean"
+back_pressure_strategy:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+prepared_statements_cache_size_mb: "java.lang.Long"
+native_transport_port_ssl: "java.lang.Integer"
+allocate_tokens_for_keyspace: "java.lang.String"
+storage_port: "java.lang.Integer"
+counter_cache_size_in_mb: "java.lang.Long"
+native_transport_port: "java.lang.Integer"
+dynamic_snitch_reset_interval_in_ms: "java.lang.Integer"
+permissions_cache_max_entries: "java.lang.Integer"
+tracetype_query_ttl: "java.lang.Integer"
+stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
+rpc_port: "java.lang.Integer"
+commit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"
+concurrent_writes: "java.lang.Integer"
+range_request_timeout_in_ms: "java.lang.Long"
+dynamic_snitch_update_interval_in_ms: "java.lang.Integer"
+hints_compression:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+commitlog_periodic_queue_size: "java.lang.Integer"
+force_new_prepared_statement_behaviour: "java.lang.Boolean"
+hinted_handoff_enabled: "java.lang.Boolean"
+back_pressure_enabled: "java.lang.Boolean"
+max_value_size_in_mb: "java.lang.Integer"
+memtable_flush_writers: "java.lang.Integer"
+otc_coalescing_strategy: "java.lang.String"
+commitlog_max_compression_buffers_in_pool: "java.lang.Integer"
+roles_cache_max_entries: "java.lang.Integer"
+cdc_free_space_check_interval_ms: "java.lang.Integer"
+native_transport_max_negotiable_protocol_version: "java.lang.Integer"
+transparent_data_encryption_options:
+  cipher: "java.lang.String"
+  chunk_length_kb: "java.lang.Integer"
+  iv_length: "java.lang.Integer"
+  key_alias: "java.lang.String"
+  key_provider:
+    class_name: "java.lang.String"
+    parameters: "java.util.Map"
+  enabled: "java.lang.Boolean"
+partitioner: "java.lang.String"
+internode_recv_buff_size_in_bytes: "java.lang.Integer"
+listen_interface: "java.lang.String"
+start_native_transport: "java.lang.Boolean"
+ssl_storage_port: "java.lang.Integer"
+user_defined_function_fail_timeout: "java.lang.Long"
+cluster_name: "java.lang.String"
+incremental_backups: "java.lang.Boolean"
+file_cache_size_in_mb: "java.lang.Integer"
+inter_dc_tcp_nodelay: "java.lang.Boolean"
+internode_authenticator: "java.lang.String"
+key_cache_keys_to_save: "java.lang.Integer"
+key_cache_save_period: "java.lang.Integer"
+windows_timer_interval: "java.lang.Integer"
+rpc_interface: "java.lang.String"
+commitlog_segment_size_in_mb: "java.lang.Integer"
+row_cache_keys_to_save: "java.lang.Integer"
+replica_filtering_protection:
+  cached_rows_fail_threshold: "java.lang.Integer"
+  cached_rows_warn_threshold: "java.lang.Integer"
+native_transport_max_concurrent_requests_in_bytes_per_ip: "java.lang.Long"
+native_transport_max_concurrent_connections: "java.lang.Long"
+memtable_cleanup_threshold: "java.lang.Float"
+concurrent_reads: "java.lang.Integer"
+inter_dc_stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
+thrift_prepared_statements_cache_size_mb: "java.lang.Long"
+truncate_request_timeout_in_ms: "java.lang.Long"
+client_encryption_options:
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  optional: "java.lang.Boolean"
+  truststore: "java.lang.String"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  enabled: "java.lang.Boolean"
+  require_endpoint_verification: "java.lang.Boolean"
+  algorithm: "java.lang.String"
+auto_snapshot: "java.lang.Boolean"
+batch_size_fail_threshold_in_kb: "java.lang.Integer"
+compaction_throughput_mb_per_sec: "java.lang.Integer"
+max_hints_file_size_in_mb: "java.lang.Integer"
+endpoint_snitch: "java.lang.String"
diff --git a/test/data/config/version=4.0-alpha1.yml b/test/data/config/version=4.0-alpha1.yml
new file mode 100644
index 0000000..2a0c58e
--- /dev/null
+++ b/test/data/config/version=4.0-alpha1.yml
@@ -0,0 +1,301 @@
+---
+repaired_data_tracking_for_range_reads_enabled: "java.lang.Boolean"
+block_for_peers_timeout_in_secs: "java.lang.Integer"
+flush_compression: "org.apache.cassandra.config.Config.FlushCompression"
+audit_logging_options:
+  audit_logs_dir: "java.lang.String"
+  included_users: "java.lang.String"
+  logger:
+    class_name: "java.lang.String"
+    parameters: "java.util.Map"
+  excluded_categories: "java.lang.String"
+  roll_cycle: "java.lang.String"
+  enabled: "java.lang.Boolean"
+  included_categories: "java.lang.String"
+  max_archive_retries: "java.lang.Integer"
+  excluded_keyspaces: "java.lang.String"
+  archive_command: "java.lang.String"
+  included_keyspaces: "java.lang.String"
+  max_log_size: "java.lang.Long"
+  block: "java.lang.Boolean"
+  excluded_users: "java.lang.String"
+  max_queue_weight: "java.lang.Integer"
+cdc_total_space_in_mb: "java.lang.Integer"
+internode_application_send_queue_reserve_global_capacity_in_bytes: "java.lang.Integer"
+row_cache_save_period: "java.lang.Integer"
+snapshot_links_per_second: "java.lang.Long"
+disk_optimization_estimate_percentile: "java.lang.Double"
+hinted_handoff_disabled_datacenters: "java.util.Set"
+cdc_enabled: "java.lang.Boolean"
+read_request_timeout_in_ms: "java.lang.Long"
+internode_application_receive_queue_reserve_global_capacity_in_bytes: "java.lang.Integer"
+credentials_validity_in_ms: "java.lang.Integer"
+memtable_heap_space_in_mb: "java.lang.Integer"
+commitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"
+user_defined_function_warn_timeout: "java.lang.Long"
+tracetype_repair_ttl: "java.lang.Integer"
+concurrent_materialized_view_writes: "java.lang.Integer"
+commitlog_total_space_in_mb: "java.lang.Integer"
+hints_directory: "java.lang.String"
+native_transport_max_concurrent_connections_per_ip: "java.lang.Long"
+internode_socket_send_buffer_size_in_bytes: "java.lang.Integer"
+rpc_interface_prefer_ipv6: "java.lang.Boolean"
+check_for_duplicate_rows_during_compaction: "java.lang.Boolean"
+internode_socket_receive_buffer_size_in_bytes: "java.lang.Integer"
+user_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"
+counter_write_request_timeout_in_ms: "java.lang.Long"
+roles_update_interval_in_ms: "java.lang.Integer"
+memtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"
+trickle_fsync_interval_in_kb: "java.lang.Integer"
+enable_transient_replication: "java.lang.Boolean"
+key_cache_size_in_mb: "java.lang.Long"
+tombstone_warn_threshold: "java.lang.Integer"
+column_index_cache_size_in_kb: "java.lang.Integer"
+full_query_logging_options:
+  log_dir: "java.lang.String"
+  archive_command: "java.lang.String"
+  max_log_size: "java.lang.Long"
+  block: "java.lang.Boolean"
+  roll_cycle: "java.lang.String"
+  max_queue_weight: "java.lang.Integer"
+  max_archive_retries: "java.lang.Integer"
+table_count_warn_threshold: "java.lang.Integer"
+write_request_timeout_in_ms: "java.lang.Long"
+internode_tcp_user_timeout_in_ms: "java.lang.Integer"
+auto_optimise_inc_repair_streams: "java.lang.Boolean"
+commitlog_sync_batch_window_in_ms: "java.lang.Double"
+disk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"
+tombstone_failure_threshold: "java.lang.Integer"
+validation_preview_purge_head_start_in_sec: "java.lang.Integer"
+max_mutation_size_in_kb: "java.lang.Integer"
+initial_token: "java.lang.String"
+batch_size_warn_threshold_in_kb: "java.lang.Integer"
+dynamic_snitch_badness_threshold: "java.lang.Double"
+index_summary_capacity_in_mb: "java.lang.Long"
+allocate_tokens_for_local_replication_factor: "java.lang.Integer"
+counter_cache_keys_to_save: "java.lang.Integer"
+disk_optimization_page_cross_chance: "java.lang.Double"
+listen_on_broadcast_address: "java.lang.Boolean"
+internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: "java.lang.Integer"
+row_cache_class_name: "java.lang.String"
+gc_warn_threshold_in_ms: "java.lang.Integer"
+disk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"
+hinted_handoff_throttle_in_kb: "java.lang.Integer"
+otc_backlog_expiration_interval_ms: "java.lang.Integer"
+counter_cache_save_period: "java.lang.Integer"
+keyspace_count_warn_threshold: "java.lang.Integer"
+hints_flush_period_in_ms: "java.lang.Integer"
+role_manager: "java.lang.String"
+block_for_peers_in_remote_dcs: "java.lang.Boolean"
+repair_command_pool_size: "java.lang.Integer"
+column_index_size_in_kb: "java.lang.Integer"
+memtable_offheap_space_in_mb: "java.lang.Integer"
+data_file_directories: "java.util.List"
+native_transport_max_frame_size_in_mb: "java.lang.Integer"
+index_summary_resize_interval_in_minutes: "java.lang.Integer"
+enable_user_defined_functions: "java.lang.Boolean"
+max_hint_window_in_ms: "java.lang.Integer"
+seed_provider:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+check_for_duplicate_rows_during_reads: "java.lang.Boolean"
+key_cache_migrate_during_compaction: "java.lang.Boolean"
+network_authorizer: "java.lang.String"
+batchlog_replay_throttle_in_kb: "java.lang.Integer"
+enable_scripted_user_defined_functions: "java.lang.Boolean"
+internode_application_send_queue_reserve_endpoint_capacity_in_bytes: "java.lang.Integer"
+commitlog_compression:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+broadcast_address: "java.lang.String"
+credentials_update_interval_in_ms: "java.lang.Integer"
+snapshot_before_compaction: "java.lang.Boolean"
+back_pressure_strategy:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+prepared_statements_cache_size_mb: "java.lang.Long"
+native_transport_port_ssl: "java.lang.Integer"
+allocate_tokens_for_keyspace: "java.lang.String"
+diagnostic_events_enabled: "java.lang.Boolean"
+storage_port: "java.lang.Integer"
+counter_cache_size_in_mb: "java.lang.Long"
+dynamic_snitch_reset_interval_in_ms: "java.lang.Integer"
+tracetype_query_ttl: "java.lang.Integer"
+autocompaction_on_startup_enabled: "java.lang.Boolean"
+commit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"
+concurrent_writes: "java.lang.Integer"
+range_request_timeout_in_ms: "java.lang.Long"
+dynamic_snitch_update_interval_in_ms: "java.lang.Integer"
+hinted_handoff_enabled: "java.lang.Boolean"
+internode_application_receive_queue_capacity_in_bytes: "java.lang.Integer"
+automatic_sstable_upgrade: "java.lang.Boolean"
+max_value_size_in_mb: "java.lang.Integer"
+memtable_flush_writers: "java.lang.Integer"
+otc_coalescing_strategy: "java.lang.String"
+snapshot_on_repaired_data_mismatch: "java.lang.Boolean"
+commitlog_max_compression_buffers_in_pool: "java.lang.Integer"
+internode_application_send_queue_capacity_in_bytes: "java.lang.Integer"
+roles_cache_max_entries: "java.lang.Integer"
+native_transport_max_negotiable_protocol_version: "java.lang.Integer"
+start_native_transport: "java.lang.Boolean"
+ssl_storage_port: "java.lang.Integer"
+cluster_name: "java.lang.String"
+incremental_backups: "java.lang.Boolean"
+key_cache_save_period: "java.lang.Integer"
+windows_timer_interval: "java.lang.Integer"
+rpc_interface: "java.lang.String"
+repair_session_space_in_mb: "java.lang.Integer"
+row_cache_keys_to_save: "java.lang.Integer"
+repair_command_pool_full_strategy: "org.apache.cassandra.config.Config.RepairCommandPoolFullStrategy"
+inter_dc_stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
+client_encryption_options:
+  optional: "java.lang.Boolean"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  enabled: "java.lang.Boolean"
+  require_endpoint_verification: "java.lang.Boolean"
+  accepted_protocols: "java.util.List"
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  truststore: "java.lang.String"
+  algorithm: "java.lang.String"
+concurrent_validations: "java.lang.Integer"
+ideal_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"
+consecutive_message_errors_threshold: "java.lang.Integer"
+trickle_fsync: "java.lang.Boolean"
+reject_repair_compaction_threshold: "java.lang.Integer"
+max_streaming_retries: "java.lang.Integer"
+native_transport_flush_in_batches_legacy: "java.lang.Boolean"
+rpc_address: "java.lang.String"
+file_cache_enabled: "java.lang.Boolean"
+cdc_raw_directory: "java.lang.String"
+num_tokens: "java.lang.Integer"
+repaired_data_tracking_for_partition_reads_enabled: "java.lang.Boolean"
+enable_drop_compact_storage: "java.lang.Boolean"
+commitlog_directory: "java.lang.String"
+unlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"
+auto_bootstrap: "java.lang.Boolean"
+authorizer: "java.lang.String"
+sstable_preemptive_open_interval_in_mb: "java.lang.Integer"
+broadcast_rpc_address: "java.lang.String"
+listen_interface_prefer_ipv6: "java.lang.Boolean"
+repair_session_max_tree_depth: "java.lang.Integer"
+auto_optimise_preview_repair_streams: "java.lang.Boolean"
+concurrent_compactors: "java.lang.Integer"
+buffer_pool_use_heap_if_exhausted: "java.lang.Boolean"
+local_system_data_file_directory: "java.lang.String"
+stream_entire_sstables: "java.lang.Boolean"
+corrupted_tombstone_strategy: "org.apache.cassandra.config.Config.CorruptedTombstoneStrategy"
+listen_address: "java.lang.String"
+rpc_keepalive: "java.lang.Boolean"
+allow_extra_insecure_udfs: "java.lang.Boolean"
+request_timeout_in_ms: "java.lang.Long"
+disk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"
+concurrent_counter_writes: "java.lang.Integer"
+row_cache_size_in_mb: "java.lang.Long"
+cas_contention_timeout_in_ms: "java.lang.Long"
+min_free_space_per_drive_in_mb: "java.lang.Integer"
+cross_node_timeout: "java.lang.Boolean"
+dynamic_snitch: "java.lang.Boolean"
+permissions_validity_in_ms: "java.lang.Integer"
+phi_convict_threshold: "java.lang.Double"
+native_transport_max_threads: "java.lang.Integer"
+permissions_update_interval_in_ms: "java.lang.Integer"
+authenticator: "java.lang.String"
+allow_insecure_udfs: "java.lang.Boolean"
+cache_load_timeout_seconds: "java.lang.Integer"
+concurrent_replicates: "java.lang.Integer"
+commitlog_sync_period_in_ms: "java.lang.Integer"
+auto_optimise_full_repair_streams: "java.lang.Boolean"
+internode_max_message_size_in_bytes: "java.lang.Integer"
+native_transport_max_concurrent_requests_in_bytes: "java.lang.Long"
+compaction_large_partition_warning_threshold_mb: "java.lang.Integer"
+enable_user_defined_functions_threads: "java.lang.Boolean"
+native_transport_allow_older_protocols: "java.lang.Boolean"
+otc_coalescing_enough_coalesced_messages: "java.lang.Integer"
+slow_query_log_timeout_in_ms: "java.lang.Long"
+report_unconfirmed_repaired_data_mismatches: "java.lang.Boolean"
+use_offheap_merkle_trees: "java.lang.Boolean"
+concurrent_materialized_view_builders: "java.lang.Integer"
+server_encryption_options:
+  enable_legacy_ssl_storage_port: "java.lang.Boolean"
+  optional: "java.lang.Boolean"
+  store_type: "java.lang.String"
+  cipher_suites: "java.util.List"
+  enabled: "java.lang.Boolean"
+  require_endpoint_verification: "java.lang.Boolean"
+  accepted_protocols: "java.util.List"
+  keystore_password: "java.lang.String"
+  protocol: "java.lang.String"
+  require_client_auth: "java.lang.Boolean"
+  internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
+  truststore_password: "java.lang.String"
+  keystore: "java.lang.String"
+  truststore: "java.lang.String"
+  algorithm: "java.lang.String"
+max_hints_delivery_threads: "java.lang.Integer"
+native_transport_idle_timeout_in_ms: "java.lang.Long"
+saved_caches_directory: "java.lang.String"
+max_concurrent_automatic_sstable_upgrades: "java.lang.Integer"
+file_cache_round_up: "java.lang.Boolean"
+streaming_keep_alive_period_in_secs: "java.lang.Integer"
+enable_sasi_indexes: "java.lang.Boolean"
+gc_log_threshold_in_ms: "java.lang.Integer"
+snapshot_on_duplicate_row_detection: "java.lang.Boolean"
+commitlog_sync_group_window_in_ms: "java.lang.Double"
+internode_compression: "org.apache.cassandra.config.Config.InternodeCompression"
+otc_coalescing_window_us: "java.lang.Integer"
+credentials_cache_max_entries: "java.lang.Integer"
+periodic_commitlog_sync_lag_block_in_ms: "java.lang.Integer"
+enable_materialized_views: "java.lang.Boolean"
+roles_validity_in_ms: "java.lang.Integer"
+networking_cache_size_in_mb: "java.lang.Integer"
+native_transport_port: "java.lang.Integer"
+permissions_cache_max_entries: "java.lang.Integer"
+stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
+hints_compression:
+  class_name: "java.lang.String"
+  parameters: "java.util.Map"
+commitlog_periodic_queue_size: "java.lang.Integer"
+force_new_prepared_statement_behaviour: "java.lang.Boolean"
+back_pressure_enabled: "java.lang.Boolean"
+cdc_free_space_check_interval_ms: "java.lang.Integer"
+transparent_data_encryption_options:
+  cipher: "java.lang.String"
+  chunk_length_kb: "java.lang.Integer"
+  iv_length: "java.lang.Integer"
+  key_alias: "java.lang.String"
+  key_provider:
+    class_name: "java.lang.String"
+    parameters: "java.util.Map"
+  enabled: "java.lang.Boolean"
+initial_range_tombstone_list_allocation_size: "java.lang.Integer"
+partitioner: "java.lang.String"
+listen_interface: "java.lang.String"
+user_defined_function_fail_timeout: "java.lang.Long"
+file_cache_size_in_mb: "java.lang.Integer"
+inter_dc_tcp_nodelay: "java.lang.Boolean"
+internode_authenticator: "java.lang.String"
+key_cache_keys_to_save: "java.lang.Integer"
+commitlog_segment_size_in_mb: "java.lang.Integer"
+replica_filtering_protection:
+  cached_rows_fail_threshold: "java.lang.Integer"
+  cached_rows_warn_threshold: "java.lang.Integer"
+internode_tcp_connect_timeout_in_ms: "java.lang.Integer"
+native_transport_max_concurrent_requests_in_bytes_per_ip: "java.lang.Long"
+range_tombstone_list_growth_factor: "java.lang.Double"
+native_transport_max_concurrent_connections: "java.lang.Long"
+memtable_cleanup_threshold: "java.lang.Float"
+concurrent_reads: "java.lang.Integer"
+streaming_connections_per_host: "java.lang.Integer"
+truncate_request_timeout_in_ms: "java.lang.Long"
+auto_snapshot: "java.lang.Boolean"
+native_transport_receive_queue_capacity_in_bytes: "java.lang.Integer"
+internode_streaming_tcp_user_timeout_in_ms: "java.lang.Integer"
+batch_size_fail_threshold_in_kb: "java.lang.Integer"
+compaction_throughput_mb_per_sec: "java.lang.Integer"
+max_hints_file_size_in_mb: "java.lang.Integer"
+endpoint_snitch: "java.lang.String"
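Note: the mapping that ends above (one of the `test/data/config/version=*.yml` fixtures) records each cassandra.yaml parameter name together with the Java type of the corresponding `org.apache.cassandra.config.Config` field, so compatibility tests can detect when a field's type silently changes between releases. Below is a minimal, illustrative sketch of how such a fixture could be consumed; the class name `ConfigTypeDriftCheck` and the path argument are hypothetical and are not part of the actual test suite, which has its own compatibility checks.

```java
// Minimal sketch (NOT the project's real test): load a "config name -> Java type"
// fixture like the YAML above and report fields of Config whose declared type
// no longer matches what the fixture recorded.
import java.io.InputStream;
import java.lang.reflect.Field;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;

import org.yaml.snakeyaml.Yaml;

public class ConfigTypeDriftCheck
{
    public static void main(String[] args) throws Exception
    {
        // args[0]: path to a fixture such as test/data/config/version=4.0-alpha1.yml (illustrative)
        try (InputStream in = Files.newInputStream(Paths.get(args[0])))
        {
            Map<String, Object> recorded = new Yaml().load(in);

            // Resolve Config reflectively so this sketch does not hard-code field names.
            Class<?> config = Class.forName("org.apache.cassandra.config.Config");
            for (Field field : config.getDeclaredFields())
            {
                Object expected = recorded.get(field.getName());
                if (!(expected instanceof String))
                    continue; // nested sections (e.g. server_encryption_options) are maps; skipped here

                // The fixture stores boxed, dotted names ("java.lang.Boolean",
                // "org.apache.cassandra.config.Config.CorruptedTombstoneStrategy"),
                // so box primitives and use the canonical (dotted) class name.
                String actual = wrap(field.getType()).getCanonicalName();
                if (!expected.equals(actual))
                    System.out.printf("type drift: %s was %s, now %s%n",
                                      field.getName(), expected, actual);
            }
        }
    }

    private static Class<?> wrap(Class<?> type)
    {
        if (type == boolean.class) return Boolean.class;
        if (type == int.class)     return Integer.class;
        if (type == long.class)    return Long.class;
        if (type == double.class)  return Double.class;
        if (type == float.class)   return Float.class;
        return type;
    }
}
```

Run with the fixture path as the only argument; handling the nested sections (such as `server_encryption_options` or `transparent_data_encryption_options`) would require walking the sub-maps recursively, which this sketch deliberately leaves out.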
diff --git a/test/data/jmxdump/cassandra-4.1-jmx.yaml b/test/data/jmxdump/cassandra-4.1-jmx.yaml
new file mode 100644
index 0000000..14e2044
--- /dev/null
+++ b/test/data/jmxdump/cassandra-4.1-jmx.yaml
@@ -0,0 +1,104302 @@
+org.apache.cassandra.db:type=BatchlogManager:
+  attributes:
+  - {access: read-only, name: TotalBatchesReplayed, type: long}
+  operations:
+  - name: countAllBatches
+    parameters: []
+    returnType: int
+  - name: forceBatchlogReplay
+    parameters: []
+    returnType: void
+org.apache.cassandra.db:type=BlacklistedDirectories:
+  attributes:
+  - {access: read-only, name: UnreadableDirectories, type: java.util.Set}
+  - {access: read-only, name: UnwritableDirectories, type: java.util.Set}
+  operations:
+  - name: markUnreadable
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: markUnwritable
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+org.apache.cassandra.db:type=Caches:
+  attributes:
+  - {access: write-only, name: CounterCacheCapacityInMB, type: long}
+  - {access: read/write, name: CounterCacheKeysToSave, type: int}
+  - {access: read/write, name: CounterCacheSavePeriodInSeconds, type: int}
+  - {access: write-only, name: KeyCacheCapacityInMB, type: long}
+  - {access: read/write, name: KeyCacheKeysToSave, type: int}
+  - {access: read/write, name: KeyCacheSavePeriodInSeconds, type: int}
+  - {access: write-only, name: RowCacheCapacityInMB, type: long}
+  - {access: read/write, name: RowCacheKeysToSave, type: int}
+  - {access: read/write, name: RowCacheSavePeriodInSeconds, type: int}
+  operations:
+  - name: invalidateCounterCache
+    parameters: []
+    returnType: void
+  - name: invalidateKeyCache
+    parameters: []
+    returnType: void
+  - name: invalidateRowCache
+    parameters: []
+    returnType: void
+  - name: saveCaches
+    parameters: []
+    returnType: void
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=IndexInfo:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=available_ranges:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=available_ranges_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=batches:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=built_views:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=compaction_history:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=local:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=paxos:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=peer_events:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=peer_events_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=peers:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=peers_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=prepared_statements:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=repairs:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=size_estimates:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=sstable_activity_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=table_estimates:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=transferred_ranges:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=transferred_ranges_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system,columnfamily=view_builds_in_progress:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_auth,columnfamily=network_permissions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_auth,columnfamily=resource_role_permissons_index:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_auth,columnfamily=role_members:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_auth,columnfamily=role_permissions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_auth,columnfamily=roles:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_distributed,columnfamily=parent_repair_history:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_distributed,columnfamily=repair_history:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_distributed,columnfamily=view_build_status:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=aggregates:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=columns:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=dropped_columns:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=functions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=indexes:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=keyspaces:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=tables:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=triggers:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=types:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_schema,columnfamily=views:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_traces,columnfamily=events:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=ColumnFamilies,keyspace=system_traces,columnfamily=sessions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Commitlog:
+  attributes:
+  - {access: read-only, name: ActiveContentSize, type: long}
+  - {access: read-only, name: ActiveOnDiskSize, type: long}
+  - {access: read-only, name: ActiveSegmentCompressionRatios, type: java.util.Map}
+  - {access: read-only, name: ActiveSegmentNames, type: java.util.List}
+  - {access: read-only, name: ArchiveCommand, type: java.lang.String}
+  - {access: read-only, name: ArchivingSegmentNames, type: java.util.List}
+  - {access: read-only, name: RestoreCommand, type: java.lang.String}
+  - {access: read-only, name: RestoreDirectories, type: java.lang.String}
+  - {access: read-only, name: RestorePointInTime, type: long}
+  - {access: read-only, name: RestorePrecision, type: java.lang.String}
+  operations:
+  - name: recover
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+org.apache.cassandra.db:type=CompactionManager:
+  attributes:
+  - {access: read/write, name: AutomaticSSTableUpgradeEnabled, type: boolean}
+  - {access: read-only, name: CompactionHistory, type: javax.management.openmbean.TabularData}
+  - {access: read-only, name: CompactionSummary, type: java.util.List}
+  - {access: read-only, name: Compactions, type: java.util.List}
+  - {access: read/write, name: CoreCompactorThreads, type: int}
+  - {access: read/write, name: CoreValidationThreads, type: int}
+  - {access: read/write, name: CoreViewBuildThreads, type: int}
+  - {access: read/write, name: DisableSTCSInL0, type: boolean}
+  - {access: read/write, name: MaxConcurrentAutoUpgradeTasks, type: int}
+  - {access: read/write, name: MaximumCompactorThreads, type: int}
+  - {access: read/write, name: MaximumValidatorThreads, type: int}
+  - {access: read/write, name: MaximumViewBuildThreads, type: int}
+  operations:
+  - name: forceUserDefinedCleanup
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: forceUserDefinedCompaction
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: stopCompaction
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: stopCompactionById
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+org.apache.cassandra.db:type=DisallowedDirectories:
+  attributes:
+  - {access: read-only, name: UnreadableDirectories, type: java.util.Set}
+  - {access: read-only, name: UnwritableDirectories, type: java.util.Set}
+  operations:
+  - name: markUnreadable
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: markUnwritable
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+org.apache.cassandra.db:type=DynamicEndpointSnitch:
+  attributes:
+  - {access: read-only, name: BadnessThreshold, type: double}
+  - {access: read-only, name: ResetInterval, type: int}
+  - {access: read-only, name: Scores, type: java.util.Map}
+  - {access: read-only, name: ScoresWithPort, type: java.util.Map}
+  - {access: read/write, name: Severity, type: double}
+  - {access: read-only, name: SubsnitchClassName, type: java.lang.String}
+  - {access: read-only, name: UpdateInterval, type: int}
+  operations:
+  - name: dumpTimings
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+org.apache.cassandra.db:type=EndpointSnitchInfo:
+  attributes:
+  - {access: read-only, name: Datacenter, type: java.lang.String}
+  - {access: read-only, name: Rack, type: java.lang.String}
+  - {access: read-only, name: SnitchName, type: java.lang.String}
+  operations:
+  - name: getDatacenter
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.lang.String
+  - name: getRack
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.lang.String
+org.apache.cassandra.db:type=IndexSummaries:
+  attributes:
+  - {access: read-only, name: AverageIndexInterval, type: double}
+  - {access: read-only, name: IndexIntervals, type: java.util.Map}
+  - {access: read/write, name: MemoryPoolCapacityInMB, type: long}
+  - {access: read-only, name: MemoryPoolSizeInMB, type: double}
+  - {access: read/write, name: ResizeIntervalInMinutes, type: int}
+  operations:
+  - name: redistributeSummaries
+    parameters: []
+    returnType: void
+org.apache.cassandra.db:type=NativeAccess:
+  attributes:
+  - {access: read-only, name: Available, type: boolean}
+  - {access: read-only, name: MemoryLockable, type: boolean}
+  operations: []
+org.apache.cassandra.db:type=RepairService:
+  attributes:
+  - {access: read/write, name: RepairPendingCompactionRejectThreshold, type: int}
+  - {access: read/write, name: RepairSessionSpaceInMegabytes, type: int}
+  - {access: read/write, name: UseOffheapMerkleTrees, type: boolean}
+  operations:
+  - name: cleanupPending
+    parameters:
+    - {name: p1, type: java.util.List}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: boolean}
+    returnType: java.util.List
+  - name: failSession
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: void
+  - name: getPendingStats
+    parameters:
+    - {name: p1, type: java.util.List}
+    - {name: p2, type: java.lang.String}
+    returnType: java.util.List
+  - name: getRepairStats
+    parameters:
+    - {name: p1, type: java.util.List}
+    - {name: p2, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSessions
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: java.lang.String}
+    returnType: java.util.List
+org.apache.cassandra.db:type=StorageProxy:
+  attributes:
+  - {access: read/write, name: CasContentionTimeout, type: java.lang.Long}
+  - {access: read-only, name: CheckForDuplicateRowsDuringCompaction, type: boolean}
+  - {access: read-only, name: CheckForDuplicateRowsDuringReads, type: boolean}
+  - {access: read/write, name: CounterWriteRpcTimeout, type: java.lang.Long}
+  - {access: read-only, name: HintedHandoffDisabledDCs, type: java.util.Set}
+  - {access: read/write, name: HintedHandoffEnabled, type: boolean}
+  - {access: read-only, name: HintsInProgress, type: int}
+  - {access: read-only, name: IdealConsistencyLevel, type: java.lang.String}
+  - {access: read/write, name: MaxHintWindow, type: int}
+  - {access: read/write, name: MaxHintsInProgress, type: int}
+  - {access: read/write, name: NativeTransportMaxConcurrentConnections, type: java.lang.Long}
+  - {access: read-only, name: NumberOfTables, type: int}
+  - {access: read/write, name: OtcBacklogExpirationInterval, type: int}
+  - {access: read/write, name: RangeRpcTimeout, type: java.lang.Long}
+  - {access: read-only, name: ReadRepairAttempted, type: long}
+  - {access: read-only, name: ReadRepairRepairedBackground, type: long}
+  - {access: read-only, name: ReadRepairRepairedBlocking, type: long}
+  - {access: read/write, name: ReadRpcTimeout, type: java.lang.Long}
+  - {access: read-only, name: RepairedDataTrackingEnabledForPartitionReads, type: boolean}
+  - {access: read-only, name: RepairedDataTrackingEnabledForRangeReads, type: boolean}
+  - {access: read-only, name: ReportingUnconfirmedRepairedDataMismatchesEnabled, type: boolean}
+  - {access: read/write, name: RpcTimeout, type: java.lang.Long}
+  - {access: read-only, name: SchemaVersions, type: java.util.Map}
+  - {access: read-only, name: SchemaVersionsWithPort, type: java.util.Map}
+  - {access: read-only, name: SnapshotOnDuplicateRowDetectionEnabled, type: boolean}
+  - {access: read-only, name: SnapshotOnRepairedDataMismatchEnabled, type: boolean}
+  - {access: read-only, name: TotalHints, type: long}
+  - {access: read/write, name: TruncateRpcTimeout, type: java.lang.Long}
+  - {access: read/write, name: WriteRpcTimeout, type: java.lang.Long}
+  operations:
+  - name: disableCheckForDuplicateRowsDuringCompaction
+    parameters: []
+    returnType: void
+  - name: disableCheckForDuplicateRowsDuringReads
+    parameters: []
+    returnType: void
+  - name: disableHintsForDC
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: disableRepairedDataTrackingForPartitionReads
+    parameters: []
+    returnType: void
+  - name: disableRepairedDataTrackingForRangeReads
+    parameters: []
+    returnType: void
+  - name: disableReportingUnconfirmedRepairedDataMismatches
+    parameters: []
+    returnType: void
+  - name: disableSnapshotOnDuplicateRowDetection
+    parameters: []
+    returnType: void
+  - name: disableSnapshotOnRepairedDataMismatch
+    parameters: []
+    returnType: void
+  - name: enableCheckForDuplicateRowsDuringCompaction
+    parameters: []
+    returnType: void
+  - name: enableCheckForDuplicateRowsDuringReads
+    parameters: []
+    returnType: void
+  - name: enableHintsForDC
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: enableRepairedDataTrackingForPartitionReads
+    parameters: []
+    returnType: void
+  - name: enableRepairedDataTrackingForRangeReads
+    parameters: []
+    returnType: void
+  - name: enableReportingUnconfirmedRepairedDataMismatches
+    parameters: []
+    returnType: void
+  - name: enableSnapshotOnDuplicateRowDetection
+    parameters: []
+    returnType: void
+  - name: enableSnapshotOnRepairedDataMismatch
+    parameters: []
+    returnType: void
+  - name: reloadTriggerClasses
+    parameters: []
+    returnType: void
+  - name: setIdealConsistencyLevel
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.lang.String
+org.apache.cassandra.db:type=StorageService:
+  attributes:
+  - {access: read-only, name: AllDataFileLocations, type: 'java.lang.String[]'}
+  - {access: read-only, name: AuditLogEnabled, type: boolean}
+  - {access: write-only, name: AutoOptimiseFullRepairStreams, type: boolean}
+  - {access: write-only, name: AutoOptimiseIncRepairStreams, type: boolean}
+  - {access: write-only, name: AutoOptimisePreviewRepairStreams, type: boolean}
+  - {access: read/write, name: BatchSizeFailureThreshold, type: int}
+  - {access: read/write, name: BatchSizeWarnThreshold, type: int}
+  - {access: read/write, name: BatchlogReplayThrottleInKB, type: int}
+  - {access: read-only, name: BootstrapMode, type: boolean}
+  - {access: read/write, name: CachedReplicaRowsFailThreshold, type: int}
+  - {access: read/write, name: CachedReplicaRowsWarnThreshold, type: int}
+  - {access: read/write, name: CasContentionTimeout, type: long}
+  - {access: read-only, name: ClusterName, type: java.lang.String}
+  - {access: read/write, name: ColumnIndexCacheSize, type: int}
+  - {access: read-only, name: CommitLogLocation, type: java.lang.String}
+  - {access: read/write, name: CompactionThroughputMbPerSec, type: int}
+  - {access: read/write, name: ConcurrentCompactors, type: int}
+  - {access: read/write, name: ConcurrentValidators, type: int}
+  - {access: read-only, name: ConcurrentValidatorsLimitEnforced, type: boolean}
+  - {access: read/write, name: ConcurrentViewBuilders, type: int}
+  - {access: read/write, name: CorruptedTombstoneStrategy, type: java.lang.String}
+  - {access: read/write, name: CounterWriteRpcTimeout, type: long}
+  - {access: read-only, name: CurrentGenerationNumber, type: int}
+  - {access: read-only, name: DrainProgress, type: java.lang.String}
+  - {access: read-only, name: Drained, type: boolean}
+  - {access: read-only, name: Draining, type: boolean}
+  - {access: read/write, name: DynamicUpdateInterval, type: int}
+  - {access: read-only, name: EndpointToHostId, type: java.util.Map}
+  - {access: read-only, name: EndpointWithPortToHostId, type: java.util.Map}
+  - {access: read-only, name: FullQueryLogEnabled, type: boolean}
+  - {access: read-only, name: FullQueryLoggerOptions, type: javax.management.openmbean.CompositeData}
+  - {access: read-only, name: GossipRunning, type: boolean}
+  - {access: write-only, name: HintedHandoffThrottleInKB, type: int}
+  - {access: read-only, name: HostIdMap, type: java.util.Map}
+  - {access: read-only, name: HostIdToEndpoint, type: java.util.Map}
+  - {access: read-only, name: HostIdToEndpointWithPort, type: java.util.Map}
+  - {access: read/write, name: IncrementalBackupsEnabled, type: boolean}
+  - {access: read/write, name: InitialRangeTombstoneListAllocationSize, type: int}
+  - {access: read-only, name: Initialized, type: boolean}
+  - {access: read/write, name: InterDCStreamThroughputMbPerSec, type: int}
+  - {access: read/write, name: InternodeStreamingTcpUserTimeoutInMS, type: int}
+  - {access: read/write, name: InternodeTcpConnectTimeoutInMS, type: int}
+  - {access: read/write, name: InternodeTcpUserTimeoutInMS, type: int}
+  - {access: read-only, name: Joined, type: boolean}
+  - {access: read-only, name: JoiningNodes, type: java.util.List}
+  - {access: read-only, name: JoiningNodesWithPort, type: java.util.List}
+  - {access: read/write, name: KeyspaceCountWarnThreshold, type: int}
+  - {access: read-only, name: Keyspaces, type: java.util.List}
+  - {access: read-only, name: LeavingNodes, type: java.util.List}
+  - {access: read-only, name: LeavingNodesWithPort, type: java.util.List}
+  - {access: read-only, name: LiveNodes, type: java.util.List}
+  - {access: read-only, name: LiveNodesWithPort, type: java.util.List}
+  - {access: read-only, name: LoadMap, type: java.util.Map}
+  - {access: read-only, name: LoadMapWithPort, type: java.util.Map}
+  - {access: read-only, name: LoadString, type: java.lang.String}
+  - {access: read-only, name: LocalHostId, type: java.lang.String}
+  - {access: read-only, name: LoggingLevels, type: java.util.Map}
+  - {access: read/write, name: MigrateKeycacheOnCompaction, type: boolean}
+  - {access: read-only, name: MovingNodes, type: java.util.List}
+  - {access: read-only, name: MovingNodesWithPort, type: java.util.List}
+  - {access: read/write, name: NativeTransportMaxConcurrentRequestsInBytes, type: long}
+  - {access: read/write, name: NativeTransportMaxConcurrentRequestsInBytesPerIp, type: long}
+  - {access: read-only, name: NativeTransportRunning, type: boolean}
+  - {access: read-only, name: NonLocalStrategyKeyspaces, type: java.util.List}
+  - {access: read-only, name: NonSystemKeyspaces, type: java.util.List}
+  - {access: read-only, name: NotificationInfo, type: 'javax.management.MBeanNotificationInfo[]'}
+  - {access: read-only, name: OperationMode, type: java.lang.String}
+  - {access: read-only, name: OutstandingSchemaVersions, type: java.util.Map}
+  - {access: read-only, name: OutstandingSchemaVersionsWithPort, type: java.util.Map}
+  - {access: read-only, name: Ownership, type: java.util.Map}
+  - {access: read-only, name: OwnershipWithPort, type: java.util.Map}
+  - {access: read-only, name: PartitionerName, type: java.lang.String}
+  - {access: read/write, name: RangeRpcTimeout, type: long}
+  - {access: write-only, name: RangeTombstoneListResizeGrowthFactor, type: double}
+  - {access: read-only, name: RangeTombstoneResizeListGrowthFactor, type: double}
+  - {access: read/write, name: ReadRpcTimeout, type: long}
+  - {access: read-only, name: ReleaseVersion, type: java.lang.String}
+  - {access: read-only, name: RemovalStatus, type: java.lang.String}
+  - {access: read-only, name: RemovalStatusWithPort, type: java.lang.String}
+  - {access: read/write, name: RepairSessionMaxTreeDepth, type: int}
+  - {access: read/write, name: RpcTimeout, type: long}
+  - {access: read/write, name: SSTablePreemptiveOpenIntervalInMB, type: int}
+  - {access: read-only, name: SavedCachesLocation, type: java.lang.String}
+  - {access: read-only, name: SchemaVersion, type: java.lang.String}
+  - {access: read-only, name: SnapshotDetails, type: java.util.Map}
+  - {access: read/write, name: SnapshotLinksPerSecond, type: long}
+  - {access: read-only, name: Starting, type: boolean}
+  - {access: read/write, name: StreamThroughputMbPerSec, type: int}
+  - {access: read/write, name: TableCountWarnThreshold, type: int}
+  - {access: read-only, name: TokenToEndpointMap, type: java.util.Map}
+  - {access: read-only, name: TokenToEndpointWithPortMap, type: java.util.Map}
+  - {access: read-only, name: Tokens, type: java.util.List}
+  - {access: read/write, name: TombstoneFailureThreshold, type: int}
+  - {access: read/write, name: TombstoneWarnThreshold, type: int}
+  - {access: read/write, name: TraceProbability, type: double}
+  - {access: read/write, name: TruncateRpcTimeout, type: long}
+  - {access: read-only, name: UnreachableNodes, type: java.util.List}
+  - {access: read-only, name: UnreachableNodesWithPort, type: java.util.List}
+  - {access: read/write, name: WriteRpcTimeout, type: long}
+  operations:
+  - name: addNotificationListener
+    parameters:
+    - {name: p1, type: javax.management.NotificationListener}
+    - {name: p2, type: javax.management.NotificationFilter}
+    - {name: p3, type: java.lang.Object}
+    returnType: void
+  - name: autoOptimiseFullRepairStreams
+    parameters: []
+    returnType: boolean
+  - name: autoOptimiseIncRepairStreams
+    parameters: []
+    returnType: boolean
+  - name: autoOptimisePreviewRepairStreams
+    parameters: []
+    returnType: boolean
+  - name: bulkLoad
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: bulkLoadAsync
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.lang.String
+  - name: bypassConcurrentValidatorsLimit
+    parameters: []
+    returnType: void
+  - name: cleanupSizeEstimates
+    parameters: []
+    returnType: void
+  - name: clearConnectionHistory
+    parameters: []
+    returnType: void
+  - name: clearSnapshot
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: void
+  - name: decommission
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: deliverHints
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: describeRingJMX
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: describeRingWithPortJMX
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: disableAuditLog
+    parameters: []
+    returnType: void
+  - name: disableAutoCompaction
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: void
+  - name: disableNativeTransportOldProtocolVersions
+    parameters: []
+    returnType: void
+  - name: drain
+    parameters: []
+    returnType: void
+  - name: effectiveOwnership
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: effectiveOwnershipWithPort
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: enableAuditLog
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: java.lang.String}
+    - {name: p4, type: java.lang.String}
+    - {name: p5, type: java.lang.String}
+    - {name: p6, type: java.lang.String}
+    - {name: p7, type: java.lang.String}
+    returnType: void
+  - name: enableAuditLog
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.util.Map}
+    - {name: p3, type: java.lang.String}
+    - {name: p4, type: java.lang.String}
+    - {name: p5, type: java.lang.String}
+    - {name: p6, type: java.lang.String}
+    - {name: p7, type: java.lang.String}
+    - {name: p8, type: java.lang.String}
+    returnType: void
+  - name: enableAutoCompaction
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: void
+  - name: enableFullQueryLogger
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: java.lang.Boolean}
+    - {name: p4, type: int}
+    - {name: p5, type: long}
+    - {name: p6, type: java.lang.String}
+    - {name: p7, type: int}
+    returnType: void
+  - name: enableNativeTransportOldProtocolVersions
+    parameters: []
+    returnType: void
+  - name: enforceConcurrentValidatorsLimit
+    parameters: []
+    returnType: void
+  - name: forceKeyspaceCleanup
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: int
+  - name: forceKeyspaceCleanup
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: 'java.lang.String[]'}
+    returnType: int
+  - name: forceKeyspaceCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: 'java.lang.String[]'}
+    returnType: void
+  - name: forceKeyspaceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: java.lang.String}
+    - {name: p4, type: 'java.lang.String[]'}
+    returnType: void
+  - name: forceKeyspaceFlush
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: void
+  - name: forceRemoveCompletion
+    parameters: []
+    returnType: void
+  - name: forceTerminateAllRepairSessions
+    parameters: []
+    returnType: void
+  - name: garbageCollect
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: java.lang.String}
+    - {name: p4, type: 'java.lang.String[]'}
+    returnType: int
+  - name: getAutoCompactionStatus
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: java.util.Map
+  - name: getConcurrency
+    parameters:
+    - {name: p1, type: java.util.List}
+    returnType: java.util.Map
+  - name: getKeyspaceReplicationInfo
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.lang.String
+  - name: getNaturalEndpoints
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.nio.ByteBuffer}
+    returnType: java.util.List
+  - name: getNaturalEndpoints
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: java.lang.String}
+    returnType: java.util.List
+  - name: getNaturalEndpointsWithPort
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.nio.ByteBuffer}
+    returnType: java.util.List
+  - name: getNaturalEndpointsWithPort
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: java.lang.String}
+    returnType: java.util.List
+  - name: getParentRepairStatus
+    parameters:
+    - {name: p1, type: int}
+    returnType: java.util.List
+  - name: getPendingRangeToEndpointMap
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: getPendingRangeToEndpointWithPortMap
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: getRangeToEndpointMap
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: getRangeToEndpointWithPortMap
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: getRangeToNativeaddressWithPortMap
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: getRangeToRpcaddressMap
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.Map
+  - name: getTokens
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getViewBuildStatuses
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    returnType: java.util.Map
+  - name: getViewBuildStatusesWithPort
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    returnType: java.util.Map
+  - name: joinRing
+    parameters: []
+    returnType: void
+  - name: loadNewSSTables
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    returnType: void
+  - name: move
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: rebuild
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: rebuild
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: java.lang.String}
+    - {name: p4, type: java.lang.String}
+    returnType: void
+  - name: rebuildSecondaryIndex
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: 'java.lang.String[]'}
+    returnType: void
+  - name: refreshSizeEstimates
+    parameters: []
+    returnType: void
+  - name: reloadLocalSchema
+    parameters: []
+    returnType: void
+  - name: relocateSSTables
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: int
+  - name: relocateSSTables
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: 'java.lang.String[]'}
+    returnType: int
+  - name: removeNode
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: removeNotificationListener
+    parameters:
+    - {name: p1, type: javax.management.NotificationListener}
+    returnType: void
+  - name: removeNotificationListener
+    parameters:
+    - {name: p1, type: javax.management.NotificationListener}
+    - {name: p2, type: javax.management.NotificationFilter}
+    - {name: p3, type: java.lang.Object}
+    returnType: void
+  - name: repairAsync
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.util.Map}
+    returnType: int
+  - name: rescheduleFailedDeletions
+    parameters: []
+    returnType: void
+  - name: resetFullQueryLogger
+    parameters: []
+    returnType: void
+  - name: resetLocalSchema
+    parameters: []
+    returnType: void
+  - name: resumeBootstrap
+    parameters: []
+    returnType: boolean
+  - name: sampleKeyRange
+    parameters: []
+    returnType: java.util.List
+  - name: samplePartitions
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    - {name: p4, type: java.util.List}
+    returnType: java.util.Map
+  - name: scrub
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: boolean}
+    - {name: p3, type: java.lang.String}
+    - {name: p4, type: 'java.lang.String[]'}
+    returnType: int
+  - name: scrub
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: java.lang.String}
+    - {name: p5, type: 'java.lang.String[]'}
+    returnType: int
+  - name: scrub
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: int}
+    - {name: p5, type: java.lang.String}
+    - {name: p6, type: 'java.lang.String[]'}
+    returnType: int
+  - name: scrub
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: int}
+    - {name: p6, type: java.lang.String}
+    - {name: p7, type: 'java.lang.String[]'}
+    returnType: int
+  - name: setConcurrency
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: setLoggingLevel
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    returnType: void
+  - name: startGossiping
+    parameters: []
+    returnType: void
+  - name: startNativeTransport
+    parameters: []
+    returnType: void
+  - name: stopDaemon
+    parameters: []
+    returnType: void
+  - name: stopFullQueryLogger
+    parameters: []
+    returnType: void
+  - name: stopGossiping
+    parameters: []
+    returnType: void
+  - name: stopNativeTransport
+    parameters: []
+    returnType: void
+  - name: takeMultipleTableSnapshot
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: void
+  - name: takeSnapshot
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: 'java.lang.String[]'}
+    returnType: void
+  - name: takeSnapshot
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.util.Map}
+    - {name: p3, type: 'java.lang.String[]'}
+    returnType: void
+  - name: takeTableSnapshot
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: java.lang.String}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+  - name: truncate
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.String}
+    returnType: void
+  - name: updateSnitch
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: java.lang.Boolean}
+    - {name: p3, type: java.lang.Integer}
+    - {name: p4, type: java.lang.Integer}
+    - {name: p5, type: java.lang.Double}
+    returnType: void
+  - name: upgradeSSTables
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    - {name: p3, type: 'java.lang.String[]'}
+    returnType: int
+  - name: upgradeSSTables
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    - {name: p3, type: int}
+    - {name: p4, type: 'java.lang.String[]'}
+    returnType: int
+  - name: verify
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: java.lang.String}
+    - {name: p3, type: 'java.lang.String[]'}
+    returnType: int
+  - name: verify
+    parameters:
+    - {name: p1, type: boolean}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: java.lang.String}
+    - {name: p8, type: 'java.lang.String[]'}
+    returnType: int
+org.apache.cassandra.db:type=Tables,keyspace=system,table=IndexInfo:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=available_ranges:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=available_ranges_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=batches:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=built_views:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=compaction_history:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=local:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=paxos:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=peer_events:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=peer_events_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=peers:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=peers_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=prepared_statements:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=repairs:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=size_estimates:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=sstable_activity_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=table_estimates:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=transferred_ranges:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=transferred_ranges_v2:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system,table=view_builds_in_progress:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_auth,table=network_permissions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_auth,table=resource_role_permissons_index:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_auth,table=role_members:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_auth,table=role_permissions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_auth,table=roles:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_distributed,table=parent_repair_history:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_distributed,table=repair_history:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_distributed,table=view_build_status:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=aggregates:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=columns:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=dropped_columns:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=functions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=indexes:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=keyspaces:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=tables:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=triggers:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=types:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_schema,table=views:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_traces,table=events:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.db:type=Tables,keyspace=system_traces,table=sessions:
+  attributes:
+  - {access: read-only, name: AutoCompactionDisabled, type: boolean}
+  - {access: read-only, name: BuiltIndexes, type: java.util.List}
+  - {access: read-only, name: ColumnFamilyName, type: java.lang.String}
+  - {access: read-only, name: CompactionDiskSpaceCheckEnabled, type: boolean}
+  - {access: read/write, name: CompactionParameters, type: java.util.Map}
+  - {access: read/write, name: CompactionParametersJson, type: java.lang.String}
+  - {access: read/write, name: CompressionParameters, type: java.util.Map}
+  - {access: read/write, name: CompressionParametersJson, type: java.lang.String}
+  - {access: write-only, name: CrcCheckChance, type: double}
+  - {access: read-only, name: DroppableTombstoneRatio, type: double}
+  - {access: read-only, name: LevelFanoutSize, type: int}
+  - {access: read/write, name: MaximumCompactionThreshold, type: int}
+  - {access: read/write, name: MinimumCompactionThreshold, type: int}
+  - {access: read/write, name: NeverPurgeTombstones, type: boolean}
+  - {access: read-only, name: SSTableCountPerLevel, type: 'int[]'}
+  - {access: read-only, name: TableName, type: java.lang.String}
+  - {access: read-only, name: UnleveledSSTables, type: int}
+  operations:
+  - name: beginLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    - {name: p3, type: int}
+    returnType: void
+  - name: compactionDiskSpaceCheck
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: estimateKeys
+    parameters: []
+    returnType: long
+  - name: finishLocalSampling
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: int}
+    returnType: java.util.List
+  - name: forceCompactionForTokenRange
+    parameters:
+    - {name: p1, type: java.util.Collection}
+    returnType: void
+  - name: forceMajorCompaction
+    parameters:
+    - {name: p1, type: boolean}
+    returnType: void
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.util.List
+  - name: getSSTablesForKey
+    parameters:
+    - {name: p1, type: java.lang.String}
+    - {name: p2, type: boolean}
+    returnType: java.util.List
+  - name: importNewSSTables
+    parameters:
+    - {name: p1, type: java.util.Set}
+    - {name: p2, type: boolean}
+    - {name: p3, type: boolean}
+    - {name: p4, type: boolean}
+    - {name: p5, type: boolean}
+    - {name: p6, type: boolean}
+    - {name: p7, type: boolean}
+    returnType: java.util.List
+  - name: loadNewSSTables
+    parameters: []
+    returnType: void
+  - name: setCompactionThresholds
+    parameters:
+    - {name: p1, type: int}
+    - {name: p2, type: int}
+    returnType: void
+  - name: trueSnapshotsSize
+    parameters: []
+    returnType: long
+org.apache.cassandra.hints:type=HintsService:
+  attributes: []
+  operations:
+  - name: deleteAllHints
+    parameters: []
+    returnType: void
+  - name: deleteAllHintsForEndpoint
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: pauseDispatch
+    parameters: []
+    returnType: void
+  - name: resumeDispatch
+    parameters: []
+    returnType: void
+org.apache.cassandra.internal:type=CacheCleanupExecutor:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=CompactionExecutor:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=GossipStage:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=HintsDispatcher:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=MemtableFlushWriter:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=MemtablePostFlush:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=MemtableReclaimMemory:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=MigrationStage:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=PendingRangeCalculator:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=PerDiskMemtableFlushWriter_0:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=Sampler:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=SecondaryIndexManagement:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=ValidationExecutor:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.internal:type=ViewBuildExecutor:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: CoreThreads, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  - {access: read/write, name: MaximumThreads, type: int}
+  operations: []
+org.apache.cassandra.metrics:type=BufferPool,name=Misses:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,name=Size:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=chunk-cache,name=Hits:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=chunk-cache,name=Misses:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=chunk-cache,name=OverflowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=chunk-cache,name=Size:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=chunk-cache,name=UsedSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=networking,name=Hits:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=networking,name=Misses:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=networking,name=OverflowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=networking,name=Size:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=BufferPool,scope=networking,name=UsedSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CQL,name=PreparedStatementsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CQL,name=PreparedStatementsEvicted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CQL,name=PreparedStatementsExecuted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CQL,name=PreparedStatementsRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CQL,name=RegularStatementsExecuted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=Capacity:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=Entries:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=FifteenMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=FiveMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=HitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=Hits:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=Misses:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=OneMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=Requests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=CounterCache,name=Size:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=FifteenMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=FiveMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=HitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Misses:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=OneMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=FifteenMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=FiveMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=HitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hits:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Misses:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=OneMinuteHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=AuthFailure:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=AuthSuccess:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=ClientsByProtocolVersion:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=ConnectedNativeClients:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=ConnectedNativeClientsByUser:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=Connections:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=PausedConnections:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=RequestDiscarded:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=RequestsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=RequestsSizeByIpDistribution:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Client,name=clientsByProtocolVersion:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=connectedNativeClients:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=connectedNativeClientsByUser:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Client,name=connections:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=ConditionNotMet:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=ContentionHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=UnfinishedCommit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=UnknownResult:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=ConditionNotMet:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=ContentionHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=UnfinishedCommit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=CASWrite,name=UnknownResult:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=RangeSlice,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=RangeSlice,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=RangeSlice,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=RangeSlice,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=RangeSlice,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ALL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ALL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ALL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ALL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ALL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ANY,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ANY,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ANY,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ANY,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ANY,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-EACH_QUORUM,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-EACH_QUORUM,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-EACH_QUORUM,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-EACH_QUORUM,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-EACH_QUORUM,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_ONE,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_ONE,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_ONE,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_ONE,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_ONE,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_QUORUM,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_QUORUM,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_QUORUM,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_QUORUM,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_QUORUM,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_SERIAL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_SERIAL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_SERIAL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_SERIAL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-LOCAL_SERIAL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-NODE_LOCAL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-NODE_LOCAL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-NODE_LOCAL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-NODE_LOCAL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-NODE_LOCAL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ONE,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ONE,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ONE,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ONE,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ONE,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-QUORUM,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-QUORUM,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-QUORUM,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-QUORUM,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-QUORUM,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-SERIAL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-SERIAL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-SERIAL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-SERIAL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-SERIAL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-THREE,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-THREE,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-THREE,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-THREE,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-THREE,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-TWO,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-TWO,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-TWO,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-TWO,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Read-TWO,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=ViewPendingMutations:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=ViewReplicasAttempted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=ViewReplicasSuccess:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=ViewWrite,name=ViewWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ALL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ALL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ALL,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ALL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ALL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ALL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ANY,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ANY,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ANY,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ANY,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ANY,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ANY,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-EACH_QUORUM,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-EACH_QUORUM,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-EACH_QUORUM,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-EACH_QUORUM,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-EACH_QUORUM,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-EACH_QUORUM,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_ONE,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_ONE,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_ONE,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_ONE,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_ONE,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_ONE,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_QUORUM,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_QUORUM,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_QUORUM,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_QUORUM,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_QUORUM,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_QUORUM,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_SERIAL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_SERIAL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_SERIAL,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_SERIAL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_SERIAL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-LOCAL_SERIAL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-NODE_LOCAL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-NODE_LOCAL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-NODE_LOCAL,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-NODE_LOCAL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-NODE_LOCAL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-NODE_LOCAL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ONE,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ONE,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ONE,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ONE,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ONE,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-ONE,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-QUORUM,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-QUORUM,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-QUORUM,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-QUORUM,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-QUORUM,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-QUORUM,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-SERIAL,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-SERIAL,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-SERIAL,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-SERIAL,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-SERIAL,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-SERIAL,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-THREE,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-THREE,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-THREE,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-THREE,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-THREE,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-THREE,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-TWO,name=Failures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-TWO,name=Latency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-TWO,name=MutationSizeHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-TWO,name=Timeouts:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-TWO,name=TotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ClientRequest,scope=Write-TWO,name=Unavailables:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=IndexInfo,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=available_ranges_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=batches,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=built_views,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=compaction_history,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=local,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=paxos,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peer_events_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=peers_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=prepared_statements,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=repairs,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=size_estimates,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=sstable_activity_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=table_estimates,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=transferred_ranges_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=EstimatedRowSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system,scope=view_builds_in_progress,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AllMemtablesLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BloomFilterDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=EstimatedRowSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=SSTablesPerReadHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=SpeculativeFailedRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=network_permissions,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AdditionalWrites
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=AnticompactionTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesAnticompacted
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesPendingRepair
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesRepaired
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesUnrepaired
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesValidated
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=CompactionBytesWritten
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=CompressionRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=CoordinatorReadLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=CoordinatorScanLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=CoordinatorWriteLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=DroppedMutations
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=EstimatedRowCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=EstimatedRowSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=KeyCacheHitRate
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=LiveDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=LiveSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=LiveScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableColumnsCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOnHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableSwitchCount
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=OldVersionSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=PartitionsValidated
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=PendingCompactions
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=PendingFlushes
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=PercentRepaired
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ReadRepairRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairJobsCompleted
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairJobsStarted
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairSyncTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RowCacheHitOutOfRange
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=SSTablesPerReadHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=SnapshotsSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeFailedRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=TombstoneFailures
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=TombstoneWarnings
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=TotalDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=UnleveledSSTables
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ValidationTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ViewLockAcquireTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=resource_role_permissons_index,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_members,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=role_permissions,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_auth,scope=roles,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AdditionalWrites
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=AnticompactionTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BytesAnticompacted
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BytesPendingRepair
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BytesUnrepaired
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=CompactionBytesWritten
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=CompressionRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=CoordinatorReadLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=CoordinatorScanLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=CoordinatorWriteLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=DroppedMutations
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=EstimatedRowCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=EstimatedRowSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=KeyCacheHitRate
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=LiveDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=LiveSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=LiveScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MemtableColumnsCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MemtableLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOnHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MemtableSwitchCount
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=OldVersionSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=PartitionsValidated
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=PendingCompactions
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=PercentRepaired
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ReadRepairRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RepairJobsCompleted
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RepairJobsStarted
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RowCacheHitOutOfRange
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=SSTablesPerReadHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeFailedRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=TombstoneFailures
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=TombstoneWarnings
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=TotalDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=UnleveledSSTables
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ViewLockAcquireTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=parent_repair_history,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AllMemtablesLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AllMemtablesOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BloomFilterDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=CompactionBytesWritten
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=CoordinatorReadLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=CoordinatorScanLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=CoordinatorWriteLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=EstimatedRowSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MemtableOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MemtableOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=OldVersionSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=SSTablesPerReadHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=SpeculativeFailedRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=repair_history,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BloomFilterDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=CompactionBytesWritten
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=CoordinatorReadLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=CoordinatorScanLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=CoordinatorWriteLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=EstimatedRowSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=LiveScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MemtableColumnsCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MemtableLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MemtableOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MemtableOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MemtableOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MemtableSwitchCount
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=OldVersionSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=PartitionsValidated
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RepairJobsCompleted
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RowCacheHitOutOfRange
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=SSTablesPerReadHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=SpeculativeFailedRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ViewLockAcquireTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_distributed,scope=view_build_status,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=aggregates,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=columns,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=dropped_columns,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=functions,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=indexes,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=keyspaces,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=tables,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=triggers,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=types,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_schema,scope=views,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=events,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=EstimatedRowCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=EstimatedRowSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,keyspace=system_traces,scope=sessions,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=MaxRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MeanRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MinRowSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ColumnFamily,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ColumnFamily,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CommitLog,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CommitLog,name=OverSizedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=CommitLog,name=WaitingOnCommit:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=CommitLog,name=WaitingOnSegmentAllocation:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Compaction,name=CompactionsAborted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Compaction,name=CompactionsReduced:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Compaction,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Compaction,name=PendingTasksByTableName:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Compaction,name=SSTablesDroppedFromCompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=CLEANUP_MSG,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=CLEANUP_MSG,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=CLEANUP_MSG,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=ECHO_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=ECHO_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=ECHO_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=ECHO_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=ECHO_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=ECHO_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FAILED_SESSION_MSG,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FAILED_SESSION_MSG,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FAILED_SESSION_MSG,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FAILURE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FAILURE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FAILURE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_COMMIT_MSG,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_COMMIT_MSG,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_COMMIT_MSG,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_PROMISE_MSG,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_PROMISE_MSG,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_PROMISE_MSG,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_PROPOSE_MSG,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_PROPOSE_MSG,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=FINALIZE_PROPOSE_MSG,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_ACK,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_ACK,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_ACK,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_ACK2,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_ACK2,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_ACK2,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_SYN,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_SYN,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_DIGEST_SYN,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_SHUTDOWN,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_SHUTDOWN,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=GOSSIP_SHUTDOWN,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=INTERNAL_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=INTERNAL_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=INTERNAL_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_COMMIT_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_COMMIT_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_COMMIT_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_COMMIT_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_COMMIT_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_COMMIT_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PREPARE_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PREPARE_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PREPARE_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PREPARE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PREPARE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PREPARE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PROPOSE_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PROPOSE_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PROPOSE_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PROPOSE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PROPOSE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PAXOS_PROPOSE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PING_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PING_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PING_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PING_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PING_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PING_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_CONSISTENT_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_CONSISTENT_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_CONSISTENT_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_CONSISTENT_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_CONSISTENT_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_CONSISTENT_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_MSG,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_MSG,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=PREPARE_MSG,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPAIR_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPAIR_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPAIR_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPLICATION_DONE_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPLICATION_DONE_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPLICATION_DONE_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPLICATION_DONE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPLICATION_DONE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REPLICATION_DONE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PULL_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PULL_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PULL_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PULL_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PULL_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PULL_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PUSH_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PUSH_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PUSH_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PUSH_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PUSH_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_PUSH_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_VERSION_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_VERSION_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_VERSION_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_VERSION_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_VERSION_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SCHEMA_VERSION_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_MSG,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_MSG,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_MSG,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SNAPSHOT_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=STATUS_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=STATUS_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=STATUS_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=STATUS_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=STATUS_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=STATUS_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SYNC_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SYNC_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SYNC_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SYNC_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SYNC_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=SYNC_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=TRUNCATE_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=TRUNCATE_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=TRUNCATE_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=TRUNCATE_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=TRUNCATE_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=TRUNCATE_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=UNUSED_CUSTOM_VERB,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=UNUSED_CUSTOM_VERB,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=UNUSED_CUSTOM_VERB,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=VALIDATION_REQ,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=VALIDATION_REQ,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=VALIDATION_REQ,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=VALIDATION_RSP,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=VALIDATION_RSP,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=VALIDATION_RSP,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_SAMPLE,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_SAMPLE,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_SAMPLE,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TEST_1,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TEST_1,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TEST_1,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TEST_2,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TEST_2,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TEST_2,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TRACE,name=CrossNodeDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TRACE,name=Dropped:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=DroppedMessage,scope=_TRACE,name=InternalDroppedLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Index,scope=RowIndexEntry,name=IndexInfoCount:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Index,scope=RowIndexEntry,name=IndexInfoGets:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Index,scope=RowIndexEntry,name=IndexInfoReads:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Index,scope=RowIndexEntry,name=IndexedEntrySize:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=AntiCompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=IdealCLWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=IdealCLWriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairPrepareTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=WriteFailedIdealCL:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=AntiCompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=IdealCLWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=IdealCLWriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairPrepareTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=WriteFailedIdealCL:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_auth,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=AntiCompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=IdealCLWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=IdealCLWriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairPrepareTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=WriteFailedIdealCL:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_distributed,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=AntiCompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=IdealCLWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=IdealCLWriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairPrepareTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=WriteFailedIdealCL:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_schema,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=AntiCompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=IdealCLWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=IdealCLWriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairPrepareTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=WriteFailedIdealCL:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Keyspace,keyspace=system_traces,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=MemtablePool,name=BlockedOnAllocation:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=MemtablePool,name=PendingFlushTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Messaging,name=BATCH_REMOVE_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=BATCH_REMOVE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=BATCH_STORE_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=BATCH_STORE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=CLEANUP_MSG-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=COUNTER_MUTATION_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=COUNTER_MUTATION_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=CrossNodeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=ECHO_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=ECHO_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=FAILED_SESSION_MSG-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=FAILURE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=FINALIZE_COMMIT_MSG-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=FINALIZE_PROMISE_MSG-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=FINALIZE_PROPOSE_MSG-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=GOSSIP_DIGEST_ACK-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=GOSSIP_DIGEST_ACK2-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=GOSSIP_DIGEST_SYN-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=GOSSIP_SHUTDOWN-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=HINT_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=HINT_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=INTERNAL_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=MUTATION_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=MUTATION_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PAXOS_COMMIT_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PAXOS_COMMIT_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PAXOS_PREPARE_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PAXOS_PREPARE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PAXOS_PROPOSE_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PAXOS_PROPOSE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PING_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PING_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PREPARE_CONSISTENT_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PREPARE_CONSISTENT_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=PREPARE_MSG-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=RANGE_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=RANGE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=READ_REPAIR_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=READ_REPAIR_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=READ_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=READ_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=REPAIR_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=REPLICATION_DONE_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=REPLICATION_DONE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=REQUEST_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SCHEMA_PULL_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SCHEMA_PULL_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SCHEMA_PUSH_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SCHEMA_PUSH_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SCHEMA_VERSION_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SCHEMA_VERSION_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SNAPSHOT_MSG-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SNAPSHOT_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SNAPSHOT_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=STATUS_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=STATUS_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SYNC_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=SYNC_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=TRUNCATE_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=TRUNCATE_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=UNUSED_CUSTOM_VERB-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=VALIDATION_REQ-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=VALIDATION_RSP-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=_SAMPLE-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=_TEST_1-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=_TEST_2-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Messaging,name=_TRACE-WaitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=ReadRepair,name=Attempted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ReadRepair,name=ReconcileRead:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ReadRepair,name=RepairedBackground:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ReadRepair,name=RepairedBlocking:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ReadRepair,name=SpeculatedRead:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ReadRepair,name=SpeculatedWrite:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Repair,name=PreviewFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Storage,name=Exceptions:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Storage,name=Load:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Storage,name=RepairExceptions:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Storage,name=TotalHints:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Storage,name=TotalHintsInProgress:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=IndexInfo,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=available_ranges_v2,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=batches,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=built_views,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=compaction_history,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=local,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=paxos,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peer_events_v2,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=peers_v2,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=prepared_statements,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=repairs,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=size_estimates,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=sstable_activity_v2,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=table_estimates,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=transferred_ranges_v2,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system,scope=view_builds_in_progress,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=EstimatedPartitionSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=network_permissions,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CasCommitTotalLatency
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CasPrepareTotalLatency
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CasProposeTotalLatency
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CompactionBytesWritten
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CoordinatorReadLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CoordinatorScanLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=CoordinatorWriteLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=EstimatedPartitionCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=EstimatedPartitionSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=LiveScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableColumnsCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=OldVersionSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RowCacheHitOutOfRange
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=SSTablesPerReadHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeFailedRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=resource_role_permissons_index,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_members,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=role_permissions,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_auth,scope=roles,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesLiveDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesOffHeapSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterDiskSpaceUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CasPrepareTotalLatency
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CasProposeTotalLatency
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CompactionBytesWritten
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CoordinatorReadLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CoordinatorScanLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=CoordinatorWriteLatency
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=EstimatedPartitionCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=EstimatedPartitionSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=OldVersionSSTableCount
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=SSTablesPerReadHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeFailedRetries
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=TombstoneScannedHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=parent_repair_history,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=EstimatedPartitionSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=repair_history,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AdditionalWriteLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesOffHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AllMemtablesOnHeapDataSize
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BloomFilterOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BytesMutatedAnticompaction
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ColUpdateTimeDeltaHistogram
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=EstimatedColumnCountHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=EstimatedPartitionSizeHistogram
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=IndexSummaryOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=MutatedAnticompactionGauge
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RecentBloomFilterFalsePositives
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RecentBloomFilterFalseRatio
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RepairedDataTrackingOverreadRows
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RepairedDataTrackingOverreadTime
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ShortReadProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=SpeculativeInsufficientReplicas
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=SpeculativeSampleLatencyNanos
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=WaitingOnFreeMemtableSpace
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_distributed,scope=view_build_status,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=aggregates,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=columns,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CompressionMetadataOffHeapMemoryUsed
+: attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RepairedDataInconsistenciesConfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RepairedDataInconsistenciesUnconfirmed
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ReplicaFilteringProtectionRequests
+: attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=dropped_columns,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=functions,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=indexes,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=keyspaces,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=tables,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=triggers,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=types,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_schema,scope=views,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+? org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ReplicaFilteringProtectionRowsCachedPerQuery
+: attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=events,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CasCommitLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CasCommitTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CasPrepareLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CasPrepareTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CasProposeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CasProposeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CoordinatorReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CoordinatorScanLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=CoordinatorWriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=EstimatedColumnCountHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=EstimatedPartitionCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=EstimatedPartitionSizeHistogram:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=KeyCacheHitRate:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ReadRepairRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ReplicaFilteringProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ReplicaFilteringProtectionRowsCachedPerQuery:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ShortReadProtectionRequests:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=WaitingOnFreeMemtableSpace:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,keyspace=system_traces,scope=sessions,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AdditionalWriteLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AdditionalWrites:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AllMemtablesHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AllMemtablesLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AllMemtablesOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AllMemtablesOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AllMemtablesOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=AnticompactionTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=BloomFilterDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BloomFilterOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BytesAnticompacted:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BytesFlushed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BytesMutatedAnticompaction:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BytesPendingRepair:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BytesRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BytesUnrepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=BytesValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=ColUpdateTimeDeltaHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=CompactionBytesWritten:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=CompressionMetadataOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=CompressionRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=DroppedMutations:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=IndexSummaryOffHeapMemoryUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=LiveDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=LiveSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=LiveScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=MaxPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MeanPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MemtableColumnsCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MemtableLiveDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MemtableOffHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MemtableOffHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MemtableOnHeapDataSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MemtableOnHeapSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MemtableSwitchCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MinPartitionSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=MutatedAnticompactionGauge:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=OldVersionSSTableCount:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=PartitionsValidated:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=PendingCompactions:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=PendingFlushes:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=PercentRepaired:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RangeLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=RangeTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=ReadLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=ReadTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RecentBloomFilterFalsePositives:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RecentBloomFilterFalseRatio:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RepairJobsCompleted:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RepairJobsStarted:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RepairSyncTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=RepairedDataInconsistenciesConfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RepairedDataInconsistenciesUnconfirmed:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RepairedDataTrackingOverreadRows:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=RepairedDataTrackingOverreadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=RowCacheHit:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RowCacheHitOutOfRange:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=RowCacheMiss:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=SSTablesPerReadHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=SnapshotsSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=SpeculativeFailedRetries:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=SpeculativeInsufficientReplicas:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=SpeculativeRetries:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=SpeculativeSampleLatencyNanos:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=TombstoneFailures:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=TombstoneScannedHistogram:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: Max, type: long}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: Min, type: long}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=TombstoneWarnings:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=TotalDiskSpaceUsed:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=UnleveledSSTables:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=Table,name=ValidationTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=ViewLockAcquireTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=ViewReadTime:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=WriteLatency:
+  attributes:
+  - {access: read-only, name: 50thPercentile, type: double}
+  - {access: read-only, name: 75thPercentile, type: double}
+  - {access: read-only, name: 95thPercentile, type: double}
+  - {access: read-only, name: 98thPercentile, type: double}
+  - {access: read-only, name: 999thPercentile, type: double}
+  - {access: read-only, name: 99thPercentile, type: double}
+  - {access: read-only, name: Count, type: long}
+  - {access: read-only, name: DurationUnit, type: java.lang.String}
+  - {access: read-only, name: FifteenMinuteRate, type: double}
+  - {access: read-only, name: FiveMinuteRate, type: double}
+  - {access: read-only, name: Max, type: double}
+  - {access: read-only, name: Mean, type: double}
+  - {access: read-only, name: MeanRate, type: double}
+  - {access: read-only, name: Min, type: double}
+  - {access: read-only, name: OneMinuteRate, type: double}
+  - {access: read-only, name: RateUnit, type: java.lang.String}
+  - {access: read-only, name: RecentValues, type: 'long[]'}
+  - {access: read-only, name: StdDev, type: double}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+  - name: values
+    parameters: []
+    returnType: long[]
+org.apache.cassandra.metrics:type=Table,name=WriteTotalLatency:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CacheCleanupExecutor,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CacheCleanupExecutor,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CacheCleanupExecutor,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CacheCleanupExecutor,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CacheCleanupExecutor,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CacheCleanupExecutor,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CacheCleanupExecutor,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=HintsDispatcher,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=HintsDispatcher,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=HintsDispatcher,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=HintsDispatcher,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=HintsDispatcher,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=HintsDispatcher,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=HintsDispatcher,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableReclaimMemory,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableReclaimMemory,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableReclaimMemory,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableReclaimMemory,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableReclaimMemory,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableReclaimMemory,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableReclaimMemory,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PendingRangeCalculator,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PendingRangeCalculator,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PendingRangeCalculator,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PendingRangeCalculator,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PendingRangeCalculator,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PendingRangeCalculator,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PendingRangeCalculator,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PerDiskMemtableFlushWriter_0,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PerDiskMemtableFlushWriter_0,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PerDiskMemtableFlushWriter_0,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PerDiskMemtableFlushWriter_0,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PerDiskMemtableFlushWriter_0,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PerDiskMemtableFlushWriter_0,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=PerDiskMemtableFlushWriter_0,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=Sampler,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=Sampler,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=Sampler,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=Sampler,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=Sampler,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=Sampler,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=Sampler,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=SecondaryIndexManagement,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=SecondaryIndexManagement,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=SecondaryIndexManagement,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=SecondaryIndexManagement,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=SecondaryIndexManagement,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=SecondaryIndexManagement,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=SecondaryIndexManagement,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ViewBuildExecutor,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ViewBuildExecutor,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ViewBuildExecutor,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ViewBuildExecutor,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ViewBuildExecutor,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ViewBuildExecutor,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ViewBuildExecutor,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=transport,scope=Native-Transport-Requests,name=ActiveTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=transport,scope=Native-Transport-Requests,name=CompletedTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=transport,scope=Native-Transport-Requests,name=CurrentlyBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=transport,scope=Native-Transport-Requests,name=MaxPoolSize:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=transport,scope=Native-Transport-Requests,name=MaxTasksQueued:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=transport,scope=Native-Transport-Requests,name=PendingTasks:
+  attributes:
+  - {access: read-only, name: Value, type: java.lang.Object}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.metrics:type=ThreadPools,path=transport,scope=Native-Transport-Requests,name=TotalBlockedTasks:
+  attributes:
+  - {access: read-only, name: Count, type: long}
+  operations:
+  - name: objectName
+    parameters: []
+    returnType: javax.management.ObjectName
+org.apache.cassandra.net:type=FailureDetector:
+  attributes:
+  - {access: read-only, name: AllEndpointStates, type: java.lang.String}
+  - {access: read-only, name: AllEndpointStatesWithPort, type: java.lang.String}
+  - {access: read-only, name: DownEndpointCount, type: int}
+  - {access: read/write, name: PhiConvictThreshold, type: double}
+  - {access: read-only, name: PhiValues, type: javax.management.openmbean.TabularData}
+  - {access: read-only, name: PhiValuesWithPort, type: javax.management.openmbean.TabularData}
+  - {access: read-only, name: SimpleStates, type: java.util.Map}
+  - {access: read-only, name: SimpleStatesWithPort, type: java.util.Map}
+  - {access: read-only, name: UpEndpointCount, type: int}
+  operations:
+  - name: dumpInterArrivalTimes
+    parameters: []
+    returnType: void
+  - name: getEndpointState
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: java.lang.String
+org.apache.cassandra.net:type=Gossiper:
+  attributes:
+  - {access: read-only, name: ReleaseVersionsWithPort, type: java.util.Map}
+  - {access: read-only, name: Seeds, type: java.util.List}
+  operations:
+  - name: assassinateEndpoint
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+  - name: getCurrentGenerationNumber
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: int
+  - name: getEndpointDowntime
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: long
+  - name: reloadSeeds
+    parameters: []
+    returnType: java.util.List
+  - name: unsafeAssassinateEndpoint
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: void
+org.apache.cassandra.net:type=MessagingService:
+  attributes:
+  - {access: read/write, name: BackPressureEnabled, type: boolean}
+  - {access: read-only, name: BackPressurePerHost, type: java.util.Map}
+  - {access: read-only, name: DroppedMessages, type: java.util.Map}
+  - {access: read-only, name: GossipMessageCompletedTasks, type: java.util.Map}
+  - {access: read-only, name: GossipMessageCompletedTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: GossipMessageDroppedTasks, type: java.util.Map}
+  - {access: read-only, name: GossipMessageDroppedTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: GossipMessagePendingTasks, type: java.util.Map}
+  - {access: read-only, name: GossipMessagePendingTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: LargeMessageCompletedTasks, type: java.util.Map}
+  - {access: read-only, name: LargeMessageCompletedTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: LargeMessageDroppedTasks, type: java.util.Map}
+  - {access: read-only, name: LargeMessageDroppedTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: LargeMessagePendingTasks, type: java.util.Map}
+  - {access: read-only, name: LargeMessagePendingTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: SmallMessageCompletedTasks, type: java.util.Map}
+  - {access: read-only, name: SmallMessageCompletedTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: SmallMessageDroppedTasks, type: java.util.Map}
+  - {access: read-only, name: SmallMessageDroppedTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: SmallMessagePendingTasks, type: java.util.Map}
+  - {access: read-only, name: SmallMessagePendingTasksWithPort, type: java.util.Map}
+  - {access: read-only, name: TimeoutsPerHost, type: java.util.Map}
+  - {access: read-only, name: TimeoutsPerHostWithPort, type: java.util.Map}
+  - {access: read-only, name: TotalTimeouts, type: long}
+  operations:
+  - name: getVersion
+    parameters:
+    - {name: p1, type: java.lang.String}
+    returnType: int
+  - name: reloadSslCertificates
+    parameters: []
+    returnType: void
+org.apache.cassandra.net:type=StreamManager:
+  attributes:
+  - {access: read-only, name: CurrentStreams, type: java.util.Set}
+  - {access: read-only, name: NotificationInfo, type: 'javax.management.MBeanNotificationInfo[]'}
+  operations:
+  - name: addNotificationListener
+    parameters:
+    - {name: p1, type: javax.management.NotificationListener}
+    - {name: p2, type: javax.management.NotificationFilter}
+    - {name: p3, type: java.lang.Object}
+    returnType: void
+  - name: removeNotificationListener
+    parameters:
+    - {name: p1, type: javax.management.NotificationListener}
+    returnType: void
+  - name: removeNotificationListener
+    parameters:
+    - {name: p1, type: javax.management.NotificationListener}
+    - {name: p2, type: javax.management.NotificationFilter}
+    - {name: p3, type: java.lang.Object}
+    returnType: void
+org.apache.cassandra.request:type=MutationStage:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  operations: []
+org.apache.cassandra.request:type=ReadStage:
+  attributes:
+  - {access: read/write, name: CorePoolSize, type: int}
+  - {access: read/write, name: MaximumPoolSize, type: int}
+  operations: []
+org.apache.cassandra.service:type=GCInspector:
+  attributes:
+  - {access: read-only, name: AndResetStats, type: 'double[]'}
+  - {access: read/write, name: GcLogThresholdInMs, type: long}
+  - {access: read/write, name: GcWarnThresholdInMs, type: long}
+  - {access: read-only, name: StatusThresholdInMs, type: long}
+  operations: []
diff --git a/test/data/serialization/4.1/gms.EndpointState.bin b/test/data/serialization/4.1/gms.EndpointState.bin
new file mode 100644
index 0000000..083dbb7
--- /dev/null
+++ b/test/data/serialization/4.1/gms.EndpointState.bin
Binary files differ
diff --git a/test/data/serialization/4.1/gms.Gossip.bin b/test/data/serialization/4.1/gms.Gossip.bin
new file mode 100644
index 0000000..7a4fb56
--- /dev/null
+++ b/test/data/serialization/4.1/gms.Gossip.bin
Binary files differ
diff --git a/test/data/serialization/4.1/service.SyncComplete.bin b/test/data/serialization/4.1/service.SyncComplete.bin
new file mode 100644
index 0000000..39a6243
--- /dev/null
+++ b/test/data/serialization/4.1/service.SyncComplete.bin
Binary files differ
diff --git a/test/data/serialization/4.1/service.SyncRequest.bin b/test/data/serialization/4.1/service.SyncRequest.bin
new file mode 100644
index 0000000..f853b20
--- /dev/null
+++ b/test/data/serialization/4.1/service.SyncRequest.bin
Binary files differ
diff --git a/test/data/serialization/4.1/service.ValidationComplete.bin b/test/data/serialization/4.1/service.ValidationComplete.bin
new file mode 100644
index 0000000..bb4de43
--- /dev/null
+++ b/test/data/serialization/4.1/service.ValidationComplete.bin
Binary files differ
diff --git a/test/data/serialization/4.1/service.ValidationRequest.bin b/test/data/serialization/4.1/service.ValidationRequest.bin
new file mode 100644
index 0000000..04c492a
--- /dev/null
+++ b/test/data/serialization/4.1/service.ValidationRequest.bin
Binary files differ
diff --git a/test/data/serialization/4.1/utils.EstimatedHistogram.bin b/test/data/serialization/4.1/utils.EstimatedHistogram.bin
new file mode 100644
index 0000000..e878eda
--- /dev/null
+++ b/test/data/serialization/4.1/utils.EstimatedHistogram.bin
Binary files differ
diff --git a/test/distributed/org/apache/cassandra/distributed/Cluster.java b/test/distributed/org/apache/cassandra/distributed/Cluster.java
index 5c5a954..b7b9207 100644
--- a/test/distributed/org/apache/cassandra/distributed/Cluster.java
+++ b/test/distributed/org/apache/cassandra/distributed/Cluster.java
@@ -32,15 +32,14 @@
  */
 public class Cluster extends AbstractCluster<IInvokableInstance>
 {
-
     private Cluster(Builder builder)
     {
         super(builder);
     }
 
-    protected IInvokableInstance newInstanceWrapper(int generation, Versions.Version version, IInstanceConfig config)
+    protected IInvokableInstance newInstanceWrapper(Versions.Version version, IInstanceConfig config)
     {
-        return new Wrapper(generation, version, config);
+        return new Wrapper(version, config);
     }
 
     public static Builder build()
diff --git a/test/distributed/org/apache/cassandra/distributed/UpgradeableCluster.java b/test/distributed/org/apache/cassandra/distributed/UpgradeableCluster.java
index 7a4d2bb..b7bd1d6 100644
--- a/test/distributed/org/apache/cassandra/distributed/UpgradeableCluster.java
+++ b/test/distributed/org/apache/cassandra/distributed/UpgradeableCluster.java
@@ -40,10 +40,10 @@
         super(builder);
     }
 
-    protected IUpgradeableInstance newInstanceWrapper(int generation, Versions.Version version, IInstanceConfig config)
+    protected IUpgradeableInstance newInstanceWrapper(Versions.Version version, IInstanceConfig config)
     {
         config.set(Constants.KEY_DTEST_API_CONFIG_CHECK, false);
-        return new Wrapper(generation, version, config);
+        return new Wrapper(version, config);
     }
 
     public static Builder build()
diff --git a/test/distributed/org/apache/cassandra/distributed/action/GossipHelper.java b/test/distributed/org/apache/cassandra/distributed/action/GossipHelper.java
index 4f0d343..5b23b86 100644
--- a/test/distributed/org/apache/cassandra/distributed/action/GossipHelper.java
+++ b/test/distributed/org/apache/cassandra/distributed/action/GossipHelper.java
@@ -29,7 +29,8 @@
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
@@ -45,15 +46,21 @@
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.schema.MigrationCoordinator;
+import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 
 import static org.apache.cassandra.distributed.impl.DistributedTestSnitch.toCassandraInetAddressAndPort;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.junit.Assert.assertTrue;
 
 public class GossipHelper
 {
+    public static InstanceAction statusToBlank(IInvokableInstance newNode)
+    {
+        return (instance) -> changeGossipState(instance, newNode, Collections.emptyList());
+    }
+
     public static InstanceAction statusToBootstrap(IInvokableInstance newNode)
     {
         return (instance) ->
@@ -234,8 +241,8 @@
             pullTo.acceptsOnInstance((InetSocketAddress pullFrom) -> {
                 InetAddressAndPort endpoint = toCassandraInetAddressAndPort(pullFrom);
                 EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
-                MigrationCoordinator.instance.reportEndpointVersion(endpoint, state);
-                assertTrue("schema is ready", MigrationCoordinator.instance.awaitSchemaRequests(TimeUnit.SECONDS.toMillis(10)));
+                Gossiper.instance.doOnChangeNotifications(endpoint, ApplicationState.SCHEMA, state.getApplicationState(ApplicationState.SCHEMA));
+                assertTrue("schema is ready", Schema.instance.waitUntilReady(Duration.ofSeconds(10)));
             }).accept(pullFrom);
         }
     }
@@ -265,7 +272,7 @@
                 List<Token> tokens = Collections.singletonList(partitioner.getTokenFactory().fromString(tokenString));
                 try
                 {
-                    Collection<InetAddressAndPort> collisions = StorageService.instance.prepareForBootstrap(waitForSchema.toMillis());
+                    Collection<InetAddressAndPort> collisions = StorageService.instance.prepareForBootstrap(waitForSchema.toMillis(), 0);
                     assert collisions.size() == 0 : String.format("Didn't expect any replacements but got %s", collisions);
                     boolean isBootstrapSuccessful = StorageService.instance.bootstrap(tokens, waitForBootstrap.toMillis());
                     assert isBootstrapSuccessful : "Bootstrap did not complete successfully";
@@ -342,14 +349,14 @@
     public static VersionedApplicationState statusLeft(IInvokableInstance instance)
     {
         return versionedToken(instance, ApplicationState.STATUS, (partitioner, tokens) -> {
-            return new VersionedValue.VersionedValueFactory(partitioner).left(tokens, System.currentTimeMillis() + Gossiper.aVeryLongTime);
+            return new VersionedValue.VersionedValueFactory(partitioner).left(tokens, currentTimeMillis() + Gossiper.aVeryLongTime);
         });
     }
 
     public static VersionedApplicationState statusWithPortLeft(IInvokableInstance instance)
     {
         return versionedToken(instance, ApplicationState.STATUS_WITH_PORT, (partitioner, tokens) -> {
-            return new VersionedValue.VersionedValueFactory(partitioner).left(tokens, System.currentTimeMillis() + Gossiper.aVeryLongTime);
+            return new VersionedValue.VersionedValueFactory(partitioner).left(tokens, currentTimeMillis() + Gossiper.aVeryLongTime);
 
         });
     }
@@ -376,9 +383,11 @@
     {
         return instance.appliesOnInstance((String partitionerString, String tokenString) -> {
             IPartitioner partitioner = FBUtilities.newPartitioner(partitionerString);
-            Token token = partitioner.getTokenFactory().fromString(tokenString);
+            Collection<Token> tokens = tokenString.contains(",")
+                                       ? Stream.of(tokenString.split(",")).map(partitioner.getTokenFactory()::fromString).collect(Collectors.toList())
+                                       : Collections.singleton(partitioner.getTokenFactory().fromString(tokenString));
 
-            VersionedValue versionedValue = supplier.apply(partitioner, Collections.singleton(token));
+            VersionedValue versionedValue = supplier.apply(partitioner, tokens);
             return new VersionedApplicationState(applicationState.ordinal(), versionedValue.value, versionedValue.version);
         }).apply(partitionerStr, initialTokenStr);
     }
diff --git a/test/distributed/org/apache/cassandra/distributed/api/ConsistencyLevel.java b/test/distributed/org/apache/cassandra/distributed/api/ConsistencyLevel.java
new file mode 100644
index 0000000..c28de53
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/ConsistencyLevel.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.api;
+
+public enum ConsistencyLevel
+{
+    ANY(0),
+    ONE(1),
+    TWO(2),
+    THREE(3),
+    QUORUM(4),
+    ALL(5),
+    LOCAL_QUORUM(6),
+    EACH_QUORUM(7),
+    SERIAL(8),
+    LOCAL_SERIAL(9),
+    LOCAL_ONE(10),
+    NODE_LOCAL(11);
+
+    public final int code;
+    ConsistencyLevel(int code)
+    {
+        this.code = code;
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/api/IClassTransformer.java b/test/distributed/org/apache/cassandra/distributed/api/IClassTransformer.java
new file mode 100644
index 0000000..dd26822
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/IClassTransformer.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.api;
+
+public interface IClassTransformer
+{
+    /**
+     * Modify the bytecode of the provided class. Provides the original bytecode and the fully qualified name of the class.
+     * Note that the bytecode may be null, indicating the class definition could not be found; in this case a synthetic
+     * definition may be returned, or null.
+     */
+    byte[] transform(String name, byte[] bytecode);
+
+    default IClassTransformer initialise() { return this; }
+}
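For illustration, a minimal sketch of an IClassTransformer implementation (not part of this patch; the class name is hypothetical) that records the classes it is asked about and returns the bytecode unchanged:

package org.apache.cassandra.distributed.api;

// Hypothetical example, not part of this patch: a no-op transformer that only
// records which classes were requested and passes the bytecode through untouched.
public class LoggingClassTransformer implements IClassTransformer
{
    @Override
    public byte[] transform(String name, byte[] bytecode)
    {
        // bytecode may be null when the class definition could not be found;
        // returning it as-is preserves the "not found" behaviour.
        System.out.println("transform requested for " + name + (bytecode == null ? " (no bytecode)" : ""));
        return bytecode;
    }
}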
diff --git a/test/distributed/org/apache/cassandra/distributed/api/ICoordinator.java b/test/distributed/org/apache/cassandra/distributed/api/ICoordinator.java
new file mode 100644
index 0000000..6415a30
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/ICoordinator.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.api;
+
+import java.util.Iterator;
+import java.util.UUID;
+import java.util.concurrent.Future;
+
+import org.apache.cassandra.distributed.shared.FutureUtils;
+
+// The cross-version API requires that a Coordinator can be constructed without any constructor arguments
+public interface ICoordinator
+{
+    default Object[][] execute(String query, ConsistencyLevel consistencyLevel, Object... boundValues)
+    {
+        return executeWithResult(query, consistencyLevel, boundValues).toObjectArrays();
+    }
+
+    default Object[][] execute(String query, ConsistencyLevel serialConsistencyLevel, ConsistencyLevel commitConsistencyLevel, Object... boundValues)
+    {
+        return executeWithResult(query, serialConsistencyLevel, commitConsistencyLevel, boundValues).toObjectArrays();
+    }
+
+    SimpleQueryResult executeWithResult(String query, ConsistencyLevel consistencyLevel, Object... boundValues);
+
+    default SimpleQueryResult executeWithResult(String query, ConsistencyLevel serialConsistencyLevel, ConsistencyLevel commitConsistencyLevel, Object... boundValues)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    default Iterator<Object[]> executeWithPaging(String query, ConsistencyLevel consistencyLevel, int pageSize, Object... boundValues)
+    {
+        return executeWithPagingWithResult(query, consistencyLevel, pageSize, boundValues).map(Row::toObjectArray);
+    }
+
+    QueryResult executeWithPagingWithResult(String query, ConsistencyLevel consistencyLevel, int pageSize, Object... boundValues);
+
+    default Future<Object[][]> asyncExecuteWithTracing(UUID sessionId, String query, ConsistencyLevel consistencyLevel, Object... boundValues)
+    {
+        return FutureUtils.map(asyncExecuteWithTracingWithResult(sessionId, query, consistencyLevel, boundValues), r -> r.toObjectArrays());
+    }
+
+    Future<SimpleQueryResult> asyncExecuteWithTracingWithResult(UUID sessionId, String query, ConsistencyLevel consistencyLevel, Object... boundValues);
+
+    default Object[][] executeWithTracing(UUID sessionId, String query, ConsistencyLevel consistencyLevel, Object... boundValues)
+    {
+        return executeWithTracingWithResult(sessionId, query, consistencyLevel, boundValues).toObjectArrays();
+    }
+
+    default SimpleQueryResult executeWithTracingWithResult(UUID sessionId, String query, ConsistencyLevel consistencyLevel, Object... boundValues)
+    {
+        return FutureUtils.waitOn(asyncExecuteWithTracingWithResult(sessionId, query, consistencyLevel, boundValues));
+    }
+
+    IInstance instance();
+}
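A minimal usage sketch, assuming the in-JVM dtest Cluster from this tree, that the cluster exposes a coordinator(int) accessor and is auto-closeable; keyspace and table names are illustrative:

import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.ConsistencyLevel;
import org.apache.cassandra.distributed.api.ICoordinator;
import org.apache.cassandra.distributed.api.SimpleQueryResult;

public class CoordinatorUsageSketch
{
    public static void main(String[] args) throws Throwable
    {
        try (Cluster cluster = Cluster.build(2).start())
        {
            cluster.schemaChange("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}");
            cluster.schemaChange("CREATE TABLE ks.tbl (pk int PRIMARY KEY, v int)");

            // all statements below are routed through node 1 acting as coordinator
            ICoordinator coordinator = cluster.coordinator(1);
            coordinator.execute("INSERT INTO ks.tbl (pk, v) VALUES (?, ?)", ConsistencyLevel.QUORUM, 1, 42);

            // executeWithResult materialises the whole result set rather than returning a cursor
            SimpleQueryResult rs = coordinator.executeWithResult("SELECT pk, v FROM ks.tbl", ConsistencyLevel.QUORUM);
            while (rs.hasNext())
                System.out.println(rs.next().getInteger("v"));
        }
    }
}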
diff --git a/test/distributed/org/apache/cassandra/distributed/api/IMessage.java b/test/distributed/org/apache/cassandra/distributed/api/IMessage.java
new file mode 100644
index 0000000..cf7e920
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/IMessage.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.api;
+
+import java.io.Serializable;
+import java.net.InetSocketAddress;
+
+/**
+ * A cross-version interface for delivering internode messages via message sinks.
+ * <p>
+ * Message implementations should be serializable so they can be loaded into instances.
+ */
+public interface IMessage extends Serializable
+{
+    int verb();
+
+    byte[] bytes();
+
+    // TODO: need to make this a long
+    int id();
+
+    int version();
+
+    InetSocketAddress from();
+
+    default long expiresAtNanos() { return -1L; }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/api/QueryResult.java b/test/distributed/org/apache/cassandra/distributed/api/QueryResult.java
new file mode 100644
index 0000000..90cc83f
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/QueryResult.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.api;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+/**
+ * A table of data representing a complete query result.
+ * <p>
+ * A <code>QueryResult</code> is different from {@link java.sql.ResultSet} in several key ways:
+ *
+ * <ul>
+ *     <li>represents a complete result rather than a cursor</li>
+ *     <li>returns a {@link Row} to access the current row of data</li>
+ *     <li>relies on object pooling; {@link #hasNext()} may return the same object with different data, so accessing a
+ *     {@link Row} obtained from a previous {@link #hasNext()} call has undefined behavior.</li>
+ *     <li>includes {@link #filter(Predicate)}, which performs client-side filtering since Apache Cassandra is more
+ *     restrictive about server-side filtering</li>
+ * </ul>
+ *
+ * <h2>Unsafe patterns</h2>
+ * <p>
+ * Below are a few unsafe patterns which may lead to unexpected results
+ *
+ * <code>{@code
+ * while (rs.hasNext()) {
+ *   list.add(rs.next());
+ * }
+ * }</code>
+ *
+ * <code>{@code
+ * rs.forEach(list::add)
+ * }</code>
+ * <p>
+ * Both cases have the same issue: they hold a reference to a row from a previous call to {@link #hasNext()}.  Since the same
+ * {@link Row} object can be reused across different calls to {@link #hasNext()}, any attempt to access it after the fact
+ * points to newer data.  If this behavior is not desirable and access is needed between calls, then {@link Row#copy()}
+ * should be used; this will clone the {@link Row} and return a new object pointing to the same data.
+ */
+public interface QueryResult extends Iterator<Row>
+{
+    List<String> names();
+    
+    List<String> warnings();
+
+    default QueryResult filter(Predicate<Row> fn)
+    {
+        return QueryResults.filter(this, fn);
+    }
+
+    default <A> Iterator<A> map(Function<? super Row, ? extends A> fn)
+    {
+        return new Iterator<A>()
+        {
+            @Override
+            public boolean hasNext()
+            {
+                return QueryResult.this.hasNext();
+            }
+
+            @Override
+            public A next()
+            {
+                return fn.apply(QueryResult.this.next());
+            }
+        };
+    }
+}
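The javadoc above warns against retaining rows across hasNext() calls; a small sketch (not part of this patch, helper name illustrative) of the safe pattern using Row#copy():

import java.util.ArrayList;
import java.util.List;

import org.apache.cassandra.distributed.api.QueryResult;
import org.apache.cassandra.distributed.api.Row;

final class QueryResultSnapshots
{
    // Materialise a QueryResult safely: copy() detaches each Row from the pooled
    // instance, so the collected rows stay valid after further hasNext() calls.
    static List<Row> collect(QueryResult result)
    {
        List<Row> rows = new ArrayList<>();
        while (result.hasNext())
            rows.add(result.next().copy());
        return rows;
    }
}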
diff --git a/test/distributed/org/apache/cassandra/distributed/api/QueryResults.java b/test/distributed/org/apache/cassandra/distributed/api/QueryResults.java
new file mode 100644
index 0000000..081d06a
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/QueryResults.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.api;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Objects;
+import java.util.function.Predicate;
+
+public final class QueryResults
+{
+    private static final SimpleQueryResult EMPTY = new SimpleQueryResult(new String[0], null);
+
+    private QueryResults() {}
+
+    public static SimpleQueryResult empty()
+    {
+        return EMPTY;
+    }
+
+    public static QueryResult fromIterator(String[] names, Iterator<Row> iterator)
+    {
+        Objects.requireNonNull(names, "names");
+        Objects.requireNonNull(iterator, "iterator");
+        return new IteratorQueryResult(names, iterator);
+    }
+
+    public static QueryResult fromObjectArrayIterator(String[] names, Iterator<Object[]> iterator)
+    {
+        Row row = new Row(names);
+        return fromIterator(names, new Iterator<Row>()
+        {
+            @Override
+            public boolean hasNext()
+            {
+                return iterator.hasNext();
+            }
+
+            @Override
+            public Row next()
+            {
+                row.setResults(iterator.next());
+                return row;
+            }
+        });
+    }
+
+    public static QueryResult filter(QueryResult result, Predicate<Row> fn)
+    {
+        return new FilterQueryResult(result, fn);
+    }
+
+    public static Builder builder()
+    {
+        return new Builder();
+    }
+
+    public static final class Builder
+    {
+        private static final int UNSET = -1;
+
+        private int numColumns = UNSET;
+        private String[] names;
+        private final List<Object[]> results = new ArrayList<>();
+        private final List<String> warnings = new ArrayList<>();
+
+        public Builder columns(String... columns)
+        {
+            if (columns != null)
+            {
+                if (numColumns == UNSET)
+                    numColumns = columns.length;
+
+                if (numColumns != columns.length)
+                    throw new AssertionError("Attempted to add column names with different column count; " +
+                                             "expected " + numColumns + " columns but given " + Arrays.toString(columns));
+            }
+
+            names = columns;
+            return this;
+        }
+
+        public Builder row(Object... values)
+        {
+            if (numColumns == UNSET)
+                numColumns = values.length;
+
+            if (numColumns != values.length)
+                throw new AssertionError("Attempted to add row with different column count; " +
+                                         "expected " + numColumns + " columns but given " + Arrays.toString(values));
+            results.add(values);
+            return this;
+        }
+
+        public Builder warning(String message)
+        {
+            warnings.add(message);
+            return this;
+        }
+
+        public SimpleQueryResult build()
+        {
+            if (names == null)
+            {
+                if (numColumns == UNSET)
+                    return QueryResults.empty();
+                names = new String[numColumns];
+                for (int i = 0; i < numColumns; i++)
+                    names[i] = "unknown";
+            }
+            
+            return new SimpleQueryResult(names, results.toArray(new Object[0][]), warnings);
+        }
+    }
+
+    private static final class IteratorQueryResult implements QueryResult
+    {
+        private final List<String> names;
+        private final Iterator<Row> iterator;
+
+        private IteratorQueryResult(String[] names, Iterator<Row> iterator)
+        {
+            this(Collections.unmodifiableList(Arrays.asList(names)), iterator);
+        }
+
+        private IteratorQueryResult(List<String> names, Iterator<Row> iterator)
+        {
+            this.names = names;
+            this.iterator = iterator;
+        }
+
+        @Override
+        public List<String> names()
+        {
+            return names;
+        }
+
+        @Override
+        public List<String> warnings()
+        {
+            throw new UnsupportedOperationException("Warnings are not yet supported for " + getClass().getSimpleName());
+        }
+
+        @Override
+        public boolean hasNext()
+        {
+            return iterator.hasNext();
+        }
+
+        @Override
+        public Row next()
+        {
+            return iterator.next();
+        }
+    }
+
+    private static final class FilterQueryResult implements QueryResult
+    {
+        private final QueryResult delegate;
+        private final Predicate<Row> filter;
+        private Row current;
+
+        private FilterQueryResult(QueryResult delegate, Predicate<Row> filter)
+        {
+            this.delegate = delegate;
+            this.filter = filter;
+        }
+
+        @Override
+        public List<String> names()
+        {
+            return delegate.names();
+        }
+        
+        @Override 
+        public List<String> warnings()
+        {
+            return delegate.warnings();
+        }
+
+        @Override
+        public boolean hasNext()
+        {
+            while (delegate.hasNext())
+            {
+                Row row = delegate.next();
+                if (filter.test(row))
+                {
+                    current = row;
+                    return true;
+                }
+            }
+            current = null;
+            return false;
+        }
+
+        @Override
+        public Row next()
+        {
+            if (current == null)
+                throw new NoSuchElementException();
+            return current;
+        }
+    }
+}
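A short sketch of the builder added here, e.g. for constructing an expected result to compare against a real query result (class, column names, and values are illustrative):

import org.apache.cassandra.distributed.api.QueryResults;
import org.apache.cassandra.distributed.api.SimpleQueryResult;

final class ExpectedResultSketch
{
    // Illustrative only: a small in-memory result built column-by-column and row-by-row.
    static SimpleQueryResult expected()
    {
        return QueryResults.builder()
                           .columns("pk", "v")          // fixes the column count to 2
                           .row(1, 42)
                           .row(2, 43)                  // a row with a different column count would throw AssertionError
                           .warning("sample warning")   // warnings travel alongside the rows
                           .build();
    }
}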
diff --git a/test/distributed/org/apache/cassandra/distributed/api/Row.java b/test/distributed/org/apache/cassandra/distributed/api/Row.java
new file mode 100644
index 0000000..33272ed
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/Row.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.api;
+
+import java.util.Arrays;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Objects;
+import java.util.Set;
+import java.util.UUID;
+
+import org.apache.cassandra.utils.TimeUUID;
+
+/**
+ * Data representing a single row in a query result.
+ * <p>
+ * This class is mutable from the parent {@link SimpleQueryResult} and can have the row it points to changed between calls
+ * to {@link SimpleQueryResult#hasNext()}; for this reason it is unsafe to hold a reference to this class after that call.
+ * To get around this, a call to {@link #copy()} will return a new object pointing to the same row.
+ */
+public class Row
+{
+    private static final int NOT_FOUND = -1;
+
+    private final String[] names;
+    private final Map<String, Integer> nameIndex;
+    private Object[] results; // mutable to avoid allocations in loops
+
+    public Row(String[] names)
+    {
+        Objects.requireNonNull(names, "names");
+        this.names = names;
+        this.nameIndex = new HashMap<>(names.length);
+        for (int i = 0; i < names.length; i++)
+        {
+            // if duplicate names, always index by the first one seen
+            nameIndex.putIfAbsent(names[i], i);
+        }
+    }
+
+    private Row(String[] names, Map<String, Integer> nameIndex)
+    {
+        this.names = names;
+        this.nameIndex = nameIndex;
+    }
+
+    void setResults(Object[] results)
+    {
+        this.results = results;
+    }
+
+    /**
+     * Creates a copy of the current row; can be used past calls to {@link SimpleQueryResult#hasNext()}.
+     */
+    public Row copy()
+    {
+        Row copy = new Row(names, nameIndex);
+        copy.setResults(results);
+        return copy;
+    }
+
+    public <T> T get(int index)
+    {
+        checkAccess();
+        if (index < 0 || index >= results.length)
+            throw new NoSuchElementException("by index: " + index);
+        return (T) results[index];
+    }
+
+    public <T> T get(String name)
+    {
+        checkAccess();
+        int idx = findIndex(name);
+        if (idx == NOT_FOUND)
+            throw new NoSuchElementException("by name: " + name);
+        return (T) results[idx];
+    }
+
+    public Short getShort(int index)
+    {
+        return get(index);
+    }
+
+    public Short getShort(String name)
+    {
+        return get(name);
+    }
+
+    public Integer getInteger(int index)
+    {
+        return get(index);
+    }
+
+    public Integer getInteger(String name)
+    {
+        return get(name);
+    }
+
+    public Long getLong(int index)
+    {
+        return get(index);
+    }
+
+    public Long getLong(String name)
+    {
+        return get(name);
+    }
+
+    public Float getFloat(int index)
+    {
+        return get(index);
+    }
+
+    public Float getFloat(String name)
+    {
+        return get(name);
+    }
+
+    public Double getDouble(int index)
+    {
+        return get(index);
+    }
+
+    public Double getDouble(String name)
+    {
+        return get(name);
+    }
+
+    public String getString(int index)
+    {
+        return get(index);
+    }
+
+    public String getString(String name)
+    {
+        return get(name);
+    }
+
+    public UUID getUUID(int index)
+    {
+        Object uuid = get(index);
+        if (uuid instanceof TimeUUID)
+            return ((TimeUUID) uuid).asUUID();
+        return (UUID) uuid;
+    }
+
+    public UUID getUUID(String name)
+    {
+        Object uuid = get(name);
+        if (uuid instanceof TimeUUID)
+            return ((TimeUUID) uuid).asUUID();
+        return (UUID) uuid;
+    }
+
+    public Date getTimestamp(int index)
+    {
+        return get(index);
+    }
+
+    public Date getTimestamp(String name)
+    {
+        return get(name);
+    }
+
+    public <T> Set<T> getSet(int index)
+    {
+        return get(index);
+    }
+
+    public <T> Set<T> getSet(String name)
+    {
+        return get(name);
+    }
+
+    /**
+     * Get the row as an array.
+     */
+    public Object[] toObjectArray()
+    {
+        return results;
+    }
+
+    public String toString()
+    {
+        return "Row{" +
+               "names=" + Arrays.toString(names) +
+               ", results=" + (results == null ? "[]" : Arrays.toString(results)) +
+               '}';
+    }
+
+    private void checkAccess()
+    {
+        if (results == null)
+            throw new NoSuchElementException();
+    }
+
+    private int findIndex(String name)
+    {
+        return nameIndex.getOrDefault(name, NOT_FOUND);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/api/SimpleQueryResult.java b/test/distributed/org/apache/cassandra/distributed/api/SimpleQueryResult.java
new file mode 100644
index 0000000..2b71e8b
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/api/SimpleQueryResult.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.api;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Objects;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * A table of data representing a complete query result.
+ * <p>
+ * A <code>QueryResult</code> is different from {@link java.sql.ResultSet} in several key ways:
+ *
+ * <ul>
+ *     <li>represents a complete result rather than a cursor</li>
+ *     <li>returns a {@link Row} to access the current row of data</li>
+ *     <li>relies on object pooling; {@link #hasNext()} may return the same object with different data, so accessing a
+ *     {@link Row} obtained from a previous {@link #hasNext()} call has undefined behavior.</li>
+ *     <li>includes {@link #filter(Predicate)}, which performs client-side filtering since Apache Cassandra is more
+ *     restrictive about server-side filtering</li>
+ * </ul>
+ *
+ * <h2>Unsafe patterns</h2>
+ * <p>
+ * Below are a few unsafe patterns which may lead to unexpected results
+ *
+ * <code>{@code
+ * while (rs.hasNext()) {
+ *   list.add(rs.next());
+ * }
+ * }</code>
+ *
+ * <code>{@code
+ * rs.forEach(list::add)
+ * }</code>
+ * <p>
+ * Both cases have the same issue: they hold a reference to a row from a previous call to {@link #hasNext()}.  Since the same
+ * {@link Row} object can be reused across different calls to {@link #hasNext()}, any attempt to access it after the fact
+ * points to newer data.  If this behavior is not desirable and access is needed between calls, then {@link Row#copy()}
+ * should be used; this will clone the {@link Row} and return a new object pointing to the same data.
+ */
+public class SimpleQueryResult implements QueryResult
+{
+    private final String[] names;
+    private final Object[][] results;
+    private final List<String> warnings;
+    private final Predicate<Row> filter;
+    private final Row row;
+    private int offset = -1;
+
+    public SimpleQueryResult(String[] names, Object[][] results)
+    {
+        this(names, results, Collections.emptyList());
+    }
+
+    public SimpleQueryResult(String[] names, Object[][] results, List<String> warnings)
+    {
+        this.names = Objects.requireNonNull(names, "names");
+        this.results = results;
+        this.warnings = Objects.requireNonNull(warnings, "warnings");
+        this.row = new Row(names);
+        this.filter = ignore -> true;
+    }
+
+    private SimpleQueryResult(String[] names, Object[][] results, Predicate<Row> filter, int offset)
+    {
+        this.names = names;
+        this.results = results;
+        this.warnings = Collections.emptyList();
+        this.filter = filter;
+        this.offset = offset;
+        this.row = new Row(names);
+    }
+
+    public List<String> names()
+    {
+        return Collections.unmodifiableList(Arrays.asList(names));
+    }
+
+    @Override
+    public List<String> warnings()
+    {
+        return Collections.unmodifiableList(warnings);
+    }
+
+    public SimpleQueryResult filter(Predicate<Row> fn)
+    {
+        return new SimpleQueryResult(names, results, filter.and(fn), offset);
+    }
+
+    /**
+     * Reset the cursor to the start of the query result; if the query result has not been iterated, this has no effect.
+     */
+    public void reset()
+    {
+        offset = -1;
+        row.setResults(null);
+    }
+
+    /**
+     * Get all rows as a 2d array.  Any calls to {@link #filter(Predicate)} will be ignored and the array returned will
+     * be the full set from the query.
+     */
+    public Object[][] toObjectArrays()
+    {
+        return results;
+    }
+
+    @Override
+    public boolean hasNext()
+    {
+        if (results == null)
+            return false;
+        while ((offset += 1) < results.length)
+        {
+            row.setResults(results[offset]);
+            if (filter.test(row))
+            {
+                return true;
+            }
+        }
+        row.setResults(null);
+        return false;
+    }
+
+    @Override
+    public Row next()
+    {
+        // no null check needed for results since offset only increments IFF results is not null
+        if (offset < 0 || offset >= results.length)
+            throw new NoSuchElementException();
+        return row;
+    }
+
+    @Override
+    public String toString()
+    {
+        if (results == null)
+            return "[]";
+        return Stream.of(results)
+                     .map(Arrays::toString)
+                     .collect(Collectors.joining(",", "[", "]"));
+    }
+}
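A brief sketch of the cursor semantics, assuming only the QueryResults builder from this patch; it shows reset() rewinding the cursor and filter() returning a client-side filtered view over the same backing data:

import org.apache.cassandra.distributed.api.QueryResults;
import org.apache.cassandra.distributed.api.SimpleQueryResult;

public class SimpleQueryResultSketch
{
    public static void main(String[] args)
    {
        SimpleQueryResult rs = QueryResults.builder()
                                           .columns("pk", "v")
                                           .row(1, 10)
                                           .row(2, 20)
                                           .build();

        // first pass over the full result
        while (rs.hasNext())
            System.out.println(rs.next().getInteger("pk")); // prints 1 then 2

        // reset() rewinds the cursor so the same result can be consumed again
        rs.reset();

        // filter() is applied client-side and yields a new view over the same backing array
        SimpleQueryResult even = rs.filter(row -> row.getInteger("pk") % 2 == 0);
        while (even.hasNext())
            System.out.println(even.next().getInteger("v")); // prints 20

        // toObjectArrays() ignores filters and returns the full data set
        assert rs.toObjectArrays().length == 2;
    }
}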
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/FixedSchemaProviderConfiguration.java b/test/distributed/org/apache/cassandra/distributed/fuzz/FixedSchemaProviderConfiguration.java
new file mode 100644
index 0000000..2e03903
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/FixedSchemaProviderConfiguration.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import harry.core.Configuration;
+import harry.ddl.SchemaSpec;
+import harry.model.sut.SystemUnderTest;
+
+@JsonTypeName("fixed")
+public class FixedSchemaProviderConfiguration implements Configuration.SchemaProviderConfiguration
+{
+    private final SchemaSpec schemaSpec;
+
+    @JsonCreator
+    public FixedSchemaProviderConfiguration(SchemaSpec schemaSpec)
+    {
+        this.schemaSpec = schemaSpec;
+    }
+
+    @Override
+    public SchemaSpec make(long l, SystemUnderTest systemUnderTest)
+    {
+        return this.schemaSpec;
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/FuzzTestBase.java b/test/distributed/org/apache/cassandra/distributed/fuzz/FuzzTestBase.java
new file mode 100644
index 0000000..13e4f8c
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/FuzzTestBase.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import java.util.Collection;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import harry.core.Configuration;
+import harry.core.Run;
+import harry.ddl.SchemaSpec;
+import harry.model.clock.OffsetClock;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+
+public abstract class FuzzTestBase extends TestBaseImpl
+{
+    protected static Configuration configuration;
+    public static final int RF = 2;
+    static
+    {
+        try
+        {
+            HarryHelper.init();
+            configuration = HarryHelper.defaultConfiguration().build();
+        }
+        catch (Exception e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    protected static Cluster cluster;
+
+    @BeforeClass
+    public static void beforeClassOverride() throws Throwable
+    {
+        cluster = Cluster.build(2)
+                         .start();
+
+        init(cluster, RF);
+    }
+
+    @AfterClass
+    public static void afterClass()
+    {
+        if (cluster != null)
+            cluster.close();
+    }
+
+    public void reset()
+    {
+        cluster.schemaChange("DROP KEYSPACE IF EXISTS " + KEYSPACE);
+        init(cluster, RF);
+    }
+
+    /**
+     * Helper method to generate {@code number} sstables for the given {@code schemaSpec}.
+     */
+    @SuppressWarnings("unused")
+    public static void generateTables(SchemaSpec schemaSpec, int number)
+    {
+        Run run = configuration.unbuild()
+                               .setSeed(1)
+                               .setSchemaProvider(new FixedSchemaProviderConfiguration(schemaSpec))
+                               .setClock(() -> new OffsetClock(10000L))
+                               .setDropSchema(false)
+                               .setCreateSchema(false)
+                               .build()
+                               .createRun();
+
+        ColumnFamilyStore store = Keyspace.open(schemaSpec.keyspace).getColumnFamilyStore(schemaSpec.table);
+        store.disableAutoCompaction();
+
+        SSTableGenerator gen = new SSTableGenerator(run, store);
+        SSTableGenerator largePartitionGen = new SSTableWithLargePartition(run, store);
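+        // every third sstable is produced by the large-partition generator below; the rest use the default generator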
+        for (int i = 0; i < number; i++)
+        {
+            if (i % 3 == 0)
+                largePartitionGen.gen(1_000);
+            else
+                gen.gen(1_000);
+        }
+    }
+
+    /**
+     * Helper class to force generation of a fixed partition size.
+     */
+    private static class SSTableWithLargePartition extends SSTableGenerator
+    {
+        public SSTableWithLargePartition(Run run, ColumnFamilyStore store)
+        {
+            super(run, store);
+        }
+
+        @Override
+        public Collection<SSTableReader> gen(int rows)
+        {
+            long lts = 0;
+            for (int i = 0; i < rows; i++)
+            {
+                long current = lts++;
+                write(current, current, current, current, true).applyUnsafe();
+                if (schema.staticColumns != null)
+                    writeStatic(current, 0, current, current, true).applyUnsafe();
+            }
+            Util.flush(store);
+            return null;
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/HarryHelper.java b/test/distributed/org/apache/cassandra/distributed/fuzz/HarryHelper.java
new file mode 100644
index 0000000..2997fa6
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/HarryHelper.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import java.io.File;
+import java.util.concurrent.TimeUnit;
+
+import harry.core.Configuration;
+import harry.model.OpSelectors;
+import harry.model.clock.OffsetClock;
+import harry.model.sut.PrintlnSut;
+
+public class HarryHelper
+{
+    public static void init()
+    {
+        System.setProperty("log4j2.disableJmx", "true"); // set both spellings, as the property name changed between log4j2 versions
+        System.setProperty("log4j2.disable.jmx", "true");
+        System.setProperty("log4j.shutdownHookEnabled", "false");
+        System.setProperty("cassandra.allow_simplestrategy", "true"); // makes it easier to share OSS tests without RF limits
+        System.setProperty("cassandra.minimum_replication_factor", "0"); // makes it easier to share OSS tests without RF limits
+
+        System.setProperty("cassandra.disable_tcactive_openssl", "true");
+        System.setProperty("relocated.shaded.io.netty.transport.noNative", "true");
+        System.setProperty("org.apache.cassandra.disable_mbean_registration", "true");
+
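+        // register configuration subtypes so harry can resolve them when deserializing configurations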
+        InJvmSut.init();
+        QueryingNoOpChecker.init();
+        Configuration.registerSubtypes(PrintlnSut.PrintlnSutConfiguration.class);
+        Configuration.registerSubtypes(Configuration.NoOpMetricReporterConfiguration.class);
+        Configuration.registerSubtypes(Configuration.RecentPartitionsValidatorConfiguration.class);
+    }
+    
+    public static Configuration configuration(String... args) throws Exception
+    {
+        File configFile = harry.runner.HarryRunner.loadConfig(args);
+        Configuration configuration = Configuration.fromFile(configFile);
+        System.out.println("Using configuration generated from: " + configFile);
+        return configuration;
+    }
+
+    public static Configuration.ConfigurationBuilder defaultConfiguration() throws Exception
+    {
+        return new Configuration.ConfigurationBuilder()
+               .setClock(() -> new OffsetClock(100000))
+               .setCreateSchema(true)
+               .setTruncateTable(false)
+               .setDropSchema(false)
+               .setSchemaProvider(new Configuration.DefaultSchemaProviderConfiguration())
+               .setClock(new Configuration.ApproximateMonotonicClockConfiguration(7300, 1, TimeUnit.SECONDS))
+               .setClusteringDescriptorSelector(defaultClusteringDescriptorSelectorConfiguration().build())
+               .setPartitionDescriptorSelector(new Configuration.DefaultPDSelectorConfiguration(100, 10))
+               .setSUT(new PrintlnSut.PrintlnSutConfiguration())
+               .setDataTracker(new Configuration.DefaultDataTrackerConfiguration())
+               .setRunner((run, configuration) -> {
+                   throw new IllegalArgumentException("Runner is not configured by default.");
+               })
+               .setMetricReporter(new Configuration.NoOpMetricReporterConfiguration());
+    }
+
+    public static Configuration.CDSelectorConfigurationBuilder defaultClusteringDescriptorSelectorConfiguration()
+    {
+        return new Configuration.CDSelectorConfigurationBuilder()
+               .setNumberOfModificationsDistribution(new Configuration.ConstantDistributionConfig(2))
+               .setRowsPerModificationDistribution(new Configuration.ConstantDistributionConfig(2))
+               .setMaxPartitionSize(100)
+               .setOperationKindWeights(new Configuration.OperationKindSelectorBuilder()
+                                        .addWeight(OpSelectors.OperationKind.DELETE_ROW, 1)
+                                        .addWeight(OpSelectors.OperationKind.DELETE_COLUMN, 1)
+                                        .addWeight(OpSelectors.OperationKind.DELETE_RANGE, 1)
+                                        .addWeight(OpSelectors.OperationKind.DELETE_SLICE, 1)
+                                        .addWeight(OpSelectors.OperationKind.DELETE_PARTITION, 1)
+                                        .addWeight(OpSelectors.OperationKind.DELETE_COLUMN_WITH_STATICS, 1)
+                                        .addWeight(OpSelectors.OperationKind.INSERT_WITH_STATICS, 20)
+                                        .addWeight(OpSelectors.OperationKind.INSERT, 20)
+                                        .addWeight(OpSelectors.OperationKind.UPDATE_WITH_STATICS, 20)
+                                        .addWeight(OpSelectors.OperationKind.UPDATE, 20)
+                                        .build());
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSut.java b/test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSut.java
new file mode 100644
index 0000000..d62b2c6
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSut.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.function.Consumer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import harry.core.Configuration;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+
+public class InJvmSut extends InJvmSutBase<IInvokableInstance, Cluster>
+{
+    public static void init()
+    {
+        Configuration.registerSubtypes(InJvmSutConfiguration.class);
+    }
+
+    private static final Logger logger = LoggerFactory.getLogger(InJvmSut.class);
+
+    public InJvmSut(Cluster cluster)
+    {
+        super(cluster, 10);
+    }
+
+    public InJvmSut(Cluster cluster, int threads)
+    {
+        super(cluster, threads);
+    }
+
+    @JsonTypeName("in_jvm")
+    public static class InJvmSutConfiguration extends InJvmSutBaseConfiguration<IInvokableInstance, Cluster>
+    {
+        @JsonCreator
+        public InJvmSutConfiguration(@JsonProperty(value = "nodes", defaultValue = "3") int nodes,
+                                     @JsonProperty(value = "worker_threads", defaultValue = "10") int worker_threads,
+                                     @JsonProperty("root") String root)
+        {
+            super(nodes, worker_threads, root);
+        }
+
+        protected Cluster cluster(Consumer<IInstanceConfig> cfg, int nodes, File root)
+        {
+            try
+            {
+                return Cluster.build()
+                              .withConfig(cfg)
+                              .withNodes(nodes)
+                              .withRoot(root)
+                              .createWithoutStarting();
+            }
+            catch (IOException e)
+            {
+                throw new IllegalStateException(e);
+            }
+        }
+
+        protected InJvmSutBase<IInvokableInstance, Cluster> sut(Cluster cluster)
+        {
+            return new InJvmSut(cluster);
+        }
+    }
+}
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSutBase.java b/test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSutBase.java
new file mode 100644
index 0000000..585fc81
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSutBase.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+
+import com.google.common.collect.Iterators;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import harry.core.Configuration;
+import harry.model.sut.SystemUnderTest;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IMessage;
+import org.apache.cassandra.distributed.api.IMessageFilters;
+
+public class InJvmSutBase<NODE extends IInstance, CLUSTER extends ICluster<NODE>> implements SystemUnderTest.FaultInjectingSut
+{
+    public static void init()
+    {
+        Configuration.registerSubtypes(InJvmSutBaseConfiguration.class);
+    }
+
+    private static final Logger logger = LoggerFactory.getLogger(InJvmSutBase.class);
+
+    private final ExecutorService executor;
+    public final CLUSTER cluster;
+    private final AtomicLong cnt = new AtomicLong();
+    private final AtomicBoolean isShutdown = new AtomicBoolean(false);
+
+    public InJvmSutBase(CLUSTER cluster)
+    {
+        this(cluster, 10);
+    }
+
+    public InJvmSutBase(CLUSTER cluster, int threads)
+    {
+        this.cluster = cluster;
+        this.executor = Executors.newFixedThreadPool(threads);
+    }
+
+    public CLUSTER cluster()
+    {
+        return cluster;
+    }
+
+    @Override
+    public boolean isShutdown()
+    {
+        return isShutdown.get();
+    }
+
+    @Override
+    public void shutdown()
+    {
+        assert isShutdown.compareAndSet(false, true);
+
+        try
+        {
+            cluster.close();
+            executor.shutdown();
+            if (!executor.awaitTermination(30, TimeUnit.SECONDS))
+                throw new TimeoutException("Could not terminate cluster within expected timeout");
+        }
+        catch (Throwable e)
+        {
+            logger.error("Could not terminate cluster.", e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void schemaChange(String statement)
+    {
+        cluster.schemaChange(statement);
+    }
+
+    public Object[][] execute(String statement, ConsistencyLevel cl, Object... bindings)
+    {
+        return execute(statement, cl, (int) (cnt.getAndIncrement() % cluster.size() + 1), bindings);
+    }
+
+    public Object[][] execute(String statement, ConsistencyLevel cl, int coordinator, Object... bindings)
+    {
+        if (isShutdown.get())
+            throw new RuntimeException("Instance is shut down");
+
+        try
+        {
+            if (cl == ConsistencyLevel.NODE_LOCAL)
+            {
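+                // NODE_LOCAL statements are executed directly on the selected instance, bypassing coordination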
+                return cluster.get(coordinator)
+                              .executeInternal(statement, bindings);
+            }
+            else if (StringUtils.startsWithIgnoreCase(statement, "SELECT"))
+            {
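+                // SELECTs go through the coordinator's paging path with a page size of 1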
+                return Iterators.toArray(cluster
+                                         // round-robin
+                                         .coordinator(coordinator)
+                                         .executeWithPaging(statement, toApiCl(cl), 1, bindings),
+                                         Object[].class);
+            }
+            else
+            {
+                return cluster
+                       // round-robin
+                       .coordinator(coordinator)
+                       .execute(statement, toApiCl(cl), bindings);
+            }
+        }
+        catch (Throwable t)
+        {
+            // TODO: find a better way to work around timeouts
+            if (t.getMessage().contains("timed out"))
+                return execute(statement, cl, coordinator, bindings);
+
+            logger.error(String.format("Caught error while trying to execute statement %s (%s): %s",
+                                       statement, Arrays.toString(bindings), t.getMessage()),
+                         t);
+            throw t;
+        }
+    }
+
+    // TODO: Ideally, we need to be able to induce a failure of a single specific message
+    public Object[][] executeWithWriteFailure(String statement, ConsistencyLevel cl, Object... bindings)
+    {
+        if (isShutdown.get())
+            throw new RuntimeException("Instance is shut down");
+
+        try
+        {
+            int coordinator = (int) (cnt.getAndIncrement() % cluster.size() + 1);
+            IMessageFilters filters = cluster.filters();
+
+            // Drop exactly one coordinated message
+            int MUTATION_REQ = 0;
+            // TODO: make dropping deterministic
+            filters.verbs(MUTATION_REQ).from(coordinator).messagesMatching(new IMessageFilters.Matcher()
+            {
+                private final AtomicBoolean issued = new AtomicBoolean();
+                public boolean matches(int from, int to, IMessage message)
+                {
+                    if (from != coordinator || message.verb() != MUTATION_REQ)
+                        return false;
+
+                    return !issued.getAndSet(true);
+                }
+            }).drop().on();
+            Object[][] res = cluster
+                             .coordinator(coordinator)
+                             .execute(statement, toApiCl(cl), bindings);
+            filters.reset();
+            return res;
+        }
+        catch (Throwable t)
+        {
+            logger.error(String.format("Caught error while trying to execute statement %s", statement),
+                         t);
+            throw t;
+        }
+    }
+
+    public CompletableFuture<Object[][]> executeAsync(String statement, ConsistencyLevel cl, Object... bindings)
+    {
+        return CompletableFuture.supplyAsync(() -> execute(statement, cl, bindings), executor);
+    }
+
+    public CompletableFuture<Object[][]> executeAsyncWithWriteFailure(String statement, ConsistencyLevel cl, Object... bindings)
+    {
+        return CompletableFuture.supplyAsync(() -> executeWithWriteFailure(statement, cl, bindings), executor);
+    }
+
+    public static abstract class InJvmSutBaseConfiguration<NODE extends IInstance, CLUSTER extends ICluster<NODE>> implements Configuration.SutConfiguration
+    {
+        public final int nodes;
+        public final int worker_threads;
+        public final String root;
+
+        @JsonCreator
+        public InJvmSutBaseConfiguration(@JsonProperty(value = "nodes", defaultValue = "3") int nodes,
+                                         @JsonProperty(value = "worker_threads", defaultValue = "10") int worker_threads,
+                                         @JsonProperty("root") String root)
+        {
+            this.nodes = nodes;
+            this.worker_threads = worker_threads;
+            if (root == null)
+            {
+                try
+                {
+                    this.root = Files.createTempDirectory("cluster_" + nodes + "_nodes").toString();
+                }
+                catch (IOException e)
+                {
+                    throw new IllegalArgumentException(e);
+                }
+            }
+            else
+            {
+                this.root = root;
+            }
+        }
+
+        protected abstract CLUSTER cluster(Consumer<IInstanceConfig> cfg, int nodes, File root);
+        protected abstract InJvmSutBase<NODE, CLUSTER> sut(CLUSTER cluster);
+
+        public SystemUnderTest make()
+        {
+            try
+            {
+                ICluster.setup();
+            }
+            catch (Throwable throwable)
+            {
+                throw new RuntimeException(throwable);
+            }
+
+            CLUSTER cluster;
+
+            cluster = cluster((cfg) -> {
+                                  // TODO: make this configurable
+                                  cfg.with(Feature.NETWORK, Feature.GOSSIP, Feature.NATIVE_PROTOCOL)
+                                     .set("row_cache_size_in_mb", 10L)
+                                     .set("index_summary_capacity_in_mb", 10L)
+                                     .set("counter_cache_size_in_mb", 10L)
+                                     .set("key_cache_size_in_mb", 10L)
+                                     .set("file_cache_size_in_mb", 10)
+                                     .set("memtable_heap_space_in_mb", 128)
+                                     .set("memtable_offheap_space_in_mb", 128)
+                                     .set("memtable_flush_writers", 1)
+                                     .set("concurrent_compactors", 1)
+                                     .set("concurrent_reads", 5)
+                                     .set("concurrent_writes", 5)
+                                     .set("compaction_throughput_mb_per_sec", 10)
+                                     .set("hinted_handoff_enabled", false);
+                              },
+                              nodes,
+                              new File(root));
+
+            cluster.startup();
+            return sut(cluster);
+        }
+    }
+
+    public static org.apache.cassandra.distributed.api.ConsistencyLevel toApiCl(ConsistencyLevel cl)
+    {
+        switch (cl)
+        {
+            case ALL:    return org.apache.cassandra.distributed.api.ConsistencyLevel.ALL;
+            case QUORUM: return org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM;
+            case NODE_LOCAL: return org.apache.cassandra.distributed.api.ConsistencyLevel.NODE_LOCAL;
+            case ONE: return org.apache.cassandra.distributed.api.ConsistencyLevel.ONE;
+        }
+        throw new IllegalArgumentException("Don't know a CL: " + cl);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/QueryingNoOpChecker.java b/test/distributed/org/apache/cassandra/distributed/fuzz/QueryingNoOpChecker.java
new file mode 100644
index 0000000..77d19a2
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/QueryingNoOpChecker.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import harry.core.Configuration;
+import harry.core.Run;
+import harry.model.Model;
+import harry.model.sut.SystemUnderTest;
+import harry.operations.CompiledStatement;
+import harry.operations.Query;
+
+public class QueryingNoOpChecker implements Model
+{
+    public static void init()
+    {
+        Configuration.registerSubtypes(QueryingNoOpCheckerConfig.class);
+    }
+
+    private final Run run;
+
+    public QueryingNoOpChecker(Run run)
+    {
+        this.run = run;
+    }
+
+    @Override
+    public void validate(Query query)
+    {
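+        // execute the SELECT at CL.ALL and discard the result; this model exercises the read path without validating it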
+        CompiledStatement compiled = query.toSelectStatement();
+        run.sut.execute(compiled.cql(),
+                        SystemUnderTest.ConsistencyLevel.ALL,
+                        compiled.bindings());
+    }
+
+    @JsonTypeName("querying_no_op_checker")
+    public static class QueryingNoOpCheckerConfig implements Configuration.ModelConfiguration
+    {
+        @JsonCreator
+        public QueryingNoOpCheckerConfig()
+        {
+        }
+
+        public Model make(Run run)
+        {
+            return new QueryingNoOpChecker(run);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/SSTableGenerator.java b/test/distributed/org/apache/cassandra/distributed/fuzz/SSTableGenerator.java
new file mode 100644
index 0000000..627d360
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/SSTableGenerator.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+
+import harry.core.Run;
+import harry.ddl.SchemaSpec;
+import harry.model.OpSelectors;
+import harry.operations.Relation;
+import harry.operations.Query;
+import harry.operations.QueryGenerator;
+import harry.util.BitSet;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.cql3.AbstractMarker;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.cql3.Operator;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.SingleColumnRelation;
+import org.apache.cassandra.cql3.VariableSpecifications;
+import org.apache.cassandra.cql3.WhereClause;
+import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
+import org.apache.cassandra.cql3.statements.Bound;
+import org.apache.cassandra.cql3.statements.DeleteStatement;
+import org.apache.cassandra.cql3.statements.StatementType;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.DeletionTime;
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.db.RangeTombstone;
+import org.apache.cassandra.db.RowUpdateBuilder;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
+import org.apache.cassandra.db.marshal.CompositeType;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static harry.generators.DataGenerators.UNSET_VALUE;
+
+public class SSTableGenerator
+{
+    protected final SchemaSpec schema;
+    protected final OpSelectors.DescriptorSelector descriptorSelector;
+    protected final OpSelectors.MonotonicClock clock;
+    protected final ColumnFamilyStore store;
+    protected final TableMetadata metadata;
+    protected final QueryGenerator rangeSelector;
+
+    private final Set<SSTableReader> sstables = new HashSet<>();
+
+    private long lts = 0;
+
+    public SSTableGenerator(Run run,
+                            ColumnFamilyStore store)
+    {
+        this.schema = run.schemaSpec;
+        this.descriptorSelector = run.descriptorSelector;
+        this.clock = run.clock;
+        this.store = store;
+        // We assume the metadata cannot change over the lifetime of the sstable generator
+        this.metadata = store.metadata.get();
+        this.rangeSelector = new QueryGenerator(schema, run.pdSelector, descriptorSelector, run.rng);
+        store.disableAutoCompaction();
+    }
+
+    public Collection<SSTableReader> gen(int rows)
+    {
+        mark();
+        for (int i = 0; i < rows; i++)
+        {
+            long current = lts++;
+            write(current, current, current, current, true).applyUnsafe();
+            if (schema.staticColumns != null)
+                writeStatic(current, current, current, current, true).applyUnsafe();
+        }
+
+        return flush();
+    }
+
+    public void mark()
+    {
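+        // remember the currently live sstables so that flush() can report only the newly flushed ones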
+        sstables.clear();
+        sstables.addAll(store.getLiveSSTables());
+    }
+
+    public Collection<SSTableReader> flush()
+    {
+        Util.flush(store);
+
+        // the newly flushed sstables are the live set minus those recorded by the last mark()
+        Set<SSTableReader> flushed = new HashSet<>(store.getLiveSSTables());
+        flushed.removeAll(sstables);
+        mark();
+        return flushed;
+    }
+
+    public Mutation write(long lts, long pd, long cd, long opId, boolean withRowMarker)
+    {
+        long[] vds = descriptorSelector.vds(pd, cd, lts, opId, OpSelectors.OperationKind.INSERT, schema);
+
+        Object[] partitionKey = schema.inflatePartitionKey(pd);
+        Object[] clusteringKey = schema.inflateClusteringKey(cd);
+        Object[] regularColumns = schema.inflateRegularColumns(vds);
+
+        RowUpdateBuilder builder = new RowUpdateBuilder(metadata,
+                                                        FBUtilities.nowInSeconds(),
+                                                        clock.rts(lts),
+                                                        metadata.params.defaultTimeToLive,
+                                                        serializePartitionKey(store, partitionKey))
+                                   .clustering(clusteringKey);
+
+        if (!withRowMarker)
+            builder.noRowMarker();
+
+        for (int i = 0; i < regularColumns.length; i++)
+        {
+            Object value = regularColumns[i];
+            if (value == UNSET_VALUE)
+                continue;
+
+            ColumnMetadata def = metadata.getColumn(new ColumnIdentifier(schema.regularColumns.get(i).name, false));
+            builder.add(def, value);
+        }
+
+        return builder.build();
+    }
+
+    public Mutation writeStatic(long lts, long pd, long cd, long opId, boolean withRowMarker)
+    {
+        long[] sds = descriptorSelector.sds(pd, cd, lts, opId, OpSelectors.OperationKind.INSERT_WITH_STATICS, schema);
+
+        Object[] partitionKey = schema.inflatePartitionKey(pd);
+        Object[] staticColumns = schema.inflateStaticColumns(sds);
+
+        RowUpdateBuilder builder = new RowUpdateBuilder(metadata,
+                                                        FBUtilities.nowInSeconds(),
+                                                        clock.rts(lts),
+                                                        metadata.params.defaultTimeToLive,
+                                                        serializePartitionKey(store, partitionKey));
+
+        if (!withRowMarker)
+            builder.noRowMarker();
+
+        for (int i = 0; i < staticColumns.length; i++)
+        {
+            Object value = staticColumns[i];
+            if (value == UNSET_VALUE)
+                continue;
+            ColumnMetadata def = metadata.getColumn(new ColumnIdentifier(schema.staticColumns.get(i).name, false));
+            builder.add(def, value);
+        }
+
+        return builder.build();
+    }
+
+    public Mutation deleteColumn(long lts, long pd, long cd, long opId)
+    {
+        Object[] partitionKey = schema.inflatePartitionKey(pd);
+        Object[] clusteringKey = schema.inflateClusteringKey(cd);
+
+        RowUpdateBuilder builder = new RowUpdateBuilder(metadata,
+                                                        FBUtilities.nowInSeconds(),
+                                                        clock.rts(lts),
+                                                        metadata.params.defaultTimeToLive,
+                                                        serializePartitionKey(store, partitionKey))
+                                   .noRowMarker()
+                                   .clustering(clusteringKey);
+
+        BitSet columns = descriptorSelector.columnMask(pd, lts, opId, OpSelectors.OperationKind.DELETE_COLUMN);
+        BitSet mask = schema.regularColumnsMask();
+
+        if (columns == null || columns.allUnset(mask))
+            throw new IllegalArgumentException("Can't have a delete column query with no columns set. Column mask: " + columns);
+
+        columns.eachSetBit((idx) -> {
+            if (idx < schema.regularColumnsOffset)
+                throw new RuntimeException("Can't delete parts of partition or clustering key");
+
+            if (idx > schema.allColumns.size())
+                throw new IndexOutOfBoundsException(String.format("Index %d is out of bounds. Max index: %d", idx, schema.allColumns.size()));
+
+            builder.delete(schema.allColumns.get(idx).name);
+        }, mask);
+
+        return builder.build();
+    }
+
+    public Mutation deleteStatic(long lts, long pd, long opId)
+    {
+        Object[] partitionKey = schema.inflatePartitionKey(pd);
+
+        RowUpdateBuilder builder = new RowUpdateBuilder(metadata,
+                                                        FBUtilities.nowInSeconds(),
+                                                        clock.rts(lts),
+                                                        metadata.params.defaultTimeToLive,
+                                                        serializePartitionKey(store, partitionKey))
+                                   .noRowMarker();
+
+        BitSet columns = descriptorSelector.columnMask(pd, lts, opId, OpSelectors.OperationKind.DELETE_COLUMN_WITH_STATICS);
+        BitSet mask = schema.staticColumnsMask();
+
+        if (columns == null || columns.allUnset(mask))
+            throw new IllegalArgumentException("Can't have a delete column query with no columns set. Column mask: " + columns);
+
+        columns.eachSetBit((idx) -> {
+            if (idx < schema.staticColumnsOffset)
+                throw new RuntimeException(String.format("Can't delete parts of partition or clustering key %d (%s)",
+                                                         idx, schema.allColumns.get(idx)));
+
+            if (idx > schema.allColumns.size())
+                throw new IndexOutOfBoundsException(String.format("Index %d is out of bounds. Max index: %d", idx, schema.allColumns.size()));
+
+            builder.delete(schema.allColumns.get(idx).name);
+        }, mask);
+
+        return builder.build();
+    }
+
+    public Mutation deletePartition(long lts, long pd)
+    {
+        Object[] partitionKey = schema.inflatePartitionKey(pd);
+
+        PartitionUpdate update = PartitionUpdate.fullPartitionDelete(metadata,
+                                                                     serializePartitionKey(store, partitionKey),
+                                                                     clock.rts(lts),
+                                                                     FBUtilities.nowInSeconds());
+
+        return new Mutation(update);
+    }
+
+    public Mutation deleteRow(long lts, long pd, long cd)
+    {
+        Object[] partitionKey = schema.inflatePartitionKey(pd);
+        Object[] clusteringKey = schema.inflateClusteringKey(cd);
+
+        return RowUpdateBuilder.deleteRow(metadata,
+                                          clock.rts(lts),
+                                          serializePartitionKey(store, partitionKey),
+                                          clusteringKey);
+    }
+
+    public Mutation deleteSlice(long lts, long pd, long opId)
+    {
+        return delete(lts, pd, rangeSelector.inflate(lts, opId, Query.QueryKind.CLUSTERING_SLICE));
+    }
+
+    public Mutation deleteRange(long lts, long pd, long opId)
+    {
+        return delete(lts, pd, rangeSelector.inflate(lts, opId, Query.QueryKind.CLUSTERING_RANGE));
+    }
+
+    Mutation delete(long lts, long pd, Query query)
+    {
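+        // translate the harry relations into a CQL WHERE clause, then turn the resulting clustering bounds into a single range tombstone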
+        Object[] partitionKey = schema.inflatePartitionKey(pd);
+        WhereClause.Builder builder = new WhereClause.Builder();
+        List<ColumnIdentifier> variableNames = new ArrayList<>();
+
+        List<ByteBuffer> values = new ArrayList<>();
+        for (int i = 0; i < partitionKey.length; i++)
+        {
+            String name = schema.partitionKeys.get(i).name;
+            ColumnMetadata columnDef = metadata.getColumn(ByteBufferUtil.bytes(name));
+            variableNames.add(columnDef.name);
+            values.add(ByteBufferUtil.objectToBytes(partitionKey[i]));
+            builder.add(new SingleColumnRelation(ColumnIdentifier.getInterned(name, true),
+                                                 toOperator(Relation.RelationKind.EQ),
+                                                 new AbstractMarker.Raw(values.size() - 1)));
+        }
+
+        for (Relation relation : query.relations)
+        {
+            String name = relation.column();
+            ColumnMetadata columnDef = metadata.getColumn(ByteBufferUtil.bytes(relation.column()));
+            variableNames.add(columnDef.name);
+            values.add(ByteBufferUtil.objectToBytes(relation.value()));
+            builder.add(new SingleColumnRelation(ColumnIdentifier.getInterned(name, false),
+                                                 toOperator(relation.kind),
+                                                 new AbstractMarker.Raw(values.size() - 1)));
+        }
+
+        StatementRestrictions restrictions = new StatementRestrictions(StatementType.DELETE,
+                                                                       metadata,
+                                                                       builder.build(),
+                                                                       new VariableSpecifications(variableNames),
+                                                                       false,
+                                                                       false,
+                                                                       false,
+                                                                       false);
+
+        QueryOptions options = QueryOptions.forInternalCalls(ConsistencyLevel.QUORUM, values);
+        SortedSet<ClusteringBound<?>> startBounds = restrictions.getClusteringColumnsBounds(Bound.START, options);
+        SortedSet<ClusteringBound<?>> endBounds = restrictions.getClusteringColumnsBounds(Bound.END, options);
+
+        Slices slices = DeleteStatement.toSlices(metadata, startBounds, endBounds);
+        assert slices.size() == 1;
+        int deletionTime = FBUtilities.nowInSeconds();
+        long rts = clock.rts(lts);
+
+        return new RowUpdateBuilder(metadata,
+                                    deletionTime,
+                                    rts,
+                                    metadata.params.defaultTimeToLive,
+                                    serializePartitionKey(store, partitionKey))
+               .noRowMarker()
+               .addRangeTombstone(new RangeTombstone(slices.get(0), new DeletionTime(rts, deletionTime)))
+               .build();
+    }
+
+    public static Operator toOperator(Relation.RelationKind kind)
+    {
+        switch (kind)
+        {
+            case LT: return Operator.LT;
+            case GT: return Operator.GT;
+            case LTE: return Operator.LTE;
+            case GTE: return Operator.GTE;
+            case EQ: return Operator.EQ;
+            default: throw new IllegalArgumentException("Unsupported " + kind);
+        }
+    }
+
+    public static DecoratedKey serializePartitionKey(ColumnFamilyStore store, Object... pk)
+    {
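+        // single-column partition keys are serialized directly; composite keys are packed via CompositeType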
+        if (pk.length == 1)
+            return store.getPartitioner().decorateKey(ByteBufferUtil.objectToBytes(pk[0]));
+
+        ByteBuffer[] values = new ByteBuffer[pk.length];
+        for (int i = 0; i < pk.length; i++)
+            values[i] = ByteBufferUtil.objectToBytes(pk[i]);
+        return store.getPartitioner().decorateKey(CompositeType.build(ByteBufferAccessor.instance, values));
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/SSTableLoadingVisitor.java b/test/distributed/org/apache/cassandra/distributed/fuzz/SSTableLoadingVisitor.java
new file mode 100644
index 0000000..ce6ef57
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/SSTableLoadingVisitor.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz;
+
+import harry.core.Run;
+import harry.model.OpSelectors;
+import harry.visitors.VisitExecutor;
+import org.apache.cassandra.db.Keyspace;
+
+public class SSTableLoadingVisitor extends VisitExecutor
+{
+    private final SSTableGenerator gen;
+    private final int forceFlushAfter;
+
+    public SSTableLoadingVisitor(Run run, int forceFlushAfter)
+    {
+        this.forceFlushAfter = forceFlushAfter;
+        gen = new SSTableGenerator(run, Keyspace.open(run.schemaSpec.keyspace).getColumnFamilyStore(run.schemaSpec.table));
+        gen.mark();
+    }
+
+    @Override
+    protected void beforeLts(long l, long l1)
+    {
+
+    }
+
+    @Override
+    protected void afterLts(long lts, long pd)
+    {
+        if (lts > 0 && lts % forceFlushAfter == 0)
+            forceFlush();
+    }
+
+    @Override
+    protected void beforeBatch(long l, long l1, long l2)
+    {
+
+    }
+
+    @Override
+    protected void afterBatch(long l, long l1, long l2)
+    {
+
+    }
+
+    @Override
+    protected void operation(long lts, long pd, long cd, long m, long opId, OpSelectors.OperationKind operationKind)
+    {
+        switch (operationKind)
+        {
+            case INSERT:
+                gen.write(lts, pd, cd, opId, true).applyUnsafe();
+                break;
+            case INSERT_WITH_STATICS:
+                gen.write(lts, pd, cd, opId, true).applyUnsafe();
+                gen.writeStatic(lts, pd, cd, opId, true).applyUnsafe();
+                break;
+            case UPDATE:
+                gen.write(lts, pd, cd, opId, false).applyUnsafe();
+                break;
+            case UPDATE_WITH_STATICS:
+                gen.write(lts, pd, cd, opId, false).applyUnsafe();
+                gen.writeStatic(lts, pd, cd, opId, false).applyUnsafe();
+                break;
+            case DELETE_PARTITION:
+                gen.deletePartition(lts, pd).applyUnsafe();
+                break;
+            case DELETE_ROW:
+                gen.deleteRow(lts, pd, cd).applyUnsafe();
+                break;
+            case DELETE_COLUMN:
+                gen.deleteColumn(lts, pd, cd, opId).applyUnsafe();
+                break;
+            case DELETE_COLUMN_WITH_STATICS:
+                gen.deleteColumn(lts, pd, cd, opId).applyUnsafe();
+                gen.deleteStatic(lts, pd, opId).applyUnsafe();
+                break;
+            case DELETE_RANGE:
+                gen.deleteRange(lts, pd, opId).applyUnsafe();
+                break;
+            case DELETE_SLICE:
+                gen.deleteSlice(lts, pd, opId).applyUnsafe();
+                break;
+        }
+    }
+
+    public void forceFlush()
+    {
+        gen.flush();
+    }
+
+    @Override
+    public void shutdown() throws InterruptedException
+    {
+
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/fuzz/test/SSTableGeneratorTest.java b/test/distributed/org/apache/cassandra/distributed/fuzz/test/SSTableGeneratorTest.java
new file mode 100644
index 0000000..5050bc6
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/fuzz/test/SSTableGeneratorTest.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.fuzz.test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+
+import com.google.common.collect.Iterators;
+import org.junit.Test;
+
+import harry.core.Configuration;
+import harry.core.Run;
+import harry.ddl.ColumnSpec;
+import harry.ddl.SchemaSpec;
+import harry.model.Model;
+import harry.model.QuiescentChecker;
+import harry.model.clock.OffsetClock;
+import harry.model.sut.SystemUnderTest;
+import harry.operations.Query;
+import harry.visitors.GeneratingVisitor;
+import harry.visitors.LtsVisitor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.distributed.fuzz.FixedSchemaProviderConfiguration;
+import org.apache.cassandra.distributed.fuzz.HarryHelper;
+import org.apache.cassandra.distributed.fuzz.SSTableLoadingVisitor;
+import org.apache.cassandra.distributed.impl.RowUtil;
+
+public class SSTableGeneratorTest extends CQLTester
+{
+    private static final Configuration configuration;
+
+    static
+    {
+        try
+        {
+            HarryHelper.init();
+            configuration = HarryHelper.defaultConfiguration()
+                                       .setClock(() -> new OffsetClock(10000L))
+                                       .build();
+        }
+        catch (Exception e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static SchemaSpec schemaSpec = new SchemaSpec(KEYSPACE, "tbl1",
+                                                          Arrays.asList(ColumnSpec.pk("pk1", ColumnSpec.asciiType),
+                                                                        ColumnSpec.pk("pk2", ColumnSpec.int64Type)),
+                                                          Arrays.asList(ColumnSpec.ck("ck1", ColumnSpec.asciiType, false),
+                                                                        ColumnSpec.ck("ck2", ColumnSpec.int64Type, false)),
+                                                          Arrays.asList(ColumnSpec.regularColumn("v1", ColumnSpec.int32Type),
+                                                                        ColumnSpec.regularColumn("v2", ColumnSpec.int64Type),
+                                                                        ColumnSpec.regularColumn("v3", ColumnSpec.int32Type),
+                                                                        ColumnSpec.regularColumn("v4", ColumnSpec.asciiType)),
+                                                          Arrays.asList(ColumnSpec.staticColumn("s1", ColumnSpec.asciiType),
+                                                                        ColumnSpec.staticColumn("s2", ColumnSpec.int64Type)));
+
+    @Test
+    public void testSSTableGenerator()
+    {
+        createTable(schemaSpec.compile().cql());
+        Run run = configuration.unbuild()
+                               .setSchemaProvider(new FixedSchemaProviderConfiguration(schemaSpec))
+                               .setSUT(CqlTesterSUT::new)
+                               .build()
+                               .createRun();
+
+
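+        // visit 1000 logical timestamps, materializing each operation as sstable writes, then validate every touched partition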
+        SSTableLoadingVisitor sstableVisitor = new SSTableLoadingVisitor(run, 1000);
+        LtsVisitor visitor = new GeneratingVisitor(run, sstableVisitor);
+        Set<Long> pds = new HashSet<>();
+        run.tracker.onLtsStarted((lts) -> pds.add(run.pdSelector.pd(lts, run.schemaSpec)));
+        for (int i = 0; i < 1000; i++)
+            visitor.visit();
+
+        sstableVisitor.forceFlush();
+
+        Model checker = new QuiescentChecker(run);
+        for (Long pd : pds)
+            checker.validate(Query.selectPartition(run.schemaSpec, pd, false));
+    }
+
+    public class CqlTesterSUT implements SystemUnderTest
+    {
+        public boolean isShutdown()
+        {
+            return false;
+        }
+
+        public void shutdown()
+        {
+        }
+
+        public CompletableFuture<Object[][]> executeAsync(String s, ConsistencyLevel consistencyLevel, Object... objects)
+        {
+            throw new RuntimeException("Not implemented");
+        }
+
+        public Object[][] execute(String s, ConsistencyLevel consistencyLevel, Object... objects)
+        {
+            try
+            {
+                return Iterators.toArray(RowUtil.toIter(SSTableGeneratorTest.this.execute(s, objects)),
+                                         Object[].class);
+            }
+            catch (Throwable throwable)
+            {
+                throw new RuntimeException(throwable);
+            }
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java b/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
index 95a2ac5..7d33372 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
@@ -18,33 +18,45 @@
 
 package org.apache.cassandra.distributed.impl;
 
-import java.io.File;
+import java.io.IOException;
 import java.lang.annotation.Annotation;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
 import java.net.InetSocketAddress;
+import java.nio.file.FileSystem;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.BiConsumer;
 import java.util.function.BiPredicate;
 import java.util.function.Consumer;
+import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
-
 import javax.annotation.concurrent.GuardedBy;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
+import org.junit.Assume;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,35 +68,46 @@
 import org.apache.cassandra.distributed.Constants;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IClassTransformer;
 import org.apache.cassandra.distributed.api.ICluster;
 import org.apache.cassandra.distributed.api.ICoordinator;
 import org.apache.cassandra.distributed.api.IInstance;
 import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IInstanceInitializer;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
 import org.apache.cassandra.distributed.api.IIsolatedExecutor;
 import org.apache.cassandra.distributed.api.IListen;
 import org.apache.cassandra.distributed.api.IMessage;
 import org.apache.cassandra.distributed.api.IMessageFilters;
+import org.apache.cassandra.distributed.api.IMessageSink;
 import org.apache.cassandra.distributed.api.IUpgradeableInstance;
 import org.apache.cassandra.distributed.api.LogAction;
 import org.apache.cassandra.distributed.api.NodeToolResult;
 import org.apache.cassandra.distributed.api.TokenSupplier;
 import org.apache.cassandra.distributed.shared.InstanceClassLoader;
-import org.apache.cassandra.distributed.shared.Isolated;
-import org.apache.cassandra.distributed.shared.MessageFilters;
 import org.apache.cassandra.distributed.shared.Metrics;
 import org.apache.cassandra.distributed.shared.NetworkTopology;
-import org.apache.cassandra.distributed.shared.Shared;
 import org.apache.cassandra.distributed.shared.ShutdownException;
 import org.apache.cassandra.distributed.shared.Versions;
-import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.PathUtils;
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.Isolated;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.Shared.Recursive;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.reflections.Reflections;
+import org.reflections.scanners.Scanners;
 import org.reflections.util.ConfigurationBuilder;
+import org.reflections.util.NameHelper;
 
+import static java.util.stream.Stream.of;
+import static org.apache.cassandra.distributed.impl.IsolatedExecutor.DEFAULT_SHUTDOWN_EXECUTOR;
 import static org.apache.cassandra.distributed.shared.NetworkTopology.addressAndPort;
+import static org.apache.cassandra.utils.Shared.Recursive.ALL;
+import static org.apache.cassandra.utils.Shared.Recursive.NONE;
+import static org.apache.cassandra.utils.Shared.Scope.ANY;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
 /**
  * AbstractCluster creates, initializes and manages Cassandra instances ({@link Instance}.
@@ -118,23 +141,15 @@
     // to ensure we have instantiated the main classloader's LoggerFactory (and any LogbackStatusListener)
     // before we instantiate any for a new instance
     private static final Logger logger = LoggerFactory.getLogger(AbstractCluster.class);
-    private static final AtomicInteger GENERATION = new AtomicInteger();
 
     // include byteman so tests can use
-    private static final Set<String> SHARED_CLASSES = findClassesMarkedForSharedClassLoader();
-    private static final Set<String> ISOLATED_CLASSES = findClassesMarkedForInstanceClassLoader();
-    private static final Predicate<String> SHARED_PREDICATE = s -> {
-        if (ISOLATED_CLASSES.contains(s))
-            return false;
-
-        return SHARED_CLASSES.contains(s) ||
-               InstanceClassLoader.getDefaultLoadSharedFilter().test(s) ||
-               s.startsWith("org.jboss.byteman");
-    };
+    public static final Predicate<String> SHARED_PREDICATE = getSharedClassPredicate(ANY);
 
     private final UUID clusterId = UUID.randomUUID();
-    private final File root;
+    private final Path root;
     private final ClassLoader sharedClassLoader;
+    private final Predicate<String> sharedClassPredicate;
+    private final IClassTransformer classTransformer;
     private final int subnet;
     private final TokenSupplier tokenSupplier;
     private final Map<Integer, NetworkTopology.DcAndRack> nodeIdTopology;
@@ -150,12 +165,17 @@
     // mutated by user-facing API
     private final MessageFilters filters;
     private final INodeProvisionStrategy.Strategy nodeProvisionStrategy;
-    private final BiConsumer<ClassLoader, Integer> instanceInitializer;
+    private final IInstanceInitializer instanceInitializer;
     private final int datadirCount;
     private volatile Thread.UncaughtExceptionHandler previousHandler = null;
     private volatile BiPredicate<Integer, Throwable> ignoreUncaughtThrowable = null;
     private final List<Throwable> uncaughtExceptions = new CopyOnWriteArrayList<>();
 
+    private final ThreadGroup clusterThreadGroup = new ThreadGroup(clusterId.toString());
+    private final ShutdownExecutor shutdownExecutor;
+
+    private volatile IMessageSink messageSink;
+
     /**
      * Common builder, add methods that are applicable to both Cluster and Upgradable cluster here.
      */
@@ -163,6 +183,7 @@
         extends org.apache.cassandra.distributed.shared.AbstractBuilder<I, C, B>
     {
         private INodeProvisionStrategy.Strategy nodeProvisionStrategy = INodeProvisionStrategy.Strategy.MultipleNetworkInterfaces;
+        private ShutdownExecutor shutdownExecutor = DEFAULT_SHUTDOWN_EXECUTOR;
 
         {
             // those properties may be set for unit-test optimizations; those should not be used when running dtests
@@ -173,6 +194,7 @@
         public AbstractBuilder(Factory<I, C, B> factory)
         {
             super(factory);
+            withSharedClasses(SHARED_PREDICATE);
         }
 
         public B withNodeProvisionStrategy(INodeProvisionStrategy.Strategy nodeProvisionStrategy)
@@ -180,11 +202,47 @@
             this.nodeProvisionStrategy = nodeProvisionStrategy;
             return (B) this;
         }
+
+        public B withShutdownExecutor(ShutdownExecutor shutdownExecutor)
+        {
+            this.shutdownExecutor = shutdownExecutor;
+            return (B) this;
+        }
+
+        @Override
+        public C createWithoutStarting() throws IOException
+        {
+            // if running as vnode but test sets withoutVNodes(), then skip the test
+            // AbstractCluster.createInstanceConfig has similar logic, but handles the cases where the test
+            // attempts to control tokens via config
+            // when a token supplier is defined, getTokenCount() is still used to check whether the requested vnode/no-vnode mode is supported
+            if (isVnode())
+            {
+                Assume.assumeTrue("vnode is not supported", isVNodeAllowed());
+                // if token count > 1 and isVnode, then good
+                Assume.assumeTrue("no-vnode is requested but not supported", getTokenCount() > 1);
+            }
+            else
+            {
+                Assume.assumeTrue("single-token is not supported", isSingleTokenAllowed());
+                // if token count == 1 and isVnode == false, then good
+                Assume.assumeTrue("vnode is requested but not supported", getTokenCount() == 1);
+            }
+
+            return super.createWithoutStarting();
+        }
+
+        private boolean isVnode()
+        {
+            TokenSupplier ts = getTokenSupplier();
+            return ts == null
+                   ? getTokenCount() > 1 // token supplier wasn't defined yet, so rely on getTokenCount()
+                   : ts.tokens(1).size() > 1; // token supplier is defined... check the first instance to see what tokens are used
+        }
     }
 
     protected class Wrapper extends DelegatingInvokableInstance implements IUpgradeableInstance
     {
-        private final int generation;
         private final IInstanceConfig config;
         private volatile IInvokableInstance delegate;
         private volatile Versions.Version version;
@@ -192,38 +250,81 @@
         private volatile boolean isShutdown = true;
         @GuardedBy("this")
         private InetSocketAddress broadcastAddress;
+        private int generation = -1;
 
         protected IInvokableInstance delegate()
         {
             if (delegate == null)
-                throw new IllegalStateException("Can't use shut down instances, delegate is null");
+                throw new IllegalStateException("Can't use shutdown instances, delegate is null");
             return delegate;
         }
 
         protected IInvokableInstance delegateForStartup()
         {
             if (delegate == null)
-                delegate = newInstance(generation);
+                delegate = newInstance();
             return delegate;
         }
 
-        public Wrapper(int generation, Versions.Version version, IInstanceConfig config)
+        public Wrapper(Versions.Version version, IInstanceConfig config)
         {
-            this.generation = generation;
             this.config = config;
             this.version = version;
             // we ensure there is always a non-null delegate, so that the executor may be used while the node is offline
-            this.delegate = newInstance(generation);
+            this.delegate = newInstance();
             this.broadcastAddress = config.broadcastAddress();
         }
 
-        private IInvokableInstance newInstance(int generation)
+        private IInvokableInstance newInstance()
         {
-            ClassLoader classLoader = new InstanceClassLoader(generation, config.num(), version.classpath, sharedClassLoader, SHARED_PREDICATE);
+            ++generation;
+            IClassTransformer transformer = classTransformer == null ? null : classTransformer.initialise();
+            ClassLoader classLoader = new InstanceClassLoader(generation, config.num(), version.classpath, sharedClassLoader, sharedClassPredicate, transformer);
+            ThreadGroup threadGroup = new ThreadGroup(clusterThreadGroup, "node" + config.num() + (generation > 1 ? "_" + generation : ""));
             if (instanceInitializer != null)
-                instanceInitializer.accept(classLoader, config.num());
-            return Instance.transferAdhoc((SerializableBiFunction<IInstanceConfig, ClassLoader, Instance>)Instance::new, classLoader)
-                                        .apply(config.forVersion(version.version), classLoader);
+                instanceInitializer.initialise(classLoader, threadGroup, config.num(), generation);
+
+            IInvokableInstance instance;
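+            // try the newest Instance constructor signature first, then fall back to older signatures so that
+            // Instance classes from older releases (e.g. in upgrade tests) can still be constructed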
+            try
+            {
+                instance = Instance.transferAdhocPropagate((SerializableQuadFunction<IInstanceConfig, ClassLoader, FileSystem, ShutdownExecutor, Instance>)Instance::new, classLoader)
+                                   .apply(config.forVersion(version.version), classLoader, root.getFileSystem(), shutdownExecutor);
+            }
+            catch (InvocationTargetException e)
+            {
+                try
+                {
+                    instance = Instance.transferAdhocPropagate((SerializableTriFunction<IInstanceConfig, ClassLoader, FileSystem, Instance>)Instance::new, classLoader)
+                                       .apply(config.forVersion(version.version), classLoader, root.getFileSystem());
+                }
+                catch (InvocationTargetException e2)
+                {
+                    instance = Instance.transferAdhoc((SerializableBiFunction<IInstanceConfig, ClassLoader, Instance>)Instance::new, classLoader)
+                                       .apply(config.forVersion(version.version), classLoader);
+                }
+                catch (IllegalAccessException e2)
+                {
+                    throw new RuntimeException(e);
+                }
+            }
+            catch (IllegalAccessException e)
+            {
+                throw new RuntimeException(e);
+            }
+
+            if (instanceInitializer != null)
+                instanceInitializer.beforeStartup(instance);
+
+            return instance;
+        }
+
+        public Executor executorFor(int verb)
+        {
+            if (isShutdown)
+                throw new IllegalStateException();
+
+            // this method must be lock-free to avoid Simulator deadlock
+            return delegate().executorFor(verb);
         }
 
         public IInstanceConfig config()
@@ -233,19 +334,29 @@
 
         public boolean isShutdown()
         {
-            return isShutdown;
+            IInvokableInstance delegate = this.delegate;
+            // if the instance shuts down on its own, detect that
+            return isShutdown || (delegate != null && delegate.isShutdown());
         }
 
         private boolean isRunning()
         {
-            return !isShutdown;
+            return !isShutdown();
+        }
+
+        @Override
+        public boolean isValid()
+        {
+            return delegate != null;
         }
 
         @Override
         public synchronized void startup()
         {
             startup(AbstractCluster.this);
+            postStartup();
         }
+
         public synchronized void startup(ICluster cluster)
         {
             if (cluster != AbstractCluster.this)
@@ -253,6 +364,9 @@
             if (isRunning())
                 throw new IllegalStateException("Can not start a instance that is already running");
             isShutdown = false;
+            // if the delegate isn't running, remove so it can be recreated
+            if (delegate != null && delegate.isShutdown())
+                delegate = null;
             if (!broadcastAddress.equals(config.broadcastAddress()))
             {
                 // previous address != desired address, so cleanup
@@ -261,6 +375,8 @@
                 instanceMap.put(newAddress, (I) this); // if the broadcast address changes, update
                 instanceMap.remove(previous);
                 broadcastAddress = newAddress;
+                // remove delegate to make sure static state is reset
+                delegate = null;
             }
             try
             {
@@ -288,6 +404,9 @@
             // This duplicates work done in Instance startup, but keeping as other Instance implementations
             // do not, so to permit older releases to be tested, repeat the setup
             updateMessagingVersions();
+
+            if (instanceInitializer != null)
+                instanceInitializer.afterStartup(this);
         }
 
         @Override
@@ -346,6 +465,14 @@
         }
 
         @Override
+        public void receiveMessageWithInvokingThread(IMessage message)
+        {
+            IInvokableInstance delegate = this.delegate;
+            if (isRunning() && delegate != null) // since we sync directly on the other node, we drop messages immediately if we are shut down
+                delegate.receiveMessageWithInvokingThread(message);
+        }
+
+        @Override
         public boolean getLogsEnabled()
         {
             return delegate().getLogsEnabled();
@@ -392,28 +519,30 @@
 
     protected AbstractCluster(AbstractBuilder<I, ? extends ICluster<I>, ?> builder)
     {
-        this.root = builder.getRoot();
+        this.root = builder.getRootPath();
         this.sharedClassLoader = builder.getSharedClassLoader();
+        this.sharedClassPredicate = builder.getSharedClasses();
+        this.classTransformer = builder.getClassTransformer();
         this.subnet = builder.getSubnet();
         this.tokenSupplier = builder.getTokenSupplier();
         this.nodeIdTopology = builder.getNodeIdTopology();
         this.configUpdater = builder.getConfigUpdater();
         this.broadcastPort = builder.getBroadcastPort();
         this.nodeProvisionStrategy = builder.nodeProvisionStrategy;
+        this.shutdownExecutor = builder.shutdownExecutor;
         this.instances = new ArrayList<>();
         this.instanceMap = new ConcurrentHashMap<>();
         this.initialVersion = builder.getVersion();
         this.filters = new MessageFilters();
-        this.instanceInitializer = builder.getInstanceInitializer();
+        this.instanceInitializer = builder.getInstanceInitializer2();
         this.datadirCount = builder.getDatadirCount();
 
-        int generation = GENERATION.incrementAndGet();
         for (int i = 0; i < builder.getNodeCount(); ++i)
         {
             int nodeNum = i + 1;
             InstanceConfig config = createInstanceConfig(nodeNum);
 
-            I instance = newInstanceWrapperInternal(generation, initialVersion, config);
+            I instance = newInstanceWrapperInternal(initialVersion, config);
             instances.add(instance);
             // we use the config().broadcastAddressAndPort() here because we have not initialised the Instance
             I prev = instanceMap.put(instance.config().broadcastAddress(), instance);
@@ -427,15 +556,46 @@
         return createInstanceConfig(size() + 1);
     }
 
-    private InstanceConfig createInstanceConfig(int nodeNum)
+    @VisibleForTesting
+    InstanceConfig createInstanceConfig(int nodeNum)
     {
         INodeProvisionStrategy provisionStrategy = nodeProvisionStrategy.create(subnet);
-        long token = tokenSupplier.token(nodeNum);
+        Collection<String> tokens = tokenSupplier.tokens(nodeNum);
         NetworkTopology topology = buildNetworkTopology(provisionStrategy, nodeIdTopology);
-        InstanceConfig config = InstanceConfig.generate(nodeNum, provisionStrategy, topology, root, Long.toString(token), datadirCount);
+        InstanceConfig config = InstanceConfig.generate(nodeNum, provisionStrategy, topology, root, tokens, datadirCount);
         config.set(Constants.KEY_DTEST_API_CLUSTER_ID, clusterId.toString());
+        // if a test sets num_tokens directly, then respect it and only run if vnode or no-vnode is defined
+        int defaultTokenCount = config.getInt("num_tokens");
+        assert tokens.size() == defaultTokenCount : String.format("num_tokens=%d but tokens are %s; size does not match", defaultTokenCount, tokens);
+        String defaultTokens = config.getString("initial_token");
         if (configUpdater != null)
+        {
             configUpdater.accept(config);
+            int testTokenCount = config.getInt("num_tokens");
+            if (defaultTokenCount != testTokenCount)
+            {
+                if (testTokenCount == 1)
+                {
+                    // test is no-vnode, but running with vnode, so skip
+                    Assume.assumeTrue("vnode is not supported", false);
+                }
+                else
+                {
+                    Assume.assumeTrue("no-vnode is requested but not supported", defaultTokenCount > 1);
+                    // if the test controls initial_token or GOSSIP is enabled, then the test is safe to run
+                    if (defaultTokens.equals(config.getString("initial_token")))
+                    {
+                        // test didn't define initial_token
+                        Assume.assumeTrue("vnode is enabled and num_tokens is defined in test without GOSSIP or setting initial_token", config.has(Feature.GOSSIP));
+                        config.remove("initial_token");
+                    }
+                    else
+                    {
+                        // test defined initial_token; trust it
+                    }
+                }
+            }
+        }
         return config;
     }
 
@@ -453,17 +613,17 @@
     }
 
 
-    protected abstract I newInstanceWrapper(int generation, Versions.Version version, IInstanceConfig config);
+    protected abstract I newInstanceWrapper(Versions.Version version, IInstanceConfig config);
 
-    protected I newInstanceWrapperInternal(int generation, Versions.Version version, IInstanceConfig config)
+    protected I newInstanceWrapperInternal(Versions.Version version, IInstanceConfig config)
     {
         config.validate();
-        return newInstanceWrapper(generation, version, config);
+        return newInstanceWrapper(version, config);
     }
 
     public I bootstrap(IInstanceConfig config)
     {
-        I instance = newInstanceWrapperInternal(0, initialVersion, config);
+        I instance = newInstanceWrapperInternal(initialVersion, config);
         instances.add(instance);
         I prev = instanceMap.put(config.broadcastAddress(), instance);
 
@@ -530,7 +690,7 @@
                                               i.config().localRack().equals(rackName));
     }
 
-    public void run(Consumer<? super I> action,  Predicate<I> filter)
+    public void run(Consumer<? super I> action, Predicate<I> filter)
     {
         run(Collections.singletonList(action), filter);
     }
@@ -598,6 +758,20 @@
         return filters;
     }
 
+    public synchronized void setMessageSink(IMessageSink sink)
+    {
+        if (messageSink != null && sink != null)
+            throw new IllegalStateException();
+        this.messageSink = sink;
+    }
+
+    public void deliverMessage(InetSocketAddress to, IMessage message)
+    {
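+        // if a message sink has been registered, route messages through it instead of delivering directly to the target instance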
+        IMessageSink sink = messageSink;
+        if (sink == null) get(to).receiveMessage(message);
+        else sink.accept(to, message);
+    }
+
     public IMessageFilters.Builder verbs(Verb... verbs)
     {
         int[] ids = new int[verbs.length];
@@ -678,7 +852,7 @@
     public abstract class ChangeMonitor implements AutoCloseable
     {
         final List<IListen.Cancel> cleanup;
-        final SimpleCondition completed;
+        final Condition completed;
         private final long timeOut;
         private final TimeUnit timeoutUnit;
         protected Predicate<IInstance> instanceFilter;
@@ -690,7 +864,7 @@
             this.timeoutUnit = timeoutUnit;
             this.instanceFilter = i -> true;
             this.cleanup = new ArrayList<>(instances.size());
-            this.completed = new SimpleCondition();
+            this.completed = newOneTimeCondition();
         }
 
         public void ignoreStoppedInstances()
@@ -700,7 +874,7 @@
 
         protected void signal()
         {
-            if (initialized && !completed.isSignaled() && isCompleted())
+            if (initialized && !completed.isSignalled() && isCompleted())
                 completed.signalAll();
         }
 
@@ -819,8 +993,14 @@
                     startParallel.add(instance);
             }
 
-            forEach(startSequentially, I::startup);
-            parallelForEach(startParallel, I::startup, 0, null);
+            forEach(startSequentially, i -> {
+                i.startup(this);
+                i.postStartup();
+            });
+            parallelForEach(startParallel, i -> {
+                i.startup(this);
+                i.postStartup();
+            }, 0, null);
             monitor.waitForCompletion();
         }
     }
@@ -861,9 +1041,10 @@
 
         instances.clear();
         instanceMap.clear();
+        PathUtils.setDeletionListener(ignore -> {});
         // Make sure to only delete directory when threads are stopped
-        if (root.exists())
-            FileUtils.deleteRecursive(root);
+        if (Files.exists(root))
+            PathUtils.deleteRecursive(root);
         Thread.setDefaultUncaughtExceptionHandler(previousHandler);
         previousHandler = null;
         checkAndResetUncaughtExceptions();
@@ -915,12 +1096,12 @@
     public List<Token> tokens()
     {
         return stream()
-               .map(i ->
+               .flatMap(i ->
                     {
                         try
                         {
                             IPartitioner partitioner = ((IPartitioner)Class.forName(i.config().getString("partitioner")).newInstance());
-                            return partitioner.getTokenFactory().fromString(i.config().getString("initial_token"));
+                            return Stream.of(i.config().getString("initial_token").split(",")).map(partitioner.getTokenFactory()::fromString);
                         }
                         catch (Throwable t)
                         {
@@ -930,21 +1111,218 @@
                .collect(Collectors.toList());
     }
 
-    private static Set<String> findClassesMarkedForSharedClassLoader()
+    private static Set<String> findClassesMarkedForSharedClassLoader(Class<?>[] share, Shared.Scope ... scopes)
     {
-        return findClassesMarkedWith(Shared.class);
+        return findClassesMarkedForSharedClassLoader(share, ImmutableSet.copyOf(scopes)::contains);
     }
 
-    private static Set<String> findClassesMarkedForInstanceClassLoader()
+    private static Set<String> findClassesMarkedForSharedClassLoader(Class<?>[] share, Predicate<Shared.Scope> scopes)
     {
-        return findClassesMarkedWith(Isolated.class);
+        Set<Class<?>> classes = findClassesMarkedWith(Shared.class, a -> of(a.scope()).anyMatch(scopes));
+        Collections.addAll(classes, share);
+        assertTransitiveClosure(classes);
+        return toNames(classes);
     }
 
-    private static Set<String> findClassesMarkedWith(Class<? extends Annotation> annotation)
+    private static Set<String> findClassesMarkedForInstanceClassLoader(Class<?>[] isolate)
     {
-        return new Reflections(ConfigurationBuilder.build("org.apache.cassandra").setExpandSuperTypes(false))
-               .getTypesAnnotatedWith(annotation).stream()
-               .map(Class::getName)
+        Set<Class<?>> classes = findClassesMarkedWith(Isolated.class, ignore -> true);
+        Collections.addAll(classes, isolate);
+        return toNames(classes);
+    }
+
+    public static Predicate<String> getSharedClassPredicate(Shared.Scope ... scopes)
+    {
+        return getSharedClassPredicate(new Class[0], new Class[0], scopes);
+    }
+
+    public static Predicate<String> getSharedClassPredicate(Class<?>[] isolate, Class<?>[] share, Shared.Scope ... scopes)
+    {
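+        // isolated classes (annotated @Isolated or passed in explicitly) always load per-instance; otherwise classes
+        // marked @Shared for the given scopes, matching the default shared filter, or belonging to byteman are shared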
+        Set<String> shared = findClassesMarkedForSharedClassLoader(share, scopes);
+        Set<String> isolated = findClassesMarkedForInstanceClassLoader(isolate);
+        return s -> {
+            if (isolated.contains(s))
+                return false;
+
+            return shared.contains(s) ||
+                   InstanceClassLoader.getDefaultLoadSharedFilter().test(s) ||
+                   s.startsWith("org.jboss.byteman");
+        };
+    }
+
+    private static <A extends Annotation> Set<Class<?>> findClassesMarkedWith(Class<A> annotation, Predicate<A> testAnnotation)
+    {
+        Reflections reflections = new Reflections(ConfigurationBuilder.build("org.apache.cassandra").setExpandSuperTypes(false));
+        return Utils.INSTANCE.forNames(reflections.get(Scanners.TypesAnnotated.get(annotation.getName())),
+                        reflections.getConfiguration().getClassLoaders())
+               .stream()
+               .filter(testAnnotation(annotation, testAnnotation))
+               .flatMap(expander())
                .collect(Collectors.toSet());
     }
+
+    private static Set<String> toNames(Set<Class<?>> classes)
+    {
+        return classes.stream().map(Class::getName).collect(Collectors.toSet());
+    }
+
+    private static <A extends Annotation> Predicate<Class<?>> testAnnotation(Class<A> annotation, Predicate<A> test)
+    {
+        return clazz -> {
+            A[] annotations = clazz.getDeclaredAnnotationsByType(annotation);
+            for (A a : annotations)
+            {
+                if (!test.test(a))
+                    return false;
+            }
+            return true;
+        };
+    }
+
+    private static void assertTransitiveClosure(Set<Class<?>> classes)
+    {
+        Set<Class<?>> tested = new HashSet<>();
+        for (Class<?> clazz : classes)
+        {
+            forEach(test -> {
+                if (!classes.contains(test))
+                    throw new AssertionError(clazz.getName() + " is shared, but its dependency " + test + " is not");
+            }, new SharedParams(ALL, ALL, NONE), clazz, tested);
+        }
+    }
+
+    private static class SharedParams
+    {
+        final Recursive ancestors, members, inner;
+
+        private SharedParams(Recursive ancestors, Recursive members, Recursive inner)
+        {
+            this.ancestors = ancestors;
+            this.members = members;
+            this.inner = inner;
+        }
+
+        private SharedParams(Shared shared)
+        {
+            this.ancestors = shared.ancestors();
+            this.members = shared.members();
+            this.inner = shared.inner();
+        }
+    }
+
+    private static void forEach(Consumer<Class<?>> forEach, SharedParams shared, Class<?> cur, Set<Class<?>> done)
+    {
+        if (null == (cur = consider(cur, done)))
+            return;
+
+        forEach.accept(cur);
+
+        switch (shared.ancestors)
+        {
+            case ALL:
+                forEach(forEach, shared, cur.getSuperclass(), done);
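+                // intentional fall-through: ALL also recurses into implemented interfaces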
+            case INTERFACES:
+                for (Class<?> i : cur.getInterfaces())
+                    forEach(forEach, shared, i, done);
+        }
+
+        if (shared.members != NONE)
+        {
+            for (Field field : cur.getDeclaredFields())
+            {
+                if ((field.getModifiers() & Modifier.PRIVATE) == 0)
+                    forEachMatch(shared.members, forEach, shared, field.getType(), done);
+            }
+
+            for (Method method : cur.getDeclaredMethods())
+            {
+                if ((method.getModifiers() & Modifier.PRIVATE) == 0)
+                {
+                    forEachMatch(shared.members, forEach, shared, method.getReturnType(), done);
+                    forEachMatch(shared.members, forEach, shared, method.getParameterTypes(), done);
+                }
+            }
+        }
+
+        if (shared.inner != NONE)
+            forEachMatch(shared.inner, forEach, shared, cur.getDeclaredClasses(), done);
+    }
+
+    private static void forEachMatch(Recursive ifMatches, Consumer<Class<?>> forEach, SharedParams shared, Class<?>[] classes, Set<Class<?>> done)
+    {
+        for (Class<?> cur : classes)
+            forEachMatch(ifMatches, forEach, shared, cur, done);
+    }
+
+    private static void forEachMatch(Recursive ifMatches, Consumer<Class<?>> forEach, SharedParams shared, Class<?> cur, Set<Class<?>> done)
+    {
+        if (ifMatches == ALL || isInterface(cur))
+            forEach(forEach, shared, cur, done);
+    }
+
+    private static boolean isInterface(Class<?> test)
+    {
+        return test.isInterface() || test.isEnum() || Throwable.class.isAssignableFrom(test);
+    }
+
+    private static Function<Class<?>, Stream<Class<?>>> expander()
+    {
+        Set<Class<?>> done = new HashSet<>();
+        return clazz -> expand(clazz, done);
+    }
+
+    private static Stream<Class<?>> expand(Class<?> clazz, Set<Class<?>> done)
+    {
+        Optional<Shared> maybeShared = of(clazz.getDeclaredAnnotationsByType(Shared.class)).findFirst();
+        if (!maybeShared.isPresent())
+            return Stream.of(clazz);
+
+        Shared shared = maybeShared.get();
+        if (shared.inner() == NONE && shared.members() == NONE && shared.ancestors() == NONE)
+            return Stream.of(clazz);
+
+        Set<Class<?>> closure = new HashSet<>();
+        forEach(closure::add, new SharedParams(shared), clazz, done);
+        return closure.stream();
+    }
+
+    private static Class<?> consider(Class<?> consider, Set<Class<?>> considered)
+    {
+        if (consider == null) return null;
+        while (consider.isArray()) // TODO (future): this is inadequate handling of array types (fine for now)
+            consider = consider.getComponentType();
+
+        if (consider.isPrimitive()) return null;
+        if (consider.getPackage() != null && consider.getPackage().getName().startsWith("java.")) return null;
+        if (!considered.add(consider)) return null;
+        if (InstanceClassLoader.getDefaultLoadSharedFilter().test(consider.getName())) return null;
+
+        return consider;
+    }
+
+    // 3.0 and earlier clusters must have unique InetAddressAndPort for each InetAddress
+    public static <I extends IInstance> Map<InetSocketAddress, I> getUniqueAddressLookup(ICluster<I> cluster)
+    {
+        return getUniqueAddressLookup(cluster, Function.identity());
+    }
+
+    public static <I extends IInstance, V> Map<InetSocketAddress, V> getUniqueAddressLookup(ICluster<I> cluster, Function<I, V> function)
+    {
+        Map<InetSocketAddress, V> lookup = new HashMap<>();
+        cluster.stream().forEach(instance -> {
+            InetSocketAddress address = instance.broadcastAddress();
+            if (!address.equals(instance.config().broadcastAddress()))
+                throw new IllegalStateException("addressAndPort mismatch: " + address + " vs " + instance.config().broadcastAddress());
+            V prev = lookup.put(address, function.apply(instance));
+            if (null != prev)
+                throw new IllegalStateException("This version of Cassandra does not support multiple nodes with the same InetAddress: " + address + " vs " + prev);
+        });
+        return lookup;
+    }
+
+    // after upgrading the reflections library, a static function became an interface method, so this class is needed to mimic the old behavior
+    private enum Utils implements NameHelper
+    {
+        INSTANCE;
+    }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/AbstractClusterTest.java b/test/distributed/org/apache/cassandra/distributed/impl/AbstractClusterTest.java
new file mode 100644
index 0000000..1b2a1ce
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/impl/AbstractClusterTest.java
@@ -0,0 +1,251 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.impl;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.junit.AssumptionViolatedException;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.TokenSupplier;
+import org.apache.cassandra.distributed.impl.AbstractCluster.AbstractBuilder;
+import org.apache.cassandra.distributed.shared.Versions;
+import org.apache.cassandra.utils.FailingRunnable;
+import org.assertj.core.api.Assertions;
+import org.mockito.Mockito;
+
+
+public class AbstractClusterTest
+{
+    @Test
+    public void allowVnodeWithMultipleTokens()
+    {
+        AbstractBuilder builder = builder();
+        builder.withTokenCount(42);
+        unroll(() -> builder.createWithoutStarting());
+    }
+
+    @Test
+    public void allowVnodeWithSingleToken()
+    {
+        AbstractBuilder builder = builder();
+        builder.withTokenCount(1);
+        unroll(() -> builder.createWithoutStarting());
+    }
+
+    @Test
+    public void disallowVnodeWithMultipleTokens()
+    {
+        AbstractBuilder builder = builder();
+
+        builder.withoutVNodes();
+        builder.withTokenCount(42);
+
+        Assertions.assertThatThrownBy(() -> builder.createWithoutStarting())
+                  .isInstanceOf(AssumptionViolatedException.class)
+                  .hasMessage("vnode is not supported");
+    }
+
+
+    @Test
+    public void disallowVnodeWithSingleToken()
+    {
+        AbstractBuilder builder = builder();
+
+        builder.withoutVNodes();
+        builder.withTokenCount(1);
+
+        unroll(() -> builder.createWithoutStarting());
+    }
+
+    @Test
+    public void withoutVNodes()
+    {
+        AbstractBuilder builder = builder();
+
+        builder.withoutVNodes();
+        //TODO casting is annoying... what can be done to be smarter?
+        builder.withTokenSupplier((TokenSupplier) i -> Arrays.asList("a", "b", "c"));
+
+        Assertions.assertThatThrownBy(() -> builder.createWithoutStarting())
+                  .isInstanceOf(AssumptionViolatedException.class)
+                  .hasMessage("vnode is not supported");
+    }
+
+    @Test
+    public void vnodeButTokensDoNotMatch()
+    {
+        AbstractBuilder builder = builder();
+
+        builder.withTokenCount(1);
+        //TODO casting is annoying... what can be done to be smarter?
+        builder.withTokenSupplier((TokenSupplier) i -> Arrays.asList("a", "b", "c"));
+
+        Assertions.assertThatThrownBy(() -> builder.createWithoutStarting())
+                  .isInstanceOf(AssumptionViolatedException.class)
+                  .hasMessage("no-vnode is requested but not supported");
+    }
+
+    @Test
+    public void noVnodeButTokensDoNotMatch()
+    {
+        AbstractBuilder builder = builder();
+
+        builder.withTokenCount(42);
+        //TODO casting is annoying... what can be done to be smarter?
+        builder.withTokenSupplier((TokenSupplier) i -> Arrays.asList("a"));
+
+        Assertions.assertThatThrownBy(() -> builder.createWithoutStarting())
+                  .isInstanceOf(AssumptionViolatedException.class)
+                  .hasMessage("vnode is requested but not supported");
+    }
+
+    @Test
+    public void vnodeNotSupported()
+    {
+        ConfigUpdate config = ConfigUpdate.of("num_tokens", 1);
+        AbstractCluster<?> cluster = cluster(4, config);
+        config.check = true;
+        Assertions.assertThatThrownBy(() -> cluster.createInstanceConfig(1))
+                  .isInstanceOf(AssumptionViolatedException.class)
+                  .hasMessage("vnode is not supported");
+    }
+
+    @Test
+    public void noVnodeNotSupported()
+    {
+        ConfigUpdate config = ConfigUpdate.of("num_tokens", 4);
+        AbstractCluster<?> cluster = cluster(1, config);
+        config.check = true;
+        Assertions.assertThatThrownBy(() -> cluster.createInstanceConfig(1))
+                  .isInstanceOf(AssumptionViolatedException.class)
+                  .hasMessage("no-vnode is requested but not supported");
+    }
+
+    @Test
+    public void vnodeMismatch()
+    {
+        ConfigUpdate config = ConfigUpdate.of("num_tokens", 4);
+        AbstractCluster<?> cluster = cluster(2, config);
+        config.check = true;
+        Assertions.assertThatThrownBy(() -> cluster.createInstanceConfig(1))
+                  .isInstanceOf(AssumptionViolatedException.class)
+                  .hasMessage("vnode is enabled and num_tokens is defined in test without GOSSIP or setting initial_token");
+    }
+
+    @Test
+    public void vnodeMismatchDefinesTokens()
+    {
+        ConfigUpdate config = ConfigUpdate.of("num_tokens", 4, "initial_token", "some values");
+        AbstractCluster<?> cluster = cluster(2, config);
+        config.check = true;
+        unroll(() -> cluster.createInstanceConfig(1));
+    }
+
+    private static void unroll(FailingRunnable r)
+    {
+        try
+        {
+            r.run();
+        }
+        catch (AssumptionViolatedException e)
+        {
+            AssertionError e2 = new AssertionError(e.getMessage());
+            e2.setStackTrace(e.getStackTrace());
+            throw e2;
+        }
+    }
+
+    private static AbstractCluster<?> cluster(int tokenCount, Consumer<IInstanceConfig> fn)
+    {
+        try
+        {
+            return (AbstractCluster<?>) builder()
+                                        .withTokenCount(tokenCount)
+                                        .withConfig(fn)
+                                        .createWithoutStarting();
+        }
+        catch (IOException e)
+        {
+            throw new AssertionError(e);
+        }
+    }
+
+    private static AbstractBuilder builder()
+    {
+        return new Builder().withNodes(1);
+    }
+
+    private static class Builder extends AbstractBuilder<IInvokableInstance, Cluster, Builder>
+    {
+        public Builder()
+        {
+            super(Cluster::new);
+        }
+    }
+
+    private static class Cluster extends AbstractCluster<IInvokableInstance>
+    {
+        protected Cluster(AbstractBuilder builder)
+        {
+            super(builder);
+        }
+
+        @Override
+        protected IInvokableInstance newInstanceWrapper(Versions.Version version, IInstanceConfig config)
+        {
+            IInvokableInstance inst = Mockito.mock(IInvokableInstance.class);
+            Mockito.when(inst.config()).thenReturn(config);
+            return inst;
+        }
+    }
+
+    private static class ConfigUpdate implements Consumer<IInstanceConfig>
+    {
+        public boolean check = false;
+        private final Map<String, Object> override;
+
+        private ConfigUpdate(Map<String, Object> override)
+        {
+            this.override = override;
+        }
+
+        public static ConfigUpdate of(Object... args)
+        {
+            Map<String, Object> override = new HashMap<>();
+            for (int i = 0; i < args.length; i = i + 2)
+                override.put((String) args[i], args[i + 1]);
+            return new ConfigUpdate(override);
+        }
+
+        @Override
+        public void accept(IInstanceConfig config)
+        {
+            if (!check)
+                return;
+            for (Map.Entry<String, Object> e : override.entrySet())
+                config.set(e.getKey(), e.getValue());
+        }
+    }
+}
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java b/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java
index e31ce2c..19d8062 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java
@@ -32,7 +32,6 @@
 import org.apache.cassandra.cql3.CQLStatement;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.cql3.statements.SelectStatement;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.ICoordinator;
@@ -43,13 +42,15 @@
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.service.QueryState;
-import org.apache.cassandra.service.pager.QueryPager;
-import org.apache.cassandra.transport.ClientStat;
+import org.apache.cassandra.service.reads.thresholds.CoordinatorWarnings;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class Coordinator implements ICoordinator
 {
@@ -62,7 +63,7 @@
     @Override
     public SimpleQueryResult executeWithResult(String query, ConsistencyLevel consistencyLevel, Object... boundValues)
     {
-        return instance().sync(() -> executeInternal(query, consistencyLevel, boundValues)).call();
+        return instance().sync(() -> unsafeExecuteInternal(query, consistencyLevel, boundValues)).call();
     }
 
     public Future<SimpleQueryResult> asyncExecuteWithTracingWithResult(UUID sessionId, String query, ConsistencyLevel consistencyLevelOrigin, Object... boundValues)
@@ -70,8 +71,8 @@
         return instance.async(() -> {
             try
             {
-                Tracing.instance.newSession(sessionId, Collections.emptyMap());
-                return executeInternal(query, consistencyLevelOrigin, boundValues);
+                Tracing.instance.newSession(TimeUUID.fromUuid(sessionId), Collections.emptyMap());
+                return unsafeExecuteInternal(query, consistencyLevelOrigin, boundValues);
             }
             finally
             {
@@ -80,17 +81,33 @@
         }).call();
     }
 
-    protected org.apache.cassandra.db.ConsistencyLevel toCassandraCL(ConsistencyLevel cl)
+    public static org.apache.cassandra.db.ConsistencyLevel toCassandraCL(ConsistencyLevel cl)
     {
-        return org.apache.cassandra.db.ConsistencyLevel.fromCode(cl.ordinal());
+        try
+        {
+            return org.apache.cassandra.db.ConsistencyLevel.fromCode(cl.code);
+        }
+        catch (NoSuchFieldError e)
+        {
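+            // fall back to ordinal() when the dtest-api ConsistencyLevel in use does not define 'code' (e.g. an older API version)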
+            return org.apache.cassandra.db.ConsistencyLevel.fromCode(cl.ordinal());
+        }
     }
 
-    private SimpleQueryResult executeInternal(String query, ConsistencyLevel consistencyLevelOrigin, Object[] boundValues)
+    protected static org.apache.cassandra.db.ConsistencyLevel toCassandraSerialCL(ConsistencyLevel cl)
+    {
+        return toCassandraCL(cl == null ? ConsistencyLevel.SERIAL : cl);
+    }
+
+    public static SimpleQueryResult unsafeExecuteInternal(String query, ConsistencyLevel consistencyLevel, Object[] boundValues)
+    {
+        return unsafeExecuteInternal(query, null, consistencyLevel, boundValues);
+    }
+
+    public static SimpleQueryResult unsafeExecuteInternal(String query, ConsistencyLevel serialConsistencyLevel, ConsistencyLevel commitConsistencyLevel, Object[] boundValues)
     {
         ClientState clientState = makeFakeClientState();
         CQLStatement prepared = QueryProcessor.getStatement(query, clientState);
         List<ByteBuffer> boundBBValues = new ArrayList<>();
-        ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf(consistencyLevelOrigin.name());
         for (Object boundValue : boundValues)
             boundBBValues.add(ByteBufferUtil.objectToBytes(boundValue));
 
@@ -99,23 +116,36 @@
         // Start capturing warnings on this thread. Note that this will implicitly clear out any previous 
         // warnings as it sets a new State instance on the ThreadLocal.
         ClientWarn.instance.captureWarnings();
-        
-        ResultMessage res = prepared.execute(QueryState.forInternalCalls(),
-                                             QueryOptions.create(toCassandraCL(consistencyLevel),
-                                                                 boundBBValues,
-                                                                 false,
-                                                                 Integer.MAX_VALUE,
-                                                                 null,
-                                                                 null,
-                                                                 ProtocolVersion.CURRENT,
-                                                                 null),
-                                             System.nanoTime());
+        CoordinatorWarnings.init();
+        try
+        {
+            ResultMessage res = prepared.execute(QueryState.forInternalCalls(),
+                                   QueryOptions.create(toCassandraCL(commitConsistencyLevel),
+                                                       boundBBValues,
+                                                       false,
+                                                       Integer.MAX_VALUE,
+                                                       null,
+                                                       toCassandraSerialCL(serialConsistencyLevel),
+                                                       ProtocolVersion.CURRENT,
+                                                       null),
+                                   nanoTime());
+            // Collect warnings reported during the query.
+            CoordinatorWarnings.done();
+            if (res != null)
+                res.setWarnings(ClientWarn.instance.getWarnings());
 
-        // Collect warnings reported during the query.
-        if (res != null)
-            res.setWarnings(ClientWarn.instance.getWarnings());
-
-        return RowUtil.toQueryResult(res);
+            return RowUtil.toQueryResult(res);
+        }
+        catch (Exception | Error e)
+        {
+            CoordinatorWarnings.done();
+            throw e;
+        }
+        finally
+        {
+            CoordinatorWarnings.reset();
+            ClientWarn.instance.resetWarnings();
+        }
     }
 
     public Object[][] executeWithTracing(UUID sessionId, String query, ConsistencyLevel consistencyLevelOrigin, Object... boundValues)
@@ -129,6 +159,12 @@
     }
 
     @Override
+    public SimpleQueryResult executeWithResult(String query, ConsistencyLevel serialConsistencyLevel, ConsistencyLevel commitConsistencyLevel, Object... boundValues)
+    {
+        return instance.sync(() -> unsafeExecuteInternal(query, serialConsistencyLevel, commitConsistencyLevel, boundValues)).call();
+    }
+
+    @Override
     public QueryResult executeWithPagingWithResult(String query, ConsistencyLevel consistencyLevelOrigin, int pageSize, Object... boundValues)
     {
         if (pageSize <= 0)
@@ -145,7 +181,7 @@
             prepared.validate(clientState);
             assert prepared instanceof SelectStatement : "Only SELECT statements can be executed with paging";
 
-            long nanoTime = System.nanoTime();
+            long nanoTime = nanoTime();
             SelectStatement selectStatement = (SelectStatement) prepared;
 
             QueryState queryState = new QueryState(clientState);
@@ -197,7 +233,7 @@
         }).call();
     }
 
-    private static final ClientState makeFakeClientState()
+    public static ClientState makeFakeClientState()
     {
         return ClientState.forExternalCalls(new InetSocketAddress(FBUtilities.getJustLocalAddress(), 9042));
     }
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/DelegatingInvokableInstance.java b/test/distributed/org/apache/cassandra/distributed/impl/DelegatingInvokableInstance.java
index 50f59a2..23b3768 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/DelegatingInvokableInstance.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/DelegatingInvokableInstance.java
@@ -21,6 +21,8 @@
 import java.io.Serializable;
 import java.net.InetSocketAddress;
 import java.util.UUID;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
@@ -31,6 +33,7 @@
 import org.apache.cassandra.distributed.api.ICoordinator;
 import org.apache.cassandra.distributed.api.IInstanceConfig;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
 import org.apache.cassandra.distributed.api.IListen;
 import org.apache.cassandra.distributed.api.IMessage;
 import org.apache.cassandra.distributed.api.SimpleQueryResult;
@@ -49,7 +52,7 @@
     @Override
     public InetSocketAddress broadcastAddress()
     {
-        return delegate().broadcastAddress();
+        return config().broadcastAddress();
     }
 
     @Override
@@ -134,6 +137,18 @@
     }
 
     @Override
+    public IIsolatedExecutor with(ExecutorService executor)
+    {
+        return delegate().with(executor);
+    }
+
+    @Override
+    public Executor executor()
+    {
+        return delegate().executor();
+    }
+
+    @Override
     public void startup(ICluster cluster)
     {
         delegateForStartup().startup(cluster);
@@ -254,11 +269,6 @@
     }
 
     @Override
-    public <O> O callOnInstance(SerializableCallable<O> call)
-    {
-        return delegate().callOnInstance(call);
-    }
-
     public <I1, I2, I3, I4, O> QuadFunction<I1, I2, I3, I4, Future<O>> async(QuadFunction<I1, I2, I3, I4, O> f)
     {
         return delegate().async(f);
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/DirectStreamingConnectionFactory.java b/test/distributed/org/apache/cassandra/distributed/impl/DirectStreamingConnectionFactory.java
new file mode 100644
index 0000000..72105d8
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/impl/DirectStreamingConnectionFactory.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.impl;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.FileChannel;
+import java.util.ArrayDeque;
+import java.util.Queue;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+
+import io.netty.util.concurrent.Future;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
+import org.apache.cassandra.io.util.RebufferingInputStream;
+import org.apache.cassandra.net.OutboundConnectionSettings;
+import org.apache.cassandra.streaming.StreamDeserializingTask;
+import org.apache.cassandra.streaming.StreamingDataInputPlus;
+import org.apache.cassandra.streaming.StreamingDataOutputPlus;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.StreamingDataOutputPlusFixed;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.locator.InetAddressAndPort.getByAddress;
+import static org.apache.cassandra.net.MessagingService.*;
+
+// TODO: Simulator should schedule based on some streaming data rate
+public class DirectStreamingConnectionFactory
+{
+    static class DirectConnection
+    {
+        private static final AtomicInteger nextId = new AtomicInteger();
+
+        final int protocolVersion;
+        final long sendBufferSize;
+
+        // TODO rename
+        private static class Buffer
+        {
+            final Queue<byte[]> pending = new ArrayDeque<>();
+            boolean isClosed;
+            int pendingBytes = 0;
+        }
+
+        @SuppressWarnings({"InnerClassMayBeStatic","unused"}) // helpful for debugging
+        class DirectStreamingChannel implements StreamingChannel
+        {
+            class Out extends BufferedDataOutputStreamPlus implements StreamingDataOutputPlus
+            {
+                private final Buffer out;
+                private Thread thread;
+                private boolean inUse;
+
+                Out(Buffer out)
+                {
+                    super(ByteBuffer.allocate(16 << 10));
+                    this.out = out;
+                }
+
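+                // move the buffered bytes onto the shared queue, blocking while queuing them would exceed sendBufferSize of unread data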
+                protected void doFlush(int count) throws IOException
+                {
+                    if (buffer.position() == 0)
+                        return;
+
+                    try
+                    {
+                        synchronized (out)
+                        {
+                            while (out.pendingBytes > 0 && count + out.pendingBytes > sendBufferSize && !out.isClosed)
+                                out.wait();
+
+                            if (out.isClosed)
+                                throw new ClosedChannelException();
+
+                            buffer.flip();
+                            out.pendingBytes += buffer.remaining();
+                            out.pending.add(ByteBufferUtil.getArray(buffer));
+                            buffer.clear();
+
+                            out.notify();
+                        }
+                    }
+                    catch (InterruptedException e)
+                    {
+                        throw new UncheckedInterruptedException(e);
+                    }
+                }
+
+                public synchronized Out acquire()
+                {
+                    if (inUse)
+                        throw new IllegalStateException();
+                    inUse = true;
+                    thread = Thread.currentThread();
+                    return this;
+                }
+
+                public synchronized void close() throws IOException
+                {
+                    flush();
+                    inUse = false;
+                }
+
+                void realClose()
+                {
+                    synchronized (out)
+                    {
+                        out.isClosed = true;
+                        out.notifyAll();
+                    }
+                }
+
+                @Override
+                public int writeToChannel(Write write, RateLimiter limiter) throws IOException
+                {
+                    class Holder
+                    {
+                        ByteBuffer buffer;
+                    }
+                    Holder holder = new Holder();
+
+                    write.write(size -> {
+                        if (holder.buffer != null)
+                            throw new IllegalStateException("Can only allocate one ByteBuffer");
+                        holder.buffer = ByteBuffer.allocate(size);
+                        return holder.buffer;
+                    });
+
+                    ByteBuffer buffer = holder.buffer;
+                    int length = buffer.limit();
+                    write(buffer);
+                    return length;
+                }
+
+                // TODO (future): support RateLimiter
+                @Override
+                public long writeFileToChannel(FileChannel file, RateLimiter limiter) throws IOException
+                {
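+                    // Copy the file into the stream one internal-buffer-sized chunk at a time; the RateLimiter is not applied yet (see TODO).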
+                    long count = 0;
+                    while (file.read(buffer) >= 0)
+                    {
+                        count += buffer.position();
+                        doFlush(0);
+                    }
+                    return count;
+                }
+            }
+
+            class In extends RebufferingInputStream implements StreamingDataInputPlus
+            {
+                private final Buffer in;
+                private Thread thread;
+
+                In(Buffer in)
+                {
+                    super(ByteBuffer.allocate(0));
+                    this.in = in;
+                }
+
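+                // Block until the writer has queued bytes (or the channel is closed), then take the next chunk and wake any writer waiting for buffer space.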
+                protected void reBuffer() throws IOException
+                {
+                    try
+                    {
+                        synchronized (in)
+                        {
+                            while (in.pendingBytes == 0 && !in.isClosed)
+                                in.wait();
+
+                            if (in.pendingBytes == 0)
+                                throw new ClosedChannelException();
+
+                            byte[] bytes = in.pending.poll();
+                            if (bytes == null)
+                                throw new IllegalStateException();
+
+                            in.pendingBytes -= bytes.length;
+                            buffer = ByteBuffer.wrap(bytes);
+                            in.notify();
+                        }
+                    }
+                    catch (InterruptedException e)
+                    {
+                        throw new UncheckedInterruptedException(e);
+                    }
+                }
+
+                public void close()
+                {
+                    DirectStreamingChannel.this.close();
+                }
+
+                public void realClose()
+                {
+                    synchronized (in)
+                    {
+                        in.isClosed = true;
+                        in.notifyAll();
+                    }
+                }
+            }
+
+            final InetSocketAddress remoteAddress;
+
+            private final In in;
+            private final Out out;
+            private final Integer id = nextId.incrementAndGet();
+            Runnable onClose;
+            boolean isClosed;
+
+            DirectStreamingChannel(InetSocketAddress remoteAddress, Buffer outBuffer, Buffer inBuffer)
+            {
+                this.remoteAddress = remoteAddress;
+                this.in = new In(inBuffer);
+                this.out = new Out(outBuffer);
+            }
+
+            public StreamingDataOutputPlus acquireOut()
+            {
+                return out.acquire();
+            }
+
+            @Override
+            public synchronized Future<?> send(Send send) throws IOException
+            {
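+                // Serialise the payload into a single fixed-size buffer, then write it through the acquired output stream (flushed on close).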
+                class Factory implements IntFunction<StreamingDataOutputPlus>
+                {
+                    ByteBuffer buffer;
+                    @Override
+                    public StreamingDataOutputPlus apply(int size)
+                    {
+                        buffer = ByteBuffer.allocate(size);
+                        return new StreamingDataOutputPlusFixed(buffer);
+                    }
+                }
+                Factory factory = new Factory();
+                send.send(factory);
+                factory.buffer.flip();
+                try (StreamingDataOutputPlus out = acquireOut())
+                {
+                    out.write(factory.buffer);
+                }
+                return ImmediateFuture.success(true);
+            }
+
+            @Override
+            public Object id()
+            {
+                return id;
+            }
+
+            @Override
+            public String description()
+            {
+                return remoteAddress.getAddress().getHostAddress() + "/in@" + id;
+            }
+
+            public StreamingDataInputPlus in()
+            {
+                in.thread = Thread.currentThread();
+                return in;
+            }
+
+            public InetSocketAddress peer()
+            {
+                return remoteAddress;
+            }
+
+            @Override
+            public InetSocketAddress connectedTo()
+            {
+                return remoteAddress;
+            }
+
+            @Override
+            public boolean connected()
+            {
+                return true;
+            }
+
+            @Override
+            public Future<?> close()
+            {
+                in.realClose();
+                out.realClose();
+                synchronized (this)
+                {
+                    if (!isClosed)
+                    {
+                        isClosed = true;
+                        if (onClose != null)
+                            onClose.run();
+                    }
+                }
+                return ImmediateFuture.success(null);
+            }
+
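+            // Register a callback for channel close; if already closed it runs immediately, otherwise callbacks are chained in registration order.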
+            @Override
+            public synchronized void onClose(Runnable runOnClose)
+            {
+                if (isClosed) runOnClose.run();
+                else if (onClose == null) onClose = runOnClose;
+                else { Runnable tmp = onClose; onClose = () -> { tmp.run(); runOnClose.run(); }; }
+            }
+        }
+
+        private final DirectStreamingChannel outToRecipient, outToOriginator;
+
+        DirectConnection(int protocolVersion, long sendBufferSize, InetSocketAddress originator, InetSocketAddress recipient)
+        {
+            this.protocolVersion = protocolVersion;
+            this.sendBufferSize = sendBufferSize;
+            Buffer buffer1 = new Buffer(), buffer2 = new Buffer();
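+            // Cross-wire the two buffers so bytes written by one endpoint are read by the other.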
+            outToRecipient = new DirectStreamingChannel(recipient, buffer1, buffer2);
+            outToOriginator = new DirectStreamingChannel(originator, buffer2, buffer1);
+        }
+
+        StreamingChannel get(InetSocketAddress remoteAddress)
+        {
+            if (remoteAddress.equals(outToOriginator.remoteAddress)) return outToOriginator;
+            else if (remoteAddress.equals(outToRecipient.remoteAddress)) return outToRecipient;
+            else throw new IllegalArgumentException();
+        }
+    }
+
+    public class Factory implements StreamingChannel.Factory
+    {
+        final InetSocketAddress from;
+        Factory(InetSocketAddress from)
+        {
+            this.from = from;
+        }
+
+        @Override
+        public StreamingChannel create(InetSocketAddress to, int messagingVersion, StreamingChannel.Kind kind)
+        {
+            long sendBufferSize = new OutboundConnectionSettings(getByAddress(to)).socketSendBufferSizeInBytes();
+            if (sendBufferSize <= 0)
+                sendBufferSize = 1 << 14;
+
+            DirectConnection connection = new DirectConnection(messagingVersion, sendBufferSize, from, to);
+            IInvokableInstance instance = cluster.get(to);
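+            // Hand the far end of the connection to the receiving instance, which starts a StreamDeserializingTask thread on its own class loader to consume it; the near end is returned to the caller.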
+            instance.unsafeAcceptOnThisThread((channel, version) -> executorFactory().startThread(channel.description(), new StreamDeserializingTask(null, channel, version)),
+                         connection.get(from), messagingVersion);
+            return connection.get(to);
+        }
+    }
+
+    final ICluster<IInvokableInstance> cluster;
+    final int protocolVersion;
+
+    private DirectStreamingConnectionFactory(ICluster<IInvokableInstance> cluster)
+    {
+        this.cluster = cluster;
+        // we don't invoke this on the host ClassLoader as it initialises state like DatabaseDescriptor,
+        // potentially leading to resource leaks on the hosts (particularly in validateHeader which runs on the host threads)
+        this.protocolVersion = current_version;
+    }
+
+    public static Function<IInvokableInstance, StreamingChannel.Factory> create(ICluster<IInvokableInstance> cluster)
+    {
+        return cluster.get(1).unsafeApplyOnThisThread(c -> new DirectStreamingConnectionFactory(c)::get, cluster);
+    }
+
+    public static void setup(ICluster<IInvokableInstance> cluster)
+    {
+        Function<IInvokableInstance, StreamingChannel.Factory> streamingConnectionFactory = create(cluster);
+        cluster.stream().forEach(i -> i.unsafeAcceptOnThisThread(StreamingChannel.Factory.Global::unsafeSet, streamingConnectionFactory.apply(i)));
+    }
+
+    public Factory get(IInvokableInstance instance)
+    {
+        return new Factory(instance.config().broadcastAddress());
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/DistributedTestSnitch.java b/test/distributed/org/apache/cassandra/distributed/impl/DistributedTestSnitch.java
index 0dfaa7e..6a892c4 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/DistributedTestSnitch.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/DistributedTestSnitch.java
@@ -56,7 +56,7 @@
         InetSocketAddress m = cache.get(addressAndPort);
         if (m == null)
         {
-            m = NetworkTopology.addressAndPort(addressAndPort.address, addressAndPort.port);
+            m = NetworkTopology.addressAndPort(addressAndPort.getAddress(), addressAndPort.getPort());
             cache.put(addressAndPort, m);
         }
         return m;
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java b/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
index 0f48a23..db2dfaa 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
@@ -18,16 +18,15 @@
 
 package org.apache.cassandra.distributed.impl;
 
-import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.io.UncheckedIOException;
 import java.util.Objects;
 import java.util.function.Predicate;
 
 import com.google.common.io.Closeables;
 
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.utils.AbstractIterator;
 import org.apache.cassandra.distributed.api.LogAction;
 import org.apache.cassandra.distributed.api.LineIterator;
@@ -50,36 +49,21 @@
     @Override
     public LineIterator match(long startPosition, Predicate<String> fn)
     {
-        RandomAccessFile reader;
-        try
-        {
-            reader = new RandomAccessFile(file, "r");
-        }
-        catch (FileNotFoundException e)
-        {
-            // if file isn't present, don't return an empty stream as it looks the same as no log lines matched
-            throw new UncheckedIOException(e);
-        }
+        RandomAccessReader reader;
+        reader = RandomAccessReader.open(file);
         if (startPosition > 0) // -1 used to disable, so ignore any negative values or 0 (default offset)
         {
-            try
-            {
-                reader.seek(startPosition);
-            }
-            catch (IOException e)
-            {
-                throw new UncheckedIOException("Unable to seek to " + startPosition, e);
-            }
+            reader.seek(startPosition);
         }
         return new FileLineIterator(reader, fn);
     }
 
     private static final class FileLineIterator extends AbstractIterator<String> implements LineIterator
     {
-        private final RandomAccessFile reader;
+        private final RandomAccessReader reader;
         private final Predicate<String> fn;
 
-        private FileLineIterator(RandomAccessFile reader, Predicate<String> fn)
+        private FileLineIterator(RandomAccessReader reader, Predicate<String> fn)
         {
             this.reader = reader;
             this.fn = fn;
@@ -88,14 +72,7 @@
         @Override
         public long mark()
         {
-            try
-            {
-                return reader.getFilePointer();
-            }
-            catch (IOException e)
-            {
-                throw new UncheckedIOException(e);
-            }
+            return reader.getFilePointer();
         }
 
         @Override
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/INodeProvisionStrategy.java b/test/distributed/org/apache/cassandra/distributed/impl/INodeProvisionStrategy.java
index 32f82c0..1ec844e 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/INodeProvisionStrategy.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/INodeProvisionStrategy.java
@@ -18,9 +18,13 @@
 
 package org.apache.cassandra.distributed.impl;
 
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+
+@Shared(inner = INTERFACES)
 public interface INodeProvisionStrategy
 {
-
     public enum Strategy
     {
         OneNetworkInterface
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
index b8833dd..d4cb1cb 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
@@ -19,12 +19,12 @@
 package org.apache.cassandra.distributed.impl;
 
 import java.io.ByteArrayOutputStream;
-import java.io.Closeable;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.nio.file.FileSystem;
 import java.security.Permission;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -32,13 +32,13 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Stream;
 import javax.management.ListenerNotFoundException;
 import javax.management.Notification;
 import javax.management.NotificationListener;
@@ -49,9 +49,13 @@
 import org.slf4j.LoggerFactory;
 
 import io.netty.util.concurrent.GlobalEventExecutor;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.auth.AuthCache;
 import org.apache.cassandra.batchlog.Batch;
 import org.apache.cassandra.batchlog.BatchlogManager;
+import org.apache.cassandra.concurrent.ExecutorFactory;
 import org.apache.cassandra.concurrent.ExecutorLocals;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.concurrent.SharedExecutorPool;
 import org.apache.cassandra.concurrent.Stage;
@@ -64,13 +68,12 @@
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.Memtable;
 import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.db.SystemKeyspaceMigrator40;
+import org.apache.cassandra.db.SystemKeyspaceMigrator41;
 import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.compaction.CompactionLogger;
 import org.apache.cassandra.db.compaction.CompactionManager;
-import org.apache.cassandra.dht.IPartitioner;
-import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.db.memtable.AbstractAllocatorMemtable;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.Constants;
 import org.apache.cassandra.distributed.action.GossipHelper;
@@ -87,9 +90,8 @@
 import org.apache.cassandra.distributed.mock.nodetool.InternalNodeProbe;
 import org.apache.cassandra.distributed.mock.nodetool.InternalNodeProbeFactory;
 import org.apache.cassandra.distributed.shared.Metrics;
-import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.distributed.shared.ThrowingRunnable;
 import org.apache.cassandra.gms.Gossiper;
-import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.hints.DTestSerializer;
 import org.apache.cassandra.hints.HintsService;
 import org.apache.cassandra.index.SecondaryIndexManager;
@@ -99,9 +101,12 @@
 import org.apache.cassandra.io.util.DataInputBuffer;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.PathUtils;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.metrics.CassandraMetricsRegistry;
+import org.apache.cassandra.metrics.Sampler;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.NoPayload;
@@ -112,14 +117,21 @@
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.CassandraDaemon;
 import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.service.DefaultFSErrorHandler;
 import org.apache.cassandra.service.PendingRangeCalculatorService;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.service.StorageServiceMBean;
+import org.apache.cassandra.service.paxos.PaxosRepair;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.service.paxos.uncommitted.UncommittedTableData;
+import org.apache.cassandra.service.reads.thresholds.CoordinatorWarnings;
+import org.apache.cassandra.service.snapshot.SnapshotManager;
+import org.apache.cassandra.streaming.StreamManager;
 import org.apache.cassandra.streaming.StreamReceiveTask;
 import org.apache.cassandra.streaming.StreamTransferTask;
-import org.apache.cassandra.streaming.async.StreamingInboundHandler;
+import org.apache.cassandra.streaming.async.NettyStreamingChannel;
 import org.apache.cassandra.tools.NodeTool;
 import org.apache.cassandra.tools.Output;
 import org.apache.cassandra.tools.SystemExitException;
@@ -127,24 +139,30 @@
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteArrayUtil;
+import org.apache.cassandra.utils.Closeable;
 import org.apache.cassandra.utils.DiagnosticSnapshotService;
 import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.Throwables;
-import org.apache.cassandra.utils.UUIDSerializer;
 import org.apache.cassandra.utils.concurrent.Ref;
 import org.apache.cassandra.utils.memory.BufferPools;
 import org.apache.cassandra.utils.progress.jmx.JMXBroadcastExecutor;
 
 import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.distributed.api.Feature.BLANK_GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.impl.DistributedTestSnitch.fromCassandraInetAddressAndPort;
 import static org.apache.cassandra.distributed.impl.DistributedTestSnitch.toCassandraInetAddressAndPort;
 import static org.apache.cassandra.net.Verb.BATCH_STORE_REQ;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
+/**
+ * This class is instantiated on the relevant classloader, so its methods invoke the correct target classes automatically
+ */
 public class Instance extends IsolatedExecutor implements IInvokableInstance
 {
     private Logger inInstancelogger; // Defer creation until running in the instance context
@@ -153,12 +171,23 @@
     private volatile boolean internodeMessagingStarted = false;
     private final AtomicLong startedAt = new AtomicLong();
 
-    // should never be invoked directly, so that it is instantiated on other class loader;
-    // only visible for inheritance
+    @Deprecated
     Instance(IInstanceConfig config, ClassLoader classLoader)
     {
-        super("node" + config.num(), classLoader);
+        this(config, classLoader, null);
+    }
+
+    Instance(IInstanceConfig config, ClassLoader classLoader, FileSystem fileSystem)
+    {
+        this(config, classLoader, fileSystem, null);
+    }
+
+    Instance(IInstanceConfig config, ClassLoader classLoader, FileSystem fileSystem, ShutdownExecutor shutdownExecutor)
+    {
+        super("node" + config.num(), classLoader, executorFactory().pooled("isolatedExecutor", Integer.MAX_VALUE), shutdownExecutor);
         this.config = config;
+        if (fileSystem != null)
+            File.unsafeSetFilesystem(fileSystem);
         Object clusterId = Objects.requireNonNull(config.get(Constants.KEY_DTEST_API_CLUSTER_ID), "cluster_id is not defined");
         ClusterIDDefiner.setId("cluster-" + clusterId);
         InstanceIDDefiner.setInstanceId(config.num());
@@ -173,7 +202,7 @@
 
         // Enable streaming inbound handler tracking so they can be closed properly without leaking
         // the blocking IO thread.
-        StreamingInboundHandler.trackInboundHandlers();
+        NettyStreamingChannel.trackInboundHandlers();
     }
 
     @Override
@@ -191,7 +220,13 @@
         String suite = System.getProperty("suitename", "suitename_IS_UNDEFINED");
         String clusterId = ClusterIDDefiner.getId();
         String instanceId = InstanceIDDefiner.getInstanceId();
-        return new FileLogAction(new File(String.format("build/test/logs/%s/%s/%s/%s/system.log", tag, suite, clusterId, instanceId)));
+        File f = new File(String.format("build/test/logs/%s/%s/%s/%s/system.log", tag, suite, clusterId, instanceId));
+        // when a cluster is created globally in a test class, the logs are written without the suite component, so try that path as well:
+        if (!f.exists())
+            f = new File(String.format("build/test/logs/%s/%s/%s/system.log", tag, clusterId, instanceId));
+        if (!f.exists())
+            throw new AssertionError("Unable to locate system.log under " + new File("build/test/logs").absolutePath() + "; make sure ICluster.setup() is called or extend TestBaseImpl and do not define a static beforeClass function with @BeforeClass");
+        return new FileLogAction(f);
     }
 
     @Override
@@ -217,12 +252,34 @@
     @Override
     public SimpleQueryResult executeInternalWithResult(String query, Object... args)
     {
-        return sync(() -> {
+        return sync(() -> unsafeExecuteInternalWithResult(query, args)).call();
+    }
+
+    public static SimpleQueryResult unsafeExecuteInternalWithResult(String query, Object ... args)
+    {
+        ClientWarn.instance.captureWarnings();
+        CoordinatorWarnings.init();
+        try
+        {
             QueryHandler.Prepared prepared = QueryProcessor.prepareInternal(query);
             ResultMessage result = prepared.statement.executeLocally(QueryProcessor.internalQueryState(),
                                                                      QueryProcessor.makeInternalOptions(prepared.statement, args));
+            CoordinatorWarnings.done();
+
+            if (result != null)
+                result.setWarnings(ClientWarn.instance.getWarnings());
             return RowUtil.toQueryResult(result);
-        }).call();
+        }
+        catch (Exception | Error e)
+        {
+            CoordinatorWarnings.done();
+            throw e;
+        }
+        finally
+        {
+            CoordinatorWarnings.reset();
+            ClientWarn.instance.resetWarnings();
+        }
     }
 
     @Override
@@ -265,7 +322,7 @@
         }).run();
     }
 
-    private void registerMockMessaging(ICluster cluster)
+    private void registerMockMessaging(ICluster<?> cluster)
     {
         MessagingService.instance().outboundSink.add((message, to) -> {
             if (!internodeMessagingStarted)
@@ -274,17 +331,16 @@
                                        message, to);
                 return false;
             }
-            InetSocketAddress toAddr = fromCassandraInetAddressAndPort(to);
-            IInstance toInstance = cluster.get(toAddr);
-            if (toInstance != null)
-                toInstance.receiveMessage(serializeMessage(message.from(), to, message));
+            cluster.deliverMessage(to, serializeMessage(message.from(), to, message));
             return false;
         });
     }
 
-    private void registerInboundFilter(ICluster cluster)
+    private void registerInboundFilter(ICluster<?> cluster)
     {
         MessagingService.instance().inboundSink.add(message -> {
+            if (!cluster.filters().hasInbound())
+                return true;
             if (isShutdown())
                 return false;
             IMessage serialized = serializeMessage(message.from(), toCassandraInetAddressAndPort(broadcastAddress()), message);
@@ -301,12 +357,12 @@
     {
         MessagingService.instance().outboundSink.add((message, to) -> {
             if (isShutdown())
-                return false;
+                return false; // TODO: Simulator needs this to trigger a failure
             IMessage serialzied = serializeMessage(message.from(), to, message);
             int fromNum = config.num(); // since this instance is sending the message, from will always be this instance
             IInstance toInstance = cluster.get(fromCassandraInetAddressAndPort(to));
             if (toInstance == null)
-                return false;
+                return false; // TODO: Simulator needs this to trigger a failure
             int toNum = toInstance.config().num();
             return cluster.filters().permitOutbound(fromNum, toNum, serialzied);
         });
@@ -314,10 +370,10 @@
 
     public void uncaughtException(Thread thread, Throwable throwable)
     {
-        sync(CassandraDaemon::uncaughtException).accept(thread, throwable);
+        sync(JVMStabilityInspector::uncaughtException).accept(thread, throwable);
     }
 
-    private static IMessage serializeMessage(InetAddressAndPort from, InetAddressAndPort to, Message<?> messageOut)
+    public static IMessage serializeMessage(InetAddressAndPort from, InetAddressAndPort to, Message<?> messageOut)
     {
         int fromVersion = MessagingService.instance().versions.get(from);
         int toVersion = MessagingService.instance().versions.get(to);
@@ -331,6 +387,7 @@
                                    ByteArrayUtil.EMPTY_BYTE_ARRAY,
                                    messageOut.id(),
                                    toVersion,
+                                   messageOut.expiresAtNanos(),
                                    fromCassandraInetAddressAndPort(from));
         }
 
@@ -353,7 +410,7 @@
                     {
                         reserialize(batch, out, toVersion);
                         byte[] bytes = out.toByteArray();
-                        return new MessageImpl(messageOut.verb().id, bytes, messageOut.id(), toVersion, fromCassandraInetAddressAndPort(from));
+                        return new MessageImpl(messageOut.verb().id, bytes, messageOut.id(), toVersion, messageOut.expiresAtNanos(), fromCassandraInetAddressAndPort(from));
                     }
                 }
             }
@@ -364,7 +421,7 @@
                 throw new AssertionError(String.format("Message serializedSize(%s) does not match what was written with serialize(out, %s) for verb %s and serializer %s; " +
                                                        "expected %s, actual %s", toVersion, toVersion, messageOut.verb(), Message.serializer.getClass(),
                                                        messageOut.serializedSize(toVersion), bytes.length));
-            return new MessageImpl(messageOut.verb().id, bytes, messageOut.id(), toVersion, fromCassandraInetAddressAndPort(from));
+            return new MessageImpl(messageOut.verb().id, bytes, messageOut.id(), toVersion, messageOut.expiresAtNanos(), fromCassandraInetAddressAndPort(from));
         }
         catch (IOException e)
         {
@@ -381,7 +438,7 @@
     {
         assert !batch.isLocal() : "attempted to reserialize a 'local' batch";
 
-        UUIDSerializer.serializer.serialize(batch.id, out, version);
+        batch.id.serialize(out);
         out.writeLong(batch.creationTime);
 
         out.writeUnsignedVInt(batch.getEncodedMutations().size());
@@ -408,32 +465,52 @@
     @Override
     public void receiveMessage(IMessage message)
     {
-        sync(() -> receiveMessageWithInvokingThread(message)).run();
+        sync(receiveMessageRunnable(message)).accept(false);
     }
 
     @Override
     public void receiveMessageWithInvokingThread(IMessage message)
     {
-        if (!internodeMessagingStarted)
-        {
-            inInstancelogger.debug("Dropping inbound message {} to {} as internode messaging has not been started yet",
-                                   message, config().broadcastAddress());
-            return;
-        }
-        if (message.version() > MessagingService.current_version)
-        {
-            throw new IllegalStateException(String.format("Node%d received message version %d but current version is %d",
-                                                          this.config.num(),
-                                                          message.version(),
-                                                          MessagingService.current_version));
-        }
+        if (classLoader != Thread.currentThread().getContextClassLoader())
+            throw new IllegalStateException("Must be invoked by a Thread utilising the node's class loader");
+        receiveMessageRunnable(message).accept(true);
+    }
 
-        Message<?> messageIn = deserializeMessage(message);
-        Message.Header header = messageIn.header;
-        TraceState state = Tracing.instance.initializeFromMessage(header);
-        if (state != null) state.trace("{} message received from {}", header.verb, header.from);
-        header.verb.stage.execute(() -> MessagingService.instance().inboundSink.accept(messageIn),
-                                  ExecutorLocals.create(state));
+    private SerializableConsumer<Boolean> receiveMessageRunnable(IMessage message)
+    {
+        return runOnCaller -> {
+            if (!internodeMessagingStarted)
+            {
+                inInstancelogger.debug("Dropping inbound message {} to {} as internode messaging has not been started yet",
+                             message, config().broadcastAddress());
+                return;
+            }
+            if (message.version() > MessagingService.current_version)
+            {
+                throw new IllegalStateException(String.format("Node%d received message version %d but current version is %d",
+                                                              this.config.num(),
+                                                              message.version(),
+                                                              MessagingService.current_version));
+            }
+
+            Message<?> messageIn = deserializeMessage(message);
+            Message.Header header = messageIn.header;
+            TraceState state = Tracing.instance.initializeFromMessage(header);
+            if (state != null)
+                state.trace("{} message received from {}", header.verb, header.from);
+
+            if (runOnCaller)
+            {
+                try (Closeable close = ExecutorLocals.create(state))
+                {
+                    MessagingService.instance().inboundSink.accept(messageIn);
+                }
+            }
+            else
+            {
+                header.verb.stage.executor().execute(ExecutorLocals.create(state), () -> MessagingService.instance().inboundSink.accept(messageIn));
+            }
+        };
     }
 
     public int getMessagingVersion()
@@ -451,43 +528,53 @@
             MessagingService.instance().versions.set(toCassandraInetAddressAndPort(endpoint), version);
         else
             inInstancelogger.warn("Skipped setting messaging version for {} to {} as daemon not initialized yet. Stacktrace attached for debugging.",
-                                  endpoint, version, new RuntimeException());
+                        endpoint, version, new RuntimeException());
     }
 
     @Override
     public String getReleaseVersionString()
     {
-        return callsOnInstance(() -> FBUtilities.getReleaseVersionString()).call();
+        return FBUtilities.getReleaseVersionString();
     }
 
     public void flush(String keyspace)
     {
-        runOnInstance(() -> FBUtilities.waitOnFutures(Keyspace.open(keyspace).flush()));
+        Util.flushKeyspace(keyspace);
     }
 
     public void forceCompact(String keyspace, String table)
     {
-        runOnInstance(() -> {
-            try
-            {
-                Keyspace.open(keyspace).getColumnFamilyStore(table).forceMajorCompaction();
-            }
-            catch (Exception e)
-            {
-                throw new RuntimeException(e);
-            }
-        });
+        try
+        {
+            Keyspace.open(keyspace).getColumnFamilyStore(table).forceMajorCompaction();
+        }
+        catch (Exception e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public ExecutorPlus executorFor(int verbId)
+    {
+        return Verb.fromId(verbId).stage.executor();
     }
 
     @Override
     public void startup(ICluster cluster)
     {
+        // Defer initialisation of Clock.Global until cluster/instance identifiers are set.
+        // Otherwise, the instance classloader's logging classes are set up ahead of time and
+        // the patterns/file paths are not set correctly. This will be addressed in a subsequent
+        // commit to extend the functionality of the @Shared annotation to app classes.
         assert startedAt.compareAndSet(0L, System.nanoTime()) : "startedAt uninitialized";
 
         sync(() -> {
             inInstancelogger = LoggerFactory.getLogger(Instance.class);
             try
             {
+                // org.apache.cassandra.distributed.impl.AbstractCluster.startup sets the exception handler for the thread
+                // so extract it to populate ExecutorFactory.Global
+                ExecutorFactory.Global.tryUnsafeSet(new ExecutorFactory.Default(Thread.currentThread().getContextClassLoader(), null, Thread.getDefaultUncaughtExceptionHandler()));
                 if (config.has(GOSSIP))
                 {
                     // TODO: hacky
@@ -513,7 +600,7 @@
                 // We need to persist this as soon as possible after startup checks.
                 // This should be the first write to SystemKeyspace (CASSANDRA-11742)
                 SystemKeyspace.persistLocalMetadata(config::hostId);
-                SystemKeyspaceMigrator40.migrate();
+                SystemKeyspaceMigrator41.migrate();
 
                 // Same order to populate tokenMetadata for the first time,
                 // see org.apache.cassandra.service.CassandraDaemon.setup
@@ -547,6 +634,15 @@
                 // Re-populate token metadata after commit log recover (new peers might be loaded onto system keyspace #10293)
                 StorageService.instance.populateTokenMetadata();
 
+                try
+                {
+                    PaxosState.maybeRebuildUncommittedState();
+                }
+                catch (IOException e)
+                {
+                    throw new RuntimeException(e);
+                }
+
                 Verb.HINT_REQ.unsafeSetSerializer(DTestSerializer::new);
 
                 if (config.has(NETWORK))
@@ -567,38 +663,51 @@
                     propagateMessagingVersions(cluster); // fake messaging needs to know messaging version for filters
                 }
                 internodeMessagingStarted = true;
-
-                JVMStabilityInspector.replaceKiller(new InstanceKiller());
+                JVMStabilityInspector.replaceKiller(new InstanceKiller(Instance.this::shutdown));
 
                 // TODO: this is more than just gossip
                 StorageService.instance.registerDaemon(CassandraDaemon.getInstanceForTesting());
                 if (config.has(GOSSIP))
                 {
-                    MigrationCoordinator.setUptimeFn(() -> TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startedAt.get()));
-                    StorageService.instance.initServer();
+                    MigrationCoordinator.setUptimeFn(() -> TimeUnit.NANOSECONDS.toMillis(nanoTime() - startedAt.get()));
+                    try
+                    {
+                        StorageService.instance.initServer();
+                    }
+                    catch (Exception e)
+                    {
+                        // I am tired of looking up my notes for how to fix this... so why not tell the user?
+                        Throwable cause = com.google.common.base.Throwables.getRootCause(e);
+                        if (cause instanceof BindException && "Can't assign requested address".equals(cause.getMessage()))
+                            throw new RuntimeException("Unable to bind, run the following in a terminal and try again:\nfor subnet in $(seq 0 5); do for id in $(seq 0 5); do sudo ifconfig lo0 alias \"127.0.$subnet.$id\"; done; done;", e);
+                        throw e;
+                    }
                     StorageService.instance.removeShutdownHook();
+
                     Gossiper.waitToSettle();
                 }
                 else
                 {
-                    cluster.stream().forEach(peer -> {
-                        if (cluster instanceof Cluster)
-                            GossipHelper.statusToNormal((IInvokableInstance) peer).accept(this);
-                        else
-                            GossipHelper.unsafeStatusToNormal(this, (IInstance) peer);
-                    });
+                    Schema.instance.startSync();
+                    Stream peers = cluster.stream().filter(instance -> ((IInstance) instance).isValid());
+                    SystemKeyspace.setLocalHostId(config.hostId());
+                    if (config.has(BLANK_GOSSIP))
+                        peers.forEach(peer -> GossipHelper.statusToBlank((IInvokableInstance) peer).accept(this));
+                    else if (cluster instanceof Cluster)
+                        peers.forEach(peer -> GossipHelper.statusToNormal((IInvokableInstance) peer).accept(this));
+                    else
+                        peers.forEach(peer -> GossipHelper.unsafeStatusToNormal(this, (IInstance) peer));
 
                     StorageService.instance.setUpDistributedSystemKeyspaces();
                     StorageService.instance.setNormalModeUnsafe();
+                    Gossiper.instance.register(StorageService.instance);
+                    StorageService.instance.startSnapshotManager();
                 }
 
                 // Populate tokenMetadata for the second time,
                 // see org.apache.cassandra.service.CassandraDaemon.setup
                 StorageService.instance.populateTokenMetadata();
 
-                SystemKeyspace.finishStartup();
-
-                StorageService.instance.doAuthSetup(false);
                 CassandraDaemon.getInstanceForTesting().completeSetup();
 
                 if (config.has(NATIVE_PROTOCOL))
@@ -607,11 +716,16 @@
                     CassandraDaemon.getInstanceForTesting().start();
                 }
 
-                if (!FBUtilities.getBroadcastAddressAndPort().address.equals(broadcastAddress().getAddress()) ||
-                    FBUtilities.getBroadcastAddressAndPort().port != broadcastAddress().getPort())
+                if (!FBUtilities.getBroadcastAddressAndPort().getAddress().equals(broadcastAddress().getAddress()) ||
+                    FBUtilities.getBroadcastAddressAndPort().getPort() != broadcastAddress().getPort())
                     throw new IllegalStateException(String.format("%s != %s", FBUtilities.getBroadcastAddressAndPort(), broadcastAddress()));
 
                 ActiveRepairService.instance.start();
+                StreamManager.instance.start();
+
+                PaxosState.startAutoRepairs();
+
+                CassandraDaemon.getInstanceForTesting().completeSetup();
             }
             catch (Throwable t)
             {
@@ -651,101 +765,30 @@
         });
     }
 
+    @Override
+    public void postStartup()
+    {
+        StorageService.instance.doAuthSetup(false);
+    }
+
     private void mkdirs()
     {
-        new File(config.getString("saved_caches_directory")).mkdirs();
-        new File(config.getString("hints_directory")).mkdirs();
-        new File(config.getString("commitlog_directory")).mkdirs();
+        new File(config.getString("saved_caches_directory")).tryCreateDirectories();
+        new File(config.getString("hints_directory")).tryCreateDirectories();
+        new File(config.getString("commitlog_directory")).tryCreateDirectories();
         for (String dir : (String[]) config.get("data_file_directories"))
-            new File(dir).mkdirs();
+            new File(dir).tryCreateDirectories();
     }
 
     private Config loadConfig(IInstanceConfig overrides)
     {
-        Map<String,Object> params = overrides.getParams();
+        Map<String, Object> params = overrides.getParams();
         boolean check = true;
         if (overrides.get(Constants.KEY_DTEST_API_CONFIG_CHECK) != null)
             check = (boolean) overrides.get(Constants.KEY_DTEST_API_CONFIG_CHECK);
         return YamlConfigurationLoader.fromMap(params, check, Config.class);
     }
 
-    public static void addToRing(boolean bootstrapping, IInstance peer)
-    {
-        try
-        {
-            IInstanceConfig config = peer.config();
-            IPartitioner partitioner = FBUtilities.newPartitioner(config.getString("partitioner"));
-            Token token = partitioner.getTokenFactory().fromString(config.getString("initial_token"));
-            InetAddressAndPort addressAndPort = toCassandraInetAddressAndPort(peer.broadcastAddress());
-
-            UUID hostId = config.hostId();
-            Gossiper.runInGossipStageBlocking(() -> {
-                Gossiper.instance.initializeNodeUnsafe(addressAndPort, hostId, 1);
-                Gossiper.instance.injectApplicationState(addressAndPort,
-                                                         ApplicationState.TOKENS,
-                                                         new VersionedValue.VersionedValueFactory(partitioner).tokens(Collections.singleton(token)));
-                StorageService.instance.onChange(addressAndPort,
-                                                 ApplicationState.STATUS,
-                                                 bootstrapping
-                                                 ? new VersionedValue.VersionedValueFactory(partitioner).bootstrapping(Collections.singleton(token))
-                                                 : new VersionedValue.VersionedValueFactory(partitioner).normal(Collections.singleton(token)));
-                Gossiper.instance.realMarkAlive(addressAndPort, Gossiper.instance.getEndpointStateForEndpoint(addressAndPort));
-            });
-            int messagingVersion = peer.isShutdown()
-                    ? MessagingService.current_version
-                    : Math.min(MessagingService.current_version, peer.getMessagingVersion());
-            MessagingService.instance().versions.set(addressAndPort, messagingVersion);
-
-            assert bootstrapping || StorageService.instance.getTokenMetadata().isMember(addressAndPort);
-            PendingRangeCalculatorService.instance.blockUntilFinished();
-        }
-        catch (Throwable e) // UnknownHostException
-        {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public static void removeFromRing(IInstance peer)
-    {
-        try
-        {
-            IInstanceConfig config = peer.config();
-            IPartitioner partitioner = FBUtilities.newPartitioner(config.getString("partitioner"));
-            Token token = partitioner.getTokenFactory().fromString(config.getString("initial_token"));
-            InetAddressAndPort addressAndPort = toCassandraInetAddressAndPort(peer.broadcastAddress());
-
-            Gossiper.runInGossipStageBlocking(() -> {
-                StorageService.instance.onChange(addressAndPort,
-                        ApplicationState.STATUS,
-                        new VersionedValue.VersionedValueFactory(partitioner).left(Collections.singleton(token), 0L));
-            });
-        }
-        catch (Throwable e) // UnknownHostException
-        {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public static void addToRingNormal(IInstance peer)
-    {
-        addToRing(false, peer);
-        assert StorageService.instance.getTokenMetadata().isMember(toCassandraInetAddressAndPort(peer.broadcastAddress()));
-    }
-
-    public static void addToRingBootstrapping(IInstance peer)
-    {
-        addToRing(true, peer);
-    }
-
-    private static void initializeRing(ICluster cluster)
-    {
-        for (int i = 1 ; i <= cluster.size() ; ++i)
-            addToRing(false, cluster.get(i));
-
-        for (int i = 1; i <= cluster.size(); ++i)
-            assert StorageService.instance.getTokenMetadata().isMember(toCassandraInetAddressAndPort(cluster.get(i).broadcastAddress()));
-    }
-
     public Future<Void> shutdown()
     {
         return shutdown(true);
@@ -754,9 +797,7 @@
     @Override
     public Future<Void> shutdown(boolean graceful)
     {
-        if (!graceful)
-            MessagingService.instance().shutdown(1L, MINUTES, false, true);
-
+        inInstancelogger.info("Shutting down instance {} / {}", config.num(), config.broadcastAddress().getHostString());
         Future<?> future = async((ExecutorService executor) -> {
             Throwable error = null;
 
@@ -776,34 +817,54 @@
                 Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
             }
 
+            // trigger init early, or else it could try to init later and touch a thread pool that has already been shut down
+            HintsService hints = HintsService.instance;
+            ThrowingRunnable shutdownHints = () -> {
+                // allow shutdown to proceed even if hints were already halted
+                try
+                {
+                    HintsService.instance.shutdownBlocking();
+                }
+                catch (IllegalStateException e)
+                {
+                    if (!"HintsService has already been shut down".equals(e.getMessage()))
+                        throw e;
+                }
+            };
             error = parallelRun(error, executor,
                                 () -> Gossiper.instance.stopShutdownAndWait(1L, MINUTES),
                                 CompactionManager.instance::forceShutdown,
                                 () -> BatchlogManager.instance.shutdownAndWait(1L, MINUTES),
-                                HintsService.instance::shutdownBlocking,
-                                StreamingInboundHandler::shutdown,
+                                shutdownHints,
+                                () -> CompactionLogger.shutdownNowAndWait(1L, MINUTES),
+                                () -> AuthCache.shutdownAllAndWait(1L, MINUTES),
+                                () -> Sampler.shutdownNowAndWait(1L, MINUTES),
+                                NettyStreamingChannel::shutdown,
                                 () -> StreamReceiveTask.shutdownAndWait(1L, MINUTES),
                                 () -> StreamTransferTask.shutdownAndWait(1L, MINUTES),
+                                () -> StreamManager.instance.stop(),
                                 () -> SecondaryIndexManager.shutdownAndWait(1L, MINUTES),
                                 () -> IndexSummaryManager.instance.shutdownAndWait(1L, MINUTES),
                                 () -> ColumnFamilyStore.shutdownExecutorsAndWait(1L, MINUTES),
-                                () -> PendingRangeCalculatorService.instance.shutdownAndWait(1L, MINUTES),
                                 () -> BufferPools.shutdownLocalCleaner(1L, MINUTES),
+                                () -> PaxosRepair.shutdownAndWait(1L, MINUTES),
                                 () -> Ref.shutdownReferenceReaper(1L, MINUTES),
-                                () -> Memtable.MEMORY_POOL.shutdownAndWait(1L, MINUTES),
+                                () -> UncommittedTableData.shutdownAndWait(1L, MINUTES),
+                                () -> AbstractAllocatorMemtable.MEMORY_POOL.shutdownAndWait(1L, MINUTES),
                                 () -> DiagnosticSnapshotService.instance.shutdownAndWait(1L, MINUTES),
                                 () -> SSTableReader.shutdownBlocking(1L, MINUTES),
-                                () -> shutdownAndWait(Collections.singletonList(ActiveRepairService.repairCommandExecutor()))
+                                () -> shutdownAndWait(Collections.singletonList(ActiveRepairService.repairCommandExecutor())),
+                                () -> ActiveRepairService.instance.shutdownNowAndWait(1L, MINUTES),
+                                () -> SnapshotManager.shutdownAndWait(1L, MINUTES)
             );
 
             internodeMessagingStarted = false;
             error = parallelRun(error, executor,
                                 // can only shut down messaging once, so if the test shuts down an instance, then ignore the failure
-                                (IgnoreThrowingRunnable) () -> MessagingService.instance().shutdown(1L, MINUTES, false, true)
+                                (IgnoreThrowingRunnable) () -> MessagingService.instance().shutdown(1L, MINUTES, false, config.has(NETWORK))
             );
-
             error = parallelRun(error, executor,
-                                () -> GlobalEventExecutor.INSTANCE.awaitInactivity(1L, MINUTES),
+                                () -> { if (config.has(NETWORK)) { try { GlobalEventExecutor.INSTANCE.awaitInactivity(1L, MINUTES); } catch (IllegalStateException ignore) {} } },
                                 () -> Stage.shutdownAndWait(1L, MINUTES),
                                 () -> SharedExecutorPool.SHARED.shutdownAndWait(1L, MINUTES)
             );
@@ -811,17 +872,40 @@
             // CommitLog must shut down after Stage, or threads from the latter may attempt to use the former.
             // (ex. A Mutation stage thread may attempt to add a mutation to the CommitLog.)
             error = parallelRun(error, executor, CommitLog.instance::shutdownBlocking);
-            error = parallelRun(error, executor, () -> shutdownAndWait(Collections.singletonList(JMXBroadcastExecutor.executor)));
-            
+            error = parallelRun(error, executor,
+                                () -> PendingRangeCalculatorService.instance.shutdownAndWait(1L, MINUTES),
+                                () -> shutdownAndWait(Collections.singletonList(JMXBroadcastExecutor.executor))
+            );
+
             // ScheduledExecutors shuts down after MessagingService, as MessagingService may issue tasks to it.
-            error = parallelRun(error, executor, () -> ScheduledExecutors.shutdownAndWait(1L, MINUTES));
+            error = parallelRun(error, executor, () -> ScheduledExecutors.shutdownNowAndWait(1L, MINUTES));
+
+            // Make sure any shutdown hooks registered for DeleteOnExit are released to prevent
+            // references to the instance class loaders from being held
+            if (graceful)
+            {
+                PathUtils.runOnExitThreadsAndClear();
+            }
+            else
+            {
+                PathUtils.clearOnExitThreads();
+            }
 
             Throwables.maybeFail(error);
         }).apply(isolatedExecutor);
 
-        return CompletableFuture.runAsync(ThrowingRunnable.toRunnable(future::get), isolatedExecutor)
-                                .thenRun(super::shutdown)
-                                .thenRun(() -> startedAt.set(0L));
+        return isolatedExecutor.submit(() -> {
+            try
+            {
+                future.get();
+                return null;
+            }
+            finally
+            {
+                super.shutdown();
+                startedAt.set(0L);
+            }
+        });
     }
 
     @Override
@@ -840,7 +924,7 @@
     @Override
     public Metrics metrics()
     {
-        return callOnInstance(() -> new InstanceMetrics(CassandraMetricsRegistry.Metrics));
+        return new InstanceMetrics(CassandraMetricsRegistry.Metrics);
     }
 
     @Override
@@ -1006,8 +1090,7 @@
 
     private static void shutdownAndWait(List<ExecutorService> executors) throws TimeoutException, InterruptedException
     {
-        ExecutorUtils.shutdownNow(executors);
-        ExecutorUtils.awaitTermination(1L, MINUTES, executors);
+        ExecutorUtils.shutdownNowAndWait(1L, MINUTES, executors);
     }
 
     private static Throwable parallelRun(Throwable accumulate, ExecutorService runOn, ThrowingRunnable ... runnables)
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java
index 037c221..92c56d6 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java
@@ -18,34 +18,28 @@
 
 package org.apache.cassandra.distributed.impl;
 
-import java.io.File;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
+import java.nio.file.Path;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.function.Function;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.util.stream.Collectors;
 
 import com.vdurmont.semver4j.Semver;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.IInstanceConfig;
 import org.apache.cassandra.distributed.shared.NetworkTopology;
-import org.apache.cassandra.distributed.shared.Shared;
 import org.apache.cassandra.distributed.upgrade.UpgradeTestBase;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.SimpleSeedProvider;
 
-@Shared
 public class InstanceConfig implements IInstanceConfig
 {
-    private static final Object NULL = new Object();
-    private static final Logger logger = LoggerFactory.getLogger(InstanceConfig.class);
-
     public final int num;
     public int num() { return num; }
 
@@ -75,14 +69,16 @@
                            String commitlog_directory,
                            String hints_directory,
                            String cdc_raw_directory,
-                           String initial_token,
+                           Collection<String> initial_token,
                            int storage_port,
                            int native_transport_port)
     {
         this.num = num;
         this.networkTopology = networkTopology;
-        this.hostId = java.util.UUID.randomUUID();
-        this    .set("num_tokens", 1)
+        this.hostId = new UUID(0x4000L, (1L << 63) | num); // deterministic hostId for simulator
+        //TODO move away from magic strings in favor of constants
+        this    .set("num_tokens", initial_token.size())
+                .set("initial_token", initial_token.stream().collect(Collectors.joining(",")))
                 .set("broadcast_address", broadcast_address)
                 .set("listen_address", listen_address)
                 .set("broadcast_rpc_address", broadcast_rpc_address)
@@ -92,7 +88,6 @@
                 .set("commitlog_directory", commitlog_directory)
                 .set("hints_directory", hints_directory)
                 .set("cdc_raw_directory", cdc_raw_directory)
-                .set("initial_token", initial_token)
                 .set("partitioner", "org.apache.cassandra.dht.Murmur3Partitioner")
                 .set("start_native_transport", true)
                 .set("concurrent_writes", 2)
@@ -101,7 +96,7 @@
                 .set("concurrent_reads", 2)
                 .set("memtable_flush_writers", 1)
                 .set("concurrent_compactors", 1)
-                .set("memtable_heap_space_in_mb", 10)
+                .set("memtable_heap_space", "10MiB")
                 .set("commitlog_sync", "batch")
                 .set("storage_port", storage_port)
                 .set("native_transport_port", native_transport_port)
@@ -112,11 +107,11 @@
                 .set("diagnostic_events_enabled", true)
                 .set("auto_bootstrap", false)
                 // capacities that are based on `totalMemory` that should be fixed size
-                .set("index_summary_capacity_in_mb", 50l)
-                .set("counter_cache_size_in_mb", 50l)
-                .set("key_cache_size_in_mb", 50l)
+                .set("index_summary_capacity", "50MiB")
+                .set("counter_cache_size", "50MiB")
+                .set("key_cache_size", "50MiB")
                 // legacy parameters
-                .forceSet("commitlog_sync_batch_window_in_ms", 1.0);
+                .forceSet("commitlog_sync_batch_window_in_ms", "1");
         this.featureFlags = EnumSet.noneOf(Feature.class);
     }
 
@@ -131,7 +126,6 @@
         this.broadcastAddressAndPort = copy.broadcastAddressAndPort;
     }
 
-
     @Override
     public InetSocketAddress broadcastAddress()
     {
@@ -194,18 +188,18 @@
 
     public InstanceConfig set(String fieldName, Object value)
     {
-        if (value == null)
-            value = NULL;
         getParams(fieldName).put(fieldName, value);
         return this;
     }
 
+    public InstanceConfig remove(String fieldName)
+    {
+        getParams(fieldName).remove(fieldName);
+        return this;
+    }
+
     public InstanceConfig forceSet(String fieldName, Object value)
     {
-        if (value == null)
-            value = NULL;
-
-        // test value
         getParams(fieldName).put(fieldName, value);
         return this;
     }
@@ -223,10 +217,12 @@
         throw new IllegalStateException("In-JVM dtests no longer support propagate");
     }
 
+    @Override
     public void validate()
     {
-        if (((int) get("num_tokens")) > 1)
-            throw new IllegalArgumentException("In-JVM dtests do not support vnodes as of now.");
+        // Previous logic validated that vnodes were not used, but with vnode support added that validation isn't needed.
+        // Rather than attempting to validate the configs here, it's best to leave that to the instance; this method
+        // is no longer really needed, but cannot be removed for backwards compatibility.
     }
 
     public Object get(String name)
@@ -252,8 +248,8 @@
     public static InstanceConfig generate(int nodeNum,
                                           INodeProvisionStrategy provisionStrategy,
                                           NetworkTopology networkTopology,
-                                          File root,
-                                          String token,
+                                          Path root,
+                                          Collection<String> tokens,
                                           int datadirCount)
     {
         return new InstanceConfig(nodeNum,
@@ -269,14 +265,14 @@
                                   String.format("%s/node%d/commitlog", root, nodeNum),
                                   String.format("%s/node%d/hints", root, nodeNum),
                                   String.format("%s/node%d/cdc", root, nodeNum),
-                                  token,
+                                  tokens,
                                   provisionStrategy.storagePort(nodeNum),
                                   provisionStrategy.nativeTransportPort(nodeNum));
     }
 
-    private static String[] datadirs(int datadirCount, File root, int nodeNum)
+    private static String[] datadirs(int datadirCount, Path root, int nodeNum)
     {
-        String datadirFormat = String.format("%s/node%d/data%%d", root.getPath(), nodeNum);
+        String datadirFormat = String.format("%s/node%d/data%%d", root, nodeNum);
         String [] datadirs = new String[datadirCount];
         for (int i = 0; i < datadirs.length; i++)
             datadirs[i] = String.format(datadirFormat, i);
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/InstanceKiller.java b/test/distributed/org/apache/cassandra/distributed/impl/InstanceKiller.java
index e7ca49b..38b045b 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/InstanceKiller.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/InstanceKiller.java
@@ -19,6 +19,7 @@
 package org.apache.cassandra.distributed.impl;
 
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
 
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
@@ -26,6 +27,13 @@
 {
     private static final AtomicLong KILL_ATTEMPTS = new AtomicLong(0);
 
+    private final Consumer<Boolean> onKill;
+
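+    // Callback invoked with the "quiet" flag whenever a kill is attempted (see killCurrentJVM below);
+    // a null consumer is replaced with a no-op.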
+    public InstanceKiller(Consumer<Boolean> onKill)
+    {
+        this.onKill = onKill != null ? onKill : ignore -> {};
+    }
+
     public static long getKillAttempts()
     {
         return KILL_ATTEMPTS.get();
@@ -40,6 +48,7 @@
     protected void killCurrentJVM(Throwable t, boolean quiet)
     {
         KILL_ATTEMPTS.incrementAndGet();
+        onKill.accept(quiet);
         // the bad part is that System.exit kills the JVM, so all code which calls kill won't hit the
         // next line; yet in in-JVM dtests System.exit is not desirable, so need to rely on a runtime exception
         // as a means to try to stop execution
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/InstanceMetrics.java b/test/distributed/org/apache/cassandra/distributed/impl/InstanceMetrics.java
index 939691d..7bedce1 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/InstanceMetrics.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/InstanceMetrics.java
@@ -24,10 +24,11 @@
 import java.util.Map;
 import java.util.function.Predicate;
 
-import com.codahale.metrics.Counter;
+import com.codahale.metrics.Counting;
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Histogram;
 import com.codahale.metrics.Meter;
+import com.codahale.metrics.Metric;
 import com.codahale.metrics.Snapshot;
 import com.codahale.metrics.Timer;
 import org.apache.cassandra.distributed.shared.Metrics;
@@ -45,33 +46,43 @@
         this.metricsRegistry = metricsRegistry;
     }
 
+    @Override
     public List<String> getNames()
     {
         return new ArrayList<>(metricsRegistry.getNames());
     }
 
+    @Override
     public long getCounter(String name)
     {
-        return metricsRegistry.getCounters().get(name).getCount();
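+        // Look the metric up in the full registry via the Counting interface, which Counters, Meters,
+        // Histograms and Timers all implement, so a count can be reported for any metric type.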
+        Metric metric = metricsRegistry.getMetrics().get(name);
+        if (metric instanceof Counting)
+            return ((Counting) metric).getCount();
+        // If the metric is not found or does not expose a getCount method
+        return 0;
     }
 
+    @Override
     public Map<String, Long> getCounters(Predicate<String> filter)
     {
         Map<String, Long> values = new HashMap<>();
-        for (Map.Entry<String, Counter> e : metricsRegistry.getCounters().entrySet())
+        for (Map.Entry<String, Metric> e : metricsRegistry.getMetrics().entrySet())
         {
-            if (filter.test(e.getKey()))
-                values.put(e.getKey(), e.getValue().getCount());
+            Metric metric = e.getValue();
+            if (metric instanceof Counting && filter.test(e.getKey()))
+                values.put(e.getKey(), ((Counting) metric).getCount());
         }
         return values;
     }
 
+    @Override
     public double getHistogram(String name, MetricValue value)
     {
         Histogram histogram = metricsRegistry.getHistograms().get(name);
         return getValue(histogram, value);
     }
 
+    @Override
     public Map<String, Double> getHistograms(Predicate<String> filter, MetricValue value)
     {
         Map<String, Double> values = new HashMap<>();
@@ -83,11 +94,13 @@
         return values;
     }
 
+    @Override
     public Object getGauge(String name)
     {
         return metricsRegistry.getGauges().get(name).getValue();
     }
 
+    @Override
     public Map<String, Object> getGauges(Predicate<String> filter)
     {
         Map<String, Object> values = new HashMap<>();
@@ -99,11 +112,13 @@
         return values;
     }
 
+    @Override
     public double getMeter(String name, Rate value)
     {
         return getRate(metricsRegistry.getMeters().get(name), value);
     }
 
+    @Override
     public Map<String, Double> getMeters(Predicate<String> filter, Rate rate)
     {
         Map<String, Double> values = new HashMap<>();
@@ -115,11 +130,13 @@
         return values;
     }
 
+    @Override
     public double getTimer(String name, MetricValue value)
     {
         return getValue(metricsRegistry.getTimers().get(name).getSnapshot(), value);
     }
 
+    @Override
     public Map<String, Double> getTimers(Predicate<String> filter, MetricValue value)
     {
         Map<String, Double> values = new HashMap<>();
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/IsolatedExecutor.java b/test/distributed/org/apache/cassandra/distributed/impl/IsolatedExecutor.java
index dd52b5d..68ff1e7 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/IsolatedExecutor.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/IsolatedExecutor.java
@@ -28,8 +28,8 @@
 import java.lang.reflect.Method;
 import java.net.URLClassLoader;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
@@ -44,61 +44,102 @@
 import org.slf4j.LoggerFactory;
 
 import ch.qos.logback.classic.LoggerContext;
-import org.apache.cassandra.concurrent.NamedThreadFactory;
+import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.concurrent.ExecutorFactory;
 import org.apache.cassandra.distributed.api.IIsolatedExecutor;
 import org.apache.cassandra.utils.ExecutorUtils;
 import org.apache.cassandra.utils.Throwables;
 
+import static java.util.concurrent.TimeUnit.SECONDS;
+
 public class IsolatedExecutor implements IIsolatedExecutor
 {
     final ExecutorService isolatedExecutor;
     private final String name;
-    private final ClassLoader classLoader;
-    private final Method deserializeOnInstance;
+    final ClassLoader classLoader;
+    private final DynamicFunction<Serializable> transfer;
+    private final ShutdownExecutor shutdownExecutor;
 
-    IsolatedExecutor(String name, ClassLoader classLoader)
-    {
-        this.name = name;
-        this.isolatedExecutor = Executors.newCachedThreadPool(new NamedThreadFactory("isolatedExecutor", Thread.NORM_PRIORITY, classLoader, new ThreadGroup(name)));
-        this.classLoader = classLoader;
-        this.deserializeOnInstance = lookupDeserializeOneObject(classLoader);
-    }
-
-    public Future<Void> shutdown()
-    {
-        isolatedExecutor.shutdownNow();
-
+    public static final ShutdownExecutor DEFAULT_SHUTDOWN_EXECUTOR = (name, classLoader, shuttingDown, onTermination) -> {
         /* Use a thread pool with a core pool size of zero to terminate the thread as soon as possible
-        ** so the instance class loader can be garbage collected.  Uses a custom thread factory
-        ** rather than NamedThreadFactory to avoid calling FastThreadLocal.removeAll() in 3.0 and up
-        ** as it was observed crashing during test failures and made it harder to find the real cause.
-        */
+         ** so the instance class loader can be garbage collected.  Uses a custom thread factory
+         ** rather than NamedThreadFactory to avoid calling FastThreadLocal.removeAll() in 3.0 and up
+         ** as it was observed crashing during test failures and made it harder to find the real cause.
+         */
         ThreadFactory threadFactory = (Runnable r) -> {
             Thread t = new Thread(r, name + "_shutdown");
             t.setDaemon(true);
             return t;
         };
-        ExecutorService shutdownExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 0, TimeUnit.SECONDS,
+
+        ExecutorService shutdownExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 0, SECONDS,
                                                                   new LinkedBlockingQueue<>(), threadFactory);
         return shutdownExecutor.submit(() -> {
             try
             {
-                ExecutorUtils.awaitTermination(60, TimeUnit.SECONDS, isolatedExecutor);
-
-                // Shutdown logging last - this is not ideal as the logging subsystem is initialized
-                // outsize of this class, however doing it this way provides access to the full
-                // logging system while termination is taking place.
-                LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
-                loggerContext.stop();
-
-                // Close the instance class loader after shutting down the isolatedExecutor and logging
-                // in case error handling triggers loading additional classes
-                ((URLClassLoader) classLoader).close();
+                ExecutorUtils.awaitTermination(60, TimeUnit.SECONDS, shuttingDown);
+                return onTermination.call();
             }
             finally
             {
                 shutdownExecutor.shutdownNow();
             }
+        });
+    };
+
+    // retained for backwards compatibility
+    @SuppressWarnings("unused")
+    public IsolatedExecutor(String name, ClassLoader classLoader, ExecutorFactory executorFactory)
+    {
+        this(name, classLoader, executorFactory.pooled("isolatedExecutor", Integer.MAX_VALUE), DEFAULT_SHUTDOWN_EXECUTOR);
+    }
+
+    // retained for backwards compatibility
+    @SuppressWarnings("unused")
+    public IsolatedExecutor(String name, ClassLoader classLoader, ExecutorService executorService)
+    {
+        this(name, classLoader, executorService, DEFAULT_SHUTDOWN_EXECUTOR);
+    }
+
+    IsolatedExecutor(String name, ClassLoader classLoader, ExecutorService executorService, ShutdownExecutor shutdownExecutor)
+    {
+        this.name = name;
+        this.isolatedExecutor = executorService;
+        this.classLoader = classLoader;
+        this.transfer = transferTo(classLoader);
+        this.shutdownExecutor = shutdownExecutor;
+    }
+
+    protected IsolatedExecutor(IsolatedExecutor from, ExecutorService executor)
+    {
+        this.name = from.name;
+        this.isolatedExecutor = executor;
+        this.classLoader = from.classLoader;
+        this.transfer = from.transfer;
+        this.shutdownExecutor = from.shutdownExecutor;
+    }
+
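+    // Returns a copy of this executor that keeps the same name, class loader and transfer function but
+    // submits work to the supplied ExecutorService.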
+    public IIsolatedExecutor with(ExecutorService executor)
+    {
+        return new IsolatedExecutor(this, executor);
+    }
+
+    public Future<Void> shutdown()
+    {
+        isolatedExecutor.shutdownNow();
+        return shutdownExecutor.shutdown(name, classLoader, isolatedExecutor, () -> {
+
+            // Shutdown logging last - this is not ideal as the logging subsystem is initialized
+            // outside of this class, however doing it this way provides access to the full
+            // logging system while termination is taking place.
+            LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
+            loggerContext.stop();
+
+            FastThreadLocal.destroy();
+
+            // Close the instance class loader after shutting down the isolatedExecutor and logging
+            // in case error handling triggers loading additional classes
+            ((URLClassLoader) classLoader).close();
             return null;
         });
     }
@@ -136,33 +177,57 @@
     public <I1, I2, I3, I4, I5, O> QuintFunction<I1, I2, I3, I4, I5, Future<O>> async(QuintFunction<I1, I2, I3, I4, I5, O> f) { return (a, b, c, d, e) -> isolatedExecutor.submit(() -> f.apply(a, b, c, d, e)); }
     public <I1, I2, I3, I4, I5, O> QuintFunction<I1, I2, I3, I4, I5, O> sync(QuintFunction<I1, I2, I3, I4, I5, O> f) { return (a, b, c, d,e ) -> waitOn(async(f).apply(a, b, c, d, e)); }
 
-    public <E extends Serializable> E transfer(E object)
+    public Executor executor()
     {
-        return (E) transferOneObject(object, classLoader, deserializeOnInstance);
+        return isolatedExecutor;
     }
 
-    static <E extends Serializable> E transferAdhoc(E object, ClassLoader classLoader)
+    public <T extends Serializable> T transfer(T in)
     {
-        return transferOneObject(object, classLoader, lookupDeserializeOneObject(classLoader));
+        return transfer.apply(in);
     }
 
-    private static <E extends Serializable> E transferOneObject(E object, ClassLoader classLoader, Method deserializeOnInstance)
+    public static <T extends Serializable> T transferAdhoc(T object, ClassLoader classLoader)
     {
-        byte[] bytes = serializeOneObject(object);
         try
         {
-            Object onInstance = deserializeOnInstance.invoke(null, bytes);
-            if (onInstance.getClass().getClassLoader() != classLoader)
-                throw new IllegalStateException(onInstance + " seemingly from wrong class loader: " + onInstance.getClass().getClassLoader() + ", but expected " + classLoader);
-
-            return (E) onInstance;
+            return transferOneObjectAdhoc(object, classLoader, lookupDeserializeOneObject(classLoader));
         }
         catch (IllegalAccessException | InvocationTargetException e)
         {
-            throw new RuntimeException("Error while transfering object to " + classLoader, e);
+            throw new RuntimeException(e);
         }
     }
 
+    public static <T extends Serializable> T transferAdhocPropagate(T object, ClassLoader classLoader) throws InvocationTargetException, IllegalAccessException
+    {
+        return transferOneObjectAdhoc(object, classLoader, lookupDeserializeOneObject(classLoader));
+    }
+
+    private static final SerializableFunction<byte[], Object> DESERIALIZE_ONE_OBJECT = IsolatedExecutor::deserializeOneObject;
+
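+    // Produces a function that serializes an object in the caller's class loader and deserializes it with a
+    // deserializer already transferred into the target class loader, so the result is built from the target
+    // loader's classes.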
+    public static DynamicFunction<Serializable> transferTo(ClassLoader classLoader)
+    {
+        SerializableFunction<byte[], Object> deserializeOneObject = transferAdhoc(DESERIALIZE_ONE_OBJECT, classLoader);
+        return new DynamicFunction<Serializable>()
+        {
+            public <T extends Serializable> T apply(T in)
+            {
+                return (T) deserializeOneObject.apply(serializeOneObject(in));
+            }
+        };
+    }
+
+    private static <T extends Serializable> T transferOneObjectAdhoc(T object, ClassLoader classLoader, Method deserializeOnInstance) throws IllegalAccessException, InvocationTargetException
+    {
+        byte[] bytes = serializeOneObject(object);
+        Object onInstance = deserializeOnInstance.invoke(null, bytes);
+        if (onInstance.getClass().getClassLoader() != classLoader)
+            throw new IllegalStateException(onInstance + " seemingly from wrong class loader: " + onInstance.getClass().getClassLoader() + ", but expected " + classLoader);
+
+        return (T) onInstance;
+    }
+
     private static Method lookupDeserializeOneObject(ClassLoader classLoader)
     {
         try
@@ -221,22 +286,4 @@
         }
     }
 
-    public interface ThrowingRunnable
-    {
-        public void run() throws Throwable;
-
-        public static Runnable toRunnable(ThrowingRunnable runnable)
-        {
-            return () -> {
-                try
-                {
-                    runnable.run();
-                }
-                catch (Throwable throwable)
-                {
-                    throw new RuntimeException(throwable);
-                }
-            };
-        }
-    }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/MessageFilters.java b/test/distributed/org/apache/cassandra/distributed/impl/MessageFilters.java
new file mode 100644
index 0000000..84db349
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/impl/MessageFilters.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.impl;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import org.apache.cassandra.distributed.api.IMessage;
+import org.apache.cassandra.distributed.api.IMessageFilters;
+
+// note: as a pure implementation class of an interface, this should not live in dtest-api
+public class MessageFilters implements IMessageFilters
+{
+    private final List<Filter> inboundFilters = new CopyOnWriteArrayList<>();
+    private final List<Filter> outboundFilters = new CopyOnWriteArrayList<>();
+
+    public boolean permitInbound(int from, int to, IMessage msg)
+    {
+        return permit(inboundFilters, from, to, msg);
+    }
+
+    public boolean permitOutbound(int from, int to, IMessage msg)
+    {
+        return permit(outboundFilters, from, to, msg);
+    }
+
+    @Override
+    public boolean hasInbound()
+    {
+        return !inboundFilters.isEmpty();
+    }
+
+    @Override
+    public boolean hasOutbound()
+    {
+        return !outboundFilters.isEmpty();
+    }
+
+    private static boolean permit(List<Filter> filters, int from, int to, IMessage msg)
+    {
+        for (Filter filter : filters)
+        {
+            if (filter.matches(from, to, msg))
+                return false;
+        }
+        return true;
+    }
+
+    public static class Filter implements IMessageFilters.Filter
+    {
+        final int[] from;
+        final int[] to;
+        final int[] verbs;
+        final Matcher matcher;
+        final List<Filter> parent;
+
+        Filter(int[] from, int[] to, int[] verbs, Matcher matcher, List<Filter> parent)
+        {
+            if (from != null)
+            {
+                from = from.clone();
+                Arrays.sort(from);
+            }
+            if (to != null)
+            {
+                to = to.clone();
+                Arrays.sort(to);
+            }
+            if (verbs != null)
+            {
+                verbs = verbs.clone();
+                Arrays.sort(verbs);
+            }
+            this.from = from;
+            this.to = to;
+            this.verbs = verbs;
+            this.matcher = matcher;
+            this.parent = Objects.requireNonNull(parent, "parent");
+        }
+
+        public int hashCode()
+        {
+            return (from == null ? 0 : Arrays.hashCode(from))
+                   + (to == null ? 0 : Arrays.hashCode(to))
+                   + (verbs == null ? 0 : Arrays.hashCode(verbs))
+                   + parent.hashCode();
+        }
+
+        public boolean equals(Object that)
+        {
+            return that instanceof Filter && equals((Filter) that);
+        }
+
+        public boolean equals(Filter that)
+        {
+            return Arrays.equals(from, that.from)
+                   && Arrays.equals(to, that.to)
+                   && Arrays.equals(verbs, that.verbs)
+                   && parent.equals(that.parent);
+        }
+
+        public Filter off()
+        {
+            parent.remove(this);
+            return this;
+        }
+
+        public Filter on()
+        {
+            parent.add(this);
+            return this;
+        }
+
+        public boolean matches(int from, int to, IMessage msg)
+        {
+            return (this.from == null || Arrays.binarySearch(this.from, from) >= 0)
+                   && (this.to == null || Arrays.binarySearch(this.to, to) >= 0)
+                   && (this.verbs == null || Arrays.binarySearch(this.verbs, msg.verb()) >= 0)
+                   && (this.matcher == null || this.matcher.matches(from, to, msg));
+        }
+    }
+
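+    // Hedged usage sketch: tests typically obtain a builder from the cluster's IMessageFilters and chain, e.g.
+    //   filters.inbound(false).from(1).to(2).verbs(someVerbId).drop();
+    // drop() registers the Filter via on(); calling off() on the returned Filter removes it again.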
+    public class Builder implements IMessageFilters.Builder
+    {
+        int[] from;
+        int[] to;
+        int[] verbs;
+        Matcher matcher;
+        boolean inbound;
+
+        private Builder(boolean inbound)
+        {
+            this.inbound = inbound;
+        }
+
+        public Builder from(int... nums)
+        {
+            from = nums;
+            return this;
+        }
+
+        public Builder to(int... nums)
+        {
+            to = nums;
+            return this;
+        }
+
+        public IMessageFilters.Builder verbs(int... verbs)
+        {
+            this.verbs = verbs;
+            return this;
+        }
+
+        public IMessageFilters.Builder allVerbs()
+        {
+            this.verbs = null;
+            return this;
+        }
+
+        public IMessageFilters.Builder inbound(boolean inbound)
+        {
+            this.inbound = inbound;
+            return this;
+        }
+
+        public IMessageFilters.Builder messagesMatching(Matcher matcher)
+        {
+            this.matcher = matcher;
+            return this;
+        }
+
+        public IMessageFilters.Filter drop()
+        {
+            return new Filter(from, to, verbs, matcher, inbound ? inboundFilters : outboundFilters).on();
+        }
+    }
+
+    public IMessageFilters.Builder inbound(boolean inbound)
+    {
+        return new Builder(inbound);
+    }
+
+    @Override
+    public void reset()
+    {
+        inboundFilters.clear();
+        outboundFilters.clear();
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/MessageImpl.java b/test/distributed/org/apache/cassandra/distributed/impl/MessageImpl.java
index 607e890..758d413 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/MessageImpl.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/MessageImpl.java
@@ -30,14 +30,16 @@
     public final byte[] bytes;
     public final long id;
     public final int version;
+    public final long expiresAtNanos;
     public final InetSocketAddress from;
 
-    public MessageImpl(int verb, byte[] bytes, long id, int version, InetSocketAddress from)
+    public MessageImpl(int verb, byte[] bytes, long id, int version, long expiresAtNanos, InetSocketAddress from)
     {
         this.verb = verb;
         this.bytes = bytes;
         this.id = id;
         this.version = version;
+        this.expiresAtNanos = expiresAtNanos;
         this.from = from;
     }
 
@@ -61,6 +63,12 @@
         return version;
     }
 
+    @Override
+    public long expiresAtNanos()
+    {
+        return expiresAtNanos;
+    }
+
     public InetSocketAddress from()
     {
         return from;
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Query.java b/test/distributed/org/apache/cassandra/distributed/impl/Query.java
new file mode 100644
index 0000000..823113f
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Query.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.impl;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.cassandra.cql3.CQLStatement;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.transport.messages.ResultMessage;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public class Query implements IIsolatedExecutor.SerializableCallable<Object[][]>
+{
+    private static final long serialVersionUID = 1L;
+
+    final String query;
+    final long timestamp;
+    final org.apache.cassandra.distributed.api.ConsistencyLevel commitConsistencyOrigin;
+    final org.apache.cassandra.distributed.api.ConsistencyLevel serialConsistencyOrigin;
+    final Object[] boundValues;
+
+    public Query(String query, long timestamp, org.apache.cassandra.distributed.api.ConsistencyLevel commitConsistencyOrigin, org.apache.cassandra.distributed.api.ConsistencyLevel serialConsistencyOrigin, Object[] boundValues)
+    {
+        this.query = query;
+        this.timestamp = timestamp;
+        this.commitConsistencyOrigin = commitConsistencyOrigin;
+        this.serialConsistencyOrigin = serialConsistencyOrigin;
+        this.boundValues = boundValues;
+    }
+
+    public Object[][] call()
+    {
+        ConsistencyLevel commitConsistency = toCassandraCL(commitConsistencyOrigin);
+        ConsistencyLevel serialConsistency = serialConsistencyOrigin == null ? null : toCassandraCL(serialConsistencyOrigin);
+        ClientState clientState = Coordinator.makeFakeClientState();
+        CQLStatement prepared = QueryProcessor.getStatement(query, clientState);
+        List<ByteBuffer> boundBBValues = new ArrayList<>();
+        for (Object boundValue : boundValues)
+            boundBBValues.add(ByteBufferUtil.objectToBytes(boundValue));
+
+        prepared.validate(QueryState.forInternalCalls().getClientState());
+
+        // Start capturing warnings on this thread. Note that this will implicitly clear out any previous
+        // warnings as it sets a new State instance on the ThreadLocal.
+        ClientWarn.instance.captureWarnings();
+
+        ResultMessage res = prepared.execute(QueryState.forInternalCalls(),
+                                             QueryOptions.create(commitConsistency,
+                                                                 boundBBValues,
+                                                                 false,
+                                                                 Integer.MAX_VALUE,
+                                                                 null,
+                                                                 serialConsistency,
+                                                                 ProtocolVersion.V4,
+                                                                 null,
+                                                                 timestamp,
+                                                                 FBUtilities.nowInSeconds()),
+                                             nanoTime());
+
+        // Collect warnings reported during the query.
+        if (res != null)
+            res.setWarnings(ClientWarn.instance.getWarnings());
+
+        return RowUtil.toQueryResult(res).toObjectArrays();
+    }
+
+    public String toString()
+    {
+        return String.format(query.replaceAll("\\?", "%s") + " AT " + commitConsistencyOrigin, boundValues);
+    }
+
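+    // The conversion below assumes the dtest-api ConsistencyLevel enum declares its constants in the same
+    // order as the internal consistency level codes, so ordinal() can be fed straight into fromCode().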
+    static org.apache.cassandra.db.ConsistencyLevel toCassandraCL(org.apache.cassandra.distributed.api.ConsistencyLevel cl)
+    {
+        return org.apache.cassandra.db.ConsistencyLevel.fromCode(cl.ordinal());
+    }
+
+    static final org.apache.cassandra.distributed.api.ConsistencyLevel[] API_CLs = org.apache.cassandra.distributed.api.ConsistencyLevel.values();
+    static org.apache.cassandra.distributed.api.ConsistencyLevel fromCassandraCL(org.apache.cassandra.db.ConsistencyLevel cl)
+    {
+        return API_CLs[cl.ordinal()];
+    }
+
+}
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/RowUtil.java b/test/distributed/org/apache/cassandra/distributed/impl/RowUtil.java
index ca26639..18ac07d 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/RowUtil.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/RowUtil.java
@@ -29,7 +29,6 @@
 
 import com.datastax.driver.core.ResultSet;
 import com.datastax.driver.core.Row;
-
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.distributed.api.QueryResults;
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/ShutdownExecutor.java b/test/distributed/org/apache/cassandra/distributed/impl/ShutdownExecutor.java
new file mode 100644
index 0000000..15ed4a2
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/impl/ShutdownExecutor.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.impl;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+import org.apache.cassandra.utils.Shared;
+
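+/**
+ * Hook used by IsolatedExecutor during shutdown: waits for the instance's executor to terminate, then runs
+ * the supplied onTermination cleanup (by default stopping logging and closing the instance class loader).
+ */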
+@Shared
+public interface ShutdownExecutor
+{
+    Future<Void> shutdown(String name, ClassLoader classLoader, ExecutorService shuttingDown, Callable<Void> onTermination);
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/TracingUtil.java b/test/distributed/org/apache/cassandra/distributed/impl/TracingUtil.java
index 439d4b7..06fdabb 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/TracingUtil.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/TracingUtil.java
@@ -25,6 +25,7 @@
 import java.util.UUID;
 
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.utils.TimeUUID;
 
 
 /**
@@ -56,8 +57,9 @@
 
         static TraceEntry fromRowResultObjects(Object[] objects)
         {
-            return new TraceEntry((UUID) objects[0],
-                                  (UUID) objects[1],
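+            // event_id may come back as either UUID or TimeUUID depending on the version; normalise to UUID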
+            UUID eventId = objects[1] instanceof UUID ? (UUID) objects[1] : ((TimeUUID) objects[1]).asUUID();
+            return new TraceEntry((UUID)objects[0],
+                                  eventId,
                                   (String) objects[2],
                                   (InetAddress) objects[3],
                                   (Integer) objects[4],
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/UnsafeGossipHelper.java b/test/distributed/org/apache/cassandra/distributed/impl/UnsafeGossipHelper.java
new file mode 100644
index 0000000..7cf4c16
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/impl/UnsafeGossipHelper.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.impl;
+
+import java.io.Serializable;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.UUID;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.service.PendingRangeCalculatorService;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static com.google.common.collect.Iterables.getOnlyElement;
+import static java.util.Collections.singleton;
+import static org.apache.cassandra.distributed.impl.DistributedTestSnitch.toCassandraInetAddressAndPort;
+import static org.apache.cassandra.locator.InetAddressAndPort.getByAddress;
+
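+/**
+ * Helpers that mutate gossip and token metadata on an instance directly, bypassing real gossip rounds, so
+ * in-JVM dtests can drive ring changes (join, leave, replace, shutdown) deterministically.
+ */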
+public class UnsafeGossipHelper
+{
+    public static class HostInfo implements Serializable
+    {
+        final InetSocketAddress address;
+        final UUID hostId;
+        final String tokenString;
+        final int messagingVersion;
+        final boolean isShutdown;
+
+        private HostInfo(InetSocketAddress address, UUID hostId, String tokenString, int messagingVersion, boolean isShutdown)
+        {
+            this.address = address;
+            this.hostId = hostId;
+            this.tokenString = tokenString;
+            this.messagingVersion = messagingVersion;
+            this.isShutdown = isShutdown;
+        }
+
+        private HostInfo(IInstance instance)
+        {
+            this(instance, instance.config().hostId(), instance.config().getString("initial_token"));
+        }
+
+        private HostInfo(IInstance instance, UUID hostId, String tokenString)
+        {
+            this(instance.broadcastAddress(), hostId, tokenString, instance.getMessagingVersion(), instance.isShutdown());
+        }
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingRunner(IIsolatedExecutor.SerializableBiFunction<VersionedValue.VersionedValueFactory, Collection<Token>, VersionedValue> statusFactory, InetSocketAddress address, UUID hostId, String tokenString, int messagingVersion, boolean isShutdown)
+    {
+        return () -> {
+            try
+            {
+                IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
+                InetAddressAndPort addressAndPort = getByAddress(address);
+                Token token;
+                if (FBUtilities.getBroadcastAddressAndPort().equals(addressAndPort))
+                {
+                    // try grabbing saved tokens so that - if we're leaving - we get the ones we may have adopted as part of a range movement
+                    // if that fails, grab them from config (as we're probably joining and should just use the default token)
+                    Token.TokenFactory tokenFactory = DatabaseDescriptor.getPartitioner().getTokenFactory();
+                    if (tokenString == null)
+                    {
+                        Token tmp;
+                        try
+                        {
+                             tmp = getOnlyElement(SystemKeyspace.getSavedTokens());
+                        }
+                        catch (Throwable t)
+                        {
+                            tmp = tokenFactory.fromString(getOnlyElement(DatabaseDescriptor.getInitialTokens()));
+                        }
+                        token = tmp;
+                    }
+                    else
+                    {
+                        token = tokenFactory.fromString(tokenString);
+                    }
+
+                    SystemKeyspace.setLocalHostId(hostId);
+                    SystemKeyspace.updateTokens(singleton(token));
+                }
+                else
+                {
+                    if (tokenString == null)
+                        throw new IllegalArgumentException();
+
+                    token = DatabaseDescriptor.getPartitioner().getTokenFactory().fromString(tokenString);
+                }
+
+                Gossiper.runInGossipStageBlocking(() -> {
+                    EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(addressAndPort);
+                    if (state == null)
+                    {
+                        Gossiper.instance.initializeNodeUnsafe(addressAndPort, hostId, 1);
+                        state = Gossiper.instance.getEndpointStateForEndpoint(addressAndPort);
+                        Gossiper.instance.realMarkAlive(addressAndPort, state);
+                    }
+
+                    state.addApplicationState(ApplicationState.TOKENS, new VersionedValue.VersionedValueFactory(partitioner).tokens(singleton(token)));
+                    VersionedValue status = statusFactory.apply(new VersionedValue.VersionedValueFactory(partitioner), singleton(token));
+                    state.addApplicationState(ApplicationState.STATUS_WITH_PORT, status);
+                    StorageService.instance.onChange(addressAndPort, ApplicationState.STATUS_WITH_PORT, status);
+                });
+
+                int setMessagingVersion = isShutdown
+                                          ? MessagingService.current_version
+                                          : Math.min(MessagingService.current_version, messagingVersion);
+                MessagingService.instance().versions.set(addressAndPort, setMessagingVersion);
+
+                PendingRangeCalculatorService.instance.blockUntilFinished();
+            }
+            catch (Throwable e) // UnknownHostException
+            {
+                throw new RuntimeException(e);
+            }
+        };
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingNormalRunner(IInstance peer)
+    {
+        return addToRingNormalRunner(new HostInfo(peer));
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingNormalRunner(IInstance peer, UUID hostId, String tokenString)
+    {
+        return addToRingNormalRunner(new HostInfo(peer, hostId, tokenString));
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingNormalRunner(HostInfo info)
+    {
+        return addToRingNormalRunner(info.address, info.hostId, info.tokenString, info.messagingVersion, info.isShutdown);
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingNormalRunner(InetSocketAddress address, UUID hostId, String tokenString, int messagingVersion, boolean isShutdown)
+    {
+        return addToRingRunner(VersionedValue.VersionedValueFactory::normal, address, hostId, tokenString, messagingVersion, isShutdown);
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingRunner(IIsolatedExecutor.SerializableBiFunction<VersionedValue.VersionedValueFactory, Collection<Token>, VersionedValue> statusFactory, HostInfo info)
+    {
+        return addToRingRunner(statusFactory, info.address, info.hostId, info.tokenString, info.messagingVersion, info.isShutdown);
+    }
+
+    // reset gossip state so we know of the node being alive only
+    public static IIsolatedExecutor.SerializableRunnable removeFromRingRunner(IInstance instance)
+    {
+        return removeFromRingRunner(new HostInfo(instance));
+    }
+
+    // reset gossip state so we know of the node being alive only
+    public static IIsolatedExecutor.SerializableRunnable removeFromRingRunner(HostInfo info)
+    {
+        return removeFromRingRunner(info.address, info.hostId, info.tokenString);
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable removeFromRingRunner(InetSocketAddress address, UUID hostId, String tokenString)
+    {
+        return () -> {
+
+            try
+            {
+                IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
+                Token token = partitioner.getTokenFactory().fromString(tokenString);
+                InetAddressAndPort addressAndPort = toCassandraInetAddressAndPort(address);
+
+                Gossiper.runInGossipStageBlocking(() -> {
+                    StorageService.instance.onChange(addressAndPort,
+                                                     ApplicationState.STATUS,
+                                                     new VersionedValue.VersionedValueFactory(partitioner).left(singleton(token), 0L));
+                    Gossiper.instance.unsafeAnnulEndpoint(addressAndPort);
+                    Gossiper.instance.initializeNodeUnsafe(addressAndPort, hostId, 1);
+                    Gossiper.instance.realMarkAlive(addressAndPort, Gossiper.instance.getEndpointStateForEndpoint(addressAndPort));
+                });
+                PendingRangeCalculatorService.instance.blockUntilFinished();
+            }
+            catch (Throwable e) // UnknownHostException
+            {
+                throw new RuntimeException(e);
+            }
+        };
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingBootstrappingRunner(IInstance peer)
+    {
+        return addToRingRunner(VersionedValue.VersionedValueFactory::bootstrapping, new HostInfo(peer));
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingBootstrapReplacingRunner(IInstance peer, IInvokableInstance replacing, UUID hostId, String tokenString)
+    {
+        return addToRingBootstrapReplacingRunner(peer, replacing.broadcastAddress(), hostId, tokenString);
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingBootstrapReplacingRunner(IInstance peer, InetSocketAddress replacingAddress, UUID hostId, String tokenString)
+    {
+        return addToRingRunner((factory, ignore) -> factory.bootReplacingWithPort(getByAddress(replacingAddress)), new HostInfo(peer, hostId, tokenString));
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingNormalReplacedRunner(IInstance peer, IInstance replaced)
+    {
+        return addToRingNormalReplacedRunner(peer, replaced.broadcastAddress());
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingNormalReplacedRunner(IInstance peer, InetSocketAddress replacedAddress)
+    {
+        return addToRingRunner((factory, ignore) -> factory.bootReplacingWithPort(getByAddress(replacedAddress)), new HostInfo(peer, null, null));
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingLeavingRunner(IInstance peer)
+    {
+        return addToRingRunner(VersionedValue.VersionedValueFactory::leaving, new HostInfo(peer, null, null));
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable addToRingLeftRunner(IInstance peer)
+    {
+        return addToRingRunner((factory, tokens) -> factory.left(tokens, Long.MAX_VALUE), new HostInfo(peer, null, null));
+    }
+
+    public static void removeFromRing(IInstance peer)
+    {
+        removeFromRingRunner(peer).run();
+    }
+
+    public static void addToRingNormal(IInstance peer)
+    {
+        addToRingNormalRunner(peer).run();
+        assert StorageService.instance.getTokenMetadata().isMember(toCassandraInetAddressAndPort(peer.broadcastAddress()));
+    }
+
+    public static void addToRingBootstrapping(IInstance peer)
+    {
+        addToRingBootstrappingRunner(peer).run();
+    }
+
+    public static IIsolatedExecutor.SerializableRunnable markShutdownRunner(InetSocketAddress address)
+    {
+        return () -> {
+            IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
+            Gossiper.runInGossipStageBlocking(() -> {
+                EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(getByAddress(address));
+                VersionedValue status = new VersionedValue.VersionedValueFactory(partitioner).shutdown(true);
+                state.addApplicationState(ApplicationState.STATUS, status);
+                state.getHeartBeatState().forceHighestPossibleVersionUnsafe();
+                StorageService.instance.onChange(getByAddress(address), ApplicationState.STATUS, status);
+            });
+        };
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java b/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java
index bc27ec7..b4dd10c 100644
--- a/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java
+++ b/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.distributed.shared;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.lang.reflect.Method;
@@ -42,6 +41,7 @@
 import com.google.common.base.StandardSystemProperty;
 import com.google.common.io.ByteStreams;
 import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -96,7 +96,7 @@
         List<String> texts = Stream.of(scripts).map(p -> {
             try
             {
-                return Files.toString(new File(p), StandardCharsets.UTF_8);
+                return Files.toString(new File(p).toJavaIOFile(), StandardCharsets.UTF_8);
             }
             catch (IOException e)
             {
@@ -155,11 +155,11 @@
                 if (DEBUG_TRANSFORMATIONS)
                 {
                     File f = new File(StandardSystemProperty.JAVA_IO_TMPDIR.value(), "byteman/" + details.klassPath + ".class");
-                    f.getParentFile().mkdirs();
-                    File original = new File(f.getParentFile(), "original-" + f.getName());
-                    logger.info("Writing class file for {} to {}", details.klassPath, f.getAbsolutePath());
-                    Files.asByteSink(f).write(newBytes);
-                    Files.asByteSink(original).write(details.bytes);
+                    f.parent().tryCreateDirectories();
+                    File original = new File(f.parent(), "original-" + f.name());
+                    logger.info("Writing class file for {} to {}", details.klassPath, f.absolutePath());
+                    Files.asByteSink(f.toJavaIOFile()).write(newBytes);
+                    Files.asByteSink(original.toJavaIOFile()).write(details.bytes);
                 }
             }
         }
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
index 1755857..dc280f3 100644
--- a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
+++ b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
@@ -18,8 +18,9 @@
 
 package org.apache.cassandra.distributed.shared;
 
-import java.io.File;
+import java.lang.reflect.Field;
 import java.net.InetSocketAddress;
+import java.security.Permission;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -39,6 +40,7 @@
 import java.util.stream.Collectors;
 
 import com.google.common.util.concurrent.Futures;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 
 import org.apache.cassandra.dht.Token;
@@ -51,7 +53,9 @@
 import org.apache.cassandra.distributed.impl.AbstractCluster;
 import org.apache.cassandra.distributed.impl.InstanceConfig;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.tools.SystemExitException;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Isolated;
 
 import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 import static org.apache.cassandra.config.CassandraRelevantProperties.BOOTSTRAP_SCHEMA_DELAY_MS;
@@ -139,6 +143,22 @@
      * Create a new instance and add it to the cluster, without starting it.
      *
      * @param cluster to add to
+     * @param other config to copy from
+     * @param fn function to add to the config before starting
+     * @param <I> instance type
+     * @return the instance added
+     */
+    public static <I extends IInstance> I addInstance(AbstractCluster<I> cluster,
+                                                      IInstanceConfig other,
+                                                      Consumer<IInstanceConfig> fn)
+    {
+        return addInstance(cluster, other.localDatacenter(), other.localRack(), fn);
+    }
+
+    /**
+     * Create a new instance and add it to the cluster, without starting it.
+     *
+     * @param cluster to add to
      * @param dc the instance should be in
      * @param rack the instance should be in
      * @param <I> instance type
@@ -208,8 +228,26 @@
                                                               IInstance toReplace,
                                                               Consumer<WithProperties> fn)
     {
+        return replaceHostAndStart(cluster, toReplace, (ignore, prop) -> fn.accept(prop));
+    }
+
+    /**
+     * Create and start a new instance that replaces an existing instance.
+     *
+     * The instance will be in the same datacenter and rack as the existing instance.
+     *
+     * @param cluster to add to
+     * @param toReplace instance to replace
+     * @param fn lambda to add additional properties or modify instance
+     * @param <I> instance type
+     * @return the instance added
+     */
+    public static <I extends IInstance> I replaceHostAndStart(AbstractCluster<I> cluster,
+                                                              IInstance toReplace,
+                                                              BiConsumer<I, WithProperties> fn)
+    {
         IInstanceConfig toReplaceConf = toReplace.config();
-        I inst = addInstance(cluster, toReplaceConf.localDatacenter(), toReplaceConf.localRack(), c -> c.set("auto_bootstrap", true));
+        I inst = addInstance(cluster, toReplaceConf, c -> c.set("auto_bootstrap", true));
 
         return start(inst, properties -> {
             // lower this so the replacement waits less time
@@ -221,7 +259,7 @@
             // state which node to replace
             properties.setProperty("cassandra.replace_address_first_boot", toReplace.config().broadcastAddress().getAddress().getHostAddress());
 
-            fn.accept(properties);
+            fn.accept(inst, properties);
         });
     }
 
@@ -237,15 +275,13 @@
                                                           .collect(Collectors.toList()));
     }
 
-    public static String getLocalToken(IInvokableInstance inst)
+    public static Collection<String> getLocalTokens(IInvokableInstance inst)
     {
         return inst.callOnInstance(() -> {
             List<String> tokens = new ArrayList<>();
             for (Token t : StorageService.instance.getTokenMetadata().getTokens(FBUtilities.getBroadcastAddressAndPort()))
                 tokens.add(t.getTokenValue().toString());
-
-            assert tokens.size() == 1 : "getLocalToken assumes a single token, but multiple tokens found";
-            return tokens.get(0);
+            return tokens;
         });
     }
 
@@ -592,6 +628,17 @@
     }
 
     /**
+     * Get the number of tokens for the instance via config.
+     *
+     * @param instance to get token count from
+     * @return number of tokens
+     */
+    public static int getTokenCount(IInvokableInstance instance)
+    {
+        return instance.config().getInt("num_tokens");
+    }
+
+    /**
      * Get all data directories for the given instance.
      *
      * @param instance to get data directories for
@@ -702,13 +749,33 @@
      */
     private static void updateAddress(IInstanceConfig conf, String address)
     {
+        InetSocketAddress previous = conf.broadcastAddress();
+
         for (String key : Arrays.asList("broadcast_address", "listen_address", "broadcast_rpc_address", "rpc_address"))
             conf.set(key, address);
 
         // InstanceConfig caches InetSocketAddress -> InetAddressAndPort
         // this causes issues as startup now ignores config, so force reset it to pull from conf.
         ((InstanceConfig) conf).unsetBroadcastAddressAndPort(); //TODO remove the need to null out the cache...
-        conf.networkTopology().put(conf.broadcastAddress(), NetworkTopology.dcAndRack(conf.localDatacenter(), conf.localRack()));
+
+        //TODO the NetworkTopology class isn't flexible and doesn't handle adding/removing nodes well...
+        // it also uses a HashMap, which makes the class not thread safe... so mutating it AFTER nodes have
+        // started is a risk
+        if (!conf.broadcastAddress().equals(previous))
+        {
+            conf.networkTopology().put(conf.broadcastAddress(), NetworkTopology.dcAndRack(conf.localDatacenter(), conf.localRack()));
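+            // NetworkTopology offers no way to remove an entry, hence the reflective removal of the stale
+            // mapping for the previous address below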
+            try
+            {
+                Field field = NetworkTopology.class.getDeclaredField("map");
+                field.setAccessible(true);
+                Map<InetSocketAddress, NetworkTopology.DcAndRack> map = (Map<InetSocketAddress, NetworkTopology.DcAndRack>) field.get(conf.networkTopology());
+                map.remove(previous);
+            }
+            catch (NoSuchFieldException | IllegalAccessException e)
+            {
+                throw new AssertionError(e);
+            }
+        }
     }
 
     /**
@@ -802,4 +869,26 @@
             return Arrays.asList(address, rack, status, state, token).toString();
         }
     }
+
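+    /**
+     * Installs a {@link SecurityManager} that turns any {@code System.exit} call into a
+     * {@link SystemExitException}, so code invoked from tests cannot terminate the shared test JVM.
+     */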
+    public static void preventSystemExit()
+    {
+        System.setSecurityManager(new SecurityManager()
+        {
+            @Override
+            public void checkExit(int status)
+            {
+                throw new SystemExitException(status);
+            }
+
+            @Override
+            public void checkPermission(Permission perm)
+            {
+            }
+
+            @Override
+            public void checkPermission(Permission perm, Object context)
+            {
+            }
+        });
+    }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/Isolated.java b/test/distributed/org/apache/cassandra/distributed/shared/Isolated.java
deleted file mode 100644
index 898631f..0000000
--- a/test/distributed/org/apache/cassandra/distributed/shared/Isolated.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.shared;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Tells jvm-dtest that a class should be isolated and loaded into the instance class loader.
- *
- * Jvm-dtest relies on classloader isolation to run multiple cassandra instances in the same JVM, this makes it
- * so some classes do not get shared (outside a blesssed set of classes/packages). When the default behavior
- * is not desirable, this annotation will tell jvm-dtest to isolate the class accross all class loaders.
- *
- * This is the oposite of {@link Shared}.
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target({ ElementType.TYPE })
-public @interface Isolated
-{
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/Shared.java b/test/distributed/org/apache/cassandra/distributed/shared/Shared.java
deleted file mode 100644
index bb67070..0000000
--- a/test/distributed/org/apache/cassandra/distributed/shared/Shared.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.shared;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Tells jvm-dtest that a class should be shared accross all {@link ClassLoader}s.
- *
- * Jvm-dtest relies on classloader isolation to run multiple cassandra instances in the same JVM, this makes it
- * so some classes do not get shared (outside a blesssed set of classes/packages). When the default behavior
- * is not desirable, this annotation will tell jvm-dtest to share the class accross all class loaders.
- *
- * This is the oposite of {@link Isolated}.
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target({ ElementType.TYPE })
-public @interface Shared
-{
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java b/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java
index d2b5bf7..9738357 100644
--- a/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java
+++ b/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java
@@ -27,4 +27,4 @@
         super("Uncaught exceptions were thrown during test");
         uncaughtExceptions.forEach(super::addSuppressed);
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/AbstractEncryptionOptionsImpl.java b/test/distributed/org/apache/cassandra/distributed/test/AbstractEncryptionOptionsImpl.java
index 3ca69c8..1c5ddbf 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/AbstractEncryptionOptionsImpl.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/AbstractEncryptionOptionsImpl.java
@@ -21,7 +21,6 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -29,6 +28,7 @@
 import javax.net.ssl.SSLSession;
 
 import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -50,8 +50,13 @@
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.security.SSLFactory;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.distributed.test.AbstractEncryptionOptionsImpl.ConnectResult.CONNECTING;
+import static org.apache.cassandra.distributed.test.AbstractEncryptionOptionsImpl.ConnectResult.UNINITIALIZED;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
 public class AbstractEncryptionOptionsImpl extends TestBaseImpl
 {
@@ -189,17 +194,17 @@
         ConnectResult connect() throws Throwable
         {
             AtomicInteger connectAttempts = new AtomicInteger(0);
-            result.set(ConnectResult.UNINITIALIZED);
+            result.set(UNINITIALIZED);
             setLastThrowable(null);
             setProtocolAndCipher(null, null);
 
             SslContext sslContext = SSLFactory.getOrCreateSslContext(
                 encryptionOptions.withAcceptedProtocols(acceptedProtocols).withCipherSuites(cipherSuites),
-                true, SSLFactory.SocketType.CLIENT);
+                true, ISslContextFactory.SocketType.CLIENT);
 
             EventLoopGroup workerGroup = new NioEventLoopGroup();
             Bootstrap b = new Bootstrap();
-            SimpleCondition attemptCompleted = new SimpleCondition();
+            Condition attemptCompleted = newOneTimeCondition();
 
             // Listener on the SSL handshake makes sure that the test completes immediately as
             // the server waits to receive a message over the TLS connection, so the discardHandler.decode
@@ -296,12 +301,12 @@
                 }
             });
 
-            result.set(ConnectResult.CONNECTING);
+            result.set(CONNECTING);
             ChannelFuture f = b.connect(host, port);
             try
             {
                 f.sync();
-                attemptCompleted.await(15, TimeUnit.SECONDS);
+                attemptCompleted.await(15, SECONDS);
             }
             finally
             {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsBootstrapStreaming.java b/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsBootstrapStreaming.java
index 7aca7bd..d327fd7 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsBootstrapStreaming.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsBootstrapStreaming.java
@@ -36,12 +36,22 @@
     protected void executeTest(final boolean streamEntireSSTables,
                                final boolean compressionEnabled) throws Exception
     {
+        executeTest(streamEntireSSTables, compressionEnabled, 1);
+    }
+
+    protected void executeTest(final boolean streamEntireSSTables,
+                               final boolean compressionEnabled,
+                               final int throughput) throws Exception
+    {
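+        // Throttle streaming (entire-sstable or legacy path, depending on the flag) and compaction so the
+        // stream stays in flight long enough for the netstats watcher to observe it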
         final Cluster.Builder builder = builder().withNodes(1)
                                                  .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(2))
                                                  .withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(2, "dc0", "rack0"))
                                                  .withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)
-                                                                             .set("stream_throughput_outbound_megabits_per_sec", 1)
-                                                                             .set("compaction_throughput_mb_per_sec", 1)
+                                                                             .set(streamEntireSSTables
+                                                                                  ? "entire_sstable_stream_throughput_outbound"
+                                                                                  : "stream_throughput_outbound",
+                                                                                  throughput+"MiB/s")
+                                                                             .set("compaction_throughput", "1MiB/s")
                                                                              .set("stream_entire_sstables", streamEntireSSTables));
 
         try (final Cluster cluster = builder.withNodes(1).start())
@@ -52,14 +62,7 @@
 
             cluster.get(1).nodetoolResult("disableautocompaction", "netstats_test").asserts().success();
 
-            if (compressionEnabled)
-            {
-                populateData(true);
-            }
-            else
-            {
-                populateData(false);
-            }
+            populateData(compressionEnabled);
 
             cluster.get(1).flush("netstats_test");
 
@@ -74,8 +77,10 @@
             final Future<?> startupRunnable = executorService.submit((Runnable) secondNode::startup);
             final Future<AbstractNetstatsStreaming.NetstatResults> netstatsFuture = executorService.submit(new NetstatsCallable(cluster.get(1)));
 
+            startupRunnable.get(3, MINUTES);
+            // 1m is a bit much, but leaves headroom for slower environments.  Node2 can't come up without streaming
+            // completing, so once node2 is up, 1m is enough time for the nodetool watcher to yield
             final AbstractNetstatsStreaming.NetstatResults results = netstatsFuture.get(1, MINUTES);
-            startupRunnable.get(2, MINUTES);
 
             results.assertSuccessful();
 
diff --git a/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsStreaming.java b/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsStreaming.java
index 2e828e0..93d26f1 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsStreaming.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/AbstractNetstatsStreaming.java
@@ -40,6 +40,7 @@
 import com.datastax.driver.core.Session;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.LogResult;
 import org.apache.cassandra.distributed.api.NodeToolResult;
 import org.apache.cassandra.utils.Pair;
 
@@ -509,6 +510,7 @@
 
             boolean sawAnyStreamingOutput = false;
 
+            long mark = 0;
             while (true)
             {
                 try
@@ -523,6 +525,20 @@
                         {
                             sawAnyStreamingOutput = true;
                         }
+                        else
+                        {
+                            // there is a race condition where streaming starts/stops between calls to netstats;
+                            // to detect this, check whether the node has completed a stream
+                            // expected log: [Stream (.*)?] All sessions completed
+                            LogResult<List<String>> logs = node.logs().grep(mark, "\\[Stream .*\\] All sessions completed");
+                            mark = logs.getMark();
+                            if (!logs.getResult().isEmpty())
+                            {
+                                // race condition detected...
+                                logger.info("Test race condition detected where streaming started/stopped between calls to netstats");
+                                sawAnyStreamingOutput = true;
+                            }
+                        }
                     }
 
                     if (sawAnyStreamingOutput && (!result.getStdout().contains("Receiving") && !result.getStdout().contains("Sending")))
diff --git a/test/distributed/org/apache/cassandra/distributed/test/AlterTest.java b/test/distributed/org/apache/cassandra/distributed/test/AlterTest.java
index b8912b2..2061c291 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/AlterTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/AlterTest.java
@@ -18,17 +18,29 @@
 
 package org.apache.cassandra.distributed.test;
 
+import java.util.List;
+
+import com.google.common.collect.ImmutableMap;
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.cql3.Lists;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.ICluster;
 import org.apache.cassandra.distributed.api.IInstanceConfig;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
 import org.apache.cassandra.distributed.api.IIsolatedExecutor;
 import org.apache.cassandra.distributed.api.SimpleQueryResult;
+import org.apache.cassandra.distributed.api.TokenSupplier;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
+import org.apache.cassandra.distributed.util.QueryResultUtil;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.Throwables;
 
 import static org.apache.cassandra.distributed.action.GossipHelper.withProperty;
 import static org.apache.cassandra.distributed.api.ConsistencyLevel.ONE;
@@ -39,6 +51,7 @@
 import static org.apache.cassandra.distributed.shared.NetworkTopology.singleDcNetworkTopology;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 public class AlterTest extends TestBaseImpl
 {
@@ -107,4 +120,73 @@
             }
         }
     }
+
+    @Test
+    public void unknownMemtableConfigurationTest() throws Throwable
+    {
+        Logger logger = LoggerFactory.getLogger(getClass());
+        try (Cluster cluster = Cluster.build(1)
+                                      .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(3, 1))
+                                      .withConfig(c -> c.with(Feature.values())
+                                                        .set("memtable", ImmutableMap.of(
+                                                        "configurations", ImmutableMap.of(
+                                                            "testconfig", ImmutableMap.of(
+                                                                "class_name", "SkipListMemtable")))))
+                                      .start())
+        {
+            init(cluster);
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int PRIMARY KEY)");
+
+            // Start Node 2 without the memtable configuration definition.
+            IInvokableInstance node1 = cluster.get(1);
+            IInvokableInstance node2 = ClusterUtils.addInstance(cluster, node1.config(), c -> c.set("memtable", ImmutableMap.of()));
+            node2.startup(cluster);
+
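+            // node2 was started without the 'testconfig' memtable definition, so an ALTER issued through it should be rejected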
+            try
+            {
+                cluster.schemaChange("ALTER TABLE " + KEYSPACE + ".tbl WITH memtable = 'testconfig'", false, node2);
+                fail("Expected ALTER to fail with unknown memtable configuration.");
+            }
+            catch (Throwable t)
+            {
+                // expected
+                logger.info("Expected: {}", t.getMessage());
+                Assert.assertTrue(Throwables.isCausedBy(t, x -> x.getMessage().matches("Memtable configuration.*not found.*")));
+            }
+            long mark = node2.logs().mark();
+
+            cluster.schemaChange("ALTER TABLE " + KEYSPACE + ".tbl WITH memtable = 'testconfig'", false, node1);
+            // the above should succeed, the configuration is acceptable to node1
+
+            final String schema1 = QueryResultUtil.expand(node1.executeInternalWithResult("SELECT * FROM system_schema.tables WHERE keyspace_name=?", KEYSPACE));
+            final String schema2 = QueryResultUtil.expand(node2.executeInternalWithResult("SELECT * FROM system_schema.tables WHERE keyspace_name=?", KEYSPACE));
+            logger.info("node1 schema: \n{}", schema1);
+            logger.info("node2 schema: \n{}", schema2);
+            Assert.assertEquals(schema1, schema2);
+            List<String> errorInLog = node2.logs().grep(mark, "ERROR.*Invalid memtable configuration.*").getResult();
+            Assert.assertTrue(errorInLog.size() > 0);
+            logger.info(Lists.listToString(errorInLog));
+
+            // Add a new node that has an invalid definition but should accept the already defined table schema.
+            IInvokableInstance node3 = ClusterUtils.addInstance(cluster,
+                                                                node2.config(),
+                                                                c -> c.set("memtable", ImmutableMap.of(
+                                                                "configurations", ImmutableMap.of(
+                                                                    "testconfig", ImmutableMap.of(
+                                                                        "class_name", "NotExistingMemtable")))));
+            node3.startup(cluster);
+            final String schema3 = QueryResultUtil.expand(node3.executeInternalWithResult("SELECT * FROM system_schema.tables WHERE keyspace_name=?", KEYSPACE));
+            logger.info("node3 schema: \n{}", schema3);
+            Assert.assertEquals(schema1, schema3);
+
+            errorInLog = node3.logs().grep("ERROR.*Invalid memtable configuration.*").getResult();
+            Assert.assertTrue(errorInLog.size() > 0);
+            logger.info(Lists.listToString(errorInLog));
+
+            // verify that all nodes can write to the table
+            node1.executeInternalWithResult("INSERT INTO " + KEYSPACE + ".tbl (pk) VALUES (?)", 1);
+            node2.executeInternalWithResult("INSERT INTO " + KEYSPACE + ".tbl (pk) VALUES (?)", 2);
+            node3.executeInternalWithResult("INSERT INTO " + KEYSPACE + ".tbl (pk) VALUES (?)", 3);
+        }
+    }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/AutoSnapshotTtlTest.java b/test/distributed/org/apache/cassandra/distributed/test/AutoSnapshotTtlTest.java
new file mode 100644
index 0000000..550cbdc
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/AutoSnapshotTtlTest.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.shared.WithProperties;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.db.ColumnFamilyStore.SNAPSHOT_DROP_PREFIX;
+import static org.apache.cassandra.db.ColumnFamilyStore.SNAPSHOT_TRUNCATE_PREFIX;
+import static org.apache.cassandra.distributed.Cluster.build;
+import static org.apache.cassandra.distributed.shared.ClusterUtils.stopUnchecked;
+import static org.awaitility.Awaitility.await;
+
+public class AutoSnapshotTtlTest extends TestBaseImpl
+{
+    public static final Integer SNAPSHOT_CLEANUP_PERIOD_SECONDS = 1;
+    public static final Integer FIVE_SECONDS = 5;
+    private static WithProperties properties = new WithProperties();
+
+    @BeforeClass
+    public static void beforeClass() throws Throwable
+    {
+        TestBaseImpl.beforeClass();
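+        // Run snapshot cleanup every second with no initial delay, and allow TTLs as low as 5s so the short
+        // auto_snapshot_ttl values used in these tests expire within the test timeouts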
+        properties.set(CassandraRelevantProperties.SNAPSHOT_CLEANUP_INITIAL_DELAY_SECONDS, 0);
+        properties.set(CassandraRelevantProperties.SNAPSHOT_CLEANUP_PERIOD_SECONDS, SNAPSHOT_CLEANUP_PERIOD_SECONDS);
+        properties.set(CassandraRelevantProperties.SNAPSHOT_MIN_ALLOWED_TTL_SECONDS, FIVE_SECONDS);
+    }
+
+    @AfterClass
+    public static void after()
+    {
+        properties.close();
+    }
+
+    /**
+     * Check that when auto_snapshot_ttl=5s, snapshots created from TRUNCATE are expired after 10s
+     */
+    @Test
+    public void testAutoSnapshotTTlOnTruncate() throws IOException
+    {
+        try (Cluster cluster = init(build().withNodes(1)
+                                      .withConfig(c -> c.with(Feature.GOSSIP)
+                                                        .set("auto_snapshot_ttl", String.format("%ds", FIVE_SECONDS)))
+                                      .start()))
+        {
+            IInvokableInstance instance = cluster.get(1);
+
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+
+            populate(cluster);
+
+            // Truncate Table
+            cluster.schemaChange(withKeyspace("TRUNCATE %s.tbl;"));
+
+            // Check snapshot is listed after table is truncated
+            instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains(SNAPSHOT_TRUNCATE_PREFIX);
+
+            // Check snapshot is removed after 10s
+            await().timeout(10, SECONDS)
+                   .pollInterval(1, SECONDS)
+                   .until(() -> !instance.nodetoolResult("listsnapshots").getStdout().contains(SNAPSHOT_TRUNCATE_PREFIX));
+        }
+    }
+
+    /**
+     * Check that when auto_snapshot_ttl=5s, snapshots created from DROP TABLE are expired after 10s
+     */
+    @Test
+    public void testAutoSnapshotTTlOnDrop() throws IOException
+    {
+        try (Cluster cluster = init(build().withNodes(1)
+                                      .withConfig(c -> c.with(Feature.GOSSIP)
+                                                        .set("auto_snapshot_ttl", String.format("%ds", FIVE_SECONDS)))
+                                      .start()))
+        {
+            IInvokableInstance instance = cluster.get(1);
+
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+
+            populate(cluster);
+
+            // Drop Table
+            cluster.schemaChange(withKeyspace("DROP TABLE %s.tbl;"));
+
+            // Check snapshot is listed after table is dropped
+            instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains(SNAPSHOT_DROP_PREFIX);
+
+            // Check snapshot is removed after 10s
+            await().timeout(10, SECONDS)
+                   .pollInterval(1, SECONDS)
+                   .until(() -> !instance.nodetoolResult("listsnapshots").getStdout().contains(SNAPSHOT_DROP_PREFIX));
+        }
+    }
+
+    /**
+     * Check that when auto_snapshot_ttl=60s, snapshots created from DROP TABLE are expired after a node restart
+     */
+    @Test
+    public void testAutoSnapshotTTlOnDropAfterRestart() throws IOException
+    {
+        int ONE_MINUTE = 60; // longer TTL to allow snapshot to survive node restart
+        try (Cluster cluster = init(build().withNodes(1)
+                                           .withConfig(c -> c.with(Feature.GOSSIP)
+                                                             .set("auto_snapshot_ttl", String.format("%ds", ONE_MINUTE)))
+                                           .start()))
+        {
+            IInvokableInstance instance = cluster.get(1);
+
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+
+            populate(cluster);
+
+            // Drop Table
+            cluster.schemaChange(withKeyspace("DROP TABLE %s.tbl;"));
+
+            // Restart node
+            stopUnchecked(instance);
+            instance.startup();
+
+            // Check snapshot is listed after restart
+            instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains(SNAPSHOT_DROP_PREFIX);
+
+            // Check snapshot is removed after at most auto_snapshot_ttl + 1s
+            await().timeout(ONE_MINUTE + 1, SECONDS)
+                   .pollInterval(1, SECONDS)
+                   .until(() -> !instance.nodetoolResult("listsnapshots").getStdout().contains(SNAPSHOT_DROP_PREFIX));
+        }
+    }
+
+    /**
+     * Check that when auto_snapshot_ttl is unset, snapshots created from DROP or TRUNCATE do not expire
+     */
+    @Test
+    public void testAutoSnapshotTtlDisabled() throws IOException, InterruptedException
+    {
+        try (Cluster cluster = init(build().withNodes(1)
+                                      .withConfig(c -> c.with(Feature.GOSSIP))
+                                      .start()))
+        {
+            IInvokableInstance instance = cluster.get(1);
+
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+
+            populate(cluster);
+
+            // Truncate Table
+            cluster.schemaChange(withKeyspace("TRUNCATE %s.tbl;"));
+
+            // Drop Table
+            cluster.schemaChange(withKeyspace("DROP TABLE %s.tbl;"));
+
+            // Check snapshots are created after table is truncated and dropped
+            instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains(SNAPSHOT_TRUNCATE_PREFIX);
+            instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains(SNAPSHOT_DROP_PREFIX);
+
+            // Check snapshots are *NOT* expired after 10s
+            Thread.sleep(2 * FIVE_SECONDS * 1000L);
+            instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains(ColumnFamilyStore.SNAPSHOT_TRUNCATE_PREFIX);
+            instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains(ColumnFamilyStore.SNAPSHOT_DROP_PREFIX);
+        }
+    }
+
+    protected static void populate(Cluster cluster)
+    {
+        for (int i = 0; i < 100; i++)
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (key, value) VALUES (?, 'txt')"), ConsistencyLevel.ONE, i);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/BootstrapBinaryDisabledTest.java b/test/distributed/org/apache/cassandra/distributed/test/BootstrapBinaryDisabledTest.java
index a7ac605..3f50c30 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/BootstrapBinaryDisabledTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/BootstrapBinaryDisabledTest.java
@@ -36,7 +36,7 @@
 import org.apache.cassandra.distributed.api.TokenSupplier;
 import org.apache.cassandra.distributed.shared.Byteman;
 import org.apache.cassandra.distributed.shared.NetworkTopology;
-import org.apache.cassandra.distributed.shared.Shared;
+import org.apache.cassandra.utils.Shared;
 
 /**
  * Replaces python dtest bootstrap_test.py::TestBootstrap::test_bootstrap_binary_disabled
@@ -50,8 +50,8 @@
         config.put("authenticator", "org.apache.cassandra.auth.PasswordAuthenticator");
         config.put("authorizer", "org.apache.cassandra.auth.CassandraAuthorizer");
         config.put("role_manager", "org.apache.cassandra.auth.CassandraRoleManager");
-        config.put("permissions_validity_in_ms", 0);
-        config.put("roles_validity_in_ms", 0);
+        config.put("permissions_validity", "0ms");
+        config.put("roles_validity", "0ms");
 
         int originalNodeCount = 1;
         int expandedNodeCount = originalNodeCount + 2;
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CASAddTest.java b/test/distributed/org/apache/cassandra/distributed/test/CASAddTest.java
new file mode 100644
index 0000000..d6c892e
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/CASAddTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.utils.AssertionUtils;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+
+public class CASAddTest extends TestBaseImpl
+{
+    private static final Logger logger = LoggerFactory.getLogger(CASAddTest.class);
+
+    /**
+     * The {@code cas_contention_timeout} used during the tests
+     */
+    private static final long CONTENTION_TIMEOUT = 1000L;
+
+    /**
+     * The {@code write_request_timeout} used during the tests
+     */
+    private static final long REQUEST_TIMEOUT = 1000L;
+
+    @Test
+    public void testAddition() throws Throwable
+    {
+        try (Cluster cluster = init(Cluster.create(3)))
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int PRIMARY KEY, v int)");
+
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, v) VALUES (1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 1));
+            
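+            // v is still 1, so the IF v = 2 condition fails and the addition below should not be applied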
+            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = v + 1 WHERE pk = 1 IF v = 2", ConsistencyLevel.QUORUM);
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 1));
+
+            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = v + 1 WHERE pk = 1 IF v = 1", ConsistencyLevel.QUORUM);
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 2));
+        }
+    }
+
+    @Test
+    public void testAdditionNotExists() throws Throwable
+    {
+        try (Cluster cluster = init(Cluster.create(3)))
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int PRIMARY KEY, a int, b text)");
+
+            // in this context a missing partition/row looks the same as a missing column, so to simplify the required
+            // LWT condition, add a row with null columns so we can rely on IF EXISTS
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk) VALUES (1)", ConsistencyLevel.QUORUM);
+
+            // n = n + value where n = null
+            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET a = a + 1, b = b + 'fail' WHERE pk = 1 IF EXISTS", ConsistencyLevel.QUORUM);
+            // every SET should no-op because the existing values are null
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, null, null));
+
+            // this section tests current limitations... if these assertions start failing because the limitations have been lifted, update this test to cover those cases
+            Assertions.assertThatThrownBy(() -> cluster.coordinator(1).execute(batch(
+                      "INSERT INTO " + KEYSPACE + ".tbl (pk, a, b) VALUES (1, 0, '') IF NOT EXISTS",
+                      "UPDATE " + KEYSPACE + ".tbl SET a = a + 1, b = b + 'success' WHERE pk = 1 IF EXISTS"
+                      ), ConsistencyLevel.QUORUM))
+                      .is(AssertionUtils.is(InvalidRequestException.class))
+                      .hasMessage("Cannot mix IF EXISTS and IF NOT EXISTS conditions for the same row");
+            Assertions.assertThatThrownBy(() -> cluster.coordinator(1).execute(batch(
+                      "INSERT INTO " + KEYSPACE + ".tbl (pk, a, b) VALUES (1, 0, '') IF NOT EXISTS",
+
+                      "UPDATE " + KEYSPACE + ".tbl SET a = a + 1, b = b + 'success' WHERE pk = 1"
+                      ), ConsistencyLevel.QUORUM))
+                      .is(AssertionUtils.is(InvalidRequestException.class))
+                      .hasMessage("Invalid operation (a = a + 1) for non counter column a");
+
+            // since CAS doesn't allow the above cases, manually add the data to unblock...
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, a, b) VALUES (1, 0, '')", ConsistencyLevel.QUORUM);
+
+            // have cas add defaults when missing
+            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET a = a + 1, b = b + 'success' WHERE pk = 1 IF EXISTS", ConsistencyLevel.QUORUM);
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 1, "success"));
+        }
+    }
+
+    @Test
+    public void testConcat() throws Throwable
+    {
+        try (Cluster cluster = init(Cluster.create(3)))
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int PRIMARY KEY, v text)");
+
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, v) VALUES (1, 'foo') IF NOT EXISTS", ConsistencyLevel.QUORUM);
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, "foo"));
+
+            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = v + 'bar' WHERE pk = 1 IF v = 'foobar'", ConsistencyLevel.QUORUM);
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, "foo"));
+
+            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = v + 'bar' WHERE pk = 1 IF v = 'foo'", ConsistencyLevel.QUORUM);
+            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, "foobar"));
+        }
+    }
+
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CASCommonTestCases.java b/test/distributed/org/apache/cassandra/distributed/test/CASCommonTestCases.java
new file mode 100644
index 0000000..151abb7
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/CASCommonTestCases.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IMessageFilters;
+import org.apache.cassandra.exceptions.CasWriteTimeoutException;
+
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PROPOSE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_PROPOSE_REQ;
+
+public abstract class CASCommonTestCases extends CASTestBase
+{
+    protected abstract Cluster getCluster();
+
+    @Test
+    public void simpleUpdate() throws Throwable
+    {
+        String tableName = tableName();
+        String fullTableName = KEYSPACE + "." + tableName;
+        getCluster().schemaChange("CREATE TABLE " + fullTableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        getCluster().coordinator(1).execute("INSERT INTO " + fullTableName + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+        assertRows(getCluster().coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL),
+                   row(1, 1, 1));
+        getCluster().coordinator(1).execute("UPDATE " + fullTableName + " SET v = 3 WHERE pk = 1 and ck = 1 IF v = 2", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+        assertRows(getCluster().coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL),
+                   row(1, 1, 1));
+        getCluster().coordinator(1).execute("UPDATE " + fullTableName + " SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+        assertRows(getCluster().coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL),
+                   row(1, 1, 2));
+    }
+
+    @Test
+    public void incompletePrepare() throws Throwable
+    {
+        String tableName = tableName();
+        String fullTableName = KEYSPACE + "." + tableName;
+        getCluster().schemaChange("CREATE TABLE " + fullTableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        IMessageFilters.Filter drop = getCluster().filters().verbs(PAXOS2_PREPARE_REQ.id, PAXOS_PREPARE_REQ.id).from(1).to(2, 3).drop();
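+        // dropping prepare messages from node 1 to nodes 2 and 3 prevents a quorum of promises, so the CAS below is expected to time out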
+        try
+        {
+            getCluster().coordinator(1).execute("INSERT INTO " + fullTableName + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+            Assert.assertTrue(false);
+        }
+        catch (RuntimeException t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw new AssertionError(t);
+        }
+        drop.off();
+        getCluster().coordinator(1).execute("UPDATE " + fullTableName + " SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+        assertRows(getCluster().coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL));
+    }
+
+    @Test
+    public void incompletePropose() throws Throwable
+    {
+        String tableName = tableName();
+        String fullTableName = KEYSPACE + "." + tableName;
+        getCluster().schemaChange("CREATE TABLE " + fullTableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        IMessageFilters.Filter drop1 = getCluster().filters().verbs(PAXOS2_PROPOSE_REQ.id, PAXOS_PROPOSE_REQ.id).from(1).to(2, 3).drop();
+        try
+        {
+            getCluster().coordinator(1).execute("INSERT INTO " + fullTableName + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+            Assert.assertTrue(false);
+        }
+        catch (RuntimeException t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw new AssertionError(t);
+        }
+        drop1.off();
+        // make sure we encounter one of the in-progress proposals so we complete it
+        drop(getCluster(), 1, to(2), to(), to());
+        getCluster().coordinator(1).execute("UPDATE " + fullTableName + " SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+        assertRows(getCluster().coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL),
+                   row(1, 1, 2));
+    }
+
+    @Test
+    public void incompleteCommit() throws Throwable
+    {
+        String tableName = tableName();
+        String fullTableName = KEYSPACE + "." + tableName;
+        getCluster().schemaChange("CREATE TABLE " + fullTableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        try (AutoCloseable drop = drop(getCluster(), 1, to(), to(), to(2, 3)))
+        {
+            getCluster().coordinator(1).execute("INSERT INTO " + fullTableName + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+            Assert.assertTrue(false);
+        }
+        catch (RuntimeException t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw new AssertionError(t);
+        }
+
+        // make sure we see one of the successful commits
+        try (AutoCloseable drop = drop(getCluster(), 1, to(2), to(2), to()))
+        {
+            getCluster().coordinator(1).execute("UPDATE " + fullTableName + " SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM);
+            assertRows(getCluster().coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL),
+                       row(1, 1, 2));
+        }
+    }
+
+    /**
+     *  - Prepare A to {1, 2, 3}
+     *  - Propose A to {1}
+     */
+    @Test
+    public void testRepairIncompletePropose() throws Throwable
+    {
+        String tableName = tableName();
+        String fullTableName = KEYSPACE + "." + tableName;
+        getCluster().schemaChange("CREATE TABLE " + fullTableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        for (int repairWithout = 1 ; repairWithout <= 3 ; ++repairWithout)
+        {
+            try (AutoCloseable drop = drop(getCluster(), 1, to(), to(2, 3), to()))
+            {
+                getCluster().coordinator(1).execute("INSERT INTO " + fullTableName + " (pk, ck, v) VALUES (?, 1, 1) IF NOT EXISTS", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM, repairWithout);
+                Assert.assertTrue(false);
+            }
+            catch (RuntimeException t)
+            {
+                if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                    throw new AssertionError(t);
+            }
+            int repairWith = repairWithout == 3 ? 2 : 3;
+            repair(getCluster(), tableName, repairWithout, repairWith, repairWithout);
+
+            try (AutoCloseable drop = drop(getCluster(), repairWith, to(repairWithout), to(), to()))
+            {
+                Object[][] rows = getCluster().coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = ?", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM, repairWithout);
+                if (repairWithout == 1) assertRows(rows); // invalidated
+                else assertRows(rows, row(repairWithout, 1, 1)); // finished
+            }
+        }
+    }
+
+    /**
+     *  - Prepare A to {1, 2, 3}
+     *  - Propose A to {1, 2}
+     *  -  Commit A to {1}
+     *  - Repair using {2, 3}
+     */
+    @Test
+    public void testRepairIncompleteCommit() throws Throwable
+    {
+        String tableName = tableName();
+        String fullTableName = KEYSPACE + "." + tableName;
+        getCluster().schemaChange("CREATE TABLE " + fullTableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        for (int repairWithout = 1 ; repairWithout <= 3 ; ++repairWithout)
+        {
+            try (AutoCloseable drop = drop(getCluster(), 1, to(), to(3), to(2, 3)))
+            {
+                getCluster().coordinator(1).execute("INSERT INTO " + fullTableName + " (pk, ck, v) VALUES (?, 1, 1) IF NOT EXISTS", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM, repairWithout);
+                Assert.assertTrue(false);
+            }
+            catch (RuntimeException t)
+            {
+                if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                    throw new AssertionError(t);
+            }
+
+            int repairWith = repairWithout == 3 ? 2 : 3;
+            repair(getCluster(), tableName, repairWithout, repairWith, repairWithout);
+            try (AutoCloseable drop = drop(getCluster(), repairWith, to(repairWithout), to(), to()))
+            {
+                //TODO the dtest API is missing an assertRows variant that takes a message... removed "" + repairWithout,
+                assertRows(getCluster().coordinator(repairWith).execute("SELECT * FROM " + fullTableName + " WHERE pk = ?", org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM, repairWithout),
+                           row(repairWithout, 1, 1));
+            }
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CASContentionTest.java b/test/distributed/org/apache/cassandra/distributed/test/CASContentionTest.java
new file mode 100644
index 0000000..aafbc45
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/CASContentionTest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.service.paxos.ContentionStrategy;
+import org.apache.cassandra.utils.FBUtilities;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REQ;
+
+public class CASContentionTest extends CASTestBase
+{
+    private static Cluster THREE_NODES;
+
+    @BeforeClass
+    public static void beforeClass() throws Throwable
+    {
+        System.setProperty("cassandra.paxos.use_self_execution", "false");
+        TestBaseImpl.beforeClass();
+        Consumer<IInstanceConfig> conf = config -> config
+                .set("paxos_variant", "v2")
+                .set("write_request_timeout_in_ms", 20000L)
+                .set("cas_contention_timeout_in_ms", 20000L)
+                .set("request_timeout_in_ms", 20000L);
+        THREE_NODES = init(Cluster.create(3, conf));
+    }
+
+    @AfterClass
+    public static void afterClass()
+    {
+        if (THREE_NODES != null)
+            THREE_NODES.close();
+    }
+
+    @Test
+    public void testDynamicContentionTracing() throws Throwable
+    {
+        try
+        {
+
+            String tableName = tableName("tbl");
+            THREE_NODES.schemaChange("CREATE TABLE " + KEYSPACE + '.' + tableName + " (pk int, v int, PRIMARY KEY (pk))");
+
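+            // Hold node1's Paxos prepare until node2's competing insert has invalidated it; with trace=1 the
+            // contention strategy should record the contended operation in system_traces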
+            CountDownLatch haveStarted = new CountDownLatch(1);
+            CountDownLatch haveInvalidated = new CountDownLatch(1);
+            THREE_NODES.verbs(PAXOS2_PREPARE_REQ).from(1).messagesMatching((from, to, verb) -> {
+                haveStarted.countDown();
+                Uninterruptibles.awaitUninterruptibly(haveInvalidated);
+                return false;
+            }).drop();
+            THREE_NODES.get(1).runOnInstance(() -> ContentionStrategy.setStrategy("trace=1"));
+            Future<?> insert = THREE_NODES.get(1).async(() -> {
+                THREE_NODES.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + tableName + " (pk, v) VALUES (1, 1) IF NOT EXISTS", QUORUM);
+            }).call();
+            haveStarted.await();
+            THREE_NODES.coordinator(2).execute("INSERT INTO " + KEYSPACE + '.' + tableName + " (pk, v) VALUES (1, 1) IF NOT EXISTS", QUORUM);
+            haveInvalidated.countDown();
+            THREE_NODES.filters().reset();
+            insert.get();
+            Uninterruptibles.sleepUninterruptibly(1L, TimeUnit.SECONDS);
+            THREE_NODES.forEach(i -> i.runOnInstance(() -> FBUtilities.waitOnFuture(Stage.TRACING.submit(() -> {}))));
+            Object[][] result = THREE_NODES.coordinator(1).execute("SELECT parameters FROM system_traces.sessions", QUORUM);
+            Assert.assertEquals(1, result.length);
+            Assert.assertEquals(1, result[0].length);
+            Assert.assertTrue(Map.class.isAssignableFrom(result[0][0].getClass()));
+            Map<?, ?> params = (Map<?, ?>) result[0][0];
+            Assert.assertEquals("SERIAL", params.get("consistency"));
+            Assert.assertEquals(tableName, params.get("table"));
+            Assert.assertEquals(KEYSPACE, params.get("keyspace"));
+            Assert.assertEquals("1", params.get("partitionKey"));
+            Assert.assertEquals("write", params.get("kind"));
+        }
+        finally
+        {
+            THREE_NODES.filters().reset();
+        }
+    }
+
+
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CASMultiDCTest.java b/test/distributed/org/apache/cassandra/distributed/test/CASMultiDCTest.java
new file mode 100644
index 0000000..e36f34b
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/CASMultiDCTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.service.paxos.PaxosCommit;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.*;
+
+public class CASMultiDCTest
+{
+    private static Cluster CLUSTER;
+    private static final String KEYSPACE = "ks";
+    private static final String TABLE = "tbl";
+    private static final String KS_TBL = KEYSPACE + '.' + TABLE;
+
+    private static final AtomicInteger nextKey = new AtomicInteger();
+
+    @BeforeClass
+    public static void beforeClass() throws Throwable
+    {
+        TestBaseImpl.beforeClass();
+        Consumer<IInstanceConfig> conf = config -> config
+                                                   .with(Feature.NETWORK)
+                                                   .set("paxos_variant", "v2")
+                                                   .set("write_request_timeout_in_ms", 1000L)
+                                                   .set("cas_contention_timeout_in_ms", 1000L)
+                                                   .set("request_timeout_in_ms", 1000L);
+
+        Cluster.Builder builder = new Cluster.Builder();
+        builder.withNodes(4);
+        builder.withDCs(2);
+        builder.withConfig(conf);
+        CLUSTER = builder.start();
+        CLUSTER.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':2};");
+        CLUSTER.schemaChange("CREATE TABLE " + KS_TBL + " (k int, c int, v int, primary key (k, v))");
+        CLUSTER.forEach(i -> i.runOnInstance(() -> {
+            Assert.assertTrue(PaxosCommit.getEnableDcLocalCommit()); // should be enabled by default
+        }));
+    }
+
+    @Before
+    public void setUp()
+    {
+        CLUSTER.forEach(i -> i.runOnInstance(() -> {
+            PaxosCommit.setEnableDcLocalCommit(true);
+        }));
+    }
+
+    private static void testLocalSerialCommit(ConsistencyLevel serialCL, ConsistencyLevel commitCL, int key, boolean expectRemoteCommit)
+    {
+        for (int i=0; i<CLUSTER.size(); i++)
+        {
+            CLUSTER.get(i + 1).runOnInstance(() -> {
+                UntypedResultSet result = QueryProcessor.executeInternal("SELECT * FROM system.paxos WHERE row_key=?", ByteBufferUtil.bytes(key));
+                Assert.assertEquals(0, result.size());
+            });
+        }
+
+        CLUSTER.coordinator(1).execute("INSERT INTO " + KS_TBL + " (k, c, v) VALUES (?, ?, ?) IF NOT EXISTS", serialCL, commitCL, key, key, key);
+
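+        // count replicas that hold paxos state for the key (numCommitted) and replicas that applied the base-table write (numWritten)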
+        int numCommitted = 0;
+        int numWritten = 0;
+        for (int i=0; i<CLUSTER.size(); i++)
+        {
+            boolean expectPaxosRows = expectRemoteCommit || i < 2;
+            int flags = CLUSTER.get(i + 1).callOnInstance(() -> {
+                int numPaxosRows = QueryProcessor.executeInternal("SELECT * FROM system.paxos WHERE row_key=?", ByteBufferUtil.bytes(key)).size();
+                Assert.assertTrue(numPaxosRows == 0 || numPaxosRows == 1);
+                if (!expectRemoteCommit)
+                    Assert.assertEquals(expectPaxosRows ? 1 : 0, numPaxosRows);
+                int numTableRows = QueryProcessor.executeInternal("SELECT * FROM " + KS_TBL + " WHERE k=?", ByteBufferUtil.bytes(key)).size();
+                Assert.assertTrue(numTableRows == 0 || numTableRows == 1);
+                return (numPaxosRows > 0 ? 1 : 0) | (numTableRows > 0 ? 2 : 0);
+            });
+            if ((flags & 1) != 0)
+                numCommitted++;
+            if ((flags & 2) != 0)
+                numWritten++;
+        }
+        Assert.assertTrue(String.format("numWritten: %s < 3", numWritten), numWritten >= 3);
+        if (expectRemoteCommit)
+            Assert.assertTrue(String.format("numCommitted: %s < 3", numCommitted), numCommitted >= 3);
+        else
+            Assert.assertEquals(2, numCommitted);
+    }
+
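+    // With dc-local commit enabled (the default), LOCAL_SERIAL rounds are expected to commit paxos state only in the
+    // coordinator's DC (exactly 2 paxos rows); SERIAL rounds, or LOCAL_SERIAL with the feature disabled, should
+    // reach a global quorum (at least 3 paxos rows).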
+    @Test
+    public void testLocalSerialLocalCommit()
+    {
+        testLocalSerialCommit(LOCAL_SERIAL, LOCAL_QUORUM, nextKey.getAndIncrement(), false);
+    }
+
+    @Test
+    public void testLocalSerialQuorumCommit()
+    {
+        testLocalSerialCommit(LOCAL_SERIAL, QUORUM, nextKey.getAndIncrement(), false);
+    }
+
+    @Test
+    public void testSerialLocalCommit()
+    {
+        testLocalSerialCommit(SERIAL, LOCAL_QUORUM, nextKey.getAndIncrement(), true);
+    }
+
+    @Test
+    public void testSerialQuorumCommit()
+    {
+        testLocalSerialCommit(SERIAL, QUORUM, nextKey.getAndIncrement(), true);
+    }
+
+    @Test
+    public void testDcLocalCommitDisabled()
+    {
+        CLUSTER.forEach(i -> i.runOnInstance(() -> {
+            PaxosCommit.setEnableDcLocalCommit(false);
+        }));
+        testLocalSerialCommit(LOCAL_SERIAL, QUORUM, nextKey.getAndIncrement(), true);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CASTest.java b/test/distributed/org/apache/cassandra/distributed/test/CASTest.java
index 91d26f8..8e2b8ac 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/CASTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/CASTest.java
@@ -19,261 +19,180 @@
 package org.apache.cassandra.distributed.test;
 
 import java.io.IOException;
-import java.util.UUID;
 import java.util.function.BiConsumer;
+import java.util.function.Consumer;
 
 
+import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.marshal.Int32Type;
-import org.apache.cassandra.dht.Murmur3Partitioner;
-import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.ICoordinator;
-import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
 import org.apache.cassandra.distributed.api.IMessageFilters;
-import org.apache.cassandra.distributed.impl.Instance;
-import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.exceptions.CasWriteTimeoutException;
 
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.ANY;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.ONE;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.LOCAL_QUORUM;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
-import static org.apache.cassandra.distributed.shared.AssertUtils.fail;
 import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+import static org.apache.cassandra.net.Verb.PAXOS2_COMMIT_AND_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REFRESH_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PROPOSE_REQ;
 import static org.apache.cassandra.net.Verb.PAXOS_COMMIT_REQ;
 import static org.apache.cassandra.net.Verb.PAXOS_PREPARE_REQ;
 import static org.apache.cassandra.net.Verb.PAXOS_PROPOSE_REQ;
 import static org.apache.cassandra.net.Verb.READ_REQ;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-public class CASTest extends TestBaseImpl
+public class CASTest extends CASCommonTestCases
 {
-    private static final Logger logger = LoggerFactory.getLogger(CASTest.class);
-
     /**
-     * The {@code cas_contention_timeout_in_ms} used during the tests
+     * The {@code cas_contention_timeout} used during the tests
      */
-    private static final long CONTENTION_TIMEOUT = 1000L;
+    private static final String CONTENTION_TIMEOUT = "1000ms";
 
     /**
-     * The {@code write_request_timeout_in_ms} used during the tests
+     * The {@code write_request_timeout} used during the tests
      */
-    private static final long REQUEST_TIMEOUT = 1000L;
+    private static final String REQUEST_TIMEOUT = "1000ms";
 
-    @Test
-    public void simpleUpdate() throws Throwable
+    private static Cluster THREE_NODES;
+    private static Cluster FOUR_NODES;
+
+    @BeforeClass
+    public static void beforeClass() throws Throwable
     {
-        try (Cluster cluster = init(Cluster.create(3)))
-        {
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
-
-            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
-            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL),
-                    row(1, 1, 1));
-            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = 3 WHERE pk = 1 and ck = 1 IF v = 2", ConsistencyLevel.QUORUM);
-            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL),
-                    row(1, 1, 1));
-            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", ConsistencyLevel.QUORUM);
-            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL),
-                    row(1, 1, 2));
-        }
+        System.setProperty("cassandra.paxos.use_self_execution", "false");
+        TestBaseImpl.beforeClass();
+        Consumer<IInstanceConfig> conf = config -> config
+                                                   .set("paxos_variant", "v2")
+                                                   .set("write_request_timeout", REQUEST_TIMEOUT)
+                                                   .set("cas_contention_timeout", CONTENTION_TIMEOUT)
+                                                   .set("request_timeout", REQUEST_TIMEOUT);
+        // TODO: fails with vnode enabled
+        THREE_NODES = init(Cluster.build(3).withConfig(conf).withoutVNodes().start());
+        FOUR_NODES = init(Cluster.build(4).withConfig(conf).withoutVNodes().start(), 3);
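+        // initialise the four-node cluster with replication factor 3 so that the range-movement tests below can
+        // shift ownership of a token onto node 4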
     }
 
-    @Test
-    public void incompletePrepare() throws Throwable
+    @AfterClass
+    public static void afterClass()
     {
-        try (Cluster cluster = init(Cluster.create(3, config -> config.set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                                                                      .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT))))
-        {
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
-
-            IMessageFilters.Filter drop = cluster.filters().verbs(PAXOS_PREPARE_REQ.id).from(1).to(2, 3).drop();
-            try
-            {
-                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
-                Assert.fail();
-            }
-            catch (RuntimeException e)
-            {
-                Assert.assertEquals("CAS operation timed out - encountered contentions: 0", e.getMessage());
-            }
-            drop.off();
-            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", ConsistencyLevel.QUORUM);
-            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL));
-        }
+        if (THREE_NODES != null)
+            THREE_NODES.close();
+        if (FOUR_NODES != null)
+            FOUR_NODES.close();
     }
 
-    @Test
-    public void incompletePropose() throws Throwable
+    @Before
+    public void before()
     {
-        try (Cluster cluster = init(Cluster.create(3, config -> config.set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                                                                      .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT))))
+        THREE_NODES.filters().reset();
+        FOUR_NODES.filters().reset();
+        // tests add/remove nodes from the ring, so attempt to add them back
+        for (int i = 1 ; i <= 4 ; ++i)
         {
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
-
-            IMessageFilters.Filter drop1 = cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(2, 3).drop();
-            try
+            for (int j = 1; j <= 4; j++)
             {
-                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
-                Assert.fail();
+                FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingNormal).accept(FOUR_NODES.get(j));
             }
-            catch (RuntimeException e)
-            {
-                Assert.assertEquals("CAS operation timed out - encountered contentions: 0", e.getMessage());
-            }
-            drop1.off();
-            // make sure we encounter one of the in-progress proposals so we complete it
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id).from(1).to(2).drop();
-            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", ConsistencyLevel.QUORUM);
-            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL),
-                    row(1, 1, 2));
         }
     }
 
-    @Test
-    public void incompleteCommit() throws Throwable
-    {
-        try (Cluster cluster = init(Cluster.create(3, config -> config.set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                                                                      .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT))))
-        {
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
-
-            IMessageFilters.Filter drop1 = cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(2, 3).drop();
-            try
-            {
-                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
-                Assert.fail();
-            }
-            catch (RuntimeException e)
-            {
-                Assert.assertEquals("CAS operation timed out - encountered contentions: 0", e.getMessage());
-            }
-            drop1.off();
-            // make sure we see one of the successful commits
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(2).drop();
-            cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", ConsistencyLevel.QUORUM);
-            assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL),
-                    row(1, 1, 2));
-        }
-    }
-
-    private int[] paxosAndReadVerbs() {
-        return new int[] { PAXOS_PREPARE_REQ.id, PAXOS_PROPOSE_REQ.id, PAXOS_COMMIT_REQ.id, READ_REQ.id };
-    }
-
     /**
-     * Base test to ensure that if a write times out but with a proposal accepted by some nodes (less then quorum), and
-     * a following SERIAL operation does not observe that write (the node having accepted it do not participate in that
-     * following operation), then that write is never applied, even when the nodes having accepted the original proposal
-     * participate.
+     * A write and a read that are able to witness different (i.e. non-linearizable) histories
+     * See CASSANDRA-12126
      *
-     * <p>In other words, if an operation timeout, it may or may not be applied, but that "fate" is persistently decided
-     * by the very SERIAL operation that "succeed" (in the sense of 'not timing out or throwing some other exception').
-     *
-     * @param postTimeoutOperation1 a SERIAL operation executed after an initial write that inserts the row [0, 0] times
-     *                              out. It is executed with a QUORUM of nodes that have _not_ see the timed out
-     *                              proposal, and so that operation should expect that the [0, 0] write has not taken
-     *                              place.
-     * @param postTimeoutOperation2 a 2nd SERIAL operation executed _after_ {@code postTimeoutOperation1}, with no
-     *                              write executed between the 2 operation. Contrarily to the 1st operation, the QORUM
-     *                              for this operation _will_ include the node that got the proposal for the [0, 0]
-     *                              insert but didn't participated to {@code postTimeoutOperation1}}. That operation
-     *                              should also no witness that [0, 0] write (since {@code postTimeoutOperation1}
-     *                              didn't).
-     * @param loseCommitOfOperation1 if {@code true}, the test will also drop the "commits" messages for
-     *                               {@code postTimeoutOperation1}. In general, the test should behave the same with or
-     *                               without that flag since a value is decided as soon as it has been "accepted by
-     *                               quorum" and the commits should always be properly replayed.
+     *  - A Promised by {1, 2, 3}
+     *  - A Accepted by {1}
+     *  - B (=>!A) Promised and Proposed to {2, 3}
+     *  - Read from (or attempt C (=>!B)) to {1, 2} -> witness either A or B, not both
      */
-    private void consistencyAfterWriteTimeoutTest(BiConsumer<String, ICoordinator> postTimeoutOperation1,
-                                                  BiConsumer<String, ICoordinator> postTimeoutOperation2,
-                                                  boolean loseCommitOfOperation1) throws IOException
+    @Test
+    public void testIncompleteWriteSupersededByConflictingRejectedCondition() throws Throwable
     {
-        // It's unclear why (haven't dug), but in some of the instance of this test method, there is a consistent 2+
-        // seconds pauses between the prepare and propose phases during the execution of 'postTimeoutOperation2'. This
-        // does not happen on 3.0 and there is no report of such long pauses otherwise, so an hypothesis is that this
-        // is due to the in-jvm dtest framework. This is is why we use a 4 seconds timeout here. Given this test is
-        // not about performance, this is probably ok, even if we ideally should dug into the underlying reason.
-        try (Cluster cluster = init(Cluster.create(3, config -> config.set("write_request_timeout_in_ms", 4000L)
-                                                                      .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT))))
+        String tableName = tableName("tbl");
+        THREE_NODES.schemaChange("CREATE TABLE " + KEYSPACE + "." + tableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        IMessageFilters.Filter drop1 = THREE_NODES.filters().verbs(PAXOS2_PROPOSE_REQ.id, PAXOS_PROPOSE_REQ.id).from(1).to(2, 3).drop();
+        try
         {
-            String table = KEYSPACE + ".t";
-            cluster.schemaChange("CREATE TABLE " + table + " (k int PRIMARY KEY, v int)");
-
-            // We do a CAS insertion, but have with the PROPOSE message dropped on node 1 and 2. The CAS will not get
-            // through and should timeout. Importantly, node 3 does receive and answer the PROPOSE.
-            IMessageFilters.Filter dropProposeFilter = cluster.filters()
-                                                              .inbound()
-                                                              .verbs(PAXOS_PROPOSE_REQ.id)
-                                                              .from(3)
-                                                              .to(1, 2)
-                                                              .drop();
-            try
-            {
-                // NOTE: the consistency below is the "commit" one, so it doesn't matter at all here.
-                // NOTE 2: we use node 3 as coordinator because message filters don't currently work for locally
-                //   delivered messages and as we want to drop messages to 1 and 2, we can't use them.
-                cluster.coordinator(3)
-                       .execute("INSERT INTO " + table + "(k, v) VALUES (0, 0) IF NOT EXISTS", ConsistencyLevel.ONE);
-                fail("The insertion should have timed-out");
-            }
-            catch (Exception e)
-            {
-                // We expect a write timeout. If we get one, the test can continue, otherwise, we rethrow. Note that we
-                // look at the root cause because the dtest framework effectively wrap the exception in a RuntimeException
-                // (we could just look at the immediate cause, but this feel a bit more resilient this way).
-                // TODO: we can't use an instanceof below because the WriteTimeoutException we get is from a different class
-                //  loader than the one the test run under, and that's our poor-man work-around. This kind of things should
-                //  be improved at the dtest API level.
-                if (!e.getClass().getSimpleName().equals("CasWriteTimeoutException"))
-                    throw e;
-            }
-            finally
-            {
-                dropProposeFilter.off();
-            }
-
-            // Isolates node 3 and executes the SERIAL operation. As neither node 1 or 2 got the initial insert proposal,
-            // there is nothing to "replay" and the operation should assert the table is still empty.
-            IMessageFilters.Filter ignoreNode3Filter = cluster.filters().verbs(paxosAndReadVerbs()).to(3).drop();
-            IMessageFilters.Filter dropCommitFilter = null;
-            if (loseCommitOfOperation1)
-            {
-                dropCommitFilter = cluster.filters().verbs(PAXOS_COMMIT_REQ.id).to(1, 2).drop();
-            }
-            try
-            {
-                postTimeoutOperation1.accept(table, cluster.coordinator(1));
-            }
-            finally
-            {
-                ignoreNode3Filter.off();
-                if (dropCommitFilter != null)
-                    dropCommitFilter.off();
-            }
-
-            // Node 3 is now back and we isolate node 2 to ensure the next read hits node 1 and 3.
-            // What we want to ensure is that despite node 3 having the initial insert in its paxos state in a position of
-            // being replayed, that insert is _not_ replayed (it would contradict serializability since the previous
-            // operation asserted nothing was inserted). It is this execution that failed before CASSANDRA-12126.
-            IMessageFilters.Filter ignoreNode2Filter = cluster.filters().verbs(paxosAndReadVerbs()).to(2).drop();
-            try
-            {
-                postTimeoutOperation2.accept(table, cluster.coordinator(1));
-            }
-            finally
-            {
-                ignoreNode2Filter.off();
-            }
+            THREE_NODES.coordinator(1).execute("INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", QUORUM);
+            fail();
         }
+        catch (Throwable t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw t;
+        }
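+        // drop(...) is defined in CASTestBase; the three to(...) groups appear to name the nodes to drop prepare,
+        // propose and commit messages to, respectively (here node 2 cannot reach node 1 for prepare or propose)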
+        drop(THREE_NODES, 2, to(1), to(1), to());
+        assertRows(THREE_NODES.coordinator(2).execute("UPDATE " + KEYSPACE + "." + tableName + " SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", QUORUM),
+                   row(false));
+        drop1.off();
+        drop(THREE_NODES, 1, to(2), to(), to());
+        assertRows(THREE_NODES.coordinator(1).execute("SELECT * FROM " + KEYSPACE + "." + tableName + " WHERE pk = 1", SERIAL));
+    }
+
+    /**
+     * Two reads that are able to witness different (i.e. non-linearizable) histories
+     *  - A Promised by {1, 2, 3}
+     *  - A Accepted by {1}
+     *  - Read from {2, 3} -> do not witness A?
+     *  - Read from {1, 2} -> witnesses A?
+     * See CASSANDRA-12126
+     */
+    @Ignore
+    @Test
+    public void testIncompleteWriteSupersededByRead() throws Throwable
+    {
+        String tableName = tableName();
+        String fullTableName = KEYSPACE + "." + tableName;
+        THREE_NODES.schemaChange("CREATE TABLE " + fullTableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        IMessageFilters.Filter drop1 = THREE_NODES.filters().verbs(PAXOS2_PROPOSE_REQ.id, PAXOS_PROPOSE_REQ.id).from(1).to(2, 3).drop();
+        try
+        {
+            THREE_NODES.coordinator(1).execute("INSERT INTO " + fullTableName + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", QUORUM);
+            fail();
+        }
+        catch (Throwable t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw t;
+        }
+        drop(THREE_NODES, 2, to(1), to(), to());
+        assertRows(THREE_NODES.coordinator(2).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", SERIAL));
+        drop1.off();
+
+        drop(THREE_NODES, 1, to(2), to(), to());
+        assertRows(THREE_NODES.coordinator(1).execute("SELECT * FROM " + fullTableName + " WHERE pk = 1", SERIAL));
+    }
+
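+    // every verb involved in a paxos operation (both legacy and v2 message types) plus plain reads; dropping all of
+    // these to a node effectively excludes it from any quorum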
+    private static int[] paxosAndReadVerbs()
+    {
+        return new int[] {
+            PAXOS_PREPARE_REQ.id,
+            PAXOS2_PREPARE_REQ.id,
+            PAXOS2_PREPARE_REFRESH_REQ.id,
+            PAXOS2_COMMIT_AND_PREPARE_REQ.id,
+            PAXOS_PROPOSE_REQ.id,
+            PAXOS2_PROPOSE_REQ.id,
+            PAXOS_COMMIT_REQ.id,
+            READ_REQ.id
+        };
     }
 
     /**
@@ -286,11 +205,73 @@
     public void readConsistencyAfterWriteTimeoutTest() throws IOException
     {
         BiConsumer<String, ICoordinator> operation =
-            (table, coordinator) -> assertRows(coordinator.execute("SELECT * FROM " + table + " WHERE k=0",
-                                                                   ConsistencyLevel.SERIAL));
+                (table, coordinator) -> assertRows(coordinator.execute("SELECT * FROM " + table + " WHERE k=0",
+                        SERIAL));
 
-        consistencyAfterWriteTimeoutTest(operation, operation, false);
-        consistencyAfterWriteTimeoutTest(operation, operation, true);
+        consistencyAfterWriteTimeoutTest(operation, operation, false, THREE_NODES);
+        consistencyAfterWriteTimeoutTest(operation, operation, true, THREE_NODES);
+    }
+
+    /**
+     * Tests that a sequence of reads exploits the fast read optimisation, as does a failed write, but that
+     * a read after a failed write that does not propose successfully does not
+     */
+    @Test
+    public void fastReadsAndFailedWrites() throws IOException
+    {
+        String tableName = tableName("t");
+        String table = KEYSPACE + "." + tableName;
+        THREE_NODES.schemaChange("CREATE TABLE " + table + " (k int PRIMARY KEY, v int)");
+
+        // We do a CAS insertion, but with the PROPOSE (and COMMIT) messages dropped to nodes 1 and 2, so the CAS
+        // cannot reach a quorum of acceptors. Importantly, node 3 does receive and answer the PROPOSE.
+        IMessageFilters.Filter dropProposeFilter = THREE_NODES.filters()
+                                                              .verbs(PAXOS_PROPOSE_REQ.id, PAXOS2_PROPOSE_REQ.id,
+                                                                     PAXOS_COMMIT_REQ.id, PAXOS2_COMMIT_AND_PREPARE_REQ.id)
+                                                              .to(1, 2)
+                                                              .drop();
+
+        try
+        {
+            // shouldn't timeout
+            THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+            THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+            THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+            THREE_NODES.coordinator(1).execute("UPDATE " + table + " SET v = 1 WHERE k = 1 IF EXISTS", ANY);
+            try
+            {
+                THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+                Assert.fail();
+            }
+            catch (AssertionError propagate)
+            {
+                throw propagate;
+            }
+            catch (Throwable maybeIgnore)
+            {
+                if (!maybeIgnore.getClass().getSimpleName().equals("ReadTimeoutException"))
+                    throw maybeIgnore;
+            }
+        }
+        finally
+        {
+            dropProposeFilter.off();
+        }
+
+        THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+
+        try
+        {
+            dropProposeFilter.on();
+            // shouldn't timeout
+            THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+            THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+            THREE_NODES.coordinator(1).execute("SELECT * FROM " + table + " WHERE k = 1", SERIAL);
+        }
+        finally
+        {
+            dropProposeFilter.off();
+        }
     }
 
     /**
@@ -306,10 +287,10 @@
         // Note: we use CL.ANY so that the operation doesn't time out in the case where we "lost" the operation1 commits.
         // The commit CL shouldn't have an impact on this test anyway, so this doesn't diminish the test.
         BiConsumer<String, ICoordinator> operation =
-            (table, coordinator) -> assertCasNotApplied(coordinator.execute("UPDATE " + table + " SET v = 1 WHERE k = 0 IF v = 0",
-                                                                            ConsistencyLevel.ANY));
-        consistencyAfterWriteTimeoutTest(operation, operation, false);
-        consistencyAfterWriteTimeoutTest(operation, operation, true);
+                (table, coordinator) -> assertCasNotApplied(coordinator.execute("UPDATE " + table + " SET v = 1 WHERE k = 0 IF v = 0",
+                        ANY));
+        consistencyAfterWriteTimeoutTest(operation, operation, false, THREE_NODES);
+        consistencyAfterWriteTimeoutTest(operation, operation, true, THREE_NODES);
     }
 
     /**
@@ -322,13 +303,13 @@
     public void mixedReadAndNonApplyingCasConsistencyAfterWriteTimeout() throws IOException
     {
         BiConsumer<String, ICoordinator> operation1 =
-            (table, coordinator) -> assertRows(coordinator.execute("SELECT * FROM " + table + " WHERE k=0",
-                                                                   ConsistencyLevel.SERIAL));
+                (table, coordinator) -> assertRows(coordinator.execute("SELECT * FROM " + table + " WHERE k=0",
+                        SERIAL));
         BiConsumer<String, ICoordinator> operation2 =
-            (table, coordinator) -> assertCasNotApplied(coordinator.execute("UPDATE " + table + " SET v = 1 WHERE k = 0 IF v = 0",
-                                                                            ConsistencyLevel.QUORUM));
-        consistencyAfterWriteTimeoutTest(operation1, operation2, false);
-        consistencyAfterWriteTimeoutTest(operation1, operation2, true);
+                (table, coordinator) -> assertCasNotApplied(coordinator.execute("UPDATE " + table + " SET v = 1 WHERE k = 0 IF v = 0",
+                        QUORUM));
+        consistencyAfterWriteTimeoutTest(operation1, operation2, false, THREE_NODES);
+        consistencyAfterWriteTimeoutTest(operation1, operation2, true, THREE_NODES);
     }
 
     /**
@@ -344,104 +325,116 @@
         // Note: we use CL.ANY so that the operation doesn't time out in the case where we "lost" the operation1 commits.
         // The commit CL shouldn't have an impact on this test anyway, so this doesn't diminish the test.
         BiConsumer<String, ICoordinator> operation1 =
-            (table, coordinator) -> assertCasNotApplied(coordinator.execute("UPDATE " + table + " SET v = 1 WHERE k = 0 IF v = 0",
-                                                                            ConsistencyLevel.ANY));
+                (table, coordinator) -> assertCasNotApplied(coordinator.execute("UPDATE " + table + " SET v = 1 WHERE k = 0 IF v = 0",
+                        ANY));
         BiConsumer<String, ICoordinator> operation2 =
-            (table, coordinator) -> assertRows(coordinator.execute("SELECT * FROM " + table + " WHERE k=0",
-                                                                   ConsistencyLevel.SERIAL));
-        consistencyAfterWriteTimeoutTest(operation1, operation2, false);
-        consistencyAfterWriteTimeoutTest(operation1, operation2, true);
+                (table, coordinator) -> assertRows(coordinator.execute("SELECT * FROM " + table + " WHERE k=0",
+                        SERIAL));
+        consistencyAfterWriteTimeoutTest(operation1, operation2, false, THREE_NODES);
+        consistencyAfterWriteTimeoutTest(operation1, operation2, true, THREE_NODES);
     }
 
     // TODO: this should probably be moved into the dtest API.
     private void assertCasNotApplied(Object[][] resultSet)
     {
         assertFalse("Expected a CAS resultSet (with at least application result) but got an empty one.",
-                    resultSet.length == 0);
+                resultSet.length == 0);
         assertFalse("Invalid empty first row in CAS resultSet.", resultSet[0].length == 0);
         Object wasApplied = resultSet[0][0];
         assertTrue("Expected 1st column of CAS resultSet to be a boolean, but got a " + wasApplied.getClass(),
-                   wasApplied instanceof Boolean);
+                wasApplied instanceof Boolean);
         assertFalse("Expected CAS to not be applied, but was applied.", (Boolean)wasApplied);
     }
 
-    /**
-     * Failed write (by node that did not yet witness a range movement via gossip) is witnessed later as successful
-     * conflicting with another successful write performed by a node that did witness the range movement
-     * Prepare, Propose and Commit A to {1, 2}
-     * Range moves to {2, 3, 4}
-     * Prepare and Propose B (=> !A) to {3, 4}
-     */
-    @Ignore
+
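+    // these tests drop messages aggressively, so an otherwise-correct CAS can time out spuriously; retry a bounded
+    // number of times before letting the exception propagate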
+    private static Object[][] executeWithRetry(int attempts, ICoordinator coordinator, String query, ConsistencyLevel consistencyLevel, Object... boundValues)
+    {
+        while (--attempts > 0)
+        {
+            try
+            {
+                return coordinator.execute(query, consistencyLevel, boundValues);
+            }
+            catch (Throwable t)
+            {
+                if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                    throw t;
+                FBUtilities.sleepQuietly(100);
+            }
+        }
+        return coordinator.execute(query, consistencyLevel, boundValues);
+    }
+
+    private static Object[][] executeWithRetry(ICoordinator coordinator, String query, ConsistencyLevel consistencyLevel, Object... boundValues)
+    {
+        return executeWithRetry(2, coordinator, query, consistencyLevel, boundValues);
+    }
+
+    // failed write (by node that did not yet witness a range movement via gossip) is witnessed later as successful
+    // conflicting with another successful write performed by a node that did witness the range movement
+    // A Promised, Accepted and Committed by {1, 2}
+    // Range moves to {2, 3, 4}
+    // B (=> !A) Promised and Proposed to {3, 4}
     @Test
     public void testSuccessfulWriteBeforeRangeMovement() throws Throwable
     {
-        try (Cluster cluster = Cluster.create(4, config -> config
-                .set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT)))
+        String tableName = tableName("tbl");
+        FOUR_NODES.schemaChange("CREATE TABLE " + KEYSPACE + "." + tableName + " (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
+
+        // make it so {1} is unaware (yet) that {4} is an owner of the token
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::removeFromRing).accept(FOUR_NODES.get(4));
+
+        int pk = pk(FOUR_NODES, 1, 2);
+
+        // {1} promises and accepts on !{3} => {1, 2}; commits on !{2,3} => {1}
+        drop(FOUR_NODES, 1, to(3), to(3), to(2, 3));
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
+        assertRows(executeWithRetry(FOUR_NODES.coordinator(1), "INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ONE, pk),
+                   row(true));
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+
+        for (int i = 1; i <= 3; ++i)
         {
-            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
-
-            // make it so {1} is unaware (yet) that {4} is an owner of the token
-            cluster.get(1).acceptsOnInstance(Instance::removeFromRing).accept(cluster.get(4));
-
-            int pk = pk(cluster, 1, 2);
-
-            // {1} promises and accepts on !{3} => {1, 2}; commits on !{2,3} => {1}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(2, 3).drop();
-            assertRows(cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(true));
-
-            for (int i = 1 ; i <= 3 ; ++i)
-                cluster.get(i).acceptsOnInstance(Instance::addToRingNormal).accept(cluster.get(4));
-
-            // {4} reads from !{2} => {3, 4}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(4).to(2).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(4).to(2).drop();
-            assertRows(cluster.coordinator(4).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(false, pk, 1, 1, null));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingNormal).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
         }
+
+        // {4} reads from !{2} => {3, 4}
+        drop(FOUR_NODES, 4, to(2), to(2), to());
+        assertRows(executeWithRetry(FOUR_NODES.coordinator(4), "INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ONE, pk),
+                   row(false, pk, 1, 1, null));
     }
 
     /**
      * Failed write (by node that did not yet witness a range movement via gossip) is witnessed later as successful
      * conflicting with another successful write performed by a node that did witness the range movement
      *  - Range moves from {1, 2, 3} to {2, 3, 4}, witnessed by X (not by !X)
-     *  -  X: Prepare, Propose and Commit A to {3, 4}
-     *  - !X: Prepare and Propose B (=>!A) to {1, 2}
+     *  -  X: A Promised, Accepted and Committed by {3, 4}
+     *  - !X: B (=>!A) Promised and Proposed to {1, 2}
      */
-    @Ignore
     @Test
     public void testConflictingWritesWithStaleRingInformation() throws Throwable
     {
-        try (Cluster cluster = Cluster.create(4, config -> config
-                .set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT)))
-        {
-            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
+        String tableName = tableName("tbl");
+        FOUR_NODES.schemaChange("CREATE TABLE " + KEYSPACE + "." + tableName + " (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
 
-            // make it so {1} is unaware (yet) that {4} is an owner of the token
-            cluster.get(1).acceptsOnInstance(Instance::removeFromRing).accept(cluster.get(4));
+        // make it so {1} is unaware (yet) that {4} is an owner of the token
+        FOUR_NODES.get(1).acceptOnInstance(CASTestBase::removeFromRing, FOUR_NODES.get(4));
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
 
-            // {4} promises, accepts and commits on !{2} => {3, 4}
-            int pk = pk(cluster, 1, 2);
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(4).to(2).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(4).to(2).drop();
-            cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(4).to(2).drop();
-            assertRows(cluster.coordinator(4).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(true));
+        // {4} promises, accepts and commits on !{2} => {3, 4}
+        int pk = pk(FOUR_NODES, 1, 2);
+        drop(FOUR_NODES, 4, to(2), to(2), to(2));
+        assertRows(executeWithRetry(FOUR_NODES.coordinator(4), "INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ONE, pk),
+                   row(true));
 
-            // {1} promises, accepts and commmits on !{3} => {1, 2}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(3).drop();
-            assertRows(cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(false, pk, 1, 1, null));
-        }
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
+        // {1} promises, accepts and commits on !{3} => {1, 2}
+        drop(FOUR_NODES, 1, to(3), to(3), to(3));
+        assertRows(executeWithRetry(FOUR_NODES.coordinator(1), "INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ONE, pk),
+                   row(false, pk, 1, 1, null));
+
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
     }
 
     /**
@@ -449,90 +442,91 @@
      * Very similar to {@link #testConflictingWritesWithStaleRingInformation}.
      *
      *  - Range moves from {1, 2, 3} to {2, 3, 4}; witnessed by X (not by !X)
-     *  -  !X: Prepare and Propose to {1, 2}
+     *  -  !X: Promised and Accepted by {1, 2}
      *  - Range movement witnessed by !X
-     *  - Any: Prepare and Read from {3, 4}
+     *  - Any: Promised and Read from {3, 4}
      */
-    @Ignore
     @Test
     public void testSucccessfulWriteDuringRangeMovementFollowedByRead() throws Throwable
     {
-        try (Cluster cluster = Cluster.create(4, config -> config
-                .set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT)))
+        String tableName = tableName("tbl");
+        FOUR_NODES.schemaChange("CREATE TABLE " + KEYSPACE + "." + tableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+        // make it so {4} is bootstrapping, and this has propagated to only a quorum of other nodes
+        for (int i = 1 ; i <= 4 ; ++i)
         {
-            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
-
-            // make it so {4} is bootstrapping, and this has not propagated to other nodes yet
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(1).acceptsOnInstance(Instance::removeFromRing).accept(cluster.get(4));
-            cluster.get(4).acceptsOnInstance(Instance::addToRingBootstrapping).accept(cluster.get(4));
-
-            int pk = pk(cluster, 1, 2);
-
-            // {1} promises and accepts on !{3} => {1, 2}; commmits on !{2, 3} => {1}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(2, 3).drop();
-            assertRows(cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (?, 1, 1) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(true));
-
-            // finish topology change
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(i).acceptsOnInstance(Instance::addToRingNormal).accept(cluster.get(4));
-
-            // {3} reads from !{2} => {3, 4}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(3).to(2).drop();
-            assertRows(cluster.coordinator(3).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = ?", ConsistencyLevel.SERIAL, pk),
-                    row(pk, 1, 1));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::removeFromRing).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
         }
+        for (int i = 2 ; i <= 4 ; ++i)
+        {
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingBootstrapping).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+        }
+
+        int pk = pk(FOUR_NODES, 1, 2);
+
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
+        // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
+        drop(FOUR_NODES, 1, to(3), to(3), to(2, 3));
+        assertRows(executeWithRetry(FOUR_NODES.coordinator(1), "INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v) VALUES (?, 1, 1) IF NOT EXISTS", ONE, pk),
+                   row(true));
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+
+        // finish topology change
+        for (int i = 1 ; i <= 4 ; ++i)
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingNormal).accept(FOUR_NODES.get(4));
+
+        // {3} reads from !{2} => {3, 4}
+        drop(FOUR_NODES, 3, to(2), to(), to());
+        assertRows(FOUR_NODES.coordinator(3).execute("SELECT * FROM " + KEYSPACE + "." + tableName + " WHERE pk = ?", SERIAL, pk),
+                   row(pk, 1, 1));
     }
 
     /**
      * Successful write during range movement not witnessed by write after range movement
      *
      *  - Range moves from {1, 2, 3} to {2, 3, 4}; witnessed by X (not by !X)
-     *  -  !X: Prepare and Propose to {1, 2}
+     *  -  !X: Promised and Accepted by {1, 2}
      *  - Range movement witnessed by !X
-     *  - Any: Prepare and Propose to {3, 4}
+     *  - Any: Promised and Proposed to {3, 4}
      */
-    @Ignore
     @Test
     public void testSuccessfulWriteDuringRangeMovementFollowedByConflicting() throws Throwable
     {
-        try (Cluster cluster = Cluster.create(4, config -> config
-                .set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT)))
+        FOUR_NODES.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
+
+        // make it so {4} is bootstrapping, and this has propagated to only a quorum of other nodes
+        for (int i = 1 ; i <= 4 ; ++i)
         {
-            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
-
-            // make it so {4} is bootstrapping, and this has not propagated to other nodes yet
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(1).acceptsOnInstance(Instance::removeFromRing).accept(cluster.get(4));
-            cluster.get(4).acceptsOnInstance(Instance::addToRingBootstrapping).accept(cluster.get(4));
-
-            int pk = pk(cluster, 1, 2);
-
-            // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(2, 3).drop();
-            assertRows(cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(true));
-
-            // finish topology change
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(i).acceptsOnInstance(Instance::addToRingNormal).accept(cluster.get(4));
-
-            // {3} reads from !{2} => {3, 4}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(3).to(2).drop();
-            assertRows(cluster.coordinator(3).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(false, pk, 1, 1, null));
-
-            // TODO: repair and verify base table state
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::removeFromRing).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
         }
+        for (int i = 2 ; i <= 4 ; ++i)
+        {
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingBootstrapping).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+        }
+
+        int pk = pk(FOUR_NODES, 1, 2);
+
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
+        // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
+        drop(FOUR_NODES, 1, to(3), to(3), to(2, 3));
+        assertRows(executeWithRetry(FOUR_NODES.coordinator(1), "INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ONE, pk),
+                   row(true));
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+
+        // finish topology change
+        for (int i = 1 ; i <= 4 ; ++i)
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingNormal).accept(FOUR_NODES.get(4));
+
+        // {3} reads from !{2} => {3, 4}
+        drop(FOUR_NODES, 3, to(2), to(), to());
+        assertRows(FOUR_NODES.coordinator(3).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ONE, pk),
+                   row(false, pk, 1, 1, null));
+
+        // TODO: repair and verify base table state
     }
 
     /**
@@ -543,60 +537,70 @@
      * See CASSANDRA-15745
      *
      *  - Range moves from {1, 2, 3} to {2, 3, 4}; witnessed by X (not by !X)
-     *  -   X: Prepare to {2, 3, 4}
-     *  -   X: Propose to {4}
-     *  -  !X: Prepare and Propose to {1, 2}
+     *  -   X: Promised by {2, 3, 4}
+     *  -   X: Accepted by {4}
+     *  -  !X: Promised and Accepted by {1, 2}
      *  - Range move visible by !X
-     *  - Any: Prepare and Read from {3, 4}
+     *  - Any: Promised and Read from {3, 4}
      */
-    @Ignore
     @Test
     public void testIncompleteWriteFollowedBySuccessfulWriteWithStaleRingDuringRangeMovementFollowedByRead() throws Throwable
     {
-        try (Cluster cluster = Cluster.create(4, config -> config
-                .set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT)))
+        String tableName = tableName("tbl");
+        FOUR_NODES.schemaChange("CREATE TABLE " + KEYSPACE + "." + tableName + " (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
+
+        // make it so {4} is bootstrapping, and this has propagated to only a quorum of other nodes
+        for (int i = 1 ; i <= 4 ; ++i)
         {
-            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
-
-            // make it so {4} is bootstrapping, and this has not propagated to other nodes yet
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(1).acceptsOnInstance(Instance::removeFromRing).accept(cluster.get(4));
-            cluster.get(4).acceptsOnInstance(Instance::addToRingBootstrapping).accept(cluster.get(4));
-
-            int pk = pk(cluster, 1, 2);
-
-            // {4} promises and accepts on !{1} => {2, 3, 4}; commits on !{1, 2, 3} => {4}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(4).to(1).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(4).to(1, 2, 3).drop();
-            try
-            {
-                cluster.coordinator(4).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM, pk);
-                Assert.assertTrue(false);
-            }
-            catch (RuntimeException wrapped)
-            {
-                Assert.assertEquals("Operation timed out - received only 1 responses.", wrapped.getCause().getMessage());
-            }
-
-            // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(2, 3).drop();
-            assertRows(cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(true));
-
-            // finish topology change
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(i).acceptsOnInstance(Instance::addToRingNormal).accept(cluster.get(4));
-
-            // {3} reads from !{2} => {3, 4}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(3).to(2).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(3).to(2).drop();
-            assertRows(cluster.coordinator(3).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = ?", ConsistencyLevel.SERIAL, pk),
-                    row(pk, 1, null, 2));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::removeFromRing).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
         }
+        for (int i = 2 ; i <= 4 ; ++i)
+        {
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingBootstrapping).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+        }
+
+        int pk = pk(FOUR_NODES, 1, 2);
+
+        // {4} promises !{1} => {2, 3, 4}, accepts on !{1, 2, 3} => {4}
+        try (AutoCloseable drop = drop(FOUR_NODES, 4, to(1), to(1, 2, 3), to()))
+        {
+            FOUR_NODES.coordinator(4).execute("INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", QUORUM, pk);
+            Assert.fail();
+        }
+        catch (Throwable t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw t;
+        }
+
+        // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
+        drop(FOUR_NODES, 1, to(3), to(3), to(2, 3));
+        // two options: either we can invalidate the previous operation and succeed, or we can complete the previous operation
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
+        Object[][] result = executeWithRetry(FOUR_NODES.coordinator(1), "INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ONE, pk);
+        Object[] expectRow;
+        if (result[0].length == 1)
+        {
+            assertRows(result, row(true));
+            expectRow = row(pk, 1, null, 2);
+        }
+        else
+        {
+            assertRows(result, row(false, pk, 1, 1, null));
+            expectRow = row(pk, 1, 1, null);
+        }
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+
+        // finish topology change
+        for (int i = 1 ; i <= 4 ; ++i)
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingNormal).accept(FOUR_NODES.get(4));
+
+        // {3} reads from !{2} => {3, 4}
+        drop(FOUR_NODES, 3, to(2), to(2), to());
+        assertRows(FOUR_NODES.coordinator(3).execute("SELECT * FROM " + KEYSPACE + "." + tableName + " WHERE pk = ?", SERIAL, pk),
+                   expectRow);
     }
 
     /**
@@ -607,95 +611,173 @@
      * See CASSANDRA-15745
      *
      *  - Range moves from {1, 2, 3} to {2, 3, 4}; witnessed by X (not by !X)
-     *  -   X: Prepare to {2, 3, 4}
-     *  -   X: Propose to {4}
-     *  -  !X: Prepare and Propose to {1, 2}
+     *  -   X: Promised by {2, 3, 4}
+     *  -   X: Accepted by {4}
+     *  -  !X: Promised and Accepted by {1, 2}
      *  - Range move visible by !X
-     *  - Any: Prepare and Propose to {3, 4}
+     *  - Any: Promised and Proposed to {3, 4}
      */
-    @Ignore
     @Test
     public void testIncompleteWriteFollowedBySuccessfulWriteWithStaleRingDuringRangeMovementFollowedByWrite() throws Throwable
     {
-        try (Cluster cluster = Cluster.create(4, config -> config
-                .set("write_request_timeout_in_ms", REQUEST_TIMEOUT)
-                .set("cas_contention_timeout_in_ms", CONTENTION_TIMEOUT)))
+        String tableName = tableName("tbl");
+        FOUR_NODES.schemaChange("CREATE TABLE " + KEYSPACE + "." + tableName + " (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
+
+        // make it so {4} is bootstrapping, and this has propagated to only a quorum of other nodes
+        for (int i = 1; i <= 4; ++i)
         {
-            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::removeFromRing).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
+        }
+        for (int i = 2; i <= 4; ++i)
+        {
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingBootstrapping).accept(FOUR_NODES.get(4));
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
+        }
 
-            // make it so {4} is bootstrapping, and this has not propagated to other nodes yet
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(1).acceptsOnInstance(Instance::removeFromRing).accept(cluster.get(4));
-            cluster.get(4).acceptsOnInstance(Instance::addToRingBootstrapping).accept(cluster.get(4));
+        int pk = pk(FOUR_NODES, 1, 2);
 
-            int pk = pk(cluster, 1, 2);
+        // {4} promises !{1} => {2, 3, 4}, accepts on !{1, 2, 3} => {4}
+        drop(FOUR_NODES, 4, to(1), to(1, 2, 3), to());
+        try
+        {
+            FOUR_NODES.coordinator(4).execute("INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", QUORUM, pk);
+            Assert.fail();
+        }
+        catch (Throwable t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw t;
+        }
 
-            // {4} promises and accepts on !{1} => {2, 3, 4}; commits on !{1, 2, 3} => {4}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(4).to(1).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(4).to(1, 2, 3).drop();
-            try
-            {
-                cluster.coordinator(4).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM, pk);
-                Assert.assertTrue(false);
-            }
-            catch (RuntimeException wrapped)
-            {
-                Assert.assertEquals("Operation timed out - received only 1 responses.", wrapped.getCause().getMessage());
-            }
+        // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
+        drop(FOUR_NODES, 1, to(3), to(3), to(2, 3));
+        // two options: either we can invalidate the previous operation and succeed, or we can complete the previous operation
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertNotVisibleInRing).accept(FOUR_NODES.get(4));
+        Object[][] result = executeWithRetry(FOUR_NODES.coordinator(1), "INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ONE, pk);
+        Object[] expectRow;
+        if (result[0].length == 1)
+        {
+            assertRows(result, row(true));
+            expectRow = row(false, pk, 1, null, 2);
+        }
+        else
+        {
+            assertRows(result, row(false, pk, 1, 1, null));
+            expectRow = row(false, pk, 1, 1, null);
+        }
+        FOUR_NODES.get(1).acceptsOnInstance(CASTestBase::assertVisibleInRing).accept(FOUR_NODES.get(4));
 
-            // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(3).drop();
-            cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(2, 3).drop();
-            assertRows(cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(true));
+        // finish topology change
+        for (int i = 1; i <= 4; ++i)
+            FOUR_NODES.get(i).acceptsOnInstance(CASTestBase::addToRingNormal).accept(FOUR_NODES.get(4));
 
-            // finish topology change
-            for (int i = 1 ; i <= 4 ; ++i)
-                cluster.get(i).acceptsOnInstance(Instance::addToRingNormal).accept(cluster.get(4));
+        // {3} promises and proposes on !{2} => {3, 4}
+        FOUR_NODES.filters().verbs(PAXOS2_PREPARE_REQ.id, PAXOS_PREPARE_REQ.id, READ_REQ.id).from(3).to(2).drop();
+        FOUR_NODES.filters().verbs(PAXOS2_PROPOSE_REQ.id, PAXOS_PROPOSE_REQ.id).from(3).to(2).drop();
+        assertRows(FOUR_NODES.coordinator(3).execute("INSERT INTO " + KEYSPACE + "." + tableName + " (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ONE, pk),
+                   expectRow);
+    }
 
-            // {3} reads from !{2} => {3, 4}
-            cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(3).to(2).drop();
-            cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(3).to(2).drop();
-            assertRows(cluster.coordinator(3).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ConsistencyLevel.ONE, pk),
-                    row(false, 5, 1, null, 2));
+    // TODO: RF changes
+    // TODO: Aborted range movements
+    // TODO: Leaving ring
+
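+    // Shared scenario: a first CAS times out with its proposal accepted only by node 3; postTimeoutOperation1 then
+    // runs with node 3 isolated (optionally also losing its commit on nodes 1 and 2), and postTimeoutOperation2 runs
+    // with node 2 isolated so that it must involve nodes 1 and 3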
+    static void consistencyAfterWriteTimeoutTest(BiConsumer<String, ICoordinator> postTimeoutOperation1, BiConsumer<String, ICoordinator> postTimeoutOperation2, boolean loseCommitOfOperation1, Cluster cluster)
+    {
+        String tableName = tableName("t");
+        String table = KEYSPACE + "." + tableName;
+        cluster.schemaChange("CREATE TABLE " + table + " (k int PRIMARY KEY, v int)");
+
+        // We do a CAS insertion, but with the PROPOSE message dropped on nodes 1 and 2. The CAS will not get
+        // through and should time out. Importantly, node 3 does receive and answer the PROPOSE.
+        IMessageFilters.Filter dropProposeFilter = cluster.filters()
+                                                          .verbs(PAXOS_PROPOSE_REQ.id, PAXOS2_PROPOSE_REQ.id)
+                                                          .to(1, 2)
+                                                          .drop();
+
+        // Prepare A to {1, 2, 3}
+        // Propose A to {3}
+        // Timeout
+        try
+        {
+            // NOTE: the consistency below is the "commit" one, so it doesn't matter at all here.
+            cluster.coordinator(1)
+                   .execute("INSERT INTO " + table + "(k, v) VALUES (0, 0) IF NOT EXISTS", ONE);
+            Assert.fail("The insertion should have timed-out");
+        }
+        catch (Throwable t)
+        {
+            if (!t.getClass().getName().equals(CasWriteTimeoutException.class.getName()))
+                throw t;
+        }
+        finally
+        {
+            dropProposeFilter.off();
+        }
+
+        // Prepare and Propose to {1, 2}
+        // Commit(?) to either {1, 2, 3} or {3}
+        // Isolates node 3 and executes the SERIAL operation. As neither node 1 nor node 2 got the initial insert
+        // proposal, there is nothing to "replay" and the operation should assert the table is still empty.
+        IMessageFilters.Filter ignoreNode3Filter = cluster.filters().verbs(paxosAndReadVerbs()).to(3).drop();
+        IMessageFilters.Filter dropCommitFilter = null;
+        if (loseCommitOfOperation1)
+        {
+            dropCommitFilter = cluster.filters().verbs(PAXOS_COMMIT_REQ.id).to(1, 2).drop();
+        }
+        try
+        {
+            postTimeoutOperation1.accept(table, cluster.coordinator(1));
+        }
+        finally
+        {
+            ignoreNode3Filter.off();
+            if (dropCommitFilter != null)
+                dropCommitFilter.off();
+        }
+
+        // Node 3 is now back and we isolate node 2 to ensure the next read hits node 1 and 3.
+        // What we want to ensure is that despite node 3 having the initial insert in its paxos state in a position of
+        // being replayed, that insert is _not_ replayed (it would contradict serializability since the previous
+        // operation asserted nothing was inserted). It is this execution that failed before CASSANDRA-12126.
+        IMessageFilters.Filter ignoreNode2Filter = cluster.filters().verbs(paxosAndReadVerbs()).to(2).drop();
+        try
+        {
+            postTimeoutOperation2.accept(table, cluster.coordinator(1));
+        }
+        finally
+        {
+            ignoreNode2Filter.off();
         }
     }
 
-    private static int pk(Cluster cluster, int lb, int ub)
+    protected Cluster getCluster()
     {
-        return pk(cluster.get(lb), cluster.get(ub));
+        return THREE_NODES;
     }
 
-    private static int pk(IInstance lb, IInstance ub)
+    /**
+     * Regression test for a bug (CASSANDRA-17999) where a WriteTimeoutException was encountered when using Paxos v2
+     * in an LWT performance test with a single datacenter, because Paxos was still waiting for a response from
+     * another datacenter during the Commit/Acknowledge phase even though the query ran with LOCAL_SERIAL.
+     *
+     * <p>This specifically tests for the inconsistency described/fixed by CASSANDRA-17999.
+     */
+    @Test
+    public void testWriteTimeoutExceptionUsingPaxosInLwtPerformanceTest() throws IOException
     {
-        return pk(Murmur3Partitioner.instance.getTokenFactory().fromString(lb.config().getString("initial_token")),
-                Murmur3Partitioner.instance.getTokenFactory().fromString(ub.config().getString("initial_token")));
-    }
 
-    private static int pk(Token lb, Token ub)
-    {
-        int pk = 0;
-        Token pkt;
-        while (lb.compareTo(pkt = Murmur3Partitioner.instance.getToken(Int32Type.instance.decompose(pk))) >= 0 || ub.compareTo(pkt) < 0)
-            ++pk;
-        return pk;
-    }
+        THREE_NODES.schemaChange(String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}", KEYSPACE));
 
-    private static void debugOwnership(Cluster cluster, int pk)
-    {
-        for (int i = 1 ; i <= cluster.size() ; ++i)
-            System.out.println(i + ": " + cluster.get(i).appliesOnInstance((Integer v) -> StorageService.instance.getNaturalEndpointsWithPort(KEYSPACE, Int32Type.instance.decompose(v)))
-                    .apply(pk));
-    }
+        String tableName = tableName("t");
+        String table = KEYSPACE + "." + tableName;
+        THREE_NODES.schemaChange("CREATE TABLE " + table + " (k int PRIMARY KEY, v int)");
 
-    private static void debugPaxosState(Cluster cluster, int pk)
-    {
-        TableId tableId = cluster.get(1).callOnInstance(() -> Keyspace.open(KEYSPACE).getColumnFamilyStore("tbl").metadata.id);
-        for (int i = 1 ; i <= cluster.size() ; ++i)
-            for (Object[] row : cluster.get(i).executeInternal("select in_progress_ballot, proposal_ballot, most_recent_commit_at from system.paxos where row_key = ? and cf_id = ?", Int32Type.instance.decompose(pk), tableId))
-                System.out.println(i + ": " + (row[0] == null ? 0L : UUIDGen.microsTimestamp((UUID)row[0])) + ", " + (row[1] == null ? 0L : UUIDGen.microsTimestamp((UUID)row[1])) + ", " + (row[2] == null ? 0L : UUIDGen.microsTimestamp((UUID)row[2])));
+        THREE_NODES.coordinator(1).execute("INSERT INTO " + table + " (k, v) VALUES (5, 5) IF NOT EXISTS", LOCAL_QUORUM);
+        THREE_NODES.coordinator(1).execute("UPDATE " + table + " SET v = 123 WHERE k = 5 IF EXISTS", LOCAL_QUORUM);
+
     }
 
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CASTestBase.java b/test/distributed/org/apache/cassandra/distributed/test/CASTestBase.java
new file mode 100644
index 0000000..cae6eef
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/CASTestBase.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.Collections;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IMessageFilters;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.HeartBeatState;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.PendingRangeCalculatorService;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.PaxosRepair;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.db.ConsistencyLevel.SERIAL;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PROPOSE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_REPAIR_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_COMMIT_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_PROPOSE_REQ;
+import static org.apache.cassandra.net.Verb.READ_REQ;
+
+public abstract class CASTestBase extends TestBaseImpl
+{
+    private static final Logger logger = LoggerFactory.getLogger(CASTestBase.class);
+
+    static final AtomicInteger TABLE_COUNTER = new AtomicInteger(0);
+
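+    // Generates a unique table name per call so tests sharing a cluster do not clash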
+    static String tableName()
+    {
+        return tableName("tbl");
+    }
+
+    static String tableName(String prefix)
+    {
+        return prefix + TABLE_COUNTER.getAndIncrement();
+    }
+
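+    // Runs a Paxos repair of the given key on node 'repairWith' while dropping repair/prepare/read messages to 'repairWithout'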
+    static void repair(Cluster cluster, String tableName, int pk, int repairWith, int repairWithout)
+    {
+        IMessageFilters.Filter filter = cluster.filters().verbs(
+                PAXOS2_REPAIR_REQ.id,
+                PAXOS2_PREPARE_REQ.id, PAXOS_PREPARE_REQ.id, READ_REQ.id).from(repairWith).to(repairWithout).drop();
+        cluster.get(repairWith).runOnInstance(() -> {
+            TableMetadata schema = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName).metadata.get();
+            DecoratedKey key = schema.partitioner.decorateKey(Int32Type.instance.decompose(pk));
+            try
+            {
+                PaxosRepair.create(SERIAL, key, null, schema).start().await();
+            }
+            catch (Throwable t)
+            {
+                throw new RuntimeException(t);
+            }
+        });
+        filter.off();
+    }
+
+    static int pk(Cluster cluster, int lb, int ub)
+    {
+        return pk(cluster.get(lb), cluster.get(ub));
+    }
+
+    static int pk(IInstance lb, IInstance ub)
+    {
+        return pk(Murmur3Partitioner.instance.getTokenFactory().fromString(lb.config().getString("initial_token")),
+                Murmur3Partitioner.instance.getTokenFactory().fromString(ub.config().getString("initial_token")));
+    }
+
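+    // Finds the smallest non-negative int key whose Murmur3 token falls in the range (lb, ub]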
+    static int pk(Token lb, Token ub)
+    {
+        int pk = 0;
+        Token pkt;
+        while (lb.compareTo(pkt = Murmur3Partitioner.instance.getToken(Int32Type.instance.decompose(pk))) >= 0 || ub.compareTo(pkt) < 0)
+            ++pk;
+        return pk;
+    }
+
+    int[] to(int ... nodes)
+    {
+        return nodes;
+    }
+
+
+    private static final IMessageFilters.Matcher LOG_DROPPED = (from, to, message) -> { logger.info("Dropping {} from {} to {}", Verb.fromId(message.verb()), from, to); return true; };
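+    // Drops Paxos prepare/read, propose, and commit requests from node 'from' to the given target sets, logging each
+    // dropped message; close the returned AutoCloseable to remove the filters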
+    AutoCloseable drop(Cluster cluster, int from, int[] toPrepareAndRead, int[] toPropose, int[] toCommit)
+    {
+        IMessageFilters.Filter filter1 = cluster.filters().verbs(PAXOS2_PREPARE_REQ.id, PAXOS_PREPARE_REQ.id, READ_REQ.id).from(from).to(toPrepareAndRead).messagesMatching(LOG_DROPPED).drop();
+        IMessageFilters.Filter filter2 = cluster.filters().verbs(PAXOS2_PROPOSE_REQ.id, PAXOS_PROPOSE_REQ.id).from(from).to(toPropose).messagesMatching(LOG_DROPPED).drop();
+        IMessageFilters.Filter filter3 = cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(from).to(toCommit).messagesMatching(LOG_DROPPED).drop();
+        return () -> {
+            filter1.off();
+            filter2.off();
+            filter3.off();
+        };
+    }
+
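+    // As above, but with separate target sets for prepare and read requests, and without logging dropped messages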
+    AutoCloseable drop(Cluster cluster, int from, int[] toPrepare, int[] toRead, int[] toPropose, int[] toCommit)
+    {
+        IMessageFilters.Filter filter1 = cluster.filters().verbs(PAXOS2_PREPARE_REQ.id, PAXOS_PREPARE_REQ.id).from(from).to(toPrepare).drop();
+        IMessageFilters.Filter filter2 = cluster.filters().verbs(READ_REQ.id).from(from).to(toRead).drop();
+        IMessageFilters.Filter filter3 = cluster.filters().verbs(PAXOS2_PROPOSE_REQ.id, PAXOS_PROPOSE_REQ.id).from(from).to(toPropose).drop();
+        IMessageFilters.Filter filter4 = cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(from).to(toCommit).drop();
+        return () -> {
+            filter1.off();
+            filter2.off();
+            filter3.off();
+            filter4.off();
+        };
+    }
+
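+    // Injects gossip and token metadata on the current instance so that 'peer' appears as bootstrapping or as a
+    // normal ring member, without the peer itself changing state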
+    public static void addToRing(boolean bootstrapping, IInstance peer)
+    {
+        try
+        {
+            IInstanceConfig config = peer.config();
+            IPartitioner partitioner = FBUtilities.newPartitioner(config.getString("partitioner"));
+            Token token = partitioner.getTokenFactory().fromString(config.getString("initial_token"));
+            InetAddressAndPort address = InetAddressAndPort.getByAddress(peer.broadcastAddress());
+
+            UUID hostId = config.hostId();
+            Gossiper.runInGossipStageBlocking(() -> {
+                Gossiper.instance.initializeNodeUnsafe(address, hostId, 1);
+                Gossiper.instance.injectApplicationState(address,
+                                                         ApplicationState.TOKENS,
+                                                         new VersionedValue.VersionedValueFactory(partitioner).tokens(Collections.singleton(token)));
+                VersionedValue status = bootstrapping
+                                        ? new VersionedValue.VersionedValueFactory(partitioner).bootstrapping(Collections.singleton(token))
+                                        : new VersionedValue.VersionedValueFactory(partitioner).normal(Collections.singleton(token));
+                Gossiper.instance.injectApplicationState(address, ApplicationState.STATUS, status);
+                StorageService.instance.onChange(address, ApplicationState.STATUS, status);
+                Gossiper.instance.realMarkAlive(address, Gossiper.instance.getEndpointStateForEndpoint(address));
+            });
+            int version = Math.min(MessagingService.current_version, peer.getMessagingVersion());
+            MessagingService.instance().versions.set(address, version);
+
+            if (!bootstrapping)
+                assert StorageService.instance.getTokenMetadata().isMember(address);
+            PendingRangeCalculatorService.instance.blockUntilFinished();
+        }
+        catch (Throwable e) // UnknownHostException
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static void assertVisibleInRing(IInstance peer)
+    {
+        InetAddressAndPort endpoint = InetAddressAndPort.getByAddress(peer.broadcastAddress());
+        Assert.assertTrue(Gossiper.instance.isAlive(endpoint));
+    }
+
+    // reset gossip state so we know of the node being alive only
+    public static void removeFromRing(IInstance peer)
+    {
+        try
+        {
+            IInstanceConfig config = peer.config();
+            IPartitioner partitioner = FBUtilities.newPartitioner(config.getString("partitioner"));
+            Token token = partitioner.getTokenFactory().fromString(config.getString("initial_token"));
+            InetAddressAndPort address = InetAddressAndPort.getByAddress(config.broadcastAddress());
+
+            Gossiper.runInGossipStageBlocking(() -> {
+                StorageService.instance.onChange(address,
+                                                 ApplicationState.STATUS,
+                                                 new VersionedValue.VersionedValueFactory(partitioner).left(Collections.singleton(token), 0L, 0));
+                Gossiper.instance.unsafeAnnulEndpoint(address);
+                Gossiper.instance.realMarkAlive(address, new EndpointState(new HeartBeatState(0, 0)));
+            });
+            PendingRangeCalculatorService.instance.blockUntilFinished();
+        }
+        catch (Throwable e) // UnknownHostException
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static void assertNotVisibleInRing(IInstance peer)
+    {
+        InetAddressAndPort endpoint = InetAddressAndPort.getByAddress(peer.broadcastAddress());
+        Assert.assertFalse(Gossiper.instance.isAlive(endpoint));
+    }
+
+    public static void addToRingNormal(IInstance peer)
+    {
+        addToRing(false, peer);
+        assert StorageService.instance.getTokenMetadata().isMember(InetAddressAndPort.getByAddress(peer.broadcastAddress()));
+    }
+
+    public static void addToRingBootstrapping(IInstance peer)
+    {
+        addToRing(true, peer);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CasCriticalSectionTest.java b/test/distributed/org/apache/cassandra/distributed/test/CasCriticalSectionTest.java
new file mode 100644
index 0000000..685e848
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/CasCriticalSectionTest.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BooleanSupplier;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+/**
+ * A simple sanity check that uses CAS as a mutex: each thread tries to CAS the thread_id variable of a specific row
+ * from 0 to its own thread id. It then holds the lock for a short time before releasing it.
+ *
+ * Write timeouts are handled by simply re-reading the variable and checking if locking has actually succeeded.
+ */
+public class CasCriticalSectionTest extends TestBaseImpl
+{
+    private static Random rng = new Random();
+    private static final int threadCount = 5;
+    private static final int rowCount = 1;
+
+    @Test
+    public void criticalSectionTest() throws IOException, InterruptedException
+    {
+        try (Cluster cluster = init(Cluster.create(5, c -> c.set("paxos_variant", "v2")
+                                                            .set("write_request_timeout_in_ms", 2000L)
+                                                            .set("cas_contention_timeout_in_ms", 2000L)
+                                                            .set("request_timeout_in_ms", 2000L))))
+        {
+            cluster.schemaChange("create table " + KEYSPACE + ".tbl (pk int, ck int, thread_id int, PRIMARY KEY(pk, ck))");
+
+            List<Thread> threads = new ArrayList<>();
+
+            AtomicBoolean failed = new AtomicBoolean(false);
+            AtomicBoolean stop = new AtomicBoolean(false);
+            BooleanSupplier exitCondition = () -> failed.get() || stop.get();
+
+            for (int i = 0; i < rowCount; i++)
+            {
+                final int rowId = i;
+                AtomicInteger counter = new AtomicInteger();
+                cluster.coordinator(1)
+                       .execute("insert into " + KEYSPACE + ".tbl (pk, ck, thread_id) VALUES (?, ?, ?) IF NOT EXISTS",
+                                ConsistencyLevel.QUORUM,
+                                1, rowId, 0);
+
+                // threads should be numbered from 1 to allow 0 to be "unlocked"
+                for (int j = 1; j <= threadCount; j++)
+                {
+                    int threadId = j;
+                    AtomicInteger lockedTimes = new AtomicInteger();
+                    AtomicInteger unlockedTimes = new AtomicInteger();
+
+                    Runnable sanityCheck = () -> {
+                        Assert.assertEquals(lockedTimes.get(), unlockedTimes.get());
+                    };
+                    threads.add(new Thread(() -> {
+                        while (!exitCondition.getAsBoolean())
+                        {
+                            while (!tryLockOnce(cluster, threadId, rowId))
+                            {
+                                if (exitCondition.getAsBoolean())
+                                {
+                                    sanityCheck.run();
+                                    return;
+                                }
+                            }
+                            int ctr = counter.getAndIncrement();
+                            if (ctr != 0)
+                            {
+                                failed.set(true);
+                                Assert.fail(String.format("Thread %s encountered lock that is held by %d participants while trying to lock.",
+                                                          Thread.currentThread().getName(),
+                                                          ctr));
+                            }
+
+                            // hold lock for a bit
+                            Uninterruptibles.sleepUninterruptibly(rng.nextInt(5), TimeUnit.MILLISECONDS);
+                            ctr = counter.decrementAndGet();
+                            if (ctr != 0)
+                            {
+                                failed.set(true);
+                                Assert.fail(String.format("Thread %s encountered lock that is held by %d participants while trying to unlock.",
+                                                          Thread.currentThread().getName(),
+                                                          ctr));
+                            }
+                            while (!tryUnlockOnce(cluster, threadId, rowId))
+                            {
+                                if (exitCondition.getAsBoolean())
+                                {
+                                    sanityCheck.run();
+                                    return;
+                                }
+                            }
+                        }
+                        sanityCheck.run();
+                    }, String.format("CAS Thread %d-%d", rowId, threadId)));
+                }
+            }
+
+            for (Thread thread : threads)
+                thread.start();
+
+            Thread.sleep(TimeUnit.SECONDS.toMillis(30));
+            stop.set(true);
+
+            for (Thread thread : threads)
+                thread.join();
+
+            Assert.assertFalse(failed.get());
+        }
+    }
+
+    public static boolean isCasSuccess(Object[][] res)
+    {
+        if (res == null || res.length != 1)
+            return false;
+
+        return Arrays.equals(res[0], new Object[] {true});
+    }
+
+    public static boolean tryLockOnce(Cluster cluster, int threadId, int rowId)
+    {
+        Object[][] res = null;
+
+        try
+        {
+            res = cluster.coordinator(rng.nextInt(cluster.size()) + 1)
+                         .execute("update " + KEYSPACE + ".tbl SET thread_id = ? WHERE pk = ? AND ck = ? IF thread_id = 0",
+                                  ConsistencyLevel.QUORUM,
+                                  threadId, 1, rowId);
+            return isCasSuccess(res);
+        }
+        catch (Throwable writeTimeout)
+        {
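+            // the CAS outcome is unknown after a timeout: re-read at SERIAL (retrying until the read succeeds) and
+            // treat the lock as acquired only if our thread id was actually installed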
+            while (true)
+            {
+                try
+                {
+                    res = cluster.coordinator(rng.nextInt(cluster.size()) + 1)
+                                 .execute("SELECT thread_id FROM " + KEYSPACE + ".tbl WHERE pk = ? AND ck = ?",
+                                          ConsistencyLevel.SERIAL,
+                                          1, rowId);
+                    break;
+                }
+                catch (Throwable t)
+                {
+                    // retry
+                }
+            }
+
+            return (int) res[0][0] == threadId;
+        }
+    }
+
+    public static boolean tryUnlockOnce(Cluster cluster, int threadId, int rowId)
+    {
+        Object[][] res = null;
+
+        try
+        {
+            res = cluster.coordinator(rng.nextInt(cluster.size()) + 1)
+                         .execute("update " + KEYSPACE + ".tbl SET thread_id = ? WHERE pk = ? AND ck = ? IF thread_id = ?",
+                                  ConsistencyLevel.QUORUM,
+                                  0, 1, rowId, threadId);
+            return isCasSuccess(res);
+        }
+        catch (Throwable writeTimeout)
+        {
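+            // the CAS outcome is unknown after a timeout: re-read at SERIAL (retrying until the read succeeds) and
+            // treat the unlock as successful only if our thread id is no longer installed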
+            while (true)
+            {
+                try
+                {
+                    res = cluster.coordinator(rng.nextInt(cluster.size()) + 1)
+                                 .execute("SELECT thread_id FROM " + KEYSPACE + ".tbl WHERE pk = ? AND ck = ?",
+                                          ConsistencyLevel.SERIAL,
+                                          1, rowId);
+                    break;
+                }
+                catch (Throwable t)
+                {
+                    // retry
+                }
+            }
+
+            return (int) res[0][0] != threadId;
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CompactionOverlappingSSTableTest.java b/test/distributed/org/apache/cassandra/distributed/test/CompactionOverlappingSSTableTest.java
index 54f8ad7..6a65c91 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/CompactionOverlappingSSTableTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/CompactionOverlappingSSTableTest.java
@@ -22,6 +22,7 @@
 import java.util.Arrays;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 
@@ -47,7 +48,7 @@
 public class CompactionOverlappingSSTableTest extends TestBaseImpl
 {
     @Test
-    public void partialCompactionOverlappingTest() throws IOException
+    public void partialCompactionOverlappingTest() throws IOException, TimeoutException
     {
 
         try (Cluster cluster = init(builder().withNodes(1)
diff --git a/test/distributed/org/apache/cassandra/distributed/test/CountersTest.java b/test/distributed/org/apache/cassandra/distributed/test/CountersTest.java
index 7cc632f..29ce511 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/CountersTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/CountersTest.java
@@ -45,7 +45,7 @@
 
     private static void testUpdateCounter(boolean droppedCompactStorage) throws Throwable
     {
-        try (Cluster cluster = Cluster.build(2).withConfig(c -> c.with(GOSSIP, NATIVE_PROTOCOL).set("enable_drop_compact_storage", true)).start())
+        try (Cluster cluster = Cluster.build(2).withConfig(c -> c.with(GOSSIP, NATIVE_PROTOCOL).set("drop_compact_storage_enabled", true)).start())
         {
             cluster.schemaChange("CREATE KEYSPACE k WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}");
 
diff --git a/test/distributed/org/apache/cassandra/distributed/test/DataResurrectionCheckTest.java b/test/distributed/org/apache/cassandra/distributed/test/DataResurrectionCheckTest.java
new file mode 100644
index 0000000..1b297da
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/DataResurrectionCheckTest.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.junit.Test;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.StartupChecksOptions;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.exceptions.StartupException;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.service.DataResurrectionCheck;
+import org.apache.cassandra.service.DataResurrectionCheck.Heartbeat;
+import org.apache.cassandra.service.StartupChecks.StartupCheckType;
+import org.apache.cassandra.utils.Clock.Global;
+
+import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.config.StartupChecksOptions.ENABLED_PROPERTY;
+import static org.apache.cassandra.distributed.Cluster.build;
+import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
+import static org.apache.cassandra.service.DataResurrectionCheck.DEFAULT_HEARTBEAT_FILE;
+import static org.apache.cassandra.service.DataResurrectionCheck.EXCLUDED_KEYSPACES_CONFIG_PROPERTY;
+import static org.apache.cassandra.service.DataResurrectionCheck.EXCLUDED_TABLES_CONFIG_PROPERTY;
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.check_data_resurrection;
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.Matchers.containsString;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public class DataResurrectionCheckTest extends TestBaseImpl
+{
+    @Test
+    public void testDataResurrectionCheck() throws Exception
+    {
+        try
+        {
+            // set the heartbeat period to 1 hour so the heartbeat is not updated again after it is written, for test purposes
+            System.setProperty(CassandraRelevantProperties.CHECK_DATA_RESURRECTION_HEARTBEAT_PERIOD.getKey(), "3600000");
+
+            // start the node with the check enabled; it will pass fine as there are no user tables yet
+            // and the system tables are young enough
+            try (Cluster cluster = build().withNodes(1)
+                                          .withDataDirCount(3) // we will expect heartbeat to be in the first data dir
+                                          .withConfig(config -> config.with(NATIVE_PROTOCOL)
+                                                                      .set("startup_checks",
+                                                                           getStartupChecksConfig(ENABLED_PROPERTY, "true")))
+                                          .start())
+            {
+                IInvokableInstance instance = cluster.get(1);
+
+                checkHeartbeat(instance);
+
+                for (String ks : new String[]{ "ks1", "ks2", "ks3" })
+                {
+                    cluster.schemaChange("CREATE KEYSPACE " + ks + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};");
+                    cluster.schemaChange(format("CREATE TABLE %s.tb1 (pk text PRIMARY KEY) WITH gc_grace_seconds = 10", ks));
+                    cluster.schemaChange(format("CREATE TABLE %s.tb2 (pk text PRIMARY KEY)", ks));
+                }
+
+                AtomicReference<Throwable> throwable = new AtomicReference<>();
+                // periodically execute the check on a running instance and wait until the exception is thrown for all keyspaces
+                // wait for all violations with Awaitility because, due to how the tables were created,
+                // they will not expire their gc_grace_seconds at exactly the same time
+                await().timeout(1, MINUTES)
+                       .pollInterval(5, SECONDS)
+                       .until(() -> {
+                           Throwable t = executeChecksOnInstance(instance);
+                           if (t == null)
+                               return false;
+                           String message = t.getMessage();
+                           if (!message.contains("ks1") || !message.contains("ks2") || !message.contains("ks3"))
+                           {
+                               return false;
+                           }
+                           throwable.set(t);
+                           return true;
+                       });
+
+                assertThat(throwable.get().getMessage(), containsString("Invalid tables"));
+                // returned tables in output are not in any particular order
+                // it is how they are returned from system tables
+                assertThat(throwable.get().getMessage(), containsString("ks1.tb1"));
+                assertThat(throwable.get().getMessage(), containsString("ks2.tb1"));
+                assertThat(throwable.get().getMessage(), containsString("ks3.tb1"));
+
+                // exclude failing keyspaces which already expired on their gc_grace_seconds, so we will pass the check
+                assertNull(executeChecksOnInstance(instance, EXCLUDED_KEYSPACES_CONFIG_PROPERTY, "ks1,ks2,ks3"));
+
+                // exclude failing tables which already expired on their gc_grace_seconds, so we will pass the check
+                assertNull(executeChecksOnInstance(instance, EXCLUDED_TABLES_CONFIG_PROPERTY, "ks1.tb1,ks2.tb1,ks3.tb1"));
+
+                // exclude failing tables, but not all of them,
+                // so check detects only one table violates the check
+                Throwable t = executeChecksOnInstance(instance, EXCLUDED_TABLES_CONFIG_PROPERTY, "ks1.tb1,ks2.tb1");
+
+                assertNotNull(t);
+                assertThat(t.getMessage(), containsString("Invalid tables: ks3.tb1"));
+
+                // shadow the table exclusion with a keyspace exclusion: we have not excluded ks3.tb1, but we excluded the whole ks3 keyspace
+                assertNull(executeChecksOnInstance(instance,
+                                                   EXCLUDED_TABLES_CONFIG_PROPERTY, "ks1.tb1,ks2.tb1",
+                                                   EXCLUDED_KEYSPACES_CONFIG_PROPERTY, "ks3"));
+            }
+        }
+        finally
+        {
+            System.clearProperty(CassandraRelevantProperties.CHECK_DATA_RESURRECTION_HEARTBEAT_PERIOD.getKey());
+        }
+    }
+
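+    // Runs the data resurrection startup check on the instance with the given key/value config pairs,
+    // returning the StartupException if the check fails or null if it passes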
+    private Throwable executeChecksOnInstance(IInvokableInstance instance, final String... config)
+    {
+        assert config.length % 2 == 0;
+        return instance.callsOnInstance((IIsolatedExecutor.SerializableCallable<Throwable>) () ->
+        {
+            try
+            {
+                DataResurrectionCheck check = new DataResurrectionCheck();
+                StartupChecksOptions startupChecksOptions = new StartupChecksOptions();
+                startupChecksOptions.enable(check_data_resurrection);
+
+                for (int i = 0; i < config.length - 1; i = i + 2)
+                    startupChecksOptions.set(check_data_resurrection, config[i], config[i + 1]);
+
+                check.execute(startupChecksOptions);
+                return null;
+            }
+            catch (StartupException e)
+            {
+                return e;
+            }
+        }).call();
+    }
+
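+    // Builds the startup_checks configuration map for check_data_resurrection from the given key/value pairs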
+    private Map<StartupCheckType, Map<String, Object>> getStartupChecksConfig(String... configs)
+    {
+        return new EnumMap<StartupCheckType, Map<String, Object>>(StartupCheckType.class)
+        {{
+            put(check_data_resurrection,
+                new HashMap<String, Object>()
+                {{
+                    for (int i = 0; i < configs.length - 1; i = i + 2)
+                        put(configs[i], configs[i + 1]);
+                }});
+        }};
+    }
+
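+    // Asserts that the heartbeat file exists in the first data directory and records a timestamp in the past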
+    private void checkHeartbeat(IInvokableInstance instance) throws Exception
+    {
+        File heartbeatFile = new File(((String[]) instance.config().get("data_file_directories"))[0],
+                                      DEFAULT_HEARTBEAT_FILE);
+        assertTrue(heartbeatFile.exists());
+        Heartbeat heartbeat = Heartbeat.deserializeFromJsonFile(heartbeatFile);
+        assertNotNull(heartbeat.lastHeartbeat);
+        assertTrue(heartbeat.lastHeartbeat.toEpochMilli() < Global.currentTimeMillis());
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/FailingRepairTest.java b/test/distributed/org/apache/cassandra/distributed/test/FailingRepairTest.java
index 7feefa3..eb1cdff 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/FailingRepairTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/FailingRepairTest.java
@@ -20,6 +20,7 @@
 
 import java.io.IOException;
 import java.io.Serializable;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -44,9 +45,8 @@
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.distributed.Cluster;
-import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.db.DataRange;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.PartitionPosition;
@@ -55,10 +55,12 @@
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.ICluster;
-import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
 import org.apache.cassandra.distributed.impl.InstanceKiller;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
@@ -72,6 +74,7 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService.ParentRepairStatus;
 import org.apache.cassandra.service.StorageService;
+import org.awaitility.Awaitility;
 
 @RunWith(Parameterized.class)
 public class FailingRepairTest extends TestBaseImpl implements Serializable
@@ -110,7 +113,7 @@
         return () -> {
             String cfName = getCfName(type, parallelism, withTracing);
             ColumnFamilyStore cf = Keyspace.open(KEYSPACE).getColumnFamilyStore(cfName);
-            cf.forceBlockingFlush();
+            Util.flush(cf);
             Set<SSTableReader> remove = cf.getLiveSSTables();
             Set<SSTableReader> replace = new HashSet<>();
             if (type == Verb.VALIDATION_REQ)
@@ -162,7 +165,12 @@
     public void cleanupState()
     {
         for (int i = 1; i <= CLUSTER.size(); i++)
-            CLUSTER.get(i).runOnInstance(InstanceKiller::clear);
+        {
+            IInvokableInstance inst = CLUSTER.get(i);
+            if (inst.isShutdown())
+                inst.startup();
+            inst.runOnInstance(InstanceKiller::clear);
+        }
     }
 
     @Test(timeout = 10 * 60 * 1000)
@@ -240,10 +248,7 @@
         // its possible that the coordinator gets the message that the replica failed before the replica completes
         // shutting down; this then means that isKilled could be updated after the fact
         IInvokableInstance replicaInstance = CLUSTER.get(replica);
-        while (replicaInstance.killAttempts() <= 0)
-            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
-
-        Assert.assertEquals("replica should be killed", 1, replicaInstance.killAttempts());
+        Awaitility.await().atMost(Duration.ofSeconds(30)).until(replicaInstance::isShutdown);
         Assert.assertEquals("coordinator should not be killed", 0, CLUSTER.get(coordinator).killAttempts());
     }
 
diff --git a/test/distributed/org/apache/cassandra/distributed/test/FailingResponseDoesNotLogTest.java b/test/distributed/org/apache/cassandra/distributed/test/FailingResponseDoesNotLogTest.java
new file mode 100644
index 0000000..1071938
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/FailingResponseDoesNotLogTest.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.collect.ImmutableMap;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.BatchQueryOptions;
+import org.apache.cassandra.cql3.CQLStatement;
+import org.apache.cassandra.cql3.QueryHandler;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.statements.BatchStatement;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.LogAction;
+import org.apache.cassandra.distributed.api.LogResult;
+import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.apache.cassandra.exceptions.RequestValidationException;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.transport.SimpleClient;
+import org.apache.cassandra.transport.messages.ResultMessage;
+import org.apache.cassandra.utils.MD5Digest;
+import org.assertj.core.api.Assertions;
+
+/**
+ * This class is rather implementation specific.  It is possible that future changes will cause these tests to fail,
+ * so updating them to the latest logic is fine.
+ *
+ * This class makes sure we do not log or update metrics for clients from a specific set of IP subnets, so as long
+ * as we still do not log or update metrics, the test is doing the right thing.
+ */
+public class FailingResponseDoesNotLogTest extends TestBaseImpl
+{
+    @BeforeClass
+    public static void beforeClassTopLevel() // need to make sure not to conflict with TestBaseImpl.beforeClass
+    {
+        DatabaseDescriptor.clientInitialization();
+    }
+
+    @Test
+    public void dispatcherErrorDoesNotLock() throws IOException
+    {
+        System.setProperty("cassandra.custom_query_handler_class", AlwaysRejectErrorQueryHandler.class.getName());
+        try (Cluster cluster = Cluster.build(1)
+                                      .withConfig(c -> c.with(Feature.NATIVE_PROTOCOL, Feature.GOSSIP)
+                                                        .set("client_error_reporting_exclusions", ImmutableMap.of("subnets", Collections.singletonList("127.0.0.1")))
+                                      )
+                                      .start())
+        {
+            try (SimpleClient client = SimpleClient.builder("127.0.0.1", 9042).build().connect(false))
+            {
+                client.execute("SELECT * FROM system.peers", ConsistencyLevel.ONE);
+                Assert.fail("Query should have failed");
+            }
+            catch (Exception e)
+            {
+                // ignore; expected
+            }
+
+            // logs happen before client response; so grep is enough
+            LogAction logs = cluster.get(1).logs();
+            LogResult<List<String>> matches = logs.grep("address contained in client_error_reporting_exclusions");
+            Assertions.assertThat(matches.getResult()).hasSize(1);
+            matches = logs.grep("Unexpected exception during request");
+            Assertions.assertThat(matches.getResult()).isEmpty();
+        }
+        finally
+        {
+            System.clearProperty("cassandra.custom_query_handler_class");
+        }
+    }
+
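+    // QueryHandler whose every method throws, used to force an error inside the native transport request dispatcher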
+    public static class AlwaysRejectErrorQueryHandler implements QueryHandler
+    {
+        @Override
+        public CQLStatement parse(String queryString, QueryState queryState, QueryOptions options)
+        {
+            throw new AssertionError("reject");
+        }
+
+        @Override
+        public ResultMessage process(CQLStatement statement, QueryState state, QueryOptions options, Map<String, ByteBuffer> customPayload, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
+        {
+            throw new AssertionError("reject");
+        }
+
+        @Override
+        public ResultMessage.Prepared prepare(String query, ClientState clientState, Map<String, ByteBuffer> customPayload) throws RequestValidationException
+        {
+            throw new AssertionError("reject");
+        }
+
+        @Override
+        public Prepared getPrepared(MD5Digest id)
+        {
+            throw new AssertionError("reject");
+        }
+
+        @Override
+        public ResultMessage processPrepared(CQLStatement statement, QueryState state, QueryOptions options, Map<String, ByteBuffer> customPayload, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
+        {
+            throw new AssertionError("reject");
+        }
+
+        @Override
+        public ResultMessage processBatch(BatchStatement statement, QueryState state, BatchQueryOptions options, Map<String, ByteBuffer> customPayload, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
+        {
+            throw new AssertionError("reject");
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/FailingTruncationTest.java b/test/distributed/org/apache/cassandra/distributed/test/FailingTruncationTest.java
index bcd184e..dd419db 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/FailingTruncationTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/FailingTruncationTest.java
@@ -35,13 +35,17 @@
 
 public class FailingTruncationTest extends TestBaseImpl
 {
+    private static final String BB_FAIL_HELPER_PROP = "test.bbfailhelper.enabled";
+
     @Test
     public void testFailingTruncation() throws IOException
     {
-        try(Cluster cluster = init(Cluster.build(2)
-                                          .withInstanceInitializer(BBFailHelper::install)
-                                          .start()))
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withInstanceInitializer(BBFailHelper::install)
+                                           .start()))
         {
+            cluster.setUncaughtExceptionsFilter(t -> "truncateBlocking".equals(t.getMessage()));
+            System.setProperty(BB_FAIL_HELPER_PROP, "true");
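+            // the filter above ignores the intentional exception on node2; BBFailHelper only throws while this property is set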
             cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
             try
             {
@@ -53,11 +57,11 @@
                 assertTrue(e.getMessage().contains("Truncate failed on replica /127.0.0.2"));
             }
         }
-
     }
 
     public static class BBFailHelper
     {
+
         static void install(ClassLoader cl, int nodeNumber)
         {
             if (nodeNumber == 2)
@@ -72,8 +76,8 @@
 
         public static void truncateBlocking()
         {
-            throw new RuntimeException();
+            if (Boolean.getBoolean(BB_FAIL_HELPER_PROP))
+                throw new RuntimeException("truncateBlocking");
         }
     }
-
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/FailureLoggingTest.java b/test/distributed/org/apache/cassandra/distributed/test/FailureLoggingTest.java
new file mode 100644
index 0000000..ff8234d
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/FailureLoggingTest.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.List;
+
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.db.SinglePartitionReadCommand;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.LogResult;
+import org.apache.cassandra.exceptions.UnavailableException;
+import org.apache.cassandra.service.StorageProxy;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.reads.range.RangeCommandIterator;
+
+import static net.bytebuddy.matcher.ElementMatchers.named;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class FailureLoggingTest extends TestBaseImpl
+{
+    private static Cluster cluster;
+    
+    @BeforeClass
+    public static void setUpCluster() throws IOException
+    {
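+        // an interval of 0 should disable rate-limiting of failure logging, so every failing request leaves a line to grep for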
+        CassandraRelevantProperties.FAILURE_LOGGING_INTERVAL_SECONDS.setInt(0);
+        cluster = init(Cluster.build(1).withInstanceInitializer(BBRequestFailures::install).start());
+        cluster.schemaChange("create table "+KEYSPACE+".tbl (id int primary key, i int)");
+    }
+
+    @AfterClass
+    public static void tearDownCluster()
+    {
+        if (cluster != null)
+            cluster.close();
+    }
+
+    @Before
+    public void resetBootstrappingState()
+    {
+        cluster.get(1).callOnInstance(() -> BBRequestFailures.bootstrapping = false);
+        
+    }
+
+    @Test
+    public void testRequestBootstrapFail() throws Throwable
+    {
+        cluster.get(1).callOnInstance(() -> BBRequestFailures.bootstrapping = true);
+        long mark = cluster.get(1).logs().mark();
+
+        try
+        {
+            cluster.coordinator(1).execute("select * from " + KEYSPACE + ".tbl where id = 55", ConsistencyLevel.ALL);
+            fail("Query should fail");
+        }
+        catch (RuntimeException e)
+        {
+            LogResult<List<String>> result = cluster.get(1).logs().grep(mark, "while executing SELECT");
+            assertEquals(1, result.getResult().size());
+            assertTrue(result.getResult().get(0).contains("Cannot read from a bootstrapping node"));
+        }
+    }
+
+    @Test
+    public void testRangeRequestFail() throws Throwable
+    {
+        long mark = cluster.get(1).logs().mark();
+
+        try
+        {
+            cluster.coordinator(1).execute("select * from " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
+            fail("Query should fail");
+        }
+        catch (RuntimeException e)
+        {
+            LogResult<List<String>> result = cluster.get(1).logs().grep(mark, "while executing SELECT");
+            assertEquals(1, result.getResult().size());
+            assertTrue(result.getResult().get(0).contains("Cannot achieve consistency level"));
+        }
+    }
+
+    @Test
+    public void testReadRequestFail() throws Throwable
+    {
+        long mark = cluster.get(1).logs().mark();
+
+        try
+        {
+            cluster.coordinator(1).execute("select * from " + KEYSPACE + ".tbl where id = 55", ConsistencyLevel.ALL);
+            fail("Query should fail");
+        }
+        catch (RuntimeException e)
+        {
+            LogResult<List<String>> result = cluster.get(1).logs().grep(mark, "while executing SELECT");
+            assertEquals(1, result.getResult().size());
+            assertTrue(result.getResult().get(0).contains("Cannot achieve consistency level"));
+        }
+    }
+
+    public static class BBRequestFailures
+    {
+        static volatile boolean bootstrapping = false;
+        
+        static void install(ClassLoader cl, int nodeNumber)
+        {
+            ByteBuddy bb = new ByteBuddy();
+            
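+            // isBootstrapMode() returns the test-controlled flag; range reads (sendNextRequests) and
+            // single-partition reads (fetchRows) are redefined to always throw UnavailableException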
+            bb.redefine(StorageService.class)
+              .method(named("isBootstrapMode"))
+              .intercept(MethodDelegation.to(BBRequestFailures.class))
+              .make()
+              .load(cl, ClassLoadingStrategy.Default.INJECTION);
+
+            bb.redefine(RangeCommandIterator.class)
+              .method(named("sendNextRequests"))
+              .intercept(MethodDelegation.to(BBRequestFailures.class))
+              .make()
+              .load(cl, ClassLoadingStrategy.Default.INJECTION);
+
+            bb.redefine(StorageProxy.class)
+              .method(named("fetchRows"))
+              .intercept(MethodDelegation.to(BBRequestFailures.class))
+              .make()
+              .load(cl, ClassLoadingStrategy.Default.INJECTION);
+        }
+
+        @SuppressWarnings("unused")
+        public static boolean isBootstrapMode()
+        {
+            return bootstrapping;
+        }
+
+        @SuppressWarnings("unused")
+        public static PartitionIterator sendNextRequests()
+        {
+            throw UnavailableException.create(org.apache.cassandra.db.ConsistencyLevel.ALL, 1, 0);
+        }
+
+        @SuppressWarnings("unused")
+        public static PartitionIterator fetchRows(List<SinglePartitionReadCommand> commands, 
+                                                  org.apache.cassandra.db.ConsistencyLevel consistencyLevel, 
+                                                  long queryStartNanoTime)
+        {
+            throw UnavailableException.create(org.apache.cassandra.db.ConsistencyLevel.ALL, 1, 0);
+        }
+    }
+}
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java b/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java
index 2a45b86..3b54398 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java
@@ -150,4 +150,4 @@
     {
         return String.format("system.fromjson('{\"foo\":\"%d\", \"bar\":\"%d\"}')", i, j);
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/GossipSettlesTest.java b/test/distributed/org/apache/cassandra/distributed/test/GossipSettlesTest.java
index 5b9629a..341d854 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/GossipSettlesTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/GossipSettlesTest.java
@@ -33,7 +33,7 @@
 import org.apache.cassandra.gms.FailureDetector;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.repair.SystemDistributedKeyspace;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.service.StorageService;
diff --git a/test/distributed/org/apache/cassandra/distributed/test/GossipShutdownTest.java b/test/distributed/org/apache/cassandra/distributed/test/GossipShutdownTest.java
index ad21c21..c05925f 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/GossipShutdownTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/GossipShutdownTest.java
@@ -20,29 +20,30 @@
 
 import java.io.IOException;
 import java.io.Serializable;
-import java.net.InetSocketAddress;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.Test;
 
 import org.apache.cassandra.distributed.Cluster;
-import org.apache.cassandra.distributed.api.ConsistencyLevel;
-import org.apache.cassandra.gms.ApplicationState;
 import org.apache.cassandra.gms.EndpointState;
-import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
-import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
+
+import static java.lang.Thread.sleep;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.ALL;
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.gms.Gossiper.instance;
+import static org.apache.cassandra.net.Verb.GOSSIP_DIGEST_ACK;
+import static org.apache.cassandra.net.Verb.GOSSIP_DIGEST_SYN;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
 public class GossipShutdownTest extends TestBaseImpl
 {
@@ -61,27 +62,27 @@
             cluster.schemaChange("create table "+KEYSPACE+".tbl (id int primary key, v int)");
 
             for (int i = 0; i < 10; i++)
-                cluster.coordinator(1).execute("insert into "+KEYSPACE+".tbl (id, v) values (?,?)", ConsistencyLevel.ALL, i, i);
+                cluster.coordinator(1).execute("insert into "+KEYSPACE+".tbl (id, v) values (?,?)", ALL, i, i);
 
-            SimpleCondition timeToShutdown = new SimpleCondition();
-            SimpleCondition waitForShutdown = new SimpleCondition();
+            Condition timeToShutdown = newOneTimeCondition();
+            Condition waitForShutdown = newOneTimeCondition();
             AtomicBoolean signalled = new AtomicBoolean(false);
             Future f = es.submit(() -> {
                 await(timeToShutdown);
 
                 cluster.get(1).runOnInstance(() -> {
-                    Gossiper.instance.register(new EPChanges());
+                    instance.register(new EPChanges());
                 });
 
                 cluster.get(2).runOnInstance(() -> {
                     StorageService.instance.setIsShutdownUnsafeForTests(true);
-                    Gossiper.instance.stop();
+                    instance.stop();
                 });
                 waitForShutdown.signalAll();
             });
 
-            cluster.filters().outbound().from(2).to(1).verbs(Verb.GOSSIP_DIGEST_SYN.id).messagesMatching((from, to, message) -> true).drop();
-            cluster.filters().outbound().from(2).to(1).verbs(Verb.GOSSIP_DIGEST_ACK.id).messagesMatching((from, to, message) ->
+            cluster.filters().outbound().from(2).to(1).verbs(GOSSIP_DIGEST_SYN.id).messagesMatching((from, to, message) -> true).drop();
+            cluster.filters().outbound().from(2).to(1).verbs(GOSSIP_DIGEST_ACK.id).messagesMatching((from, to, message) ->
                                                                                                          {
                                                                                                              if (signalled.compareAndSet(false, true))
                                                                                                              {
@@ -92,7 +93,7 @@
                                                                                                              return true;
                                                                                                          }).drop();
 
-            Thread.sleep(10000); // wait for gossip to exchange a few messages
+            sleep(10000); // wait for gossip to exchange a few messages
             f.get();
         }
         finally
@@ -101,7 +102,7 @@
         }
     }
 
-    private static void await(SimpleCondition sc)
+    private static void await(Condition sc)
     {
         try
         {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/GossipTest.java b/test/distributed/org/apache/cassandra/distributed/test/GossipTest.java
index 4569600..c5f2a07 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/GossipTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/GossipTest.java
@@ -37,6 +37,7 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.*;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
 import org.apache.cassandra.gms.ApplicationState;
 import org.apache.cassandra.gms.EndpointState;
 import org.apache.cassandra.gms.Gossiper;
@@ -46,6 +47,7 @@
 import org.apache.cassandra.streaming.StreamPlan;
 import org.apache.cassandra.streaming.StreamResultFuture;
 import org.apache.cassandra.utils.FBUtilities;
+import org.assertj.core.api.Assertions;
 
 import static net.bytebuddy.matcher.ElementMatchers.named;
 import static net.bytebuddy.matcher.ElementMatchers.takesArguments;
@@ -54,7 +56,6 @@
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.api.TokenSupplier.evenlyDistributedTokens;
 import static org.apache.cassandra.distributed.impl.DistributedTestSnitch.toCassandraInetAddressAndPort;
-import static org.apache.cassandra.distributed.shared.ClusterUtils.getLocalToken;
 import static org.apache.cassandra.distributed.shared.ClusterUtils.runAndWaitForLogs;
 import static org.apache.cassandra.distributed.shared.NetworkTopology.singleDcNetworkTopology;
 import static org.junit.Assert.assertEquals;
@@ -116,7 +117,7 @@
                 }).accept(failAddress);
             }
 
-            cluster.get(fail).shutdown(false).get();
+            ClusterUtils.stopAbrupt(cluster, cluster.get(fail));
             cluster.get(late).startup();
             cluster.get(late).acceptsOnInstance((InetSocketAddress address) -> {
                 EndpointState ep;
@@ -244,9 +245,11 @@
     @Test
     public void gossipShutdownUpdatesTokenMetadata() throws Exception
     {
+        // TODO: fails with vnode enabled
         try (Cluster cluster = Cluster.build(3)
                                       .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
                                       .withInstanceInitializer(FailureHelper::installMoveFailure)
+                                      .withoutVNodes()
                                       .start())
         {
             init(cluster, 2);
@@ -313,6 +316,13 @@
         }
     }
 
+    private static String getLocalToken(IInvokableInstance node)
+    {
+        Collection<String> tokens = ClusterUtils.getLocalTokens(node);
+        Assertions.assertThat(tokens).hasSize(1);
+        return tokens.stream().findFirst().get();
+    }
+
     void assertPendingRangesForPeer(final boolean expectPending, final InetSocketAddress movingAddress, final Cluster cluster)
     {
         for (IInvokableInstance inst : new IInvokableInstance[]{ cluster.get(1), cluster.get(3)})
diff --git a/test/distributed/org/apache/cassandra/distributed/test/GroupByTest.java b/test/distributed/org/apache/cassandra/distributed/test/GroupByTest.java
index cab3fee..789321d 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/GroupByTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/GroupByTest.java
@@ -18,6 +18,7 @@
 
 package org.apache.cassandra.distributed.test;
 
+import java.util.Date;
 import java.util.Iterator;
 
 import com.google.common.collect.Iterators;
@@ -31,7 +32,11 @@
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.ICoordinator;
+import org.apache.cassandra.serializers.SimpleDateSerializer;
+import org.apache.cassandra.serializers.TimeSerializer;
+import org.apache.cassandra.serializers.TimestampSerializer;
 
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM;
 import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
@@ -42,7 +47,7 @@
     @Test
     public void groupByWithDeletesAndSrpOnPartitions() throws Throwable
     {
-        try (Cluster cluster = init(builder().withNodes(2).withConfig((cfg) -> cfg.set("enable_user_defined_functions", "true")).start()))
+        try (Cluster cluster = init(builder().withNodes(2).withConfig((cfg) -> cfg.set("user_defined_functions_enabled", "true")).start()))
         {
             cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, PRIMARY KEY (pk, ck))"));
             initFunctions(cluster);
@@ -69,7 +74,7 @@
     @Test
     public void groupByWithDeletesAndSrpOnRows() throws Throwable
     {
-        try (Cluster cluster = init(builder().withNodes(2).withConfig((cfg) -> cfg.set("enable_user_defined_functions", "true")).start()))
+        try (Cluster cluster = init(builder().withNodes(2).withConfig((cfg) -> cfg.set("user_defined_functions_enabled", "true")).start()))
         {
             cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, PRIMARY KEY (pk, ck))"));
             initFunctions(cluster);
@@ -96,7 +101,7 @@
     @Test
     public void testGroupByWithAggregatesAndPaging() throws Throwable
     {
-        try (Cluster cluster = init(builder().withNodes(2).withConfig((cfg) -> cfg.set("enable_user_defined_functions", "true")).start()))
+        try (Cluster cluster = init(builder().withNodes(2).withConfig((cfg) -> cfg.set("user_defined_functions_enabled", "true")).start()))
         {
             cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v1 text, v2 text, v3 text, primary key (pk, ck))"));
             initFunctions(cluster);
@@ -149,6 +154,183 @@
         }
     }
 
+    @Test
+    public void testGroupByTimeRangesWithTimestampType() throws Throwable
+    {
+        try (Cluster cluster = init(builder().withNodes(3).start()))
+        {
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.testWithTimestamp (pk int, time timestamp, v int, primary key (pk, time))"));
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:10:00 UTC', 1)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:12:00 UTC', 2)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:14:00 UTC', 3)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:15:00 UTC', 4)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:21:00 UTC', 5)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:22:00 UTC', 6)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:26:00 UTC', 7)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (1, '2016-09-27 16:26:20 UTC', 8)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (2, '2016-09-27 16:26:20 UTC', 10)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTimestamp (pk, time, v) VALUES (2, '2016-09-27 16:30:00 UTC', 11)"), ConsistencyLevel.QUORUM);
+
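+            // floor(time, 5m[, start]) buckets timestamps into five-minute windows (anchored at start when supplied),
+            // so each group row carries the bucket's lower bound along with min/max/count of v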
+            for (int pageSize : new int[] {2, 3, 4, 5, 7, 10})
+            {
+                for (String startingTime : new String[] {"", ", '2016-09-27 UTC'"} )
+                {
+                    String stmt = "SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithTimestamp GROUP BY pk, floor(time, 5m" + startingTime + ")";
+                    Iterator<Object[]> pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                               row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                               row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                               row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                               row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L),
+                               row(2, toTimestamp("2016-09-27 16:30:00 UTC"), 11, 11, 1L));
+
+                    stmt = "SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithTimestamp GROUP BY pk, floor(time, 5m" + startingTime + ") LIMIT 2";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                               row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L));
+
+                    stmt = "SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithTimestamp GROUP BY pk, floor(time, 5m" + startingTime + ") PER PARTITION LIMIT 1";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                               row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L));
+
+                    stmt = "SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithTimestamp WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                               row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                               row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                               row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L));
+
+                    stmt = "SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithTimestamp WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC LIMIT 2";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                               row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L));
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithDateType() throws Throwable
+    {
+        try (Cluster cluster = init(builder().withNodes(3).start()))
+        {
+
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.testWithDate (pk int, time date, v int, primary key (pk, time))"));
+
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-09-27', 1)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-09-28', 2)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-09-29', 3)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-09-30', 4)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-10-01', 5)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-10-04', 6)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-10-20', 7)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (1, '2016-11-27', 8)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (2, '2016-11-01', 10)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithDate (pk, time, v) VALUES (2, '2016-11-02', 11)"), ConsistencyLevel.QUORUM);
+
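+            // same pattern with a date column: floor(time, 1mo[, start]) groups the rows by calendar month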
+            for (int pageSize : new int[] {2, 3, 4, 5, 7, 10})
+            {
+                for (String startingTime : new String[] {"", ", '2016-06-01'"} )
+                {
+
+                    String stmt = "SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithDate GROUP BY pk, floor(time, 1mo" + startingTime + ")";
+                    Iterator<Object[]> pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toLocalDate("2016-09-01"), 1, 4, 4L),
+                               row(1, toLocalDate("2016-10-01"), 5, 7, 3L),
+                               row(1, toLocalDate("2016-11-01"), 8, 8, 1L),
+                               row(2, toLocalDate("2016-11-01"), 10, 11, 2L));
+
+                    stmt = "SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithDate GROUP BY pk, floor(time, 1mo" + startingTime + ") LIMIT 2";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toLocalDate("2016-09-01"), 1, 4, 4L),
+                               row(1, toLocalDate("2016-10-01"), 5, 7, 3L));
+
+                    stmt = "SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithDate GROUP BY pk, floor(time, 1mo" + startingTime + ") PER PARTITION LIMIT 1";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toLocalDate("2016-09-01"), 1, 4, 4L),
+                               row(2, toLocalDate("2016-11-01"), 10, 11, 2L));
+
+                    stmt = "SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithDate WHERE pk = 1 GROUP BY pk, floor(time, 1mo" + startingTime + ") ORDER BY time DESC";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toLocalDate("2016-11-01"), 8, 8, 1L),
+                               row(1, toLocalDate("2016-10-01"), 5, 7, 3L),
+                               row(1, toLocalDate("2016-09-01"), 1, 4, 4L));
+
+                    stmt = "SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s.testWithDate WHERE pk = 1 GROUP BY pk, floor(time, 1mo" + startingTime + ") ORDER BY time DESC LIMIT 2";
+                    pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                    assertRows(pagingRows, 
+                               row(1, toLocalDate("2016-11-01"), 8, 8, 1L),
+                               row(1, toLocalDate("2016-10-01"), 5, 7, 3L));
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithTimeType() throws Throwable
+    {
+        try (Cluster cluster = init(builder().withNodes(3).start()))
+        {
+
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.testWithTime (pk int, date date, time time, v int, primary key (pk, date, time))"));
+
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:10:00', 1)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:12:00', 2)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:14:00', 3)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:15:00', 4)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:21:00', 5)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:22:00', 6)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:26:00', 7)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-27', '16:26:20', 8)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-28', '16:26:20', 9)"), ConsistencyLevel.QUORUM);
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.testWithTime (pk, date, time, v) VALUES (1, '2016-09-28', '16:26:30', 10)"), ConsistencyLevel.QUORUM);
+
+            for (int pageSize : new int[] {2, 3, 4, 5, 7, 10})
+            {
+
+                String stmt = "SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s.testWithTime GROUP BY pk, date, floor(time, 5m)";
+                Iterator<Object[]> pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                assertRows(pagingRows,
+                           row(1, toLocalDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:20:00"), 5, 6, 2L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L),
+                           row(1, toLocalDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L));
+
+                stmt = "SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s.testWithTime GROUP BY pk, date, floor(time, 5m) LIMIT 2";
+                pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                assertRows(pagingRows,
+                           row(1, toLocalDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L));
+
+                stmt = "SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s.testWithTime WHERE pk = 1 GROUP BY pk, date, floor(time, 5m) ORDER BY date DESC, time DESC";
+                pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                assertRows(pagingRows,
+                           row(1, toLocalDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:20:00"), 5, 6, 2L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L));
+
+                stmt = "SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s.testWithTime WHERE pk = 1 GROUP BY pk, date, floor(time, 5m) ORDER BY date DESC, time DESC LIMIT 2";
+                pagingRows = cluster.coordinator(1).executeWithPaging(withKeyspace(stmt), QUORUM, pageSize);
+                assertRows(pagingRows,
+                           row(1, toLocalDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L),
+                           row(1, toLocalDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L));
+            }
+        }
+    }
+
     private static void initFunctions(Cluster cluster)
     {
         cluster.schemaChange(withKeyspace("CREATE FUNCTION %s.concat_strings_fn(a text, b text) " +
@@ -162,4 +344,20 @@
                                           " STYPE text" +
                                           " INITCOND '_'"));
     }
+
+    private static Date toTimestamp(String timestampAsString)
+    {
+        return new Date(TimestampSerializer.dateStringToTimestamp(timestampAsString));
+    }
+
+    private static int toLocalDate(String dateAsString)
+    {
+        return SimpleDateSerializer.dateStringToDays(dateAsString);
+    }
+
+    private static long toTime(String timeAsString)
+    {
+        return TimeSerializer.timeStringToLong(timeAsString);
+    }
+
 }
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/test/HintedHandoffNodetoolTest.java b/test/distributed/org/apache/cassandra/distributed/test/HintedHandoffNodetoolTest.java
index 5045301..a401bd0 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/HintedHandoffNodetoolTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/HintedHandoffNodetoolTest.java
@@ -122,10 +122,10 @@
     @Test
     public void testThrottle()
     {
-        Integer throttleInKB = cluster.get(node).callOnInstance(DatabaseDescriptor::getHintedHandoffThrottleInKB);
-        cluster.get(node).nodetoolResult("sethintedhandoffthrottlekb", String.valueOf(throttleInKB * 2)).asserts().success();
-        Integer newThrottleInKB = cluster.get(node).callOnInstance(DatabaseDescriptor::getHintedHandoffThrottleInKB);
-        assertEquals(throttleInKB * 2, newThrottleInKB.intValue());
+        Integer throttleInKiB = cluster.get(node).callOnInstance(DatabaseDescriptor::getHintedHandoffThrottleInKiB);
+        cluster.get(node).nodetoolResult("sethintedhandoffthrottlekb", String.valueOf(throttleInKiB * 2)).asserts().success();
+        Integer newThrottleInKiB = cluster.get(node).callOnInstance(DatabaseDescriptor::getHintedHandoffThrottleInKiB);
+        assertEquals(throttleInKiB * 2, newThrottleInKiB.intValue());
     }
 
     @SuppressWarnings("Convert2MethodRef")
diff --git a/test/distributed/org/apache/cassandra/distributed/test/HintsDisabledTest.java b/test/distributed/org/apache/cassandra/distributed/test/HintsDisabledTest.java
new file mode 100644
index 0000000..00c7e7c
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/HintsDisabledTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import org.awaitility.Awaitility;
+import org.junit.Test;
+
+import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IMessageFilters;
+import org.apache.cassandra.metrics.StorageMetrics;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+
+import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
+import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.net.Verb.MUTATION_REQ;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public class HintsDisabledTest extends TestBaseImpl
+{
+    @Test
+    public void testHintedHandoffDisabled() throws IOException
+    {
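+        // with hinted_handoff_enabled=false, a mutation dropped on its way to node2 must not leave a hint behind on node1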
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withDataDirCount(1)
+                                           .withConfig(config -> config.with(NETWORK, GOSSIP)
+                                                                       .set("write_request_timeout", "1000ms")
+                                                                       .set("hinted_handoff_enabled", false))
+                                           .start(), 2))
+        {
+            String createTableStatement = String.format("CREATE TABLE %s.cf (k text PRIMARY KEY, c1 text) " +
+                                                        "WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'} ", KEYSPACE);
+            cluster.schemaChange(createTableStatement);
+
+            CountDownLatch dropped = CountDownLatch.newCountDownLatch(1);
+            // Drop all messages from node1 to node2 so hints should be created
+            IMessageFilters.Filter drop1to2 = cluster.filters().verbs(MUTATION_REQ.id).messagesMatching((from, to, m) -> {
+                if (from != 1 || to != 2)
+                    return false;
+                dropped.decrement();
+                return true;
+            }).drop();
+
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.cf (k, c1) VALUES (?, ?) USING TIMESTAMP 1;"),
+                                           ConsistencyLevel.ONE,
+                                           String.valueOf(1),
+                                           String.valueOf(1));
+            // make sure the write response handler has completed after a chance to hint
+            assertTrue(dropped.awaitUninterruptibly(1, TimeUnit.MINUTES));
+            cluster.get(1).runOnInstance(() -> {
+                Awaitility.waitAtMost(1, TimeUnit.MINUTES).until(() -> Stage.INTERNAL_RESPONSE.executor().getPendingTaskCount() == 0);
+            });
+
+            // Check that no hints were created on node1
+            assertThat(cluster.get(1).callOnInstance(() -> Long.valueOf(StorageMetrics.totalHints.getCount()))).isEqualTo(0L);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/IncRepairAdminTest.java b/test/distributed/org/apache/cassandra/distributed/test/IncRepairAdminTest.java
index 236c819..b902632 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/IncRepairAdminTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/IncRepairAdminTest.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
@@ -41,11 +40,13 @@
 import org.apache.cassandra.repair.consistent.LocalSessionAccessor;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.PreviewKind;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.repair.consistent.ConsistentSession.State.REPAIRING;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertTrue;
 
 public class IncRepairAdminTest extends TestBaseImpl
@@ -83,7 +84,7 @@
                 res.asserts().stdoutContains("no sessions");
             });
 
-            UUID uuid = makeFakeSession(cluster);
+            TimeUUID uuid = makeFakeSession(cluster);
             awaitNodetoolRepairAdminContains(cluster, uuid, "REPAIRING", false);
             IInvokableInstance instance = cluster.get(coordinator ? 1 : 2);
 
@@ -113,7 +114,7 @@
 
 
 
-    private static void awaitNodetoolRepairAdminContains(Cluster cluster, UUID uuid, String state, boolean all)
+    private static void awaitNodetoolRepairAdminContains(Cluster cluster, TimeUUID uuid, String state, boolean all)
     {
         cluster.forEach(i -> {
             while (true)
@@ -136,9 +137,9 @@
         });
     }
 
-    private static UUID makeFakeSession(Cluster cluster)
+    private static TimeUUID makeFakeSession(Cluster cluster)
     {
-        UUID sessionId = UUIDGen.getTimeUUID();
+        TimeUUID sessionId = nextTimeUUID();
         InetSocketAddress coordinator = cluster.get(1).config().broadcastAddress();
         Set<InetSocketAddress> participants = cluster.stream()
                                                      .map(i -> i.config().broadcastAddress())
@@ -153,7 +154,7 @@
                                                                          Lists.newArrayList(cfs),
                                                                          Sets.newHashSet(range),
                                                                          true,
-                                                                         System.currentTimeMillis(),
+                                                                         currentTimeMillis(),
                                                                          true,
                                                                          PreviewKind.NONE);
                 LocalSessionAccessor.prepareUnsafe(sessionId,
diff --git a/test/distributed/org/apache/cassandra/distributed/test/IncRepairCoordinatorErrorTest.java b/test/distributed/org/apache/cassandra/distributed/test/IncRepairCoordinatorErrorTest.java
index c06e848..447cd82 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/IncRepairCoordinatorErrorTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/IncRepairCoordinatorErrorTest.java
@@ -18,14 +18,13 @@
 
 package org.apache.cassandra.distributed.test;
 
-import java.util.UUID;
-
 import org.junit.Test;
 
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.net.Verb.FINALIZE_COMMIT_MSG;
 import static org.assertj.core.api.Assertions.assertThat;
@@ -47,7 +46,7 @@
                    .to(3)
                    .messagesMatching((from, to, msg) -> msg.verb() == FINALIZE_COMMIT_MSG.id).drop();
             cluster.get(1).nodetoolResult("repair", KEYSPACE).asserts().success();
-            UUID result = (UUID) cluster.get(1).executeInternal("select parent_id from system_distributed.repair_history")[0][0];
+            TimeUUID result = (TimeUUID) cluster.get(1).executeInternal("select parent_id from system_distributed.repair_history")[0][0];
             cluster.get(3).runOnInstance(() -> {
                 ActiveRepairService.instance.failSession(result.toString(), true);
             });
diff --git a/test/distributed/org/apache/cassandra/distributed/test/IncRepairTruncationTest.java b/test/distributed/org/apache/cassandra/distributed/test/IncRepairTruncationTest.java
index 4ffdaa9..989e0ab 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/IncRepairTruncationTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/IncRepairTruncationTest.java
@@ -26,6 +26,7 @@
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.Test;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -33,14 +34,15 @@
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.IMessage;
-import org.apache.cassandra.distributed.api.IMessageFilters;
 import org.apache.cassandra.distributed.api.NodeToolResult;
 import org.apache.cassandra.net.Verb;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+
 
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.distributed.api.IMessageFilters.Matcher;
 import static org.apache.cassandra.distributed.test.PreviewRepairTest.insert;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 
 public class IncRepairTruncationTest extends TestBaseImpl
 {
@@ -133,10 +135,10 @@
         }
     }
 
-    private static class BlockMessage implements IMessageFilters.Matcher
+    private static class BlockMessage implements Matcher
     {
-        private final SimpleCondition gotMessage = new SimpleCondition();
-        private final SimpleCondition allowMessage = new SimpleCondition();
+        private final Condition gotMessage = newOneTimeCondition();
+        private final Condition allowMessage = newOneTimeCondition();
 
         public boolean matches(int from, int to, IMessage message)
         {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/IndexDroppingTest.java b/test/distributed/org/apache/cassandra/distributed/test/IndexDroppingTest.java
index 3622d7d..a7cc1ad3 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/IndexDroppingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/IndexDroppingTest.java
@@ -57,7 +57,7 @@
     @BeforeClass
     public static void init() throws IOException
     {
-        CLUSTER = Cluster.build(1).withConfig(conf -> conf.with(NETWORK).set("enable_materialized_views", "true")).start();
+        CLUSTER = Cluster.build(1).withConfig(conf -> conf.with(NETWORK).set("materialized_views_enabled", "true")).start();
         CLUSTER.get(1).runOnInstance((IIsolatedExecutor.SerializableRunnable) () -> CompactionManager.instance.disableAutoCompaction());
     }
 
diff --git a/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java b/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
index 969f372..62f0a73 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
@@ -32,7 +32,6 @@
 
 import static com.google.common.collect.Iterables.getOnlyElement;
 import static org.hamcrest.Matchers.containsString;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
@@ -87,8 +86,7 @@
 
             cluster.get(1).runOnInstance(() ->
             {
-                InboundMessageHandlers inbound = getOnlyElement(MessagingService.instance().messageHandlers.values());
-                assertEquals(0, inbound.count());
+                assertTrue(MessagingService.instance().messageHandlers.isEmpty());
 
                 OutboundConnections outbound = getOnlyElement(MessagingService.instance().channelManagers.values());
                 assertFalse(outbound.small.isConnected() || outbound.large.isConnected() || outbound.urgent.isConnected());
diff --git a/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionOptionsTest.java b/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionOptionsTest.java
index 1462b03..b06f02a 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionOptionsTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionOptionsTest.java
@@ -55,7 +55,7 @@
                   ImmutableMap.builder().putAll(validKeystore)
                   .put("internode_encryption", "none")
                   .put("optional", false)
-                  .put("enable_legacy_ssl_storage_port", "true")
+                  .put("legacy_ssl_storage_port_enabled", "true")
                   .build());
         }).createWithoutStarting())
         {
@@ -113,7 +113,7 @@
                   ImmutableMap.builder().putAll(validKeystore)
                               .put("internode_encryption", "none")
                               .put("optional", true)
-                              .put("enable_legacy_ssl_storage_port", "true")
+                              .put("legacy_ssl_storage_port_enabled", "true")
                               .build());
         }).createWithoutStarting())
         {
@@ -148,7 +148,7 @@
                   ImmutableMap.builder().putAll(validKeystore)
                               .put("internode_encryption", "none")
                               .put("optional", true)
-                              .put("enable_legacy_ssl_storage_port", "true")
+                              .put("legacy_ssl_storage_port_enabled", "true")
                               .build());
         }).createWithoutStarting())
         {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/InternodeErrorExclusionTest.java b/test/distributed/org/apache/cassandra/distributed/test/InternodeErrorExclusionTest.java
new file mode 100644
index 0000000..08fd122
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/InternodeErrorExclusionTest.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+import com.google.common.collect.ImmutableMap;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.transport.SimpleClient;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class InternodeErrorExclusionTest extends TestBaseImpl
+{
+    @BeforeClass
+    public static void beforeClass2()
+    {
+        DatabaseDescriptor.clientInitialization();
+    }
+
+    // Connect a simple native client to the internode port (which fails on the protocol magic check)
+    // and make sure the exception is swallowed.
+    @Test
+    public void ignoreExcludedInternodeErrors() throws IOException, TimeoutException
+    {
+        try (Cluster cluster = Cluster.build(1)
+                                      .withConfig(c -> c
+                                                       .with(Feature.NETWORK)
+                                                       .set("internode_error_reporting_exclusions", ImmutableMap.of("subnets", Arrays.asList("127.0.0.1"))))
+                                      .start())
+        {
+            try (SimpleClient client = SimpleClient.builder("127.0.0.1", 7012).build())
+            {
+                client.connect(true);
+                Assert.fail("Connection should fail");
+            }
+            catch (Exception e)
+            {
+                // expected
+            }
+            assertThat(cluster.get(1).logs().watchFor("address contained in internode_error_reporting_exclusions").getResult()).hasSize(1);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/JVMDTestTest.java b/test/distributed/org/apache/cassandra/distributed/test/JVMDTestTest.java
index ebf00d7..089d17b 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/JVMDTestTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/JVMDTestTest.java
@@ -26,6 +26,7 @@
 import java.util.concurrent.TimeoutException;
 
 import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.junit.Test;
 
 import org.apache.cassandra.concurrent.Stage;
@@ -35,7 +36,6 @@
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.LogAction;
-import org.apache.cassandra.service.CassandraDaemon;
 import org.apache.cassandra.utils.FBUtilities;
 import org.assertj.core.api.Assertions;
 
@@ -80,7 +80,7 @@
             long mark = logs.mark(); // get the current position so watching doesn't see any previous exceptions
             cluster.get(2).runOnInstance(() -> {
                 // pretend that an uncaught exception was thrown
-                CassandraDaemon.uncaughtException(Thread.currentThread(), new RuntimeException("fail without fail"));
+                JVMStabilityInspector.uncaughtException(Thread.currentThread(), new RuntimeException("fail without fail"));
             });
             List<String> errors = logs.watchFor(mark, "^ERROR").getResult();
             Assertions.assertThat(errors)
diff --git a/test/distributed/org/apache/cassandra/distributed/test/JVMStabilityInspectorThrowableTest.java b/test/distributed/org/apache/cassandra/distributed/test/JVMStabilityInspectorThrowableTest.java
index f8b9f01..665d58c 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/JVMStabilityInspectorThrowableTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/JVMStabilityInspectorThrowableTest.java
@@ -29,6 +29,7 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.Config.DiskFailurePolicy;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -53,6 +54,7 @@
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.service.CassandraDaemon;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.Throwables;
 
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
@@ -102,6 +104,8 @@
         String table = policy.name();
         try (final Cluster cluster = init(getCluster(policy).start()))
         {
+            cluster.setUncaughtExceptionsFilter(t -> Throwables.anyCauseMatches(
+            t, t2 -> Arrays.asList(CorruptSSTableException.class.getCanonicalName(), FSReadError.class.getCanonicalName()).contains(t2.getClass().getCanonicalName())));
             IInvokableInstance node = cluster.get(1);
             boolean[] setup = node.callOnInstance(() -> {
                 CassandraDaemon instanceForTesting = CassandraDaemon.getInstanceForTesting();
@@ -190,7 +194,7 @@
     {
         node.runOnInstance(() -> {
             ColumnFamilyStore cf = Keyspace.open(keyspace).getColumnFamilyStore(table);
-            cf.forceBlockingFlush();
+            Util.flush(cf);
 
             Set<SSTableReader> remove = cf.getLiveSSTables();
             Set<SSTableReader> replace = new HashSet<>();
@@ -220,7 +224,7 @@
         }
 
         @Override
-        public UnfilteredRowIterator iterator(DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, SSTableReadsListener listener)
+        public UnfilteredRowIterator rowIterator(DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, SSTableReadsListener listener)
         {
             if (shouldThrowCorrupted)
                 throw throwCorrupted();
@@ -228,7 +232,7 @@
         }
 
         @Override
-        public UnfilteredRowIterator iterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed)
+        public UnfilteredRowIterator rowIterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed)
         {
             if (shouldThrowCorrupted)
                 throw throwCorrupted();
diff --git a/test/distributed/org/apache/cassandra/distributed/test/JavaDriverUtils.java b/test/distributed/org/apache/cassandra/distributed/test/JavaDriverUtils.java
new file mode 100644
index 0000000..c7c478b
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/JavaDriverUtils.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import com.datastax.driver.core.ProtocolVersion;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInstance;
+
+public final class JavaDriverUtils
+{
+    private JavaDriverUtils()
+    {
+    }
+
+    public static com.datastax.driver.core.Cluster create(ICluster<? extends IInstance> dtest)
+    {
+        return create(dtest, null);
+    }
+
+    public static com.datastax.driver.core.Cluster create(ICluster<? extends IInstance> dtest, ProtocolVersion version)
+    {
+        if (dtest.size() == 0)
+            throw new IllegalArgumentException("Attempted to open java driver for empty cluster");
+
+        // make sure the needed Features are added
+        dtest.stream().forEach(i -> {
+            if (!(i.config().has(Feature.NATIVE_PROTOCOL) && i.config().has(Feature.GOSSIP))) // gossip is needed as currently Host.getHostId is empty without it
+                throw new IllegalStateException("java driver requires Feature.NATIVE_PROTOCOL and Feature.GOSSIP; but one or more is missing");
+        });
+
+        com.datastax.driver.core.Cluster.Builder builder = com.datastax.driver.core.Cluster.builder();
+
+        //TODO support port
+        //TODO support auth
+        dtest.stream().forEach(i -> builder.addContactPoint(i.broadcastAddress().getAddress().getHostAddress()));
+
+        if (version != null)
+            builder.withProtocolVersion(version);
+
+        return builder.build();
+    }
+}
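A note on the helper above: JavaDriverUtils wraps the Java driver's Cluster.Builder so in-JVM dtests can open a native-protocol session against the dtest cluster. A minimal usage sketch, assuming a dtest Cluster built with Feature.NATIVE_PROTOCOL and Feature.GOSSIP (the query is illustrative; NativeMixedVersionTest below uses the same pattern with an explicit ProtocolVersion):

    try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster);
         com.datastax.driver.core.Session session = driver.connect())
    {
        // any statement works here; system.local is always present
        session.execute("SELECT * FROM system.local");
    }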
diff --git a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java
index 0a81359..b03171b 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java
@@ -29,8 +29,6 @@
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.ICluster;
 
-import static java.util.concurrent.TimeUnit.SECONDS;
-
 // TODO: this test should be removed after running in-jvm dtests is set up via the shared API repository
 public class LargeColumnTest extends TestBaseImpl
 {
@@ -67,12 +65,12 @@
         try (ICluster cluster = init(builder()
                                      .withNodes(nodes)
                                      .withConfig(config ->
-                                                 config.set("commitlog_segment_size_in_mb", (columnSize * 3) >> 20)
-                                                       .set("internode_application_send_queue_reserve_endpoint_capacity_in_bytes", columnSize * 2)
-                                                       .set("internode_application_send_queue_reserve_global_capacity_in_bytes", columnSize * 3)
-                                                       .set("write_request_timeout_in_ms", SECONDS.toMillis(30L))
-                                                       .set("read_request_timeout_in_ms", SECONDS.toMillis(30L))
-                                                       .set("memtable_heap_space_in_mb", 1024)
+                                                 config.set("commitlog_segment_size", String.format("%dMiB",(columnSize * 3) >> 20))
+                                                       .set("internode_application_send_queue_reserve_endpoint_capacity", String.format("%dB", (columnSize * 2)))
+                                                       .set("internode_application_send_queue_reserve_global_capacity", String.format("%dB", (columnSize * 3)))
+                                                       .set("write_request_timeout", "30s")
+                                                       .set("read_request_timeout", "30s")
+                                                       .set("memtable_heap_space", "1024MiB")
                                      )
                                      .start()))
         {
@@ -94,4 +92,4 @@
     {
         testLargeColumns(2, 16 << 20, 5);
     }
-}
\ No newline at end of file
+}
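For reference, with the invocation used above (testLargeColumns(2, 16 << 20, 5), i.e. a 16 MiB column), the formatted settings resolve to commitlog_segment_size = "48MiB", internode_application_send_queue_reserve_endpoint_capacity = "33554432B" (twice the column size) and internode_application_send_queue_reserve_global_capacity = "50331648B" (three times the column size), the same limits the old *_in_mb / *_in_bytes keys expressed.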
diff --git a/test/distributed/org/apache/cassandra/distributed/test/LegacyCASTest.java b/test/distributed/org/apache/cassandra/distributed/test/LegacyCASTest.java
new file mode 100644
index 0000000..0f764cf
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/LegacyCASTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.function.Consumer;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.impl.UnsafeGossipHelper;
+
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.ANY;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM;
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+
+public class LegacyCASTest extends CASCommonTestCases
+{
+    private static Cluster CLUSTER;
+
+    @BeforeClass
+    public static void beforeClass() throws Throwable
+    {
+        TestBaseImpl.beforeClass();
+        CLUSTER = init(Cluster.create(3, config()));
+    }
+
+    @AfterClass
+    public static void afterClass()
+    {
+        if (CLUSTER != null)
+            CLUSTER.close();
+    }
+
+    private static Consumer<IInstanceConfig> config()
+    {
+        return config -> config
+                .set("paxos_variant", "v1")
+                .set("write_request_timeout_in_ms", 5000L)
+                .set("cas_contention_timeout_in_ms", 5000L)
+                .set("request_timeout_in_ms", 5000L);
+    }
+
+    /**
+     * This particular variant is unique to legacy Paxos because of the differing quorums for consensus, read and commit.
+     * It is also unique to range movements with an even-numbered RF under legacy paxos.
+     *
+     * Range movements do not necessarily complete; they may be aborted.
+     * CAS consistency should not be affected by this.
+     *
+     *  - Range moving from {1, 2} to {2, 3}; witnessed by all
+     *  - Promised and Accepted on {2, 3}; Commits are delayed and arrive after next commit (or perhaps vanish)
+     *  - Range move cancelled; a new one starts moving {1, 2} to {2, 4}; witnessed by all
+     *  - Promised, Accepted and Committed on {1, 4}
+     */
+    @Ignore // known to be unsafe, just documents issue
+    @Test
+    public void testAbortedRangeMovement() throws Throwable
+    {
+        try (Cluster cluster = Cluster.create(4, config()))
+        {
+            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
+            int pk = pk(cluster, 1, 2);
+
+            // set {3} bootstrapping, {4} not in ring
+            for (int i = 1 ; i <= 4 ; ++i)
+                cluster.get(i).acceptsOnInstance(UnsafeGossipHelper::removeFromRing).accept(cluster.get(3));
+            for (int i = 1 ; i <= 4 ; ++i)
+                cluster.get(i).acceptsOnInstance(UnsafeGossipHelper::removeFromRing).accept(cluster.get(4));
+            for (int i = 1 ; i <= 4 ; ++i)
+                cluster.get(i).acceptsOnInstance(UnsafeGossipHelper::addToRingBootstrapping).accept(cluster.get(3));
+
+            // {3} promises and accepts on !{1} => {2, 3}
+            // {3} commits do not YET arrive on either of {1, 2} (must be either due to read quorum differing on legacy Paxos)
+            drop(cluster, 3, to(1), to(), to(1), to(1, 2));
+            assertRows(cluster.coordinator(3).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ANY, pk),
+                    row(true));
+
+            // abort {3} bootstrap, start {4} bootstrap
+            for (int i = 1 ; i <= 4 ; ++i)
+                cluster.get(i).acceptsOnInstance(UnsafeGossipHelper::removeFromRing).accept(cluster.get(3));
+            for (int i = 1 ; i <= 4 ; ++i)
+                cluster.get(i).acceptsOnInstance(UnsafeGossipHelper::addToRingBootstrapping).accept(cluster.get(4));
+
+            // {4} promises and accepts on !{2} => {1, 4}
+            // {4} commits on {1, 2, 4}
+            drop(cluster, 4, to(2), to(), to(2), to());
+            assertRows(cluster.coordinator(4).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", QUORUM, pk),
+                    row(false, pk, 1, 1, null));
+        }
+    }
+
+    protected Cluster getCluster()
+    {
+        return CLUSTER;
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java b/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java
index 6ea186c..e1a0a91 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java
@@ -187,7 +187,7 @@
         String read = "SELECT * FROM " + KEYSPACE + ".tbl";
         String write = "INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1)";
 
-        try (ICluster<IInvokableInstance> cluster = builder().withNodes(2).withConfig(c -> c.set("range_request_timeout_in_ms", 20000)).start())
+        try (ICluster<IInvokableInstance> cluster = builder().withNodes(2).withConfig(c -> c.set("range_request_timeout", "2000ms")).start())
         {
             cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + cluster.size() + "};");
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
@@ -335,4 +335,4 @@
             // ignore
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java b/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java
index f7883b3..a4dd8e6 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java
@@ -23,7 +23,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
@@ -40,7 +39,9 @@
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.impl.IsolatedExecutor;
 import org.apache.cassandra.distributed.impl.TracingUtil;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class MessageForwardingTest extends TestBaseImpl
 {
@@ -60,9 +61,9 @@
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v text, PRIMARY KEY (pk, ck))");
 
             cluster.forEach(instance -> commitCounts.put(instance.broadcastAddress().getAddress(), 0));
-            final UUID sessionId = UUIDGen.getTimeUUID();
+            final TimeUUID sessionId = nextTimeUUID();
             Stream<Future<Object[][]>> inserts = IntStream.range(0, numInserts).mapToObj((idx) -> {
-                return cluster.coordinator(1).asyncExecuteWithTracing(sessionId,
+                return cluster.coordinator(1).asyncExecuteWithTracing(sessionId.asUUID(),
                                                                       "INSERT INTO " + KEYSPACE + ".tbl(pk,ck,v) VALUES (1, 1, 'x')",
                                                                       ConsistencyLevel.ALL);
             });
@@ -89,7 +90,7 @@
 
             cluster.stream("dc1").forEach(instance -> forwardFromCounts.put(instance.broadcastAddress().getAddress(), 0));
             cluster.forEach(instance -> commitCounts.put(instance.broadcastAddress().getAddress(), 0));
-            List<TracingUtil.TraceEntry> traces = TracingUtil.getTrace(cluster, sessionId, ConsistencyLevel.ALL);
+            List<TracingUtil.TraceEntry> traces = TracingUtil.getTrace(cluster, sessionId.asUUID(), ConsistencyLevel.ALL);
             traces.forEach(traceEntry -> {
                 if (traceEntry.activity.contains("Appending to commitlog"))
                 {
@@ -119,4 +120,4 @@
             TracingUtil.setWaitForTracingEventTimeoutSecs(originalTraceTimeout);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/MoveTest.java b/test/distributed/org/apache/cassandra/distributed/test/MoveTest.java
new file mode 100644
index 0000000..fc1bca3
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/MoveTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.NodeToolResult;
+import org.apache.cassandra.service.StorageService;
+
+import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
+import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+
+public class MoveTest extends TestBaseImpl
+{
+    private static final String KEYSPACE = "move_test_ks";
+    private static final String TABLE = "tbl";
+    private static final String KS_TBL = KEYSPACE + '.' + TABLE;
+
+    static
+    {
+        System.setProperty("cassandra.ring_delay_ms", "5000"); // down from 30s default
+    }
+
+    private void move(boolean forwards) throws Throwable
+    {
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = Cluster.build(4)
+                                      .withConfig(config -> config.set("paxos_variant", "v2_without_linearizable_reads").with(NETWORK).with(GOSSIP))
+                                      .withoutVNodes()
+                                      .start())
+        {
+            cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
+            cluster.schemaChange("CREATE TABLE " + KS_TBL + " (k int, c int, v int, primary key (k, c));");
+            for (int i=0; i<30; i++)
+            {
+                cluster.coordinator(1).execute("INSERT INTO " + KS_TBL + " (k, c, v) VALUES (?, 1, 1) IF NOT EXISTS",
+                                               ConsistencyLevel.SERIAL, ConsistencyLevel.ALL, i);
+            }
+
+            List<String> initialTokens = new ArrayList<>();
+            for (int i=0; i<cluster.size(); i++)
+            {
+                String token = cluster.get(i + 1).callsOnInstance(() -> Iterables.getOnlyElement(StorageService.instance.getLocalTokens()).toString()).call();
+                initialTokens.add(token);
+            }
+            Assert.assertEquals(Lists.newArrayList("-4611686018427387905",
+                                                   "-3",
+                                                   "4611686018427387899",
+                                                   "9223372036854775801"), initialTokens);
+
+            NodeToolResult result = cluster.get(forwards ? 2 : 3).nodetoolResult("move", "2305843009213693949");
+            Assert.assertTrue(result.toString(), result.getRc() == 0);
+        }
+    }
+
+    @Test
+    public void moveBack() throws Throwable
+    {
+        move(false);
+    }
+
+    @Test
+    public void moveForwards() throws Throwable
+    {
+        move(true);
+    }
+
+}
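For context on the magic numbers above: with vnodes disabled the four nodes get the evenly spaced Murmur3 tokens asserted in move(), and the move target 2305843009213693949 is one above the midpoint of node 2's token (-3) and node 3's token (4611686018427387899), since (-3 + 4611686018427387899) / 2 = 2305843009213693948. "Forwards" thus moves node 2 up toward node 3, while "back" moves node 3 down toward node 2.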
diff --git a/test/distributed/org/apache/cassandra/distributed/test/MultipleDataDirectoryTest.java b/test/distributed/org/apache/cassandra/distributed/test/MultipleDataDirectoryTest.java
index a2f4aab..0826954 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/MultipleDataDirectoryTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/MultipleDataDirectoryTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.distributed.test;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.util.Iterator;
@@ -38,6 +37,7 @@
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 
 public class MultipleDataDirectoryTest extends TestBaseImpl
 {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/NativeMixedVersionTest.java b/test/distributed/org/apache/cassandra/distributed/test/NativeMixedVersionTest.java
new file mode 100644
index 0000000..26d0186
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/NativeMixedVersionTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
+import org.junit.Test;
+
+import com.datastax.driver.core.ProtocolVersion;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Session;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.assertj.core.api.Assertions;
+
+public class NativeMixedVersionTest extends TestBaseImpl
+{
+    @Test
+    public void v4ConnectionCleansUpThreadLocalState() throws IOException
+    {
+        // make sure to limit the netty thread pool to size 1; this makes the test deterministic, as all work
+        // will happen on the single thread.
+        System.setProperty("io.netty.eventLoopThreads", "1");
+        try (Cluster cluster = Cluster.build(1)
+                                      .withConfig(c ->
+                                                  c.with(Feature.values())
+                                                   .set("read_thresholds_enabled", true)
+                                                   .set("local_read_size_warn_threshold", "1KiB")
+                                      )
+                                      .start())
+        {
+            init(cluster);
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck1 int, value blob, PRIMARY KEY (pk, ck1))"));
+            IInvokableInstance node = cluster.get(1);
+
+            ByteBuffer blob = ByteBuffer.wrap("This is just some large string to get some number of bytes".getBytes(StandardCharsets.UTF_8));
+
+            for (int i = 0; i < 100; i++)
+                node.executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck1, value) VALUES (?, ?, ?)"), 0, i, blob);
+
+            // v4+ processes the STARTUP message on the netty thread.  To make sure we do not leak the ClientWarn state,
+            // make sure a warning is generated by a query, then run on the same threads on the v3 protocol (which
+            // does not support warnings).
+            try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V5);
+                 Session session = driver.connect())
+            {
+                ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
+                Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isNotEmpty();
+            }
+
+            try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V3);
+                 Session session = driver.connect())
+            {
+                ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
+                Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
+            }
+
+            // this should not happen; so make sure no logs are found
+            List<String> result = node.logs().grep("Warnings present in message with version less than").getResult();
+            Assertions.assertThat(result).isEmpty();
+        }
+        finally
+        {
+            System.clearProperty("io.netty.eventLoopThreads");
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java b/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
index d9d01e0..f965572 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
@@ -18,6 +18,11 @@
 
 package org.apache.cassandra.distributed.test;
 
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.service.StorageService;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -26,21 +31,8 @@
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.SimpleStatement;
 import com.datastax.driver.core.Statement;
-import com.datastax.driver.core.exceptions.DriverInternalError;
-import com.datastax.driver.core.policies.LoadBalancingPolicy;
-import net.bytebuddy.ByteBuddy;
-import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
-import net.bytebuddy.implementation.MethodDelegation;
-import org.apache.cassandra.cql3.CQLStatement;
-import org.apache.cassandra.cql3.QueryHandler;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ICluster;
-import org.apache.cassandra.distributed.api.IInstanceConfig;
-import org.apache.cassandra.distributed.api.IInvokableInstance;
-import org.apache.cassandra.distributed.api.IIsolatedExecutor;
 import org.apache.cassandra.distributed.impl.RowUtil;
-import org.apache.cassandra.service.StorageService;
 
 import static org.apache.cassandra.distributed.action.GossipHelper.withProperty;
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
@@ -127,4 +119,4 @@
                                                            () -> StorageService.instance.isNativeTransportRunning()));
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/NetstatsBootstrapWithEntireSSTablesCompressionStreamingTest.java b/test/distributed/org/apache/cassandra/distributed/test/NetstatsBootstrapWithEntireSSTablesCompressionStreamingTest.java
index 7c53426..8cc8a44 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/NetstatsBootstrapWithEntireSSTablesCompressionStreamingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/NetstatsBootstrapWithEntireSSTablesCompressionStreamingTest.java
@@ -33,4 +33,10 @@
     {
         executeTest(true, false);
     }
+
+    @Test
+    public void testWithStreamingEntireSSTablesWithoutCompressionWithoutThrottling() throws Exception
+    {
+        executeTest(true, false, 0);
+    }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java b/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java
index 5f74c77..8e280a3 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java
@@ -54,8 +54,8 @@
         try (final Cluster cluster = Cluster.build()
                                             .withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(2, "dc0", "rack0"))
                                             .withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)
-                                                                        .set("stream_throughput_outbound_megabits_per_sec", 1)
-                                                                        .set("compaction_throughput_mb_per_sec", 1)
+                                                                        .set("stream_throughput_outbound", "122KiB/s")
+                                                                        .set("compaction_throughput", "1MiB/s")
                                                                         .set("stream_entire_sstables", false)).start())
         {
             final IInvokableInstance node1 = cluster.get(1);
@@ -85,4 +85,4 @@
             NetstatsOutputParser.validate(NetstatsOutputParser.parse(results));
         }
     }
-}
\ No newline at end of file
+}
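The throttle values above look like unit conversions rather than behaviour changes: 1 megabit/s is 1,000,000 / 8 = 125,000 bytes/s, or roughly 122 KiB/s, which is what the new stream_throughput_outbound string encodes, and compaction_throughput keeps the old limit of 1 as "1MiB/s" in the new syntax.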
diff --git a/test/distributed/org/apache/cassandra/distributed/test/NodeToolTest.java b/test/distributed/org/apache/cassandra/distributed/test/NodeToolTest.java
index 89e6168..9087f96 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/NodeToolTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/NodeToolTest.java
@@ -73,7 +73,7 @@
     public void testNodetoolSystemExit()
     {
         // Verify currently calls System.exit; this test uses that knowledge to test System.exit behavior in jvm-dtest
-        NODE.nodetoolResult("verify", "--check-tokens")
+        NODE.nodetoolResult("verify", "--check-tokens", "--force")
             .asserts()
             .failure()
             .stdoutContains("Token verification requires --extended-verify");
@@ -110,7 +110,7 @@
     @Test
     public void testSetCacheCapacityWhenDisabled() throws Throwable
     {
-        try (ICluster cluster = init(builder().withNodes(1).withConfig(c->c.set("row_cache_size_in_mb", "0")).start()))
+        try (ICluster cluster = init(builder().withNodes(1).withConfig(c->c.set("row_cache_size", "0MiB")).start()))
         {
             NodeToolResult ringResult = cluster.get(1).nodetoolResult("setcachecapacity", "1", "1", "1");
             ringResult.asserts().stderrContains("is not permitted as this cache is disabled");
diff --git a/test/distributed/org/apache/cassandra/distributed/test/OversizedMutationTest.java b/test/distributed/org/apache/cassandra/distributed/test/OversizedMutationTest.java
new file mode 100644
index 0000000..2b0d0a0
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/OversizedMutationTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.ALL;
+
+public class OversizedMutationTest extends TestBaseImpl
+{
+    @Test
+    public void testSingleOversizedMutation() throws Throwable
+    {
+        try (Cluster cluster = init(builder().withNodes(1).withConfig(c -> c.set("max_mutation_size", "48KiB"))
+                                             .start()))
+        {
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (key int PRIMARY KEY, val blob)"));
+            String payload = StringUtils.repeat('1', 1024 * 49);
+            String query = "INSERT INTO %s.t (key, val) VALUES (1, textAsBlob('" + payload + "'))";
+            Assertions.assertThatThrownBy(() -> cluster.coordinator(1).execute(withKeyspace(query), ALL))
+                      .hasMessageContaining("Rejected an oversized mutation (")
+                      .hasMessageContaining("/49152) for keyspace: distributed_test_keyspace. Top keys are: t.1");
+        }
+    }
+
+    @Test
+    public void testOversizedBatch() throws Throwable
+    {
+        try (Cluster cluster = init(builder().withNodes(1).withConfig(c -> c.set("max_mutation_size", "48KiB"))
+                                             .start()))
+        {
+            cluster.schemaChange(withKeyspace("CREATE KEYSPACE ks1 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
+            cluster.schemaChange(withKeyspace("CREATE TABLE ks1.t (key int PRIMARY KEY, val blob)"));
+            String payload = StringUtils.repeat('1', 1024 * 48);
+            String query = "BEGIN BATCH\n" +
+                           "INSERT INTO ks1.t (key, val) VALUES (1, textAsBlob('" + payload + "'))\n" +
+                           "INSERT INTO ks1.t (key, val) VALUES (2, textAsBlob('222'))\n" +
+                           "APPLY BATCH";
+            Assertions.assertThatThrownBy(() -> cluster.coordinator(1).execute(withKeyspace(query), ALL))
+                      .hasMessageContaining("Rejected an oversized mutation (")
+                      .hasMessageContaining("/49152) for keyspace: ks1. Top keys are: t.1");
+        }
+    }
+}
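The numbers in the assertions above follow from the configured limit: a max_mutation_size of "48KiB" is 48 * 1024 = 49,152 bytes, hence the "/49152" in the expected error text, and both the 49 KiB single-row payload and the 48 KiB batch payload plus its second row and batch overhead exceed that limit.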
diff --git a/test/distributed/org/apache/cassandra/distributed/test/PartitionDenylistTest.java b/test/distributed/org/apache/cassandra/distributed/test/PartitionDenylistTest.java
new file mode 100644
index 0000000..382981f
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/PartitionDenylistTest.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.service.StorageProxy;
+import org.apache.cassandra.service.StorageService;
+
+import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
+import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
+public class PartitionDenylistTest extends TestBaseImpl
+{
+    private static final Logger logger = LoggerFactory.getLogger(PartitionDenylistTest.class);
+    private static final int testReplicationFactor = 3;
+
+    // Create a four node cluster, populate with some denylist entries, stop all
+    // the nodes, then bring them up one by one, waiting for each node to complete
+    // startup before starting the next.
+    //
+    // On startup each node runs a SELECT * query on the partition denylist table
+    // to populate the cache.  The whole keyspace is unlikely to be available until
+    // three of the four nodes are started, so the early nodes will go through several
+    // cycles of failing to retrieve the partition denylist before succeeding.
+    //
+    // with(NETWORK, GOSSIP) is currently required for in-JVM dtests to create
+    // the distributed system tables.
+    @Test
+    public void checkStartupWithoutTriggeringUnavailable() throws IOException, InterruptedException, ExecutionException, TimeoutException
+    {
+        int nodeCount = 4;
+        System.setProperty("cassandra.ring_delay_ms", "5000"); // down from 30s default
+        System.setProperty("cassandra.consistent.rangemovement", "false");
+        System.setProperty("cassandra.consistent.simultaneousmoves.allow", "true");
+        try (Cluster cluster = Cluster.build(nodeCount)
+                                      .withConfig(config -> config
+                                      .with(NETWORK)
+                                      .with(GOSSIP)
+                                      .set("partition_denylist_enabled", true)
+                                      .set("denylist_initial_load_retry", "1s"))
+                                      .createWithoutStarting())
+        {
+            cluster.forEach(i -> {
+                i.startup();
+                i.runOnInstance(PartitionDenylistTest::waitUntilStarted);
+            });
+
+            // Do a cluster-wide check that no unavailables were recorded while the denylist was loaded.
+            cluster.forEach(i -> i.runOnInstance(PartitionDenylistTest::checkNoUnavailables));
+        }
+    }
+
+    static private void waitUntilStarted()
+    {
+        waitUntilStarted(60, TimeUnit.SECONDS);
+    }
+
+    // To be called inside the instance with runOnInstance
+    static private void waitUntilStarted(int waitDuration, TimeUnit waitUnits)
+    {
+        long deadlineInMillis = currentTimeMillis() + Math.max(1, waitUnits.toMillis(waitDuration));
+        while (!StorageService.instance.getOperationMode().equals("NORMAL"))
+        {
+            if (currentTimeMillis() >= deadlineInMillis)
+            {
+                throw new RuntimeException("Instance did not reach application state NORMAL before timeout");
+            }
+            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
+        }
+    }
+
+    // To be called inside the instance with runOnInstance
+    static private void checkNoUnavailables()
+    {
+        long deadlineInMillis = currentTimeMillis() + TimeUnit.SECONDS.toMillis(30);
+
+        while (currentTimeMillis() < deadlineInMillis &&
+               StorageProxy.instance.getPartitionDenylistLoadSuccesses() == 0)
+        {
+            Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
+        }
+
+        Assert.assertTrue("Partition denylist must have loaded before checking unavailables",
+                          StorageProxy.instance.getPartitionDenylistLoadSuccesses() > 0);
+    }
+
+    // To be called inside the instance with runOnInstance, no nodes are started/stopped
+    // and not enough nodes are available to succeed, so it should just retry a few times
+    static private void checkTimerActive()
+    {
+        long deadlineInMillis = currentTimeMillis() + TimeUnit.SECONDS.toMillis(30);
+
+        do
+        {
+            // Make sure at least two load attempts have happened,
+            // in case we received a node up event about this node
+            if (StorageProxy.instance.getPartitionDenylistLoadAttempts() > 2)
+            {
+                return;
+            }
+            Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
+        } while (currentTimeMillis() < deadlineInMillis);
+
+        Assert.fail("Node did not retry loading on timeout in 30s");
+    }
+
+    @Test
+    public void checkTimerRetriesLoad() throws IOException
+    {
+        int nodeCount = 3;
+
+        try (Cluster cluster = Cluster.build(nodeCount)
+                                      .withConfig(config -> config
+                                      .with(NETWORK)
+                                      .with(GOSSIP)
+                                      .set("partition_denylist_enabled", true)
+                                      .set("denylist_initial_load_retry", "1s"))
+                                      .createWithoutStarting())
+        {
+            // Starting without networking enabled in the hope it doesn't trigger
+            // node lifecycle events when nodes start up.
+            cluster.get(1).startup();
+            cluster.get(1).runOnInstance(PartitionDenylistTest::checkTimerActive);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/PaxosRepair2Test.java b/test/distributed/org/apache/cassandra/distributed/test/PaxosRepair2Test.java
new file mode 100644
index 0000000..574b84f
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/PaxosRepair2Test.java
@@ -0,0 +1,609 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Sets;
+import org.awaitility.Awaitility;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.statements.SelectStatement;
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.ReadExecutionController;
+import org.apache.cassandra.db.ReadQuery;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.rows.BTreeRow;
+import org.apache.cassandra.db.rows.BufferCell;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.exceptions.CasWriteTimeoutException;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.repair.RepairParallelism;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.Paxos;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosKeyState;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosRows;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTracker;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTracker.UpdateSupplier;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Pair;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.schema.SchemaConstants.SYSTEM_KEYSPACE_NAME;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.GLOBAL;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.staleBallot;
+
+import org.apache.cassandra.utils.CloseableIterator;
+
+// quick workaround for metaspace ooms, will properly reuse clusters later
+public class PaxosRepair2Test extends TestBaseImpl
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosRepair2Test.class);
+    private static final String TABLE = "tbl";
+    public static final String OFFSETTABLE_CLOCK_NAME = OffsettableClock.class.getName();
+
+    static
+    {
+        CassandraRelevantProperties.PAXOS_EXECUTE_ON_SELF.setBoolean(false);
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    private static int getUncommitted(IInvokableInstance instance, String keyspace, String table)
+    {
+        if (instance.isShutdown())
+            return 0;
+        int uncommitted = instance.callsOnInstance(() -> {
+            TableMetadata cfm = Schema.instance.getTableMetadata(keyspace, table);
+            return Iterators.size(PaxosState.uncommittedTracker().uncommittedKeyIterator(cfm.id, null));
+        }).call();
+        logger.info("{} has {} uncommitted instances", instance, uncommitted);
+        return uncommitted;
+    }
+
+    private static void assertUncommitted(IInvokableInstance instance, String ks, String table, int expected)
+    {
+        Assert.assertEquals(expected, getUncommitted(instance, ks, table));
+    }
+
+    private static void repair(Cluster cluster, String keyspace, String table, boolean force)
+    {
+        Map<String, String> options = new HashMap<>();
+        options.put(RepairOption.PARALLELISM_KEY, RepairParallelism.SEQUENTIAL.getName());
+        options.put(RepairOption.PRIMARY_RANGE_KEY, Boolean.toString(false));
+        options.put(RepairOption.INCREMENTAL_KEY, Boolean.toString(false));
+        options.put(RepairOption.JOB_THREADS_KEY, Integer.toString(1));
+        options.put(RepairOption.TRACE_KEY, Boolean.toString(false));
+        options.put(RepairOption.COLUMNFAMILIES_KEY, "");
+        options.put(RepairOption.PULL_REPAIR_KEY, Boolean.toString(false));
+        options.put(RepairOption.FORCE_REPAIR_KEY, Boolean.toString(force));
+        options.put(RepairOption.PREVIEW, PreviewKind.NONE.toString());
+        options.put(RepairOption.IGNORE_UNREPLICATED_KS, Boolean.toString(false));
+        options.put(RepairOption.REPAIR_PAXOS_KEY, Boolean.toString(true));
+        options.put(RepairOption.PAXOS_ONLY_KEY, Boolean.toString(true));
+
+        cluster.get(1).runOnInstance(() -> {
+            int cmd = StorageService.instance.repairAsync(keyspace, options);
+
+            while (true)
+            {
+                try
+                {
+                    Thread.sleep(500);
+                }
+                catch (InterruptedException e)
+                {
+                    throw new AssertionError(e);
+                }
+                Pair<ActiveRepairService.ParentRepairStatus, List<String>> status = ActiveRepairService.instance.getRepairStatus(cmd);
+                if (status == null)
+                    continue;
+
+                switch (status.left)
+                {
+                    case IN_PROGRESS:
+                        continue;
+                    case COMPLETED:
+                        return;
+                    default:
+                        throw new AssertionError("Repair failed with errors: " + status.right);
+                }
+            }
+        });
+    }
+
+    private static void repair(Cluster cluster, String keyspace, String table)
+    {
+        repair(cluster, keyspace, table, false);
+    }
+
+    @Test
+    public void paxosRepairPreventsStaleReproposal() throws Throwable
+    {
+        Ballot staleBallot = Paxos.newBallot(Ballot.none(), org.apache.cassandra.db.ConsistencyLevel.SERIAL);
+        try (Cluster cluster = init(Cluster.create(3, cfg -> cfg
+                                                             .set("paxos_variant", "v2")
+                                                             .set("paxos_purge_grace_period", "0s")
+                                                             .set("truncate_request_timeout_in_ms", 1000L)))
+        )
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (k int primary key, v int)");
+            repair(cluster, KEYSPACE, TABLE);
+
+            // stop and start node 2 to test loading paxos repair history from disk
+            cluster.get(2).shutdown().get();
+            cluster.get(2).startup();
+
+            for (int i=0; i<cluster.size(); i++)
+            {
+                cluster.get(i+1).runOnInstance(() -> {
+                    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE);
+                    DecoratedKey key = cfs.decorateKey(ByteBufferUtil.bytes(1));
+                    Assert.assertFalse(FBUtilities.getBroadcastAddressAndPort().toString(), Commit.isAfter(staleBallot, cfs.getPaxosRepairLowBound(key)));
+                });
+            }
+
+            // add in the stale proposal
+            cluster.get(1).runOnInstance(() -> {
+                TableMetadata cfm = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+                DecoratedKey key = DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(1));
+                ColumnMetadata cdef = cfm.getColumn(new ColumnIdentifier("v", false));
+                Cell cell = BufferCell.live(cdef, staleBallot.unixMicros(), ByteBufferUtil.bytes(1));
+                Row row = BTreeRow.singleCellRow(Clustering.EMPTY, cell);
+                PartitionUpdate update = PartitionUpdate.singleRowUpdate(cfm, key, row);
+                Commit.Proposal proposal = new Commit.Proposal(staleBallot, update);
+                SystemKeyspace.savePaxosProposal(proposal);
+            });
+
+            // shut down node 3 so we're guaranteed to see the stale proposal
+            cluster.get(3).shutdown().get();
+
+            // the stale inflight proposal should be ignored and the query should succeed
+            String query = "INSERT INTO " + KEYSPACE + '.' + TABLE + " (k, v) VALUES (1, 2) IF NOT EXISTS";
+            Object[][] result = cluster.coordinator(1).execute(query, ConsistencyLevel.QUORUM);
+            Assert.assertEquals(new Object[][]{new Object[]{ true }}, result);
+
+            assertLowBoundPurged(cluster.get(1));
+            assertLowBoundPurged(cluster.get(2));
+        }
+    }
+
+    @Test
+    public void paxosRepairHistoryIsntUpdatedInForcedRepair() throws Throwable
+    {
+        Ballot staleBallot = staleBallot(System.currentTimeMillis() - 1000000, System.currentTimeMillis() - 100000, GLOBAL);
+        try (Cluster cluster = init(Cluster.create(3, cfg -> cfg.with(Feature.GOSSIP, Feature.NETWORK)
+                                                                .set("paxos_variant", "v2")
+                                                                .set("truncate_request_timeout_in_ms", 1000L)))
+        )
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (k int primary key, v int)");
+            cluster.get(3).shutdown().get();
+            InetAddressAndPort node3 = InetAddressAndPort.getByAddress(cluster.get(3).broadcastAddress());
+
+            // make sure node 1 knows node 3 is down
+            Awaitility.waitAtMost(1, TimeUnit.MINUTES)
+                      .until(() -> !cluster.get(1).callOnInstance(() -> FailureDetector.instance.isAlive(node3)));
+
+            repair(cluster, KEYSPACE, TABLE, true);
+            for (int i = 0; i < cluster.size() - 1; i++)
+            {
+                cluster.get(i + 1).runOnInstance(() -> {
+                    Assert.assertFalse(CassandraRelevantProperties.CLOCK_GLOBAL.isPresent());
+                    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE);
+                    DecoratedKey key = cfs.decorateKey(ByteBufferUtil.bytes(1));
+                    Assert.assertTrue(FBUtilities.getBroadcastAddressAndPort().toString(), Commit.isAfter(staleBallot, cfs.getPaxosRepairLowBound(key)));
+                });
+            }
+        }
+    }
+
+    private static class PaxosRow
+    {
+        final DecoratedKey key;
+        final Row row;
+
+        PaxosRow(DecoratedKey key, Row row)
+        {
+            this.key = key;
+            this.row = row;
+        }
+
+        public String toString()
+        {
+            TableMetadata cfm = Schema.instance.getTableMetadata(SYSTEM_KEYSPACE_NAME, SystemKeyspace.PAXOS);
+            return ByteBufferUtil.bytesToHex(key.getKey()) + " -> " + row.toString(cfm, true);
+        }
+    }
+
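+    // flush and fully compact system.paxos so that purgeable paxos state is physically removed before the assertions below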
+    private static void compactPaxos()
+    {
+        ColumnFamilyStore paxos = Keyspace.open(SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PAXOS);
+        FBUtilities.waitOnFuture(paxos.forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS));
+        FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(paxos, 0, false));
+    }
+
+    private static Map<Integer, PaxosRow> getPaxosRows()
+    {
+        Map<Integer, PaxosRow> rows = new HashMap<>();
+        String queryStr = "SELECT * FROM " + SYSTEM_KEYSPACE_NAME + '.' + SystemKeyspace.PAXOS;
+        SelectStatement stmt = (SelectStatement) QueryProcessor.parseStatement(queryStr).prepare(ClientState.forInternalCalls());
+        ReadQuery query = stmt.getQuery(QueryOptions.DEFAULT, FBUtilities.nowInSeconds());
+        try (ReadExecutionController controller = query.executionController(); PartitionIterator partitions = query.executeInternal(controller))
+        {
+            while (partitions.hasNext())
+            {
+                RowIterator partition = partitions.next();
+                while (partition.hasNext())
+                {
+                    rows.put(Int32Type.instance.compose(partition.partitionKey().getKey()),
+                             new PaxosRow(partition.partitionKey(), partition.next()));
+                }
+            }
+        }
+        return rows;
+    }
+
+    private static void assertLowBoundPurged(Collection<PaxosRow> rows)
+    {
+        Assert.assertEquals(0, DatabaseDescriptor.getPaxosPurgeGrace(SECONDS));
+        String ip = FBUtilities.getBroadcastAddressAndPort().toString();
+        for (PaxosRow row : rows)
+        {
+            Ballot keyLowBound = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE).getPaxosRepairLowBound(row.key);
+            Assert.assertTrue(ip, Commit.isAfter(keyLowBound, Ballot.none()));
+            Assert.assertFalse(ip, PaxosRows.hasBallotBeforeOrEqualTo(row.row, keyLowBound));
+        }
+    }
+
+    private static void assertLowBoundPurged(IInvokableInstance instance)
+    {
+        instance.runOnInstance(() -> assertLowBoundPurged(getPaxosRows().values()));
+    }
+
+    private static void assertLowBoundPurged(Cluster cluster)
+    {
+        cluster.forEach(PaxosRepair2Test::assertLowBoundPurged);
+    }
+
+    @Test
+    public void paxosAutoRepair() throws Throwable
+    {
+        System.setProperty("cassandra.auto_repair_frequency_seconds", "1");
+        System.setProperty("cassandra.disable_paxos_auto_repairs", "true");
+        try (Cluster cluster = init(Cluster.create(3, cfg -> cfg
+                                                             .set("paxos_variant", "v2")
+                                                             .set("paxos_repair_enabled", true)
+                                                             .set("truncate_request_timeout_in_ms", 1000L)))
+        )
+        {
+            cluster.forEach(i -> {
+                Assert.assertFalse(CassandraRelevantProperties.CLOCK_GLOBAL.isPresent());
+                Assert.assertEquals("1", System.getProperty("cassandra.auto_repair_frequency_seconds"));
+                Assert.assertEquals("true", System.getProperty("cassandra.disable_paxos_auto_repairs"));
+            });
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+            cluster.get(3).shutdown().get();
+            cluster.verbs(Verb.PAXOS_COMMIT_REQ).drop();
+            try
+            {
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+                Assert.fail("expected write timeout");
+            }
+            catch (Throwable t)
+            {
+                // expected: PAXOS_COMMIT_REQ is dropped, so the write times out
+            }
+            assertUncommitted(cluster.get(1), KEYSPACE, TABLE, 1);
+            assertUncommitted(cluster.get(2), KEYSPACE, TABLE, 1);
+
+            cluster.filters().reset();
+            // the paxos table needs at least one flush before auto-repair will pick it up
+            cluster.get(1).flush("system");
+            cluster.get(2).flush("system");
+            // re-enable repairs
+            cluster.get(1).runOnInstance(() -> StorageService.instance.setPaxosAutoRepairsEnabled(true));
+            cluster.get(2).runOnInstance(() -> StorageService.instance.setPaxosAutoRepairsEnabled(true));
+            Thread.sleep(2000);
+            for (int i=0; i<20; i++)
+            {
+                if (!cluster.get(1).callsOnInstance(() -> PaxosState.uncommittedTracker().hasInflightAutoRepairs()).call()
+                 && !cluster.get(2).callsOnInstance(() -> PaxosState.uncommittedTracker().hasInflightAutoRepairs()).call())
+                    break;
+                logger.info("Waiting for auto repairs to finish...");
+                Thread.sleep(1000);
+            }
+            assertUncommitted(cluster.get(1), KEYSPACE, TABLE, 0);
+            assertUncommitted(cluster.get(2), KEYSPACE, TABLE, 0);
+        }
+        finally
+        {
+            System.clearProperty("cassandra.auto_repair_frequency_seconds");
+            System.clearProperty("cassandra.disable_paxos_auto_repairs");
+        }
+    }
+
+    @Test
+    public void paxosPurgeGraceSeconds() throws Exception
+    {
+        int graceSeconds = 5;
+        try (Cluster cluster = init(Cluster.create(3, cfg -> cfg
+                                                             .set("paxos_variant", "v2")
+                                                             .set("paxos_purge_grace_period", graceSeconds + "s")
+                                                             .set("paxos_state_purging", Config.PaxosStatePurging.repaired.toString())
+                                                             .set("truncate_request_timeout_in_ms", 1000L)))
+        )
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+
+            repair(cluster, KEYSPACE, TABLE);
+            cluster.forEach(i -> i.runOnInstance(() -> {
+                Assert.assertFalse(CassandraRelevantProperties.CLOCK_GLOBAL.isPresent());
+                compactPaxos();
+                Map<Integer, PaxosRow> rows = getPaxosRows();
+                Assert.assertEquals(Sets.newHashSet(1), rows.keySet());
+            }));
+
+            // wait for the grace period to pass, then repair again; the rows should now be removed
+            Thread.sleep((graceSeconds + 1) * 1000);
+            repair(cluster, KEYSPACE, TABLE);
+            cluster.forEach(i -> i.runOnInstance(() -> {
+                compactPaxos();
+                Map<Integer, PaxosRow> rows = getPaxosRows();
+                Assert.assertEquals(Sets.newHashSet(), rows.keySet());
+            }));
+        }
+    }
+
+    static void assertTimeout(Runnable runnable)
+    {
+        try
+        {
+            runnable.run();
+            Assert.fail("timeout expected");
+        }
+        catch (RuntimeException e)
+        {
+            Assert.assertEquals(CasWriteTimeoutException.class.getName(), e.getClass().getName());
+        }
+    }
+
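+    // local deletion time used by legacy TTL-based paxos purging: the ballot timestamp (in seconds) plus the table's legacy paxos TTL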
+    private static int ballotDeletion(Commit commit)
+    {
+        return (int) TimeUnit.MICROSECONDS.toSeconds(commit.ballot.unixMicros()) + SystemKeyspace.legacyPaxosTtlSec(commit.update.metadata());
+    }
+
+    private static void backdateTimestamps(int seconds)
+    {
+        long offsetMillis = SECONDS.toMillis(seconds);
+        ClientState.resetLastTimestamp(System.currentTimeMillis() - offsetMillis);
+        OffsettableClock.offsetMillis = -offsetMillis;
+    }
+
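+    // clock implementation installed via CassandraRelevantProperties.CLOCK_GLOBAL (see legacyPurgeRepairLoop) so tests can shift currentTimeMillis() backwards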
+    public static class OffsettableClock implements Clock
+    {
+        private static volatile long offsetMillis = 0;
+        public long nanoTime()
+        {
+            return System.nanoTime(); // checkstyle: permit system clock
+        }
+
+        public long currentTimeMillis()
+        {
+            return System.currentTimeMillis() + offsetMillis; // checkstyle: permit system clock
+        }
+    }
+
+    @Test
+    public void legacyPurgeRepairLoop() throws Exception
+    {
+        try
+        {
+            CassandraRelevantProperties.CLOCK_GLOBAL.setString(OFFSETTABLE_CLOCK_NAME);
+            try (Cluster cluster = init(Cluster.create(3, cfg -> cfg
+                                                                 .set("paxos_variant", "v2")
+                                                                 .set("paxos_state_purging", "legacy")
+                                                                 .set("paxos_purge_grace_period", "0s")
+                                                                 .set("truncate_request_timeout_in_ms", 1000L)))
+            )
+            {
+                cluster.forEach(i -> Assert.assertEquals(OFFSETTABLE_CLOCK_NAME, CassandraRelevantProperties.CLOCK_GLOBAL.getString()));
+                int ttl = 3 * 3600;
+                cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck)) WITH gc_grace_seconds=" + ttl);
+
+                // prepare an operation ttl + 1 hour into the past on a single node
+                cluster.forEach(instance -> {
+                    instance.runOnInstance(() -> {
+                        Assert.assertEquals(OFFSETTABLE_CLOCK_NAME, CassandraRelevantProperties.CLOCK_GLOBAL.getString());
+                        backdateTimestamps(ttl + 3600);
+                    });
+                });
+                cluster.filters().inbound().to(1, 2).drop();
+                assertTimeout(() -> cluster.coordinator(3).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (400, 2, 2) IF NOT EXISTS", ConsistencyLevel.QUORUM));
+                Ballot oldBallot = Ballot.fromUuid(cluster.get(3).callOnInstance(() -> {
+                    TableMetadata cfm = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+                    DecoratedKey dk = cfm.partitioner.decorateKey(ByteBufferUtil.bytes(400));
+                    try (PaxosState state = PaxosState.get(dk, cfm))
+                    {
+                        return state.currentSnapshot().promised.asUUID();
+                    }
+                }));
+
+                assertUncommitted(cluster.get(1), KEYSPACE, TABLE, 0);
+                assertUncommitted(cluster.get(2), KEYSPACE, TABLE, 0);
+                assertUncommitted(cluster.get(3), KEYSPACE, TABLE, 1);
+
+                // commit an operation just over ttl in the past on the other nodes
+                cluster.filters().reset();
+                cluster.filters().inbound().to(2).drop();
+                cluster.forEach(instance -> {
+                    instance.runOnInstance(() -> {
+                        backdateTimestamps(ttl + 2);
+                    });
+                });
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (400, 2, 2) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+
+                // expire the cache entries
+                int nowInSec = FBUtilities.nowInSeconds();
+                cluster.get(1).runOnInstance(() -> {
+                    TableMetadata table = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+                    DecoratedKey dk = table.partitioner.decorateKey(ByteBufferUtil.bytes(400));
+                    try (PaxosState state = PaxosState.get(dk, table))
+                    {
+                        state.updateStateUnsafe(s -> {
+                            Assert.assertNull(s.accepted);
+                            Assert.assertTrue(Commit.isAfter(s.committed.ballot, oldBallot));
+                            Commit.CommittedWithTTL committed = new Commit.CommittedWithTTL(s.committed.ballot,
+                                                                                            s.committed.update,
+                                                                                            ballotDeletion(s.committed));
+                            Assert.assertTrue(committed.localDeletionTime < nowInSec);
+                            return new PaxosState.Snapshot(Ballot.none(), Ballot.none(), null, committed);
+                        });
+                    }
+                });
+
+                cluster.get(3).runOnInstance(() -> {
+                    TableMetadata table = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+                    DecoratedKey dk = table.partitioner.decorateKey(ByteBufferUtil.bytes(400));
+                    try (PaxosState state = PaxosState.get(dk, table))
+                    {
+                        state.updateStateUnsafe(s -> {
+                            Assert.assertNull(s.accepted);
+                            Assert.assertTrue(Commit.isAfter(s.committed.ballot, oldBallot));
+                            Commit.CommittedWithTTL committed = new Commit.CommittedWithTTL(s.committed.ballot,
+                                                                                            s.committed.update,
+                                                                                            ballotDeletion(s.committed));
+                            Assert.assertTrue(committed.localDeletionTime < nowInSec);
+                            return new PaxosState.Snapshot(oldBallot, oldBallot, null, committed);
+                        });
+                    }
+                });
+
+                cluster.forEach(instance -> {
+                    instance.runOnInstance(() -> {
+                        backdateTimestamps(0);
+                    });
+                });
+
+                cluster.filters().reset();
+                cluster.filters().inbound().to(2).drop();
+                cluster.get(3).runOnInstance(() -> {
+
+                    TableMetadata table = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+                    DecoratedKey dk = table.partitioner.decorateKey(ByteBufferUtil.bytes(400));
+
+                    UpdateSupplier supplier = PaxosState.uncommittedTracker().unsafGetUpdateSupplier();
+                    try
+                    {
+                        PaxosUncommittedTracker.unsafSetUpdateSupplier(new SingleUpdateSupplier(table, dk, oldBallot));
+                        StorageService.instance.autoRepairPaxos(table.id).get();
+                    }
+                    catch (Exception e)
+                    {
+                        throw new RuntimeException(e);
+                    }
+                    finally
+                    {
+                        PaxosUncommittedTracker.unsafSetUpdateSupplier(supplier);
+                    }
+                });
+
+                assertUncommitted(cluster.get(1), KEYSPACE, TABLE, 0);
+                assertUncommitted(cluster.get(2), KEYSPACE, TABLE, 0);
+                assertUncommitted(cluster.get(3), KEYSPACE, TABLE, 0);
+            }
+        }
+        finally
+        {
+            CassandraRelevantProperties.CLOCK_GLOBAL.reset();
+        }
+    }
+
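+    // update supplier that reports exactly one uncommitted key/ballot; swapped in above to force auto-repair of the stale ballot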
+    private static class SingleUpdateSupplier implements UpdateSupplier
+    {
+        private final TableMetadata cfm;
+        private final DecoratedKey dk;
+        private final Ballot ballot;
+
+        public SingleUpdateSupplier(TableMetadata cfm, DecoratedKey dk, Ballot ballot)
+        {
+            this.cfm = cfm;
+            this.dk = dk;
+            this.ballot = ballot;
+        }
+
+        public CloseableIterator<PaxosKeyState> repairIterator(TableId cfId, Collection<Range<Token>> ranges)
+        {
+            if (!cfId.equals(cfm.id))
+                return CloseableIterator.empty();
+            return CloseableIterator.wrap(Collections.singleton(new PaxosKeyState(cfId, dk, ballot, false)).iterator());
+        }
+
+        public CloseableIterator<PaxosKeyState> flushIterator(Memtable paxos)
+        {
+            throw new UnsupportedOperationException();
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest.java b/test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest.java
new file mode 100644
index 0000000..bfd4e95
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest.java
@@ -0,0 +1,642 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.net.InetSocketAddress;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicIntegerArray;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Assert;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.*;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.statements.SelectStatement;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.db.rows.*;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IMessageFilters;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.repair.RepairParallelism;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.paxos.*;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanup;
+import org.apache.cassandra.service.paxos.uncommitted.PaxosRows;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.*;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_FINISH_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_COMMIT_AND_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PREPARE_RSP;
+import static org.apache.cassandra.net.Verb.PAXOS2_PROPOSE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_PROPOSE_RSP;
+import static org.apache.cassandra.net.Verb.PAXOS2_REPAIR_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_COMMIT_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_COMMIT_RSP;
+import static org.apache.cassandra.schema.SchemaConstants.SYSTEM_KEYSPACE_NAME;
+
+public class PaxosRepairTest extends TestBaseImpl
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosRepairTest.class);
+    private static final String TABLE = "tbl";
+
+    static
+    {
+        CassandraRelevantProperties.PAXOS_EXECUTE_ON_SELF.setBoolean(false);
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    private static int getUncommitted(IInvokableInstance instance, String keyspace, String table)
+    {
+        if (instance.isShutdown())
+            return 0;
+        int uncommitted = instance.callsOnInstance(() -> {
+            TableMetadata meta = Schema.instance.getTableMetadata(keyspace, table);
+            return Iterators.size(PaxosState.uncommittedTracker().uncommittedKeyIterator(meta.id, null));
+        }).call();
+        logger.info("{} has {} uncommitted instances", instance, uncommitted);
+        return uncommitted;
+    }
+
+    private static void assertAllAlive(Cluster cluster)
+    {
+        Set<InetAddressAndPort> allEndpoints = cluster.stream().map(i -> InetAddressAndPort.getByAddress(i.broadcastAddress())).collect(Collectors.toSet());
+        cluster.stream().forEach(instance -> {
+            instance.runOnInstance(() -> {
+                ImmutableSet<InetAddressAndPort> endpoints = Gossiper.instance.getEndpoints();
+                Assert.assertEquals(allEndpoints, endpoints);
+                for (InetAddressAndPort endpoint : endpoints)
+                    Assert.assertTrue(FailureDetector.instance.isAlive(endpoint));
+            });
+        });
+    }
+
+    private static void assertUncommitted(IInvokableInstance instance, String ks, String table, int expected)
+    {
+        Assert.assertEquals(expected, getUncommitted(instance, ks, table));
+    }
+
+    private static boolean hasUncommitted(Cluster cluster, String ks, String table)
+    {
+        return cluster.stream().map(instance -> getUncommitted(instance, ks, table)).reduce((a, b) -> a + b).get() > 0;
+    }
+
+    private static boolean hasUncommittedQuorum(Cluster cluster, String ks, String table)
+    {
+        int uncommitted = 0;
+        for (int i=0; i<cluster.size(); i++)
+        {
+            if (getUncommitted(cluster.get(i+1), ks, table) > 0)
+                uncommitted++;
+        }
+        return uncommitted >= ((cluster.size() / 2) + 1);
+    }
+
+    private static void repair(Cluster cluster, String keyspace, String table, boolean force)
+    {
+        Map<String, String> options = new HashMap<>();
+        options.put(RepairOption.PARALLELISM_KEY, RepairParallelism.SEQUENTIAL.getName());
+        options.put(RepairOption.PRIMARY_RANGE_KEY, Boolean.toString(false));
+        options.put(RepairOption.INCREMENTAL_KEY, Boolean.toString(false));
+        options.put(RepairOption.JOB_THREADS_KEY, Integer.toString(1));
+        options.put(RepairOption.TRACE_KEY, Boolean.toString(false));
+        options.put(RepairOption.COLUMNFAMILIES_KEY, "");
+        options.put(RepairOption.PULL_REPAIR_KEY, Boolean.toString(false));
+        options.put(RepairOption.FORCE_REPAIR_KEY, Boolean.toString(force));
+        options.put(RepairOption.PREVIEW, PreviewKind.NONE.toString());
+        options.put(RepairOption.IGNORE_UNREPLICATED_KS, Boolean.toString(false));
+        options.put(RepairOption.REPAIR_PAXOS_KEY, Boolean.toString(true));
+        options.put(RepairOption.PAXOS_ONLY_KEY, Boolean.toString(true));
+
+        cluster.get(1).runOnInstance(() -> {
+            int cmd = StorageService.instance.repairAsync(keyspace, options);
+
+            while (true)
+            {
+                try
+                {
+                    Thread.sleep(500);
+                }
+                catch (InterruptedException e)
+                {
+                    throw new AssertionError(e);
+                }
+                Pair<ActiveRepairService.ParentRepairStatus, List<String>> status = ActiveRepairService.instance.getRepairStatus(cmd);
+                if (status == null)
+                    continue;
+
+                switch (status.left)
+                {
+                    case IN_PROGRESS:
+                        continue;
+                    case COMPLETED:
+                        return;
+                    default:
+                        throw new AssertionError("Repair failed with errors: " + status.right);
+                }
+            }
+        });
+    }
+
+    private static void repair(Cluster cluster, String keyspace, String table)
+    {
+        repair(cluster, keyspace, table, false);
+    }
+
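+    // both configs use ByteOrderedPartitioner with an initial token of 100 * the node number, so integer partition keys map to predictable replicas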
+    private static final Consumer<IInstanceConfig> WITH_NETWORK = cfg -> {
+        cfg.with(Feature.NETWORK);
+        cfg.with(Feature.GOSSIP);
+        cfg.set("paxos_purge_grace_period", "0s");
+        cfg.set("paxos_state_purging", Config.PaxosStatePurging.repaired.toString());
+        cfg.set("paxos_variant", "v2_without_linearizable_reads");
+        cfg.set("truncate_request_timeout", "1000ms");
+        cfg.set("partitioner", "ByteOrderedPartitioner");
+        cfg.set("initial_token", ByteBufferUtil.bytesToHex(ByteBufferUtil.bytes(cfg.num() * 100)));
+    };
+
+    private static final Consumer<IInstanceConfig> WITHOUT_NETWORK = cfg -> {
+        cfg.set("paxos_purge_grace_period", "0s");
+        cfg.set("paxos_state_purging", Config.PaxosStatePurging.repaired.toString());
+        cfg.set("paxos_variant", "v2_without_linearizable_reads");
+        cfg.set("truncate_request_timeout", "1000ms");
+        cfg.set("partitioner", "ByteOrderedPartitioner");
+        cfg.set("initial_token", ByteBufferUtil.bytesToHex(ByteBufferUtil.bytes(cfg.num() * 100)));
+    };
+
+    @Test
+    public void paxosRepairTest() throws Throwable
+    {
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = init(Cluster.build(3).withConfig(WITH_NETWORK).withoutVNodes().start()))
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+            Assert.assertFalse(hasUncommittedQuorum(cluster, KEYSPACE, TABLE));
+
+            assertAllAlive(cluster);
+            cluster.verbs(PAXOS_COMMIT_REQ).drop();
+            try
+            {
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (400, 2, 2) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+                Assert.fail("expected write timeout");
+            }
+            catch (RuntimeException e)
+            {
+                // exception expected
+            }
+
+            Assert.assertTrue(hasUncommitted(cluster, KEYSPACE, TABLE));
+
+            cluster.filters().reset();
+
+            assertAllAlive(cluster);
+            repair(cluster, KEYSPACE, TABLE);
+
+            Assert.assertFalse(hasUncommitted(cluster, KEYSPACE, TABLE));
+
+            cluster.forEach(i -> i.runOnInstance(() -> {
+                compactPaxos();
+                Map<Integer, PaxosRow> rows = getPaxosRows();
+                assertLowBoundPurged(rows.values());
+                Assert.assertEquals(Sets.newHashSet(400), rows.keySet());
+            }));
+
+            // check that operations occurring after the last repair are not purged
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (500, 3, 3) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+            cluster.forEach(i -> i.runOnInstance(() -> {
+                compactPaxos();
+                Map<Integer, PaxosRow> rows = getPaxosRows();
+                assertLowBoundPurged(rows.values());
+                Assert.assertEquals(Sets.newHashSet(400, 500), rows.keySet());
+            }));
+        }
+    }
+
+    @Ignore
+    @Test
+    public void topologyChangePaxosTest() throws Throwable
+    {
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = Cluster.build(4).withConfig(WITH_NETWORK).withoutVNodes().createWithoutStarting())
+        {
+            for (int i=1; i<=3; i++)
+                cluster.get(i).startup();
+
+            init(cluster);
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+
+            cluster.verbs(PAXOS_COMMIT_REQ).drop();
+            try
+            {
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (350, 2, 2) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+                Assert.fail("expected write timeout");
+            }
+            catch (RuntimeException e)
+            {
+                // exception expected
+            }
+            Assert.assertTrue(hasUncommitted(cluster, KEYSPACE, TABLE));
+
+            cluster.filters().reset();
+
+            // node 4 starting should repair paxos and inform the other nodes of its gossip state
+            cluster.get(4).startup();
+            Assert.assertFalse(hasUncommittedQuorum(cluster, KEYSPACE, TABLE));
+        }
+    }
+
+    @Test
+    public void paxosCleanupWithReproposal() throws Throwable
+    {
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = init(Cluster.build(3)
+                                           .withConfig(cfg -> cfg
+                                                              .set("paxos_variant", "v2")
+                                                              .set("paxos_purge_grace_period", "0s")
+                                                              .set("paxos_state_purging", Config.PaxosStatePurging.repaired.toString())
+                                                              .set("truncate_request_timeout", "1000ms"))
+                                           .withoutVNodes()
+                                           .start()))
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+            cluster.verbs(PAXOS_COMMIT_REQ).drop();
+            try
+            {
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+                Assert.fail("expected write timeout");
+            }
+            catch (RuntimeException e)
+            {
+                // exception expected
+            }
+            Assert.assertTrue(hasUncommitted(cluster, KEYSPACE, TABLE));
+            cluster.forEach(i -> i.runOnInstance(() -> Keyspace.open("system").getColumnFamilyStore("paxos").forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS)));
+
+            CountDownLatch haveFetchedLowBound = new CountDownLatch(1);
+            CountDownLatch haveReproposed = new CountDownLatch(1);
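+            // hold back the cleanup's finish-prepare message until a conflicting re-proposal has been attempted below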
+            cluster.verbs(PAXOS2_CLEANUP_FINISH_PREPARE_REQ).inbound().messagesMatching((from, to, verb) -> {
+                haveFetchedLowBound.countDown();
+                Uninterruptibles.awaitUninterruptibly(haveReproposed);
+                return false;
+            }).drop();
+
+            ExecutorService executor = Executors.newCachedThreadPool();
+            List<InetAddressAndPort> endpoints = cluster.stream().map(IInstance::broadcastAddress).map(InetAddressAndPort::getByAddress).collect(Collectors.toList());
+            Future<?> cleanup = cluster.get(1).appliesOnInstance((List<? extends InetSocketAddress> es, ExecutorService exec)-> {
+                TableMetadata metadata = Keyspace.open(KEYSPACE).getMetadata().getTableOrViewNullable(TABLE);
+                return PaxosCleanup.cleanup(es.stream().map(InetAddressAndPort::getByAddress).collect(Collectors.toSet()), metadata, StorageService.instance.getLocalRanges(KEYSPACE), false, exec);
+            }).apply(endpoints, executor);
+
+            Uninterruptibles.awaitUninterruptibly(haveFetchedLowBound);
+            IMessageFilters.Filter filter2 = cluster.verbs(PAXOS_COMMIT_REQ, PAXOS2_COMMIT_AND_PREPARE_REQ).drop();
+            try
+            {
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+                Assert.fail("expected write timeout");
+            }
+            catch (RuntimeException e)
+            {
+                // exception expected
+            }
+            filter2.off();
+            haveReproposed.countDown();
+            cluster.filters().reset();
+
+            cleanup.get();
+            ExecutorUtils.shutdownNowAndWait(1L, TimeUnit.MINUTES, executor);
+            Assert.assertFalse(hasUncommitted(cluster, KEYSPACE, TABLE));
+            cluster.forEach(i -> i.runOnInstance(PaxosRepairTest::compactPaxos));
+            for (int i = 1 ; i <= 3 ; ++i)
+                assertRows(cluster.get(i).executeInternal("SELECT * FROM " + KEYSPACE + '.' + TABLE + " WHERE pk = 1"), row(1, 1, 1));
+
+            Assert.assertFalse(hasUncommittedQuorum(cluster, KEYSPACE, TABLE));
+            assertLowBoundPurged(cluster);
+        }
+    }
+
+    @SuppressWarnings("unused")
+    @Test
+    public void paxosCleanupWithReproposalClashingTimestamp() throws Throwable
+    {
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = init(Cluster.build(5)
+                                           .withConfig(cfg -> cfg
+                                                              .set("paxos_variant", "v2")
+                                                              .set("paxos_purge_grace_period", "0s")
+                                                              .set("paxos_cache_size", "0MiB")
+                                                              .set("truncate_request_timeout", "1000ms"))
+                                           .withoutVNodes()
+                                           .start()))
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+            // we ensure:
+            //  - node 1 only witnesses a promise that conflicts with something we committed
+            //  - node 2 does not witness the commit, so it has an in progress proposal
+            //  - node 3 does not witness the proposal, so that we have an incomplete commit
+            //  - node 1's response arrives first, so that (without the bugfix) it might retain its promise as latestWitnessed
+
+            CountDownLatch haveStartedCleanup = new CountDownLatch(1);
+            CountDownLatch haveInsertedClashingPromise = new CountDownLatch(1);
+            IMessageFilters.Filter pauseCleanupUntilCommitted = cluster.verbs(PAXOS2_CLEANUP_REQ).from(1).to(1).outbound().messagesMatching((from, to, verb) -> {
+                haveStartedCleanup.countDown();
+                Uninterruptibles.awaitUninterruptibly(haveInsertedClashingPromise);
+                return false;
+            }).drop();
+
+            ExecutorService executor = Executors.newCachedThreadPool();
+            List<InetAddressAndPort> endpoints = cluster.stream().map(i -> InetAddressAndPort.getByAddress(i.broadcastAddress())).collect(Collectors.toList());
+            Future<?> cleanup = cluster.get(1).appliesOnInstance((List<? extends InetSocketAddress> es, ExecutorService exec)-> {
+                TableMetadata metadata = Keyspace.open(KEYSPACE).getMetadata().getTableOrViewNullable(TABLE);
+                return PaxosCleanup.cleanup(es.stream().map(InetAddressAndPort::getByAddress).collect(Collectors.toSet()), metadata, StorageService.instance.getLocalRanges(KEYSPACE), false, exec);
+            }).apply(endpoints, executor);
+
+            IMessageFilters.Filter dropAllTo1 = cluster.verbs(PAXOS2_PREPARE_REQ, PAXOS2_PROPOSE_REQ, PAXOS_COMMIT_REQ).from(2).to(1).outbound().drop();
+            IMessageFilters.Filter dropCommitTo3 = cluster.verbs(PAXOS_COMMIT_REQ).from(2).to(3).outbound().drop();
+            IMessageFilters.Filter dropAcceptTo4 = cluster.verbs(PAXOS2_PROPOSE_REQ).from(2).to(4).outbound().drop();
+
+            CountDownLatch haveFetchedClashingRepair = new CountDownLatch(1);
+            AtomicIntegerArray fetchResponseIds = new AtomicIntegerArray(new int[] { -1, -1, -1, -1, -1, -1 });
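+            // record the ids of the paxos repair requests node 1 sends out, then ensure node 1's own response (with the clashing promise) is processed before any peer responses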
+            cluster.verbs(PAXOS2_REPAIR_REQ).outbound().from(1).messagesMatching((from, to, msg) -> {
+                fetchResponseIds.set(to, msg.id());
+                return false;
+            }).drop();
+            cluster.verbs(PAXOS2_PREPARE_RSP, PAXOS2_PROPOSE_RSP, PAXOS_COMMIT_RSP).outbound().to(1).messagesMatching((from, to, msg) -> {
+                if (fetchResponseIds.get(from) == msg.id())
+                {
+                    if (from == 1) haveFetchedClashingRepair.countDown();
+                    else Uninterruptibles.awaitUninterruptibly(haveFetchedClashingRepair);
+                }
+                return false;
+            }).drop();
+
+            Uninterruptibles.awaitUninterruptibly(haveStartedCleanup);
+            cluster.coordinator(2).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.ONE);
+
+            UUID cfId = cluster.get(2).callOnInstance(() -> Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE).metadata.id.asUUID());
+            TimeUUID uuid = (TimeUUID) cluster.get(2).executeInternal("SELECT in_progress_ballot FROM system.paxos WHERE row_key = ? AND cf_id = ?", Int32Type.instance.decompose(1), cfId)[0][0];
+            TimeUUID clashingUuid = TimeUUID.fromBytes(uuid.msb(), 0);
+            cluster.get(1).executeInternal("update system.paxos set in_progress_ballot = ? WHERE row_key = ? and cf_id = ?", clashingUuid, Int32Type.instance.decompose(1), cfId);
+            Assert.assertEquals(clashingUuid, cluster.get(1).executeInternal("select in_progress_ballot from system.paxos WHERE row_key = ? and cf_id = ?", Int32Type.instance.decompose(1), cfId)[0][0]);
+
+            Assert.assertTrue(hasUncommitted(cluster, KEYSPACE, TABLE));
+            haveInsertedClashingPromise.countDown();
+
+            cleanup.get();
+            ExecutorUtils.shutdownNowAndWait(1L, TimeUnit.MINUTES, executor);
+        }
+    }
+
+    @Test
+    public void paxosCleanupWithDelayedProposal() throws Throwable
+    {
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = init(Cluster.build(3)
+                                           .withConfig(cfg -> cfg
+                                                              .set("paxos_variant", "v2")
+                                                              .set("paxos_purge_grace_period", "0s")
+                                                              .set("paxos_state_purging", Config.PaxosStatePurging.repaired.toString())
+                                                              .set("truncate_request_timeout", "1000ms"))
+                                           .withoutVNodes()
+                                           .start())
+        )
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+
+            CountDownLatch haveFinishedRepair = new CountDownLatch(1);
+            cluster.verbs(PAXOS2_PREPARE_REQ).messagesMatching((from, to, verb) -> {
+                Uninterruptibles.awaitUninterruptibly(haveFinishedRepair);
+                return false;
+            }).drop();
+            cluster.verbs(PAXOS_COMMIT_REQ).drop();
+            Future<?> insert = cluster.get(1).async(() -> {
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+                Assert.fail("expected write timeout");
+            }).call();
+            cluster.verbs(PAXOS2_CLEANUP_FINISH_PREPARE_REQ).messagesMatching((from, to, verb) -> {
+                haveFinishedRepair.countDown();
+                try { insert.get(); } catch (Throwable t) {}
+                cluster.filters().reset();
+                return false;
+            }).drop();
+
+            ExecutorService executor = Executors.newCachedThreadPool();
+
+            Uninterruptibles.sleepUninterruptibly(10L, TimeUnit.MILLISECONDS);
+
+            List<InetAddressAndPort> endpoints = cluster.stream().map(i -> InetAddressAndPort.getByAddress(i.broadcastAddress())).collect(Collectors.toList());
+            Future<?> cleanup = cluster.get(1).appliesOnInstance((List<? extends InetSocketAddress> es, ExecutorService exec)-> {
+                TableMetadata metadata = Keyspace.open(KEYSPACE).getMetadata().getTableOrViewNullable(TABLE);
+                return PaxosCleanup.cleanup(es.stream().map(InetAddressAndPort::getByAddress).collect(Collectors.toSet()), metadata, StorageService.instance.getLocalRanges(KEYSPACE), false, exec);
+            }).apply(endpoints, executor);
+
+            cleanup.get();
+            try
+            {
+                insert.get();
+            }
+            catch (Throwable t)
+            {
+            }
+            ExecutorUtils.shutdownNowAndWait(1L, TimeUnit.MINUTES, executor);
+            Assert.assertFalse(hasUncommittedQuorum(cluster, KEYSPACE, TABLE));
+
+            assertLowBoundPurged(cluster);
+        }
+    }
+
+    private static void setVersion(IInvokableInstance instance, InetSocketAddress peer, String version)
+    {
+        instance.runOnInstance(() -> {
+            Gossiper.runInGossipStageBlocking(() -> {
+                EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(InetAddressAndPort.getByAddress(peer.getAddress()));
+                VersionedValue value = version != null ? StorageService.instance.valueFactory.rack(version) : null;
+                epState.addApplicationState(ApplicationState.RELEASE_VERSION, value);
+            });
+        });
+    }
+
+    private static void assertRepairFailsWithVersion(Cluster cluster, String version)
+    {
+        for (int i = 1 ; i <= cluster.size() ; ++i)
+            setVersion(cluster.get(i), cluster.get(2).broadcastAddress(), version);
+        try
+        {
+            repair(cluster, KEYSPACE, TABLE);
+        }
+        catch (AssertionError e)
+        {
+            return;
+        }
+        Assert.fail("Repair should have failed on unsupported version");
+    }
+
+    private static void assertRepairSucceedsWithVersion(Cluster cluster, String version)
+    {
+        for (int i = 1 ; i <= cluster.size() ; ++i)
+            setVersion(cluster.get(i), cluster.get(2).broadcastAddress(), version);
+        repair(cluster, KEYSPACE, TABLE);
+    }
+
+    @Test
+    public void paxosRepairVersionGate() throws Throwable
+    {
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = init(Cluster.build(3).withConfig(WITHOUT_NETWORK).withoutVNodes().start()))
+        {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + '.' + TABLE + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+            Assert.assertFalse(hasUncommittedQuorum(cluster, KEYSPACE, TABLE));
+
+            assertAllAlive(cluster);
+            cluster.verbs(PAXOS_COMMIT_REQ).drop();
+            try
+            {
+                cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + '.' + TABLE + " (pk, ck, v) VALUES (400, 2, 2) IF NOT EXISTS", ConsistencyLevel.QUORUM);
+                Assert.fail("expected write timeout");
+            }
+            catch (RuntimeException e)
+            {
+                // exception expected
+            }
+
+            Assert.assertTrue(hasUncommitted(cluster, KEYSPACE, TABLE));
+
+            cluster.filters().reset();
+
+            assertAllAlive(cluster);
+
+            assertRepairFailsWithVersion(cluster, "3.0.24");
+            assertRepairFailsWithVersion(cluster, "4.0.0");
+
+            // test valid versions
+            assertRepairSucceedsWithVersion(cluster, "4.1.0");
+        }
+    }
+
+    private static class PaxosRow
+    {
+        final DecoratedKey key;
+        final Row row;
+
+        PaxosRow(DecoratedKey key, Row row)
+        {
+            this.key = key;
+            this.row = row;
+        }
+
+        public String toString()
+        {
+            TableMetadata table = Schema.instance.getTableMetadata(SYSTEM_KEYSPACE_NAME, SystemKeyspace.PAXOS);
+            return ByteBufferUtil.bytesToHex(key.getKey()) + " -> " + row.toString(table, true);
+        }
+    }
+
+    private static void compactPaxos()
+    {
+        ColumnFamilyStore paxos = Keyspace.open(SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PAXOS);
+        FBUtilities.waitOnFuture(paxos.forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS));
+        FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(paxos, 0, false));
+    }
+
+    private static Map<Integer, PaxosRow> getPaxosRows()
+    {
+        Map<Integer, PaxosRow> rows = new HashMap<>();
+        String queryStr = "SELECT * FROM " + SYSTEM_KEYSPACE_NAME + '.' + SystemKeyspace.PAXOS;
+        SelectStatement stmt = (SelectStatement) QueryProcessor.parseStatement(queryStr).prepare(ClientState.forInternalCalls());
+        ReadQuery query = stmt.getQuery(QueryOptions.DEFAULT, FBUtilities.nowInSeconds());
+        try (ReadExecutionController controller = query.executionController(); PartitionIterator partitions = query.executeInternal(controller))
+        {
+            while (partitions.hasNext())
+            {
+                try (RowIterator partition = partitions.next())
+                {
+                    while (partition.hasNext())
+                    {
+                        rows.put(Int32Type.instance.compose(partition.partitionKey().getKey()),
+                                 new PaxosRow(partition.partitionKey(), partition.next()));
+                    }
+                }
+            }
+        }
+        return rows;
+    }
+
+    private static void assertLowBoundPurged(Collection<PaxosRow> rows)
+    {
+        Assert.assertEquals(0, DatabaseDescriptor.getPaxosPurgeGrace(SECONDS));
+        String ip = FBUtilities.getBroadcastAddressAndPort().toString();
+        for (PaxosRow row : rows)
+        {
+            Ballot keyLowBound = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE).getPaxosRepairLowBound(row.key);
+            Assert.assertTrue(ip, Commit.isAfter(keyLowBound, Ballot.none()));
+            Assert.assertFalse(ip, PaxosRows.hasBallotBeforeOrEqualTo(row.row, keyLowBound));
+        }
+    }
+
+    private static void assertLowBoundPurged(IInvokableInstance instance)
+    {
+        instance.runOnInstance(() -> assertLowBoundPurged(getPaxosRows().values()));
+    }
+
+    private static void assertLowBoundPurged(Cluster cluster)
+    {
+        cluster.forEach(PaxosRepairTest::assertLowBoundPurged);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/PaxosUncommittedIndexTest.java b/test/distributed/org/apache/cassandra/distributed/test/PaxosUncommittedIndexTest.java
new file mode 100644
index 0000000..0ef52e7
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/PaxosUncommittedIndexTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+
+public class PaxosUncommittedIndexTest extends TestBaseImpl
+{
+    @Test
+    public void indexCqlIsExportableAndParsableTest() throws Throwable
+    {
+        String expectedCreateCustomIndex = "CREATE CUSTOM INDEX \"PaxosUncommittedIndex\" ON system.paxos () USING 'org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedIndex'";
+        try (Cluster dtestCluster = init(Cluster.build(1).withConfig(c -> c.with(Feature.NATIVE_PROTOCOL)).start()))
+        {
+            try (com.datastax.driver.core.Cluster clientCluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build())
+            {
+                Assert.assertTrue(clientCluster.getMetadata().exportSchemaAsString()
+                                               .contains(expectedCreateCustomIndex));
+                Throwable thrown = null;
+                try
+                {
+                    dtestCluster.schemaChange(expectedCreateCustomIndex);
+                }
+                catch (Throwable tr)
+                {
+                    thrown = tr;
+                }
+
+                // Check parsing succeeds and index creation fails
+                Assert.assertTrue(thrown.getMessage().contains("System keyspace 'system' is not user-modifiable"));
+            }
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairTest.java b/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairTest.java
index 849bc65..90e29f2 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairTest.java
@@ -33,8 +33,10 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.util.concurrent.Uninterruptibles;
+
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -50,9 +52,8 @@
 import org.apache.cassandra.distributed.api.IInvokableInstance;
 import org.apache.cassandra.distributed.api.IIsolatedExecutor;
 import org.apache.cassandra.distributed.api.IMessage;
-import org.apache.cassandra.distributed.api.IMessageFilters;
 import org.apache.cassandra.distributed.api.NodeToolResult;
-import org.apache.cassandra.distributed.impl.Instance;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
 import org.apache.cassandra.distributed.shared.RepairResult;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.Verb;
@@ -63,14 +64,22 @@
 import org.apache.cassandra.repair.messages.RepairOption;
 import org.apache.cassandra.repair.messages.ValidationRequest;
 import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
-import org.apache.cassandra.utils.progress.ProgressEventType;
 
+import static com.google.common.collect.ImmutableList.of;
+import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.distributed.api.IMessageFilters.Matcher;
+import static org.apache.cassandra.distributed.impl.Instance.deserializeMessage;
+import static org.apache.cassandra.distributed.test.PreviewRepairTest.DelayFirstRepairTypeMessageFilter.finalizePropose;
+import static org.apache.cassandra.distributed.test.PreviewRepairTest.DelayFirstRepairTypeMessageFilter.validationRequest;
+import static org.apache.cassandra.net.Verb.FINALIZE_PROPOSE_MSG;
+import static org.apache.cassandra.net.Verb.VALIDATION_REQ;
+import static org.apache.cassandra.service.StorageService.instance;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+import static org.apache.cassandra.utils.progress.ProgressEventType.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -168,11 +177,11 @@
             insert(cluster.coordinator(1), 100, 100);
             cluster.forEach((node) -> node.flush(KEYSPACE));
             
-            SimpleCondition previewRepairStarted = new SimpleCondition();
-            SimpleCondition continuePreviewRepair = new SimpleCondition();
-            DelayFirstRepairTypeMessageFilter filter = DelayFirstRepairTypeMessageFilter.validationRequest(previewRepairStarted, continuePreviewRepair);
+            Condition previewRepairStarted = newOneTimeCondition();
+            Condition continuePreviewRepair = newOneTimeCondition();
+            DelayFirstRepairTypeMessageFilter filter = validationRequest(previewRepairStarted, continuePreviewRepair);
             // this pauses the validation request sent from node1 to node2 until we have run a full inc repair below
-            cluster.filters().outbound().verbs(Verb.VALIDATION_REQ.id).from(1).to(2).messagesMatching(filter).drop();
+            cluster.filters().outbound().verbs(VALIDATION_REQ.id).from(1).to(2).messagesMatching(filter).drop();
 
             Future<RepairResult> rsFuture = es.submit(() -> cluster.get(1).callOnInstance(repair(options(true, false))));
             previewRepairStarted.await();
@@ -207,24 +216,24 @@
             insert(cluster.coordinator(1), 100, 100);
             cluster.forEach((node) -> node.flush(KEYSPACE));
 
-            SimpleCondition previewRepairStarted = new SimpleCondition();
-            SimpleCondition continuePreviewRepair = new SimpleCondition();
+            Condition previewRepairStarted = newOneTimeCondition();
+            Condition continuePreviewRepair = newOneTimeCondition();
             // this pauses the validation request sent from node1 to node2 until the inc repair below has run
             cluster.filters()
                    .outbound()
-                   .verbs(Verb.VALIDATION_REQ.id)
+                   .verbs(VALIDATION_REQ.id)
                    .from(1).to(2)
-                   .messagesMatching(DelayFirstRepairTypeMessageFilter.validationRequest(previewRepairStarted, continuePreviewRepair))
+                   .messagesMatching(validationRequest(previewRepairStarted, continuePreviewRepair))
                    .drop();
 
-            SimpleCondition irRepairStarted = new SimpleCondition();
-            SimpleCondition continueIrRepair = new SimpleCondition();
+            Condition irRepairStarted = newOneTimeCondition();
+            Condition continueIrRepair = newOneTimeCondition();
             // this blocks the IR from committing, so we can reenable the preview
             cluster.filters()
                    .outbound()
-                   .verbs(Verb.FINALIZE_PROPOSE_MSG.id)
+                   .verbs(FINALIZE_PROPOSE_MSG.id)
                    .from(1).to(2)
-                   .messagesMatching(DelayFirstRepairTypeMessageFilter.finalizePropose(irRepairStarted, continueIrRepair))
+                   .messagesMatching(finalizePropose(irRepairStarted, continueIrRepair))
                    .drop();
 
             Future<RepairResult> previewResult = cluster.get(1).asyncCallsOnInstance(repair(options(true, false))).call();
@@ -258,6 +267,7 @@
         ExecutorService es = Executors.newSingleThreadExecutor();
         try(Cluster cluster = init(Cluster.build(2).withConfig(config -> config.with(GOSSIP).with(NETWORK)).start()))
         {
+            int tokenCount = ClusterUtils.getTokenCount(cluster.get(1));
             cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
 
             insert(cluster.coordinator(1), 0, 100);
@@ -268,20 +278,20 @@
             cluster.forEach((node) -> node.flush(KEYSPACE));
 
             // pause preview repair validation messages on node2 until node1 has finished
-            SimpleCondition previewRepairStarted = new SimpleCondition();
-            SimpleCondition continuePreviewRepair = new SimpleCondition();
-            DelayFirstRepairTypeMessageFilter filter = DelayFirstRepairTypeMessageFilter.validationRequest(previewRepairStarted, continuePreviewRepair);
-            cluster.filters().outbound().verbs(Verb.VALIDATION_REQ.id).from(1).to(2).messagesMatching(filter).drop();
+            Condition previewRepairStarted = newOneTimeCondition();
+            Condition continuePreviewRepair = newOneTimeCondition();
+            DelayFirstRepairTypeMessageFilter filter = validationRequest(previewRepairStarted, continuePreviewRepair);
+            cluster.filters().outbound().verbs(VALIDATION_REQ.id).from(1).to(2).messagesMatching(filter).drop();
 
             // get local ranges to repair two separate ranges:
             List<String> localRanges = cluster.get(1).callOnInstance(() -> {
                 List<String> res = new ArrayList<>();
-                for (Range<Token> r : StorageService.instance.getLocalReplicas(KEYSPACE).ranges())
+                for (Range<Token> r : instance.getLocalReplicas(KEYSPACE).ranges())
                     res.add(r.left.getTokenValue()+ ":"+ r.right.getTokenValue());
                 return res;
             });
 
-            assertEquals(2, localRanges.size());
+            assertEquals(2 * tokenCount, localRanges.size());
             Future<RepairResult> repairStatusFuture = es.submit(() -> cluster.get(1).callOnInstance(repair(options(true, false, localRanges.get(0)))));
             previewRepairStarted.await(); // wait for node1 to start validation compaction
             // this needs to finish before the preview repair is unpaused on node2
@@ -310,6 +320,7 @@
                                                                      .with(NETWORK))
                                           .start()))
         {
+            int tokenCount = ClusterUtils.getTokenCount(cluster.get(1));
             cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
             insert(cluster.coordinator(1), 0, 100);
             cluster.forEach((node) -> node.flush(KEYSPACE));
@@ -319,8 +330,8 @@
             cluster.forEach((node) -> node.flush(KEYSPACE));
 
             // pause inc repair validation messages on node2 until node1 has finished
-            SimpleCondition incRepairStarted = new SimpleCondition();
-            SimpleCondition continueIncRepair = new SimpleCondition();
+            Condition incRepairStarted = newOneTimeCondition();
+            Condition continueIncRepair = newOneTimeCondition();
 
             DelayFirstRepairTypeMessageFilter filter = DelayFirstRepairTypeMessageFilter.validationRequest(incRepairStarted, continueIncRepair);
             cluster.filters().outbound().verbs(Verb.VALIDATION_REQ.id).from(1).to(2).messagesMatching(filter).drop();
@@ -333,7 +344,7 @@
                 return res;
             });
 
-            assertEquals(2, localRanges.size());
+            assertEquals(2 * tokenCount, localRanges.size());
             String [] previewedRange = localRanges.get(0).split(":");
             String [] repairedRange = localRanges.get(1).split(":");
             Future<NodeToolResult> repairStatusFuture = es.submit(() -> cluster.get(1).nodetoolResult("repair", "-st", repairedRange[0], "-et", repairedRange[1], KEYSPACE, "tbl"));
@@ -434,23 +445,23 @@
             ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(table);
             if(shouldBeEmpty)
             {
-                assertTrue(cfs.getSnapshotDetails().isEmpty());
+                assertTrue(cfs.listSnapshots().isEmpty());
             }
             else
             {
-                while (cfs.getSnapshotDetails().isEmpty())
+                while (cfs.listSnapshots().isEmpty())
                     Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
             }
         }));
     }
 
-    static abstract class DelayFirstRepairMessageFilter implements IMessageFilters.Matcher
+    static abstract class DelayFirstRepairMessageFilter implements Matcher
     {
-        private final SimpleCondition pause;
-        private final SimpleCondition resume;
+        private final Condition pause;
+        private final Condition resume;
         private final AtomicBoolean waitForRepair = new AtomicBoolean(true);
 
-        protected DelayFirstRepairMessageFilter(SimpleCondition pause, SimpleCondition resume)
+        protected DelayFirstRepairMessageFilter(Condition pause, Condition resume)
         {
             this.pause = pause;
             this.resume = resume;
@@ -462,7 +473,7 @@
         {
             try
             {
-                Message<?> msg = Instance.deserializeMessage(message);
+                Message<?> msg = deserializeMessage(message);
                 RepairMessage repairMessage = (RepairMessage) msg.payload;
                 // only the first message should be delayed:
                 if (matchesMessage(repairMessage) && waitForRepair.compareAndSet(true, false))
@@ -483,18 +494,18 @@
     {
         private final Class<? extends RepairMessage> type;
 
-        public DelayFirstRepairTypeMessageFilter(SimpleCondition pause, SimpleCondition resume, Class<? extends RepairMessage> type)
+        public DelayFirstRepairTypeMessageFilter(Condition pause, Condition resume, Class<? extends RepairMessage> type)
         {
             super(pause, resume);
             this.type = type;
         }
 
-        public static DelayFirstRepairTypeMessageFilter validationRequest(SimpleCondition pause, SimpleCondition resume)
+        public static DelayFirstRepairTypeMessageFilter validationRequest(Condition pause, Condition resume)
         {
             return new DelayFirstRepairTypeMessageFilter(pause, resume, ValidationRequest.class);
         }
 
-        public static DelayFirstRepairTypeMessageFilter finalizePropose(SimpleCondition pause, SimpleCondition resume)
+        public static DelayFirstRepairTypeMessageFilter finalizePropose(Condition pause, Condition resume)
         {
             return new DelayFirstRepairTypeMessageFilter(pause, resume, FinalizePropose.class);
         }
@@ -522,25 +533,25 @@
     private static IIsolatedExecutor.SerializableCallable<RepairResult> repair(Map<String, String> options)
     {
         return () -> {
-            SimpleCondition await = new SimpleCondition();
+            Condition await = newOneTimeCondition();
             AtomicBoolean success = new AtomicBoolean(true);
             AtomicBoolean wasInconsistent = new AtomicBoolean(false);
-            StorageService.instance.repair(KEYSPACE, options, ImmutableList.of((tag, event) -> {
-                if (event.getType() == ProgressEventType.ERROR)
+            instance.repair(KEYSPACE, options, of((tag, event) -> {
+                if (event.getType() == ERROR)
                 {
                     success.set(false);
                     await.signalAll();
                 }
-                else if (event.getType() == ProgressEventType.NOTIFICATION && event.getMessage().contains("Repaired data is inconsistent"))
+                else if (event.getType() == NOTIFICATION && event.getMessage().contains("Repaired data is inconsistent"))
                 {
                     wasInconsistent.set(true);
                 }
-                else if (event.getType() == ProgressEventType.COMPLETE)
+                else if (event.getType() == COMPLETE)
                     await.signalAll();
             }));
             try
             {
-                await.await(1, TimeUnit.MINUTES);
+                await.await(1, MINUTES);
             }
             catch (InterruptedException e)
             {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadDigestConsistencyTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReadDigestConsistencyTest.java
index 05e705b..85c2783 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReadDigestConsistencyTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReadDigestConsistencyTest.java
@@ -31,6 +31,7 @@
 import org.apache.cassandra.distributed.api.ICoordinator;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class ReadDigestConsistencyTest extends TestBaseImpl
 {
@@ -65,7 +66,7 @@
 
     public static void checkTraceForDigestMismatch(ICoordinator coordinator, String query, Object... boundValues)
     {
-        UUID sessionId = UUID.randomUUID();
+        UUID sessionId = TimeUUID.Generator.nextTimeUUID().asUUID();
         try
         {
             coordinator.executeWithTracing(sessionId, query, ConsistencyLevel.ALL, boundValues);
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java
index c07b128..6cdea9b 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java
@@ -33,7 +33,6 @@
 import org.apache.cassandra.distributed.shared.AssertUtils;
 import org.apache.cassandra.service.reads.repair.ReadRepairStrategy;
 
-import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.distributed.shared.AssertUtils.row;
 
 
@@ -96,8 +95,8 @@
     public static void setupCluster() throws IOException
     {
         cluster = init(Cluster.build(NUM_NODES)
-                              .withConfig(config -> config.set("read_request_timeout_in_ms", MINUTES.toMillis(1))
-                                                          .set("write_request_timeout_in_ms", MINUTES.toMillis(1)))
+                              .withConfig(config -> config.set("read_request_timeout", "1m")
+                                                          .set("write_request_timeout", "1m"))
                               .start());
         cluster.schemaChange(withKeyspace("CREATE TYPE %s.udt (x int, y int)"));
     }
@@ -286,4 +285,4 @@
             return this;
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairQueryTester.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairQueryTester.java
index 10bf050..26516104 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairQueryTester.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairQueryTester.java
@@ -31,7 +31,6 @@
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.service.reads.repair.ReadRepairStrategy;
 
-import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertEquals;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
 import static org.apache.cassandra.service.reads.repair.ReadRepairStrategy.NONE;
@@ -116,8 +115,8 @@
     public static void setupCluster() throws IOException
     {
         cluster = init(Cluster.build(NUM_NODES)
-                              .withConfig(config -> config.set("read_request_timeout_in_ms", MINUTES.toMillis(1))
-                                                          .set("write_request_timeout_in_ms", MINUTES.toMillis(1)))
+                              .withConfig(config -> config.set("read_request_timeout", "1m")
+                                                          .set("write_request_timeout", "1m"))
                               .start());
         cluster.schemaChange(withKeyspace("CREATE TYPE %s.udt (x int, y int)"));
     }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTest.java
index c68320e..9a70e89 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTest.java
@@ -26,6 +26,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -36,7 +37,6 @@
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.DecoratedKey;
-import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
@@ -52,14 +52,14 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.locator.ReplicaPlan;
-import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.service.PendingRangeCalculatorService;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.service.reads.repair.BlockingReadRepair;
 import org.apache.cassandra.service.reads.repair.ReadRepairStrategy;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
 import static net.bytebuddy.matcher.ElementMatchers.named;
+
+import static org.apache.cassandra.db.Keyspace.open;
 import static org.apache.cassandra.distributed.api.ConsistencyLevel.ALL;
 import static org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertEquals;
@@ -68,6 +68,8 @@
 import static org.apache.cassandra.net.Verb.READ_REPAIR_REQ;
 import static org.apache.cassandra.net.Verb.READ_REPAIR_RSP;
 import static org.apache.cassandra.net.Verb.READ_REQ;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 import static org.junit.Assert.fail;
 
 public class ReadRepairTest extends TestBaseImpl
@@ -131,7 +133,7 @@
             cluster.get(2).executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1)");
             assertRows(cluster.get(3).executeInternal("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1"));
             cluster.verbs(READ_REPAIR_RSP).to(1).drop();
-            final long start = System.currentTimeMillis();
+            final long start = currentTimeMillis();
             try
             {
                 cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.ALL);
@@ -141,7 +143,7 @@
             {
                 // the containing exception class was loaded by another class loader. Comparing the message as a workaround to assert the exception
                 Assert.assertTrue(ex.getClass().toString().contains("ReadTimeoutException"));
-                long actualTimeTaken = System.currentTimeMillis() - start;
+                long actualTimeTaken = currentTimeMillis() - start;
                 long magicDelayAmount = 100L; // it might not be the best way to check if the time taken is around the timeout value.
                 // Due to the delays, the actual time taken from the client's perspective is slightly more than the timeout value
                 Assert.assertTrue(actualTimeTaken > reducedReadTimeout);
@@ -178,7 +180,8 @@
     @Test
     public void movingTokenReadRepairTest() throws Throwable
     {
-        try (Cluster cluster = init(Cluster.create(4), 3))
+        // TODO: fails with vnode enabled
+        try (Cluster cluster = init(Cluster.build(4).withoutVNodes().start(), 3))
         {
             List<Token> tokens = cluster.tokens();
 
@@ -353,7 +356,7 @@
         String key = "test1";
         try (Cluster cluster = init(Cluster.build()
                                            .withConfig(config -> config.with(Feature.GOSSIP, Feature.NETWORK)
-                                                                       .set("read_request_timeout_in_ms", Integer.MAX_VALUE))
+                                                                       .set("read_request_timeout", String.format("%dms", Integer.MAX_VALUE)))
                                            .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(4))
                                            .withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(4, "dc0", "rack0"))
                                            .withNodes(3)
@@ -365,7 +368,7 @@
                                  "    PRIMARY KEY (key, column1)\n" +
                                  ") WITH CLUSTERING ORDER BY (column1 ASC)");
 
-            cluster.forEach(i -> i.runOnInstance(() -> Keyspace.open(KEYSPACE).getColumnFamilyStore("tbl").disableAutoCompaction()));
+            cluster.forEach(i -> i.runOnInstance(() -> open(KEYSPACE).getColumnFamilyStore("tbl").disableAutoCompaction()));
 
             for (int i = 1; i <= 2; i++)
             {
@@ -378,9 +381,9 @@
             cluster.get(3).flush(KEYSPACE);
 
             // pause the read until we have bootstrapped a new node below
-            SimpleCondition continueRead = new SimpleCondition();
-            SimpleCondition readStarted = new SimpleCondition();
-            cluster.filters().outbound().from(3).to(1,2).verbs(Verb.READ_REQ.id).messagesMatching((i, i1, iMessage) -> {
+            Condition continueRead = newOneTimeCondition();
+            Condition readStarted = newOneTimeCondition();
+            cluster.filters().outbound().from(3).to(1,2).verbs(READ_REQ.id).messagesMatching((i, i1, iMessage) -> {
                 try
                 {
                     readStarted.signalAll();
@@ -394,7 +397,7 @@
             }).drop();
             Future<Object[][]> read = es.submit(() -> cluster.coordinator(3)
                                                           .execute("SELECT * FROM distributed_test_keyspace.tbl WHERE key=? and column1 >= ? and column1 <= ?",
-                                                                   ConsistencyLevel.ALL, key, 20, 40));
+                                                                   ALL, key, 20, 40));
             readStarted.await();
             IInstanceConfig config = cluster.newInstanceConfig();
             config.set("auto_bootstrap", true);
@@ -491,7 +494,7 @@
         // on timestamp tie of RT and partition deletion: we should not generate RT bounds in such case,
         // since monotonicity is already ensured by the partition deletion, and RT is unnecessary there.
         // For details, see CASSANDRA-16453.
-        public static Object repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForTokenWrite writePlan, @SuperCall Callable<Void> r) throws Exception
+        public static Object repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForWrite writePlan, @SuperCall Callable<Void> r) throws Exception
         {
             Assert.assertEquals(2, mutations.size());
             for (Mutation value : mutations.values())
@@ -505,4 +508,4 @@
             return r.call();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java
index c3a36cb..a73d968 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java
@@ -159,4 +159,4 @@
             return cfs.metric.readRepairRequests.getCount();
         });
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/RepairBoundaryTest.java b/test/distributed/org/apache/cassandra/distributed/test/RepairBoundaryTest.java
index d269bef..d7cd0ec 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/RepairBoundaryTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/RepairBoundaryTest.java
@@ -23,7 +23,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import com.google.common.collect.ImmutableList;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -31,16 +31,17 @@
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
-import org.apache.cassandra.utils.progress.ProgressEventType;
 
+import static com.google.common.collect.ImmutableList.of;
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.dht.Murmur3Partitioner.*;
 import static org.apache.cassandra.dht.Murmur3Partitioner.LongToken.keyForToken;
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.service.StorageService.instance;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+import static org.apache.cassandra.utils.progress.ProgressEventType.COMPLETE;
 
 public class RepairBoundaryTest extends TestBaseImpl
 {
@@ -142,9 +143,9 @@
                 Map<String, String> options = new HashMap<>();
                 options.put("ranges", "999:1000");
                 options.put("incremental", "false");
-                SimpleCondition await = new SimpleCondition();
-                StorageService.instance.repair(KEYSPACE, options, ImmutableList.of((tag, event) -> {
-                    if (event.getType() == ProgressEventType.COMPLETE)
+                Condition await = newOneTimeCondition();
+                instance.repair(KEYSPACE, options, of((tag, event) -> {
+                    if (event.getType() == COMPLETE)
                         await.signalAll();
                 })).right.get();
                 await.await(1L, MINUTES);
diff --git a/test/distributed/org/apache/cassandra/distributed/test/RepairCoordinatorBase.java b/test/distributed/org/apache/cassandra/distributed/test/RepairCoordinatorBase.java
index fc058db..f1266fa 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/RepairCoordinatorBase.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/RepairCoordinatorBase.java
@@ -22,6 +22,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.RejectedExecutionException;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -78,6 +79,8 @@
                               .withConfig(c -> c.with(Feature.NETWORK)
                                                 .with(Feature.GOSSIP))
                               .start());
+
+        CLUSTER.setUncaughtExceptionsFilter(throwable -> throwable instanceof RejectedExecutionException && "RepairJobTask has shut down".equals(throwable.getMessage()));
     }
 
     @AfterClass
diff --git a/test/distributed/org/apache/cassandra/distributed/test/RepairDigestTrackingTest.java b/test/distributed/org/apache/cassandra/distributed/test/RepairDigestTrackingTest.java
index a4daceb..f6da888 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/RepairDigestTrackingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/RepairDigestTrackingTest.java
@@ -19,7 +19,11 @@
 package org.apache.cassandra.distributed.test;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.TimeUnit;
@@ -63,6 +67,7 @@
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -540,7 +545,7 @@
                     SSTableReader sstable = sstables.next();
                     Descriptor descriptor = sstable.descriptor;
                     descriptor.getMetadataSerializer()
-                              .mutateRepairMetadata(descriptor, System.currentTimeMillis(), null, false);
+                              .mutateRepairMetadata(descriptor, currentTimeMillis(), null, false);
                     sstable.reloadSSTableMetadata();
                 }
             } catch (IOException e) {
@@ -586,7 +591,7 @@
             int attempts = 100;
             ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE);
 
-            while (cfs.getSnapshotDetails().isEmpty())
+            while (cfs.listSnapshots().isEmpty())
             {
                 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
                 if (attempts-- < 0)
diff --git a/test/distributed/org/apache/cassandra/distributed/test/RepairErrorsTest.java b/test/distributed/org/apache/cassandra/distributed/test/RepairErrorsTest.java
index b3de7db..f34bb2a 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/RepairErrorsTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/RepairErrorsTest.java
@@ -19,32 +19,38 @@
 package org.apache.cassandra.distributed.test;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 import net.bytebuddy.ByteBuddy;
 import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
 import net.bytebuddy.implementation.MethodDelegation;
 import net.bytebuddy.implementation.bind.annotation.SuperCall;
+import org.assertj.core.api.Assertions;
 import org.junit.Test;
 
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.compaction.CompactionIterator;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
 import org.apache.cassandra.db.streaming.CassandraIncomingFile;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
 import org.apache.cassandra.distributed.api.NodeToolResult;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.streaming.StreamSession;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static net.bytebuddy.matcher.ElementMatchers.named;
-import static net.bytebuddy.matcher.ElementMatchers.takesArguments;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -54,6 +60,33 @@
 public class RepairErrorsTest extends TestBaseImpl
 {
     @Test
+    public void testRemoteValidationFailure() throws IOException
+    {
+        Cluster.Builder builder = Cluster.build(2)
+                                         .withConfig(config -> config.with(GOSSIP).with(NETWORK))
+                                         .withInstanceInitializer(ByteBuddyHelper::install);
+        try (Cluster cluster = builder.createWithoutStarting())
+        {
+            cluster.setUncaughtExceptionsFilter((i, throwable) -> {
+                if (i == 2)
+                    return throwable.getMessage() != null && throwable.getMessage().contains("IGNORE");
+                return false;
+            });
+
+            cluster.startup();
+            init(cluster);
+
+            cluster.schemaChange("create table "+KEYSPACE+".tbl (id int primary key, x int)");
+            for (int i = 0; i < 10; i++)
+                cluster.coordinator(1).execute("insert into "+KEYSPACE+".tbl (id, x) VALUES (?,?)", ConsistencyLevel.ALL, i, i);
+            cluster.forEach(i -> i.flush(KEYSPACE));
+            long mark = cluster.get(1).logs().mark();
+            cluster.forEach(i -> i.nodetoolResult("repair", "--full").asserts().failure());
+            Assertions.assertThat(cluster.get(1).logs().grep(mark, "^ERROR").getResult()).isEmpty();
+        }
+    }
+
+    @Test
     public void testRemoteSyncFailure() throws Exception
     {
         try (Cluster cluster = init(Cluster.build(3)
@@ -96,6 +129,10 @@
             result.asserts().success();
 
             assertNoActiveRepairSessions(cluster.get(1));
+
+            cluster.forEach(i -> Assertions.assertThat(i.logs().grep("SomeRepairFailedException").getResult())
+                                           .describedAs("node%d logged hidden exception org.apache.cassandra.repair.SomeRepairFailedException", i.config().num())
+                                           .isEmpty());
         }
     }
 
@@ -142,6 +179,24 @@
         }
     }
 
+    @Test
+    public void testNoSuchRepairSessionAnticompaction() throws IOException
+    {
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withConfig(config -> config.with(GOSSIP).with(NETWORK))
+                                           .withInstanceInitializer(ByteBuddyHelper::installACNoSuchRepairSession)
+                                           .start()))
+        {
+            cluster.schemaChange("create table "+KEYSPACE+".tbl (id int primary key, x int)");
+            for (int i = 0; i < 10; i++)
+                cluster.coordinator(1).execute("insert into "+KEYSPACE+".tbl (id, x) VALUES (?,?)", ConsistencyLevel.ALL, i, i);
+            cluster.forEach(i -> i.flush(KEYSPACE));
+            long mark = cluster.get(1).logs().mark();
+            cluster.forEach(i -> i.nodetoolResult("repair", KEYSPACE).asserts().failure());
+            assertTrue(cluster.get(1).logs().grep(mark, "^ERROR").getResult().isEmpty());
+        }
+    }
+
     @SuppressWarnings("Convert2MethodRef")
     private void assertNoActiveRepairSessions(IInvokableInstance instance)
     {
@@ -152,6 +207,30 @@
 
     public static class ByteBuddyHelper
     {
+        public static void install(ClassLoader cl, int nodeNumber)
+        {
+            if (nodeNumber == 2)
+            {
+                new ByteBuddy().redefine(CompactionIterator.class)
+                               .method(named("next"))
+                               .intercept(MethodDelegation.to(ByteBuddyHelper.class))
+                               .make()
+                               .load(cl, ClassLoadingStrategy.Default.INJECTION);
+            }
+        }
+
+        public static void installACNoSuchRepairSession(ClassLoader cl, int nodeNumber)
+        {
+            if (nodeNumber == 2)
+            {
+                new ByteBuddy().redefine(CompactionManager.class)
+                               .method(named("validateSSTableBoundsForAnticompaction"))
+                               .intercept(MethodDelegation.to(ByteBuddyHelper.class))
+                               .make()
+                               .load(cl, ClassLoadingStrategy.Default.INJECTION);
+            }
+        }
+        
         public static void installStreamPlanExecutionFailure(ClassLoader cl, int nodeNumber)
         {
             if (nodeNumber == 2)
@@ -170,15 +249,22 @@
                         .intercept(MethodDelegation.to(ByteBuddyHelper.class))
                         .make()
                         .load(cl, ClassLoadingStrategy.Default.INJECTION);
-
-                new ByteBuddy().rebase(DebuggableThreadPoolExecutor.class)
-                        .method(named("extractThrowable").and(takesArguments(Future.class)))
-                        .intercept(MethodDelegation.to(ByteBuddyHelper.class))
-                        .make()
-                        .load(cl, ClassLoadingStrategy.Default.INJECTION);
             }
         }
 
+        public static UnfilteredRowIterator next()
+        {
+            throw new RuntimeException("IGNORE");
+        }
+
+        @SuppressWarnings("unused")
+        public static void validateSSTableBoundsForAnticompaction(TimeUUID sessionID,
+                                                                  Collection<SSTableReader> sstables,
+                                                                  RangesAtEndpoint ranges)
+        {
+            throw new CompactionInterruptedException(String.valueOf(sessionID));
+        }
+
         @SuppressWarnings("unused")
         public static void onInitializationComplete()
         {
@@ -192,7 +278,7 @@
             {
                 try
                 {
-                    TimeUnit.SECONDS.sleep(60);
+                    TimeUnit.SECONDS.sleep(10);
                 }
                 catch (InterruptedException e)
                 {
@@ -203,16 +289,6 @@
 
             return zuper.call();
         }
-
-        @SuppressWarnings({"unused", "ResultOfMethodCallIgnored"})
-        public static Throwable extractThrowable(Future<?> future, @SuperCall Callable<Throwable> zuper) throws Exception
-        {
-            if (Thread.currentThread().getName().contains("RepairJobTask"))
-                // Clear the interrupt flag so the FSReadError is propagated correctly in DebuggableThreadPoolExecutor:
-                Thread.interrupted();
-            
-            return zuper.call();
-        }
     }
 
     public static class ByteBuddyHelperStreamFailure
@@ -222,19 +298,19 @@
             if (nodeNumber == 3)
             {
                 new ByteBuddy().rebase(CassandraIncomingFile.class)
-                        .method(named("read"))
-                        .intercept(MethodDelegation.to(ByteBuddyHelperStreamFailure.class))
-                        .make()
-                        .load(cl, ClassLoadingStrategy.Default.INJECTION);
+                               .method(named("read"))
+                               .intercept(MethodDelegation.to(ByteBuddyHelperStreamFailure.class))
+                               .make()
+                               .load(cl, ClassLoadingStrategy.Default.INJECTION);
             }
 
             if (nodeNumber == 1)
             {
                 new ByteBuddy().rebase(SystemKeyspace.class)
-                        .method(named("getPreferredIP"))
-                        .intercept(MethodDelegation.to(ByteBuddyHelperStreamFailure.class))
-                        .make()
-                        .load(cl, ClassLoadingStrategy.Default.INJECTION);
+                               .method(named("getPreferredIP"))
+                               .intercept(MethodDelegation.to(ByteBuddyHelperStreamFailure.class))
+                               .make()
+                               .load(cl, ClassLoadingStrategy.Default.INJECTION);
             }
         }
 
@@ -247,7 +323,7 @@
         @SuppressWarnings("unused")
         public static InetAddressAndPort getPreferredIP(InetAddressAndPort ep, @SuperCall Callable<InetAddressAndPort> zuper) throws Exception
         {
-            if (Thread.currentThread().getName().contains("NettyStreaming-Outbound") && ep.address.toString().contains("127.0.0.2"))
+            if (Thread.currentThread().getName().contains("NettyStreaming-Outbound") && ep.getAddress().toString().contains("127.0.0.2"))
             {
                 try
                 {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/RepairTest.java b/test/distributed/org/apache/cassandra/distributed/test/RepairTest.java
index b127a74..669e762 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/RepairTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/RepairTest.java
@@ -22,11 +22,13 @@
 import java.util.Arrays;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -37,14 +39,16 @@
 import org.apache.cassandra.distributed.api.IInstanceConfig;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
 import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
-import org.apache.cassandra.utils.progress.ProgressEventType;
 
+import static com.google.common.collect.ImmutableList.of;
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
 import static org.apache.cassandra.distributed.test.ExecUtil.rethrow;
+import static org.apache.cassandra.service.StorageService.instance;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+import static org.apache.cassandra.utils.progress.ProgressEventType.COMPLETE;
 
 public class RepairTest extends TestBaseImpl
 {
@@ -77,7 +81,8 @@
     private static void flush(ICluster<IInvokableInstance> cluster, String keyspace, int ... nodes)
     {
         for (int node : nodes)
-            cluster.get(node).runOnInstance(rethrow(() -> StorageService.instance.forceKeyspaceFlush(keyspace)));
+            cluster.get(node).runOnInstance(rethrow(() -> StorageService.instance.forceKeyspaceFlush(keyspace,
+                                                                                                     ColumnFamilyStore.FlushReason.UNIT_TESTS)));
     }
 
     private static ICluster create(Consumer<IInstanceConfig> configModifier) throws IOException
@@ -95,9 +100,9 @@
     static void repair(ICluster<IInvokableInstance> cluster, String keyspace, Map<String, String> options)
     {
         cluster.get(1).runOnInstance(rethrow(() -> {
-            SimpleCondition await = new SimpleCondition();
-            StorageService.instance.repair(keyspace, options, ImmutableList.of((tag, event) -> {
-                if (event.getType() == ProgressEventType.COMPLETE)
+            Condition await = newOneTimeCondition();
+            instance.repair(keyspace, options, of((tag, event) -> {
+                if (event.getType() == COMPLETE)
                     await.signalAll();
             })).right.get();
             await.await(1L, MINUTES);
@@ -139,7 +144,7 @@
     void shutDownNodesAndForceRepair(ICluster<IInvokableInstance> cluster, String keyspace, int downNode) throws Exception
     {
         populate(cluster, keyspace, "{'enabled': false}");
-        cluster.get(downNode).shutdown().get(5, TimeUnit.SECONDS);
+        ClusterUtils.stopUnchecked(cluster.get(downNode));
         repair(cluster, keyspace, ImmutableMap.of("forceRepair", "true"));
     }
 
@@ -198,6 +203,7 @@
         // The test uses its own keyspace with rf == 2
         String forceRepairKeyspace = "test_force_repair_keyspace";
         int rf = 2;
+        int tokenCount = ClusterUtils.getTokenCount(cluster.get(1));
         cluster.schemaChange("CREATE KEYSPACE " + forceRepairKeyspace + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + rf + "};");
 
         try
@@ -208,9 +214,9 @@
                 Set<String> requestedRanges = row.getSet("requested_ranges");
                 Assert.assertNotNull("Found no successful ranges", successfulRanges);
                 Assert.assertNotNull("Found no requested ranges", requestedRanges);
-                Assert.assertEquals("Requested ranges count should equals to replication factor", rf, requestedRanges.size());
-                Assert.assertTrue("Given clusterSize = 3, RF = 2 and 1 node down in the replica set, it should yield only 1 successful repaired range.",
-                                  successfulRanges.size() == 1 && !successfulRanges.contains("")); // the successful ranges set should not only contain empty string
+                Assert.assertEquals("Requested ranges count should equals to replication factor", rf * tokenCount, requestedRanges.size());
+                Assert.assertTrue("Given clusterSize = 3, RF = 2 and 1 node down in the replica set, it should yield only " + tokenCount + " successful repaired range.",
+                                  successfulRanges.size() == tokenCount && !successfulRanges.contains("")); // the successful ranges set should not only contain empty string
             });
         }
         finally
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java
index f891dfe..099ea05 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java
@@ -57,8 +57,7 @@
         cluster = init(Cluster.build()
                               .withNodes(REPLICAS)
                               .withConfig(config -> config.set("hinted_handoff_enabled", false)
-                                                          .set("commitlog_sync", "batch")
-                                                          .set("num_tokens", 1)).start());
+                                                          .set("commitlog_sync", "batch")).start());
 
         // Make sure we start w/ the correct defaults:
         cluster.get(1).runOnInstance(() -> assertEquals(DEFAULT_WARN_THRESHOLD, StorageService.instance.getCachedReplicaRowsWarnThreshold()));
@@ -68,7 +67,8 @@
     @AfterClass
     public static void teardown()
     {
-        cluster.close();
+        if (cluster != null)
+            cluster.close();
     }
 
     @Test
@@ -241,4 +241,4 @@
                                                      .getColumnFamilyStore(tableName)
                                                      .metric.rfpRowsCachedPerQuery.getCount());
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java b/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java
index 5430800..a5a2dce 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java
@@ -18,17 +18,16 @@
 
 package org.apache.cassandra.distributed.test;
 
-import java.io.File;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.nio.file.FileSystems;
 import java.nio.file.Path;
 import java.sql.Date;
 import java.text.SimpleDateFormat;
-import java.time.Instant;
 import java.util.function.Consumer;
 import javax.management.MBeanServer;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Ignore;
 import org.junit.Test;
 
@@ -41,6 +40,7 @@
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.utils.FBUtilities.now;
 
 /* Resource Leak Test - useful when tracking down issues with in-JVM framework cleanup.
  * All objects referencing the InstanceClassLoader need to be garbage collected or
@@ -65,7 +65,7 @@
     final long finalWaitMillis = 0l;       // Number of millis to wait before final resource dump to give gc a chance
 
     static final SimpleDateFormat format = new SimpleDateFormat("yyyyMMddHHmmss");
-    static final String when = format.format(Date.from(Instant.now()));
+    static final String when = format.format(Date.from(now()));
 
     static String outputFilename(String base, String description, String extension)
     {
@@ -123,7 +123,7 @@
         long pid = getProcessId();
         ProcessBuilder map = new ProcessBuilder("/usr/sbin/lsof", "-p", Long.toString(pid));
         File output = new File(outputFilename("lsof", description, ".txt"));
-        map.redirectOutput(output);
+        map.redirectOutput(output.toJavaIOFile());
         map.redirectErrorStream(true);
         map.start().waitFor();
     }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/RestartTest.java b/test/distributed/org/apache/cassandra/distributed/test/RestartTest.java
new file mode 100644
index 0000000..4d3049b
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/RestartTest.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.utils.FBUtilities;
+
+public class RestartTest extends TestBaseImpl
+{
+    @Test
+    public void test() throws Exception
+    {
+        try (Cluster cluster = init(Cluster.build(2).withDataDirCount(1).start()))
+        {
+            FBUtilities.waitOnFuture(cluster.get(2).shutdown());
+            FBUtilities.waitOnFuture(cluster.get(1).shutdown());
+            cluster.get(1).startup();
+            cluster.get(2).startup();
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/SSTableIdGenerationTest.java b/test/distributed/org/apache/cassandra/distributed/test/SSTableIdGenerationTest.java
new file mode 100644
index 0000000..dd37bd3
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/SSTableIdGenerationTest.java
@@ -0,0 +1,531 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.commons.io.FileUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
+import org.apache.cassandra.db.compaction.DateTieredCompactionStrategy;
+import org.apache.cassandra.db.compaction.LeveledCompactionStrategy;
+import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
+import org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.SimpleQueryResult;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
+import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.io.sstable.UUIDBasedSSTableId;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.metrics.RestorableMeter;
+import org.apache.cassandra.tools.SystemExitException;
+import org.apache.cassandra.utils.TimeUUID;
+import org.assertj.core.api.Assertions;
+import org.assertj.core.data.Offset;
+
+import static java.lang.String.format;
+import static org.apache.cassandra.Util.bulkLoadSSTables;
+import static org.apache.cassandra.Util.getBackups;
+import static org.apache.cassandra.Util.getSSTables;
+import static org.apache.cassandra.Util.getSnapshots;
+import static org.apache.cassandra.Util.relativizePath;
+import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
+import static org.apache.cassandra.db.SystemKeyspace.LEGACY_SSTABLE_ACTIVITY;
+import static org.apache.cassandra.db.SystemKeyspace.SSTABLE_ACTIVITY_V2;
+import static org.apache.cassandra.distributed.shared.FutureUtils.waitOn;
+import static org.apache.cassandra.distributed.test.ExecUtil.rethrow;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SSTableIdGenerationTest extends TestBaseImpl
+{
+    private final static String ENABLE_UUID_FIELD_NAME = "uuid_sstable_identifiers_enabled";
+    private final static String SNAPSHOT_TAG = "test";
+
+    private int v;
+
+    private static SecurityManager originalSecurityManager;
+
+    @BeforeClass
+    public static void beforeClass() throws Throwable
+    {
+        TestBaseImpl.beforeClass();
+
+        originalSecurityManager = System.getSecurityManager();
+        // we prevent system exit and convert it to an exception because this is one of the expected test outcomes,
+        // and we want to make an assertion on that
+        ClusterUtils.preventSystemExit();
+    }
+
+    @AfterClass
+    public static void afterClass() throws Throwable
+    {
+        System.setSecurityManager(originalSecurityManager);
+    }
+
+    /**
+     * This test verifies that a node with uuid disabled actually creates sstables with sequential ids and
+     * that both the current and legacy sstable activity tables are updated.
+     * Then, when uuid is enabled, we create sstables with uuid but keep and can still read the old sstables, and
+     * only the current sstable activity table is updated.
+     */
+    @Test
+    public void testRestartWithUUIDEnabled() throws IOException
+    {
+        try (Cluster cluster = init(Cluster.build(1)
+                                           .withDataDirCount(1)
+                                           .withConfig(config -> config.set(ENABLE_UUID_FIELD_NAME, false))
+                                           .start()))
+        {
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl", null));
+            createSSTables(cluster.get(1), KEYSPACE, "tbl", 1, 2);
+            assertSSTablesCount(cluster.get(1), 2, 0, KEYSPACE, "tbl");
+            verfiySSTableActivity(cluster, true);
+
+            restartNode(cluster, 1, true);
+
+            createSSTables(cluster.get(1), KEYSPACE, "tbl", 3, 4);
+            assertSSTablesCount(cluster.get(1), 2, 2, KEYSPACE, "tbl");
+            verfiySSTableActivity(cluster, false);
+
+            checkRowsNumber(cluster.get(1), KEYSPACE, "tbl", 9);
+        }
+    }
+
+    /**
+     * This test verifies that a node cannot be started with uuid disabled when uuid-based sstables already exist.
+     */
+    @Test
+    public void testRestartWithUUIDDisabled() throws IOException
+    {
+        try (Cluster cluster = init(Cluster.build(1)
+                                           .withDataDirCount(1)
+                                           .withConfig(config -> config.set(ENABLE_UUID_FIELD_NAME, true))
+                                           .start()))
+        {
+            cluster.disableAutoCompaction(KEYSPACE);
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl", null));
+            createSSTables(cluster.get(1), KEYSPACE, "tbl", 1, 2);
+            assertSSTablesCount(cluster.get(1), 0, 2, KEYSPACE, "tbl");
+            verifySSTableActivity(cluster, false);
+
+            Assertions.assertThatExceptionOfType(RuntimeException.class)
+                      .isThrownBy(() -> restartNode(cluster, 1, false))
+                      .withCauseInstanceOf(SystemExitException.class);
+        }
+    }
+
+    @Test
+    public final void testCompactionStrategiesWithMixedSSTables() throws Exception
+    {
+        testCompactionStrategiesWithMixedSSTables(SizeTieredCompactionStrategy.class,
+                                                  DateTieredCompactionStrategy.class,
+                                                  TimeWindowCompactionStrategy.class,
+                                                  LeveledCompactionStrategy.class);
+    }
+
+    /**
+     * The purpose of this test is to verify that, with the given strategy, we can compact a mix of sstables created
+     * with sequential ids and with uuids. Then we verify that the number of resulting rows matches the number of rows
+     * we would get by merging the data from the initial sstables.
+     */
+    @SafeVarargs
+    private final void testCompactionStrategiesWithMixedSSTables(final Class<? extends AbstractCompactionStrategy>... compactionStrategyClasses) throws Exception
+    {
+        try (Cluster cluster = init(Cluster.build(1)
+                                           .withDataDirCount(1)
+                                           .withConfig(config -> config.set(ENABLE_UUID_FIELD_NAME, false))
+                                           .start()))
+        {
+            // create a table and two sstables with sequential ids for each strategy; the sstables contain overlapping partitions
+            for (Class<? extends AbstractCompactionStrategy> compactionStrategyClass : compactionStrategyClasses)
+            {
+                String tableName = "tbl_" + compactionStrategyClass.getSimpleName().toLowerCase();
+                cluster.schemaChange(createTableStmt(KEYSPACE, tableName, compactionStrategyClass));
+
+                createSSTables(cluster.get(1), KEYSPACE, tableName, 1, 2);
+                assertSSTablesCount(cluster.get(1), 2, 0, KEYSPACE, tableName);
+            }
+
+            // restart the node with uuid enabled
+            restartNode(cluster, 1, true);
+
+            // create another two sstables with uuid for each previously created table
+            for (Class<? extends AbstractCompactionStrategy> compactionStrategyClass : compactionStrategyClasses)
+            {
+                String tableName = "tbl_" + compactionStrategyClass.getSimpleName().toLowerCase();
+
+                createSSTables(cluster.get(1), KEYSPACE, tableName, 3, 4);
+
+                // expect to have a mix of sstables with sequential id and uuid
+                assertSSTablesCount(cluster.get(1), 2, 2, KEYSPACE, tableName);
+
+                // after compaction, we expect to have a single sstable with uuid
+                cluster.get(1).forceCompact(KEYSPACE, tableName);
+                assertSSTablesCount(cluster.get(1), 0, 1, KEYSPACE, tableName);
+
+                // verify the number of rows
+                checkRowsNumber(cluster.get(1), KEYSPACE, tableName, 9);
+            }
+        }
+    }
+
+    @Test
+    public void testStreamingToNodeWithUUIDEnabled() throws Exception
+    {
+        testStreaming(true);
+    }
+
+    @Test
+    public void testStreamingToNodeWithUUIDDisabled() throws Exception
+    {
+        testStreaming(false);
+    }
+
+    /**
+     * The purpose of this test case is to verify the scenario where we need to stream mixed UUID and seq sstables to
+     * a node which has: 1) UUID disabled, or 2) UUID enabled; then verify that we can read all the data properly
+     * from that node alone.
+     */
+    private void testStreaming(boolean uuidEnabledOnTargetNode) throws Exception
+    {
+        // start both nodes with uuid disabled
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withDataDirCount(1)
+                                           .withConfig(config -> config.set(ENABLE_UUID_FIELD_NAME, false).with(Feature.NETWORK))
+                                           .start()))
+        {
+            // create an empty table and shut down node 2
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl", null));
+            waitOn(cluster.get(2).shutdown());
+
+            // create 2 sstables with overlapping partitions on node 1 (with seq ids)
+            createSSTables(cluster.get(1), KEYSPACE, "tbl", 1, 2);
+
+            // restart node 1 with uuid enabled
+            restartNode(cluster, 1, true);
+
+            // create 2 sstables with overlapping partitions on node 1 (with UUID ids)
+            createSSTables(cluster.get(1), KEYSPACE, "tbl", 3, 4);
+
+            assertSSTablesCount(cluster.get(1), 2, 2, KEYSPACE, "tbl");
+
+            // now start node 2 with the requested UUID setting and perform repair
+            cluster.get(2).config().set(ENABLE_UUID_FIELD_NAME, uuidEnabledOnTargetNode);
+            cluster.get(2).startup();
+
+            assertSSTablesCount(cluster.get(2), 0, 0, KEYSPACE, "tbl");
+
+            // at this point we have sstables with seq and uuid ids on node 1 and no sstables on node 2;
+            // when we run repair, we expect all 4 sstables to be streamed from node 1 to node 2
+
+            cluster.get(2).nodetool("repair", KEYSPACE);
+
+            if (uuidEnabledOnTargetNode)
+                assertSSTablesCount(cluster.get(2), 0, 4, KEYSPACE, "tbl");
+            else
+                assertSSTablesCount(cluster.get(2), 4, 0, KEYSPACE, "tbl");
+
+            waitOn(cluster.get(1).shutdown());
+
+            checkRowsNumber(cluster.get(2), KEYSPACE, "tbl", 9);
+        }
+    }
+
+    @Test
+    public void testSnapshot() throws Exception
+    {
+        File tmpDir = new File(Files.createTempDirectory("test"));
+        Set<String> seqOnlyBackupDirs;
+        Set<String> seqAndUUIDBackupDirs;
+        Set<String> uuidOnlyBackupDirs;
+        try (Cluster cluster = init(Cluster.build(1)
+                                           .withDataDirCount(1)
+                                           .withConfig(config -> config.with(Feature.NETWORK)
+                                                                       .set("incremental_backups", true)
+                                                                       .set("snapshot_before_compaction", false)
+                                                                       .set("auto_snapshot", false)
+                                                                       .set(ENABLE_UUID_FIELD_NAME, false))
+                                           .start()))
+        {
+            // create the tables
+
+            cluster.schemaChange("CREATE KEYSPACE new_ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};");
+
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl_seq_only", null));
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl_seq_and_uuid", null));
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl_uuid_only", null));
+            cluster.schemaChange(createTableStmt("new_ks", "tbl_seq_only", null));
+            cluster.schemaChange(createTableStmt("new_ks", "tbl_seq_and_uuid", null));
+            cluster.schemaChange(createTableStmt("new_ks", "tbl_uuid_only", null));
+
+            // creating sstables
+            createSSTables(cluster.get(1), KEYSPACE, "tbl_seq_only", 1, 2, 3, 4);
+            createSSTables(cluster.get(1), KEYSPACE, "tbl_seq_and_uuid", 1, 2);
+            createSSTables(cluster.get(1), "new_ks", "tbl_seq_only", 5, 6, 7, 8);
+            createSSTables(cluster.get(1), "new_ks", "tbl_seq_and_uuid", 5, 6);
+
+            restartNode(cluster, 1, true);
+
+            createSSTables(cluster.get(1), KEYSPACE, "tbl_seq_and_uuid", 3, 4);
+            createSSTables(cluster.get(1), KEYSPACE, "tbl_uuid_only", 1, 2, 3, 4);
+            createSSTables(cluster.get(1), "new_ks", "tbl_seq_and_uuid", 7, 8);
+            createSSTables(cluster.get(1), "new_ks", "tbl_uuid_only", 5, 6, 7, 8);
+
+            Set<String> seqOnlySnapshotDirs = snapshot(cluster.get(1), KEYSPACE, "tbl_seq_only");
+            Set<String> seqAndUUIDSnapshotDirs = snapshot(cluster.get(1), KEYSPACE, "tbl_seq_and_uuid");
+            Set<String> uuidOnlySnapshotDirs = snapshot(cluster.get(1), KEYSPACE, "tbl_uuid_only");
+
+            seqOnlyBackupDirs = getBackupDirs(cluster.get(1), KEYSPACE, "tbl_seq_only");
+            seqAndUUIDBackupDirs = getBackupDirs(cluster.get(1), KEYSPACE, "tbl_seq_and_uuid");
+            uuidOnlyBackupDirs = getBackupDirs(cluster.get(1), KEYSPACE, "tbl_uuid_only");
+
+            // at this point, we should have sstables with backups and snapshots for all tables
+            assertSSTablesCount(cluster.get(1), 4, 0, KEYSPACE, "tbl_seq_only");
+            assertSSTablesCount(cluster.get(1), 2, 2, KEYSPACE, "tbl_seq_and_uuid");
+            assertSSTablesCount(cluster.get(1), 0, 4, KEYSPACE, "tbl_uuid_only");
+
+            assertBackupSSTablesCount(cluster.get(1), 4, 0, KEYSPACE, "tbl_seq_only");
+            assertBackupSSTablesCount(cluster.get(1), 2, 2, KEYSPACE, "tbl_seq_and_uuid");
+            assertBackupSSTablesCount(cluster.get(1), 0, 4, KEYSPACE, "tbl_uuid_only");
+
+            assertSnapshotSSTablesCount(cluster.get(1), 4, 0, KEYSPACE, "tbl_seq_only");
+            assertSnapshotSSTablesCount(cluster.get(1), 2, 2, KEYSPACE, "tbl_seq_and_uuid");
+            assertSnapshotSSTablesCount(cluster.get(1), 0, 4, KEYSPACE, "tbl_uuid_only");
+
+            checkRowsNumber(cluster.get(1), KEYSPACE, "tbl_seq_only", 9);
+            checkRowsNumber(cluster.get(1), KEYSPACE, "tbl_seq_and_uuid", 9);
+            checkRowsNumber(cluster.get(1), KEYSPACE, "tbl_uuid_only", 9);
+
+            // truncate the first set of tables
+            truncateAndAssertEmpty(cluster.get(1), KEYSPACE, "tbl_seq_only", "tbl_seq_and_uuid", "tbl_uuid_only");
+
+            restore(cluster.get(1), seqOnlySnapshotDirs, "tbl_seq_only", 9);
+            restore(cluster.get(1), seqAndUUIDSnapshotDirs, "tbl_seq_and_uuid", 9);
+            restore(cluster.get(1), uuidOnlySnapshotDirs, "tbl_uuid_only", 9);
+
+            truncateAndAssertEmpty(cluster.get(1), KEYSPACE, "tbl_seq_only", "tbl_seq_and_uuid", "tbl_uuid_only");
+
+            restore(cluster.get(1), seqOnlyBackupDirs, "tbl_seq_only", 9);
+            restore(cluster.get(1), seqAndUUIDBackupDirs, "tbl_seq_and_uuid", 9);
+            restore(cluster.get(1), uuidOnlyBackupDirs, "tbl_uuid_only", 9);
+
+            ImmutableSet<String> allBackupDirs = ImmutableSet.<String>builder().addAll(seqOnlyBackupDirs).addAll(seqAndUUIDBackupDirs).addAll(uuidOnlyBackupDirs).build();
+            cluster.get(1).runOnInstance(rethrow(() -> allBackupDirs.forEach(dir -> bulkLoadSSTables(new File(dir), "new_ks"))));
+
+            checkRowsNumber(cluster.get(1), "new_ks", "tbl_seq_only", 17);
+            checkRowsNumber(cluster.get(1), "new_ks", "tbl_seq_and_uuid", 17);
+            checkRowsNumber(cluster.get(1), "new_ks", "tbl_uuid_only", 17);
+
+
+            for (String dir : allBackupDirs)
+            {
+                File src = new File(dir);
+                File dest = relativizePath(tmpDir, src, 3);
+                Files.createDirectories(dest.parent().toPath());
+                FileUtils.moveDirectory(src.toJavaIOFile(), dest.toJavaIOFile());
+            }
+        }
+
+        try (Cluster cluster = init(Cluster.build(1)
+                                           .withDataDirCount(1)
+                                           .withConfig(config -> config.with(Feature.NETWORK, Feature.NATIVE_PROTOCOL)
+                                                                       .set("incremental_backups", true)
+                                                                       .set("snapshot_before_compaction", false)
+                                                                       .set("auto_snapshot", false)
+                                                                       .set(ENABLE_UUID_FIELD_NAME, false))
+                                           .start()))
+        {
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl_seq_only", null));
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl_seq_and_uuid", null));
+            cluster.schemaChange(createTableStmt(KEYSPACE, "tbl_uuid_only", null));
+
+            Function<String, String> relativeToTmpDir = d -> relativizePath(tmpDir, new File(d), 3).toString();
+            restore(cluster.get(1), seqOnlyBackupDirs.stream().map(relativeToTmpDir).collect(Collectors.toSet()), "tbl_seq_only", 9);
+            restore(cluster.get(1), seqAndUUIDBackupDirs.stream().map(relativeToTmpDir).collect(Collectors.toSet()), "tbl_seq_and_uuid", 9);
+            restore(cluster.get(1), uuidOnlyBackupDirs.stream().map(relativeToTmpDir).collect(Collectors.toSet()), "tbl_uuid_only", 9);
+        }
+    }
+
+    private static void restore(IInvokableInstance instance, Set<String> dirs, String targetTableName, int expectedRowsNum)
+    {
+        List<String> failedImports = instance.callOnInstance(() -> ColumnFamilyStore.getIfExists(KEYSPACE, targetTableName)
+                                                                                    .importNewSSTables(dirs, false, false, true, true, true, true, true));
+        assertThat(failedImports).isEmpty();
+        checkRowsNumber(instance, KEYSPACE, targetTableName, expectedRowsNum);
+    }
+
+    private static void truncateAndAssertEmpty(IInvokableInstance instance, String ks, String... tableNames)
+    {
+        for (String tableName : tableNames)
+        {
+            instance.executeInternal(format("TRUNCATE %s.%s", ks, tableName));
+            assertSSTablesCount(instance, 0, 0, ks, tableName);
+            checkRowsNumber(instance, ks, tableName, 0);
+        }
+    }
+
+    private static Set<String> snapshot(IInvokableInstance instance, String ks, String tableName)
+    {
+        Set<String> snapshotDirs = instance.callOnInstance(() -> ColumnFamilyStore.getIfExists(ks, tableName)
+                                                                                  .snapshot(SNAPSHOT_TAG)
+                                                                                  .getDirectories()
+                                                                                  .stream()
+                                                                                  .map(File::toString)
+                                                                                  .collect(Collectors.toSet()));
+        assertThat(snapshotDirs).isNotEmpty();
+        return snapshotDirs;
+    }
+
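+    // compaction is created disabled so that flushes are never merged behind the test's back and the sstable counts
+    // asserted in the tests stay deterministic; when no strategy is given, SizeTieredCompactionStrategy is used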
+    private static String createTableStmt(String ks, String name, Class<? extends AbstractCompactionStrategy> compactionStrategy)
+    {
+        if (compactionStrategy == null)
+            compactionStrategy = SizeTieredCompactionStrategy.class;
+        return format("CREATE TABLE %s.%s (pk int, ck int, v int, PRIMARY KEY (pk, ck)) " +
+                      "WITH compaction = {'class':'%s', 'enabled':'false'}",
+                      ks, name, compactionStrategy.getCanonicalName());
+    }
+
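+    /**
+     * Inserts three overlapping rows per record value ((r, r), (r, r + 1) and (r + 1, r + 1)) and flushes after each
+     * record, so each record produces exactly one sstable. For records 1..4 the union of the inserted rows contains
+     * 9 distinct (pk, ck) pairs, which is the row count asserted via checkRowsNumber in the tests above.
+     */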
+    private void createSSTables(IInstance instance, String ks, String tableName, int... records)
+    {
+        String insert = format("INSERT INTO %s.%s (pk, ck, v) VALUES (?, ?, ?)", ks, tableName);
+        for (int record : records)
+        {
+            instance.executeInternal(insert, record, record, ++v);
+            instance.executeInternal(insert, record, record + 1, ++v);
+            instance.executeInternal(insert, record + 1, record + 1, ++v);
+            instance.flush(ks);
+        }
+    }
+
+    private static void assertSSTablesCount(Set<Descriptor> descs, String tableName, int expectedSeqGenIds, int expectedUUIDGenIds)
+    {
+        List<String> seqSSTables = descs.stream().filter(desc -> desc.id instanceof SequenceBasedSSTableId).map(Descriptor::baseFilename).sorted().collect(Collectors.toList());
+        List<String> uuidSSTables = descs.stream().filter(desc -> desc.id instanceof UUIDBasedSSTableId).map(Descriptor::baseFilename).sorted().collect(Collectors.toList());
+        assertThat(seqSSTables).describedAs("SSTables of %s with sequence based id", tableName).hasSize(expectedSeqGenIds);
+        assertThat(uuidSSTables).describedAs("SSTables of %s with UUID based id", tableName).hasSize(expectedUUIDGenIds);
+    }
+
+    private static void assertSSTablesCount(IInvokableInstance instance, int expectedSeqGenIds, int expectedUUIDGenIds, String ks, String... tableNames)
+    {
+        instance.runOnInstance(rethrow(() -> Arrays.stream(tableNames).forEach(tableName -> assertSSTablesCount(getSSTables(ks, tableName), tableName, expectedSeqGenIds, expectedUUIDGenIds))));
+    }
+
+    private static void assertSnapshotSSTablesCount(IInvokableInstance instance, int expectedSeqGenIds, int expectedUUIDGenIds, String ks, String... tableNames)
+    {
+        instance.runOnInstance(rethrow(() -> Arrays.stream(tableNames).forEach(tableName -> assertSSTablesCount(getSnapshots(ks, tableName, SNAPSHOT_TAG), tableName, expectedSeqGenIds, expectedUUIDGenIds))));
+    }
+
+    private static void assertBackupSSTablesCount(IInvokableInstance instance, int expectedSeqGenIds, int expectedUUIDGenIds, String ks, String... tableNames)
+    {
+        instance.runOnInstance(rethrow(() -> Arrays.stream(tableNames).forEach(tableName -> assertSSTablesCount(getBackups(ks, tableName), tableName, expectedSeqGenIds, expectedUUIDGenIds))));
+    }
+
+    private static Set<String> getBackupDirs(IInvokableInstance instance, String ks, String tableName)
+    {
+        return instance.callOnInstance(() -> getBackups(ks, tableName).stream()
+                                                                      .map(d -> d.directory)
+                                                                      .map(File::toString)
+                                                                      .collect(Collectors.toSet()));
+    }
+
+    private static void verifySSTableActivity(Cluster cluster, boolean expectLegacyTableIsPopulated)
+    {
+        cluster.get(1).runOnInstance(() -> {
+            RestorableMeter meter = new RestorableMeter(15, 120);
+            SequenceBasedSSTableId seqGenId = new SequenceBasedSSTableId(1);
+            SystemKeyspace.persistSSTableReadMeter("ks", "tab", seqGenId, meter);
+            assertThat(SystemKeyspace.getSSTableReadMeter("ks", "tab", seqGenId)).matches(m -> m.fifteenMinuteRate() == meter.fifteenMinuteRate()
+                                                                                               && m.twoHourRate() == meter.twoHourRate());
+
+            checkSSTableActivityRow(SSTABLE_ACTIVITY_V2, seqGenId.toString(), true);
+            if (expectLegacyTableIsPopulated)
+                checkSSTableActivityRow(LEGACY_SSTABLE_ACTIVITY, seqGenId.generation, true);
+
+            SystemKeyspace.clearSSTableReadMeter("ks", "tab", seqGenId);
+
+            checkSSTableActivityRow(SSTABLE_ACTIVITY_V2, seqGenId.toString(), false);
+            if (expectLegacyTableIsPopulated)
+                checkSSTableActivityRow(LEGACY_SSTABLE_ACTIVITY, seqGenId.generation, false);
+
+            UUIDBasedSSTableId uuidGenId = new UUIDBasedSSTableId(TimeUUID.Generator.nextTimeUUID());
+            SystemKeyspace.persistSSTableReadMeter("ks", "tab", uuidGenId, meter);
+            assertThat(SystemKeyspace.getSSTableReadMeter("ks", "tab", uuidGenId)).matches(m -> m.fifteenMinuteRate() == meter.fifteenMinuteRate()
+                                                                                                && m.twoHourRate() == meter.twoHourRate());
+
+            checkSSTableActivityRow(SSTABLE_ACTIVITY_V2, uuidGenId.toString(), true);
+
+            SystemKeyspace.clearSSTableReadMeter("ks", "tab", uuidGenId);
+
+            checkSSTableActivityRow(SSTABLE_ACTIVITY_V2, uuidGenId.toString(), false);
+        });
+    }
+
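+    // the legacy activity table identifies sstables by their integer generation in a "generation" column, while the
+    // v2 table uses the stringified id in an "id" column; the table-name column also differs, hence the branching below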
+    private static void checkSSTableActivityRow(String table, Object genId, boolean expectExists)
+    {
+        String tableColName = SSTABLE_ACTIVITY_V2.equals(table) ? "table_name" : "columnfamily_name";
+        String idColName = SSTABLE_ACTIVITY_V2.equals(table) ? "id" : "generation";
+        String cql = "SELECT rate_15m, rate_120m FROM system.%s WHERE keyspace_name=? and %s=? and %s=?";
+        UntypedResultSet results = executeInternal(format(cql, table, tableColName, idColName), "ks", "tab", genId);
+        assertThat(results).isNotNull();
+
+        if (expectExists)
+        {
+            assertThat(results.isEmpty()).isFalse();
+            UntypedResultSet.Row row = results.one();
+            assertThat(row.getDouble("rate_15m")).isEqualTo(15d, Offset.offset(0.001d));
+            assertThat(row.getDouble("rate_120m")).isEqualTo(120d, Offset.offset(0.001d));
+        }
+        else
+        {
+            assertThat(results.isEmpty()).isTrue();
+        }
+    }
+
+    private static void restartNode(Cluster cluster, int node, boolean uuidEnabled)
+    {
+        waitOn(cluster.get(node).shutdown());
+        cluster.get(node).config().set(ENABLE_UUID_FIELD_NAME, uuidEnabled);
+        cluster.get(node).startup();
+    }
+
+    private static void checkRowsNumber(IInstance instance, String ks, String tableName, int expectedNumber)
+    {
+        SimpleQueryResult result = instance.executeInternalWithResult(format("SELECT * FROM %s.%s", ks, tableName));
+        Object[][] rows = result.toObjectArrays();
+        assertThat(rows).withFailMessage("Invalid results for %s.%s - should have %d rows but has %d: \n%s", ks, tableName, expectedNumber,
+                                         rows.length, result.toString()).hasSize(expectedNumber);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/SSTableLoaderEncryptionOptionsTest.java b/test/distributed/org/apache/cassandra/distributed/test/SSTableLoaderEncryptionOptionsTest.java
index c9b6c62..00834ba 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/SSTableLoaderEncryptionOptionsTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/SSTableLoaderEncryptionOptionsTest.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.distributed.test;
 
 import java.io.IOException;
-import java.io.File;
 import java.util.Collections;
 import java.util.List;
 
@@ -30,9 +29,11 @@
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.tools.BulkLoader;
 import org.apache.cassandra.tools.ToolRunner;
 import org.apache.cassandra.service.StorageService;
@@ -41,6 +42,7 @@
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
+import static com.google.common.collect.Lists.transform;
 import static org.apache.cassandra.distributed.test.ExecUtil.rethrow;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
 import static org.apache.cassandra.distributed.shared.AssertUtils.row;
@@ -96,7 +98,7 @@
                                                             "--truststore", validTrustStorePath,
                                                             "--truststore-password", validTrustStorePassword,
                                                             "--conf-path", "test/conf/sstableloader_with_encryption.yaml",
-                                                            sstables_to_upload.getAbsolutePath());
+                                                            sstables_to_upload.absolutePath());
         tool.assertOnCleanExit();
         assertTrue(tool.getStdout().contains("Summary statistics"));
         assertRows(CLUSTER.get(1).executeInternal("SELECT count(*) FROM ssl_upload_tables.test"), row(42L));
@@ -116,7 +118,7 @@
                                                             "--truststore", validTrustStorePath,
                                                             "--truststore-password", validTrustStorePassword,
                                                             "--conf-path", "test/conf/sstableloader_with_encryption.yaml",
-                                                            sstables_to_upload.getAbsolutePath());
+                                                            sstables_to_upload.absolutePath());
         tool.assertOnCleanExit();
         assertTrue(tool.getStdout().contains("Summary statistics"));
         assertTrue(tool.getStdout().contains("ssl storage port is deprecated and not used"));
@@ -156,7 +158,8 @@
         {
             CLUSTER.get(1).executeInternal(String.format("INSERT INTO ssl_upload_tables.test (pk, val) VALUES (%s, '%s')", i, i));
         }
-        CLUSTER.get(1).runOnInstance(rethrow(() -> StorageService.instance.forceKeyspaceFlush("ssl_upload_tables")));
+        CLUSTER.get(1).runOnInstance(rethrow(() -> StorageService.instance.forceKeyspaceFlush("ssl_upload_tables",
+                                                                                              ColumnFamilyStore.FlushReason.UNIT_TESTS)));
     }
 
     private static void truncateGeneratedTables() throws IOException
@@ -167,16 +170,18 @@
     private static File copySstablesFromDataDir(String table) throws IOException
     {
         File cfDir = new File("build/test/cassandra/ssl_upload_tables", table);
-        cfDir.mkdirs();
-        List<File> keyspace_dirs = CLUSTER.get(1).callOnInstance(() -> Keyspace.open("ssl_upload_tables").getColumnFamilyStore(table).getDirectories().getCFDirectories());
-        for (File srcDir : keyspace_dirs)
+        cfDir.tryCreateDirectories();
+        // Get paths as Strings, because org.apache.cassandra.io.util.File in the dtest
+        // node is loaded by org.apache.cassandra.distributed.shared.InstanceClassLoader.
+        List<String> keyspace_dir_paths = CLUSTER.get(1).callOnInstance(() -> {
+            List<File> cfDirs = Keyspace.open("ssl_upload_tables").getColumnFamilyStore(table).getDirectories().getCFDirectories();
+            return transform(cfDirs, (d) -> d.absolutePath());
+        });
+        for (File srcDir : transform(keyspace_dir_paths, (p) -> new File(p)))
         {
-            for (File file : srcDir.listFiles())
+            for (File file : srcDir.tryList((file) -> file.isFile()))
             {
-                if (file.isFile())
-                {
-                    FileUtils.copyFileToDirectory(file, cfDir);
-                }
+                FileUtils.copyFileToDirectory(file.toJavaIOFile(), cfDir.toJavaIOFile());
             }
         }
         return cfDir;
diff --git a/test/distributed/org/apache/cassandra/distributed/test/SchemaTest.java b/test/distributed/org/apache/cassandra/distributed/test/SchemaTest.java
index bc5ccbe..142b66b 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/SchemaTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/SchemaTest.java
@@ -18,27 +18,35 @@
 
 package org.apache.cassandra.distributed.test;
 
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.util.concurrent.Uninterruptibles;
 import org.junit.Test;
 
 import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableCallable;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.assertj.core.api.Assertions;
 import org.awaitility.Awaitility;
+import org.awaitility.core.ConditionFactory;
 
-import static java.time.Duration.ofMillis;
 import static java.time.Duration.ofSeconds;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 public class SchemaTest extends TestBaseImpl
 {
+    public static final String TABLE_ONE = "tbl_one";
+    public static final String TABLE_TWO = "tbl_two";
+
     @Test
     public void readRepair() throws Throwable
     {
@@ -91,7 +99,7 @@
             Throwable cause = e;
             while (cause != null)
             {
-                if (cause.getMessage() != null && cause.getMessage().contains("Unknown column "+name+" during deserialization"))
+                if (cause.getMessage() != null && cause.getMessage().contains("Unknown column " + name + " during deserialization"))
                     causeIsUnknownColumn = true;
                 cause = cause.getCause();
             }
@@ -99,58 +107,94 @@
         }
     }
 
+    /**
+     * The purpose of this test is to verify manual schema reset functionality.
+     * <p>
+     * There is a 2-node cluster with TABLE_ONE created. The schema version is agreed on both nodes. Then the 2nd node
+     * is shut down. We introduce a disagreement by dropping TABLE_ONE and creating TABLE_TWO on the 1st node. Therefore,
+     * the 1st node has a newer schema version with TABLE_TWO, while the shut-down 2nd node has an older schema version
+     * with TABLE_ONE.
+     * <p>
+     * At this point, if we just started the 2nd node, it would sync its schema by getting fresh mutations from the 1st
+     * node which would result in both nodes having only the definition of TABLE_TWO.
+     * <p>
+     * However, before starting the 2nd node the schema is reset on the 1st node, so the 1st node will discard its local
+     * schema whenever it manages to fetch a schema definition from some other node (the 2nd node in this case).
+     * It is expected to end up with both nodes having only the definition of TABLE_ONE.
+     * <p>
+     * In the second phase of the test we simply break the schema on the 1st node and call reset to fetch the schema
+     * definition from the 2nd node.
+     */
     @Test
     public void schemaReset() throws Throwable
     {
-        int delayUnit = 1000;
-        CassandraRelevantProperties.MIGRATION_DELAY.setInt(5 * delayUnit);
-        CassandraRelevantProperties.SCHEMA_PULL_INTERVAL_MS.setInt(5 * delayUnit);
-        CassandraRelevantProperties.SCHEMA_PULL_BACKOFF_DELAY_MS.setInt(delayUnit);
-
+        CassandraRelevantProperties.MIGRATION_DELAY.setLong(10000);
+        CassandraRelevantProperties.SCHEMA_PULL_INTERVAL_MS.setLong(10000);
         try (Cluster cluster = init(Cluster.build(2).withConfig(cfg -> cfg.with(Feature.GOSSIP, Feature.NETWORK)).start()))
         {
-            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk INT PRIMARY KEY, v TEXT)");
+            // create TABLE_ONE and make sure it is propagated
+            cluster.schemaChange(String.format("CREATE TABLE %s.%s (pk INT PRIMARY KEY, v TEXT)", KEYSPACE, TABLE_ONE));
+            assertTrue(checkTablesPropagated(cluster.get(1), true, false));
+            assertTrue(checkTablesPropagated(cluster.get(2), true, false));
 
-            assertTrue(cluster.get(1).callOnInstance(() -> Schema.instance.getTableMetadata(KEYSPACE, "tbl") != null));
-            assertTrue(cluster.get(2).callOnInstance(() -> Schema.instance.getTableMetadata(KEYSPACE, "tbl") != null));
-
+            // shut down the 2nd node and make sure the 1st no longer sees it as alive
             cluster.get(2).shutdown().get();
+            await(30).until(() -> cluster.get(1).callOnInstance(() -> {
+                return Gossiper.instance.getLiveMembers()
+                                        .stream()
+                                        .allMatch(e -> e.equals(getBroadcastAddressAndPort()));
+            }));
 
-            Awaitility.await()
-                      .atMost(ofSeconds(30))
-                      .until(() -> cluster
-                                   .get(1)
-                                   .callOnInstance(() -> Gossiper.instance
-                                                         .getLiveMembers()
-                                                         .stream()
-                                                         .allMatch(addr -> addr.equals(FBUtilities.getBroadcastAddressAndPort()))));
+            // when there is no node to fetch the schema from, reset local schema should immediately fail
+            Assertions.assertThatExceptionOfType(RuntimeException.class).isThrownBy(() -> {
+                cluster.get(1).runOnInstance(() -> Schema.instance.resetLocalSchema());
+            }).withMessageContaining("Cannot reset local schema when there are no other live nodes");
 
-            // when schema is removed and there is no other node to fetch it from, node 1 should be left with clean schema
-            //noinspection Convert2MethodRef
-            cluster.get(1).runOnInstance(() -> StorageService.instance.resetLocalSchema());
-            assertTrue(cluster.get(1).callOnInstance(() -> Schema.instance.getTableMetadata(KEYSPACE, "tbl") == null));
+            // now, let's create a disagreement: the shut-down node 2 has a definition of TABLE_ONE, while the running
+            // node 1 will have a definition of TABLE_TWO
+            cluster.coordinator(1).execute(String.format("DROP TABLE %s.%s", KEYSPACE, TABLE_ONE), ConsistencyLevel.ONE);
+            cluster.coordinator(1).execute(String.format("CREATE TABLE %s.%s (pk INT PRIMARY KEY, v TEXT)", KEYSPACE, TABLE_TWO), ConsistencyLevel.ONE);
+            await(30).until(() -> checkTablesPropagated(cluster.get(1), false, true));
 
-            // sleep slightly longer than the schema pull interval
-            Uninterruptibles.sleepUninterruptibly(6 * delayUnit, TimeUnit.MILLISECONDS);
+            // Schema.resetLocalSchema is guarded by conditions which do not let us reset the schema when there is no
+            // other live node in the cluster; therefore we simply call SchemaUpdateHandler.clear (the only real work
+            // done by Schema.resetLocalSchema under the hood)
+            SerializableCallable<Boolean> clear = () -> Schema.instance.updateHandler.clear().awaitUninterruptibly(1, TimeUnit.MINUTES);
+            Future<Boolean> clear1 = cluster.get(1).asyncCallsOnInstance(clear).call();
+            assertFalse(clear1.isDone());
 
-            // when the other node is started, schema should be back in sync - node 2 should send schema mutations to node 1
+            // when the 2nd node is started, schema should be back in sync
             cluster.get(2).startup();
+            await(30).until(() -> clear1.isDone() && clear1.get());
 
-            // sleep slightly longer than the schema pull interval
-            Uninterruptibles.sleepUninterruptibly(6 * delayUnit, TimeUnit.MILLISECONDS);
+            // this proves that reset schema works on the 1st node - the most recent change should be discarded because
+            // the node receives the schema from the 2nd node and applies it to an empty schema
+            await(60).until(() -> checkTablesPropagated(cluster.get(1), true, false));
 
-            Awaitility.waitAtMost(ofMillis(6 * delayUnit))
-                      .pollDelay(ofSeconds(1))
-                      .until(() -> cluster.get(1).callOnInstance(() -> Schema.instance.getTableMetadata(KEYSPACE, "tbl") != null));
+            // now let's break schema locally and let it be reset
+            cluster.get(1).runOnInstance(() -> Schema.instance.getLocalKeyspaces()
+                                                              .get(SchemaConstants.SCHEMA_KEYSPACE_NAME)
+                                                              .get().tables.forEach(t -> ColumnFamilyStore.getIfExists(t.keyspace, t.name).truncateBlockingWithoutSnapshot()));
 
-            // when schema is removed and there is a node to fetch it from, node 1 should immediately restore the schema
-            //noinspection Convert2MethodRef
-            cluster.get(2).runOnInstance(() -> StorageService.instance.resetLocalSchema());
-
-            Awaitility.waitAtMost(ofMillis(6 * delayUnit))
-                      .pollDelay(ofSeconds(1))
-                      .until(() -> cluster.get(2).callOnInstance(() -> Schema.instance.getTableMetadata(KEYSPACE, "tbl") != null));
+            // when schema is removed and there is a node to fetch it from, the 1st node should immediately restore it
+            cluster.get(1).runOnInstance(() -> Schema.instance.resetLocalSchema());
+            // note that we should not wait for this to become true because resetLocalSchema is blocking,
+            // and after it completes successfully the schema should already be back in sync
+            assertTrue(checkTablesPropagated(cluster.get(1), true, false));
+            assertTrue(checkTablesPropagated(cluster.get(2), true, false));
         }
     }
 
+    private static ConditionFactory await(int seconds)
+    {
+        return Awaitility.await().atMost(ofSeconds(seconds)).pollDelay(ofSeconds(1));
+    }
+
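+    /**
+     * Returns true when the presence of TABLE_ONE on the given instance matches the {@code one} flag and the presence
+     * of TABLE_TWO matches the {@code two} flag (the XOR with the negated flag turns "metadata != null" into the
+     * expected boolean).
+     */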
+    private static boolean checkTablesPropagated(IInvokableInstance instance, boolean one, boolean two)
+    {
+        return instance.callOnInstance(() -> {
+            return (Schema.instance.getTableMetadata(KEYSPACE, TABLE_ONE) != null ^ !one)
+                   && (Schema.instance.getTableMetadata(KEYSPACE, TABLE_TWO) != null ^ !two);
+        });
+    }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/SecondaryIndexTest.java b/test/distributed/org/apache/cassandra/distributed/test/SecondaryIndexTest.java
new file mode 100644
index 0000000..3b55dcf
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/SecondaryIndexTest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.utils.TimeUUID;
+
+public class SecondaryIndexTest extends TestBaseImpl
+{
+    private static final int NUM_NODES = 3;
+    private static final int REPLICATION_FACTOR = 1;
+    private static final String CREATE_TABLE = "CREATE TABLE %s(k int, v int, PRIMARY KEY (k))";
+    private static final String CREATE_INDEX = "CREATE INDEX v_index_%d ON %s(v)";
+
+    private static final AtomicInteger seq = new AtomicInteger();
+    
+    private static String tableName;
+    private static Cluster cluster;
+
+    @BeforeClass
+    public static void setupCluster() throws IOException
+    {
+        cluster = init(Cluster.build(NUM_NODES).start(), REPLICATION_FACTOR);
+    }
+
+    @AfterClass
+    public static void teardownCluster()
+    {
+        if (cluster != null)
+            cluster.close();
+    }
+
+    @Before
+    public void before()
+    {
+        tableName = String.format("%s.t_%d", KEYSPACE, seq.getAndIncrement());
+        cluster.schemaChange(String.format(CREATE_TABLE, tableName));
+        cluster.schemaChange(String.format(CREATE_INDEX, seq.get(), tableName));
+    }
+
+    @After
+    public void after()
+    {
+        cluster.schemaChange(String.format("DROP TABLE %s", tableName));
+    }
+
+    @Test
+    public void test_only_coordinator_chooses_index_for_query()
+    {
+        for (int i = 0 ; i < 99 ; ++i)
+            cluster.coordinator(1).execute(String.format("INSERT INTO %s (k, v) VALUES (?, ?)", tableName), ConsistencyLevel.ALL, i, i/3);
+        cluster.forEach(i -> i.flush(KEYSPACE));
+
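+        // with v = i / 3 each indexed value maps to exactly 3 rows; for every query we expect the
+        // "Scanning with ..." trace event to be emitted by the coordinator only, while the per-range index reads
+        // are expected to run on all three nodes (verified against the trace events below)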
+        Pattern indexScanningPattern =
+                Pattern.compile(String.format("Index mean cardinalities are v_index_%d:[0-9]+. Scanning with v_index_%d.", seq.get(), seq.get()));
+
+        for (int i = 0 ; i < 33; ++i)
+        {
+            UUID trace = TimeUUID.Generator.nextTimeUUID().asUUID();
+            Object[][] result = cluster.coordinator(1).executeWithTracing(trace, String.format("SELECT * FROM %s WHERE v = ?", tableName), ConsistencyLevel.ALL, i);
+            Assert.assertEquals("Failed on iteration " + i, 3, result.length);
+
+            Awaitility.await("For all events in the tracing session to persist")
+                    .pollInterval(100, TimeUnit.MILLISECONDS)
+                    .atMost(10, TimeUnit.SECONDS)
+                    .untilAsserted(() -> 
+                                   {
+                                       Object[][] traces = cluster.coordinator(1)
+                                                                  .execute("SELECT source, activity FROM system_traces.events WHERE session_id = ?", 
+                                                                           ConsistencyLevel.ALL, trace);
+
+                                       List<InetAddress> scanning =
+                                               Arrays.stream(traces)
+                                                     .filter(t -> indexScanningPattern.matcher(t[1].toString()).matches())
+                                                     .map(t -> (InetAddress) t[0])
+                                                     .distinct().collect(Collectors.toList());
+
+                                       List<InetAddress> executing =
+                                               Arrays.stream(traces)
+                                                     .filter(t -> t[1].toString().equals(String.format("Executing read on " + tableName + " using index v_index_%d", seq.get())))
+                                                     .map(t -> (InetAddress) t[0])
+                                                     .distinct().collect(Collectors.toList());
+
+                                       Assert.assertEquals(Collections.singletonList(cluster.get(1).broadcastAddress().getAddress()), scanning);
+                                       Assert.assertEquals(3, executing.size());
+                                   });
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java
index 69b074f..2e26659 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java
@@ -544,4 +544,4 @@
             cluster.schemaChange(format("DROP TABLE IF EXISTS %s"));
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/SnapshotsTest.java b/test/distributed/org/apache/cassandra/distributed/test/SnapshotsTest.java
new file mode 100644
index 0000000..8ba3a90
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/SnapshotsTest.java
@@ -0,0 +1,365 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.NodeToolResult;
+import org.apache.cassandra.distributed.shared.WithProperties;
+import org.apache.cassandra.utils.Clock;
+
+import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.stream.Collectors.toList;
+import static org.apache.cassandra.distributed.shared.ClusterUtils.stopUnchecked;
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+
+public class SnapshotsTest extends TestBaseImpl
+{
+    public static final Integer SNAPSHOT_CLEANUP_PERIOD_SECONDS = 1;
+    public static final Integer FIVE_SECONDS = 5;
+    public static final Integer TEN_SECONDS = 10;
+    private static final WithProperties properties = new WithProperties();
+    private static Cluster cluster;
+
+    private final String[] exoticSnapshotNames = new String[] { "snapshot", "snapshots", "backup", "backups",
+                                                                "Snapshot", "Snapshots", "Backups", "Backup",
+                                                                "snapshot.with.dots-and-dashes"};
+
+    @BeforeClass
+    public static void before() throws IOException
+    {
+        properties.set(CassandraRelevantProperties.SNAPSHOT_CLEANUP_INITIAL_DELAY_SECONDS, 0);
+        properties.set(CassandraRelevantProperties.SNAPSHOT_CLEANUP_PERIOD_SECONDS, SNAPSHOT_CLEANUP_PERIOD_SECONDS);
+        properties.set(CassandraRelevantProperties.SNAPSHOT_MIN_ALLOWED_TTL_SECONDS, FIVE_SECONDS);
+        cluster = init(Cluster.build(1).start());
+    }
+
+    @After
+    public void clearAllSnapshots()
+    {
+        cluster.schemaChange(withKeyspace("DROP TABLE IF EXISTS %s.tbl;"));
+        cluster.get(1).nodetoolResult("clearsnapshot", "--all").asserts().success();
+        for (String tag : new String[] {"basic", "first", "second", "tag1"})
+            waitForSnapshotCleared(tag);
+        for (String tag : exoticSnapshotNames)
+            waitForSnapshot(tag, false, true);
+    }
+
+    @AfterClass
+    public static void after()
+    {
+        properties.close();
+        if (cluster != null)
+            cluster.close();
+    }
+
+    @Test
+    public void testSnapshotsCleanupByTTL()
+    {
+        cluster.get(1).nodetoolResult("snapshot", "--ttl", format("%ds", FIVE_SECONDS),
+                                      "-t", "basic").asserts().success();
+        waitForSnapshotPresent("basic");
+        waitForSnapshotCleared("basic");
+    }
+
+    @Test
+    public void testSnapshotCleanupAfterRestart() throws Exception
+    {
+        int TWENTY_SECONDS = 20; // longer TTL to allow snapshot to survive node restart
+        IInvokableInstance instance = cluster.get(1);
+
+        // Create snapshot and check exists
+        instance.nodetoolResult("snapshot", "--ttl", format("%ds", TWENTY_SECONDS),
+                                "-t", "basic").asserts().success();
+        waitForSnapshotPresent("basic");
+
+        // Restart node
+        long beforeStop = Clock.Global.currentTimeMillis();
+        stopUnchecked(instance);
+        instance.startup();
+        long afterStart = Clock.Global.currentTimeMillis();
+
+        // if stopping and starting the node took more than 20 seconds,
+        // we assume the snapshot has already expired, so we just wait until we no longer see it
+        if (afterStart - beforeStop > 20_000)
+        {
+            waitForSnapshotCleared("basic");
+            return;
+        }
+        else
+        {
+            // Check snapshot still exists after restart
+            cluster.get(1).nodetoolResult("listsnapshots").asserts().stdoutContains("basic");
+        }
+
+        // Sleep for the full TTL duration and then check the snapshot is gone
+        Thread.sleep(TWENTY_SECONDS * 1000L);
+        waitForSnapshotCleared("basic");
+    }
+
+    @Test
+    public void testSnapshotInvalidArgument() throws Exception
+    {
+        IInvokableInstance instance = cluster.get(1);
+
+        instance.nodetoolResult("snapshot", "--ttl", format("%ds", 1), "-t", "basic")
+                .asserts()
+                .failure()
+                .stdoutContains(format("ttl for snapshot must be at least %d seconds", FIVE_SECONDS));
+
+        instance.nodetoolResult("snapshot", "--ttl", "invalid-ttl").asserts().failure();
+    }
+
+    @Test
+    public void testListingSnapshotsWithoutTTL()
+    {
+        // take snapshot without ttl
+        cluster.get(1).nodetoolResult("snapshot", "-t", "snapshot_without_ttl").asserts().success();
+
+        // take snapshot with ttl
+        cluster.get(1).nodetoolResult("snapshot", "--ttl",
+                                      format("%ds", 1000),
+                                      "-t", "snapshot_with_ttl").asserts().success();
+
+        // list snapshots without TTL
+        waitForSnapshot("snapshot_without_ttl", true, true);
+        waitForSnapshot("snapshot_with_ttl", false, true);
+
+        // list all snapshots
+        waitForSnapshotPresent("snapshot_without_ttl");
+        waitForSnapshotPresent("snapshot_with_ttl");
+    }
+
+    @Test
+    public void testManualSnapshotCleanup()
+    {
+        // take snapshots with ttl
+        cluster.get(1).nodetoolResult("snapshot", "--ttl",
+                                      format("%ds", TEN_SECONDS),
+                                      "-t", "first").asserts().success();
+
+        cluster.get(1).nodetoolResult("snapshot", "--ttl",
+                                      format("%ds", TEN_SECONDS),
+                                      "-t", "second").asserts().success();
+
+        waitForSnapshotPresent("first");
+        waitForSnapshotPresent("second");
+
+        cluster.get(1).nodetoolResult("clearsnapshot", "-t", "first").asserts().success();
+
+        waitForSnapshotCleared("first");
+        waitForSnapshotPresent("second");
+
+        // wait for the second snapshot to expire via its TTL as well
+        waitForSnapshotCleared("second");
+    }
+
+    @Test
+    public void testSecondaryIndexCleanup()
+    {
+        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+        cluster.schemaChange(withKeyspace("CREATE INDEX value_idx ON %s.tbl (value)"));
+
+        populate(cluster);
+
+        cluster.get(1).nodetoolResult("snapshot", "--ttl",
+                                      format("%ds", FIVE_SECONDS),
+                                      "-t", "first",
+                                      "-kt", withKeyspace("%s.tbl")).asserts().success();
+
+        waitForSnapshotPresent("first");
+        waitForSnapshotCleared("first");
+    }
+
+    @Test
+    public void testListSnapshotOfDroppedTable()
+    {
+        IInvokableInstance instance = cluster.get(1);
+
+        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+
+        populate(cluster);
+
+        instance.nodetoolResult("snapshot",
+                                      "-t", "tag1",
+                                      "-kt", withKeyspace("%s.tbl")).asserts().success();
+
+        // Check snapshot is listed when table is not dropped
+        waitForSnapshotPresent("tag1");
+
+        // Drop Table
+        cluster.schemaChange(withKeyspace("DROP TABLE %s.tbl;"));
+
+        // Check snapshot is listed after table is dropped
+        waitForSnapshotPresent("tag1");
+
+        // Restart node
+        stopUnchecked(instance);
+        instance.startup();
+
+        // Check snapshot of dropped table still exists after restart
+        waitForSnapshotPresent("tag1");
+    }
+
+    @Test
+    public void testTTLSnapshotOfDroppedTable()
+    {
+        IInvokableInstance instance = cluster.get(1);
+
+        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+
+        populate(cluster);
+
+        instance.nodetoolResult("snapshot",
+                                "-t", "tag1",
+                                "-kt", withKeyspace("%s.tbl"),
+                                "--ttl", format("%ds", FIVE_SECONDS)).asserts().success();
+
+        // Check snapshot is listed when table is not dropped
+        instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains("tag1");
+
+        // Drop Table
+        cluster.schemaChange(withKeyspace("DROP TABLE %s.tbl;"));
+
+        // Check snapshot is listed after table is dropped
+        instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains("tag1");
+
+        // Check snapshot is removed after at most 10s
+        await().timeout(2L * FIVE_SECONDS, SECONDS)
+               .pollInterval(1, SECONDS)
+               .until(() -> !instance.nodetoolResult("listsnapshots").getStdout().contains("tag1"));
+    }
+
+    @Test
+    public void testTTLSnapshotOfDroppedTableAfterRestart()
+    {
+        IInvokableInstance instance = cluster.get(1);
+
+        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+
+        populate(cluster);
+
+        instance.nodetoolResult("snapshot",
+                                "-t", "tag1",
+                                "-kt", withKeyspace("%s.tbl"),
+                                "--ttl", "1h").asserts().success();
+
+
+        // Check snapshot is listed when table is not dropped
+        waitForSnapshotPresent("tag1");
+
+        // Drop Table
+        cluster.schemaChange(withKeyspace("DROP TABLE %s.tbl;"));
+
+        // Restart node
+        stopUnchecked(instance);
+        instance.startup();
+
+        // Check snapshot still exists after restart
+        waitForSnapshotPresent("tag1");
+    }
+
+    @Test
+    public void testExoticSnapshotNames()
+    {
+        IInvokableInstance instance = cluster.get(1);
+        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (key int, value text, PRIMARY KEY (key))"));
+        populate(cluster);
+
+        for (String tag : exoticSnapshotNames)
+        {
+            instance.nodetoolResult("snapshot",
+                                    "-t", tag,
+                                    "-kt", withKeyspace("%s.tbl")).asserts().success();
+
+            waitForSnapshot(tag, true, true);
+        }
+    }
+
+    @Test
+    public void testSameTimestampOnEachTableOfSnapshot()
+    {
+        cluster.get(1).nodetoolResult("snapshot", "-t", "sametimestamp").asserts().success();
+        waitForSnapshotPresent("sametimestamp");
+        NodeToolResult result = cluster.get(1).nodetoolResult("listsnapshots");
+
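+        // rows of the listsnapshots output are whitespace-separated; after collapsing repeated spaces,
+        // index 7 is the snapshot creation timestamp column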
+        Pattern whitespace = Pattern.compile(" +");
+        long distinctTimestamps = Arrays.stream(result.getStdout().split("\n"))
+                                   .filter(line -> line.startsWith("sametimestamp"))
+                                   .map(line -> whitespace.matcher(line).replaceAll(" ").split(" ")[7])
+                                   .distinct()
+                                   .count();
+
+        // assert all creation timestamps are the same, i.e. there is just one distinct value across all individual tables
+        assertEquals(1, distinctTimestamps);
+    }
+
+    private void populate(Cluster cluster)
+    {
+        for (int i = 0; i < 100; i++)
+            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (key, value) VALUES (?, 'txt')"), ConsistencyLevel.ONE, i);
+    }
+
+    private void waitForSnapshotPresent(String snapshotName)
+    {
+        waitForSnapshot(snapshotName, true, false);
+    }
+
+    private void waitForSnapshotCleared(String snapshotName)
+    {
+        waitForSnapshot(snapshotName, false, false);
+    }
+
+    private void waitForSnapshot(String snapshotName, boolean expectPresent, boolean noTTL)
+    {
+        await().timeout(20, SECONDS)
+               .pollDelay(0, SECONDS)
+               .pollInterval(1, SECONDS)
+               .until(() -> waitForSnapshotInternal(snapshotName, expectPresent, noTTL));
+    }
+
+    private boolean waitForSnapshotInternal(String snapshotName, boolean expectPresent, boolean noTTL)
+    {
+        NodeToolResult listsnapshots;
+        if (noTTL)
+            listsnapshots = cluster.get(1).nodetoolResult("listsnapshots", "-nt");
+        else
+            listsnapshots = cluster.get(1).nodetoolResult("listsnapshots");
+
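+        // keep only per-snapshot rows by dropping blank lines and the header/footer lines of the listsnapshots output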
+        List<String> lines = Arrays.stream(listsnapshots.getStdout().split("\n"))
+                                   .filter(line -> !line.isEmpty())
+                                   .filter(line -> !line.startsWith("Snapshot Details:") && !line.startsWith("There are no snapshots"))
+                                   .filter(line -> !line.startsWith("Snapshot name") && !line.startsWith("Total TrueDiskSpaceUsed"))
+                                   .collect(toList());
+
+        return expectPresent == lines.stream().anyMatch(line -> line.startsWith(snapshotName));
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/StreamingTest.java b/test/distributed/org/apache/cassandra/distributed/test/StreamingTest.java
index 8fff004..72fd7f9 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/StreamingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/StreamingTest.java
@@ -121,7 +121,10 @@
             // verify on follower's stream session
             MessageStateSinkImpl followerSink = new MessageStateSinkImpl();
             followerSink.messages(initiator, Arrays.asList(STREAM_INIT, PREPARE_SYN, PREPARE_ACK, RECEIVED));
-            followerSink.states(initiator,  Arrays.asList(PREPARING, STREAMING, StreamSession.State.COMPLETE));
+            // why 2 COMPLETEs? There is a race condition with sending COMPLETE where the socket gets closed
+            // by the initiator, which then triggers a ClosedChannelException, which then checks the current state (PREPARING).
+            // To solve this, COMPLETE is set before sending the message and reset when closing the stream.
+            followerSink.states(initiator,  Arrays.asList(PREPARING, STREAMING, StreamSession.State.COMPLETE, StreamSession.State.COMPLETE));
             followerNode.runOnInstance(() -> StreamSession.sink = followerSink);
         }
 
@@ -149,7 +152,7 @@
         @Override
         public void recordState(InetAddressAndPort from, StreamSession.State state)
         {
-            Queue<Integer> states = stateTransitions.get(from.address);
+            Queue<Integer> states = stateTransitions.get(from.getAddress());
             if (states.peek() == null)
                 Assert.fail("Unexpected state " + state);
 
@@ -163,7 +166,7 @@
             if (message == StreamMessage.Type.KEEP_ALIVE)
                 return;
 
-            Queue<Integer> messages = messageSink.get(from.address);
+            Queue<Integer> messages = messageSink.get(from.getAddress());
             if (messages.peek() == null)
                 Assert.fail("Unexpected message " + message);
 
@@ -174,10 +177,10 @@
         @Override
         public void onClose(InetAddressAndPort from)
         {
-            Queue<Integer> states = stateTransitions.get(from.address);
+            Queue<Integer> states = stateTransitions.get(from.getAddress());
             Assert.assertTrue("Missing states: " + states, states.isEmpty());
 
-            Queue<Integer> messages = messageSink.get(from.address);
+            Queue<Integer> messages = messageSink.get(from.getAddress());
             Assert.assertTrue("Missing messages: " + messages, messages.isEmpty());
         }
     }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/TestBaseImpl.java b/test/distributed/org/apache/cassandra/distributed/test/TestBaseImpl.java
index 343ccc8..e97a081 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/TestBaseImpl.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/TestBaseImpl.java
@@ -104,6 +104,16 @@
         return TupleType.buildValue(bbs);
     }
 
+    public static String batch(String... queries)
+    {
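+        // wrap the given CQL statements in a single UNLOGGED BATCH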
+        StringBuilder sb = new StringBuilder();
+        sb.append("BEGIN UNLOGGED BATCH\n");
+        for (String q : queries)
+            sb.append(q).append(";\n");
+        sb.append("APPLY BATCH;");
+        return sb.toString();
+    }
+
     protected void bootstrapAndJoinNode(Cluster cluster)
     {
         IInstanceConfig config = cluster.newInstanceConfig();
@@ -178,7 +188,7 @@
 
     public static void fixDistributedSchemas(Cluster cluster)
     {
-        // These keyspaces are under replicated by default, so must be updated when doing a mulit-node cluster;
+        // These keyspaces are under replicated by default, so must be updated when doing a multi-node cluster;
         // else bootstrap will fail with 'Unable to find sufficient sources for streaming range <range> in keyspace <name>'
         for (String ks : Arrays.asList("system_auth", "system_traces"))
         {
diff --git a/test/distributed/org/apache/cassandra/distributed/test/TombstoneWarningTest.java b/test/distributed/org/apache/cassandra/distributed/test/TombstoneWarningTest.java
new file mode 100644
index 0000000..9406432
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/TombstoneWarningTest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.LogResult;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TombstoneWarningTest extends TestBaseImpl
+{
+    private static final int COMPACTION_TOMBSTONE_WARN = 75;
+    private static final ICluster<IInvokableInstance> cluster;
+
+    static
+    {
+        try
+        {
+            Cluster.Builder builder = Cluster.build(3);
+            builder.withConfig(c -> c.set("compaction_tombstone_warning_threshold", COMPACTION_TOMBSTONE_WARN));
+            cluster = builder.createWithoutStarting();
+        }
+        catch (IOException e)
+        {
+            throw new AssertionError(e);
+        }
+    }
+
+    @BeforeClass
+    public static void setupClass()
+    {
+        cluster.startup();
+    }
+
+    @Before
+    public void setup()
+    {
+        cluster.schemaChange("DROP KEYSPACE IF EXISTS " + KEYSPACE);
+        init(cluster);
+        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+    }
+
+    @Test
+    public void regularTombstonesLogTest()
+    {
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                cluster.coordinator(1).execute(withKeyspace("update %s.tbl set v = null where pk = ? and ck = ?"), ConsistencyLevel.ALL, i, j);
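+        // partition i holds i cell tombstones, so only the partitions above the warn threshold (76..99) are reported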
+        assertTombstoneLogs(99 - COMPACTION_TOMBSTONE_WARN, false);
+    }
+
+    @Test
+    public void rowTombstonesLogTest()
+    {
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                cluster.coordinator(1).execute(withKeyspace("delete from %s.tbl where pk = ? and ck = ?"), ConsistencyLevel.ALL, i, j);
+        assertTombstoneLogs(99 - COMPACTION_TOMBSTONE_WARN, false);
+    }
+
+    @Test
+    public void rangeTombstonesLogTest()
+    {
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                cluster.coordinator(1).execute(withKeyspace("delete from %s.tbl where pk = ? and ck >= ? and ck <= ?"), ConsistencyLevel.ALL, i, j, j);
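+        // each range delete writes two range tombstone markers, so partition i holds 2*i tombstones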
+        assertTombstoneLogs(99 - (COMPACTION_TOMBSTONE_WARN / 2), true);
+    }
+
+    @Test
+    public void ttlTest() throws InterruptedException
+    {
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                cluster.coordinator(1).execute(withKeyspace("insert into %s.tbl (pk, ck, v) values (?, ?, ?) using ttl 1000"), ConsistencyLevel.ALL, i, j, j);
+        assertTombstoneLogs(0, true);
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                cluster.coordinator(1).execute(withKeyspace("update %s.tbl using ttl 1 set v = 33 where pk = ? and ck = ?"), ConsistencyLevel.ALL, i, j);
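+        // let the 1-second TTL expire so the updated cells turn into tombstones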
+        Thread.sleep(1500);
+        assertTombstoneLogs(99 - COMPACTION_TOMBSTONE_WARN, false);
+    }
+
+    @Test
+    public void noTombstonesLogTest()
+    {
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                cluster.coordinator(1).execute(withKeyspace("insert into %s.tbl (pk, ck, v) values (?, ?, ?)"), ConsistencyLevel.ALL, i, j, j);
+        assertTombstoneLogs(0, false);
+    }
+
+    private void assertTombstoneLogs(long expectedCount, boolean isRangeTombstones)
+    {
+        long mark = cluster.get(1).logs().mark();
+        cluster.get(1).flush(KEYSPACE);
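+        // flushing logs one warning per partition over the threshold; capture the tombstone count and partition key from it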
+        String pattern = ".*Writing (?<tscount>\\d+) tombstones to distributed_test_keyspace/tbl:(?<key>\\d+).*";
+        LogResult<List<String>> res = cluster.get(1).logs().grep(mark, pattern);
+        assertEquals(expectedCount, res.getResult().size());
+        Pattern p = Pattern.compile(pattern);
+        for (String r : res.getResult())
+        {
+            Matcher m = p.matcher(r);
+            assertTrue(m.matches());
+            long tombstoneCount = Integer.parseInt(m.group("tscount"));
+            assertTrue(tombstoneCount > COMPACTION_TOMBSTONE_WARN);
+            assertEquals(r, Integer.parseInt(m.group("key")) * (isRangeTombstones ? 2 : 1), tombstoneCount);
+        }
+
+        mark = cluster.get(1).logs().mark();
+        cluster.get(1).forceCompact(KEYSPACE, "tbl");
+        res = cluster.get(1).logs().grep(mark, pattern);
+        assertEquals(expectedCount, res.getResult().size());
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/TopPartitionsTest.java b/test/distributed/org/apache/cassandra/distributed/test/TopPartitionsTest.java
new file mode 100644
index 0000000..9df6726
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/TopPartitionsTest.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.NodeToolResult;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
+import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.distributed.test.PreviewRepairTest.logMark;
+import static org.junit.Assert.assertEquals;
+import static org.psjava.util.AssertStatus.assertTrue;
+
+@RunWith(Parameterized.class)
+public class TopPartitionsTest extends TestBaseImpl
+{
+    public enum Repair
+    {
+        Incremental, Full, FullPreview
+    }
+
+    private static AtomicInteger COUNTER = new AtomicInteger(0);
+    private static Cluster CLUSTER;
+
+    private final Repair repair;
+
+    public TopPartitionsTest(Repair repair)
+    {
+        this.repair = repair;
+    }
+
+    @Parameterized.Parameters(name = "{0}")
+    public static Collection<Object[]> messages()
+    {
+        return Stream.of(Repair.values())
+                     .map(a -> new Object[]{ a })
+                     .collect(Collectors.toList());
+    }
+
+    @BeforeClass
+    public static void setup() throws IOException
+    {
+        CLUSTER = init(Cluster.build(2).withConfig(config ->
+                                                   config.set("min_tracked_partition_size", "0MiB")
+                                                         .set("min_tracked_partition_tombstone_count", 0)
+                                                         .with(GOSSIP, NETWORK))
+                              .start());
+    }
+
+    @AfterClass
+    public static void cleanup()
+    {
+        if (CLUSTER != null)
+            CLUSTER.close();
+    }
+
+    @Before
+    public void before()
+    {
+        setCount(10, 10);
+    }
+
+    @Test
+    public void basicPartitionSizeTest() throws TimeoutException
+    {
+        String name = "tbl" + COUNTER.getAndIncrement();
+        String table = KEYSPACE + "." + name;
+        CLUSTER.schemaChange("create table " + table + " (id int, ck int, t int, primary key (id, ck))");
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                CLUSTER.coordinator(1).execute("insert into " + table + " (id, ck, t) values (?,?,?)", ConsistencyLevel.ALL, i, j, i * j + 100);
+
+        repair();
+        CLUSTER.forEach(inst -> inst.runOnInstance(() -> {
+            // partitions 99 -> 90 are the largest, make sure they are in the map;
+            Map<String, Long> sizes = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopSizePartitions();
+            for (int i = 99; i >= 90; i--)
+                assertTrue(sizes.containsKey(String.valueOf(i)));
+
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            assertEquals(10, tombstones.size());
+            assertTrue(tombstones.values().stream().allMatch(l -> l == 0));
+        }));
+
+        // make sure incremental repair doesn't change anything;
+        CLUSTER.get(1).nodetool("repair", KEYSPACE);
+        CLUSTER.forEach(inst -> inst.runOnInstance(() -> {
+            Map<String, Long> sizes = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopSizePartitions();
+            for (int i = 99; i >= 90; i--)
+                assertTrue(sizes.containsKey(String.valueOf(i)));
+        }));
+    }
+
+    @Test
+    public void configChangeTest() throws TimeoutException
+    {
+        String name = "tbl" + COUNTER.getAndIncrement();
+        String table = KEYSPACE + "." + name;
+        CLUSTER.schemaChange("create table " + table + " (id int, ck int, t int, primary key (id, ck))");
+        for (int i = 0; i < 100; i++)
+        {
+            for (int j = 0; j < i; j++)
+            {
+                CLUSTER.coordinator(1).execute("insert into " + table + " (id, ck, t) values (?,?,?)", ConsistencyLevel.ALL, i, j, i * j + 100);
+                CLUSTER.coordinator(1).execute("DELETE FROM " + table + " where id = ? and ck = ?", ConsistencyLevel.ALL, i, -j);
+            }
+        }
+
+        // top should have 10 elements
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            ColumnFamilyStore store = Keyspace.open(KEYSPACE).getColumnFamilyStore(name);
+            Assertions.assertThat(store.getTopSizePartitions()).hasSize(10);
+            Assertions.assertThat(store.getTopTombstonePartitions()).hasSize(10);
+        });
+
+        // reconfigure and repair; top should have 20 elements
+        setCount(20, 20);
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            ColumnFamilyStore store = Keyspace.open(KEYSPACE).getColumnFamilyStore(name);
+            Assertions.assertThat(store.getTopSizePartitions()).hasSize(20);
+            Assertions.assertThat(store.getTopTombstonePartitions()).hasSize(20);
+        });
+
+        // test shrinking config
+        setCount(5, 5);
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            ColumnFamilyStore store = Keyspace.open(KEYSPACE).getColumnFamilyStore(name);
+            Assertions.assertThat(store.getTopSizePartitions()).hasSize(5);
+            Assertions.assertThat(store.getTopTombstonePartitions()).hasSize(5);
+        });
+    }
+
+    @Test
+    public void basicRowTombstonesTest() throws InterruptedException, TimeoutException
+    {
+        String name = "tbl" + COUNTER.getAndIncrement();
+        String table = KEYSPACE + "." + name;
+        CLUSTER.schemaChange("create table " + table + " (id int, ck int, t int, primary key (id, ck)) with gc_grace_seconds = 1");
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                CLUSTER.coordinator(1).execute("DELETE FROM " + table + " where id = ? and ck = ?", ConsistencyLevel.ALL, i, j);
+        repair();
+        // tombstones not purgeable
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            for (int i = 99; i >= 90; i--)
+            {
+                assertTrue(tombstones.containsKey(String.valueOf(i)));
+                assertEquals(i, (long) tombstones.get(String.valueOf(i)));
+            }
+        });
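+        // gc_grace_seconds is 1, so after 2 seconds these tombstones become purgeable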
+        Thread.sleep(2000);
+        // count purgeable tombstones;
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            for (int i = 99; i >= 90; i--)
+            {
+                assertTrue(tombstones.containsKey(String.valueOf(i)));
+                assertEquals(i, (long) tombstones.get(String.valueOf(i)));
+            }
+        });
+        CLUSTER.get(1).forceCompact(KEYSPACE, name);
+        // all tombstones actually purged;
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            assertTrue(tombstones.values().stream().allMatch(l -> l == 0));
+        });
+    }
+
+    @Test
+    public void basicRegularTombstonesTest() throws InterruptedException, TimeoutException
+    {
+        String name = "tbl" + COUNTER.getAndIncrement();
+        String table = KEYSPACE + "." + name;
+        CLUSTER.schemaChange("create table " + table + " (id int, ck int, t int, primary key (id, ck)) with gc_grace_seconds = 1");
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                CLUSTER.coordinator(1).execute("UPDATE " + table + " SET t = null where id = ? and ck = ?", ConsistencyLevel.ALL, i, j);
+        repair();
+        // tombstones not purgeable
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            for (int i = 99; i >= 90; i--)
+            {
+                assertTrue(tombstones.containsKey(String.valueOf(i)));
+                assertEquals(i, (long) tombstones.get(String.valueOf(i)));
+            }
+        });
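+        // gc_grace_seconds is 1, so after 2 seconds these tombstones become purgeable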
+        Thread.sleep(2000);
+        // count purgeable tombstones;
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            for (int i = 99; i >= 90; i--)
+            {
+                assertTrue(tombstones.containsKey(String.valueOf(i)));
+                assertEquals(i, (long) tombstones.get(String.valueOf(i)));
+            }
+        });
+
+        CLUSTER.get(1).forceCompact(KEYSPACE, name);
+        // all tombstones actually purged;
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            assertTrue(tombstones.values().stream().allMatch(l -> l == 0));
+        });
+    }
+
+    private static void setCount(int size, int tombstone)
+    {
+        CLUSTER.forEach(i -> i.runOnInstance(() -> {
+            DatabaseDescriptor.setMaxTopSizePartitionCount(size);
+            DatabaseDescriptor.setMaxTopTombstonePartitionCount(tombstone);
+        }));
+    }
+
+    private void repair() throws TimeoutException
+    {
+        switch (repair)
+        {
+            case Incremental:
+            {
+                // IR will not populate, as it only looks at non-repaired data
+                // to trigger this patch, we need IR + --validate
+                long[] marks = logMark(CLUSTER);
+                NodeToolResult res = CLUSTER.get(1).nodetoolResult("repair", KEYSPACE);
+                res.asserts().success();
+                PreviewRepairTest.waitLogsRepairFullyFinished(CLUSTER, marks);
+                res = CLUSTER.get(1).nodetoolResult("repair", "--validate", KEYSPACE);
+                res.asserts().success();
+                res.asserts().notificationContains("Repaired data is in sync");
+            }
+            break;
+            case Full:
+            {
+                CLUSTER.get(1).nodetoolResult("repair", "-full", KEYSPACE).asserts().success();
+            }
+            break;
+            case FullPreview:
+            {
+                CLUSTER.get(1).nodetoolResult("repair", "-full", "--preview", KEYSPACE).asserts().success();
+            }
+            break;
+            default:
+                throw new AssertionError("Unknown repair type: " + repair);
+        }
+    }
+
+    @Test
+    public void basicRangeTombstonesTest() throws Throwable
+    {
+        String name = "tbl" + COUNTER.getAndIncrement();
+        String table = KEYSPACE + "." + name;
+        CLUSTER.schemaChange("create table " + table + " (id int, ck int, t int, primary key (id, ck)) with gc_grace_seconds = 1");
+        for (int i = 0; i < 100; i++)
+            for (int j = 0; j < i; j++)
+                CLUSTER.coordinator(1).execute("DELETE FROM " + table + " WHERE id = ? and ck >= ? and ck <= ?", ConsistencyLevel.ALL, i, j, j);
+        repair();
+        // tombstones not purgeable
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            // note that we count range tombstone markers - so the count will be double the number of deletions we did above
+            for (int i = 99; i >= 90; i--)
+                assertEquals(i * 2, (long)tombstones.get(String.valueOf(i)));
+        });
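+        // gc_grace_seconds is 1, so after 2 seconds these tombstones become purgeable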
+        Thread.sleep(2000);
+        // count purgeable tombstones;
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            for (int i = 99; i >= 90; i--)
+                assertEquals(i * 2, (long)tombstones.get(String.valueOf(i)));
+        });
+
+        CLUSTER.get(1).forceCompact(KEYSPACE, name);
+        // all tombstones actually purged;
+        repair();
+        CLUSTER.get(1).runOnInstance(() -> {
+            Map<String, Long> tombstones = Keyspace.open(KEYSPACE).getColumnFamilyStore(name).getTopTombstonePartitions();
+            assertTrue(tombstones.values().stream().allMatch(l -> l == 0));
+        });
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/UnableToParseClientMessageFromBlockedSubnetTest.java b/test/distributed/org/apache/cassandra/distributed/test/UnableToParseClientMessageFromBlockedSubnetTest.java
new file mode 100644
index 0000000..27dcd2c
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/UnableToParseClientMessageFromBlockedSubnetTest.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
+import com.google.common.collect.ImmutableMap;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.LogAction;
+import org.apache.cassandra.transport.Message;
+import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.transport.SimpleClient;
+import org.apache.cassandra.transport.messages.ErrorMessage;
+import org.assertj.core.api.Assertions;
+
+@RunWith(Parameterized.class)
+public class UnableToParseClientMessageFromBlockedSubnetTest extends TestBaseImpl
+{
+    private static Cluster CLUSTER;
+    private static List<String> CLUSTER_EXCLUDED_SUBNETS;
+
+    @SuppressWarnings("DefaultAnnotationParam")
+    @Parameterized.Parameter(0)
+    public List<String> excludeSubnets;
+    @Parameterized.Parameter(1)
+    public ProtocolVersion version;
+
+    @Parameterized.Parameters(name = "domains={0},version={1}")
+    public static Iterable<Object[]> params()
+    {
+        List<Object[]> tests = new ArrayList<>();
+        for (List<String> domains : Arrays.asList(Collections.singletonList("127.0.0.1"), Collections.singletonList("127.0.0.0/31")))
+        {
+            for (ProtocolVersion version : ProtocolVersion.SUPPORTED)
+            {
+                tests.add(new Object[] { domains, version });
+            }
+        }
+        return tests;
+    }
+
+    @BeforeClass
+    public static void setup()
+    {
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    @AfterClass
+    public static void cleanup()
+    {
+        if (CLUSTER != null)
+            CLUSTER.close();
+    }
+
+    @Test
+    public void badMessageCausesProtocolExceptionFromExcludeList() throws IOException, TimeoutException
+    {
+        Cluster cluster = getCluster();
+        // write gibberish to the native protocol
+        IInvokableInstance node = cluster.get(1);
+        // make sure everything is fine at the start
+        Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.ProtocolException")).isEqualTo(0);
+        Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.UnknownException")).isEqualTo(0);
+
+        LogAction logs = node.logs();
+        long mark = logs.mark();
+        try (SimpleClient client = SimpleClient.builder("127.0.0.1", 9042).protocolVersion(version).useBeta().build())
+        {
+            client.connect(false, true);
+
+            // this should return a failed response
+            // disable waiting on protocol errors as that logic was reverted until we can confirm it is 100% safe
+            // right now ProtocolException is thrown for fatal and non-fatal issues, so closing the channel
+            // on non-fatal issues could cause other issues for the cluster
+            byte expectedVersion = (byte) (80 + version.asInt());
+            Message.Response response = client.execute(new UnableToParseClientMessageTest.CustomHeaderMessage(new byte[]{ expectedVersion, 1, 2, 3, 4, 5, 6, 7, 8, 9 }), false);
+            Assertions.assertThat(response).isInstanceOf(ErrorMessage.class);
+
+            logs.watchFor(mark, "address contained in client_error_reporting_exclusions");
+            Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.ProtocolException")).isEqualTo(0);
+            Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.UnknownException")).isEqualTo(0);
+
+            Assertions.assertThat(logs.grep(mark, "Excluding client exception fo").getResult()).hasSize(1);
+            Assertions.assertThat(logs.grep(mark, "Unexpected exception during request").getResult()).isEmpty();
+        }
+    }
+
+    private Cluster getCluster()
+    {
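+        // the cluster is shared across parameterized runs and rebuilt only when the excluded subnets change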
+        if (CLUSTER == null || CLUSTER_EXCLUDED_SUBNETS != excludeSubnets)
+        {
+            if (CLUSTER != null)
+            {
+                CLUSTER.close();
+                CLUSTER = null;
+            }
+            try
+            {
+                CLUSTER = init(Cluster.build(1)
+                                      .withConfig(c -> c.with(Feature.values()).set("client_error_reporting_exclusions", ImmutableMap.of("subnets", excludeSubnets)))
+                                      .start());
+                CLUSTER_EXCLUDED_SUBNETS = excludeSubnets;
+            }
+            catch (IOException e)
+            {
+                throw new UncheckedIOException(e);
+            }
+        }
+        return CLUSTER;
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/UnableToParseClientMessageTest.java b/test/distributed/org/apache/cassandra/distributed/test/UnableToParseClientMessageTest.java
index 332b783..5c976b9 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/UnableToParseClientMessageTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/UnableToParseClientMessageTest.java
@@ -22,7 +22,6 @@
 import java.nio.charset.StandardCharsets;
 import java.util.List;
 import java.util.Objects;
-import java.util.function.Consumer;
 import java.util.function.Predicate;
 
 import org.junit.AfterClass;
@@ -144,13 +143,13 @@
                 // using spinAssertEquals as the metric is updated AFTER replying back to the client
                 // so there is a race where we check the metric before it gets updated
                 Util.spinAssertEquals(currentCount + 1L,
-                        () -> CassandraMetricsRegistry.Metrics.getMeters()
-                                .get("org.apache.cassandra.metrics.Client.ProtocolException")
-                                .getCount(),
-                        10);
+                                      () -> CassandraMetricsRegistry.Metrics.getMeters()
+                                                                            .get("org.apache.cassandra.metrics.Client.ProtocolException")
+                                                                            .getCount(),
+                                      10);
                 Assert.assertEquals(0, CassandraMetricsRegistry.Metrics.getMeters()
-                        .get("org.apache.cassandra.metrics.Client.UnknownException")
-                        .getCount());
+                                                                       .get("org.apache.cassandra.metrics.Client.UnknownException")
+                                                                       .getCount());
             });
             // the logs are noSpamLogger, so each iteration may not produce a new log; only valid if present and not seen before
             List<String> results = node.logs().grep(logStart, "Protocol exception with client networking").getResult();
@@ -165,11 +164,11 @@
     private static long getProtocolExceptionCount(IInvokableInstance node)
     {
         return node.callOnInstance(() -> CassandraMetricsRegistry.Metrics.getMeters()
-                .get("org.apache.cassandra.metrics.Client.ProtocolException")
-                .getCount());
+                                                                         .get("org.apache.cassandra.metrics.Client.ProtocolException")
+                                                                         .getCount());
     }
 
-    private static class CustomHeaderMessage extends OptionsMessage
+    public static class CustomHeaderMessage extends OptionsMessage
     {
         private final ByteBuf headerEncoded;
 
diff --git a/test/distributed/org/apache/cassandra/distributed/test/UpdateSystemAuthAfterDCExpansionTest.java b/test/distributed/org/apache/cassandra/distributed/test/UpdateSystemAuthAfterDCExpansionTest.java
new file mode 100644
index 0000000..8765067
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/UpdateSystemAuthAfterDCExpansionTest.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.Collections;
+import java.util.UUID;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.RoleOptions;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.TokenSupplier;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.progress.ProgressEventType;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.apache.cassandra.auth.AuthKeyspace.ROLES;
+import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
+import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+import static org.apache.cassandra.distributed.shared.NetworkTopology.dcAndRack;
+import static org.apache.cassandra.distributed.shared.NetworkTopology.networkTopology;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/*
+ * Test that system_auth can only be altered to have valid datacenters, and that
+ * all valid datacenters must have at least one replica.
+ *
+ * Create a cluster with one node in dc1 and create a new role
+ * Alter the system_auth keyspace to use NTS with {dc1: 1}
+ * Expand the cluster with a new node in dc2
+ * Alter the system_auth keyspace to use NTS with {dc1: 1, dc2: 1} & repair
+ * Check that the new role is present in the new datacenter
+ * Remove the dc2 node
+ * Check the keyspace can be altered again to remove it
+ */
+public class UpdateSystemAuthAfterDCExpansionTest extends TestBaseImpl
+{
+    static final Logger logger = LoggerFactory.getLogger(UpdateSystemAuthAfterDCExpansionTest.class);
+    static final String username = "shinynewuser";
+
+    static void assertRolePresent(IInstance instance)
+    {
+        assertRows(instance.executeInternal(String.format("SELECT role FROM %s.%s WHERE role = ?",
+                                                          SchemaConstants.AUTH_KEYSPACE_NAME, ROLES),
+                                            username),
+                                            row(username));
+    }
+
+    static void assertRoleAbsent(IInstance instance)
+    {
+        assertRows(instance.executeInternal(String.format("SELECT role FROM %s.%s WHERE role = ?",
+                                                          SchemaConstants.AUTH_KEYSPACE_NAME, ROLES),
+                                            username));
+    }
+
+    static void assertQueryThrowsConfigurationException(Cluster cluster, String query)
+    {
+        cluster.forEach(instance -> {
+            try
+            {
+                // No need to use cluster.schemaChange as we're expecting a failure
+                instance.executeInternal(query);
+                fail("Expected \"" + query + "\" to throw a ConfigurationException, but it completed");
+            }
+            catch (Throwable tr)
+            {
+                assertEquals("org.apache.cassandra.exceptions.ConfigurationException", tr.getClass().getCanonicalName());
+            }
+        });
+    }
+
+    String alterKeyspaceStatement(String ntsOptions)
+    {
+        return String.format("ALTER KEYSPACE " + SchemaConstants.AUTH_KEYSPACE_NAME +
+                             " WITH replication = {'class': 'NetworkTopologyStrategy', %s};", ntsOptions);
+    }
+
+    @BeforeClass
+    static public void beforeClass() throws Throwable
+    {
+        // reduce the time from 10s to prevent "Cannot process role related query as the role manager isn't yet setup."
+        // exception from CassandraRoleManager
+        System.setProperty("cassandra.superuser_setup_delay_ms", "0");
+        TestBaseImpl.beforeClass();
+    }
+
+    public void validateExpandAndContract(String initialDatacenters,
+                                          String expandedDatacenters,
+                                          String beforeDecommissionedDatacenters,
+                                          String afterDecommissionedDatacenters) throws Throwable
+    {
+        try (Cluster cluster = Cluster.build(1)
+                                      .withConfig(config -> config.set("auto_bootstrap", true)
+                                                                      .with(GOSSIP)
+                                                                      .with(NETWORK))
+                                      .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(2))
+                                      .withNodeIdTopology(networkTopology(2,
+                                                                          (nodeid) -> nodeid % 2 == 1 ? dcAndRack("dc1", "rack1")
+                                                                                                      : dcAndRack("dc2", "rack2")
+                                      ))
+                                      .withNodes(1)
+                                      .createWithoutStarting())
+        {
+            logger.debug("Starting cluster with single node in dc1");
+            cluster.startup();
+
+            // currently no way to set authenticated user for coordinator
+            logger.debug("Creating test role");
+            cluster.get(1).runOnInstance(() -> DatabaseDescriptor.getRoleManager().createRole(AuthenticatedUser.SYSTEM_USER,
+                                                                                              RoleResource.role(username),
+                                                                                              new RoleOptions()));
+            assertRolePresent(cluster.get(1));
+
+            logger.debug("Try changing NTS too early before a node from the DC has joined");
+            assertQueryThrowsConfigurationException(cluster, alterKeyspaceStatement("'dc1': '1', 'dc2': '1'"));
+
+            logger.debug("Altering '{}' keyspace to use NTS with {}", SchemaConstants.AUTH_KEYSPACE_NAME, initialDatacenters);
+            cluster.schemaChangeIgnoringStoppedInstances(alterKeyspaceStatement(initialDatacenters));
+
+            logger.debug("Bootstrapping second node in dc2");
+            IInstanceConfig config = cluster.newInstanceConfig();
+            config.set("auto_bootstrap", true);
+            cluster.bootstrap(config).startup();
+
+            // Check that the role is on node1 but has not made it to node2
+            assertRolePresent(cluster.get(1));
+            assertRoleAbsent(cluster.get(2));
+
+            // Update options to make sure a replica is in the remote DC
+            logger.debug("Altering '{}' keyspace to use NTS with dc1 & dc2", SchemaConstants.AUTH_KEYSPACE_NAME);
+            cluster.schemaChangeIgnoringStoppedInstances(alterKeyspaceStatement(expandedDatacenters));
+
+            // repair so the role is replicated everywhere and sstables move to the repaired set via anticompaction
+            logger.debug("Repair system_auth to make sure role is replicated everywhere");
+            cluster.get(1).runOnInstance(() -> {
+                try
+                {
+                    Condition await = newOneTimeCondition();
+                    StorageService.instance.repair(SchemaConstants.AUTH_KEYSPACE_NAME, Collections.emptyMap(), ImmutableList.of((tag, event) -> {
+                        if (event.getType() == ProgressEventType.COMPLETE)
+                            await.signalAll();
+                    })).right.get();
+                    await.await(1L, MINUTES);
+                }
+                catch (Exception e)
+                {
+                    fail("Unexpected exception: " + e);
+                }
+            });
+
+            logger.debug("Check the role is now replicated as expected after repairing");
+            assertRolePresent(cluster.get(1));
+            assertRolePresent(cluster.get(2));
+
+            // Make sure we cannot remove either of the active datacenters
+            logger.debug("Verify that neither active datacenter can be ALTER KEYSPACEd away");
+            assertQueryThrowsConfigurationException(cluster, alterKeyspaceStatement("'dc1': '1'"));
+            assertQueryThrowsConfigurationException(cluster, alterKeyspaceStatement("'dc2': '1'"));
+
+            logger.debug("Starting to decomission dc2");
+            cluster.schemaChangeIgnoringStoppedInstances(alterKeyspaceStatement(beforeDecommissionedDatacenters));
+
+            // Forcibly shutdown and have node2 evicted by FD
+            logger.debug("Force shutdown node2");
+            String node2hostId = cluster.get(2).callOnInstance(() -> StorageService.instance.getLocalHostId());
+            cluster.get(2).shutdown(false);
+
+            logger.debug("removeNode node2");
+            cluster.get(1).runOnInstance(() -> {
+                UUID hostId = UUID.fromString(node2hostId);
+                InetAddressAndPort endpoint = StorageService.instance.getEndpointForHostId(hostId);
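+                // convict node2 in the failure detector so removeNode can proceed without waiting for it to be marked down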
+                FailureDetector.instance.forceConviction(endpoint);
+                StorageService.instance.removeNode(node2hostId);
+            });
+
+            logger.debug("Remove replication to decomissioned dc2");
+            cluster.schemaChangeIgnoringStoppedInstances(alterKeyspaceStatement(afterDecommissionedDatacenters));
+        }
+    }
+
+    @Test
+    public void explicitDCTest() throws Throwable
+    {
+        String initialDatacenters = "'dc1': '1'";
+        String expandedDatacenters = "'dc1': '1', 'dc2': '1'";
+        String beforeDecommissionedDatacenters = "'dc1': '1', 'dc2': '1'";
+        String afterDecommissionedDatacenters = "'dc1': '1'";
+        validateExpandAndContract(initialDatacenters, expandedDatacenters, beforeDecommissionedDatacenters, afterDecommissionedDatacenters);
+    }
+
+    @Test
+    public void replicaFactorTest() throws Throwable
+    {
+        String initialDatacenters = "'replication_factor': '1'";
+        String expandedDatacenters = "'replication_factor': '1'";
+        String beforeDecommissionedDatacenters = "'replication_factor': '1', 'dc2': '1'";
+        String afterDecommissionedDatacenters =  "'dc1': '1'";
+        validateExpandAndContract(initialDatacenters, expandedDatacenters, beforeDecommissionedDatacenters, afterDecommissionedDatacenters);
+    }
+}
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/test/UpgradeSSTablesTest.java b/test/distributed/org/apache/cassandra/distributed/test/UpgradeSSTablesTest.java
new file mode 100644
index 0000000..ee89a81
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/UpgradeSSTablesTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.Set;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.LogAction;
+import org.apache.cassandra.io.sstable.Component;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+
+public class UpgradeSSTablesTest extends TestBaseImpl
+{
+    @Test
+    public void rewriteSSTablesTest() throws Throwable
+    {
+        try (ICluster<IInvokableInstance> cluster = builder().withNodes(1).withDataDirCount(1).start())
+        {
+            for (String compressionBefore : new String[]{ "{'class' : 'LZ4Compressor', 'chunk_length_in_kb' : 32}", "{'enabled': 'false'}" })
+            {
+                for (String command : new String[]{ "upgradesstables", "recompress_sstables" })
+                {
+                    cluster.schemaChange(withKeyspace("DROP KEYSPACE IF EXISTS %s"));
+                    cluster.schemaChange(withKeyspace("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
+
+                    cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v text, PRIMARY KEY (pk, ck)) " +
+                                                      "WITH compression = " + compressionBefore));
+                    cluster.get(1).acceptsOnInstance((String ks) -> {
+                        Keyspace.open(ks).getColumnFamilyStore("tbl").disableAutoCompaction();
+                    }).accept(KEYSPACE);
+
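+                    // grow the payload by doubling it six times (4 chars -> 256 chars) so the sstables have some size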
+                    String blob = "blob";
+                    for (int i = 0; i < 6; i++)
+                        blob += blob;
+
+                    for (int i = 0; i < 100; i++)
+                    {
+                        cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (?,?,?)"),
+                                                       ConsistencyLevel.QUORUM, i, i, blob);
+                    }
+                    cluster.get(1).nodetool("flush", KEYSPACE, "tbl");
+
+                    Assert.assertEquals(0, cluster.get(1).nodetool("upgradesstables", "-a", KEYSPACE, "tbl"));
+                    cluster.schemaChange(withKeyspace("ALTER TABLE %s.tbl WITH compression = {'class' : 'LZ4Compressor', 'chunk_length_in_kb' : 128};"));
+
+                    Thread.sleep(2000); // Make sure timestamp will be different even with 1-second resolution.
+
+                    long maxSoFar = cluster.get(1).appliesOnInstance((String ks) -> {
+                        long maxTs = -1;
+                        ColumnFamilyStore cfs = Keyspace.open(ks).getColumnFamilyStore("tbl");
+                        cfs.disableAutoCompaction();
+                        for (SSTableReader tbl : cfs.getLiveSSTables())
+                        {
+                            maxTs = Math.max(maxTs, tbl.getCreationTimeFor(Component.DATA));
+                        }
+                        return maxTs;
+                    }).apply(KEYSPACE);
+
+                    for (int i = 100; i < 200; i++)
+                    {
+                        cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (?,?,?)"),
+                                                       ConsistencyLevel.QUORUM, i, i, blob);
+                    }
+                    cluster.get(1).nodetool("flush", KEYSPACE, "tbl");
+
+                    LogAction logAction = cluster.get(1).logs();
+                    logAction.mark();
+
+                    long expectedCount = cluster.get(1).appliesOnInstance((String ks, Long maxTs) -> {
+                        long count = 0;
+                        long skipped = 0;
+                        Set<SSTableReader> liveSSTables = Keyspace.open(ks).getColumnFamilyStore("tbl").getLiveSSTables();
+                        assert liveSSTables.size() == 2 : String.format("Expected 2 sstables, but got %d", liveSSTables.size());
+                        for (SSTableReader tbl : liveSSTables)
+                        {
+                            if (tbl.getCreationTimeFor(Component.DATA) <= maxTs)
+                                count++;
+                            else
+                                skipped++;
+                        }
+                        assert skipped > 0;
+                        return count;
+                    }).apply(KEYSPACE, maxSoFar);
+
+                    if (command.equals("upgradesstables"))
+                        Assert.assertEquals(0, cluster.get(1).nodetool("upgradesstables", "-a", "-t", Long.toString(maxSoFar), KEYSPACE, "tbl"));
+                    else
+                        Assert.assertEquals(0, cluster.get(1).nodetool("recompress_sstables", KEYSPACE, "tbl"));
+
+                    Assert.assertFalse(logAction.grep(String.format("%d sstables to", expectedCount)).getResult().isEmpty());
+                }
+            }
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/VirtualTableFromInternodeTest.java b/test/distributed/org/apache/cassandra/distributed/test/VirtualTableFromInternodeTest.java
new file mode 100644
index 0000000..229dd96
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/VirtualTableFromInternodeTest.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.test;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+import static org.apache.cassandra.distributed.util.QueryResultUtil.assertThat;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class VirtualTableFromInternodeTest extends TestBaseImpl
+{
+    private static Cluster CLUSTER;
+
+    @BeforeClass
+    public static void setup() throws IOException
+    {
+        CLUSTER = Cluster.build(2)
+                         .withConfig(c -> c.with(Feature.values()))
+                         .start();
+    }
+
+    @AfterClass
+    public static void cleanup()
+    {
+        if (CLUSTER != null)
+            CLUSTER.close();
+    }
+
+    @Test
+    public void normal()
+    {
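+        // Baseline: query the settings virtual table through the regular CQL read path on the coordinator.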
+        assertThat(CLUSTER.coordinator(1).executeWithResult("SELECT * FROM system_views.settings", ConsistencyLevel.ONE))
+        .hasSizeGreaterThan(2)
+        .contains("rpc_address", "127.0.0.1")
+        .contains("broadcast_address", "127.0.0.1");
+
+        assertThat(CLUSTER.coordinator(1).executeWithResult("SELECT * FROM system_views.settings WHERE name=?", ConsistencyLevel.ONE, "rpc_address"))
+        .isEqualTo("rpc_address", "127.0.0.1");
+
+        assertThat(CLUSTER.coordinator(1).executeWithResult("SELECT * FROM system_views.settings WHERE name IN (?, ?)", ConsistencyLevel.ONE, "rpc_address", "broadcast_address"))
+        .contains("rpc_address", "127.0.0.1")
+        .contains("broadcast_address", "127.0.0.1")
+        .hasSize(2);
+    }
+
+    @Test
+    public void readCommandAccessVirtualTable()
+    {
+        CLUSTER.get(1).runOnInstance(() -> {
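+            // Issue the read against each live member via an internode read command; the returned settings
+            // should reflect that member's own addresses, showing the virtual table was read on the remote node.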
+            boolean didWork = false;
+            for (InetAddressAndPort address : Gossiper.instance.getLiveMembers())
+            {
+                didWork = true;
+                UntypedResultSet rs = QueryProcessor.executeAsync(address, "SELECT * FROM system_views.settings")
+                                                    .syncUninterruptibly().getNow();
+                assertThat(rs.isEmpty()).isFalse();
+                for (UntypedResultSet.Row row : rs)
+                {
+                    String name = row.getString("name");
+                    switch (name)
+                    {
+                        case "broadcast_address":
+                        case "rpc_address":
+                            assertThat(row.getString("value")).isEqualTo(address.getAddress().getHostAddress());
+                            break;
+                    }
+                }
+            }
+            assertThat(didWork).isTrue();
+        });
+    }
+
+    @Test
+    public void readCommandAccessVirtualTableSinglePartition()
+    {
+        CLUSTER.get(1).runOnInstance(() -> {
+            boolean didWork = false;
+            for (InetAddressAndPort address : Gossiper.instance.getLiveMembers())
+            {
+                didWork = true;
+                UntypedResultSet rs = QueryProcessor.executeAsync(address, "SELECT * FROM system_views.settings WHERE name=?", "rpc_address")
+                                                    .syncUninterruptibly().getNow();
+                assertThat(rs.isEmpty()).isFalse();
+                assertThat(rs.one().getString("value")).isEqualTo(address.getAddress().getHostAddress());
+            }
+            assertThat(didWork).isTrue();
+        });
+    }
+
+    @Test
+    public void readCommandAccessVirtualTableMultiplePartition()
+    {
+        CLUSTER.get(1).runOnInstance(() -> {
+            boolean didWork = false;
+            for (InetAddressAndPort address : Gossiper.instance.getLiveMembers())
+            {
+                didWork = true;
+                UntypedResultSet rs = QueryProcessor.executeAsync(address, "SELECT * FROM system_views.settings WHERE name IN (?, ?)", "rpc_address", "broadcast_address")
+                                                    .syncUninterruptibly().getNow();
+                assertThat(rs.isEmpty()).isFalse();
+                Set<String> columns = new HashSet<>();
+                for (UntypedResultSet.Row row : rs)
+                {
+                    columns.add(row.getString("name"));
+                    assertThat(row.getString("value")).isEqualTo(address.getAddress().getHostAddress());
+                }
+                assertThat(columns).isEqualTo(ImmutableSet.of("rpc_address", "broadcast_address"));
+            }
+            assertThat(didWork).isTrue();
+        });
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailCollectionSizeOnSSTableWriteTest.java b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailCollectionSizeOnSSTableWriteTest.java
new file mode 100644
index 0000000..6b7daef
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailCollectionSizeOnSSTableWriteTest.java
@@ -0,0 +1,437 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.guardrails;
+
+import java.io.IOException;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.SimpleStatement;
+import org.apache.cassandra.db.guardrails.Guardrails;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+
+import static java.nio.ByteBuffer.allocate;
+
+/**
+ * Tests the guardrail for the size of collections, {@link Guardrails#collectionSize}.
+ * <p>
+ * This test only includes the activation of the guardrail during sstable writes; all other cases are covered by
+ * {@link org.apache.cassandra.db.guardrails.GuardrailCollectionSizeTest}.
+ */
+public class GuardrailCollectionSizeOnSSTableWriteTest extends GuardrailTester
+{
+    private static final int NUM_NODES = 2;
+
+    private static final int WARN_THRESHOLD = 1024;
+    private static final int FAIL_THRESHOLD = WARN_THRESHOLD * 4;
+
+    private static Cluster cluster;
+    private static com.datastax.driver.core.Cluster driverCluster;
+    private static Session driverSession;
+
+    @BeforeClass
+    public static void setupCluster() throws IOException
+    {
+        cluster = init(Cluster.build(NUM_NODES)
+                              .withConfig(c -> c.with(Feature.GOSSIP, Feature.NATIVE_PROTOCOL)
+                                                .set("collection_size_warn_threshold", WARN_THRESHOLD + "B")
+                                                .set("collection_size_fail_threshold", FAIL_THRESHOLD + "B"))
+                              .start());
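+        // Disable auto-compaction so the collection size guardrail is only evaluated when the tests
+        // explicitly flush or compact.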
+        cluster.disableAutoCompaction(KEYSPACE);
+        driverCluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
+        driverSession = driverCluster.connect();
+    }
+
+    @AfterClass
+    public static void teardownCluster()
+    {
+        if (driverSession != null)
+            driverSession.close();
+
+        if (driverCluster != null)
+            driverCluster.close();
+
+        if (cluster != null)
+            cluster.close();
+    }
+
+    @Override
+    protected Cluster getCluster()
+    {
+        return cluster;
+    }
+
+    @Test
+    public void testSetSize() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v set<blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", set());
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", set(allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (3, ?)", set(allocate(WARN_THRESHOLD / 2)));
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", set(allocate(WARN_THRESHOLD)));
+        assertWarnedOnFlush(warnMessage("4"));
+
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", set(allocate(WARN_THRESHOLD / 4), allocate(WARN_THRESHOLD * 3 / 4)));
+        assertWarnedOnFlush(warnMessage("5"));
+
+        execute("INSERT INTO %s (k, v) VALUES (6, ?)", set(allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("6"));
+
+        execute("INSERT INTO %s (k, v) VALUES (7, ?)", set(allocate(FAIL_THRESHOLD / 4), allocate(FAIL_THRESHOLD * 3 / 4)));
+        assertFailedOnFlush(failMessage("7"));
+    }
+
+    @Test
+    public void testSetSizeFrozen()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v frozen<set<blob>>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", set());
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", set(allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", set(allocate(WARN_THRESHOLD)));
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", set(allocate(FAIL_THRESHOLD)));
+
+        // the size of frozen collections is not checked during sstable write
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testSetSizeWithUpdates()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v set<blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, ?)", set(allocate(1)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 0", set(allocate(1)));
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", set(allocate(WARN_THRESHOLD / 4)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 1", set(allocate(WARN_THRESHOLD * 3 / 4)));
+        assertWarnedOnFlush(warnMessage("1"));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", set(allocate(FAIL_THRESHOLD / 4)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 2", set(allocate(FAIL_THRESHOLD * 3 / 4)));
+        assertFailedOnFlush(failMessage("2"));
+
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", set(allocate(FAIL_THRESHOLD)));
+        execute("UPDATE %s SET v = v - ? WHERE k = 4", set(allocate(FAIL_THRESHOLD)));
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testSetSizeAfterCompaction() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v set<blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, ?)", set(allocate(1)));
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + ? WHERE k = 0", set(allocate(1)));
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", set(allocate(WARN_THRESHOLD * 3 / 4)));
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + ? WHERE k = 1", set(allocate(WARN_THRESHOLD / 4)));
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("1"));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", set(allocate(FAIL_THRESHOLD * 3 / 4)));
+        assertWarnedOnFlush(warnMessage("2"));
+        execute("UPDATE %s SET v = v + ? WHERE k = 2", set(allocate(FAIL_THRESHOLD / 4)));
+        assertWarnedOnFlush(warnMessage("2"));
+        assertFailedOnCompact(failMessage("2"));
+
+        execute("INSERT INTO %s (k, v) VALUES (3, ?)", set(allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("3"));
+        execute("UPDATE %s SET v = v - ? WHERE k = 3", set(allocate(FAIL_THRESHOLD)));
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+    }
+
+    @Test
+    public void testListSize() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v list<blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", list());
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", list(allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (3, ?)", list(allocate(WARN_THRESHOLD / 2)));
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", list(allocate(WARN_THRESHOLD)));
+        assertWarnedOnFlush(warnMessage("4"));
+
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", list(allocate(WARN_THRESHOLD / 2), allocate(WARN_THRESHOLD / 2)));
+        assertWarnedOnFlush(warnMessage("5"));
+
+        execute("INSERT INTO %s (k, v) VALUES (6, ?)", list(allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("6"));
+
+        execute("INSERT INTO %s (k, v) VALUES (7, ?)", list(allocate(FAIL_THRESHOLD / 2), allocate(FAIL_THRESHOLD / 2)));
+        assertFailedOnFlush(failMessage("7"));
+    }
+
+    @Test
+    public void testListSizeFrozen()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v frozen<list<blob>>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", list());
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", list(allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", list(allocate(WARN_THRESHOLD)));
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", list(allocate(FAIL_THRESHOLD)));
+
+        // the size of frozen collections is not checked during sstable write
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testListSizeWithUpdates()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v list<blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, ?)", list(allocate(1)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 0", list(allocate(1)));
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", list(allocate(WARN_THRESHOLD / 2)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 1", list(allocate(WARN_THRESHOLD / 2)));
+        assertWarnedOnFlush(warnMessage("1"));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", list(allocate(FAIL_THRESHOLD / 2)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 2", list(allocate(FAIL_THRESHOLD / 2)));
+        assertFailedOnFlush(failMessage("2"));
+
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", list(allocate(FAIL_THRESHOLD)));
+        execute("UPDATE %s SET v = v - ? WHERE k = 4", list(allocate(FAIL_THRESHOLD)));
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testListSizeAfterCompaction() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v list<blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, ?)", list(allocate(1)));
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + ? WHERE k = 0", list(allocate(1)));
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", list(allocate(WARN_THRESHOLD / 2)));
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + ? WHERE k = 1", list(allocate(WARN_THRESHOLD / 2)));
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("1"));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", list(allocate(FAIL_THRESHOLD / 2)));
+        assertWarnedOnFlush(warnMessage("2"));
+        execute("UPDATE %s SET v = v + ? WHERE k = 2", list(allocate(FAIL_THRESHOLD / 2)));
+        assertNotWarnedOnFlush();
+        assertFailedOnCompact(failMessage("2"));
+
+        execute("INSERT INTO %s (k, v) VALUES (3, ?)", list(allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("3"));
+        execute("UPDATE %s SET v[0] = ? WHERE k = 3", allocate(1));
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+    }
+
+    @Test
+    public void testMapSize() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v map<blob, blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", map());
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", map(allocate(1), allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (3, ?)", map(allocate(1), allocate(WARN_THRESHOLD / 2)));
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", map(allocate(WARN_THRESHOLD / 2), allocate(1)));
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", map(allocate(WARN_THRESHOLD), allocate(1)));
+        assertWarnedOnFlush(warnMessage("5"));
+
+        execute("INSERT INTO %s (k, v) VALUES (6, ?)", map(allocate(1), allocate(WARN_THRESHOLD)));
+        assertWarnedOnFlush(warnMessage("6"));
+
+        execute("INSERT INTO %s (k, v) VALUES (7, ?)", map(allocate(WARN_THRESHOLD), allocate(WARN_THRESHOLD)));
+        assertWarnedOnFlush(warnMessage("7"));
+
+        execute("INSERT INTO %s (k, v) VALUES (8, ?)", map(allocate(FAIL_THRESHOLD), allocate(1)));
+        assertFailedOnFlush(failMessage("8"));
+
+        execute("INSERT INTO %s (k, v) VALUES (9, ?)", map(allocate(1), allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("9"));
+
+        execute("INSERT INTO %s (k, v) VALUES (10, ?)", map(allocate(FAIL_THRESHOLD), allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("10"));
+    }
+
+    @Test
+    public void testMapSizeFrozen()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v frozen<map<blob, blob>>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", map());
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", map(allocate(1), allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", map(allocate(1), allocate(WARN_THRESHOLD)));
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", map(allocate(WARN_THRESHOLD), allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (6, ?)", map(allocate(WARN_THRESHOLD), allocate(WARN_THRESHOLD)));
+        execute("INSERT INTO %s (k, v) VALUES (7, ?)", map(allocate(1), allocate(FAIL_THRESHOLD)));
+        execute("INSERT INTO %s (k, v) VALUES (8, ?)", map(allocate(FAIL_THRESHOLD), allocate(1)));
+        execute("INSERT INTO %s (k, v) VALUES (9, ?)", map(allocate(FAIL_THRESHOLD), allocate(FAIL_THRESHOLD)));
+
+        // the size of frozen collections is not checked during sstable write
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testMapSizeWithUpdates()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v map<blob, blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, ?)", map(allocate(1), allocate(1)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 0", map(allocate(1), allocate(1)));
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", map(allocate(1), allocate(WARN_THRESHOLD / 2)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 1", map(allocate(2), allocate(WARN_THRESHOLD / 2)));
+        assertWarnedOnFlush(warnMessage("1"));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", map(allocate(WARN_THRESHOLD / 4), allocate(1)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 2", map(allocate(WARN_THRESHOLD * 3 / 4), allocate(1)));
+        assertWarnedOnFlush(warnMessage("2"));
+
+        execute("INSERT INTO %s (k, v) VALUES (3, ?)", map(allocate(WARN_THRESHOLD / 4), allocate(WARN_THRESHOLD / 4)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 3", map(allocate(WARN_THRESHOLD / 4 + 1), allocate(WARN_THRESHOLD / 4)));
+        assertWarnedOnFlush(warnMessage("3"));
+
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", map(allocate(1), allocate(FAIL_THRESHOLD / 2)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 4", map(allocate(2), allocate(FAIL_THRESHOLD / 2)));
+        assertFailedOnFlush(failMessage("4"));
+
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", map(allocate(FAIL_THRESHOLD / 4), allocate(1)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 5", map(allocate(FAIL_THRESHOLD * 3 / 4), allocate(1)));
+        assertFailedOnFlush(failMessage("5"));
+
+        execute("INSERT INTO %s (k, v) VALUES (6, ?)", map(allocate(FAIL_THRESHOLD / 4), allocate(FAIL_THRESHOLD / 4)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 6", map(allocate(FAIL_THRESHOLD / 4 + 1), allocate(FAIL_THRESHOLD / 4)));
+        assertFailedOnFlush(failMessage("6"));
+    }
+
+    @Test
+    public void testMapSizeAfterCompaction()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v map<blob, blob>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, ?)", map(allocate(1), allocate(1)));
+        execute("UPDATE %s SET v = v + ? WHERE k = 0", map(allocate(1), allocate(1)));
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, ?)", map(allocate(1), allocate(WARN_THRESHOLD / 2)));
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + ? WHERE k = 1", map(allocate(2), allocate(WARN_THRESHOLD / 2)));
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("1"));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, ?)", map(allocate(WARN_THRESHOLD / 4), allocate(1)));
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + ? WHERE k = 2", map(allocate(WARN_THRESHOLD * 3 / 4), allocate(1)));
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("2"));
+
+        execute("INSERT INTO %s (k, v) VALUES (3, ?)", map(allocate(WARN_THRESHOLD / 4), allocate(WARN_THRESHOLD / 4)));
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + ? WHERE k = 3", map(allocate(WARN_THRESHOLD / 4 + 1), allocate(WARN_THRESHOLD / 4)));
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("3"));
+
+        execute("INSERT INTO %s (k, v) VALUES (4, ?)", map(allocate(1), allocate(FAIL_THRESHOLD / 2)));
+        assertWarnedOnFlush(warnMessage("4"));
+        execute("UPDATE %s SET v = v + ? WHERE k = 4", map(allocate(2), allocate(FAIL_THRESHOLD / 2)));
+        assertWarnedOnFlush(warnMessage("4"));
+        assertFailedOnCompact(failMessage("4"));
+
+        execute("INSERT INTO %s (k, v) VALUES (5, ?)", map(allocate(FAIL_THRESHOLD / 4), allocate(1)));
+        assertWarnedOnFlush(warnMessage("5"));
+        execute("UPDATE %s SET v = v + ? WHERE k = 5", map(allocate(FAIL_THRESHOLD * 3 / 4), allocate(1)));
+        assertWarnedOnFlush(warnMessage("5"));
+        assertFailedOnCompact(failMessage("5"));
+
+        execute("INSERT INTO %s (k, v) VALUES (6, ?)", map(allocate(FAIL_THRESHOLD / 4), allocate(FAIL_THRESHOLD / 4)));
+        assertWarnedOnFlush(warnMessage("6"));
+        execute("UPDATE %s SET v = v + ? WHERE k = 6", map(allocate(FAIL_THRESHOLD / 4 + 1), allocate(FAIL_THRESHOLD / 4)));
+        assertWarnedOnFlush(warnMessage("6"));
+        assertFailedOnCompact(failMessage("6"));
+    }
+
+    @Test
+    public void testCompositePartitionKey()
+    {
+        schemaChange("CREATE TABLE %s (k1 int, k2 text, v set<blob>, PRIMARY KEY((k1, k2)))");
+
+        execute("INSERT INTO %s (k1, k2, v) VALUES (0, 'a', ?)", set(allocate(WARN_THRESHOLD)));
+        assertWarnedOnFlush(warnMessage("(0, 'a')"));
+
+        execute("INSERT INTO %s (k1, k2, v) VALUES (1, 'b', ?)", set(allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("(1, 'b')"));
+    }
+
+    @Test
+    public void testCompositeClusteringKey()
+    {
+        schemaChange("CREATE TABLE %s (k int, c1 int, c2 text, v set<blob>, PRIMARY KEY(k, c1, c2))");
+
+        execute("INSERT INTO %s (k, c1, c2, v) VALUES (1, 10, 'a', ?)", set(allocate(WARN_THRESHOLD)));
+        assertWarnedOnFlush(warnMessage("(1, 10, 'a')"));
+
+        execute("INSERT INTO %s (k, c1, c2, v) VALUES (2, 20, 'b', ?)", set(allocate(FAIL_THRESHOLD)));
+        assertFailedOnFlush(failMessage("(2, 20, 'b')"));
+    }
+
+    private void execute(String query, Object... args)
+    {
+        SimpleStatement stmt = new SimpleStatement(format(query), args);
+        stmt.setConsistencyLevel(com.datastax.driver.core.ConsistencyLevel.ALL);
+        driverSession.execute(stmt);
+    }
+
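+    // The exact collection size reported by the guardrail is not known in advance, so both the warn and the
+    // fail assertions only match the common prefix of the message.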
+    private String warnMessage(String key)
+    {
+        return String.format("Detected collection v in row %s in table %s of size", key, qualifiedTableName);
+    }
+
+    private String failMessage(String key)
+    {
+        return String.format("Detected collection v in row %s in table %s of size", key, qualifiedTableName);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailDiskUsageTest.java b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailDiskUsageTest.java
new file mode 100644
index 0000000..e1868e2
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailDiskUsageTest.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.guardrails;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.exceptions.InvalidQueryException;
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import net.bytebuddy.implementation.bind.annotation.SuperCall;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.db.guardrails.Guardrails;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.service.disk.usage.DiskUsageBroadcaster;
+import org.apache.cassandra.service.disk.usage.DiskUsageMonitor;
+import org.apache.cassandra.service.disk.usage.DiskUsageState;
+import org.assertj.core.api.Assertions;
+
+import static net.bytebuddy.matcher.ElementMatchers.named;
+
+/**
+ * Tests the guardrails for disk usage, {@link Guardrails#localDataDiskUsage} and {@link Guardrails#replicaDiskUsage}.
+ */
+public class GuardrailDiskUsageTest extends GuardrailTester
+{
+    private static final int NUM_ROWS = 100;
+
+    private static final String WARN_MESSAGE = "Replica disk usage exceeds warning threshold";
+    private static final String FAIL_MESSAGE = "Write request failed because disk usage exceeds failure threshold";
+
+    private static Cluster cluster;
+    private static com.datastax.driver.core.Cluster driverCluster;
+    private static Session driverSession;
+
+    @BeforeClass
+    public static void setupCluster() throws IOException
+    {
+        // speed up the task that calculates and propagates the disk usage info
+        CassandraRelevantProperties.DISK_USAGE_MONITOR_INTERVAL_MS.setInt(100);
+
+        // build a 2-node cluster with RF=1
+        cluster = init(Cluster.build(2)
+                              .withInstanceInitializer(DiskStateInjection::install)
+                              .withConfig(c -> c.with(Feature.GOSSIP, Feature.NATIVE_PROTOCOL)
+                                                .set("data_disk_usage_percentage_warn_threshold", 98)
+                                                .set("data_disk_usage_percentage_fail_threshold", 99)
+                                                .set("authenticator", "PasswordAuthenticator"))
+                              .start(), 1);
+
+        // create a regular user, since the default superuser is excluded from guardrails
+        com.datastax.driver.core.Cluster.Builder builder = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1");
+        try (com.datastax.driver.core.Cluster c = builder.withCredentials("cassandra", "cassandra").build();
+             Session session = c.connect())
+        {
+            session.execute("CREATE USER test WITH PASSWORD 'test'");
+        }
+
+        // connect using the new regular user; we use the driver to get access to the client warnings
+        driverCluster = builder.withCredentials("test", "test").build();
+        driverSession = driverCluster.connect();
+    }
+
+    @AfterClass
+    public static void teardownCluster()
+    {
+        if (driverSession != null)
+            driverSession.close();
+
+        if (driverCluster != null)
+            driverCluster.close();
+
+        if (cluster != null)
+            cluster.close();
+    }
+
+    @Override
+    protected Cluster getCluster()
+    {
+        return cluster;
+    }
+
+    @Test
+    public void testDiskUsage() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+        String insert = format("INSERT INTO %s(k, v) VALUES (?, 0)");
+
+        // With both nodes in SPACIOUS state, we can write without warnings nor failures
+        for (int i = 0; i < NUM_ROWS; i++)
+        {
+            ResultSet rs = driverSession.execute(insert, i);
+            Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
+        }
+
+        // If the disk usage information about one node becomes unavailable, we can still write without warnings
+        DiskStateInjection.setState(getCluster(), 2, DiskUsageState.NOT_AVAILABLE);
+        for (int i = 0; i < NUM_ROWS; i++)
+        {
+            ResultSet rs = driverSession.execute(insert, i);
+            Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
+        }
+
+        // If one node becomes STUFFED, the writes targeting that node will raise a warning, while the writes targeting
+        // the node that remains SPACIOUS will keep succeeding without warnings
+        DiskStateInjection.setState(getCluster(), 2, DiskUsageState.STUFFED);
+        int numWarnings = 0;
+        for (int i = 0; i < NUM_ROWS; i++)
+        {
+            ResultSet rs = driverSession.execute(insert, i);
+
+            List<String> warnings = rs.getExecutionInfo().getWarnings();
+            if (!warnings.isEmpty())
+            {
+                Assertions.assertThat(warnings).hasSize(1).anyMatch(s -> s.contains(WARN_MESSAGE));
+                numWarnings++;
+            }
+        }
+        Assertions.assertThat(numWarnings).isGreaterThan(0).isLessThan(NUM_ROWS);
+
+        // If the STUFFED node becomes FULL, the writes targeting that node will fail, while the writes targeting
+        // the node that remains SPACIOUS will keep succeeding without warnings
+        DiskStateInjection.setState(getCluster(), 2, DiskUsageState.FULL);
+        int numFailures = 0;
+        for (int i = 0; i < NUM_ROWS; i++)
+        {
+            try
+            {
+                ResultSet rs = driverSession.execute(insert, i);
+                Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
+            }
+            catch (InvalidQueryException e)
+            {
+                Assertions.assertThat(e).hasMessageContaining(FAIL_MESSAGE);
+                numFailures++;
+            }
+        }
+        Assertions.assertThat(numFailures).isGreaterThan(0).isLessThan(NUM_ROWS);
+
+        // If both nodes are FULL, all queries will fail
+        DiskStateInjection.setState(getCluster(), 1, DiskUsageState.FULL);
+        for (int i = 0; i < NUM_ROWS; i++)
+        {
+            try
+            {
+                driverSession.execute(insert, i);
+                Assertions.fail("Should have failed");
+            }
+            catch (InvalidQueryException e)
+            {
+                numFailures++;
+            }
+        }
+
+        // Finally, if both nodes go back to SPACIOUS, all queries will succeed again
+        DiskStateInjection.setState(getCluster(), 1, DiskUsageState.SPACIOUS);
+        DiskStateInjection.setState(getCluster(), 2, DiskUsageState.SPACIOUS);
+        for (int i = 0; i < NUM_ROWS; i++)
+        {
+            ResultSet rs = driverSession.execute(insert, i);
+            Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
+        }
+    }
+
+    /**
+     * ByteBuddy rule to override the disk usage state of each node.
+     */
+    public static class DiskStateInjection
+    {
+        public static volatile DiskUsageState state = DiskUsageState.SPACIOUS;
+
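+        // Rebase DiskUsageMonitor in the instance's classloader so that calls to getState() are redirected to
+        // the static getState() below, letting the test dictate the disk usage state reported by each node.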
+        private static void install(ClassLoader cl, int node)
+        {
+            new ByteBuddy().rebase(DiskUsageMonitor.class)
+                           .method(named("getState"))
+                           .intercept(MethodDelegation.to(DiskStateInjection.class))
+                           .make()
+                           .load(cl, ClassLoadingStrategy.Default.INJECTION);
+        }
+
+        public static void setState(Cluster cluster, int node, DiskUsageState state)
+        {
+            IInvokableInstance instance = cluster.get(node);
+            instance.runOnInstance(() -> DiskStateInjection.state = state);
+
+            // wait for disk usage state propagation, all nodes must see it
+            InetAddressAndPort endpoint = InetAddressAndPort.getByAddress(instance.broadcastAddress());
+            cluster.forEach(n -> n.runOnInstance(() -> Util.spinAssertEquals(state, () -> DiskUsageBroadcaster.instance.state(endpoint), 60)));
+        }
+
+        @SuppressWarnings("unused")
+        public static DiskUsageState getState(long usagePercentage, @SuperCall Callable<DiskUsageState> zuper)
+        {
+            return state;
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailItemsPerCollectionOnSSTableWriteTest.java b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailItemsPerCollectionOnSSTableWriteTest.java
new file mode 100644
index 0000000..260752e
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailItemsPerCollectionOnSSTableWriteTest.java
@@ -0,0 +1,339 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.guardrails;
+
+import java.io.IOException;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.db.guardrails.Guardrails;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.ICoordinator;
+
+/**
+ * Tests the guardrail for the number of items in a collection, {@link Guardrails#itemsPerCollection}.
+ * <p>
+ * This test only includes the activation of the guardrail during sstable writes; all other cases are covered by
+ * {@link org.apache.cassandra.db.guardrails.GuardrailItemsPerCollectionTest}.
+ */
+public class GuardrailItemsPerCollectionOnSSTableWriteTest extends GuardrailTester
+{
+    private static final int NUM_NODES = 2;
+
+    private static final int WARN_THRESHOLD = 2;
+    private static final int FAIL_THRESHOLD = 4;
+
+    private static Cluster cluster;
+    private static ICoordinator coordinator;
+
+    @BeforeClass
+    public static void setupCluster() throws IOException
+    {
+        cluster = init(Cluster.build(NUM_NODES)
+                              .withConfig(c -> c.set("items_per_collection_warn_threshold", WARN_THRESHOLD)
+                                                .set("items_per_collection_fail_threshold", FAIL_THRESHOLD))
+                              .start());
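+        // Disable auto-compaction so the items-per-collection guardrail is only evaluated when the tests
+        // explicitly flush or compact.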
+        cluster.disableAutoCompaction(KEYSPACE);
+        coordinator = cluster.coordinator(1);
+    }
+
+    @AfterClass
+    public static void teardownCluster()
+    {
+        if (cluster != null)
+            cluster.close();
+    }
+
+    @Override
+    protected Cluster getCluster()
+    {
+        return cluster;
+    }
+
+    @Test
+    public void testSetSize() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v set<int>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, {1})");
+        execute("INSERT INTO %s (k, v) VALUES (2, {1, 2})");
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (3, {1, 2, 3})");
+        execute("INSERT INTO %s (k, v) VALUES (4, {1, 2, 3, 4})");
+        assertWarnedOnFlush(warnMessage("3", 3), warnMessage("4", 4));
+
+        execute("INSERT INTO %s (k, v) VALUES (5, {1, 2, 3, 4, 5})");
+        execute("INSERT INTO %s (k, v) VALUES (6, {1, 2, 3, 4, 5, 6})");
+        assertFailedOnFlush(failMessage("5", 5), failMessage("6", 6));
+    }
+
+    @Test
+    public void testSetSizeFrozen()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v frozen<set<int>>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (3, {1, 2, 3})");
+        execute("INSERT INTO %s (k, v) VALUES (5, {1, 2, 3, 4, 5})");
+
+        // the size of frozen collections is not checked during sstable write
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testSetSizeWithUpdates()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v set<int>)");
+
+        execute("UPDATE %s SET v = v + {1, 2} WHERE k = 1");
+        execute("UPDATE %s SET v = v - {1, 2} WHERE k = 2");
+        assertNotWarnedOnFlush();
+
+        execute("UPDATE %s SET v = v + {1, 2, 3} WHERE k = 3");
+        execute("UPDATE %s SET v = v - {1, 2, 3} WHERE k = 4");
+        assertWarnedOnFlush(warnMessage("3", 3));
+
+        execute("UPDATE %s SET v = v + {1, 2, 3, 4, 5} WHERE k = 5");
+        execute("UPDATE %s SET v = v - {1, 2, 3, 4, 5} WHERE k = 6");
+        assertFailedOnFlush(failMessage("5", 5));
+    }
+
+    @Test
+    public void testSetSizeAfterCompaction() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v set<int>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, {1})");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + {2} WHERE k = 0");
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, {1})");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + {2, 3} WHERE k = 1");
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("1", 3));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, {1, 2})");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + {3, 4, 5} WHERE k = 2");
+        assertWarnedOnFlush(warnMessage("2", 3));
+        assertFailedOnCompact(failMessage("2", 5));
+    }
+
+    @Test
+    public void testListSize() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v list<int>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, [1])");
+        execute("INSERT INTO %s (k, v) VALUES (2, [1, 2])");
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (3, [1, 2, 3])");
+        execute("INSERT INTO %s (k, v) VALUES (4, [1, 2, 3, 4])");
+        assertWarnedOnFlush(warnMessage("3", 3), warnMessage("4", 4));
+
+        execute("INSERT INTO %s (k, v) VALUES (5, [1, 2, 3, 4, 5])");
+        execute("INSERT INTO %s (k, v) VALUES (6, [1, 2, 3, 4, 5, 6])");
+        assertFailedOnFlush(failMessage("5", 5), failMessage("6", 6));
+    }
+
+    @Test
+    public void testListSizeFrozen() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v frozen<list<int>>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (3, [1, 2, 3])");
+        execute("INSERT INTO %s (k, v) VALUES (5, [1, 2, 3, 4, 5])");
+
+        // the size of frozen collections is not checked during sstable write
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testListSizeWithUpdates()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v list<int>)");
+
+        execute("UPDATE %s SET v = v + [1, 2] WHERE k = 1");
+        execute("UPDATE %s SET v = v - [1, 2] WHERE k = 2");
+        assertNotWarnedOnFlush();
+
+        execute("UPDATE %s SET v = v + [1, 2, 3] WHERE k = 3");
+        execute("UPDATE %s SET v = v - [1, 2, 3] WHERE k = 4");
+        assertWarnedOnFlush(warnMessage("3", 3));
+
+        execute("UPDATE %s SET v = v + [1, 2, 3, 4, 5] WHERE k = 5");
+        execute("UPDATE %s SET v = v - [1, 2, 3, 4, 5] WHERE k = 6");
+        assertFailedOnFlush(failMessage("5", 5));
+    }
+
+    @Test
+    public void testListSizeAfterCompaction() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v list<int>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, [1])");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + [2] WHERE k = 0");
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, [1])");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + [2, 3] WHERE k = 1");
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("1", 3));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, [1, 2])");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + [3, 4, 5] WHERE k = 2");
+        assertWarnedOnFlush(warnMessage("2", 3));
+        assertFailedOnCompact(failMessage("2", 5));
+
+        execute("INSERT INTO %s (k, v) VALUES (3, [1, 2, 3])");
+        assertWarnedOnFlush(warnMessage("3", 3));
+        execute("UPDATE %s SET v[1] = null WHERE k = 3");
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+    }
+
+    @Test
+    public void testMapSize() throws Throwable
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v map<int, int>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, null)");
+        execute("INSERT INTO %s (k, v) VALUES (1, {1:10})");
+        execute("INSERT INTO %s (k, v) VALUES (2, {1:10, 2:20})");
+        assertNotWarnedOnFlush();
+
+        execute("INSERT INTO %s (k, v) VALUES (3, {1:10, 2:20, 3:30})");
+        execute("INSERT INTO %s (k, v) VALUES (4, {1:10, 2:20, 3:30, 4:40})");
+        assertWarnedOnFlush(warnMessage("3", 3), warnMessage("4", 4));
+
+        execute("INSERT INTO %s (k, v) VALUES (5, {1:10, 2:20, 3:30, 4:40, 5:50})");
+        execute("INSERT INTO %s (k, v) VALUES (6, {1:10, 2:20, 3:30, 4:40, 5:50, 6:60})");
+        assertFailedOnFlush(failMessage("5", 5), failMessage("6", 6));
+    }
+
+    @Test
+    public void testMapSizeFrozen()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v frozen<map<int, int>>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (3, {1:10, 2:20, 3:30})");
+        execute("INSERT INTO %s (k, v) VALUES (4, {1:10, 2:20, 3:30, 4:40})");
+
+        // the size of frozen collections is not checked during sstable write
+        assertNotWarnedOnFlush();
+    }
+
+    @Test
+    public void testMapSizeWithUpdates()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v map<int, int>)");
+
+        execute("UPDATE %s SET v = v + {1:10, 2:20} WHERE k = 1");
+        execute("UPDATE %s SET v = v - {1, 2} WHERE k = 2");
+        assertNotWarnedOnFlush();
+
+        execute("UPDATE %s SET v = v + {1:10, 2:20, 3:30} WHERE k = 3");
+        execute("UPDATE %s SET v = v - {1, 2, 3} WHERE k = 4");
+        assertWarnedOnFlush(warnMessage("3", 3));
+
+        execute("UPDATE %s SET v = v + {1:10, 2:20, 3:30, 4:40, 5:50} WHERE k = 5");
+        execute("UPDATE %s SET v = v - {1, 2, 3, 4, 5} WHERE k = 6");
+        assertFailedOnFlush(failMessage("5", 5));
+    }
+
+    @Test
+    public void testMapSizeAfterCompaction()
+    {
+        schemaChange("CREATE TABLE %s (k int PRIMARY KEY, v map<int, int>)");
+
+        execute("INSERT INTO %s (k, v) VALUES (0, {1:10})");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + {2:20} WHERE k = 0");
+        assertNotWarnedOnFlush();
+        assertNotWarnedOnCompact();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, {1:10})");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + {2:20, 3:30} WHERE k = 1");
+        assertNotWarnedOnFlush();
+        assertWarnedOnCompact(warnMessage("1", 3));
+
+        execute("INSERT INTO %s (k, v) VALUES (2, {1:10, 2:20})");
+        assertNotWarnedOnFlush();
+        execute("UPDATE %s SET v = v + {3:30, 4:40, 5:50} WHERE k = 2");
+        assertWarnedOnFlush(warnMessage("2", 3));
+        assertFailedOnCompact(failMessage("2", 5));
+    }
+
+    @Test
+    public void testCompositePartitionKey()
+    {
+        schemaChange("CREATE TABLE %s (k1 int, k2 text, v set<int>, PRIMARY KEY((k1, k2)))");
+
+        execute("INSERT INTO %s (k1, k2, v) VALUES (0, 'a', {1, 2, 3})");
+        assertWarnedOnFlush(warnMessage("(0, 'a')", 3));
+
+        execute("INSERT INTO %s (k1, k2, v) VALUES (1, 'b', {1, 2, 3, 4, 5})");
+        assertFailedOnFlush(failMessage("(1, 'b')", 5));
+    }
+
+    @Test
+    public void testCompositeClusteringKey()
+    {
+        schemaChange("CREATE TABLE %s (k int, c1 int, c2 text, v set<int>, PRIMARY KEY(k, c1, c2))");
+
+        execute("INSERT INTO %s (k, c1, c2, v) VALUES (1, 10, 'a', {1, 2, 3})");
+        assertWarnedOnFlush(warnMessage("(1, 10, 'a')", 3));
+
+        execute("INSERT INTO %s (k, c1, c2, v) VALUES (2, 20, 'b', {1, 2, 3, 4, 5})");
+        assertFailedOnFlush(failMessage("(2, 20, 'b')", 5));
+    }
+
+    private void execute(String query)
+    {
+        coordinator.execute(format(query), ConsistencyLevel.ALL);
+    }
+
+    private String warnMessage(String key, int numItems)
+    {
+        return String.format("Detected collection v in row %s in table %s with %d items, " +
+                             "this exceeds the warning threshold of %d.",
+                             key, qualifiedTableName, numItems, WARN_THRESHOLD);
+    }
+
+    private String failMessage(String key, int numItems)
+    {
+        return String.format("Detected collection v in row %s in table %s with %d items, " +
+                             "this exceeds the failure threshold of %d.",
+                             key, qualifiedTableName, numItems, FAIL_THRESHOLD);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailTester.java b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailTester.java
new file mode 100644
index 0000000..b12d9ab
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/guardrails/GuardrailTester.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.guardrails;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.After;
+import org.junit.Before;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.assertj.core.api.Assertions;
+import org.assertj.core.api.ListAssert;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public abstract class GuardrailTester extends TestBaseImpl
+{
+    private static final AtomicInteger seqNumber = new AtomicInteger();
+    protected String tableName, qualifiedTableName;
+
+    protected abstract Cluster getCluster();
+
+    @Before
+    public void beforeTest()
+    {
+        tableName = "t_" + seqNumber.getAndIncrement();
+        qualifiedTableName = KEYSPACE + "." + tableName;
+    }
+
+    @After
+    public void afterTest()
+    {
+        schemaChange("DROP TABLE IF EXISTS %s");
+    }
+
+    protected String format(String query)
+    {
+        return String.format(query, qualifiedTableName);
+    }
+
+    protected void schemaChange(String query)
+    {
+        getCluster().schemaChange(format(query));
+    }
+
+    protected void assertNotWarnedOnFlush()
+    {
+        assertNotWarnsOnSSTableWrite(false);
+    }
+
+    protected void assertNotWarnedOnCompact()
+    {
+        assertNotWarnsOnSSTableWrite(true);
+    }
+
+    protected void assertNotWarnsOnSSTableWrite(boolean compact)
+    {
+        getCluster().stream().forEach(node -> assertNotWarnsOnSSTableWrite(node, compact));
+    }
+
+    protected void assertNotWarnsOnSSTableWrite(IInstance node, boolean compact)
+    {
+        long mark = node.logs().mark();
+        try
+        {
+            writeSSTables(node, compact);
+            assertTrue(node.logs().grep(mark, "^ERROR", "^WARN").getResult().isEmpty());
+        }
+        catch (InvalidRequestException e)
+        {
+            fail("Expected no failure, but failed with error message: " + e.getMessage());
+        }
+    }
+
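+    // Guardrail violations hit on the sstable write path are reported through the node logs: the warn
+    // threshold produces WARN entries and the fail threshold produces ERROR entries. The helpers below mark
+    // the log, flush/compact and then grep for the expected messages on every node.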
+    protected void assertWarnedOnFlush(String... msgs)
+    {
+        assertWarnsOnSSTableWrite(false, msgs);
+    }
+
+    protected void assertWarnedOnCompact(String... msgs)
+    {
+        assertWarnsOnSSTableWrite(true, msgs);
+    }
+
+    protected void assertWarnsOnSSTableWrite(boolean compact, String... msgs)
+    {
+        getCluster().stream().forEach(node -> assertWarnsOnSSTableWrite(node, compact, msgs));
+    }
+
+    protected void assertWarnsOnSSTableWrite(IInstance node, boolean compact, String... msgs)
+    {
+        long mark = node.logs().mark();
+        writeSSTables(node, compact);
+        assertTrue(node.logs().grep(mark, "^ERROR").getResult().isEmpty());
+        List<String> warnings = node.logs().grep(mark, "^WARN").getResult();
+        ListAssert<String> assertion = Assertions.assertThat(warnings).isNotEmpty().hasSize(msgs.length);
+        for (String msg : msgs)
+            assertion.anyMatch(m -> m.contains(msg));
+    }
+
+    protected void assertFailedOnFlush(String... msgs)
+    {
+        assertFailsOnSSTableWrite(false, msgs);
+    }
+
+    protected void assertFailedOnCompact(String... msgs)
+    {
+        assertFailsOnSSTableWrite(true, msgs);
+    }
+
+    private void assertFailsOnSSTableWrite(boolean compact, String... msgs)
+    {
+        getCluster().stream().forEach(node -> assertFailsOnSSTableWrite(node, compact, msgs));
+    }
+
+    private void assertFailsOnSSTableWrite(IInstance node, boolean compact, String... msgs)
+    {
+        long mark = node.logs().mark();
+        writeSSTables(node, compact);
+        assertTrue(node.logs().grep(mark, "^WARN").getResult().isEmpty());
+        List<String> errors = node.logs().grep(mark, "^ERROR").getResult();
+        ListAssert<String> assertion = Assertions.assertThat(errors).isNotEmpty().hasSize(msgs.length);
+        for (String msg : msgs)
+            assertion.anyMatch(m -> m.contains(msg));
+    }
+
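+    // Flush the keyspace (and optionally force a compaction of the test table) so the guardrail is exercised
+    // on the sstable write path.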
+    private void writeSSTables(IInstance node, boolean compact)
+    {
+        node.flush(KEYSPACE);
+        if (compact)
+            node.forceCompact(KEYSPACE, tableName);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java
index 11a30e5..d7b8f99 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java
@@ -44,6 +44,7 @@
 import static org.apache.cassandra.distributed.shared.ClusterUtils.stopAbrupt;
 import static org.apache.cassandra.distributed.test.hostreplacement.HostReplacementTest.setupCluster;
 import static org.apache.cassandra.distributed.test.hostreplacement.HostReplacementTest.validateRows;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class HostReplacementAbruptDownedInstanceTest extends TestBaseImpl
 {
@@ -82,14 +83,14 @@
 //            peers.forEach(p -> validateRows(p.coordinator(), expectedState));
 
             // now create a new node to replace the other node
-            long startNanos = System.nanoTime();
+            long startNanos = nanoTime();
             IInvokableInstance replacingNode = replaceHostAndStart(cluster, nodeToRemove, properties -> {
                 // since node2 was killed abruptly its possible that node2's gossip state has an old schema version
                 // if this happens then bootstrap will fail waiting for a schema version it will never see; to avoid
                 // this, setting this property to log the warning rather than fail bootstrap
                 properties.set(BOOTSTRAP_SKIP_SCHEMA_CHECK, true);
             });
-            logger.info("Host replacement of {} with {} took {}", nodeToRemove, replacingNode, Duration.ofNanos(System.nanoTime() - startNanos));
+            logger.info("Host replacement of {} with {} took {}", nodeToRemove, replacingNode, Duration.ofNanos(nanoTime() - startNanos));
             peers.forEach(p -> awaitRingJoin(p, replacingNode));
 
             // make sure all nodes are healthy
diff --git a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/NodeCannotJoinAsHibernatingNodeWithoutReplaceAddressTest.java b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/NodeCannotJoinAsHibernatingNodeWithoutReplaceAddressTest.java
new file mode 100644
index 0000000..7d69248
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/NodeCannotJoinAsHibernatingNodeWithoutReplaceAddressTest.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.hostreplacement;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import net.bytebuddy.implementation.bind.annotation.SuperCall;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.Constants;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.TokenSupplier;
+import org.apache.cassandra.distributed.impl.InstanceIDDefiner;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.service.PendingRangeCalculatorService;
+import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.Shared;
+import org.assertj.core.api.Assertions;
+
+import static net.bytebuddy.matcher.ElementMatchers.named;
+
+public class NodeCannotJoinAsHibernatingNodeWithoutReplaceAddressTest extends TestBaseImpl
+{
+    @Test
+    public void test() throws IOException, InterruptedException
+    {
+        TokenSupplier even = TokenSupplier.evenlyDistributedTokens(2);
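+        // nodes 3 and 4 are both attempts to replace node 2, so the token supplier below hands them node 2's token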
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withConfig(c -> c.with(Feature.values()).set(Constants.KEY_DTEST_API_STARTUP_FAILURE_AS_SHUTDOWN, false))
+                                           .withInstanceInitializer(BBHelper::install)
+                                           .withTokenSupplier(node -> even.token((node == 3 || node == 4) ? 2 : node))
+                                           .start()))
+        {
+            final IInvokableInstance toReplace = cluster.get(2);
+            final String toReplaceAddress = toReplace.broadcastAddress().getAddress().getHostAddress();
+
+            SharedState.cluster = cluster;
+            cluster.setUncaughtExceptionsFilter((nodeId, cause) -> nodeId > 2); // ignore host replacement errors
+            fixDistributedSchemas(cluster);
+
+            ClusterUtils.stopUnchecked(toReplace);
+
+            try
+            {
+                ClusterUtils.replaceHostAndStart(cluster, toReplace, (inst, ignore) -> ClusterUtils.updateAddress(inst, toReplaceAddress));
+                Assert.fail("Host replacement should exit with an error");
+            }
+            catch (Exception e)
+            {
+                // the instance is expected to fail, but it may not have finished shutting down yet, so wait for shutdown to complete
+                SharedState.shutdownComplete.await(1, TimeUnit.MINUTES);
+            }
+
+            IInvokableInstance inst = ClusterUtils.addInstance(cluster, toReplace.config(), c -> c.set("auto_bootstrap", true));
+            ClusterUtils.updateAddress(inst, toReplaceAddress);
+            Assertions.assertThatThrownBy(() -> inst.startup())
+                      .hasMessageContaining("A node with address")
+                      .hasMessageContaining("already exists, cancelling join");
+        }
+    }
+
+    public static class BBHelper
+    {
+        static void install(ClassLoader cl, int nodeNumber)
+        {
+            if (nodeNumber != 3)
+                return;
+            shutdownBeforeNormal(cl);
+        }
+
+        private static void shutdownBeforeNormal(ClassLoader cl)
+        {
+            new ByteBuddy().rebase(PendingRangeCalculatorService.class)
+                           .method(named("blockUntilFinished"))
+                           .intercept(MethodDelegation.to(ShutdownBeforeNormal.class))
+                           .make()
+                           .load(cl, ClassLoadingStrategy.Default.INJECTION);
+        }
+    }
+
+    @Shared
+    public static class SharedState
+    {
+        public static volatile ICluster cluster;
+        // Instance.shutdown can only be called once, so only the caller knows when it's done (isShutdown looks at a field set BEFORE shutting down...)
+        // since the test needs to know when shutdown completes, add this static state so the caller (bytebuddy rewrite) can update it
+        public static final CountDownLatch shutdownComplete = new CountDownLatch(1);
+    }
+
+    public static class ShutdownBeforeNormal
+    {
+        public static void blockUntilFinished(@SuperCall Runnable fn)
+        {
+            fn.run();
+            int id = Integer.parseInt(InstanceIDDefiner.getInstanceId().replace("node", ""));
+            ICluster cluster = Objects.requireNonNull(SharedState.cluster);
+            // can't stop the node here because the stop and start methods share a lock, and blockUntilFinished is called during start...
+            ForkJoinPool.commonPool().execute(() -> {
+                ClusterUtils.stopAbrupt(cluster, cluster.get(id));
+                SharedState.shutdownComplete.countDown();
+            });
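+            // kill this replacement node before it finishes joining, so the replaced address is left behind in gossip in the
+            // hibernate state; the next node to claim that address without replace_address should then be rejected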
+            JVMStabilityInspector.killCurrentJVM(new RuntimeException("Attempting to stop the instance"), false);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/jmx/JMXGetterCheckTest.java b/test/distributed/org/apache/cassandra/distributed/test/jmx/JMXGetterCheckTest.java
new file mode 100644
index 0000000..ef0f1ec
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/jmx/JMXGetterCheckTest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.test.jmx;
+
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+import javax.management.JMRuntimeException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanInfo;
+import javax.management.MBeanOperationInfo;
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXConnectorServer;
+import javax.management.remote.JMXServiceURL;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.utils.JMXServerUtils;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.IS_DISABLED_MBEAN_REGISTRATION;
+import static org.apache.cassandra.cql3.CQLTester.getAutomaticallyAllocatedPort;
+
+public class JMXGetterCheckTest extends TestBaseImpl
+{
+    private static final Set<String> IGNORE_ATTRIBUTES = ImmutableSet.of(
+    "org.apache.cassandra.net:type=MessagingService:BackPressurePerHost" // throws unsupported saying the feature was removed... dropped in CASSANDRA-15375
+    );
+    private static final Set<String> IGNORE_OPERATIONS = ImmutableSet.of(
+    "org.apache.cassandra.db:type=StorageService:stopDaemon", // halts the instance, which then causes the JVM to exit
+    "org.apache.cassandra.db:type=StorageService:drain", // don't drain, it stops things which can cause other APIs to be unstable as we are in a stopped state
+    "org.apache.cassandra.db:type=StorageService:stopGossiping", // if we stop gossip this can cause other issues, so avoid
+    "org.apache.cassandra.db:type=StorageService:resetLocalSchema" // this will fail when there are no other nodes which can serve schema
+    );
+
+    @Test
+    public void test() throws Exception
+    {
+        // start JMX server, which the instance will register with
+        InetAddress loopback = InetAddress.getLoopbackAddress();
+        String jmxHost = loopback.getHostAddress();
+        int jmxPort = getAutomaticallyAllocatedPort(loopback);
+        JMXConnectorServer jmxServer = JMXServerUtils.createJMXServer(jmxPort, true);
+        jmxServer.start();
+        String url = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi";
+
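+        // make sure MBean registration is not disabled, so the instance's beans are visible through the JMX server started above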
+        IS_DISABLED_MBEAN_REGISTRATION.setBoolean(false);
+        try (Cluster cluster = Cluster.build(1).withConfig(c -> c.with(Feature.values())).start())
+        {
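+            // collect every failing getter/operation and report them together at the end, rather than failing on the first one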
+            List<Named> errors = new ArrayList<>();
+            try (JMXConnector jmxc = JMXConnectorFactory.connect(new JMXServiceURL(url), null))
+            {
+                MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
+                Set<ObjectName> metricNames = new TreeSet<>(mbsc.queryNames(null, null));
+                for (ObjectName name : metricNames)
+                {
+                    if (!name.getDomain().startsWith("org.apache.cassandra"))
+                        continue;
+                    MBeanInfo info = mbsc.getMBeanInfo(name);
+                    for (MBeanAttributeInfo a : info.getAttributes())
+                    {
+                        String fqn = String.format("%s:%s", name, a.getName());
+                        if (!a.isReadable() || IGNORE_ATTRIBUTES.contains(fqn))
+                            continue;
+                        try
+                        {
+                            mbsc.getAttribute(name, a.getName());
+                        }
+                        catch (JMRuntimeException e)
+                        {
+                            errors.add(new Named(String.format("Attribute %s", fqn), e.getCause()));
+                        }
+                    }
+
+                    for (MBeanOperationInfo o : info.getOperations())
+                    {
+                        String fqn = String.format("%s:%s", name, o.getName());
+                        if (o.getSignature().length != 0 || IGNORE_OPERATIONS.contains(fqn))
+                            continue;
+                        try
+                        {
+                            mbsc.invoke(name, o.getName(), new Object[0], new String[0]);
+                        }
+                        catch (JMRuntimeException e)
+                        {
+                            errors.add(new Named(String.format("Operation %s", fqn), e.getCause()));
+                        }
+                    }
+                }
+            }
+            if (!errors.isEmpty())
+            {
+                AssertionError root = new AssertionError();
+                errors.forEach(root::addSuppressed);
+                throw root;
+            }
+        }
+    }
+
+    /**
+     * This class is meant to make new errors easier to read, by adding the JMX endpoint and cleaning up the unneeded JMX/reflection logic cluttering the stack trace.
+     */
+    private static class Named extends RuntimeException
+    {
+        public Named(String msg, Throwable cause)
+        {
+            super(msg + "\nCaused by: " + cause.getClass().getCanonicalName() + ": " + cause.getMessage(), cause.getCause());
+            StackTraceElement[] stack = cause.getStackTrace();
+            List<StackTraceElement> copy = new ArrayList<>();
+            for (StackTraceElement s : stack)
+            {
+                if (!s.getClassName().startsWith("org.apache.cassandra"))
+                    break;
+                copy.add(s);
+            }
+            Collections.reverse(copy);
+            setStackTrace(copy.toArray(new StackTraceElement[0]));
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/metric/TableMetricTest.java b/test/distributed/org/apache/cassandra/distributed/test/metric/TableMetricTest.java
index 3540acd..08c9324 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/metric/TableMetricTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/metric/TableMetricTest.java
@@ -41,8 +41,9 @@
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
 import org.apache.cassandra.distributed.test.TestBaseImpl;
-import org.apache.cassandra.repair.SystemDistributedKeyspace;
+import org.apache.cassandra.schema.SystemDistributedKeyspace;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.tracing.TraceKeyspace;
 import org.apache.cassandra.utils.MBeanWrapper;
 
@@ -120,7 +121,7 @@
         SYSTEM_TABLES = cluster.get(1).callOnInstance(() -> {
             Map<String, Collection<String>> map = new HashMap<>();
             Arrays.asList(SystemKeyspace.metadata(), AuthKeyspace.metadata(), SystemDistributedKeyspace.metadata(),
-                          Schema.getSystemKeyspaceMetadata(), TraceKeyspace.metadata())
+                          SchemaKeyspace.metadata(), TraceKeyspace.metadata())
                   .forEach(meta -> {
                       Set<String> tables = meta.tables.stream().map(t -> t.name).collect(Collectors.toSet());
                       map.put(meta.name, tables);
diff --git a/test/distributed/org/apache/cassandra/distributed/test/metrics/HintsServiceMetricsTest.java b/test/distributed/org/apache/cassandra/distributed/test/metrics/HintsServiceMetricsTest.java
index a47c782..0fc9bff 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/metrics/HintsServiceMetricsTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/metrics/HintsServiceMetricsTest.java
@@ -20,7 +20,6 @@
 
 import java.util.Arrays;
 import java.util.concurrent.Callable;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -38,6 +37,7 @@
 import org.apache.cassandra.hints.Hint;
 import org.apache.cassandra.metrics.HintsServiceMetrics;
 import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.awaitility.core.ThrowingRunnable;
 
 import static java.util.concurrent.TimeUnit.MINUTES;
@@ -69,6 +69,7 @@
                                         .withInstanceInitializer(FailHints::install)
                                         .start())
         {
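+            // FailHints (below) intentionally throws "Injected failure"; filter those exceptions so they are not treated as unexpected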
+            cluster.setUncaughtExceptionsFilter(t -> "Injected failure".equals(t.getMessage()));
             // setup a message filter to drop some of the hint request messages from node1
             AtomicInteger hintsNode2 = new AtomicInteger();
             AtomicInteger hintsNode3 = new AtomicInteger();
@@ -152,6 +153,7 @@
         await().atMost(5, MINUTES)
                .pollDelay(0, SECONDS)
                .pollInterval(1, SECONDS)
+               .dontCatchUncaughtExceptions()
                .untilAsserted(assertion);
     }
 
@@ -221,7 +223,7 @@
                            .load(cl, ClassLoadingStrategy.Default.INJECTION);
         }
 
-        public static CompletableFuture<?> execute(@SuperCall Callable<CompletableFuture<?>> r) throws Exception
+        public static Future<?> execute(@SuperCall Callable<Future<?>> r) throws Exception
         {
             if (numHints.incrementAndGet() <= NUM_FAILURES_PER_NODE)
                 throw new RuntimeException("Injected failure");
diff --git a/test/distributed/org/apache/cassandra/distributed/test/repair/ForceRepairTest.java b/test/distributed/org/apache/cassandra/distributed/test/repair/ForceRepairTest.java
new file mode 100644
index 0000000..946c41e
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/repair/ForceRepairTest.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.test.repair;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.commons.lang3.ArrayUtils;
+import org.junit.Test;
+
+import com.carrotsearch.hppc.LongArrayList;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.lifecycle.View;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.QueryResults;
+import org.apache.cassandra.distributed.api.SimpleQueryResult;
+import org.apache.cassandra.distributed.shared.AssertUtils;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.FBUtilities;
+import org.assertj.core.api.Assertions;
+
+public class ForceRepairTest extends TestBaseImpl
+{
+    /**
+     * Port of python dtest "repair_tests/incremental_repair_test.py::TestIncRepair::test_force" but extends to test
+     * all types of repair.
+     */
+    @Test
+    public void force() throws IOException
+    {
+        force(false);
+    }
+
+    @Test
+    public void forceWithDifference() throws IOException
+    {
+        force(true);
+    }
+
+    private void force(boolean includeDifference) throws IOException
+    {
+        long nowInMicro = System.currentTimeMillis() * 1000;
+        try (Cluster cluster = Cluster.build(3)
+                                      .withConfig(c -> c.set("hinted_handoff_enabled", false)
+                                                        .with(Feature.values()))
+                                      .start())
+        {
+            init(cluster);
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (k INT PRIMARY KEY, v INT)"));
+
+            for (int i = 0; i < 10; i++)
+                cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (k,v) VALUES (?, ?) USING TIMESTAMP ?"), ConsistencyLevel.ALL, i, i, nowInMicro++);
+
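+            // take node2 down and wait until node1's failure detector notices, so a plain (non-forced) repair is guaranteed to fail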
+            String downAddress = cluster.get(2).callOnInstance(() -> FBUtilities.getBroadcastAddressAndPort().getHostAddressAndPort());
+            ClusterUtils.stopUnchecked(cluster.get(2));
+            cluster.get(1).runOnInstance(() -> {
+                InetAddressAndPort neighbor;
+                try
+                {
+                    neighbor = InetAddressAndPort.getByName(downAddress);
+                }
+                catch (UnknownHostException e)
+                {
+                    throw new RuntimeException(e);
+                }
+                while (FailureDetector.instance.isAlive(neighbor))
+                    Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
+            });
+
+
+            // repair should fail because node2 is down
+            IInvokableInstance node1 = cluster.get(1);
+
+            for (String[] args : Arrays.asList(new String[]{ "--full" },
+                                               new String[]{ "--full", "--preview" },
+                                               new String[]{ "--full", "--validate"}, // nothing should be in the repaired set, so shouldn't stream
+                                               new String[]{ "--preview" }, // IR Preview
+                                               new String[]{ "--validate"}, // nothing should be in the repaired set, so shouldn't stream
+                                               new String[0])) // IR
+            {
+                if (includeDifference)
+                    node1.executeInternal(withKeyspace("INSERT INTO %s.tbl (k,v) VALUES (?, ?) USING TIMESTAMP ?"), -1, -1, nowInMicro++); // each loop should have a different timestamp, causing a new difference
+
+                try
+                {
+                    node1.nodetoolResult(ArrayUtils.addAll(new String[] {"repair", KEYSPACE}, args)).asserts().failure();
+                    node1.nodetoolResult(ArrayUtils.addAll(new String[] {"repair", KEYSPACE, "--force"}, args)).asserts().success();
+
+                    assertNoRepairedAt(cluster);
+                }
+                catch (Exception | Error e)
+                {
+                    // tag the error to include which args broke
+                    e.addSuppressed(new AssertionError("Failure for args: " + Arrays.toString(args)));
+                    throw e;
+                }
+            }
+
+            if (includeDifference)
+            {
+                SimpleQueryResult expected = QueryResults.builder()
+                                                         .row(-1, -1)
+                                                         .build();
+                for (IInvokableInstance node : Arrays.asList(node1, cluster.get(3)))
+                {
+                    SimpleQueryResult results = node.executeInternalWithResult(withKeyspace("SELECT * FROM %s.tbl WHERE k=?"), -1);
+                    expected.reset();
+                    AssertUtils.assertRows(results, expected);
+                }
+            }
+        }
+    }
+
+    private static void assertNoRepairedAt(Cluster cluster)
+    {
+        List<long[]> repairedAt = getRepairedAt(cluster, KEYSPACE, "tbl");
+        Assertions.assertThat(repairedAt).hasSize(cluster.size());
+        for (int i = 0; i < repairedAt.size(); i++)
+        {
+            long[] array = repairedAt.get(i);
+            if (array == null)
+            {
+                // ignore downed nodes
+                Assertions.assertThat(cluster.get(i + 1).isShutdown()).isTrue();
+                continue;
+            }
+            Assertions.assertThat(array).isNotEmpty();
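+            // repairedAt == 0 means the sstable is still unrepaired; a forced repair with a node down must not promote data to the repaired set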
+            for (long a : array)
+                Assertions.assertThat(a).describedAs("node%d had a repaired sstable", i + 1).isEqualTo(0);
+        }
+    }
+
+    private static List<long[]> getRepairedAt(Cluster cluster, String keyspace, String table)
+    {
+        return cluster.stream().map(i -> {
+            if (i.isShutdown())
+                return null;
+
+            return i.callOnInstance(() -> {
+                TableMetadata meta = Schema.instance.getTableMetadata(keyspace, table);
+                ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(meta.id);
+
+                View view = cfs.getTracker().getView();
+                LongArrayList list = new LongArrayList();
+                for (SSTableReader sstable : view.liveSSTables())
+                {
+                    try
+                    {
+                        StatsMetadata metadata = sstable.getSSTableMetadata();
+                        list.add(metadata.repairedAt);
+                    }
+                    catch (Exception e)
+                    {
+                        // ignore
+                    }
+                }
+                return list.toArray();
+            });
+        }).collect(Collectors.toList());
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/AutoBootstrapTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/AutoBootstrapTest.java
index 3051639..dd8b7be 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/AutoBootstrapTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/AutoBootstrapTest.java
@@ -49,7 +49,7 @@
                                         .withConfig(config -> config.with(NETWORK, GOSSIP))
                                         .start())
         {
-            populate(cluster,0, 100);
+            populate(cluster, 0, 100);
             bootstrapAndJoinNode(cluster);
 
             for (Map.Entry<Integer, Long> e : count(cluster).entrySet())
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java
index d6e715a..423e78b 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java
@@ -18,13 +18,17 @@
 
 package org.apache.cassandra.distributed.test.ring;
 
+import java.lang.management.ManagementFactory;
 import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.ICluster;
@@ -44,6 +48,27 @@
 
 public class BootstrapTest extends TestBaseImpl
 {
+    private long savedMigrationDelay;
+
+    @Before
+    public void beforeTest()
+    {
+        // MigrationCoordinator schedules schema pull requests immediately when the node is just starting up; otherwise
+        // the first pull request is sent after 60 seconds. Whether we are starting up is detected by examining
+        // the node's up-time: if it is lower than MIGRATION_DELAY, the server is considered to be starting up.
+        // When multiple test cases in this class run in the same JVM, each starting a node, the up-time is only
+        // really relevant for the first test. In order to enforce the startup-like behaviour for each test case,
+        // the MIGRATION_DELAY time is adjusted accordingly.
+        savedMigrationDelay = CassandraRelevantProperties.MIGRATION_DELAY.getLong();
+        CassandraRelevantProperties.MIGRATION_DELAY.setLong(ManagementFactory.getRuntimeMXBean().getUptime() + savedMigrationDelay);
+    }
+
+    @After
+    public void afterTest()
+    {
+        CassandraRelevantProperties.MIGRATION_DELAY.setLong(savedMigrationDelay);
+    }
+
     @Test
     public void bootstrapTest() throws Throwable
     {
@@ -56,7 +81,7 @@
                                         .withConfig(config -> config.with(NETWORK, GOSSIP))
                                         .start())
         {
-            populate(cluster,0, 100);
+            populate(cluster, 0, 100);
 
             IInstanceConfig config = cluster.newInstanceConfig();
             IInvokableInstance newInstance = cluster.bootstrap(config);
@@ -95,7 +120,7 @@
 
             cluster.forEach(statusToBootstrap(newInstance));
 
-            populate(cluster,0, 100);
+            populate(cluster, 0, 100);
 
             Assert.assertEquals(100, newInstance.executeInternal("SELECT *FROM " + KEYSPACE + ".tbl").length);
         }
@@ -125,5 +150,4 @@
                         .collect(Collectors.toMap(nodeId -> nodeId,
                                                   nodeId -> (Long) cluster.get(nodeId).executeInternal("SELECT count(*) FROM " + KEYSPACE + ".tbl")[0][0]));
     }
-
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java
index d6825d9..631ea91 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java
@@ -33,6 +33,7 @@
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class CommunicationDuringDecommissionTest extends TestBaseImpl
 {
@@ -53,10 +54,10 @@
 
 
             Map<Integer, Long> connectionAttempts = new HashMap<>();
-            long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
+            long deadline = currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
 
             // Wait 10 seconds and check if there are any new connection attempts to the decommissioned node
-            while (System.currentTimeMillis() <= deadline)
+            while (currentTimeMillis() <= deadline)
             {
                 for (int i = 2; i <= cluster.size(); i++)
                 {
@@ -73,4 +74,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java
index 8daa58a..2e702b2 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java
@@ -106,4 +106,4 @@
                 Assert.assertEquals("Node " + e.getKey() + " has incorrect row state", e.getValue().longValue(), 150L);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/ReadsDuringBootstrapTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/ReadsDuringBootstrapTest.java
index 4898479..ba2cdc5 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/ReadsDuringBootstrapTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/ReadsDuringBootstrapTest.java
@@ -50,6 +50,7 @@
 
 public class ReadsDuringBootstrapTest extends TestBaseImpl
 {
+
     @Test
     public void readsDuringBootstrapTest() throws IOException, ExecutionException, InterruptedException, TimeoutException
     {
@@ -60,8 +61,8 @@
                                         .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(expandedNodeCount))
                                         .withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(expandedNodeCount, "dc0", "rack0"))
                                         .withConfig(config -> config.with(NETWORK, GOSSIP)
-                                                                    .set("read_request_timeout_in_ms", Integer.MAX_VALUE)
-                                                                    .set("request_timeout_in_ms", Integer.MAX_VALUE))
+                                                                    .set("read_request_timeout", String.format("%dms", Integer.MAX_VALUE))
+                                                                    .set("request_timeout", String.format("%dms", Integer.MAX_VALUE)))
                                         .withInstanceInitializer(BB::install)
                                         .start())
         {
@@ -90,6 +91,7 @@
     {
         public static final AtomicBoolean block = new AtomicBoolean();
         public static final CountDownLatch latch = new CountDownLatch(1);
+
         private static void install(ClassLoader cl, Integer instanceId)
         {
             if (instanceId != 1)
@@ -110,5 +112,4 @@
             return zuper.call();
         }
     }
-
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/streaming/RebuildStreamingTest.java b/test/distributed/org/apache/cassandra/distributed/test/streaming/RebuildStreamingTest.java
new file mode 100644
index 0000000..ee41ec4
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/streaming/RebuildStreamingTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.test.streaming;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.Row;
+import org.apache.cassandra.distributed.api.SimpleQueryResult;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.distributed.util.QueryResultUtil;
+import org.assertj.core.api.Assertions;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class RebuildStreamingTest extends TestBaseImpl
+{
+    private static final ByteBuffer BLOB = ByteBuffer.wrap(new byte[1 << 16]);
+    // zero copy streaming sends all components, so the events will include non-Data files as well
+    private static final int NUM_COMPONENTS = 7;
+
+    @Test
+    public void zeroCopy() throws IOException
+    {
+        test(true);
+    }
+
+    @Test
+    public void notZeroCopy() throws IOException
+    {
+        test(false);
+    }
+
+    private void test(boolean zeroCopyStreaming) throws IOException
+    {
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withConfig(c -> c.with(Feature.values())
+                                                             .set("stream_entire_sstables", zeroCopyStreaming).set("streaming_slow_events_log_timeout", "0s"))
+                                           .start()))
+        {
+            // streaming sends events every 65k bytes, so make sure the files are larger than that to hit
+            // all cases of the vtable
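+            // streaming_slow_events_log_timeout is set to 0s above so every streaming event counts as slow, which the log check at the end relies on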
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.users (user_id varchar, spacing blob, PRIMARY KEY (user_id)) WITH compression = { 'enabled' : false };"));
+            cluster.stream().forEach(i -> i.nodetoolResult("disableautocompaction", KEYSPACE).asserts().success());
+            IInvokableInstance first = cluster.get(1);
+            IInvokableInstance second = cluster.get(2);
+            long expectedFiles = 10;
+            for (int i = 0; i < expectedFiles; i++)
+            {
+                first.executeInternal(withKeyspace("insert into %s.users(user_id, spacing) values (?, ? )"), "dcapwell" + i, BLOB);
+                first.flush(KEYSPACE);
+            }
+            if (zeroCopyStreaming) // zero copy streaming sends all sstable components, so account for them in the expected file count
+                expectedFiles *= NUM_COMPONENTS;
+
+            second.nodetoolResult("rebuild", "--keyspace", KEYSPACE).asserts().success();
+
+            SimpleQueryResult qr = first.executeInternalWithResult("SELECT * FROM system_views.streaming");
+            String txt = QueryResultUtil.expand(qr);
+            qr.reset();
+            assertThat(qr.toObjectArrays().length).describedAs("Found rows\n%s", txt).isEqualTo(1);
+            assertThat(qr.hasNext()).isTrue();
+            Row row = qr.next();
+            QueryResultUtil.assertThat(row)
+                           .isEqualTo("peers", Collections.singletonList("/127.0.0.2:7012"))
+                           .isEqualTo("follower", true)
+                           .isEqualTo("operation", "Rebuild")
+                           .isEqualTo("status", "success")
+                           .isEqualTo("progress_percentage", 100.0F)
+                           .isEqualTo("success_message", null).isEqualTo("failure_cause", null)
+                           .isEqualTo("files_sent", expectedFiles)
+                           .columnsEqualTo("files_sent", "files_to_send")
+                           .columnsEqualTo("bytes_sent", "bytes_to_send")
+                           .isEqualTo("files_received", 0L)
+                           .columnsEqualTo("files_received", "files_to_receive", "bytes_received", "bytes_to_receive");
+            long totalBytes = row.getLong("bytes_sent");
+            assertThat(totalBytes).isGreaterThan(0);
+
+            qr = second.executeInternalWithResult("SELECT * FROM system_views.streaming");
+            txt = QueryResultUtil.expand(qr);
+            qr.reset();
+            assertThat(qr.toObjectArrays().length).describedAs("Found rows\n%s", txt).isEqualTo(1);
+            assertThat(qr.hasNext()).isTrue();
+
+            QueryResultUtil.assertThat(qr.next())
+                           .isEqualTo("peers", Collections.singletonList("/127.0.0.1:7012"))
+                           .isEqualTo("follower", false)
+                           .isEqualTo("operation", "Rebuild")
+                           .isEqualTo("status", "success")
+                           .isEqualTo("progress_percentage", 100.0F)
+                           .isEqualTo("success_message", null).isEqualTo("failure_cause", null)
+                           .columnsEqualTo("files_to_receive", "files_received").isEqualTo("files_received", expectedFiles)
+                           .columnsEqualTo("bytes_to_receive", "bytes_received").isEqualTo("bytes_received", totalBytes)
+                           .columnsEqualTo("files_sent", "files_to_send", "bytes_sent", "bytes_to_send").isEqualTo("files_sent", 0L);
+
+            // did we trigger slow event log?
+            cluster.forEach(i -> Assertions.assertThat(i.logs().grep("Handling streaming events took longer than").getResult()).describedAs("Unable to find slow log for node%d", i.config().num()).isNotEmpty());
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/streaming/StreamCloseInMiddleTest.java b/test/distributed/org/apache/cassandra/distributed/test/streaming/StreamCloseInMiddleTest.java
new file mode 100644
index 0000000..fc52ab6
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/streaming/StreamCloseInMiddleTest.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.test.streaming;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.concurrent.Callable;
+import java.util.stream.Collectors;
+
+import org.junit.Test;
+
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import net.bytebuddy.implementation.bind.annotation.SuperCall;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.streaming.CassandraIncomingFile;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.TokenSupplier;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.io.sstable.format.RangeAwareSSTableWriter;
+import org.apache.cassandra.io.sstable.format.big.BigTableZeroCopyWriter;
+import org.apache.cassandra.io.util.SequentialWriter;
+import org.assertj.core.api.Assertions;
+
+import static net.bytebuddy.matcher.ElementMatchers.named;
+import static net.bytebuddy.matcher.ElementMatchers.takesArguments;
+
+public class StreamCloseInMiddleTest extends TestBaseImpl
+{
+    @Test
+    public void zeroCopy() throws IOException
+    {
+        streamClose(true);
+    }
+
+    @Test
+    public void notZeroCopy() throws IOException
+    {
+        streamClose(false);
+    }
+
+    private void streamClose(boolean zeroCopyStreaming) throws IOException
+    {
+        try (Cluster cluster = Cluster.build(2)
+                                      .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(3))
+                                      .withInstanceInitializer(BBHelper::install)
+                                      .withConfig(c -> c.with(Feature.values())
+                                                        .set("stream_entire_sstables", zeroCopyStreaming)
+                                                        // with the "die" policy a disk failure will try to halt the JVM, which is easy to validate in the test;
+                                                        // other policies require checking the state of the individual subsystems
+                                                        .set("disk_failure_policy", "die"))
+                                      .start())
+        {
+            init(cluster);
+
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int PRIMARY KEY)"));
+
+            triggerStreaming(cluster, zeroCopyStreaming);
+            // make sure disk failure policy is not triggered
+            assertNoNodeShutdown(cluster);
+
+            // now bootstrap a new node; streaming will fail
+            IInvokableInstance node3 = ClusterUtils.addInstance(cluster, cluster.get(1).config(), c -> c.set("auto_bootstrap", true));
+            node3.startup();
+            for (String line : Arrays.asList("Error while waiting on bootstrap to complete. Bootstrap will have to be restarted", // bootstrap failed
+                                             "Some data streaming failed. Use nodetool to check bootstrap state and resume")) // didn't join ring because bootstrap failed
+                Assertions.assertThat(node3.logs().grep(line).getResult())
+                          .hasSize(1);
+
+            assertNoNodeShutdown(cluster);
+        }
+    }
+
+    private void assertNoNodeShutdown(Cluster cluster)
+    {
+        AssertionError t = null;
+        for (IInvokableInstance i : cluster.stream().collect(Collectors.toList()))
+        {
+            try
+            {
+                Assertions.assertThat(i.isShutdown()).describedAs("%s was shutdown; this is not expected", i).isFalse();
+                Assertions.assertThat(i.killAttempts()).describedAs("%s saw kill attempts; this is not expected", i).isEqualTo(0);
+            }
+            catch (AssertionError t2)
+            {
+                if (t == null)
+                    t = t2;
+                else
+                    t.addSuppressed(t2);
+            }
+        }
+        if (t != null)
+            throw t;
+    }
+
+    private static void triggerStreaming(Cluster cluster, boolean expectedEntireSSTable)
+    {
+        IInvokableInstance node1 = cluster.get(1);
+        IInvokableInstance node2 = cluster.get(2);
+
+        // repair will do streaming IFF there is a mismatch; so cause one
+        for (int i = 0; i < 10; i++)
+            node1.executeInternal(withKeyspace("INSERT INTO %s.tbl (pk) VALUES (?)"), i); // timestamp won't match, causing a mismatch
+
+        // trigger streaming; expected to fail as the streaming socket is closed in the middle (currently an unrecoverable event)
+        node2.nodetoolResult("repair", "-full", KEYSPACE, "tbl").asserts().failure();
+
+        assertStreamingType(node2, expectedEntireSSTable);
+    }
+
+    private static void assertStreamingType(IInvokableInstance node, boolean expectedEntireSSTable)
+    {
+        String key = "org.apache.cassandra.metrics.Streaming.%s./127.0.0.1.7012";
+        long entire = node.metrics().getCounter(String.format(key, "EntireSSTablesStreamedIn"));
+        long partial = node.metrics().getCounter(String.format(key, "PartialSSTablesStreamedIn"));
+        if (expectedEntireSSTable)
+        {
+            Assertions.assertThat(partial).isEqualTo(0);
+            Assertions.assertThat(entire).isGreaterThan(0);
+        }
+        else
+        {
+            Assertions.assertThat(partial).isGreaterThan(0);
+            Assertions.assertThat(entire).isEqualTo(0);
+        }
+    }
+
+    public static class BBHelper
+    {
+        @SuppressWarnings("unused")
+        public static int writeDirectlyToChannel(ByteBuffer buf, @SuperCall Callable<Integer> zuper) throws Exception
+        {
+            if (isCaller(BigTableZeroCopyWriter.class.getName(), "write"))
+                throw new java.nio.channels.ClosedChannelException();
+            // different context; pass through
+            return zuper.call();
+        }
+
+        @SuppressWarnings("unused")
+        public static boolean append(UnfilteredRowIterator partition, @SuperCall Callable<Boolean> zuper) throws Exception
+        {
+            if (isCaller(CassandraIncomingFile.class.getName(), "read")) // handles compressed and non-compressed
+                throw new java.nio.channels.ClosedChannelException();
+            // different context; pass through
+            return zuper.call();
+        }
+
+        private static boolean isCaller(String klass, String method)
+        {
+            //TODO is there a cleaner way to check this?
+            StackTraceElement[] stack = Thread.currentThread().getStackTrace();
+            for (int i = 0; i < stack.length; i++)
+            {
+                StackTraceElement e = stack[i];
+                if (klass.equals(e.getClassName()) && method.equals(e.getMethodName()))
+                    return true;
+            }
+            return false;
+        }
+
+        public static void install(ClassLoader classLoader, Integer num)
+        {
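+            // the two rebases below cover both incoming streaming paths: zero copy streaming writes through
+            // SequentialWriter.writeDirectlyToChannel (from BigTableZeroCopyWriter.write), while regular streaming
+            // appends through RangeAwareSSTableWriter (from CassandraIncomingFile.read); throwing ClosedChannelException
+            // from either simulates the stream socket closing in the middle of the transfer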
+            new ByteBuddy().rebase(SequentialWriter.class)
+                           .method(named("writeDirectlyToChannel").and(takesArguments(1)))
+                           .intercept(MethodDelegation.to(BBHelper.class))
+                           .make()
+                           .load(classLoader, ClassLoadingStrategy.Default.INJECTION);
+
+            new ByteBuddy().rebase(RangeAwareSSTableWriter.class)
+                           .method(named("append").and(takesArguments(1)))
+                           .intercept(MethodDelegation.to(BBHelper.class))
+                           .make()
+                           .load(classLoader, ClassLoadingStrategy.Default.INJECTION);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/streaming/StreamingStatsDisabledTest.java b/test/distributed/org/apache/cassandra/distributed/test/streaming/StreamingStatsDisabledTest.java
new file mode 100644
index 0000000..f3d4394
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/streaming/StreamingStatsDisabledTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.streaming;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.distributed.util.QueryResultUtil;
+import org.apache.cassandra.streaming.StreamManager;
+
+public class StreamingStatsDisabledTest extends TestBaseImpl
+{
+    @Test
+    public void test() throws IOException
+    {
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withConfig(c -> c.with(Feature.values()).set("streaming_stats_enabled", false))
+                                           .start()))
+        {
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.users (user_id varchar, PRIMARY KEY (user_id));"));
+            cluster.stream().forEach(i -> i.nodetoolResult("disableautocompaction", KEYSPACE).asserts().success());
+
+            long expectedFiles = 10;
+            for (int i = 0; i < expectedFiles; i++)
+            {
+                cluster.get(1).executeInternal(withKeyspace("insert into %s.users(user_id) values (?)"), "dcapwell" + i);
+                cluster.get(1).flush(KEYSPACE);
+            }
+
+            cluster.get(2).nodetoolResult("rebuild", "--keyspace", KEYSPACE).asserts().success();
+            for (int nodeId : Arrays.asList(1, 2))
+                QueryResultUtil.assertThat(cluster.get(nodeId).executeInternalWithResult("SELECT * FROM system_views.streaming")).isEmpty();
+
+            // trigger streaming again
+            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.users(user_id) VALUES ('trigger streaming')"));
+            // mimic JMX
+            cluster.get(2).runOnInstance(() -> StreamManager.instance.setStreamingStatsEnabled(true));
+            cluster.get(2).nodetoolResult("repair", KEYSPACE).asserts().success();
+
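+            // node1 still has streaming stats disabled, so only node2 (re-enabled via StreamManager above) records the repair stream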
+            QueryResultUtil.assertThat(cluster.get(1).executeInternalWithResult("SELECT * FROM system_views.streaming")).isEmpty();
+            QueryResultUtil.assertThat(cluster.get(2).executeInternalWithResult("SELECT * FROM system_views.streaming")).hasSize(1);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/thresholds/AbstractClientSizeWarning.java b/test/distributed/org/apache/cassandra/distributed/test/thresholds/AbstractClientSizeWarning.java
new file mode 100644
index 0000000..2355403
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/thresholds/AbstractClientSizeWarning.java
@@ -0,0 +1,390 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.thresholds;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.function.Consumer;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.SimpleStatement;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.ICoordinator;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.SimpleQueryResult;
+import org.apache.cassandra.distributed.test.JavaDriverUtils;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.exceptions.ReadSizeAbortException;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.service.reads.thresholds.CoordinatorWarnings;
+import org.assertj.core.api.Condition;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public abstract class AbstractClientSizeWarning extends TestBaseImpl
+{
+    private static final String CQL_PK_READ = "SELECT * FROM " + KEYSPACE + ".tbl WHERE pk=1";
+    private static final String CQL_TABLE_SCAN = "SELECT * FROM " + KEYSPACE + ".tbl";
+
+    private static final Random RANDOM = new Random(0);
+    protected static ICluster<IInvokableInstance> CLUSTER;
+    protected static com.datastax.driver.core.Cluster JAVA_DRIVER;
+    protected static com.datastax.driver.core.Session JAVA_DRIVER_SESSION;
+
+    @BeforeClass
+    public static void setupClass() throws IOException
+    {
+        Cluster.Builder builder = Cluster.build(3);
+        builder.withConfig(c -> c.with(Feature.NATIVE_PROTOCOL, Feature.GOSSIP));
+        CLUSTER = builder.start();
+        JAVA_DRIVER = JavaDriverUtils.create(CLUSTER);
+        JAVA_DRIVER_SESSION = JAVA_DRIVER.connect();
+    }
+
+    protected abstract long totalWarnings();
+    protected abstract long totalAborts();
+    protected abstract void assertWarnings(List<String> warnings);
+    protected abstract void assertAbortWarnings(List<String> warnings);
+    protected boolean shouldFlush()
+    {
+        return false;
+    }
+
+    @Before
+    public void setup()
+    {
+        CLUSTER.schemaChange("DROP KEYSPACE IF EXISTS " + KEYSPACE);
+        init(CLUSTER);
+        // disable key cache so RowIndexEntry is read each time
+        CLUSTER.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v blob, PRIMARY KEY (pk, ck)) WITH caching = { 'keys' : 'NONE'}");
+    }
+
+    @Test
+    public void noWarningsSinglePartition()
+    {
+        noWarnings(CQL_PK_READ);
+    }
+
+    @Test
+    public void noWarningsScan()
+    {
+        noWarnings(CQL_TABLE_SCAN);
+    }
+
+    public void noWarnings(String cql)
+    {
+        CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, ?)", ConsistencyLevel.ALL, bytes(128));
+        CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 2, ?)", ConsistencyLevel.ALL, bytes(128));
+        if (shouldFlush())
+            CLUSTER.stream().forEach(i -> i.flush(KEYSPACE));
+
+        Consumer<List<String>> test = warnings ->
+                                      Assert.assertEquals(Collections.emptyList(), warnings);
+
+        for (boolean b : Arrays.asList(true, false))
+        {
+            enable(b);
+            checkpointHistogram();
+            SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
+            test.accept(result.warnings());
+            if (b)
+            {
+                assertHistogramUpdated();
+            }
+            else
+            {
+                assertHistogramNotUpdated();
+            }
+            test.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
+            if (b)
+            {
+                assertHistogramUpdated();
+            }
+            else
+            {
+                assertHistogramNotUpdated();
+            }
+            assertWarnAborts(0, 0, 0);
+        }
+    }
+
+    @Test
+    public void warnThresholdSinglePartition()
+    {
+        warnThreshold(CQL_PK_READ, false);
+    }
+
+    @Test
+    public void warnThresholdScan()
+    {
+        warnThreshold(CQL_TABLE_SCAN, false);
+    }
+
+    @Test
+    public void warnThresholdSinglePartitionWithReadRepair()
+    {
+        warnThreshold(CQL_PK_READ, true);
+    }
+
+    @Test
+    public void warnThresholdScanWithReadRepair()
+    {
+        warnThreshold(CQL_TABLE_SCAN, true);
+    }
+
+    protected int warnThresholdRowCount()
+    {
+        return 2;
+    }
+
+    public void warnThreshold(String cql, boolean triggerReadRepair)
+    {
+        for (int i = 0; i < warnThresholdRowCount(); i++)
+        {
+            if (triggerReadRepair)
+            {
+                int finalI = i;
+                // cell timestamps will not match (even though the values match) which will trigger a read-repair
+                CLUSTER.stream().forEach(node -> node.executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", finalI + 1, bytes(512)));
+            }
+            else
+            {
+                CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", ConsistencyLevel.ALL, i + 1, bytes(512));
+            }
+        }
+
+        if (shouldFlush())
+            CLUSTER.stream().forEach(i -> i.flush(KEYSPACE));
+
+        enable(true);
+        checkpointHistogram();
+        SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
+        assertWarnings(result.warnings());
+        assertHistogramUpdated();
+        assertWarnAborts(1, 0, 0);
+        assertWarnings(driverQueryAll(cql).getExecutionInfo().getWarnings());
+        assertHistogramUpdated();
+        assertWarnAborts(2, 0, 0);
+
+        enable(false);
+        result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
+        assertThat(result.warnings()).isEmpty();
+        assertHistogramNotUpdated();
+        assertThat(driverQueryAll(cql).getExecutionInfo().getWarnings()).isEmpty();
+        assertHistogramNotUpdated();
+        assertWarnAborts(2, 0, 0);
+    }
+
+    @Test
+    public void failThresholdSinglePartitionTrackingEnabled() throws UnknownHostException
+    {
+        failThresholdEnabled(CQL_PK_READ);
+    }
+
+    @Test
+    public void failThresholdSinglePartitionTrackingDisabled() throws UnknownHostException
+    {
+        failThresholdDisabled(CQL_PK_READ);
+    }
+
+    @Test
+    public void failThresholdScanTrackingEnabled() throws UnknownHostException
+    {
+        failThresholdEnabled(CQL_TABLE_SCAN);
+    }
+
+    @Test
+    public void failThresholdScanTrackingDisabled() throws UnknownHostException
+    {
+        failThresholdDisabled(CQL_TABLE_SCAN);
+    }
+
+    protected int failThresholdRowCount()
+    {
+        return 5;
+    }
+
+    public void failThresholdEnabled(String cql) throws UnknownHostException
+    {
+        ICoordinator node = CLUSTER.coordinator(1);
+        for (int i = 0; i < failThresholdRowCount(); i++)
+            node.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", ConsistencyLevel.ALL, i + 1, bytes(512));
+
+        if (shouldFlush())
+            CLUSTER.stream().forEach(i -> i.flush(KEYSPACE));
+
+        enable(true);
+        checkpointHistogram();
+        List<String> warnings = CLUSTER.get(1).callsOnInstance(() -> {
+            ClientWarn.instance.captureWarnings();
+            CoordinatorWarnings.init();
+            try
+            {
+                QueryProcessor.execute(cql, org.apache.cassandra.db.ConsistencyLevel.ALL, QueryState.forInternalCalls());
+                Assert.fail("Expected query failure");
+            }
+            catch (ReadSizeAbortException e)
+            {
+                // expected, client transport returns an error message and includes client warnings
+            }
+            CoordinatorWarnings.done();
+            CoordinatorWarnings.reset();
+            return ClientWarn.instance.getWarnings();
+        }).call();
+        assertAbortWarnings(warnings);
+        assertHistogramUpdated();
+        assertWarnAborts(0, 1, 1);
+
+        try
+        {
+            driverQueryAll(cql);
+            Assert.fail("Query should have thrown ReadFailureException");
+        }
+        catch (com.datastax.driver.core.exceptions.ReadFailureException e)
+        {
+            // without changing the client we can't produce a better message...
+            // the client does NOT include the message sent from the server in the exception, so the server-side
+            // message is not useful in this case
+            assertThat(e.getMessage()).contains("responses were required but only 0 replica responded");
+            ImmutableSet<InetAddress> expectedKeys = ImmutableSet.of(InetAddress.getByAddress(new byte[]{ 127, 0, 0, 1 }), InetAddress.getByAddress(new byte[]{ 127, 0, 0, 2 }), InetAddress.getByAddress(new byte[]{ 127, 0, 0, 3 }));
+            assertThat(e.getFailuresMap())
+            .hasSizeBetween(1, 3)
+            // the coordinator changes from run to run, so we can't assert the full map as the key is dynamic... instead assert the domain of keys and the single expected value
+            .containsValue(RequestFailureReason.READ_SIZE.code)
+            .hasKeySatisfying(new Condition<InetAddress>() {
+                public boolean matches(InetAddress value)
+                {
+                    return expectedKeys.contains(value);
+                }
+            });
+        }
+        assertHistogramUpdated();
+        assertWarnAborts(0, 2, 1);
+    }
+
+    public void failThresholdDisabled(String cql) throws UnknownHostException
+    {
+        ICoordinator node = CLUSTER.coordinator(1);
+        for (int i = 0; i < failThresholdRowCount(); i++)
+            node.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", ConsistencyLevel.ALL, i + 1, bytes(512));
+
+        if (shouldFlush())
+            CLUSTER.stream().forEach(i -> i.flush(KEYSPACE));
+
+        // query should no longer fail
+        enable(false);
+        checkpointHistogram();
+        SimpleQueryResult result = node.executeWithResult(cql, ConsistencyLevel.ALL);
+        assertThat(result.warnings()).isEmpty();
+        assertHistogramNotUpdated();
+        assertThat(driverQueryAll(cql).getExecutionInfo().getWarnings()).isEmpty();
+        assertHistogramNotUpdated();
+        assertWarnAborts(0, 0, 0);
+    }
+
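+    // toggles the read-thresholds feature on every node so each scenario can run with tracking enabled and disabled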
+    protected static void enable(boolean value)
+    {
+        CLUSTER.stream().forEach(i -> i.runOnInstance(() -> DatabaseDescriptor.setReadThresholdsEnabled(value)));
+    }
+
+    protected static ByteBuffer bytes(int size)
+    {
+        byte[] b = new byte[size];
+        RANDOM.nextBytes(b);
+        return ByteBuffer.wrap(b);
+    }
+
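+    // runs the query through the external Java driver at CL=ALL so warnings returned over the native protocol are also checked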
+    protected static ResultSet driverQueryAll(String cql)
+    {
+        return JAVA_DRIVER_SESSION.execute(new SimpleStatement(cql).setConsistencyLevel(com.datastax.driver.core.ConsistencyLevel.ALL));
+    }
+
+    protected abstract long[] getHistogram();
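+    // snapshot of the per-node histogram counters taken at the last checkpoint; used by
+    // assertHistogramUpdated/assertHistogramNotUpdated to detect whether any node updated its counter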
+    private static long[] previous = new long[0];
+    protected void assertHistogramUpdated()
+    {
+        long[] latestCount = getHistogram();
+        try
+        {
+            // why notEquals?  timing can cause 1 replica to not process before the failure makes it to the test,
+            // so it is possible 1 replica was not updated while the others were; expecting every replica
+            // to update would make the test flaky
+            assertThat(latestCount).isNotEqualTo(previous);
+        }
+        finally
+        {
+            previous = latestCount;
+        }
+    }
+
+    protected void assertHistogramNotUpdated()
+    {
+        long[] latestCount = getHistogram();
+        try
+        {
+            assertThat(latestCount).isEqualTo(previous);
+        }
+        finally
+        {
+            previous = latestCount;
+        }
+    }
+
+    private void checkpointHistogram()
+    {
+        previous = getHistogram();
+    }
+
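+    // the ClientRequest abort counters accumulate across queries, so track the expected running total
+    // rather than asserting an absolute value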
+    private static long GLOBAL_READ_ABORTS = 0;
+    protected void assertWarnAborts(int warns, int aborts, int globalAborts)
+    {
+        assertThat(totalWarnings()).as("warnings").isEqualTo(warns);
+        assertThat(totalAborts()).as("aborts").isEqualTo(aborts);
+        long expectedGlobalAborts = GLOBAL_READ_ABORTS + globalAborts;
+        assertThat(totalReadAborts()).as("global aborts").isEqualTo(expectedGlobalAborts);
+        GLOBAL_READ_ABORTS = expectedGlobalAborts;
+    }
+
+    protected static long totalReadAborts()
+    {
+        return CLUSTER.stream().mapToLong(i ->
+                                          i.metrics().getCounter("org.apache.cassandra.metrics.ClientRequest.Aborts.Read-ALL")
+                                          + i.metrics().getCounter("org.apache.cassandra.metrics.ClientRequest.Aborts.RangeSlice")
+        ).sum();
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/thresholds/CoordinatorReadSizeWarningTest.java b/test/distributed/org/apache/cassandra/distributed/test/thresholds/CoordinatorReadSizeWarningTest.java
new file mode 100644
index 0000000..31469af
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/thresholds/CoordinatorReadSizeWarningTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.thresholds;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.junit.*;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.KIBIBYTES;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * ReadSize client warn/abort is coordinator only, so the fact that ClientMetrics is coordinator only does not
+ * impact the user experience.
+ */
+public class CoordinatorReadSizeWarningTest extends AbstractClientSizeWarning
+{
+    @BeforeClass
+    public static void setupClass() throws IOException
+    {
+        AbstractClientSizeWarning.setupClass();
+
+        // set up thresholds after init to avoid driver issues while loading;
+        // the test uses a rather small limit, which would cause the driver to fail while loading metadata
+        CLUSTER.stream().forEach(i -> i.runOnInstance(() -> {
+            DatabaseDescriptor.setCoordinatorReadSizeWarnThreshold(new DataStorageSpec.LongBytesBound(1, KIBIBYTES));
+            DatabaseDescriptor.setCoordinatorReadSizeFailThreshold(new DataStorageSpec.LongBytesBound(2, KIBIBYTES));
+        }));
+    }
+
+    private static void assertPrefix(String expectedPrefix, String actual)
+    {
+        if (!actual.startsWith(expectedPrefix))
+            throw new AssertionError(String.format("expected \"%s\" to begin with \"%s\"", actual, expectedPrefix));
+    }
+
+    @Override
+    protected void assertWarnings(List<String> warnings)
+    {
+        assertThat(warnings).hasSize(1);
+        assertPrefix("Read on table " + KEYSPACE + ".tbl has exceeded the size warning threshold", warnings.get(0));
+    }
+
+    @Override
+    protected void assertAbortWarnings(List<String> warnings)
+    {
+        assertThat(warnings).hasSize(1);
+        assertPrefix("Read on table " + KEYSPACE + ".tbl has exceeded the size failure threshold", warnings.get(0));
+    }
+
+    @Override
+    protected long[] getHistogram()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.CoordinatorReadSize." + KEYSPACE)).toArray();
+    }
+
+    @Override
+    protected long totalWarnings()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.CoordinatorReadSizeWarnings." + KEYSPACE)).sum();
+    }
+
+    @Override
+    protected long totalAborts()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.CoordinatorReadSizeAborts." + KEYSPACE)).sum();
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/thresholds/LocalReadSizeWarningTest.java b/test/distributed/org/apache/cassandra/distributed/test/thresholds/LocalReadSizeWarningTest.java
new file mode 100644
index 0000000..31a9415
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/thresholds/LocalReadSizeWarningTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.thresholds;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.junit.BeforeClass;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.KIBIBYTES;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class LocalReadSizeWarningTest extends AbstractClientSizeWarning
+{
+    @BeforeClass
+    public static void setupClass() throws IOException
+    {
+        AbstractClientSizeWarning.setupClass();
+
+        // set up thresholds after init to avoid driver issues while loading;
+        // the test uses a rather small limit, which would cause the driver to fail while loading metadata
+        CLUSTER.stream().forEach(i -> i.runOnInstance(() -> {
+            // disable coordinator version
+            DatabaseDescriptor.setCoordinatorReadSizeWarnThreshold(null);
+            DatabaseDescriptor.setCoordinatorReadSizeFailThreshold(null);
+
+            DatabaseDescriptor.setLocalReadSizeWarnThreshold(new DataStorageSpec.LongBytesBound(1, KIBIBYTES));
+            DatabaseDescriptor.setLocalReadSizeFailThreshold(new DataStorageSpec.LongBytesBound(2, KIBIBYTES));
+        }));
+    }
+
+    @Override
+    protected void assertWarnings(List<String> warnings)
+    {
+        assertThat(warnings).hasSize(1);
+        assertThat(warnings.get(0)).contains("(see local_read_size_warn_threshold)").contains("and issued local read size warnings for query");
+    }
+
+    @Override
+    protected void assertAbortWarnings(List<String> warnings)
+    {
+        assertThat(warnings).hasSize(1);
+        assertThat(warnings.get(0)).contains("(see local_read_size_fail_threshold)").contains("aborted the query");
+    }
+
+    @Override
+    protected long[] getHistogram()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.LocalReadSize." + KEYSPACE)).toArray();
+    }
+
+    @Override
+    protected long totalWarnings()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.LocalReadSizeWarnings." + KEYSPACE)).sum();
+    }
+
+    @Override
+    protected long totalAborts()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.LocalReadSizeAborts." + KEYSPACE)).sum();
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/thresholds/RowIndexSizeWarningTest.java b/test/distributed/org/apache/cassandra/distributed/test/thresholds/RowIndexSizeWarningTest.java
new file mode 100644
index 0000000..33e6cd6
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/thresholds/RowIndexSizeWarningTest.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.thresholds;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.junit.Assume;
+import org.junit.BeforeClass;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.KIBIBYTES;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class RowIndexSizeWarningTest extends AbstractClientSizeWarning
+{
+    @BeforeClass
+    public static void setupClass() throws IOException
+    {
+        AbstractClientSizeWarning.setupClass();
+
+        CLUSTER.stream().forEach(i -> i.runOnInstance(() -> {
+            DatabaseDescriptor.setRowIndexReadSizeWarnThreshold(new DataStorageSpec.LongBytesBound(1, KIBIBYTES));
+            DatabaseDescriptor.setRowIndexReadSizeFailThreshold(new DataStorageSpec.LongBytesBound(2, KIBIBYTES));
+
+            // hack to force multiple index entries
+            DatabaseDescriptor.setColumnIndexCacheSize(1 << 20);
+            DatabaseDescriptor.setColumnIndexSize(0);
+        }));
+    }
+
+    @Override
+    protected boolean shouldFlush()
+    {
+        // need to flush as RowIndexEntry is at the SSTable level
+        return true;
+    }
+
+    @Override
+    protected int warnThresholdRowCount()
+    {
+        return 15;
+    }
+
+    @Override
+    protected int failThresholdRowCount()
+    {
+        // since the RowIndexEntry grows slower than a partition, we need even more rows to trigger the threshold
+        return 40;
+    }
+
+    @Override
+    public void noWarningsScan()
+    {
+        Assume.assumeFalse("Ignore Scans", true);
+    }
+
+    @Override
+    public void warnThresholdScan()
+    {
+        Assume.assumeFalse("Ignore Scans", true);
+    }
+
+    @Override
+    public void warnThresholdScanWithReadRepair()
+    {
+        Assume.assumeFalse("Ignore Scans", true);
+    }
+
+    @Override
+    public void failThresholdScanTrackingEnabled()
+    {
+        Assume.assumeFalse("Ignore Scans", true);
+    }
+
+    @Override
+    public void failThresholdScanTrackingDisabled()
+    {
+        Assume.assumeFalse("Ignore Scans", true);
+    }
+
+    @Override
+    protected void assertWarnings(List<String> warnings)
+    {
+        assertThat(warnings).hasSize(1);
+        assertThat(warnings.get(0)).contains("(see row_index_size_warn_threshold)").contains("bytes in RowIndexEntry and issued warnings for query");
+    }
+
+    @Override
+    protected void assertAbortWarnings(List<String> warnings)
+    {
+        assertThat(warnings).hasSize(1);
+        assertThat(warnings.get(0)).contains("(see row_index_size_fail_threshold)").contains("bytes in RowIndexEntry and aborted the query");
+    }
+
+    @Override
+    protected long[] getHistogram()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.RowIndexSize." + KEYSPACE)).toArray();
+    }
+
+    @Override
+    protected long totalWarnings()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.RowIndexSizeWarnings." + KEYSPACE)).sum();
+    }
+
+    @Override
+    protected long totalAborts()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.RowIndexSizeAborts." + KEYSPACE)).sum();
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/thresholds/TombstoneCountWarningTest.java b/test/distributed/org/apache/cassandra/distributed/test/thresholds/TombstoneCountWarningTest.java
new file mode 100644
index 0000000..5e409cb
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/thresholds/TombstoneCountWarningTest.java
@@ -0,0 +1,443 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.thresholds;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Consumer;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.SimpleStatement;
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import net.bytebuddy.implementation.bind.annotation.SuperCall;
+import net.bytebuddy.implementation.bind.annotation.This;
+import org.apache.cassandra.concurrent.SEPExecutor;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.SimpleQueryResult;
+import org.apache.cassandra.distributed.test.JavaDriverUtils;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.exceptions.ReadFailureException;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.exceptions.TombstoneAbortException;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.service.reads.ReadCallback;
+import org.apache.cassandra.service.reads.thresholds.CoordinatorWarnings;
+import org.apache.cassandra.utils.Shared;
+import org.assertj.core.api.Assertions;
+import org.assertj.core.api.Condition;
+
+import static net.bytebuddy.matcher.ElementMatchers.named;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TombstoneCountWarningTest extends TestBaseImpl
+{
+    private static final Logger logger = LoggerFactory.getLogger(TombstoneCountWarningTest.class);
+
+    private static final int TOMBSTONE_WARN = 50;
+    private static final int TOMBSTONE_FAIL = 100;
+    private static ICluster<IInvokableInstance> CLUSTER;
+    private static com.datastax.driver.core.Cluster JAVA_DRIVER;
+    private static com.datastax.driver.core.Session JAVA_DRIVER_SESSION;
+
+    @BeforeClass
+    public static void setupClass() throws IOException
+    {
+        logger.info("[test step : @BeforeClass] setupClass");
+        Cluster.Builder builder = Cluster.build(3);
+        builder.withConfig(c -> c.set("tombstone_warn_threshold", TOMBSTONE_WARN)
+                                 .set("tombstone_failure_threshold", TOMBSTONE_FAIL)
+                                 .with(Feature.NATIVE_PROTOCOL, Feature.GOSSIP));
+        builder.withInstanceInitializer(BB::install);
+        CLUSTER = builder.start();
+        JAVA_DRIVER = JavaDriverUtils.create(CLUSTER);
+        JAVA_DRIVER_SESSION = JAVA_DRIVER.connect();
+    }
+
+    @AfterClass
+    public static void teardown()
+    {
+        logger.info("[test step : @AfterClass] teardown");
+        if (JAVA_DRIVER_SESSION != null)
+            JAVA_DRIVER_SESSION.close();
+        if (JAVA_DRIVER != null)
+            JAVA_DRIVER.close();
+    }
+
+    @Before
+    public void setup()
+    {
+        logger.info("[test step : @Before] setup");
+        CLUSTER.schemaChange("DROP KEYSPACE IF EXISTS " + KEYSPACE);
+        init(CLUSTER);
+        CLUSTER.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+    }
+
+    private static void enable(boolean value)
+    {
+        CLUSTER.stream().forEach(i -> i.runOnInstance(() -> DatabaseDescriptor.setReadThresholdsEnabled(value)));
+    }
+
+    @Test
+    public void noWarningsSinglePartition()
+    {
+        logger.info("[test step : @Test] noWarningsSinglePartition");
+        noWarnings("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk=1");
+    }
+
+    @Test
+    public void noWarningsScan()
+    {
+        logger.info("[test step : @Test] noWarningsScan");
+        noWarnings("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk=1");
+    }
+
+    public void noWarnings(String cql)
+    {
+        Consumer<List<String>> test = warnings ->
+                                      Assert.assertEquals(Collections.emptyList(), warnings);
+
+        for (int i=0; i<TOMBSTONE_WARN; i++)
+            CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, null)", ConsistencyLevel.ALL, i);
+
+        for (boolean b : Arrays.asList(true, false))
+        {
+            enable(b);
+
+            SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
+            test.accept(result.warnings());
+            test.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
+
+            assertWarnAborts(0, 0, 0);
+        }
+    }
+
+    @Test
+    public void warnThresholdSinglePartition()
+    {
+        logger.info("[test step : @Test] warnThresholdSinglePartition");
+        warnThreshold("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", false);
+    }
+
+    @Test
+    public void warnThresholdScan()
+    {
+        logger.info("[test step : @Test] warnThresholdScan");
+        warnThreshold("SELECT * FROM " + KEYSPACE + ".tbl", true);
+    }
+
+    private void warnThreshold(String cql, boolean isScan)
+    {
+        for (int i = 0; i < TOMBSTONE_WARN + 1; i++)
+            CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, null)", ConsistencyLevel.ALL, i);
+
+        enable(true);
+        Consumer<List<String>> testEnabled = warnings ->
+                                             Assertions.assertThat(Iterables.getOnlyElement(warnings))
+                                                       .contains("nodes scanned up to " + (TOMBSTONE_WARN + 1) + " tombstones and issued tombstone warnings for query " + cql);
+
+        SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
+        testEnabled.accept(result.warnings());
+        assertWarnAborts(1, 0, 0);
+        testEnabled.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
+        assertWarnAborts(2, 0, 0);
+
+        enable(false);
+        Consumer<List<String>> testDisabled = warnings -> {
+            // client warnings are currently coordinator only, so if present only 1 is expected
+            if (isScan)
+            {
+                // Scans perform multiple ReadCommands, which will not propagate the warnings to the top-level coordinator, so no warnings are expected
+                Assertions.assertThat(warnings).isEmpty();
+            }
+            else
+            {
+                Assertions.assertThat(Iterables.getOnlyElement(warnings))
+                          .startsWith("Read " + (TOMBSTONE_WARN + 1) + " live rows and " + (TOMBSTONE_WARN + 1) + " tombstone cells for query " + cql);
+            }
+        };
+        result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
+        testDisabled.accept(result.warnings());
+        assertWarnAborts(2, 0, 0);
+        testDisabled.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
+        assertWarnAborts(2, 0, 0);
+    }
+
+    @Test
+    public void failThresholdSinglePartition() throws UnknownHostException
+    {
+        logger.info("[test step : @Test] failThresholdSinglePartition");
+        failThreshold("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", false);
+    }
+
+    @Test
+    public void failThresholdScan() throws UnknownHostException
+    {
+        logger.info("[test step : @Test] failThresholdScan");
+        failThreshold("SELECT * FROM " + KEYSPACE + ".tbl", true);
+    }
+
+    private void failThreshold(String cql, boolean isScan) throws UnknownHostException
+    {
+        for (int i = 0; i < TOMBSTONE_FAIL + 1; i++)
+            CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, null)", ConsistencyLevel.ALL, i);
+
+        enable(true);
+        List<String> warnings = CLUSTER.get(1).callsOnInstance(() -> {
+            ClientWarn.instance.captureWarnings();
+            CoordinatorWarnings.init();
+            try
+            {
+                QueryProcessor.execute(cql, org.apache.cassandra.db.ConsistencyLevel.ALL, QueryState.forInternalCalls());
+                Assert.fail("Expected query failure");
+            }
+            catch (TombstoneAbortException e)
+            {
+                Assert.assertTrue(e.nodes >= 1 && e.nodes <= 3);
+                Assert.assertEquals(TOMBSTONE_FAIL + 1, e.tombstones);
+                // expected, client transport returns an error message and includes client warnings
+            }
+            CoordinatorWarnings.done();
+            CoordinatorWarnings.reset();
+            return ClientWarn.instance.getWarnings();
+        }).call();
+        Assertions.assertThat(Iterables.getOnlyElement(warnings))
+                  .contains("nodes scanned over " + (TOMBSTONE_FAIL + 1) + " tombstones and aborted the query " + cql);
+
+        assertWarnAborts(0, 1, 1);
+
+        try
+        {
+            driverQueryAll(cql);
+            Assert.fail("Query should have thrown ReadFailureException");
+        }
+        catch (com.datastax.driver.core.exceptions.ReadFailureException e)
+        {
+            // without changing the client we can't produce a better message...
+            // the client does NOT include the message sent from the server in the exception, so the server-side
+            // message is not useful in this case
+            Assertions.assertThat(e.getMessage()).contains("responses were required but only 0 replica responded"); // can't include ', 3 failed)' as sometimes it's 2
+
+            ImmutableSet<InetAddress> expectedKeys = ImmutableSet.of(InetAddress.getByAddress(new byte[]{ 127, 0, 0, 1 }), InetAddress.getByAddress(new byte[]{ 127, 0, 0, 2 }), InetAddress.getByAddress(new byte[]{ 127, 0, 0, 3 }));
+            assertThat(e.getFailuresMap())
+            .hasSizeBetween(1, 3)
+            // the coordinator changes from run to run, so we can't assert the full map as the key is dynamic... instead assert the domain of keys and the single expected value
+            .containsValue(RequestFailureReason.READ_TOO_MANY_TOMBSTONES.code)
+            .hasKeySatisfying(new Condition<InetAddress>() {
+                public boolean matches(InetAddress value)
+                {
+                    return expectedKeys.contains(value);
+                }
+            });
+        }
+
+        assertWarnAborts(0, 2, 1);
+
+        // when disabled, warnings only happen on the coordinator, and the coordinator may not be the one replying
+        // to every query
+        enable(false);
+        State.blockFor(CLUSTER.get(1).config().broadcastAddress());
+        warnings = CLUSTER.get(1).callsOnInstance(() -> {
+            ClientWarn.instance.captureWarnings();
+            try
+            {
+                QueryProcessor.execute(cql, org.apache.cassandra.db.ConsistencyLevel.ALL, QueryState.forInternalCalls());
+                Assert.fail("Expected query failure");
+            }
+            catch (ReadFailureException e)
+            {
+                Assertions.assertThat(e).isNotInstanceOf(TombstoneAbortException.class);
+                Assertions.assertThat(e.failureReasonByEndpoint).isNotEmpty();
+                Assertions.assertThat(e.failureReasonByEndpoint.values())
+                          .as("Non READ_TOO_MANY_TOMBSTONES exists")
+                          .allMatch(RequestFailureReason.READ_TOO_MANY_TOMBSTONES::equals);
+            }
+            logger.warn("Checking warnings...");
+            return ClientWarn.instance.getWarnings();
+        }).call();
+        // client warnings are currently coordinator only, so if present only 1 is expected
+        if (isScan)
+        {
+            // Scans perform multiple ReadCommands, which will not propagate the warnings to the top-level coordinator, so no warnings are expected
+            Assertions.assertThat(warnings).isNull();
+        }
+        else
+        {
+            Assertions.assertThat(Iterables.getOnlyElement(warnings))
+                      .startsWith("Read " + TOMBSTONE_FAIL + " live rows and " + (TOMBSTONE_FAIL + 1) + " tombstone cells for query " + cql);
+        }
+
+        assertWarnAborts(0, 2, 0);
+
+        State.blockFor(CLUSTER.get(1).config().broadcastAddress());
+        try
+        {
+            driverQueryAll(cql);
+            Assert.fail("Query should have thrown ReadFailureException");
+        }
+        catch (com.datastax.driver.core.exceptions.ReadFailureException e)
+        {
+            // not checking the message as different cases exist for the failure, so the fact that this failed is enough
+
+            Assertions.assertThat(e.getFailuresMap())
+                      .isNotEmpty();
+            Assertions.assertThat(e.getFailuresMap().values())
+                      .as("Non READ_TOO_MANY_TOMBSTONES exists")
+                      .allMatch(i -> i.equals(RequestFailureReason.READ_TOO_MANY_TOMBSTONES.code));
+        }
+
+        assertWarnAborts(0, 2, 0);
+    }
+
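+    // the cluster-wide abort counters are never reset between tests, so track the expected running total
+    // rather than asserting an absolute value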
+    private static long GLOBAL_READ_ABORTS = 0;
+    private static void assertWarnAborts(int warns, int aborts, int globalAborts)
+    {
+        Assertions.assertThat(totalWarnings()).as("warnings").isEqualTo(warns);
+        Assertions.assertThat(totalAborts()).as("aborts").isEqualTo(aborts);
+        long expectedGlobalAborts = GLOBAL_READ_ABORTS + globalAborts;
+        Assertions.assertThat(totalReadAborts()).as("global aborts").isEqualTo(expectedGlobalAborts);
+        GLOBAL_READ_ABORTS = expectedGlobalAborts;
+    }
+
+    private static long totalWarnings()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.ClientTombstoneWarnings." + KEYSPACE)).sum();
+    }
+
+    private static long totalAborts()
+    {
+        return CLUSTER.stream().mapToLong(i -> i.metrics().getCounter("org.apache.cassandra.metrics.keyspace.ClientTombstoneAborts." + KEYSPACE)).sum();
+    }
+
+    private static long totalReadAborts()
+    {
+        return CLUSTER.stream().mapToLong(i ->
+                                          i.metrics().getCounter("org.apache.cassandra.metrics.ClientRequest.Aborts.Read-ALL")
+                                          + i.metrics().getCounter("org.apache.cassandra.metrics.ClientRequest.Aborts.RangeSlice")
+        ).sum();
+    }
+
+    private static ResultSet driverQueryAll(String cql)
+    {
+        return JAVA_DRIVER_SESSION.execute(new SimpleStatement(cql).setConsistencyLevel(com.datastax.driver.core.ConsistencyLevel.ALL));
+    }
+
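+    // coordination between the test thread and the in-JVM instance: the test registers an endpoint via blockFor,
+    // and the instrumented ReadCallback waits (syncAndClear) until onFailure has been seen for that endpoint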
+    @Shared
+    public static class State
+    {
+        // use InetSocketAddress as InetAddressAndPort is @Isolated which means equality doesn't work due to different
+        // ClassLoaders; InetSocketAddress is @Shared so safe to use between app and cluster class loaders
+        public static volatile InetSocketAddress blockFor = null;
+        public static volatile CompletableFuture<Void> promise = null;
+
+        // called on main thread
+        public static void blockFor(InetSocketAddress address)
+        {
+            blockFor = address;
+            promise = new CompletableFuture<>();
+        }
+
+        // called in C* threads; non-test threads
+        public static void onFailure(InetSocketAddress address)
+        {
+            if (address.equals(blockFor))
+                promise.complete(null);
+        }
+
+        // called on main thread
+        public static void syncAndClear()
+        {
+            if (blockFor != null)
+            {
+                promise.join();
+                blockFor = null;
+                promise = null;
+            }
+        }
+    }
+
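+    // byte-buddy instrumentation, installed on node 1 only: it gates ReadCallback.awaitResults on State, records
+    // onFailure callbacks, and forces SEPExecutor to schedule tasks instead of running them inline (see below)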
+    public static class BB
+    {
+        private static void install(ClassLoader cl, int instanceId)
+        {
+            if (instanceId != 1)
+                return;
+            new ByteBuddy().rebase(ReadCallback.class)
+                           .method(named("awaitResults"))
+                           .intercept(MethodDelegation.to(BB.class))
+                           .method(named("onFailure"))
+                           .intercept(MethodDelegation.to(BB.class))
+                           .make()
+                           .load(cl, ClassLoadingStrategy.Default.INJECTION);
+            new ByteBuddy().rebase(SEPExecutor.class)
+                           .method(named("maybeExecuteImmediately"))
+                           .intercept(MethodDelegation.to(BB.class))
+                           .make()
+                           .load(cl, ClassLoadingStrategy.Default.INJECTION);
+        }
+
+        @SuppressWarnings("unused")
+        public static void awaitResults(@SuperCall Runnable zuper)
+        {
+            State.syncAndClear();
+            zuper.run();
+        }
+
+        @SuppressWarnings("unused")
+        public static void onFailure(InetAddressAndPort from, RequestFailureReason failureReason, @SuperCall Runnable zuper) throws Exception
+        {
+            State.onFailure(new InetSocketAddress(from.getAddress(), from.getPort()));
+            zuper.run();
+        }
+
+        // make sure to schedule the task rather than running inline...
+        // this is important as the read may block on the local version, which can cause the test to include it rather
+        // than block waiting; by scheduling we make sure it's always fair
+        @SuppressWarnings("unused")
+        public static void maybeExecuteImmediately(Runnable task, @This SEPExecutor executor)
+        {
+            executor.execute(task);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java
new file mode 100644
index 0000000..db5e2e1
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.junit.Test;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+import static org.junit.Assert.assertEquals;
+
+public class BatchUpgradeTest extends UpgradeTestBase
+{
+    @Test
+    public void batchTest() throws Throwable
+    {
+        new TestCase()
+        .nodes(2)
+        .nodesToUpgrade(2)
+        .upgradesFrom(v40).setup((cluster) -> {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".users (" +
+                                 "userid uuid PRIMARY KEY," +
+                                 "firstname ascii," +
+                                 "lastname ascii," +
+                                 "age int) WITH COMPACT STORAGE");
+        }).runAfterNodeUpgrade((cluster, node) -> {
+            cluster.coordinator(2).execute("BEGIN BATCH\n" +
+                                           "    UPDATE " + KEYSPACE + ".users SET age = 37 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479\n" +
+                                           "    DELETE firstname, lastname FROM " + KEYSPACE + ".users WHERE userid = 550e8400-e29b-41d4-a716-446655440000\n" +
+                                           "APPLY BATCH", ConsistencyLevel.ALL);
+        }).runAfterClusterUpgrade((cluster) -> {
+            Util.spinAssertEquals(0, () -> cluster.get(1).executeInternal("select * from system.batches").length, 10);
+            Util.spinAssertEquals(0, () -> cluster.get(2).executeInternal("select * from system.batches").length, 10);
+            assertEquals(0, cluster.get(1).logs().grep("ClassCastException").getResult().size());
+            assertEquals(0, cluster.get(2).logs().grep("ClassCastException").getResult().size());
+        })
+        .run();
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingWithProtocolTester.java b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingWithProtocolTester.java
index 2683e7d..6dd3bd9 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingWithProtocolTester.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingWithProtocolTester.java
@@ -61,7 +61,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(1)
-        .singleUpgrade(initialVersion(), CURRENT)
+        .singleUpgrade(initialVersion())
         .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
         .setup(c -> {
             c.schemaChange(withKeyspace("CREATE TABLE %s.t (pk text, ck text, v text, " +
@@ -88,7 +88,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(1)
-        .singleUpgrade(initialVersion(), CURRENT)
+        .singleUpgrade(initialVersion())
         .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
         .setup(c -> {
             c.schemaChange(withKeyspace("CREATE TABLE %s.t (pk text, ck1 text, ck2 text, v text, " +
@@ -114,7 +114,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(1)
-        .singleUpgrade(initialVersion(), CURRENT)
+        .singleUpgrade(initialVersion())
         .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
         .setup(c -> {
             c.schemaChange(withKeyspace("CREATE TABLE %s.t (pk text PRIMARY KEY, v1 text, v2 text) WITH COMPACT STORAGE"));
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingWithProtocolV40Test.java b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingWithProtocolV40Test.java
new file mode 100644
index 0000000..d245d9f
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingWithProtocolV40Test.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import com.vdurmont.semver4j.Semver;
+
+/**
+ * {@link CompactStoragePagingWithProtocolTester} for v40 -> CURRENT upgrade path.
+ */
+public class CompactStoragePagingWithProtocolV40Test extends CompactStoragePagingWithProtocolTester
+{
+    @Override
+    protected Semver initialVersion()
+    {
+        return v40;
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/ConfigCompatabilityTestGenerate.java b/test/distributed/org/apache/cassandra/distributed/upgrade/ConfigCompatabilityTestGenerate.java
new file mode 100644
index 0000000..518fd14
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/ConfigCompatabilityTestGenerate.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.upgrade;
+
+import java.io.File;
+import java.util.Arrays;
+
+import org.apache.commons.lang3.ArrayUtils;
+
+import com.vdurmont.semver4j.Semver;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.distributed.UpgradeableCluster;
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.impl.AbstractCluster;
+import org.apache.cassandra.distributed.shared.Versions;
+
+import static org.apache.cassandra.config.ConfigCompatabilityTest.TEST_DIR;
+import static org.apache.cassandra.config.ConfigCompatabilityTest.dump;
+import static org.apache.cassandra.config.ConfigCompatabilityTest.toTree;
+
+/**
+ * This class generates YAML dumps per version; this is a manual process and should be updated for each release.
+ */
+public class ConfigCompatabilityTestGenerate
+{
+    public static void main(String[] args) throws Throwable
+    {
+        ICluster.setup();
+        Versions versions = Versions.find();
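+        // for each legacy version, start a single-node cluster of that version and dump its Config class tree to YAML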
+        for (Semver version : Arrays.asList(UpgradeTestBase.v30, UpgradeTestBase.v3X, UpgradeTestBase.v40))
+        {
+            File path = new File(TEST_DIR, "version=" + version + ".yml");
+            path.getParentFile().mkdirs();
+            Versions.Version latest = versions.getLatest(version);
+            // this class isn't present on the older version's classpath, so the lambda can't be deserialized... so add the current classpath
+            latest = new Versions.Version(latest.version, ArrayUtils.addAll(latest.classpath, AbstractCluster.CURRENT_VERSION.classpath));
+
+            try (UpgradeableCluster cluster = UpgradeableCluster.create(1, latest))
+            {
+                IInvokableInstance inst = (IInvokableInstance) cluster.get(1);
+                Class<?> klass = inst.callOnInstance(() -> Config.class);
+                assert klass.getClassLoader() != ConfigCompatabilityTestGenerate.class.getClassLoader();
+                dump(toTree(klass), path.getAbsolutePath());
+            }
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java
index 2e67497..b09b882 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java
@@ -21,10 +21,8 @@
 import org.junit.Test;
 
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
-import org.apache.cassandra.distributed.shared.Versions;
 
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
-import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
 import static org.apache.cassandra.distributed.shared.AssertUtils.row;
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
index c1ae153..4e50eb1 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
@@ -18,18 +18,22 @@
 
 package org.apache.cassandra.distributed.upgrade;
 
-import java.util.Arrays;
-import java.util.List;
 import java.util.UUID;
+import java.util.concurrent.RejectedExecutionException;
+
+import org.junit.Test;
 
 import com.vdurmont.semver4j.Semver;
-
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.ICoordinator;
+import org.apache.cassandra.exceptions.ReadFailureException;
 import org.apache.cassandra.exceptions.ReadTimeoutException;
+import org.apache.cassandra.exceptions.WriteFailureException;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
 import org.apache.cassandra.net.Verb;
+import org.assertj.core.api.Assertions;
 
+import static java.lang.String.format;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.cassandra.distributed.api.ConsistencyLevel.ALL;
 import static org.apache.cassandra.distributed.api.ConsistencyLevel.ONE;
@@ -37,47 +41,67 @@
 import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
 import static org.apache.cassandra.distributed.shared.AssertUtils.row;
 import static org.apache.cassandra.net.Verb.READ_REQ;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static java.lang.String.format;
 
 
-public class MixedModeAvailabilityTestBase extends UpgradeTestBase
+public abstract class MixedModeAvailabilityTestBase extends UpgradeTestBase
 {
     private static final int NUM_NODES = 3;
     private static final int COORDINATOR = 1;
-    private static final List<Tester> TESTERS = Arrays.asList(new Tester(ONE, ALL),
-                                                              new Tester(QUORUM, QUORUM),
-                                                              new Tester(ALL, ONE));
+    private static final String INSERT = withKeyspace("INSERT INTO %s.t (k, c, v) VALUES (?, ?, ?)");
+    private static final String SELECT = withKeyspace("SELECT * FROM %s.t WHERE k = ?");
 
+    private final Semver initial;
+    private final ConsistencyLevel writeConsistencyLevel;
+    private final ConsistencyLevel readConsistencyLevel;
 
-    protected static void testAvailability(Semver initial) throws Throwable
+    public MixedModeAvailabilityTestBase(Semver initial, ConsistencyLevel writeConsistencyLevel, ConsistencyLevel readConsistencyLevel)
     {
-        testAvailability(initial, UpgradeTestBase.CURRENT);
+        this.initial = initial;
+        this.writeConsistencyLevel = writeConsistencyLevel;
+        this.readConsistencyLevel = readConsistencyLevel;
     }
 
-    protected static void testAvailability(Semver initial, Semver upgrade) throws Throwable
+    @Test
+    public void testAvailabilityCoordinatorNotUpgraded() throws Throwable
     {
-        testAvailability(true, initial, upgrade);
-        testAvailability(false, initial, upgrade);
+        testAvailability(false, initial, writeConsistencyLevel, readConsistencyLevel);
+    }
+
+    @Test
+    public void testAvailabilityCoordinatorUpgraded() throws Throwable
+    {
+        testAvailability(true, initial, writeConsistencyLevel, readConsistencyLevel);
     }
 
     private static void testAvailability(boolean upgradedCoordinator,
                                          Semver initial,
-                                         Semver upgrade) throws Throwable
+                                         ConsistencyLevel writeConsistencyLevel,
+                                         ConsistencyLevel readConsistencyLevel) throws Throwable
     {
         new TestCase()
         .nodes(NUM_NODES)
         .nodesToUpgrade(upgradedCoordinator ? 1 : 2)
-        .upgrades(initial, upgrade)
-        .withConfig(config -> config.set("read_request_timeout_in_ms", SECONDS.toMillis(2))
-                                    .set("write_request_timeout_in_ms", SECONDS.toMillis(2)))
-        .setup(c -> c.schemaChange(withKeyspace("CREATE TABLE %s.t (k uuid, c int, v int, PRIMARY KEY (k, c))")))
+        .upgrades(initial, UpgradeTestBase.CURRENT)
+        .withConfig(config -> config.set("read_request_timeout_in_ms", SECONDS.toMillis(5))
+                                    .set("write_request_timeout_in_ms", SECONDS.toMillis(5)))
+        // Use a speculative retry of 10ms so that every check behaves the same way.
+        // At startup cfs.sampleLatencyNanos == 0, so speculation acts as if ALWAYS were configured,
+        // but after the first refresh it is set high enough that speculation never triggers again for the rest of the test.
+        // Pinning the retry to 10ms keeps cfs.sampleLatencyNanos consistent for the duration of the test.
+        .setup(cluster -> {
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k uuid, c int, v int, PRIMARY KEY (k, c)) WITH speculative_retry = '10ms'"));
+            cluster.setUncaughtExceptionsFilter(throwable -> throwable instanceof RejectedExecutionException);
+        })
         .runAfterNodeUpgrade((cluster, n) -> {
 
+            ICoordinator coordinator = cluster.coordinator(COORDINATOR);
+
             // using 0 to 2 down nodes...
-            for (int numNodesDown = 0; numNodesDown < NUM_NODES; numNodesDown++)
+            for (int i = 0; i < NUM_NODES; i++)
             {
+                final int numNodesDown = i;
+
                 // disable communications to the down nodes
                 if (numNodesDown > 0)
                 {
@@ -85,10 +109,38 @@
                     cluster.filters().outbound().verbs(Verb.MUTATION_REQ.id).to(replica(COORDINATOR, numNodesDown)).drop();
                 }
 
-                // run the test cases that are compatible with the number of down nodes
-                ICoordinator coordinator = cluster.coordinator(COORDINATOR);
-                for (Tester tester : TESTERS)
-                    tester.test(coordinator, numNodesDown, upgradedCoordinator);
+                UUID key = UUID.randomUUID();
+                Object[] row1 = row(key, 1, 10);
+                Object[] row2 = row(key, 2, 20);
+
+                boolean wrote = false;
+                try
+                {
+                    // test write
+                    maybeFail(false, numNodesDown > maxNodesDown(writeConsistencyLevel), () -> {
+                        coordinator.execute(INSERT, writeConsistencyLevel, row1);
+                        coordinator.execute(INSERT, writeConsistencyLevel, row2);
+                    });
+
+                    wrote = true;
+
+                    // test read
+                    maybeFail(true, numNodesDown > maxNodesDown(readConsistencyLevel), () -> {
+                        Object[][] rows = coordinator.execute(SELECT, readConsistencyLevel, key);
+                        if (numNodesDown <= maxNodesDown(writeConsistencyLevel))
+                            assertRows(rows, row1, row2);
+                    });
+                }
+                catch (Throwable t)
+                {
+                    throw new AssertionError(format("Unexpected error while %s in case write-read consistency %s-%s with %s coordinator and %d nodes down: %s",
+                                                    wrote ? "reading" : "writing",
+                                                    writeConsistencyLevel,
+                                                    readConsistencyLevel,
+                                                    upgradedCoordinator ? "upgraded" : "not upgraded",
+                                                    numNodesDown,
+                                                    t), t);
+                }
             }
         }).run();
     }
@@ -99,88 +151,49 @@
         return depth == 0 ? node : replica(node == NUM_NODES ? 1 : node + 1, depth - 1);
     }
 
-    private static class Tester
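+    /**
+     * Runs the supplied write or read and verifies the outcome: if {@code shouldFail} is false the
+     * operation must complete, otherwise it must throw a timeout or failure exception of the
+     * appropriate read/write type.
+     */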
+    private static void maybeFail(boolean isRead, boolean shouldFail, Runnable test)
     {
-        private static final String INSERT = withKeyspace("INSERT INTO %s.t (k, c, v) VALUES (?, ?, ?)");
-        private static final String SELECT = withKeyspace("SELECT * FROM %s.t WHERE k = ?");
-
-        private final ConsistencyLevel writeConsistencyLevel;
-        private final ConsistencyLevel readConsistencyLevel;
-
-        private Tester(ConsistencyLevel writeConsistencyLevel, ConsistencyLevel readConsistencyLevel)
+        try
         {
-            this.writeConsistencyLevel = writeConsistencyLevel;
-            this.readConsistencyLevel = readConsistencyLevel;
+            test.run();
+            assertFalse("Should have failed", shouldFail);
         }
-
-        public void test(ICoordinator coordinator, int numNodesDown, boolean upgradedCoordinator)
+        catch (Exception e)
         {
-            UUID key = UUID.randomUUID();
-            Object[] row1 = row(key, 1, 10);
-            Object[] row2 = row(key, 2, 20);
+            if (!shouldFail)
+                throw e;
 
-            boolean wrote = false;
-            try
+            // compare exception class names rather than classes, since the instances may come from different classpaths
+            String className = (e instanceof RuntimeException && e.getCause() != null)
+                               ? e.getCause().getClass().getCanonicalName()
+                               : e.getClass().getCanonicalName();
+
+            if (isRead)
             {
-                // test write
-                maybeFail(WriteTimeoutException.class, numNodesDown > maxNodesDown(writeConsistencyLevel), () -> {
-                    coordinator.execute(INSERT, writeConsistencyLevel, row1);
-                    coordinator.execute(INSERT, writeConsistencyLevel, row2);
-                });
-
-                wrote = true;
-
-                // test read
-                maybeFail(ReadTimeoutException.class, numNodesDown > maxNodesDown(readConsistencyLevel), () -> {
-                    Object[][] rows = coordinator.execute(SELECT, readConsistencyLevel, key);
-                    if (numNodesDown <= maxNodesDown(writeConsistencyLevel))
-                        assertRows(rows, row1, row2);
-                });
+                Assertions.assertThat(className)
+                          .isIn(ReadTimeoutException.class.getCanonicalName(),
+                                ReadFailureException.class.getCanonicalName());
             }
-            catch (Throwable t)
+            else
             {
-                throw new AssertionError(format("Unexpected error while %s in case write-read consistency %s-%s with %s coordinator and %d nodes down",
-                                                wrote ? "reading" : "writing",
-                                                writeConsistencyLevel,
-                                                readConsistencyLevel,
-                                                upgradedCoordinator ? "upgraded" : "not upgraded",
-                                                numNodesDown), t);
+                Assertions.assertThat(className)
+                          .isIn(WriteTimeoutException.class.getCanonicalName(),
+                                WriteFailureException.class.getCanonicalName());
             }
         }
+    }
 
-        private static <E extends Exception> void maybeFail(Class<E> exceptionClass, boolean shouldFail, Runnable test)
-        {
-            try
-            {
-                test.run();
-                assertFalse(shouldFail);
-            }
-            catch (Exception e)
-            {
-                // we should use exception class names due to the different classpaths
-                String className = e.getClass().getCanonicalName();
-                if (e instanceof RuntimeException && e.getCause() != null)
-                    className = e.getCause().getClass().getCanonicalName();
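+    /**
+     * Returns the maximum number of down nodes that still allows the given consistency level to be
+     * satisfied in this three-node cluster.
+     */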
+    private static int maxNodesDown(ConsistencyLevel cl)
+    {
+        if (cl == ONE)
+            return 2;
 
-                if (shouldFail)
-                    assertEquals(exceptionClass.getCanonicalName(), className);
-                else
-                    throw e;
-            }
-        }
+        if (cl == QUORUM)
+            return 1;
 
-        private static int maxNodesDown(ConsistencyLevel cl)
-        {
-            if (cl == ONE)
-                return 2;
+        if (cl == ALL)
+            return 0;
 
-            if (cl == QUORUM)
-                return 1;
-
-            if (cl == ALL)
-                return 0;
-
-            throw new IllegalArgumentException("Unsupported consistency level: " + cl);
-        }
+        throw new IllegalArgumentException("Unsupported consistency level: " + cl);
     }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV22Test.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV22Test.java
deleted file mode 100644
index f756574..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV22Test.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.shared.Versions;
-
-/**
- * {@link MixedModeAvailabilityTestBase} for upgrades from v22.
- */
-public class MixedModeAvailabilityV22Test extends MixedModeAvailabilityTestBase
-{
-    @Test
-    public void testAvailabilityV22ToV30() throws Throwable
-    {
-        testAvailability(v22, v30);
-    }
-
-    @Test
-    public void testAvailabilityV22ToV3X() throws Throwable
-    {
-        testAvailability(v22, v3X);
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30AllOneTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30AllOneTest.java
new file mode 100644
index 0000000..96dc454
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30AllOneTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+/**
+ * {@link MixedModeAvailabilityTestBase} for upgrades from v30 with ALL-ONE write-read consistency.
+ */
+public class MixedModeAvailabilityV30AllOneTest extends MixedModeAvailabilityTestBase
+{
+    public MixedModeAvailabilityV30AllOneTest()
+    {
+        super(v30, ConsistencyLevel.ALL, ConsistencyLevel.ONE);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30OneAllTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30OneAllTest.java
new file mode 100644
index 0000000..be51549
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30OneAllTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+/**
+ * {@link MixedModeAvailabilityTestBase} for upgrades from v30 with ONE-ALL write-read consistency.
+ */
+public class MixedModeAvailabilityV30OneAllTest extends MixedModeAvailabilityTestBase
+{
+    public MixedModeAvailabilityV30OneAllTest()
+    {
+        super(v30, ConsistencyLevel.ONE, ConsistencyLevel.ALL);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30QuorumQuorumTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30QuorumQuorumTest.java
new file mode 100644
index 0000000..8df53d1
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30QuorumQuorumTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+/**
+ * {@link MixedModeAvailabilityTestBase} for upgrades from v30 with QUORUM-QUORUM write-read consistency.
+ */
+public class MixedModeAvailabilityV30QuorumQuorumTest extends MixedModeAvailabilityTestBase
+{
+    public MixedModeAvailabilityV30QuorumQuorumTest()
+    {
+        super(v30, ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30Test.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30Test.java
deleted file mode 100644
index 984df3b..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30Test.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.shared.Versions;
-
-/**
- * {@link MixedModeAvailabilityTestBase} for upgrades from v30.
- */
-public class MixedModeAvailabilityV30Test extends MixedModeAvailabilityTestBase
-{
-    @Test
-    public void testAvailability() throws Throwable
-    {
-        testAvailability(v30);
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XAllOneTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XAllOneTest.java
new file mode 100644
index 0000000..955e0ba
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XAllOneTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+/**
+ * {@link MixedModeAvailabilityTestBase} for upgrades from v3X with ALL-ONE write-read consistency.
+ */
+public class MixedModeAvailabilityV3XAllOneTest extends MixedModeAvailabilityTestBase
+{
+    public MixedModeAvailabilityV3XAllOneTest()
+    {
+        super(v3X, ConsistencyLevel.ALL, ConsistencyLevel.ONE);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java
new file mode 100644
index 0000000..8ea94ea
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+/**
+ * {@link MixedModeAvailabilityTestBase} for upgrades from v3X with ONE-ALL write-read consistency.
+ */
+public class MixedModeAvailabilityV3XOneAllTest extends MixedModeAvailabilityTestBase
+{
+    public MixedModeAvailabilityV3XOneAllTest()
+    {
+        super(v3X, ConsistencyLevel.ONE, ConsistencyLevel.ALL);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XQuorumQuorumTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XQuorumQuorumTest.java
new file mode 100644
index 0000000..e65f4f4
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XQuorumQuorumTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+
+/**
+ * {@link MixedModeAvailabilityTestBase} for upgrades from v3X with QUORUM-QUORUM write-read consistency.
+ */
+public class MixedModeAvailabilityV3XQuorumQuorumTest extends MixedModeAvailabilityTestBase
+{
+    public MixedModeAvailabilityV3XQuorumQuorumTest()
+    {
+        super(v3X, ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM);
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XTest.java
deleted file mode 100644
index 70230f5..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XTest.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.shared.Versions;
-
-/**
- * {@link MixedModeAvailabilityTestBase} for upgrades from v3X.
- */
-public class MixedModeAvailabilityV3XTest extends MixedModeAvailabilityTestBase
-{
-    @Test
-    public void testAvailability() throws Throwable
-    {
-        testAvailability(v3X);
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV22Test.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV22Test.java
deleted file mode 100644
index deb0863..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV22Test.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.shared.Versions;
-
-/**
- * {@link MixedModeConsistencyTestBase} for upgrades from v22.
- */
-public class MixedModeConsistencyV22Test extends MixedModeConsistencyTestBase
-{
-    @Test
-    public void testConsistencyV22ToV30() throws Throwable
-    {
-        testConsistency(v22, v30);
-    }
-
-    @Test
-    public void testConsistencyV22ToV3X() throws Throwable
-    {
-        testConsistency(v22, v3X);
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV30Test.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV30Test.java
index 8687c55..2712a46 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV30Test.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV30Test.java
@@ -20,8 +20,6 @@
 
 import org.junit.Test;
 
-import org.apache.cassandra.distributed.shared.Versions;
-
 /**
  * {@link MixedModeConsistencyTestBase} for upgrades from v30.
  */
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV3XTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV3XTest.java
index 9e4ec6a..d39ea56 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV3XTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyV3XTest.java
@@ -20,8 +20,6 @@
 
 import org.junit.Test;
 
-import org.apache.cassandra.distributed.shared.Versions;
-
 /**
  * {@link MixedModeConsistencyTestBase} for upgrades from v3X.
  */
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2LoggedBatchTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2LoggedBatchTest.java
deleted file mode 100644
index 3835521..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2LoggedBatchTest.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.shared.Versions;
-
-public class MixedModeFrom2LoggedBatchTest extends MixedModeBatchTestBase
-{
-    @Test
-    public void testSimpleStrategy22to30() throws Throwable
-    {
-        testSimpleStrategy(v22, v30, true);
-    }
-
-    @Test
-    public void testSimpleStrategy22to3X() throws Throwable
-    {
-        testSimpleStrategy(v22, v3X, true);
-    }
-}
\ No newline at end of file
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2ReplicationTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2ReplicationTest.java
deleted file mode 100644
index ea56415..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2ReplicationTest.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.shared.Versions;
-
-public class MixedModeFrom2ReplicationTest extends MixedModeReplicationTestBase
-{
-    @Test
-    public void testSimpleStrategy22to30() throws Throwable
-    {
-        testSimpleStrategy(v22, v30);
-    }
-
-    @Test
-    public void testSimpleStrategy22to3X() throws Throwable
-    {
-        testSimpleStrategy(v22, v3X);
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2UnloggedBatchTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2UnloggedBatchTest.java
deleted file mode 100644
index 4f4b722..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom2UnloggedBatchTest.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.shared.Versions;
-
-public class MixedModeFrom2UnloggedBatchTest extends MixedModeBatchTestBase
-{
-    @Test
-    public void testSimpleStrategy22to30() throws Throwable
-    {
-        testSimpleStrategy(v22, v30, false);
-    }
-
-    @Test
-    public void testSimpleStrategy22to3X() throws Throwable
-    {
-        testSimpleStrategy(v22, v3X, false);
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3LoggedBatchTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3LoggedBatchTest.java
index 77eb058..7326f6f 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3LoggedBatchTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3LoggedBatchTest.java
@@ -20,8 +20,6 @@
 
 import org.junit.Test;
 
-import org.apache.cassandra.distributed.shared.Versions;
-
 public class MixedModeFrom3LoggedBatchTest extends MixedModeBatchTestBase
 {
     @Test
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java
index a38e25d..4902385 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java
@@ -20,8 +20,6 @@
 
 import org.junit.Test;
 
-import org.apache.cassandra.distributed.shared.Versions;
-
 public class MixedModeFrom3ReplicationTest extends MixedModeReplicationTestBase
 {
     @Test
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3UnloggedBatchTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3UnloggedBatchTest.java
index 7256d2e..d169142 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3UnloggedBatchTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3UnloggedBatchTest.java
@@ -20,8 +20,6 @@
 
 import org.junit.Test;
 
-import org.apache.cassandra.distributed.shared.Versions;
-
 public class MixedModeFrom3UnloggedBatchTest extends MixedModeBatchTestBase
 {
     @Test
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java
index e706bda..f4c9695 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java
@@ -33,7 +33,6 @@
 import org.apache.cassandra.distributed.UpgradeableCluster;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.IMessageFilters;
-import org.apache.cassandra.distributed.shared.Versions;
 import org.apache.cassandra.net.Verb;
 import org.assertj.core.api.Assertions;
 
@@ -50,9 +49,9 @@
         .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
         .nodes(3)
         .nodesToUpgradeOrdered(1, 2, 3)
-        // all upgrades from v30 up, excluding v30->v3X
-        .singleUpgrade(v30, v40)
-        .upgradesFrom(v3X)
+        // upgrade from v30 and v3X directly to the current version (excludes v30->v3X and v40->current)
+        .singleUpgrade(v30)
+        .singleUpgrade(v3X)
         .setup(c -> {})
         .runAfterNodeUpgrade((cluster, node) -> {
             if (node == 1) {
@@ -86,9 +85,9 @@
         .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
         .nodes(3)
         .nodesToUpgradeOrdered(1, 2, 3)
-        // all upgrades from v30 up, excluding v30->v3X
-        .singleUpgrade(v30, v40)
-        .upgradesFrom(v3X)
+        // upgrade from v30 and v3X directly to the current version (excludes v30->v3X and v40->current)
+        .singleUpgrade(v30)
+        .singleUpgrade(v3X)
         .setup(cluster -> {
             // node2 and node3 gossiper cannot talk with each other
             cluster.filters().verbs(Verb.GOSSIP_DIGEST_SYN.id).from(2).to(3).drop();
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java
index 29c3209..d1551fb 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java
@@ -30,12 +30,10 @@
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.IUpgradeableInstance;
-import org.apache.cassandra.distributed.shared.Shared;
 import org.awaitility.Awaitility;
 
 import static org.apache.cassandra.distributed.shared.AssertUtils.*;
 
-@Shared
 public class MixedModeMessageForwardTest extends UpgradeTestBase
 {
     private static final Logger logger = LoggerFactory.getLogger(MixedModeMessageForwardTest.class);
@@ -105,7 +103,7 @@
         .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
         .withBuilder(b -> b.withRacks(numDCs, 1, nodesPerDc))
         .nodes(numDCs * nodesPerDc)
-        .singleUpgrade(v30, v40)
+        .singleUpgrade(v30)
         .setup(cluster -> {
             cluster.schemaChange("ALTER KEYSPACE " + KEYSPACE +
                 " WITH replication = {'class': 'NetworkTopologyStrategy', " + ntsArgs + " };");
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
index 4e3074d..fcb0482 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
@@ -100,4 +100,4 @@
         })
         .run();
     }
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
index c95aede..cbbcff0 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
@@ -22,7 +22,6 @@
 
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
-import org.apache.cassandra.distributed.shared.Versions;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.utils.CassandraVersion;
 
@@ -39,9 +38,9 @@
         .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
         .nodes(2)
         .nodesToUpgrade(1)
-        // all upgrades from v30 up, excluding v30->v3X
-        .singleUpgrade(v30, v40)
-        .upgradesFrom(v3X)
+        // upgrade from v30 and v3X directly to the current version (excludes v30->v3X and v40->current)
+        .singleUpgrade(v30)
+        .singleUpgrade(v3X)
         .setup(cluster -> {
             cluster.schemaChange(CREATE_TABLE);
             insertData(cluster.coordinator(1));
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java
index adcfd1f..813d9f2 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java
@@ -54,7 +54,7 @@
         new UpgradeTestBase.TestCase()
         .nodes(2)
         .nodesToUpgrade(UPGRADED_NODE)
-        .upgradesFrom(v3X)
+        .singleUpgrade(v3X)
         .withConfig(config -> config.with(NETWORK, GOSSIP))
         .setup(cluster -> {
             cluster.schemaChange(CREATE_TABLE);
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/PagingTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/PagingTest.java
deleted file mode 100644
index 30e248d..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/PagingTest.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-
-import org.junit.Test;
-
-import com.datastax.driver.core.ProtocolVersion;
-import com.datastax.driver.core.QueryOptions;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.SimpleStatement;
-import com.datastax.driver.core.Statement;
-import org.apache.cassandra.distributed.api.ConsistencyLevel;
-import org.apache.cassandra.distributed.shared.DistributedTestBase;
-import org.apache.cassandra.distributed.shared.Versions;
-
-import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
-import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
-import static org.apache.cassandra.distributed.api.Feature.NETWORK;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class PagingTest extends UpgradeTestBase
-{
-    @Test
-    public void testReads() throws Throwable
-    {
-        new UpgradeTestBase.TestCase()
-        .nodes(2)
-        .upgrades(v22, v30)
-        .nodesToUpgrade(2)
-        .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
-        .setup((cluster) -> {
-            cluster.disableAutoCompaction(DistributedTestBase.KEYSPACE);
-            cluster.schemaChange("CREATE TABLE " + DistributedTestBase.KEYSPACE + ".tbl (pk int, ck int, v text, PRIMARY KEY (pk, ck)) ");
-            for (int j = 0; j < 5000; j++)
-            {
-                for (int i = 0; i < 10; i++)
-                    cluster.coordinator(1).execute("insert into " + DistributedTestBase.KEYSPACE + ".tbl (pk, ck, v) VALUES (" + j + ", " + i + ", 'hello')", ConsistencyLevel.ALL);
-            }
-            cluster.forEach(c -> c.flush(DistributedTestBase.KEYSPACE));
-            checkDuplicates("BOTH ON 2.2");
-        })
-        .runAfterClusterUpgrade((cluster) -> checkDuplicates("MIXED MODE"))
-        .run();
-    }
-
-    private void checkDuplicates(String message) throws InterruptedException
-    {
-        Thread.sleep(5000); // sometimes one node doesn't have time come up properly?
-        try (com.datastax.driver.core.Cluster c = com.datastax.driver.core.Cluster.builder()
-                                                                                  .addContactPoint("127.0.0.1")
-                                                                                  .withProtocolVersion(ProtocolVersion.V3)
-                                                                                  .withQueryOptions(new QueryOptions().setFetchSize(101))
-                                                                                  .build();
-             Session s = c.connect())
-        {
-            Statement stmt = new SimpleStatement("select distinct token(pk) from " + DistributedTestBase.KEYSPACE + ".tbl WHERE token(pk) > " + Long.MIN_VALUE + " AND token(pk) < " + Long.MAX_VALUE);
-            stmt.setConsistencyLevel(com.datastax.driver.core.ConsistencyLevel.ALL);
-            ResultSet res = s.execute(stmt);
-            Set<Object> seenTokens = new HashSet<>();
-            Iterator<Row> rows = res.iterator();
-            Set<Object> dupes = new HashSet<>();
-            while (rows.hasNext())
-            {
-                Object token = rows.next().getObject(0);
-                if (seenTokens.contains(token))
-                    dupes.add(token);
-                seenTokens.add(token);
-            }
-            assertEquals(message+": too few rows", 5000, seenTokens.size());
-            assertTrue(message+": dupes is not empty", dupes.isEmpty());
-        }
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java
index 3e6c9ca..4cca7b9 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java
@@ -35,7 +35,7 @@
         .withConfig(configConsumer)
         .nodesToUpgrade(1)
         // all upgrades from v30 up, excluding v30->v3X
-        .singleUpgrade(v30, v40)
+        .singleUpgrade(v30)
         .upgradesFrom(v3X)
         .setup((cluster) -> {
             cluster.filters().outbound().allVerbs().messagesMatching((f,t,m) -> false).drop();
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/ReadRepairCompactStorageUpgradeTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/ReadRepairCompactStorageUpgradeTest.java
deleted file mode 100644
index 5567d40..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/ReadRepairCompactStorageUpgradeTest.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import org.junit.Test;
-
-import org.apache.cassandra.distributed.api.ConsistencyLevel;
-import org.apache.cassandra.distributed.shared.Versions;
-
-public class ReadRepairCompactStorageUpgradeTest extends UpgradeTestBase
-{
-    /**
-     * Tests {@code COMPACT STORAGE} behaviour with mixed replica versions.
-     * <p>
-     * See CASSANDRA-15363 for further details.
-     */
-    @Test
-    public void mixedModeReadRepairCompactStorage() throws Throwable
-    {
-        new TestCase()
-        .nodes(2)
-        .upgrades(v22, v3X)
-        .setup((cluster) -> cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl" +
-                                                              " (pk ascii, b boolean, v blob, PRIMARY KEY (pk))" +
-                                                              " WITH COMPACT STORAGE")))
-        .runAfterNodeUpgrade((cluster, node) -> {
-            if (node != 1)
-                return;
-            // now node1 is 3.0/3.x and node2 is 2.2
-            // make sure 2.2 side does not get the mutation
-            cluster.get(1).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk = ?"), "something");
-            // trigger a read repair
-            cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk = ?"),
-                                           ConsistencyLevel.ALL,
-                                           "something");
-            cluster.get(2).flush(KEYSPACE);
-        })
-        .runAfterClusterUpgrade((cluster) -> cluster.get(2).forceCompact(KEYSPACE, "tbl"))
-        .run();
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/RepairRequestTimeoutUpgradeTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/RepairRequestTimeoutUpgradeTest.java
new file mode 100644
index 0000000..4929d8d
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/RepairRequestTimeoutUpgradeTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.upgrade;
+
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.api.Feature;
+
+import static org.apache.cassandra.net.Verb.VALIDATION_REQ;
+
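+/**
+ * Upgrade test verifying that a full repair fails when validation requests are dropped (the repair
+ * request times out) and succeeds again once the message filters are reset.
+ */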
+public class RepairRequestTimeoutUpgradeTest extends UpgradeTestBase
+{
+    @Test
+    public void simpleUpgradeWithNetworkAndGossipTest() throws Throwable
+    {
+        new TestCase()
+        .nodes(2)
+        .nodesToUpgrade(1)
+        .withConfig((cfg) -> cfg.with(Feature.NETWORK, Feature.GOSSIP).set("repair_request_timeout_in_ms", 1000))
+        .upgrades(v40, v41)
+        .setup((cluster) -> {
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
+            for (int i = 0; i < 10; i++)
+                cluster.get(i % 2 + 1).executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES ("+i+", 1, 1)");
+            cluster.forEach(i -> i.flush(KEYSPACE));
+        })
+        .runAfterNodeUpgrade((cluster, node) -> {
+            cluster.filters().verbs(VALIDATION_REQ.id).drop();
+            cluster.get(2).nodetoolResult("repair", KEYSPACE, "-full").asserts().failure();
+            cluster.filters().reset();
+            for (int i = 10; i < 20; i++)
+                cluster.get(i % 2 + 1).executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES ("+i+", 1, 1)");
+            cluster.forEach(i -> i.flush(KEYSPACE));
+            cluster.get(1).nodetoolResult("repair", KEYSPACE, "-full").asserts().success();
+        }).run();
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
index 691d8af..b1692b9 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
@@ -22,7 +22,6 @@
 
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
-import org.apache.cassandra.distributed.shared.Versions;
 import static org.apache.cassandra.distributed.shared.AssertUtils.*;
 
 public class UpgradeTest extends UpgradeTestBase
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
index 11178c3..5c32fcd 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
@@ -32,13 +32,16 @@
 import org.junit.After;
 import org.junit.BeforeClass;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.distributed.UpgradeableCluster;
 import org.apache.cassandra.distributed.api.ICluster;
 import org.apache.cassandra.distributed.api.IInstanceConfig;
-import org.apache.cassandra.distributed.impl.Instance;
 import org.apache.cassandra.distributed.shared.DistributedTestBase;
+import org.apache.cassandra.distributed.shared.ThrowingRunnable;
 import org.apache.cassandra.distributed.shared.Versions;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Pair;
@@ -46,10 +49,10 @@
 import static org.apache.cassandra.distributed.shared.Versions.Version;
 import static org.apache.cassandra.distributed.shared.Versions.find;
 
-
-
 public class UpgradeTestBase extends DistributedTestBase
 {
+    private static final Logger logger = LoggerFactory.getLogger(UpgradeTestBase.class);
+
     @After
     public void afterEach()
     {
@@ -79,17 +82,18 @@
         public void run(UpgradeableCluster cluster, int node) throws Throwable;
     }
 
-    public static final Semver v22 = new Semver("2.2.0-beta1", SemverType.LOOSE);
     public static final Semver v30 = new Semver("3.0.0-alpha1", SemverType.LOOSE);
     public static final Semver v3X = new Semver("3.11.0", SemverType.LOOSE);
     public static final Semver v40 = new Semver("4.0-alpha1", SemverType.LOOSE);
+    public static final Semver v41 = new Semver("4.1-alpha1", SemverType.LOOSE);
 
     protected static final List<Pair<Semver,Semver>> SUPPORTED_UPGRADE_PATHS = ImmutableList.of(
-        Pair.create(v22, v30),
-        Pair.create(v22, v3X),
         Pair.create(v30, v3X),
         Pair.create(v30, v40),
-        Pair.create(v3X, v40));
+        Pair.create(v30, v41),
+        Pair.create(v3X, v40),
+        Pair.create(v3X, v41),
+        Pair.create(v40, v41));
 
     // the last is always the current
     public static final Semver CURRENT = SUPPORTED_UPGRADE_PATHS.get(SUPPORTED_UPGRADE_PATHS.size() - 1).right;
@@ -106,7 +110,7 @@
         }
     }
 
-    public static class TestCase implements Instance.ThrowingRunnable
+    public static class TestCase implements ThrowingRunnable
     {
         private final Versions versions;
         private final List<TestVersions> upgrade = new ArrayList<>();
@@ -155,9 +159,9 @@
         }
 
         /** Will test this specific upgrade path **/
-        public TestCase singleUpgrade(Semver from, Semver to)
+        public TestCase singleUpgrade(Semver from)
         {
-            this.upgrade.add(new TestVersions(versions.getLatest(from), versions.getLatest(to)));
+            this.upgrade.add(new TestVersions(versions.getLatest(from), versions.getLatest(CURRENT)));
             return this;
         }
 
@@ -217,7 +221,7 @@
 
             for (TestVersions upgrade : this.upgrade)
             {
-                System.out.printf("testing upgrade from %s to %s%n", upgrade.initial.version, upgrade.upgrade.version);
+                logger.info("testing upgrade from {} to {}", upgrade.initial.version, upgrade.upgrade.version);
                 try (UpgradeableCluster cluster = init(UpgradeableCluster.create(nodeCount, upgrade.initial, configConsumer, builderConsumer)))
                 {
                     setup.run(cluster);
@@ -259,7 +263,7 @@
     protected TestCase allUpgrades(int nodes, int... toUpgrade)
     {
         return new TestCase().nodes(nodes)
-                             .upgradesFrom(v22)
+                             .upgradesFrom(v30)
                              .nodesToUpgrade(toUpgrade);
     }
 
diff --git a/test/distributed/org/apache/cassandra/distributed/util/QueryResultUtil.java b/test/distributed/org/apache/cassandra/distributed/util/QueryResultUtil.java
new file mode 100644
index 0000000..a502e8c
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/util/QueryResultUtil.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.distributed.util;
+
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.function.Predicate;
+
+import com.google.monitoring.runtime.instrumentation.common.collect.Iterators;
+import org.apache.cassandra.distributed.api.Row;
+import org.apache.cassandra.distributed.api.SimpleQueryResult;
+import org.apache.cassandra.tools.nodetool.formatter.TableBuilder;
+import org.assertj.core.api.Assertions;
+
+public class QueryResultUtil
+{
+    private QueryResultUtil()
+    {
+    }
+
+    public static boolean contains(SimpleQueryResult qr, Object... values)
+    {
+        return contains(qr, a -> equals(a, values));
+    }
+
+    public static boolean contains(SimpleQueryResult qr, Row row)
+    {
+        return contains(qr, a -> equals(a, row));
+    }
+
+    public static boolean contains(SimpleQueryResult qr, Predicate<Row> fn)
+    {
+        while (qr.hasNext())
+        {
+            if (fn.test(qr.next()))
+                return true;
+        }
+        return false;
+    }
+
+    private static boolean equals(Row a, Row b)
+    {
+        return equals(a, b.toObjectArray());
+    }
+
+    private static boolean equals(Row a, Object[] bs)
+    {
+        Object[] as = a.toObjectArray();
+        if (as.length != bs.length)
+            return false;
+        for (int i = 0; i < as.length; i++)
+        {
+            if (!Objects.equals(as[i], bs[i]))
+                return false;
+        }
+        return true;
+    }
+
+    public static SimpleQueryResultAssertHelper assertThat(SimpleQueryResult qr)
+    {
+        return new SimpleQueryResultAssertHelper(qr);
+    }
+
+    public static RowAssertHelper assertThat(Row row)
+    {
+        return new RowAssertHelper(row);
+    }
+
+    public static String expand(SimpleQueryResult qr)
+    {
+        StringBuilder sb = new StringBuilder();
+        int rowNum = 1;
+        while (qr.hasNext())
+        {
+            sb.append("@ Row ").append(rowNum).append('\n');
+            TableBuilder table = new TableBuilder('|');
+            Row next = qr.next();
+            for (String column : qr.names())
+            {
+                Object value = next.get(column);
+                table.add(column, value == null ? null : value.toString());
+            }
+            sb.append(table);
+        }
+        return sb.toString();
+    }
+
+    public static class RowAssertHelper
+    {
+        private final Row row;
+
+        public RowAssertHelper(Row row)
+        {
+            this.row = row;
+        }
+
+        public RowAssertHelper isEqualTo(String column, Object expected)
+        {
+            Object actual = row.get(column);
+            Assertions.assertThat(actual).describedAs("Column %s had unexpected value", column).isEqualTo(expected);
+            return this;
+        }
+
+        public RowAssertHelper columnsEqualTo(String first, String... others)
+        {
+            Object expected = row.get(first);
+            for (String other : others)
+                Assertions.assertThat(row.<Object>get(other)).describedAs("Columns %s and %s are not equal", first, other).isEqualTo(expected);
+            return this;
+        }
+    }
+
+    public static class SimpleQueryResultAssertHelper
+    {
+        private final SimpleQueryResult qr;
+
+        private SimpleQueryResultAssertHelper(SimpleQueryResult qr)
+        {
+            this.qr = qr;
+        }
+
+        public SimpleQueryResultAssertHelper contains(Object... values)
+        {
+            qr.reset();
+            if (!QueryResultUtil.contains(qr, a -> QueryResultUtil.equals(a, values)))
+                throw new AssertionError("Row " + Arrays.asList(values) + " is not present");
+            return this;
+        }
+
+        public SimpleQueryResultAssertHelper contains(Row row)
+        {
+            qr.reset();
+            if (!QueryResultUtil.contains(qr, a -> QueryResultUtil.equals(a, row)))
+                throw new AssertionError("Row " + row + " is not present");
+            return this;
+        }
+
+        public SimpleQueryResultAssertHelper contains(Predicate<Row> fn)
+        {
+            qr.reset();
+            if (!QueryResultUtil.contains(qr, fn))
+                throw new AssertionError("Row  is not present");
+            return this;
+        }
+
+        public SimpleQueryResultAssertHelper isEqualTo(Object... values)
+        {
+            Assertions.assertThat(qr.toObjectArrays())
+                      .hasSize(1)
+                      .contains(values);
+            return this;
+        }
+
+        public SimpleQueryResultAssertHelper hasSize(int size)
+        {
+            Assertions.assertThat(qr.toObjectArrays()).hasSize(size);
+            return this;
+        }
+
+        public SimpleQueryResultAssertHelper hasSizeGreaterThan(int size)
+        {
+            Assertions.assertThat(qr.toObjectArrays()).hasSizeGreaterThan(size);
+            return this;
+        }
+
+        public void isEmpty()
+        {
+            int size = Iterators.size(qr);
+            Assertions.assertThat(size).describedAs("QueryResult is not empty").isEqualTo(0);
+        }
+    }
+}
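
A minimal usage sketch for the QueryResultUtil helpers above, assuming only the API defined in this file; the checkResult method, the pk/col_a/col_b column names, and the expected value 42 are hypothetical placeholders for illustration.

    import java.util.Objects;

    import org.apache.cassandra.distributed.api.SimpleQueryResult;
    import org.apache.cassandra.distributed.util.QueryResultUtil;

    public class QueryResultUtilUsageSketch
    {
        // Hypothetical example: exercises the fluent assertion helpers on a query result.
        static void checkResult(SimpleQueryResult qr)
        {
            // Whole-result assertions; contains(Predicate) rescans the result from the start.
            QueryResultUtil.assertThat(qr)
                           .hasSizeGreaterThan(0)
                           .contains(row -> Objects.equals(row.get("pk"), 42));

            // Row-level assertions on the first row.
            qr.reset();
            if (qr.hasNext())
                QueryResultUtil.assertThat(qr.next())
                               .isEqualTo("pk", 42)
                               .columnsEqualTo("col_a", "col_b");

            // Human-readable dump of all rows, handy in failure messages.
            qr.reset();
            System.out.println(QueryResultUtil.expand(qr));
        }
    }
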
diff --git a/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java b/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java
index 9a76661..9012dcc 100644
--- a/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java
+++ b/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java
@@ -23,7 +23,6 @@
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
-import java.util.UUID;
 
 import com.google.common.util.concurrent.RateLimiter;
 
@@ -36,6 +35,7 @@
 import org.apache.cassandra.db.RowIndexEntry;
 import org.apache.cassandra.db.Slices;
 import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
 import org.apache.cassandra.db.rows.EncodingStats;
 import org.apache.cassandra.db.rows.UnfilteredRowIterator;
 import org.apache.cassandra.dht.AbstractBounds;
@@ -56,6 +56,7 @@
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.utils.EstimatedHistogram;
 import org.apache.cassandra.utils.IFilter;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Ref;
 
 public abstract class ForwardingSSTableReader extends SSTableReader
@@ -283,15 +284,15 @@
     }
 
     @Override
-    public UnfilteredRowIterator iterator(DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, SSTableReadsListener listener)
+    public UnfilteredRowIterator rowIterator(DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, SSTableReadsListener listener)
     {
-        return delegate.iterator(key, slices, selectedColumns, reversed, listener);
+        return delegate.rowIterator(key, slices, selectedColumns, reversed, listener);
     }
 
     @Override
-    public UnfilteredRowIterator iterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed)
+    public UnfilteredRowIterator rowIterator(FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry, Slices slices, ColumnFilter selectedColumns, boolean reversed)
     {
-        return delegate.iterator(file, key, indexEntry, slices, selectedColumns, reversed);
+        return delegate.rowIterator(file, key, indexEntry, slices, selectedColumns, reversed);
     }
 
     @Override
@@ -385,9 +386,9 @@
     }
 
     @Override
-    public ISSTableScanner getScanner(ColumnFilter columns, DataRange dataRange, SSTableReadsListener listener)
+    public UnfilteredPartitionIterator partitionIterator(ColumnFilter columns, DataRange dataRange, SSTableReadsListener listener)
     {
-        return delegate.getScanner(columns, dataRange, listener);
+        return delegate.partitionIterator(columns, dataRange, listener);
     }
 
     @Override
@@ -427,7 +428,7 @@
     }
 
     @Override
-    public UUID getPendingRepair()
+    public TimeUUID getPendingRepair()
     {
         return delegate.getPendingRepair();
     }
diff --git a/test/long/org/apache/cassandra/cql3/CorruptionTest.java b/test/long/org/apache/cassandra/cql3/CorruptionTest.java
index f2ed36a..0ef43a0 100644
--- a/test/long/org/apache/cassandra/cql3/CorruptionTest.java
+++ b/test/long/org/apache/cassandra/cql3/CorruptionTest.java
@@ -18,8 +18,6 @@
 package org.apache.cassandra.cql3;
 
 
-import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -28,6 +26,10 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.io.util.File;
+
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -35,13 +37,12 @@
 import com.datastax.driver.core.policies.LoggingRetryPolicy;
 import com.datastax.driver.core.policies.Policies;
 import com.datastax.driver.core.utils.Bytes;
-import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.io.util.FileWriter;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.service.EmbeddedCassandraService;
 
-public class CorruptionTest extends SchemaLoader
+public class CorruptionTest
 {
 
     private static EmbeddedCassandraService cassandra;
@@ -59,10 +60,7 @@
     @BeforeClass()
     public static void setup() throws ConfigurationException, IOException
     {
-        Schema.instance.clear();
-
-        cassandra = new EmbeddedCassandraService();
-        cassandra.start();
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
 
         cluster = Cluster.builder().addContactPoint("127.0.0.1")
                          .withRetryPolicy(new LoggingRetryPolicy(Policies.defaultRetryPolicy()))
@@ -102,6 +100,15 @@
         VALUE = s.toString();
     }
 
+    @AfterClass
+    public static void tearDown()
+    {
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
+    }
+
     @Test
     public void runCorruptionTest()
     {
@@ -145,10 +152,12 @@
                     String basename = "bad-data-tid" + Thread.currentThread().getId();
                     File put = new File(basename+"-put");
                     File get = new File(basename+"-get");
-                    try(FileWriter pw = new FileWriter(put)) {
+                    try (FileWriter pw = put.newWriter(File.WriteMode.OVERWRITE))
+                    {
                         pw.write(new String(putdata));
                     }
-                    try(FileWriter pw = new FileWriter(get)) {
+                    try (FileWriter pw = get.newWriter(File.WriteMode.OVERWRITE))
+                    {
                         pw.write(new String(getdata));
                     }
                 }
diff --git a/test/long/org/apache/cassandra/cql3/ViewLongTest.java b/test/long/org/apache/cassandra/cql3/ViewLongTest.java
index de888d4..2a9415f 100644
--- a/test/long/org/apache/cassandra/cql3/ViewLongTest.java
+++ b/test/long/org/apache/cassandra/cql3/ViewLongTest.java
@@ -18,86 +18,25 @@
 
 package org.apache.cassandra.cql3;
 
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CyclicBarrier;
-import java.util.stream.Collectors;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import com.datastax.driver.core.Row;
 import com.datastax.driver.core.exceptions.NoHostAvailableException;
-import com.datastax.driver.core.exceptions.OperationTimedOutException;
 import com.datastax.driver.core.exceptions.WriteTimeoutException;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.batchlog.BatchlogManager;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
-import org.apache.cassandra.transport.ProtocolVersion;
-import org.apache.cassandra.concurrent.SEPExecutor;
-import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.WrappedRunnable;
 
-@RunWith(Parameterized.class)
-public class ViewLongTest extends CQLTester
+public class ViewLongTest extends ViewAbstractParameterizedTest
 {
-    @Parameterized.Parameter
-    public ProtocolVersion version;
-
-    @Parameterized.Parameters()
-    public static Collection<Object[]> versions()
-    {
-        return ProtocolVersion.SUPPORTED.stream()
-                                        .map(v -> new Object[]{v})
-                                        .collect(Collectors.toList());
-    }
-
-    private final List<String> views = new ArrayList<>();
-
-    @BeforeClass
-    public static void startup()
-    {
-        requireNetwork();
-    }
-    @Before
-    public void begin()
-    {
-        views.clear();
-    }
-
-    @After
-    public void end() throws Throwable
-    {
-        for (String viewName : views)
-            executeNet(version, "DROP MATERIALIZED VIEW " + viewName);
-    }
-
-    private void createView(String name, String query) throws Throwable
-    {
-        try
-        {
-            executeNet(version, String.format(query, name));
-            // If exception is thrown, the view will not be added to the list; since it shouldn't have been created, this is
-            // the desired behavior
-            views.add(name);
-        }
-        catch (OperationTimedOutException ex)
-        {
-            // ... except for timeout, when we actually do not know whether the view was created or not
-            views.add(name);
-            throw ex;
-        }
-    }
-
     @Test
     public void testConflictResolution() throws Throwable
     {
@@ -111,9 +50,9 @@
                     "c int," +
                     "PRIMARY KEY (a, b))");
 
-        executeNet(version, "USE " + keyspace());
-
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL " +
+                   "PRIMARY KEY (c, a, b)");
 
         CyclicBarrier semaphore = new CyclicBarrier(writers);
 
@@ -121,7 +60,7 @@
         for (int i = 0; i < writers; i++)
         {
             final int writer = i;
-            Thread t = NamedThreadFactory.createThread(new WrappedRunnable()
+            Thread t = NamedThreadFactory.createAnonymousThread(new WrappedRunnable()
             {
                 public void runMayThrow()
                 {
@@ -133,7 +72,7 @@
                         {
                             try
                             {
-                                executeNet(version, "INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP 1",
+                                executeNet("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP 1",
                                            1,
                                            1,
                                            i + writerOffset);
@@ -159,7 +98,7 @@
 
         for (int i = 0; i < writers * insertsPerWriter; i++)
         {
-            if (executeNet(version, "SELECT COUNT(*) FROM system.batches").one().getLong(0) == 0)
+            if (executeNet("SELECT COUNT(*) FROM system.batches").one().getLong(0) == 0)
                 break;
             try
             {
@@ -173,9 +112,9 @@
             }
         }
 
-        int value = executeNet(version, "SELECT c FROM %s WHERE a = 1 AND b = 1").one().getInt("c");
+        int value = executeNet("SELECT c FROM %s WHERE a = 1 AND b = 1").one().getInt("c");
 
-        List<Row> rows = executeNet(version, "SELECT c FROM " + keyspace() + ".mv").all();
+        List<Row> rows = executeNet("SELECT c FROM " + keyspace() + "." + currentView()).all();
 
         boolean containsC = false;
         StringBuilder others = new StringBuilder();
@@ -203,13 +142,19 @@
 
         if (rows.size() > 1)
         {
-            throw new AssertionError(String.format("Expected 1 row, but found %d; %s c = %d, and (%s) of which (%s) failed to insert", rows.size(), containsC ? "found row with" : "no rows contained", value, others, overlappingFailedWrites));
+            throw new AssertionError(String.format("Expected 1 row, but found %d; %s c = %d, " +
+                                                   "and (%s) of which (%s) failed to insert",
+                                                   rows.size(),
+                                                   containsC ? "found row with" : "no rows contained",
+                                                   value,
+                                                   others,
+                                                   overlappingFailedWrites));
         }
         else if (rows.isEmpty())
         {
             throw new AssertionError(String.format("Could not find row with c = %d", value));
         }
-        else if (rows.size() == 1 && !containsC)
+        else if (!containsC)
         {
             throw new AssertionError(String.format("Single row had c = %d, expected %d", rows.get(0).getInt("c"), value));
         }
@@ -231,104 +176,102 @@
     {
         createTable("CREATE TABLE %s (field1 int,field2 int,date int,PRIMARY KEY ((field1), field2)) WITH default_time_to_live = 5;");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        createView("mv",
-                   "CREATE MATERIALIZED VIEW mv AS SELECT * FROM %%s WHERE field1 IS NOT NULL AND field2 IS NOT NULL AND date IS NOT NULL PRIMARY KEY ((field1), date, field2) WITH CLUSTERING ORDER BY (date desc, field2 asc);");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE field1 IS NOT NULL AND field2 IS NOT NULL AND date IS NOT NULL " +
+                   "PRIMARY KEY ((field1), date, field2) WITH CLUSTERING ORDER BY (date desc, field2 asc)");
 
         updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 111);", flush);
         assertRows(execute("select * from %s"), row(1, 2, 111));
-        assertRows(execute("select * from mv"), row(1, 111, 2));
+        assertRows(executeView("select * from %s"), row(1, 111, 2));
 
         updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 222);", flush);
         assertRows(execute("select * from %s"), row(1, 2, 222));
-        assertRows(execute("select * from mv"), row(1, 222, 2));
+        assertRows(executeView("select * from %s"), row(1, 222, 2));
 
         updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 333);", flush);
 
         assertRows(execute("select * from %s"), row(1, 2, 333));
-        assertRows(execute("select * from mv"), row(1, 333, 2));
+        assertRows(executeView("select * from %s"), row(1, 333, 2));
 
         if (flush)
         {
-            Keyspace.open(keyspace()).getColumnFamilyStore("mv").forceMajorCompaction();
+            Keyspace.open(keyspace()).getColumnFamilyStore(currentView()).forceMajorCompaction();
             assertRows(execute("select * from %s"), row(1, 2, 333));
-            assertRows(execute("select * from mv"), row(1, 333, 2));
+            assertRows(executeView("select * from %s"), row(1, 333, 2));
         }
 
         // wait for ttl, data should be removed
         updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 444);", flush);
         assertRows(execute("select * from %s"), row(1, 2, 444));
-        assertRows(execute("select * from mv"), row(1, 444, 2));
+        assertRows(executeView("select * from %s"), row(1, 444, 2));
 
         Thread.sleep(5000);
         assertRows(execute("select * from %s"));
-        assertRows(execute("select * from mv"));
+        assertRows(executeView("select * from %s"));
 
         // shadow mv with date=555 and then update it back to live, wait for ttl
         updateView("update %s set date=555 where field1=1 and field2=2;");
         updateView("update %s set date=666 where field1=1 and field2=2;");
         updateViewWithFlush("update %s set date=555 where field1=1 and field2=2;", flush);
         assertRows(execute("select * from %s"), row(1, 2, 555));
-        assertRows(execute("select * from mv"), row(1, 555, 2));
+        assertRows(executeView("select * from %s"), row(1, 555, 2));
 
         Thread.sleep(5000);
         assertRows(execute("select * from %s"));
-        assertRows(execute("select * from mv"));
+        assertRows(executeView("select * from %s"));
 
         // test user-provided ttl for table with/without default-ttl
         for (boolean withDefaultTTL : Arrays.asList(true, false))
         {
             execute("TRUNCATE %s");
             if (withDefaultTTL)
-                execute("ALTER TABLE %s with default_time_to_live=" + (withDefaultTTL ? 10 : 0));
+                execute("ALTER TABLE %s with default_time_to_live=" + 10);
             updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 666) USING TTL 1000;", flush);
 
             assertRows(execute("select * from %s"), row(1, 2, 666));
-            assertRows(execute("select * from mv"), row(1, 666, 2));
+            assertRows(executeView("select * from %s"), row(1, 666, 2));
 
             updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 777) USING TTL 1100;", flush);
             assertRows(execute("select * from %s"), row(1, 2, 777));
-            assertRows(execute("select * from mv"), row(1, 777, 2));
+            assertRows(executeView("select * from %s"), row(1, 777, 2));
 
             updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 888) USING TTL 800;", flush);
 
             assertRows(execute("select * from %s"), row(1, 2, 888));
-            assertRows(execute("select * from mv"), row(1, 888, 2));
+            assertRows(executeView("select * from %s"), row(1, 888, 2));
 
             if (flush)
             {
-                Keyspace.open(keyspace()).getColumnFamilyStore("mv").forceMajorCompaction();
+                Keyspace.open(keyspace()).getColumnFamilyStore(currentView()).forceMajorCompaction();
                 assertRows(execute("select * from %s"), row(1, 2, 888));
-                assertRows(execute("select * from mv"), row(1, 888, 2));
+                assertRows(executeView("select * from %s"), row(1, 888, 2));
             }
 
             // wait for ttl, data should be removed
             updateViewWithFlush("insert into %s (field1, field2, date) values (1, 2, 999) USING TTL 5;", flush);
             assertRows(execute("select * from %s"), row(1, 2, 999));
-            assertRows(execute("select * from mv"), row(1, 999, 2));
+            assertRows(executeView("select * from %s"), row(1, 999, 2));
 
             Thread.sleep(5000);
             assertRows(execute("select * from %s"));
-            assertRows(execute("select * from mv"));
+            assertRows(executeView("select * from %s"));
 
             // shadow mv with date=555 and then update it back to live with ttl=5, wait for ttl to expire
             updateViewWithFlush("update %s  USING TTL 800 set date=555 where field1=1 and field2=2;", flush);
             assertRows(execute("select * from %s"), row(1, 2, 555));
-            assertRows(execute("select * from mv"), row(1, 555, 2));
+            assertRows(executeView("select * from %s"), row(1, 555, 2));
 
             updateViewWithFlush("update %s set date=666 where field1=1 and field2=2;", flush);
             assertRows(execute("select * from %s"), row(1, 2, 666));
-            assertRows(execute("select * from mv"), row(1, 666, 2));
+            assertRows(executeView("select * from %s"), row(1, 666, 2));
 
             updateViewWithFlush("update %s USING TTL 5 set date=555 where field1=1 and field2=2;", flush);
             assertRows(execute("select * from %s"), row(1, 2, 555));
-            assertRows(execute("select * from mv"), row(1, 555, 2));
+            assertRows(executeView("select * from %s"), row(1, 555, 2));
 
             Thread.sleep(5000);
             assertRows(execute("select * from %s"));
-            assertRows(execute("select * from mv"));
+            assertRows(executeView("select * from %s"));
         }
     }
 
@@ -348,35 +291,32 @@
     {
         createTable("CREATE TABLE %s (k int,c int,a int, b int, PRIMARY KEY ((k), c)) WITH default_time_to_live = 1000;");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        createView("mv",
-                   "CREATE MATERIALIZED VIEW mv AS SELECT k,c,a FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL "
-                           + "PRIMARY KEY (c, k)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT k,c,a FROM %s " +
+                   "WHERE k IS NOT NULL AND c IS NOT NULL " +
+                   "PRIMARY KEY (c, k)");
 
         // table default ttl
         updateViewWithFlush("UPDATE %s SET b = 111 WHERE k = 1 AND c = 2", flush);
         assertRows(execute("select k,c,a,b from %s"), row(1, 2, null, 111));
-        assertRows(execute("select k,c,a from mv"), row(1, 2, null));
+        assertRows(executeView("select k,c,a from %s"), row(1, 2, null));
 
         updateViewWithFlush("UPDATE %s SET b = null WHERE k = 1 AND c = 2", flush);
         assertRows(execute("select k,c,a,b from %s"));
-        assertRows(execute("select k,c,a from mv"));
+        assertRows(executeView("select k,c,a from %s"));
 
         updateViewWithFlush("UPDATE %s SET b = 222 WHERE k = 1 AND c = 2", flush);
         assertRows(execute("select k,c,a,b from %s"), row(1, 2, null, 222));
-        assertRows(execute("select k,c,a from mv"), row(1, 2, null));
+        assertRows(executeView("select k,c,a from %s"), row(1, 2, null));
 
         updateViewWithFlush("DELETE b FROM %s WHERE k = 1 AND c = 2", flush);
         assertRows(execute("select k,c,a,b from %s"));
-        assertRows(execute("select k,c,a from mv"));
+        assertRows(executeView("select k,c,a from %s"));
 
         if (flush)
         {
-            Keyspace.open(keyspace()).getColumnFamilyStore("mv").forceMajorCompaction();
+            Keyspace.open(keyspace()).getColumnFamilyStore(currentView()).forceMajorCompaction();
             assertRows(execute("select k,c,a,b from %s"));
-            assertRows(execute("select k,c,a from mv"));
+            assertRows(executeView("select k,c,a from %s"));
         }
 
         // test user-provided ttl for table with/without default-ttl
@@ -384,59 +324,42 @@
         {
             execute("TRUNCATE %s");
             if (withDefaultTTL)
-                execute("ALTER TABLE %s with default_time_to_live=" + (withDefaultTTL ? 10 : 0));
+                execute("ALTER TABLE %s with default_time_to_live=" + 10);
 
             updateViewWithFlush("UPDATE %s USING TTL 100 SET b = 666 WHERE k = 1 AND c = 2", flush);
             assertRows(execute("select k,c,a,b from %s"), row(1, 2, null, 666));
-            assertRows(execute("select k,c,a from mv"), row(1, 2, null));
+            assertRows(executeView("select k,c,a from %s"), row(1, 2, null));
 
             updateViewWithFlush("UPDATE %s USING TTL 90  SET b = null WHERE k = 1 AND c = 2", flush);
             if (flush)
-                FBUtilities.waitOnFutures(Keyspace.open(keyspace()).flush());
+                Util.flushKeyspace(keyspace());
             assertRows(execute("select k,c,a,b from %s"));
-            assertRows(execute("select k,c,a from mv"));
+            assertRows(executeView("select k,c,a from %s"));
 
             updateViewWithFlush("UPDATE %s USING TTL 80  SET b = 777 WHERE k = 1 AND c = 2", flush);
             assertRows(execute("select k,c,a,b from %s"), row(1, 2, null, 777));
-            assertRows(execute("select k,c,a from mv"), row(1, 2, null));
+            assertRows(executeView("select k,c,a from %s"), row(1, 2, null));
 
             updateViewWithFlush("DELETE b FROM %s WHERE k = 1 AND c = 2", flush);
             assertRows(execute("select k,c,a,b from %s"));
-            assertRows(execute("select k,c,a from mv"));
+            assertRows(executeView("select k,c,a from %s"));
 
             updateViewWithFlush("UPDATE %s USING TTL 110  SET b = 888 WHERE k = 1 AND c = 2", flush);
             assertRows(execute("select k,c,a,b from %s"), row(1, 2, null, 888));
-            assertRows(execute("select k,c,a from mv"), row(1, 2, null));
+            assertRows(executeView("select k,c,a from %s"), row(1, 2, null));
 
             updateViewWithFlush("UPDATE %s USING TTL 5  SET b = 999 WHERE k = 1 AND c = 2", flush);
             assertRows(execute("select k,c,a,b from %s"), row(1, 2, null, 999));
-            assertRows(execute("select k,c,a from mv"), row(1, 2, null));
+            assertRows(executeView("select k,c,a from %s"), row(1, 2, null));
 
             Thread.sleep(5000); // wait for ttl expired
 
             if (flush)
             {
-                Keyspace.open(keyspace()).getColumnFamilyStore("mv").forceMajorCompaction();
+                Keyspace.open(keyspace()).getColumnFamilyStore(currentView()).forceMajorCompaction();
                 assertRows(execute("select k,c,a,b from %s"));
-                assertRows(execute("select k,c,a from mv"));
+                assertRows(executeView("select k,c,a from %s"));
             }
         }
     }
-
-    private void updateView(String query, Object... params) throws Throwable
-    {
-        updateViewWithFlush(query, false, params);
-    }
-
-    private void updateViewWithFlush(String query, boolean flush, Object... params) throws Throwable
-    {
-        executeNet(version, query, params);
-        while (!(((SEPExecutor) Stage.VIEW_MUTATION.executor()).getPendingTaskCount() == 0
-                && ((SEPExecutor) Stage.VIEW_MUTATION.executor()).getActiveTaskCount() == 0))
-        {
-            Thread.sleep(1);
-        }
-        if (flush)
-            Keyspace.open(keyspace()).flush();
-    }
 }
diff --git a/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java b/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java
index a4f98e9..9531dd2 100644
--- a/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java
+++ b/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java
@@ -21,7 +21,7 @@
  *
  */
 
-import java.io.*;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.*;
@@ -30,6 +30,8 @@
 
 import com.google.common.util.concurrent.RateLimiter;
 
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -55,6 +57,7 @@
 import org.apache.cassandra.security.EncryptionContext;
 import org.apache.cassandra.security.EncryptionContextGenerator;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 @Ignore
 public abstract class CommitLogStressTest
@@ -101,7 +104,7 @@
     @BeforeClass
     static public void initialize() throws IOException
     {
-        try (FileInputStream fis = new FileInputStream("CHANGES.txt"))
+        try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt"))
         {
             dataSource = ByteBuffer.allocateDirect((int) fis.getChannel().size());
             while (dataSource.hasRemaining())
@@ -123,15 +126,15 @@
         File dir = new File(location);
         if (dir.isDirectory())
         {
-            File[] files = dir.listFiles();
+            File[] files = dir.tryList();
 
             for (File f : files)
-                if (!f.delete())
+                if (!f.tryDelete())
                     Assert.fail("Failed to delete " + f);
         }
         else
         {
-            dir.mkdir();
+            dir.tryCreateDirectory();
         }
     }
 
@@ -245,13 +248,13 @@
         System.out.println("Stopped. Replaying... ");
         System.out.flush();
         Reader reader = new Reader();
-        File[] files = new File(location).listFiles();
+        File[] files = new File(location).tryList();
 
         DummyHandler handler = new DummyHandler();
         reader.readAllFiles(handler, files);
 
         for (File f : files)
-            if (!f.delete())
+            if (!f.tryDelete())
                 Assert.fail("Failed to delete " + f);
 
         if (hash == reader.hash && cells == reader.cells)
@@ -278,7 +281,7 @@
         commitLog.segmentManager.awaitManagementTasksCompletion();
 
         long combinedSize = 0;
-        for (File f : new File(commitLog.segmentManager.storageDirectory).listFiles())
+        for (File f : new File(commitLog.segmentManager.storageDirectory).tryList())
             combinedSize += f.length();
         Assert.assertEquals(combinedSize, commitLog.getActiveOnDiskSize());
 
@@ -307,7 +310,7 @@
             t.start();
         }
 
-        final long start = System.currentTimeMillis();
+        final long start = currentTimeMillis();
         Runnable printRunnable = new Runnable()
         {
             long lastUpdate = 0;
@@ -325,11 +328,11 @@
                     temp += clt.counter.get();
                     sz += clt.dataSize;
                 }
-                double time = (System.currentTimeMillis() - start) / 1000.0;
+                double time = (currentTimeMillis() - start) / 1000.0;
                 double avg = (temp / time);
                 System.out.println(
                         String.format("second %d mem max %.0fmb allocated %.0fmb free %.0fmb mutations %d since start %d avg %.3f content %.1fmb ondisk %.1fmb transfer %.3fmb",
-                                      ((System.currentTimeMillis() - start) / 1000),
+                                      ((currentTimeMillis() - start) / 1000),
                                       mb(maxMemory),
                                       mb(allocatedMemory),
                                       mb(freeMemory),
diff --git a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
index fe8cdc2..a75ca52 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
@@ -40,6 +40,9 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class LongCompactionsTest
@@ -122,8 +125,8 @@
         // give garbage collection a bit of time to catch up
         Thread.sleep(1000);
 
-        long start = System.nanoTime();
-        final int gcBefore = (int) (System.currentTimeMillis() / 1000) - Schema.instance.getTableMetadata(KEYSPACE1, "Standard1").params.gcGraceSeconds;
+        long start = nanoTime();
+        final int gcBefore = (int) (currentTimeMillis() / 1000) - Schema.instance.getTableMetadata(KEYSPACE1, "Standard1").params.gcGraceSeconds;
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.COMPACTION))
         {
             assert txn != null : "Cannot markCompacting all sstables";
@@ -134,7 +137,7 @@
                                          sstableCount,
                                          partitionsPerSSTable,
                                          rowsPerPartition,
-                                         TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)));
+                                         TimeUnit.NANOSECONDS.toMillis(nanoTime() - start)));
     }
 
     @Test
@@ -165,7 +168,7 @@
 
                 inserted.add(key);
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
             CompactionsTest.assertMaxTimestamp(cfs, maxTimestampExpected);
 
             assertEquals(inserted.toString(), inserted.size(), Util.getAll(Util.cmd(cfs).build()).size());
diff --git a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyCQLTest.java b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyCQLTest.java
index 9bfa380..1df7e7c 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyCQLTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyCQLTest.java
@@ -32,9 +32,12 @@
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.Hex;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class LongLeveledCompactionStrategyCQLTest extends CQLTester
 {
 
@@ -47,7 +50,7 @@
         ExecutorService es = Executors.newSingleThreadExecutor();
         DatabaseDescriptor.setConcurrentCompactors(8);
         AtomicBoolean stop = new AtomicBoolean(false);
-        long start = System.currentTimeMillis();
+        long start = currentTimeMillis();
         try
         {
             Random r = new Random();
@@ -70,12 +73,12 @@
                             throw new RuntimeException(throwable);
                         }
                     }
-                    getCurrentColumnFamilyStore().forceBlockingFlush();
+                    getCurrentColumnFamilyStore().forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
                     Uninterruptibles.sleepUninterruptibly(r.nextInt(200), TimeUnit.MILLISECONDS);
                 }
             });
 
-            while(System.currentTimeMillis() - start < TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES))
+            while(currentTimeMillis() - start < TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES))
             {
                 StorageService.instance.getTokenMetadata().invalidateCachedRings();
                 Uninterruptibles.sleepUninterruptibly(r.nextInt(1000), TimeUnit.MILLISECONDS);
diff --git a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
index 3de764a..733e46f 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
@@ -29,7 +29,6 @@
 import org.apache.cassandra.io.sstable.ISSTableScanner;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
@@ -40,7 +39,6 @@
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.assertFalse;
@@ -77,7 +75,7 @@
         CompactionStrategyManager mgr = store.getCompactionStrategyManager();
         LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) mgr.getStrategies().get(1).get(0);
 
-        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KiB value, make it easy to have multiple files
 
         populateSSTables(store);
 
@@ -140,7 +138,7 @@
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARDLVL2);
-        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KiB value, make it easy to have multiple files
 
         populateSSTables(store);
 
@@ -149,7 +147,7 @@
         CompactionStrategyManager mgr = store.getCompactionStrategyManager();
         LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) mgr.getStrategies().get(1).get(0);
 
-        value = ByteBuffer.wrap(new byte[10 * 1024]); // 10 KB value
+        value = ByteBuffer.wrap(new byte[10 * 1024]); // 10 KiB value
 
         // Adds 10 partitions
         for (int r = 0; r < 10; r++)
@@ -164,7 +162,7 @@
         }
 
         //Flush sstable
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         store.runWithCompactionsDisabled(new Callable<Void>()
         {
@@ -247,7 +245,7 @@
 
     private void populateSSTables(ColumnFamilyStore store)
     {
-        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KiB value, make it easy to have multiple files
 
         // Enough data to have a level 1 and 2
         int rows = 128;
@@ -263,7 +261,7 @@
 
             Mutation rm = new Mutation(builder.build());
             rm.apply();
-            store.forceBlockingFlush();
+            Util.flush(store);
         }
     }
 }
diff --git a/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java b/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java
index 5f9aa31..26d454d 100644
--- a/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java
+++ b/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java
@@ -31,6 +31,8 @@
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Base class for {@link Murmur3ReplicationAwareTokenAllocatorTest} and {@link RandomReplicationAwareTokenAllocatorTest},
  * we need to separate classes to avoid timeouts in case flaky tests need to be repeated, see CASSANDRA-12784.
@@ -489,7 +491,7 @@
         if (size < targetClusterSize)
         {
             System.out.format("Adding %d unit(s) using %s...", targetClusterSize - size, t.toString());
-            long time = System.currentTimeMillis();
+            long time = currentTimeMillis();
             while (size < targetClusterSize)
             {
                 int tokens = tc.tokenCount(perUnitCount, rand);
@@ -500,7 +502,7 @@
                 if (verifyMetrics)
                     updateSummary(t, su, st, false);
             }
-            System.out.format(" Done in %.3fs\n", (System.currentTimeMillis() - time) / 1000.0);
+            System.out.format(" Done in %.3fs\n", (currentTimeMillis() - time) / 1000.0);
             if (verifyMetrics)
             {
                 updateSummary(t, su, st, true);
@@ -540,4 +542,4 @@
             System.out.format("Worst intermediate unit\t%s  token %s\n", su, st);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/long/org/apache/cassandra/dht/tokenallocator/NoReplicationTokenAllocatorTest.java b/test/long/org/apache/cassandra/dht/tokenallocator/NoReplicationTokenAllocatorTest.java
index ee38a28..5e13519 100644
--- a/test/long/org/apache/cassandra/dht/tokenallocator/NoReplicationTokenAllocatorTest.java
+++ b/test/long/org/apache/cassandra/dht/tokenallocator/NoReplicationTokenAllocatorTest.java
@@ -34,6 +34,8 @@
 import org.apache.cassandra.dht.RandomPartitioner;
 import org.apache.cassandra.dht.Token;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class NoReplicationTokenAllocatorTest extends TokenAllocatorTestBase
 {
 
@@ -149,7 +151,7 @@
         if (size < targetClusterSize)
         {
             System.out.format("Adding %d unit(s) using %s...", targetClusterSize - size, t.toString());
-            long time = System.currentTimeMillis();
+            long time = currentTimeMillis();
 
             while (size < targetClusterSize)
             {
@@ -160,7 +162,7 @@
                 if (verifyMetrics)
                     updateSummary(t, su, st, false);
             }
-            System.out.format(" Done in %.3fs\n", (System.currentTimeMillis() - time) / 1000.0);
+            System.out.format(" Done in %.3fs\n", (currentTimeMillis() - time) / 1000.0);
 
             if (verifyMetrics)
             {
diff --git a/test/long/org/apache/cassandra/dht/tokenallocator/RandomReplicationAwareTokenAllocatorTest.java b/test/long/org/apache/cassandra/dht/tokenallocator/RandomReplicationAwareTokenAllocatorTest.java
index 6a2d59e..bb1a2c8 100644
--- a/test/long/org/apache/cassandra/dht/tokenallocator/RandomReplicationAwareTokenAllocatorTest.java
+++ b/test/long/org/apache/cassandra/dht/tokenallocator/RandomReplicationAwareTokenAllocatorTest.java
@@ -20,7 +20,6 @@
 
 import org.junit.Test;
 
-import org.apache.cassandra.Util;
 import org.apache.cassandra.dht.RandomPartitioner;
 
 public class RandomReplicationAwareTokenAllocatorTest extends AbstractReplicationAwareTokenAllocatorTest
diff --git a/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java b/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java
index 24a9a78..1b78c48 100644
--- a/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java
+++ b/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
@@ -29,6 +28,7 @@
 
 import com.google.common.collect.Iterables;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
@@ -48,6 +48,8 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class HintsWriteThenReadTest
 {
     private static final String KEYSPACE = "hints_write_then_read_test";
@@ -61,9 +63,9 @@
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), SchemaLoader.standardCFMD(KEYSPACE, TABLE));
 
-        HintsDescriptor descriptor = new HintsDescriptor(UUID.randomUUID(), System.currentTimeMillis());
+        HintsDescriptor descriptor = new HintsDescriptor(UUID.randomUUID(), currentTimeMillis());
 
-        File directory = Files.createTempDirectory(null).toFile();
+        File directory = new File(Files.createTempDirectory(null));
         try
         {
             testWriteReadCycle(directory, descriptor);
@@ -96,8 +98,8 @@
 
     private static void verifyChecksum(File directory, HintsDescriptor descriptor) throws IOException
     {
-        File hintsFile = new File(directory, descriptor.fileName());
-        File checksumFile = new File(directory, descriptor.checksumFileName());
+        File hintsFile = descriptor.file(directory);
+        File checksumFile = descriptor.checksumFile(directory);
 
         assertTrue(checksumFile.exists());
 
@@ -112,7 +114,7 @@
         long baseTimestamp = descriptor.timestamp;
         int index = 0;
 
-        try (HintsReader reader = HintsReader.open(new File(directory, descriptor.fileName())))
+        try (HintsReader reader = HintsReader.open(descriptor.file(directory)))
         {
             for (HintsReader.Page page : reader)
             {
diff --git a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
index b3cdaa1..9fbf24d 100644
--- a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
+++ b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
@@ -20,12 +20,15 @@
  */
 package org.apache.cassandra.io.compress;
 
-import java.io.FileInputStream;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.concurrent.ThreadLocalRandom;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class CompressorPerformance
 {
 
@@ -71,7 +74,7 @@
         int checksum = 0;
         int count = 100;
 
-        long time = System.nanoTime();
+        long time = nanoTime();
         long uncompressedBytes = 0;
         long compressedBytes = 0;
         for (int i=0; i<count; ++i)
@@ -85,12 +88,12 @@
             checksum += output.get(ThreadLocalRandom.current().nextInt(output.position()));
             dataSource.rewind();
         }
-        long timec = System.nanoTime() - time;
+        long timec = nanoTime() - time;
         output.flip();
         input.put(output);
         input.flip();
 
-        time = System.nanoTime();
+        time = nanoTime();
         for (int i=0; i<count; ++i)
         {
             output.clear();
@@ -99,7 +102,7 @@
             checksum += output.get(ThreadLocalRandom.current().nextInt(output.position()));
             input.rewind();
         }
-        long timed = System.nanoTime() - time;
+        long timed = nanoTime() - time;
         System.out.format("Compressor %s %s->%s compress %.3f ns/b %.3f mb/s uncompress %.3f ns/b %.3f mb/s ratio %.2f:1.%s\n",
                           compressor.getClass().getSimpleName(),
                           in,
@@ -114,7 +117,7 @@
 
     public static void main(String[] args) throws IOException
     {
-        try (FileInputStream fis = new FileInputStream("CHANGES.txt"))
+        try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt"))
         {
             int len = (int)fis.getChannel().size();
             dataSource = ByteBuffer.allocateDirect(len);
diff --git a/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java b/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java
index a6f428a..f2bbfa6 100644
--- a/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java
+++ b/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java
@@ -18,13 +18,13 @@
 
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 
 import com.google.common.io.Files;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
@@ -46,9 +46,9 @@
         String TABLE = "table1";
         int size = 30000;
 
-        File tempdir = Files.createTempDir();
-        File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
-        assert dataDir.mkdirs();
+        File tempdir = new File(Files.createTempDir());
+        File dataDir = new File(tempdir.absolutePath() + File.pathSeparator() + KS + File.pathSeparator() + TABLE);
+        assert dataDir.tryCreateDirectories();
 
         StringBuilder schemaColumns = new StringBuilder();
         StringBuilder queryColumns = new StringBuilder();
diff --git a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
index e37045a..19d7709 100644
--- a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
+++ b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
@@ -18,12 +18,12 @@
 
 package org.apache.cassandra.streaming;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -44,6 +44,7 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.OutputHandler;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class LongStreamingTest
@@ -58,8 +59,8 @@
         StorageService.instance.initServer();
 
         StorageService.instance.setCompactionThroughputMbPerSec(0);
-        StorageService.instance.setStreamThroughputMbPerSec(0);
-        StorageService.instance.setInterDCStreamThroughputMbPerSec(0);
+        StorageService.instance.setStreamThroughputMbitPerSec(0);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(0);
     }
 
     @Test
@@ -79,9 +80,9 @@
         String KS = useSstableCompression ? "sstable_compression_ks" : "stream_compression_ks";
         String TABLE = "table1";
 
-        File tempdir = Files.createTempDir();
-        File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
-        assert dataDir.mkdirs();
+        File tempdir = new File(Files.createTempDir());
+        File dataDir = new File(tempdir.absolutePath() + File.pathSeparator() + KS + File.pathSeparator() + TABLE);
+        assert dataDir.tryCreateDirectories();
 
         String schema = "CREATE TABLE " + KS + '.'  + TABLE + "  ("
                         + "  k int PRIMARY KEY,"
@@ -99,20 +100,20 @@
         Assert.assertEquals(useSstableCompression, compressionParams.isEnabled());
 
 
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         for (int i = 0; i < 10_000_000; i++)
             writer.addRow(i, "test1", 24);
 
         writer.close();
-        System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start)));
+        System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(nanoTime() - start)));
 
-        File[] dataFiles = dataDir.listFiles((dir, name) -> name.endsWith("-Data.db"));
-        long dataSize = 0l;
+        File[] dataFiles = dataDir.tryList((dir, name) -> name.endsWith("-Data.db"));
+        long dataSizeInBytes = 0L;
         for (File file : dataFiles)
         {
-            System.err.println("File : "+file.getAbsolutePath());
-            dataSize += file.length();
+            System.err.println("File : "+file.absolutePath());
+            dataSizeInBytes += file.length();
         }
 
         SSTableLoader loader = new SSTableLoader(dataDir, new SSTableLoader.Client()
@@ -132,13 +133,13 @@
             }
         }, new OutputHandler.SystemOutput(false, false));
 
-        start = System.nanoTime();
+        start = nanoTime();
         loader.stream().get();
 
-        long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
-        System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec",
+        long millis = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
+        System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f MiBsec",
                                          millis/1000d,
-                                         (dataSize / (1 << 20) / (millis / 1000d)) * 8));
+                                         (dataSizeInBytes / (1 << 20) / (millis / 1000d)) * 8));
 
 
         //Stream again
@@ -159,23 +160,23 @@
             }
         }, new OutputHandler.SystemOutput(false, false));
 
-        start = System.nanoTime();
+        start = nanoTime();
         loader.stream().get();
 
-        millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
-        System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec",
+        millis = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
+        System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f MiBsec",
                                          millis/1000d,
-                                         (dataSize / (1 << 20) / (millis / 1000d)) * 8));
+                                         (dataSizeInBytes / (1 << 20) / (millis / 1000d)) * 8));
 
 
         //Compact them both
-        start = System.nanoTime();
+        start = nanoTime();
         Keyspace.open(KS).getColumnFamilyStore(TABLE).forceMajorCompaction();
-        millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+        millis = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
 
-        System.err.println(String.format("Finished Compacting in %.2f seconds: %.2f Mb/sec",
+        System.err.println(String.format("Finished Compacting in %.2f seconds: %.2f MiBsec",
                                          millis / 1000d,
-                                         (dataSize * 2 / (1 << 20) / (millis / 1000d)) * 8));
+                                         (dataSizeInBytes * 2 / (1 << 20) / (millis / 1000d)) * 8));
 
         UntypedResultSet rs = QueryProcessor.executeInternal("SELECT * FROM " + KS + '.'  + TABLE + " limit 100;");
         assertEquals(100, rs.size());
diff --git a/test/memory/org/apache/cassandra/db/compaction/CompactionAllocationTest.java b/test/memory/org/apache/cassandra/db/compaction/CompactionAllocationTest.java
index 4398b3d..4d652b7 100644
--- a/test/memory/org/apache/cassandra/db/compaction/CompactionAllocationTest.java
+++ b/test/memory/org/apache/cassandra/db/compaction/CompactionAllocationTest.java
@@ -23,15 +23,22 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Ints;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.index.internal.CollatedViewIndexBuilder;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -51,8 +58,13 @@
 import org.apache.cassandra.db.ReadExecutionController;
 import org.apache.cassandra.db.ReadQuery;
 import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.lifecycle.SSTableSet;
+import org.apache.cassandra.db.lifecycle.View;
 import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
 import org.apache.cassandra.db.partitions.UnfilteredPartitionIterators;
+import org.apache.cassandra.index.Index;
+import org.apache.cassandra.index.SecondaryIndexBuilder;
+import org.apache.cassandra.io.sstable.ReducingKeyIterator;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.io.util.UnbufferedDataOutputStreamPlus;
@@ -64,6 +76,7 @@
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.ObjectSizes;
+import org.apache.cassandra.utils.concurrent.Refs;
 
 public class CompactionAllocationTest
 {
@@ -186,6 +199,8 @@
                                       "wideOverlappingPartitions9",
                                       "widePartitionsOverlappingRows9",
                                       "widePartitionsOverlappingRows3"));
+        groups.add(Lists.newArrayList("widePartitionsSingleIndexedColumn",
+                                      "widePartitionsMultipleIndexedColumns"));
 
         Map<String, List<String>> fullRows = new HashMap<>();
         for (String workload : Iterables.concat(groups))
@@ -222,6 +237,38 @@
         ColumnFamilyStore getCfs();
         String name();
         List<Runnable> getReads();
+
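+        /**
+         * Default hooks for the measured phases; workloads that exercise something other
+         * than plain reads and compactions (e.g. the index-rebuild workloads below) can
+         * override these.
+         */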
+        default int executeReads()
+        {
+            List<Runnable> reads = getReads();
+            for (int i=0; i<reads.size(); i++)
+                reads.get(i).run();
+            return reads.size();
+        }
+
+        default void executeCompactions()
+        {
+            ColumnFamilyStore cfs = getCfs();
+            ActiveCompactions active = new ActiveCompactions();
+            Set<SSTableReader> sstables = cfs.getLiveSSTables();
+
+            CompactionTasks tasks = cfs.getCompactionStrategyManager()
+                                       .getUserDefinedTasks(sstables, FBUtilities.nowInSeconds());
+
+            Assert.assertFalse(tasks.isEmpty());
+
+            for (AbstractCompactionTask task : tasks)
+                task.execute(active);
+
+            Assert.assertEquals(1, cfs.getLiveSSTables().size());
+        }
+
+        default int[] getSSTableStats()
+        {
+            int numPartitions = Ints.checkedCast(Iterables.getOnlyElement(getCfs().getLiveSSTables()).getSSTableMetadata().estimatedPartitionSize.count());
+            int numRows = Ints.checkedCast(Iterables.getOnlyElement(getCfs().getLiveSSTables()).getSSTableMetadata().totalRows);
+            return new int[] {numPartitions, numRows};
+        }
     }
 
     private static Measurement createMeasurement()
@@ -259,6 +306,7 @@
         {
             if (Thread.currentThread().getId() != threadID)
                 return;
+
             objectsAllocated++;
             bytesAllocated += bytes;
         }
@@ -369,8 +417,7 @@
                 logger.info(">>> Start profiling");
                 Thread.sleep(10000);
             }
-            for (int i=0; i<reads.size(); i++)
-                reads.get(i).run();
+            int readCount = workload.executeReads();
             Thread.sleep(1000);
             if (PROFILING_READS && !workload.name().equals("warmup"))
             {
@@ -378,20 +425,12 @@
                 Thread.sleep(10000);
             }
             readSampler.stop();
-
-            readSummary = String.format("%s bytes, %s /read, %s cpu", readSampler.bytes(), readSampler.bytes()/reads.size(), readSampler.cpu());
-            readSummaries.put(workload.name(), new ReadSummary(readSampler, reads.size()));
+            readSummary = String.format("%s bytes, %s /read, %s cpu", readSampler.bytes(), readSampler.bytes()/readCount, readSampler.cpu());
+            readSummaries.put(workload.name(), new ReadSummary(readSampler, readCount));
         }
 
-        ColumnFamilyStore cfs = workload.getCfs();
-        ActiveCompactions active = new ActiveCompactions();
-        Set<SSTableReader> sstables = cfs.getLiveSSTables();
-
-        CompactionTasks tasks = cfs.getCompactionStrategyManager()
-                                   .getUserDefinedTasks(sstables, FBUtilities.nowInSeconds());
-        Assert.assertFalse(tasks.isEmpty());
-
         String compactionSummary = "SKIPPED";
+        ColumnFamilyStore cfs = workload.getCfs();
         if (!PROFILING_READS)
         {
             compactionSampler.start();
@@ -400,8 +439,8 @@
                 logger.info(">>> Start profiling");
                 Thread.sleep(10000);
             }
-            for (AbstractCompactionTask task : tasks)
-                task.execute(active);
+
+            workload.executeCompactions();
             Thread.sleep(1000);
             if (PROFILING_COMPACTION && !workload.name().equals("warmup"))
             {
@@ -410,11 +449,11 @@
             }
             compactionSampler.stop();
 
-            Assert.assertEquals(1, cfs.getLiveSSTables().size());
-            int numPartitions = Ints.checkedCast(Iterables.getOnlyElement(cfs.getLiveSSTables()).getSSTableMetadata().estimatedPartitionSize.count());
-            int numRows = Ints.checkedCast(Iterables.getOnlyElement(cfs.getLiveSSTables()).getSSTableMetadata().totalRows);
+            int[] tableStats = workload.getSSTableStats();
+            int numPartitions = tableStats[0];
+            int numRows = tableStats[1];
 
-            compactionSummary = String.format("%s bytes, %s /partition, %s /row, %s cpu", compactionSampler.bytes(), compactionSampler.bytes()/numPartitions, compactionSampler.bytes()/numRows, compactionSampler.cpu());
+            compactionSummary = String.format("%s bytes, %s objects, %s /partition, %s /row, %s cpu", compactionSampler.bytes(), compactionSampler.objects(), compactionSampler.bytes()/numPartitions, compactionSampler.bytes()/numRows, compactionSampler.cpu());
             compactionSummaries.put(workload.name(), new CompactionSummary(compactionSampler, numPartitions, numRows));
         }
 
@@ -474,7 +513,7 @@
                 String read = String.format("SELECT * FROM %s.%s WHERE k = ?", ksname, "tbl");
                 SelectStatement select = (SelectStatement) QueryProcessor.parseStatement(read).prepare(ClientState.forInternalCalls());
                 QueryState queryState = QueryState.forInternalCalls();
-                for (int f=0; f<numSSTable; f++)
+                for (int f = 0; f < numSSTable; f++)
                 {
                     for (int p = 0; p < sstablePartitions; p++)
                     {
@@ -487,7 +526,7 @@
                             reads.add(() -> runQuery(query, cfs.metadata.get()));
                         }
                     }
-                    cfs.forceBlockingFlush();
+                    Util.flush(cfs);
                 }
 
                 Assert.assertEquals(numSSTable, cfs.getLiveSSTables().size());
@@ -583,12 +622,12 @@
                 String read = String.format("SELECT * FROM %s.%s WHERE k = ?", ksname, "tbl");
                 SelectStatement select = (SelectStatement) QueryProcessor.parseStatement(read).prepare(ClientState.forInternalCalls());
                 QueryState queryState = QueryState.forInternalCalls();
-                for (int f=0; f<numSSTable; f++)
+                for (int f = 0; f < numSSTable; f++)
                 {
                     for (int p = 0; p < sstablePartitions; p++)
                     {
                         String key = String.format("%08d", overlap ? p : (f * sstablePartitions) + p);
-                        for (int r=0; r<rowsPerPartition; r++)
+                        for (int r = 0; r < rowsPerPartition; r++)
                         {
                             QueryProcessor.executeInternal(insert, key, makeRandomString(6, overlapCK ? r : -1),
                                                            makeRandomString(8), makeRandomString(8),
@@ -602,7 +641,7 @@
                             reads.add(() -> runQuery(query, cfs.metadata.get()));
                         }
                     }
-                    cfs.forceBlockingFlush();
+                    Util.flush(cfs);
                 }
 
                 Assert.assertEquals(numSSTable, cfs.getLiveSSTables().size());
@@ -684,12 +723,12 @@
                 String read = String.format("SELECT * FROM %s.%s WHERE k = ?", ksname, "tbl");
                 SelectStatement select = (SelectStatement) QueryProcessor.parseStatement(read).prepare(ClientState.forInternalCalls());
                 QueryState queryState = QueryState.forInternalCalls();
-                for (int f=0; f<numSSTable; f++)
+                for (int f = 0; f < numSSTable; f++)
                 {
                     for (int p = 0; p < sstablePartitions; p++)
                     {
                         String key = String.format("%08d", overlap ? p : (f * sstablePartitions) + p);
-                        for (int r=0; r<rowsPerPartition; r++)
+                        for (int r = 0; r < rowsPerPartition; r++)
                         {
                             QueryProcessor.executeInternal(insert , key, makeRandomString(6, overlapCK ? r : -1),
                                                            makeRandomString(rowWidth>>2), makeRandomString(rowWidth>>2),
@@ -702,7 +741,7 @@
                             reads.add(() -> runQuery(query, cfs.metadata.get()));
                         }
                     }
-                    cfs.forceBlockingFlush();
+                    Util.flush(cfs);
                 }
 
                 Assert.assertEquals(numSSTable, cfs.getLiveSSTables().size());
@@ -760,4 +799,135 @@
     {
         testWidePartitions("widePartitionsOverlappingRows3", 3, maybeInflate(24), true, true);
     }
+
+
+    private static void testIndexingWidePartitions(String name,
+                                                   int numSSTable,
+                                                   int sstablePartitions,
+                                                   IndexDef...indexes) throws Throwable
+    {
+        String ksname = "ks_" + name.toLowerCase();
+        SchemaLoader.createKeyspace(ksname, KeyspaceParams.simple(1),
+                CreateTableStatement.parse("CREATE TABLE tbl (k text, c text, v1 text, v2 text, v3 text, v4 text, PRIMARY KEY (k, c))", ksname).build());
+
+        ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(Schema.instance.getTableMetadata(ksname, "tbl").id);
+        Assert.assertNotNull(cfs);
+        cfs.disableAutoCompaction();
+        int rowWidth = 100;
+        int rowsPerPartition = 1000;
+
+        measure(new Workload()
+        {
+            @SuppressWarnings("UnstableApiUsage")
+            public void setup()
+            {
+                cfs.disableAutoCompaction();
+                String insert = String.format("INSERT INTO %s.%s (k, c, v1, v2, v3, v4) VALUES (?, ?, ?, ?, ?, ?)", ksname, "tbl");
+                for (int f = 0; f < numSSTable; f++)
+                {
+                    for (int p = 0; p < sstablePartitions; p++)
+                    {
+                        String key = String.format("%08d", (f * sstablePartitions) + p);
+                        for (int r = 0; r < rowsPerPartition; r++)
+                        {
+                            QueryProcessor.executeInternal(insert, key, makeRandomString(6, -1),
+                                                           makeRandomString(rowWidth>>2), makeRandomString(rowWidth>>2),
+                                                           makeRandomString(rowWidth>>2), makeRandomString(rowWidth>>2));
+                        }
+                    }
+                    Util.flush(cfs);
+                }
+
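+                // create each index, then poll until its build completes, since index
+                // creation runs asynchronously in the background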
+                for (IndexDef index : indexes)
+                {
+                    QueryProcessor.executeInternal(String.format(index.cql, index.name, ksname, "tbl"));
+                    while (!cfs.indexManager.getBuiltIndexNames().contains(index.name))
+                        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+                }
+
+                Assert.assertEquals(numSSTable, cfs.getLiveSSTables().size());
+            }
+
+            public ColumnFamilyStore getCfs()
+            {
+                return cfs;
+            }
+
+            public List<Runnable> getReads()
+            {
+                return new ArrayList<>();
+            }
+
+            public String name()
+            {
+                return name;
+            }
+
+            public int executeReads()
+            {
+                // return 1 to avoid divide by zero error
+                return 1;
+            }
+
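+            // instead of compacting, rebuild the secondary indexes over the canonical
+            // sstables, so the "compaction" sampler measures index-build allocations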
+            public void executeCompactions()
+            {
+                logger.info("Starting index re-build");
+                try (ColumnFamilyStore.RefViewFragment viewFragment = cfs.selectAndReference(View.selectFunction(SSTableSet.CANONICAL));
+                     Refs<SSTableReader> sstables = viewFragment.refs)
+                {
+
+                    Set<Index> indexes = new HashSet<>(cfs.indexManager.listIndexes());
+                    SecondaryIndexBuilder builder = new CollatedViewIndexBuilder(cfs,
+                                                                                 indexes,
+                                                                                 new ReducingKeyIterator(sstables),
+                                                                                 ImmutableSet.copyOf(sstables));
+                    builder.build();
+                }
+                logger.info("Index re-build complete");
+            }
+
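+            // the index rebuild leaves every input sstable live, so sum the partition and
+            // row counts across all sstables rather than expecting a single compacted one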
+            public int[] getSSTableStats()
+            {
+                int numPartitions = cfs.getLiveSSTables()
+                                       .stream()
+                                       .mapToInt(sstable -> Ints.checkedCast(sstable.getSSTableMetadata().estimatedPartitionSize.count()))
+                                       .sum();
+                int numRows = cfs.getLiveSSTables()
+                                 .stream()
+                                 .mapToInt(sstable -> Ints.checkedCast(sstable.getSSTableMetadata().totalRows))
+                                 .sum();
+
+                return new int[] {numPartitions, numRows};
+            }
+        });
+    }
+
+    @Test
+    public void widePartitionsSingleIndexedColumn() throws Throwable
+    {
+        testIndexingWidePartitions("widePartitionsSingleIndexedColumn", 3, maybeInflate(24),
+                new IndexDef("wide_partition_index_0", "CREATE INDEX %s on %s.%s(v1)"));
+    }
+
+    @Test
+    public void widePartitionsMultipleIndexedColumns() throws Throwable
+    {
+        testIndexingWidePartitions("widePartitionsMultipleIndexedColumns", 3, maybeInflate(24),
+                                   new IndexDef("wide_partition_index_0", "CREATE INDEX %s on %s.%s(v1)"),
+                                   new IndexDef("wide_partition_index_1", "CREATE INDEX %s on %s.%s(v2)"),
+                                   new IndexDef("wide_partition_index_2", "CREATE INDEX %s on %s.%s(v3)"),
+                                   new IndexDef("wide_partition_index_3", "CREATE INDEX %s on %s.%s(v4)"));
+    }
+
+    static class IndexDef
+    {
+        final String name;
+        final String cql;
+
+        IndexDef(String name, String cql)
+        {
+            this.name = name;
+            this.cql = cql;
+        }
+    }
 }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/BTreeSearchIteratorBench.java b/test/microbench/org/apache/cassandra/test/microbench/BTreeSearchIteratorBench.java
index 400b297..d7cf8dc 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/BTreeSearchIteratorBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/BTreeSearchIteratorBench.java
@@ -36,7 +36,6 @@
 import org.openjdk.jmh.annotations.Level;
 import org.openjdk.jmh.annotations.Measurement;
 import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OperationsPerInvocation;
 import org.openjdk.jmh.annotations.OutputTimeUnit;
 import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
diff --git a/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java b/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java
index b79f154..9678b8d 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java
@@ -40,8 +40,9 @@
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.utils.FBUtilities;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -63,6 +64,7 @@
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 
 @BenchmarkMode(Mode.Throughput)
@@ -87,7 +89,7 @@
     String table = "tbl";
 
     int nowInSec = FBUtilities.nowInSeconds();
-    long queryStartTime = System.nanoTime();
+    long queryStartTime = nanoTime();
     BatchStatement bs;
     BatchQueryOptions bqo;
 
@@ -100,11 +102,11 @@
     @Setup
     public void setup() throws Throwable
     {
-        Schema.instance.load(KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1)));
+        SchemaTestUtil.addOrUpdateKeyspace(KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1)), false);
         KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(keyspace);
         TableMetadata metadata = CreateTableStatement.parse(String.format("CREATE TABLE %s (id int, ck int, v int, primary key (id, ck))", table), keyspace).build();
 
-        Schema.instance.load(ksm.withSwapped(ksm.tables.with(metadata)));
+        SchemaTestUtil.addOrUpdateKeyspace(ksm.withSwapped(ksm.tables.with(metadata)), false);
 
         List<ModificationStatement> modifications = new ArrayList<>(batchSize);
         List<List<ByteBuffer>> parameters = new ArrayList<>(batchSize);
@@ -124,7 +126,7 @@
     @Benchmark
     public void bench()
     {
-        bs.getMutations(bqo, false, nowInSec, nowInSec, queryStartTime);
+        bs.getMutations(ClientState.forInternalCalls(), bqo, false, nowInSec, nowInSec, queryStartTime);
     }
 
 
diff --git a/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java b/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java
index 9222811..32e048d 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java
@@ -18,18 +18,16 @@
 
 package org.apache.cassandra.test.microbench;
 
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
+import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.cassandra.db.BufferDecoratedKey;
 import org.apache.cassandra.dht.Murmur3Partitioner;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.BloomFilter;
 import org.apache.cassandra.utils.BloomFilterSerializer;
@@ -77,7 +75,7 @@
         {
             BloomFilter filter = (BloomFilter) FilterFactory.getFilter(numElemsInK * 1024, 0.01d);
             filter.add(wrap(testVal));
-            DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(file));
+            DataOutputStreamPlus out = new FileOutputStreamPlus(file);
             if (oldBfFormat)
                 SerializationsTest.serializeOldBfFormat(filter, out);
             else
@@ -85,14 +83,14 @@
             out.close();
             filter.close();
 
-            DataInputStream in = new DataInputStream(new FileInputStream(file));
+            FileInputStreamPlus in = new FileInputStreamPlus(file);
             BloomFilter filter2 = BloomFilterSerializer.deserialize(in, oldBfFormat);
             FileUtils.closeQuietly(in);
             filter2.close();
         }
         finally
         {
-            file.delete();
+            file.tryDelete();
         }
     }
 
diff --git a/test/microbench/org/apache/cassandra/test/microbench/CacheLoaderBench.java b/test/microbench/org/apache/cassandra/test/microbench/CacheLoaderBench.java
index d91db6e..de788b7 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/CacheLoaderBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/CacheLoaderBench.java
@@ -97,7 +97,7 @@
                 RowUpdateBuilder rowBuilder = new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis() + random.nextInt(), "key");
                 rowBuilder.add(colDef, "val1");
                 rowBuilder.build().apply();
-                cfs.forceBlockingFlush();
+                cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
             }
 
             Assert.assertEquals(numSSTables, cfs.getLiveSSTables().size());
diff --git a/test/microbench/org/apache/cassandra/test/microbench/CachingBenchTest.java b/test/microbench/org/apache/cassandra/test/microbench/CachingBenchTest.java
index 2859f58..c589ca5 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/CachingBenchTest.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/CachingBenchTest.java
@@ -51,6 +51,8 @@
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.assertj.core.api.Assertions.assertThat;
 
 public class CachingBenchTest extends CQLTester
@@ -186,9 +188,9 @@
             if (ii % (FLUSH_FREQ * 10) == 0)
             {
                 System.out.println("C");
-                long startTime = System.nanoTime();
+                long startTime = nanoTime();
                 getCurrentColumnFamilyStore().enableAutoCompaction(!CONCURRENT_COMPACTIONS);
-                long endTime = System.nanoTime();
+                long endTime = nanoTime();
                 compactionTimeNanos += endTime - startTime;
                 getCurrentColumnFamilyStore().disableAutoCompaction();
             }
@@ -206,7 +208,7 @@
         ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
         cfs.disableAutoCompaction();
 
-        long onStartTime = System.currentTimeMillis();
+        long onStartTime = currentTimeMillis();
         ExecutorService es = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
         List<Future<?>> tasks = new ArrayList<>();
         for (int ti = 0; ti < 1; ++ti)
@@ -230,7 +232,7 @@
             task.get();
 
         flush();
-        long onEndTime = System.currentTimeMillis();
+        long onEndTime = currentTimeMillis();
         int startRowCount = countRows(cfs);
         int startTombCount = countTombstoneMarkers(cfs);
         int startRowDeletions = countRowDeletions(cfs);
@@ -260,9 +262,9 @@
             System.out.println();
 
         String hashesBefore = getHashes();
-        long startTime = System.currentTimeMillis();
+        long startTime = currentTimeMillis();
         CompactionManager.instance.performMaximal(cfs, true);
-        long endTime = System.currentTimeMillis();
+        long endTime = currentTimeMillis();
 
         int endRowCount = countRows(cfs);
         int endTombCount = countTombstoneMarkers(cfs);
@@ -283,9 +285,9 @@
 
     private String getHashes() throws Throwable
     {
-        long startTime = System.currentTimeMillis();
+        long startTime = currentTimeMillis();
         String hashes = Arrays.toString(getRows(execute(hashQuery))[0]);
-        long endTime = System.currentTimeMillis();
+        long endTime = currentTimeMillis();
         System.out.println(String.format("Hashes: %s, retrieved in %.3fs", hashes, (endTime - startTime) * 1e-3));
         return hashes;
     }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java b/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java
index 41220a2..8d7e800 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.test.microbench;
 
 
-import java.io.File;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.*;
@@ -29,6 +28,7 @@
 import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.openjdk.jmh.annotations.*;
 
@@ -70,13 +70,13 @@
             execute(writeStatement, i, i, i );
 
 
-        cfs.forceBlockingFlush();
+        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
 
         System.err.println("Writing 50k again...");
         for (long i = 0; i < 50000; i++)
             execute(writeStatement, i, i, i );
 
-        cfs.forceBlockingFlush();
+        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
 
         cfs.snapshot("originals");
 
@@ -108,7 +108,7 @@
 
         for (File file : directories)
         {
-            for (File f : file.listFiles())
+            for (File f : file.tryList())
             {
                 if (f.isDirectory())
                     continue;
@@ -119,7 +119,7 @@
 
 
         for (File file : snapshotFiles)
-            FileUtils.createHardLink(file, new File(file.toPath().getParent().getParent().getParent().toFile(), file.getName()));
+            FileUtils.createHardLink(file, new File(new File(file.toPath().getParent().getParent().getParent()), file.name()));
 
         cfs.loadNewSSTables();
     }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java b/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
index 52dcd2d..ad72f3d 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
@@ -18,14 +18,13 @@
 
 package org.apache.cassandra.test.microbench;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.nio.file.Files;
-import java.util.Arrays;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.DirectorySizeCalculator;
 import org.openjdk.jmh.annotations.*;
@@ -46,7 +45,7 @@
     @Setup(Level.Trial)
     public void setUp() throws IOException
     {
-        tempDir = Files.createTempDirectory(randString()).toFile();
+        tempDir = new File(Files.createTempDirectory(randString()));
 
         // Since #'s on laptops and commodity desktops are so useful in considering enterprise virtualized server environments...
 
@@ -65,14 +64,9 @@
         // [java]   Statistics: (min, avg, max) = (73.687, 74.714, 76.872), stdev = 0.835
         // [java]   Confidence interval (99.9%): [74.156, 75.272]
 
-        // Throttle CPU on the Windows box to .87GHZ from 4.3GHZ turbo single-core, and #'s for 25600:
-        // [java] Result: 298.628 ▒(99.9%) 14.755 ms/op [Average]
-        // [java]   Statistics: (min, avg, max) = (291.245, 298.628, 412.881), stdev = 22.085
-        // [java]   Confidence interval (99.9%): [283.873, 313.383]
-
         // Test w/25,600 files, 100x the load of a full default CommitLog (8192) divided by size (32 per)
         populateRandomFiles(tempDir, 25600);
-        sizer = new DirectorySizeCalculator(tempDir);
+        sizer = new DirectorySizeCalculator();
     }
 
     @TearDown
@@ -85,7 +79,7 @@
     {
         for (int i = 0; i < count; i++)
         {
-            PrintWriter pw = new PrintWriter(dir + File.separator + randString(), "UTF-8");
+            PrintWriter pw = new PrintWriter(dir + File.pathSeparator() + randString(), "UTF-8");
             pw.write(randString());
             pw.close();
         }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/FastThreadExecutor.java b/test/microbench/org/apache/cassandra/test/microbench/FastThreadExecutor.java
index 5644e4f..03ea710 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/FastThreadExecutor.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/FastThreadExecutor.java
@@ -18,12 +18,13 @@
 
 package org.apache.cassandra.test.microbench;
 
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 
 import io.netty.util.concurrent.DefaultThreadFactory;
 
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
+
 /**
  * Created to test perf of FastThreadLocal
  *
@@ -34,6 +35,6 @@
 {
     public FastThreadExecutor(int size, String name)
     {
-        super(size, size, 10, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new DefaultThreadFactory(name, true));
+        super(size, size, 10, SECONDS, newBlockingQueue(), new DefaultThreadFactory(name, true));
     }
 }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/GcCompactionBenchTest.java b/test/microbench/org/apache/cassandra/test/microbench/GcCompactionBenchTest.java
index a1a6146..6af9811 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/GcCompactionBenchTest.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/GcCompactionBenchTest.java
@@ -49,6 +49,9 @@
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class GcCompactionBenchTest extends CQLTester
 {
     private static final String SIZE_TIERED_STRATEGY = "SizeTieredCompactionStrategy', 'min_sstable_size' : '0";
@@ -186,9 +189,9 @@
             if (ii % (FLUSH_FREQ * 10) == 0)
             {
                 System.out.println("C");
-                long startTime = System.nanoTime();
+                long startTime = nanoTime();
                 getCurrentColumnFamilyStore().enableAutoCompaction(true);
-                long endTime = System.nanoTime();
+                long endTime = nanoTime();
                 compactionTimeNanos += endTime - startTime;
                 getCurrentColumnFamilyStore().disableAutoCompaction();
             }
@@ -203,7 +206,7 @@
         ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
         cfs.disableAutoCompaction();
 
-        long onStartTime = System.currentTimeMillis();
+        long onStartTime = currentTimeMillis();
         ExecutorService es = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
         List<Future<?>> tasks = new ArrayList<>();
         for (int ti = 0; ti < 1; ++ti)
@@ -227,7 +230,7 @@
             task.get();
 
         flush();
-        long onEndTime = System.currentTimeMillis();
+        long onEndTime = currentTimeMillis();
         int startRowCount = countRows(cfs);
         int startTombCount = countTombstoneMarkers(cfs);
         int startRowDeletions = countRowDeletions(cfs);
@@ -238,9 +241,9 @@
 
         String hashesBefore = getHashes();
 
-        long startTime = System.currentTimeMillis();
+        long startTime = currentTimeMillis();
         CompactionManager.instance.performGarbageCollection(cfs, tombstoneOption, 0);
-        long endTime = System.currentTimeMillis();
+        long endTime = currentTimeMillis();
 
         int endRowCount = countRows(cfs);
         int endTombCount = countTombstoneMarkers(cfs);
@@ -267,9 +270,9 @@
 
     private String getHashes() throws Throwable
     {
-        long startTime = System.currentTimeMillis();
+        long startTime = currentTimeMillis();
         String hashes = Arrays.toString(getRows(execute(hashQuery))[0]);
-        long endTime = System.currentTimeMillis();
+        long endTime = currentTimeMillis();
         System.out.println(String.format("Hashes: %s, retrieved in %.3fs", hashes, (endTime - startTime) * 1e-3));
         return hashes;
     }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/LatencyTrackingBench.java b/test/microbench/org/apache/cassandra/test/microbench/LatencyTrackingBench.java
index eaa74de..8db3f34 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/LatencyTrackingBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/LatencyTrackingBench.java
@@ -20,18 +20,12 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.Timer;
 import org.apache.cassandra.metrics.CassandraMetricsRegistry;
-import org.apache.cassandra.metrics.ClearableHistogram;
 import org.apache.cassandra.metrics.DecayingEstimatedHistogramReservoir;
 import org.apache.cassandra.metrics.LatencyMetrics;
-import org.apache.cassandra.metrics.LatencyMetricsTest;
 import org.apache.cassandra.metrics.MetricNameFactory;
-import org.apache.cassandra.metrics.TableMetrics;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.CompilerControl;
 import org.openjdk.jmh.annotations.Fork;
 import org.openjdk.jmh.annotations.Level;
 import org.openjdk.jmh.annotations.Measurement;
@@ -41,7 +35,6 @@
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Threads;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
diff --git a/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java b/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java
index a3446aa..9070473 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.EnumMap;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.net.InetAddresses;
@@ -36,7 +35,7 @@
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.NoPayload;
 import org.apache.cassandra.net.ParamType;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -50,6 +49,8 @@
 import org.openjdk.jmh.annotations.Warmup;
 
 import static org.apache.cassandra.net.Verb.ECHO_REQ;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 @State(Scope.Thread)
 @Warmup(iterations = 4, time = 1, timeUnit = TimeUnit.SECONDS)
@@ -71,12 +72,12 @@
     {
         DatabaseDescriptor.daemonInitialization();
 
-        UUID uuid = UUIDGen.getTimeUUID();
+        TimeUUID timeUuid = nextTimeUUID();
         Map<ParamType, Object> parameters = new EnumMap<>(ParamType.class);
 
         if (withParams)
         {
-            parameters.put(ParamType.TRACE_SESSION, uuid);
+            parameters.put(ParamType.TRACE_SESSION, timeUuid);
         }
 
         addr = InetAddressAndPort.getByAddress(InetAddresses.forString("127.0.73.101"));
@@ -96,7 +97,7 @@
     {
         try (DataOutputBuffer out = new DataOutputBuffer())
         {
-            Message.serializer.serialize(Message.builder(msgOut).withCreatedAt(System.nanoTime()).withId(42).build(),
+            Message.serializer.serialize(Message.builder(msgOut).withCreatedAt(nanoTime()).withId(42).build(),
                                          out, messagingVersion);
             DataInputBuffer in = new DataInputBuffer(out.buffer(), false);
             Message.serializer.deserialize(in, addr, messagingVersion);
diff --git a/test/microbench/org/apache/cassandra/test/microbench/MutationBench.java b/test/microbench/org/apache/cassandra/test/microbench/MutationBench.java
index 41d6aab..d258026 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/MutationBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/MutationBench.java
@@ -29,6 +29,7 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.io.util.DataInputBuffer;
@@ -87,7 +88,7 @@
     @Setup
     public void setup() throws IOException
     {
-        Schema.instance.load(KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1)));
+        SchemaTestUtil.addOrUpdateKeyspace(KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1)), false);
         KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(keyspace);
         TableMetadata metadata =
             CreateTableStatement.parse("CREATE TABLE userpics " +
@@ -97,7 +98,7 @@
                                        "PRIMARY KEY(userid, picid))", keyspace)
                                 .build();
 
-        Schema.instance.load(ksm.withSwapped(ksm.tables.with(metadata)));
+        SchemaTestUtil.addOrUpdateKeyspace(ksm.withSwapped(ksm.tables.with(metadata)), false);
 
         mutation = (Mutation)UpdateBuilder.create(metadata, 1L).newRow(1L).add("commentid", 32L).makeMutation();
         buffer = ByteBuffer.allocate(mutation.serializedSize(MessagingService.current_version));
diff --git a/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java b/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
index cd15646..890f74c 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
@@ -288,4 +288,4 @@
     public void testRLargeLegacyWriteUTF() throws IOException {
         BufferedDataOutputStreamTest.writeUTFLegacy(large, hole);
     }
-}
\ No newline at end of file
+}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/PendingRangesBench.java b/test/microbench/org/apache/cassandra/test/microbench/PendingRangesBench.java
index b4b126f..e3644c7 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/PendingRangesBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/PendingRangesBench.java
@@ -29,7 +29,6 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.PendingRangeMaps;
 import org.apache.cassandra.locator.Replica;
-import org.apache.cassandra.locator.ReplicaUtils;
 import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.infra.Blackhole;
 
diff --git a/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java b/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java
old mode 100755
new mode 100644
index f3df8c1..0dd51f2
--- a/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java
@@ -28,6 +28,8 @@
 import org.openjdk.jmh.runner.*;
 import org.openjdk.jmh.runner.options.*;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 @BenchmarkMode(Mode.AverageTime)
 @OutputTimeUnit(TimeUnit.MILLISECONDS)
 @Warmup(iterations = 3, time = 1, timeUnit = TimeUnit.SECONDS)
@@ -45,7 +47,7 @@
 
     static
     {
-        final int now = (int) (System.currentTimeMillis() / 1000L);
+        final int now = (int) (currentTimeMillis() / 1000L);
         Random random = new Random();
         for(int i = 0 ; i < 10000000; i++)
         {
diff --git a/test/microbench/org/apache/cassandra/test/microbench/TimedMonitorBench.java b/test/microbench/org/apache/cassandra/test/microbench/TimedMonitorBench.java
new file mode 100644
index 0000000..2420b71
--- /dev/null
+++ b/test/microbench/org/apache/cassandra/test/microbench/TimedMonitorBench.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.test.microbench;
+
+import org.openjdk.jmh.annotations.*;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+@BenchmarkMode(Mode.SampleTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@Warmup(iterations = 1, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 2, time = 10, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2)
+@Threads(4)
+@State(Scope.Benchmark)
+public class TimedMonitorBench
+{
+    @Param({"A", "B"})
+    private String type;
+
+    private Lock lock;
+
+    @State(Scope.Thread)
+    public static class ThreadState
+    {
+        Lock lock;
+
+        @Setup(Level.Iteration)
+        public void setup(TimedMonitorBench benchState) throws Throwable
+        {
+            if (benchState.type.equals("A")) lock = new A();
+            else if (benchState.type.equals("B")) lock = new B();
+            else throw new IllegalStateException();
+        }
+    }
+
+    @Setup(Level.Trial)
+    public void setup() throws Throwable
+    {
+        if (type.equals("A")) lock = new A();
+        else if (type.equals("B")) lock = new B();
+        else throw new IllegalStateException();
+    }
+
+    interface Lock
+    {
+        boolean lock(long deadline);
+        void maybeUnlock();
+    }
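+    // A uses a CAS fast path with a synchronized slow path for waiters;
+    // B is a plain synchronized monitor. The benchmarks below compare the two.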
+
+    static class A implements Lock
+    {
+        private volatile Thread lockedBy;
+        private volatile int waiting;
+
+        private static final AtomicReferenceFieldUpdater<A, Thread> lockedByUpdater = AtomicReferenceFieldUpdater.newUpdater(A.class, Thread.class, "lockedBy");
+
+        public boolean lock(long deadline)
+        {
+            try
+            {
+                Thread thread = Thread.currentThread();
+                if (lockedByUpdater.compareAndSet(this, null, thread))
+                    return true;
+
+                synchronized (this)
+                {
+                    waiting++;
+
+                    try
+                    {
+                        while (true)
+                        {
+                            if (lockedByUpdater.compareAndSet(this, null, thread))
+                                return true;
+
+                            while (lockedBy != null)
+                            {
+                                long now = System.nanoTime();
+                                if (now >= deadline)
+                                    return false;
+
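+                                // round the remaining nanos up to at least 1ms; wait(0) would block indefinitely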
+                                wait(1 + ((deadline - now) - 1) / 1000000);
+                            }
+                        }
+                    }
+                    finally
+                    {
+                        waiting--;
+                    }
+                }
+            }
+            catch (InterruptedException e)
+            {
+                Thread.currentThread().interrupt();
+                return false;
+            }
+        }
+
+        public void maybeUnlock()
+        {
+            // no visibility requirements, as if we hold the lock it was last updated by us
+            if (lockedBy == null)
+                return;
+
+            Thread thread = Thread.currentThread();
+
+            if (lockedBy == thread)
+            {
+                lockedBy = null;
+                if (waiting > 0)
+                {
+                    synchronized (this)
+                    {
+                        notify();
+                    }
+                }
+            }
+        }
+    }
+
+    static class B implements Lock
+    {
+        private Thread lockedBy;
+
+        public synchronized boolean lock(long deadline)
+        {
+            try
+            {
+                Thread thread = Thread.currentThread();
+                while (lockedBy != null)
+                {
+                    long now = System.nanoTime();
+                    if (now >= deadline)
+                        return false;
+
+                    wait(1 + ((deadline - now) - 1) / 1000000);
+                }
+                lockedBy = thread;
+                return true;
+            }
+            catch (InterruptedException e)
+            {
+                Thread.currentThread().interrupt();
+                return false;
+            }
+        }
+
+        public void maybeUnlock()
+        {
+            // no visibility requirements, as if we hold the lock it was last updated by us
+            if (lockedBy == null)
+                return;
+
+            Thread thread = Thread.currentThread();
+
+            if (lockedBy == thread)
+            {
+                synchronized (this)
+                {
+                    lockedBy = null;
+                    notify();
+                }
+            }
+        }
+    }
+
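+    // "unshared" locks a per-thread instance (uncontended); "shared" contends on the
+    // single benchmark-scoped instance across all benchmark threads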
+    @Benchmark
+    public void unshared(ThreadState state)
+    {
+        state.lock.lock(Long.MAX_VALUE);
+        state.lock.maybeUnlock();
+    }
+
+    @Benchmark
+    public void shared()
+    {
+        lock.lock(Long.MAX_VALUE);
+        lock.maybeUnlock();
+    }
+}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/VIntCodingBench.java b/test/microbench/org/apache/cassandra/test/microbench/VIntCodingBench.java
new file mode 100644
index 0000000..9c82236
--- /dev/null
+++ b/test/microbench/org/apache/cassandra/test/microbench/VIntCodingBench.java
@@ -0,0 +1,353 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.test.microbench;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.WritableByteChannel;
+import java.util.PrimitiveIterator;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.utils.vint.VIntCoding;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 3, jvmArgsAppend = "-Xmx512M")
+@Threads(1)
+@State(Scope.Benchmark)
+public class VIntCodingBench
+{
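+    // each constant below is chosen so that its unsigned vint encoding occupies the
+    // number of bytes its name states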
+    long oneByte = 53;
+    long twoBytes = 10201;
+    long threeBytes = 1097151L;
+    long fourBytes = 168435455L;
+    long fiveBytes = 33251130335L;
+    long sixBytes = 3281283447775L;
+    long sevenBytes = 417672546086779L;
+    long eightBytes = 52057592037927932L;
+    long nineBytes = 72057594037927937L;
+
+    final static String MONOMORPHIC = "monomorphic";
+    final static String BIMORPHIC = "bimorphic";
+    final static String MEGAMORPHIC = "megamorphic";
+
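+    // the "allocation" parameter controls how many distinct ByteBuffer implementations
+    // (heap, direct, memory-mapped) each call site observes, exercising mono-, bi- and
+    // megamorphic dispatch in the encoding paths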
+    @Param({ MONOMORPHIC, BIMORPHIC, MEGAMORPHIC})
+    String allocation;
+
+    final Random random = new Random(100);
+    final PrimitiveIterator.OfLong longs = random.longs().iterator();
+    final static int BUFFER_SIZE = 8192;
+
+    ByteBuffer onheap = ByteBuffer.allocate(BUFFER_SIZE);
+    ByteBuffer direct = ByteBuffer.allocateDirect(BUFFER_SIZE);
+    File mmapedFile = new File(VIntCodingBench.class + "_mmap");
+    ByteBuffer mmaped = allocateMmapedByteBuffer(mmapedFile, BUFFER_SIZE);
+
+    @TearDown
+    public void tearDown()
+    {
+        mmapedFile.delete();
+    }
+
+    private static ByteBuffer allocateMmapedByteBuffer(File mmapFile, int bufferSize)
+    {
+        try(RandomAccessFile file = new RandomAccessFile(mmapFile, "rw");
+            FileChannel ch = file.getChannel())
+        {
+            return ch.map(FileChannel.MapMode.READ_WRITE, 0, bufferSize);
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private ByteBuffer getByteBuffer(String allocation)
+    {
+        ByteBuffer buffer;
+        if (allocation.equals(MONOMORPHIC))
+        {
+            buffer = onheap;
+        }
+        else if (allocation.equals(BIMORPHIC))
+        {
+            buffer = random.nextBoolean() ? onheap : direct;
+        }
+        else
+        {
+            switch(random.nextInt(3))
+            {
+                case 0:
+                    buffer = onheap;
+                    break;
+                case 1:
+                    buffer = direct;
+                    break;
+                default:
+                    buffer = mmaped;
+                    break;
+            }
+        }
+        return buffer;
+    }
+
+    private DataOutputPlus getBufferedDataOutput(Blackhole bh, ByteBuffer buffer)
+    {
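+        // sink channel that hands every flushed buffer to the Blackhole so writes are
+        // not optimised away, and reports the buffer as fully consumed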
+        WritableByteChannel wbc = new WritableByteChannel() {
+
+            @Override
+            public boolean isOpen()
+            {
+                return true;
+            }
+
+            @Override
+            public void close() throws IOException
+            {
+            }
+
+            @Override
+            public int write(ByteBuffer src) throws IOException
+            {
+                bh.consume(src);
+                int remaining = src.remaining();
+                src.position(src.limit());
+                return remaining;
+            }
+        };
+        return new BufferedDataOutputStreamPlus(wbc, buffer);
+    }
+
+    @Benchmark
+    public void testWrite1ByteBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(oneByte, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite1ByteDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(oneByte, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite2BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(twoBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite2BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(twoBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite3BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(threeBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite3BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(threeBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite4BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(fourBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite4BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(fourBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite5BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(fiveBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite5BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(fiveBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite6BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(sixBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite6BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(sixBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite7BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(sevenBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite7BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(sevenBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite8BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(eightBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite8BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(eightBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite9BytesBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(nineBytes, buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWrite9BytesDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(nineBytes, out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWriteRandomLongBB(final Blackhole bh)
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        VIntCoding.writeUnsignedVInt(longs.nextLong(), buffer);
+        bh.consume(buffer);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testWriteRandomLongDOP(final Blackhole bh) throws IOException
+    {
+        ByteBuffer buffer = getByteBuffer(allocation);
+        DataOutputPlus out = getBufferedDataOutput(bh, buffer);
+        VIntCoding.writeUnsignedVInt(longs.nextLong(), out);
+        bh.consume(out);
+        buffer.clear();
+    }
+
+    @Benchmark
+    public void testComputeUnsignedVIntSize(final Blackhole bh)
+    {
+        bh.consume(VIntCoding.computeUnsignedVIntSize(longs.nextLong()));
+    }
+}
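The benchmark above writes unsigned vints of every encoded length (one through nine bytes) through both the ByteBuffer and DataOutputPlus paths, while the allocation parameter switches the target buffer between on-heap only, on-heap/direct, or on-heap/direct/memory-mapped, so the write call site sees one, two or three receiver types. As a rough guide to what the length buckets mean, the sketch below derives the encoded size from a value's magnitude, assuming the usual ~7-payload-bits-per-byte scheme with a nine-byte cap; it is an illustration only, not the VIntCoding implementation.

    // Illustrative sketch: unsigned vint length by value magnitude, assuming
    // ~7 payload bits per encoded byte and a 9-byte maximum (this is what
    // computeUnsignedVIntSize measures, though the real code differs in detail).
    public final class VIntSizeSketch
    {
        static int unsignedVIntSize(long value)
        {
            int bits = 64 - Long.numberOfLeadingZeros(value | 1); // treat 0 as one significant bit
            return Math.min(9, (bits + 6) / 7);                   // ceil(bits / 7), capped at 9
        }

        public static void main(String[] args)
        {
            // boundary values: 7 bits -> 1 byte, 8 bits -> 2 bytes, 56 bits -> 8 bytes, 57+ bits -> 9 bytes
            for (long v : new long[] { 0L, (1L << 7) - 1, 1L << 7, (1L << 56) - 1, 1L << 56, -1L })
                System.out.println(v + " -> " + unsignedVIntSize(v) + " bytes");
        }
    }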
diff --git a/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java b/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java
index 9e84e0a..b5bb40c 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java
@@ -25,7 +25,6 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import io.netty.buffer.ByteBuf;
@@ -55,7 +54,7 @@
 import org.apache.cassandra.net.AsyncStreamingOutputPlus;
 import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.streaming.DefaultConnectionFactory;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.SessionInfo;
 import org.apache.cassandra.streaming.StreamCoordinator;
@@ -79,9 +78,11 @@
 import org.openjdk.jmh.annotations.Threads;
 import org.openjdk.jmh.annotations.Warmup;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 /**
- * Please ensure that this benchmark is run with stream_throughput_outbound_megabits_per_sec set to a
- * really high value otherwise, throttling will kick in and the results will not be meaningful.
+ * Please ensure that this benchmark is run with entire_sstable_stream_throughput_outbound
+ * set to a really high value; otherwise, throttling will kick in and the results will not be meaningful.
  */
 @Warmup(iterations = 1, time = 1, timeUnit = TimeUnit.SECONDS)
 @Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@@ -202,7 +203,7 @@
                 .build()
                 .applyUnsafe();
             }
-            store.forceBlockingFlush();
+            store.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
             CompactionManager.instance.performMaximal(store, false);
         }
 
@@ -216,13 +217,13 @@
 
         private StreamSession setupStreamingSessionForTest()
         {
-            StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new DefaultConnectionFactory(), false, false, null, PreviewKind.NONE);
-            StreamResultFuture future = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.BOOTSTRAP, Collections.<StreamEventHandler>emptyList(), streamCoordinator);
+            StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
+            StreamResultFuture future = StreamResultFuture.createInitiator(nextTimeUUID(), StreamOperation.BOOTSTRAP, Collections.<StreamEventHandler>emptyList(), streamCoordinator);
 
             InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
             streamCoordinator.addSessionInfo(new SessionInfo(peer, 0, peer, Collections.emptyList(), Collections.emptyList(), StreamSession.State.INITIALIZED));
 
-            StreamSession session = streamCoordinator.getOrCreateNextSession(peer);
+            StreamSession session = streamCoordinator.getOrCreateOutboundSession(peer);
             session.init(future);
             return session;
         }
@@ -241,7 +242,7 @@
 
     @Benchmark
     @BenchmarkMode(Mode.Throughput)
-    public void blockStreamReader(BenchmarkState state) throws Exception
+    public void blockStreamReader(BenchmarkState state) throws Throwable
     {
         EmbeddedChannel channel = createMockNettyChannel();
         AsyncStreamingInputPlus in = new AsyncStreamingInputPlus(channel);
@@ -265,7 +266,7 @@
 
     @Benchmark
     @BenchmarkMode(Mode.Throughput)
-    public void partialStreamReader(BenchmarkState state) throws Exception
+    public void partialStreamReader(BenchmarkState state) throws Throwable
     {
         EmbeddedChannel channel = createMockNettyChannel();
         AsyncStreamingInputPlus in = new AsyncStreamingInputPlus(channel);
diff --git a/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java b/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java
index c32b1e3..34ec29a 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java
@@ -24,7 +24,6 @@
 import java.util.List;
 import java.util.Random;
 import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -71,6 +70,7 @@
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.utils.btree.BTree;
 import org.apache.cassandra.utils.btree.UpdateFunction;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.BulkIterator;
 import org.apache.cassandra.utils.memory.ByteBufferCloner;
@@ -105,7 +105,7 @@
 {
     private static final OpOrder NO_ORDER = new OpOrder();
     private static final MutableDeletionInfo NO_DELETION_INFO = new MutableDeletionInfo(DeletionTime.LIVE);
-    private static final HeapPool POOL = new HeapPool(Long.MAX_VALUE, 1.0f, () -> CompletableFuture.completedFuture(Boolean.TRUE));
+    private static final HeapPool POOL = new HeapPool(Long.MAX_VALUE, 1.0f, () -> ImmediateFuture.success(Boolean.TRUE));
     private static final ByteBuffer zero = Int32Type.instance.decompose(0);
     private static final DecoratedKey decoratedKey = new BufferDecoratedKey(new ByteOrderedPartitioner().getToken(zero), zero);
 
diff --git a/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java b/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java
index 789ca00..54f721b 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java
@@ -19,108 +19,59 @@
 package org.apache.cassandra.test.microbench.instance;
 
 
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
 import java.util.function.Supplier;
-import java.util.stream.IntStream;
 
 import com.google.common.base.Throwables;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.Memtable;
-import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.utils.FBUtilities;
 import org.openjdk.jmh.annotations.*;
 
-@BenchmarkMode(Mode.AverageTime)
-@OutputTimeUnit(TimeUnit.MILLISECONDS)
-@Warmup(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
-@Measurement(iterations = 15, time = 2, timeUnit = TimeUnit.SECONDS)
-@Fork(value = 1)
-@Threads(1)
 @State(Scope.Benchmark)
-public abstract class ReadTest extends CQLTester
+public abstract class ReadTest extends SimpleTableWriter
 {
-    static String keyspace;
-    String table;
-    ColumnFamilyStore cfs;
-    Random rand;
-
-    @Param({"1000"})
-    int BATCH = 1_000;
-
     public enum Flush
     {
         INMEM, NO, YES
     }
 
-    @Param({"1000000"})
-    int count = 1_000_000;
-
     @Param({"INMEM", "YES"})
     Flush flush = Flush.INMEM;
 
-    public enum Execution
-    {
-        SERIAL,
-        SERIAL_NET,
-        PARALLEL,
-        PARALLEL_NET,
-    }
-
-    @Param({"PARALLEL"})
-    Execution async = Execution.PARALLEL;
-
     @Setup(Level.Trial)
     public void setup() throws Throwable
     {
-        rand = new Random(1);
-        CQLTester.setUpClass();
-        CQLTester.prepareServer();
-        System.err.println("setupClass done.");
-        keyspace = createKeyspace("CREATE KEYSPACE %s with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } and durable_writes = false");
-        table = createTable(keyspace, "CREATE TABLE %s ( userid bigint, picid bigint, commentid bigint, PRIMARY KEY(userid, picid)) with compression = {'enabled': false}");
-        execute("use "+keyspace+";");
-        switch (async)
-        {
-            case SERIAL_NET:
-            case PARALLEL_NET:
-                CQLTester.requireNetwork();
-                executeNet(getDefaultVersion(), "use " + keyspace + ";");
-        }
-        String writeStatement = "INSERT INTO "+table+"(userid,picid,commentid)VALUES(?,?,?)";
-        System.err.println("Prepared, batch " + BATCH + " flush " + flush);
-        System.err.println("Disk access mode " + DatabaseDescriptor.getDiskAccessMode() + " index " + DatabaseDescriptor.getIndexAccessMode());
+        super.commonSetup();
 
-        cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
-        cfs.disableAutoCompaction();
-        cfs.forceBlockingFlush();
-
-        //Warm up
+        // Write the data we are going to read.
+        long writeStart = System.currentTimeMillis();
         System.err.println("Writing " + count);
         long i;
         for (i = 0; i <= count - BATCH; i += BATCH)
-            performWrite(writeStatement, i, BATCH);
+            performWrite(i, BATCH);
         if (i < count)
-            performWrite(writeStatement, i, count - i);
+            performWrite(i, (int) (count - i));
+        long writeLength = System.currentTimeMillis() - writeStart;
+        System.err.format("... done in %.3f s.\n", writeLength / 1000.0);
 
         Memtable memtable = cfs.getTracker().getView().getCurrentMemtable();
-        System.err.format("Memtable in %s mode: %d ops, %s serialized bytes, %s (%.0f%%) on heap, %s (%.0f%%) off-heap\n",
+        Memtable.MemoryUsage usage = Memtable.getMemoryUsage(memtable);
+        System.err.format("%s in %s mode: %d ops, %s serialized bytes, %s\n",
+                          memtable.getClass().getSimpleName(),
                           DatabaseDescriptor.getMemtableAllocationType(),
-                          memtable.getOperations(),
+                          memtable.operationCount(),
                           FBUtilities.prettyPrintMemory(memtable.getLiveDataSize()),
-                          FBUtilities.prettyPrintMemory(memtable.getAllocator().onHeap().owns()),
-                          100 * memtable.getAllocator().onHeap().ownershipRatio(),
-                          FBUtilities.prettyPrintMemory(memtable.getAllocator().offHeap().owns()),
-                          100 * memtable.getAllocator().offHeap().ownershipRatio());
+                          usage);
 
         switch (flush)
         {
         case YES:
-            cfs.forceBlockingFlush();
+            cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
             break;
         case INMEM:
             if (!cfs.getLiveSSTables().isEmpty())
@@ -137,29 +88,6 @@
         }
     }
 
-    abstract Object[] writeArguments(long i);
-
-    public void performWrite(String writeStatement, long ofs, long count) throws Throwable
-    {
-        for (long i = ofs; i < ofs + count; ++i)
-            execute(writeStatement, writeArguments(i));
-    }
-
-
-    @TearDown(Level.Trial)
-    public void teardown() throws InterruptedException
-    {
-        if (flush == Flush.INMEM && !cfs.getLiveSSTables().isEmpty())
-            throw new AssertionError("SSTables created for INMEM test.");
-
-        // do a flush to print sizes
-        cfs.forceBlockingFlush();
-
-        CommitLog.instance.shutdownBlocking();
-        CQLTester.tearDownClass();
-        CQLTester.cleanup();
-    }
-
     public Object performReadSerial(String readStatement, Supplier<Object[]> supplier) throws Throwable
     {
         long sum = 0;
@@ -170,20 +98,25 @@
 
     public Object performReadThreads(String readStatement, Supplier<Object[]> supplier) throws Throwable
     {
-        return IntStream.range(0, BATCH)
-                        .parallel()
-                        .mapToLong(i ->
-                                   {
-                                       try
-                                       {
-                                           return execute(readStatement, supplier.get()).size();
-                                       }
-                                       catch (Throwable throwable)
-                                       {
-                                           throw Throwables.propagate(throwable);
-                                       }
-                                   })
-                        .sum();
+        List<Future<Integer>> futures = new ArrayList<>();
+        for (long i = 0; i < BATCH; ++i)
+        {
+            futures.add(executorService.submit(() ->
+                                               {
+                                                   try
+                                                   {
+                                                       return execute(readStatement, supplier.get()).size();
+                                                   }
+                                                   catch (Throwable throwable)
+                                                   {
+                                                       throw Throwables.propagate(throwable);
+                                                   }
+                                               }));
+        }
+        long done = 0;
+        for (Future<Integer> f : futures)
+            done += f.get();
+        return done;
     }
 
     public Object performReadSerialNet(String readStatement, Supplier<Object[]> supplier) throws Throwable
@@ -197,37 +130,55 @@
 
     public long performReadThreadsNet(String readStatement, Supplier<Object[]> supplier) throws Throwable
     {
-        return IntStream.range(0, BATCH)
-                        .parallel()
-                        .mapToLong(i ->
-                                   {
-                                       try
-                                       {
-                                           return executeNet(getDefaultVersion(), readStatement, supplier.get())
-                                                          .getAvailableWithoutFetching();
-                                       }
-                                       catch (Throwable throwable)
-                                       {
-                                           throw Throwables.propagate(throwable);
-                                       }
-                                   })
-                        .sum();
+        List<Future<Integer>> futures = new ArrayList<>();
+        for (long i = 0; i < BATCH; ++i)
+        {
+            futures.add(executorService.submit(() ->
+                                               {
+                                                   try
+                                                   {
+                                                       return executeNet(getDefaultVersion(), readStatement, supplier.get())
+                                                              .getAvailableWithoutFetching();
+                                                   }
+                                                   catch (Throwable throwable)
+                                                   {
+                                                       throw Throwables.propagate(throwable);
+                                                   }
+                                               }));
+        }
+        long done = 0;
+        for (Future<Integer> f : futures)
+            done += f.get();
+        return done;
     }
 
 
     public Object performRead(String readStatement, Supplier<Object[]> supplier) throws Throwable
     {
-        switch (async)
+        if (useNet)
         {
-            case SERIAL:
-                return performReadSerial(readStatement, supplier);
-            case SERIAL_NET:
+            if (threadCount == 1)
                 return performReadSerialNet(readStatement, supplier);
-            case PARALLEL:
-                return performReadThreads(readStatement, supplier);
-            case PARALLEL_NET:
+            else
                 return performReadThreadsNet(readStatement, supplier);
         }
-        return null;
+        else
+        {
+            if (threadCount == 1)
+                return performReadSerial(readStatement, supplier);
+            else
+                return performReadThreads(readStatement, supplier);
+        }
+    }
+
+    void doExtraChecks()
+    {
+        if (flush == Flush.INMEM && !cfs.getLiveSSTables().isEmpty())
+            throw new AssertionError("SSTables created for INMEM test.");
+    }
+
+    String extraInfo()
+    {
+        return " flush " + flush;
     }
 }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTestSmallPartitions.java b/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTestSmallPartitions.java
index b36cfd1..c665c59 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTestSmallPartitions.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTestSmallPartitions.java
@@ -19,8 +19,26 @@
 package org.apache.cassandra.test.microbench.instance;
 
 
-import org.openjdk.jmh.annotations.Benchmark;
+import java.util.concurrent.TimeUnit;
 
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 1)
+@Threads(1)
+@State(Scope.Benchmark)
 public class ReadTestSmallPartitions extends ReadTest
 {
     String readStatement()
diff --git a/test/microbench/org/apache/cassandra/test/microbench/instance/SimpleTableWriter.java b/test/microbench/org/apache/cassandra/test/microbench/instance/SimpleTableWriter.java
new file mode 100644
index 0000000..fba8d16
--- /dev/null
+++ b/test/microbench/org/apache/cassandra/test/microbench/instance/SimpleTableWriter.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.test.microbench.instance;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Throwables;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.utils.FBUtilities;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+
+@State(Scope.Benchmark)
+public abstract class SimpleTableWriter extends CQLTester
+{
+    static String keyspace;
+    String table;
+    ColumnFamilyStore cfs;
+    Random rand;
+    String writeStatement;
+    ExecutorService executorService;
+
+    @Param({"1000000"})
+    int count = 1_000_000;
+
+    @Param({ "1000" })
+    int BATCH = 1_000;
+
+    @Param({ "default" })
+    String memtableClass = "default";
+
+    @Param({ "false" })
+    boolean useNet = false;
+
+    @Param({ "32" })
+    int threadCount;
+
+    public void commonSetup() throws Throwable
+    {
+        rand = new Random(1);
+        executorService = Executors.newFixedThreadPool(threadCount);
+        CQLTester.setUpClass();
+        CQLTester.prepareServer();
+        DatabaseDescriptor.setAutoSnapshot(false);
+        System.err.println("setupClass done.");
+        String memtableSetup = "";
+        if (!memtableClass.isEmpty())
+            memtableSetup = String.format(" AND memtable = '%s'", memtableClass);
+        keyspace = createKeyspace("CREATE KEYSPACE %s with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } and durable_writes = false");
+        table = createTable(keyspace,
+                            "CREATE TABLE %s ( userid bigint, picid bigint, commentid bigint, PRIMARY KEY(userid, picid)) with compression = {'enabled': false}" +
+                            memtableSetup);
+        execute("use " + keyspace + ";");
+        if (useNet)
+        {
+            CQLTester.requireNetwork();
+            executeNet(getDefaultVersion(), "use " + keyspace + ";");
+        }
+        writeStatement = "INSERT INTO " + table + "(userid,picid,commentid)VALUES(?,?,?)";
+        System.err.println("Prepared, batch " + BATCH + " threads " + threadCount + extraInfo());
+        System.err.println("Disk access mode " + DatabaseDescriptor.getDiskAccessMode() +
+                           " index " + DatabaseDescriptor.getIndexAccessMode());
+
+        cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
+        cfs.disableAutoCompaction();
+        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
+    }
+
+    abstract Object[] writeArguments(long i);
+
+    public void performWrite(long ofs, int count) throws Throwable
+    {
+        if (useNet)
+        {
+            if (threadCount == 1)
+                performWriteSerialNet(ofs, count);
+            else
+                performWriteThreadsNet(ofs, count);
+        }
+        else
+        {
+            if (threadCount == 1)
+                performWriteSerial(ofs, count);
+            else
+                performWriteThreads(ofs, count);
+        }
+    }
+
+    public void performWriteSerial(long ofs, int count) throws Throwable
+    {
+        for (long i = ofs; i < ofs + count; ++i)
+            execute(writeStatement, writeArguments(i));
+    }
+
+    public void performWriteThreads(long ofs, int count) throws Throwable
+    {
+        List<Future<Integer>> futures = new ArrayList<>();
+        for (int i = 0; i < count; ++i)
+        {
+            long pos = ofs + i;
+            futures.add(executorService.submit(() ->
+                                               {
+                                                   try
+                                                   {
+                                                       execute(writeStatement, writeArguments(pos));
+                                                       return 1;
+                                                   }
+                                                   catch (Throwable throwable)
+                                                   {
+                                                       throw Throwables.propagate(throwable);
+                                                   }
+                                               }));
+        }
+        int done = 0;
+        for (Future<Integer> f : futures)
+            done += f.get();
+        assert count == done;
+    }
+
+    public void performWriteSerialNet(long ofs, int count) throws Throwable
+    {
+        for (long i = ofs; i < ofs + count; ++i)
+            sessionNet().execute(writeStatement, writeArguments(i));
+    }
+
+    public void performWriteThreadsNet(long ofs, int count) throws Throwable
+    {
+        List<Future<Integer>> futures = new ArrayList<>();
+        for (long i = 0; i < count; ++i)
+        {
+            long pos = ofs + i;
+            futures.add(executorService.submit(() ->
+                                               {
+                                                   try
+                                                   {
+                                                       sessionNet().execute(writeStatement, writeArguments(pos));
+                                                       return 1;
+                                                   }
+                                                   catch (Throwable throwable)
+                                                   {
+                                                       throw Throwables.propagate(throwable);
+                                                   }
+                                               }));
+        }
+        long done = 0;
+        for (Future<Integer> f : futures)
+            done += f.get();
+        assert count == done;
+    }
+
+    @TearDown(Level.Trial)
+    public void teardown() throws InterruptedException
+    {
+        executorService.shutdown();
+        executorService.awaitTermination(15, TimeUnit.SECONDS);
+
+        Memtable memtable = cfs.getTracker().getView().getCurrentMemtable();
+        Memtable.MemoryUsage usage = Memtable.getMemoryUsage(memtable);
+        System.err.format("\n%s in %s mode: %d ops, %s serialized bytes, %s\n",
+                          memtable.getClass().getSimpleName(),
+                          DatabaseDescriptor.getMemtableAllocationType(),
+                          memtable.operationCount(),
+                          FBUtilities.prettyPrintMemory(memtable.getLiveDataSize()),
+                          usage);
+
+        doExtraChecks();
+
+        // do a flush to print sizes
+        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
+
+        CommitLog.instance.shutdownBlocking();
+        CQLTester.tearDownClass();
+        CQLTester.cleanup();
+    }
+
+    abstract void doExtraChecks();
+    abstract String extraInfo();
+}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/instance/WriteTest.java b/test/microbench/org/apache/cassandra/test/microbench/instance/WriteTest.java
new file mode 100644
index 0000000..eda93b7
--- /dev/null
+++ b/test/microbench/org/apache/cassandra/test/microbench/instance/WriteTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.test.microbench.instance;
+
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.openjdk.jmh.annotations.*;
+
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 1)
+@Threads(1)
+@State(Scope.Benchmark)
+public class WriteTest extends SimpleTableWriter
+{
+
+    public enum EndOp
+    {
+        INMEM, TRUNCATE, FLUSH
+    }
+
+    @Param({"INMEM", "TRUNCATE", "FLUSH"})
+    EndOp flush = EndOp.INMEM;
+
+    @Setup(Level.Trial)
+    public void setup() throws Throwable
+    {
+        super.commonSetup();
+    }
+
+    @Benchmark
+    public void writeTable() throws Throwable
+    {
+        long i;
+        for (i = 0; i <= count - BATCH; i += BATCH)
+            performWrite(i, BATCH);
+        if (i < count)
+            performWrite(i, Math.toIntExact(count - i));
+
+        switch (flush)
+        {
+        case FLUSH:
+            cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.USER_FORCED);
+            // if we flush we must also truncate to avoid accumulating sstables
+        case TRUNCATE:
+            execute("TRUNCATE TABLE " + table);
+            // note: we turn snapshotting and durable writes (which would have caused a flush) off for this benchmark
+            break;
+        case INMEM:
+            if (!cfs.getLiveSSTables().isEmpty())
+                throw new AssertionError("SSTables created for INMEM test.");
+            // leave unflushed, i.e. next iteration will overwrite data
+        default:
+        }
+    }
+
+    public Object[] writeArguments(long i)
+    {
+        return new Object[] { i, i, i };
+    }
+
+    void doExtraChecks()
+    {
+        if (flush == WriteTest.EndOp.INMEM && !cfs.getLiveSSTables().isEmpty())
+            throw new AssertionError("SSTables created for INMEM test.");
+    }
+
+    String extraInfo()
+    {
+        return " flush " + flush;
+    }
+}
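WriteTest, ReadTest and the other instance microbenchmarks are plain JMH benchmarks, so besides whatever build target wraps them they can be driven through the standard JMH runner API. The sketch below is only an example of such a driver: the parameter names (threadCount, flush, count) come from SimpleTableWriter and WriteTest above, but the chosen values, the class name RunWriteTest and the include pattern are illustrative assumptions.

    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.RunnerException;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    public class RunWriteTest
    {
        public static void main(String[] args) throws RunnerException
        {
            // Example sweep: serial vs. 32 writer threads, keeping data in the memtable vs. flushing.
            Options opts = new OptionsBuilder()
                           .include("microbench.instance.WriteTest")   // regex over benchmark names
                           .param("threadCount", "1", "32")
                           .param("flush", "INMEM", "FLUSH")
                           .param("count", "100000")
                           .forks(1)
                           .build();
            new Runner(opts).run();
        }
    }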
diff --git a/test/resources/data/config/YamlConfigurationLoaderTest/shared_client_error_reporting_exclusions.yaml b/test/resources/data/config/YamlConfigurationLoaderTest/shared_client_error_reporting_exclusions.yaml
new file mode 100644
index 0000000..7fcb720
--- /dev/null
+++ b/test/resources/data/config/YamlConfigurationLoaderTest/shared_client_error_reporting_exclusions.yaml
@@ -0,0 +1,6 @@
+# This is used to validate that YAML anchors ARE supported; this is useful when client/internode errors come from the same sources (such as security scans)
+client_error_reporting_exclusions: &share
+  subnets:
+    - 127.0.0.1
+    - 127.0.0.0/31
+internode_error_reporting_exclusions: *share
\ No newline at end of file
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/ChanceSupplier.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/ChanceSupplier.java
new file mode 100644
index 0000000..620f3e0
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/ChanceSupplier.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+public interface ChanceSupplier
+{
+    float get();
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/ClassTransformer.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/ClassTransformer.java
new file mode 100644
index 0000000..6e6b0d3
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/ClassTransformer.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.EnumSet;
+import java.util.List;
+import java.util.function.Consumer;
+
+import org.objectweb.asm.AnnotationVisitor;
+import org.objectweb.asm.ClassReader;
+import org.objectweb.asm.ClassVisitor;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.FieldVisitor;
+import org.objectweb.asm.Handle;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.tree.AbstractInsnNode;
+import org.objectweb.asm.tree.MethodInsnNode;
+import org.objectweb.asm.tree.MethodNode;
+
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.simulator.asm.Flag.DETERMINISTIC;
+import static org.apache.cassandra.simulator.asm.Flag.GLOBAL_METHODS;
+import static org.apache.cassandra.simulator.asm.Flag.MONITORS;
+import static org.apache.cassandra.simulator.asm.Flag.NEMESIS;
+import static org.apache.cassandra.simulator.asm.Flag.NO_PROXY_METHODS;
+import static org.apache.cassandra.simulator.asm.TransformationKind.HASHCODE;
+import static org.apache.cassandra.simulator.asm.TransformationKind.SYNCHRONIZED;
+import static org.apache.cassandra.simulator.asm.Utils.deterministicToString;
+import static org.apache.cassandra.simulator.asm.Utils.visitEachRefType;
+import static org.apache.cassandra.simulator.asm.Utils.generateTryFinallyProxyCall;
+import static org.objectweb.asm.Opcodes.ACC_PRIVATE;
+import static org.objectweb.asm.Opcodes.ACC_STATIC;
+import static org.objectweb.asm.Opcodes.ACC_SYNTHETIC;
+import static org.objectweb.asm.Opcodes.INVOKESTATIC;
+
+class ClassTransformer extends ClassVisitor implements MethodWriterSink
+{
+    private static final List<AbstractInsnNode> DETERMINISM_SETUP = singletonList(new MethodInsnNode(INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptibleThread", "enterDeterministicMethod", "()V", false));
+    private static final List<AbstractInsnNode> DETERMINISM_CLEANUP = singletonList(new MethodInsnNode(INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptibleThread", "exitDeterministicMethod", "()V", false));
+
+    class DependentTypeVisitor extends MethodVisitor
+    {
+        public DependentTypeVisitor(int api, MethodVisitor methodVisitor)
+        {
+            super(api, methodVisitor);
+        }
+
+        @Override
+        public void visitTypeInsn(int opcode, String type)
+        {
+            super.visitTypeInsn(opcode, type);
+            Utils.visitIfRefType(type, dependentTypes);
+        }
+
+        @Override
+        public void visitFieldInsn(int opcode, String owner, String name, String descriptor)
+        {
+            super.visitFieldInsn(opcode, owner, name, descriptor);
+            Utils.visitIfRefType(descriptor, dependentTypes);
+        }
+
+        @Override
+        public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface)
+        {
+            super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
+            Utils.visitEachRefType(descriptor, dependentTypes);
+        }
+
+        @Override
+        public void visitInvokeDynamicInsn(String name, String descriptor, Handle bootstrapMethodHandle, Object... bootstrapMethodArguments)
+        {
+            super.visitInvokeDynamicInsn(name, descriptor, bootstrapMethodHandle, bootstrapMethodArguments);
+            Utils.visitEachRefType(descriptor, dependentTypes);
+        }
+
+        @Override
+        public void visitLocalVariable(String name, String descriptor, String signature, Label start, Label end, int index)
+        {
+            super.visitLocalVariable(name, descriptor, signature, start, end, index);
+            Utils.visitIfRefType(descriptor, dependentTypes);
+        }
+    }
+
+    private final String className;
+    private final ChanceSupplier monitorDelayChance;
+    private final NemesisGenerator nemesis;
+    private final NemesisFieldKind.Selector nemesisFieldSelector;
+    private final Hashcode insertHashcode;
+    private final MethodLogger methodLogger;
+    private boolean isTransformed;
+    private boolean isCacheablyTransformed = true;
+    private final EnumSet<Flag> flags;
+    private final Consumer<String> dependentTypes;
+
+    ClassTransformer(int api, String className, EnumSet<Flag> flags, Consumer<String> dependentTypes)
+    {
+        this(api, new ClassWriter(0), className, flags, null, null, null, null, dependentTypes);
+    }
+
+    ClassTransformer(int api, String className, EnumSet<Flag> flags, ChanceSupplier monitorDelayChance, NemesisGenerator nemesis, NemesisFieldKind.Selector nemesisFieldSelector, Hashcode insertHashcode, Consumer<String> dependentTypes)
+    {
+        this(api, new ClassWriter(0), className, flags, monitorDelayChance, nemesis, nemesisFieldSelector, insertHashcode, dependentTypes);
+    }
+
+    private ClassTransformer(int api, ClassWriter classWriter, String className, EnumSet<Flag> flags, ChanceSupplier monitorDelayChance, NemesisGenerator nemesis, NemesisFieldKind.Selector nemesisFieldSelector, Hashcode insertHashcode, Consumer<String> dependentTypes)
+    {
+        super(api, classWriter);
+        if (flags.contains(NEMESIS) && (nemesis == null || nemesisFieldSelector == null))
+            throw new IllegalArgumentException();
+        if (flags.contains(MONITORS) && monitorDelayChance == null)
+            throw new IllegalArgumentException();
+        this.dependentTypes = dependentTypes;
+        this.className = className;
+        this.flags = flags;
+        this.monitorDelayChance = monitorDelayChance;
+        this.nemesis = nemesis;
+        this.nemesisFieldSelector = nemesisFieldSelector;
+        this.insertHashcode = insertHashcode;
+        this.methodLogger = MethodLogger.log(api, className);
+    }
+
+    @Override
+    public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value)
+    {
+        if (dependentTypes != null)
+            Utils.visitIfRefType(descriptor, dependentTypes);
+        return super.visitField(access, name, descriptor, signature, value);
+    }
+
+    @Override
+    public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions)
+    {
+        if (dependentTypes != null)
+            visitEachRefType(descriptor, dependentTypes);
+
+        EnumSet<Flag> flags = this.flags;
+        if (flags.isEmpty() || ((access & ACC_SYNTHETIC) != 0 && (name.endsWith("$unsync") || name.endsWith("$catch") || name.endsWith("$nemesis"))))
+        {
+            MethodVisitor visitor = super.visitMethod(access, name, descriptor, signature, exceptions);
+            if (dependentTypes != null && (access & (ACC_STATIC | ACC_SYNTHETIC)) != 0 && (name.equals("<clinit>") || name.startsWith("lambda$")))
+                visitor = new DependentTypeVisitor(api, visitor);
+            return visitor;
+        }
+
+        boolean isToString = false;
+        if (access == Opcodes.ACC_PUBLIC && name.equals("toString") && descriptor.equals("()Ljava/lang/String;") && !flags.contains(NO_PROXY_METHODS))
+        {
+            generateTryFinallyProxyCall(super.visitMethod(access, name, descriptor, signature, exceptions), className,
+                                        "toString$original", "()Ljava/lang/String;", access, true, false, DETERMINISM_SETUP, DETERMINISM_CLEANUP);
+            access = ACC_PRIVATE | ACC_SYNTHETIC;
+            name = "toString$original";
+            if (!flags.contains(DETERMINISTIC) || flags.contains(NEMESIS))
+            {
+                flags = EnumSet.copyOf(flags);
+                flags.add(DETERMINISTIC);
+                flags.remove(NEMESIS);
+            }
+            isToString = true;
+        }
+
+        MethodVisitor visitor;
+        if (flags.contains(MONITORS) && (access & Opcodes.ACC_SYNCHRONIZED) != 0)
+        {
+            visitor = new MonitorMethodTransformer(this, className, api, access, name, descriptor, signature, exceptions, monitorDelayChance);
+            witness(SYNCHRONIZED);
+        }
+        else
+        {
+            visitor = super.visitMethod(access, name, descriptor, signature, exceptions);
+            visitor = methodLogger.visitMethod(access, name, descriptor, visitor);
+        }
+
+        if (flags.contains(MONITORS))
+            visitor = new MonitorEnterExitParkTransformer(this, api, visitor, className, monitorDelayChance);
+        if (isToString)
+            visitor = deterministicToString(visitor);
+        if (flags.contains(GLOBAL_METHODS) || flags.contains(Flag.LOCK_SUPPORT) || flags.contains(Flag.DETERMINISTIC))
+            visitor = new GlobalMethodTransformer(flags, this, api, name, visitor);
+        if (flags.contains(NEMESIS))
+            visitor = new NemesisTransformer(this, api, name, visitor, nemesis, nemesisFieldSelector);
+        if (dependentTypes != null && (access & (ACC_STATIC | ACC_SYNTHETIC)) != 0 && (name.equals("<clinit>") || name.startsWith("lambda$")))
+            visitor = new DependentTypeVisitor(api, visitor);
+        return visitor;
+    }
+
+    @Override
+    public void visitEnd()
+    {
+        if (insertHashcode != null)
+            writeSyntheticMethod(HASHCODE, insertHashcode);
+        super.visitEnd();
+        methodLogger.visitEndOfClass();
+    }
+
+    public void writeMethod(MethodNode node)
+    {
+        writeMethod(null, node);
+    }
+
+    public void writeSyntheticMethod(TransformationKind kind, MethodNode node)
+    {
+        writeMethod(kind, node);
+    }
+
+    void writeMethod(TransformationKind kind, MethodNode node)
+    {
+        String[] exceptions = node.exceptions == null ? null : node.exceptions.toArray(new String[0]);
+        MethodVisitor visitor = super.visitMethod(node.access, node.name, node.desc, node.signature, exceptions);
+        visitor = methodLogger.visitMethod(node.access, node.name, node.desc, visitor);
+        if (kind != null)
+            witness(kind);
+        node.accept(visitor);
+    }
+
+    @Override
+    public AnnotationVisitor visitAnnotation(String descriptor, boolean visible)
+    {
+        return Utils.checkForSimulationAnnotations(api, descriptor, super.visitAnnotation(descriptor, visible), (flag, add) -> {
+            if (add) flags.add(flag);
+            else flags.remove(flag);
+        });
+    }
+
+    void readAndTransform(byte[] input)
+    {
+        ClassReader reader = new ClassReader(input);
+        reader.accept(this, 0);
+    }
+
+    void witness(TransformationKind kind)
+    {
+        isTransformed = true;
+        switch (kind)
+        {
+            case FIELD_NEMESIS:
+            case SIGNAL_NEMESIS:
+                isCacheablyTransformed = false;
+        }
+        methodLogger.witness(kind);
+    }
+
+    String className()
+    {
+        return className;
+    }
+
+    boolean isTransformed()
+    {
+        return isTransformed;
+    }
+
+    boolean isCacheablyTransformed()
+    {
+        return isCacheablyTransformed;
+    }
+
+    byte[] toBytes()
+    {
+        return ((ClassWriter) cv).toByteArray();
+    }
+}
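One of the less obvious transformations above is the toString handling in visitMethod: when NO_PROXY_METHODS is not set, the public toString is replaced by a generated proxy (generateTryFinallyProxyCall with DETERMINISM_SETUP/DETERMINISM_CLEANUP) and the original body is kept under the synthetic name toString$original, visited again with DETERMINISTIC added and NEMESIS removed. At source level the result is roughly the sketch below; this is an illustration of the generated bytecode, not code that exists anywhere in the tree.

    import org.apache.cassandra.simulator.systems.InterceptibleThread;

    // Sketch of a class after transformation (illustrative only).
    public class ExampleTransformed
    {
        private final long id = 42;

        // generated proxy: brackets the original body in the deterministic-section markers
        @Override
        public String toString()
        {
            InterceptibleThread.enterDeterministicMethod();
            try
            {
                return toString$original();
            }
            finally
            {
                InterceptibleThread.exitDeterministicMethod();
            }
        }

        // renamed original body, now private and synthetic, transformed with
        // DETERMINISTIC set and NEMESIS cleared
        private String toString$original()
        {
            return "Example{id=" + id + '}';
        }
    }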
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/Flag.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/Flag.java
new file mode 100644
index 0000000..d127064
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/Flag.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+public enum Flag
+{
+    GLOBAL_CLOCK, SYSTEM_CLOCK, MONITORS, LOCK_SUPPORT, GLOBAL_METHODS, DETERMINISTIC, NO_PROXY_METHODS, NEMESIS
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/GlobalMethodTransformer.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/GlobalMethodTransformer.java
new file mode 100644
index 0000000..fbea223
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/GlobalMethodTransformer.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.EnumSet;
+
+import org.objectweb.asm.AnnotationVisitor;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+
+import static org.apache.cassandra.simulator.asm.Flag.GLOBAL_METHODS;
+import static org.apache.cassandra.simulator.asm.TransformationKind.CONCURRENT_HASH_MAP;
+import static org.apache.cassandra.simulator.asm.TransformationKind.GLOBAL_METHOD;
+import static org.apache.cassandra.simulator.asm.TransformationKind.IDENTITY_HASH_MAP;
+
+/**
+ * Intercept factory methods in org.apache.concurrent.utils.concurrent, and redirect them to
+ * {@link org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods}
+ */
+class GlobalMethodTransformer extends MethodVisitor
+{
+    private final ClassTransformer transformer;
+    private final String methodName;
+    private boolean globalMethods;
+    private boolean globalClock;
+    private boolean systemClock;
+    private boolean lockSupport;
+    private boolean deterministic;
+    boolean hasSeenAnyMethodInsn;
+
+    public GlobalMethodTransformer(EnumSet<Flag> flags, ClassTransformer transformer, int api, String methodName, MethodVisitor parent)
+    {
+        super(api, parent);
+        this.globalMethods = flags.contains(GLOBAL_METHODS);
+        this.globalClock = flags.contains(Flag.GLOBAL_CLOCK);
+        this.systemClock = flags.contains(Flag.SYSTEM_CLOCK);
+        this.lockSupport = flags.contains(Flag.LOCK_SUPPORT);
+        this.deterministic = flags.contains(Flag.DETERMINISTIC);
+        this.transformer = transformer;
+        this.methodName = methodName;
+    }
+
+    @Override
+    public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface)
+    {
+        boolean isFirstMethodInsn = !hasSeenAnyMethodInsn;
+        hasSeenAnyMethodInsn = true;
+
+        if (globalMethods && opcode == Opcodes.INVOKESTATIC && owner.startsWith("org/apache/cassandra/utils/") && (
+               (owner.equals("org/apache/cassandra/utils/concurrent/WaitQueue") && name.equals("newWaitQueue"))
+            || (owner.equals("org/apache/cassandra/utils/concurrent/CountDownLatch") && name.equals("newCountDownLatch"))
+            || (owner.equals("org/apache/cassandra/utils/concurrent/Condition") && name.equals("newOneTimeCondition"))
+            || (owner.equals("org/apache/cassandra/utils/concurrent/BlockingQueues") && name.equals("newBlockingQueue"))
+            || (owner.equals("org/apache/cassandra/utils/concurrent/Semaphore") && (name.equals("newSemaphore") || name.equals("newFairSemaphore")))
+            ))
+        {
+            transformer.witness(GLOBAL_METHOD);
+            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfGlobalMethods$Global", name, descriptor, false);
+        }
+        else if (globalMethods && ((opcode == Opcodes.INVOKESTATIC && (
+                   owner.startsWith("org/apache/cassandra/utils/") && (
+                        (owner.equals("org/apache/cassandra/utils/Clock") && name.equals("waitUntil"))
+                     || (owner.equals("org/apache/cassandra/utils/concurrent/Awaitable$SyncAwaitable") && name.equals("waitUntil")))
+                || !deterministic && owner.equals("java/lang/System") && name.equals("identityHashCode")
+                || owner.equals("java/util/UUID") && name.equals("randomUUID")
+                || owner.equals("com/google/common/util/concurrent/Uninterruptibles") && name.equals("sleepUninterruptibly")
+                || owner.equals("sun/misc/Unsafe") && name.equals("getUnsafe")))
+             || (owner.equals("java/util/concurrent/TimeUnit") && name.equals("sleep")))
+        )
+        {
+            transformer.witness(GLOBAL_METHOD);
+            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
+        }
+        else if ((globalMethods || deterministic) && opcode == Opcodes.INVOKESTATIC &&
+            owner.equals("java/util/concurrent/ThreadLocalRandom") && (name.equals("getProbe") || name.equals("advanceProbe") || name.equals("localInit"))
+        )
+        {
+            transformer.witness(GLOBAL_METHOD);
+            // if we're in deterministic mode (i.e. for base ConcurrentHashMap) don't initialise ThreadLocalRandom
+            if (name.equals("getProbe")) super.visitLdcInsn(0);
+            else if (name.equals("advanceProbe")) super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
+        }
+        else if (globalMethods && opcode == Opcodes.INVOKESPECIAL && owner.equals("java/util/IdentityHashMap") && name.equals("<init>"))
+        {
+            transformer.witness(IDENTITY_HASH_MAP);
+            super.visitMethodInsn(opcode, "org/apache/cassandra/simulator/systems/InterceptedIdentityHashMap", name, descriptor, false);
+        }
+        else if (globalMethods && opcode == Opcodes.INVOKESPECIAL && owner.equals("java/util/concurrent/ConcurrentHashMap") && name.equals("<init>")
+                 && !(transformer.className().equals("org/apache/cassandra/simulator/systems/InterceptibleConcurrentHashMap") && methodName.equals("<init>") && isFirstMethodInsn))
+        {
+            transformer.witness(CONCURRENT_HASH_MAP);
+            super.visitMethodInsn(opcode, "org/apache/cassandra/simulator/systems/InterceptibleConcurrentHashMap", name, descriptor, false);
+        }
+        else if (lockSupport && opcode == Opcodes.INVOKESTATIC && owner.equals("java/util/concurrent/locks/LockSupport") && (name.startsWith("park") || name.equals("unpark")))
+        {
+            transformer.witness(TransformationKind.LOCK_SUPPORT);
+            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
+        }
+        else if (globalClock && opcode == Opcodes.INVOKESTATIC && name.equals("timestampMicros") && owner.equals("org/apache/cassandra/utils/FBUtilities"))
+        {
+            transformer.witness(GLOBAL_METHOD);
+            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/SimulatedTime$Global", "nextGlobalMonotonicMicros", descriptor, false);
+        }
+        else if (systemClock && opcode == Opcodes.INVOKESTATIC && owner.equals("java/lang/System") && (name.equals("nanoTime") || name.equals("currentTimeMillis")))
+        {
+            transformer.witness(GLOBAL_METHOD);
+            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
+        }
+        else
+        {
+            super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
+        }
+    }
+
+    @Override
+    public void visitTypeInsn(int opcode, String type)
+    {
+        if (globalMethods && opcode == Opcodes.NEW && type.equals("java/util/IdentityHashMap"))
+        {
+            super.visitTypeInsn(opcode, "org/apache/cassandra/simulator/systems/InterceptedIdentityHashMap");
+        }
+        else if (globalMethods && opcode == Opcodes.NEW && type.equals("java/util/concurrent/ConcurrentHashMap"))
+        {
+            super.visitTypeInsn(opcode, "org/apache/cassandra/simulator/systems/InterceptibleConcurrentHashMap");
+        }
+        else
+        {
+            super.visitTypeInsn(opcode, type);
+        }
+    }
+
+    @Override
+    public AnnotationVisitor visitAnnotation(String descriptor, boolean visible)
+    {
+        return Utils.checkForSimulationAnnotations(api, descriptor, super.visitAnnotation(descriptor, visible), (flag, add) -> {
+            switch (flag)
+            {
+                default: throw new AssertionError();
+                case GLOBAL_METHODS: globalMethods = add; break;
+                case GLOBAL_CLOCK: globalClock = add; break;
+                case SYSTEM_CLOCK: systemClock = add; break;
+                case LOCK_SUPPORT: lockSupport = add; break;
+                case DETERMINISTIC: deterministic = add; break;
+                case MONITORS: throw new UnsupportedOperationException("Cannot currently toggle MONITORS at the method level");
+            }
+        });
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/Hashcode.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/Hashcode.java
new file mode 100644
index 0000000..0377854
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/Hashcode.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.tree.InsnNode;
+import org.objectweb.asm.tree.IntInsnNode;
+import org.objectweb.asm.tree.LabelNode;
+import org.objectweb.asm.tree.MethodInsnNode;
+import org.objectweb.asm.tree.MethodNode;
+
+/**
+ * Generate a new hashCode method in the class that invokes a deterministic hashCode generator
+ */
+class Hashcode extends MethodNode
+{
+    Hashcode(int api)
+    {
+        super(api, Opcodes.ACC_PUBLIC, "hashCode", "()I", null, null);
+        maxLocals = 1;
+        maxStack = 1;
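+        // generated body: load 'this', invoke the simulator's deterministic identityHashCode, and return the result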
+        instructions.add(new LabelNode());
+        instructions.add(new IntInsnNode(Opcodes.ALOAD, 0));
+        instructions.add(new MethodInsnNode(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "identityHashCode", "(Ljava/lang/Object;)I", false));
+        instructions.add(new LabelNode());
+        instructions.add(new InsnNode(Opcodes.IRETURN));
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptAgent.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptAgent.java
new file mode 100644
index 0000000..87cfab0
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptAgent.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.instrument.ClassDefinition;
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.UnmodifiableClassException;
+import java.security.ProtectionDomain;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.function.BiFunction;
+import java.util.regex.Pattern;
+
+import org.objectweb.asm.ClassReader;
+import org.objectweb.asm.ClassVisitor;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.FieldVisitor;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+
+import static org.apache.cassandra.simulator.asm.Flag.DETERMINISTIC;
+import static org.apache.cassandra.simulator.asm.Flag.LOCK_SUPPORT;
+import static org.apache.cassandra.simulator.asm.Flag.NO_PROXY_METHODS;
+import static org.apache.cassandra.simulator.asm.Flag.SYSTEM_CLOCK;
+import static org.apache.cassandra.simulator.asm.InterceptClasses.BYTECODE_VERSION;
+import static org.objectweb.asm.Opcodes.ALOAD;
+import static org.objectweb.asm.Opcodes.GETFIELD;
+import static org.objectweb.asm.Opcodes.GETSTATIC;
+import static org.objectweb.asm.Opcodes.INVOKESPECIAL;
+import static org.objectweb.asm.Opcodes.INVOKESTATIC;
+import static org.objectweb.asm.Opcodes.INVOKEVIRTUAL;
+import static org.objectweb.asm.Opcodes.IRETURN;
+import static org.objectweb.asm.Opcodes.RETURN;
+
+/**
+ * A mechanism for weaving classes loaded by the bootstrap classloader that we cannot override.
+ * The design supports weaving of the internals of these classes, and in future we may want to
+ * weave LockSupport or the internals of other blocking concurrency primitives.
+ *
+ * Ultimately this wasn't necessary for the initial functionality, but we have maintained
+ * the layout so that it will be easier to enable such functionality in future should it be needed.
+ *
+ * To this end, the asm package and simulator-asm.jar form a self-contained set of classes for performing
+ * simulator byteweaving, and simulator-bootstrap.jar contains a self-contained class and interface for
+ * replacing important system methods.
+ */
+public class InterceptAgent
+{
+    public static void premain(final String agentArgs, final Instrumentation instrumentation) throws UnmodifiableClassException, ClassNotFoundException, IOException
+    {
+        setup(agentArgs, instrumentation);
+    }
+
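+    // The agent can be loaded at JVM startup via -javaagent (invoking premain above) or attached to an already
+    // running JVM (invoking agentmain below); per the class Javadoc, it is packaged in the self-contained simulator-asm.jar.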
+    public static void agentmain(final String agentArgs, final Instrumentation instrumentation) throws UnmodifiableClassException, ClassNotFoundException, IOException
+    {
+        setup(agentArgs, instrumentation);
+    }
+
+    private static void setup(final String agentArgs, final Instrumentation instrumentation) throws UnmodifiableClassException, ClassNotFoundException, IOException
+    {
+        instrumentation.addTransformer(new ClassFileTransformer()
+        {
+            @Override
+            public byte[] transform(ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] bytecode) throws IllegalClassFormatException
+            {
+                if (className == null)
+                    return null;
+
+                if (className.equals("java/lang/Object"))
+                    return transformObject(bytecode);
+
+                if (className.equals("java/lang/Enum"))
+                    return transformEnum(bytecode);
+
+                if (className.equals("java/util/Random"))
+                    return transformRandom(bytecode);
+
+                if (className.equals("java/util/concurrent/ThreadLocalRandom"))
+                    return transformThreadLocalRandom(bytecode);
+
+                if (className.startsWith("java/util/concurrent/ConcurrentHashMap"))
+                    return transformConcurrent(className, bytecode, DETERMINISTIC, NO_PROXY_METHODS);
+
+                if (className.startsWith("java/util/concurrent/locks"))
+                    return transformConcurrent(className, bytecode, SYSTEM_CLOCK, LOCK_SUPPORT, NO_PROXY_METHODS);
+
+                return null;
+            }
+        });
+
+        Pattern reloadPattern = Pattern.compile("java\\.(lang\\.Enum|util\\.concurrent\\.(locks\\..*|ConcurrentHashMap)|util\\.(concurrent\\.ThreadLocal)?Random|lang\\.Object)");
+        List<ClassDefinition> redefine = new ArrayList<>();
+        for (Class<?> loadedClass : instrumentation.getAllLoadedClasses())
+        {
+            if (reloadPattern.matcher(loadedClass.getName()).matches())
+                redefine.add(new ClassDefinition(loadedClass, readDefinition(loadedClass)));
+        }
+        if (!redefine.isEmpty())
+            instrumentation.redefineClasses(redefine.toArray(new ClassDefinition[0]));
+    }
+
+    private static byte[] readDefinition(Class<?> clazz) throws IOException
+    {
+        return readDefinition(clazz.getName().replaceAll("\\.", "/"));
+    }
+
+    private static byte[] readDefinition(String className) throws IOException
+    {
+        byte[] bytes = new byte[1024];
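+        // start with a small buffer and double it whenever it fills (read returns 0 once no space remains), trimming to the exact size at the end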
+        try (InputStream in = ClassLoader.getSystemResourceAsStream(className + ".class"))
+        {
+            int count = 0;
+            while (true)
+            {
+                int add = in.read(bytes, count, bytes.length - count);
+                if (add < 0)
+                    break;
+                if (add == 0)
+                    bytes = Arrays.copyOf(bytes, bytes.length * 2);
+                count += add;
+            }
+            return Arrays.copyOf(bytes, count);
+        }
+    }
+
+    /**
+     * We don't want Object.toString() to invoke our overridden identityHashCode by virtue of invoking some overridden hashCode(),
+     * so we overwrite Object.toString() to replace calls to Object.hashCode() with direct calls to System.identityHashCode().
+     */
+    private static byte[] transformObject(byte[] bytes)
+    {
+        class ObjectVisitor extends ClassVisitor
+        {
+            public ObjectVisitor(int api, ClassVisitor classVisitor)
+            {
+                super(api, classVisitor);
+            }
+
+            @Override
+            public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions)
+            {
+                if (descriptor.equals("()Ljava/lang/String;") && name.equals("toString"))
+                    return Utils.deterministicToString(super.visitMethod(access, name, descriptor, signature, exceptions));
+                else
+                    return super.visitMethod(access, name, descriptor, signature, exceptions);
+            }
+        }
+        return transform(bytes, ObjectVisitor::new);
+    }
+
+    /**
+     * We want Enum to have a deterministic hashCode(), so we simply return the ordinal
+     */
+    private static byte[] transformEnum(byte[] bytes)
+    {
+        class EnumVisitor extends ClassVisitor
+        {
+            public EnumVisitor(int api, ClassVisitor classVisitor)
+            {
+                super(api, classVisitor);
+            }
+
+            @Override
+            public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions)
+            {
+                if (descriptor.equals("()I") && name.equals("hashCode"))
+                {
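+                    // emit a body equivalent to `return this.ordinal;`, discarding the original implementation by handing back a throw-away visitor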
+                    MethodVisitor visitor = super.visitMethod(access, name, descriptor, signature, exceptions);
+                    visitor.visitLabel(new Label());
+                    visitor.visitIntInsn(ALOAD, 0);
+                    visitor.visitFieldInsn(GETFIELD, "java/lang/Enum", "ordinal", "I");
+                    visitor.visitInsn(IRETURN);
+                    visitor.visitLabel(new Label());
+                    visitor.visitMaxs(1, 1);
+                    visitor.visitEnd();
+
+                    return new MethodVisitor(BYTECODE_VERSION) {};
+                }
+                else
+                {
+                    return super.visitMethod(access, name, descriptor, signature, exceptions);
+                }
+            }
+        }
+        return transform(bytes, EnumVisitor::new);
+    }
+
+    /**
+     * We want Random to be initialised deterministically, so we modify the default constructor to fetch
+     * some deterministically generated seed to pass to its seed constructor
+     */
+    private static byte[] transformRandom(byte[] bytes)
+    {
+        class RandomVisitor extends ClassVisitor
+        {
+            public RandomVisitor(int api, ClassVisitor classVisitor)
+            {
+                super(api, classVisitor);
+            }
+
+            @Override
+            public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions)
+            {
+                if (descriptor.equals("()V") && name.equals("<init>"))
+                {
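+                    // rewrite the no-arg constructor to the equivalent of `this(Global.randomSeed())`, delegating to the seeded constructor with a deterministic seed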
+                    MethodVisitor visitor = super.visitMethod(access, name, descriptor, signature, exceptions);
+                    visitor.visitLabel(new Label());
+                    visitor.visitIntInsn(ALOAD, 0);
+                    visitor.visitMethodInsn(INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "randomSeed", "()J", false);
+                    visitor.visitMethodInsn(INVOKESPECIAL, "java/util/Random", "<init>", "(J)V", false);
+                    visitor.visitInsn(RETURN);
+                    visitor.visitLabel(new Label());
+                    visitor.visitMaxs(3, 1);
+                    visitor.visitEnd();
+
+                    return new MethodVisitor(BYTECODE_VERSION) {};
+                }
+                else
+                {
+                    return super.visitMethod(access, name, descriptor, signature, exceptions);
+                }
+            }
+        }
+        return transform(bytes, RandomVisitor::new);
+    }
+
+    /**
+     * We require ThreadLocalRandom to be deterministic, so we modify its initialisation method to invoke a
+     * global deterministic random value generator
+     */
+    private static byte[] transformThreadLocalRandom(byte[] bytes)
+    {
+        class ThreadLocalRandomVisitor extends ClassVisitor
+        {
+            // CassandraRelevantProperties is not available to us here
+            final boolean determinismCheck = System.getProperty("cassandra.test.simulator.determinismcheck", "none").matches("relaxed|strict");
+
+            public ThreadLocalRandomVisitor(int api, ClassVisitor classVisitor)
+            {
+                super(api, classVisitor);
+            }
+
+            String unsafeDescriptor;
+            String unsafeFieldName;
+
+            @Override
+            public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value)
+            {
+                if (descriptor.equals("Lsun/misc/Unsafe;") || descriptor.equals("Ljdk/internal/misc/Unsafe;"))
+                {
+                    unsafeFieldName = name;
+                    unsafeDescriptor = descriptor;
+                }
+                return super.visitField(access, name, descriptor, signature, value);
+            }
+
+            @Override
+            public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions)
+            {
+                if (descriptor.equals("()V") && name.equals("localInit"))
+                {
+                    if (unsafeFieldName == null)
+                    {
+                        String version = System.getProperty("java.version");
+                        if (version.startsWith("11.")) { unsafeFieldName = "U"; unsafeDescriptor = "Ljdk/internal/misc/Unsafe;"; }
+                        else if (version.startsWith("1.8")) { unsafeFieldName = "UNSAFE"; unsafeDescriptor = "Lsun/misc/Unsafe;"; }
+                        else throw new AssertionError("Unsupported Java Version");
+                    }
+
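+                    // generated body (roughly): U.putLong(Thread.currentThread(), SEED, Global.randomSeed()); U.putInt(Thread.currentThread(), PROBE, 0)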
+                    MethodVisitor visitor = super.visitMethod(access, name, descriptor, signature, exceptions);
+                    visitor.visitLabel(new Label());
+                    visitor.visitIntInsn(ALOAD, 0);
+                    visitor.visitFieldInsn(GETSTATIC, "java/util/concurrent/ThreadLocalRandom", unsafeFieldName, unsafeDescriptor);
+                    visitor.visitMethodInsn(INVOKESTATIC, "java/lang/Thread", "currentThread", "()Ljava/lang/Thread;", false);
+                    visitor.visitFieldInsn(GETSTATIC, "java/util/concurrent/ThreadLocalRandom", "SEED", "J");
+                    visitor.visitMethodInsn(INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "randomSeed", "()J", false);
+                    visitor.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "putLong", "(Ljava/lang/Object;JJ)V", false);
+                    visitor.visitFieldInsn(GETSTATIC, "java/util/concurrent/ThreadLocalRandom", unsafeFieldName, unsafeDescriptor);
+                    visitor.visitMethodInsn(INVOKESTATIC, "java/lang/Thread", "currentThread", "()Ljava/lang/Thread;", false);
+                    visitor.visitFieldInsn(GETSTATIC, "java/util/concurrent/ThreadLocalRandom", "PROBE", "J");
+                    visitor.visitLdcInsn(0);
+                    visitor.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "putInt", "(Ljava/lang/Object;JI)V", false);
+                    visitor.visitInsn(RETURN);
+                    visitor.visitLabel(new Label());
+                    visitor.visitMaxs(6, 1);
+                    visitor.visitEnd();
+
+                    return new MethodVisitor(BYTECODE_VERSION) {};
+                }
+                else
+                {
+                    MethodVisitor mv = super.visitMethod(access, name, descriptor, signature, exceptions);
+                    if (determinismCheck && (name.equals("nextSeed") || name.equals("nextSecondarySeed")))
+                        mv = new ThreadLocalRandomCheckTransformer(api, mv);
+                    return mv;
+                }
+            }
+        }
+        return transform(bytes, ThreadLocalRandomVisitor::new);
+    }
+
+    private static byte[] transform(byte[] bytes, BiFunction<Integer, ClassWriter, ClassVisitor> constructor)
+    {
+        ClassWriter out = new ClassWriter(0);
+        ClassReader in = new ClassReader(bytes);
+        ClassVisitor transform = constructor.apply(BYTECODE_VERSION, out);
+        in.accept(transform, 0);
+        return out.toByteArray();
+    }
+
+    private static byte[] transformConcurrent(String className, byte[] bytes, Flag flag, Flag ... flags)
+    {
+        ClassTransformer transformer = new ClassTransformer(BYTECODE_VERSION, className, EnumSet.of(flag, flags), null);
+        transformer.readAndTransform(bytes);
+        if (!transformer.isTransformed())
+            return null;
+        return transformer.toBytes();
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptClasses.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptClasses.java
new file mode 100644
index 0000000..a57074d
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptClasses.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Serializable;
+import java.io.UncheckedIOException;
+import java.lang.reflect.Method;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import java.util.regex.Pattern;
+
+import org.objectweb.asm.Opcodes;
+
+import static org.apache.cassandra.simulator.asm.InterceptClasses.Cached.Kind.MODIFIED;
+import static org.apache.cassandra.simulator.asm.InterceptClasses.Cached.Kind.UNMODIFIED;
+import static org.apache.cassandra.simulator.asm.InterceptClasses.Cached.Kind.UNSHAREABLE;
+
+// TODO (completeness): confirm that those classes we weave monitor-access for only extend other classes we also weave monitor access for
+// TODO (completeness): confirm that those classes we weave monitor access for only take monitors on types we also weave monitor access for (and vice versa)
+// WARNING: does not implement IClassTransformer directly, as it must be accessible to the bootstrap class loader
+public class InterceptClasses implements BiFunction<String, byte[], byte[]>
+{
+    public static final int BYTECODE_VERSION = Opcodes.ASM7;
+
+    // TODO (cleanup): use annotations
+    private static final Pattern MONITORS = Pattern.compile( "org[/.]apache[/.]cassandra[/.]utils[/.]concurrent[/.].*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]concurrent[/.].*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]simulator[/.]test.*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]db[/.]ColumnFamilyStore.*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]db[/.]Keyspace.*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]db[/.]SystemKeyspace.*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]streaming[/.].*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]db.streaming[/.].*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]distributed[/.]impl[/.]DirectStreamingConnectionFactory.*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]db[/.]commitlog[/.].*" +
+                                                            "|org[/.]apache[/.]cassandra[/.]service[/.]paxos[/.].*");
+
+    private static final Pattern GLOBAL_METHODS = Pattern.compile("org[/.]apache[/.]cassandra[/.](?!simulator[/.]).*" +
+                                                                  "|org[/.]apache[/.]cassandra[/.]simulator[/.]test[/.].*" +
+                                                                  "|org[/.]apache[/.]cassandra[/.]simulator[/.]cluster[/.].*" +
+                                                                  "|io[/.]netty[/.]util[/.]concurrent[/.]FastThreadLocal"); // intercept IdentityHashMap for execution consistency
+    private static final Pattern NEMESIS = GLOBAL_METHODS;
+    private static final Set<String> WARNED = Collections.newSetFromMap(new ConcurrentHashMap<>());
+
+    static final byte[] SENTINEL = new byte[0];
+    static class Cached
+    {
+        enum Kind { MODIFIED, UNMODIFIED, UNSHAREABLE }
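+        // MODIFIED: the transformed bytes can be shared across class loaders; UNMODIFIED: the original bytes are returned untouched;
+        // UNSHAREABLE: the class must be re-transformed for each class loader (tracked via the isolated cache)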
+        final Kind kind;
+        final byte[] bytes;
+        final Set<String> uncacheablePeers;
+        private Cached(Kind kind, byte[] bytes, Set<String> uncacheablePeers)
+        {
+            this.kind = kind;
+            this.bytes = bytes;
+            this.uncacheablePeers = uncacheablePeers;
+        }
+    }
+
+    static class PeerGroup
+    {
+        final Set<String> uncacheablePeers = new TreeSet<>();
+        final Cached unmodified = new Cached(UNMODIFIED, null, uncacheablePeers);
+    }
+
+    class SubTransformer implements BiFunction<String, byte[], byte[]>
+    {
+        private final Map<String, byte[]> isolatedCache = new ConcurrentHashMap<>();
+
+        @Override
+        public byte[] apply(String name, byte[] bytes)
+        {
+            return transformTransitiveClosure(name, bytes, isolatedCache);
+        }
+    }
+
+    private final Map<String, Cached> cache = new ConcurrentHashMap<>();
+
+    private final int api;
+    private final ChanceSupplier nemesisChance;
+    private final ChanceSupplier monitorDelayChance;
+    private final Hashcode insertHashcode;
+    private final NemesisFieldKind.Selector nemesisFieldSelector;
+    private final ClassLoader prewarmClassLoader;
+    private final Predicate<String> prewarm;
+    private final byte[] bufIn = new byte[4096];
+    private final ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
+
+    public InterceptClasses(ChanceSupplier monitorDelayChance, ChanceSupplier nemesisChance, NemesisFieldKind.Selector nemesisFieldSelector, ClassLoader prewarmClassLoader, Predicate<String> prewarm)
+    {
+        this(BYTECODE_VERSION, monitorDelayChance, nemesisChance, nemesisFieldSelector, prewarmClassLoader, prewarm);
+    }
+
+    public InterceptClasses(int api, ChanceSupplier monitorDelayChance, ChanceSupplier nemesisChance, NemesisFieldKind.Selector nemesisFieldSelector, ClassLoader prewarmClassLoader, Predicate<String> prewarm)
+    {
+        this.api = api;
+        this.nemesisChance = nemesisChance;
+        this.monitorDelayChance = monitorDelayChance;
+        this.insertHashcode = new Hashcode(api);
+        this.nemesisFieldSelector = nemesisFieldSelector;
+        this.prewarmClassLoader = prewarmClassLoader;
+        this.prewarm = prewarm;
+    }
+
+    @Override
+    public byte[] apply(String name, byte[] bytes)
+    {
+        return transformTransitiveClosure(name, bytes, null);
+    }
+
+    private synchronized byte[] transformTransitiveClosure(String externalName, byte[] input, Map<String, byte[]> isolatedCache)
+    {
+        if (input == null)
+            return maybeSynthetic(externalName);
+
+        String internalName = dotsToSlashes(externalName);
+        if (isolatedCache != null)
+        {
+            byte[] isolatedCached = isolatedCache.get(internalName);
+            if (isolatedCached != null)
+                return isolatedCached == SENTINEL ? input : isolatedCached;
+        }
+
+        Cached cached = cache.get(internalName);
+        if (cached != null)
+        {
+            if (isolatedCache == null)
+            {
+                switch (cached.kind)
+                {
+                    default: throw new AssertionError();
+                    case MODIFIED:
+                        return cached.bytes;
+                    case UNMODIFIED:
+                        return input;
+                    case UNSHAREABLE:
+                        return transform(internalName, externalName, null, input, null, null);
+                }
+            }
+
+            for (String peer : cached.uncacheablePeers)
+                transform(peer, slashesToDots(peer), null, cache.get(peer).bytes, isolatedCache, null);
+
+            switch (cached.kind)
+            {
+                default: throw new AssertionError();
+                case MODIFIED:
+                    return cached.bytes;
+                case UNMODIFIED:
+                    return input;
+                case UNSHAREABLE:
+                    return isolatedCache.get(internalName);
+            }
+        }
+
+        Set<String> visited = new HashSet<>();
+        visited.add(internalName);
+        NavigableSet<String> load = new TreeSet<>();
+        Consumer<String> dependentTypeConsumer = type -> {
+            if (prewarm.test(type) && visited.add(type))
+                load.add(type);
+        };
+
+        final PeerGroup peerGroup = new PeerGroup();
+        byte[] result = transform(internalName, externalName, peerGroup, input, isolatedCache, dependentTypeConsumer);
+        for (String next = load.pollFirst(); next != null; next = load.pollFirst())
+        {
+            // TODO (now): otherwise merge peer groups
+            Cached existing = cache.get(next);
+            if (existing == null)
+                transform(next, slashesToDots(next), peerGroup, read(next), isolatedCache, dependentTypeConsumer);
+        }
+
+        return result;
+    }
+
+    private byte[] read(String name)
+    {
+        try (InputStream in = prewarmClassLoader.getResourceAsStream(dotsToSlashes(name) + ".class"))
+        {
+            if (in == null)
+                throw new NoClassDefFoundError(dotsToSlashes(name) + ".class");
+
+            bufOut.reset();
+            for (int c = in.read(bufIn) ; c >= 0 ; c = in.read(bufIn))
+                bufOut.write(bufIn, 0, c);
+            return bufOut.toByteArray();
+        }
+        catch (IOException e)
+        {
+            throw new NoClassDefFoundError(name);
+        }
+    }
+
+    private byte[] transform(String internalName, String externalName, PeerGroup peerGroup, byte[] input, Map<String, byte[]> isolatedCache, Consumer<String> dependentTypes)
+    {
+        Hashcode hashcode = insertHashCode(externalName);
+
+        EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
+        if (MONITORS.matcher(internalName).matches())
+        {
+            flags.add(Flag.MONITORS);
+        }
+        if (GLOBAL_METHODS.matcher(internalName).matches())
+        {
+            flags.add(Flag.GLOBAL_METHODS);
+            flags.add(Flag.LOCK_SUPPORT);
+        }
+        if (NEMESIS.matcher(internalName).matches())
+        {
+            flags.add(Flag.NEMESIS);
+        }
+
+        if (flags.isEmpty() && hashcode == null)
+        {
+            cache.put(internalName, peerGroup.unmodified);
+            return input;
+        }
+
+        ClassTransformer transformer = new ClassTransformer(api, internalName, flags, monitorDelayChance, new NemesisGenerator(api, internalName, nemesisChance), nemesisFieldSelector, hashcode, dependentTypes);
+        transformer.readAndTransform(input);
+
+        if (!transformer.isTransformed())
+        {
+            cache.put(internalName, peerGroup.unmodified);
+            return input;
+        }
+
+        byte[] output = transformer.toBytes();
+        if (transformer.isCacheablyTransformed())
+        {
+            cache.put(internalName, new Cached(MODIFIED, output, peerGroup.uncacheablePeers));
+        }
+        else
+        {
+            if (peerGroup != null)
+            {
+                cache.put(internalName, new Cached(UNSHAREABLE, input, peerGroup.uncacheablePeers));
+                peerGroup.uncacheablePeers.add(internalName);
+            }
+            if (isolatedCache != null)
+                isolatedCache.put(internalName, output);
+        }
+
+        return output;
+    }
+
+    static String dotsToSlashes(String className)
+    {
+        return className.replace('.', '/');
+    }
+
+    static String dotsToSlashes(Class<?> clazz)
+    {
+        return dotsToSlashes(clazz.getName());
+    }
+
+    static String slashesToDots(String className)
+    {
+        return className.replace('/', '.');
+    }
+
+    /**
+     * Decide if we should insert our own hashCode() implementation that assigns deterministic hashes, i.e.
+     *   - If it's one of our classes
+     *   - If its parent is not one of our classes (else we'll assign it one anyway)
+     *   - If it does not have its own hashCode() implementation that overrides Object's
+     *   - If it is not Serializable OR it has a serialVersionUID
+     *
+     * Otherwise we either probably do not need it, or may break serialization between classloaders
+     */
+    private Hashcode insertHashCode(String externalName)
+    {
+        try
+        {
+            if (!externalName.startsWith("org.apache.cassandra"))
+                return null;
+
+            Class<?> sharedClass = getClass().getClassLoader().loadClass(externalName);
+            if (sharedClass.isInterface() || sharedClass.isEnum() || sharedClass.isArray() || sharedClass.isSynthetic())
+                return null;
+
+            Class<?> parent = sharedClass.getSuperclass();
+            if (parent.getName().startsWith("org.apache.cassandra"))
+                return null;
+
+            try
+            {
+                Method method = sharedClass.getMethod("hashCode");
+                if (method.getDeclaringClass() != Object.class)
+                    return null;
+            }
+            catch (NoSuchMethodException ignore)
+            {
+            }
+
+            if (!Serializable.class.isAssignableFrom(sharedClass))
+                return insertHashcode;
+
+            try
+            {
+                // if we haven't specified serialVersionUID we break ObjectInputStream transfers between class loaders
+                // (might be easiest to switch to serialization that doesn't require it)
+                sharedClass.getDeclaredField("serialVersionUID");
+                return insertHashcode;
+            }
+            catch (NoSuchFieldException e)
+            {
+                if (!Throwable.class.isAssignableFrom(sharedClass) && WARNED.add(externalName))
+                    System.err.println("No serialVersionUID on Serializable " + sharedClass);
+                return null;
+            }
+        }
+        catch (ClassNotFoundException e)
+        {
+            System.err.println("Unable to determine if should insert hashCode() for " + externalName);
+            e.printStackTrace();
+        }
+        return null;
+    }
+
+    static final String shadowRootExternalType = "org.apache.cassandra.simulator.systems.InterceptibleConcurrentHashMap";
+    static final String shadowRootType = "org/apache/cassandra/simulator/systems/InterceptibleConcurrentHashMap";
+    static final String originalRootType = Utils.toInternalName(ConcurrentHashMap.class);
+    static final String shadowOuterTypePrefix = shadowRootType + '$';
+    static final String originalOuterTypePrefix = originalRootType + '$';
+
+    protected byte[] maybeSynthetic(String externalName)
+    {
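+        // requests for the shadow InterceptibleConcurrentHashMap types have no real class file, so we synthesise them
+        // by transforming the corresponding java.util.concurrent.ConcurrentHashMap definitions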
+        if (!externalName.startsWith(shadowRootExternalType))
+            return null;
+
+        try
+        {
+            String originalType, shadowType = Utils.toInternalName(externalName);
+            if (!shadowType.startsWith(shadowOuterTypePrefix))
+                originalType = originalRootType;
+            else
+                originalType = originalOuterTypePrefix + externalName.substring(shadowOuterTypePrefix.length());
+
+            EnumSet<Flag> flags = EnumSet.of(Flag.GLOBAL_METHODS, Flag.MONITORS, Flag.LOCK_SUPPORT);
+            if (NEMESIS.matcher(externalName).matches()) flags.add(Flag.NEMESIS);
+            NemesisGenerator nemesis = new NemesisGenerator(api, externalName, nemesisChance);
+
+            ShadowingTransformer transformer;
+            transformer = new ShadowingTransformer(InterceptClasses.BYTECODE_VERSION,
+                                                   originalType, shadowType, originalRootType, shadowRootType,
+                                                   originalOuterTypePrefix, shadowOuterTypePrefix,
+                                                   flags, monitorDelayChance, nemesis, nemesisFieldSelector, null);
+            transformer.readAndTransform(Utils.readDefinition(originalType + ".class"));
+            return transformer.toBytes();
+        }
+        catch (IOException e)
+        {
+            throw new UncheckedIOException(e);
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/MethodLogger.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/MethodLogger.java
new file mode 100644
index 0000000..1e6a844
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/MethodLogger.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Pattern;
+
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.util.Printer;
+import org.objectweb.asm.util.Textifier;
+import org.objectweb.asm.util.TraceMethodVisitor;
+
+import static java.util.Arrays.stream;
+import static org.apache.cassandra.simulator.asm.MethodLogger.Level.NONE;
+import static org.apache.cassandra.simulator.asm.MethodLogger.Level.valueOf;
+
+// TODO (config): support logging only for packages/classes matching a pattern
+interface MethodLogger
+{
+    static final Level LOG = valueOf(System.getProperty("cassandra.test.simulator.print_asm", "none").toUpperCase());
+    static final Set<TransformationKind> KINDS = System.getProperty("cassandra.test.simulator.print_asm_opts", "").isEmpty()
+                                                 ? EnumSet.allOf(TransformationKind.class)
+                                                 : stream(System.getProperty("cassandra.test.simulator.print_asm_opts", "").split(","))
+                                                   .map(TransformationKind::valueOf)
+                                                   .collect(() -> EnumSet.noneOf(TransformationKind.class), Collection::add, Collection::addAll);
+    static final Pattern LOG_CLASSES = System.getProperty("cassandra.test.simulator.print_asm_classes", "").isEmpty()
+                                                 ? null
+                                                 : Pattern.compile(System.getProperty("cassandra.test.simulator.print_asm_classes", ""));
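+    // for example (illustrative): -Dcassandra.test.simulator.print_asm=method_summary
+    //                             -Dcassandra.test.simulator.print_asm_opts=MONITOR,LOCK_SUPPORT
+    //                             -Dcassandra.test.simulator.print_asm_classes=".*Keyspace.*"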
+
+    // debug the output of each class at most once
+    static final Set<String> LOGGED_CLASS = LOG != NONE ? Collections.newSetFromMap(new ConcurrentHashMap<>()) : null;
+
+    enum Level { NONE, CLASS_SUMMARY, CLASS_DETAIL, METHOD_SUMMARY, METHOD_DETAIL, ASM }
+
+    MethodVisitor visitMethod(int access, String name, String descriptor, MethodVisitor parent);
+    void witness(TransformationKind kind);
+    void visitEndOfClass();
+
+    static MethodLogger log(int api, String className)
+    {
+        switch (LOG)
+        {
+            default:
+            case NONE:
+                return None.INSTANCE;
+            case ASM:
+                return (LOG_CLASSES == null || LOG_CLASSES.matcher(className).matches()) && LOGGED_CLASS.add(className)
+                       ? new Printing(api, className) : None.INSTANCE;
+            case CLASS_DETAIL:
+            case CLASS_SUMMARY:
+            case METHOD_DETAIL:
+            case METHOD_SUMMARY:
+                return (LOG_CLASSES == null || LOG_CLASSES.matcher(className).matches()) && LOGGED_CLASS.add(className)
+                       ? new Counting(api, className, LOG) : None.INSTANCE;
+        }
+    }
+
+    static class None implements MethodLogger
+    {
+        static final None INSTANCE = new None();
+
+        @Override
+        public MethodVisitor visitMethod(int access, String name, String descriptor, MethodVisitor parent)
+        {
+            return parent;
+        }
+
+        @Override
+        public void witness(TransformationKind kind)
+        {
+        }
+
+        @Override
+        public void visitEndOfClass()
+        {
+        }
+    }
+
+    static class Counting implements MethodLogger
+    {
+        final int api;
+        final String className;
+        final Level level;
+        StringWriter buffer = new StringWriter();
+        PrintWriter out = new PrintWriter(buffer);
+
+        boolean isMethodInProgress;
+        boolean printMethod;
+        boolean printClass;
+
+        int methodCount;
+        final int[] methodCounts = new int[TransformationKind.VALUES.size()];
+        final int[] classCounts = new int[TransformationKind.VALUES.size()];
+
+        public Counting(int api, String className, Level level)
+        {
+            this.api = api;
+            this.className = className;
+            this.level = level;
+        }
+
+        @Override
+        public MethodVisitor visitMethod(int access, String name, String descriptor, MethodVisitor parent)
+        {
+            ++methodCount;
+            if (isMethodInProgress)
+                return parent;
+
+            return new MethodVisitor(api, parent) {
+                @Override
+                public void visitEnd()
+                {
+                    super.visitEnd();
+                    if (printMethod)
+                    {
+                        for (int i = 0 ; i < methodCounts.length ; ++i)
+                            classCounts[i] += methodCounts[i];
+
+                        switch (level)
+                        {
+                            case METHOD_DETAIL:
+                                out.printf("Transformed %s.%s %s\n", className, name, descriptor);
+                                for (int i = 0 ; i < methodCounts.length ; ++i)
+                                {
+                                    if (methodCounts[i] > 0)
+                                        out.printf("    %3d %s\n", methodCounts[i], TransformationKind.VALUES.get(i));
+                                }
+                                break;
+
+                            case METHOD_SUMMARY:
+                                out.printf("Transformed %s.%s %s with %d modifications\n", className, name, descriptor, stream(methodCounts).sum());
+                                break;
+                        }
+                        printMethod = false;
+                        Arrays.fill(methodCounts, 0);
+                    }
+                    isMethodInProgress = false;
+                }
+            };
+        }
+
+        public void visitEndOfClass()
+        {
+            if (!printClass)
+                return;
+
+            switch (level)
+            {
+                case CLASS_DETAIL:
+                    out.printf("Transformed %s: %d methods\n", className, methodCount);
+                    for (int i = 0 ; i < classCounts.length ; ++i)
+                    {
+                        if (classCounts[i] > 0)
+                            out.printf("    %3d %s\n", classCounts[i], TransformationKind.VALUES.get(i));
+                    }
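+                    // fall through: detail output also prints the summary line below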
+                case CLASS_SUMMARY:
+                    out.printf("Transformed %s: %d methods with %d modifications\n", className, methodCount, stream(classCounts).sum());
+            }
+            System.out.print(buffer.toString());
+            buffer = null;
+            out = null;
+        }
+
+        @Override
+        public void witness(TransformationKind kind)
+        {
+            ++methodCounts[kind.ordinal()];
+            if (KINDS.contains(kind))
+            {
+                printMethod = true;
+                printClass = true;
+            }
+        }
+    }
+
+    static class Printing implements MethodLogger
+    {
+        final int api;
+        final String className;
+        final Textifier textifier = new Textifier();
+        StringWriter buffer = new StringWriter();
+        PrintWriter out = new PrintWriter(buffer);
+
+        boolean printClass;
+        boolean printMethod;
+        boolean isMethodInProgress;
+
+        public Printing(int api, String className)
+        {
+            this.api = api;
+            this.className = className;
+        }
+
+        @Override
+        public MethodVisitor visitMethod(int access, String name, String descriptor, MethodVisitor parent)
+        {
+            Printer printer = textifier.visitMethod(access, name, descriptor, null, null);
+            boolean isOuter = !isMethodInProgress;
+            if (isOuter) isMethodInProgress = true;
+            return new TraceMethodVisitor(new MethodVisitor(api, parent) {
+                @Override
+                public void visitEnd()
+                {
+                    super.visitEnd();
+                    if (printMethod)
+                    {
+                        out.println("====" + className + '.' + name + ' ' + descriptor + ' ');
+                        printer.print(out);
+                    }
+                    if (isOuter) isMethodInProgress = false;
+                }
+            }, printer);
+        }
+
+        @Override
+        public void witness(TransformationKind kind)
+        {
+            if (KINDS.contains(kind))
+            {
+                printMethod = true;
+                printClass = true;
+            }
+        }
+
+        @Override
+        public void visitEndOfClass()
+        {
+            if (printClass)
+                System.out.println(buffer.toString());
+            buffer = null;
+            out = null;
+        }
+    }
+
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/MethodWriterSink.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/MethodWriterSink.java
new file mode 100644
index 0000000..a4b8a8d
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/MethodWriterSink.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import org.objectweb.asm.tree.MethodNode;
+
+public interface MethodWriterSink
+{
+
+    void writeMethod(MethodNode method);
+    void writeSyntheticMethod(TransformationKind kind, MethodNode method);
+
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/MonitorEnterExitParkTransformer.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/MonitorEnterExitParkTransformer.java
new file mode 100644
index 0000000..f92013e
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/MonitorEnterExitParkTransformer.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+
+import static org.apache.cassandra.simulator.asm.TransformationKind.LOCK_SUPPORT;
+import static org.apache.cassandra.simulator.asm.TransformationKind.MONITOR;
+
+/**
+ * Handles simple thread signalling behaviours, redirecting monitorenter/monitorexit bytecodes to
+ * {@link org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods}, and LockSupport invocations to
+ * {@link org.apache.cassandra.simulator.systems.InterceptibleThread}.
+ *
+ * The global static methods we redirect monitors to take only one parameter (the monitor) and also return it,
+ * so that they have net zero effect on the stack, permitting the existing monitorenter/monitorexit instructions
+ * to remain where they are. LockSupport on the other hand is redirected entirely to the new method.
+ */
+class MonitorEnterExitParkTransformer extends MethodVisitor
+{
+    private final ClassTransformer transformer;
+    private final String className;
+    private final ChanceSupplier monitorDelayChance;
+
+    public MonitorEnterExitParkTransformer(ClassTransformer transformer,
+                                           int api,
+                                           MethodVisitor parent,
+                                           String className,
+                                           ChanceSupplier monitorDelayChance)
+    {
+        super(api, parent);
+        this.transformer = transformer;
+        this.className = className;
+        this.monitorDelayChance = monitorDelayChance;
+    }
+
+    @Override
+    public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface)
+    {
+        if (opcode == Opcodes.INVOKEVIRTUAL && !isInterface && owner.equals("java/lang/Object"))
+        {
+            switch (name.charAt(0))
+            {
+                case 'w':
+                    assert name.equals("wait");
+                    switch (descriptor.charAt(2))
+                    {
+                        default:
+                            throw new AssertionError("Unexpected descriptor for method wait() in " + className + '.' + name);
+                        case 'V': // ()V
+                            transformer.witness(MONITOR);
+                            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "wait", "(Ljava/lang/Object;)V", false);
+                            return;
+                        case ')': // (J)V
+                            transformer.witness(MONITOR);
+                            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "wait", "(Ljava/lang/Object;J)V", false);
+                            return;
+                        case 'I': // (JI)V
+                            transformer.witness(MONITOR);
+                            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "wait", "(Ljava/lang/Object;JI)V", false);
+                            return;
+                    }
+                case 'n':
+                    switch (name.length())
+                    {
+                        default:
+                            throw new AssertionError();
+                        case 6: // notify
+                            transformer.witness(MONITOR);
+                            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "notify", "(Ljava/lang/Object;)V", false);
+                            return;
+
+                        case 9: // notifyAll
+                            transformer.witness(MONITOR);
+                            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "notifyAll", "(Ljava/lang/Object;)V", false);
+                            return;
+                    }
+            }
+        }
+        if (opcode == Opcodes.INVOKESTATIC && !isInterface && owner.equals("java/util/concurrent/locks/LockSupport"))
+        {
+            transformer.witness(LOCK_SUPPORT);
+            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptibleThread", name, descriptor, false);
+            return;
+        }
+        super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
+    }
+
+    @Override
+    public void visitInsn(int opcode)
+    {
+        switch (opcode)
+        {
+            case Opcodes.MONITORENTER:
+                transformer.witness(MONITOR);
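+                // preMonitorEnter(monitor, chance) returns the monitor, so the stack is left as-is for the original MONITORENTER emitted below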
+                super.visitLdcInsn(monitorDelayChance.get());
+                super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "preMonitorEnter", "(Ljava/lang/Object;F)Ljava/lang/Object;", false);
+                break;
+            case Opcodes.MONITOREXIT:
+                transformer.witness(MONITOR);
+                super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "preMonitorExit", "(Ljava/lang/Object;)Ljava/lang/Object;", false);
+                break;
+        }
+        super.visitInsn(opcode);
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/MonitorMethodTransformer.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/MonitorMethodTransformer.java
new file mode 100644
index 0000000..d9c9c7a
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/MonitorMethodTransformer.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.Comparator;
+import java.util.ListIterator;
+
+import org.objectweb.asm.Label;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+import org.objectweb.asm.tree.FrameNode;
+import org.objectweb.asm.tree.InsnNode;
+import org.objectweb.asm.tree.IntInsnNode;
+import org.objectweb.asm.tree.LdcInsnNode;
+import org.objectweb.asm.tree.LocalVariableNode;
+import org.objectweb.asm.tree.MethodInsnNode;
+import org.objectweb.asm.tree.MethodNode;
+import org.objectweb.asm.tree.TryCatchBlockNode;
+
+import static org.apache.cassandra.simulator.asm.TransformationKind.MONITOR;
+
+/**
+ * For synchronized methods, we generate a new method that contains the source method's body, and the original method
+ * instead invokes preMonitorEnter before invoking the new hidden method.
+ */
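+/*
+ * Illustrative sketch only (hypothetical method `size`, not part of this patch; Global abbreviates
+ * InterceptorOfSystemMethods$Global): a declaration such as
+ *
+ *     synchronized int size() { return count; }
+ *
+ * is conceptually rewritten into a hidden body plus a wrapper of the same name:
+ *
+ *     private int size$unsync() { return count; }              // writeOriginal(): ACC_SYNCHRONIZED removed
+ *     int size() {                                             // writeTryCatchMonitorEnterExit()
+ *         Global.preMonitorEnter(this, chance); monitorenter this;
+ *         try { int r = size$unsync(); Global.preMonitorExit(this); monitorexit this; return r; }
+ *         catch (Throwable t) { Global.preMonitorExit(this); monitorexit this; throw t; }
+ *     }
+ *
+ * (monitorenter/monitorexit are bytecode instructions, shown above as pseudocode.)
+ */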
+class MonitorMethodTransformer extends MethodNode
+{
+    private final String className;
+    private final MethodWriterSink methodWriterSink;
+    private final ChanceSupplier monitorDelayChance;
+    private final String baseName;
+    private final boolean isInstanceMethod;
+    private int returnCode;
+
+    int maxLocalParams; // double counts long/double to match asm spec
+
+    public MonitorMethodTransformer(MethodWriterSink methodWriterSink, String className, int api, int access, String name, String descriptor, String signature, String[] exceptions, ChanceSupplier monitorDelayChance)
+    {
+        super(api, access, name, descriptor, signature, exceptions);
+        this.methodWriterSink = methodWriterSink;
+        this.className = className;
+        this.baseName = name;
+        this.isInstanceMethod = (access & Opcodes.ACC_STATIC) == 0;
+        this.monitorDelayChance = monitorDelayChance;
+    }
+
+    @Override
+    public void visitInsn(int opcode)
+    {
+        switch (opcode)
+        {
+            case Opcodes.RETURN:
+            case Opcodes.ARETURN:
+            case Opcodes.IRETURN:
+            case Opcodes.FRETURN:
+            case Opcodes.LRETURN:
+            case Opcodes.DRETURN:
+                if (returnCode != 0) assert returnCode == opcode;
+                else returnCode = opcode;
+        }
+        super.visitInsn(opcode);
+    }
+
+    int returnCode()
+    {
+        return returnCode;
+    }
+
+    // TODO (cleanup): this _should_ be possible to determine purely from the method signature
+    int loadParamsAndReturnInvokeCode()
+    {
+        if (isInstanceMethod)
+            instructions.add(new IntInsnNode(Opcodes.ALOAD, 0));
+
+        ListIterator<LocalVariableNode> it = localVariables.listIterator();
+        while (it.hasNext())
+        {
+            LocalVariableNode cur = it.next();
+            if (cur.index < maxLocalParams)
+            {
+                if (!isInstanceMethod || cur.index > 0)
+                {
+                    int opcode;
+                    switch (cur.desc.charAt(0))
+                    {
+                        case 'L':
+                        case '[':
+                            opcode = Opcodes.ALOAD;
+                            break;
+                        case 'J':
+                            opcode = Opcodes.LLOAD;
+                            break;
+                        case 'D':
+                            opcode = Opcodes.DLOAD;
+                            break;
+                        case 'F':
+                            opcode = Opcodes.FLOAD;
+                            break;
+                        default:
+                            opcode = Opcodes.ILOAD;
+                            break;
+                    }
+                    instructions.add(new IntInsnNode(opcode, cur.index));
+                }
+            }
+        }
+
+        int invokeCode;
+        if (isInstanceMethod && (access & Opcodes.ACC_PRIVATE) != 0) invokeCode = Opcodes.INVOKESPECIAL;
+        else if (isInstanceMethod) invokeCode = Opcodes.INVOKEVIRTUAL;
+        else invokeCode = Opcodes.INVOKESTATIC;
+        return invokeCode;
+    }
+
+    void pushRef()
+    {
+        if (isInstanceMethod) instructions.add(new IntInsnNode(Opcodes.ALOAD, 0));
+        else instructions.add(new LdcInsnNode(org.objectweb.asm.Type.getType('L' + className + ';')));
+    }
+
+    void pop()
+    {
+        instructions.add(new InsnNode(Opcodes.POP));
+    }
+
+    void invokePreMonitorExit()
+    {
+        pushRef();
+        instructions.add(new MethodInsnNode(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "preMonitorExit", "(Ljava/lang/Object;)Ljava/lang/Object;", false));
+    }
+
+    void invokePreMonitorEnter()
+    {
+        pushRef();
+        instructions.add(new LdcInsnNode(monitorDelayChance.get()));
+        instructions.add(new MethodInsnNode(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "preMonitorEnter", "(Ljava/lang/Object;F)Ljava/lang/Object;", false));
+    }
+
+    void invokeMonitor(int insn)
+    {
+        instructions.add(new InsnNode(insn));
+    }
+
+    void reset(Label start, Label end)
+    {
+        instructions.clear();
+        tryCatchBlocks.clear();
+        if (visibleLocalVariableAnnotations != null)
+            visibleLocalVariableAnnotations.clear();
+        if (invisibleLocalVariableAnnotations != null)
+            invisibleLocalVariableAnnotations.clear();
+
+        Type[] args = Type.getArgumentTypes(desc);
+        // keep only the parameters (plus the `this` slot for instance methods); all other local variables are removed below
+        maxLocals = args.length == 1 && Type.VOID_TYPE.equals(args[0]) ? 0 : args.length;
+        if (isInstanceMethod) ++maxLocals;
+
+        // sort our local variables and remove those that aren't parameters
+        localVariables.sort(Comparator.comparingInt(c -> c.index));
+        ListIterator<LocalVariableNode> it = localVariables.listIterator();
+        while (it.hasNext())
+        {
+            LocalVariableNode cur = it.next();
+            if (cur.index >= maxLocals)
+            {
+                it.remove();
+            }
+            else
+            {
+                it.set(new LocalVariableNode(cur.name, cur.desc, cur.signature, getLabelNode(start), getLabelNode(end), cur.index));
+                switch (cur.desc.charAt(0))
+                {
+                    case 'J':
+                    case 'D':
+                        // doubles and longs take two local variable positions
+                        ++maxLocals;
+                }
+            }
+        }
+
+        // save the number of pure-parameters for use elsewhere
+        maxLocalParams = maxLocals;
+    }
+
+    void writeOriginal()
+    {
+        access &= ~Opcodes.ACC_SYNCHRONIZED;
+        access |= Opcodes.ACC_SYNTHETIC;
+        name = baseName + "$unsync";
+        methodWriterSink.writeMethod(this);
+    }
+
+    // alternative approach (with writeInnerTryCatchSynchronized)
+    @SuppressWarnings("unused")
+    void writeOuterUnsynchronized()
+    {
+        access &= ~(Opcodes.ACC_SYNCHRONIZED | Opcodes.ACC_SYNTHETIC);
+        name = baseName;
+
+        Label start = new Label();
+        Label end = new Label();
+
+        reset(start, end);
+        maxStack = maxLocalParams;
+
+        instructions.add(getLabelNode(start));
+        invokePreMonitorEnter();
+        pop();
+
+        int invokeCode = loadParamsAndReturnInvokeCode();
+        instructions.add(new MethodInsnNode(invokeCode, className, baseName + "$catch", desc));
+        instructions.add(new InsnNode(returnCode()));
+        instructions.add(getLabelNode(end));
+        methodWriterSink.writeMethod(this);
+    }
+
+    // alternative approach (with writeOuterUnsynchronized)
+    @SuppressWarnings("unused")
+    void writeInnerTryCatchSynchronized()
+    {
+        access |= Opcodes.ACC_SYNCHRONIZED | Opcodes.ACC_SYNTHETIC;
+        name = baseName + "$catch";
+
+        Label start = new Label();
+        Label normal = new Label();
+        Label except = new Label();
+        Label end = new Label();
+        reset(start, end);
+        maxStack = Math.max(maxLocalParams, returnCode == Opcodes.RETURN ? 1 : 2); // must load self or class onto stack, and return value (if any)
+        ++maxLocals;
+        tryCatchBlocks.add(new TryCatchBlockNode(getLabelNode(start), getLabelNode(normal), getLabelNode(except), null));
+        instructions.add(getLabelNode(start));
+        int invokeCode = loadParamsAndReturnInvokeCode();
+        instructions.add(new MethodInsnNode(invokeCode, className, baseName + "$unsync", desc));
+        instructions.add(getLabelNode(normal));
+        invokePreMonitorExit();
+        instructions.add(new InsnNode(returnCode()));
+        instructions.add(getLabelNode(except));
+        instructions.add(new FrameNode(Opcodes.F_SAME1, 0, null, 1, new Object[]{ "java/lang/Throwable" }));
+        instructions.add(new IntInsnNode(Opcodes.ASTORE, maxLocalParams));
+        invokePreMonitorExit();
+        instructions.add(new IntInsnNode(Opcodes.ALOAD, maxLocalParams));
+        instructions.add(new InsnNode(Opcodes.ATHROW));
+        instructions.add(getLabelNode(end));
+        methodWriterSink.writeSyntheticMethod(MONITOR, this);
+    }
+
+    void writeTryCatchMonitorEnterExit()
+    {
+        access |= Opcodes.ACC_SYNTHETIC;
+        name = baseName;
+
+        Label start = new Label();
+        Label inmonitor = new Label();
+        Label normal = new Label();
+        Label except = new Label(); // exceptional (body threw)
+        Label normalRetExcept = new Label(); // normal return failed
+        Label exceptRetNormal = new Label(); // exceptional return success
+        Label exceptRetExcept = new Label(); // exceptional return failed
+        Label end = new Label();
+        reset(start, end);
+        ++maxLocals; // add a local variable slot to save any exceptions into (at maxLocalParams position)
+        maxStack = Math.max(maxLocalParams, returnCode == Opcodes.RETURN ? 2 : 3); // must load self or class onto stack, and return value (if any)
+        tryCatchBlocks.add(new TryCatchBlockNode(getLabelNode(inmonitor), getLabelNode(normal), getLabelNode(except), null));
+        tryCatchBlocks.add(new TryCatchBlockNode(getLabelNode(normal), getLabelNode(normalRetExcept), getLabelNode(normalRetExcept), null));
+        tryCatchBlocks.add(new TryCatchBlockNode(getLabelNode(except), getLabelNode(exceptRetNormal), getLabelNode(exceptRetExcept), null));
+        // preMonitorEnter
+        // monitorenter
+        instructions.add(getLabelNode(start));
+        invokePreMonitorEnter();
+        invokeMonitor(Opcodes.MONITORENTER);
+        {
+            // try1 { val = original();
+            instructions.add(getLabelNode(inmonitor));
+            int invokeCode = loadParamsAndReturnInvokeCode();
+            instructions.add(new MethodInsnNode(invokeCode, className, baseName + "$unsync", desc));
+            {
+                // try2 { preMonitorExit(); monitorexit; return val; }
+                instructions.add(getLabelNode(normal));
+                invokePreMonitorExit();
+                invokeMonitor(Opcodes.MONITOREXIT);
+                instructions.add(new InsnNode(returnCode())); // success
+                // }
+                // catch2 { monitorexit; throw }
+                instructions.add(getLabelNode(normalRetExcept));
+                instructions.add(new FrameNode(Opcodes.F_SAME1, 0, null, 1, new Object[]{ "java/lang/Throwable" }));
+                instructions.add(new IntInsnNode(Opcodes.ASTORE, maxLocalParams));
+                pushRef();
+                invokeMonitor(Opcodes.MONITOREXIT);
+                instructions.add(new IntInsnNode(Opcodes.ALOAD, maxLocalParams));
+                instructions.add(new InsnNode(Opcodes.ATHROW));
+                // }
+            }
+            // catch1 { try3 { preMonitorExit; monitorexit; throw
+            instructions.add(getLabelNode(except));
+            instructions.add(new FrameNode(Opcodes.F_SAME1, 0, null, 1, new Object[]{ "java/lang/Throwable" }));
+            instructions.add(new IntInsnNode(Opcodes.ASTORE, maxLocalParams));
+            invokePreMonitorExit();
+            invokeMonitor(Opcodes.MONITOREXIT);
+            instructions.add(new IntInsnNode(Opcodes.ALOAD, maxLocalParams));
+            instructions.add(getLabelNode(exceptRetNormal));
+            instructions.add(new InsnNode(Opcodes.ATHROW));
+            instructions.add(getLabelNode(exceptRetExcept));
+            instructions.add(new FrameNode(Opcodes.F_SAME1, 0, null, 1, new Object[]{ "java/lang/Throwable" }));
+            instructions.add(new IntInsnNode(Opcodes.ASTORE, maxLocalParams));
+            pushRef();
+            invokeMonitor(Opcodes.MONITOREXIT);
+            instructions.add(new IntInsnNode(Opcodes.ALOAD, maxLocalParams));
+            instructions.add(new InsnNode(Opcodes.ATHROW));
+        }
+        instructions.add(getLabelNode(end));
+        methodWriterSink.writeSyntheticMethod(MONITOR, this);
+    }
+
+    @Override
+    public void visitEnd()
+    {
+        writeOriginal();
+        writeTryCatchMonitorEnterExit();
+        super.visitEnd();
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisFieldKind.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisFieldKind.java
new file mode 100644
index 0000000..d26f196
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisFieldKind.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicLongFieldUpdater;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+public enum NemesisFieldKind
+{
+    SIMPLE(),
+    ATOMICX(AtomicBoolean.class, AtomicInteger.class, AtomicLong.class, AtomicReference.class),
+    ATOMICUPDATERX(AtomicIntegerFieldUpdater.class, AtomicLongFieldUpdater.class, AtomicReferenceFieldUpdater.class);
+
+    public interface Selector
+    {
+        NemesisFieldKind get(String className, String fieldName);
+    }
+
+    final Set<Class<?>> classes;
+
+    NemesisFieldKind(Class<?> ... classes)
+    {
+        this.classes = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(classes)));
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisGenerator.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisGenerator.java
new file mode 100644
index 0000000..814348b
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisGenerator.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.tree.InsnNode;
+import org.objectweb.asm.tree.LabelNode;
+import org.objectweb.asm.tree.LdcInsnNode;
+import org.objectweb.asm.tree.MethodInsnNode;
+import org.objectweb.asm.tree.MethodNode;
+
+/**
+ * Generate a new static method in the class with a randomly generated constant chance of triggering the nemesis.
+ * Also generate the invocation of this method at the relevant point(s).
+ *
+ * A static method with no parameters or return values is created, so that only the method invocation instruction is
+ * needed in the original method, simplifying the transformation.
+ */
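+/*
+ * Illustrative sketch only (hypothetical source method `poll`): for the first nemesis point woven into the first
+ * method named poll, a definition roughly equivalent to
+ *
+ *     private static void poll$0$$0$nemesis() { Global.nemesis(chance); }   // chance is the LDC constant below
+ *
+ * is emitted into the class, and a bare `poll$0$$0$nemesis();` invocation is inserted at the nemesis point.
+ */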
+class NemesisGenerator extends MethodNode
+{
+    private final ChanceSupplier chanceSupplier;
+
+    private final String className;
+    private String baseName;
+    private int methodsCounter = 0; // avoid nemesis method name clashes when weaving two or more methods with same name
+    private int withinMethodCounter = 0;
+    private final LdcInsnNode ldc = new LdcInsnNode(null);
+
+    NemesisGenerator(int api, String className, ChanceSupplier chanceSupplier)
+    {
+        super(api, Opcodes.ACC_STATIC | Opcodes.ACC_SYNTHETIC | Opcodes.ACC_PRIVATE, null, "()V", "", null);
+        this.chanceSupplier = chanceSupplier;
+        this.className = className;
+        this.maxLocals = 0;
+        this.maxStack = 1;
+        instructions.add(new LabelNode());
+        instructions.add(ldc);
+        instructions.add(new MethodInsnNode(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", "nemesis", "(F)V", false));
+        instructions.add(new LabelNode());
+        instructions.add(new InsnNode(Opcodes.RETURN));
+    }
+
+    void newMethod(String name)
+    {
+        this.baseName = name.replaceAll("[<>]", "") + '$' + (methodsCounter++) + '$';
+        this.withinMethodCounter = 0;
+    }
+
+    void generateAndCall(TransformationKind kind, ClassTransformer writeDefinitionTo, MethodVisitor writeInvocationTo)
+    {
+        this.name = baseName + '$' + (withinMethodCounter++) + "$nemesis";
+        ldc.cst = chanceSupplier.get();
+        writeDefinitionTo.writeSyntheticMethod(kind, this);
+        writeInvocationTo.visitMethodInsn(Opcodes.INVOKESTATIC, className, name, "()V", false);
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisTransformer.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisTransformer.java
new file mode 100644
index 0000000..94ed1f4
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/NemesisTransformer.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+
+import static org.apache.cassandra.simulator.asm.TransformationKind.FIELD_NEMESIS;
+import static org.apache.cassandra.simulator.asm.TransformationKind.SIGNAL_NEMESIS;
+
+/**
+ * Insert nemesis points at all obvious thread signalling points (execution and blocking primitive methods),
+ * as well as at accesses to any fields annotated with {@link org.apache.cassandra.utils.Nemesis}.
+ *
+ * If the annotated field is an AtomicX or AtomicXFieldUpdater, we insert nemesis points either side of any
+ * subsequent invocation of a method on the relevant atomic type within the same method body.
+ *
+ * TODO (config): permit Nemesis on a class as well as a field, so as to mark all (at least volatile or atomic) members
+ */
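+/*
+ * Illustrative sketch only (hypothetical fields): given
+ *
+ *     @Nemesis volatile int state;            // NemesisFieldKind.SIMPLE
+ *     @Nemesis final AtomicInteger counter;   // NemesisFieldKind.ATOMICX
+ *
+ * a nemesis call is inserted before writes to (and after reads of) `state`, while reading `counter` switches on
+ * nemesis calls either side of subsequent AtomicInteger method invocations in the same method body.
+ */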
+class NemesisTransformer extends MethodVisitor
+{
+    private final ClassTransformer transformer;
+    final NemesisGenerator generator;
+    final NemesisFieldKind.Selector nemesisFieldSelector;
+
+    // for simplicity, we activate nemesis for all atomic operations on the relevant type once any such
+    // field is loaded in a method
+    Set<String> onForTypes;
+
+    public NemesisTransformer(ClassTransformer transformer, int api, String name, MethodVisitor parent, NemesisGenerator generator, NemesisFieldKind.Selector nemesisFieldSelector)
+    {
+        super(api, parent);
+        this.transformer = transformer;
+        this.generator = generator;
+        this.nemesisFieldSelector = nemesisFieldSelector;
+        generator.newMethod(name);
+    }
+
+    @Override
+    public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface)
+    {
+        boolean nemesisAfter = false;
+        if (isInterface && opcode == Opcodes.INVOKEINTERFACE
+        && (owner.startsWith("org/apache/cassandra/concurrent") || owner.startsWith("org/apache/cassandra/utils/concurrent")) && (
+               (owner.equals("org/apache/cassandra/utils/concurrent/CountDownLatch") && name.equals("decrement"))
+            || (owner.equals("org/apache/cassandra/utils/concurrent/Condition") && name.equals("signal"))
+            || (owner.equals("org/apache/cassandra/utils/concurrent/Semaphore") && name.equals("release"))
+            || ((owner.equals("org/apache/cassandra/concurrent/ExecutorPlus")
+                 || owner.equals("org/apache/cassandra/concurrent/LocalAwareExecutorPlus")
+                 || owner.equals("org/apache/cassandra/concurrent/ScheduledExecutorPlus")
+                 || owner.equals("org/apache/cassandra/concurrent/SequentialExecutorPlus")
+                 || owner.equals("org/apache/cassandra/concurrent/LocalAwareSequentialExecutorPlus")
+                ) && (name.equals("execute") || name.equals("submit") || name.equals("maybeExecuteImmediately")))
+        ))
+        {
+            generateAndCall(SIGNAL_NEMESIS);
+        }
+        else if ((opcode == Opcodes.INVOKESPECIAL || opcode == Opcodes.INVOKEVIRTUAL)
+                 && (onForTypes != null && onForTypes.contains(owner)))
+        {
+            nemesisAfter = true;
+            generateAndCall(FIELD_NEMESIS);
+        }
+
+        super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
+        if (nemesisAfter)
+            generateAndCall(FIELD_NEMESIS);
+    }
+
+    @Override
+    public void visitFieldInsn(int opcode, String owner, String name, String descriptor)
+    {
+        boolean nemesisAfter = false;
+        NemesisFieldKind nemesis = nemesisFieldSelector.get(owner, name);
+        if (nemesis != null)
+        {
+            switch (nemesis)
+            {
+                case SIMPLE:
+                    switch (opcode)
+                    {
+                        default:
+                            throw new AssertionError();
+                        case Opcodes.PUTFIELD:
+                        case Opcodes.PUTSTATIC:
+                            generateAndCall(FIELD_NEMESIS);
+                            break;
+                        case Opcodes.GETFIELD:
+                        case Opcodes.GETSTATIC:
+                            nemesisAfter = true;
+                    }
+                    break;
+                case ATOMICX:
+                case ATOMICUPDATERX:
+                    switch (opcode)
+                    {
+                        case Opcodes.GETFIELD:
+                        case Opcodes.GETSTATIC:
+                            if (onForTypes == null)
+                                onForTypes = new HashSet<>();
+                            onForTypes.add(descriptor.substring(1, descriptor.length() - 1));
+                    }
+                    break;
+            }
+        }
+        super.visitFieldInsn(opcode, owner, name, descriptor);
+        if (nemesisAfter)
+            generateAndCall(FIELD_NEMESIS);
+    }
+
+    private void generateAndCall(TransformationKind kind)
+    {
+        generator.generateAndCall(kind, transformer, mv);
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/ShadowingTransformer.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/ShadowingTransformer.java
new file mode 100644
index 0000000..ca42f24
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/ShadowingTransformer.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.EnumSet;
+
+import org.objectweb.asm.AnnotationVisitor;
+import org.objectweb.asm.FieldVisitor;
+import org.objectweb.asm.Handle;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Type;
+import org.objectweb.asm.TypePath;
+
+/**
+ * A SORT OF general purpose facility for creating a copy of a system class that we want to transform
+ * and use in place of the system class without transforming the system class itself.
+ *
+ * NOTE that this does not implement the translation perfectly, so care must be taken when extending its usage.
+ * Some things not handled:
+ *   - generic type signatures in class files
+ *   - 
+ *
+ * While it is possible and safe in principle to modify ConcurrentHashMap in particular, in practice it messes
+ * with class loading, as ConcurrentHashMap is used widely within the JDK, including for things like class loaders
+ * and method handle caching. It seemed altogether more tractable and safe to selectively replace ConcurrentHashMap
+ * with a shadowed variety.
+ *
+ * This approach makes some rough assumptions, namely that any public method on the root class should accept the
+ * shadowed type, but that any inner class may safely use the shadow type.
+ */
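+/*
+ * Illustrative sketch only: shadowing java/util/concurrent/ConcurrentHashMap under a hypothetical shadow root type
+ * org/apache/cassandra/simulator/ShadowedConcurrentHashMap rewrites type, field, method, frame and ldc references
+ * so that the original class and its inner classes (ConcurrentHashMap$Node, ...) resolve to the shadow copies,
+ * leaving the JDK's own ConcurrentHashMap untouched.
+ */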
+public class ShadowingTransformer extends ClassTransformer
+{
+    final String originalType;
+    final String originalRootType;
+    final String shadowRootType;
+    final String originalOuterTypePrefix;
+    final String shadowOuterTypePrefix;
+    String originalSuperName;
+
+    ShadowingTransformer(int api, String originalType, String shadowType, String originalRootType, String shadowRootType, String originalOuterTypePrefix, String shadowOuterTypePrefix, EnumSet<Flag> flags, ChanceSupplier monitorDelayChance, NemesisGenerator nemesis, NemesisFieldKind.Selector nemesisFieldSelector, Hashcode insertHashcode)
+    {
+        super(api, shadowType, flags, monitorDelayChance, nemesis, nemesisFieldSelector, insertHashcode, null);
+        this.originalType = originalType;
+        this.originalRootType = originalRootType;
+        this.shadowRootType = shadowRootType;
+        this.originalOuterTypePrefix = originalOuterTypePrefix;
+        this.shadowOuterTypePrefix = shadowOuterTypePrefix;
+    }
+
+    private String toShadowType(String type)
+    {
+        if (type.startsWith("["))
+            return toShadowTypeDescriptor(type);
+        else if (type.equals(originalRootType))
+            type = shadowRootType;
+        else if (type.startsWith(originalOuterTypePrefix))
+            type = shadowOuterTypePrefix + type.substring(originalOuterTypePrefix.length());
+        else
+            return type;
+
+        witness(TransformationKind.SHADOW);
+        return type;
+    }
+
+    private String toShadowTypeDescriptor(String owner)
+    {
+        return toShadowTypeDescriptor(owner, false);
+    }
+
+    private String toShadowTypeDescriptor(String desc, boolean innerTypeOnly)
+    {
+        int i = 0;
+        while (i < desc.length() && desc.charAt(i) == '[') ++i;
+        if (desc.charAt(i) != 'L')
+            return desc;
+        ++i;
+
+        if (!innerTypeOnly && desc.regionMatches(i, originalRootType, 0, originalRootType.length()) && desc.length() == originalRootType.length() + 1 + i && desc.charAt(i + originalRootType.length()) == ';')
+            desc = desc.substring(0, i) + shadowRootType + ';';
+        else if (desc.regionMatches(i, originalOuterTypePrefix, 0, originalOuterTypePrefix.length()))
+            desc = desc.substring(0, i) + shadowOuterTypePrefix + desc.substring(i + originalOuterTypePrefix.length());
+        else
+            return desc;
+
+        witness(TransformationKind.SHADOW);
+        return desc;
+    }
+
+    private Type toShadowTypeDescriptor(Type type)
+    {
+        String in = type.getDescriptor();
+        String out = toShadowTypeDescriptor(in, false);
+        if (in == out) return type;
+        return Type.getType(out);
+    }
+
+    private Type toShadowInnerTypeDescriptor(Type type)
+    {
+        String in = type.getDescriptor();
+        String out = toShadowTypeDescriptor(in, true);
+        if (in == out) return type;
+        return Type.getType(out);
+    }
+
+    Object[] toShadowTypes(Object[] in)
+    {
+        Object[] out = null;
+        for (int i = 0 ; i < in.length ; ++i)
+        {
+            if (in[i] instanceof String)
+            {
+                // TODO (broader correctness): in some cases we want the original type, and others the new type
+                String inv = (String) in[i];
+                String outv = toShadowType(inv);
+                if (inv != outv)
+                {
+                    if (out == null)
+                    {
+                        out = new Object[in.length];
+                        System.arraycopy(in, 0, out, 0, i);
+                    }
+                    out[i] = outv;
+                    continue;
+                }
+            }
+
+            if (out != null)
+                out[i] = in[i];
+        }
+        return out != null ? out : in;
+    }
+
+    String methodDescriptorToShadowInnerArgumentTypes(String descriptor)
+    {
+        Type ret = toShadowTypeDescriptor(Type.getReturnType(descriptor));
+        Type[] args = Type.getArgumentTypes(descriptor);
+        for (int i = 0 ; i < args.length ; ++i)
+            args[i] = toShadowInnerTypeDescriptor(args[i]);
+        return Type.getMethodDescriptor(ret, args);
+    }
+
+    String methodDescriptorToShadowTypes(String descriptor)
+    {
+        Type ret = toShadowTypeDescriptor(Type.getReturnType(descriptor));
+        Type[] args = Type.getArgumentTypes(descriptor);
+        for (int i = 0 ; i < args.length ; ++i)
+            args[i] = toShadowTypeDescriptor(args[i]);
+        return Type.getMethodDescriptor(ret, args);
+    }
+
+    class ShadowingMethodVisitor extends MethodVisitor
+    {
+        final boolean isConstructor;
+        public ShadowingMethodVisitor(int api, boolean isConstructor, MethodVisitor methodVisitor)
+        {
+            super(api, methodVisitor);
+            this.isConstructor = isConstructor;
+        }
+
+        @Override
+        public AnnotationVisitor visitTypeAnnotation(int typeRef, TypePath typePath, String descriptor, boolean visible)
+        {
+            return super.visitTypeAnnotation(typeRef, typePath, descriptor, visible);
+        }
+
+        @Override
+        public void visitFieldInsn(int opcode, String owner, String name, String descriptor)
+        {
+            super.visitFieldInsn(opcode, toShadowType(owner), name, toShadowTypeDescriptor(descriptor));
+        }
+
+        @Override
+        public void visitTypeInsn(int opcode, String type)
+        {
+            // TODO (broader correctness): in some cases we want the original type, and others the new type
+            super.visitTypeInsn(opcode, toShadowType(type));
+        }
+
+        @Override
+        public void visitLocalVariable(String name, String descriptor, String signature, Label start, Label end, int index)
+        {
+            super.visitLocalVariable(name, toShadowTypeDescriptor(descriptor), signature, start, end, index);
+        }
+
+        @Override
+        public void visitFrame(int type, int numLocal, Object[] local, int numStack, Object[] stack)
+        {
+            super.visitFrame(type, numLocal, toShadowTypes(local), numStack, toShadowTypes(stack));
+        }
+
+        @Override
+        public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface)
+        {
+            // TODO (broader correctness): this is incorrect, but will do for ConcurrentHashMap (no general guarantee of same constructors)
+            if (owner.equals(originalRootType)) descriptor = methodDescriptorToShadowInnerArgumentTypes(descriptor);
+            else descriptor = methodDescriptorToShadowTypes(descriptor);
+            if (isConstructor && name.equals("<init>") && owner.equals(originalSuperName) && originalType.equals(originalRootType)) owner = originalRootType;
+            else owner = toShadowType(owner);
+            super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
+        }
+
+        @Override
+        public void visitInvokeDynamicInsn(String name, String descriptor, Handle bootstrapMethodHandle, Object... bootstrapMethodArguments)
+        {
+            if (bootstrapMethodHandle.getOwner().startsWith(originalRootType))
+            {
+                bootstrapMethodHandle = new Handle(bootstrapMethodHandle.getTag(), toShadowType(bootstrapMethodHandle.getOwner()),
+                                                   bootstrapMethodHandle.getName(), bootstrapMethodHandle.getDesc(),
+                                                   bootstrapMethodHandle.isInterface());
+            }
+            super.visitInvokeDynamicInsn(name, descriptor, bootstrapMethodHandle, bootstrapMethodArguments);
+        }
+
+        @Override
+        public void visitLdcInsn(Object value)
+        {
+            if (value instanceof Type)
+                value = toShadowTypeDescriptor((Type) value);
+            super.visitLdcInsn(value);
+        }
+    }
+
+    @Override
+    public void visitInnerClass(String name, String outerName, String innerName, int access)
+    {
+        super.visitInnerClass(name, toShadowType(outerName), innerName, access);
+    }
+
+    @Override
+    public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value)
+    {
+        return super.visitField(access, name, toShadowTypeDescriptor(descriptor), signature, value);
+    }
+
+    @Override
+    public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions)
+    {
+        if (originalType.equals(originalRootType)) descriptor = methodDescriptorToShadowInnerArgumentTypes(descriptor);
+        else descriptor = methodDescriptorToShadowTypes(descriptor);
+        return new ShadowingMethodVisitor(api, name.equals("<init>"), super.visitMethod(access, name, descriptor, signature, exceptions));
+    }
+
+    @Override
+    public void visit(int version, int access, String name, String signature, String superName, String[] interfaces)
+    {
+        originalSuperName = superName;
+        if (originalType.equals(originalRootType))
+        {
+            superName = name;
+            name = shadowRootType;
+        }
+        else
+        {
+            name = toShadowType(name);
+            superName = toShadowType(superName);
+        }
+
+        super.visit(version, access, name, signature, superName, interfaces);
+    }
+
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/ThreadLocalRandomCheckTransformer.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/ThreadLocalRandomCheckTransformer.java
new file mode 100644
index 0000000..ba5cefb
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/ThreadLocalRandomCheckTransformer.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+
+/**
+ * Inserts a call to {@code InterceptorOfSystemMethods.Global.threadLocalRandomCheck} immediately before each
+ * {@code int} or {@code long} return instruction, passing the value about to be returned through the interceptor.
+ * The redirected methods take and return the same value, so the operand stack is left unchanged and the original
+ * return instruction remains in place.
+ *
+ * Used when weaving ThreadLocalRandom so the simulator can observe and check the values it hands out.
+ */
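+/*
+ * Illustrative sketch only: a woven method ending in `return seed;` (an int or long) behaves as if it ended in
+ * `return Global.threadLocalRandomCheck(seed);`, since the interceptor call consumes and re-produces the value
+ * already on the stack immediately before the return instruction.
+ */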
+class ThreadLocalRandomCheckTransformer extends MethodVisitor
+{
+    public ThreadLocalRandomCheckTransformer(int api, MethodVisitor parent)
+    {
+        super(api, parent);
+    }
+
+    @Override
+    public void visitInsn(int opcode)
+    {
+        switch (opcode)
+        {
+            case Opcodes.IRETURN:
+                super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global",
+                                      "threadLocalRandomCheck", "(I)I", false);
+                break;
+            case Opcodes.LRETURN:
+                super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global",
+                                      "threadLocalRandomCheck", "(J)J", false);
+                break;
+        }
+        super.visitInsn(opcode);
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/TransformationKind.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/TransformationKind.java
new file mode 100644
index 0000000..841a28c
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/TransformationKind.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.Arrays;
+import java.util.List;
+
+public enum TransformationKind
+{
+    MONITOR, SYNCHRONIZED, LOCK_SUPPORT, GLOBAL_METHOD, SIGNAL_NEMESIS, FIELD_NEMESIS, SYNTHETIC_METHOD, HASHCODE,
+    IDENTITY_HASH_MAP, CONCURRENT_HASH_MAP, SHADOW;
+    static final List<TransformationKind> VALUES = Arrays.asList(values());
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/Utils.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/Utils.java
new file mode 100644
index 0000000..be2ef6c
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/Utils.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+import org.objectweb.asm.AnnotationVisitor;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+import org.objectweb.asm.tree.AbstractInsnNode;
+
+import static org.apache.cassandra.simulator.asm.InterceptClasses.BYTECODE_VERSION;
+import static org.objectweb.asm.Opcodes.ATHROW;
+import static org.objectweb.asm.Opcodes.F_SAME1;
+import static org.objectweb.asm.Opcodes.INVOKESTATIC;
+
+public class Utils
+{
+    public static String toInternalName(Class<?> clazz)
+    {
+        return toInternalName(clazz.getName());
+    }
+
+    public static String toInternalName(String className)
+    {
+        return className.replace('.', '/');
+    }
+
+    public static String toPath(Class<?> clazz)
+    {
+        return toInternalName(clazz) + ".class";
+    }
+
+    public static byte[] readDefinition(Class<?> clazz) throws IOException
+    {
+        return readDefinition(toPath(clazz));
+    }
+
+    public static byte[] readDefinition(String path) throws IOException
+    {
+        byte[] bytes = new byte[1024];
+        try (InputStream in = ClassLoader.getSystemResourceAsStream(path))
+        {
+            int count = 0;
+            while (true)
+            {
+                int add = in.read(bytes, count, bytes.length - count);
+                if (add < 0)
+                    break;
+                if (add == 0)
+                    bytes = Arrays.copyOf(bytes, bytes.length * 2);
+                count += add;
+            }
+            return Arrays.copyOf(bytes, count);
+        }
+    }
+
+    /**
+     * Generate a proxy method call, i.e. one whose only job is forwarding the parameters to a different method
+     * (and perhaps within a superclass, or another class entirely if static) with the same signature but perhaps
+     * different properties.
+     */
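+    // Illustrative sketch only (hypothetical callee): proxying an instance method with descriptor (ILjava/lang/String;)V
+    // emits ALOAD 0, ILOAD 1, ALOAD 2, then the invoke opcode chosen from `access` (INVOKESPECIAL for private or
+    // <init>, INVOKEVIRTUAL otherwise, INVOKESTATIC for static), with the matching return opcode (RETURN here)
+    // left for the callers below to emit; the computed sizes and return opcode are packed into the returned long.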
+    private static long visitProxyCall(MethodVisitor visitor, String calleeClassName, String calleeMethodName, String descriptor, int access, boolean isInstanceMethod, boolean isInterface)
+    {
+        Type[] argTypes = Type.getArgumentTypes(descriptor);
+        Type returnType = Type.getReturnType(descriptor);
+
+        int stackSize = argTypes.length;
+        int localsSize = 0;
+        if (isInstanceMethod)
+        {
+            visitor.visitIntInsn(Opcodes.ALOAD, 0);
+            localsSize += 2;
+            stackSize += 1;
+        }
+
+        int i = 1;
+        for (Type type : argTypes)
+        {
+            int opcode;
+            switch (type.getDescriptor().charAt(0))
+            {
+                case 'L':
+                case '[':
+                    opcode = Opcodes.ALOAD;
+                    localsSize += 1;
+                    break;
+                case 'J':
+                    opcode = Opcodes.LLOAD;
+                    localsSize += 2;
+                    break;
+                case 'D':
+                    opcode = Opcodes.DLOAD;
+                    localsSize += 2;
+                    break;
+                case 'F':
+                    opcode = Opcodes.FLOAD;
+                    localsSize += 1;
+                    break;
+                default:
+                    opcode = Opcodes.ILOAD;
+                    localsSize += 1;
+                    break;
+            }
+            visitor.visitIntInsn(opcode, i++);
+        }
+
+        int returnCode;
+        switch (returnType.getDescriptor().charAt(0))
+        {
+            case 'L':
+            case '[':
+                returnCode = Opcodes.ARETURN;
+                localsSize = Math.max(localsSize, 1);
+                break;
+            case 'J':
+                returnCode = Opcodes.LRETURN;
+                localsSize = Math.max(localsSize, 2);
+                break;
+            case 'D':
+                returnCode = Opcodes.DRETURN;
+                localsSize = Math.max(localsSize, 2);
+                break;
+            case 'F':
+                returnCode = Opcodes.FRETURN;
+                localsSize = Math.max(localsSize, 1);
+                break;
+            case 'V':
+                returnCode = Opcodes.RETURN;
+                break;
+            default:
+                returnCode = Opcodes.IRETURN;
+                localsSize = Math.max(localsSize, 1);
+                break;
+        }
+
+        int invokeCode;
+        if (isInstanceMethod && (access & Opcodes.ACC_PRIVATE) != 0 || calleeMethodName.equals("<init>")) invokeCode = Opcodes.INVOKESPECIAL;
+        else if (isInstanceMethod) invokeCode = Opcodes.INVOKEVIRTUAL;
+        else invokeCode = Opcodes.INVOKESTATIC;
+
+        visitor.visitMethodInsn(invokeCode, calleeClassName, calleeMethodName, descriptor, isInterface);
+
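+        // pack the computed sizes and return opcode into a single long: locals size in bits 0-27, stack size in
+        // bits 28-55, return opcode in bits 56-63 (decoded by generateProxyCall / generateTryFinallyProxyCall)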
+        return localsSize | (((long)stackSize) << 28) | (((long) returnCode) << 56);
+    }
+
+    /**
+     * Generate a proxy method call, i.e. one whose only job is forwarding the parameters to a different method
+     * (and perhaps within a superclass, or another class entirely if static) with the same signature but perhaps
+     * different properties.
+     */
+    public static void generateProxyCall(MethodVisitor visitor, String calleeClassName, String calleeMethodName, String descriptor, int access, boolean isInstanceMethod, boolean isInterface)
+    {
+        Label start = new Label(), end = new Label();
+        visitor.visitLabel(start);
+
+        long sizesAndReturnCode = visitProxyCall(visitor, calleeClassName, calleeMethodName, descriptor, access, isInstanceMethod, isInterface);
+        visitor.visitLabel(end);
+        visitor.visitInsn((int)(sizesAndReturnCode >>> 56) & 0xff);
+        visitor.visitMaxs((int)(sizesAndReturnCode >>> 28) & 0xfffffff, (int)(sizesAndReturnCode & 0xfffffff));
+        visitor.visitEnd();
+    }
+
+    /**
+     * Generate a proxy method call, i.e. one whose only job is forwarding the parameters to a different method
+     * (and perhaps within a superclass, or another class entirely if static) with the same signature but perhaps
+     * different properties.
+     *
+     * The proxied call is wrapped in a try/catch block: the provided setup instructions are emitted before it, and
+     * the cleanup instructions are emitted both on the normal return path and in the catch handler (which rethrows).
+     * As designed these instructions must not assign any local variables, and the catch block must be exception-free.
+     */
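+    // Roughly equivalent shape (sketch only): setup; try { return callee(args); } catch (Throwable t) { cleanup; throw t; }
+    // with the cleanup instructions also emitted on the normal return path just before the return.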
+    public static void generateTryFinallyProxyCall(MethodVisitor visitor, String calleeClassName, String calleeMethodName, String descriptor, int access, boolean isInstanceMethod, boolean isInterface,
+                                                   List<AbstractInsnNode> setup, List<AbstractInsnNode> cleanup)
+    {
+        Label startMethod = new Label(), startTry = new Label(), endTry = new Label(), startCatch = new Label(), endMethod = new Label();
+        visitor.visitLabel(startMethod);
+        visitor.visitTryCatchBlock(startTry, endTry, startCatch, null);
+        setup.forEach(i -> i.accept(visitor));
+        visitor.visitLabel(startTry);
+        long sizesAndReturnCode = visitProxyCall(visitor, calleeClassName, calleeMethodName, descriptor, access, isInstanceMethod, isInterface);
+        int returnCode = (int)(sizesAndReturnCode >>> 56) & 0xff;
+        visitor.visitLabel(endTry);
+        cleanup.forEach(i -> i.accept(visitor));
+        visitor.visitInsn(returnCode);
+        visitor.visitLabel(startCatch);
+        visitor.visitFrame(F_SAME1, 0, null, 1, new Object[] { "java/lang/Throwable" });
+        cleanup.forEach(i -> i.accept(visitor));
+        visitor.visitInsn(ATHROW);
+        visitor.visitLabel(endMethod);
+        if (isInstanceMethod)
+            visitor.visitLocalVariable("this", "L" + calleeClassName + ';', null, startMethod, endMethod, 0);
+        visitor.visitMaxs((int)(sizesAndReturnCode >>> 28) & 0xfffffff, (int)(sizesAndReturnCode & 0xfffffff));
+        visitor.visitEnd();
+    }
+
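+    /**
+     * If the visited annotation is {@code @Simulate}, parse its {@code with}/{@code without} enum arrays and report
+     * each listed {@link Flag} to the supplied consumer ({@code true} for with, {@code false} for without);
+     * otherwise return the wrapped visitor unchanged.
+     */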
+    public static AnnotationVisitor checkForSimulationAnnotations(int api, String descriptor, AnnotationVisitor wrap, BiConsumer<Flag, Boolean> annotations)
+    {
+        if (!descriptor.equals("Lorg/apache/cassandra/utils/Simulate;"))
+            return wrap;
+
+        return new AnnotationVisitor(api, wrap)
+        {
+            @Override
+            public AnnotationVisitor visitArray(String name)
+            {
+                if (!name.equals("with") && !name.equals("without"))
+                    return super.visitArray(name);
+
+                boolean add = name.equals("with");
+                return new AnnotationVisitor(api, super.visitArray(name))
+                {
+                    @Override
+                    public void visitEnum(String name, String descriptor, String value)
+                    {
+                        super.visitEnum(name, descriptor, value);
+                        if (descriptor.equals("Lorg/apache/cassandra/utils/Simulate$With;"))
+                            annotations.accept(Flag.valueOf(value), add);
+                    }
+                };
+            }
+        };
+    }
+
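+    /**
+     * Wraps a MethodVisitor so that virtual invocations of Object.hashCode() are redirected to the static
+     * System.identityHashCode(Object), bypassing any overridden hashCode() when rendering toString()-style output.
+     */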
+    public static MethodVisitor deterministicToString(MethodVisitor wrap)
+    {
+        return new MethodVisitor(BYTECODE_VERSION, wrap)
+        {
+            @Override
+            public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface)
+            {
+                if (name.equals("hashCode") && owner.equals("java/lang/Object"))
+                {
+                    super.visitMethodInsn(INVOKESTATIC, "java/lang/System", "identityHashCode", "(Ljava/lang/Object;)I", false);
+                }
+                else
+                {
+                    super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
+                }
+            }
+        };
+    }
+
+    public static void visitEachRefType(String descriptor, Consumer<String> forEach)
+    {
+        Type[] argTypes = Type.getArgumentTypes(descriptor);
+        Type retType = Type.getReturnType(descriptor);
+        for (Type argType : argTypes)
+            visitIfRefType(argType.getDescriptor(), forEach);
+        visitIfRefType(retType.getDescriptor(), forEach);
+    }
+
+    public static void visitIfRefType(String descriptor, Consumer<String> forEach)
+    {
+        if (descriptor.charAt(0) != '[' && descriptor.charAt(descriptor.length() - 1) != ';')
+        {
+            if (descriptor.length() > 1)
+                forEach.accept(descriptor);
+        }
+        else
+        {
+            int i = 1;
+            while (descriptor.charAt(i) == '[') ++i;
+            if (descriptor.charAt(i) == 'L')
+                forEach.accept(descriptor.substring(i + 1, descriptor.length() - 1));
+        }
+    }
+}
diff --git a/test/simulator/asm/org/apache/cassandra/simulator/asm/package-info.java b/test/simulator/asm/org/apache/cassandra/simulator/asm/package-info.java
new file mode 100644
index 0000000..fe9394b
--- /dev/null
+++ b/test/simulator/asm/org/apache/cassandra/simulator/asm/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package has no dependencies besides asm.*\.jar (and should perhaps be a separate module)
+ *
+ * This is to permit it to be built as a standalone javaagent.
+ */
+package org.apache.cassandra.simulator.asm;
+
diff --git a/test/simulator/bootstrap/org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods.java b/test/simulator/bootstrap/org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods.java
new file mode 100644
index 0000000..645bbb8
--- /dev/null
+++ b/test/simulator/bootstrap/org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods.java
@@ -0,0 +1,415 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.lang.reflect.Field;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
+import java.util.function.ToIntFunction;
+
+import sun.misc.Unsafe;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * A super-interface of InterceptorOfGlobalMethods exposing those methods we might want to use for byte-weaving classes
+ * loaded by the system classloader (such as concurrency primitives). Today we byte weave Enum, Object, Random,
+ * ThreadLocalRandom, ConcurrentHashMap (only for determinism) and all of java.util.concurrent.locks (for park/unpark).
+ * See {@link org.apache.cassandra.simulator.asm.InterceptAgent} for more details.
+ */
+@SuppressWarnings("unused")
+public interface InterceptorOfSystemMethods
+{
+    void waitUntil(long deadlineNanos) throws InterruptedException;
+    void sleep(long period, TimeUnit units) throws InterruptedException;
+    void sleepUninterriptibly(long period, TimeUnit units);
+    boolean waitUntil(Object monitor, long deadlineNanos) throws InterruptedException;
+    void wait(Object monitor) throws InterruptedException;
+    void wait(Object monitor, long millis) throws InterruptedException;
+    void wait(Object monitor, long millis, int nanos) throws InterruptedException;
+    void preMonitorEnter(Object object, float chanceOfSwitch);
+    void preMonitorExit(Object object);
+    void notify(Object monitor);
+    void notifyAll(Object monitor);
+    void nemesis(float chance);
+
+    void park();
+    void parkNanos(long nanos);
+    void parkUntil(long millis);
+    void park(Object blocker);
+    void parkNanos(Object blocker, long nanos);
+    void parkUntil(Object blocker, long millis);
+    void unpark(Thread thread);
+
+    long randomSeed();
+    UUID randomUUID();
+
+    void threadLocalRandomCheck(long seed);
+
+    long nanoTime();
+    long currentTimeMillis();
+
+    @SuppressWarnings("unused")
+    public static class Global
+    {
+        private static InterceptorOfSystemMethods methods = new None();
+        private static ToIntFunction<Object> identityHashCode;
+
+        public static void waitUntil(long deadlineNanos) throws InterruptedException
+        {
+            methods.waitUntil(deadlineNanos);
+        }
+
+        public static void sleep(long millis) throws InterruptedException
+        {
+            sleep(MILLISECONDS, millis);
+        }
+
+        // flipped param order to replace the instance method call without other ASM modification
+        public static void sleep(TimeUnit units, long period) throws InterruptedException
+        {
+            methods.sleep(period, units);
+        }
+
+        // to match Guava Uninterruptibles
+        public static void sleepUninterruptibly(long period, TimeUnit units)
+        {
+            methods.sleepUninterriptibly(period, units);
+        }
+
+        public static boolean waitUntil(Object monitor, long deadlineNanos) throws InterruptedException
+        {
+            return methods.waitUntil(monitor, deadlineNanos);
+        }
+
+        public static void wait(Object monitor) throws InterruptedException
+        {
+            methods.wait(monitor);
+        }
+
+        public static void wait(Object monitor, long millis) throws InterruptedException
+        {
+            methods.wait(monitor, millis);
+        }
+
+        @SuppressWarnings("unused")
+        public static Object preMonitorEnter(Object object, float chance)
+        {
+            methods.preMonitorEnter(object, chance);
+            return object;
+        }
+
+        public static Object preMonitorExit(Object object)
+        {
+            methods.preMonitorExit(object);
+            return object;
+        }
+
+        public static void notify(Object monitor)
+        {
+            methods.notify(monitor);
+        }
+
+        public static void notifyAll(Object monitor)
+        {
+            methods.notifyAll(monitor);
+        }
+
+        public static void park()
+        {
+            methods.park();
+        }
+
+        public static void parkNanos(long nanos)
+        {
+            methods.parkNanos(nanos);
+        }
+
+        public static void parkUntil(long millis)
+        {
+            methods.parkUntil(millis);
+        }
+
+        public static void park(Object blocker)
+        {
+            methods.park(blocker);
+        }
+
+        public static void parkNanos(Object blocker, long nanos)
+        {
+            methods.parkNanos(blocker, nanos);
+        }
+
+        public static void parkUntil(Object blocker, long millis)
+        {
+            methods.parkUntil(blocker, millis);
+        }
+
+        public static void unpark(Thread thread)
+        {
+            methods.unpark(thread);
+        }
+        
+        public static void nemesis(float chance)
+        {
+            methods.nemesis(chance);
+        }
+
+        public static int advanceProbe(int probe)
+        {
+            return probe + 1;
+        }
+
+        public static long randomSeed()
+        {
+            return methods.randomSeed();
+        }
+
+        public static UUID randomUUID()
+        {
+            return methods.randomUUID();
+        }
+
+        public static int threadLocalRandomCheck(int seed)
+        {
+            methods.threadLocalRandomCheck(seed);
+            return seed;
+        }
+
+        public static long threadLocalRandomCheck(long seed)
+        {
+            methods.threadLocalRandomCheck(seed);
+            return seed;
+        }
+
+        public static long nanoTime()
+        {
+            return methods.nanoTime();
+        }
+
+        public static long currentTimeMillis()
+        {
+            return methods.currentTimeMillis();
+        }
+
+        public static int identityHashCode(Object object)
+        {
+            return identityHashCode.applyAsInt(object);
+        }
+
+        public static Unsafe getUnsafe()
+        {
+            try
+            {
+                Field field = Unsafe.class.getDeclaredField("theUnsafe");
+                field.setAccessible(true);
+                return (Unsafe) field.get(null);
+            }
+            catch (Exception e)
+            {
+                throw new AssertionError(e);
+            }
+        }
+
+        public static void unsafeSet(InterceptorOfSystemMethods methods)
+        {
+            Global.methods = methods;
+        }
+
+        public static void unsafeSet(InterceptorOfSystemMethods methods, ToIntFunction<Object> identityHashCode)
+        {
+            Global.methods = methods;
+            Global.identityHashCode = identityHashCode;
+        }
+    }
+
+    public static class None implements InterceptorOfSystemMethods
+    {
+        @Override
+        public void waitUntil(long deadlineNanos) throws InterruptedException
+        {
+            long waitNanos = deadlineNanos - System.nanoTime();
+            if (waitNanos > 0)
+                TimeUnit.NANOSECONDS.sleep(waitNanos);
+        }
+
+        @Override
+        public void sleep(long period, TimeUnit units) throws InterruptedException
+        {
+            waitUntil(System.nanoTime() + units.toNanos(period));
+        }
+
+        @Override
+        public void sleepUninterriptibly(long period, TimeUnit units)
+        {
+            long until = System.nanoTime() + units.toNanos(period);
+            boolean isInterrupted = false;
+            while (true)
+            {
+                try
+                {
+                    waitUntil(until);
+                    break;
+                }
+                catch (InterruptedException e)
+                {
+                    isInterrupted = true;
+                }
+            }
+
+            if (isInterrupted)
+                Thread.currentThread().interrupt();
+        }
+
+        @Override
+        public boolean waitUntil(Object monitor, long deadlineNanos) throws InterruptedException
+        {
+            long wait = deadlineNanos - System.nanoTime();
+            if (wait <= 0)
+                return false;
+
+            monitor.wait((wait + 999999) / 1000000);
+            return true;
+        }
+
+        @Override
+        public void wait(Object monitor) throws InterruptedException
+        {
+            monitor.wait();
+        }
+
+        @Override
+        public void wait(Object monitor, long millis) throws InterruptedException
+        {
+            monitor.wait(millis);
+        }
+
+        @Override
+        public void wait(Object monitor, long millis, int nanos) throws InterruptedException
+        {
+            monitor.wait(millis, nanos);
+        }
+
+        @Override
+        public void preMonitorEnter(Object object, float chanceOfSwitch)
+        {
+        }
+
+        @Override
+        public void preMonitorExit(Object object)
+        {
+        }
+
+        @Override
+        public void notify(Object monitor)
+        {
+            monitor.notify();
+        }
+
+        @Override
+        public void notifyAll(Object monitor)
+        {
+            monitor.notifyAll();
+        }
+
+        @Override
+        public void nemesis(float chance)
+        {
+        }
+
+        @Override
+        public void park()
+        {
+            LockSupport.park();
+        }
+
+        @Override
+        public void parkNanos(long nanos)
+        {
+            LockSupport.parkNanos(nanos);
+        }
+
+        @Override
+        public void parkUntil(long millis)
+        {
+            LockSupport.parkUntil(millis);
+        }
+
+        @Override
+        public void park(Object blocker)
+        {
+            LockSupport.park(blocker);
+        }
+
+        @Override
+        public void parkNanos(Object blocker, long nanos)
+        {
+            LockSupport.parkNanos(blocker, nanos);
+        }
+
+        @Override
+        public void parkUntil(Object blocker, long millis)
+        {
+            LockSupport.parkUntil(blocker, millis);
+        }
+
+        @Override
+        public void unpark(Thread thread)
+        {
+            LockSupport.unpark(thread);
+        }
+
+        private static final long SEED_MULTIPLIER = 2862933555777941757L;
+        private static final long SEED_CONSTANT = 0x121d34a;
+        private long nextSeed = 0x10523dfe2L;
+        @Override
+        public synchronized long randomSeed()
+        {
+            long next = nextSeed;
+            nextSeed *= SEED_MULTIPLIER;
+            nextSeed += SEED_CONSTANT;
+            return next;
+        }
+
+        @Override
+        public UUID randomUUID()
+        {
+            return UUID.randomUUID();
+        }
+
+        @Override
+        public long nanoTime()
+        {
+            return System.nanoTime();
+        }
+
+        @Override
+        public long currentTimeMillis()
+        {
+            return System.currentTimeMillis();
+        }
+
+        @Override
+        public void threadLocalRandomCheck(long seed)
+        {
+        }
+    }
+
+}
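
As an illustration of how these hooks fit together (Global, None, unsafeSet, nanoTime and currentTimeMillis are from this patch; the FixedTimeInterceptor below is a hypothetical example): byte-woven call sites are redirected to the static Global methods, and a simulation swaps in its own implementation via unsafeSet.

    import org.apache.cassandra.simulator.systems.InterceptorOfSystemMethods;
    import org.apache.cassandra.simulator.systems.InterceptorOfSystemMethods.Global;

    public class InterceptorInstallSketch
    {
        // Hypothetical interceptor pinning the clock; everything else falls back to None's pass-through behaviour
        static class FixedTimeInterceptor extends InterceptorOfSystemMethods.None
        {
            private final long fixedNanos;
            FixedTimeInterceptor(long fixedNanos) { this.fixedNanos = fixedNanos; }

            @Override
            public long nanoTime() { return fixedNanos; }

            @Override
            public long currentTimeMillis() { return fixedNanos / 1_000_000; }
        }

        public static void main(String[] args)
        {
            // Classes rewritten to call Global.nanoTime()/identityHashCode(...) now observe these values
            Global.unsafeSet(new FixedTimeInterceptor(42L), System::identityHashCode);
            System.out.println(Global.nanoTime()); // 42
        }
    }
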
diff --git a/test/simulator/main/org/apache/cassandra/simulator/Action.java b/test/simulator/main/org/apache/cassandra/simulator/Action.java
new file mode 100644
index 0000000..edb661f
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/Action.java
@@ -0,0 +1,1007 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.io.Serializable;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.EnumSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+
+import com.google.common.base.Preconditions;
+
+import io.netty.util.internal.DefaultPriorityQueue;
+import io.netty.util.internal.PriorityQueue;
+import io.netty.util.internal.PriorityQueueNode;
+import org.apache.cassandra.simulator.Ordered.StrictlyOrdered;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.utils.Throwables;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_SIMULATOR_DEBUG;
+import static org.apache.cassandra.simulator.Action.Modifier.*;
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Phase.CANCELLED;
+import static org.apache.cassandra.simulator.Action.Phase.CONSEQUENCE;
+import static org.apache.cassandra.simulator.Action.Phase.FINISHED;
+import static org.apache.cassandra.simulator.Action.Phase.INVALIDATED;
+import static org.apache.cassandra.simulator.Action.Phase.NASCENT;
+import static org.apache.cassandra.simulator.Action.Phase.STARTED;
+import static org.apache.cassandra.simulator.Action.Phase.WITHHELD;
+import static org.apache.cassandra.simulator.Action.RegisteredType.CHILD;
+import static org.apache.cassandra.simulator.Action.RegisteredType.LISTENER;
+import static org.apache.cassandra.simulator.ActionListener.Before.DROP;
+import static org.apache.cassandra.simulator.ActionListener.Before.INVALIDATE;
+import static org.apache.cassandra.simulator.ActionListener.Before.EXECUTE;
+import static org.apache.cassandra.simulator.utils.CompactLists.append;
+import static org.apache.cassandra.simulator.utils.CompactLists.remove;
+import static org.apache.cassandra.simulator.utils.CompactLists.safeForEach;
+
+public abstract class Action implements PriorityQueueNode
+{
+    private static final boolean DEBUG = TEST_SIMULATOR_DEBUG.getBoolean();
+
+    public enum Modifier
+    {
+        /**
+         * Never drop an action. Primarily intended to transitively mark an action's descendants as
+         * required to succeed (e.g. in the case of repair and other cluster actions that are brittle)
+         */
+        RELIABLE('r', true),
+
+        /**
+         * Mark an action to be discarded, which may result in some alternative action being undertaken.
+         * This marker is only to ensure correctness, as the simulator will fail if an action is performed
+         * that is marked DROP and RELIABLE.
+         */
+        DROP('f', false),
+
+        // indicates some scheduler delay should be added to the execution time of this action
+        // TODO (feature): support per node scheduler delay to simulate struggling nodes
+        THREAD_SIGNAL('t', false),
+
+        /**
+         * a general purpose mechanism for withholding actions until all other actions have had an opportunity to run
+         * (intended primarily for TIMEOUT+NO_TIMEOUTS, but may be used elsewhere)
+         * this is a very similar feature to Ordered, but it was easier to model this way, as the predecessors are
+         * all other child actions in the entire transitive closure of operations, with the premise that the action
+         * will no longer be valid by the time it has an opportunity to run
+         */
+        WITHHOLD((char)0, false),
+
+        // Mark operations as a THREAD_TIMEOUT, and parent operations as forbidding such timeouts (unless all else has failed)
+        NO_THREAD_TIMEOUTS('n', true, null, true), THREAD_TIMEOUT('t', false, NO_THREAD_TIMEOUTS),
+
+        /**
+         * All children of this action should be performed in strict order wrt the parent's consequences
+         * i.e. this is the continuation version of {@link #STRICT_CHILD_ORDER}
+         * this is a bit clunky, but not the end of the world
+         */
+        STRICT_CHILD_OF_PARENT_ORDER('c', false),
+
+        /**
+         * All children of this action should be performed in strict order, which means not only that
+         * they must be performed in the provided order, but all of their consequences must finish before
+         * the next sibling is permitted to run
+         */
+        STRICT_CHILD_ORDER('s', true, null, STRICT_CHILD_OF_PARENT_ORDER),
+
+        /**
+         * InfiniteLoopExecutors, when started, should be treated as detached from the action that happens to start them
+         * so the child action is considered to be orphaned, and not registered or counted against its parent action
+         */
+        ORPHAN('o', false),
+
+        /**
+         * Must be combined with ORPHAN. Unlinks an Action from its direct parent, attaching it as a child of its
+         * grandparent. This is used to support streams of streams
+         */
+        ORPHAN_TO_GRANDPARENT((char)0, false),
+
+        /**
+         * When we both deliver a message and timeout, the timeout may be scheduled for much later. We do not want to
+         * apply restrictions on later operations starting because we are waiting for a timeout to fire in this case,
+         * so we detach the timeout from its parent's accounting - but re-attach its children to the parent if
+         * still alive. Must be coincident with ORPHAN.
+         */
+        PSEUDO_ORPHAN('p', false),
+
+        /**
+         * Recurring tasks that the schedule may discount when determining if it has terminated
+         */
+        STREAM((char)0, false),
+
+        /**
+         * Recurring scheduled tasks that the schedule should discount when determining if it has terminated
+         */
+        DAEMON((char)0, false),
+
+        /**
+         * Informational messages produced for logging only
+         */
+        INFO((char)0, false),
+
+        /**
+         * A thread wakeup action that is ordinarily filtered from logging.
+         */
+        WAKEUP('w', false),
+
+        /**
+         * Show this action in the chain of origin actions
+         */
+        DISPLAY_ORIGIN('d', false);
+
+        final char displayId;
+        final boolean heritable;
+        final Modifier withholdIfPresent;
+        final Modifier inheritIfContinuation;
+
+        private Modifiers asSet;
+
+        Modifier(char displayId, boolean heritable)
+        {
+            this(displayId, heritable, null);
+        }
+
+        Modifier(char displayId, boolean heritable, Modifier withholdIfPresent)
+        {
+            this(displayId, heritable, withholdIfPresent, null);
+        }
+
+        Modifier(char displayId, boolean heritable, Modifier withholdIfPresent, boolean inheritIfContinuation)
+        {
+            this.displayId = displayId;
+            this.heritable = heritable;
+            this.withholdIfPresent = withholdIfPresent;
+            this.inheritIfContinuation = inheritIfContinuation ? this : null;
+        }
+
+        Modifier(char displayId, boolean heritable, Modifier withholdIfPresent, Modifier inheritIfContinuation)
+        {
+            this.displayId = displayId;
+            this.heritable = heritable;
+            this.withholdIfPresent = withholdIfPresent;
+            this.inheritIfContinuation = inheritIfContinuation;
+        }
+
+        Modifiers asSet()
+        {
+            if (asSet == null)
+                asSet = Modifiers.of(this);
+            return asSet;
+        }
+    }
+
+    public static class Modifiers implements Serializable
+    {
+        public static final Modifiers NONE = of();
+        public static final Modifiers INFO = Modifier.INFO.asSet();
+        public static final Modifiers RELIABLE = Modifier.RELIABLE.asSet();
+        public static final Modifiers DROP = Modifier.DROP.asSet();
+        public static final Modifiers PSEUDO_ORPHAN = of(Modifier.PSEUDO_ORPHAN);
+        public static final Modifiers STREAM = of(Modifier.STREAM);
+        public static final Modifiers INFINITE_STREAM = of(Modifier.STREAM, DAEMON);
+        public static final Modifiers STREAM_ITEM = of(Modifier.STREAM, ORPHAN, ORPHAN_TO_GRANDPARENT);
+        public static final Modifiers INFINITE_STREAM_ITEM = of(Modifier.STREAM, DAEMON, ORPHAN, ORPHAN_TO_GRANDPARENT);
+
+        public static final Modifiers START_TASK = of(THREAD_SIGNAL);
+        public static final Modifiers START_THREAD = of(THREAD_SIGNAL);
+        public static final Modifiers START_INFINITE_LOOP = of(ORPHAN, THREAD_SIGNAL);
+        public static final Modifiers START_SCHEDULED_TASK = of(THREAD_SIGNAL);
+        public static final Modifiers START_TIMEOUT_TASK = of(Modifier.THREAD_TIMEOUT, THREAD_SIGNAL);
+        public static final Modifiers START_DAEMON_TASK = of(ORPHAN, Modifier.DAEMON, THREAD_SIGNAL);
+
+        public static final Modifiers WAKE_UP_THREAD = of(THREAD_SIGNAL, WAKEUP);
+
+        public static final Modifiers STRICT = of(STRICT_CHILD_ORDER);
+        public static final Modifiers NO_TIMEOUTS = Modifier.NO_THREAD_TIMEOUTS.asSet();
+
+        public static final Modifiers RELIABLE_NO_TIMEOUTS = of(Modifier.NO_THREAD_TIMEOUTS, Modifier.RELIABLE);
+        public static final Modifiers DISPLAY_ORIGIN = of(Modifier.DISPLAY_ORIGIN);
+
+        public static Modifiers of()
+        {
+            return new Modifiers(EnumSet.noneOf(Modifier.class));
+        }
+
+        public static Modifiers of(Modifier first, Modifier ... rest)
+        {
+            return new Modifiers(EnumSet.of(first, rest));
+        }
+
+        final EnumSet<Modifier> contents;
+        Modifiers(EnumSet<Modifier> contents)
+        {
+            this.contents = contents;
+        }
+
+        public Modifiers with(Modifiers add)
+        {
+            if (add == NONE)
+                return this;
+
+            if (this == NONE)
+                return add;
+
+            if (contents.containsAll(add.contents))
+                return this;
+
+            return add(add.contents);
+        }
+
+        public Modifiers with(Modifier add)
+        {
+            if (this == NONE)
+                return add.asSet();
+
+            if (contents.contains(add))
+                return this;
+
+            return add(add.asSet().contents);
+        }
+
+        private Modifiers add(EnumSet<Modifier> add)
+        {
+            EnumSet<Modifier> merge = EnumSet.noneOf(Modifier.class);
+            for (Modifier modifier : this.contents) add(modifier, merge, add);
+            for (Modifier modifier : add) add(modifier, merge, this.contents);
+            return new Modifiers(merge);
+        }
+
+        private static void add(Modifier modifier, EnumSet<Modifier> to, EnumSet<Modifier> mergingWith)
+        {
+            if (modifier.withholdIfPresent != null && mergingWith.contains(modifier.withholdIfPresent))
+                to.add(WITHHOLD);
+            to.add(modifier);
+        }
+
+        // for continuations to inherit the relevant modifiers from their immediate parent
+        // (since we represent a continuation of the same execution)
+        public Modifiers inheritIfContinuation(Modifiers inheritIfContinuation)
+        {
+            EnumSet<Modifier> merge = null;
+            for (Modifier modifier : inheritIfContinuation.contents)
+            {
+                if (modifier.inheritIfContinuation != null)
+                {
+                    if (merge == null && !contents.contains(modifier.inheritIfContinuation)) merge = EnumSet.copyOf(contents);
+                    if (merge != null) merge.add(modifier.inheritIfContinuation);
+                }
+            }
+
+            if (merge == null)
+                return this;
+
+            if (!merge.contains(WITHHOLD))
+            {
+                for (Modifier modifier : merge)
+                {
+                    if (modifier.withholdIfPresent != null && merge.contains(modifier.withholdIfPresent))
+                        merge.add(WITHHOLD);
+                }
+            }
+            return new Modifiers(merge);
+        }
+
+        public Modifiers without(Modifier modifier)
+        {
+            if (!contents.contains(modifier))
+                return this;
+
+            EnumSet<Modifier> remove = EnumSet.noneOf(Modifier.class);
+            remove.addAll(this.contents);
+            remove.remove(modifier);
+            return new Modifiers(remove);
+        }
+
+        public boolean is(Modifier modifier)
+        {
+            return contents.contains(modifier);
+        }
+    }
+
+    enum RegisteredType { LISTENER, CHILD }
+
+    enum Phase
+    {
+        NASCENT,
+        WITHHELD,
+        CONSEQUENCE,
+        READY_TO_SCHEDULE,
+        SEQUENCED_PRE_SCHEDULED,
+        SCHEDULED,
+        SEQUENCED_POST_SCHEDULED,
+        RUNNABLE,
+        STARTED,
+        FINISHED,
+        CANCELLED,
+        INVALIDATED
+    }
+
+    // configuration/status
+    private final Object description;
+    private OrderOns orderOn;
+    private Modifiers self, transitive;
+    private Phase phase = NASCENT;
+    Ordered ordered;
+
+    /** The listeners (and, if DEBUG, children) we have already registered */
+    private final Map<Object, RegisteredType> registered = new IdentityHashMap<>(2);
+
+    /** The list of listeners (for deterministic evaluation order) to notify on any event */
+    private List<ActionListener> listeners;
+
+    /** The immediate parent, and furthest ancestor of this Action */
+    protected Action parent, origin = this, pseudoParent;
+
+    /** The number of direct consequences of this action that have not <i>transitively</i> terminated */
+    private int childCount;
+
+    /**
+     * Consequences marked WITHHOLD are kept in their parent's (or parent thread's) {@code withheld} queue until all
+     * other immediate children have <i>transitively</i> terminated their execution
+     */
+    private DefaultPriorityQueue<Action> withheld;
+
+    // scheduler and scheduled state
+    protected RunnableActionScheduler scheduler;
+
+    private long deadline; // some actions have an associated wall clock time to execute and are first scheduled by this
+    private double priority; // all actions eventually get prioritised for execution in some order "now"
+
+    // used to track the index and priority queue we're being managed for execution by (either by scheduledAt or priority)
+    private PriorityQueue<?> scheduledIn;
+    private int scheduledIndex = -1;
+
+    // used to track the scheduledAt of events we have moved to actively scheduling/prioritising
+    private PriorityQueue<?> savedIn;
+    private int savedIndex = -1;
+
+    public Action(Object description, Modifiers self)
+    {
+        this(description, self, NONE);
+    }
+    public Action(Object description, Modifiers self, Modifiers transitive)
+    {
+        this(description, OrderOn.NONE, self, transitive);
+    }
+
+    public Action(Object description, OrderOns orderOn, Modifiers self, Modifiers transitive)
+    {
+        this.description = description;
+        if (orderOn == null || self == null || transitive == null)
+            throw new IllegalArgumentException();
+        assert transitive.contents.stream().allMatch(m -> m.heritable) : transitive.contents.toString();
+        this.orderOn = orderOn;
+        this.self = self;
+        this.transitive = transitive;
+    }
+
+    public Object description()
+    {
+        return description;
+    }
+    public OrderOns orderOns() { return orderOn; }
+    public Phase phase() { return phase; }
+    public Modifiers self() { return self; }
+    public Modifiers transitive() { return transitive; }
+    public boolean is(Modifier modifier)
+    {
+        return self.contents.contains(modifier);
+    }
+    public void inherit(Modifiers add)
+    {
+        if (add != NONE)
+            add(add, add);
+    }
+    public void add(Modifiers self, Modifiers children)
+    {
+        this.self = this.self.with(self);
+        this.transitive = this.transitive.with(children);
+    }
+
+    public boolean isStarted()
+    {
+        return phase.compareTo(STARTED) >= 0;
+    }
+    public boolean isFinished()
+    {
+        return phase.compareTo(FINISHED) >= 0;
+    }
+    public boolean isCancelled()
+    {
+        return phase.compareTo(CANCELLED) >= 0;
+    }
+    public boolean isInvalidated()
+    {
+        return phase.compareTo(INVALIDATED) >= 0;
+    }
+
+    public Action parent()
+    {
+        return parent;
+    }
+    public int childCount()
+    {
+        return childCount;
+    }
+
+    /**
+     * Main implementation of {@link #perform()}, which must be provided by an extending class.
+     *
+     * Does not need to handle consequences, registration etc.
+     *
+     * @return the action consequences
+     */
+    protected abstract ActionList performSimple();
+
+    /**
+     * Invokes {@link #performSimple} before invoking {@link #performed}.
+     *
+     * May be overridden by an extending class that does not finish immediately (e.g., SimulatedAction).
+     *
+     * MUST handle consequences, registration etc. by invoking performed() on its results before returning,
+     * to register children and record the action's state
+     *
+     * @return the action consequences
+     */
+    protected ActionList performAndRegister()
+    {
+        return performed(performSimple(), true, true);
+    }
+
+    /**
+     * Invoke the action, and return its consequences, i.e. any follow up actions.
+     */
+    public final ActionList perform()
+    {
+        Preconditions.checkState(!(is(RELIABLE) && is(Modifier.DROP)));
+        Throwable fail = safeForEach(listeners, ActionListener::before, this, is(Modifier.DROP) ? DROP : EXECUTE);
+        if (fail != null)
+        {
+            invalidate(false);
+            Throwables.maybeFail(fail);
+        }
+
+        if (DEBUG && parent != null && parent.registered.get(this) != CHILD) throw new AssertionError();
+
+        ActionList next = performAndRegister();
+        next.forEach(Action::setConsequence);
+
+        if (is(STRICT_CHILD_ORDER)) next.setStrictlySequentialOn(this);
+        else if (is(STRICT_CHILD_OF_PARENT_ORDER)) next.setStrictlySequentialOn(parent);
+
+        return next;
+    }
+
+    /**
+     * To be invoked on the results of {@link #performSimple()} by its implementations.
+     * We invite the implementation to invoke it so that it may control state on either side of its invocation.
+     *
+     * Registers the consequences (via {@link #register(ActionList)}), restores any previously withheld actions,
+     * and updates this Action's internal state.
+     *
+     * @return the provided actions, minus any withheld
+     */
+    protected ActionList performed(ActionList consequences, boolean isStart, boolean isFinish)
+    {
+        assert isStarted() != isStart;
+        assert !isFinished();
+
+        consequences = register(consequences);
+        assert !consequences.anyMatch(c -> c.is(WITHHOLD));
+
+        if (isFinish) finishedSelf();
+        else if (isStart) phase = STARTED;
+
+        return restoreWithheld(consequences);
+    }
+
+    /**
+     * Similar to cancel() but invoked under abnormal termination
+     */
+    public void invalidate()
+    {
+        invalidate(INVALIDATED);
+    }
+
+    /**
+     * To be invoked when this action has become redundant.
+     *  - Marks itself invalidated
+     *  - Notifies its listeners (which may remove it from any ordered sequences in the ActionSchedule)
+     *  - If present, clears itself directly from:
+     *    - its parent's withholding space
+     *    - the schedule's priority queue
+     */
+    public void cancel()
+    {
+        assert !isStarted();
+        invalidate(CANCELLED);
+    }
+
+    private void invalidate(Phase advanceTo)
+    {
+        if (phase.compareTo(CANCELLED) >= 0)
+            return;
+
+        advanceTo(advanceTo);
+        Throwable fail = safeForEach(listeners, ActionListener::before, this, INVALIDATE);
+        fail = Throwables.merge(fail, safeInvalidate(phase == CANCELLED));
+        invalidate(phase == CANCELLED);
+        finishedSelf();
+        Throwables.maybeFail(fail);
+    }
+
+    protected Throwable safeInvalidate(boolean isCancellation)
+    {
+        return null;
+    }
+
+    private void invalidate(boolean isCancellation)
+    {
+        if (parent != null && parent.withheld != null && is(WITHHOLD))
+        {
+            if (parent.withheld.remove(this))
+                parent.cleanupWithheld();
+        }
+        if (scheduledIndex >= 0) scheduledIn.remove(this);
+        if (savedIndex >= 0) savedIn.remove(this);
+        if (ordered != null) ordered.invalidate(isCancellation);
+    }
+
+    /**
+     * Register consequences of this action, i.e.:
+     *  - attach a scheduler to them for prioritising when they are permitted to execute
+     *  - pass them to any listeners as consequences
+     *  - count them as children, and mark ourselves as parent, so that we may track transitive completion
+     *  - withhold any actions that are so marked, to be {@link #restoreWithheld}d once we have transitively completed
+     *    all non-withheld actions.
+     */
+    protected ActionList register(ActionList consequences)
+    {
+        assert !isFinished();
+        if (consequences.isEmpty())
+            return consequences;
+
+        scheduler.attachTo(consequences);
+        Throwable fail = safeForEach(listeners, ActionListener::consequences, consequences);
+        if (fail != null)
+        {
+            invalidate(false);
+            Throwables.merge(fail, consequences.safeForEach(Action::invalidate));
+            Throwables.maybeFail(fail);
+        }
+
+        boolean isParentPseudoOrphan = is(PSEUDO_ORPHAN);
+        boolean withheld = false;
+        for (int i = 0 ; i < consequences.size() ; ++i)
+        {
+            Action child = consequences.get(i);
+            if (child.is(ORPHAN))
+            {
+                if (parent != null && child.is(ORPHAN_TO_GRANDPARENT))
+                {
+                    ++parent.childCount;
+                    parent.registerChild(child);
+                }
+                else if (child.is(PSEUDO_ORPHAN))
+                {
+                    child.inherit(transitive);
+                    registerPseudoOrphan(child);
+                    assert !child.is(WITHHOLD);
+                }
+            }
+            else
+            {
+                Action parent;
+                if (isParentPseudoOrphan && pseudoParent != null && pseudoParent.childCount > 0)
+                    parent = pseudoParent;
+                else
+                    parent = this;
+
+                child.inherit(parent.transitive);
+                if (child.is(WITHHOLD))
+                {
+                    // this could be supported in principle by applying the ordering here, but it would be
+                    // some work to ensure it doesn't lead to deadlocks so for now just assert we don't use it
+                    Preconditions.checkState(!parent.is(STRICT_CHILD_ORDER) && !parent.is(STRICT_CHILD_OF_PARENT_ORDER));
+                    withheld = true;
+                    parent.addWithheld(child);
+                }
+
+                parent.registerChild(child);
+                parent.childCount++;
+            }
+        }
+
+        if (!withheld)
+            return consequences;
+
+        return consequences.filter(child -> !child.is(WITHHOLD));
+    }
+
+    // setup the child relationship, but do not update childCount
+    private void registerChild(Action child)
+    {
+        assert child.parent == null;
+        child.parent = this;
+        registerChildOrigin(child);
+        if (DEBUG && !register(child, CHILD)) throw new AssertionError();
+    }
+
+    private void registerPseudoOrphan(Action child)
+    {
+        assert child.parent == null;
+        assert child.pseudoParent == null;
+        child.pseudoParent = this;
+        registerChildOrigin(child);
+    }
+
+    private void registerChildOrigin(Action child)
+    {
+        if (is(Modifier.DISPLAY_ORIGIN)) child.origin = this;
+        else if (origin != this) child.origin = origin;
+    }
+
+    private boolean register(Object object, RegisteredType type)
+    {
+        RegisteredType prev = registered.putIfAbsent(object, type);
+        if (prev != null && prev != type)
+            throw new AssertionError();
+        return prev == null;
+    }
+
+    public void register(ActionListener listener)
+    {
+        if (register(listener, LISTENER))
+            listeners = append(listeners, listener);
+    }
+
+    private boolean deregister(Object object, RegisteredType type)
+    {
+        return registered.remove(object, type);
+    }
+
+    public void deregister(ActionListener listener)
+    {
+        if (deregister(listener, LISTENER))
+            listeners = remove(listeners, listener);
+    }
+
+    private void addWithheld(Action action)
+    {
+        if (withheld == null)
+            withheld = new DefaultPriorityQueue<>(Action::compareByPriority, 2);
+        action.advanceTo(WITHHELD);
+        action.saveIn(withheld);
+    }
+
+    /**
+     * Restore withheld (by ourselves or a parent) actions, when no other outstanding actions remain
+     */
+    public ActionList restoreWithheld(ActionList consequences)
+    {
+        if (withheld != null && childCount == withheld.size())
+        {
+            Action next = withheld.poll();
+            cleanupWithheld();
+            consequences = consequences.andThen(next);
+        }
+        else if (childCount == 0 && parent != null)
+        {
+            Action cur = parent;
+            while (cur.childCount == 0 && cur.parent != null)
+                cur = cur.parent;
+            consequences = cur.restoreWithheld(consequences);
+        }
+        return consequences;
+    }
+
+    private void cleanupWithheld()
+    {
+        Action cur = this;
+        if (cur.withheld.isEmpty())
+            cur.withheld = null;
+    }
+
+    /**
+     * Invoked once we finish executing ourselves. Typically this occurs immediately after invocation,
+     * but for SimulatedAction it occurs only once the thread terminates its execution.
+     *
+     * In practice this is entirely determined by the {@code isFinished} parameter supplied to
+     * {@link #performed(ActionList, boolean, boolean)}.
+     */
+    void finishedSelf()
+    {
+        if (phase.compareTo(CANCELLED) < 0)
+            advanceTo(FINISHED);
+
+        scheduler = null;
+        if (withheld != null)
+        {
+            Queue<Action> withheld = this.withheld;
+            this.withheld = null;
+            withheld.forEach(Action::cancel);
+        }
+        Throwable fail = safeForEach(listeners, ActionListener::after, this);
+        if (childCount == 0)
+            fail = Throwables.merge(fail, transitivelyFinished());
+
+        if (fail != null)
+        {
+            invalidate(false);
+            Throwables.maybeFail(fail);
+        }
+    }
+
+    /**
+     * Invoked once all of the consequences of this action, and of those actions (recursively), have completed.
+     */
+    Throwable transitivelyFinished()
+    {
+        return transitivelyFinished(this);
+    }
+
+    static Throwable transitivelyFinished(Action cur)
+    {
+        Throwable fail = null;
+        while (true)
+        {
+            Action parent = cur.parent;
+            assert 0 == cur.childCount && cur.isFinished();
+            if (DEBUG && cur.registered.values().stream().anyMatch(t -> t == CHILD)) throw new AssertionError();
+            fail = Throwables.merge(fail, safeForEach(cur.listeners, ActionListener::transitivelyAfter, cur));
+            if (parent == null)
+                break;
+            if (DEBUG && CHILD != parent.registered.remove(cur)) throw new AssertionError();
+            if (--parent.childCount == 0 && parent.isFinished()) cur = parent;
+            else break;
+        }
+        return fail;
+    }
+
+    void orderOn(OrderOn orderOn)
+    {
+        this.orderOn = this.orderOn.with(orderOn);
+    }
+
+    void setupOrdering(ActionSchedule schedule)
+    {
+        if (orderOn.isOrdered())
+        {
+            ordered = orderOn.isStrict() ? new StrictlyOrdered(this, schedule) : new Ordered(this, schedule);
+            for (int i = 0, maxi = orderOn.size(); i < maxi ; ++i)
+                ordered.join(orderOn.get(i));
+        }
+    }
+
+    void advanceTo(Phase phase)
+    {
+        Preconditions.checkArgument(phase.compareTo(this.phase) > 0);
+        this.phase = phase;
+    }
+
+    void addTo(PriorityQueue<Action> byDeadline)
+    {
+        Preconditions.checkState(scheduledIndex < 0);
+        scheduledIn = byDeadline;
+        byDeadline.add(this);
+    }
+
+    void saveIn(PriorityQueue<Action> saveIn)
+    {
+        Preconditions.checkState(savedIndex < 0);
+        savedIn = saveIn;
+        saveIn.add(this);
+    }
+
+    void setScheduler(RunnableActionScheduler scheduler)
+    {
+        Preconditions.checkState(this.scheduler == null);
+        Preconditions.checkState(this.phase == NASCENT);
+        this.scheduler = scheduler;
+    }
+
+    void setConsequence()
+    {
+        advanceTo(CONSEQUENCE);
+    }
+
+    void schedule(SimulatedTime time, FutureActionScheduler future)
+    {
+        setPriority(time, scheduler.priority());
+        if (is(THREAD_SIGNAL) || deadline == 0)
+        {
+            long newDeadline = deadline == 0 ? time.nanoTime() : deadline;
+            newDeadline += future.schedulerDelayNanos();
+            deadline = newDeadline;
+            time.onTimeEvent("ResetDeadline", newDeadline);
+        }
+    }
+
+    public void setDeadline(SimulatedTime time, long deadlineNanos)
+    {
+        Preconditions.checkState(deadline == 0);
+        Preconditions.checkArgument(deadlineNanos >= deadline);
+        deadline = deadlineNanos;
+        time.onTimeEvent("SetDeadline", deadlineNanos);
+    }
+
+    public void setPriority(SimulatedTime time, double priority)
+    {
+        this.priority = priority;
+        time.onTimeEvent("SetPriority", Double.doubleToLongBits(priority));
+    }
+
+    public long deadline()
+    {
+        if (deadline < 0) throw new AssertionError();
+        return deadline;
+    }
+
+    public double priority()
+    {
+        return priority;
+    }
+
+    @Override
+    public int priorityQueueIndex(DefaultPriorityQueue<?> queue)
+    {
+        if (queue == scheduledIn) return scheduledIndex;
+        else if (queue == savedIn) return savedIndex;
+        else return -1;
+    }
+
+    @Override
+    public void priorityQueueIndex(DefaultPriorityQueue<?> queue, int i)
+    {
+        if (queue == scheduledIn) { scheduledIndex = i; if (i < 0) scheduledIn = null; }
+        else if (queue == savedIn) { savedIndex = i; if (i < 0) savedIn = null; }
+        else throw new IllegalStateException();
+    }
+
+    public int compareByDeadline(Action that)
+    {
+        return Long.compare(this.deadline, that.deadline);
+    }
+
+    public int compareByPriority(Action that)
+    {
+        return Double.compare(this.priority, that.priority);
+    }
+
+    private String describeModifiers()
+    {
+        StringBuilder builder = new StringBuilder("[");
+        for (Modifier modifier : self.contents)
+        {
+            if (modifier.displayId == 0)
+                continue;
+
+            if (!transitive.is(modifier)) builder.append(modifier.displayId);
+            else builder.append(Character.toUpperCase(modifier.displayId));
+        }
+
+        boolean hasTransitiveOnly = false;
+        for (Modifier modifier : transitive.contents)
+        {
+            if (modifier.displayId == 0)
+                continue;
+
+            if (!self.is(modifier))
+            {
+                if (!hasTransitiveOnly)
+                {
+                    hasTransitiveOnly = true;
+                    builder.append('(');
+                }
+                builder.append(modifier.displayId);
+            }
+        }
+
+        if (builder.length() == 1)
+            return "";
+
+        if (hasTransitiveOnly)
+            builder.append(')');
+        builder.append(']');
+
+        return builder.toString();
+    }
+
+    public String toString()
+    {
+        return describeModifiers() + description() + (origin != this ? " for " + origin : "");
+    }
+
+    public String toReconcileString()
+    {
+        return this + " at [" + deadline + ',' + priority + ']';
+    }
+
+    private static class StackElement
+    {
+        final Action action;
+        final Deque<Action> children;
+
+        private StackElement(Action action)
+        {
+            this.action = action;
+            this.children = new ArrayDeque<>(action.childCount);
+            for (Map.Entry<Object, RegisteredType> e : action.registered.entrySet())
+            {
+                if (e.getValue() == CHILD)
+                    children.add((Action) e.getKey());
+            }
+        }
+    }
+
+    public String describeCurrentState()
+    {
+        StringBuilder sb = new StringBuilder();
+        Deque<StackElement> stack = new ArrayDeque<>();
+        appendCurrentState(sb);
+
+        stack.push(new StackElement(this));
+        while (!stack.isEmpty())
+        {
+            StackElement last = stack.peek();
+            if (last.children.isEmpty())
+            {
+                stack.pop();
+            }
+            else
+            {
+                Action child = last.children.pop();
+                sb.append('\n');
+                appendPrefix(stack.size(), sb);
+                child.appendCurrentState(sb);
+                stack.push(new StackElement(child));
+            }
+        }
+        return sb.toString();
+    }
+
+    private static void appendPrefix(int count, StringBuilder sb)
+    {
+        while (--count >= 0)
+            sb.append("   |");
+    }
+
+    private void appendCurrentState(StringBuilder sb)
+    {
+        if (!isStarted()) sb.append("NOT_STARTED ");
+        else if (!isFinished()) sb.append("NOT_FINISHED ");
+        if (childCount > 0)
+        {
+            sb.append('(');
+            sb.append(childCount);
+            sb.append(") ");
+        }
+        if (orderOn.isOrdered())
+        {
+            sb.append(orderOn);
+            sb.append(": ");
+        }
+        sb.append(description());
+    }
+
+}
\ No newline at end of file
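
As an illustration of the Action lifecycle defined above (Action, ActionList, Modifiers.NONE and performSimple are from this patch; RunnableAction is a hypothetical example): subclasses implement performSimple() to do the work and return their consequences, while perform() takes care of listener notification, child registration and ordering.

    import org.apache.cassandra.simulator.Action;
    import org.apache.cassandra.simulator.ActionList;

    // Hypothetical minimal Action: runs a task and produces no follow-up actions
    public class RunnableAction extends Action
    {
        private final Runnable task;

        public RunnableAction(String description, Runnable task)
        {
            super(description, Action.Modifiers.NONE, Action.Modifiers.NONE);
            this.task = task;
        }

        @Override
        protected ActionList performSimple()
        {
            task.run();
            return ActionList.empty(); // any consequence actions would be returned here
        }
    }
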
diff --git a/test/simulator/main/org/apache/cassandra/simulator/ActionList.java b/test/simulator/main/org/apache/cassandra/simulator/ActionList.java
new file mode 100644
index 0000000..64474e6
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/ActionList.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.AbstractCollection;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import com.google.common.collect.Iterators;
+
+import org.apache.cassandra.simulator.OrderOn.StrictSequential;
+import org.apache.cassandra.utils.Throwables;
+
+import static java.util.Arrays.copyOf;
+
+public class ActionList extends AbstractCollection<Action>
+{
+    private static final ActionList EMPTY = new ActionList(new Action[0]);
+    public static ActionList empty() { return EMPTY; }
+    public static ActionList of(Action action) { return new ActionList(new Action[] { action }); }
+    public static ActionList of(Stream<Action> action) { return new ActionList(action.toArray(Action[]::new)); }
+    public static ActionList of(Collection<Action> actions) { return actions.isEmpty() ? EMPTY : new ActionList(actions.toArray(new Action[0])); }
+    public static ActionList of(Action ... actions) { return new ActionList(actions); }
+
+    private final Action[] actions;
+
+    ActionList(Action[] actions)
+    {
+        this.actions = actions;
+    }
+
+    public int size()
+    {
+        return actions.length;
+    }
+
+    public boolean isEmpty()
+    {
+        return 0 == actions.length;
+    }
+
+    public Action get(int i)
+    {
+        return actions[i];
+    }
+
+    public Iterator<Action> iterator()
+    {
+        return Iterators.forArray(actions);
+    }
+
+    public Stream<Action> stream()
+    {
+        return Stream.of(actions);
+    }
+
+    public ActionList transform(Function<Action, Action> apply)
+    {
+        return ActionList.of(stream().map(apply));
+    }
+
+    public ActionList filter(Predicate<Action> apply)
+    {
+        return ActionList.of(stream().filter(apply));
+    }
+
+    public boolean anyMatch(Predicate<Action> test)
+    {
+        for (int i = 0 ; i < actions.length ; ++i)
+            if (test.test(actions[i])) return true;
+        return false;
+    }
+
+    public ActionList andThen(Action andThen)
+    {
+        return andThen(ActionList.of(andThen));
+    }
+
+    public ActionList andThen(ActionList andThen)
+    {
+        Action[] result = copyOf(actions, size() + andThen.size());
+        System.arraycopy(andThen.actions, 0, result, size(), andThen.size());
+        return new ActionList(result);
+    }
+
+    public ActionList setStrictlySequential()
+    {
+        return setStrictlySequentialOn(this);
+    }
+
+    public ActionList setStrictlySequentialOn(Object on)
+    {
+        if (isEmpty()) return this;
+        StrictSequential orderOn = new StrictSequential(on);
+        forEach(a -> a.orderOn(orderOn));
+        return this;
+    }
+
+    public Throwable safeForEach(Consumer<Action> forEach)
+    {
+        Throwable result = null;
+        for (Action action : actions)
+        {
+            try
+            {
+                forEach.accept(action);
+            }
+            catch (Throwable t)
+            {
+                result = Throwables.merge(result, t);
+            }
+        }
+        return result;
+    }
+
+    public String toString()
+    {
+        return Arrays.toString(actions);
+    }
+
+    public String toReconcileString()
+    {
+        return Arrays.stream(actions).map(Action::toReconcileString).collect(Collectors.joining(",", "[", "]"));
+    }
+}
+
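
As an illustration of composing the immutable ActionList above (of, andThen and setStrictlySequential are from this patch; the inOrder helper and its parameters are hypothetical): lists can be concatenated and marked so the scheduler runs the actions, and their consequences, strictly one after another.

    import org.apache.cassandra.simulator.Action;
    import org.apache.cassandra.simulator.ActionList;

    public class ActionListSketch
    {
        // Hypothetical helper: run 'first' and 'second', then 'cleanup', in strict order
        static ActionList inOrder(Action first, Action second, Action cleanup)
        {
            return ActionList.of(first, second)
                             .andThen(cleanup)
                             .setStrictlySequential();
        }
    }
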
diff --git a/test/simulator/main/org/apache/cassandra/simulator/ActionListener.java b/test/simulator/main/org/apache/cassandra/simulator/ActionListener.java
new file mode 100644
index 0000000..2bc8ae8
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/ActionListener.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.List;
+import java.util.function.Consumer;
+
+public interface ActionListener
+{
+    enum Before { EXECUTE, DROP, INVALIDATE }
+
+    /**
+     * Immediately before the action is first executed
+     * @param action the action we are about to perform
+     * @param before whether the action is to be executed, dropped, or invalidated
+     */
+    default void before(Action action, Before before) {}
+
+    /**
+     * Immediately after the action is first executed (or dropped)
+     * @param consequences the actions that result from the execution
+     */
+    default void consequences(ActionList consequences) {}
+
+    /**
+     * If an ActionThread, after termination; otherwise immediately after invocation
+     * @param finished the action that has finished
+     */
+    default void after(Action finished) {}
+
+    /**
+     * After the action and all of its consequences transitively terminate (excluding the initiation of an infinite loop execution)
+     * @param finished the action that has finished
+     */
+    default void transitivelyAfter(Action finished) {}
+
+    static ActionListener runAfter(Consumer<Action> after)
+    {
+        return new ActionListener()
+        {
+            @Override
+            public void after(Action performed)
+            {
+                after.accept(performed);
+            }
+        };
+    }
+
+    static ActionListener runAfterAndTransitivelyAfter(Consumer<Action> after)
+    {
+        return new ActionListener()
+        {
+            @Override
+            public void after(Action performed)
+            {
+                after.accept(performed);
+            }
+
+            @Override
+            public void transitivelyAfter(Action performed)
+            {
+                after.accept(performed);
+            }
+        };
+    }
+
+    static ActionListener runAfterTransitiveClosure(Consumer<Action> transitivelyAfter)
+    {
+        return new ActionListener()
+        {
+            @Override
+            public void transitivelyAfter(Action performed)
+            {
+                transitivelyAfter.accept(performed);
+            }
+        };
+    }
+
+    static ActionListener recursive(ActionListener runOnAll)
+    {
+        return new WrappedRecursiveActionListener(runOnAll);
+    }
+
+    public interface SelfAddingActionListener extends ActionListener, Consumer<Action>
+    {
+        @Override
+        default public void accept(Action action)
+        {
+            action.register(this);
+        }
+    }
+
+    public static class RecursiveActionListener implements SelfAddingActionListener
+    {
+        @Override
+        public void consequences(ActionList consequences)
+        {
+            consequences.forEach(this);
+        }
+    }
+
+    public static class WrappedRecursiveActionListener extends Wrapped implements SelfAddingActionListener
+    {
+        public WrappedRecursiveActionListener(ActionListener wrap)
+        {
+            super(wrap);
+        }
+
+        @Override
+        public void consequences(ActionList consequences)
+        {
+            consequences.forEach(this);
+            super.consequences(consequences);
+        }
+    }
+
+    public static class Wrapped implements ActionListener
+    {
+        final ActionListener wrap;
+
+        public Wrapped(ActionListener wrap)
+        {
+            this.wrap = wrap;
+        }
+
+        @Override
+        public void before(Action action, Before before)
+        {
+            wrap.before(action, before);
+        }
+
+        @Override
+        public void consequences(ActionList consequences)
+        {
+            wrap.consequences(consequences);
+        }
+
+        @Override
+        public void after(Action finished)
+        {
+            wrap.after(finished);
+        }
+
+        @Override
+        public void transitivelyAfter(Action finished)
+        {
+            wrap.transitivelyAfter(finished);
+        }
+    }
+
+    public static class Combined implements ActionListener
+    {
+        final List<ActionListener> combined;
+
+        public Combined(List<ActionListener> combined)
+        {
+            this.combined = combined;
+        }
+
+        @Override
+        public void before(Action action, Before before)
+        {
+            combined.forEach(listener -> listener.before(action, before));
+        }
+
+        @Override
+        public void consequences(ActionList consequences)
+        {
+            combined.forEach(listener -> listener.consequences(consequences));
+        }
+
+        @Override
+        public void after(Action finished)
+        {
+            combined.forEach(listener -> listener.after(finished));
+        }
+
+        @Override
+        public void transitivelyAfter(Action finished)
+        {
+            combined.forEach(listener -> listener.transitivelyAfter(finished));
+        }
+    }
+}
+
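To illustrate the hooks above, the following sketch (not part of this patch) counts every action that finishes, following consequences transitively: recursive(...) re-registers the wrapped listener on each consequence, and runAfter(...) supplies the per-action callback. Action.register(ActionListener) is assumed here from its use in SelfAddingActionListener.accept above.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.cassandra.simulator.Action;
import org.apache.cassandra.simulator.ActionListener;

class CompletionCountSketch
{
    // hypothetical helper: count each finished action, including transitive consequences
    static void attach(Action root, AtomicInteger completed)
    {
        ActionListener counter = ActionListener.recursive(
            ActionListener.runAfter(finished -> completed.incrementAndGet()));
        root.register(counter); // register(...) assumed from SelfAddingActionListener.accept
    }
}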
diff --git a/test/simulator/main/org/apache/cassandra/simulator/ActionPlan.java b/test/simulator/main/org/apache/cassandra/simulator/ActionPlan.java
new file mode 100644
index 0000000..1813de0
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/ActionPlan.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.List;
+import java.util.function.LongSupplier;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.cassandra.simulator.ActionSchedule.Work;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.utils.CloseableIterator;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.FINITE;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.UNLIMITED;
+
+public class ActionPlan
+{
+    /**
+     * Actions to perform (reliably, and in strict order) before starting the proper simulation
+     */
+    public final ActionList pre;
+
+    /**
+     * List of action sequences, each representing the actions planned by a given actor, that will
+     * be performed in the provided sequence but otherwise randomly interleaved with the other action sequences.
+     * These planned actions may initiate other actions, all of which will complete before the next planned action
+     * for that action sequence is started.
+     */
+    public final List<ActionList> interleave;
+
+    /**
+     * Actions to perform (reliably, and in strict order) after finishing the proper simulation.
+     *
+     * This is only run if the simulation was successful, so only optional cleanup should be performed here.
+     */
+    public final ActionList post;
+
+    public ActionPlan(ActionList pre, List<ActionList> interleave, ActionList post)
+    {
+        this.pre = pre;
+        this.interleave = interleave;
+        this.post = post;
+    }
+
+    public CloseableIterator<?> iterator(ActionSchedule.Mode mode, long runForNanos, LongSupplier schedulerJitter, SimulatedTime time, RunnableActionScheduler runnableScheduler, FutureActionScheduler futureScheduler)
+    {
+        return new ActionSchedule(time, futureScheduler, schedulerJitter, runnableScheduler,
+                                  new Work(UNLIMITED, singletonList(pre.setStrictlySequential())),
+                                  new Work(mode, runForNanos, interleave),
+                                  new Work(FINITE, singletonList(post.setStrictlySequential())));
+    }
+
+    public static ActionPlan interleave(List<ActionList> interleave)
+    {
+        return new ActionPlan(ActionList.empty(), interleave, ActionList.empty());
+    }
+
+    public static ActionPlan setUpTearDown(ActionList pre, ActionList post)
+    {
+        return new ActionPlan(pre, emptyList(), post);
+    }
+
+    public ActionPlan encapsulate(ActionPlan that)
+    {
+        return new ActionPlan(
+                this.pre.andThen(that.pre),
+                ImmutableList.<ActionList>builder().addAll(this.interleave).addAll(that.interleave).build(),
+                that.post.andThen(this.post));
+    }
+}
+
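As a sketch of how these pieces compose (not part of this patch): the outer plan's pre actions run first and its post actions run last, while the interleave lists of both plans are merged, so a test can wrap a workload in set-up/tear-down without constraining how the workload itself is interleaved. The ActionList parameters below are placeholders for lists built elsewhere in a simulation.

import static java.util.Arrays.asList;

import org.apache.cassandra.simulator.ActionList;
import org.apache.cassandra.simulator.ActionPlan;

class ActionPlanSketch
{
    static ActionPlan compose(ActionList setup, ActionList teardown, ActionList actor1, ActionList actor2)
    {
        ActionPlan outer = ActionPlan.setUpTearDown(setup, teardown);        // strict pre/post only
        ActionPlan workload = ActionPlan.interleave(asList(actor1, actor2)); // randomly interleaved actors
        return outer.encapsulate(workload);                                  // outer pre first, outer post last
    }
}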
diff --git a/test/simulator/main/org/apache/cassandra/simulator/ActionSchedule.java b/test/simulator/main/org/apache/cassandra/simulator/ActionSchedule.java
new file mode 100644
index 0000000..18fc877
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/ActionSchedule.java
@@ -0,0 +1,438 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.function.LongConsumer;
+import java.util.function.LongSupplier;
+import java.util.stream.Stream;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.util.internal.DefaultPriorityQueue;
+import io.netty.util.internal.PriorityQueue;
+import org.apache.cassandra.simulator.OrderOn.OrderOnId;
+import org.apache.cassandra.simulator.Ordered.Sequence;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.simulator.utils.SafeCollections;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.Throwables;
+
+import static org.apache.cassandra.simulator.Action.Modifier.DAEMON;
+import static org.apache.cassandra.simulator.Action.Modifier.STREAM;
+import static org.apache.cassandra.simulator.Action.Phase.CONSEQUENCE;
+import static org.apache.cassandra.simulator.Action.Phase.READY_TO_SCHEDULE;
+import static org.apache.cassandra.simulator.Action.Phase.RUNNABLE;
+import static org.apache.cassandra.simulator.Action.Phase.SCHEDULED;
+import static org.apache.cassandra.simulator.Action.Phase.SEQUENCED_POST_SCHEDULED;
+import static org.apache.cassandra.simulator.Action.Phase.SEQUENCED_PRE_SCHEDULED;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.TIME_LIMITED;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.UNLIMITED;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.simulator.SimulatorUtils.dumpStackTraces;
+
+/**
+ * TODO (feature): support total stalls on specific nodes
+ *
+ * This class coordinates the running of actions that have been planned by an ActionPlan, or are the consequences
+ * of actions that have been executed by such a plan. This coordination includes enforcing all {@link OrderOn}
+ * criteria, and running DAEMON (recurring scheduled) tasks.
+ *
+ * Note there is a distinct scheduling mechanism, {@link org.apache.cassandra.simulator.Action.Modifier#WITHHOLD},
+ * coordinated by an Action and its parent, which is used to prevent certain actions from running until all of
+ * their descendants have executed (with the aim that it is ordinarily invalidated before this happens); it is
+ * not imposed here because it would be more complicated to manage.
+ */
+public class ActionSchedule implements CloseableIterator<Object>, LongConsumer
+{
+    private static final Logger logger = LoggerFactory.getLogger(ActionSchedule.class);
+
+    public enum Mode { TIME_LIMITED, STREAM_LIMITED, TIME_AND_STREAM_LIMITED, FINITE, UNLIMITED }
+
+    public static class Work
+    {
+        final Mode mode;
+        final long runForNanos;
+        final List<ActionList> actors;
+
+        public Work(Mode mode, List<ActionList> actors)
+        {
+            this(mode, -1, actors);
+            Preconditions.checkArgument(mode != TIME_LIMITED);
+        }
+
+        public Work(long runForNanos, List<ActionList> actors)
+        {
+            this(TIME_LIMITED, runForNanos, actors);
+            Preconditions.checkArgument(runForNanos > 0);
+        }
+
+        public Work(Mode mode, long runForNanos, List<ActionList> actors)
+        {
+            this.mode = mode;
+            this.runForNanos = runForNanos;
+            this.actors = actors;
+        }
+    }
+
+    public static class ReconcileItem
+    {
+        final long start, end;
+        final Action performed;
+        final ActionList result;
+
+        public ReconcileItem(long start, long end, Action performed, ActionList result)
+        {
+            this.start = start;
+            this.end = end;
+            this.performed = performed;
+            this.result = result;
+        }
+
+        public String toString()
+        {
+            return "run:" + performed.toReconcileString() + "; next:" + result.toReconcileString()
+                   + "; between [" + start + ',' + end + ']';
+        }
+    }
+
+    final SimulatedTime time;
+    final FutureActionScheduler scheduler;
+    final RunnableActionScheduler runnableScheduler;
+    final LongSupplier schedulerJitter; // we will prioritise all actions scheduled to run within this period of the current oldest action
+    long currentJitter, currentJitterUntil;
+
+    // Action flow is:
+    //    perform() -> [withheld]
+    //              -> consequences
+    //              -> [pendingDaemonWave | <invalidate daemon>]
+    //              -> [sequences (if ordered and SEQUENCE_EAGERLY)]
+    //              -> [scheduled]
+    //              -> [sequences (if ordered and !SEQUENCE_EAGERLY)]
+    //              -> runnable + [runnableByScheduledAt]
+    final Map<OrderOn, Sequence> sequences = new HashMap<>();
+    // queue of items that are not yet runnable sorted by deadline
+    final PriorityQueue<Action> scheduled = new DefaultPriorityQueue<>(Action::compareByDeadline, 128);
+    // queue of items that are runnable (i.e. within scheduler jitter of min deadline) sorted by their execution order (i.e. priority)
+    final PriorityQueue<Action> runnable = new DefaultPriorityQueue<>(Action::compareByPriority, 128);
+    // auxiliary queue of items that are runnable so that we may track the time span covered by runnable items we are randomising execution of
+    final PriorityQueue<Action> runnableByDeadline = new DefaultPriorityQueue<>(Action::compareByDeadline, 128);
+
+    private Mode mode;
+
+    // if running in TIME_LIMITED mode, stop ALL streams (finite or infinite) and daemon tasks once we pass this point
+    private long runUntilNanos;
+
+    // if running in STREAM_LIMITED mode, stop infinite streams once we have no more finite streams to process
+    private int activeFiniteStreamCount;
+
+    // If running in UNLIMITED mode, release daemons (recurring tasks) in waves,
+    // so we can simplify checking if they're all that's running
+    // TODO (cleanup): we can do better than this, probably most straightforwardly by ensuring daemon actions
+    //                 have a consistent but unique id(), and managing the set of these.
+    private int activeDaemonWaveCount;
+    private int pendingDaemonWaveCountDown;
+    private DefaultPriorityQueue<Action> pendingDaemonWave;
+
+    private final Iterator<Work> moreWork;
+
+    public ActionSchedule(SimulatedTime time, FutureActionScheduler futureScheduler, LongSupplier schedulerJitter, RunnableActionScheduler runnableScheduler, Work... moreWork)
+    {
+        this(time, futureScheduler, runnableScheduler, schedulerJitter, Arrays.asList(moreWork).iterator());
+    }
+
+    public ActionSchedule(SimulatedTime time, FutureActionScheduler futureScheduler, RunnableActionScheduler runnableScheduler, LongSupplier schedulerJitter, Iterator<Work> moreWork)
+    {
+        this.time = time;
+        this.runnableScheduler = runnableScheduler;
+        this.time.onDiscontinuity(this);
+        this.scheduler = futureScheduler;
+        this.schedulerJitter = schedulerJitter;
+        this.moreWork = moreWork;
+        moreWork();
+    }
+
+    void add(Action action)
+    {
+        Preconditions.checkState(action.phase() == CONSEQUENCE);
+        action.schedule(time, scheduler);
+        action.setupOrdering(this);
+        if (action.is(STREAM) && !action.is(DAEMON))
+            ++activeFiniteStreamCount;
+
+        switch (mode)
+        {
+            default: throw new AssertionError();
+            case TIME_AND_STREAM_LIMITED:
+                if ((activeFiniteStreamCount == 0 || time.nanoTime() >= runUntilNanos) && action.is(DAEMON))
+                {
+                    action.cancel();
+                    return;
+                }
+                break;
+            case TIME_LIMITED:
+                if (time.nanoTime() >= runUntilNanos && (action.is(DAEMON) || action.is(STREAM)))
+                {
+                    action.cancel();
+                    return;
+                }
+                break;
+            case STREAM_LIMITED:
+                if (activeFiniteStreamCount == 0 && action.is(DAEMON))
+                {
+                    action.cancel();
+                    return;
+                }
+                break;
+            case UNLIMITED:
+                if (action.is(STREAM)) throw new IllegalStateException();
+                if (action.is(DAEMON))
+                {
+                    action.saveIn(pendingDaemonWave);
+                    action.advanceTo(READY_TO_SCHEDULE);
+                    return;
+                }
+                break;
+            case FINITE:
+                if (action.is(STREAM)) throw new IllegalStateException();
+                break;
+        }
+        action.advanceTo(READY_TO_SCHEDULE);
+        advance(action);
+    }
+
+    void advance(Action action)
+    {
+        switch (action.phase())
+        {
+            default:
+                throw new AssertionError();
+
+            case CONSEQUENCE:
+                // this should only happen if we invalidate an Ordered action that tries to
+                // enqueue one of the actions we are in the middle of scheduling for the first time
+                return;
+
+            case READY_TO_SCHEDULE:
+                if (action.ordered != null && action.ordered.waitPreScheduled())
+                {
+                    action.advanceTo(SEQUENCED_PRE_SCHEDULED);
+                    return;
+                }
+
+            case SEQUENCED_PRE_SCHEDULED:
+                if (action.deadline() > time.nanoTime())
+                {
+                    action.addTo(scheduled);
+                    action.advanceTo(SCHEDULED);
+                    return;
+                }
+
+            case SCHEDULED:
+                if (action.ordered != null && action.ordered.waitPostScheduled())
+                {
+                    action.advanceTo(SEQUENCED_POST_SCHEDULED);
+                    return;
+                }
+
+            case SEQUENCED_POST_SCHEDULED:
+                action.addTo(runnable);
+                action.saveIn(runnableByDeadline);
+                action.advanceTo(RUNNABLE);
+        }
+    }
+
+    void add(ActionList add)
+    {
+        if (add.isEmpty())
+            return;
+
+        add.forEach(this::add);
+    }
+
+    public boolean hasNext()
+    {
+        if (!runnable.isEmpty() || !scheduled.isEmpty())
+            return true;
+
+        while (moreWork())
+        {
+            if (!runnable.isEmpty() || !scheduled.isEmpty())
+                return true;
+        }
+
+        if (!sequences.isEmpty())
+        {
+            // TODO (feature): detection of which action is blocking progress, and logging of its stack trace only
+            Stream<Action> actions;
+            if (Ordered.DEBUG)
+            {
+                logger.error("Simulation failed to make progress; blocked task graph:");
+                actions = sequences.values()
+                                   .stream()
+                                   .flatMap(s -> Stream.concat(s.maybeRunning.stream(), s.next.stream()))
+                                   .map(o -> o.ordered().action);
+            }
+            else
+            {
+                logger.error("Simulation failed to make progress. Run with -Dcassandra.test.simulator.debug=true to see the blocked task graph. Blocked tasks:");
+                actions = sequences.values()
+                                   .stream()
+                                   .filter(s -> s.on instanceof OrderOnId)
+                                   .map(s -> ((OrderOnId) s.on).id)
+                                   .flatMap(s -> s instanceof ActionList ? ((ActionList) s).stream() : Stream.empty());
+            }
+
+            actions.filter(Action::isStarted)
+                   .distinct()
+                   .sorted(Comparator.comparingLong(a -> ((long) ((a.isStarted() ? 1 : 0) + (a.isFinished() ? 2 : 0)) << 32) | a.childCount()))
+                   .forEach(a -> logger.error(a.describeCurrentState()));
+
+            logger.error("Thread stack traces:");
+            dumpStackTraces(logger);
+            throw failWithOOM();
+        }
+
+        return false;
+    }
+
+    private boolean moreWork()
+    {
+        if (!moreWork.hasNext())
+            return false;
+
+        Work work = moreWork.next();
+        this.runUntilNanos = work.runForNanos < 0 ? -1 : time.nanoTime() + work.runForNanos;
+        Mode oldMode = mode;
+        mode = work.mode;
+        if (oldMode != work.mode)
+        {
+            if (work.mode == UNLIMITED)
+            {
+                this.pendingDaemonWave = new DefaultPriorityQueue<>(Action::compareByPriority, 128);
+            }
+            else if (oldMode == UNLIMITED)
+            {
+                while (!pendingDaemonWave.isEmpty())
+                    advance(pendingDaemonWave.poll());
+                pendingDaemonWave = null;
+            }
+        }
+        work.actors.forEach(runnableScheduler::attachTo);
+        work.actors.forEach(a -> a.forEach(Action::setConsequence));
+        work.actors.forEach(this::add);
+        return true;
+    }
+
+    public Object next()
+    {
+        long now = time.nanoTime();
+        if (now >= currentJitterUntil)
+        {
+            currentJitter = schedulerJitter.getAsLong();
+            currentJitterUntil = now + currentJitter + schedulerJitter.getAsLong();
+        }
+        if (!scheduled.isEmpty())
+        {
+            long scheduleUntil = Math.min((runnableByDeadline.isEmpty() ? now : runnableByDeadline.peek().deadline())
+                                          + currentJitter, currentJitterUntil);
+
+            while (!scheduled.isEmpty() && (runnable.isEmpty() || scheduled.peek().deadline() <= scheduleUntil))
+                advance(scheduled.poll());
+        }
+
+        Action perform = runnable.poll();
+        if (perform == null)
+            throw new NoSuchElementException();
+
+        if (!runnableByDeadline.remove(perform) && perform.deadline() > 0)
+            throw new IllegalStateException();
+        time.tick(perform.deadline());
+        maybeScheduleDaemons(perform);
+
+        ActionList consequences = perform.perform();
+        add(consequences);
+        if (perform.is(STREAM) && !perform.is(DAEMON))
+            --activeFiniteStreamCount;
+
+        long end = time.nanoTime();
+        return new ReconcileItem(now, end, perform, consequences);
+    }
+
+    private void maybeScheduleDaemons(Action perform)
+    {
+        if (pendingDaemonWave != null)
+        {
+            if (perform.is(DAEMON) && --activeDaemonWaveCount == 0)
+            {
+                pendingDaemonWaveCountDown = Math.max(128, 16 * (scheduled.size() + pendingDaemonWave.size()));
+            }
+            else if (activeDaemonWaveCount == 0 && --pendingDaemonWaveCountDown <= 0)
+            {
+                activeDaemonWaveCount = pendingDaemonWave.size();
+                while (!pendingDaemonWave.isEmpty())
+                    advance(pendingDaemonWave.poll());
+                if (activeDaemonWaveCount == 0) pendingDaemonWaveCountDown = Math.max(128, 16 * scheduled.size());
+            }
+        }
+    }
+
+    public void close()
+    {
+        if (sequences.isEmpty() && scheduled.isEmpty() && runnable.isEmpty()
+            && (pendingDaemonWave == null || pendingDaemonWave.isEmpty()) && !moreWork.hasNext())
+            return;
+
+        List<Sequence> invalidateSequences = new ArrayList<>(this.sequences.values());
+        List<Action> invalidateActions = new ArrayList<>(scheduled.size() + runnable.size() + (pendingDaemonWave == null ? 0 : pendingDaemonWave.size()));
+        invalidateActions.addAll(scheduled);
+        invalidateActions.addAll(runnable);
+        if (pendingDaemonWave != null)
+            invalidateActions.addAll(pendingDaemonWave);
+        while (moreWork.hasNext())
+            moreWork.next().actors.forEach(invalidateActions::addAll);
+
+        Throwable fail = SafeCollections.safeForEach(invalidateSequences, Sequence::invalidatePending);
+        fail = Throwables.merge(fail, SafeCollections.safeForEach(invalidateActions, Action::invalidate));
+        scheduled.clear();
+        runnable.clear();
+        runnableByDeadline.clear();
+        if (pendingDaemonWave != null)
+            pendingDaemonWave.clear();
+        sequences.clear();
+        Throwables.maybeFail(fail);
+    }
+
+    @Override
+    public void accept(long discontinuity)
+    {
+        if (runUntilNanos > 0)
+            runUntilNanos += discontinuity;
+    }
+
+}
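To show how a schedule is consumed (a sketch, not part of this patch): ActionPlan.iterator(...) above returns a CloseableIterator whose next() performs exactly one action and yields a ReconcileItem describing what ran, its consequences and the simulated time window; close() invalidates anything left unrun, for example when a run aborts early.

import org.apache.cassandra.simulator.ActionSchedule;
import org.apache.cassandra.utils.CloseableIterator;

class SimulationLoopSketch
{
    // hypothetical driver: drain a schedule to completion, printing each reconciliation record
    static void drain(CloseableIterator<?> schedule)
    {
        try (CloseableIterator<?> it = schedule)
        {
            while (it.hasNext())
            {
                Object step = it.next();
                if (step instanceof ActionSchedule.ReconcileItem)
                    System.out.println(step); // "run:...; next:...; between [start,end]"
            }
        }
    }
}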
diff --git a/test/simulator/main/org/apache/cassandra/simulator/Actions.java b/test/simulator/main/org/apache/cassandra/simulator/Actions.java
new file mode 100644
index 0000000..eea1dd9
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/Actions.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.function.Supplier;
+
+import org.apache.cassandra.simulator.Action.Modifiers;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.INFINITE_STREAM;
+import static org.apache.cassandra.simulator.Action.Modifiers.INFINITE_STREAM_ITEM;
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+import static org.apache.cassandra.simulator.Action.Modifiers.STREAM;
+import static org.apache.cassandra.simulator.Action.Modifiers.STREAM_ITEM;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+import static org.apache.cassandra.utils.LazyToString.lazy;
+
+public class Actions
+{
+    public static class LambdaAction extends Action
+    {
+        private Supplier<ActionList> perform;
+
+        public LambdaAction(Object description, Supplier<ActionList> perform)
+        {
+            this(description, Modifiers.NONE, perform);
+        }
+
+        public LambdaAction(Object description, Modifiers self, Supplier<ActionList> perform)
+        {
+            this(description, self, Modifiers.NONE, perform);
+        }
+
+        public LambdaAction(Object description, Modifiers self, Modifiers children, Supplier<ActionList> perform)
+        {
+            this(description, OrderOn.NONE, self, children, perform);
+        }
+
+        public LambdaAction(Object description, OrderOn orderOn, Modifiers self, Modifiers children, Supplier<ActionList> perform)
+        {
+            super(description, orderOn, self, children);
+            this.perform = perform;
+        }
+
+        protected ActionList performSimple()
+        {
+            ActionList result = perform.get();
+            perform = null;
+            return result;
+        }
+    }
+
+    /**
+     * Should always be performed eventually.
+     */
+    public static class ReliableAction extends LambdaAction
+    {
+        public ReliableAction(Object description, Supplier<ActionList> perform, boolean transitive)
+        {
+            this(description, RELIABLE, transitive ? RELIABLE : NONE, perform);
+        }
+
+        public ReliableAction(Object description, Modifiers self, Modifiers children, Supplier<ActionList> perform)
+        {
+            this(description, OrderOn.NONE, self, children, perform);
+        }
+
+        public ReliableAction(Object description, OrderOn orderOn, Modifiers self, Modifiers children, Supplier<ActionList> perform)
+        {
+            super(description, orderOn, self, children, perform);
+            assert !is(Modifier.DROP);
+            assert children.is(Modifier.RELIABLE);
+        }
+
+        public static ReliableAction transitively(Object description, Supplier<ActionList> action)
+        {
+            return new ReliableAction(description, action, true);
+        }
+    }
+
+    /**
+     * Should always be performed in strict order, i.e. all of this action's child actions should complete before
+     * the next action scheduled by the same actor is invoked.
+     */
+    public static class StrictAction extends LambdaAction
+    {
+        public StrictAction(Object description, Supplier<ActionList> perform, boolean transitive)
+        {
+            super(description, STRICT, transitive ? STRICT : NONE, perform);
+        }
+
+        public static StrictAction of(Object description, Supplier<ActionList> action)
+        {
+            return new StrictAction(description, action, false);
+        }
+    }
+
+    public static Action of(Object description, Supplier<ActionList> action)
+    {
+        return new LambdaAction(description, action);
+    }
+
+    public static Action of(Modifiers self, Modifiers children, Object description, Supplier<ActionList> action)
+    {
+        return new LambdaAction(description, self, children, action);
+    }
+
+    public static Action of(OrderOn orderOn, Modifiers self, Modifiers children, Object description, Supplier<ActionList> action)
+    {
+        return new LambdaAction(description, orderOn, self, children, action);
+    }
+
+    public static Action empty(String message)
+    {
+        return of(message, ActionList::empty);
+    }
+
+    public static Action empty(Modifiers modifiers, Object message)
+    {
+        return of(modifiers, NONE, message, ActionList::empty);
+    }
+
+    public static Action stream(int concurrency, Supplier<Action> actions) { return stream(new OrderOn.Strict(actions, concurrency), actions); }
+    public static Action stream(OrderOn on, Supplier<Action> actions) { return of(OrderOn.NONE, STREAM, NONE, on, () -> ActionList.of(streamNextSupplier(STREAM, STREAM_ITEM, on, 0, on, actions))); }
+    public static Action infiniteStream(int concurrency, Supplier<Action> actions) { return infiniteStream(new OrderOn.Strict(actions, concurrency), actions); }
+    public static Action infiniteStream(OrderOn on, Supplier<Action> actions) { return of(OrderOn.NONE, INFINITE_STREAM, NONE, on, () -> ActionList.of(streamNextSupplier(INFINITE_STREAM, INFINITE_STREAM_ITEM, on, 0, on, actions))); }
+    private static ActionList next(Modifiers modifiers, Object description, int sequence, OrderOn on, Supplier<Action> actions)
+    {
+        Action next = actions.get();
+        if (next == null)
+            return ActionList.empty();
+        return ActionList.of(next, streamNextSupplier(modifiers, modifiers, description, sequence + 1, on, actions));
+    }
+
+    private static Action streamNextSupplier(Modifiers modifiers, Modifiers nextModifiers, Object description, int sequence, OrderOn on, Supplier<Action> actions)
+    {
+        return Actions.of(on, modifiers, NONE,
+                          lazy(() -> description + " " + sequence), () -> next(nextModifiers, description, sequence, on, actions));
+    }
+
+
+}
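A small sketch of the helpers above (not part of this patch): Actions.of builds a leaf action from a description and a supplier of consequences, and Actions.stream chains a supplier into a finite stream that ends when the supplier returns null. The counter below is a stand-in for real work, and the concurrency of 2 is arbitrary.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.cassandra.simulator.Action;
import org.apache.cassandra.simulator.ActionList;
import org.apache.cassandra.simulator.Actions;

class ActionsSketch
{
    static Action countTo(int count)
    {
        AtomicInteger next = new AtomicInteger();
        return Actions.stream(2, () -> {
            int i = next.incrementAndGet();
            if (i > count)
                return null; // a null Action ends the (finite) stream, see next(...) above
            return Actions.of("increment " + i, ActionList::empty);
        });
    }
}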
diff --git a/test/simulator/main/org/apache/cassandra/simulator/ClusterSimulation.java b/test/simulator/main/org/apache/cassandra/simulator/ClusterSimulation.java
new file mode 100644
index 0000000..caf642e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/ClusterSimulation.java
@@ -0,0 +1,835 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.nio.file.FileSystem;
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.Consumer;
+import java.util.function.IntSupplier;
+import java.util.function.LongConsumer;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+import com.google.common.jimfs.Configuration;
+import com.google.common.jimfs.Jimfs;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.FutureCallback;
+
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.distributed.api.IInstanceInitializer;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableBiConsumer;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableConsumer;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
+import org.apache.cassandra.distributed.impl.DirectStreamingConnectionFactory;
+import org.apache.cassandra.distributed.impl.IsolatedExecutor;
+import org.apache.cassandra.io.compress.LZ4Compressor;
+import org.apache.cassandra.service.paxos.BallotGenerator;
+import org.apache.cassandra.service.paxos.PaxosPrepare;
+import org.apache.cassandra.simulator.RandomSource.Choices;
+import org.apache.cassandra.simulator.asm.InterceptAsClassTransformer;
+import org.apache.cassandra.simulator.asm.NemesisFieldSelectors;
+import org.apache.cassandra.simulator.cluster.ClusterActions;
+import org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange;
+import org.apache.cassandra.simulator.systems.Failures;
+import org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites.Capture;
+import org.apache.cassandra.simulator.systems.InterceptibleThread;
+import org.apache.cassandra.simulator.systems.InterceptingGlobalMethods;
+import org.apache.cassandra.simulator.systems.InterceptingGlobalMethods.ThreadLocalRandomCheck;
+import org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods;
+import org.apache.cassandra.simulator.systems.InterceptingExecutorFactory;
+import org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods.IfInterceptibleThread;
+import org.apache.cassandra.simulator.systems.NetworkConfig;
+import org.apache.cassandra.simulator.systems.NetworkConfig.PhaseConfig;
+import org.apache.cassandra.simulator.systems.SchedulerConfig;
+import org.apache.cassandra.simulator.systems.SimulatedFutureActionScheduler;
+import org.apache.cassandra.simulator.systems.SimulatedSystems;
+import org.apache.cassandra.simulator.systems.SimulatedBallots;
+import org.apache.cassandra.simulator.systems.SimulatedExecution;
+import org.apache.cassandra.simulator.systems.SimulatedFailureDetector;
+import org.apache.cassandra.simulator.systems.SimulatedMessageDelivery;
+import org.apache.cassandra.simulator.systems.SimulatedSnitch;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.simulator.utils.ChanceRange;
+import org.apache.cassandra.simulator.utils.IntRange;
+import org.apache.cassandra.simulator.utils.KindOfSequence;
+import org.apache.cassandra.simulator.utils.LongRange;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Throwables;
+import org.apache.cassandra.utils.concurrent.Ref;
+import org.apache.cassandra.utils.memory.BufferPool;
+import org.apache.cassandra.utils.memory.BufferPools;
+import org.apache.cassandra.utils.memory.HeapPool;
+
+import static java.lang.Integer.min;
+import static java.util.Collections.emptyMap;
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.distributed.impl.AbstractCluster.getSharedClassPredicate;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.utils.Shared.Scope.ANY;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Wraps a Cluster and a Simulation to run upon it
+ */
+@SuppressWarnings("RedundantCast")
+public class ClusterSimulation<S extends Simulation> implements AutoCloseable
+{
+    public static final Class<?>[] SHARE = new Class[]
+                                        {
+                                            AsyncFunction.class,
+                                            FutureCallback.class,
+                                            io.netty.util.concurrent.GenericFutureListener.class,
+                                            io.netty.channel.FileRegion.class,
+                                            io.netty.util.ReferenceCounted.class
+                                        };
+
+    public static final Class<?>[] ISOLATE = new Class<?>[0];
+
+    public interface SimulationFactory<S extends Simulation>
+    {
+        S create(SimulatedSystems simulated, RunnableActionScheduler scheduler, Cluster cluster, ClusterActions.Options options);
+    }
+
+    public interface SchedulerFactory
+    {
+        RunnableActionScheduler create(RandomSource random);
+    }
+
+    @SuppressWarnings("UnusedReturnValue")
+    public static abstract class Builder<S extends Simulation>
+    {
+        protected Supplier<RandomSource> randomSupplier = RandomSource.Default::new;
+        protected int uniqueNum = 0;
+        protected int threadCount;
+
+        protected int concurrency = 10;
+        protected IntRange nodeCount = new IntRange(4, 16), dcCount = new IntRange(1, 2),
+                        primaryKeySeconds = new IntRange(5, 30), withinKeyConcurrency = new IntRange(2, 5);
+        protected TopologyChange[] topologyChanges = TopologyChange.values();
+        protected int topologyChangeLimit = -1;
+
+        protected int primaryKeyCount;
+        protected int secondsToSimulate;
+
+        protected ChanceRange normalNetworkDropChance  = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0f, 0.001f),
+                              normalNetworkDelayChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.01f, 0.1f),
+                                flakyNetworkDropChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.01f, 0.1f),
+                               flakyNetworkDelayChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.01f, 0.1f),
+                                networkPartitionChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.0f, 0.1f),
+                                    networkFlakyChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.0f, 0.1f),
+                                    monitorDelayChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.01f, 0.1f),
+                                  schedulerDelayChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.01f, 0.1f),
+                                         timeoutChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.01f, 0.1f),
+                                            readChance = new ChanceRange(RandomSource::uniformFloat,                        0.05f, 0.95f),
+                                         nemesisChance = new ChanceRange(randomSource -> randomSource.qlog2uniformFloat(4), 0.001f, 0.01f);
+
+        protected LongRange normalNetworkLatencyNanos = new LongRange(1, 2, MILLISECONDS, NANOSECONDS),
+                              normalNetworkDelayNanos = new LongRange(2, 100, MILLISECONDS, NANOSECONDS),
+                             flakyNetworkLatencyNanos = new LongRange(2, 100, MILLISECONDS, NANOSECONDS),
+                               flakyNetworkDelayNanos = new LongRange(2, 100, MILLISECONDS, NANOSECONDS),
+                           networkReconfigureInterval = new LongRange(50, 5000, MICROSECONDS, NANOSECONDS),
+                                 schedulerJitterNanos = new LongRange(100, 2000, MICROSECONDS, NANOSECONDS),
+                                  schedulerDelayNanos = new LongRange(0, 50, MICROSECONDS, NANOSECONDS),
+                              schedulerLongDelayNanos = new LongRange(50, 5000, MICROSECONDS, NANOSECONDS),
+                                      clockDriftNanos = new LongRange(1, 5000, MILLISECONDS, NANOSECONDS),
+                       clockDiscontinuitIntervalNanos = new LongRange(10, 60, SECONDS, NANOSECONDS),
+                          topologyChangeIntervalNanos = new LongRange(5, 15, SECONDS, NANOSECONDS);
+
+
+
+        protected long contentionTimeoutNanos = MILLISECONDS.toNanos(500L),
+                            writeTimeoutNanos = SECONDS.toNanos(1L),
+                             readTimeoutNanos = SECONDS.toNanos(2L),
+                          requestTimeoutNanos = SECONDS.toNanos(2L);
+
+        protected SchedulerFactory schedulerFactory = schedulerFactory(RunnableActionScheduler.Kind.values());
+
+        protected Debug debug = new Debug();
+        protected Capture capture = new Capture(false, false, false);
+        protected HeapPool.Logged.Listener memoryListener;
+        protected SimulatedTime.Listener timeListener = (i1, i2) -> {};
+        protected LongConsumer onThreadLocalRandomCheck;
+
+        public Debug debug()
+        {
+            return debug;
+        }
+
+        public Builder<S> debug(EnumMap<Debug.Info, Debug.Levels> debug, int[] primaryKeys)
+        {
+            this.debug = new Debug(debug, primaryKeys);
+            return this;
+        }
+
+        public Builder<S> unique(int num)
+        {
+            this.uniqueNum = num;
+            return this;
+        }
+
+        public Builder<S> threadCount(int count)
+        {
+            this.threadCount = count;
+            return this;
+        }
+
+        public Builder<S> nodes(IntRange range)
+        {
+            this.nodeCount = range;
+            return this;
+        }
+
+        public Builder<S> nodes(int min, int max)
+        {
+            this.nodeCount = new IntRange(min, max);
+            return this;
+        }
+
+        public Builder<S> dcs(IntRange range)
+        {
+            this.dcCount = range;
+            return this;
+        }
+
+        public Builder<S> dcs(int min, int max)
+        {
+            this.dcCount = new IntRange(min, max);
+            return this;
+        }
+
+        public Builder<S> concurrency(int concurrency)
+        {
+            this.concurrency = concurrency;
+            return this;
+        }
+
+        public IntRange primaryKeySeconds()
+        {
+            return primaryKeySeconds;
+        }
+
+        public Builder<S> primaryKeySeconds(IntRange range)
+        {
+            this.primaryKeySeconds = range;
+            return this;
+        }
+
+        public Builder<S> withinKeyConcurrency(IntRange range)
+        {
+            this.withinKeyConcurrency = range;
+            return this;
+        }
+
+        public Builder<S> withinKeyConcurrency(int min, int max)
+        {
+            this.withinKeyConcurrency = new IntRange(min, max);
+            return this;
+        }
+
+        public Builder<S> topologyChanges(TopologyChange[] topologyChanges)
+        {
+            this.topologyChanges = topologyChanges;
+            return this;
+        }
+
+        public Builder<S> topologyChangeIntervalNanos(LongRange topologyChangeIntervalNanos)
+        {
+            this.topologyChangeIntervalNanos = topologyChangeIntervalNanos;
+            return this;
+        }
+
+        public Builder<S> topologyChangeLimit(int topologyChangeLimit)
+        {
+            this.topologyChangeLimit = topologyChangeLimit;
+            return this;
+        }
+
+        public int primaryKeyCount()
+        {
+            return primaryKeyCount;
+        }
+
+        public Builder<S> primaryKeyCount(int count)
+        {
+            this.primaryKeyCount = count;
+            return this;
+        }
+
+        public int secondsToSimulate()
+        {
+            return secondsToSimulate;
+        }
+
+        public Builder<S> secondsToSimulate(int seconds)
+        {
+            this.secondsToSimulate = seconds;
+            return this;
+        }
+
+        public Builder<S> networkPartitionChance(ChanceRange partitionChance)
+        {
+            this.networkPartitionChance = partitionChance;
+            return this;
+        }
+
+        public Builder<S> networkFlakyChance(ChanceRange flakyChance)
+        {
+            this.networkFlakyChance = flakyChance;
+            return this;
+        }
+
+        public Builder<S> networkReconfigureInterval(LongRange reconfigureIntervalNanos)
+        {
+            this.networkReconfigureInterval = reconfigureIntervalNanos;
+            return this;
+        }
+
+        public Builder<S> networkDropChance(ChanceRange dropChance)
+        {
+            this.normalNetworkDropChance = dropChance;
+            return this;
+        }
+
+        public Builder<S> networkDelayChance(ChanceRange delayChance)
+        {
+            this.normalNetworkDelayChance = delayChance;
+            return this;
+        }
+
+        public Builder<S> networkLatencyNanos(LongRange networkLatencyNanos)
+        {
+            this.normalNetworkLatencyNanos = networkLatencyNanos;
+            return this;
+        }
+
+        public Builder<S> networkDelayNanos(LongRange networkDelayNanos)
+        {
+            this.normalNetworkDelayNanos = networkDelayNanos;
+            return this;
+        }
+
+        public Builder<S> flakyNetworkDropChance(ChanceRange dropChance)
+        {
+            this.flakyNetworkDropChance = dropChance;
+            return this;
+        }
+
+        public Builder<S> flakyNetworkDelayChance(ChanceRange delayChance)
+        {
+            this.flakyNetworkDelayChance = delayChance;
+            return this;
+        }
+
+        public Builder<S> flakyNetworkLatencyNanos(LongRange networkLatencyNanos)
+        {
+            this.flakyNetworkLatencyNanos = networkLatencyNanos;
+            return this;
+        }
+
+        public Builder<S> flakyNetworkDelayNanos(LongRange networkDelayNanos)
+        {
+            this.flakyNetworkDelayNanos = networkDelayNanos;
+            return this;
+        }
+
+        public Builder<S> clockDriftNanos(LongRange clockDriftNanos)
+        {
+            this.clockDriftNanos = clockDriftNanos;
+            return this;
+        }
+
+        public Builder<S> clockDiscontinuityIntervalNanos(LongRange clockDiscontinuityIntervalNanos)
+        {
+            this.clockDiscontinuitIntervalNanos = clockDiscontinuityIntervalNanos;
+            return this;
+        }
+
+        public Builder<S> schedulerDelayChance(ChanceRange delayChance)
+        {
+            this.schedulerDelayChance = delayChance;
+            return this;
+        }
+
+        public Builder<S> schedulerJitterNanos(LongRange schedulerJitterNanos)
+        {
+            this.schedulerJitterNanos = schedulerJitterNanos;
+            return this;
+        }
+
+        public LongRange schedulerJitterNanos()
+        {
+            return schedulerJitterNanos;
+        }
+
+        public Builder<S> schedulerDelayNanos(LongRange schedulerDelayNanos)
+        {
+            this.schedulerDelayNanos = schedulerDelayNanos;
+            return this;
+        }
+
+        public Builder<S> schedulerLongDelayNanos(LongRange schedulerLongDelayNanos)
+        {
+            this.schedulerLongDelayNanos = schedulerLongDelayNanos;
+            return this;
+        }
+
+        public Builder<S> timeoutChance(ChanceRange timeoutChance)
+        {
+            this.timeoutChance = timeoutChance;
+            return this;
+        }
+
+        public ChanceRange readChance()
+        {
+            return readChance;
+        }
+
+        public IntRange withinKeyConcurrency()
+        {
+            return withinKeyConcurrency;
+        }
+
+        public int concurrency()
+        {
+            return concurrency;
+        }
+
+        public Builder<S> readChance(ChanceRange readChance)
+        {
+            this.readChance = readChance;
+            return this;
+        }
+
+        public Builder<S> nemesisChance(ChanceRange nemesisChance)
+        {
+            this.nemesisChance = nemesisChance;
+            return this;
+        }
+
+        public Builder<S> scheduler(RunnableActionScheduler.Kind... kinds)
+        {
+            this.schedulerFactory = schedulerFactory(kinds);
+            return this;
+        }
+
+        public SimulatedFutureActionScheduler futureActionScheduler(int nodeCount, SimulatedTime time, RandomSource random)
+        {
+            KindOfSequence kind = Choices.random(random, KindOfSequence.values())
+                                         .choose(random);
+            return new SimulatedFutureActionScheduler(kind, nodeCount, random, time,
+                                                      new NetworkConfig(new PhaseConfig(normalNetworkDropChance, normalNetworkDelayChance, normalNetworkLatencyNanos, normalNetworkDelayNanos),
+                                                                        new PhaseConfig(flakyNetworkDropChance, flakyNetworkDelayChance, flakyNetworkLatencyNanos, flakyNetworkDelayNanos),
+                                                                        networkPartitionChance, networkFlakyChance, networkReconfigureInterval),
+                                                      new SchedulerConfig(schedulerDelayChance, schedulerDelayNanos, schedulerLongDelayNanos));
+        }
+
+        static SchedulerFactory schedulerFactory(RunnableActionScheduler.Kind... kinds)
+        {
+            return (random) -> {
+                switch (Choices.random(random, kinds).choose(random))
+                {
+                    default: throw new AssertionError();
+                    case SEQUENTIAL: return new RunnableActionScheduler.Sequential();
+                    case UNIFORM: return new RunnableActionScheduler.RandomUniform(random);
+                    case RANDOM_WALK: return new RunnableActionScheduler.RandomWalk(random);
+                }
+            };
+        }
+
+        public Builder<S> scheduler(SchedulerFactory schedulerFactory)
+        {
+            this.schedulerFactory = schedulerFactory;
+            return this;
+        }
+
+        public Builder<S> random(Supplier<RandomSource> randomSupplier)
+        {
+            this.randomSupplier = randomSupplier;
+            return this;
+        }
+
+        public Builder<S> memoryListener(HeapPool.Logged.Listener memoryListener)
+        {
+            this.memoryListener = memoryListener;
+            return this;
+        }
+
+        public Builder<S> timeListener(SimulatedTime.Listener timeListener)
+        {
+            this.timeListener = timeListener;
+            return this;
+        }
+
+        public Builder<S> capture(Capture capture)
+        {
+            this.capture = capture;
+            return this;
+        }
+
+        public Capture capture()
+        {
+            return capture;
+        }
+
+        public Builder<S> onThreadLocalRandomCheck(LongConsumer runnable)
+        {
+            this.onThreadLocalRandomCheck = runnable;
+            return this;
+        }
+
+        public abstract ClusterSimulation<S> create(long seed) throws IOException;
+    }
+
+    static class ThreadAllocator
+    {
+        final RandomSource random;
+        int clusterPool; // number of threads we have left for the whole cluster
+        int remainingNodes; // number of nodes we still need to allocate them to
+        int allocationPool; // threads to allocate for the node we're currently processing
+        int remainingAllocations; // number of _remaining_ allocations that take() assumes we want to spread threads evenly across
+
+        public ThreadAllocator(RandomSource random, int threadsToAllocate, int betweenNodes)
+        {
+            this.random = random;
+            this.clusterPool = threadsToAllocate;
+            this.remainingNodes = betweenNodes;
+        }
+
+        // randomly set the number of threads in various thread pools
+        IInstanceConfig update(IInstanceConfig config)
+        {
+            cycle();
+            // allocate in ascending order of max, for take() correctness
+            return config
+                   .set("memtable_flush_writers", take(1, 1, 2))
+                   .set("concurrent_compactors", take(1, 1, 4))
+                   .set("concurrent_writes", take(1, 4))
+                   .set("concurrent_counter_writes", take(1, 4))
+                   .set("concurrent_materialized_view_writes", take(1, 4))
+                   .set("concurrent_reads", take(1, 4))
+                   .forceSet("available_processors", take(3, 4));
+        }
+
+        // begin allocating for a new node
+        void cycle()
+        {
+            assert remainingNodes > 0;
+            // return unallocated items to the outerPool
+            clusterPool += allocationPool;
+            // set the curPool to allocate the next allocationPool size
+            allocationPool = clusterPool;
+            remainingAllocations = remainingNodes;
+            // randomly select the next pool size, subtracting it from the outer pool
+            allocationPool = take(1, 1);
+            clusterPool -= allocationPool;
+            // this is hard-coded to match the sum of the first arguments above
+            remainingAllocations = 9;
+            --remainingNodes;
+        }
+
+        /**
+         * See {@link #take(int, int, int)}
+         */
+        int take(int times, int min)
+        {
+            return take(times, min, allocationPool);
+        }
+
+        /**
+         * Allocate a random number of threads between [min..max)
+         * The allocation is suitable for multiple users of the value, i.e.
+         * {@code times} multiples of the result are deducted from the pool.
+         *
+         * If there are adequate supplies we aim to allocate threads "equally" between pools,
+         * selecting a uniform value between 0.5x and 2x the fair split of the remaining pool
+         * on each allocation. If the min/max bounds override that, they are preferred.
+         *
+         * The minimum is always honoured, regardless of available pool size.
+         */
+        int take(int times, int min, int max)
+        {
+            int remaining = remainingAllocations;
+            assert remaining >= times;
+            remainingAllocations -= times;
+            if (remaining * min <= allocationPool)
+                return min;
+            if (times == remaining)
+                return allocationPool / remaining;
+            if (times + 1 == remaining)
+                return random.uniform(Math.max(min, (allocationPool - max) / times), Math.min(max, (allocationPool - min) / times));
+
+            int median = allocationPool / remaining;
+            min = Math.max(min, Math.min(max, median) / 2);
+            max = Math.min(max, median * 2);
+            return min >= max ? min : random.uniform(min, max);
+        }
+    }
+
+
+    public final RandomSource random;
+    public final SimulatedSystems simulated;
+    public final Cluster cluster;
+    public final S simulation;
+    private final FileSystem jimfs;
+    protected final Map<Integer, List<Closeable>> onUnexpectedShutdown = new TreeMap<>();
+    protected final List<Callable<Void>> onShutdown = new CopyOnWriteArrayList<>();
+    protected final ThreadLocalRandomCheck threadLocalRandomCheck;
+
+    public ClusterSimulation(RandomSource random, long seed, int uniqueNum,
+                             Builder<?> builder,
+                             Consumer<IInstanceConfig> configUpdater,
+                             SimulationFactory<S> factory) throws IOException
+    {
+        this.random = random;
+        this.jimfs  = Jimfs.newFileSystem(Long.toHexString(seed) + 'x' + uniqueNum, Configuration.unix().toBuilder()
+                                                                               .setMaxSize(4L << 30).setBlockSize(512)
+                                                                               .build());
+
+        final SimulatedMessageDelivery delivery;
+        final SimulatedExecution execution;
+        final SimulatedBallots ballots;
+        final SimulatedSnitch snitch;
+        final SimulatedTime time;
+        final SimulatedFailureDetector failureDetector;
+
+        int numOfNodes = builder.nodeCount.select(random);
+        int numOfDcs = builder.dcCount.select(random, 0, numOfNodes / 4);
+        int[] numInDcs = new int[numOfDcs];
+        int[] nodeToDc = new int[numOfNodes];
+
+        int[] minRf = new int[numOfDcs], initialRf = new int[numOfDcs], maxRf = new int[numOfDcs];
+        {
+            // TODO (feature): split unevenly
+            int n = 0, nc = 0;
+            for (int i = 0; i < numOfDcs; ++i)
+            {
+                int numInDc = (numOfNodes / numOfDcs) + (numOfNodes % numOfDcs > i ? 1 : 0);
+                numInDcs[i] = numInDc;
+                minRf[i] = 3;
+                maxRf[i] = min(numInDc, 9);
+                initialRf[i] = random.uniform(minRf[i], 1 + maxRf[i]);
+                nc += numInDc;
+                while (n < nc)
+                    nodeToDc[n++] = i;
+            }
+        }
+        snitch = new SimulatedSnitch(nodeToDc, numInDcs);
+
+        execution = new SimulatedExecution();
+
+        KindOfSequence kindOfDriftSequence = Choices.uniform(KindOfSequence.values()).choose(random);
+        KindOfSequence kindOfDiscontinuitySequence = Choices.uniform(KindOfSequence.values()).choose(random);
+        time = new SimulatedTime(numOfNodes, random, 1577836800000L /*Jan 1st UTC*/, builder.clockDriftNanos, kindOfDriftSequence,
+                                 kindOfDiscontinuitySequence.period(builder.clockDiscontinuitIntervalNanos, random),
+                                 builder.timeListener);
+        ballots = new SimulatedBallots(random, () -> {
+            long max = random.uniform(2, 16);
+            return () -> random.uniform(1, max);
+        });
+
+        Predicate<String> sharedClassPredicate = getSharedClassPredicate(ISOLATE, SHARE, ANY, SIMULATION);
+        InterceptAsClassTransformer interceptClasses = new InterceptAsClassTransformer(builder.monitorDelayChance.asSupplier(random), builder.nemesisChance.asSupplier(random), NemesisFieldSelectors.get(), ClassLoader.getSystemClassLoader(), sharedClassPredicate.negate());
+        threadLocalRandomCheck = new ThreadLocalRandomCheck(builder.onThreadLocalRandomCheck);
+
+        Failures failures = new Failures();
+        ThreadAllocator threadAllocator = new ThreadAllocator(random, builder.threadCount, numOfNodes);
+        cluster = snitch.setup(Cluster.build(numOfNodes)
+                         .withRoot(jimfs.getPath("/cassandra"))
+                         .withSharedClasses(sharedClassPredicate)
+                         .withConfig(config -> configUpdater.accept(threadAllocator.update(config
+                             .with(Feature.BLANK_GOSSIP)
+                             .set("read_request_timeout", String.format("%dms", NANOSECONDS.toMillis(builder.readTimeoutNanos)))
+                             .set("write_request_timeout", String.format("%dms", NANOSECONDS.toMillis(builder.writeTimeoutNanos)))
+                             .set("cas_contention_timeout", String.format("%dms", NANOSECONDS.toMillis(builder.contentionTimeoutNanos)))
+                             .set("request_timeout", String.format("%dms", NANOSECONDS.toMillis(builder.requestTimeoutNanos)))
+                             .set("memtable_heap_space", "1MiB")
+                             .set("memtable_allocation_type", builder.memoryListener != null ? "unslabbed_heap_buffers_logged" : "heap_buffers")
+                             .set("file_cache_size", "16MiB")
+                             .set("use_deterministic_table_id", true)
+                             .set("disk_access_mode", "standard")
+                             .set("failure_detector", SimulatedFailureDetector.Instance.class.getName())
+                             .set("commitlog_compression", new ParameterizedClass(LZ4Compressor.class.getName(), emptyMap()))
+                         )))
+                         .withInstanceInitializer(new IInstanceInitializer()
+                         {
+                             @Override
+                             public void initialise(ClassLoader classLoader, ThreadGroup threadGroup, int num, int generation)
+                             {
+                                 List<Closeable> onShutdown = new ArrayList<>();
+                                 InterceptorOfGlobalMethods interceptorOfGlobalMethods = IsolatedExecutor.transferAdhoc((IIsolatedExecutor.SerializableQuadFunction<Capture, LongConsumer, Consumer<Throwable>, RandomSource, InterceptorOfGlobalMethods>) InterceptingGlobalMethods::new, classLoader)
+                                                                                                         .apply(builder.capture, builder.onThreadLocalRandomCheck, failures, random);
+                                 onShutdown.add(interceptorOfGlobalMethods);
+
+                                 InterceptingExecutorFactory factory = execution.factory(interceptorOfGlobalMethods, classLoader, threadGroup);
+                                 IsolatedExecutor.transferAdhoc((SerializableConsumer<ExecutorFactory>) ExecutorFactory.Global::unsafeSet, classLoader)
+                                                 .accept(factory);
+                                 onShutdown.add(factory);
+
+                                 IsolatedExecutor.transferAdhoc((SerializableBiConsumer<InterceptorOfGlobalMethods, IntSupplier>) InterceptorOfGlobalMethods.Global::unsafeSet, classLoader)
+                                                 .accept(interceptorOfGlobalMethods, () -> {
+                                                     if (InterceptibleThread.isDeterministic())
+                                                         throw failWithOOM();
+                                                     return random.uniform(Integer.MIN_VALUE, Integer.MAX_VALUE);
+                                                 });
+                                 onShutdown.add(IsolatedExecutor.transferAdhoc((SerializableRunnable)InterceptorOfGlobalMethods.Global::unsafeReset, classLoader)::run);
+                                 onShutdown.add(time.setup(num, classLoader));
+
+                                 onUnexpectedShutdown.put(num, onShutdown);
+                             }
+
+                             @Override
+                             public void beforeStartup(IInstance i)
+                             {
+                                 ((IInvokableInstance) i).unsafeAcceptOnThisThread(FBUtilities::setAvailableProcessors, i.config().getInt("available_processors"));
+                                 ((IInvokableInstance) i).unsafeAcceptOnThisThread(IfInterceptibleThread::setThreadLocalRandomCheck, (LongConsumer) threadLocalRandomCheck);
+
+                                 int num = i.config().num();
+                                 if (builder.memoryListener != null)
+                                 {
+                                    ((IInvokableInstance) i).unsafeAcceptOnThisThread(HeapPool.Logged::setListener, builder.memoryListener);
+                                     onUnexpectedShutdown.get(num).add(() -> ((IInvokableInstance) i).unsafeAcceptOnThisThread(HeapPool.Logged::setListener, (ignore1, ignore2) -> {}));
+                                 }
+
+                                 ((IInvokableInstance) i).unsafeAcceptOnThisThread(PaxosPrepare::setOnLinearizabilityViolation, SimulatorUtils::failWithOOM);
+                                 onUnexpectedShutdown.get(num).add(() -> ((IInvokableInstance) i).unsafeRunOnThisThread(() -> PaxosPrepare.setOnLinearizabilityViolation(null)));
+                             }
+
+                             @Override
+                             public void afterStartup(IInstance i)
+                             {
+                                 int num = i.config().num();
+                                 ((IInvokableInstance) i).unsafeAcceptOnThisThread(BallotGenerator.Global::unsafeSet, (BallotGenerator) ballots.get());
+                                 onUnexpectedShutdown.get(num).add(() -> ((IInvokableInstance) i).unsafeRunOnThisThread(() -> BallotGenerator.Global.unsafeSet(new BallotGenerator.Default())));
+
+                                 ((IInvokableInstance) i).unsafeAcceptOnThisThread((SerializableConsumer<BufferPool.DebugLeaks>) debug -> BufferPools.forChunkCache().debug(null, debug), failures);
+                                 onUnexpectedShutdown.get(num).add(() -> ((IInvokableInstance) i).unsafeRunOnThisThread(() -> BufferPools.forChunkCache().debug(null, null)));
+
+                                 ((IInvokableInstance) i).unsafeAcceptOnThisThread((SerializableConsumer<BufferPool.DebugLeaks>) debug -> BufferPools.forNetworking().debug(null, debug), failures);
+                                 onUnexpectedShutdown.get(num).add(() -> ((IInvokableInstance) i).unsafeRunOnThisThread(() -> BufferPools.forNetworking().debug(null, null)));
+
+                                 ((IInvokableInstance) i).unsafeAcceptOnThisThread((SerializableConsumer<Ref.OnLeak>) Ref::setOnLeak, failures);
+                                 onUnexpectedShutdown.get(num).add(() -> ((IInvokableInstance) i).unsafeRunOnThisThread(() -> Ref.setOnLeak(null)));
+                             }
+                         }).withClassTransformer(interceptClasses)
+                           .withShutdownExecutor((name, classLoader, shuttingDown, call) -> {
+                               onShutdown.add(call);
+                               return null;
+                           })
+        ).createWithoutStarting();
+
+        IfInterceptibleThread.setThreadLocalRandomCheck(threadLocalRandomCheck);
+        snitch.setup(cluster);
+        DirectStreamingConnectionFactory.setup(cluster);
+        delivery = new SimulatedMessageDelivery(cluster);
+        failureDetector = new SimulatedFailureDetector(cluster);
+        SimulatedFutureActionScheduler futureActionScheduler = builder.futureActionScheduler(numOfNodes, time, random);
+        simulated = new SimulatedSystems(random, time, delivery, execution, ballots, failureDetector, snitch, futureActionScheduler, builder.debug, failures);
+        simulated.register(futureActionScheduler);
+
+        RunnableActionScheduler scheduler = builder.schedulerFactory.create(random);
+        ClusterActions.Options options = new ClusterActions.Options(builder.topologyChangeLimit, Choices.uniform(KindOfSequence.values()).choose(random).period(builder.topologyChangeIntervalNanos, random),
+                                                                    Choices.random(random, builder.topologyChanges),
+                                                                    minRf, initialRf, maxRf, null);
+        simulation = factory.create(simulated, scheduler, cluster, options);
+    }
+
+    public synchronized void close() throws IOException
+    {
+        // Re-enable time on shutdown
+        try
+        {
+            Field field = Clock.Global.class.getDeclaredField("instance");
+            field.setAccessible(true);
+
+            Field modifiersField = Field.class.getDeclaredField("modifiers");
+            modifiersField.setAccessible(true);
+            modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
+
+            field.set(null, new Clock.Default());
+        }
+        catch (NoSuchFieldException|IllegalAccessException e)
+        {
+            throw new RuntimeException(e);
+        }
+
+        threadLocalRandomCheck.stop();
+        simulated.execution.forceStop();
+        SimulatedTime.Global.disable();
+
+        Throwable fail = null;
+        for (int num = 1 ; num <= cluster.size() ; ++num)
+        {
+            if (!cluster.get(num).isShutdown())
+            {
+                fail = Throwables.close(fail, onUnexpectedShutdown.get(num));
+            }
+        }
+
+        try
+        {
+            simulation.close();
+        }
+        catch (Throwable t)
+        {
+            fail = t;
+        }
+
+        try
+        {
+            cluster.close();
+        }
+        catch (Throwable t)
+        {
+            fail = Throwables.merge(fail, t);
+        }
+        for (Callable<Void> call : onShutdown)
+        {
+            try
+            {
+                call.call();
+            }
+            catch (Throwable t)
+            {
+                fail = Throwables.merge(fail, t);
+            }
+        }
+        Throwables.maybeFail(fail, IOException.class);
+    }
+}
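
As a side note, the datacenter assignment in the constructor above splits nodes as evenly as possible, with the first `numOfNodes % numOfDcs` datacenters each taking one extra node. A minimal illustration of just that formula (nothing else from the constructor is assumed):

```java
// Illustration only: the even-with-remainder split used when assigning nodes to DCs.
public class DcSplitSketch
{
    public static void main(String[] args)
    {
        int numOfNodes = 10, numOfDcs = 3;
        for (int i = 0; i < numOfDcs; ++i)
        {
            int numInDc = (numOfNodes / numOfDcs) + (numOfNodes % numOfDcs > i ? 1 : 0);
            System.out.println("dc" + i + " -> " + numInDc + " nodes"); // prints 4, 3, 3
        }
    }
}
```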
diff --git a/test/simulator/main/org/apache/cassandra/simulator/Debug.java b/test/simulator/main/org/apache/cassandra/simulator/Debug.java
new file mode 100644
index 0000000..8afdd3e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/Debug.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.BufferDecoratedKey;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.TriFunction;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.ReplicaLayout;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static java.util.function.Function.identity;
+import static org.apache.cassandra.simulator.Action.Modifier.INFO;
+import static org.apache.cassandra.simulator.Action.Modifier.WAKEUP;
+import static org.apache.cassandra.simulator.ActionListener.runAfter;
+import static org.apache.cassandra.simulator.ActionListener.runAfterAndTransitivelyAfter;
+import static org.apache.cassandra.simulator.ActionListener.recursive;
+import static org.apache.cassandra.simulator.Debug.EventType.*;
+import static org.apache.cassandra.simulator.Debug.Info.LOG;
+import static org.apache.cassandra.simulator.Debug.Level.*;
+import static org.apache.cassandra.simulator.paxos.Ballots.paxosDebugInfo;
+
+// TODO (feature): move logging to a depth parameter
+// TODO (feature): log only deltas for schema/cluster data
+public class Debug
+{
+    private static final Logger logger = LoggerFactory.getLogger(Debug.class);
+
+    public enum EventType { PARTITION, CLUSTER }
+    public enum Level
+    {
+        PLANNED,
+        CONSEQUENCES,
+        ALL;
+
+        private static final Level[] LEVELS = values();
+    }
+    public enum Info
+    {
+        LOG(EventType.values()),
+        PAXOS(PARTITION),
+        OWNERSHIP(CLUSTER),
+        GOSSIP(CLUSTER),
+        RF(CLUSTER),
+        RING(CLUSTER);
+
+        public final EventType[] defaultEventTypes;
+
+        Info(EventType ... defaultEventTypes)
+        {
+            this.defaultEventTypes = defaultEventTypes;
+        }
+    }
+
+    public static class Levels
+    {
+        private final EnumMap<EventType, Level> levels;
+
+        public Levels(EnumMap<EventType, Level> levels)
+        {
+            this.levels = levels;
+        }
+
+        public Levels(Level level, EventType ... types)
+        {
+            this.levels = new EnumMap<>(EventType.class);
+            for (EventType type : types)
+                this.levels.put(type, level);
+        }
+
+        public Levels(int partition, int cluster)
+        {
+            this.levels = new EnumMap<>(EventType.class);
+            if (partition > 0) this.levels.put(PARTITION, Level.LEVELS[partition - 1]);
+            if (cluster > 0) this.levels.put(CLUSTER, Level.LEVELS[cluster - 1]);
+        }
+
+        Level get(EventType type)
+        {
+            return levels.get(type);
+        }
+
+        boolean anyMatch(Predicate<Level> test)
+        {
+            return levels.values().stream().anyMatch(test);
+        }
+    }
+
+    private final EnumMap<Info, Levels> levels;
+    public final int[] primaryKeys;
+
+    public Debug()
+    {
+        this(new EnumMap<>(Info.class), null);
+    }
+
+    public Debug(Map<Info, Levels> levels, int[] primaryKeys)
+    {
+        this.levels = new EnumMap<>(levels);
+        this.primaryKeys = primaryKeys;
+    }
+
+    public ActionListener debug(EventType type, SimulatedTime time, Cluster cluster, String keyspace, Integer primaryKey)
+    {
+        List<ActionListener> listeners = new ArrayList<>();
+        for (Map.Entry<Info, Levels> e : levels.entrySet())
+        {
+            Info info = e.getKey();
+            Level level = e.getValue().get(type);
+            if (level == null) continue;
+
+            ActionListener listener;
+            if (info == LOG)
+            {
+                Function<ActionListener, ActionListener> adapt = type == CLUSTER ? LogTermination::new : identity();
+                switch (level)
+                {
+                    default: throw new AssertionError();
+                    case PLANNED: listener = adapt.apply(new LogOne(time, false)); break;
+                    case CONSEQUENCES: case ALL: listener = adapt.apply(recursive(new LogOne(time, true))); break;
+                }
+            }
+            else if (keyspace != null)
+            {
+                Consumer<Action> debug;
+                switch (info)
+                {
+                    default: throw new AssertionError();
+                    case GOSSIP: debug = debugGossip(cluster); break;
+                    case RF: debug = debugRf(cluster, keyspace); break;
+                    case RING: debug = debugRing(cluster, keyspace); break;
+                    case PAXOS: debug = forKeys(cluster, keyspace, primaryKey, Debug::debugPaxos); break;
+                    case OWNERSHIP: debug = forKeys(cluster, keyspace, primaryKey, Debug::debugOwnership); break;
+                }
+                switch (level)
+                {
+                    default: throw new AssertionError();
+                    case PLANNED: listener = type == CLUSTER ? runAfterAndTransitivelyAfter(debug) : runAfter(debug); break;
+                    case CONSEQUENCES: listener = recursive(runAfter(ignoreWakeupAndLogEvents(debug))); break;
+                    case ALL: listener = recursive(runAfter(ignoreLogEvents(debug))); break;
+                }
+            }
+            else continue;
+
+            listeners.add(listener);
+        }
+
+        if (listeners.isEmpty())
+            return null;
+        return new ActionListener.Combined(listeners);
+    }
+
+    public boolean isOn(Info info)
+    {
+        return isOn(info, PLANNED);
+    }
+
+    public boolean isOn(Info info, Level level)
+    {
+        Levels levels = this.levels.get(info);
+        if (levels == null) return false;
+        return levels.anyMatch(test -> level.compareTo(test) >= 0);
+    }
+
+    @SuppressWarnings("UnnecessaryToStringCall")
+    private static class LogOne implements ActionListener
+    {
+        final SimulatedTime time;
+        final boolean logConsequences;
+        private LogOne(SimulatedTime time, boolean logConsequences)
+        {
+            this.time = time;
+            this.logConsequences = logConsequences;
+        }
+
+        @Override
+        public void before(Action action, Before before)
+        {
+            if (logger.isWarnEnabled()) // invoke toString() eagerly to ensure we have the task's description
+                logger.warn(String.format("%6ds %s %s", TimeUnit.NANOSECONDS.toSeconds(time.nanoTime()), before, action));
+        }
+
+        @Override
+        public void consequences(ActionList consequences)
+        {
+            if (logConsequences && !consequences.isEmpty() && logger.isWarnEnabled())
+                logger.warn(String.format("%6ds Next: %s", TimeUnit.NANOSECONDS.toSeconds(time.nanoTime()), consequences));
+        }
+    }
+
+    private static class LogTermination extends ActionListener.Wrapped
+    {
+        public LogTermination(ActionListener wrap)
+        {
+            super(wrap);
+        }
+
+        @Override
+        public void transitivelyAfter(Action finished)
+        {
+            logger.warn("Terminated {}", finished);
+        }
+    }
+
+    private static Consumer<Action> ignoreWakeupAndLogEvents(Consumer<Action> consumer)
+    {
+        return action -> {
+            if (!action.is(WAKEUP) && !action.is(INFO))
+                consumer.accept(action);
+        };
+    }
+
+    private static Consumer<Action> ignoreLogEvents(Consumer<Action> consumer)
+    {
+        return action -> {
+            if (!action.is(INFO))
+                consumer.accept(action);
+        };
+    }
+
+    private Consumer<Action> debugGossip(Cluster cluster)
+    {
+        return ignore -> {
+            cluster.forEach(i -> i.unsafeRunOnThisThread(() -> {
+                for (InetAddressAndPort ep : Gossiper.instance.getLiveMembers())
+                {
+                    EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(ep);
+                    logger.warn("Gossip {}: {} {}", ep, epState.isAlive(), epState.states().stream()
+                                                                                   .map(e -> e.getKey().toString() + "=(" + e.getValue().value + ',' + e.getValue().version + ')')
+                                                                                   .collect(Collectors.joining(", ", "[", "]")));
+                }
+            }));
+        };
+    }
+
+    private Consumer<Action> forKeys(Cluster cluster, String keyspace, @Nullable Integer specificPrimaryKey, TriFunction<Cluster, String, Integer, Consumer<Action>> factory)
+    {
+        if (specificPrimaryKey != null) return factory.apply(cluster, keyspace, specificPrimaryKey);
+        else return forEachKey(cluster, keyspace, primaryKeys, factory);
+    }
+
+    public static Consumer<Action> forEachKey(Cluster cluster, String keyspace, int[] primaryKeys, TriFunction<Cluster, String, Integer, Consumer<Action>> factory)
+    {
+        Consumer<Action>[] eachKey = new Consumer[primaryKeys.length];
+        for (int i = 0 ; i < primaryKeys.length ; ++i)
+            eachKey[i] = factory.apply(cluster, keyspace, primaryKeys[i]);
+
+        return action -> {
+            for (Consumer<Action> run : eachKey)
+                run.accept(action);
+        };
+    }
+
+    public static Consumer<Action> debugPaxos(Cluster cluster, String keyspace, int primaryKey)
+    {
+        return ignore -> {
+            for (int node = 1 ; node <= cluster.size() ; ++node)
+            {
+                cluster.get(node).unsafeAcceptOnThisThread((num, pkint) -> {
+                    try
+                    {
+                        TableMetadata metadata = Keyspace.open(keyspace).getColumnFamilyStore("tbl").metadata.get();
+                        ByteBuffer pkbb = Int32Type.instance.decompose(pkint);
+                        DecoratedKey key = new BufferDecoratedKey(DatabaseDescriptor.getPartitioner().getToken(pkbb), pkbb);
+                        logger.warn("node{}({}): {}", num, primaryKey, paxosDebugInfo(key, metadata, FBUtilities.nowInSeconds()));
+                    }
+                    catch (Throwable t)
+                    {
+                        logger.warn("node{}({})", num, primaryKey, t);
+                    }
+                }, node, primaryKey);
+            }
+        };
+    }
+
+    public static Consumer<Action> debugRf(Cluster cluster, String keyspace)
+    {
+        return ignore -> {
+            cluster.forEach(i -> i.unsafeRunOnThisThread(() -> {
+                logger.warn("{} {}",
+                        Schema.instance.getKeyspaceMetadata(keyspace) == null ? "" : Schema.instance.getKeyspaceMetadata(keyspace).params.replication.toString(),
+                        Schema.instance.getKeyspaceMetadata(keyspace) == null ? "" : Keyspace.open(keyspace).getReplicationStrategy().configOptions.toString());
+            }));
+        };
+    }
+
+    public static Consumer<Action> debugOwnership(Cluster cluster, String keyspace, int primaryKey)
+    {
+        return ignore -> {
+            for (int node = 1 ; node <= cluster.size() ; ++node)
+            {
+                logger.warn("node{}({}): {}", node, primaryKey, cluster.get(node).unsafeApplyOnThisThread(v -> {
+                    try
+                    {
+                        return ReplicaLayout.forTokenWriteLiveAndDown(Keyspace.open(keyspace), Murmur3Partitioner.instance.getToken(Int32Type.instance.decompose(v))).all().endpointList().toString();
+                    }
+                    catch (Throwable t)
+                    {
+                        return "Error";
+                    }
+                }, primaryKey));
+            }
+        };
+    }
+
+    public static Consumer<Action> debugRing(Cluster cluster, String keyspace)
+    {
+        return ignore -> cluster.forEach(i -> i.unsafeRunOnThisThread(() -> {
+            if (Schema.instance.getKeyspaceMetadata(keyspace) != null)
+                logger.warn("{}", StorageService.instance.getTokenMetadata().toString());
+        }));
+    }
+
+}
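
To make the `Info`/`Levels` plumbing above concrete, here is a hedged usage sketch; it relies only on the public constructors and `isOn` defined in this file, while the wrapper class name and the primary keys are invented for illustration:

```java
import java.util.EnumMap;

import org.apache.cassandra.simulator.Debug;

// Illustration only: enable PAXOS debug output for PARTITION events at PLANNED level.
public class DebugConfigSketch
{
    public static void main(String[] args)
    {
        EnumMap<Debug.Info, Debug.Levels> levels = new EnumMap<>(Debug.Info.class);
        levels.put(Debug.Info.PAXOS, new Debug.Levels(Debug.Level.PLANNED, Debug.EventType.PARTITION));

        Debug debug = new Debug(levels, new int[]{ 1, 2, 3 }); // primary keys to report on
        assert debug.isOn(Debug.Info.PAXOS);                   // configured for PARTITION events
        assert debug.isOn(Debug.Info.PAXOS, Debug.Level.ALL);  // deeper levels also report as on
        assert !debug.isOn(Debug.Info.GOSSIP);                 // never configured
    }
}
```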
diff --git a/test/simulator/main/org/apache/cassandra/simulator/FutureActionScheduler.java b/test/simulator/main/org/apache/cassandra/simulator/FutureActionScheduler.java
new file mode 100644
index 0000000..67d601d
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/FutureActionScheduler.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+/**
+ * Makes decisions about when, in terms of the global simulated nanoTime,
+ * events should occur in the simulated schedule.
+ */
+public interface FutureActionScheduler
+{
+    enum Deliver { DELIVER, TIMEOUT, DELIVER_AND_TIMEOUT, FAILURE }
+
+    /**
+     * Make a decision about the result of some attempt to deliver a message.
+     * Note that this includes responses, so for any given message the chance
+     * of a successful reply depends on two of these calls succeeding.
+     */
+    Deliver shouldDeliver(int from, int to);
+
+    /**
+     * The simulated global nanoTime arrival of a message
+     */
+    long messageDeadlineNanos(int from, int to);
+
+    /**
+     * The simulated global nanoTime at which a timeout should be reported for a message
+     * with {@code expiresAfterNanos} timeout
+     */
+    long messageTimeoutNanos(long expiresAfterNanos, long expirationIntervalNanos);
+
+    /**
+     * The simulated global nanoTime at which a failure should be reported for a message
+     */
+    long messageFailureNanos(int from, int to);
+
+    /**
+     * The additional time in nanos that should elapse before some thread signal event occurs,
+     * in order to simulate scheduler latency
+     */
+    long schedulerDelayNanos();
+}
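
A minimal, fault-free implementation of this interface could look like the sketch below; the class name and latency constant are invented here, and it is not the scheduler the simulation actually wires in (that one derives its times from the simulated clock and random source):

```java
import org.apache.cassandra.simulator.FutureActionScheduler;

// Illustration only: every message is delivered after a fixed latency,
// nothing times out early and nothing fails.
public class FixedLatencyScheduler implements FutureActionScheduler
{
    private static final long LATENCY_NANOS = 1_000_000L; // 1ms, arbitrary
    private final long nowNanos; // assumed to be refreshed by the caller from simulated time

    public FixedLatencyScheduler(long nowNanos) { this.nowNanos = nowNanos; }

    @Override public Deliver shouldDeliver(int from, int to) { return Deliver.DELIVER; }
    @Override public long messageDeadlineNanos(int from, int to) { return nowNanos + LATENCY_NANOS; }
    @Override public long messageTimeoutNanos(long expiresAfterNanos, long expirationIntervalNanos) { return expiresAfterNanos + expirationIntervalNanos; }
    @Override public long messageFailureNanos(int from, int to) { return nowNanos + LATENCY_NANOS; }
    @Override public long schedulerDelayNanos() { return 0; }
}
```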
diff --git a/test/simulator/main/org/apache/cassandra/simulator/OrderOn.java b/test/simulator/main/org/apache/cassandra/simulator/OrderOn.java
new file mode 100644
index 0000000..382bd8c
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/OrderOn.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A token representing some ordering property of the system.
+ * Most notably this is used to implement executor services where only a bounded number of tasks may run concurrently,
+ * in particular single-threaded executors where causality is a strict requirement of correctness.
+ *
+ * This is also used to denote "strict" ordering on any suitably annotated {@link Action}.
+ *
+ * For convenience, efficiency and simplicity we have OrderOn represent a singleton collection of OrderOns.
+ */
+@Shared(scope = SIMULATION)
+public interface OrderOn extends OrderOns
+{
+    /**
+     * The number of {@link Action} in the provided sequence that may be executed at once,
+     * before any ordering takes effect.
+     */
+    int concurrency();
+
+    /**
+     * If true then all child actions (and their children, etc) must be ordered together, i.e. the next
+     * {@link Action} ordered by this sequence may not run until the present {@link Action} and all actions
+     * started by it, directly or indirectly, have completed.
+     */
+    default boolean isStrict() { return false; }
+
+    /**
+     * Whether the ordering is imposed immediately, occupying a slot in the sequence before any Action is scheduled
+     * (e.g. in the case of {@code executor.execute()}), or only after the scheduled delay has elapsed
+     * (e.g. in the case of {@code executor.schedule()}).
+     *
+     * A wrapper may change this property while still referring to the same underlying {@code OrderOn} by overriding
+     * the {@code unwrap()} method.
+     */
+    default boolean appliesBeforeScheduling() { return true; }
+
+    /**
+     * {@code this} may be a thin wrapper around another {@code OrderOn} with a different {@code appliesBeforeScheduling()}.
+     * In this case this method returns the underlying {@code OrderOn} to impose the order upon.
+     */
+    default OrderOn unwrap() { return this; }
+
+    /**
+     * A convenience method to indicate if this {@code OrderOn} imposes any ordering
+     */
+    @Override
+    default boolean isOrdered() { return concurrency() < Integer.MAX_VALUE; }
+
+    @Override
+    default OrderOns with(OrderOn add)
+    {
+        return new TwoOrderOns(this, add);
+    }
+
+    @Override
+    default int size()
+    {
+        return 1;
+    }
+
+    @Override
+    default OrderOn get(int i)
+    {
+        Preconditions.checkArgument(i == 0);
+        return this;
+    }
+
+    abstract class OrderOnId implements OrderOn
+    {
+        public final Object id;
+
+        public OrderOnId(Object id)
+        {
+            this.id = id;
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return id.hashCode();
+        }
+
+        @Override
+        public boolean equals(Object that)
+        {
+            return that instanceof OrderOnId && id.equals(((OrderOnId) that).id);
+        }
+
+        public String toString()
+        {
+            return id.toString();
+        }
+    }
+
+    public class Sequential extends OrderOnId
+    {
+        public Sequential(Object id)
+        {
+            super(id);
+        }
+
+        public int concurrency() { return 1; }
+    }
+
+    public class StrictSequential extends Sequential
+    {
+        public StrictSequential(Object id)
+        {
+            super(id);
+        }
+
+        @Override
+        public boolean isStrict()
+        {
+            return true;
+        }
+    }
+
+    public class Strict extends Sequential
+    {
+        final int concurrency;
+
+        public Strict(Object id, int concurrency)
+        {
+            super(id);
+            this.concurrency = concurrency;
+        }
+
+        @Override
+        public int concurrency()
+        {
+            return concurrency;
+        }
+
+        @Override
+        public boolean isStrict()
+        {
+            return true;
+        }
+    }
+
+    public class OrderAppliesAfterScheduling implements OrderOn
+    {
+        final OrderOn inner;
+
+        public OrderAppliesAfterScheduling(OrderOn inner) { this.inner = inner; }
+        @Override public int concurrency() { return inner.concurrency(); }
+        @Override public boolean isStrict() { return inner.isStrict(); }
+        @Override public boolean isOrdered() { return inner.isOrdered(); }
+
+        @Override public boolean appliesBeforeScheduling() { return false; }
+        @Override public OrderOn unwrap() { return inner; }
+        @Override public String toString() { return inner.toString(); }
+    }
+}
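
As a hedged illustration of the implementations above: a `Sequential` token serialises its actions, a `Strict` token allows a bounded number while ordering children transitively, and `OrderAppliesAfterScheduling` defers when the slot is taken while `unwrap()` still identifies the underlying sequence. The labels below are arbitrary:

```java
import org.apache.cassandra.simulator.OrderOn;

// Illustration only.
public class OrderOnSketch
{
    public static void main(String[] args)
    {
        OrderOn sequential = new OrderOn.Sequential("node1:mutationStage"); // concurrency 1
        OrderOn strict = new OrderOn.Strict("node1:repair", 2);             // up to 2 at once, strict

        assert sequential.concurrency() == 1 && !sequential.isStrict();
        assert strict.concurrency() == 2 && strict.isStrict();

        // A scheduled task occupies its slot only once its delay has elapsed,
        // but it is still ordered on the same underlying sequence.
        OrderOn deferred = new OrderOn.OrderAppliesAfterScheduling(sequential);
        assert !deferred.appliesBeforeScheduling();
        assert deferred.unwrap() == sequential;
    }
}
```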
diff --git a/test/simulator/main/org/apache/cassandra/simulator/OrderOns.java b/test/simulator/main/org/apache/cassandra/simulator/OrderOns.java
new file mode 100644
index 0000000..d4a07c1
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/OrderOns.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.ArrayList;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A (possibly empty) collection of OrderOn
+ */
+@Shared(scope = SIMULATION)
+public interface OrderOns
+{
+    /**
+     * Equivalent to !isEmpty()
+     */
+    boolean isOrdered();
+
+    /**
+     * Equivalent to anyMatch(OrderOn::isStrict)
+     */
+    boolean isStrict();
+
+    /**
+     * Return an {@code OrderOns} (possibly this one) also containing {@code add}
+     */
+    OrderOns with(OrderOn add);
+
+    /**
+     * The number of {@link OrderOn} contained in this collection
+     */
+    int size();
+
+    /**
+     * The i'th {@link OrderOn} contained in this collection
+     */
+    OrderOn get(int i);
+
+    public static final OrderOn NONE = new OrderOn()
+    {
+        @Override
+        public OrderOns with(OrderOn add)
+        {
+            return add;
+        }
+
+        @Override
+        public int size()
+        {
+            return 0;
+        }
+
+        @Override
+        public OrderOn get(int i)
+        {
+            throw new IndexOutOfBoundsException();
+        }
+
+        @Override
+        public int concurrency()
+        {
+            return Integer.MAX_VALUE;
+        }
+
+        @Override
+        public String toString()
+        {
+            return "Unordered";
+        }
+    };
+
+    public class TwoOrderOns implements OrderOns
+    {
+        final OrderOn one;
+        final OrderOn two;
+
+        public TwoOrderOns(OrderOn one, OrderOn two)
+        {
+            this.one = one;
+            this.two = two;
+        }
+
+        @Override
+        public boolean isOrdered()
+        {
+            return true;
+        }
+
+        @Override
+        public boolean isStrict()
+        {
+            return one.isStrict() || two.isStrict();
+        }
+
+        @Override
+        public OrderOns with(OrderOn three)
+        {
+            OrderOnsList result = new OrderOnsList();
+            result.add(one);
+            result.add(two);
+            result.add(three);
+            return result;
+        }
+
+        @Override
+        public int size()
+        {
+            return 2;
+        }
+
+        @Override
+        public OrderOn get(int i)
+        {
+            Preconditions.checkArgument((i & 1) == i);
+            return i == 0 ? one : two;
+        }
+    }
+
+    public class OrderOnsList extends ArrayList<OrderOn> implements OrderOns
+    {
+        @Override
+        public boolean isOrdered()
+        {
+            return true;
+        }
+
+        @Override
+        public boolean isStrict()
+        {
+            for (int i = 0 ; i < size() ; ++i)
+            {
+                if (get(i).isStrict())
+                    return true;
+            }
+            return false;
+        }
+
+        public OrderOns with(OrderOn add)
+        {
+            add(add);
+            return this;
+        }
+    }
+}
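
Combining tokens follows the small-size optimisation above: a single `OrderOn` already acts as an `OrderOns` of size one, two become a `TwoOrderOns`, and further additions spill into an `OrderOnsList`. A brief sketch (labels are arbitrary):

```java
import org.apache.cassandra.simulator.OrderOn;
import org.apache.cassandra.simulator.OrderOns;

// Illustration only.
public class OrderOnsSketch
{
    public static void main(String[] args)
    {
        OrderOn a = new OrderOn.Sequential("stage-a");
        OrderOn b = new OrderOn.Sequential("stage-b");
        OrderOn c = new OrderOn.StrictSequential("stage-c");

        assert OrderOns.NONE.with(a) == a;        // NONE contributes nothing
        OrderOns two = a.with(b);                 // TwoOrderOns
        assert two.size() == 2 && !two.isStrict();
        OrderOns three = two.with(c);             // OrderOnsList
        assert three.size() == 3 && three.isStrict();
    }
}
```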
diff --git a/test/simulator/main/org/apache/cassandra/simulator/Ordered.java b/test/simulator/main/org/apache/cassandra/simulator/Ordered.java
new file mode 100644
index 0000000..3e57c95
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/Ordered.java
@@ -0,0 +1,330 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.function.Function;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.simulator.utils.CountingCollection;
+import org.apache.cassandra.simulator.utils.IntrusiveLinkedList;
+import org.apache.cassandra.simulator.utils.IntrusiveLinkedListNode;
+
+import static java.util.Collections.newSetFromMap;
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_SIMULATOR_DEBUG;
+
+/**
+ * Represents an action that may not run before certain other actions
+ * have been executed, excluding child tasks that are not continuations
+ * (i.e. required threads/tasks to terminate their execution, but not
+ * any other child or transitive child actions)
+ */
+class Ordered extends OrderedLink implements ActionListener
+{
+    static final boolean DEBUG = TEST_SIMULATOR_DEBUG.getBoolean();
+
+    /**
+     * A sequence is used to model STRICT execution order imposed on certain actions that are not able
+     * to reliably complete if their actions are re-ordered, and to implement thread executor order,
+     * both for sequential executors and for ensuring executors with a given concurrency level do not
+     * exceed that concurrency level.
+     */
+    static class Sequence
+    {
+        final OrderOn on;
+        final int concurrency;
+        /** The tasks we are currently permitting to run (but may not be running due to membership of other sequences) */
+        final Collection<Ordered> maybeRunning;
+        /** The tasks we have pending */
+        final IntrusiveLinkedList<OrderedLink> next = new IntrusiveLinkedList<>();
+
+        Sequence(OrderOn on)
+        {
+            this.on = on;
+            this.concurrency = on.concurrency();
+            this.maybeRunning = concurrency == 1
+                                ? new ArrayList<>(1)
+                                : new LinkedHashSet<>();
+        }
+
+        <O extends Ordered> void add(O add, Function<O, List<Sequence>> memberOf)
+        {
+            memberOf.apply(add).add(this);
+            if (maybeRunning.size() < concurrency)
+            {
+                maybeRunning.add(add);
+            }
+            else
+            {
+                if (add.isFree())
+                {
+                    next.add(add);
+                }
+                else
+                {
+                    Preconditions.checkState(add.additionalLink == null);
+                    add.additionalLink = new AdditionalOrderedLink(add);
+                    next.add(add.additionalLink);
+                }
+
+                add.predecessors.add(this); // we don't submit, as we may yet be added to other sequences that prohibit our execution
+            }
+        }
+
+        /**
+         * Mark a task complete, and maybe schedule another from {@link #next}
+         */
+        void complete(Ordered completed, ActionSchedule schedule)
+        {
+            if (!maybeRunning.remove(completed))
+                throw new IllegalStateException();
+
+            complete(schedule);
+        }
+
+        void invalidate(Ordered completed, ActionSchedule schedule)
+        {
+            if (maybeRunning.remove(completed))
+                complete(schedule);
+        }
+
+        void invalidatePending()
+        {
+            if (next.isEmpty())
+                return;
+
+            List<Ordered> invalidate = new ArrayList<>();
+            for (OrderedLink link = next.poll() ; link != null ; link = next.poll())
+                invalidate.add(link.ordered());
+            invalidate.forEach(Ordered::invalidate);
+        }
+
+        void complete(ActionSchedule schedule)
+        {
+            if (next.isEmpty() && maybeRunning.isEmpty())
+            {
+                schedule.sequences.remove(on);
+            }
+            else
+            {
+                OrderedLink nextLink = this.next.poll();
+                if (nextLink != null)
+                {
+                    Ordered next = nextLink.ordered();
+                    if (!next.predecessors.remove(this))
+                        throw new IllegalStateException();
+                    maybeRunning.add(next);
+                    next.maybeAdvance();
+                }
+            }
+        }
+
+        public String toString()
+        {
+            return on.toString();
+        }
+    }
+
+    /**
+     * Represents an action that may not run before all child actions
+     * have been executed, transitively (i.e. child of child, ad infinitum).
+     */
+    static class StrictlyOrdered extends Ordered implements ActionListener
+    {
+        /** The sequences we participate in, in a strict fashion */
+        final List<Sequence> strictMemberOf = new ArrayList<>(1);
+        boolean isCompleteStrict;
+
+        StrictlyOrdered(Action action, ActionSchedule schedule)
+        {
+            super(action, schedule);
+        }
+
+        @Override
+        public void transitivelyAfter(Action finished)
+        {
+            assert !isCompleteStrict;
+            isCompleteStrict = true;
+            strictMemberOf.forEach(m -> m.complete(this, schedule));
+        }
+
+        @Override
+        void invalidate(boolean isCancellation)
+        {
+            super.invalidate(isCancellation);
+            strictMemberOf.forEach(m -> m.invalidate(this, schedule));
+        }
+
+        @Override
+        void joinNow(OrderOn orderOn)
+        {
+            schedule.sequences.computeIfAbsent(orderOn.unwrap(), Sequence::new)
+                              .add(this, orderOn.isStrict() ? o -> o.strictMemberOf : o -> o.memberOf);
+        }
+    }
+
+    final ActionSchedule schedule;
+    /** Those sequences that contain tasks that must complete before we can execute */
+    final Collection<Sequence> predecessors = !DEBUG ? new CountingCollection<>() : newSetFromMap(new IdentityHashMap<>());
+
+    /** The sequences we participate in, in a non-strict fashion */
+    final List<Sequence> memberOf = new ArrayList<>(1);
+    /** The underlying action waiting to execute */
+    final Action action;
+    /** State tracking to assert correct behaviour */
+    boolean isStarted, isComplete;
+    List<OrderOn> joinPostScheduling;
+    OrderedLink additionalLink;
+
+    Ordered(Action action, ActionSchedule schedule)
+    {
+        this.schedule = schedule;
+        this.action = action;
+        action.register(this);
+    }
+
+    public String toString()
+    {
+        return action.toString();
+    }
+
+    public void before(Action performed, Before before)
+    {
+        switch (before)
+        {
+            default: throw new AssertionError();
+            case INVALIDATE: // will be handled by invalidate()
+                return;
+            case DROP:
+            case EXECUTE:
+                assert performed == action;
+                assert !isStarted;
+                isStarted = true;
+        }
+    }
+
+    void join(OrderOn orderOn)
+    {
+        if (!orderOn.isOrdered())
+            return;
+
+        if (orderOn.appliesBeforeScheduling()) joinNow(orderOn);
+        else joinPostScheduling(orderOn);
+    }
+
+    void joinNow(OrderOn orderOn)
+    {
+        schedule.sequences.computeIfAbsent(orderOn.unwrap(), Sequence::new)
+                          .add(this, o -> o.memberOf);
+    }
+
+    void joinPostScheduling(OrderOn orderOn)
+    {
+        if (joinPostScheduling == null)
+        {
+            joinPostScheduling = Collections.singletonList(orderOn);
+        }
+        else
+        {
+            if (joinPostScheduling.size() == 1)
+            {
+                List<OrderOn> tmp = new ArrayList<>(2);
+                tmp.addAll(joinPostScheduling);
+                joinPostScheduling = tmp;
+            }
+            joinPostScheduling.add(orderOn);
+        }
+    }
+
+    boolean waitPreScheduled()
+    {
+        return !predecessors.isEmpty();
+    }
+
+    boolean waitPostScheduled()
+    {
+        Preconditions.checkState(predecessors.isEmpty());
+        if (joinPostScheduling == null)
+            return false;
+        joinPostScheduling.forEach(this::joinNow);
+        joinPostScheduling = null;
+        return !predecessors.isEmpty();
+    }
+
+    void invalidate()
+    {
+        invalidate(false);
+    }
+
+    void invalidate(boolean isCancellation)
+    {
+        Preconditions.checkState(!isCancellation || !isStarted);
+        isStarted = isComplete = true;
+        action.deregister(this);
+        remove();
+        if (additionalLink != null)
+        {
+            additionalLink.remove();
+            additionalLink = null;
+        }
+        memberOf.forEach(m -> m.invalidate(this, schedule));
+    }
+
+    void maybeAdvance()
+    {
+        if (predecessors.isEmpty())
+            schedule.advance(action);
+    }
+
+    @Override
+    public void after(Action performed)
+    {
+        assert isStarted;
+        assert !isComplete;
+        isComplete = true;
+        memberOf.forEach(m -> m.complete(this, schedule));
+    }
+
+    @Override
+    Ordered ordered()
+    {
+        return this;
+    }
+}
+
+abstract class OrderedLink extends IntrusiveLinkedListNode
+{
+    abstract Ordered ordered();
+    public void remove() { super.remove(); }
+    public boolean isFree() { return super.isFree(); }
+}
+
+class AdditionalOrderedLink extends OrderedLink
+{
+    final Ordered ordered;
+
+    AdditionalOrderedLink(Ordered ordered) { this.ordered = ordered; }
+    Ordered ordered() { return ordered; }
+}
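
The `Sequence` bookkeeping above is easiest to read as a concurrency-limited queue: at most `concurrency` members are "maybe running", and each completion promotes one pending member. The simplified, self-contained analogue below is illustration only; the real classes additionally track predecessor sequences, invalidation, and post-scheduling joins:

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

// Illustration only: a queue that admits at most `concurrency` tasks at a time.
class MiniSequence
{
    private final int concurrency;
    private final List<Runnable> maybeRunning = new ArrayList<>();
    private final Queue<Runnable> next = new ArrayDeque<>();

    MiniSequence(int concurrency) { this.concurrency = concurrency; }

    void add(Runnable task)
    {
        if (maybeRunning.size() < concurrency) { maybeRunning.add(task); task.run(); }
        else next.add(task); // queued until a running member completes
    }

    void complete(Runnable task)
    {
        if (!maybeRunning.remove(task))
            throw new IllegalStateException();
        Runnable promoted = next.poll();
        if (promoted != null) { maybeRunning.add(promoted); promoted.run(); }
    }
}
```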
diff --git a/test/simulator/main/org/apache/cassandra/simulator/OrderedOn.java b/test/simulator/main/org/apache/cassandra/simulator/OrderedOn.java
new file mode 100644
index 0000000..b8dc81f
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/OrderedOn.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface OrderedOn
+{
+    OrderOn on();
+    default boolean appliesBeforeScheduling() { return true; }
+    default boolean isOrdered() { return on().isOrdered(); }
+    default boolean isStrict() { return on().isStrict(); }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/RandomSource.java b/test/simulator/main/org/apache/cassandra/simulator/RandomSource.java
new file mode 100644
index 0000000..14d7ad9
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/RandomSource.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.lang.reflect.Array;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Random;
+import java.util.function.IntSupplier;
+import java.util.function.LongSupplier;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface RandomSource
+{
+    public static class Choices<T>
+    {
+        final float[] cumulativeProbabilities;
+        public final T[] options;
+
+        private Choices(float[] cumulativeProbabilities, T[] options)
+        {
+            this.cumulativeProbabilities = cumulativeProbabilities;
+            this.options = options;
+        }
+
+        public T choose(RandomSource random)
+        {
+            if (options.length == 0)
+                return null;
+
+            float choose = random.uniformFloat();
+            int i = Arrays.binarySearch(cumulativeProbabilities, choose);
+
+            if (i < 0) i = -1 - i;
+            return options[i];
+        }
+
+        public Choices<T> without(T option)
+        {
+            for (int i = 0 ; i < options.length ; ++i)
+            {
+                if (option.equals(options[i]))
+                {
+                    float[] prob = new float[cumulativeProbabilities.length - 1];
+                    T[] opts = (T[]) Array.newInstance(options.getClass().getComponentType(), options.length - 1);
+                    System.arraycopy(cumulativeProbabilities, 0, prob, 0, i);
+                    System.arraycopy(cumulativeProbabilities, i + 1, prob, i, this.options.length - (i + 1));
+                    System.arraycopy(options, 0, opts, 0, i);
+                    System.arraycopy(options, i + 1, opts, i, options.length - (i + 1));
+                    for (int j = prob.length - 1 ; j > 1 ; --j)
+                        prob[j] -= prob[j - 1];
+                    return build(prob, opts);
+                }
+            }
+            return this;
+        }
+
+        private static float[] randomCumulativeProbabilities(RandomSource random, int count)
+        {
+            float[] nonCumulativeProbabilities = new float[count];
+            for (int i = 0 ; i < count ; ++i)
+                nonCumulativeProbabilities[i] = random.uniformFloat();
+            return cumulativeProbabilities(nonCumulativeProbabilities);
+        }
+
+        private static float[] cumulativeProbabilities(float[] nonCumulativeProbabilities)
+        {
+            int count = nonCumulativeProbabilities.length;
+            if (count == 0)
+                return new float[0];
+
+            float[] result = new float[nonCumulativeProbabilities.length];
+            float sum = 0;
+            for (int i = 0 ; i < count ; ++i)
+                result[i] = sum += nonCumulativeProbabilities[i];
+            result[result.length - 1] = 1.0f;
+            for (int i = 0 ; i < count - 1 ; ++i)
+                result[i] /= sum;
+            return result;
+        }
+
+        public static <T> Choices<T> random(RandomSource random, T[] options)
+        {
+            return new Choices<>(randomCumulativeProbabilities(random, options.length), options);
+        }
+
+        public static <T> Choices<T> random(RandomSource random, T[] options, Map<T, float[]> bounds)
+        {
+            float[] nonCumulativeProbabilities = new float[options.length];
+            for (int i = 0 ; i < options.length ; ++i)
+            {
+                float[] minmax = bounds.get(options[i]);
+                float uniform = random.uniformFloat();
+                nonCumulativeProbabilities[i] = minmax == null ? uniform : minmax[0] + (uniform * (minmax[1] - minmax[0]));
+            }
+            return new Choices<>(cumulativeProbabilities(nonCumulativeProbabilities), options);
+        }
+
+        public static <T> Choices<T> build(float[] nonCumulativeProbabilities, T[] options)
+        {
+            if (nonCumulativeProbabilities.length != options.length)
+                throw new IllegalArgumentException();
+            return new Choices<>(cumulativeProbabilities(nonCumulativeProbabilities), options);
+        }
+
+        public static <T> Choices<T> uniform(T ... options)
+        {
+            float[] nonCumulativeProbabilities = new float[options.length];
+            Arrays.fill(nonCumulativeProbabilities, 1f / options.length);
+            return new Choices<>(cumulativeProbabilities(nonCumulativeProbabilities), options);
+        }
+    }
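+
+    /*
+     * Illustrative sketch of how Choices selects an option (weights below are hypothetical):
+     * non-cumulative weights {0.2, 0.3, 0.5} normalise to the cumulative array {0.2, 0.5, 1.0};
+     * choose() then draws u = uniformFloat() and binary-searches for it: u = 0.1 selects
+     * option 0, u = 0.4 selects option 1 and u = 0.7 selects option 2, i.e. each option is
+     * picked in proportion to its weight.
+     */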
+
+    public static abstract class Abstract implements RandomSource
+    {
+        public abstract float uniformFloat();
+        public abstract int uniform(int min, int max);
+        public abstract long uniform(long min, long max);
+
+        public LongSupplier uniqueUniformSupplier(long min, long max)
+        {
+            return uniqueUniformStream(min, max).iterator()::nextLong;
+        }
+
+        public LongStream uniqueUniformStream(long min, long max)
+        {
+            return uniformStream(min, max).distinct();
+        }
+
+        public LongStream uniformStream(long min, long max)
+        {
+            return LongStream.generate(() -> uniform(min, max));
+        }
+
+        public LongSupplier uniformSupplier(long min, long max)
+        {
+            return () -> uniform(min, max);
+        }
+
+        public IntSupplier uniqueUniformSupplier(int min, int max)
+        {
+            return uniqueUniformStream(min, max).iterator()::nextInt;
+        }
+
+        public IntStream uniqueUniformStream(int min, int max)
+        {
+            return uniformStream(min, max).distinct();
+        }
+
+        public IntStream uniformStream(int min, int max)
+        {
+            return IntStream.generate(() -> uniform(min, max));
+        }
+
+        public boolean decide(float chance)
+        {
+            return uniformFloat() < chance;
+        }
+
+        public int log2uniform(int min, int max)
+        {
+            return (int) log2uniform((long) min, max);
+        }
+
+        public long log2uniform(long min, long max)
+        {
+            return qlog2uniform(min, max, 64);
+        }
+
+        public long qlog2uniform(long min, long max, int quantizations)
+        {
+            return min + log2uniform(max - min, quantizations);
+        }
+
+        private long log2uniform(long max, int quantizations)
+        {
+            int maxBits = 64 - Long.numberOfLeadingZeros(max - 1);
+            if (maxBits == 0)
+                return 0;
+
+            long min;
+            if (maxBits <= quantizations)
+            {
+                int bits = uniform(0, maxBits);
+                min = 1L << (bits - 1);
+                max = Math.min(max, min * 2);
+            }
+            else
+            {
+                int bitsPerRange = (maxBits / quantizations);
+                int i = uniform(0, quantizations);
+                min = 1L << (i * bitsPerRange);
+                max = Math.min(max, 1L << ((i + 1) * bitsPerRange));
+            }
+
+            return uniform(min, max);
+        }
+
+        public float qlog2uniformFloat(int quantizations)
+        {
+            return qlog2uniform(0, 1 << 24, quantizations) / (float)(1 << 24);
+        }
+    }
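+
+    /*
+     * Worked example of the log2-distributed selection above (numbers are illustrative):
+     * log2uniform(0, 1024) computes maxBits = 10 and draws a bit-width b uniformly from
+     * [0, 10); for b = 5 the result is then drawn uniformly from [16, 32). Each power-of-two
+     * bucket is equally likely, so small values occur about as often as large ones, unlike a
+     * plain uniform(0, 1024).
+     */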
+
+    public static class Default extends Abstract
+    {
+        private final Random random = new Random(0);
+
+        public float uniformFloat() { return random.nextFloat(); }
+
+        @Override
+        public double uniformDouble()
+        {
+            return random.nextDouble();
+        }
+
+        public int uniform(int min, int max)
+        {
+            int delta = max - min;
+            if (delta > 1) return min + random.nextInt(max - min);
+            if (delta == 1) return min;
+            if (min >= max)
+                throw new IllegalArgumentException(String.format("Min (%d) should be less than max (%d).", min, max));
+            return (int)uniform(min, (long)max);
+        }
+
+        public long uniform(long min, long max)
+        {
+            if (min >= max) throw new IllegalArgumentException();
+
+            long delta = max - min;
+            if (delta == 1) return min;
+            if (delta == Long.MIN_VALUE && max == Long.MAX_VALUE) return random.nextLong();
+            if (delta < 0) return random.longs(min, max).iterator().nextLong();
+            if (delta <= Integer.MAX_VALUE) return min + uniform(0, (int) delta);
+
+            long result = min + 1 == max ? min : min + ((random.nextLong() & 0x7fffffffffffffffL) % (max - min));
+            assert result >= min && result < max;
+            return result;
+        }
+
+        public void reset(long seed)
+        {
+            random.setSeed(seed);
+        }
+
+        public long reset()
+        {
+            long seed = random.nextLong();
+            reset(seed);
+            return seed;
+        }
+    }
+
+    IntStream uniqueUniformStream(int min, int max);
+
+    LongSupplier uniqueUniformSupplier(long min, long max);
+    LongStream uniqueUniformStream(long min, long max);
+    LongStream uniformStream(long min, long max);
+
+    // [min...max)
+    int uniform(int min, int max);
+    // [min...max)
+    long uniform(long min, long max);
+
+    /**
+     * Select a number in the range [min, max), with a power of two in the range [0, max-min)
+     * selected uniformly and a uniform value less than this power of two added to it
+     */
+    int log2uniform(int min, int max);
+    long log2uniform(long min, long max);
+
+    /**
+     * Select a number in the range [min, max), with the range being split into
+     * {@code quantizations} adjacent powers of two, a range being selected from these
+     * with uniform probability, and the value within that range being selected uniformly
+     */
+    long qlog2uniform(long min, long max, int quantizations);
+
+    float uniformFloat();
+
+    /**
+     * Select a number in the range [0, 1), with the range being split into
+     * {@code quantizations} adjacent powers of two; a range being selected from these
+     * with uniform probability, and the value within that range being selected uniformly
+     *
+     * This is used to distribute behavioural toggles more extremely between different runs of the simulator.
+     */
+    float qlog2uniformFloat(int quantizations);
+    double uniformDouble();
+
+    // chance should be a probability in the range [0..1]
+    boolean decide(float chance);
+
+    void reset(long seed);
+    long reset();
+}
+
diff --git a/test/simulator/main/org/apache/cassandra/simulator/RunnableActionScheduler.java b/test/simulator/main/org/apache/cassandra/simulator/RunnableActionScheduler.java
new file mode 100644
index 0000000..95be929
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/RunnableActionScheduler.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+
+import org.apache.cassandra.simulator.utils.KindOfSequence;
+
+public abstract class RunnableActionScheduler implements Consumer<Action>
+{
+    public enum Kind { RANDOM_WALK, UNIFORM, SEQUENTIAL }
+
+    public static class Immediate extends RunnableActionScheduler
+    {
+        private final AtomicLong id = new AtomicLong();
+
+        public Immediate() { }
+
+        @Override
+        public double priority()
+        {
+            return id.incrementAndGet();
+        }
+    }
+
+    public static class Sequential extends RunnableActionScheduler
+    {
+        final AtomicInteger next = new AtomicInteger();
+
+        @Override
+        public double priority()
+        {
+            return next.incrementAndGet();
+        }
+
+        public Sequential() { }
+    }
+
+    public abstract static class AbstractRandom extends RunnableActionScheduler
+    {
+        protected final RandomSource random;
+
+        public AbstractRandom(RandomSource random)
+        {
+            this.random = random;
+        }
+    }
+
+    public static class RandomUniform extends AbstractRandom
+    {
+        final double min, range;
+
+        RandomUniform(RandomSource random, double min, double range)
+        {
+            super(random);
+            this.min = min;
+            this.range = range;
+        }
+
+        public RandomUniform(RandomSource random)
+        {
+            this(random, 0d, 1d);
+        }
+
+        @Override
+        public double priority()
+        {
+            return min + random.uniformDouble() * range;
+        }
+    }
+
+    static class RandomWalk extends AbstractRandom
+    {
+        final double maxStepSize;
+        double cur;
+
+        @Override
+        public double priority()
+        {
+            double result = cur;
+            double step = 2 * (random.uniformDouble() - 0.5f) * maxStepSize;
+            this.cur = step > 0 ? Math.min(1d, cur + step)
+                                : Math.max(0d, cur + step);
+            return result;
+        }
+
+        @Override
+        protected RunnableActionScheduler next()
+        {
+            return new RunnableActionScheduler.RandomWalk(random, cur);
+        }
+
+        RandomWalk(RandomSource random, double cur)
+        {
+            super(random);
+            this.maxStepSize = KindOfSequence.maxStepSize(0f, 1f, random);
+            this.cur = cur;
+        }
+
+        RandomWalk(RandomSource random)
+        {
+            this(random, 0.5d);
+        }
+    }
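+
+    /*
+     * Illustrative sequence for RandomWalk (numbers are hypothetical): starting from cur = 0.5
+     * with maxStepSize = 0.1, successive calls to priority() might return 0.5, 0.56, 0.49, ...,
+     * each step bounded by maxStepSize and the walk clamped to [0, 1]; next() forks a scheduler
+     * that continues the walk from the current position.
+     */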
+
+    public abstract double priority();
+
+    protected RunnableActionScheduler next()
+    {
+        return this;
+    }
+
+    public void attachTo(ActionList actions)
+    {
+        actions.forEach(next());
+    }
+
+    @Override
+    public void accept(Action action)
+    {
+        action.setScheduler(this);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/Simulation.java b/test/simulator/main/org/apache/cassandra/simulator/Simulation.java
new file mode 100644
index 0000000..8fd1532
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/Simulation.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import org.apache.cassandra.utils.CloseableIterator;
+
+public interface Simulation extends AutoCloseable
+{
+    CloseableIterator<?> iterator();
+    void run();
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/SimulationException.java b/test/simulator/main/org/apache/cassandra/simulator/SimulationException.java
new file mode 100644
index 0000000..71ad4e0
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/SimulationException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+public class SimulationException extends RuntimeException
+{
+    public SimulationException(long seed, Throwable t)
+    {
+        super(createMsg(seed, null), t, true, false);
+    }
+
+    public SimulationException(long seed, String msg, Throwable t)
+    {
+        super(createMsg(seed, msg), t, true, false);
+    }
+
+    private static String createMsg(long seed, String msg)
+    {
+        return String.format("Failed on seed 0x%s%s", Long.toHexString(seed), msg == null ? "" : "; " + msg);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/SimulationRunner.java b/test/simulator/main/org/apache/cassandra/simulator/SimulationRunner.java
new file mode 100644
index 0000000..6f1eb12
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/SimulationRunner.java
@@ -0,0 +1,471 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.ToDoubleFunction;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.airlift.airline.Command;
+import io.airlift.airline.Help;
+import io.airlift.airline.Option;
+import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.simulator.Debug.Info;
+import org.apache.cassandra.simulator.Debug.Levels;
+import org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange;
+import org.apache.cassandra.simulator.debug.SelfReconcile;
+import org.apache.cassandra.simulator.systems.InterceptedWait;
+import org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites.Capture;
+import org.apache.cassandra.simulator.systems.InterceptibleThread;
+import org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods;
+import org.apache.cassandra.simulator.utils.ChanceRange;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.Hex;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static java.util.Arrays.stream;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.BATCH_COMMIT_LOG_SYNC_INTERVAL;
+import static org.apache.cassandra.config.CassandraRelevantProperties.CASSANDRA_JMX_REMOTE_PORT;
+import static org.apache.cassandra.config.CassandraRelevantProperties.CLOCK_GLOBAL;
+import static org.apache.cassandra.config.CassandraRelevantProperties.CLOCK_MONOTONIC_APPROX;
+import static org.apache.cassandra.config.CassandraRelevantProperties.CLOCK_MONOTONIC_PRECISE;
+import static org.apache.cassandra.config.CassandraRelevantProperties.DETERMINISM_CONSISTENT_DIRECTORY_LISTINGS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.DETERMINISM_UNSAFE_UUID_NODE;
+import static org.apache.cassandra.config.CassandraRelevantProperties.DISABLE_SSTABLE_ACTIVITY_TRACKING;
+import static org.apache.cassandra.config.CassandraRelevantProperties.DETERMINISM_SSTABLE_COMPRESSION_DEFAULT;
+import static org.apache.cassandra.config.CassandraRelevantProperties.GOSSIPER_SKIP_WAITING_TO_SETTLE;
+import static org.apache.cassandra.config.CassandraRelevantProperties.IGNORE_MISSING_NATIVE_FILE_HINTS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.IS_DISABLED_MBEAN_REGISTRATION;
+import static org.apache.cassandra.config.CassandraRelevantProperties.MEMTABLE_OVERHEAD_SIZE;
+import static org.apache.cassandra.config.CassandraRelevantProperties.MIGRATION_DELAY;
+import static org.apache.cassandra.config.CassandraRelevantProperties.PAXOS_REPAIR_RETRY_TIMEOUT_IN_MS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.RING_DELAY;
+import static org.apache.cassandra.config.CassandraRelevantProperties.SHUTDOWN_ANNOUNCE_DELAY_IN_MS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.SYSTEM_AUTH_DEFAULT_RF;
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_IGNORE_SIGAR;
+import static org.apache.cassandra.config.CassandraRelevantProperties.DISABLE_GOSSIP_ENDPOINT_REMOVAL;
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL;
+import static org.apache.cassandra.simulator.debug.Reconcile.reconcileWith;
+import static org.apache.cassandra.simulator.debug.Record.record;
+import static org.apache.cassandra.simulator.debug.SelfReconcile.reconcileWithSelf;
+import static org.apache.cassandra.simulator.utils.IntRange.parseRange;
+import static org.apache.cassandra.simulator.utils.LongRange.parseNanosRange;
+
+@SuppressWarnings({ "ZeroLengthArrayAllocation", "CodeBlock2Expr", "SameParameterValue", "DynamicRegexReplaceableByCompiledPattern", "CallToSystemGC" })
+public class SimulationRunner
+{
+    private static final Logger logger = LoggerFactory.getLogger(SimulationRunner.class);
+
+    public enum RecordOption { NONE, VALUE, WITH_CALLSITES }
+
+    @BeforeClass
+    public static void beforeAll()
+    {
+        // set up system properties so that our instances behave correctly, consistently and deterministically
+
+        // Disallow time on the bootstrap classloader
+        for (CassandraRelevantProperties property : Arrays.asList(CLOCK_GLOBAL, CLOCK_MONOTONIC_APPROX, CLOCK_MONOTONIC_PRECISE))
+            property.setString("org.apache.cassandra.simulator.systems.SimulatedTime$Delegating");
+        try { Clock.Global.nanoTime(); } catch (IllegalStateException e) {} // make sure static initializer gets called
+
+        // TODO (cleanup): disable unnecessary things like compaction logger threads etc
+        System.setProperty("cassandra.libjemalloc", "-");
+        System.setProperty("cassandra.dtest.api.log.topology", "false");
+
+        // this property is used to allow non-members of the ring to exist in gossip without breaking RF changes
+        // it would be nice not to rely on this, but hopefully we'll have consistent range movements before it matters
+        System.setProperty("cassandra.allow_alter_rf_during_range_movement", "true");
+
+        for (CassandraRelevantProperties property : Arrays.asList(CLOCK_GLOBAL, CLOCK_MONOTONIC_APPROX, CLOCK_MONOTONIC_PRECISE))
+            property.setString("org.apache.cassandra.simulator.systems.SimulatedTime$Global");
+
+        CASSANDRA_JMX_REMOTE_PORT.setString("");
+        RING_DELAY.setInt(0);
+        PAXOS_REPAIR_RETRY_TIMEOUT_IN_MS.setLong(NANOSECONDS.toMillis(Long.MAX_VALUE));
+        SHUTDOWN_ANNOUNCE_DELAY_IN_MS.setInt(0);
+        DETERMINISM_UNSAFE_UUID_NODE.setBoolean(true);
+        GOSSIPER_SKIP_WAITING_TO_SETTLE.setInt(0);
+        BATCH_COMMIT_LOG_SYNC_INTERVAL.setInt(-1);
+        DISABLE_SSTABLE_ACTIVITY_TRACKING.setBoolean(false);
+        DETERMINISM_SSTABLE_COMPRESSION_DEFAULT.setBoolean(false); // compression causes variation in file size for e.g. UUIDs, IP addresses, random file paths
+        DETERMINISM_CONSISTENT_DIRECTORY_LISTINGS.setBoolean(true);
+        TEST_IGNORE_SIGAR.setBoolean(true);
+        SYSTEM_AUTH_DEFAULT_RF.setInt(3);
+        MIGRATION_DELAY.setInt(Integer.MAX_VALUE);
+        DISABLE_GOSSIP_ENDPOINT_REMOVAL.setBoolean(true);
+        MEMTABLE_OVERHEAD_SIZE.setInt(100);
+        IGNORE_MISSING_NATIVE_FILE_HINTS.setBoolean(true);
+        IS_DISABLED_MBEAN_REGISTRATION.setBoolean(true);
+        TEST_JVM_DTEST_DISABLE_SSL.setBoolean(true); // to support easily running without netty from dtest-jar
+
+        if (Thread.currentThread() instanceof InterceptibleThread); // load InterceptibleThread class to avoid infinite loop in InterceptorOfGlobalMethods
+        new InterceptedWait.CaptureSites(Thread.currentThread())
+        .toString(ste -> !ste.getClassName().equals(SelfReconcile.class.getName())); // ensure self reconcile verify can work without infinite looping
+        InterceptorOfGlobalMethods.Global.unsafeReset();
+        ThreadLocalRandom.current();
+    }
+
+    protected interface ICommand<B extends ClusterSimulation.Builder<?>>
+    {
+        public void run(B builder) throws IOException;
+    }
+
+    protected abstract static class BasicCommand<B extends ClusterSimulation.Builder<?>> implements ICommand<B>
+    {
+        @Option(name = { "--seed" } , title = "0x", description = "Specify the first seed to test (each simulation will increment the seed by 1)")
+        protected String seed;
+
+        @Option(name = { "--simulations"} , title = "int", description = "The number of simulations to run")
+        protected int simulationCount = 1;
+
+        @Option(name = { "-t", "--threads" }, title = "int", description = "The number of threads to split between node thread pools. Each ongoing action also requires its own thread.")
+        protected int threadCount = 1000;
+
+        @Option(name = { "-n", "--nodes" } , title = "int...int", description = "Cluster size range, lb..ub (default 4..16)")
+        protected String nodeCount = "4..16";
+
+        @Option(name = { "--dcs" }, title = "int...int", description = "Cluster DC count, lb..ub (default 1..2)")
+        protected String dcCount = "1..2";
+
+        @Option(name = { "-o", "--within-key-concurrency" }, title = "int..int", description = "Number of simultaneous paxos operations per key, lb..ub (default 2..5)")
+        protected String withinKeyConcurrency = "2..5";
+
+        @Option(name = { "-c", "--concurrency" }, title = "int", description = "Number of keys to operate on simultaneously (default 10)")
+        protected int concurrency = 10;
+        @Option(name = { "-k", "--keys" }, title = "int", description = "Number of unique partition keys to operate over (defaults to 2 * concurrency)")
+        protected int primaryKeyCount = -1;
+        @Option(name = { "--key-seconds" }, title = "int...int", description = "Number of seconds to simulate a partition key for before selecting another (default 5..30)")
+        protected String primaryKeySeconds = "5..30";
+
+        @Option(name = { "--cluster-actions" }, title = "JOIN,LEAVE,REPLACE,CHANGE_RF", description = "Cluster actions to select from, comma delimited (JOIN, LEAVE, REPLACE, CHANGE_RF)")
+        protected String topologyChanges = stream(TopologyChange.values()).map(Object::toString).collect(Collectors.joining(","));
+        @Option(name = { "--cluster-action-interval" }, title = "int...int(s|ms|us|ns)", description = "The period of time between two cluster actions (default 5..15s)")
+        protected String topologyChangeInterval = "5..15s";
+        @Option(name = { "--cluster-action-limit" }, title = "int", description = "The maximum number of topology change events to perform (default 0)")
+        protected String topologyChangeLimit = "0";
+
+        @Option(name = { "-s", "--run-time" }, title = "int", description = "Length of simulated time to run in seconds (default -1)")
+        protected int secondsToSimulate = -1;
+
+        @Option(name = { "--reads" }, title = "[distribution:]float...float", description = "Proportion of actions that are reads (default: 0.05..0.95)")
+        protected String readChance;
+        @Option(name = { "--nemesis" }, title = "[distribution:]float...float", description = "Proportion of nemesis points that are intercepted (default: 0..0.01)")
+        protected String nemesisChance;
+
+        @Option(name = { "--priority" }, title = "uniform|randomwalk|seq", description = "Priority assignment for actions that may overlap their execution", allowedValues = { "uniform", "randomwalk", "seq" })
+        protected String priority;
+
+        // TODO (feature): simulate GC pauses
+
+        @Option(name = { "--network-flaky-chance" }, title = "[distribution:]float...float", description = "Chance of some minority of nodes experiencing flaky connections (default: qlog:0.01..0.1)")
+        protected String networkFlakyChance = "qlog:0.01..0.1";
+        @Option(name = { "--network-partition-chance" }, title = "[distribution:]float...float", description = "Chance of some minority of nodes being isolated (default: qlog:0.01..0.1)")
+        protected String networkPartitionChance = "qlog:0.01..0.1";
+        @Option(name = { "--network-reconfigure-interval" }, title = "int...int(s|ms|us|ns)", description = "Period of time for which a flaky or catastrophic network partition may be in force")
+        protected String networkReconfigureInterval = "1..10s";
+        @Option(name = { "--network-drop-chance" }, title = "[distribution:]float...float", description = "Chance of dropping a message under normal circumstances (default: qlog:0..0.001)")
+        protected String networkDropChance = "qlog:0..0.001";
+        // TODO (feature): TCP vs UDP simulation (right now we have no head-of-line blocking so we deliver in a UDP fashion, which is not how the cluster operates)
+        @Option(name = { "--network-delay-chance" }, title = "[distribution:]float...float", description = "Chance of delaying a message under normal circumstances (default: qlog:0..0.1)")
+        protected String networkDelayChance = "qlog:0..0.01";
+        @Option(name = { "--network-latency" }, title = "int...int(s|ms|us|ns)", description = "Range of possible latencies messages can be simulated to experience (default 1..2ms)")
+        protected String networkLatency = "1..2ms";
+        @Option(name = { "--network-delay" }, title = "int...int(s|ms|us|ns)", description = "Range of possible latencies messages can be simulated to experience when delayed (default 2..20ms)")
+        protected String networkDelay = "2..20ms";
+        @Option(name = { "--network-flaky-drop-chance" }, title = "[distribution:]float...float", description = "Chance of dropping a message on a flaky connection (default: qlog:0.01..0.1)")
+        protected String flakyNetworkDropChance = "qlog:0.01..0.1";
+        @Option(name = { "--network-flaky-delay-chance" }, title = "[distribution:]float...float", description = "Chance of delaying a message on a flaky connection (default: qlog:0.01..0.2)")
+        protected String flakyNetworkDelayChance = "qlog:0.01..0.2";
+        @Option(name = { "--network-flaky-latency" }, title = "int...int(s|ms|us|ns)", description = "Range of possible latencies messages can be simulated to experience on a flaky connection (default 2..4ms)")
+        protected String flakyNetworkLatency = "2..4ms";
+        @Option(name = { "--network-flaky-delay" }, title = "int...int(s|ms|us|ns)", description = "Range of possible latencies messages can be simulated to experience when delayed on a flaky connection (default 4..100ms)")
+        protected String flakyNetworkDelay = "4..100ms";
+
+        @Option(name = { "--clock-drift" }, title = "int...int(s|ms|us|ns)", description = "The range of clock skews to experience (default 10..1000ms)")
+        protected String clockDrift = "10..1000ms";
+        @Option(name = { "--clock-discontinuity-interval" }, title = "int...int(s|ms|us|ns)", description = "The period of clock continuity (a discontinuity is a large jump of the global clock to introduce additional chaos for event scheduling) (default 10..60s)")
+        protected String clockDiscontinuityInterval = "10..60s";
+
+        @Option(name = { "--scheduler-jitter" }, title = "int...int(s|ms|us|ns)", description = "The scheduler will randomly prioritise all tasks scheduled to run within this interval (default 10..1500us)")
+        protected String schedulerJitter = "10..1500us";
+        @Option(name = { "--scheduler-delay-chance" }, title = "[distribution:]float...float", description = "Chance of delaying the consequence of an action (default: 0..0.1)")
+        protected String schedulerDelayChance = "qlog:0..0.1";
+        @Option(name = { "--scheduler-delay" }, title = "int...int(s|ms|us|ns)", description = "Range of possible additional latencies thread execution can be simulated to experience when delayed (default 1..10000us)")
+        protected String schedulerDelayMicros = "1..10000us";
+        @Option(name = { "--scheduler-long-delay" }, title = "int...int(s|ms|us|ns)", description = "Range of possible additional latencies thread execution can be simulated to experience when delayed (default 1..10000us)")
+        protected String schedulerLongDelayMicros = "1..10000us";
+
+        @Option(name = { "--log" }, title = "level", description = "<partition> <cluster> level events, between 0 and 2", arity = 2)
+        protected List<Integer> log;
+
+        @Option(name = { "--debug-keys" }, title = "level", description = "Print debug info only for these keys (comma delimited)")
+        protected String debugKeys;
+
+        @Option(name = { "--debug-rf" }, title = "level", description = "Print RF on <partition> <cluster> events; level 0 to 2", arity = 2, allowedValues = { "0", "1", "2" })
+        protected List<Integer> debugRf;
+
+        @Option(name = { "--debug-ownership" }, title = "level", description = "Print ownership on <partition> <cluster> events; level 0 to 2", arity = 2, allowedValues = { "0", "1", "2" })
+        protected List<Integer> debugOwnership;
+
+        @Option(name = { "--debug-ring" }, title = "level", description = "Print ring state on <partition> <cluster> events; level 0 to 2", arity = 2, allowedValues = { "0", "1", "2" })
+        protected List<Integer> debugRing;
+
+        @Option(name = { "--debug-gossip" }, title = "level", description = "Debug gossip at <partition> <cluster> events; level 0 to 2", arity = 2, allowedValues = { "0", "1", "2" })
+        protected List<Integer> debugGossip;
+
+        @Option(name = { "--debug-paxos" }, title = "level", description = "Print paxos state on <partition> <cluster> events; level 0 to 2", arity = 2, allowedValues = { "0", "1", "2" })
+        protected List<Integer> debugPaxos;
+
+        @Option(name = { "--capture" }, title = "wait,wake,now", description = "Capture thread stack traces alongside events, choose from (wait,wake,now)")
+        protected String capture;
+
+        protected void propagate(B builder)
+        {
+            builder.threadCount(threadCount);
+            builder.concurrency(concurrency);
+            if (primaryKeyCount >= 0) builder.primaryKeyCount(primaryKeyCount);
+            else builder.primaryKeyCount(2 * concurrency);
+            builder.secondsToSimulate(secondsToSimulate);
+            parseChanceRange(Optional.ofNullable(networkPartitionChance)).ifPresent(builder::networkPartitionChance);
+            parseChanceRange(Optional.ofNullable(networkFlakyChance)).ifPresent(builder::networkFlakyChance);
+            parseNanosRange(Optional.ofNullable(networkReconfigureInterval)).ifPresent(builder::networkReconfigureInterval);
+            parseChanceRange(Optional.ofNullable(networkDropChance)).ifPresent(builder::networkDropChance);
+            parseChanceRange(Optional.ofNullable(networkDelayChance)).ifPresent(builder::networkDelayChance);
+            parseNanosRange(Optional.ofNullable(networkLatency)).ifPresent(builder::networkLatencyNanos);
+            parseNanosRange(Optional.ofNullable(networkDelay)).ifPresent(builder::networkDelayNanos);
+            parseChanceRange(Optional.ofNullable(flakyNetworkDropChance)).ifPresent(builder::flakyNetworkDropChance);
+            parseChanceRange(Optional.ofNullable(flakyNetworkDelayChance)).ifPresent(builder::flakyNetworkDelayChance);
+            parseNanosRange(Optional.ofNullable(flakyNetworkLatency)).ifPresent(builder::flakyNetworkLatencyNanos);
+            parseNanosRange(Optional.ofNullable(flakyNetworkDelay)).ifPresent(builder::flakyNetworkDelayNanos);
+            parseChanceRange(Optional.ofNullable(schedulerDelayChance)).ifPresent(builder::schedulerDelayChance);
+            parseNanosRange(Optional.ofNullable(clockDrift)).ifPresent(builder::clockDriftNanos);
+            parseNanosRange(Optional.ofNullable(clockDiscontinuityInterval)).ifPresent(builder::clockDiscontinuityIntervalNanos);
+            parseNanosRange(Optional.ofNullable(schedulerJitter)).ifPresent(builder::schedulerJitterNanos);
+            parseNanosRange(Optional.ofNullable(schedulerDelayMicros)).ifPresent(builder::schedulerDelayNanos);
+            parseNanosRange(Optional.ofNullable(schedulerLongDelayMicros)).ifPresent(builder::schedulerLongDelayNanos);
+            parseChanceRange(Optional.ofNullable(readChance)).ifPresent(builder::readChance);
+            parseChanceRange(Optional.ofNullable(nemesisChance)).ifPresent(builder::nemesisChance);
+            parseRange(Optional.ofNullable(nodeCount)).ifPresent(builder::nodes);
+            parseRange(Optional.ofNullable(dcCount)).ifPresent(builder::dcs);
+            parseRange(Optional.ofNullable(primaryKeySeconds)).ifPresent(builder::primaryKeySeconds);
+            parseRange(Optional.ofNullable(withinKeyConcurrency)).ifPresent(builder::withinKeyConcurrency);
+            Optional.ofNullable(topologyChanges).ifPresent(topologyChanges -> {
+                builder.topologyChanges(stream(topologyChanges.split(","))
+                                        .filter(v -> !v.isEmpty())
+                                        .map(v -> TopologyChange.valueOf(v.toUpperCase()))
+                                        .toArray(TopologyChange[]::new));
+            });
+            parseNanosRange(Optional.ofNullable(topologyChangeInterval)).ifPresent(builder::topologyChangeIntervalNanos);
+            builder.topologyChangeLimit(Integer.parseInt(topologyChangeLimit));
+            Optional.ofNullable(priority).ifPresent(kinds -> {
+                builder.scheduler(stream(kinds.split(","))
+                                  .filter(v -> !v.isEmpty())
+                                  .map(v -> RunnableActionScheduler.Kind.valueOf(v.toUpperCase()))
+                                  .toArray(RunnableActionScheduler.Kind[]::new));
+            });
+
+            Optional.ofNullable(this.capture)
+                    .map(s -> s.split(","))
+                    .map(s -> new Capture(
+                        stream(s).anyMatch(s2 -> s2.equalsIgnoreCase("wait")),
+                        stream(s).anyMatch(s2 -> s2.equalsIgnoreCase("wake")),
+                        stream(s).anyMatch(s2 -> s2.equalsIgnoreCase("now"))
+                    ))
+                    .ifPresent(builder::capture);
+
+            EnumMap<Info, Levels> debugLevels = new EnumMap<>(Info.class);
+            Optional.ofNullable(log).ifPresent(list -> debugLevels.put(Info.LOG, new Levels(list.get(0), list.get(1))));
+            Optional.ofNullable(debugRf).ifPresent(list -> debugLevels.put(Info.RF, new Levels(list.get(0), list.get(1))));
+            Optional.ofNullable(debugOwnership).ifPresent(list -> debugLevels.put(Info.OWNERSHIP, new Levels(list.get(0), list.get(1))));
+            Optional.ofNullable(debugRing).ifPresent(list -> debugLevels.put(Info.RING, new Levels(list.get(0), list.get(1))));
+            Optional.ofNullable(debugGossip).ifPresent(list -> debugLevels.put(Info.GOSSIP, new Levels(list.get(0), list.get(1))));
+            Optional.ofNullable(debugPaxos).ifPresent(list -> debugLevels.put(Info.PAXOS, new Levels(list.get(0), list.get(1))));
+            if (!debugLevels.isEmpty())
+            {
+                int[] debugPrimaryKeys = Optional.ofNullable(debugKeys)
+                                                 .map(pks -> stream(pks.split(",")).mapToInt(Integer::parseInt).sorted().toArray())
+                                                 .orElse(new int[0]);
+                builder.debug(debugLevels, debugPrimaryKeys);
+            }
+        }
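+
+        // Example of the parsing above (hypothetical arguments): "--capture wait,now" builds a
+        // Capture with the wait and now sites enabled and wake disabled, and "--log 1 2" records
+        // Info.LOG at partition level 1 and cluster level 2; options left unset fall through the
+        // Optional guards and keep the builder's defaults.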
+
+        public void run(B builder) throws IOException
+        {
+            beforeAll();
+            Thread.setDefaultUncaughtExceptionHandler((th, e) -> {
+                boolean isInterrupt = false;
+                Throwable t = e;
+                while (!isInterrupt && t != null)
+                {
+                    isInterrupt = t instanceof InterruptedException || t instanceof UncheckedInterruptedException;
+                    t = t.getCause();
+                }
+                if (!isInterrupt)
+                    logger.error("Uncaught exception on {}", th, e);
+                if (e instanceof Error)
+                    throw (Error) e;
+            });
+
+            propagate(builder);
+
+            long seed = parseHex(Optional.ofNullable(this.seed)).orElse(new Random(System.nanoTime()).nextLong());
+            for (int i = 0 ; i < simulationCount ; ++i)
+            {
+                cleanup();
+                run(seed, builder);
+                ++seed;
+            }
+        }
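+
+        // For example (hypothetical arguments): "--seed 0x10 --simulations 3" runs seeds 0x10,
+        // 0x11 and 0x12 in turn; if --seed is omitted a random starting seed is chosen, and the
+        // run command logs each seed before the simulation starts.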
+
+        protected abstract void run(long seed, B builder) throws IOException;
+    }
+
+    @Command(name = "run")
+    protected static class Run<B extends ClusterSimulation.Builder<?>> extends BasicCommand<B>
+    {
+        protected void run(long seed, B builder) throws IOException
+        {
+            logger.error("Seed 0x{}", Long.toHexString(seed));
+
+            try (ClusterSimulation<?> cluster = builder.create(seed))
+            {
+                try
+                {
+                    cluster.simulation.run();
+                }
+                catch (Throwable t)
+                {
+                    throw new SimulationException(seed, t);
+                }
+            }
+            catch (Throwable t)
+            {
+                if (t instanceof SimulationException) throw t;
+                throw new SimulationException(seed, "Failure creating the simulation", t);
+            }
+        }
+    }
+
+    @Command(name = "record")
+    protected static class Record<B extends ClusterSimulation.Builder<?>> extends BasicCommand<B>
+    {
+        @Option(name = {"--to"}, description = "Directory to which to record values for the seed", required = true)
+        private String dir;
+
+        @Option(name = {"--with-rng"}, title = "0|1", description = "Record RNG values (with or without call sites)", allowedValues = {"0", "1"})
+        private int rng = -1;
+
+        @Option(name = {"--with-time"}, title = "0|1", description = "Record time values (with or without call sites)", allowedValues = {"0", "1"})
+        private int time = -1;
+
+        @Override
+        protected void run(long seed, B builder) throws IOException
+        {
+            record(dir, seed, RecordOption.values()[rng + 1], RecordOption.values()[time + 1], builder);
+        }
+    }
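+
+    /*
+     * For both the record and reconcile commands, the --with-rng/--with-time flags map onto
+     * RecordOption via values()[x + 1]: unset (-1) becomes NONE, 0 becomes VALUE and
+     * 1 becomes WITH_CALLSITES.
+     */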
+
+    @Command(name = "reconcile")
+    protected static class Reconcile<B extends ClusterSimulation.Builder<?>> extends BasicCommand<B>
+    {
+        @Option(name = {"--with"}, description = "Directory of recordings to reconcile with for the seed")
+        private String dir;
+
+        @Option(name = {"--with-rng"}, title = "0|1", description = "Reconcile RNG values (if present in source)", allowedValues = {"0", "1"})
+        private int rng = -1;
+
+        @Option(name = {"--with-time"}, title = "0|1", description = "Reconcile time values (if present in source)", allowedValues = {"0", "1"})
+        private int time = -1;
+
+        @Option(name = {"--with-allocations"}, description = "Reconcile memtable allocations (only with --with-self)", arity = 0)
+        private boolean allocations;
+
+        @Option(name = {"--with-self"}, description = "Reconcile with self", arity = 0)
+        private boolean withSelf;
+
+        @Override
+        protected void run(long seed, B builder) throws IOException
+        {
+            RecordOption withRng = RecordOption.values()[rng + 1];
+            RecordOption withTime = RecordOption.values()[time + 1];
+            if (withSelf) reconcileWithSelf(seed, withRng, withTime, allocations, builder);
+            else if (allocations) throw new IllegalArgumentException("--with-allocations is only compatible with --with-self");
+            else reconcileWith(dir, seed, withRng, withTime, builder);
+        }
+    }
+
+    protected static class HelpCommand<B extends ClusterSimulation.Builder<?>> extends Help implements ICommand<B>
+    {
+        @Override
+        public void run(B builder) throws IOException
+        {
+            super.run();
+        }
+    }
+
+
+    private static Optional<Long> parseHex(Optional<String> value)
+    {
+        return value.map(s -> {
+            if (s.startsWith("0x"))
+                return Hex.parseLong(s, 2, s.length());
+            throw new IllegalArgumentException("Invalid hex string: " + s);
+        });
+    }
+
+    private static final Pattern CHANCE_PATTERN = Pattern.compile("(uniform|(?<qlog>qlog(\\((?<quantizations>[0-9]+)\\))?):)?(?<min>0(\\.[0-9]+)?)(..(?<max>0\\.[0-9]+))?", Pattern.CASE_INSENSITIVE);
+    private static Optional<ChanceRange> parseChanceRange(Optional<String> chance)
+    {
+        return chance.map(s -> {
+            ToDoubleFunction<RandomSource> chanceSelector = RandomSource::uniformFloat;
+            Matcher m = CHANCE_PATTERN.matcher(s);
+            if (!m.matches()) throw new IllegalArgumentException("Invalid chance specification: " + s);
+            if (m.group("qlog") != null)
+            {
+                int quantizations = m.group("quantizations") == null ? 4 : Integer.parseInt(m.group("quantizations"));
+                chanceSelector = randomSource -> randomSource.qlog2uniformFloat(quantizations);
+            }
+            float min = Float.parseFloat(m.group("min"));
+            float max = m.group("max") == null ? min : Float.parseFloat(m.group("max"));
+            return new ChanceRange(chanceSelector, min, max);
+        });
+    }
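+
+    // Example chance specifications accepted by the pattern above (values are illustrative):
+    // "0.05..0.95" parses to min 0.05, max 0.95 with a uniform selector; "qlog:0.01..0.1" uses
+    // qlog2uniformFloat(4) as the selector; "qlog(8):0..0.001" uses 8 quantizations; a single
+    // value such as "0.5" fixes min == max.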
+
+    private static void cleanup()
+    {
+        FastThreadLocal.destroy();
+        for (int i = 0 ; i < 10 ; ++i)
+            System.gc();
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/SimulatorUtils.java b/test/simulator/main/org/apache/cassandra/simulator/SimulatorUtils.java
new file mode 100644
index 0000000..5be3384
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/SimulatorUtils.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+
+import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.utils.concurrent.Threads;
+
+public class SimulatorUtils
+{
+    public static RuntimeException failWithOOM()
+    {
+        List<long[]> oom = new ArrayList<>();
+        for (int i = 0 ; i < 1024 ; ++i)
+            oom.add(new long[0x7fffffff]);
+        throw new AssertionError();
+    }
+
+    public static void dumpStackTraces(Logger logger)
+    {
+        Map<Thread, StackTraceElement[]> threadMap = Thread.getAllStackTraces();
+        threadMap.forEach((thread, ste) -> {
+            logger.error("{}:\n   {}", thread, Threads.prettyPrint(ste, false, "   ", "\n", ""));
+        });
+        FastThreadLocal.destroy();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/asm/InterceptAsClassTransformer.java b/test/simulator/main/org/apache/cassandra/simulator/asm/InterceptAsClassTransformer.java
new file mode 100644
index 0000000..200f984
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/asm/InterceptAsClassTransformer.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.function.Predicate;
+
+import org.apache.cassandra.distributed.api.IClassTransformer;
+
+// an adapter to IClassTransformer that is loaded by the system classloader
+public class InterceptAsClassTransformer extends InterceptClasses implements IClassTransformer
+{
+    public InterceptAsClassTransformer(ChanceSupplier monitorDelayChance, ChanceSupplier nemesisChance, NemesisFieldKind.Selector nemesisFieldSelector, ClassLoader prewarmClassLoader, Predicate<String> prewarm)
+    {
+        super(monitorDelayChance, nemesisChance, nemesisFieldSelector, prewarmClassLoader, prewarm);
+    }
+
+    public InterceptAsClassTransformer(int api, ChanceSupplier monitorDelayChance, ChanceSupplier nemesisChance, NemesisFieldKind.Selector nemesisFieldSelector, ClassLoader prewarmClassLoader, Predicate<String> prewarm)
+    {
+        super(api, monitorDelayChance, nemesisChance, nemesisFieldSelector, prewarmClassLoader, prewarm);
+    }
+
+    @Override
+    public byte[] transform(String name, byte[] bytecode)
+    {
+        return apply(name, bytecode);
+    }
+
+    @Override
+    public IClassTransformer initialise()
+    {
+        return new SubTransformer()::apply;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/asm/NemesisFieldSelectors.java b/test/simulator/main/org/apache/cassandra/simulator/asm/NemesisFieldSelectors.java
new file mode 100644
index 0000000..e8c420d
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/asm/NemesisFieldSelectors.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.asm;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicLongFieldUpdater;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.stream.Stream;
+
+import org.apache.cassandra.utils.Nemesis;
+import org.reflections.Reflections;
+import org.reflections.scanners.FieldAnnotationsScanner;
+import org.reflections.util.ConfigurationBuilder;
+
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.simulator.asm.InterceptClasses.dotsToSlashes;
+import static org.apache.cassandra.simulator.asm.NemesisFieldKind.SIMPLE;
+
+/**
+ * Defines classes that receive special handling.
+ * At present, all instance methods invoked on such classes have nemesis points inserted on either side of them.
+ */
+public class NemesisFieldSelectors
+{
+    public static final Map<String, Map<String, NemesisFieldKind>> classToFieldToNemesis;
+
+    static
+    {
+        Map<Class<?>, NemesisFieldKind> byClass = new HashMap<>();
+        for (NemesisFieldKind type : NemesisFieldKind.values())
+            type.classes.forEach(c -> byClass.put(c, type));
+
+        Stream.of(AtomicIntegerFieldUpdater.class, AtomicLongFieldUpdater.class, AtomicReferenceFieldUpdater.class)
+              .forEach(c -> byClass.put(c, NemesisFieldKind.ATOMICUPDATERX));
+
+        Map<String, Map<String, NemesisFieldKind>> byField = new HashMap<>();
+        new Reflections(ConfigurationBuilder.build("org.apache.cassandra").addScanners(new FieldAnnotationsScanner()))
+        .getFieldsAnnotatedWith(Nemesis.class)
+        .forEach(field -> byField.computeIfAbsent(dotsToSlashes(field.getDeclaringClass()), ignore -> new HashMap<>())
+                                 .put(field.getName(), byClass.getOrDefault(field.getType(), SIMPLE)));
+        classToFieldToNemesis = Collections.unmodifiableMap(byField);
+    }
+
+    public static NemesisFieldKind.Selector get()
+    {
+        return (name, field) -> classToFieldToNemesis.getOrDefault(name, emptyMap()).get(field);
+    }
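+
+    /*
+     * Illustrative use of the selector (names are hypothetical): for a field declared as
+     * "@Nemesis volatile int bar" on org.apache.cassandra.Foo, the selector returns the mapped
+     * NemesisFieldKind for ("org/apache/cassandra/Foo", "bar"), and null for any field that is
+     * not annotated.
+     */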
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterAction.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterAction.java
new file mode 100644
index 0000000..90289de
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterAction.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
+import org.apache.cassandra.simulator.systems.SimulatedActionTask;
+
+class ClusterAction extends SimulatedActionTask
+{
+    ClusterAction(String description, Modifiers self, Modifiers children, ClusterActions actions, int on, SerializableRunnable invoke)
+    {
+        super(description, self.with(Modifier.DISPLAY_ORIGIN), children, actions, actions.cluster.get(on), invoke);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActionListener.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActionListener.java
new file mode 100644
index 0000000..c8d873f
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActionListener.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+public interface ClusterActionListener
+{
+    interface TopologyChangeValidator
+    {
+        public void before(Topology before, int[] participatingKeys);
+        public void after(Topology after);
+    }
+
+    interface RepairValidator
+    {
+        public void before(Topology topology, boolean repairPaxos, boolean repairOnlyPaxos);
+        public void after();
+    }
+
+    TopologyChangeValidator newTopologyChangeValidator(Object id);
+    RepairValidator newRepairValidator(Object id);
+
+    class NoOpListener implements ClusterActionListener
+    {
+
+        @Override
+        public TopologyChangeValidator newTopologyChangeValidator(Object id)
+        {
+            return new TopologyChangeValidator()
+            {
+                @Override
+                public void before(Topology before, int[] participatingKeys)
+                {
+                }
+
+                @Override
+                public void after(Topology after)
+                {
+                }
+            };
+        }
+
+        @Override
+        public RepairValidator newRepairValidator(Object id)
+        {
+            return new RepairValidator()
+            {
+                @Override
+                public void before(Topology topology, boolean repairPaxos, boolean repairOnlyPaxos)
+                {
+                }
+
+                @Override
+                public void after()
+                {
+                }
+            };
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActions.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActions.java
new file mode 100644
index 0000000..ab66d50
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActions.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.Config.PaxosVariant;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstance;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.ReplicaLayout;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.Actions.ReliableAction;
+import org.apache.cassandra.simulator.Actions.StrictAction;
+import org.apache.cassandra.simulator.Debug;
+import org.apache.cassandra.simulator.RandomSource.Choices;
+import org.apache.cassandra.simulator.systems.InterceptedExecution;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor;
+import org.apache.cassandra.simulator.systems.NonInterceptible;
+import org.apache.cassandra.simulator.systems.SimulatedSystems;
+import org.apache.cassandra.simulator.utils.KindOfSequence;
+
+import static org.apache.cassandra.distributed.impl.UnsafeGossipHelper.addToRingNormalRunner;
+import static org.apache.cassandra.simulator.Action.Modifiers.NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.Debug.EventType.CLUSTER;
+import static org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange.JOIN;
+import static org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange.LEAVE;
+import static org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange.REPLACE;
+import static org.apache.cassandra.simulator.systems.NonInterceptible.Permit.REQUIRED;
+import static org.apache.cassandra.simulator.utils.KindOfSequence.UNIFORM;
+
+
+// TODO (feature): add Gossip failures (up to some acceptable number)
+// TODO (feature): add node down/up (need to coordinate bootstrap/repair execution around this)
+// TODO (feature): add node stop/start (need to coordinate normal operation execution around this)
+// TODO (feature): permit multiple topology actions in parallel, e.g. REPLACE and CHANGE_RF
+// TODO (feature): support nodes rejoining cluster so we can leave running indefinitely
+@SuppressWarnings("unused")
+public class ClusterActions extends SimulatedSystems
+{
+    private static final Logger logger = LoggerFactory.getLogger(ClusterActions.class);
+
+    public enum TopologyChange
+    {
+        JOIN, LEAVE, REPLACE, CHANGE_RF
+    }
+
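+    // Bounds and scheduling knobs for the topology changes the simulation may perform
+    // (how many, how often, which kinds, and the per-DC min/initial/max replication factors)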
+    public static class Options
+    {
+        public final int topologyChangeLimit;
+        public final KindOfSequence.Period topologyChangeInterval;
+        public final Choices<TopologyChange> allChoices;
+        public final Choices<TopologyChange> choicesNoLeave;
+        public final Choices<TopologyChange> choicesNoJoin;
+
+        public final int[] minRf, initialRf, maxRf;
+        public final PaxosVariant changePaxosVariantTo;
+
+        public Options(Options copy)
+        {
+            this(copy, copy.changePaxosVariantTo);
+        }
+
+        public Options(Options copy, PaxosVariant changePaxosVariantTo)
+        {
+            this.topologyChangeLimit = copy.topologyChangeLimit;
+            this.topologyChangeInterval = copy.topologyChangeInterval;
+            this.allChoices = copy.allChoices;
+            this.choicesNoLeave = copy.choicesNoLeave;
+            this.choicesNoJoin = copy.choicesNoJoin;
+            this.minRf = copy.minRf;
+            this.initialRf = copy.initialRf;
+            this.maxRf = copy.maxRf;
+            this.changePaxosVariantTo = changePaxosVariantTo;
+        }
+
+        public Options(int topologyChangeLimit, KindOfSequence.Period topologyChangeInterval, Choices<TopologyChange> choices, int[] minRf, int[] initialRf, int[] maxRf, PaxosVariant changePaxosVariantTo)
+        {
+            if (Arrays.equals(minRf, maxRf))
+                choices = choices.without(TopologyChange.CHANGE_RF);
+
+            this.topologyChangeInterval = topologyChangeInterval;
+            this.topologyChangeLimit = topologyChangeLimit;
+            this.minRf = minRf;
+            this.initialRf = initialRf;
+            this.maxRf = maxRf;
+            this.allChoices = choices;
+            this.choicesNoJoin = allChoices.without(JOIN).without(REPLACE);
+            this.choicesNoLeave = allChoices.without(LEAVE);
+            this.changePaxosVariantTo = changePaxosVariantTo;
+        }
+
+        public static Options noActions(int clusterSize)
+        {
+            int[] rf = new int[]{clusterSize};
+            return new Options(0, UNIFORM.period(null, null), Choices.uniform(), rf, rf, rf, null);
+        }
+
+        public Options changePaxosVariantTo(PaxosVariant newVariant)
+        {
+            return new Options(this, newVariant);
+        }
+    }
+
+    final Cluster cluster;
+    final Options options;
+    final ClusterActionListener listener;
+    final Debug debug;
+
+    public ClusterActions(SimulatedSystems simulated,
+                          Cluster cluster,
+                          Options options,
+                          ClusterActionListener listener,
+                          Debug debug)
+    {
+        super(simulated);
+        Preconditions.checkNotNull(cluster);
+        Preconditions.checkNotNull(options);
+        Preconditions.checkNotNull(listener);
+        Preconditions.checkNotNull(debug);
+        this.cluster = cluster;
+        this.options = options;
+        this.listener = listener;
+        this.debug = debug;
+    }
+
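+    // Which nodes start out joined to the ring and which remain pre-join for later bootstrap actions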
+    public static class InitialConfiguration
+    {
+        public static final int[] EMPTY = {};
+        private final int[] joined;
+        private final int[] prejoin;
+
+        public InitialConfiguration(int[] joined, int[] prejoin)
+        {
+            this.joined = joined;
+            this.prejoin = prejoin;
+        }
+
+        public static InitialConfiguration initializeAll(int nodes)
+        {
+            int[] joined = new int[nodes];
+            for (int i = 0; i < nodes; i++)
+                joined[i] = i + 1;
+            return new InitialConfiguration(joined, EMPTY);
+        }
+    }
+
+    public Action initializeCluster(InitialConfiguration config)
+    {
+        return this.initializeCluster(config.joined, config.prejoin);
+    }
+
+    public Action initializeCluster(int[] joined, int[] prejoin)
+    {
+        return StrictAction.of("Initialise Cluster", () -> {
+            List<Action> actions = new ArrayList<>();
+
+            cluster.stream().forEach(i -> actions.add(invoke("Startup " + i.broadcastAddress(), NO_TIMEOUTS, NO_TIMEOUTS,
+                                                             new InterceptedExecution.InterceptedRunnableExecution((InterceptingExecutor) i.executor(), i::startup))));
+
+            List<InetSocketAddress> endpoints = cluster.stream().map(IInstance::broadcastAddress).collect(Collectors.toList());
+            cluster.forEach(i -> actions.add(resetGossipState(i, endpoints)));
+
+            for (int add : joined)
+            {
+                actions.add(transitivelyReliable("Add " + add + " to ring", cluster.get(add), addToRingNormalRunner(cluster.get(add))));
+                actions.addAll(sendLocalGossipStateToAll(add));
+            }
+
+            actions.add(ReliableAction.transitively("Sync Pending Ranges Executor", ClusterActions.this::syncPendingRanges));
+            debug.debug(CLUSTER, time, cluster, null, null);
+            return ActionList.of(actions);
+        });
+    }
+
+    Action resetGossipState(IInvokableInstance i, List<InetSocketAddress> endpoints)
+    {
+        return transitivelyReliable("Reset Gossip", i, () -> Gossiper.runInGossipStageBlocking(Gossiper.instance::unsafeSetEnabled));
+    }
+
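+    // Asserts that, for each primary key in the topology, the replicas computed on the given node
+    // (restricted to members of the quorum) match the replicas the simulator expects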
+    @SuppressWarnings("unchecked")
+    void validateReplicasForKeys(IInvokableInstance on, String keyspace, String table, Topology topology)
+    {
+        int[] primaryKeys = topology.primaryKeys;
+        int[][] validate = NonInterceptible.apply(REQUIRED, () -> {
+            Map<InetSocketAddress, Integer> lookup = Cluster.getUniqueAddressLookup(cluster, i -> i.config().num());
+            int[][] result = new int[primaryKeys.length][];
+            for (int i = 0 ; i < primaryKeys.length ; ++i)
+            {
+                int primaryKey = primaryKeys[i];
+                result[i] = on.unsafeApplyOnThisThread(ClusterActions::replicasForPrimaryKey, keyspace, table, primaryKey)
+                              .stream()
+                              .mapToInt(lookup::get)
+                              .filter(r -> Arrays.binarySearch(topology.membersOfQuorum, r) >= 0)
+                              .toArray();
+            }
+            return result;
+        });
+        for (int i = 0 ; i < primaryKeys.length ; ++i)
+        {
+            int[] vs1 = validate[i];
+            int[] vs2 = topology.replicasForKeys[i].clone();
+            Arrays.sort(vs1);
+            Arrays.sort(vs2);
+            if (!Arrays.equals(vs1, vs2))
+                throw new AssertionError();
+        }
+    }
+
+    // assumes every node knows the correct topology
+    static List<InetSocketAddress> replicasForPrimaryKey(String keyspaceName, String table, int primaryKey)
+    {
+        Keyspace keyspace = Keyspace.open(keyspaceName);
+        TableMetadata metadata = keyspace.getColumnFamilyStore(table).metadata.get();
+        DecoratedKey key = metadata.partitioner.decorateKey(Int32Type.instance.decompose(primaryKey));
+        // we return a Set because the simulator can easily encounter a point where nodes are both natural and pending
+        return ReplicaLayout.forTokenWriteLiveAndDown(keyspace, key.getToken()).all().asList(Replica::endpoint);
+    }
+
+    private ActionList to(BiFunction<Integer, Integer, Action> action, int from, IntStream to)
+    {
+        return ActionList.of(to.filter(i -> i != from)
+                .mapToObj(i -> action.apply(from, i)));
+    }
+    private ActionList toAll(BiFunction<Integer, Integer, Action> action, int from)
+    {
+        return to(action, from, IntStream.rangeClosed(1, cluster.size()));
+    }
+    private ActionList to(BiFunction<Integer, Integer, Action> action, int from, int[] to)
+    {
+        return to(action, from, IntStream.of(to));
+    }
+
+    ActionList on(Function<Integer, Action> action, IntStream on)
+    {
+        return ActionList.of(on.mapToObj(action::apply));
+    }
+    ActionList onAll(Function<Integer, Action> action)
+    {
+        return on(action, IntStream.rangeClosed(1, cluster.size()));
+    }
+    ActionList on(Function<Integer, Action> action, int[] on)
+    {
+        return on(action, IntStream.of(on));
+    }
+
+    ActionList syncPendingRanges() { return onAll(OnInstanceSyncPendingRanges.factory(this)); }
+    ActionList gossipWithAll(int from) { return toAll(OnInstanceGossipWith.factory(this), from); }
+    ActionList sendShutdownToAll(int from) { return toAll(OnInstanceSendShutdown.factory(this), from); }
+    ActionList sendLocalGossipStateToAll(int from) { return toAll(OnInstanceSendLocalGossipState.factory(this), from); }
+    ActionList flushAndCleanup(int[] on) { return on(OnInstanceFlushAndCleanup.factory(this), on); }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterReliableAction.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterReliableAction.java
new file mode 100644
index 0000000..591f2b9
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterReliableAction.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import static org.apache.cassandra.distributed.api.IIsolatedExecutor.*;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+
+public class ClusterReliableAction extends ClusterAction
+{
+    public ClusterReliableAction(String description, ClusterActions actions, int on, SerializableRunnable runnable)
+    {
+        super(description, RELIABLE_NO_TIMEOUTS, RELIABLE_NO_TIMEOUTS, actions, on, runnable);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterReliableQueryAction.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterReliableQueryAction.java
new file mode 100644
index 0000000..ea22081
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterReliableQueryAction.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.simulator.systems.SimulatedQuery;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+
+class ClusterReliableQueryAction extends SimulatedQuery
+{
+    ClusterReliableQueryAction(String id, ClusterActions actions, int on, String query, long timestamp, ConsistencyLevel consistencyLevel, Object... params)
+    {
+        super(id, RELIABLE_NO_TIMEOUTS, RELIABLE_NO_TIMEOUTS, actions, actions.cluster.get(on), query, timestamp, consistencyLevel, params);
+    }
+
+    public static ClusterReliableQueryAction schemaChange(String id, ClusterActions actions, int on, String query)
+    {
+        // this isn't used on 4.0+ nodes, but no harm in supplying it anyway
+        return new ClusterReliableQueryAction(id, actions, on, query, actions.time.nextGlobalMonotonicMicros(), ConsistencyLevel.ALL);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterUnsafeAction.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterUnsafeAction.java
new file mode 100644
index 0000000..3b61e92
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterUnsafeAction.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.simulator.systems.SimulatedActionTask;
+
+class ClusterUnsafeAction extends SimulatedActionTask
+{
+    ClusterUnsafeAction(String id, Modifiers self, Modifiers children, ClusterActions actions, int on, Runnable run)
+    {
+        this(id, self, children, actions, actions.cluster.get(on), run);
+    }
+
+    ClusterUnsafeAction(String id, Modifiers self, Modifiers children, ClusterActions actions, IInvokableInstance on, Runnable run)
+    {
+        super(id, self, children, null, actions, unsafeAsTask(on, run, actions.failures));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/KeyspaceActions.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/KeyspaceActions.java
new file mode 100644
index 0000000..55ab20a
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/KeyspaceActions.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.IntStream;
+
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.locator.AbstractReplicationStrategy;
+import org.apache.cassandra.locator.EndpointsForToken;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.NetworkTopologyStrategy;
+import org.apache.cassandra.locator.PendingRangeMaps;
+import org.apache.cassandra.locator.TokenMetadata;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.ActionListener;
+import org.apache.cassandra.simulator.ActionPlan;
+import org.apache.cassandra.simulator.Actions;
+import org.apache.cassandra.simulator.Debug;
+import org.apache.cassandra.simulator.OrderOn.StrictSequential;
+import org.apache.cassandra.simulator.systems.SimulatedSystems;
+
+import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.LOCAL_SERIAL;
+import static org.apache.cassandra.simulator.Debug.EventType.CLUSTER;
+import static org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange.CHANGE_RF;
+import static org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange.JOIN;
+import static org.apache.cassandra.simulator.cluster.ClusterActions.TopologyChange.LEAVE;
+import static org.apache.cassandra.simulator.cluster.ClusterReliableQueryAction.schemaChange;
+
+public class KeyspaceActions extends ClusterActions
+{
+    final String keyspace;
+    final String table;
+    final String createTableCql;
+    final ConsistencyLevel serialConsistency;
+    final int[] primaryKeys;
+
+    final EnumSet<TopologyChange> ops = EnumSet.noneOf(TopologyChange.class);
+    final NodeLookup nodeLookup;
+    final int[] minRf, initialRf, maxRf;
+    final int[] membersOfQuorumDcs;
+
+    // working state
+    final NodesByDc all;
+    final NodesByDc prejoin;
+    final NodesByDc joined;
+    final NodesByDc left;
+
+    final int[] currentRf;
+    final TokenMetadata tokenMetadata = new TokenMetadata(snitch.get());
+    Topology topology;
+    boolean haveChangedVariant;
+    int topologyChangeCount = 0;
+
+    public KeyspaceActions(SimulatedSystems simulated,
+                           String keyspace, String table, String createTableCql,
+                           Cluster cluster,
+                           Options options,
+                           ConsistencyLevel serialConsistency,
+                           ClusterActionListener listener,
+                           int[] primaryKeys,
+                           Debug debug)
+    {
+        super(simulated, cluster, options, listener, debug);
+        this.keyspace = keyspace;
+        this.table = table;
+        this.createTableCql = createTableCql;
+        this.primaryKeys = primaryKeys;
+        this.serialConsistency = serialConsistency;
+
+        this.nodeLookup = simulated.snitch;
+
+        int[] dcSizes = new int[options.initialRf.length];
+        for (int dc : nodeLookup.nodeToDc)
+            ++dcSizes[dc];
+
+        this.all = new NodesByDc(nodeLookup, dcSizes);
+        this.prejoin = new NodesByDc(nodeLookup, dcSizes);
+        this.joined = new NodesByDc(nodeLookup, dcSizes);
+        this.left = new NodesByDc(nodeLookup, dcSizes);
+
+        for (int i = 1 ; i <= nodeLookup.nodeToDc.length ; ++i)
+        {
+            this.prejoin.add(i);
+            this.all.add(i);
+        }
+
+        minRf = options.minRf;
+        initialRf = options.initialRf;
+        maxRf = options.maxRf;
+        currentRf = initialRf.clone();
+        membersOfQuorumDcs = serialConsistency == LOCAL_SERIAL ? all.dcs[0] : all.toArray();
+        ops.addAll(Arrays.asList(options.allChoices.options));
+    }
+
+    public ActionPlan plan()
+    {
+        ActionList pre = ActionList.of(pre(createKeyspaceCql(keyspace), createTableCql));
+        ActionList interleave = stream();
+        ActionList post = ActionList.empty();
+        return new ActionPlan(pre, singletonList(interleave), post);
+    }
+
+    @SuppressWarnings("StringConcatenationInLoop")
+    private String createKeyspaceCql(String keyspace)
+    {
+        String createKeyspaceCql = "CREATE KEYSPACE " + keyspace  + " WITH replication = {'class': 'NetworkTopologyStrategy'";
+        for (int i = 0 ; i < options.initialRf.length ; ++i)
+            createKeyspaceCql += ", '" + snitch.nameOfDc(i) + "': " + options.initialRf[i];
+        createKeyspaceCql += "};";
+        return createKeyspaceCql;
+    }
+
+    private Action pre(String createKeyspaceCql, String createTableCql)
+    {
+        // randomise initial cluster, and return action to initialise it
+        for (int dc = 0 ; dc < options.initialRf.length ; ++dc)
+        {
+            for (int i = 0 ; i < options.initialRf[dc] ; ++i)
+            {
+                int join = prejoin.removeRandom(random, dc);
+                joined.add(join);
+                tokenMetadata.updateNormalToken(tokenOf(join), inet(join));
+            }
+        }
+
+        updateTopology(recomputeTopology());
+        int[] joined = this.joined.toArray();
+        int[] prejoin = this.prejoin.toArray();
+        return Actions.StrictAction.of("Initialize", () -> {
+            return ActionList.of(initializeCluster(joined, prejoin),
+                                 schemaChange("Create Keyspace", KeyspaceActions.this, 1, createKeyspaceCql),
+                                 schemaChange("Create Table", KeyspaceActions.this, 1, createTableCql));
+        });
+    }
+
+    @SuppressWarnings("StatementWithEmptyBody")
+    private ActionList stream()
+    {
+        ActionListener listener = debug.debug(CLUSTER, time, cluster, keyspace, null);
+        if (listener == null)
+            return ActionList.of(Actions.stream(new StrictSequential("Cluster Actions"), this::next));
+
+        return ActionList.of(Actions.stream(new StrictSequential("Cluster Actions"), () -> {
+            Action action = next();
+            if (action != null)
+                action.register(listener);
+            return action;
+        }));
+    }
+
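+    // Picks the next cluster action: possibly a one-off paxos variant switch, otherwise a topology change
+    // (REPLACE, JOIN, LEAVE or CHANGE_RF) that is legal for a randomly chosen DC given the RF bounds;
+    // returns null once the topology change limit is reached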
+    private Action next()
+    {
+        if (options.topologyChangeLimit >= 0 && topologyChangeCount++ > options.topologyChangeLimit)
+            return null;
+
+        while (!ops.isEmpty() && (!prejoin.isEmpty() || joined.size() > sum(minRf)))
+        {
+            if (options.changePaxosVariantTo != null && !haveChangedVariant && random.decide(1f / (1 + prejoin.size())))
+            {
+                haveChangedVariant = true;
+                return schedule(new OnClusterSetPaxosVariant(KeyspaceActions.this, options.changePaxosVariantTo));
+            }
+
+            // pick a dc
+            int dc = random.uniform(0, currentRf.length);
+
+            // try to pick an action (and simply loop again if we cannot for this dc)
+            TopologyChange next;
+            if (prejoin.size(dc) > 0 && joined.size(dc) > currentRf[dc]) next = options.allChoices.choose(random);
+            else if (prejoin.size(dc) > 0 && ops.contains(JOIN)) next = options.choicesNoLeave.choose(random);
+            else if (joined.size(dc) > currentRf[dc] && ops.contains(LEAVE)) next = options.choicesNoJoin.choose(random);
+            else if (joined.size(dc) > minRf[dc]) next = CHANGE_RF;
+            else continue;
+
+            // TODO (feature): introduce some time period between cluster actions
+            switch (next)
+            {
+                case REPLACE:
+                {
+                    Topology before = topology;
+                    int join = prejoin.removeRandom(random, dc);
+                    int leave = joined.selectRandom(random, dc);
+                    joined.add(join);
+                    joined.remove(leave);
+                    left.add(leave);
+                    nodeLookup.setTokenOf(join, nodeLookup.tokenOf(leave));
+                    Collection<Token> token = singleton(tokenOf(leave));
+                    tokenMetadata.addReplaceTokens(token, inet(join), inet(leave));
+                    tokenMetadata.unsafeCalculatePendingRanges(strategy(), keyspace);
+                    Topology during = recomputeTopology();
+                    updateTopology(during);
+                    tokenMetadata.updateNormalTokens(token, inet(join));
+                    tokenMetadata.unsafeCalculatePendingRanges(strategy(), keyspace);
+                    Topology after = recomputeTopology();
+                    Action action = new OnClusterReplace(KeyspaceActions.this, before, during, after, leave, join);
+                    return scheduleAndUpdateTopologyOnCompletion(action, after);
+                    // if replication factor is 2, cannot perform safe replacements
+                    // however can have operations that began earlier during RF=2
+                    // so need to introduce some concept of barriers/ordering/sync points
+                }
+                case JOIN:
+                {
+                    Topology before = topology;
+                    int join = prejoin.removeRandom(random, dc);
+                    joined.add(join);
+                    Collection<Token> token = singleton(tokenOf(join));
+                    tokenMetadata.addBootstrapTokens(token, inet(join));
+                    tokenMetadata.unsafeCalculatePendingRanges(strategy(), keyspace);
+                    Topology during = recomputeTopology();
+                    updateTopology(during);
+                    tokenMetadata.updateNormalTokens(token, inet(join));
+                    tokenMetadata.unsafeCalculatePendingRanges(strategy(), keyspace);
+                    Topology after = recomputeTopology();
+                    Action action = new OnClusterJoin(KeyspaceActions.this, before, during, after, join);
+                    return scheduleAndUpdateTopologyOnCompletion(action, after);
+                }
+                case LEAVE:
+                {
+                    Topology before = topology;
+                    int leave = joined.removeRandom(random, dc);
+                    left.add(leave);
+                    tokenMetadata.addLeavingEndpoint(inet(leave));
+                    tokenMetadata.unsafeCalculatePendingRanges(strategy(), keyspace);
+                    Topology during = recomputeTopology();
+                    updateTopology(during);
+                    tokenMetadata.removeEndpoint(inet(leave));
+                    tokenMetadata.unsafeCalculatePendingRanges(strategy(), keyspace);
+                    Topology after = recomputeTopology();
+                    Action action = new OnClusterLeave(KeyspaceActions.this, before, during, after, leave);
+                    return scheduleAndUpdateTopologyOnCompletion(action, after);
+                }
+                case CHANGE_RF:
+                    if (maxRf[dc] == minRf[dc]) {} // cannot perform RF changes at all
+                    else if (currentRf[dc] == minRf[dc] && joined.size(dc) == currentRf[dc]) {} // can do nothing until joined grows
+                    else
+                    {
+                        boolean increase;
+                        if (currentRf[dc] == minRf[dc]) // can only grow
+                        { ++currentRf[dc]; increase = true;}
+                        else if (currentRf[dc] == joined.size(dc) || currentRf[dc] == maxRf[dc]) // can only shrink, and we know currentRf > minRf
+                        { --currentRf[dc]; increase = false; }
+                        else if (random.decide(0.5f)) // can do either
+                        { --currentRf[dc]; increase = false; }
+                        else
+                        { ++currentRf[dc]; increase = true; }
+
+                        // this isn't used on 4.0+ nodes, but no harm in supplying it anyway
+                        long timestamp = time.nextGlobalMonotonicMicros();
+                        int coordinator = joined.selectRandom(random, dc);
+                        Topology before = topology;
+                        Topology after = recomputeTopology();
+                        return scheduleAndUpdateTopologyOnCompletion(new OnClusterChangeRf(KeyspaceActions.this, timestamp, coordinator, before, after, increase), after);
+                    }
+            }
+        }
+
+        if (options.changePaxosVariantTo != null && !haveChangedVariant)
+        {
+            haveChangedVariant = true;
+            return schedule(new OnClusterSetPaxosVariant(KeyspaceActions.this, options.changePaxosVariantTo));
+        }
+
+        return null;
+    }
+
+    private Action schedule(Action action)
+    {
+        action.setDeadline(time, time.nanoTime() + options.topologyChangeInterval.get(random));
+        return action;
+    }
+
+    private Action scheduleAndUpdateTopologyOnCompletion(Action action, Topology newTopology)
+    {
+        action.register(new ActionListener()
+        {
+            @Override
+            public void before(Action action, Before before)
+            {
+                if (before == Before.EXECUTE)
+                    time.forbidDiscontinuities();
+            }
+
+            @Override
+            public void transitivelyAfter(Action finished)
+            {
+                updateTopology(newTopology);
+                time.permitDiscontinuities();
+            }
+        });
+        return schedule(action);
+    }
+
+    void updateTopology(Topology newTopology)
+    {
+        topology = newTopology;
+        announce(topology);
+    }
+
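+    // Recomputes, for every primary key, the natural and pending replicas implied by the simulator's
+    // current token metadata and replication factors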
+    private Topology recomputeTopology()
+    {
+        AbstractReplicationStrategy strategy = strategy();
+        Map<InetSocketAddress, Integer> lookup = Cluster.getUniqueAddressLookup(cluster, i -> i.config().num());
+        int[][] replicasForKey = new int[primaryKeys.length][];
+        int[][] pendingReplicasForKey = new int[primaryKeys.length][];
+        for (int i = 0 ; i < primaryKeys.length ; ++i)
+        {
+            int primaryKey = primaryKeys[i];
+            Token token = new Murmur3Partitioner().getToken(Int32Type.instance.decompose(primaryKey));
+            replicasForKey[i] = strategy.calculateNaturalReplicas(token, tokenMetadata)
+                                        .endpointList().stream().mapToInt(lookup::get).toArray();
+            PendingRangeMaps pendingRanges = tokenMetadata.getPendingRanges(keyspace);
+            EndpointsForToken pendingEndpoints = pendingRanges == null ? null : pendingRanges.pendingEndpointsFor(token);
+            if (pendingEndpoints == null) pendingReplicasForKey[i] = new int[0];
+            else pendingReplicasForKey[i] = pendingEndpoints.endpointList().stream().mapToInt(lookup::get).toArray();
+        }
+        int[] membersOfRing = joined.toArray();
+        long[] membersOfRingTokens = IntStream.of(membersOfRing).mapToLong(nodeLookup::tokenOf).toArray();
+        return new Topology(primaryKeys, membersOfRing, membersOfRingTokens, membersOfQuorum(), currentRf.clone(),
+                            quorumRf(), replicasForKey, pendingReplicasForKey);
+    }
+
+    private int quorumRf()
+    {
+        if (serialConsistency == LOCAL_SERIAL)
+            return currentRf[0];
+
+        return sum(currentRf);
+    }
+
+    private int[] membersOfQuorum()
+    {
+        if (serialConsistency == LOCAL_SERIAL)
+            return joined.toArray(0);
+
+        return joined.toArray();
+    }
+
+    private static int sum(int[] vs)
+    {
+        int sum = 0;
+        for (int v : vs)
+            sum += v;
+        return sum;
+    }
+
+    private InetAddressAndPort inet(int node)
+    {
+        return InetAddressAndPort.getByAddress(cluster.get(node).config().broadcastAddress());
+    }
+
+    AbstractReplicationStrategy strategy()
+    {
+        Map<String, String> rf = new HashMap<>();
+        for (int i = 0 ; i < snitch.dcCount() ; ++i)
+            rf.put(snitch.nameOfDc(i), Integer.toString(currentRf[i]));
+        return new NetworkTopologyStrategy(keyspace, tokenMetadata, snitch.get(), rf);
+    }
+
+    private Token tokenOf(int node)
+    {
+        return new LongToken(Long.parseLong(cluster.get(nodeLookup.tokenOf(node)).config().getString("initial_token")));
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/NodeLookup.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/NodeLookup.java
new file mode 100644
index 0000000..e1e339f
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/NodeLookup.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
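+// Maps 1-indexed node ids to their datacenter and to the node whose initial_token they currently own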
+public class NodeLookup
+{
+    protected final int[] nodeToDc;
+    protected final int[] nodeToToken;
+
+    protected NodeLookup(int[] nodeToDc)
+    {
+        this.nodeToDc = nodeToDc;
+        this.nodeToToken = new int[nodeToDc.length];
+        for (int i = 0; i < nodeToToken.length; ++i)
+            nodeToToken[i] = i + 1;
+    }
+
+    public int dcOf(int node)
+    {
+        return nodeToDc[node - 1];
+    }
+
+    public int tokenOf(int node)
+    {
+        return nodeToToken[node - 1];
+    }
+
+    public void setTokenOf(int node, int token)
+    {
+        nodeToToken[node - 1] = token;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/NodesByDc.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/NodesByDc.java
new file mode 100644
index 0000000..976febd
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/NodesByDc.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.Arrays;
+import java.util.function.IntConsumer;
+
+import org.apache.cassandra.simulator.RandomSource;
+
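+// Maintains, per datacenter, a sorted array of node ids padded with Integer.MAX_VALUE,
+// supporting membership tests plus random selection and removal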
+@SuppressWarnings("ManualArrayCopy")
+class NodesByDc
+{
+    final NodeLookup lookup;
+    final int[][] dcs;
+    final int[] dcSizes;
+    int size;
+
+    public NodesByDc(NodeLookup lookup, int[] dcSizes)
+    {
+        this.lookup = lookup;
+        this.dcs = new int[dcSizes.length][];
+        for (int i = 0; i < dcs.length; ++i)
+        {
+            dcs[i] = new int[dcSizes[i]];
+            Arrays.fill(dcs[i], Integer.MAX_VALUE);
+        }
+        this.dcSizes = new int[dcSizes.length];
+    }
+
+    boolean contains(int node)
+    {
+        int[] dc = dcs[lookup.dcOf(node)];
+        return Arrays.binarySearch(dc, node) >= 0;
+    }
+
+    void add(int node)
+    {
+        int dcIndex = lookup.dcOf(node);
+        int dcSize = dcSizes[dcIndex];
+        int[] dc = dcs[dcIndex];
+        int insertPos = -1 - Arrays.binarySearch(dc, node);
+        assert insertPos >= 0;
+        for (int i = dcSize; i > insertPos; --i)
+            dc[i] = dc[i - 1];
+        dc[insertPos] = node;
+        ++size;
+        ++dcSizes[dcIndex];
+    }
+
+    int removeRandom(RandomSource random, int dcIndex)
+    {
+        return removeIndex(dcIndex, random.uniform(0, dcSizes[dcIndex]));
+    }
+
+    private int removeIndex(int dcIndex, int nodeIndex)
+    {
+        int dcSize = dcSizes[dcIndex];
+        int[] dc = dcs[dcIndex];
+        int node = dc[nodeIndex];
+        for (int i = nodeIndex + 1; i < dcSize; ++i)
+            dc[i - 1] = dc[i];
+        dc[dcSize - 1] = Integer.MAX_VALUE;
+        --size;
+        --dcSizes[dcIndex];
+        return node;
+    }
+
+    int removeRandom(RandomSource random)
+    {
+        int index = random.uniform(0, size);
+        for (int dcIndex = 0; dcIndex < dcSizes.length; ++dcIndex)
+        {
+            if (dcSizes[dcIndex] > index)
+                return removeIndex(dcIndex, index);
+            index -= dcSizes[dcIndex];
+        }
+        throw new IllegalStateException();
+    }
+
+    int selectRandom(RandomSource random, int dcIndex)
+    {
+        int i = random.uniform(0, dcSizes[dcIndex]);
+        return dcs[dcIndex][i];
+    }
+
+    int selectRandom(RandomSource random)
+    {
+        int index = random.uniform(0, size);
+        for (int dcIndex = 0; dcIndex < dcSizes.length; ++dcIndex)
+        {
+            if (dcSizes[dcIndex] > index)
+                return dcs[dcIndex][index];
+            index -= dcSizes[dcIndex];
+        }
+        throw new IllegalStateException();
+    }
+
+    void remove(int node)
+    {
+        int dcIndex = lookup.dcOf(node);
+        int[] dc = dcs[dcIndex];
+        removeIndex(dcIndex, Arrays.binarySearch(dc, node));
+    }
+
+    void forEach(IntConsumer consumer)
+    {
+        for (int dc = 0; dc < dcs.length; ++dc)
+            forEach(consumer, dc);
+    }
+
+    void forEach(IntConsumer consumer, int dc)
+    {
+        for (int i : dcs[dc])
+        {
+            if (i == Integer.MAX_VALUE)
+                break;
+            consumer.accept(i);
+        }
+    }
+
+    int[] toArray()
+    {
+        int[] result = new int[size];
+        int count = 0;
+        for (int dcIndex = 0; dcIndex < dcs.length; ++dcIndex)
+        {
+            int dcSize = dcSizes[dcIndex];
+            System.arraycopy(dcs[dcIndex], 0, result, count, dcSize);
+            count += dcSize;
+        }
+        return result;
+    }
+
+    int[] toArray(int dcIndex)
+    {
+        int size = dcSizes[dcIndex];
+        int[] result = new int[size];
+        System.arraycopy(dcs[dcIndex], 0, result, 0, size);
+        return result;
+    }
+
+    boolean isEmpty()
+    {
+        return size == 0;
+    }
+
+    int size()
+    {
+        return size;
+    }
+
+    int size(int dc)
+    {
+        return dcSizes[dc];
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterChangeRf.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterChangeRf.java
new file mode 100644
index 0000000..46067f9
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterChangeRf.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.Arrays;
+
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.Actions;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+import static org.apache.cassandra.simulator.cluster.ClusterReliableQueryAction.schemaChange;
+import static org.apache.cassandra.utils.LazyToString.lazy;
+
+class OnClusterChangeRf extends OnClusterChangeTopology
+{
+    final KeyspaceActions actions;
+    final long timestamp;
+    final int on;
+    final boolean increase;
+
+    OnClusterChangeRf(KeyspaceActions actions, long timestamp, int on, Topology before, Topology after, boolean increase)
+    {
+        super(increase ? lazy(() -> "Increase replication factor to " + Arrays.toString(after.rf))
+                       : lazy(() -> "Decrease replication factor to " + Arrays.toString(after.rf)),
+              actions, STRICT, NONE, before, after, before.primaryKeys);
+        this.actions = actions;
+        this.timestamp = timestamp;
+        this.on = on;
+        this.increase = increase;
+    }
+
+    protected ActionList performSimple()
+    {
+        before(actions.cluster.get(on));
+
+        StringBuilder command = new StringBuilder("ALTER KEYSPACE " + actions.keyspace + " WITH replication = {'class': 'NetworkTopologyStrategy'");
+        for (int i = 0; i < after.rf.length; ++i)
+            command.append(", '").append(actions.snitch.nameOfDc(i)).append("': ").append(after.rf[i]);
+        command.append("};");
+
+        return ActionList.of(
+            schemaChange("ALTER KEYSPACE " + description(), actions, on, command.toString()),
+            new OnClusterSyncPendingRanges(actions),
+            new OnClusterFullRepair(actions, after, true, false, false),
+            // TODO: cleanup should clear paxos state tables
+            Actions.of("Flush and Cleanup", !increase ? () -> actions.flushAndCleanup(after.membersOfRing) : ActionList::empty)
+        );
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterChangeTopology.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterChangeTopology.java
new file mode 100644
index 0000000..5f0d46f
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterChangeTopology.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.function.Consumer;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.cluster.ClusterActionListener.TopologyChangeValidator;
+import org.apache.cassandra.simulator.systems.NonInterceptible;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+import static org.apache.cassandra.simulator.ActionListener.runAfterTransitiveClosure;
+import static org.apache.cassandra.simulator.systems.NonInterceptible.Permit.REQUIRED;
+
+abstract class OnClusterChangeTopology extends Action implements Consumer<Action>
+{
+    final KeyspaceActions actions;
+
+    final int[] participatingKeys;
+    final Topology before;
+    final Topology after;
+
+    final TopologyChangeValidator validator;
+
+    public OnClusterChangeTopology(Object description, KeyspaceActions actions, Topology before, Topology after, int[] participatingKeys)
+    {
+        this(description, actions, STRICT, RELIABLE, before, after, participatingKeys);
+    }
+
+    public OnClusterChangeTopology(Object description, KeyspaceActions actions, Modifiers self, Modifiers children, Topology before, Topology after, int[] participatingKeys)
+    {
+        super(description, self, children);
+        this.actions = actions;
+        this.participatingKeys = participatingKeys;
+        this.before = before;
+        this.after = after;
+        this.validator = actions.listener.newTopologyChangeValidator(this);
+        register(runAfterTransitiveClosure(this));
+    }
+
+    void before(IInvokableInstance instance)
+    {
+        NonInterceptible.execute(REQUIRED, () -> {
+            actions.validateReplicasForKeys(instance, actions.keyspace, actions.table, before);
+            validator.before(before, participatingKeys);
+        });
+    }
+
+    public void accept(Action ignore)
+    {
+        validator.after(after);
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterFullRepair.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterFullRepair.java
new file mode 100644
index 0000000..d415d4e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterFullRepair.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.Arrays;
+import java.util.function.Consumer;
+
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.cluster.ClusterActionListener.RepairValidator;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+import static org.apache.cassandra.simulator.ActionListener.runAfterTransitiveClosure;
+import static org.apache.cassandra.utils.LazyToString.lazy;
+
+class OnClusterFullRepair extends Action implements Consumer<Action>
+{
+    final KeyspaceActions actions;
+    final Topology topology;
+    final boolean force;
+    final RepairValidator validator;
+    final boolean repairPaxos;
+    final boolean repairOnlyPaxos;
+
+    public OnClusterFullRepair(KeyspaceActions actions, Topology topology, boolean repairPaxos, boolean repairOnlyPaxos, boolean force)
+    {
+        super(lazy(() -> "Full Repair on " + Arrays.toString(topology.membersOfRing)), STRICT, RELIABLE_NO_TIMEOUTS);
+        this.actions = actions;
+        // STRICT to ensure repairs do not run simultaneously, as seems not to be permitted even for non-overlapping ranges?
+        this.topology = topology;
+        this.repairPaxos = repairPaxos;
+        this.repairOnlyPaxos = repairOnlyPaxos;
+        this.force = force;
+        this.validator = actions.listener.newRepairValidator(this);
+        register(runAfterTransitiveClosure(this));
+    }
+
+    protected ActionList performSimple()
+    {
+        actions.validateReplicasForKeys(actions.cluster.get(topology.membersOfQuorum[0]), actions.keyspace, actions.table, topology);
+        validator.before(topology, repairPaxos, repairOnlyPaxos);
+        return actions.on(i -> new OnInstanceRepair(actions, i, repairPaxos, repairOnlyPaxos, force), topology.membersOfRing);
+    }
+
+    public void accept(Action ignore)
+    {
+        validator.after();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterJoin.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterJoin.java
new file mode 100644
index 0000000..cf39379
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterJoin.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.simulator.ActionList;
+
+import static org.apache.cassandra.utils.LazyToString.lazy;
+
+class OnClusterJoin extends OnClusterChangeTopology
+{
+    final int joining;
+
+    OnClusterJoin(KeyspaceActions actions, Topology before, Topology during, Topology after, int joining)
+    {
+        super(lazy(() -> String.format("node%d Joining", joining)), actions, before, after, during.pendingKeys());
+        this.joining = joining;
+    }
+
+    public ActionList performSimple()
+    {
+        IInvokableInstance joinInstance = actions.cluster.get(joining);
+        before(joinInstance);
+        return ActionList.of(
+            // setup the node's own gossip state for pending ownership, and return gossip actions to disseminate
+            new OnClusterUpdateGossip(actions, joining, new OnInstanceSetBootstrapping(actions, joining)),
+            new OnInstanceSyncSchemaForBootstrap(actions, joining),
+            new OnInstanceTopologyChangePaxosRepair(actions, joining, "Join"),
+            // stream/repair from a peer
+            new OnInstanceBootstrap(actions, joinInstance),
+            // setup the node's own gossip state for natural ownership, and return gossip actions to disseminate
+            new OnClusterUpdateGossip(actions, joining, new OnInstanceSetNormal(actions, joining))
+        );
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterLeave.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterLeave.java
new file mode 100644
index 0000000..4250e24
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterLeave.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.systems.SimulatedActionConsumer;
+import org.apache.cassandra.utils.concurrent.Future;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.utils.LazyToString.lazy;
+
+class OnClusterLeave extends OnClusterChangeTopology
+{
+    final int leaving;
+
+    OnClusterLeave(KeyspaceActions actions, Topology before, Topology during, Topology after, int leaving)
+    {
+        super(lazy(() -> String.format("node%d Leaving", leaving)), actions, before, after, during.pendingKeys());
+        this.leaving = leaving;
+    }
+
+    public ActionList performSimple()
+    {
+        IInvokableInstance leaveInstance = actions.cluster.get(leaving);
+        before(leaveInstance);
+        AtomicReference<Supplier<? extends Future<?>>> preparedUnbootstrap = new AtomicReference<>();
+        return ActionList.of(
+            // set up the node's own gossip state for pending ownership, and return gossip actions to disseminate
+            new OnClusterUpdateGossip(actions, leaving, new OnInstanceSetLeaving(actions, leaving)),
+            new SimulatedActionConsumer<>("Prepare unbootstrap on " + leaving, RELIABLE_NO_TIMEOUTS, RELIABLE_NO_TIMEOUTS, actions, leaveInstance,
+                                          ref -> ref.set(StorageService.instance.prepareUnbootstrapStreaming()), preparedUnbootstrap),
+            new OnInstanceTopologyChangePaxosRepair(actions, leaving, "Leave"),
+            new SimulatedActionConsumer<>("Execute unbootstrap on " + leaving, RELIABLE_NO_TIMEOUTS, RELIABLE_NO_TIMEOUTS, actions, leaveInstance,
+                                          ref -> ref.get().get().syncThrowUncheckedOnInterrupt(), preparedUnbootstrap),
+            // set up the node's own gossip state for natural ownership, and return gossip actions to disseminate
+            new OnClusterUpdateGossip(actions, leaving, new OnInstanceSetLeft(actions, leaving))
+        );
+    }
+}
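
OnClusterLeave splits unbootstrap into a "prepare" action and a later "execute" action, handing the prepared streaming future between them through an AtomicReference so that other actions (such as the paxos repair) can be scheduled in between. The following sketch shows that two-phase hand-off using only standard java.util.concurrent types; the prepare/execute names and the CompletableFuture stand-in are assumptions, not the simulator's own types.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Supplier;

    // One step stores a supplier of a future in an AtomicReference; a later step dereferences and waits on it.
    public class PrepareThenExecuteSketch
    {
        public static void main(String[] args)
        {
            AtomicReference<Supplier<CompletableFuture<Void>>> prepared = new AtomicReference<>();

            // "Prepare unbootstrap": decide what to stream, but start nothing yet.
            Runnable prepare = () -> prepared.set(() -> CompletableFuture.runAsync(
                () -> System.out.println("streaming ranges away...")));

            // "Execute unbootstrap": start the prepared streaming and block until it completes.
            Runnable execute = () -> prepared.get().get().join();

            prepare.run();   // other actions (e.g. a paxos repair) may run in between
            execute.run();
        }
    }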
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterMarkDown.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterMarkDown.java
new file mode 100644
index 0000000..c5a3186
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterMarkDown.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+
+class OnClusterMarkDown extends ClusterUnsafeAction
+{
+    OnClusterMarkDown(ClusterActions actions, int on)
+    {
+        this(actions, on, actions.cluster.get(on));
+    }
+
+    OnClusterMarkDown(ClusterActions actions, int on, IInvokableInstance instance)
+    {
+        super("Mark " + on + " Down in Snitches", RELIABLE_NO_TIMEOUTS, RELIABLE_NO_TIMEOUTS,
+              actions, on, () -> actions.failureDetector.markDown(instance.broadcastAddress()));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterRepairRanges.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterRepairRanges.java
new file mode 100644
index 0000000..94dcea5
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterRepairRanges.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.Actions.ReliableAction;
+
+import static java.util.stream.IntStream.range;
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+
+public class OnClusterRepairRanges extends ReliableAction
+{
+    public OnClusterRepairRanges(KeyspaceActions actions, int[] on, boolean repairPaxos, boolean repairOnlyPaxos, List<Map.Entry<String, String>> ranges)
+    {
+        super("Repair ranges", NONE, RELIABLE_NO_TIMEOUTS,
+              () -> ActionList.of(range(0, on.length)
+                              .mapToObj(
+                                    i -> new OnInstanceRepair(actions, on[i], repairPaxos, repairOnlyPaxos, ranges.get(i), true))));
+    }
+}
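
OnClusterRepairRanges pairs the i-th node in `on` with the i-th range in `ranges` via IntStream.range, producing one per-instance repair action for each pair. A small stand-alone sketch of that index-zip follows, with plain strings standing in for the OnInstanceRepair actions.

    import java.util.AbstractMap;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;
    import java.util.stream.IntStream;

    // Node i repairs range i: zip two parallel collections by index.
    public class RepairRangesSketch
    {
        public static void main(String[] args)
        {
            int[] on = { 2, 5, 7 };
            List<Map.Entry<String, String>> ranges = Arrays.asList(
                new AbstractMap.SimpleImmutableEntry<>("0", "100"),
                new AbstractMap.SimpleImmutableEntry<>("100", "200"),
                new AbstractMap.SimpleImmutableEntry<>("200", "300"));

            List<String> repairs = IntStream.range(0, on.length)
                                            .mapToObj(i -> "repair (" + ranges.get(i).getKey() + ", "
                                                           + ranges.get(i).getValue() + "] on node" + on[i])
                                            .collect(Collectors.toList());
            repairs.forEach(System.out::println);
        }
    }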
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterReplace.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterReplace.java
new file mode 100644
index 0000000..8e0af40
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterReplace.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.net.InetSocketAddress;
+import java.util.AbstractMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.Actions.ReliableAction;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.apache.cassandra.utils.LazyToString.lazy;
+
+class OnClusterReplace extends OnClusterChangeTopology
+{
+    final int leaving;
+    final int joining;
+
+    OnClusterReplace(KeyspaceActions actions, Topology before, Topology during, Topology after, int leaving, int joining)
+    {
+        super(lazy(() -> String.format("node%d Replacing node%d", joining, leaving)), actions, STRICT, NONE, before, after, during.pendingKeys());
+        this.leaving = leaving;
+        this.joining = joining;
+    }
+
+    public ActionList performSimple()
+    {
+        // need to mark it as DOWN, and perhaps shut it down
+        Map<InetSocketAddress, IInvokableInstance> lookup = Cluster.getUniqueAddressLookup(actions.cluster);
+        IInvokableInstance leaveInstance = actions.cluster.get(leaving);
+        IInvokableInstance joinInstance = actions.cluster.get(joining);
+        before(leaveInstance);
+        UUID hostId = leaveInstance.unsafeCallOnThisThread(SystemKeyspace::getLocalHostId);
+        String movingToken = leaveInstance.unsafeCallOnThisThread(() -> Utils.currentToken().toString());
+        List<Map.Entry<String, String>> repairRanges = leaveInstance.unsafeApplyOnThisThread(
+            (String keyspaceName) -> StorageService.instance.getLocalAndPendingRanges(keyspaceName).stream()
+                                                            .map(OnClusterReplace::toStringEntry)
+                                                            .collect(Collectors.toList()),
+            actions.keyspace
+        );
+
+        int[] others = repairRanges.stream().mapToInt(
+            repairRange -> lookup.get(leaveInstance.unsafeApplyOnThisThread(
+                (String keyspaceName, String tk) -> Keyspace.open(keyspaceName).getReplicationStrategy().getNaturalReplicasForToken(Utils.parseToken(tk)).stream().map(Replica::endpoint)
+                                                            .filter(i -> !i.equals(getBroadcastAddressAndPort()))
+                                                            .findFirst()
+                                                            .orElseThrow(IllegalStateException::new),
+                actions.keyspace, repairRange.getValue())
+            ).config().num()
+        ).toArray();
+
+        return ActionList.of(
+        // first sync gossip so that newly joined nodes are known by all, so that when we mark the leaving node down we do not throw UnavailableException
+        ReliableAction.transitively("Sync Gossip", () -> actions.gossipWithAll(leaving)),
+
+        // "shutdown" the leaving instance
+        new OnClusterUpdateGossip(actions,
+                                      ActionList.of(new OnInstanceMarkShutdown(actions, leaving),
+                                                    new OnClusterMarkDown(actions, leaving)),
+                                      new OnInstanceSendShutdownToAll(actions, leaving)),
+
+        // TODO (safety): confirm repair does not include this node
+
+        // note that in the case of node replacement, we must perform a paxos repair before AND mid-transition.
+        // the first ensures the paxos state is flushed to the base table's sstables, so that the replacing node
+        // is guaranteed to receive a copy of all earlier operations (since the old node is now "offline")
+
+        new OnClusterRepairRanges(actions, others, true, false, repairRanges),
+
+        // stream/repair from a peer
+        new OnClusterUpdateGossip(actions, joining, new OnInstanceSetBootstrapReplacing(actions, joining, leaving, hostId, movingToken)),
+
+        new OnInstanceSyncSchemaForBootstrap(actions, joining),
+        new OnInstanceTopologyChangePaxosRepair(actions, joining, "Replace"),
+        new OnInstanceBootstrap(actions, joinInstance, movingToken, true),
+
+        // set up the node's own gossip state for natural ownership, and return gossip actions to disseminate
+        new OnClusterUpdateGossip(actions, joining, new OnInstanceSetNormal(actions, joining, hostId, movingToken))
+        );
+    }
+
+    static Map.Entry<String, String> toStringEntry(Range<Token> range)
+    {
+        return new AbstractMap.SimpleImmutableEntry<>(range.left.toString(), range.right.toString());
+    }
+}
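
toStringEntry serialises each token range as a (left, right) pair of strings so the ranges can cross instance/classloader boundaries and be re-parsed on the target node. The sketch below shows that string round-trip under the assumption of a toy numeric Range class; it is not org.apache.cassandra.dht.Range.

    import java.util.AbstractMap;
    import java.util.Map;

    // Token ranges cross classloader/instance boundaries as plain (left, right) string pairs.
    public class RangeStringRoundTripSketch
    {
        static final class Range
        {
            final long left, right;
            Range(long left, long right) { this.left = left; this.right = right; }
        }

        static Map.Entry<String, String> toStringEntry(Range range)
        {
            return new AbstractMap.SimpleImmutableEntry<>(Long.toString(range.left), Long.toString(range.right));
        }

        static Range fromStringEntry(Map.Entry<String, String> entry)
        {
            return new Range(Long.parseLong(entry.getKey()), Long.parseLong(entry.getValue()));
        }

        public static void main(String[] args)
        {
            Map.Entry<String, String> wire = toStringEntry(new Range(Long.MIN_VALUE, 0L));
            Range back = fromStringEntry(wire);
            System.out.println(wire + " -> (" + back.left + ", " + back.right + "]");
        }
    }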
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterSetPaxosVariant.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterSetPaxosVariant.java
new file mode 100644
index 0000000..7919b2c
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterSetPaxosVariant.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.config.Config.PaxosVariant;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+
+class OnClusterSetPaxosVariant extends Action
+{
+    private final ClusterActions actions;
+    final PaxosVariant newVariant;
+
+    OnClusterSetPaxosVariant(ClusterActions actions, PaxosVariant newVariant)
+    {
+        super("Set Paxos Variant to " + newVariant, RELIABLE, NONE);
+        this.actions = actions;
+        this.newVariant = newVariant;
+    }
+
+    protected ActionList performSimple()
+    {
+        return actions.onAll((on) -> new OnInstanceSetPaxosVariant(actions, on, newVariant));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterSyncPendingRanges.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterSyncPendingRanges.java
new file mode 100644
index 0000000..04065ae
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterSyncPendingRanges.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.simulator.Actions.ReliableAction;
+
+public class OnClusterSyncPendingRanges extends ReliableAction
+{
+    public OnClusterSyncPendingRanges(ClusterActions actions)
+    {
+        super("Sync Pending Ranges Executor", actions::syncPendingRanges, true);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterUpdateGossip.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterUpdateGossip.java
new file mode 100644
index 0000000..18263a3
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnClusterUpdateGossip.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.Actions.ReliableAction;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.systems.SimulatedAction;
+
+import static org.apache.cassandra.simulator.Action.Modifier.DISPLAY_ORIGIN;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+
+public class OnClusterUpdateGossip extends ReliableAction
+{
+    ActionList cancel;
+    OnClusterUpdateGossip(ClusterActions actions, int on, SimulatedAction updateLocalState)
+    {
+        this(updateLocalState.description() + " and Sync Gossip", actions, ActionList.of(updateLocalState),
+             new OnInstanceGossipWithAll(actions, on));
+    }
+
+    OnClusterUpdateGossip(ClusterActions actions, ActionList updateLocalState, Action sendGossip)
+    {
+        this(updateLocalState.get(0).description() + " and Sync Gossip", actions, updateLocalState, sendGossip);
+    }
+
+    OnClusterUpdateGossip(Object id, ClusterActions actions, ActionList updateLocalState, Action sendGossip)
+    {
+        this(id, actions, updateLocalState.andThen(sendGossip));
+    }
+
+    OnClusterUpdateGossip(Object id, ClusterActions actions, ActionList updateLocalStateThenSendGossip)
+    {
+        super(id, STRICT.with(DISPLAY_ORIGIN), RELIABLE_NO_TIMEOUTS, () -> updateLocalStateThenSendGossip.andThen(new OnClusterSyncPendingRanges(actions)));
+        cancel = updateLocalStateThenSendGossip;
+    }
+
+    @Override
+    protected Throwable safeInvalidate(boolean isCancellation)
+    {
+        ActionList list = cancel;
+        if (list == null)
+            return null;
+        cancel = null;
+        return list.safeForEach(Action::invalidate);
+    }
+}
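
safeInvalidate nulls out the cancel field before acting on it, so the pending sub-actions are invalidated at most once even if cancellation is attempted repeatedly. Below is a sketch of that clear-then-act idempotence; the AtomicReference and list of Runnables are illustrative stand-ins for the ActionList field, not the simulator's types.

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;

    // Clear the field first, then act on the old value: invalidation happens exactly once.
    public class InvalidateOnceSketch
    {
        private final AtomicReference<List<Runnable>> cancel =
            new AtomicReference<>(Arrays.asList(() -> System.out.println("invalidate update-local-state"),
                                                () -> System.out.println("invalidate send-gossip")));

        void safeInvalidate()
        {
            List<Runnable> list = cancel.getAndSet(null);
            if (list == null)
                return;                       // already invalidated; nothing to do
            list.forEach(Runnable::run);      // invalidate each pending sub-action exactly once
        }

        public static void main(String[] args)
        {
            InvalidateOnceSketch action = new InvalidateOnceSketch();
            action.safeInvalidate();          // performs the invalidation
            action.safeInvalidate();          // no-op on the second call
        }
    }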
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceBootstrap.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceBootstrap.java
new file mode 100644
index 0000000..8fa9375
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceBootstrap.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.List;
+
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.simulator.systems.SimulatedActionTask;
+
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.simulator.Action.Modifier.DISPLAY_ORIGIN;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.cluster.Utils.parseTokens;
+
+class OnInstanceBootstrap extends SimulatedActionTask
+{
+    public OnInstanceBootstrap(ClusterActions actions, IInvokableInstance on)
+    {
+        this(actions, on, on.config().getString("initial_token"), false);
+    }
+
+    public OnInstanceBootstrap(ClusterActions actions, IInvokableInstance on, String token, boolean replacing)
+    {
+        super("Bootstrap on " + on.config().num(), RELIABLE_NO_TIMEOUTS.with(DISPLAY_ORIGIN), RELIABLE_NO_TIMEOUTS, actions, on,
+              invokableBootstrap(token, replacing));
+    }
+
+    private static IIsolatedExecutor.SerializableRunnable invokableBootstrap(String token, boolean replacing)
+    {
+        return () -> {
+            List<Token> tokens = parseTokens(singletonList(token));
+            StorageService.instance.startBootstrap(tokens, replacing);
+        };
+    }
+}
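
invokableBootstrap captures only the token string and the replacing flag, deferring token parsing until the runnable executes on the target instance, which keeps the closure cheap to serialise and classloader-agnostic. A sketch of that "capture cheap state, parse late" idea follows, with BigInteger standing in for a token; the names are illustrative.

    import java.math.BigInteger;

    // The closure holds only a String; conversion to a richer type happens on the executing side.
    public class ParseLateSketch
    {
        static Runnable bootstrapTask(String tokenString, boolean replacing)
        {
            return () -> {
                BigInteger token = new BigInteger(tokenString);   // parsed when the task runs
                System.out.println("bootstrapping at token " + token + (replacing ? " (replacing)" : ""));
            };
        }

        public static void main(String[] args)
        {
            Runnable task = bootstrapTask("-3074457345618258603", false);
            task.run();
        }
    }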
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceFlushAndCleanup.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceFlushAndCleanup.java
new file mode 100644
index 0000000..c34887a
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceFlushAndCleanup.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.function.Function;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.simulator.Action;
+
+class OnInstanceFlushAndCleanup extends ClusterReliableAction
+{
+    OnInstanceFlushAndCleanup(ClusterActions actions, int on)
+    {
+        super("Flush and Cleanup on " + on, actions, on, invokableFlushAndCleanup());
+    }
+
+    public static Function<Integer, Action> factory(ClusterActions actions)
+    {
+        return (on) -> new OnInstanceFlushAndCleanup(actions, on);
+    }
+
+    private static IIsolatedExecutor.SerializableRunnable invokableFlushAndCleanup()
+    {
+        return () -> {
+            for (Keyspace keyspace : Keyspace.all())
+            {
+                for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores())
+                {
+                    try
+                    {
+                        Util.flush(cfs);
+                        if (cfs.forceCleanup(1) != CompactionManager.AllSSTableOpStatus.SUCCESSFUL)
+                            throw new IllegalStateException();
+                        cfs.forceMajorCompaction();
+                    }
+                    catch (Throwable t) { throw new RuntimeException(t); }
+                }
+            }
+        };
+    }
+
+}
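
The class exposes a factory(ClusterActions) method returning Function<Integer, Action>, so callers can stamp out one flush-and-cleanup action per node number; the loop body fails fast on a non-successful cleanup and wraps any checked failure as unchecked. A self-contained sketch of both ideas; the Status enum and table names are invented for illustration.

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Function;

    // A factory mapping a node number to a task, plus fail-fast status checking with unchecked wrapping.
    public class FlushAndCleanupSketch
    {
        enum Status { SUCCESSFUL, ABORTED }

        static Status flushAndCleanup(String table) throws IOException
        {
            System.out.println("flush + cleanup + compact " + table);
            return Status.SUCCESSFUL;
        }

        static Function<Integer, Runnable> factory(List<String> tables)
        {
            return on -> () -> {
                for (String table : tables)
                {
                    try
                    {
                        if (flushAndCleanup(table) != Status.SUCCESSFUL)
                            throw new IllegalStateException("cleanup failed on node" + on + " for " + table);
                    }
                    catch (Throwable t) { throw new RuntimeException(t); }
                }
            };
        }

        public static void main(String[] args)
        {
            factory(Arrays.asList("ks.tbl1", "ks.tbl2")).apply(1).run();
        }
    }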
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceGossipWith.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceGossipWith.java
new file mode 100644
index 0000000..0532b35
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceGossipWith.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.net.InetSocketAddress;
+import java.util.function.BiFunction;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.simulator.Action;
+
+import static org.apache.cassandra.locator.InetAddressAndPort.getByAddress;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+
+class OnInstanceGossipWith extends ClusterAction
+{
+    OnInstanceGossipWith(ClusterActions actions, int from, int to)
+    {
+        super("Gossip from " + from + " to " + to, STRICT, RELIABLE_NO_TIMEOUTS, actions, from, invokableGossipWith(actions.cluster, to));
+    }
+
+    public static BiFunction<Integer, Integer, Action> factory(ClusterActions actions)
+    {
+        return (from, to) -> new OnInstanceGossipWith(actions, from, to);
+    }
+
+    static IIsolatedExecutor.SerializableRunnable invokableGossipWith(Cluster cluster, int to)
+    {
+        InetSocketAddress address = cluster.get(to).broadcastAddress();
+        return () -> Gossiper.runInGossipStageBlocking(() -> Gossiper.instance.unsafeGossipWith(getByAddress(address)));
+    }
+}
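
invokableGossipWith resolves the target's broadcast address eagerly, outside the lambda, so the returned runnable captures only an immutable InetSocketAddress rather than a reference to the cluster object. A short sketch of that eager-capture choice; the map and addresses are illustrative only.

    import java.net.InetSocketAddress;
    import java.util.HashMap;
    import java.util.Map;

    // Look the address up once, outside the lambda, so the closure holds only the value.
    public class EagerCaptureSketch
    {
        public static void main(String[] args)
        {
            Map<Integer, InetSocketAddress> cluster = new HashMap<>();
            cluster.put(2, InetSocketAddress.createUnresolved("127.0.0.2", 7000));

            InetSocketAddress address = cluster.get(2);                 // resolved now, not when run
            Runnable gossipWith = () -> System.out.println("gossip with " + address);

            cluster.clear();                                            // later mutation cannot affect the task
            gossipWith.run();
        }
    }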
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceGossipWithAll.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceGossipWithAll.java
new file mode 100644
index 0000000..4db8e2b
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceGossipWithAll.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.simulator.Actions.ReliableAction;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+
+class OnInstanceGossipWithAll extends ReliableAction
+{
+    OnInstanceGossipWithAll(ClusterActions actions, int from)
+    {
+        super("Send Shutdown from " + from + " to all", NONE, RELIABLE_NO_TIMEOUTS,
+              () -> actions.gossipWithAll(from));
+    }
+    OnInstanceGossipWithAll(ClusterActions actions, IInvokableInstance from)
+    {
+        this(actions, from.config().num());
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceMarkShutdown.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceMarkShutdown.java
new file mode 100644
index 0000000..6ef615f
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceMarkShutdown.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import static org.apache.cassandra.distributed.impl.UnsafeGossipHelper.markShutdownRunner;
+
+class OnInstanceMarkShutdown extends ClusterReliableAction
+{
+    OnInstanceMarkShutdown(ClusterActions actions, int on)
+    {
+        super("Mark Self Shutdown on " + on, actions, on, markShutdownRunner(actions.cluster.get(on).broadcastAddress()));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceRepair.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceRepair.java
new file mode 100644
index 0000000..9da7317
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceRepair.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.TokenMetadata;
+import org.apache.cassandra.repair.RepairParallelism;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.progress.ProgressEventType;
+
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.cluster.Utils.currentToken;
+import static org.apache.cassandra.simulator.cluster.Utils.parseTokenRanges;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+
+class OnInstanceRepair extends ClusterAction
+{
+    public OnInstanceRepair(KeyspaceActions actions, int on, boolean repairPaxos, boolean repairOnlyPaxos, boolean force)
+    {
+        super("Repair on " + on, RELIABLE_NO_TIMEOUTS, RELIABLE_NO_TIMEOUTS, actions, on, invokableBlockingRepair(actions.keyspace, repairPaxos, repairOnlyPaxos, false, force));
+    }
+
+    public OnInstanceRepair(KeyspaceActions actions, int on, boolean repairPaxos, boolean repairOnlyPaxos, Map.Entry<String, String> repairRange, boolean force)
+    {
+        this(actions, on, RELIABLE_NO_TIMEOUTS, RELIABLE_NO_TIMEOUTS, repairPaxos, repairOnlyPaxos, repairRange, force);
+    }
+
+    public OnInstanceRepair(KeyspaceActions actions, int on, Modifiers self, Modifiers transitive, String id, boolean repairPaxos, boolean repairOnlyPaxos, boolean primaryRangeOnly, boolean force)
+    {
+        super(id, self, transitive, actions, on, invokableBlockingRepair(actions.keyspace, repairPaxos, repairOnlyPaxos, primaryRangeOnly, force));
+    }
+
+    public OnInstanceRepair(KeyspaceActions actions, int on, Modifiers self, Modifiers transitive, boolean repairPaxos, boolean repairOnlyPaxos, Map.Entry<String, String> repairRange, boolean force)
+    {
+        super("Repair on " + on, self, transitive, actions, on, invokableBlockingRepair(actions.keyspace, repairPaxos, repairOnlyPaxos, repairRange, force));
+    }
+
+    private static IIsolatedExecutor.SerializableRunnable invokableBlockingRepair(String keyspaceName, boolean repairPaxos, boolean repairOnlyPaxos, boolean primaryRangeOnly, boolean force)
+    {
+        return () -> {
+            Condition done = newOneTimeCondition();
+            invokeRepair(keyspaceName, repairPaxos, repairOnlyPaxos, primaryRangeOnly, force, done::signal);
+            done.awaitThrowUncheckedOnInterrupt();
+        };
+    }
+
+    private static IIsolatedExecutor.SerializableRunnable invokableBlockingRepair(String keyspaceName, boolean repairPaxos, boolean repairOnlyPaxos, Map.Entry<String, String> repairRange, boolean force)
+    {
+        return () -> {
+            Condition done = newOneTimeCondition();
+            invokeRepair(keyspaceName, repairPaxos, repairOnlyPaxos, () -> parseTokenRanges(singletonList(repairRange)), false, force, done::signal);
+            done.awaitThrowUncheckedOnInterrupt();
+        };
+    }
+
+    private static void invokeRepair(String keyspaceName, boolean repairPaxos, boolean repairOnlyPaxos, boolean primaryRangeOnly, boolean force, Runnable listener)
+    {
+        Keyspace keyspace = Keyspace.open(keyspaceName);
+        TokenMetadata metadata = StorageService.instance.getTokenMetadata().cloneOnlyTokenMap();
+        invokeRepair(keyspaceName, repairPaxos, repairOnlyPaxos,
+                     () -> primaryRangeOnly ? Collections.singletonList(metadata.getPrimaryRangeFor(currentToken()))
+                                            : keyspace.getReplicationStrategy().getAddressReplicas(metadata).get(getBroadcastAddressAndPort()).asList(Replica::range),
+                     primaryRangeOnly, force, listener);
+    }
+
+    private static void invokeRepair(String keyspaceName, boolean repairPaxos, boolean repairOnlyPaxos, IIsolatedExecutor.SerializableCallable<Collection<Range<Token>>> rangesSupplier, boolean isPrimaryRangeOnly, boolean force, Runnable listener)
+    {
+        Collection<Range<Token>> ranges = rangesSupplier.call();
+        // no need to wait for completion, as we track all task submissions and message exchanges, and ensure they finish before continuing to the next action
+        StorageService.instance.repair(keyspaceName, new RepairOption(RepairParallelism.SEQUENTIAL, isPrimaryRangeOnly, false, false, 1, ranges, false, false, force, PreviewKind.NONE, false, true, repairPaxos, repairOnlyPaxos), singletonList((tag, event) -> {
+            if (event.getType() == ProgressEventType.COMPLETE)
+                listener.run();
+        }));
+    }
+
+}
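
invokableBlockingRepair turns the asynchronous repair into a blocking step by registering a completion listener that signals a one-shot condition and then awaiting it. The sketch below shows the same callback-to-blocking bridge, with CountDownLatch standing in for Cassandra's Condition and a simple async task standing in for the repair.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CountDownLatch;

    // Start async work with a completion listener, then await a one-shot signal.
    public class BlockingRepairSketch
    {
        static void startRepair(Runnable onComplete)
        {
            CompletableFuture.runAsync(() -> System.out.println("repairing...")).thenRun(onComplete);
        }

        public static void main(String[] args) throws InterruptedException
        {
            CountDownLatch done = new CountDownLatch(1);
            startRepair(done::countDown);   // listener fires when the repair reports COMPLETE
            done.await();                   // the simulated action blocks here until then
            System.out.println("repair complete");
        }
    }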
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendLocalGossipState.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendLocalGossipState.java
new file mode 100644
index 0000000..f466b35
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendLocalGossipState.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.net.InetSocketAddress;
+import java.util.function.BiFunction;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.simulator.Action;
+
+import static org.apache.cassandra.locator.InetAddressAndPort.getByAddress;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+
+class OnInstanceSendLocalGossipState extends ClusterAction
+{
+    OnInstanceSendLocalGossipState(ClusterActions actions, int from, int to)
+    {
+        super("Send Local Gossip State from " + from + " to " + to, STRICT, RELIABLE_NO_TIMEOUTS, actions, from,
+              invokableSendLocalGossipState(actions.cluster, to));
+    }
+
+    public static BiFunction<Integer, Integer, Action> factory(ClusterActions actions)
+    {
+        return (from, to) -> new OnInstanceSendLocalGossipState(actions, from, to);
+    }
+
+    static IIsolatedExecutor.SerializableRunnable invokableSendLocalGossipState(Cluster cluster, int to)
+    {
+        InetSocketAddress address = cluster.get(to).broadcastAddress();
+        return () -> Gossiper.runInGossipStageBlocking(() -> Gossiper.instance.unsafeSendLocalEndpointStateTo(getByAddress(address)));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendShutdown.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendShutdown.java
new file mode 100644
index 0000000..b932349
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendShutdown.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.net.InetSocketAddress;
+import java.util.function.BiFunction;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.simulator.Action;
+
+import static org.apache.cassandra.locator.InetAddressAndPort.getByAddress;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.Action.Modifiers.STRICT;
+
+class OnInstanceSendShutdown extends ClusterAction
+{
+    OnInstanceSendShutdown(ClusterActions actions, int from, int to)
+    {
+        super("Send Shutdown from " + from + " to " + to, STRICT, RELIABLE_NO_TIMEOUTS, actions, from, invokableSendShutdown(actions.cluster, to));
+    }
+
+    public static BiFunction<Integer, Integer, Action> factory(ClusterActions actions)
+    {
+        return (from, to) -> new OnInstanceSendShutdown(actions, from, to);
+    }
+
+    static IIsolatedExecutor.SerializableRunnable invokableSendShutdown(Cluster cluster, int to)
+    {
+        InetSocketAddress address = cluster.get(to).broadcastAddress();
+        return () -> Gossiper.runInGossipStageBlocking(() -> Gossiper.instance.unsafeSendShutdown(getByAddress(address)));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendShutdownToAll.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendShutdownToAll.java
new file mode 100644
index 0000000..fe62986
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSendShutdownToAll.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.simulator.Actions.ReliableAction;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+
+class OnInstanceSendShutdownToAll extends ReliableAction
+{
+    OnInstanceSendShutdownToAll(ClusterActions actions, int from)
+    {
+        super("Send Shutdown from " + from + " to all", NONE, RELIABLE_NO_TIMEOUTS,
+              () -> actions.sendShutdownToAll(from));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetBootstrapReplacing.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetBootstrapReplacing.java
new file mode 100644
index 0000000..fea4e05
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetBootstrapReplacing.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.UUID;
+
+import static org.apache.cassandra.distributed.impl.UnsafeGossipHelper.addToRingBootstrapReplacingRunner;
+
+class OnInstanceSetBootstrapReplacing extends ClusterReliableAction
+{
+    OnInstanceSetBootstrapReplacing(ClusterActions actions, int on, int replacing, UUID hostId, String token)
+    {
+        super("Set " + on + " to Bootstrap Replacing", actions, on, addToRingBootstrapReplacingRunner(actions.cluster.get(on), actions.cluster.get(replacing), hostId, token));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetBootstrapping.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetBootstrapping.java
new file mode 100644
index 0000000..c8a6f5d
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetBootstrapping.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import static org.apache.cassandra.distributed.impl.UnsafeGossipHelper.addToRingBootstrappingRunner;
+
+class OnInstanceSetBootstrapping extends ClusterReliableAction
+{
+    OnInstanceSetBootstrapping(ClusterActions actions, int on)
+    {
+        super("Set " + on + " to Bootstrapping", actions, on, addToRingBootstrappingRunner(actions.cluster.get(on)));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetLeaving.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetLeaving.java
new file mode 100644
index 0000000..c35d0b4
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetLeaving.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import static org.apache.cassandra.distributed.impl.UnsafeGossipHelper.addToRingLeavingRunner;
+
+class OnInstanceSetLeaving extends ClusterReliableAction
+{
+    OnInstanceSetLeaving(ClusterActions actions, int on)
+    {
+        super("Set " + on + " to Leaving", actions, on, addToRingLeavingRunner(actions.cluster.get(on)));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetLeft.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetLeft.java
new file mode 100644
index 0000000..83de795
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetLeft.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import static org.apache.cassandra.distributed.impl.UnsafeGossipHelper.addToRingLeftRunner;
+
+class OnInstanceSetLeft extends ClusterReliableAction
+{
+    OnInstanceSetLeft(ClusterActions actions, int on)
+    {
+        super("Set " + on + " to Left", actions, on, addToRingLeftRunner(actions.cluster.get(on)));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetNormal.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetNormal.java
new file mode 100644
index 0000000..cc3702c
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetNormal.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.UUID;
+
+import static org.apache.cassandra.distributed.impl.UnsafeGossipHelper.addToRingNormalRunner;
+
+class OnInstanceSetNormal extends ClusterReliableAction
+{
+    OnInstanceSetNormal(ClusterActions actions, int on, UUID hostId, String tokenString)
+    {
+        super("Set " + on + " to Normal", actions, on, addToRingNormalRunner(actions.cluster.get(on), hostId, tokenString));
+    }
+
+    OnInstanceSetNormal(ClusterActions actions, int on)
+    {
+        super("Set " + on + " to Normal", actions, on, addToRingNormalRunner(actions.cluster.get(on)));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetPaxosVariant.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetPaxosVariant.java
new file mode 100644
index 0000000..0cd3eb6
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSetPaxosVariant.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.config.Config.PaxosVariant;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.service.paxos.Paxos;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+import static org.apache.cassandra.utils.LazyToString.lazy;
+
+class OnInstanceSetPaxosVariant extends Action
+{
+    final IInvokableInstance instance;
+    final int on;
+    final PaxosVariant newVariant;
+
+    OnInstanceSetPaxosVariant(ClusterActions actions, int on, PaxosVariant newVariant)
+    {
+        super(lazy(() -> "Set Paxos Variant to " + newVariant + " on node" + on), RELIABLE, NONE);
+        this.instance = actions.cluster.get(on);
+        this.on = on;
+        this.newVariant = newVariant;
+    }
+
+    protected ActionList performSimple()
+    {
+        instance.unsafeRunOnThisThread(invokableSetVariant(newVariant));
+        return ActionList.empty();
+    }
+
+    static IIsolatedExecutor.SerializableRunnable invokableSetVariant(PaxosVariant to)
+    {
+        return () -> Paxos.setPaxosVariant(to);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSyncPendingRanges.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSyncPendingRanges.java
new file mode 100644
index 0000000..4611d36
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSyncPendingRanges.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.function.Function;
+
+import org.apache.cassandra.service.PendingRangeCalculatorService;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.systems.SimulatedActionTask;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+
+class OnInstanceSyncPendingRanges extends SimulatedActionTask
+{
+    OnInstanceSyncPendingRanges(ClusterActions actions, int node)
+    {
+        //noinspection Convert2MethodRef - invalid inspection across multiple classloaders
+        super("Sync Pending Ranges on " + node, RELIABLE, RELIABLE, actions, actions.cluster.get(node),
+              () -> PendingRangeCalculatorService.instance.blockUntilFinished());
+    }
+
+    public static Function<Integer, Action> factory(ClusterActions clusterActions)
+    {
+        return (node) -> new OnInstanceSyncPendingRanges(clusterActions, node);
+    }
+}
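
The suppressed Convert2MethodRef inspection is deliberate: a method reference would bind PendingRangeCalculatorService.instance when the action is constructed, whereas the lambda re-reads it each time the task runs inside the instance's classloader. A sketch of that binding-time difference using a mutable static String; the names and values are illustrative only.

    import java.util.function.Supplier;

    // A bound method reference captures its receiver when created; a lambda re-reads the field on each call.
    public class BindingTimeSketch
    {
        static String instance = "constructed-in-outer-classloader";

        public static void main(String[] args)
        {
            Supplier<String> methodRef = instance::toUpperCase;         // receiver captured now
            Supplier<String> lambda    = () -> instance.toUpperCase();  // receiver looked up on each call

            instance = "replaced-inside-instance-classloader";

            System.out.println(methodRef.get());   // CONSTRUCTED-IN-OUTER-CLASSLOADER
            System.out.println(lambda.get());      // REPLACED-INSIDE-INSTANCE-CLASSLOADER
        }
    }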
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSyncSchemaForBootstrap.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSyncSchemaForBootstrap.java
new file mode 100644
index 0000000..c8bc9f7
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceSyncSchemaForBootstrap.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.time.Duration;
+
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.simulator.systems.SimulatedActionTask;
+
+import static org.apache.cassandra.simulator.Action.Modifier.DISPLAY_ORIGIN;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.junit.Assert.assertTrue;
+
+class OnInstanceSyncSchemaForBootstrap extends SimulatedActionTask
+{
+    public OnInstanceSyncSchemaForBootstrap(ClusterActions actions, int node)
+    {
+        super("Sync Schema on " + node, RELIABLE_NO_TIMEOUTS.with(DISPLAY_ORIGIN), RELIABLE_NO_TIMEOUTS, actions, actions.cluster.get(node),
+              () -> assertTrue("schema is ready", Schema.instance.waitUntilReady(Duration.ofMinutes(10))));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceTopologyChangePaxosRepair.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceTopologyChangePaxosRepair.java
new file mode 100644
index 0000000..daaa474
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/OnInstanceTopologyChangePaxosRepair.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.Future;
+
+import static org.apache.cassandra.net.Verb.MUTATION_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_COMPLETE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_FINISH_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_RSP;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_RSP2;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_START_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.READ_REQ;
+import static org.apache.cassandra.net.Verb.SCHEMA_PULL_REQ;
+import static org.apache.cassandra.net.Verb.SCHEMA_PUSH_REQ;
+import static org.apache.cassandra.simulator.Action.Modifiers.NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+
+class OnInstanceTopologyChangePaxosRepair extends ClusterAction
+{
+    public OnInstanceTopologyChangePaxosRepair(ClusterActions actions, int on, String reason)
+    {
+        this("Paxos Topology Repair on " + on, RELIABLE, NO_TIMEOUTS, actions, on, invokableTopologyChangeRepair(reason));
+    }
+
+    public OnInstanceTopologyChangePaxosRepair(String id, Modifiers self, Modifiers transitive, ClusterActions actions, int on, SerializableRunnable runnable)
+    {
+        super(id, RELIABLE.with(self), NO_TIMEOUTS.with(transitive), actions, on, runnable);
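+        // force schema propagation, paxos-cleanup and read/write verbs to be delivered reliably, so the
+        // topology-change repair cannot be derailed by simulated message drops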
+        setMessageModifiers(SCHEMA_PULL_REQ, RELIABLE, RELIABLE);
+        setMessageModifiers(SCHEMA_PUSH_REQ, RELIABLE, RELIABLE);
+        setMessageModifiers(PAXOS2_CLEANUP_START_PREPARE_REQ, RELIABLE, RELIABLE);
+        setMessageModifiers(PAXOS2_CLEANUP_REQ, RELIABLE, RELIABLE);
+        setMessageModifiers(PAXOS2_CLEANUP_FINISH_PREPARE_REQ, RELIABLE, RELIABLE);
+        setMessageModifiers(PAXOS2_CLEANUP_COMPLETE_REQ, RELIABLE, RELIABLE);
+        setMessageModifiers(PAXOS2_CLEANUP_RSP, RELIABLE, RELIABLE);
+        setMessageModifiers(PAXOS2_CLEANUP_RSP2, RELIABLE, RELIABLE);
+        setMessageModifiers(MUTATION_REQ, RELIABLE, RELIABLE);
+        setMessageModifiers(READ_REQ, RELIABLE, RELIABLE);
+    }
+
+    protected static SerializableRunnable invokableTopologyChangeRepair(String reason)
+    {
+        return () -> {
+            Condition condition = newOneTimeCondition();
+            Future<?> future = StorageService.instance.startRepairPaxosForTopologyChange(reason);
+            future.addListener(condition::signal); // add listener so we don't use Futures.addAllAsList
+            condition.awaitThrowUncheckedOnInterrupt();
+        };
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/Topology.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/Topology.java
new file mode 100644
index 0000000..11ec2f8
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/Topology.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.util.Arrays;
+
+public class Topology
+{
+    public final int[] primaryKeys;
+    public final int[] membersOfRing;
+    public final long[] membersOfRingTokens;
+    public final int[] membersOfQuorum;
+    public final int[] rf;
+    public final int quorumRf;
+    public final int[][] replicasForKeys;
+    public final int[][] pendingReplicasForKeys;
+
+    public Topology(int[] primaryKeys, int[] membersOfRing, long[] membersOfRingTokens, int[] membersOfQuorum, int[] rf, int quorumRf, int[][] replicasForKeys, int[][] pendingReplicasForKeys)
+    {
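+        // every key's replica set must match the quorum replication factor; anything else indicates inconsistent ownership data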
+        for (int i = 0 ; i < primaryKeys.length ; ++i)
+        {
+            if (replicasForKeys[i].length != quorumRf)
+                throw new AssertionError(String.format("Inconsistent ownership information: %s (expect %d)", Arrays.toString(replicasForKeys[i]), quorumRf));
+        }
+        this.primaryKeys = primaryKeys;
+        this.membersOfRing = membersOfRing;
+        this.membersOfRingTokens = membersOfRingTokens;
+        this.membersOfQuorum = membersOfQuorum;
+        this.rf = rf;
+        this.quorumRf = quorumRf;
+        this.replicasForKeys = replicasForKeys;
+        this.pendingReplicasForKeys = pendingReplicasForKeys;
+    }
+
+    public int[] pendingKeys()
+    {
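+        // two passes over pendingReplicasForKeys: first size the result, then collect the primary keys that have pending replicas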
+        int count = 0;
+        for (int i = 0 ; i < pendingReplicasForKeys.length ; ++i)
+        {
+            if (pendingReplicasForKeys[i].length > 0)
+                ++count;
+        }
+        int[] pendingKeys = new int[count];
+        count = 0;
+        for (int i = 0 ; i < pendingReplicasForKeys.length ; ++i)
+        {
+            if (pendingReplicasForKeys[i].length == 0)
+                continue;
+            pendingKeys[count] = primaryKeys[i];
+            count++;
+        }
+        return pendingKeys;
+    }
+
+    public Topology select(int[] selectPrimaryKeys)
+    {
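+        // primaryKeys and selectPrimaryKeys are assumed to be sorted ascending; advance through primaryKeys to locate each selected key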
+        int[][] newReplicasForKeys = new int[selectPrimaryKeys.length][];
+        int[][] newPendingReplicasForKeys = new int[selectPrimaryKeys.length][];
+        int in = 0, out = 0;
+        while (out < newReplicasForKeys.length)
+        {
+            if (primaryKeys[in] < selectPrimaryKeys[out])
+            {
+                ++in;
+                continue;
+            }
+            if (primaryKeys[in] > selectPrimaryKeys[out])
+                throw new AssertionError();
+
+            newReplicasForKeys[out] = replicasForKeys[in];
+            newPendingReplicasForKeys[out] = pendingReplicasForKeys[in];
+            ++in;
+            ++out;
+        }
+        return new Topology(selectPrimaryKeys, membersOfRing, membersOfRingTokens, membersOfQuorum, rf, quorumRf,
+                            newReplicasForKeys, newPendingReplicasForKeys);
+    }
+}
\ No newline at end of file
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/TopologyListener.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/TopologyListener.java
new file mode 100644
index 0000000..f08c587
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/TopologyListener.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+public interface TopologyListener
+{
+    void onChange(Topology newTopology);
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/cluster/Utils.java b/test/simulator/main/org/apache/cassandra/simulator/cluster/Utils.java
new file mode 100644
index 0000000..277ae11
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/cluster/Utils.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.cluster;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.TokenSerializer;
+import org.apache.cassandra.gms.VersionedValue;
+
+import static org.apache.cassandra.config.DatabaseDescriptor.getPartitioner;
+import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
+
+public class Utils
+{
+    static Token currentToken()
+    {
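+        // read this node's TOKENS application state from gossip and deserialize the first advertised token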
+        EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(getBroadcastAddressAndPort());
+        VersionedValue value = epState.getApplicationState(ApplicationState.TOKENS);
+        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(value.toBytes())))
+        {
+            return TokenSerializer.deserialize(getPartitioner(), in).iterator().next();
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    static List<Token> parseTokens(Collection<String> tokens)
+    {
+        return tokens.stream()
+                .map(Utils::parseToken)
+                .collect(Collectors.toList());
+    }
+
+    static List<Range<Token>> parseTokenRanges(Collection<Map.Entry<String, String>> tokenRanges)
+    {
+        return tokenRanges.stream()
+                .map(Utils::parseTokenRange)
+                .collect(Collectors.toList());
+    }
+
+    static Token parseToken(String token)
+    {
+        return getPartitioner().getTokenFactory().fromString(token);
+    }
+
+    static Range<Token> parseTokenRange(Map.Entry<String, String> tokenRange)
+    {
+        return parseTokenRange(tokenRange.getKey(), tokenRange.getValue());
+    }
+
+    static Range<Token> parseTokenRange(String exclusiveLowerBound, String inclusiveUpperBound)
+    {
+        return new Range<>(parseToken(exclusiveLowerBound), parseToken(inclusiveUpperBound));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/debug/Reconcile.java b/test/simulator/main/org/apache/cassandra/simulator/debug/Reconcile.java
new file mode 100644
index 0000000..5acf764
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/debug/Reconcile.java
@@ -0,0 +1,563 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.debug;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.Supplier;
+import java.util.regex.Pattern;
+import java.util.zip.GZIPInputStream;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.simulator.ClusterSimulation;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.SimulationRunner.RecordOption;
+import org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites.Capture;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.concurrent.Threads;
+
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.NONE;
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.VALUE;
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.WITH_CALLSITES;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+
+public class Reconcile
+{
+    private static final Logger logger = LoggerFactory.getLogger(Reconcile.class);
+
+    private static final Pattern STRIP_TRACES = Pattern.compile("(Wakeup|Continue|Timeout|Waiting)\\[(((([a-zA-Z]\\.)*[a-zA-Z0-9_$]+\\.[a-zA-Z0-9_<>$]+:[\\-0-9]+; )*(([a-zA-Z]\\.)*[a-zA-Z0-9_$]+\\.[a-zA-Z0-9_<>$]+:[\\-0-9]+))( #\\[.*?]#)?) ?(by\\[.*?])?]");
+    private static final Pattern STRIP_NOW_TRACES = Pattern.compile("( #\\[.*?]#)");
+    private static final Pattern NORMALISE_THREAD_RECORDING_IN = Pattern.compile("(Thread\\[[^]]+:[0-9]+),?[0-9]+(,node[0-9]+)]");
+    static final Pattern NORMALISE_LAMBDA = Pattern.compile("((\\$\\$Lambda\\$[0-9]+/[0-9]+)?(@[0-9a-f]+)?)");
+    static final Pattern NORMALISE_THREAD = Pattern.compile("(Thread\\[[^]]+:[0-9]+),[0-9](,node[0-9]+)(_[0-9]+)?]");
+
+    public static class AbstractReconciler
+    {
+        private static final Logger logger = LoggerFactory.getLogger(AbstractReconciler.class);
+
+        final DataInputPlus in;
+        final List<String> strings = new ArrayList<>();
+        final boolean inputHasCallSites;
+        final boolean reconcileCallSites;
+        int line;
+
+        public AbstractReconciler(DataInputPlus in, boolean inputHasCallSites, RecordOption reconcile)
+        {
+            this.in = in;
+            this.inputHasCallSites = inputHasCallSites;
+            this.reconcileCallSites = reconcile == WITH_CALLSITES;
+        }
+
+        String readInterned() throws IOException
+        {
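+            // interning protocol: an id equal to the current table size means a new string follows; a larger id means the streams have diverged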
+            int id = (int) in.readVInt();
+            if (id == strings.size()) strings.add(in.readUTF());
+            else if (id > strings.size()) throw failWithOOM();
+            return strings.get(id);
+        }
+
+        private String readCallSite() throws IOException
+        {
+            if (!inputHasCallSites)
+                return "";
+
+            String trace = in.readUTF();
+            for (int i = trace.indexOf('\n') ; i >= 0 ; i = trace.indexOf('\n', i + 1))
+                ++line;
+            return reconcileCallSites ? trace : "";
+        }
+
+        private String ourCallSite()
+        {
+            if (!reconcileCallSites)
+                return "";
+
+            StackTraceElement[] ste = Thread.currentThread().getStackTrace();
+            return Arrays.stream(ste, 4, ste.length)
+                         .filter(st -> !st.getClassName().equals("org.apache.cassandra.simulator.debug.Reconcile")
+                                       && !st.getClassName().equals("org.apache.cassandra.simulator.SimulationRunner$Reconcile")
+                                       && !st.getClassName().equals("sun.reflect.NativeMethodAccessorImpl") // depends on async compile thread
+                                       && !st.getClassName().startsWith("sun.reflect.GeneratedMethodAccessor")) // depends on async compile thread
+                         .collect(new Threads.StackTraceCombiner(true, "", "\n", ""));
+        }
+
+        public void checkThread() throws IOException
+        {
+            // NORMALISE_LAMBDA also strips Object.toString() identity hashes, which differ between runs for some Thread objects
+            String thread = NORMALISE_LAMBDA.matcher(readInterned()).replaceAll("");
+            String ourThread = NORMALISE_LAMBDA.matcher(Thread.currentThread().toString()).replaceAll("");
+            String callSite = NORMALISE_LAMBDA.matcher(readCallSite()).replaceAll("");
+            String ourCallSite = NORMALISE_LAMBDA.matcher(ourCallSite()).replaceAll("");
+            if (!thread.equals(ourThread) || !callSite.equals(ourCallSite))
+            {
+                logger.error(String.format("(%s,%s) != (%s,%s)", thread, callSite, ourThread, ourCallSite));
+                throw failWithOOM();
+            }
+        }
+    }
+
+    public static class TimeReconciler extends AbstractReconciler implements SimulatedTime.Listener, Closeable
+    {
+        boolean disabled;
+
+        public TimeReconciler(DataInputPlus in, boolean inputHasCallSites, RecordOption reconcile)
+        {
+            super(in, inputHasCallSites, reconcile);
+        }
+
+        @Override
+        public void close()
+        {
+            disabled = true;
+        }
+
+        @Override
+        public synchronized void accept(String kind, long value)
+        {
+            if (disabled)
+                return;
+
+            try
+            {
+                String testKind = readInterned();
+                long testValue = in.readUnsignedVInt();
+                checkThread();
+                if (!kind.equals(testKind) || value != testValue)
+                {
+                    logger.error("({},{}) != ({},{})", kind, value, testKind, testValue);
+                    throw failWithOOM();
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    public static class RandomSourceReconciler extends RandomSource.Abstract implements Supplier<RandomSource>, Closeable
+    {
+        private static final Logger logger = LoggerFactory.getLogger(RandomSourceReconciler.class);
+        private static final AtomicReferenceFieldUpdater<Reconcile.RandomSourceReconciler, Thread> lockedUpdater = AtomicReferenceFieldUpdater.newUpdater(Reconcile.RandomSourceReconciler.class, Thread.class, "locked");
+        final DataInputPlus in;
+        final RandomSource wrapped;
+        final AbstractReconciler threads;
+        int count;
+        volatile Thread locked;
+        volatile boolean disabled;
+
+        public RandomSourceReconciler(DataInputPlus in, RandomSource wrapped, boolean inputHasCallSites, RecordOption reconcile)
+        {
+            this.in = in;
+            this.wrapped = wrapped;
+            this.threads = new AbstractReconciler(in, inputHasCallSites, reconcile);
+        }
+
+        private void enter()
+        {
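+            // single-owner CAS "lock": failing to acquire it means two threads reached the RNG at once, i.e. the simulator lost determinism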
+            if (!lockedUpdater.compareAndSet(this, null, Thread.currentThread()))
+            {
+                if (disabled)
+                    return;
+
+                disabled = true;
+                logger.error("Race within RandomSourceReconciler - means we have a Simulator bug permitting two threads to run at once");
+                throw failWithOOM();
+            }
+        }
+
+        private void exit()
+        {
+            locked = null;
+        }
+
+        public void onDeterminismCheck(long value)
+        {
+            if (disabled)
+                return;
+
+            enter();
+            try
+            {
+                byte type = in.readByte();
+                int c = (int) in.readVInt();
+                long v = in.readLong();
+                threads.checkThread();
+                if (type != 7 || c != count || value != v)
+                {
+                    logger.error(String.format("(%d,%d,%d) != (%d,%d,%d)", 7, count, value, type, c, v));
+                    throw failWithOOM();
+                }
+                ++count;
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+        }
+
+        public int uniform(int min, int max)
+        {
+            int v = wrapped.uniform(min, max);
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                byte type = in.readByte();
+                int c = (int) in.readVInt();
+                threads.checkThread();
+                int min1 = (int) in.readVInt();
+                int max1 = (int) in.readVInt() + min1;
+                int v1 = (int) in.readVInt() + min1;
+                if (type != 1 || min != min1 || max != max1 || v != v1 || c != count)
+                {
+                    logger.error(String.format("(%d,%d,%d[%d,%d]) != (%d,%d,%d[%d,%d])", 1, count, v, min, max, type, c, v1, min1, max1));
+                    throw failWithOOM();
+                }
+                ++count;
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public long uniform(long min, long max)
+        {
+            long v = wrapped.uniform(min, max);
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                byte type = in.readByte();
+                int c = (int) in.readVInt();
+                threads.checkThread();
+                long min1 = in.readVInt();
+                long max1 = in.readVInt() + min1;
+                long v1 = in.readVInt() + min1;
+                if (type != 2 || min != min1 || max != max1 || v != v1 || c != count)
+                {
+                    logger.error(String.format("(%d,%d,%d[%d,%d]) != (%d,%d,%d[%d,%d])", 2, count, v, min, max, type, c, v1, min1, max1));
+                    throw failWithOOM();
+                }
+                ++count;
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public float uniformFloat()
+        {
+            float v = wrapped.uniformFloat();
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                byte type = in.readByte();
+                int c = (int) in.readVInt();
+                threads.checkThread();
+                float v1 = in.readFloat();
+                if (type != 3 || v != v1 || c != count)
+                {
+                    logger.error(String.format("(%d,%d,%f) != (%d,%d,%f)", 3, count, v, type, c, v1));
+                    throw failWithOOM();
+                }
+                ++count;
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        @Override
+        public double uniformDouble()
+        {
+            double v = wrapped.uniformDouble();
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                byte type = in.readByte();
+                int c = (int) in.readVInt();
+                threads.checkThread();
+                double v1 = in.readDouble();
+                if (type != 6 || v != v1 || c != count)
+                {
+                    logger.error(String.format("(%d,%d,%f) != (%d,%d,%f)", 6, count, v, type, c, v1));
+                    throw failWithOOM();
+                }
+                ++count;
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public synchronized void reset(long seed)
+        {
+            wrapped.reset(seed);
+            if (disabled)
+                return;
+
+            enter();
+            try
+            {
+                byte type = in.readByte();
+                int c = (int) in.readVInt();
+                long v1 = in.readVInt();
+                if (type != 4 || seed != v1 || c != count)
+                    throw failWithOOM();
+                ++count;
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+        }
+
+        public synchronized long reset()
+        {
+            long v = wrapped.reset();
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                byte type = in.readByte();
+                int c = (int) in.readVInt();
+                long v1 = in.readVInt();
+                if (type != 5 || v != v1 || c != count)
+                    throw failWithOOM();
+                ++count;
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public synchronized RandomSource get()
+        {
+            if (count++ > 0)
+                throw failWithOOM();
+            return this;
+        }
+
+        @Override
+        public void close()
+        {
+            disabled = true;
+        }
+    }
+
+    public static void reconcileWith(String loadFromDir, long seed, RecordOption withRng, RecordOption withTime, ClusterSimulation.Builder<?> builder)
+    {
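+        // a recording is keyed by seed: <seed>.gz holds the event stream, <seed>.rng.gz the RNG draws and <seed>.time.gz the clock values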
+        File eventFile = new File(new File(loadFromDir), Long.toHexString(seed) + ".gz");
+        File rngFile = new File(new File(loadFromDir), Long.toHexString(seed) + ".rng.gz");
+        File timeFile = new File(new File(loadFromDir), Long.toHexString(seed) + ".time.gz");
+
+        try (BufferedReader eventIn = new BufferedReader(new InputStreamReader(new GZIPInputStream(eventFile.newInputStream())));
+             DataInputPlus.DataInputStreamPlus rngIn = new DataInputPlus.DataInputStreamPlus(rngFile.exists() && withRng != NONE ? new GZIPInputStream(rngFile.newInputStream()) : new ByteArrayInputStream(new byte[0]));
+             DataInputPlus.DataInputStreamPlus timeIn = new DataInputPlus.DataInputStreamPlus(timeFile.exists() && withTime != NONE ? new GZIPInputStream(timeFile.newInputStream()) : new ByteArrayInputStream(new byte[0])))
+        {
+            boolean inputHasWaitSites, inputHasWakeSites, inputHasRngCallSites, inputHasTimeCallSites;
+            {
+                String modifiers = eventIn.readLine();
+                if (!modifiers.startsWith("modifiers:"))
+                    throw new IllegalStateException();
+
+                builder.capture(new Capture(
+                    builder.capture().waitSites & (inputHasWaitSites = modifiers.contains("waitSites")),
+                    builder.capture().wakeSites & (inputHasWakeSites = modifiers.contains("wakeSites")),
+                    builder.capture().nowSites)
+                );
+                inputHasRngCallSites = modifiers.contains("rngCallSites");
+                if (!modifiers.contains("rng")) withRng = NONE;
+                if (withRng == WITH_CALLSITES && !inputHasRngCallSites) withRng = VALUE;
+
+                inputHasTimeCallSites = modifiers.contains("timeCallSites");
+                if (!modifiers.contains("time")) withTime = NONE;
+                if (withTime == WITH_CALLSITES && !inputHasTimeCallSites) withTime = VALUE;
+            }
+            if (withRng != NONE && !rngFile.exists())
+                throw new IllegalStateException();
+            if (withTime != NONE && !timeFile.exists())
+                throw new IllegalStateException();
+
+            {
+                Set<String> modifiers = new LinkedHashSet<>();
+                if (withRng == WITH_CALLSITES)
+                    modifiers.add("rngCallSites");
+                else if (withRng == VALUE)
+                    modifiers.add("rng");
+                if (withTime == WITH_CALLSITES)
+                    modifiers.add("timeCallSites");
+                else if (withTime == VALUE)
+                    modifiers.add("time");
+                if (builder.capture().waitSites)
+                    modifiers.add("WaitSites");
+                if (builder.capture().wakeSites)
+                    modifiers.add("WakeSites");
+                logger.error("Seed 0x{} ({}) (With: {})", Long.toHexString(seed), eventFile, modifiers);
+            }
+
+            RandomSourceReconciler random = null;
+            TimeReconciler time = null;
+            if (withRng != NONE)
+            {
+                builder.random(random = new RandomSourceReconciler(rngIn, new RandomSource.Default(), inputHasRngCallSites, withRng));
+                builder.onThreadLocalRandomCheck(random::onDeterminismCheck);
+            }
+            if (withTime != NONE)
+                builder.timeListener(time = new TimeReconciler(timeIn, inputHasTimeCallSites, withTime));
+
+            class Line { int line = 1; } Line line = new Line(); // box for heap dump analysis
+            try (ClusterSimulation<?> cluster = builder.create(seed);
+                 CloseableIterator<?> iter = cluster.simulation.iterator())
+            {
+                try
+                {
+                    while (iter.hasNext())
+                    {
+                        ++line.line;
+                        String rawInput = eventIn.readLine();
+                        String input = (inputHasWaitSites != builder.capture().waitSites || inputHasWakeSites != builder.capture().wakeSites)
+                                       ? normaliseRecordingInWithoutWaitOrWakeSites(rawInput, inputHasWaitSites && !builder.capture().waitSites, inputHasWakeSites && !builder.capture().wakeSites)
+                                       : normaliseRecordingIn(rawInput);
+                        Object next = iter.next();
+                        String rawOutput = next.toString();
+                        String output = normaliseReconcileWithRecording(rawOutput);
+                        if (!input.equals(output))
+                            failWithHeapDump(line.line, input, output);
+                    }
+                    if (random != null)
+                        random.close();
+                    if (time != null)
+                        time.close();
+                }
+                catch (Throwable t)
+                {
+                    t.printStackTrace();
+                    throw t;
+                }
+            }
+        }
+        catch (Throwable t)
+        {
+            if (t instanceof Error)
+                throw (Error) t;
+            throw new RuntimeException("Failed on seed " + Long.toHexString(seed), t);
+        }
+    }
+
+    private static String normaliseRecordingIn(String input)
+    {
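+        // strip lambda/object identity suffixes, per-run thread counters and "now" call-site traces so event lines from different runs compare equal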
+        return STRIP_NOW_TRACES.matcher(
+            NORMALISE_THREAD_RECORDING_IN.matcher(
+                NORMALISE_LAMBDA.matcher(input).replaceAll("")
+            ).replaceAll("$1$2]")
+        ).replaceAll("");
+    }
+
+    private static String normaliseRecordingInWithoutWaitOrWakeSites(String input, boolean stripWaitSites, boolean stripWakeSites)
+    {
+        return STRIP_TRACES.matcher(
+            NORMALISE_THREAD_RECORDING_IN.matcher(
+                NORMALISE_LAMBDA.matcher(input).replaceAll("")
+            ).replaceAll("$1$2]")
+        ).replaceAll(stripWaitSites && stripWakeSites ? "$1[]" : stripWaitSites ? "$1[$9]" : "$1[$3]");
+    }
+
+    private static String normaliseReconcileWithRecording(String input)
+    {
+        return STRIP_NOW_TRACES.matcher(
+            NORMALISE_THREAD.matcher(
+                NORMALISE_LAMBDA.matcher(input).replaceAll("")
+            ).replaceAll("$1$2]")
+        ).replaceAll("");
+    }
+
+    static void failWithHeapDump(int line, Object input, Object output)
+    {
+        logger.error("Line {}", line);
+        logger.error("Input {}", input);
+        logger.error("Output {}", output);
+        throw failWithOOM();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/debug/Record.java b/test/simulator/main/org/apache/cassandra/simulator/debug/Record.java
new file mode 100644
index 0000000..7ca7a20
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/debug/Record.java
@@ -0,0 +1,527 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.debug;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.channels.Channels;
+import java.util.Arrays;
+import java.util.IdentityHashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.function.Supplier;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
+import java.util.zip.GZIPOutputStream;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
+import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.simulator.ClusterSimulation;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.SimulationRunner.RecordOption;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.concurrent.Threads;
+
+import static org.apache.cassandra.io.util.File.WriteMode.OVERWRITE;
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.NONE;
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.VALUE;
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.WITH_CALLSITES;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+
+public class Record
+{
+    private static final Logger logger = LoggerFactory.getLogger(Record.class);
+    private static final Pattern NORMALISE_THREAD_RECORDING_OUT = Pattern.compile("(Thread\\[[^]]+:[0-9]+),[0-9](,node[0-9]+)_[0-9]+]");
+    private static final Pattern NORMALISE_LAMBDA = Pattern.compile("((\\$\\$Lambda\\$[0-9]+/[0-9]+)?(@[0-9a-f]+)?)");
+
+    public static void record(String saveToDir, long seed, RecordOption withRng, RecordOption withTime, ClusterSimulation.Builder<?> builder)
+    {
+        File eventFile = new File(new File(saveToDir), Long.toHexString(seed) + ".gz");
+        File rngFile = new File(new File(saveToDir), Long.toHexString(seed) + ".rng.gz");
+        File timeFile = new File(new File(saveToDir), Long.toHexString(seed) + ".time.gz");
+
+        {
+            Set<String> modifiers = new LinkedHashSet<>();
+            if (withRng == WITH_CALLSITES)
+                modifiers.add("rngCallSites");
+            else if (withRng == VALUE)
+                modifiers.add("rng");
+            if (withTime == WITH_CALLSITES)
+                modifiers.add("timeCallSites");
+            else if (withTime == VALUE)
+                modifiers.add("time");
+            if (builder.capture().waitSites)
+                modifiers.add("WaitSites");
+            if (builder.capture().wakeSites)
+                modifiers.add("WakeSites");
+            logger.error("Seed 0x{} ({}) (With: {})", Long.toHexString(seed), eventFile, modifiers);
+        }
+
+        try (PrintWriter eventOut = new PrintWriter(new GZIPOutputStream(eventFile.newOutputStream(OVERWRITE), 1 << 16));
+             DataOutputStreamPlus rngOut = new BufferedDataOutputStreamPlus(Channels.newChannel(withRng != NONE ? new GZIPOutputStream(rngFile.newOutputStream(OVERWRITE), 1 << 16) : new ByteArrayOutputStream(0)));
+             DataOutputStreamPlus timeOut = new BufferedDataOutputStreamPlus(Channels.newChannel(withTime != NONE ? new GZIPOutputStream(timeFile.newOutputStream(OVERWRITE), 1 << 16) : new ByteArrayOutputStream(0))))
+        {
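+            // the first line of the event file declares which optional streams were captured, so Reconcile can align its expectations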
+            eventOut.println("modifiers:"
+                             + (withRng == VALUE ? "rng," : "") + (withRng == WITH_CALLSITES ? "rngCallSites," : "")
+                             + (withTime == VALUE ? "time," : "") + (withTime == WITH_CALLSITES ? "timeCallSites," : "")
+                             + (builder.capture().waitSites ? "waitSites," : "") + (builder.capture().wakeSites ? "wakeSites," : ""));
+
+            TimeRecorder time;
+            RandomSourceRecorder random;
+            if (withRng != NONE)
+            {
+                builder.random(random = new RandomSourceRecorder(rngOut, new RandomSource.Default(), withRng));
+                builder.onThreadLocalRandomCheck(random::onDeterminismCheck);
+            }
+            else random = null;
+
+            if (withTime != NONE) builder.timeListener(time = new TimeRecorder(timeOut, withTime));
+            else time = null;
+
+            // periodic forced flush to ensure state is on disk after some kind of stall
+            Thread flusher = new Thread(() -> {
+                try
+                {
+                    while (true)
+                    {
+                        Thread.sleep(1000);
+                        eventOut.flush();
+                        if (random != null)
+                        {
+                            synchronized (random)
+                            {
+                                rngOut.flush();
+                            }
+                        }
+                        if (time != null)
+                        {
+                            synchronized (time)
+                            {
+                                timeOut.flush();
+                            }
+                        }
+                    }
+                }
+                catch (IOException e)
+                {
+                    e.printStackTrace();
+                }
+                catch (InterruptedException ignore)
+                {
+                }
+                finally
+                {
+                    eventOut.flush();
+                    try
+                    {
+                        if (random != null)
+                        {
+                            synchronized (random)
+                            {
+                                rngOut.flush();
+                            }
+                        }
+                    }
+                    catch (IOException e)
+                    {
+                        e.printStackTrace();
+                    }
+                }
+            }, "Flush Recordings of " + seed);
+            flusher.setDaemon(true);
+            flusher.start();
+
+            try (ClusterSimulation<?> cluster = builder.create(seed))
+            {
+                try (CloseableIterator<?> iter = cluster.simulation.iterator())
+                {
+                    while (iter.hasNext())
+                        eventOut.println(normaliseRecordingOut(iter.next().toString()));
+
+                    if (random != null)
+                        random.close();
+                }
+                finally
+                {
+                    eventOut.flush();
+                    rngOut.flush();
+                }
+            }
+            finally
+            {
+                flusher.interrupt();
+            }
+        }
+        catch (Throwable t)
+        {
+            t.printStackTrace();
+            throw new RuntimeException("Failed on seed " + Long.toHexString(seed), t);
+        }
+    }
+
+    private static String normaliseRecordingOut(String input)
+    {
+        return NORMALISE_THREAD_RECORDING_OUT.matcher(
+            NORMALISE_LAMBDA.matcher(input).replaceAll("")
+        ).replaceAll("$1$2]");
+    }
+
+    public static class TimeRecorder extends AbstractRecorder implements SimulatedTime.Listener, java.io.Closeable
+    {
+        boolean disabled;
+
+        public TimeRecorder(DataOutputStreamPlus out, RecordOption option)
+        {
+            super(out, option);
+        }
+
+        @Override
+        public void close() throws IOException
+        {
+            disabled = true;
+            out.close();
+        }
+
+        @Override
+        public synchronized void accept(String kind, long value)
+        {
+            if (disabled)
+                return;
+
+            try
+            {
+                writeInterned(kind);
+                out.writeUnsignedVInt(value);
+                writeThread();
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    // TODO: merge with TimeRecorder to produce one stream, use to support live reconciliation between two JVMs over socket
+    public static class RandomSourceRecorder extends RandomSource.Abstract implements Supplier<RandomSource>, Closeable
+    {
+        private static final AtomicReferenceFieldUpdater<RandomSourceRecorder, Thread> lockedUpdater = AtomicReferenceFieldUpdater.newUpdater(Record.RandomSourceRecorder.class, Thread.class, "locked");
+
+        final DataOutputStreamPlus out;
+        final AbstractRecorder threads;
+        final RandomSource wrapped;
+        int count = 0;
+        volatile Thread locked;
+        volatile boolean disabled;
+
+        public RandomSourceRecorder(DataOutputStreamPlus out, RandomSource wrapped, RecordOption option)
+        {
+            this.out = out;
+            this.wrapped = wrapped;
+            this.threads = new AbstractRecorder(out, option);
+        }
+
+        private void enter()
+        {
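+            // spin to acquire the single-owner lock; if the other owner is itself inside the recorder, two threads are racing and determinism is already lost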
+            while (!lockedUpdater.compareAndSet(this, null, Thread.currentThread()))
+            {
+                if (disabled)
+                    return;
+
+                Thread alt = locked;
+                if (alt == null)
+                    continue;
+                StackTraceElement[] altTrace = alt.getStackTrace();
+                if (Stream.of(altTrace).noneMatch(ste -> ste.getClassName().equals(RandomSourceRecorder.class.getName())))
+                    continue;
+
+                disabled = true;
+                logger.error("Race within RandomSourceReconciler between {} and {} - means we have a Simulator bug permitting two threads to run at once\n{}", Thread.currentThread(), alt, Threads.prettyPrint(altTrace, true, "\n"));
+                throw failWithOOM();
+            }
+        }
+
+        private void exit()
+        {
+            locked = null;
+        }
+
+        // determinism check is exclusively a ThreadLocalRandom issue at the moment
+        public void onDeterminismCheck(long value)
+        {
+            if (disabled)
+                return;
+
+            enter();
+            try
+            {
+                synchronized (this)
+                {
+                    out.writeByte(7);
+                    out.writeVInt(count++);
+                    out.writeLong(value);
+                    threads.writeThread();
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+        }
+
+        public int uniform(int min, int max)
+        {
+            int v = wrapped.uniform(min, max);
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                synchronized (this)
+                {
+                    out.writeByte(1);
+                    out.writeVInt(count++);
+                    threads.writeThread();
+                    out.writeVInt(min);
+                    out.writeVInt(max - min);
+                    out.writeVInt(v - min);
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public long uniform(long min, long max)
+        {
+            long v = wrapped.uniform(min, max);
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                synchronized (this)
+                {
+                    out.writeByte(2);
+                    out.writeVInt(count++);
+                    threads.writeThread();
+                    out.writeVInt(min);
+                    out.writeVInt(max - min);
+                    out.writeVInt(v - min);
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public float uniformFloat()
+        {
+            float v = wrapped.uniformFloat();
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                synchronized (this)
+                {
+                    out.writeByte(3);
+                    out.writeVInt(count++);
+                    threads.writeThread();
+                    out.writeFloat(v);
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public double uniformDouble()
+        {
+            double v = wrapped.uniformDouble();
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                synchronized (this)
+                {
+                    out.writeByte(6);
+                    out.writeVInt(count++);
+                    threads.writeThread();
+                    out.writeDouble(v);
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public void reset(long seed)
+        {
+            wrapped.reset(seed);
+            if (disabled)
+                return;
+
+            enter();
+            try
+            {
+                synchronized (this)
+                {
+                    out.writeByte(4);
+                    out.writeVInt(count++);
+                    out.writeVInt(seed);
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+        }
+
+        public long reset()
+        {
+            long v = wrapped.reset();
+            if (disabled)
+                return v;
+
+            enter();
+            try
+            {
+                synchronized (this)
+                {
+                    out.writeByte(5);
+                    out.writeVInt(count++);
+                    out.writeVInt(v); // the reconciler reads this back with readVInt()
+                }
+            }
+            catch (IOException e)
+            {
+                throw new RuntimeException(e);
+            }
+            finally
+            {
+                exit();
+            }
+            return v;
+        }
+
+        public RandomSource get()
+        {
+            if (count++ > 0)
+                throw failWithOOM();
+            return this;
+        }
+
+        @Override
+        public void close()
+        {
+            disabled = true;
+        }
+    }
+
+    public static class AbstractRecorder
+    {
+        final DataOutputStreamPlus out;
+        final boolean withCallSites;
+        final Map<Object, Integer> objects = new IdentityHashMap<>();
+
+        public AbstractRecorder(DataOutputStreamPlus out, RecordOption option)
+        {
+            this.out = out;
+            this.withCallSites = option == WITH_CALLSITES;
+        }
+
+        public void writeThread() throws IOException
+        {
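+            // record the (interned) thread identity and, when call sites are requested, a stack trace with recorder/reflection frames filtered out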
+            Thread thread = Thread.currentThread();
+            writeInterned(thread);
+            if (withCallSites)
+            {
+                StackTraceElement[] ste = thread.getStackTrace();
+                String trace = Arrays.stream(ste, 3, ste.length)
+                                     .filter(st ->    !st.getClassName().equals("org.apache.cassandra.simulator.debug.Record")
+                                                   && !st.getClassName().equals("org.apache.cassandra.simulator.SimulationRunner$Record")
+                                                   && !st.getClassName().equals("sun.reflect.NativeMethodAccessorImpl") // depends on async compile thread
+                                                   && !st.getClassName().startsWith("sun.reflect.GeneratedMethodAccessor")) // depends on async compile thread
+                                     .collect(new Threads.StackTraceCombiner(true, "", "\n", ""));
+                out.writeUTF(trace);
+            }
+        }
+
+        public void writeInterned(Object o) throws IOException
+        {
+            Integer id = objects.get(o);
+            if (id != null)
+            {
+                out.writeVInt(id);
+            }
+            else
+            {
+                out.writeVInt(objects.size());
+                out.writeUTF(o.toString());
+                objects.put(o, objects.size());
+            }
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/debug/SelfReconcile.java b/test/simulator/main/org/apache/cassandra/simulator/debug/SelfReconcile.java
new file mode 100644
index 0000000..be8c05a
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/debug/SelfReconcile.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.debug;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.function.Supplier;
+import java.util.regex.Pattern;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IMessage;
+import org.apache.cassandra.simulator.ClusterSimulation;
+import org.apache.cassandra.simulator.OrderOn;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.SimulationRunner.RecordOption;
+import org.apache.cassandra.simulator.systems.InterceptedExecution;
+import org.apache.cassandra.simulator.systems.InterceptedWait;
+import org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites;
+import org.apache.cassandra.simulator.systems.InterceptibleThread;
+import org.apache.cassandra.simulator.systems.InterceptorOfConsequences;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+import org.apache.cassandra.utils.memory.HeapPool;
+
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.NONE;
+import static org.apache.cassandra.simulator.SimulationRunner.RecordOption.WITH_CALLSITES;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.simulator.debug.Reconcile.NORMALISE_LAMBDA;
+import static org.apache.cassandra.simulator.debug.Reconcile.NORMALISE_THREAD;
+
+/**
+ * Simulator runs should be deterministic, so run two in parallel and see if they produce the same event stream
+ */
+public class SelfReconcile
+{
+    private static final Logger logger = LoggerFactory.getLogger(SelfReconcile.class);
+    static final Pattern NORMALISE_RECONCILE_THREAD = Pattern.compile("(Thread\\[Reconcile:)[0-9]+,[0-9],Reconcile(_[0-9]+)?]");
+
+    static class InterceptReconciler implements InterceptorOfConsequences, Supplier<RandomSource>, SimulatedTime.Listener
+    {
+        final List<Object> events = new ArrayList<>();
+        final boolean withRngCallsites;
+        int counter = 0;
+        boolean verifyUninterceptedRng;
+        boolean closed;
+
+        InterceptReconciler(boolean withRngCallsites)
+        {
+            this.withRngCallsites = withRngCallsites;
+        }
+
+        @Override
+        public void beforeInvocation(InterceptibleThread realThread)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public synchronized void interceptMessage(IInvokableInstance from, IInvokableInstance to, IMessage message)
+        {
+            verify("Send " + message.verb() + " from " + from.config().num() + " to " + to.config().num());
+        }
+
+        @Override
+        public synchronized void interceptWakeup(InterceptedWait wakeup, InterceptedWait.Trigger trigger, InterceptorOfConsequences waitWasInterceptedBy)
+        {
+            verify(normalise("Wakeup " + wakeup.waiting() + wakeup));
+        }
+
+        @Override
+        public synchronized void interceptExecution(InterceptedExecution invoke, OrderOn orderOn)
+        {
+            verify("Execute " + normalise(invoke.toString()) + " on " + orderOn);
+        }
+
+        @Override
+        public synchronized void interceptWait(InterceptedWait wakeupWith)
+        {
+            verify(normalise("Wait " + wakeupWith.waiting() + wakeupWith));
+        }
+
+        @Override
+        public void interceptTermination(boolean isThreadTermination)
+        {
+            // Cannot verify this, as thread may terminate after triggering follow-on events
+        }
+
+        void interceptAllocation(long amount, String table)
+        {
+            verify("Allocate " + amount + " for " + table + " on " + Thread.currentThread());
+        }
+
+        synchronized void verify(Object event)
+        {
+            if (closed)
+                return;
+
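+            // rendezvous point for the two concurrently running simulations: the first event to arrive waits
+            // here until its counterpart arrives, at which point the (normalised) pair is compared and cleared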
+            events.add(event);
+
+            if (events.size() == 1)
+            {
+                int cur = counter;
+                while (cur == counter)
+                {
+                    try
+                    {
+                        wait();
+                    }
+                    catch (InterruptedException e)
+                    {
+                        throw new UncheckedInterruptedException(e);
+                    }
+                }
+            }
+            else
+            {
+                if (events.size() != 2)
+                    throw new IllegalStateException();
+
+                try
+                {
+                    Object event0 = events.get(0);
+                    Object event1 = events.get(1);
+                    if (event0 instanceof Pair)
+                        event0 = ((Pair<?, ?>) event0).left;
+                    if (event1 instanceof Pair)
+                        event1 = ((Pair<?, ?>) event1).left;
+                    String e0 = normalise(event0.toString());
+                    String e1 = normalise(event1.toString());
+                    if (!e0.equals(e1))
+                        throw failWithOOM();
+                }
+                finally
+                {
+                    events.clear();
+                    ++counter;
+                    notifyAll();
+                }
+            }
+        }
+
+        public RandomSource get()
+        {
+            return new RandomSource.Abstract()
+            {
+                final RandomSource wrapped = new Default();
+
+                @Override
+                public float uniformFloat()
+                {
+                    return verify("uniformFloat:", wrapped.uniformFloat());
+                }
+
+                @Override
+                public double uniformDouble()
+                {
+                    return verify("uniformDouble:", wrapped.uniformDouble());
+                }
+
+                @Override
+                public void reset(long seed)
+                {
+                    wrapped.reset(seed);
+                    verify("reset(" + seed + ')', "");
+                }
+
+                @Override
+                public long reset()
+                {
+                    return verify("reset:", wrapped.reset());
+                }
+
+                @Override
+                public int uniform(int min, int max)
+                {
+                    return verify("uniform(" + min + ',' + max + "):", wrapped.uniform(min, max));
+                }
+
+                @Override
+                public long uniform(long min, long max)
+                {
+                    return verify("uniform(" + min + ',' + max + "):", wrapped.uniform(min, max));
+                }
+
+                private <T> T verify(String event, T result)
+                {
+                    Thread thread = Thread.currentThread();
+                    if (!(thread instanceof InterceptibleThread) || !((InterceptibleThread) thread).isIntercepting())
+                    {
+                        if (!verifyUninterceptedRng)
+                            return result;
+                    }
+                    InterceptReconciler.this.verify(withRngCallsites ? event + result + ' ' + Thread.currentThread() + ' '
+                                                                       + new CaptureSites(Thread.currentThread())
+                                                                         .toString(ste -> !ste.getClassName().startsWith(SelfReconcile.class.getName()))
+                                                                     : event + result);
+                    return result;
+                }
+            };
+        }
+
+        void close()
+        {
+            closed = true;
+        }
+
+        @Override
+        public void accept(String kind, long value)
+        {
+            verify(Thread.currentThread() + ":" + kind + ':' + value);
+        }
+    }
+
+    public static void reconcileWithSelf(long seed, RecordOption withRng, RecordOption withTime, boolean withAllocations, ClusterSimulation.Builder<?> builder)
+    {
+        logger.error("Seed 0x{}", Long.toHexString(seed));
+
+        InterceptReconciler reconciler = new InterceptReconciler(withRng == WITH_CALLSITES);
+        if (withRng != NONE) builder.random(reconciler);
+        if (withTime != NONE) builder.timeListener(reconciler);
+
+        HeapPool.Logged.Listener memoryListener = withAllocations ? reconciler::interceptAllocation : null;
+        ExecutorService executor = ExecutorFactory.Global.executorFactory().pooled("Reconcile", 2);
+
+        try (ClusterSimulation<?> cluster1 = builder.unique(0).memoryListener(memoryListener).create(seed);
+             ClusterSimulation<?> cluster2 = builder.unique(1).memoryListener(memoryListener).create(seed))
+        {
+            try
+            {
+
+                InterceptibleThread.setDebugInterceptor(reconciler);
+                reconciler.verifyUninterceptedRng = true;
+
+                Future<?> f1 = executor.submit(() -> {
+                    try (CloseableIterator<?> iter = cluster1.simulation.iterator())
+                    {
+                        while (iter.hasNext())
+                        {
+                            Object o = iter.next();
+                            reconciler.verify(Pair.create(normalise(o.toString()), o));
+                        }
+                    }
+                    reconciler.verify("done");
+                });
+                Future<?> f2 = executor.submit(() -> {
+                    try (CloseableIterator<?> iter = cluster2.simulation.iterator())
+                    {
+                        while (iter.hasNext())
+                        {
+                            Object o = iter.next();
+                            reconciler.verify(Pair.create(normalise(o.toString()), o));
+                        }
+                    }
+                    reconciler.verify("done");
+                });
+                f1.get();
+                f2.get();
+            }
+            finally
+            {
+                reconciler.close();
+            }
+        }
+        catch (Throwable t)
+        {
+            t.printStackTrace();
+            throw new RuntimeException("Failed on seed " + Long.toHexString(seed), t);
+        }
+    }
+
+    private static String normalise(String input)
+    {
+        return NORMALISE_RECONCILE_THREAD.matcher(
+            NORMALISE_THREAD.matcher(
+                NORMALISE_LAMBDA.matcher(input).replaceAll("")
+            ).replaceAll("$1$2]")
+        ).replaceAll("$1]");
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/debug/SelfReconcilingRandom.java b/test/simulator/main/org/apache/cassandra/simulator/debug/SelfReconcilingRandom.java
new file mode 100644
index 0000000..444ec44
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/debug/SelfReconcilingRandom.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.debug;
+
+import java.util.function.Supplier;
+
+import org.apache.cassandra.simulator.RandomSource;
+import org.hsqldb.lib.IntKeyLongValueHashMap;
+
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+
+public class SelfReconcilingRandom implements Supplier<RandomSource>
+{
+    static class Map extends IntKeyLongValueHashMap
+    {
+        public boolean put(int i, long v)
+        {
+            int size = this.size();
+            super.addOrRemove((long)i, (long)v, (Object)null, (Object)null, false);
+            return size != this.size();
+        }
+    }
+    final Map map = new Map();
+    long[] tmp = new long[1];
+    boolean isNextPrimary = true;
+
+    static abstract class AbstractVerifying extends RandomSource.Abstract
+    {
+        final RandomSource wrapped;
+        int cur = 0;
+
+        protected AbstractVerifying(RandomSource wrapped)
+        {
+            this.wrapped = wrapped;
+        }
+
+        abstract void next(long verify);
+
+        public int uniform(int min, int max)
+        {
+            int v = wrapped.uniform(min, max);
+            next(v);
+            return v;
+        }
+
+        public long uniform(long min, long max)
+        {
+            long v = wrapped.uniform(min, max);
+            next(v);
+            return v;
+        }
+
+        public float uniformFloat()
+        {
+            float v = wrapped.uniformFloat();
+            next(Float.floatToIntBits(v));
+            return v;
+        }
+
+        public double uniformDouble()
+        {
+            double v = wrapped.uniformDouble();
+            next(Double.doubleToLongBits(v));
+            return v;
+        }
+    }
+
+    public RandomSource get()
+    {
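+        // the first source handed out is the "primary": it records every value it produces in the map;
+        // the second replays the same positions and fails (via failWithOOM) on any divergence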
+        if (isNextPrimary)
+        {
+            isNextPrimary = false;
+            return new AbstractVerifying(new RandomSource.Default())
+            {
+                void next(long verify)
+                {
+                    map.put(++cur, verify);
+                }
+
+                public void reset(long seed)
+                {
+                    map.clear();
+                    cur = 0;
+                    wrapped.reset(seed);
+                }
+
+                public long reset()
+                {
+                    map.clear();
+                    cur = 0;
+                    return wrapped.reset();
+                }
+            };
+        }
+
+        return new AbstractVerifying(new RandomSource.Default())
+        {
+            void next(long v)
+            {
+                if (!map.get(++cur, tmp))
+                    throw failWithOOM();
+                map.remove(cur);
+                if (tmp[0] != v)
+                    throw failWithOOM();
+            }
+
+            public void reset(long seed)
+            {
+                cur = 0;
+                wrapped.reset(seed);
+            }
+
+            public long reset()
+            {
+                cur = 0;
+                return wrapped.reset();
+            }
+        };
+    }
+}
\ No newline at end of file
diff --git a/test/simulator/main/org/apache/cassandra/simulator/package-info.java b/test/simulator/main/org/apache/cassandra/simulator/package-info.java
new file mode 100644
index 0000000..896ce4e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/package-info.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * The Simulator is a facility for deterministic pseudorandom execution of anything
+ * from a snippet of code to an entire Cassandra cluster. A simulation execution
+ * consists of an {@link org.apache.cassandra.simulator.ActionPlan} containing various
+ * {@link org.apache.cassandra.simulator.Action}s to simulate. Each {@code Action} represents
+ * some discrete unit of simulation and its consequences - which are themselves {@code Action}s.
+ * A simulation completes once all transitive consequences of the initial actions have completed.
+ * The execution order is determined by an {@link org.apache.cassandra.simulator.ActionSchedule}
+ * and its {@link org.apache.cassandra.simulator.RunnableActionScheduler} and
+ * {@link org.apache.cassandra.simulator.FutureActionScheduler}.
+ *
+ * An {@code Action} may be simple, representing some simulation book-keeping or setup, but most
+ * commonly derives from {@link org.apache.cassandra.simulator.systems.SimulatedAction}.
+ * These simulated actions represent some execution context on an {@link org.apache.cassandra.simulator.systems.InterceptibleThread},
+ * such as a task execution on an executor or a new thread. An {@code Action} is typically a one-shot
+ * concept, but these threaded actions simultaneously represent any initial synchronous step taken
+ * by a threaded execution context and the continuation of that state - through the creation of new
+ * child and continuation {@code Action} that resume execution as the schedule demands. Note that such
+ * a threaded action may have no consequences while still being in progress, e.g. when the thread enters
+ * an unbounded wait condition, in which case it expects to be awoken by some other threaded action's
+ * interaction with it.
+ *
+ * {@code Action}s have various causality mechanisms to control their behaviour, in the form of {@link org.apache.cassandra.simulator.Action.Modifier}
+ * options that may be applied to themselves, transitively to all of their descendants, or to specific verbs that are
+ * consequences of a {@code SimulatedAction}, and in the form of {@link org.apache.cassandra.simulator.OrderOn}
+ * constraints that may force actions to occur in strictly sequential order, or simply to occur at some maximum rate.
+ * This latter facility supports both the simulation of executor services and the constraints needed for a correct simulation,
+ * e.g. when some action's children must occur in sequential order to correctly simulate some system operation.
+ *
+ * Simulation is achieved through various simulated systems ({@link org.apache.cassandra.simulator.systems})
+ * and byte weaving ({@link org.apache.cassandra.simulator.asm}) of implementation classes, and chaos is introduced
+ * via {@link org.apache.cassandra.simulator.cluster.KeyspaceActions}, and also at {@link org.apache.cassandra.utils.Nemesis}
+ * points and on monitor entry and exit. Collectively these systems take control of: monitors, lock support, blocking
+ * data structures, threads and executors, random number generation, time, paxos ballots, the network and failure detection.
+ *
+ * The first major simulation introduced was {@link org.apache.cassandra.simulator.paxos.PaxosSimulationRunner} which
+ * may be invoked on the command line to simulate a cluster and some proportion of LWT read/write operations.
+ *
+ * For the simulator to operate correctly, be sure to run {@code ant simulator-jars} and to include the
+ * following JVM parameters:
+ *    -javaagent:/path/to/project.dir/build/test/lib/simulator-asm.jar
+ *    -Xbootclasspath/a:/path/to/project.dir/build/test/lib/simulator-bootstrap.jar
+ *    -Dlogback.configurationFile=file:///path/to/project.dir/test/conf/logback-simulator.xml
+ *
+ * For a reproducible simulation, make sure to use the same JVM and JDK for all runs, and to supply the following
+ * JVM parameters:
+ *    -XX:ActiveProcessorCount=???
+ *    -Xmx???
+ *    -XX:-BackgroundCompilation
+ *    -XX:CICompilerCount=1
+ *
+ * For performance reasons the following parameters are recommended:
+ *    -XX:Tier4CompileThreshold=1000
+ *    -XX:-TieredCompilation
+ *    -XX:ReservedCodeCacheSize=256M
+ *
+ * When debugging, the following parameters are recommended:
+ *    -Dcassandra.test.simulator.livenesscheck=false
+ *    -Dcassandra.test.simulator.debug=true
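+ *
+ * Purely as an illustration (the exact sub-commands and options are defined by the chosen runner, e.g.
+ * {@link org.apache.cassandra.simulator.paxos.PaxosSimulationRunner}), a full invocation assembling the
+ * above parameters might look something like:
+ *    java -javaagent:/path/to/project.dir/build/test/lib/simulator-asm.jar \
+ *         -Xbootclasspath/a:/path/to/project.dir/build/test/lib/simulator-bootstrap.jar \
+ *         -Dlogback.configurationFile=file:///path/to/project.dir/test/conf/logback-simulator.xml \
+ *         -XX:ActiveProcessorCount=<n> -Xmx<size> -XX:-BackgroundCompilation -XX:CICompilerCount=1 \
+ *         -cp <test classpath> org.apache.cassandra.simulator.paxos.PaxosSimulationRunner run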
+ *
+ * Note also that, when debugging, the evaluation of arbitrary methods may modify the program output if those methods
+ * invoke any simulated methods, notably if the method is synchronised or otherwise uses a monitor, invokes
+ * {@code System.identityHashCode} or a non-overridden {@code Object.hashCode()}, or in many other scenarios.
+ * To support debugging, this behaviour is switched off as far as possible when evaluating any {@code toString()}
+ * method, but be prepared for the possibility that your use of the debugger perturbs program execution.
+ */
+package org.apache.cassandra.simulator;
\ No newline at end of file
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/Ballots.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/Ballots.java
new file mode 100644
index 0000000..08e1b2e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/Ballots.java
@@ -0,0 +1,272 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import java.util.Iterator;
+import java.util.List;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.ReadExecutionController;
+import org.apache.cassandra.db.SinglePartitionReadCommand;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.TimeUUIDType;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.simulator.systems.NonInterceptible;
+import org.apache.cassandra.simulator.systems.NonInterceptible.Permit;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Shared;
+
+import static java.lang.Long.max;
+import static java.util.Arrays.stream;
+import static org.apache.cassandra.db.SystemKeyspace.loadPaxosState;
+import static org.apache.cassandra.service.paxos.Commit.latest;
+import static org.apache.cassandra.service.paxos.PaxosState.unsafeGetIfPresent;
+import static org.apache.cassandra.simulator.systems.NonInterceptible.Permit.OPTIONAL;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+public class Ballots
+{
+    private static final ColumnMetadata PROMISE = paxosUUIDColumn("in_progress_ballot");
+    private static final ColumnMetadata PROPOSAL = paxosUUIDColumn("proposal_ballot");
+    private static final ColumnMetadata COMMIT = paxosUUIDColumn("most_recent_commit_at");
+
+    @Shared(scope = SIMULATION)
+    public static class LatestBallots
+    {
+        public final long promise;
+        public final long accept; // the ballot actually accepted
+        public final long acceptOf; // the ballot of the original proposal (i.e. for a re-proposal, accept != acceptOf)
+        public final long commit;
+        public final long persisted;
+
+        public LatestBallots(long promise, long accept, long acceptOf, long commit, long persisted)
+        {
+            this.promise = promise;
+            this.accept = accept;
+            this.acceptOf = acceptOf;
+            this.commit = commit;
+            this.persisted = persisted;
+        }
+
+        public long any()
+        {
+            return max(max(max(promise, accept), commit), persisted);
+        }
+
+        public long permanent()
+        {
+            return max(commit, persisted);
+        }
+
+        public String toString()
+        {
+            return "[" + promise + ',' + accept + ',' + commit + ',' + persisted + ']';
+        }
+    }
+
+    public static LatestBallots read(Permit permit, DecoratedKey key, TableMetadata metadata, int nowInSec, boolean includeEmptyProposals)
+    {
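+        // merge the in-memory paxos state (if any) with the persisted system.paxos state, taking the latest
+        // promise/accept/commit of each, and also record the latest write visible in the base table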
+        return NonInterceptible.apply(permit, () -> {
+            PaxosState.Snapshot state = unsafeGetIfPresent(key, metadata);
+            PaxosState.Snapshot persisted = loadPaxosState(key, metadata, nowInSec);
+            TimeUUID promised = latest(persisted.promised, state == null ? null : state.promised);
+            Commit.Accepted accepted = latest(persisted.accepted, state == null ? null : state.accepted);
+            Commit.Committed committed = latest(persisted.committed, state == null ? null : state.committed);
+            long baseTable = latestBallotFromBaseTable(key, metadata);
+            return new LatestBallots(
+                promised.unixMicros(),
+                accepted == null || accepted.update.isEmpty() ? 0L : accepted.ballot.unixMicros(),
+                accepted == null || accepted.update.isEmpty() ? 0L : accepted.update.stats().minTimestamp,
+                latestBallot(committed.update.iterator()),
+                baseTable
+            );
+        });
+    }
+
+    static LatestBallots[][] read(Permit permit, Cluster cluster, String keyspace, String table, int[] primaryKeys, int[][] replicasForKeys, boolean includeEmptyProposals)
+    {
+        return NonInterceptible.apply(permit, () -> {
+            LatestBallots[][] result = new LatestBallots[primaryKeys.length][];
+            for (int i = 0 ; i < primaryKeys.length ; ++i)
+            {
+                int primaryKey = primaryKeys[i];
+                result[i] = stream(replicasForKeys[i])
+                            .mapToObj(cluster::get)
+                            .map(node -> node.unsafeApplyOnThisThread((p, ks, tbl, pk, ie) -> {
+                                TableMetadata metadata = Keyspace.open(ks).getColumnFamilyStore(tbl).metadata.get();
+                                DecoratedKey key = metadata.partitioner.decorateKey(Int32Type.instance.decompose(pk));
+                                return read(p, key, metadata, FBUtilities.nowInSeconds(), ie);
+                            }, permit, keyspace, table, primaryKey, includeEmptyProposals))
+                            .toArray(LatestBallots[]::new);
+            }
+            return result;
+        });
+    }
+
+    public static String paxosDebugInfo(DecoratedKey key, TableMetadata metadata, int nowInSec)
+    {
+        return NonInterceptible.apply(OPTIONAL, () -> {
+            PaxosState.Snapshot state = unsafeGetIfPresent(key, metadata);
+            PaxosState.Snapshot persisted = loadPaxosState(key, metadata, nowInSec);
+            long[] memtable = latestBallotsFromPaxosMemtable(key, metadata);
+            PaxosState.Snapshot cache = state == null ? persisted : state;
+            long baseTable = latestBallotFromBaseTable(key, metadata);
+            long baseMemtable = latestBallotFromBaseMemtable(key, metadata);
+            return debugBallot(cache.promised, memtable[0], persisted.promised) + ", "
+                   + debugBallot(cache.accepted, memtable[1], persisted.accepted) + ", "
+                   + debugBallot(cache.committed, memtable[2], persisted.committed) + ", "
+                   + debugBallot(baseMemtable, 0L, baseTable);
+        });
+    }
+
+    private static ColumnMetadata paxosUUIDColumn(String name)
+    {
+        return ColumnMetadata.regularColumn(SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.PAXOS, name, TimeUUIDType.instance);
+    }
+
+    /**
+     * Load the latest promise, proposal and commit ballot timestamps for the key from the system.paxos memtables
+     */
+    private static long[] latestBallotsFromPaxosMemtable(DecoratedKey key, TableMetadata metadata)
+    {
+        ColumnFamilyStore paxos = Keyspace.open("system").getColumnFamilyStore("paxos");
+        long[] result = new long[3];
+        List<Memtable> memtables = ImmutableList.copyOf(paxos.getTracker().getView().getAllMemtables());
+        for (Memtable memtable : memtables)
+        {
+            Row row = getRow(key, metadata, paxos, memtable);
+            if (row == null)
+                continue;
+
+            Cell promise = row.getCell(PROMISE);
+            if (promise != null && promise.value() != null)
+                result[0] = promise.timestamp();
+            Cell proposal = row.getCell(PROPOSAL);
+            if (proposal != null && proposal.value() != null)
+                result[1] = proposal.timestamp();
+            Cell commit = row.getCell(COMMIT);
+            if (commit != null && commit.value() != null)
+                result[2] = commit.timestamp();
+        }
+        return result;
+    }
+
+    private static Row getRow(DecoratedKey key, TableMetadata metadata, ColumnFamilyStore paxos, Memtable memtable)
+    {
+        final ClusteringComparator comparator = paxos.metadata.get().comparator;
+        UnfilteredRowIterator iter = memtable.rowIterator(key, Slices.with(comparator, Slice.make(comparator.make(metadata.id))), ColumnFilter.NONE, false, SSTableReadsListener.NOOP_LISTENER);
+        if (iter == null || !iter.hasNext())
+            return null;
+        return (Row) iter.next();
+    }
+
+    public static long latestBallotFromBaseTable(DecoratedKey key, TableMetadata metadata)
+    {
+        SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(metadata, 0, key, Slice.ALL);
+        try (ReadExecutionController controller = cmd.executionController(); UnfilteredPartitionIterator partitions = cmd.executeLocally(controller))
+        {
+            if (!partitions.hasNext())
+                return 0L;
+
+            try (UnfilteredRowIterator rows = partitions.next())
+            {
+                return latestBallot(rows);
+            }
+        }
+    }
+
+    private static long latestBallotFromBaseMemtable(DecoratedKey key, TableMetadata metadata)
+    {
+        ColumnFamilyStore table = Keyspace.openAndGetStore(metadata);
+        long timestamp = 0;
+        List<Memtable> memtables = ImmutableList.copyOf(table.getTracker().getView().getAllMemtables());
+        for (Memtable memtable : memtables)
+        {
+            try (UnfilteredRowIterator partition = memtable.rowIterator(key))
+            {
+                if (partition == null)
+                    continue;
+
+                timestamp = max(timestamp, latestBallot(partition));
+            }
+        }
+        return timestamp;
+    }
+
+    private static long latestBallot(Iterator<? extends Unfiltered> partition)
+    {
+        long timestamp = 0L;
+        while (partition.hasNext())
+        {
+            Unfiltered unfiltered = partition.next();
+            if (!unfiltered.isRow())
+                continue;
+            timestamp = ((Row) unfiltered).accumulate((cd, v) -> max(v, cd.maxTimestamp()), timestamp);
+        }
+        return timestamp;
+    }
+
+    private static String debugBallot(Commit cache, long memtable, Commit persisted)
+    {
+        return debugBallot(cache == null ? null : cache.ballot, memtable, persisted == null ? null : persisted.ballot);
+    }
+
+    private static String debugBallot(TimeUUID cache, long memtable, TimeUUID persisted)
+    {
+        return debugBallot(timestamp(cache), memtable, timestamp(persisted));
+    }
+
+    private static String debugBallot(long cache, long memtable, long persisted)
+    {
+        return debugBallotVsMemtable(cache, memtable)
+               + (cache == persisted ? "" : '(' + debugBallotVsMemtable(persisted, memtable) + ')');
+    }
+
+    private static String debugBallotVsMemtable(long value, long memtable)
+    {
+        return value + (memtable == value && memtable != 0 ? "*" : "");
+    }
+
+    private static long timestamp(TimeUUID a)
+    {
+        return a == null ? 0L : a.unixMicros();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/HistoryChecker.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/HistoryChecker.java
new file mode 100644
index 0000000..d1e0771
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/HistoryChecker.java
@@ -0,0 +1,350 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Queue;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static java.lang.Boolean.FALSE;
+import static java.lang.Boolean.TRUE;
+import static java.lang.Integer.max;
+import static org.apache.cassandra.simulator.paxos.HistoryChecker.Witness.Type.READ;
+import static org.apache.cassandra.simulator.paxos.HistoryChecker.Witness.Type.SELF;
+import static org.apache.cassandra.simulator.paxos.HistoryChecker.Witness.Type.UPDATE_SUCCESS;
+import static org.apache.cassandra.simulator.paxos.HistoryChecker.Witness.Type.UPDATE_UNKNOWN;
+
+/**
+ * Linearizability checker.
+ * <p>
+ * Since the partition maintains two total histories of successful operations, we simply verify that what we view
+ * is consistent with this history, that each copy of the history is consistent, and that no history is viewed
+ * backwards or forwards in time (i.e. that the time periods for which each history is witnessable are disjoint).
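+ * <p>
+ * As an illustrative example: if one read witnesses the sequence [1,2] for a key while another read witnesses
+ * [1,3], the two views cannot both be prefixes of a single total history, and a {@link HistoryViolation} is
+ * raised for that primary key.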
+ */
+class HistoryChecker
+{
+    private static final Logger logger = LoggerFactory.getLogger(HistoryChecker.class);
+
+    static class Witness
+    {
+        enum Type { UPDATE_SUCCESS, UPDATE_UNKNOWN, READ, SELF }
+
+        final Witness.Type type;
+        final int eventId;
+        final int start;
+        final int end;
+
+        Witness(Witness.Type type, int eventId, int start, int end)
+        {
+            this.type = type;
+            this.eventId = eventId;
+            this.start = start;
+            this.end = end;
+        }
+
+        public String toString()
+        {
+            return String.format("((%3d,%3d),%s,%3d)", start, end, type, eventId);
+        }
+    }
+
+    static class VerboseWitness extends Witness
+    {
+        final int[] witnessSequence;
+
+        VerboseWitness(int eventId, int start, int end, int[] witnessSequence)
+        {
+            super(SELF, eventId, start, end);
+            this.witnessSequence = witnessSequence;
+        }
+
+        public String toString()
+        {
+            return String.format("((%3d,%3d), WITNESS, %s)", start, end, Arrays.toString(witnessSequence));
+        }
+    }
+
+    static class Event
+    {
+        final List<Witness> log = new ArrayList<>();
+
+        final int eventId;
+        int eventPosition = -1;
+        int[] witnessSequence;
+        int visibleBy = Integer.MAX_VALUE; // witnessed by at least this time
+        int visibleUntil = -1;             // witnessed until at least this time (i.e. witnessed nothing newer by then)
+        Boolean result;                    // unknown, success or (implied by not being witnessed) failure
+
+        Event(int eventId)
+        {
+            this.eventId = eventId;
+        }
+    }
+
+    final int primaryKey;
+    private final Queue<Event> unwitnessed = new ArrayDeque<>();
+    private Event[] byId = new Event[128];
+    private Event[] events = new Event[16];
+
+    HistoryChecker(int primaryKey)
+    {
+        this.primaryKey = primaryKey;
+    }
+
+    Event byId(int id)
+    {
+        if (byId.length <= id)
+            byId = Arrays.copyOf(byId, Math.max(id + 1, byId.length * 2));
+        return byId[id];
+    }
+
+    Event setById(int id, Event event)
+    {
+        if (byId.length <= id)
+            byId = Arrays.copyOf(byId, Math.max(id + 1, byId.length * 2));
+        return byId[id] = event;
+    }
+
+    void witness(Observation witness, int[] witnessSequence, int start, int end)
+    {
+        int eventPosition = witnessSequence.length;
+        int eventId = eventPosition == 0 ? -1 : witnessSequence[eventPosition - 1];
+        setById(witness.id, new Event(witness.id)).log.add(new VerboseWitness(witness.id, start, end, witnessSequence));
+        Event event = get(eventPosition, eventId);
+        recordWitness(event, witness, witnessSequence);
+        recordVisibleBy(event, end);
+        recordVisibleUntil(event, start);
+
+        // see if any of the unwitnessed events can be ruled out
+        if (!unwitnessed.isEmpty())
+        {
+            Iterator<Event> iter = unwitnessed.iterator();
+            while (iter.hasNext())
+            {
+                Event e = iter.next();
+                if (e.visibleBy < start)
+                {
+                    if (e.result == null)
+                    {
+                        // still accessible byId, so if we witness it later we will flag the inconsistency
+                        e.result = FALSE;
+                        iter.remove();
+                    }
+                    else if (e.result)
+                    {
+                        throw fail(primaryKey, "%d witnessed as absent by %d", e.eventId, witness.id);
+                    }
+                }
+            }
+        }
+    }
+
+    void applied(int eventId, int start, int end, boolean success)
+    {
+        Event event = byId(eventId);
+        if (event == null)
+        {
+            setById(eventId, event = new Event(eventId));
+            unwitnessed.add(event);
+        }
+
+        event.log.add(new Witness(success ? UPDATE_SUCCESS : UPDATE_UNKNOWN, eventId, start, end));
+        recordVisibleUntil(event, start);
+        recordVisibleBy(event, end); // even if the result is unknown, it must be visible to other operations by the time we terminate
+        if (success)
+        {
+            if (event.result == FALSE)
+                throw fail(primaryKey, "witnessed absence of %d but event returned success", eventId);
+            event.result = TRUE;
+        }
+    }
+
+    void recordWitness(Event event, Observation witness, int[] witnessSequence)
+    {
+        recordWitness(event, witness, witnessSequence.length, witnessSequence);
+    }
+
+    void recordWitness(Event event, Observation witness, int eventPosition, int[] witnessSequence)
+    {
+        while (true)
+        {
+            event.log.add(new Witness(READ, witness.id, witness.start, witness.end));
+            if (event.witnessSequence != null)
+            {
+                if (!Arrays.equals(event.witnessSequence, witnessSequence))
+                    throw fail(primaryKey, "%s previously witnessed %s", witnessSequence, event.witnessSequence);
+                return;
+            }
+
+            event.witnessSequence = witnessSequence;
+            event.eventPosition = eventPosition;
+
+            event = prev(event);
+            if (event == null)
+                break;
+
+            if (event.witnessSequence != null)
+            {
+                // verify it's a strict prefix
+                if (!equal(event.witnessSequence, witnessSequence, witnessSequence.length - 1))
+                    throw fail(primaryKey, "%s previously witnessed %s", witnessSequence, event.witnessSequence);
+                break;
+            }
+
+            // if our predecessor event hasn't been witnessed directly, witness it by this event, even if
+            // we say nothing about the times it may have been witnessed (besides those implied by the write event)
+            eventPosition -= 1;
+            witnessSequence = Arrays.copyOf(witnessSequence, eventPosition);
+        }
+    }
+
+    void recordVisibleBy(Event event, int visibleBy)
+    {
+        if (visibleBy < event.visibleBy)
+        {
+            event.visibleBy = visibleBy;
+            Event prev = prev(event);
+            if (prev != null && prev.visibleUntil >= visibleBy)
+                throw fail(primaryKey, "%s not witnessed >= %d, but also witnessed <= %d", event.witnessSequence, event.eventId, prev.visibleUntil, event.visibleBy);
+        }
+    }
+
+    void recordVisibleUntil(Event event, int visibleUntil)
+    {
+        if (visibleUntil > event.visibleUntil)
+        {
+            event.visibleUntil = visibleUntil;
+            Event next = next(event);
+            if (next != null && visibleUntil >= next.visibleBy)
+                throw fail(primaryKey, "%s %d not witnessed >= %d, but also witnessed <= %d", next.witnessSequence, next.eventId, event.visibleUntil, next.visibleBy);
+        }
+    }
+
+    /**
+     * Initialise the Event representing both eventPosition and eventId for witnessing
+     */
+    Event get(int eventPosition, int eventId)
+    {
+        if (eventPosition >= events.length)
+            events = Arrays.copyOf(events, max(eventPosition + 1, events.length * 2));
+
+        Event event = events[eventPosition];
+        if (event == null)
+        {
+            if (eventId < 0)
+            {
+                events[eventPosition] = event = new Event(eventId);
+            }
+            else
+            {
+                event = byId(eventId);
+                if (event != null)
+                {
+                    if (event.eventPosition >= 0)
+                        throw fail(primaryKey, "%d occurs at positions %d and %d", eventId, eventPosition, event.eventPosition);
+                    events[eventPosition] = event;
+                    unwitnessed.remove(event);
+                }
+                else
+                {
+                    setById(eventId, events[eventPosition] = event = new Event(eventId));
+                }
+            }
+        }
+        else
+        {
+            if (eventId != event.eventId)
+                throw fail(primaryKey, "(eventId, eventPosition): (%d, %d) != (%d, %d)", eventId, eventPosition, event.eventId, event.eventPosition);
+            else if (eventPosition != event.eventPosition)
+                throw fail(primaryKey, "%d occurs at positions %d and %d", eventId, eventPosition, event.eventPosition);
+        }
+        return event;
+    }
+
+    Event prev(Event event)
+    {
+        // we can reach here via recordOutcome without knowing our witnessSequence,
+        // in which case we won't know our predecessor event, so we cannot do anything useful
+        if (event.witnessSequence == null)
+            return null;
+
+        int eventPosition = event.eventPosition - 1;
+        if (eventPosition < 0)
+            return null;
+
+        // initialise the event, if necessary importing information from byId
+        return get(eventPosition, eventPosition == 0 ? -1 : event.witnessSequence[eventPosition - 1]);
+    }
+
+    Event next(Event event)
+    {
+        int eventPosition = event.eventPosition + 1;
+        if (eventPosition == 0 || eventPosition >= events.length)
+            return null;
+
+        // we cannot initialise the event meaningfully, so just return what is already known (if anything)
+        return events[eventPosition];
+    }
+
+    void print()
+    {
+        for (Event e : events)
+        {
+            if (e == null) break;
+            logger.error(String.format("%d: (%4d,%4d) %s %s", primaryKey, e.visibleBy, e.visibleUntil, Arrays.toString(e.witnessSequence), e.log));
+        }
+        for (Event e : byId)
+        {
+            if (e == null) continue;
+            logger.error("{}: {}", e.eventId, e.log);
+        }
+    }
+
+    static Error fail(int primaryKey, String message, Object ... params)
+    {
+        for (int i = 0 ; i < params.length ; ++i)
+            if (params[i] instanceof int[]) params[i] = Arrays.toString((int[]) params[i]);
+        throw new HistoryViolation(primaryKey, "history violation on " + primaryKey + ": " + String.format(message, params));
+    }
+
+    static boolean equal(int[] a, int [] b, int count)
+    {
+        for (int i = 0 ; i < count ; ++i)
+            if (a[i] != b[i])
+                return false;
+        return true;
+    }
+
+    static Integer causedBy(Throwable failure)
+    {
+        if (failure == null || failure.getMessage() == null)
+            return null;
+
+        if (!(failure instanceof HistoryViolation))
+            return causedBy(failure.getCause());
+
+        return ((HistoryViolation) failure).primaryKey;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/HistoryViolation.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/HistoryViolation.java
new file mode 100644
index 0000000..6712ab6
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/HistoryViolation.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public class HistoryViolation extends AssertionError
+{
+    final int primaryKey;
+
+    public HistoryViolation(int primaryKey, Object detailMessage)
+    {
+        super(detailMessage);
+        this.primaryKey = primaryKey;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/Observation.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/Observation.java
new file mode 100644
index 0000000..546fd31
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/Observation.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+class Observation implements Comparable<Observation>
+{
+    final int id;
+    final Object[][] result;
+    final int start;
+    final int end;
+
+    Observation(int id, Object[][] result, int start, int end)
+    {
+        this.id = id;
+        this.result = result;
+        this.start = start;
+        this.end = end;
+    }
+
+    // computes a PARTIAL ORDER on when the outcome occurred, i.e. for many pair-wise comparisons the answer is 0
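+    // e.g. an observation that ended before another started precedes it; observations whose windows overlap compare as 0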
+    public int compareTo(Observation that)
+    {
+        if (this.end < that.start)
+            return -1;
+        if (that.end < this.start)
+            return 1;
+        return 0;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/PairOfSequencesPaxosSimulation.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/PairOfSequencesPaxosSimulation.java
new file mode 100644
index 0000000..77eefb3
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/PairOfSequencesPaxosSimulation.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.LongSupplier;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.ListType;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.distributed.impl.Instance;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.ActionListener;
+import org.apache.cassandra.simulator.ActionPlan;
+import org.apache.cassandra.simulator.RunnableActionScheduler;
+import org.apache.cassandra.simulator.Actions;
+import org.apache.cassandra.simulator.cluster.ClusterActions;
+import org.apache.cassandra.simulator.Debug;
+import org.apache.cassandra.simulator.cluster.KeyspaceActions;
+import org.apache.cassandra.simulator.systems.SimulatedActionTask;
+import org.apache.cassandra.simulator.systems.SimulatedSystems;
+import org.apache.cassandra.simulator.utils.IntRange;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static java.lang.Boolean.TRUE;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.ANY;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE_NO_TIMEOUTS;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.STREAM_LIMITED;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.TIME_AND_STREAM_LIMITED;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.TIME_LIMITED;
+import static org.apache.cassandra.simulator.Debug.EventType.PARTITION;
+import static org.apache.cassandra.simulator.paxos.HistoryChecker.fail;
+
+@SuppressWarnings("unused")
+public class PairOfSequencesPaxosSimulation extends PaxosSimulation
+{
+    private static final Logger logger = LoggerFactory.getLogger(PairOfSequencesPaxosSimulation.class);
+
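+    // the workload appends each successful update's id to two mirrored representations of the same sequence
+    // (seq1 as comma-delimited text, seq2 as a list<int>); reads check the two agree with each other and with
+    // count, and feed the witnessed sequence to the per-key HistoryChecker (see VerifyingOperation below)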
+    private static final String KEYSPACE = "simple_paxos_simulation";
+    private static final String TABLE = "tbl";
+    private static final String CREATE_TABLE = "CREATE TABLE " + KEYSPACE + ".tbl (pk int, count int, seq1 text, seq2 list<int>, PRIMARY KEY (pk))";
+    private static final String INSERT = "INSERT INTO " + KEYSPACE + ".tbl (pk, count, seq1, seq2) VALUES (?, 0, '', []) IF NOT EXISTS";
+    private static final String INSERT1 = "INSERT INTO " + KEYSPACE + ".tbl (pk, count, seq1, seq2) VALUES (?, 0, '', []) USING TIMESTAMP 0";
+    private static final String UPDATE = "UPDATE " + KEYSPACE + ".tbl SET count = count + 1, seq1 = seq1 + ?, seq2 = seq2 + ? WHERE pk = ? IF EXISTS";
+    private static final String SELECT = "SELECT pk, count, seq1, seq2 FROM  " + KEYSPACE + ".tbl WHERE pk = ?";
+    private static final ListType<Integer> LIST_TYPE = ListType.getInstance(Int32Type.instance, true);
+
+    class VerifyingOperation extends Operation
+    {
+        final HistoryChecker historyChecker;
+        public VerifyingOperation(int id, IInvokableInstance instance, ConsistencyLevel consistencyLevel, int primaryKey, HistoryChecker historyChecker)
+        {
+            super(primaryKey, id, instance, "SELECT", SELECT, consistencyLevel, null, primaryKey);
+            this.historyChecker = historyChecker;
+        }
+
+        void verify(Observation outcome)
+        {
+            (outcome.result != null ? successfulReads : failedReads).incrementAndGet();
+
+            if (outcome.result == null)
+                return;
+
+            if (outcome.result.length != 1)
+                throw fail(primaryKey, "#result (%s) != 1", Arrays.toString(outcome.result));
+
+            Object[] row = outcome.result[0];
+            // first verify internally consistent
+            int count = row[1] == null ? 0 : (Integer) row[1];
+            int[] seq1 = Arrays.stream((row[2] == null ? "" : (String) row[2]).split(","))
+                               .filter(s -> !s.isEmpty())
+                               .mapToInt(Integer::parseInt)
+                               .toArray();
+            int[] seq2 = ((List<Integer>) (row[3] == null ? emptyList() : row[3]))
+                         .stream().mapToInt(x -> x).toArray();
+
+            if (!Arrays.equals(seq1, seq2))
+                throw fail(primaryKey, "%s != %s", seq1, seq2);
+
+            if (seq1.length != count)
+                throw fail(primaryKey, "%d != #%s", count, seq1);
+
+            historyChecker.witness(outcome, seq1, outcome.start, outcome.end);
+        }
+    }
+
+    class NonVerifyingOperation extends Operation
+    {
+        public NonVerifyingOperation(int id, IInvokableInstance instance, ConsistencyLevel consistencyLevel, int primaryKey, HistoryChecker historyChecker)
+        {
+            super(primaryKey, id, instance, "SELECT", SELECT, consistencyLevel, null, primaryKey);
+        }
+
+        void verify(Observation outcome)
+        {
+        }
+    }
+
+    public class ModifyingOperation extends Operation
+    {
+        final HistoryChecker historyChecker;
+        public ModifyingOperation(int id, IInvokableInstance instance, ConsistencyLevel commitConsistency, ConsistencyLevel serialConsistency, int primaryKey, HistoryChecker historyChecker)
+        {
+            super(primaryKey, id, instance, "UPDATE", UPDATE, commitConsistency, serialConsistency, id + ",", ByteBufferUtil.getArray(LIST_TYPE.decompose(singletonList(id))), primaryKey);
+            this.historyChecker = historyChecker;
+        }
+
+        void verify(Observation outcome)
+        {
+            (outcome.result != null ? successfulWrites : failedWrites).incrementAndGet();
+            if (outcome.result != null)
+            {
+                if (outcome.result.length != 1)
+                    throw fail(primaryKey, "Result: 1 != #%s", Arrays.toString(outcome.result));
+                if (outcome.result[0][0] != TRUE)
+                    throw fail(primaryKey, "Result != TRUE");
+            }
+            historyChecker.applied(outcome.id, outcome.start, outcome.end, outcome.result != null);
+        }
+    }
+
+    final ClusterActions.Options clusterOptions;
+    final float readRatio;
+    final IntRange withinKeyConcurrency;
+    final int concurrency;
+    final IntRange simulateKeyForSeconds;
+    final ConsistencyLevel serialConsistency;
+    final Debug debug;
+    final List<HistoryChecker> historyCheckers = new ArrayList<>();
+    final AtomicInteger successfulReads = new AtomicInteger();
+    final AtomicInteger successfulWrites = new AtomicInteger();
+    final AtomicInteger failedReads = new AtomicInteger();
+    final AtomicInteger failedWrites = new AtomicInteger();
+    final long seed;
+    final int[] primaryKeys;
+
+    public PairOfSequencesPaxosSimulation(SimulatedSystems simulated,
+                                          Cluster cluster,
+                                          ClusterActions.Options clusterOptions,
+                                          float readRatio,
+                                          int concurrency, IntRange simulateKeyForSeconds, IntRange withinKeyConcurrency,
+                                          ConsistencyLevel serialConsistency, RunnableActionScheduler scheduler, Debug debug,
+                                          long seed, int[] primaryKeys,
+                                          long runForNanos, LongSupplier jitter)
+    {
+        super(runForNanos < 0 ? STREAM_LIMITED : clusterOptions.topologyChangeLimit < 0 ? TIME_LIMITED : TIME_AND_STREAM_LIMITED,
+              simulated, cluster, scheduler, runForNanos, jitter);
+        this.readRatio = readRatio;
+        this.concurrency = concurrency;
+        this.simulateKeyForSeconds = simulateKeyForSeconds;
+        this.withinKeyConcurrency = withinKeyConcurrency;
+        this.serialConsistency = serialConsistency;
+        this.clusterOptions = clusterOptions;
+        this.debug = debug;
+        this.seed = seed;
+        this.primaryKeys = primaryKeys.clone();
+        Arrays.sort(this.primaryKeys);
+    }
+
+    public ActionPlan plan()
+    {
+        ActionPlan plan = new KeyspaceActions(simulated, KEYSPACE, TABLE, CREATE_TABLE, cluster,
+                                              clusterOptions, serialConsistency, this, primaryKeys, debug).plan();
+
+        plan = plan.encapsulate(ActionPlan.setUpTearDown(
+            ActionList.of(
+                cluster.stream().map(i -> simulated.run("Insert Partitions", i, executeForPrimaryKeys(INSERT1, primaryKeys)))
+            ),
+            ActionList.of(
+                cluster.stream().map(i -> SimulatedActionTask.unsafeTask("Shutdown " + i.broadcastAddress(), RELIABLE, RELIABLE_NO_TIMEOUTS, simulated, i, i::shutdown))
+            )
+        ));
+
+        final int nodes = cluster.size();
+        for (int primaryKey : primaryKeys)
+            historyCheckers.add(new HistoryChecker(primaryKey));
+
+        List<Supplier<Action>> primaryKeyActions = new ArrayList<>();
+        for (int pki = 0 ; pki < primaryKeys.length ; ++pki)
+        {
+            int primaryKey = primaryKeys[pki];
+            HistoryChecker historyChecker = historyCheckers.get(pki);
+            Supplier<Action> supplier = new Supplier<Action>()
+            {
+                int i = 0;
+
+                @Override
+                public Action get()
+                {
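+                    // pick a coordinator node uniformly at random (instance ids are 1-based)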
+                    int node = simulated.random.uniform(1, nodes + 1);
+                    IInvokableInstance instance = cluster.get(node);
+                    switch (serialConsistency)
+                    {
+                        default: throw new AssertionError();
+                        case LOCAL_SERIAL:
+                            if (simulated.snitch.dcOf(node) > 0)
+                            {
+                                // perform some queries against these nodes but don't expect them to be linearizable
+                                return new NonVerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker);
+                            }
+                        case SERIAL:
+                            return simulated.random.decide(readRatio)
+                                   ? new VerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker)
+                                   : new ModifyingOperation(i++, instance, ANY, serialConsistency, primaryKey, historyChecker);
+                    }
+                }
+
+                @Override
+                public String toString()
+                {
+                    return Integer.toString(primaryKey);
+                }
+            };
+
+            final ActionListener listener = debug.debug(PARTITION, simulated.time, cluster, KEYSPACE, primaryKey);
+            if (listener != null)
+            {
+                Supplier<Action> wrap = supplier;
+                supplier = new Supplier<Action>()
+                {
+                    @Override
+                    public Action get()
+                    {
+                        Action action = wrap.get();
+                        action.register(listener);
+                        return action;
+                    }
+
+                    @Override
+                    public String toString()
+                    {
+                        return wrap.toString();
+                    }
+                };
+            }
+
+            primaryKeyActions.add(supplier);
+        }
+
+        List<Integer> available = IntStream.range(0, primaryKeys.length).boxed().collect(Collectors.toList());
+        Action stream = Actions.infiniteStream(concurrency, new Supplier<Action>() {
+            @Override
+            public Action get()
+            {
+                int i = simulated.random.uniform(0, available.size());
+                int next = available.get(i);
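+                // remove the chosen index from the pool by swapping in the last element (O(1) removal)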
+                available.set(i, available.get(available.size() - 1));
+                available.remove(available.size() - 1);
+                long untilNanos = simulated.time.nanoTime() + SECONDS.toNanos(simulateKeyForSeconds.select(simulated.random));
+                int concurrency = withinKeyConcurrency.select(simulated.random);
+                Supplier<Action> supplier = primaryKeyActions.get(next);
+                // while this stream is finite, it participates in an infinite stream via its parent, so we want to permit termination while it's running
+                return Actions.infiniteStream(concurrency, new Supplier<Action>()
+                {
+                    @Override
+                    public Action get()
+                    {
+                        if (simulated.time.nanoTime() >= untilNanos)
+                        {
+                            available.add(next);
+                            return null;
+                        }
+                        return supplier.get();
+                    }
+
+                    @Override
+                    public String toString()
+                    {
+                        return supplier.toString();
+                    }
+                });
+            }
+
+            @Override
+            public String toString()
+            {
+                return "Primary Key Actions";
+            }
+        });
+
+        return simulated.execution.plan()
+                                  .encapsulate(plan)
+                                  .encapsulate(ActionPlan.interleave(singletonList(ActionList.of(stream))));
+    }
+
+    private IIsolatedExecutor.SerializableRunnable executeForPrimaryKeys(String cql, int[] primaryKeys)
+    {
+        return () -> {
+            for (int primaryKey : primaryKeys)
+                Instance.unsafeExecuteInternalWithResult(cql, primaryKey);
+        };
+    }
+
+    @Override
+    public TopologyChangeValidator newTopologyChangeValidator(Object id)
+    {
+        return new PaxosTopologyChangeVerifier(cluster, KEYSPACE, TABLE, id);
+    }
+
+    @Override
+    public RepairValidator newRepairValidator(Object id)
+    {
+        return new PaxosRepairValidator(cluster, KEYSPACE, TABLE, id);
+    }
+
+    @Override
+    void log(@Nullable Integer primaryKey)
+    {
+        if (primaryKey == null) historyCheckers.forEach(HistoryChecker::print);
+        else historyCheckers.stream().filter(h -> h.primaryKey == primaryKey).forEach(HistoryChecker::print);
+    }
+
+    @Override
+    public void run()
+    {
+        super.run();
+        logger.warn("Writes: {} successful, {} failed", successfulWrites, failedWrites);
+        logger.warn("Reads: {} successful {} failed", successfulReads, failedReads);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosClusterSimulation.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosClusterSimulation.java
new file mode 100644
index 0000000..0fd9135
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosClusterSimulation.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import java.io.IOException;
+
+import org.apache.cassandra.config.Config.PaxosVariant;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.ClusterSimulation;
+import org.apache.cassandra.simulator.utils.KindOfSequence;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.SERIAL;
+
+class PaxosClusterSimulation extends ClusterSimulation<PaxosSimulation> implements AutoCloseable
+{
+    @SuppressWarnings("UnusedReturnValue")
+    static class Builder extends ClusterSimulation.Builder<PaxosSimulation>
+    {
+        PaxosVariant initialPaxosVariant = PaxosVariant.v2;
+        PaxosVariant finalPaxosVariant = null;
+        Boolean stateCache;
+        ConsistencyLevel serialConsistency = SERIAL;
+
+        public Builder consistency(ConsistencyLevel serialConsistency)
+        {
+            this.serialConsistency = serialConsistency;
+            return this;
+        }
+
+        public Builder stateCache(Boolean stateCache)
+        {
+            this.stateCache = stateCache;
+            return this;
+        }
+
+        public Builder initialPaxosVariant(PaxosVariant variant)
+        {
+            initialPaxosVariant = variant;
+            return this;
+        }
+
+        public Builder finalPaxosVariant(PaxosVariant variant)
+        {
+            finalPaxosVariant = variant;
+            return this;
+        }
+
+        public PaxosClusterSimulation create(long seed) throws IOException
+        {
+            RandomSource random = randomSupplier.get();
+            random.reset(seed);
+            return new PaxosClusterSimulation(random, seed, uniqueNum, this);
+        }
+    }
+
+    PaxosClusterSimulation(RandomSource random, long seed, int uniqueNum, Builder builder) throws IOException
+    {
+        super(random, seed, uniqueNum, builder,
+              config -> config.set("paxos_variant", builder.initialPaxosVariant.name())
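+                              // unless explicitly configured, randomly choose whether to disable the paxos state cache (by setting its size to 0MiB)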
+                              .set("paxos_cache_size", (builder.stateCache != null ? builder.stateCache : random.uniformFloat() < 0.5) ? null : "0MiB")
+                              .set("paxos_state_purging", "repaired")
+                              .set("paxos_on_linearizability_violations", "log")
+        ,
+              (simulated, schedulers, cluster, options) -> {
+                  int[] primaryKeys = primaryKeys(seed, builder.primaryKeyCount());
+                  KindOfSequence.Period jitter = RandomSource.Choices.uniform(KindOfSequence.values()).choose(random)
+                                                                     .period(builder.schedulerJitterNanos(), random);
+                  return new PairOfSequencesPaxosSimulation(simulated, cluster, options.changePaxosVariantTo(builder.finalPaxosVariant),
+                                                            builder.readChance().select(random), builder.concurrency(), builder.primaryKeySeconds(), builder.withinKeyConcurrency(),
+                                                            builder.serialConsistency, schedulers, builder.debug(), seed,
+                                                            primaryKeys, builder.secondsToSimulate() >= 0 ? SECONDS.toNanos(builder.secondsToSimulate()) : -1,
+                                                            () -> jitter.get(random));
+              });
+    }
+
+    private static int[] primaryKeys(long seed, int count)
+    {
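+        // derive deterministic primary keys from the seed, spaced 1 << 20 apart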
+        int primaryKey = (int) (seed);
+        int[] primaryKeys = new int[count];
+        for (int i = 0 ; i < primaryKeys.length ; ++i)
+            primaryKeys[i] = primaryKey += 1 << 20;
+        return primaryKeys;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosRepairValidator.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosRepairValidator.java
new file mode 100644
index 0000000..572b8ff
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosRepairValidator.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.simulator.cluster.ClusterActionListener.RepairValidator;
+import org.apache.cassandra.simulator.cluster.Topology;
+
+import static java.util.Arrays.stream;
+import static org.apache.cassandra.simulator.systems.NonInterceptible.Permit.REQUIRED;
+
+public class PaxosRepairValidator implements RepairValidator
+{
+    final Cluster cluster;
+    final String keyspace;
+    final String table;
+    final Object id;
+
+    boolean isPaxos;
+    Topology topology;
+    Ballots.LatestBallots[][] ballotsBefore;
+
+    public PaxosRepairValidator(Cluster cluster, String keyspace, String table, Object id)
+    {
+        this.cluster = cluster;
+        this.keyspace = keyspace;
+        this.table = table;
+        this.id = id;
+    }
+
+    @Override
+    public void before(Topology topology, boolean repairPaxos, boolean repairOnlyPaxos)
+    {
+        if (repairOnlyPaxos)
+            return;
+
+        this.isPaxos = repairPaxos;
+        this.topology = topology;
+        this.ballotsBefore = Ballots.read(REQUIRED, cluster, keyspace, table, topology.primaryKeys, topology.replicasForKeys, false);
+    }
+
+    @Override
+    public void after()
+    {
+        if (ballotsBefore == null)
+            return;
+
+        int[] primaryKeys = topology.primaryKeys;
+        int[][] replicasForKeys = topology.replicasForKeys;
+        int quorumRf = topology.quorumRf;
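+        // a simple majority of the replication factor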
+        int quorum = quorumRf / 2 + 1;
+        Ballots.LatestBallots[][] ballotsAfter  = Ballots.read(REQUIRED, cluster, keyspace, table, primaryKeys, replicasForKeys, true);
+        for (int pki = 0; pki < primaryKeys.length ; ++pki)
+        {
+            Ballots.LatestBallots[] before = ballotsBefore[pki];
+            Ballots.LatestBallots[] after  = ballotsAfter[pki];
+
+            if (before.length != after.length || before.length != quorumRf)
+                throw new AssertionError("Inconsistent ownership information");
+
+            String kind;
+            long expectPersisted;
+            if (isPaxos)
+            {
+                long committedBefore = stream(before).mapToLong(Ballots.LatestBallots::permanent).max().orElse(0L);
+                // anything accepted by a quorum should be persisted
+                long acceptedBefore = stream(before).mapToLong(n -> n.accept).max().orElse(0L);
+                long acceptedOfBefore = stream(before).filter(n -> n.accept == acceptedBefore).mapToLong(n -> n.acceptOf).findAny().orElse(0L);
+                int countAccepted = (int) stream(before).filter(n -> n.accept == acceptedBefore).count();
+                expectPersisted = countAccepted >= quorum ? acceptedOfBefore : committedBefore;
+                kind = countAccepted >= quorum ? "agreed" : "committed";
+            }
+            else
+            {
+                expectPersisted = stream(before).mapToLong(n -> n.persisted).max().orElse(0L);
+                kind = "persisted";
+            }
+
+            int countAfter = (int) stream(after).filter(n -> n.persisted >= expectPersisted).count();
+            if (countAfter < quorum)
+                throw new AssertionError(String.format("%d: %d %s before %s but only persisted on %d after (out of %d)",
+                                                       primaryKeys[pki], expectPersisted, kind, id, countAfter, quorumRf));
+        }
+
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosSimulation.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosSimulation.java
new file mode 100644
index 0000000..63e84dc
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosSimulation.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import java.util.Map;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
+import java.util.function.LongSupplier;
+
+import javax.annotation.Nullable;
+
+import com.google.common.base.Throwables;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.apache.cassandra.service.paxos.BallotGenerator;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.ActionPlan;
+import org.apache.cassandra.simulator.ActionSchedule;
+import org.apache.cassandra.simulator.RunnableActionScheduler;
+import org.apache.cassandra.simulator.Simulation;
+import org.apache.cassandra.simulator.cluster.ClusterActionListener;
+import org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods;
+import org.apache.cassandra.simulator.systems.SimulatedQuery;
+import org.apache.cassandra.simulator.systems.SimulatedSystems;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.concurrent.Threads;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.DISPLAY_ORIGIN;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.simulator.paxos.HistoryChecker.causedBy;
+
+public abstract class PaxosSimulation implements Simulation, ClusterActionListener
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosSimulation.class);
+
+    abstract class Operation extends SimulatedQuery implements BiConsumer<Object[][], Throwable>
+    {
+        final int primaryKey;
+        final int id;
+        int start;
+
+        public Operation(int primaryKey, int id, IInvokableInstance instance,
+                         String idString, String query, ConsistencyLevel commitConsistency, ConsistencyLevel serialConsistency, Object... params)
+        {
+            super(primaryKey + "/" + id + ": " + idString, DISPLAY_ORIGIN, NONE, PaxosSimulation.this.simulated, instance, query, commitConsistency, serialConsistency, params);
+            this.primaryKey = primaryKey;
+            this.id = id;
+        }
+
+        public ActionList performAndRegister()
+        {
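+            // record a logical start time; the matching end time is captured when the outcome is observed in accept()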
+            start = logicalClock.incrementAndGet();
+            return super.performAndRegister();
+        }
+
+        @Override
+        public void accept(Object[][] success, Throwable failure)
+        {
+            if (failure != null && !(failure instanceof RequestExecutionException))
+            {
+                if (!simulated.failures.hasFailure() || !(failure instanceof UncheckedInterruptedException))
+                    logger.error("Unexpected exception", failure);
+                simulated.failures.accept(failure);
+                return;
+            }
+            else if (failure != null)
+            {
+                logger.trace("{}", failure.getMessage());
+            }
+
+            verify(new Observation(id, success, start, logicalClock.incrementAndGet()));
+        }
+
+        abstract void verify(Observation outcome);
+    }
+
+    final Cluster cluster;
+    final SimulatedSystems simulated;
+    final RunnableActionScheduler runnableScheduler;
+    final AtomicInteger logicalClock = new AtomicInteger(1);
+    final ActionSchedule.Mode mode;
+    final long runForNanos;
+    final LongSupplier jitter;
+
+    public PaxosSimulation(ActionSchedule.Mode mode, SimulatedSystems simulated, Cluster cluster, RunnableActionScheduler runnableScheduler, long runForNanos, LongSupplier jitter)
+    {
+        this.cluster = cluster;
+        this.simulated = simulated;
+        this.runnableScheduler = runnableScheduler;
+        this.runForNanos = runForNanos;
+        this.mode = mode;
+        this.jitter = jitter;
+    }
+
+    protected abstract ActionPlan plan();
+
+    public void run()
+    {
+        AtomicReference<CloseableIterator<?>> onFailedShutdown = new AtomicReference<>();
+        AtomicInteger shutdown = new AtomicInteger();
+
+        AtomicLong counter = new AtomicLong();
+        ScheduledExecutorPlus livenessChecker = null;
+        ScheduledFuture<?> liveness = null;
+        if (CassandraRelevantProperties.TEST_SIMULATOR_LIVENESS_CHECK.getBoolean())
+        {
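+            // periodically check that the simulation is making progress; if the event counter has not advanced between checks,
+            // fail the simulation, and if it then does not shut down cleanly, dump threads and force exit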
+            livenessChecker = ExecutorFactory.Global.executorFactory().scheduled("SimulationLiveness");
+            liveness = livenessChecker.scheduleWithFixedDelay(new Runnable()
+            {
+                long prev = 0;
+                @Override
+                public void run()
+                {
+                    Thread.currentThread().setUncaughtExceptionHandler((th, ex) -> {
+                        logger.error("Unexpected exception on {}", th, ex);
+                    });
+                    if (shutdown.get() > 0)
+                    {
+                        int attempts = shutdown.getAndIncrement();
+                        if (attempts > 2 || onFailedShutdown.get() == null)
+                        {
+                            logger.error("Failed to exit despite best efforts, dumping threads and forcing shutdown");
+                            for (Map.Entry<Thread, StackTraceElement[]> stes : Thread.getAllStackTraces().entrySet())
+                            {
+                                logger.error("{}", stes.getKey());
+                                logger.error("{}", Threads.prettyPrint(stes.getValue(), false, "\n"));
+                            }
+
+                            System.exit(1);
+                        }
+                        else if (attempts > 1)
+                        {
+                            logger.error("Failed to exit cleanly, force closing simulation");
+                            onFailedShutdown.get().close();
+                        }
+                    }
+                    else
+                    {
+                        long cur = counter.get();
+                        if (cur == prev)
+                        {
+                            logger.error("Simulation appears to have stalled; terminating. To disable set -Dcassandra.test.simulator.livenesscheck=false");
+                            shutdown.set(1);
+                            throw failWithOOM();
+                        }
+                        prev = cur;
+                    }
+                }
+            }, 5L, 5L, TimeUnit.MINUTES);
+        }
+
+        try (CloseableIterator<?> iter = iterator())
+        {
+            onFailedShutdown.set(iter);
+            while (iter.hasNext())
+            {
+                if (shutdown.get() > 0)
+                    throw failWithOOM();
+
+                iter.next();
+                counter.incrementAndGet();
+            }
+        }
+
+        // only cancel if we shut down successfully; otherwise we may have a shutdown liveness issue and should let the checker kill the process
+        if (liveness != null)
+            liveness.cancel(true);
+        if (livenessChecker != null)
+            livenessChecker.shutdownNow();
+    }
+
+    public CloseableIterator<?> iterator()
+    {
+        CloseableIterator<?> iterator = plan().iterator(mode, runForNanos, jitter, simulated.time, runnableScheduler, simulated.futureScheduler);
+        return new CloseableIterator<Object>()
+        {
+            @Override
+            public boolean hasNext()
+            {
+                return !isDone() && iterator.hasNext();
+            }
+
+            @Override
+            public Object next()
+            {
+                try
+                {
+                    return iterator.next();
+                }
+                catch (Throwable t)
+                {
+                    throw failWith(t);
+                }
+            }
+
+            @Override
+            public void close()
+            {
+                iterator.close();
+            }
+        };
+    }
+
+    boolean isDone()
+    {
+        if (!simulated.failures.hasFailure())
+            return false;
+
+        throw logAndThrow();
+    }
+
+    RuntimeException failWith(Throwable t)
+    {
+        simulated.failures.onFailure(t);
+        throw logAndThrow();
+    }
+
+    abstract void log(@Nullable Integer primaryKey);
+
+    private RuntimeException logAndThrow()
+    {
+        Integer causedByPrimaryKey = null;
+        Throwable causedByThrowable = null;
+        for (Throwable t : simulated.failures.get())
+        {
+            if (null != (causedByPrimaryKey = causedBy(t)))
+            {
+                causedByThrowable = t;
+                break;
+            }
+        }
+
+        log(causedByPrimaryKey);
+        if (causedByPrimaryKey != null) throw Throwables.propagate(causedByThrowable);
+        else throw Throwables.propagate(simulated.failures.get().get(0));
+    }
+
+    public void close()
+    {
+        // stop intercepting message delivery
+        cluster.setMessageSink(null);
+        cluster.forEach(i -> {
+            if (!i.isShutdown())
+            {
+                i.unsafeRunOnThisThread(() -> BallotGenerator.Global.unsafeSet(new BallotGenerator.Default()));
+                i.unsafeRunOnThisThread(InterceptorOfGlobalMethods.Global::unsafeReset);
+            }
+        });
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosSimulationRunner.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosSimulationRunner.java
new file mode 100644
index 0000000..50a0ee5
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosSimulationRunner.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import java.io.IOException;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import io.airlift.airline.Cli;
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.simulator.SimulationRunner;
+
+public class PaxosSimulationRunner extends SimulationRunner
+{
+    @Command(name = "run")
+    public static class Run extends SimulationRunner.Run<PaxosClusterSimulation.Builder>
+    {
+        @Option(name = "--consistency", description = "Specify the consistency level to perform paxos operations at")
+        String consistency;
+
+        @Option(name = "--with-paxos-state-cache", arity = 0, description = "Run with the paxos state cache always enabled")
+        boolean withStateCache;
+
+        @Option(name = "--without-paxos-state-cache", arity = 0, description = "Run with the paxos state cache always disabled")
+        boolean withoutStateCache;
+
+        @Option(name = "--variant", title = "paxos variant", description = "Specify the initial paxos variant to use")
+        String variant;
+
+        @Option(name = "--to-variant",  title = "paxos variant", description = "Specify the paxos variant to change to at some point during the simulation")
+        String toVariant;
+
+        public Run() {}
+
+        @Override
+        protected void propagate(PaxosClusterSimulation.Builder builder)
+        {
+            super.propagate(builder);
+            propagateTo(consistency, withStateCache, withoutStateCache, variant, toVariant, builder);
+        }
+    }
+
+    @Command(name = "record")
+    public static class Record extends SimulationRunner.Record<PaxosClusterSimulation.Builder>
+    {
+        @Option(name = "--consistency")
+        String consistency;
+
+        @Option(name = "--with-paxos-state-cache", arity = 0)
+        boolean withStateCache;
+
+        @Option(name = "--without-paxos-state-cache", arity = 0)
+        boolean withoutStateCache;
+
+        @Option(name = "--variant", description = "paxos variant to use")
+        String variant;
+
+        @Option(name = "--to-variant", description = "paxos variant to change to during the simulation")
+        String toVariant;
+
+        public Record() {}
+
+        @Override
+        protected void propagate(PaxosClusterSimulation.Builder builder)
+        {
+            super.propagate(builder);
+            propagateTo(consistency, withStateCache, withoutStateCache, variant, toVariant, builder);
+        }
+    }
+
+    @Command(name = "reconcile")
+    public static class Reconcile extends SimulationRunner.Reconcile<PaxosClusterSimulation.Builder>
+    {
+        @Option(name = "--consistency")
+        String consistency;
+
+        @Option(name = "--with-paxos-state-cache", arity = 0)
+        boolean withStateCache;
+
+        @Option(name = "--without-paxos-state-cache", arity = 0)
+        boolean withoutStateCache;
+
+        @Option(name = "--variant", description = "paxos variant to use")
+        String variant;
+
+        @Option(name = "--to-variant", description = "paxos variant to change to during the simulation")
+        String toVariant;
+
+        public Reconcile() {}
+
+        @Override
+        protected void propagate(PaxosClusterSimulation.Builder builder)
+        {
+            super.propagate(builder);
+            propagateTo(consistency, withStateCache, withoutStateCache, variant, toVariant, builder);
+        }
+    }
+
+    public static class Help extends HelpCommand<PaxosClusterSimulation.Builder> {}
+
+    static void propagateTo(String consistency, boolean withStateCache, boolean withoutStateCache, String variant, String toVariant, PaxosClusterSimulation.Builder builder)
+    {
+        Optional.ofNullable(consistency).map(ConsistencyLevel::valueOf).ifPresent(builder::consistency);
+        if (withStateCache) builder.stateCache(true);
+        if (withoutStateCache) builder.stateCache(false);
+        Optional.ofNullable(variant).map(Config.PaxosVariant::valueOf).ifPresent(builder::initialPaxosVariant);
+        Optional.ofNullable(toVariant).map(Config.PaxosVariant::valueOf).ifPresent(builder::finalPaxosVariant);
+    }
+
+    // for simple unit tests so we can simply invoke main()
+    private static final AtomicInteger uniqueNum = new AtomicInteger();
+
+    /**
+     * See {@link org.apache.cassandra.simulator} package info for execution tips
+     */
+    public static void main(String[] args) throws IOException
+    {
+        PaxosClusterSimulation.Builder builder = new PaxosClusterSimulation.Builder();
+        builder.unique(uniqueNum.getAndIncrement());
+
+        Cli.<SimulationRunner.ICommand<PaxosClusterSimulation.Builder>>builder("paxos")
+           .withCommand(Run.class)
+           .withCommand(Reconcile.class)
+           .withCommand(Record.class)
+           .withCommand(Help.class)
+           .withDefaultCommand(Help.class)
+           .build()
+           .parse(args)
+           .run(builder);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosTopologyChangeVerifier.java b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosTopologyChangeVerifier.java
new file mode 100644
index 0000000..46c4c9e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosTopologyChangeVerifier.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.paxos;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.simulator.cluster.ClusterActionListener.TopologyChangeValidator;
+import org.apache.cassandra.simulator.cluster.Topology;
+
+import static java.util.Arrays.stream;
+import static org.apache.cassandra.simulator.systems.NonInterceptible.Permit.REQUIRED;
+
+public class PaxosTopologyChangeVerifier implements TopologyChangeValidator
+{
+    final Cluster cluster;
+    final String keyspace;
+    final String table;
+    final Object id;
+
+    Topology topologyBefore;
+    Ballots.LatestBallots[][] ballotsBefore;
+
+    public PaxosTopologyChangeVerifier(Cluster cluster, String keyspace, String table, Object id)
+    {
+        this.cluster = cluster;
+        this.keyspace = keyspace;
+        this.table = table;
+        this.id = id;
+    }
+
+    @Override
+    public void before(Topology before, int[] participatingKeys)
+    {
+        this.topologyBefore = before.select(participatingKeys);
+        this.ballotsBefore = Ballots.read(REQUIRED, cluster, keyspace, table, topologyBefore.primaryKeys, topologyBefore.replicasForKeys, true);
+        for (int i = 0; i < topologyBefore.primaryKeys.length ; ++i)
+        {
+            if (ballotsBefore[i].length != topologyBefore.quorumRf)
+                throw new AssertionError("Inconsistent ownership/ballot information");
+        }
+    }
+
+    @Override
+    public void after(Topology topologyAfter)
+    {
+        afterInternal(topologyAfter.select(topologyBefore.primaryKeys));
+    }
+
+    public void afterInternal(Topology topologyAfter)
+    {
+        int[] primaryKeys = topologyAfter.primaryKeys;
+        int quorumBefore = topologyBefore.quorumRf / 2 + 1;
+        int quorumAfter = topologyAfter.quorumRf / 2 + 1;
+        Ballots.LatestBallots[][] allBefore = ballotsBefore;
+        Ballots.LatestBallots[][] allAfter = Ballots.read(REQUIRED, cluster, keyspace, table, primaryKeys, topologyAfter.replicasForKeys, true);
+        for (int pki = 0; pki < primaryKeys.length; ++pki)
+        {
+            Ballots.LatestBallots[] before = allBefore[pki];
+            Ballots.LatestBallots[] after = allAfter[pki];
+
+            if (after.length != topologyAfter.quorumRf)
+                throw new AssertionError("Inconsistent ownership/ballot information");
+
+            {
+                // if we had accepted to a quorum we should be committed to a quorum afterwards
+                // note that we will not always witness something newer than the latest accepted proposal,
+                // because if we don't witness it during repair, we will simply invalidate it with the low bound
+                long acceptedBefore = stream(before).mapToLong(n -> n.accept).max().orElse(0L);
+                long acceptedOfBefore = stream(before).filter(n -> n.accept == acceptedBefore).mapToLong(n -> n.acceptOf).findAny().orElse(0L);
+                int countBefore = (int) stream(before).filter(n -> n.accept == acceptedBefore).count();
+                int countAfter = countBefore < quorumAfter
+                                 ? (int) stream(after).filter(n -> n.any() >= acceptedBefore).count()
+                                 : (int) stream(after).filter(n -> n.permanent() >= acceptedOfBefore).count();
+
+                if (countBefore >= quorumBefore && countAfter < quorumAfter)
+                {
+                    throw new AssertionError(String.format("%d: %d accepted by %d before %s but only %s on %d after (expect at least %d)",
+                                                           primaryKeys[pki], acceptedBefore, countBefore, this, countBefore >= quorumAfter ? "committed" : "accepted", countAfter, quorumAfter));
+                }
+            }
+            {
+                // we should always have at least a quorum of newer records than the most recently witnessed commit
+                long committedBefore = stream(before).mapToLong(Ballots.LatestBallots::permanent).max().orElse(0L);
+                int countAfter = (int) stream(after).filter(n -> n.permanent() >= committedBefore).count();
+                if (countAfter < quorumAfter)
+                {
+                    throw new AssertionError(String.format("%d: %d committed before %s but only committed on %d after (expect at least %d)",
+                                                           primaryKeys[pki], committedBefore, id, countAfter, quorumAfter));
+                }
+            }
+        }
+
+        // clear memory usage on success
+        topologyBefore = null;
+        ballotsBefore = null;
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/Failures.java b/test/simulator/main/org/apache/cassandra/simulator/systems/Failures.java
new file mode 100644
index 0000000..2c89d3c
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/Failures.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Consumer;
+
+import org.apache.cassandra.utils.concurrent.Ref;
+import org.apache.cassandra.utils.memory.BufferPool;
+
+/**
+ * A simple encapsulation for capturing and reporting failures during the simulation
+ */
+public class Failures implements Consumer<Throwable>, BufferPool.DebugLeaks, Ref.OnLeak
+{
+    private final List<Throwable> failures = Collections.synchronizedList(new ArrayList<>());
+    private volatile boolean hasFailure;
+
+    public void onFailure(Throwable t)
+    {
+        failures.add(t);
+        hasFailure = true;
+    }
+
+    public boolean hasFailure()
+    {
+        return hasFailure;
+    }
+
+    public List<Throwable> get()
+    {
+        return Collections.unmodifiableList(failures);
+    }
+
+    @Override
+    public void accept(Throwable throwable)
+    {
+        onFailure(throwable);
+    }
+
+    @Override
+    public void leak()
+    {
+        failures.add(new AssertionError("ChunkCache leak detected"));
+    }
+
+    @Override
+    public void onLeak(Object state)
+    {
+        failures.add(new AssertionError("Ref leak detected " + state.toString()));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedExecution.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedExecution.java
new file mode 100644
index 0000000..4002f4e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedExecution.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.function.Function;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.simulator.systems.NotifyThreadPaused.AwaitPaused;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+// An asynchronous task we can intercept the execution of
+@Shared(scope = SIMULATION)
+public interface InterceptedExecution
+{
+    SimulatedAction.Kind kind();
+    long deadlineNanos();
+    void invokeAndAwaitPause(InterceptorOfConsequences interceptor);
+    void cancel();
+    void onCancel(Runnable onCancel);
+
+    /**
+     * Abstract implementation for which only Runnable needs to be implemented; the task is
+     * invoked on the given executor. The runnable should perform any necessary transfer to
+     * the target class loader itself.
+     */
+    abstract class InterceptedTaskExecution implements InterceptedExecution, Runnable
+    {
+        public final InterceptingExecutor executor;
+        boolean submittedOrCancelled;
+        Runnable onCancel;
+
+        public InterceptedTaskExecution(InterceptingExecutor executor)
+        {
+            Preconditions.checkNotNull(executor);
+            this.executor = executor;
+            executor.addPending(this);
+        }
+
+        @Override
+        public SimulatedAction.Kind kind()
+        {
+            return SimulatedAction.Kind.TASK;
+        }
+
+        @Override
+        public long deadlineNanos()
+        {
+            return -1L;
+        }
+
+        public void invokeAndAwaitPause(InterceptorOfConsequences interceptor)
+        {
+            Preconditions.checkState(!submittedOrCancelled);
+            executor.submitAndAwaitPause(this, interceptor);
+            submittedOrCancelled = true;
+        }
+
+        @Override
+        public void cancel()
+        {
+            if (!submittedOrCancelled)
+            {
+                executor.cancelPending(this);
+                submittedOrCancelled = true;
+                if (onCancel != null)
+                {
+                    onCancel.run();
+                    onCancel = null;
+                }
+            }
+        }
+
+        @Override
+        public void onCancel(Runnable onCancel)
+        {
+            assert this.onCancel == null || onCancel == null;
+            this.onCancel = onCancel;
+        }
+    }
+
+    /**
+     * Simple implementation that only needs to be supplied a Runnable, which is invoked
+     * on the given executor. The runnable should perform any necessary transfer to the target
+     * class loader itself.
+     */
+    class InterceptedRunnableExecution extends InterceptedTaskExecution
+    {
+        final Runnable run;
+        public InterceptedRunnableExecution(InterceptingExecutor executor, Runnable run)
+        {
+            super(executor);
+            this.run = run;
+        }
+
+        public void run()
+        {
+            run.run();
+        }
+
+        public String toString()
+        {
+            return run + " with " + executor;
+        }
+    }
+
+    class InterceptedFutureTaskExecution<T> implements InterceptedExecution
+    {
+        private final SimulatedAction.Kind kind;
+        private final InterceptingExecutor executor;
+        private final long deadlineNanos;
+        private RunnableFuture<T> run;
+        private Runnable onCancel;
+
+        public InterceptedFutureTaskExecution(SimulatedAction.Kind kind, InterceptingExecutor executor, RunnableFuture<T> run)
+        {
+            this(kind, executor, run, -1L);
+        }
+
+        public InterceptedFutureTaskExecution(SimulatedAction.Kind kind, InterceptingExecutor executor, RunnableFuture<T> run, long deadlineNanos)
+        {
+            Preconditions.checkArgument(deadlineNanos >= -1);
+            this.kind = kind;
+            this.executor = executor;
+            this.run = run;
+            this.deadlineNanos = deadlineNanos;
+            executor.addPending(run);
+        }
+
+        public String toString()
+        {
+            return (run != null ? run.toString() : "(done)") + " with " + executor;
+        }
+
+        @Override
+        public SimulatedAction.Kind kind()
+        {
+            return kind;
+        }
+
+        @Override
+        public long deadlineNanos()
+        {
+            return deadlineNanos;
+        }
+
+        @Override
+        public void invokeAndAwaitPause(InterceptorOfConsequences interceptor)
+        {
+            Preconditions.checkNotNull(run);
+            executor.submitAndAwaitPause(run, interceptor);
+            run = null;
+        }
+
+        @Override
+        public void cancel()
+        {
+            if (run != null)
+            {
+                executor.cancelPending(run);
+                run = null;
+                if (onCancel != null)
+                {
+                    onCancel.run();
+                    onCancel = null;
+                }
+            }
+        }
+
+        @Override
+        public void onCancel(Runnable onCancel)
+        {
+            assert this.onCancel == null || onCancel == null;
+            this.onCancel = onCancel;
+        }
+    }
+
+    public class InterceptedThreadStart implements Runnable, InterceptedExecution
+    {
+        final SimulatedAction.Kind kind;
+        final InterceptibleThread thread;
+        Runnable run;
+        Runnable onCancel;
+
+        public InterceptedThreadStart(Function<Runnable, InterceptibleThread> factory, Runnable run, SimulatedAction.Kind kind)
+        {
+            this.thread = factory.apply(this);
+            this.kind = kind;
+            this.run = run;
+        }
+
+        public void run()
+        {
+            try
+            {
+                run.run();
+            }
+            catch (UncheckedInterruptedException ignore)
+            {
+                // thrown on abnormal shutdown; don't want to pollute the log
+            }
+            catch (Throwable t)
+            {
+                thread.getUncaughtExceptionHandler().uncaughtException(thread, t);
+            }
+            finally
+            {
+                thread.onTermination();
+                thread.interceptTermination(true);
+            }
+        }
+
+        public String toString()
+        {
+            return (run != null ? run.toString() : "(done)") + " with " + thread;
+        }
+
+        @Override
+        public SimulatedAction.Kind kind()
+        {
+            return kind;
+        }
+
+        @Override
+        public long deadlineNanos()
+        {
+            return -1L;
+        }
+
+        public void invokeAndAwaitPause(InterceptorOfConsequences interceptor)
+        {
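+            // register the interceptor, start the thread, then block until the thread signals it has paused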
+            AwaitPaused done = new AwaitPaused();
+            synchronized (done)
+            {
+                thread.beforeInvocation(interceptor, done);
+                thread.start();
+                done.awaitPause();
+            }
+        }
+
+        @Override
+        public void cancel()
+        {
+            if (run != null)
+            {
+                run = null;
+                if (onCancel != null)
+                {
+                    onCancel.run();
+                    onCancel = null;
+                }
+            }
+        }
+
+        @Override
+        public void onCancel(Runnable onCancel)
+        {
+            assert this.onCancel == null || onCancel == null;
+            this.onCancel = onCancel;
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedIdentityHashMap.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedIdentityHashMap.java
new file mode 100644
index 0000000..61837c2
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedIdentityHashMap.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.AbstractSet;
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.google.common.collect.Iterators;
+
+import static org.apache.cassandra.simulator.systems.InterceptorOfSystemMethods.Global.identityHashCode;
+
+/**
+ * A class that behaves like IdentityHashMap but uses our deterministically generated
+ * {@link InterceptorOfSystemMethods.Global#identityHashCode(Object)}
+ *
+ * This is needed because we iterate over the contents of such collections, and iteration order depends on the hash codes.
+ */
+@SuppressWarnings("unused")
+public class InterceptedIdentityHashMap<K, V> extends IdentityHashMap<K, V>
+{
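+    // wraps each key so that its hash comes from the simulator's deterministic identityHashCode rather than the JVM's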
+    static class Key<K>
+    {
+        final int hash;
+        final K key;
+
+        Key(K key)
+        {
+            this.key = key;
+            this.hash = identityHashCode(key);
+        }
+
+        K key()
+        {
+            return key;
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return hash;
+        }
+
+        @Override
+        public boolean equals(Object that)
+        {
+            return that instanceof Key && key == ((Key<?>)that).key;
+        }
+    }
+
+    final HashMap<Key<K>, V> wrapped = new HashMap<>();
+    public InterceptedIdentityHashMap() {}
+    public InterceptedIdentityHashMap(int sizeToIgnore) {}
+
+    @Override
+    public int size()
+    {
+        return wrapped.size();
+    }
+
+    @Override
+    public boolean isEmpty()
+    {
+        return wrapped.isEmpty();
+    }
+
+    @Override
+    public boolean containsValue(Object value)
+    {
+        return wrapped.containsValue(value);
+    }
+
+    @Override
+    public boolean containsKey(Object o)
+    {
+        return wrapped.containsKey(new Key<>(o));
+    }
+
+    @Override
+    public V put(K key, V value)
+    {
+        return wrapped.put(new Key<>(key), value);
+    }
+
+    @Override
+    public V get(Object key)
+    {
+        return wrapped.get(new Key<>(key));
+    }
+
+    @Override
+    public V remove(Object key)
+    {
+        return wrapped.remove(new Key<>(key));
+    }
+
+    @Override
+    public Set<K> keySet()
+    {
+        return new AbstractSet<K>()
+        {
+            @Override
+            public Iterator<K> iterator()
+            {
+                return Iterators.transform(wrapped.keySet().iterator(), Key::key);
+            }
+
+            @Override
+            public boolean contains(Object o)
+            {
+                return containsKey(o);
+            }
+
+            @Override
+            public int size()
+            {
+                return wrapped.size();
+            }
+        };
+    }
+
+    @Override
+    public Set<Entry<K, V>> entrySet()
+    {
+        return new AbstractSet<Entry<K, V>>()
+        {
+            @Override
+            public Iterator<Entry<K, V>> iterator()
+            {
+                return Iterators.transform(wrapped.entrySet().iterator(), e -> new SimpleEntry<>(e.getKey().key, e.getValue()));
+            }
+
+            @Override
+            public int size()
+            {
+                return wrapped.size();
+            }
+        };
+    }
+}
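+
+// A usage sketch (illustrative only; the identifiers below are hypothetical): because keys are hashed
+// with the simulator's deterministic identityHashCode, iteration order is reproducible for a given seed:
+//   Map<Object, String> statusByTask = new InterceptedIdentityHashMap<>();
+//   statusByTask.put(task, "queued");
+//   statusByTask.keySet().forEach(t -> log(t)); // same order on every run with the same seed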
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedWait.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedWait.java
new file mode 100644
index 0000000..999e9aa
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptedWait.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.Threads;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites.Capture.WAKE;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites.Capture.WAKE_AND_NOW;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Trigger.SIGNAL;
+import static org.apache.cassandra.simulator.systems.InterceptibleThread.interceptorOrDefault;
+import static org.apache.cassandra.utils.Shared.Recursive.ALL;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * A general abstraction for intercepted thread wait events, either
+ * generated by the program execution or our nemesis system.
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface InterceptedWait extends NotifyThreadPaused
+{
+    enum Kind { SLEEP_UNTIL, WAIT_UNTIL, UNBOUNDED_WAIT, NEMESIS }
+    enum Trigger { TIMEOUT, INTERRUPT, SIGNAL }
+
+    interface TriggerListener
+    {
+        /**
+         * Invoked when the wait is triggered, permitting any dependent Action to be invalidated.
+         * This is particularly useful for thread timeouts, which are often logically invalidated
+         * but may otherwise hold up scheduling of further events until their scheduled time passes.
+         * @param triggered the wait that has been triggered, and is no longer valid
+         */
+        void onTrigger(InterceptedWait triggered);
+    }
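+    // A hypothetical listener sketch (illustrative identifiers, not part of the simulator): an Action
+    // wrapping a thread timeout might deregister the timeout once the underlying wait has fired, e.g.
+    //   wait.addListener(triggered -> pendingTimeouts.remove(triggered));
+    // so that the already-invalidated timeout no longer holds up scheduling of later events.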
+
+    /**
+     * The kind of simulated wait
+     */
+    Kind kind();
+
+    /**
+     * true if the signal has already been triggered by another simulation action
+     */
+    boolean isTriggered();
+
+    /**
+     * true if this wait can be interrupted
+     */
+    boolean isInterruptible();
+
+    /**
+     * If kind() is SLEEP_UNTIL or WAIT_UNTIL this returns the deadline to wait until,
+     * in the simulator's global nanos
+     */
+    long waitTime();
+
+    /**
+     * Intercept a wakeup signal on this wait
+     */
+    void interceptWakeup(Trigger trigger, Thread by);
+
+    /**
+     * Signal the waiter immediately, and have the caller wait until the woken thread has paused again
+     * (or terminated) in the simulation.
+     *
+     * @param interceptor the interceptor to relay events to
+     * @param trigger if SIGNAL, propagate the signal to the wrapped condition we are waiting on
+     */
+    void triggerAndAwaitDone(InterceptorOfConsequences interceptor, Trigger trigger);
+
+    /**
+     * Signal all waiters immediately, bypassing the simulation
+     */
+    void triggerBypass();
+
+    /**
+     * Add a trigger listener, to be notified when the wait has been triggered and is no longer valid
+     */
+    void addListener(TriggerListener onTrigger);
+
+    Thread waiting();
+
+    /**
+     * A general purpose superclass for implementing an intercepted/simulated thread wait event.
+     * All share this implementation except for monitor waits, which must use the monitor they are waiting on
+     * in order to release its lock.
+     */
+    class InterceptedConditionWait extends NotInterceptedSyncCondition implements InterceptedWait
+    {
+        static final Logger logger = LoggerFactory.getLogger(InterceptedConditionWait.class);
+
+        final Kind kind;
+        final InterceptibleThread waiting;
+        final CaptureSites captureSites;
+        final InterceptorOfConsequences interceptedBy;
+        final Condition propagateSignal;
+        final List<TriggerListener> onTrigger = new ArrayList<>(3);
+        final long waitTime;
+        boolean isInterruptible, isSignalPending, isTriggered, isDone, hasExited;
+
+        public InterceptedConditionWait(Kind kind, long waitTime, InterceptibleThread waiting, CaptureSites captureSites, Condition propagateSignal)
+        {
+            this.kind = kind;
+            this.waitTime = waitTime;
+            this.waiting = waiting;
+            this.captureSites = captureSites;
+            this.interceptedBy = waiting.interceptedBy();
+            this.propagateSignal = propagateSignal;
+        }
+
+        public synchronized void triggerAndAwaitDone(InterceptorOfConsequences interceptor, Trigger trigger)
+        {
+            if (isTriggered)
+                return;
+
+            if (hasExited)
+            {
+                logger.error("{} exited without trigger {}", waiting, captureSites == null ? new CaptureSites(waiting, WAKE_AND_NOW) : captureSites);
+                throw failWithOOM();
+            }
+
+            waiting.beforeInvocation(interceptor, this);
+            isTriggered = true;
+            onTrigger.forEach(listener -> listener.onTrigger(this));
+
+            if (!waiting.preWakeup(this) || !isInterruptible)
+                super.signal();
+
+            if (isSignalPending && propagateSignal != null)
+                propagateSignal.signal();
+
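+            // block the triggering thread here until the woken thread reports back via
+            // notifyThreadPaused(), i.e. until it has paused again or terminated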
+            try
+            {
+                while (!isDone)
+                    wait();
+            }
+            catch (InterruptedException ie)
+            {
+                throw new UncheckedInterruptedException(ie);
+            }
+        }
+
+        public synchronized void triggerBypass()
+        {
+            if (isTriggered)
+                return;
+
+            isTriggered = true;
+            super.signal();
+            if (propagateSignal != null)
+                propagateSignal.signal();
+        }
+
+        @Override
+        public void addListener(TriggerListener onTrigger)
+        {
+            this.onTrigger.add(onTrigger);
+        }
+
+        @Override
+        public Thread waiting()
+        {
+            return waiting;
+        }
+
+        @Override
+        public synchronized void notifyThreadPaused()
+        {
+            isDone = true;
+            notifyAll();
+        }
+
+        @Override
+        public Kind kind()
+        {
+            return kind;
+        }
+
+        @Override
+        public long waitTime()
+        {
+            return waitTime;
+        }
+
+        @Override
+        public void interceptWakeup(Trigger trigger, Thread by)
+        {
+            assert !isTriggered;
+            isSignalPending |= trigger == SIGNAL;
+            if (captureSites != null)
+                captureSites.registerWakeup(by);
+            interceptorOrDefault(by).interceptWakeup(this, trigger, interceptedBy);
+        }
+
+        public boolean isTriggered()
+        {
+            return isTriggered;
+        }
+
+        @Override
+        public boolean isInterruptible()
+        {
+            return isInterruptible;
+        }
+
+        // ignore return value; always false as can only represent artificial (intercepted) signaled status
+        public boolean await(long time, TimeUnit unit) throws InterruptedException
+        {
+            try
+            {
+                isInterruptible = true;
+                super.await();
+            }
+            finally
+            {
+                hasExited = true;
+            }
+            return false;
+        }
+
+        // ignore return value; always false as can only represent artificial (intercepted) signaled status
+        public boolean awaitUntil(long until) throws InterruptedException
+        {
+            try
+            {
+                isInterruptible = true;
+                super.await();
+            }
+            finally
+            {
+                hasExited = true;
+            }
+            return false;
+        }
+
+        // ignore return value; always false as can only represent artificial (intercepted) signaled status
+        public boolean awaitUntilUninterruptibly(long until)
+        {
+            try
+            {
+                isInterruptible = false;
+                super.awaitUninterruptibly();
+            }
+            finally
+            {
+                hasExited = true;
+            }
+            return false;
+        }
+
+        // ignore return value; always false as can only represent artificial (intercepted) signaled status
+        public boolean awaitUninterruptibly(long time, TimeUnit units)
+        {
+            try
+            {
+                isInterruptible = false;
+                super.awaitUninterruptibly(time, units);
+            }
+            finally
+            {
+                hasExited = true;
+            }
+            return false;
+        }
+
+        public Condition await() throws InterruptedException
+        {
+            try
+            {
+                isInterruptible = true;
+                super.await();
+            }
+            finally
+            {
+                hasExited = true;
+            }
+            return this;
+        }
+
+        // declared as uninterruptible to the simulator to avoid unnecessary wakeups, but handles interrupts if they arise
+        public Condition awaitDeclaredUninterruptible() throws InterruptedException
+        {
+            try
+            {
+                isInterruptible = false;
+                super.await();
+            }
+            finally
+            {
+                hasExited = true;
+            }
+            return this;
+        }
+
+        public Condition awaitUninterruptibly()
+        {
+            try
+            {
+                isInterruptible = false;
+                super.awaitUninterruptibly();
+            }
+            finally
+            {
+                hasExited = true;
+            }
+            return this;
+        }
+
+        public String toString()
+        {
+            return captureSites == null ? "" : "[" + captureSites + ']';
+        }
+    }
+
+    // debugging aid: captures the sites at which a thread waits and is signalled/woken
+    @Shared(scope = SIMULATION, members = ALL)
+    public static class CaptureSites
+    {
+        public static class Capture
+        {
+            public static final Capture NONE = new Capture(false, false, false);
+            public static final Capture WAKE = new Capture(true, false, false);
+            public static final Capture WAKE_AND_NOW = new Capture(true, true, false);
+
+            public final boolean waitSites;
+            public final boolean wakeSites;
+            public final boolean nowSites;
+
+            public Capture(boolean waitSites, boolean wakeSites, boolean nowSites)
+            {
+                this.waitSites = waitSites;
+                this.wakeSites = wakeSites;
+                this.nowSites = nowSites;
+            }
+
+            public boolean any()
+            {
+                return waitSites | wakeSites | nowSites;
+            }
+        }
+
+        final Thread waiting;
+        final StackTraceElement[] waitSite;
+        final Capture capture;
+        @SuppressWarnings("unused") Thread waker;
+        StackTraceElement[] wakeupSite;
+
+        public CaptureSites(Thread waiting, StackTraceElement[] waitSite, Capture capture)
+        {
+            this.waiting = waiting;
+            this.waitSite = waitSite;
+            this.capture = capture;
+        }
+
+        public CaptureSites(Thread waiting, Capture capture)
+        {
+            this.waiting = waiting;
+            this.waitSite = waiting.getStackTrace();
+            this.capture = capture;
+        }
+
+        public CaptureSites(Thread waiting)
+        {
+            this.waiting = waiting;
+            this.waitSite = waiting.getStackTrace();
+            this.capture = WAKE;
+        }
+
+        public void registerWakeup(Thread waking)
+        {
+            this.waker = waking;
+            if (capture.wakeSites)
+                this.wakeupSite = waking.getStackTrace();
+        }
+
+        public String toString(Predicate<StackTraceElement> include)
+        {
+            String tail;
+            if (wakeupSite != null)
+                tail = Threads.prettyPrint(Stream.of(wakeupSite).filter(include), true, capture.nowSites ? "]# by[" : waitSite != null ? " by[" : "by[", "; ", "]");
+            else if (capture.nowSites)
+                tail = "]#";
+            else
+                tail = "";
+            if (capture.nowSites)
+                tail = Threads.prettyPrint(Stream.of(waiting.getStackTrace()).filter(include), true, waitSite != null ? " #[" : "#[", "; ", tail);
+            if (waitSite != null)
+                tail = Threads.prettyPrint(Stream.of(waitSite).filter(include), true, "", "; ", tail);
+            return tail;
+        }
+
+        public String toString()
+        {
+            return toString(ignore -> true);
+        }
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptibleThread.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptibleThread.java
new file mode 100644
index 0000000..0cb26bf
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptibleThread.java
@@ -0,0 +1,558 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.locks.LockSupport;
+
+import io.netty.util.concurrent.FastThreadLocalThread;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IMessage;
+import org.apache.cassandra.simulator.OrderOn;
+import org.apache.cassandra.simulator.systems.InterceptedWait.Trigger;
+import org.apache.cassandra.simulator.systems.SimulatedTime.LocalTime;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.UNBOUNDED_WAIT;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.WAIT_UNTIL;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Trigger.INTERRUPT;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Trigger.SIGNAL;
+import static org.apache.cassandra.simulator.systems.InterceptibleThread.WaitTimeKind.ABSOLUTE_MILLIS;
+import static org.apache.cassandra.simulator.systems.InterceptibleThread.WaitTimeKind.NONE;
+import static org.apache.cassandra.simulator.systems.InterceptibleThread.WaitTimeKind.RELATIVE_NANOS;
+import static org.apache.cassandra.utils.Shared.Recursive.ALL;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION, ancestors = ALL, members = ALL)
+public class InterceptibleThread extends FastThreadLocalThread implements InterceptorOfConsequences
+{
+    @Shared(scope = SIMULATION)
+    enum WaitTimeKind
+    {
+        NONE, RELATIVE_NANOS, ABSOLUTE_MILLIS
+    }
+
+    // used to implement LockSupport.park/unpark
+    @Shared(scope = SIMULATION)
+    private class Parked implements InterceptedWait
+    {
+        final Kind kind;
+        final CaptureSites captureSites;
+        final InterceptorOfConsequences waitInterceptedBy;
+        final List<TriggerListener> onTrigger = new ArrayList<>(3);
+        final long waitTime;
+        boolean isDone; // set (via notifyThreadPaused) once the woken thread has paused again
+        Trigger trigger;
+
+        Parked(Kind kind, CaptureSites captureSites, long waitTime, InterceptorOfConsequences waitInterceptedBy)
+        {
+            this.kind = kind;
+            this.captureSites = captureSites;
+            this.waitTime = waitTime;
+            this.waitInterceptedBy = waitInterceptedBy;
+        }
+
+        @Override
+        public Kind kind()
+        {
+            return kind;
+        }
+
+        @Override
+        public long waitTime()
+        {
+            return waitTime;
+        }
+
+        @Override
+        public boolean isTriggered()
+        {
+            return parked != this;
+        }
+
+        @Override
+        public boolean isInterruptible()
+        {
+            return true;
+        }
+
+        @Override
+        public synchronized void triggerAndAwaitDone(InterceptorOfConsequences interceptor, Trigger trigger)
+        {
+            if (parked == null)
+                return;
+
+            beforeInvocation(interceptor, this);
+
+            parked = null;
+            onTrigger.forEach(listener -> listener.onTrigger(this));
+
+            if (!preWakeup(this))
+                notify();
+
+            try
+            {
+                while (!isDone)
+                    wait();
+            }
+            catch (InterruptedException ie)
+            {
+                throw new UncheckedInterruptedException(ie);
+            }
+        }
+
+        @Override
+        public synchronized void triggerBypass()
+        {
+            parked = null;
+            notifyAll();
+            LockSupport.unpark(InterceptibleThread.this);
+        }
+
+        @Override
+        public void addListener(TriggerListener onTrigger)
+        {
+            this.onTrigger.add(onTrigger);
+        }
+
+        @Override
+        public Thread waiting()
+        {
+            return InterceptibleThread.this;
+        }
+
+        @Override
+        public synchronized void notifyThreadPaused()
+        {
+            isDone = true;
+            notifyAll();
+        }
+
+        synchronized void await()
+        {
+            try
+            {
+                while (!isTriggered())
+                    wait();
+
+                if (hasPendingInterrupt)
+                    doInterrupt();
+                hasPendingInterrupt = false;
+            }
+            catch (InterruptedException e)
+            {
+                if (!isTriggered()) throw new UncheckedInterruptedException(e);
+                else doInterrupt();
+            }
+        }
+
+        @Override
+        public void interceptWakeup(Trigger trigger, Thread by)
+        {
+            if (this.trigger != null && this.trigger.compareTo(trigger) >= 0)
+                return;
+
+            this.trigger = trigger;
+            if (captureSites != null)
+                captureSites.registerWakeup(by);
+            interceptorOrDefault(by).interceptWakeup(this, trigger, waitInterceptedBy);
+        }
+
+        @Override
+        public String toString()
+        {
+            return captureSites == null ? "" : captureSites.toString();
+        }
+    }
+
+    private static InterceptorOfConsequences debug;
+
+    final Object extraToStringInfo; // optional dynamic extra information for reporting with toString
+    final String toString;
+    final Runnable onTermination;
+    private final InterceptorOfGlobalMethods interceptorOfGlobalMethods;
+    private final LocalTime time;
+
+    // this is set before the thread's execution begins/continues; events and cessation are reported back to this
+    private InterceptorOfConsequences interceptor;
+    private NotifyThreadPaused notifyOnPause;
+
+    private boolean hasPendingUnpark;
+    private boolean hasPendingInterrupt;
+    private Parked parked;
+    private InterceptedWait waitingOn;
+
+    volatile boolean trapInterrupts = true;
+    // We need to avoid non-determinism when evaluating things in the debugger, and toString() is the main
+    // culprit, so we re-write toString() methods to update this counter; while a toString() is being
+    // evaluated we then avoid performing any non-deterministic actions.
+    private int determinismDepth;
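+    // Illustration only (an assumed shape; the actual re-writing happens elsewhere): a re-written
+    // toString() is effectively bracketed as
+    //   enterDeterministicMethod(); try { /* original body */ } finally { exitDeterministicMethod(); }
+    // so that isEvaluationDeterministic() reports true while a debugger evaluates it.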
+
+    public InterceptibleThread(ThreadGroup group, Runnable target, String name, Object extraToStringInfo, Runnable onTermination, InterceptorOfGlobalMethods interceptorOfGlobalMethods, LocalTime time)
+    {
+        super(group, target, name);
+        this.onTermination = onTermination;
+        this.interceptorOfGlobalMethods = interceptorOfGlobalMethods;
+        this.time = time;
+        // group is nulled on termination, and we need it for reporting purposes, so save the toString
+        this.toString = "Thread[" + name + ',' + getPriority() + ',' + group.getName() + ']';
+        this.extraToStringInfo = extraToStringInfo;
+    }
+
+    public boolean park(long waitTime, WaitTimeKind waitTimeKind)
+    {
+        if (interceptor == null) return false;
+        if (hasPendingUnpark) hasPendingUnpark = false;
+        else if (!isInterrupted())
+        {
+            InterceptedWait.Kind kind;
+            switch (waitTimeKind)
+            {
+                default:
+                    throw new AssertionError();
+                case NONE:
+                    kind = UNBOUNDED_WAIT;
+                    break;
+                case RELATIVE_NANOS:
+                    kind = WAIT_UNTIL;
+                    waitTime = time.localToGlobalNanos(time.relativeToLocalNanos(waitTime));
+                    break;
+                case ABSOLUTE_MILLIS:
+                    kind = WAIT_UNTIL;
+                    waitTime = time.translate().fromMillisSinceEpoch(waitTime);
+            }
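+            // note: both timed cases above are normalised to a deadline on the simulator's global clock
+            // (in nanos), so the wakeup can be ordered against other simulated events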
+
+            Parked parked = new Parked(kind, interceptorOfGlobalMethods.captureWaitSite(this), waitTime, interceptor);
+            this.parked = parked;
+            interceptWait(parked);
+            parked.await();
+        }
+        return true;
+    }
+
+    public boolean unpark(InterceptibleThread by)
+    {
+        if (by.interceptor == null) return false;
+        if (parked == null) hasPendingUnpark = true;
+        else parked.interceptWakeup(SIGNAL, by);
+        return true;
+    }
+
+    public void trapInterrupts(boolean trapInterrupts)
+    {
+        this.trapInterrupts = trapInterrupts;
+        if (!trapInterrupts && hasPendingInterrupt)
+            doInterrupt();
+    }
+
+    public boolean hasPendingInterrupt()
+    {
+        return hasPendingInterrupt;
+    }
+
+    public boolean preWakeup(InterceptedWait wakingOn)
+    {
+        assert wakingOn == waitingOn;
+        waitingOn = null;
+        if (!hasPendingInterrupt)
+            return false;
+
+        hasPendingInterrupt = false;
+        doInterrupt();
+        return true;
+    }
+
+    public void doInterrupt()
+    {
+        super.interrupt();
+    }
+
+    @Override
+    public void interrupt()
+    {
+        Thread by = Thread.currentThread();
+        if (by == this || !(by instanceof InterceptibleThread) || !trapInterrupts) doInterrupt();
+        else
+        {
+            hasPendingInterrupt = true;
+            if (waitingOn != null && waitingOn.isInterruptible())
+                waitingOn.interceptWakeup(INTERRUPT, by);
+        }
+    }
+
+    @Override
+    public void beforeInvocation(InterceptibleThread realThread)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void interceptMessage(IInvokableInstance from, IInvokableInstance to, IMessage message)
+    {
+        ++determinismDepth;
+        try
+        {
+            if (interceptor == null) to.receiveMessage(message);
+            else interceptor.interceptMessage(from, to, message);
+            if (debug != null) debug.interceptMessage(from, to, message);
+        }
+        finally
+        {
+            --determinismDepth;
+        }
+    }
+
+    @Override
+    public void interceptWakeup(InterceptedWait wakeup, Trigger trigger, InterceptorOfConsequences waitWasInterceptedBy)
+    {
+        ++determinismDepth;
+        try
+        {
+            interceptor.interceptWakeup(wakeup, trigger, waitWasInterceptedBy);
+            if (debug != null) debug.interceptWakeup(wakeup, trigger, waitWasInterceptedBy);
+        }
+        finally
+        {
+            --determinismDepth;
+        }
+    }
+
+    @Override
+    public void interceptExecution(InterceptedExecution invoke, OrderOn orderOn)
+    {
+        ++determinismDepth;
+        try
+        {
+            interceptor.interceptExecution(invoke, orderOn);
+            if (debug != null) debug.interceptExecution(invoke, orderOn);
+        }
+        finally
+        {
+            --determinismDepth;
+        }
+    }
+
+    @Override
+    public void interceptWait(InterceptedWait wakeupWith)
+    {
+        ++determinismDepth;
+        try
+        {
+            if (interceptor == null)
+                return;
+
+            InterceptorOfConsequences interceptor = this.interceptor;
+            NotifyThreadPaused notifyOnPause = this.notifyOnPause;
+            this.interceptor = null;
+            this.notifyOnPause = null;
+            this.waitingOn = wakeupWith;
+
+            interceptor.interceptWait(wakeupWith);
+            if (debug != null) debug.interceptWait(wakeupWith);
+            notifyOnPause.notifyThreadPaused();
+        }
+        finally
+        {
+            --determinismDepth;
+        }
+    }
+
+    public void onTermination()
+    {
+        onTermination.run();
+    }
+
+    @Override
+    public void interceptTermination(boolean isThreadTermination)
+    {
+        ++determinismDepth;
+        try
+        {
+            if (interceptor == null)
+                return;
+
+            InterceptorOfConsequences interceptor = this.interceptor;
+            NotifyThreadPaused notifyOnPause = this.notifyOnPause;
+            this.interceptor = null;
+            this.notifyOnPause = null;
+
+            interceptor.interceptTermination(isThreadTermination);
+            if (debug != null) debug.interceptTermination(isThreadTermination);
+            notifyOnPause.notifyThreadPaused();
+        }
+        finally
+        {
+            --determinismDepth;
+        }
+    }
+
+    public void beforeInvocation(InterceptorOfConsequences interceptor, NotifyThreadPaused notifyOnPause)
+    {
+        assert this.interceptor == null;
+        assert this.notifyOnPause == null;
+
+        this.interceptor = interceptor;
+        this.notifyOnPause = notifyOnPause;
+        interceptor.beforeInvocation(this);
+    }
+
+    public boolean isEvaluationDeterministic()
+    {
+        return determinismDepth > 0;
+    }
+
+    public boolean isIntercepting()
+    {
+        return interceptor != null;
+    }
+
+    public InterceptorOfConsequences interceptedBy()
+    {
+        return interceptor;
+    }
+
+    public InterceptorOfGlobalMethods interceptorOfGlobalMethods()
+    {
+        return interceptorOfGlobalMethods;
+    }
+
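+    // hashCode() and toString() are based on the saved (deterministic) thread name rather than object
+    // identity, presumably so that they remain stable across simulation runs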
+    public int hashCode()
+    {
+        return toString.hashCode();
+    }
+
+    public String toString()
+    {
+        return extraToStringInfo == null ? toString : toString + ' ' + extraToStringInfo;
+    }
+
+    public static boolean isDeterministic()
+    {
+        Thread thread = Thread.currentThread();
+        return thread instanceof InterceptibleThread && ((InterceptibleThread) thread).determinismDepth > 0;
+    }
+
+    public static void runDeterministic(Runnable runnable)
+    {
+        enterDeterministicMethod();
+        try
+        {
+            runnable.run();
+        }
+        finally
+        {
+            exitDeterministicMethod();
+        }
+    }
+
+    public static void enterDeterministicMethod()
+    {
+        Thread anyThread = Thread.currentThread();
+        if (!(anyThread instanceof InterceptibleThread))
+            return;
+
+        InterceptibleThread thread = (InterceptibleThread) anyThread;
+        ++thread.determinismDepth;
+    }
+
+    public static void exitDeterministicMethod()
+    {
+        Thread anyThread = Thread.currentThread();
+        if (!(anyThread instanceof InterceptibleThread))
+            return;
+
+        InterceptibleThread thread = (InterceptibleThread) anyThread;
+        --thread.determinismDepth;
+    }
+
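+    // The static park/unpark wrappers below mirror the LockSupport API; the assumption (not shown in
+    // this file) is that call sites are redirected here, e.g. LockSupport.parkNanos(n) becoming
+    // InterceptibleThread.parkNanos(n), falling through to the real LockSupport only when the current
+    // thread is not being intercepted.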
+    public static void park()
+    {
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread) || !((InterceptibleThread) thread).park(-1, NONE))
+            LockSupport.park();
+    }
+
+    public static void parkNanos(long nanos)
+    {
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread) || !((InterceptibleThread) thread).park(nanos, RELATIVE_NANOS))
+            LockSupport.parkNanos(nanos);
+    }
+
+    public static void parkUntil(long millis)
+    {
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread) || !((InterceptibleThread) thread).park(millis, ABSOLUTE_MILLIS))
+            LockSupport.parkUntil(millis);
+    }
+
+    public static void park(Object blocker)
+    {
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread) || !((InterceptibleThread) thread).park(-1, NONE))
+            LockSupport.park(blocker);
+    }
+
+    public static void parkNanos(Object blocker, long relative)
+    {
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread) || !((InterceptibleThread) thread).park(relative, RELATIVE_NANOS))
+            LockSupport.parkNanos(blocker, relative);
+    }
+
+    public static void parkUntil(Object blocker, long millis)
+    {
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread) || !((InterceptibleThread) thread).park(millis, ABSOLUTE_MILLIS))
+            LockSupport.parkUntil(blocker, millis);
+    }
+
+    public static void unpark(Thread thread)
+    {
+        Thread currentThread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread) || !(currentThread instanceof InterceptibleThread)
+            || !((InterceptibleThread) thread).unpark((InterceptibleThread) currentThread))
+            LockSupport.unpark(thread);
+    }
+
+    public static InterceptorOfConsequences interceptorOrDefault(Thread thread)
+    {
+        if (!(thread instanceof InterceptibleThread))
+            return DEFAULT_INTERCEPTOR;
+
+        return interceptorOrDefault((InterceptibleThread) thread);
+    }
+
+    public static InterceptorOfConsequences interceptorOrDefault(InterceptibleThread thread)
+    {
+        return thread.isIntercepting() ? thread : DEFAULT_INTERCEPTOR;
+    }
+
+    public LocalTime time()
+    {
+        return time;
+    }
+
+    public static void setDebugInterceptor(InterceptorOfConsequences interceptor)
+    {
+        debug = interceptor;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptibleThreadFactory.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptibleThreadFactory.java
new file mode 100644
index 0000000..6579319
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptibleThreadFactory.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.io.Serializable;
+import java.util.concurrent.ThreadFactory;
+
+import org.apache.cassandra.concurrent.NamedThreadFactory;
+
+public interface InterceptibleThreadFactory extends ThreadFactory
+{
+    public interface MetaFactory<F extends ThreadFactory> extends Serializable
+    {
+        F create(String id, int priority, ClassLoader contextClassLoader, Thread.UncaughtExceptionHandler uncaughtExceptionHandler,
+                 ThreadGroup threadGroup, Runnable onTermination, SimulatedTime.LocalTime time, InterceptingExecutorFactory parent, Object extraToStringInfo);
+    }
+
+    public static class ConcreteInterceptibleThreadFactory extends NamedThreadFactory implements InterceptibleThreadFactory
+    {
+        final InterceptingExecutorFactory parent;
+        final Runnable onTermination;
+        final SimulatedTime.LocalTime time;
+        final Object extraToStringInfo;
+
+        public ConcreteInterceptibleThreadFactory(String id, int priority, ClassLoader contextClassLoader, Thread.UncaughtExceptionHandler uncaughtExceptionHandler,
+                                                  ThreadGroup threadGroup, Runnable onTermination, SimulatedTime.LocalTime time,
+                                                  InterceptingExecutorFactory parent, Object extraToStringInfo)
+        {
+            super(id, priority, contextClassLoader, threadGroup, uncaughtExceptionHandler);
+            this.onTermination = onTermination;
+            this.time = time;
+            this.parent = parent;
+            this.extraToStringInfo = extraToStringInfo;
+        }
+
+        @Override
+        public InterceptibleThread newThread(Runnable runnable)
+        {
+            return (InterceptibleThread) super.newThread(runnable);
+        }
+
+        @Override
+        protected synchronized InterceptibleThread newThread(ThreadGroup threadGroup, Runnable runnable, String name)
+        {
+            InterceptibleThread thread = new InterceptibleThread(threadGroup, runnable, name, extraToStringInfo, onTermination, parent.interceptorOfGlobalMethods, time);
+            if (parent.isClosed)
+                thread.trapInterrupts(false);
+            return setupThread(thread);
+        }
+    }
+
+    public static class PlainThreadFactory extends NamedThreadFactory
+    {
+        final Runnable onTermination;
+
+        public PlainThreadFactory(String id, int priority, ClassLoader contextClassLoader, Thread.UncaughtExceptionHandler uncaughtExceptionHandler,
+                                  ThreadGroup threadGroup, Runnable onTermination, SimulatedTime.LocalTime time, InterceptingExecutorFactory parent, Object extraToStringInfo)
+        {
+            super(id, priority, contextClassLoader, threadGroup, uncaughtExceptionHandler);
+            this.onTermination = onTermination;
+        }
+
+        @Override
+        protected Thread newThread(ThreadGroup threadGroup, Runnable runnable, String name)
+        {
+            return super.newThread(threadGroup, () -> { try { runnable.run(); } finally { onTermination.run(); } }, name);
+        }
+    }
+
+    InterceptibleThread newThread(Runnable runnable);
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingAwaitable.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingAwaitable.java
new file mode 100644
index 0000000..ef4e24d
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingAwaitable.java
@@ -0,0 +1,289 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+
+import org.apache.cassandra.simulator.systems.InterceptedWait.InterceptedConditionWait;
+import org.apache.cassandra.simulator.systems.InterceptedWait.TriggerListener;
+import org.apache.cassandra.utils.concurrent.Awaitable;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+import org.apache.cassandra.utils.concurrent.WaitQueue;
+
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.WAIT_UNTIL;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.UNBOUNDED_WAIT;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Trigger.SIGNAL;
+import static org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods.Global.captureWaitSite;
+import static org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods.Global.ifIntercepted;
+import static org.apache.cassandra.simulator.systems.SimulatedTime.Global.localToGlobalNanos;
+import static org.apache.cassandra.simulator.systems.SimulatedTime.Global.relativeToLocalNanos;
+
+@PerClassLoader
+abstract class InterceptingAwaitable implements Awaitable
+{
+    abstract boolean isSignalled();
+    abstract Condition maybeIntercept(InterceptedWait.Kind kind, long waitNanos);
+
+    Condition maybeInterceptThrowChecked(InterceptedWait.Kind kind, long waitNanos) throws InterruptedException
+    {
+        if (Thread.interrupted())
+            throw new InterruptedException();
+
+        return maybeIntercept(kind, waitNanos);
+    }
+
+    Condition maybeInterceptThrowUnchecked(InterceptedWait.Kind kind, long waitNanos)
+    {
+        if (Thread.interrupted())
+            throw new UncheckedInterruptedException();
+
+        return maybeIntercept(kind, waitNanos);
+    }
+
+    public boolean awaitUntil(long deadline) throws InterruptedException
+    {
+        maybeInterceptThrowChecked(WAIT_UNTIL, deadline).awaitUntil(deadline);
+        return isSignalled();
+    }
+
+    public boolean awaitUntilThrowUncheckedOnInterrupt(long deadline)
+    {
+        maybeInterceptThrowUnchecked(WAIT_UNTIL, deadline).awaitUntilThrowUncheckedOnInterrupt(deadline);
+        return isSignalled();
+    }
+
+    public boolean awaitUntilUninterruptibly(long deadline)
+    {
+        maybeIntercept(WAIT_UNTIL, deadline).awaitUntilUninterruptibly(deadline);
+        return isSignalled();
+    }
+
+    public Awaitable await() throws InterruptedException
+    {
+        maybeInterceptThrowChecked(UNBOUNDED_WAIT, 0).await();
+        return this;
+    }
+
+    public Awaitable awaitThrowUncheckedOnInterrupt()
+    {
+        maybeInterceptThrowUnchecked(UNBOUNDED_WAIT, 0).awaitThrowUncheckedOnInterrupt();
+        return this;
+    }
+
+    public Awaitable awaitUninterruptibly()
+    {
+        maybeIntercept(UNBOUNDED_WAIT, 0).awaitUninterruptibly();
+        return this;
+    }
+
+    public boolean await(long time, TimeUnit units) throws InterruptedException
+    {
+        long deadline = relativeToLocalNanos(units.toNanos(time));
+        maybeInterceptThrowChecked(WAIT_UNTIL, localToGlobalNanos(deadline)).awaitUntil(deadline);
+        return isSignalled();
+    }
+
+    public boolean awaitThrowUncheckedOnInterrupt(long time, TimeUnit units)
+    {
+        long deadline = relativeToLocalNanos(units.toNanos(time));
+        maybeInterceptThrowUnchecked(WAIT_UNTIL, localToGlobalNanos(deadline)).awaitUntilThrowUncheckedOnInterrupt(deadline);
+        return isSignalled();
+    }
+
+    public boolean awaitUninterruptibly(long time, TimeUnit units)
+    {
+        long deadline = relativeToLocalNanos(units.toNanos(time));
+        maybeIntercept(WAIT_UNTIL, localToGlobalNanos(deadline)).awaitUntilUninterruptibly(deadline);
+        return isSignalled();
+    }
+
+    @PerClassLoader
+    static class InterceptingCondition extends InterceptingAwaitable implements Condition, TriggerListener
+    {
+        final Condition inner = new NotInterceptedSyncCondition();
+        private List<InterceptedConditionWait> intercepted;
+
+        public InterceptingCondition()
+        {
+        }
+
+        Condition maybeIntercept(InterceptedWait.Kind kind, long waitNanos)
+        {
+            if (inner.isSignalled())
+                return inner;
+
+            InterceptibleThread thread = ifIntercepted();
+            if (thread == null)
+                return inner;
+
+            InterceptedConditionWait signal = new InterceptedConditionWait(kind, waitNanos, thread, captureWaitSite(thread), inner);
+            synchronized (this)
+            {
+                if (intercepted == null)
+                    intercepted = new ArrayList<>(2);
+                intercepted.add(signal);
+            }
+            signal.addListener(this);
+            thread.interceptWait(signal);
+            return signal;
+        }
+
+        public boolean isSignalled()
+        {
+            return inner.isSignalled();
+        }
+
+        public void signal()
+        {
+            if (isSignalled())
+                return;
+
+            inner.signal();
+            synchronized (this)
+            {
+                if (intercepted != null)
+                {
+                    Thread signalledBy = Thread.currentThread();
+                    intercepted.forEach(signal -> signal.interceptWakeup(SIGNAL, signalledBy));
+                }
+            }
+        }
+
+        @Override
+        public synchronized void onTrigger(InterceptedWait triggered)
+        {
+            intercepted.remove(triggered);
+        }
+    }
+
+    @PerClassLoader
+    static class InterceptingCountDownLatch extends InterceptingCondition implements CountDownLatch
+    {
+        private final AtomicInteger count;
+
+        public InterceptingCountDownLatch(int count)
+        {
+            super();
+            this.count = new AtomicInteger(count);
+        }
+
+        public void decrement()
+        {
+            if (count.decrementAndGet() == 0)
+                signal();
+        }
+
+        public int count()
+        {
+            return count.get();
+        }
+    }
+
+    @PerClassLoader
+    static class InterceptingSignal<V> extends InterceptingAwaitable implements WaitQueue.Signal
+    {
+        final Condition inner = new NotInterceptedSyncCondition();
+        final V supplyOnDone;
+        final Consumer<V> receiveOnDone;
+
+        InterceptedConditionWait intercepted;
+
+        boolean isSignalled;
+        boolean isCancelled;
+
+        InterceptingSignal()
+        {
+            this(null, ignore -> {});
+        }
+
+        InterceptingSignal(V supplyOnDone, Consumer<V> receiveOnDone)
+        {
+            this.supplyOnDone = supplyOnDone;
+            this.receiveOnDone = receiveOnDone;
+        }
+
+        public boolean isSignalled()
+        {
+            return isSignalled;
+        }
+
+        public synchronized boolean isCancelled()
+        {
+            return isCancelled;
+        }
+
+        public synchronized boolean isSet()
+        {
+            return isCancelled | isSignalled;
+        }
+
+        public void signal()
+        {
+            doSignal();
+        }
+
+        synchronized boolean doSignal()
+        {
+            if (isSet())
+                return false;
+
+            isSignalled = true;
+            receiveOnDone.accept(supplyOnDone);
+            inner.signal();
+            if (intercepted != null && !intercepted.isTriggered())
+                intercepted.interceptWakeup(SIGNAL, Thread.currentThread());
+            return true;
+        }
+
+        public synchronized boolean checkAndClear()
+        {
+            if (isSet())
+                return isSignalled;
+            isCancelled = true;
+            receiveOnDone.accept(supplyOnDone);
+            inner.signal();
+            return false;
+        }
+
+        public synchronized void cancel()
+        {
+            checkAndClear();
+        }
+
+        Condition maybeIntercept(InterceptedWait.Kind kind, long waitNanos)
+        {
+            assert intercepted == null;
+            assert !inner.isSignalled();
+
+            InterceptibleThread thread = ifIntercepted();
+            if (thread == null)
+                return inner;
+
+            intercepted = new InterceptedConditionWait(kind, waitNanos, thread, captureWaitSite(thread), inner);
+            thread.interceptWait(intercepted);
+            return intercepted;
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingExecutor.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingExecutor.java
new file mode 100644
index 0000000..94e068c
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingExecutor.java
@@ -0,0 +1,1060 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.LocalAwareExecutorPlus;
+import org.apache.cassandra.concurrent.LocalAwareSequentialExecutorPlus;
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
+import org.apache.cassandra.concurrent.SequentialExecutorPlus;
+import org.apache.cassandra.concurrent.SingleThreadExecutorPlus;
+import org.apache.cassandra.concurrent.SyncFutureTask;
+import org.apache.cassandra.concurrent.TaskFactory;
+import org.apache.cassandra.simulator.OrderOn;
+import org.apache.cassandra.simulator.systems.InterceptingAwaitable.InterceptingCondition;
+import org.apache.cassandra.simulator.systems.NotifyThreadPaused.AwaitPaused;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+import org.apache.cassandra.utils.concurrent.NotScheduledFuture;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static java.util.Collections.newSetFromMap;
+import static java.util.Collections.synchronizedMap;
+import static java.util.Collections.synchronizedSet;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_SIMULATOR_DEBUG;
+import static org.apache.cassandra.simulator.systems.InterceptibleThread.runDeterministic;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.SCHEDULED_DAEMON;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.SCHEDULED_TASK;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.SCHEDULED_TIMEOUT;
+import static org.apache.cassandra.simulator.systems.SimulatedExecution.callable;
+import static org.apache.cassandra.simulator.systems.SimulatedTime.Global.localToRelativeNanos;
+import static org.apache.cassandra.simulator.systems.SimulatedTime.Global.localToGlobalNanos;
+import static org.apache.cassandra.simulator.systems.SimulatedTime.Global.relativeToGlobalNanos;
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+// An executor whose tasks we can intercept the execution of
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface InterceptingExecutor extends OrderOn
+{
+    class ForbiddenExecutionException extends RejectedExecutionException
+    {
+    }
+
+    interface InterceptingTaskFactory extends TaskFactory
+    {
+    }
+
+    void addPending(Object task);
+    void cancelPending(Object task);
+    void submitUnmanaged(Runnable task);
+    void submitAndAwaitPause(Runnable task, InterceptorOfConsequences interceptor);
+
+    OrderOn orderAppliesAfterScheduling();
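+
+    // (Assumed flow, inferred from the method names rather than documented here: the simulator calls
+    // submitAndAwaitPause(task, interceptor) to run a task on one of this executor's threads and blocks
+    // until that thread pauses at its next intercepted wait; submitUnmanaged bypasses that interception.)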
+
+    static class InterceptedScheduledFutureTask<T> extends SyncFutureTask<T> implements ScheduledFuture<T>
+    {
+        final long delayNanos;
+        Runnable onCancel;
+        public InterceptedScheduledFutureTask(long delayNanos, Callable<T> call)
+        {
+            super(call);
+            this.delayNanos = delayNanos;
+        }
+
+        @Override
+        public long getDelay(TimeUnit unit)
+        {
+            return unit.convert(delayNanos, NANOSECONDS);
+        }
+
+        @Override
+        public int compareTo(Delayed that)
+        {
+            return Long.compare(delayNanos, that.getDelay(NANOSECONDS));
+        }
+
+        void onCancel(Runnable onCancel)
+        {
+            this.onCancel = onCancel;
+        }
+
+        @Override
+        public boolean cancel(boolean b)
+        {
+            if (onCancel != null)
+            {
+                onCancel.run();
+                onCancel = null;
+            }
+            return super.cancel(b);
+        }
+    }
+
+    @PerClassLoader
+    abstract class AbstractInterceptingExecutor implements InterceptingExecutor, ExecutorPlus
+    {
+        private static final AtomicIntegerFieldUpdater<AbstractInterceptingExecutor> pendingUpdater = AtomicIntegerFieldUpdater.newUpdater(AbstractInterceptingExecutor.class, "pending");
+
+        final OrderAppliesAfterScheduling orderAppliesAfterScheduling = new OrderAppliesAfterScheduling(this);
+
+        final InterceptorOfExecution interceptorOfExecution;
+        final InterceptingTaskFactory taskFactory;
+
+        final Set<Object> debugPending = TEST_SIMULATOR_DEBUG.getBoolean() ? synchronizedSet(newSetFromMap(new IdentityHashMap<>())) : null;
+        final Condition isTerminated;
+        volatile boolean isShutdown;
+        volatile int pending;
+
+        protected AbstractInterceptingExecutor(InterceptorOfExecution interceptorOfExecution, InterceptingTaskFactory taskFactory)
+        {
+            this.interceptorOfExecution = interceptorOfExecution;
+            this.isTerminated = new InterceptingCondition();
+            this.taskFactory = taskFactory;
+        }
+
+        @Override
+        public void addPending(Object task)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            pendingUpdater.incrementAndGet(this);
+            if (isShutdown)
+            {
+                if (0 == pendingUpdater.decrementAndGet(this))
+                    terminate();
+                throw new RejectedExecutionException();
+            }
+
+            if (debugPending != null && !debugPending.add(task))
+                throw new AssertionError();
+        }
+
+        @Override
+        public void cancelPending(Object task)
+        {
+            boolean shutdown = isShutdown;
+            if (completePending(task) == 0 && shutdown)
+                terminate();
+        }
+
+        @Override
+        public OrderOn orderAppliesAfterScheduling()
+        {
+            return orderAppliesAfterScheduling;
+        }
+
+        public int completePending(Object task)
+        {
+            int remaining = pendingUpdater.decrementAndGet(this);
+            if (debugPending != null && !debugPending.remove(task))
+                throw new AssertionError();
+            return remaining;
+        }
+
+        <V, T extends RunnableFuture<V>> T addTask(T task)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            return interceptorOfExecution.intercept().addTask(task, this);
+        }
+
+        public void maybeExecuteImmediately(Runnable command)
+        {
+            execute(command);
+        }
+
+        @Override
+        public void execute(Runnable run)
+        {
+            addTask(taskFactory.toSubmit(run));
+        }
+
+        @Override
+        public void execute(WithResources withResources, Runnable run)
+        {
+            addTask(taskFactory.toSubmit(withResources, run));
+        }
+
+        @Override
+        public Future<?> submit(Runnable run)
+        {
+            return addTask(taskFactory.toSubmit(run));
+        }
+
+        @Override
+        public <T> Future<T> submit(Runnable run, T result)
+        {
+            return addTask(taskFactory.toSubmit(run, result));
+        }
+
+        @Override
+        public <T> Future<T> submit(Callable<T> call)
+        {
+            return addTask(taskFactory.toSubmit(call));
+        }
+
+        @Override
+        public <T> Future<T> submit(WithResources withResources, Runnable run, T result)
+        {
+            return addTask(taskFactory.toSubmit(withResources, run, result));
+        }
+
+        @Override
+        public Future<?> submit(WithResources withResources, Runnable run)
+        {
+            return addTask(taskFactory.toSubmit(withResources, run));
+        }
+
+        @Override
+        public <T> Future<T> submit(WithResources withResources, Callable<T> call)
+        {
+            return addTask(taskFactory.toSubmit(withResources, call));
+        }
+
+        abstract void terminate();
+
+        public boolean isShutdown()
+        {
+            return isShutdown;
+        }
+
+        public boolean isTerminated()
+        {
+            return isTerminated.isSignalled();
+        }
+
+        public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                InterceptibleThread interceptibleThread = (InterceptibleThread) thread;
+                if (interceptibleThread.isIntercepting())
+                {
+                    // simpler to use no timeout than to ensure pending tasks all run first in simulation
+                    isTerminated.await();
+                    return true;
+                }
+            }
+            return isTerminated.await(timeout, unit);
+        }
+
+        @Override
+        public void setCorePoolSize(int newCorePoolSize)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void setMaximumPoolSize(int newMaximumPoolSize)
+        {
+            throw new UnsupportedOperationException();
+        }
+    }
+
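+    /**
+     * Editor's summary (not from the original patch): pooled intercepting executor. Threads are created
+     * on demand and parked on the waiting queue when idle; each is handed one task at a time, so the
+     * simulator controls the interleaving.
+     */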
+    @PerClassLoader
+    class InterceptingPooledExecutor extends AbstractInterceptingExecutor implements InterceptingExecutor
+    {
+        enum State { RUNNING, TERMINATING, TERMINATED }
+
+        private class WaitingThread
+        {
+            final InterceptibleThread thread;
+            Runnable task;
+            State state = State.RUNNING;
+
+            WaitingThread(ThreadFactory factory)
+            {
+                this.thread = (InterceptibleThread) factory.newThread(() -> {
+                    InterceptibleThread thread = (InterceptibleThread) Thread.currentThread();
+                    try
+                    {
+                        while (true)
+                        {
+                            try
+                            {
+                                task.run();
+                            }
+                            catch (Throwable t)
+                            {
+                                try { thread.getUncaughtExceptionHandler().uncaughtException(thread, t); }
+                                catch (Throwable ignore) {}
+                            }
+
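+                            // after each task, retire this thread if we are shutting down and fewer
+                            // tasks remain pending than there are threads to run them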
+                            boolean shutdown = isShutdown;
+                            int remaining = completePending(task);
+                            if (shutdown && remaining < threads.size())
+                            {
+                                threads.remove(thread);
+                                thread.onTermination();
+                                if (threads.isEmpty())
+                                    isTerminated.signal(); // this has simulator side-effects, so try to perform it before we call interceptTermination
+                                thread.interceptTermination(true);
+                                return;
+                            }
+
+                            task = null;
+                            waiting.add(this); // inverse order of waiting.add/isShutdown to ensure visibility vs shutdown()
+                            thread.interceptTermination(false);
+                            synchronized (this)
+                            {
+                                while (task == null)
+                                {
+                                    if (state == State.TERMINATING)
+                                        return;
+
+                                    try { wait(); }
+                                    catch (InterruptedException | UncheckedInterruptedException ignore) { }
+                                }
+                            }
+                        }
+                    }
+                    finally
+                    {
+                        try
+                        {
+                            runDeterministic(() -> {
+                                if (null != threads.remove(thread))
+                                {
+                                    task = null;
+                                    waiting.remove(this);
+                                    thread.onTermination();
+                                    if (isShutdown && threads.isEmpty() && waiting.isEmpty() && !isTerminated())
+                                        isTerminated.signal();
+                                }
+                            });
+                        }
+                        finally
+                        {
+                            synchronized (this)
+                            {
+                                state = State.TERMINATED;
+                                notify();
+                            }
+                        }
+                    }
+                });
+
+                threads.put(thread, this);
+            }
+
+            synchronized void submit(Runnable task)
+            {
+                if (state != State.RUNNING)
+                    throw new IllegalStateException();
+                this.task = task;
+                if (thread.isAlive()) notify();
+                else thread.start();
+            }
+
+            synchronized void terminate()
+            {
+                if (state != State.TERMINATED)
+                    state = State.TERMINATING;
+
+                if (thread.isAlive()) notify();
+                else thread.start();
+                try
+                {
+                    while (state != State.TERMINATED)
+                        wait();
+                }
+                catch (InterruptedException e)
+                {
+                    throw new UncheckedInterruptedException(e);
+                }
+            }
+        }
+
+        final Map<InterceptibleThread, WaitingThread> threads = synchronizedMap(new IdentityHashMap<>());
+        final ThreadFactory threadFactory;
+        final Queue<WaitingThread> waiting = new ConcurrentLinkedQueue<>();
+        final int concurrency;
+
+        InterceptingPooledExecutor(InterceptorOfExecution interceptorOfExecution, int concurrency, ThreadFactory threadFactory, InterceptingTaskFactory taskFactory)
+        {
+            super(interceptorOfExecution, taskFactory);
+            this.threadFactory = threadFactory;
+            this.concurrency = concurrency;
+        }
+
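+        /**
+         * Editor's note: hands the task to a pooled thread and blocks until that thread next pauses,
+         * returning control of the schedule to the simulator.
+         */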
+        public void submitAndAwaitPause(Runnable task, InterceptorOfConsequences interceptor)
+        {
+            // we don't check isShutdown, as the simulation could have queued a task prior to shutdown
+            if (isTerminated()) throw new AssertionError();
+            if (debugPending != null && !debugPending.contains(task)) throw new AssertionError();
+
+            WaitingThread waiting = getWaiting();
+            AwaitPaused done = new AwaitPaused(waiting);
+            waiting.thread.beforeInvocation(interceptor, done);
+            synchronized (waiting)
+            {
+                waiting.submit(task);
+                done.awaitPause();
+            }
+        }
+
+        public void submitUnmanaged(Runnable task)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            addPending(task);
+            WaitingThread waiting = getWaiting();
+            waiting.submit(task);
+        }
+
+        private WaitingThread getWaiting()
+        {
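+            // reuse an idle thread if one is parked waiting, otherwise create a new one on demand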
+            WaitingThread next = waiting.poll();
+            if (next != null)
+                return next;
+
+            return new WaitingThread(threadFactory);
+        }
+
+        public synchronized void shutdown()
+        {
+            isShutdown = true;
+            WaitingThread next;
+            while (null != (next = waiting.poll()))
+                next.terminate();
+
+            if (pending == 0)
+                terminate();
+        }
+
+        synchronized void terminate()
+        {
+            List<InterceptibleThread> snapshot = new ArrayList<>(threads.keySet());
+            for (InterceptibleThread thread : snapshot)
+            {
+                WaitingThread terminate = threads.get(thread);
+                if (terminate != null)
+                    terminate.terminate();
+            }
+            runDeterministic(isTerminated::signal);
+        }
+
+        public synchronized List<Runnable> shutdownNow()
+        {
+            shutdown();
+            threads.keySet().forEach(InterceptibleThread::interrupt);
+            return Collections.emptyList();
+        }
+
+        @Override
+        public boolean inExecutor()
+        {
+            return threads.containsKey(Thread.currentThread());
+        }
+
+        @Override
+        public int getActiveTaskCount()
+        {
+            return threads.size() - waiting.size();
+        }
+
+        @Override
+        public long getCompletedTaskCount()
+        {
+            return 0;
+        }
+
+        @Override
+        public int getPendingTaskCount()
+        {
+            return 0;
+        }
+
+        @Override
+        public int getCorePoolSize()
+        {
+            return concurrency;
+        }
+
+        @Override
+        public int getMaximumPoolSize()
+        {
+            return concurrency;
+        }
+
+        public String toString()
+        {
+            return threadFactory.toString();
+        }
+
+        @Override
+        public int concurrency()
+        {
+            return concurrency;
+        }
+    }
+
+    // we might want different variants
+    // (we considered a non-intercepting or an immediate-executor variant, but we need to intercept the thread events)
+    @PerClassLoader
+    abstract class AbstractSingleThreadedExecutorPlus extends AbstractInterceptingExecutor implements SequentialExecutorPlus
+    {
+        static class AtLeastOnce extends SingleThreadExecutorPlus.AtLeastOnce
+        {
+            AtLeastOnce(SequentialExecutorPlus executor, Runnable run)
+            {
+                super(executor, run);
+            }
+
+            public boolean trigger()
+            {
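+                // CAS false -> true, so concurrent triggers collapse into a single execution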
+                boolean success;
+                if (success = compareAndSet(false, true))
+                    executor.execute(this);
+                else
+                    executor.execute(() -> {}); // submit a no-op, so we can still impose our causality orderings
+                return success;
+            }
+        }
+
+        final InterceptibleThread thread;
+        final ArrayDeque<Runnable> queue = new ArrayDeque<>();
+        volatile boolean executing, terminating, terminated;
+
+        AbstractSingleThreadedExecutorPlus(InterceptorOfExecution interceptorOfExecution, ThreadFactory threadFactory, InterceptingTaskFactory taskFactory)
+        {
+            super(interceptorOfExecution, taskFactory);
+            this.thread = (InterceptibleThread) threadFactory.newThread(() -> {
+                InterceptibleThread thread = (InterceptibleThread) Thread.currentThread();
+                try
+                {
+                    while (true)
+                    {
+                        Runnable task;
+                        try
+                        {
+                            task = dequeue();
+                        }
+                        catch (InterruptedException | UncheckedInterruptedException ignore)
+                        {
+                            if (terminating) return;
+                            else continue;
+                        }
+
+                        try
+                        {
+                            task.run();
+                        }
+                        catch (Throwable t)
+                        {
+                            try { thread.getUncaughtExceptionHandler().uncaughtException(thread, t); }
+                            catch (Throwable ignore) {}
+                        }
+
+                        executing = false;
+                        boolean shutdown = isShutdown;
+                        if (0 == completePending(task) && shutdown)
+                            return;
+
+                        thread.interceptTermination(false);
+                    }
+                }
+                finally
+                {
+                    runDeterministic(thread::onTermination);
+                    if (terminating)
+                    {
+                        synchronized (this)
+                        {
+                            terminated = true;
+                            notifyAll();
+                        }
+                    }
+                    else
+                    {
+                        runDeterministic(this::terminate);
+                    }
+                }
+            });
+            thread.start();
+        }
+
+        void terminate()
+        {
+            synchronized (this)
+            {
+                assert pending == 0;
+                if (terminating)
+                    return;
+
+                terminating = true;
+                if (Thread.currentThread() != thread)
+                {
+                    notifyAll();
+                    try { while (!terminated) wait(); }
+                    catch (InterruptedException e) { throw new UncheckedInterruptedException(e); }
+                }
+                terminated = true;
+            }
+
+            isTerminated.signal(); // this has simulator side-effects, so try to perform it before we call interceptTermination
+            if (Thread.currentThread() == thread && thread.isIntercepting())
+                thread.interceptTermination(true);
+        }
+
+        public synchronized void shutdown()
+        {
+            if (isShutdown)
+                return;
+
+            isShutdown = true;
+            if (pending == 0)
+                terminate();
+        }
+
+        public synchronized List<Runnable> shutdownNow()
+        {
+            if (isShutdown)
+                return Collections.emptyList();
+
+            isShutdown = true;
+            List<Runnable> cancelled = new ArrayList<>(queue);
+            queue.clear();
+            cancelled.forEach(super::cancelPending);
+            if (pending == 0) terminate();
+            else thread.interrupt();
+            return cancelled;
+        }
+
+        synchronized void enqueue(Runnable runnable)
+        {
+            queue.add(runnable);
+            notify();
+        }
+
+        synchronized Runnable dequeue() throws InterruptedException
+        {
+            Runnable next;
+            while (null == (next = queue.poll()) && !terminating)
+                wait();
+
+            if (next == null)
+                throw new InterruptedException();
+
+            return next;
+        }
+
+        public AtLeastOnce atLeastOnceTrigger(Runnable run)
+        {
+            return new AtLeastOnce(this, run);
+        }
+
+        @Override
+        public boolean inExecutor()
+        {
+            return thread == Thread.currentThread();
+        }
+
+        @Override
+        public int getCorePoolSize()
+        {
+            return 1;
+        }
+
+        @Override
+        public int getMaximumPoolSize()
+        {
+            return 1;
+        }
+
+        public String toString()
+        {
+            return thread.toString();
+        }
+    }
+
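+    /**
+     * Editor's summary (not from the original patch): single-threaded intercepting executor that also
+     * supports scheduling; scheduled work is handed to the interceptor with simulated-time deadlines
+     * rather than being driven by a real timer.
+     */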
+    @PerClassLoader
+    class InterceptingSequentialExecutor extends AbstractSingleThreadedExecutorPlus implements InterceptingExecutor, ScheduledExecutorPlus, OrderOn
+    {
+        InterceptingSequentialExecutor(InterceptorOfExecution interceptorOfExecution, ThreadFactory threadFactory, InterceptingTaskFactory taskFactory)
+        {
+            super(interceptorOfExecution, threadFactory, taskFactory);
+        }
+
+        public void submitAndAwaitPause(Runnable task, InterceptorOfConsequences interceptor)
+        {
+            synchronized (this)
+            {
+                // we don't check isShutdown, as the simulation could have queued a task prior to shutdown
+                if (terminated) throw new AssertionError();
+                if (executing) throw new AssertionError();
+                if (debugPending != null && !debugPending.contains(task)) throw new AssertionError();
+                executing = true;
+
+                AwaitPaused done = new AwaitPaused(this);
+                thread.beforeInvocation(interceptor, done);
+                enqueue(task);
+                done.awaitPause();
+            }
+        }
+
+        public synchronized void submitUnmanaged(Runnable task)
+        {
+            addPending(task);
+            enqueue(task);
+        }
+
+        @Override public int getActiveTaskCount()
+        {
+            return !queue.isEmpty() || executing ? 1 : 0;
+        }
+
+        @Override public long getCompletedTaskCount()
+        {
+            return 0;
+        }
+
+        @Override
+        public int getPendingTaskCount()
+        {
+            return 0;
+        }
+
+        public ScheduledFuture<?> schedule(Runnable run, long delay, TimeUnit unit)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            long delayNanos = unit.toNanos(delay);
+            return interceptorOfExecution.intercept().schedule(SCHEDULED_TASK, delayNanos, relativeToGlobalNanos(delayNanos), callable(run, null), this);
+        }
+
+        public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            long delayNanos = unit.toNanos(delay);
+            return interceptorOfExecution.intercept().schedule(SCHEDULED_TASK, delayNanos, relativeToGlobalNanos(delayNanos), callable, this);
+        }
+
+        public ScheduledFuture<?> scheduleTimeoutWithDelay(Runnable run, long delay, TimeUnit unit)
+        {
+            return scheduleTimeoutAt(run, relativeToGlobalNanos(unit.toNanos(delay)));
+        }
+
+        public ScheduledFuture<?> scheduleAt(Runnable run, long deadlineNanos)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            return interceptorOfExecution.intercept().schedule(SCHEDULED_TASK, localToRelativeNanos(deadlineNanos), localToGlobalNanos(deadlineNanos), callable(run, null), this);
+        }
+
+        public ScheduledFuture<?> scheduleTimeoutAt(Runnable run, long deadlineNanos)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            return interceptorOfExecution.intercept().schedule(SCHEDULED_TIMEOUT, localToRelativeNanos(deadlineNanos), localToGlobalNanos(deadlineNanos), callable(run, null), this);
+        }
+
+        public ScheduledFuture<?> scheduleSelfRecurring(Runnable run, long delay, TimeUnit unit)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            long delayNanos = unit.toNanos(delay);
+            return interceptorOfExecution.intercept().schedule(SCHEDULED_DAEMON, delayNanos, relativeToGlobalNanos(delayNanos), callable(run, null), this);
+        }
+
+        public ScheduledFuture<?> scheduleAtFixedRate(Runnable run, long initialDelay, long period, TimeUnit unit)
+        {
+            if (isShutdown)
+                throw new RejectedExecutionException();
+
+            long delayNanos = unit.toNanos(initialDelay);
+            return interceptorOfExecution.intercept().schedule(SCHEDULED_DAEMON, delayNanos, relativeToGlobalNanos(delayNanos), new Callable<Object>()
+            {
+                @Override
+                public Object call()
+                {
+                    run.run();
+                    if (!isShutdown)
+                        scheduleAtFixedRate(run, period, period, unit);
+                    return null;
+                }
+
+                @Override
+                public String toString()
+                {
+                    return run.toString();
+                }
+            }, this);
+        }
+
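+        // note: under simulation, fixed-rate and fixed-delay scheduling collapse to the same behaviour,
+        // as the next run is only scheduled once the current one has completed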
+        public ScheduledFuture<?> scheduleWithFixedDelay(Runnable run, long initialDelay, long delay, TimeUnit unit)
+        {
+            return scheduleAtFixedRate(run, initialDelay, delay, unit);
+        }
+
+        public int concurrency()
+        {
+            return 1;
+        }
+    }
+
+    @PerClassLoader
+    class InterceptingPooledLocalAwareExecutor extends InterceptingPooledExecutor implements LocalAwareExecutorPlus
+    {
+        InterceptingPooledLocalAwareExecutor(InterceptorOfExecution interceptors, int concurrency, ThreadFactory threadFactory, InterceptingTaskFactory taskFactory)
+        {
+            super(interceptors, concurrency, threadFactory, taskFactory);
+        }
+    }
+
+    @PerClassLoader
+    class InterceptingLocalAwareSequentialExecutor extends InterceptingSequentialExecutor implements LocalAwareSequentialExecutorPlus
+    {
+        InterceptingLocalAwareSequentialExecutor(InterceptorOfExecution interceptorOfExecution, ThreadFactory threadFactory, InterceptingTaskFactory taskFactory)
+        {
+            super(interceptorOfExecution, threadFactory, taskFactory);
+        }
+    }
+
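+    /**
+     * Editor's summary: executor used for the DISCARD simulator semantics. It silently drops all
+     * submitted and scheduled work, returning cancelled or never-scheduled futures.
+     */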
+    @PerClassLoader
+    static class DiscardingSequentialExecutor implements LocalAwareSequentialExecutorPlus, ScheduledExecutorPlus
+    {
+        @Override
+        public void shutdown()
+        {
+        }
+
+        @Override
+        public List<Runnable> shutdownNow()
+        {
+            return Collections.emptyList();
+        }
+
+        @Override
+        public boolean isShutdown()
+        {
+            return false;
+        }
+
+        @Override
+        public boolean isTerminated()
+        {
+            return false;
+        }
+
+        @Override
+        public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException
+        {
+            return false;
+        }
+
+        @Override
+        public <T> Future<T> submit(Callable<T> task)
+        {
+            return ImmediateFuture.cancelled();
+        }
+
+        @Override
+        public <T> Future<T> submit(Runnable task, T result)
+        {
+            return ImmediateFuture.cancelled();
+        }
+
+        @Override
+        public Future<?> submit(Runnable task)
+        {
+            return ImmediateFuture.cancelled();
+        }
+
+        @Override
+        public void execute(WithResources withResources, Runnable task)
+        {
+        }
+
+        @Override
+        public <T> Future<T> submit(WithResources withResources, Callable<T> task)
+        {
+            return ImmediateFuture.cancelled();
+        }
+
+        @Override
+        public Future<?> submit(WithResources withResources, Runnable task)
+        {
+            return ImmediateFuture.cancelled();
+        }
+
+        @Override
+        public <T> Future<T> submit(WithResources withResources, Runnable task, T result)
+        {
+            return ImmediateFuture.cancelled();
+        }
+
+        @Override
+        public boolean inExecutor()
+        {
+            return false;
+        }
+
+        @Override
+        public int getCorePoolSize()
+        {
+            return 0;
+        }
+
+        @Override
+        public void setCorePoolSize(int newCorePoolSize)
+        {
+
+        }
+
+        @Override
+        public int getMaximumPoolSize()
+        {
+            return 0;
+        }
+
+        @Override
+        public void setMaximumPoolSize(int newMaximumPoolSize)
+        {
+
+        }
+
+        @Override
+        public int getActiveTaskCount()
+        {
+            return 0;
+        }
+
+        @Override
+        public long getCompletedTaskCount()
+        {
+            return 0;
+        }
+
+        @Override
+        public int getPendingTaskCount()
+        {
+            return 0;
+        }
+
+        @Override
+        public AtLeastOnceTrigger atLeastOnceTrigger(Runnable runnable)
+        {
+            return new AtLeastOnceTrigger()
+            {
+                @Override
+                public boolean trigger()
+                {
+                    return false;
+                }
+
+                @Override
+                public void runAfter(Runnable run)
+                {
+                }
+
+                @Override
+                public void sync()
+                {
+                }
+            };
+        }
+
+        @Override
+        public void execute(Runnable command)
+        {
+        }
+
+        @Override
+        public ScheduledFuture<?> scheduleSelfRecurring(Runnable run, long delay, TimeUnit units)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        @Override
+        public ScheduledFuture<?> scheduleAt(Runnable run, long deadline)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        @Override
+        public ScheduledFuture<?> scheduleTimeoutAt(Runnable run, long deadline)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        @Override
+        public ScheduledFuture<?> scheduleTimeoutWithDelay(Runnable run, long delay, TimeUnit units)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        @Override
+        public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        @Override
+        public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        @Override
+        public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        @Override
+        public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit)
+        {
+            return new NotScheduledFuture<>();
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingExecutorFactory.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingExecutorFactory.java
new file mode 100644
index 0000000..c7f4dce
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingExecutorFactory.java
@@ -0,0 +1,400 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.io.Serializable;
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.concurrent.ExecutorBuilder;
+import org.apache.cassandra.concurrent.ExecutorBuilderFactory;
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.InfiniteLoopExecutor;
+import org.apache.cassandra.concurrent.InfiniteLoopExecutor.Daemon;
+import org.apache.cassandra.concurrent.InfiniteLoopExecutor.Interrupts;
+import org.apache.cassandra.concurrent.InfiniteLoopExecutor.SimulatorSafe;
+import org.apache.cassandra.concurrent.Interruptible.Task;
+import org.apache.cassandra.concurrent.LocalAwareExecutorPlus;
+import org.apache.cassandra.concurrent.LocalAwareSequentialExecutorPlus;
+import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
+import org.apache.cassandra.concurrent.SequentialExecutorPlus;
+import org.apache.cassandra.concurrent.Interruptible;
+import org.apache.cassandra.concurrent.SyncFutureTask;
+import org.apache.cassandra.concurrent.TaskFactory;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableBiFunction;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableCallable;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableQuadFunction;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableSupplier;
+import org.apache.cassandra.distributed.impl.IsolatedExecutor;
+import org.apache.cassandra.simulator.systems.InterceptibleThreadFactory.ConcreteInterceptibleThreadFactory;
+import org.apache.cassandra.simulator.systems.InterceptibleThreadFactory.PlainThreadFactory;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor.DiscardingSequentialExecutor;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor.InterceptingTaskFactory;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor.InterceptingLocalAwareSequentialExecutor;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor.InterceptingPooledExecutor;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor.InterceptingPooledLocalAwareExecutor;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor.InterceptingSequentialExecutor;
+import org.apache.cassandra.simulator.systems.InterceptorOfExecution.InterceptExecution;
+import org.apache.cassandra.simulator.systems.SimulatedTime.LocalTime;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.INFINITE_LOOP;
+
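+/**
+ * Editor's summary (not from the original patch): ExecutorFactory installed for a simulated instance
+ * (one per class loader). It builds intercepting executors and interceptible thread factories, so the
+ * simulator can decide when submitted work runs.
+ */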
+public class InterceptingExecutorFactory implements ExecutorFactory, Closeable
+{
+    static class StandardSyncTaskFactory extends TaskFactory.Standard implements InterceptingTaskFactory, Serializable
+    {
+        @Override
+        public <T> RunnableFuture<T> newTask(Callable<T> call)
+        {
+            return new SyncFutureTask<>(call);
+        }
+
+        @Override
+        protected <T> RunnableFuture<T> newTask(WithResources withResources, Callable<T> call)
+        {
+            return new SyncFutureTask<>(withResources, call);
+        }
+    }
+
+    static class LocalAwareSyncTaskFactory extends TaskFactory.LocalAware implements InterceptingTaskFactory, Serializable
+    {
+        @Override
+        public <T> RunnableFuture<T> newTask(Callable<T> call)
+        {
+            return new SyncFutureTask<>(call);
+        }
+
+        @Override
+        protected <T> RunnableFuture<T> newTask(WithResources withResources, Callable<T> call)
+        {
+            return new SyncFutureTask<>(withResources, call);
+        }
+    }
+
+    abstract static class AbstractExecutorBuilder<E extends ExecutorService> implements ExecutorBuilder<E>
+    {
+        ThreadGroup threadGroup;
+        UncaughtExceptionHandler uncaughtExceptionHandler;
+
+        @Override
+        public ExecutorBuilder<E> withKeepAlive(long keepAlive, TimeUnit keepAliveUnits)
+        {
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withKeepAlive()
+        {
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withThreadPriority(int threadPriority)
+        {
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withThreadGroup(ThreadGroup threadGroup)
+        {
+            this.threadGroup = threadGroup;
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withDefaultThreadGroup()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public ExecutorBuilder<E> withQueueLimit(int queueLimit)
+        {
+            // queue limits are not implemented yet (not pressing)
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withRejectedExecutionHandler(RejectedExecutionHandler rejectedExecutionHandler)
+        {
+            // we don't currently ever reject execution, but we should perhaps consider implementing it
+            return this;
+        }
+
+        @Override
+        public ExecutorBuilder<E> withUncaughtExceptionHandler(UncaughtExceptionHandler uncaughtExceptionHandler)
+        {
+            this.uncaughtExceptionHandler = uncaughtExceptionHandler;
+            return this;
+        }
+    }
+
+    class SimpleExecutorBuilder<E extends ExecutorService> extends AbstractExecutorBuilder<E>
+    {
+
+        final String name;
+        final SerializableBiFunction<InterceptorOfExecution, ThreadFactory, E> factory;
+
+        SimpleExecutorBuilder(String name, SerializableBiFunction<InterceptorOfExecution, ThreadFactory, E> factory)
+        {
+            this.factory = factory;
+            this.name = name;
+        }
+
+        @Override
+        public E build()
+        {
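+            // the factory lambda is transferred into the target instance's class loader before it is
+            // applied, so the executor it constructs belongs to that instance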
+            return transferToInstance.apply(factory).apply(simulatedExecution, factory(name, null, threadGroup, uncaughtExceptionHandler));
+        }
+    }
+
+    final SimulatedExecution simulatedExecution;
+    final InterceptorOfGlobalMethods interceptorOfGlobalMethods;
+    final ClassLoader classLoader;
+    final ThreadGroup threadGroup;
+    final IIsolatedExecutor.DynamicFunction<Serializable> transferToInstance;
+    volatile boolean isClosed;
+
+    InterceptingExecutorFactory(SimulatedExecution simulatedExecution, InterceptorOfGlobalMethods interceptorOfGlobalMethods, ClassLoader classLoader, ThreadGroup threadGroup)
+    {
+        this.simulatedExecution = simulatedExecution;
+        this.interceptorOfGlobalMethods = interceptorOfGlobalMethods;
+        this.classLoader = classLoader;
+        this.threadGroup = threadGroup;
+        this.transferToInstance = IsolatedExecutor.transferTo(classLoader);
+    }
+
+    public InterceptibleThreadFactory factory(String name)
+    {
+        return factory(name, null);
+    }
+
+    InterceptibleThreadFactory factory(String name, Object extraInfo)
+    {
+        return factory(name, extraInfo, threadGroup);
+    }
+
+    InterceptibleThreadFactory factory(String name, Object extraInfo, ThreadGroup threadGroup)
+    {
+        return factory(name, extraInfo, threadGroup, null);
+    }
+
+    InterceptibleThreadFactory factory(String name, Object extraInfo, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler)
+    {
+        return factory(name, extraInfo, threadGroup, uncaughtExceptionHandler, ConcreteInterceptibleThreadFactory::new);
+    }
+
+    ThreadFactory plainFactory(String name, Object extraInfo, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler)
+    {
+        return factory(name, extraInfo, threadGroup, uncaughtExceptionHandler, PlainThreadFactory::new);
+    }
+
+    <F extends ThreadFactory> F factory(String name, Object extraInfo, ThreadGroup threadGroup, UncaughtExceptionHandler uncaughtExceptionHandler, InterceptibleThreadFactory.MetaFactory<F> factory)
+    {
+        if (uncaughtExceptionHandler == null)
+            uncaughtExceptionHandler = transferToInstance.apply((SerializableSupplier<UncaughtExceptionHandler>)() -> InterceptorOfGlobalMethods.Global::uncaughtException).get();
+
+        if (threadGroup == null) threadGroup = this.threadGroup;
+        else if (!this.threadGroup.parentOf(threadGroup)) throw new IllegalArgumentException();
+        Runnable onTermination = transferToInstance.apply((SerializableRunnable)FastThreadLocal::removeAll);
+        LocalTime time = transferToInstance.apply((SerializableCallable<LocalTime>) SimulatedTime.Global::current).call();
+        return factory.create(name, Thread.NORM_PRIORITY, classLoader, uncaughtExceptionHandler, threadGroup, onTermination, time, this, extraInfo);
+    }
+
+    @Override
+    public ExecutorBuilderFactory<ExecutorPlus, SequentialExecutorPlus> withJmx(String jmxPath)
+    {
+        return this;
+    }
+
+    @Override
+    public ExecutorBuilderFactory<ExecutorPlus, SequentialExecutorPlus> withJmxInternal()
+    {
+        return this;
+    }
+
+    @Override
+    public ExecutorBuilder<? extends SequentialExecutorPlus> configureSequential(String name)
+    {
+        return new SimpleExecutorBuilder<>(name, (interceptSupplier, threadFactory) -> new InterceptingSequentialExecutor(interceptSupplier, threadFactory, new StandardSyncTaskFactory()));
+    }
+
+    @Override
+    public ExecutorBuilder<? extends ExecutorPlus> configurePooled(String name, int threads)
+    {
+        return new SimpleExecutorBuilder<>(name, (interceptSupplier, threadFactory) -> new InterceptingPooledExecutor(interceptSupplier, threads, threadFactory, new StandardSyncTaskFactory()));
+    }
+
+    public SequentialExecutorPlus sequential(String name)
+    {
+        return configureSequential(name).build();
+    }
+
+    @Override
+    public LocalAwareSubFactory localAware()
+    {
+        return new LocalAwareSubFactory()
+        {
+            @Override
+            public LocalAwareSubFactoryWithJMX withJmx(String jmxPath)
+            {
+                return new LocalAwareSubFactoryWithJMX()
+                {
+                    @Override
+                    public LocalAwareExecutorPlus shared(String name, int threads, ExecutorPlus.MaximumPoolSizeListener onSetMaxSize)
+                    {
+                        return pooled(name, threads);
+                    }
+
+                    @Override
+                    public ExecutorBuilder<? extends LocalAwareSequentialExecutorPlus> configureSequential(String name)
+                    {
+                        return new SimpleExecutorBuilder<>(name, (interceptSupplier, threadFactory) -> new InterceptingLocalAwareSequentialExecutor(interceptSupplier, threadFactory, new LocalAwareSyncTaskFactory()));
+                    }
+
+                    @Override
+                    public ExecutorBuilder<? extends LocalAwareExecutorPlus> configurePooled(String name, int threads)
+                    {
+                        return new SimpleExecutorBuilder<>(name, (interceptSupplier, threadFactory) -> new InterceptingPooledLocalAwareExecutor(interceptSupplier, threads, threadFactory, new LocalAwareSyncTaskFactory()));
+                    }
+                };
+            }
+
+            @Override
+            public ExecutorBuilder<? extends LocalAwareSequentialExecutorPlus> configureSequential(String name)
+            {
+                return new SimpleExecutorBuilder<>(name, (interceptSupplier, threadFactory) -> new InterceptingLocalAwareSequentialExecutor(interceptSupplier, threadFactory, new LocalAwareSyncTaskFactory()));
+            }
+
+            @Override
+            public ExecutorBuilder<? extends LocalAwareExecutorPlus> configurePooled(String name, int threads)
+            {
+                return new SimpleExecutorBuilder<>(name, (interceptSupplier, threadFactory) -> new InterceptingPooledLocalAwareExecutor(interceptSupplier, threads, threadFactory, new LocalAwareSyncTaskFactory()));
+            }
+        };
+    }
+
+    @Override
+    public ScheduledExecutorPlus scheduled(boolean executeOnShutdown, String name, int priority, SimulatorSemantics simulatorSemantics)
+    {
+        switch (simulatorSemantics)
+        {
+            default: throw new AssertionError();
+            case NORMAL:
+                return transferToInstance.apply((SerializableBiFunction<InterceptorOfExecution, ThreadFactory, ScheduledExecutorPlus>) (interceptSupplier, threadFactory) -> new InterceptingSequentialExecutor(interceptSupplier, threadFactory, new StandardSyncTaskFactory())).apply(simulatedExecution, factory(name));
+            case DISCARD:
+                return transferToInstance.apply((SerializableSupplier<ScheduledExecutorPlus>) DiscardingSequentialExecutor::new).get();
+        }
+    }
+
+    @Override
+    public ExecutorPlus pooled(String name, int threads)
+    {
+        if (threads == 1)
+            return configureSequential(name).build();
+        return configurePooled(name, threads).build();
+    }
+
+    public Thread startThread(String name, Runnable runnable, Daemon daemon)
+    {
+        return simulatedExecution.intercept().start(SimulatedAction.Kind.THREAD, factory(name)::newThread, runnable);
+    }
+
+    @VisibleForTesting
+    public InterceptedExecution.InterceptedThreadStart startParked(String name, Runnable run)
+    {
+        return new InterceptedExecution.InterceptedThreadStart(factory(name)::newThread,
+                                                               run,
+                                                               SimulatedAction.Kind.THREAD);
+    }
+
+    @Override
+    public Interruptible infiniteLoop(String name, Task task, SimulatorSafe simulatorSafe, Daemon daemon, Interrupts interrupts)
+    {
+        if (simulatorSafe != SimulatorSafe.SAFE)
+        {
+            // avoid using rewritten classes here (so use the system class loader's InfiniteLoopExecutor), as we cannot fully control the thread's execution
+            return new InfiniteLoopExecutor((n, t) -> {
+                Thread thread = plainFactory(n, t, threadGroup, null).newThread(t);
+                thread.start();
+                return thread;
+            }, name, task, interrupts);
+        }
+
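+        // simulator-safe loops are started through the interceptor, in the instance's class loader,
+        // so their execution is scheduled by the simulation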
+        InterceptExecution interceptor = simulatedExecution.intercept();
+        return transferToInstance.apply((SerializableQuadFunction<BiFunction<String, Runnable, Thread>, String, Task, Interrupts, Interruptible>)InfiniteLoopExecutor::new)
+                                 .apply((n, r) -> interceptor.start(INFINITE_LOOP, factory(n, task)::newThread, r), name, task, interrupts);
+    }
+
+    @Override
+    public ThreadGroup newThreadGroup(String name)
+    {
+        return new ThreadGroup(threadGroup, name);
+    }
+
+    public void close()
+    {
+        isClosed = true;
+        forEach(threadGroup, thread -> {
+            thread.trapInterrupts(false);
+            thread.interrupt();
+        });
+        threadGroup.interrupt();
+    }
+
+    public void interrupt()
+    {
+        threadGroup.interrupt();
+    }
+
+    private static void forEach(ThreadGroup threadGroup, Consumer<InterceptibleThread> consumer)
+    {
+        Thread[] threads;
+        ThreadGroup[] groups;
+        synchronized (threadGroup)
+        {
+            threads = new Thread[threadGroup.activeCount()];
+            threadGroup.enumerate(threads, false);
+            groups = new ThreadGroup[threadGroup.activeGroupCount()];
+            threadGroup.enumerate(groups, false);
+        }
+        for (Thread thread : threads)
+        {
+            if (thread instanceof InterceptibleThread)
+                consumer.accept((InterceptibleThread) thread);
+        }
+        for (ThreadGroup group : groups) forEach(group, consumer);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingGlobalMethods.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingGlobalMethods.java
new file mode 100644
index 0000000..45a64f4
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingGlobalMethods.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.UUID;
+import java.util.function.Consumer;
+import java.util.function.LongConsumer;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites.Capture;
+import org.apache.cassandra.simulator.systems.InterceptedWait.InterceptedConditionWait;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+import org.apache.cassandra.utils.concurrent.WaitQueue;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_SIMULATOR_DETERMINISM_CHECK;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.NEMESIS;
+import static org.apache.cassandra.simulator.systems.NonInterceptible.Permit.OPTIONAL;
+import static org.apache.cassandra.simulator.systems.NonInterceptible.Permit.REQUIRED;
+
+@PerClassLoader
+public class InterceptingGlobalMethods extends InterceptingMonitors implements InterceptorOfGlobalMethods
+{
+    private static final Logger logger = LoggerFactory.getLogger(InterceptingGlobalMethods.class);
+    private static final boolean isDeterminismCheckStrict = TEST_SIMULATOR_DETERMINISM_CHECK.convert(name -> name.equals("strict"));
+
+    private final @Nullable LongConsumer onThreadLocalRandomCheck;
+    private final Capture capture;
+    private int uniqueUuidCounter = 0;
+    private final Consumer<Throwable> onUncaughtException;
+
+    public InterceptingGlobalMethods(Capture capture, LongConsumer onThreadLocalRandomCheck, Consumer<Throwable> onUncaughtException, RandomSource random)
+    {
+        super(random);
+        this.capture = capture.any() ? capture : null;
+        this.onThreadLocalRandomCheck = onThreadLocalRandomCheck;
+        this.onUncaughtException = onUncaughtException;
+    }
+
+    @Override
+    public WaitQueue newWaitQueue()
+    {
+        return new InterceptingWaitQueue();
+    }
+
+    @Override
+    public CountDownLatch newCountDownLatch(int count)
+    {
+        return new InterceptingAwaitable.InterceptingCountDownLatch(count);
+    }
+
+    @Override
+    public Condition newOneTimeCondition()
+    {
+        return new InterceptingAwaitable.InterceptingCondition();
+    }
+
+    @Override
+    public InterceptedWait.CaptureSites captureWaitSite(Thread thread)
+    {
+        if (capture == null)
+            return null;
+
+        return new InterceptedWait.CaptureSites(thread, capture);
+    }
+
+    @Override
+    public InterceptibleThread ifIntercepted()
+    {
+        Thread thread = Thread.currentThread();
+        if (thread instanceof InterceptibleThread)
+        {
+            InterceptibleThread interceptibleThread = (InterceptibleThread) thread;
+            if (interceptibleThread.isIntercepting())
+                return interceptibleThread;
+        }
+
+        if (NonInterceptible.isPermitted())
+            return null;
+
+        if (!disabled)
+            throw failWithOOM();
+
+        return null;
+    }
+
+    @Override
+    public void uncaughtException(Thread thread, Throwable throwable)
+    {
+        onUncaughtException.accept(throwable);
+    }
+
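+    /**
+     * Editor's note: with the given probability, pauses the calling (intercepted) thread on a NEMESIS
+     * wait, letting the simulator decide when it resumes relative to other events.
+     */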
+    @Override
+    public void nemesis(float chance)
+    {
+        InterceptibleThread thread = ifIntercepted();
+        if (thread == null || thread.isEvaluationDeterministic() || !random.decide(chance))
+            return;
+
+        InterceptedConditionWait signal = new InterceptedConditionWait(NEMESIS, 0L, thread, captureWaitSite(thread), null);
+        thread.interceptWait(signal);
+
+        // save interrupt state to restore afterwards - new interrupts only arrive if the simulation is terminating
+        boolean restoreInterrupt = Thread.interrupted();
+        try
+        {
+            while (true)
+            {
+                try
+                {
+                    signal.awaitDeclaredUninterruptible();
+                    return;
+                }
+                catch (InterruptedException e)
+                {
+                    restoreInterrupt = true;
+                    if (disabled)
+                        return;
+                }
+            }
+        }
+        finally
+        {
+            if (restoreInterrupt)
+                thread.interrupt();
+        }
+    }
+
+    @Override
+    public long randomSeed()
+    {
+        InterceptibleThread thread = ifIntercepted();
+        if (thread == null || thread.isEvaluationDeterministic())
+            return Thread.currentThread().getName().hashCode();
+
+        return random.uniform(Long.MIN_VALUE, Long.MAX_VALUE);
+    }
+
+    @Override
+    public synchronized UUID randomUUID()
+    {
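+        // deterministic stand-in for a random (version 4) UUID: random high bits with the version
+        // nibble forced to 4, and a monotonically increasing counter in the low bits for uniqueness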
+        long msb = random.uniform(0, 1L << 60);
+        msb = ((msb << 4) & 0xffffffffffff0000L) | 0x4000 | (msb & 0xfff);
+        return new UUID(msb, (1L << 63) | uniqueUuidCounter++);
+    }
+
+    @Override
+    public void threadLocalRandomCheck(long seed)
+    {
+        if (onThreadLocalRandomCheck != null)
+            onThreadLocalRandomCheck.accept(seed);
+    }
+
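+    /**
+     * Editor's summary: determinism check hook. It fails (via failWithOOM) when invoked from a thread
+     * that is neither intercepted nor explicitly permitted to be non-deterministic, unless the check
+     * has been stopped.
+     */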
+    public static class ThreadLocalRandomCheck implements LongConsumer
+    {
+        final LongConsumer wrapped;
+        private boolean disabled;
+
+        public ThreadLocalRandomCheck(LongConsumer wrapped)
+        {
+            this.wrapped = wrapped;
+        }
+
+        @Override
+        public void accept(long value)
+        {
+            if (wrapped != null)
+                wrapped.accept(value);
+
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                InterceptibleThread interceptibleThread = (InterceptibleThread) thread;
+                if (interceptibleThread.isIntercepting())
+                    return;
+            }
+
+            if (NonInterceptible.isPermitted(isDeterminismCheckStrict ? OPTIONAL : REQUIRED))
+                return;
+
+            if (!disabled)
+                throw failWithOOM();
+        }
+
+        public void stop()
+        {
+            disabled = true;
+        }
+    }
+
+    @Override
+    public long nanoTime()
+    {
+        return Clock.Global.nanoTime();
+    }
+
+    @Override
+    public long currentTimeMillis()
+    {
+        return Clock.Global.currentTimeMillis();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingMonitors.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingMonitors.java
new file mode 100644
index 0000000..eab35de
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingMonitors.java
@@ -0,0 +1,878 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.nicoulaj.compilecommand.annotations.Inline;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.systems.InterceptedWait.InterceptedConditionWait;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.concurrent.Awaitable.SyncAwaitable;
+import org.apache.cassandra.utils.concurrent.Threads;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_SIMULATOR_DEBUG;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.NEMESIS;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.SLEEP_UNTIL;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.UNBOUNDED_WAIT;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.WAIT_UNTIL;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Trigger.SIGNAL;
+import static org.apache.cassandra.simulator.systems.InterceptibleThread.interceptorOrDefault;
+import static org.apache.cassandra.simulator.systems.InterceptingMonitors.WaitListAccessor.LOCK;
+import static org.apache.cassandra.simulator.systems.InterceptingMonitors.WaitListAccessor.NOTIFY;
+import static org.apache.cassandra.simulator.systems.SimulatedTime.Global.relativeToGlobalNanos;
+
+@PerClassLoader
+@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
+public abstract class InterceptingMonitors implements InterceptorOfGlobalMethods, Closeable
+{
+    private static final Logger logger = LoggerFactory.getLogger(InterceptingMonitors.class);
+    private static final boolean DEBUG_MONITOR_STATE = TEST_SIMULATOR_DEBUG.getBoolean();
+
+    static class MonitorState
+    {
+        InterceptedMonitorWait waitingOnNotify;
+        InterceptedMonitorWait waitingOnLock;
+        /**
+         * The thread we have assigned lock ownership to.
+         * This thread may not yet be actively holding the lock: we may have found it waiting
+         * for the monitor and assigned it to receive the lock when it is next scheduled.
+         */
+        InterceptibleThread heldBy;
+        int depth;
+        int suspended;
+        Deque<Object> recentActions = DEBUG_MONITOR_STATE ? new ArrayDeque<>() : null;
+
+        boolean isEmpty()
+        {
+            return depth == 0 && waitingOnLock == null && waitingOnNotify == null && suspended == 0;
+        }
+
+        InterceptedMonitorWait removeAllWaitingOn(WaitListAccessor list)
+        {
+            InterceptedMonitorWait result = list.head(this);
+            list.setHead(this, null);
+
+            InterceptedMonitorWait cur = result;
+            while (cur != null)
+            {
+                InterceptedMonitorWait next = cur.next;
+                cur.waitingOn = null;
+                cur.next = null;
+                cur = next;
+            }
+            return result;
+        }
+
+        void removeWaitingOn(InterceptedMonitorWait remove)
+        {
+            if (remove.waitingOn != null)
+            {
+                InterceptedMonitorWait head = remove.waitingOn.head(this);
+                remove.waitingOn.setHead(this, head.remove(remove));
+                assert remove.next == null;
+            }
+        }
+
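+        // remove a uniformly-random waiter from the given list, so that wake-up order is decided by
+        // the simulation's RandomSource rather than by the JVM's scheduler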
+        @Inline
+        InterceptedMonitorWait removeOneWaitingOn(WaitListAccessor list, RandomSource random)
+        {
+            InterceptedMonitorWait head = list.head(this);
+            if (head == null)
+                return null;
+
+            if (head.next == null)
+            {
+                list.setHead(this, null);
+                head.waitingOn = null;
+                return head;
+            }
+
+            int i = random.uniform(0, 1 + head.nextLength);
+            if (i == 0)
+            {
+                list.setHead(this, head.next);
+                head.next.nextLength = head.nextLength - 1;
+                head.next = null;
+                head.waitingOn = null;
+                return head;
+            }
+
+            InterceptedMonitorWait pred = head;
+            while (--i > 0)
+                pred = pred.next;
+
+            InterceptedMonitorWait result = pred.next;
+            pred.next = result.next;
+            --head.nextLength;
+            result.next = null;
+            result.waitingOn = null;
+            return result;
+        }
+
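+        // append the waiter to the list; new entries are linked immediately after the head so that
+        // the head reference and its cached nextLength stay valid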
+        void waitOn(WaitListAccessor list, InterceptedMonitorWait wait)
+        {
+            assert wait.waitingOn == null;
+            wait.waitingOn = list;
+
+            assert wait.next == null;
+            InterceptedMonitorWait head = list.head(this);
+            if (head != null)
+            {
+                wait.next = head.next;
+                head.next = wait;
+                ++head.nextLength;
+            }
+            else
+            {
+                list.setHead(this, wait);
+                wait.nextLength = 0;
+            }
+        }
+
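+        // release the monitor on behalf of a thread entering wait(): stash its re-entrancy depth on
+        // the wait itself so it can be restored when the thread re-acquires the monitor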
+        void suspend(InterceptedMonitorWait wait)
+        {
+            assert heldBy == wait.waiting;
+            wait.suspendMonitor(depth);
+            ++suspended;
+            heldBy = null;
+            depth = 0;
+        }
+
+        void restore(InterceptedMonitorWait wait)
+        {
+            assert heldBy == null || heldBy == wait.waiting;
+            assert depth == 0;
+            assert suspended > 0;
+            heldBy = wait.waiting;
+            depth = wait.unsuspendMonitor();
+            --suspended;
+        }
+
+        void claim(InterceptedMonitorWait wait)
+        {
+            assert heldBy == null || heldBy == wait.waiting;
+            assert depth == 0;
+            heldBy = wait.waiting;
+            depth = wait.unsuspendMonitor();
+        }
+
+        void log(Object event, Thread toThread, Thread byThread)
+        {
+            if (recentActions != null)
+                log(event + " " + toThread + " by " + byThread);
+        }
+
+        void log(Object event, Thread toThread)
+        {
+            if (recentActions != null)
+                log(event + " " + toThread);
+        }
+
+        void log(Object event)
+        {
+            if (recentActions == null)
+                return;
+
+            if (recentActions.size() > 20)
+                recentActions.poll();
+            recentActions.add(event + " " + depth);
+        }
+    }
+
+    interface WaitListAccessor
+    {
+        static final WaitListAccessor NOTIFY = new WaitListAccessor()
+        {
+            @Override public InterceptedMonitorWait head(MonitorState state) { return state.waitingOnNotify; }
+            @Override public void setHead(MonitorState state, InterceptedMonitorWait newHead) { state.waitingOnNotify = newHead; }
+        };
+
+        static final WaitListAccessor LOCK = new WaitListAccessor()
+        {
+            @Override public InterceptedMonitorWait head(MonitorState state) { return state.waitingOnLock; }
+            @Override public void setHead(MonitorState state, InterceptedMonitorWait newHead) { state.waitingOnLock = newHead; }
+        };
+
+        InterceptedMonitorWait head(MonitorState state);
+        void setHead(MonitorState state, InterceptedMonitorWait newHead);
+    }
+
+    static class InterceptedMonitorWait implements InterceptedWait
+    {
+        Kind kind;
+        final long waitTime;
+        final InterceptibleThread waiting;
+        final CaptureSites captureSites;
+        final InterceptorOfConsequences interceptedBy;
+        final MonitorState state;
+        final Object monitor;
+        int suspendedMonitorDepth;
+
+        Trigger trigger;
+        boolean isTriggered;
+        final List<TriggerListener> onTrigger = new ArrayList<>(3);
+
+        boolean notifiedOfPause;
+        boolean waitingOnRelinquish;
+
+        WaitListAccessor waitingOn;
+        volatile InterceptedMonitorWait next;
+        int nextLength;
+        boolean hasExited;
+
+        InterceptedMonitorWait(Kind kind, long waitTime, MonitorState state, InterceptibleThread waiting, CaptureSites captureSites)
+        {
+            this.kind = kind;
+            this.waitTime = waitTime;
+            this.waiting = waiting;
+            this.captureSites = captureSites;
+            this.interceptedBy = waiting.interceptedBy();
+            this.state = state;
+            this.monitor = this;
+        }
+
+        InterceptedMonitorWait(Kind kind, long waitTime, MonitorState state, InterceptibleThread waiting, CaptureSites captureSites, Object object)
+        {
+            this.kind = kind;
+            this.waitTime = waitTime;
+            this.waiting = waiting;
+            this.captureSites = captureSites;
+            this.interceptedBy = waiting.interceptedBy();
+            this.state = state;
+            this.monitor = object;
+        }
+
+        @Override
+        public Kind kind()
+        {
+            return kind;
+        }
+
+        void suspendMonitor(int depth)
+        {
+            assert suspendedMonitorDepth == 0;
+            suspendedMonitorDepth = depth;
+        }
+
+        int unsuspendMonitor()
+        {
+            assert suspendedMonitorDepth > 0;
+            int result = suspendedMonitorDepth;
+            suspendedMonitorDepth = 0;
+            return result;
+        }
+
+        public boolean isTriggered()
+        {
+            return isTriggered;
+        }
+
+        public boolean isInterruptible()
+        {
+            return true;
+        }
+
+        @Override
+        public long waitTime()
+        {
+            return waitTime;
+        }
+
+        @Override
+        public void interceptWakeup(Trigger trigger, Thread by)
+        {
+            if (this.trigger != null && this.trigger.compareTo(trigger) >= 0)
+                return;
+
+            this.trigger = trigger;
+            if (captureSites != null)
+                captureSites.registerWakeup(by);
+            interceptorOrDefault(by).interceptWakeup(this, trigger, interceptedBy);
+        }
+
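+        // invoked by the simulation when it decides to wake this waiter: marks it triggered, wakes the
+        // waiting thread, then blocks until that thread reports it has paused again (and, if necessary,
+        // has relinquished the monitor), so the two never run concurrently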
+        public void triggerAndAwaitDone(InterceptorOfConsequences interceptor, Trigger trigger)
+        {
+            if (isTriggered)
+                return;
+
+            if (hasExited)
+                throw failWithOOM();
+
+            state.removeWaitingOn(this); // if still present, remove
+
+            // we may have been assigned ownership of the lock if we attempted to trigger but found the lock held
+            if (state.heldBy != null && state.heldBy != waiting)
+            {   // reset this condition to wait on lock release
+                state.waitOn(LOCK, this);
+                this.kind = UNBOUNDED_WAIT;
+                this.trigger = null;
+                interceptor.beforeInvocation(waiting);
+                interceptor.interceptWait(this);
+                return;
+            }
+
+            try
+            {
+                synchronized (monitor)
+                {
+                    waiting.beforeInvocation(interceptor, this);
+
+                    isTriggered = true;
+                    onTrigger.forEach(listener -> listener.onTrigger(this));
+
+                    if (!waiting.preWakeup(this))
+                        monitor.notifyAll(); // TODO: could use interrupts to target waiting anyway, avoiding notifyAll()
+
+                    while (!notifiedOfPause)
+                        monitor.wait();
+
+                    if (waitingOnRelinquish)
+                    {
+                        waitingOnRelinquish = false;
+                        monitor.notifyAll(); // TODO: could use interrupts to target waiting anyway, avoiding notifyAll()
+                    }
+                }
+            }
+            catch (InterruptedException ie)
+            {
+                throw new UncheckedInterruptedException(ie);
+            }
+        }
+
+        @Override
+        public void triggerBypass()
+        {
+            if (isTriggered)
+                return;
+
+            synchronized (monitor)
+            {
+                isTriggered = true;
+                monitor.notifyAll();
+                state.removeWaitingOn(this);
+            }
+        }
+
+        @Override
+        public void addListener(TriggerListener onTrigger)
+        {
+            this.onTrigger.add(onTrigger);
+        }
+
+        @Override
+        public Thread waiting()
+        {
+            return waiting;
+        }
+
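+        // signals that the woken thread has paused again; releases the scheduler blocked in
+        // triggerAndAwaitDone, with an extra handshake if the caller still holds the monitor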
+        @Override
+        public void notifyThreadPaused()
+        {
+            notifiedOfPause = true;
+            if (Thread.holdsLock(monitor))
+            {
+                monitor.notifyAll();
+                waitingOnRelinquish = true;
+                try { while (waitingOnRelinquish) monitor.wait(); }
+                catch (InterruptedException e) { throw new UncheckedInterruptedException(e); }
+            }
+            else
+            {
+                synchronized (monitor)
+                {
+                    monitor.notifyAll();
+                }
+            }
+        }
+
+        void await() throws InterruptedException
+        {
+            try
+            {
+                while (!isTriggered())
+                    monitor.wait();
+            }
+            finally
+            {
+                hasExited = true;
+            }
+        }
+
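+        // unlink the given waiter from this singly-linked wait list, returning the (possibly new) head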
+        InterceptedMonitorWait remove(InterceptedMonitorWait remove)
+        {
+            remove.waitingOn = null;
+
+            if (remove == this)
+            {
+                InterceptedMonitorWait next = this.next;
+                if (next != null)
+                {
+                    next.nextLength = nextLength - 1;
+                    remove.next = null;
+                }
+
+                return next;
+            }
+
+            InterceptedMonitorWait cur = this;
+            while (cur != null && cur.next != remove)
+                cur = cur.next;
+
+            if (cur != null)
+            {
+                cur.next = remove.next;
+                remove.next = null;
+                --nextLength;
+            }
+            return this;
+        }
+
+        public String toString()
+        {
+            return captureSites == null ? "" : "[" + captureSites + ']';
+        }
+    }
+
+    final RandomSource random;
+    private final Map<Object, MonitorState> monitors = new IdentityHashMap<>();
+    private final Map<Thread, Object> waitingOn = new IdentityHashMap<>();
+    protected boolean disabled;
+
+    public InterceptingMonitors(RandomSource random)
+    {
+        this.random = random;
+    }
+
+    private MonitorState state(Object monitor)
+    {
+        return monitors.computeIfAbsent(monitor, ignore -> new MonitorState());
+    }
+
+    private MonitorState maybeState(Object monitor)
+    {
+        return monitors.get(monitor);
+    }
+
+    private void maybeClear(Object monitor, MonitorState state)
+    {
+        if (state.isEmpty())
+            monitors.remove(monitor, state);
+    }
+
+    @Override
+    public void waitUntil(long deadline) throws InterruptedException
+    {
+        InterceptibleThread thread = ifIntercepted();
+        if (thread == null)
+        {
+            Clock.waitUntil(deadline);
+            return;
+        }
+
+        if (Thread.interrupted())
+            throw new InterruptedException();
+
+        InterceptedMonitorWait trigger = new InterceptedMonitorWait(SLEEP_UNTIL, deadline, new MonitorState(), thread, captureWaitSite(thread));
+        thread.interceptWait(trigger);
+        synchronized (trigger)
+        {
+            try
+            {
+                trigger.await();
+            }
+            catch (InterruptedException e)
+            {
+                if (!trigger.isTriggered)
+                    throw e;
+            }
+        }
+    }
+
+    @Override
+    public void sleep(long period, TimeUnit units) throws InterruptedException
+    {
+        waitUntil(nanoTime() + units.toNanos(period));
+    }
+
+    @Override
+    public void sleepUninterriptibly(long period, TimeUnit units)
+    {
+        try
+        {
+            sleep(period, units);
+        }
+        catch (InterruptedException e)
+        {
+            // instead of looping uninterruptibly
+            throw new UncheckedInterruptedException(e);
+        }
+    }
+
+    @Override
+    public boolean waitUntil(Object monitor, long deadline) throws InterruptedException
+    {
+        InterceptibleThread thread = ifIntercepted();
+        if (thread == null) return SyncAwaitable.waitUntil(monitor, deadline);
+        else return wait(monitor, thread, WAIT_UNTIL, deadline);
+    }
+
+    @Override
+    public void wait(Object monitor) throws InterruptedException
+    {
+        InterceptibleThread thread = ifIntercepted();
+        if (thread == null) monitor.wait();
+        else wait(monitor, thread, UNBOUNDED_WAIT, -1L);
+    }
+
+    @Override
+    public void wait(Object monitor, long millis) throws InterruptedException
+    {
+        InterceptibleThread thread = ifIntercepted();
+        if (thread == null) monitor.wait(millis);
+        else wait(monitor, thread, WAIT_UNTIL, relativeToGlobalNanos(MILLISECONDS.toNanos(millis)));
+    }
+
+    @Override
+    public void wait(Object monitor, long millis, int nanos) throws InterruptedException
+    {
+        InterceptibleThread thread = ifIntercepted();
+        if (thread == null) monitor.wait(millis, nanos);
+        else wait(monitor, thread, WAIT_UNTIL, relativeToGlobalNanos(MILLISECONDS.toNanos(millis) + nanos));
+    }
+
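+    // intercepted Object.wait(): suspend our ownership of the simulated monitor, queue this thread on
+    // the notify list, hand the monitor to any thread waiting to acquire it, then block until the
+    // simulation wakes us; returns true if the wake-up was a signal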
+    private boolean wait(Object monitor, InterceptibleThread thread, InterceptedWait.Kind kind, long waitNanos) throws InterruptedException
+    {
+        if (Thread.interrupted())
+            throw new InterruptedException();
+
+        MonitorState state = state(monitor);
+        InterceptedMonitorWait trigger = new InterceptedMonitorWait(kind, waitNanos, state, thread, captureWaitSite(thread), monitor);
+        state.log("enterwait", thread);
+        state.suspend(trigger);
+        state.waitOn(NOTIFY, trigger);
+        wakeOneWaitingOnLock(thread, state);
+        thread.interceptWait(trigger);
+        try
+        {
+            trigger.await();
+        }
+        finally
+        {
+            state.restore(trigger);
+            state.log("exitwait", thread);
+        }
+        return trigger.trigger == SIGNAL;
+    }
+
+    @Override
+    public void notify(Object monitor)
+    {
+        MonitorState state = state(monitor);
+        if (state != null)
+        {
+            InterceptedMonitorWait wake = state.removeOneWaitingOn(NOTIFY, random);
+            if (wake != null)
+            {
+                // TODO: assign ownership on monitorExit
+                assert wake.waitingOn == null;
+                Thread waker = Thread.currentThread();
+                wake.interceptWakeup(SIGNAL, waker);
+                state.log("notify", wake.waiting, waker);
+                return;
+            }
+        }
+        monitor.notify();
+    }
+
+    @Override
+    public void notifyAll(Object monitor)
+    {
+        MonitorState state = state(monitor);
+        if (state != null)
+        {
+            InterceptedMonitorWait wake = state.removeAllWaitingOn(NOTIFY);
+            if (wake != null)
+            {
+                Thread waker = Thread.currentThread();
+                wake.interceptWakeup(SIGNAL, waker);
+                state.log("notify", wake.waiting, waker);
+
+                wake = wake.next;
+                while (wake != null)
+                {
+                    InterceptedMonitorWait next = wake.next;
+                    state.waitOn(LOCK, wake);
+                    state.log("movetowaitonlock ", wake.waiting, waker);
+                    wake = next;
+                }
+                return;
+            }
+        }
+        monitor.notifyAll();
+    }
+
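+    // called before a simulated thread enters a synchronized block: optionally injects a nemesis pause,
+    // then either takes ownership of the simulated monitor, increments the re-entrancy depth, or queues
+    // the thread on the monitor's lock wait-list until ownership is handed to it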
+    @Override
+    public void preMonitorEnter(Object monitor, float preMonitorDelayChance)
+    {
+        if (disabled)
+            return;
+
+        Thread anyThread = Thread.currentThread();
+        if (!(anyThread instanceof InterceptibleThread))
+            return;
+
+        boolean restoreInterrupt = false;
+        InterceptibleThread thread = (InterceptibleThread) anyThread;
+        try
+        {
+            if (   !thread.isEvaluationDeterministic()
+                && random.decide(preMonitorDelayChance))
+            {
+                // TODO (feature): hold a stack of threads already paused by the nemesis, and, if one of the threads
+                //        is entering the monitor, put the contents of this stack into `waitingOn` for this monitor.
+                InterceptedConditionWait signal = new InterceptedConditionWait(NEMESIS, 0L, thread, captureWaitSite(thread), null);
+                thread.interceptWait(signal);
+
+                // save interrupt state to restore afterwards - new ones only arrive if terminating simulation
+                restoreInterrupt = Thread.interrupted();
+                while (true)
+                {
+                    try
+                    {
+                        signal.awaitDeclaredUninterruptible();
+                        break;
+                    }
+                    catch (InterruptedException e)
+                    {
+                        if (disabled)
+                            throw new UncheckedInterruptedException(e);
+                        restoreInterrupt = true;
+                    }
+                }
+            }
+
+            MonitorState state = state(monitor);
+            if (state.heldBy != thread)
+            {
+                if (state.heldBy != null)
+                {
+                    if (!thread.isIntercepting() && disabled) return;
+                    else if (!thread.isIntercepting())
+                        throw new AssertionError();
+
+                    checkForDeadlock(thread, state.heldBy);
+                    InterceptedMonitorWait wait = new InterceptedMonitorWait(UNBOUNDED_WAIT, 0L, state, thread, captureWaitSite(thread));
+                    wait.suspendedMonitorDepth = 1;
+                    state.log("monitorenter_wait", thread);
+                    state.waitOn(LOCK, wait);
+                    thread.interceptWait(wait);
+                    synchronized (wait)
+                    {
+                        waitingOn.put(thread, monitor);
+                        restoreInterrupt |= Thread.interrupted();
+                        try
+                        {
+                            while (true)
+                            {
+                                try
+                                {
+                                    wait.await();
+                                    break;
+                                }
+                                catch (InterruptedException e)
+                                {
+                                    if (disabled)
+                                    {
+                                        if (state.heldBy == thread)
+                                        {
+                                            state.heldBy = null;
+                                            state.depth = 0;
+                                        }
+                                        throw new UncheckedInterruptedException(e);
+                                    }
+
+                                    restoreInterrupt = true;
+                                    if (wait.isTriggered)
+                                        break;
+                                }
+                            }
+                        }
+                        finally
+                        {
+                            waitingOn.remove(thread);
+                        }
+                    }
+                    state.claim(wait);
+                    state.log("monitorenter_claim", thread);
+                }
+                else
+                {
+                    state.log("monitorenter_free", thread);
+                    state.heldBy = thread;
+                    state.depth = 1;
+                }
+            }
+            else
+            {
+                state.log("monitorreenter", thread);
+                state.depth++;
+            }
+        }
+        finally
+        {
+            if (restoreInterrupt)
+                thread.interrupt();
+        }
+    }
+
+    @Override
+    public void preMonitorExit(Object monitor)
+    {
+        if (disabled)
+            return;
+
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread))
+            return;
+
+        MonitorState state = maybeState(monitor);
+        if (state == null)
+            return;
+
+        if (state.heldBy != thread)
+            throw new AssertionError();
+
+        if (--state.depth > 0)
+        {
+            state.log("monitorreexit", thread);
+            return;
+        }
+
+        state.log("monitorexit", thread);
+        state.heldBy = null;
+
+        if (!wakeOneWaitingOnLock(thread, state))
+        {
+            maybeClear(monitor, state);
+        }
+    }
+
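+    // pick one thread waiting to acquire this monitor (at random), pre-assign it ownership and signal
+    // it; returns false if nothing was waiting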
+    private boolean wakeOneWaitingOnLock(Thread waker, MonitorState state)
+    {
+        InterceptedMonitorWait wake = state.removeOneWaitingOn(LOCK, random);
+        if (wake != null)
+        {
+            assert wake.waitingOn == null;
+            assert !wake.isTriggered();
+
+            wake.interceptWakeup(SIGNAL, waker);
+
+            // assign them the lock, so they'll definitely get it when they wake
+            assert state.heldBy == null;
+            state.heldBy = wake.waiting;
+            state.log("wake", wake.waiting);
+            return true;
+        }
+        return false;
+    }
+
+    // TODO (feature): integrate LockSupport waits into this deadlock check
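+    // walk the waits-for chain from the blocking thread: if it leads back to the waiting thread we have
+    // a monitor deadlock, which is logged before failing the simulation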
+    private void checkForDeadlock(Thread waiting, Thread blockedBy)
+    {
+        Thread cur = blockedBy;
+        while (true)
+        {
+            Object monitor = waitingOn.get(cur);
+            if (monitor == null)
+                return;
+            MonitorState state = monitors.get(monitor);
+            if (state == null)
+                return;
+            Thread next = state.heldBy;
+            if (next == cur)
+                return; // not really waiting, just hasn't woken up yet
+            if (next == waiting)
+            {
+                logger.error("Deadlock between {}{} and {}{}", waiting, Threads.prettyPrintStackTrace(waiting, true, ";"), cur, Threads.prettyPrintStackTrace(cur, true, ";"));
+                throw failWithOOM();
+            }
+            cur = next;
+        }
+    }
+
+    @Override
+    public void park()
+    {
+        InterceptibleThread.park();
+    }
+
+    @Override
+    public void parkNanos(long nanos)
+    {
+        InterceptibleThread.parkNanos(nanos);
+    }
+
+    @Override
+    public void parkUntil(long millis)
+    {
+        InterceptibleThread.parkUntil(millis);
+    }
+
+    @Override
+    public void park(Object blocker)
+    {
+        InterceptibleThread.park(blocker);
+    }
+
+    @Override
+    public void parkNanos(Object blocker, long nanos)
+    {
+        InterceptibleThread.parkNanos(blocker, nanos);
+    }
+
+    @Override
+    public void parkUntil(Object blocker, long millis)
+    {
+        InterceptibleThread.parkUntil(blocker, millis);
+    }
+
+    @Override
+    public void unpark(Thread thread)
+    {
+        InterceptibleThread.unpark(thread);
+    }
+
+    public void close()
+    {
+        disabled = true;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingWaitQueue.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingWaitQueue.java
new file mode 100644
index 0000000..d14bcfb
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptingWaitQueue.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+
+import org.apache.cassandra.simulator.systems.InterceptingAwaitable.InterceptingSignal;
+import org.apache.cassandra.utils.concurrent.WaitQueue;
+
+import static org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods.Global.ifIntercepted;
+
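+/**
+ * A WaitQueue that registers intercepted signals for simulated threads, delegating to the standard
+ * implementation for everything else; intercepted waiters are tracked in a separate queue and are
+ * signalled before any non-intercepted waiters.
+ */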
+@PerClassLoader
+class InterceptingWaitQueue extends WaitQueue.Standard implements WaitQueue
+{
+    final Queue<InterceptingSignal<?>> interceptible = new ConcurrentLinkedQueue<>();
+
+    public InterceptingWaitQueue()
+    {
+    }
+
+    public Signal register()
+    {
+        if (ifIntercepted() == null)
+            return super.register();
+
+        InterceptingSignal<?> signal = new InterceptingSignal<>();
+        interceptible.add(signal);
+        return signal;
+    }
+
+    public <V> Signal register(V value, Consumer<V> consumer)
+    {
+        if (ifIntercepted() == null)
+            return super.register(value, consumer);
+
+        InterceptingSignal<V> signal = new InterceptingSignal<>(value, consumer);
+        interceptible.add(signal);
+        return signal;
+    }
+
+    public boolean signal()
+    {
+        // directly signal the actual underlying queue if no intercepted waiters are present
+        return consumeUntil(InterceptingSignal::doSignal) || super.signal();
+    }
+
+    public void signalAll()
+    {
+        consumeUntil(s -> {
+            s.signal();
+            return false;
+        });
+        super.signalAll();
+    }
+
+    public boolean hasWaiters()
+    {
+        if (super.hasWaiters())
+            return true;
+        if (interceptible.isEmpty())
+            return false;
+
+        return !interceptible.stream().allMatch(Signal::isSet);
+    }
+
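+    // poll intercepted signals until the predicate accepts one (returns true) or the queue is drained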
+    private boolean consumeUntil(Predicate<InterceptingSignal<?>> consumeUntil)
+    {
+        InterceptingSignal<?> signal;
+        while (null != (signal = interceptible.poll()))
+        {
+            if (consumeUntil.test(signal))
+                return true;
+        }
+        return false;
+    }
+
+    public int getWaiting()
+    {
+        return super.getWaiting() + (int)interceptible.stream().filter(s -> !s.isSignalled).count();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfConsequences.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfConsequences.java
new file mode 100644
index 0000000..801b353
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfConsequences.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IMessage;
+import org.apache.cassandra.simulator.OrderOn;
+import org.apache.cassandra.simulator.systems.InterceptedWait.Trigger;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@Shared(scope = SIMULATION)
+public interface InterceptorOfConsequences
+{
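+    /**
+     * Used where no interceptor has been registered: wake-ups are simply passed through, while any
+     * other intercepted consequence indicates a bug and fails with an AssertionError.
+     */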
+    public static final InterceptorOfConsequences DEFAULT_INTERCEPTOR = new InterceptorOfConsequences()
+    {
+        @Override
+        public void beforeInvocation(InterceptibleThread realThread)
+        {
+        }
+
+        @Override
+        public void interceptMessage(IInvokableInstance from, IInvokableInstance to, IMessage message)
+        {
+            throw new AssertionError();
+        }
+
+        @Override
+        public void interceptWait(InterceptedWait wakeupWith)
+        {
+            throw new AssertionError();
+        }
+
+        @Override
+        public void interceptWakeup(InterceptedWait wakeup, Trigger trigger, InterceptorOfConsequences waitWasInterceptedBy)
+        {
+            // TODO (now): should we be asserting here?
+            wakeup.triggerBypass();
+        }
+
+        @Override
+        public void interceptExecution(InterceptedExecution invoke, OrderOn orderOn)
+        {
+            throw new AssertionError();
+        }
+
+        @Override
+        public void interceptTermination(boolean isThreadTermination)
+        {
+            throw new AssertionError();
+        }
+    };
+
+    void beforeInvocation(InterceptibleThread realThread);
+    void interceptMessage(IInvokableInstance from, IInvokableInstance to, IMessage message);
+    void interceptWakeup(InterceptedWait wakeup, Trigger trigger, InterceptorOfConsequences waitWasInterceptedBy);
+    void interceptExecution(InterceptedExecution invoke, OrderOn orderOn);
+    void interceptWait(InterceptedWait wakeupWith);
+    void interceptTermination(boolean isThreadTermination);
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfExecution.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfExecution.java
new file mode 100644
index 0000000..ac8255d
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfExecution.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ScheduledFuture;
+import java.util.function.Function;
+
+import org.apache.cassandra.simulator.systems.SimulatedAction.Kind;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+// some kind of bug in javac(?) sometimes causes this to not be found if not fully-qualified
+@Shared(scope = SIMULATION)
+public interface InterceptorOfExecution
+{
+    InterceptExecution intercept();
+
+    @Shared(scope = SIMULATION)
+    interface InterceptExecution
+    {
+        <V, T extends RunnableFuture<V>> T addTask(T task, InterceptingExecutor executor);
+        <T> ScheduledFuture<T> schedule(Kind kind, long delayNanos, long deadlineNanos, Callable<T> runnable, InterceptingExecutor executor);
+        Thread start(Kind kind, Function<Runnable, InterceptibleThread> factory, Runnable run);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfGlobalMethods.java b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfGlobalMethods.java
new file mode 100644
index 0000000..749f91e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/InterceptorOfGlobalMethods.java
@@ -0,0 +1,474 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayDeque;
+import java.util.UUID;
+import java.util.concurrent.BlockingQueue;
+import java.util.function.IntSupplier;
+import java.util.function.LongConsumer;
+import java.util.function.ToIntFunction;
+
+import net.openhft.chronicle.core.util.WeakIdentityHashMap;
+import org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.BlockingQueues;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+import org.apache.cassandra.utils.concurrent.Semaphore;
+import org.apache.cassandra.utils.concurrent.Semaphore.Standard;
+import org.apache.cassandra.utils.concurrent.WaitQueue;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+@SuppressWarnings("unused")
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface InterceptorOfGlobalMethods extends InterceptorOfSystemMethods, Closeable
+{
+    WaitQueue newWaitQueue();
+    CountDownLatch newCountDownLatch(int count);
+    Condition newOneTimeCondition();
+
+    /**
+     * If this interceptor is debugging wait/wake/now sites, return a CaptureSites initialised with the
+     * current trace of the provided thread; otherwise return null.
+     */
+    CaptureSites captureWaitSite(Thread thread);
+
+    /**
+     * Returns the current thread as an InterceptibleThread IF it has its InterceptConsequences interceptor set.
+     * Otherwise, one of the following will happen:
+     *   * if the InterceptorOfWaits permits it, null will be returned;
+     *   * if it does not, the process will be failed.
+     */
+    InterceptibleThread ifIntercepted();
+
+    void uncaughtException(Thread thread, Throwable throwable);
+
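+    /**
+     * Forwards each call to the interceptor registered with the current InterceptibleThread, if the
+     * current thread is one; otherwise falls back to the default (non-simulated) behaviour.
+     */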
+    @PerClassLoader
+    public static class IfInterceptibleThread extends None implements InterceptorOfGlobalMethods
+    {
+        static LongConsumer threadLocalRandomCheck;
+
+        @Override
+        public WaitQueue newWaitQueue()
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().newWaitQueue();
+
+            return WaitQueue.newWaitQueue();
+        }
+
+        @Override
+        public CountDownLatch newCountDownLatch(int count)
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().newCountDownLatch(count);
+
+            return CountDownLatch.newCountDownLatch(count);
+        }
+
+        @Override
+        public Condition newOneTimeCondition()
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().newOneTimeCondition();
+
+            return Condition.newOneTimeCondition();
+        }
+
+        @Override
+        public CaptureSites captureWaitSite(Thread thread)
+        {
+            if (thread instanceof InterceptibleThread)
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().captureWaitSite(thread);
+
+            Thread currentThread = Thread.currentThread();
+            if (currentThread instanceof InterceptibleThread)
+                return ((InterceptibleThread) currentThread).interceptorOfGlobalMethods().captureWaitSite(thread);
+
+            return null;
+        }
+
+        @Override
+        public InterceptibleThread ifIntercepted()
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().ifIntercepted();
+
+            return null;
+        }
+
+        @Override
+        public void waitUntil(long deadlineNanos) throws InterruptedException
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().waitUntil(deadlineNanos);
+            }
+            else
+            {
+                super.waitUntil(deadlineNanos);
+            }
+        }
+
+        @Override
+        public boolean waitUntil(Object monitor, long deadlineNanos) throws InterruptedException
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().waitUntil(monitor, deadlineNanos);
+
+            return super.waitUntil(monitor, deadlineNanos);
+        }
+
+        @Override
+        public void wait(Object monitor) throws InterruptedException
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().wait(monitor);
+            }
+            else
+            {
+                monitor.wait();
+            }
+        }
+
+        @Override
+        public void wait(Object monitor, long millis) throws InterruptedException
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().wait(monitor, millis);
+            }
+            else
+            {
+                monitor.wait(millis);
+            }
+        }
+
+        @Override
+        public void wait(Object monitor, long millis, int nanos) throws InterruptedException
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().wait(monitor, millis, nanos);
+            }
+            else
+            {
+                monitor.wait(millis, nanos);
+            }
+        }
+
+        @Override
+        public void preMonitorEnter(Object object, float chanceOfSwitch)
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().preMonitorEnter(object, chanceOfSwitch);
+            }
+        }
+
+        @Override
+        public void preMonitorExit(Object object)
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().preMonitorExit(object);
+            }
+        }
+
+        @Override
+        public void notify(Object monitor)
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().notify(monitor);
+            }
+            else
+            {
+                monitor.notify();
+            }
+        }
+
+        @Override
+        public void notifyAll(Object monitor)
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().notifyAll(monitor);
+            }
+            else
+            {
+                monitor.notifyAll();
+            }
+        }
+
+        @Override
+        public void park()
+        {
+            InterceptibleThread.park();
+        }
+
+        @Override
+        public void parkNanos(long nanos)
+        {
+            InterceptibleThread.parkNanos(nanos);
+        }
+
+        @Override
+        public void parkUntil(long millis)
+        {
+            InterceptibleThread.parkUntil(millis);
+        }
+
+        @Override
+        public void park(Object blocker)
+        {
+            InterceptibleThread.park(blocker);
+        }
+
+        @Override
+        public void parkNanos(Object blocker, long nanos)
+        {
+            InterceptibleThread.parkNanos(blocker, nanos);
+        }
+
+        @Override
+        public void parkUntil(Object blocker, long millis)
+        {
+            InterceptibleThread.parkUntil(blocker, millis);
+        }
+
+        @Override
+        public void unpark(Thread thread)
+        {
+            InterceptibleThread.unpark(thread);
+        }
+
+        @Override
+        public void nemesis(float chance)
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().nemesis(chance);
+            }
+        }
+
+        @Override
+        public long randomSeed()
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().randomSeed();
+            }
+            else
+            {   // TODO: throw an exception? May result in non-determinism
+                return super.randomSeed();
+            }
+        }
+
+        @Override
+        public UUID randomUUID()
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                return ((InterceptibleThread) thread).interceptorOfGlobalMethods().randomUUID();
+            }
+            else
+            {
+                return super.randomUUID();
+            }
+        }
+
+        @Override
+        public void threadLocalRandomCheck(long seed)
+        {
+            if (threadLocalRandomCheck != null)
+                threadLocalRandomCheck.accept(seed);
+        }
+
+        @Override
+        public void uncaughtException(Thread thread, Throwable throwable)
+        {
+            if (thread instanceof InterceptibleThread)
+                ((InterceptibleThread) thread).interceptorOfGlobalMethods().uncaughtException(thread, throwable);
+        }
+
+        @Override
+        public long nanoTime()
+        {
+            return Clock.Global.nanoTime();
+        }
+
+        @Override
+        public long currentTimeMillis()
+        {
+            return Clock.Global.currentTimeMillis();
+        }
+
+        public static void setThreadLocalRandomCheck(LongConsumer runnable)
+        {
+            threadLocalRandomCheck = runnable;
+        }
+
+        @Override
+        public void close()
+        {
+        }
+    }
+
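+    /**
+     * Static entry points that delegate to the currently installed InterceptorOfGlobalMethods.
+     */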
+    @SuppressWarnings("unused")
+    public static class Global
+    {
+        private static InterceptorOfGlobalMethods methods;
+
+        public static WaitQueue newWaitQueue()
+        {
+            return methods.newWaitQueue();
+        }
+
+        public static CountDownLatch newCountDownLatch(int count)
+        {
+            return methods.newCountDownLatch(count);
+        }
+
+        public static Semaphore newSemaphore(int count)
+        {
+            return new Standard(count, false);
+        }
+
+        public static Semaphore newFairSemaphore(int count)
+        {
+            return new Standard(count, true);
+        }
+
+        public static Condition newOneTimeCondition()
+        {
+            return methods.newOneTimeCondition();
+        }
+
+        public static <T> BlockingQueue<T> newBlockingQueue()
+        {
+            return newBlockingQueue(Integer.MAX_VALUE);
+        }
+
+        public static <T> BlockingQueue<T> newBlockingQueue(int capacity)
+        {
+            return new BlockingQueues.Sync<>(capacity, new ArrayDeque<>());
+        }
+
+        public static CaptureSites captureWaitSite(Thread thread)
+        {
+            return methods.captureWaitSite(thread);
+        }
+
+        public static InterceptibleThread ifIntercepted()
+        {
+            return methods.ifIntercepted();
+        }
+
+        public static void uncaughtException(Thread thread, Throwable throwable)
+        {
+            System.err.println(thread);
+            throwable.printStackTrace(System.err);
+            methods.uncaughtException(thread, throwable);
+        }
+
+        public static void unsafeReset()
+        {
+            Global.methods = new IfInterceptibleThread();
+            InterceptorOfSystemMethods.Global.unsafeSet(methods);
+        }
+
+        public static void unsafeSet(InterceptorOfGlobalMethods methods, IntSupplier intSupplier)
+        {
+            unsafeSet(methods, new IdentityHashCode(intSupplier));
+        }
+
+        public static void unsafeSet(InterceptorOfGlobalMethods methods, ToIntFunction<Object> identityHashCode)
+        {
+            InterceptorOfSystemMethods.Global.unsafeSet(methods, identityHashCode);
+            Global.methods = methods;
+        }
+    }
+
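+    // supplies deterministic identity hash codes: each object is lazily assigned the next value from
+    // the provided IntSupplier (e.g. the LCG below), so hash codes are reproducible across runs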
+    static class IdentityHashCode implements ToIntFunction<Object>
+    {
+        static class LCGRandom implements IntSupplier
+        {
+            private static final int LCG_MULTIPLIER = 22695477;
+            private final int constant;
+            private int nextId;
+
+            public LCGRandom(int constant)
+            {
+                this.constant = constant == 0 ? 1 : constant;
+            }
+
+            @Override
+            public int getAsInt()
+            {
+                int id = nextId;
+                nextId = (id * LCG_MULTIPLIER) + constant;
+                id ^= id >> 16;
+                return id;
+            }
+        }
+
+        private final IntSupplier nextId;
+        private final WeakIdentityHashMap<Object, Integer> saved = new WeakIdentityHashMap<>();
+
+        public IdentityHashCode(IntSupplier nextId)
+        {
+            this.nextId = nextId;
+        }
+
+        public synchronized int applyAsInt(Object value)
+        {
+            Integer id = saved.get(value);
+            if (id == null)
+            {
+                id = nextId.getAsInt();
+                saved.put(value, id);
+            }
+            return id;
+        }
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/NetworkConfig.java b/test/simulator/main/org/apache/cassandra/simulator/systems/NetworkConfig.java
new file mode 100644
index 0000000..bde5e9e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/NetworkConfig.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import org.apache.cassandra.simulator.utils.ChanceRange;
+import org.apache.cassandra.simulator.utils.LongRange;
+
+public class NetworkConfig
+{
+    public static class PhaseConfig
+    {
+        final ChanceRange dropChance, delayChance;
+        final LongRange normalLatency, delayLatency;
+
+        public PhaseConfig(ChanceRange dropChance, ChanceRange delayChance, LongRange normalLatency, LongRange delayLatency)
+        {
+            this.dropChance = dropChance;
+            this.delayChance = delayChance;
+            this.normalLatency = normalLatency;
+            this.delayLatency = delayLatency;
+        }
+    }
+
+    final PhaseConfig normal;
+    final PhaseConfig flaky;
+    final ChanceRange partitionChance;
+    final ChanceRange flakyChance;
+    final LongRange reconfigureInterval;
+
+    public NetworkConfig(PhaseConfig normal, PhaseConfig flaky, ChanceRange partitionChance, ChanceRange flakyChance, LongRange reconfigureInterval)
+    {
+        this.normal = normal;
+        this.flaky = flaky;
+        this.partitionChance = partitionChance;
+        this.flakyChance = flakyChance;
+        this.reconfigureInterval = reconfigureInterval;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/NonInterceptible.java b/test/simulator/main/org/apache/cassandra/simulator/systems/NonInterceptible.java
new file mode 100644
index 0000000..6aee396
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/NonInterceptible.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.concurrent.Callable;
+import java.util.function.Supplier;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
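+/**
+ * Maintains a thread-local Permit for the duration of a task, so that code run via execute/apply/call
+ * can be recognised (through isPermitted) as executing in a non-interceptible region.
+ */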
+@Shared(scope = SIMULATION)
+public class NonInterceptible
+{
+    @Shared(scope = SIMULATION)
+    public enum Permit { REQUIRED, OPTIONAL }
+
+    private static final ThreadLocal<Permit> PERMIT = new ThreadLocal<>();
+
+    public static boolean isPermitted(Permit permit)
+    {
+        Permit current = PERMIT.get();
+        return current != null && current.compareTo(permit) >= 0;
+    }
+
+    public static boolean isPermitted()
+    {
+        return PERMIT.get() != null;
+    }
+
+    public static void execute(Permit permit, Runnable runnable)
+    {
+        if (isPermitted())
+        {
+            runnable.run();
+        }
+        else
+        {
+            PERMIT.set(permit);
+            try
+            {
+                runnable.run();
+            }
+            finally
+            {
+                PERMIT.set(null);
+            }
+        }
+    }
+
+    public static <V> V apply(Permit permit, Supplier<V> supplier)
+    {
+        if (isPermitted())
+        {
+            return supplier.get();
+        }
+        else
+        {
+            PERMIT.set(permit);
+            try
+            {
+                return supplier.get();
+            }
+            finally
+            {
+                PERMIT.set(null);
+            }
+        }
+    }
+
+    public static <V> V call(Permit permit, Callable<V> call) throws Exception
+    {
+        if (isPermitted())
+        {
+            return call.call();
+        }
+        else
+        {
+            PERMIT.set(permit);
+            try
+            {
+                return call.call();
+            }
+            finally
+            {
+                PERMIT.set(null);
+            }
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/NotInterceptedSyncCondition.java b/test/simulator/main/org/apache/cassandra/simulator/systems/NotInterceptedSyncCondition.java
new file mode 100644
index 0000000..6c53a4e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/NotInterceptedSyncCondition.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import org.apache.cassandra.utils.concurrent.Awaitable;
+import org.apache.cassandra.utils.concurrent.Condition;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public class NotInterceptedSyncCondition extends Awaitable.AbstractAwaitable implements Condition
+{
+    private volatile boolean isSignalled;
+
+    @Override
+    public synchronized boolean awaitUntil(long nanoTimeDeadline) throws InterruptedException
+    {
+        while (true)
+        {
+            if (isSignalled()) return true;
+            if (!notInterceptedWaitUntil(this, nanoTimeDeadline)) return false;
+        }
+    }
+
+    @Override
+    public synchronized Awaitable await() throws InterruptedException
+    {
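+                    // fall through: advance one past the (possibly raised) minimum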
+        while (!isSignalled)
+            wait();
+        return this;
+    }
+
+    @Override
+    public boolean isSignalled()
+    {
+        return isSignalled;
+    }
+
+    @Override
+    public synchronized void signal()
+    {
+        isSignalled = true;
+        notifyAll();
+    }
+
+    private static boolean notInterceptedWaitUntil(Object monitor, long deadlineNanos) throws InterruptedException
+    {
+        long wait = deadlineNanos - nanoTime();
+        if (wait <= 0)
+            return false;
+
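+        // convert the remaining nanos to millis, rounding up so a positive remainder
+        // never becomes wait(0), which would wait without a timeout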
+        monitor.wait((wait + 999999) / 1000000);
+        return true;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/NotifyThreadPaused.java b/test/simulator/main/org/apache/cassandra/simulator/systems/NotifyThreadPaused.java
new file mode 100644
index 0000000..2c92073
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/NotifyThreadPaused.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import org.apache.cassandra.utils.Shared;
+import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
+
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * An abstraction for the simulation to permit the main scheduling thread
+ * to trigger a simulation thread and then wait synchronously for the end
+ * of this part of its simulation, i.e. until its next wait is intercepted.
+ *
+ * This permits more efficient scheduling by (often) ensuring that the simulated
+ * thread and the scheduling thread do not run simultaneously.
+ */
+@Shared(scope = SIMULATION)
+public interface NotifyThreadPaused
+{
+    void notifyThreadPaused();
+
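+    // A simple monitor-based implementation: the scheduling thread blocks in awaitPause()
+    // until notifyThreadPaused() is invoked on behalf of the paused simulation thread.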
+    class AwaitPaused implements NotifyThreadPaused
+    {
+        final Object monitor;
+        boolean isDone;
+        AwaitPaused(Object monitor) { this.monitor = monitor; }
+        AwaitPaused() { this.monitor = this; }
+
+        @Override
+        public void notifyThreadPaused()
+        {
+            synchronized (monitor)
+            {
+                isDone = true;
+                monitor.notifyAll();
+            }
+        }
+
+        public void awaitPause()
+        {
+            try
+            {
+                synchronized (monitor)
+                {
+                    while (!isDone)
+                        monitor.wait();
+                }
+            }
+            catch (InterruptedException ie)
+            {
+                throw new UncheckedInterruptedException(ie);
+            }
+        }
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/PerClassLoader.java b/test/simulator/main/org/apache/cassandra/simulator/systems/PerClassLoader.java
new file mode 100644
index 0000000..2256405
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/PerClassLoader.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+public @interface PerClassLoader
+{
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SchedulerConfig.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SchedulerConfig.java
new file mode 100644
index 0000000..3f5ce0e
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SchedulerConfig.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import org.apache.cassandra.simulator.utils.ChanceRange;
+import org.apache.cassandra.simulator.utils.LongRange;
+
+public class SchedulerConfig
+{
+    final ChanceRange longDelayChance;
+    final LongRange delayNanos, longDelayNanos;
+
+    public SchedulerConfig(ChanceRange longDelayChance, LongRange delayNanos, LongRange longDelayNanos)
+    {
+        this.longDelayChance = longDelayChance;
+        this.delayNanos = delayNanos;
+        this.longDelayNanos = longDelayNanos;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedAction.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedAction.java
new file mode 100644
index 0000000..551616f
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedAction.java
@@ -0,0 +1,466 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executor;
+
+import javax.annotation.Nullable;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.concurrent.ImmediateExecutor;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IMessage;
+import org.apache.cassandra.exceptions.RequestFailureReason;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.RequestCallback;
+import org.apache.cassandra.net.RequestCallbacks;
+import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.Actions;
+import org.apache.cassandra.simulator.FutureActionScheduler.Deliver;
+import org.apache.cassandra.simulator.OrderOn;
+import org.apache.cassandra.simulator.systems.InterceptedExecution.InterceptedRunnableExecution;
+import org.apache.cassandra.simulator.systems.InterceptedWait.Trigger;
+import org.apache.cassandra.simulator.systems.InterceptedWait.TriggerListener;
+import org.apache.cassandra.utils.LazyToString;
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.net.MessagingService.instance;
+import static org.apache.cassandra.simulator.Action.Modifiers.DROP;
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.PSEUDO_ORPHAN;
+import static org.apache.cassandra.simulator.Action.Modifiers.START_DAEMON_TASK;
+import static org.apache.cassandra.simulator.Action.Modifiers.START_SCHEDULED_TASK;
+import static org.apache.cassandra.simulator.Action.Modifiers.START_INFINITE_LOOP;
+import static org.apache.cassandra.simulator.Action.Modifiers.START_TASK;
+import static org.apache.cassandra.simulator.Action.Modifiers.START_THREAD;
+import static org.apache.cassandra.simulator.Action.Modifiers.START_TIMEOUT_TASK;
+import static org.apache.cassandra.simulator.Action.Modifiers.WAKE_UP_THREAD;
+import static org.apache.cassandra.simulator.FutureActionScheduler.Deliver.DELIVER;
+import static org.apache.cassandra.simulator.FutureActionScheduler.Deliver.DELIVER_AND_TIMEOUT;
+import static org.apache.cassandra.simulator.FutureActionScheduler.Deliver.FAILURE;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Trigger.SIGNAL;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Trigger.TIMEOUT;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.MESSAGE;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.REDUNDANT_MESSAGE_TIMEOUT;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.SCHEDULED_TIMEOUT;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.TASK;
+import static org.apache.cassandra.simulator.Debug.Info.LOG;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.Kind.UNBOUNDED_WAIT;
+import static org.apache.cassandra.utils.LazyToString.lazy;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * This class is the nexus of the simulation: it is where we translate intercepted system events
+ * into Action events.
+ */
+public abstract class SimulatedAction extends Action implements InterceptorOfConsequences
+{
+    @Shared(scope = SIMULATION)
+    public enum Kind
+    {
+        MESSAGE(NONE, NONE, WAKE_UP_THREAD),
+        REDUNDANT_MESSAGE_TIMEOUT(PSEUDO_ORPHAN, NONE, WAKE_UP_THREAD),
+        TASK(START_TASK, NONE, WAKE_UP_THREAD),
+        SCHEDULED_TASK(START_SCHEDULED_TASK, NONE, WAKE_UP_THREAD),
+        SCHEDULED_TIMEOUT(START_TIMEOUT_TASK, NONE, WAKE_UP_THREAD),
+        SCHEDULED_DAEMON(START_DAEMON_TASK, NONE, WAKE_UP_THREAD),
+        THREAD(START_THREAD, NONE, WAKE_UP_THREAD),
+        INFINITE_LOOP(START_INFINITE_LOOP, NONE, WAKE_UP_THREAD);
+
+        public final Modifiers self, transitive, signal;
+        Kind(Modifiers self, Modifiers transitive, Modifiers signal)
+        {
+            this.self = self;
+            this.transitive = transitive;
+            this.signal = signal;
+        }
+
+        public boolean logWakeups()
+        {
+            return this == MESSAGE || this == TASK || this == SCHEDULED_TASK;
+        }
+    }
+
+    class Signal extends Action implements TriggerListener
+    {
+        final InterceptedWait wakeup;
+        final Trigger trigger;
+        boolean signalling;
+
+        // note that we do not inherit from the parent thread's self, as anything relevantly heritable by continuations is likely already transitive
+        protected Signal(Object description, Modifiers self, InterceptedWait wakeup, Trigger trigger, long deadlineNanos)
+        {
+            super(description, self.inheritIfContinuation(SimulatedAction.this.self()), NONE);
+            this.wakeup = wakeup;
+            this.trigger = trigger;
+            assert deadlineNanos < 0 || trigger == TIMEOUT;
+            if (deadlineNanos >= 0)
+                setDeadline(simulated.time, deadlineNanos);
+            assert !wakeup.isTriggered();
+            wakeup.addListener(this);
+        }
+
+        // provide the parameters to send to SimulatedAction.this
+        @Override
+        protected ActionList performed(ActionList consequences, boolean isStart, boolean isFinish)
+        {
+            assert !isStart;
+            ActionList restored = super.performed(ActionList.empty(), true, true);
+            consequences = SimulatedAction.this.performed(consequences, false, isFinish);
+            if (!restored.isEmpty()) consequences = consequences.andThen(restored);
+            return consequences;
+        }
+
+        @Override
+        protected ActionList performAndRegister()
+        {
+            assert !wakeup.isTriggered();
+            assert !isFinished();
+
+            if (SimulatedAction.this.isFinished())
+                return super.performed(ActionList.empty(), true, true);
+            assert !realThreadHasTerminated;
+
+            signalling = true;
+            return performed(performSimple(), false, realThreadHasTerminated);
+        }
+
+        @Override
+        protected ActionList performSimple()
+        {
+            return simulate(() -> wakeup.triggerAndAwaitDone(SimulatedAction.this, trigger));
+        }
+
+        @Override
+        public void onTrigger(InterceptedWait triggered)
+        {
+            if (!signalling)
+                cancel();
+        }
+    }
+
+    protected final SimulatedAction.Kind kind;
+    protected final SimulatedSystems simulated;
+    protected final Verb forVerb;
+    protected Map<Verb, Modifiers> verbModifiers;
+
+    private InterceptibleThread realThread; // unset until first simulation
+
+    private List<Action> consequences; // valid only for one round of simulation, until paused
+    private @Nullable InterceptedWait pausedOn;
+    private boolean realThreadHasTerminated;
+
+    public SimulatedAction(Object description, Modifiers self, Modifiers transitive, Verb forVerb, SimulatedSystems simulated)
+    {
+        this(description, OrderOn.NONE, self, transitive, forVerb, simulated);
+    }
+
+    public SimulatedAction(Object description, OrderOn orderOn, Modifiers self, Modifiers transitive, Verb forVerb, SimulatedSystems simulated)
+    {
+        this(description, TASK, orderOn, self, transitive, Collections.emptyMap(), forVerb, simulated);
+    }
+
+    public SimulatedAction(Object description, Kind kind, OrderOn orderOn, Modifiers self, Modifiers transitive, Map<Verb, Modifiers> verbModifiers, Verb forVerb, SimulatedSystems simulated)
+    {
+        super(description, orderOn, self, transitive);
+        Preconditions.checkNotNull(kind);
+        Preconditions.checkNotNull(verbModifiers);
+        Preconditions.checkNotNull(simulated);
+        this.kind = kind;
+        this.simulated = simulated;
+        this.verbModifiers = verbModifiers;
+        this.forVerb = forVerb;
+    }
+
+    @Override
+    public void interceptMessage(IInvokableInstance from, IInvokableInstance to, IMessage message)
+    {
+        if (!to.isShutdown())
+            consequences.addAll(applyToMessage(from, to, message));
+    }
+
+    @Override
+    public void interceptWakeup(InterceptedWait wakeup, Trigger trigger, InterceptorOfConsequences waitWasInterceptedBy)
+    {
+        SimulatedAction action = (SimulatedAction) waitWasInterceptedBy;
+        action.applyToWakeup(consequences, wakeup, trigger);
+    }
+
+    @Override
+    public void interceptExecution(InterceptedExecution invoke, OrderOn orderOn)
+    {
+        if (invoke.kind() == SCHEDULED_TIMEOUT && transitive().is(Modifier.RELIABLE) && transitive().is(Modifier.NO_THREAD_TIMEOUTS))
+            invoke.cancel();
+        else
+            consequences.add(applyToExecution(invoke, orderOn));
+    }
+
+    @Override
+    public void interceptWait(@Nullable InterceptedWait wakeupWith)
+    {
+        pausedOn = wakeupWith;
+    }
+
+    @Override
+    public void interceptTermination(boolean isThreadTermination)
+    {
+        realThreadHasTerminated = true;
+    }
+
+    private ActionList simulate(Runnable simulate)
+    {
+        try
+        {
+            try
+            {
+                simulate.run();
+            }
+            catch (Throwable t)
+            {
+                consequences.forEach(Action::invalidate);
+                throw t;
+            }
+
+            InterceptedWait wakeUpWith = pausedOn;
+            if (wakeUpWith != null && wakeUpWith.kind() != UNBOUNDED_WAIT)
+            {
+                applyToWait(consequences, wakeUpWith);
+            }
+            else if (wakeUpWith != null && kind.logWakeups() && !isFinished())
+            {
+                if (simulated.debug.isOn(LOG))
+                    consequences.add(Actions.empty(Modifiers.INFO, lazy(() -> "Waiting[" + wakeUpWith + "] " + realThread)));
+            }
+
+            for (int i = consequences.size() - 1; i >= 0 ; --i)
+            {
+                // a scheduled future might be cancelled by the same action that creates it
+                if (consequences.get(i).isCancelled())
+                    consequences.remove(i);
+            }
+            return ActionList.of(consequences);
+        }
+        finally
+        {
+            this.consequences = null;
+            this.pausedOn = null;
+        }
+    }
+
+    protected abstract InterceptedExecution task();
+
+    protected ActionList performAndRegister()
+    {
+        return performed(performSimple(), true, realThreadHasTerminated);
+    }
+
+    @Override
+    protected ActionList performSimple()
+    {
+        return simulate(() -> task().invokeAndAwaitPause(this));
+    }
+
+    @Override
+    public void beforeInvocation(InterceptibleThread realThread)
+    {
+        this.consequences = new ArrayList<>();
+        this.realThread = realThread;
+    }
+
+    void applyToWait(List<Action> out, InterceptedWait wakeupWith)
+    {
+        switch (wakeupWith.kind())
+        {
+            case WAIT_UNTIL:
+                applyToSignal(out, START_TIMEOUT_TASK, "Timeout", wakeupWith, TIMEOUT, wakeupWith.waitTime());
+                break;
+            case NEMESIS:
+                applyToSignal(out, WAKE_UP_THREAD, "Nemesis", wakeupWith, SIGNAL, -1L);
+                break;
+            default:
+                applyToSignal(out, WAKE_UP_THREAD, "Continue", wakeupWith, SIGNAL, -1L);
+                break;
+        }
+    }
+
+    void applyToWakeup(List<Action> out, InterceptedWait wakeup, Trigger trigger)
+    {
+        applyToSignal(out, kind.signal, "Wakeup", wakeup, trigger, -1);
+    }
+
+    void applyToSignal(List<Action> out, Modifiers self, String kind, InterceptedWait wakeup, Trigger trigger, long deadlineNanos)
+    {
+        applyToSignal(out, lazy(() -> kind + wakeup + ' ' + realThread), self, wakeup, trigger, deadlineNanos);
+    }
+
+    void applyToSignal(List<Action> out, LazyToString id, Modifiers self, InterceptedWait wakeup, Trigger trigger, long deadlineNanos)
+    {
+        if (deadlineNanos >= 0 && !self.is(Modifier.THREAD_TIMEOUT))
+            throw new IllegalStateException();
+
+        out.add(new Signal(id, self, wakeup, trigger, deadlineNanos));
+    }
+
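+    // Translates an intercepted message into one or two Actions, according to the future
+    // scheduler's decision: delivery, failure, timeout, or delivery plus a redundant timeout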
+    List<Action> applyToMessage(IInvokableInstance from, IInvokableInstance to, IMessage message)
+    {
+        Executor executor = to.executorFor(message.verb());
+        if (executor instanceof ImmediateExecutor)
+            executor = to.executor();
+
+        Verb verb = Verb.fromId(message.verb());
+        Modifiers self = verbModifiers.getOrDefault(verb, NONE);
+
+        int fromNum = from.config().num();
+        int toNum = to.config().num();
+
+        long expiresAtNanos = simulated.time.get(fromNum).localToGlobalNanos(message.expiresAtNanos());
+        boolean isReliable = is(Modifier.RELIABLE) || self.is(Modifier.RELIABLE);
+        Deliver deliver = isReliable ? DELIVER : simulated.futureScheduler.shouldDeliver(fromNum, toNum);
+
+        List<Action> actions = new ArrayList<>(deliver == DELIVER_AND_TIMEOUT ? 2 : 1);
+        switch (deliver)
+        {
+            default: throw new AssertionError();
+            case DELIVER:
+            case DELIVER_AND_TIMEOUT:
+            {
+                InterceptedExecution.InterceptedTaskExecution task = new InterceptedRunnableExecution(
+                    (InterceptingExecutor) executor, () -> to.receiveMessageWithInvokingThread(message)
+                );
+                Object description = lazy(() -> String.format("%s(%d) from %s to %s", verb, message.id(), message.from(), to.broadcastAddress()));
+                OrderOn orderOn = task.executor.orderAppliesAfterScheduling();
+                Action action = applyTo(description, MESSAGE, orderOn, self, verb, task);
+                long deadlineNanos = simulated.futureScheduler.messageDeadlineNanos(fromNum, toNum);
+                if (deliver == DELIVER && deadlineNanos >= expiresAtNanos)
+                {
+                    if (isReliable) deadlineNanos = verb.isResponse() ? expiresAtNanos : expiresAtNanos / 2;
+                    else deliver = DELIVER_AND_TIMEOUT;
+                }
+                action.setDeadline(simulated.time, deadlineNanos);
+                actions.add(action);
+                if (deliver == DELIVER)
+                    break;
+            }
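+            // deliver == DELIVER_AND_TIMEOUT falls through: the message is delivered above and a
+            // redundant timeout is also reported below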
+            case FAILURE:
+            case TIMEOUT:
+            {
+                InetSocketAddress failedOn;
+                IInvokableInstance notify;
+                if (verb.isResponse())
+                {
+                    failedOn = from.broadcastAddress();
+                    notify = to;
+                }
+                else
+                {
+                    failedOn = to.broadcastAddress();
+                    notify = from;
+                }
+                boolean isTimeout = deliver != FAILURE;
+                InterceptedExecution.InterceptedTaskExecution failTask = new InterceptedRunnableExecution(
+                    (InterceptingExecutor) notify.executorFor(verb.id),
+                    () -> notify.unsafeApplyOnThisThread((socketAddress, id, innerIsTimeout) -> {
+                        InetAddressAndPort address = InetAddressAndPort.getByAddress(socketAddress);
+                        RequestCallbacks.CallbackInfo callback = instance().callbacks.remove(id, address);
+                        if (callback != null)
+                        {
+                            RequestCallback<?> invokeOn = (RequestCallback<?>) callback.callback;
+                            RequestFailureReason reason = innerIsTimeout ? RequestFailureReason.TIMEOUT : RequestFailureReason.UNKNOWN;
+                            invokeOn.onFailure(address, reason);
+                        }
+                        return null;
+                    }, failedOn, message.id(), isTimeout)
+                );
+
+                Object description = lazy(() -> String.format("Report Timeout of %s(%d) from %s to %s", Verb.fromId(message.verb()), message.id(), failedOn, notify.broadcastAddress()));
+                OrderOn orderOn = failTask.executor.orderAppliesAfterScheduling();
+                self = DROP.with(self);
+                Kind kind = deliver == DELIVER_AND_TIMEOUT ? REDUNDANT_MESSAGE_TIMEOUT : MESSAGE;
+                Action action = applyTo(description, kind, orderOn, self, failTask);
+                switch (deliver)
+                {
+                    default: throw new AssertionError();
+                    case FAILURE:
+                        long deadlineNanos = simulated.futureScheduler.messageFailureNanos(toNum, fromNum);
+                        if (deadlineNanos < expiresAtNanos)
+                        {
+                            action.setDeadline(simulated.time, deadlineNanos);
+                            break;
+                        }
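+                        // otherwise fall through and report the failure on the timeout schedule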
+                    case DELIVER_AND_TIMEOUT:
+                    case TIMEOUT:
+                        long expirationIntervalNanos = from.unsafeCallOnThisThread(RequestCallbacks::defaultExpirationInterval);
+                        action.setDeadline(simulated.time, simulated.futureScheduler.messageTimeoutNanos(expiresAtNanos, expirationIntervalNanos));
+                        break;
+                }
+                actions.add(action);
+            }
+        }
+        return actions;
+    }
+
+    Action applyToExecution(InterceptedExecution invoke, OrderOn orderOn)
+    {
+        Action result = applyTo(lazy(() -> String.format("Invoke %s", invoke)),
+                                invoke.kind(), orderOn, invoke.kind().self, invoke);
+        switch (invoke.kind())
+        {
+            case SCHEDULED_DAEMON:
+            case SCHEDULED_TASK:
+            case SCHEDULED_TIMEOUT:
+                result.setDeadline(simulated.time, invoke.deadlineNanos());
+        }
+
+        return result;
+    }
+
+    Action applyTo(Object description, Kind kind, OrderOn orderOn, Modifiers self, InterceptedExecution task)
+    {
+        return new SimulatedActionTask(description, kind, orderOn, self, NONE, verbModifiers, forVerb, simulated, task);
+    }
+
+    Action applyTo(Object description, Kind kind, OrderOn orderOn, Modifiers self, Verb verb, InterceptedExecution task)
+    {
+        return new SimulatedActionTask(description, kind, orderOn, self, NONE, verbModifiers, verb, simulated, task);
+    }
+
+    @SuppressWarnings({"SameParameterValue", "UnusedReturnValue"})
+    protected SimulatedAction setMessageModifiers(Verb verb, Modifiers self, Modifiers responses)
+    {
+        if (verbModifiers.isEmpty())
+            verbModifiers = new EnumMap<>(Verb.class);
+        verbModifiers.put(verb, self);
+        if (verb.responseVerb != null)
+            verbModifiers.put(verb.responseVerb, responses);
+        return this;
+    }
+
+    public Object description()
+    {
+        return realThread == null ? super.description() : super.description() + " on " + realThread;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionCallable.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionCallable.java
new file mode 100644
index 0000000..2dc3d6a
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionCallable.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.function.BiConsumer;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableCallable;
+import org.apache.cassandra.simulator.systems.InterceptedExecution.InterceptedTaskExecution;
+
+public abstract class SimulatedActionCallable<O> extends SimulatedAction implements BiConsumer<O, Throwable>
+{
+    private final IInvokableInstance on;
+    private SerializableCallable<? extends O> execute;
+
+    public SimulatedActionCallable(Object description, Modifiers self, Modifiers children, SimulatedSystems simulated, IInvokableInstance on, SerializableCallable<? extends O> execute)
+    {
+        super(description, self, children, null, simulated);
+        this.execute = execute;
+        this.on = on;
+    }
+
+    @Override
+    protected InterceptedTaskExecution task()
+    {
+        return new InterceptedTaskExecution((InterceptingExecutor) on.executor())
+        {
+            public void run()
+            {
+                // we'll be invoked on the node's executor, but we need to ensure the task is loaded on its classloader
+                try { accept(on.unsafeCallOnThisThread(execute), null); }
+                catch (Throwable t) { accept(null, t); }
+                finally { execute = null; }
+            }
+        };
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionConsumer.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionConsumer.java
new file mode 100644
index 0000000..c923813
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionConsumer.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.function.Consumer;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableConsumer;
+import org.apache.cassandra.simulator.systems.InterceptedExecution.InterceptedTaskExecution;
+
+public class SimulatedActionConsumer<I> extends SimulatedAction
+{
+    private final IInvokableInstance on;
+    private Consumer<Throwable> onFailure;
+    private SerializableConsumer<I> execute;
+    private I parameter;
+
+    public SimulatedActionConsumer(Object description, Modifiers self, Modifiers children, SimulatedSystems simulated, IInvokableInstance on, SerializableConsumer<I> execute, I parameter)
+    {
+        super(description, self, children, null, simulated);
+        this.onFailure = simulated.failures;
+        this.execute = execute;
+        this.on = on;
+        this.parameter = parameter;
+    }
+
+    @Override
+    protected InterceptedTaskExecution task()
+    {
+        return new InterceptedTaskExecution((InterceptingExecutor) on.executor())
+        {
+            public void run()
+            {
+                // we'll be invoked on the node's executor, but we need to ensure the task is loaded on its classloader
+                try { on.unsafeAcceptOnThisThread(execute, parameter); }
+                catch (Throwable t) { onFailure.accept(t); }
+                finally { execute = null; parameter = null; onFailure = null; }
+            }
+        };
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionTask.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionTask.java
new file mode 100644
index 0000000..6607e19
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedActionTask.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
+import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.OrderOn;
+import org.apache.cassandra.simulator.systems.InterceptedExecution.InterceptedTaskExecution;
+import org.apache.cassandra.utils.Throwables;
+
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.TASK;
+
+public class SimulatedActionTask extends SimulatedAction implements Runnable
+{
+    InterceptedExecution task;
+
+    public SimulatedActionTask(Object description, Modifiers self, Modifiers transitive, Verb forVerb, SimulatedSystems simulated, InterceptedExecution task)
+    {
+        this(description, TASK, OrderOn.NONE, self, transitive, Collections.emptyMap(), forVerb, simulated, task);
+    }
+
+    public SimulatedActionTask(Object description, Kind kind, OrderOn orderOn, Modifiers self, Modifiers transitive, Map<Verb, Modifiers> verbModifiers, Verb forVerb, SimulatedSystems simulated, InterceptedExecution task)
+    {
+        super(description, kind, orderOn, self, transitive, verbModifiers, forVerb, simulated);
+        this.task = task;
+        task.onCancel(this);
+    }
+
+    public SimulatedActionTask(Object description, Modifiers self, Modifiers children, SimulatedSystems simulated, IInvokableInstance on, SerializableRunnable run)
+    {
+        super(description, self, children, null, simulated);
+        this.task = unsafeAsTask(on, asSafeRunnable(on, run), simulated.failures);
+        task.onCancel(this);
+    }
+
+    private SimulatedActionTask(Object description, Modifiers self, Modifiers children, SimulatedSystems simulated, IInvokableInstance on, InterceptedExecution task)
+    {
+        super(description, self, children, null, simulated);
+        this.task = task;
+        task.onCancel(this);
+    }
+
+    /**
+     * Used to create actions from runnables that are not serializable but are nevertheless safe to invoke.
+     */
+    public static SimulatedActionTask unsafeTask(Object description, Modifiers self, Modifiers transitive, SimulatedSystems simulated, IInvokableInstance on, Runnable run)
+    {
+        return new SimulatedActionTask(description, self, transitive, simulated, on, unsafeAsTask(on, run, simulated.failures));
+    }
+
+    protected static Runnable asSafeRunnable(IInvokableInstance on, SerializableRunnable run)
+    {
+        return () -> on.unsafeRunOnThisThread(run);
+    }
+
+    protected static InterceptedTaskExecution unsafeAsTask(IInvokableInstance on, Runnable runnable, Consumer<? super Throwable> onFailure)
+    {
+        return new InterceptedTaskExecution((InterceptingExecutor) on.executor())
+        {
+            public void run()
+            {
+                // we'll be invoked on the node's executor, but we need to ensure the task is loaded on its classloader
+                try { runnable.run(); }
+                catch (Throwable t) { onFailure.accept(t); }
+            }
+        };
+    }
+
+    @Override
+    protected InterceptedExecution task()
+    {
+        return task;
+    }
+
+    @Override
+    protected ActionList performAndRegister()
+    {
+        try
+        {
+            return super.performAndRegister();
+        }
+        finally
+        {
+            task = null;
+        }
+    }
+
+    @Override
+    protected Throwable safeInvalidate(boolean isCancellation)
+    {
+        try
+        {
+            if (task != null)
+            {
+                task.onCancel(null);
+                task.cancel();
+                task = null;
+            }
+
+            return super.safeInvalidate(isCancellation);
+        }
+        catch (Throwable t)
+        {
+            return Throwables.merge(t, super.safeInvalidate(isCancellation));
+        }
+    }
+
+    @Override
+    public void run()
+    {
+        // cancellation invoked on the task by the application
+        task = null;
+        super.cancel();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedBallots.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedBallots.java
new file mode 100644
index 0000000..906dc5a
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedBallots.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.LongSupplier;
+import java.util.function.Supplier;
+
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.BallotGenerator;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.RandomSource.Choices;
+
+import static org.apache.cassandra.service.paxos.Ballot.atUnixMicrosWithLsb;
+
+// TODO (feature): link with SimulateTime, and otherwise improve
+public class SimulatedBallots
+{
+    enum Next { ONE, JUMP, TO_LATEST }
+
+    final LongSupplier uniqueSupplier; // must be unique for all ballots
+    final RandomSource random;
+    final Choices<Next> nextChoice;
+    final LongSupplier nextJump;
+    final AtomicLong latest = new AtomicLong(1L);
+
+    public SimulatedBallots(RandomSource random, Supplier<LongSupplier> jumpsSupplier)
+    {
+        this.uniqueSupplier = random.uniqueUniformSupplier(Long.MIN_VALUE, Long.MAX_VALUE);
+        this.random = random;
+        this.nextChoice = Choices.random(random, Next.values());
+        this.nextJump = jumpsSupplier.get();
+    }
+
+    class Generator extends AtomicLong implements BallotGenerator
+    {
+        public Generator()
+        {
+            super(1L);
+        }
+
+        public Ballot atUnixMicros(long unixMicros, Ballot.Flag flag)
+        {
+            return atUnixMicrosWithLsb(unixMicros, uniqueSupplier.getAsLong(), flag);
+        }
+
+        public Ballot next(long minUnixMicros, Ballot.Flag flag)
+        {
+            return Ballot.atUnixMicrosWithLsb(nextBallotTimestampMicros(minUnixMicros), uniqueSupplier.getAsLong(), flag);
+        }
+
+        public Ballot stale(long from, long to, Ballot.Flag flag)
+        {
+            return Ballot.atUnixMicrosWithLsb(random.uniform(from, to), uniqueSupplier.getAsLong(), flag);
+        }
+
+        private long nextBallotTimestampMicros(long minUnixMicros)
+        {
+            long next;
+            switch (nextChoice.choose(random))
+            {
+                default: throw new IllegalStateException();
+                case TO_LATEST:
+                    minUnixMicros = Math.max(latest.get(), minUnixMicros);
+                case ONE:
+                    next = accumulateAndGet(minUnixMicros, (a, b) -> Math.max(a, b) + 1);
+                    break;
+                case JUMP:
+                    long jump = Math.max(1, nextJump.getAsLong());
+                    next = addAndGet(jump);
+                    if (next < minUnixMicros)
+                        next = accumulateAndGet(minUnixMicros, (a, b) -> Math.max(a, b) + 1);
+            }
+            latest.accumulateAndGet(next, Math::max);
+            return next;
+        }
+
+        public long prevUnixMicros()
+        {
+            return get();
+        }
+    }
+
+    public BallotGenerator get()
+    {
+        return new Generator();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedExecution.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedExecution.java
new file mode 100644
index 0000000..bb63879
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedExecution.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
+
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.ActionPlan;
+import org.apache.cassandra.simulator.Actions;
+import org.apache.cassandra.simulator.OrderOn;
+import org.apache.cassandra.simulator.systems.InterceptedExecution.InterceptedFutureTaskExecution;
+import org.apache.cassandra.simulator.systems.InterceptedExecution.InterceptedThreadStart;
+import org.apache.cassandra.simulator.systems.InterceptingExecutor.InterceptedScheduledFutureTask;
+import org.apache.cassandra.utils.concurrent.Condition;
+import org.apache.cassandra.utils.concurrent.NotScheduledFuture;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
+
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.SCHEDULED_DAEMON;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.SCHEDULED_TASK;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.SCHEDULED_TIMEOUT;
+import static org.apache.cassandra.simulator.systems.SimulatedAction.Kind.TASK;
+
+public class SimulatedExecution implements InterceptorOfExecution
+{
+    static class NoExecutorMarker implements InterceptingExecutor
+    {
+        static final NoExecutorMarker INFINITE_LOOP = new NoExecutorMarker();
+        static final NoExecutorMarker THREAD = new NoExecutorMarker();
+
+        @Override public void addPending(Object task) { throw new UnsupportedOperationException(); }
+        @Override public void cancelPending(Object task) { throw new UnsupportedOperationException(); }
+        @Override public void submitUnmanaged(Runnable task) { throw new UnsupportedOperationException(); }
+        @Override public void submitAndAwaitPause(Runnable task, InterceptorOfConsequences interceptor) { throw new UnsupportedOperationException(); }
+
+        @Override public OrderOn orderAppliesAfterScheduling() { throw new UnsupportedOperationException(); }
+        @Override public int concurrency() { return Integer.MAX_VALUE; }
+    }
+
+    /**
+     * Used to handle executions submitted outside of the simulation.
+     * Once "closed", only simulated executions are permitted (until a new NoIntercept instance is assigned).
+     */
+    static class NoIntercept implements InterceptExecution
+    {
+        private final AtomicInteger unmanaged = new AtomicInteger();
+        private volatile Condition forbidden;
+
+        public <V, T extends RunnableFuture<V>> T addTask(T task, InterceptingExecutor executor)
+        {
+            synchronized (this)
+            {
+                if (forbidden != null)
+                    throw new InterceptingExecutor.ForbiddenExecutionException();
+                unmanaged.incrementAndGet();
+            }
+
+            class Run implements Runnable
+            {
+                public void run()
+                {
+                    try
+                    {
+                        task.run();
+                    }
+                    finally
+                    {
+                        if (unmanaged.decrementAndGet() == 0 && forbidden != null)
+                            forbidden.signal();
+                    }
+                }
+            }
+
+            executor.submitUnmanaged(new Run());
+            return task;
+        }
+
+        public <T> ScheduledFuture<T> schedule(SimulatedAction.Kind kind, long delayNanos, long deadlineNanos, Callable<T> runnable, InterceptingExecutor executor)
+        {
+            return new NotScheduledFuture<>();
+        }
+
+        public Thread start(SimulatedAction.Kind kind, Function<Runnable, InterceptibleThread> factory, Runnable run)
+        {
+            Thread thread = factory.apply(run);
+            thread.start();
+            return thread;
+        }
+
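+        // after this call any new unmanaged submission throws ForbiddenExecutionException; the
+        // condition is signalled once all previously submitted unmanaged tasks have completed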
+        NoIntercept forbidExecution()
+        {
+            forbidden = new NotInterceptedSyncCondition();
+            if (0 == unmanaged.get())
+                forbidden.signal();
+            return this;
+        }
+
+        void awaitTermination()
+        {
+            forbidden.awaitUninterruptibly();
+        }
+    }
+
+    static class Intercept implements InterceptExecution
+    {
+        final InterceptorOfConsequences intercept;
+
+        Intercept(InterceptorOfConsequences intercept)
+        {
+            this.intercept = intercept;
+        }
+
+        public <V, T extends RunnableFuture<V>> T addTask(T task, InterceptingExecutor executor)
+        {
+            InterceptedFutureTaskExecution<?> intercepted = new InterceptedFutureTaskExecution<>(TASK, executor, task);
+            intercept.interceptExecution(intercepted, executor);
+            return task;
+        }
+
+        public <V> ScheduledFuture<V> schedule(SimulatedAction.Kind kind, long delayNanos, long deadlineNanos, Callable<V> call, InterceptingExecutor executor)
+        {
+            assert kind == SCHEDULED_TASK || kind == SCHEDULED_TIMEOUT || kind == SCHEDULED_DAEMON;
+            InterceptedScheduledFutureTask<V> task = new InterceptedScheduledFutureTask<>(delayNanos, call);
+            InterceptedFutureTaskExecution<?> intercepted = new InterceptedFutureTaskExecution<>(kind, executor, task, deadlineNanos);
+            task.onCancel(intercepted::cancel);
+            intercept.interceptExecution(intercepted, executor.orderAppliesAfterScheduling());
+            return task;
+        }
+
+        public Thread start(SimulatedAction.Kind kind, Function<Runnable, InterceptibleThread> factory, Runnable run)
+        {
+            InterceptedThreadStart intercepted = new InterceptedThreadStart(factory, run, kind);
+            intercept.interceptExecution(intercepted, kind == SimulatedAction.Kind.INFINITE_LOOP ? NoExecutorMarker.INFINITE_LOOP : NoExecutorMarker.THREAD);
+            return intercepted.thread;
+        }
+    }
+
+    private NoIntercept noIntercept = new NoIntercept();
+
+    public SimulatedExecution()
+    {
+    }
+
+    public InterceptingExecutorFactory factory(InterceptorOfGlobalMethods interceptorOfGlobalMethods, ClassLoader classLoader, ThreadGroup threadGroup)
+    {
+        return new InterceptingExecutorFactory(this, interceptorOfGlobalMethods, classLoader, threadGroup);
+    }
+
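+    // Returns an interceptor bound to the current InterceptibleThread when it is intercepting;
+    // otherwise falls back to the unmanaged no-intercept path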
+    public InterceptExecution intercept()
+    {
+        Thread thread = Thread.currentThread();
+        if (!(thread instanceof InterceptibleThread))
+            return noIntercept;
+
+        InterceptibleThread interceptibleThread = (InterceptibleThread) thread;
+        if (!interceptibleThread.isIntercepting())
+            return noIntercept;
+
+        return new SimulatedExecution.Intercept(interceptibleThread);
+    }
+
+    public ActionPlan plan()
+    {
+        return ActionPlan.setUpTearDown(start(), stop());
+    }
+
+    private ActionList start()
+    {
+        return ActionList.of(Actions.of("Start Simulating Execution", () -> {
+            noIntercept.forbidExecution().awaitTermination();
+            return ActionList.empty();
+        }));
+    }
+
+    private ActionList stop()
+    {
+        return ActionList.of(Actions.of("Stop Simulating Execution", () -> {
+            noIntercept = new NoIntercept();
+            return ActionList.empty();
+        }));
+    }
+
+    public void forceStop()
+    {
+        noIntercept = new NoIntercept();
+    }
+
+    public static <T> Callable<T> callable(Runnable run, T result)
+    {
+        return new Callable<T>()
+        {
+            public T call()
+            {
+                run.run();
+                return result;
+            }
+
+            public String toString()
+            {
+                return result == null ? run.toString() : (run + " returning " + result);
+            }
+        };
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedFailureDetector.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedFailureDetector.java
new file mode 100644
index 0000000..b473cfc
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedFailureDetector.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.net.InetSocketAddress;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.gms.IFailureDetectionEventListener;
+import org.apache.cassandra.gms.IFailureDetector;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+public class SimulatedFailureDetector
+{
+    public static class Instance implements IFailureDetector
+    {
+        private static volatile FailureDetector wrapped;
+
+        private static volatile Function<InetSocketAddress, Boolean> OVERRIDE;
+        private static final Map<IFailureDetectionEventListener, Boolean> LISTENERS = Collections.synchronizedMap(new IdentityHashMap<>());
+
+        private static FailureDetector wrapped()
+        {
+            FailureDetector detector = wrapped;
+            if (detector == null)
+            {
+                synchronized (LISTENERS)
+                {
+                    if (wrapped == null)
+                        wrapped = new FailureDetector();
+                }
+                detector = wrapped;
+            }
+            return detector;
+        }
+
+        private Boolean override(InetAddressAndPort ep)
+        {
+            Function<InetSocketAddress, Boolean> overrideF = OVERRIDE;
+            return overrideF == null ? null : overrideF.apply(new InetSocketAddress(ep.getAddress(), ep.getPort()));
+        }
+
+        public boolean isAlive(InetAddressAndPort ep)
+        {
+            Boolean override = override(ep);
+            return override != null ? override : wrapped().isAlive(ep);
+        }
+
+        public void interpret(InetAddressAndPort ep)
+        {
+            wrapped().interpret(ep);
+        }
+
+        public void report(InetAddressAndPort ep)
+        {
+            wrapped().report(ep);
+        }
+
+        public void remove(InetAddressAndPort ep)
+        {
+            wrapped().remove(ep);
+        }
+
+        public void forceConviction(InetAddressAndPort ep)
+        {
+            wrapped().forceConviction(ep);
+        }
+
+        public void registerFailureDetectionEventListener(IFailureDetectionEventListener listener)
+        {
+            LISTENERS.put(listener, Boolean.TRUE);
+        }
+
+        public void unregisterFailureDetectionEventListener(IFailureDetectionEventListener listener)
+        {
+            LISTENERS.remove(listener);
+        }
+
+        synchronized static void setup(Function<InetSocketAddress, Boolean> override, Consumer<Consumer<InetSocketAddress>> register)
+        {
+            OVERRIDE = override;
+            register.accept(ep -> LISTENERS.keySet().forEach(c -> c.convict(InetAddressAndPort.getByAddress(ep), Double.MAX_VALUE)));
+        }
+    }
+
+    final List<Consumer<InetSocketAddress>> listeners = new CopyOnWriteArrayList<>();
+    final Map<InetSocketAddress, Boolean> override = new ConcurrentHashMap<>();
+
+    public SimulatedFailureDetector(Cluster cluster)
+    {
+        cluster.forEach(i -> i.unsafeAcceptOnThisThread(Instance::setup,
+                override::get,
+                consumer -> listeners.add(e -> i.unsafeAcceptOnThisThread(Consumer::accept, consumer, e)))
+        );
+    }
+
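+    // Marks the endpoint down for every instance (each consults the shared override map) and
+    // convicts it on each instance's registered failure-detection listeners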
+    public void markDown(InetSocketAddress address)
+    {
+        override.put(address, Boolean.FALSE);
+        listeners.forEach(c -> c.accept(address));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedFutureActionScheduler.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedFutureActionScheduler.java
new file mode 100644
index 0000000..f66999b
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedFutureActionScheduler.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.BitSet;
+
+import org.apache.cassandra.simulator.FutureActionScheduler;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.cluster.Topology;
+import org.apache.cassandra.simulator.cluster.TopologyListener;
+import org.apache.cassandra.simulator.systems.NetworkConfig.PhaseConfig;
+import org.apache.cassandra.simulator.utils.KindOfSequence;
+import org.apache.cassandra.simulator.utils.KindOfSequence.Decision;
+import org.apache.cassandra.simulator.utils.KindOfSequence.LinkLatency;
+import org.apache.cassandra.simulator.utils.KindOfSequence.NetworkDecision;
+import org.apache.cassandra.simulator.utils.KindOfSequence.Period;
+
+import static org.apache.cassandra.simulator.FutureActionScheduler.Deliver.DELIVER;
+import static org.apache.cassandra.simulator.FutureActionScheduler.Deliver.DELIVER_AND_TIMEOUT;
+import static org.apache.cassandra.simulator.FutureActionScheduler.Deliver.FAILURE;
+import static org.apache.cassandra.simulator.FutureActionScheduler.Deliver.TIMEOUT;
+
+public class SimulatedFutureActionScheduler implements FutureActionScheduler, TopologyListener
+{
+    static class Network
+    {
+        final LinkLatency normalLatency;
+        final LinkLatency delayLatency;
+        final NetworkDecision dropMessage;
+        final NetworkDecision delayMessage;
+
+        public Network(int nodes, PhaseConfig config, RandomSource random, KindOfSequence kind)
+        {
+            normalLatency = kind.linkLatency(nodes, config.normalLatency, random);
+            delayLatency = kind.linkLatency(nodes, config.delayLatency, random);
+            dropMessage = kind.networkDecision(nodes, config.dropChance, random);
+            delayMessage = kind.networkDecision(nodes, config.delayChance, random);
+        }
+    }
+
+    static class Scheduler extends SchedulerConfig
+    {
+        final Decision delayChance;
+        public Scheduler(SchedulerConfig config, RandomSource random, KindOfSequence kind)
+        {
+            super(config.longDelayChance, config.delayNanos, config.longDelayNanos);
+            delayChance = kind.decision(config.longDelayChance, random);
+        }
+    }
+
+    final int nodeCount;
+    final RandomSource random;
+    final SimulatedTime time;
+
+    // TODO (feature): should we produce more than two simultaneous partitions?
+    final BitSet isInDropPartition = new BitSet();
+    final BitSet isInFlakyPartition = new BitSet();
+
+    Topology topology;
+
+    final Network normal;
+    final Network flaky;
+    final Scheduler scheduler;
+
+    final Decision decidePartition;
+    final Decision decideFlaky;
+    final Period recomputePeriod;
+
+    long recomputeAt;
+
+    public SimulatedFutureActionScheduler(KindOfSequence kind, int nodeCount, RandomSource random, SimulatedTime time, NetworkConfig network, SchedulerConfig scheduler)
+    {
+        this.nodeCount = nodeCount;
+        this.random = random;
+        this.time = time;
+        this.normal = new Network(nodeCount, network.normal, random, kind);
+        this.flaky = new Network(nodeCount, network.flaky, random, kind);
+        this.scheduler = new Scheduler(scheduler, random, kind);
+        this.decidePartition = kind.decision(network.partitionChance, random);
+        this.decideFlaky = kind.decision(network.flakyChance, random);
+        this.recomputePeriod = kind.period(network.reconfigureInterval, random);
+    }
+
+    private void maybeRecompute()
+    {
+        if (time.nanoTime() < recomputeAt)
+            return;
+
+        if (topology == null)
+            return;
+
+        recompute();
+    }
+
+    private void recompute()
+    {
+        isInDropPartition.clear();
+        isInFlakyPartition.clear();
+
+        if (decidePartition.get(random))
+            computePartition(isInDropPartition);
+
+        if (decideFlaky.get(random))
+            computePartition(isInFlakyPartition);
+
+        recomputeAt = time.nanoTime() + recomputePeriod.get(random);
+    }
+
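+    // place a minority of the quorum (at most (rf-1)/2 distinct members) on one side of the partition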
+    private void computePartition(BitSet compute)
+    {
+        int size = topology.quorumRf <= 4 ? 1 : random.uniform(1, (topology.quorumRf - 1)/2);
+        while (size > 0)
+        {
+            int next = random.uniform(0, topology.membersOfQuorum.length);
+            if (compute.get(next))
+                continue;
+            compute.set(next);
+            --size;
+        }
+    }
+
+    Network config(int from, int to)
+    {
+        maybeRecompute();
+        return isInFlakyPartition.get(from) != isInFlakyPartition.get(to) ? flaky : normal;
+    }
+
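+    // messages crossing a drop partition always time out; otherwise they are delivered unless the drop
+    // decision fires, in which case the outcome is DELIVER_AND_TIMEOUT, TIMEOUT or FAILURE (1/2, 1/4, 1/4)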
+    @Override
+    public FutureActionScheduler.Deliver shouldDeliver(int from, int to)
+    {
+        Network config = config(from, to);
+
+        if (isInDropPartition.get(from) != isInDropPartition.get(to))
+            return TIMEOUT;
+
+        if (!config.dropMessage.get(random, from, to))
+            return DELIVER;
+
+        if (random.decide(0.5f))
+            return DELIVER_AND_TIMEOUT;
+
+        if (random.decide(0.5f))
+            return TIMEOUT;
+
+        return FAILURE;
+    }
+
+    @Override
+    public long messageDeadlineNanos(int from, int to)
+    {
+        Network config = config(from, to);
+        return time.nanoTime() + (config.delayMessage.get(random, from, to)
+                                  ? config.delayLatency.get(random, from, to)
+                                  : config.normalLatency.get(random, from, to));
+    }
+
+    @Override
+    public long messageTimeoutNanos(long expiresAtNanos, long expirationIntervalNanos)
+    {
+        return expiresAtNanos + random.uniform(0, expirationIntervalNanos / 2);
+    }
+
+    @Override
+    public long messageFailureNanos(int from, int to)
+    {
+        return messageDeadlineNanos(from, to);
+    }
+
+    @Override
+    public long schedulerDelayNanos()
+    {
+        return (scheduler.delayChance.get(random) ? scheduler.longDelayNanos : scheduler.delayNanos).select(random);
+    }
+
+    @Override
+    public void onChange(Topology newTopology)
+    {
+        Topology oldTopology = topology;
+        topology = newTopology;
+        if (oldTopology == null || (newTopology.quorumRf < oldTopology.quorumRf && newTopology.quorumRf < isInDropPartition.cardinality()))
+            recompute();
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedMessageDelivery.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedMessageDelivery.java
new file mode 100644
index 0000000..b0821d7
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedMessageDelivery.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.net.InetSocketAddress;
+
+import org.apache.cassandra.distributed.api.ICluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IMessage;
+import org.apache.cassandra.distributed.api.IMessageSink;
+
+public class SimulatedMessageDelivery implements IMessageSink
+{
+    private final ICluster<? extends IInvokableInstance> cluster;
+    public SimulatedMessageDelivery(ICluster<? extends IInvokableInstance> cluster)
+    {
+        this.cluster = cluster;
+        cluster.setMessageSink(this);
+    }
+
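+    // hand every outbound message to the intercepting simulator thread instead of delivering it directly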
+    @Override
+    public void accept(InetSocketAddress to, IMessage message)
+    {
+        ((InterceptibleThread)Thread.currentThread()).interceptMessage(cluster.get(message.from()), cluster.get(to), message);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedQuery.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedQuery.java
new file mode 100644
index 0000000..d190fd7
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedQuery.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.impl.Query;
+import org.apache.cassandra.simulator.Action.Modifiers;
+
+public class SimulatedQuery extends SimulatedActionCallable<Object[][]>
+{
+    public SimulatedQuery(Object description, SimulatedSystems simulated, IInvokableInstance instance, String query, ConsistencyLevel commitConsistency, ConsistencyLevel serialConsistency, Object... params)
+    {
+        this(description, Modifiers.NONE, Modifiers.NONE, simulated, instance, query, commitConsistency, serialConsistency, params);
+    }
+
+    public SimulatedQuery(Object description, Modifiers self, Modifiers transitive, SimulatedSystems simulated, IInvokableInstance instance, String query, ConsistencyLevel commitConsistency, ConsistencyLevel serialConsistency, Object[] params)
+    {
+        super(description, self, transitive, simulated, instance, new Query(query, -1, commitConsistency, serialConsistency, params));
+    }
+
+    public SimulatedQuery(Object description, Modifiers self, Modifiers transitive, SimulatedSystems simulated, IInvokableInstance instance, String query, long timestamp, ConsistencyLevel consistency, Object... params)
+    {
+        this(description, self, transitive, simulated, instance, query, timestamp, consistency, null, params);
+    }
+
+    public SimulatedQuery(Object description, Modifiers self, Modifiers transitive, SimulatedSystems simulated, IInvokableInstance instance, String query, long timestamp, ConsistencyLevel commitConsistency, ConsistencyLevel serialConsistency, Object[] params)
+    {
+        super(description, self, transitive, simulated, instance, new Query(query, timestamp, commitConsistency, serialConsistency, params));
+    }
+
+    @Override
+    public void accept(Object[][] success, Throwable failure)
+    {
+        if (failure != null)
+            simulated.failures.accept(failure);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedSnitch.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedSnitch.java
new file mode 100644
index 0000000..a87a88a
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedSnitch.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.net.InetSocketAddress;
+import java.util.Comparator;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.stream.IntStream;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInstanceConfig;
+import org.apache.cassandra.locator.IEndpointSnitch;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.ReplicaCollection;
+import org.apache.cassandra.simulator.cluster.NodeLookup;
+
+public class SimulatedSnitch extends NodeLookup
+{
+    public static class Instance implements IEndpointSnitch
+    {
+        private static volatile Function<InetSocketAddress, String> LOOKUP_DC;
+
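+        // rack and datacenter are simulated identically: both resolve to the assigned DC name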
+        public String getRack(InetAddressAndPort endpoint)
+        {
+            return LOOKUP_DC.apply(endpoint);
+        }
+
+        public String getDatacenter(InetAddressAndPort endpoint)
+        {
+            return LOOKUP_DC.apply(endpoint);
+        }
+
+        public <C extends ReplicaCollection<? extends C>> C sortedByProximity(InetAddressAndPort address, C addresses)
+        {
+            return addresses.sorted(Comparator.comparingInt(SimulatedSnitch::asInt));
+        }
+
+        public int compareEndpoints(InetAddressAndPort target, Replica r1, Replica r2)
+        {
+            return Comparator.comparingInt(SimulatedSnitch::asInt).compare(r1, r2);
+        }
+
+        public void gossiperStarting()
+        {
+        }
+
+        public boolean isWorthMergingForRangeQuery(ReplicaCollection<?> merged, ReplicaCollection<?> l1, ReplicaCollection<?> l2)
+        {
+            return false;
+        }
+
+        public static void setup(Function<InetSocketAddress, String> lookupDc)
+        {
+            LOOKUP_DC = lookupDc;
+        }
+    }
+
+    final int[] numInDcs;
+    final String[] nameOfDcs;
+
+    public SimulatedSnitch(int[] nodeToDc, int[] numInDcs)
+    {
+        super(nodeToDc);
+        this.nameOfDcs = IntStream.range(0, numInDcs.length).mapToObj(i -> "dc" + i).toArray(String[]::new);
+        this.numInDcs = numInDcs;
+    }
+
+    public int dcCount()
+    {
+        return nameOfDcs.length;
+    }
+
+    public String nameOfDc(int i)
+    {
+        return nameOfDcs[i];
+    }
+
+    public Cluster.Builder setup(Cluster.Builder builder)
+    {
+        for (int i = 0 ; i < numInDcs.length ; ++i)
+            builder.withRack(nameOfDcs[i], nameOfDcs[i], numInDcs[i]);
+        Consumer<IInstanceConfig> prev = builder.getConfigUpdater();
+        return builder.withConfig(config -> {
+            if (prev != null)
+                prev.accept(config);
+            config.set("endpoint_snitch", SimulatedSnitch.Instance.class.getName())
+                  .set("dynamic_snitch", false);
+        });
+    }
+
+    public Instance get()
+    {
+        return new Instance();
+    }
+
+    public void setup(Cluster cluster)
+    {
+        Function<InetSocketAddress, String> lookup = Cluster.getUniqueAddressLookup(cluster, i -> nameOfDcs[dcOf(i.config().num())])::get;
+        cluster.forEach(i -> i.unsafeAcceptOnThisThread(Instance::setup, lookup));
+        Instance.setup(lookup);
+    }
+
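+    // pack the endpoint's address bytes into an int to give a deterministic (if arbitrary) proximity order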
+    private static int asInt(Replica address)
+    {
+        byte[] bytes = address.endpoint().addressBytes;
+        return bytes[0] | (bytes[1] << 8) | (bytes[2] << 16) | (bytes[3] << 24);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedSystems.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedSystems.java
new file mode 100644
index 0000000..c3cf46a
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedSystems.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.Action.Modifiers;
+import org.apache.cassandra.simulator.Debug;
+import org.apache.cassandra.simulator.FutureActionScheduler;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.cluster.Topology;
+import org.apache.cassandra.simulator.cluster.TopologyListener;
+
+import static org.apache.cassandra.simulator.Action.Modifiers.NONE;
+import static org.apache.cassandra.simulator.Action.Modifiers.RELIABLE;
+
+public class SimulatedSystems
+{
+    public final RandomSource random;
+    public final SimulatedTime time;
+    public final SimulatedMessageDelivery delivery;
+    public final SimulatedExecution execution;
+    public final SimulatedBallots ballots;
+    public final SimulatedFailureDetector failureDetector;
+    public final SimulatedSnitch snitch;
+    public final FutureActionScheduler futureScheduler;
+    public final Debug debug;
+    public final Failures failures;
+    private final List<TopologyListener> topologyListeners; // TODO (cleanup): this is a mutable set of listeners but shared between instances
+
+    public SimulatedSystems(SimulatedSystems copy)
+    {
+        this(copy.random, copy.time, copy.delivery, copy.execution, copy.ballots, copy.failureDetector, copy.snitch, copy.futureScheduler, copy.debug, copy.failures, copy.topologyListeners);
+    }
+
+    public SimulatedSystems(RandomSource random, SimulatedTime time, SimulatedMessageDelivery delivery, SimulatedExecution execution, SimulatedBallots ballots, SimulatedFailureDetector failureDetector, SimulatedSnitch snitch, FutureActionScheduler futureScheduler, Debug debug, Failures failures)
+    {
+        this(random, time, delivery, execution, ballots, failureDetector, snitch, futureScheduler, debug, failures, new ArrayList<>());
+    }
+
+    private SimulatedSystems(RandomSource random, SimulatedTime time, SimulatedMessageDelivery delivery, SimulatedExecution execution, SimulatedBallots ballots, SimulatedFailureDetector failureDetector, SimulatedSnitch snitch, FutureActionScheduler futureScheduler, Debug debug, Failures failures, List<TopologyListener> topologyListeners)
+    {
+        this.random = random;
+        this.time = time;
+        this.delivery = delivery;
+        this.execution = execution;
+        this.ballots = ballots;
+        this.failureDetector = failureDetector;
+        this.snitch = snitch;
+        this.futureScheduler = futureScheduler;
+        this.debug = debug;
+        this.failures = failures;
+        this.topologyListeners = topologyListeners;
+    }
+
+    public Action run(Object description, IInvokableInstance on, SerializableRunnable run)
+    {
+        return new SimulatedActionTask(description, NONE, NONE, this, on, run);
+    }
+
+    public Action transitivelyReliable(Object description, IInvokableInstance on, SerializableRunnable run)
+    {
+        return new SimulatedActionTask(description, NONE, RELIABLE, this, on, run);
+    }
+
+    /**
+     * Useful for non-serializable operations whose execution we still need to intercept
+     */
+    public Action invoke(Object description, Modifiers self, Modifiers children, InterceptedExecution invoke)
+    {
+        return new SimulatedActionTask(description, self, children, null, this, invoke);
+    }
+
+    public void announce(Topology topology)
+    {
+        for (int i = 0; i < topologyListeners.size() ; ++i)
+            topologyListeners.get(i).onChange(topology);
+    }
+
+    public void register(TopologyListener listener)
+    {
+        topologyListeners.add(listener);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedTime.java b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedTime.java
new file mode 100644
index 0000000..ae20d57
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/systems/SimulatedTime.java
@@ -0,0 +1,490 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.systems;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.LongConsumer;
+import java.util.regex.Pattern;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.distributed.impl.IsolatedExecutor;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.utils.KindOfSequence;
+import org.apache.cassandra.simulator.utils.KindOfSequence.Period;
+import org.apache.cassandra.simulator.utils.LongRange;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.MonotonicClock;
+import org.apache.cassandra.utils.MonotonicClock.AbstractEpochSamplingClock.AlmostSameTime;
+import org.apache.cassandra.utils.MonotonicClockTranslation;
+import org.apache.cassandra.utils.Shared;
+
+import static java.util.concurrent.TimeUnit.DAYS;
+import static java.util.concurrent.TimeUnit.HOURS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.simulator.RandomSource.Choices.uniform;
+
+// TODO (cleanup): when we encounter an exception and unwind the simulation, we should restore normal time to go with normal waits etc.
+public class SimulatedTime
+{
+    private static final Pattern PERMITTED_TIME_THREADS = Pattern.compile("(logback|SimulationLiveness|Reconcile)[-:][0-9]+");
+
+    @Shared(scope = Shared.Scope.SIMULATION)
+    public interface Listener
+    {
+        void accept(String kind, long value);
+    }
+
+    @Shared(scope = Shared.Scope.SIMULATION)
+    public interface ClockAndMonotonicClock extends Clock, MonotonicClock
+    {
+    }
+
+    @Shared(scope = Shared.Scope.SIMULATION)
+    public interface LocalTime extends ClockAndMonotonicClock
+    {
+        long relativeToLocalNanos(long relativeNanos);
+        long relativeToGlobalNanos(long relativeNanos);
+        long localToRelativeNanos(long absoluteLocalNanos);
+        long localToGlobalNanos(long absoluteLocalNanos);
+        long nextGlobalMonotonicMicros();
+    }
+
+    @PerClassLoader
+    private static class Disabled extends Clock.Default implements LocalTime
+    {
+        @Override
+        public long now()
+        {
+            return nanoTime();
+        }
+
+        @Override
+        public long error()
+        {
+            return 0;
+        }
+
+        @Override
+        public MonotonicClockTranslation translate()
+        {
+            return new AlmostSameTime(System.currentTimeMillis(), System.nanoTime(), 0L);
+        }
+
+        @Override
+        public boolean isAfter(long instant)
+        {
+            return isAfter(System.nanoTime(), instant);
+        }
+
+        @Override
+        public boolean isAfter(long now, long instant)
+        {
+            return now > instant;
+        }
+
+        @Override
+        public long relativeToLocalNanos(long relativeNanos)
+        {
+            return System.nanoTime() + relativeNanos;
+        }
+
+        @Override
+        public long relativeToGlobalNanos(long relativeNanos)
+        {
+            return System.nanoTime() + relativeNanos;
+        }
+
+        @Override
+        public long localToRelativeNanos(long absoluteLocalNanos)
+        {
+            return absoluteLocalNanos - System.nanoTime();
+        }
+
+        @Override
+        public long localToGlobalNanos(long absoluteLocalNanos)
+        {
+            return absoluteLocalNanos;
+        }
+
+        @Override
+        public long nextGlobalMonotonicMicros()
+        {
+            return FBUtilities.timestampMicros();
+        }
+    }
+
+    public static class Delegating implements ClockAndMonotonicClock
+    {
+        final Disabled disabled = new Disabled();
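+        // time may only be read by intercepted simulation threads (which carry their own LocalTime)
+        // or by a small allow-list of infrastructure threads; anything else fails fast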
+        private ClockAndMonotonicClock check()
+        {
+            Thread thread = Thread.currentThread();
+            if (thread instanceof InterceptibleThread)
+            {
+                InterceptibleThread interceptibleThread = ((InterceptibleThread) thread);
+                if (interceptibleThread.isIntercepting())
+                    return interceptibleThread.time();
+            }
+            if (PERMITTED_TIME_THREADS.matcher(Thread.currentThread().getName()).matches())
+                return disabled;
+            throw new IllegalStateException("Using time is not allowed during simulation");
+        }
+
+        public long nanoTime() { return check().nanoTime(); }
+        public long currentTimeMillis()  { return check().currentTimeMillis(); }
+        public long now()  { return check().now(); }
+        public long error()  { return check().error(); }
+        public MonotonicClockTranslation translate()  { return check().translate(); }
+        public boolean isAfter(long instant)  { return check().isAfter(instant); }
+        public boolean isAfter(long now, long instant)  { return check().isAfter(now, instant); }
+    }
+
+    @PerClassLoader
+    public static class Global implements Clock, MonotonicClock
+    {
+        private static LocalTime current;
+
+        public long nanoTime()
+        {
+            return current.nanoTime();
+        }
+
+        public long currentTimeMillis()
+        {
+            return current.currentTimeMillis();
+        }
+
+        @Override
+        public long now()
+        {
+            return current.now();
+        }
+
+        @Override
+        public long error()
+        {
+            return current.error();
+        }
+
+        @Override
+        public MonotonicClockTranslation translate()
+        {
+            return current.translate();
+        }
+
+        @Override
+        public boolean isAfter(long instant)
+        {
+            return current.isAfter(instant);
+        }
+
+        @Override
+        public boolean isAfter(long now, long instant)
+        {
+            return current.isAfter(now, instant);
+        }
+
+        public static long relativeToGlobalNanos(long relativeNanos)
+        {
+            return current.relativeToGlobalNanos(relativeNanos);
+        }
+
+        public static long relativeToLocalNanos(long relativeNanos)
+        {
+            return current.relativeToLocalNanos(relativeNanos);
+        }
+
+        public static long localToRelativeNanos(long absoluteNanos)
+        {
+            return current.localToRelativeNanos(absoluteNanos);
+        }
+
+        public static long localToGlobalNanos(long absoluteNanos)
+        {
+            return current.localToGlobalNanos(absoluteNanos);
+        }
+
+        public static LocalTime current()
+        {
+            return current;
+        }
+
+        @SuppressWarnings("unused") // used by simulator for schema changes
+        public static long nextGlobalMonotonicMicros()
+        {
+            return current.nextGlobalMonotonicMicros();
+        }
+
+        public static void setup(LocalTime newLocalTime)
+        {
+            current = newLocalTime;
+        }
+
+        public static void disable()
+        {
+            current = new Disabled();
+        }
+    }
+
+    public class InstanceTime implements LocalTime
+    {
+        final Period nanosDriftSupplier;
+        long from, to;
+        long baseDrift, nextDrift, lastLocalNanoTime, lastDrift, lastGlobal;
+        double diffPerGlobal;
+
+        private InstanceTime(Period nanosDriftSupplier)
+        {
+            this.nanosDriftSupplier = nanosDriftSupplier;
+        }
+
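+        // local time is the global simulated time plus a drift interpolated linearly from baseDrift to
+        // nextDrift over the current window, so each instance's clock diverges smoothly from the global one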
+        @Override
+        public long nanoTime()
+        {
+            long global = globalNanoTime;
+            if (lastGlobal == global)
+                return lastLocalNanoTime;
+
+            if (global >= to)
+            {
+                baseDrift = nextDrift;
+                nextDrift = nanosDriftSupplier.get(random);
+                from = global;
+                to = global + Math.max(baseDrift, nextDrift);
+                diffPerGlobal = (nextDrift - baseDrift) / (double)(to - from);
+                listener.accept("SetNextDrift", nextDrift);
+            }
+
+            long drift = baseDrift + (long)(diffPerGlobal * (global - from));
+            long local = global + drift;
+            lastGlobal = global;
+            lastDrift = drift;
+            lastLocalNanoTime = local;
+            listener.accept("ReadLocal", local);
+            return local;
+        }
+
+        @Override
+        public long currentTimeMillis()
+        {
+            return NANOSECONDS.toMillis(nanoTime()) + millisEpoch;
+        }
+
+        @Override
+        public long now()
+        {
+            return nanoTime();
+        }
+
+        @Override
+        public long error()
+        {
+            return 1;
+        }
+
+        @Override
+        public MonotonicClockTranslation translate()
+        {
+            return new MonotonicClockTranslation()
+            {
+                @Override
+                public long fromMillisSinceEpoch(long currentTimeMillis)
+                {
+                    return MILLISECONDS.toNanos(currentTimeMillis - millisEpoch);
+                }
+
+                @Override
+                public long toMillisSinceEpoch(long nanoTime)
+                {
+                    return NANOSECONDS.toMillis(nanoTime) + millisEpoch;
+                }
+
+                @Override
+                public long error()
+                {
+                    return MILLISECONDS.toNanos(1L);
+                }
+            };
+        }
+
+        @Override
+        public boolean isAfter(long instant)
+        {
+            return false;
+        }
+
+        @Override
+        public boolean isAfter(long now, long instant)
+        {
+            return false;
+        }
+
+        @Override
+        public long nextGlobalMonotonicMicros()
+        {
+            return SimulatedTime.this.nextGlobalMonotonicMicros();
+        }
+
+        @Override
+        public long relativeToLocalNanos(long relativeNanos)
+        {
+            return relativeNanos + lastLocalNanoTime;
+        }
+
+        @Override
+        public long relativeToGlobalNanos(long relativeNanos)
+        {
+            return relativeNanos + globalNanoTime;
+        }
+
+        @Override
+        public long localToRelativeNanos(long absoluteLocalNanos)
+        {
+            return absoluteLocalNanos - lastLocalNanoTime;
+        }
+
+        @Override
+        public long localToGlobalNanos(long absoluteLocalNanos)
+        {
+            return absoluteLocalNanos - lastDrift;
+        }
+    }
+
+    private final KindOfSequence kindOfDrift;
+    private final LongRange nanosDriftRange;
+    private final RandomSource random;
+    private final Period discontinuityTimeSupplier;
+    private final long millisEpoch;
+    private volatile long globalNanoTime;
+    private long futureTimestamp;
+    private long discontinuityTime;
+    private boolean permitDiscontinuities;
+    private final List<LongConsumer> onDiscontinuity = new ArrayList<>();
+    private final Listener listener;
+    private InstanceTime[] instanceTimes;
+
+    public SimulatedTime(int nodeCount, RandomSource random, long millisEpoch, LongRange nanoDriftRange, KindOfSequence kindOfDrift, Period discontinuityTimeSupplier, Listener listener)
+    {
+        this.random = random;
+        this.millisEpoch = millisEpoch;
+        this.nanosDriftRange = nanoDriftRange;
+        this.futureTimestamp = (millisEpoch + DAYS.toMillis(1000)) * 1000;
+        this.kindOfDrift = kindOfDrift;
+        this.discontinuityTime = MILLISECONDS.toNanos(random.uniform(500L, 30000L));
+        this.discontinuityTimeSupplier = discontinuityTimeSupplier;
+        this.listener = listener;
+        this.instanceTimes = new InstanceTime[nodeCount];
+    }
+
+    public Closeable setup(int nodeNum, ClassLoader classLoader)
+    {
+        Preconditions.checkState(instanceTimes[nodeNum - 1] == null);
+        InstanceTime instanceTime = new InstanceTime(kindOfDrift.period(nanosDriftRange, random));
+        IsolatedExecutor.transferAdhoc((IIsolatedExecutor.SerializableConsumer<LocalTime>) Global::setup, classLoader)
+                        .accept(instanceTime);
+        instanceTimes[nodeNum - 1] = instanceTime;
+        return IsolatedExecutor.transferAdhoc((IIsolatedExecutor.SerializableRunnable) Global::disable, classLoader)::run;
+    }
+
+    public InstanceTime get(int nodeNum)
+    {
+        return instanceTimes[nodeNum - 1];
+    }
+
+    public void permitDiscontinuities()
+    {
+        listener.accept("PermitDiscontinuity", 1);
+        permitDiscontinuities = true;
+        updateAndMaybeApplyDiscontinuity(globalNanoTime);
+    }
+
+    public void forbidDiscontinuities()
+    {
+        listener.accept("PermitDiscontinuity", 0);
+        permitDiscontinuities = false;
+    }
+
+    private void updateAndMaybeApplyDiscontinuity(long newGlobal)
+    {
+        if (permitDiscontinuities && newGlobal >= discontinuityTime)
+        {
+            updateAndApplyDiscontinuity(newGlobal);
+        }
+        else
+        {
+            globalNanoTime = newGlobal;
+            listener.accept("SetGlobal", newGlobal);
+        }
+    }
+
+    private void updateAndApplyDiscontinuity(long newGlobal)
+    {
+        long discontinuity = uniform(DAYS, HOURS, MINUTES).choose(random).toNanos(1L);
+        listener.accept("ApplyDiscontinuity", discontinuity);
+        discontinuityTime = newGlobal + discontinuity + discontinuityTimeSupplier.get(random);
+        globalNanoTime = newGlobal + discontinuity;
+        listener.accept("SetGlobal", newGlobal + discontinuity);
+        onDiscontinuity.forEach(l -> l.accept(discontinuity));
+    }
+
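+    // advance the global clock to the requested instant (possibly applying a discontinuity),
+    // or by a single nanosecond when the requested instant is not ahead of the current one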
+    public void tick(long nanos)
+    {
+        listener.accept("Tick", nanos);
+        long global = globalNanoTime;
+        if (nanos > global)
+        {
+            updateAndMaybeApplyDiscontinuity(nanos);
+        }
+        else
+        {
+            globalNanoTime = global + 1;
+            listener.accept("IncrGlobal", global + 1);
+        }
+    }
+
+    public long nanoTime()
+    {
+        long global = globalNanoTime;
+        listener.accept("ReadGlobal", global);
+        return global;
+    }
+
+    // make sure schema changes persist
+    public synchronized long nextGlobalMonotonicMicros()
+    {
+        return ++futureTimestamp;
+    }
+
+    public void onDiscontinuity(LongConsumer onDiscontinuity)
+    {
+        this.onDiscontinuity.add(onDiscontinuity);
+    }
+
+    public void onTimeEvent(String kind, long value)
+    {
+        listener.accept(kind, value);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/ChanceRange.java b/test/simulator/main/org/apache/cassandra/simulator/utils/ChanceRange.java
new file mode 100644
index 0000000..5978e20
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/ChanceRange.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.function.ToDoubleFunction;
+
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.asm.ChanceSupplier;
+
+public class ChanceRange
+{
+    public final ToDoubleFunction<RandomSource> distribution;
+    public final float min;
+    public final float max;
+
+    public ChanceRange(ToDoubleFunction<RandomSource> distribution, float min, float max)
+    {
+        this.distribution = distribution;
+        assert min >= 0 && max <= 1.0;
+        this.min = min;
+        this.max = max;
+    }
+
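+    // map the distribution's output (expected in [0, 1]) onto the [min, max] interval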
+    public float select(RandomSource random)
+    {
+        if (min >= max) return min;
+        return (float) ((distribution.applyAsDouble(random) * (max - min)) + min);
+    }
+
+    public ChanceSupplier asSupplier(RandomSource random)
+    {
+        return () -> select(random);
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/CompactLists.java b/test/simulator/main/org/apache/cassandra/simulator/utils/CompactLists.java
new file mode 100644
index 0000000..cb62bb7
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/CompactLists.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+import org.apache.cassandra.distributed.api.IIsolatedExecutor.TriConsumer;
+import org.apache.cassandra.utils.Throwables;
+
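+// Helpers for small listener lists: null stands in for the empty list, a single element is held in an
+// immutable singleton, and an ArrayList is only allocated once a second element is appended.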
+public class CompactLists
+{
+    public static <I> List<I> append(List<I> in, I append)
+    {
+        if (in == null) return Collections.singletonList(append);
+        else if (in.size() == 1)
+        {
+            List<I> out = new ArrayList<>(2);
+            out.add(in.get(0));
+            out.add(append);
+            return out;
+        }
+        else
+        {
+            in.add(append);
+            return in;
+        }
+    }
+
+    public static <I> List<I> remove(List<I> in, I remove)
+    {
+        if (in == null) return null;
+        else if (in.size() == 1) return in.contains(remove) ? null : in;
+        else
+        {
+            in.remove(remove);
+            return in;
+        }
+    }
+
+    public static <I> Throwable safeForEach(List<I> list, Consumer<I> forEach)
+    {
+        if (list == null)
+            return null;
+
+        if (list.size() == 1)
+        {
+            try
+            {
+                forEach.accept(list.get(0));
+                return null;
+            }
+            catch (Throwable t)
+            {
+                return t;
+            }
+        }
+
+        Throwable result = null;
+        for (int i = 0, maxi = list.size() ; i < maxi ; ++i)
+        {
+            try
+            {
+                forEach.accept(list.get(i));
+            }
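+    // recompute partitions on the first topology, or when the quorum shrinks below the current partition size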
+            catch (Throwable t)
+            {
+                result = Throwables.merge(result, t);
+            }
+        }
+        return result;
+    }
+
+    public static <I1, I2> Throwable safeForEach(List<I1> list, BiConsumer<I1, I2> forEach, I2 i2)
+    {
+        if (list == null)
+            return null;
+
+        if (list.size() == 1)
+        {
+            try
+            {
+                forEach.accept(list.get(0), i2);
+                return null;
+            }
+            catch (Throwable t)
+            {
+                return t;
+            }
+        }
+
+        Throwable result = null;
+        for (int i = 0, maxi = list.size() ; i < maxi ; ++i)
+        {
+            try
+            {
+                forEach.accept(list.get(i), i2);
+            }
+            catch (Throwable t)
+            {
+                result = Throwables.merge(result, t);
+            }
+        }
+        return result;
+    }
+
+    public static <I1, I2, I3> Throwable safeForEach(List<I1> list, TriConsumer<I1, I2, I3> forEach, I2 i2, I3 i3)
+    {
+        if (list == null)
+            return null;
+
+        if (list.size() == 1)
+        {
+            try
+            {
+                forEach.accept(list.get(0), i2, i3);
+                return null;
+            }
+            catch (Throwable t)
+            {
+                return t;
+            }
+        }
+
+        Throwable result = null;
+        for (int i = 0, maxi = list.size() ; i < maxi ; ++i)
+        {
+            try
+            {
+                forEach.accept(list.get(i), i2, i3);
+            }
+            catch (Throwable t)
+            {
+                result = Throwables.merge(result, t);
+            }
+        }
+        return result;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/CountingCollection.java b/test/simulator/main/org/apache/cassandra/simulator/utils/CountingCollection.java
new file mode 100644
index 0000000..7e04fcd
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/CountingCollection.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.AbstractCollection;
+import java.util.Iterator;
+
+public class CountingCollection<T> extends AbstractCollection<T>
+{
+    int count;
+
+    @Override
+    public boolean add(T t)
+    {
+        ++count;
+        return true;
+    }
+
+    @Override
+    public boolean remove(Object o)
+    {
+        if (count == 0) throw new AssertionError();
+        --count;
+        return true;
+    }
+
+    @Override
+    public Iterator<T> iterator()
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int size()
+    {
+        return count;
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/IntRange.java b/test/simulator/main/org/apache/cassandra/simulator/utils/IntRange.java
new file mode 100644
index 0000000..ad465dd
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/IntRange.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.simulator.RandomSource;
+
+public class IntRange
+{
+    public final int min;
+    public final int max;
+
+    public IntRange(int min, int max)
+    {
+        this.min = min;
+        this.max = max;
+    }
+
+    public IntRange(long min, long max)
+    {
+        Preconditions.checkArgument(min < Integer.MAX_VALUE);
+        Preconditions.checkArgument(max <= Integer.MAX_VALUE);
+        this.min = (int)min;
+        this.max = (int)max;
+    }
+
+    public IntRange(long min, long max, TimeUnit from, TimeUnit to)
+    {
+        this(to.convert(min,from), to.convert(max, from));
+    }
+
+    public int select(RandomSource random)
+    {
+        if (min == max) return min;
+        return random.uniform(min, 1 + max);
+    }
+
+    public int select(RandomSource random, int minlb, int maxub)
+    {
+        int min = Math.max(this.min, minlb);
+        int max = Math.min(this.max, maxub);
+        if (min >= max) return min;
+        return random.uniform(min, 1 + max);
+    }
+
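+    // parse a "<min>..<max>" string into a range; each replaceFirst strips the opposite bound before parsing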
+    public static Optional<IntRange> parseRange(Optional<String> chance)
+    {
+        return chance.map(s -> new IntRange(Integer.parseInt(s.replaceFirst("\\.\\.+[0-9]+", "")),
+                                         Integer.parseInt(s.replaceFirst("[0-9]+\\.\\.+", ""))));
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/IntrusiveLinkedList.java b/test/simulator/main/org/apache/cassandra/simulator/utils/IntrusiveLinkedList.java
new file mode 100644
index 0000000..f3dd40d
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/IntrusiveLinkedList.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.Iterator;
+import java.util.Spliterator;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+
+import static java.util.Spliterators.spliteratorUnknownSize;
+
+/**
+ * A simple intrusive doubly-linked list for maintaining a list of tasks,
+ * useful for invalidating queued ordered tasks
+ */
+@SuppressWarnings("unchecked")
+public class IntrusiveLinkedList<O extends IntrusiveLinkedListNode> extends IntrusiveLinkedListNode
+{
+    public IntrusiveLinkedList()
+    {
+        prev = next = this;
+    }
+
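+    // append to the tail of the list, i.e. link the node immediately before this sentinel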
+    public void add(O add)
+    {
+        assert add.prev == null && add.next == null;
+        IntrusiveLinkedListNode after = this;
+        IntrusiveLinkedListNode before = prev;
+        add.next = after;
+        add.prev = before;
+        before.next = add;
+        after.prev = add;
+    }
+
+    public O poll()
+    {
+        if (isEmpty())
+            return null;
+
+        IntrusiveLinkedListNode next = this.next;
+        next.remove();
+        return (O) next;
+    }
+
+    public boolean isEmpty()
+    {
+        return next == this;
+    }
+
+    public Stream<O> stream()
+    {
+        Iterator<O> iterator = new Iterator<O>()
+        {
+            IntrusiveLinkedListNode next = IntrusiveLinkedList.this.next;
+
+            @Override
+            public boolean hasNext()
+            {
+                return next != IntrusiveLinkedList.this;
+            }
+
+            @Override
+            public O next()
+            {
+                O result = (O)next;
+                next = next.next;
+                return result;
+            }
+        };
+
+        return StreamSupport.stream(spliteratorUnknownSize(iterator, Spliterator.IMMUTABLE), false);
+    }
+}
+
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/IntrusiveLinkedListNode.java b/test/simulator/main/org/apache/cassandra/simulator/utils/IntrusiveLinkedListNode.java
new file mode 100644
index 0000000..2145bd8
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/IntrusiveLinkedListNode.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+public abstract class IntrusiveLinkedListNode
+{
+    IntrusiveLinkedListNode prev;
+    IntrusiveLinkedListNode next;
+
+    protected boolean isFree()
+    {
+        return next == null;
+    }
+
+    protected void remove()
+    {
+        if (next != null)
+        {
+            prev.next = next;
+            next.prev = prev;
+            next = null;
+            prev = null;
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/KindOfSequence.java b/test/simulator/main/org/apache/cassandra/simulator/utils/KindOfSequence.java
new file mode 100644
index 0000000..367dea0
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/KindOfSequence.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.Arrays;
+
+import org.apache.cassandra.simulator.RandomSource;
+
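+// How generated sequences evolve over a run: UNIFORM samples each value independently;
+// UNIFORM_STEP steps linearly towards a freshly chosen uniform target, re-targeting on arrival;
+// RANDOMWALK takes bounded random steps clamped to the configured range.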
+public enum KindOfSequence
+{
+    UNIFORM, UNIFORM_STEP, RANDOMWALK;
+
+    public interface LinkLatency
+    {
+        long get(RandomSource random, int from, int to);
+    }
+
+    public interface NetworkDecision
+    {
+        boolean get(RandomSource random, int from, int to);
+    }
+
+    public interface Period
+    {
+        long get(RandomSource random);
+    }
+
+    public interface Decision
+    {
+        boolean get(RandomSource random);
+    }
+
+    static class UniformPeriod implements LinkLatency, Period
+    {
+        final LongRange nanos;
+
+        UniformPeriod(LongRange nanos)
+        {
+            this.nanos = nanos;
+        }
+
+        @Override
+        public long get(RandomSource random, int from, int to)
+        {
+            return nanos.select(random);
+        }
+
+        @Override
+        public long get(RandomSource random)
+        {
+            return nanos.select(random);
+        }
+    }
+
+    static class UniformStepPeriod implements LinkLatency, Period
+    {
+        final LongRange range;
+        long target;
+        long cur;
+        long step;
+
+        UniformStepPeriod(LongRange range, RandomSource random)
+        {
+            this.range = range;
+            cur = range.select(random);
+            next(random);
+        }
+
+        void next(RandomSource random)
+        {
+            target = range.select(random);
+            step = (target - cur) / random.uniform(16, 128);
+            if (step == 0) step = target > cur ? 1 : -1;
+        }
+
+        @Override
+        public long get(RandomSource random, int from, int to)
+        {
+            return get(random);
+        }
+
+        @Override
+        public long get(RandomSource random)
+        {
+            long result = cur;
+            cur += step;
+            if (step < 0 && cur <= target) next(random);
+            else if (step > 0 && cur >= target) next(random);
+            return result;
+        }
+    }
+
+    static class RandomWalkPeriod implements Period
+    {
+        final LongRange nanos;
+        final long maxStepSize;
+        long cur;
+
+        RandomWalkPeriod(LongRange nanos, RandomSource random)
+        {
+            this.nanos = nanos;
+            this.maxStepSize = maxStepSize(nanos, random);
+            this.cur = nanos.select(random);
+        }
+
+        @Override
+        public long get(RandomSource random)
+        {
+            long step = random.uniform(-maxStepSize, maxStepSize);
+            long cur = this.cur;
+            this.cur = step > 0 ? Math.min(nanos.max, cur + step)
+                                : Math.max(nanos.min, cur + step);
+            return cur;
+        }
+    }
+
+    static class RandomWalkLinkLatency implements LinkLatency
+    {
+        final LongRange nanos;
+        final long maxStepSize;
+        final long[][] curs;
+
+        RandomWalkLinkLatency(int nodes, LongRange nanos, RandomSource random)
+        {
+            this.nanos = nanos;
+            this.maxStepSize = maxStepSize(nanos, random);
+            this.curs = new long[nodes][nodes];
+            for (long[] c : curs)
+                Arrays.fill(c, nanos.min);
+        }
+
+        @Override
+        public long get(RandomSource random, int from, int to)
+        {
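+            // node ids are 1-based; convert to 0-based indices into the per-link state matrix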
+            --from;--to;
+            long cur = curs[from][to];
+            long step = random.uniform(-maxStepSize, maxStepSize);
+            curs[from][to] = step > 0 ? Math.min(nanos.max, cur + step)
+                                      : Math.max(nanos.min, cur + step);
+            return cur;
+        }
+    }
+
+    static class UniformStepLinkLatency implements LinkLatency
+    {
+        private static final int CUR = 0, TARGET = 1, STEP = 2;
+        final LongRange range;
+        final long[][][] state;
+
+        UniformStepLinkLatency(int nodes, LongRange range, RandomSource random)
+        {
+            this.range = range;
+            this.state = new long[nodes][nodes][3];
+            for (int i = 0 ; i < nodes ; ++i)
+            {
+                for (int j = 0 ; j < nodes ; ++j)
+                {
+                    long[] state = this.state[i][j];
+                    state[CUR] = range.select(random);
+                    next(random, state);
+                }
+            }
+        }
+
+        void next(RandomSource random, long[] state)
+        {
+            state[TARGET] = range.select(random);
+            state[STEP] = (state[TARGET] - state[CUR]) / random.uniform(16, 128);
+            if (state[STEP] == 0) state[STEP] = state[TARGET] > state[CUR] ? 1 : -1;
+        }
+
+        @Override
+        public long get(RandomSource random, int from, int to)
+        {
+            --from;--to;
+            long[] state = this.state[from][to];
+            long cur = state[CUR];
+            state[CUR] += state[STEP];
+            if (state[STEP] < 0 && cur <= state[TARGET]) next(random, state);
+            else if (state[STEP] > 0 && cur >= state[TARGET]) next(random, state);
+            return cur;
+        }
+    }
+
+    static class FixedChance implements NetworkDecision, Decision
+    {
+        final float chance;
+
+        FixedChance(float chance)
+        {
+            this.chance = chance;
+        }
+
+        @Override
+        public boolean get(RandomSource random, int from, int to)
+        {
+            return random.decide(chance);
+        }
+
+        @Override
+        public boolean get(RandomSource random)
+        {
+            return random.decide(chance);
+        }
+    }
+
+    static class RandomWalkNetworkDecision implements NetworkDecision
+    {
+        final ChanceRange range;
+        final float maxStepSize;
+        final float[][] curs;
+
+        RandomWalkNetworkDecision(int nodes, ChanceRange range, RandomSource random)
+        {
+            this.range = range;
+            this.maxStepSize = maxStepSize(range, random);
+            this.curs = new float[nodes][nodes];
+            for (float[] c : curs)
+                Arrays.fill(c, range.select(random));
+        }
+
+        @Override
+        public boolean get(RandomSource random, int from, int to)
+        {
+            --from;--to;
+            float cur = curs[from][to];
+            float step = (2*random.uniformFloat() - 1f) * maxStepSize;
+            curs[from][to] = step > 0 ? Math.min(range.max, cur + step)
+                                      : Math.max(range.min, cur + step);
+            return random.decide(cur);
+        }
+    }
+
+    static class UniformStepNetworkDecision implements NetworkDecision
+    {
+        private static final int CUR = 0, TARGET = 1, STEP = 2;
+        final ChanceRange range;
+        final float[][][] state;
+
+        UniformStepNetworkDecision(int nodes, ChanceRange range, RandomSource random)
+        {
+            this.range = range;
+            this.state = new float[nodes][nodes][3];
+            for (int i = 0 ; i < nodes ; ++i)
+            {
+                for (int j = 0 ; j < nodes ; ++j)
+                {
+                    float[] state = this.state[i][j];
+                    state[CUR] = range.select(random);
+                    next(random, state);
+                }
+            }
+        }
+
+        void next(RandomSource random, float[] state)
+        {
+            state[TARGET] = range.select(random);
+            state[STEP] = (state[TARGET] - state[CUR]) / random.uniform(16, 128);
+            if (state[STEP] == 0) state[STEP] = state[TARGET] > state[CUR] ? 1 : -1;
+        }
+
+        @Override
+        public boolean get(RandomSource random, int from, int to)
+        {
+            --from;--to;
+            float[] state = this.state[from][to];
+            float cur = state[CUR];
+            state[CUR] += state[STEP];
+            if (state[STEP] < 0 && cur <= state[TARGET]) next(random, state);
+            else if (state[STEP] > 0 && cur >= state[TARGET]) next(random, state);
+            return random.decide(cur);
+        }
+    }
+
+    static class RandomWalkDecision implements Decision
+    {
+        final ChanceRange range;
+        final float maxStepSize;
+        float cur;
+
+        RandomWalkDecision(ChanceRange range, RandomSource random)
+        {
+            this.range = range;
+            this.maxStepSize = maxStepSize(range, random);
+            this.cur = range.select(random);
+        }
+
+        @Override
+        public boolean get(RandomSource random)
+        {
+            float step = (2*random.uniformFloat() - 1f) * maxStepSize;
+            float cur = this.cur;
+            this.cur = step > 0 ? Math.min(range.max, cur + step)
+                                : Math.max(range.min, cur + step);
+            return random.decide(cur);
+        }
+    }
+
+    static class UniformStepDecision implements Decision
+    {
+        final ChanceRange range;
+        float target;
+        float cur;
+        float step;
+
+        UniformStepDecision(ChanceRange range, RandomSource random)
+        {
+            this.range = range;
+            cur = range.select(random);
+            next(random);
+        }
+
+        void next(RandomSource random)
+        {
+            target = range.select(random);
+            step = (target - cur) / random.uniform(16, 128);
+            if (step == 0) step = target > cur ? 1 : -1;
+        }
+
+        @Override
+        public boolean get(RandomSource random)
+        {
+            float chance = cur;
+            cur += step;
+            if (step < 0 && cur <= target) next(random);
+            else if (step > 0 && cur >= target) next(random);
+            return random.decide(chance);
+        }
+    }
+
+    public LinkLatency linkLatency(int nodes, LongRange nanos, RandomSource random)
+    {
+        switch (this)
+        {
+            default: throw new AssertionError();
+            case UNIFORM: return new UniformPeriod(nanos);
+            case UNIFORM_STEP: return new UniformStepLinkLatency(nodes, nanos, random);
+            case RANDOMWALK: return new RandomWalkLinkLatency(nodes, nanos, random);
+        }
+    }
+
+    public Period period(LongRange nanos, RandomSource random)
+    {
+        switch (this)
+        {
+            default: throw new AssertionError();
+            case UNIFORM: return new UniformPeriod(nanos);
+            case UNIFORM_STEP: return new UniformStepPeriod(nanos, random);
+            case RANDOMWALK: return new RandomWalkPeriod(nanos, random);
+        }
+    }
+
+    public NetworkDecision networkDecision(int nodes, ChanceRange range, RandomSource random)
+    {
+        switch (this)
+        {
+            default: throw new AssertionError();
+            // TODO: support a uniform per-node variant?
+            case UNIFORM: return new FixedChance(range.select(random));
+            case UNIFORM_STEP: return new UniformStepNetworkDecision(nodes, range, random);
+            case RANDOMWALK: return new RandomWalkNetworkDecision(nodes, range, random);
+        }
+    }
+
+    public Decision decision(ChanceRange range, RandomSource random)
+    {
+        switch (this)
+        {
+            default: throw new AssertionError();
+            case UNIFORM: return new FixedChance(range.select(random));
+            case UNIFORM_STEP: return new UniformStepDecision(range, random);
+            case RANDOMWALK: return new RandomWalkDecision(range, random);
+        }
+    }
+
+
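+    // Randomly picks a step granularity of roughly 1/32, 1/256, 1/2048 or 1/16384 of the range width,
+    // so different runs exercise both coarse- and fine-grained walks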
+    public static float maxStepSize(ChanceRange range, RandomSource random)
+    {
+        return maxStepSize(range.min, range.max, random);
+    }
+
+    public static float maxStepSize(float min, float max, RandomSource random)
+    {
+        switch (random.uniform(0, 3))
+        {
+            case 0:
+                return Math.max(Float.MIN_VALUE, (max/32) - (min/32));
+            case 1:
+                return Math.max(Float.MIN_VALUE, (max/256) - (min/256));
+            case 2:
+                return Math.max(Float.MIN_VALUE, (max/2048) - (min/2048));
+            default:
+                return Math.max(Float.MIN_VALUE, (max/16384) - (min/16384));
+        }
+    }
+
+    private static long maxStepSize(LongRange range, RandomSource random)
+    {
+        switch (random.uniform(0, 3))
+        {
+            case 0:
+                return Math.max(1, (range.max/32) - (range.min/32));
+            case 1:
+                return Math.max(1, (range.max/256) - (range.min/256));
+            case 2:
+                return Math.max(1, (range.max/2048) - (range.min/2048));
+            default:
+                return Math.max(1, (range.max/16384) - (range.min/16384));
+        }
+    }
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/LongRange.java b/test/simulator/main/org/apache/cassandra/simulator/utils/LongRange.java
new file mode 100644
index 0000000..92c4051
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/LongRange.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.cassandra.simulator.RandomSource;
+
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+public class LongRange
+{
+    public final long min;
+    public final long max;
+
+    public LongRange(long min, long max)
+    {
+        this.min = min;
+        this.max = max;
+    }
+
+    public LongRange(long min, long max, TimeUnit from, TimeUnit to)
+    {
+        this(to.convert(min,from), to.convert(max, from));
+    }
+
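+    // Selects uniformly from the inclusive range [min, max]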
+    public long select(RandomSource random)
+    {
+        if (min == max) return min;
+        return random.uniform(min, 1 + max);
+    }
+
+    public long select(RandomSource random, long minlb, long maxub)
+    {
+        long min = Math.max(this.min, minlb);
+        long max = Math.min(this.max, maxub);
+        if (min >= max) return min;
+        return random.uniform(min, 1 + max);
+    }
+
+    public static Optional<LongRange> parseRange(Optional<String> chance)
+    {
+        return chance.map(s -> new LongRange(Integer.parseInt(s.replaceFirst("\\.\\.+[0-9]+", "")),
+                                             Integer.parseInt(s.replaceFirst("[0-9]+\\.\\.+", ""))));
+    }
+
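+    // Parses a period range of the form "<min>..<max><unit>" (e.g. "10..100ms") and converts it to nanoseconds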
+    public static Optional<LongRange> parseNanosRange(Optional<String> chance)
+    {
+        if (!chance.isPresent())
+            return Optional.empty();
+
+        String parse = chance.get();
+        TimeUnit units = parseUnits(parse);
+        parse = stripUnits(parse, units);
+        return Optional.of(new LongRange(Long.parseLong(parse.replaceFirst("\\.\\.+[0-9]+", "")),
+                                         Long.parseLong(parse.replaceFirst("[0-9]+\\.\\.+", "")),
+                                         units, NANOSECONDS));
+    }
+
+    public static TimeUnit parseUnits(String parse)
+    {
+        TimeUnit units;
+        if (parse.endsWith("ms")) units = TimeUnit.MILLISECONDS;
+        else if (parse.endsWith("us")) units = TimeUnit.MICROSECONDS;
+        else if (parse.endsWith("ns")) units = NANOSECONDS;
+        else if (parse.endsWith("s")) units = SECONDS;
+        else throw new IllegalArgumentException("Unable to parse period range: " + parse);
+        return units;
+    }
+
+    public static String stripUnits(String parse, TimeUnit units)
+    {
+        return parse.substring(0, parse.length() - (units == SECONDS ? 1 : 2));
+    }
+
+}
diff --git a/test/simulator/main/org/apache/cassandra/simulator/utils/SafeCollections.java b/test/simulator/main/org/apache/cassandra/simulator/utils/SafeCollections.java
new file mode 100644
index 0000000..396a4be
--- /dev/null
+++ b/test/simulator/main/org/apache/cassandra/simulator/utils/SafeCollections.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.utils;
+
+import java.util.Collection;
+import java.util.function.Consumer;
+
+import org.apache.cassandra.utils.Throwables;
+
+public class SafeCollections
+{
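+    /**
+     * Applies the consumer to every element without aborting early; any exceptions thrown are merged
+     * into a single Throwable, which is returned (null if every element succeeded).
+     */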
+    public static <I> Throwable safeForEach(Collection<I> collection, Consumer<I> forEach)
+    {
+        Throwable result = null;
+        for (I i : collection)
+        {
+            try
+            {
+                forEach.accept(i);
+            }
+            catch (Throwable t)
+            {
+                result = Throwables.merge(result, t);
+            }
+        }
+        return result;
+    }
+}
diff --git a/test/simulator/test/org/apache/cassandra/simulator/test/ClassWithSynchronizedMethods.java b/test/simulator/test/org/apache/cassandra/simulator/test/ClassWithSynchronizedMethods.java
new file mode 100644
index 0000000..495d883
--- /dev/null
+++ b/test/simulator/test/org/apache/cassandra/simulator/test/ClassWithSynchronizedMethods.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.cassandra.utils.Nemesis;
+import org.apache.cassandra.utils.Simulate;
+
+import static org.apache.cassandra.utils.Simulate.With.MONITORS;
+
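+/**
+ * Test fixture whose synchronized method is rewritten by the simulator (per @Simulate(with = MONITORS)),
+ * with @Nemesis-annotated counters marking state around which the simulator may inject extra
+ * scheduling perturbation.
+ */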
+@Simulate(with = MONITORS)
+public class ClassWithSynchronizedMethods
+{
+    @Nemesis
+    private static final AtomicInteger staticCounter1 = new AtomicInteger();
+    @Nemesis
+    private static final AtomicInteger staticCounter2 = new AtomicInteger();
+
+    public static class Execution
+    {
+        public final int thread;
+        public final int sequenceNumber;
+
+        public Execution(int thread, int sequenceNumber)
+        {
+            this.thread = thread;
+            this.sequenceNumber = sequenceNumber;
+        }
+
+        public String toString()
+        {
+            return "Execution{" +
+                   "thread=" + thread +
+                   ", sequenceNumber=" + sequenceNumber +
+                   '}';
+        }
+    }
+
+    public static final List<Execution> executions = new ArrayList<>();
+
+    public static synchronized void synchronizedMethodWithParams(int thread, int sequenceNumber)
+    {
+        int before1 = staticCounter1.get();
+        int before2 = staticCounter2.get();
+
+        Execution execution = new Execution(thread, sequenceNumber);
+        executions.add(execution);
+
+        // Despite interleavings of synchronized method calls, two threads cannot enter the synchronized block at the same time,
+        // even in the presence of nemesis.
+        boolean res1 = staticCounter1.compareAndSet(before1, before1 + 1);
+        assert res1;
+        boolean res2 = staticCounter2.compareAndSet(before2, before2 + 1);
+        assert res2;
+    }
+}
diff --git a/test/simulator/test/org/apache/cassandra/simulator/test/MonitorMethodTransformerTest.java b/test/simulator/test/org/apache/cassandra/simulator/test/MonitorMethodTransformerTest.java
new file mode 100644
index 0000000..9c45810
--- /dev/null
+++ b/test/simulator/test/org/apache/cassandra/simulator/test/MonitorMethodTransformerTest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+
+// A simple demonstration of manipulating the order of JVM-level events using the simulation framework
+public class MonitorMethodTransformerTest extends SimulationTestBase
+{
+    @Test
+    public void testSynchronizedMethod()
+    {
+        // This verifies that the simulation does introduce some change to the order in which
+        // system events are scheduled. Ordinarily, we would expect each runnable to execute
+        // in a purely serial manner. However, injecting them into the simulated system adds
+        // a degree of (pseudorandom) non-determinism around the ordering of:
+        //   * the scheduling of the threads executing the tasks, when they park / unpark
+        //   * the fairness of acquiring the monitor needed to enter the synchronized method
+        // so we expect to see an interleaving of the thread ids executing the synchronized
+        // method.
+        // The simulated system doesn't alter the semantics of the JVM, however, so
+        // synchronization still ensures single-threaded access to critical sections. The
+        // synchronized method itself includes a check to verify this.
+        ClassWithSynchronizedMethods.executions.clear();
+        IIsolatedExecutor.SerializableRunnable[] runnables = new IIsolatedExecutor.SerializableRunnable[10];
+        for (int j = 0; j < 10; j++)
+        {
+            int thread = j;
+            runnables[j] = () -> {
+                for (int iteration = 0; iteration < 10; iteration++)
+                {
+                    ClassWithSynchronizedMethods.synchronizedMethodWithParams(thread, iteration);
+                }
+            };
+        }
+
+        simulate(runnables,
+                 () -> checkInterleavings());
+    }
+
+    @Test
+    public void testSynchronizedMethodMultiThreaded()
+    {
+        // Similar to the test above, but here the tasks are submitted to an Executor within the
+        // simulated system, effectively adding another layer of scheduling to be perturbed.
+        // The same invariants should be preserved as in the previous test.
+        simulate(() -> {
+                     ExecutorPlus executor = ExecutorFactory.Global.executorFactory().pooled("name", 10);
+                     int threads = 10;
+                     for (int i = 0; i < threads; i++)
+                     {
+                         int thread = i;
+                         executor.submit(() -> {
+                             for (int iteration = 0; iteration < 10; iteration++)
+                                 ClassWithSynchronizedMethods.synchronizedMethodWithParams(thread, iteration);
+                         });
+                     }
+                 },
+                 () -> checkInterleavings());
+    }
+
+    public static void checkInterleavings()
+    {
+        List<Integer> seenThreads = new ArrayList<>();
+        // check whether synchronized method calls from different threads interleaved
+        for (ClassWithSynchronizedMethods.Execution execution : ClassWithSynchronizedMethods.executions)
+        {
+            int idx = seenThreads.indexOf(execution.thread);
+            if (seenThreads.size() > 1)
+            {
+                // This thread was present in the list, and it is not the last one
+                if (idx >= 0 && idx != seenThreads.size() - 1)
+                {
+                    System.out.println(String.format("Detected interleaving between %d and %d",
+                                       execution.thread, seenThreads.get(seenThreads.size() - 1)));
+                    return;
+                }
+            }
+
+            if (idx == -1)
+                seenThreads.add(execution.thread);
+        }
+        Assert.fail("No interleavings detected");
+    }
+}
+
diff --git a/test/simulator/test/org/apache/cassandra/simulator/test/ShortPaxosSimulationTest.java b/test/simulator/test/org/apache/cassandra/simulator/test/ShortPaxosSimulationTest.java
new file mode 100644
index 0000000..19d6601
--- /dev/null
+++ b/test/simulator/test/org/apache/cassandra/simulator/test/ShortPaxosSimulationTest.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.test;
+
+import java.io.IOException;
+
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.cassandra.simulator.paxos.PaxosSimulationRunner;
+
+public class ShortPaxosSimulationTest
+{
+    @Test
+    public void simulationTest() throws IOException
+    {
+        PaxosSimulationRunner.main(new String[] { "run", "-n", "3..6", "-t", "1000", "-c", "2", "--cluster-action-limit", "2", "-s", "30" });
+    }
+
+    @Test
+    @Ignore("fails due to OOM DirectMemory - unclear why")
+    public void selfReconcileTest() throws IOException
+    {
+        PaxosSimulationRunner.main(new String[] { "reconcile", "-n", "3..6", "-t", "1000", "-c", "2", "--cluster-action-limit", "2", "-s", "30", "--with-self" });
+    }
+}
diff --git a/test/simulator/test/org/apache/cassandra/simulator/test/SimulationTestBase.java b/test/simulator/test/org/apache/cassandra/simulator/test/SimulationTestBase.java
new file mode 100644
index 0000000..4cceb8f
--- /dev/null
+++ b/test/simulator/test/org/apache/cassandra/simulator/test/SimulationTestBase.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntSupplier;
+import java.util.function.Predicate;
+
+import com.google.common.collect.Iterators;
+
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.distributed.impl.AbstractCluster;
+import org.apache.cassandra.distributed.impl.IsolatedExecutor;
+import org.apache.cassandra.distributed.shared.InstanceClassLoader;
+import org.apache.cassandra.simulator.Action;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.ActionPlan;
+import org.apache.cassandra.simulator.ActionSchedule;
+import org.apache.cassandra.simulator.ActionSchedule.Work;
+import org.apache.cassandra.simulator.FutureActionScheduler;
+import org.apache.cassandra.simulator.RunnableActionScheduler;
+import org.apache.cassandra.simulator.ClusterSimulation;
+import org.apache.cassandra.simulator.Debug;
+import org.apache.cassandra.simulator.RandomSource;
+import org.apache.cassandra.simulator.Simulation;
+import org.apache.cassandra.simulator.SimulationRunner;
+import org.apache.cassandra.simulator.asm.InterceptClasses;
+import org.apache.cassandra.simulator.asm.NemesisFieldSelectors;
+import org.apache.cassandra.simulator.systems.Failures;
+import org.apache.cassandra.simulator.systems.InterceptibleThread;
+import org.apache.cassandra.simulator.systems.InterceptingExecutorFactory;
+import org.apache.cassandra.simulator.systems.InterceptingGlobalMethods;
+import org.apache.cassandra.simulator.systems.InterceptorOfGlobalMethods;
+import org.apache.cassandra.simulator.systems.SimulatedExecution;
+import org.apache.cassandra.simulator.systems.SimulatedQuery;
+import org.apache.cassandra.simulator.systems.SimulatedSystems;
+import org.apache.cassandra.simulator.systems.SimulatedTime;
+import org.apache.cassandra.simulator.utils.LongRange;
+import org.apache.cassandra.utils.CloseableIterator;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.STREAM_LIMITED;
+import static org.apache.cassandra.simulator.ActionSchedule.Mode.UNLIMITED;
+import static org.apache.cassandra.simulator.ClusterSimulation.ISOLATE;
+import static org.apache.cassandra.simulator.ClusterSimulation.SHARE;
+import static org.apache.cassandra.simulator.SimulatorUtils.failWithOOM;
+import static org.apache.cassandra.simulator.systems.InterceptedWait.CaptureSites.Capture.NONE;
+import static org.apache.cassandra.simulator.utils.KindOfSequence.UNIFORM;
+import static org.apache.cassandra.utils.Shared.Scope.ANY;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+public class SimulationTestBase
+{
+    static abstract class DTestClusterSimulation implements Simulation
+    {
+        final SimulatedSystems simulated;
+        final RunnableActionScheduler scheduler;
+        final Cluster cluster;
+
+        public DTestClusterSimulation(SimulatedSystems simulated, RunnableActionScheduler scheduler, Cluster cluster)
+        {
+            this.simulated = simulated;
+            this.scheduler = scheduler;
+            this.cluster = cluster;
+        }
+
+        public Action executeQuery(int node, String query, ConsistencyLevel cl, Object... bindings)
+        {
+            return new SimulatedQuery(String.format("Execute query: %s %s %s", query, cl, Arrays.toString(bindings)),
+                                      simulated,
+                                      cluster.get(node),
+                                      query,
+                                      cl,
+                                      null,
+                                      bindings);
+        }
+
+        public Action schemaChange(int node, String query)
+        {
+            return new SimulatedQuery(String.format("Schema change: %s", query),
+                                      simulated,
+                                      cluster.get(node),
+                                      query,
+                                      org.apache.cassandra.distributed.api.ConsistencyLevel.ALL,
+                                      null);
+        }
+
+        protected abstract ActionList initialize();
+
+        protected abstract ActionList execute();
+
+        public CloseableIterator<?> iterator()
+        {
+            return new ActionPlan(ActionList.of(initialize()),
+                                  Collections.singletonList(execute()),
+                                  ActionList.empty())
+                   .iterator(STREAM_LIMITED, -1, () -> 0L, simulated.time, scheduler, simulated.futureScheduler);
+        }
+
+        public void run()
+        {
+            try (CloseableIterator<?> iter = iterator())
+            {
+                while (iter.hasNext())
+                    iter.next();
+            }
+        }
+
+        public void close() throws Exception
+        {
+
+        }
+    }
+
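+    /**
+     * Runs a full cluster simulation: the init actions are executed first, then the test actions,
+     * with the cluster builder customised via the configure callback; failures are reported with
+     * the seed so the run can be reproduced.
+     */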
+    public static void simulate(Function<DTestClusterSimulation, ActionList> init,
+                                Function<DTestClusterSimulation, ActionList> test,
+                                Consumer<ClusterSimulation.Builder<DTestClusterSimulation>> configure) throws IOException
+    {
+        SimulationRunner.beforeAll();
+        long seed = System.currentTimeMillis();
+        RandomSource random = new RandomSource.Default();
+        random.reset(seed);
+        class Factory extends ClusterSimulation.Builder<DTestClusterSimulation>
+        {
+            public ClusterSimulation<DTestClusterSimulation> create(long seed) throws IOException
+            {
+                return new ClusterSimulation<>(random, seed, 1, this,
+                                               (c) -> {},
+                                               (simulated, scheduler, cluster, options) -> new DTestClusterSimulation(simulated, scheduler, cluster) {
+
+                                                   protected ActionList initialize()
+                                                   {
+                                                       return init.apply(this);
+                                                   }
+
+                                                   protected ActionList execute()
+                                                   {
+                                                       return test.apply(this);
+                                                   }
+                                               });
+            }
+        }
+
+        Factory factory = new Factory();
+        configure.accept(factory);
+        try (ClusterSimulation<?> cluster = factory.create(seed))
+        {
+            try
+            {
+                cluster.simulation.run();
+            }
+            catch (Throwable t)
+            {
+                throw new AssertionError(String.format("Failed on seed %s", Long.toHexString(seed)),
+                                         t);
+            }
+        }
+    }
+
+    public static void simulate(IIsolatedExecutor.SerializableRunnable run,
+                                IIsolatedExecutor.SerializableRunnable check)
+    {
+        simulate(new IIsolatedExecutor.SerializableRunnable[]{run},
+                 check);
+    }
+
+    public static void simulate(IIsolatedExecutor.SerializableRunnable[] runnables,
+                                IIsolatedExecutor.SerializableRunnable check)
+    {
+        Failures failures = new Failures();
+        RandomSource random = new RandomSource.Default();
+        long seed = System.currentTimeMillis();
+        System.out.println("Using seed: " + seed);
+        random.reset(seed);
+        SimulatedTime time = new SimulatedTime(1, random, 1577836800000L /*Jan 1st UTC*/, new LongRange(1, 100, MILLISECONDS, NANOSECONDS),
+                                               UNIFORM, UNIFORM.period(new LongRange(10L, 60L, SECONDS, NANOSECONDS), random), (i1, i2) -> {});
+        SimulatedExecution execution = new SimulatedExecution();
+
+        Predicate<String> sharedClassPredicate = AbstractCluster.getSharedClassPredicate(ISOLATE, SHARE, ANY, SIMULATION);
+        InstanceClassLoader classLoader = new InstanceClassLoader(1, 1, AbstractCluster.CURRENT_VERSION.classpath,
+                                                                  Thread.currentThread().getContextClassLoader(),
+                                                                  sharedClassPredicate,
+                                                                  new InterceptClasses(() -> 1.0f, () -> 1.0f, NemesisFieldSelectors.get(), ClassLoader.getSystemClassLoader(), sharedClassPredicate.negate())::apply);
+
+        ThreadGroup tg = new ThreadGroup("test");
+        InterceptorOfGlobalMethods interceptorOfGlobalMethods = new InterceptingGlobalMethods(NONE, null, failures, random);
+        InterceptingExecutorFactory factory = execution.factory(interceptorOfGlobalMethods, classLoader, tg);
+
+        time.setup(1, classLoader);
+        IsolatedExecutor.transferAdhoc((IIsolatedExecutor.SerializableConsumer<ExecutorFactory>) ExecutorFactory.Global::unsafeSet, classLoader)
+                        .accept(factory);
+
+        IsolatedExecutor.transferAdhoc((IIsolatedExecutor.SerializableBiConsumer<InterceptorOfGlobalMethods, IntSupplier>) InterceptorOfGlobalMethods.Global::unsafeSet, classLoader)
+                        .accept(interceptorOfGlobalMethods, () -> {
+                            if (InterceptibleThread.isDeterministic())
+                                throw failWithOOM();
+                            return random.uniform(Integer.MIN_VALUE, Integer.MAX_VALUE);
+                        });
+
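+        // Stub future-action scheduler: shouldDeliver always delivers and all delays are zero, leaving
+        // thread scheduling and monitor acquisition as the main sources of nondeterminism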
+        SimulatedSystems simulated = new SimulatedSystems(random, time, null, execution, null, null, null, new FutureActionScheduler()
+        {
+            @Override
+            public Deliver shouldDeliver(int from, int to)
+            {
+                return Deliver.DELIVER;
+            }
+
+            @Override
+            public long messageDeadlineNanos(int from, int to)
+            {
+                return 0;
+            }
+
+            @Override
+            public long messageTimeoutNanos(long expiresAfterNanos, long expirationIntervalNanos)
+            {
+                return 0;
+            }
+
+            @Override
+            public long messageFailureNanos(int from, int to)
+            {
+                return 0;
+            }
+
+            @Override
+            public long schedulerDelayNanos()
+            {
+                return 0;
+            }
+        }, new Debug(), failures);
+
+        RunnableActionScheduler runnableScheduler = new RunnableActionScheduler.RandomUniform(random);
+
+        Action entrypoint = new Action("entrypoint", Action.Modifiers.NONE, Action.Modifiers.NONE)
+        {
+            protected ActionList performSimple()
+            {
+                Action[] actions = new Action[runnables.length];
+                for (int i = 0; i < runnables.length; i++)
+                    actions[i] = toAction(runnables[i], classLoader, factory, simulated);
+
+                return ActionList.of(actions);
+            }
+        };
+
+
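+        // Drain the test schedule to completion first, then run the check actions in a second schedule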
+        ActionSchedule testSchedule = new ActionSchedule(simulated.time, simulated.futureScheduler, () -> 0, runnableScheduler, new Work(UNLIMITED, Collections.singletonList(ActionList.of(entrypoint))));
+        Iterators.advance(testSchedule, Integer.MAX_VALUE);
+        ActionSchedule checkSchedule = new ActionSchedule(simulated.time, simulated.futureScheduler, () -> 0, runnableScheduler, new Work(UNLIMITED, Collections.singletonList(ActionList.of(toAction(check, classLoader, factory, simulated)))));
+        Iterators.advance(checkSchedule, Integer.MAX_VALUE);
+    }
+
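+    // Transfers the runnable into the instance class loader and wraps it as an action that starts a parked, intercepted thread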
+    public static Action toAction(IIsolatedExecutor.SerializableRunnable r, ClassLoader classLoader, InterceptingExecutorFactory factory, SimulatedSystems simulated)
+    {
+        Runnable runnable = IsolatedExecutor.transferAdhoc(r, classLoader);
+        return simulated.invoke("action", Action.Modifiers.NONE, Action.Modifiers.NONE,
+                                factory.startParked("begin", runnable));
+    }
+
+    public static <T> T[] arr(T... arr)
+    {
+        return arr;
+    }
+}
\ No newline at end of file
diff --git a/test/simulator/test/org/apache/cassandra/simulator/test/TrivialSimulationTest.java b/test/simulator/test/org/apache/cassandra/simulator/test/TrivialSimulationTest.java
new file mode 100644
index 0000000..cf8fe1b
--- /dev/null
+++ b/test/simulator/test/org/apache/cassandra/simulator/test/TrivialSimulationTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.simulator.test;
+
+import java.io.IOException;
+import java.util.EnumMap;
+import java.util.IdentityHashMap;
+
+import org.junit.Test;
+
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.simulator.ActionList;
+import org.apache.cassandra.simulator.Debug;
+import org.apache.cassandra.simulator.cluster.ClusterActionListener.NoOpListener;
+import org.apache.cassandra.simulator.cluster.ClusterActions;
+import org.apache.cassandra.simulator.cluster.ClusterActions.Options;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
+
+import static org.apache.cassandra.simulator.cluster.ClusterActions.InitialConfiguration.initializeAll;
+import static org.apache.cassandra.simulator.cluster.ClusterActions.Options.noActions;
+
+public class TrivialSimulationTest extends SimulationTestBase
+{
+    @Test
+    public void trivialTest() throws IOException // for demonstration/experiment purposes
+    {
+        simulate((simulation) -> {
+                     Options options = noActions(simulation.cluster.size());
+                     ClusterActions clusterActions = new ClusterActions(simulation.simulated, simulation.cluster,
+                                                                        options, new NoOpListener(), new Debug(new EnumMap<>(Debug.Info.class), new int[0]));
+                     return ActionList.of(clusterActions.initializeCluster(initializeAll(simulation.cluster.size())),
+                                          simulation.schemaChange(1, "CREATE KEYSPACE ks WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}"),
+                                          simulation.schemaChange(1, "CREATE TABLE IF NOT EXISTS ks.tbl (pk int PRIMARY KEY, v int)"));
+                 },
+                 (simulation) -> ActionList.of(simulation.executeQuery(1, "INSERT INTO ks.tbl VALUES (1,1)", ConsistencyLevel.QUORUM),
+                                               simulation.executeQuery(1, "SELECT * FROM ks.tbl WHERE pk = 1", ConsistencyLevel.QUORUM)),
+                 (config) -> config
+                             .threadCount(10)
+                             .nodes(3, 3)
+                             .dcs(1, 1));
+    }
+
+    @Test
+    public void componentTest()
+    {
+        simulate(arr(() -> {
+                     ExecutorPlus executor = ExecutorFactory.Global.executorFactory().pooled("name", 10);
+                     CountDownLatch latch = CountDownLatch.newCountDownLatch(5);
+
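+                     // Each task decrements and then awaits the latch, so none can finish until all five have been scheduled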
+                     for (int i = 0; i < 5; i++)
+                     {
+                         executor.submit(() -> {
+                             latch.decrement();
+                             try
+                             {
+                                 latch.await();
+                             }
+                             catch (InterruptedException e)
+                             {
+                                 throw new RuntimeException(e);
+                             }
+                         });
+                     }
+                 }),
+                 () -> {});
+    }
+
+    @Test
+    public void identityHashMapTest()
+    {
+        simulate(arr(() -> new IdentityHashMap<>().put(1, 1)),
+                 () -> {});
+    }
+}
diff --git a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
index 3611f0e..6503707 100644
--- a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
+++ b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
@@ -20,27 +20,21 @@
 package org.apache.cassandra;
 
 import org.apache.cassandra.io.IVersionedSerializer;
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
+import org.apache.cassandra.io.util.*;
 import org.apache.cassandra.net.MessagingService;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
 public class AbstractSerializationsTester
 {
-    protected static final String CUR_VER = System.getProperty("cassandra.version", "4.0");
+    protected static final String CUR_VER = System.getProperty("cassandra.version", "4.1");
     protected static final Map<String, Integer> VERSION_MAP = new HashMap<String, Integer> ()
     {{
         put("3.0", MessagingService.VERSION_30);
         put("4.0", MessagingService.VERSION_40);
+        put("4.1", MessagingService.VERSION_41);
     }};
 
     protected static final boolean EXECUTE_WRITES = Boolean.getBoolean("cassandra.test-serialization-writes");
@@ -57,16 +51,16 @@
         assert out.getLength() == serializer.serializedSize(obj, getVersion());
     }
 
-    protected static DataInputStreamPlus getInput(String name) throws IOException
+    protected static FileInputStreamPlus getInput(String name) throws IOException
     {
         return getInput(CUR_VER, name);
     }
 
-    protected static DataInputStreamPlus getInput(String version, String name) throws IOException
+    protected static FileInputStreamPlus getInput(String version, String name) throws IOException
     {
         File f = new File("test/data/serialization/" + version + '/' + name);
-        assert f.exists() : f.getPath();
-        return new DataInputPlus.DataInputStreamPlus(new FileInputStream(f));
+        assert f.exists() : f.path();
+        return new FileInputStreamPlus(f);
     }
 
     @SuppressWarnings("resource")
@@ -79,7 +73,7 @@
     protected static DataOutputStreamPlus getOutput(String version, String name) throws IOException
     {
         File f = new File("test/data/serialization/" + version + '/' + name);
-        f.getParentFile().mkdirs();
-        return new BufferedDataOutputStreamPlus(new FileOutputStream(f).getChannel());
+        f.parent().tryCreateDirectories();
+        return new FileOutputStreamPlus(f);
     }
 }
diff --git a/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java b/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java
index a6c5997..2befb5c 100644
--- a/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java
+++ b/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java
@@ -24,8 +24,8 @@
 import java.io.StringWriter;
 import java.text.NumberFormat;
 
-import junit.framework.AssertionFailedError;
-import junit.framework.Test;
+import junit.framework.AssertionFailedError;  // checkstyle: permit this import
+import junit.framework.Test;  // checkstyle: permit this import
 
 import org.apache.tools.ant.BuildException;
 import org.apache.tools.ant.taskdefs.optional.junit.IgnoredTestListener;
@@ -316,4 +316,4 @@
     public void testAssumptionFailure(Test test, Throwable cause) {
         formatSkip(test, cause.getMessage());
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java b/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java
index 8c37be7..5f65227 100644
--- a/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java
+++ b/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java
@@ -104,4 +104,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java b/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java
index 5015be9..de8fb4e 100644
--- a/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java
+++ b/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java
@@ -33,8 +33,8 @@
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
-import junit.framework.AssertionFailedError;
-import junit.framework.Test;
+import junit.framework.AssertionFailedError;  // checkstyle: permit this import
+import junit.framework.Test;  // checkstyle: permit this import
 
 import org.apache.tools.ant.BuildException;
 import org.apache.tools.ant.taskdefs.optional.junit.IgnoredTestListener;
@@ -50,6 +50,7 @@
 import org.w3c.dom.Element;
 import org.w3c.dom.Text;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Prints XML output of the test to a specified Writer.
@@ -240,7 +241,7 @@
      * @param t the test.
      */
     public void startTest(final Test t) {
-        testStarts.put(createDescription(t), System.currentTimeMillis());
+        testStarts.put(createDescription(t), currentTimeMillis());
     }
 
     private static String createDescription(final Test test) throws BuildException {
@@ -284,7 +285,7 @@
 
         final Long l = testStarts.get(createDescription(test));
         currentTest.setAttribute(ATTR_TIME,
-            "" + ((System.currentTimeMillis() - l) / ONE_SECOND));
+            "" + ((currentTimeMillis() - l) / ONE_SECOND));
     }
 
     /**
@@ -388,4 +389,4 @@
         skippedTests.put(createDescription(test), test);
 
     }
-} // XMLJUnitResultFormatter
\ No newline at end of file
+} // XMLJUnitResultFormatter
diff --git a/test/unit/org/apache/cassandra/LogbackStatusListener.java b/test/unit/org/apache/cassandra/LogbackStatusListener.java
index 1f95bd4..719fada 100644
--- a/test/unit/org/apache/cassandra/LogbackStatusListener.java
+++ b/test/unit/org/apache/cassandra/LogbackStatusListener.java
@@ -121,14 +121,6 @@
                         return;
                 }
 
-                //Filter out Windows newline
-                if (size() == 2)
-                {
-                    byte[] bytes = toByteArray();
-                    if (bytes[0] == 0xD && bytes[1] == 0xA)
-                        return;
-                }
-
                 String statement;
                 if (encoding != null)
                     statement = new String(toByteArray(), encoding);
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index 1f46562..3279490 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -18,7 +18,12 @@
 package org.apache.cassandra;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import org.apache.cassandra.auth.AuthKeyspace;
 import org.apache.cassandra.auth.AuthSchemaChangeListener;
@@ -39,13 +44,14 @@
 import org.apache.cassandra.index.sasi.SASIIndex;
 import org.apache.cassandra.index.sasi.disk.OnDiskIndexBuilder;
 import org.apache.cassandra.schema.*;
-import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
 import org.junit.After;
 import org.junit.BeforeClass;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class SchemaLoader
 {
     @BeforeClass
@@ -78,7 +84,7 @@
         // skip shadow round and endpoint collision check in tests
         System.setProperty("cassandra.allow_unsafe_join", "true");
         if (!Gossiper.instance.isEnabled())
-            Gossiper.instance.start((int) (System.currentTimeMillis() / 1000));
+            Gossiper.instance.start((int) (currentTimeMillis() / 1000));
     }
 
     public static void schemaDefinition(String testName) throws ConfigurationException
@@ -241,7 +247,7 @@
         // if you're messing with low-level sstable stuff, it can be useful to inject the schema directly
         // Schema.instance.load(schemaDefinition());
         for (KeyspaceMetadata ksm : schema)
-            MigrationManager.announceNewKeyspace(ksm, false);
+            SchemaTestUtil.announceNewKeyspace(ksm);
 
         if (Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false")))
             useCompression(schema, compressionParams(CompressionParams.DEFAULT_CHUNK_LENGTH));
@@ -249,7 +255,7 @@
 
     public static void createKeyspace(String name, KeyspaceParams params)
     {
-        MigrationManager.announceNewKeyspace(KeyspaceMetadata.create(name, params, Tables.of()), true);
+        SchemaTestUtil.announceNewKeyspace(KeyspaceMetadata.create(name, params, Tables.of()));
     }
 
     public static void createKeyspace(String name, KeyspaceParams params, TableMetadata.Builder... builders)
@@ -258,17 +264,17 @@
         for (TableMetadata.Builder builder : builders)
             tables.add(builder.build());
 
-        MigrationManager.announceNewKeyspace(KeyspaceMetadata.create(name, params, tables.build()), true);
+        SchemaTestUtil.announceNewKeyspace(KeyspaceMetadata.create(name, params, tables.build()));
     }
 
     public static void createKeyspace(String name, KeyspaceParams params, TableMetadata... tables)
     {
-        MigrationManager.announceNewKeyspace(KeyspaceMetadata.create(name, params, Tables.of(tables)), true);
+        SchemaTestUtil.announceNewKeyspace(KeyspaceMetadata.create(name, params, Tables.of(tables)));
     }
 
     public static void createKeyspace(String name, KeyspaceParams params, Tables tables, Types types)
     {
-        MigrationManager.announceNewKeyspace(KeyspaceMetadata.create(name, params, tables, Views.none(), types, Functions.none()), true);
+        SchemaTestUtil.announceNewKeyspace(KeyspaceMetadata.create(name, params, tables, Views.none(), types, Functions.none()));
     }
 
     public static void setupAuth(IRoleManager roleManager, IAuthenticator authenticator, IAuthorizer authorizer, INetworkAuthorizer networkAuthorizer)
@@ -277,7 +283,7 @@
         DatabaseDescriptor.setAuthenticator(authenticator);
         DatabaseDescriptor.setAuthorizer(authorizer);
         DatabaseDescriptor.setNetworkAuthorizer(networkAuthorizer);
-        MigrationManager.announceNewKeyspace(AuthKeyspace.metadata(), true);
+        SchemaTestUtil.announceNewKeyspace(AuthKeyspace.metadata());
         DatabaseDescriptor.getRoleManager().setup();
         DatabaseDescriptor.getAuthenticator().setup();
         DatabaseDescriptor.getAuthorizer().setup();
@@ -329,7 +335,7 @@
     {
         for (KeyspaceMetadata ksm : schema)
             for (TableMetadata cfm : ksm.tablesAndViews())
-                MigrationManager.announceTableUpdate(cfm.unbuild().compression(compressionParams.copy()).build(), true);
+                SchemaTestUtil.announceTableUpdate(cfm.unbuild().compression(compressionParams.copy()).build());
     }
 
     public static TableMetadata.Builder counterCFMD(String ksName, String cfName)
diff --git a/test/unit/org/apache/cassandra/ServerTestUtils.java b/test/unit/org/apache/cassandra/ServerTestUtils.java
index 221a23a..10cb082 100644
--- a/test/unit/org/apache/cassandra/ServerTestUtils.java
+++ b/test/unit/org/apache/cassandra/ServerTestUtils.java
@@ -17,9 +17,9 @@
  */
 package org.apache.cassandra;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -31,11 +31,12 @@
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.AbstractEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.security.ThreadAwareSecurityManager;
+import org.apache.cassandra.service.EmbeddedCassandraService;
 
 /**
  * Utility methods used by SchemaLoader and CQLTester to manage the server and its state.
@@ -131,11 +132,10 @@
     }
 
     /**
-     * Cleanup the directories used by the server, creating them if they do not exists.
+     * Cleanup the directories used by the server, creating them if they do not exist.
      */
     public static void cleanupAndLeaveDirs() throws IOException
     {
-        // We need to stop and unmap all CLS instances prior to cleanup() or we'll get failures on Windows.
         CommitLog.instance.stopUnsafe(true);
         mkdirs(); // Creates the directories if they do not exist
         cleanup(); // Ensure that the directories are all empty
@@ -167,13 +167,14 @@
     {
         if (directory.exists())
         {
-            FileUtils.deleteChildrenRecursive(directory);
+            Arrays.stream(directory.tryList()).forEach(File::deleteRecursive);
         }
     }
 
     private static void cleanupDirectory(String dirName)
     {
-        cleanupDirectory(new File(dirName));
+        if (dirName != null)
+            cleanupDirectory(new File(dirName));
     }
 
     /**
@@ -189,6 +190,16 @@
         cleanupDirectory(DatabaseDescriptor.getSavedCachesLocation());
     }
 
+    public static EmbeddedCassandraService startEmbeddedCassandraService() throws IOException
+    {
+        DatabaseDescriptor.daemonInitialization();
+        mkdirs();
+        cleanup();
+        EmbeddedCassandraService service = new EmbeddedCassandraService();
+        service.start();
+        return service;
+    }
+
     private ServerTestUtils()
     {
     }
diff --git a/test/unit/org/apache/cassandra/Util.java b/test/unit/org/apache/cassandra/Util.java
index 4b7b6ea..0459cb3 100644
--- a/test/unit/org/apache/cassandra/Util.java
+++ b/test/unit/org/apache/cassandra/Util.java
@@ -21,52 +21,86 @@
 
 import java.io.Closeable;
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOError;
+import java.io.IOException;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
 import java.time.Duration;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
 
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Iterators;
 import org.apache.commons.lang3.StringUtils;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.AbstractReadCommandBuilder;
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.DeletionTime;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.db.Directories.DataDirectory;
+import org.apache.cassandra.db.DisallowedDirectories;
+import org.apache.cassandra.db.IMutation;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.db.PartitionRangeReadCommand;
+import org.apache.cassandra.db.ReadCommand;
+import org.apache.cassandra.db.ReadExecutionController;
+import org.apache.cassandra.db.compaction.AbstractCompactionTask;
 import org.apache.cassandra.db.compaction.ActiveCompactionsTracker;
+import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.CompactionTasks;
 import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.locator.ReplicaCollection;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.schema.ColumnMetadata;
-import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.cql3.ColumnIdentifier;
-
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.Directories.DataDirectory;
-import org.apache.cassandra.db.compaction.AbstractCompactionTask;
-import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.db.marshal.Int32Type;
-import org.apache.cassandra.db.partitions.*;
-import org.apache.cassandra.db.rows.*;
+import org.apache.cassandra.db.partitions.FilteredPartition;
+import org.apache.cassandra.db.partitions.ImmutableBTreePartition;
+import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.AbstractUnfilteredRowIterator;
+import org.apache.cassandra.db.rows.BTreeRow;
+import org.apache.cassandra.db.rows.BufferCell;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.Cells;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.db.rows.Rows;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.view.TableViews;
 import org.apache.cassandra.dht.IPartitioner;
-
 import org.apache.cassandra.dht.RandomPartitioner.BigIntegerToken;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
@@ -74,23 +108,41 @@
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SSTableId;
+import org.apache.cassandra.io.sstable.SSTableLoader;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.io.sstable.UUIDBasedSSTableId;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.ReplicaCollection;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.service.pager.PagingState;
+import org.apache.cassandra.streaming.StreamResultFuture;
+import org.apache.cassandra.streaming.StreamState;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.CassandraVersion;
 import org.apache.cassandra.utils.CounterId;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.FilterFactory;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
 import org.awaitility.Awaitility;
 
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.equalTo;
 
 public class Util
 {
@@ -201,7 +253,7 @@
             rm.applyUnsafe();
 
         ColumnFamilyStore store = Keyspace.open(keyspaceName).getColumnFamilyStore(tableId);
-        store.forceBlockingFlush();
+        Util.flush(store);
         return store;
     }
 
@@ -788,16 +840,18 @@
     {
         LifecycleTransaction.waitForDeletions();
         assertEquals(expectedSSTableCount, cfs.getLiveSSTables().size());
-        Set<Integer> liveGenerations = cfs.getLiveSSTables().stream().map(sstable -> sstable.descriptor.generation).collect(Collectors.toSet());
+        Set<SSTableId> liveIdentifiers = cfs.getLiveSSTables().stream()
+                                            .map(sstable -> sstable.descriptor.id)
+                                            .collect(Collectors.toSet());
         int fileCount = 0;
         for (File f : cfs.getDirectories().getCFDirectories())
         {
-            for (File sst : f.listFiles())
+            for (File sst : f.tryList())
             {
-                if (sst.getName().contains("Data"))
+                if (sst.name().contains("Data"))
                 {
-                    Descriptor d = Descriptor.fromFilename(sst.getAbsolutePath());
-                    assertTrue(liveGenerations.contains(d.generation));
+                    Descriptor d = Descriptor.fromFilename(sst.absolutePath());
+                    assertTrue(liveIdentifiers.contains(d.id));
                     fileCount++;
                 }
             }
@@ -850,4 +904,142 @@
         }
         Gossiper.instance.expireUpgradeFromVersion();
     }
+
+    /**
+     * Sets the length of the file to the given size. The file will be created if it does not exist.
+     *
+     * @param file file for which length needs to be set
+     * @param size new size
+     * @throws IOException on any I/O error.
+     */
+    public static void setFileLength(File file, long size) throws IOException
+    {
+        try (FileChannel fileChannel = file.newReadWriteChannel())
+        {
+            if (file.length() >= size)
+            {
+                fileChannel.truncate(size);
+            }
+            else
+            {
+                fileChannel.position(size - 1);
+                fileChannel.write(ByteBuffer.wrap(new byte[1]));
+            }
+        }
+    }
+
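+    // Returns a sequence-based SSTable id generator seeded with the given existing generation numbers.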
+    public static Supplier<SequenceBasedSSTableId> newSeqGen(int ... existing)
+    {
+        return SequenceBasedSSTableId.Builder.instance.generator(IntStream.of(existing).mapToObj(SequenceBasedSSTableId::new));
+    }
+
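+    // Returns a UUID-based SSTable id generator that starts from an empty set of existing ids.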
+    public static Supplier<UUIDBasedSSTableId> newUUIDGen()
+    {
+        return UUIDBasedSSTableId.Builder.instance.generator(Stream.empty());
+    }
+
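+    // Collects the descriptors of all live SSTables of the given table.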
+    public static Set<Descriptor> getSSTables(String ks, String tableName)
+    {
+        return Keyspace.open(ks)
+                       .getColumnFamilyStore(tableName)
+                       .getLiveSSTables()
+                       .stream()
+                       .map(sstr -> sstr.descriptor)
+                       .collect(Collectors.toSet());
+    }
+
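+    // Collects the descriptors of the SSTables captured in the given snapshot of the table.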
+    public static Set<Descriptor> getSnapshots(String ks, String tableName, String snapshotTag)
+    {
+        try
+        {
+            return Keyspace.open(ks)
+                           .getColumnFamilyStore(tableName)
+                           .getSnapshotSSTableReaders(snapshotTag)
+                           .stream()
+                           .map(sstr -> sstr.descriptor)
+                           .collect(Collectors.toSet());
+        }
+        catch (IOException e)
+        {
+            throw Throwables.unchecked(e);
+        }
+    }
+
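+    // Lists the descriptors of the SSTables found in the table's backups directory.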
+    public static Set<Descriptor> getBackups(String ks, String tableName)
+    {
+        return Keyspace.open(ks)
+                       .getColumnFamilyStore(tableName)
+                       .getDirectories()
+                       .sstableLister(Directories.OnTxnErr.THROW)
+                       .onlyBackups(true)
+                       .list()
+                       .keySet();
+    }
+
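+    // Streams the SSTables found in the given directory into the target keyspace via SSTableLoader and waits for the stream to finish.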
+    public static StreamState bulkLoadSSTables(File dir, String targetKeyspace)
+    {
+        SSTableLoader.Client client = new SSTableLoader.Client()
+        {
+            private String keyspace;
+
+            public void init(String keyspace)
+            {
+                this.keyspace = keyspace;
+                for (Replica replica : StorageService.instance.getLocalReplicas(keyspace))
+                    addRangeForEndpoint(replica.range(), FBUtilities.getBroadcastAddressAndPort());
+            }
+
+            public TableMetadataRef getTableMetadata(String tableName)
+            {
+                return Schema.instance.getTableMetadataRef(keyspace, tableName);
+            }
+        };
+
+        SSTableLoader loader = new SSTableLoader(dir, client, new OutputHandler.LogOutput(), 1, targetKeyspace);
+        StreamResultFuture result = loader.stream();
+        return FBUtilities.waitOnFuture(result);
+    }
+
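+    // Re-bases the last {@code components} name elements of {@code path} onto {@code targetBasePath}.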
+    public static File relativizePath(File targetBasePath, File path, int components)
+    {
+        Preconditions.checkArgument(components > 0);
+        Preconditions.checkArgument(path.toPath().getNameCount() >= components);
+        Path relative = path.toPath().subpath(path.toPath().getNameCount() - components, path.toPath().getNameCount());
+        return new File(targetBasePath.toPath().resolve(relative));
+    }
+
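+    // Forces a blocking flush with the UNIT_TESTS flush reason; the overloads below resolve the table or keyspace first.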
+    public static void flush(ColumnFamilyStore cfs)
+    {
+        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
+    }
+
+    public static void flushTable(Keyspace keyspace, String table)
+    {
+        flush(keyspace.getColumnFamilyStore(table));
+    }
+
+    public static void flushTable(Keyspace keyspace, TableId table)
+    {
+        flush(keyspace.getColumnFamilyStore(table));
+    }
+
+    public static void flushTable(String keyspace, String table)
+    {
+        flushTable(Keyspace.open(keyspace), table);
+    }
+
+    public static void flush(Keyspace keyspace)
+    {
+        FBUtilities.waitOnFutures(keyspace.flush(ColumnFamilyStore.FlushReason.UNIT_TESTS));
+    }
+
+    public static void flushKeyspace(String keyspaceName)
+    {
+        flush(Keyspace.open(keyspaceName));
+    }
+
+    public static void flush(TableViews view)
+    {
+        view.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java b/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java
index 8054f90..62bc767 100644
--- a/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java
+++ b/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java
@@ -29,6 +29,22 @@
 public class AuditLogFilterTest
 {
     @Test
+    public void testInputWithSpaces()
+    {
+        AuditLogOptions auditLogOptions = new AuditLogOptions.Builder()
+                                          .withIncludedKeyspaces(" ks, ks1, ks3, ")
+                                          .withEnabled(true)
+                                          .build();
+
+        AuditLogFilter auditLogFilter = AuditLogFilter.create(auditLogOptions);
+
+        Assert.assertFalse(auditLogFilter.isFiltered(new AuditLogEntry.Builder(AuditLogEntryType.CREATE_TYPE).setKeyspace("ks").build()));
+        Assert.assertFalse(auditLogFilter.isFiltered(new AuditLogEntry.Builder(AuditLogEntryType.CREATE_TYPE).setKeyspace("ks1").build()));
+        Assert.assertFalse(auditLogFilter.isFiltered(new AuditLogEntry.Builder(AuditLogEntryType.CREATE_TYPE).setKeyspace("ks3").build()));
+        Assert.assertTrue(auditLogFilter.isFiltered(new AuditLogEntry.Builder(AuditLogEntryType.CREATE_TYPE).setKeyspace("ks5").build()));
+    }
+
+    @Test
     public void isFiltered_IncludeSetOnly()
     {
         Set<String> includeSet = new HashSet<>();
@@ -186,4 +202,4 @@
         excludeSet.add("b");
         Assert.assertFalse(isFiltered(null, includeSet, excludeSet));
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/audit/AuditLogOptionsTest.java b/test/unit/org/apache/cassandra/audit/AuditLogOptionsTest.java
new file mode 100644
index 0000000..cdb9ff6
--- /dev/null
+++ b/test/unit/org/apache/cassandra/audit/AuditLogOptionsTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.audit;
+
+import java.util.Collections;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.assertj.core.api.Assertions;
+
+public class AuditLogOptionsTest
+{
+    @Test
+    public void testAuditLogOptions()
+    {
+        AuditLogOptions defaultOptions = new AuditLogOptions();
+        defaultOptions.enabled = false;
+        defaultOptions.included_categories = "dcl, ddl";
+        defaultOptions.included_keyspaces = "ks1, ks2";
+
+        AuditLogOptions options = new AuditLogOptions.Builder(defaultOptions).withEnabled(true).build();
+        Assert.assertEquals("DCL,DDL", options.included_categories);
+        Assert.assertEquals("ks1,ks2", options.included_keyspaces);
+        Assert.assertTrue(options.enabled);
+        Assert.assertNotNull(options.audit_logs_dir);
+        Assert.assertEquals(BinAuditLogger.class.getSimpleName(), options.logger.class_name);
+        Assert.assertEquals(Collections.emptyMap(), options.logger.parameters);
+    }
+
+    @Test
+    public void testInvalidCategoryShouldThrow()
+    {
+        Assertions.assertThatExceptionOfType(ConfigurationException.class)
+                  .isThrownBy(() -> new AuditLogOptions.Builder()
+                                    .withIncludedCategories("invalidCategoryName,dcl")
+                                    .build());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java b/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
index 963a473..9a3c605 100644
--- a/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
+++ b/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
@@ -33,11 +33,10 @@
 import com.datastax.driver.core.exceptions.AuthenticationException;
 import com.datastax.driver.core.exceptions.SyntaxError;
 import com.datastax.driver.core.exceptions.UnauthorizedException;
-
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.OverrideConfigurationLoader;
 import org.apache.cassandra.config.ParameterizedClass;
-import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.PasswordObfuscator;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.service.EmbeddedCassandraService;
@@ -61,6 +60,7 @@
     private static final String TEST_USER = "testuser";
     private static final String TEST_ROLE = "testrole";
     private static final String TEST_PW = "testpassword";
+    private static final String TEST_PW_HASH = "$2a$10$1fI9MDCe13ZmEYW4XXZibuASNKyqOY828ELGUtml/t.0Mk/6Kqnsq";
     private static final String CASS_USER = "cassandra";
     private static final String CASS_PW = "cassandra";
 
@@ -74,11 +74,9 @@
             config.audit_logging_options.enabled = true;
             config.audit_logging_options.logger = new ParameterizedClass("InMemoryAuditLogger", null);
         });
-        CQLTester.prepareServer();
 
         System.setProperty("cassandra.superuser_setup_delay_ms", "0");
-        embedded = new EmbeddedCassandraService();
-        embedded.start();
+        embedded = ServerTestUtils.startEmbeddedCassandraService();
 
         executeWithCredentials(
         Arrays.asList(getCreateRoleCql(TEST_USER, true, false, false),
@@ -144,6 +142,21 @@
     }
 
     @Test
+    public void testCqlCreateRoleSyntaxErrorWithHashedPwd()
+    {
+        String createTestRoleCQL = String.format("CREATE ROLE %s WITH LOGIN = %s ANDSUPERUSER = %s AND HASHED PASSWORD",
+                                                 TEST_ROLE, true, false) + TEST_PW_HASH;
+        String createTestRoleCQLExpected = String.format("CREATE ROLE %s WITH LOGIN = %s ANDSUPERUSER = %s AND HASHED PASSWORD ",
+                                                         TEST_ROLE, true, false) + PasswordObfuscator.OBFUSCATION_TOKEN;
+
+        executeWithCredentials(Collections.singletonList(createTestRoleCQL), CASS_USER, CASS_PW, AuditLogEntryType.LOGIN_SUCCESS);
+        assertTrue(getInMemAuditLogger().size() > 0);
+        AuditLogEntry logEntry = getInMemAuditLogger().poll();
+        assertLogEntry(logEntry, AuditLogEntryType.REQUEST_FAILURE, createTestRoleCQLExpected, CASS_USER, TEST_PW);
+        assertEquals(0, getInMemAuditLogger().size());
+    }
+
+    @Test
     public void testCqlALTERRoleAuditing()
     {
         createTestRole();
@@ -151,7 +164,28 @@
         executeWithCredentials(Arrays.asList(cql), CASS_USER, CASS_PW, AuditLogEntryType.LOGIN_SUCCESS);
         assertTrue(getInMemAuditLogger().size() > 0);
         AuditLogEntry logEntry = getInMemAuditLogger().poll();
-        assertLogEntry(logEntry, AuditLogEntryType.ALTER_ROLE, "ALTER ROLE " + TEST_ROLE + " WITH PASSWORD = '" + PasswordObfuscator.OBFUSCATION_TOKEN + "'", CASS_USER, "foo_bar");
+        assertLogEntry(logEntry,
+                       AuditLogEntryType.ALTER_ROLE,
+                       "ALTER ROLE " + TEST_ROLE + " WITH PASSWORD = '" + PasswordObfuscator.OBFUSCATION_TOKEN + "'",
+                       CASS_USER,
+                       "foo_bar");
+        assertEquals(0, getInMemAuditLogger().size());
+    }
+
+    @Test
+    public void testCqlALTERRoleAuditingWithHashedPwd()
+    {
+        createTestRole();
+        String cql = "ALTER ROLE " + TEST_ROLE + " WITH HASHED PASSWORD = '" + TEST_PW_HASH + "'";
+        executeWithCredentials(Arrays.asList(cql), CASS_USER, CASS_PW, AuditLogEntryType.LOGIN_SUCCESS);
+        assertTrue(getInMemAuditLogger().size() > 0);
+        AuditLogEntry logEntry = getInMemAuditLogger().poll();
+        assertLogEntry(logEntry,
+                       AuditLogEntryType.ALTER_ROLE,
+                       "ALTER ROLE " + TEST_ROLE +
+                       " WITH HASHED PASSWORD = '" + PasswordObfuscator.OBFUSCATION_TOKEN + "'",
+                       CASS_USER,
+                       TEST_PW_HASH);
         assertEquals(0, getInMemAuditLogger().size());
     }
 
@@ -260,6 +294,45 @@
                        TEST_PW);
     }
 
+    @Test
+    public void testCqlUSERCommandsAuditingWithHashedPwd()
+    {
+        //CREATE USER and ALTER USER are supported only for backwards compatibility.
+
+        String user = TEST_ROLE + "userHashedPwd";
+        String cql = "CREATE USER " + user + " WITH HASHED PASSWORD '" + TEST_PW_HASH + "'";
+        executeWithCredentials(Arrays.asList(cql), CASS_USER, CASS_PW, AuditLogEntryType.LOGIN_SUCCESS);
+        assertTrue(getInMemAuditLogger().size() > 0);
+        AuditLogEntry logEntry = getInMemAuditLogger().poll();
+        assertLogEntry(logEntry,
+                       AuditLogEntryType.CREATE_ROLE,
+                       "CREATE USER " + user + " WITH HASHED PASSWORD '" + PasswordObfuscator.OBFUSCATION_TOKEN + "'",
+                       CASS_USER,
+                       TEST_PW_HASH);
+
+        cql = "ALTER USER " + user + " WITH HASHED PASSWORD '" + TEST_PW_HASH + "'";
+        executeWithCredentials(Arrays.asList(cql), CASS_USER, CASS_PW, AuditLogEntryType.LOGIN_SUCCESS);
+        assertTrue(getInMemAuditLogger().size() > 0);
+        logEntry = getInMemAuditLogger().poll();
+        assertLogEntry(logEntry,
+                       AuditLogEntryType.ALTER_ROLE,
+                       "ALTER USER " + user + " WITH HASHED PASSWORD '" + PasswordObfuscator.OBFUSCATION_TOKEN + "'",
+                       CASS_USER,
+                       TEST_PW_HASH);
+
+        cql = "ALTER USER " + user + " WITH HASHED PASSWORD " + TEST_PW_HASH;
+        executeWithCredentials(Arrays.asList(cql), CASS_USER, CASS_PW, AuditLogEntryType.LOGIN_SUCCESS);
+        assertTrue(getInMemAuditLogger().size() > 0);
+        logEntry = getInMemAuditLogger().poll();
+        assertLogEntry(logEntry,
+                       AuditLogEntryType.REQUEST_FAILURE,
+                       "ALTER USER " + user
+                       + " WITH HASHED PASSWORD " + PasswordObfuscator.OBFUSCATION_TOKEN
+                       + "; Syntax Exception. Obscured for security reasons.",
+                       CASS_USER,
+                       TEST_PW_HASH);
+    }
+
     /**
      * Helper methods
      */
@@ -329,8 +402,8 @@
 
     private static void assertSource(AuditLogEntry logEntry, String username)
     {
-        assertEquals(InetAddressAndPort.getLoopbackAddress().address, logEntry.getSource().address);
-        assertTrue(logEntry.getSource().port > 0);
+        assertEquals(InetAddressAndPort.getLoopbackAddress().getAddress(), logEntry.getSource().getAddress());
+        assertTrue(logEntry.getSource().getPort() > 0);
         if (logEntry.getType() != AuditLogEntryType.LOGIN_ERROR)
             assertEquals(username, logEntry.getUser());
     }
@@ -353,4 +426,4 @@
         assertLogEntry(logEntry, AuditLogEntryType.CREATE_ROLE, getCreateRoleCql(TEST_ROLE, true, false, true), CASS_USER, "");
         assertEquals(0, getInMemAuditLogger().size());
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/audit/AuditLoggerTest.java b/test/unit/org/apache/cassandra/audit/AuditLoggerTest.java
index ac0170f..1ee66f7 100644
--- a/test/unit/org/apache/cassandra/audit/AuditLoggerTest.java
+++ b/test/unit/org/apache/cassandra/audit/AuditLoggerTest.java
@@ -21,7 +21,7 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.util.Map;
+import java.util.Collections;
 
 import org.junit.Assert;
 import org.junit.Before;
@@ -717,6 +717,34 @@
         assertEquals(0, AuthEvents.instance.listenerCount());
     }
 
+    @Test
+    public void testJMXArchiveCommand()
+    {
+        disableAuditLogOptions();
+        AuditLogOptions options = new AuditLogOptions();
+
+        try
+        {
+            StorageService.instance.enableAuditLog("BinAuditLogger", Collections.emptyMap(), "", "", "", "",
+                                                   "", "", 10, true, options.roll_cycle,
+                                                   1000L, 1000, "/xyz/not/null");
+            fail("not allowed");
+        }
+        catch (ConfigurationException e)
+        {
+            assertTrue(e.getMessage().contains("Can't enable audit log archiving via nodetool"));
+        }
+
+        options.archive_command = "/xyz/not/null";
+        options.audit_logs_dir = "/tmp/abc";
+        DatabaseDescriptor.setAuditLoggingOptions(options);
+        StorageService.instance.enableAuditLog("BinAuditLogger", Collections.emptyMap(), "", "", "", "",
+                                               "", "", 10, true, options.roll_cycle,
+                                               1000L, 1000, null);
+        assertTrue(AuditLogManager.instance.isEnabled());
+        assertEquals("/xyz/not/null", AuditLogManager.instance.getAuditLogOptions().archive_command);
+    }
+
     /**
      * Helper methods for Audit Log CQL Testing
      */
diff --git a/test/unit/org/apache/cassandra/auth/AuthCacheTest.java b/test/unit/org/apache/cassandra/auth/AuthCacheTest.java
index da97225..0e22346 100644
--- a/test/unit/org/apache/cassandra/auth/AuthCacheTest.java
+++ b/test/unit/org/apache/cassandra/auth/AuthCacheTest.java
@@ -17,16 +17,23 @@
  */
 package org.apache.cassandra.auth;
 
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BooleanSupplier;
 import java.util.function.Function;
 import java.util.function.IntConsumer;
 import java.util.function.IntSupplier;
+import java.util.function.Supplier;
 
 import org.junit.Test;
 
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.exceptions.UnavailableException;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -39,27 +46,23 @@
     private int validity = 2000;
     private boolean isCacheEnabled = true;
 
+    private final int MAX_ENTRIES = 10;
+
     @Test
     public void testCacheLoaderIsCalledOnFirst()
     {
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
-
-        int result = authCache.get("10");
-
-        assertEquals(10, result);
+        TestCache authCache = newCache();
+        assertEquals(10, (int)authCache.get("10"));
         assertEquals(1, loadCounter);
     }
 
     @Test
     public void testCacheLoaderIsNotCalledOnSecond()
     {
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+        TestCache authCache = newCache();
         authCache.get("10");
         assertEquals(1, loadCounter);
-
-        int result = authCache.get("10");
-
-        assertEquals(10, result);
+        assertEquals(10, (int)authCache.get("10"));
         assertEquals(1, loadCounter);
     }
 
@@ -67,12 +70,9 @@
     public void testCacheLoaderIsAlwaysCalledWhenDisabled()
     {
         isCacheEnabled = false;
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache authCache = newCache();
         authCache.get("10");
-        int result = authCache.get("10");
-
-        assertEquals(10, result);
+        assertEquals(10, (int)authCache.get("10"));
         assertEquals(2, loadCounter);
     }
 
@@ -80,45 +80,42 @@
     public void testCacheLoaderIsAlwaysCalledWhenValidityIsZero()
     {
         setValidity(0);
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache authCache = newCache();
         authCache.get("10");
-        int result = authCache.get("10");
-
-        assertEquals(10, result);
+        assertEquals(10, (int)authCache.get("10"));
         assertEquals(2, loadCounter);
     }
 
     @Test
     public void testCacheLoaderIsCalledAfterFullInvalidate()
     {
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+        TestCache authCache = newCache();
         authCache.get("10");
-
-        authCache.invalidate();
-        int result = authCache.get("10");
-
-        assertEquals(10, result);
+        authCache.get("11");
         assertEquals(2, loadCounter);
+        authCache.invalidate();
+        assertEquals(10, (int)authCache.get("10"));
+        assertEquals(11, (int)authCache.get("11"));
+        assertEquals(4, loadCounter);
     }
 
     @Test
     public void testCacheLoaderIsCalledAfterInvalidateKey()
     {
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+        TestCache authCache = newCache();
         authCache.get("10");
-
-        authCache.invalidate("10");
-        int result = authCache.get("10");
-
-        assertEquals(10, result);
+        authCache.get("11"); // second key that should not be invalidated
         assertEquals(2, loadCounter);
+        authCache.invalidate("10");
+        assertEquals(10, (int)authCache.get("10"));
+        assertEquals(11, (int)authCache.get("11"));
+        assertEquals(3, loadCounter);
     }
 
     @Test
     public void testCacheLoaderIsCalledAfterReset()
     {
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+        TestCache authCache = newCache();
         authCache.get("10");
 
         authCache.cache = null;
@@ -132,7 +129,7 @@
     public void testThatZeroValidityTurnOffCaching()
     {
         setValidity(0);
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+        TestCache authCache = newCache();
         authCache.get("10");
         int result = authCache.get("10");
 
@@ -145,8 +142,7 @@
     public void testThatRaisingValidityTurnOnCaching()
     {
         setValidity(0);
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache authCache = newCache();
         authCache.setValidity(2000);
         authCache.cache = authCache.initCache(null);
 
@@ -157,8 +153,7 @@
     public void testDisableCache()
     {
         isCacheEnabled = false;
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache authCache = newCache();
         assertNull(authCache.cache);
     }
 
@@ -166,8 +161,7 @@
     public void testDynamicallyEnableCache()
     {
         isCacheEnabled = false;
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache authCache = newCache();
         isCacheEnabled = true;
         authCache.cache = authCache.initCache(null);
 
@@ -177,8 +171,7 @@
     @Test
     public void testDefaultPolicies()
     {
-        TestCache<String, Integer> authCache = new TestCache<>(this::countingLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache authCache = newCache();
         assertTrue(authCache.cache.policy().expireAfterWrite().isPresent());
         assertTrue(authCache.cache.policy().refreshAfterWrite().isPresent());
         assertTrue(authCache.cache.policy().eviction().isPresent());
@@ -187,8 +180,7 @@
     @Test(expected = UnavailableException.class)
     public void testCassandraExceptionPassThroughWhenCacheEnabled()
     {
-        TestCache<String, Integer> cache = new TestCache<>(s -> { throw UnavailableException.create(ConsistencyLevel.QUORUM, 3, 1); }, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache cache = newCache(s -> { throw UnavailableException.create(ConsistencyLevel.QUORUM, 3, 1); });
         cache.get("expect-exception");
     }
 
@@ -196,8 +188,7 @@
     public void testCassandraExceptionPassThroughWhenCacheDisable()
     {
         isCacheEnabled = false;
-        TestCache<String, Integer> cache = new TestCache<>(s -> { throw UnavailableException.create(ConsistencyLevel.QUORUM, 3, 1); }, this::setValidity, () -> validity, () -> isCacheEnabled);
-
+        TestCache cache = newCache(s -> { throw UnavailableException.create(ConsistencyLevel.QUORUM, 3, 1); });
         cache.get("expect-exception");
     }
 
@@ -205,7 +196,7 @@
     public void testCassandraExceptionPassThroughWhenCacheRefreshed() throws InterruptedException
     {
         setValidity(50);
-        TestCache<String, Integer> cache = new TestCache<>(this::countingLoaderWithException, this::setValidity, () -> validity, () -> isCacheEnabled);
+        TestCache cache = new TestCache(this::countingLoaderWithException, this::emptyBulkLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
         cache.get("10");
 
         // wait until the cached record expires
@@ -224,6 +215,111 @@
         }
     }
 
+    @Test
+    public void warmCacheUsingEntryProvider()
+    {
+        AtomicBoolean provided = new AtomicBoolean(false);
+        Supplier<Map<String, Integer>> bulkLoader = () -> {
+            provided.set(true);
+            return Collections.singletonMap("0", 0);
+        };
+        TestCache cache = newCache(bulkLoader);
+        cache.warm();
+        assertEquals(1, cache.getEstimatedSize());
+        assertEquals(0, (int)cache.get("0")); // warmed entry
+        assertEquals(0, loadCounter);
+        assertEquals(10, (int)cache.get("10")); // cold entry
+        assertEquals(1, loadCounter);
+        assertTrue(provided.get());
+    }
+
+    @Test
+    public void warmCacheIsSafeIfCachingIsDisabled()
+    {
+        isCacheEnabled = false;
+        TestCache cache = newCache(() -> Collections.singletonMap("0", 0));
+        cache.warm();
+        assertEquals(0, cache.getEstimatedSize());
+    }
+
+    @Test
+    public void providerSuppliesMoreEntriesThanCapacity()
+    {
+        Supplier<Map<String, Integer>> bulkLoader = () -> {
+            Map<String, Integer> entries = new HashMap<>();
+            for (int i = 0; i < MAX_ENTRIES * 2; i++)
+                entries.put(Integer.toString(i), i);
+            return entries;
+        };
+        TestCache cache = new TestCache(this::countingLoader,
+                                        bulkLoader,
+                                        this::setValidity,
+                                        () -> validity,
+                                        () -> isCacheEnabled);
+        cache.warm();
+        cache.cleanup(); // Force the cleanup task rather than waiting for it to be scheduled, so we get an accurate count
+        assertEquals(MAX_ENTRIES, cache.getEstimatedSize());
+    }
+
+    @Test
+    public void handleProviderErrorDuringWarming()
+    {
+        System.setProperty(AuthCache.CACHE_LOAD_RETRIES_PROPERTY, "3");
+        System.setProperty(AuthCache.CACHE_LOAD_RETRY_INTERVAL_PROPERTY, "0");
+        final AtomicInteger attempts = new AtomicInteger(0);
+
+        Supplier<Map<String, Integer>> bulkLoader = () -> {
+            if (attempts.incrementAndGet() < 3)
+                throw new RuntimeException("BOOM");
+
+            return Collections.singletonMap("0", 99);
+        };
+
+        TestCache cache = newCache(bulkLoader);
+        cache.warm();
+        assertEquals(1, cache.getEstimatedSize());
+        assertEquals(99, (int)cache.get("0"));
+        // We should have made 3 attempts to get the initial entries
+        assertEquals(3, attempts.get());
+    }
+
+    @Test
+    public void testCacheLoaderIsNotCalledOnGetAllWhenCacheIsDisabled()
+    {
+        isCacheEnabled = false;
+        TestCache authCache = new TestCache(this::countingLoader, this::emptyBulkLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+        authCache.get("10");
+        Map<String, Integer> result = authCache.getAll();
+
+        // even though the cache is disabled and nothing is cached, we still use loadFunction on the get operation,
+        // so its counter has been incremented
+        assertThat(result).isEmpty();
+        assertEquals(1, loadCounter);
+    }
+
+    @Test
+    public void testCacheLoaderIsNotCalledOnGetAllWhenCacheIsEmpty()
+    {
+        TestCache authCache = new TestCache(this::countingLoader, this::emptyBulkLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+
+        Map<String, Integer> result = authCache.getAll();
+
+        assertThat(result).isEmpty();
+        assertEquals(0, loadCounter);
+    }
+
+    @Test
+    public void testCacheLoaderIsNotCalledOnGetAllWhenCacheIsNotEmpty()
+    {
+        TestCache authCache = new TestCache(this::countingLoader, this::emptyBulkLoader, this::setValidity, () -> validity, () -> isCacheEnabled);
+        authCache.get("10");
+        Map<String, Integer> result = authCache.getAll();
+
+        assertThat(result).hasSize(1);
+        assertThat(result).containsEntry("10", 10);
+        assertEquals(1, loadCounter);
+    }
+
     private void setValidity(int validity)
     {
         this.validity = validity;
@@ -245,21 +341,65 @@
         return loadedValue;
     }
 
-    private static class TestCache<K, V> extends AuthCache<K, V>
+    private Map<String, Integer> emptyBulkLoader()
+    {
+        return Collections.emptyMap();
+    }
+
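+    // Builds a TestCache wired with the counting loader, an empty bulk loader and this test's validity/enabled suppliers;
+    // the overloads below swap in a custom load function or bulk loader.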
+    private TestCache newCache()
+    {
+        return new TestCache(this::countingLoader,
+                             this::emptyBulkLoader,
+                             this::setValidity,
+                             () -> validity,
+                             () -> isCacheEnabled);
+    }
+
+    private TestCache newCache(Function<String, Integer> loadFunction)
+    {
+        return new TestCache(loadFunction,
+                             this::emptyBulkLoader,
+                             this::setValidity,
+                             () -> validity,
+                             () -> isCacheEnabled);
+    }
+
+    private TestCache newCache(Supplier<Map<String, Integer>> bulkLoader)
+    {
+        return new TestCache(this::countingLoader,
+                             bulkLoader,
+                             this::setValidity,
+                             () -> validity,
+                             () -> isCacheEnabled);
+    }
+
+    private static class TestCache extends AuthCache<String, Integer>
     {
         private static int nameCounter = 0; // Allow us to create many instances of cache with same name prefix
 
-        TestCache(Function<K, V> loadFunction, IntConsumer setValidityDelegate, IntSupplier getValidityDelegate, BooleanSupplier cacheEnabledDelegate)
+        TestCache(Function<String, Integer> loadFunction,
+                  Supplier<Map<String, Integer>> bulkLoadFunction,
+                  IntConsumer setValidityDelegate,
+                  IntSupplier getValidityDelegate,
+                  BooleanSupplier cacheEnabledDelegate)
         {
             super("TestCache" + nameCounter++,
                   setValidityDelegate,
                   getValidityDelegate,
-                  (updateInterval) -> {},
-                  () -> 1000,
-                  (maxEntries) -> {},
-                  () -> 10,
+                  (updateInterval) -> {},               // set update interval
+                  () -> 1000,                           // get update interval
+                  (maxEntries) -> {},                   // set max entries
+                  () -> 10,                             // get max entries
+                  (updateActiveUpdate) -> {},           // set active update enabled
+                  () -> false,                          // get active update enabled
                   loadFunction,
+                  bulkLoadFunction,
                   cacheEnabledDelegate);
         }
+
+        void cleanup()
+        {
+            cache.cleanUp();
+        }
     }
 }
diff --git a/test/unit/org/apache/cassandra/auth/AuthPropertiesTest.java b/test/unit/org/apache/cassandra/auth/AuthPropertiesTest.java
new file mode 100644
index 0000000..9a01449
--- /dev/null
+++ b/test/unit/org/apache/cassandra/auth/AuthPropertiesTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+
+public class AuthPropertiesTest
+{
+    @BeforeClass
+    public static void beforeClass()
+    {
+        if (!DatabaseDescriptor.isDaemonInitialized())
+        {
+            DatabaseDescriptor.daemonInitialization();
+        }
+    }
+
+    @Test
+    public void setReadCL_LegitValue()
+    {
+        AuthProperties authProperties = new AuthProperties(ConsistencyLevel.ANY, ConsistencyLevel.THREE, false);
+        authProperties.setReadConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
+        Assert.assertEquals(ConsistencyLevel.LOCAL_QUORUM, AuthProperties.instance.getReadConsistencyLevel());
+    }
+
+    @Test (expected = IllegalArgumentException.class)
+    public void setReadCL_BadValue()
+    {
+        AuthProperties authProperties = new AuthProperties(ConsistencyLevel.ANY, ConsistencyLevel.THREE, false);
+        authProperties.setReadConsistencyLevel(ConsistencyLevel.valueOf("ILLEGAL_CL"));
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/auth/AuthTestUtils.java b/test/unit/org/apache/cassandra/auth/AuthTestUtils.java
new file mode 100644
index 0000000..c78520e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/auth/AuthTestUtils.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import java.util.concurrent.Callable;
+
+import org.apache.cassandra.auth.jmx.AuthorizationProxy;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.cql3.statements.BatchStatement;
+import org.apache.cassandra.cql3.statements.SelectStatement;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.transport.messages.ResultMessage;
+
+
+public class AuthTestUtils
+{
+
+    public static final RoleResource ROLE_A = RoleResource.role("role_a");
+    public static final RoleResource ROLE_B = RoleResource.role("role_b");
+    public static final RoleResource ROLE_B_1 = RoleResource.role("role_b_1");
+    public static final RoleResource ROLE_B_2 = RoleResource.role("role_b_2");
+    public static final RoleResource ROLE_B_3 = RoleResource.role("role_b_3");
+    public static final RoleResource ROLE_C = RoleResource.role("role_c");
+    public static final RoleResource ROLE_C_1 = RoleResource.role("role_c_1");
+    public static final RoleResource ROLE_C_2 = RoleResource.role("role_c_2");
+    public static final RoleResource ROLE_C_3 = RoleResource.role("role_c_3");
+    public static final RoleResource[] ALL_ROLES  = new RoleResource[] {ROLE_A,
+                                                                        ROLE_B, ROLE_B_1, ROLE_B_2, ROLE_B_3,
+                                                                        ROLE_C, ROLE_C_1, ROLE_C_2, ROLE_C_3};
+    /**
+     * This just extends the internal IRoleManager implementation to ensure that
+     * all access to underlying tables is made via
+     * QueryProcessor.executeOnceInternal/CQLStatement.executeInternal and not
+     * StorageProxy, so that it can be used in unit tests.
+     */
+    public static class LocalCassandraRoleManager extends CassandraRoleManager
+    {
+        @Override
+        ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
+        {
+            return statement.executeLocally(QueryState.forInternalCalls(), options);
+        }
+
+        @Override
+        UntypedResultSet process(String query, ConsistencyLevel consistencyLevel)
+        {
+            return QueryProcessor.executeInternal(query);
+        }
+
+        @Override
+        protected void scheduleSetupTask(final Callable<Void> setupTask)
+        {
+            // skip data migration or setting up default role for tests
+        }
+    }
+
+    public static class LocalCassandraAuthorizer extends CassandraAuthorizer
+    {
+        @Override
+        ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
+        {
+            return statement.executeLocally(QueryState.forInternalCalls(), options);
+        }
+
+        @Override
+        UntypedResultSet process(String query, ConsistencyLevel cl) throws RequestExecutionException
+        {
+            return QueryProcessor.executeInternal(query);
+        }
+
+        @Override
+        void processBatch(BatchStatement statement)
+        {
+            statement.executeLocally(QueryState.forInternalCalls(), QueryOptions.DEFAULT);
+        }
+    }
+
+    public static class LocalCassandraNetworkAuthorizer extends CassandraNetworkAuthorizer
+    {
+        @Override
+        ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
+        {
+            return statement.executeLocally(QueryState.forInternalCalls(), options);
+        }
+
+        @Override
+        UntypedResultSet process(String query, ConsistencyLevel cl)
+        {
+            return QueryProcessor.executeInternal(query);
+        }
+    }
+
+    public static class LocalPasswordAuthenticator extends PasswordAuthenticator
+    {
+        @Override
+        ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
+        {
+            return statement.executeLocally(QueryState.forInternalCalls(), options);
+        }
+
+        @Override
+        UntypedResultSet process(String query, ConsistencyLevel cl)
+        {
+            return QueryProcessor.executeInternal(query);
+        }
+    }
+
+    public static class NoAuthSetupAuthorizationProxy extends AuthorizationProxy
+    {
+        public NoAuthSetupAuthorizationProxy()
+        {
+            super();
+            this.isAuthSetupComplete = () -> true;
+        }
+    }
+
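+    // Grants each of the listed roles to the grantee, acting as the anonymous (internal) user.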
+    public static void grantRolesTo(IRoleManager roleManager, RoleResource grantee, RoleResource... granted)
+    {
+        for (RoleResource toGrant : granted)
+            roleManager.grantRole(AuthenticatedUser.ANONYMOUS_USER, toGrant, grantee);
+    }
+
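+    // Read counts on the auth tables (taken from the read latency metric), used to verify how many lookups hit storage.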
+    public static long getNetworkPermissionsReadCount()
+    {
+        ColumnFamilyStore networkPermissionsTable =
+                Keyspace.open(SchemaConstants.AUTH_KEYSPACE_NAME).getColumnFamilyStore(AuthKeyspace.NETWORK_PERMISSIONS);
+        return networkPermissionsTable.metric.readLatency.latency.getCount();
+    }
+
+    public static long getRolePermissionsReadCount()
+    {
+        ColumnFamilyStore rolesPermissionsTable =
+                Keyspace.open(SchemaConstants.AUTH_KEYSPACE_NAME).getColumnFamilyStore(AuthKeyspace.ROLE_PERMISSIONS);
+        return rolesPermissionsTable.metric.readLatency.latency.getCount();
+    }
+
+    public static long getRolesReadCount()
+    {
+        ColumnFamilyStore rolesTable = Keyspace.open(SchemaConstants.AUTH_KEYSPACE_NAME).getColumnFamilyStore(AuthKeyspace.ROLES);
+        return rolesTable.metric.readLatency.latency.getCount();
+    }
+
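+    // Returns role options for a non-superuser role that can log in, with a placeholder password.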
+    public static RoleOptions getLoginRoleOptions()
+    {
+        RoleOptions roleOptions = new RoleOptions();
+        roleOptions.setOption(IRoleManager.Option.SUPERUSER, false);
+        roleOptions.setOption(IRoleManager.Option.LOGIN, true);
+        roleOptions.setOption(IRoleManager.Option.PASSWORD, "ignored");
+        return roleOptions;
+    }
+}
diff --git a/test/unit/org/apache/cassandra/auth/CacheRefresherTest.java b/test/unit/org/apache/cassandra/auth/CacheRefresherTest.java
new file mode 100644
index 0000000..7600c29
--- /dev/null
+++ b/test/unit/org/apache/cassandra/auth/CacheRefresherTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.BooleanSupplier;
+
+import com.google.common.util.concurrent.MoreExecutors;
+
+import com.github.benmanes.caffeine.cache.CacheLoader;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import com.github.benmanes.caffeine.cache.LoadingCache;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class CacheRefresherTest
+{
+    @Test
+    public void refresh() throws Exception
+    {
+        Map<String, String> src = new HashMap<>();
+        CacheLoader<String, String> loader = src::get;
+
+        // Supply the directExecutor so the refresh() call executes within the refresher task, as AuthCache does (rather than asynchronously)
+        LoadingCache<String, String> cache = Caffeine.newBuilder()
+                                                     .executor(MoreExecutors.directExecutor())
+                                                     .build(loader);
+
+        AtomicBoolean skipRefresh = new AtomicBoolean(false);
+        BooleanSupplier skipCondition = skipRefresh::get;
+
+        CacheRefresher<String, String> refresher = CacheRefresher.create("test", cache, (k, v) -> v.equals("removed"), skipCondition);
+        src.put("some", "thing");
+        Assert.assertEquals("thing", cache.get("some"));
+
+        // Cache should still have old value...
+        src.put("some", "one");
+        Assert.assertEquals("thing", cache.get("some"));
+
+        // ... but refresher should update it
+        refresher.run();
+        Assert.assertEquals("one", cache.get("some"));
+
+        // If we just remove the value from the src, the cache should still contain it
+        src.remove("some");
+        Assert.assertEquals("one", cache.get("some"));
+
+        // If we insert the special sentinel value into the src, the refresher will invalidate it from the cache.
+        // This time when it's removed from the underlying storage, it's not returned from the cache
+        src.put("some", "removed");
+        refresher.run();
+        src.remove("some");
+
+        // The refresher invalidated the sentinel entry and src no longer has the key, so the cache returns null
+        Assert.assertNull(cache.get("some"));
+
+        // If the skip condition returns true, don't refresh
+        src.put("some", "one");
+        Assert.assertEquals("one", cache.get("some"));
+        skipRefresh.set(true);
+        src.put("some", "body");
+        refresher.run();
+        Assert.assertEquals("one", cache.get("some"));
+
+        // Change the skip condition back to false and refresh
+        skipRefresh.set(false);
+        refresher.run();
+        Assert.assertEquals("body", cache.get("some"));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/auth/CassandraAuthorizerTest.java b/test/unit/org/apache/cassandra/auth/CassandraAuthorizerTest.java
index 61235d2..643b2df 100644
--- a/test/unit/org/apache/cassandra/auth/CassandraAuthorizerTest.java
+++ b/test/unit/org/apache/cassandra/auth/CassandraAuthorizerTest.java
@@ -18,80 +18,78 @@
 
 package org.apache.cassandra.auth;
 
-import java.util.Collections;
-
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.CassandraRelevantProperties;
 import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.cql3.statements.CreateRoleStatement;
-import org.apache.cassandra.cql3.statements.ListPermissionsStatement;
-import org.apache.cassandra.exceptions.UnauthorizedException;
-import org.apache.cassandra.service.ClientState;
-import org.apache.cassandra.transport.messages.ResultMessage;
-import org.assertj.core.api.Assertions;
 
-import static org.apache.cassandra.auth.AuthenticatedUser.SYSTEM_USER;
+import static java.lang.String.format;
 
 public class CassandraAuthorizerTest extends CQLTester
 {
-    private static final RoleResource PARENT_ROLE = RoleResource.role("parent");
-    private static final RoleResource CHILD_ROLE = RoleResource.role("child");
-    private static final RoleResource OTHER_ROLE = RoleResource.role("other");
+    private static final String PARENT = "parent";
+    private static final String CHILD = "child";
+    private static final String OTHER = "other";
+    private static final String PASSWORD = "secret";
 
     @BeforeClass
     public static void setupClass()
     {
+        CassandraRelevantProperties.ORG_APACHE_CASSANDRA_DISABLE_MBEAN_REGISTRATION.setBoolean(true);
         CQLTester.setUpClass();
-        SchemaLoader.setupAuth(new RoleTestUtils.LocalCassandraRoleManager(),
-                               new PasswordAuthenticator(),
-                               new RoleTestUtils.LocalCassandraAuthorizer(),
-                               new RoleTestUtils.LocalCassandraNetworkAuthorizer());
+        requireAuthentication();
+        requireNetwork();
     }
 
     @Test
-    public void testListPermissionsOfChildByParent()
+    public void testListPermissionsOfChildByParent() throws Throwable
     {
-        // create parent role by system user
-        DatabaseDescriptor.getRoleManager()
-                          .createRole(SYSTEM_USER, PARENT_ROLE, RoleTestUtils.getLoginRoleOptions());
+        useSuperUser();
+
+        // create parent role by super user
+        executeNet(format("CREATE ROLE %s WITH login=true AND password='%s'", PARENT, PASSWORD));
+        executeNet(format("GRANT CREATE ON ALL ROLES TO %s", PARENT));
+        assertRowsNet(executeNet(format("LIST ALL PERMISSIONS OF %s", PARENT)),
+                      row(PARENT, PARENT, "<all roles>", "CREATE"));
+
+        // create other role by super user
+        executeNet(format("CREATE ROLE %s WITH login=true AND password='%s'", OTHER, PASSWORD));
+        assertRowsNet(executeNet(format("LIST ALL PERMISSIONS OF %s", OTHER)));
+
+        useUser(PARENT, PASSWORD);
 
         // create child role by parent
-        String createRoleQuery = String.format("CREATE ROLE %s", CHILD_ROLE.getRoleName());
-        CreateRoleStatement createRoleStatement = (CreateRoleStatement) QueryProcessor.parseStatement(createRoleQuery)
-                                                                                      .prepare(ClientState.forInternalCalls());
-        createRoleStatement.execute(getClientState(PARENT_ROLE.getRoleName()));
+        executeNet(format("CREATE ROLE %s WITH login = true AND password='%s'", CHILD, PASSWORD));
 
-        // grant SELECT permission on ALL KEYSPACES to child
-        DatabaseDescriptor.getAuthorizer()
-                          .grant(SYSTEM_USER,
-                                 Collections.singleton(Permission.SELECT),
-                                 DataResource.root(),
-                                 CHILD_ROLE);
+        // list permissions by parent
+        assertRowsNet(executeNet(format("LIST ALL PERMISSIONS OF %s", PARENT)),
+                      row(PARENT, PARENT, "<all roles>", "CREATE"),
+                      row(PARENT, PARENT, "<role child>", "ALTER"),
+                      row(PARENT, PARENT, "<role child>", "DROP"),
+                      row(PARENT, PARENT, "<role child>", "AUTHORIZE"),
+                      row(PARENT, PARENT, "<role child>", "DESCRIBE"));
+        assertRowsNet(executeNet(format("LIST ALL PERMISSIONS OF %s", CHILD)));
+        assertInvalidMessageNet(format("You are not authorized to view %s's permissions", OTHER),
+                                format("LIST ALL PERMISSIONS OF %s", OTHER));
 
-        // list child permissions by parent
-        String listPermissionsQuery = String.format("LIST ALL PERMISSIONS OF %s", CHILD_ROLE.getRoleName());
-        ListPermissionsStatement listPermissionsStatement = (ListPermissionsStatement) QueryProcessor.parseStatement(listPermissionsQuery)
-                                                                                                     .prepare(ClientState.forInternalCalls());
-        ResultMessage message = listPermissionsStatement.execute(getClientState(PARENT_ROLE.getRoleName()));
-        assertRows(UntypedResultSet.create(((ResultMessage.Rows) message).result),
-                   row("child", "child", "<all keyspaces>", "SELECT"));
+        useUser(CHILD, PASSWORD);
 
-        // list child permissions by other user that is not their parent
-        DatabaseDescriptor.getRoleManager().createRole(SYSTEM_USER, OTHER_ROLE, RoleTestUtils.getLoginRoleOptions());
-        Assertions.assertThatThrownBy(() -> listPermissionsStatement.execute(getClientState(OTHER_ROLE.getRoleName())))
-                  .isInstanceOf(UnauthorizedException.class)
-                  .hasMessage("You are not authorized to view child's permissions");
-    }
+        // list permissions by child
+        assertInvalidMessageNet(format("You are not authorized to view %s's permissions", PARENT),
+                                format("LIST ALL PERMISSIONS OF %s", PARENT));
+        assertRowsNet(executeNet(format("LIST ALL PERMISSIONS OF %s", CHILD)));
+        assertInvalidMessageNet(format("You are not authorized to view %s's permissions", OTHER),
+                                format("LIST ALL PERMISSIONS OF %s", OTHER));
 
-    private static ClientState getClientState(String username)
-    {
-        ClientState state = ClientState.forInternalCalls();
-        state.login(new AuthenticatedUser(username));
-        return state;
+        // try to create role by child
+        assertInvalidMessageNet(format("User %s does not have sufficient privileges to perform the requested operation", CHILD),
+                                format("CREATE ROLE %s WITH login=true AND password='%s'", "nope", PASSWORD));
+
+        useUser(PARENT, PASSWORD);
+
+        // alter child's role by parent
+        executeNet(format("ALTER ROLE %s WITH login = false", CHILD));
+        executeNet(format("DROP ROLE %s", CHILD));
     }
 }
diff --git a/test/unit/org/apache/cassandra/auth/CassandraAuthorizerTruncatingTest.java b/test/unit/org/apache/cassandra/auth/CassandraAuthorizerTruncatingTest.java
new file mode 100644
index 0000000..3806a23
--- /dev/null
+++ b/test/unit/org/apache/cassandra/auth/CassandraAuthorizerTruncatingTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.utils.Pair;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ALL_ROLES;
+import static org.apache.cassandra.auth.AuthTestUtils.LocalCassandraRoleManager;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B_1;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B_2;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_C;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_C_1;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_C_2;
+import static org.apache.cassandra.auth.AuthTestUtils.grantRolesTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * For Authorizer based tests where we need to fully truncate the roles, members, and permissions between tests
+ */
+public class CassandraAuthorizerTruncatingTest extends CQLTester
+{
+    @BeforeClass
+    public static void setupClass()
+    {
+        CassandraRelevantProperties.ORG_APACHE_CASSANDRA_DISABLE_MBEAN_REGISTRATION.setBoolean(true);
+        DatabaseDescriptor.daemonInitialization();
+        DatabaseDescriptor.setAuthorizer(new StubAuthorizer());
+        requireAuthentication();
+        requireNetwork();
+    }
+
+    @Before
+    public void setup()
+    {
+        ColumnFamilyStore.getIfExists(SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLES).truncateBlocking();
+        ColumnFamilyStore.getIfExists(SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_MEMBERS).truncateBlocking();
+        ColumnFamilyStore.getIfExists(SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_PERMISSIONS).truncateBlocking();
+    }
+
+    @Test
+    public void testBulkLoadingForAuthCache()
+    {
+        IResource table1 = Resources.fromName("data/ks1/t1");
+        IResource table2 = Resources.fromName("data/ks2/t2");
+
+        // Set up a hierarchy of roles. ROLE_B is granted LOGIN and is granted ROLE_B_1 and ROLE_B_2;
+        // ROLE_C is granted LOGIN along with ROLE_C_1 and ROLE_C_2
+        IRoleManager roleManager = new LocalCassandraRoleManager();
+        roleManager.setup();
+
+        for (RoleResource role : ALL_ROLES)
+            roleManager.createRole(AuthenticatedUser.ANONYMOUS_USER, role, new RoleOptions());
+
+        RoleOptions withLogin = new RoleOptions();
+        withLogin.setOption(IRoleManager.Option.LOGIN, Boolean.TRUE);
+        roleManager.alterRole(AuthenticatedUser.ANONYMOUS_USER, ROLE_B, withLogin);
+        roleManager.alterRole(AuthenticatedUser.ANONYMOUS_USER, ROLE_C, withLogin);
+        grantRolesTo(roleManager, ROLE_B, ROLE_B_1, ROLE_B_2);
+        grantRolesTo(roleManager, ROLE_C, ROLE_C_1, ROLE_C_2);
+
+        CassandraAuthorizer authorizer = new CassandraAuthorizer();
+        // Granted on ks1.t1: B1 -> {SELECT, MODIFY}, B2 -> {AUTHORIZE}, so B -> {SELECT, MODIFY, AUTHORIZE}
+        authorizer.grant(AuthenticatedUser.SYSTEM_USER, EnumSet.of(Permission.SELECT, Permission.MODIFY), table1, ROLE_B_1);
+        authorizer.grant(AuthenticatedUser.SYSTEM_USER, EnumSet.of(Permission.AUTHORIZE), table1, ROLE_B_2);
+
+        // Granted on ks2.t2: C1 -> {SELECT, MODIFY}, C2 -> {AUTHORIZE}, so C -> {SELECT, MODIFY, AUTHORIZE}
+        authorizer.grant(AuthenticatedUser.SYSTEM_USER, EnumSet.of(Permission.SELECT, Permission.MODIFY), table2, ROLE_C_1);
+        authorizer.grant(AuthenticatedUser.SYSTEM_USER, EnumSet.of(Permission.AUTHORIZE), table2, ROLE_C_2);
+
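+        // bulkLoader() supplies the entries used to pre-warm the permissions cache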
+        Map<Pair<AuthenticatedUser, IResource>, Set<Permission>> cacheEntries = authorizer.bulkLoader().get();
+
+        // Only ROLE_B and ROLE_C have LOGIN privs, so only they should be in the cache
+        assertEquals(2, cacheEntries.size());
+        assertEquals(EnumSet.of(Permission.SELECT, Permission.MODIFY, Permission.AUTHORIZE),
+                     cacheEntries.get(Pair.create(new AuthenticatedUser(ROLE_B.getRoleName()), table1)));
+        assertEquals(EnumSet.of(Permission.SELECT, Permission.MODIFY, Permission.AUTHORIZE),
+                     cacheEntries.get(Pair.create(new AuthenticatedUser(ROLE_C.getRoleName()), table2)));
+    }
+
+    @Test
+    public void testBulkLoadingForAuthCacheWithEmptyTable()
+    {
+        CassandraAuthorizer authorizer = new CassandraAuthorizer();
+        Map<Pair<AuthenticatedUser, IResource>, Set<Permission>> cacheEntries = authorizer.bulkLoader().get();
+        assertTrue(cacheEntries.isEmpty());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/auth/CassandraNetworkAuthorizerTest.java b/test/unit/org/apache/cassandra/auth/CassandraNetworkAuthorizerTest.java
index 350ee20..3a208f8 100644
--- a/test/unit/org/apache/cassandra/auth/CassandraNetworkAuthorizerTest.java
+++ b/test/unit/org/apache/cassandra/auth/CassandraNetworkAuthorizerTest.java
@@ -43,9 +43,8 @@
 import org.apache.cassandra.service.ClientState;
 
 import static org.apache.cassandra.auth.AuthKeyspace.NETWORK_PERMISSIONS;
-import static org.apache.cassandra.auth.RoleTestUtils.LocalCassandraRoleManager;
+import static org.apache.cassandra.auth.AuthTestUtils.getRolesReadCount;
 import static org.apache.cassandra.schema.SchemaConstants.AUTH_KEYSPACE_NAME;
-import static org.apache.cassandra.auth.RoleTestUtils.getReadCount;
 
 public class CassandraNetworkAuthorizerTest
 {
@@ -63,13 +62,12 @@
     public static void defineSchema() throws ConfigurationException
     {
         SchemaLoader.prepareServer();
-        SchemaLoader.setupAuth(new LocalCassandraRoleManager(),
-                               new PasswordAuthenticator(),
-                               new RoleTestUtils.LocalCassandraAuthorizer(),
-                               new RoleTestUtils.LocalCassandraNetworkAuthorizer());
+        SchemaLoader.setupAuth(new AuthTestUtils.LocalCassandraRoleManager(),
+                               new AuthTestUtils.LocalPasswordAuthenticator(),
+                               new AuthTestUtils.LocalCassandraAuthorizer(),
+                               new AuthTestUtils.LocalCassandraNetworkAuthorizer());
+        AuthCacheService.initializeAndRegisterCaches();
         setupSuperUser();
-        // not strictly necessary to init the cache here, but better to be explicit
-        Roles.initRolesCache(DatabaseDescriptor.getRoleManager(), () -> true);
     }
 
     @Before
@@ -122,7 +120,7 @@
         AuthenticationStatement authStmt = (AuthenticationStatement) statement;
 
         // invalidate roles cache so that any changes to the underlying roles are picked up
-        Roles.clearCache();
+        Roles.cache.invalidate();
         authStmt.execute(getClientState());
     }
 
@@ -193,7 +191,7 @@
         assertDcPermRow(username, "dc1");
 
         // clear the roles cache to lose the (non-)superuser status for the user
-        Roles.clearCache();
+        Roles.cache.invalidate();
         auth("ALTER ROLE %s WITH superuser = true", username);
         Assert.assertEquals(DCPermissions.all(), dcPerms(username));
     }
@@ -207,14 +205,14 @@
     }
 
     @Test
-    public void getLoginPrivilegeFromRolesCache() throws Exception
+    public void getLoginPrivilegeFromRolesCache()
     {
         String username = createName();
         auth("CREATE ROLE %s", username);
-        long readCount = getReadCount();
+        long readCount = getRolesReadCount();
         dcPerms(username);
-        Assert.assertEquals(++readCount, getReadCount());
+        Assert.assertEquals(++readCount, getRolesReadCount());
         dcPerms(username);
-        Assert.assertEquals(readCount, getReadCount());
+        Assert.assertEquals(readCount, getRolesReadCount());
     }
 }
diff --git a/test/unit/org/apache/cassandra/auth/CassandraRoleManagerTest.java b/test/unit/org/apache/cassandra/auth/CassandraRoleManagerTest.java
index 6583c49..7b6b910 100644
--- a/test/unit/org/apache/cassandra/auth/CassandraRoleManagerTest.java
+++ b/test/unit/org/apache/cassandra/auth/CassandraRoleManagerTest.java
@@ -18,23 +18,28 @@
 
 package org.apache.cassandra.auth;
 
+import java.util.Map;
 import java.util.Set;
 
 import com.google.common.collect.Iterables;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
 
-import static org.apache.cassandra.auth.RoleTestUtils.*;
+import static org.apache.cassandra.auth.AuthTestUtils.*;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 public class CassandraRoleManagerTest
 {
-
     @BeforeClass
     public static void setupClass()
     {
@@ -43,6 +48,19 @@
         SchemaLoader.createKeyspace(SchemaConstants.AUTH_KEYSPACE_NAME,
                                     KeyspaceParams.simple(1),
                                     Iterables.toArray(AuthKeyspace.metadata().tables, TableMetadata.class));
+        // We start StorageService because confirmFastRoleSetup confirms that CassandraRoleManager will
+        // take a faster path once the cluster is already setup, which includes checking MessagingService
+        // and issuing queries with QueryProcessor.process, which uses TokenMetadata
+        DatabaseDescriptor.daemonInitialization();
+        StorageService.instance.initServer(0);
+        AuthCacheService.initializeAndRegisterCaches();
+    }
+
+    @Before
+    public void setup() throws Exception
+    {
+        ColumnFamilyStore.getIfExists(SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLES).truncateBlocking();
+        ColumnFamilyStore.getIfExists(SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_MEMBERS).truncateBlocking();
     }
 
     @Test
@@ -55,7 +73,7 @@
         // collects all of the necessary info with a single query for each granted role.
         // This just tests that that is the case, i.e. we perform 1 read per role in the
         // transitive set of granted roles
-        IRoleManager roleManager = new LocalCassandraRoleManager();
+        IRoleManager roleManager = new AuthTestUtils.LocalCassandraRoleManager();
         roleManager.setup();
         for (RoleResource r : ALL_ROLES)
             roleManager.createRole(AuthenticatedUser.ANONYMOUS_USER, r, new RoleOptions());
@@ -80,9 +98,73 @@
 
     private void fetchRolesAndCheckReadCount(IRoleManager roleManager, RoleResource primaryRole)
     {
-        long before = getReadCount();
+        long before = getRolesReadCount();
         Set<Role> granted = roleManager.getRoleDetails(primaryRole);
-        long after = getReadCount();
+        long after = getRolesReadCount();
         assertEquals(granted.size(), after - before);
     }
+
+    @Test
+    public void confirmFastRoleSetup()
+    {
+        IRoleManager roleManager = new AuthTestUtils.LocalCassandraRoleManager();
+        roleManager.setup();
+        for (RoleResource r : ALL_ROLES)
+            roleManager.createRole(AuthenticatedUser.ANONYMOUS_USER, r, new RoleOptions());
+
+        CassandraRoleManager crm = new CassandraRoleManager();
+
+        assertTrue("Expected the role manager to have existing roles before CassandraRoleManager setup", CassandraRoleManager.hasExistingRoles());
+    }
+
+    @Test
+    public void warmCacheLoadsAllEntries()
+    {
+        IRoleManager roleManager = new AuthTestUtils.LocalCassandraRoleManager();
+        roleManager.setup();
+        for (RoleResource r : ALL_ROLES)
+            roleManager.createRole(AuthenticatedUser.ANONYMOUS_USER, r, new RoleOptions());
+
+        // Multi level role hierarchy
+        grantRolesTo(roleManager, ROLE_B, ROLE_B_1, ROLE_B_2, ROLE_B_3);
+        grantRolesTo(roleManager, ROLE_C, ROLE_C_1, ROLE_C_2, ROLE_C_3);
+
+        // Use CassandraRoleManager to get entries for pre-warming a cache, then verify those entries
+        CassandraRoleManager crm = new CassandraRoleManager();
+        crm.setup();
+        Map<RoleResource, Set<Role>> cacheEntries = crm.bulkLoader().get();
+
+        Set<Role> roleBRoles = cacheEntries.get(ROLE_B);
+        assertRoleSet(roleBRoles, ROLE_B, ROLE_B_1, ROLE_B_2, ROLE_B_3);
+
+        Set<Role> roleCRoles = cacheEntries.get(ROLE_C);
+        assertRoleSet(roleCRoles, ROLE_C, ROLE_C_1, ROLE_C_2, ROLE_C_3);
+
+        for (RoleResource r : ALL_ROLES)
+        {
+            // We already verified ROLE_B and ROLE_C
+            if (r.equals(ROLE_B) || r.equals(ROLE_C))
+                continue;
+
+            // Check the cache entries for the roles without any further grants
+            assertRoleSet(cacheEntries.get(r), r);
+        }
+    }
+
+    @Test
+    public void warmCacheWithEmptyTable()
+    {
+        CassandraRoleManager crm = new CassandraRoleManager();
+        crm.setup();
+        Map<RoleResource, Set<Role>> cacheEntries = crm.bulkLoader().get();
+        assertTrue(cacheEntries.isEmpty());
+    }
+
+    private void assertRoleSet(Set<Role> actual, RoleResource...expected)
+    {
+        assertEquals(expected.length, actual.size());
+
+        for (RoleResource expectedRole : expected)
+            assertTrue(actual.stream().anyMatch(role -> role.resource.equals(expectedRole)));
+    }
 }
diff --git a/test/unit/org/apache/cassandra/auth/CreateAndAlterRoleTest.java b/test/unit/org/apache/cassandra/auth/CreateAndAlterRoleTest.java
new file mode 100644
index 0000000..33ae129
--- /dev/null
+++ b/test/unit/org/apache/cassandra/auth/CreateAndAlterRoleTest.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.auth;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.exceptions.AuthenticationException;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.junit.Assert.assertTrue;
+import static org.mindrot.jbcrypt.BCrypt.gensalt;
+import static org.mindrot.jbcrypt.BCrypt.hashpw;
+
+public class CreateAndAlterRoleTest extends CQLTester
+{
+    @BeforeClass
+    public static void setUpClass()
+    {
+        CQLTester.setUpClass();
+        requireAuthentication();
+        requireNetwork();
+    }
+
+    @Test
+    public void createAlterRoleWithHashedPassword() throws Throwable
+    {
+        String user1 = "hashed_pw_role";
+        String user2 = "pw_role";
+        String plainTextPwd = "super_secret_thing";
+        String plainTextPwd2 = "much_safer_password";
+        String hashedPassword = hashpw(plainTextPwd, gensalt(4));
+        String hashedPassword2 = hashpw(plainTextPwd2, gensalt(4));
+
+        useSuperUser();
+
+        assertInvalidMessage("Invalid hashed password value",
+                             String.format("CREATE ROLE %s WITH login=true AND hashed password='%s'",
+                                           user1, "this_is_an_invalid_hash"));
+        assertInvalidMessage("Options 'password' and 'hashed password' are mutually exclusive",
+                             String.format("CREATE ROLE %s WITH login=true AND password='%s' AND hashed password='%s'",
+                                           user1, plainTextPwd, hashedPassword));
+        executeNet(String.format("CREATE ROLE %s WITH login=true AND hashed password='%s'", user1, hashedPassword));
+        executeNet(String.format("CREATE ROLE %s WITH login=true AND password='%s'", user2, plainTextPwd));
+
+        useUser(user1, plainTextPwd);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+
+        useUser(user2, plainTextPwd);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+
+        useSuperUser();
+
+        assertInvalidMessage("Options 'password' and 'hashed password' are mutually exclusive",
+                             String.format("ALTER ROLE %s WITH password='%s' AND hashed password='%s'",
+                                           user1, plainTextPwd2, hashedPassword2));
+        executeNet(String.format("ALTER ROLE %s WITH password='%s'", user1, plainTextPwd2));
+        executeNet(String.format("ALTER ROLE %s WITH hashed password='%s'", user2, hashedPassword2));
+
+        useUser(user1, plainTextPwd2);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+
+        useUser(user2, plainTextPwd2);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+    }
+
+    @Test
+    public void createAlterUserWithHashedPassword() throws Throwable
+    {
+        String user1 = "hashed_pw_user";
+        String user2 = "pw_user";
+        String plainTextPwd = "super_secret_thing";
+        String plainTextPwd2 = "much_safer_password";
+        String hashedPassword = hashpw(plainTextPwd, gensalt(4));
+        String hashedPassword2 = hashpw(plainTextPwd2, gensalt(4));
+
+        useSuperUser();
+
+        assertInvalidMessage("Invalid hashed password value",
+                             String.format("CREATE USER %s WITH hashed password '%s'",
+                                           user1, "this_is_an_invalid_hash"));
+        executeNet(String.format("CREATE USER %s WITH hashed password '%s'", user1, hashedPassword));
+        executeNet(String.format("CREATE USER %s WITH password '%s'",  user2, plainTextPwd));
+
+        useUser(user1, plainTextPwd);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+
+        useUser(user2, plainTextPwd);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+
+        useSuperUser();
+
+        executeNet(String.format("ALTER USER %s WITH password '%s'", user1, plainTextPwd2));
+        executeNet(String.format("ALTER USER %s WITH hashed password '%s'", user2, hashedPassword2));
+
+        useUser(user1, plainTextPwd2);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+
+        useUser(user2, plainTextPwd2);
+
+        executeNetWithAuthSpin("SELECT key FROM system.local");
+    }
+
+    /**
+     * Role creations and alterations may take some time to become effective, so the query is
+     * retried until it no longer fails with an AuthenticationException.
+     *
+     * @param query the query to execute
+     */
+    void executeNetWithAuthSpin(String query)
+    {
+        Util.spinAssertEquals(true, () -> {
+            try
+            {
+                executeNet(query);
+                return true;
+            }
+            catch (Throwable e)
+            {
+                assertTrue("Unexpected exception: " + e, e instanceof AuthenticationException);
+                reinitializeNetwork();
+                return false;
+            }
+        }, 10);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/auth/FunctionResourceTest.java b/test/unit/org/apache/cassandra/auth/FunctionResourceTest.java
index 8da811d..fa4421b 100644
--- a/test/unit/org/apache/cassandra/auth/FunctionResourceTest.java
+++ b/test/unit/org/apache/cassandra/auth/FunctionResourceTest.java
@@ -111,7 +111,6 @@
         String expected = String.format("%s is not a valid function resource name. It must end with \"[]\"", invalidInput);
         assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> FunctionResource.fromName(invalidInput))
                                                                  .withMessage(expected);
-        ;
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/auth/GrantAndRevokeTest.java b/test/unit/org/apache/cassandra/auth/GrantAndRevokeTest.java
new file mode 100644
index 0000000..5c1c2a0
--- /dev/null
+++ b/test/unit/org/apache/cassandra/auth/GrantAndRevokeTest.java
@@ -0,0 +1,388 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.auth;
+
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.ResultSet;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.junit.Assert.assertTrue;
+
+public class GrantAndRevokeTest extends CQLTester
+{
+    private static final String user = "user";
+    private static final String pass = "12345";
+
+    @BeforeClass
+    public static void setUpClass()
+    {
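+        // A permissions validity of 0 keeps permission changes from being cached, so the grants and
+        // revokes issued in these tests are visible without waiting for cache expiry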
+        DatabaseDescriptor.setPermissionsValidity(0);
+        CQLTester.setUpClass();
+        requireAuthentication();
+        requireNetwork();
+    }
+
+    @After
+    public void tearDown() throws Throwable
+    {
+        useSuperUser();
+        executeNet("DROP ROLE " + user);
+    }
+
+    @Test
+    public void testGrantedKeyspace() throws Throwable
+    {
+        useSuperUser();
+
+        executeNet(String.format("CREATE ROLE %s WITH LOGIN = TRUE AND password='%s'", user, pass));
+        executeNet("GRANT CREATE ON KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
+        String table = KEYSPACE_PER_TEST + '.' + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
+        String index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
+        String type = KEYSPACE_PER_TEST + '.' + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
+        String mv = KEYSPACE_PER_TEST + ".ks_mv_01";
+        executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
+
+        useUser(user, pass);
+
+        // ALTER and DROP tables created by somebody else
+        // Spin assert for effective auth changes.
+        final String spinAssertTable = table;
+        Util.spinAssertEquals(false, () -> {
+            try
+            {
+                assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable + "> or any of its parents",
+                                        formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
+            }
+            catch(Throwable e)
+            {
+                return true;
+            }
+            return false;
+        }, 10);
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1"));
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
+        assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
+        assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "ALTER TYPE " + type + " ADD c bigint");
+        assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "DROP TYPE " + type);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP MATERIALIZED VIEW " + mv);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP INDEX " + index);
+
+        useSuperUser();
+
+        executeNet("GRANT ALTER, DROP, SELECT, MODIFY ON KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
+
+        useUser(user, pass);
+
+        // Spin assert for effective auth changes.
+        Util.spinAssertEquals(false, () -> {
+            try
+            {
+                executeNet("ALTER KEYSPACE " + KEYSPACE_PER_TEST + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}");
+            }
+            catch(Throwable e)
+            {
+                return true;
+            }
+            return false;
+        }, 10);
+
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
+        assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
+        assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
+        executeNet("DROP MATERIALIZED VIEW " + mv);
+        executeNet("DROP INDEX " + index);
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
+        executeNet("ALTER TYPE " + type + " ADD c bigint");
+        executeNet("DROP TYPE " + type);
+
+        // calling createTableName to create a new table name that will be used by formatQuery
+        table = createTableName();
+        type = KEYSPACE_PER_TEST + "." + createTypeName();
+        mv = KEYSPACE_PER_TEST + ".ks_mv_02";
+        executeNet("CREATE TYPE " + type + " (a int, b text)");
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))"));
+        executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
+        assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
+        assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
+        executeNet("DROP MATERIALIZED VIEW " + mv);
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
+        executeNet("ALTER TYPE " + type + " ADD c bigint");
+        executeNet("DROP TYPE " + type);
+
+        useSuperUser();
+
+        executeNet("REVOKE ALTER, DROP, MODIFY, SELECT ON KEYSPACE " + KEYSPACE_PER_TEST + " FROM " + user);
+
+        table = KEYSPACE_PER_TEST + "." + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
+        type = KEYSPACE_PER_TEST + "." + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
+        index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
+        mv = KEYSPACE_PER_TEST + ".ks_mv_03";
+        executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
+
+        useUser(user, pass);
+
+        // Spin assert for effective auth changes.
+        final String spinAssertTable2 = table;
+        Util.spinAssertEquals(false, () -> {
+            try
+            {
+                assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable2 + "> or any of its parents",
+                                        "INSERT INTO " + spinAssertTable2 + " (pk, ck, val, val_2) VALUES (1, 1, 1, '1')");
+            }
+            catch(Throwable e)
+            {
+                return true;
+            }
+            return false;
+        }, 10);
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                "UPDATE " + table + " SET val = 1 WHERE pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                "DELETE FROM " + table + " WHERE pk = 1 AND ck = 2");
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                "SELECT * FROM " + table + " WHERE pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                "TRUNCATE TABLE " + table);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE " + table + " ADD val_3 int"));
+        assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "DROP TABLE " + table));
+        assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "ALTER TYPE " + type + " ADD c bigint");
+        assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "DROP TYPE " + type);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP MATERIALIZED VIEW " + mv);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP INDEX " + index);
+
+    }
+
+    @Test
+    public void testGrantedAllTables() throws Throwable
+    {
+        useSuperUser();
+
+        executeNet(String.format("CREATE ROLE %s WITH LOGIN = TRUE AND password='%s'", user, pass));
+        executeNet("GRANT CREATE ON ALL TABLES IN KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
+        String table = KEYSPACE_PER_TEST + "." + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
+        String index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
+        String type = KEYSPACE_PER_TEST + "." + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
+        String mv = KEYSPACE_PER_TEST + ".alltables_mv_01";
+        executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
+
+        useUser(user, pass);
+
+        // The user has no permissions yet on the tables, types, and views created by somebody else
+        // Spin assert for effective auth changes.
+        final String spinAssertTable = table;
+        Util.spinAssertEquals(false, () -> {
+            try
+            {
+                assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable + "> or any of its parents",
+                                        formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
+            }
+            catch(Throwable e)
+            {
+                return true;
+            }
+            return false;
+        }, 10);
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1"));
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
+        assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
+        assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "ALTER TYPE " + type + " ADD c bigint");
+        assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "DROP TYPE " + type);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP MATERIALIZED VIEW " + mv);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP INDEX " + index);
+
+        useSuperUser();
+
+        executeNet("GRANT ALTER, DROP, SELECT, MODIFY ON ALL TABLES IN KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
+
+        useUser(user, pass);
+
+        // Spin assert for effective auth changes.
+        Util.spinAssertEquals(false, () -> {
+            try
+            {
+                assertUnauthorizedQuery("User user has no ALTER permission on <keyspace " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                        "ALTER KEYSPACE " + KEYSPACE_PER_TEST + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}");
+            }
+            catch(Throwable e)
+            {
+                return true;
+            }
+            return false;
+        }, 10);
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
+        assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
+        assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
+        executeNet("DROP MATERIALIZED VIEW " + mv);
+        executeNet("DROP INDEX " + index);
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
+        executeNet("ALTER TYPE " + type + " ADD c bigint");
+        executeNet("DROP TYPE " + type);
+
+        // calling createTableName to create a new table name that will be used by formatQuery
+        table = createTableName();
+        type = KEYSPACE_PER_TEST + "." + createTypeName();
+        mv = KEYSPACE_PER_TEST + ".alltables_mv_02";
+        executeNet("CREATE TYPE " + type + " (a int, b text)");
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))"));
+        index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
+        executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
+        assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
+        assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
+        executeNet("DROP MATERIALIZED VIEW " + mv);
+        executeNet("DROP INDEX " + index);
+        executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
+        executeNet("ALTER TYPE " + type + " ADD c bigint");
+        executeNet("DROP TYPE " + type);
+
+        useSuperUser();
+
+        executeNet("REVOKE ALTER, DROP, SELECT, MODIFY ON ALL TABLES IN KEYSPACE " + KEYSPACE_PER_TEST + " FROM " + user);
+
+        table = KEYSPACE_PER_TEST + "." + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
+        index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
+        type = KEYSPACE_PER_TEST + "." + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
+        mv = KEYSPACE_PER_TEST + ".alltables_mv_03";
+        executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
+
+        useUser(user, pass);
+
+        // Spin assert for effective auth changes.
+        final String spinAssertTable2 = table;
+        Util.spinAssertEquals(false, () -> {
+            try
+            {
+                assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable2 + "> or any of its parents",
+                                        "INSERT INTO " + spinAssertTable2 + " (pk, ck, val, val_2) VALUES (1, 1, 1, '1')");
+            }
+            catch(Throwable e)
+            {
+                return true;
+            }
+            return false;
+        }, 10);
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                "UPDATE " + table + " SET val = 1 WHERE pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                "DELETE FROM " + table + " WHERE pk = 1 AND ck = 2");
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                "SELECT * FROM " + table + " WHERE pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents",
+                                "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
+        assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents",
+                                "TRUNCATE TABLE " + table);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE " + table + " ADD val_3 int"));
+        assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents",
+                                formatQuery(KEYSPACE_PER_TEST, "DROP TABLE " + table));
+        assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "ALTER TYPE " + type + " ADD c bigint");
+        assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents",
+                                "DROP TYPE " + type);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP MATERIALIZED VIEW " + mv);
+        assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents",
+                                "DROP INDEX " + index);
+    }
+
+    @Test
+    public void testWarnings() throws Throwable
+    {
+        useSuperUser();
+
+        executeNet("CREATE KEYSPACE revoke_yeah WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}");
+        executeNet("CREATE TABLE revoke_yeah.t1 (id int PRIMARY KEY, val text)");
+        executeNet("CREATE USER '" + user + "' WITH PASSWORD '" + pass + "'");
+
+        ResultSet res = executeNet("REVOKE CREATE ON KEYSPACE revoke_yeah FROM " + user);
+        assertWarningsContain(res.getExecutionInfo().getWarnings(), "Role '" + user + "' was not granted CREATE on <keyspace revoke_yeah>");
+
+        res = executeNet("GRANT SELECT ON KEYSPACE revoke_yeah TO " + user);
+        assertTrue(res.getExecutionInfo().getWarnings().isEmpty());
+
+        res = executeNet("GRANT SELECT ON KEYSPACE revoke_yeah TO " + user);
+        assertWarningsContain(res.getExecutionInfo().getWarnings(), "Role '" + user + "' was already granted SELECT on <keyspace revoke_yeah>");
+
+        res = executeNet("REVOKE SELECT ON TABLE revoke_yeah.t1 FROM " + user);
+        assertWarningsContain(res.getExecutionInfo().getWarnings(), "Role '" + user + "' was not granted SELECT on <table revoke_yeah.t1>");
+
+        res = executeNet("REVOKE SELECT, MODIFY ON KEYSPACE revoke_yeah FROM " + user);
+        assertWarningsContain(res.getExecutionInfo().getWarnings(), "Role '" + user + "' was not granted MODIFY on <keyspace revoke_yeah>");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java b/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java
index 8cd74d8..e1033ff 100644
--- a/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java
+++ b/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java
@@ -19,35 +19,58 @@
 
 
 import java.nio.charset.StandardCharsets;
+import java.util.Map;
 
-import com.google.common.collect.Iterables;
-import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.mindrot.jbcrypt.BCrypt;
+
 import com.datastax.driver.core.Authenticator;
 import com.datastax.driver.core.EndPoint;
 import com.datastax.driver.core.PlainTextAuthProvider;
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.exceptions.AuthenticationException;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
 
-import static org.apache.cassandra.auth.CassandraRoleManager.*;
-import static org.apache.cassandra.auth.PasswordAuthenticator.*;
+import static org.apache.cassandra.auth.AuthTestUtils.ALL_ROLES;
+import static org.apache.cassandra.auth.CassandraRoleManager.DEFAULT_SUPERUSER_PASSWORD;
+import static org.apache.cassandra.auth.CassandraRoleManager.getGensaltLogRounds;
+import static org.apache.cassandra.auth.PasswordAuthenticator.SaslNegotiator;
+import static org.apache.cassandra.auth.PasswordAuthenticator.checkpw;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.mindrot.jbcrypt.BCrypt.hashpw;
 import static org.mindrot.jbcrypt.BCrypt.gensalt;
+import static org.mindrot.jbcrypt.BCrypt.hashpw;
+
+import static org.apache.cassandra.auth.CassandraRoleManager.GENSALT_LOG2_ROUNDS_PROPERTY;
 
 public class PasswordAuthenticatorTest extends CQLTester
 {
+    private final static PasswordAuthenticator authenticator = new PasswordAuthenticator();
 
-    private static PasswordAuthenticator authenticator = new PasswordAuthenticator();
+    @BeforeClass
+    public static void setupClass() throws Exception
+    {
+        SchemaLoader.loadSchema();
+        DatabaseDescriptor.daemonInitialization();
+        StorageService.instance.initServer(0);
+    }
+
+    @Before
+    public void setup() throws Exception
+    {
+        ColumnFamilyStore.getIfExists(SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLES).truncateBlocking();
+        ColumnFamilyStore.getIfExists(SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_MEMBERS).truncateBlocking();
+    }
 
     @Test
     public void testCheckpw()
@@ -112,7 +135,6 @@
         }
     }
 
-
     @Test(expected = AuthenticationException.class)
     public void testEmptyUsername()
     {
@@ -161,18 +183,28 @@
         negotiator.getAuthenticatedUser();
     }
 
-    @BeforeClass
-    public static void setUp()
+    @Test
+    public void warmCacheLoadsAllEntriesFromTables()
     {
-        SchemaLoader.createKeyspace(SchemaConstants.AUTH_KEYSPACE_NAME,
-                                    KeyspaceParams.simple(1),
-                                    Iterables.toArray(AuthKeyspace.metadata().tables, TableMetadata.class));
-        authenticator.setup();
+        IRoleManager roleManager = new AuthTestUtils.LocalCassandraRoleManager();
+        roleManager.setup();
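+        // create each test role with a distinct password so the warmed entries can be checked against their bcrypt hashes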
+        for (RoleResource r : ALL_ROLES)
+        {
+            RoleOptions options = new RoleOptions();
+            options.setOption(IRoleManager.Option.PASSWORD, "hash_for_" + r.getRoleName());
+            roleManager.createRole(AuthenticatedUser.ANONYMOUS_USER, r, options);
+        }
+
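+        // the bulk loader used to warm the credentials cache should return one hashed entry per role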
+        Map<String, String> cacheEntries = authenticator.bulkLoader().get();
+
+        assertEquals(ALL_ROLES.length, cacheEntries.size());
+        cacheEntries.forEach((username, hash) -> assertTrue(BCrypt.checkpw("hash_for_" + username, hash)));
     }
 
-    @AfterClass
-    public static void tearDown()
+    @Test
+    public void warmCacheWithEmptyTable()
     {
-        schemaChange("DROP KEYSPACE " + SchemaConstants.AUTH_KEYSPACE_NAME);
+        Map<String, String> cacheEntries = authenticator.bulkLoader().get();
+        assertTrue(cacheEntries.isEmpty());
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/auth/ResourcesTest.java b/test/unit/org/apache/cassandra/auth/ResourcesTest.java
index bb279d9..ca4d3dc 100644
--- a/test/unit/org/apache/cassandra/auth/ResourcesTest.java
+++ b/test/unit/org/apache/cassandra/auth/ResourcesTest.java
@@ -50,6 +50,9 @@
         assertEquals(DataResource.keyspace("ks1"), Resources.fromName("data/ks1"));
         assertEquals("data/ks1", DataResource.keyspace("ks1").getName());
 
+        assertEquals(DataResource.allTables("ks1"), Resources.fromName("data/ks1/*"));
+        assertEquals("data/ks1/*", DataResource.allTables("ks1").getName());
+
         assertEquals(DataResource.table("ks1", "t1"), Resources.fromName("data/ks1/t1"));
         assertEquals("data/ks1/t1", DataResource.table("ks1", "t1").getName());
     }
diff --git a/test/unit/org/apache/cassandra/auth/RoleOptionsTest.java b/test/unit/org/apache/cassandra/auth/RoleOptionsTest.java
index 6dea2b5..8a224eb 100644
--- a/test/unit/org/apache/cassandra/auth/RoleOptionsTest.java
+++ b/test/unit/org/apache/cassandra/auth/RoleOptionsTest.java
@@ -52,10 +52,23 @@
         assertInvalidOptions(opts, "Invalid value for property 'SUPERUSER'. It must be a boolean");
 
         opts = new RoleOptions();
+        opts.setOption(IRoleManager.Option.HASHED_PASSWORD, 99);
+        assertInvalidOptions(opts, "Invalid value for property 'HASHED_PASSWORD'. It must be a string");
+
+        opts = new RoleOptions();
+        opts.setOption(IRoleManager.Option.HASHED_PASSWORD, "invalid_hash");
+        assertInvalidOptions(opts, "Invalid hashed password value. Please use jBcrypt.");
+
+        opts = new RoleOptions();
         opts.setOption(IRoleManager.Option.OPTIONS, false);
         assertInvalidOptions(opts, "Invalid value for property 'OPTIONS'. It must be a map");
 
         opts = new RoleOptions();
+        opts.setOption(IRoleManager.Option.PASSWORD, "abc");
+        opts.setOption(IRoleManager.Option.HASHED_PASSWORD, "$2a$10$JSJEMFm6GeaW9XxT5JIheuEtPvat6i7uKbnTcxX3c1wshIIsGyUtG");
+        assertInvalidOptions(opts, "Properties 'PASSWORD' and 'HASHED_PASSWORD' are mutually exclusive");
+
+        opts = new RoleOptions();
         opts.setOption(IRoleManager.Option.LOGIN, true);
         opts.setOption(IRoleManager.Option.SUPERUSER, false);
         opts.setOption(IRoleManager.Option.PASSWORD, "test");
@@ -111,7 +124,7 @@
         }
         catch (InvalidRequestException e)
         {
-            assertTrue(e.getMessage().equals(message));
+            assertEquals(message, e.getMessage());
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/auth/RoleTestUtils.java b/test/unit/org/apache/cassandra/auth/RoleTestUtils.java
deleted file mode 100644
index bcbdf83..0000000
--- a/test/unit/org/apache/cassandra/auth/RoleTestUtils.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.auth;
-
-import java.util.concurrent.Callable;
-
-import org.apache.cassandra.cql3.QueryOptions;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.cql3.statements.BatchStatement;
-import org.apache.cassandra.cql3.statements.SelectStatement;
-import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.exceptions.RequestExecutionException;
-import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.service.QueryState;
-import org.apache.cassandra.transport.messages.ResultMessage;
-
-
-public class RoleTestUtils
-{
-
-    public static final RoleResource ROLE_A = RoleResource.role("role_a");
-    public static final RoleResource ROLE_B = RoleResource.role("role_b");
-    public static final RoleResource ROLE_B_1 = RoleResource.role("role_b_1");
-    public static final RoleResource ROLE_B_2 = RoleResource.role("role_b_2");
-    public static final RoleResource ROLE_B_3 = RoleResource.role("role_b_3");
-    public static final RoleResource ROLE_C = RoleResource.role("role_c");
-    public static final RoleResource ROLE_C_1 = RoleResource.role("role_c_1");
-    public static final RoleResource ROLE_C_2 = RoleResource.role("role_c_2");
-    public static final RoleResource ROLE_C_3 = RoleResource.role("role_c_3");
-    public static final RoleResource[] ALL_ROLES  = new RoleResource[] {ROLE_A,
-                                                                        ROLE_B, ROLE_B_1, ROLE_B_2, ROLE_B_3,
-                                                                        ROLE_C, ROLE_C_1, ROLE_C_2, ROLE_C_3};
-    /**
-     * This just extends the internal IRoleManager implementation to ensure that
-     * all access to underlying tables is made via
-     * QueryProcessor.executeOnceInternal/CQLStatement.executeInternal and not
-     * StorageProxy so that it can be used in unit tests.
-     */
-    public static class LocalCassandraRoleManager extends CassandraRoleManager
-    {
-        @Override
-        ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
-        {
-            return statement.executeLocally(QueryState.forInternalCalls(), options);
-        }
-
-        @Override
-        UntypedResultSet process(String query, ConsistencyLevel consistencyLevel)
-        {
-            return QueryProcessor.executeInternal(query);
-        }
-
-        @Override
-        protected void scheduleSetupTask(final Callable<Void> setupTask)
-        {
-            // skip data migration or setting up default role for tests
-        }
-    }
-
-    public static class LocalCassandraAuthorizer extends CassandraAuthorizer
-    {
-        @Override
-        ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
-        {
-            return statement.executeLocally(QueryState.forInternalCalls(), options);
-        }
-
-        @Override
-        UntypedResultSet process(String query) throws RequestExecutionException
-        {
-            return QueryProcessor.executeInternal(query);
-        }
-
-        @Override
-        void processBatch(BatchStatement statement)
-        {
-            statement.executeLocally(QueryState.forInternalCalls(), QueryOptions.DEFAULT);
-        }
-    }
-
-    public static class LocalCassandraNetworkAuthorizer extends CassandraNetworkAuthorizer
-    {
-        @Override
-        ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
-        {
-            return statement.executeLocally(QueryState.forInternalCalls(), options);
-        }
-
-        @Override
-        void process(String query)
-        {
-            QueryProcessor.executeInternal(query);
-        }
-    }
-
-    public static void grantRolesTo(IRoleManager roleManager, RoleResource grantee, RoleResource...granted)
-    {
-        for(RoleResource toGrant : granted)
-            roleManager.grantRole(AuthenticatedUser.ANONYMOUS_USER, toGrant, grantee);
-    }
-
-    public static long getReadCount()
-    {
-        ColumnFamilyStore rolesTable = Keyspace.open(SchemaConstants.AUTH_KEYSPACE_NAME).getColumnFamilyStore(AuthKeyspace.ROLES);
-        return rolesTable.metric.readLatency.latency.getCount();
-    }
-
-    public static RoleOptions getLoginRoleOptions()
-    {
-        RoleOptions roleOptions = new RoleOptions();
-        roleOptions.setOption(IRoleManager.Option.SUPERUSER, false);
-        roleOptions.setOption(IRoleManager.Option.LOGIN, true);
-        roleOptions.setOption(IRoleManager.Option.PASSWORD, "ignored");
-        return roleOptions;
-    }
-}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/auth/RolesTest.java b/test/unit/org/apache/cassandra/auth/RolesTest.java
index 94322a7..5eced33 100644
--- a/test/unit/org/apache/cassandra/auth/RolesTest.java
+++ b/test/unit/org/apache/cassandra/auth/RolesTest.java
@@ -21,15 +21,20 @@
 import java.util.Set;
 
 import com.google.common.collect.Iterables;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
 
-import static org.apache.cassandra.auth.RoleTestUtils.*;
+import static org.apache.cassandra.auth.AuthTestUtils.ALL_ROLES;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_C;
+import static org.apache.cassandra.auth.AuthTestUtils.getRolesReadCount;
+import static org.apache.cassandra.auth.AuthTestUtils.grantRolesTo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -40,56 +45,75 @@
     public static void setupClass()
     {
         SchemaLoader.prepareServer();
-        // create the system_auth keyspace so the IRoleManager can function as normal
-        SchemaLoader.createKeyspace(SchemaConstants.AUTH_KEYSPACE_NAME,
-                                    KeyspaceParams.simple(1),
-                                    Iterables.toArray(AuthKeyspace.metadata().tables, TableMetadata.class));
+        IRoleManager roleManager = new AuthTestUtils.LocalCassandraRoleManager();
+        SchemaLoader.setupAuth(roleManager,
+                               new AuthTestUtils.LocalPasswordAuthenticator(),
+                               new AuthTestUtils.LocalCassandraAuthorizer(),
+                               new AuthTestUtils.LocalCassandraNetworkAuthorizer());
 
-        IRoleManager roleManager = new LocalCassandraRoleManager();
-        roleManager.setup();
-        Roles.initRolesCache(roleManager, () -> true);
         for (RoleResource role : ALL_ROLES)
             roleManager.createRole(AuthenticatedUser.ANONYMOUS_USER, role, new RoleOptions());
         grantRolesTo(roleManager, ROLE_A, ROLE_B, ROLE_C);
+
+        roleManager.setup();
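+        // register the auth caches so the tests below can observe cached role reads via the read count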
+        AuthCacheService.initializeAndRegisterCaches();
     }
 
     @Test
     public void superuserStatusIsCached()
     {
         boolean hasSuper = Roles.hasSuperuserStatus(ROLE_A);
-        long count = getReadCount();
+        long count = getRolesReadCount();
 
         assertEquals(hasSuper, Roles.hasSuperuserStatus(ROLE_A));
-        assertEquals(count, getReadCount());
+        assertEquals(count, getRolesReadCount());
     }
 
     @Test
     public void loginPrivilegeIsCached()
     {
         boolean canLogin = Roles.canLogin(ROLE_A);
-        long count = getReadCount();
+        long count = getRolesReadCount();
 
         assertEquals(canLogin, Roles.canLogin(ROLE_A));
-        assertEquals(count, getReadCount());
+        assertEquals(count, getRolesReadCount());
     }
 
     @Test
     public void grantedRoleDetailsAreCached()
     {
         Iterable<Role> granted = Roles.getRoleDetails(ROLE_A);
-        long count = getReadCount();
+        long count = getRolesReadCount();
 
         assertTrue(Iterables.elementsEqual(granted, Roles.getRoleDetails(ROLE_A)));
-        assertEquals(count, getReadCount());
+        assertEquals(count, getRolesReadCount());
     }
 
     @Test
     public void grantedRoleResourcesAreCached()
     {
         Set<RoleResource> granted = Roles.getRoles(ROLE_A);
-        long count = getReadCount();
+        long count = getRolesReadCount();
 
         assertEquals(granted, Roles.getRoles(ROLE_A));
-        assertEquals(count, getReadCount());
+        assertEquals(count, getRolesReadCount());
+    }
+
+    @Test
+    public void confirmSuperUserConsistency()
+    {
+        // Confirm special treatment of superuser
+        ConsistencyLevel readLevel = CassandraRoleManager.consistencyForRoleRead(CassandraRoleManager.DEFAULT_SUPERUSER_NAME);
+        Assert.assertEquals(CassandraRoleManager.DEFAULT_SUPERUSER_CONSISTENCY_LEVEL, readLevel);
+
+        ConsistencyLevel writeLevel = CassandraRoleManager.consistencyForRoleWrite(CassandraRoleManager.DEFAULT_SUPERUSER_NAME);
+        Assert.assertEquals(CassandraRoleManager.DEFAULT_SUPERUSER_CONSISTENCY_LEVEL, writeLevel);
+
+        // Confirm standard config-based treatment of non-superuser roles
+        ConsistencyLevel nonPrivReadLevel = CassandraRoleManager.consistencyForRoleRead("non-privileged");
+        Assert.assertEquals(DatabaseDescriptor.getAuthReadConsistencyLevel(), nonPrivReadLevel);
+
+        ConsistencyLevel nonPrivWriteLevel = CassandraRoleManager.consistencyForRoleWrite("non-privileged");
+        Assert.assertEquals(DatabaseDescriptor.getAuthWriteConsistencyLevel(), nonPrivWriteLevel);
     }
 }
diff --git a/test/unit/org/apache/cassandra/auth/StubAuthorizer.java b/test/unit/org/apache/cassandra/auth/StubAuthorizer.java
index 8e0d141..e9f7d22 100644
--- a/test/unit/org/apache/cassandra/auth/StubAuthorizer.java
+++ b/test/unit/org/apache/cassandra/auth/StubAuthorizer.java
@@ -21,6 +21,8 @@
 import java.util.*;
 import java.util.stream.Collectors;
 
+import com.google.common.collect.Sets;
+
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.exceptions.RequestValidationException;
@@ -42,34 +44,36 @@
         return perms != null ? perms : Collections.emptySet();
     }
 
-    public void grant(AuthenticatedUser performer,
-                      Set<Permission> permissions,
-                      IResource resource,
-                      RoleResource grantee) throws RequestValidationException, RequestExecutionException
+    public Set<Permission> grant(AuthenticatedUser performer,
+                                 Set<Permission> permissions,
+                                 IResource resource,
+                                 RoleResource grantee) throws RequestValidationException, RequestExecutionException
     {
         Pair<String, IResource> key = Pair.create(grantee.getRoleName(), resource);
-        Set<Permission> perms = userPermissions.get(key);
-        if (null == perms)
-        {
-            perms = new HashSet<>();
-            userPermissions.put(key, perms);
-        }
-        perms.addAll(permissions);
+        Set<Permission> oldPermissions = userPermissions.computeIfAbsent(key, k -> Collections.emptySet());
+        Set<Permission> nonExisting = Sets.difference(permissions, oldPermissions);
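+        // persist and report back only the permissions that were not already granted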
+
+        if (!nonExisting.isEmpty())
+            userPermissions.put(key, Sets.union(oldPermissions, nonExisting));
+
+        return nonExisting;
     }
 
-    public void revoke(AuthenticatedUser performer,
-                       Set<Permission> permissions,
-                       IResource resource,
-                       RoleResource revokee) throws RequestValidationException, RequestExecutionException
+    public Set<Permission> revoke(AuthenticatedUser performer,
+                                  Set<Permission> permissions,
+                                  IResource resource,
+                                  RoleResource revokee) throws RequestValidationException, RequestExecutionException
     {
         Pair<String, IResource> key = Pair.create(revokee.getRoleName(), resource);
-        Set<Permission> perms = userPermissions.get(key);
-        if (null != perms)
-        {
-            perms.removeAll(permissions);
-            if (perms.isEmpty())
-                userPermissions.remove(key);
-        }
+        Set<Permission> oldPermissions = userPermissions.computeIfAbsent(key, k -> Collections.emptySet());
+        Set<Permission> existing = Sets.intersection(permissions, oldPermissions);
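+        // report back only the permissions that were actually granted before this revoke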
+
+        if (existing.isEmpty())
+            userPermissions.remove(key);
+        else
+            userPermissions.put(key, Sets.difference(oldPermissions, existing));
+
+        return existing;
     }
 
     public Set<PermissionDetails> list(AuthenticatedUser performer,
diff --git a/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java b/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java
index b86b0d3..1fb3735 100644
--- a/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java
+++ b/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java
@@ -51,10 +51,13 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.TimeUUID.Generator.atUnixMillis;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.*;
 
 public class BatchlogManagerTest
@@ -95,7 +98,7 @@
         TokenMetadata metadata = StorageService.instance.getTokenMetadata();
         InetAddressAndPort localhost = InetAddressAndPort.getByName("127.0.0.1");
         metadata.updateNormalToken(Util.token("A"), localhost);
-        metadata.updateHostId(UUIDGen.getTimeUUID(), localhost);
+        metadata.updateHostId(UUID.randomUUID(), localhost);
         Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).truncateBlocking();
     }
 
@@ -146,14 +149,14 @@
             }
 
             long timestamp = i < 50
-                           ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout())
-                           : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
+                           ? (currentTimeMillis() - BatchlogManager.getBatchlogTimeout())
+                           : (currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
 
-            BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations));
+            BatchlogManager.store(Batch.createLocal(atUnixMillis(timestamp, i), timestamp * 1000, mutations));
         }
 
         // Flush the batchlog to disk (see CASSANDRA-6822).
-        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
+        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         assertEquals(100, BatchlogManager.instance.countAllBatches() - initialAllBatches);
         assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
@@ -220,7 +223,7 @@
             List<Mutation> mutations = Lists.newArrayList(mutation1, mutation2);
 
             // Make sure it's ready to be replayed, so adjust the timestamp.
-            long timestamp = System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout();
+            long timestamp = currentTimeMillis() - BatchlogManager.getBatchlogTimeout();
 
             if (i == 500)
                 SystemKeyspace.saveTruncationRecord(Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD2),
@@ -233,11 +236,11 @@
             else
                 timestamp--;
 
-            BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), FBUtilities.timestampMicros(), mutations));
+            BatchlogManager.store(Batch.createLocal(atUnixMillis(timestamp, i), FBUtilities.timestampMicros(), mutations));
         }
 
         // Flush the batchlog to disk (see CASSANDRA-6822).
-        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
+        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         // Force batchlog replay and wait for it to complete.
         BatchlogManager.instance.startBatchlogReplay().get();
@@ -275,8 +278,8 @@
         long initialAllBatches = BatchlogManager.instance.countAllBatches();
         TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD5).metadata();
 
-        long timestamp = (System.currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout(MILLISECONDS) * 2) * 1000;
-        UUID uuid = UUIDGen.getTimeUUID();
+        long timestamp = (currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout(MILLISECONDS) * 2) * 1000;
+        TimeUUID uuid = nextTimeUUID();
 
         // Add a batch with 10 mutations
         List<Mutation> mutations = new ArrayList<>(10);
@@ -307,8 +310,8 @@
         long initialAllBatches = BatchlogManager.instance.countAllBatches();
         TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD5).metadata();
 
-        long timestamp = (System.currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout(MILLISECONDS) * 2) * 1000;
-        UUID uuid = UUIDGen.getTimeUUID();
+        long timestamp = (currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout(MILLISECONDS) * 2) * 1000;
+        TimeUUID uuid = nextTimeUUID();
 
         // Add a batch with 10 mutations
         List<Mutation> mutations = new ArrayList<>(10);
@@ -349,8 +352,8 @@
 
         TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata();
 
-        long timestamp = (System.currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout(MILLISECONDS) * 2) * 1000;
-        UUID uuid = UUIDGen.getTimeUUID();
+        long timestamp = (currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout(MILLISECONDS) * 2) * 1000;
+        TimeUUID uuid = nextTimeUUID();
 
         // Add a batch with 10 mutations
         List<Mutation> mutations = new ArrayList<>(10);
@@ -365,7 +368,7 @@
         assertEquals(1, BatchlogManager.instance.countAllBatches() - initialAllBatches);
 
         // Flush the batchlog to disk (see CASSANDRA-6822).
-        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
+        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         assertEquals(1, BatchlogManager.instance.countAllBatches() - initialAllBatches);
         assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
diff --git a/test/unit/org/apache/cassandra/batchlog/BatchlogTest.java b/test/unit/org/apache/cassandra/batchlog/BatchlogTest.java
index 8fa4afc..194ae91 100644
--- a/test/unit/org/apache/cassandra/batchlog/BatchlogTest.java
+++ b/test/unit/org/apache/cassandra/batchlog/BatchlogTest.java
@@ -22,7 +22,6 @@
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-import java.util.UUID;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -40,9 +39,10 @@
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 
 public class BatchlogTest
@@ -66,7 +66,7 @@
 
         long now = FBUtilities.timestampMicros();
         int version = MessagingService.current_version;
-        UUID uuid = UUIDGen.getTimeUUID();
+        TimeUUID uuid = nextTimeUUID();
 
         List<Mutation> mutations = new ArrayList<>(10);
         for (int i = 0; i < 10; i++)
diff --git a/test/unit/org/apache/cassandra/cache/AutoSavingCacheTest.java b/test/unit/org/apache/cassandra/cache/AutoSavingCacheTest.java
index bb5129a..040409e 100644
--- a/test/unit/org/apache/cassandra/cache/AutoSavingCacheTest.java
+++ b/test/unit/org/apache/cassandra/cache/AutoSavingCacheTest.java
@@ -34,6 +34,8 @@
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class AutoSavingCacheTest
 {
     private static final String KEYSPACE1 = "AutoSavingCacheTest1";
@@ -71,10 +73,10 @@
         for (int i = 0; i < 2; i++)
         {
             ColumnMetadata colDef = ColumnMetadata.regularColumn(cfs.metadata(), ByteBufferUtil.bytes("col1"), AsciiType.instance);
-            RowUpdateBuilder rowBuilder = new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), "key1");
+            RowUpdateBuilder rowBuilder = new RowUpdateBuilder(cfs.metadata(), currentTimeMillis(), "key1");
             rowBuilder.add(colDef, "val1");
             rowBuilder.build().apply();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         Assert.assertEquals(2, cfs.getLiveSSTables().size());
diff --git a/test/unit/org/apache/cassandra/cache/CacheProviderTest.java b/test/unit/org/apache/cassandra/cache/CacheProviderTest.java
index 7ed8a60..8ec536b 100644
--- a/test/unit/org/apache/cassandra/cache/CacheProviderTest.java
+++ b/test/unit/org/apache/cassandra/cache/CacheProviderTest.java
@@ -45,6 +45,7 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -80,7 +81,7 @@
 
     private CachedBTreePartition createPartition()
     {
-        PartitionUpdate update = new RowUpdateBuilder(cfm, System.currentTimeMillis(), "key1")
+        PartitionUpdate update = new RowUpdateBuilder(cfm, currentTimeMillis(), "key1")
                                  .add("col1", "val1")
                                  .buildUpdate();
 
@@ -113,12 +114,12 @@
 
     private void concurrentCase(final CachedBTreePartition partition, final ICache<MeasureableString, IRowCacheEntry> cache) throws InterruptedException
     {
-        final long startTime = System.currentTimeMillis() + 500;
+        final long startTime = currentTimeMillis() + 500;
         Runnable runnable = new Runnable()
         {
             public void run()
             {
-                while (System.currentTimeMillis() < startTime) {}
+                while (currentTimeMillis() < startTime) {}
                 for (int j = 0; j < 1000; j++)
                 {
                     cache.put(key1, partition);
@@ -133,7 +134,7 @@
         List<Thread> threads = new ArrayList<>(100);
         for (int i = 0; i < 100; i++)
         {
-            Thread thread = NamedThreadFactory.createThread(runnable);
+            Thread thread = NamedThreadFactory.createAnonymousThread(runnable);
             threads.add(thread);
             thread.start();
         }
diff --git a/test/unit/org/apache/cassandra/concurrent/AbstractExecutorPlusTest.java b/test/unit/org/apache/cassandra/concurrent/AbstractExecutorPlusTest.java
new file mode 100644
index 0000000..52650ad
--- /dev/null
+++ b/test/unit/org/apache/cassandra/concurrent/AbstractExecutorPlusTest.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
+
+import org.junit.Assert;
+import org.junit.Ignore;
+
+import org.apache.cassandra.utils.Closeable;
+import org.apache.cassandra.utils.WithResources;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.Semaphore;
+
+import static org.apache.cassandra.utils.concurrent.Semaphore.newSemaphore;
+
+@Ignore
+public abstract class AbstractExecutorPlusTest
+{
+    interface Verify<V>
+    {
+        void test(V test) throws Throwable;
+    }
+
+    static <V> Verify<V> ignoreNull(Verify<V> verify)
+    {
+        return test -> { if (test != null) verify.test(test); };
+    }
+
+    public <E extends ExecutorPlus> void testPooled(Supplier<ExecutorBuilder<? extends E>> builders) throws Throwable
+    {
+        testSuccess(builders);
+        testFailure(builders);
+    }
+
+    public <E extends SequentialExecutorPlus> void testSequential(Supplier<ExecutorBuilder<? extends E>> builders) throws Throwable
+    {
+        testSuccess(builders);
+        testFailure(builders);
+        testAtLeastOnce(builders);
+    }
+
+    Runnable wrapSubmit(Runnable submit)
+    {
+        return submit;
+    }
+
+    public <E extends ExecutorPlus> void testSuccess(Supplier<ExecutorBuilder<? extends E>> builders) throws Throwable
+    {
+        testExecution(builders.get().build(), wrapSubmit(() -> {}), ignoreNull(Future::get));
+        testExecution(builders.get().build(), WithResources.none(), wrapSubmit(() -> {}), ignoreNull(Future::get));
+    }
+
+    public <E extends ExecutorPlus> void testFailure(Supplier<ExecutorBuilder<? extends E>> builders) throws Throwable
+    {
+        ExecutorBuilder<? extends E> builder = builders.get();
+        AtomicReference<Throwable> failure = new AtomicReference<>();
+        Thread.UncaughtExceptionHandler ueh = (thread, f) -> failure.set(f);
+        builder.withUncaughtExceptionHandler(ueh);
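+        // each failing submission should surface the OutOfMemoryError via the handler and, when a Future is returned, via its cause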
+        Verify<Future<?>> verify = f -> {
+            int c = 0;
+            while (f == null && failure.get() == null && c++ < 100000)
+                Thread.yield();
+            Assert.assertTrue(failure.get() instanceof OutOfMemoryError);
+            if (f != null)
+                Assert.assertTrue(f.cause() instanceof OutOfMemoryError);
+            failure.set(null);
+        };
+        Runnable submit = wrapSubmit(() -> { throw new OutOfMemoryError(); });
+        testExecution(builder.build(), submit, verify);
+        testExecution(builder.build(), WithResources.none(), submit, verify);
+        testFailGetWithResources(builder.build(), () -> { throw new OutOfMemoryError(); }, verify);
+        testFailCloseWithResources(builder.build(), () -> () -> { throw new OutOfMemoryError(); }, verify);
+    }
+
+    public <E extends SequentialExecutorPlus> void testAtLeastOnce(Supplier<ExecutorBuilder<? extends E>> builders) throws Throwable
+    {
+        ExecutorBuilder<? extends E> builder = builders.get();
+        AtomicReference<Throwable> failure = new AtomicReference<>();
+        Thread.UncaughtExceptionHandler ueh = (thread, f) -> failure.set(f);
+        builder.withUncaughtExceptionHandler(ueh);
+
+        SequentialExecutorPlus exec = builder.build();
+
+        Semaphore enter = newSemaphore(0);
+        Semaphore exit = newSemaphore(0);
+        Semaphore runAfter = newSemaphore(0);
+        SequentialExecutorPlus.AtLeastOnceTrigger trigger;
+        trigger = exec.atLeastOnceTrigger(() -> { enter.release(1); exit.acquireThrowUncheckedOnInterrupt(1); });
+
+        // check runAfter runs immediately
+        trigger.runAfter(() -> runAfter.release(1));
+        Assert.assertTrue(runAfter.tryAcquire(1, 1L, TimeUnit.SECONDS));
+
+        Assert.assertTrue(trigger.trigger());
+        enter.acquire(1);
+        Assert.assertTrue(trigger.trigger());
+        Assert.assertFalse(trigger.trigger());
+        trigger.runAfter(() -> runAfter.release(1));
+        Assert.assertFalse(runAfter.tryAcquire(1, 10L, TimeUnit.MILLISECONDS));
+        exit.release(1);
+        enter.acquire(1);
+        Assert.assertFalse(runAfter.tryAcquire(1, 10L, TimeUnit.MILLISECONDS));
+        Assert.assertTrue(trigger.trigger());
+        Assert.assertFalse(trigger.trigger());
+        exit.release(1);
+        Assert.assertTrue(runAfter.tryAcquire(1, 1L, TimeUnit.SECONDS));
+        exit.release(1);
+
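+        // a trigger whose task throws must report the failure through the uncaught exception handler once sync() completes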
+        trigger = exec.atLeastOnceTrigger(() -> { throw new OutOfMemoryError(); });
+        trigger.trigger();
+        trigger.sync();
+        Assert.assertTrue(failure.get() instanceof OutOfMemoryError);
+    }
+
+    void testExecution(ExecutorPlus e, WithResources withResources, Runnable submit, Verify<Future<?>> verify) throws Throwable
+    {
+        AtomicInteger i = new AtomicInteger();
+        e.execute(() -> { i.incrementAndGet(); return withResources.get(); } , () -> { i.incrementAndGet(); submit.run(); });
+        while (i.get() < 2) Thread.yield();
+        verify.test(null);
+        verify.test(e.submit(() -> { i.incrementAndGet(); return withResources.get(); }, () -> { i.incrementAndGet(); submit.run(); return null; }).await());
+        Assert.assertEquals(4, i.get());
+        verify.test(e.submit(() -> { i.incrementAndGet(); return withResources.get(); }, () -> { i.incrementAndGet(); submit.run(); }).await());
+        Assert.assertEquals(6, i.get());
+        verify.test(e.submit(() -> { i.incrementAndGet(); return withResources.get(); }, () -> { i.incrementAndGet(); submit.run(); }, null).await());
+        Assert.assertEquals(8, i.get());
+    }
+
+    void testExecution(ExecutorPlus e, Runnable submit, Verify<Future<?>> verify) throws Throwable
+    {
+        AtomicInteger i = new AtomicInteger();
+        e.execute(() -> { i.incrementAndGet(); submit.run(); });
+        while (i.get() < 1) Thread.yield();
+        verify.test(null);
+        e.maybeExecuteImmediately(() -> { i.incrementAndGet(); submit.run(); });
+        while (i.get() < 2) Thread.yield();
+        verify.test(null);
+        verify.test(e.submit(() -> { i.incrementAndGet(); submit.run(); return null; }).await());
+        Assert.assertEquals(3, i.get());
+        verify.test(e.submit(() -> { i.incrementAndGet(); submit.run(); }).await());
+        Assert.assertEquals(4, i.get());
+        verify.test(e.submit(() -> { i.incrementAndGet(); submit.run(); }, null).await());
+        Assert.assertEquals(5, i.get());
+    }
+
+    void testFailGetWithResources(ExecutorPlus e, WithResources withResources, Verify<Future<?>> verify) throws Throwable
+    {
+        AtomicInteger i = new AtomicInteger();
+        WithResources countingOnGetResources = () -> { i.incrementAndGet(); return withResources.get(); };
+        AtomicBoolean executed = new AtomicBoolean();
+        e.execute(countingOnGetResources, () -> executed.set(true));
+        while (i.get() < 1) Thread.yield();
+        verify.test(null);
+        Assert.assertFalse(executed.get());
+        verify.test(e.submit(countingOnGetResources, () -> { executed.set(true); return null; } ).await());
+        Assert.assertEquals(2, i.get());
+        Assert.assertFalse(executed.get());
+        verify.test(e.submit(countingOnGetResources, () -> { executed.set(true); }).await());
+        Assert.assertEquals(3, i.get());
+        Assert.assertFalse(executed.get());
+        verify.test(e.submit(countingOnGetResources, () -> { executed.set(true); }, null).await());
+        Assert.assertEquals(4, i.get());
+        Assert.assertFalse(executed.get());
+    }
+
+    void testFailCloseWithResources(ExecutorPlus e, WithResources withResources, Verify<Future<?>> verify) throws Throwable
+    {
+        AtomicInteger i = new AtomicInteger();
+        WithResources countingOnCloseResources = () -> { Closeable close = withResources.get(); return () -> { i.incrementAndGet(); close.close(); }; };
+        e.execute(countingOnCloseResources, i::incrementAndGet);
+        while (i.get() < 2) Thread.yield();
+        verify.test(null);
+        verify.test(e.submit(countingOnCloseResources, () -> { i.incrementAndGet(); return null; } ).await());
+        Assert.assertEquals(4, i.get());
+        verify.test(e.submit(countingOnCloseResources, () -> { i.incrementAndGet(); }).await());
+        Assert.assertEquals(6, i.get());
+        verify.test(e.submit(countingOnCloseResources, () -> { i.incrementAndGet(); }, null).await());
+        Assert.assertEquals(8, i.get());
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutorTest.java b/test/unit/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutorTest.java
index 1aac470..c719d6b 100644
--- a/test/unit/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutorTest.java
+++ b/test/unit/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutorTest.java
@@ -26,13 +26,18 @@
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.junit.Assert;
+
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.service.EmbeddedCassandraService;
 import org.apache.cassandra.service.StorageService;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
 public class DebuggableScheduledThreadPoolExecutorTest
 {
 
@@ -41,16 +46,20 @@
     @BeforeClass
     public static void startup() throws IOException
     {
-        //The DSTPE checks for if we are in the service shutdown hook so
-        //to test it we need to start C* internally.
-        service = new EmbeddedCassandraService();
-        service.start();
+        service = ServerTestUtils.startEmbeddedCassandraService();
+    }
+
+    @AfterClass
+    public static void tearDown()
+    {
+        if (service != null)
+            service.stop();
     }
 
     @Test
     public void testShutdown() throws ExecutionException, InterruptedException, IOException
     {
-        DebuggableScheduledThreadPoolExecutor testPool = new DebuggableScheduledThreadPoolExecutor("testpool");
+        ScheduledExecutorPlus testPool = executorFactory().scheduled("testpool");
 
         final AtomicInteger value = new AtomicInteger(0);
 
diff --git a/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java b/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java
index 43c0fdf..061957d 100644
--- a/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java
+++ b/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java
@@ -21,18 +21,12 @@
  */
 
 
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RunnableFuture;
-import java.util.concurrent.TimeUnit;
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
 
-import com.google.common.base.Throwables;
 import com.google.common.net.InetAddresses;
-import com.google.common.util.concurrent.ListenableFutureTask;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -45,10 +39,15 @@
 import org.apache.cassandra.tracing.TraceStateImpl;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.FailingRunnable;
 import org.apache.cassandra.utils.WrappedRunnable;
-import org.assertj.core.api.Assertions;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.assertj.core.api.Assertions.assertThat;
 
 public class DebuggableThreadPoolExecutorTest
 {
@@ -61,12 +60,7 @@
     @Test
     public void testSerialization()
     {
-        LinkedBlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>(1);
-        DebuggableThreadPoolExecutor executor = new DebuggableThreadPoolExecutor(1,
-                                                                                 Integer.MAX_VALUE,
-                                                                                 TimeUnit.MILLISECONDS,
-                                                                                 q,
-                                                                                 new NamedThreadFactory("TEST"));
+        ExecutorPlus executor = executorFactory().configureSequential("TEST").withQueueLimit(1).build();
         WrappedRunnable runnable = new WrappedRunnable()
         {
             public void runMayThrow() throws InterruptedException
@@ -74,22 +68,23 @@
                 Thread.sleep(50);
             }
         };
-        long start = System.nanoTime();
+        long start = nanoTime();
         for (int i = 0; i < 10; i++)
         {
             executor.execute(runnable);
         }
-        assert q.size() > 0 : q.size();
+        assert executor.getPendingTaskCount() > 0 : executor.getPendingTaskCount();
         while (executor.getCompletedTaskCount() < 10)
             continue;
-        long delta = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+        long delta = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
         assert delta >= 9 * 50 : delta;
     }
 
     @Test
     public void testLocalStatePropagation()
     {
-        DebuggableThreadPoolExecutor executor = DebuggableThreadPoolExecutor.createWithFixedPoolSize("TEST", 1);
+        ExecutorPlus executor = executorFactory().localAware().sequential("TEST");
+        assertThat(executor).isInstanceOf(LocalAwareExecutorPlus.class);
         try
         {
             checkLocalStateIsPropagated(executor);
@@ -100,7 +95,22 @@
         }
     }
 
-    public static void checkLocalStateIsPropagated(LocalAwareExecutorService executor)
+    @Test
+    public void testNoLocalStatePropagation() throws InterruptedException
+    {
+        ExecutorPlus executor = executorFactory().sequential("TEST");
+        assertThat(executor).isNotInstanceOf(LocalAwareExecutorPlus.class);
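+        // without localAware(), client warnings and trace state must not propagate into executor tasks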
+        try
+        {
+            checkLocalStateIsPropagated(executor);
+        }
+        finally
+        {
+            executor.shutdown();
+        }
+    }
+
+    public static void checkLocalStateIsPropagated(ExecutorPlus executor)
     {
         checkClientWarningsArePropagated(executor, () -> executor.execute(() -> ClientWarn.instance.warn("msg")));
         checkClientWarningsArePropagated(executor, () -> executor.submit(() -> ClientWarn.instance.warn("msg")));
@@ -119,9 +129,9 @@
         }));
     }
 
-    public static void checkClientWarningsArePropagated(LocalAwareExecutorService executor, Runnable schedulingTask) {
+    public static void checkClientWarningsArePropagated(ExecutorPlus executor, Runnable schedulingTask) {
         ClientWarn.instance.captureWarnings();
-        Assertions.assertThat(ClientWarn.instance.getWarnings()).isNullOrEmpty();
+        assertThat(ClientWarn.instance.getWarnings()).isNullOrEmpty();
 
         ClientWarn.instance.warn("msg0");
         long initCompletedTasks = executor.getCompletedTaskCount();
@@ -129,15 +139,18 @@
         while (executor.getCompletedTaskCount() == initCompletedTasks) Uninterruptibles.sleepUninterruptibly(10, MILLISECONDS);
         ClientWarn.instance.warn("msg1");
 
-        Assertions.assertThat(ClientWarn.instance.getWarnings()).containsExactlyInAnyOrder("msg0", "msg", "msg1");
+        if (executor instanceof LocalAwareExecutorPlus)
+            assertThat(ClientWarn.instance.getWarnings()).containsExactlyInAnyOrder("msg0", "msg", "msg1");
+        else
+            assertThat(ClientWarn.instance.getWarnings()).containsExactlyInAnyOrder("msg0", "msg1");
     }
 
-    public static void checkTracingIsPropagated(LocalAwareExecutorService executor, Runnable schedulingTask) {
+    public static void checkTracingIsPropagated(ExecutorPlus executor, Runnable schedulingTask) {
         ClientWarn.instance.captureWarnings();
-        Assertions.assertThat(ClientWarn.instance.getWarnings()).isNullOrEmpty();
+        assertThat(ClientWarn.instance.getWarnings()).isNullOrEmpty();
 
         ConcurrentLinkedQueue<String> q = new ConcurrentLinkedQueue<>();
-        Tracing.instance.set(new TraceState(FBUtilities.getLocalAddressAndPort(), UUID.randomUUID(), Tracing.TraceType.NONE)
+        Tracing.instance.set(new TraceState(FBUtilities.getLocalAddressAndPort(), nextTimeUUID(), Tracing.TraceType.NONE)
         {
             @Override
             protected void traceImpl(String message)
@@ -151,27 +164,30 @@
         while (executor.getCompletedTaskCount() == initCompletedTasks) Uninterruptibles.sleepUninterruptibly(10, MILLISECONDS);
         Tracing.trace("msg1");
 
-        Assertions.assertThat(q.toArray()).containsExactlyInAnyOrder("msg0", "msg", "msg1");
+        if (executor instanceof LocalAwareExecutorPlus)
+            assertThat(q.toArray()).containsExactlyInAnyOrder("msg0", "msg", "msg1");
+        else
+            assertThat(q.toArray()).containsExactlyInAnyOrder("msg0", "msg1");
     }
 
     @Test
     public void testExecuteFutureTaskWhileTracing()
     {
-        LinkedBlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>(1);
-        DebuggableThreadPoolExecutor executor = new DebuggableThreadPoolExecutor(1,
-                                                                                 Integer.MAX_VALUE,
-                                                                                 TimeUnit.MILLISECONDS,
-                                                                                 q,
-                                                                                 new NamedThreadFactory("TEST"));
+        SettableUncaughtExceptionHandler ueh = new SettableUncaughtExceptionHandler();
+        ExecutorPlus executor = executorFactory()
+                                .localAware()
+                                .configureSequential("TEST")
+                                .withUncaughtExceptionHandler(ueh)
+                                .withQueueLimit(1).build();
         Runnable test = () -> executor.execute(failingTask());
         try
         {
             // make sure the non-tracing case works
-            Throwable cause = catchUncaughtExceptions(test);
+            Throwable cause = catchUncaughtExceptions(ueh, test);
             Assert.assertEquals(DebuggingThrowsException.class, cause.getClass());
 
             // tracing should have the same semantics
-            cause = catchUncaughtExceptions(() -> withTracing(test));
+            cause = catchUncaughtExceptions(ueh, () -> withTracing(test));
             Assert.assertEquals(DebuggingThrowsException.class, cause.getClass());
         }
         finally
@@ -183,21 +199,20 @@
     @Test
     public void testSubmitFutureTaskWhileTracing()
     {
-        LinkedBlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>(1);
-        DebuggableThreadPoolExecutor executor = new DebuggableThreadPoolExecutor(1,
-                                                                                 Integer.MAX_VALUE,
-                                                                                 TimeUnit.MILLISECONDS,
-                                                                                 q,
-                                                                                 new NamedThreadFactory("TEST"));
+        SettableUncaughtExceptionHandler ueh = new SettableUncaughtExceptionHandler();
+        ExecutorPlus executor = executorFactory().localAware()
+                                                 .configureSequential("TEST")
+                                                 .withUncaughtExceptionHandler(ueh)
+                                                 .withQueueLimit(1).build();
         FailingRunnable test = () -> executor.submit(failingTask()).get();
         try
         {
             // make sure the non-tracing case works
-            Throwable cause = catchUncaughtExceptions(test);
+            Throwable cause = catchUncaughtExceptions(ueh, test);
             Assert.assertEquals(DebuggingThrowsException.class, cause.getClass());
 
             // tracing should have the same semantics
-            cause = catchUncaughtExceptions(() -> withTracing(test));
+            cause = catchUncaughtExceptions(ueh, () -> withTracing(test));
             Assert.assertEquals(DebuggingThrowsException.class, cause.getClass());
         }
         finally
@@ -210,17 +225,17 @@
     public void testSubmitWithResultFutureTaskWhileTracing()
     {
         LinkedBlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>(1);
-        DebuggableThreadPoolExecutor executor = new DebuggableThreadPoolExecutor(1,
-                                                                                 Integer.MAX_VALUE,
-                                                                                 TimeUnit.MILLISECONDS,
-                                                                                 q,
-                                                                                 new NamedThreadFactory("TEST"));
+        SettableUncaughtExceptionHandler ueh = new SettableUncaughtExceptionHandler();
+        ExecutorPlus executor = executorFactory().localAware()
+                                                 .configureSequential("TEST")
+                                                 .withUncaughtExceptionHandler(ueh)
+                                                 .withQueueLimit(1).build();
         FailingRunnable test = () -> executor.submit(failingTask(), 42).get();
         try
         {
-            Throwable cause = catchUncaughtExceptions(test);
+            Throwable cause = catchUncaughtExceptions(ueh, test);
             Assert.assertEquals(DebuggingThrowsException.class, cause.getClass());
-            cause = catchUncaughtExceptions(() -> withTracing(test));
+            cause = catchUncaughtExceptions(ueh, () -> withTracing(test));
             Assert.assertEquals(DebuggingThrowsException.class, cause.getClass());
         }
         finally
@@ -233,7 +248,7 @@
     {
         TraceState state = Tracing.instance.get();
         try {
-            Tracing.instance.set(new TraceStateImpl(InetAddressAndPort.getByAddress(InetAddresses.forString("127.0.0.1")), UUID.randomUUID(), Tracing.TraceType.NONE));
+            Tracing.instance.set(new TraceStateImpl(InetAddressAndPort.getByAddress(InetAddresses.forString("127.0.0.1")), nextTimeUUID(), Tracing.TraceType.NONE));
             fn.run();
         }
         finally
@@ -242,14 +257,34 @@
         }
     }
 
-    private static Throwable catchUncaughtExceptions(Runnable fn)
+    private static class SettableUncaughtExceptionHandler implements UncaughtExceptionHandler
     {
-        Thread.UncaughtExceptionHandler defaultHandler = Thread.getDefaultUncaughtExceptionHandler();
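+        // delegates to a swappable handler so tests can capture uncaught exceptions without replacing the JVM-wide default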
+        volatile Supplier<UncaughtExceptionHandler> cur;
+
+        @Override
+        public void uncaughtException(Thread t, Throwable e)
+        {
+            cur.get().uncaughtException(t, e);
+        }
+
+        void set(Supplier<UncaughtExceptionHandler> set)
+        {
+            cur = set;
+        }
+
+        void clear()
+        {
+            cur = Thread::getDefaultUncaughtExceptionHandler;
+        }
+    }
+
+    private static Throwable catchUncaughtExceptions(SettableUncaughtExceptionHandler ueh, Runnable fn)
+    {
         try
         {
             AtomicReference<Throwable> ref = new AtomicReference<>(null);
             CountDownLatch latch = new CountDownLatch(1);
-            Thread.setDefaultUncaughtExceptionHandler((thread, cause) -> {
+            ueh.set(() -> (thread, cause) -> {
                 ref.set(cause);
                 latch.countDown();
             });
@@ -266,7 +301,7 @@
         }
         finally
         {
-            Thread.setDefaultUncaughtExceptionHandler(defaultHandler);
+            ueh.clear();
         }
     }
 
@@ -277,29 +312,10 @@
 
     private static RunnableFuture<String> failingTask()
     {
-        return ListenableFutureTask.create(DebuggableThreadPoolExecutorTest::failingFunction);
+        return new FutureTask<>(DebuggableThreadPoolExecutorTest::failingFunction);
     }
 
     private static final class DebuggingThrowsException extends RuntimeException {
 
     }
-
-    // REVIEWER : I know this is the same as WrappedRunnable, but that doesn't support lambda...
-    private interface FailingRunnable extends Runnable
-    {
-        void doRun() throws Throwable;
-
-        default void run()
-        {
-            try
-            {
-                doRun();
-            }
-            catch (Throwable t)
-            {
-                Throwables.throwIfUnchecked(t);
-                throw new RuntimeException(t);
-            }
-        }
-    }
 }
diff --git a/test/unit/org/apache/cassandra/concurrent/ExecutorPlusTest.java b/test/unit/org/apache/cassandra/concurrent/ExecutorPlusTest.java
new file mode 100644
index 0000000..08af142
--- /dev/null
+++ b/test/unit/org/apache/cassandra/concurrent/ExecutorPlusTest.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.junit.Test;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
+public class ExecutorPlusTest extends AbstractExecutorPlusTest
+{
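+    // Runs the scenarios inherited from AbstractExecutorPlusTest against the plain
+    // (non-local-aware) executor factory configurations.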
+    @Test
+    public void testPooled() throws Throwable
+    {
+        testPooled(() -> executorFactory().configurePooled("test", 1));
+    }
+
+    @Test
+    public void testSequential() throws Throwable
+    {
+        testSequential(() -> executorFactory().configureSequential("test"));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/concurrent/InfiniteLoopExecutorTest.java b/test/unit/org/apache/cassandra/concurrent/InfiniteLoopExecutorTest.java
new file mode 100644
index 0000000..9ec702d
--- /dev/null
+++ b/test/unit/org/apache/cassandra/concurrent/InfiniteLoopExecutorTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.apache.cassandra.concurrent.InfiniteLoopExecutor.Daemon.DAEMON;
+
+public class InfiniteLoopExecutorTest
+{
+    @Test
+    public void testShutdownNow() throws InterruptedException, ExecutionException, TimeoutException
+    {
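+        // The loop task blocks on the semaphore and never completes on its own;
+        // shutdownNow() should interrupt it and let awaitTermination return promptly.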
+        Semaphore semaphore = new Semaphore(0);
+        InfiniteLoopExecutor e1 = new InfiniteLoopExecutor("test", ignore -> semaphore.acquire(1), DAEMON);
+        ExecutorService exec = Executors.newCachedThreadPool();
+        Future<?> f = exec.submit(() -> e1.awaitTermination(1L, TimeUnit.MINUTES));
+        e1.shutdownNow();
+        f.get(1L, TimeUnit.SECONDS);
+    }
+
+    @Test
+    public void testShutdown() throws InterruptedException, ExecutionException, TimeoutException
+    {
+        AtomicBoolean active = new AtomicBoolean(false);
+        Semaphore semaphore = new Semaphore(0);
+        InfiniteLoopExecutor e1 = new InfiniteLoopExecutor("test", ignore -> {
+            active.set(true);
+            semaphore.acquire(1);
+            active.set(false);
+            semaphore.release();
+        }, DAEMON);
+        ExecutorService exec = Executors.newCachedThreadPool();
+        Future<?> f = exec.submit(() -> e1.awaitTermination(1L, TimeUnit.MINUTES));
+        // do ten normal loops
+        for (int i = 0 ; i < 10 ; ++i)
+        {
+            semaphore.release();
+            semaphore.acquire();
+        }
+        // confirm we've re-entered the runnable
+        while (!active.get()) Thread.yield();
+        // then shutdown, and expect precisely one more loop iteration before the executor terminates
+        e1.shutdown();
+        try
+        {
+            f.get(10L, TimeUnit.MILLISECONDS);
+            Assert.fail();
+        }
+        catch (TimeoutException ignore)
+        {
+        }
+        semaphore.release();
+        f.get(1L, TimeUnit.SECONDS);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/concurrent/LocalAwareExecutorPlusTest.java b/test/unit/org/apache/cassandra/concurrent/LocalAwareExecutorPlusTest.java
new file mode 100644
index 0000000..f47046c
--- /dev/null
+++ b/test/unit/org/apache/cassandra/concurrent/LocalAwareExecutorPlusTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.concurrent;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
+public class LocalAwareExecutorPlusTest extends AbstractExecutorPlusTest
+{
+    final ExecutorLocals locals = new ExecutorLocals(null, null);
+
+    @Test
+    public void testPooled() throws Throwable
+    {
+        locals.get();
+        testPooled(() -> executorFactory().localAware().configurePooled("test", 1));
+    }
+
+    @Test
+    public void testSequential() throws Throwable
+    {
+        locals.get();
+        testSequential(() -> executorFactory().localAware().configureSequential("test"));
+    }
+
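+    // Wrap each submitted task with an assertion that the test's ExecutorLocals are visible
+    // on the executing thread, i.e. that the local-aware executor propagates them.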
+    @Override
+    Runnable wrapSubmit(Runnable submit)
+    {
+        return () -> {
+            Assert.assertEquals(locals, ExecutorLocals.current());
+            submit.run();
+        };
+    }
+}
diff --git a/test/unit/org/apache/cassandra/concurrent/SEPExecutorTest.java b/test/unit/org/apache/cassandra/concurrent/SEPExecutorTest.java
index 97e389c..7a682ed 100644
--- a/test/unit/org/apache/cassandra/concurrent/SEPExecutorTest.java
+++ b/test/unit/org/apache/cassandra/concurrent/SEPExecutorTest.java
@@ -39,6 +39,7 @@
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.concurrent.DebuggableThreadPoolExecutorTest.checkLocalStateIsPropagated;
+import static org.assertj.core.api.Assertions.assertThat;
 
 public class SEPExecutorTest
 {
@@ -96,7 +97,7 @@
         final AtomicInteger notifiedMaxPoolSize = new AtomicInteger();
 
         SharedExecutorPool sharedPool;
-        LocalAwareExecutorService executor;
+        LocalAwareExecutorPlus executor;
 
         Thread makeBusy;
         AtomicBoolean stayBusy;
@@ -141,7 +142,7 @@
             sharedPool.shutdownAndWait(1L, MINUTES);
         }
 
-        public LocalAwareExecutorService getExecutor()
+        public LocalAwareExecutorPlus getExecutor()
         {
             return executor;
         }
@@ -156,7 +157,7 @@
     public void changingMaxWorkersMeetsConcurrencyGoalsTest() throws InterruptedException, TimeoutException
     {
         BusyExecutor busyExecutor = new BusyExecutor("ChangingMaxWorkersMeetsConcurrencyGoalsTest", "resizetest");
-        LocalAwareExecutorService executor = busyExecutor.getExecutor();
+        LocalAwareExecutorPlus executor = busyExecutor.getExecutor();
 
         busyExecutor.start();
         try
@@ -188,12 +189,11 @@
         }
     }
 
-
     @Test
     public void stoppedWorkersProcessTasksWhenConcurrencyIncreases() throws InterruptedException
     {
         BusyExecutor busyExecutor = new BusyExecutor("StoppedWorkersProcessTasksWhenConcurrencyIncreases", "stoptest");
-        LocalAwareExecutorService executor = busyExecutor.getExecutor();
+        LocalAwareExecutorPlus executor = busyExecutor.getExecutor();
         busyExecutor.start();
         try
         {
@@ -258,7 +258,7 @@
         }
     }
 
-    void assertMaxTaskConcurrency(LocalAwareExecutorService executor, int concurrency) throws InterruptedException
+    void assertMaxTaskConcurrency(LocalAwareExecutorPlus executor, int concurrency) throws InterruptedException
     {
         executor.setMaximumPoolSize(concurrency);
 
@@ -277,7 +277,8 @@
         SharedExecutorPool sharedPool = new SharedExecutorPool("TestPool");
         try
         {
-            LocalAwareExecutorService executor = sharedPool.newExecutor(1, "TEST", "TEST");
+            LocalAwareExecutorPlus executor = sharedPool.newExecutor(1, "TEST", "TEST");
+            assertThat(executor).isInstanceOf(LocalAwareExecutorPlus.class);
             checkLocalStateIsPropagated(executor);
         }
         finally
diff --git a/test/unit/org/apache/cassandra/concurrent/WaitQueueTest.java b/test/unit/org/apache/cassandra/concurrent/WaitQueueTest.java
index ac2a9c0..a9049a9 100644
--- a/test/unit/org/apache/cassandra/concurrent/WaitQueueTest.java
+++ b/test/unit/org/apache/cassandra/concurrent/WaitQueueTest.java
@@ -29,6 +29,7 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.cassandra.utils.concurrent.WaitQueue.newWaitQueue;
 import static org.junit.Assert.*;
 
 public class WaitQueueTest
@@ -37,14 +38,14 @@
     @Test
     public void testSerial() throws InterruptedException
     {
-        testSerial(new WaitQueue());
+        testSerial(newWaitQueue());
     }
     public void testSerial(final WaitQueue queue) throws InterruptedException
     {
         final AtomicInteger ready = new AtomicInteger();
         Thread[] ts = new Thread[4];
         for (int i = 0 ; i < ts.length ; i++)
-            ts[i] = NamedThreadFactory.createThread(new Runnable()
+            ts[i] = NamedThreadFactory.createAnonymousThread(new Runnable()
         {
             @Override
             public void run()
@@ -77,14 +78,14 @@
     @Test
     public void testCondition() throws InterruptedException
     {
-        testCondition(new WaitQueue());
+        testCondition(newWaitQueue());
     }
     public void testCondition(final WaitQueue queue) throws InterruptedException
     {
         final AtomicBoolean ready = new AtomicBoolean(false);
         final AtomicBoolean condition = new AtomicBoolean(false);
         final AtomicBoolean fail = new AtomicBoolean(false);
-        Thread t = NamedThreadFactory.createThread(new Runnable()
+        Thread t = NamedThreadFactory.createAnonymousThread(new Runnable()
         {
             @Override
             public void run()
diff --git a/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java b/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java
index 1086521..de6e0d2 100644
--- a/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java
+++ b/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java
@@ -142,4 +142,4 @@
             System.clearProperty(TEST_PROP.getKey());
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/config/ConfigCompatabilityTest.java b/test/unit/org/apache/cassandra/config/ConfigCompatabilityTest.java
new file mode 100644
index 0000000..8d1e5e2
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/ConfigCompatabilityTest.java
@@ -0,0 +1,435 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonValue;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.TreeNode;
+import com.fasterxml.jackson.databind.DeserializationContext;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializerProvider;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
+import com.fasterxml.jackson.databind.node.TextNode;
+import com.fasterxml.jackson.databind.ser.std.StdSerializer;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import org.yaml.snakeyaml.introspector.Property;
+
+/**
+ * To create the test files used by this class, run {@link org.apache.cassandra.distributed.upgrade.ConfigCompatabilityTestGenerate}.
+ */
+public class ConfigCompatabilityTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(ConfigCompatabilityTest.class);
+
+    public static final String TEST_DIR = "test/data/config";
+
+    // see CASSANDRA-11115
+    private static final Set<String> THRIFT = ImmutableSet.of("rpc_server_type",
+                                                              "rpc_port",
+                                                              "rpc_listen_backlog",
+                                                              "start_rpc",
+                                                              "thrift_max_message_length_in_mb",
+                                                              "rpc_max_threads",
+                                                              "rpc_min_threads",
+                                                              "rpc_recv_buff_size_in_bytes",
+                                                              "rpc_send_buff_size_in_bytes",
+                                                              "thrift_framed_transport_size_in_mb",
+                                                              "thrift_prepared_statements_cache_size_mb",
+                                                              "request_scheduler",
+                                                              "request_scheduler_id",
+                                                              "request_scheduler_options");
+    // see CASSANDRA-16956
+    private static final Set<String> WINDOWS = ImmutableSet.of("windows_timer_interval");
+
+    private static final Set<String> REMOVED_IN_40 = ImmutableSet.<String>builder()
+                                                     .addAll(THRIFT)
+                                                     .addAll(WINDOWS) // the option was removed later, but Windows support was dropped in 4.0
+                                                     .add("encryption_options") // CASSANDRA-10404
+                                                     .add("index_interval") // CASSANDRA-10671
+                                                     .add("streaming_socket_timeout_in_ms") // CASSANDRA-12229
+                                                     .build();
+
+    /**
+     * Not all converters make sense to check for backwards compatibility, as some use generic
+     * types such as String to handle the conversion.
+     */
+    private static final Set<Converters> IGNORED_CONVERTERS = EnumSet.of(Converters.SECONDS_CUSTOM_DURATION);
+
+    @Test
+    public void diff_3_0() throws IOException
+    {
+        diff(TEST_DIR + "/version=3.0.0-alpha1.yml", REMOVED_IN_40);
+    }
+
+    @Test
+    public void diff_3_11() throws IOException
+    {
+        diff(TEST_DIR + "/version=3.11.0.yml", REMOVED_IN_40);
+    }
+
+    @Test
+    public void diff_4_0() throws IOException
+    {
+        diff(TEST_DIR + "/version=4.0-alpha1.yml", ImmutableSet.<String>builder()
+                                                        .addAll(WINDOWS)
+                                                        .build());
+    }
+
+    private void diff(String original, Set<String> ignore) throws IOException
+    {
+        Class<Config> type = Config.class;
+        ClassTree previous = load(original);
+        Loader loader = Properties.defaultLoader();
+        Map<Class<?>, Map<String, Replacement>> replacements = Replacements.getNameReplacements(type);
+        Set<String> missing = new HashSet<>();
+        Set<String> errors = new HashSet<>();
+        diff(loader, replacements, previous, type, "", missing, errors);
+        missing = Sets.difference(missing, ignore);
+        StringBuilder msg = new StringBuilder();
+        if (!missing.isEmpty())
+            msg.append(String.format("Unable to find the following properties:\n%s", String.join("\n", new TreeSet<>(missing))));
+        if (!errors.isEmpty())
+        {
+            if (msg.length() > 0)
+                msg.append('\n');
+            msg.append(String.format("Errors detected:\n%s", String.join("\n", new TreeSet<>(errors))));
+        }
+        if (msg.length() > 0)
+            throw new AssertionError(msg);
+    }
+
+    private void diff(Loader loader, Map<Class<?>, Map<String, Replacement>> replacements, ClassTree previous, Class<?> type, String prefix, Set<String> missing, Set<String> errors)
+    {
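+        // Walk the previously-serialized config tree against the current Config class:
+        // properties that disappeared without a matching @Replaces entry are reported as missing,
+        // and mismatched types (with or without a converter) are reported as errors.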
+        Map<String, Replacement> replaces = replacements.getOrDefault(type, Collections.emptyMap());
+        Map<String, Property> properties = loader.getProperties(type);
+        Sets.SetView<String> missingInCurrent = Sets.difference(previous.properties.keySet(), properties.keySet());
+        Sets.SetView<String> inBoth = Sets.intersection(previous.properties.keySet(), properties.keySet());
+        for (String name : missingInCurrent)
+        {
+            Replacement replacement = replaces.get(name);
+            // can we find the property in @Replaces?
+            if (replacement == null)
+            {
+                missing.add(prefix + name);
+            }
+            else
+            {
+                // do types match?
+                Node node = previous.properties.get(name);
+                if (node instanceof Leaf && replacement.oldType != null)
+                    typeCheck(replacement.converter, toString(replacement.oldType), ((Leaf) node).type, name, errors);
+            }
+        }
+        for (String name : inBoth)
+        {
+            Property prop = properties.get(name);
+            Node node = previous.properties.get(name);
+            // do types match?
+            // if nested, look at sub-fields
+            if (node instanceof ClassTree)
+            {
+                // current is nested type
+                diff(loader, replacements, (ClassTree) node, prop.getType(), prefix + name + ".", missing, errors);
+            }
+            else
+            {
+                // current is flat type
+                Replacement replacement = replaces.get(name);
+                if (replacement != null && replacement.oldType != null)
+                {
+                    typeCheck(replacement.converter, toString(replacement.oldType), ((Leaf) node).type, name, errors);
+                }
+                else
+                {
+                    // previous is leaf, is current?
+                    Map<String, Property> children = Properties.isPrimitive(prop) || Properties.isCollection(prop) ? Collections.emptyMap() : loader.getProperties(prop.getType());
+                    if (!children.isEmpty())
+                        errors.add(String.format("Property %s used to be a value-type, but now is nested type %s", name, prop.getType()));
+                    typeCheck(null, toString(prop.getType()), ((Leaf) node).type, name, errors);
+                }
+            }
+        }
+    }
+
+    private static void typeCheck(Converters converters, String lhs, String rhs, String name, Set<String> errors)
+    {
+        if (IGNORED_CONVERTERS.contains(converters))
+            return;
+        if (!lhs.equals(rhs))
+            errors.add(String.format("%s types do not match; %s != %s%s", name, lhs, rhs, converters != null ? ", converter " + converters.name() : ""));
+    }
+
+    private static ClassTree load(String path) throws IOException
+    {
+        ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
+        return mapper.readValue(new File(path), ClassTree.class);
+    }
+
+    public static void dump(ClassTree classTree, String path) throws IOException
+    {
+        logger.info("Dumping class to {}", path);
+        ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
+        mapper.writeValue(new File(path), classTree);
+
+        // validate that load works as expected
+        ClassTree loaded = load(path);
+        assert loaded.equals(classTree);
+    }
+
+    public static ClassTree toTree(Class<?> klass)
+    {
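+        // Build a tree of property name -> type (or nested sub-tree) for the given class,
+        // suitable for dumping to YAML so later versions can diff against it.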
+        ClassTree node = new ClassTree(klass);
+        addProperties(Properties.defaultLoader(), node, klass);
+        return node;
+    }
+
+    private static void addProperties(Loader loader, ClassTree node, Class<?> type)
+    {
+        SortedMap<String, Property> properties = new TreeMap<>(loader.getProperties(type));
+        for (Map.Entry<String, Property> e : properties.entrySet())
+        {
+            Property property = e.getValue();
+            Map<String, Property> subProperties = Properties.isPrimitive(property) || Properties.isCollection(property) ? Collections.emptyMap() : loader.getProperties(property.getType());
+            Node child;
+            if (subProperties.isEmpty())
+            {
+                child = new Leaf(toString(property.getType()));
+            }
+            else
+            {
+                ClassTree subTree = new ClassTree(property.getType());
+                addProperties(loader, subTree, property.getType());
+                child = subTree;
+            }
+            node.addProperty(e.getKey(), child);
+        }
+    }
+
+    private static String toString(Class<?> type)
+    {
+        return normalize(type).getCanonicalName();
+    }
+
+    private static Class<?> normalize(Class<?> type)
+    {
+        // convert primitives to their boxed types, allowing null in the domain;
+        // this means that switching from int to Integer (or back) is seen as the same type while
+        // diffing, and adding/removing null from the domain is ignored by the diff
+        if (type.equals(Byte.TYPE))
+            return Byte.class;
+        else if (type.equals(Short.TYPE))
+            return Short.class;
+        else if (type.equals(Integer.TYPE))
+            return Integer.class;
+        else if (type.equals(Long.TYPE))
+            return Long.class;
+        else if (type.equals(Float.TYPE))
+            return Float.class;
+        else if (type.equals(Double.TYPE))
+            return Double.class;
+        else if (type.equals(Boolean.TYPE))
+            return Boolean.class;
+        else if (type.isArray())
+            return List.class;
+        return type;
+    }
+
+    @JsonSerialize(using = NodeSerializer.class)
+    @JsonDeserialize(using = NodeDeserializer.class)
+    private interface Node
+    {
+
+    }
+
+    private static final class NodeSerializer extends StdSerializer<Node>
+    {
+        NodeSerializer()
+        {
+            super(Node.class);
+        }
+
+        @Override
+        public void serialize(Node node, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException
+        {
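+            // A Leaf is written as a bare string (its type name); a ClassTree is written as a
+            // mapping of property name to serialized child node.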
+            if (node instanceof Leaf)
+            {
+                jsonGenerator.writeString(((Leaf) node).type);
+            }
+            else if (node instanceof ClassTree)
+            {
+                ClassTree classTree = (ClassTree) node;
+                jsonGenerator.writeStartObject();
+                for (Map.Entry<String, Node> e : classTree.properties.entrySet())
+                    jsonGenerator.writeObjectField(e.getKey(), e.getValue());
+                jsonGenerator.writeEndObject();
+            }
+        }
+    }
+
+    private static final class NodeDeserializer extends StdDeserializer<Node>
+    {
+
+        protected NodeDeserializer()
+        {
+            super(Node.class);
+        }
+
+        @Override
+        public Node deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException
+        {
+            return toNode(jsonParser.getCodec().readTree(jsonParser));
+        }
+
+        private static Node toNode(TreeNode node)
+        {
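+            // A scalar value becomes a Leaf holding the type name; a mapping becomes a ClassTree
+            // built recursively from its fields.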
+            if (node.isValueNode())
+                return new Leaf(((TextNode) node).textValue());
+            Map<String, Node> props = new HashMap<>();
+            Iterator<String> it = node.fieldNames();
+            while (it.hasNext())
+            {
+                String name = it.next();
+                Node value = toNode(node.get(name));
+                Node previous = props.put(name, value);
+                if (previous != null)
+                    throw new AssertionError("Duplicate properties found: " + name);
+            }
+            ClassTree classTree = new ClassTree();
+            classTree.setProperties(props);
+            return classTree;
+        }
+    }
+
+    private static class ClassTree implements Node
+    {
+        private Class<?> type = null;
+        private Map<String, Node> properties = new HashMap<>();
+
+        ClassTree()
+        {
+
+        }
+
+        ClassTree(Class<?> type)
+        {
+            this.type = type;
+        }
+
+        public Map<String, Node> getProperties()
+        {
+            return properties;
+        }
+
+        public void setProperties(Map<String, Node> properties)
+        {
+            this.properties = properties;
+        }
+
+        public void addProperty(String key, Node node)
+        {
+            Node previous = properties.put(key, node);
+            if (previous != null)
+                throw new AssertionError("Duplicate property name: " + key);
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            ClassTree classTree = (ClassTree) o;
+            return Objects.equals(properties, classTree.properties);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hash(properties);
+        }
+
+        @Override
+        public String toString()
+        {
+            return "Klass{" +
+                   "type=" + type +
+                   ", properties=" + properties +
+                   '}';
+        }
+    }
+
+    private static class Leaf implements Node
+    {
+        private final String type;
+
+        public Leaf(String type)
+        {
+            this.type = type;
+        }
+
+        @JsonValue
+        public String getType()
+        {
+            return type;
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Leaf leaf = (Leaf) o;
+            return Objects.equals(type, leaf.type);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hash(type);
+        }
+
+        @Override
+        public String toString()
+        {
+            return type;
+        }
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/config/DataRateSpecTest.java b/test/unit/org/apache/cassandra/config/DataRateSpecTest.java
new file mode 100644
index 0000000..be0973e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/DataRateSpecTest.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import org.junit.Test;
+
+import org.quicktheories.core.Gen;
+import org.quicktheories.generators.SourceDSL;
+
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.junit.Assert.*;
+import static org.quicktheories.QuickTheory.qt;
+
+public class DataRateSpecTest
+{
+    @Test
+    public void testConversions()
+    {
+        assertEquals(10, new DataRateSpec.LongBytesPerSecondBound("10B/s").toBytesPerSecond(), 0);
+        assertEquals(10240, new DataRateSpec.LongBytesPerSecondBound("10KiB/s").toBytesPerSecond(), 0);
+        assertEquals(0, new DataRateSpec.LongBytesPerSecondBound("10KiB/s").toMebibytesPerSecond(), 0.1);
+        assertEquals(10240, new DataRateSpec.LongBytesPerSecondBound("10MiB/s").toKibibytesPerSecond(), 0);
+        assertEquals(10485760, new DataRateSpec.LongBytesPerSecondBound("10MiB/s").toBytesPerSecond(), 0);
+        assertEquals(10, new DataRateSpec.LongBytesPerSecondBound("10MiB/s").toMebibytesPerSecond(), 0);
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound("25000000B/s").toString(), DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(200L).toString());
+    }
+
+    @Test
+    public void testOverflowingDuringConversion()
+    {
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound(Long.MAX_VALUE + "B/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: " +
+                              "9223372036854775807B/s. It shouldn't be " +
+                              "more than 9223372036854775806 in bytes_per_second");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("9007199254740992KiB/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: " +
+                              "9007199254740992KiB/s. It shouldn't be " +
+                              "more than 9223372036854775806 in bytes_per_second");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("8796093022208MiB/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: " +
+                              "8796093022208MiB/s. It shouldn't be " +
+                              "more than 9223372036854775806 in bytes_per_second");
+
+        assertEquals(Integer.MAX_VALUE, new DataRateSpec.LongBytesPerSecondBound(Integer.MAX_VALUE + "MiB/s").toMegabitsPerSecondAsInt());
+        assertEquals(Integer.MAX_VALUE, new DataRateSpec.LongBytesPerSecondBound(2147483649L + "KiB/s").toKibibytesPerSecondAsInt());
+        assertEquals(Integer.MAX_VALUE, new DataRateSpec.LongBytesPerSecondBound(2147483649L / 1024L + "MiB/s").toKibibytesPerSecondAsInt());
+        assertEquals(Integer.MAX_VALUE, new DataRateSpec.LongBytesPerSecondBound(2147483649L + "MiB/s").toMebibytesPerSecondAsInt());
+
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound(Long.MAX_VALUE + "MiB/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 9223372036854775807MiB/s. " +
+                              "It shouldn't be more than 9223372036854775806 in bytes_per_second");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound(Long.MAX_VALUE))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 9.223372036854776E18 bytes_per_second. " +
+                              "It shouldn't be more than 9223372036854775806 in bytes_per_second");
+
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("9007199254740992KiB/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 9007199254740992KiB/s. " +
+                              "It shouldn't be more than 9223372036854775806 in bytes_per_second");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("8796093022208MiB/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 8796093022208MiB/s. " +
+                              "It shouldn't be more than 9223372036854775806 in bytes_per_second");
+        assertThatThrownBy(() -> DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(Integer.MAX_VALUE))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 2147483647 megabits per second; " +
+                              "stream_throughput_outbound and " +
+                              "inter_dc_stream_throughput_outbound should " +
+                              "be between 0 and 2147483646 in megabits per second");
+    }
+
+    @Test
+    public void testFromSymbol()
+    {
+        assertEquals(DataRateSpec.DataRateUnit.fromSymbol("B/s"), DataRateSpec.DataRateUnit.BYTES_PER_SECOND);
+        assertEquals(DataRateSpec.DataRateUnit.fromSymbol("KiB/s"), DataRateSpec.DataRateUnit.KIBIBYTES_PER_SECOND);
+        assertEquals(DataRateSpec.DataRateUnit.fromSymbol("MiB/s"), DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND);
+        assertThatThrownBy(() -> DataRateSpec.DataRateUnit.fromSymbol("n"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Unsupported data rate unit: n");
+    }
+
+    @Test
+    public void testInvalidInputs()
+    {
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("10")).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid data rate: 10");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("-10b/s")).isInstanceOf(IllegalArgumentException.class)
+                                                                                    .hasMessageContaining("Invalid data rate: -10b/s");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound(-10, DataRateSpec.DataRateUnit.BYTES_PER_SECOND))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: value must be non-negative");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("10xb/s")).isInstanceOf(IllegalArgumentException.class)
+                                                                                    .hasMessageContaining("Invalid data rate: 10xb/s");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("9223372036854775809B/s")
+                                 .toBytesPerSecond()).isInstanceOf(NumberFormatException.class)
+                                                     .hasMessageContaining("For input string: \"9223372036854775809\"");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("9223372036854775809KiB/s")
+                                 .toBytesPerSecond()).isInstanceOf(NumberFormatException.class)
+                                                     .hasMessageContaining("For input string: \"9223372036854775809\"");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound("9223372036854775809MiB/s")
+                                 .toBytesPerSecond()).isInstanceOf(NumberFormatException.class)
+                                                     .hasMessageContaining("For input string: \"9223372036854775809\"");
+    }
+
+    @Test
+    public void testInvalidForConversion()
+    {
+        // just test the cast to int, as we currently have no long-bound rates and are unlikely to ever add them
+        assertEquals(Integer.MAX_VALUE, new DataRateSpec.LongBytesPerSecondBound("92233720368547758B/s").toBytesPerSecondAsInt());
+
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound(Long.MAX_VALUE + "B/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 9223372036854775807B/s. " +
+                              "It shouldn't be more than 9223372036854775806 in bytes_per_second");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound(Long.MAX_VALUE + "MiB/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 9223372036854775807MiB/s. " +
+                              "It shouldn't be more than 9223372036854775806 in bytes_per_second");
+        assertThatThrownBy(() -> new DataRateSpec.LongBytesPerSecondBound(Long.MAX_VALUE - 5 + "KiB/s"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Invalid data rate: 9223372036854775802KiB/s. " +
+                              "It shouldn't be more than 9223372036854775806 in bytes_per_second");
+    }
+
+    @Test
+    public void testValidUnits()
+    {
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound("25000000B/s"), DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(200));
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound("24MiB/s"), new DataRateSpec.LongBytesPerSecondBound("25165824B/s"));
+    }
+
+    @Test
+    public void testEquals()
+    {
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound("10B/s"), new DataRateSpec.LongBytesPerSecondBound("10B/s"));
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound("10KiB/s"), new DataRateSpec.LongBytesPerSecondBound("10240B/s"));
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound("10240B/s"), new DataRateSpec.LongBytesPerSecondBound("10KiB/s"));
+        assertNotEquals(new DataRateSpec.LongBytesPerSecondBound("0KiB/s"), new DataRateSpec.LongBytesPerSecondBound("10MiB/s"));
+    }
+
+    @Test
+    public void thereAndBackLongBytesPerSecondBound()
+    {
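+        // Round-trip property: formatting a bound as a string and parsing it back should yield
+        // an equal value for every unit and any value that fits within the bytes-per-second bound.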
+        Gen<DataRateSpec.DataRateUnit> unitGen = SourceDSL.arbitrary().enumValues(DataRateSpec.DataRateUnit.class);
+        Gen<Long> valueGen = SourceDSL.longs().between(0, 8796093022207L); // the biggest value in MiB/s that won't lead to B/s overflow
+        qt().forAll(valueGen, unitGen).check((value, unit) -> {
+            DataRateSpec.LongBytesPerSecondBound there = new DataRateSpec.LongBytesPerSecondBound(value, unit);
+            DataRateSpec.LongBytesPerSecondBound back = new DataRateSpec.LongBytesPerSecondBound(there.toString());
+            return there.equals(back) && back.equals(there);
+        });
+    }
+
+    @Test
+    public void testToString()
+    {
+        DataRateSpec testProperty = new DataRateSpec.LongBytesPerSecondBound("5B/s");
+        assertEquals("5B/s", testProperty.toString());
+    }
+
+    @Test
+    public void eq()
+    {
+        qt().forAll(gen(), gen()).check((a, b) -> a.equals(b) == b.equals(a));
+    }
+
+    @Test
+    public void eqAndHash()
+    {
+        qt().forAll(gen(), gen()).check((a, b) -> !a.equals(b) || a.hashCode() == b.hashCode());
+    }
+
+    private static Gen<DataRateSpec> gen()
+    {
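+        // Generate random DataRateSpec values across all units, keeping values small enough to
+        // avoid overflowing the bytes-per-second bound.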
+        Gen<DataRateSpec.DataRateUnit> unitGen = SourceDSL.arbitrary().enumValues(DataRateSpec.DataRateUnit.class);
+        Gen<Long> valueGen = SourceDSL.longs().between(0, Long.MAX_VALUE/1024L/1024/1024);
+        Gen<DataRateSpec> gen = rs -> new DataRateSpec.LongBytesPerSecondBound(valueGen.generate(rs), unitGen.generate(rs));
+        return gen.describedAs(DataRateSpec::toString);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/config/DataStorageSpecTest.java b/test/unit/org/apache/cassandra/config/DataStorageSpecTest.java
new file mode 100644
index 0000000..334e33f
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/DataStorageSpecTest.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Locale;
+
+import org.junit.Test;
+
+import org.quicktheories.core.Gen;
+import org.quicktheories.generators.SourceDSL;
+
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.BYTES;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.GIBIBYTES;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.KIBIBYTES;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.MEBIBYTES;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.junit.Assert.*;
+import static org.quicktheories.QuickTheory.qt;
+
+public class DataStorageSpecTest
+{
+    private static final long MAX_LONG_CONFIG_VALUE = Long.MAX_VALUE - 1;
+
+    @Test
+    public void testConversions()
+    {
+        assertEquals(10, new DataStorageSpec.LongBytesBound("10B").toBytes());
+        assertEquals(10240, new DataStorageSpec.LongBytesBound("10KiB").toBytes());
+        assertEquals(10485760, new DataStorageSpec.LongBytesBound("10MiB").toBytes());
+        assertEquals(1073741824, new DataStorageSpec.LongBytesBound("1GiB").toBytes());
+
+        assertEquals(1024, new DataStorageSpec.LongMebibytesBound("1GiB").toMebibytes());
+        assertEquals(10485760, new DataStorageSpec.LongMebibytesBound("10MiB").toBytes());
+        assertEquals(10240, new DataStorageSpec.LongMebibytesBound("10MiB").toKibibytes());
+        assertEquals(1024 * 1024 * 1024, new DataStorageSpec.IntBytesBound("1GiB").toBytes());
+        assertEquals(10240, new DataStorageSpec.IntKibibytesBound("10MiB").toKibibytes());
+        assertEquals(1024, new DataStorageSpec.IntMebibytesBound("1GiB").toMebibytes());
+
+        assertEquals(10, new DataStorageSpec.LongBytesBound(10, BYTES).toBytes());
+        assertEquals(10240, new DataStorageSpec.LongBytesBound(10, KIBIBYTES).toBytes());
+        assertEquals(10485760, new DataStorageSpec.LongBytesBound(10, MEBIBYTES).toBytes());
+        assertEquals(1073741824, new DataStorageSpec.LongBytesBound(1, GIBIBYTES).toBytes());
+
+        assertEquals(1024, new DataStorageSpec.LongMebibytesBound(1, GIBIBYTES).toMebibytes());
+        assertEquals(1024 * 1024, new DataStorageSpec.LongMebibytesBound(1, GIBIBYTES).toKibibytes());
+        assertEquals(10485760, new DataStorageSpec.LongMebibytesBound(10, MEBIBYTES).toBytes());
+        assertEquals(10240, new DataStorageSpec.IntKibibytesBound(10, MEBIBYTES).toKibibytes());
+        assertEquals(1024, new DataStorageSpec.IntMebibytesBound(1, GIBIBYTES).toMebibytes());
+    }
+
+    @Test
+    public void testOverflowingConversion()
+    {
+        assertThatThrownBy(() -> new DataStorageSpec.IntBytesBound("2147483648B")).isInstanceOf(IllegalArgumentException.class)
+                                                                                  .hasMessageContaining("Invalid data storage: 2147483648B. " +
+                                                                                                        "It shouldn't be more than 2147483646 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntBytesBound(2147483648L)).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid data storage: 2147483648 bytes. " +
+                                                                                                      "It shouldn't be more than 2147483646 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntBytesBound("2147483648KiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                    .hasMessageContaining("Invalid data storage: 2147483648KiB. " +
+                                                                                                          "It shouldn't be more than 2147483646 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntBytesBound("35791395MiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                  .hasMessageContaining("Invalid data storage: 35791395MiB. " +
+                                                                                                        "It shouldn't be more than 2147483646 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntBytesBound("34954GiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid data storage: 34954GiB. " +
+                                                                                                     "It shouldn't be more than 2147483646 in bytes");
+
+        assertThatThrownBy(() -> new DataStorageSpec.IntKibibytesBound("2147483648B")).isInstanceOf(IllegalArgumentException.class)
+                                                                                      .hasMessageContaining("Invalid data storage: 2147483648B " +
+                                                                                                            "Accepted units:[KIBIBYTES, MEBIBYTES, GIBIBYTES]");
+        assertThatThrownBy(() -> new DataStorageSpec.IntKibibytesBound(2147483648L, BYTES)).isInstanceOf(IllegalArgumentException.class)
+                                                                                           .hasMessageContaining("Invalid data storage: 2147483648B " +
+                                                                                                                 "Accepted units:[KIBIBYTES, MEBIBYTES, GIBIBYTES]");
+        assertThatThrownBy(() -> new DataStorageSpec.IntKibibytesBound("2147483648KiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                        .hasMessageContaining("Invalid data storage: 2147483648KiB. " +
+                                                                                                              "It shouldn't be more than 2147483646 in kibibytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntKibibytesBound("35791395MiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                      .hasMessageContaining("Invalid data storage: 35791395MiB. " +
+                                                                                                            "It shouldn't be more than 2147483646 in kibibytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntKibibytesBound("34954GiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                   .hasMessageContaining("Invalid data storage: 34954GiB. " +
+                                                                                                         "It shouldn't be more than 2147483646 in kibibytes");
+
+        assertThatThrownBy(() -> new DataStorageSpec.IntMebibytesBound("2147483648B")).isInstanceOf(IllegalArgumentException.class)
+                                                                                      .hasMessageContaining("Invalid data storage: 2147483648B " +
+                                                                                                            "Accepted units:[MEBIBYTES, GIBIBYTES]");
+        assertThatThrownBy(() -> new DataStorageSpec.IntMebibytesBound("2147483648MiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                        .hasMessageContaining("Invalid data storage: 2147483648MiB. " +
+                                                                                                              "It shouldn't be more than 2147483646 in mebibytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntMebibytesBound(2147483648L, MEBIBYTES)).isInstanceOf(IllegalArgumentException.class)
+                                                                                               .hasMessageContaining("Invalid data storage: 2147483648 mebibytes. " +
+                                                                                                                     "It shouldn't be more than 2147483646 in mebibytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntMebibytesBound("2097152GiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                     .hasMessageContaining("Invalid data storage: 2097152GiB. " +
+                                                                                                           "It shouldn't be more than 2147483646 in mebibytes");
+        assertThatThrownBy(() -> new DataStorageSpec.IntMebibytesBound(2147483648L)).isInstanceOf(IllegalArgumentException.class)
+                                                                                    .hasMessageContaining("Invalid data storage: 2147483648 mebibytes." +
+                                                                                                          " It shouldn't be more than 2147483646 in mebibytes");
+
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE + "B")).isInstanceOf(IllegalArgumentException.class)
+                                                                                          .hasMessageContaining("Invalid data storage: 9223372036854775807B. " +
+                                                                                                                "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE, BYTES)).isInstanceOf(IllegalArgumentException.class)
+                                                                                           .hasMessageContaining("Invalid data storage: 9223372036854775807 bytes. " +
+                                                                                                                 "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE)).isInstanceOf(IllegalArgumentException.class)
+                                                                                    .hasMessageContaining("Invalid data storage: 9223372036854775807 bytes. " +
+                                                                                                          "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE + "KiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                            .hasMessageContaining("Invalid data storage: 9223372036854775807KiB. " +
+                                                                                                                  "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound("9223372036854775MiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                  .hasMessageContaining("Invalid data storage: 9223372036854775MiB. " +
+                                                                                                        "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound("9223372036854775GiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                           .hasMessageContaining("Invalid data storage: 9223372036854775GiB. " +
+                                                                                                                 "It shouldn't be more than 9223372036854775806 in bytes");
+
+        assertThatThrownBy(() -> new DataStorageSpec.LongMebibytesBound(Long.MAX_VALUE + "B")).isInstanceOf(IllegalArgumentException.class)
+                                                                                              .hasMessageContaining("Invalid data storage: 9223372036854775807B " +
+                                                                                                                    "Accepted units:[MEBIBYTES, GIBIBYTES]");
+        assertThatThrownBy(() -> new DataStorageSpec.LongMebibytesBound(Long.MAX_VALUE, BYTES)).isInstanceOf(IllegalArgumentException.class)
+                                                                                              .hasMessageContaining("Invalid data storage: 9223372036854775807B " +
+                                                                                                                    "Accepted units:[MEBIBYTES, GIBIBYTES]");
+        assertThatThrownBy(() -> new DataStorageSpec.LongMebibytesBound(Long.MAX_VALUE + "KiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                                .hasMessageContaining("Invalid data storage: 9223372036854775807KiB " +
+                                                                                                                      "Accepted units:[MEBIBYTES, GIBIBYTES]");
+        assertThatThrownBy(() -> new DataStorageSpec.LongMebibytesBound(Long.MAX_VALUE + "MiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                                .hasMessageContaining("Invalid data storage: 9223372036854775807MiB. " +
+                                                                                                                      "It shouldn't be more than 9223372036854775806 in mebibytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongMebibytesBound("9223372036854775555GiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                                  .hasMessageContaining("Invalid data storage: 9223372036854775555GiB. " +
+                                                                                                                        "It shouldn't be more than 9223372036854775806 in mebibytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongMebibytesBound(Long.MAX_VALUE)).isInstanceOf(IllegalArgumentException.class)
+                                                                                        .hasMessageContaining("Invalid data storage: 9223372036854775807 mebibytes." +
+                                                                                                              " It shouldn't be more than 9223372036854775806 in mebibytes");
+    }
+
+    @Test
+    public void testFromSymbol()
+    {
+        assertEquals(DataStorageSpec.DataStorageUnit.fromSymbol("B"), BYTES);
+        assertEquals(DataStorageSpec.DataStorageUnit.fromSymbol("KiB"), DataStorageSpec.DataStorageUnit.KIBIBYTES);
+        assertEquals(DataStorageSpec.DataStorageUnit.fromSymbol("MiB"), DataStorageSpec.DataStorageUnit.MEBIBYTES);
+        assertEquals(DataStorageSpec.DataStorageUnit.fromSymbol("GiB"), DataStorageSpec.DataStorageUnit.GIBIBYTES);
+        assertThatThrownBy(() -> DataStorageSpec.DataStorageUnit.fromSymbol("n"))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Unsupported data storage unit: n");
+    }
+
+    @Test
+    public void testInvalidInputs()
+    {
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound("10")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid data storage: 10");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound("-10bps")).isInstanceOf(IllegalArgumentException.class)
+                                                                              .hasMessageContaining("Invalid data storage: -10bps");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound("-10b")).isInstanceOf(IllegalArgumentException.class)
+                                                                            .hasMessageContaining("Invalid data storage: -10b");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(-10, BYTES)).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid data storage: value must be non-negative");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound("10HG")).isInstanceOf(IllegalArgumentException.class)
+                                                                            .hasMessageContaining("Invalid data storage: 10HG");
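+        // 9223372036854775809 exceeds Long.MAX_VALUE, so the quantity fails to parse as a long
+        // (NumberFormatException) before the upper-bound check can run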
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound("9223372036854775809B")
+                                 .toBytes()).isInstanceOf(NumberFormatException.class)
+                                            .hasMessageContaining("For input string: \"9223372036854775809\"");
+
+        assertThatThrownBy(() -> new DataStorageSpec.IntKibibytesBound("10B")).isInstanceOf(IllegalArgumentException.class)
+                                                                              .hasMessageContaining("Invalid data storage: 10B Accepted units");
+        assertThatThrownBy(() -> new DataStorageSpec.IntMebibytesBound("10B")).isInstanceOf(IllegalArgumentException.class)
+                                                                              .hasMessageContaining("Invalid data storage: 10B Accepted units");
+
+        assertThatThrownBy(() -> new DataStorageSpec.LongMebibytesBound("10B")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid data storage: 10B Accepted units");
+    }
+
+    @Test
+    public void testValidUnits()
+    {
+        assertEquals(10240L, new DataStorageSpec.IntBytesBound("10KiB").toBytes());
+        assertEquals(10L, new DataStorageSpec.IntKibibytesBound("10KiB").toKibibytes());
+        assertEquals(10L, new DataStorageSpec.IntMebibytesBound("10MiB").toMebibytes());
+    }
+
+    @Test
+    public void testInvalidForConversion()
+    {
+       //just test the cast to Int
+//        assertEquals(Integer.MAX_VALUE, new DataStorageSpec.LongBytesBound("9223372036854775806B").toBytesAsInt());
+
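+        // LongBytesBound keeps its value in bytes, so quantities given in KiB/MiB/GiB that overflow a long
+        // once converted to bytes are rejected with the "in bytes" upper-bound message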
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE + "B")).isInstanceOf(IllegalArgumentException.class)
+                                                                                          .hasMessageContaining("Invalid data storage: 9223372036854775807B. " +
+                                                                                                                "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE + "KiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                            .hasMessageContaining("Invalid data storage: 9223372036854775807KiB. " +
+                                                                                                                  "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE-5L + "MiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                               .hasMessageContaining("Invalid data storage: 9223372036854775802MiB. " +
+                                                                                                                     "It shouldn't be more than 9223372036854775806 in bytes");
+        assertThatThrownBy(() -> new DataStorageSpec.LongBytesBound(Long.MAX_VALUE-5L + "GiB")).isInstanceOf(IllegalArgumentException.class)
+                                                                                               .hasMessageContaining("Invalid data storage: 9223372036854775802GiB. " +
+                                                                                                                     "It shouldn't be more than 9223372036854775806 in bytes");
+    }
+
+    @Test
+    public void testEquals()
+    {
+        assertEquals(new DataStorageSpec.LongBytesBound("10B"), new DataStorageSpec.LongBytesBound("10B"));
+
+        assertEquals(new DataStorageSpec.LongBytesBound("10KiB"), new DataStorageSpec.LongBytesBound("10240B"));
+        assertEquals(new DataStorageSpec.LongBytesBound("10240B"), new DataStorageSpec.LongBytesBound("10KiB"));
+
+        assertEquals(new DataStorageSpec.LongBytesBound("10MiB"), new DataStorageSpec.LongBytesBound("10240KiB"));
+        assertEquals(new DataStorageSpec.LongBytesBound("10240KiB"), new DataStorageSpec.LongBytesBound("10MiB"));
+
+        assertEquals(new DataStorageSpec.LongBytesBound("10GiB"), new DataStorageSpec.LongBytesBound("10240MiB"));
+        assertEquals(new DataStorageSpec.LongBytesBound("10240MiB"), new DataStorageSpec.LongBytesBound("10GiB"));
+
+        assertEquals(new DataStorageSpec.LongBytesBound(MAX_LONG_CONFIG_VALUE, BYTES), new DataStorageSpec.LongBytesBound(MAX_LONG_CONFIG_VALUE, BYTES));
+
+        assertNotEquals(new DataStorageSpec.LongBytesBound("0MiB"), new DataStorageSpec.LongBytesBound("10KiB"));
+    }
+
+    @Test
+    public void thereAndBackLongBytesBound()
+    {
+        qt().forAll(gen()).check(there -> {
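+            // round-trip: print a generated spec with toString() and parse it back; also parse an upper-cased
+            // copy (keeping the lower-case 'i' of KiB/MiB/GiB) to check the unit symbols parse consistently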
+            DataStorageSpec.LongBytesBound back = new DataStorageSpec.LongBytesBound(there.toString());
+            DataStorageSpec.LongBytesBound BACK = new DataStorageSpec.LongBytesBound(there.toString().toUpperCase(Locale.ROOT).replace("I", "i"));
+            return there.equals(back) && there.equals(BACK);
+        });
+    }
+
+    @Test
+    public void eq()
+    {
+        qt().forAll(gen(), gen()).check((a, b) -> a.equals(b) == b.equals(a));
+    }
+
+    @Test
+    public void eqAndHash()
+    {
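+        // hashCode() contract as an implication: whenever a.equals(b), the hash codes must also match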
+        qt().forAll(gen(), gen()).check((a, b) -> !a.equals(b) || a.hashCode() == b.hashCode());
+    }
+
+    private static Gen<DataStorageSpec.LongBytesBound> gen()
+    {
+        Gen<DataStorageSpec.DataStorageUnit> unitGen = SourceDSL.arbitrary().enumValues(DataStorageSpec.DataStorageUnit.class);
+        Gen<Long> valueGen = SourceDSL.longs().between(0, Long.MAX_VALUE/1024L/1024/1024); // max in GiB we can have without overflowing
+        Gen<DataStorageSpec.LongBytesBound> gen = rs -> new DataStorageSpec.LongBytesBound(valueGen.generate(rs), unitGen.generate(rs));
+        return gen.describedAs(DataStorageSpec.LongBytesBound::toString);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java b/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
index a832679..3ee05cf 100644
--- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
+++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
@@ -37,6 +37,7 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import com.google.common.base.Throwables;
 import org.junit.Test;
 
 import org.apache.cassandra.utils.Pair;
@@ -60,12 +61,16 @@
     "org.apache.cassandra.audit.BinLogAuditLogger",
     "org.apache.cassandra.audit.IAuditLogger",
     "org.apache.cassandra.auth.AllowAllInternodeAuthenticator",
+    "org.apache.cassandra.auth.AuthCache$BulkLoader",
+    "org.apache.cassandra.auth.Cacheable",
     "org.apache.cassandra.auth.IInternodeAuthenticator",
     "org.apache.cassandra.auth.IAuthenticator",
     "org.apache.cassandra.auth.IAuthorizer",
     "org.apache.cassandra.auth.IRoleManager",
     "org.apache.cassandra.auth.INetworkAuthorizer",
     "org.apache.cassandra.config.DatabaseDescriptor",
+    "org.apache.cassandra.config.CassandraRelevantProperties",
+    "org.apache.cassandra.config.CassandraRelevantProperties$PropertyConverter",
     "org.apache.cassandra.config.ConfigurationLoader",
     "org.apache.cassandra.config.Config",
     "org.apache.cassandra.config.Config$1",
@@ -77,22 +82,56 @@
     "org.apache.cassandra.config.Config$FlushCompression",
     "org.apache.cassandra.config.Config$InternodeCompression",
     "org.apache.cassandra.config.Config$MemtableAllocationType",
+    "org.apache.cassandra.config.Config$PaxosOnLinearizabilityViolation",
+    "org.apache.cassandra.config.Config$PaxosStatePurging",
+    "org.apache.cassandra.config.Config$PaxosVariant",
     "org.apache.cassandra.config.Config$RepairCommandPoolFullStrategy",
     "org.apache.cassandra.config.Config$UserFunctionTimeoutPolicy",
     "org.apache.cassandra.config.Config$CorruptedTombstoneStrategy",
     "org.apache.cassandra.config.DatabaseDescriptor$ByteUnit",
-    "org.apache.cassandra.config.ParameterizedClass",
+    "org.apache.cassandra.config.DataRateSpec",
+    "org.apache.cassandra.config.DataRateSpec$DataRateUnit",
+    "org.apache.cassandra.config.DataRateSpec$DataRateUnit$1",
+    "org.apache.cassandra.config.DataRateSpec$DataRateUnit$2",
+    "org.apache.cassandra.config.DataRateSpec$DataRateUnit$3",
+    "org.apache.cassandra.config.DataStorageSpec",
+    "org.apache.cassandra.config.DataStorageSpec$DataStorageUnit",
+    "org.apache.cassandra.config.DataStorageSpec$DataStorageUnit$1",
+    "org.apache.cassandra.config.DataStorageSpec$DataStorageUnit$2",
+    "org.apache.cassandra.config.DataStorageSpec$DataStorageUnit$3",
+    "org.apache.cassandra.config.DataStorageSpec$DataStorageUnit$4",
+    "org.apache.cassandra.config.DataStorageSpec$IntBytesBound",
+    "org.apache.cassandra.config.DataStorageSpec$IntKibibytesBound",
+    "org.apache.cassandra.config.DataStorageSpec$IntMebibytesBound",
+    "org.apache.cassandra.config.DataStorageSpec$LongBytesBound",
+    "org.apache.cassandra.config.DataStorageSpec$LongMebibytesBound",
+    "org.apache.cassandra.config.DurationSpec",
+    "org.apache.cassandra.config.DataRateSpec$LongBytesPerSecondBound",
+    "org.apache.cassandra.config.DurationSpec$LongMillisecondsBound",
+    "org.apache.cassandra.config.DurationSpec$LongNanosecondsBound",
+    "org.apache.cassandra.config.DurationSpec$LongSecondsBound",
+    "org.apache.cassandra.config.DurationSpec$IntMillisecondsBound",
+    "org.apache.cassandra.config.DurationSpec$IntSecondsBound",
+    "org.apache.cassandra.config.DurationSpec$IntMinutesBound",
     "org.apache.cassandra.config.EncryptionOptions",
     "org.apache.cassandra.config.EncryptionOptions$ClientEncryptionOptions",
     "org.apache.cassandra.config.EncryptionOptions$ServerEncryptionOptions",
     "org.apache.cassandra.config.EncryptionOptions$ServerEncryptionOptions$InternodeEncryption",
     "org.apache.cassandra.config.EncryptionOptions$ServerEncryptionOptions$OutgoingEncryptedPortSource",
+    "org.apache.cassandra.config.GuardrailsOptions",
+    "org.apache.cassandra.config.GuardrailsOptions$Config",
+    "org.apache.cassandra.config.GuardrailsOptions$ConsistencyLevels",
+    "org.apache.cassandra.config.GuardrailsOptions$TableProperties",
+    "org.apache.cassandra.config.ParameterizedClass",
     "org.apache.cassandra.config.ReplicaFilteringProtectionOptions",
     "org.apache.cassandra.config.YamlConfigurationLoader",
     "org.apache.cassandra.config.YamlConfigurationLoader$PropertiesChecker",
     "org.apache.cassandra.config.YamlConfigurationLoader$PropertiesChecker$1",
     "org.apache.cassandra.config.YamlConfigurationLoader$CustomConstructor",
     "org.apache.cassandra.config.TransparentDataEncryptionOptions",
+    "org.apache.cassandra.config.StartupChecksOptions",
+    "org.apache.cassandra.config.SubnetGroups",
+    "org.apache.cassandra.config.TrackWarnings",
     "org.apache.cassandra.db.ConsistencyLevel",
     "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerFactory",
     "org.apache.cassandra.db.commitlog.DefaultCommitLogSegmentMgrFactory",
@@ -101,6 +140,11 @@
     "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerStandard",
     "org.apache.cassandra.db.commitlog.CommitLog",
     "org.apache.cassandra.db.commitlog.CommitLogMBean",
+    "org.apache.cassandra.db.guardrails.GuardrailsConfig",
+    "org.apache.cassandra.db.guardrails.GuardrailsConfigMBean",
+    "org.apache.cassandra.db.guardrails.GuardrailsConfig$ConsistencyLevels",
+    "org.apache.cassandra.db.guardrails.GuardrailsConfig$TableProperties",
+    "org.apache.cassandra.db.guardrails.Values$Config",
     "org.apache.cassandra.dht.IPartitioner",
     "org.apache.cassandra.distributed.api.IInstance",
     "org.apache.cassandra.distributed.api.IIsolatedExecutor",
@@ -122,9 +166,11 @@
     "org.apache.cassandra.exceptions.ConfigurationException",
     "org.apache.cassandra.exceptions.RequestValidationException",
     "org.apache.cassandra.exceptions.CassandraException",
+    "org.apache.cassandra.exceptions.InvalidRequestException",
     "org.apache.cassandra.exceptions.TransportException",
     "org.apache.cassandra.fql.FullQueryLogger",
     "org.apache.cassandra.fql.FullQueryLoggerOptions",
+    "org.apache.cassandra.gms.IFailureDetector",
     "org.apache.cassandra.locator.IEndpointSnitch",
     "org.apache.cassandra.io.FSWriteError",
     "org.apache.cassandra.io.FSError",
@@ -133,23 +179,33 @@
     "org.apache.cassandra.io.compress.LZ4Compressor",
     "org.apache.cassandra.io.sstable.metadata.MetadataType",
     "org.apache.cassandra.io.util.BufferedDataOutputStreamPlus",
+    "org.apache.cassandra.io.util.RebufferingInputStream",
+    "org.apache.cassandra.io.util.FileInputStreamPlus",
+    "org.apache.cassandra.io.util.FileOutputStreamPlus",
+    "org.apache.cassandra.io.util.File",
     "org.apache.cassandra.io.util.DataOutputBuffer",
     "org.apache.cassandra.io.util.DataOutputBufferFixed",
     "org.apache.cassandra.io.util.DataOutputStreamPlus",
     "org.apache.cassandra.io.util.DataOutputPlus",
+    "org.apache.cassandra.io.util.DataInputPlus",
     "org.apache.cassandra.io.util.DiskOptimizationStrategy",
     "org.apache.cassandra.io.util.SpinningDiskOptimizationStrategy",
+    "org.apache.cassandra.io.util.PathUtils$IOToLongFunction",
     "org.apache.cassandra.locator.Replica",
+    "org.apache.cassandra.locator.ReplicaCollection",
     "org.apache.cassandra.locator.SimpleSeedProvider",
     "org.apache.cassandra.locator.SeedProvider",
+    "org.apache.cassandra.security.ISslContextFactory",
     "org.apache.cassandra.security.SSLFactory",
     "org.apache.cassandra.security.EncryptionContext",
     "org.apache.cassandra.service.CacheService$CacheType",
+    "org.apache.cassandra.transport.ProtocolException",
     "org.apache.cassandra.utils.binlog.BinLogOptions",
     "org.apache.cassandra.utils.FBUtilities",
     "org.apache.cassandra.utils.FBUtilities$1",
     "org.apache.cassandra.utils.CloseableIterator",
     "org.apache.cassandra.utils.Pair",
+    "org.apache.cassandra.utils.concurrent.UncheckedInterruptedException",
     "org.apache.cassandra.ConsoleAppender",
     "org.apache.cassandra.ConsoleAppender$1",
     "org.apache.cassandra.LogbackStatusListener",
@@ -295,7 +351,7 @@
         {
             Method method = databaseDescriptorClass.getDeclaredMethod(methodName);
             method.invoke(null);
-            
+
             if (threadCount != threads.getThreadCount())
             {
                 for (ThreadInfo threadInfo : threads.getThreadInfo(threads.getAllThreadIds()))
@@ -311,15 +367,15 @@
     {
         if (!violations.isEmpty())
         {
+            StringBuilder sb = new StringBuilder();
             for (Pair<String, Exception> violation : new ArrayList<>(violations))
-            {
-                err.println();
-                err.println();
-                err.println("VIOLATION: " + violation.left);
-                violation.right.printStackTrace(err);
-            }
+                sb.append("\n\n")
+                  .append("VIOLATION: ").append(violation.left).append('\n')
+                  .append(Throwables.getStackTraceAsString(violation.right));
+            String msg = sb.toString();
+            err.println(msg);
 
-            fail();
+            fail(msg);
         }
     }
 }
diff --git a/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java b/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
index 0038938..2b72cd7 100644
--- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
+++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
@@ -25,6 +25,7 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Enumeration;
+import java.util.function.Consumer;
 
 
 import com.google.common.base.Throwables;
@@ -34,8 +35,11 @@
 
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.assertj.core.api.Assertions;
 
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.KIBIBYTES;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -110,7 +114,7 @@
     }
 
     @Test
-    public void testRpcInterface() throws Exception
+    public void testRpcInterface()
     {
         Config testConfig = DatabaseDescriptor.loadConfig();
         testConfig.rpc_interface = suitableInterface.getName();
@@ -195,7 +199,7 @@
     }
 
     @Test
-    public void testRpcAddress() throws Exception
+    public void testRpcAddress()
     {
         Config testConfig = DatabaseDescriptor.loadConfig();
         testConfig.rpc_address = suitableInterface.getInterfaceAddresses().get(0).getAddress().getHostAddress();
@@ -264,7 +268,7 @@
             }
         }
     }
-    
+
     @Test
     public void testTokensFromString()
     {
@@ -279,15 +283,15 @@
         try
         {
             DatabaseDescriptor.setColumnIndexCacheSize(-1);
-            fail("Should have received a ConfigurationException column_index_cache_size_in_kb = -1");
+            fail("Should have received a IllegalArgumentException column_index_cache_size = -1");
         }
-        catch (ConfigurationException ignored) { }
+        catch (IllegalArgumentException ignored) { }
         Assert.assertEquals(2048, DatabaseDescriptor.getColumnIndexCacheSize());
 
         try
         {
             DatabaseDescriptor.setColumnIndexCacheSize(2 * 1024 * 1024);
-            fail("Should have received a ConfigurationException column_index_cache_size_in_kb = 2GiB");
+            fail("Should have received a ConfigurationException column_index_cache_size= 2GiB");
         }
         catch (ConfigurationException ignored) { }
         Assert.assertEquals(2048, DatabaseDescriptor.getColumnIndexCacheSize());
@@ -295,105 +299,131 @@
         try
         {
             DatabaseDescriptor.setColumnIndexSize(-1);
-            fail("Should have received a ConfigurationException column_index_size_in_kb = -1");
+            fail("Should have received a IllegalArgumentException column_index_size = -1");
         }
-        catch (ConfigurationException ignored) { }
+        catch (IllegalArgumentException ignored) { }
         Assert.assertEquals(4096, DatabaseDescriptor.getColumnIndexSize());
 
         try
         {
             DatabaseDescriptor.setColumnIndexSize(2 * 1024 * 1024);
-            fail("Should have received a ConfigurationException column_index_size_in_kb = 2GiB");
+            fail("Should have received a ConfigurationException column_index_size = 2GiB");
         }
         catch (ConfigurationException ignored) { }
         Assert.assertEquals(4096, DatabaseDescriptor.getColumnIndexSize());
 
         try
         {
-            DatabaseDescriptor.setBatchSizeWarnThresholdInKB(-1);
-            fail("Should have received a ConfigurationException batch_size_warn_threshold_in_kb = -1");
+            DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(-1);
+            fail("Should have received a IllegalArgumentException batch_size_warn_threshold = -1");
         }
-        catch (ConfigurationException ignored) { }
+        catch (IllegalArgumentException ignored) { }
         Assert.assertEquals(5120, DatabaseDescriptor.getBatchSizeWarnThreshold());
 
         try
         {
-            DatabaseDescriptor.setBatchSizeWarnThresholdInKB(2 * 1024 * 1024);
-            fail("Should have received a ConfigurationException batch_size_warn_threshold_in_kb = 2GiB");
+            DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(2 * 1024 * 1024);
+            fail("Should have received a ConfigurationException batch_size_warn_threshold = 2GiB");
         }
         catch (ConfigurationException ignored) { }
-        Assert.assertEquals(4096, DatabaseDescriptor.getColumnIndexSize());
+        Assert.assertEquals(5120, DatabaseDescriptor.getBatchSizeWarnThreshold());
+    }
+
+    @Test
+    public void testWidenToLongInBytes() throws ConfigurationException
+    {
+        Config conf = DatabaseDescriptor.getRawConfig();
+        int maxInt = Integer.MAX_VALUE - 1;
+        long maxIntMebibytesAsBytes = (long) maxInt * 1024 * 1024;
+        long maxIntKibibytesAsBytes = (long) maxInt * 1024;
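+        // (Integer.MAX_VALUE - 1) mebibytes or kibibytes is far more than Integer.MAX_VALUE bytes, so the
+        // byte-valued getters below must widen to long rather than overflow int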
+
+        conf.compaction_large_partition_warning_threshold = new DataStorageSpec.IntMebibytesBound(maxInt);
+        Assert.assertEquals(maxIntMebibytesAsBytes, DatabaseDescriptor.getCompactionLargePartitionWarningThreshold());
+
+        conf.min_free_space_per_drive = new DataStorageSpec.IntMebibytesBound(maxInt);
+        Assert.assertEquals(maxIntMebibytesAsBytes, DatabaseDescriptor.getMinFreeSpacePerDriveInBytes());
+
+        conf.max_hints_file_size = new DataStorageSpec.IntMebibytesBound(maxInt);
+        Assert.assertEquals(maxIntMebibytesAsBytes, DatabaseDescriptor.getMaxHintsFileSize());
+
+        DatabaseDescriptor.setBatchSizeFailThresholdInKiB(maxInt);
+        Assert.assertEquals(maxIntKibibytesAsBytes, DatabaseDescriptor.getBatchSizeFailThreshold());
     }
 
     @Test
     public void testLowestAcceptableTimeouts() throws ConfigurationException
     {
         Config testConfig = new Config();
-        testConfig.read_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT + 1;
-        testConfig.range_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT + 1;
-        testConfig.write_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT + 1;
-        testConfig.truncate_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT + 1;
-        testConfig.cas_contention_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT + 1;
-        testConfig.counter_write_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT + 1;
-        testConfig.request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT + 1;
-        
-        assertTrue(testConfig.read_request_timeout_in_ms > DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.range_request_timeout_in_ms > DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.write_request_timeout_in_ms > DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.truncate_request_timeout_in_ms > DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.cas_contention_timeout_in_ms > DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.counter_write_request_timeout_in_ms > DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.request_timeout_in_ms > DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+
+        DurationSpec.LongMillisecondsBound greaterThanLowestTimeout = new DurationSpec.LongMillisecondsBound(DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT.toMilliseconds() + 1);
+
+        testConfig.read_request_timeout = greaterThanLowestTimeout;
+        testConfig.range_request_timeout = greaterThanLowestTimeout;
+        testConfig.write_request_timeout = greaterThanLowestTimeout;
+        testConfig.truncate_request_timeout = greaterThanLowestTimeout;
+        testConfig.cas_contention_timeout = greaterThanLowestTimeout;
+        testConfig.counter_write_request_timeout = greaterThanLowestTimeout;
+        testConfig.request_timeout = greaterThanLowestTimeout;
+
+        assertEquals(testConfig.read_request_timeout, greaterThanLowestTimeout);
+        assertEquals(testConfig.range_request_timeout, greaterThanLowestTimeout);
+        assertEquals(testConfig.write_request_timeout, greaterThanLowestTimeout);
+        assertEquals(testConfig.truncate_request_timeout, greaterThanLowestTimeout);
+        assertEquals(testConfig.cas_contention_timeout, greaterThanLowestTimeout);
+        assertEquals(testConfig.counter_write_request_timeout, greaterThanLowestTimeout);
+        assertEquals(testConfig.request_timeout, greaterThanLowestTimeout);
 
         //set less than Lowest acceptable value
-        testConfig.read_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT - 1;
-        testConfig.range_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT - 1;
-        testConfig.write_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT - 1;
-        testConfig.truncate_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT - 1;
-        testConfig.cas_contention_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT - 1;
-        testConfig.counter_write_request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT - 1;
-        testConfig.request_timeout_in_ms = DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT - 1;
+        DurationSpec.LongMillisecondsBound lowerThanLowestTimeout = new DurationSpec.LongMillisecondsBound(DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT.toMilliseconds() - 1);
+
+        testConfig.read_request_timeout = lowerThanLowestTimeout;
+        testConfig.range_request_timeout = lowerThanLowestTimeout;
+        testConfig.write_request_timeout = lowerThanLowestTimeout;
+        testConfig.truncate_request_timeout = lowerThanLowestTimeout;
+        testConfig.cas_contention_timeout = lowerThanLowestTimeout;
+        testConfig.counter_write_request_timeout = lowerThanLowestTimeout;
+        testConfig.request_timeout = lowerThanLowestTimeout;
 
         DatabaseDescriptor.checkForLowestAcceptedTimeouts(testConfig);
 
-        assertTrue(testConfig.read_request_timeout_in_ms == DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.range_request_timeout_in_ms == DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.write_request_timeout_in_ms == DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.truncate_request_timeout_in_ms == DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.cas_contention_timeout_in_ms == DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.counter_write_request_timeout_in_ms == DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
-        assertTrue(testConfig.request_timeout_in_ms == DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+        assertEquals(testConfig.read_request_timeout, DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+        assertEquals(testConfig.range_request_timeout, DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+        assertEquals(testConfig.write_request_timeout, DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+        assertEquals(testConfig.truncate_request_timeout, DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+        assertEquals(testConfig.cas_contention_timeout, DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+        assertEquals(testConfig.counter_write_request_timeout, DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
+        assertEquals(testConfig.request_timeout, DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT);
     }
 
     @Test
     public void testRepairSessionMemorySizeToggles()
     {
-        int previousSize = DatabaseDescriptor.getRepairSessionSpaceInMegabytes();
+        int previousSize = DatabaseDescriptor.getRepairSessionSpaceInMiB();
         try
         {
             Assert.assertEquals((Runtime.getRuntime().maxMemory() / (1024 * 1024) / 16),
-                                DatabaseDescriptor.getRepairSessionSpaceInMegabytes());
+                                DatabaseDescriptor.getRepairSessionSpaceInMiB());
 
             int targetSize = (int) (Runtime.getRuntime().maxMemory() / (1024 * 1024) / 4) + 1;
 
-            DatabaseDescriptor.setRepairSessionSpaceInMegabytes(targetSize);
-            Assert.assertEquals(targetSize, DatabaseDescriptor.getRepairSessionSpaceInMegabytes());
+            DatabaseDescriptor.setRepairSessionSpaceInMiB(targetSize);
+            Assert.assertEquals(targetSize, DatabaseDescriptor.getRepairSessionSpaceInMiB());
 
-            DatabaseDescriptor.setRepairSessionSpaceInMegabytes(10);
-            Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMegabytes());
+            DatabaseDescriptor.setRepairSessionSpaceInMiB(10);
+            Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMiB());
 
             try
             {
-                DatabaseDescriptor.setRepairSessionSpaceInMegabytes(0);
+                DatabaseDescriptor.setRepairSessionSpaceInMiB(0);
                 fail("Should have received a ConfigurationException for depth of 9");
             }
             catch (ConfigurationException ignored) { }
 
-            Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMegabytes());
+            Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMiB());
         }
         finally
         {
-            DatabaseDescriptor.setRepairSessionSpaceInMegabytes(previousSize);
+            DatabaseDescriptor.setRepairSessionSpaceInMiB(previousSize);
         }
     }
 
@@ -433,25 +463,25 @@
     }
 
     @Test
-    public void testCalculateDefaultSpaceInMB()
+    public void testCalculateDefaultSpaceInMiB()
     {
         // check prefered size is used for a small storage volume
-        int preferredInMB = 667;
+        int preferredInMiB = 667;
         int numerator = 2;
         int denominator = 3;
         int spaceInBytes = 999 * 1024 * 1024;
 
         assertEquals(666, // total size is less than preferred, so return lower limit
-                     DatabaseDescriptor.calculateDefaultSpaceInMB("type", "/path", "setting_name", preferredInMB, spaceInBytes, numerator, denominator));
+                     DatabaseDescriptor.calculateDefaultSpaceInMiB("type", "/path", "setting_name", preferredInMiB, spaceInBytes, numerator, denominator));
 
         // check preferred size is used for a small storage volume
-        preferredInMB = 100;
+        preferredInMiB = 100;
         numerator = 1;
         denominator = 3;
         spaceInBytes = 999 * 1024 * 1024;
 
         assertEquals(100, // total size is more than preferred so keep the configured limit
-                     DatabaseDescriptor.calculateDefaultSpaceInMB("type", "/path", "setting_name", preferredInMB, spaceInBytes, numerator, denominator));
+                     DatabaseDescriptor.calculateDefaultSpaceInMiB("type", "/path", "setting_name", preferredInMiB, spaceInBytes, numerator, denominator));
     }
 
     @Test
@@ -557,6 +587,44 @@
     }
 
     @Test
+    public void testUpperBoundStreamingConfigOnStartup()
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+
+        String expectedMsg = "Invalid value of entire_sstable_stream_throughput_outbound:";
+        config.entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(Integer.MAX_VALUE, DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND);
+        validateProperty(expectedMsg);
+
+        expectedMsg = "Invalid value of entire_sstable_stream_throughput_outbound:";
+        config.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(Integer.MAX_VALUE, DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND);
+        validateProperty(expectedMsg);
+
+        expectedMsg = "Invalid value of stream_throughput_outbound:";
+        config.stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(Integer.MAX_VALUE * 125_000L);
+        validateProperty(expectedMsg);
+
+        expectedMsg = "Invalid value of inter_dc_stream_throughput_outbound:";
+        config.inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(Integer.MAX_VALUE * 125_000L);
+        validateProperty(expectedMsg);
+
+        expectedMsg = "compaction_throughput:";
+        config.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(Integer.MAX_VALUE, DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND);
+        validateProperty(expectedMsg);
+    }
+
+    private static void validateProperty(String expectedMsg)
+    {
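+        // only asserts on the message when validation actually throws; if no ConfigurationException is raised
+        // the check passes silently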
+        try
+        {
+            DatabaseDescriptor.validateUpperBoundStreamingConfig();
+        }
+        catch (ConfigurationException ex)
+        {
+            Assert.assertEquals(expectedMsg, ex.getMessage());
+        }
+    }
+
+    @Test
     public void testApplyTokensConfigInitialTokensNotSetNumTokensSet()
     {
         Config config = DatabaseDescriptor.loadConfig();
@@ -590,4 +658,161 @@
         Assert.assertEquals(Integer.valueOf(1), config.num_tokens);
         Assert.assertEquals(1, DatabaseDescriptor.tokensFromString(config.initial_token).size());
     }
+
+    @Test
+    public void testDenylistInvalidValuesRejected()
+    {
+        DatabaseDescriptor.loadConfig();
+
+        expectIllegalArgumentException(DatabaseDescriptor::setDenylistRefreshSeconds, 0, "denylist_refresh must be a positive integer.");
+        expectIllegalArgumentException(DatabaseDescriptor::setDenylistRefreshSeconds, -1, "denylist_refresh must be a positive integer.");
+        expectIllegalArgumentException(DatabaseDescriptor::setDenylistMaxKeysPerTable, 0, "denylist_max_keys_per_table must be a positive integer.");
+        expectIllegalArgumentException(DatabaseDescriptor::setDenylistMaxKeysPerTable, -1, "denylist_max_keys_per_table must be a positive integer.");
+        expectIllegalArgumentException(DatabaseDescriptor::setDenylistMaxKeysTotal, 0, "denylist_max_keys_total must be a positive integer.");
+        expectIllegalArgumentException(DatabaseDescriptor::setDenylistMaxKeysTotal, -1, "denylist_max_keys_total must be a positive integer.");
+    }
+
+    private void expectIllegalArgumentException(Consumer<Integer> c, int val, String expectedMessage)
+    {
+        assertThatThrownBy(() -> c.accept(val))
+            .isInstanceOf(IllegalArgumentException.class)
+            .hasMessageContaining(expectedMessage);
+    }
+
+    // coordinator read
+    @Test
+    public void testClientLargeReadWarnGreaterThanAbort()
+    {
+        Config conf = new Config();
+        conf.coordinator_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.coordinator_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(1, KIBIBYTES);
+        Assertions.assertThatThrownBy(() -> DatabaseDescriptor.applyReadThresholdsValidations(conf))
+                  .isInstanceOf(ConfigurationException.class)
+                  .hasMessage("coordinator_read_size_fail_threshold (1KiB) must be greater than or equal to coordinator_read_size_warn_threshold (2KiB)");
+    }
+
+    @Test
+    public void testClientLargeReadWarnEqAbort()
+    {
+        Config conf = new Config();
+        conf.coordinator_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.coordinator_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    @Test
+    public void testClientLargeReadWarnEnabledAbortDisabled()
+    {
+        Config conf = new Config();
+        conf.coordinator_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.coordinator_read_size_fail_threshold = null;
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    @Test
+    public void testClientLargeReadAbortEnabledWarnDisabled()
+    {
+        Config conf = new Config();
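+        // the test relies on a 0 warn threshold meaning "disabled", so no warn-vs-fail ordering error is expected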
+        conf.coordinator_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(0, KIBIBYTES);
+        conf.coordinator_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    // local read
+
+    @Test
+    public void testLocalLargeReadWarnGreaterThanAbort()
+    {
+        Config conf = new Config();
+        conf.local_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.local_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(1, KIBIBYTES);
+        Assertions.assertThatThrownBy(() -> DatabaseDescriptor.applyReadThresholdsValidations(conf))
+                  .isInstanceOf(ConfigurationException.class)
+                  .hasMessage("local_read_size_fail_threshold (1KiB) must be greater than or equal to local_read_size_warn_threshold (2KiB)");
+    }
+
+    @Test
+    public void testLocalLargeReadWarnEqAbort()
+    {
+        Config conf = new Config();
+        conf.local_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.local_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    @Test
+    public void testLocalLargeReadWarnEnabledAbortDisabled()
+    {
+        Config conf = new Config();
+        conf.local_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.local_read_size_fail_threshold = null;
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    @Test
+    public void testLocalLargeReadAbortEnabledWarnDisabled()
+    {
+        Config conf = new Config();
+        conf.local_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(0, KIBIBYTES);
+        conf.local_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    // row index entry
+
+    @Test
+    public void testRowIndexSizeWarnGreaterThanAbort()
+    {
+        Config conf = new Config();
+        conf.row_index_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.row_index_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(1, KIBIBYTES);
+        Assertions.assertThatThrownBy(() -> DatabaseDescriptor.applyReadThresholdsValidations(conf))
+                  .isInstanceOf(ConfigurationException.class)
+                  .hasMessage("row_index_read_size_fail_threshold (1KiB) must be greater than or equal to row_index_read_size_warn_threshold (2KiB)");
+    }
+
+    @Test
+    public void testRowIndexSizeWarnEqAbort()
+    {
+        Config conf = new Config();
+        conf.row_index_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.row_index_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    @Test
+    public void testRowIndexSizeWarnEnabledAbortDisabled()
+    {
+        Config conf = new Config();
+        conf.row_index_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        conf.row_index_read_size_fail_threshold = null;
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    @Test
+    public void testRowIndexSizeAbortEnabledWarnDisabled()
+    {
+        Config conf = new Config();
+        conf.row_index_read_size_warn_threshold = new DataStorageSpec.LongBytesBound(0, KIBIBYTES);
+        conf.row_index_read_size_fail_threshold = new DataStorageSpec.LongBytesBound(2, KIBIBYTES);
+        DatabaseDescriptor.applyReadThresholdsValidations(conf);
+    }
+
+    @Test
+    public void testDefaultSslContextFactoryConfiguration()
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        Assert.assertEquals("org.apache.cassandra.security.DefaultSslContextFactory",
+                            config.client_encryption_options.ssl_context_factory.class_name);
+        Assert.assertTrue(config.client_encryption_options.ssl_context_factory.parameters.isEmpty());
+        Assert.assertEquals("org.apache.cassandra.security.DefaultSslContextFactory",
+                            config.server_encryption_options.ssl_context_factory.class_name);
+        Assert.assertTrue(config.server_encryption_options.ssl_context_factory.parameters.isEmpty());
+    }
+
+    @Test (expected = IllegalArgumentException.class)
+    public void testInvalidSub1DefaultRFs() throws IllegalArgumentException
+    {
+        DatabaseDescriptor.setDefaultKeyspaceRF(0);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/config/DefaultLoaderTest.java b/test/unit/org/apache/cassandra/config/DefaultLoaderTest.java
new file mode 100644
index 0000000..6e122f8
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/DefaultLoaderTest.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import org.junit.Test;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.yaml.snakeyaml.introspector.FieldProperty;
+import org.yaml.snakeyaml.introspector.MethodProperty;
+import org.yaml.snakeyaml.introspector.Property;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class DefaultLoaderTest
+{
+    private static final DefaultLoader LOADER = new DefaultLoader();
+
+    // field, no getter, no setter -> Example0
+    // field, no/ignored getter, setter -> Example1 && Example5
+    // field, getter, no/ignored setter -> Example3 && Example4
+    // field, getter, setter -> Example2
+
+    // no field, no getter, setter -> Example9 && Example10
+    // no field, getter, no/ignored setter -> Example7 && Example8
+    // no field, getter, setter -> Example6
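+    //
+    // Expected resolution, per the assertions below: a non-ignored getter+setter pair maps to a MethodProperty;
+    // otherwise a public field wins as a FieldProperty; with no field, a setter alone still yields a
+    // MethodProperty while a getter alone is not mapped at all.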
+
+    @Test
+    public void fieldPresentWithoutGetterOrSetter()
+    {
+        // when the field is present and no getter, then the field property is used
+        assertThat(get(Example0.class, "first")).isInstanceOf(FieldProperty.class);
+    }
+
+    @Test
+    public void fieldPresentWithoutGetter()
+    {
+        // when the field is present and no getter, then the field property is used
+        assertThat(get(Example1.class, "first")).isInstanceOf(FieldProperty.class);
+        assertThat(get(Example5.class, "first")).isInstanceOf(FieldProperty.class);
+    }
+
+    @Test
+    public void fieldPresentWithGetter()
+    {
+        // if both a setter and a getter are present, the methods take precedence over the field
+        assertThat(get(Example2.class, "first")).isInstanceOf(MethodProperty.class);
+    }
+
+    @Test
+    public void fieldPresentWithoutSetter()
+    {
+        assertThat(get(Example3.class, "first")).isInstanceOf(FieldProperty.class);
+        assertThat(get(Example4.class, "first")).isInstanceOf(FieldProperty.class);
+    }
+
+    @Test
+    public void noFieldWithGetterAndSetter()
+    {
+        assertThat(get(Example6.class, "first")).isInstanceOf(MethodProperty.class);
+    }
+
+    @Test
+    public void noFieldWithGetterAndNoSetter()
+    {
+        assertThat(get(Example7.class, "first")).isNull();
+        assertThat(get(Example8.class, "first")).isNull();
+    }
+
+    @Test
+    public void noFieldWithoutGetterAndWithSetter()
+    {
+        assertThat(get(Example9.class, "first")).isInstanceOf(MethodProperty.class);
+        assertThat(get(Example10.class, "first")).isInstanceOf(MethodProperty.class);
+    }
+
+    private static Property get(Class<?> klass, String name)
+    {
+        return LOADER.getProperties(klass).get(name);
+    }
+
+    public static class Example0
+    {
+        public int first;
+    }
+
+    public static class Example1
+    {
+        public int first;
+
+        public void setFirst(int first)
+        {
+            this.first = first;
+        }
+    }
+
+    public static class Example2
+    {
+        public int first;
+
+        public void setFirst(int first)
+        {
+            this.first = first;
+        }
+
+        public int getFirst()
+        {
+            return first;
+        }
+    }
+
+    public static class Example3
+    {
+        public int first;
+
+        public int getFirst()
+        {
+            return first;
+        }
+    }
+
+    public static class Example4
+    {
+        public int first;
+
+        @JsonIgnore
+        public void setFirst(int first)
+        {
+            this.first = first;
+        }
+
+        public int getFirst()
+        {
+            return first;
+        }
+    }
+
+    public static class Example5
+    {
+        public int first;
+
+        public void setFirst(int first)
+        {
+            this.first = first;
+        }
+
+        @JsonIgnore
+        public int getFirst()
+        {
+            return first;
+        }
+    }
+
+    public static class Example6
+    {
+        public int getFirst()
+        {
+            return 42;
+        }
+
+        public void setFirst(int first)
+        {
+            // no-op
+        }
+    }
+
+    public static class Example7
+    {
+        public int getFirst()
+        {
+            return 42;
+        }
+    }
+
+    public static class Example8
+    {
+        public int getFirst()
+        {
+            return 42;
+        }
+
+        @JsonIgnore
+        public void setFirst(int first)
+        {
+            // no-op
+        }
+    }
+
+    public static class Example9
+    {
+        public void setFirst(int first)
+        {
+            // no-op
+        }
+    }
+
+    public static class Example10
+    {
+        @JsonIgnore
+        public int getFirst()
+        {
+            return 42;
+        }
+
+        public void setFirst(int first)
+        {
+            // no-op
+        }
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/config/DurationSpecTest.java b/test/unit/org/apache/cassandra/config/DurationSpecTest.java
new file mode 100644
index 0000000..22846fc
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/DurationSpecTest.java
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Test;
+
+import org.quicktheories.core.Gen;
+import org.quicktheories.generators.SourceDSL;
+
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.junit.Assert.*;
+import static org.quicktheories.QuickTheory.qt;
+
+public class DurationSpecTest
+{
+    private static final long MAX_INT_CONFIG_VALUE = Integer.MAX_VALUE - 1;
+
+    @SuppressWarnings("AssertBetweenInconvertibleTypes")
+    @Test
+    public void testConversions()
+    {
+        assertEquals(10000000000L, new DurationSpec.LongNanosecondsBound ("10s").toNanoseconds());
+        assertEquals(MAX_INT_CONFIG_VALUE, new DurationSpec.IntSecondsBound(MAX_INT_CONFIG_VALUE + "s").toSeconds());
+        assertEquals(MAX_INT_CONFIG_VALUE, new DurationSpec.LongMillisecondsBound(MAX_INT_CONFIG_VALUE + "ms").toMilliseconds());
+        assertEquals(600000000000L, new DurationSpec.LongNanosecondsBound ("10m").toNanoseconds());
+        assertEquals(MAX_INT_CONFIG_VALUE, new DurationSpec.IntMinutesBound(MAX_INT_CONFIG_VALUE + "m").toMinutes());
+        assertEquals(MAX_INT_CONFIG_VALUE, new DurationSpec.IntSecondsBound(MAX_INT_CONFIG_VALUE + "s").toSeconds());
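+        // fractional millisecond inputs round to the nearest whole millisecond: 0.7ms -> 1ms, 0.33ms and 0.333ms -> 0ms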
+        assertEquals(new DurationSpec.IntMillisecondsBound(0.7, TimeUnit.MILLISECONDS), new DurationSpec.LongNanosecondsBound("1ms"));
+        assertEquals(new DurationSpec.IntMillisecondsBound(0.33, TimeUnit.MILLISECONDS), new DurationSpec.LongNanosecondsBound("0ms"));
+        assertEquals(new DurationSpec.IntMillisecondsBound(0.333, TimeUnit.MILLISECONDS), new DurationSpec.LongNanosecondsBound("0ms"));
+    }
+
+    @Test
+    public void testFromSymbol()
+    {
+        assertEquals(DurationSpec.fromSymbol("ms"), TimeUnit.MILLISECONDS);
+        assertEquals(DurationSpec.fromSymbol("d"), TimeUnit.DAYS);
+        assertEquals(DurationSpec.fromSymbol("h"), TimeUnit.HOURS);
+        assertEquals(DurationSpec.fromSymbol("m"), TimeUnit.MINUTES);
+        assertEquals(DurationSpec.fromSymbol("s"), TimeUnit.SECONDS);
+        assertEquals(DurationSpec.fromSymbol("us"), TimeUnit.MICROSECONDS);
+        assertEquals(DurationSpec.fromSymbol("µs"), TimeUnit.MICROSECONDS);
+        assertEquals(DurationSpec.fromSymbol("ns"), NANOSECONDS);
+        assertThatThrownBy(() -> DurationSpec.fromSymbol("n")).isInstanceOf(IllegalArgumentException.class);
+    }
+
+    @Test
+    public void testGetSymbol()
+    {
+        assertEquals(DurationSpec.symbol(TimeUnit.MILLISECONDS), "ms");
+        assertEquals(DurationSpec.symbol(TimeUnit.DAYS), "d");
+        assertEquals(DurationSpec.symbol(TimeUnit.HOURS), "h");
+        assertEquals(DurationSpec.symbol(TimeUnit.MINUTES), "m");
+        assertEquals(DurationSpec.symbol(TimeUnit.SECONDS), "s");
+        assertEquals(DurationSpec.symbol(TimeUnit.MICROSECONDS), "us");
+        assertEquals(DurationSpec.symbol(NANOSECONDS), "ns");
+    }
+
+    @Test
+    public void testInvalidInputs()
+    {
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound("10")).isInstanceOf(IllegalArgumentException.class)
+                                                                             .hasMessageContaining("Invalid duration: 10");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound("-10s")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: -10s");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(-10, SECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                                     .hasMessageContaining("Invalid duration: value must be non-negative");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound("10xd")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 10xd");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound("0.333555555ms")).isInstanceOf(IllegalArgumentException.class)
+                                                                                        .hasMessageContaining("Invalid duration: 0.333555555ms");
+    }
+
+    @Test
+    public void testInvalidForConversion()
+    {
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(Long.MAX_VALUE + "ns")).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 9223372036854775807ns. " +
+                                                                                                 "It shouldn't be more than 9223372036854775806 in nanoseconds");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(Long.MAX_VALUE + "ms")).isInstanceOf(IllegalArgumentException.class)
+                                                                             .hasMessageContaining("Invalid duration: 9223372036854775807ms. " +
+                                                                                                   "It shouldn't be more than 9223372036854775806 in nanoseconds");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(Long.MAX_VALUE-5 + "µs")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 9223372036854775802µs. " +
+                                                                                                     "It shouldn't be more than 9223372036854775806 in nanoseconds");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(Long.MAX_VALUE-5 + "us")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 9223372036854775802us. " +
+                                                                                                     "It shouldn't be more than 9223372036854775806 in nanoseconds");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(Long.MAX_VALUE-5 + "s")).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 9223372036854775802s. " +
+                                                                                                 "It shouldn't be more than 9223372036854775806 in nanoseconds");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(Long.MAX_VALUE-5 + "h")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 9223372036854775802h. " +
+                                                                                                "It shouldn't be more than 9223372036854775806 in nanoseconds");
+        assertThatThrownBy(() -> new DurationSpec.LongNanosecondsBound(Long.MAX_VALUE-5 + "d")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 9223372036854775802d. " +
+                                                                                                "It shouldn't be more than 9223372036854775806 in nanoseconds");
+    }
+
+    @Test
+    public void testOverflowingConversion()
+    {
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("2147483648ms")).isInstanceOf(IllegalArgumentException.class)
+                                                                                       .hasMessageContaining("Invalid duration: 2147483648ms." +
+                                                                                                             " It shouldn't be more than 2147483646 in milliseconds");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound(2147483648L)).isInstanceOf(IllegalArgumentException.class)
+                                                                                    .hasMessageContaining("Invalid duration: 2147483648 milliseconds. " +
+                                                                                                          "It shouldn't be more than 2147483646 in milliseconds");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("2147483648s")).isInstanceOf(IllegalArgumentException.class)
+                                                                                      .hasMessageContaining("Invalid duration: 2147483648s. " +
+                                                                                                            "It shouldn't be more than 2147483646 in milliseconds");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("35791395m")).isInstanceOf(IllegalArgumentException.class)
+                                                                                    .hasMessageContaining("Invalid duration: 35791395m. " +
+                                                                                                          "It shouldn't be more than 2147483646 in milliseconds");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("597h")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 597h. " +
+                                                                                                     "It shouldn't be more than 2147483646 in milliseconds");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("24856d")).isInstanceOf(IllegalArgumentException.class)
+                                                                                 .hasMessageContaining("Invalid duration: 24856d. " +
+                                                                                                       "It shouldn't be more than 2147483646 in milliseconds");
+
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("2147483648s")).isInstanceOf(IllegalArgumentException.class)
+                                                                                 .hasMessageContaining("Invalid duration: 2147483648s. " +
+                                                                                                       "It shouldn't be more than 2147483646 in seconds");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound(2147483648L)).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 2147483648 seconds. " +
+                                                                                                     "It shouldn't be more than 2147483646 in seconds");
+        assertThatThrownBy(() -> DurationSpec.IntSecondsBound.inSecondsString("2147483648")).isInstanceOf(NumberFormatException.class)
+                                                                                            .hasMessageContaining("For input string: \"2147483648\"");
+        assertThatThrownBy(() -> DurationSpec.IntSecondsBound.inSecondsString("2147483648s")).isInstanceOf(IllegalArgumentException.class)
+                                                                                             .hasMessageContaining("Invalid duration: 2147483648s. " +
+                                                                                                                   "It shouldn't be more than 2147483646 in seconds");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("35791395m")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 35791395m. " +
+                                                                                                     "It shouldn't be more than 2147483646 in seconds");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("596524h")).isInstanceOf(IllegalArgumentException.class)
+                                                                             .hasMessageContaining("Invalid duration: 596524h. " +
+                                                                                                   "It shouldn't be more than 2147483646 in seconds");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("24856d")).isInstanceOf(IllegalArgumentException.class)
+                                                                            .hasMessageContaining("Invalid duration: 24856d. " +
+                                                                                                  "It shouldn't be more than 2147483646 in seconds");
+
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("2147483648s")).isInstanceOf(IllegalArgumentException.class)
+                                                                                 .hasMessageContaining("Invalid duration: 2147483648s " +
+                                                                                                       "Accepted units:[MINUTES, HOURS, DAYS]");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("2147483648m")).isInstanceOf(IllegalArgumentException.class)
+                                                                                 .hasMessageContaining("Invalid duration: 2147483648m. " +
+                                                                                                       "It shouldn't be more than 2147483646 in minutes");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("35791395h")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 35791395h. " +
+                                                                                                     "It shouldn't be more than 2147483646 in minutes");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("1491309d")).isInstanceOf(IllegalArgumentException.class)
+                                                                              .hasMessageContaining("Invalid duration: 1491309d. " +
+                                                                                                    "It shouldn't be more than 2147483646 in minutes");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound(2147483648L)).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 2147483648 minutes. " +
+                                                                                                     "It shouldn't be more than 2147483646 in minutes");
+    }
+
+    @SuppressWarnings("AssertBetweenInconvertibleTypes")
+    @Test
+    public void testEquals()
+    {
+        assertEquals(new DurationSpec.LongNanosecondsBound ("10s"), new DurationSpec.LongNanosecondsBound ("10s"));
+        assertEquals(new DurationSpec.LongNanosecondsBound ("10s"), new DurationSpec.LongNanosecondsBound ("10000ms"));
+        assertEquals(new DurationSpec.LongNanosecondsBound ("10000ms"), new DurationSpec.LongNanosecondsBound ("10s"));
+        assertEquals(new DurationSpec.LongNanosecondsBound ("4h"), new DurationSpec.LongNanosecondsBound ("14400s"));
+        assertEquals(DurationSpec.LongNanosecondsBound .IntSecondsBound.inSecondsString("14400"), new DurationSpec.LongNanosecondsBound ("14400s"));
+        assertEquals(DurationSpec.LongNanosecondsBound .IntSecondsBound.inSecondsString("4h"), new DurationSpec.LongNanosecondsBound ("14400s"));
+        assertEquals(DurationSpec.LongNanosecondsBound .IntSecondsBound.inSecondsString("14400s"), new DurationSpec.LongNanosecondsBound ("14400s"));
+        assertNotEquals(new DurationSpec.LongNanosecondsBound ("0m"), new DurationSpec.LongNanosecondsBound ("10ms"));
+        assertEquals(Long.MAX_VALUE-1, new DurationSpec.LongNanosecondsBound ("9223372036854775806ns").toNanoseconds());
+    }
+
+    @Test
+    public void thereAndBack()
+    {
+        Gen<TimeUnit> unitGen = SourceDSL.arbitrary().enumValues(TimeUnit.class);
+        Gen<Long> valueGen = SourceDSL.longs().between(0, Long.MAX_VALUE/24/60/60/1000L/1000L/1000L);
+        qt().forAll(valueGen, unitGen).check((value, unit) -> {
+            DurationSpec.LongNanosecondsBound there = new DurationSpec.LongNanosecondsBound(value, unit);
+            DurationSpec.LongNanosecondsBound back = new DurationSpec.LongNanosecondsBound(there.toString());
+            return there.equals(back);
+        });
+    }
+
+    @Test
+    public void testValidUnits()
+    {
+        assertEquals(10L, new DurationSpec.IntMillisecondsBound("10ms").toMilliseconds());
+        assertEquals(10L, new DurationSpec.IntSecondsBound("10s").toSeconds());
+        assertEquals(new DurationSpec.IntSecondsBound("10s"), DurationSpec.IntSecondsBound.inSecondsString("10"));
+        assertEquals(new DurationSpec.IntSecondsBound("10s"), DurationSpec.IntSecondsBound.inSecondsString("10s"));
+
+        assertEquals(10L, new DurationSpec.LongMillisecondsBound("10ms").toMilliseconds());
+        assertEquals(10L, new DurationSpec.LongSecondsBound("10s").toSeconds());
+    }
+
+    @Test
+    public void testInvalidUnits()
+    {
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("10ns")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 10ns " +
+                                                                                                     "Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS]");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound(10, NANOSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 10 NANOSECONDS " +
+                                                                                                     "Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS]");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("10us")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 10us " +
+                                                                                                     "Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS]");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 10 MICROSECONDS " +
+                                                                                                     "Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS]");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("10µs")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 10µs " +
+                                                                                                     "Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS]");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: 10 MICROSECONDS " +
+                                                                                                     "Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS]");
+        assertThatThrownBy(() -> new DurationSpec.IntMillisecondsBound("-10s")).isInstanceOf(IllegalArgumentException.class)
+                                                                               .hasMessageContaining("Invalid duration: -10s " +
+                                                                                                     "Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS]");
+
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("10ms")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10ms Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound(10, MILLISECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 MILLISECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("10ns")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10ns Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound(10, NANOSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 NANOSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("10us")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10us Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("10µs")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10µs Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntSecondsBound("-10s")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: -10s");
+
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("10s")).isInstanceOf(IllegalArgumentException.class)
+                                                                         .hasMessageContaining("Invalid duration: 10s Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound(10, SECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                         .hasMessageContaining("Invalid duration: 10 SECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("10ms")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10ms Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound(10, MILLISECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 MILLISECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("10ns")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10ns Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound(10, NANOSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 NANOSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("10us")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10us Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("10µs")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10µs Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.IntMinutesBound("-10s")).isInstanceOf(IllegalArgumentException.class)
+                                                                          .hasMessageContaining("Invalid duration: -10s");
+
+        assertThatThrownBy(() -> new DurationSpec.LongMillisecondsBound("10ns")).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid duration: 10ns Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongMillisecondsBound(10, NANOSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid duration: 10 NANOSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongMillisecondsBound("10us")).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid duration: 10us Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongMillisecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongMillisecondsBound("10µs")).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid duration: 10µs Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongMillisecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongMillisecondsBound("-10s")).isInstanceOf(IllegalArgumentException.class)
+                                                                                .hasMessageContaining("Invalid duration: -10s");
+
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound("10ms")).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 10ms Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound(10, MILLISECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 10 MILLISECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound("10ns")).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 10ns Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound(10, NANOSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 10 NANOSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound("10µs")).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 10µs Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound(10, MICROSECONDS)).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: 10 MICROSECONDS Accepted units");
+        assertThatThrownBy(() -> new DurationSpec.LongSecondsBound("-10s")).isInstanceOf(IllegalArgumentException.class)
+                                                                           .hasMessageContaining("Invalid duration: -10s");
+    }
+}
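For orientation, the following is a minimal usage sketch of the DurationSpec bounds exercised by DurationSpecTest above; the constructors, the inSecondsString helper, and the error behaviour are taken from those tests, while the wrapper class and main method are purely illustrative.

import java.util.concurrent.TimeUnit;

import org.apache.cassandra.config.DurationSpec;

public class DurationSpecUsageSketch
{
    public static void main(String[] args)
    {
        // Parse a value with an explicit unit suffix; "10s" and "10000ms" compare equal.
        DurationSpec.LongNanosecondsBound tenSeconds = new DurationSpec.LongNanosecondsBound("10s");
        System.out.println(tenSeconds.equals(new DurationSpec.LongNanosecondsBound("10000ms"))); // true

        // Construct from a numeric value plus a TimeUnit.
        DurationSpec.IntSecondsBound fourHours = new DurationSpec.IntSecondsBound(4, TimeUnit.HOURS);

        // Legacy values without a unit suffix can be read as seconds via inSecondsString.
        DurationSpec.IntSecondsBound legacy = DurationSpec.IntSecondsBound.inSecondsString("14400");
        System.out.println(fourHours.equals(legacy)); // true

        // Each bound rejects units finer than its own resolution, e.g. nanoseconds for IntSecondsBound.
        try
        {
            new DurationSpec.IntSecondsBound("10ns");
        }
        catch (IllegalArgumentException e)
        {
            System.out.println(e.getMessage()); // "Invalid duration: 10ns Accepted units..."
        }
    }
}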
diff --git a/test/unit/org/apache/cassandra/config/EncryptionOptionsEqualityTest.java b/test/unit/org/apache/cassandra/config/EncryptionOptionsEqualityTest.java
new file mode 100644
index 0000000..dbb309b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/EncryptionOptionsEqualityTest.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.config;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.junit.Test;
+
+import org.apache.cassandra.security.DefaultSslContextFactory;
+import org.apache.cassandra.security.DummySslContextFactoryImpl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+/**
+ * This class tests the equals and hashCode method of {@link EncryptionOptions} in order to make sure that the
+ * caching done in the {@link org.apache.cassandra.security.SSLFactory} doesn't break.
+ */
+public class EncryptionOptionsEqualityTest
+{
+    @Test
+    public void testKeystoreOptions() {
+        EncryptionOptions encryptionOptions1 =
+        new EncryptionOptions()
+        .withStoreType("JKS")
+        .withKeyStore("test/conf/cassandra.keystore")
+        .withKeyStorePassword("cassandra")
+        .withTrustStore("test/conf/cassandra_ssl_test.truststore")
+        .withTrustStorePassword("cassandra")
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(true)
+        .withRequireEndpointVerification(false);
+
+        EncryptionOptions encryptionOptions2 =
+        new EncryptionOptions()
+        .withStoreType("JKS")
+        .withKeyStore("test/conf/cassandra.keystore")
+        .withKeyStorePassword("cassandra")
+        .withTrustStore("test/conf/cassandra_ssl_test.truststore")
+        .withTrustStorePassword("cassandra")
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(true)
+        .withRequireEndpointVerification(false);
+
+        assertEquals(encryptionOptions1, encryptionOptions2);
+        assertEquals(encryptionOptions1.hashCode(), encryptionOptions2.hashCode());
+    }
+
+    @Test
+    public void testSameCustomSslContextFactoryImplementation() {
+
+        Map<String,String> parameters1 = new HashMap<>();
+        parameters1.put("key1", "value1");
+        parameters1.put("key2", "value2");
+        EncryptionOptions encryptionOptions1 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters1))
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(true)
+        .withRequireEndpointVerification(false);
+
+        Map<String,String> parameters2 = new HashMap<>();
+        parameters2.put("key1", "value1");
+        parameters2.put("key2", "value2");
+        EncryptionOptions encryptionOptions2 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters2))
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(true)
+        .withRequireEndpointVerification(false);
+
+        assertEquals(encryptionOptions1, encryptionOptions2);
+        assertEquals(encryptionOptions1.hashCode(), encryptionOptions2.hashCode());
+    }
+
+    @Test
+    public void testDifferentCustomSslContextFactoryImplementations() {
+
+        Map<String,String> parameters1 = new HashMap<>();
+        parameters1.put("key1", "value1");
+        parameters1.put("key2", "value2");
+        EncryptionOptions encryptionOptions1 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters1))
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(false)
+        .withRequireEndpointVerification(true);
+
+        Map<String,String> parameters2 = new HashMap<>();
+        parameters2.put("key1", "value1");
+        parameters2.put("key2", "value2");
+        EncryptionOptions encryptionOptions2 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DefaultSslContextFactory.class.getName(), parameters2))
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(false)
+        .withRequireEndpointVerification(true);
+
+        assertNotEquals(encryptionOptions1, encryptionOptions2);
+        assertNotEquals(encryptionOptions1.hashCode(), encryptionOptions2.hashCode());
+    }
+
+    @Test
+    public void testDifferentCustomSslContextFactoryParameters() {
+
+        Map<String,String> parameters1 = new HashMap<>();
+        parameters1.put("key1", "value11");
+        parameters1.put("key2", "value12");
+        EncryptionOptions encryptionOptions1 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters1))
+        .withProtocol("TLSv1.1");
+
+        Map<String,String> parameters2 = new HashMap<>();
+        parameters2.put("key1", "value21");
+        parameters2.put("key2", "value22");
+        EncryptionOptions encryptionOptions2 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters2))
+        .withProtocol("TLSv1.1");
+
+        assertNotEquals(encryptionOptions1, encryptionOptions2);
+        assertNotEquals(encryptionOptions1.hashCode(), encryptionOptions2.hashCode());
+    }
+}
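The javadoc above ties these equality checks to the caching in SSLFactory. The toy cache below only illustrates that dependency; it is not the actual SSLFactory code, and the class and method names are invented for the sketch.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.net.ssl.SSLContext;

import org.apache.cassandra.config.EncryptionOptions;

public class SslContextCacheSketch
{
    // Keyed by EncryptionOptions: if equals()/hashCode() ignored a field such as the
    // ssl_context_factory parameters, two logically different configurations could be
    // served the same cached SSLContext.
    private final Map<EncryptionOptions, SSLContext> cache = new ConcurrentHashMap<>();

    public SSLContext contextFor(EncryptionOptions options) throws Exception
    {
        SSLContext cached = cache.get(options);
        if (cached != null)
            return cached;
        SSLContext created = SSLContext.getDefault(); // stand-in for real context creation
        cache.put(options, created);
        return created;
    }
}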
diff --git a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
index 23a6c00..a76c24a 100644
--- a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
+++ b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
@@ -18,25 +18,27 @@
 
 package org.apache.cassandra.config;
 
-import java.io.File;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 
 import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.assertj.core.api.Assertions;
 
-import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED;
-import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.OPTIONAL;
-import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.ENCRYPTED;
 import static org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption.all;
 import static org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption.dc;
 import static org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption.none;
 import static org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption.rack;
-import static org.junit.Assert.*;
+import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.ENCRYPTED;
+import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.OPTIONAL;
+import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class EncryptionOptionsTest
 {
@@ -55,7 +57,24 @@
 
         public static EncryptionOptionsTestCase of(Boolean optional, String keystorePath, Boolean enabled, EncryptionOptions.TlsEncryptionPolicy expected)
         {
-            return new EncryptionOptionsTestCase(new EncryptionOptions(keystorePath, "dummypass", "dummytruststore", "dummypass",
+            return new EncryptionOptionsTestCase(new EncryptionOptions(new ParameterizedClass("org.apache.cassandra.security.DefaultSslContextFactory",
+                                                                                              new HashMap<>()),
+                                                                       keystorePath, "dummypass",
+                                                                       "dummytruststore", "dummypass",
+                                                                       Collections.emptyList(), null, null, null, "JKS", false, false, enabled, optional)
+                                                 .applyConfig(),
+                                                 expected,
+                                                 String.format("optional=%s keystore=%s enabled=%s", optional, keystorePath, enabled));
+        }
+
+        public static EncryptionOptionsTestCase of(Boolean optional, String keystorePath, Boolean enabled,
+                                                   Map<String,String> customSslContextFactoryParams,
+                                                   EncryptionOptions.TlsEncryptionPolicy expected)
+        {
+            return new EncryptionOptionsTestCase(new EncryptionOptions(new ParameterizedClass("org.apache.cassandra.security.DefaultSslContextFactory",
+                                                                                              customSslContextFactoryParams),
+                                                                       keystorePath, "dummypass",
+                                                                       "dummytruststore", "dummypass",
                                                                        Collections.emptyList(), null, null, null, "JKS", false, false, enabled, optional)
                                                  .applyConfig(),
                                                  expected,
@@ -105,7 +124,8 @@
                                                          EncryptionOptions.ServerEncryptionOptions.InternodeEncryption internodeEncryption,
                                                          EncryptionOptions.TlsEncryptionPolicy expected)
         {
-            return new ServerEncryptionOptionsTestCase(new EncryptionOptions.ServerEncryptionOptions(keystorePath, "dummypass", "dummytruststore", "dummypass",
+            return new ServerEncryptionOptionsTestCase(new EncryptionOptions.ServerEncryptionOptions(new ParameterizedClass("org.apache.cassandra.security.DefaultSslContextFactory",
+                                                                                                                            new HashMap<>()), keystorePath, "dummypass", "dummytruststore", "dummypass",
                                                                                                Collections.emptyList(), null, null, null, "JKS", false, false, optional, internodeEncryption, false)
                                                        .applyConfig(),
                                                  expected,
@@ -175,4 +195,17 @@
             Assert.assertSame(testCase.description, testCase.expected, testCase.encryptionOptions.tlsEncryptionPolicy());
         }
     }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testMisplacedConfigKey()
+    {
+        Map<String, String> customSslContextFactoryParams = new HashMap<>();
+
+        for(EncryptionOptions.ConfigKey configKey: EncryptionOptions.ConfigKey.values())
+        {
+            customSslContextFactoryParams.put(configKey.getKeyName(), "my-custom-value");
+        }
+
+        EncryptionOptionsTestCase.of(null, absentKeystore, true, customSslContextFactoryParams, ENCRYPTED);
+    }
 }
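As a counterpoint to testMisplacedConfigKey above, this is a hedged sketch of a well-formed custom ssl_context_factory configuration; DummySslContextFactoryImpl, ParameterizedClass, and the builder methods come from the tests in this patch, while the wrapper class and parameter values are illustrative.

import java.util.HashMap;
import java.util.Map;

import org.apache.cassandra.config.EncryptionOptions;
import org.apache.cassandra.config.ParameterizedClass;
import org.apache.cassandra.security.DummySslContextFactoryImpl;

public class CustomFactoryParamsSketch
{
    public static EncryptionOptions wellFormed()
    {
        Map<String, String> factoryParams = new HashMap<>();
        // Implementation-specific keys are fine; reusing any EncryptionOptions.ConfigKey
        // name here is rejected with an IllegalArgumentException, as testMisplacedConfigKey shows.
        factoryParams.put("key1", "value1");

        return new EncryptionOptions()
               .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), factoryParams))
               .withProtocol("TLSv1.1")
               .withRequireClientAuth(false);
    }
}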
diff --git a/test/unit/org/apache/cassandra/config/FailStartupDuplicateParamsTest.java b/test/unit/org/apache/cassandra/config/FailStartupDuplicateParamsTest.java
new file mode 100644
index 0000000..341a0e8
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/FailStartupDuplicateParamsTest.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.config;
+
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Predicate;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.ALLOW_DUPLICATE_CONFIG_KEYS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.ALLOW_NEW_OLD_CONFIG_KEYS;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+
+public class FailStartupDuplicateParamsTest
+{
+    private static final List<String> baseConfig = ImmutableList.of(
+        "cluster_name: Test Cluster",
+        "commitlog_sync: batch",
+        "commitlog_directory: build/test/cassandra/commitlog",
+        "hints_directory: build/test/cassandra/hints",
+        "partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner",
+        "saved_caches_directory: build/test/cassandra/saved_caches",
+        "data_file_directories:",
+        "   - build/test/cassandra/data",
+        "seed_provider:" ,
+        "   - class_name: org.apache.cassandra.locator.SimpleSeedProvider",
+        "parameters:",
+        "   - seeds: \"127.0.0.1:7012\"",
+        "endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch");
+
+    @Before
+    public void before()
+    {
+        ALLOW_DUPLICATE_CONFIG_KEYS.setBoolean(true);
+        ALLOW_NEW_OLD_CONFIG_KEYS.setBoolean(false);
+    }
+
+    @Test
+    public void testDuplicateParamThrows() throws IOException
+    {
+        ALLOW_DUPLICATE_CONFIG_KEYS.setBoolean(false);
+        testYaml("found duplicate key endpoint_snitch", true,
+                 "endpoint_snitch: org.apache.cassandra.locator.RackInferringSnitch");
+    }
+
+    @Test
+    public void testReplacementDupesOldFirst() throws IOException
+    {
+        testYaml("[enable_user_defined_functions -> user_defined_functions_enabled]", true,
+                 "enable_user_defined_functions: true",
+                 "user_defined_functions_enabled: false");
+
+        testYaml("[enable_user_defined_functions -> user_defined_functions_enabled]", true,
+                 "enable_user_defined_functions: true",
+                 "user_defined_functions_enabled: true");
+    }
+
+    @Test
+    public void testReplacementDupesNewFirst() throws IOException
+    {
+        testYaml("[enable_user_defined_functions -> user_defined_functions_enabled]", true,
+                 "user_defined_functions_enabled: false",
+                 "enable_user_defined_functions: true");
+
+    }
+
+    @Test
+    public void testReplacementDupesMultiReplace() throws IOException
+    {
+        /*
+        @Replaces(oldName = "internode_socket_send_buffer_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+        @Replaces(oldName = "internode_send_buff_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+        public DataStorageSpec internode_socket_send_buffer_size = new DataStorageSpec("0B");
+        */
+        Predicate<String> predicate = (s) -> s.contains("[internode_send_buff_size_in_bytes -> internode_socket_send_buffer_size]") &&
+                                             s.contains("[internode_socket_send_buffer_size_in_bytes -> internode_socket_send_buffer_size]");
+        String message = " does not contain both [internode_send_buff_size_in_bytes] and [internode_socket_send_buffer_size_in_bytes]";
+
+        testYaml(predicate, true,
+                 message,
+                 "internode_send_buff_size_in_bytes: 3",
+                 "internode_socket_send_buffer_size_in_bytes: 2",
+                 "internode_socket_send_buffer_size: 5B");
+
+        // and new first:
+        testYaml(predicate, true,
+                 message,
+                 "internode_socket_send_buffer_size: 5B",
+                 "internode_socket_send_buffer_size_in_bytes: 2",
+                 "internode_send_buff_size_in_bytes: 3");
+    }
+
+    private static void testYaml(String expected, boolean expectFailure, String ... toAdd) throws IOException
+    {
+        testYaml((s) -> s.contains(expected), expectFailure, "does not contain [" + expected + ']', toAdd);
+    }
+
+    private static void testYaml(Predicate<String> exceptionMsgPredicate, boolean expectFailure, String message, String ... toAdd) throws IOException
+    {
+        Path p = Files.createTempFile("config_dupes",".yaml");
+        try
+        {
+            List<String> lines = new ArrayList<>(baseConfig);
+            Collections.addAll(lines, toAdd);
+            Files.write(p, lines);
+            loadConfig(p.toUri().toURL(), message, exceptionMsgPredicate, expectFailure);
+        }
+        finally
+        {
+            Files.delete(p);
+        }
+    }
+
+    private static void loadConfig(URL config, String message, Predicate<String> exceptionMsgPredicate, boolean expectFailure)
+    {
+        try
+        {
+            new YamlConfigurationLoader().loadConfig(config);
+        }
+        catch (Exception e)
+        {
+            assertTrue(expectFailure);
+            e.printStackTrace(System.out);
+            Throwable t = e;
+            do
+            {
+                if (exceptionMsgPredicate.test(t.getMessage()))
+                    return;
+                t = t.getCause();
+            } while (t != null);
+
+            fail("Message\n["+e.getMessage()+ "]\n"+message);
+        }
+        assertFalse(expectFailure);
+    }
+}
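For context on the two gating flags toggled in before(), here is a minimal sketch of loading a config with both of them switched off so that duplicate keys and mixed old/new keys fail fast; the property handles and the YamlConfigurationLoader call are the ones used by the test, while the helper class itself is illustrative.

import java.net.URL;

import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.YamlConfigurationLoader;
import org.apache.cassandra.exceptions.ConfigurationException;

import static org.apache.cassandra.config.CassandraRelevantProperties.ALLOW_DUPLICATE_CONFIG_KEYS;
import static org.apache.cassandra.config.CassandraRelevantProperties.ALLOW_NEW_OLD_CONFIG_KEYS;

public class StrictConfigLoadSketch
{
    // Load a cassandra.yaml with both lenient flags disabled, so a duplicated key or a
    // mix of an old name and its replacement is reported instead of silently tolerated.
    public static Config loadStrictly(URL configUrl) throws ConfigurationException
    {
        ALLOW_DUPLICATE_CONFIG_KEYS.setBoolean(false);
        ALLOW_NEW_OLD_CONFIG_KEYS.setBoolean(false);
        return new YamlConfigurationLoader().loadConfig(configUrl);
    }
}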
diff --git a/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java b/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
index 575f04d..0e028cb 100644
--- a/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
+++ b/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
@@ -18,27 +18,114 @@
 
 package org.apache.cassandra.config;
 
+import java.util.concurrent.TimeUnit;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 public class LoadOldYAMLBackwardCompatibilityTest
 {
     @BeforeClass
     public static void setupDatabaseDescriptor()
     {
-        System.setProperty("cassandra.config", "cassandra_deprecated_parameters_names.yaml");
+        System.setProperty("cassandra.config", "cassandra-old.yaml");
         DatabaseDescriptor.daemonInitialization();
     }
 
-    // CASSANDRA-17141
+    // CASSANDRA-15234
     @Test
     public void testConfigurationLoaderBackwardCompatibility()
     {
         Config config = DatabaseDescriptor.loadConfig();
-        //Confirm parameters were successfully read with the old names from cassandra-old.yaml
-        assertEquals(5, config.internode_socket_send_buffer_size_in_bytes);
-        assertEquals(5, config.internode_socket_receive_buffer_size_in_bytes);
+
+        assertEquals(new DurationSpec.IntMillisecondsBound(10800000), config.max_hint_window);
+        assertEquals(new DurationSpec.IntMillisecondsBound("3h"), config.max_hint_window);
+        assertEquals(new DurationSpec.LongMillisecondsBound(0), config.native_transport_idle_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(10000), config.request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(5000), config.read_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(10000), config.range_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(2000), config.write_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(5000), config.counter_write_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(1800), config.cas_contention_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(60000), config.truncate_request_timeout);
+        assertEquals(new DurationSpec.IntSecondsBound(300), config.streaming_keep_alive_period);
+        assertEquals(new DurationSpec.LongMillisecondsBound(500), config.slow_query_log_timeout);
+        assertNull(config.memtable_heap_space);
+        assertNull(config.memtable_offheap_space);
+        assertNull(config.repair_session_space);
+        assertEquals(new DataStorageSpec.IntBytesBound(4194304), config.internode_application_send_queue_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(134217728), config.internode_application_send_queue_reserve_endpoint_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(536870912), config.internode_application_send_queue_reserve_global_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(4194304), config.internode_application_receive_queue_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(134217728), config.internode_application_receive_queue_reserve_endpoint_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(536870912), config.internode_application_receive_queue_reserve_global_capacity);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.internode_tcp_connect_timeout);
+        assertEquals(new DurationSpec.IntMillisecondsBound(30000), config.internode_tcp_user_timeout);
+        assertEquals(new DurationSpec.IntMillisecondsBound(300000), config.internode_streaming_tcp_user_timeout);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(16), config.native_transport_max_frame_size);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(256), config.max_value_size);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(4), config.column_index_size);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(2), config.column_index_cache_size);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(5), config.batch_size_warn_threshold);
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound(64, DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND), config.compaction_throughput);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(50), config.min_free_space_per_drive);
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound(25000000000000L).toString(), config.stream_throughput_outbound.toString());
+        assertEquals(DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(200000000), config.stream_throughput_outbound);
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound(24L * 1024L * 1024L), config.inter_dc_stream_throughput_outbound);
+        assertNull(config.commitlog_total_space);
+        assertEquals(new DurationSpec.IntMillisecondsBound(0.0, TimeUnit.MILLISECONDS), config.commitlog_sync_group_window);
+        assertEquals(new DurationSpec.IntMillisecondsBound(0), config.commitlog_sync_period);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(5), config.commitlog_segment_size);
+        assertNull(config.periodic_commitlog_sync_lag_block);  //Integer
+        assertNull(config.max_mutation_size);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(0), config.cdc_total_space);
+        assertEquals(new DurationSpec.IntMillisecondsBound(250), config.cdc_free_space_check_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(100), config.dynamic_snitch_update_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(600000), config.dynamic_snitch_reset_interval);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(1024), config.hinted_handoff_throttle);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(1024), config.batchlog_replay_throttle);
+        assertEquals(new DurationSpec.IntMillisecondsBound(10000), config.hints_flush_period);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(128), config.max_hints_file_size);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(10240), config.trickle_fsync_interval);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(50), config.sstable_preemptive_open_interval);
+        assertNull(config.key_cache_size);
+        assertEquals(new DataStorageSpec.LongMebibytesBound(16), config.row_cache_size);
+        assertNull(config.counter_cache_size);
+        assertNull(config.networking_cache_size);
+        assertNull(config.file_cache_size);
+        assertNull(config.index_summary_capacity);
+        assertEquals(new DurationSpec.IntMillisecondsBound(200), config.gc_log_threshold);
+        assertEquals(new DurationSpec.IntMillisecondsBound(1000), config.gc_warn_threshold);
+        assertEquals(new DurationSpec.IntSecondsBound(86400), config.trace_type_query_ttl);
+        assertEquals(new DurationSpec.IntSecondsBound(604800), config.trace_type_repair_ttl);
+        assertNull(config.prepared_statements_cache_size);
+        assertTrue(config.user_defined_functions_enabled);
+        assertTrue(config.scripted_user_defined_functions_enabled);
+        assertTrue(config.materialized_views_enabled);
+        assertFalse(config.transient_replication_enabled);
+        assertTrue(config.sasi_indexes_enabled);
+        assertTrue(config.drop_compact_storage_enabled);
+        assertTrue(config.user_defined_functions_threads_enabled);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.permissions_validity);
+        assertNull(config.permissions_update_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.roles_validity);
+        assertNull(config.roles_update_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.credentials_validity);
+        assertNull(config.credentials_update_interval);
+        assertEquals(new DurationSpec.IntMinutesBound(60), config.index_summary_resize_interval);
+
+        // Parameters whose names have not changed with CASSANDRA-15234
+        assertEquals(DurationSpec.IntSecondsBound.inSecondsString("14400"), config.key_cache_save_period);
+        assertEquals(DurationSpec.IntSecondsBound.inSecondsString("14400s"), config.key_cache_save_period);
+        assertEquals(new DurationSpec.IntSecondsBound(4, TimeUnit.HOURS), config.key_cache_save_period);
+        assertEquals(DurationSpec.IntSecondsBound.inSecondsString("0"), config.row_cache_save_period);
+        assertEquals(new DurationSpec.IntSecondsBound(0), config.row_cache_save_period);
+        assertEquals(new DurationSpec.IntSecondsBound(2, TimeUnit.HOURS), config.counter_cache_save_period);
+        assertEquals(new DurationSpec.IntSecondsBound(35), config.cache_load_timeout);
     }
 }
diff --git a/test/unit/org/apache/cassandra/config/LoaderTest.java b/test/unit/org/apache/cassandra/config/LoaderTest.java
new file mode 100644
index 0000000..5637e98
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/LoaderTest.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.junit.Test;
+
+import org.assertj.core.api.Assertions;
+import org.yaml.snakeyaml.introspector.Property;
+
+public class LoaderTest
+{
+    /**
+     * This test mostly validates that DefaultLoader matches what SnakeYAML would do (once camel case is converted to snake_case).
+     * If this test fails, it is likely that a new property was added and was not properly picked up by one of the two implementations.
+     * If you added a property and this test now fails, make sure it follows all of the following rules:
+     *
+     * 1) public field
+     * 2) getter AND setter
+     * 3) if the getter returns a Boolean, use the "get" prefix rather than "is"
+     */
+    @Test
+    public void allMatchProperties()
+    {
+        Class<?> klass = Config.class;
+        // treeset makes error cleaner to read
+        Set<String> properties = defaultImpl(klass).keySet();
+        Assertions.assertThat(new TreeSet<>(properties))
+                  .isEqualTo(new TreeSet<>(snake(klass).keySet()));
+    }
+
+    private static Map<String, Property> snake(Class<?> klass)
+    {
+        return new SnakeYamlLoader().flatten(klass);
+    }
+
+    private static Map<String, Property> defaultImpl(Class<?> klass)
+    {
+        return new DefaultLoader().flatten(klass);
+    }
+}
\ No newline at end of file
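To make the three rules in the LoaderTest javadoc concrete, here is a hypothetical property shape that both loaders should discover identically; the field and accessor names are invented for illustration and do not exist in Config.

public class ExampleConfigSection
{
    // 1) public field, named in snake_case as it would appear in cassandra.yaml
    public Boolean example_flag_enabled = false;

    // 2) getter AND setter; 3) a Boolean getter uses the "get" prefix rather than "is"
    public Boolean getExampleFlagEnabled()
    {
        return example_flag_enabled;
    }

    public void setExampleFlagEnabled(Boolean value)
    {
        example_flag_enabled = value;
    }
}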
diff --git a/test/unit/org/apache/cassandra/config/OverrideConfigurationLoader.java b/test/unit/org/apache/cassandra/config/OverrideConfigurationLoader.java
index e0a5576..2ffe91b 100644
--- a/test/unit/org/apache/cassandra/config/OverrideConfigurationLoader.java
+++ b/test/unit/org/apache/cassandra/config/OverrideConfigurationLoader.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.config;
 
 import java.util.function.Consumer;
-import java.util.function.Function;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 
diff --git a/test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java b/test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java
new file mode 100644
index 0000000..a21f235
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.MEBIBYTES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+public class ParseAndConvertUnitsTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    // CASSANDRA-15234
+    @Test
+    public void testConfigurationLoaderParser()
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+
+        //Confirm duration parameters were successfully parsed with the default values in cassandra.yaml
+        assertEquals(new DurationSpec.IntMillisecondsBound(10800000), config.max_hint_window);
+        assertEquals(new DurationSpec.LongMillisecondsBound(0), config.native_transport_idle_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(10000), config.request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(5000), config.read_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(10000), config.range_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(2000), config.write_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(5000), config.counter_write_request_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(1800), config.cas_contention_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(60000), config.truncate_request_timeout);
+        assertEquals(new DurationSpec.IntSecondsBound(300), config.streaming_keep_alive_period);
+        assertEquals(new DurationSpec.LongMillisecondsBound(500), config.slow_query_log_timeout);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.internode_tcp_connect_timeout);
+        assertEquals(new DurationSpec.IntMillisecondsBound(30000), config.internode_tcp_user_timeout);
+        assertEquals(new DurationSpec.IntMillisecondsBound(0), config.commitlog_sync_group_window);
+        assertEquals(new DurationSpec.IntMillisecondsBound(0), config.commitlog_sync_period);
+        assertNull(config.periodic_commitlog_sync_lag_block);
+        assertEquals(new DurationSpec.IntMillisecondsBound(250), config.cdc_free_space_check_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(100), config.dynamic_snitch_update_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(600000), config.dynamic_snitch_reset_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(200), config.gc_log_threshold);
+        assertEquals(new DurationSpec.IntMillisecondsBound(10000), config.hints_flush_period);
+        assertEquals(new DurationSpec.IntMillisecondsBound(1000), config.gc_warn_threshold);
+        assertEquals(new DurationSpec.IntSecondsBound(86400), config.trace_type_query_ttl);
+        assertEquals(new DurationSpec.IntSecondsBound(604800), config.trace_type_repair_ttl);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.permissions_validity);
+        assertNull(config.permissions_update_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.roles_validity);
+        assertNull(config.roles_update_interval);
+        assertEquals(new DurationSpec.IntMillisecondsBound(2000), config.credentials_validity);
+        assertNull(config.credentials_update_interval);
+        assertEquals(new DurationSpec.IntMinutesBound(60), config.index_summary_resize_interval);
+        assertEquals(DurationSpec.IntSecondsBound.inSecondsString("4h"), config.key_cache_save_period);
+        assertEquals(new DurationSpec.IntSecondsBound(30), config.cache_load_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(1500), config.user_defined_functions_fail_timeout);
+        assertEquals(new DurationSpec.LongMillisecondsBound(500), config.user_defined_functions_warn_timeout);
+        assertEquals(new DurationSpec.IntSecondsBound(3600), config.validation_preview_purge_head_start);
+
+        //Confirm space parameters were successfully parsed with the default values in cassandra.yaml
+        assertNull(config.memtable_heap_space);
+        assertNull(config.memtable_offheap_space);
+        assertNull(config.repair_session_space); // null everywhere, so this should be correct; also checks that loading does not blow up
+        assertEquals(new DataStorageSpec.IntBytesBound(4194304), config.internode_application_send_queue_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(134217728), config.internode_application_send_queue_reserve_endpoint_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(536870912), config.internode_application_send_queue_reserve_global_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(4194304), config.internode_application_receive_queue_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(134217728), config.internode_application_receive_queue_reserve_endpoint_capacity);
+        assertEquals(new DataStorageSpec.IntBytesBound(536870912), config.internode_application_receive_queue_reserve_global_capacity);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(16), config.native_transport_max_frame_size);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(256), config.max_value_size);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(4), config.column_index_size);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(2), config.column_index_cache_size);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(5), config.batch_size_warn_threshold);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(50), config.batch_size_fail_threshold);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(100), config.compaction_large_partition_warning_threshold);
+        assertNull(config.commitlog_total_space);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(5), config.commitlog_segment_size);
+        assertNull(config.max_mutation_size); // not set explicitly in the default yaml and not set in Config either, so it should be null
+        assertEquals(new DataStorageSpec.IntMebibytesBound(0), config.cdc_total_space);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(1024), config.hinted_handoff_throttle);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(1024), config.batchlog_replay_throttle);
+        assertEquals(new DataStorageSpec.IntKibibytesBound(10240), config.trickle_fsync_interval);
+        assertEquals(new DataStorageSpec.IntMebibytesBound(50), config.sstable_preemptive_open_interval);
+        assertNull(config.counter_cache_size);
+        assertNull(config.file_cache_size);
+        assertNull(config.index_summary_capacity);
+        assertEquals(new DataStorageSpec.LongMebibytesBound(1), config.prepared_statements_cache_size);
+        assertNull(config.key_cache_size);
+        assertEquals(new DataStorageSpec.LongMebibytesBound(16), config.row_cache_size);
+        assertNull(config.native_transport_max_request_data_in_flight);
+        assertNull(config.native_transport_max_request_data_in_flight_per_ip);
+        assertEquals(new DataStorageSpec.IntBytesBound(1, MEBIBYTES), config.native_transport_receive_queue_capacity);
+
+        //Confirm rate parameters were successfully parsed with the default values in cassandra.yaml
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound(0), config.compaction_throughput);
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound(23841858, DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND), config.stream_throughput_outbound);
+        assertEquals(new DataRateSpec.LongBytesPerSecondBound(24, DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND), config.inter_dc_stream_throughput_outbound);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/config/PropertiesTest.java b/test/unit/org/apache/cassandra/config/PropertiesTest.java
new file mode 100644
index 0000000..6fec2df
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/PropertiesTest.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import org.junit.Test;
+
+import org.yaml.snakeyaml.introspector.Property;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class PropertiesTest
+{
+    private final Loader loader = Properties.defaultLoader();
+
+    @Test
+    public void backAndForth() throws Exception
+    {
+        Map<String, Property> ps = loader.flatten(Config.class);
+
+        Config config = new Config();
+        Set<String> keys = ImmutableSet.of("server_encryption_options.enabled",
+                                           "client_encryption_options.enabled",
+                                           "server_encryption_options.optional",
+                                           "client_encryption_options.optional");
+        for (Property prop : ps.values())
+        {
+            // skip these properties as they don't allow get/set within the context of this test
+            if (keys.contains(prop.getName()))
+                continue;
+            Object value = prop.get(config);
+            if (value == null)
+                continue;
+            prop.set(config, value);
+            Object back = prop.get(config);
+            assertThat(back).isEqualTo(value);
+        }
+    }
+
+    @Test
+    public void configMutate() throws Exception
+    {
+        Map<String, Property> ps = loader.getProperties(Config.class);
+        assertThat(ps)
+                  .isNotEmpty()
+                  .containsKey("cluster_name") // string / primitive
+                  .containsKey("permissions_validity") // SmallestDurationSeconds
+                  .containsKey("hinted_handoff_disabled_datacenters") // set<string>
+                  .containsKey("disk_access_mode"); // enum
+
+        // can we mutate the config?
+        Config config = new Config();
+
+        ps.get("cluster_name").set(config, "properties testing");
+        assertThat(config.cluster_name).isEqualTo("properties testing");
+
+        ps.get("permissions_validity").set(config, new DurationSpec.IntMillisecondsBound(42));
+        assertThat(config.permissions_validity.toMilliseconds()).isEqualTo(42);
+
+        ps.get("hinted_handoff_disabled_datacenters").set(config, Sets.newHashSet("a", "b", "c"));
+        assertThat(config.hinted_handoff_disabled_datacenters).isEqualTo(Sets.newHashSet("a", "b", "c"));
+
+        ps.get("disk_access_mode").set(config, Config.DiskAccessMode.mmap);
+        assertThat(config.disk_access_mode).isEqualTo(Config.DiskAccessMode.mmap);
+    }
+
+    @Test
+    public void nestedMutate() throws Exception
+    {
+        Map<String, Property> ps = loader.flatten(Config.class);
+
+        assertThat(ps)
+        .containsKey("seed_provider.class_name");
+
+        // can we mutate the config?
+        Config config = new Config();
+
+        ps.get("seed_provider.class_name").set(config, "testing");
+        assertThat(config.seed_provider.class_name).isEqualTo("testing");
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/config/SnakeYamlLoader.java b/test/unit/org/apache/cassandra/config/SnakeYamlLoader.java
new file mode 100644
index 0000000..90c9a1c
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/SnakeYamlLoader.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.config;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.yaml.snakeyaml.error.YAMLException;
+import org.yaml.snakeyaml.introspector.BeanAccess;
+import org.yaml.snakeyaml.introspector.Property;
+import org.yaml.snakeyaml.introspector.PropertyUtils;
+
+import static org.apache.cassandra.utils.FBUtilities.camelToSnake;
+
+public final class SnakeYamlLoader implements Loader
+{
+    private final Helper helper = new Helper();
+
+    @Override
+    public Map<String, Property> getProperties(Class<?> root)
+    {
+        return helper.getPropertiesMap(root, BeanAccess.DEFAULT);
+    }
+
+    private static class Helper extends PropertyUtils
+    {
+        @Override
+        public Map<String, Property> getPropertiesMap(Class<?> type, BeanAccess bAccess)
+        {
+            Map<String, Property> map;
+            try
+            {
+                map = super.getPropertiesMap(type, bAccess);
+            }
+            catch (YAMLException e)
+            {
+                // some classes take a String in their constructor and expose it via toString(); these should be
+                // treated like primitive types
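+                // (e.g. value types such as the DurationSpec bounds, which are constructed from a single String)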
+                if (e.getMessage() != null && e.getMessage().startsWith("No JavaBean properties found"))
+                    return Collections.emptyMap();
+                throw e;
+            }
+            // filter out ignores
+            Set<String> ignore = new HashSet<>();
+            Map<String, String> rename = new HashMap<>();
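+            // camelCase property names are renamed to their snake_case form; when both spellings are
+            // present, the camelCase duplicate is dropped in favour of the existing snake_case entry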
+            map.values().forEach(p -> {
+                if (shouldIgnore(p))
+                {
+                    ignore.add(p.getName());
+                    return;
+                }
+                String snake = camelToSnake(p.getName());
+                if (!p.getName().equals(snake))
+                {
+                    if (map.containsKey(snake))
+                        ignore.add(p.getName());
+                    else
+                        rename.put(p.getName(), snake);
+                }
+            });
+            ignore.forEach(map::remove);
+            rename.forEach((previous, desired) -> map.put(desired, map.remove(previous)));
+            return map;
+        }
+
+        private static boolean shouldIgnore(Property p)
+        {
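+            // ignore read-only properties and anything explicitly marked with @JsonIgnore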
+            return !p.isWritable() || p.getAnnotation(JsonIgnore.class) != null;
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/config/StartupCheckOptionsTest.java b/test/unit/org/apache/cassandra/config/StartupCheckOptionsTest.java
new file mode 100644
index 0000000..f613c0e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/config/StartupCheckOptionsTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.config;
+
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Test;
+
+import org.apache.cassandra.service.DataResurrectionCheck;
+import org.apache.cassandra.service.StartupChecks.StartupCheckType;
+import org.apache.cassandra.utils.Pair;
+
+import static org.apache.cassandra.config.StartupChecksOptions.ENABLED_PROPERTY;
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.check_filesystem_ownership;
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.non_configurable_check;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class StartupCheckOptionsTest
+{
+    @Test
+    public void testStartupOptionsConfigApplication()
+    {
+        Map<StartupCheckType, Map<String, Object>> config = new EnumMap<StartupCheckType, Map<String, Object>>(StartupCheckType.class) {{
+            put(check_filesystem_ownership, new HashMap<String, Object>() {{
+                put(ENABLED_PROPERTY, true);
+                put("key", "value");
+            }});
+        }};
+
+        StartupChecksOptions options = new StartupChecksOptions(config);
+
+        assertTrue(Boolean.parseBoolean(options.getConfig(check_filesystem_ownership)
+                                               .get(ENABLED_PROPERTY)
+                                               .toString()));
+
+        assertEquals("value", options.getConfig(check_filesystem_ownership).get("key"));
+        options.set(check_filesystem_ownership, "key", "value2");
+        assertEquals("value2", options.getConfig(check_filesystem_ownership).get("key"));
+
+        assertTrue(options.isEnabled(check_filesystem_ownership));
+        options.disable(check_filesystem_ownership);
+        assertFalse(options.isEnabled(check_filesystem_ownership));
+        assertTrue(options.isDisabled(check_filesystem_ownership));
+    }
+
+    @Test
+    public void testNoOptions()
+    {
+        StartupChecksOptions options = new StartupChecksOptions();
+
+        assertTrue(options.isEnabled(non_configurable_check));
+
+        // disabling does not do anything on a non-configurable check
+        options.disable(non_configurable_check);
+        assertTrue(options.isEnabled(non_configurable_check));
+
+        options.set(non_configurable_check, "key", "value");
+
+        // we cannot put anything into a non-configurable check
+        assertFalse(options.getConfig(non_configurable_check).containsKey("key"));
+    }
+
+    @Test
+    public void testEmptyDisabledValues()
+    {
+        Map<StartupCheckType, Map<String, Object>> emptyConfig = new EnumMap<StartupCheckType, Map<String, Object>>(StartupCheckType.class) {{
+            put(check_filesystem_ownership, new HashMap<>());
+        }};
+
+        Map<StartupCheckType, Map<String, Object>> emptyEnabledConfig = new EnumMap<StartupCheckType, Map<String, Object>>(StartupCheckType.class) {{
+            put(check_filesystem_ownership, new HashMap<String, Object>() {{
+                put(ENABLED_PROPERTY, null);
+            }});
+        }};
+
+        // an empty options map, or an enabled property with a null value, does not enable a check that is disabled by default
+
+        StartupChecksOptions options1 = new StartupChecksOptions(emptyConfig);
+        assertTrue(options1.isDisabled(check_filesystem_ownership));
+
+        StartupChecksOptions options2 = new StartupChecksOptions(emptyEnabledConfig);
+        assertTrue(options2.isDisabled(check_filesystem_ownership));
+    }
+
+    @Test
+    public void testChecksDisabledByDefaultAreNotEnabled()
+    {
+        Map<StartupCheckType, Map<String, Object>> emptyConfig = new EnumMap<>(StartupCheckType.class);
+        StartupChecksOptions options = new StartupChecksOptions(emptyConfig);
+        assertTrue(options.isDisabled(check_filesystem_ownership));
+    }
+
+    @Test
+    public void testExcludedKeyspacesInDataResurrectionCheckOptions()
+    {
+        Map<String, Object> config = new HashMap<String, Object>(){{
+            put("excluded_keyspaces", "ks1,ks2,ks3");
+        }};
+        DataResurrectionCheck check = new DataResurrectionCheck();
+
+        Set<String> excludedKeyspaces = check.getExcludedKeyspaces(config);
+        assertEquals(3, excludedKeyspaces.size());
+        assertTrue(excludedKeyspaces.contains("ks1"));
+        assertTrue(excludedKeyspaces.contains("ks2"));
+        assertTrue(excludedKeyspaces.contains("ks3"));
+    }
+
+    @Test
+    public void testExcludedTablesInDataResurrectionCheckOptions()
+    {
+        for (String input : new String[]{
+        "ks1.tb1,ks1.tb2,ks3.tb3",
+        " ks1 . tb1,  ks1 .tb2  ,ks3 .tb3  "
+        })
+        {
+            Map<String, Object> config = new HashMap<String, Object>(){{
+                put("excluded_tables", input);
+            }};
+
+            DataResurrectionCheck check = new DataResurrectionCheck();
+            Set<Pair<String, String>> excludedTables = check.getExcludedTables(config);
+            assertEquals(3, excludedTables.size());
+            assertTrue(excludedTables.contains(Pair.create("ks1", "tb1")));
+            assertTrue(excludedTables.contains(Pair.create("ks1", "tb2")));
+            assertTrue(excludedTables.contains(Pair.create("ks3", "tb3")));
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java b/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
index 3b7e64e..06be1dc 100644
--- a/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
+++ b/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
@@ -18,18 +18,205 @@
 
 package org.apache.cassandra.config;
 
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
+import java.util.function.Predicate;
 
 import com.google.common.collect.ImmutableMap;
 import org.junit.Test;
 
+import org.apache.cassandra.distributed.shared.WithProperties;
+import org.apache.cassandra.io.util.File;
+import org.yaml.snakeyaml.error.YAMLException;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.CONFIG_ALLOW_SYSTEM_PROPERTIES;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.KIBIBYTES;
+import static org.apache.cassandra.config.YamlConfigurationLoader.SYSTEM_PROPERTY_PREFIX;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 
 public class YamlConfigurationLoaderTest
 {
     @Test
+    public void validateTypes()
+    {
+        Predicate<Field> isDurationSpec = f -> f.getType().getTypeName().equals("org.apache.cassandra.config.DurationSpec");
+        Predicate<Field> isDataStorageSpec = f -> f.getType().getTypeName().equals("org.apache.cassandra.config.DataStorageSpec");
+        Predicate<Field> isDataRateSpec = f -> f.getType().getTypeName().equals("org.apache.cassandra.config.DataRateSpec");
+
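+        // Config fields should use a concrete bound type (e.g. DurationSpec.IntSecondsBound,
+        // DataStorageSpec.IntMebibytesBound, DataRateSpec.LongBytesPerSecondBound) rather than the abstract spec type.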
+        assertEquals("You have wrongly defined a config parameter of abstract type DurationSpec, DataStorageSpec or DataRateSpec. " +
+                     "Please check the config docs, otherwise Cassandra won't be able to start with this parameter being set in cassandra.yaml.",
+                     0,
+                     Arrays.stream(Config.class.getFields())
+                           .filter(f -> !Modifier.isStatic(f.getModifiers()))
+                           .filter(isDurationSpec.or(isDataRateSpec).or(isDataStorageSpec)).count());
+    }
+
+    @Test
+    public void updateInPlace()
+    {
+        Config config = new Config();
+        Map<String, Object> map = ImmutableMap.<String, Object>builder().put("storage_port", 123)
+                                                                        .put("commitlog_sync", Config.CommitLogSync.batch)
+                                                                        .put("seed_provider.class_name", "org.apache.cassandra.locator.SimpleSeedProvider")
+                                                                        .put("client_encryption_options.cipher_suites", Collections.singletonList("FakeCipher"))
+                                                                        .put("client_encryption_options.optional", false)
+                                                                        .put("client_encryption_options.enabled", true)
+                                                                        .build();
+        Config updated = YamlConfigurationLoader.updateFromMap(map, true, config);
+        assert updated == config : "Config pointers do not match";
+        assertThat(config.storage_port).isEqualTo(123);
+        assertThat(config.commitlog_sync).isEqualTo(Config.CommitLogSync.batch);
+        assertThat(config.seed_provider.class_name).isEqualTo("org.apache.cassandra.locator.SimpleSeedProvider");
+        assertThat(config.client_encryption_options.cipher_suites).isEqualTo(Collections.singletonList("FakeCipher"));
+        assertThat(config.client_encryption_options.optional).isFalse();
+        assertThat(config.client_encryption_options.enabled).isTrue();
+    }
+
+    @Test
+    public void withSystemProperties()
+    {
+        // For primitive types or data types which use a String constructor, we can support these as nested properties.
+        // If the type is a collection, then the string format doesn't make sense and will fail with an error such as
+        //   Cannot create property=client_encryption_options.cipher_suites for JavaBean=org.apache.cassandra.config.Config@1f59a598
+        //   No single argument constructor found for interface java.util.List : null
+        // The reason is that it's not a scalar but a complex (collection) type, so the map we use needs to contain a collection to match.
+        // It is possible that we could define a common string representation for these types so they can be written to; this
+        // is an issue that SettingsTable may need to worry about.
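+        // As a sketch (using the same values as this test): scalar overrides such as
+        //   -D<SYSTEM_PROPERTY_PREFIX>storage_port=123
+        // are supported, while collection-typed paths like client_encryption_options.cipher_suites are not.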
+        try (WithProperties ignore = new WithProperties(CONFIG_ALLOW_SYSTEM_PROPERTIES.getKey(), "true",
+                                                        SYSTEM_PROPERTY_PREFIX + "storage_port", "123",
+                                                        SYSTEM_PROPERTY_PREFIX + "commitlog_sync", "batch",
+                                                        SYSTEM_PROPERTY_PREFIX + "seed_provider.class_name", "org.apache.cassandra.locator.SimpleSeedProvider",
+//                                                        SYSTEM_PROPERTY_PREFIX + "client_encryption_options.cipher_suites", "[\"FakeCipher\"]",
+                                                        SYSTEM_PROPERTY_PREFIX + "client_encryption_options.optional", "false",
+                                                        SYSTEM_PROPERTY_PREFIX + "client_encryption_options.enabled", "true",
+                                                        SYSTEM_PROPERTY_PREFIX + "doesnotexist", "true"
+        ))
+        {
+            Config config = YamlConfigurationLoader.fromMap(Collections.emptyMap(), true, Config.class);
+            assertThat(config.storage_port).isEqualTo(123);
+            assertThat(config.commitlog_sync).isEqualTo(Config.CommitLogSync.batch);
+            assertThat(config.seed_provider.class_name).isEqualTo("org.apache.cassandra.locator.SimpleSeedProvider");
+//            assertThat(config.client_encryption_options.cipher_suites).isEqualTo(Collections.singletonList("FakeCipher"));
+            assertThat(config.client_encryption_options.optional).isFalse();
+            assertThat(config.client_encryption_options.enabled).isTrue();
+        }
+    }
+
+    @Test
+    public void readConvertersSpecialCasesFromConfig()
+    {
+        Config c = load("test/conf/cassandra-converters-special-cases.yaml");
+        assertThat(c.sstable_preemptive_open_interval).isNull();
+        assertThat(c.index_summary_resize_interval).isNull();
+        assertThat(c.cache_load_timeout).isEqualTo(new DurationSpec.IntSecondsBound("0s"));
+
+        c = load("test/conf/cassandra-converters-special-cases-old-names.yaml");
+        assertThat(c.sstable_preemptive_open_interval).isNull();
+        assertThat(c.index_summary_resize_interval).isNull();
+        assertThat(c.cache_load_timeout).isEqualTo(new DurationSpec.IntSecondsBound("0s"));
+    }
+
+    @Test
+    public void readConvertersSpecialCasesFromMap()
+    {
+        Map<String, Object> map = new HashMap<>();
+        map.put("sstable_preemptive_open_interval", null);
+        map.put("index_summary_resize_interval", null);
+        map.put("credentials_update_interval", null);
+
+        Config c = YamlConfigurationLoader.fromMap(map, true, Config.class);
+        assertThat(c.sstable_preemptive_open_interval).isNull();
+        assertThat(c.index_summary_resize_interval).isNull();
+        assertThat(c.credentials_update_interval).isNull();
+
+        map = ImmutableMap.of(
+        "sstable_preemptive_open_interval_in_mb", "-1",
+        "index_summary_resize_interval_in_minutes", "-1",
+        "cache_load_timeout_seconds", "-1",
+        "credentials_update_interval_in_ms", "-1"
+        );
+        c = YamlConfigurationLoader.fromMap(map, Config.class);
+
+        assertThat(c.sstable_preemptive_open_interval).isNull();
+        assertThat(c.index_summary_resize_interval).isNull();
+        assertThat(c.cache_load_timeout).isEqualTo(new DurationSpec.IntSecondsBound("0s"));
+        assertThat(c.credentials_update_interval).isNull();
+    }
+
+    @Test
+    public void readThresholdsFromConfig()
+    {
+        Config c = load("test/conf/cassandra.yaml");
+
+        assertThat(c.read_thresholds_enabled).isTrue();
+
+        assertThat(c.coordinator_read_size_warn_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1 << 10, KIBIBYTES));
+        assertThat(c.coordinator_read_size_fail_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1 << 12, KIBIBYTES));
+
+        assertThat(c.local_read_size_warn_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1 << 12, KIBIBYTES));
+        assertThat(c.local_read_size_fail_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1 << 13, KIBIBYTES));
+
+        assertThat(c.row_index_read_size_warn_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1 << 12, KIBIBYTES));
+        assertThat(c.row_index_read_size_fail_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1 << 13, KIBIBYTES));
+    }
+
+    @Test
+    public void readThresholdsFromMap()
+    {
+
+        Map<String, Object> map = ImmutableMap.of(
+        "read_thresholds_enabled", true,
+        "coordinator_read_size_warn_threshold", "1024KiB",
+        "local_read_size_fail_threshold", "1024KiB",
+        "row_index_read_size_warn_threshold", "1024KiB",
+        "row_index_read_size_fail_threshold", "1024KiB"
+        );
+
+        Config c = YamlConfigurationLoader.fromMap(map, Config.class);
+        assertThat(c.read_thresholds_enabled).isTrue();
+
+        assertThat(c.coordinator_read_size_warn_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1024, KIBIBYTES));
+        assertThat(c.coordinator_read_size_fail_threshold).isNull();
+
+        assertThat(c.local_read_size_warn_threshold).isNull();
+        assertThat(c.local_read_size_fail_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1024, KIBIBYTES));
+
+        assertThat(c.row_index_read_size_warn_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1024, KIBIBYTES));
+        assertThat(c.row_index_read_size_fail_threshold).isEqualTo(new DataStorageSpec.LongBytesBound(1024, KIBIBYTES));
+    }
+
+    @Test
+    public void notNullableLegacyProperties()
+    {
+        // In the past commitlog_sync_period and commitlog_sync_group_window were ints in Config, so they could not
+        // be assigned a null value from the yaml file. To ensure this behavior was not changed when we moved to DurationSpec
+        // in CASSANDRA-15234, we assign them a value of 0.
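+        // An empty string cannot be converted to the duration bound, so SnakeYAML is expected to raise a YAMLException here.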
+
+        Map<String, Object> map = ImmutableMap.of(
+        "commitlog_sync_period", ""
+        );
+        try
+        {
+            Config config = YamlConfigurationLoader.fromMap(map, Config.class);
+        }
+        catch (YAMLException e)
+        {
+            assertTrue(e.getMessage().contains("Cannot create property=commitlog_sync_period for JavaBean=org.apache.cassandra.config.Config"));
+        }
+
+        // loadConfig will catch this exception on startup and throw a ConfigurationException
+    }
+
+    @Test
     public void fromMapTest()
     {
         int storagePort = 123;
@@ -43,8 +230,9 @@
                                  .put("commitlog_sync", commitLogSync)
                                  .put("seed_provider", seedProvider)
                                  .put("client_encryption_options", encryptionOptions)
-                                 .put("internode_send_buff_size_in_bytes", 5)
-                                 .put("internode_recv_buff_size_in_bytes", 5)
+                                 .put("internode_socket_send_buffer_size", "5B")
+                                 .put("internode_socket_receive_buffer_size", "5B")
+                                 .put("commitlog_sync_group_window_in_ms", "42")
                                  .build();
 
         Config config = YamlConfigurationLoader.fromMap(map, Config.class);
@@ -53,7 +241,147 @@
         assertEquals(seedProvider, config.seed_provider); // Check a parameterized class
         assertEquals(false, config.client_encryption_options.optional); // Check a nested object
         assertEquals(true, config.client_encryption_options.enabled); // Check a nested object
-        assertEquals(5, config.internode_socket_send_buffer_size_in_bytes); // Check names backward compatibility (CASSANDRA-17141)
-        assertEquals(5, config.internode_socket_receive_buffer_size_in_bytes); // Check names backward compatibility (CASSANDRA-17141)
+        assertEquals(new DataStorageSpec.IntBytesBound("5B"), config.internode_socket_send_buffer_size); // Check names backward compatibility (CASSANDRA-17141 and CASSANDRA-15234)
+        assertEquals(new DataStorageSpec.IntBytesBound("5B"), config.internode_socket_receive_buffer_size); // Check names backward compatibility (CASSANDRA-17141 and CASSANDRA-15234)
+    }
+
+    @Test
+    public void typeChange()
+    {
+        Config old = YamlConfigurationLoader.fromMap(ImmutableMap.of("key_cache_save_period", 42,
+                                                                     "row_cache_save_period", 42,
+                                                                     "counter_cache_save_period", 42), Config.class);
+        Config latest = YamlConfigurationLoader.fromMap(ImmutableMap.of("key_cache_save_period", "42s",
+                                                                        "row_cache_save_period", "42s",
+                                                                        "counter_cache_save_period", "42s"), Config.class);
+        assertThat(old.key_cache_save_period).isEqualTo(latest.key_cache_save_period).isEqualTo(new DurationSpec.IntSecondsBound(42));
+        assertThat(old.row_cache_save_period).isEqualTo(latest.row_cache_save_period).isEqualTo(new DurationSpec.IntSecondsBound(42));
+        assertThat(old.counter_cache_save_period).isEqualTo(latest.counter_cache_save_period).isEqualTo(new DurationSpec.IntSecondsBound(42));
+    }
+
+    @Test
+    public void sharedErrorReportingExclusions()
+    {
+        Config config = load("data/config/YamlConfigurationLoaderTest/shared_client_error_reporting_exclusions.yaml");
+        SubnetGroups expected = new SubnetGroups(Arrays.asList("127.0.0.1", "127.0.0.0/31"));
+        assertThat(config.client_error_reporting_exclusions).isEqualTo(expected);
+        assertThat(config.internode_error_reporting_exclusions).isEqualTo(expected);
+    }
+
+    @Test
+    public void converters()
+    {
+        // MILLIS_DURATION
+        assertThat(from("permissions_validity_in_ms", "42").permissions_validity.toMilliseconds()).isEqualTo(42);
+        assertThatThrownBy(() -> from("permissions_validity", -2).permissions_validity.toMilliseconds())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid duration: -2 Accepted units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS] where case matters and only non-negative values.");
+
+        // MILLIS_DOUBLE_DURATION
+        assertThat(from("commitlog_sync_group_window_in_ms", "42").commitlog_sync_group_window.toMilliseconds()).isEqualTo(42);
+        assertThat(from("commitlog_sync_group_window_in_ms", "0.2").commitlog_sync_group_window.toMilliseconds()).isEqualTo(0);
+        assertThat(from("commitlog_sync_group_window_in_ms", "42.5").commitlog_sync_group_window.toMilliseconds()).isEqualTo(43);
+        assertThat(from("commitlog_sync_group_window_in_ms", "NaN").commitlog_sync_group_window.toMilliseconds()).isEqualTo(0);
+        assertThatThrownBy(() -> from("commitlog_sync_group_window_in_ms", -2).commitlog_sync_group_window.toMilliseconds())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid duration: value must be non-negative");
+
+        // MILLIS_CUSTOM_DURATION
+        assertThat(from("permissions_update_interval_in_ms", 42).permissions_update_interval).isEqualTo(new DurationSpec.IntMillisecondsBound(42));
+        assertThat(from("permissions_update_interval_in_ms", -1).permissions_update_interval).isNull();
+        assertThatThrownBy(() -> from("permissions_update_interval_in_ms", -2))
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid duration: value must be non-negative");
+
+        // SECONDS_DURATION
+        assertThat(from("streaming_keep_alive_period_in_secs", "42").streaming_keep_alive_period.toSeconds()).isEqualTo(42);
+        assertThatThrownBy(() -> from("streaming_keep_alive_period_in_secs", -2).streaming_keep_alive_period.toSeconds())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid duration: value must be non-negative");
+
+        // NEGATIVE_SECONDS_DURATION
+        assertThat(from("validation_preview_purge_head_start_in_sec", -1).validation_preview_purge_head_start.toSeconds()).isEqualTo(0);
+        assertThat(from("validation_preview_purge_head_start_in_sec", 0).validation_preview_purge_head_start.toSeconds()).isEqualTo(0);
+        assertThat(from("validation_preview_purge_head_start_in_sec", 42).validation_preview_purge_head_start.toSeconds()).isEqualTo(42);
+
+        // SECONDS_CUSTOM_DURATION already tested in type change
+
+        // MINUTES_CUSTOM_DURATION
+        assertThat(from("index_summary_resize_interval_in_minutes", "42").index_summary_resize_interval.toMinutes()).isEqualTo(42);
+        assertThat(from("index_summary_resize_interval_in_minutes", "-1").index_summary_resize_interval).isNull();
+        assertThatThrownBy(() -> from("index_summary_resize_interval_in_minutes", -2).index_summary_resize_interval.toMinutes())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid duration: value must be non-negative");
+
+        // BYTES_CUSTOM_DATASTORAGE
+        assertThat(from("native_transport_max_concurrent_requests_in_bytes_per_ip", -1).native_transport_max_request_data_in_flight_per_ip).isEqualTo(null);
+        assertThat(from("native_transport_max_concurrent_requests_in_bytes_per_ip", 0).native_transport_max_request_data_in_flight_per_ip.toBytes()).isEqualTo(0);
+        assertThat(from("native_transport_max_concurrent_requests_in_bytes_per_ip", 42).native_transport_max_request_data_in_flight_per_ip.toBytes()).isEqualTo(42);
+
+        // MEBIBYTES_DATA_STORAGE
+        assertThat(from("memtable_heap_space_in_mb", "42").memtable_heap_space.toMebibytes()).isEqualTo(42);
+        assertThatThrownBy(() -> from("memtable_heap_space_in_mb", -2).memtable_heap_space.toMebibytes())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid data storage: value must be non-negative");
+
+        // KIBIBYTES_DATASTORAGE
+        assertThat(from("column_index_size_in_kb", "42").column_index_size.toKibibytes()).isEqualTo(42);
+        assertThatThrownBy(() -> from("column_index_size_in_kb", -2).column_index_size.toKibibytes())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid data storage: value must be non-negative");
+
+        // BYTES_DATASTORAGE
+        assertThat(from("internode_max_message_size_in_bytes", "42").internode_max_message_size.toBytes()).isEqualTo(42);
+        assertThatThrownBy(() -> from("internode_max_message_size_in_bytes", -2).internode_max_message_size.toBytes())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid data storage: value must be non-negative");
+
+        // MEBIBYTES_PER_SECOND_DATA_RATE
+        assertThat(from("compaction_throughput_mb_per_sec", "42").compaction_throughput.toMebibytesPerSecondAsInt()).isEqualTo(42);
+        assertThatThrownBy(() -> from("compaction_throughput_mb_per_sec", -2).compaction_throughput.toMebibytesPerSecondAsInt())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid data rate: value must be non-negative");
+
+        // MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE
+        assertThat(from("stream_throughput_outbound_megabits_per_sec", "42").stream_throughput_outbound.toMegabitsPerSecondAsInt()).isEqualTo(42);
+        assertThatThrownBy(() -> from("stream_throughput_outbound_megabits_per_sec", -2).stream_throughput_outbound.toMegabitsPerSecondAsInt())
+        .hasRootCauseInstanceOf(IllegalArgumentException.class)
+        .hasRootCauseMessage("Invalid data rate: value must be non-negative");
+
+        // NEGATIVE_MEBIBYTES_DATA_STORAGE_INT
+        assertThat(from("sstable_preemptive_open_interval_in_mb", "1").sstable_preemptive_open_interval.toMebibytes()).isEqualTo(1);
+        assertThat(from("sstable_preemptive_open_interval_in_mb", -2).sstable_preemptive_open_interval).isNull();
+    }
+
+    private static Config from(Object... values)
+    {
+        assert values.length % 2 == 0 : "Map can only be created with an even number of inputs: given " + values.length;
+        ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
+        for (int i = 0; i < values.length; i += 2)
+            builder.put((String) values[i], values[i + 1]);
+        return YamlConfigurationLoader.fromMap(builder.build(), Config.class);
+    }
+
+    private static Config load(String path)
+    {
+        URL url = YamlConfigurationLoaderTest.class.getClassLoader().getResource(path);
+        if (url == null)
+        {
+            try
+            {
+                url = new File(path).toPath().toUri().toURL();
+            }
+            catch (MalformedURLException e)
+            {
+                throw new AssertionError(e);
+            }
+        }
+        return new YamlConfigurationLoader().loadConfig(url);
     }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/BatchTest.java b/test/unit/org/apache/cassandra/cql3/BatchTest.java
index 28b0d5c..330271e 100644
--- a/test/unit/org/apache/cassandra/cql3/BatchTest.java
+++ b/test/unit/org/apache/cassandra/cql3/BatchTest.java
@@ -22,9 +22,12 @@
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.exceptions.InvalidQueryException;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.service.EmbeddedCassandraService;
+
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -44,8 +47,7 @@
     @BeforeClass()
     public static void setup() throws ConfigurationException, IOException
     {
-        cassandra = new EmbeddedCassandraService();
-        cassandra.start();
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
 
         cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
         session = cluster.connect();
@@ -75,6 +77,15 @@
         clustering = session.prepare("insert into junit.clustering(id, clustering1, clustering2, clustering3, val) values(?,?,?,?,?)");
     }
 
+    @AfterClass
+    public static void tearDown()
+    {
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
+    }
+
     @Test(expected = InvalidQueryException.class)
     public void testMixedInCounterBatch()
     {
diff --git a/test/unit/org/apache/cassandra/cql3/CQL3TypeLiteralTest.java b/test/unit/org/apache/cassandra/cql3/CQL3TypeLiteralTest.java
index 8e6b761..f0d2c19 100644
--- a/test/unit/org/apache/cassandra/cql3/CQL3TypeLiteralTest.java
+++ b/test/unit/org/apache/cassandra/cql3/CQL3TypeLiteralTest.java
@@ -30,10 +30,11 @@
 
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.serializers.*;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -225,8 +226,8 @@
 
         for (int i = 0; i < 20; i++)
         {
-            UUID v = UUIDGen.getTimeUUID(randLong(System.currentTimeMillis()));
-            addNativeValue(v.toString(), CQL3Type.Native.TIMEUUID, TimeUUIDType.instance.decompose(v));
+            TimeUUID v = TimeUUID.Generator.atUnixMillis(randLong(currentTimeMillis()));
+            addNativeValue(v.toString(), CQL3Type.Native.TIMEUUID, v.toBytes());
         }
         addNativeValue("null", CQL3Type.Native.TIMEUUID, ByteBufferUtil.EMPTY_BYTE_BUFFER);
         addNativeValue("null", CQL3Type.Native.TIMEUUID, null);
diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java
index dc6a55e..23b392f 100644
--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.cql3;
 
-import java.io.File;
 import java.io.IOException;
 import java.math.BigDecimal;
 import java.math.BigInteger;
@@ -49,23 +48,37 @@
 import com.google.common.collect.Iterables;
 
 import org.junit.*;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.codahale.metrics.Gauge;
 import com.datastax.driver.core.*;
 import com.datastax.driver.core.DataType;
 import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.exceptions.UnauthorizedException;
 
 import com.datastax.shaded.netty.channel.EventLoopGroup;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.auth.AuthCacheService;
+import org.apache.cassandra.auth.AuthKeyspace;
+import org.apache.cassandra.auth.AuthSchemaChangeListener;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.IRoleManager;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
+import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
 import org.apache.cassandra.db.virtual.VirtualSchemaKeyspace;
+import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.index.SecondaryIndexManager;
-import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.TokenMetadata;
+import org.apache.cassandra.metrics.CassandraMetricsRegistry;
 import org.apache.cassandra.metrics.ClientMetrics;
 import org.apache.cassandra.schema.*;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -90,23 +103,36 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JMXServerUtils;
+import org.apache.cassandra.utils.TimeUUID;
+import org.assertj.core.api.Assertions;
+import org.apache.cassandra.utils.Pair;
+import org.awaitility.Awaitility;
 
 import static com.datastax.driver.core.SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS;
 import static com.datastax.driver.core.SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS;
-import static junit.framework.Assert.assertNotNull;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Base class for CQL tests.
  */
 public abstract class CQLTester
 {
+    /**
+     * The super user
+     */
+    private static final User SUPER_USER = new User("cassandra", "cassandra");
+
     protected static final Logger logger = LoggerFactory.getLogger(CQLTester.class);
 
     public static final String KEYSPACE = "cql_test_keyspace";
     public static final String KEYSPACE_PER_TEST = "cql_test_keyspace_alt";
     protected static final boolean USE_PREPARED_VALUES = Boolean.valueOf(System.getProperty("cassandra.test.use_prepared", "true"));
     protected static final boolean REUSE_PREPARED = Boolean.valueOf(System.getProperty("cassandra.test.reuse_prepared", "true"));
-    protected static final long ROW_CACHE_SIZE_IN_MB = Integer.valueOf(System.getProperty("cassandra.test.row_cache_size_in_mb", "0"));
+    protected static final long ROW_CACHE_SIZE_IN_MIB = new DataStorageSpec.LongMebibytesBound(System.getProperty("cassandra.test.row_cache_size", "0MiB")).toMebibytes();
     private static final AtomicInteger seqNumber = new AtomicInteger();
     protected static final ByteBuffer TOO_BIG = ByteBuffer.allocate(FBUtilities.MAX_UNSIGNED_SHORT + 1024);
     public static final String DATA_CENTER = ServerTestUtils.DATA_CENTER;
@@ -122,8 +148,10 @@
     protected static final int nativePort;
     protected static final InetAddress nativeAddr;
     protected static final Set<InetAddressAndPort> remoteAddrs = new HashSet<>();
-    private static final Map<ProtocolVersion, Cluster> clusters = new HashMap<>();
-    protected static final Map<ProtocolVersion, Session> sessions = new HashMap<>();
+    private static final Map<Pair<User, ProtocolVersion>, Cluster> clusters = new HashMap<>();
+    private static final Map<Pair<User, ProtocolVersion>, Session> sessions = new HashMap<>();
+
+    private static Consumer<Cluster.Builder> clusterBuilderConfigurator;
 
     public static final List<ProtocolVersion> PROTOCOL_VERSIONS = new ArrayList<>(ProtocolVersion.SUPPORTED.size());
 
@@ -169,10 +197,13 @@
 
     private List<String> keyspaces = new ArrayList<>();
     private List<String> tables = new ArrayList<>();
+    private List<String> views = new ArrayList<>();
     private List<String> types = new ArrayList<>();
     private List<String> functions = new ArrayList<>();
     private List<String> aggregates = new ArrayList<>();
 
+    private User user;
+
     // We don't use USE_PREPARED_VALUES in the code below so some tests can force value preparation (if the result
     // is not expected to be the same without preparation)
     private boolean usePrepared = USE_PREPARED_VALUES;
@@ -184,12 +215,30 @@
     }
 
     /**
+     * Use the specified user for executing the queries over the network.
+     * @param username the user name
+     * @param password the user password
+     */
+    public void useUser(String username, String password)
+    {
+        this.user = new User(username, password);
+    }
+
+    /**
+     * Use the super user for executing the queries over the network.
+     */
+    public void useSuperUser()
+    {
+        this.user = SUPER_USER;
+    }
+
+    /**
      * Returns a port number that is automatically allocated,
      * typically from an ephemeral port range.
      *
      * @return a port number
      */
-    private static int getAutomaticallyAllocatedPort(InetAddress address)
+    public static int getAutomaticallyAllocatedPort(InetAddress address)
     {
         try
         {
@@ -269,8 +318,8 @@
     @BeforeClass
     public static void setUpClass()
     {
-        if (ROW_CACHE_SIZE_IN_MB > 0)
-            DatabaseDescriptor.setRowCacheSizeInMB(ROW_CACHE_SIZE_IN_MB);
+        if (ROW_CACHE_SIZE_IN_MIB > 0)
+            DatabaseDescriptor.setRowCacheSizeInMiB(ROW_CACHE_SIZE_IN_MIB);
         StorageService.instance.setPartitionerUnsafe(Murmur3Partitioner.instance);
 
         // Once per-JVM is enough
@@ -327,14 +376,17 @@
 
         final List<String> keyspacesToDrop = copy(keyspaces);
         final List<String> tablesToDrop = copy(tables);
+        final List<String> viewsToDrop = copy(views);
         final List<String> typesToDrop = copy(types);
         final List<String> functionsToDrop = copy(functions);
         final List<String> aggregatesToDrop = copy(aggregates);
         keyspaces = null;
         tables = null;
+        views = null;
         types = null;
         functions = null;
         aggregates = null;
+        user = null;
 
         // We want to clean up after the test, but dropping a table is rather long so just do that asynchronously
         ScheduledExecutors.optionalTasks.execute(new Runnable()
@@ -343,6 +395,9 @@
             {
                 try
                 {
+                    for (int i = viewsToDrop.size() - 1; i >= 0; i--)
+                        schemaChange(String.format("DROP MATERIALIZED VIEW IF EXISTS %s.%s", KEYSPACE, viewsToDrop.get(i)));
+
                     for (int i = tablesToDrop.size() - 1; i >= 0; i--)
                         schemaChange(String.format("DROP TABLE IF EXISTS %s.%s", KEYSPACE, tablesToDrop.get(i)));
 
@@ -385,12 +440,14 @@
 
     public static List<String> buildNodetoolArgs(List<String> args)
     {
+        int port = jmxPort == 0 ? Integer.getInteger("cassandra.jmx.local.port", 7199) : jmxPort;
+        String host = jmxHost == null ? "127.0.0.1" : jmxHost;
         List<String> allArgs = new ArrayList<>();
         allArgs.add("bin/nodetool");
         allArgs.add("-p");
-        allArgs.add(Integer.toString(jmxPort));
+        allArgs.add(String.valueOf(port));
         allArgs.add("-h");
-        allArgs.add(jmxHost == null ? "127.0.0.1" : jmxHost);
+        allArgs.add(host);
         allArgs.addAll(args);
         return allArgs;
     }
@@ -419,31 +476,59 @@
         return allArgs;
     }
 
-    protected static void requireNetworkWithoutDriver()
+    protected static void requireAuthentication()
     {
-        startServices();
-        startServer(server -> {});
+        DatabaseDescriptor.setAuthenticator(new AuthTestUtils.LocalPasswordAuthenticator());
+        DatabaseDescriptor.setAuthorizer(new AuthTestUtils.LocalCassandraAuthorizer());
+        DatabaseDescriptor.setNetworkAuthorizer(new AuthTestUtils.LocalCassandraNetworkAuthorizer());
+
+        // The CassandraRoleManager constructor sets the supported and alterable options based on the
+        // DatabaseDescriptor authenticator type, so it needs to be created only after the authenticator is set.
+        IRoleManager roleManager = new AuthTestUtils.LocalCassandraRoleManager()
+        {
+            public void setup()
+            {
+                loadRoleStatement();
+                QueryProcessor.executeInternal(createDefaultRoleQuery());
+            }
+        };
+
+        DatabaseDescriptor.setRoleManager(roleManager);
+        SchemaTestUtil.addOrUpdateKeyspace(AuthKeyspace.metadata(), true);
+        DatabaseDescriptor.getRoleManager().setup();
+        DatabaseDescriptor.getAuthenticator().setup();
+        DatabaseDescriptor.getAuthorizer().setup();
+        DatabaseDescriptor.getNetworkAuthorizer().setup();
+        Schema.instance.registerListener(new AuthSchemaChangeListener());
+
+        AuthCacheService.initializeAndRegisterCaches();
     }
 
-    // lazy initialization for all tests that require Java Driver
+    /**
+     *  Initialize Native Transport for the tests that need it.
+     */
     protected static void requireNetwork() throws ConfigurationException
     {
-        requireNetwork(server -> {});
+        requireNetwork(server -> {}, cluster -> {});
     }
 
-    // lazy initialization for all tests that require Java Driver
-    protected static void requireNetwork(Consumer<Server.Builder> decorator) throws ConfigurationException
+    /**
+     *  Initialize Native Transport for the tests that need it.
+     */
+    protected static void requireNetwork(Consumer<Server.Builder> serverConfigurator,
+                                         Consumer<Cluster.Builder> clusterConfigurator) throws ConfigurationException
     {
         if (server != null)
             return;
 
+        clusterBuilderConfigurator = clusterConfigurator;
+
         startServices();
-        initializeNetwork(decorator, null);
+        startServer(serverConfigurator);
     }
 
     private static void startServices()
     {
-        SystemKeyspace.finishStartup();
         VirtualKeyspaceRegistry.instance.register(VirtualSchemaKeyspace.instance);
         StorageService.instance.initServer();
         SchemaLoader.startGossiper();
@@ -451,10 +536,11 @@
 
     protected static void reinitializeNetwork()
     {
-        reinitializeNetwork(null);
+        reinitializeNetwork(server -> {}, cluster -> {});
     }
 
-    protected static void reinitializeNetwork(Consumer<Cluster.Builder> clusterConfigurator)
+    protected static void reinitializeNetwork(Consumer<Server.Builder> serverConfigurator,
+                                              Consumer<Cluster.Builder> clusterConfigurator)
     {
         if (server != null && server.isRunning())
         {
@@ -470,46 +556,9 @@
         clusters.clear();
         sessions.clear();
 
-        initializeNetwork(server -> {}, clusterConfigurator);
-    }
+        clusterBuilderConfigurator = clusterConfigurator;
 
-    private static void initializeNetwork(Consumer<Server.Builder> decorator, Consumer<Cluster.Builder> clusterConfigurator)
-    {
-        startServer(decorator);
-
-        for (ProtocolVersion version : PROTOCOL_VERSIONS)
-        {
-            if (clusters.containsKey(version))
-                continue;
-
-            SocketOptions socketOptions = new SocketOptions()
-                                          .setConnectTimeoutMillis(Integer.getInteger("cassandra.test.driver.connection_timeout_ms", DEFAULT_CONNECT_TIMEOUT_MILLIS)) // default is 5000
-                                          .setReadTimeoutMillis(Integer.getInteger("cassandra.test.driver.read_timeout_ms", DEFAULT_READ_TIMEOUT_MILLIS)); // default is 12000
-
-            logger.info("Timeouts: {} / {}", socketOptions.getConnectTimeoutMillis(), socketOptions.getReadTimeoutMillis());
-
-            Cluster.Builder builder = Cluster.builder()
-                                             .withoutJMXReporting()
-                                             .addContactPoints(nativeAddr)
-                                             .withClusterName("Test Cluster")
-                                             .withPort(nativePort)
-                                             .withSocketOptions(socketOptions)
-                                             .withNettyOptions(IMMEDIATE_CONNECTION_SHUTDOWN_NETTY_OPTIONS);
-
-        if (clusterConfigurator != null)
-                clusterConfigurator.accept(builder);
-
-            if (version.isBeta())
-                builder = builder.allowBetaProtocolVersion();
-            else
-                builder = builder.withProtocolVersion(com.datastax.driver.core.ProtocolVersion.fromInt(version.asInt()));
-
-            Cluster cluster = builder.build();
-            clusters.put(version, cluster);
-            sessions.put(version, cluster.connect());
-
-            logger.info("Started Java Driver instance for protocol version {}", version);
-        }
+        startServer(serverConfigurator);
     }
 
     private static void startServer(Consumer<Server.Builder> decorator)
@@ -521,6 +570,41 @@
         server.start();
     }
 
+    private static Cluster initClientCluster(User user, ProtocolVersion version)
+    {
+        SocketOptions socketOptions =
+                new SocketOptions().setConnectTimeoutMillis(Integer.getInteger("cassandra.test.driver.connection_timeout_ms",
+                                                                               DEFAULT_CONNECT_TIMEOUT_MILLIS)) // default is 5000
+                                   .setReadTimeoutMillis(Integer.getInteger("cassandra.test.driver.read_timeout_ms",
+                                                                            DEFAULT_READ_TIMEOUT_MILLIS)); // default is 12000
+
+        logger.info("Timeouts: {} / {}", socketOptions.getConnectTimeoutMillis(), socketOptions.getReadTimeoutMillis());
+
+        Cluster.Builder builder = Cluster.builder()
+                                         .withoutJMXReporting()
+                                         .addContactPoints(nativeAddr)
+                                         .withClusterName("Test Cluster")
+                                         .withPort(nativePort)
+                                         .withSocketOptions(socketOptions)
+                                         .withNettyOptions(IMMEDIATE_CONNECTION_SHUTDOWN_NETTY_OPTIONS);
+
+        if (user != null)
+            builder.withCredentials(user.username, user.password);
+
+        if (version.isBeta())
+            builder = builder.allowBetaProtocolVersion();
+        else
+            builder = builder.withProtocolVersion(com.datastax.driver.core.ProtocolVersion.fromInt(version.asInt()));
+
+        clusterBuilderConfigurator.accept(builder);
+
+        Cluster cluster = builder.build();
+
+        logger.info("Started Java Driver instance for protocol version {}", version);
+
+        return cluster;
+    }
+
     protected void dropPerTestKeyspace() throws Throwable
     {
         execute(String.format("DROP KEYSPACE IF EXISTS %s", KEYSPACE_PER_TEST));
@@ -545,7 +629,12 @@
         String currentTable = currentTable();
         return currentTable == null
              ? null
-             : Keyspace.open(keyspace).getColumnFamilyStore(currentTable);
+             : getColumnFamilyStore(keyspace, currentTable);
+    }
+
+    public ColumnFamilyStore getColumnFamilyStore(String keyspace, String table)
+    {
+        return Keyspace.open(keyspace).getColumnFamilyStore(table);
     }
 
     public void flush(boolean forceFlush)
@@ -563,7 +652,7 @@
     {
         ColumnFamilyStore store = getCurrentColumnFamilyStore(keyspace);
         if (store != null)
-            store.forceBlockingFlush();
+            Util.flush(store);
     }
 
     public void disableCompaction(String keyspace)
@@ -622,7 +711,7 @@
         // clean up data directory which are stored as data directory/keyspace/data files
         for (File d : Directories.getKSChildDirectories(ks))
         {
-            if (d.exists() && containsAny(d.getName(), tables))
+            if (d.exists() && containsAny(d.name(), tables))
                 FileUtils.deleteRecursive(d);
         }
     }
@@ -649,6 +738,13 @@
         return tables.get(tables.size() - 1);
     }
 
+    protected String currentView()
+    {
+        if (views.isEmpty())
+            return null;
+        return views.get(views.size() - 1);
+    }
+
     protected String currentKeyspace()
     {
         if (keyspaces.isEmpty())
@@ -683,14 +779,20 @@
 
     protected String createType(String keyspace, String query)
     {
-        String typeName = String.format("type_%02d", seqNumber.getAndIncrement());
+        String typeName = createTypeName();
         String fullQuery = String.format(query, keyspace + "." + typeName);
-        types.add(typeName);
         logger.info(fullQuery);
         schemaChange(fullQuery);
         return typeName;
     }
 
+    protected String createTypeName()
+    {
+        String typeName = String.format("type_%02d", seqNumber.getAndIncrement());
+        types.add(typeName);
+        return typeName;
+    }
+
     protected String createFunctionName(String keyspace)
     {
         return String.format("%s.function_%02d", keyspace, seqNumber.getAndIncrement());
@@ -810,6 +912,119 @@
         QueryProcessor.executeOnceInternal(fullQuery);
     }
 
+    /**
+     * Creates a materialized view, waiting for the completion of its builder tasks.
+     *
+     * @param query the {@code CREATE VIEW} query, with {@code %s} placeholders for the view and table names
+     * @return the name of the created view
+     */
+    protected String createView(String query)
+    {
+        return createView(null, query);
+    }
+
+    /**
+     * Creates a materialized view, waiting for the completion of its builder tasks.
+     *
+     * @param viewName the name of the view to be created, or {@code null} to use an automatically generated name
+     * @param query the {@code CREATE VIEW} query, with {@code %s} placeholders for the view and table names
+     * @return the name of the created view
+     */
+    protected String createView(String viewName, String query)
+    {
+        String currentView = createViewAsync(viewName, query);
+        waitForViewBuild(currentView);
+        return currentView;
+    }
+
+    /**
+     * Creates a materialized view, without waiting for the completion of its builder tasks.
+     *
+     * @param query the {@code CREATE VIEW} query, with {@code %s} placeholders for the view and table names
+     * @return the name of the created view
+     */
+    protected String createViewAsync(String query)
+    {
+        return createViewAsync(null, query);
+    }
+
+    /**
+     * Creates a materialized view, without waiting for the completion of its builder tasks.
+     *
+     * @param viewName the name of the view to be created, or {@code null} to use an automatically generated name
+     * @param query the {@code CREATE VIEW} query, with {@code %s} placeholders for the view and table names
+     * @return the name of the created view
+     */
+    protected String createViewAsync(String viewName, String query)
+    {
+        String currentView = viewName == null ? createViewName() : viewName;
+        String fullQuery = String.format(query, KEYSPACE + "." + currentView, KEYSPACE + "." + currentTable());
+        logger.info(fullQuery);
+        schemaChange(fullQuery);
+        return currentView;
+    }
+
+    protected void dropView()
+    {
+        dropView(currentView());
+    }
+
+    protected void dropView(String view)
+    {
+        dropFormattedTable(String.format("DROP MATERIALIZED VIEW IF EXISTS %s.%s", KEYSPACE, view));
+        views.remove(view);
+    }
+
+    protected String createViewName()
+    {
+        String currentView = String.format("mv_%02d", seqNumber.getAndIncrement());
+        views.add(currentView);
+        return currentView;
+    }
+
+    protected List<String> getViews()
+    {
+        return copy(views);
+    }
+
+    protected void updateView(String query, Object... params) throws Throwable
+    {
+        updateView(getDefaultVersion(), query, params);
+    }
+
+    protected void updateView(ProtocolVersion version, String query, Object... params) throws Throwable
+    {
+        executeNet(version, query, params);
+        waitForViewMutations();
+    }
+
+    /**
+     * Waits for any pending asynchronous materialized view mutations.
+     */
+    protected static void waitForViewMutations()
+    {
+        Awaitility.await()
+                  .atMost(10, TimeUnit.MINUTES)
+                  .pollDelay(0, TimeUnit.MILLISECONDS)
+                  .pollInterval(1, TimeUnit.MILLISECONDS)
+                  .until(() -> Stage.VIEW_MUTATION.executor().getPendingTaskCount() == 0 &&
+                               Stage.VIEW_MUTATION.executor().getActiveTaskCount() == 0);
+    }
+
+    /**
+     * Waits for the building tasks of the specified materialized view.
+     *
+     * @param view the name of the view
+     */
+    protected void waitForViewBuild(String view)
+    {
+        Awaitility.await()
+                  .atMost(10, TimeUnit.MINUTES)
+                  .pollDelay(0, TimeUnit.MILLISECONDS)
+                  .pollInterval(10, TimeUnit.MILLISECONDS)
+                  .until(() -> SystemKeyspace.isViewBuilt(keyspace(), view));
+    }
+
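A minimal usage sketch of the view helpers above, as they might appear inside a test method of a CQLTester subclass (the table and view schemas are illustrative, not part of this patch):

    // Hypothetical flow: create a base table and a view, write through the base table, read the view.
    createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
    createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
               "WHERE v IS NOT NULL AND k IS NOT NULL PRIMARY KEY (v, k)");

    // updateView() writes to the current table and then calls waitForViewMutations().
    updateView("INSERT INTO %s (k, v) VALUES (0, 1)");

    // executeViewNet() expands %s to the current view via formatViewQuery().
    assertRowsNet(executeViewNet("SELECT k, v FROM %s WHERE v = 1"), row(0, 1));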
     protected void alterTable(String query)
     {
         String fullQuery = formatQuery(query);
@@ -889,7 +1104,7 @@
      */
     protected boolean waitForIndex(String keyspace, String table, String index) throws Throwable
     {
-        long start = System.currentTimeMillis();
+        long start = currentTimeMillis();
         boolean indexCreated = false;
         while (!indexCreated)
         {
@@ -903,7 +1118,7 @@
                 }
             }
 
-            if (System.currentTimeMillis() - start > 5000)
+            if (currentTimeMillis() - start > 5000)
                 break;
 
             Thread.sleep(10);
@@ -924,7 +1139,7 @@
      */
     protected boolean waitForIndexBuilds(String keyspace, String indexName) throws InterruptedException
     {
-        long start = System.currentTimeMillis();
+        long start = currentTimeMillis();
         SecondaryIndexManager indexManager = getCurrentColumnFamilyStore(keyspace).indexManager;
 
         while (true)
@@ -933,7 +1148,7 @@
             {
                 return true;
             }
-            else if (System.currentTimeMillis() - start > 5000)
+            else if (currentTimeMillis() - start > 5000)
             {
                 return false;
             }
@@ -975,6 +1190,41 @@
         Assert.assertEquals(expectedArgTypes != null ? Arrays.asList(expectedArgTypes) : null, schemaChange.argTypes);
     }
 
+    protected static void assertWarningsContain(Message.Response response, String message)
+    {
+        assertWarningsContain(response.getWarnings(), message);
+    }
+
+    protected static void assertWarningsContain(List<String> warnings, String message)
+    {
+        Assert.assertNotNull(warnings);
+        assertTrue(warnings.stream().anyMatch(s -> s.contains(message)));
+    }
+
+    protected static void assertWarningsEquals(ResultSet rs, String... messages)
+    {
+        assertWarningsEquals(rs.getExecutionInfo().getWarnings(), messages);
+    }
+
+    protected static void assertWarningsEquals(List<String> warnings, String... messages)
+    {
+        Assert.assertNotNull(warnings);
+        Assertions.assertThat(messages).hasSameElementsAs(warnings);
+    }
+
+    protected static void assertNoWarningContains(Message.Response response, String message)
+    {
+        assertNoWarningContains(response.getWarnings(), message);
+    }
+
+    protected static void assertNoWarningContains(List<String> warnings, String message)
+    {
+        if (warnings != null)
+        {
+            assertFalse(warnings.stream().anyMatch(s -> s.contains(message)));
+        }
+    }
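A short sketch of how these warning assertions might be used from a test method declaring throws Throwable (the query and the warning fragments are illustrative; a current table created via createTable(...) is assumed):

    com.datastax.driver.core.ResultSet rs = executeNet("SELECT * FROM %s");

    // At least one warning contains the given text.
    assertWarningsContain(rs.getExecutionInfo().getWarnings(), "some expected warning fragment");

    // No warning contains the given text (also passes when no warnings were returned).
    assertNoWarningContains(rs.getExecutionInfo().getWarnings(), "unexpected fragment");

    // The warnings are exactly the given messages, ignoring order.
    assertWarningsEquals(rs, "first expected warning", "second expected warning");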
+
     protected static ResultMessage schemaChange(String query)
     {
         try
@@ -1011,12 +1261,17 @@
         return sessionNet().execute(formatQuery(query), values);
     }
 
+    protected com.datastax.driver.core.ResultSet executeViewNet(String query, Object... values)
+    {
+        return sessionNet().execute(formatViewQuery(query), values);
+    }
+
     protected com.datastax.driver.core.ResultSet executeNet(ProtocolVersion protocolVersion, Statement statement)
     {
         return sessionNet(protocolVersion).execute(statement);
     }
 
-    protected com.datastax.driver.core.ResultSet executeNetWithPaging(ProtocolVersion version, String query, int pageSize) throws Throwable
+    protected com.datastax.driver.core.ResultSet executeNetWithPaging(ProtocolVersion version, String query, int pageSize)
     {
         return sessionNet(version).execute(new SimpleStatement(formatQuery(query)).setFetchSize(pageSize));
     }
@@ -1026,7 +1281,7 @@
         return sessionNet(version).execute(new SimpleStatement(formatQuery(KS, query)).setKeyspace(KS).setFetchSize(pageSize));
     }
 
-    protected com.datastax.driver.core.ResultSet executeNetWithPaging(String query, int pageSize) throws Throwable
+    protected com.datastax.driver.core.ResultSet executeNetWithPaging(String query, int pageSize)
     {
         return sessionNet().execute(new SimpleStatement(formatQuery(query)).setFetchSize(pageSize));
     }
@@ -1040,7 +1295,18 @@
     {
         requireNetwork();
 
-        return sessions.get(protocolVersion);
+        return getSession(protocolVersion);
+    }
+
+    private Session getSession(ProtocolVersion protocolVersion)
+    {
+        Cluster cluster = getCluster(protocolVersion);
+        return sessions.computeIfAbsent(Pair.create(user, protocolVersion), userProto -> cluster.connect());
+    }
+
+    private Cluster getCluster(ProtocolVersion protocolVersion)
+    {
+        return clusters.computeIfAbsent(Pair.create(user, protocolVersion), userProto -> initClientCluster(userProto.left, userProto.right));
     }
 
     protected SimpleClient newSimpleClient(ProtocolVersion version) throws IOException
@@ -1060,6 +1326,17 @@
         return currentTable == null ? query : String.format(query, keyspace + "." + currentTable);
     }
 
+    public String formatViewQuery(String query)
+    {
+        return formatViewQuery(KEYSPACE, query);
+    }
+
+    public String formatViewQuery(String keyspace, String query)
+    {
+        String currentView = currentView();
+        return currentView == null ? query : String.format(query, keyspace + "." + currentView);
+    }
+
     protected ResultMessage.Prepared prepare(String query) throws Throwable
     {
         return QueryProcessor.instance.prepare(formatQuery(query), ClientState.forInternalCalls());
@@ -1070,6 +1347,11 @@
         return executeFormattedQuery(formatQuery(query), values);
     }
 
+    public UntypedResultSet executeView(String query, Object... values) throws Throwable
+    {
+        return executeFormattedQuery(formatViewQuery(KEYSPACE, query), values);
+    }
+
     protected UntypedResultSet executeFormattedQuery(String query, Object... values) throws Throwable
     {
         UntypedResultSet rs;
@@ -1142,9 +1424,9 @@
             for (int j = 0; j < meta.size(); j++)
             {
                 DataType type = meta.getType(j);
-                com.datastax.driver.core.TypeCodec<Object> codec = clusters.get(protocolVersion).getConfiguration()
-                                                                                                .getCodecRegistry()
-                                                                                                .codecFor(type);
+                com.datastax.driver.core.TypeCodec<Object> codec = getCluster(protocolVersion).getConfiguration()
+                                                                                              .getCodecRegistry()
+                                                                                              .codecFor(type);
                 ByteBuffer expectedByteValue = codec.serialize(expected[j], com.datastax.driver.core.ProtocolVersion.fromInt(protocolVersion.asInt()));
                 int expectedBytes = expectedByteValue == null ? -1 : expectedByteValue.remaining();
                 ByteBuffer actualValue = actual.getBytesUnsafe(meta.getName(j));
@@ -1204,6 +1486,7 @@
 
             Assert.assertEquals(String.format("Invalid number of (expected) values provided for row %d", i), expected == null ? 1 : expected.length, meta.size());
 
+            StringBuilder error = new StringBuilder();
             for (int j = 0; j < meta.size(); j++)
             {
                 ColumnSpecification column = meta.get(j);
@@ -1216,15 +1499,17 @@
                 {
                     Object actualValueDecoded = actualValue == null ? null : column.type.getSerializer().deserialize(actualValue);
                     if (!Objects.equal(expected != null ? expected[j] : null, actualValueDecoded))
-                        Assert.fail(String.format("Invalid value for row %d column %d (%s of type %s), expected <%s> but got <%s>",
-                                                  i,
-                                                  j,
-                                                  column.name,
-                                                  column.type.asCQL3Type(),
-                                                  formatValue(expectedByteValue != null ? expectedByteValue.duplicate() : null, column.type),
-                                                  formatValue(actualValue, column.type)));
+                        error.append(String.format("Invalid value for row %d column %d (%s of type %s), expected <%s> but got <%s>",
+                                                   i,
+                                                   j,
+                                                   column.name,
+                                                   column.type.asCQL3Type(),
+                                                   formatValue(expectedByteValue != null ? expectedByteValue.duplicate() : null, column.type),
+                                                   formatValue(actualValue, column.type))).append("\n");
                 }
             }
+            if (error.length() > 0)
+                Assert.fail(error.toString());
             i++;
         }
 
@@ -1280,7 +1565,19 @@
             Assert.assertEquals("Invalid number of (expected) values provided for row", expected.length, meta.size());
             List<ByteBuffer> expectedRow = new ArrayList<>(meta.size());
             for (int j = 0; j < meta.size(); j++)
-                expectedRow.add(makeByteBuffer(expected[j], meta.get(j).type));
+            {
+                try
+                {
+                    expectedRow.add(makeByteBuffer(expected[j], meta.get(j).type));
+                }
+                catch (Exception e)
+                {
+                    ColumnSpecification column = meta.get(j);
+                    AssertionError error = new AssertionError("Error with column '" + column.name + " " + column.type.asCQL3Type() + "'; " + e.getLocalizedMessage());
+                    error.addSuppressed(e);
+                    throw error;
+                }
+            }
             expectedRows.add(expectedRow);
         }
 
@@ -1456,6 +1753,11 @@
         assertInvalidThrowMessage(errorMessage, null, query, values);
     }
 
+    protected void assertInvalidMessageNet(String errorMessage, String query, Object... values) throws Throwable
+    {
+        assertInvalidThrowMessage(Optional.of(ProtocolVersion.CURRENT), errorMessage, null, query, values);
+    }
+
     protected void assertInvalidThrow(Class<? extends Throwable> exception, String query, Object... values) throws Throwable
     {
         assertInvalidThrowMessage(null, exception, query, values);
@@ -1542,6 +1844,13 @@
         }
     }
 
+    protected void assertInvalidRequestMessage(String errorMessage, String query, Object... values)
+    {
+        Assertions.assertThatThrownBy(() -> execute(query, values))
+                  .isInstanceOf(InvalidRequestException.class)
+                  .hasMessageContaining(errorMessage);
+    }
+
     /**
      * Asserts that the message of the specified exception contains the specified text.
      *
@@ -1554,6 +1863,21 @@
                 e.getMessage().contains(text));
     }
 
+    /**
+     * Checks that the specified query is not authorized for the current user.
+     * @param errorMessage the expected error message
+     * @param query the query
+     * @param values the query parameters
+     */
+    protected void assertUnauthorizedQuery(String errorMessage, String query, Object... values) throws Throwable
+    {
+        assertInvalidThrowMessage(Optional.of(ProtocolVersion.CURRENT),
+                                  errorMessage,
+                                  UnauthorizedException.class,
+                                  query,
+                                  values);
+    }
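For example, a permissions test might use it roughly as follows (the role, table, and message fragment are illustrative; the real text comes from the configured authorizer):

    // Hypothetical check that a non-privileged user is rejected over the native protocol,
    // assuming a current table created via createTable(...).
    useUser("alice", "secret");
    assertUnauthorizedQuery("has no MODIFY permission", "INSERT INTO %s (k, v) VALUES (0, 0)");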
+
     @FunctionalInterface
     public interface CheckedFunction {
         void apply() throws Throwable;
@@ -1782,7 +2106,7 @@
         if (value instanceof ByteBuffer)
             return (ByteBuffer)value;
 
-        return type.decompose(serializeTuples(value));
+        return type.decomposeUntyped(serializeTuples(value));
     }
 
     private static String formatValue(ByteBuffer bb, AbstractType<?> type)
@@ -1863,7 +2187,18 @@
     protected com.datastax.driver.core.TupleType tupleTypeOf(ProtocolVersion protocolVersion, com.datastax.driver.core.DataType...types)
     {
         requireNetwork();
-        return clusters.get(protocolVersion).getMetadata().newTupleType(types);
+        return getCluster(protocolVersion).getMetadata().newTupleType(types);
+    }
+
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    protected static Gauge<Integer> getPausedConnectionsGauge()
+    {
+        String metricName = "org.apache.cassandra.metrics.Client.PausedConnections";
+        Map<String, Gauge> metrics = CassandraMetricsRegistry.Metrics.getGauges((name, metric) -> name.equals(metricName));
+        if (metrics.size() != 1)
+            fail(String.format("Expected a single registered metric for paused client connections, found %s",
+                               metrics.size()));
+        return metrics.get(metricName);
     }
 
     // Attempt to find an AbstracType from a value (for serialization/printing sake).
@@ -1915,6 +2250,9 @@
         if (value instanceof UUID)
             return UUIDType.instance;
 
+        if (value instanceof TimeUUID)
+            return TimeUUIDType.instance;
+
         if (value instanceof List)
         {
             List l = (List)value;
@@ -2025,4 +2363,44 @@
             return "UserTypeValue" + toCQLString();
         }
     }
+
+    private static class User
+    {
+        /**
+         * The user name
+         */
+        public final String username;
+
+        /**
+         * The user password
+         */
+        public final String password;
+
+        public User(String username, String password)
+        {
+            this.username = username;
+            this.password = password;
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hashCode(username, password);
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o)
+                return true;
+
+            if (!(o instanceof User))
+                return false;
+
+            User u = (User) o;
+
+            return Objects.equal(username, u.username)
+                && Objects.equal(password, u.password);
+        }
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java b/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java
index 983acfa..8768b77 100644
--- a/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java
@@ -34,6 +34,8 @@
 import org.apache.cassandra.transport.messages.ResultMessage;
 
 import static java.lang.String.format;
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class CustomNowInSecondsTest extends CQLTester
@@ -154,7 +156,7 @@
             new BatchStatement(BatchStatement.Type.UNLOGGED, VariableSpecifications.empty(), statements, Attributes.none());
 
         // execute an BATCH message with now set to [now + 1 day], with ttl = 1, making its effective ttl = 1 day + 1.
-        QueryProcessor.instance.processBatch(batch, qs, batchQueryOptions(now + day), Collections.emptyMap(), System.nanoTime());
+        QueryProcessor.instance.processBatch(batch, qs, batchQueryOptions(now + day), emptyMap(), nanoTime());
 
         // verify that despite TTL having passed at now + 1 the rows are still there.
         assertEquals(2, executeSelect(format("SELECT * FROM %s.%s", ks, tbl), now + 1, false).size());
@@ -183,12 +185,12 @@
         if (prepared)
         {
             CQLStatement statement = QueryProcessor.parseStatement(query, cs);
-            return QueryProcessor.instance.processPrepared(statement, qs, queryOptions(nowInSeconds), Collections.emptyMap(), System.nanoTime());
+            return QueryProcessor.instance.processPrepared(statement, qs, queryOptions(nowInSeconds), emptyMap(), nanoTime());
         }
         else
         {
             CQLStatement statement = QueryProcessor.instance.parse(query, qs, queryOptions(nowInSeconds));
-            return QueryProcessor.instance.process(statement, qs, queryOptions(nowInSeconds), Collections.emptyMap(), System.nanoTime());
+            return QueryProcessor.instance.process(statement, qs, queryOptions(nowInSeconds), emptyMap(), nanoTime());
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/cql3/DurationTest.java b/test/unit/org/apache/cassandra/cql3/DurationTest.java
index ef031c4..9b63242 100644
--- a/test/unit/org/apache/cassandra/cql3/DurationTest.java
+++ b/test/unit/org/apache/cassandra/cql3/DurationTest.java
@@ -18,23 +18,21 @@
  */
 package org.apache.cassandra.cql3;
 
-import java.text.ParsePosition;
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.TimeZone;
-
-import org.apache.commons.lang3.time.DateUtils;
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
 
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.serializers.TimeSerializer;
+
 import static org.junit.Assert.assertEquals;
 
-import org.apache.cassandra.exceptions.InvalidRequestException;
-
 import static org.apache.cassandra.cql3.Duration.*;
-import static org.apache.cassandra.cql3.Duration.NANOS_PER_HOUR;
 
 public class DurationTest
 {
@@ -110,72 +108,287 @@
     @Test
     public void testAddTo()
     {
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("0m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("10us").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T00:10:00"), Duration.from("10m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T01:30:00"), Duration.from("90m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T02:10:00"), Duration.from("2h10m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-23T00:10:00"), Duration.from("2d10m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-24T01:00:00"), Duration.from("2d25h").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-10-21T00:00:00"), Duration.from("1mo").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2017-11-21T00:00:00"), Duration.from("14mo").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2017-02-28T00:00:00"), Duration.from("12mo").addTo(toMillis("2016-02-29T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("0m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("10us").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T00:10:00.000", Duration.from("10m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T01:30:00.000", Duration.from("90m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T02:10:00.000", Duration.from("2h10m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-23T00:10:00.000", Duration.from("2d10m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-24T01:00:00.000", Duration.from("2d25h").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-10-21T00:00:00.000", Duration.from("1mo").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2017-11-21T00:00:00.000", Duration.from("14mo").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2017-02-28T00:00:00.000", Duration.from("12mo").addTo(toMillis("2016-02-29T00:00:00")));
     }
 
     @Test
     public void testAddToWithNegativeDurations()
     {
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("-0m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("-10us").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-20T23:50:00"), Duration.from("-10m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-20T22:30:00"), Duration.from("-90m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-20T21:50:00"), Duration.from("-2h10m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-18T23:50:00"), Duration.from("-2d10m").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-17T23:00:00"), Duration.from("-2d25h").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-08-21T00:00:00"), Duration.from("-1mo").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2015-07-21T00:00:00"), Duration.from("-14mo").addTo(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2015-02-28T00:00:00"), Duration.from("-12mo").addTo(toMillis("2016-02-29T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("-0m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("-10us").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-20T23:50:00.000", Duration.from("-10m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-20T22:30:00.000", Duration.from("-90m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-20T21:50:00.000", Duration.from("-2h10m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-18T23:50:00.000", Duration.from("-2d10m").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-17T23:00:00.000", Duration.from("-2d25h").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-08-21T00:00:00.000", Duration.from("-1mo").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2015-07-21T00:00:00.000", Duration.from("-14mo").addTo(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2015-02-28T00:00:00.000", Duration.from("-12mo").addTo(toMillis("2016-02-29T00:00:00")));
     }
 
     @Test
     public void testSubstractFrom()
     {
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("0m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("10us").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-20T23:50:00"), Duration.from("10m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-20T22:30:00"), Duration.from("90m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-20T21:50:00"), Duration.from("2h10m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-18T23:50:00"), Duration.from("2d10m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-17T23:00:00"), Duration.from("2d25h").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-08-21T00:00:00"), Duration.from("1mo").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2015-07-21T00:00:00"), Duration.from("14mo").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2015-02-28T00:00:00"), Duration.from("12mo").substractFrom(toMillis("2016-02-29T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("0m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("10us").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-20T23:50:00.000", Duration.from("10m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-20T22:30:00.000", Duration.from("90m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-20T21:50:00.000", Duration.from("2h10m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-18T23:50:00.000", Duration.from("2d10m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-17T23:00:00.000", Duration.from("2d25h").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-08-21T00:00:00.000", Duration.from("1mo").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2015-07-21T00:00:00.000", Duration.from("14mo").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2015-02-28T00:00:00.000", Duration.from("12mo").substractFrom(toMillis("2016-02-29T00:00:00")));
     }
 
     @Test
     public void testSubstractWithNegativeDurations()
     {
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("-0m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T00:00:00"), Duration.from("-10us").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T00:10:00"), Duration.from("-10m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T01:30:00"), Duration.from("-90m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-21T02:10:00"), Duration.from("-2h10m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-23T00:10:00"), Duration.from("-2d10m").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-09-24T01:00:00"), Duration.from("-2d25h").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2016-10-21T00:00:00"), Duration.from("-1mo").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2017-11-21T00:00:00"), Duration.from("-14mo").substractFrom(toMillis("2016-09-21T00:00:00")));
-        assertEquals(toMillis("2017-02-28T00:00:00"), Duration.from("-12mo").substractFrom(toMillis("2016-02-29T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("-0m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T00:00:00.000", Duration.from("-10us").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T00:10:00.000", Duration.from("-10m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T01:30:00.000", Duration.from("-90m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-21T02:10:00.000", Duration.from("-2h10m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-23T00:10:00.000", Duration.from("-2d10m").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-09-24T01:00:00.000", Duration.from("-2d25h").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2016-10-21T00:00:00.000", Duration.from("-1mo").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2017-11-21T00:00:00.000", Duration.from("-14mo").substractFrom(toMillis("2016-09-21T00:00:00")));
+        assertTimeEquals("2017-02-28T00:00:00.000", Duration.from("-12mo").substractFrom(toMillis("2016-02-29T00:00:00")));
     }
 
-    private long toMillis(String timeAsString)
+    @Test
+    public void testFloorTime()
     {
-        SimpleDateFormat parser = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
-        parser.setTimeZone(TimeZone.getTimeZone("UTC"));
-        Date date = parser.parse(timeAsString, new ParsePosition(0));
-        return DateUtils.truncate(date, Calendar.SECOND).getTime();
+        long time = floorTime("16:12:00", "2h");
+        Duration result = Duration.newInstance(0, 0, time);
+        Duration expected = Duration.from("16h");
+        assertEquals(expected, result);
     }
 
-    public void assertInvalidDuration(String duration, String expectedErrorMessage)
+    @Test
+    public void testInvalidFloorTimestamp()
+    {
+        try
+        {
+            floorTimestamp("2016-09-27T16:12:00", "2h", "2017-09-01T00:00:00");
+            Assert.fail();
+        }
+        catch (InvalidRequestException e)
+        {
+            assertEquals("The floor function starting time is greater than the provided time", e.getMessage());
+        }
+
+        try
+        {
+            floorTimestamp("2016-09-27T16:12:00", "-2h", "2016-09-27T00:00:00");
+            Assert.fail();
+        }
+        catch (InvalidRequestException e)
+        {
+            assertEquals("Negative durations are not supported by the floor function", e.getMessage());
+        }
+    }
+
+    @Test
+    public void testFloorTimestampWithDurationInHours()
+    {
+        // Test floor with a timestamp equal to the start time
+        long result = floorTimestamp("2016-09-27T16:12:00", "2h", "2016-09-27T16:12:00");
+        assertTimeEquals("2016-09-27T16:12:00.000", result);
+
+        // Test floor with a duration equal to zero
+        result = floorTimestamp("2016-09-27T18:12:00", "0h", "2016-09-27T16:12:00");
+        assertTimeEquals("2016-09-27T18:12:00.000", result);
+
+        // Test floor with a timestamp exactly equal to the start time + (1 x duration)
+        result = floorTimestamp("2016-09-27T18:12:00", "2h", "2016-09-27T16:12:00");
+        assertTimeEquals("2016-09-27T18:12:00.000", result);
+
+        // Test floor with a timestamp in the first bucket
+        result = floorTimestamp("2016-09-27T16:13:00", "2h", "2016-09-27T16:12:00");
+        assertTimeEquals("2016-09-27T16:12:00.000", result);
+
+        // Test floor with a timestamp in another bucket
+        result = floorTimestamp("2016-09-27T16:12:00", "2h", "2016-09-27T00:00:00");
+        assertTimeEquals("2016-09-27T16:00:00.000", result);
+    }
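For durations made up only of sub-day components, as in the hour-based cases above, the expected values can be reproduced with plain bucket arithmetic; a minimal illustrative check, reusing this test's own helpers (not Cassandra's implementation):

    // floor(t) = start + ((t - start) / d) * d for a fixed-length duration d (here 2h in millis).
    long start = toMillis("2016-09-27T00:00:00");
    long t     = toMillis("2016-09-27T16:12:00");
    long d     = 2L * 60 * 60 * 1000;
    long floor = start + ((t - start) / d) * d;
    assertTimeEquals("2016-09-27T16:00:00.000", floor);   // same value as floorTimestamp(..., "2h", ...)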
+
+    @Test
+    public void testFloorTimestampWithDurationInDays()
+    {
+        // Test floor with a start time at the beginning of the month
+        long result = floorTimestamp("2016-09-27T16:12:00", "2d", "2016-09-01T00:00:00");
+        assertTimeEquals("2016-09-27T00:00:00.000", result);
+
+        // Test floor with a start time in the previous month
+        result = floorTimestamp("2016-09-27T16:12:00", "2d", "2016-08-01T00:00:00");
+        assertTimeEquals("2016-09-26T00:00:00.000", result);
+    }
+
+    @Test
+    public void testFloorTimestampWithDurationInDaysAndHours()
+    {
+        long result = floorTimestamp("2016-09-27T16:12:00", "2d12h", "2016-09-01T00:00:00");
+        assertTimeEquals("2016-09-26T00:00:00.000", result);
+    }
+
+    @Test
+    public void testFloorTimestampWithDurationInMonths()
+    {
+        // Test floor with a timestamp equal to the start time
+        long result = floorTimestamp("2016-09-01T00:00:00", "2mo", "2016-09-01T00:00:00");
+        assertTimeEquals("2016-09-01T00:00:00.000", result);
+
+        // Test floor with a timestamp in the first bucket
+        result = floorTimestamp("2016-09-27T16:12:00", "2mo", "2016-09-01T00:00:00");
+        assertTimeEquals("2016-09-01T00:00:00.000", result);
+
+        // Test floor with a start time at the beginning of the year (LEAP YEAR)
+        result = floorTimestamp("2016-09-27T16:12:00", "1mo", "2016-01-01T00:00:00");
+        assertTimeEquals("2016-09-01T00:00:00.000", result);
+
+        // Test floor with a start time at the beginning of the previous year
+        result = floorTimestamp("2016-09-27T16:12:00", "2mo", "2015-01-01T00:00:00");
+        assertTimeEquals("2016-09-01T00:00:00.000", result);
+
+        // Test floor with a start time in the previous year
+        result = floorTimestamp("2016-09-27T16:12:00", "2mo", "2015-02-02T00:00:00");
+        assertTimeEquals("2016-08-02T00:00:00.000", result);
+
+    }
+
+    @Test
+    public void testFloorTimestampWithDurationInMonthsAndDays()
+    {
+        long result = floorTimestamp("2016-09-27T16:12:00", "2mo2d", "2016-01-01T00:00:00");
+        assertTimeEquals("2016-09-09T00:00:00.000", result);
+
+        result = floorTimestamp("2016-09-27T16:12:00", "2mo5d", "2016-01-01T00:00:00");
+        assertTimeEquals("2016-09-21T00:00:00.000", result);
+
+        // Test floor with a timestamp in the first bucket
+        result = floorTimestamp("2016-09-04T16:12:00", "2mo5d", "2016-07-01T00:00:00");
+        assertTimeEquals("2016-07-01T00:00:00.000", result);
+
+        // Test floor with a timestamp in a bucket starting on the last day of the month
+        result = floorTimestamp("2016-09-27T16:12:00", "2mo10d", "2016-01-01T00:00:00");
+        assertTimeEquals("2016-07-31T00:00:00.000", result);
+
+        // Test floor with a timestamp in a bucket starting on the first day of the month
+        result = floorTimestamp("2016-09-27T16:12:00", "2mo12d", "2016-01-01T00:00:00");
+        assertTimeEquals("2016-08-06T00:00:00.000", result);
+
+        // Test leap years
+        result = floorTimestamp("2016-04-27T16:12:00", "1mo30d", "2016-01-01T00:00:00");
+        assertTimeEquals("2016-03-02T00:00:00.000", result);
+
+        result = floorTimestamp("2015-04-27T16:12:00", "1mo30d", "2015-01-01T00:00:00");
+        assertTimeEquals("2015-03-03T00:00:00.000", result);
+    }
+
+    @Test
+    public void testFloorTimestampWithDurationSmallerThanPrecision()
+    {
+        long result = floorTimestamp("2016-09-27T18:14:00", "5us", "2016-09-27T16:12:00");
+        assertTimeEquals("2016-09-27T18:14:00.000", result);
+
+        result = floorTimestamp("2016-09-27T18:14:00", "1h5us", "2016-09-27T16:12:00");
+        assertTimeEquals("2016-09-27T18:12:00.000", result);
+    }
+
+    @Test
+    public void testFloorTimestampWithLeapSecond()
+    {
+        long result = floorTimestamp("2016-07-02T00:00:00", "2m", "2016-06-30T23:58:00");
+        assertTimeEquals("2016-07-02T00:00:00.000", result);
+    }
+
+    @Test
+    public void testFloorTimestampWithComplexDuration()
+    {
+        long result = floorTimestamp("2016-07-02T00:00:00", "2mo2d8h", "2016-01-01T00:00:00");
+        assertTimeEquals("2016-05-05T16:00:00.000", result);
+    }
+
+    @Test
+    public void testInvalidFloorTime()
+    {
+        try
+        {
+            floorTime("16:12:00", "2d");
+            Assert.fail();
+        }
+        catch (InvalidRequestException e)
+        {
+            assertEquals("For time values, the floor can only be computed for durations smaller that a day", e.getMessage());
+        }
+
+        try
+        {
+            floorTime("16:12:00", "25h");
+            Assert.fail();
+        }
+        catch (InvalidRequestException e)
+        {
+            assertEquals("For time values, the floor can only be computed for durations smaller that a day", e.getMessage());
+        }
+
+        try
+        {
+            floorTime("16:12:00", "-2h");
+            Assert.fail();
+        }
+        catch (InvalidRequestException e)
+        {
+            assertEquals("Negative durations are not supported by the floor function", e.getMessage());
+        }
+    }
+
+    private static long toMillis(String timeAsString)
+    {
+        OffsetDateTime dateTime = LocalDateTime.parse(timeAsString, DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss"))
+                                               .atOffset(ZoneOffset.UTC);
+
+        return Instant.from(dateTime).toEpochMilli();
+    }
+
+    private static String fromMillis(long timeInMillis)
+    {
+        return Instant.ofEpochMilli(timeInMillis)
+                      .atOffset(ZoneOffset.UTC)
+                      .format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS"));
+    }
+
+    private static void assertTimeEquals(String expected, long actualTimeInMillis)
+    {
+        assertEquals(expected, fromMillis(actualTimeInMillis));
+    }
+
+    private static long floorTimestamp(String time, String duration, String startingTime)
+    {
+        return Duration.floorTimestamp(toMillis(time), Duration.from(duration), toMillis(startingTime));
+    }
+
+    private static long floorTime(String time, String duration)
+    {
+        return Duration.floorTime(timeInNanos(time), Duration.from(duration));
+    }
+
+    private static long timeInNanos(String timeAsString)
+    {
+        return TimeSerializer.timeStringToLong(timeAsString);
+    }
+
+    private static void assertInvalidDuration(String duration, String expectedErrorMessage)
     {
         try
         {
diff --git a/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java b/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
index 21475b6..eded145 100644
--- a/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
+++ b/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
@@ -34,6 +34,8 @@
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
+import org.apache.cassandra.io.sstable.SSTableId;
+import org.apache.cassandra.io.sstable.SSTableIdFactory;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
@@ -173,7 +175,7 @@
       flush();
       assertEquals(1, cfs.getLiveSSTables().size());
       SSTableReader table = cfs.getLiveSSTables().iterator().next();
-      int gen = table.descriptor.generation;
+      SSTableId gen = table.descriptor.id;
       assertEquals(KEY_COUNT * CLUSTERING_COUNT, countRows(table));
 
       assertEquals(0, table.getSSTableLevel()); // flush writes to L0
@@ -184,8 +186,8 @@
 
       assertEquals(1, cfs.getLiveSSTables().size());
       SSTableReader collected = cfs.getLiveSSTables().iterator().next();
-      int collectedGen = collected.descriptor.generation;
-      assertTrue(collectedGen > gen);
+      SSTableId collectedGen = collected.descriptor.id;
+      assertTrue(SSTableIdFactory.COMPARATOR.compare(collectedGen, gen) > 0);
       assertEquals(KEY_COUNT * CLUSTERING_COUNT, countRows(collected));
 
       assertEquals(0, collected.getSSTableLevel()); // garbagecollect should leave the LCS level where it was
@@ -194,8 +196,8 @@
 
       assertEquals(1, cfs.getLiveSSTables().size());
       SSTableReader compacted = cfs.getLiveSSTables().iterator().next();
-      int compactedGen = compacted.descriptor.generation;
-      assertTrue(compactedGen > collectedGen);
+      SSTableId compactedGen = compacted.descriptor.id;
+      assertTrue(SSTableIdFactory.COMPARATOR.compare(compactedGen, collectedGen) > 0);
       assertEquals(KEY_COUNT * CLUSTERING_COUNT, countRows(compacted));
 
       assertEquals(1, compacted.getSSTableLevel()); // full compaction with LCS should move to L1
@@ -205,7 +207,7 @@
 
       assertEquals(1, cfs.getLiveSSTables().size());
       SSTableReader collected2 = cfs.getLiveSSTables().iterator().next();
-      assertTrue(collected2.descriptor.generation > compactedGen);
+      assertTrue(SSTableIdFactory.COMPARATOR.compare(collected2.descriptor.id, compactedGen) > 0);
       assertEquals(KEY_COUNT * CLUSTERING_COUNT, countRows(collected2));
 
       assertEquals(1, collected2.getSSTableLevel()); // garbagecollect should leave the LCS level where it was
@@ -264,7 +266,7 @@
         assertEquals(CompactionManager.AllSSTableOpStatus.SUCCESSFUL, status);
 
         SSTableReader[] tables = cfs.getLiveSSTables().toArray(new SSTableReader[0]);
-        Arrays.sort(tables, (o1, o2) -> Integer.compare(o1.descriptor.generation, o2.descriptor.generation));  // by order of compaction
+        Arrays.sort(tables, SSTableReader.idComparator);  // by order of compaction
 
         // Make sure deleted data was removed
         assertTrue(rowCount0 > countRows(tables[0]));
@@ -407,11 +409,11 @@
         createTable("create table %s (k int, c1 int, primary key (k, c1)) with compaction = {'class': 'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones':'row'}");
         execute("delete from %s where k = 1");
         Set<SSTableReader> readers = new HashSet<>(getCurrentColumnFamilyStore().getLiveSSTables());
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         SSTableReader oldSSTable = getNewTable(readers);
         Thread.sleep(2000);
         execute("delete from %s where k = 1");
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         SSTableReader newTable = getNewTable(readers);
 
         CompactionManager.instance.forceUserDefinedCompaction(oldSSTable.getFilename());
diff --git a/test/unit/org/apache/cassandra/cql3/KeyCacheCqlTest.java b/test/unit/org/apache/cassandra/cql3/KeyCacheCqlTest.java
index b1074b2..2b0a3c8 100644
--- a/test/unit/org/apache/cassandra/cql3/KeyCacheCqlTest.java
+++ b/test/unit/org/apache/cassandra/cql3/KeyCacheCqlTest.java
@@ -30,12 +30,14 @@
 
 import org.apache.cassandra.cache.KeyCacheKey;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.index.Index;
 import org.apache.cassandra.metrics.CacheMetrics;
 import org.apache.cassandra.metrics.CassandraMetricsRegistry;
 import org.apache.cassandra.schema.CachingParams;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.service.StorageService;
@@ -161,7 +163,7 @@
             }
         }
 
-        StorageService.instance.forceKeyspaceFlush(KEYSPACE_PER_TEST);
+        StorageService.instance.forceKeyspaceFlush(KEYSPACE_PER_TEST, ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         for (int pkInt = 0; pkInt < 20; pkInt++)
         {
@@ -309,7 +311,7 @@
         }
 
         dropTable("DROP TABLE %s");
-        Schema.instance.updateVersion();
+        assert Schema.instance.isSameVersion(SchemaKeyspace.calculateSchemaDigest());
 
         //Test loading for a dropped 2i/table
         CacheService.instance.keyCache.clear();
@@ -567,7 +569,7 @@
 
             if (i % 10 == 9)
             {
-                Keyspace.open(KEYSPACE_PER_TEST).getColumnFamilyStore(table).forceFlush().get();
+                Keyspace.open(KEYSPACE_PER_TEST).getColumnFamilyStore(table).forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS).get();
                 if (index != null)
                     triggerBlockingFlush(Keyspace.open(KEYSPACE_PER_TEST).getColumnFamilyStore(table).indexManager.getIndexByName(index));
             }
@@ -577,7 +579,7 @@
     private static void prepareTable(String table) throws IOException, InterruptedException, java.util.concurrent.ExecutionException
     {
         StorageService.instance.disableAutoCompaction(KEYSPACE_PER_TEST, table);
-        Keyspace.open(KEYSPACE_PER_TEST).getColumnFamilyStore(table).forceFlush().get();
+        Keyspace.open(KEYSPACE_PER_TEST).getColumnFamilyStore(table).forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS).get();
         Keyspace.open(KEYSPACE_PER_TEST).getColumnFamilyStore(table).truncateBlocking();
     }
 
diff --git a/test/unit/org/apache/cassandra/cql3/KeywordSplit1Test.java b/test/unit/org/apache/cassandra/cql3/KeywordSplit1Test.java
index 6d51b34..c301650 100644
--- a/test/unit/org/apache/cassandra/cql3/KeywordSplit1Test.java
+++ b/test/unit/org/apache/cassandra/cql3/KeywordSplit1Test.java
@@ -32,12 +32,12 @@
 {
     static int SPLIT = 1;
     static int TOTAL_SPLITS = 2;
-    
+
     @Parameterized.Parameters(name = "keyword {0} isReserved {1}")
     public static Collection<Object[]> keywords() {
         return KeywordTestBase.getKeywordsForSplit(SPLIT, TOTAL_SPLITS);
     }
-    
+
     public KeywordSplit1Test(String keyword, boolean isReserved)
     {
         super(keyword, isReserved);
diff --git a/test/unit/org/apache/cassandra/cql3/KeywordSplit2Test.java b/test/unit/org/apache/cassandra/cql3/KeywordSplit2Test.java
index f400d4e..7be651a 100644
--- a/test/unit/org/apache/cassandra/cql3/KeywordSplit2Test.java
+++ b/test/unit/org/apache/cassandra/cql3/KeywordSplit2Test.java
@@ -32,7 +32,7 @@
 {
     static int SPLIT = 2;
     static int TOTAL_SPLITS = 2;
-    
+
     @Parameterized.Parameters(name = "keyword {0} isReserved {1}")
     public static Collection<Object[]> keywords() {
         return KeywordTestBase.getKeywordsForSplit(SPLIT, TOTAL_SPLITS);
diff --git a/test/unit/org/apache/cassandra/cql3/KeywordTestBase.java b/test/unit/org/apache/cassandra/cql3/KeywordTestBase.java
index 0b8d2d5..aa6e508 100644
--- a/test/unit/org/apache/cassandra/cql3/KeywordTestBase.java
+++ b/test/unit/org/apache/cassandra/cql3/KeywordTestBase.java
@@ -44,7 +44,7 @@
                                                       return new Object[] { keyword,ReservedKeywords.isReserved(keyword) };
                                                   })
                                                   .collect(Collectors.toList());
-    
+
     public static Collection<Object[]> getKeywordsForSplit(int split, int totalSplits)
     {
         return Sets.newHashSet(Lists.partition(KeywordTestBase.keywords, KeywordTestBase.keywords.size() / totalSplits)
diff --git a/test/unit/org/apache/cassandra/cql3/ListsTest.java b/test/unit/org/apache/cassandra/cql3/ListsTest.java
index 92dcd96..27c36b8 100644
--- a/test/unit/org/apache/cassandra/cql3/ListsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ListsTest.java
@@ -36,6 +36,7 @@
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.UUIDGen;
@@ -141,8 +142,14 @@
 
         ByteBuffer keyBuf = ByteBufferUtil.bytes("key");
         DecoratedKey key = Murmur3Partitioner.instance.decorateKey(keyBuf);
-        UpdateParameters parameters =
-            new UpdateParameters(metaData, null, QueryOptions.DEFAULT, System.currentTimeMillis(), FBUtilities.nowInSeconds(), 1000, Collections.emptyMap());
+        UpdateParameters parameters = new UpdateParameters(metaData,
+                                                           null,
+                                                           ClientState.forInternalCalls(),
+                                                           QueryOptions.DEFAULT,
+                                                           System.currentTimeMillis(),
+                                                           FBUtilities.nowInSeconds(),
+                                                           1000,
+                                                           Collections.emptyMap());
         Clustering<?> clustering = Clustering.make(ByteBufferUtil.bytes(1));
         parameters.newRow(clustering);
         prepender.execute(key, parameters);
diff --git a/test/unit/org/apache/cassandra/cql3/MemtableQuickTest.java b/test/unit/org/apache/cassandra/cql3/MemtableQuickTest.java
new file mode 100644
index 0000000..ee5da92
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/MemtableQuickTest.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.cql3;
+
+import java.util.List;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+
+@RunWith(Parameterized.class)
+public class MemtableQuickTest extends CQLTester
+{
+    static final Logger logger = LoggerFactory.getLogger(MemtableQuickTest.class);
+
+    static final int partitions = 50_000;
+    static final int rowsPerPartition = 4;
+
+    static final int deletedPartitionsStart = 20_000;
+    static final int deletedPartitionsEnd = deletedPartitionsStart + 10_000;
+
+    static final int deletedRowsStart = 40_000;
+    static final int deletedRowsEnd = deletedRowsStart + 5_000;
+
+    @Parameterized.Parameter()
+    public String memtableClass;
+
+    @Parameterized.Parameters(name = "{0}")
+    public static List<Object> parameters()
+    {
+        return ImmutableList.of("skiplist",
+                                "skiplist_sharded",
+                                "skiplist_sharded_locking");
+    }
+
+    @BeforeClass
+    public static void setUp()
+    {
+        CQLTester.setUpClass();
+        CQLTester.prepareServer();
+        CQLTester.disablePreparedReuseForTest();
+        logger.info("setupClass done.");
+    }
+
+    @Test
+    public void testMemtable() throws Throwable
+    {
+
+        String keyspace = createKeyspace("CREATE KEYSPACE %s with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } and durable_writes = false");
+        String table = createTable(keyspace, "CREATE TABLE %s ( userid bigint, picid bigint, commentid bigint, PRIMARY KEY(userid, picid))" +
+                                             " with compression = {'enabled': false}" +
+                                             " and memtable = '" + memtableClass + "'");
+        execute("use " + keyspace + ';');
+
+        String writeStatement = "INSERT INTO "+table+"(userid,picid,commentid)VALUES(?,?,?)";
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
+        cfs.disableAutoCompaction();
+        Util.flush(cfs);
+
+        long i;
+        long limit = partitions;
+        logger.info("Writing {} partitions of {} rows", partitions, rowsPerPartition);
+        for (i = 0; i < limit; ++i)
+        {
+            for (long j = 0; j < rowsPerPartition; ++j)
+                execute(writeStatement, i, j, i + j);
+        }
+
+        logger.info("Deleting partitions between {} and {}", deletedPartitionsStart, deletedPartitionsEnd);
+        for (i = deletedPartitionsStart; i < deletedPartitionsEnd; ++i)
+        {
+            // no partition exists, but we will create a tombstone
+            execute("DELETE FROM " + table + " WHERE userid = ?", i);
+        }
+
+        logger.info("Deleting rows between {} and {}", deletedRowsStart, deletedRowsEnd);
+        for (i = deletedRowsStart; i < deletedRowsEnd; ++i)
+        {
+            // no row exists, but we will create a tombstone (and partition)
+            execute("DELETE FROM " + table + " WHERE userid = ? AND picid = ?", i, 0L);
+        }
+
+        logger.info("Reading {} partitions", partitions);
+        for (i = 0; i < limit; ++i)
+        {
+            UntypedResultSet result = execute("SELECT * FROM " + table + " WHERE userid = ?", i);
+            if (i >= deletedPartitionsStart && i < deletedPartitionsEnd)
+                assertEmpty(result);
+            else
+            {
+                int start = 0;
+                if (i >= deletedRowsStart && i < deletedRowsEnd)
+                    start = 1;
+                Object[][] rows = new Object[rowsPerPartition - start][];
+                for (long j = start; j < rowsPerPartition; ++j)
+                    rows[(int) (j - start)] = row(i, j, i + j);
+                assertRows(result, rows);
+            }
+        }
+
+        int deletedPartitions = deletedPartitionsEnd - deletedPartitionsStart;
+        int deletedRows = deletedRowsEnd - deletedRowsStart;
+        logger.info("Selecting *");
+        UntypedResultSet result = execute("SELECT * FROM " + table);
+        assertRowCount(result, rowsPerPartition * (partitions - deletedPartitions) - deletedRows);
+
+        Util.flush(cfs);
+
+        logger.info("Selecting *");
+        result = execute("SELECT * FROM " + table);
+        assertRowCount(result, rowsPerPartition * (partitions - deletedPartitions) - deletedRows);
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/cql3/MemtableSizeTest.java b/test/unit/org/apache/cassandra/cql3/MemtableSizeTest.java
index 7d6c6dd..63ff055 100644
--- a/test/unit/org/apache/cassandra/cql3/MemtableSizeTest.java
+++ b/test/unit/org/apache/cassandra/cql3/MemtableSizeTest.java
@@ -18,33 +18,53 @@
 
 package org.apache.cassandra.cql3;
 
+import java.util.List;
+
 import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.Memtable;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.ObjectSizes;
 
+@RunWith(Parameterized.class)
 public class MemtableSizeTest extends CQLTester
 {
-    static String keyspace;
-    String table;
-    ColumnFamilyStore cfs;
+    static final Logger logger = LoggerFactory.getLogger(MemtableSizeTest.class);
 
-    int partitions = 50_000;
-    int rowsPerPartition = 4;
+    static final int partitions = 50_000;
+    static final int rowsPerPartition = 4;
 
-    int deletedPartitions = 10_000;
-    int deletedRows = 5_000;
+    static final int deletedPartitions = 10_000;
+    static final int deletedRows = 5_000;
+
+    @Parameterized.Parameter(0)
+    public String memtableClass;
+
+    @Parameterized.Parameter(1)
+    public int differencePerPartition;
+
+    @Parameterized.Parameters(name = "{0}")
+    public static List<Object[]> parameters()
+    {
+        return ImmutableList.of(new Object[]{"skiplist", 50},
+                                new Object[]{"skiplist_sharded", 60});
+    }
 
-    // must be within 50 bytes per partition of the actual size
-    final int MAX_DIFFERENCE = (partitions + deletedPartitions + deletedRows) * 50;
+    // must be within differencePerPartition bytes per partition of the actual size
+    final long MAX_DIFFERENCE_PARTITIONS = (partitions + deletedPartitions + deletedRows);
 
     @BeforeClass
     public static void setUp()
@@ -52,42 +72,44 @@
         CQLTester.setUpClass();
         CQLTester.prepareServer();
         CQLTester.disablePreparedReuseForTest();
-        System.err.println("setupClass done.");
+        logger.info("setupClass done.");
     }
 
     @Test
-    public void testTruncationReleasesLogSpace()
+    public void testSize()
     {
-        Util.flakyTest(this::testSize, 2, "Fails occasionally, see CASSANDRA-16684");
+        Util.flakyTest(this::testSizeFlaky, 2, "Fails occasionally, see CASSANDRA-16684");
     }
 
-    private void testSize()
+    private void testSizeFlaky()
     {
         try
         {
-            keyspace = createKeyspace("CREATE KEYSPACE %s with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } and durable_writes = false");
-            table = createTable(keyspace, "CREATE TABLE %s ( userid bigint, picid bigint, commentid bigint, PRIMARY KEY(userid, picid)) with compression = {'enabled': false}");
+            String keyspace = createKeyspace("CREATE KEYSPACE %s with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } and durable_writes = false");
+            String table = createTable(keyspace, "CREATE TABLE %s ( userid bigint, picid bigint, commentid bigint, PRIMARY KEY(userid, picid))" +
+                                                 " with compression = {'enabled': false}" +
+                                                 " and memtable = '" + memtableClass + "'");
             execute("use " + keyspace + ';');
 
             String writeStatement = "INSERT INTO " + table + "(userid,picid,commentid)VALUES(?,?,?)";
 
-            cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
+            ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
             cfs.disableAutoCompaction();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
 
             long deepSizeBefore = ObjectSizes.measureDeep(cfs.getTracker().getView().getCurrentMemtable());
-            System.out.printf("Memtable deep size before %s\n%n",
-                              FBUtilities.prettyPrintMemory(deepSizeBefore));
+            logger.info("Memtable deep size before {}\n",
+                        FBUtilities.prettyPrintMemory(deepSizeBefore));
             long i;
             long limit = partitions;
-            System.out.println("Writing " + partitions + " partitions of " + rowsPerPartition + " rows");
+            logger.info("Writing {} partitions of {} rows", partitions, rowsPerPartition);
             for (i = 0; i < limit; ++i)
             {
                 for (long j = 0; j < rowsPerPartition; ++j)
                     execute(writeStatement, i, j, i + j);
             }
 
-            System.out.println("Deleting " + deletedPartitions + " partitions");
+            logger.info("Deleting {} partitions", deletedPartitions);
             limit += deletedPartitions;
             for (; i < limit; ++i)
             {
@@ -95,7 +117,7 @@
                 execute("DELETE FROM " + table + " WHERE userid = ?", i);
             }
 
-            System.out.println("Deleting " + deletedRows + " rows");
+            logger.info("Deleting {} rows", deletedRows);
             limit += deletedRows;
             for (; i < limit; ++i)
             {
@@ -103,31 +125,28 @@
                 execute("DELETE FROM " + table + " WHERE userid = ? AND picid = ?", i, 0L);
             }
 
-
             if (!cfs.getLiveSSTables().isEmpty())
-                System.out.println("Warning: " + cfs.getLiveSSTables().size() + " sstables created.");
+                logger.info("Warning: " + cfs.getLiveSSTables().size() + " sstables created.");
 
             Memtable memtable = cfs.getTracker().getView().getCurrentMemtable();
-            long actualHeap = memtable.getAllocator().onHeap().owns();
-            System.out.printf("Memtable in %s mode: %d ops, %s serialized bytes, %s (%.0f%%) on heap, %s (%.0f%%) off-heap%n",
-                              DatabaseDescriptor.getMemtableAllocationType(),
-                              memtable.getOperations(),
-                              FBUtilities.prettyPrintMemory(memtable.getLiveDataSize()),
-                              FBUtilities.prettyPrintMemory(actualHeap),
-                              100 * memtable.getAllocator().onHeap().ownershipRatio(),
-                              FBUtilities.prettyPrintMemory(memtable.getAllocator().offHeap().owns()),
-                              100 * memtable.getAllocator().offHeap().ownershipRatio());
+            Memtable.MemoryUsage usage = Memtable.getMemoryUsage(memtable);
+            long actualHeap = usage.ownsOnHeap;
+            logger.info("Memtable in {} mode: {} ops, {} serialized bytes, {}\n",
+                        DatabaseDescriptor.getMemtableAllocationType(),
+                        memtable.operationCount(),
+                        FBUtilities.prettyPrintMemory(memtable.getLiveDataSize()),
+                        usage);
 
             long deepSizeAfter = ObjectSizes.measureDeep(memtable);
-            System.out.printf("Memtable deep size %s\n%n",
-                              FBUtilities.prettyPrintMemory(deepSizeAfter));
+            logger.info("Memtable deep size {}\n",
+                        FBUtilities.prettyPrintMemory(deepSizeAfter));
 
             long expectedHeap = deepSizeAfter - deepSizeBefore;
             String message = String.format("Expected heap usage close to %s, got %s.\n",
                                            FBUtilities.prettyPrintMemory(expectedHeap),
                                            FBUtilities.prettyPrintMemory(actualHeap));
-            System.out.println(message);
-            Assert.assertTrue(message, Math.abs(actualHeap - expectedHeap) <= MAX_DIFFERENCE);
+            logger.info(message);
+            Assert.assertTrue(message, Math.abs(actualHeap - expectedHeap) <= MAX_DIFFERENCE_PARTITIONS * differencePerPartition);
         }
         catch (Throwable throwable)
         {
diff --git a/test/unit/org/apache/cassandra/cql3/NonNativeTimestampTest.java b/test/unit/org/apache/cassandra/cql3/NonNativeTimestampTest.java
index 7ba9889..975f3cf 100644
--- a/test/unit/org/apache/cassandra/cql3/NonNativeTimestampTest.java
+++ b/test/unit/org/apache/cassandra/cql3/NonNativeTimestampTest.java
@@ -19,8 +19,8 @@
 
 import org.junit.Test;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 public class NonNativeTimestampTest extends CQLTester
diff --git a/test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java b/test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java
index b4fe0f5..49195f5 100644
--- a/test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java
+++ b/test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java
@@ -17,8 +17,6 @@
  */
 package org.apache.cassandra.cql3;
 
-import static junit.framework.Assert.fail;
-
 import java.io.Closeable;
 import java.util.concurrent.ExecutionException;
 
@@ -28,6 +26,7 @@
 import org.apache.cassandra.Util;
 import org.apache.cassandra.config.Config.DiskFailurePolicy;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.CommitLogSegment;
 import org.apache.cassandra.db.Keyspace;
@@ -37,6 +36,8 @@
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.KillerForTests;
 
+import static org.junit.Assert.fail;
+
 /**
  * Test that TombstoneOverwhelmingException gets thrown when it should be and doesn't when it shouldn't be.
  */
@@ -115,7 +116,10 @@
     {
         try
         {
-            Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable()).forceFlush().get();
+            Keyspace.open(KEYSPACE)
+                    .getColumnFamilyStore(currentTable())
+                    .forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS)
+                    .get();
             fail("FSWriteError expected.");
         }
         catch (ExecutionException e)
diff --git a/test/unit/org/apache/cassandra/cql3/PagingTest.java b/test/unit/org/apache/cassandra/cql3/PagingTest.java
index 50bba0e..a3387c4 100644
--- a/test/unit/org/apache/cassandra/cql3/PagingTest.java
+++ b/test/unit/org/apache/cassandra/cql3/PagingTest.java
@@ -17,9 +17,7 @@
  */
 package org.apache.cassandra.cql3;
 
-import java.net.InetAddress;
 import java.util.Iterator;
-import java.util.List;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -31,16 +29,16 @@
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.SimpleStatement;
 import com.datastax.driver.core.Statement;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
 
-import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
 import org.apache.cassandra.locator.*;
 import org.apache.cassandra.service.EmbeddedCassandraService;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static junit.framework.Assert.assertFalse;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertEquals;
 
 
@@ -54,13 +52,14 @@
                                                     " WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 };";
 
     private static final String dropKsStatement = "DROP KEYSPACE IF EXISTS " + KEYSPACE;
+    private static EmbeddedCassandraService cassandra;
 
     @BeforeClass
     public static void setup() throws Exception
     {
         System.setProperty("cassandra.config", "cassandra-murmur.yaml");
-        EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
-        cassandra.start();
+
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
 
         // Currently the native server start method returns before the server is fully bound to the socket, so we need
         // to wait slightly before trying to connect to it. We should fix this, but in the meantime we use a sleep.
@@ -78,7 +77,10 @@
     @AfterClass
     public static void tearDown()
     {
-        cluster.close();
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
     }
 
     /**
diff --git a/test/unit/org/apache/cassandra/cql3/PasswordObfuscatorTest.java b/test/unit/org/apache/cassandra/cql3/PasswordObfuscatorTest.java
index 09a366e..96f97b6 100644
--- a/test/unit/org/apache/cassandra/cql3/PasswordObfuscatorTest.java
+++ b/test/unit/org/apache/cassandra/cql3/PasswordObfuscatorTest.java
@@ -29,13 +29,23 @@
 
 public class PasswordObfuscatorTest
 {
-    private static final RoleOptions opts = new RoleOptions();
-    private static final String optsPassword = "testpassword";
+    private static final RoleOptions plainPwdOpts = new RoleOptions();
+    private static final RoleOptions hashedPwdOpts = new RoleOptions();
+    private static final String plainPwd = "testpassword";
+    private static final String hashedPwd = "$2a$10$1fI9MDCe13ZmEYW4XXZibuASNKyqOY828ELGUtml/t.0Mk/6Kqnsq";
 
     @BeforeClass
     public static void startup()
     {
-        opts.setOption(org.apache.cassandra.auth.IRoleManager.Option.PASSWORD, "testpassword");
+        plainPwdOpts.setOption(org.apache.cassandra.auth.IRoleManager.Option.PASSWORD, plainPwd);
+        hashedPwdOpts.setOption(org.apache.cassandra.auth.IRoleManager.Option.HASHED_PASSWORD, hashedPwd);
+    }
+
+    @Test
+    public void testSpecialCharsObfuscation()
+    {
+        assertEquals("ALTER ROLE testrole WITH HASHED PASSWORD = '" + OBFUSCATION_TOKEN + "'",
+                     obfuscate(format("ALTER ROLE testrole WITH HASHED PASSWORD = '%s'",hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -45,7 +55,13 @@
                      obfuscate("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '123'"));
 
         assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '%s'", optsPassword), opts));
+                     obfuscate(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD = '" + hashedPwd + "'"));
+
+        assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD = '%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD = '%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -55,14 +71,20 @@
                      obfuscate("CREATE ROLE role1 WITH password = '123' AND LOGIN = true"));
 
         assertEquals(format("CREATE ROLE role1 WITH password = '%s' AND LOGIN = true", OBFUSCATION_TOKEN),
-                     obfuscate(format("CREATE ROLE role1 WITH password = '%s' AND LOGIN = true", optsPassword), opts));
+                     obfuscate(format("CREATE ROLE role1 WITH password = '%s' AND LOGIN = true", plainPwd), plainPwdOpts));
+
+        assertEquals(format("CREATE ROLE role1 WITH HASHED password %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE ROLE role1 WITH HASHED password = '%s' AND LOGIN = true", hashedPwd)));
+
+        assertEquals(format("CREATE ROLE role1 WITH HASHED password = '%s' AND LOGIN = true", OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE ROLE role1 WITH HASHED password = '%s' AND LOGIN = true", hashedPwd), hashedPwdOpts));
     }
 
     @Test
     public void testCreateRoleWithoutPassword()
     {
         assertEquals("CREATE ROLE role1", obfuscate("CREATE ROLE role1"));
-        assertEquals("CREATE ROLE role1", obfuscate("CREATE ROLE role1", opts));
+        assertEquals("CREATE ROLE role1", obfuscate("CREATE ROLE role1", plainPwdOpts));
     }
 
     @Test
@@ -70,13 +92,21 @@
     {
         assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD %s", OBFUSCATION_TOKEN),
                      obfuscate("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '123';" +
-                                                  "CREATE ROLE role2 WITH LOGIN = true AND PASSWORD = '123'"));
+                               "CREATE ROLE role2 WITH LOGIN = true AND PASSWORD = '123'"));
 
-        assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '%s';"
-                            + "CREATE ROLE role2 WITH LOGIN = true AND PASSWORD = '%s'", OBFUSCATION_TOKEN, OBFUSCATION_TOKEN),
-                     obfuscate(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '%s';"
-                                                         + "CREATE ROLE role2 WITH LOGIN = true AND PASSWORD = '%s'", optsPassword, optsPassword),
-                                                  opts));
+        assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '%s';" +
+                            "CREATE ROLE role2 WITH LOGIN = true AND PASSWORD = '%s'", OBFUSCATION_TOKEN, OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE ROLE role1 WITH LOGIN = true AND PASSWORD = '%s';" +
+                                      "CREATE ROLE role2 WITH LOGIN = true AND PASSWORD = '%s'", plainPwd, plainPwd), plainPwdOpts));
+
+        assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD = '%s';" +
+                               "CREATE ROLE role2 WITH LOGIN = true AND HASHED PASSWORD = '%s'", hashedPwd, hashedPwd)));
+
+        assertEquals(format("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD = '%s';" +
+                            "CREATE ROLE role2 WITH LOGIN = true AND HASHED PASSWORD = '%s'", OBFUSCATION_TOKEN, OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE ROLE role1 WITH LOGIN = true AND HASHED PASSWORD = '%s';" +
+                                      "CREATE ROLE role2 WITH LOGIN = true AND HASHED PASSWORD = '%s'", hashedPwd, hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -86,7 +116,13 @@
                      obfuscate("ALTER ROLE role1 with PASSWORD = '123'"));
 
         assertEquals(format("ALTER ROLE role1 with PASSWORD = '%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("ALTER ROLE role1 with PASSWORD = '%s'", optsPassword), opts));
+                     obfuscate(format("ALTER ROLE role1 with PASSWORD = '%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("ALTER ROLE role1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER ROLE role1 with HASHED PASSWORD = '%s'", hashedPwd)));
+
+        assertEquals(format("ALTER ROLE role1 with HASHED PASSWORD = '%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER ROLE role1 with HASHED PASSWORD = '%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -96,7 +132,13 @@
                      obfuscate("ALTER ROLE role1 with PASSWORD='123'"));
 
         assertEquals(format("ALTER ROLE role1 with PASSWORD='%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("ALTER ROLE role1 with PASSWORD='%s'", optsPassword), opts));
+                     obfuscate(format("ALTER ROLE role1 with PASSWORD='%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("ALTER ROLE role1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER ROLE role1 with HASHED PASSWORD='%s'", hashedPwd)));
+
+        assertEquals(format("ALTER ROLE role1 with HASHED PASSWORD='%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER ROLE role1 with HASHED PASSWORD='%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -106,7 +148,13 @@
                      obfuscate("ALTER ROLE role1 with PASSWORD= '123'"));
 
         assertEquals(format("ALTER ROLE role1 with PASSWORD= '%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("ALTER ROLE role1 with PASSWORD= '%s'", optsPassword), opts));
+                     obfuscate(format("ALTER ROLE role1 with PASSWORD= '%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("ALTER ROLE role1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER ROLE role1 with HASHED PASSWORD= '%s'", hashedPwd)));
+
+        assertEquals(format("ALTER ROLE role1 with HASHED PASSWORD= '%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER ROLE role1 with HASHED PASSWORD= '%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -114,7 +162,9 @@
     {
         assertEquals("ALTER ROLE role1", obfuscate("ALTER ROLE role1"));
 
-        assertEquals("ALTER ROLE role1", obfuscate("ALTER ROLE role1", opts));
+        assertEquals("ALTER ROLE role1", obfuscate("ALTER ROLE role1", plainPwdOpts));
+
+        assertEquals("ALTER ROLE role1", obfuscate("ALTER ROLE role1", hashedPwdOpts));
     }
 
     @Test
@@ -124,7 +174,13 @@
                      obfuscate("CREATE USER user1 with PASSWORD '123'"));
 
         assertEquals(format("CREATE USER user1 with PASSWORD '%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("CREATE USER user1 with PASSWORD '%s'", optsPassword), opts));
+                     obfuscate(format("CREATE USER user1 with PASSWORD '%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("CREATE USER user1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE USER user1 with HASHED PASSWORD '%s'", hashedPwd)));
+
+        assertEquals(format("CREATE USER user1 with HASHED PASSWORD '%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE USER user1 with HASHED PASSWORD '%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -132,7 +188,9 @@
     {
         assertEquals("CREATE USER user1", obfuscate("CREATE USER user1"));
 
-        assertEquals("CREATE USER user1", obfuscate("CREATE USER user1", opts));
+        assertEquals("CREATE USER user1", obfuscate("CREATE USER user1", plainPwdOpts));
+
+        assertEquals("CREATE USER user1", obfuscate("CREATE USER user1", hashedPwdOpts));
     }
 
     @Test
@@ -142,7 +200,13 @@
                      obfuscate("ALTER USER user1 with PASSWORD '123'"));
 
         assertEquals(format("ALTER USER user1 with PASSWORD '%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("ALTER USER user1 with PASSWORD '%s'", optsPassword), opts));
+                     obfuscate(format("ALTER USER user1 with PASSWORD '%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("ALTER USER user1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER USER user1 with HASHED PASSWORD '%s'", hashedPwd)));
+
+        assertEquals(format("ALTER USER user1 with HASHED PASSWORD '%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER USER user1 with HASHED PASSWORD '%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -152,7 +216,13 @@
                      obfuscate("ALTER USER user1 with paSSwoRd '123'"));
 
         assertEquals(format("ALTER USER user1 with paSSwoRd '%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("ALTER USER user1 with paSSwoRd '%s'", optsPassword), opts));
+                     obfuscate(format("ALTER USER user1 with paSSwoRd '%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("ALTER USER user1 with HASHED paSSwoRd %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER USER user1 with HASHED paSSwoRd '%s'", hashedPwd)));
+
+        assertEquals(format("ALTER USER user1 with HASHED paSSwoRd '%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER USER user1 with HASHED paSSwoRd '%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -162,7 +232,13 @@
                      obfuscate("ALTER USER user1 with PASSWORD\n'123'"));
 
         assertEquals(format("ALTER USER user1 with PASSWORD\n'%s'", OBFUSCATION_TOKEN),
-                     obfuscate(format("ALTER USER user1 with PASSWORD\n'%s'", optsPassword), opts));
+                     obfuscate(format("ALTER USER user1 with PASSWORD\n'%s'", plainPwd), plainPwdOpts));
+
+        assertEquals(format("ALTER USER user1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER USER user1 with HASHED PASSWORD\n'%s'", hashedPwd)));
+
+        assertEquals(format("ALTER USER user1 with HASHED PASSWORD\n'%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("ALTER USER user1 with HASHED PASSWORD\n'%s'", hashedPwd), hashedPwdOpts));
     }
 
     @Test
@@ -175,6 +251,12 @@
         newLinePassOpts.setOption(org.apache.cassandra.auth.IRoleManager.Option.PASSWORD, "test\npassword");
         assertEquals(String.format("CREATE USER user1 with PASSWORD '%s'", OBFUSCATION_TOKEN),
                      obfuscate(format("CREATE USER user1 with PASSWORD '%s'", "test\npassword"), newLinePassOpts));
+
+        assertEquals(String.format("CREATE USER user1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate("CREATE USER user1 with HASHED PASSWORD 'a\nb'"));
+
+        assertEquals(String.format("CREATE USER user1 with HASHED PASSWORD '%s'", OBFUSCATION_TOKEN),
+                     obfuscate(format("CREATE USER user1 with HASHED PASSWORD '%s'", "test\npassword"), newLinePassOpts));
     }
 
     @Test
@@ -187,6 +269,12 @@
         emptyPassOpts.setOption(org.apache.cassandra.auth.IRoleManager.Option.PASSWORD, "");
         assertEquals("CREATE USER user1 with PASSWORD ''",
                      obfuscate("CREATE USER user1 with PASSWORD ''", emptyPassOpts));
+
+        assertEquals(String.format("CREATE USER user1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate("CREATE USER user1 with HASHED PASSWORD ''"));
+
+        assertEquals("CREATE USER user1 with HASHED PASSWORD ''",
+                     obfuscate("CREATE USER user1 with HASHED PASSWORD ''", emptyPassOpts));
     }
 
     @Test
@@ -194,6 +282,9 @@
     {
         assertEquals(String.format("CREATE USER user1 with PASSWORD %s", OBFUSCATION_TOKEN),
                      obfuscate("CREATE USER user1 with PASSWORD 'p a ss wor d'"));
+
+        assertEquals(String.format("CREATE USER user1 with HASHED PASSWORD %s", OBFUSCATION_TOKEN),
+                     obfuscate("CREATE USER user1 with HASHED PASSWORD 'p a ss wor d'"));
     }
 
     @Test
@@ -211,8 +302,23 @@
                             "APPLY BATCH;", OBFUSCATION_TOKEN),
                      obfuscate(format("BEGIN BATCH \n" +
                                       "    CREATE ROLE alice1 WITH PASSWORD = '%s' and LOGIN = true; \n" +
-                                      "APPLY BATCH;", optsPassword),
-                               opts));
+                                      "APPLY BATCH;", plainPwd),
+                               plainPwdOpts));
+
+        assertEquals(format("BEGIN BATCH \n" +
+                            "    CREATE ROLE alice1 WITH HASHED PASSWORD %s",
+                            OBFUSCATION_TOKEN),
+                     obfuscate("BEGIN BATCH \n" +
+                               "    CREATE ROLE alice1 WITH HASHED PASSWORD = '" + hashedPwd + "' and LOGIN = true; \n" +
+                               "APPLY BATCH;"));
+
+        assertEquals(format("BEGIN BATCH \n" +
+                            "    CREATE ROLE alice1 WITH HASHED PASSWORD = '%s' and LOGIN = true; \n" +
+                            "APPLY BATCH;", OBFUSCATION_TOKEN),
+                     obfuscate(format("BEGIN BATCH \n" +
+                          "    CREATE ROLE alice1 WITH HASHED PASSWORD = '%s' and LOGIN = true; \n" +
+                          "APPLY BATCH;", hashedPwd),
+                               hashedPwdOpts));
     }
 
     @Test
@@ -234,7 +340,26 @@
                      obfuscate(format("BEGIN BATCH \n" +
                                       "    CREATE ROLE alice1 WITH PASSWORD = '%s' and LOGIN = true; \n" +
                                       "    CREATE ROLE alice2 WITH PASSWORD = '%s' and LOGIN = true; \n" +
-                                      "APPLY BATCH;", optsPassword, optsPassword),
-                               opts));
+                                      "APPLY BATCH;", plainPwd, plainPwd),
+                               plainPwdOpts));
+
+        assertEquals(format("BEGIN BATCH \n" +
+                            "    CREATE ROLE alice1 WITH HASHED PASSWORD %s",
+                            OBFUSCATION_TOKEN),
+                     obfuscate("BEGIN BATCH \n" +
+                               "    CREATE ROLE alice1 WITH HASHED PASSWORD = '" + hashedPwd + "' and LOGIN = true; \n" +
+                               "    CREATE ROLE alice2 WITH HASHED PASSWORD = '" + hashedPwd + "' and LOGIN = true; \n" +
+                               "APPLY BATCH;"));
+
+        assertEquals(format("BEGIN BATCH \n" +
+                            "    CREATE ROLE alice1 WITH HASHED PASSWORD = '%s' and LOGIN = true; \n" +
+                            "    CREATE ROLE alice2 WITH HASHED PASSWORD = '%s' and LOGIN = true; \n" +
+                            "APPLY BATCH;"
+                            , OBFUSCATION_TOKEN, OBFUSCATION_TOKEN),
+                     obfuscate(format("BEGIN BATCH \n" +
+                                      "    CREATE ROLE alice1 WITH HASHED PASSWORD = '%s' and LOGIN = true; \n" +
+                                      "    CREATE ROLE alice2 WITH HASHED PASSWORD = '%s' and LOGIN = true; \n" +
+                                      "APPLY BATCH;", hashedPwd, hashedPwd),
+                               hashedPwdOpts));
     }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/PreparedStatementsTest.java b/test/unit/org/apache/cassandra/cql3/PreparedStatementsTest.java
index d885071..ffd7e25 100644
--- a/test/unit/org/apache/cassandra/cql3/PreparedStatementsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/PreparedStatementsTest.java
@@ -58,7 +58,7 @@
     @Test
     public void testInvalidatePreparedStatementsOnDrop()
     {
-        Session session = sessions.get(ProtocolVersion.V5);
+        Session session = sessionNet(ProtocolVersion.V5);
         session.execute(dropKsStatement);
         session.execute(createKsStatement);
 
@@ -102,7 +102,7 @@
 
     private void testInvalidatePreparedStatementOnAlter(ProtocolVersion version, boolean supportsMetadataChange)
     {
-        Session session = sessions.get(version);
+        Session session = sessionNet(version);
         String createTableStatement = "CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".qp_cleanup (a int PRIMARY KEY, b int, c int);";
         String alterTableStatement = "ALTER TABLE " + KEYSPACE + ".qp_cleanup ADD d int;";
 
@@ -162,7 +162,7 @@
 
     private void testInvalidatePreparedStatementOnAlterUnchangedMetadata(ProtocolVersion version)
     {
-        Session session = sessions.get(version);
+        Session session = sessionNet(version);
         String createTableStatement = "CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".qp_cleanup (a int PRIMARY KEY, b int, c int);";
         String alterTableStatement = "ALTER TABLE " + KEYSPACE + ".qp_cleanup ADD d int;";
 
@@ -200,7 +200,7 @@
     @Test
     public void testStatementRePreparationOnReconnect()
     {
-        Session session = sessions.get(ProtocolVersion.V5);
+        Session session = sessionNet(ProtocolVersion.V5);
         session.execute("USE " + keyspace());
 
         session.execute(dropKsStatement);
@@ -241,7 +241,7 @@
     @Test
     public void prepareAndExecuteWithCustomExpressions() throws Throwable
     {
-        Session session = sessions.get(ProtocolVersion.V5);
+        Session session = sessionNet(ProtocolVersion.V5);
 
         session.execute(dropKsStatement);
         session.execute(createKsStatement);
diff --git a/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java b/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java
index 8e3f7f4..2d0ea3b 100644
--- a/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java
+++ b/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java
@@ -32,10 +32,12 @@
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.SchemaKeyspaceTables;
 import org.apache.cassandra.service.ClientState;
-import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.MD5Digest;
 
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class PstmtPersistenceTest extends CQLTester
@@ -130,7 +132,7 @@
     {
         QueryProcessor.Prepared prepared = handler.getPrepared(stmtId);
         Assert.assertNotNull(prepared);
-        handler.processPrepared(prepared.statement, QueryState.forInternalCalls(), options, Collections.emptyMap(), System.nanoTime());
+        handler.processPrepared(prepared.statement, forInternalCalls(), options, emptyMap(), nanoTime());
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/cql3/ViewAbstractParameterizedTest.java b/test/unit/org/apache/cassandra/cql3/ViewAbstractParameterizedTest.java
new file mode 100644
index 0000000..629d407
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/ViewAbstractParameterizedTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.cql3;
+
+import java.util.Collection;
+import java.util.stream.Collectors;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import com.datastax.driver.core.ResultSet;
+import org.apache.cassandra.transport.ProtocolVersion;
+
+@Ignore
+@RunWith(Parameterized.class)
+public abstract class ViewAbstractParameterizedTest extends ViewAbstractTest
+{
+    @Parameterized.Parameter
+    public ProtocolVersion version;
+
+    @Parameterized.Parameters()
+    public static Collection<Object[]> versions()
+    {
+        return ProtocolVersion.SUPPORTED.stream()
+                                        .map(v -> new Object[]{v})
+                                        .collect(Collectors.toList());
+    }
+
+    @Before
+    @Override
+    public void beforeTest() throws Throwable
+    {
+        super.beforeTest();
+
+        executeNet("USE " + keyspace());
+    }
+
+    @Override
+    protected com.datastax.driver.core.ResultSet executeNet(String query, Object... values) throws Throwable
+    {
+        return executeNet(version, query, values);
+    }
+
+    @Override
+    protected com.datastax.driver.core.ResultSet executeNetWithPaging(String query, int pageSize)
+    {
+        return executeNetWithPaging(version, query, pageSize);
+    }
+
+    @Override
+    protected void assertRowsNet(ResultSet result, Object[]... rows)
+    {
+        assertRowsNet(version, result, rows);
+    }
+
+    @Override
+    protected void updateView(String query, Object... params) throws Throwable
+    {
+        updateView(version, query, params);
+    }
+
+    protected void updateViewWithFlush(String query, boolean flush, Object... params) throws Throwable
+    {
+        updateView(query, params);
+        if (flush)
+            flush(keyspace());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/cql3/ViewAbstractTest.java b/test/unit/org/apache/cassandra/cql3/ViewAbstractTest.java
index bbd21dc..4b96ae2 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewAbstractTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewAbstractTest.java
@@ -18,24 +18,13 @@
 
 package org.apache.cassandra.cql3;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 
-import com.datastax.driver.core.exceptions.OperationTimedOutException;
-import org.apache.cassandra.concurrent.Stage;
-import org.awaitility.Awaitility;
-
 @Ignore
 public abstract class ViewAbstractTest extends CQLTester
 {
-    protected final List<String> views = new ArrayList<>();
-
     @BeforeClass
     public static void startup()
     {
@@ -43,66 +32,12 @@
     }
 
     @Before
-    public void begin()
+    @Override
+    public void beforeTest() throws Throwable
     {
-        begin(views);
-    }
+        super.beforeTest();
 
-    private static void begin(List<String> views)
-    {
-        views.clear();
-    }
-
-    @After
-    public void end() throws Throwable
-    {
-        end(views, this);
-    }
-
-    private static void end(List<String> views, CQLTester tester) throws Throwable
-    {
-        for (String viewName : views)
-            tester.executeNet("DROP MATERIALIZED VIEW " + viewName);
-    }
-
-    protected void createView(String name, String query) throws Throwable
-    {
-        createView(name, query, views, this);
-    }
-
-    private static void createView(String name, String query, List<String> views, CQLTester tester) throws Throwable
-    {
-        try
-        {
-            tester.executeNet(String.format(query, name));
-            // If exception is thrown, the view will not be added to the list; since it shouldn't have been created, this is
-            // the desired behavior
-            views.add(name);
-        }
-        catch (OperationTimedOutException ex)
-        {
-            // ... except for timeout, when we actually do not know whether the view was created or not
-            views.add(name);
-            throw ex;
-        }
-    }
-
-    protected void updateView(String query, Object... params) throws Throwable
-    {
-        updateView(query, this, params);
-    }
-
-    private static void updateView(String query, CQLTester tester, Object... params) throws Throwable
-    {
-        tester.executeNet(query, params);
-        waitForViewMutations();
-    }
-
-    protected static void waitForViewMutations()
-    {
-        Awaitility.await()
-                  .atMost(5, TimeUnit.MINUTES)
-                  .until(() -> Stage.VIEW_MUTATION.executor().getPendingTaskCount() == 0
-                               && Stage.VIEW_MUTATION.executor().getActiveTaskCount() == 0);
+        execute("USE " + keyspace());
+        executeNet("USE " + keyspace());
     }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsPartialTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsPartialTest.java
index 377621e..a05621c 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsPartialTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsPartialTest.java
@@ -23,8 +23,8 @@
 import org.junit.Ignore;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.FBUtilities;
 
 /* ViewComplexTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * For any change made here, check whether it also applies to the other classes:
@@ -36,52 +36,62 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexDeletionsPartialTest extends ViewComplexTester
+public class ViewComplexDeletionsPartialTest extends ViewAbstractParameterizedTest
 {
-    // for now, unselected column cannot be fully supported, SEE CASSANDRA-11500
+    // for now, unselected column cannot be fully supported, see CASSANDRA-11500
     @Ignore
     @Test
-    public void testPartialDeleteUnselectedColumn() throws Throwable
+    public void testPartialDeleteUnselectedColumnWithFlush() throws Throwable
     {
-        boolean flush = true;
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
+        testPartialDeleteUnselectedColumn(true);
+    }
+
+    // for now, unselected column cannot be fully supported, see CASSANDRA-11500
+    @Ignore
+    @Test
+    public void testPartialDeleteUnselectedColumnWithoutFlush() throws Throwable
+    {
+        testPartialDeleteUnselectedColumn(false);
+    }
+
+    private void testPartialDeleteUnselectedColumn(boolean flush) throws Throwable
+    {
         createTable("CREATE TABLE %s (k int, c int, a int, b int, PRIMARY KEY (k, c))");
-        String mv = createView("CREATE MATERIALIZED VIEW %s " +
-                                 "AS SELECT k,c FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS " +
+                   "SELECT k,c FROM %s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
         Keyspace ks = Keyspace.open(keyspace());
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         updateView("UPDATE %s USING TIMESTAMP 10 SET b=1 WHERE k=1 AND c=1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, 1));
-        assertRows(execute("SELECT * from " + mv), row(1, 1));
+        assertRows(executeView("SELECT * FROM %s"), row(1, 1));
         updateView("DELETE b FROM %s USING TIMESTAMP 11 WHERE k=1 AND c=1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertEmpty(execute("SELECT * from %s"));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * FROM %s"));
         updateView("UPDATE %s USING TIMESTAMP 1 SET a=1 WHERE k=1 AND c=1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRows(execute("SELECT * from %s"), row(1, 1, 1, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1));
+        assertRows(executeView("SELECT * FROM %s"), row(1, 1));
 
         execute("truncate %s;");
 
         // removal generated by unselected column should not shadow PK update with smaller timestamp
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 18 SET a=1 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, 1, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1));
+        assertRows(executeView("SELECT * FROM %s"), row(1, 1));
 
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 20 SET a=null WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"));
-        assertRows(execute("SELECT * from " + mv));
+        assertRows(executeView("SELECT * FROM %s"));
 
         updateViewWithFlush("INSERT INTO %s(k,c) VALUES(1,1) USING TIMESTAMP 15", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1));
+        assertRows(executeView("SELECT * FROM %s"), row(1, 1));
     }
 
     @Test
@@ -98,78 +108,76 @@
 
     private void testPartialDeleteSelectedColumn(boolean flush) throws Throwable
     {
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         createTable("CREATE TABLE %s (k int, c int, a int, b int, e int, f int, PRIMARY KEY (k, c))");
-        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c, k FROM %%s " +
-                                 "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c, k FROM %s " +
+                     "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
         Keyspace ks = Keyspace.open(keyspace());
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 10 SET b=1 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, 1, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, 1));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, 1));
 
         updateViewWithFlush("DELETE b FROM %s USING TIMESTAMP 11 WHERE k=1 AND c=1", flush);
         assertEmpty(execute("SELECT * from %s"));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
 
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 1 SET a=1 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, 1, null, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, 1, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, 1, null));
 
         updateViewWithFlush("DELETE a FROM %s USING TIMESTAMP 1 WHERE k=1 AND c=1", flush);
         assertEmpty(execute("SELECT * from %s"));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
 
         // view livenessInfo should not be affected by selected column ts or tb
         updateViewWithFlush("INSERT INTO %s(k,c) VALUES(1,1) USING TIMESTAMP 0", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, null, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 12 SET b=1 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, 1, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, 1));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, 1));
 
         updateViewWithFlush("DELETE b FROM %s USING TIMESTAMP 13 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, null, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         updateViewWithFlush("DELETE FROM %s USING TIMESTAMP 14 WHERE k=1 AND c=1", flush);
         assertEmpty(execute("SELECT * from %s"));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
 
         updateViewWithFlush("INSERT INTO %s(k,c) VALUES(1,1) USING TIMESTAMP 15", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, null, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         updateViewWithFlush("UPDATE %s USING TTL 3 SET b=1 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, 1, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, 1));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, 1));
 
         TimeUnit.SECONDS.sleep(4);
 
         assertRows(execute("SELECT * from %s"), row(1, 1, null, null, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         updateViewWithFlush("DELETE FROM %s USING TIMESTAMP 15 WHERE k=1 AND c=1", flush);
         assertEmpty(execute("SELECT * from %s"));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
 
         execute("truncate %s;");
 
         // removal generated by unselected column should not shadow selected column with smaller timestamp
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 18 SET e=1 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, null, null, 1, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 18 SET e=null WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"));
-        assertRows(execute("SELECT * from " + mv));
+        assertRows(executeView("SELECT * from %s"));
 
         updateViewWithFlush("UPDATE %s USING TIMESTAMP 16 SET a=1 WHERE k=1 AND c=1", flush);
         assertRows(execute("SELECT * from %s"), row(1, 1, 1, null, null, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, 1, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, 1, null));
     }
 
     @Test
@@ -189,39 +197,36 @@
         // for partition range deletion, we need to know that the existing row is shadowed rather than nonexistent.
         createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
-                                 "WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                     "WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a, b)");
 
         Keyspace ks = Keyspace.open(keyspace());
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) using timestamp 0", 1, 1, 1, 1);
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(1, 1, 1, 1));
 
         // remove view row
         updateView("UPDATE %s using timestamp 1 set b = null WHERE a=1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"));
         // remove base row, no view update generated.
         updateView("DELETE FROM %s using timestamp 2 where a=1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"));
 
         // restore view row with b and c columns; d is still a tombstone
         updateView("UPDATE %s using timestamp 3 set b = 1,c = 1 where a=1"); // upsert
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv), row(1, 1, 1, null));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(1, 1, 1, null));
     }
 }
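
The change repeated throughout these tests replaces `FBUtilities.waitOnFutures(ks.flush())` with a single `Util.flush(ks)` call. A minimal sketch of such a helper, assuming only that `flush()` hands back futures to block on; the `Flushable` interface below is illustrative, not Cassandra's actual API:

```java
import java.util.List;
import java.util.concurrent.Future;

final class FlushUtil
{
    private FlushUtil() {}

    // Block until every memtable flush future has completed, so subsequent
    // assertions read from sstables rather than memtables.
    static void flush(Flushable target) throws Exception
    {
        for (Future<?> f : target.flush())
            f.get();
    }

    // Hypothetical abstraction over "something that can flush", e.g. a keyspace.
    interface Flushable
    {
        List<Future<?>> flush();
    }
}
```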
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsTest.java
index 24d76d4..d9fe7f1 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexDeletionsTest.java
@@ -19,23 +19,22 @@
 package org.apache.cassandra.cql3;
 
 import java.util.Arrays;
-import java.util.Comparator;
 import java.util.List;
 import java.util.stream.Collectors;
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.assertEquals;
 
-/* ViewComplexTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670)
+/* ViewComplexTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * Any changes here check if they apply to the other classes:
  * - ViewComplexUpdatesTest
  * - ViewComplexDeletionsTest
@@ -45,7 +44,7 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexDeletionsTest extends ViewComplexTester
+public class ViewComplexDeletionsTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testCommutativeRowDeletionFlush() throws Throwable
@@ -66,57 +65,55 @@
         // CASSANDRA-13409: a new update should not resurrect previously deleted data in the view
         createTable("create table %s (p int primary key, v1 int, v2 int)");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("create materialized view %s as select * from %%s " +
-                                 "where p is not null and v1 is not null primary key (v1, p)");
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        createView("create materialized view %s as select * from %s " +
+                     "where p is not null and v1 is not null primary key (v1, p)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         // sstable-1, Set initial values TS=1
         updateView("Insert into %s (p, v1, v2) values (3, 1, 3) using timestamp 1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v2, WRITETIME(v2) from " + mv + " WHERE v1 = ? AND p = ?", 1, 3), row(3, 1L));
+        assertRowsIgnoringOrder(executeView("SELECT v2, WRITETIME(v2) from %s WHERE v1 = ? AND p = ?", 1, 3), row(3, 1L));
         // sstable-2
         updateView("Delete from %s using timestamp 2 where p = 3;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"));
         // sstable-3
         updateView("Insert into %s (p, v1) values (3, 1) using timestamp 3;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
         // sstable-4
         updateView("UPdate %s using timestamp 4 set v1 = 2 where p = 3;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(2, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(2, 3, null, null));
         // sstable-5
         updateView("UPdate %s using timestamp 5 set v1 = 1 where p = 3;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
 
         if (flush)
         {
             // compact sstable 2 and 4, 5;
-            ColumnFamilyStore cfs = ks.getColumnFamilyStore(mv);
+            ColumnFamilyStore cfs = ks.getColumnFamilyStore(currentView());
             List<String> sstables = cfs.getLiveSSTables()
                                        .stream()
-                                       .sorted(Comparator.comparingInt(s -> s.descriptor.generation))
+                                       .sorted(SSTableReader.idComparator)
                                        .map(SSTableReader::getFilename)
                                        .collect(Collectors.toList());
             String dataFiles = String.join(",", Arrays.asList(sstables.get(1), sstables.get(3), sstables.get(4)));
@@ -124,7 +121,7 @@
             assertEquals(3, cfs.getLiveSSTables().size());
         }
         // regular tombstone should be retained after compaction
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
     }
 
     @Test
@@ -145,60 +142,58 @@
     {
         createTable("create table %s (p1 int, p2 int, v1 int, v2 int, primary key(p1, p2))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("create materialized view %s as select * from %%s " +
-                                 "where p1 is not null and p2 is not null primary key (p2, p1)");
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        createView("create materialized view %s as select * from %s " +
+                     "where p1 is not null and p2 is not null primary key (p2, p1)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         // Set initial values TS=1
         updateView("Insert into %s (p1, p2, v1, v2) values (1, 2, 3, 4) using timestamp 1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, v2, WRITETIME(v2) from " + mv + " WHERE p1 = ? AND p2 = ?", 1, 2),
+        assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2),
                                 row(3, 4, 1L));
         // remove row/mv TS=2
         updateView("Delete from %s using timestamp 2 where p1 = 1 and p2 = 2;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         // view is empty
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"));
         // insert PK with TS=3
         updateView("Insert into %s (p1, p2) values (1, 2) using timestamp 3;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         // deleted column in MV remained dead
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv), row(2, 1, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(2, 1, null, null));
 
-        ks.getColumnFamilyStore(mv).forceMajorCompaction();
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv), row(2, 1, null, null));
+        ks.getColumnFamilyStore(currentView()).forceMajorCompaction();
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(2, 1, null, null));
 
         // reset values
         updateView("Insert into %s (p1, p2, v1, v2) values (1, 2, 3, 4) using timestamp 10;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, v2, WRITETIME(v2) from " + mv + " WHERE p1 = ? AND p2 = ?", 1, 2),
+        assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2),
                                 row(3, 4, 10L));
 
         updateView("UPDATE %s using timestamp 20 SET v2 = 5 WHERE p1 = 1 and p2 = 2");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, v2, WRITETIME(v2) from " + mv + " WHERE p1 = ? AND p2 = ?", 1, 2),
+        assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2),
                                 row(3, 5, 20L));
 
         updateView("DELETE FROM %s using timestamp 10 WHERE p1 = 1 and p2 = 2");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, v2, WRITETIME(v2) from " + mv + " WHERE p1 = ? AND p2 = ?", 1, 2),
+        assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2),
                                 row(null, 5, 20L));
     }
 
@@ -206,75 +201,70 @@
     {
         createTable("create table %s (p int primary key, v1 int, v2 int)");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("create materialized view %s as select * from %%s " +
-                                 "where p is not null and v1 is not null primary key (v1, p)");
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        createView("create materialized view %s as select * from %s " +
+                     "where p is not null and v1 is not null primary key (v1, p)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         // Set initial values TS=1
         updateView("Insert into %s (p, v1, v2) values (3, 1, 5) using timestamp 1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v2, WRITETIME(v2) from " + mv + " WHERE v1 = ? AND p = ?", 1, 3), row(5, 1L));
+        assertRowsIgnoringOrder(executeView("SELECT v2, WRITETIME(v2) from %s WHERE v1 = ? AND p = ?", 1, 3), row(5, 1L));
         // remove row/mv TS=2
         updateView("Delete from %s using timestamp 2 where p = 3;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         // view is empty
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"));
         // insert PK with TS=3
         updateView("Insert into %s (p, v1) values (3, 1) using timestamp 3;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         // deleted column in MV remained dead
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv), row(1, 3, null));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(1, 3, null));
 
         // insert values TS=2, it should be considered dead due to previous tombstone
         updateView("Insert into %s (p, v1, v2) values (3, 1, 5) using timestamp 2;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         // deleted column in MV remained dead
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv), row(1, 3, null));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " limit 1"), row(1, 3, null));
+        assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(1, 3, null));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s limit 1"), row(1, 3, null));
 
         // insert values TS=2, it should be considered dead due to previous tombstone
-        executeNet(version, "UPDATE %s USING TIMESTAMP 3 SET v2 = ? WHERE p = ?", 4, 3);
+        executeNet("UPDATE %s USING TIMESTAMP 3 SET v2 = ? WHERE p = ?", 4, 3);
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRows(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, 4, 3L));
+        assertRows(execute("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, 4, 3L));
 
-        ks.getColumnFamilyStore(mv).forceMajorCompaction();
-        assertRows(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, 4, 3L));
-        assertRows(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv + " limit 1"), row(1, 3, 4, 3L));
+        ks.getColumnFamilyStore(currentView()).forceMajorCompaction();
+        assertRows(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, 4, 3L));
+        assertRows(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s limit 1"), row(1, 3, 4, 3L));
     }
 
     @Test
     public void testNoBatchlogCleanupForLocalMutations() throws Throwable
     {
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
         createTable("CREATE TABLE %s (k1 int primary key, v1 int)");
-        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
                    "WHERE k1 IS NOT NULL AND v1 IS NOT NULL PRIMARY KEY (v1, k1)");
 
         ColumnFamilyStore batchlog = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
         batchlog.disableAutoCompaction();
-        batchlog.forceBlockingFlush();
+        Util.flush(batchlog);
         int batchlogSSTables = batchlog.getLiveSSTables().size();
 
         updateView("INSERT INTO %s(k1, v1) VALUES(1, 1)");
-        batchlog.forceBlockingFlush();
+        Util.flush(batchlog);
         assertEquals(batchlogSSTables, batchlog.getLiveSSTables().size());
     }
 }
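
This file also changes how sstables are ordered before user-defined compaction: the int-based `Comparator.comparingInt(s -> s.descriptor.generation)` becomes `SSTableReader.idComparator`. The underlying idiom is the two-argument `Comparator.comparing(keyExtractor, keyComparator)`, shown here with plain JDK stand-ins (the `Id`/`Table` records below are hypothetical, not Cassandra types):

```java
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class IdSortSketch
{
    // Stand-in for an opaque sstable identifier that is no longer a plain int.
    record Id(String value) {}
    record Table(Id id, String filename) {}

    // Dedicated comparator for the opaque id type.
    static final Comparator<Id> ID_COMPARATOR = Comparator.comparing(Id::value);

    public static void main(String[] args)
    {
        List<String> files = Stream.of(new Table(new Id("3g7k"), "b.db"),
                                       new Table(new Id("3g7a"), "a.db"))
                                   // sort by the opaque id via its comparator, mirroring
                                   // Comparator.comparing(s -> s.descriptor.id, SSTableIdFactory.COMPARATOR)
                                   .sorted(Comparator.comparing(Table::id, ID_COMPARATOR))
                                   .map(Table::filename)
                                   .collect(Collectors.toList());
        System.out.println(files); // [a.db, b.db]
    }
}
```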
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessLimitTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessLimitTest.java
index 2be826f..0d3ab67 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessLimitTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessLimitTest.java
@@ -22,6 +22,7 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.Keyspace;
 
 import static org.junit.Assert.assertEquals;
@@ -36,7 +37,7 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexLivenessLimitTest extends ViewComplexTester
+public class ViewComplexLivenessLimitTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testExpiredLivenessLimitWithFlush() throws Throwable
@@ -56,13 +57,11 @@
     {
         createTable("CREATE TABLE %s (k int PRIMARY KEY, a int, b int);");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
                                 "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)");
-        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
                                 "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)");
         ks.getColumnFamilyStore(mv1).disableAutoCompaction();
         ks.getColumnFamilyStore(mv2).disableAutoCompaction();
@@ -76,19 +75,20 @@
             // create expired liveness
             updateView("DELETE a FROM %s WHERE k = ?;", i);
         }
+
         if (flush)
         {
-            ks.getColumnFamilyStore(mv1).forceBlockingFlush();
-            ks.getColumnFamilyStore(mv2).forceBlockingFlush();
+            Util.flushTable(ks, mv1);
+            Util.flushTable(ks, mv2);
         }
 
         for (String view : Arrays.asList(mv1, mv2))
         {
             // paging
-            assertEquals(1, executeNetWithPaging(version, String.format("SELECT k,a,b FROM %s limit 1", view), 1).all().size());
-            assertEquals(2, executeNetWithPaging(version, String.format("SELECT k,a,b FROM %s limit 2", view), 1).all().size());
-            assertEquals(2, executeNetWithPaging(version, String.format("SELECT k,a,b FROM %s", view), 1).all().size());
-            assertRowsNet(version, executeNetWithPaging(version, String.format("SELECT k,a,b FROM %s ", view), 1),
+            assertEquals(1, executeNetWithPaging(String.format("SELECT k,a,b FROM %s limit 1", view), 1).all().size());
+            assertEquals(2, executeNetWithPaging(String.format("SELECT k,a,b FROM %s limit 2", view), 1).all().size());
+            assertEquals(2, executeNetWithPaging(String.format("SELECT k,a,b FROM %s", view), 1).all().size());
+            assertRowsNet(executeNetWithPaging(String.format("SELECT k,a,b FROM %s ", view), 1),
                           row(50, 50, 50),
                           row(100, 100, 100));
             // limit
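
Where only specific views need flushing, the diff calls `Util.flushTable(ks, mv1)` instead of flushing the whole keyspace. Under the same illustrative interfaces as the earlier sketch, such a helper could simply look the table up by name and block on its flush; names here are assumptions, not the real API:

```java
import java.util.concurrent.Future;

final class FlushTableSketch
{
    private FlushTableSketch() {}

    // Hypothetical lookup-by-name plus blocking flush for a single table or view.
    interface TableStore { Future<?> forceFlush(); }
    interface KeyspaceLike { TableStore getColumnFamilyStore(String name); }

    static void flushTable(KeyspaceLike ks, String table) throws Exception
    {
        ks.getColumnFamilyStore(table).forceFlush().get();
    }
}
```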
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessTest.java
index 1e327e2..99caecc 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexLivenessTest.java
@@ -20,9 +20,9 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.assertEquals;
 
@@ -36,28 +36,36 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexLivenessTest extends ViewComplexTester
+public class ViewComplexLivenessTest extends ViewAbstractParameterizedTest
 {
     @Test
-    public void testUnselectedColumnWithExpiredLivenessInfo() throws Throwable
+    public void testUnselectedColumnWithExpiredLivenessInfoWithFlush() throws Throwable
     {
-        boolean flush = true;
+        testUnselectedColumnWithExpiredLivenessInfo(true);
+    }
+
+    @Test
+    public void testUnselectedColumnWithExpiredLivenessInfoWithoutFlush() throws Throwable
+    {
+        testUnselectedColumnWithExpiredLivenessInfo(false);
+    }
+
+    private void testUnselectedColumnWithExpiredLivenessInfo(boolean flush) throws Throwable
+    {
         createTable("create table %s (k int, c int, a int, b int, PRIMARY KEY(k, c))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String name = createView("create materialized view %s as select k,c,b from %%s " +
-                                 "where c is not null and k is not null primary key (c, k)");
-        ks.getColumnFamilyStore(name).disableAutoCompaction();
+        createView("create materialized view %s as select k,c,b from %s " +
+                   "where c is not null and k is not null primary key (c, k)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         // sstable-1, Set initial values TS=1
         updateViewWithFlush("UPDATE %s SET a = 1 WHERE k = 1 AND c = 1;", flush);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, 1, null));
-        assertRowsIgnoringOrder(execute("SELECT k,c,b from " + name + " WHERE k = 1 AND c = 1;"),
+        assertRowsIgnoringOrder(executeView("SELECT k,c,b from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, null));
 
         // sstable-2
@@ -65,28 +73,28 @@
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, 1, null));
-        assertRowsIgnoringOrder(execute("SELECT k,c,b from " + name + " WHERE k = 1 AND c = 1;"),
+        assertRowsIgnoringOrder(executeView("SELECT k,c,b from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, null));
 
         Thread.sleep(5001);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, 1, null));
-        assertRowsIgnoringOrder(execute("SELECT k,c,b from " + name + " WHERE k = 1 AND c = 1;"),
+        assertRowsIgnoringOrder(executeView("SELECT k,c,b from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, null));
 
         // sstable-3
         updateViewWithFlush("Update %s set a = null where k = 1 AND c = 1;", flush);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE k = 1 AND c = 1;"));
-        assertRowsIgnoringOrder(execute("SELECT k,c,b from " + name + " WHERE k = 1 AND c = 1;"));
+        assertRowsIgnoringOrder(executeView("SELECT k,c,b from %s WHERE k = 1 AND c = 1;"));
 
         // sstable-4
         updateViewWithFlush("Update %s USING TIMESTAMP 1 set b = 1 where k = 1 AND c = 1;", flush);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT k,c,b from " + name + " WHERE k = 1 AND c = 1;"),
+        assertRowsIgnoringOrder(executeView("SELECT k,c,b from %s WHERE k = 1 AND c = 1;"),
                                 row(1, 1, 1));
     }
 
@@ -95,22 +103,20 @@
     {
         createTable("create table %s (p int primary key, v1 int, v2 int)");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String name = createView("create materialized view %s as select * from %%s " +
-                                 "where p is not null and v1 is not null primary key (v1, p) " +
-                                 "with gc_grace_seconds=5");
-        ColumnFamilyStore cfs = ks.getColumnFamilyStore(name);
+        createView("create materialized view %s as select * from %s " +
+                   "where p is not null and v1 is not null primary key (v1, p) " +
+                   "with gc_grace_seconds=5");
+        ColumnFamilyStore cfs = ks.getColumnFamilyStore(currentView());
         cfs.disableAutoCompaction();
 
         updateView("Insert into %s (p, v1, v2) values (1, 1, 1)");
-        assertRowsIgnoringOrder(execute("SELECT p, v1, v2 from " + name), row(1, 1, 1));
+        assertRowsIgnoringOrder(executeView("SELECT p, v1, v2 from %s"), row(1, 1, 1));
 
         updateView("Update %s set v1 = null WHERE p = 1");
-        FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT p, v1, v2 from " + name));
+        Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT p, v1, v2 from %s"));
 
         cfs.forceMajorCompaction(); // before gc_grace_seconds elapses, the strict-liveness tombstoned dead row remains
         assertEquals(1, cfs.getLiveSSTables().size());
@@ -122,17 +128,17 @@
         assertEquals(0, cfs.getLiveSSTables().size());
 
         updateView("Update %s using ttl 5 set v1 = 1 WHERE p = 1");
-        FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT p, v1, v2 from " + name), row(1, 1, 1));
+        Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT p, v1, v2 from %s"), row(1, 1, 1));
 
         cfs.forceMajorCompaction(); // before ttl + gc_grace_seconds elapses, the strict-liveness TTLed dead row remains
         assertEquals(1, cfs.getLiveSSTables().size());
-        assertRowsIgnoringOrder(execute("SELECT p, v1, v2 from " + name), row(1, 1, 1));
+        assertRowsIgnoringOrder(executeView("SELECT p, v1, v2 from %s"), row(1, 1, 1));
 
         Thread.sleep(5500); // after expiration, before gc_grace_seconds
         cfs.forceMajorCompaction(); // before ttl + gc_grace_seconds elapses, the strict-liveness TTLed dead row remains
         assertEquals(1, cfs.getLiveSSTables().size());
-        assertRowsIgnoringOrder(execute("SELECT p, v1, v2 from " + name));
+        assertRowsIgnoringOrder(executeView("SELECT p, v1, v2 from %s"));
 
         Thread.sleep(5500); // after expiration + gc_grace_seconds
         assertEquals(1, cfs.getLiveSSTables().size()); // no auto compaction.
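
The gc_grace_seconds assertions above follow the usual rule that a tombstone only becomes purgeable by compaction once its local deletion time is at least gc_grace_seconds in the past. A simplified sketch of that check, not Cassandra's actual purge logic:

```java
final class PurgeCheck
{
    private PurgeCheck() {}

    // A tombstone (or expired TTL cell) can be dropped by compaction only after
    // gc_grace_seconds have elapsed since its local deletion time (simplified).
    static boolean purgeable(int nowInSec, int localDeletionTimeInSec, int gcGraceSeconds)
    {
        return nowInSec >= localDeletionTimeInSec + gcGraceSeconds;
    }

    public static void main(String[] args)
    {
        int deletedAt = 1_000;
        int gcGrace = 5;                                           // matches the view's gc_grace_seconds=5 above
        System.out.println(purgeable(1_004, deletedAt, gcGrace));  // false: still within the grace period
        System.out.println(purgeable(1_005, deletedAt, gcGrace));  // true: grace period elapsed
    }
}
```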
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexTTLTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexTTLTest.java
index b44c8d0..93c25c9 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexTTLTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexTTLTest.java
@@ -20,8 +20,8 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -36,7 +36,7 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexTTLTest extends ViewComplexTester
+public class ViewComplexTTLTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testUpdateColumnInViewPKWithTTLWithFlush() throws Throwable
@@ -57,76 +57,74 @@
         // CASSANDRA-13657: if the base column used in the view PK is TTLed, then the view row is considered dead
         createTable("create table %s (k int primary key, a int, b int)");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
-                               "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)");
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         updateView("UPDATE %s SET a = 1 WHERE k = 1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRows(execute("SELECT * from %s"), row(1, 1, null));
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null));
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null));
 
         updateView("DELETE a FROM %s WHERE k = 1");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRows(execute("SELECT * from %s"));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
 
         updateView("INSERT INTO %s (k) VALUES (1);");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRows(execute("SELECT * from %s"), row(1, null, null));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
 
         updateView("UPDATE %s USING TTL 5 SET a = 10 WHERE k = 1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRows(execute("SELECT * from %s"), row(1, 10, null));
-        assertRows(execute("SELECT * from " + mv), row(10, 1, null));
+        assertRows(executeView("SELECT * from %s"), row(10, 1, null));
 
         updateView("UPDATE %s SET b = 100 WHERE k = 1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRows(execute("SELECT * from %s"), row(1, 10, 100));
-        assertRows(execute("SELECT * from " + mv), row(10, 1, 100));
+        assertRows(executeView("SELECT * from %s"), row(10, 1, 100));
 
         Thread.sleep(5000);
 
         // 'a' has a TTL of 5 and has been removed.
         assertRows(execute("SELECT * from %s"), row(1, null, 100));
-        assertEmpty(execute("SELECT * from " + mv));
-        assertEmpty(execute("SELECT * from " + mv + " WHERE k = ? AND a = ?", 1, 10));
+        assertEmpty(executeView("SELECT * from %s"));
+        assertEmpty(executeView("SELECT * from %s WHERE k = ? AND a = ?", 1, 10));
 
         updateView("DELETE b FROM %s WHERE k=1");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRows(execute("SELECT * from %s"), row(1, null, null));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
 
         updateView("DELETE FROM %s WHERE k=1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertEmpty(execute("SELECT * from %s"));
-        assertEmpty(execute("SELECT * from " + mv));
+        assertEmpty(executeView("SELECT * from %s"));
     }
     @Test
     public void testUnselectedColumnsTTLWithFlush() throws Throwable
@@ -147,48 +145,46 @@
         // CASSANDRA-13127: a non-TTLed unselected column in the base table should keep the view row alive
         createTable("create table %s (p int, c int, v int, primary key(p, c))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT p, c FROM %%s " +
-                               "WHERE p IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, p)");
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT p, c FROM %s " +
+                   "WHERE p IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, p)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         updateViewWithFlush("INSERT INTO %s (p, c) VALUES (0, 0) USING TTL 3;", flush);
 
         updateViewWithFlush("UPDATE %s USING TTL 1000 SET v = 0 WHERE p = 0 and c = 0;", flush);
 
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         Thread.sleep(3000);
 
         UntypedResultSet.Row row = execute("SELECT v, ttl(v) from %s WHERE c = ? AND p = ?", 0, 0).one();
         assertEquals("row should have value of 0", 0, row.getInt("v"));
         assertTrue("row should have ttl less than 1000", row.getInt("ttl(v)") < 1000);
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         updateViewWithFlush("DELETE FROM %s WHERE p = 0 and c = 0;", flush);
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
 
         updateViewWithFlush("INSERT INTO %s (p, c) VALUES (0, 0) ", flush);
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         // already have a live row, no need to apply the unselected cell ttl
         updateViewWithFlush("UPDATE %s USING TTL 3 SET v = 0 WHERE p = 0 and c = 0;", flush);
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         updateViewWithFlush("INSERT INTO %s (p, c) VALUES (1, 1) USING TTL 3", flush);
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 1, 1), row(1, 1));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 1, 1), row(1, 1));
 
         Thread.sleep(4000);
 
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 1, 1));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 1, 1));
 
         // unselected should keep view row alive
         updateViewWithFlush("UPDATE %s SET v = 0 WHERE p = 1 and c = 1;", flush);
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 1, 1), row(1, 1));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 1, 1), row(1, 1));
 
     } 
 
@@ -198,17 +194,15 @@
         // CASSANDRA-13127: when liveness timestamps tie, the greater localDeletionTime should win if both are expiring.
         createTable("create table %s (p int, c int, v int, primary key(p, c))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
         updateView("INSERT INTO %s (p, c, v) VALUES (0, 0, 0) using timestamp 1;");
 
-        FBUtilities.waitOnFutures(ks.flush());
+        Util.flush(ks);
 
         updateView("INSERT INTO %s (p, c, v) VALUES (0, 0, 0) USING TTL 3 and timestamp 1;");
 
-        FBUtilities.waitOnFutures(ks.flush());
+        Util.flush(ks);
 
         Thread.sleep(4000);
 
@@ -219,11 +213,11 @@
 
         updateView("INSERT INTO %s (p, c, v) VALUES (0, 0, 0) USING TTL 3 and timestamp 1;");
 
-        FBUtilities.waitOnFutures(ks.flush());
+        Util.flush(ks);
 
         updateView("INSERT INTO %s (p, c, v) VALUES (0, 0, 0) USING timestamp 1;");
 
-        FBUtilities.waitOnFutures(ks.flush());
+        Util.flush(ks);
 
         Thread.sleep(4000);
 
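
The last hunk above exercises the rule spelled out in its comment: when two liveness infos carry the same timestamp and both are expiring, the one with the greater local deletion time should win. A small illustrative reconciliation sketch under that assumption; the field and method names are made up, not Cassandra's:

```java
final class LivenessTieBreak
{
    private LivenessTieBreak() {}

    record Liveness(long timestamp, int localDeletionTime) {}

    // Pick the winning liveness info: the higher timestamp wins; on a tie between two
    // expiring infos, the greater localDeletionTime (i.e. the later expiry) wins.
    static Liveness reconcile(Liveness a, Liveness b)
    {
        if (a.timestamp() != b.timestamp())
            return a.timestamp() > b.timestamp() ? a : b;
        return a.localDeletionTime() >= b.localDeletionTime() ? a : b;
    }

    public static void main(String[] args)
    {
        Liveness shortTtl = new Liveness(1L, 100); // expires earlier
        Liveness longTtl  = new Liveness(1L, 200); // expires later
        System.out.println(reconcile(shortTtl, longTtl).localDeletionTime()); // 200
    }
}
```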
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexTest.java
index 8098dcf..3231d3b 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexTest.java
@@ -27,12 +27,11 @@
 import java.util.Map;
 
 import com.google.common.base.Objects;
-
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.fail;
 
@@ -46,7 +45,7 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexTest extends ViewComplexTester
+public class ViewComplexTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testNonBaseColumnInViewPkWithFlush() throws Throwable
@@ -64,51 +63,49 @@
     {
         createTable("create table %s (p1 int, p2 int, v1 int, v2 int, primary key (p1,p2))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("create materialized view %s as select * from %%s " +
-                               "where p1 is not null and p2 is not null primary key (p2, p1) " +
-                               "with gc_grace_seconds=5");
-        ColumnFamilyStore cfs = ks.getColumnFamilyStore(mv);
+        createView("create materialized view %s as select * from %s " +
+                   "where p1 is not null and p2 is not null primary key (p2, p1) " +
+                   "with gc_grace_seconds=5");
+        ColumnFamilyStore cfs = ks.getColumnFamilyStore(currentView());
         cfs.disableAutoCompaction();
 
         updateView("UPDATE %s USING TIMESTAMP 1 set v1 =1 where p1 = 1 AND p2 = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from %s"), row(1, 1, 1, null));
-        assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from " + mv), row(1, 1, 1, null));
+        assertRowsIgnoringOrder(executeView("SELECT p1, p2, v1, v2 from %s"), row(1, 1, 1, null));
 
         updateView("UPDATE %s USING TIMESTAMP 2 set v1 = null, v2 = 1 where p1 = 1 AND p2 = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from %s"), row(1, 1, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from " + mv), row(1, 1, null, 1));
+        assertRowsIgnoringOrder(executeView("SELECT p1, p2, v1, v2 from %s"), row(1, 1, null, 1));
 
         updateView("UPDATE %s USING TIMESTAMP 2 set v2 = null where p1 = 1 AND p2 = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from %s"));
-        assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT p1, p2, v1, v2 from %s"));
 
         updateView("INSERT INTO %s (p1,p2) VALUES(1,1) USING TIMESTAMP 3;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from %s"), row(1, 1, null, null));
-        assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from " + mv), row(1, 1, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT p1, p2, v1, v2 from %s"), row(1, 1, null, null));
 
         updateView("DELETE FROM %s USING TIMESTAMP 4 WHERE p1 =1 AND p2 = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from %s"));
-        assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT p1, p2, v1, v2 from %s"));
 
         updateView("UPDATE %s USING TIMESTAMP 5 set v2 = 1 where p1 = 1 AND p2 = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from %s"), row(1, 1, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT p1, p2, v1, v2 from " + mv), row(1, 1, null, 1));
+        assertRowsIgnoringOrder(executeView("SELECT p1, p2, v1, v2 from %s"), row(1, 1, null, 1));
     }
 
     @Test
@@ -127,22 +124,20 @@
     {
         createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, f int, PRIMARY KEY(a, b))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         List<String> viewNames = new ArrayList<>();
         List<String> mvStatements = Arrays.asList(
                                                   // all selected
-                                                  "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a,b)",
+                                                  "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a,b)",
                                                   // unselected e,f
-                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b,c,d FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a,b)",
+                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b,c,d FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a,b)",
                                                   // no selected
-                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a,b)",
+                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a,b)",
                                                   // all selected, re-order keys
-                                                  "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b,a)",
+                                                  "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b,a)",
                                                   // unselected e,f, re-order keys
-                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b,c,d FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b,a)",
+                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b,c,d FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b,a)",
                                                   // no selected, re-order keys
-                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b,a)");
+                                                  "CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b,a)");
 
         Keyspace ks = Keyspace.open(keyspace());
 
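
A cosmetic but pervasive change in this diff is that view definitions are now written with a single `%s` base-table placeholder instead of the escaped `%%s`. With `String.format`, `%%` produces a literal percent sign, so `%%s` survives the first formatting pass as a literal `%s` for a later substitution; dropping the escape suggests the new base class fills in the base table itself in one pass. A quick standalone illustration:

```java
public class FormatEscapeSketch
{
    public static void main(String[] args)
    {
        // Escaped placeholder: the first pass only fills in the view name and
        // leaves "%s" behind for a second pass to substitute the base table.
        String twoPass = String.format("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s", "mv1");
        System.out.println(twoPass); // CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM %s

        // Unescaped placeholders: a single pass fills in both names at once.
        String onePass = String.format("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s", "mv1", "base_table");
        System.out.println(onePass); // CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM base_table
    }
}
```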
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexTester.java b/test/unit/org/apache/cassandra/cql3/ViewComplexTester.java
deleted file mode 100644
index de30eec..0000000
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexTester.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.cql3;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import com.datastax.driver.core.exceptions.OperationTimedOutException;
-import org.apache.cassandra.concurrent.Stage;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.transport.ProtocolVersion;
-
-/* ViewComplexTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
- * Any changes here check if they apply to the other classes:
- * - ViewComplexUpdatesTest
- * - ViewComplexDeletionsTest
- * - ViewComplexTTLTest
- * - ViewComplexTest
- * - ViewComplexLivenessTest
- * - ...
- * - ViewComplex*Test
- */
-@RunWith(Parameterized.class)
-public abstract class ViewComplexTester extends CQLTester
-{
-    private static final AtomicInteger seqNumber = new AtomicInteger();
-
-    @Parameterized.Parameter
-    public ProtocolVersion version;
-
-    @Parameterized.Parameters()
-    public static Collection<Object[]> versions()
-    {
-        return ProtocolVersion.SUPPORTED.stream()
-                                        .map(v -> new Object[]{v})
-                                        .collect(Collectors.toList());
-    }
-
-    protected final List<String> views = new ArrayList<>();
-
-    @BeforeClass
-    public static void startup()
-    {
-        requireNetwork();
-    }
-
-    @Before
-    public void begin() throws Throwable
-    {
-        views.clear();
-    }
-
-    @After
-    public void end() throws Throwable
-    {
-        dropMViews();
-    }
-
-    protected void dropMViews() throws Throwable
-    {
-        for (String viewName : views)
-            executeNet(version, "DROP MATERIALIZED VIEW " + viewName);
-    }
-
-    protected String createView(String query) throws Throwable
-    {
-        String name = createViewName();
-
-        try
-        {
-            executeNet(version, String.format(query, name));
-            // If exception is thrown, the view will not be added to the list; since it shouldn't have been created, this is
-            // the desired behavior
-            views.add(name);
-        }
-        catch (OperationTimedOutException ex)
-        {
-            // ... except for timeout, when we actually do not know whether the view was created or not
-            views.add(name);
-            throw ex;
-        }
-
-        return name;
-    }
-
-    protected static String createViewName()
-    {
-        return "mv" + seqNumber.getAndIncrement();
-    }
-
-    protected void updateView(String query, Object... params) throws Throwable
-    {
-        updateViewWithFlush(query, false, params);
-    }
-
-    protected void updateViewWithFlush(String query, boolean flush, Object... params) throws Throwable
-    {
-        executeNet(version, query, params);
-        while (!(Stage.VIEW_MUTATION.executor().getPendingTaskCount() == 0
-                 && Stage.VIEW_MUTATION.executor().getActiveTaskCount() == 0))
-        {
-            Thread.sleep(1);
-        }
-        if (flush)
-            Keyspace.open(keyspace()).flush();
-    }
-}
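
The deleted tester's `updateViewWithFlush` busy-waited until the view mutation stage had no pending or active tasks before optionally flushing. The same drain-then-continue pattern with a plain JDK `ThreadPoolExecutor`, as a self-contained sketch rather than the Stage API itself:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DrainExecutorSketch
{
    // Spin until the executor has nothing queued and nothing running, so that
    // asynchronous (view) updates are applied before the test asserts on them.
    static void awaitDrained(ThreadPoolExecutor executor) throws InterruptedException
    {
        while (executor.getQueue().size() > 0 || executor.getActiveCount() > 0)
            Thread.sleep(1);
    }

    public static void main(String[] args) throws InterruptedException
    {
        ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
        for (int i = 0; i < 4; i++)
            pool.submit(() -> { try { TimeUnit.MILLISECONDS.sleep(50); } catch (InterruptedException ignored) {} });

        awaitDrained(pool); // returns once all four tasks have finished
        System.out.println("drained");
        pool.shutdown();
    }
}
```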
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexTombstoneTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexTombstoneTest.java
index 3c484ac..a873135 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexTombstoneTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexTombstoneTest.java
@@ -25,11 +25,12 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.io.sstable.SSTableIdFactory;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
-import org.apache.cassandra.utils.FBUtilities;
 
 /* ViewComplexTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * Any changes here check if they apply to the other classes:
@@ -41,7 +42,7 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexTombstoneTest extends ViewComplexTester
+public class ViewComplexTombstoneTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testCellTombstoneAndShadowableTombstonesWithFlush() throws Throwable
@@ -59,58 +60,56 @@
     {
         createTable("create table %s (p int primary key, v1 int, v2 int)");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("create materialized view %s as select * from %%s " +
-                               "where p is not null and v1 is not null primary key (v1, p)");
-        ks.getColumnFamilyStore(mv).disableAutoCompaction();
+        createView("create materialized view %s as select * from %s " +
+                   "where p is not null and v1 is not null primary key (v1, p)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         // sstable 1, Set initial values TS=1
         updateView("Insert into %s (p, v1, v2) values (3, 1, 3) using timestamp 1;");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v2, WRITETIME(v2) from " + mv + " WHERE v1 = ? AND p = ?", 1, 3), row(3, 1L));
+        assertRowsIgnoringOrder(executeView("SELECT v2, WRITETIME(v2) from %s WHERE v1 = ? AND p = ?", 1, 3), row(3, 1L));
         // sstable 2
         updateView("UPdate %s using timestamp 2 set v2 = null where p = 3");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v2, WRITETIME(v2) from " + mv + " WHERE v1 = ? AND p = ?", 1, 3),
+        assertRowsIgnoringOrder(executeView("SELECT v2, WRITETIME(v2) from %s WHERE v1 = ? AND p = ?", 1, 3),
                                 row(null, null));
         // sstable 3
         updateView("UPdate %s using timestamp 3 set v1 = 2 where p = 3");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(2, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(2, 3, null, null));
         // sstable 4
         updateView("UPdate %s using timestamp 4 set v1 = 1 where p = 3");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
 
         if (flush)
         {
             // compact sstable 2 and 3;
-            ColumnFamilyStore cfs = ks.getColumnFamilyStore(mv);
+            ColumnFamilyStore cfs = ks.getColumnFamilyStore(currentView());
             List<String> sstables = cfs.getLiveSSTables()
                                        .stream()
-                                       .sorted(Comparator.comparingInt(s -> s.descriptor.generation))
+                                       .sorted(Comparator.comparing(s -> s.descriptor.id, SSTableIdFactory.COMPARATOR))
                                        .map(SSTableReader::getFilename)
                                        .collect(Collectors.toList());
             String dataFiles = String.join(",", Arrays.asList(sstables.get(1), sstables.get(2)));
             CompactionManager.instance.forceUserDefinedCompaction(dataFiles);
         }
         // cell-tombstone in sstable 4 is not compacted away, because the shadowable tombstone is shadowed by new row.
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, null, null));
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv + " limit 1"), row(1, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s limit 1"), row(1, 3, null, null));
     }
 }
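
The hunks above repeatedly replace the inline FBUtilities.waitOnFutures(ks.flush()) pattern with Util.flush(ks). Below is a minimal, hedged sketch of what such a flush-and-wait helper can look like; it only wraps the pattern the removed call sites used and is an assumption for illustration, not the actual org.apache.cassandra.Util implementation, whose flush path on 4.1 may differ.

    import org.apache.cassandra.db.Keyspace;
    import org.apache.cassandra.utils.FBUtilities;

    // Hypothetical helper in the spirit of Util.flush(ks), for illustration only.
    public final class FlushSketch
    {
        private FlushSketch() {}

        public static void flush(Keyspace ks)
        {
            // Block until every memtable flush future completes, so the test can reason
            // about SSTable contents right away; this is exactly what the removed
            // FBUtilities.waitOnFutures(ks.flush()) call sites did inline.
            FBUtilities.waitOnFutures(ks.flush());
        }
    }

Centralizing the wait keeps the tests terse and leaves a single place to adapt when the flush API changes between branches.
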
diff --git a/test/unit/org/apache/cassandra/cql3/ViewComplexUpdatesTest.java b/test/unit/org/apache/cassandra/cql3/ViewComplexUpdatesTest.java
index c930fa0..74f4038 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewComplexUpdatesTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewComplexUpdatesTest.java
@@ -22,8 +22,8 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.FBUtilities;
 
 /* ViewComplexTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * For any changes here, check whether they apply to the other classes:
@@ -35,7 +35,7 @@
  * - ...
  * - ViewComplex*Test
  */
-public class ViewComplexUpdatesTest extends ViewComplexTester
+public class ViewComplexUpdatesTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testUpdateColumnNotInViewWithFlush() throws Throwable
@@ -54,92 +54,89 @@
     {
         // CASSANDRA-13127: if base columns not selected in the view are alive, then the pk of the view row should be alive
         String baseTable = createTable("create table %s (p int, c int, v1 int, v2 int, primary key(p, c))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT p, c FROM %%s " +
+        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT p, c from %s " +
                                "WHERE p IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, p)");
         ks.getColumnFamilyStore(mv).disableAutoCompaction();
 
         updateView("UPDATE %s USING TIMESTAMP 0 SET v1 = 1 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0, 1, null));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         updateView("DELETE v1 FROM %s USING TIMESTAMP 1 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertEmpty(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
-        assertEmpty(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0));
+        assertEmpty(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
 
         // shadowed by tombstone
         updateView("UPDATE %s USING TIMESTAMP 1 SET v1 = 1 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertEmpty(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
-        assertEmpty(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0));
+        assertEmpty(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
 
         updateView("UPDATE %s USING TIMESTAMP 2 SET v2 = 1 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         updateView("DELETE v1 FROM %s USING TIMESTAMP 3 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         updateView("DELETE v2 FROM %s USING TIMESTAMP 4 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertEmpty(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
-        assertEmpty(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0));
+        assertEmpty(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
 
         updateView("UPDATE %s USING TTL 3 SET v2 = 1 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         Thread.sleep(TimeUnit.SECONDS.toMillis(3));
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
 
         updateView("UPDATE %s SET v2 = 1 WHERE p = 0 AND c = 0");
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0), row(0, 0));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0), row(0, 0));
 
         assertInvalidMessage(String.format("Cannot drop column v2 on base table %s with materialized views", baseTable), "ALTER TABLE %s DROP v2");
         // // drop unselected base column, unselected metadata should be removed, thus view row is dead
         // updateView("ALTER TABLE %s DROP v2");
         // assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
-        // assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE c = ? AND p = ?", 0, 0));
+        // assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE c = ? AND p = ?", 0, 0));
         // assertRowsIgnoringOrder(execute("SELECT * from %s"));
-        // assertRowsIgnoringOrder(execute("SELECT * from " + mv));
+        // assertRowsIgnoringOrder(executeView("SELECT * from %s"));
     }
 
     @Test
@@ -156,51 +153,49 @@
 
     private void testPartialUpdateWithUnselectedCollections(boolean flush) throws Throwable
     {
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         String baseTable = createTable("CREATE TABLE %s (k int, c int, a int, b int, l list<int>, s set<int>, m map<int,int>, PRIMARY KEY (k, c))");
-        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c, k FROM %%s " +
+        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c, k from %s " +
                                "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)");
         Keyspace ks = Keyspace.open(keyspace());
         ks.getColumnFamilyStore(mv).disableAutoCompaction();
 
         updateView("UPDATE %s SET l=l+[1,2,3] WHERE k = 1 AND c = 1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, null));
+            Util.flush(ks);
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         updateView("UPDATE %s SET l=l-[1,2] WHERE k = 1 AND c = 1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, null));
+            Util.flush(ks);
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         updateView("UPDATE %s SET b=3 WHERE k=1 AND c=1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRows(execute("SELECT * from " + mv), row(1, 1, null, 3));
+            Util.flush(ks);
+        assertRows(executeView("SELECT * from %s"), row(1, 1, null, 3));
 
         updateView("UPDATE %s SET b=null, l=l-[3], s=s-{3} WHERE k = 1 AND c = 1");
         if (flush)
         {
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
             ks.getColumnFamilyStore(mv).forceMajorCompaction();
         }
         assertRowsIgnoringOrder(execute("SELECT k,c,a,b from %s"));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s"));
 
         updateView("UPDATE %s SET m=m+{3:3}, l=l-[1], s=s-{2} WHERE k = 1 AND c = 1");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         assertRowsIgnoringOrder(execute("SELECT k,c,a,b from %s"), row(1, 1, null, null));
-        assertRowsIgnoringOrder(execute("SELECT * from " + mv), row(1, 1, null, null));
+        assertRowsIgnoringOrder(executeView("SELECT * from %s"), row(1, 1, null, null));
 
         assertInvalidMessage(String.format("Cannot drop column m on base table %s with materialized views", baseTable), "ALTER TABLE %s DROP m");
         // executeNet(version, "ALTER TABLE %s DROP m");
         // ks.getColumnFamilyStore(mv).forceMajorCompaction();
         // assertRowsIgnoringOrder(execute("SELECT k,c,a,b from %s WHERE k = 1 AND c = 1"));
-        // assertRowsIgnoringOrder(execute("SELECT * from " + mv + " WHERE k = 1 AND c = 1"));
+        // assertRowsIgnoringOrder(executeView("SELECT * from %s WHERE k = 1 AND c = 1"));
         // assertRowsIgnoringOrder(execute("SELECT k,c,a,b from %s"));
-        // assertRowsIgnoringOrder(execute("SELECT * from " + mv));
+        // assertRowsIgnoringOrder(executeView("SELECT * from %s"));
     }
 
     @Test
@@ -219,35 +214,33 @@
     {
         createTable("create table %s (p int primary key, v1 int, v2 int)");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("create materialized view %s as select * from %%s " +
+        String mv = createView("create materialized view %s as select * from %s " +
                                "where p is not null and v1 is not null primary key (v1, p)");
         ks.getColumnFamilyStore(mv).disableAutoCompaction();
 
         // reset value
         updateView("Insert into %s (p, v1, v2) values (3, 1, 3) using timestamp 6;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, 3, 6L));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, 3, 6L));
         // increase pk's timestamp to 20
         updateView("Insert into %s (p) values (3) using timestamp 20;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, 3, 6L));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, 3, 6L));
         // change v1 to 2 and remove the existing view row with ts 7
         updateView("UPdate %s using timestamp 7 set v1 = 2 where p = 3;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(2, 3, 3, 6L));
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv + " limit 1"), row(2, 3, 3, 6L));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(2, 3, 3, 6L));
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s limit 1"), row(2, 3, 3, 6L));
         // change v1 to 1 and remove the existing view row with ts 8
         updateView("UPdate %s using timestamp 8 set v1 = 1 where p = 3;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT v1, p, v2, WRITETIME(v2) from " + mv), row(1, 3, 3, 6L));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, 3, 6L));
     }
 
     @Test
@@ -269,51 +262,49 @@
         // CASSANDRA-11500: able to shadow the old view row with a column ts greater than the pk's ts and re-insert the view row
         String baseTable = createTable("CREATE TABLE %s (k int PRIMARY KEY, a int, b int);");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
+        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * from %s " +
                                "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)");
         ks.getColumnFamilyStore(mv).disableAutoCompaction();
         updateView("DELETE FROM %s USING TIMESTAMP 0 WHERE k = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         // sstable-1, Set initial values TS=1
         updateView("INSERT INTO %s(k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv), row(1, 1, 1));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"), row(1, 1, 1));
         updateView("UPDATE %s USING TIMESTAMP 10 SET b = 2 WHERE k = 1;");
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv), row(1, 1, 2));
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"), row(1, 1, 2));
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv), row(1, 1, 2));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"), row(1, 1, 2));
         updateView("UPDATE %s USING TIMESTAMP 2 SET a = 2 WHERE k = 1;");
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv), row(1, 2, 2));
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"), row(1, 2, 2));
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
         ks.getColumnFamilyStore(mv).forceMajorCompaction();
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv), row(1, 2, 2));
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv + " limit 1"), row(1, 2, 2));
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"), row(1, 2, 2));
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s limit 1"), row(1, 2, 2));
         updateView("UPDATE %s USING TIMESTAMP 11 SET a = 1 WHERE k = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv), row(1, 1, 2));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"), row(1, 1, 2));
         assertRowsIgnoringOrder(execute("SELECT k,a,b from %s"), row(1, 1, 2));
 
         // set non-key base column as tombstone, view row is removed with shadowable
         updateView("UPDATE %s USING TIMESTAMP 12 SET a = null WHERE k = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"));
         assertRowsIgnoringOrder(execute("SELECT k,a,b from %s"), row(1, null, 2));
 
         // column b should be alive
         updateView("UPDATE %s USING TIMESTAMP 13 SET a = 1 WHERE k = 1;");
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRowsIgnoringOrder(execute("SELECT k,a,b from " + mv), row(1, 1, 2));
+            Util.flush(ks);
+        assertRowsIgnoringOrder(executeView("SELECT k,a,b from %s"), row(1, 1, 2));
         assertRowsIgnoringOrder(execute("SELECT k,a,b from %s"), row(1, 1, 2));
 
         assertInvalidMessage(String.format("Cannot drop column a on base table %s with materialized views", baseTable), "ALTER TABLE %s DROP a");
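
The assertions above stop concatenating the view name into CQL ("SELECT ... from " + mv) and instead call executeView with a %s placeholder, plus currentView() where a ColumnFamilyStore handle is needed. The sketch below shows the rough shape such helpers can take, assuming the base class remembers the most recently created view; it is illustrative only, and the real ViewAbstractParameterizedTest may resolve and qualify the view name differently.

    import org.apache.cassandra.cql3.CQLTester;
    import org.apache.cassandra.cql3.UntypedResultSet;

    // Illustrative sketch of the currentView()/executeView() conveniences.
    public abstract class ViewHelperSketch extends CQLTester
    {
        private String currentViewName; // remembered when createView(...) runs

        protected String currentView()
        {
            return currentViewName;
        }

        protected UntypedResultSet executeView(String query, Object... values) throws Throwable
        {
            // Substitute the current view for the %s placeholder, then delegate to the
            // ordinary execute(...) path used by the rest of the test.
            return execute(String.format(query, currentView()), values);
        }
    }

Keeping the placeholder in the query string means the same literal can be reused for base-table and view queries, which is why the converted lines read almost identically to the originals.
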
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFiltering1Test.java b/test/unit/org/apache/cassandra/cql3/ViewFiltering1Test.java
new file mode 100644
index 0000000..47d5b8e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/ViewFiltering1Test.java
@@ -0,0 +1,507 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.cql3;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+
+/* ViewFilteringTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
+ * For any changes here, check whether they apply to the other classes
+ * - ViewFilteringPKTest
+ * - ViewFilteringClustering1Test
+ * - ViewFilteringClustering2Test
+ * - ViewFilteringTest
+ * - ...
+ * - ViewFiltering*Test
+ */
+public class ViewFiltering1Test extends ViewAbstractParameterizedTest
+{
+    @BeforeClass
+    public static void startup()
+    {
+        ViewAbstractParameterizedTest.startup();
+        System.setProperty("cassandra.mv.allow_filtering_nonkey_columns_unsafe", "true");
+    }
+
+    @AfterClass
+    public static void tearDown()
+    {
+        System.setProperty("cassandra.mv.allow_filtering_nonkey_columns_unsafe", "false");
+    }
+
+    // TODO will revise the non-pk filter condition in MV, see CASSANDRA-11500
+    @Ignore
+    @Test
+    public void testViewFilteringWithFlush() throws Throwable
+    {
+        testViewFiltering(true);
+    }
+
+    // TODO will revise the non-pk filter condition in MV, see CASSANDRA-11500
+    @Ignore
+    @Test
+    public void testViewFilteringWithoutFlush() throws Throwable
+    {
+        testViewFiltering(false);
+    }
+
+    public void testViewFiltering(boolean flush) throws Throwable
+    {
+        // CASSANDRA-13547: able to shadow entire view row if base column used in filter condition is modified
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a))");
+
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                "WHERE a IS NOT NULL AND b IS NOT NULL and c = 1  PRIMARY KEY (a, b)");
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c, d FROM %s " +
+                                "WHERE a IS NOT NULL AND b IS NOT NULL and c = 1 and d = 1 PRIMARY KEY (a, b)");
+        String mv3 = createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c, d FROM %s " +
+                                "WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a, b)");
+        String mv4 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c FROM %s " +
+                                "WHERE a IS NOT NULL AND b IS NOT NULL and c = 1 PRIMARY KEY (a, b)");
+        String mv5 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c FROM %s " +
+                                "WHERE a IS NOT NULL and d = 1 PRIMARY KEY (a, d)");
+        String mv6 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c FROM %s " +
+                                "WHERE a = 1 and d IS NOT NULL PRIMARY KEY (a, d)");
+
+        Keyspace ks = Keyspace.open(keyspace());
+        ks.getColumnFamilyStore(mv1).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv2).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv3).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv4).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv5).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv6).disableAutoCompaction();
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) using timestamp 0", 1, 1, 1, 1);
+        if (flush)
+            Util.flush(ks);
+
+        // views should be updated.
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
+
+        updateView("UPDATE %s using timestamp 1 set c = ? WHERE a=?", 0, 1);
+        if (flush)
+            Util.flush(ks);
+
+        assertRowCount(execute("SELECT * FROM " + mv1), 0);
+        assertRowCount(execute("SELECT * FROM " + mv2), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 0, 1));
+        assertRowCount(execute("SELECT * FROM " + mv4), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 0));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 0));
+
+        updateView("UPDATE %s using timestamp 2 set c = ? WHERE a=?", 1, 1);
+        if (flush)
+            Util.flush(ks);
+
+        // row should be back in views.
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
+
+        updateView("UPDATE %s using timestamp 3 set d = ? WHERE a=?", 0, 1);
+        if (flush)
+            Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 0));
+        assertRowCount(execute("SELECT * FROM " + mv2), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 0));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
+        assertRowCount(execute("SELECT * FROM " + mv5), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 0, 1));
+
+        updateView("UPDATE %s using timestamp 4 set c = ? WHERE a=?", 0, 1);
+        if (flush)
+            Util.flush(ks);
+
+        assertRowCount(execute("SELECT * FROM " + mv1), 0);
+        assertRowCount(execute("SELECT * FROM " + mv2), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 0, 0));
+        assertRowCount(execute("SELECT * FROM " + mv4), 0);
+        assertRowCount(execute("SELECT * FROM " + mv5), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 0, 0));
+
+        updateView("UPDATE %s using timestamp 5 set d = ? WHERE a=?", 1, 1);
+        if (flush)
+            Util.flush(ks);
+
+        // should not update as c=0
+        assertRowCount(execute("SELECT * FROM " + mv1), 0);
+        assertRowCount(execute("SELECT * FROM " + mv2), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 0, 1));
+        assertRowCount(execute("SELECT * FROM " + mv4), 0);
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 0));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 0));
+
+        updateView("UPDATE %s using timestamp 6 set c = ? WHERE a=?", 1, 1);
+
+        // row should be back in views.
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
+
+        updateView("UPDATE %s using timestamp 7 set b = ? WHERE a=?", 2, 1);
+        if (flush)
+        {
+            Util.flush(ks);
+            for (String view : getViews())
+                ks.getColumnFamilyStore(view).forceMajorCompaction();
+        }
+        // row should be back in views.
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 2, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 2, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 2, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 2, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
+
+        updateView("DELETE b, c FROM %s using timestamp 6 WHERE a=?", 1);
+        if (flush)
+            Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s"), row(1, 2, null, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 2, null, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, null));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, null));
+
+        updateView("DELETE FROM %s using timestamp 8 where a=?", 1);
+        if (flush)
+            Util.flush(ks);
+
+        assertRowCount(execute("SELECT * FROM " + mv1), 0);
+        assertRowCount(execute("SELECT * FROM " + mv2), 0);
+        assertRowCount(execute("SELECT * FROM " + mv3), 0);
+        assertRowCount(execute("SELECT * FROM " + mv4), 0);
+        assertRowCount(execute("SELECT * FROM " + mv5), 0);
+        assertRowCount(execute("SELECT * FROM " + mv6), 0);
+
+        updateView("UPDATE %s using timestamp 9 set b = ?,c = ? where a=?", 1, 1, 1); // upsert
+        if (flush)
+            Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, null));
+        assertRows(execute("SELECT * FROM " + mv2));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, null));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
+        assertRows(execute("SELECT * FROM " + mv5));
+        assertRows(execute("SELECT * FROM " + mv6));
+
+        updateView("DELETE FROM %s using timestamp 10 where a=?", 1);
+        if (flush)
+            Util.flush(ks);
+
+        assertRowCount(execute("SELECT * FROM " + mv1), 0);
+        assertRowCount(execute("SELECT * FROM " + mv2), 0);
+        assertRowCount(execute("SELECT * FROM " + mv3), 0);
+        assertRowCount(execute("SELECT * FROM " + mv4), 0);
+        assertRowCount(execute("SELECT * FROM " + mv5), 0);
+        assertRowCount(execute("SELECT * FROM " + mv6), 0);
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) using timestamp 11", 1, 1, 1, 1);
+        if (flush)
+            Util.flush(ks);
+
+        // row should be back in views.
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
+
+        updateView("DELETE FROM %s using timestamp 12 where a=?", 1);
+        if (flush)
+            Util.flush(ks);
+
+        assertRowCount(execute("SELECT * FROM " + mv1), 0);
+        assertRowCount(execute("SELECT * FROM " + mv2), 0);
+        assertRowCount(execute("SELECT * FROM " + mv3), 0);
+        assertRowCount(execute("SELECT * FROM " + mv4), 0);
+        assertRowCount(execute("SELECT * FROM " + mv5), 0);
+        assertRowCount(execute("SELECT * FROM " + mv6), 0);
+
+        dropView(mv1);
+        dropView(mv2);
+        dropView(mv3);
+        dropView(mv4);
+        dropView(mv5);
+        dropView(mv6);
+        dropTable("DROP TABLE %s");
+    }
+
+    // TODO will revise the non-pk filter condition in MV, see CASSANDRA-11500
+    @Ignore
+    @Test
+    public void testMVFilteringWithComplexColumn() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, l list<int>, s set<int>, m map<int,int>, PRIMARY KEY (a, b))");
+
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT a,b,c FROM %s " +
+                                "WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND l contains (1) " +
+                                "AND s contains (1) AND m contains key (1) " +
+                                "PRIMARY KEY (a, b, c)");
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %s " +
+                                "WHERE a IS NOT NULL and b IS NOT NULL AND l contains (1) " +
+                                "PRIMARY KEY (a, b)");
+        String mv3 = createView("CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %s " +
+                                "WHERE a IS NOT NULL AND b IS NOT NULL AND s contains (1) " +
+                                "PRIMARY KEY (a, b)");
+        String mv4 = createView("CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %s " +
+                                "WHERE a IS NOT NULL AND b IS NOT NULL AND m contains key (1) " +
+                                "PRIMARY KEY (a, b)");
+
+        // not able to drop base column filtered in view
+        assertInvalidMessage("Cannot drop column l, depended on by materialized views", "ALTER TABLE %s DROP l");
+        assertInvalidMessage("Cannot drop column s, depended on by materialized views", "ALTER TABLE %S DROP s");
+        assertInvalidMessage("Cannot drop column m, depended on by materialized views", "ALTER TABLE %s DROP m");
+
+        Keyspace ks = Keyspace.open(keyspace());
+        ks.getColumnFamilyStore(mv1).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv2).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv3).disableAutoCompaction();
+        ks.getColumnFamilyStore(mv4).disableAutoCompaction();
+
+        execute("INSERT INTO %s (a, b, c, l, s, m) VALUES (?, ?, ?, ?, ?, ?) ",
+                1,
+                1,
+                1,
+                list(1, 1, 2),
+                set(1, 2),
+                map(1, 1, 2, 2));
+        Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1));
+
+        execute("UPDATE %s SET l=l-[1] WHERE a = 1 AND b = 1");
+        Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1));
+
+        execute("UPDATE %s SET s=s-{2}, m=m-{2} WHERE a = 1 AND b = 1");
+        Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT a,b,c FROM %s"), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1));
+
+        execute("UPDATE %s SET  m=m-{1} WHERE a = 1 AND b = 1");
+        Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT a,b,c FROM %s"), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4));
+
+        // results of the filter conditions are unchanged
+        execute("UPDATE %s SET  l=l+[2], s=s-{0}, m=m+{3:3} WHERE a = 1 AND b = 1");
+        Util.flush(ks);
+
+        assertRowsIgnoringOrder(execute("SELECT a,b,c FROM %s"), row(1, 1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1));
+        assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4));
+    }
+
+    @Test
+    public void testMVCreationSelectRestrictions() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY((a, b), c, d))");
+
+        // IS NOT NULL is required on all PK columns that are not otherwise restricted
+        List<String> badStatements = Arrays.asList(
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE b IS NOT NULL AND c IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND c IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c is NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = ? AND b IS NOT NULL AND c is NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = blobAsInt(?) AND b IS NOT NULL AND c is NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s PRIMARY KEY (a, b, c, d)"
+        );
+
+        for (String badStatement : badStatements)
+        {
+            try
+            {
+                createView(badStatement);
+                Assert.fail("Create MV statement should have failed due to missing IS NOT NULL restriction: " + badStatement);
+            }
+            catch (RuntimeException e)
+            {
+                Assert.assertSame(InvalidRequestException.class, e.getCause().getClass());
+            }
+        }
+
+        List<String> goodStatements = Arrays.asList(
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = 1 AND b = 1 AND c IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c = 1 AND d IS NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = 1 AND b = 1 AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = 1 AND b = 1 AND c > 1 AND d IS NOT NULL PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = 1 AND b = 1 AND c = 1 AND d IN (1, 2, 3) PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = 1 AND b = 1 AND (c, d) = (1, 1) PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = 1 AND b = 1 AND (c, d) > (1, 1) PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = 1 AND b = 1 AND (c, d) IN ((1, 1), (2, 2)) PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = (int) 1 AND b = 1 AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)",
+        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a = blobAsInt(intAsBlob(1)) AND b = 1 AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)"
+        );
+
+        for (int i = 0; i < goodStatements.size(); i++)
+        {
+            String mv;
+            try
+            {
+                mv = createView(goodStatements.get(i));
+            }
+            catch (Exception e)
+            {
+                throw new RuntimeException("MV creation failed: " + goodStatements.get(i), e);
+            }
+
+            try
+            {
+                executeNet("ALTER MATERIALIZED VIEW " + mv + " WITH compaction = { 'class' : 'LeveledCompactionStrategy' }");
+            }
+            catch (Exception e)
+            {
+                throw new RuntimeException("MV alter failed: " + goodStatements.get(i), e);
+            }
+        }
+    }
+
+    @Test
+    public void testCaseSensitivity() throws Throwable
+    {
+        createTable("CREATE TABLE %s (\"theKey\" int, \"theClustering\" int, \"the\"\"Value\" int, PRIMARY KEY (\"theKey\", \"theClustering\"))");
+
+        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 0, 0, 0);
+        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 0, 1, 0);
+        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 1, 0, 0);
+        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 1, 1, 0);
+
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                "WHERE \"theKey\" = 1 AND \"theClustering\" = 1 AND \"the\"\"Value\" IS NOT NULL " +
+                                "PRIMARY KEY (\"theKey\", \"theClustering\")");
+
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT \"theKey\", \"theClustering\", \"the\"\"Value\" FROM %s " +
+                                "WHERE \"theKey\" = 1 AND \"theClustering\" = 1 AND \"the\"\"Value\" IS NOT NULL " +
+                                "PRIMARY KEY (\"theKey\", \"theClustering\")");
+
+        for (String mvname : Arrays.asList(mv1, mv2))
+        {
+            assertRowsIgnoringOrder(execute("SELECT \"theKey\", \"theClustering\", \"the\"\"Value\" FROM " + mvname),
+                                    row(1, 1, 0));
+        }
+
+        executeNet("ALTER TABLE %s RENAME \"theClustering\" TO \"Col\"");
+
+        for (String mvname : Arrays.asList(mv1, mv2))
+        {
+            assertRowsIgnoringOrder(execute("SELECT \"theKey\", \"Col\", \"the\"\"Value\" FROM " + mvname),
+                                    row(1, 1, 0)
+            );
+        }
+    }
+
+    @Test
+    public void testFilterWithFunction() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 1);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 0, 2);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 1, 3);
+
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE a = blobAsInt(intAsBlob(1)) AND b IS NOT NULL " +
+                   "PRIMARY KEY (a, b)");
+
+        assertRows(executeView("SELECT a, b, c FROM %s"),
+                   row(1, 0, 2),
+                   row(1, 1, 3)
+        );
+
+        executeNet("ALTER TABLE %s RENAME a TO foo");
+
+        assertRows(executeView("SELECT foo, b, c FROM %s"),
+                   row(1, 0, 2),
+                   row(1, 1, 3)
+        );
+    }
+
+    @Test
+    public void testFilterWithTypecast() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 1);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 0, 2);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 1, 3);
+
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE a = (int) 1 AND b IS NOT NULL " +
+                   "PRIMARY KEY (a, b)");
+
+        assertRows(executeView("SELECT a, b, c FROM %s"),
+                   row(1, 0, 2),
+                   row(1, 1, 3)
+        );
+
+        executeNet("ALTER TABLE %s RENAME a TO foo");
+
+        assertRows(executeView("SELECT foo, b, c FROM %s"),
+                   row(1, 0, 2),
+                   row(1, 1, 3)
+        );
+    }
+}
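
ViewFiltering1Test gates its non-primary-key filtering scenarios behind the cassandra.mv.allow_filtering_nonkey_columns_unsafe system property, enabling it in @BeforeClass and resetting it in @AfterClass; ViewFiltering2Test (next hunk) delegates to those same methods so both halves of the split class run under the same flag. A small self-contained sketch of that toggle pattern follows; only the property name comes from the diff, the class and method names are illustrative.

    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    // Illustrative JUnit 4 toggle of the MV non-key filtering flag.
    public class NonKeyFilteringToggleSketch
    {
        private static final String FLAG = "cassandra.mv.allow_filtering_nonkey_columns_unsafe";

        @BeforeClass
        public static void enableNonKeyFiltering()
        {
            // Creating a view that filters on a non-primary-key column is rejected by
            // default, so the whole test class runs with the unsafe flag enabled.
            System.setProperty(FLAG, "true");
        }

        @AfterClass
        public static void disableNonKeyFiltering()
        {
            // Reset so later test classes in the same JVM keep the default behaviour.
            System.setProperty(FLAG, "false");
        }
    }
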
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFiltering2Test.java b/test/unit/org/apache/cassandra/cql3/ViewFiltering2Test.java
index 2eb0eca..17709a8 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewFiltering2Test.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewFiltering2Test.java
@@ -18,14 +18,14 @@
 
 package org.apache.cassandra.cql3;
 
+import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.utils.FBUtilities;
 
 /* ViewFilteringTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * For any changes here, check whether they apply to the other classes
@@ -36,9 +36,20 @@
  * - ...
  * - ViewFiltering*Test
  */
-@RunWith(Parameterized.class)
-public class ViewFiltering2Test extends ViewFilteringTester
+public class ViewFiltering2Test extends ViewAbstractParameterizedTest
 {
+    @BeforeClass
+    public static void startup()
+    {
+        ViewFiltering1Test.startup();
+    }
+
+    @AfterClass
+    public static void tearDown()
+    {
+        ViewFiltering1Test.tearDown();
+    }
+
     @Test
     public void testAllTypes() throws Throwable
     {
@@ -92,35 +103,30 @@
         "udtval frozen<" + myType + ">, " +
         "PRIMARY KEY (" + columnNames + "))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        createView(
-        "mv_test",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE " +
-        "asciival = 'abc' AND " +
-        "bigintval = 123 AND " +
-        "blobval = 0xfeed AND " +
-        "booleanval = true AND " +
-        "dateval = '1987-03-23' AND " +
-        "decimalval = 123.123 AND " +
-        "doubleval = 123.123 AND " +
-        "floatval = 123.123 AND " +
-        "inetval = '127.0.0.1' AND " +
-        "intval = 123 AND " +
-        "textval = 'abc' AND " +
-        "timeval = '07:35:07.000111222' AND " +
-        "timestampval = 123123123 AND " +
-        "timeuuidval = 6BDDC89A-5644-11E4-97FC-56847AFE9799 AND " +
-        "uuidval = 6BDDC89A-5644-11E4-97FC-56847AFE9799 AND " +
-        "varcharval = 'abc' AND " +
-        "varintval = 123123123 AND " +
-        "frozenlistval = [1, 2, 3] AND " +
-        "frozensetval = {6BDDC89A-5644-11E4-97FC-56847AFE9799} AND " +
-        "frozenmapval = {'a': 1, 'b': 2} AND " +
-        "tupleval = (1, 'foobar', 6BDDC89A-5644-11E4-97FC-56847AFE9799) AND " +
-        "udtval = {a: 1, b: 6BDDC89A-5644-11E4-97FC-56847AFE9799, c: {'foo', 'bar'}} " +
-        "PRIMARY KEY (" + columnNames + ")");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " +
+                   "asciival = 'abc' AND " +
+                   "bigintval = 123 AND " +
+                   "blobval = 0xfeed AND " +
+                   "booleanval = true AND " +
+                   "dateval = '1987-03-23' AND " +
+                   "decimalval = 123.123 AND " +
+                   "doubleval = 123.123 AND " +
+                   "floatval = 123.123 AND " +
+                   "inetval = '127.0.0.1' AND " +
+                   "intval = 123 AND " +
+                   "textval = 'abc' AND " +
+                   "timeval = '07:35:07.000111222' AND " +
+                   "timestampval = 123123123 AND " +
+                   "timeuuidval = 6BDDC89A-5644-11E4-97FC-56847AFE9799 AND " +
+                   "uuidval = 6BDDC89A-5644-11E4-97FC-56847AFE9799 AND " +
+                   "varcharval = 'abc' AND " +
+                   "varintval = 123123123 AND " +
+                   "frozenlistval = [1, 2, 3] AND " +
+                   "frozensetval = {6BDDC89A-5644-11E4-97FC-56847AFE9799} AND " +
+                   "frozenmapval = {'a': 1, 'b': 2} AND " +
+                   "tupleval = (1, 'foobar', 6BDDC89A-5644-11E4-97FC-56847AFE9799) AND " +
+                   "udtval = {a: 1, b: 6BDDC89A-5644-11E4-97FC-56847AFE9799, c: {'foo', 'bar'}} " +
+                   "PRIMARY KEY (" + columnNames + ")");
 
         execute("INSERT INTO %s (" + columnNames + ") VALUES (" +
                 "'abc'," +
@@ -146,24 +152,26 @@
                 "(1, 'foobar', 6BDDC89A-5644-11E4-97FC-56847AFE9799)," +
                 "{a: 1, b: 6BDDC89A-5644-11E4-97FC-56847AFE9799, c: {'foo', 'bar'}})");
 
-        assert !execute("SELECT * FROM mv_test").isEmpty();
+        assert !executeView("SELECT * FROM %s").isEmpty();
 
-        executeNet(version, "ALTER TABLE %s RENAME inetval TO foo");
-        assert !execute("SELECT * FROM mv_test").isEmpty();
+        executeNet("ALTER TABLE %s RENAME inetval TO foo");
+        assert !executeView("SELECT * FROM %s").isEmpty();
     }
 
     @Test
-    public void testMVCreationWithNonPrimaryRestrictions() throws Throwable
+    public void testMVCreationWithNonPrimaryRestrictions()
     {
         createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        try {
-            createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND d = 1 PRIMARY KEY (a, b, c)");
-            dropView("mv_test");
-        } catch(Exception e) {
+        try
+        {
+            String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                   "WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND d = 1 " +
+                                   "PRIMARY KEY (a, b, c)");
+            dropView(mv);
+        }
+        catch (Exception e)
+        {
             throw new RuntimeException("MV creation with non primary column restrictions failed.", e);
         }
 
@@ -175,9 +183,6 @@
     {
         createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -188,12 +193,11 @@
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 1, 0);
 
         // only accept rows where c = 1
-        createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND c = 1 PRIMARY KEY (a, b, c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND c = 1 " +
+                   "PRIMARY KEY (a, b, c)");
 
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test"))
-            Thread.sleep(10);
-
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0),
                                 row(1, 0, 1, 0),
@@ -203,7 +207,7 @@
         // insert new rows that do not match the filter
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, 2, 0);
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0),
                                 row(1, 0, 1, 0),
@@ -212,7 +216,7 @@
 
         // insert new row that does match the filter
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 2, 1, 0);
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0),
                                 row(1, 0, 1, 0),
@@ -223,7 +227,7 @@
         // update rows that don't match the filter
         execute("UPDATE %s SET d = ? WHERE a = ? AND b = ?", 2, 2, 0);
         execute("UPDATE %s SET d = ? WHERE a = ? AND b = ?", 1, 2, 1);
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0),
                                 row(1, 0, 1, 0),
@@ -233,7 +237,7 @@
 
         // update a row that does match the filter
         execute("UPDATE %s SET d = ? WHERE a = ? AND b = ?", 1, 1, 0);
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0),
                                 row(1, 0, 1, 1),
@@ -243,7 +247,7 @@
 
         // delete rows that don't match the filter
         execute("DELETE FROM %s WHERE a = ? AND b = ?", 2, 0);
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0),
                                 row(1, 0, 1, 1),
@@ -253,7 +257,7 @@
 
         // delete a row that does match the filter
         execute("DELETE FROM %s WHERE a = ? AND b = ?", 1, 2);
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0),
                                 row(1, 0, 1, 1),
@@ -262,12 +266,12 @@
 
         // delete a partition that matches the filter
         execute("DELETE FROM %s WHERE a = ?", 1);
-        assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test"),
+        assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                 row(0, 0, 1, 0),
                                 row(0, 1, 1, 0)
         );
 
-        dropView("mv_test");
+        dropView();
         dropTable("DROP TABLE %s");
     }
 
@@ -286,98 +290,97 @@
     public void complexRestrictedTimestampUpdateTest(boolean flush) throws Throwable
     {
         createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY (a, b))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND c = 1 PRIMARY KEY (c, a, b)");
-        ks.getColumnFamilyStore("mv").disableAutoCompaction();
+        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                               "WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND c = 1 " +
+                               "PRIMARY KEY (c, a, b)");
+        ks.getColumnFamilyStore(mv).disableAutoCompaction();
 
         //Set initial values TS=0, matching the restriction and verify view
-        executeNet(version, "INSERT INTO %s (a, b, c, d) VALUES (0, 0, 1, 0) USING TIMESTAMP 0");
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
+        executeNet("INSERT INTO %s (a, b, c, d) VALUES (0, 0, 1, 0) USING TIMESTAMP 0");
+        assertRows(executeView("SELECT d FROM %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         //update c's timestamp TS=2
-        executeNet(version, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
+        executeNet("UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
+        assertRows(executeView("SELECT d FROM %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         //change c's value and TS=3, tombstones c=1 and adds c=0 record
-        executeNet(version, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? and b = ? ", 0, 0, 0);
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 0, 0, 0));
+        executeNet("UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? and b = ? ", 0, 0, 0);
+        assertRows(executeView("SELECT d FROM %s WHERE c = ? and a = ? and b = ?", 0, 0, 0));
 
-        if(flush)
+        if (flush)
         {
-            ks.getColumnFamilyStore("mv").forceMajorCompaction();
-            FBUtilities.waitOnFutures(ks.flush());
+            ks.getColumnFamilyStore(mv).forceMajorCompaction();
+            Util.flush(ks);
         }
 
         //change c's value back to 1 with TS=4, check we can see d
-        executeNet(version, "UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
+        executeNet("UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
         if (flush)
         {
-            ks.getColumnFamilyStore("mv").forceMajorCompaction();
-            FBUtilities.waitOnFutures(ks.flush());
+            ks.getColumnFamilyStore(mv).forceMajorCompaction();
+            Util.flush(ks);
         }
 
-        assertRows(execute("SELECT d, e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, null));
+        assertRows(executeView("SELECT d, e FROM %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, null));
 
         //Add e value @ TS=1
-        executeNet(version, "UPDATE %s USING TIMESTAMP 1 SET e = ? WHERE a = ? and b = ? ", 1, 0, 0);
-        assertRows(execute("SELECT d, e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, 1));
+        executeNet("UPDATE %s USING TIMESTAMP 1 SET e = ? WHERE a = ? and b = ? ", 1, 0, 0);
+        assertRows(executeView("SELECT d, e FROM %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, 1));
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         //Change d value @ TS=2
-        executeNet(version, "UPDATE %s USING TIMESTAMP 2 SET d = ? WHERE a = ? and b = ? ", 2, 0, 0);
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(2));
+        executeNet("UPDATE %s USING TIMESTAMP 2 SET d = ? WHERE a = ? and b = ? ", 2, 0, 0);
+        assertRows(executeView("SELECT d FROM %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(2));
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         //Change d value @ TS=3
-        executeNet(version, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? and b = ? ", 1, 0, 0);
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(1));
+        executeNet("UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? and b = ? ", 1, 0, 0);
+        assertRows(executeView("SELECT d FROM %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(1));
 
         //Tombstone c
-        executeNet(version, "DELETE FROM %s WHERE a = ? and b = ?", 0, 0);
-        assertRowsIgnoringOrder(execute("SELECT d from mv"));
-        assertRows(execute("SELECT d from mv"));
+        executeNet("DELETE FROM %s WHERE a = ? and b = ?", 0, 0);
+        assertRowsIgnoringOrder(executeView("SELECT d FROM %s"));
+        assertRows(executeView("SELECT d FROM %s"));
 
         //Add back without D
-        executeNet(version, "INSERT INTO %s (a, b, c) VALUES (0, 0, 1)");
+        executeNet("INSERT INTO %s (a, b, c) VALUES (0, 0, 1)");
 
         //Make sure D doesn't pop back in.
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row((Object) null));
+        assertRows(executeView("SELECT d FROM %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row((Object) null));
 
         //New partition
         // insert a row with timestamp 0
-        executeNet(version, "INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?) USING TIMESTAMP 0", 1, 0, 1, 0, 0);
+        executeNet("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?) USING TIMESTAMP 0", 1, 0, 1, 0, 0);
 
         // overwrite pk and e with timestamp 1, but don't overwrite d
-        executeNet(version, "INSERT INTO %s (a, b, c, e) VALUES (?, ?, ?, ?) USING TIMESTAMP 1", 1, 0, 1, 0);
+        executeNet("INSERT INTO %s (a, b, c, e) VALUES (?, ?, ?, ?) USING TIMESTAMP 1", 1, 0, 1, 0);
 
         // delete with timestamp 0 (which should only delete d)
-        executeNet(version, "DELETE FROM %s USING TIMESTAMP 0 WHERE a = ? AND b = ?", 1, 0);
-        assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0),
+        executeNet("DELETE FROM %s USING TIMESTAMP 0 WHERE a = ? AND b = ?", 1, 0);
+        assertRows(executeView("SELECT a, b, c, d, e FROM %s WHERE c = ? and a = ? and b = ?", 1, 1, 0),
                    row(1, 0, 1, null, 0)
         );
 
-        executeNet(version, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? AND b = ?", 1, 1, 1);
-        executeNet(version, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? AND b = ?", 1, 1, 0);
-        assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0),
+        executeNet("UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? AND b = ?", 1, 1, 1);
+        executeNet("UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? AND b = ?", 1, 1, 0);
+        assertRows(executeView("SELECT a, b, c, d, e FROM %s WHERE c = ? and a = ? and b = ?", 1, 1, 0),
                    row(1, 0, 1, null, 0)
         );
 
-        executeNet(version, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? AND b = ?", 0, 1, 0);
-        assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0),
+        executeNet("UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? AND b = ?", 0, 1, 0);
+        assertRows(executeView("SELECT a, b, c, d, e FROM %s WHERE c = ? and a = ? and b = ?", 1, 1, 0),
                    row(1, 0, 1, 0, 0)
         );
     }
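
The hunks above replace each "start a flush, then wait on the returned futures" pair (FBUtilities.waitOnFutures(ks.flush())) with a single Util.flush(ks) call. As a hedged, self-contained illustration of that wrapping pattern only (the real Util.flush is part of Cassandra's test utilities and may do more):

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Future;

    // Illustrative only: collapse "trigger flushes, then block on the returned
    // futures" into one helper, which is what the rewritten call sites rely on.
    final class FlushAndWait
    {
        static void flushAndWait(List<? extends Future<?>> flushFutures) throws Exception
        {
            for (Future<?> f : flushFutures)
                f.get(); // block until each flush has completed
        }

        public static void main(String[] args) throws Exception
        {
            // Stand-in for the futures a keyspace flush would return.
            flushAndWait(List.of(CompletableFuture.completedFuture(null),
                                 CompletableFuture.completedFuture(null)));
            System.out.println("all flushes completed");
        }
    }
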
@@ -392,15 +395,14 @@
                     "c int, " +
                     "val int)");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        createView("mv_rctstest", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL AND c = 1 PRIMARY KEY (k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE k IS NOT NULL AND c IS NOT NULL AND c = 1 " +
+                   "PRIMARY KEY (k,c)");
 
         updateView("UPDATE %s SET c = ?, val = ? WHERE k = ?", 0, 0, 0);
         updateView("UPDATE %s SET val = ? WHERE k = ?", 1, 0);
         updateView("UPDATE %s SET c = ? WHERE k = ?", 1, 0);
-        assertRows(execute("SELECT c, k, val FROM mv_rctstest"), row(1, 0, 1));
+        assertRows(executeView("SELECT c, k, val FROM %s"), row(1, 0, 1));
 
         updateView("TRUNCATE %s");
 
@@ -409,7 +411,7 @@
         updateView("UPDATE %s USING TIMESTAMP 2 SET val = ? WHERE k = ?", 1, 0);
         updateView("UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE k = ?", 1, 0);
         updateView("UPDATE %s USING TIMESTAMP 3 SET val = ? WHERE k = ?", 2, 0);
-        assertRows(execute("SELECT c, k, val FROM mv_rctstest"), row(1, 0, 2));
+        assertRows(executeView("SELECT c, k, val FROM %s"), row(1, 0, 2));
     }
 
     @Test
@@ -421,32 +423,31 @@
                     "val text, " + "" +
                     "PRIMARY KEY(k, c))");
 
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        createView("mv_tstest", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL AND val = 'baz' PRIMARY KEY (val,k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL AND val = 'baz' " +
+                   "PRIMARY KEY (val,k,c)");
 
         for (int i = 0; i < 100; i++)
             updateView("INSERT into %s (k,c,val)VALUES(?,?,?)", 0, i % 2, "baz");
 
-        Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush();
+        Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         Assert.assertEquals(2, execute("select * from %s").size());
-        Assert.assertEquals(2, execute("select * from mv_tstest").size());
+        Assert.assertEquals(2, executeView("select * from %s").size());
 
         assertRows(execute("SELECT val from %s where k = 0 and c = 0"), row("baz"));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "baz"), row(0), row(1));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "baz"), row(0), row(1));
 
         //Make sure an old TS does nothing
         updateView("UPDATE %s USING TIMESTAMP 100 SET val = ? where k = ? AND c = ?", "bar", 0, 1);
         assertRows(execute("SELECT val from %s where k = 0 and c = 1"), row("baz"));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "baz"), row(0), row(1));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "bar"));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "baz"), row(0), row(1));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "bar"));
 
         //Latest TS
         updateView("UPDATE %s SET val = ? where k = ? AND c = ?", "bar", 0, 1);
         assertRows(execute("SELECT val from %s where k = 0 and c = 1"), row("bar"));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "bar"));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "baz"), row(0));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "bar"));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "baz"), row(0));
     }
 }
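
Throughout this patch the CREATE MATERIALIZED VIEW templates change from "... FROM %%s ..." to "... FROM %s ...". The reason is visible in the old and new createView calls above: the old two-argument form formatted only the view name, so the base-table placeholder had to be escaped to survive that first pass, while the new one-argument form fills in both the generated view name and the current table at once. A self-contained sketch using only java.lang.String.format (the helper shape and the "mv_test" name are illustrative, not the framework's actual API):

    // Demonstrates why "%%s" was needed under two-pass formatting and why a
    // single-pass helper can use plain "%s" for both placeholders.
    public class ViewTemplateFormatting
    {
        public static void main(String[] args)
        {
            String table = "ks.base_table";

            // Old style: the first pass fills the view name; "%%s" survives as "%s".
            String oldTemplate = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE c = 1";
            String afterFirstPass = String.format(oldTemplate, "mv_test");
            String oldFinal = String.format(afterFirstPass, table);

            // New style: one pass fills the view name and the base table together.
            String newTemplate = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE c = 1";
            String newFinal = String.format(newTemplate, "mv_test", table);

            System.out.println(oldFinal);
            System.out.println(newFinal);
            System.out.println(oldFinal.equals(newFinal)); // true
        }
    }
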
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering1Test.java b/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering1Test.java
index 841eb91..5283336 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering1Test.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering1Test.java
@@ -22,10 +22,6 @@
 import java.util.List;
 
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.cassandra.db.SystemKeyspace;
 
 /* ViewFilteringTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * For any changes here, check whether they also apply to the other classes
@@ -36,8 +32,7 @@
  * - ...
  * - ViewFiltering*Test
  */
-@RunWith(Parameterized.class)
-public class ViewFilteringClustering1Test extends ViewFilteringTester
+public class ViewFilteringClustering1Test extends ViewAbstractParameterizedTest
 {
     @Test
     public void testClusteringKeyEQRestrictions() throws Throwable
@@ -47,9 +42,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -62,88 +54,79 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where b = 1
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b = 1 AND c IS NOT NULL PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a IS NOT NULL AND b = 1 AND c IS NOT NULL " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 2, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, 0, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, 2, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ?", 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
-                                    row(0, 1, 1, 0)
-            );
+                                    row(0, 1, 1, 0));
 
-            dropView("mv_test" + i);
+            dropView();
             dropTable("DROP TABLE %s");
         }
     }
@@ -156,9 +139,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -170,88 +150,79 @@
 
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b >= 1 AND c IS NOT NULL PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a IS NOT NULL AND b >= 1 AND c IS NOT NULL " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, -1, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, -1, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, -1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ?", 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
-                                    row(0, 1, 1, 0)
-            );
+                                    row(0, 1, 1, 0));
 
-            dropView("mv_test" + i);
+            dropView();
             dropTable("DROP TABLE %s");
         }
     }
@@ -264,9 +235,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -280,95 +248,86 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where b = 1 or b = 2
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IN (1, 2) AND c IS NOT NULL PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a IS NOT NULL AND b IN (1, 2) AND c IS NOT NULL " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, -1, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
                                     row(1, 1, 2, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, -1, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
                                     row(1, 1, 2, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
                                     row(1, 1, 2, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, -1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
                                     row(1, 1, 2, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 1, 0),
                                     row(1, 1, 2, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ?", 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
-                                    row(0, 1, 1, 0)
-            );
+                                    row(0, 1, 1, 0));
 
-            dropView("mv_test" + i);
+            dropView();
             dropTable("DROP TABLE %s");
         }
     }
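
Each test above also loses its private busy-wait on SystemKeyspace.isViewBuilt(...); presumably the shared base class now waits for the view build when createView returns. Purely as a generic illustration of that kind of centralised poll-until-condition helper (not the actual ViewAbstractParameterizedTest implementation):

    import java.util.function.BooleanSupplier;

    // Generic "poll until a condition holds or time out" helper of the sort a
    // test base class can expose so individual tests stop hand-rolling
    // Thread.sleep loops.
    public final class WaitUntil
    {
        public static void waitUntil(BooleanSupplier condition, long timeoutMillis) throws InterruptedException
        {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (!condition.getAsBoolean())
            {
                if (System.currentTimeMillis() > deadline)
                    throw new AssertionError("condition not met within " + timeoutMillis + " ms");
                Thread.sleep(10);
            }
        }

        public static void main(String[] args) throws InterruptedException
        {
            long start = System.currentTimeMillis();
            waitUntil(() -> System.currentTimeMillis() - start > 50, 1000);
            System.out.println("condition met");
        }
    }
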
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering2Test.java b/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering2Test.java
index 219a807..450d87c 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering2Test.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewFilteringClustering2Test.java
@@ -22,10 +22,6 @@
 import java.util.List;
 
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.cassandra.db.SystemKeyspace;
 
 /* ViewFilteringTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * For any changes here, check whether they also apply to the other classes
@@ -36,8 +32,7 @@
  * - ...
  * - ViewFiltering*Test
  */
-@RunWith(Parameterized.class)
-public class ViewFilteringClustering2Test extends ViewFilteringTester
+public class ViewFilteringClustering2Test extends ViewAbstractParameterizedTest
 {
     @Test
     public void testClusteringKeyMultiColumnRestrictions() throws Throwable
@@ -47,9 +42,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -63,91 +55,82 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where (b, c) >= (1, 0)
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND (b, c) >= (1, 0) PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a IS NOT NULL AND (b, c) >= (1, 0) " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, -1, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, -1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, -1, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, -1, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, -1);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, -1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ?", 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 1, 0, 0),
-                                    row(0, 1, 1, 0)
-            );
+                                    row(0, 1, 1, 0));
 
-            dropView("mv_test" + i);
+            dropView();
             dropTable("DROP TABLE %s");
         }
     }
@@ -160,9 +143,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -176,87 +156,78 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where c = 1
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c = 1 PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a IS NOT NULL AND b IS NOT NULL AND c = 1 " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 0, 1, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, -1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 0, 1, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 2, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, -1, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 2, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 2, 1, 1, 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 2),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, -1);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, -1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, -1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 2),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
                                     row(1, 0, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ?", 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
-                                    row(0, 1, 1, 0)
-            );
+                                    row(0, 1, 1, 0));
 
             // insert a partition with one matching and one non-matching row using a batch (CASSANDRA-10614)
             String tableName = KEYSPACE + "." + currentTable();
@@ -266,13 +237,12 @@
                     "APPLY BATCH",
                     4, 4, 0, 0,
                     4, 4, 1, 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(0, 0, 1, 0),
                                     row(0, 1, 1, 0),
-                                    row(4, 4, 1, 1)
-            );
+                                    row(4, 4, 1, 1));
 
-            dropView("mv_test" + i);
+            dropView();
             dropTable("DROP TABLE %s");
         }
     }
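
The first test in this file filters on the multi-column clustering restriction (b, c) >= (1, 0), which compares the clustering tuple lexicographically rather than column by column: a row qualifies when b > 1, or when b = 1 and c >= 0. A small self-contained check mirroring that rule against the test data above (illustrative only; it uses no Cassandra code):

    import java.util.List;

    // Mirrors the lexicographic semantics of "(b, c) >= (1, 0)" with ascending
    // clustering order, so it is easy to see which inserted rows the view keeps.
    public class MultiColumnRestriction
    {
        static boolean matches(int b, int c)
        {
            return b > 1 || (b == 1 && c >= 0);
        }

        public static void main(String[] args)
        {
            // (b, c) pairs drawn from the inserts above: (1, -1) is excluded
            // because the tuple sorts before (1, 0), even though b equals 1.
            List<int[]> rows = List.of(new int[]{ 0, 0 }, new int[]{ 1, 0 }, new int[]{ 1, 1 },
                                       new int[]{ 1, -1 }, new int[]{ 2, 0 }, new int[]{ -1, 0 });
            for (int[] r : rows)
                System.out.printf("(%d, %d) -> %s%n", r[0], r[1], matches(r[0], r[1]));
        }
    }
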
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFilteringComplexPKTest.java b/test/unit/org/apache/cassandra/cql3/ViewFilteringComplexPKTest.java
index da089ca..d18883d 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewFilteringComplexPKTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewFilteringComplexPKTest.java
@@ -22,10 +22,6 @@
 import java.util.List;
 
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.cassandra.db.SystemKeyspace;
 
 /* ViewFilteringTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * For any changes here, check whether they also apply to the other classes
@@ -36,8 +32,7 @@
  * - ...
  * - ViewFiltering*Test
  */
-@RunWith(Parameterized.class)
-public class ViewFilteringComplexPKTest extends ViewFilteringTester
+public class ViewFilteringComplexPKTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testCompoundPartitionKeyRestrictions() throws Throwable
@@ -47,9 +42,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b), c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -62,72 +54,64 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where a = 1 and b = 1
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND c IS NOT NULL PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a = 1 AND b = 1 AND c IS NOT NULL " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 2, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 0, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 0, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 1, 1);
-            assertEmpty(execute("SELECT * FROM mv_test" + i));
+            assertEmpty(executeView("SELECT * FROM %s"));
         }
     }
 
@@ -135,8 +119,6 @@
     public void testCompoundPartitionKeyRestrictionsNotIncludeAll() throws Throwable
     {
         createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b), c))");
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
 
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
@@ -148,72 +130,64 @@
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 1, 0);
 
         // only accept rows where a = 1 and b = 1, don't include column d in the selection
-        createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT a, b, c FROM %%s WHERE a = 1 AND b = 1 AND c IS NOT NULL PRIMARY KEY ((a, b), c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c FROM %s " +
+                   "WHERE a = 1 AND b = 1 AND c IS NOT NULL " +
+                   "PRIMARY KEY ((a, b), c)");
 
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test"))
-            Thread.sleep(10);
-
-        assertRows(execute("SELECT * FROM mv_test"),
+        assertRows(executeView("SELECT * FROM %s"),
                    row(1, 1, 0),
-                   row(1, 1, 1)
-        );
+                   row(1, 1, 1));
 
         // insert new rows that do not match the filter
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, 0, 0);
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 2, 0, 0);
-        assertRows(execute("SELECT * FROM mv_test"),
+        assertRows(executeView("SELECT * FROM %s"),
                    row(1, 1, 0),
-                   row(1, 1, 1)
-        );
+                   row(1, 1, 1));
 
         // insert new row that does match the filter
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-        assertRows(execute("SELECT * FROM mv_test"),
+        assertRows(executeView("SELECT * FROM %s"),
                    row(1, 1, 0),
                    row(1, 1, 1),
-                   row(1, 1, 2)
-        );
+                   row(1, 1, 2));
 
         // update rows that don't match the filter
         execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 0, 0);
         execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 0, 0);
         execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 1, 0);
-        assertRows(execute("SELECT * FROM mv_test"),
+        assertRows(executeView("SELECT * FROM %s"),
                    row(1, 1, 0),
                    row(1, 1, 1),
-                   row(1, 1, 2)
-        );
+                   row(1, 1, 2));
 
         // update a row that does match the filter
         execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-        assertRows(execute("SELECT * FROM mv_test"),
+        assertRows(executeView("SELECT * FROM %s"),
                    row(1, 1, 0),
                    row(1, 1, 1),
-                   row(1, 1, 2)
-        );
+                   row(1, 1, 2));
 
         // delete rows that don't match the filter
         execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 0, 0);
         execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 0, 0);
         execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 1, 0);
         execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-        assertRows(execute("SELECT * FROM mv_test"),
+        assertRows(executeView("SELECT * FROM %s"),
                    row(1, 1, 0),
                    row(1, 1, 1),
-                   row(1, 1, 2)
-        );
+                   row(1, 1, 2));
 
         // delete a row that does match the filter
         execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-        assertRows(execute("SELECT * FROM mv_test"),
+        assertRows(executeView("SELECT * FROM %s"),
                    row(1, 1, 1),
-                   row(1, 1, 2)
-        );
+                   row(1, 1, 2));
 
         // delete a partition that matches the filter
         execute("DELETE FROM %s WHERE a = ? AND b = ?", 1, 1);
-        assertEmpty(execute("SELECT * FROM mv_test"));
+        assertEmpty(executeView("SELECT * FROM %s"));
     }
 
     @Test
@@ -224,9 +198,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
@@ -240,71 +211,63 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where a = 1 and c = 1
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a = 1 AND b IS NOT NULL AND c = 1 " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 1, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 1, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 2, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, -1, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 0, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 2, 1, 1, 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 2),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, -1);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 2, 0, 1);
             execute("DELETE FROM %s WHERE a = ?", 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 2),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 1);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 1, 0),
-                                    row(1, 2, 1, 0)
-            );
+                                    row(1, 2, 1, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ?", 1);
-            assertEmpty(execute("SELECT a, b, c, d FROM mv_test" + i));
+            assertEmpty(executeView("SELECT a, b, c, d FROM %s"));
 
-            dropView("mv_test" + i);
+            dropView();
             dropTable("DROP TABLE %s");
         }
     }
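
The refactoring pattern above (and in the files that follow) replaces hard-coded view names such as "mv_test" + i with helpers inherited from ViewAbstractParameterizedTest: the test passes a CREATE MATERIALIZED VIEW statement containing "%s" placeholders to createView(...), later queries the same view through executeView("... FROM %s"), and removes it with dropView(). The explicit "USE <keyspace>" statements and SystemKeyspace.isViewBuilt(...) polling loops disappear because the base class presumably handles keyspace selection and waiting for the view build. That base class is not part of this diff, so the sketch below only illustrates the assumed placeholder mechanics; ViewHelperSketch, createView and executeView here are illustrative names, not the actual implementation.

    // Hedged sketch of the assumed name-substitution behaviour; the real
    // ViewAbstractParameterizedTest helpers may differ in detail.
    public class ViewHelperSketch
    {
        private String currentView;
        private int seq;

        // First "%s" in the DDL becomes the generated view name, second one the base table.
        String createView(String ddlTemplate, String baseTable)
        {
            currentView = "mv_" + seq++;
            return String.format(ddlTemplate, currentView, baseTable);
        }

        // "%s" in the query becomes the view created by the latest createView() call.
        String executeView(String queryTemplate)
        {
            return String.format(queryTemplate, currentView);
        }

        public static void main(String[] args)
        {
            ViewHelperSketch t = new ViewHelperSketch();
            System.out.println(t.createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
                                            "WHERE a = 1 AND b IS NOT NULL PRIMARY KEY (a, b)",
                                            "base_table"));
            System.out.println(t.executeView("SELECT a, b FROM %s"));
        }
    }
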
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFilteringSimplePKTest.java b/test/unit/org/apache/cassandra/cql3/ViewFilteringSimplePKTest.java
index d3b3e4a..9db70e6 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewFilteringSimplePKTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewFilteringSimplePKTest.java
@@ -22,10 +22,6 @@
 import java.util.List;
 
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.cassandra.db.SystemKeyspace;
 
 /* ViewFilteringTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
  * For any changes here, check whether they also apply to the other classes
@@ -36,8 +32,7 @@
  * - ...
  * - ViewFiltering*Test
  */
-@RunWith(Parameterized.class)
-public class ViewFilteringSimplePKTest extends ViewFilteringTester
+public class ViewFilteringSimplePKTest extends ViewAbstractParameterizedTest
 {
     @Test
     public void testPartitionKeyFilteringUnrestrictedPart() throws Throwable
@@ -47,9 +42,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b), c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 0, 0);
@@ -60,87 +52,79 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where a = 1
-            String viewName= "mv_test" + i;
-            createView(viewName, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a = 1 AND b IS NOT NULL AND c IS NOT NULL " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            ViewFilteringTest.waitForView(keyspace(), viewName);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 0, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 1, 1, 0),
                                     row(1, 1, 2, 0));
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 1, 1);
-            assertEmpty(execute("SELECT * FROM mv_test" + i));
+            assertEmpty(executeView("SELECT * FROM %s"));
         }
     }
 
@@ -152,9 +136,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b), c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0,  1, 1);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 10, 1, 2);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0,  2, 1);
@@ -165,70 +146,62 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where a > 0 and b > 5
-            String viewName= "mv_test" + i;
-            createView(viewName, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a > 0 AND b > 5 AND c IS NOT NULL PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a > 0 AND b > 5 AND c IS NOT NULL " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            ViewFilteringTest.waitForView(keyspace(), viewName);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 10, 2, 2),
-                                    row(2, 10, 3, 2)
-            );
+                                    row(2, 10, 3, 2));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 10, 2, 2),
-                                    row(2, 10, 3, 2)
-            );
+                                    row(2, 10, 3, 2));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 3, 10, 4, 2);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 10, 2, 2),
                                     row(2, 10, 3, 2),
-                                    row(3, 10, 4, 2)
-            );
+                                    row(3, 10, 4, 2));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 0, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 10, 2, 2),
                                     row(2, 10, 3, 2),
-                                    row(3, 10, 4, 2)
-            );
+                                    row(3, 10, 4, 2));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 100, 3, 10, 4);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 10, 2, 2),
                                     row(2, 10, 3, 2),
-                                    row(3, 10, 4, 100)
-            );
+                                    row(3, 10, 4, 100));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 10, 2, 2),
                                     row(2, 10, 3, 2),
-                                    row(3, 10, 4, 100)
-            );
+                                    row(3, 10, 4, 100));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 10, 2, 2),
                                     row(2, 10, 3, 2),
-                                    row(3, 10, 4, 100)
-            );
+                                    row(3, 10, 4, 100));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 1, 10);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(2, 10, 3, 2),
                                     row(3, 10, 4, 100));
         }
@@ -242,9 +215,6 @@
         {
             createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-            execute("USE " + keyspace());
-            executeNet(version, "USE " + keyspace());
-
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 0, 0);
@@ -255,83 +225,75 @@
             logger.info("Testing MV primary key: {}", mvPrimaryKeys.get(i));
 
             // only accept rows where a = 1
-            createView("mv_test" + i, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY " + mvPrimaryKeys.get(i));
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE a = 1 AND b IS NOT NULL AND c IS NOT NULL " +
+                       "PRIMARY KEY " + mvPrimaryKeys.get(i));
 
-            while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test" + i))
-                Thread.sleep(10);
-
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new rows that do not match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 0, 0, 0);
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 2, 1, 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
-                                    row(1, 1, 1, 0)
-            );
+                                    row(1, 1, 1, 0));
 
             // insert new row that does match the filter
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 2, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update rows that don't match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 0, 0);
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 0, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // update a row that does match the filter
             execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", 1, 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete rows that don't match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 0, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 0, 1, 0);
             execute("DELETE FROM %s WHERE a = ? AND b = ?", 0, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 0, 1),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a row that does match the filter
             execute("DELETE FROM %s WHERE a = ? AND b = ? AND c = ?", 1, 1, 0);
-            assertRowsIgnoringOrder(execute("SELECT a, b, c, d FROM mv_test" + i),
+            assertRowsIgnoringOrder(executeView("SELECT a, b, c, d FROM %s"),
                                     row(1, 0, 0, 0),
                                     row(1, 0, 1, 0),
                                     row(1, 1, 1, 0),
-                                    row(1, 1, 2, 0)
-            );
+                                    row(1, 1, 2, 0));
 
             // delete a partition that matches the filter
             execute("DELETE FROM %s WHERE a = ?", 1);
-            assertEmpty(execute("SELECT * FROM mv_test" + i));
+            assertEmpty(executeView("SELECT * FROM %s"));
         }
     }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFilteringTest.java b/test/unit/org/apache/cassandra/cql3/ViewFilteringTest.java
deleted file mode 100644
index ed2d140..0000000
--- a/test/unit/org/apache/cassandra/cql3/ViewFilteringTest.java
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.cql3;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import com.datastax.driver.core.exceptions.InvalidQueryException;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.utils.FBUtilities;
-
-/* ViewFilteringTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670, CASSANDRA-17167)
- * Any changes here check if they apply to the other classes
- * - ViewFilteringPKTest
- * - ViewFilteringClustering1Test
- * - ViewFilteringClustering2Test
- * - ViewFilteringTest
- * - ...
- * - ViewFiltering*Test
- */
-@RunWith(Parameterized.class)
-public class ViewFilteringTest extends ViewFilteringTester
-{
-    // TODO will revise the non-pk filter condition in MV, see CASSANDRA-11500
-    @Ignore
-    @Test
-    public void testViewFilteringWithFlush() throws Throwable
-    {
-        testViewFiltering(true);
-    }
-
-    // TODO will revise the non-pk filter condition in MV, see CASSANDRA-11500
-    @Ignore
-    @Test
-    public void testViewFilteringWithoutFlush() throws Throwable
-    {
-        testViewFiltering(false);
-    }
-
-    public void testViewFiltering(boolean flush) throws Throwable
-    {
-        // CASSANDRA-13547: able to shadow entire view row if base column used in filter condition is modified
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        createView("mv_test1",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL and c = 1  PRIMARY KEY (a, b)");
-        createView("mv_test2",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT c, d FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL and c = 1 and d = 1 PRIMARY KEY (a, b)");
-        createView("mv_test3",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT a, b, c, d FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a, b)");
-        createView("mv_test4",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT c FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL and c = 1 PRIMARY KEY (a, b)");
-        createView("mv_test5",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT c FROM %%s WHERE a IS NOT NULL and d = 1 PRIMARY KEY (a, d)");
-        createView("mv_test6",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT c FROM %%s WHERE a = 1 and d IS NOT NULL PRIMARY KEY (a, d)");
-
-        waitForView(keyspace(), "mv_test1");
-        waitForView(keyspace(), "mv_test2");
-        waitForView(keyspace(), "mv_test3");
-        waitForView(keyspace(), "mv_test4");
-        waitForView(keyspace(), "mv_test5");
-        waitForView(keyspace(), "mv_test6");
-
-        Keyspace ks = Keyspace.open(keyspace());
-        ks.getColumnFamilyStore("mv_test1").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test2").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test3").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test4").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test5").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test6").disableAutoCompaction();
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) using timestamp 0", 1, 1, 1, 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        // views should be updated.
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, 1));
-
-        updateView("UPDATE %s using timestamp 1 set c = ? WHERE a=?", 0, 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowCount(execute("SELECT * FROM mv_test1"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test2"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 0, 1));
-        assertRowCount(execute("SELECT * FROM mv_test4"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, 0));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, 0));
-
-        updateView("UPDATE %s using timestamp 2 set c = ? WHERE a=?", 1, 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        // row should be back in views.
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, 1));
-
-        updateView("UPDATE %s using timestamp 3 set d = ? WHERE a=?", 0, 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 1, 1, 0));
-        assertRowCount(execute("SELECT * FROM mv_test2"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 1, 0));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1, 1));
-        assertRowCount(execute("SELECT * FROM mv_test5"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 0, 1));
-
-        updateView("UPDATE %s using timestamp 4 set c = ? WHERE a=?", 0, 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowCount(execute("SELECT * FROM mv_test1"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test2"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 0, 0));
-        assertRowCount(execute("SELECT * FROM mv_test4"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test5"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 0, 0));
-
-        updateView("UPDATE %s using timestamp 5 set d = ? WHERE a=?", 1, 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        // should not update as c=0
-        assertRowCount(execute("SELECT * FROM mv_test1"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test2"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 0, 1));
-        assertRowCount(execute("SELECT * FROM mv_test4"), 0);
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, 0));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, 0));
-
-        updateView("UPDATE %s using timestamp 6 set c = ? WHERE a=?", 1, 1);
-
-        // row should be back in views.
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, 1));
-
-        updateView("UPDATE %s using timestamp 7 set b = ? WHERE a=?", 2, 1);
-        if (flush)
-        {
-            FBUtilities.waitOnFutures(ks.flush());
-            for (String view : views)
-                ks.getColumnFamilyStore(view).forceMajorCompaction();
-        }
-        // row should be back in views.
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 2, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"), row(1, 2, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 2, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 2, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, 1));
-
-        updateView("DELETE b, c FROM %s using timestamp 6 WHERE a=?", 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s"), row(1, 2, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 2, null, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, null));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, null));
-
-        updateView("DELETE FROM %s using timestamp 8 where a=?", 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowCount(execute("SELECT * FROM mv_test1"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test2"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test3"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test4"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test5"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test6"), 0);
-
-        updateView("UPDATE %s using timestamp 9 set b = ?,c = ? where a=?", 1, 1, 1); // upsert
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 1, 1, null));
-        assertRows(execute("SELECT * FROM mv_test2"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 1, null));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1, 1));
-        assertRows(execute("SELECT * FROM mv_test5"));
-        assertRows(execute("SELECT * FROM mv_test6"));
-
-        updateView("DELETE FROM %s using timestamp 10 where a=?", 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowCount(execute("SELECT * FROM mv_test1"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test2"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test3"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test4"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test5"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test6"), 0);
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) using timestamp 11", 1, 1, 1, 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        // row should be back in views.
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test5"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test6"), row(1, 1, 1));
-
-        updateView("DELETE FROM %s using timestamp 12 where a=?", 1);
-        if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowCount(execute("SELECT * FROM mv_test1"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test2"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test3"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test4"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test5"), 0);
-        assertRowCount(execute("SELECT * FROM mv_test6"), 0);
-
-        dropView("mv_test1");
-        dropView("mv_test2");
-        dropView("mv_test3");
-        dropView("mv_test4");
-        dropView("mv_test5");
-        dropView("mv_test6");
-        dropTable("DROP TABLE %s");
-    }
-
-    // TODO will revise the non-pk filter condition in MV, see CASSANDRA-11500
-    @Ignore
-    @Test
-    public void testMVFilteringWithComplexColumn() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, c int, l list<int>, s set<int>, m map<int,int>, PRIMARY KEY (a, b))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        createView("mv_test1",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT a,b,c FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL "
-                   + "and l contains (1) AND s contains (1) AND m contains key (1) PRIMARY KEY (a, b, c)");
-        createView("mv_test2",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %%s WHERE a IS NOT NULL and b IS NOT NULL AND l contains (1) PRIMARY KEY (a, b)");
-        createView("mv_test3",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND s contains (1) PRIMARY KEY (a, b)");
-        createView("mv_test4",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT a,b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND m contains key (1) PRIMARY KEY (a, b)");
-
-        waitForView(keyspace(), "mv_test1");
-        waitForView(keyspace(), "mv_test2");
-        waitForView(keyspace(), "mv_test3");
-        waitForView(keyspace(), "mv_test4");
-
-        // not able to drop base column filtered in view
-        assertInvalidMessage("Cannot drop column l, depended on by materialized views", "ALTER TABLE %s DROP l");
-        assertInvalidMessage("Cannot drop column s, depended on by materialized views", "ALTER TABLE %S DROP s");
-        assertInvalidMessage("Cannot drop column m, depended on by materialized views", "ALTER TABLE %s DROP m");
-
-        Keyspace ks = Keyspace.open(keyspace());
-        ks.getColumnFamilyStore("mv_test1").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test2").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test3").disableAutoCompaction();
-        ks.getColumnFamilyStore("mv_test4").disableAutoCompaction();
-
-        execute("INSERT INTO %s (a, b, c, l, s, m) VALUES (?, ?, ?, ?, ?, ?) ",
-                1,
-                1,
-                1,
-                list(1, 1, 2),
-                set(1, 2),
-                map(1, 1, 2, 2));
-        FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"), row(1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1));
-
-        execute("UPDATE %s SET l=l-[1] WHERE a = 1 AND b = 1" );
-        FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1));
-
-        execute("UPDATE %s SET s=s-{2}, m=m-{2} WHERE a = 1 AND b = 1");
-        FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT a,b,c FROM %s"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"), row(1, 1));
-
-        execute("UPDATE %s SET  m=m-{1} WHERE a = 1 AND b = 1");
-        FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT a,b,c FROM %s"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"));
-
-        // filter conditions result not changed
-        execute("UPDATE %s SET  l=l+[2], s=s-{0}, m=m+{3:3} WHERE a = 1 AND b = 1");
-        FBUtilities.waitOnFutures(ks.flush());
-
-        assertRowsIgnoringOrder(execute("SELECT a,b,c FROM %s"), row(1, 1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test1"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test2"));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test3"), row(1, 1));
-        assertRowsIgnoringOrder(execute("SELECT * FROM mv_test4"));
-    }
-
-    @Test
-    public void testMVCreationSelectRestrictions() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY((a, b), c, d))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        // IS NOT NULL is required on all PK statements that are not otherwise restricted
-        List<String> badStatements = Arrays.asList(
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE b IS NOT NULL AND c IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND c IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c is NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = ? AND b IS NOT NULL AND c is NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = blobAsInt(?) AND b IS NOT NULL AND c is NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s PRIMARY KEY (a, b, c, d)"
-        );
-
-        for (String badStatement : badStatements)
-        {
-            try
-            {
-                createView("mv1_test", badStatement);
-                Assert.fail("Create MV statement should have failed due to missing IS NOT NULL restriction: " + badStatement);
-            }
-            catch (InvalidQueryException exc) {}
-        }
-
-        List<String> goodStatements = Arrays.asList(
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND c IS NOT NULL AND d is NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c = 1 AND d IS NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND c > 1 AND d IS NOT NULL PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND c = 1 AND d IN (1, 2, 3) PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND (c, d) = (1, 1) PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND (c, d) > (1, 1) PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = 1 AND b = 1 AND (c, d) IN ((1, 1), (2, 2)) PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = (int) 1 AND b = 1 AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)",
-        "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a = blobAsInt(intAsBlob(1)) AND b = 1 AND c = 1 AND d = 1 PRIMARY KEY ((a, b), c, d)"
-        );
-
-        for (int i = 0; i < goodStatements.size(); i++)
-        {
-            try
-            {
-                createView("mv" + i + "_test", goodStatements.get(i));
-            }
-            catch (Exception e)
-            {
-                throw new RuntimeException("MV creation failed: " + goodStatements.get(i), e);
-            }
-
-            try
-            {
-                executeNet(version, "ALTER MATERIALIZED VIEW mv" + i + "_test WITH compaction = { 'class' : 'LeveledCompactionStrategy' }");
-            }
-            catch (Exception e)
-            {
-                throw new RuntimeException("MV alter failed: " + goodStatements.get(i), e);
-            }
-        }
-    }
-
-    @Test
-    public void testCaseSensitivity() throws Throwable
-    {
-        createTable("CREATE TABLE %s (\"theKey\" int, \"theClustering\" int, \"the\"\"Value\" int, PRIMARY KEY (\"theKey\", \"theClustering\"))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 0, 0, 0);
-        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 0, 1, 0);
-        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 1, 0, 0);
-        execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"the\"\"Value\") VALUES (?, ?, ?)", 1, 1, 0);
-
-        createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
-                              "WHERE \"theKey\" = 1 AND \"theClustering\" = 1 AND \"the\"\"Value\" IS NOT NULL " +
-                              "PRIMARY KEY (\"theKey\", \"theClustering\")");
-
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test"))
-            Thread.sleep(10);
-        createView("mv_test2", "CREATE MATERIALIZED VIEW %s AS SELECT \"theKey\", \"theClustering\", \"the\"\"Value\" FROM %%s " +
-                               "WHERE \"theKey\" = 1 AND \"theClustering\" = 1 AND \"the\"\"Value\" IS NOT NULL " +
-                               "PRIMARY KEY (\"theKey\", \"theClustering\")");
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test2"))
-            Thread.sleep(10);
-
-        for (String mvname : Arrays.asList("mv_test", "mv_test2"))
-        {
-            assertRowsIgnoringOrder(execute("SELECT \"theKey\", \"theClustering\", \"the\"\"Value\" FROM " + mvname),
-                                    row(1, 1, 0)
-            );
-        }
-
-        executeNet(version, "ALTER TABLE %s RENAME \"theClustering\" TO \"Col\"");
-
-        for (String mvname : Arrays.asList("mv_test", "mv_test2"))
-        {
-            assertRowsIgnoringOrder(execute("SELECT \"theKey\", \"Col\", \"the\"\"Value\" FROM " + mvname),
-                                    row(1, 1, 0)
-            );
-        }
-    }
-
-    @Test
-    public void testFilterWithFunction() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 1);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 0, 2);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 1, 3);
-
-        createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
-                              "WHERE a = blobAsInt(intAsBlob(1)) AND b IS NOT NULL " +
-                              "PRIMARY KEY (a, b)");
-
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test"))
-            Thread.sleep(10);
-
-        assertRows(execute("SELECT a, b, c FROM mv_test"),
-                   row(1, 0, 2),
-                   row(1, 1, 3)
-        );
-
-        executeNet(version, "ALTER TABLE %s RENAME a TO foo");
-
-        assertRows(execute("SELECT foo, b, c FROM mv_test"),
-                   row(1, 0, 2),
-                   row(1, 1, 3)
-        );
-    }
-
-    @Test
-    public void testFilterWithTypecast() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
-
-        execute("USE " + keyspace());
-        executeNet(version, "USE " + keyspace());
-
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 1);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 0, 2);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 1, 3);
-
-        createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
-                              "WHERE a = (int) 1 AND b IS NOT NULL " +
-                              "PRIMARY KEY (a, b)");
-
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test"))
-            Thread.sleep(10);
-
-        assertRows(execute("SELECT a, b, c FROM mv_test"),
-                   row(1, 0, 2),
-                   row(1, 1, 3)
-        );
-
-        executeNet(version, "ALTER TABLE %s RENAME a TO foo");
-
-        assertRows(execute("SELECT foo, b, c FROM mv_test"),
-                   row(1, 0, 2),
-                   row(1, 1, 3)
-        );
-    } 
-}
diff --git a/test/unit/org/apache/cassandra/cql3/ViewFilteringTester.java b/test/unit/org/apache/cassandra/cql3/ViewFilteringTester.java
deleted file mode 100644
index 782a241..0000000
--- a/test/unit/org/apache/cassandra/cql3/ViewFilteringTester.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.cql3;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import com.datastax.driver.core.exceptions.OperationTimedOutException;
-import org.apache.cassandra.concurrent.Stage;
-import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.transport.ProtocolVersion;
-
-/* ViewComplexTest class has been split into multiple ones because of timeout issues (CASSANDRA-16670)
- * Any changes here check if they apply to the other classes:
- * - ViewComplexUpdatesTest
- * - ViewComplexDeletionsTest
- * - ViewComplexTTLTest
- * - ViewComplexTest
- * - ViewComplexLivenessTest
- * - ...
- * - ViewComplex*Test
- */
-@RunWith(Parameterized.class)
-public abstract class ViewFilteringTester extends CQLTester
-{
-    @Parameterized.Parameter
-    public ProtocolVersion version;
-
-    @Parameterized.Parameters()
-    public static Collection<Object[]> versions()
-    {
-        return ProtocolVersion.SUPPORTED.stream()
-                                        .map(v -> new Object[]{v})
-                                        .collect(Collectors.toList());
-    }
-
-    protected final List<String> views = new ArrayList<>();
-
-    @BeforeClass
-    public static void startup()
-    {
-        requireNetwork();
-        System.setProperty("cassandra.mv.allow_filtering_nonkey_columns_unsafe", "true");
-    }
-
-    @AfterClass
-    public static void tearDown()
-    {
-        System.setProperty("cassandra.mv.allow_filtering_nonkey_columns_unsafe", "false");
-    }
-
-    @Before
-    public void begin()
-    {
-        views.clear();
-    }
-
-    @After
-    public void end() throws Throwable
-    {
-        for (String viewName : views)
-            executeNet(version, "DROP MATERIALIZED VIEW " + viewName);
-    }
-
-    protected void createView(String name, String query) throws Throwable
-    {
-        try
-        {
-            executeNet(version, String.format(query, name));
-            // If exception is thrown, the view will not be added to the list; since it shouldn't have been created, this is
-            // the desired behavior
-            views.add(name);
-        }
-        catch (OperationTimedOutException ex)
-        {
-            // ... except for timeout, when we actually do not know whether the view was created or not
-            views.add(name);
-            throw ex;
-        }
-    }
-
-    protected void updateView(String query, Object... params) throws Throwable
-    {
-        executeNet(version, query, params);
-        while (!(Stage.VIEW_MUTATION.executor().getPendingTaskCount() == 0
-                 && Stage.VIEW_MUTATION.executor().getActiveTaskCount() == 0))
-        {
-            Thread.sleep(1);
-        }
-    }
-
-    protected void dropView(String name) throws Throwable
-    {
-        executeNet(version, "DROP MATERIALIZED VIEW " + name);
-        views.remove(name);
-    }
-
-    protected static void waitForView(String keyspace, String view) throws InterruptedException
-    {
-        while (!SystemKeyspace.isViewBuilt(keyspace, view))
-            Thread.sleep(10);
-    }
-}
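
With ViewFilteringTester removed, the per-test build-wait loops deleted in the hunks above (while (!SystemKeyspace.isViewBuilt(...)) Thread.sleep(10)) have no replacement visible in this diff; presumably the shared createView(...) in ViewAbstractParameterizedTest now blocks until the view is built. The snippet below is only a sketch of that kind of centralized polling wait, under that assumption.

    import java.util.function.BooleanSupplier;

    // Hedged sketch: same shape as the removed inline loops, factored into one helper.
    public final class ViewBuildWaitSketch
    {
        private ViewBuildWaitSketch() {}

        static void waitForViewBuild(BooleanSupplier isBuilt) throws InterruptedException
        {
            // Poll until the view is reported as built, exactly like the deleted loops did.
            while (!isBuilt.getAsBoolean())
                Thread.sleep(10);
        }

        public static void main(String[] args) throws InterruptedException
        {
            long deadline = System.currentTimeMillis() + 50;
            waitForViewBuild(() -> System.currentTimeMillis() >= deadline); // toy condition
            System.out.println("view reported as built");
        }
    }
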
diff --git a/test/unit/org/apache/cassandra/cql3/ViewPKTest.java b/test/unit/org/apache/cassandra/cql3/ViewPKTest.java
index 06664cb..6ab27ef 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewPKTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewPKTest.java
@@ -24,9 +24,12 @@
 import org.junit.Test;
 
 import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.exceptions.InvalidQueryException;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.exceptions.RequestValidationException;
+import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
+import org.assertj.core.api.Assertions;
 
 import static org.junit.Assert.assertTrue;
 
@@ -44,33 +47,30 @@
     {
         createTable("CREATE TABLE %s (k1 int, c1 int , val int, PRIMARY KEY (k1, c1))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("view1", "CREATE MATERIALIZED VIEW view1 AS SELECT k1, c1, val FROM %%s WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND val IS NOT NULL PRIMARY KEY (val, k1, c1)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT k1, c1, val FROM %s " +
+                   "WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND val IS NOT NULL " +
+                   "PRIMARY KEY (val, k1, c1)");
 
         updateView("INSERT INTO %s (k1, c1, val) VALUES (1, 2, 200)");
         updateView("INSERT INTO %s (k1, c1, val) VALUES (1, 3, 300)");
 
         Assert.assertEquals(2, execute("select * from %s").size());
-        Assert.assertEquals(2, execute("select * from view1").size());
+        Assert.assertEquals(2, executeView("select * from %s").size());
 
         updateView("DELETE FROM %s WHERE k1 = 1");
 
         Assert.assertEquals(0, execute("select * from %s").size());
-        Assert.assertEquals(0, execute("select * from view1").size());
+        Assert.assertEquals(0, executeView("select * from %s").size());
     }
 
     @Test
-    public void createMvWithUnrestrictedPKParts() throws Throwable
+    public void createMvWithUnrestrictedPKParts()
     {
         createTable("CREATE TABLE %s (k1 int, c1 int , val int, PRIMARY KEY (k1, c1))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("view1", "CREATE MATERIALIZED VIEW view1 AS SELECT val, k1, c1 FROM %%s WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND val IS NOT NULL PRIMARY KEY (val, k1, c1)");
-
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT val, k1, c1 FROM %s " +
+                   "WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND val IS NOT NULL " +
+                   "PRIMARY KEY (val, k1, c1)");
     }
 
     @Test
@@ -78,25 +78,24 @@
     {
         createTable("CREATE TABLE %s (k1 int, c1 int , val int, PRIMARY KEY (k1, c1))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("view1", "CREATE MATERIALIZED VIEW view1 AS SELECT k1, c1, val FROM %%s WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND val IS NOT NULL PRIMARY KEY (val, k1, c1)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT k1, c1, val FROM %s " +
+                   "WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND val IS NOT NULL " +
+                   "PRIMARY KEY (val, k1, c1)");
 
         updateView("INSERT INTO %s (k1, c1, val) VALUES (1, 2, 200)");
         updateView("INSERT INTO %s (k1, c1, val) VALUES (1, 3, 300)");
 
         Assert.assertEquals(2, execute("select * from %s").size());
-        Assert.assertEquals(2, execute("select * from view1").size());
+        Assert.assertEquals(2, executeView("select * from %s").size());
 
         updateView("DELETE FROM %s WHERE k1 = 1 and c1 = 3");
 
         Assert.assertEquals(1, execute("select * from %s").size());
-        Assert.assertEquals(1, execute("select * from view1").size());
+        Assert.assertEquals(1, executeView("select * from %s").size());
     }
 
     @Test
-    public void testPrimaryKeyIsNotNull() throws Throwable
+    public void testPrimaryKeyIsNotNull()
     {
         createTable("CREATE TABLE %s (" +
                     "k int, " +
@@ -104,27 +103,32 @@
                     "bigintval bigint, " +
                     "PRIMARY KEY((k, asciival)))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         // Must include "IS NOT NULL" for primary keys
         try
         {
-            createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s");
             Assert.fail("Should fail if no primary key is filtered as NOT NULL");
         }
         catch (Exception e)
         {
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(SyntaxException.class);
+            Assertions.assertThat(cause.getMessage()).contains("mismatched input");
         }
 
         // Must include both when the partition key is composite
         try
         {
-            createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE bigintval IS NOT NULL AND asciival IS NOT NULL PRIMARY KEY (bigintval, k, asciival)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE bigintval IS NOT NULL AND asciival IS NOT NULL " +
+                       "PRIMARY KEY (bigintval, k, asciival)");
             Assert.fail("Should fail if compound primary is not completely filtered as NOT NULL");
         }
         catch (Exception e)
         {
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(InvalidRequestException.class);
+            Assertions.assertThat(cause.getMessage()).contains("Primary key columns k must be restricted");
         }
 
         dropTable("DROP TABLE %s");
@@ -136,21 +140,29 @@
                     "PRIMARY KEY(k, asciival))");
         try
         {
-            createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s");
             Assert.fail("Should fail if no primary key is filtered as NOT NULL");
         }
         catch (Exception e)
         {
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(SyntaxException.class);
+            Assertions.assertThat(cause.getMessage()).contains("mismatched input");
         }
 
         // Must still include both even when the partition key is composite
         try
         {
-            createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE bigintval IS NOT NULL AND asciival IS NOT NULL PRIMARY KEY (bigintval, k, asciival)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE bigintval IS NOT NULL AND asciival IS NOT NULL " +
+                       "PRIMARY KEY (bigintval, k, asciival)");
             Assert.fail("Should fail if compound primary is not completely filtered as NOT NULL");
         }
         catch (Exception e)
         {
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(InvalidRequestException.class);
+            Assertions.assertThat(cause.getMessage()).contains("Primary key columns k must be restricted");
         }
     }
 
@@ -165,39 +177,36 @@
 
         TableMetadata metadata = currentTableMetadata();
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         for (ColumnMetadata def : new HashSet<>(metadata.columns()))
         {
+            String asciival = def.name.toString().equals("asciival") ? "" : "AND asciival IS NOT NULL ";
             try
             {
-                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
-                               + (def.name.toString().equals("asciival") ? "" : "AND asciival IS NOT NULL ") + "PRIMARY KEY ("
+                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
+                               + asciival + "PRIMARY KEY ("
                                + def.name + ", k" + (def.name.toString().equals("asciival") ? "" : ", asciival") + ")";
                 createView("mv1_" + def.name, query);
 
                 if (def.type.isMultiCell())
                     Assert.fail("MV on a multicell should fail " + def);
             }
-            catch (InvalidQueryException e)
+            catch (Exception e)
             {
                 if (!def.type.isMultiCell() && !def.isPartitionKey())
                     Assert.fail("MV creation failed on " + def);
             }
 
-
             try
             {
-                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
-                               + (def.name.toString().equals("asciival") ? "" : "AND asciival IS NOT NULL ") + " PRIMARY KEY ("
+                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
+                               + asciival + " PRIMARY KEY ("
                                + def.name + ", asciival" + (def.name.toString().equals("k") ? "" : ", k") + ")";
                 createView("mv2_" + def.name, query);
 
                 if (def.type.isMultiCell())
                     Assert.fail("MV on a multicell should fail " + def);
             }
-            catch (InvalidQueryException e)
+            catch (Exception e)
             {
                 if (!def.type.isMultiCell() && !def.isPartitionKey())
                     Assert.fail("MV creation failed on " + def);
@@ -205,41 +214,42 @@
 
             try
             {
-                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
-                               + (def.name.toString().equals("asciival") ? "" : "AND asciival IS NOT NULL ") + "PRIMARY KEY ((" + def.name + ", k), asciival)";
+                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
+                               + asciival + "PRIMARY KEY ((" + def.name + ", k), asciival)";
                 createView("mv3_" + def.name, query);
 
                 if (def.type.isMultiCell())
                     Assert.fail("MV on a multicell should fail " + def);
             }
-            catch (InvalidQueryException e)
+            catch (Exception e)
             {
                 if (!def.type.isMultiCell() && !def.isPartitionKey())
                     Assert.fail("MV creation failed on " + def);
             }
 
-
             try
             {
-                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
-                               + (def.name.toString().equals("asciival") ? "" : "AND asciival IS NOT NULL ") + "PRIMARY KEY ((" + def.name + ", k), asciival)";
+                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
+                               + asciival + "PRIMARY KEY ((" + def.name + ", k), asciival)";
                 createView("mv3_" + def.name, query);
 
                 Assert.fail("Should fail on duplicate name");
             }
             catch (Exception e)
             {
+                Assertions.assertThat(e.getCause()).isInstanceOf(RequestValidationException.class);
             }
 
             try
             {
-                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
-                               + (def.name.toString().equals("asciival") ? "" : "AND asciival IS NOT NULL ") + "PRIMARY KEY ((" + def.name + ", k), nonexistentcolumn)";
+                String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL "
+                               + asciival + "PRIMARY KEY ((" + def.name + ", k), nonexistentcolumn)";
                 createView("mv4_" + def.name, query);
                 Assert.fail("Should fail with unknown base column");
             }
-            catch (InvalidQueryException e)
+            catch (Exception e)
             {
+                Assertions.assertThat(e.getCause()).isInstanceOf(RequestValidationException.class);
             }
         }
 
@@ -288,24 +298,24 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, b, c) WITH CLUSTERING ORDER BY (b DESC, c ASC)");
-        createView("mv2", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, c, b) WITH CLUSTERING ORDER BY (c ASC, b ASC)");
-        createView("mv3", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, b, c)");
-        createView("mv4", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, c, b) WITH CLUSTERING ORDER BY (c DESC, b ASC)");
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, b, c) WITH CLUSTERING ORDER BY (b DESC, c ASC)");
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, c, b) WITH CLUSTERING ORDER BY (c ASC, b ASC)");
+        String mv3 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, b, c)");
+        String mv4 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, c, b) WITH CLUSTERING ORDER BY (c DESC, b ASC)");
 
         updateView("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 1, 1, 1);
         updateView("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 2, 2, 2);
 
-        ResultSet mvRows = executeNet("SELECT b FROM mv1");
+        ResultSet mvRows = executeNet("SELECT b FROM " + mv1);
         assertRowsNet(mvRows, row(2), row(1));
 
-        mvRows = executeNet("SELECT c FROM mv2");
+        mvRows = executeNet("SELECT c FROM " + mv2);
         assertRowsNet(mvRows, row(1), row(2));
 
-        mvRows = executeNet("SELECT b FROM mv3");
+        mvRows = executeNet("SELECT b FROM " + mv3);
         assertRowsNet(mvRows, row(1), row(2));
 
-        mvRows = executeNet("SELECT c FROM mv4");
+        mvRows = executeNet("SELECT c FROM " + mv4);
         assertRowsNet(mvRows, row(2), row(1));
     }
 
@@ -320,11 +330,13 @@
         executeNet("USE " + keyspace());
 
         // Cannot use SELECT *, as those are always handled by the includeAll shortcut in View.updateAffectsView
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT a, b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b FROM %s " +
+                   "WHERE a IS NOT NULL AND b IS NOT NULL " +
+                   "PRIMARY KEY (b, a)");
 
         updateView("INSERT INTO %s (a, b) VALUES (?, ?)", 1, 1);
 
-        ResultSet mvRows = executeNet("SELECT a, b FROM mv1");
+        ResultSet mvRows = executeViewNet("SELECT a, b FROM %s");
         assertRowsNet(mvRows, row(1, 1));
     }
 
@@ -339,11 +351,11 @@
         executeNet("USE " + keyspace());
 
         // Cannot use SELECT *, as those are always handled by the includeAll shortcut in View.updateAffectsView
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT a, b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
 
         updateView("INSERT INTO %s (a, b) VALUES (?, ?)", 1, 1);
 
-        ResultSet mvRows = executeNet("SELECT a, b FROM mv1");
+        ResultSet mvRows = executeViewNet("SELECT a, b FROM %s");
         assertRowsNet(mvRows, row(1, 1));
     }
 
@@ -358,18 +370,20 @@
                     "PRIMARY KEY (a, b))");
 
         executeNet("USE " + keyspace());
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND d IS NOT NULL PRIMARY KEY (a, d, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE a IS NOT NULL AND b IS NOT NULL AND d IS NOT NULL " +
+                   "PRIMARY KEY (a, d, b)");
 
         updateView("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
-        ResultSet mvRows = executeNet("SELECT a, d, b, c FROM mv1");
+        ResultSet mvRows = executeViewNet("SELECT a, d, b, c FROM %s");
         assertRowsNet(mvRows, row(0, 0, 0, 0));
 
         updateView("DELETE c FROM %s WHERE a = ? AND b = ?", 0, 0);
-        mvRows = executeNet("SELECT a, d, b, c FROM mv1");
+        mvRows = executeViewNet("SELECT a, d, b, c FROM %s");
         assertRowsNet(mvRows, row(0, 0, 0, null));
 
         updateView("DELETE d FROM %s WHERE a = ? AND b = ?", 0, 0);
-        mvRows = executeNet("SELECT a, d, b FROM mv1");
+        mvRows = executeViewNet("SELECT a, d, b FROM %s");
         assertTrue(mvRows.isExhausted());
     }
 
@@ -384,23 +398,25 @@
                     "PRIMARY KEY (a, b))");
 
         executeNet("USE " + keyspace());
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND d IS NOT NULL PRIMARY KEY (d, a, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE a IS NOT NULL AND b IS NOT NULL AND d IS NOT NULL " +
+                   "PRIMARY KEY (d, a, b)");
 
         updateView("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
-        ResultSet mvRows = executeNet("SELECT a, d, b, c FROM mv1");
+        ResultSet mvRows = executeViewNet("SELECT a, d, b, c FROM %s");
         assertRowsNet(mvRows, row(0, 0, 0, 0));
 
         updateView("DELETE c FROM %s WHERE a = ? AND b = ?", 0, 0);
-        mvRows = executeNet("SELECT a, d, b, c FROM mv1");
+        mvRows = executeViewNet("SELECT a, d, b, c FROM %s");
         assertRowsNet(mvRows, row(0, 0, 0, null));
 
         updateView("DELETE d FROM %s WHERE a = ? AND b = ?", 0, 0);
-        mvRows = executeNet("SELECT a, d, b FROM mv1");
+        mvRows = executeViewNet("SELECT a, d, b FROM %s");
         assertTrue(mvRows.isExhausted());
     }
 
     @Test
-    public void testMultipleNonPrimaryKeysInView() throws Throwable
+    public void testMultipleNonPrimaryKeysInView()
     {
         createTable("CREATE TABLE %s (" +
                     "a int," +
@@ -412,22 +428,27 @@
 
         try
         {
-            createView("mv_de", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND d IS NOT NULL AND e IS NOT NULL PRIMARY KEY ((d, a), b, e, c)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND d IS NOT NULL AND e IS NOT NULL PRIMARY KEY ((d, a), b, e, c)");
             Assert.fail("Should have rejected a query including multiple non-primary key base columns");
         }
         catch (Exception e)
         {
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(InvalidRequestException.class);
+            Assertions.assertThat(cause.getMessage()).contains("Cannot include more than one non-primary key column");
         }
 
         try
         {
-            createView("mv_de", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND d IS NOT NULL AND e IS NOT NULL PRIMARY KEY ((a, b), c, d, e)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND d IS NOT NULL AND e IS NOT NULL PRIMARY KEY ((a, b), c, d, e)");
             Assert.fail("Should have rejected a query including multiple non-primary key base columns");
         }
         catch (Exception e)
         {
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(InvalidRequestException.class);
+            Assertions.assertThat(cause.getMessage()).contains("Cannot include more than one non-primary key column");
         }
-
     }
 
     @Test
@@ -437,10 +458,9 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv",
-                   "CREATE MATERIALIZED VIEW %s AS" +
+        createView("CREATE MATERIALIZED VIEW %s AS" +
                    "  SELECT id1, v1, id2, v2" +
-                   "  FROM %%s" +
+                   "  FROM %s" +
                    "  WHERE id1 IS NOT NULL AND v1 IS NOT NULL AND id2 IS NOT NULL" +
                    "  PRIMARY KEY (id1, v1, id2)" +
                    "  WITH CLUSTERING ORDER BY (v1 DESC, id2 ASC)");
@@ -448,14 +468,14 @@
         execute("INSERT INTO %s (id1, id2, v1, v2) VALUES (?, ?, ?, ?)", 0, 1, "foo", "bar");
 
         assertRowsNet(executeNet("SELECT * FROM %s"), row(0, 1, "foo", "bar"));
-        assertRowsNet(executeNet("SELECT * FROM mv"), row(0, "foo", 1, "bar"));
+        assertRowsNet(executeViewNet("SELECT * FROM %s"), row(0, "foo", 1, "bar"));
 
         executeNet("UPDATE %s SET v1=? WHERE id1=? AND id2=?", null, 0, 1);
         assertRowsNet(executeNet("SELECT * FROM %s"), row(0, 1, null, "bar"));
-        assertRowsNet(executeNet("SELECT * FROM mv"));
+        assertRowsNet(executeViewNet("SELECT * FROM %s"));
 
         executeNet("UPDATE %s SET v2=? WHERE id1=? AND id2=?", "rab", 0, 1);
         assertRowsNet(executeNet("SELECT * FROM %s"), row(0, 1, null, "rab"));
-        assertRowsNet(executeNet("SELECT * FROM mv"));
+        assertRowsNet(executeViewNet("SELECT * FROM %s"));
     }
 }
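Note (illustration, not part of the patch): the ViewPKTest hunks above converge on the unnamed-view helpers from ViewAbstractTest, where createView(...) generates and returns the view name and executeView/executeViewNet expand the %s placeholder to the most recently created view. A minimal, hypothetical sketch of a test written against that pattern follows; the table and column names are made up for illustration.

// Illustrative sketch only, assuming the ViewAbstractTest helpers shown in this
// diff: createView returns the generated view name, executeView targets the
// current view, and updateView waits for pending view mutations to drain.
import org.junit.Assert;
import org.junit.Test;

public class ViewHelperPatternExample extends ViewAbstractTest
{
    @Test
    public void testUnnamedViewHelpers() throws Throwable
    {
        createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k, c))");

        // The first %s is replaced with a generated view name, the second with
        // the current base table; the helper also waits for the view build.
        String view = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
                                 "WHERE k IS NOT NULL AND c IS NOT NULL AND v IS NOT NULL " +
                                 "PRIMARY KEY (v, k, c)");

        updateView("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 0, 0, 0);

        // executeView substitutes the current view for %s; the returned name
        // can also be interpolated directly into a raw query string.
        Assert.assertEquals(1, executeView("SELECT * FROM %s").size());
        Assert.assertEquals(1, execute("SELECT * FROM " + view).size());
    }
}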
diff --git a/test/unit/org/apache/cassandra/cql3/ViewRangesTest.java b/test/unit/org/apache/cassandra/cql3/ViewRangesTest.java
index ce67a9b..5d6e8a7 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewRangesTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewRangesTest.java
@@ -21,8 +21,8 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.SystemKeyspace;
 
 /*
  * This test class was too large and used to timeout CASSANDRA-16777. We're splitting it into:
@@ -49,16 +49,14 @@
     {
         createTable("CREATE TABLE %s (k1 int, c1 int, c2 int, v1 int, v2 int, PRIMARY KEY (k1, c1, c2))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("view1",
-                   "CREATE MATERIALIZED VIEW view1 AS SELECT * FROM %%s WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL PRIMARY KEY (k1, c2, c1)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL " +
+                   "PRIMARY KEY (k1, c2, c1)");
 
         updateView("DELETE FROM %s USING TIMESTAMP 10 WHERE k1 = 1 and c1=1");
 
         if (flush)
-            Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush();
+            Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         String table = KEYSPACE + "." + currentTable();
         updateView("BEGIN BATCH " +
@@ -75,7 +73,7 @@
                                 row(1, 0, 0, 0, 0),
                                 row(1, 0, 1, 0, 1),
                                 row(1, 2, 0, 2, 0));
-        assertRowsIgnoringOrder(execute("select k1,c1,c2,v1,v2 from view1"),
+        assertRowsIgnoringOrder(executeView("select k1,c1,c2,v1,v2 from %s"),
                                 row(1, 0, 0, 0, 0),
                                 row(1, 0, 1, 0, 1),
                                 row(1, 2, 0, 2, 0));
@@ -93,39 +91,37 @@
                     "PRIMARY KEY((k, asciival), bigintval, textval1)" +
                     ")");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv_test1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE textval2 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL AND textval1 IS NOT NULL PRIMARY KEY ((textval2, k), asciival, bigintval, textval1)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE textval2 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL AND textval1 IS NOT NULL " +
+                   "PRIMARY KEY ((textval2, k), asciival, bigintval, textval1)");
 
         for (int i = 0; i < 100; i++)
-            updateView("INSERT into %s (k,asciival,bigintval,textval1,textval2)VALUES(?,?,?,?,?)", 0, "foo", (long) i % 2, "bar" + i, "baz");
+            updateView("INSERT into %s (k,asciival,bigintval,textval1,textval2) VALUES (?,?,?,?,?)",
+                       0, "foo", (long) i % 2, "bar" + i, "baz");
 
         Assert.assertEquals(50, execute("select * from %s where k = 0 and asciival = 'foo' and bigintval = 0").size());
         Assert.assertEquals(50, execute("select * from %s where k = 0 and asciival = 'foo' and bigintval = 1").size());
 
-        Assert.assertEquals(100, execute("select * from mv_test1").size());
+        Assert.assertEquals(100, executeView("select * from %s").size());
 
         //Check the builder works
-        createView("mv_test2", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE textval2 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL AND textval1 IS NOT NULL PRIMARY KEY ((textval2, k), asciival, bigintval, textval1)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE textval2 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL AND textval1 IS NOT NULL " +
+                   "PRIMARY KEY ((textval2, k), asciival, bigintval, textval1)");
 
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test2"))
-            Thread.sleep(10);
+        Assert.assertEquals(100, executeView("select * from %s").size());
 
-        Assert.assertEquals(100, execute("select * from mv_test2").size());
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE textval2 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL AND textval1 IS NOT NULL " +
+                   "PRIMARY KEY ((textval2, k), bigintval, textval1, asciival)");
 
-        createView("mv_test3", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE textval2 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL AND textval1 IS NOT NULL PRIMARY KEY ((textval2, k), bigintval, textval1, asciival)");
-
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test3"))
-            Thread.sleep(10);
-
-        Assert.assertEquals(100, execute("select * from mv_test3").size());
-        Assert.assertEquals(100, execute("select asciival from mv_test3 where textval2 = ? and k = ?", "baz", 0).size());
+        Assert.assertEquals(100, executeView("select * from %s").size());
+        Assert.assertEquals(100, executeView("select asciival from %s where textval2 = ? and k = ?", "baz", 0).size());
 
         //Write a RT and verify the data is removed from index
         updateView("DELETE FROM %s WHERE k = ? AND asciival = ? and bigintval = ?", 0, "foo", 0L);
 
-        Assert.assertEquals(50, execute("select asciival from mv_test3 where textval2 = ? and k = ?", "baz", 0).size());
+        Assert.assertEquals(50, executeView("select asciival from %s where textval2 = ? and k = ?", "baz", 0).size());
     }
 
 
@@ -140,10 +136,9 @@
                     "PRIMARY KEY((k, asciival), bigintval)" +
                     ")");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE textval1 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL PRIMARY KEY ((textval1, k), asciival, bigintval)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE textval1 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL " +
+                   "PRIMARY KEY ((textval1, k), asciival, bigintval)");
 
         for (int i = 0; i < 100; i++)
             updateView("INSERT into %s (k,asciival,bigintval,textval1)VALUES(?,?,?,?)", 0, "foo", (long) i % 2, "bar" + i);
@@ -153,13 +148,13 @@
 
 
         Assert.assertEquals(2, execute("select * from %s").size());
-        Assert.assertEquals(2, execute("select * from mv").size());
+        Assert.assertEquals(2, executeView("select * from %s").size());
 
         //Write a RT and verify the data is removed from index
         updateView("DELETE FROM %s WHERE k = ? AND asciival = ? and bigintval = ?", 0, "foo", 0L);
 
         Assert.assertEquals(1, execute("select * from %s").size());
-        Assert.assertEquals(1, execute("select * from mv").size());
+        Assert.assertEquals(1, executeView("select * from %s").size());
     }
 
     @Test
@@ -173,10 +168,9 @@
                     "PRIMARY KEY((k, asciival), bigintval)" +
                     ")");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE textval1 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL PRIMARY KEY ((textval1, k), asciival, bigintval)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE textval1 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL " +
+                   "PRIMARY KEY ((textval1, k), asciival, bigintval)");
 
         for (int i = 0; i < 100; i++)
             updateView("INSERT into %s (k,asciival,bigintval,textval1)VALUES(?,?,?,?)", 0, "foo", (long) i % 2, "bar" + i);
@@ -186,12 +180,12 @@
 
 
         Assert.assertEquals(2, execute("select * from %s").size());
-        Assert.assertEquals(2, execute("select * from mv").size());
+        Assert.assertEquals(2, executeView("select * from %s").size());
 
         //Write a RT and verify the data is removed from index
         updateView("DELETE FROM %s WHERE k = ? AND asciival = ? and bigintval >= ?", 0, "foo", 0L);
 
         Assert.assertEquals(0, execute("select * from %s").size());
-        Assert.assertEquals(0, execute("select * from mv").size());
+        Assert.assertEquals(0, executeView("select * from %s").size());
     }
 }
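Note (illustration, not part of the patch): the ViewRangesTest hunks also capture the forceBlockingFlush signature change, where callers now pass a ColumnFamilyStore.FlushReason. A hedged sketch of a small helper using that overload, with the keyspace/table lookup exactly as used in the hunk above; the helper name is hypothetical.

// Sketch only: flush the current base table in a unit test with the
// FlushReason-taking overload this diff migrates callers to.
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;

// ... inside a CQLTester subclass ...
private void flushBaseTable()
{
    ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
    cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
}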
diff --git a/test/unit/org/apache/cassandra/cql3/ViewSchemaTest.java b/test/unit/org/apache/cassandra/cql3/ViewSchemaTest.java
index a1cc34a..09d7e15 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewSchemaTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewSchemaTest.java
@@ -22,122 +22,55 @@
 import java.math.BigInteger;
 import java.net.InetAddress;
 import java.text.SimpleDateFormat;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Date;
 import java.util.HashSet;
-import java.util.List;
 import java.util.UUID;
 
 import org.junit.Assert;
-
-import com.datastax.driver.core.exceptions.OperationTimedOutException;
-import org.apache.cassandra.concurrent.SEPExecutor;
-import org.apache.cassandra.concurrent.Stage;
-import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.SchemaCQLHelper;
-import org.apache.cassandra.schema.ColumnMetadata;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.exceptions.InvalidRequestException;
-import org.apache.cassandra.db.SystemKeyspace;
-import org.apache.cassandra.serializers.SimpleDateSerializer;
-import org.apache.cassandra.serializers.TimeSerializer;
-import org.apache.cassandra.transport.ProtocolVersion;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.datastax.driver.core.exceptions.InvalidQueryException;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.SchemaCQLHelper;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.serializers.SimpleDateSerializer;
+import org.apache.cassandra.serializers.TimeSerializer;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.assertj.core.api.Assertions;
 
 import static org.junit.Assert.assertTrue;
 
-
-public class ViewSchemaTest extends CQLTester
+public class ViewSchemaTest extends ViewAbstractTest
 {
-    ProtocolVersion protocolVersion = ProtocolVersion.V4;
-    private final List<String> views = new ArrayList<>();
-
-    @BeforeClass
-    public static void startup()
-    {
-        requireNetwork();
-    }
-    @Before
-    public void begin()
-    {
-        views.clear();
-    }
-
-    @After
-    public void end() throws Throwable
-    {
-        for (String viewName : views)
-            executeNet(protocolVersion, "DROP MATERIALIZED VIEW " + viewName);
-    }
-
-    private void createView(String name, String query) throws Throwable
-    {
-        try
-        {
-            executeNet(protocolVersion, String.format(query, name));
-            // If exception is thrown, the view will not be added to the list; since it shouldn't have been created, this is
-            // the desired behavior
-            views.add(name);
-        }
-        catch (OperationTimedOutException ex)
-        {
-            // ... except for timeout, when we actually do not know whether the view was created or not
-            views.add(name);
-            throw ex;
-        }
-    }
-
-    private void updateView(String query, Object... params) throws Throwable
-    {
-        executeNet(protocolVersion, query, params);
-        while (!(((SEPExecutor) Stage.VIEW_MUTATION.executor()).getPendingTaskCount() == 0
-                 && ((SEPExecutor) Stage.VIEW_MUTATION.executor()).getActiveTaskCount() == 0))
-        {
-            Thread.sleep(1);
-        }
-    }
-
     @Test
     public void testCaseSensitivity() throws Throwable
     {
         createTable("CREATE TABLE %s (\"theKey\" int, \"theClustering\" int, \"theValue\" int, PRIMARY KEY (\"theKey\", \"theClustering\"))");
 
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
         execute("INSERT INTO %s (\"theKey\", \"theClustering\", \"theValue\") VALUES (?, ?, ?)", 0, 0, 0);
 
-        createView("mv_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
-                              "WHERE \"theKey\" IS NOT NULL AND \"theClustering\" IS NOT NULL AND \"theValue\" IS NOT NULL " +
-                              "PRIMARY KEY (\"theKey\", \"theClustering\")");
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                "WHERE \"theKey\" IS NOT NULL AND \"theClustering\" IS NOT NULL AND \"theValue\" IS NOT NULL " +
+                                "PRIMARY KEY (\"theKey\", \"theClustering\")");
 
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test"))
-            Thread.sleep(10);
-        createView("mv_test2", "CREATE MATERIALIZED VIEW %s AS SELECT \"theKey\", \"theClustering\", \"theValue\" FROM %%s " +
-                               "WHERE \"theKey\" IS NOT NULL AND \"theClustering\" IS NOT NULL AND \"theValue\" IS NOT NULL " +
-                               "PRIMARY KEY (\"theKey\", \"theClustering\")");
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test2"))
-            Thread.sleep(10);
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT \"theKey\", \"theClustering\", \"theValue\" FROM %s " +
+                                "WHERE \"theKey\" IS NOT NULL AND \"theClustering\" IS NOT NULL AND \"theValue\" IS NOT NULL " +
+                                "PRIMARY KEY (\"theKey\", \"theClustering\")");
 
-        for (String mvname : Arrays.asList("mv_test", "mv_test2"))
+        for (String mvname : Arrays.asList(mv1, mv2))
         {
             assertRows(execute("SELECT \"theKey\", \"theClustering\", \"theValue\" FROM " + mvname),
-               row(0, 0, 0)
-            );
+                       row(0, 0, 0));
         }
 
-        executeNet(protocolVersion, "ALTER TABLE %s RENAME \"theClustering\" TO \"Col\"");
+        executeNet("ALTER TABLE %s RENAME \"theClustering\" TO \"Col\"");
 
-        for (String mvname : Arrays.asList("mv_test", "mv_test2"))
+        for (String mvname : Arrays.asList(mv1, mv2))
         {
             assertRows(execute("SELECT \"theKey\", \"Col\", \"theValue\" FROM " + mvname),
                        row(0, 0, 0)
@@ -154,54 +87,56 @@
                     "bigintval bigint, " +
                     "PRIMARY KEY((k, asciival)))");
 
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
-        createView("mv1_test", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE bigintval IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL PRIMARY KEY (bigintval, k, asciival)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE bigintval IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL " +
+                   "PRIMARY KEY (bigintval, k, asciival)");
         updateView("INSERT INTO %s(k,asciival,bigintval)VALUES(?,?,?)", 0, "foo", 1L);
 
         try
         {
-            updateView("INSERT INTO mv1_test(k,asciival,bigintval) VALUES(?,?,?)", 1, "foo", 2L);
+            updateView("INSERT INTO " + currentView() + "(k,asciival,bigintval) VALUES(?,?,?)", 1, "foo", 2L);
             Assert.fail("Shouldn't be able to modify a MV directly");
         }
-        catch (Exception e)
+        catch (InvalidQueryException e)
         {
+            Assertions.assertThat(e.getMessage()).contains("Cannot directly modify a materialized view");
         }
 
         try
         {
-            executeNet(protocolVersion, "ALTER TABLE mv1_test ADD foo text");
+            executeViewNet("ALTER TABLE %s ADD foo text");
             Assert.fail("Should not be able to use alter table with MV");
         }
-        catch (Exception e)
+        catch (InvalidQueryException e)
         {
+            Assertions.assertThat(e.getMessage()).contains("Cannot use ALTER TABLE on a materialized view");
         }
 
         try
         {
-            executeNet(protocolVersion, "ALTER TABLE mv1_test WITH compaction = { 'class' : 'LeveledCompactionStrategy' }");
+            executeViewNet("ALTER TABLE %s WITH compaction = { 'class' : 'LeveledCompactionStrategy' }");
             Assert.fail("Should not be able to use alter table with MV");
         }
-        catch (Exception e)
+        catch (InvalidQueryException e)
         {
+            Assertions.assertThat(e.getMessage()).contains("Cannot use ALTER TABLE on a materialized view");
         }
 
-        executeNet(protocolVersion, "ALTER MATERIALIZED VIEW mv1_test WITH compaction = { 'class' : 'LeveledCompactionStrategy' }");
+        executeViewNet("ALTER MATERIALIZED VIEW %s WITH compaction = { 'class' : 'LeveledCompactionStrategy' }");
 
         //Test alter add
-        executeNet(protocolVersion, "ALTER TABLE %s ADD foo text");
-        TableMetadata metadata = Schema.instance.getTableMetadata(keyspace(), "mv1_test");
+        executeNet("ALTER TABLE %s ADD foo text");
+        TableMetadata metadata = Schema.instance.getTableMetadata(keyspace(), currentView());
         Assert.assertNotNull(metadata.getColumn(ByteBufferUtil.bytes("foo")));
 
         updateView("INSERT INTO %s(k,asciival,bigintval,foo)VALUES(?,?,?,?)", 0, "foo", 1L, "bar");
         assertRows(execute("SELECT foo from %s"), row("bar"));
 
         //Test alter rename
-        executeNet(protocolVersion, "ALTER TABLE %s RENAME asciival TO bar");
+        executeNet("ALTER TABLE %s RENAME asciival TO bar");
 
         assertRows(execute("SELECT bar from %s"), row("foo"));
-        metadata = Schema.instance.getTableMetadata(keyspace(), "mv1_test");
+        metadata = Schema.instance.getTableMetadata(keyspace(), currentView());
         Assert.assertNotNull(metadata.getColumn(ByteBufferUtil.bytes("bar")));
     }
 
@@ -209,36 +144,33 @@
     @Test
     public void testTwoTablesOneView() throws Throwable
     {
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
         createTable("CREATE TABLE " + keyspace() + ".dummy_table (" +
-                "j int, " +
-                "intval int, " +
-                "PRIMARY KEY (j))");
+                    "j int, " +
+                    "intval int, " +
+                    "PRIMARY KEY (j))");
 
         createTable("CREATE TABLE " + keyspace() + ".real_base (" +
-                "k int, " +
-                "intval int, " +
-                "PRIMARY KEY (k))");
+                    "k int, " +
+                    "intval int, " +
+                    "PRIMARY KEY (k))");
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM " + keyspace() + ".real_base WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
-        createView("mv2", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM " + keyspace() + ".dummy_table WHERE j IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, j)");
+        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM " + keyspace() + ".real_base WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM " + keyspace() + ".dummy_table WHERE j IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, j)");
 
         updateView("INSERT INTO " + keyspace() + ".real_base (k, intval) VALUES (?, ?)", 0, 0);
         assertRows(execute("SELECT k, intval FROM " + keyspace() + ".real_base WHERE k = ?", 0), row(0, 0));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 0), row(0, 0));
+        assertRows(execute("SELECT k, intval from " + mv + " WHERE intval = ?", 0), row(0, 0));
 
         updateView("INSERT INTO " + keyspace() + ".real_base (k, intval) VALUES (?, ?)", 0, 1);
         assertRows(execute("SELECT k, intval FROM " + keyspace() + ".real_base WHERE k = ?", 0), row(0, 1));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 1), row(0, 1));
+        assertRows(execute("SELECT k, intval from " + mv + " WHERE intval = ?", 1), row(0, 1));
 
         assertRows(execute("SELECT k, intval FROM " + keyspace() + ".real_base WHERE k = ?", 0), row(0, 1));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 1), row(0, 1));
+        assertRows(execute("SELECT k, intval from " + mv + " WHERE intval = ?", 1), row(0, 1));
 
-        updateView("INSERT INTO " + keyspace() +".dummy_table (j, intval) VALUES(?, ?)", 0, 1);
+        updateView("INSERT INTO " + keyspace() + ".dummy_table (j, intval) VALUES(?, ?)", 0, 1);
         assertRows(execute("SELECT j, intval FROM " + keyspace() + ".dummy_table WHERE j = ?", 0), row(0, 1));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 1), row(0, 1));
+        assertRows(execute("SELECT k, intval from " + mv + " WHERE intval = ?", 1), row(0, 1));
     }
 
     @Test
@@ -249,23 +181,23 @@
                     "intval int, " +
                     "PRIMARY KEY (k))");
 
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
+        String view = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                 "WHERE k IS NOT NULL AND intval IS NOT NULL " +
+                                 "PRIMARY KEY (intval, k)");
 
         updateView("INSERT INTO %s (k, intval) VALUES (?, ?)", 0, 0);
         assertRows(execute("SELECT k, intval FROM %s WHERE k = ?", 0), row(0, 0));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 0), row(0, 0));
+        assertRows(executeView("SELECT k, intval from %s WHERE intval = ?", 0), row(0, 0));
 
-        executeNet(protocolVersion, "DROP MATERIALIZED VIEW mv");
-        views.remove("mv");
+        executeNet("DROP MATERIALIZED VIEW " + view);
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
+        createView(view, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                         "WHERE k IS NOT NULL AND intval IS NOT NULL " +
+                         "PRIMARY KEY (intval, k)");
 
         updateView("INSERT INTO %s (k, intval) VALUES (?, ?)", 0, 1);
         assertRows(execute("SELECT k, intval FROM %s WHERE k = ?", 0), row(0, 1));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 1), row(0, 1));
+        assertRows(executeView("SELECT k, intval from %s WHERE intval = ?", 1), row(0, 1));
     }
 
     @Test
@@ -303,14 +235,11 @@
 
         TableMetadata metadata = currentTableMetadata();
 
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
         for (ColumnMetadata def : new HashSet<>(metadata.columns()))
         {
             try
             {
-                createView("mv_" + def.name, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL PRIMARY KEY (" + def.name + ",k)");
+                createView("mv_" + def.name, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL PRIMARY KEY (" + def.name + ",k)");
 
                 if (def.type.isMultiCell())
                     Assert.fail("MV on a multicell should fail " + def);
@@ -318,7 +247,7 @@
                 if (def.isPartitionKey())
                     Assert.fail("MV on partition key should fail " + def);
             }
-            catch (InvalidQueryException e)
+            catch (Exception e)
             {
                 if (!def.type.isMultiCell() && !def.isPartitionKey())
                     Assert.fail("MV creation failed on " + def);
@@ -678,20 +607,19 @@
     public void testDropTableWithMV() throws Throwable
     {
         createTable("CREATE TABLE %s (" +
-                "a int," +
-                "b int," +
-                "c int," +
-                "d int," +
-                "PRIMARY KEY (a, b, c))");
+                    "a int," +
+                    "b int," +
+                    "c int," +
+                    "d int," +
+                    "PRIMARY KEY (a, b, c))");
 
-        executeNet(protocolVersion, "USE " + keyspace());
+        executeNet("USE " + keyspace());
 
-        createView(keyspace() + ".mv1",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, b, c)");
+        String mv = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (a, b, c)");
 
         try
         {
-            executeNet(protocolVersion, "DROP TABLE " + keyspace() + ".mv1");
+            executeNet("DROP TABLE " + keyspace() + '.' + mv);
             Assert.fail();
         }
         catch (InvalidQueryException e)
@@ -707,12 +635,12 @@
         // format changes.
         createTable("CREATE TABLE %s ( a int, b int, c int, d int, PRIMARY KEY (a, b, c))");
 
-        executeNet(protocolVersion, "USE " + keyspace());
+        executeNet("USE " + keyspace());
 
         assertInvalidMessage("Non-primary key columns can only be restricted with 'IS NOT NULL'",
                              "CREATE MATERIALIZED VIEW " + keyspace() + ".mv AS SELECT * FROM %s "
-                                     + "WHERE b IS NOT NULL AND c IS NOT NULL AND a IS NOT NULL "
-                                     + "AND d = 1 PRIMARY KEY (c, b, a)");
+                             + "WHERE b IS NOT NULL AND c IS NOT NULL AND a IS NOT NULL "
+                             + "AND d = 1 PRIMARY KEY (c, b, a)");
     }
 
     @Test
@@ -720,9 +648,6 @@
     {
         createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY(a))");
 
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
         execute("INSERT into %s (a,b,c,d) VALUES (?,?,?,?)", 1, 2, 3, 4);
 
         assertInvalidThrowMessage("Cannot use token relation when defining a materialized view", InvalidRequestException.class,
@@ -740,11 +665,8 @@
                     "v int, " +
                     "PRIMARY KEY (pk, c1, c2, c3))");
 
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 DESC, c1 ASC, c3 ASC)");
-        createView("mv2", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 ASC, c1 DESC, c3 DESC)");
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 DESC, c1 ASC, c3 ASC)");
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 ASC, c1 DESC, c3 DESC)");
 
         updateView("INSERT INTO %s (pk, c1, c2, c3, v) VALUES (?, ?, ?, ?, ?)", 0, 0, 0, 0, 0);
         updateView("INSERT INTO %s (pk, c1, c2, c3, v) VALUES (?, ?, ?, ?, ?)", 0, 0, 0, 1, 1);
@@ -767,7 +689,7 @@
                    row(0, 1, 2, 1, 7),
                    row(0, 2, 1, 1, 8));
 
-        assertRows(execute("SELECT * FROM mv1 WHERE pk = ?", 0),
+        assertRows(execute("SELECT * FROM " + mv1 + " WHERE pk = ?", 0),
                    row(0, 2, 1, 1, 7),
                    row(0, 1, 0, 0, 3),
                    row(0, 1, 0, 1, 4),
@@ -778,7 +700,7 @@
                    row(0, 0, 0, 1, 1),
                    row(0, 0, 0, 2, 2));
 
-        assertRows(execute("SELECT * FROM mv2 WHERE pk = ?", 0),
+        assertRows(execute("SELECT * FROM " + mv2 + " WHERE pk = ?", 0),
                    row(0, 0, 0, 2, 2),
                    row(0, 0, 0, 1, 1),
                    row(0, 0, 0, 0, 0),
@@ -801,12 +723,9 @@
                     "v int, " +
                     "PRIMARY KEY (pk, c1, c2, c3)) WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC, c3 DESC)");
 
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
-
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3)");
-        createView("mv2", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 DESC, c1 ASC, c3 ASC)");
-        createView("mv3", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 ASC, c1 DESC, c3 DESC)");
+        String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3)");
+        String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 DESC, c1 ASC, c3 ASC)");
+        String mv3 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE pk IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL and c3 IS NOT NULL PRIMARY KEY (pk, c2, c1, c3) WITH CLUSTERING ORDER BY (c2 ASC, c1 DESC, c3 DESC)");
 
         updateView("INSERT INTO %s (pk, c1, c2, c3, v) VALUES (?, ?, ?, ?, ?)", 0, 0, 0, 0, 0);
         updateView("INSERT INTO %s (pk, c1, c2, c3, v) VALUES (?, ?, ?, ?, ?)", 0, 0, 0, 1, 1);
@@ -827,9 +746,9 @@
                    row(0, 0, 0, 0, 0),
                    row(0, 0, 1, 2, 5),
                    row(0, 0, 1, 1, 4),
-                  row(0, 0, 1, 0, 3));
+                   row(0, 0, 1, 0, 3));
 
-        assertRows(execute("SELECT * FROM mv1 WHERE pk = ?", 0),
+        assertRows(execute("SELECT * FROM " + mv1 + " WHERE pk = ?", 0),
                    row(0, 0, 0, 2, 2),
                    row(0, 0, 0, 1, 1),
                    row(0, 0, 0, 0, 0),
@@ -840,7 +759,7 @@
                    row(0, 1, 0, 0, 3),
                    row(0, 2, 1, 1, 7));
 
-        assertRows(execute("SELECT * FROM mv2 WHERE pk = ?", 0),
+        assertRows(execute("SELECT * FROM " + mv2 + " WHERE pk = ?", 0),
                    row(0, 2, 1, 1, 7),
                    row(0, 1, 0, 0, 3),
                    row(0, 1, 0, 1, 4),
@@ -851,7 +770,7 @@
                    row(0, 0, 0, 1, 1),
                    row(0, 0, 0, 2, 2));
 
-        assertRows(execute("SELECT * FROM mv3 WHERE pk = ?", 0),
+        assertRows(execute("SELECT * FROM " + mv3 + " WHERE pk = ?", 0),
                    row(0, 0, 0, 2, 2),
                    row(0, 0, 0, 1, 1),
                    row(0, 0, 0, 0, 0),
@@ -864,7 +783,7 @@
     }
 
     @Test
-    public void testViewMetadataCQLNotIncludeAllColumn() throws Throwable
+    public void testViewMetadataCQLNotIncludeAllColumn()
     {
         String createBase = "CREATE TABLE IF NOT EXISTS %s (" +
                             "pk1 int," +
@@ -877,7 +796,7 @@
                             "PRIMARY KEY ((pk1, pk2), ck1, ck2)) WITH " +
                             "CLUSTERING ORDER BY (ck1 ASC, ck2 ASC);";
 
-        String createView = "CREATE MATERIALIZED VIEW IF NOT EXISTS %s AS SELECT pk1, pk2, ck1, ck2, reg1, reg2 FROM %%s "
+        String createView = "CREATE MATERIALIZED VIEW IF NOT EXISTS %s AS SELECT pk1, pk2, ck1, ck2, reg1, reg2 FROM %s "
                             + "WHERE pk2 IS NOT NULL AND pk1 IS NOT NULL AND ck2 IS NOT NULL AND ck1 IS NOT NULL PRIMARY KEY((pk2, pk1), ck2, ck1)";
 
         String expectedViewSnapshot = "CREATE MATERIALIZED VIEW IF NOT EXISTS %s.%s AS\n" +
@@ -894,7 +813,7 @@
     }
 
     @Test
-    public void testViewMetadataCQLIncludeAllColumn() throws Throwable
+    public void testViewMetadataCQLIncludeAllColumn()
     {
         String createBase = "CREATE TABLE IF NOT EXISTS %s (" +
                             "pk1 int," +
@@ -907,7 +826,7 @@
                             "PRIMARY KEY ((pk1, pk2), ck1, ck2)) WITH " +
                             "CLUSTERING ORDER BY (ck1 ASC, ck2 DESC);";
 
-        String createView = "CREATE MATERIALIZED VIEW IF NOT EXISTS %s AS SELECT * FROM %%s "
+        String createView = "CREATE MATERIALIZED VIEW IF NOT EXISTS %s AS SELECT * FROM %s "
                             + "WHERE pk2 IS NOT NULL AND pk1 IS NOT NULL AND ck2 IS NOT NULL AND ck1 IS NOT NULL PRIMARY KEY((pk2, pk1), ck2, ck1)";
 
         String expectedViewSnapshot = "CREATE MATERIALIZED VIEW IF NOT EXISTS %s.%s AS\n" +
@@ -923,19 +842,22 @@
                             expectedViewSnapshot);
     }
 
-    private void testViewMetadataCQL(String createBase, String createView, String viewSnapshotSchema) throws Throwable
+    @Test
+    public void testAlterViewIfExists() throws Throwable
     {
-        execute("USE " + keyspace());
-        executeNet(protocolVersion, "USE " + keyspace());
+        executeNet("USE " + keyspace());
+        executeNet("ALTER MATERIALIZED VIEW IF EXISTS mv1_test WITH compaction = { 'class' : 'LeveledCompactionStrategy' }");
+    }
 
+    private void testViewMetadataCQL(String createBase, String createView, String viewSnapshotSchema)
+    {
         String base = createTable(createBase);
 
-        String view = "mv";
-        createView(view, createView);
+        String view = createView(createView);
 
-        ColumnFamilyStore mv = Keyspace.open(keyspace()).getColumnFamilyStore(view);
-        
-        assertTrue(SchemaCQLHelper.getTableMetadataAsCQL(mv.metadata(), true, true, true)
+        Keyspace keyspace = Keyspace.open(keyspace());
+        ColumnFamilyStore mv = keyspace.getColumnFamilyStore(view);
+        assertTrue(SchemaCQLHelper.getTableMetadataAsCQL(mv.metadata(), keyspace.getMetadata())
                                   .startsWith(String.format(viewSnapshotSchema,
                                                             keyspace(),
                                                             view,
diff --git a/test/unit/org/apache/cassandra/cql3/ViewTest.java b/test/unit/org/apache/cassandra/cql3/ViewTest.java
index 853c767..d82fd32 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewTest.java
@@ -21,28 +21,26 @@
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.google.common.util.concurrent.Uninterruptibles;
 
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
 import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.exceptions.InvalidQueryException;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.view.View;
+import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.SchemaKeyspaceTables;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.utils.FBUtilities;
+import org.assertj.core.api.Assertions;
 import org.awaitility.Awaitility;
 import org.jboss.byteman.contrib.bmunit.BMRule;
 import org.jboss.byteman.contrib.bmunit.BMRules;
@@ -67,7 +65,6 @@
     /** Latch used by {@link #testTruncateWhileBuilding()} Byteman injections. */
     @SuppressWarnings("unused")
     private static final CountDownLatch blockViewBuild = new CountDownLatch(1);
-    private static final AtomicInteger viewNameSeqNumber = new AtomicInteger();
 
     @Test
     public void testNonExistingOnes() throws Throwable
@@ -89,37 +86,37 @@
                     "val text, " +
                     "PRIMARY KEY(k,c))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         try
         {
-            createView("mv_static", "CREATE MATERIALIZED VIEW %%s AS SELECT * FROM %s WHERE sval IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (sval,k,c)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE sval IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (sval,k,c)");
             Assert.fail("Use of static column in a MV primary key should fail");
         }
-        catch (InvalidQueryException e)
+        catch (Exception e)
         {
+            Assert.assertTrue(e.getCause() instanceof InvalidRequestException);
         }
 
         try
         {
-            createView("mv_static", "CREATE MATERIALIZED VIEW %%s AS SELECT val, sval FROM %s WHERE val IS NOT NULL AND  k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val, k, c)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT val, sval FROM %s WHERE val IS NOT NULL AND  k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val, k, c)");
             Assert.fail("Explicit select of static column in MV should fail");
         }
-        catch (InvalidQueryException e)
+        catch (Exception e)
         {
+            Assert.assertTrue(e.getCause() instanceof InvalidRequestException);
         }
 
         try
         {
-            createView("mv_static", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
             Assert.fail("Implicit select of static column in MV should fail");
         }
-        catch (InvalidQueryException e)
+        catch (Exception e)
         {
+            Assert.assertTrue(e.getCause() instanceof InvalidRequestException);
         }
 
-        createView("mv_static", "CREATE MATERIALIZED VIEW %s AS SELECT val,k,c FROM %%s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT val,k,c FROM %s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
 
         for (int i = 0; i < 100; i++)
             updateView("INSERT into %s (k,c,sval,val)VALUES(?,?,?,?)", 0, i % 2, "bar" + i, "baz");
@@ -128,9 +125,9 @@
 
         assertRows(execute("SELECT sval from %s"), row("bar99"), row("bar99"));
 
-        Assert.assertEquals(2, execute("select * from mv_static").size());
+        Assert.assertEquals(2, executeView("select * from %s").size());
 
-        assertInvalid("SELECT sval from mv_static");
+        assertInvalid("SELECT sval from " + currentView());
     }
 
 
@@ -143,73 +140,66 @@
                     "val text, " +
                     "PRIMARY KEY(k,c))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv_tstest", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
 
         for (int i = 0; i < 100; i++)
             updateView("INSERT into %s (k,c,val)VALUES(?,?,?)", 0, i % 2, "baz");
 
-        Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush();
+        Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         Assert.assertEquals(2, execute("select * from %s").size());
-        Assert.assertEquals(2, execute("select * from mv_tstest").size());
+        Assert.assertEquals(2, executeView("select * from %s").size());
 
         assertRows(execute("SELECT val from %s where k = 0 and c = 0"), row("baz"));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "baz"), row(0), row(1));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "baz"), row(0), row(1));
 
         //Make sure an old TS does nothing
         updateView("UPDATE %s USING TIMESTAMP 100 SET val = ? where k = ? AND c = ?", "bar", 0, 0);
         assertRows(execute("SELECT val from %s where k = 0 and c = 0"), row("baz"));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "baz"), row(0), row(1));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "bar"));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "baz"), row(0), row(1));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "bar"));
 
         //Latest TS
         updateView("UPDATE %s SET val = ? where k = ? AND c = ?", "bar", 0, 0);
         assertRows(execute("SELECT val from %s where k = 0 and c = 0"), row("bar"));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "bar"), row(0));
-        assertRows(execute("SELECT c from mv_tstest where k = 0 and val = ?", "baz"), row(1));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "bar"), row(0));
+        assertRows(executeView("SELECT c from %s where k = 0 and val = ?", "baz"), row(1));
     }
 
     @Test
-    public void testCountersTable() throws Throwable
+    public void testCountersTable()
     {
         createTable("CREATE TABLE %s (" +
                     "k int PRIMARY KEY, " +
                     "count counter)");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         try
         {
-            createView("mv_counter", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE count IS NOT NULL AND k IS NOT NULL PRIMARY KEY (count,k)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE count IS NOT NULL AND k IS NOT NULL PRIMARY KEY (count,k)");
             Assert.fail("MV on counter should fail");
         }
-        catch (InvalidQueryException e)
+        catch (Exception e)
         {
+            Assert.assertTrue(e.getCause() instanceof InvalidRequestException);
         }
     }
 
     @Test
-    public void testDurationsTable() throws Throwable
+    public void testDurationsTable()
     {
         createTable("CREATE TABLE %s (" +
                     "k int PRIMARY KEY, " +
                     "result duration)");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         try
         {
-            createView("mv_duration", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE result IS NOT NULL AND k IS NOT NULL PRIMARY KEY (result,k)");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE result IS NOT NULL AND k IS NOT NULL PRIMARY KEY (result,k)");
             Assert.fail("MV on duration should fail");
         }
-        catch (InvalidQueryException e)
+        catch (Exception e)
         {
-            Assert.assertEquals("duration type is not supported for PRIMARY KEY column 'result'", e.getMessage());
+            Throwable cause = e.getCause();
+            Assert.assertEquals("duration type is not supported for PRIMARY KEY column 'result'", cause.getMessage());
         }
     }
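
A note on the pattern that recurs throughout ViewTest after this change: createView() now runs through the internal execution path, so a failed CREATE MATERIALIZED VIEW surfaces as a RuntimeException whose cause is an InvalidRequestException, rather than the driver-side InvalidQueryException the old tests caught. The helper below is only a hedged sketch of how that repeated try/fail/catch block could be factored out; it is not part of this patch, and the helper name and signature are assumptions.

    // Hypothetical helper, not in the patch: asserts that a view-creation attempt fails
    // with an InvalidRequestException cause, optionally checking the message.
    private static void assertCreateViewFails(Runnable createView, String expectedMessagePart)
    {
        try
        {
            createView.run();
            Assert.fail("Materialized view creation should have failed");
        }
        catch (RuntimeException e)
        {
            Throwable cause = e.getCause();
            Assertions.assertThat(cause).isInstanceOf(InvalidRequestException.class);
            if (expectedMessagePart != null)
                Assertions.assertThat(cause.getMessage()).contains(expectedMessagePart);
        }
    }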
 
@@ -222,21 +212,13 @@
                     "intval int, " +
                     "PRIMARY KEY (k, c))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-
-        for(int i = 0; i < 1024; i++)
+        for (int i = 0; i < 1024; i++)
             execute("INSERT INTO %s (k, c, intval) VALUES (?, ?, ?)", 0, i, 0);
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, c, k)");
-
-
-        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv"))
-            Thread.sleep(1000);
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE k IS NOT NULL AND c IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, c, k)");
 
         assertRows(execute("SELECT count(*) from %s WHERE k = ?", 0), row(1024L));
-        assertRows(execute("SELECT count(*) from mv WHERE intval = ?", 0), row(1024L));
+        assertRows(executeView("SELECT count(*) from %s WHERE intval = ?", 0), row(1024L));
     }
 
     @Test
@@ -248,19 +230,16 @@
                     "listval list<int>, " +
                     "PRIMARY KEY (k))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
 
         updateView("INSERT INTO %s (k, intval, listval) VALUES (?, ?, fromJson(?))", 0, 0, "[1, 2, 3]");
         assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(1, 2, 3)));
-        assertRows(execute("SELECT k, listval from mv WHERE intval = ?", 0), row(0, list(1, 2, 3)));
+        assertRows(executeView("SELECT k, listval from %s WHERE intval = ?", 0), row(0, list(1, 2, 3)));
 
         updateView("INSERT INTO %s (k, intval) VALUES (?, ?)", 1, 1);
         updateView("INSERT INTO %s (k, listval) VALUES (?, fromJson(?))", 1, "[1, 2, 3]");
         assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 1), row(1, list(1, 2, 3)));
-        assertRows(execute("SELECT k, listval from mv WHERE intval = ?", 1), row(1, list(1, 2, 3)));
+        assertRows(executeView("SELECT k, listval from %s WHERE intval = ?", 1), row(1, list(1, 2, 3)));
     }
 
     @Test
@@ -268,11 +247,7 @@
     {
         createTable("CREATE TABLE %s (k int, intval int,  listval frozen<list<tuple<text,text>>>, PRIMARY KEY (k))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND listval IS NOT NULL PRIMARY KEY (k, listval)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE k IS NOT NULL AND listval IS NOT NULL PRIMARY KEY (k, listval)");
 
         updateView("INSERT INTO %s (k, intval, listval) VALUES (?, ?, fromJson(?))",
                    0,
@@ -282,7 +257,7 @@
         // verify input
         assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0),
                    row(0, list(tuple("a", "1"), tuple("b", "2"), tuple("c", "3"))));
-        assertRows(execute("SELECT k, listval from mv"),
+        assertRows(executeView("SELECT k, listval from %s"),
                    row(0, list(tuple("a", "1"), tuple("b", "2"), tuple("c", "3"))));
 
         // update listval with the same value and it will be compared in view generator
@@ -292,7 +267,7 @@
         // verify result
         assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0),
                    row(0, list(tuple("a", "1"), tuple("b", "2"), tuple("c", "3"))));
-        assertRows(execute("SELECT k, listval from mv"),
+        assertRows(executeView("SELECT k, listval from %s"),
                    row(0, list(tuple("a", "1"), tuple("b", "2"), tuple("c", "3"))));
     }
 
@@ -304,18 +279,15 @@
                     "intval int, " +
                     "PRIMARY KEY (k))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE k IS NOT NULL AND intval IS NOT NULL PRIMARY KEY (intval, k)");
 
         updateView("INSERT INTO %s (k, intval) VALUES (?, ?)", 0, 0);
         assertRows(execute("SELECT k, intval FROM %s WHERE k = ?", 0), row(0, 0));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 0), row(0, 0));
+        assertRows(executeView("SELECT k, intval from %s WHERE intval = ?", 0), row(0, 0));
 
         updateView("INSERT INTO %s (k, intval) VALUES (?, ?)", 0, 1);
         assertRows(execute("SELECT k, intval FROM %s WHERE k = ?", 0), row(0, 1));
-        assertRows(execute("SELECT k, intval from mv WHERE intval = ?", 1), row(0, 1));
+        assertRows(executeView("SELECT k, intval from %s WHERE intval = ?", 1), row(0, 1));
     }
 
     @Test
@@ -330,32 +302,29 @@
                     "d int, " +
                     "PRIMARY KEY (a, b))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT a, b, c FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
 
         updateView("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
-        assertRows(execute("SELECT a, b, c from mv WHERE b = ?", 0), row(0, 0, 0));
+        assertRows(executeView("SELECT a, b, c from %s WHERE b = ?", 0), row(0, 0, 0));
 
         updateView("UPDATE %s SET d = ? WHERE a = ? AND b = ?", 0, 0, 0);
-        assertRows(execute("SELECT a, b, c from mv WHERE b = ?", 0), row(0, 0, 0));
+        assertRows(executeView("SELECT a, b, c from %s WHERE b = ?", 0), row(0, 0, 0));
 
         // Note: errors here may result in the test hanging when the memtables are flushed as part of the table drop,
         // because empty rows in the memtable will cause the flush to fail.  This will result in a test timeout that
         // should not be ignored.
         String table = KEYSPACE + "." + currentTable();
         updateView("BEGIN BATCH " +
-                "INSERT INTO " + table + " (a, b, c, d) VALUES (?, ?, ?, ?); " + // should be accepted
-                "UPDATE " + table + " SET d = ? WHERE a = ? AND b = ?; " +  // should be accepted
-                "APPLY BATCH",
-                0, 0, 0, 0,
-                1, 0, 1);
-        assertRows(execute("SELECT a, b, c from mv WHERE b = ?", 0), row(0, 0, 0));
-        assertRows(execute("SELECT a, b, c from mv WHERE b = ?", 1), row(0, 1, null));
+                   "INSERT INTO " + table + " (a, b, c, d) VALUES (?, ?, ?, ?); " + // should be accepted
+                   "UPDATE " + table + " SET d = ? WHERE a = ? AND b = ?; " +  // should be accepted
+                   "APPLY BATCH",
+                   0, 0, 0, 0,
+                   1, 0, 1);
+        assertRows(executeView("SELECT a, b, c from %s WHERE b = ?", 0), row(0, 0, 0));
+        assertRows(executeView("SELECT a, b, c from %s WHERE b = ?", 1), row(0, 1, null));
 
-        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore("mv");
-        cfs.forceBlockingFlush();
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentView());
+        Util.flush(cfs);
         Assert.assertEquals(1, cfs.getLiveSSTables().size());
     }
 
@@ -371,12 +340,12 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
 
         String table = keyspace() + "." + currentTable();
         updateView("DELETE FROM " + table + " USING TIMESTAMP 6 WHERE a = 1 AND b = 1;");
         updateView("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) USING TIMESTAMP 3", 1, 1, 1, 1);
-        Assert.assertEquals(0, executeNet("SELECT * FROM mv WHERE c = 1 AND a = 1 AND b = 1").all().size());
+        Assert.assertEquals(0, executeViewNet("SELECT * FROM %s WHERE c = 1 AND a = 1 AND b = 1").all().size());
     }
 
     @Test
@@ -389,13 +358,13 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
 
         updateView("INSERT INTO %s (a, b) VALUES (?, ?)", 1, 1);
         updateView("INSERT INTO %s (a, b) VALUES (?, ?)", 1, 2);
         updateView("INSERT INTO %s (a, b) VALUES (?, ?)", 1, 3);
 
-        ResultSet mvRows = executeNet("SELECT a, b FROM mv1");
+        ResultSet mvRows = executeViewNet("SELECT a, b FROM %s");
         assertRowsNet(mvRows, row(1, 1), row(1, 2), row(1, 3));
 
         updateView(String.format("BEGIN UNLOGGED BATCH " +
@@ -403,7 +372,7 @@
                                  "DELETE FROM %s WHERE a = 1;" +
                                  "APPLY BATCH", currentTable(), currentTable()));
 
-        mvRows = executeNet("SELECT a, b FROM mv1");
+        mvRows = executeViewNet("SELECT a, b FROM %s");
         assertRowsNet(mvRows);
     }
 
@@ -417,18 +386,18 @@
                     "PRIMARY KEY (a))");
 
         executeNet("USE " + keyspace());
-        createView("mvmap", "CREATE MATERIALIZED VIEW %s AS SELECT a, b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b FROM %s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
 
         updateView("INSERT INTO %s (a, b) VALUES (?, ?)", 0, 0);
-        ResultSet mvRows = executeNet("SELECT a, b FROM mvmap WHERE b = ?", 0);
+        ResultSet mvRows = executeViewNet("SELECT a, b FROM %s WHERE b = ?", 0);
         assertRowsNet(mvRows, row(0, 0));
 
         updateView("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 1, map(1, "1"));
-        mvRows = executeNet("SELECT a, b FROM mvmap WHERE b = ?", 1);
+        mvRows = executeViewNet("SELECT a, b FROM %s WHERE b = ?", 1);
         assertRowsNet(mvRows, row(1, 1));
 
         updateView("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, map(0, "0"));
-        mvRows = executeNet("SELECT a, b FROM mvmap WHERE b = ?", 0);
+        mvRows = executeViewNet("SELECT a, b FROM %s WHERE b = ?", 0);
         assertRowsNet(mvRows, row(0, 0));
     }
 
@@ -439,17 +408,16 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv",
-                   "CREATE MATERIALIZED VIEW %s AS" +
+        createView("CREATE MATERIALIZED VIEW %s AS" +
                    "  SELECT \"keyspace\", \"token\"" +
-                   "  FROM %%s" +
+                   "  FROM %s" +
                    "  WHERE \"keyspace\" IS NOT NULL AND \"token\" IS NOT NULL" +
                    "  PRIMARY KEY (\"keyspace\", \"token\")");
 
         execute("INSERT INTO %s (\"token\", \"keyspace\") VALUES (?, ?)", 0, 1);
 
         assertRowsNet(executeNet("SELECT * FROM %s"), row(0, 1));
-        assertRowsNet(executeNet("SELECT * FROM mv"), row(1, 0));
+        assertRowsNet(executeViewNet("SELECT * FROM %s"), row(1, 0));
     }
 
     private void testViewBuilderResume(int concurrentViewBuilders) throws Throwable
@@ -460,9 +428,6 @@
                     "val text, " +
                     "PRIMARY KEY(k,c))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         CompactionManager.instance.setConcurrentViewBuilders(concurrentViewBuilders);
         CompactionManager.instance.setCoreCompactorThreads(1);
         CompactionManager.instance.setMaximumCompactorThreads(1);
@@ -470,43 +435,41 @@
         cfs.disableAutoCompaction();
 
         for (int i = 0; i < 1024; i++)
-            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, ""+i);
+            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, String.valueOf(i));
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         for (int i = 0; i < 1024; i++)
-            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, ""+i);
+            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, String.valueOf(i));
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         for (int i = 0; i < 1024; i++)
-            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, ""+i);
+            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, String.valueOf(i));
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         for (int i = 0; i < 1024; i++)
-            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, ""+i);
+            execute("INSERT into %s (k,c,val)VALUES(?,?,?)", i, i, String.valueOf(i));
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
-        String viewName1 = "mv_test_" + concurrentViewBuilders;
-        createView(viewName1, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
+        String mv1 = createViewAsync("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                     "WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
 
         cfs.enableAutoCompaction();
-        List<Future<?>> futures = CompactionManager.instance.submitBackground(cfs);
+        List<? extends Future<?>> futures = CompactionManager.instance.submitBackground(cfs);
 
-        String viewName2 = viewName1 + "_2";
         //Force a second MV on the same base table, which will restart the first MV builder...
-        createView(viewName2, "CREATE MATERIALIZED VIEW %s AS SELECT val, k, c FROM %%s WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
-
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT val, k, c FROM %s " +
+                   "WHERE val IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (val,k,c)");
 
         //Compact the base table
         FBUtilities.waitOnFutures(futures);
 
-        while (!SystemKeyspace.isViewBuilt(keyspace(), viewName1))
-            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+        waitForViewBuild(mv1);
 
-        assertRows(execute("SELECT count(*) FROM " + viewName1), row(1024L));
+        assertRows(executeView("SELECT count(*) FROM %s"), row(1024L));
     }
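
createViewAsync() and waitForViewBuild() above replace the removed SystemKeyspace.isViewBuilt polling loop. As a rough sketch of the waiting side only (the real CQLTester helper is not shown in this patch; the Awaitility usage and timeout below are assumptions), the wait could be expressed as:

    // Hypothetical sketch: block until the named view is marked as built in the system keyspace.
    private void waitForViewBuild(String view)
    {
        Awaitility.await()
                  .atMost(java.time.Duration.ofMinutes(5))          // assumed timeout
                  .until(() -> SystemKeyspace.isViewBuilt(keyspace(), view));
    }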
 
     @Test
@@ -522,15 +485,13 @@
      * Tests that a client warning is issued on materialized view creation.
      */
     @Test
-    public void testClientWarningOnCreate() throws Throwable
+    public void testClientWarningOnCreate()
     {
         createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
 
         ClientWarn.instance.captureWarnings();
-        String viewName = keyspace() + ".warning_view";
-        execute("CREATE MATERIALIZED VIEW " + viewName +
-                " AS SELECT * FROM %s WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)");
-        views.add(viewName);
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)");
         List<String> warnings = ClientWarn.instance.getWarnings();
 
         Assert.assertNotNull(warnings);
@@ -548,21 +509,22 @@
 
         executeNet("USE " + keyspace());
 
-        boolean enableMaterializedViews = DatabaseDescriptor.getEnableMaterializedViews();
+        boolean enableMaterializedViews = DatabaseDescriptor.getMaterializedViewsEnabled();
         try
         {
-            DatabaseDescriptor.setEnableMaterializedViews(false);
-            createView("view1", "CREATE MATERIALIZED VIEW %s AS SELECT v FROM %%s WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)");
+            DatabaseDescriptor.setMaterializedViewsEnabled(false);
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT v FROM %s WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)");
             Assert.fail("Should not be able to create a materialized view if they are disabled");
         }
-        catch (Throwable e)
+        catch (RuntimeException e)
         {
-            Assert.assertTrue(e instanceof InvalidQueryException);
-            Assert.assertTrue(e.getMessage().contains("Materialized views are disabled"));
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(InvalidRequestException.class);
+            Assertions.assertThat(cause.getMessage()).contains("Materialized views are disabled");
         }
         finally
         {
-            DatabaseDescriptor.setEnableMaterializedViews(enableMaterializedViews);
+            DatabaseDescriptor.setMaterializedViewsEnabled(enableMaterializedViews);
         }
     }
 
@@ -573,8 +535,8 @@
 
         executeNet("USE " + keyspace());
 
-        createView("view1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE \"theKey\" IS NOT NULL AND \"theClustering_1\" IS NOT NULL AND \"theClustering_2\" IS NOT NULL AND \"theValue\" IS NOT NULL  PRIMARY KEY (\"theKey\", \"theClustering_1\", \"theClustering_2\");");
-        createView("view2", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE \"theKey\" IS NOT NULL AND (\"theClustering_1\", \"theClustering_2\") = (1, 2) AND \"theValue\" IS NOT NULL  PRIMARY KEY (\"theKey\", \"theClustering_1\", \"theClustering_2\");");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE \"theKey\" IS NOT NULL AND \"theClustering_1\" IS NOT NULL AND \"theClustering_2\" IS NOT NULL AND \"theValue\" IS NOT NULL  PRIMARY KEY (\"theKey\", \"theClustering_1\", \"theClustering_2\");");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE \"theKey\" IS NOT NULL AND (\"theClustering_1\", \"theClustering_2\") = (1, 2) AND \"theValue\" IS NOT NULL  PRIMARY KEY (\"theKey\", \"theClustering_1\", \"theClustering_2\");");
 
         assertRows(execute("SELECT where_clause FROM system_schema.views"),
                    row("\"theKey\" IS NOT NULL AND \"theClustering_1\" IS NOT NULL AND \"theClustering_2\" IS NOT NULL AND \"theValue\" IS NOT NULL"),
@@ -600,7 +562,7 @@
         assertEmpty(testFunctionInWhereClause("CREATE TABLE %s (k bigint PRIMARY KEY, v int)",
                                               null,
                                               "CREATE MATERIALIZED VIEW %s AS" +
-                                              "   SELECT * FROM %%s WHERE k = token(1) AND v IS NOT NULL " +
+                                              "   SELECT * FROM %s WHERE k = token(1) AND v IS NOT NULL " +
                                               "   PRIMARY KEY (v, k)",
                                               "k = token(1) AND v IS NOT NULL",
                                               "INSERT INTO %s(k, v) VALUES (0, 1)",
@@ -610,7 +572,7 @@
         assertEmpty(testFunctionInWhereClause("CREATE TABLE %s (k bigint PRIMARY KEY, v int)",
                                               null,
                                               "CREATE MATERIALIZED VIEW %s AS" +
-                                              "   SELECT * FROM %%s WHERE k = TOKEN(1) AND v IS NOT NULL" +
+                                              "   SELECT * FROM %s WHERE k = TOKEN(1) AND v IS NOT NULL" +
                                               "   PRIMARY KEY (v, k)",
                                               "k = token(1) AND v IS NOT NULL",
                                               "INSERT INTO %s(k, v) VALUES (0, 1)",
@@ -623,7 +585,7 @@
                                              "   RETURNS int LANGUAGE java" +
                                              "   AS 'return 2;'",
                                              "CREATE MATERIALIZED VIEW %s AS " +
-                                             "   SELECT * FROM %%s WHERE k = fun() AND v IS NOT NULL" +
+                                             "   SELECT * FROM %s WHERE k = fun() AND v IS NOT NULL" +
                                              "   PRIMARY KEY (v, k)",
                                              "k = fun() AND v IS NOT NULL",
                                              "INSERT INTO %s(k, v) VALUES (0, 1)",
@@ -637,7 +599,7 @@
                                              "   LANGUAGE java" +
                                              "   AS 'return 2;'",
                                              "CREATE MATERIALIZED VIEW %s AS " +
-                                             "   SELECT * FROM %%s WHERE k = \"FUN\"() AND v IS NOT NULL" +
+                                             "   SELECT * FROM %s WHERE k = \"FUN\"() AND v IS NOT NULL" +
                                              "   PRIMARY KEY (v, k)",
                                              "k = \"FUN\"() AND v IS NOT NULL",
                                              "INSERT INTO %s(k, v) VALUES (0, 1)",
@@ -652,7 +614,7 @@
                                              "   LANGUAGE java" +
                                              "   AS 'return x;'",
                                              "CREATE MATERIALIZED VIEW %s AS" +
-                                             "   SELECT * FROM %%s WHERE k = \"TOKEN\"(2) AND v IS NOT NULL" +
+                                             "   SELECT * FROM %s WHERE k = \"TOKEN\"(2) AND v IS NOT NULL" +
                                              "   PRIMARY KEY (v, k)",
                                              "k = \"TOKEN\"(2) AND v IS NOT NULL",
                                              "INSERT INTO %s(k, v) VALUES (0, 1)",
@@ -667,7 +629,7 @@
                                              "   LANGUAGE java" +
                                              "   AS 'return x;'",
                                              "CREATE MATERIALIZED VIEW %s AS" +
-                                             "   SELECT * FROM %%s " +
+                                             "   SELECT * FROM %s " +
                                              "   WHERE k = " + keyspace() + ".\"token\"(2) AND v IS NOT NULL" +
                                              "   PRIMARY KEY (v, k)",
                                              "k = " + keyspace() + ".\"token\"(2) AND v IS NOT NULL",
@@ -678,7 +640,7 @@
     /**
      * Tests that truncating a table stops the ongoing builds of its materialized views,
      * so they don't write into the MV data that has been truncated in the base table.
-     *
+     * <p>
      * See CASSANDRA-16567 for further details.
      */
     @Test
@@ -696,33 +658,30 @@
     public void testTruncateWhileBuilding() throws Throwable
     {
         createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY(k, c))");
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
         execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 0, 0, 0);
-        createView("mv",
-                   "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
-                   "WHERE k IS NOT NULL AND c IS NOT NULL AND v IS NOT NULL " +
-                   "PRIMARY KEY (v, c, k)");
+        createViewAsync("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                        "WHERE k IS NOT NULL AND c IS NOT NULL AND v IS NOT NULL " +
+                        "PRIMARY KEY (v, c, k)");
 
         // check that the delayed view builder tasks are either running or pending,
         // and that they haven't written anything yet
         assertThat(runningViewBuilds()).isPositive();
-        assertFalse(SystemKeyspace.isViewBuilt(KEYSPACE, "mv"));
+        assertFalse(SystemKeyspace.isViewBuilt(KEYSPACE, currentView()));
         waitForViewMutations();
-        assertRows(execute("SELECT * FROM mv"));
+        assertRows(executeView("SELECT * FROM %s"));
 
         // truncate the view, this should unblock the view builders, wait for their cancellation,
         // drop the sstables and, finally, start a new view build
         updateView("TRUNCATE %s");
 
         // check that there aren't any rows after truncating
-        assertRows(execute("SELECT * FROM mv"));
+        assertRows(executeView("SELECT * FROM %s"));
 
         // check that the view builder tasks finish and that the view is still empty after that
         Awaitility.await().untilAsserted(() -> assertEquals(0, runningViewBuilds()));
-        assertTrue(SystemKeyspace.isViewBuilt(KEYSPACE, "mv"));
+        assertTrue(SystemKeyspace.isViewBuilt(KEYSPACE, currentView()));
         waitForViewMutations();
-        assertRows(execute("SELECT * FROM mv"));
+        assertRows(executeView("SELECT * FROM %s"));
     }
 
     private static int runningViewBuilds()
@@ -740,28 +699,24 @@
     {
         createTable(createTableQuery);
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         if (createFunctionQuery != null)
         {
             execute(createFunctionQuery);
         }
 
-        String viewName = "view_" + viewNameSeqNumber.getAndIncrement();
-        createView(viewName, createViewQuery);
+        createView(createViewQuery);
 
         // Test the where clause stored in system_schema.views
         String schemaQuery = String.format("SELECT where_clause FROM %s.%s WHERE keyspace_name = ? AND view_name = ?",
                                            SchemaConstants.SCHEMA_KEYSPACE_NAME,
                                            SchemaKeyspaceTables.VIEWS);
-        assertRows(execute(schemaQuery, keyspace(), viewName), row(expectedSchemaWhereClause));
+        assertRows(execute(schemaQuery, keyspace(), currentView()), row(expectedSchemaWhereClause));
 
         for (String insert : insertQueries)
         {
             execute(insert);
         }
 
-        return execute("SELECT * FROM " + viewName);
+        return executeView("SELECT * FROM %s");
     }
 }
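
Taken together, the ViewTest changes drop the hand-maintained view names ("mv", "mv_static", "mv_tstest", ...) in favour of CQLTester helpers that generate and track the current view: createView()/createViewAsync() return the generated name, currentView() reports it, and executeView()/executeViewNet() format "%s" in a query with that name. The class below is only an illustrative sketch of that name-tracking idea, assuming a naming scheme and method names that are not shown in this patch.

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative sketch only; not the actual CQLTester implementation.
    class ViewNameTracker
    {
        private final List<String> views = new ArrayList<>();

        // Generates a fresh view name and substitutes it, plus the base table, into the DDL template.
        String formatCreateView(String ddlTemplate, String keyspace, String baseTable)
        {
            String view = "view_" + views.size();                   // assumed naming scheme
            views.add(view);
            return String.format(ddlTemplate, keyspace + '.' + view, keyspace + '.' + baseTable);
        }

        // The most recently created view; "%s" in view queries is replaced with this name.
        String currentView()
        {
            return views.get(views.size() - 1);
        }

        String formatViewQuery(String queryTemplate, String keyspace)
        {
            return String.format(queryTemplate, keyspace + '.' + currentView());
        }
    }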
diff --git a/test/unit/org/apache/cassandra/cql3/ViewTimesTest.java b/test/unit/org/apache/cassandra/cql3/ViewTimesTest.java
index 6d37501..9c45936 100644
--- a/test/unit/org/apache/cassandra/cql3/ViewTimesTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewTimesTest.java
@@ -26,8 +26,10 @@
 
 import com.datastax.driver.core.ResultSet;
 import com.datastax.driver.core.Row;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.assertj.core.api.Assertions;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
@@ -51,15 +53,14 @@
                     "c int, " +
                     "val int)");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
-        createView("mv_rctstest", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE k IS NOT NULL AND c IS NOT NULL " +
+                   "PRIMARY KEY (k,c)");
 
         updateView("UPDATE %s SET c = ?, val = ? WHERE k = ?", 0, 0, 0);
         updateView("UPDATE %s SET val = ? WHERE k = ?", 1, 0);
         updateView("UPDATE %s SET c = ? WHERE k = ?", 1, 0);
-        assertRows(execute("SELECT c, k, val FROM mv_rctstest"), row(1, 0, 1));
+        assertRows(executeView("SELECT c, k, val FROM %s"), row(1, 0, 1));
 
         updateView("TRUNCATE %s");
 
@@ -69,8 +70,8 @@
         updateView("UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE k = ?", 2, 0);
         updateView("UPDATE %s USING TIMESTAMP 3 SET val = ? WHERE k = ?", 2, 0);
 
-        assertRows(execute("SELECT c, k, val FROM mv_rctstest"), row(2, 0, 2));
-        assertRows(execute("SELECT c, k, val FROM mv_rctstest limit 1"), row(2, 0, 2));
+        assertRows(executeView("SELECT c, k, val FROM %s"), row(2, 0, 2));
+        assertRows(executeView("SELECT c, k, val FROM %s limit 1"), row(2, 0, 2));
     }
 
     @Test
@@ -89,34 +90,34 @@
     {
         createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY (a, b))");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
         Keyspace ks = Keyspace.open(keyspace());
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, a, b)");
-        ks.getColumnFamilyStore("mv").disableAutoCompaction();
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                   "WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL " +
+                   "PRIMARY KEY (c, a, b)");
+        ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
 
         //Set initial values TS=0, leaving e null and verify view
         executeNet("INSERT INTO %s (a, b, c, d) VALUES (0, 0, 1, 0) USING TIMESTAMP 0");
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
+        assertRows(executeView("SELECT d from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
 
         //update c's timestamp TS=2
         executeNet("UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
+        assertRows(executeView("SELECT d from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         // change c's value and TS=3, tombstones c=1 and adds c=0 record
         executeNet("UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? and b = ? ", 0, 0, 0);
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0));
+            Util.flush(ks);
+        assertRows(executeView("SELECT d from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0));
 
         if(flush)
         {
-            ks.getColumnFamilyStore("mv").forceMajorCompaction();
-            FBUtilities.waitOnFutures(ks.flush());
+            ks.getColumnFamilyStore(currentView()).forceMajorCompaction();
+            Util.flush(ks);
         }
 
 
@@ -124,43 +125,41 @@
         executeNet("UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
         if (flush)
         {
-            ks.getColumnFamilyStore("mv").forceMajorCompaction();
-            FBUtilities.waitOnFutures(ks.flush());
+            ks.getColumnFamilyStore(currentView()).forceMajorCompaction();
+            Util.flush(ks);
         }
 
-        assertRows(execute("SELECT d,e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, null));
-
+        assertRows(executeView("SELECT d,e from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, null));
 
         //Add e value @ TS=1
         executeNet("UPDATE %s USING TIMESTAMP 1 SET e = ? WHERE a = ? and b = ? ", 1, 0, 0);
-        assertRows(execute("SELECT d,e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, 1));
+        assertRows(executeView("SELECT d,e from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, 1));
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
 
         //Change d value @ TS=2
         executeNet("UPDATE %s USING TIMESTAMP 2 SET d = ? WHERE a = ? and b = ? ", 2, 0, 0);
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(2));
+        assertRows(executeView("SELECT d from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(2));
 
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
-
+            Util.flush(ks);
 
         //Change d value @ TS=3
         executeNet("UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? and b = ? ", 1, 0, 0);
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(1));
+        assertRows(executeView("SELECT d from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(1));
 
 
         //Tombstone c
         executeNet("DELETE FROM %s WHERE a = ? and b = ?", 0, 0);
-        assertRows(execute("SELECT d from mv"));
+        assertRows(executeView("SELECT d from %s"));
 
         //Add back without D
         executeNet("INSERT INTO %s (a, b, c) VALUES (0, 0, 1)");
 
         //Make sure D doesn't pop back in.
-        assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row((Object) null));
+        assertRows(executeView("SELECT d from %s WHERE c = ? and a = ? and b = ?", 1, 0, 0), row((Object) null));
 
 
         //New partition
@@ -172,20 +171,17 @@
 
         // delete with timestamp 0 (which should only delete d)
         executeNet("DELETE FROM %s USING TIMESTAMP 0 WHERE a = ? AND b = ?", 1, 0);
-        assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 0, 1, 0),
-                   row(1, 0, 0, null, 0)
-        );
+        assertRows(executeView("SELECT a, b, c, d, e from %s WHERE c = ? and a = ? and b = ?", 0, 1, 0),
+                   row(1, 0, 0, null, 0));
 
         executeNet("UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? AND b = ?", 1, 1, 0);
         executeNet("UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? AND b = ?", 0, 1, 0);
-        assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 0, 1, 0),
-                   row(1, 0, 0, null, 0)
-        );
+        assertRows(executeView("SELECT a, b, c, d, e from %s WHERE c = ? and a = ? and b = ?", 0, 1, 0),
+                   row(1, 0, 0, null, 0));
 
         executeNet("UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? AND b = ?", 0, 1, 0);
-        assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 0, 1, 0),
-                   row(1, 0, 0, 0, 0)
-        );
+        assertRows(executeView("SELECT a, b, c, d, e from %s WHERE c = ? and a = ? and b = ?", 0, 1, 0),
+                   row(1, 0, 0, 0, 0));
     }
 
     @Test
@@ -200,7 +196,7 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
 
         updateView("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) USING TTL 3", 1, 1, 1, 1);
 
@@ -208,8 +204,8 @@
         updateView("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, 1, 2);
 
         Thread.sleep(TimeUnit.SECONDS.toMillis(5));
-        List<Row> results = executeNet("SELECT d FROM mv WHERE c = 2 AND a = 1 AND b = 1").all();
-        assertEquals(1, results.size());
+        List<Row> results = executeViewNet("SELECT d FROM %s WHERE c = 2 AND a = 1 AND b = 1").all();
+        Assert.assertEquals(1, results.size());
         Assert.assertTrue("There should be a null result given back due to ttl expiry", results.get(0).isNull(0));
     }
 
@@ -225,12 +221,12 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
 
         updateView("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) USING TTL 3", 1, 1, 1, 1);
 
         Thread.sleep(TimeUnit.SECONDS.toMillis(4));
-        assertEquals(0, executeNet("SELECT * FROM mv WHERE c = 1 AND a = 1 AND b = 1").all().size());
+        Assert.assertEquals(0, executeViewNet("SELECT * FROM %s WHERE c = 1 AND a = 1 AND b = 1").all().size());
     }
 
     @Test
@@ -244,14 +240,14 @@
 
         executeNet("USE " + keyspace());
 
-        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE c IS NOT NULL AND a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (c, a, b)");
 
         for (int i = 0; i < 50; i++)
         {
             updateView("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP 1", 1, 1, i);
         }
 
-        ResultSet mvRows = executeNet("SELECT c FROM mv");
+        ResultSet mvRows = executeViewNet("SELECT c FROM %s");
         List<Row> rows = executeNet("SELECT c FROM %s").all();
         assertEquals("There should be exactly one row in base", 1, rows.size());
         int expected = rows.get(0).getInt("c");
@@ -259,27 +255,25 @@
     }
 
     @Test
-    public void testCreateMvWithTTL() throws Throwable
+    public void testCreateMvWithTTL()
     {
         createTable("CREATE TABLE %s (" +
                     "k int PRIMARY KEY, " +
                     "c int, " +
                     "val int) WITH default_time_to_live = 60");
 
-        execute("USE " + keyspace());
-        executeNet("USE " + keyspace());
-
         // Must NOT include "default_time_to_live" for Materialized View creation
         try
         {
-            createView("mv_ttl1", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c) WITH default_time_to_live = 30");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                       "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c) WITH default_time_to_live = 30");
             fail("Should fail if TTL is provided for materialized view");
         }
-        catch (Exception e)
+        catch (RuntimeException e)
         {
-            assertEquals("Cannot set default_time_to_live for a materialized view. Data in a materialized " +
-                         "view always expire at the same time than the corresponding data in the parent table.",
-                         e.getMessage());
+            Throwable cause = e.getCause();
+            Assertions.assertThat(cause).isInstanceOf(InvalidRequestException.class);
+            Assertions.assertThat(cause.getMessage()).contains("Cannot set default_time_to_live for a materialized view");
         }
     }
 
@@ -291,12 +285,12 @@
                     "c int, " +
                     "val int) WITH default_time_to_live = 60");
 
-        createView("mv_ttl2", "CREATE MATERIALIZED VIEW " + keyspace() + ".%s AS SELECT * FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
 
         // Must NOT include "default_time_to_live" on alter Materialized View
         try
         {
-            executeNet("ALTER MATERIALIZED VIEW " + keyspace() + ".mv_ttl2 WITH default_time_to_live = 30");
+            executeNet("ALTER MATERIALIZED VIEW " + currentView() + " WITH default_time_to_live = 30");
             fail("Should fail if TTL is provided while altering materialized view");
         }
         catch (Exception e)
@@ -309,7 +303,7 @@
     }
 
     @Test
-    public void testMvWithZeroTTL() throws Throwable
+    public void testMvWithZeroTTL()
     {
         createTable("CREATE TABLE %s (" +
                     "k int PRIMARY KEY, " +
@@ -318,7 +312,7 @@
 
         try
         {
-            createView("mv_ttl3", "CREATE MATERIALIZED VIEW " + keyspace() + ".%s AS SELECT * FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c) WITH default_time_to_live = 0");
+            createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c) WITH default_time_to_live = 0");
         }
         catch (Exception e)
         {
@@ -334,11 +328,11 @@
                     "c int, " +
                     "val int) WITH default_time_to_live = 60");
 
-        createView("mv_ttl4", "CREATE MATERIALIZED VIEW " + keyspace() + ".%s AS SELECT * FROM %%s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
+        createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
 
         try
         {
-            executeNet("ALTER MATERIALIZED VIEW " + keyspace() + ".mv_ttl4 WITH default_time_to_live = 0");
+            executeNet("ALTER MATERIALIZED VIEW " + currentView() + " WITH default_time_to_live = 0");
         }
         catch (Exception e)
         {
diff --git a/test/unit/org/apache/cassandra/cql3/conditions/ColumnConditionTest.java b/test/unit/org/apache/cassandra/cql3/conditions/ColumnConditionTest.java
index f62119d..dd40cf8 100644
--- a/test/unit/org/apache/cassandra/cql3/conditions/ColumnConditionTest.java
+++ b/test/unit/org/apache/cassandra/cql3/conditions/ColumnConditionTest.java
@@ -32,9 +32,8 @@
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.schema.ColumnMetadata;
-import org.apache.cassandra.serializers.TimeUUIDSerializer;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -65,8 +64,8 @@
         {
             for (int i = 0, m = values.size(); i < m; i++)
             {
-                UUID uuid = UUIDGen.getTimeUUID(now, i);
-                ByteBuffer key = TimeUUIDSerializer.instance.serialize(uuid);
+                TimeUUID uuid = TimeUUID.Generator.atUnixMillis(now, i);
+                ByteBuffer key = uuid.toBytes();
                 ByteBuffer value = values.get(i);
                 BufferCell cell = new BufferCell(definition,
                                                  0L,
@@ -136,6 +135,22 @@
         return bound.appliesTo(newRow(definition, rowValue));
     }
 
+    private static boolean conditionContainsApplies(List<ByteBuffer> rowValue, Operator op, ByteBuffer conditionValue)
+    {
+        ColumnMetadata definition = ColumnMetadata.regularColumn("ks", "cf", "c", ListType.getInstance(Int32Type.instance, true));
+        ColumnCondition condition = ColumnCondition.condition(definition, op, Terms.of(new Constants.Value(conditionValue)));
+        ColumnCondition.Bound bound = condition.bind(QueryOptions.DEFAULT);
+        return bound.appliesTo(newRow(definition, rowValue));
+    }
+
+    private static boolean conditionContainsApplies(Map<ByteBuffer, ByteBuffer> rowValue, Operator op, ByteBuffer conditionValue)
+    {
+        ColumnMetadata definition = ColumnMetadata.regularColumn("ks", "cf", "c", MapType.getInstance(Int32Type.instance, Int32Type.instance, true));
+        ColumnCondition condition = ColumnCondition.condition(definition, op, Terms.of(new Constants.Value(conditionValue)));
+        ColumnCondition.Bound bound = condition.bind(QueryOptions.DEFAULT);
+        return bound.appliesTo(newRow(definition, rowValue));
+    }
+
     private static boolean conditionApplies(SortedSet<ByteBuffer> rowValue, Operator op, SortedSet<ByteBuffer> conditionValue)
     {
         ColumnMetadata definition = ColumnMetadata.regularColumn("ks", "cf", "c", SetType.getInstance(Int32Type.instance, true));
@@ -144,7 +159,15 @@
         return bound.appliesTo(newRow(definition, rowValue));
     }
 
-    private static boolean conditionApplies(Map<ByteBuffer, ByteBuffer> rowValue, Operator op, SortedMap<ByteBuffer, ByteBuffer> conditionValue)
+    private static boolean conditionContainsApplies(SortedSet<ByteBuffer> rowValue, Operator op, ByteBuffer conditionValue)
+    {
+        ColumnMetadata definition = ColumnMetadata.regularColumn("ks", "cf", "c", SetType.getInstance(Int32Type.instance, true));
+        ColumnCondition condition = ColumnCondition.condition(definition, op, Terms.of(new Constants.Value(conditionValue)));
+        ColumnCondition.Bound bound = condition.bind(QueryOptions.DEFAULT);
+        return bound.appliesTo(newRow(definition, rowValue));
+    }
+
+    private static boolean conditionApplies(SortedMap<ByteBuffer, ByteBuffer> rowValue, Operator op, SortedMap<ByteBuffer, ByteBuffer> conditionValue)
     {
         ColumnMetadata definition = ColumnMetadata.regularColumn("ks", "cf", "c", MapType.getInstance(Int32Type.instance, Int32Type.instance, true));
         ColumnCondition condition = ColumnCondition.condition(definition, op, Terms.of(new Maps.Value(conditionValue)));
@@ -328,6 +351,14 @@
         assertTrue(conditionApplies(list(ONE), GTE, list(ByteBufferUtil.EMPTY_BYTE_BUFFER)));
         assertFalse(conditionApplies(list(ByteBufferUtil.EMPTY_BYTE_BUFFER), GTE, list(ONE)));
         assertTrue(conditionApplies(list(ByteBufferUtil.EMPTY_BYTE_BUFFER), GTE, list(ByteBufferUtil.EMPTY_BYTE_BUFFER)));
+
+        //CONTAINS
+        assertTrue(conditionContainsApplies(list(ZERO, ONE, TWO), CONTAINS, ONE));
+        assertFalse(conditionContainsApplies(list(ZERO, ONE), CONTAINS, TWO));
+
+        assertFalse(conditionContainsApplies(list(ZERO, ONE, TWO), CONTAINS, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+        assertFalse(conditionContainsApplies(list(ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS, ONE));
+        assertTrue(conditionContainsApplies(list(ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS, ByteBufferUtil.EMPTY_BYTE_BUFFER));
     }
 
     private static SortedSet<ByteBuffer> set(ByteBuffer... values)
@@ -423,6 +454,14 @@
         assertTrue(conditionApplies(set(ONE), GTE, set(ByteBufferUtil.EMPTY_BYTE_BUFFER)));
         assertFalse(conditionApplies(set(ByteBufferUtil.EMPTY_BYTE_BUFFER), GTE, set(ONE)));
         assertTrue(conditionApplies(set(ByteBufferUtil.EMPTY_BYTE_BUFFER), GTE, set(ByteBufferUtil.EMPTY_BYTE_BUFFER)));
+
+        // CONTAINS
+        assertTrue(conditionContainsApplies(set(ZERO, ONE, TWO), CONTAINS, ONE));
+        assertFalse(conditionContainsApplies(set(ZERO, ONE), CONTAINS, TWO));
+
+        assertFalse(conditionContainsApplies(set(ZERO, ONE, TWO), CONTAINS, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+        assertFalse(conditionContainsApplies(set(ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS, ONE));
+        assertTrue(conditionContainsApplies(set(ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS, ByteBufferUtil.EMPTY_BYTE_BUFFER));
     }
 
     // values should be a list of key, value, key, value, ...
@@ -551,5 +590,26 @@
         assertFalse(conditionApplies(map(ONE, ByteBufferUtil.EMPTY_BYTE_BUFFER), GTE, map(ONE, ONE)));
         assertTrue(conditionApplies(map(ByteBufferUtil.EMPTY_BYTE_BUFFER, ONE), GTE, map(ByteBufferUtil.EMPTY_BYTE_BUFFER, ONE)));
         assertTrue(conditionApplies(map(ONE, ByteBufferUtil.EMPTY_BYTE_BUFFER), GTE, map(ONE, ByteBufferUtil.EMPTY_BYTE_BUFFER)));
+
+        //CONTAINS
+        assertTrue(conditionContainsApplies(map(ZERO, ONE), CONTAINS, ONE));
+        assertFalse(conditionContainsApplies(map(ZERO, ONE), CONTAINS, ZERO));
+
+        assertFalse(conditionContainsApplies(map(ONE, ONE), CONTAINS, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+        assertTrue(conditionContainsApplies(map(ByteBufferUtil.EMPTY_BYTE_BUFFER, ONE), CONTAINS, ONE));
+        assertFalse(conditionContainsApplies(map(ByteBufferUtil.EMPTY_BYTE_BUFFER, ONE), CONTAINS, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+        assertFalse(conditionContainsApplies(map(ONE, ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS, ONE));
+        assertTrue(conditionContainsApplies(map(ONE, ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+
+        //CONTAINS KEY
+        assertTrue(conditionContainsApplies(map(ZERO, ONE), CONTAINS_KEY, ZERO));
+        assertFalse(conditionContainsApplies(map(ZERO, ONE), CONTAINS_KEY, ONE));
+
+        assertFalse(conditionContainsApplies(map(ONE, ONE), CONTAINS_KEY, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+        assertFalse(conditionContainsApplies(map(ByteBufferUtil.EMPTY_BYTE_BUFFER, ONE), CONTAINS_KEY, ONE));
+        assertTrue(conditionContainsApplies(map(ByteBufferUtil.EMPTY_BYTE_BUFFER, ONE), CONTAINS_KEY, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+        assertTrue(conditionContainsApplies(map(ONE, ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS_KEY, ONE));
+        assertFalse(conditionContainsApplies(map(ONE, ByteBufferUtil.EMPTY_BYTE_BUFFER), CONTAINS_KEY, ByteBufferUtil.EMPTY_BYTE_BUFFER));
+
     }
 }
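
The conditionContainsApplies overloads added to ColumnConditionTest exercise CONTAINS and CONTAINS KEY against lists, sets and maps, including empty-ByteBuffer corner cases. All of the new assertions are consistent with the simple membership model sketched below; this is an illustration of the expected semantics, not the ColumnCondition implementation: CONTAINS matches an element of a list or set, or a value of a map, while CONTAINS KEY matches a map key.

    import java.nio.ByteBuffer;
    import java.util.Collection;
    import java.util.Map;

    // Illustrative model of the semantics the assertions check.
    final class ContainsSemantics
    {
        // CONTAINS on a list or set: is the value one of the elements?
        static boolean collectionContains(Collection<ByteBuffer> elements, ByteBuffer value)
        {
            return elements.contains(value);
        }

        // CONTAINS on a map: is the value one of the map's values?
        static boolean mapContains(Map<ByteBuffer, ByteBuffer> map, ByteBuffer value)
        {
            return map.containsValue(value);
        }

        // CONTAINS KEY on a map: is the value one of the map's keys?
        static boolean mapContainsKey(Map<ByteBuffer, ByteBuffer> map, ByteBuffer key)
        {
            return map.containsKey(key);
        }
    }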
diff --git a/test/unit/org/apache/cassandra/cql3/functions/CastFctsTest.java b/test/unit/org/apache/cassandra/cql3/functions/CastFctsTest.java
index ee6c69f..6efbdc0 100644
--- a/test/unit/org/apache/cassandra/cql3/functions/CastFctsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/functions/CastFctsTest.java
@@ -23,12 +23,12 @@
 import java.time.LocalTime;
 import java.time.ZoneOffset;
 import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
 import java.util.Date;
 
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.serializers.SimpleDateSerializer;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.TimeUUID;
 
 import org.junit.Test;
 
@@ -39,24 +39,6 @@
     {
         createTable("CREATE TABLE %s (a int primary key, b text, c double)");
 
-        assertInvalidSyntaxMessage("no viable alternative at input '(' (... b, c) VALUES ([CAST](...)",
-                                   "INSERT INTO %s (a, b, c) VALUES (CAST(? AS int), ?, ?)", 1.6, "test", 6.3);
-
-        assertInvalidSyntaxMessage("no viable alternative at input '(' (..." + KEYSPACE + "." + currentTable()
-                + " SET c = [cast](...)",
-                                   "UPDATE %s SET c = cast(? as double) WHERE a = ?", 1, 1);
-
-        assertInvalidSyntaxMessage("no viable alternative at input '(' (...= ? WHERE a = [CAST] (...)",
-                                   "UPDATE %s SET c = ? WHERE a = CAST (? AS INT)", 1, 2.0);
-
-        assertInvalidSyntaxMessage("no viable alternative at input '(' (..." + KEYSPACE + "." + currentTable()
-                + " WHERE a = [CAST] (...)",
-                                   "DELETE FROM %s WHERE a = CAST (? AS INT)", 1, 2.0);
-
-        assertInvalidSyntaxMessage("no viable alternative at input '(' (..." + KEYSPACE + "." + currentTable()
-                + " WHERE a = [CAST] (...)",
-                                   "SELECT * FROM %s WHERE a = CAST (? AS INT)", 1, 2.0);
-
         assertInvalidMessage("a cannot be cast to boolean", "SELECT CAST(a AS boolean) FROM %s");
     }
 
@@ -229,7 +211,7 @@
         long timeInMillis = dateTime.toInstant().toEpochMilli();
 
         execute("INSERT INTO %s (a, b, c, d) VALUES (?, '" + yearMonthDay + " 11:03:02+00', '2015-05-21', '11:03:02')",
-                UUIDGen.getTimeUUID(timeInMillis));
+                TimeUUID.Generator.atUnixMillis(timeInMillis));
 
         assertRows(execute("SELECT CAST(a AS timestamp), " +
                            "CAST(b AS timestamp), " +
@@ -322,4 +304,256 @@
                 "CAST(b AS text) FROM %s"),
                    row((byte) 2, (short) 2, 2, 2L, 2.0F, 2.0, BigDecimal.valueOf(2), "2", "2"));
     }
+
+    /**
+     * Verifies that the {@code CAST} function can be used in the values of {@code INSERT INTO} statements.
+     */
+    @Test
+    public void testCastsInInsertIntoValues() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+
+        // Simple cast
+        execute("INSERT INTO %s (k, v) VALUES (1, CAST(1.3 AS int))");
+        assertRows(execute("SELECT v FROM %s"), row(1));
+
+        // Nested casts
+        execute("INSERT INTO %s (k, v) VALUES (1, CAST(CAST(CAST(2.3 AS int) AS float) AS int))");
+        assertRows(execute("SELECT v FROM %s"), row(2));
+
+        // Cast of placeholder with type hint
+        execute("INSERT INTO %s (k, v) VALUES (1, CAST((float) ? AS int))", 3.4f);
+        assertRows(execute("SELECT v FROM %s"), row(3));
+
+        // Cast of placeholder without type hint
+        assertInvalidRequestMessage("Ambiguous call to function system.castAsInt",
+                                    "INSERT INTO %s (k, v) VALUES (1, CAST(? AS int))", 3.4f);
+
+        // Type hint of cast
+        execute("INSERT INTO %s (k, v) VALUES (1, (int) CAST(4.9 AS int))");
+        assertRows(execute("SELECT v FROM %s"), row(4));
+
+        // Function of cast
+        execute(String.format("INSERT INTO %%s (k, v) VALUES (1, %s(CAST(5 AS float)))", floatToInt()));
+        assertRows(execute("SELECT v FROM %s"), row(5));
+
+        // Cast of function
+        execute(String.format("INSERT INTO %%s (k, v) VALUES (1, CAST(%s(6) AS int))", intToFloat()));
+        assertRows(execute("SELECT v FROM %s"), row(6));
+    }
+
+    /**
+     * Verifies that the {@code CAST} function can be used in the values of {@code UPDATE} statements.
+     */
+    @Test
+    public void testCastsInUpdateValues() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+
+        // Simple cast
+        execute("UPDATE %s SET v = CAST(1.3 AS int) WHERE k = 1");
+        assertRows(execute("SELECT v FROM %s"), row(1));
+
+        // Nested casts
+        execute("UPDATE %s SET v = CAST(CAST(CAST(2.3 AS int) AS float) AS int) WHERE k = 1");
+        assertRows(execute("SELECT v FROM %s"), row(2));
+
+        // Cast of placeholder with type hint
+        execute("UPDATE %s SET v = CAST((float) ? AS int) WHERE k = 1", 3.4f);
+        assertRows(execute("SELECT v FROM %s"), row(3));
+
+        // Cast of placeholder without type hint
+        assertInvalidRequestMessage("Ambiguous call to function system.castAsInt",
+                                    "UPDATE %s SET v = CAST(? AS int) WHERE k = 1", 3.4f);
+
+        // Type hint of cast
+        execute("UPDATE %s SET v = (int) CAST(4.9 AS int) WHERE k = 1");
+        assertRows(execute("SELECT v FROM %s"), row(4));
+
+        // Function of cast
+        execute(String.format("UPDATE %%s SET v = %s(CAST(5 AS float)) WHERE k = 1", floatToInt()));
+        assertRows(execute("SELECT v FROM %s"), row(5));
+
+        // Cast of function
+        execute(String.format("UPDATE %%s SET v = CAST(%s(6) AS int) WHERE k = 1", intToFloat()));
+        assertRows(execute("SELECT v FROM %s"), row(6));
+    }
+
+    /**
+     * Verifies that the {@code CAST} function can be used in the {@code WHERE} clause of {@code UPDATE} statements.
+     */
+    @Test
+    public void testCastsInUpdateWhereClause() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+
+        for (int i = 1; i <= 6; i++)
+        {
+            execute("INSERT INTO %s (k) VALUES (?)", i);
+        }
+
+        // Simple cast
+        execute("UPDATE %s SET v = ? WHERE k = CAST(1.3 AS int)", 1);
+        assertRows(execute("SELECT v FROM %s WHERE k = ?", 1), row(1));
+
+        // Nested casts
+        execute("UPDATE %s SET v = ? WHERE k = CAST(CAST(CAST(2.3 AS int) AS float) AS int)", 2);
+        assertRows(execute("SELECT v FROM %s WHERE k = ?", 2), row(2));
+
+        // Cast of placeholder with type hint
+        execute("UPDATE %s SET v = ? WHERE k = CAST((float) ? AS int)", 3, 3.4f);
+        assertRows(execute("SELECT v FROM %s WHERE k = ?", 3), row(3));
+
+        // Cast of placeholder without type hint
+        assertInvalidRequestMessage("Ambiguous call to function system.castAsInt",
+                                    "UPDATE %s SET v = ? WHERE k = CAST(? AS int)", 3, 3.4f);
+
+        // Type hint of cast
+        execute("UPDATE %s SET v = ? WHERE k = (int) CAST(4.9 AS int)", 4);
+        assertRows(execute("SELECT v FROM %s WHERE k = ?", 4), row(4));
+
+        // Function of cast
+        execute(String.format("UPDATE %%s SET v = ? WHERE k = %s(CAST(5 AS float))", floatToInt()), 5);
+        assertRows(execute("SELECT v FROM %s WHERE k = ?", 5), row(5));
+
+        // Cast of function
+        execute(String.format("UPDATE %%s SET v = ? WHERE k = CAST(%s(6) AS int)", intToFloat()), 6);
+        assertRows(execute("SELECT v FROM %s WHERE k = ?", 6), row(6));
+    }
+
+    /**
+     * Verifies that the {@code CAST} function can be used in the {@code WHERE} clause of {@code SELECT} statements.
+     */
+    @Test
+    public void testCastsInSelectWhereClause() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY)");
+
+        for (int i = 1; i <= 6; i++)
+        {
+            execute("INSERT INTO %s (k) VALUES (?)", i);
+        }
+
+        // Simple cast
+        assertRows(execute("SELECT k FROM %s WHERE k = CAST(1.3 AS int)"), row(1));
+
+        // Nested casts
+        assertRows(execute("SELECT k FROM %s WHERE k = CAST(CAST(CAST(2.3 AS int) AS float) AS int)"), row(2));
+
+        // Cast of placeholder with type hint
+        assertRows(execute("SELECT k FROM %s WHERE k = CAST((float) ? AS int)", 3.4f), row(3));
+
+        // Cast of placeholder without type hint
+        assertInvalidRequestMessage("Ambiguous call to function system.castAsInt",
+                                    "SELECT k FROM %s WHERE k = CAST(? AS int)", 3.4f);
+
+        // Type hint of cast
+        assertRows(execute("SELECT k FROM %s WHERE k = (int) CAST(4.9 AS int)"), row(4));
+
+        // Function of cast
+        assertRows(execute(String.format("SELECT k FROM %%s WHERE k = %s(CAST(5 AS float))", floatToInt())), row(5));
+
+        // Cast of function
+        assertRows(execute(String.format("SELECT k FROM %%s WHERE k = CAST(%s(6) AS int)", intToFloat())), row(6));
+    }
+
+    /**
+     * Verifies that the {@code CAST} function can be used in the {@code WHERE} clause of {@code DELETE} statements.
+     */
+    @Test
+    public void testCastsInDeleteWhereClause() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY)");
+
+        for (int i = 1; i <= 6; i++)
+        {
+            execute("INSERT INTO %s (k) VALUES (?)", i);
+        }
+
+        // Simple cast
+        execute("DELETE FROM %s WHERE k = CAST(1.3 AS int)");
+        assertEmpty(execute("SELECT * FROM %s WHERE k = ?", 1));
+
+        // Nested casts
+        execute("DELETE FROM %s WHERE k = CAST(CAST(CAST(2.3 AS int) AS float) AS int)");
+        assertEmpty(execute("SELECT * FROM %s WHERE k = ?", 2));
+
+        // Cast of placeholder with type hint
+        execute("DELETE FROM %s WHERE k = CAST((float) ? AS int)", 3.4f);
+        assertEmpty(execute("SELECT * FROM %s WHERE k = ?", 3));
+
+        // Cast of placeholder without type hint
+        assertInvalidRequestMessage("Ambiguous call to function system.castAsInt",
+                                    "DELETE FROM %s WHERE k = CAST(? AS int)", 3.4f);
+
+        // Type hint of cast
+        execute("DELETE FROM %s WHERE k = (int) CAST(4.9 AS int)");
+        assertEmpty(execute("SELECT * FROM %s WHERE k = ?", 4));
+
+        // Function of cast
+        execute(String.format("DELETE FROM %%s WHERE k = %s(CAST(5 AS float))", floatToInt()));
+        assertEmpty(execute("SELECT * FROM %s WHERE k = ?", 5));
+
+        // Cast of function
+        execute(String.format("DELETE FROM %%s WHERE k = CAST(%s(6) AS int)", intToFloat()));
+        assertEmpty(execute("SELECT * FROM %s WHERE k = ?", 6));
+    }
+
+    /**
+     * Creates a CQL function that casts a {@code float} argument into an {@code int}.
+     *
+     * @return the name of the created function
+     */
+    private String floatToInt() throws Throwable
+    {
+        return createFunction(KEYSPACE,
+                              "int, int",
+                              "CREATE FUNCTION IF NOT EXISTS %s (x float) " +
+                              "CALLED ON NULL INPUT " +
+                              "RETURNS int " +
+                              "LANGUAGE java " +
+                              "AS 'return Float.valueOf(x).intValue();'");
+    }
+
+    /**
+     * Creates a CQL function that casts an {@code int} argument into a {@code float}.
+     *
+     * @return the name of the created function
+     */
+    private String intToFloat() throws Throwable
+    {
+        return createFunction(KEYSPACE,
+                              "int, int",
+                              "CREATE FUNCTION IF NOT EXISTS %s (x int) " +
+                              "CALLED ON NULL INPUT " +
+                              "RETURNS float " +
+                              "LANGUAGE java " +
+                              "AS 'return (float) x;'");
+    }
+
+    /**
+     * Verifies that the {@code CAST} function can be used in the {@code WHERE} clause of {@code CREATE MATERIALIZED
+     * VIEW} statements.
+     */
+    @Test
+    public void testCastsInCreateViewWhereClause() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+
+        String viewName = keyspace() + ".mv_with_cast";
+        execute(String.format("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s" +
+                              "   WHERE k < CAST(3.14 AS int) AND v IS NOT NULL" +
+                              "   PRIMARY KEY (v, k)", viewName));
+
+        // start storage service so MV writes are applied
+        StorageService.instance.initServer();
+
+        execute("INSERT INTO %s (k, v) VALUES (1, 10)");
+        execute("INSERT INTO %s (k, v) VALUES (2, 20)");
+        execute("INSERT INTO %s (k, v) VALUES (3, 30)");
+
+        assertRows(execute(String.format("SELECT * FROM %s", viewName)), row(10, 1), row(20, 2));
+
+        execute("DROP MATERIALIZED VIEW " + viewName);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/functions/OperationFctsTest.java b/test/unit/org/apache/cassandra/cql3/functions/OperationFctsTest.java
index 1648a55..c46f6a9 100644
--- a/test/unit/org/apache/cassandra/cql3/functions/OperationFctsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/functions/OperationFctsTest.java
@@ -33,6 +33,20 @@
 
 public class OperationFctsTest extends CQLTester
 {
+
+    @Test
+    public void testStringConcatenation() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a text, b ascii, c text, PRIMARY KEY(a, b, c))");
+        execute("INSERT INTO %S (a, b, c) VALUES ('जॉन', 'Doe', 'जॉन Doe')");
+
+        assertColumnNames(execute("SELECT a + a, a + b, b + a, b + b FROM %s WHERE a = 'जॉन' AND b = 'Doe' AND c = 'जॉन Doe'"),
+                "a + a", "a + b", "b + a", "b + b");
+
+        assertRows(execute("SELECT a + ' ' + a, a + ' ' + b, b + ' ' + a, b + ' ' + b FROM %s WHERE a = 'जॉन' AND b = 'Doe' AND c = 'जॉन Doe'"),
+                row("जॉन जॉन", "जॉन Doe", "Doe जॉन", "Doe Doe"));
+    }
+
     @Test
     public void testSingleOperations() throws Throwable
     {
diff --git a/test/unit/org/apache/cassandra/cql3/functions/TimeFctsTest.java b/test/unit/org/apache/cassandra/cql3/functions/TimeFctsTest.java
index b0a4bb9..a124e60 100644
--- a/test/unit/org/apache/cassandra/cql3/functions/TimeFctsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/functions/TimeFctsTest.java
@@ -32,9 +32,10 @@
 import org.apache.cassandra.db.marshal.TimestampType;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.cql3.functions.TimeFcts.*;
+import static org.apache.cassandra.utils.TimeUUID.Generator.atUnixMillisAsBytes;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
@@ -56,7 +57,16 @@
         long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
         ByteBuffer input = TimestampType.instance.fromString(DATE_TIME_STRING + "+00");
         ByteBuffer output = executeFunction(TimeFcts.minTimeuuidFct, input);
-        assertEquals(UUIDGen.minTimeUUID(timeInMillis), TimeUUIDType.instance.compose(output));
+        assertEquals(TimeUUID.minAtUnixMillis(timeInMillis), TimeUUIDType.instance.compose(output));
+    }
+
+    @Test
+    public void testMinTimeUuidFromBigInt()
+    {
+        long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
+        ByteBuffer input = LongType.instance.decompose(timeInMillis);
+        ByteBuffer output = executeFunction(TimeFcts.minTimeuuidFct, input);
+        assertEquals(TimeUUID.minAtUnixMillis(timeInMillis), TimeUUIDType.instance.compose(output));
     }
 
     @Test
@@ -65,7 +75,16 @@
         long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
         ByteBuffer input = TimestampType.instance.fromString(DATE_TIME_STRING + "+00");
         ByteBuffer output = executeFunction(TimeFcts.maxTimeuuidFct, input);
-        assertEquals(UUIDGen.maxTimeUUID(timeInMillis), TimeUUIDType.instance.compose(output));
+        assertEquals(TimeUUID.maxAtUnixMillis(timeInMillis), TimeUUIDType.instance.compose(output));
+    }
+
+    @Test
+    public void testMaxTimeUuidFromBigInt()
+    {
+        long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
+        ByteBuffer input = LongType.instance.decompose(timeInMillis);
+        ByteBuffer output = executeFunction(TimeFcts.maxTimeuuidFct, input);
+        assertEquals(TimeUUID.maxAtUnixMillis(timeInMillis), TimeUUIDType.instance.compose(output));
     }
 
     @Test
@@ -73,7 +92,7 @@
     {
 
         long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
-        ByteBuffer input = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes(timeInMillis, 0));
+        ByteBuffer input = ByteBuffer.wrap(atUnixMillisAsBytes(timeInMillis, 0));
         ByteBuffer output = executeFunction(TimeFcts.dateOfFct, input);
         assertEquals(Date.from(DATE_TIME.toInstant()), TimestampType.instance.compose(output));
     }
@@ -82,7 +101,7 @@
     public void testTimeUuidToTimestamp()
     {
         long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
-        ByteBuffer input = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes(timeInMillis, 0));
+        ByteBuffer input = ByteBuffer.wrap(atUnixMillisAsBytes(timeInMillis, 0));
         ByteBuffer output = executeFunction(toTimestamp(TimeUUIDType.instance), input);
         assertEquals(Date.from(DATE_TIME.toInstant()), TimestampType.instance.compose(output));
     }
@@ -91,7 +110,7 @@
     public void testUnixTimestampOfFct()
     {
         long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
-        ByteBuffer input = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes(timeInMillis, 0));
+        ByteBuffer input = ByteBuffer.wrap(atUnixMillisAsBytes(timeInMillis, 0));
         ByteBuffer output = executeFunction(TimeFcts.unixTimestampOfFct, input);
         assertEquals(timeInMillis, LongType.instance.compose(output).longValue());
     }
@@ -100,7 +119,7 @@
     public void testTimeUuidToUnixTimestamp()
     {
         long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
-        ByteBuffer input = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes(timeInMillis, 0));
+        ByteBuffer input = ByteBuffer.wrap(atUnixMillisAsBytes(timeInMillis, 0));
         ByteBuffer output = executeFunction(toUnixTimestamp(TimeUUIDType.instance), input);
         assertEquals(timeInMillis, LongType.instance.compose(output).longValue());
     }
@@ -109,7 +128,7 @@
     public void testTimeUuidToDate()
     {
         long timeInMillis = DATE_TIME.toInstant().toEpochMilli();
-        ByteBuffer input = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes(timeInMillis, 0));
+        ByteBuffer input = ByteBuffer.wrap(atUnixMillisAsBytes(timeInMillis, 0));
         ByteBuffer output = executeFunction(toDate(TimeUUIDType.instance), input);
 
         long expectedTime = DATE.toInstant().toEpochMilli();
@@ -142,6 +161,16 @@
     }
 
     @Test
+    public void testBigIntegerToDate()
+    {
+        long millis = DATE.toInstant().toEpochMilli();
+
+        ByteBuffer input = LongType.instance.decompose(millis);
+        ByteBuffer output = executeFunction(toDate(TimestampType.instance), input);
+        assertEquals(DATE.toInstant().toEpochMilli(), SimpleDateType.instance.toTimeInMillis(output));
+    }
+
+    @Test
     public void testTimestampToDateWithEmptyInput()
     {
         ByteBuffer output = executeFunction(toDate(TimestampType.instance), ByteBufferUtil.EMPTY_BYTE_BUFFER);
@@ -157,6 +186,16 @@
     }
 
     @Test
+    public void testBigIntegerToTimestamp()
+    {
+        long millis = DATE_TIME.toInstant().toEpochMilli();
+
+        ByteBuffer input = LongType.instance.decompose(millis);
+        ByteBuffer output = executeFunction(toTimestamp(TimestampType.instance), input);
+        assertEquals(DATE_TIME.toInstant().toEpochMilli(), LongType.instance.compose(output).longValue());
+    }
+
+    @Test
     public void testTimestampToUnixTimestampWithEmptyInput()
     {
         ByteBuffer output = executeFunction(TimeFcts.toUnixTimestamp(TimestampType.instance), ByteBufferUtil.EMPTY_BYTE_BUFFER);
diff --git a/test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java b/test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java
new file mode 100644
index 0000000..ee4dd35
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cassandra.cql3.selection;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.cql3.Constants.Literal;
+import org.apache.cassandra.cql3.functions.AggregateFcts;
+import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.cql3.functions.TimeFcts;
+import org.apache.cassandra.cql3.selection.Selectable.RawIdentifier;
+import org.apache.cassandra.cql3.selection.Selector.Serializer;
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.io.util.DataInputBuffer;
+import org.apache.cassandra.io.util.DataOutputBuffer;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.Pair;
+
+import static java.util.Arrays.asList;
+
+import static org.junit.Assert.assertEquals;
+
+public class SelectorSerializationTest extends CQLTester
+{
+    @Test
+    public void testSerDes() throws IOException
+    {
+        createTable("CREATE TABLE %s (pk int, c1 int, c2 timestamp, v int, PRIMARY KEY(pk, c1, c2))");
+
+        KeyspaceMetadata keyspace = Schema.instance.getKeyspaceMetadata(KEYSPACE);
+        TableMetadata table = keyspace.getTableOrViewNullable(currentTable());
+
+        // Test SimpleSelector serialization
+        checkSerialization(table.getColumn(new ColumnIdentifier("c1", false)), table);
+
+        // Test WritetimeOrTTLSelector serialization
+        checkSerialization(new Selectable.WritetimeOrTTL(table.getColumn(new ColumnIdentifier("v", false)), true), table);
+        checkSerialization(new Selectable.WritetimeOrTTL(table.getColumn(new ColumnIdentifier("v", false)), false), table);
+
+        // Test ListSelector serialization
+        checkSerialization(new Selectable.WithList(asList(table.getColumn(new ColumnIdentifier("v", false)),
+                                                          table.getColumn(new ColumnIdentifier("c1", false)))), table);
+
+        // Test SetSelector serialization
+        checkSerialization(new Selectable.WithSet(asList(table.getColumn(new ColumnIdentifier("v", false)),
+                                                         table.getColumn(new ColumnIdentifier("c1", false)))), table);
+
+        // Test MapSelector serialization
+        Pair<Selectable.Raw, Selectable.Raw> pair = Pair.create(RawIdentifier.forUnquoted("v"),
+                                                                RawIdentifier.forUnquoted("c1"));
+        checkSerialization(new Selectable.WithMapOrUdt(table, asList(pair)), table, MapType.getInstance(Int32Type.instance, Int32Type.instance, false));
+
+        // Test TupleSelector serialization
+        checkSerialization(new Selectable.BetweenParenthesesOrWithTuple(asList(table.getColumn(new ColumnIdentifier("c2", false)),
+                                                                               table.getColumn(new ColumnIdentifier("c1", false)))), table);
+        // Test TermSelector serialization
+        checkSerialization(new Selectable.WithTerm(Literal.duration("5m")), table, DurationType.instance);
+
+        // Test UserTypeSelector serialization
+        String typeName = createType("CREATE TYPE %s (f1 int, f2 int)");
+
+        UserType type = new UserType(KEYSPACE, ByteBufferUtil.bytes(typeName),
+                                     asList(FieldIdentifier.forUnquoted("f1"),
+                                            FieldIdentifier.forUnquoted("f2")),
+                                     asList(Int32Type.instance,
+                                            Int32Type.instance),
+                                     false);
+
+        List<Pair<Selectable.Raw, Selectable.Raw>> list = asList(Pair.create(RawIdentifier.forUnquoted("f1"),
+                                                                             RawIdentifier.forUnquoted("c1")),
+                                                                 Pair.create(RawIdentifier.forUnquoted("f2"),
+                                                                             RawIdentifier.forUnquoted("pk")));
+
+        checkSerialization(new Selectable.WithMapOrUdt(table, list), table, type);
+
+        // Test FieldSelector serialization
+        checkSerialization(new Selectable.WithFieldSelection(new Selectable.WithTypeHint(typeName, type, new Selectable.WithMapOrUdt(table, list)), FieldIdentifier.forUnquoted("f1")), table, type);
+
+        // Test AggregateFunctionSelector serialization
+        Function max = AggregateFcts.makeMaxFunction(Int32Type.instance);
+        checkSerialization(new Selectable.WithFunction(max, asList(table.getColumn(new ColumnIdentifier("v", false)))), table);
+
+        // Test ScalarFunctionSelector serialization
+        Function toDate = TimeFcts.toDate(TimestampType.instance);
+        checkSerialization(new Selectable.WithFunction(toDate, asList(table.getColumn(new ColumnIdentifier("c2", false)))), table);
+
+        Function floor = TimeFcts.FloorTimestampFunction.newInstanceWithStartTimeArgument();
+        checkSerialization(new Selectable.WithFunction(floor, asList(table.getColumn(new ColumnIdentifier("c2", false)),
+                                                                     new Selectable.WithTerm(Literal.duration("5m")),
+                                                                     new Selectable.WithTerm(Literal.string("2016-09-27 16:00:00 UTC")))), table);
+    }
+
+    private static void checkSerialization(Selectable selectable, TableMetadata table) throws IOException
+    {
+        checkSerialization(selectable, table, null);
+    }
+
+    private static void checkSerialization(Selectable selectable, TableMetadata table, AbstractType<?> expectedType) throws IOException
+    {
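+        // Round-trip check: build a Selector from the Selectable, serialize it at the current
+        // messaging version, deserialize it again and assert both instances are equal.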
+        int version = MessagingService.current_version;
+
+        Serializer serializer = Selector.serializer;
+        Selector.Factory factory = selectable.newSelectorFactory(table, expectedType, new ArrayList<>(), VariableSpecifications.empty());
+        Selector selector = factory.newInstance(QueryOptions.DEFAULT);
+        int size = serializer.serializedSize(selector, version);
+        DataOutputBuffer out = new DataOutputBuffer(size);
+        serializer.serialize(selector, out, version);
+        ByteBuffer buffer = out.asNewBuffer();
+        DataInputBuffer in = new DataInputBuffer(buffer, false);
+        Selector deserialized = serializer.deserialize(in, version, table);
+
+        assertEquals(selector, deserialized);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java b/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java
index 940e7b6..52bb3c3 100644
--- a/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java
+++ b/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java
@@ -975,6 +975,7 @@
                "    AND comment = ''\n" +
                "    AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}\n" +
                "    AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n" +
+               "    AND memtable = 'default'\n" +
                "    AND crc_check_chance = 1.0\n" +
                "    AND default_time_to_live = 0\n" +
                "    AND extensions = {}\n" +
@@ -995,6 +996,7 @@
                "    AND comment = ''\n" +
                "    AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}\n" +
                "    AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n" +
+               "    AND memtable = 'default'\n" +
                "    AND crc_check_chance = 1.0\n" +
                "    AND extensions = {}\n" +
                "    AND gc_grace_seconds = 864000\n" +
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
index bec2ccd..fedb4e5 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
@@ -32,6 +32,7 @@
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class CollectionsTest extends CQLTester
@@ -696,7 +697,7 @@
     public void testMapWithLargePartition() throws Throwable
     {
         Random r = new Random();
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         System.out.println("Seed " + seed);
         r.setSeed(seed);
 
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java
index e28e0cb..810ee5a 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java
@@ -40,6 +40,7 @@
 import java.util.*;
 import java.util.concurrent.*;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -49,8 +50,8 @@
     @BeforeClass
     public static void setUpClass()
     {
-        if (ROW_CACHE_SIZE_IN_MB > 0)
-            DatabaseDescriptor.setRowCacheSizeInMB(ROW_CACHE_SIZE_IN_MB);
+        if (ROW_CACHE_SIZE_IN_MIB > 0)
+            DatabaseDescriptor.setRowCacheSizeInMiB(ROW_CACHE_SIZE_IN_MIB);
 
         StorageService.instance.setPartitionerUnsafe(ByteOrderedPartitioner.instance);
 
@@ -1329,7 +1330,7 @@
         for (int i = 0; i < numRows; i++)
             execute("INSERT INTO %s (k, v) VALUES (?, ?)", "" + i, "" + i);
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         System.out.println("Seed " + seed);
         final Random rand = new Random(seed);
 
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java
index f69d8d5..1d10eab 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java
@@ -214,4 +214,4 @@
         assertEmpty(execute("SELECT id, company FROM %s WHERE home = " + addressString));
         assertRows(execute("SELECT id, company FROM %s WHERE home = " + newAddressString), row(1, companyName));
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
index ff7f18b..34b805d 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
@@ -59,6 +59,7 @@
 import static org.apache.cassandra.Util.throwAssert;
 import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -114,7 +115,7 @@
                              "CREATE INDEX " + indexName + " ON %s(b)");
 
         // IF NOT EXISTS should apply in cases where the new index differs from an existing one in name only
-        String otherIndexName = "index_" + System.nanoTime();
+        String otherIndexName = "index_" + nanoTime();
         assertEquals(1, getCurrentColumnFamilyStore().metadata().indexes.size());
         createIndex("CREATE INDEX IF NOT EXISTS " + otherIndexName + " ON %s(b)");
         assertEquals(1, getCurrentColumnFamilyStore().metadata().indexes.size());
@@ -611,7 +612,7 @@
     /**
      * Test for CASSANDRA-5732, Can not query secondary index
      * migrated from cql_tests.py:TestCQL.bug_5732_test(),
-     * which was executing with a row cache size of 100 MB
+     * which was executing with a row cache size of 100 MiB
      * and restarting the node, here we just cleanup the cache.
      */
     @Test
@@ -671,14 +672,14 @@
         long batchSizeThreshold = DatabaseDescriptor.getBatchSizeFailThreshold();
         try
         {
-            DatabaseDescriptor.setBatchSizeFailThresholdInKB( (TOO_BIG / 1024) * 2);
+            DatabaseDescriptor.setBatchSizeFailThresholdInKiB((TOO_BIG / 1024) * 2);
             succeedInsert("BEGIN BATCH\n" +
                           "INSERT INTO %s (a, b, c) VALUES (1, 1, ?) IF NOT EXISTS;\n" +
                           "APPLY BATCH", ByteBuffer.allocate(TOO_BIG));
         }
         finally
         {
-            DatabaseDescriptor.setBatchSizeFailThresholdInKB((int) (batchSizeThreshold / 1024));
+            DatabaseDescriptor.setBatchSizeFailThresholdInKiB((int) (batchSizeThreshold / 1024));
         }
     }
 
@@ -724,14 +725,14 @@
         long batchSizeThreshold = DatabaseDescriptor.getBatchSizeFailThreshold();
         try
         {
-            DatabaseDescriptor.setBatchSizeFailThresholdInKB( (TOO_BIG / 1024) * 2);
+            DatabaseDescriptor.setBatchSizeFailThresholdInKiB((TOO_BIG / 1024) * 2);
             succeedInsert("BEGIN BATCH\n" +
                           "INSERT INTO %s (a, b, c) VALUES (1, 1, ?) IF NOT EXISTS;\n" +
                           "APPLY BATCH", ByteBuffer.allocate(TOO_BIG));
         }
         finally
         {
-            DatabaseDescriptor.setBatchSizeFailThresholdInKB((int)(batchSizeThreshold / 1024));
+            DatabaseDescriptor.setBatchSizeFailThresholdInKiB((int)(batchSizeThreshold / 1024));
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java
index 63cd2b7..13090a6 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java
@@ -22,7 +22,7 @@
 import org.junit.Assert;
 import org.apache.cassandra.cql3.CQLTester;
 
-import static junit.framework.Assert.assertNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 public class TimestampTest extends CQLTester
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/TimeuuidTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/TimeuuidTest.java
index 0f1f8f0..931451e 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/TimeuuidTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/TimeuuidTest.java
@@ -19,14 +19,14 @@
 package org.apache.cassandra.cql3.validation.entities;
 
 import java.util.Date;
-import java.util.UUID;
 
 import org.junit.Test;
 
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.exceptions.SyntaxException;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.Assert.assertEquals;
 
 public class TimeuuidTest extends CQLTester
@@ -59,12 +59,13 @@
 
         for (int i = 0; i < 4; i++)
         {
-            long timestamp = UUIDGen.unixTimestamp((UUID) rows[i][1]);
+            long timestamp = ((TimeUUID) rows[i][1]).unix(MILLISECONDS);
             assertRows(execute("SELECT dateOf(t), unixTimestampOf(t) FROM %s WHERE k = 0 AND t = ?", rows[i][1]),
                        row(new Date(timestamp), timestamp));
         }
 
         assertEmpty(execute("SELECT t FROM %s WHERE k = 0 AND t > maxTimeuuid(1234567) AND t < minTimeuuid('2012-11-07 18:18:22-0800')"));
+        assertEmpty(execute("SELECT t FROM %s WHERE k = 0 AND t > maxTimeuuid(1564830182000) AND t < minTimeuuid('2012-11-07 18:18:22-0800')"));
     }
 
     /**
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
index f9ef4cc..9f53db4 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
@@ -33,7 +33,6 @@
 
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.db.SchemaCQLHelper;
 import org.apache.cassandra.db.marshal.TupleType;
 import org.apache.cassandra.utils.AbstractTypeGenerators.TypeSupport;
 import org.quicktheories.core.Gen;
@@ -300,7 +299,6 @@
             TupleType tupleType = testcase.type;
             createTable("CREATE TABLE %s (pk int, ck " + toCqlType(tupleType) + ", value int, PRIMARY KEY(pk, ck))" +
                         " WITH CLUSTERING ORDER BY (ck "+order.name()+")");
-            String cql = SchemaCQLHelper.getTableMetadataAsCQL(currentTableMetadata(), false, false, false);
             SortedMap<ByteBuffer, Integer> map = new TreeMap<>(order.apply(tupleType));
             int count = 0;
             for (ByteBuffer value : testcase.uniqueRows)
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UFScriptTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UFScriptTest.java
index 099e42d..fb33e6f 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/UFScriptTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UFScriptTest.java
@@ -38,7 +38,9 @@
 import org.apache.cassandra.cql3.functions.FunctionName;
 import org.apache.cassandra.exceptions.FunctionExecutionException;
 import org.apache.cassandra.transport.ProtocolVersion;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class UFScriptTest extends CQLTester
 {
@@ -323,7 +325,7 @@
     public void testScriptParamReturnTypes() throws Throwable
     {
         UUID ruuid = UUID.randomUUID();
-        UUID tuuid = UUIDGen.getTimeUUID();
+        TimeUUID tuuid = nextTimeUUID();
 
         createTable("CREATE TABLE %s (key int primary key, " +
                     "tival tinyint, sival smallint, ival int, lval bigint, fval float, dval double, vval varint, ddval decimal, " +
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java
index 76b8324..e86b3df 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java
@@ -40,6 +40,7 @@
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.transport.Event.SchemaChange.Change;
 import org.apache.cassandra.transport.Event.SchemaChange.Target;
@@ -864,7 +865,7 @@
                                                             "java",
                                                             f.body(),
                                                             new InvalidRequestException("foo bar is broken"));
-        Schema.instance.load(ksm.withSwapped(ksm.functions.without(f.name(), f.argTypes()).with(broken)));
+        SchemaTestUtil.addOrUpdateKeyspace(ksm.withSwapped(ksm.functions.without(f.name(), f.argTypes()).with(broken)), false);
 
         assertInvalidThrowMessage("foo bar is broken", InvalidRequestException.class,
                                   "SELECT key, " + fName + "(dval) FROM %s");
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UFTypesTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UFTypesTest.java
index b9ea27d..eb94fe5 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/UFTypesTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UFTypesTest.java
@@ -37,8 +37,8 @@
 import org.apache.cassandra.transport.Event.SchemaChange.Change;
 import org.apache.cassandra.transport.Event.SchemaChange.Target;
 import org.apache.cassandra.transport.ProtocolVersion;
-import org.apache.cassandra.transport.messages.ResultMessage;
-import org.apache.cassandra.utils.UUIDGen;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class UFTypesTest extends CQLTester
 {
@@ -187,7 +187,7 @@
         new TypesTestDef("date", "date", "dt", 12345),
         new TypesTestDef("time", "time", "tim", 12345L),
         new TypesTestDef("uuid", "uuid", "uu", UUID.randomUUID()),
-        new TypesTestDef("timeuuid", "timeuuid", "tu", UUIDGen.getTimeUUID()),
+        new TypesTestDef("timeuuid", "timeuuid", "tu", nextTimeUUID().asUUID()),
         new TypesTestDef("tinyint", "tinyint", "ti", (byte) 42),
         new TypesTestDef("smallint", "smallint", "si", (short) 43),
         new TypesTestDef("int", "int", "i", 44),
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
index e39dd35..0b05e8f 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
@@ -24,6 +24,7 @@
 
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.dht.ByteOrderedPartitioner;
+import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.service.StorageService;
 
 public class UserTypesTest extends CQLTester
@@ -882,4 +883,35 @@
     {
         return keyspace() + '.' + type1;
     }
+
+    @Test
+    public void testAlteringTypeWithIfNotExists() throws Throwable
+    {
+        String columnType = typeWithKs(createType("CREATE TYPE %s (a int)"));
+
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, y frozen<" + columnType + ">)");
+        execute("ALTER TYPE " + columnType + " ADD IF NOT EXISTS a int");
+
+        execute("INSERT INTO %s (k, y) VALUES(?, ?)", 1, userType("a", 1));
+        assertRows(execute("SELECT * FROM %s"), row(1, userType("a", 1)));
+
+        assertInvalidThrowMessage(String.format("Cannot add field %s to type %s: a field with name %s already exists", "a", columnType, "a"),
+                                  InvalidRequestException.class,
+                                  "ALTER TYPE " + columnType + " ADD a int");
+    }
+
+    @Test
+    public void testAlteringTypeRenameWithIfExists() throws Throwable
+    {
+        String columnType = typeWithKs(createType("CREATE TYPE %s (a int)"));
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, y frozen<" + columnType + ">)");
+        execute("ALTER TYPE " + columnType + " RENAME IF EXISTS a TO z AND b TO Y;");
+
+        execute("INSERT INTO %s (k, y) VALUES(?, ?)", 1, userType("z", 1));
+        assertRows(execute("SELECT * FROM %s"), row(1, userType("z", 1)));
+
+        assertInvalidThrowMessage(String.format("Unkown field %s in user type %s", "a", columnType),
+                                  InvalidRequestException.class,
+                                  "ALTER TYPE " + columnType + " RENAME a TO z;");
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/VirtualTableTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/VirtualTableTest.java
index 9808c96..5d3b134 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/VirtualTableTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/VirtualTableTest.java
@@ -18,10 +18,23 @@
 package org.apache.cassandra.cql3.validation.entities;
 
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
+import java.util.NavigableMap;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicReference;
+
+import javax.annotation.Nonnull;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Range;
+
+import org.apache.commons.lang3.tuple.Pair;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -31,18 +44,18 @@
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.partitions.Partition;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.virtual.AbstractMutableVirtualTable;
 import org.apache.cassandra.db.virtual.AbstractVirtualTable;
 import org.apache.cassandra.db.virtual.SimpleDataSet;
 import org.apache.cassandra.db.virtual.VirtualKeyspace;
 import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
 import org.apache.cassandra.db.virtual.VirtualTable;
-import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.service.StorageServiceMBean;
 import org.apache.cassandra.triggers.ITrigger;
 
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -52,53 +65,152 @@
     private static final String VT1_NAME = "vt1";
     private static final String VT2_NAME = "vt2";
     private static final String VT3_NAME = "vt3";
+    private static final String VT4_NAME = "vt4";
 
-    private static class WritableVirtualTable extends AbstractVirtualTable
+    // As long as we execute test queries using execute (and not executeNet), the virtual table implementations
+    // do not need to be thread-safe. We make them thread-safe anyway to avoid issues if the test framework changes or
+    // somebody decides to use the class with executeNet. It also provides a better example for anybody looking
+    // at this test to learn how to create mutable virtual tables.
+    private static class MutableVirtualTable extends AbstractMutableVirtualTable
     {
-        private final ColumnMetadata valueColumn;
-        private final Map<String, Integer> backingMap = new HashMap<>();
+        // <pk1, pk2> -> c1 -> c2 -> <v1, v2>
+        private final Map<Pair<String, String>, NavigableMap<String, NavigableMap<String, Pair<Number, Number>>>> backingMap = new ConcurrentHashMap<>();
 
-        WritableVirtualTable(String keyspaceName, String tableName)
+        MutableVirtualTable(String keyspaceName, String tableName)
         {
             super(TableMetadata.builder(keyspaceName, tableName)
                                .kind(TableMetadata.Kind.VIRTUAL)
-                               .addPartitionKeyColumn("key", UTF8Type.instance)
-                               .addRegularColumn("value", Int32Type.instance)
+                               .addPartitionKeyColumn("pk1", UTF8Type.instance)
+                               .addPartitionKeyColumn("pk2", UTF8Type.instance)
+                               .addClusteringColumn("c1", UTF8Type.instance)
+                               .addClusteringColumn("c2", UTF8Type.instance)
+                               .addRegularColumn("v1", Int32Type.instance)
+                               .addRegularColumn("v2", LongType.instance)
                                .build());
-            valueColumn = metadata().regularColumns().getSimple(0);
         }
 
         @Override
         public DataSet data()
         {
             SimpleDataSet data = new SimpleDataSet(metadata());
-            backingMap.forEach((key, value) -> data.row(key).column("value", value));
+            backingMap.forEach((pkPair, c1Map) ->
+                    c1Map.forEach((c1, c2Map) ->
+                    c2Map.forEach((c2, valuePair) -> data.row(pkPair.getLeft(), pkPair.getRight(), c1, c2)
+                                                         .column("v1", valuePair.getLeft())
+                                                         .column("v2", valuePair.getRight()))));
             return data;
         }
 
         @Override
-        public void apply(PartitionUpdate update)
+        protected void applyPartitionDeletion(ColumnValues partitionKeyColumns)
         {
-            String key = (String) metadata().partitionKeyType.compose(update.partitionKey().getKey());
-            update.forEach(row ->
-                           {
-                               Integer value = Int32Type.instance.compose(row.getCell(valueColumn).buffer());
-                               backingMap.put(key, value);
-                           });
+            backingMap.remove(toPartitionKey(partitionKeyColumns));
+        }
+
+        @Override
+        protected void applyRangeTombstone(ColumnValues partitionKeyColumns, Range<ColumnValues> range)
+        {
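+            // Walk every (c1, c2) row of the partition, remove the ones whose clustering falls
+            // inside the deleted range, and prune c1 entries that end up empty.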
+            Optional<NavigableMap<String, NavigableMap<String, Pair<Number, Number>>>> mayBePartition = getPartition(partitionKeyColumns);
+
+            if (!mayBePartition.isPresent())
+                return;
+
+            NavigableMap<String, NavigableMap<String, Pair<Number, Number>>> selection = mayBePartition.get();
+
+            for (String c1 : ImmutableList.copyOf(selection.keySet()))
+            {
+                NavigableMap<String, Pair<Number, Number>> rows = selection.get(c1);
+
+                for (String c2 : ImmutableList.copyOf(selection.get(c1).keySet()))
+                {
+                    if (range.contains(new ColumnValues(metadata().clusteringColumns(), c1, c2)))
+                        rows.remove(c2);
+                }
+
+                if (rows.isEmpty())
+                    selection.remove(c1);
+            }
+        }
+
+        @Override
+        protected void applyRowDeletion(ColumnValues partitionKeyColumns, ColumnValues clusteringColumns)
+        {
+            getRows(partitionKeyColumns, clusteringColumns.value(0)).ifPresent(rows -> rows.remove(clusteringColumns.value(1)));
+        }
+
+        @Override
+        protected void applyColumnDeletion(ColumnValues partitionKeyColumns, ColumnValues clusteringColumns, String columnName)
+        {
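+            // Null out only the named column of the row; the row itself is kept.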
+            getRows(partitionKeyColumns, clusteringColumns.value(0)).ifPresent(rows -> rows.computeIfPresent(clusteringColumns.value(1),
+                                                                                                             (c, p) -> updateColumn(p, columnName, null)));
+        }
+
+        @Override
+        protected void applyColumnUpdate(ColumnValues partitionKeyColumns,
+                                         ColumnValues clusteringColumns,
+                                         Optional<ColumnValue> mayBeColumnValue)
+        {
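+            // Create the partition and per-c1 maps on demand, then merge the new column value
+            // into the (v1, v2) pair stored for the row.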
+            Pair<String, String> pkPair = toPartitionKey(partitionKeyColumns);
+            backingMap.computeIfAbsent(pkPair, ignored -> new ConcurrentSkipListMap<>())
+                      .computeIfAbsent(clusteringColumns.value(0), ignored -> new ConcurrentSkipListMap<>())
+                      .compute(clusteringColumns.value(1), (ignored, p) -> updateColumn(p, mayBeColumnValue));
+        }
+
+        @Override
+        public void truncate()
+        {
+            backingMap.clear();
+        }
+
+        private Optional<NavigableMap<String, Pair<Number, Number>>> getRows(ColumnValues partitionKeyColumns, Comparable<?> firstClusteringColumn)
+        {
+            return getPartition(partitionKeyColumns).map(p -> p.get(firstClusteringColumn));
+        }
+
+        private Optional<NavigableMap<String, NavigableMap<String, Pair<Number, Number>>>> getPartition(ColumnValues partitionKeyColumns)
+        {
+            Pair<String, String> pk = toPartitionKey(partitionKeyColumns);
+            return Optional.ofNullable(backingMap.get(pk));
+        }
+
+        private Pair<String, String> toPartitionKey(ColumnValues partitionKey)
+        {
+            return Pair.of(partitionKey.value(0), partitionKey.value(1));
+        }
+
+        private static Pair<Number, Number> updateColumn(@Nonnull Pair<Number, Number> row,
+                                                         String columnName,
+                                                         Number newValue)
+        {
+            return "v1".equals(columnName) ? Pair.of(newValue, row.getRight())
+                                           : Pair.of(row.getLeft(), newValue);
+        }
+
+        private static Pair<Number, Number> updateColumn(Pair<Number, Number> row,
+                                                         Optional<ColumnValue> mayBeColumnValue)
+        {
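+            // An absent column value (e.g. an insert that only sets the primary key) leaves the
+            // existing pair, or an empty (null, null) pair for a new row, unchanged.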
+            Pair<Number, Number> r = row != null ? row : Pair.of(null, null);
+
+            if (mayBeColumnValue.isPresent())
+            {
+                ColumnValue newValue = mayBeColumnValue.get();
+                return updateColumn(r, newValue.name(), newValue.value());
+            }
+
+            return r;
         }
     }
 
     @BeforeClass
     public static void setUpClass()
     {
-        TableMetadata vt1Metadata =
-        TableMetadata.builder(KS_NAME, VT1_NAME)
-                     .kind(TableMetadata.Kind.VIRTUAL)
-                     .addPartitionKeyColumn("pk", UTF8Type.instance)
-                     .addClusteringColumn("c", UTF8Type.instance)
-                     .addRegularColumn("v1", Int32Type.instance)
-                     .addRegularColumn("v2", LongType.instance)
-                     .build();
+        TableMetadata vt1Metadata = TableMetadata.builder(KS_NAME, VT1_NAME)
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .addPartitionKeyColumn("pk", UTF8Type.instance)
+                .addClusteringColumn("c", UTF8Type.instance)
+                .addRegularColumn("v1", Int32Type.instance)
+                .addRegularColumn("v2", LongType.instance)
+                .build();
 
         SimpleDataSet vt1data = new SimpleDataSet(vt1Metadata);
 
@@ -116,18 +228,17 @@
                 return vt1data;
             }
         };
-        VirtualTable vt2 = new WritableVirtualTable(KS_NAME, VT2_NAME);
+        VirtualTable vt2 = new MutableVirtualTable(KS_NAME, VT2_NAME);
 
-        TableMetadata vt3Metadata =
-        TableMetadata.builder(KS_NAME, VT3_NAME)
-                     .kind(TableMetadata.Kind.VIRTUAL)
-                     .addPartitionKeyColumn("pk1", UTF8Type.instance)
-                     .addPartitionKeyColumn("pk2", UTF8Type.instance)
-                     .addClusteringColumn("ck1", UTF8Type.instance)
-                     .addClusteringColumn("ck2", UTF8Type.instance)
-                     .addRegularColumn("v1", Int32Type.instance)
-                     .addRegularColumn("v2", LongType.instance)
-                     .build();
+        TableMetadata vt3Metadata = TableMetadata.builder(KS_NAME, VT3_NAME)
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .addPartitionKeyColumn("pk1", UTF8Type.instance)
+                .addPartitionKeyColumn("pk2", UTF8Type.instance)
+                .addClusteringColumn("ck1", UTF8Type.instance)
+                .addClusteringColumn("ck2", UTF8Type.instance)
+                .addRegularColumn("v1", Int32Type.instance)
+                .addRegularColumn("v2", LongType.instance)
+                .build();
 
         SimpleDataSet vt3data = new SimpleDataSet(vt3Metadata);
 
@@ -141,13 +252,104 @@
                 return vt3data;
             }
         };
-        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(vt1, vt2, vt3)));
+
+        TableMetadata vt4Metadata = TableMetadata.builder(KS_NAME, VT4_NAME)
+                .kind(TableMetadata.Kind.VIRTUAL)
+                .addPartitionKeyColumn("pk", UTF8Type.instance)
+                .addRegularColumn("v", LongType.instance)
+                .build();
+
+        // As long as we execute test queries using execute (and not executeNet), the virtual table implementation
+        // does not need to be thread-safe. We make it thread-safe anyway to avoid issues if the test framework changes
+        // or somebody decides to use the class with executeNet. It also provides a better example in case somebody is
+        // looking at the test to learn how to create mutable virtual tables.
+        VirtualTable vt4 = new AbstractMutableVirtualTable(vt4Metadata)
+        {
+            // A ConcurrentHashMap cannot be used here as it does not accept null values
+            private final AtomicReference<Map<String, Long>> table = new AtomicReference<Map<String, Long>>(Collections.emptyMap());
+
+            @Override
+            public DataSet data()
+            {
+                SimpleDataSet data = new SimpleDataSet(metadata());
+                table.get().forEach((pk, v) -> data.row(pk).column("v", v));
+                return data;
+            }
+
+            @Override
+            protected void applyPartitionDeletion(ColumnValues partitionKey)
+            {
+                Map<String, Long> oldMap;
+                Map<String, Long> newMap;
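+                // copy-on-write: rebuild the map without the partition and publish it atomically, retrying on contention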
+                do
+                {
+                    oldMap = table.get();
+                    newMap = new HashMap<>(oldMap);
+                    newMap.remove(partitionKey.value(0));
+                }
+                while (!table.compareAndSet(oldMap, newMap));
+            }
+
+            @Override
+            protected void applyColumnDeletion(ColumnValues partitionKey,
+                                               ColumnValues clusteringColumns,
+                                               String columnName)
+            {
+                Map<String, Long> oldMap;
+                Map<String, Long> newMap;
+                do
+                {
+                    oldMap = table.get();
+
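+                    // nothing to delete when the partition does not exist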
+                    if (!oldMap.containsKey(partitionKey.value(0)))
+                        break;
+
+                    newMap = new HashMap<>(oldMap);
+                    newMap.put(partitionKey.value(0), null);
+                }
+                while (!table.compareAndSet(oldMap, newMap));
+            }
+
+            @Override
+            protected void applyColumnUpdate(ColumnValues partitionKey,
+                                             ColumnValues clusteringColumns,
+                                             Optional<ColumnValue> columnValue)
+            {
+                Map<String, Long> oldMap;
+                Map<String, Long> newMap;
+                do
+                {
+                    oldMap = table.get();
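+                    // when the row already exists and no regular column value is supplied, there is nothing to update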
+                    if (oldMap.containsKey(partitionKey.value(0)) && !columnValue.isPresent())
+                        break;
+                    newMap = new HashMap<>(oldMap);
+                    newMap.put(partitionKey.value(0), columnValue.isPresent() ? columnValue.get().value() : null);
+                }
+                while (!table.compareAndSet(oldMap, newMap));
+            }
+
+            @Override
+            public void truncate()
+            {
+                Map<String, Long> oldMap;
+                do
+                {
+                    oldMap = table.get();
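+                    // nothing to truncate when the table is already empty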
+                    if (oldMap.isEmpty())
+                        break;
+                }
+                while (!table.compareAndSet(oldMap, Collections.emptyMap()));
+            }
+
+        };
+
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(vt1, vt2, vt3, vt4)));
 
         CQLTester.setUpClass();
     }
 
     @Test
-    public void testQueries() throws Throwable
+    public void testReadOperationsOnReadOnlyTable() throws Throwable
     {
         assertRowsNet(executeNet("SELECT * FROM test_virtual_ks.vt1 WHERE pk = 'UNKNOWN'"));
 
@@ -232,7 +434,7 @@
     }
 
     @Test
-    public void testQueriesOnTableWithMultiplePks() throws Throwable
+    public void testReadOperationsOnReadOnlyTableWithMultiplePks() throws Throwable
     {
         assertRowsNet(executeNet("SELECT * FROM test_virtual_ks.vt3 WHERE pk1 = 'UNKNOWN' AND pk2 = 'UNKNOWN'"));
 
@@ -249,76 +451,510 @@
     }
 
     @Test
-    public void testModifications() throws Throwable
+    public void testDMLOperationsOnMutableCompositeTable() throws Throwable
     {
-        // check for clean state
-        assertRows(execute("SELECT * FROM test_virtual_ks.vt2"));
+        // check for a clean state
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
 
         // fill the table, test UNLOGGED batch
         execute("BEGIN UNLOGGED BATCH " +
-                "UPDATE test_virtual_ks.vt2 SET value = 1 WHERE key ='pk1';" +
-                "UPDATE test_virtual_ks.vt2 SET value = 2 WHERE key ='pk2';" +
-                "UPDATE test_virtual_ks.vt2 SET value = 3 WHERE key ='pk3';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  1, v2 =  1 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  2, v2 =  2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_2';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  3, v2 =  3 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  4, v2 =  4 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_3';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  5, v2 =  5 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_5';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  6, v2 =  6 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_6';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  7, v2 =  7 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  8, v2 =  8 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 = 'c1_2' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  9, v2 =  9 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 = 10, v2 = 10 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_2';" +
                 "APPLY BATCH");
-        assertRows(execute("SELECT * FROM test_virtual_ks.vt2"),
-                   row("pk1", 1),
-                   row("pk2", 2),
-                   row("pk3", 3));
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L));
 
-        // test that LOGGED batches don't allow virtual table updates
-        assertInvalidMessage("Cannot include a virtual table statement in a logged batch",
-                             "BEGIN BATCH " +
-                             "UPDATE test_virtual_ks.vt2 SET value = 1 WHERE key ='pk1';" +
-                             "UPDATE test_virtual_ks.vt2 SET value = 2 WHERE key ='pk2';" +
-                             "UPDATE test_virtual_ks.vt2 SET value = 3 WHERE key ='pk3';" +
-                             "APPLY BATCH");
+        // update a single column with UPDATE
+        execute("UPDATE test_virtual_ks.vt2 SET v1 = 11 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1'"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 11, 1L));
 
-        // test that UNLOGGED batch doesn't allow mixing updates for regular and virtual tables
-        createTable("CREATE TABLE %s (key text PRIMARY KEY, value int)");
-        assertInvalidMessage("Mutations for virtual and regular tables cannot exist in the same batch",
-                             "BEGIN UNLOGGED BATCH " +
-                             "UPDATE test_virtual_ks.vt2 SET value = 1 WHERE key ='pk1'" +
-                             "UPDATE %s                  SET value = 2 WHERE key ='pk2'" +
-                             "UPDATE test_virtual_ks.vt2 SET value = 3 WHERE key ='pk3'" +
-                             "APPLY BATCH");
+        // update multiple columns with UPDATE
+        execute("UPDATE test_virtual_ks.vt2 SET v1 = 111, v2 = 111 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1'"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 111, 111L));
 
-        // update a single value with UPDATE
-        execute("UPDATE test_virtual_ks.vt2 SET value = 11 WHERE key ='pk1'");
-        assertRows(execute("SELECT * FROM test_virtual_ks.vt2 WHERE key = 'pk1'"),
-                   row("pk1", 11));
+        // update a single column with INSERT
+        execute("INSERT INTO test_virtual_ks.vt2 (pk1, pk2, c1, c2, v2) VALUES ('pk1_1', 'pk2_1', 'c1_1', 'c2_2', 22)");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_2'"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 22L));
 
-        // update a single value with INSERT
-        executeNet("INSERT INTO test_virtual_ks.vt2 (key, value) VALUES ('pk2', 22)");
-        assertRows(execute("SELECT * FROM test_virtual_ks.vt2 WHERE key = 'pk2'"),
-                   row("pk2", 22));
+        // update multiple columns with INSERT
+        execute("INSERT INTO test_virtual_ks.vt2 (pk1, pk2, c1, c2, v1, v2) VALUES ('pk1_1', 'pk2_1', 'c1_1', 'c2_2', 222, 222)");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_2'"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 222, 222L));
 
-        // test that deletions are (currently) rejected
-        assertInvalidMessage("Virtual tables don't support DELETE statements",
-                             "DELETE FROM test_virtual_ks.vt2 WHERE key ='pk1'");
+        // delete a single partition
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L));
 
-        // test that TTL is (currently) rejected with INSERT and UPDATE
-        assertInvalidMessage("Expiring columns are not supported by virtual tables",
-                             "INSERT INTO test_virtual_ks.vt2 (key, value) VALUES ('pk1', 11) USING TTL 86400");
-        assertInvalidMessage("Expiring columns are not supported by virtual tables",
-                             "UPDATE test_virtual_ks.vt2 USING TTL 86400 SET value = 11 WHERE key ='pk1'");
+        // delete a first-level range (one-sided limit)
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 <= 'c1_1'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L));
 
-        // test that LWT is (currently) rejected with virtual tables in batches
-        assertInvalidMessage("Conditional BATCH statements cannot include mutations for virtual tables",
-                             "BEGIN UNLOGGED BATCH " +
-                             "UPDATE test_virtual_ks.vt2 SET value = 3 WHERE key ='pk3' IF value = 2;" +
-                             "APPLY BATCH");
+        // delete a first-level range (two-sided limit)
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 > 'c1_1' AND c1 < 'c1_3'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L));
 
-        // test that LWT is (currently) rejected with virtual tables in UPDATEs
-        assertInvalidMessage("Conditional updates are not supported by virtual tables",
-                             "UPDATE test_virtual_ks.vt2 SET value = 3 WHERE key ='pk3' IF value = 2");
+        // delete multiple rows
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L));
 
-        // test that LWT is (currently) rejected with virtual tables in INSERTs
-        assertInvalidMessage("Conditional updates are not supported by virtual tables",
-                             "INSERT INTO test_virtual_ks.vt2 (key, value) VALUES ('pk2', 22) IF NOT EXISTS");
+        // delete a second-level range (one-sided limit)
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 > 'c2_5'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L));
+
+        // delete a second-level range (two-sided limit)
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 >= 'c2_3' AND c2 < 'c2_5'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L));
+
+        // delete a single row
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_5'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L));
+
+        // delete a single column
+        execute("DELETE v1 FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", null, 3L));
+
+        // truncate
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
     }
 
     @Test
-    public void testInvalidDDLOperations() throws Throwable
+    public void testRangeDeletionWithMulticolumnRestrictionsOnMutableTable() throws Throwable
+    {
+        // check for a clean state
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
+
+        // fill the table, test UNLOGGED batch
+        execute("BEGIN UNLOGGED BATCH " +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  1, v2 =  1 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  2, v2 =  2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_2';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  3, v2 =  3 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  4, v2 =  4 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_3';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  5, v2 =  5 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_5';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  6, v2 =  6 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_6';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  7, v2 =  7 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  8, v2 =  8 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 = 'c1_2' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  9, v2 =  9 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 = 10, v2 = 10 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_2';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 = 11, v2 = 11 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_3';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 = 12, v2 = 12 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_4';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 = 13, v2 = 13 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_5';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 = 14, v2 = 14 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_3' AND c2 = 'c2_1';" +
+                "APPLY BATCH");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_4", 12, 12L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_5", 13, 13L),
+                row("pk1_1", "pk2_3", "c1_3", "c2_1", 14, 14L));
+
+        // Test deletion with a multi-column equality restriction
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND (c1, c2) = ('c1_1', 'c2_5')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_4", 12, 12L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_5", 13, 13L),
+                row("pk1_1", "pk2_3", "c1_3", "c2_1", 14, 14L));
+
+        // Test deletion with multi-column slice restrictions on both sides, with bounds of different lengths
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 >= 'c1_1' AND (c1, c2) <= ('c1_1', 'c2_5')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_4", 12, 12L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_5", 13, 13L),
+                row("pk1_1", "pk2_3", "c1_3", "c2_1", 14, 14L));
+
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND (c1, c2) > ('c1_2', 'c2_3') AND (c1) < ('c1_3')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L),
+                row("pk1_1", "pk2_3", "c1_3", "c2_1", 14, 14L));
+
+        // Repeat the previous multi-column slice deletion; the matching rows are already gone, so this should be a no-op
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 >= 'c1_1' AND (c1, c2) <= ('c1_1', 'c2_5')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L),
+                row("pk1_1", "pk2_3", "c1_3", "c2_1", 14, 14L));
+
+        // Test deletion with a multi-column slice with only an upper bound
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND (c1, c2) < ('c1_2', 'c2_2')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L),
+                row("pk1_1", "pk2_3", "c1_3", "c2_1", 14, 14L));
+
+        // Test deletion with a multi-column slice with only a lower bound
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND (c1, c2) > ('c1_1', 'c2_1')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L),
+                row("pk1_1", "pk2_3", "c1_3", "c2_1", 14, 14L));
+
+        // Test deletion with a multi-column IN restriction
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND (c1, c2) IN (('c1_2', 'c2_2'), ('c1_3', 'c2_1'), ('c1_4', 'c2_1'))");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_2", "c2_1", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_3", 11, 11L));
+
+        // truncate
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
+    }
+
+    @Test
+    public void testDMLOperationsOnMutableNonCompositeTable() throws Throwable
+    {
+        // check for a clean state
+        execute("TRUNCATE test_virtual_ks.vt4");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt4"));
+
+        // fill the table, test UNLOGGED batch
+        execute("BEGIN UNLOGGED BATCH " +
+                "INSERT INTO test_virtual_ks.vt4 (pk, v) VALUES ('pk1', 1);" +
+                "INSERT INTO test_virtual_ks.vt4 (pk, v) VALUES ('pk2', 2);" +
+                "INSERT INTO test_virtual_ks.vt4 (pk, v) VALUES ('pk3', 3);" +
+                "INSERT INTO test_virtual_ks.vt4 (pk, v) VALUES ('pk4', 4);" +
+                "APPLY BATCH");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt4"),
+                row("pk1", 1L),
+                row("pk2", 2L),
+                row("pk3", 3L),
+                row("pk4", 4L));
+
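+        // update a single column with UPDATE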
+        execute("UPDATE test_virtual_ks.vt4 SET v = 3 WHERE pk = 'pk1'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt4"),
+                row("pk1", 3L),
+                row("pk2", 2L),
+                row("pk3", 3L),
+                row("pk4", 4L));
+
+        // update a single column with INSERT
+        execute("INSERT INTO test_virtual_ks.vt4 (pk, v) VALUES ('pk1', 1);");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt4"),
+                row("pk1", 1L),
+                row("pk2", 2L),
+                row("pk3", 3L),
+                row("pk4", 4L));
+
+        // update no columns via INSERT (a no-op for an existing row)
+        execute("INSERT INTO test_virtual_ks.vt4 (pk) VALUES ('pk1');");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt4"),
+                row("pk1", 1L),
+                row("pk2", 2L),
+                row("pk3", 3L),
+                row("pk4", 4L));
+
+        // insert a new primary key only
+        execute("INSERT INTO test_virtual_ks.vt4 (pk) VALUES ('pk5');");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt4"),
+                row("pk1", 1L),
+                row("pk2", 2L),
+                row("pk3", 3L),
+                row("pk4", 4L),
+                row("pk5", null));
+
+        // delete a single partition
+        execute("DELETE FROM test_virtual_ks.vt4 WHERE pk = 'pk2'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt4"),
+                row("pk1", 1L),
+                row("pk3", 3L),
+                row("pk4", 4L),
+                row("pk5", null));
+
+        // delete a single column
+        execute("DELETE v FROM test_virtual_ks.vt4 WHERE pk = 'pk4'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt4"),
+                row("pk1", 1L),
+                row("pk3", 3L),
+                row("pk4", null),
+                row("pk5", null));
+
+        // truncate
+        execute("TRUNCATE test_virtual_ks.vt4");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt4"));
+    }
+
+    @Test
+    public void testInsertRowWithoutRegularColumnsOperationOnMutableTable() throws Throwable
+    {
+        // check for a clean state
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
+
+        // insert a primary key without regular columns
+        execute("INSERT INTO test_virtual_ks.vt2 (pk1, pk2, c1, c2) VALUES ('pk1_1', 'pk2_1', 'c1_1', 'c2_2')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_2'"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_2", null, null));
+
+        // truncate
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
+    }
+
+    @Test
+    public void testDeleteWithInOperationsOnMutableTable() throws Throwable
+    {
+        // check for a clean state
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
+
+        // fill the table, test UNLOGGED batch
+        execute("BEGIN UNLOGGED BATCH " +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  1, v2 =  1 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  2, v2 =  2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_2' AND c1 = 'c1_1' AND c2 = 'c2_2';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  3, v2 =  3 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  4, v2 =  4 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_2' AND c2 = 'c2_3';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  5, v2 =  5 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_1' AND c2 = 'c2_5';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  6, v2 =  6 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 = 'c1_2' AND c2 = 'c2_6';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  7, v2 =  7 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  8, v2 =  8 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 = 'c1_1' AND c2 = 'c2_2';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 =  9, v2 =  9 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_1' AND c2 = 'c2_1';" +
+                "UPDATE test_virtual_ks.vt2 SET v1 = 10, v2 = 10 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 = 'c1_2' AND c2 = 'c2_2';" +
+                "APPLY BATCH");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_1", "pk2_1", "c1_1", "c2_1", 1, 1L),
+                row("pk1_1", "pk2_2", "c1_1", "c2_2", 2, 2L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_2", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_1", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L));
+
+        // delete multiple partitions with IN
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 IN('pk2_1', 'pk2_2')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_2", 8, 8L),
+                row("pk1_1", "pk2_3", "c1_1", "c2_1", 9, 9L),
+                row("pk1_1", "pk2_3", "c1_2", "c2_2", 10, 10L));
+
+        // delete multiple rows via first-level IN
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2_3' AND c1 IN('c1_1', 'c1_2')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_6", 6, 6L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_1", 7, 7L),
+                row("pk1_2", "pk2_2", "c1_1", "c2_2", 8, 8L));
+
+        // delete multiple rows via second-level IN
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_2' AND c1 = 'c1_1' AND c2 IN('c2_1', 'c2_2')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_1", 3, 3L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_3", 4, 4L),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_6", 6, 6L));
+
+        // delete multiple rows with first-level IN and second-level range (one-sided limit)
+        execute("DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 IN('c1_1', 'c1_2') AND c2 <= 'c2_3'");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", 5, 5L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_6", 6, 6L));
+
+        // delete a single column from multiple rows via first-level and second-level IN
+        execute("DELETE v1 FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2_1' AND c1 IN('c1_1', 'c1_2') AND c2 IN('c2_5', 'c2_6')");
+        assertRowsIgnoringOrder(execute("SELECT * FROM test_virtual_ks.vt2"),
+                row("pk1_2", "pk2_1", "c1_1", "c2_5", null, 5L),
+                row("pk1_2", "pk2_1", "c1_2", "c2_6", null, 6L));
+
+        // truncate
+        execute("TRUNCATE test_virtual_ks.vt2");
+        assertEmpty(execute("SELECT * FROM test_virtual_ks.vt2"));
+    }
+
+    @Test
+    public void testInvalidDMLOperationsOnMutableTable() throws Throwable
+    {
+        // test that LOGGED batch doesn't allow virtual table updates
+        assertInvalidMessage("Cannot include a virtual table statement in a logged batch",
+                "BEGIN BATCH " +
+                        "UPDATE test_virtual_ks.vt2 SET v1 = 1 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2';" +
+                        "UPDATE test_virtual_ks.vt2 SET v1 = 2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2';" +
+                        "UPDATE test_virtual_ks.vt2 SET v1 = 3 WHERE pk1 = 'pk1_3' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2';" +
+                        "APPLY BATCH");
+
+        // test that UNLOGGED batch doesn't allow mixing updates for regular and virtual tables
+        createTable("CREATE TABLE %s (pk1 text, pk2 text, c1 text, c2 text, v1 int, v2 bigint, PRIMARY KEY ((pk1, pk2), c1, c2))");
+        assertInvalidMessage("Mutations for virtual and regular tables cannot exist in the same batch",
+                "BEGIN UNLOGGED BATCH " +
+                        "UPDATE test_virtual_ks.vt2 SET v1 = 1 WHERE pk1 = 'pk1_1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2';" +
+                        "UPDATE %s                  SET v1 = 2 WHERE pk1 = 'pk1_2' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2';" +
+                        "UPDATE test_virtual_ks.vt2 SET v1 = 3 WHERE pk1 = 'pk1_3' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2';" +
+                        "APPLY BATCH");
+
+        // test that TIMESTAMP is (currently) rejected with INSERT and UPDATE
+        assertInvalidMessage("Custom timestamp is not supported by virtual tables",
+                "INSERT INTO test_virtual_ks.vt2 (pk1, pk2, c1, c2, v1, v2) VALUES ('pk1', 'pk2', 'c1', 'c2', 1, 11) USING TIMESTAMP 123456789");
+        assertInvalidMessage("Custom timestamp is not supported by virtual tables",
+                "UPDATE test_virtual_ks.vt2 USING TIMESTAMP 123456789 SET v1 = 1, v2 = 11 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2'");
+
+        // test that TTL is (currently) rejected with INSERT and UPDATE
+        assertInvalidMessage("Expiring columns are not supported by virtual tables",
+                "INSERT INTO test_virtual_ks.vt2 (pk1, pk2, c1, c2, v1, v2) VALUES ('pk1', 'pk2', 'c1', 'c2', 1, 11) USING TTL 86400");
+        assertInvalidMessage("Expiring columns are not supported by virtual tables",
+                "UPDATE test_virtual_ks.vt2 USING TTL 86400 SET v1 = 1, v2 = 11 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2'");
+
+        // test that LWT is (currently) rejected with BATCH
+        assertInvalidMessage("Conditional BATCH statements cannot include mutations for virtual tables",
+                "BEGIN UNLOGGED BATCH " +
+                        "UPDATE test_virtual_ks.vt2 SET v1 = 3 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2' IF v1 = 2;" +
+                        "APPLY BATCH");
+
+        // test that LWT is (currently) rejected with INSERT and UPDATE
+        assertInvalidMessage("Conditional updates are not supported by virtual tables",
+                "INSERT INTO test_virtual_ks.vt2 (pk1, pk2, c1, c2, v1) VALUES ('pk1', 'pk2', 'c1', 'c2', 2) IF NOT EXISTS");
+        assertInvalidMessage("Conditional updates are not supported by virtual tables",
+                "UPDATE test_virtual_ks.vt2 SET v1 = 3 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 = 'c2' IF v1 = 2");
+
+        // test that a row DELETE that does not restrict the full primary key with equality relations is (currently) rejected
+        assertInvalidMessage("Some partition key parts are missing: pk2",
+                "DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1' AND c1 = 'c1' AND c2 > 'c2'");
+        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function) for DELETE statements",
+                "DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1' AND pk2 > 'pk2' AND c1 = 'c1' AND c2 > 'c2'");
+        assertInvalidMessage("KEY column \"c2\" cannot be restricted as preceding column \"c1\" is not restricted",
+                "DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c2 > 'c2'");
+        assertInvalidMessage("Clustering column \"c2\" cannot be restricted (preceding column \"c1\" is restricted by a non-EQ relation)",
+                "DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 > 'c1' AND c2 > 'c2'");
+        assertInvalidMessage("DELETE statements must restrict all PRIMARY KEY columns with equality relations",
+                "DELETE FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 > 'c2' IF v1 = 2");
+
+        // test that a column DELETE that does not restrict the full primary key with equality relations is (currently) rejected
+        assertInvalidMessage("Range deletions are not supported for specific columns",
+                "DELETE v1 FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 = 'c1'");
+        assertInvalidMessage("Range deletions are not supported for specific columns",
+                "DELETE v1 FROM test_virtual_ks.vt2 WHERE pk1 = 'pk1' AND pk2 = 'pk2' AND c1 = 'c1' AND c2 > 'c2'");
+    }
+
+    @Test
+    public void testInvalidDMLOperationsOnReadOnlyTable() throws Throwable
+    {
+        assertInvalidMessage("Modification is not supported by table test_virtual_ks.vt1",
+                "INSERT INTO test_virtual_ks.vt1 (pk, c, v1, v2) VALUES ('pk1_1', 'ck1_1', 11, 11)");
+
+        assertInvalidMessage("Modification is not supported by table test_virtual_ks.vt1",
+                "UPDATE test_virtual_ks.vt1 SET v1 = 11, v2 = 11 WHERE pk = 'pk1_1' AND c = 'ck1_1'");
+
+        assertInvalidMessage("Modification is not supported by table test_virtual_ks.vt1",
+                "DELETE FROM test_virtual_ks.vt1 WHERE pk = 'pk1_1' AND c = 'ck1_1'");
+
+        assertInvalidMessage("Error during truncate: Truncation is not supported by table test_virtual_ks.vt1",
+                "TRUNCATE TABLE test_virtual_ks.vt1");
+    }
+
+    @Test
+    public void testInvalidDDLOperationsOnVirtualKeyspaceAndReadOnlyTable() throws Throwable
     {
         assertInvalidMessage("Virtual keyspace 'test_virtual_ks' is not user-modifiable",
                              "DROP KEYSPACE test_virtual_ks");
@@ -338,9 +974,6 @@
         assertInvalidMessage("Virtual keyspace 'test_virtual_ks' is not user-modifiable",
                              "ALTER TABLE test_virtual_ks.vt1 DROP v1");
 
-        assertInvalidMessage("Error during truncate: Cannot truncate virtual tables",
-                             "TRUNCATE TABLE test_virtual_ks.vt1");
-
         assertInvalidMessage("Virtual keyspace 'test_virtual_ks' is not user-modifiable",
                              "CREATE INDEX ON test_virtual_ks.vt1 (v1)");
 
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java
index 246f512..bac7e79 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java
@@ -24,6 +24,8 @@
 import org.junit.Test;
 
 import org.junit.Assert;
+
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -68,7 +70,7 @@
 
         ColumnFamilyStore cfs = Keyspace.open(CQLTester.KEYSPACE).getColumnFamilyStore(currentTable());
         ColumnFamilyStore indexCfs = cfs.indexManager.getAllIndexColumnFamilyStores().iterator().next();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Assert.assertEquals(0.99, cfs.getCrcCheckChance(), 0.0);
         Assert.assertEquals(0.99, cfs.getLiveSSTables().iterator().next().getCrcCheckChance(), 0.0);
@@ -96,19 +98,19 @@
         execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
         execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
         execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
         execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
         execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
         execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.forceMajorCompaction();
 
         //Now let's change via JMX
@@ -182,11 +184,11 @@
             execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
             execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
-        DatabaseDescriptor.setCompactionThroughputMbPerSec(1);
-        List<Future<?>> futures = CompactionManager.instance.submitMaximal(cfs, CompactionManager.getDefaultGcBefore(cfs, FBUtilities.nowInSeconds()), false); 
+        DatabaseDescriptor.setCompactionThroughputMebibytesPerSec(1);
+        List<? extends Future<?>> futures = CompactionManager.instance.submitMaximal(cfs, CompactionManager.getDefaultGcBefore(cfs, FBUtilities.nowInSeconds()), false);
         execute("DROP TABLE %s");
 
         try
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
index 71d632d..0d9e043 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
@@ -21,15 +21,15 @@
 
 import org.junit.Test;
 
-import static junit.framework.Assert.assertNull;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Any tests that do not fit in any other category,
  * migrated from python dtests, CASSANDRA-9160
@@ -293,4 +293,4 @@
         execute("INSERT INTO %s (k, v) VALUES (0, blobAsInt(0x00000001))");
         assertRows(execute("select v from %s where k=0"), row(1));
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/RoleSyntaxTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/RoleSyntaxTest.java
index f72e3dc..fce88f9 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/RoleSyntaxTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/RoleSyntaxTest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.cassandra.cql3.validation.miscellaneous;
 
+import java.util.Arrays;
+
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -25,8 +27,9 @@
 
 public class RoleSyntaxTest extends CQLTester
 {
-    private final String NO_QUOTED_USERNAME = "Quoted strings are are not supported for user names " +
-                                              "and USER is deprecated, please use ROLE";
+    private static final String NO_QUOTED_USERNAME = "Quoted strings are are not supported for user names " +
+                                                     "and USER is deprecated, please use ROLE";
+
     @Test
     public void standardOptionsSyntaxTest() throws Throwable
     {
@@ -100,6 +103,9 @@
         assertValidSyntax("ALTER USER 'u1' WITH PASSWORD 'password'");
         assertValidSyntax("ALTER USER $$u1$$ WITH PASSWORD 'password'");
         assertValidSyntax("ALTER USER $$ u1 ' x $ x ' $$ WITH PASSWORD 'password'");
+        // ALTER with IF EXISTS syntax
+        assertValidSyntax("ALTER ROLE IF EXISTS r1 WITH PASSWORD = 'password'");
+        assertValidSyntax("ALTER USER IF EXISTS u1 WITH PASSWORD 'password'");
         // user names may not be quoted names
         assertInvalidSyntax("ALTER USER \"u1\" WITH PASSWORD 'password'", NO_QUOTED_USERNAME);
     }
@@ -107,49 +113,63 @@
     @Test
     public void grantRevokePermissionsSyntaxTest() throws Throwable
     {
-        // grant/revoke on RoleResource
-        assertValidSyntax("GRANT ALTER ON ROLE r1 TO r2");
-        assertValidSyntax("GRANT ALTER ON ROLE 'r1' TO \"r2\"");
-        assertValidSyntax("GRANT ALTER ON ROLE \"r1\" TO 'r2'");
-        assertValidSyntax("GRANT ALTER ON ROLE $$r1$$ TO $$ r '2' $$");
-        assertValidSyntax("REVOKE ALTER ON ROLE r1 FROM r2");
-        assertValidSyntax("REVOKE ALTER ON ROLE 'r1' FROM \"r2\"");
-        assertValidSyntax("REVOKE ALTER ON ROLE \"r1\" FROM 'r2'");
-        assertValidSyntax("REVOKE ALTER ON ROLE $$r1$$ FROM $$ r '2' $$");
+        for (String r1 : Arrays.asList("r1", "'r1'", "\"r1\"", "$$r1$$"))
+        {
+            for (String r2 : Arrays.asList("r2", "\"r2\"", "'r2'", "$$ r '2' $$"))
+            {
+                // grant/revoke on RoleResource
+                assertValidSyntax(String.format("GRANT ALTER ON ROLE %s TO %s", r1, r2));
+                assertValidSyntax(String.format("GRANT ALTER PERMISSION ON ROLE %s TO %s", r1, r2));
+                assertValidSyntax(String.format("REVOKE ALTER ON ROLE %s FROM %s", r1, r2));
+                assertValidSyntax(String.format("REVOKE ALTER PERMISSION ON ROLE %s FROM %s", r1, r2));
 
-        // grant/revoke on DataResource
-        assertValidSyntax("GRANT SELECT ON KEYSPACE ks TO r1");
-        assertValidSyntax("GRANT SELECT ON KEYSPACE ks TO 'r1'");
-        assertValidSyntax("GRANT SELECT ON KEYSPACE ks TO \"r1\"");
-        assertValidSyntax("GRANT SELECT ON KEYSPACE ks TO $$ r '1' $$");
-        assertValidSyntax("REVOKE SELECT ON KEYSPACE ks FROM r1");
-        assertValidSyntax("REVOKE SELECT ON KEYSPACE ks FROM 'r1'");
-        assertValidSyntax("REVOKE SELECT ON KEYSPACE ks FROM \"r1\"");
-        assertValidSyntax("REVOKE SELECT ON KEYSPACE ks FROM $$ r '1' $$");
+                // grant/revoke multiple permissions in a single statement
+                assertValidSyntax(String.format("GRANT CREATE, ALTER ON ROLE %s TO %s", r1, r2));
+                assertValidSyntax(String.format("GRANT CREATE PERMISSION, ALTER PERMISSION ON ROLE %s TO %s", r1, r2));
+                assertValidSyntax(String.format("REVOKE CREATE, ALTER ON ROLE %s FROM %s", r1, r2));
+                assertValidSyntax(String.format("REVOKE CREATE PERMISSION, ALTER PERMISSION ON ROLE %s FROM %s", r1, r2));
+            }
+        }
+
+        for (String r1 : Arrays.asList("r1", "'r1'", "\"r1\"", "$$r1$$", "$$ r '1' $$"))
+        {
+            // grant/revoke on DataResource
+            assertValidSyntax(String.format("GRANT SELECT ON KEYSPACE ks TO %s", r1));
+            assertValidSyntax(String.format("GRANT SELECT PERMISSION ON KEYSPACE ks TO %s", r1));
+            assertValidSyntax(String.format("REVOKE SELECT ON KEYSPACE ks FROM %s", r1));
+            assertValidSyntax(String.format("REVOKE SELECT PERMISSION ON KEYSPACE ks FROM %s", r1));
+
+            // grant/revoke multiple permissions in a single statement
+            assertValidSyntax(String.format("GRANT MODIFY, SELECT ON KEYSPACE ks TO %s", r1));
+            assertValidSyntax(String.format("GRANT MODIFY PERMISSION, SELECT PERMISSION ON KEYSPACE ks TO %s", r1));
+            assertValidSyntax(String.format("GRANT MODIFY, SELECT ON ALL KEYSPACES TO %s", r1));
+            assertValidSyntax(String.format("GRANT MODIFY PERMISSION, SELECT PERMISSION ON ALL KEYSPACES TO %s", r1));
+            assertValidSyntax(String.format("REVOKE MODIFY, SELECT ON KEYSPACE ks FROM %s", r1));
+            assertValidSyntax(String.format("REVOKE MODIFY PERMISSION, SELECT PERMISSION ON KEYSPACE ks FROM %s", r1));
+            assertValidSyntax(String.format("REVOKE MODIFY, SELECT ON ALL KEYSPACES FROM %s", r1));
+            assertValidSyntax(String.format("REVOKE MODIFY PERMISSION, SELECT PERMISSION ON ALL KEYSPACES FROM %s", r1));
+        }
     }
 
     @Test
     public void listPermissionsSyntaxTest() throws Throwable
     {
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL ROLES OF r1");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL ROLES OF 'r1'");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL ROLES OF \"r1\"");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL ROLES OF $$ r '1' $$");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ROLE 'r1' OF r2");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ROLE \"r1\" OF r2");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ROLE $$ r '1' $$ OF r2");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ROLE 'r1' OF 'r2'");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ROLE \"r1\" OF \"r2\"");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ROLE $$r1$$ OF $$ r '2' $$");
+        for (String r1 : Arrays.asList("r1", "'r1'", "\"r1\"", "$$r1$$", "$$ r '1' $$"))
+        {
+            assertValidSyntax(String.format("LIST ALL PERMISSIONS ON ALL ROLES OF %s", r1));
+            assertValidSyntax(String.format("LIST ALL PERMISSIONS ON ALL KEYSPACES OF %s", r1));
+            assertValidSyntax(String.format("LIST ALL PERMISSIONS OF %s", r1));
+            assertValidSyntax(String.format("LIST MODIFY PERMISSION ON KEYSPACE ks OF %s", r1));
+            assertValidSyntax(String.format("LIST MODIFY, SELECT OF %s", r1));
+            assertValidSyntax(String.format("LIST MODIFY, SELECT PERMISSION ON KEYSPACE ks OF %s", r1));
 
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL KEYSPACES OF r1");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL KEYSPACES OF 'r1'");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL KEYSPACES OF \"r1\"");
-        assertValidSyntax("LIST ALL PERMISSIONS ON ALL KEYSPACES OF $$ r '1' $$");
-        assertValidSyntax("LIST ALL PERMISSIONS OF r1");
-        assertValidSyntax("LIST ALL PERMISSIONS OF 'r1'");
-        assertValidSyntax("LIST ALL PERMISSIONS OF \"r1\"");
-        assertValidSyntax("LIST ALL PERMISSIONS OF $$ r '1' $$");
+            for (String r2 : Arrays.asList("r2", "\"r2\"", "'r2'", "$$ r '2' $$"))
+            {
+                assertValidSyntax(String.format("LIST ALL PERMISSIONS ON ROLE %s OF %s", r1, r2));
+                assertValidSyntax(String.format("LIST ALTER PERMISSION ON ROLE %s OF %s", r1, r2));
+                assertValidSyntax(String.format("LIST ALTER, DROP PERMISSION ON ROLE %s OF %s", r1, r2));
+            }
+        }
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/SSTableMetadataTrackingTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/SSTableMetadataTrackingTest.java
index 5d367de..9ecc3ae 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/SSTableMetadataTrackingTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/SSTableMetadataTrackingTest.java
@@ -19,6 +19,7 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -40,7 +41,7 @@
         createTable("CREATE TABLE %s (a int, b int, c text, PRIMARY KEY (a, b))");
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
         execute("INSERT INTO %s (a,b,c) VALUES (1,1,'1') using timestamp 9999");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
         assertEquals(9999, metadata.minTimestamp);
         assertEquals(Integer.MAX_VALUE, metadata.maxLocalDeletionTime);
@@ -57,7 +58,7 @@
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
         execute("INSERT INTO %s (a,b,c) VALUES (1,1,'1') using timestamp 10000");
         execute("DELETE FROM %s USING TIMESTAMP 9999 WHERE a = 1 and b = 1");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
         assertEquals(9999, metadata.minTimestamp);
         assertEquals(10000, metadata.maxTimestamp);
@@ -76,7 +77,7 @@
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
         execute("INSERT INTO %s (a,b,c) VALUES (1,1,'1') using timestamp 10000");
         execute("DELETE FROM %s USING TIMESTAMP 9999 WHERE a = 1");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
         assertEquals(9999, metadata.minTimestamp);
         assertEquals(10000, metadata.maxTimestamp);
@@ -95,7 +96,7 @@
         createTable("CREATE TABLE %s (a int, b int, c text, PRIMARY KEY (a, b)) WITH gc_grace_seconds = 10000");
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
         execute("DELETE FROM %s USING TIMESTAMP 9999 WHERE a = 1 and b = 1");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(1, cfs.getLiveSSTables().size());
         StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
         assertEquals(9999, metadata.minTimestamp);
@@ -115,7 +116,7 @@
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
         execute("DELETE FROM %s USING TIMESTAMP 9999 WHERE a = 1");
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(1, cfs.getLiveSSTables().size());
         StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
         assertEquals(9999, metadata.minTimestamp);
@@ -135,7 +136,7 @@
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
         execute("INSERT INTO %s (a) VALUES (1) USING TIMESTAMP 9999");
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(1, cfs.getLiveSSTables().size());
         StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
         assertEquals(9999, metadata.minTimestamp);
@@ -154,7 +155,7 @@
         createTable("CREATE TABLE %s (a int, PRIMARY KEY (a))");
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
         execute("DELETE FROM %s USING TIMESTAMP 9999 WHERE a=1");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(1, cfs.getLiveSSTables().size());
         StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
         assertEquals(9999, metadata.minTimestamp);
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/TombstonesTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/TombstonesTest.java
index 85048ae..dc7cccf 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/TombstonesTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/TombstonesTest.java
@@ -31,9 +31,9 @@
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.filter.TombstoneOverwhelmingException;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Test that TombstoneOverwhelmingException gets thrown when it should be and doesn't when it shouldn't be.
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterNTSTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterNTSTest.java
index 4cc95e1..e8f9842 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterNTSTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterNTSTest.java
@@ -19,12 +19,21 @@
 package org.apache.cassandra.cql3.validation.operations;
 
 import java.util.List;
+import java.util.UUID;
 
 import org.junit.Test;
 
 import com.datastax.driver.core.PreparedStatement;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.locator.IEndpointSnitch;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.ReplicaCollection;
+import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.service.StorageService;
 import org.assertj.core.api.Assertions;
 
 import static org.junit.Assert.assertEquals;
@@ -100,4 +109,43 @@
         warnings = ClientWarn.instance.getWarnings();
         assertNull(warnings);
     }
+
+    @Test
+    public void testAlterKeyspaceSystem_AuthWithNTSOnlyAcceptsConfiguredDataCenterNames() throws Throwable
+    {
+        requireAuthentication();
+
+        // Add a peer
+        StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), InetAddressAndPort.getByName("127.0.0.2"));
+
+        // Register an endpoint snitch that returns a fixed data center per endpoint.
+        DatabaseDescriptor.setEndpointSnitch(new IEndpointSnitch()
+        {
+            public String getRack(InetAddressAndPort endpoint) { return RACK1; }
+            public String getDatacenter(InetAddressAndPort endpoint)
+            {
+                if (endpoint.getHostAddress(false).equalsIgnoreCase("127.0.0.2"))
+                    return "datacenter2";
+                return DATA_CENTER;
+            }
+            public <C extends ReplicaCollection<? extends C>> C sortedByProximity(InetAddressAndPort address, C addresses)
+            {
+                return null;
+            }
+
+            public int compareEndpoints(InetAddressAndPort target, Replica r1, Replica r2)
+            {
+                return 0;
+            }
+
+            // NOOP
+            public void gossiperStarting() { }
+
+            public boolean isWorthMergingForRangeQuery(ReplicaCollection<?> merged, ReplicaCollection<?> l1, ReplicaCollection<?> l2) { return false; }
+        });
+
+        // Try altering the system_auth keyspace without the second DC, which has an active node; this must fail.
+        assertInvalidThrow(ConfigurationException.class, "ALTER KEYSPACE system_auth WITH replication = { 'class' : 'NetworkTopologyStrategy', '" + DATA_CENTER + "' : 2 }");
+        execute("ALTER KEYSPACE " + SchemaConstants.AUTH_KEYSPACE_NAME + " WITH replication = {'class' : 'NetworkTopologyStrategy', '" + DATA_CENTER + "' : 1 , '" + DATA_CENTER_REMOTE + "' : 1 }");
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
index 2741f94..fddd59b 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
@@ -19,17 +19,23 @@
 
 import java.util.UUID;
 
+import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.memtable.SkipListMemtable;
+import org.apache.cassandra.db.memtable.TestMemtable;
 import org.apache.cassandra.dht.OrderPreservingPartitioner;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.TokenMetadata;
+import org.apache.cassandra.schema.MemtableParams;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.SchemaKeyspaceTables;
 import org.apache.cassandra.service.StorageService;
@@ -37,6 +43,8 @@
 
 import static java.lang.String.format;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -280,6 +288,14 @@
                                         row(KEYSPACE_PER_TEST, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")),
                                         row(ks1, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER_REMOTE, "3")));
 
+        schemaChange("ALTER KEYSPACE " + ks1 + " WITH replication = { 'class' : 'NetworkTopologyStrategy', '" + DATA_CENTER_REMOTE + "': 3 }");
+
+        // Removal is a two-step process as the "0" filter has been removed from NTS.prepareOptions
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(KEYSPACE, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")),
+                                        row(KEYSPACE_PER_TEST, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")),
+                                        row(ks1, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER_REMOTE, "3")));
+
         // The auto-expansion should not change existing replication counts; do not let the user shoot themselves in the foot
         schemaChange("ALTER KEYSPACE " + ks1 + " WITH replication = { 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1 } AND durable_writes=True");
 
@@ -339,29 +355,93 @@
                                         row(ks1, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER, "2", DATA_CENTER_REMOTE, "2")));
     }
 
-    /**
-     * Test {@link ConfigurationException} thrown on alter keyspace to no DC option in replication configuration.
-     */
     @Test
-    public void testAlterKeyspaceWithNoOptionThrowsConfigurationException() throws Throwable
+    public void testDefaultRF() throws Throwable
     {
-        // Create keyspaces
-        execute("CREATE KEYSPACE testABC WITH replication={ 'class' : 'NetworkTopologyStrategy', '" + DATA_CENTER + "' : 3 }");
-        execute("CREATE KEYSPACE testXYZ WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 3 }");
+        TokenMetadata metadata = StorageService.instance.getTokenMetadata();
+        metadata.clearUnsafe();
+        InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
+        InetAddressAndPort remote = InetAddressAndPort.getByName("127.0.0.4");
+        metadata.updateHostId(UUID.randomUUID(), local);
+        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("A"), local);
+        metadata.updateHostId(UUID.randomUUID(), remote);
+        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("B"), remote);
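+        // A local and a remote endpoint are registered so that the NetworkTopologyStrategy auto-expansion exercised below sees both data centers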
 
-        // Try to alter the created keyspace without any option
-        assertInvalidThrow(ConfigurationException.class, "ALTER KEYSPACE testABC WITH replication={ 'class' : 'NetworkTopologyStrategy' }");
-        assertInvalidThrow(ConfigurationException.class, "ALTER KEYSPACE testXYZ WITH replication={ 'class' : 'SimpleStrategy' }");
+        DatabaseDescriptor.setDefaultKeyspaceRF(3);
 
-        // Make sure that the alter works as expected
-        alterTable("ALTER KEYSPACE testABC WITH replication={ 'class' : 'NetworkTopologyStrategy', '" + DATA_CENTER + "' : 2 }");
-        alterTable("ALTER KEYSPACE testXYZ WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 2 }");
+        // Ensure the default RF is applied at creation time and that the user can override it
+        String ks1 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'SimpleStrategy' }");
+        String ks2 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 2 }");
+        String ks3 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'NetworkTopologyStrategy' }");
+        String ks4 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 2 }");
 
-        // clean up
-        execute("DROP KEYSPACE IF EXISTS testABC");
-        execute("DROP KEYSPACE IF EXISTS testXYZ");
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks1, true, map("class","org.apache.cassandra.locator.SimpleStrategy","replication_factor", Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF()))),
+                                        row(ks2, true, map("class","org.apache.cassandra.locator.SimpleStrategy","replication_factor", "2")),
+                                        row(ks3, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER,
+                                                           Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF()), DATA_CENTER_REMOTE, Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF()))),
+                                        row(ks4, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER, "2", DATA_CENTER_REMOTE, "2")));
+
+        // Ensure ALTER KEYSPACE does not fall back to the default RF unless changing strategy from NTS to SimpleStrategy
+        // no-change alter
+        schemaChange("ALTER KEYSPACE " + ks4 + " WITH durable_writes=true");
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks4, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER, "2", DATA_CENTER_REMOTE, "2")));
+        schemaChange("ALTER KEYSPACE " + ks4 + " WITH replication={ 'class' : 'NetworkTopologyStrategy' }");
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks4, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER, "2", DATA_CENTER_REMOTE, "2")));
+
+        // change from SS to NTS
+        // without specifying RF
+        schemaChange("ALTER KEYSPACE " + ks2 + " WITH replication={ 'class' : 'NetworkTopologyStrategy' } AND durable_writes=true");
+        // verify that RF of SS is retained
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks2, true, map("class","org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER, "2", DATA_CENTER_REMOTE, "2")));
+        // with specifying RF
+        schemaChange("ALTER KEYSPACE " + ks1 + " WITH replication={ 'class' : 'NetworkTopologyStrategy', 'replication_factor': '1' } AND durable_writes=true");
+        // verify that explicitly mentioned RF is taken into account
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks1, true, map("class","org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER, "1", DATA_CENTER_REMOTE, "1")));
+
+        // change from NTS to SS
+        // without specifying RF
+        schemaChange("ALTER KEYSPACE " + ks4 + " WITH replication={ 'class' : 'SimpleStrategy' } AND durable_writes=true");
+        // verify that default RF is taken into account
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks4, true, map("class","org.apache.cassandra.locator.SimpleStrategy","replication_factor", Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF()))));
+        // with specifying RF
+        schemaChange("ALTER KEYSPACE " + ks3 + " WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : '1' } AND durable_writes=true");
+        // verify that explicitly mentioned RF is taken into account
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks3, true, map("class","org.apache.cassandra.locator.SimpleStrategy","replication_factor", "1")));
+
+        // verify that updating the default RF does not affect existing keyspaces
+        // create keyspaces
+        String ks5 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'SimpleStrategy' }");
+        String ks6 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'NetworkTopologyStrategy' }");
+        String oldRF = Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF());
+        // change default
+        DatabaseDescriptor.setDefaultKeyspaceRF(2);
+        // verify RF of existing keyspaces
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks5, true, map("class","org.apache.cassandra.locator.SimpleStrategy","replication_factor", oldRF)));
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(ks6, true, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy",
+                                                           DATA_CENTER, oldRF, DATA_CENTER_REMOTE, oldRF)));
+
+        // clean up config change
+        DatabaseDescriptor.setDefaultKeyspaceRF(1);
+
+        // clean up keyspaces
+        execute(String.format("DROP KEYSPACE IF EXISTS %s", ks1));
+        execute(String.format("DROP KEYSPACE IF EXISTS %s", ks2));
+        execute(String.format("DROP KEYSPACE IF EXISTS %s", ks3));
+        execute(String.format("DROP KEYSPACE IF EXISTS %s", ks4));
+        execute(String.format("DROP KEYSPACE IF EXISTS %s", ks5));
+        execute(String.format("DROP KEYSPACE IF EXISTS %s", ks6));
     }
 
     /**
      * Test {@link ConfigurationException} thrown when altering a keyspace to invalid DC option in replication configuration.
      */
@@ -431,6 +511,37 @@
         alterTable("alter table %s add v1 int");
     }
 
+    @Test(expected = InvalidRequestException.class)
+    public void testDropFixedAddVariable() throws Throwable
+    {
+        createTable("create table %s (k int, c int, v int, PRIMARY KEY (k, c))");
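+        // v is a fixed-length int; re-adding it as a variable-length varint after the drop must be rejected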
+        execute("alter table %s drop v");
+        execute("alter table %s add v varint");
+    }
+
+    @Test(expected = InvalidRequestException.class)
+    public void testDropFixedCollectionAddVariableCollection() throws Throwable
+    {
+        createTable("create table %s (k int, c int, v list<int>, PRIMARY KEY (k, c))");
+        execute("alter table %s drop v");
+        execute("alter table %s add v list<varint>");
+    }
+
+    @Test(expected = InvalidRequestException.class)
+    public void testDropSimpleAddComplex() throws Throwable
+    {
+        createTable("create table %s (k int, c int, v set<text>, PRIMARY KEY (k, c))");
+        execute("alter table %s drop v");
+        execute("alter table %s add v blob");
+    }
+
+    @Test(expected = SyntaxException.class)
+    public void renameToEmptyTest() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int, c1 int, v int, PRIMARY KEY (k, c1))");
+        execute("ALTER TABLE %s RENAME c1 TO \"\"");
+    }
+
     @Test
     // tests CASSANDRA-9565
     public void testDoubleWith() throws Throwable
@@ -444,71 +555,103 @@
     }
 
     @Test
+    public void testAlterTableWithMemtable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))");
+        assertSame(MemtableParams.DEFAULT.factory(), getCurrentColumnFamilyStore().metadata().params.memtable.factory());
+        assertSchemaOption("memtable", null);
+
+        testMemtableConfig("skiplist", SkipListMemtable.FACTORY, SkipListMemtable.class);
+        testMemtableConfig("test_fullname", TestMemtable.FACTORY, SkipListMemtable.class);
+        testMemtableConfig("test_shortname", SkipListMemtable.FACTORY, SkipListMemtable.class);
+
+        // verify memtable does not change on other ALTER
+        alterTable("ALTER TABLE %s"
+                   + " WITH compression = {'class': 'LZ4Compressor'};");
+        assertSchemaOption("memtable", "test_shortname");
+
+        testMemtableConfig("default", MemtableParams.DEFAULT.factory(), SkipListMemtable.class);
+
+
+        assertAlterTableThrowsException(ConfigurationException.class,
+                                        "The 'class_name' option must be specified.",
+                                        "ALTER TABLE %s"
+                                           + " WITH memtable = 'test_empty_class';");
+
+        assertAlterTableThrowsException(ConfigurationException.class,
+                                        "Memtable class org.apache.cassandra.db.memtable.SkipListMemtable does not accept any futher parameters, but {invalid=throw} were given.",
+                                        "ALTER TABLE %s"
+                                           + " WITH memtable = 'test_invalid_param';");
+
+        assertAlterTableThrowsException(ConfigurationException.class,
+                                        "Could not create memtable factory for class NotExisting",
+                                        "ALTER TABLE %s"
+                                           + " WITH memtable = 'test_unknown_class';");
+
+        assertAlterTableThrowsException(ConfigurationException.class,
+                                        "Memtable class org.apache.cassandra.db.memtable.TestMemtable does not accept any futher parameters, but {invalid=throw} were given.",
+                                        "ALTER TABLE %s"
+                                           + " WITH memtable = 'test_invalid_extra_param';");
+
+        assertAlterTableThrowsException(ConfigurationException.class,
+                                        "Could not create memtable factory for class " + CreateTest.InvalidMemtableFactoryMethod.class.getName(),
+                                        "ALTER TABLE %s"
+                                           + " WITH memtable = 'test_invalid_factory_method';");
+
+        assertAlterTableThrowsException(ConfigurationException.class,
+                                        "Could not create memtable factory for class " + CreateTest.InvalidMemtableFactoryField.class.getName(),
+                                        "ALTER TABLE %s"
+                                           + " WITH memtable = 'test_invalid_factory_field';");
+
+        assertAlterTableThrowsException(ConfigurationException.class,
+                                        "Memtable configuration \"unknown\" not found.",
+                                        "ALTER TABLE %s"
+                                           + " WITH memtable = 'unknown';");
+    }
+
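+    // Helper: asserts the value of a single schema column for the current table's row in system_schema.tables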
+    void assertSchemaOption(String option, Object expected) throws Throwable
+    {
+        assertRows(execute(format("SELECT " + option + " FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
+                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
+                                  SchemaKeyspaceTables.TABLES),
+                           KEYSPACE,
+                           currentTable()),
+                   row(expected));
+    }
+
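+    // Helper: applies the given memtable configuration via ALTER TABLE and verifies the factory, the active memtable class and the schema value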
+    private void testMemtableConfig(String memtableConfig, Memtable.Factory factoryInstance, Class<? extends Memtable> memtableClass) throws Throwable
+    {
+        alterTable("ALTER TABLE %s"
+                   + " WITH memtable = '" + memtableConfig + "';");
+        assertSame(factoryInstance, getCurrentColumnFamilyStore().metadata().params.memtable.factory());
+        Assert.assertTrue(memtableClass.isInstance(getCurrentColumnFamilyStore().getTracker().getView().getCurrentMemtable()));
+        assertSchemaOption("memtable", MemtableParams.DEFAULT.configurationKey().equals(memtableConfig) ? null : memtableConfig);
+    }
+
+    @Test
     public void testAlterTableWithCompression() throws Throwable
     {
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor"));
 
         alterTable("ALTER TABLE %s WITH compression = { 'class' : 'SnappyCompressor', 'chunk_length_in_kb' : 32 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor"));
 
         alterTable("ALTER TABLE %s WITH compression = { 'class' : 'LZ4Compressor', 'chunk_length_in_kb' : 64 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "64", "class", "org.apache.cassandra.io.compress.LZ4Compressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "64", "class", "org.apache.cassandra.io.compress.LZ4Compressor"));
 
         alterTable("ALTER TABLE %s WITH compression = { 'class' : 'LZ4Compressor', 'min_compress_ratio' : 2 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor", "min_compress_ratio", "2.0")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor", "min_compress_ratio", "2.0"));
 
         alterTable("ALTER TABLE %s WITH compression = { 'class' : 'LZ4Compressor', 'min_compress_ratio' : 1 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor", "min_compress_ratio", "1.0")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor", "min_compress_ratio", "1.0"));
 
         alterTable("ALTER TABLE %s WITH compression = { 'class' : 'LZ4Compressor', 'min_compress_ratio' : 0 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor"));
 
         alterTable("ALTER TABLE %s WITH compression = { 'class' : 'SnappyCompressor', 'chunk_length_in_kb' : 32 };");
         alterTable("ALTER TABLE %s WITH compression = { 'enabled' : 'false'};");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("enabled", "false")));
+        assertSchemaOption("compression", map("enabled", "false"));
 
         assertAlterTableThrowsException(ConfigurationException.class,
                                         "Missing sub-option 'class' for the 'compression' option.",
@@ -543,7 +686,7 @@
     {
         assertThrowsException(clazz, msg, () -> {alterKeyspaceMayThrow(stmt);});
     }
-    
+
     private void assertAlterTableThrowsException(Class<? extends Throwable> clazz, String msg, String stmt)
     {
         assertThrowsException(clazz, msg, () -> {alterTableMayThrow(stmt);});
@@ -655,4 +798,111 @@
         assertInvalidMessage("Cannot add new column to a COMPACT STORAGE table",
                              "ALTER TABLE %s ADD column1 text");
     }
+
+    @Test
+    public void testAlterTableWithoutCreateTableOrIfExistsClause()
+    {
+        String tbl1 = KEYSPACE + "." + createTableName();
+        assertAlterTableThrowsException(InvalidRequestException.class, String.format("Table '%s' doesn't exist", tbl1),
+                                        "ALTER TABLE %s ADD myCollection list<text>;");
+    }
+
+    @Test
+    public void testAlterTableWithoutCreateTableWithIfExists() throws Throwable
+    {
+        String tbl1 = KEYSPACE + "." + createTableName();
+        assertNull(execute(String.format("ALTER TABLE IF EXISTS %s ADD myCollection list<text>;", tbl1)));
+    }
+
+    @Test
+    public void testAlterTableWithIfExists() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)); ");
+        alterTable("ALTER TABLE IF EXISTS %s ADD myCollection list<text>;");
+        execute("INSERT INTO %s (a, b, myCollection) VALUES (1, 2, ['first element']);");
+
+        assertRows(execute("SELECT * FROM %s;"), row(1, 2, list("first element")));
+    }
+
+    @Test
+    public void testAlterTableAddColWithIfNotExists() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)); ");
+        alterTable("ALTER TABLE %s ADD IF NOT EXISTS a int;");
+        execute("INSERT INTO %s (a, b) VALUES (1, 2);");
+
+        assertRows(execute("SELECT * FROM %s;"), row(1, 2));
+    }
+
+    @Test
+    public void testAlterTableAddExistingColumnWithoutIfNotExists()
+    {
+        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)); ");
+        assertAlterTableThrowsException(InvalidRequestException.class,
+                                        String.format("Column with name '%s' already exists", "a"),
+                                        "ALTER TABLE IF EXISTS %s ADD a int");
+    }
+
+    @Test
+    public void testAlterTableDropNotExistingColWithIfExists() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)); ");
+        alterTable("ALTER TABLE %s DROP IF EXISTS myCollection");
+        execute("INSERT INTO %s (a, b) VALUES (1, 2);");
+
+        assertRows(execute("SELECT * FROM %s;"), row(1, 2));
+    }
+
+    @Test
+    public void testAlterTableDropExistingColWithIfExists() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, myCollection list<text>, PRIMARY KEY (a, b)); ");
+        alterTable("ALTER TABLE %s DROP IF EXISTS myCollection");
+        execute("INSERT INTO %s (a, b) VALUES (1, 2);");
+
+        assertRows(execute("SELECT * FROM %s;"), row(1, 2));
+    }
+
+    @Test
+    public void testAlterTableRenameExistingColWithIfExists() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, myCollection list<text>, PRIMARY KEY (a, b)); ");
+        alterTable("ALTER TABLE %s RENAME IF EXISTS a TO y AND b to z");
+        execute("INSERT INTO %s (y, z, myCollection) VALUES (1, 2, ['first element']);");
+        assertRows(execute("SELECT * FROM %s;"), row(1, 2, list("first element")));
+    }
+
+    @Test
+    public void testAlterTypeWithIfExists() throws Throwable
+    {
+        // frozen UDT used directly in a partition key
+        String type1 = createType("CREATE TYPE %s (v1 int)");
+        String table1 = createTable("CREATE TABLE %s (pk frozen<" + type1 + ">, val int, PRIMARY KEY(pk));");
+
+        // frozen UDT used in a frozen UDT used in a partition key
+        String type2 = createType("CREATE TYPE %s (v1 frozen<" + type1 + ">, v2 frozen<" + type1 + ">)");
+        String table2 = createTable("CREATE TABLE %s (pk frozen<" + type2 + ">, val int, PRIMARY KEY(pk));");
+
+        // frozen UDT used in a frozen collection used in a partition key
+        String table3 = createTable("CREATE TABLE %s (pk frozen<list<frozen<" + type1 + ">>>, val int, PRIMARY KEY(pk));");
+
+        // assert that ALTER fails and that the error message contains the names of all the tables referencing the type
+        assertInvalidMessage(table1, format("ALTER TYPE %s.%s ADD v2 int;", keyspace(), type1));
+        assertInvalidMessage(table2, format("ALTER TYPE %s.%s ADD v2 int;", keyspace(), type1));
+        assertInvalidMessage(table3, format("ALTER TYPE %s.%s ADD v2 int;", keyspace(), type1));
+    }
+
+    @Test
+    public void testAlterKeyspaceWithIfExists() throws Throwable
+    {
+        String ks1 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
+        execute("ALTER KEYSPACE IF EXISTS " + ks1 + " WITH durable_writes=true");
+
+        assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
+                                        row(KEYSPACE, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")),
+                                        row(KEYSPACE_PER_TEST, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")),
+                                        row(ks1, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")));
+
+        assertInvalidThrow(InvalidRequestException.class, "ALTER KEYSPACE ks1 WITH replication= { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/AutoSnapshotTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/AutoSnapshotTest.java
new file mode 100644
index 0000000..eedcdb0
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/AutoSnapshotTest.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3.validation.operations;
+
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.DurationSpec;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
+import org.assertj.core.api.Condition;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.lang.String.format;
+import static org.apache.cassandra.db.ColumnFamilyStore.SNAPSHOT_DROP_PREFIX;
+import static org.assertj.core.api.Assertions.assertThat;
+
+@RunWith(Parameterized.class)
+public class AutoSnapshotTest extends CQLTester
+{
+    static int TTL_SECS = 1;
+
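+    // Auto-snapshot settings in effect before this test class runs; restored in afterClass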
+    public static Boolean enabledBefore;
+    public static DurationSpec.IntSecondsBound ttlBefore;
+
+    @BeforeClass
+    public static void beforeClass()
+    {
+        enabledBefore = DatabaseDescriptor.isAutoSnapshot();
+        ttlBefore = DatabaseDescriptor.getAutoSnapshotTtl();
+    }
+
+    @AfterClass
+    public static void afterClass()
+    {
+        DatabaseDescriptor.setAutoSnapshot(enabledBefore);
+        DatabaseDescriptor.setAutoSnapshotTtl(ttlBefore);
+    }
+
+    // Dynamic parameters used during tests
+    @Parameterized.Parameter(0)
+    public Boolean autoSnapshotEnabled;
+
+    @Parameterized.Parameter(1)
+    public DurationSpec.IntSecondsBound autoSnapshotTtl;
+
+    @Before
+    public void beforeTest() throws Throwable
+    {
+        super.beforeTest();
+        // Make sure we're testing the correct parameterized settings
+        DatabaseDescriptor.setAutoSnapshot(autoSnapshotEnabled);
+        DatabaseDescriptor.setAutoSnapshotTtl(autoSnapshotTtl);
+    }
+
+    // Test all combinations of auto_snapshot=[true, false] and ttl=[1s, null]
+    @Parameterized.Parameters( name = "enabled={0},ttl={1}" )
+    public static Collection options() {
+        return Arrays.asList(new Object[][] {
+        { true, new DurationSpec.IntSecondsBound(TTL_SECS, SECONDS) },
+        { false, new DurationSpec.IntSecondsBound(TTL_SECS, SECONDS) },
+        { true, null },
+        { false, null },
+        });
+    }
+
+    @Test
+    public void testAutoSnapshotOnTruncate() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY(a, b))");
+        // Check there are no snapshots
+        ColumnFamilyStore tableDir = getCurrentColumnFamilyStore();
+        assertThat(tableDir.listSnapshots()).isEmpty();
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 1);
+
+        flush();
+
+        execute("TRUNCATE %s");
+
+        verifyAutoSnapshot(ColumnFamilyStore.SNAPSHOT_TRUNCATE_PREFIX, tableDir, currentTable());
+    }
+
+    @Test
+    public void testAutoSnapshotOnDrop() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY(a, b))");
+        // Check there are no snapshots
+        ColumnFamilyStore tableDir = getCurrentColumnFamilyStore();
+        assertThat(tableDir.listSnapshots()).isEmpty();
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 1);
+
+        flush();
+
+        execute("DROP TABLE %s");
+
+        verifyAutoSnapshot(SNAPSHOT_DROP_PREFIX, tableDir, currentTable());
+    }
+
+    @Test
+    public void testAutoSnapshotOnDropKeyspace() throws Throwable
+    {
+        // Create tables A and B and flush
+        ColumnFamilyStore tableA = createAndPopulateTable();
+        ColumnFamilyStore tableB = createAndPopulateTable();
+        flush();
+
+        // Check no snapshots
+        assertThat(tableA.listSnapshots()).isEmpty();
+        assertThat(tableB.listSnapshots()).isEmpty();
+
+        // Drop keyspace, should have snapshot for table A and B
+        execute(format("DROP KEYSPACE %s", keyspace()));
+        verifyAutoSnapshot(SNAPSHOT_DROP_PREFIX, tableA, tableA.name);
+        verifyAutoSnapshot(SNAPSHOT_DROP_PREFIX, tableB, tableB.name);
+    }
+
+    private ColumnFamilyStore createAndPopulateTable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY(a, b))");
+        // Snapshot assertions are performed by the callers; here we only create and populate the table
+        ColumnFamilyStore tableA = getCurrentColumnFamilyStore();
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 1);
+        return tableA;
+    }
+
+    /**
+     * Verify that:
+     * - A snapshot is created when auto_snapshot = true.
+     * - TTL is added to the snapshot when auto_snapshot_ttl != null
+     */
+    private void verifyAutoSnapshot(String snapshotPrefix, ColumnFamilyStore tableDir, String expectedTableName)
+    {
+        Map<String, TableSnapshot> snapshots = tableDir.listSnapshots();
+        if (autoSnapshotEnabled)
+        {
+            assertThat(snapshots).hasSize(1);
+            assertThat(snapshots).hasKeySatisfying(new Condition<>(k -> k.startsWith(snapshotPrefix), "is dropped snapshot"));
+            TableSnapshot snapshot = snapshots.values().iterator().next();
+            assertThat(snapshot.getTableName()).isEqualTo(expectedTableName);
+            if (autoSnapshotTtl == null)
+            {
+                // check that the snapshot has NO TTL
+                assertThat(snapshot.isExpiring()).isFalse();
+            }
+            else
+            {
+                // check that snapshot has TTL and is expired after 1 second
+                assertThat(snapshot.isExpiring()).isTrue();
+                Uninterruptibles.sleepUninterruptibly(TTL_SECS, SECONDS);
+                assertThat(snapshot.isExpired(Instant.now())).isTrue();
+            }
+        }
+        else
+        {
+            // No snapshot should be created when auto_snapshot = false
+            assertThat(snapshots).isEmpty();
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageSplit1Test.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageSplit1Test.java
new file mode 100644
index 0000000..80bf51c
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageSplit1Test.java
@@ -0,0 +1,2400 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.cql3.validation.operations;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.UUID;
+
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.validation.entities.SecondaryIndexTest;
+
+import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
+import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.fail;
+
+public class CompactStorageSplit1Test extends CQLTester
+{
+    public static final String compactOption = " WITH COMPACT STORAGE";
+
+    @Test
+    public void testSparseCompactTableIndex() throws Throwable
+    {
+        createTable("CREATE TABLE %s (key ascii PRIMARY KEY, val ascii) WITH COMPACT STORAGE");
+
+        // Indexes are allowed only on sparse compact tables
+        createIndex("CREATE INDEX ON %s(val)");
+        for (int i = 0; i < 10; i++)
+            execute("INSERT INTO %s (key, val) VALUES (?, ?)", Integer.toString(i), Integer.toString(i * 10));
+
+        alterTable("ALTER TABLE %s DROP COMPACT STORAGE");
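+        // Dropping COMPACT STORAGE exposes the previously hidden column/value columns, which show up as nulls below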
+
+        assertRows(execute("SELECT * FROM %s WHERE val = '50'"),
+                   row("5", null, "50", null));
+        assertRows(execute("SELECT * FROM %s WHERE key = '5'"),
+                   row("5", null, "50", null));
+    }
+
+    @Test
+    public void before() throws Throwable
+    {
+        createTable("CREATE TABLE %s (key TEXT, column TEXT, value BLOB, PRIMARY KEY (key, column)) WITH COMPACT STORAGE");
+
+        ByteBuffer largeBytes = ByteBuffer.wrap(new byte[100000]);
+        execute("INSERT INTO %s (key, column, value) VALUES (?, ?, ?)", "test", "a", largeBytes);
+        ByteBuffer smallBytes = ByteBuffer.wrap(new byte[10]);
+        execute("INSERT INTO %s (key, column, value) VALUES (?, ?, ?)", "test", "c", smallBytes);
+
+        flush();
+
+        assertRows(execute("SELECT column FROM %s WHERE key = ? AND column IN (?, ?, ?)", "test", "c", "a", "b"),
+                   row("a"),
+                   row("c"));
+    }
+
+    @Test
+    public void testStaticCompactTables() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k text PRIMARY KEY, v1 int, v2 text) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (k, v1, v2) values (?, ?, ?)", "first", 1, "value1");
+        execute("INSERT INTO %s (k, v1, v2) values (?, ?, ?)", "second", 2, "value2");
+        execute("INSERT INTO %s (k, v1, v2) values (?, ?, ?)", "third", 3, "value3");
+
+        assertRows(execute("SELECT * FROM %s WHERE k = ?", "first"),
+                   row("first", 1, "value1")
+        );
+
+        assertRows(execute("SELECT v2 FROM %s WHERE k = ?", "second"),
+                   row("value2")
+        );
+
+        // Murmur3 order
+        assertRows(execute("SELECT * FROM %s"),
+                   row("third", 3, "value3"),
+                   row("second", 2, "value2"),
+                   row("first", 1, "value1")
+        );
+    }
+
+    @Test
+    public void testCompactStorageUpdateWithNull() throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 1, 1)");
+
+        flush();
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", null, 0, 0);
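+        // In a compact table, nulling the only regular column removes the row, so only (0, 1, 1) should remain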
+
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))", 0, 0, 1),
+                   row(0, 1, 1)
+        );
+    }
+
+    /**
+     * Migrated from cql_tests.py:TestCQL.collection_compact_test()
+     */
+    @Test
+    public void testCompactCollections() throws Throwable
+    {
+        String tableName = KEYSPACE + "." + createTableName();
+        assertInvalid(String.format("CREATE TABLE %s (user ascii PRIMARY KEY, mails list < text >) WITH COMPACT STORAGE;", tableName));
+    }
+
+    /**
+     * Check for a table with counters,
+     * migrated from cql_tests.py:TestCQL.counters_test()
+     */
+    @Test
+    public void testCounters() throws Throwable
+    {
+        createTable("CREATE TABLE %s (userid int, url text, total counter, PRIMARY KEY (userid, url)) WITH COMPACT STORAGE");
+
+        execute("UPDATE %s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(1L));
+
+        execute("UPDATE %s SET total = total - 4 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(-3L));
+
+        execute("UPDATE %s SET total = total+1 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(-2L));
+
+        execute("UPDATE %s SET total = total -2 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(-4L));
+
+        execute("UPDATE %s SET total += 6 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(2L));
+
+        execute("UPDATE %s SET total -= 1 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(1L));
+
+        execute("UPDATE %s SET total += -2 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(-1L));
+
+        execute("UPDATE %s SET total -= -2 WHERE userid = 1 AND url = 'http://foo.com'");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(1L));
+    }
+
+
+    @Test
+    public void testCounterFiltering() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, a counter) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 10; i++)
+            execute("UPDATE %s SET a = a + ? WHERE k = ?", (long) i, i);
+
+        execute("UPDATE %s SET a = a + ? WHERE k = ?", 6L, 10);
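+        // k = 10 deliberately ends up with the same counter value as k = 6, so the equality filter below must match both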
+
+        // GT
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a > ? ALLOW FILTERING", 5L),
+                                row(6, 6L),
+                                row(7, 7L),
+                                row(8, 8L),
+                                row(9, 9L),
+                                row(10, 6L));
+
+        // GTE
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a >= ? ALLOW FILTERING", 6L),
+                                row(6, 6L),
+                                row(7, 7L),
+                                row(8, 8L),
+                                row(9, 9L),
+                                row(10, 6L));
+
+        // LT
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a < ? ALLOW FILTERING", 3L),
+                                row(0, 0L),
+                                row(1, 1L),
+                                row(2, 2L));
+
+        // LTE
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a <= ? ALLOW FILTERING", 3L),
+                                row(0, 0L),
+                                row(1, 1L),
+                                row(2, 2L),
+                                row(3, 3L));
+
+        // EQ
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a = ? ALLOW FILTERING", 6L),
+                                row(6, 6L),
+                                row(10, 6L));
+    }
+
+    /**
+     * Test for the bug of #11726.
+     */
+    @Test
+    public void testCounterAndColumnSelection() throws Throwable
+    {
+        for (String compactStorageClause : new String[]{ "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (k int PRIMARY KEY, c counter)" + compactStorageClause);
+
+            // Flush the two updates into different sstables so that the following select does a merge, which is
+            // what triggers the problem from #11726
+
+            execute("UPDATE %s SET c = c + ? WHERE k = ?", 1L, 0);
+
+            flush();
+
+            execute("UPDATE %s SET c = c + ? WHERE k = ?", 1L, 0);
+
+            flush();
+
+            // Querying, but not including, the counter. Pre-CASSANDRA-11726 this made us query the counter but include
+            // its value, which broke at merge (post-CASSANDRA-11726, special cases ensure values are never skipped).
+            assertRows(execute("SELECT k FROM %s"), row(0));
+        }
+    }
+
+    /*
+     * Check that a counter batch works as intended
+     */
+    @Test
+    public void testCounterBatch() throws Throwable
+    {
+        createTable("CREATE TABLE %s (userid int, url text, total counter, PRIMARY KEY (userid, url)) WITH COMPACT STORAGE");
+
+        // Ensure we handle updates to the same CQL row in the same partition properly
+        execute("BEGIN UNLOGGED BATCH " +
+                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
+                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
+                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
+                "APPLY BATCH; ");
+        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(3L));
+
+        // Ensure we handle different CQL rows in the same partition properly
+        execute("BEGIN UNLOGGED BATCH " +
+                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://bar.com'; " +
+                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://baz.com'; " +
+                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://bad.com'; " +
+                "APPLY BATCH; ");
+        assertRows(execute("SELECT url, total FROM %s WHERE userid = 1"),
+                   row("http://bad.com", 1L),
+                   row("http://bar.com", 1L),
+                   row("http://baz.com", 1L),
+                   row("http://foo.com", 3L)); // from previous batch
+
+        // Different counters in the same CQL Row
+        createTable("CREATE TABLE %s (userid int, url text, first counter, second counter, third counter, PRIMARY KEY (userid, url))");
+        execute("BEGIN UNLOGGED BATCH " +
+                "UPDATE %1$s SET first = first + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
+                "UPDATE %1$s SET first = first + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
+                "UPDATE %1$s SET second = second + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
+                "APPLY BATCH; ");
+        assertRows(execute("SELECT first, second, third FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
+                   row(2L, 1L, null));
+
+        // Different counters in different CQL Rows
+        execute("BEGIN UNLOGGED BATCH " +
+                "UPDATE %1$s SET first = first + 1 WHERE userid = 1 AND url = 'http://bad.com'; " +
+                "UPDATE %1$s SET first = first + 1, second = second + 1 WHERE userid = 1 AND url = 'http://bar.com'; " +
+                "UPDATE %1$s SET first = first - 1, second = second - 1 WHERE userid = 1 AND url = 'http://bar.com'; " +
+                "UPDATE %1$s SET second = second + 1 WHERE userid = 1 AND url = 'http://baz.com'; " +
+                "APPLY BATCH; ");
+        assertRows(execute("SELECT url, first, second, third FROM %s WHERE userid = 1"),
+                   row("http://bad.com", 1L, null, null),
+                   row("http://bar.com", 0L, 0L, null),
+                   row("http://baz.com", null, 1L, null),
+                   row("http://foo.com", 2L, 1L, null)); // from previous batch
+
+
+        // Different counters in different partitions
+        execute("BEGIN UNLOGGED BATCH " +
+                "UPDATE %1$s SET first = first + 1 WHERE userid = 2 AND url = 'http://bad.com'; " +
+                "UPDATE %1$s SET first = first + 1, second = second + 1 WHERE userid = 3 AND url = 'http://bar.com'; " +
+                "UPDATE %1$s SET first = first - 1, second = second - 1 WHERE userid = 4 AND url = 'http://bar.com'; " +
+                "UPDATE %1$s SET second = second + 1 WHERE userid = 5 AND url = 'http://baz.com'; " +
+                "APPLY BATCH; ");
+        assertRowsIgnoringOrder(execute("SELECT userid, url, first, second, third FROM %s WHERE userid IN (2, 3, 4, 5)"),
+                                row(2, "http://bad.com", 1L, null, null),
+                                row(3, "http://bar.com", 1L, 1L, null),
+                                row(4, "http://bar.com", -1L, -1L, null),
+                                row(5, "http://baz.com", null, 1L, null));
+    }
+
+    /**
+     * from FrozenCollectionsTest
+     */
+
+    @Test
+    public void testClusteringKeyUsageSet() throws Throwable
+    {
+        testClusteringKeyUsage("set<int>",
+                               set(),
+                               set(1, 2, 3),
+                               set(4, 5, 6),
+                               set(7, 8, 9));
+    }
+
+    @Test
+    public void testClusteringKeyUsageList() throws Throwable
+    {
+        testClusteringKeyUsage("list<int>",
+                               list(),
+                               list(1, 2, 3),
+                               list(4, 5, 6),
+                               list(7, 8, 9));
+    }
+
+    @Test
+    public void testClusteringKeyUsageMap() throws Throwable
+    {
+        testClusteringKeyUsage("map<int, int>",
+                               map(),
+                               map(1, 10, 2, 20, 3, 30),
+                               map(4, 40, 5, 50, 6, 60),
+                               map(7, 70, 8, 80, 9, 90));
+    }
+
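+    /**
+     * Exercises a frozen collection type used as the clustering column of a COMPACT STORAGE table:
+     * inserts, overwrites via UPDATE, equality/IN/slice restrictions on the clustering value, and deletes.
+     */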
+    private void testClusteringKeyUsage(String type, Object v1, Object v2, Object v3, Object v4) throws Throwable
+    {
+        createTable(String.format("CREATE TABLE %%s (a int, b frozen<%s>, c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE",
+                                  type));
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v1, 1);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v2, 1);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v3, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v4, 0);
+
+        // overwrite with an update
+        execute("UPDATE %s SET c=? WHERE a=? AND b=?", 0, 0, v1);
+        execute("UPDATE %s SET c=? WHERE a=? AND b=?", 0, 0, v2);
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, v1, 0),
+                   row(0, v2, 0),
+                   row(0, v3, 0),
+                   row(0, v4, 0)
+        );
+
+        assertRows(execute("SELECT b FROM %s"),
+                   row(v1),
+                   row(v2),
+                   row(v3),
+                   row(v4)
+        );
+
+        assertRows(execute("SELECT * FROM %s LIMIT 2"),
+                   row(0, v1, 0),
+                   row(0, v2, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, v3),
+                   row(0, v3, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, v1),
+                   row(0, v1, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b IN ?", 0, list(v3, v1)),
+                   row(0, v1, 0),
+                   row(0, v3, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ?", 0, v3),
+                   row(0, v4, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b >= ?", 0, v3),
+                   row(0, v3, 0),
+                   row(0, v4, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b < ?", 0, v3),
+                   row(0, v1, 0),
+                   row(0, v2, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b <= ?", 0, v3),
+                   row(0, v1, 0),
+                   row(0, v2, 0),
+                   row(0, v3, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ? AND b <= ?", 0, v2, v3),
+                   row(0, v3, 0)
+        );
+
+        execute("DELETE FROM %s WHERE a=? AND b=?", 0, v1);
+        execute("DELETE FROM %s WHERE a=? AND b=?", 0, v3);
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, v2, 0),
+                   row(0, v4, 0)
+        );
+    }
+
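+    /**
+     * Nested frozen collections used as multiple clustering columns of a COMPACT STORAGE table,
+     * covering equality, multi-column IN, slice restrictions and deletes.
+     */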
+    @Test
+    public void testNestedClusteringKeyUsage() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b frozen<map<set<int>, list<int>>>, c frozen<set<int>>, d int, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(), set(), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(), list(1, 2, 3)), set(), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0);
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, map(), set(), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT b FROM %s"),
+                   row(map()),
+                   row(map(set(), list(1, 2, 3))),
+                   row(map(set(1, 2, 3), list(1, 2, 3))),
+                   row(map(set(4, 5, 6), list(1, 2, 3))),
+                   row(map(set(7, 8, 9), list(1, 2, 3)))
+        );
+
+        assertRows(execute("SELECT c FROM %s"),
+                   row(set()),
+                   row(set()),
+                   row(set(1, 2, 3)),
+                   row(set(1, 2, 3)),
+                   row(set(1, 2, 3))
+        );
+
+        assertRows(execute("SELECT * FROM %s LIMIT 3"),
+                   row(0, map(), set(), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=0 ORDER BY b DESC LIMIT 4"),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map()),
+                   row(0, map(), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(), list(1, 2, 3))),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(1, 2, 3), list(1, 2, 3))),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND (b, c) IN ?", 0, list(tuple(map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)),
+                                                                                 tuple(map(), set()))),
+                   row(0, map(), set(), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b >= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b < ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(), set(), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b <= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(), set(), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ? AND b <= ?", 0, map(set(1, 2, 3), list(1, 2, 3)), map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set());
+        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set()));
+
+        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set());
+        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()));
+
+        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3));
+        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)));
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+    }
+
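+    /**
+     * Same scenario as {@link #testNestedClusteringKeyUsage()} but with CLUSTERING ORDER BY (b DESC),
+     * so results are returned in reverse clustering order.
+     */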
+    @Test
+    public void testNestedClusteringKeyUsageWithReverseOrder() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b frozen<map<set<int>, list<int>>>, c frozen<set<int>>, d int, " +
+                    "PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (b DESC)");
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(), set(), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(), list(1, 2, 3)), set(), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0);
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0),
+                   row(0, map(), set(), 0)
+        );
+
+        assertRows(execute("SELECT b FROM %s"),
+                   row(map(set(7, 8, 9), list(1, 2, 3))),
+                   row(map(set(4, 5, 6), list(1, 2, 3))),
+                   row(map(set(1, 2, 3), list(1, 2, 3))),
+                   row(map(set(), list(1, 2, 3))),
+                   row(map())
+        );
+
+        assertRows(execute("SELECT c FROM %s"),
+                   row(set(1, 2, 3)),
+                   row(set(1, 2, 3)),
+                   row(set(1, 2, 3)),
+                   row(set()),
+                   row(set())
+        );
+
+        assertRows(execute("SELECT * FROM %s LIMIT 3"),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=0 ORDER BY b DESC LIMIT 4"),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map()),
+                   row(0, map(), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(), list(1, 2, 3))),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(1, 2, 3), list(1, 2, 3))),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND (b, c) IN ?", 0, list(tuple(map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)),
+                                                                                 tuple(map(), set()))),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b >= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b < ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0),
+                   row(0, map(), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b <= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(), list(1, 2, 3)), set(), 0),
+                   row(0, map(), set(), 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ? AND b <= ?", 0, map(set(1, 2, 3), list(1, 2, 3)), map(set(4, 5, 6), list(1, 2, 3))),
+                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+
+        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set());
+        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set()));
+
+        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set());
+        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()));
+
+        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3));
+        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)));
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
+                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
+        );
+    }
+
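+    /**
+     * Frozen collections used as regular (non primary key) columns: overwrites, null and
+     * empty-collection updates, and per-column deletes.
+     */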
+    @Test
+    public void testNormalColumnUsage() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b frozen<map<set<int>, list<int>>>, c frozen<set<int>>)" + compactOption);
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, map(), set());
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, map(set(), list(99999, 999999, 99999)), set());
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 2, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3));
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 3, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3));
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 4, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3));
+
+        // overwrite with update
+        execute("UPDATE %s SET b=? WHERE a=?", map(set(), list(1, 2, 3)), 1);
+
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s"),
+                                row(0, map(), set()),
+                                row(1, map(set(), list(1, 2, 3)), set()),
+                                row(2, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3)),
+                                row(3, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)),
+                                row(4, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3))
+        );
+
+        assertRowsIgnoringOrder(execute("SELECT b FROM %s"),
+                                row(map()),
+                                row(map(set(), list(1, 2, 3))),
+                                row(map(set(1, 2, 3), list(1, 2, 3))),
+                                row(map(set(4, 5, 6), list(1, 2, 3))),
+                                row(map(set(7, 8, 9), list(1, 2, 3)))
+        );
+
+        assertRowsIgnoringOrder(execute("SELECT c FROM %s"),
+                                row(set()),
+                                row(set()),
+                                row(set(1, 2, 3)),
+                                row(set(1, 2, 3)),
+                                row(set(1, 2, 3))
+        );
+
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 3),
+                                row(3, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3))
+        );
+
+        execute("UPDATE %s SET b=? WHERE a=?", null, 1);
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 1),
+                                row(1, null, set())
+        );
+
+        execute("UPDATE %s SET b=? WHERE a=?", map(), 1);
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 1),
+                                row(1, map(), set())
+        );
+
+        execute("UPDATE %s SET c=? WHERE a=?", null, 2);
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 2),
+                                row(2, map(set(1, 2, 3), list(1, 2, 3)), null)
+        );
+
+        execute("UPDATE %s SET c=? WHERE a=?", set(), 2);
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 2),
+                                row(2, map(set(1, 2, 3), list(1, 2, 3)), set())
+        );
+
+        execute("DELETE b FROM %s WHERE a=?", 3);
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 3),
+                                row(3, null, set(1, 2, 3))
+        );
+
+        execute("DELETE c FROM %s WHERE a=?", 4);
+        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 4),
+                                row(4, map(set(7, 8, 9), list(1, 2, 3)), null)
+        );
+    }
+
+    /**
+     * from SecondaryIndexTest
+     */
+    @Test
+    public void testCompactTableWithValueOver64k() throws Throwable
+    {
+        createTable("CREATE TABLE %s(a int, b blob, PRIMARY KEY (a)) WITH COMPACT STORAGE");
+        createIndex("CREATE INDEX ON %s(b)");
+        failInsert("INSERT INTO %s (a, b) VALUES (0, ?)", ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
+        failInsert("INSERT INTO %s (a, b) VALUES (0, ?) IF NOT EXISTS", ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
+        failInsert("BEGIN BATCH\n" +
+                   "INSERT INTO %s (a, b) VALUES (0, ?);\n" +
+                   "APPLY BATCH",
+                   ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
+        failInsert("BEGIN BATCH\n" +
+                   "INSERT INTO %s (a, b) VALUES (0, ?) IF NOT EXISTS;\n" +
+                   "APPLY BATCH",
+                   ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
+    }
+
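+    /** Executes the given statement and asserts that it fails; any thrown exception is accepted. */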
+    public void failInsert(String insertCQL, Object... args) throws Throwable
+    {
+        try
+        {
+            execute(insertCQL, args);
+            fail("Expected statement to fail validation");
+        }
+        catch (Exception e)
+        {
+            // as expected
+        }
+    }
+
+    /**
+     * Migrated from cql_tests.py:TestCQL.invalid_clustering_indexing_test()
+     */
+    @Test
+    public void testIndexesOnClusteringInvalid() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b))) WITH COMPACT STORAGE");
+        assertInvalid("CREATE INDEX ON %s (a)");
+        assertInvalid("CREATE INDEX ON %s (b)");
+
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        assertInvalid("CREATE INDEX ON %s (a)");
+        assertInvalid("CREATE INDEX ON %s (b)");
+        assertInvalid("CREATE INDEX ON %s (c)");
+    }
+
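+    /**
+     * Empty blob restriction values with a secondary index on compact tables: indexing a clustering
+     * column is rejected, while an indexed regular column can be matched against an empty value.
+     */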
+    @Test
+    public void testEmptyRestrictionValueWithSecondaryIndexAndCompactTables() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk blob, c blob, v blob, PRIMARY KEY ((pk), c)) WITH COMPACT STORAGE");
+        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
+                             "CREATE INDEX on %s(c)");
+
+        createTable("CREATE TABLE %s (pk blob PRIMARY KEY, v blob) WITH COMPACT STORAGE");
+        createIndex("CREATE INDEX on %s(v)");
+
+        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", bytes("foo123"), bytes("1"));
+
+        // Test restrictions on non-primary key value
+        assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND v = textAsBlob('');"));
+
+        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", bytes("foo124"), EMPTY_BYTE_BUFFER);
+
+        assertRows(execute("SELECT * FROM %s WHERE v = textAsBlob('');"),
+                   row(bytes("foo124"), EMPTY_BYTE_BUFFER));
+    }
+
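+    /**
+     * Secondary indexes on COMPACT STORAGE tables: rejected on the compact value column when the
+     * table has clustering columns, but allowed on regular columns of non-clustered tables.
+     */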
+    @Test
+    public void testIndicesOnCompactTable() throws Throwable
+    {
+        assertInvalidMessage("COMPACT STORAGE with composite PRIMARY KEY allows no more than one column not part of the PRIMARY KEY (got: v1, v2)",
+                             "CREATE TABLE " + KEYSPACE + ".test (pk int, c int, v1 int, v2 int, PRIMARY KEY(pk, c)) WITH COMPACT STORAGE");
+
+        createTable("CREATE TABLE %s (pk int, c int, v int, PRIMARY KEY(pk, c)) WITH COMPACT STORAGE");
+        assertInvalidMessage("Secondary indexes are not supported on compact value column of COMPACT STORAGE tables",
+                             "CREATE INDEX ON %s(v)");
+
+        createTable("CREATE TABLE %s (pk int PRIMARY KEY, v int) WITH COMPACT STORAGE");
+        createIndex("CREATE INDEX ON %s(v)");
+
+        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", 1, 1);
+        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", 2, 1);
+        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", 3, 3);
+
+        assertRows(execute("SELECT pk, v FROM %s WHERE v = 1"),
+                   row(1, 1),
+                   row(2, 1));
+
+        assertRows(execute("SELECT pk, v FROM %s WHERE v = 3"),
+                   row(3, 3));
+
+        assertEmpty(execute("SELECT pk, v FROM %s WHERE v = 5"));
+
+        createTable("CREATE TABLE %s (pk int PRIMARY KEY, v1 int, v2 int) WITH COMPACT STORAGE");
+        createIndex("CREATE INDEX ON %s(v1)");
+
+        execute("INSERT INTO %s (pk, v1, v2) VALUES (?, ?, ?)", 1, 1, 1);
+        execute("INSERT INTO %s (pk, v1, v2) VALUES (?, ?, ?)", 2, 1, 2);
+        execute("INSERT INTO %s (pk, v1, v2) VALUES (?, ?, ?)", 3, 3, 3);
+
+        assertRows(execute("SELECT pk, v2 FROM %s WHERE v1 = 1"),
+                   row(1, 1),
+                   row(2, 2));
+
+        assertRows(execute("SELECT pk, v2 FROM %s WHERE v1 = 3"),
+                   row(3, 3));
+
+        assertEmpty(execute("SELECT pk, v2 FROM %s WHERE v1 = 5"));
+    }
+
+    /**
+     * OverflowTest
+     */
+
+    /**
+     * Test regression from #5189,
+     * migrated from cql_tests.py:TestCQL.compact_metadata_test()
+     */
+    @Test
+    public void testCompactMetadata() throws Throwable
+    {
+        createTable("CREATE TABLE %s (id int primary key, i int ) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (id, i) VALUES (1, 2)");
+        assertRows(execute("SELECT * FROM %s"),
+                   row(1, 2));
+    }
+
+    @Test
+    public void testEmpty() throws Throwable
+    {
+        // Same test, but for compact
+        createTable("CREATE TABLE %s (k1 int, k2 int, v int, PRIMARY KEY (k1, k2)) WITH COMPACT STORAGE");
+
+        // Inserts a few rows to make sure we don't actually query something
+        Object[][] rows = fill();
+
+        assertEmpty(execute("SELECT v FROM %s WHERE k1 IN ()"));
+        assertEmpty(execute("SELECT v FROM %s WHERE k1 = 0 AND k2 IN ()"));
+
+        // Test empty IN() in DELETE
+        execute("DELETE FROM %s WHERE k1 IN ()");
+        assertArrayEquals(rows, getRows(execute("SELECT * FROM %s")));
+
+        // Test empty IN() in UPDATE
+        execute("UPDATE %s SET v = 3 WHERE k1 IN () AND k2 = 2");
+        assertArrayEquals(rows, getRows(execute("SELECT * FROM %s")));
+    }
+
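+    /** Inserts a small 2x2 grid of rows and returns the resulting table contents for later comparison. */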
+    private Object[][] fill() throws Throwable
+    {
+        for (int i = 0; i < 2; i++)
+            for (int j = 0; j < 2; j++)
+                execute("INSERT INTO %s (k1, k2, v) VALUES (?, ?, ?)", i, j, i + j);
+
+        return getRows(execute("SELECT * FROM %s"));
+    }
+
+    /**
+     * AggregationTest
+     */
+
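+    /** Aggregate functions (count, min, max, sum, avg) over a COMPACT STORAGE table. */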
+    @Test
+    public void testFunctionsWithCompactStorage() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int , b int, c double, primary key(a, b) ) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 1, 11.5)");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 9.5)");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 9.0)");
+
+        assertRows(execute("SELECT max(b), min(b), sum(b), avg(b) , max(c), sum(c), avg(c) FROM %s"),
+                   row(3, 1, 6, 2, 11.5, 30.0, 10.0));
+
+        assertRows(execute("SELECT COUNT(*) FROM %s"), row(3L));
+        assertRows(execute("SELECT COUNT(1) FROM %s"), row(3L));
+        assertRows(execute("SELECT COUNT(*) FROM %s WHERE a = 1 AND b > 1"), row(2L));
+        assertRows(execute("SELECT COUNT(1) FROM %s WHERE a = 1 AND b > 1"), row(2L));
+        assertRows(execute("SELECT max(b), min(b), sum(b), avg(b) , max(c), sum(c), avg(c) FROM %s WHERE a = 1 AND b > 1"),
+                   row(3, 2, 5, 2, 9.5, 18.5, 9.25));
+    }
+
+    /**
+     * BatchTest
+     */
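+    /** Range deletes on the clustering column issued from a single logged batch. */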
+    @Test
+    public void testBatchRangeDelete() throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering)) WITH COMPACT STORAGE");
+
+        int value = 0;
+        for (int partitionKey = 0; partitionKey < 4; partitionKey++)
+            for (int clustering1 = 0; clustering1 < 5; clustering1++)
+                execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (?, ?, ?)",
+                        partitionKey, clustering1, value++);
+
+        execute("BEGIN BATCH " +
+                "DELETE FROM %1$s WHERE partitionKey = 1;" +
+                "DELETE FROM %1$s WHERE partitionKey = 0 AND  clustering >= 4;" +
+                "DELETE FROM %1$s WHERE partitionKey = 0 AND clustering <= 0;" +
+                "DELETE FROM %1$s WHERE partitionKey = 2 AND clustering >= 0 AND clustering <= 3;" +
+                "DELETE FROM %1$s WHERE partitionKey = 2 AND clustering <= 3 AND clustering >= 4;" +
+                "DELETE FROM %1$s WHERE partitionKey = 3 AND (clustering) >= (3) AND (clustering) <= (6);" +
+                "APPLY BATCH;");
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 1, 1),
+                   row(0, 2, 2),
+                   row(0, 3, 3),
+                   row(2, 4, 14),
+                   row(3, 0, 15),
+                   row(3, 1, 16),
+                   row(3, 2, 17));
+    }
+
+    /**
+     * CreateTest
+     */
+    /**
+     * Creation and basic operations on a static table with compact storage,
+     * migrated from cql_tests.py:TestCQL.noncomposite_static_cf_test()
+     */
+    @Test
+    public void testDenseStaticTable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (userid uuid PRIMARY KEY, firstname text, lastname text, age int) WITH COMPACT STORAGE");
+
+        UUID id1 = UUID.fromString("550e8400-e29b-41d4-a716-446655440000");
+        UUID id2 = UUID.fromString("f47ac10b-58cc-4372-a567-0e02b2c3d479");
+
+        execute("INSERT INTO %s (userid, firstname, lastname, age) VALUES (?, ?, ?, ?)", id1, "Frodo", "Baggins", 32);
+        execute("UPDATE %s SET firstname = ?, lastname = ?, age = ? WHERE userid = ?", "Samwise", "Gamgee", 33, id2);
+
+        assertRows(execute("SELECT firstname, lastname FROM %s WHERE userid = ?", id1),
+                   row("Frodo", "Baggins"));
+
+        assertRows(execute("SELECT * FROM %s WHERE userid = ?", id1),
+                   row(id1, 32, "Frodo", "Baggins"));
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(id2, 33, "Samwise", "Gamgee"),
+                   row(id1, 32, "Frodo", "Baggins")
+        );
+
+        String batch = "BEGIN BATCH "
+                       + "INSERT INTO %1$s (userid, age) VALUES (?, ?) "
+                       + "UPDATE %1$s SET age = ? WHERE userid = ? "
+                       + "DELETE firstname, lastname FROM %1$s WHERE userid = ? "
+                       + "DELETE firstname, lastname FROM %1$s WHERE userid = ? "
+                       + "APPLY BATCH";
+
+        execute(batch, id1, 36, 37, id2, id1, id2);
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(id2, 37, null, null),
+                   row(id1, 36, null, null));
+    }
+
+    /**
+     * Creation and basic operations on a non-composite table with compact storage,
+     * migrated from cql_tests.py:TestCQL.dynamic_cf_test()
+     */
+    @Test
+    public void testDenseNonCompositeTable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (userid uuid, url text, time bigint, PRIMARY KEY (userid, url)) WITH COMPACT STORAGE");
+
+        UUID id1 = UUID.fromString("550e8400-e29b-41d4-a716-446655440000");
+        UUID id2 = UUID.fromString("f47ac10b-58cc-4372-a567-0e02b2c3d479");
+        UUID id3 = UUID.fromString("810e8500-e29b-41d4-a716-446655440000");
+
+        execute("INSERT INTO %s (userid, url, time) VALUES (?, ?, ?)", id1, "http://foo.bar", 42L);
+        execute("INSERT INTO %s (userid, url, time) VALUES (?, ?, ?)", id1, "http://foo-2.bar", 24L);
+        execute("INSERT INTO %s (userid, url, time) VALUES (?, ?, ?)", id1, "http://bar.bar", 128L);
+        execute("UPDATE %s SET time = 24 WHERE userid = ? and url = 'http://bar.foo'", id2);
+        execute("UPDATE %s SET time = 12 WHERE userid IN (?, ?) and url = 'http://foo-3'", id2, id1);
+
+        assertRows(execute("SELECT url, time FROM %s WHERE userid = ?", id1),
+                   row("http://bar.bar", 128L),
+                   row("http://foo-2.bar", 24L),
+                   row("http://foo-3", 12L),
+                   row("http://foo.bar", 42L));
+
+        assertRows(execute("SELECT * FROM %s WHERE userid = ?", id2),
+                   row(id2, "http://bar.foo", 24L),
+                   row(id2, "http://foo-3", 12L));
+
+        assertRows(execute("SELECT time FROM %s"),
+                   row(24L), // id2
+                   row(12L),
+                   row(128L), // id1
+                   row(24L),
+                   row(12L),
+                   row(42L)
+        );
+
+        // Check we don't allow empty values for url since this is the full underlying cell name (#6152)
+        assertInvalid("INSERT INTO %s (userid, url, time) VALUES (?, '', 42)", id3);
+    }
+
+    /**
+     * Creation and basic operations on a composite table with compact storage,
+     * migrated from cql_tests.py:TestCQL.dense_cf_test()
+     */
+    @Test
+    public void testDenseCompositeTable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (userid uuid, ip text, port int, time bigint, PRIMARY KEY (userid, ip, port)) WITH COMPACT STORAGE");
+
+        UUID id1 = UUID.fromString("550e8400-e29b-41d4-a716-446655440000");
+        UUID id2 = UUID.fromString("f47ac10b-58cc-4372-a567-0e02b2c3d479");
+
+        execute("INSERT INTO %s (userid, ip, port, time) VALUES (?, '192.168.0.1', 80, 42)", id1);
+        execute("INSERT INTO %s (userid, ip, port, time) VALUES (?, '192.168.0.2', 80, 24)", id1);
+        execute("INSERT INTO %s (userid, ip, port, time) VALUES (?, '192.168.0.2', 90, 42)", id1);
+        execute("UPDATE %s SET time = 24 WHERE userid = ? AND ip = '192.168.0.2' AND port = 80", id2);
+
+        // we don't have to include all of the clustering columns (see CASSANDRA-7990)
+        execute("INSERT INTO %s (userid, ip, time) VALUES (?, '192.168.0.3', 42)", id2);
+        execute("UPDATE %s SET time = 42 WHERE userid = ? AND ip = '192.168.0.4'", id2);
+
+        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ?", id1),
+                   row("192.168.0.1", 80, 42L),
+                   row("192.168.0.2", 80, 24L),
+                   row("192.168.0.2", 90, 42L));
+
+        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? and ip >= '192.168.0.2'", id1),
+                   row("192.168.0.2", 80, 24L),
+                   row("192.168.0.2", 90, 42L));
+
+        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? and ip = '192.168.0.2'", id1),
+                   row("192.168.0.2", 80, 24L),
+                   row("192.168.0.2", 90, 42L));
+
+        assertEmpty(execute("SELECT ip, port, time FROM %s WHERE userid = ? and ip > '192.168.0.2'", id1));
+
+        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? AND ip = '192.168.0.3'", id2),
+                   row("192.168.0.3", null, 42L));
+
+        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? AND ip = '192.168.0.4'", id2),
+                   row("192.168.0.4", null, 42L));
+
+        execute("DELETE time FROM %s WHERE userid = ? AND ip = '192.168.0.2' AND port = 80", id1);
+
+        assertRowCount(execute("SELECT * FROM %s WHERE userid = ?", id1), 2);
+
+        execute("DELETE FROM %s WHERE userid = ?", id1);
+        assertEmpty(execute("SELECT * FROM %s WHERE userid = ?", id1));
+
+        execute("DELETE FROM %s WHERE userid = ? AND ip = '192.168.0.3'", id2);
+        assertEmpty(execute("SELECT * FROM %s WHERE userid = ? AND ip = '192.168.0.3'", id2));
+    }
+
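+    /**
+     * Creating an index on any column of a COMPACT STORAGE table with clustering columns is rejected.
+     */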
+    @Test
+    public void testCreateIndexOnCompactTableWithClusteringColumns() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int , c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE;");
+
+        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
+                             "CREATE INDEX ON %s (a);");
+
+        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
+                             "CREATE INDEX ON %s (b);");
+
+        assertInvalidMessage("Secondary indexes are not supported on compact value column of COMPACT STORAGE tables",
+                             "CREATE INDEX ON %s (c);");
+    }
+
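+    /**
+     * On a COMPACT STORAGE table without clustering columns, indexing the partition key is rejected
+     * but indexing a regular column works.
+     */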
+    @Test
+    public void testCreateIndexOnCompactTableWithoutClusteringColumns() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int) WITH COMPACT STORAGE;");
+
+        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
+                             "CREATE INDEX ON %s (a);");
+
+        createIndex("CREATE INDEX ON %s (b);");
+
+        execute("INSERT INTO %s (a, b) values (1, 1)");
+        execute("INSERT INTO %s (a, b) values (2, 4)");
+        execute("INSERT INTO %s (a, b) values (3, 6)");
+
+        assertRows(execute("SELECT * FROM %s WHERE b = ?", 4), row(2, 4));
+    }
+
+    /**
+     * DeleteTest
+     */
+
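+    /**
+     * DELETE on a COMPACT STORAGE table without clustering columns, run with and without flushing,
+     * including the expected error messages for invalid queries.
+     */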
+    @Test
+    public void testDeleteWithNoClusteringColumns() throws Throwable
+    {
+        testDeleteWithNoClusteringColumns(false);
+        testDeleteWithNoClusteringColumns(true);
+    }
+
+    private void testDeleteWithNoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int PRIMARY KEY," +
+                    "value int)" + compactOption);
+
+        execute("INSERT INTO %s (partitionKey, value) VALUES (0, 0)");
+        execute("INSERT INTO %s (partitionKey, value) VALUES (1, 1)");
+        execute("INSERT INTO %s (partitionKey, value) VALUES (2, 2)");
+        execute("INSERT INTO %s (partitionKey, value) VALUES (3, 3)");
+        flush(forceFlush);
+
+        execute("DELETE value FROM %s WHERE partitionKey = ?", 0);
+        flush(forceFlush);
+
+        assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ?", 0));
+
+        execute("DELETE FROM %s WHERE partitionKey IN (?, ?)", 0, 1);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s"),
+                   row(2, 2),
+                   row(3, 3));
+
+        // test invalid queries
+
+        // token function
+        assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
+                             "DELETE FROM %s WHERE token(partitionKey) = token(?)", 0);
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("partitionkey cannot be restricted by more than one relation if it includes an Equal",
+                             "DELETE FROM %s WHERE partitionKey = ? AND partitionKey = ?", 0, 1);
+
+        // Undefined column names
+        assertInvalidMessage("Undefined column name unknown",
+                             "DELETE unknown FROM %s WHERE partitionKey = ?", 0);
+
+        assertInvalidMessage("Undefined column name partitionkey1",
+                             "DELETE FROM %s WHERE partitionKey1 = ?", 0);
+
+        // Invalid operator in the where clause
+        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                             "DELETE FROM %s WHERE partitionKey > ? ", 0);
+
+        assertInvalidMessage("Cannot use DELETE with CONTAINS",
+                             "DELETE FROM %s WHERE partitionKey CONTAINS ?", 0);
+
+        // Non primary key in the where clause
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "DELETE FROM %s WHERE partitionKey = ? AND value = ?", 0, 1);
+    }
+
+
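+    /**
+     * DELETE on a COMPACT STORAGE table with a single clustering column, run with and without flushing,
+     * including IN restrictions and the expected error messages for invalid queries.
+     */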
+    @Test
+    public void testDeleteWithOneClusteringColumns() throws Throwable
+    {
+        testDeleteWithOneClusteringColumns(false);
+        testDeleteWithOneClusteringColumns(true);
+    }
+
+    private void testDeleteWithOneClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        String compactOption = " WITH COMPACT STORAGE";
+
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering))" + compactOption);
+
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 2, 2)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 3, 3)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 4, 4)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 5, 5)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (1, 0, 6)");
+        flush(forceFlush);
+
+        execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
+        flush(forceFlush);
+
+        assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1));
+
+        execute("DELETE FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
+        flush(forceFlush);
+        assertEmpty(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1));
+
+        execute("DELETE FROM %s WHERE partitionKey IN (?, ?) AND clustering = ?", 0, 1, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                   row(0, 2, 2),
+                   row(0, 3, 3),
+                   row(0, 4, 4),
+                   row(0, 5, 5));
+
+        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering) IN ((?), (?))", 0, 4, 5);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                   row(0, 2, 2),
+                   row(0, 3, 3));
+
+        // test invalid queries
+
+        // missing primary key element
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "DELETE FROM %s WHERE clustering = ?", 1);
+
+        // token function
+        assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
+                             "DELETE FROM %s WHERE token(partitionKey) = token(?) AND clustering = ? ", 0, 1);
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("clustering cannot be restricted by more than one relation if it includes an Equal",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering = ? AND clustering = ?", 0, 1, 1);
+
+        // Undefined column names
+        assertInvalidMessage("Undefined column name value1",
+                             "DELETE value1 FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
+
+        assertInvalidMessage("Undefined column name partitionkey1",
+                             "DELETE FROM %s WHERE partitionKey1 = ? AND clustering = ?", 0, 1);
+
+        assertInvalidMessage("Undefined column name clustering_3",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_3 = ?", 0, 1);
+
+        // Invalid operator in the where clause
+        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                             "DELETE FROM %s WHERE partitionKey > ? AND clustering = ?", 0, 1);
+
+        assertInvalidMessage("Cannot use DELETE with CONTAINS",
+                             "DELETE FROM %s WHERE partitionKey CONTAINS ? AND clustering = ?", 0, 1);
+
+        // Non primary key in the where clause
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering = ? AND value = ?", 0, 1, 3);
+    }
+
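+    /**
+     * DELETE on a COMPACT STORAGE table with two clustering columns, run with and without flushing,
+     * including multi-column and IN restrictions and the expected error messages for invalid queries.
+     */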
+    @Test
+    public void testDeleteWithTwoClusteringColumns() throws Throwable
+    {
+        testDeleteWithTwoClusteringColumns(false);
+        testDeleteWithTwoClusteringColumns(true);
+    }
+
+    private void testDeleteWithTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "clustering_2 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))" + compactOption);
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 2, 2)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 3, 3)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 1, 4)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 2, 5)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 6)");
+        flush(forceFlush);
+
+        execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+        flush(forceFlush);
+
+        assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                            0, 1, 1));
+
+        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) = (?, ?)", 0, 1, 1);
+        flush(forceFlush);
+        assertEmpty(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                            0, 1, 1));
+
+        execute("DELETE FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 0, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                   row(0, 0, 1, 1),
+                   row(0, 0, 2, 2),
+                   row(0, 0, 3, 3),
+                   row(0, 1, 2, 5));
+
+        Object[][] rows = new Object[][]{ row(0, 0, 1, 1), row(0, 1, 2, 5) };
+
+        execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)", 0, 0, 2, 3);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1), rows);
+
+        rows = new Object[][]{ row(0, 0, 1, 1) };
+
+        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))", 0, 0, 2, 1, 2);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1), rows);
+
+        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?)) AND clustering_2 = ?", 0, 0, 2, 3);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                   row(0, 0, 1, 1));
+
+        // test invalid queries
+
+        // missing primary key element
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "DELETE FROM %s WHERE clustering_1 = ? AND clustering_2 = ?", 1, 1);
+
+        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_2 = ?", 0, 1);
+
+        // token function
+        assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
+                             "DELETE FROM %s WHERE token(partitionKey) = token(?) AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND clustering_1 = ?", 0, 1, 1, 1);
+
+        // Undefined column names
+        assertInvalidMessage("Undefined column name value1",
+                             "DELETE value1 FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+        assertInvalidMessage("Undefined column name partitionkey1",
+                             "DELETE FROM %s WHERE partitionKey1 = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+        assertInvalidMessage("Undefined column name clustering_3",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_3 = ?", 0, 1, 1);
+
+        // Invalid operator in the where clause
+        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                             "DELETE FROM %s WHERE partitionKey > ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+        assertInvalidMessage("Cannot use DELETE with CONTAINS",
+                             "DELETE FROM %s WHERE partitionKey CONTAINS ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+        // Non primary key in the where clause
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND value = ?", 0, 1, 1, 3);
+    }
+
+    /**
+     * InsertTest
+     */
+
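+    /**
+     * INSERT into a COMPACT STORAGE table with a single clustering column, run with and without flushing,
+     * including the rejected statements (missing key parts, mandatory value column, duplicated or undefined columns).
+     */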
+    @Test
+    public void testInsertWithCompactFormat() throws Throwable
+    {
+        testInsertWithCompactFormat(false);
+        testInsertWithCompactFormat(true);
+    }
+
+    private void testInsertWithCompactFormat(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
+        flush(forceFlush);
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, 0),
+                   row(0, 1, 1));
+
+        // Invalid Null values for the clustering key or the regular column
+        assertInvalidMessage("Some clustering keys are missing: clustering",
+                             "INSERT INTO %s (partitionKey, value) VALUES (0, 0)");
+        assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
+                             "INSERT INTO %s (partitionKey, clustering) VALUES (0, 0)");
+
+        // Missing primary key columns
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "INSERT INTO %s (clustering, value) VALUES (0, 1)");
+
+        // multiple time the same value
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering, value, value) VALUES (0, 0, 2, 2)");
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering, clustering, value) VALUES (0, 0, 0, 2)");
+
+        // Undefined column names
+        assertInvalidMessage("Undefined column name clusteringx",
+                             "INSERT INTO %s (partitionKey, clusteringx, value) VALUES (0, 0, 2)");
+
+        assertInvalidMessage("Undefined column name valuex",
+                             "INSERT INTO %s (partitionKey, clustering, valuex) VALUES (0, 0, 2)");
+    }
+
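+    /**
+     * INSERT into a COMPACT STORAGE table with two clustering columns, run with and without flushing,
+     * including the rejected statements (missing or skipped key parts, mandatory value column, duplicated or undefined columns).
+     */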
+    @Test
+    public void testInsertWithCompactStorageAndTwoClusteringColumns() throws Throwable
+    {
+        testInsertWithCompactStorageAndTwoClusteringColumns(false);
+        testInsertWithCompactStorageAndTwoClusteringColumns(true);
+    }
+
+    private void testInsertWithCompactStorageAndTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "clustering_2 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+        flush(forceFlush);
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, null, 0),
+                   row(0, 0, 0, 0),
+                   row(0, 0, 1, 1));
+
+        // Invalid Null values for the clustering key or the regular column
+        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                             "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 0)");
+        assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_2) VALUES (0, 0, 0)");
+
+        // Missing primary key columns
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "INSERT INTO %s (clustering_1, clustering_2, value) VALUES (0, 0, 1)");
+        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                             "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 2)");
+
+        // multiple time the same value
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering_1, value, clustering_2, value) VALUES (0, 0, 2, 0, 2)");
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 2)");
+
+        // Undefined column names
+        assertInvalidMessage("Undefined column name clustering_1x",
+                             "INSERT INTO %s (partitionKey, clustering_1x, clustering_2, value) VALUES (0, 0, 0, 2)");
+
+        assertInvalidMessage("Undefined column name valuex",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_2, valuex) VALUES (0, 0, 0, 2)");
+    }
+
+    /**
+     * InsertUpdateIfConditionTest
+     */
+    /**
+     * Test for CAS with compact storage table, and #6813 in particular,
+     * migrated from cql_tests.py:TestCQL.cas_and_compact_test()
+     */
+    @Test
+    public void testCompactStorage() throws Throwable
+    {
+        createTable("CREATE TABLE %s (partition text, key text, owner text, PRIMARY KEY (partition, key) ) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (partition, key, owner) VALUES ('a', 'b', null)");
+        assertRows(execute("UPDATE %s SET owner='z' WHERE partition='a' AND key='b' IF owner=null"), row(true));
+
+        assertRows(execute("UPDATE %s SET owner='b' WHERE partition='a' AND key='b' IF owner='a'"), row(false, "z"));
+        assertRows(execute("UPDATE %s SET owner='b' WHERE partition='a' AND key='b' IF owner='z'"), row(true));
+
+        assertRows(execute("INSERT INTO %s (partition, key, owner) VALUES ('a', 'c', 'x') IF NOT EXISTS"), row(true));
+    }
+
+    /**
+     * SelectGroupByTest
+     */
+
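+    /**
+     * GROUP BY on a COMPACT STORAGE table without paging: grouping by partition and clustering
+     * columns, with aggregates, wildcard selects, LIMIT and PER PARTITION LIMIT.
+     */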
+    @Test
+    public void testGroupByWithoutPaging() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))"
+                    + compactOption);
+
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, 2, 12, 24)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 12, 24)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 6, 12)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 2, 3, 3, 6)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 4, 3, 6, 12)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (3, 3, 2, 12, 24)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (4, 8, 2, 12, 24)");
+
+        // Makes sure that we have some tombstones
+        execute("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2 AND d = 12");
+        execute("DELETE FROM %s WHERE a = 3");
+
+        // Range queries
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a"),
+                   row(1, 2, 6, 4L, 24),
+                   row(2, 2, 6, 2L, 12),
+                   row(4, 8, 24, 1L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b"),
+                   row(1, 2, 6, 2L, 12),
+                   row(1, 4, 12, 2L, 24),
+                   row(2, 2, 6, 1L, 6),
+                   row(2, 4, 12, 1L, 12),
+                   row(4, 8, 24, 1L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b = 2 GROUP BY a, b ALLOW FILTERING"),
+                   row(1, 2, 6, 2L, 12),
+                   row(2, 2, 6, 1L, 6));
+
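+        // An empty IN () restriction matches no rows, so the grouped query returns no groups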
+        assertEmpty(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b IN () GROUP BY a, b ALLOW FILTERING"));
+
+        // Range queries without aggregates
+        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(1, 4, 2, 6),
+                   row(2, 2, 3, 3),
+                   row(2, 4, 3, 6),
+                   row(4, 8, 2, 12));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b"),
+                   row(1, 2, 1, 3),
+                   row(1, 4, 2, 6),
+                   row(2, 2, 3, 3),
+                   row(2, 4, 3, 6),
+                   row(4, 8, 2, 12));
+
+        // Range queries with wildcard
+        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12),
+                   row(1, 4, 2, 6, 12),
+                   row(2, 2, 3, 3, 6),
+                   row(2, 4, 3, 6, 12),
+                   row(4, 8, 2, 12, 24));
+
+        assertRows(execute("SELECT * FROM %s GROUP BY a, b"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 4, 2, 6, 12),
+                   row(2, 2, 3, 3, 6),
+                   row(2, 4, 3, 6, 12),
+                   row(4, 8, 2, 12, 24));
+
+        // Range query with LIMIT
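+        // With GROUP BY, LIMIT caps the number of groups returned, not the number of underlying rows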
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b LIMIT 2"),
+                   row(1, 2, 6, 2L, 12),
+                   row(1, 4, 12, 2L, 24));
+
+        // Range queries with PER PARTITION LIMIT
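+        // With GROUP BY, PER PARTITION LIMIT caps the number of groups returned from each partition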
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1"),
+                   row(1, 2, 6, 2L, 12),
+                   row(2, 2, 6, 1L, 6),
+                   row(4, 8, 24, 1L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"),
+                   row(1, 2, 6, 4L, 24),
+                   row(2, 2, 6, 2L, 12),
+                   row(4, 8, 24, 1L, 24));
+
+        // Range query with PER PARTITION LIMIT and LIMIT
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"),
+                   row(1, 2, 6, 2L, 12),
+                   row(2, 2, 6, 1L, 6));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"),
+                   row(1, 2, 6, 4L, 24),
+                   row(2, 2, 6, 2L, 12),
+                   row(4, 8, 24, 1L, 24));
+
+        // Range queries without aggregates and with LIMIT
+        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c LIMIT 3"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(1, 4, 2, 6));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b LIMIT 3"),
+                   row(1, 2, 1, 3),
+                   row(1, 4, 2, 6),
+                   row(2, 2, 3, 3));
+
+        // Range queries with wildcard and with LIMIT
+        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c LIMIT 3"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12),
+                   row(1, 4, 2, 6, 12));
+
+        assertRows(execute("SELECT * FROM %s GROUP BY a, b LIMIT 3"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 4, 2, 6, 12),
+                   row(2, 2, 3, 3, 6));
+
+        // Range queries without aggregates and with PER PARTITION LIMIT
+        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(2, 2, 3, 3),
+                   row(2, 4, 3, 6),
+                   row(4, 8, 2, 12));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b PER PARTITION LIMIT 1"),
+                   row(1, 2, 1, 3),
+                   row(2, 2, 3, 3),
+                   row(4, 8, 2, 12));
+
+        // Range queries with wildcard and with PER PARTITION LIMIT
+        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12),
+                   row(2, 2, 3, 3, 6),
+                   row(2, 4, 3, 6, 12),
+                   row(4, 8, 2, 12, 24));
+
+        assertRows(execute("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1"),
+                   row(1, 2, 1, 3, 6),
+                   row(2, 2, 3, 3, 6),
+                   row(4, 8, 2, 12, 24));
+
+        // Range queries without aggregates, with PER PARTITION LIMIT and LIMIT
+        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(2, 2, 3, 3));
+
+        // Range queries with wildcard, with PER PARTITION LIMIT and LIMIT
+        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12),
+                   row(2, 2, 3, 3, 6));
+
+        // Range query with DISTINCT
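+        // SELECT DISTINCT reads a single row per partition, so count(a) is 1 for every group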
+        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a"),
+                   row(1, 1L),
+                   row(2, 1L),
+                   row(4, 1L));
+
+        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
+                             "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b");
+
+        // Range query with DISTINCT and LIMIT
+        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a LIMIT 2"),
+                   row(1, 1L),
+                   row(2, 1L));
+
+        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
+                             "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b LIMIT 2");
+
+        // Range query with ORDER BY
+        assertInvalidMessage("ORDER BY is only supported when the partition key is restricted by an EQ or an IN",
+                             "SELECT a, b, c, count(b), max(e) FROM %s GROUP BY a, b ORDER BY b DESC, c DESC");
+
+        // Single partition queries
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12),
+                   row(1, 4, 12, 2L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY b, c"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12),
+                   row(1, 4, 12, 2L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, b, c"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, c"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY c"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12));
+
+        // Single partition queries without aggregates
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b"),
+                   row(1, 2, 1, 3),
+                   row(1, 4, 2, 6));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(1, 4, 2, 6));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY b, c"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(1, 4, 2, 6));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 and token(a) = token(1) GROUP BY b, c"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(1, 4, 2, 6));
+
+        // Single partition queries with wildcard
+        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12),
+                   row(1, 4, 2, 6, 12));
+
+        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 4, 2, 6, 12));
+
+        // Single partition queries with DISTINCT
+        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a"),
+                   row(1, 1L));
+
+        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
+                             "SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a, b");
+
+        // Single partition queries with LIMIT
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 10"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12),
+                   row(1, 4, 12, 2L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12));
+
+        assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 1"),
+                   row(1L, 6));
+
+        // Single partition queries with PER PARTITION LIMIT
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 10"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12),
+                   row(1, 4, 12, 2L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12));
+
+        assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 1"),
+                   row(1L, 6));
+
+        // Single partition queries without aggregates and with LIMIT
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 2"),
+                   row(1, 2, 1, 3),
+                   row(1, 4, 2, 6));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"),
+                   row(1, 2, 1, 3));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6));
+
+        // Single partition queries with wildcard and with LIMIT
+        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12));
+
+        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"),
+                   row(1, 2, 1, 3, 6));
+
+        // Single partition queries without aggregates and with PER PARTITION LIMIT
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 2"),
+                   row(1, 2, 1, 3),
+                   row(1, 4, 2, 6));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"),
+                   row(1, 2, 1, 3));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6));
+
+        // Single partition queries with wildcard and with PER PARTITION LIMIT
+        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12));
+
+        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"),
+                   row(1, 2, 1, 3, 6));
+
+        // Single partition queries with ORDER BY
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC"),
+                   row(1, 4, 24, 2L, 24),
+                   row(1, 2, 12, 1L, 12),
+                   row(1, 2, 6, 1L, 6));
+
+        // Single partition queries with ORDER BY and PER PARTITION LIMIT
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC PER PARTITION LIMIT 1"),
+                   row(1, 4, 24, 2L, 24));
+
+        // Single partition queries with ORDER BY and LIMIT
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 2"),
+                   row(1, 4, 24, 2L, 24),
+                   row(1, 2, 12, 1L, 12));
+
+        // Multi-partition queries
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12),
+                   row(1, 4, 12, 2L, 24),
+                   row(2, 2, 6, 1L, 6),
+                   row(2, 4, 12, 1L, 12),
+                   row(4, 8, 24, 1L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) AND b = 2 GROUP BY a, b, c"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12),
+                   row(2, 2, 6, 1L, 6));
+
+        // Multi-partition queries without aggregates
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"),
+                   row(1, 2, 1, 3),
+                   row(1, 4, 2, 6),
+                   row(2, 2, 3, 3),
+                   row(2, 4, 3, 6),
+                   row(4, 8, 2, 12));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"),
+                   row(1, 2, 1, 3),
+                   row(1, 2, 2, 6),
+                   row(1, 4, 2, 6),
+                   row(2, 2, 3, 3),
+                   row(2, 4, 3, 6),
+                   row(4, 8, 2, 12));
+
+        // Multi-partition queries with wildcard
+        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12),
+                   row(1, 4, 2, 6, 12),
+                   row(2, 2, 3, 3, 6),
+                   row(2, 4, 3, 6, 12),
+                   row(4, 8, 2, 12, 24));
+
+        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 4, 2, 6, 12),
+                   row(2, 2, 3, 3, 6),
+                   row(2, 4, 3, 6, 12),
+                   row(4, 8, 2, 12, 24));
+
+        // Multi-partition query with DISTINCT
+        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a"),
+                   row(1, 1L),
+                   row(2, 1L),
+                   row(4, 1L));
+
+        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
+                             "SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b");
+
+        // Multi-partition query with DISTINCT and LIMIT
+        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a LIMIT 2"),
+                   row(1, 1L),
+                   row(2, 1L));
+
+        // Multi-partition queries with PER PARTITION LIMIT
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 1"),
+                   row(1, 2, 6, 1L, 6),
+                   row(2, 2, 6, 1L, 6),
+                   row(4, 8, 24, 1L, 24));
+
+        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"),
+                   row(1, 2, 6, 1L, 6),
+                   row(1, 2, 12, 1L, 12),
+                   row(2, 2, 6, 1L, 6),
+                   row(2, 4, 12, 1L, 12),
+                   row(4, 8, 24, 1L, 24));
+
+        // Multi-partition queries with wildcard and PER PARTITION LIMIT
+        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"),
+                   row(1, 2, 1, 3, 6),
+                   row(1, 2, 2, 6, 12),
+                   row(2, 2, 3, 3, 6),
+                   row(2, 4, 3, 6, 12),
+                   row(4, 8, 2, 12, 24));
+
+        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 1"),
+                   row(1, 2, 1, 3, 6),
+                   row(2, 2, 3, 3, 6),
+                   row(4, 8, 2, 12, 24));
+
+        // Multi-partition queries with ORDER BY
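+        // With ORDER BY, rows from the selected partitions are merged in clustering order rather than returned partition by partition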
+        assertRows(execute("SELECT a, b, c, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC"),
+                   row(4, 8, 2, 1L, 24),
+                   row(2, 4, 3, 1L, 12),
+                   row(1, 4, 2, 2L, 24),
+                   row(2, 2, 3, 1L, 6),
+                   row(1, 2, 2, 2L, 12));
+
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC"),
+                   row(4, 8, 2, 12),
+                   row(2, 4, 3, 6),
+                   row(1, 4, 2, 12),
+                   row(2, 2, 3, 3),
+                   row(1, 2, 2, 6),
+                   row(1, 2, 1, 3));
+
+        // Multi-partition queries with ORDER BY and LIMIT
+        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 3"),
+                   row(4, 8, 2, 12),
+                   row(2, 4, 3, 6),
+                   row(1, 4, 2, 12));
+
+        // Multi-partition queries with wildcard, ORDER BY and LIMIT
+        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 3"),
+                   row(4, 8, 2, 12, 24),
+                   row(2, 4, 3, 6, 12),
+                   row(1, 4, 2, 12, 24));
+
+        // Invalid queries
+        assertInvalidMessage("Group by is currently only supported on the columns of the PRIMARY KEY, got e",
+                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, e");
+
+        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
+                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY c");
+
+        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
+                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, c, b");
+
+        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
+                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, a");
+
+        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
+                             "SELECT a, b, c, d FROM %s WHERE token(a) = token(1) GROUP BY b, c");
+
+        assertInvalidMessage("Undefined column name clustering1",
+                             "SELECT a, b as clustering1, max(c) FROM %s WHERE a = 1 GROUP BY a, clustering1");
+
+        assertInvalidMessage("Undefined column name z",
+                             "SELECT a, b, max(c) FROM %s WHERE a = 1 GROUP BY a, b, z");
+
+        // Test with composite partition key
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key ((a, b), c, d))" + compactOption);
+
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 1, 3, 6)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 2, 6, 12)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 3, 12, 24)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
+
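+        // GROUP BY must cover the whole composite partition key; grouping on only part of it is rejected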
+        assertInvalidMessage("Group by is not supported on only a part of the partition key",
+                             "SELECT a, b, max(d) FROM %s GROUP BY a");
+
+        assertRows(execute("SELECT a, b, max(d) FROM %s GROUP BY a, b"),
+                   row(1, 2, 12),
+                   row(1, 1, 12));
+
+        assertRows(execute("SELECT a, b, max(d) FROM %s WHERE a = 1 AND b = 1 GROUP BY b"),
+                   row(1, 1, 12));
+
+        // Test with table without clustering key
+        createTable("CREATE TABLE %s (a int primary key, b int, c int)" + compactOption);
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 6)");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 6, 12)");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 12, 24)");
+
+        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
+                             "SELECT a, max(c) FROM %s WHERE a = 1 GROUP BY a, a");
+    }
+
+    @Test
+    public void testGroupByWithoutPagingWithDeletions() throws Throwable
+    {
+
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))"
+                    + compactOption);
+
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 6, 12)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 9, 18)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 3, 6)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 9, 18)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 12, 24)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 3, 6)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 6, 12)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 9, 18)");
+        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 12, 24)");
+
+        execute("DELETE FROM %s WHERE a = 1 AND b = 2 AND c = 1 AND d = 12");
+        execute("DELETE FROM %s WHERE a = 1 AND b = 2 AND c = 2 AND d = 9");
+
+        assertRows(execute("SELECT a, b, c, count(b), max(d) FROM %s GROUP BY a, b, c"),
+                   row(1, 2, 1, 3L, 9),
+                   row(1, 2, 2, 3L, 12),
+                   row(1, 2, 3, 4L, 12));
+    }
+
+    @Test
+    public void testGroupByWithRangeNamesQueryWithoutPaging() throws Throwable
+    {
+
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, primary key (a, b, c))"
+                    + compactOption);
+
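+        // Populate partitions a = 1..4, each with clusterings b = 1..4 and c = 1..4, where d = a + b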
+        for (int i = 1; i < 5; i++)
+            for (int j = 1; j < 5; j++)
+                for (int k = 1; k < 5; k++)
+                    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", i, j, k, i + j);
+
+        // Makes sure that we have some tombstones
+        execute("DELETE FROM %s WHERE a = 3");
+
+        // Range queries
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3),
+                   row(4, 1, 5, 2L, 5));
+
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3),
+                   row(4, 1, 5, 2L, 5));
+
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(1, 2, 3, 2L, 3),
+                   row(2, 1, 3, 2L, 3),
+                   row(2, 2, 4, 2L, 4),
+                   row(4, 1, 5, 2L, 5),
+                   row(4, 2, 6, 2L, 6));
+
+        // Range queries with LIMIT
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a LIMIT 5 ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3),
+                   row(4, 1, 5, 2L, 5));
+
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b LIMIT 3 ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3),
+                   row(4, 1, 5, 2L, 5));
+
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b LIMIT 3 ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(1, 2, 3, 2L, 3),
+                   row(2, 1, 3, 2L, 3));
+
+        // Range queries with PER PARTITION LIMIT
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 2 ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3),
+                   row(4, 1, 5, 2L, 5));
+
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 1 ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3),
+                   row(4, 1, 5, 2L, 5));
+
+        // Range queries with PER PARTITION LIMIT and LIMIT
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 5 ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3),
+                   row(4, 1, 5, 2L, 5));
+
+        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2 ALLOW FILTERING"),
+                   row(1, 1, 2, 2L, 2),
+                   row(2, 1, 3, 2L, 3));
+    }
+
+    /**
+     * SelectSingleColumn
+     */
+    @Test
+    public void testClusteringColumnRelationsWithCompactStorage() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a text, b int, c int, d int, primary key(a, b, c)) WITH COMPACT STORAGE;");
+        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "first", 1, 5, 1);
+        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "first", 2, 6, 2);
+        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "first", 3, 7, 3);
+        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "second", 4, 8, 4);
+
+        assertRows(execute("select * from %s where a in (?, ?)", "first", "second"),
+                   row("first", 1, 5, 1),
+                   row("first", 2, 6, 2),
+                   row("first", 3, 7, 3),
+                   row("second", 4, 8, 4));
+
+        assertRows(execute("select * from %s where a = ? and b = ? and c in (?, ?)", "first", 2, 6, 7),
+                   row("first", 2, 6, 2));
+
+        assertRows(execute("select * from %s where a = ? and b in (?, ?) and c in (?, ?)", "first", 2, 3, 6, 7),
+                   row("first", 2, 6, 2),
+                   row("first", 3, 7, 3));
+
+        assertRows(execute("select * from %s where a = ? and b in (?, ?) and c in (?, ?)", "first", 3, 2, 7, 6),
+                   row("first", 2, 6, 2),
+                   row("first", 3, 7, 3));
+
+        assertRows(execute("select * from %s where a = ? and c in (?, ?) and b in (?, ?)", "first", 7, 6, 3, 2),
+                   row("first", 2, 6, 2),
+                   row("first", 3, 7, 3));
+
+        assertRows(execute("select c, d from %s where a = ? and c in (?, ?) and b in (?, ?)", "first", 7, 6, 3, 2),
+                   row(6, 2),
+                   row(7, 3));
+
+        assertRows(execute("select c, d from %s where a = ? and c in (?, ?) and b in (?, ?, ?)", "first", 7, 6, 3, 2, 3),
+                   row(6, 2),
+                   row(7, 3));
+
+        assertRows(execute("select * from %s where a = ? and b in (?, ?) and c = ?", "first", 3, 2, 7),
+                   row("first", 3, 7, 3));
+
+        assertRows(execute("select * from %s where a = ? and b in ? and c in ?",
+                           "first", Arrays.asList(3, 2), Arrays.asList(7, 6)),
+                   row("first", 2, 6, 2),
+                   row("first", 3, 7, 3));
+
+        assertInvalidMessage("Invalid null value for column b",
+                             "select * from %s where a = ? and b in ? and c in ?", "first", null, Arrays.asList(7, 6));
+
+        assertRows(execute("select * from %s where a = ? and c >= ? and b in (?, ?)", "first", 6, 3, 2),
+                   row("first", 2, 6, 2),
+                   row("first", 3, 7, 3));
+
+        assertRows(execute("select * from %s where a = ? and c > ? and b in (?, ?)", "first", 6, 3, 2),
+                   row("first", 3, 7, 3));
+
+        assertRows(execute("select * from %s where a = ? and c <= ? and b in (?, ?)", "first", 6, 3, 2),
+                   row("first", 2, 6, 2));
+
+        assertRows(execute("select * from %s where a = ? and c < ? and b in (?, ?)", "first", 7, 3, 2),
+                   row("first", 2, 6, 2));
+
+        assertRows(execute("select * from %s where a = ? and c >= ? and c <= ? and b in (?, ?)", "first", 6, 7, 3, 2),
+                   row("first", 2, 6, 2),
+                   row("first", 3, 7, 3));
+
+        assertRows(execute("select * from %s where a = ? and c > ? and c <= ? and b in (?, ?)", "first", 6, 7, 3, 2),
+                   row("first", 3, 7, 3));
+
+        assertEmpty(execute("select * from %s where a = ? and c > ? and c < ? and b in (?, ?)", "first", 6, 7, 3, 2));
+
+        assertInvalidMessage("Column \"c\" cannot be restricted by both an equality and an inequality relation",
+                             "select * from %s where a = ? and c > ? and c = ? and b in (?, ?)", "first", 6, 7, 3, 2);
+
+        assertInvalidMessage("c cannot be restricted by more than one relation if it includes an Equal",
+                             "select * from %s where a = ? and c = ? and c > ?  and b in (?, ?)", "first", 6, 7, 3, 2);
+
+        assertRows(execute("select * from %s where a = ? and c in (?, ?) and b in (?, ?) order by b DESC",
+                           "first", 7, 6, 3, 2),
+                   row("first", 3, 7, 3),
+                   row("first", 2, 6, 2));
+
+        assertInvalidMessage("More than one restriction was found for the start bound on b",
+                             "select * from %s where a = ? and b > ? and b > ?", "first", 6, 3, 2);
+
+        assertInvalidMessage("More than one restriction was found for the end bound on b",
+                             "select * from %s where a = ? and b < ? and b <= ?", "first", 6, 3, 2);
+    }
+
+    /**
+     * SelectTest
+     */
+    /**
+     * Check query with KEY IN clause for wide row tables
+     * migrated from cql_tests.py:TestCQL.in_clause_wide_rows_test()
+     */
+    @Test
+    public void testSelectKeyInForWideRows() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k, c)) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 10; i++)
+            execute("INSERT INTO %s (k, c, v) VALUES (0, ?, ?)", i, i);
+
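+        // Rows come back in clustering order, regardless of the order of the IN values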
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c IN (5, 2, 8)"),
+                   row(2), row(5), row(8));
+
+        createTable("CREATE TABLE %s (k int, c1 int, c2 int, v int, PRIMARY KEY (k, c1, c2)) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 10; i++)
+            execute("INSERT INTO %s (k, c1, c2, v) VALUES (0, 0, ?, ?)", i, i);
+
+        assertEmpty(execute("SELECT v FROM %s WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3"));
+
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c1 = 0 AND c2 IN (5, 2, 8)"),
+                   row(2), row(5), row(8));
+    }
+
+    /**
+     * Check SELECT respects inclusive and exclusive bounds
+     * migrated from cql_tests.py:TestCQL.exclusive_slice_test()
+     */
+    @Test
+    public void testSelectBounds() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k, c)) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 10; i++)
+            execute("INSERT INTO %s (k, c, v) VALUES (0, ?, ?)", i, i);
+
+        assertRowCount(execute("SELECT v FROM %s WHERE k = 0"), 10);
+
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c >= 2 AND c <= 6"),
+                   row(2), row(3), row(4), row(5), row(6));
+
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c > 2 AND c <= 6"),
+                   row(3), row(4), row(5), row(6));
+
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c >= 2 AND c < 6"),
+                   row(2), row(3), row(4), row(5));
+
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c > 2 AND c < 6"),
+                   row(3), row(4), row(5));
+
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c > 2 AND c <= 6 LIMIT 2"),
+                   row(3), row(4));
+
+        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c >= 2 AND c < 6 ORDER BY c DESC LIMIT 2"),
+                   row(5), row(4));
+    }
+
+    /**
+     * Test for #4716 bug and more generally for good behavior of ordering,
+     * migrated from cql_tests.py:TestCQL.reversed_compact_test()
+     */
+    @Test
+    public void testReverseCompact() throws Throwable
+    {
+        createTable("CREATE TABLE %s ( k text, c int, v int, PRIMARY KEY (k, c) ) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c DESC)");
+
+        for (int i = 0; i < 10; i++)
+            execute("INSERT INTO %s (k, c, v) VALUES ('foo', ?, ?)", i, i);
+
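+        // With CLUSTERING ORDER BY (c DESC), slices are returned in descending clustering order by default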
+        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo'"),
+                   row(5), row(4), row(3));
+
+        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo'"),
+                   row(6), row(5), row(4), row(3), row(2));
+
+        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"),
+                   row(3), row(4), row(5));
+
+        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"),
+                   row(2), row(3), row(4), row(5), row(6));
+
+        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"),
+                   row(5), row(4), row(3));
+
+        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"),
+                   row(6), row(5), row(4), row(3), row(2));
+
+        createTable("CREATE TABLE %s ( k text, c int, v int, PRIMARY KEY (k, c) ) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 10; i++)
+            execute("INSERT INTO %s(k, c, v) VALUES ('foo', ?, ?)", i, i);
+
+        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo'"),
+                   row(3), row(4), row(5));
+
+        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo'"),
+                   row(2), row(3), row(4), row(5), row(6));
+
+        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"),
+                   row(3), row(4), row(5));
+
+        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"),
+                   row(2), row(3), row(4), row(5), row(6));
+
+        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"),
+                   row(5), row(4), row(3));
+
+        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"),
+                   row(6), row(5), row(4), row(3), row(2));
+    }
+
+    /**
+     * Test for the bug from #4760 and #4759,
+     * migrated from cql_tests.py:TestCQL.reversed_compact_multikey_test()
+     */
+    @Test
+    public void testReversedCompactMultikey() throws Throwable
+    {
+        createTable("CREATE TABLE %s (key text, c1 int, c2 int, value text, PRIMARY KEY(key, c1, c2) ) WITH COMPACT STORAGE AND CLUSTERING ORDER BY(c1 DESC, c2 DESC)");
+
+        for (int i = 0; i < 3; i++)
+            for (int j = 0; j < 3; j++)
+                execute("INSERT INTO %s (key, c1, c2, value) VALUES ('foo', ?, ?, 'bar')", i, j);
+
+        // Equalities
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 = 1"),
+                   row(1, 2), row(1, 1), row(1, 0));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 = 1 ORDER BY c1 ASC, c2 ASC"),
+                   row(1, 0), row(1, 1), row(1, 2));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 = 1 ORDER BY c1 DESC, c2 DESC"),
+                   row(1, 2), row(1, 1), row(1, 0));
+
+        // GT
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 > 1"),
+                   row(2, 2), row(2, 1), row(2, 0));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 > 1 ORDER BY c1 ASC, c2 ASC"),
+                   row(2, 0), row(2, 1), row(2, 2));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 > 1 ORDER BY c1 DESC, c2 DESC"),
+                   row(2, 2), row(2, 1), row(2, 0));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1"),
+                   row(2, 2), row(2, 1), row(2, 0), row(1, 2), row(1, 1), row(1, 0));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC, c2 ASC"),
+                   row(1, 0), row(1, 1), row(1, 2), row(2, 0), row(2, 1), row(2, 2));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC"),
+                   row(1, 0), row(1, 1), row(1, 2), row(2, 0), row(2, 1), row(2, 2));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1 ORDER BY c1 DESC, c2 DESC"),
+                   row(2, 2), row(2, 1), row(2, 0), row(1, 2), row(1, 1), row(1, 0));
+
+        // LT
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 < 1"),
+                   row(0, 2), row(0, 1), row(0, 0));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 < 1 ORDER BY c1 ASC, c2 ASC"),
+                   row(0, 0), row(0, 1), row(0, 2));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 < 1 ORDER BY c1 DESC, c2 DESC"),
+                   row(0, 2), row(0, 1), row(0, 0));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1"),
+                   row(1, 2), row(1, 1), row(1, 0), row(0, 2), row(0, 1), row(0, 0));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC, c2 ASC"),
+                   row(0, 0), row(0, 1), row(0, 2), row(1, 0), row(1, 1), row(1, 2));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC"),
+                   row(0, 0), row(0, 1), row(0, 2), row(1, 0), row(1, 1), row(1, 2));
+
+        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1 ORDER BY c1 DESC, c2 DESC"),
+                   row(1, 2), row(1, 1), row(1, 0), row(0, 2), row(0, 1), row(0, 0));
+    }
+
+    /**
+     * Migrated from cql_tests.py:TestCQL.multi_in_compact_non_composite_test()
+     */
+    @Test
+    public void testMultiSelectsNonCompositeCompactStorage() throws Throwable
+    {
+        createTable("CREATE TABLE %s (key int, c int, v int, PRIMARY KEY (key, c)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (key, c, v) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (key, c, v) VALUES (0, 1, 1)");
+        execute("INSERT INTO %s (key, c, v) VALUES (0, 2, 2)");
+
+        assertRows(execute("SELECT * FROM %s WHERE key=0 AND c IN (0, 2)"),
+                   row(0, 0, 0), row(0, 2, 2));
+    }
+
+    @Test
+    public void testSelectDistinct() throws Throwable
+    {
+        // Test a 'compact storage' table.
+        createTable("CREATE TABLE %s (pk0 int, pk1 int, val int, PRIMARY KEY((pk0, pk1))) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 3; i++)
+            execute("INSERT INTO %s (pk0, pk1, val) VALUES (?, ?, ?)", i, i, i);
+
+        assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 1"),
+                   row(0, 0));
+
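+        // Partitions are returned in token order rather than key order, hence the seemingly shuffled keys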
+        assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 3"),
+                   row(0, 0),
+                   row(2, 2),
+                   row(1, 1));
+
+        // Test a 'wide row' thrift table.
+        createTable("CREATE TABLE %s (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 3; i++)
+        {
+            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name0', 0)", i);
+            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name1', 1)", i);
+        }
+
+        assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 1"),
+                   row(1));
+
+        assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 3"),
+                   row(1),
+                   row(0),
+                   row(2));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageSplit2Test.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageSplit2Test.java
new file mode 100644
index 0000000..e399feb
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageSplit2Test.java
@@ -0,0 +1,2782 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.cql3.validation.operations;
+
+import java.util.Arrays;
+import java.util.EnumSet;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.RowUpdateBuilder;
+import org.apache.cassandra.db.SchemaCQLHelper;
+import org.apache.cassandra.db.marshal.AsciiType;
+import org.apache.cassandra.db.marshal.CounterColumnType;
+import org.apache.cassandra.db.marshal.IntegerType;
+import org.apache.cassandra.db.marshal.ReversedType;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
+import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.commons.lang3.StringUtils.isEmpty;
+import static org.junit.Assert.assertTrue;
+
+public class CompactStorageSplit2Test extends CQLTester
+{
+    @Test
+    public void testFilteringOnCompactTablesWithoutIndices() throws Throwable
+    {
+        //----------------------------------------------
+        // Test COMPACT table with clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 6)");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, 7)");
+
+        // Adds tombstones
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 1, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 2, 7)");
+        execute("DELETE FROM %s WHERE a = 1 AND b = 1");
+        execute("DELETE FROM %s WHERE a = 2 AND b = 2");
+
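+        // Run the same assertions against the memtable contents and again after flushing to sstables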
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
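+            // Restrictions not covered by the primary key require ALLOW FILTERING; without it the query is rejected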
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4 ALLOW FILTERING"),
+                       row(1, 4, 4));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7)");
+
+            assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING"),
+                       row(1, 3, 6),
+                       row(2, 3, 7));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > 4 ALLOW FILTERING"),
+                       row(1, 3, 6),
+                       row(2, 3, 7));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b < 3 AND c <= 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= 4 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= 3 AND c <= 6");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= 3 AND c <= 6 ALLOW FILTERING"),
+                       row(1, 2, 4),
+                       row(1, 3, 6),
+                       row(1, 4, 4));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+
+        //----------------------------------------------
+        // Test COMPACT table without clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, 6)");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, 7)");
+
+        // Adds tombstones
+        execute("INSERT INTO %s (a, b, c) VALUES (0, 1, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (5, 2, 7)");
+        execute("DELETE FROM %s WHERE a = 0");
+        execute("DELETE FROM %s WHERE a = 5");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = 4 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7)");
+
+            assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING"),
+                       row(2, 1, 6));
+
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > 4 ALLOW FILTERING"),
+                       row(2, 1, 6),
+                       row(4, 1, 7));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b < 3 AND c <= 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= 4 ALLOW FILTERING"),
+                       row(1, 2, 4),
+                       row(3, 2, 4));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= 3 AND c <= 6");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= 3 AND c <= 6 ALLOW FILTERING"),
+                       row(1, 2, 4),
+                       row(2, 1, 6),
+                       row(3, 2, 4));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+    }
+
+    @Test
+    public void testFilteringOnCompactTablesWithoutIndicesAndWithLists() throws Throwable
+    {
+        //----------------------------------------------
+        // Test COMPACT table with clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c frozen<list<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, [6, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, [4, 1])");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, [7, 1])");
+
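+        // Frozen lists compare element by element, so [6, 2] and [7, 1] both sort after [4, 2]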
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = [4, 1]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = [4, 1] ALLOW FILTERING"),
+                       row(1, 4, list(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > [4, 2] ALLOW FILTERING"),
+                       row(1, 3, list(6, 2)),
+                       row(2, 3, list(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b <= 3 AND c < [6, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE b <= 3 AND c < [6, 2] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= [4, 2] AND c <= [6, 4]");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= [4, 2] AND c <= [6, 4] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)),
+                       row(1, 3, list(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)),
+                       row(1, 3, list(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(1, 3, list(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
+                             unset());
+
+        //----------------------------------------------
+        // Test COMPACT table without clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<list<int>>) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, [6, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, [4, 1])");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, [7, 1])");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = [4, 2] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > [4, 2] ALLOW FILTERING"),
+                       row(2, 1, list(6, 2)),
+                       row(4, 1, list(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b < 3 AND c <= [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= [4, 2] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)),
+                       row(3, 2, list(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= [4, 3] AND c <= [7]");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= [4, 3] AND c <= [7] ALLOW FILTERING"),
+                       row(2, 1, list(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)),
+                       row(2, 1, list(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(2, 1, list(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
+                             unset());
+    }
+
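+    // Verifies filtering on a frozen<set<int>> column of non-indexed COMPACT STORAGE tables
+    // (with and without clustering columns): such queries require ALLOW FILTERING, and
+    // null/unset values for the filtered column are rejected.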
+    @Test
+    public void testFilteringOnCompactTablesWithoutIndicesAndWithSets() throws Throwable
+    {
+        //----------------------------------------------
+        // Test COMPACT table with clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c frozen<set<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4, 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7, 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4, 1}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4, 1} ALLOW FILTERING"),
+                       row(1, 4, set(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > {4, 2} ALLOW FILTERING"),
+                       row(1, 3, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b <= 3 AND c < {6, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE b <= 3 AND c < {6, 2} ALLOW FILTERING"),
+                       row(1, 2, set(2, 4)),
+                       row(2, 3, set(1, 7)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= {4, 2} AND c <= {6, 4}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= {4, 2} AND c <= {6, 4} ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(1, 3, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(1, 3, set(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(1, 3, set(6, 2)));
+        });
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
+                             unset());
+
+        //----------------------------------------------
+        // Test COMPACT table without clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<set<int>>) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4, 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7, 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4, 2} ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > {4, 2} ALLOW FILTERING"),
+                       row(2, 1, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b < 3 AND c <= {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= {4, 2} ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(4, 1, set(1, 7)),
+                       row(3, 2, set(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= {4, 3} AND c <= {7}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= {5, 2} AND c <= {7} ALLOW FILTERING"),
+                       row(2, 1, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(2, 1, set(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(2, 1, set(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
+                             unset());
+    }
+
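+    // Verifies SELECT DISTINCT with partition-key restrictions and ALLOW FILTERING on
+    // COMPACT STORAGE tables, both with a composite partition key and with a 'wide row' layout.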
+    @Test
+    public void testAllowFilteringOnPartitionKeyWithDistinct() throws Throwable
+    {
+        // Test a 'compact storage' table.
+        createTable("CREATE TABLE %s (pk0 int, pk1 int, val int, PRIMARY KEY((pk0, pk1))) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 3; i++)
+            execute("INSERT INTO %s (pk0, pk1, val) VALUES (?, ?, ?)", i, i, i);
+
+        beforeAndAfterFlush(() -> {
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT DISTINCT pk0, pk1 FROM %s WHERE pk1 = 1 LIMIT 3");
+
+            assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s WHERE pk0 < 2 AND pk1 = 1 LIMIT 1 ALLOW FILTERING"),
+                       row(1, 1));
+
+            assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s WHERE pk1 > 1 LIMIT 3 ALLOW FILTERING"),
+                       row(2, 2));
+        });
+
+        // Test a 'wide row' thrift table.
+        createTable("CREATE TABLE %s (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE");
+
+        for (int i = 0; i < 3; i++)
+        {
+            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name0', 0)", i);
+            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name1', 1)", i);
+        }
+
+        beforeAndAfterFlush(() -> {
+            assertRows(execute("SELECT DISTINCT pk FROM %s WHERE pk > 1 LIMIT 1 ALLOW FILTERING"),
+                       row(2));
+
+            assertRows(execute("SELECT DISTINCT pk FROM %s WHERE pk > 0 LIMIT 3 ALLOW FILTERING"),
+                       row(1),
+                       row(2));
+        });
+    }
+
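+    // Verifies ALLOW FILTERING on counter tables (with and without COMPACT STORAGE),
+    // filtering on partition-key components and on the counter value itself.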
+    @Test
+    public void testAllowFilteringOnPartitionKeyWithCounters() throws Throwable
+    {
+        for (String compactStorageClause : new String[]{ "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (a int, b int, c int, cnt counter, PRIMARY KEY ((a, b), c))"
+                        + compactStorageClause);
+
+            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 14L, 11, 12, 13);
+            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 21, 22, 23);
+            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 27L, 21, 22, 26);
+            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 34L, 31, 32, 33);
+            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 41, 42, 43);
+
+            beforeAndAfterFlush(() -> {
+
+                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt = 24"),
+                           row(41, 42, 43, 24L),
+                           row(21, 22, 23, 24L));
+                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 22 AND cnt = 24"),
+                           row(41, 42, 43, 24L));
+                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND b < 25 AND cnt = 24"),
+                           row(21, 22, 23, 24L));
+                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND c < 25 AND cnt = 24"),
+                           row(21, 22, 23, 24L));
+
+                assertInvalidMessage(
+                "ORDER BY is only supported when the partition key is restricted by an EQ or an IN.",
+                "SELECT * FROM %s WHERE a = 21 AND b > 10 AND cnt > 23 ORDER BY c DESC ALLOW FILTERING");
+
+                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a = 21 AND b = 22 AND cnt > 23 ORDER BY c DESC"),
+                           row(21, 22, 26, 27L),
+                           row(21, 22, 23, 24L));
+
+                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt > 20 AND cnt < 30"),
+                           row(41, 42, 43, 24L),
+                           row(21, 22, 23, 24L),
+                           row(21, 22, 26, 27L));
+            });
+        }
+    }
+
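+    // Verifies filtering that combines partition-key restrictions with conditions on a
+    // frozen<list<int>> column of non-indexed COMPACT STORAGE tables.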
+    @Test
+    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndicesAndWithLists() throws Throwable
+    {
+        // ----------------------------------------------
+        // Test COMPACT table with clustering columns
+        // ----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c frozen<list<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, [6, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, [4, 1])");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, [7, 1])");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = [4, 1]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b >= 4 AND c = [4, 1] ALLOW FILTERING"),
+                       row(1, 4, list(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 0 AND c > [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > [4, 2] ALLOW FILTERING"),
+                       row(1, 3, list(6, 2)),
+                       row(2, 3, list(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 1 AND b <= 3 AND c < [6, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a <= 1 AND b <= 3 AND c < [6, 2] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a <= 1 AND c >= [4, 2] AND c <= [6, 4]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND b <= 3 AND c >= [4, 2] AND c <= [6, 4] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)),
+                       row(1, 3, list(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 1 AND c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)),
+                       row(1, 3, list(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE a > 1 AND c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(1, 3, list(6, 2)));
+        });
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a > 1 AND c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a > 1 AND c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS ? ALLOW FILTERING",
+                             unset());
+
+        // ----------------------------------------------
+        // Test COMPACT table without clustering columns
+        // ----------------------------------------------
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<list<int>>) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, [6, 2])");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, [4, 1])");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, [7, 1])");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = [4, 2] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 1 AND c > [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 3 AND c > [4, 2] ALLOW FILTERING"),
+                       row(4, 1, list(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a < 1 AND b < 3 AND c <= [4, 2]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND b < 3 AND c <= [4, 2] ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 1 AND c >= [4, 3] AND c <= [7]");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c >= [4, 3] AND c <= [7] ALLOW FILTERING"),
+                       row(2, 1, list(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 3 AND c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, list(4, 2)),
+                       row(2, 1, list(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE a >=1 AND c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(2, 1, list(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a > 1 AND c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a > 1 AND c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS ? ALLOW FILTERING",
+                             unset());
+    }
+
+
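+    // Verifies filtering that combines partition-key restrictions with conditions on a
+    // frozen<map<int, int>> column, including CONTAINS and CONTAINS KEY.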
+    @Test
+    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndicesAndWithMaps() throws Throwable
+    {
+        //----------------------------------------------
+        // Test COMPACT table with clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c frozen<map<int, int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4 : 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7 : 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = {4 : 1}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a <= 1 AND b = 4 AND c = {4 : 1} ALLOW FILTERING"),
+                       row(1, 4, map(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 1 AND c > {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 1 AND c > {4 : 2} ALLOW FILTERING"),
+                       row(2, 3, map(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 1 AND b <= 3 AND c < {6 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b <= 3 AND c < {6 : 2} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 1 AND c >= {4 : 2} AND c <= {6 : 4}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c >= {4 : 2} AND c <= {6 : 4} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(1, 3, map(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a > 10 AND c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(1, 3, map(6, 2)));
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
+                       row(1, 3, map(6, 2)));
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
+                       row(1, 3, map(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY ? ALLOW FILTERING",
+                             unset());
+
+        //----------------------------------------------
+        // Test COMPACT table without clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<map<int, int>>) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4 : 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7 : 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4 : 2} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c > {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > {4 : 2} ALLOW FILTERING"),
+                       row(2, 1, map(6, 2)),
+                       row(4, 1, map(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b < 3 AND c <= {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b < 3 AND c <= {4 : 2} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(3, 2, map(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c >= {4 : 3} AND c <= {7 : 1}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c >= {5 : 2} AND c <= {7 : 0} ALLOW FILTERING"),
+                       row(2, 1, map(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(2, 1, map(6, 2)));
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c CONTAINS KEY 4 ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(3, 2, map(4, 1)));
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
+                       row(2, 1, map(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY ? ALLOW FILTERING",
+                             unset());
+    }
+
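+    // Verifies filtering that combines partition-key restrictions with conditions on a
+    // frozen<set<int>> column; CONTAINS KEY is rejected for non-map columns.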
+    @Test
+    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndicesAndWithSets() throws Throwable
+    {
+        //----------------------------------------------
+        // Test COMPACT table with clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c frozen<set<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4, 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7, 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = {4, 1}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = {4, 1} ALLOW FILTERING"),
+                       row(1, 4, set(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c > {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > {4, 2} ALLOW FILTERING"),
+                       row(1, 3, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b <= 3 AND c < {6, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND b <= 3 AND c < {6, 2} ALLOW FILTERING"),
+                       row(1, 2, set(2, 4)),
+                       row(2, 3, set(1, 7)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c >= {4, 2} AND c <= {6, 4}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 0 AND c >= {4, 2} AND c <= {6, 4} ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(1, 3, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(1, 3, set(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(1, 3, set(6, 2)));
+        });
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
+                             unset());
+
+        //----------------------------------------------
+        // Test COMPACT table without clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<set<int>>) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6, 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4, 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7, 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4, 2} ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c > {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > {4, 2} ALLOW FILTERING"),
+                       row(2, 1, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND b < 3 AND c <= {4, 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a <= 4 AND b < 3 AND c <= {4, 2} ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(4, 1, set(1, 7)),
+                       row(3, 2, set(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c >= {4, 3} AND c <= {7}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND c >= {5, 2} AND c <= {7} ALLOW FILTERING"),
+                       row(2, 1, set(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 0 AND c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, set(4, 2)),
+                       row(2, 1, set(6, 2)));
+
+            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
+                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY 2 ALLOW FILTERING");
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
+                       row(2, 1, set(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
+                             unset());
+    }
+
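+    // Verifies ALLOW FILTERING on partition-key columns of non-indexed COMPACT STORAGE tables
+    // with plain int columns, including behaviour with tombstones and token() range queries.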
+    @Test
+    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndices() throws Throwable
+    {
+        // ----------------------------------------------
+        // Test COMPACT table with clustering columns
+        // ----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b), c)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 2, 4, 5)");
+        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 3, 6, 7)");
+        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 4, 4, 5)");
+        execute("INSERT INTO %s (a, b, c, d) VALUES (2, 3, 7, 8)");
+
+        // Adds tombstones
+        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 1, 4, 5)");
+        execute("INSERT INTO %s (a, b, c, d) VALUES (2, 2, 7, 8)");
+        execute("DELETE FROM %s WHERE a = 1 AND b = 1 AND c = 4");
+        execute("DELETE FROM %s WHERE a = 2 AND b = 2 AND c = 7");
+
+        beforeAndAfterFlush(() -> {
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4"),
+                       row(1, 4, 4, 5));
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4 AND d = 5");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4 ALLOW FILTERING"),
+                       row(1, 4, 4, 5));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a IN (1, 2) AND b = 3 AND d IN (6, 7)");
+
+            assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2) AND b = 3 AND d IN (6, 7) ALLOW FILTERING"),
+                       row(1, 3, 6, 7));
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c > 4 AND c <= 6 ALLOW FILTERING"),
+                       row(1, 3, 6, 7));
+
+            assertRows(execute("SELECT * FROM %s WHERE a <= 1 AND b >= 2 AND c >= 4 AND d <= 8 ALLOW FILTERING"),
+                       row(1, 3, 6, 7),
+                       row(1, 4, 4, 5),
+                       row(1, 2, 4, 5));
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND c >= 4 AND d <= 8 ALLOW FILTERING"),
+                       row(1, 3, 6, 7),
+                       row(1, 4, 4, 5),
+                       row(1, 2, 4, 5));
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c >= 4 AND d <= 8 ALLOW FILTERING"),
+                       row(2, 3, 7, 8));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE d = null");
+        assertInvalidMessage("Unsupported null value for column a",
+                             "SELECT * FROM %s WHERE a = null ALLOW FILTERING");
+        assertInvalidMessage("Unsupported null value for column a",
+                             "SELECT * FROM %s WHERE a > null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column a",
+                             "SELECT * FROM %s WHERE a = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column a",
+                             "SELECT * FROM %s WHERE a > ? ALLOW FILTERING",
+                             unset());
+
+        //----------------------------------------------
+        // Test COMPACT table without clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int primary key, b int, c int) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, 6)");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, 7)");
+
+        // Adds tombstones
+        execute("INSERT INTO %s (a, b, c) VALUES (0, 1, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES (5, 2, 7)");
+        execute("DELETE FROM %s WHERE a = 0");
+        execute("DELETE FROM %s WHERE a = 5");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = 4 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE b >= 2 AND c <= 4 ALLOW FILTERING"),
+                       row(1, 2, 4),
+                       row(3, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE b >= 2 ALLOW FILTERING"),
+                       row(1, 2, 4),
+                       row(3, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND b <=1 ALLOW FILTERING"),
+                       row(2, 1, 6),
+                       row(4, 1, 7));
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND c >= 4 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b IN (1, 2) AND c IN (6, 7)");
+
+            assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING"),
+                       row(2, 1, 6));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > 4");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > 4 ALLOW FILTERING"),
+                       row(2, 1, 6),
+                       row(4, 1, 7));
+
+            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b >= 2 AND c <= 4 ALLOW FILTERING"),
+                       row(1, 2, 4),
+                       row(3, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND c <= 4 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND b >= 2 AND c <= 4 ALLOW FILTERING"),
+                       row(1, 2, 4));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= 3 AND c <= 6");
+
+            assertRows(execute("SELECT * FROM %s WHERE c <=6 ALLOW FILTERING"),
+                       row(1, 2, 4),
+                       row(2, 1, 6),
+                       row(3, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE token(a) >= token(2)"),
+                       row(2, 1, 6),
+                       row(4, 1, 7),
+                       row(3, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE token(a) >= token(2) ALLOW FILTERING"),
+                       row(2, 1, 6),
+                       row(4, 1, 7),
+                       row(3, 2, 4));
+
+            assertRows(execute("SELECT * FROM %s WHERE token(a) >= token(2) AND b = 1 ALLOW FILTERING"),
+                       row(2, 1, 6),
+                       row(4, 1, 7));
+        });
+    }
+
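+    // Verifies filtering on a frozen<map<int, int>> column of non-indexed COMPACT STORAGE tables,
+    // including CONTAINS, CONTAINS KEY, and rejection of null/unset values.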
+    @Test
+    public void testFilteringOnCompactTablesWithoutIndicesAndWithMaps() throws Throwable
+    {
+        //----------------------------------------------
+        // Test COMPACT table with clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int, b int, c frozen<map<int, int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4 : 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7 : 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4 : 1}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4 : 1} ALLOW FILTERING"),
+                       row(1, 4, map(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > {4 : 2} ALLOW FILTERING"),
+                       row(1, 3, map(6, 2)),
+                       row(2, 3, map(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b <= 3 AND c < {6 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE b <= 3 AND c < {6 : 2} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= {4 : 2} AND c <= {6 : 4}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= {4 : 2} AND c <= {6 : 4} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(1, 3, map(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(1, 3, map(6, 2)));
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS KEY 6 ALLOW FILTERING"),
+                       row(1, 3, map(6, 2)));
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
+                       row(1, 3, map(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS KEY null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS KEY ? ALLOW FILTERING",
+                             unset());
+
+        //----------------------------------------------
+        // Test COMPACT table without clustering columns
+        //----------------------------------------------
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<map<int, int>>) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6 : 2})");
+        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4 : 1})");
+        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7 : 1})");
+
+        beforeAndAfterFlush(() -> {
+
+            // Checks filtering
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4 : 2} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c > {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c > {4 : 2} ALLOW FILTERING"),
+                       row(2, 1, map(6, 2)),
+                       row(4, 1, map(7, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE b < 3 AND c <= {4 : 2}");
+
+            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= {4 : 2} ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(3, 2, map(4, 1)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c >= {4 : 3} AND c <= {7 : 1}");
+
+            assertRows(execute("SELECT * FROM %s WHERE c >= {5 : 2} AND c <= {7 : 0} ALLOW FILTERING"),
+                       row(2, 1, map(6, 2)));
+
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                 "SELECT * FROM %s WHERE c CONTAINS 2");
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(2, 1, map(6, 2)));
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS KEY 4 ALLOW FILTERING"),
+                       row(1, 2, map(4, 2)),
+                       row(3, 2, map(4, 1)));
+
+            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
+                       row(2, 1, map(6, 2)));
+        });
+
+        // Checks filtering with null
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c = null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c > null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c CONTAINS null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT * FROM %s WHERE c CONTAINS KEY null");
+        assertInvalidMessage("Unsupported null value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS KEY null ALLOW FILTERING");
+
+        // Checks filtering with unset
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
+                             unset());
+        assertInvalidMessage("Unsupported unset value for column c",
+                             "SELECT * FROM %s WHERE c CONTAINS KEY ? ALLOW FILTERING",
+                             unset());
+    }
+
+    @Test
+    public void filteringOnCompactTable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 11, 12, 13, 14);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 22, 23, 24);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 25, 26, 27);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 31, 32, 33, 34);
+
+        beforeAndAfterFlush(() -> {
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13"),
+                       row(21, 22, 23, 24),
+                       row(21, 25, 26, 27),
+                       row(31, 32, 33, 34));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13 AND c < 33"),
+                       row(21, 22, 23, 24),
+                       row(21, 25, 26, 27));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13 AND b < 32"),
+                       row(21, 22, 23, 24),
+                       row(21, 25, 26, 27));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a = 21 AND c > 13 AND b < 32 ORDER BY b DESC"),
+                       row(21, 25, 26, 27),
+                       row(21, 22, 23, 24));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a IN (21, 31) AND c > 13 ORDER BY b DESC"),
+                       row(31, 32, 33, 34),
+                       row(21, 25, 26, 27),
+                       row(21, 22, 23, 24));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13 AND d < 34"),
+                       row(21, 22, 23, 24),
+                       row(21, 25, 26, 27));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13"),
+                       row(21, 22, 23, 24),
+                       row(21, 25, 26, 27),
+                       row(31, 32, 33, 34));
+        });
+
+        // with frozen in clustering key
+        createTable("CREATE TABLE %s (a int, b int, c frozen<list<int>>, d int, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 11, 12, list(1, 3), 14);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 22, list(2, 3), 24);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 25, list(2, 6), 27);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 31, 32, list(3, 3), 34);
+
+        beforeAndAfterFlush(() -> {
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c CONTAINS 2"),
+                       row(21, 22, list(2, 3), 24),
+                       row(21, 25, list(2, 6), 27));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c CONTAINS 2 AND b < 25"),
+                       row(21, 22, list(2, 3), 24));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 3"),
+                       row(21, 22, list(2, 3), 24));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 12 AND c CONTAINS 2 AND d < 27"),
+                       row(21, 22, list(2, 3), 24));
+        });
+
+        // with frozen in value
+        createTable("CREATE TABLE %s (a int, b int, c int, d frozen<list<int>>, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 11, 12, 13, list(1, 4));
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 22, 23, list(2, 4));
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 25, 25, list(2, 6));
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 31, 32, 34, list(3, 4));
+
+        beforeAndAfterFlush(() -> {
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE d CONTAINS 2"),
+                       row(21, 22, 23, list(2, 4)),
+                       row(21, 25, 25, list(2, 6)));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE d CONTAINS 2 AND b < 25"),
+                       row(21, 22, 23, list(2, 4)));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE d CONTAINS 2 AND d CONTAINS 4"),
+                       row(21, 22, 23, list(2, 4)));
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 12 AND c < 25 AND d CONTAINS 2"),
+                       row(21, 22, 23, list(2, 4)));
+        });
+    }
+
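+    /**
+     * Asserts that the statement is rejected without ALLOW FILTERING, then executes it
+     * again with ALLOW FILTERING appended and returns the result.
+     */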
+    private UntypedResultSet executeFilteringOnly(String statement) throws Throwable
+    {
+        assertInvalid(statement);
+        return execute(statement + " ALLOW FILTERING");
+    }
+
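+    // All queries below restrict the counter column, so they must be run with ALLOW FILTERING
+    // (exercised via executeFilteringOnly).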
+    @Test
+    public void testFilteringWithCounters() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, cnt counter, PRIMARY KEY (a, b, c))" + CompactStorageSplit1Test.compactOption);
+
+        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 14L, 11, 12, 13);
+        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 21, 22, 23);
+        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 27L, 21, 25, 26);
+        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 34L, 31, 32, 33);
+        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 41, 42, 43);
+
+        beforeAndAfterFlush(() -> {
+
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt = 24"),
+                       row(21, 22, 23, 24L),
+                       row(41, 42, 43, 24L));
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 22 AND cnt = 24"),
+                       row(41, 42, 43, 24L));
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND b < 25 AND cnt = 24"),
+                       row(21, 22, 23, 24L));
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND c < 25 AND cnt = 24"),
+                       row(21, 22, 23, 24L));
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a = 21 AND b > 10 AND cnt > 23 ORDER BY b DESC"),
+                       row(21, 25, 26, 27L),
+                       row(21, 22, 23, 24L));
+            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt > 20 AND cnt < 30"),
+                       row(21, 22, 23, 24L),
+                       row(21, 25, 26, 27L),
+                       row(41, 42, 43, 24L));
+        });
+    }
+
+    /**
+     * Checks SELECT on COMPACT STORAGE tables with different clustering orders.
+     * See CASSANDRA-10988.
+     */
+    @Test
+    public void testClusteringOrderWithSlice() throws Throwable
+    {
+        final String compactOption = " WITH COMPACT STORAGE AND";
+
+        // non-compound, ASC order
+        createTable("CREATE TABLE %s (a text, b int, PRIMARY KEY (a, b)) " +
+                    compactOption +
+                    " CLUSTERING ORDER BY (b ASC)");
+
+        execute("INSERT INTO %s (a, b) VALUES ('a', 2)");
+        execute("INSERT INTO %s (a, b) VALUES ('a', 3)");
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
+                   row("a", 2),
+                   row("a", 3));
+
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b DESC"),
+                   row("a", 3),
+                   row("a", 2));
+
+        // non-compound, DESC order
+        createTable("CREATE TABLE %s (a text, b int, PRIMARY KEY (a, b))" +
+                    compactOption +
+                    " CLUSTERING ORDER BY (b DESC)");
+
+        execute("INSERT INTO %s (a, b) VALUES ('a', 2)");
+        execute("INSERT INTO %s (a, b) VALUES ('a', 3)");
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
+                   row("a", 3),
+                   row("a", 2));
+
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b ASC"),
+                   row("a", 2),
+                   row("a", 3));
+
+        // compound, first column DESC order
+        createTable("CREATE TABLE %s (a text, b int, c int, PRIMARY KEY (a, b, c)) " +
+                    compactOption +
+                    " CLUSTERING ORDER BY (b DESC)"
+        );
+
+        execute("INSERT INTO %s (a, b, c) VALUES ('a', 2, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES ('a', 3, 5)");
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
+                   row("a", 3, 5),
+                   row("a", 2, 4));
+
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b ASC"),
+                   row("a", 2, 4),
+                   row("a", 3, 5));
+
+        // compound, mixed order
+        createTable("CREATE TABLE %s (a text, b int, c int, PRIMARY KEY (a, b, c)) " +
+                    compactOption +
+                    " CLUSTERING ORDER BY (b ASC, c DESC)"
+        );
+
+        execute("INSERT INTO %s (a, b, c) VALUES ('a', 2, 4)");
+        execute("INSERT INTO %s (a, b, c) VALUES ('a', 3, 5)");
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
+                   row("a", 2, 4),
+                   row("a", 3, 5));
+
+        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b ASC"),
+                   row("a", 2, 4),
+                   row("a", 3, 5));
+    }
+
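+    // Runs the same empty-blob restrictions against both a regular and a COMPACT STORAGE table;
+    // only the non-compact table accepts an empty clustering value on INSERT.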
+    @Test
+    public void testEmptyRestrictionValue() throws Throwable
+    {
+        for (String options : new String[]{ "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk blob, c blob, v blob, PRIMARY KEY ((pk), c))" + options);
+            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                    bytes("foo123"), bytes("1"), bytes("1"));
+            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                    bytes("foo123"), bytes("2"), bytes("2"));
+
+            beforeAndAfterFlush(() -> {
+
+                assertInvalidMessage("Key may not be empty", "SELECT * FROM %s WHERE pk = textAsBlob('');");
+                assertInvalidMessage("Key may not be empty", "SELECT * FROM %s WHERE pk IN (textAsBlob(''), textAsBlob('1'));");
+
+                assertInvalidMessage("Key may not be empty",
+                                     "INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                                     EMPTY_BYTE_BUFFER, bytes("2"), bytes("2"));
+
+                // Test clustering columns restrictions
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c = textAsBlob('');"));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) = (textAsBlob(''));"));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c IN (textAsBlob(''), textAsBlob('1'));"),
+                           row(bytes("foo123"), bytes("1"), bytes("1")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) IN ((textAsBlob('')), (textAsBlob('1')));"),
+                           row(bytes("foo123"), bytes("1"), bytes("1")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('');"),
+                           row(bytes("foo123"), bytes("1"), bytes("1")),
+                           row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) > (textAsBlob(''));"),
+                           row(bytes("foo123"), bytes("1"), bytes("1")),
+                           row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('');"),
+                           row(bytes("foo123"), bytes("1"), bytes("1")),
+                           row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) >= (textAsBlob(''));"),
+                           row(bytes("foo123"), bytes("1"), bytes("1")),
+                           row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c <= textAsBlob('');"));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) <= (textAsBlob(''));"));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c < textAsBlob('');"));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) < (textAsBlob(''));"));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('') AND c < textAsBlob('');"));
+            });
+
+            if (options.contains("COMPACT"))
+            {
+                assertInvalidMessage("Invalid empty or null value for column c",
+                                     "INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                                     bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4"));
+            }
+            else
+            {
+                execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                        bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4"));
+
+                beforeAndAfterFlush(() -> {
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c = textAsBlob('');"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) = (textAsBlob(''));"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c IN (textAsBlob(''), textAsBlob('1'));"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
+                               row(bytes("foo123"), bytes("1"), bytes("1")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) IN ((textAsBlob('')), (textAsBlob('1')));"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
+                               row(bytes("foo123"), bytes("1"), bytes("1")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('');"),
+                               row(bytes("foo123"), bytes("1"), bytes("1")),
+                               row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) > (textAsBlob(''));"),
+                               row(bytes("foo123"), bytes("1"), bytes("1")),
+                               row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('');"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
+                               row(bytes("foo123"), bytes("1"), bytes("1")),
+                               row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) >= (textAsBlob(''));"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
+                               row(bytes("foo123"), bytes("1"), bytes("1")),
+                               row(bytes("foo123"), bytes("2"), bytes("2")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c <= textAsBlob('');"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
+
+                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) <= (textAsBlob(''));"),
+                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
+
+                    assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c < textAsBlob('');"));
+
+                    assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) < (textAsBlob(''));"));
+
+                    assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('') AND c < textAsBlob('');"));
+                });
+            }
+
+            // Test restrictions on non-primary key value
+            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND v = textAsBlob('') ALLOW FILTERING;"));
+
+            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                    bytes("foo123"), bytes("3"), EMPTY_BYTE_BUFFER);
+
+            beforeAndAfterFlush(() -> {
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND v = textAsBlob('') ALLOW FILTERING;"),
+                           row(bytes("foo123"), bytes("3"), EMPTY_BYTE_BUFFER));
+            });
+        }
+    }
+
+    @Test
+    public void testEmptyRestrictionValueWithMultipleClusteringColumns() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk blob, c1 blob, c2 blob, v blob, PRIMARY KEY (pk, c1, c2))" + CompactStorageSplit1Test.compactOption);
+        execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("1"), bytes("1"));
+        execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("2"), bytes("2"));
+
+        beforeAndAfterFlush(() -> {
+
+            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('');"));
+
+            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 = textAsBlob('');"));
+
+            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) = (textAsBlob('1'), textAsBlob(''));"));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 IN (textAsBlob(''), textAsBlob('1')) AND c2 = textAsBlob('1');"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 IN (textAsBlob(''), textAsBlob('1'));"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) IN ((textAsBlob(''), textAsBlob('1')), (textAsBlob('1'), textAsBlob('1')));"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 > textAsBlob('');"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 > textAsBlob('');"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'));"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 >= textAsBlob('');"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
+
+            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 <= textAsBlob('');"));
+
+            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) <= (textAsBlob('1'), textAsBlob(''));"));
+        });
+
+        execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)",
+                bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4"));
+
+        beforeAndAfterFlush(() -> {
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('');"),
+                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('') AND c2 = textAsBlob('1');"),
+                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) = (textAsBlob(''), textAsBlob('1'));"),
+                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 IN (textAsBlob(''), textAsBlob('1')) AND c2 = textAsBlob('1');"),
+                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) IN ((textAsBlob(''), textAsBlob('1')), (textAsBlob('1'), textAsBlob('1')));"),
+                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'));"),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) >= (textAsBlob(''), textAsBlob('1'));"),
+                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")),
+                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
+
+            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) <= (textAsBlob(''), textAsBlob('1'));"),
+                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
+
+            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) < (textAsBlob(''), textAsBlob('1'));"));
+        });
+    }
+
+    @Test
+    public void testEmptyRestrictionValueWithOrderBy() throws Throwable
+    {
+        for (String options : new String[]{ " WITH COMPACT STORAGE",
+                                            " WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c DESC)" })
+        {
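+            // When the table is already created with a DESC clustering order no explicit ORDER BY
+            // is added, so both iterations expect rows in descending order of c.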
+            String orderingClause = options.contains("ORDER") ? "" : "ORDER BY c DESC";
+
+            createTable("CREATE TABLE %s (pk blob, c blob, v blob, PRIMARY KEY ((pk), c))" + options);
+            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                    bytes("foo123"),
+                    bytes("1"),
+                    bytes("1"));
+            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                    bytes("foo123"),
+                    bytes("2"),
+                    bytes("2"));
+
+            beforeAndAfterFlush(() -> {
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('')" + orderingClause),
+                           row(bytes("foo123"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('')" + orderingClause),
+                           row(bytes("foo123"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1")));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c < textAsBlob('')" + orderingClause));
+
+                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c <= textAsBlob('')" + orderingClause));
+            });
+
+            assertInvalidMessage("Invalid empty or null value for column c",
+                                 "INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
+                                 bytes("foo123"),
+                                 EMPTY_BYTE_BUFFER,
+                                 bytes("4"));
+        }
+    }
+
+    @Test
+    public void testEmptyRestrictionValueWithMultipleClusteringColumnsAndOrderBy() throws Throwable
+    {
+        for (String options : new String[]{ " WITH COMPACT STORAGE",
+                                            " WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c1 DESC, c2 DESC)" })
+        {
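+            // As above: either the table itself is in DESC clustering order or an explicit ORDER BY
+            // is appended, so rows are always expected in descending (c1, c2) order.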
+            String orderingClause = options.contains("ORDER") ? "" : "ORDER BY c1 DESC, c2 DESC";
+
+            createTable("CREATE TABLE %s (pk blob, c1 blob, c2 blob, v blob, PRIMARY KEY (pk, c1, c2))" + options);
+            execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("1"), bytes("1"));
+            execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("2"), bytes("2"));
+
+            beforeAndAfterFlush(() -> {
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 > textAsBlob('')" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 > textAsBlob('')" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'))" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 >= textAsBlob('')" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+            });
+
+            execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)",
+                    bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4"));
+
+            beforeAndAfterFlush(() -> {
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 IN (textAsBlob(''), textAsBlob('1')) AND c2 = textAsBlob('1')" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                           row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) IN ((textAsBlob(''), textAsBlob('1')), (textAsBlob('1'), textAsBlob('1')))" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                           row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'))" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
+
+                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) >= (textAsBlob(''), textAsBlob('1'))" + orderingClause),
+                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
+                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
+                           row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
+            });
+        }
+    }
+
+    /**
+     * Tests adapted from UpdateTest, covering UPDATE statements against compact tables.
+     */
+    @Test
+    public void testUpdate() throws Throwable
+    {
+        testUpdate(false);
+        testUpdate(true);
+    }
+
+    private void testUpdate(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1))" + CompactStorageSplit1Test.compactOption);
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 2, 2)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 3, 3)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (1, 0, 4)");
+
+        flush(forceFlush);
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
+        flush(forceFlush);
+        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ?",
+                           0, 1),
+                   row(7));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1) = (?)", 8, 0, 2);
+        flush(forceFlush);
+        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ?",
+                           0, 2),
+                   row(8));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey IN (?, ?) AND clustering_1 = ?", 9, 0, 1, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ?",
+                           0, 1, 0),
+                   row(0, 0, 9),
+                   row(1, 0, 9));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey IN ? AND clustering_1 = ?", 19, Arrays.asList(0, 1), 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN ? AND clustering_1 = ?",
+                           Arrays.asList(0, 1), 0),
+                   row(0, 0, 19),
+                   row(1, 0, 19));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 IN (?, ?)", 10, 0, 1, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?)",
+                           0, 1, 0),
+                   row(0, 0, 10),
+                   row(0, 1, 10));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))", 20, 0, 0, 1);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
+                           0, 0, 1),
+                   row(0, 0, 20),
+                   row(0, 1, 20));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", null, 0, 0);
+        flush(forceFlush);
+
+        if (isEmpty(CompactStorageSplit1Test.compactOption))
+        {
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
+                               0, 0, 1),
+                       row(0, 0, null),
+                       row(0, 1, 20));
+        }
+        else
+        {
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
+                               0, 0, 1),
+                       row(0, 1, 20));
+        }
+
+        // test invalid queries
+
+        // missing primary key element
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "UPDATE %s SET value = ? WHERE clustering_1 = ? ", 7, 1);
+
+        assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
+
+        // token function
+        assertInvalidMessage("The token function cannot be used in WHERE clauses for UPDATE statements",
+                             "UPDATE %s SET value = ? WHERE token(partitionKey) = token(?) AND clustering_1 = ?",
+                             7, 0, 1);
+
+        // multiple time the same value
+        assertInvalidSyntax("UPDATE %s SET value = ?, value = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_1 = ?", 7, 0, 1, 1);
+
+        // Undefined column names
+        assertInvalidMessage("Undefined column name value1",
+                             "UPDATE %s SET value1 = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
+
+        assertInvalidMessage("Undefined column name partitionkey1",
+                             "UPDATE %s SET value = ? WHERE partitionKey1 = ? AND clustering_1 = ?", 7, 0, 1);
+
+        assertInvalidMessage("Undefined column name clustering_3",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_3 = ?", 7, 0, 1);
+
+        // Invalid operator in the where clause
+        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                             "UPDATE %s SET value = ? WHERE partitionKey > ? AND clustering_1 = ?", 7, 0, 1);
+
+        assertInvalidMessage("Cannot use UPDATE with CONTAINS",
+                             "UPDATE %s SET value = ? WHERE partitionKey CONTAINS ? AND clustering_1 = ?", 7, 0, 1);
+
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND value = ?", 7, 0, 1, 3);
+
+        assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 > ?", 7, 0, 1);
+    }
+
+    @Test
+    public void testUpdateWithTwoClusteringColumns() throws Throwable
+    {
+        testUpdateWithTwoClusteringColumns(false);
+        testUpdateWithTwoClusteringColumns(true);
+    }
+
+    private void testUpdateWithTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "clustering_2 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))" + CompactStorageSplit1Test.compactOption);
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 2, 2)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 3, 3)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 1, 4)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 2, 5)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 6)");
+        flush(forceFlush);
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+        flush(forceFlush);
+        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                           0, 1, 1),
+                   row(7));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) = (?, ?)", 8, 0, 1, 2);
+        flush(forceFlush);
+        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                           0, 1, 2),
+                   row(8));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 9, 0, 1, 0, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?",
+                           0, 1, 0, 0),
+                   row(0, 0, 0, 9),
+                   row(1, 0, 0, 9));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey IN ? AND clustering_1 = ? AND clustering_2 = ?", 9, Arrays.asList(0, 1), 0, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN ? AND clustering_1 = ? AND clustering_2 = ?",
+                           Arrays.asList(0, 1), 0, 0),
+                   row(0, 0, 0, 9),
+                   row(1, 0, 0, 9));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)", 12, 0, 1, 1, 2);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)",
+                           0, 1, 1, 2),
+                   row(0, 1, 1, 12),
+                   row(0, 1, 2, 12));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 IN (?, ?) AND clustering_2 IN (?, ?)", 10, 0, 1, 0, 1, 2);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?) AND clustering_2 IN (?, ?)",
+                           0, 1, 0, 1, 2),
+                   row(0, 0, 1, 10),
+                   row(0, 0, 2, 10),
+                   row(0, 1, 1, 10),
+                   row(0, 1, 2, 10));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))", 20, 0, 0, 2, 1, 2);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))",
+                           0, 0, 2, 1, 2),
+                   row(0, 0, 2, 20),
+                   row(0, 1, 2, 20));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", null, 0, 0, 2);
+        flush(forceFlush);
+
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))",
+                           0, 0, 2, 1, 2),
+                   row(0, 1, 2, 20));
+
+        // test invalid queries
+
+        // missing primary key element
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "UPDATE %s SET value = ? WHERE clustering_1 = ? AND clustering_2 = ?", 7, 1, 1);
+
+        String errorMsg = "PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted";
+
+        assertInvalidMessage(errorMsg,
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_2 = ?", 7, 0, 1);
+
+        assertInvalidMessage("Some clustering keys are missing: clustering_1, clustering_2",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
+
+        // token function
+        assertInvalidMessage("The token function cannot be used in WHERE clauses for UPDATE statements",
+                             "UPDATE %s SET value = ? WHERE token(partitionKey) = token(?) AND clustering_1 = ? AND clustering_2 = ?",
+                             7, 0, 1, 1);
+
+        // multiple time the same value
+        assertInvalidSyntax("UPDATE %s SET value = ?, value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND clustering_1 = ?", 7, 0, 1, 1, 1);
+
+        // Undefined column names
+        assertInvalidMessage("Undefined column name value1",
+                             "UPDATE %s SET value1 = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+        assertInvalidMessage("Undefined column name partitionkey1",
+                             "UPDATE %s SET value = ? WHERE partitionKey1 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+        assertInvalidMessage("Undefined column name clustering_3",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_3 = ?", 7, 0, 1, 1);
+
+        // Invalid operator in the where clause
+        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                             "UPDATE %s SET value = ? WHERE partitionKey > ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+        assertInvalidMessage("Cannot use UPDATE with CONTAINS",
+                             "UPDATE %s SET value = ? WHERE partitionKey CONTAINS ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND value = ?", 7, 0, 1, 1, 3);
+
+        assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 > ?", 7, 0, 1);
+
+        assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
+                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) > (?, ?)", 7, 0, 1, 1);
+    }
+
+    @Test
+    public void testUpdateWithMultiplePartitionKeyComponents() throws Throwable
+    {
+        testUpdateWithMultiplePartitionKeyComponents(false);
+        testUpdateWithMultiplePartitionKeyComponents(true);
+    }
+
+    private void testUpdateWithMultiplePartitionKeyComponents(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey_1 int," +
+                    "partitionKey_2 int," +
+                    "clustering_1 int," +
+                    "clustering_2 int," +
+                    "value int," +
+                    " PRIMARY KEY ((partitionKey_1, partitionKey_2), clustering_1, clustering_2))" + CompactStorageSplit1Test.compactOption);
+
+        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 1, 0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 1, 1, 1, 2)");
+        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (1, 0, 0, 1, 3)");
+        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (1, 1, 0, 1, 3)");
+        flush(forceFlush);
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey_1 = ? AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 0, 0, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT value FROM %s WHERE partitionKey_1 = ? AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?",
+                           0, 0, 0, 0),
+                   row(7));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?", 9, 0, 1, 1, 0, 1);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?",
+                           0, 1, 1, 0, 1),
+                   row(0, 1, 0, 1, 9),
+                   row(1, 1, 0, 1, 9));
+
+        execute("UPDATE %s SET value = ? WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 10, 0, 1, 0, 1, 0, 1);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, 0, 0, 7),
+                   row(0, 0, 0, 1, 10),
+                   row(0, 1, 0, 1, 10),
+                   row(0, 1, 1, 1, 2),
+                   row(1, 0, 0, 1, 10),
+                   row(1, 1, 0, 1, 10));
+
+        // missing primary key element
+        assertInvalidMessage("Some partition key parts are missing: partitionkey_2",
+                             "UPDATE %s SET value = ? WHERE partitionKey_1 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 1, 1);
+    }
+
+    @Test
+    public void testCfmCompactStorageCQL()
+    {
+        String keyspace = "cql_test_keyspace_compact";
+        String table = "test_table_compact";
+
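+        // With only the DENSE flag set the table is compact, so the generated CQL below is
+        // expected to include WITH COMPACT STORAGE.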
+        TableMetadata.Builder metadata =
+        TableMetadata.builder(keyspace, table)
+                     .flags(EnumSet.of(TableMetadata.Flag.DENSE))
+                     .addPartitionKeyColumn("pk1", IntegerType.instance)
+                     .addPartitionKeyColumn("pk2", AsciiType.instance)
+                     .addClusteringColumn("ck1", ReversedType.getInstance(IntegerType.instance))
+                     .addClusteringColumn("ck2", IntegerType.instance)
+                     .addRegularColumn("reg", IntegerType.instance);
+
+        SchemaLoader.createKeyspace(keyspace, KeyspaceParams.simple(1), metadata);
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
+
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
+        String expected = "CREATE TABLE IF NOT EXISTS cql_test_keyspace_compact.test_table_compact (\n" +
+                          "    pk1 varint,\n" +
+                          "    pk2 ascii,\n" +
+                          "    ck1 varint,\n" +
+                          "    ck2 varint,\n" +
+                          "    reg varint,\n" +
+                          "    PRIMARY KEY ((pk1, pk2), ck1, ck2)\n" +
+                          ") WITH COMPACT STORAGE\n" +
+                          "    AND ID = " + cfs.metadata.id + "\n" +
+                          "    AND CLUSTERING ORDER BY (ck1 DESC, ck2 ASC)";
+        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
+                   actual.contains(expected));
+    }
+
+    @Test
+    public void testCfmCounterCQL()
+    {
+        String keyspace = "cql_test_keyspace_counter";
+        String table = "test_table_counter";
+
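+        // Same layout as testCfmCompactStorageCQL, but built as a counter table
+        // (COUNTER flag plus a counter column).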
+        TableMetadata.Builder metadata;
+        metadata = TableMetadata.builder(keyspace, table)
+                                .flags(EnumSet.of(TableMetadata.Flag.DENSE,
+                                       TableMetadata.Flag.COUNTER))
+                                .isCounter(true)
+                                .addPartitionKeyColumn("pk1", IntegerType.instance)
+                                .addPartitionKeyColumn("pk2", AsciiType.instance)
+                                .addClusteringColumn("ck1", ReversedType.getInstance(IntegerType.instance))
+                                .addClusteringColumn("ck2", IntegerType.instance)
+                                .addRegularColumn("cnt", CounterColumnType.instance);
+
+        SchemaLoader.createKeyspace(keyspace, KeyspaceParams.simple(1), metadata);
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
+
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
+        String expected = "CREATE TABLE IF NOT EXISTS cql_test_keyspace_counter.test_table_counter (\n" +
+                          "    pk1 varint,\n" +
+                          "    pk2 ascii,\n" +
+                          "    ck1 varint,\n" +
+                          "    ck2 varint,\n" +
+                          "    cnt counter,\n" +
+                          "    PRIMARY KEY ((pk1, pk2), ck1, ck2)\n" +
+                          ") WITH COMPACT STORAGE\n" +
+                          "    AND ID = " + cfs.metadata.id + "\n" +
+                          "    AND CLUSTERING ORDER BY (ck1 DESC, ck2 ASC)";
+        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
+                   actual.contains(expected));
+    }
+
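+    // The following tests create compact tables through CQL and check that SchemaCQLHelper
+    // reproduces WITH COMPACT STORAGE in the generated CREATE TABLE statement.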
+    @Test
+    public void testDenseTable() throws Throwable
+    {
+        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
+                                       "pk1 varint PRIMARY KEY," +
+                                       "reg1 int)" +
+                                       " WITH COMPACT STORAGE");
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
+
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
+        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
+                        "    pk1 varint,\n" +
+                        "    reg1 int,\n" +
+                        "    PRIMARY KEY (pk1)\n" +
+                        ") WITH COMPACT STORAGE\n" +
+                        "    AND ID = " + cfs.metadata.id + "\n";
+
+        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
+                   actual.contains(expected));
+    }
+
+    @Test
+    public void testStaticCompactTable()
+    {
+        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
+                                       "pk1 varint PRIMARY KEY," +
+                                       "reg1 int," +
+                                       "reg2 int)" +
+                                       " WITH COMPACT STORAGE");
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
+        assertTrue(SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata()).contains(
+        "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
+        "    pk1 varint,\n" +
+        "    reg1 int,\n" +
+        "    reg2 int,\n" +
+        "    PRIMARY KEY (pk1)\n" +
+        ") WITH COMPACT STORAGE\n" +
+        "    AND ID = " + cfs.metadata.id));
+    }
+
+    @Test
+    public void testStaticCompactWithCounters()
+    {
+        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
+                                       "pk1 varint PRIMARY KEY," +
+                                       "reg1 counter," +
+                                       "reg2 counter)" +
+                                       " WITH COMPACT STORAGE");
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
+
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
+        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
+                          "    pk1 varint,\n" +
+                          "    reg1 counter,\n" +
+                          "    reg2 counter,\n" +
+                          "    PRIMARY KEY (pk1)\n" +
+                          ") WITH COMPACT STORAGE\n" +
+                          "    AND ID = " + cfs.metadata.id + "\n";
+        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
+                   actual.contains(expected));
+    }
+
+    @Test
+    public void testDenseCompactTableWithoutRegulars() throws Throwable
+    {
+        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
+                                       "pk1 varint," +
+                                       "ck1 int," +
+                                       "PRIMARY KEY (pk1, ck1))" +
+                                       " WITH COMPACT STORAGE");
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
+
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
+        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
+                          "    pk1 varint,\n" +
+                          "    ck1 int,\n" +
+                          "    PRIMARY KEY (pk1, ck1)\n" +
+                          ") WITH COMPACT STORAGE\n" +
+                          "    AND ID = " + cfs.metadata.id;
+        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
+                   actual.contains(expected));
+    }
+
+    @Test
+    public void testCompactDynamic() throws Throwable
+    {
+        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
+                                       "pk1 varint," +
+                                       "ck1 int," +
+                                       "reg int," +
+                                       "PRIMARY KEY (pk1, ck1))" +
+                                       " WITH COMPACT STORAGE");
+
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
+
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
+        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
+                          "    pk1 varint,\n" +
+                          "    ck1 int,\n" +
+                          "    reg int,\n" +
+                          "    PRIMARY KEY (pk1, ck1)\n" +
+                          ") WITH COMPACT STORAGE\n" +
+                          "    AND ID = " + cfs.metadata.id;
+
+        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
+                   actual.contains(expected));
+    }
+
+    /**
+     * PartitionUpdateTest
+     */
+
+    @Test
+    public void testOperationCountWithCompactTable()
+    {
+        createTable("CREATE TABLE %s (key text PRIMARY KEY, a int) WITH COMPACT STORAGE");
+        TableMetadata cfm = currentTableMetadata();
+
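+        // a row update that sets a single column should report exactly one operation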
+        PartitionUpdate update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").add("a", 1)
+                                                                                                 .buildUpdate();
+        Assert.assertEquals(1, update.operationCount());
+
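+        // an update that sets no columns should report zero operations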
+        update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").buildUpdate();
+        Assert.assertEquals(0, update.operationCount());
+    }
+
+    /**
+     * AlterTest
+     */
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testAlterWithCompactStaticFormat() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
+
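+        // column1 is a hidden compact-storage column, so renaming it must fail as undefined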
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "ALTER TABLE %s RENAME column1 TO column2");
+    }
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testAlterWithCompactNonStaticFormat() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "ALTER TABLE %s RENAME column1 TO column2");
+
+        createTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "ALTER TABLE %s RENAME column1 TO column2");
+    }
+
+    /**
+     * CreateTest
+     */
+
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testCreateIndexWithCompactStaticFormat() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "CREATE INDEX column1_index on %s (column1)");
+        assertInvalidMessage("Undefined column name value in table",
+                             "CREATE INDEX value_index on %s (value)");
+    }
+
+    /**
+     * DeleteTest
+     */
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testDeleteWithCompactStaticFormat() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
+        testDeleteWithCompactFormat();
+
+        // if column1 is present, hidden column is called column2
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
+        assertInvalidMessage("Undefined column name column2 in table",
+                             "DELETE FROM %s WHERE a = 1 AND column2= 1");
+        assertInvalidMessage("Undefined column name column2 in table",
+                             "DELETE FROM %s WHERE a = 1 AND column2 = 1 AND value1 = 1");
+        assertInvalidMessage("Undefined column name column2",
+                             "DELETE column2 FROM %s WHERE a = 1");
+
+        // if value is present, hidden column is called value1
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
+        assertInvalidMessage("Undefined column name value1 in table",
+                             "DELETE FROM %s WHERE a = 1 AND value1 = 1");
+        assertInvalidMessage("Undefined column name value1 in table",
+                             "DELETE FROM %s WHERE a = 1 AND value1 = 1 AND column1 = 1");
+        assertInvalidMessage("Undefined column name value1",
+                             "DELETE value1 FROM %s WHERE a = 1");
+    }
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testDeleteWithCompactNonStaticFormat() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b) VALUES (1, 1)");
+        execute("INSERT INTO %s (a, b) VALUES (2, 1)");
+        assertRows(execute("SELECT a, b FROM %s"),
+                   row(1, 1),
+                   row(2, 1));
+        testDeleteWithCompactFormat();
+
+        createTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, v) VALUES (1, 1, 3)");
+        execute("INSERT INTO %s (a, b, v) VALUES (2, 1, 4)");
+        assertRows(execute("SELECT a, b, v FROM %s"),
+                   row(1, 1, 3),
+                   row(2, 1, 4));
+        testDeleteWithCompactFormat();
+    }
+
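+    // asserts that the hidden compact-storage columns (column1 / value) cannot be referenced in DELETE statements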
+    private void testDeleteWithCompactFormat() throws Throwable
+    {
+        assertInvalidMessage("Undefined column name value in table",
+                             "DELETE FROM %s WHERE a = 1 AND value = 1");
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "DELETE FROM %s WHERE a = 1 AND column1= 1");
+        assertInvalidMessage("Undefined column name value in table",
+                             "DELETE FROM %s WHERE a = 1 AND value = 1 AND column1 = 1");
+        assertInvalidMessage("Undefined column name value",
+                             "DELETE value FROM %s WHERE a = 1");
+        assertInvalidMessage("Undefined column name column1",
+                             "DELETE column1 FROM %s WHERE a = 1");
+    }
+
+    /**
+     * InsertTest
+     */
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testInsertWithCompactStaticFormat() throws Throwable
+    {
+        testInsertWithCompactTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
+
+        // if column1 is present, hidden column is called column2
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, c, column1) VALUES (1, 1, 1, 1)");
+        assertInvalidMessage("Undefined column name column2",
+                             "INSERT INTO %s (a, b, c, column2) VALUES (1, 1, 1, 1)");
+
+        // if value is present, hidden column is called value1
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, c, value) VALUES (1, 1, 1, 1)");
+        assertInvalidMessage("Undefined column name value1",
+                             "INSERT INTO %s (a, b, c, value1) VALUES (1, 1, 1, 1)");
+    }
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testInsertWithCompactNonStaticFormat() throws Throwable
+    {
+        testInsertWithCompactTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        testInsertWithCompactTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+    }
+
+    private void testInsertWithCompactTable(String tableQuery) throws Throwable
+    {
+        createTable(tableQuery);
+
+        // pass correct types to the hidden columns
+        assertInvalidMessage("Undefined column name column1",
+                             "INSERT INTO %s (a, b, column1) VALUES (?, ?, ?)",
+                             1, 1, 1, ByteBufferUtil.bytes('a'));
+        assertInvalidMessage("Undefined column name value",
+                             "INSERT INTO %s (a, b, value) VALUES (?, ?, ?)",
+                             1, 1, 1, ByteBufferUtil.bytes('a'));
+        assertInvalidMessage("Undefined column name column1",
+                             "INSERT INTO %s (a, b, column1, value) VALUES (?, ?, ?, ?)",
+                             1, 1, 1, ByteBufferUtil.bytes('a'), ByteBufferUtil.bytes('b'));
+        assertInvalidMessage("Undefined column name value",
+                             "INSERT INTO %s (a, b, value, column1) VALUES (?, ?, ?, ?)",
+                             1, 1, 1, ByteBufferUtil.bytes('a'), ByteBufferUtil.bytes('b'));
+
+        // pass incorrect types to the hidden columns
+        assertInvalidMessage("Undefined column name value",
+                             "INSERT INTO %s (a, b, value) VALUES (?, ?, ?)",
+                             1, 1, 1, 1);
+        assertInvalidMessage("Undefined column name column1",
+                             "INSERT INTO %s (a, b, column1) VALUES (?, ?, ?)",
+                             1, 1, 1, 1);
+        assertEmpty(execute("SELECT * FROM %s"));
+
+        // pass null to the hidden columns
+        assertInvalidMessage("Undefined column name value",
+                             "INSERT INTO %s (a, b, value) VALUES (?, ?, ?)",
+                             1, 1, null);
+        assertInvalidMessage("Undefined column name column1",
+                             "INSERT INTO %s (a, b, column1) VALUES (?, ?, ?)",
+                             1, 1, null);
+    }
+
+    /**
+     * SelectTest
+     */
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testSelectWithCompactStaticFormat() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, c) VALUES (1, 1, 1)");
+        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, 1)");
+        assertRows(execute("SELECT a, b, c FROM %s"),
+                   row(1, 1, 1),
+                   row(2, 1, 1));
+        testSelectWithCompactFormat();
+
+        // if a column named column1 is present, the hidden column is called column2
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, c, column1) VALUES (1, 1, 1, 1)");
+        execute("INSERT INTO %s (a, b, c, column1) VALUES (2, 1, 1, 2)");
+        assertRows(execute("SELECT a, b, c, column1 FROM %s"),
+                   row(1, 1, 1, 1),
+                   row(2, 1, 1, 2));
+        assertInvalidMessage("Undefined column name column2 in table",
+                             "SELECT a, column2, value FROM %s");
+
+        // if a column named value is present, the hidden column is called value1
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, c, value) VALUES (1, 1, 1, 1)");
+        execute("INSERT INTO %s (a, b, c, value) VALUES (2, 1, 1, 2)");
+        assertRows(execute("SELECT a, b, c, value FROM %s"),
+                   row(1, 1, 1, 1),
+                   row(2, 1, 1, 2));
+        assertInvalidMessage("Undefined column name value1 in table",
+                             "SELECT a, value1, value FROM %s");
+    }
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testSelectWithCompactNonStaticFormat() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b) VALUES (1, 1)");
+        execute("INSERT INTO %s (a, b) VALUES (2, 1)");
+        assertRows(execute("SELECT a, b FROM %s"),
+                   row(1, 1),
+                   row(2, 1));
+        testSelectWithCompactFormat();
+
+        createTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, v) VALUES (1, 1, 3)");
+        execute("INSERT INTO %s (a, b, v) VALUES (2, 1, 4)");
+        assertRows(execute("SELECT a, b, v FROM %s"),
+                   row(1, 1, 3),
+                   row(2, 1, 4));
+        testSelectWithCompactFormat();
+    }
+
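+    // asserts that the hidden compact-storage columns (column1 / value) cannot be referenced in SELECT statements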
+    private void testSelectWithCompactFormat() throws Throwable
+    {
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "SELECT column1 FROM %s");
+        assertInvalidMessage("Undefined column name value in table",
+                             "SELECT value FROM %s");
+        assertInvalidMessage("Undefined column name value in table",
+                             "SELECT value, column1 FROM %s");
+        assertInvalid("Undefined column name column1 in table ('column1 = NULL')",
+                      "SELECT * FROM %s WHERE column1 = null ALLOW FILTERING");
+        assertInvalid("Undefined column name value in table ('value = NULL')",
+                      "SELECT * FROM %s WHERE value = null ALLOW FILTERING");
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "SELECT WRITETIME(column1) FROM %s");
+        assertInvalidMessage("Undefined column name value in table",
+                             "SELECT WRITETIME(value) FROM %s");
+    }
+
+    /**
+     * UpdateTest
+     */
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testUpdateWithCompactStaticFormat() throws Throwable
+    {
+        testUpdateWithCompactFormat("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
+
+        assertInvalidMessage("Undefined column name column1 in table",
+                             "UPDATE %s SET b = 1 WHERE column1 = ?",
+                             ByteBufferUtil.bytes('a'));
+        assertInvalidMessage("Undefined column name value in table",
+                             "UPDATE %s SET b = 1 WHERE value = ?",
+                             ByteBufferUtil.bytes('a'));
+
+        // if column1 is present, hidden column is called column2
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, c, column1) VALUES (1, 1, 1, 1)");
+        execute("UPDATE %s SET column1 = 6 WHERE a = 1");
+        assertInvalidMessage("Undefined column name column2", "UPDATE %s SET column2 = 6 WHERE a = 0");
+        assertInvalidMessage("Undefined column name value", "UPDATE %s SET value = 6 WHERE a = 0");
+
+        // if value is present, hidden column is called value1
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
+        execute("INSERT INTO %s (a, b, c, value) VALUES (1, 1, 1, 1)");
+        execute("UPDATE %s SET value = 6 WHERE a = 1");
+        assertInvalidMessage("Undefined column name column1", "UPDATE %s SET column1 = 6 WHERE a = 1");
+        assertInvalidMessage("Undefined column name value1", "UPDATE %s SET value1 = 6 WHERE a = 1");
+    }
+
+    /**
+     * Test for CASSANDRA-13917
+     */
+    @Test
+    public void testUpdateWithCompactNonStaticFormat() throws Throwable
+    {
+        testUpdateWithCompactFormat("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+        testUpdateWithCompactFormat("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
+    }
+
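+    // asserts that the hidden compact-storage columns (column1 / value) cannot be targeted by UPDATE statements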
+    private void testUpdateWithCompactFormat(String tableQuery) throws Throwable
+    {
+        createTable(tableQuery);
+        // pass correct types to hidden columns
+        assertInvalidMessage("Undefined column name column1",
+                             "UPDATE %s SET column1 = ? WHERE a = 0",
+                             ByteBufferUtil.bytes('a'));
+        assertInvalidMessage("Undefined column name value",
+                             "UPDATE %s SET value = ? WHERE a = 0",
+                             ByteBufferUtil.bytes('a'));
+
+        // pass incorrect types to hidden columns
+        assertInvalidMessage("Undefined column name column1", "UPDATE %s SET column1 = 6 WHERE a = 0");
+        assertInvalidMessage("Undefined column name value", "UPDATE %s SET value = 6 WHERE a = 0");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java
deleted file mode 100644
index 276abde..0000000
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java
+++ /dev/null
@@ -1,5138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.cql3.validation.operations;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
-import org.apache.cassandra.cql3.validation.entities.SecondaryIndexTest;
-import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.RowUpdateBuilder;
-import org.apache.cassandra.db.SchemaCQLHelper;
-import org.apache.cassandra.db.marshal.AsciiType;
-import org.apache.cassandra.db.marshal.CounterColumnType;
-import org.apache.cassandra.db.marshal.IntegerType;
-import org.apache.cassandra.db.marshal.ReversedType;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
-
-import static junit.framework.TestCase.assertTrue;
-import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
-import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.fail;
-
-public class CompactStorageTest extends CQLTester
-{
-    private static final String compactOption = " WITH COMPACT STORAGE";
-
-    @Test
-    public void testSparseCompactTableIndex() throws Throwable
-    {
-        createTable("CREATE TABLE %s (key ascii PRIMARY KEY, val ascii) WITH COMPACT STORAGE");
-
-        // Indexes are allowed only on the sparse compact tables
-        createIndex("CREATE INDEX ON %s(val)");
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s (key, val) VALUES (?, ?)", Integer.toString(i), Integer.toString(i * 10));
-
-        alterTable("ALTER TABLE %s DROP COMPACT STORAGE");
-
-        assertRows(execute("SELECT * FROM %s WHERE val = '50'"),
-                   row("5", null, "50", null));
-        assertRows(execute("SELECT * FROM %s WHERE key = '5'"),
-                   row("5", null, "50", null));
-    }
-
-    @Test
-    public void before() throws Throwable
-    {
-        createTable("CREATE TABLE %s (key TEXT, column TEXT, value BLOB, PRIMARY KEY (key, column)) WITH COMPACT STORAGE");
-
-        ByteBuffer largeBytes = ByteBuffer.wrap(new byte[100000]);
-        execute("INSERT INTO %s (key, column, value) VALUES (?, ?, ?)", "test", "a", largeBytes);
-        ByteBuffer smallBytes = ByteBuffer.wrap(new byte[10]);
-        execute("INSERT INTO %s (key, column, value) VALUES (?, ?, ?)", "test", "c", smallBytes);
-
-        flush();
-
-        assertRows(execute("SELECT column FROM %s WHERE key = ? AND column IN (?, ?, ?)", "test", "c", "a", "b"),
-                   row("a"),
-                   row("c"));
-    }
-
-    @Test
-    public void testStaticCompactTables() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k text PRIMARY KEY, v1 int, v2 text) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (k, v1, v2) values (?, ?, ?)", "first", 1, "value1");
-        execute("INSERT INTO %s (k, v1, v2) values (?, ?, ?)", "second", 2, "value2");
-        execute("INSERT INTO %s (k, v1, v2) values (?, ?, ?)", "third", 3, "value3");
-
-        assertRows(execute("SELECT * FROM %s WHERE k = ?", "first"),
-                   row("first", 1, "value1")
-        );
-
-        assertRows(execute("SELECT v2 FROM %s WHERE k = ?", "second"),
-                   row("value2")
-        );
-
-        // Murmur3 order
-        assertRows(execute("SELECT * FROM %s"),
-                   row("third", 3, "value3"),
-                   row("second", 2, "value2"),
-                   row("first", 1, "value1")
-        );
-    }
-
-    @Test
-    public void testCompactStorageUpdateWithNull() throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering_1 int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering_1)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 1, 1)");
-
-        flush();
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", null, 0, 0);
-
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))", 0, 0, 1),
-                   row(0, 1, 1)
-        );
-    }
-
-    /**
-     * Migrated from cql_tests.py:TestCQL.collection_compact_test()
-     */
-    @Test
-    public void testCompactCollections() throws Throwable
-    {
-        String tableName = KEYSPACE + "." + createTableName();
-        assertInvalid(String.format("CREATE TABLE %s (user ascii PRIMARY KEY, mails list < text >) WITH COMPACT STORAGE;", tableName));
-    }
-
-    /**
-     * Check for a table with counters,
-     * migrated from cql_tests.py:TestCQL.counters_test()
-     */
-    @Test
-    public void testCounters() throws Throwable
-    {
-        createTable("CREATE TABLE %s (userid int, url text, total counter, PRIMARY KEY (userid, url)) WITH COMPACT STORAGE");
-
-        execute("UPDATE %s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(1L));
-
-        execute("UPDATE %s SET total = total - 4 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(-3L));
-
-        execute("UPDATE %s SET total = total+1 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(-2L));
-
-        execute("UPDATE %s SET total = total -2 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(-4L));
-
-        execute("UPDATE %s SET total += 6 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(2L));
-
-        execute("UPDATE %s SET total -= 1 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(1L));
-
-        execute("UPDATE %s SET total += -2 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(-1L));
-
-        execute("UPDATE %s SET total -= -2 WHERE userid = 1 AND url = 'http://foo.com'");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(1L));
-    }
-
-
-    @Test
-    public void testCounterFiltering() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k int PRIMARY KEY, a counter) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 10; i++)
-            execute("UPDATE %s SET a = a + ? WHERE k = ?", (long) i, i);
-
-        execute("UPDATE %s SET a = a + ? WHERE k = ?", 6L, 10);
-
-        // GT
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a > ? ALLOW FILTERING", 5L),
-                                row(6, 6L),
-                                row(7, 7L),
-                                row(8, 8L),
-                                row(9, 9L),
-                                row(10, 6L));
-
-        // GTE
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a >= ? ALLOW FILTERING", 6L),
-                                row(6, 6L),
-                                row(7, 7L),
-                                row(8, 8L),
-                                row(9, 9L),
-                                row(10, 6L));
-
-        // LT
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a < ? ALLOW FILTERING", 3L),
-                                row(0, 0L),
-                                row(1, 1L),
-                                row(2, 2L));
-
-        // LTE
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a <= ? ALLOW FILTERING", 3L),
-                                row(0, 0L),
-                                row(1, 1L),
-                                row(2, 2L),
-                                row(3, 3L));
-
-        // EQ
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a = ? ALLOW FILTERING", 6L),
-                                row(6, 6L),
-                                row(10, 6L));
-    }
-
-    /**
-     * Test for the bug of #11726.
-     */
-    @Test
-    public void testCounterAndColumnSelection() throws Throwable
-    {
-        for (String compactStorageClause : new String[]{ "", " WITH COMPACT STORAGE" })
-        {
-            createTable("CREATE TABLE %s (k int PRIMARY KEY, c counter)" + compactStorageClause);
-
-            // Flush 2 updates in different sstable so that the following select does a merge, which is what triggers
-            // the problem from #11726
-
-            execute("UPDATE %s SET c = c + ? WHERE k = ?", 1L, 0);
-
-            flush();
-
-            execute("UPDATE %s SET c = c + ? WHERE k = ?", 1L, 0);
-
-            flush();
-
-            // Querying, but not including the counter. Pre-CASSANDRA-11726, this made us query the counter but include
-            // it's value, which broke at merge (post-CASSANDRA-11726 are special cases to never skip values).
-            assertRows(execute("SELECT k FROM %s"), row(0));
-        }
-    }
-
-    /*
-     * Check that a counter batch works as intended
-     */
-    @Test
-    public void testCounterBatch() throws Throwable
-    {
-        createTable("CREATE TABLE %s (userid int, url text, total counter, PRIMARY KEY (userid, url)) WITH COMPACT STORAGE");
-
-        // Ensure we handle updates to the same CQL row in the same partition properly
-        execute("BEGIN UNLOGGED BATCH " +
-                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
-                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
-                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
-                "APPLY BATCH; ");
-        assertRows(execute("SELECT total FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(3L));
-
-        // Ensure we handle different CQL rows in the same partition properly
-        execute("BEGIN UNLOGGED BATCH " +
-                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://bar.com'; " +
-                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://baz.com'; " +
-                "UPDATE %1$s SET total = total + 1 WHERE userid = 1 AND url = 'http://bad.com'; " +
-                "APPLY BATCH; ");
-        assertRows(execute("SELECT url, total FROM %s WHERE userid = 1"),
-                   row("http://bad.com", 1L),
-                   row("http://bar.com", 1L),
-                   row("http://baz.com", 1L),
-                   row("http://foo.com", 3L)); // from previous batch
-
-        // Different counters in the same CQL Row
-        createTable("CREATE TABLE %s (userid int, url text, first counter, second counter, third counter, PRIMARY KEY (userid, url))");
-        execute("BEGIN UNLOGGED BATCH " +
-                "UPDATE %1$s SET first = first + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
-                "UPDATE %1$s SET first = first + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
-                "UPDATE %1$s SET second = second + 1 WHERE userid = 1 AND url = 'http://foo.com'; " +
-                "APPLY BATCH; ");
-        assertRows(execute("SELECT first, second, third FROM %s WHERE userid = 1 AND url = 'http://foo.com'"),
-                   row(2L, 1L, null));
-
-        // Different counters in different CQL Rows
-        execute("BEGIN UNLOGGED BATCH " +
-                "UPDATE %1$s SET first = first + 1 WHERE userid = 1 AND url = 'http://bad.com'; " +
-                "UPDATE %1$s SET first = first + 1, second = second + 1 WHERE userid = 1 AND url = 'http://bar.com'; " +
-                "UPDATE %1$s SET first = first - 1, second = second - 1 WHERE userid = 1 AND url = 'http://bar.com'; " +
-                "UPDATE %1$s SET second = second + 1 WHERE userid = 1 AND url = 'http://baz.com'; " +
-                "APPLY BATCH; ");
-        assertRows(execute("SELECT url, first, second, third FROM %s WHERE userid = 1"),
-                   row("http://bad.com", 1L, null, null),
-                   row("http://bar.com", 0L, 0L, null),
-                   row("http://baz.com", null, 1L, null),
-                   row("http://foo.com", 2L, 1L, null)); // from previous batch
-
-
-        // Different counters in different partitions
-        execute("BEGIN UNLOGGED BATCH " +
-                "UPDATE %1$s SET first = first + 1 WHERE userid = 2 AND url = 'http://bad.com'; " +
-                "UPDATE %1$s SET first = first + 1, second = second + 1 WHERE userid = 3 AND url = 'http://bar.com'; " +
-                "UPDATE %1$s SET first = first - 1, second = second - 1 WHERE userid = 4 AND url = 'http://bar.com'; " +
-                "UPDATE %1$s SET second = second + 1 WHERE userid = 5 AND url = 'http://baz.com'; " +
-                "APPLY BATCH; ");
-        assertRowsIgnoringOrder(execute("SELECT userid, url, first, second, third FROM %s WHERE userid IN (2, 3, 4, 5)"),
-                                row(2, "http://bad.com", 1L, null, null),
-                                row(3, "http://bar.com", 1L, 1L, null),
-                                row(4, "http://bar.com", -1L, -1L, null),
-                                row(5, "http://baz.com", null, 1L, null));
-    }
-
-    /**
-     * from FrozenCollectionsTest
-     */
-
-    @Test
-    public void testClusteringKeyUsageSet() throws Throwable
-    {
-        testClusteringKeyUsage("set<int>",
-                               set(),
-                               set(1, 2, 3),
-                               set(4, 5, 6),
-                               set(7, 8, 9));
-    }
-
-    @Test
-    public void testClusteringKeyUsageList() throws Throwable
-    {
-        testClusteringKeyUsage("list<int>",
-                               list(),
-                               list(1, 2, 3),
-                               list(4, 5, 6),
-                               list(7, 8, 9));
-    }
-
-    @Test
-    public void testClusteringKeyUsageMap() throws Throwable
-    {
-        testClusteringKeyUsage("map<int, int>",
-                               map(),
-                               map(1, 10, 2, 20, 3, 30),
-                               map(4, 40, 5, 50, 6, 60),
-                               map(7, 70, 8, 80, 9, 90));
-    }
-
-    private void testClusteringKeyUsage(String type, Object v1, Object v2, Object v3, Object v4) throws Throwable
-    {
-        createTable(String.format("CREATE TABLE %%s (a int, b frozen<%s>, c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE",
-                                  type));
-
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v1, 1);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v2, 1);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v3, 0);
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, v4, 0);
-
-        // overwrite with an update
-        execute("UPDATE %s SET c=? WHERE a=? AND b=?", 0, 0, v1);
-        execute("UPDATE %s SET c=? WHERE a=? AND b=?", 0, 0, v2);
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, v1, 0),
-                   row(0, v2, 0),
-                   row(0, v3, 0),
-                   row(0, v4, 0)
-        );
-
-        assertRows(execute("SELECT b FROM %s"),
-                   row(v1),
-                   row(v2),
-                   row(v3),
-                   row(v4)
-        );
-
-        assertRows(execute("SELECT * FROM %s LIMIT 2"),
-                   row(0, v1, 0),
-                   row(0, v2, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, v3),
-                   row(0, v3, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, v1),
-                   row(0, v1, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b IN ?", 0, list(v3, v1)),
-                   row(0, v1, 0),
-                   row(0, v3, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ?", 0, v3),
-                   row(0, v4, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b >= ?", 0, v3),
-                   row(0, v3, 0),
-                   row(0, v4, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b < ?", 0, v3),
-                   row(0, v1, 0),
-                   row(0, v2, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b <= ?", 0, v3),
-                   row(0, v1, 0),
-                   row(0, v2, 0),
-                   row(0, v3, 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ? AND b <= ?", 0, v2, v3),
-                   row(0, v3, 0)
-        );
-
-        execute("DELETE FROM %s WHERE a=? AND b=?", 0, v1);
-        execute("DELETE FROM %s WHERE a=? AND b=?", 0, v3);
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, v2, 0),
-                   row(0, v4, 0)
-        );
-    }
-
-    @Test
-    public void testNestedClusteringKeyUsage() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b frozen<map<set<int>, list<int>>>, c frozen<set<int>>, d int, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(), set(), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(), list(1, 2, 3)), set(), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0);
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, map(), set(), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT b FROM %s"),
-                   row(map()),
-                   row(map(set(), list(1, 2, 3))),
-                   row(map(set(1, 2, 3), list(1, 2, 3))),
-                   row(map(set(4, 5, 6), list(1, 2, 3))),
-                   row(map(set(7, 8, 9), list(1, 2, 3)))
-        );
-
-        assertRows(execute("SELECT c FROM %s"),
-                   row(set()),
-                   row(set()),
-                   row(set(1, 2, 3)),
-                   row(set(1, 2, 3)),
-                   row(set(1, 2, 3))
-        );
-
-        assertRows(execute("SELECT * FROM %s LIMIT 3"),
-                   row(0, map(), set(), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=0 ORDER BY b DESC LIMIT 4"),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map()),
-                   row(0, map(), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(), list(1, 2, 3))),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(1, 2, 3), list(1, 2, 3))),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND (b, c) IN ?", 0, list(tuple(map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)),
-                                                                                 tuple(map(), set()))),
-                   row(0, map(), set(), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b >= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b < ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(), set(), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b <= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(), set(), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ? AND b <= ?", 0, map(set(1, 2, 3), list(1, 2, 3)), map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set());
-        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set()));
-
-        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set());
-        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()));
-
-        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3));
-        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)));
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-    }
-
-    @Test
-    public void testNestedClusteringKeyUsageWithReverseOrder() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b frozen<map<set<int>, list<int>>>, c frozen<set<int>>, d int, " +
-                    "PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (b DESC)");
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(), set(), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(), list(1, 2, 3)), set(), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0);
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0),
-                   row(0, map(), set(), 0)
-        );
-
-        assertRows(execute("SELECT b FROM %s"),
-                   row(map(set(7, 8, 9), list(1, 2, 3))),
-                   row(map(set(4, 5, 6), list(1, 2, 3))),
-                   row(map(set(1, 2, 3), list(1, 2, 3))),
-                   row(map(set(), list(1, 2, 3))),
-                   row(map())
-        );
-
-        assertRows(execute("SELECT c FROM %s"),
-                   row(set(1, 2, 3)),
-                   row(set(1, 2, 3)),
-                   row(set(1, 2, 3)),
-                   row(set()),
-                   row(set())
-        );
-
-        assertRows(execute("SELECT * FROM %s LIMIT 3"),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=0 ORDER BY b DESC LIMIT 4"),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map()),
-                   row(0, map(), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(), list(1, 2, 3))),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=?", 0, map(set(1, 2, 3), list(1, 2, 3))),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND (b, c) IN ?", 0, list(tuple(map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)),
-                                                                                 tuple(map(), set()))),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b >= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b < ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0),
-                   row(0, map(), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b <= ?", 0, map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(), list(1, 2, 3)), set(), 0),
-                   row(0, map(), set(), 0)
-        );
-
-        assertRows(execute("SELECT * FROM %s WHERE a=? AND b > ? AND b <= ?", 0, map(set(1, 2, 3), list(1, 2, 3)), map(set(4, 5, 6), list(1, 2, 3))),
-                   row(0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-
-        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set());
-        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(), set()));
-
-        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set());
-        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(), list(1, 2, 3)), set()));
-
-        execute("DELETE FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3));
-        assertEmpty(execute("SELECT * FROM %s WHERE a=? AND b=? AND c=?", 0, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)));
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3), 0),
-                   row(0, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3), 0)
-        );
-    }
-
-    @Test
-    public void testNormalColumnUsage() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b frozen<map<set<int>, list<int>>>, c frozen<set<int>>)" + compactOption);
-
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, map(), set());
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 1, map(set(), list(99999, 999999, 99999)), set());
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 2, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3));
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 3, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3));
-        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 4, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3));
-
-        // overwrite with update
-        execute("UPDATE %s SET b=? WHERE a=?", map(set(), list(1, 2, 3)), 1);
-
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s"),
-                                row(0, map(), set()),
-                                row(1, map(set(), list(1, 2, 3)), set()),
-                                row(2, map(set(1, 2, 3), list(1, 2, 3)), set(1, 2, 3)),
-                                row(3, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3)),
-                                row(4, map(set(7, 8, 9), list(1, 2, 3)), set(1, 2, 3))
-        );
-
-        assertRowsIgnoringOrder(execute("SELECT b FROM %s"),
-                                row(map()),
-                                row(map(set(), list(1, 2, 3))),
-                                row(map(set(1, 2, 3), list(1, 2, 3))),
-                                row(map(set(4, 5, 6), list(1, 2, 3))),
-                                row(map(set(7, 8, 9), list(1, 2, 3)))
-        );
-
-        assertRowsIgnoringOrder(execute("SELECT c FROM %s"),
-                                row(set()),
-                                row(set()),
-                                row(set(1, 2, 3)),
-                                row(set(1, 2, 3)),
-                                row(set(1, 2, 3))
-        );
-
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 3),
-                                row(3, map(set(4, 5, 6), list(1, 2, 3)), set(1, 2, 3))
-        );
-
-        execute("UPDATE %s SET b=? WHERE a=?", null, 1);
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 1),
-                                row(1, null, set())
-        );
-
-        execute("UPDATE %s SET b=? WHERE a=?", map(), 1);
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 1),
-                                row(1, map(), set())
-        );
-
-        execute("UPDATE %s SET c=? WHERE a=?", null, 2);
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 2),
-                                row(2, map(set(1, 2, 3), list(1, 2, 3)), null)
-        );
-
-        execute("UPDATE %s SET c=? WHERE a=?", set(), 2);
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 2),
-                                row(2, map(set(1, 2, 3), list(1, 2, 3)), set())
-        );
-
-        execute("DELETE b FROM %s WHERE a=?", 3);
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 3),
-                                row(3, null, set(1, 2, 3))
-        );
-
-        execute("DELETE c FROM %s WHERE a=?", 4);
-        assertRowsIgnoringOrder(execute("SELECT * FROM %s WHERE a=?", 4),
-                                row(4, map(set(7, 8, 9), list(1, 2, 3)), null)
-        );
-    }
-
-    /**
-     * from SecondaryIndexTest
-     */
-    @Test
-    public void testCompactTableWithValueOver64k() throws Throwable
-    {
-        createTable("CREATE TABLE %s(a int, b blob, PRIMARY KEY (a)) WITH COMPACT STORAGE");
-        createIndex("CREATE INDEX ON %s(b)");
-        failInsert("INSERT INTO %s (a, b) VALUES (0, ?)", ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
-        failInsert("INSERT INTO %s (a, b) VALUES (0, ?) IF NOT EXISTS", ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
-        failInsert("BEGIN BATCH\n" +
-                   "INSERT INTO %s (a, b) VALUES (0, ?);\n" +
-                   "APPLY BATCH",
-                   ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
-        failInsert("BEGIN BATCH\n" +
-                   "INSERT INTO %s (a, b) VALUES (0, ?) IF NOT EXISTS;\n" +
-                   "APPLY BATCH",
-                   ByteBuffer.allocate(SecondaryIndexTest.TOO_BIG));
-    }
-
-    public void failInsert(String insertCQL, Object... args) throws Throwable
-    {
-        try
-        {
-            execute(insertCQL, args);
-            fail("Expected statement to fail validation");
-        }
-        catch (Exception e)
-        {
-            // as expected
-        }
-    }
-
-    /**
-     * Migrated from cql_tests.py:TestCQL.invalid_clustering_indexing_test()
-     */
-    @Test
-    public void testIndexesOnClusteringInvalid() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b))) WITH COMPACT STORAGE");
-        assertInvalid("CREATE INDEX ON %s (a)");
-        assertInvalid("CREATE INDEX ON %s (b)");
-
-        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        assertInvalid("CREATE INDEX ON %s (a)");
-        assertInvalid("CREATE INDEX ON %s (b)");
-        assertInvalid("CREATE INDEX ON %s (c)");
-    }
-
-    @Test
-    public void testEmptyRestrictionValueWithSecondaryIndexAndCompactTables() throws Throwable
-    {
-        createTable("CREATE TABLE %s (pk blob, c blob, v blob, PRIMARY KEY ((pk), c)) WITH COMPACT STORAGE");
-        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
-                             "CREATE INDEX on %s(c)");
-
-        createTable("CREATE TABLE %s (pk blob PRIMARY KEY, v blob) WITH COMPACT STORAGE");
-        createIndex("CREATE INDEX on %s(v)");
-
-        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", bytes("foo123"), bytes("1"));
-
-        // Test restrictions on non-primary key value
-        assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND v = textAsBlob('');"));
-
-        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", bytes("foo124"), EMPTY_BYTE_BUFFER);
-
-        assertRows(execute("SELECT * FROM %s WHERE v = textAsBlob('');"),
-                   row(bytes("foo124"), EMPTY_BYTE_BUFFER));
-    }
-
-    @Test
-    public void testIndicesOnCompactTable() throws Throwable
-    {
-        assertInvalidMessage("COMPACT STORAGE with composite PRIMARY KEY allows no more than one column not part of the PRIMARY KEY (got: v1, v2)",
-                             "CREATE TABLE " + KEYSPACE + ".test (pk int, c int, v1 int, v2 int, PRIMARY KEY(pk, c)) WITH COMPACT STORAGE");
-
-        createTable("CREATE TABLE %s (pk int, c int, v int, PRIMARY KEY(pk, c)) WITH COMPACT STORAGE");
-        assertInvalidMessage("Secondary indexes are not supported on compact value column of COMPACT STORAGE tables",
-                             "CREATE INDEX ON %s(v)");
-
-        createTable("CREATE TABLE %s (pk int PRIMARY KEY, v int) WITH COMPACT STORAGE");
-        createIndex("CREATE INDEX ON %s(v)");
-
-        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", 1, 1);
-        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", 2, 1);
-        execute("INSERT INTO %s (pk, v) VALUES (?, ?)", 3, 3);
-
-        assertRows(execute("SELECT pk, v FROM %s WHERE v = 1"),
-                   row(1, 1),
-                   row(2, 1));
-
-        assertRows(execute("SELECT pk, v FROM %s WHERE v = 3"),
-                   row(3, 3));
-
-        assertEmpty(execute("SELECT pk, v FROM %s WHERE v = 5"));
-
-        createTable("CREATE TABLE %s (pk int PRIMARY KEY, v1 int, v2 int) WITH COMPACT STORAGE");
-        createIndex("CREATE INDEX ON %s(v1)");
-
-        execute("INSERT INTO %s (pk, v1, v2) VALUES (?, ?, ?)", 1, 1, 1);
-        execute("INSERT INTO %s (pk, v1, v2) VALUES (?, ?, ?)", 2, 1, 2);
-        execute("INSERT INTO %s (pk, v1, v2) VALUES (?, ?, ?)", 3, 3, 3);
-
-        assertRows(execute("SELECT pk, v2 FROM %s WHERE v1 = 1"),
-                   row(1, 1),
-                   row(2, 2));
-
-        assertRows(execute("SELECT pk, v2 FROM %s WHERE v1 = 3"),
-                   row(3, 3));
-
-        assertEmpty(execute("SELECT pk, v2 FROM %s WHERE v1 = 5"));
-    }
-
-    /**
-     * OverflowTest
-     */
-
-    /**
-     * Test regression from #5189,
-     * migrated from cql_tests.py:TestCQL.compact_metadata_test()
-     */
-    @Test
-    public void testCompactMetadata() throws Throwable
-    {
-        createTable("CREATE TABLE %s (id int primary key, i int ) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (id, i) VALUES (1, 2)");
-        assertRows(execute("SELECT * FROM %s"),
-                   row(1, 2));
-    }
-
-    @Test
-    public void testEmpty() throws Throwable
-    {
-        // Same test, but for compact
-        createTable("CREATE TABLE %s (k1 int, k2 int, v int, PRIMARY KEY (k1, k2)) WITH COMPACT STORAGE");
-
-        // Inserts a few rows to make sure we don't actually query something
-        Object[][] rows = fill();
-
-        assertEmpty(execute("SELECT v FROM %s WHERE k1 IN ()"));
-        assertEmpty(execute("SELECT v FROM %s WHERE k1 = 0 AND k2 IN ()"));
-
-        // Test empty IN() in DELETE
-        execute("DELETE FROM %s WHERE k1 IN ()");
-        assertArrayEquals(rows, getRows(execute("SELECT * FROM %s")));
-
-        // Test empty IN() in UPDATE
-        execute("UPDATE %s SET v = 3 WHERE k1 IN () AND k2 = 2");
-        assertArrayEquals(rows, getRows(execute("SELECT * FROM %s")));
-    }
-
-    private Object[][] fill() throws Throwable
-    {
-        for (int i = 0; i < 2; i++)
-            for (int j = 0; j < 2; j++)
-                execute("INSERT INTO %s (k1, k2, v) VALUES (?, ?, ?)", i, j, i + j);
-
-        return getRows(execute("SELECT * FROM %s"));
-    }
-
-    /**
-     * AggregationTest
-     */
-
-    @Test
-    public void testFunctionsWithCompactStorage() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int , b int, c double, primary key(a, b) ) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 1, 11.5)");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 9.5)");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 9.0)");
-
-        assertRows(execute("SELECT max(b), min(b), sum(b), avg(b) , max(c), sum(c), avg(c) FROM %s"),
-                   row(3, 1, 6, 2, 11.5, 30.0, 10.0));
-
-        assertRows(execute("SELECT COUNT(*) FROM %s"), row(3L));
-        assertRows(execute("SELECT COUNT(1) FROM %s"), row(3L));
-        assertRows(execute("SELECT COUNT(*) FROM %s WHERE a = 1 AND b > 1"), row(2L));
-        assertRows(execute("SELECT COUNT(1) FROM %s WHERE a = 1 AND b > 1"), row(2L));
-        assertRows(execute("SELECT max(b), min(b), sum(b), avg(b) , max(c), sum(c), avg(c) FROM %s WHERE a = 1 AND b > 1"),
-                   row(3, 2, 5, 2, 9.5, 18.5, 9.25));
-    }
-
-    /**
-     * BatchTest
-     */
-    @Test
-    public void testBatchRangeDelete() throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering)) WITH COMPACT STORAGE");
-
-        int value = 0;
-        for (int partitionKey = 0; partitionKey < 4; partitionKey++)
-            for (int clustering1 = 0; clustering1 < 5; clustering1++)
-                execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (?, ?, ?)",
-                        partitionKey, clustering1, value++);
-
-        execute("BEGIN BATCH " +
-                "DELETE FROM %1$s WHERE partitionKey = 1;" +
-                "DELETE FROM %1$s WHERE partitionKey = 0 AND  clustering >= 4;" +
-                "DELETE FROM %1$s WHERE partitionKey = 0 AND clustering <= 0;" +
-                "DELETE FROM %1$s WHERE partitionKey = 2 AND clustering >= 0 AND clustering <= 3;" +
-                "DELETE FROM %1$s WHERE partitionKey = 2 AND clustering <= 3 AND clustering >= 4;" +
-                "DELETE FROM %1$s WHERE partitionKey = 3 AND (clustering) >= (3) AND (clustering) <= (6);" +
-                "APPLY BATCH;");
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, 1, 1),
-                   row(0, 2, 2),
-                   row(0, 3, 3),
-                   row(2, 4, 14),
-                   row(3, 0, 15),
-                   row(3, 1, 16),
-                   row(3, 2, 17));
-    }
-
-    /**
-     * CreateTest
-     */
-    /**
-     * Creation and basic operations on a static table with compact storage,
-     * migrated from cql_tests.py:TestCQL.noncomposite_static_cf_test()
-     */
-    @Test
-    public void testDenseStaticTable() throws Throwable
-    {
-        createTable("CREATE TABLE %s (userid uuid PRIMARY KEY, firstname text, lastname text, age int) WITH COMPACT STORAGE");
-
-        UUID id1 = UUID.fromString("550e8400-e29b-41d4-a716-446655440000");
-        UUID id2 = UUID.fromString("f47ac10b-58cc-4372-a567-0e02b2c3d479");
-
-        execute("INSERT INTO %s (userid, firstname, lastname, age) VALUES (?, ?, ?, ?)", id1, "Frodo", "Baggins", 32);
-        execute("UPDATE %s SET firstname = ?, lastname = ?, age = ? WHERE userid = ?", "Samwise", "Gamgee", 33, id2);
-
-        assertRows(execute("SELECT firstname, lastname FROM %s WHERE userid = ?", id1),
-                   row("Frodo", "Baggins"));
-
-        assertRows(execute("SELECT * FROM %s WHERE userid = ?", id1),
-                   row(id1, 32, "Frodo", "Baggins"));
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(id2, 33, "Samwise", "Gamgee"),
-                   row(id1, 32, "Frodo", "Baggins")
-        );
-
-        String batch = "BEGIN BATCH "
-                       + "INSERT INTO %1$s (userid, age) VALUES (?, ?) "
-                       + "UPDATE %1$s SET age = ? WHERE userid = ? "
-                       + "DELETE firstname, lastname FROM %1$s WHERE userid = ? "
-                       + "DELETE firstname, lastname FROM %1$s WHERE userid = ? "
-                       + "APPLY BATCH";
-
-        execute(batch, id1, 36, 37, id2, id1, id2);
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(id2, 37, null, null),
-                   row(id1, 36, null, null));
-    }
-
-    /**
-     * Creation and basic operations on a non-composite table with compact storage,
-     * migrated from cql_tests.py:TestCQL.dynamic_cf_test()
-     */
-    @Test
-    public void testDenseNonCompositeTable() throws Throwable
-    {
-        createTable("CREATE TABLE %s (userid uuid, url text, time bigint, PRIMARY KEY (userid, url)) WITH COMPACT STORAGE");
-
-        UUID id1 = UUID.fromString("550e8400-e29b-41d4-a716-446655440000");
-        UUID id2 = UUID.fromString("f47ac10b-58cc-4372-a567-0e02b2c3d479");
-        UUID id3 = UUID.fromString("810e8500-e29b-41d4-a716-446655440000");
-
-        execute("INSERT INTO %s (userid, url, time) VALUES (?, ?, ?)", id1, "http://foo.bar", 42L);
-        execute("INSERT INTO %s (userid, url, time) VALUES (?, ?, ?)", id1, "http://foo-2.bar", 24L);
-        execute("INSERT INTO %s (userid, url, time) VALUES (?, ?, ?)", id1, "http://bar.bar", 128L);
-        execute("UPDATE %s SET time = 24 WHERE userid = ? and url = 'http://bar.foo'", id2);
-        execute("UPDATE %s SET time = 12 WHERE userid IN (?, ?) and url = 'http://foo-3'", id2, id1);
-
-        assertRows(execute("SELECT url, time FROM %s WHERE userid = ?", id1),
-                   row("http://bar.bar", 128L),
-                   row("http://foo-2.bar", 24L),
-                   row("http://foo-3", 12L),
-                   row("http://foo.bar", 42L));
-
-        assertRows(execute("SELECT * FROM %s WHERE userid = ?", id2),
-                   row(id2, "http://bar.foo", 24L),
-                   row(id2, "http://foo-3", 12L));
-
-        assertRows(execute("SELECT time FROM %s"),
-                   row(24L), // id2
-                   row(12L),
-                   row(128L), // id1
-                   row(24L),
-                   row(12L),
-                   row(42L)
-        );
-
-        // Check we don't allow empty values for url since this is the full underlying cell name (#6152)
-        assertInvalid("INSERT INTO %s (userid, url, time) VALUES (?, '', 42)", id3);
-    }
-
-    /**
-     * Creation and basic operations on a composite table with compact storage,
-     * migrated from cql_tests.py:TestCQL.dense_cf_test()
-     */
-    @Test
-    public void testDenseCompositeTable() throws Throwable
-    {
-        createTable("CREATE TABLE %s (userid uuid, ip text, port int, time bigint, PRIMARY KEY (userid, ip, port)) WITH COMPACT STORAGE");
-
-        UUID id1 = UUID.fromString("550e8400-e29b-41d4-a716-446655440000");
-        UUID id2 = UUID.fromString("f47ac10b-58cc-4372-a567-0e02b2c3d479");
-
-        execute("INSERT INTO %s (userid, ip, port, time) VALUES (?, '192.168.0.1', 80, 42)", id1);
-        execute("INSERT INTO %s (userid, ip, port, time) VALUES (?, '192.168.0.2', 80, 24)", id1);
-        execute("INSERT INTO %s (userid, ip, port, time) VALUES (?, '192.168.0.2', 90, 42)", id1);
-        execute("UPDATE %s SET time = 24 WHERE userid = ? AND ip = '192.168.0.2' AND port = 80", id2);
-
-        // we don't have to include all of the clustering columns (see CASSANDRA-7990)
-        execute("INSERT INTO %s (userid, ip, time) VALUES (?, '192.168.0.3', 42)", id2);
-        execute("UPDATE %s SET time = 42 WHERE userid = ? AND ip = '192.168.0.4'", id2);
-
-        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ?", id1),
-                   row("192.168.0.1", 80, 42L),
-                   row("192.168.0.2", 80, 24L),
-                   row("192.168.0.2", 90, 42L));
-
-        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? and ip >= '192.168.0.2'", id1),
-                   row("192.168.0.2", 80, 24L),
-                   row("192.168.0.2", 90, 42L));
-
-        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? and ip = '192.168.0.2'", id1),
-                   row("192.168.0.2", 80, 24L),
-                   row("192.168.0.2", 90, 42L));
-
-        assertEmpty(execute("SELECT ip, port, time FROM %s WHERE userid = ? and ip > '192.168.0.2'", id1));
-
-        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? AND ip = '192.168.0.3'", id2),
-                   row("192.168.0.3", null, 42L));
-
-        assertRows(execute("SELECT ip, port, time FROM %s WHERE userid = ? AND ip = '192.168.0.4'", id2),
-                   row("192.168.0.4", null, 42L));
-
-        execute("DELETE time FROM %s WHERE userid = ? AND ip = '192.168.0.2' AND port = 80", id1);
-
-        assertRowCount(execute("SELECT * FROM %s WHERE userid = ?", id1), 2);
-
-        execute("DELETE FROM %s WHERE userid = ?", id1);
-        assertEmpty(execute("SELECT * FROM %s WHERE userid = ?", id1));
-
-        execute("DELETE FROM %s WHERE userid = ? AND ip = '192.168.0.3'", id2);
-        assertEmpty(execute("SELECT * FROM %s WHERE userid = ? AND ip = '192.168.0.3'", id2));
-    }
-
-    @Test
-    public void testCreateIndexOnCompactTableWithClusteringColumns() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int , c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE;");
-
-        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
-                             "CREATE INDEX ON %s (a);");
-
-        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
-                             "CREATE INDEX ON %s (b);");
-
-        assertInvalidMessage("Secondary indexes are not supported on compact value column of COMPACT STORAGE tables",
-                             "CREATE INDEX ON %s (c);");
-    }
-
-    @Test
-    public void testCreateIndexOnCompactTableWithoutClusteringColumns() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int) WITH COMPACT STORAGE;");
-
-        assertInvalidMessage("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables",
-                             "CREATE INDEX ON %s (a);");
-
-        createIndex("CREATE INDEX ON %s (b);");
-
-        execute("INSERT INTO %s (a, b) values (1, 1)");
-        execute("INSERT INTO %s (a, b) values (2, 4)");
-        execute("INSERT INTO %s (a, b) values (3, 6)");
-
-        assertRows(execute("SELECT * FROM %s WHERE b = ?", 4), row(2, 4));
-    }
-
-    /**
-     * DeleteTest
-     */
-
-    @Test
-    public void testDeleteWithNoClusteringColumns() throws Throwable
-    {
-        testDeleteWithNoClusteringColumns(false);
-        testDeleteWithNoClusteringColumns(true);
-    }
-
-    private void testDeleteWithNoClusteringColumns(boolean forceFlush) throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int PRIMARY KEY," +
-                    "value int)" + compactOption);
-
-        execute("INSERT INTO %s (partitionKey, value) VALUES (0, 0)");
-        execute("INSERT INTO %s (partitionKey, value) VALUES (1, 1)");
-        execute("INSERT INTO %s (partitionKey, value) VALUES (2, 2)");
-        execute("INSERT INTO %s (partitionKey, value) VALUES (3, 3)");
-        flush(forceFlush);
-
-        execute("DELETE value FROM %s WHERE partitionKey = ?", 0);
-        flush(forceFlush);
-
-        assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ?", 0));
-
-        execute("DELETE FROM %s WHERE partitionKey IN (?, ?)", 0, 1);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s"),
-                   row(2, 2),
-                   row(3, 3));
-
-        // test invalid queries
-
-        // token function
-        assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
-                             "DELETE FROM %s WHERE token(partitionKey) = token(?)", 0);
-
-        // multiple time same primary key element in WHERE clause
-        assertInvalidMessage("partitionkey cannot be restricted by more than one relation if it includes an Equal",
-                             "DELETE FROM %s WHERE partitionKey = ? AND partitionKey = ?", 0, 1);
-
-        // Undefined column names
-        assertInvalidMessage("Undefined column name unknown",
-                             "DELETE unknown FROM %s WHERE partitionKey = ?", 0);
-
-        assertInvalidMessage("Undefined column name partitionkey1",
-                             "DELETE FROM %s WHERE partitionKey1 = ?", 0);
-
-        // Invalid operator in the where clause
-        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
-                             "DELETE FROM %s WHERE partitionKey > ? ", 0);
-
-        assertInvalidMessage("Cannot use DELETE with CONTAINS",
-                             "DELETE FROM %s WHERE partitionKey CONTAINS ?", 0);
-
-        // Non primary key in the where clause
-        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
-                             "DELETE FROM %s WHERE partitionKey = ? AND value = ?", 0, 1);
-    }
-
-
-    @Test
-    public void testDeleteWithOneClusteringColumns() throws Throwable
-    {
-        testDeleteWithOneClusteringColumns(false);
-        testDeleteWithOneClusteringColumns(true);
-    }
-
-    private void testDeleteWithOneClusteringColumns(boolean forceFlush) throws Throwable
-    {
-        String compactOption = " WITH COMPACT STORAGE";
-
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering))" + compactOption);
-
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 2, 2)");
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 3, 3)");
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 4, 4)");
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 5, 5)");
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (1, 0, 6)");
-        flush(forceFlush);
-
-        execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
-        flush(forceFlush);
-
-        assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1));
-
-        execute("DELETE FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
-        flush(forceFlush);
-        assertEmpty(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1));
-
-        execute("DELETE FROM %s WHERE partitionKey IN (?, ?) AND clustering = ?", 0, 1, 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
-                   row(0, 2, 2),
-                   row(0, 3, 3),
-                   row(0, 4, 4),
-                   row(0, 5, 5));
-
-        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering) IN ((?), (?))", 0, 4, 5);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
-                   row(0, 2, 2),
-                   row(0, 3, 3));
-
-        // test invalid queries
-
-        // missing primary key element
-        assertInvalidMessage("Some partition key parts are missing: partitionkey",
-                             "DELETE FROM %s WHERE clustering = ?", 1);
-
-        // token function
-        assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
-                             "DELETE FROM %s WHERE token(partitionKey) = token(?) AND clustering = ? ", 0, 1);
-
-        // multiple time same primary key element in WHERE clause
-        assertInvalidMessage("clustering cannot be restricted by more than one relation if it includes an Equal",
-                             "DELETE FROM %s WHERE partitionKey = ? AND clustering = ? AND clustering = ?", 0, 1, 1);
-
-        // Undefined column names
-        assertInvalidMessage("Undefined column name value1",
-                             "DELETE value1 FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
-
-        assertInvalidMessage("Undefined column name partitionkey1",
-                             "DELETE FROM %s WHERE partitionKey1 = ? AND clustering = ?", 0, 1);
-
-        assertInvalidMessage("Undefined column name clustering_3",
-                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_3 = ?", 0, 1);
-
-        // Invalid operator in the where clause
-        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
-                             "DELETE FROM %s WHERE partitionKey > ? AND clustering = ?", 0, 1);
-
-        assertInvalidMessage("Cannot use DELETE with CONTAINS",
-                             "DELETE FROM %s WHERE partitionKey CONTAINS ? AND clustering = ?", 0, 1);
-
-        // Non primary key in the where clause
-        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
-                             "DELETE FROM %s WHERE partitionKey = ? AND clustering = ? AND value = ?", 0, 1, 3);
-    }
-
-    @Test
-    public void testDeleteWithTwoClusteringColumns() throws Throwable
-    {
-        testDeleteWithTwoClusteringColumns(false);
-        testDeleteWithTwoClusteringColumns(true);
-    }
-
-    private void testDeleteWithTwoClusteringColumns(boolean forceFlush) throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering_1 int," +
-                    "clustering_2 int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))" + compactOption);
-
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 2, 2)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 3, 3)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 1, 4)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 2, 5)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 6)");
-        flush(forceFlush);
-
-        execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
-        flush(forceFlush);
-
-        assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
-                            0, 1, 1));
-
-        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) = (?, ?)", 0, 1, 1);
-        flush(forceFlush);
-        assertEmpty(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
-                            0, 1, 1));
-
-        execute("DELETE FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 0, 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
-                   row(0, 0, 1, 1),
-                   row(0, 0, 2, 2),
-                   row(0, 0, 3, 3),
-                   row(0, 1, 2, 5));
-
-        Object[][] rows = new Object[][]{ row(0, 0, 1, 1), row(0, 1, 2, 5) };
-
-        execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)", 0, 0, 2, 3);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1), rows);
-
-        rows = new Object[][]{ row(0, 0, 1, 1) };
-
-        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))", 0, 0, 2, 1, 2);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1), rows);
-
-        execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?)) AND clustering_2 = ?", 0, 0, 2, 3);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
-                   row(0, 0, 1, 1));
-
-        // test invalid queries
-
-        // missing primary key element
-        assertInvalidMessage("Some partition key parts are missing: partitionkey",
-                             "DELETE FROM %s WHERE clustering_1 = ? AND clustering_2 = ?", 1, 1);
-
-        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
-                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_2 = ?", 0, 1);
-
-        // token function
-        assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
-                             "DELETE FROM %s WHERE token(partitionKey) = token(?) AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
-
-        // multiple time same primary key element in WHERE clause
-        assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
-                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND clustering_1 = ?", 0, 1, 1, 1);
-
-        // Undefined column names
-        assertInvalidMessage("Undefined column name value1",
-                             "DELETE value1 FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
-
-        assertInvalidMessage("Undefined column name partitionkey1",
-                             "DELETE FROM %s WHERE partitionKey1 = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
-
-        assertInvalidMessage("Undefined column name clustering_3",
-                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_3 = ?", 0, 1, 1);
-
-        // Invalid operator in the where clause
-        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
-                             "DELETE FROM %s WHERE partitionKey > ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
-
-        assertInvalidMessage("Cannot use DELETE with CONTAINS",
-                             "DELETE FROM %s WHERE partitionKey CONTAINS ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
-
-        // Non primary key in the where clause
-        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
-                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND value = ?", 0, 1, 1, 3);
-    }
-
-    /**
-     * InsertTest
-     */
-
-    @Test
-    public void testInsertWithCompactFormat() throws Throwable
-    {
-        testInsertWithCompactFormat(false);
-        testInsertWithCompactFormat(true);
-    }
-
-    private void testInsertWithCompactFormat(boolean forceFlush) throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
-        flush(forceFlush);
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, 0, 0),
-                   row(0, 1, 1));
-
-        // Invalid Null values for the clustering key or the regular column
-        assertInvalidMessage("Some clustering keys are missing: clustering",
-                             "INSERT INTO %s (partitionKey, value) VALUES (0, 0)");
-        assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
-                             "INSERT INTO %s (partitionKey, clustering) VALUES (0, 0)");
-
-        // Missing primary key columns
-        assertInvalidMessage("Some partition key parts are missing: partitionkey",
-                             "INSERT INTO %s (clustering, value) VALUES (0, 1)");
-
-        // multiple time the same value
-        assertInvalidMessage("The column names contains duplicates",
-                             "INSERT INTO %s (partitionKey, clustering, value, value) VALUES (0, 0, 2, 2)");
-
-        // multiple time same primary key element in WHERE clause
-        assertInvalidMessage("The column names contains duplicates",
-                             "INSERT INTO %s (partitionKey, clustering, clustering, value) VALUES (0, 0, 0, 2)");
-
-        // Undefined column names
-        assertInvalidMessage("Undefined column name clusteringx",
-                             "INSERT INTO %s (partitionKey, clusteringx, value) VALUES (0, 0, 2)");
-
-        assertInvalidMessage("Undefined column name valuex",
-                             "INSERT INTO %s (partitionKey, clustering, valuex) VALUES (0, 0, 2)");
-    }
-
-    @Test
-    public void testInsertWithCompactStorageAndTwoClusteringColumns() throws Throwable
-    {
-        testInsertWithCompactStorageAndTwoClusteringColumns(false);
-        testInsertWithCompactStorageAndTwoClusteringColumns(true);
-    }
-
-    private void testInsertWithCompactStorageAndTwoClusteringColumns(boolean forceFlush) throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering_1 int," +
-                    "clustering_2 int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
-        flush(forceFlush);
-
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, 0, null, 0),
-                   row(0, 0, 0, 0),
-                   row(0, 0, 1, 1));
-
-        // Invalid Null values for the clustering key or the regular column
-        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
-                             "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 0)");
-        assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
-                             "INSERT INTO %s (partitionKey, clustering_1, clustering_2) VALUES (0, 0, 0)");
-
-        // Missing primary key columns
-        assertInvalidMessage("Some partition key parts are missing: partitionkey",
-                             "INSERT INTO %s (clustering_1, clustering_2, value) VALUES (0, 0, 1)");
-        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
-                             "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 2)");
-
-        // multiple time the same value
-        assertInvalidMessage("The column names contains duplicates",
-                             "INSERT INTO %s (partitionKey, clustering_1, value, clustering_2, value) VALUES (0, 0, 2, 0, 2)");
-
-        // multiple time same primary key element in WHERE clause
-        assertInvalidMessage("The column names contains duplicates",
-                             "INSERT INTO %s (partitionKey, clustering_1, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 2)");
-
-        // Undefined column names
-        assertInvalidMessage("Undefined column name clustering_1x",
-                             "INSERT INTO %s (partitionKey, clustering_1x, clustering_2, value) VALUES (0, 0, 0, 2)");
-
-        assertInvalidMessage("Undefined column name valuex",
-                             "INSERT INTO %s (partitionKey, clustering_1, clustering_2, valuex) VALUES (0, 0, 0, 2)");
-    }
-
-    /**
-     * InsertUpdateIfConditionTest
-     */
-    /**
-     * Test for CAS with compact storage table, and #6813 in particular,
-     * migrated from cql_tests.py:TestCQL.cas_and_compact_test()
-     */
-    @Test
-    public void testCompactStorage() throws Throwable
-    {
-        createTable("CREATE TABLE %s (partition text, key text, owner text, PRIMARY KEY (partition, key) ) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (partition, key, owner) VALUES ('a', 'b', null)");
-        assertRows(execute("UPDATE %s SET owner='z' WHERE partition='a' AND key='b' IF owner=null"), row(true));
-
-        assertRows(execute("UPDATE %s SET owner='b' WHERE partition='a' AND key='b' IF owner='a'"), row(false, "z"));
-        assertRows(execute("UPDATE %s SET owner='b' WHERE partition='a' AND key='b' IF owner='z'"), row(true));
-
-        assertRows(execute("INSERT INTO %s (partition, key, owner) VALUES ('a', 'c', 'x') IF NOT EXISTS"), row(true));
-    }
-
-    /**
-     * SelectGroupByTest
-     */
-
-    @Test
-    public void testGroupByWithoutPaging() throws Throwable
-    {
-
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))"
-                    + compactOption);
-
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, 2, 12, 24)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 12, 24)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 6, 12)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 2, 3, 3, 6)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 4, 3, 6, 12)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (3, 3, 2, 12, 24)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (4, 8, 2, 12, 24)");
-
-        // Makes sure that we have some tombstones
-        execute("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2 AND d = 12");
-        execute("DELETE FROM %s WHERE a = 3");
-
-        // Range queries
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a"),
-                   row(1, 2, 6, 4L, 24),
-                   row(2, 2, 6, 2L, 12),
-                   row(4, 8, 24, 1L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b"),
-                   row(1, 2, 6, 2L, 12),
-                   row(1, 4, 12, 2L, 24),
-                   row(2, 2, 6, 1L, 6),
-                   row(2, 4, 12, 1L, 12),
-                   row(4, 8, 24, 1L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b = 2 GROUP BY a, b ALLOW FILTERING"),
-                   row(1, 2, 6, 2L, 12),
-                   row(2, 2, 6, 1L, 6));
-
-        assertEmpty(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b IN () GROUP BY a, b ALLOW FILTERING"));
-
-        // Range queries without aggregates
-        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(1, 4, 2, 6),
-                   row(2, 2, 3, 3),
-                   row(2, 4, 3, 6),
-                   row(4, 8, 2, 12));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b"),
-                   row(1, 2, 1, 3),
-                   row(1, 4, 2, 6),
-                   row(2, 2, 3, 3),
-                   row(2, 4, 3, 6),
-                   row(4, 8, 2, 12));
-
-        // Range queries with wildcard
-        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12),
-                   row(1, 4, 2, 6, 12),
-                   row(2, 2, 3, 3, 6),
-                   row(2, 4, 3, 6, 12),
-                   row(4, 8, 2, 12, 24));
-
-        assertRows(execute("SELECT * FROM %s GROUP BY a, b"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 4, 2, 6, 12),
-                   row(2, 2, 3, 3, 6),
-                   row(2, 4, 3, 6, 12),
-                   row(4, 8, 2, 12, 24));
-
-        // Range query with LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b LIMIT 2"),
-                   row(1, 2, 6, 2L, 12),
-                   row(1, 4, 12, 2L, 24));
-
-        // Range queries with PER PARTITION LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1"),
-                   row(1, 2, 6, 2L, 12),
-                   row(2, 2, 6, 1L, 6),
-                   row(4, 8, 24, 1L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"),
-                   row(1, 2, 6, 4L, 24),
-                   row(2, 2, 6, 2L, 12),
-                   row(4, 8, 24, 1L, 24));
-
-        // Range query with PER PARTITION LIMIT and LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"),
-                   row(1, 2, 6, 2L, 12),
-                   row(2, 2, 6, 1L, 6));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"),
-                   row(1, 2, 6, 4L, 24),
-                   row(2, 2, 6, 2L, 12),
-                   row(4, 8, 24, 1L, 24));
-
-        // Range queries without aggregates and with LIMIT
-        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c LIMIT 3"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(1, 4, 2, 6));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b LIMIT 3"),
-                   row(1, 2, 1, 3),
-                   row(1, 4, 2, 6),
-                   row(2, 2, 3, 3));
-
-        // Range queries with wildcard and with LIMIT
-        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c LIMIT 3"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12),
-                   row(1, 4, 2, 6, 12));
-
-        assertRows(execute("SELECT * FROM %s GROUP BY a, b LIMIT 3"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 4, 2, 6, 12),
-                   row(2, 2, 3, 3, 6));
-
-        // Range queries without aggregates and with PER PARTITION LIMIT
-        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(2, 2, 3, 3),
-                   row(2, 4, 3, 6),
-                   row(4, 8, 2, 12));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b PER PARTITION LIMIT 1"),
-                   row(1, 2, 1, 3),
-                   row(2, 2, 3, 3),
-                   row(4, 8, 2, 12));
-
-        // Range queries with wildcard and with PER PARTITION LIMIT
-        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12),
-                   row(2, 2, 3, 3, 6),
-                   row(2, 4, 3, 6, 12),
-                   row(4, 8, 2, 12, 24));
-
-        assertRows(execute("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1"),
-                   row(1, 2, 1, 3, 6),
-                   row(2, 2, 3, 3, 6),
-                   row(4, 8, 2, 12, 24));
-
-        // Range queries without aggregates, with PER PARTITION LIMIT and LIMIT
-        assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(2, 2, 3, 3));
-
-        // Range queries with wildcard, with PER PARTITION LIMIT and LIMIT
-        assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12),
-                   row(2, 2, 3, 3, 6));
-
-        // Range query with DISTINCT
-        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a"),
-                   row(1, 1L),
-                   row(2, 1L),
-                   row(4, 1L));
-
-        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
-                             "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b");
-
-        // Range query with DISTINCT and LIMIT
-        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a LIMIT 2"),
-                   row(1, 1L),
-                   row(2, 1L));
-
-        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
-                             "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b LIMIT 2");
-
-        // Range query with ORDER BY
-        assertInvalidMessage("ORDER BY is only supported when the partition key is restricted by an EQ or an IN",
-                             "SELECT a, b, c, count(b), max(e) FROM %s GROUP BY a, b ORDER BY b DESC, c DESC");
-
-        // Single partition queries
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12),
-                   row(1, 4, 12, 2L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY b, c"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12),
-                   row(1, 4, 12, 2L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, b, c"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, c"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY c"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12));
-
-        // Single partition queries without aggregates
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b"),
-                   row(1, 2, 1, 3),
-                   row(1, 4, 2, 6));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(1, 4, 2, 6));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY b, c"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(1, 4, 2, 6));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 and token(a) = token(1) GROUP BY b, c"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(1, 4, 2, 6));
-
-        // Single partition queries with wildcard
-        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12),
-                   row(1, 4, 2, 6, 12));
-
-        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 4, 2, 6, 12));
-
-        // Single partition queries with DISTINCT
-        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a"),
-                   row(1, 1L));
-
-        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
-                             "SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a, b");
-
-        // Single partition queries with LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 10"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12),
-                   row(1, 4, 12, 2L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12));
-
-        assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 1"),
-                   row(1L, 6));
-
-        // Single partition queries with PER PARTITION LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 10"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12),
-                   row(1, 4, 12, 2L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12));
-
-        assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 1"),
-                   row(1L, 6));
-
-        // Single partition queries without aggregates and with LIMIT
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 2"),
-                   row(1, 2, 1, 3),
-                   row(1, 4, 2, 6));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"),
-                   row(1, 2, 1, 3));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6));
-
-        // Single partition queries with wildcard and with LIMIT
-        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12));
-
-        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"),
-                   row(1, 2, 1, 3, 6));
-
-        // Single partition queries without aggregates and with PER PARTITION LIMIT
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 2"),
-                   row(1, 2, 1, 3),
-                   row(1, 4, 2, 6));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"),
-                   row(1, 2, 1, 3));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6));
-
-        // Single partition queries with wildcard and with PER PARTITION LIMIT
-        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12));
-
-        assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"),
-                   row(1, 2, 1, 3, 6));
-
-        // Single partition queries with ORDER BY
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC"),
-                   row(1, 4, 24, 2L, 24),
-                   row(1, 2, 12, 1L, 12),
-                   row(1, 2, 6, 1L, 6));
-
-        // Single partition queries with ORDER BY and PER PARTITION LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC PER PARTITION LIMIT 1"),
-                   row(1, 4, 24, 2L, 24));
-
-        // Single partition queries with ORDER BY and LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 2"),
-                   row(1, 4, 24, 2L, 24),
-                   row(1, 2, 12, 1L, 12));
-
-        // Multi-partitions queries
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12),
-                   row(1, 4, 12, 2L, 24),
-                   row(2, 2, 6, 1L, 6),
-                   row(2, 4, 12, 1L, 12),
-                   row(4, 8, 24, 1L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) AND b = 2 GROUP BY a, b, c"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12),
-                   row(2, 2, 6, 1L, 6));
-
-        // Multi-partitions queries without aggregates
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"),
-                   row(1, 2, 1, 3),
-                   row(1, 4, 2, 6),
-                   row(2, 2, 3, 3),
-                   row(2, 4, 3, 6),
-                   row(4, 8, 2, 12));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"),
-                   row(1, 2, 1, 3),
-                   row(1, 2, 2, 6),
-                   row(1, 4, 2, 6),
-                   row(2, 2, 3, 3),
-                   row(2, 4, 3, 6),
-                   row(4, 8, 2, 12));
-
-        // Multi-partitions with wildcard
-        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12),
-                   row(1, 4, 2, 6, 12),
-                   row(2, 2, 3, 3, 6),
-                   row(2, 4, 3, 6, 12),
-                   row(4, 8, 2, 12, 24));
-
-        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 4, 2, 6, 12),
-                   row(2, 2, 3, 3, 6),
-                   row(2, 4, 3, 6, 12),
-                   row(4, 8, 2, 12, 24));
-
-        // Multi-partitions query with DISTINCT
-        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a"),
-                   row(1, 1L),
-                   row(2, 1L),
-                   row(4, 1L));
-
-        assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries",
-                             "SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b");
-
-        // Multi-partitions query with DISTINCT and LIMIT
-        assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a LIMIT 2"),
-                   row(1, 1L),
-                   row(2, 1L));
-
-        // Multi-partitions queries with PER PARTITION LIMIT
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 1"),
-                   row(1, 2, 6, 1L, 6),
-                   row(2, 2, 6, 1L, 6),
-                   row(4, 8, 24, 1L, 24));
-
-        assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"),
-                   row(1, 2, 6, 1L, 6),
-                   row(1, 2, 12, 1L, 12),
-                   row(2, 2, 6, 1L, 6),
-                   row(2, 4, 12, 1L, 12),
-                   row(4, 8, 24, 1L, 24));
-
-        // Multi-partitions with wildcard and PER PARTITION LIMIT
-        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"),
-                   row(1, 2, 1, 3, 6),
-                   row(1, 2, 2, 6, 12),
-                   row(2, 2, 3, 3, 6),
-                   row(2, 4, 3, 6, 12),
-                   row(4, 8, 2, 12, 24));
-
-        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 1"),
-                   row(1, 2, 1, 3, 6),
-                   row(2, 2, 3, 3, 6),
-                   row(4, 8, 2, 12, 24));
-
-        // Multi-partitions queries with ORDER BY
-        assertRows(execute("SELECT a, b, c, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC"),
-                   row(4, 8, 2, 1L, 24),
-                   row(2, 4, 3, 1L, 12),
-                   row(1, 4, 2, 2L, 24),
-                   row(2, 2, 3, 1L, 6),
-                   row(1, 2, 2, 2L, 12));
-
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC"),
-                   row(4, 8, 2, 12),
-                   row(2, 4, 3, 6),
-                   row(1, 4, 2, 12),
-                   row(2, 2, 3, 3),
-                   row(1, 2, 2, 6),
-                   row(1, 2, 1, 3));
-
-        // Multi-partitions queries with ORDER BY and LIMIT
-        assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 3"),
-                   row(4, 8, 2, 12),
-                   row(2, 4, 3, 6),
-                   row(1, 4, 2, 12));
-
-        // Multi-partitions with wildcard, ORDER BY and LIMIT
-        assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 3"),
-                   row(4, 8, 2, 12, 24),
-                   row(2, 4, 3, 6, 12),
-                   row(1, 4, 2, 12, 24));
-
-        // Invalid queries
-        assertInvalidMessage("Group by is currently only supported on the columns of the PRIMARY KEY, got e",
-                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, e");
-
-        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
-                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY c");
-
-        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
-                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, c, b");
-
-        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
-                             "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, a");
-
-        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
-                             "SELECT a, b, c, d FROM %s WHERE token(a) = token(1) GROUP BY b, c");
-
-        assertInvalidMessage("Undefined column name clustering1",
-                             "SELECT a, b as clustering1, max(c) FROM %s WHERE a = 1 GROUP BY a, clustering1");
-
-        assertInvalidMessage("Undefined column name z",
-                             "SELECT a, b, max(c) FROM %s WHERE a = 1 GROUP BY a, b, z");
-
-        // Test with composite partition key
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key ((a, b), c, d))" + compactOption);
-
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 1, 3, 6)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 2, 6, 12)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 3, 12, 24)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
-
-        assertInvalidMessage("Group by is not supported on only a part of the partition key",
-                             "SELECT a, b, max(d) FROM %s GROUP BY a");
-
-        assertRows(execute("SELECT a, b, max(d) FROM %s GROUP BY a, b"),
-                   row(1, 2, 12),
-                   row(1, 1, 12));
-
-        assertRows(execute("SELECT a, b, max(d) FROM %s WHERE a = 1 AND b = 1 GROUP BY b"),
-                   row(1, 1, 12));
-
-        // Test with table without clustering key
-        createTable("CREATE TABLE %s (a int primary key, b int, c int)" + compactOption);
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 6)");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 6, 12)");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 12, 24)");
-
-        assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
-                             "SELECT a, max(c) FROM %s WHERE a = 1 GROUP BY a, a");
-    }
-
-    @Test
-    public void testGroupByWithoutPagingWithDeletions() throws Throwable
-    {
-
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))"
-                    + compactOption);
-
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 6, 12)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 9, 18)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 3, 6)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 9, 18)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 12, 24)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 3, 6)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 6, 12)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 9, 18)");
-        execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 3, 12, 24)");
-
-        execute("DELETE FROM %s WHERE a = 1 AND b = 2 AND c = 1 AND d = 12");
-        execute("DELETE FROM %s WHERE a = 1 AND b = 2 AND c = 2 AND d = 9");
-
-        assertRows(execute("SELECT a, b, c, count(b), max(d) FROM %s GROUP BY a, b, c"),
-                   row(1, 2, 1, 3L, 9),
-                   row(1, 2, 2, 3L, 12),
-                   row(1, 2, 3, 4L, 12));
-    }
-
-    @Test
-    public void testGroupByWithRangeNamesQueryWithoutPaging() throws Throwable
-    {
-
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, primary key (a, b, c))"
-                    + compactOption);
-
-        for (int i = 1; i < 5; i++)
-            for (int j = 1; j < 5; j++)
-                for (int k = 1; k < 5; k++)
-                    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", i, j, k, i + j);
-
-        // Makes sure that we have some tombstones
-        execute("DELETE FROM %s WHERE a = 3");
-
-        // Range queries
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3),
-                   row(4, 1, 5, 2L, 5));
-
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3),
-                   row(4, 1, 5, 2L, 5));
-
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(1, 2, 3, 2L, 3),
-                   row(2, 1, 3, 2L, 3),
-                   row(2, 2, 4, 2L, 4),
-                   row(4, 1, 5, 2L, 5),
-                   row(4, 2, 6, 2L, 6));
-
-        // Range queries with LIMIT
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a LIMIT 5 ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3),
-                   row(4, 1, 5, 2L, 5));
-
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b LIMIT 3 ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3),
-                   row(4, 1, 5, 2L, 5));
-
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b LIMIT 3 ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(1, 2, 3, 2L, 3),
-                   row(2, 1, 3, 2L, 3));
-
-        // Range queries with PER PARTITION LIMIT
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 2 ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3),
-                   row(4, 1, 5, 2L, 5));
-
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 1 ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3),
-                   row(4, 1, 5, 2L, 5));
-
-        // Range queries with PER PARTITION LIMIT and LIMIT
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b = 1 and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 5 ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3),
-                   row(4, 1, 5, 2L, 5));
-
-        assertRows(execute("SELECT a, b, d, count(b), max(d) FROM %s WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2 ALLOW FILTERING"),
-                   row(1, 1, 2, 2L, 2),
-                   row(2, 1, 3, 2L, 3));
-    }
-
-    /**
-     * SelectSingleColumn
-     */
-    @Test
-    public void testClusteringColumnRelationsWithCompactStorage() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a text, b int, c int, d int, primary key(a, b, c)) WITH COMPACT STORAGE;");
-        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "first", 1, 5, 1);
-        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "first", 2, 6, 2);
-        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "first", 3, 7, 3);
-        execute("insert into %s (a, b, c, d) values (?, ?, ?, ?)", "second", 4, 8, 4);
-
-        assertRows(execute("select * from %s where a in (?, ?)", "first", "second"),
-                   row("first", 1, 5, 1),
-                   row("first", 2, 6, 2),
-                   row("first", 3, 7, 3),
-                   row("second", 4, 8, 4));
-
-        assertRows(execute("select * from %s where a = ? and b = ? and c in (?, ?)", "first", 2, 6, 7),
-                   row("first", 2, 6, 2));
-
-        assertRows(execute("select * from %s where a = ? and b in (?, ?) and c in (?, ?)", "first", 2, 3, 6, 7),
-                   row("first", 2, 6, 2),
-                   row("first", 3, 7, 3));
-
-        assertRows(execute("select * from %s where a = ? and b in (?, ?) and c in (?, ?)", "first", 3, 2, 7, 6),
-                   row("first", 2, 6, 2),
-                   row("first", 3, 7, 3));
-
-        assertRows(execute("select * from %s where a = ? and c in (?, ?) and b in (?, ?)", "first", 7, 6, 3, 2),
-                   row("first", 2, 6, 2),
-                   row("first", 3, 7, 3));
-
-        assertRows(execute("select c, d from %s where a = ? and c in (?, ?) and b in (?, ?)", "first", 7, 6, 3, 2),
-                   row(6, 2),
-                   row(7, 3));
-
-        assertRows(execute("select c, d from %s where a = ? and c in (?, ?) and b in (?, ?, ?)", "first", 7, 6, 3, 2, 3),
-                   row(6, 2),
-                   row(7, 3));
-
-        assertRows(execute("select * from %s where a = ? and b in (?, ?) and c = ?", "first", 3, 2, 7),
-                   row("first", 3, 7, 3));
-
-        assertRows(execute("select * from %s where a = ? and b in ? and c in ?",
-                           "first", Arrays.asList(3, 2), Arrays.asList(7, 6)),
-                   row("first", 2, 6, 2),
-                   row("first", 3, 7, 3));
-
-        assertInvalidMessage("Invalid null value for column b",
-                             "select * from %s where a = ? and b in ? and c in ?", "first", null, Arrays.asList(7, 6));
-
-        assertRows(execute("select * from %s where a = ? and c >= ? and b in (?, ?)", "first", 6, 3, 2),
-                   row("first", 2, 6, 2),
-                   row("first", 3, 7, 3));
-
-        assertRows(execute("select * from %s where a = ? and c > ? and b in (?, ?)", "first", 6, 3, 2),
-                   row("first", 3, 7, 3));
-
-        assertRows(execute("select * from %s where a = ? and c <= ? and b in (?, ?)", "first", 6, 3, 2),
-                   row("first", 2, 6, 2));
-
-        assertRows(execute("select * from %s where a = ? and c < ? and b in (?, ?)", "first", 7, 3, 2),
-                   row("first", 2, 6, 2));
-
-        assertRows(execute("select * from %s where a = ? and c >= ? and c <= ? and b in (?, ?)", "first", 6, 7, 3, 2),
-                   row("first", 2, 6, 2),
-                   row("first", 3, 7, 3));
-
-        assertRows(execute("select * from %s where a = ? and c > ? and c <= ? and b in (?, ?)", "first", 6, 7, 3, 2),
-                   row("first", 3, 7, 3));
-
-        assertEmpty(execute("select * from %s where a = ? and c > ? and c < ? and b in (?, ?)", "first", 6, 7, 3, 2));
-
-        assertInvalidMessage("Column \"c\" cannot be restricted by both an equality and an inequality relation",
-                             "select * from %s where a = ? and c > ? and c = ? and b in (?, ?)", "first", 6, 7, 3, 2);
-
-        assertInvalidMessage("c cannot be restricted by more than one relation if it includes an Equal",
-                             "select * from %s where a = ? and c = ? and c > ?  and b in (?, ?)", "first", 6, 7, 3, 2);
-
-        assertRows(execute("select * from %s where a = ? and c in (?, ?) and b in (?, ?) order by b DESC",
-                           "first", 7, 6, 3, 2),
-                   row("first", 3, 7, 3),
-                   row("first", 2, 6, 2));
-
-        assertInvalidMessage("More than one restriction was found for the start bound on b",
-                             "select * from %s where a = ? and b > ? and b > ?", "first", 6, 3, 2);
-
-        assertInvalidMessage("More than one restriction was found for the end bound on b",
-                             "select * from %s where a = ? and b < ? and b <= ?", "first", 6, 3, 2);
-    }
-
-    /**
-     * SelectTest
-     */
-    /**
-     * Check query with KEY IN clause for wide row tables
-     * migrated from cql_tests.py:TestCQL.in_clause_wide_rows_test()
-     */
-    @Test
-    public void testSelectKeyInForWideRows() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k, c)) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s (k, c, v) VALUES (0, ?, ?)", i, i);
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c IN (5, 2, 8)"),
-                   row(2), row(5), row(8));
-
-        createTable("CREATE TABLE %s (k int, c1 int, c2 int, v int, PRIMARY KEY (k, c1, c2)) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s (k, c1, c2, v) VALUES (0, 0, ?, ?)", i, i);
-
-        assertEmpty(execute("SELECT v FROM %s WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3"));
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c1 = 0 AND c2 IN (5, 2, 8)"),
-                   row(2), row(5), row(8));
-    }
-
-    /**
-     * Check SELECT respects inclusive and exclusive bounds
-     * migrated from cql_tests.py:TestCQL.exclusive_slice_test()
-     */
-    @Test
-    public void testSelectBounds() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k, c)) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s (k, c, v) VALUES (0, ?, ?)", i, i);
-
-        assertRowCount(execute("SELECT v FROM %s WHERE k = 0"), 10);
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c >= 2 AND c <= 6"),
-                   row(2), row(3), row(4), row(5), row(6));
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c > 2 AND c <= 6"),
-                   row(3), row(4), row(5), row(6));
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c >= 2 AND c < 6"),
-                   row(2), row(3), row(4), row(5));
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c > 2 AND c < 6"),
-                   row(3), row(4), row(5));
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c > 2 AND c <= 6 LIMIT 2"),
-                   row(3), row(4));
-
-        assertRows(execute("SELECT v FROM %s WHERE k = 0 AND c >= 2 AND c < 6 ORDER BY c DESC LIMIT 2"),
-                   row(5), row(4));
-    }
-
-    /**
-     * Test for the bug from #4716 and, more generally, for correct ordering behavior,
-     * migrated from cql_tests.py:TestCQL.reversed_compact_test()
-     */
-    @Test
-    public void testReverseCompact() throws Throwable
-    {
-        createTable("CREATE TABLE %s ( k text, c int, v int, PRIMARY KEY (k, c) ) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c DESC)");
-
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s (k, c, v) VALUES ('foo', ?, ?)", i, i);
-
-        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo'"),
-                   row(5), row(4), row(3));
-
-        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo'"),
-                   row(6), row(5), row(4), row(3), row(2));
-
-        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"),
-                   row(3), row(4), row(5));
-
-        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"),
-                   row(2), row(3), row(4), row(5), row(6));
-
-        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"),
-                   row(5), row(4), row(3));
-
-        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"),
-                   row(6), row(5), row(4), row(3), row(2));
-
-        createTable("CREATE TABLE %s ( k text, c int, v int, PRIMARY KEY (k, c) ) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s(k, c, v) VALUES ('foo', ?, ?)", i, i);
-
-        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo'"),
-                   row(3), row(4), row(5));
-
-        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo'"),
-                   row(2), row(3), row(4), row(5), row(6));
-
-        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"),
-                   row(3), row(4), row(5));
-
-        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"),
-                   row(2), row(3), row(4), row(5), row(6));
-
-        assertRows(execute("SELECT c FROM %s WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"),
-                   row(5), row(4), row(3));
-
-        assertRows(execute("SELECT c FROM %s WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"),
-                   row(6), row(5), row(4), row(3), row(2));
-    }
-
-    /**
-     * Test for the bug from #4760 and #4759,
-     * migrated from cql_tests.py:TestCQL.reversed_compact_multikey_test()
-     */
-    @Test
-    public void testReversedCompactMultikey() throws Throwable
-    {
-        createTable("CREATE TABLE %s (key text, c1 int, c2 int, value text, PRIMARY KEY(key, c1, c2) ) WITH COMPACT STORAGE AND CLUSTERING ORDER BY(c1 DESC, c2 DESC)");
-
-        for (int i = 0; i < 3; i++)
-            for (int j = 0; j < 3; j++)
-                execute("INSERT INTO %s (key, c1, c2, value) VALUES ('foo', ?, ?, 'bar')", i, j);
-
-        // Equalities
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 = 1"),
-                   row(1, 2), row(1, 1), row(1, 0));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 = 1 ORDER BY c1 ASC, c2 ASC"),
-                   row(1, 0), row(1, 1), row(1, 2));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 = 1 ORDER BY c1 DESC, c2 DESC"),
-                   row(1, 2), row(1, 1), row(1, 0));
-
-        // GT
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 > 1"),
-                   row(2, 2), row(2, 1), row(2, 0));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 > 1 ORDER BY c1 ASC, c2 ASC"),
-                   row(2, 0), row(2, 1), row(2, 2));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 > 1 ORDER BY c1 DESC, c2 DESC"),
-                   row(2, 2), row(2, 1), row(2, 0));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1"),
-                   row(2, 2), row(2, 1), row(2, 0), row(1, 2), row(1, 1), row(1, 0));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC, c2 ASC"),
-                   row(1, 0), row(1, 1), row(1, 2), row(2, 0), row(2, 1), row(2, 2));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC"),
-                   row(1, 0), row(1, 1), row(1, 2), row(2, 0), row(2, 1), row(2, 2));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 >= 1 ORDER BY c1 DESC, c2 DESC"),
-                   row(2, 2), row(2, 1), row(2, 0), row(1, 2), row(1, 1), row(1, 0));
-
-        // LT
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 < 1"),
-                   row(0, 2), row(0, 1), row(0, 0));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 < 1 ORDER BY c1 ASC, c2 ASC"),
-                   row(0, 0), row(0, 1), row(0, 2));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 < 1 ORDER BY c1 DESC, c2 DESC"),
-                   row(0, 2), row(0, 1), row(0, 0));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1"),
-                   row(1, 2), row(1, 1), row(1, 0), row(0, 2), row(0, 1), row(0, 0));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC, c2 ASC"),
-                   row(0, 0), row(0, 1), row(0, 2), row(1, 0), row(1, 1), row(1, 2));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC"),
-                   row(0, 0), row(0, 1), row(0, 2), row(1, 0), row(1, 1), row(1, 2));
-
-        assertRows(execute("SELECT c1, c2 FROM %s WHERE key='foo' AND c1 <= 1 ORDER BY c1 DESC, c2 DESC"),
-                   row(1, 2), row(1, 1), row(1, 0), row(0, 2), row(0, 1), row(0, 0));
-    }
-
-    /**
-     * Migrated from cql_tests.py:TestCQL.multi_in_compact_non_composite_test()
-     */
-    @Test
-    public void testMultiSelectsNonCompositeCompactStorage() throws Throwable
-    {
-        createTable("CREATE TABLE %s (key int, c int, v int, PRIMARY KEY (key, c)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (key, c, v) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s (key, c, v) VALUES (0, 1, 1)");
-        execute("INSERT INTO %s (key, c, v) VALUES (0, 2, 2)");
-
-        assertRows(execute("SELECT * FROM %s WHERE key=0 AND c IN (0, 2)"),
-                   row(0, 0, 0), row(0, 2, 2));
-    }
-
-    @Test
-    public void testSelectDistinct() throws Throwable
-    {
-        // Test a 'compact storage' table.
-        createTable("CREATE TABLE %s (pk0 int, pk1 int, val int, PRIMARY KEY((pk0, pk1))) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 3; i++)
-            execute("INSERT INTO %s (pk0, pk1, val) VALUES (?, ?, ?)", i, i, i);
-
-        assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 1"),
-                   row(0, 0));
-
-        assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 3"),
-                   row(0, 0),
-                   row(2, 2),
-                   row(1, 1));
-
-        // Test a 'wide row' thrift table.
-        createTable("CREATE TABLE %s (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 3; i++)
-        {
-            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name0', 0)", i);
-            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name1', 1)", i);
-        }
-
-        assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 1"),
-                   row(1));
-
-        assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 3"),
-                   row(1),
-                   row(0),
-                   row(2));
-    }
-
-    @Test
-    public void testFilteringOnCompactTablesWithoutIndices() throws Throwable
-    {
-        //----------------------------------------------
-        // Test COMPACT table with clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 6)");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, 7)");
-
-        // Adds tombstones
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 1, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 2, 7)");
-        execute("DELETE FROM %s WHERE a = 1 AND b = 1");
-        execute("DELETE FROM %s WHERE a = 2 AND b = 2");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4 ALLOW FILTERING"),
-                       row(1, 4, 4));
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (c) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7)");
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (c) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING");
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > 4 ALLOW FILTERING"),
-                       row(1, 3, 6),
-                       row(2, 3, 7));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b < 3 AND c <= 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= 4 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= 3 AND c <= 6");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= 3 AND c <= 6 ALLOW FILTERING"),
-                       row(1, 2, 4),
-                       row(1, 3, 6),
-                       row(1, 4, 4));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-
-        //----------------------------------------------
-        // Test COMPACT table without clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, 6)");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, 7)");
-
-        // Adds tombstones
-        execute("INSERT INTO %s (a, b, c) VALUES (0, 1, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (5, 2, 7)");
-        execute("DELETE FROM %s WHERE a = 0");
-        execute("DELETE FROM %s WHERE a = 5");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = 4 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (c) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7)");
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (c) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING");
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > 4 ALLOW FILTERING"),
-                       row(2, 1, 6),
-                       row(4, 1, 7));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b < 3 AND c <= 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= 4 ALLOW FILTERING"),
-                       row(1, 2, 4),
-                       row(3, 2, 4));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= 3 AND c <= 6");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= 3 AND c <= 6 ALLOW FILTERING"),
-                       row(1, 2, 4),
-                       row(2, 1, 6),
-                       row(3, 2, 4));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-    }
-
-    @Test
-    public void testFilteringOnCompactTablesWithoutIndicesAndWithLists() throws Throwable
-    {
-        //----------------------------------------------
-        // Test COMPACT table with clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c frozen<list<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, [6, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, [4, 1])");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, [7, 1])");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = [4, 1]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = [4, 1] ALLOW FILTERING"),
-                       row(1, 4, list(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > [4, 2] ALLOW FILTERING"),
-                       row(1, 3, list(6, 2)),
-                       row(2, 3, list(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b <= 3 AND c < [6, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE b <= 3 AND c < [6, 2] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= [4, 2] AND c <= [6, 4]");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= [4, 2] AND c <= [6, 4] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)),
-                       row(1, 3, list(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)),
-                       row(1, 3, list(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(1, 3, list(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
-                             unset());
-
-        //----------------------------------------------
-        // Test COMPACT table without clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<list<int>>) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, [6, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, [4, 1])");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, [7, 1])");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = [4, 2] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > [4, 2] ALLOW FILTERING"),
-                       row(2, 1, list(6, 2)),
-                       row(4, 1, list(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b < 3 AND c <= [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= [4, 2] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)),
-                       row(3, 2, list(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= [4, 3] AND c <= [7]");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= [4, 3] AND c <= [7] ALLOW FILTERING"),
-                       row(2, 1, list(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)),
-                       row(2, 1, list(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(2, 1, list(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
-                             unset());
-    }
-
-    @Test
-    public void testFilteringOnCompactTablesWithoutIndicesAndWithSets() throws Throwable
-    {
-        //----------------------------------------------
-        // Test COMPACT table with clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c frozen<set<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4, 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7, 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4, 1}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4, 1} ALLOW FILTERING"),
-                       row(1, 4, set(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > {4, 2} ALLOW FILTERING"),
-                       row(1, 3, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b <= 3 AND c < {6, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE b <= 3 AND c < {6, 2} ALLOW FILTERING"),
-                       row(1, 2, set(2, 4)),
-                       row(2, 3, set(1, 7)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= {4, 2} AND c <= {6, 4}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= {4, 2} AND c <= {6, 4} ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(1, 3, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(1, 3, set(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(1, 3, set(6, 2)));
-        });
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
-                             unset());
-
-        //----------------------------------------------
-        // Test COMPACT table without clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<set<int>>) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4, 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7, 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4, 2} ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > {4, 2} ALLOW FILTERING"),
-                       row(2, 1, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b < 3 AND c <= {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= {4, 2} ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(4, 1, set(1, 7)),
-                       row(3, 2, set(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= {4, 3} AND c <= {7}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= {5, 2} AND c <= {7} ALLOW FILTERING"),
-                       row(2, 1, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(2, 1, set(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(2, 1, set(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
-                             unset());
-    }
-
-    @Test
-    public void testAllowFilteringOnPartitionKeyWithDistinct() throws Throwable
-    {
-        // Test a 'compact storage' table.
-        createTable("CREATE TABLE %s (pk0 int, pk1 int, val int, PRIMARY KEY((pk0, pk1))) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 3; i++)
-            execute("INSERT INTO %s (pk0, pk1, val) VALUES (?, ?, ?)", i, i, i);
-
-        beforeAndAfterFlush(() -> {
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT DISTINCT pk0, pk1 FROM %s WHERE pk1 = 1 LIMIT 3");
-
-            assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s WHERE pk0 < 2 AND pk1 = 1 LIMIT 1 ALLOW FILTERING"),
-                       row(1, 1));
-
-            assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s WHERE pk1 > 1 LIMIT 3 ALLOW FILTERING"),
-                       row(2, 2));
-        });
-
-        // Test a 'wide row' thrift table.
-        createTable("CREATE TABLE %s (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE");
-
-        for (int i = 0; i < 3; i++)
-        {
-            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name0', 0)", i);
-            execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name1', 1)", i);
-        }
-
-        beforeAndAfterFlush(() -> {
-            assertRows(execute("SELECT DISTINCT pk FROM %s WHERE pk > 1 LIMIT 1 ALLOW FILTERING"),
-                       row(2));
-
-            assertRows(execute("SELECT DISTINCT pk FROM %s WHERE pk > 0 LIMIT 3 ALLOW FILTERING"),
-                       row(1),
-                       row(2));
-        });
-    }
-
-    @Test
-    public void testAllowFilteringOnPartitionKeyWithCounters() throws Throwable
-    {
-        for (String compactStorageClause : new String[]{ "", " WITH COMPACT STORAGE" })
-        {
-            createTable("CREATE TABLE %s (a int, b int, c int, cnt counter, PRIMARY KEY ((a, b), c))"
-                        + compactStorageClause);
-
-            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 14L, 11, 12, 13);
-            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 21, 22, 23);
-            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 27L, 21, 22, 26);
-            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 34L, 31, 32, 33);
-            execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 41, 42, 43);
-
-            beforeAndAfterFlush(() -> {
-
-                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt = 24"),
-                           row(41, 42, 43, 24L),
-                           row(21, 22, 23, 24L));
-                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 22 AND cnt = 24"),
-                           row(41, 42, 43, 24L));
-                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND b < 25 AND cnt = 24"),
-                           row(21, 22, 23, 24L));
-                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND c < 25 AND cnt = 24"),
-                           row(21, 22, 23, 24L));
-
-                assertInvalidMessage(
-                "ORDER BY is only supported when the partition key is restricted by an EQ or an IN.",
-                "SELECT * FROM %s WHERE a = 21 AND b > 10 AND cnt > 23 ORDER BY c DESC ALLOW FILTERING");
-
-                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a = 21 AND b = 22 AND cnt > 23 ORDER BY c DESC"),
-                           row(21, 22, 26, 27L),
-                           row(21, 22, 23, 24L));
-
-                assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt > 20 AND cnt < 30"),
-                           row(41, 42, 43, 24L),
-                           row(21, 22, 23, 24L),
-                           row(21, 22, 26, 27L));
-            });
-        }
-    }
-
-    @Test
-    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndicesAndWithLists() throws Throwable
-    {
-        // ----------------------------------------------
-        // Test COMPACT table with clustering columns
-        // ----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c frozen<list<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, [6, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, [4, 1])");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, [7, 1])");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = [4, 1]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b >= 4 AND c = [4, 1] ALLOW FILTERING"),
-                       row(1, 4, list(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 0 AND c > [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > [4, 2] ALLOW FILTERING"),
-                       row(1, 3, list(6, 2)),
-                       row(2, 3, list(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 1 AND b <= 3 AND c < [6, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a <= 1 AND b <= 3 AND c < [6, 2] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a <= 1 AND c >= [4, 2] AND c <= [6, 4]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND b <= 3 AND c >= [4, 2] AND c <= [6, 4] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)),
-                       row(1, 3, list(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 1 AND c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)),
-                       row(1, 3, list(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE a > 1 AND c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(1, 3, list(6, 2)));
-        });
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a > 1 AND c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a > 1 AND c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS ? ALLOW FILTERING",
-                             unset());
-
-        // ----------------------------------------------
-        // Test COMPACT table without clustering columns
-        // ----------------------------------------------
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<list<int>>) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, [4, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, [6, 2])");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, [4, 1])");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, [7, 1])");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = [4, 2] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 1 AND c > [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 3 AND c > [4, 2] ALLOW FILTERING"),
-                       row(4, 1, list(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a < 1 AND b < 3 AND c <= [4, 2]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND b < 3 AND c <= [4, 2] ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 1 AND c >= [4, 3] AND c <= [7]");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c >= [4, 3] AND c <= [7] ALLOW FILTERING"),
-                       row(2, 1, list(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 3 AND c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, list(4, 2)),
-                       row(2, 1, list(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(2, 1, list(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a > 1 AND c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a > 1 AND c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a > 1 AND c CONTAINS ? ALLOW FILTERING",
-                             unset());
-    }
-
-
-    @Test
-    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndicesAndWithMaps() throws Throwable
-    {
-        //----------------------------------------------
-        // Test COMPACT table with clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c frozen<map<int, int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4 : 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7 : 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = {4 : 1}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a <= 1 AND b = 4 AND c = {4 : 1} ALLOW FILTERING"),
-                       row(1, 4, map(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 1 AND c > {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 1 AND c > {4 : 2} ALLOW FILTERING"),
-                       row(2, 3, map(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 1 AND b <= 3 AND c < {6 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b <= 3 AND c < {6 : 2} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 1 AND c >= {4 : 2} AND c <= {6 : 4}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c >= {4 : 2} AND c <= {6 : 4} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(1, 3, map(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a > 10 AND c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(1, 3, map(6, 2)));
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
-                       row(1, 3, map(6, 2)));
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
-                       row(1, 3, map(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY ? ALLOW FILTERING",
-                             unset());
-
-        //----------------------------------------------
-        // Test COMPACT table without clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<map<int, int>>) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4 : 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7 : 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4 : 2} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c > {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > {4 : 2} ALLOW FILTERING"),
-                       row(2, 1, map(6, 2)),
-                       row(4, 1, map(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b < 3 AND c <= {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b < 3 AND c <= {4 : 2} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(3, 2, map(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c >= {4 : 3} AND c <= {7 : 1}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c >= {5 : 2} AND c <= {7 : 0} ALLOW FILTERING"),
-                       row(2, 1, map(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(2, 1, map(6, 2)));
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND c CONTAINS KEY 4 ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(3, 2, map(4, 1)));
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
-                       row(2, 1, map(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY ? ALLOW FILTERING",
-                             unset());
-    }
-
-    @Test
-    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndicesAndWithSets() throws Throwable
-    {
-        //----------------------------------------------
-        // Test COMPACT table with clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c frozen<set<int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4, 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7, 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = {4, 1}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 4 AND c = {4, 1} ALLOW FILTERING"),
-                       row(1, 4, set(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c > {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > {4, 2} ALLOW FILTERING"),
-                       row(1, 3, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b <= 3 AND c < {6, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a > 0 AND b <= 3 AND c < {6, 2} ALLOW FILTERING"),
-                       row(1, 2, set(2, 4)),
-                       row(2, 3, set(1, 7)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c >= {4, 2} AND c <= {6, 4}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 0 AND c >= {4, 2} AND c <= {6, 4} ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(1, 3, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(1, 3, set(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(1, 3, set(6, 2)));
-        });
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
-                             unset());
-
-        //----------------------------------------------
-        // Test COMPACT table without clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<set<int>>) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6, 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4, 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7, 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b = 2 AND c = {4, 2} ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c > {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND c > {4, 2} ALLOW FILTERING"),
-                       row(2, 1, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND b < 3 AND c <= {4, 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a <= 4 AND b < 3 AND c <= {4, 2} ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(4, 1, set(1, 7)),
-                       row(3, 2, set(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c >= {4, 3} AND c <= {7}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND c >= {5, 2} AND c <= {7} ALLOW FILTERING"),
-                       row(2, 1, set(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 0 AND c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, set(4, 2)),
-                       row(2, 1, set(6, 2)));
-
-            assertInvalidMessage("Cannot use CONTAINS KEY on non-map column c",
-                                 "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS KEY 2 ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c CONTAINS 2 AND c CONTAINS 6 ALLOW FILTERING"),
-                       row(2, 1, set(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE a >= 1 AND c CONTAINS ? ALLOW FILTERING",
-                             unset());
-    }
-
-    @Test
-    public void testAllowFilteringOnPartitionKeyOnCompactTablesWithoutIndices() throws Throwable
-    {
-        // ----------------------------------------------
-        // Test COMPACT table with clustering columns
-        // ----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY ((a, b), c)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 2, 4, 5)");
-        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 3, 6, 7)");
-        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 4, 4, 5)");
-        execute("INSERT INTO %s (a, b, c, d) VALUES (2, 3, 7, 8)");
-
-        // Adds tombstones
-        execute("INSERT INTO %s (a, b, c, d) VALUES (1, 1, 4, 5)");
-        execute("INSERT INTO %s (a, b, c, d) VALUES (2, 2, 7, 8)");
-        execute("DELETE FROM %s WHERE a = 1 AND b = 1 AND c = 4");
-        execute("DELETE FROM %s WHERE a = 2 AND b = 2 AND c = 7");
-
-        beforeAndAfterFlush(() -> {
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4"),
-                       row(1, 4, 4, 5));
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4 AND d = 5");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4 ALLOW FILTERING"),
-                       row(1, 4, 4, 5));
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (d) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND b = 3 AND d IN (6, 7)");
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (d) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND b = 3 AND d IN (6, 7) ALLOW FILTERING");
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 2 AND c > 4 AND c <= 6 ALLOW FILTERING"),
-                       row(1, 3, 6, 7));
-
-            assertRows(execute("SELECT * FROM %s WHERE a <= 1 AND b >= 2 AND c >= 4 AND d <= 8 ALLOW FILTERING"),
-                       row(1, 3, 6, 7),
-                       row(1, 4, 4, 5),
-                       row(1, 2, 4, 5));
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND c >= 4 AND d <= 8 ALLOW FILTERING"),
-                       row(1, 3, 6, 7),
-                       row(1, 4, 4, 5),
-                       row(1, 2, 4, 5));
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND c >= 4 AND d <= 8 ALLOW FILTERING"),
-                       row(2, 3, 7, 8));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE d = null");
-        assertInvalidMessage("Unsupported null value for column a",
-                             "SELECT * FROM %s WHERE a = null ALLOW FILTERING");
-        assertInvalidMessage("Unsupported null value for column a",
-                             "SELECT * FROM %s WHERE a > null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column a",
-                             "SELECT * FROM %s WHERE a = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column a",
-                             "SELECT * FROM %s WHERE a > ? ALLOW FILTERING",
-                             unset());
-
-        //----------------------------------------------
-        // Test COMPACT table without clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int primary key, b int, c int) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, 6)");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, 7)");
-
-        // Adds tombstones
-        execute("INSERT INTO %s (a, b, c) VALUES (0, 1, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES (5, 2, 7)");
-        execute("DELETE FROM %s WHERE a = 0");
-        execute("DELETE FROM %s WHERE a = 5");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = 4 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE b >= 2 AND c <= 4 ALLOW FILTERING"),
-                       row(1, 2, 4),
-                       row(3, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE b >= 2 ALLOW FILTERING"),
-                       row(1, 2, 4),
-                       row(3, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 2 AND b <=1 ALLOW FILTERING"),
-                       row(2, 1, 6),
-                       row(4, 1, 7));
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND c >= 4 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (b) is not yet supported",
-                                 "SELECT * FROM %s WHERE a = 1 AND b IN (1, 2) AND c IN (6, 7)");
-
-            assertInvalidMessage("IN predicates on non-primary-key columns (c) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING");
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > 4");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > 4 ALLOW FILTERING"),
-                       row(2, 1, 6),
-                       row(4, 1, 7));
-
-            assertRows(execute("SELECT * FROM %s WHERE a >= 1 AND b >= 2 AND c <= 4 ALLOW FILTERING"),
-                       row(1, 2, 4),
-                       row(3, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND c <= 4 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE a < 3 AND b >= 2 AND c <= 4 ALLOW FILTERING"),
-                       row(1, 2, 4));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= 3 AND c <= 6");
-
-            assertRows(execute("SELECT * FROM %s WHERE c <=6 ALLOW FILTERING"),
-                       row(1, 2, 4),
-                       row(2, 1, 6),
-                       row(3, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE token(a) >= token(2)"),
-                       row(2, 1, 6),
-                       row(4, 1, 7),
-                       row(3, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE token(a) >= token(2) ALLOW FILTERING"),
-                       row(2, 1, 6),
-                       row(4, 1, 7),
-                       row(3, 2, 4));
-
-            assertRows(execute("SELECT * FROM %s WHERE token(a) >= token(2) AND b = 1 ALLOW FILTERING"),
-                       row(2, 1, 6),
-                       row(4, 1, 7));
-        });
-    }
-
-    @Test
-    public void testFilteringOnCompactTablesWithoutIndicesAndWithMaps() throws Throwable
-    {
-        //----------------------------------------------
-        // Test COMPACT table with clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int, b int, c frozen<map<int, int>>, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 3, {6 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 4, {4 : 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 3, {7 : 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4 : 1}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 4 AND c = {4 : 1} ALLOW FILTERING"),
-                       row(1, 4, map(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > {4 : 2} ALLOW FILTERING"),
-                       row(1, 3, map(6, 2)),
-                       row(2, 3, map(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b <= 3 AND c < {6 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE b <= 3 AND c < {6 : 2} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= {4 : 2} AND c <= {6 : 4}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= {4 : 2} AND c <= {6 : 4} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(1, 3, map(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(1, 3, map(6, 2)));
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS KEY 6 ALLOW FILTERING"),
-                       row(1, 3, map(6, 2)));
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
-                       row(1, 3, map(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS KEY null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS KEY ? ALLOW FILTERING",
-                             unset());
-
-        //----------------------------------------------
-        // Test COMPACT table without clustering columns
-        //----------------------------------------------
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c frozen<map<int, int>>) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 2, {4 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, {6 : 2})");
-        execute("INSERT INTO %s (a, b, c) VALUES (3, 2, {4 : 1})");
-        execute("INSERT INTO %s (a, b, c) VALUES (4, 1, {7 : 1})");
-
-        beforeAndAfterFlush(() -> {
-
-            // Checks filtering
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE a = 1 AND b = 2 AND c = {4 : 2} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c > {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c > {4 : 2} ALLOW FILTERING"),
-                       row(2, 1, map(6, 2)),
-                       row(4, 1, map(7, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE b < 3 AND c <= {4 : 2}");
-
-            assertRows(execute("SELECT * FROM %s WHERE b < 3 AND c <= {4 : 2} ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(3, 2, map(4, 1)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c >= {4 : 3} AND c <= {7 : 1}");
-
-            assertRows(execute("SELECT * FROM %s WHERE c >= {5 : 2} AND c <= {7 : 0} ALLOW FILTERING"),
-                       row(2, 1, map(6, 2)));
-
-            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                                 "SELECT * FROM %s WHERE c CONTAINS 2");
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(2, 1, map(6, 2)));
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS KEY 4 ALLOW FILTERING"),
-                       row(1, 2, map(4, 2)),
-                       row(3, 2, map(4, 1)));
-
-            assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS KEY 6 ALLOW FILTERING"),
-                       row(2, 1, map(6, 2)));
-        });
-
-        // Checks filtering with null
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c = null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c > null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c > null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c CONTAINS null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
-        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
-                             "SELECT * FROM %s WHERE c CONTAINS KEY null");
-        assertInvalidMessage("Unsupported null value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS KEY null ALLOW FILTERING");
-
-        // Checks filtering with unset
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c = ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c > ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING",
-                             unset());
-        assertInvalidMessage("Unsupported unset value for column c",
-                             "SELECT * FROM %s WHERE c CONTAINS KEY ? ALLOW FILTERING",
-                             unset());
-    }
-
-    @Test
-    public void filteringOnCompactTable() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 11, 12, 13, 14);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 22, 23, 24);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 25, 26, 27);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 31, 32, 33, 34);
-
-        beforeAndAfterFlush(() -> {
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13"),
-                       row(21, 22, 23, 24),
-                       row(21, 25, 26, 27),
-                       row(31, 32, 33, 34));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13 AND c < 33"),
-                       row(21, 22, 23, 24),
-                       row(21, 25, 26, 27));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13 AND b < 32"),
-                       row(21, 22, 23, 24),
-                       row(21, 25, 26, 27));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a = 21 AND c > 13 AND b < 32 ORDER BY b DESC"),
-                       row(21, 25, 26, 27),
-                       row(21, 22, 23, 24));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a IN (21, 31) AND c > 13 ORDER BY b DESC"),
-                       row(31, 32, 33, 34),
-                       row(21, 25, 26, 27),
-                       row(21, 22, 23, 24));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13 AND d < 34"),
-                       row(21, 22, 23, 24),
-                       row(21, 25, 26, 27));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c > 13"),
-                       row(21, 22, 23, 24),
-                       row(21, 25, 26, 27),
-                       row(31, 32, 33, 34));
-        });
-
-        // with frozen in clustering key
-        createTable("CREATE TABLE %s (a int, b int, c frozen<list<int>>, d int, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 11, 12, list(1, 3), 14);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 22, list(2, 3), 24);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 25, list(2, 6), 27);
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 31, 32, list(3, 3), 34);
-
-        beforeAndAfterFlush(() -> {
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c CONTAINS 2"),
-                       row(21, 22, list(2, 3), 24),
-                       row(21, 25, list(2, 6), 27));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c CONTAINS 2 AND b < 25"),
-                       row(21, 22, list(2, 3), 24));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 3"),
-                       row(21, 22, list(2, 3), 24));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 12 AND c CONTAINS 2 AND d < 27"),
-                       row(21, 22, list(2, 3), 24));
-        });
-
-        // with frozen in value
-        createTable("CREATE TABLE %s (a int, b int, c int, d frozen<list<int>>, PRIMARY KEY (a, b, c)) WITH COMPACT STORAGE");
-
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 11, 12, 13, list(1, 4));
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 22, 23, list(2, 4));
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 21, 25, 25, list(2, 6));
-        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 31, 32, 34, list(3, 4));
-
-        beforeAndAfterFlush(() -> {
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE d CONTAINS 2"),
-                       row(21, 22, 23, list(2, 4)),
-                       row(21, 25, 25, list(2, 6)));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE d CONTAINS 2 AND b < 25"),
-                       row(21, 22, 23, list(2, 4)));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE d CONTAINS 2 AND d CONTAINS 4"),
-                       row(21, 22, 23, list(2, 4)));
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 12 AND c < 25 AND d CONTAINS 2"),
-                       row(21, 22, 23, list(2, 4)));
-        });
-    }
-
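-    /**
-     * Asserts that the given statement is rejected without ALLOW FILTERING and then
-     * executes it again with ALLOW FILTERING appended, returning the filtered result.
-     */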
-    private UntypedResultSet executeFilteringOnly(String statement) throws Throwable
-    {
-        assertInvalid(statement);
-        return execute(statement + " ALLOW FILTERING");
-    }
-
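-    /**
-     * Checks that filtering on a counter column is rejected without ALLOW FILTERING
-     * and returns the expected rows once ALLOW FILTERING is added.
-     */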
-    @Test
-    public void testFilteringWithCounters() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, c int, cnt counter, PRIMARY KEY (a, b, c))" + compactOption);
-
-        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 14L, 11, 12, 13);
-        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 21, 22, 23);
-        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 27L, 21, 25, 26);
-        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 34L, 31, 32, 33);
-        execute("UPDATE %s SET cnt = cnt + ? WHERE a = ? AND b = ? AND c = ?", 24L, 41, 42, 43);
-
-        beforeAndAfterFlush(() -> {
-
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt = 24"),
-                       row(21, 22, 23, 24L),
-                       row(41, 42, 43, 24L));
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 22 AND cnt = 24"),
-                       row(41, 42, 43, 24L));
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND b < 25 AND cnt = 24"),
-                       row(21, 22, 23, 24L));
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE b > 10 AND c < 25 AND cnt = 24"),
-                       row(21, 22, 23, 24L));
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE a = 21 AND b > 10 AND cnt > 23 ORDER BY b DESC"),
-                       row(21, 25, 26, 27L),
-                       row(21, 22, 23, 24L));
-            assertRows(executeFilteringOnly("SELECT * FROM %s WHERE cnt > 20 AND cnt < 30"),
-                       row(21, 22, 23, 24L),
-                       row(21, 25, 26, 27L),
-                       row(41, 42, 43, 24L));
-        });
-    }
-
-    /**
-     * Check select with and without compact storage, with different column
-     * order. See CASSANDRA-10988
-     */
-    @Test
-    public void testClusteringOrderWithSlice() throws Throwable
-    {
-        final String compactOption = " WITH COMPACT STORAGE AND";
-
-        // non-compound, ASC order
-        createTable("CREATE TABLE %s (a text, b int, PRIMARY KEY (a, b)) " +
-                    compactOption +
-                    " CLUSTERING ORDER BY (b ASC)");
-
-        execute("INSERT INTO %s (a, b) VALUES ('a', 2)");
-        execute("INSERT INTO %s (a, b) VALUES ('a', 3)");
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
-                   row("a", 2),
-                   row("a", 3));
-
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b DESC"),
-                   row("a", 3),
-                   row("a", 2));
-
-        // non-compound, DESC order
-        createTable("CREATE TABLE %s (a text, b int, PRIMARY KEY (a, b))" +
-                    compactOption +
-                    " CLUSTERING ORDER BY (b DESC)");
-
-        execute("INSERT INTO %s (a, b) VALUES ('a', 2)");
-        execute("INSERT INTO %s (a, b) VALUES ('a', 3)");
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
-                   row("a", 3),
-                   row("a", 2));
-
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b ASC"),
-                   row("a", 2),
-                   row("a", 3));
-
-        // compound, first column DESC order
-        createTable("CREATE TABLE %s (a text, b int, c int, PRIMARY KEY (a, b, c)) " +
-                    compactOption +
-                    " CLUSTERING ORDER BY (b DESC)"
-        );
-
-        execute("INSERT INTO %s (a, b, c) VALUES ('a', 2, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES ('a', 3, 5)");
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
-                   row("a", 3, 5),
-                   row("a", 2, 4));
-
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b ASC"),
-                   row("a", 2, 4),
-                   row("a", 3, 5));
-
-        // compound, mixed order
-        createTable("CREATE TABLE %s (a text, b int, c int, PRIMARY KEY (a, b, c)) " +
-                    compactOption +
-                    " CLUSTERING ORDER BY (b ASC, c DESC)"
-        );
-
-        execute("INSERT INTO %s (a, b, c) VALUES ('a', 2, 4)");
-        execute("INSERT INTO %s (a, b, c) VALUES ('a', 3, 5)");
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0"),
-                   row("a", 2, 4),
-                   row("a", 3, 5));
-
-        assertRows(execute("SELECT * FROM %s WHERE a = 'a' AND b > 0 ORDER BY b ASC"),
-                   row("a", 2, 4),
-                   row("a", 3, 5));
-    }
-
-
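-    /**
-     * Checks restrictions using empty blob values on the partition key, the clustering
-     * column and the regular column, both with and without COMPACT STORAGE.
-     */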
-    @Test
-    public void testEmptyRestrictionValue() throws Throwable
-    {
-        for (String options : new String[]{ "", " WITH COMPACT STORAGE" })
-        {
-            createTable("CREATE TABLE %s (pk blob, c blob, v blob, PRIMARY KEY ((pk), c))" + options);
-            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                    bytes("foo123"), bytes("1"), bytes("1"));
-            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                    bytes("foo123"), bytes("2"), bytes("2"));
-
-            beforeAndAfterFlush(() -> {
-
-                assertInvalidMessage("Key may not be empty", "SELECT * FROM %s WHERE pk = textAsBlob('');");
-                assertInvalidMessage("Key may not be empty", "SELECT * FROM %s WHERE pk IN (textAsBlob(''), textAsBlob('1'));");
-
-                assertInvalidMessage("Key may not be empty",
-                                     "INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                                     EMPTY_BYTE_BUFFER, bytes("2"), bytes("2"));
-
-                // Test clustering columns restrictions
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c = textAsBlob('');"));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) = (textAsBlob(''));"));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c IN (textAsBlob(''), textAsBlob('1'));"),
-                           row(bytes("foo123"), bytes("1"), bytes("1")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) IN ((textAsBlob('')), (textAsBlob('1')));"),
-                           row(bytes("foo123"), bytes("1"), bytes("1")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('');"),
-                           row(bytes("foo123"), bytes("1"), bytes("1")),
-                           row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) > (textAsBlob(''));"),
-                           row(bytes("foo123"), bytes("1"), bytes("1")),
-                           row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('');"),
-                           row(bytes("foo123"), bytes("1"), bytes("1")),
-                           row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) >= (textAsBlob(''));"),
-                           row(bytes("foo123"), bytes("1"), bytes("1")),
-                           row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c <= textAsBlob('');"));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) <= (textAsBlob(''));"));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c < textAsBlob('');"));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) < (textAsBlob(''));"));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('') AND c < textAsBlob('');"));
-            });
-
-            if (options.contains("COMPACT"))
-            {
-                assertInvalidMessage("Invalid empty or null value for column c",
-                                     "INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                                     bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4"));
-            }
-            else
-            {
-                execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                        bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4"));
-
-                beforeAndAfterFlush(() -> {
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c = textAsBlob('');"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) = (textAsBlob(''));"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c IN (textAsBlob(''), textAsBlob('1'));"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
-                               row(bytes("foo123"), bytes("1"), bytes("1")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) IN ((textAsBlob('')), (textAsBlob('1')));"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
-                               row(bytes("foo123"), bytes("1"), bytes("1")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('');"),
-                               row(bytes("foo123"), bytes("1"), bytes("1")),
-                               row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) > (textAsBlob(''));"),
-                               row(bytes("foo123"), bytes("1"), bytes("1")),
-                               row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('');"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
-                               row(bytes("foo123"), bytes("1"), bytes("1")),
-                               row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) >= (textAsBlob(''));"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")),
-                               row(bytes("foo123"), bytes("1"), bytes("1")),
-                               row(bytes("foo123"), bytes("2"), bytes("2")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c <= textAsBlob('');"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
-
-                    assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) <= (textAsBlob(''));"),
-                               row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("4")));
-
-                    assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c < textAsBlob('');"));
-
-                    assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c) < (textAsBlob(''));"));
-
-                    assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('') AND c < textAsBlob('');"));
-                });
-            }
-
-            // Test restrictions on non-primary key value
-            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND v = textAsBlob('') ALLOW FILTERING;"));
-
-            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                    bytes("foo123"), bytes("3"), EMPTY_BYTE_BUFFER);
-
-            beforeAndAfterFlush(() -> {
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND v = textAsBlob('') ALLOW FILTERING;"),
-                           row(bytes("foo123"), bytes("3"), EMPTY_BYTE_BUFFER));
-            });
-        }
-    }
-
-    @Test
-    public void testEmptyRestrictionValueWithMultipleClusteringColumns() throws Throwable
-    {
-        createTable("CREATE TABLE %s (pk blob, c1 blob, c2 blob, v blob, PRIMARY KEY (pk, c1, c2))" + compactOption);
-        execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("1"), bytes("1"));
-        execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("2"), bytes("2"));
-
-        beforeAndAfterFlush(() -> {
-
-            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('');"));
-
-            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 = textAsBlob('');"));
-
-            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) = (textAsBlob('1'), textAsBlob(''));"));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 IN (textAsBlob(''), textAsBlob('1')) AND c2 = textAsBlob('1');"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 IN (textAsBlob(''), textAsBlob('1'));"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) IN ((textAsBlob(''), textAsBlob('1')), (textAsBlob('1'), textAsBlob('1')));"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 > textAsBlob('');"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 > textAsBlob('');"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'));"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 >= textAsBlob('');"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
-
-            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 <= textAsBlob('');"));
-
-            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) <= (textAsBlob('1'), textAsBlob(''));"));
-        });
-
-        execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)",
-                bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4"));
-
-        beforeAndAfterFlush(() -> {
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('');"),
-                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('') AND c2 = textAsBlob('1');"),
-                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) = (textAsBlob(''), textAsBlob('1'));"),
-                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 IN (textAsBlob(''), textAsBlob('1')) AND c2 = textAsBlob('1');"),
-                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) IN ((textAsBlob(''), textAsBlob('1')), (textAsBlob('1'), textAsBlob('1')));"),
-                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'));"),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) >= (textAsBlob(''), textAsBlob('1'));"),
-                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")),
-                       row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                       row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")));
-
-            assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) <= (textAsBlob(''), textAsBlob('1'));"),
-                       row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
-
-            assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) < (textAsBlob(''), textAsBlob('1'));"));
-        });
-    }
-
-    @Test
-    public void testEmptyRestrictionValueWithOrderBy() throws Throwable
-    {
-        for (String options : new String[]{ " WITH COMPACT STORAGE",
-                                            " WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c DESC)" })
-        {
-            String orderingClause = options.contains("ORDER") ? "" : "ORDER BY c DESC";
-
-            createTable("CREATE TABLE %s (pk blob, c blob, v blob, PRIMARY KEY ((pk), c))" + options);
-            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                    bytes("foo123"),
-                    bytes("1"),
-                    bytes("1"));
-            execute("INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                    bytes("foo123"),
-                    bytes("2"),
-                    bytes("2"));
-
-            beforeAndAfterFlush(() -> {
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c > textAsBlob('')" + orderingClause),
-                           row(bytes("foo123"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c >= textAsBlob('')" + orderingClause),
-                           row(bytes("foo123"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1")));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c < textAsBlob('')" + orderingClause));
-
-                assertEmpty(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c <= textAsBlob('')" + orderingClause));
-            });
-
-            assertInvalidMessage("Invalid empty or null value for column c",
-                                 "INSERT INTO %s (pk, c, v) VALUES (?, ?, ?)",
-                                 bytes("foo123"),
-                                 EMPTY_BYTE_BUFFER,
-                                 bytes("4"));
-        }
-    }
-
-    @Test
-    public void testEmptyRestrictionValueWithMultipleClusteringColumnsAndOrderBy() throws Throwable
-    {
-        for (String options : new String[]{ " WITH COMPACT STORAGE",
-                                            " WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c1 DESC, c2 DESC)" })
-        {
-            String orderingClause = options.contains("ORDER") ? "" : "ORDER BY c1 DESC, c2 DESC";
-
-            createTable("CREATE TABLE %s (pk blob, c1 blob, c2 blob, v blob, PRIMARY KEY (pk, c1, c2))" + options);
-            execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("1"), bytes("1"));
-            execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)", bytes("foo123"), bytes("1"), bytes("2"), bytes("2"));
-
-            beforeAndAfterFlush(() -> {
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 > textAsBlob('')" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 > textAsBlob('')" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'))" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 = textAsBlob('1') AND c2 >= textAsBlob('')" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-            });
-
-            execute("INSERT INTO %s (pk, c1, c2, v) VALUES (?, ?, ?, ?)",
-                    bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4"));
-
-            beforeAndAfterFlush(() -> {
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND c1 IN (textAsBlob(''), textAsBlob('1')) AND c2 = textAsBlob('1')" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                           row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) IN ((textAsBlob(''), textAsBlob('1')), (textAsBlob('1'), textAsBlob('1')))" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                           row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) > (textAsBlob(''), textAsBlob('1'))" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")));
-
-                assertRows(execute("SELECT * FROM %s WHERE pk = textAsBlob('foo123') AND (c1, c2) >= (textAsBlob(''), textAsBlob('1'))" + orderingClause),
-                           row(bytes("foo123"), bytes("1"), bytes("2"), bytes("2")),
-                           row(bytes("foo123"), bytes("1"), bytes("1"), bytes("1")),
-                           row(bytes("foo123"), EMPTY_BYTE_BUFFER, bytes("1"), bytes("4")));
-            });
-        }
-    }
-
-    /**
-     * UpdateTest
-     */
-    @Test
-    public void testUpdate() throws Throwable
-    {
-        testUpdate(false);
-        testUpdate(true);
-    }
-
-    private void testUpdate(boolean forceFlush) throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering_1 int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering_1))" + compactOption);
-
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 1, 1)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 2, 2)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 3, 3)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (1, 0, 4)");
-
-        flush(forceFlush);
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
-        flush(forceFlush);
-        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ?",
-                           0, 1),
-                   row(7));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1) = (?)", 8, 0, 2);
-        flush(forceFlush);
-        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ?",
-                           0, 2),
-                   row(8));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey IN (?, ?) AND clustering_1 = ?", 9, 0, 1, 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ?",
-                           0, 1, 0),
-                   row(0, 0, 9),
-                   row(1, 0, 9));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey IN ? AND clustering_1 = ?", 19, Arrays.asList(0, 1), 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN ? AND clustering_1 = ?",
-                           Arrays.asList(0, 1), 0),
-                   row(0, 0, 19),
-                   row(1, 0, 19));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 IN (?, ?)", 10, 0, 1, 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?)",
-                           0, 1, 0),
-                   row(0, 0, 10),
-                   row(0, 1, 10));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))", 20, 0, 0, 1);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
-                           0, 0, 1),
-                   row(0, 0, 20),
-                   row(0, 1, 20));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", null, 0, 0);
-        flush(forceFlush);
-
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
-                           0, 0, 1),
-                   row(0, 1, 20));
-
-        // test invalid queries
-
-        // missing primary key element
-        assertInvalidMessage("Some partition key parts are missing: partitionkey",
-                             "UPDATE %s SET value = ? WHERE clustering_1 = ? ", 7, 1);
-
-        assertInvalidMessage("Some clustering keys are missing: clustering_1",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
-
-        assertInvalidMessage("Some clustering keys are missing: clustering_1",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
-
-        // token function
-        assertInvalidMessage("The token function cannot be used in WHERE clauses for UPDATE statements",
-                             "UPDATE %s SET value = ? WHERE token(partitionKey) = token(?) AND clustering_1 = ?",
-                             7, 0, 1);
-
-        // setting the same column multiple times
-        assertInvalidSyntax("UPDATE %s SET value = ?, value = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
-
-        // restricting the same primary key element more than once in the WHERE clause
-        assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_1 = ?", 7, 0, 1, 1);
-
-        // Undefined column names
-        assertInvalidMessage("Undefined column name value1",
-                             "UPDATE %s SET value1 = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
-
-        assertInvalidMessage("Undefined column name partitionkey1",
-                             "UPDATE %s SET value = ? WHERE partitionKey1 = ? AND clustering_1 = ?", 7, 0, 1);
-
-        assertInvalidMessage("Undefined column name clustering_3",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_3 = ?", 7, 0, 1);
-
-        // Invalid operator in the where clause
-        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
-                             "UPDATE %s SET value = ? WHERE partitionKey > ? AND clustering_1 = ?", 7, 0, 1);
-
-        assertInvalidMessage("Cannot use UPDATE with CONTAINS",
-                             "UPDATE %s SET value = ? WHERE partitionKey CONTAINS ? AND clustering_1 = ?", 7, 0, 1);
-
-        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND value = ?", 7, 0, 1, 3);
-
-        assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 > ?", 7, 0, 1);
-    }
-
-    @Test
-    public void testUpdateWithTwoClusteringColumns() throws Throwable
-    {
-        testUpdateWithTwoClusteringColumns(false);
-        testUpdateWithTwoClusteringColumns(true);
-    }
-
-    private void testUpdateWithTwoClusteringColumns(boolean forceFlush) throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey int," +
-                    "clustering_1 int," +
-                    "clustering_2 int," +
-                    "value int," +
-                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))" + compactOption);
-
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 2, 2)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 3, 3)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 1, 4)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 2, 5)");
-        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 6)");
-        flush(forceFlush);
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
-        flush(forceFlush);
-        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
-                           0, 1, 1),
-                   row(7));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) = (?, ?)", 8, 0, 1, 2);
-        flush(forceFlush);
-        assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
-                           0, 1, 2),
-                   row(8));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 9, 0, 1, 0, 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?",
-                           0, 1, 0, 0),
-                   row(0, 0, 0, 9),
-                   row(1, 0, 0, 9));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey IN ? AND clustering_1 = ? AND clustering_2 = ?", 9, Arrays.asList(0, 1), 0, 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey IN ? AND clustering_1 = ? AND clustering_2 = ?",
-                           Arrays.asList(0, 1), 0, 0),
-                   row(0, 0, 0, 9),
-                   row(1, 0, 0, 9));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)", 12, 0, 1, 1, 2);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)",
-                           0, 1, 1, 2),
-                   row(0, 1, 1, 12),
-                   row(0, 1, 2, 12));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 IN (?, ?) AND clustering_2 IN (?, ?)", 10, 0, 1, 0, 1, 2);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?) AND clustering_2 IN (?, ?)",
-                           0, 1, 0, 1, 2),
-                   row(0, 0, 1, 10),
-                   row(0, 0, 2, 10),
-                   row(0, 1, 1, 10),
-                   row(0, 1, 2, 10));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))", 20, 0, 0, 2, 1, 2);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))",
-                           0, 0, 2, 1, 2),
-                   row(0, 0, 2, 20),
-                   row(0, 1, 2, 20));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", null, 0, 0, 2);
-        flush(forceFlush);
-
-
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))",
-                           0, 0, 2, 1, 2),
-                   row(0, 1, 2, 20));
-
-
-        // test invalid queries
-
-        // missing primary key element
-        assertInvalidMessage("Some partition key parts are missing: partitionkey",
-                             "UPDATE %s SET value = ? WHERE clustering_1 = ? AND clustering_2 = ?", 7, 1, 1);
-
-        String errorMsg = "PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted";
-
-        assertInvalidMessage(errorMsg,
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_2 = ?", 7, 0, 1);
-
-        assertInvalidMessage("Some clustering keys are missing: clustering_1, clustering_2",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
-
-        // token function
-        assertInvalidMessage("The token function cannot be used in WHERE clauses for UPDATE statements",
-                             "UPDATE %s SET value = ? WHERE token(partitionKey) = token(?) AND clustering_1 = ? AND clustering_2 = ?",
-                             7, 0, 1, 1);
-
-        // setting the same column multiple times
-        assertInvalidSyntax("UPDATE %s SET value = ?, value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
-
-        // restricting the same primary key element more than once in the WHERE clause
-        assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND clustering_1 = ?", 7, 0, 1, 1, 1);
-
-        // Undefined column names
-        assertInvalidMessage("Undefined column name value1",
-                             "UPDATE %s SET value1 = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
-
-        assertInvalidMessage("Undefined column name partitionkey1",
-                             "UPDATE %s SET value = ? WHERE partitionKey1 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
-
-        assertInvalidMessage("Undefined column name clustering_3",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_3 = ?", 7, 0, 1, 1);
-
-        // Invalid operator in the where clause
-        assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
-                             "UPDATE %s SET value = ? WHERE partitionKey > ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
-
-        assertInvalidMessage("Cannot use UPDATE with CONTAINS",
-                             "UPDATE %s SET value = ? WHERE partitionKey CONTAINS ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
-
-        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND value = ?", 7, 0, 1, 1, 3);
-
-        assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 > ?", 7, 0, 1);
-
-        assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
-                             "UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) > (?, ?)", 7, 0, 1, 1);
-    }
-
-    @Test
-    public void testUpdateWithMultiplePartitionKeyComponents() throws Throwable
-    {
-        testUpdateWithMultiplePartitionKeyComponents(false);
-        testUpdateWithMultiplePartitionKeyComponents(true);
-    }
-
-    public void testUpdateWithMultiplePartitionKeyComponents(boolean forceFlush) throws Throwable
-    {
-        createTable("CREATE TABLE %s (partitionKey_1 int," +
-                    "partitionKey_2 int," +
-                    "clustering_1 int," +
-                    "clustering_2 int," +
-                    "value int," +
-                    " PRIMARY KEY ((partitionKey_1, partitionKey_2), clustering_1, clustering_2))" + compactOption);
-
-        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 0)");
-        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 1, 0, 1, 1)");
-        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 1, 1, 1, 2)");
-        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (1, 0, 0, 1, 3)");
-        execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (1, 1, 0, 1, 3)");
-        flush(forceFlush);
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey_1 = ? AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 0, 0, 0);
-        flush(forceFlush);
-        assertRows(execute("SELECT value FROM %s WHERE partitionKey_1 = ? AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?",
-                           0, 0, 0, 0),
-                   row(7));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?", 9, 0, 1, 1, 0, 1);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?",
-                           0, 1, 1, 0, 1),
-                   row(0, 1, 0, 1, 9),
-                   row(1, 1, 0, 1, 9));
-
-        execute("UPDATE %s SET value = ? WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 10, 0, 1, 0, 1, 0, 1);
-        flush(forceFlush);
-        assertRows(execute("SELECT * FROM %s"),
-                   row(0, 0, 0, 0, 7),
-                   row(0, 0, 0, 1, 10),
-                   row(0, 1, 0, 1, 10),
-                   row(0, 1, 1, 1, 2),
-                   row(1, 0, 0, 1, 10),
-                   row(1, 1, 0, 1, 10));
-
-        // missing primary key element
-        assertInvalidMessage("Some partition key parts are missing: partitionkey_2",
-                             "UPDATE %s SET value = ? WHERE partitionKey_1 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 1, 1);
-    }
-
-    @Test
-    public void testCfmCompactStorageCQL()
-    {
-        String keyspace = "cql_test_keyspace_compact";
-        String table = "test_table_compact";
-
-        TableMetadata.Builder metadata =
-        TableMetadata.builder(keyspace, table)
-                     .flags(EnumSet.of(TableMetadata.Flag.DENSE))
-                     .addPartitionKeyColumn("pk1", IntegerType.instance)
-                     .addPartitionKeyColumn("pk2", AsciiType.instance)
-                     .addClusteringColumn("ck1", ReversedType.getInstance(IntegerType.instance))
-                     .addClusteringColumn("ck2", IntegerType.instance)
-                     .addRegularColumn("reg", IntegerType.instance);
-
-        SchemaLoader.createKeyspace(keyspace, KeyspaceParams.simple(1), metadata);
-
-        ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
-
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
-        String expected = "CREATE TABLE IF NOT EXISTS cql_test_keyspace_compact.test_table_compact (\n" +
-                          "    pk1 varint,\n" +
-                          "    pk2 ascii,\n" +
-                          "    ck1 varint,\n" +
-                          "    ck2 varint,\n" +
-                          "    reg varint,\n" +
-                          "    PRIMARY KEY ((pk1, pk2), ck1, ck2)\n" +
-                          ") WITH COMPACT STORAGE\n" +
-                          "    AND ID = " + cfs.metadata.id + "\n" +
-                          "    AND CLUSTERING ORDER BY (ck1 DESC, ck2 ASC)";
-        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
-                   actual.contains(expected));
-    }
-
-    @Test
-    public void testCfmCounterCQL()
-    {
-        String keyspace = "cql_test_keyspace_counter";
-        String table = "test_table_counter";
-
-        TableMetadata.Builder metadata;
-        metadata = TableMetadata.builder(keyspace, table)
-                                .flags(EnumSet.of(TableMetadata.Flag.DENSE,
-                                       TableMetadata.Flag.COUNTER))
-                                .isCounter(true)
-                                .addPartitionKeyColumn("pk1", IntegerType.instance)
-                                .addPartitionKeyColumn("pk2", AsciiType.instance)
-                                .addClusteringColumn("ck1", ReversedType.getInstance(IntegerType.instance))
-                                .addClusteringColumn("ck2", IntegerType.instance)
-                                .addRegularColumn("cnt", CounterColumnType.instance);
-
-        SchemaLoader.createKeyspace(keyspace, KeyspaceParams.simple(1), metadata);
-
-        ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
-
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
-        String expected = "CREATE TABLE IF NOT EXISTS cql_test_keyspace_counter.test_table_counter (\n" +
-                          "    pk1 varint,\n" +
-                          "    pk2 ascii,\n" +
-                          "    ck1 varint,\n" +
-                          "    ck2 varint,\n" +
-                          "    cnt counter,\n" +
-                          "    PRIMARY KEY ((pk1, pk2), ck1, ck2)\n" +
-                          ") WITH COMPACT STORAGE\n" +
-                          "    AND ID = " + cfs.metadata.id + "\n" +
-                          "    AND CLUSTERING ORDER BY (ck1 DESC, ck2 ASC)";
-        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
-                   actual.contains(expected));
-    }
-
-    @Test
-    public void testDenseTable() throws Throwable
-    {
-        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
-                                       "pk1 varint PRIMARY KEY," +
-                                       "reg1 int)" +
-                                       " WITH COMPACT STORAGE");
-
-        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
-
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
-        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
-                        "    pk1 varint,\n" +
-                        "    reg1 int,\n" +
-                        "    PRIMARY KEY (pk1)\n" +
-                        ") WITH COMPACT STORAGE\n" +
-                        "    AND ID = " + cfs.metadata.id + "\n";
-
-        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
-                   actual.contains(expected));
-    }
-
-    @Test
-    public void testStaticCompactTable()
-    {
-        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
-                                       "pk1 varint PRIMARY KEY," +
-                                       "reg1 int," +
-                                       "reg2 int)" +
-                                       " WITH COMPACT STORAGE");
-
-        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
-        assertTrue(SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true).contains(
-        "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
-        "    pk1 varint,\n" +
-        "    reg1 int,\n" +
-        "    reg2 int,\n" +
-        "    PRIMARY KEY (pk1)\n" +
-        ") WITH COMPACT STORAGE\n" +
-        "    AND ID = " + cfs.metadata.id));
-    }
-
-    @Test
-    public void testStaticCompactWithCounters()
-    {
-        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
-                                       "pk1 varint PRIMARY KEY," +
-                                       "reg1 counter," +
-                                       "reg2 counter)" +
-                                       " WITH COMPACT STORAGE");
-
-
-        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
-
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
-        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
-                          "    pk1 varint,\n" +
-                          "    reg1 counter,\n" +
-                          "    reg2 counter,\n" +
-                          "    PRIMARY KEY (pk1)\n" +
-                          ") WITH COMPACT STORAGE\n" +
-                          "    AND ID = " + cfs.metadata.id + "\n";
-        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
-                   actual.contains(expected));
-    }
-
-    @Test
-    public void testDenseCompactTableWithoutRegulars() throws Throwable
-    {
-        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
-                                       "pk1 varint," +
-                                       "ck1 int," +
-                                       "PRIMARY KEY (pk1, ck1))" +
-                                       " WITH COMPACT STORAGE");
-
-        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
-
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
-        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
-                          "    pk1 varint,\n" +
-                          "    ck1 int,\n" +
-                          "    PRIMARY KEY (pk1, ck1)\n" +
-                          ") WITH COMPACT STORAGE\n" +
-                          "    AND ID = " + cfs.metadata.id;
-        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
-                   actual.contains(expected));
-    }
-
-    @Test
-    public void testCompactDynamic() throws Throwable
-    {
-        String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
-                                       "pk1 varint," +
-                                       "ck1 int," +
-                                       "reg int," +
-                                       "PRIMARY KEY (pk1, ck1))" +
-                                       " WITH COMPACT STORAGE");
-
-        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
-
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
-        String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
-                          "    pk1 varint,\n" +
-                          "    ck1 int,\n" +
-                          "    reg int,\n" +
-                          "    PRIMARY KEY (pk1, ck1)\n" +
-                          ") WITH COMPACT STORAGE\n" +
-                          "    AND ID = " + cfs.metadata.id;
-
-        assertTrue(String.format("Expected\n%s\nto contain\n%s", actual, expected),
-                   actual.contains(expected));
-    }
-
-    /**
-     * PartitionUpdateTest
-     */
-
-    @Test
-    public void testOperationCountWithCompactTable()
-    {
-        createTable("CREATE TABLE %s (key text PRIMARY KEY, a int) WITH COMPACT STORAGE");
-        TableMetadata cfm = currentTableMetadata();
-
-        PartitionUpdate update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").add("a", 1)
-                                                                                                 .buildUpdate();
-        Assert.assertEquals(1, update.operationCount());
-
-        update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").buildUpdate();
-        Assert.assertEquals(0, update.operationCount());
-    }
-
-    /**
-     * AlterTest
-     */
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testAlterWithCompactStaticFormat() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
-
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "ALTER TABLE %s RENAME column1 TO column2");
-    }
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testAlterWithCompactNonStaticFormat() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "ALTER TABLE %s RENAME column1 TO column2");
-
-        createTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "ALTER TABLE %s RENAME column1 TO column2");
-    }
-
-    /**
-     * CreateTest
-     */
-
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testCreateIndextWithCompactStaticFormat() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "CREATE INDEX column1_index on %s (column1)");
-        assertInvalidMessage("Undefined column name value in table",
-                             "CREATE INDEX value_index on %s (value)");
-    }
-
-    /**
-     * DeleteTest
-     */
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testDeleteWithCompactStaticFormat() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
-        testDeleteWithCompactFormat();
-
-        // if column1 is present, hidden column is called column2
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
-        assertInvalidMessage("Undefined column name column2 in table",
-                             "DELETE FROM %s WHERE a = 1 AND column2= 1");
-        assertInvalidMessage("Undefined column name column2 in table",
-                             "DELETE FROM %s WHERE a = 1 AND column2 = 1 AND value1 = 1");
-        assertInvalidMessage("Undefined column name column2",
-                             "DELETE column2 FROM %s WHERE a = 1");
-
-        // if value is present, hidden column is called value1
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
-        assertInvalidMessage("Undefined column name value1 in table",
-                             "DELETE FROM %s WHERE a = 1 AND value1 = 1");
-        assertInvalidMessage("Undefined column name value1 in table",
-                             "DELETE FROM %s WHERE a = 1 AND value1 = 1 AND column1 = 1");
-        assertInvalidMessage("Undefined column name value1",
-                             "DELETE value1 FROM %s WHERE a = 1");
-    }
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testDeleteWithCompactNonStaticFormat() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b) VALUES (1, 1)");
-        execute("INSERT INTO %s (a, b) VALUES (2, 1)");
-        assertRows(execute("SELECT a, b FROM %s"),
-                   row(1, 1),
-                   row(2, 1));
-        testDeleteWithCompactFormat();
-
-        createTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, v) VALUES (1, 1, 3)");
-        execute("INSERT INTO %s (a, b, v) VALUES (2, 1, 4)");
-        assertRows(execute("SELECT a, b, v FROM %s"),
-                   row(1, 1, 3),
-                   row(2, 1, 4));
-        testDeleteWithCompactFormat();
-    }
-
-    private void testDeleteWithCompactFormat() throws Throwable
-    {
-        assertInvalidMessage("Undefined column name value in table",
-                             "DELETE FROM %s WHERE a = 1 AND value = 1");
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "DELETE FROM %s WHERE a = 1 AND column1= 1");
-        assertInvalidMessage("Undefined column name value in table",
-                             "DELETE FROM %s WHERE a = 1 AND value = 1 AND column1 = 1");
-        assertInvalidMessage("Undefined column name value",
-                             "DELETE value FROM %s WHERE a = 1");
-        assertInvalidMessage("Undefined column name column1",
-                             "DELETE column1 FROM %s WHERE a = 1");
-    }
-
-    /**
-     * InsertTest
-     */
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testInsertWithCompactStaticFormat() throws Throwable
-    {
-        testInsertWithCompactTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
-
-        // if column1 is present, hidden column is called column2
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, c, column1) VALUES (1, 1, 1, 1)");
-        assertInvalidMessage("Undefined column name column2",
-                             "INSERT INTO %s (a, b, c, column2) VALUES (1, 1, 1, 1)");
-
-        // if value is present, hidden column is called value1
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, c, value) VALUES (1, 1, 1, 1)");
-        assertInvalidMessage("Undefined column name value1",
-                             "INSERT INTO %s (a, b, c, value1) VALUES (1, 1, 1, 1)");
-    }
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testInsertWithCompactNonStaticFormat() throws Throwable
-    {
-        testInsertWithCompactTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        testInsertWithCompactTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-    }
-
-    private void testInsertWithCompactTable(String tableQuery) throws Throwable
-    {
-        createTable(tableQuery);
-
-        // pass correct types to the hidden columns
-        assertInvalidMessage("Undefined column name column1",
-                             "INSERT INTO %s (a, b, column1) VALUES (?, ?, ?)",
-                             1, 1, 1, ByteBufferUtil.bytes('a'));
-        assertInvalidMessage("Undefined column name value",
-                             "INSERT INTO %s (a, b, value) VALUES (?, ?, ?)",
-                             1, 1, 1, ByteBufferUtil.bytes('a'));
-        assertInvalidMessage("Undefined column name column1",
-                             "INSERT INTO %s (a, b, column1, value) VALUES (?, ?, ?, ?)",
-                             1, 1, 1, ByteBufferUtil.bytes('a'), ByteBufferUtil.bytes('b'));
-        assertInvalidMessage("Undefined column name value",
-                             "INSERT INTO %s (a, b, value, column1) VALUES (?, ?, ?, ?)",
-                             1, 1, 1, ByteBufferUtil.bytes('a'), ByteBufferUtil.bytes('b'));
-
-        // pass incorrect types to the hidden columns
-        assertInvalidMessage("Undefined column name value",
-                             "INSERT INTO %s (a, b, value) VALUES (?, ?, ?)",
-                             1, 1, 1, 1);
-        assertInvalidMessage("Undefined column name column1",
-                             "INSERT INTO %s (a, b, column1) VALUES (?, ?, ?)",
-                             1, 1, 1, 1);
-        assertEmpty(execute("SELECT * FROM %s"));
-
-        // pass null to the hidden columns
-        assertInvalidMessage("Undefined column name value",
-                             "INSERT INTO %s (a, b, value) VALUES (?, ?, ?)",
-                             1, 1, null);
-        assertInvalidMessage("Undefined column name column1",
-                             "INSERT INTO %s (a, b, column1) VALUES (?, ?, ?)",
-                             1, 1, null);
-    }
-
-    /**
-     * SelectTest
-     */
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testSelectWithCompactStaticFormat() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, c) VALUES (1, 1, 1)");
-        execute("INSERT INTO %s (a, b, c) VALUES (2, 1, 1)");
-        assertRows(execute("SELECT a, b, c FROM %s"),
-                   row(1, 1, 1),
-                   row(2, 1, 1));
-        testSelectWithCompactFormat();
-
-        // if column column1 is present, hidden column is called column2
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, c, column1) VALUES (1, 1, 1, 1)");
-        execute("INSERT INTO %s (a, b, c, column1) VALUES (2, 1, 1, 2)");
-        assertRows(execute("SELECT a, b, c, column1 FROM %s"),
-                   row(1, 1, 1, 1),
-                   row(2, 1, 1, 2));
-        assertInvalidMessage("Undefined column name column2 in table",
-                             "SELECT a, column2, value FROM %s");
-
-        // if column value is present, hidden column is called value1
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, c, value) VALUES (1, 1, 1, 1)");
-        execute("INSERT INTO %s (a, b, c, value) VALUES (2, 1, 1, 2)");
-        assertRows(execute("SELECT a, b, c, value FROM %s"),
-                   row(1, 1, 1, 1),
-                   row(2, 1, 1, 2));
-        assertInvalidMessage("Undefined column name value1 in table",
-                             "SELECT a, value1, value FROM %s");
-    }
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testSelectWithCompactNonStaticFormat() throws Throwable
-    {
-        createTable("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b) VALUES (1, 1)");
-        execute("INSERT INTO %s (a, b) VALUES (2, 1)");
-        assertRows(execute("SELECT a, b FROM %s"),
-                   row(1, 1),
-                   row(2, 1));
-        testSelectWithCompactFormat();
-
-        createTable("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, v) VALUES (1, 1, 3)");
-        execute("INSERT INTO %s (a, b, v) VALUES (2, 1, 4)");
-        assertRows(execute("SELECT a, b, v FROM %s"),
-                   row(1, 1, 3),
-                   row(2, 1, 4));
-        testSelectWithCompactFormat();
-    }
-
-    private void testSelectWithCompactFormat() throws Throwable
-    {
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "SELECT column1 FROM %s");
-        assertInvalidMessage("Undefined column name value in table",
-                             "SELECT value FROM %s");
-        assertInvalidMessage("Undefined column name value in table",
-                             "SELECT value, column1 FROM %s");
-        assertInvalid("Undefined column name column1 in table ('column1 = NULL')",
-                      "SELECT * FROM %s WHERE column1 = null ALLOW FILTERING");
-        assertInvalid("Undefined column name value in table ('value = NULL')",
-                      "SELECT * FROM %s WHERE value = null ALLOW FILTERING");
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "SELECT WRITETIME(column1) FROM %s");
-        assertInvalidMessage("Undefined column name value in table",
-                             "SELECT WRITETIME(value) FROM %s");
-    }
-
-    /**
-     * UpdateTest
-     */
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testUpdateWithCompactStaticFormat() throws Throwable
-    {
-        testUpdateWithCompactFormat("CREATE TABLE %s (a int PRIMARY KEY, b int, c int) WITH COMPACT STORAGE");
-
-        assertInvalidMessage("Undefined column name column1 in table",
-                             "UPDATE %s SET b = 1 WHERE column1 = ?",
-                             ByteBufferUtil.bytes('a'));
-        assertInvalidMessage("Undefined column name value in table",
-                             "UPDATE %s SET b = 1 WHERE value = ?",
-                             ByteBufferUtil.bytes('a'));
-
-        // if column1 is present, hidden column is called column2
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, column1 int) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, c, column1) VALUES (1, 1, 1, 1)");
-        execute("UPDATE %s SET column1 = 6 WHERE a = 1");
-        assertInvalidMessage("Undefined column name column2", "UPDATE %s SET column2 = 6 WHERE a = 0");
-        assertInvalidMessage("Undefined column name value", "UPDATE %s SET value = 6 WHERE a = 0");
-
-        // if value is present, hidden column is called value1
-        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int, c int, value int) WITH COMPACT STORAGE");
-        execute("INSERT INTO %s (a, b, c, value) VALUES (1, 1, 1, 1)");
-        execute("UPDATE %s SET value = 6 WHERE a = 1");
-        assertInvalidMessage("Undefined column name column1", "UPDATE %s SET column1 = 6 WHERE a = 1");
-        assertInvalidMessage("Undefined column name value1", "UPDATE %s SET value1 = 6 WHERE a = 1");
-    }
-
-    /**
-     * Test for CASSANDRA-13917
-     */
-    @Test
-    public void testUpdateWithCompactNonStaticFormat() throws Throwable
-    {
-        testUpdateWithCompactFormat("CREATE TABLE %s (a int, b int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-        testUpdateWithCompactFormat("CREATE TABLE %s (a int, b int, v int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE");
-    }
-
-    private void testUpdateWithCompactFormat(String tableQuery) throws Throwable
-    {
-        createTable(tableQuery);
-        // pass correct types to hidden columns
-        assertInvalidMessage("Undefined column name column1",
-                             "UPDATE %s SET column1 = ? WHERE a = 0",
-                             ByteBufferUtil.bytes('a'));
-        assertInvalidMessage("Undefined column name value",
-                             "UPDATE %s SET value = ? WHERE a = 0",
-                             ByteBufferUtil.bytes('a'));
-
-        // pass incorrect types to hidden columns
-        assertInvalidMessage("Undefined column name column1", "UPDATE %s SET column1 = 6 WHERE a = 0");
-        assertInvalidMessage("Undefined column name value", "UPDATE %s SET value = 6 WHERE a = 0");
-    }
-}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactTableTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactTableTest.java
index 589490f..2281e46 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactTableTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactTableTest.java
@@ -27,6 +27,7 @@
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaChangeListener;
+import org.apache.cassandra.schema.TableMetadata;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -71,7 +72,8 @@
         // Verify that schema change listeners are told statements are affected with DROP COMPACT STORAGE.
         SchemaChangeListener listener = new SchemaChangeListener()
         {
-            public void onAlterTable(String keyspace, String table, boolean affectsStatements)
+            @Override
+            public void onAlterTable(TableMetadata before, TableMetadata after, boolean affectsStatements)
             {
                 assertTrue(affectsStatements);
             }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
index 9b7cfc0..68751cd 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
@@ -21,14 +21,19 @@
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Map;
 import java.util.UUID;
 
+import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.Duration;
 import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.memtable.SkipListMemtable;
+import org.apache.cassandra.db.memtable.TestMemtable;
 import org.apache.cassandra.db.partitions.Partition;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -38,6 +43,7 @@
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.schema.MemtableParams;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.SchemaKeyspaceTables;
@@ -53,6 +59,7 @@
 import static org.apache.cassandra.cql3.Duration.NANOS_PER_MINUTE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -405,13 +412,27 @@
     }
 
     /**
-     * Test {@link ConfigurationException} is thrown on create keyspace without any options.
+     * Test {@link ConfigurationException} is not thrown when creating a NetworkTopologyStrategy keyspace without any options.
      */
     @Test
-    public void testConfigurationExceptionThrownWhenCreateKeyspaceWithNoOptions() throws Throwable
+    public void testCreateKeyspaceWithNetworkTopologyStrategyNoOptions() throws Throwable
     {
-        assertInvalidThrow(ConfigurationException.class, "CREATE KEYSPACE testXYZ with replication = { 'class': 'NetworkTopologyStrategy' }");
-        assertInvalidThrow(ConfigurationException.class, "CREATE KEYSPACE testXYZ WITH replication = { 'class' : 'SimpleStrategy' }");
+        schemaChange("CREATE KEYSPACE testXYZ with replication = { 'class': 'NetworkTopologyStrategy' }");
+
+        // clean-up
+        execute("DROP KEYSPACE IF EXISTS testXYZ");
+    }
+
+    /**
+     * Test {@link ConfigurationException} is not thrown when creating a SimpleStrategy keyspace without any options.
+     */
+    @Test
+    public void testCreateKeyspaceWithSimpleStrategyNoOptions() throws Throwable
+    {
+        schemaChange("CREATE KEYSPACE testXYZ WITH replication = { 'class' : 'SimpleStrategy' }");
+
+        // clean-up
+        execute("DROP KEYSPACE IF EXISTS testXYZ");
     }
 
     @Test
@@ -572,97 +593,125 @@
             assertInvalidSyntaxMessage("no viable alternative at input 'WITH'", stmt);
     }
 
+    public static class InvalidMemtableFactoryMethod
+    {
+        @SuppressWarnings("unused")
+        public static String factory(Map<String, String> options)
+        {
+            return "invalid";
+        }
+    }
+
+    public static class InvalidMemtableFactoryField
+    {
+        @SuppressWarnings("unused")
+        public static String FACTORY = "invalid";
+    }
+
+    @Test
+    public void testCreateTableWithMemtable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))");
+        assertSame(MemtableParams.DEFAULT.factory(), getCurrentColumnFamilyStore().metadata().params.memtable.factory());
+
+        assertSchemaOption("memtable", null);
+
+        testMemtableConfig("skiplist", SkipListMemtable.FACTORY, SkipListMemtable.class);
+        testMemtableConfig("skiplist_remapped", SkipListMemtable.FACTORY, SkipListMemtable.class);
+        testMemtableConfig("test_fullname", TestMemtable.FACTORY, SkipListMemtable.class);
+        testMemtableConfig("test_shortname", SkipListMemtable.FACTORY, SkipListMemtable.class);
+        testMemtableConfig("default", MemtableParams.DEFAULT.factory(), SkipListMemtable.class);
+
+        assertThrowsConfigurationException("The 'class_name' option must be specified.",
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'test_empty_class';");
+
+        assertThrowsConfigurationException("The 'class_name' option must be specified.",
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'test_missing_class';");
+
+        assertThrowsConfigurationException("Memtable class org.apache.cassandra.db.memtable.SkipListMemtable does not accept any futher parameters, but {invalid=throw} were given.",
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'test_invalid_param';");
+
+        assertThrowsConfigurationException("Could not create memtable factory for class NotExisting",
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'test_unknown_class';");
+
+        assertThrowsConfigurationException("Memtable class org.apache.cassandra.db.memtable.TestMemtable does not accept any futher parameters, but {invalid=throw} were given.",
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'test_invalid_extra_param';");
+
+        assertThrowsConfigurationException("Could not create memtable factory for class " + InvalidMemtableFactoryMethod.class.getName(),
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'test_invalid_factory_method';");
+
+        assertThrowsConfigurationException("Could not create memtable factory for class " + InvalidMemtableFactoryField.class.getName(),
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'test_invalid_factory_field';");
+
+        assertThrowsConfigurationException("Memtable configuration \"unknown\" not found.",
+                                           "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                                           + " WITH memtable = 'unknown';");
+    }
+
+    private void testMemtableConfig(String memtableConfig, Memtable.Factory factoryInstance, Class<? extends Memtable> memtableClass) throws Throwable
+    {
+        createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
+                    + " WITH memtable = '" + memtableConfig + "';");
+        assertSame(factoryInstance, getCurrentColumnFamilyStore().metadata().params.memtable.factory());
+        Assert.assertTrue(memtableClass.isInstance(getCurrentColumnFamilyStore().getTracker().getView().getCurrentMemtable()));
+
+        assertSchemaOption("memtable", MemtableParams.DEFAULT.configurationKey().equals(memtableConfig) ? null : memtableConfig);
+    }
+
+    void assertSchemaOption(String option, Object expected) throws Throwable
+    {
+        assertRows(execute(format("SELECT " + option + " FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
+                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
+                                  SchemaKeyspaceTables.TABLES),
+                           KEYSPACE,
+                           currentTable()),
+                   row(expected));
+    }
+
     @Test
     public void testCreateTableWithCompression() throws Throwable
     {
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.LZ4Compressor"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                 + " WITH compression = { 'class' : 'SnappyCompressor', 'chunk_length_in_kb' : 32 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                 + " WITH compression = { 'class' : 'SnappyCompressor', 'chunk_length_in_kb' : 32, 'enabled' : true };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                 + " WITH compression = { 'sstable_compression' : 'SnappyCompressor', 'chunk_length_kb' : 32 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "32", "class", "org.apache.cassandra.io.compress.SnappyCompressor"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                 + " WITH compression = { 'sstable_compression' : 'SnappyCompressor', 'min_compress_ratio' : 2 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.SnappyCompressor", "min_compress_ratio", "2.0")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.SnappyCompressor", "min_compress_ratio", "2.0"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                     + " WITH compression = { 'sstable_compression' : 'SnappyCompressor', 'min_compress_ratio' : 1 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.SnappyCompressor", "min_compress_ratio", "1.0")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.SnappyCompressor", "min_compress_ratio", "1.0"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                     + " WITH compression = { 'sstable_compression' : 'SnappyCompressor', 'min_compress_ratio' : 0 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.SnappyCompressor")));
+        assertSchemaOption("compression", map("chunk_length_in_kb", "16", "class", "org.apache.cassandra.io.compress.SnappyCompressor"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                 + " WITH compression = { 'sstable_compression' : '', 'chunk_length_kb' : 32 };");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("enabled", "false")));
+        assertSchemaOption("compression", map("enabled", "false"));
 
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
                 + " WITH compression = { 'enabled' : 'false'};");
-
-        assertRows(execute(format("SELECT compression FROM %s.%s WHERE keyspace_name = ? and table_name = ?;",
-                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
-                                  SchemaKeyspaceTables.TABLES),
-                           KEYSPACE,
-                           currentTable()),
-                   row(map("enabled", "false")));
+        assertSchemaOption("compression", map("enabled", "false"));
 
         assertThrowsConfigurationException("Missing sub-option 'class' for the 'compression' option.",
                                            "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
@@ -740,4 +789,5 @@
             return Collections.emptyList();
         }
     }
+
 }
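The memtable assertions above imply a pluggable factory contract: a named configuration points at a class, and that class is expected to expose either a static FACTORY field or a static factory(Map<String, String>) method yielding a Memtable.Factory (the two Invalid* helpers above are rejected precisely because they return a String instead). A minimal sketch of a provider that would satisfy that implied contract is shown below; the class name is hypothetical and it simply delegates to SkipListMemtable.FACTORY, which the tests above already treat as a valid Memtable.Factory.

import java.util.Map;

import org.apache.cassandra.db.memtable.Memtable;
import org.apache.cassandra.db.memtable.SkipListMemtable;

// Illustrative provider only; a real provider would typically expose just one of the two forms.
public class ExampleMemtableProvider
{
    // Field form, as implied by the InvalidMemtableFactoryField case above.
    public static final Memtable.Factory FACTORY = SkipListMemtable.FACTORY;

    // Method form, as implied by the InvalidMemtableFactoryMethod case above.
    public static Memtable.Factory factory(Map<String, String> options)
    {
        // A real provider would validate and consume the options; this sketch ignores them.
        return SkipListMemtable.FACTORY;
    }
}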
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
index eda49a6..980b791 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
@@ -28,7 +28,6 @@
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.utils.btree.BTree;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java
index 19aba64..d385639 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java
@@ -17,9 +17,9 @@
  */
 package org.apache.cassandra.cql3.validation.operations;
 
-import java.io.File;
 import java.util.List;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionCollectionsTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionCollectionsTest.java
index 7ad41e3..c0a5139 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionCollectionsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionCollectionsTest.java
@@ -154,8 +154,8 @@
             checkInvalidUDT("v >= null", v, InvalidRequestException.class);
             checkInvalidUDT("v IN null", v, SyntaxException.class);
             checkInvalidUDT("v IN 367", v, SyntaxException.class);
-            checkInvalidUDT("v CONTAINS KEY 123", v, SyntaxException.class);
-            checkInvalidUDT("v CONTAINS 'bar'", v, SyntaxException.class);
+            checkInvalidUDT("v CONTAINS KEY 123", v, InvalidRequestException.class);
+            checkInvalidUDT("v CONTAINS 'bar'", v, InvalidRequestException.class);
 
             /////////////////// null suffix on stored udt ////////////////////
             v = userType("a", 0, "b", null);
@@ -433,21 +433,35 @@
 
     void checkAppliesUDT(String condition, Object value) throws Throwable
     {
+        // UPDATE statement
         assertRows(execute("UPDATE %s SET v = ? WHERE k = 0 IF " + condition, value), row(true));
         assertRows(execute("SELECT * FROM %s"), row(0, value));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k = 0 IF " + condition), row(true));
+        assertEmpty(execute("SELECT * FROM %s"));
+        execute("INSERT INTO %s (k, v) VALUES (0, ?)", value);
     }
 
     void checkDoesNotApplyUDT(String condition, Object value) throws Throwable
     {
+        // UPDATE statement
         assertRows(execute("UPDATE %s SET v = ? WHERE k = 0 IF " + condition, value),
                    row(false, value));
         assertRows(execute("SELECT * FROM %s"), row(0, value));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k = 0 IF " + condition),
+                   row(false, value));
+        assertRows(execute("SELECT * FROM %s"), row(0, value));
     }
 
     void checkInvalidUDT(String condition, Object value, Class<? extends Throwable> expected) throws Throwable
     {
+        // UPDATE statement
         assertInvalidThrow(expected, "UPDATE %s SET v = ?  WHERE k = 0 IF " + condition, value);
         assertRows(execute("SELECT * FROM %s"), row(0, value));
+        // DELETE statement
+        assertInvalidThrow(expected, "DELETE FROM %s WHERE k = 0 IF " + condition);
+        assertRows(execute("SELECT * FROM %s"), row(0, value));
     }
 
     /**
@@ -472,10 +486,12 @@
             check_applies_list("l < ['z']");
             check_applies_list("l <= ['z']");
             check_applies_list("l IN (null, ['foo', 'bar', 'foobar'], ['a'])");
+            check_applies_list("l CONTAINS 'bar'");
 
             // multiple conditions
             check_applies_list("l > ['aaa', 'bbb'] AND l > ['aaa']");
             check_applies_list("l != null AND l IN (['foo', 'bar', 'foobar'])");
+            check_applies_list("l CONTAINS 'foo' AND l CONTAINS 'foobar'");
 
             // should not apply
             check_does_not_apply_list("l = ['baz']");
@@ -486,10 +502,12 @@
             check_does_not_apply_list("l <= ['a']");
             check_does_not_apply_list("l IN (['a'], null)");
             check_does_not_apply_list("l IN ()");
+            check_does_not_apply_list("l CONTAINS 'baz'");
 
             // multiple conditions
             check_does_not_apply_list("l IN () AND l IN (['foo', 'bar', 'foobar'])");
             check_does_not_apply_list("l > ['zzz'] AND l < ['zzz']");
+            check_does_not_apply_list("l CONTAINS 'bar' AND l CONTAINS 'baz'");
 
             check_invalid_list("l = [null]", InvalidRequestException.class);
             check_invalid_list("l < null", InvalidRequestException.class);
@@ -498,30 +516,42 @@
             check_invalid_list("l >= null", InvalidRequestException.class);
             check_invalid_list("l IN null", SyntaxException.class);
             check_invalid_list("l IN 367", SyntaxException.class);
-            check_invalid_list("l CONTAINS KEY 123", SyntaxException.class);
-
-            // not supported yet
-            check_invalid_list("m CONTAINS 'bar'", SyntaxException.class);
+            check_invalid_list("l CONTAINS KEY 123", InvalidRequestException.class);
+            check_invalid_list("l CONTAINS null", InvalidRequestException.class);
         }
     }
 
     void check_applies_list(String condition) throws Throwable
     {
+        // UPDATE statement
         assertRows(execute("UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF " + condition), row(true));
         assertRows(execute("SELECT * FROM %s"), row(0, list("foo", "bar", "foobar")));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k=0 IF " + condition), row(true));
+        assertEmpty(execute("SELECT * FROM %s"));
+        execute("INSERT INTO %s(k, l) VALUES (0, ['foo', 'bar', 'foobar'])");
     }
 
     void check_does_not_apply_list(String condition) throws Throwable
     {
+        // UPDATE statement
         assertRows(execute("UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF " + condition),
                    row(false, list("foo", "bar", "foobar")));
         assertRows(execute("SELECT * FROM %s"), row(0, list("foo", "bar", "foobar")));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k=0 IF " + condition),
+                   row(false, list("foo", "bar", "foobar")));
+        assertRows(execute("SELECT * FROM %s"), row(0, list("foo", "bar", "foobar")));
     }
 
     void check_invalid_list(String condition, Class<? extends Throwable> expected) throws Throwable
     {
+        // UPDATE statement
         assertInvalidThrow(expected, "UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF " + condition);
         assertRows(execute("SELECT * FROM %s"), row(0, list("foo", "bar", "foobar")));
+        // DELETE statement
+        assertInvalidThrow(expected, "DELETE FROM %s WHERE k=0 IF " + condition);
+        assertRows(execute("SELECT * FROM %s"), row(0, list("foo", "bar", "foobar")));
     }
 
     /**
@@ -543,7 +573,8 @@
                                  "DELETE FROM %s WHERE k=0 IF l[?] = ?", null, "foobar");
             assertInvalidMessage("Invalid negative list index -2",
                                  "DELETE FROM %s WHERE k=0 IF l[?] = ?", -2, "foobar");
-
+            assertInvalidSyntax("DELETE FROM %s WHERE k=0 IF l[?] CONTAINS ?", 0, "bar");
+            assertInvalidSyntax("DELETE FROM %s WHERE k=0 IF l[?] CONTAINS KEY ?", 0, "bar");
             assertRows(execute("DELETE FROM %s WHERE k=0 IF l[?] = ?", 1, null), row(false, list("foo", "bar", "foobar")));
             assertRows(execute("DELETE FROM %s WHERE k=0 IF l[?] = ?", 1, "foobar"), row(false, list("foo", "bar", "foobar")));
             assertRows(execute("SELECT * FROM %s"), row(0, list("foo", "bar", "foobar")));
@@ -631,10 +662,12 @@
             check_applies_set("s < {'z'}");
             check_applies_set("s <= {'z'}");
             check_applies_set("s IN (null, {'bar', 'foo'}, {'a'})");
+            check_applies_set("s CONTAINS 'foo'");
 
             // multiple conditions
             check_applies_set("s > {'a'} AND s < {'z'}");
             check_applies_set("s IN (null, {'bar', 'foo'}, {'a'}) AND s IN ({'a'}, {'bar', 'foo'}, null)");
+            check_applies_set("s CONTAINS 'foo' AND s CONTAINS 'bar'");
 
             // should not apply
             check_does_not_apply_set("s = {'baz'}");
@@ -646,6 +679,7 @@
             check_does_not_apply_set("s IN ({'a'}, null)");
             check_does_not_apply_set("s IN ()");
             check_does_not_apply_set("s != null AND s IN ()");
+            check_does_not_apply_set("s CONTAINS 'baz'");
 
             check_invalid_set("s = {null}", InvalidRequestException.class);
             check_invalid_set("s < null", InvalidRequestException.class);
@@ -654,32 +688,43 @@
             check_invalid_set("s >= null", InvalidRequestException.class);
             check_invalid_set("s IN null", SyntaxException.class);
             check_invalid_set("s IN 367", SyntaxException.class);
-            check_invalid_set("s CONTAINS KEY 123", SyntaxException.class);
+            check_invalid_set("s CONTAINS null", InvalidRequestException.class);
+            check_invalid_set("s CONTAINS KEY 123", InvalidRequestException.class);
 
             // element access is not allow for sets
             check_invalid_set("s['foo'] = 'foobar'", InvalidRequestException.class);
-
-            // not supported yet
-            check_invalid_set("m CONTAINS 'bar'", SyntaxException.class);
         }
     }
 
     void check_applies_set(String condition) throws Throwable
     {
+        // UPDATE statement
         assertRows(execute("UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF " + condition), row(true));
         assertRows(execute("SELECT * FROM %s"), row(0, set("bar", "foo")));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k=0 IF " + condition), row(true));
+        assertEmpty(execute("SELECT * FROM %s"));
+        execute("INSERT INTO %s (k, s) VALUES (0, {'bar', 'foo'})");
     }
 
     void check_does_not_apply_set(String condition) throws Throwable
     {
+        // UPDATE statement
         assertRows(execute("UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF " + condition), row(false, set("bar", "foo")));
         assertRows(execute("SELECT * FROM %s"), row(0, set("bar", "foo")));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k=0 IF " + condition), row(false, set("bar", "foo")));
+        assertRows(execute("SELECT * FROM %s"), row(0, set("bar", "foo")));
     }
 
     void check_invalid_set(String condition, Class<? extends Throwable> expected) throws Throwable
     {
+        // UPDATE statement
         assertInvalidThrow(expected, "UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF " + condition);
         assertRows(execute("SELECT * FROM %s"), row(0, set("bar", "foo")));
+        // DELETE statement
+        assertInvalidThrow(expected, "DELETE FROM %s WHERE k=0 IF " + condition);
+        assertRows(execute("SELECT * FROM %s"), row(0, set("bar", "foo")));
     }
 
     /**
@@ -704,10 +749,13 @@
             check_applies_map("m <= {'z': 'z'}");
             check_applies_map("m != {'a': 'a'}");
             check_applies_map("m IN (null, {'a': 'a'}, {'foo': 'bar'})");
+            check_applies_map("m CONTAINS 'bar'");
+            check_applies_map("m CONTAINS KEY 'foo'");
 
             // multiple conditions
             check_applies_map("m > {'a': 'a'} AND m < {'z': 'z'}");
             check_applies_map("m != null AND m IN (null, {'a': 'a'}, {'foo': 'bar'})");
+            check_applies_map("m CONTAINS 'bar' AND m CONTAINS KEY 'foo'");
 
             // should not apply
             check_does_not_apply_map("m = {'a': 'a'}");
@@ -719,18 +767,16 @@
             check_does_not_apply_map("m IN ({'a': 'a'}, null)");
             check_does_not_apply_map("m IN ()");
             check_does_not_apply_map("m = null AND m != null");
+            check_does_not_apply_map("m CONTAINS 'foo'");
+            check_does_not_apply_map("m CONTAINS KEY 'bar'");
 
             check_invalid_map("m = {null: null}", InvalidRequestException.class);
             check_invalid_map("m = {'a': null}", InvalidRequestException.class);
             check_invalid_map("m = {null: 'a'}", InvalidRequestException.class);
+            check_invalid_map("m CONTAINS null", InvalidRequestException.class);
+            check_invalid_map("m CONTAINS KEY null", InvalidRequestException.class);
             check_invalid_map("m < null", InvalidRequestException.class);
             check_invalid_map("m IN null", SyntaxException.class);
-
-            // not supported yet
-            check_invalid_map("m CONTAINS 'bar'", SyntaxException.class);
-            check_invalid_map("m CONTAINS KEY 'foo'", SyntaxException.class);
-            check_invalid_map("m CONTAINS null", SyntaxException.class);
-            check_invalid_map("m CONTAINS KEY null", SyntaxException.class);
         }
     }
 
@@ -750,6 +796,8 @@
             execute("INSERT INTO %s (k, m) VALUES (0, {'foo' : 'bar'})");
             assertInvalidMessage("Invalid null value for map element access",
                                  "DELETE FROM %s WHERE k=0 IF m[?] = ?", null, "foo");
+            assertInvalidSyntax("DELETE FROM %s WHERE k=0 IF m[?] CONTAINS ?", "foo", "bar");
+            assertInvalidSyntax("DELETE FROM %s WHERE k=0 IF m[?] CONTAINS KEY ?", "foo", "bar");
             assertRows(execute("DELETE FROM %s WHERE k=0 IF m[?] = ?", "foo", "foo"), row(false, map("foo", "bar")));
             assertRows(execute("DELETE FROM %s WHERE k=0 IF m[?] = ?", "foo", null), row(false, map("foo", "bar")));
             assertRows(execute("SELECT * FROM %s"), row(0, map("foo", "bar")));
@@ -769,17 +817,17 @@
     @Test
     public void testFrozenWithNullValues() throws Throwable
     {
-        createTable(String.format("CREATE TABLE %%s (k int PRIMARY KEY, m %s)", "frozen<list<text>>"));
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, m frozen<list<text>>)");
         execute("INSERT INTO %s (k, m) VALUES (0, null)");
 
         assertRows(execute("UPDATE %s SET m = ? WHERE k = 0 IF m = ?", list("test"), list("comparison")), row(false, null));
 
-        createTable(String.format("CREATE TABLE %%s (k int PRIMARY KEY, m %s)", "frozen<map<text,int>>"));
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, m frozen<map<text,int>>)");
         execute("INSERT INTO %s (k, m) VALUES (0, null)");
 
         assertRows(execute("UPDATE %s SET m = ? WHERE k = 0 IF m = ?", map("test", 3), map("comparison", 2)), row(false, null));
 
-        createTable(String.format("CREATE TABLE %%s (k int PRIMARY KEY, m %s)", "frozen<set<text>>"));
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, m frozen<set<text>>)");
         execute("INSERT INTO %s (k, m) VALUES (0, null)");
 
         assertRows(execute("UPDATE %s SET m = ? WHERE k = 0 IF m = ?", set("test"), set("comparison")), row(false, null));
@@ -838,20 +886,32 @@
 
     void check_applies_map(String condition) throws Throwable
     {
+        // UPDATE statement
         assertRows(execute("UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF " + condition), row(true));
         assertRows(execute("SELECT * FROM %s"), row(0, map("foo", "bar")));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k=0 IF " + condition), row(true));
+        assertEmpty(execute("SELECT * FROM %s"));
+        execute("INSERT INTO %s (k, m) VALUES (0, {'foo' : 'bar'})");
     }
 
     void check_does_not_apply_map(String condition) throws Throwable
     {
         assertRows(execute("UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF " + condition), row(false, map("foo", "bar")));
         assertRows(execute("SELECT * FROM %s"), row(0, map("foo", "bar")));
+        // DELETE statement
+        assertRows(execute("DELETE FROM %s WHERE k=0 IF " + condition), row(false, map("foo", "bar")));
+        assertRows(execute("SELECT * FROM %s"), row(0, map("foo", "bar")));
     }
 
     void check_invalid_map(String condition, Class<? extends Throwable> expected) throws Throwable
     {
+        // UPDATE statement
         assertInvalidThrow(expected, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF " + condition);
         assertRows(execute("SELECT * FROM %s"), row(0, map("foo", "bar")));
+        // DELETE statement
+        assertInvalidThrow(expected, "DELETE FROM %s WHERE k=0 IF " + condition);
+        assertRows(execute("SELECT * FROM %s"), row(0, map("foo", "bar")));
     }
 
     @Test
@@ -918,4 +978,111 @@
                                  "UPDATE %s SET v = {a: 0, b: 'bc'} WHERE k = 0 IF v.a IN ?", unset());
         }
     }
+
+    @Test
+    public void testNonFrozenEmptyCollection() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<text>)");
+        execute("INSERT INTO %s (k, l) VALUES (0, null)");
+
+        // Does apply
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l = ?", (ByteBuffer) null),
+                   row(true));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l = ?", list()),
+                   row(true));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l != ?", list("bar")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l < ?", list("a")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l <= ?", list("a")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l IN (?, ?)", null, list("bar")),
+                   row(true));
+
+        // Does not apply
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l = ?", list("bar")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l > ?", list("a")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l >= ?", list("a")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l CONTAINS ?", "bar"),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET l = null WHERE k = 0 IF l CONTAINS ?", unset()),
+                   row(false, null));
+
+        assertInvalidMessage("Invalid comparison with null for operator \"CONTAINS\"",
+                             "UPDATE %s SET l = null WHERE k = 0 IF l CONTAINS ?", (ByteBuffer) null);
+
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, s set<text>)");
+        execute("INSERT INTO %s (k, s) VALUES (0, null)");
+
+        // Does apply
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s = ?", (ByteBuffer) null),
+                   row(true));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s = ?", set()),
+                   row(true));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s != ?", set("bar")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s < ?", set("a")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s <= ?", set("a")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s IN (?, ?)", null, set("bar")),
+                   row(true));
+
+        // Does not apply
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s = ?", set("bar")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s > ?", set("a")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s >= ?", set("a")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s CONTAINS ?", "bar"),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET s = null WHERE k = 0 IF s CONTAINS ?", unset()),
+                   row(false, null));
+
+        assertInvalidMessage("Invalid comparison with null for operator \"CONTAINS\"",
+                             "UPDATE %s SET s = null WHERE k = 0 IF s CONTAINS ?", (ByteBuffer) null);
+
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, m map<text, text>) ");
+        execute("INSERT INTO %s (k, m) VALUES (0, null)");
+
+        // Does apply
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m = ?", (ByteBuffer) null),
+                   row(true));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m = ?", map()),
+                   row(true));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m != ?", map("foo","bar")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m < ?", map("a","a")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m <= ?", map("a","a")),
+                   row(true));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m IN (?, ?)", null, map("foo","bar")),
+                   row(true));
+
+        // Does not apply
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m = ?", map("foo","bar")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m > ?", map("a", "a")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m >= ?", map("a", "a")),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET m = null WHERE k = 0 IF m CONTAINS ?", "bar"),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET m = {} WHERE k = 0 IF m CONTAINS ?", unset()),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET m = {} WHERE k = 0 IF m CONTAINS KEY ?", "foo"),
+                   row(false, null));
+        assertRows(execute("UPDATE %s SET m = {} WHERE k = 0 IF m CONTAINS KEY ?", unset()),
+                   row(false, null));
+
+        assertInvalidMessage("Invalid comparison with null for operator \"CONTAINS\"",
+                             "UPDATE %s SET m = {} WHERE k = 0 IF m CONTAINS ?", (ByteBuffer) null);
+        assertInvalidMessage("Invalid comparison with null for operator \"CONTAINS KEY\"",
+                             "UPDATE %s SET m = {} WHERE k = 0 IF m CONTAINS KEY ?", (ByteBuffer) null);
+    }
+
 }
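The new coverage in this file boils down to two behaviour changes visible in the hunks above: CONTAINS and CONTAINS KEY are now accepted in IF conditions on non-frozen collections (for both UPDATE and DELETE), and null or misused operands fail with InvalidRequestException rather than SyntaxException. A condensed sketch of that behaviour in the same CQLTester idiom, assuming the class's existing imports and helpers (createTable, execute, assertRows, assertInvalidThrow, row):

    // Condensed and illustrative only; it mirrors assertions already made above.
    @Test
    public void containsConditionsSketch() throws Throwable
    {
        createTable("CREATE TABLE %s (k int PRIMARY KEY, m map<text, text>)");
        execute("INSERT INTO %s (k, m) VALUES (0, {'foo' : 'bar'})");

        // CONTAINS matches a value, CONTAINS KEY matches a key; both are now valid IF conditions.
        assertRows(execute("UPDATE %s SET m = {'foo': 'bar'} WHERE k = 0 IF m CONTAINS 'bar'"), row(true));
        assertRows(execute("DELETE FROM %s WHERE k = 0 IF m CONTAINS KEY 'foo'"), row(true));

        // A null operand is rejected at validation time with InvalidRequestException, not SyntaxException.
        assertInvalidThrow(InvalidRequestException.class,
                           "UPDATE %s SET m = {'foo': 'bar'} WHERE k = 0 IF m CONTAINS null");
    }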
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
index ac03cb5..8429ec0 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
@@ -36,6 +36,7 @@
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.schema.SchemaKeyspaceTables;
 import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.Pair;
 
 import static java.lang.String.format;
 import static org.junit.Assert.assertEquals;
@@ -61,14 +62,18 @@
     public static Collection<Object[]> data()
     {
         return Arrays.asList(new Object[]{ "3.0", (Runnable) () -> {
-                                 assertTrue(Gossiper.instance.isUpgradingFromVersionLowerThan(new CassandraVersion("3.11")));
+                                 Pair<Boolean, CassandraVersion> res = Gossiper.instance.isUpgradingFromVersionLowerThanC17653(new CassandraVersion("3.11"));
+                                 assertTrue(debugMsgCASSANDRA17653(res), res.left);
                              } },
                              new Object[]{ "3.11", (Runnable) () -> {
-                                 assertTrue(Gossiper.instance.isUpgradingFromVersionLowerThan(SystemKeyspace.CURRENT_VERSION));
-                                 assertFalse(Gossiper.instance.isUpgradingFromVersionLowerThan(new CassandraVersion("3.11")));
+                                 Pair<Boolean, CassandraVersion> res = Gossiper.instance.isUpgradingFromVersionLowerThanC17653(SystemKeyspace.CURRENT_VERSION);
+                                 assertTrue(debugMsgCASSANDRA17653(res), res.left);
+                                 res = Gossiper.instance.isUpgradingFromVersionLowerThanC17653(new CassandraVersion("3.11"));
+                                 assertFalse(debugMsgCASSANDRA17653(res), res.left);
                              } },
                              new Object[]{ SystemKeyspace.CURRENT_VERSION.toString(), (Runnable) () -> {
-                                 assertFalse(Gossiper.instance.isUpgradingFromVersionLowerThan(SystemKeyspace.CURRENT_VERSION));
+                                 Pair<Boolean, CassandraVersion> res = Gossiper.instance.isUpgradingFromVersionLowerThanC17653(SystemKeyspace.CURRENT_VERSION);
+                                 assertFalse(debugMsgCASSANDRA17653(res), res.left);
                              } });
     }
 
@@ -194,6 +199,7 @@
                              "UPDATE %s SET v1 = 'A' WHERE k = 0 AND c IN () IF EXISTS");
         assertInvalidMessage("IN on the clustering key columns is not supported with conditional updates",
                              "UPDATE %s SET v1 = 'A' WHERE k = 0 AND c IN (1, 2) IF EXISTS");
+        assertInvalidMessage("Cannot use CONTAINS on non-collection column v1", "UPDATE %s SET v1 = 'B' WHERE k = 0 IF v1 CONTAINS 'A'");
     }
 
     /**
@@ -958,4 +964,10 @@
 
         assertRows(execute("SELECT * FROM %s WHERE k = 1"), row(1, Duration.from("10s"), 6));
     }
+
+    // Helper to debug on the next occurrence of CASSANDRA-17653
+    private static String debugMsgCASSANDRA17653(Pair<Boolean, CassandraVersion> res)
+    {
+        return "Failed on Cass Version: " + (res.right == null ? "null" : res.right) + " boolean:" + res.left;
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectGroupByTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectGroupByTest.java
index 17d06ac..f46783b 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectGroupByTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectGroupByTest.java
@@ -19,7 +19,19 @@
 
 import org.junit.Test;
 
+import com.datastax.driver.core.LocalDate;
+
+import java.time.LocalDateTime;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.util.Date;
+import java.util.UUID;
+
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.serializers.SimpleDateSerializer;
+import org.apache.cassandra.serializers.TimeSerializer;
+import org.apache.cassandra.serializers.TimestampSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class SelectGroupByTest extends CQLTester
 {
@@ -2132,4 +2144,493 @@
                           row(1, 1, 4L, 3L));
         }
     }
+
+    @Test
+    public void testGroupByTimeRangesWithTimestamTypeAndWithoutPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, time timestamp, v int, primary key (pk, time))" + compactOption);
+
+            assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY",
+                                 "SELECT pk, floor(time, 2h, '2016-09-01'), v FROM %s GROUP BY floor(time, 2h, '2016-09-01')");
+
+            assertInvalidMessage("Only monotonic functions are supported in the GROUP BY clause. Got: system.floor : (timestamp, duration(constant), timestamp) -> timestamp",
+                                 "SELECT pk, floor(time, 2h, time), v FROM %s GROUP BY pk, floor(time, 2h, time)");
+
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:10:00 UTC', 1)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:12:00 UTC', 2)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:14:00 UTC', 3)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:15:00 UTC', 4)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:21:00 UTC', 5)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:22:00 UTC', 6)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:26:00 UTC', 7)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:26:20 UTC', 8)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-09-27 16:26:20 UTC', 10)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-09-27 16:30:00 UTC', 11)");
+
+            // Test prepared statement
+            assertRows(execute("SELECT pk, floor(time, 5m, ?), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m, ?)",
+                               toTimestamp("2016-09-27 00:00:00 UTC"),
+                               toTimestamp("2016-09-27 00:00:00 UTC")),
+                       row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                       row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                       row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                       row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                       row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L),
+                       row(2, toTimestamp("2016-09-27 16:30:00 UTC"), 11, 11, 1L));
+
+            for (String startingTime : new String[]{"", ", '2016-09-27 UTC'"})
+            {
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ")"),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                           row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                           row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                           row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                           row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L),
+                           row(2, toTimestamp("2016-09-27 16:30:00 UTC"), 11, 11, 1L));
+
+                // Checks with a duration below the supported precision
+                assertInvalidMessage("The floor cannot be computed for the 10us duration as precision is below 1 millisecond",
+                                     "SELECT pk, floor(time, 10us" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 10us" + startingTime + ")");
+
+                // Checks with a negative duration
+                assertInvalidMessage("Negative durations are not supported by the floor function",
+                                     "SELECT pk, floor(time, 10us" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, -5m" + startingTime + ")");
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") LIMIT 2"),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                           row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L));
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") PER PARTITION LIMIT 1"),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                           row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L));
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC"),
+                           row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                           row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                           row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L));
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC LIMIT 2"),
+                           row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                           row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L));
+            }
+
+            // Checks with a start time greater than the timestamp
+            assertInvalidMessage("The floor function starting time is greater than the provided time",
+                                 "SELECT pk, floor(time, 5m, '2016-10-27'), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m, '2016-10-27')");
+        }
+    }
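For orientation, the floor(time, 5m[, start]) groups asserted above put each row into the bucket start + n * 5m for the largest n whose bucket start does not exceed the row's timestamp. A minimal standalone sketch of that arithmetic, assuming only java.time and nothing from Cassandra (the tests above reject the case where start is after the timestamp, so negative offsets are not handled here):

import java.time.Duration;
import java.time.Instant;

// Standalone illustration of the bucketing asserted above:
// floor(t, d, start) = start + n * d, where n is the largest integer with start + n * d <= t.
public final class FloorSketch
{
    static Instant floor(Instant t, Duration d, Instant start)
    {
        long n = Duration.between(start, t).toMillis() / d.toMillis();
        return start.plus(d.multipliedBy(n));
    }

    public static void main(String[] args)
    {
        Instant start = Instant.parse("2016-09-27T00:00:00Z");
        // 16:12 falls into the 16:10 bucket with 5-minute groups, matching the first grouped row above.
        System.out.println(floor(Instant.parse("2016-09-27T16:12:00Z"), Duration.ofMinutes(5), start));
    }
}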
+
+    @Test
+    public void testGroupByTimeRangesWithTimeUUIDAndWithoutPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, time timeuuid, v int, primary key (pk, time))" + compactOption);
+
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:10:00"), 1);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:12:00"), 2);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:14:00"), 3);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:15:00"), 4);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:21:00"), 5);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:22:00"), 6);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:26:00"), 7);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:26:20"), 8);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 2, toTimeUUID("2016-09-27 16:26:00"), 10);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 2, toTimeUUID("2016-09-27 16:30:00"), 11);
+
+            for (String startingTime : new String[]{"", ", '2016-09-27 UTC'"})
+            {
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ")"),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                           row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                           row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                           row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                           row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L),
+                           row(2, toTimestamp("2016-09-27 16:30:00 UTC"), 11, 11, 1L));
+
+                assertRows(execute("SELECT pk, floor(toTimestamp(time), 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(toTimestamp(time), 5m" + startingTime + ")"),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                           row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                           row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                           row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                           row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L),
+                           row(2, toTimestamp("2016-09-27 16:30:00 UTC"), 11, 11, 1L));
+
+                // Checks with a duration below the supported precision
+                assertInvalidMessage("The floor cannot be computed for the 10us duration as precision is below 1 millisecond",
+                                     "SELECT pk, floor(time, 10us" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 10us" + startingTime + ")");
+
+                // Checks with a negative duration
+                assertInvalidMessage("Negative durations are not supported by the floor function",
+                                     "SELECT pk, floor(time, 10us" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, -5m" + startingTime + ")");
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") LIMIT 2"),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                           row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L));
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") PER PARTITION LIMIT 1"),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                           row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L));
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC"),
+                           row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                           row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                           row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                           row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L));
+
+                assertRows(execute("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC LIMIT 2"),
+                           row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                           row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L));
+            }
+
+            // Checks with a start time greater than the timestamp
+            assertInvalidMessage("The floor function starting time is greater than the provided time",
+                                 "SELECT pk, floor(time, 5m, '2016-10-27'), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m, '2016-10-27')");
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithDateTypeAndWithoutPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, time date, v int, primary key (pk, time))" + compactOption);
+
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27', 1)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-28', 2)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-29', 3)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-30', 4)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-10-01', 5)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-10-04', 6)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-10-20', 7)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-11-27', 8)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-11-01', 10)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-11-02', 11)");
+
+            for (String startingTime : new String[]{"", ", '2016-06-01'"})
+            {
+                assertRows(execute("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1mo" + startingTime + ")"),
+                           row(1, toDate("2016-09-01"), 1, 4, 4L),
+                           row(1, toDate("2016-10-01"), 5, 7, 3L),
+                           row(1, toDate("2016-11-01"), 8, 8, 1L),
+                           row(2, toDate("2016-11-01"), 10, 11, 2L));
+
+                // Checks with a duration below the supported precision
+                assertInvalidMessage("The floor on date values cannot be computed for the 1h duration as precision is below 1 day",
+                                     "SELECT pk, floor(time, 1h" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1h" + startingTime + ")");
+
+                // Checks with a negative duration
+                assertInvalidMessage("Negative durations are not supported by the floor function",
+                                     "SELECT pk, floor(time, -1mo" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, -1mo" + startingTime + ")");
+
+                assertRows(execute("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1mo" + startingTime + ") LIMIT 2"),
+                           row(1, toDate("2016-09-01"), 1, 4, 4L),
+                           row(1, toDate("2016-10-01"), 5, 7, 3L));
+
+                assertRows(execute("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1mo" + startingTime + ") PER PARTITION LIMIT 1"),
+                           row(1, toDate("2016-09-01"), 1, 4, 4L),
+                           row(2, toDate("2016-11-01"), 10, 11, 2L));
+
+                assertRows(execute("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 1mo" + startingTime + ") ORDER BY time DESC"),
+                           row(1, toDate("2016-11-01"), 8, 8, 1L),
+                           row(1, toDate("2016-10-01"), 5, 7, 3L),
+                           row(1, toDate("2016-09-01"), 1, 4, 4L));
+
+                assertRows(execute("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 1mo" + startingTime + ") ORDER BY time DESC LIMIT 2"),
+                           row(1, toDate("2016-11-01"), 8, 8, 1L),
+                           row(1, toDate("2016-10-01"), 5, 7, 3L));
+            }
+
+            // Checks with a start time greater than the timestamp
+            assertInvalidMessage("The floor function starting time is greater than the provided time",
+                                 "SELECT pk, floor(time, 1mo, '2017-01-01'), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1mo, '2017-01-01')");
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithTimeTypeAndWithoutPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, date date, time time, v int, primary key (pk, date, time))" + compactOption);
+
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:10:00', 1)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:12:00', 2)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:14:00', 3)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:15:00', 4)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:21:00', 5)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:22:00', 6)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:26:00', 7)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:26:20', 8)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-28', '16:26:20', 9)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-28', '16:26:30', 10)");
+
+            assertInvalidMessage("Functions are only supported on the last element of the GROUP BY clause",
+                                 "SELECT pk, floor(date, 1w), time, min(v), max(v), count(v) FROM %s GROUP BY pk, floor(date, 1w), time");
+
+            assertRows(execute("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s GROUP BY pk, date, floor(time, 5m)"),
+                       row(1, toDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L),
+                       row(1, toDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L),
+                       row(1, toDate("2016-09-27"), toTime("16:20:00"), 5, 6, 2L),
+                       row(1, toDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L),
+                       row(1, toDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L));
+
+            // Checks with a duration greater than the time type's maximum (1 day)
+            assertInvalidMessage("For time values, the floor can only be computed for durations smaller that a day",
+                                 "SELECT pk, date, floor(time, 10d), min(v), max(v), count(v) FROM %s GROUP BY pk, date, floor(time, 10d)");
+
+            // Checks with a negative duration
+            assertInvalidMessage("Negative durations are not supported by the floor function",
+                                 "SELECT pk, date, floor(time, -10m), min(v), max(v), count(v) FROM %s GROUP BY pk, date, floor(time, -10m)");
+
+            assertRows(execute("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s GROUP BY pk, date, floor(time, 5m) LIMIT 2"),
+                       row(1, toDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L),
+                       row(1, toDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L));
+
+            assertRows(execute("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, date, floor(time, 5m) ORDER BY date DESC, time DESC"),
+                       row(1, toDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L),
+                       row(1, toDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L),
+                       row(1, toDate("2016-09-27"), toTime("16:20:00"), 5, 6, 2L),
+                       row(1, toDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L),
+                       row(1, toDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L));
+
+            assertRows(execute("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, date, floor(time, 5m) ORDER BY date DESC, time DESC LIMIT 2"),
+                       row(1, toDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L),
+                       row(1, toDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L));
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithTimestampTypeAndPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, time timestamp, v int, primary key (pk, time))"
+                    + compactOption);
+
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:10:00 UTC', 1)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:12:00 UTC', 2)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:14:00 UTC', 3)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:15:00 UTC', 4)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:21:00 UTC', 5)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:22:00 UTC', 6)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:26:00 UTC', 7)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27 16:26:20 UTC', 8)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-09-27 16:26:20 UTC', 10)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-09-27 16:30:00 UTC', 11)");
+
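+            // Exercise the grouped queries through the native protocol with page sizes from 1 to 9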
+            for (int pageSize = 1; pageSize < 10; pageSize++)
+            {
+                for (String startingTime : new String[]{"", ", '2016-09-27 UTC'"})
+                {
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ")", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                                  row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                                  row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                                  row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L),
+                                  row(2, toTimestamp("2016-09-27 16:30:00 UTC"), 11, 11, 1L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") LIMIT 2", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                                  row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") PER PARTITION LIMIT 1", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                                  row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC LIMIT 2", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L));
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithTimeUUIDAndPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, time timeuuid, v int, primary key (pk, time))" + compactOption);
+
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:10:00"), 1);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:12:00"), 2);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:14:00"), 3);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:15:00"), 4);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:21:00"), 5);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:22:00"), 6);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:26:00"), 7);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 1, toTimeUUID("2016-09-27 16:26:20"), 8);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 2, toTimeUUID("2016-09-27 16:26:00"), 10);
+            execute("INSERT INTO %s (pk, time, v) VALUES (?, ?, ?)", 2, toTimeUUID("2016-09-27 16:30:00"), 11);
+
+            for (int pageSize = 1; pageSize < 10; pageSize++)
+            {
+                for (String startingTime : new String[]{"", ", '2016-09-27 UTC'"})
+                {
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ")", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                                  row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                                  row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                                  row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L),
+                                  row(2, toTimestamp("2016-09-27 16:30:00 UTC"), 11, 11, 1L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") LIMIT 2", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                                  row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 5m" + startingTime + ") PER PARTITION LIMIT 1", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L),
+                                  row(2, toTimestamp("2016-09-27 16:25:00 UTC"), 10, 10, 1L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:15:00 UTC"), 4, 4, 1L),
+                                  row(1, toTimestamp("2016-09-27 16:10:00 UTC"), 1, 3, 3L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 5m" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 5m" + startingTime + ") ORDER BY time DESC LIMIT 2", pageSize),
+                                  row(1, toTimestamp("2016-09-27 16:25:00 UTC"), 7, 8, 2L),
+                                  row(1, toTimestamp("2016-09-27 16:20:00 UTC"), 5, 6, 2L));
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithDateTypeAndPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, time date, v int, primary key (pk, time))" + compactOption);
+
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-27', 1)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-28', 2)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-29', 3)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-09-30', 4)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-10-01', 5)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-10-04', 6)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-10-20', 7)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (1, '2016-11-27', 8)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-11-01', 10)");
+            execute("INSERT INTO %s (pk, time, v) VALUES (2, '2016-11-02', 11)");
+
+            for (int pageSize = 1; pageSize < 10; pageSize++)
+            {
+                for (String startingTime : new String[]{"", ", '2016-06-01'"})
+                {
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1mo" + startingTime + ")", pageSize),
+                                  row(1, toLocalDate("2016-09-01"), 1, 4, 4L),
+                                  row(1, toLocalDate("2016-10-01"), 5, 7, 3L),
+                                  row(1, toLocalDate("2016-11-01"), 8, 8, 1L),
+                                  row(2, toLocalDate("2016-11-01"), 10, 11, 2L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1mo" + startingTime + ") LIMIT 2", pageSize),
+                                  row(1, toLocalDate("2016-09-01"), 1, 4, 4L),
+                                  row(1, toLocalDate("2016-10-01"), 5, 7, 3L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s GROUP BY pk, floor(time, 1mo" + startingTime + ") PER PARTITION LIMIT 1", pageSize),
+                                  row(1, toLocalDate("2016-09-01"), 1, 4, 4L),
+                                  row(2, toLocalDate("2016-11-01"), 10, 11, 2L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 1mo" + startingTime + ") ORDER BY time DESC", pageSize),
+                                  row(1, toLocalDate("2016-11-01"), 8, 8, 1L),
+                                  row(1, toLocalDate("2016-10-01"), 5, 7, 3L),
+                                  row(1, toLocalDate("2016-09-01"), 1, 4, 4L));
+
+                    assertRowsNet(executeNetWithPaging("SELECT pk, floor(time, 1mo" + startingTime + "), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, floor(time, 1mo" + startingTime + ") ORDER BY time DESC LIMIT 2", pageSize),
+                                  row(1, toLocalDate("2016-11-01"), 8, 8, 1L),
+                                  row(1, toLocalDate("2016-10-01"), 5, 7, 3L));
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testGroupByTimeRangesWithTimeTypeAndPaging() throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (pk int, date date, time time, v int, primary key (pk, date, time))" + compactOption);
+
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:10:00', 1)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:12:00', 2)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:14:00', 3)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:15:00', 4)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:21:00', 5)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:22:00', 6)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:26:00', 7)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-27', '16:26:20', 8)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-28', '16:26:20', 9)");
+            execute("INSERT INTO %s (pk, date, time, v) VALUES (1, '2016-09-28', '16:26:30', 10)");
+
+            for (int pageSize = 1; pageSize < 10; pageSize++)
+            {
+                assertRowsNet(executeNetWithPaging("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s GROUP BY pk, date, floor(time, 5m)", pageSize),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:20:00"), 5, 6, 2L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L),
+                              row(1, toLocalDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L));
+
+                assertRowsNet(executeNetWithPaging("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s GROUP BY pk, date, floor(time, 5m) LIMIT 2", pageSize),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L));
+
+                assertRowsNet(executeNetWithPaging("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, date, floor(time, 5m) ORDER BY date DESC, time DESC", pageSize),
+                              row(1, toLocalDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:20:00"), 5, 6, 2L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:15:00"), 4, 4, 1L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:10:00"), 1, 3, 3L));
+
+                assertRowsNet(executeNetWithPaging("SELECT pk, date, floor(time, 5m), min(v), max(v), count(v) FROM %s WHERE pk = 1 GROUP BY pk, date, floor(time, 5m) ORDER BY date DESC, time DESC LIMIT 2", pageSize),
+                              row(1, toLocalDate("2016-09-28"), toTime("16:25:00"), 9, 10, 2L),
+                              row(1, toLocalDate("2016-09-27"), toTime("16:25:00"), 7, 8, 2L));
+            }
+        }
+    }
+
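+    // Builds a time-based UUID whose timestamp corresponds to the given "yyyy-MM-dd HH:mm:ss" string, interpreted as UTC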
+    private static UUID toTimeUUID(String string)
+    {
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
+        LocalDateTime dateTime = LocalDateTime.parse(string, formatter);
+        long timeInMillis = dateTime.toInstant(ZoneOffset.UTC).toEpochMilli();
+        return TimeUUID.Generator.atUnixMillis(timeInMillis).asUUID();
+    }
+
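+    // Parses a CQL timestamp literal into a java.util.Date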
+    private static Date toTimestamp(String timestampAsString)
+    {
+        return new Date(TimestampSerializer.dateStringToTimestamp(timestampAsString));
+    }
+
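+    // Converts a date string into the int encoding used internally by the CQL date type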
+    private static int toDate(String dateAsString)
+    {
+        return SimpleDateSerializer.dateStringToDays(dateAsString);
+    }
+
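+    // Converts a "yyyy-MM-dd" string, interpreted as UTC, into a LocalDate built from milliseconds since the epoch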
+    private static LocalDate toLocalDate(String dateAsString)
+    {
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
+        LocalDateTime dateTime = java.time.LocalDate.parse(dateAsString, formatter).atStartOfDay();
+        long timeInMillis = dateTime.toInstant(ZoneOffset.UTC).toEpochMilli();
+
+        return LocalDate.fromMillisSinceEpoch(timeInMillis);
+    }
+
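+    // Converts a time string into the long (nanoseconds since midnight) representation of the CQL time type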
+    private static long toTime(String timeAsString)
+    {
+        return TimeSerializer.timeStringToLong(timeAsString);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectMultiColumnRelationTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectMultiColumnRelationTest.java
index 5062448..b69ee37 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectMultiColumnRelationTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectMultiColumnRelationTest.java
@@ -1930,4 +1930,46 @@
         assertInvalidMessage("Undefined column name e", "SELECT c AS e FROM %s WHERE (b, e) IN ((0, 1), (2, 4))");
         assertInvalidMessage("Undefined column name e", "SELECT c AS e FROM %s WHERE (b, e) > (0, 1) and b <= 2");
     }
+
+    @Test
+    public void testInRestrictionsWithAllowFiltering() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk int, c1 text, c2 int, c3 int, v int, primary key(pk, c1, c2, c3))");
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "0", 0, 1, 3);
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "1", 0, 2, 4);
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "1", 1, 3, 5);
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "2", 1, 4, 6);
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "2", 2, 5, 7);
+
+        assertRows(execute("SELECT * FROM %s WHERE (c2) IN ((?), (?)) ALLOW FILTERING", 1, 3),
+                   row(1, "1", 1, 3, 5),
+                   row(1, "2", 1, 4, 6));
+
+        assertRows(execute("SELECT * FROM %s WHERE c2 IN (?, ?) ALLOW FILTERING", 1, 3),
+                   row(1, "1", 1, 3, 5),
+                   row(1, "2", 1, 4, 6));
+
+        assertInvalidMessage("Multicolumn IN filters are not supported",
+                             "SELECT * FROM %s WHERE (c2, c3) IN ((?, ?), (?, ?)) ALLOW FILTERING", 1, 0, 2, 0);
+    }
+
+    @Test
+    public void testInRestrictionsWithIndex() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk int, c1 text, c2 int, c3 int, v int, primary key(pk, c1, c2, c3))");
+        createIndex("CREATE INDEX ON %s (c3)");
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "0", 0, 1, 3);
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "1", 0, 2, 4);
+        execute("INSERT INTO %s (pk, c1, c2, c3, v) values (?, ?, ?, ?, ?)", 1, "1", 1, 3, 5);
+
+        assertRows(execute("SELECT * FROM %s WHERE (c3) IN ((?), (?)) ALLOW FILTERING", 1, 3),
+                   row(1, "0", 0, 1, 3),
+                   row(1, "1", 1, 3, 5));
+
+        assertInvalidMessage("PRIMARY KEY column \"c2\" cannot be restricted as preceding column \"c1\" is not restricted",
+                             "SELECT * FROM %s WHERE (c2, c3) IN ((?, ?), (?, ?))", 1, 0, 2, 0);
+
+        assertInvalidMessage("Multicolumn IN filters are not supported",
+                             "SELECT * FROM %s WHERE (c2, c3) IN ((?, ?), (?, ?)) ALLOW FILTERING", 1, 0, 2, 0);
+    }
  }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java
index a14a2a4..fbac1ff 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java
@@ -25,14 +25,14 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import static junit.framework.Assert.assertNull;
-import static org.junit.Assert.assertEquals;
-
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
 import org.apache.cassandra.dht.ByteOrderedPartitioner;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
 /**
  * SELECT statement tests that require a ByteOrderedPartitioner
  */
@@ -368,7 +368,7 @@
 
         Object[][] rows = getRows(execute("SELECT v2 FROM %s"));
         assertEquals(0, rows[0][0]);
-        assertEquals(null, rows[1][0]);
+        assertNull(rows[1][0]);
         assertEquals(2, rows[2][0]);
 
         rows = getRows(execute("SELECT v2 FROM %s WHERE k = 1"));
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectSingleColumnRelationTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectSingleColumnRelationTest.java
index 3795ce5..17c7e87 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectSingleColumnRelationTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectSingleColumnRelationTest.java
@@ -351,9 +351,17 @@
 
         assertRows(execute("SELECT v1 FROM %s WHERE time = 1"), row("B"), row("E"));
 
-        assertInvalidMessage("IN restrictions are not supported on indexed columns",
-                             "SELECT v1 FROM %s WHERE id2 = 0 and time IN (1, 2) ALLOW FILTERING");
+        // Checks that IN restrictions are not used for index queries
+        assertInvalidMessage("PRIMARY KEY column \"time\" cannot be restricted as preceding column \"author\" is not restricted",
+                            "SELECT v1 FROM %s WHERE time IN (1, 2)");
+        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                             "SELECT v1 FROM %s WHERE id2 IN (0, 2)");
 
+        // Checks that IN queries work with filtering
+        assertRows(execute("SELECT v1 FROM %s WHERE time IN (1, 2) ALLOW FILTERING"), row("B"), row("C"), row("E"));
+        assertRows(execute("SELECT v1 FROM %s WHERE id2 IN (0, 2) ALLOW FILTERING"), row("A"), row("B"), row("D"));
+
+        // Checks index queries with filtering
         assertRows(execute("SELECT v1 FROM %s WHERE author > 'ted' AND time = 1 ALLOW FILTERING"), row("E"));
         assertRows(execute("SELECT v1 FROM %s WHERE author > 'amy' AND author < 'zoe' AND time = 0 ALLOW FILTERING"),
                            row("A"), row("D"));
@@ -639,4 +647,138 @@
         assertInvalidMessage("Cannot use CONTAINS on non-collection column b",
                              "SELECT * FROM %s WHERE b CONTAINS ?", udt);
     }
+
+    @Test
+    public void testInRestrictionWithClusteringColumn() throws Throwable
+    {
+        createTable("CREATE TABLE %s (key int, c1 int, c2 int, s1 text static, PRIMARY KEY ((key, c1), c2))");
+
+        execute("INSERT INTO %s (key, c1, c2, s1) VALUES ( 10, 11, 1, 's1')");
+        execute("INSERT INTO %s (key, c1, c2, s1) VALUES ( 10, 12, 2, 's2')");
+        execute("INSERT INTO %s (key, c1, c2, s1) VALUES ( 10, 13, 3, 's3')");
+        execute("INSERT INTO %s (key, c1, c2, s1) VALUES ( 10, 13, 4, 's4')");
+        execute("INSERT INTO %s (key, c1, c2, s1) VALUES ( 20, 21, 1, 's1')");
+        execute("INSERT INTO %s (key, c1, c2, s1) VALUES ( 20, 22, 2, 's2')");
+        execute("INSERT INTO %s (key, c1, c2, s1) VALUES ( 20, 22, 3, 's3')");
+
+        assertRows(execute("SELECT * from %s WHERE key = ? AND c1 IN (?, ?)", 10, 21, 13),
+                   row(10, 13, 3, "s4"),
+                   row(10, 13, 4, "s4"));
+
+        assertRows(execute("SELECT * from %s WHERE key = ? AND c2 IN (?, ?) ALLOW FILTERING", 20, 1, 2),
+                   row(20, 22, 2, "s3"),
+                   row(20, 21, 1, "s1"));
+
+        assertRows(execute("SELECT * from %s WHERE c1 = ? AND c2 IN (?, ?) ALLOW FILTERING", 13, 2, 3),
+                   row(10, 13, 3, "s4"));
+
+        assertRowsIgnoringOrder(execute("SELECT * from %s WHERE c2 IN (?, ?) ALLOW FILTERING", 1, 2),
+                                row(10, 11, 1, "s1"),
+                                row(10, 12, 2, "s2"),
+                                row(20, 21, 1, "s1"),
+                                row(20, 22, 2, "s3"));
+
+        assertInvalidMessage("Invalid null value in condition for column c2",
+                             "SELECT * from %s WHERE key = 10 AND c2 IN (1, null) ALLOW FILTERING");
+    }
+
+    @Test
+    public void testInRestrictionsWithAllowFiltering() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk1 int, pk2 int, c text, s int static, v int, primary key((pk1, pk2), c))");
+        execute("INSERT INTO %s (pk1, pk2, c, s, v) values (?, ?, ?, ?, ?)", 1, 0, "5", 1, 3);
+        execute("INSERT INTO %s (pk1, pk2, c, s, v) values (?, ?, ?, ?, ?)", 1, 0, "7", 1, 2);
+        execute("INSERT INTO %s (pk1, pk2, c, s, v) values (?, ?, ?, ?, ?)", 1, 1, "7", 1, 3);
+        execute("INSERT INTO %s (pk1, pk2, c, s, v) values (?, ?, ?, ?, ?)", 2, 0, "4", 2, 1);
+        execute("INSERT INTO %s (pk1, pk2, c, s, v) values (?, ?, ?, ?, ?)", 2, 3, "6", 2, 8);
+
+        // Test filtering on regular columns
+        assertRows(execute("SELECT * FROM %s WHERE v IN (?, ?) ALLOW FILTERING", 4, 3),
+                   row(1, 0, "5", 1, 3),
+                   row(1, 1, "7", 1, 3));
+
+        assertRows(execute("SELECT * FROM %s WHERE v IN ? ALLOW FILTERING", list(4, 3)),
+                   row(1, 0, "5", 1, 3),
+                   row(1, 1, "7", 1, 3));
+
+        // Test filtering on clustering columns
+        assertRows(execute("SELECT * FROM %s WHERE c IN (?, ?, ?) ALLOW FILTERING", "7", "6", "8"),
+                   row(2, 3, "6", 2, 8),
+                   row(1, 0, "7", 1, 2),
+                   row(1, 1, "7", 1, 3));
+
+        assertRows(execute("SELECT * FROM %s WHERE c IN ? ALLOW FILTERING", list("7", "6", "8")),
+                   row(2, 3, "6", 2, 8),
+                   row(1, 0, "7", 1, 2),
+                   row(1, 1, "7", 1, 3));
+
+        // Test filtering on partition keys
+        assertRows(execute("SELECT * FROM %s WHERE pk1 IN (?, ?) ALLOW FILTERING", 1, 3),
+                   row(1, 0, "5", 1, 3),
+                   row(1, 0, "7", 1, 2),
+                   row(1, 1, "7", 1, 3));
+
+        assertRows(execute("SELECT * FROM %s WHERE pk1 IN ? ALLOW FILTERING", list(1, 3)),
+                   row(1, 0, "5", 1, 3),
+                   row(1, 0, "7", 1, 2),
+                   row(1, 1, "7", 1, 3));
+
+        // Test filtering on static columns
+        assertRows(execute("SELECT * FROM %s WHERE s IN (?, ?) ALLOW FILTERING", 1, 3),
+                   row(1, 0, "5", 1, 3),
+                   row(1, 0, "7", 1, 2),
+                   row(1, 1, "7", 1, 3));
+
+        assertRows(execute("SELECT * FROM %s WHERE s IN ? ALLOW FILTERING", list(1, 3)),
+                   row(1, 0, "5", 1, 3),
+                   row(1, 0, "7", 1, 2),
+                   row(1, 1, "7", 1, 3));
+    }
+
+    @Test
+    public void testInRestrictionsWithAllowFilteringAndOrdering() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk int, c text, v int, primary key(pk, c)) WITH CLUSTERING ORDER BY (c DESC)");
+        execute("INSERT INTO %s (pk, c, v) values (?, ?, ?)", 1, "0", 5);
+        execute("INSERT INTO %s (pk, c, v) values (?, ?, ?)", 1, "1", 7);
+        execute("INSERT INTO %s (pk, c, v) values (?, ?, ?)", 1, "2", 7);
+        execute("INSERT INTO %s (pk, c, v) values (?, ?, ?)", 2, "0", 4);
+        execute("INSERT INTO %s (pk, c, v) values (?, ?, ?)", 2, "2", 6);
+
+        assertRows(execute("SELECT * FROM %s WHERE pk = ? AND c IN (?, ?, ?) ALLOW FILTERING", 1, "2", "0", "8"),
+                   row(1, "2", 7),
+                   row(1, "0", 5));
+
+        assertRows(execute("SELECT * FROM %s WHERE pk = ? AND c IN ? ORDER BY c ASC ALLOW FILTERING", 2, list("2", "8", "0")),
+                   row(2, "0", 4),
+                   row(2, "2", 6));
+
+        assertRows(execute("SELECT * FROM %s WHERE pk IN (?, ?) AND c IN (?, ?, ?) ALLOW FILTERING", 1, 2, "2", "0", "8"),
+                   row(1, "2", 7),
+                   row(1, "0", 5),
+                   row(2, "2", 6),
+                   row(2, "0", 4));
+
+        assertRows(execute("SELECT * FROM %s WHERE pk IN ? AND c IN ? ORDER BY c ASC ALLOW FILTERING", list(1, 2), list("2", "8", "0")),
+                   row(1, "0", 5),
+                   row(2, "0", 4),
+                   row(1, "2", 7),
+                   row(2, "2", 6));
+    }
+
+    @Test
+    public void testInRestrictionsWithCountersAndAllowFiltering() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk int, v counter, primary key (pk))");
+
+        assertEmpty(execute("SELECT * FROM %s WHERE v IN (?, ?) ALLOW FILTERING", 0L, 1L));
+
+        execute("UPDATE %s SET v = v + 1 WHERE pk = 1");
+        execute("UPDATE %s SET v = v + 2 WHERE pk = 2");
+        execute("UPDATE %s SET v = v + 1 WHERE pk = 3");
+
+        assertRows(execute("SELECT * FROM %s WHERE v IN (?, ?) ALLOW FILTERING", 0L, 1L),
+                   row(1, 1L),
+                   row(3, 1L));
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
index d0493cf..7bbaa85 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
@@ -1276,11 +1276,12 @@
             assertRows(execute("SELECT * FROM %s WHERE s = 1 AND d = 12 ALLOW FILTERING"),
                        row(1, 3, 1, 6, 12));
 
-            assertInvalidMessage("IN predicates on non-primary-key columns (c) is not yet supported",
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
                                  "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7)");
 
-            assertInvalidMessage("IN predicates on non-primary-key columns (c) is not yet supported",
-                                 "SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING");
+            assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2) AND c IN (6, 7) ALLOW FILTERING"),
+                       row(1, 3, 1, 6, 12),
+                       row(2, 3, 2, 7, 12));
 
             assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
                                  "SELECT * FROM %s WHERE c > 4");
@@ -1669,8 +1670,8 @@
 
         beforeAndAfterFlush(() -> {
 
-            assertInvalidMessage("IN restrictions are not supported when the query involves filtering",
-                    "SELECT * FROM %s WHERE b in (11,12) ALLOW FILTERING");
+            assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                    "SELECT * FROM %s WHERE b in (11,12)");
 
             assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
                     "SELECT * FROM %s WHERE a = 11");
@@ -1745,8 +1746,8 @@
 
         beforeAndAfterFlush(() -> {
 
-             assertInvalidMessage("IN restrictions are not supported when the query involves filtering",
-                    "SELECT * FROM %s WHERE b in (11,12) ALLOW FILTERING");
+             assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                    "SELECT * FROM %s WHERE b in (11,12)");
 
             assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
                     "SELECT * FROM %s WHERE a = 11");
@@ -2838,8 +2839,9 @@
                    row(0, Duration.from("1s")),
                    row(2, Duration.from("1s")));
 
-        assertInvalidMessage("IN predicates on non-primary-key columns (d) is not yet supported",
-                             "SELECT * FROM %s WHERE d IN (1s, 2s) ALLOW FILTERING");
+        assertRows(execute("SELECT * FROM %s WHERE d IN (1s, 3s) ALLOW FILTERING"),
+                   row(0, Duration.from("1s")),
+                   row(2, Duration.from("1s")));
 
         assertInvalidMessage("Slice restrictions are not supported on duration columns",
                              "SELECT * FROM %s WHERE d > 1s ALLOW FILTERING");
@@ -2867,11 +2869,19 @@
             execute("INSERT INTO %s (k, l) VALUES (2, [1s, 3s])");
 
             if (frozen)
+            {
                 assertRows(execute("SELECT * FROM %s WHERE l = [1s, 2s] ALLOW FILTERING"),
                            row(0, list(Duration.from("1s"), Duration.from("2s"))));
 
-            assertInvalidMessage("IN predicates on non-primary-key columns (l) is not yet supported",
-                                 "SELECT * FROM %s WHERE l IN ([1s, 2s], [2s, 3s]) ALLOW FILTERING");
+                assertRows(execute("SELECT * FROM %s WHERE l IN ([1s, 2s], [2s, 3s]) ALLOW FILTERING"),
+                           row(1, list(Duration.from("2s"), Duration.from("3s"))),
+                           row(0, list(Duration.from("1s"), Duration.from("2s"))));
+            }
+            else
+            {
+                assertInvalidMessage("Collection column 'l' (list<duration>) cannot be restricted by a 'IN' relation",
+                        "SELECT * FROM %s WHERE l IN ([1s, 2s], [2s, 3s]) ALLOW FILTERING");
+            }
 
             assertInvalidMessage("Slice restrictions are not supported on collections containing durations",
                                  "SELECT * FROM %s WHERE l > [2s, 3s] ALLOW FILTERING");
@@ -2904,11 +2914,19 @@
             execute("INSERT INTO %s (k, m) VALUES (2, {1:1s, 3:3s})");
 
             if (frozen)
+            {
                 assertRows(execute("SELECT * FROM %s WHERE m = {1:1s, 2:2s} ALLOW FILTERING"),
                            row(0, map(1, Duration.from("1s"), 2, Duration.from("2s"))));
 
-            assertInvalidMessage("IN predicates on non-primary-key columns (m) is not yet supported",
-                    "SELECT * FROM %s WHERE m IN ({1:1s, 2:2s}, {1:1s, 3:3s}) ALLOW FILTERING");
+                assertRows(execute("SELECT * FROM %s WHERE m IN ({1:1s, 2:2s}, {1:1s, 3:3s}) ALLOW FILTERING"),
+                        row(0, map(1, Duration.from("1s"), 2, Duration.from("2s"))),
+                        row(2, map(1, Duration.from("1s"), 3, Duration.from("3s"))));
+            }
+            else
+            {
+                assertInvalidMessage("Collection column 'm' (map<int, duration>) cannot be restricted by a 'IN' relation",
+                        "SELECT * FROM %s WHERE m IN ({1:1s, 2:2s}, {1:1s, 3:3s}) ALLOW FILTERING");
+            }
 
             assertInvalidMessage("Slice restrictions are not supported on collections containing durations",
                     "SELECT * FROM %s WHERE m > {1:1s, 3:3s} ALLOW FILTERING");
@@ -2939,8 +2957,9 @@
         assertRows(execute("SELECT * FROM %s WHERE t = (1, 2s) ALLOW FILTERING"),
                    row(0, tuple(1, Duration.from("2s"))));
 
-        assertInvalidMessage("IN predicates on non-primary-key columns (t) is not yet supported",
-                "SELECT * FROM %s WHERE t IN ((1, 2s), (1, 3s)) ALLOW FILTERING");
+        assertRows(execute("SELECT * FROM %s WHERE t IN ((1, 2s), (1, 3s)) ALLOW FILTERING"),
+                   row(0, tuple(1, Duration.from("2s"))),
+                   row(2, tuple(1, Duration.from("3s"))));
 
         assertInvalidMessage("Slice restrictions are not supported on tuples containing durations",
                 "SELECT * FROM %s WHERE t > (1, 2s) ALLOW FILTERING");
@@ -2970,11 +2989,22 @@
             execute("INSERT INTO %s (k, u) VALUES (2, {i: 1, d:3s})");
 
             if (frozen)
+            {
                 assertRows(execute("SELECT * FROM %s WHERE u = {i: 1, d:2s} ALLOW FILTERING"),
                            row(0, userType("i", 1, "d", Duration.from("2s"))));
 
-            assertInvalidMessage("IN predicates on non-primary-key columns (u) is not yet supported",
-                    "SELECT * FROM %s WHERE u IN ({i: 2, d:3s}, {i: 1, d:3s}) ALLOW FILTERING");
+                assertRows(execute("SELECT * FROM %s WHERE u IN ({i: 2, d:3s}, {i: 1, d:3s}) ALLOW FILTERING"),
+                        row(1, userType("i", 2, "d", Duration.from("3s"))),
+                        row(2, userType("i", 1, "d", Duration.from("3s"))));
+            }
+            else
+            {
+                assertInvalidMessage("Non-frozen UDT column 'u' (" + udt + ") cannot be restricted by any relation",
+                        "SELECT * FROM %s WHERE u = {i: 1, d:2s} ALLOW FILTERING");
+
+                assertInvalidMessage("Non-frozen UDT column 'u' (" + udt + ") cannot be restricted by any relation",
+                        "SELECT * FROM %s WHERE u IN ({i: 2, d:3s}, {i: 1, d:3s}) ALLOW FILTERING");
+            }
 
             assertInvalidMessage("Slice restrictions are not supported on UDTs containing durations",
                     "SELECT * FROM %s WHERE u > {i: 1, d:3s} ALLOW FILTERING");
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
index 5c24432..9ef2520 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
@@ -18,11 +18,11 @@
 
 package org.apache.cassandra.cql3.validation.operations;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
+
 import java.io.IOException;
 
+import org.apache.cassandra.Util;
+import org.apache.cassandra.io.util.File;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -37,10 +37,11 @@
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.rows.AbstractCell;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.apache.cassandra.tools.StandaloneScrubber;
 import org.apache.cassandra.tools.ToolRunner;
 import org.apache.cassandra.tools.ToolRunner.ToolResult;
-import org.apache.cassandra.utils.FBUtilities;
 import org.assertj.core.api.Assertions;
 
 import static org.junit.Assert.assertEquals;
@@ -268,7 +269,7 @@
         // Maybe Flush
         Keyspace ks = Keyspace.open(keyspace());
         if (flush)
-            FBUtilities.waitOnFutures(ks.flush());
+            Util.flush(ks);
 
         // Verify data
         verifyData(simple);
@@ -441,7 +442,7 @@
     {
         File destDir = Keyspace.open(keyspace()).getColumnFamilyStore(table).getDirectories().getCFDirectories().iterator().next();
         File sourceDir = getTableDir(table, simple, clustering);
-        for (File file : sourceDir.listFiles())
+        for (File file : sourceDir.tryList())
         {
             copyFile(file, destDir);
         }
@@ -457,12 +458,13 @@
         byte[] buf = new byte[65536];
         if (src.isFile())
         {
-            File target = new File(dest, src.getName());
+            File target = new File(dest, src.name());
             int rd;
-            FileInputStream is = new FileInputStream(src);
-            FileOutputStream os = new FileOutputStream(target);
+            FileInputStreamPlus is = new FileInputStreamPlus(src);
+            FileOutputStreamPlus os = new FileOutputStreamPlus(target);
             while ((rd = is.read(buf)) >= 0)
                 os.write(buf, 0, rd);
+            os.close();
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java
index 0279557..59a0616 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java
@@ -28,6 +28,8 @@
 import org.apache.cassandra.cql3.UntypedResultSet.Row;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.assertj.core.api.Assertions;
 
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
@@ -657,4 +659,14 @@
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(currentTable());
         return cfs.metric.allMemtablesLiveDataSize.getValue() == 0;
     }
+
+    @Test
+    public void testAdderNonCounter()
+    {
+        createTable("CREATE TABLE %s (pk int PRIMARY KEY, a int, b text)");
+        Assertions.assertThatThrownBy(() -> execute("UPDATE %s SET a = a + 1, b = b + 'fail' WHERE pk = 1"))
+                  .isInstanceOf(InvalidRequestException.class)
+                  // if the error ever includes "b", it's safe to update this test
+                  .hasMessage("Invalid operation (a = a + 1) for non counter column a");
+    }
 }
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/UseTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/UseTest.java
index e1498b6..b2a40a3 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/UseTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/UseTest.java
@@ -17,15 +17,41 @@
  */
 package org.apache.cassandra.cql3.validation.operations;
 
+import org.assertj.core.api.Assertions;
 import org.junit.Test;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 public class UseTest extends CQLTester
 {
     @Test
     public void testUseStatementWithBindVariable() throws Throwable
     {
+        DatabaseDescriptor.setUseStatementsEnabled(true);
         assertInvalidSyntaxMessage("Bind variables cannot be used for keyspace names", "USE ?");
     }
+
+    @Test
+    public void shouldRejectUseStatementWhenProhibited() throws Throwable
+    {
+        long useCountBefore = QueryProcessor.metrics.useStatementsExecuted.getCount();
+
+        try
+        {
+            DatabaseDescriptor.setUseStatementsEnabled(false);
+            execute("USE cql_test_keyspace");
+            fail("expected USE statement to fail with use_statements_enabled = false");
+        }
+        catch (InvalidRequestException e)
+        {
+            assertEquals(useCountBefore, QueryProcessor.metrics.useStatementsExecuted.getCount());
+            Assertions.assertThat(e).hasMessageContaining("USE statements prohibited");
+        }
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/AbstractReadQueryToCQLStringTest.java b/test/unit/org/apache/cassandra/db/AbstractReadQueryToCQLStringTest.java
new file mode 100644
index 0000000..11582b9
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/AbstractReadQueryToCQLStringTest.java
@@ -0,0 +1,813 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLStatement;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.cql3.statements.SelectStatement;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.virtual.AbstractVirtualTable;
+import org.apache.cassandra.db.virtual.SimpleDataSet;
+import org.apache.cassandra.db.virtual.VirtualKeyspace;
+import org.apache.cassandra.db.virtual.VirtualKeyspaceRegistry;
+import org.apache.cassandra.db.virtual.VirtualTable;
+import org.apache.cassandra.exceptions.RequestValidationException;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests for {@link AbstractReadQuery#toCQLString()}.
+ */
+public class AbstractReadQueryToCQLStringTest extends CQLTester
+{
+    @Test
+    public void testSkinnyTable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int)");
+
+        // column selection on unrestricted partition range query
+        test("SELECT * FROM %s");
+        test("SELECT k FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT v1 FROM %s");
+        test("SELECT v2 FROM %s");
+        test("SELECT k, v1, v2 FROM %s",
+             "SELECT v1, v2 FROM %s");
+
+        // column selection on partition directed query
+        test("SELECT * FROM %s WHERE k = 0");
+        test("SELECT k FROM %s WHERE k = 0",
+             "SELECT * FROM %s WHERE k = 0");
+        test("SELECT v1 FROM %s WHERE k = 0");
+        test("SELECT v2 FROM %s WHERE k = 0");
+        test("SELECT k, v1, v2 FROM %s WHERE k = 0",
+             "SELECT v1, v2 FROM %s WHERE k = 0");
+
+        // token restrictions
+        test("SELECT * FROM %s WHERE token(k) > 0");
+        test("SELECT * FROM %s WHERE token(k) < 0");
+        test("SELECT * FROM %s WHERE token(k) >= 0");
+        test("SELECT * FROM %s WHERE token(k) <= 0");
+        test("SELECT * FROM %s WHERE token(k) = 0",
+             "SELECT * FROM %s WHERE token(k) >= 0 AND token(k) <= 0");
+
+        // row filter without indexed column
+        test("SELECT * FROM %s WHERE v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 < 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 > 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 <= 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 >= 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k = 0 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k) > 0 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k = 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k) > 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+
+        // row filter with indexed column
+        createIndex("CREATE INDEX ON %s (v1)");
+        test("SELECT * FROM %s WHERE v1 = 1");
+        test("SELECT * FROM %s WHERE v1 < 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 > 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 <= 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 >= 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k) > 0 AND v1 = 1");
+        test("SELECT * FROM %s WHERE k = 0 AND v1 = 1",
+             "SELECT * FROM %s WHERE token(k) >= token(0) AND token(k) <= token(0) AND v1 = 1");
+
+        // grouped partition-directed queries, maybe producing multiple queries
+        test("SELECT * FROM %s WHERE k IN (0)",
+             "SELECT * FROM %s WHERE k = 0");
+        test("SELECT * FROM %s WHERE k IN (0, 1)",
+             "SELECT * FROM %s WHERE k = 0",
+             "SELECT * FROM %s WHERE k = 1");
+    }
+
+    @Test
+    public void testSkinnyTableWithMulticolumnKey() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k1 int, k2 int, v1 int, v2 int, PRIMARY KEY((k1, k2)))");
+
+        // column selection on unrestricted partition range query
+        test("SELECT * FROM %s");
+        test("SELECT k1 FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT k2 FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT v1 FROM %s");
+        test("SELECT v2 FROM %s");
+        test("SELECT k1, k2, v1, v2 FROM %s",
+             "SELECT v1, v2 FROM %s");
+
+        // column selection on partition directed query
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT k1 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT k2 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT v1 FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT v2 FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT k1, k2, v1, v2 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT v1, v2 FROM %s WHERE k1 = 1 AND k2 = 2");
+
+        // token restrictions
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) < 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) >= 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) <= 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) = 0",
+             "SELECT * FROM %s WHERE token(k1, k2) >= 0 AND token(k1, k2) <= 0");
+
+        // row filter without indexed column
+        test("SELECT * FROM %s WHERE k1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND v2 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 = 2 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 = 2 AND v2 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 0 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 0 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+
+        // row filter with indexed column
+        createIndex("CREATE INDEX ON %s (k1)");
+        createIndex("CREATE INDEX ON %s (k2)");
+        createIndex("CREATE INDEX ON %s (v1)");
+        createIndex("CREATE INDEX ON %s (v2)");
+        test("SELECT * FROM %s WHERE k1 = 1");
+        test("SELECT * FROM %s WHERE k2 = 2");
+        test("SELECT * FROM %s WHERE v1 = 1");
+        test("SELECT * FROM %s WHERE v2 = 2");
+        test("SELECT * FROM %s WHERE k1 > 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 > 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 > 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v2 > 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND v2 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 = 2 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 = 2 AND v2 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND k1 = 1");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND k2 = 2");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND v1 = 1",
+             "SELECT * FROM %s WHERE token(k1, k2) >= token(1, 2) AND token(k1, k2) <= token(1, 2) AND v1 = 1");
+
+        // grouped partition-directed queries, maybe producing multiple queries
+        test("SELECT * FROM %s WHERE k1 IN (1) AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 IN (2)",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 IN (1) AND k2 IN (2)",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 IN (0, 1) AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 IN (2, 3)",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3");
+        test("SELECT * FROM %s WHERE k1 IN (0, 1) AND k2 IN (2, 3)",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 3",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3");
+    }
+
+    @Test
+    public void testWideTable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int, c int, v1 int, v2 int, s int static, PRIMARY KEY(k, c))");
+
+        // column selection on unrestricted partition range query
+        test("SELECT * FROM %s");
+        test("SELECT k FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT c FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT s FROM %s");
+        test("SELECT v1 FROM %s");
+        test("SELECT v2 FROM %s");
+        test("SELECT k, c, s, v1, v2 FROM %s",
+             "SELECT s, v1, v2 FROM %s");
+
+        // column selection on partition directed query
+        test("SELECT * FROM %s WHERE k = 0");
+        test("SELECT k FROM %s WHERE k = 0",
+             "SELECT * FROM %s WHERE k = 0");
+        test("SELECT s FROM %s WHERE k = 0");
+        test("SELECT v1 FROM %s WHERE k = 0");
+        test("SELECT v2 FROM %s WHERE k = 0");
+        test("SELECT k, c, s, v1, v2 FROM %s WHERE k = 0",
+             "SELECT s, v1, v2 FROM %s WHERE k = 0");
+
+        // clustering filters
+        test("SELECT * FROM %s WHERE k = 0 AND c = 1");
+        test("SELECT * FROM %s WHERE k = 0 AND c < 1");
+        test("SELECT * FROM %s WHERE k = 0 AND c > 1");
+        test("SELECT * FROM %s WHERE k = 0 AND c <= 1");
+        test("SELECT * FROM %s WHERE k = 0 AND c >= 1");
+        test("SELECT * FROM %s WHERE k = 0 AND c > 1 AND c <= 2");
+        test("SELECT * FROM %s WHERE k = 0 AND c >= 1 AND c < 2");
+
+        // token restrictions
+        test("SELECT * FROM %s WHERE token(k) > 0");
+        test("SELECT * FROM %s WHERE token(k) < 0");
+        test("SELECT * FROM %s WHERE token(k) >= 0");
+        test("SELECT * FROM %s WHERE token(k) <= 0");
+        test("SELECT * FROM %s WHERE token(k) = 0",
+             "SELECT * FROM %s WHERE token(k) >= 0 AND token(k) <= 0");
+
+        // row filter without indexed column
+        test("SELECT * FROM %s WHERE c = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE s = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k = 0 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k = 0 AND c = 1 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k) > 0 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k = 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k = 0 AND c = 1 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k) > 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k) > 0 AND c = 1 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+
+        // expression filter with indexed column
+        createIndex("CREATE INDEX ON %s (c)");
+        createIndex("CREATE INDEX ON %s (s)");
+        createIndex("CREATE INDEX ON %s (v1)");
+        test("SELECT * FROM %s WHERE c = 1");
+        test("SELECT * FROM %s WHERE v1 = 1");
+        test("SELECT * FROM %s WHERE s = 1");
+        test("SELECT * FROM %s WHERE v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k) > 0 AND v1 = 1");
+        test("SELECT * FROM %s WHERE k = 0 AND v1 = 1",
+             "SELECT * FROM %s WHERE token(k) >= token(0) AND token(k) <= token(0) AND v1 = 1");
+        test("SELECT * FROM %s WHERE k = 0 AND v1 = 1 AND c = 1",
+             "SELECT * FROM %s WHERE token(k) >= token(0) AND token(k) <= token(0) AND c = 1 AND v1 = 1 ALLOW FILTERING");
+
+        // grouped partition-directed queries, maybe producing multiple queries
+        test("SELECT * FROM %s WHERE k IN (0)",
+             "SELECT * FROM %s WHERE k = 0");
+        test("SELECT * FROM %s WHERE k IN (0, 1)",
+             "SELECT * FROM %s WHERE k = 0",
+             "SELECT * FROM %s WHERE k = 1");
+        test("SELECT * FROM %s WHERE k IN (0, 1) AND c = 0",
+             "SELECT * FROM %s WHERE k = 0 AND c = 0",
+             "SELECT * FROM %s WHERE k = 1 AND c = 0");
+        test("SELECT * FROM %s WHERE k IN (0, 1) AND c > 0",
+             "SELECT * FROM %s WHERE k = 0 AND c > 0",
+             "SELECT * FROM %s WHERE k = 1 AND c > 0");
+
+        // order by
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c",
+             "SELECT * FROM %s WHERE k = 0");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c ASC",
+             "SELECT * FROM %s WHERE k = 0");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c DESC");
+
+        // order by clustering filter
+        test("SELECT * FROM %s WHERE k = 0 AND c = 1 ORDER BY c",
+             "SELECT * FROM %s WHERE k = 0 AND c = 1");
+        test("SELECT * FROM %s WHERE k = 0 AND c = 1 ORDER BY c ASC",
+             "SELECT * FROM %s WHERE k = 0 AND c = 1");
+        test("SELECT * FROM %s WHERE k = 0 AND c = 1 ORDER BY c DESC");
+    }
+
+    @Test
+    public void testWideTableWithMulticolumnKey() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k1 int, k2 int, c1 int, c2 int, c3 int, v1 int, v2 int, PRIMARY KEY((k1, k2), c1, c2, c3))");
+
+        // column selection on unrestricted partition range query
+        test("SELECT * FROM %s");
+        test("SELECT k1 FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT k2 FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT c1 FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT c2 FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT c3 FROM %s",
+             "SELECT * FROM %s");
+        test("SELECT v1 FROM %s");
+        test("SELECT v2 FROM %s");
+        test("SELECT k1, k2, c1, c2, c3, v1, v2 FROM %s",
+             "SELECT v1, v2 FROM %s");
+
+        // column selection on partition directed query
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT k1 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT k2 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT c1 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT c2 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT v1 FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT v2 FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT k1, k2, c1, c2, v1, v2 FROM %s WHERE k1 = 1 AND k2 = 2",
+             "SELECT v1, v2 FROM %s WHERE k1 = 1 AND k2 = 2");
+
+        // clustering filters
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 < 1");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 > 1");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 <= 1");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 >= 1");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 > 1 AND c1 < 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 > 1 AND c1 <= 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 >= 1 AND c1 < 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 > 1 AND c1 < 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 < 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 > 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 <= 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 >= 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 > 2 AND c2 < 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 > 2 AND c2 <= 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 >= 2 AND c2 < 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 > 2 AND c2 < 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 = 3",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND (c1, c2, c3) = (1, 2, 3)");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 > 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 < 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 >= 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 <= 3");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 > 3 AND c3 < 4");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 > 3 AND c3 <= 4");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 >= 3 AND c3 < 4");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 >= 3 AND c3 <= 4");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND (c1, c2, c3) = (1, 2, 3)");
+
+        // token restrictions
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) < 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) >= 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) <= 0");
+        test("SELECT * FROM %s WHERE token(k1, k2) = 0",
+             "SELECT * FROM %s WHERE token(k1, k2) >= 0 AND token(k1, k2) <= 0");
+
+        // row filter without indexed column
+        test("SELECT * FROM %s WHERE k1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE c1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE c2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE c3 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND c1 = 1 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
+
+        // expression filter with indexed column
+        createIndex("CREATE INDEX ON %s (k1)");
+        createIndex("CREATE INDEX ON %s (k2)");
+        createIndex("CREATE INDEX ON %s (c1)");
+        createIndex("CREATE INDEX ON %s (c2)");
+        createIndex("CREATE INDEX ON %s (c3)");
+        createIndex("CREATE INDEX ON %s (v1)");
+        createIndex("CREATE INDEX ON %s (v2)");
+        test("SELECT * FROM %s WHERE k1 = 1");
+        test("SELECT * FROM %s WHERE k2 = 2");
+        test("SELECT * FROM %s WHERE c1 = 1");
+        test("SELECT * FROM %s WHERE c2 = 2");
+        test("SELECT * FROM %s WHERE c3 = 3");
+        test("SELECT * FROM %s WHERE v1 = 1");
+        test("SELECT * FROM %s WHERE v2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT * FROM %s WHERE c1 = 1 AND c2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE c1 = 1 AND c2 = 2 AND c3 = 3 ALLOW FILTERING",
+             "SELECT * FROM %s WHERE (c1, c2, c3) = (1, 2, 3) ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND v1 = 1",
+             "SELECT * FROM %s WHERE token(k1, k2) >= token(1, 2) AND token(k1, k2) <= token(1, 2) AND v1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND v1 = 1",
+             "SELECT * FROM %s WHERE token(k1, k2) >= token(1, 2) AND token(k1, k2) <= token(1, 2) AND c1 = 1 AND v1 = 1 ALLOW FILTERING");
+
+        // grouped partition-directed queries, maybe producing multiple queries
+        test("SELECT * FROM %s WHERE k1 IN (1) AND k2 IN (2)",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4)",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4");
+        test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4) AND c1 = 0",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3 AND c1 = 0",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4 AND c1 = 0",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3 AND c1 = 0",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4 AND c1 = 0");
+        test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4) AND c1 > 0",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3 AND c1 > 0",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4 AND c1 > 0",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3 AND c1 > 0",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4 AND c1 > 0");
+        test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4) AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))",
+             "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))",
+             "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))");
+
+        // order by
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 ASC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 DESC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 DESC, c2 DESC, c3 DESC");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1, c2",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1, c2 ASC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 ASC, c2",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 ASC, c2 ASC, c3 ASC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 DESC, c2 DESC, c3 DESC");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 ASC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
+
+        // order by clustering filter
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 DESC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 DESC, c2 DESC, c3 DESC");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1, c2",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1, c2 ASC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 ASC, c2",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 ASC, c2 ASC, c3 ASC",
+             "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
+        test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 DESC, c2 DESC, c3 DESC");
+    }
+
+    @Test
+    public void testQuotedNames() throws Throwable
+    {
+        createKeyspace("CREATE KEYSPACE \"K\" WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
+        createTable("CREATE TABLE \"K\".\"T\" (\"K\" int, \"C\" int, \"S\" int static, \"V\" int, PRIMARY KEY(\"K\", \"C\"))");
+
+        // column selection on unrestricted partition range query
+        test("SELECT * FROM \"K\".\"T\"");
+        test("SELECT \"K\" FROM \"K\".\"T\"",
+             "SELECT * FROM \"K\".\"T\"");
+        test("SELECT \"S\" FROM \"K\".\"T\"");
+        test("SELECT \"V\" FROM \"K\".\"T\"");
+        test("SELECT \"K\", \"C\", \"S\", \"V\" FROM \"K\".\"T\"",
+             "SELECT \"S\", \"V\" FROM \"K\".\"T\"");
+
+        // column selection on partition directed query
+        test("SELECT * FROM \"K\".\"T\" WHERE \"K\" = 0");
+        test("SELECT \"K\" FROM \"K\".\"T\" WHERE \"K\" = 0",
+             "SELECT * FROM \"K\".\"T\" WHERE \"K\" = 0");
+        test("SELECT \"S\" FROM \"K\".\"T\" WHERE \"K\" = 0");
+        test("SELECT \"V\" FROM \"K\".\"T\" WHERE \"K\" = 0");
+        test("SELECT \"K\", \"C\", \"S\", \"V\" FROM \"K\".\"T\" WHERE \"K\" = 0",
+             "SELECT \"S\", \"V\" FROM \"K\".\"T\" WHERE \"K\" = 0");
+
+        // filters
+        test("SELECT * FROM \"K\".\"T\" WHERE \"K\" = 0 AND \"C\" = 1");
+        test("SELECT * FROM \"K\".\"T\" WHERE \"K\" = 0 AND \"C\" > 1 AND \"C\" <= 2");
+        test("SELECT * FROM \"K\".\"T\" WHERE \"V\" = 0 ALLOW FILTERING");
+        test("SELECT * FROM \"K\".\"T\" WHERE \"S\" = 0 ALLOW FILTERING");
+        test("SELECT * FROM \"K\".\"T\" WHERE \"C\" = 0 ALLOW FILTERING");
+
+        // order by
+        test("SELECT * FROM \"K\".\"T\" WHERE \"K\" = 0 ORDER BY \"C\" DESC");
+        test("SELECT * FROM \"K\".\"T\" WHERE \"K\" = 0 AND \"C\" = 1 ORDER BY \"C\" DESC");
+    }
+
+    @Test
+    public void testLiterals() throws Throwable
+    {
+        // skinny table
+        createTable("CREATE TABLE %s (k text, c text, v text, PRIMARY KEY(k, c))");
+        test("SELECT * FROM %s WHERE k = 'A'");
+        test("SELECT * FROM %s WHERE c = 'A' ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE v = 'A' ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k = 'A' AND c = 'B'");
+        test("SELECT * FROM %s WHERE k = 'A' AND v = 'B' ALLOW FILTERING");
+
+        // wide table
+        createTable("CREATE TABLE %s (k1 text, k2 text, c1 text, c2 text, v text, PRIMARY KEY((k1, k2), c1, c2))");
+        test("SELECT * FROM %s WHERE k1 = 'A' ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k2 = 'A' ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE c1 = 'A' ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE c2 = 'A' ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B'");
+        test("SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B' AND c1 = 'C'");
+        test("SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B' AND c1 > 'C'");
+        test("SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B' AND c1 > 'C' AND c1 <= 'D'");
+        test("SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B' AND c1 = 'C' AND c2 = 'D'",
+             "SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B' AND (c1, c2) = ('C', 'D')");
+        test("SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B' AND c1 = 'C' AND c2 > 'D'");
+        test("SELECT * FROM %s WHERE k1 = 'A' AND k2 = 'B' AND c1 = 'C' AND c2 > 'D' AND c2 <= 'E'");
+    }
+
+    @Test
+    public void testWideTableWithClusteringOrder() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int, c1 int, c2 int, c3 int, PRIMARY KEY(k, c1, c2, c3)) WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC, c3 DESC)");
+
+        // one column
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1",
+             "SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC",
+             "SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1 DESC",
+             "SELECT * FROM %s WHERE k = 0");
+
+        // two columns
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1, c2 DESC",
+             "SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC",
+             "SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1 DESC, c2 ASC",
+             "SELECT * FROM %s WHERE k = 0");
+
+        // three columns
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1, c2 DESC, c3 ASC",
+             "SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1, c2 DESC, c3 ASC",
+             "SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC",
+             "SELECT * FROM %s WHERE k = 0 ORDER BY c1 ASC, c2 DESC, c3 ASC");
+        test("SELECT * FROM %s WHERE k = 0 ORDER BY c1 DESC, c2 ASC, c3 DESC",
+             "SELECT * FROM %s WHERE k = 0");
+    }
+
+    @Test
+    public void testCollections() throws Throwable
+    {
+        String udt = createType("CREATE TYPE %s (a text, b int)");
+        createTable("CREATE TABLE %s (" +
+                    "k int PRIMARY KEY, " +
+                    "l list<text>, " +
+                    "s set<text>, " +
+                    "m map<text, text>, " +
+                    "t tuple<text, int>, " +
+                    "u " + udt + ")");
+
+        // column selections
+        test("SELECT l FROM %s");
+        test("SELECT s FROM %s");
+        test("SELECT m FROM %s");
+        test("SELECT t FROM %s");
+        test("SELECT u FROM %s");
+        testInvalid("SELECT l['a'] FROM %s");
+        test("SELECT s['a'] FROM %s");
+        test("SELECT m['a'] FROM %s");
+        test("SELECT u.a FROM %s",
+             "SELECT u FROM %s");
+        test("SELECT m['a'], m['b'], s['c'], s['d'], t, u.a, u.b FROM %s",
+             "SELECT m['a'], m['b'], s['c'], s['d'], t, u FROM %s");
+
+        // filtering
+        testInvalid("SELECT * FROM %s WHERE l = ['a', 'b'] ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE s = {'a', 'b'} ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE m = {'a': 'b', 'c': 'd'} ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE t = ('a', 1) ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE u = {a: 'a', b: 1} ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE l['a'] = 'b' ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE s['a'] = 'b' ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE m['a'] = 'b' ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE u.a = 'a' ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE u.b = 0 ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE u.a = 'a' ANd u.b = 0 ALLOW FILTERING");
+    }
+
+    @Test
+    public void testFrozenCollections() throws Throwable
+    {
+        String udt = createType("CREATE TYPE %s (a text, b int)");
+        createTable("CREATE TABLE %s (" +
+                    "k int PRIMARY KEY, " +
+                    "l frozen<list<text>>, " +
+                    "s frozen<set<text>>, " +
+                    "m frozen<map<text, text>>, " +
+                    "t frozen<tuple<text, int>>, " +
+                    "u frozen<" + udt + ">)");
+
+        // column selections
+        test("SELECT l FROM %s");
+        test("SELECT s FROM %s");
+        test("SELECT m FROM %s");
+        test("SELECT t FROM %s");
+        test("SELECT u FROM %s");
+        testInvalid("SELECT l['a'] FROM %s");
+        test("SELECT s['a'] FROM %s",
+             "SELECT s FROM %s");
+        test("SELECT m['a'] FROM %s",
+             "SELECT m FROM %s");
+        test("SELECT u.a FROM %s",
+             "SELECT u FROM %s");
+        test("SELECT m['a'], m['b'], s['c'], s['d'], t, u.a, u.b FROM %s",
+             "SELECT m, s, t, u FROM %s");
+
+        // filtering
+        test("SELECT * FROM %s WHERE l = ['a', 'b'] ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE s = {'a', 'b'} ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE m = {'a': 'b', 'c': 'd'} ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE t = ('a', 1) ALLOW FILTERING");
+        test("SELECT * FROM %s WHERE u = {a: 'a', b: 1} ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE l['a'] = 'a' ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE s['a'] = 'a' ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE m['a'] = 'a' ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE u.a = 'a' ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE u.b = 0 ALLOW FILTERING");
+        testInvalid("SELECT * FROM %s WHERE u.a = 'a' ANd u.b = 0 ALLOW FILTERING");
+    }
+
+    @Test
+    public void testVirtualTable() throws Throwable
+    {
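+        // build a minimal in-memory virtual table so reconstructed CQL can be checked against a virtual keyspace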
+        TableMetadata metadata =
+        TableMetadata.builder("vk", "vt")
+                     .kind(TableMetadata.Kind.VIRTUAL)
+                     .addPartitionKeyColumn("k", Int32Type.instance)
+                     .addClusteringColumn("c", Int32Type.instance)
+                     .addRegularColumn("v", Int32Type.instance)
+                     .addStaticColumn("s", Int32Type.instance)
+                     .build();
+        SimpleDataSet data = new SimpleDataSet(metadata);
+        VirtualTable table = new AbstractVirtualTable(metadata)
+        {
+            public DataSet data()
+            {
+                return data;
+            }
+        };
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace("vk", ImmutableList.of(table)));
+
+        // column selection on unrestricted partition range query
+        test("SELECT * FROM vk.vt");
+        test("SELECT k FROM vk.vt",
+             "SELECT * FROM vk.vt");
+        test("SELECT c FROM vk.vt",
+             "SELECT * FROM vk.vt");
+        test("SELECT s FROM vk.vt");
+        test("SELECT v FROM vk.vt");
+        test("SELECT k, c, s, v FROM vk.vt",
+             "SELECT s, v FROM vk.vt");
+
+        // column selection on partition directed query
+        test("SELECT * FROM vk.vt WHERE k = 1");
+        test("SELECT k FROM vk.vt WHERE k = 1",
+             "SELECT * FROM vk.vt WHERE k = 1");
+        test("SELECT c FROM vk.vt WHERE k = 1",
+             "SELECT * FROM vk.vt WHERE k = 1");
+        test("SELECT v FROM vk.vt WHERE k = 1");
+        test("SELECT s FROM vk.vt WHERE k = 1");
+        test("SELECT k, c, s, v FROM vk.vt WHERE k = 1",
+             "SELECT s, v FROM vk.vt WHERE k = 1");
+
+        // clustering filters
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c = 1");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c < 1");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c > 1");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c <= 1");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c >= 1");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c > 1 AND c <= 2");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c >= 1 AND c < 2");
+
+        // token restrictions
+        test("SELECT * FROM vk.vt WHERE token(k) > 0");
+        test("SELECT * FROM vk.vt WHERE token(k) < 0");
+        test("SELECT * FROM vk.vt WHERE token(k) >= 0");
+        test("SELECT * FROM vk.vt WHERE token(k) <= 0");
+        test("SELECT * FROM vk.vt WHERE token(k) = 0",
+             "SELECT * FROM vk.vt WHERE token(k) >= 0 AND token(k) <= 0");
+
+        // row filters
+        test("SELECT * FROM vk.vt WHERE c = 1 ALLOW FILTERING");
+        test("SELECT * FROM vk.vt WHERE s = 1 ALLOW FILTERING");
+        test("SELECT * FROM vk.vt WHERE v = 1 ALLOW FILTERING");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND v = 1 ALLOW FILTERING");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c = 1 AND v = 1 ALLOW FILTERING");
+        test("SELECT * FROM vk.vt WHERE token(k) > 0 AND v = 1 ALLOW FILTERING");
+        test("SELECT * FROM vk.vt WHERE token(k) > 0 AND c = 1 AND v = 1 ALLOW FILTERING");
+
+        // grouped partition-directed queries, maybe producing multiple queries
+        test("SELECT * FROM vk.vt WHERE k IN (0)",
+             "SELECT * FROM vk.vt WHERE k = 0");
+        test("SELECT * FROM vk.vt WHERE k IN (0, 1)",
+             "SELECT * FROM vk.vt WHERE k = 0",
+             "SELECT * FROM vk.vt WHERE k = 1");
+        test("SELECT * FROM vk.vt WHERE k IN (0, 1) AND c = 0",
+             "SELECT * FROM vk.vt WHERE k = 0 AND c = 0",
+             "SELECT * FROM vk.vt WHERE k = 1 AND c = 0");
+        test("SELECT * FROM vk.vt WHERE k IN (0, 1) AND c > 0",
+             "SELECT * FROM vk.vt WHERE k = 0 AND c > 0",
+             "SELECT * FROM vk.vt WHERE k = 1 AND c > 0");
+
+        // order by
+        test("SELECT * FROM vk.vt WHERE k = 0 ORDER BY c",
+             "SELECT * FROM vk.vt WHERE k = 0");
+        test("SELECT * FROM vk.vt WHERE k = 0 ORDER BY c ASC",
+             "SELECT * FROM vk.vt WHERE k = 0");
+        test("SELECT * FROM vk.vt WHERE k = 0 ORDER BY c DESC");
+
+        // order by clustering filter
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c = 1 ORDER BY c",
+             "SELECT * FROM vk.vt WHERE k = 0 AND c = 1");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c = 1 ORDER BY c ASC",
+             "SELECT * FROM vk.vt WHERE k = 0 AND c = 1");
+        test("SELECT * FROM vk.vt WHERE k = 0 AND c = 1 ORDER BY c DESC");
+    }
+
+    private List<String> toCQLString(String query)
+    {
+        String fullQuery = formatQuery(query);
+        ClientState state = ClientState.forInternalCalls();
+        CQLStatement statement = QueryProcessor.getStatement(fullQuery, state);
+
+        assertTrue(statement instanceof SelectStatement);
+        SelectStatement select = (SelectStatement) statement;
+
+        QueryOptions options = QueryOptions.forInternalCalls(Collections.emptyList());
+        ReadQuery readQuery = select.getQuery(options, FBUtilities.nowInSeconds());
+
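+        // IN restrictions on the partition key produce a SinglePartitionReadCommand.Group;
+        // render each per-partition command back to CQL so they can be compared individually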
+        if (readQuery instanceof SinglePartitionReadCommand.Group)
+        {
+            SinglePartitionReadCommand.Group group = (SinglePartitionReadCommand.Group) readQuery;
+            return group.queries.stream().map(AbstractReadQuery::toCQLString).collect(Collectors.toList());
+        }
+        else
+        {
+            assertTrue(readQuery instanceof AbstractReadQuery);
+            return Collections.singletonList(((AbstractReadQuery) readQuery).toCQLString());
+        }
+    }
+
+    private void test(String query) throws Throwable
+    {
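+        // with no explicit expectation, the query should be rendered back unchanged (modulo the appended ALLOW FILTERING)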
+        test(query, query);
+    }
+
+    private void test(String query, String... expected) throws Throwable
+    {
+        List<String> actual = toCQLString(query);
+        List<String> fullExpected = Stream.of(expected)
+                                          .map(this::formatQuery)
+                                          .map(s -> s.endsWith(" ALLOW FILTERING") ? s : s + " ALLOW FILTERING")
+                                          .collect(Collectors.toList());
+        assertEquals(fullExpected, actual);
+
+        // execute both the expected output commands to verify that they are valid CQL
+        for (String q : expected)
+            execute(q);
+    }
+
+    private void testInvalid(String query) throws Throwable
+    {
+        assertInvalidThrow(RequestValidationException.class, query);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/CellSpecTest.java b/test/unit/org/apache/cassandra/db/CellSpecTest.java
index 4cc886d..3387c78 100644
--- a/test/unit/org/apache/cassandra/db/CellSpecTest.java
+++ b/test/unit/org/apache/cassandra/db/CellSpecTest.java
@@ -22,7 +22,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.concurrent.CompletableFuture;
 import java.util.function.BiConsumer;
 import java.util.stream.Collectors;
 
@@ -40,13 +39,15 @@
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ObjectSizes;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.NativeAllocator;
 import org.apache.cassandra.utils.memory.NativePool;
-import org.assertj.core.api.Assertions;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 @RunWith(Parameterized.class)
 public class CellSpecTest
@@ -60,6 +61,28 @@
     }
 
     @Test
+    public void unsharedHeapSize()
+    {
+        long empty = ObjectSizes.measure(cell);
+        long actual = ObjectSizes.measureDeep(cell);
+        long expected;
+        if (cell instanceof NativeCell)
+        {
+            // NativeCell stores its contents off-heap, so the on-heap cost is just the empty object size
+            expected = empty;
+        }
+        else
+        {
+            expected = empty + valueSizeOnHeapOf(cell.value());
+            if (cell.path() != null)
+                expected += cell.path().unsharedHeapSize();
+        }
+
+        assertThat(expected).isEqualTo(actual);
+        assertThat(cell.unsharedHeapSize()).isEqualTo(expected);
+    }
+
+    @Test
     public void unsharedHeapSizeExcludingData()
     {
         long empty = ObjectSizes.measure(cell);
@@ -77,10 +100,24 @@
                 expected += cell.path().unsharedHeapSizeExcludingData();
         }
 
-        Assertions.assertThat(cell.unsharedHeapSizeExcludingData())
+        assertThat(cell.unsharedHeapSizeExcludingData())
                   .isEqualTo(expected);
     }
 
+    private long valueSizeOnHeapOf(Object value)
+    {
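+        // cell values are either ByteBuffer-backed or raw byte[]; measure whichever representation is present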
+        if (value instanceof ByteBuffer)
+        {
+            ByteBuffer bb = (ByteBuffer) value;
+            return ObjectSizes.sizeOnHeapOf(bb);
+        }
+        else if (value instanceof byte[])
+        {
+            return ObjectSizes.sizeOfArray((byte[]) value);
+        }
+        throw new IllegalArgumentException("Unsupported type: " + value.getClass());
+    }
+
     private static long valuePtrSize(Object value)
     {
         if (value instanceof ByteBuffer)
@@ -98,8 +135,8 @@
 
         byte[] rawBytes = { 0, 1, 2, 3, 4, 5, 6 };
         ByteBuffer bbBytes = ByteBuffer.wrap(rawBytes);
-        NativePool pool = new NativePool(1024, 1024, 1, () -> CompletableFuture.completedFuture(true));
-        NativeAllocator allocator = pool.newAllocator();
+        NativePool pool = new NativePool(1024, 1024, 1, () -> ImmediateFuture.success(true));
+        NativeAllocator allocator = pool.newAllocator(null);
         OpOrder order = new OpOrder();
 
         List<Cell<?>> tests = new ArrayList<>();
@@ -113,7 +150,7 @@
 
         // complex
         // seems NativeCell does not allow CellPath.TOP, or CellPath.BOTTOM
-        fn.accept(ColumnMetadata.regularColumn(table, bytes("complex"), ListType.getInstance(BytesType.instance, true)), CellPath.create(bytes(UUIDGen.getTimeUUID())));
+        fn.accept(ColumnMetadata.regularColumn(table, bytes("complex"), ListType.getInstance(BytesType.instance, true)), CellPath.create(TimeUUID.Serializer.instance.serialize(nextTimeUUID())));
 
         return tests.stream().map(a -> new Object[] {a.getClass().getSimpleName() + ":" + (a.path() == null ? "simple" : "complex"), a}).collect(Collectors.toList());
     }
diff --git a/test/unit/org/apache/cassandra/db/CellTest.java b/test/unit/org/apache/cassandra/db/CellTest.java
index d4dec05..4100c54 100644
--- a/test/unit/org/apache/cassandra/db/CellTest.java
+++ b/test/unit/org/apache/cassandra/db/CellTest.java
@@ -19,16 +19,16 @@
 package org.apache.cassandra.db;
 
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.List;
 
-import com.google.common.collect.Lists;
-
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQL3Type;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.FieldIdentifier;
 import org.apache.cassandra.db.marshal.*;
@@ -39,9 +39,8 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
 
-import static java.util.Arrays.*;
+import static java.util.Arrays.asList;
 
 public class CellTest
 {
@@ -103,6 +102,26 @@
         }
     }
 
+    @Test
+    public void testUnmarshallableInMulticellCollection()
+    {
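+        // toString() on a collection tombstone should not attempt to deserialise the (absent) cell value,
+        // whatever the declared value type of the map is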
+        List<CQL3Type.Native> unmarshallableTypes = new ArrayList<>();
+        for (CQL3Type.Native nativeType : CQL3Type.Native.values())
+        {
+            ColumnMetadata c = fakeColumn("c", MapType.getInstance(Int32Type.instance, nativeType.getType(), true));
+            BufferCell cell = BufferCell.tombstone(c, 0, 4, CellPath.create(ByteBufferUtil.bytes(4)));
+            try
+            {
+                Assert.assertEquals("expected #toString failed for type " + nativeType, "[c[4]=<tombstone> ts=0 ldt=4]", cell.toString());
+            }
+            catch (MarshalException m)
+            {
+                unmarshallableTypes.add(nativeType);
+            }
+        }
+        Assert.assertTrue(unmarshallableTypes.isEmpty());
+    }
+
     private void assertValid(Cell<?> cell)
     {
         try
diff --git a/test/unit/org/apache/cassandra/db/CleanupTest.java b/test/unit/org/apache/cassandra/db/CleanupTest.java
index f59bdd6..d82eef8 100644
--- a/test/unit/org/apache/cassandra/db/CleanupTest.java
+++ b/test/unit/org/apache/cassandra/db/CleanupTest.java
@@ -56,6 +56,7 @@
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -157,8 +158,8 @@
 
         ColumnMetadata cdef = cfs.metadata().getColumn(COLUMN);
         String indexName = "birthdate_key_index";
-        long start = System.nanoTime();
-        while (!cfs.getBuiltIndexes().contains(indexName) && System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10))
+        long start = nanoTime();
+        while (!cfs.getBuiltIndexes().contains(indexName) && nanoTime() - start < TimeUnit.SECONDS.toNanos(10))
             Thread.sleep(10);
 
         RowFilter cf = RowFilter.create();
@@ -285,7 +286,7 @@
             .add("val", VALUE)
             .build()
             .applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         Set<SSTableReader> beforeFirstCleanup = Sets.newHashSet(cfs.getLiveSSTables());
@@ -434,7 +435,7 @@
                     .applyUnsafe();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     protected List<Long> getMaxTimestampList(ColumnFamilyStore cfs)
diff --git a/test/unit/org/apache/cassandra/db/CleanupTransientTest.java b/test/unit/org/apache/cassandra/db/CleanupTransientTest.java
index 9789183..d611bfa 100644
--- a/test/unit/org/apache/cassandra/db/CleanupTransientTest.java
+++ b/test/unit/org/apache/cassandra/db/CleanupTransientTest.java
@@ -46,7 +46,6 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 public class CleanupTransientTest
 {
@@ -182,7 +181,7 @@
                     .applyUnsafe();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     protected List<Long> getMaxTimestampList(ColumnFamilyStore cfs)
diff --git a/test/unit/org/apache/cassandra/db/ClusteringHeapSizeTest.java b/test/unit/org/apache/cassandra/db/ClusteringHeapSizeTest.java
index 54d0ce1..e97f067 100644
--- a/test/unit/org/apache/cassandra/db/ClusteringHeapSizeTest.java
+++ b/test/unit/org/apache/cassandra/db/ClusteringHeapSizeTest.java
@@ -20,13 +20,13 @@
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.concurrent.CompletableFuture;
 
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import org.apache.cassandra.utils.ObjectSizes;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.NativePool;
 import org.assertj.core.api.Assertions;
@@ -45,12 +45,6 @@
     public void unsharedHeap()
     {
         long measureDeep = ObjectSizes.measureDeep(clustering);
-        if (clustering instanceof BufferClustering)
-        {
-            // jamm (used in measureDeep) uses .remaining() where as .sizeOnHeapOf() done in unsharedHeapSize actually looks at memory cost
-            // without assuming the array is shared (unless capacity > remaining); so account for that
-            measureDeep += ObjectSizes.measureDeep(new byte[0]);
-        }
         long unsharedHeapSize = clustering.unsharedHeapSize();
 
         double allowedDiff = 0.1; // 10% is seen as "close enough"
@@ -70,7 +64,7 @@
     @Parameterized.Parameters(name = "{0}")
     public static Collection<Object[]> data() {
         byte[] rawBytes = { 0, 1, 2, 3, 4, 5, 6 };
-        NativePool pool = new NativePool(1024, 1024, 1, () -> CompletableFuture.completedFuture(true));
+        NativePool pool = new NativePool(1024, 1024, 1, () -> ImmediateFuture.success(true));
         OpOrder order = new OpOrder();
 
         ArrayClustering array = ArrayClustering.make(rawBytes);
@@ -78,7 +72,7 @@
         return Arrays.asList(new Object[][] {
         { array },
         { buffer },
-        { new NativeClustering(pool.newAllocator(), order.getCurrent(), array)}
+        { new NativeClustering(pool.newAllocator(null), order.getCurrent(), array)}
         });
     }
 }
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyMetricTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyMetricTest.java
index 0ba7cee..75e311e 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyMetricTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyMetricTest.java
@@ -60,7 +60,7 @@
         // late - after the whole system is already running, and some static fields may remain uninitialized
         // OTOH, late initialization of them may have creepy effects (for example NPEs in static initializers)
         // disclaimer: this is not a proper way to fix that
-        StorageService.instance.forceKeyspaceFlush(SchemaConstants.SYSTEM_KEYSPACE_NAME);
+        StorageService.instance.forceKeyspaceFlush(SchemaConstants.SYSTEM_KEYSPACE_NAME, ColumnFamilyStore.FlushReason.UNIT_TESTS);
     }
 
     @Test
@@ -79,7 +79,7 @@
         {
             applyMutation(cfs.metadata(), String.valueOf(j), ByteBufferUtil.EMPTY_BYTE_BUFFER, FBUtilities.timestampMicros());
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Collection<SSTableReader> sstables = cfs.getLiveSSTables();
         long size = 0;
         for (SSTableReader reader : sstables)
@@ -161,7 +161,7 @@
             applyMutation(store.metadata(), "1", bytes(1), FBUtilities.timestampMicros());
 
             // Flushing first SSTable
-            store.forceBlockingFlush();
+            Util.flush(store);
 
             long[] estimatedColumnCountHistogram = store.metric.estimatedColumnCountHistogram.getValue();
             assertNumberOfNonZeroValue(estimatedColumnCountHistogram, 1);
@@ -174,7 +174,7 @@
             applyMutation(store.metadata(), "2", bytes(2), FBUtilities.timestampMicros());
 
             // Flushing second SSTable
-            store.forceBlockingFlush();
+            Util.flush(store);
 
             estimatedColumnCountHistogram = store.metric.estimatedColumnCountHistogram.getValue();
             assertNumberOfNonZeroValue(estimatedColumnCountHistogram, 1);
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreMBeanTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreMBeanTest.java
new file mode 100644
index 0000000..8d15f67
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreMBeanTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db;
+
+import java.util.Arrays;
+import java.util.Random;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.dht.ByteOrderedPartitioner;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.OrderPreservingPartitioner;
+import org.apache.cassandra.dht.RandomPartitioner;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.assertj.core.api.Assertions;
+import org.mockito.Mockito;
+import org.quicktheories.core.Gen;
+import org.quicktheories.impl.Constraint;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.quicktheories.QuickTheory.qt;
+
+public class ColumnFamilyStoreMBeanTest
+{
+    @BeforeClass
+    public static void setup()
+    {
+        // can't use client due to the fact thread pools startup and fail due to config issues
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    @Test
+    public void testToTokenRangesMurmur3Partitioner()
+    {
+        testToTokenRanges(Murmur3Partitioner.instance);
+    }
+
+    @Test
+    public void testToTokenRangesRandomPartitioner()
+    {
+        testToTokenRanges(RandomPartitioner.instance);
+    }
+
+    @Test
+    public void testToTokenRangesOrderPreservingPartitioner()
+    {
+        testToTokenRanges(OrderPreservingPartitioner.instance);
+    }
+
+    @Test
+    public void testToTokenRangesByteOrderedPartitioner()
+    {
+        testToTokenRanges(ByteOrderedPartitioner.instance);
+    }
+
+    @Test
+    public void testInvalidateTokenRangesFormat()
+    {
+        ColumnFamilyStore store = Mockito.mock(ColumnFamilyStore.class);
+        Mockito.doCallRealMethod().when(store).forceCompactionForTokenRanges(Mockito.any());
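+        // only forceCompactionForTokenRanges() runs for real on the mock; it should reject malformed range strings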
+        IPartitioner previous = DatabaseDescriptor.getPartitioner();
+        try
+        {
+            DatabaseDescriptor.setPartitionerUnsafe(ByteOrderedPartitioner.instance);
+
+            for (String s : Arrays.asList("testing", "t1:", ":t2", "spaces should not have an impact"))
+                Assertions.assertThatThrownBy(() -> store.forceCompactionForTokenRanges(s))
+                          .hasMessageStartingWith(String.format("Unable to parse token range %s;", s));
+        }
+        finally
+        {
+            DatabaseDescriptor.setPartitionerUnsafe(previous);
+        }
+
+    }
+
+    private static void testToTokenRanges(IPartitioner partitioner)
+    {
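+        // round-trip check: format two random tokens with the MBean delimiter and expect toTokenRanges to parse back the same Range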
+        Token.TokenFactory tokenFactory = partitioner.getTokenFactory();
+        Gen<Token> tokenGen = tokenGen(partitioner);
+        qt().forAll(tokenGen, tokenGen)
+            .checkAssert((left, right) ->
+                         assertThat(ColumnFamilyStore.toTokenRanges(partitioner, toString(tokenFactory, left, right)))
+                         .isEqualTo(ImmutableSet.of(new Range<>(left, right))));
+    }
+
+    private static String toString(Token.TokenFactory tokenFactory, Token left, Token right)
+    {
+        return tokenFactory.toString(left) + ColumnFamilyStore.TOKEN_DELIMITER + tokenFactory.toString(right);
+    }
+
+    private static Gen<Token> tokenGen(IPartitioner partitioner)
+    {
+        // Random and RandomSource cannot share the same seed directly, so use RandomSource to generate a seed for Random
+        return rs -> partitioner.getRandomToken(new Random(rs.next(Constraint.none())));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index b1c6599..4d871fc 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -18,56 +18,83 @@
 */
 package org.apache.cassandra.db;
 
-import java.io.File;
-import java.io.FileReader;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.util.*;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterators;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Assume;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
-import org.apache.cassandra.utils.Pair;
-import org.json.simple.JSONArray;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.collect.Iterators;
-import org.apache.cassandra.*;
+import com.googlecode.concurrenttrees.common.Iterables;
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.UpdateBuilder;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.Operator;
+import org.apache.cassandra.db.ColumnFamilyStore.FlushReason;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.lifecycle.SSTableSet;
-import org.apache.cassandra.db.rows.*;
-import org.apache.cassandra.db.partitions.*;
+import org.apache.cassandra.db.memtable.AbstractMemtable;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.partitions.FilteredPartition;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.index.transactions.UpdateTransaction;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
-import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.metrics.ClearableHistogram;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.service.reads.SpeculativeRetryPolicy;
+import org.apache.cassandra.service.snapshot.SnapshotManifest;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.WrappedRunnable;
-import static junit.framework.Assert.assertNotNull;
+import org.apache.cassandra.utils.concurrent.OpOrder.Barrier;
+import org.apache.cassandra.utils.concurrent.OpOrder.Group;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 public class ColumnFamilyStoreTest
 {
     public static final String KEYSPACE1 = "ColumnFamilyStoreTest1";
     public static final String KEYSPACE2 = "ColumnFamilyStoreTest2";
+    public static final String KEYSPACE3 = "ColumnFamilyStoreTest3";
     public static final String CF_STANDARD1 = "Standard1";
     public static final String CF_STANDARD2 = "Standard2";
     public static final String CF_INDEX1 = "Indexed1";
+    public static final String CF_SPEC_RETRY1 = "SpeculativeRetryTest1";
 
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
@@ -81,6 +108,11 @@
         SchemaLoader.createKeyspace(KEYSPACE2,
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(KEYSPACE2, CF_STANDARD1));
+        SchemaLoader.createKeyspace(KEYSPACE3,
+                                    KeyspaceParams.simple(1),
+                                    SchemaLoader.standardCFMD(KEYSPACE3, CF_SPEC_RETRY1)
+                                                .speculativeRetry(SpeculativeRetryPolicy.fromString("50PERCENTILE"))
+                                                .additionalWritePolicy(SpeculativeRetryPolicy.fromString("75PERCENTILE")));
     }
 
     @Before
@@ -88,16 +120,15 @@
     {
         Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).truncateBlocking();
         Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD2).truncateBlocking();
+        Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_INDEX1).truncateBlocking();
         Keyspace.open(KEYSPACE2).getColumnFamilyStore(CF_STANDARD1).truncateBlocking();
     }
 
     @Test
     public void testMemtableTimestamp() throws Throwable
     {
-        assertEquals(Memtable.NO_MIN_TIMESTAMP,
-                     (new Memtable(Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata(),
-                                   EncodingStats.NO_STATS.minTimestamp))
-                     .getMinTimestamp());
+        ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
+        assertEquals(Memtable.NO_MIN_TIMESTAMP, fakeMemTableWithMinTS(cfs, EncodingStats.NO_STATS.minTimestamp).getMinTimestamp());
     }
 
     @Test
@@ -112,14 +143,14 @@
                 .add("val", "asdf")
                 .build()
                 .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, "key1")
                 .clustering("Column1")
                 .add("val", "asdf")
                 .build()
                 .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         ((ClearableHistogram)cfs.metric.sstablesPerReadHistogram.cf).clear(); // resets counts
         Util.getAll(Util.cmd(cfs, "key1").includeRow("c1").build());
@@ -189,7 +220,7 @@
         assertRangeCount(cfs, col, val, 2);
 
         // flush.
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // insert, don't flush
         new RowUpdateBuilder(cfs.metadata(), 1, "key3").clustering("Column1").add("val", "val1").build().applyUnsafe();
@@ -204,7 +235,7 @@
         assertRangeCount(cfs, col, val, 2);
 
         // flush
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // re-verify delete. // first breakage is right here because of CASSANDRA-1837.
         assertRangeCount(cfs, col, val, 2);
@@ -222,18 +253,13 @@
         assertRangeCount(cfs, col, val, 4);
 
         // and it remains so after flush. (this wasn't failing before, but it's good to check.)
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertRangeCount(cfs, col, val, 4);
     }
 
     @Test
     public void testClearEphemeralSnapshots() throws Throwable
     {
-        // We don't do snapshot-based repair on Windows so we don't have ephemeral snapshots from repair that need clearing.
-        // This test will fail as we'll revert to the WindowsFailedSnapshotTracker and counts will be off, but since we
-        // don't do snapshot-based repair on Windows, we just skip this test.
-        Assume.assumeTrue(!FBUtilities.isWindows);
-
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_INDEX1);
 
         // cleanup any previous test garbage
@@ -251,14 +277,14 @@
         cfs.snapshot("nonEphemeralSnapshot", null, false, false);
         cfs.snapshot("ephemeralSnapshot", null, true, false);
 
-        Map<String, Directories.SnapshotSizeDetails> snapshotDetails = cfs.getSnapshotDetails();
+        Map<String, TableSnapshot> snapshotDetails = cfs.listSnapshots();
         assertEquals(2, snapshotDetails.size());
         assertTrue(snapshotDetails.containsKey("ephemeralSnapshot"));
         assertTrue(snapshotDetails.containsKey("nonEphemeralSnapshot"));
 
         ColumnFamilyStore.clearEphemeralSnapshots(cfs.getDirectories());
 
-        snapshotDetails = cfs.getSnapshotDetails();
+        snapshotDetails = cfs.listSnapshots();
         assertEquals(1, snapshotDetails.size());
         assertTrue(snapshotDetails.containsKey("nonEphemeralSnapshot"));
 
@@ -267,7 +293,7 @@
     }
 
     @Test
-    public void testSnapshotSize()
+    public void testSnapshotSize() throws IOException
     {
         // cleanup any previous test garbage
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
@@ -279,20 +305,20 @@
         .add("val", "asdf")
         .build()
         .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // snapshot
         cfs.snapshot("basic", null, false, false);
 
         // check snapshot was created
-        Map<String, Directories.SnapshotSizeDetails> snapshotDetails = cfs.getSnapshotDetails();
+        Map<String, TableSnapshot> snapshotDetails = cfs.listSnapshots();
         assertThat(snapshotDetails).hasSize(1);
         assertThat(snapshotDetails).containsKey("basic");
 
-        // check that sizeOnDisk > trueSize = 0
+        // check that sizeOnDisk > trueSize, and that trueSize only covers the manifest and schema files at this point
-        Directories.SnapshotSizeDetails details = snapshotDetails.get("basic");
-        assertThat(details.sizeOnDiskBytes).isGreaterThan(details.dataSizeBytes);
-        assertThat(details.dataSizeBytes).isZero();
+        TableSnapshot details = snapshotDetails.get("basic");
+        assertThat(details.computeSizeOnDiskBytes()).isGreaterThan(details.computeTrueSizeBytes());
+        assertThat(details.computeTrueSizeBytes()).isEqualTo(getSnapshotManifestAndSchemaFileSizes(details));
 
         // compact base table to make trueSize > 0
         cfs.forceMajorCompaction();
@@ -300,10 +326,9 @@
 
-        // sizeOnDisk > trueSize because trueSize does not include manifest.json
-        // Check that truesize now is > 0
+        // after the major compaction the snapshot's sstables are no longer shared with the live table,
+        // so every file in the snapshot counts towards its true size and it equals the size on disk
-        snapshotDetails = cfs.getSnapshotDetails();
+        snapshotDetails = cfs.listSnapshots();
         details = snapshotDetails.get("basic");
-        assertThat(details.sizeOnDiskBytes).isGreaterThan(details.dataSizeBytes);
-        assertThat(details.dataSizeBytes).isPositive();
+        assertThat(details.computeSizeOnDiskBytes()).isEqualTo(details.computeTrueSizeBytes());
     }
 
     @Test
@@ -311,20 +336,63 @@
     {
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE2).getColumnFamilyStore(CF_STANDARD1);
         new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes("key1")).clustering("Column1").add("val", "asdf").build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes("key2")).clustering("Column1").add("val", "asdf").build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
-        for (int version = 1; version <= 2; ++version)
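+        // sstable ids are no longer fixed integer generations, so derive the expected backup descriptors
+        // from the live sstables rather than hard-coding generations 1 and 2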
+        for (SSTableReader liveSSTable : cfs.getLiveSSTables())
         {
-            Descriptor existing = new Descriptor(cfs.getDirectories().getDirectoryForNewSSTables(), KEYSPACE2, CF_STANDARD1, version,
-                                                 SSTableFormat.Type.BIG);
-            Descriptor desc = new Descriptor(Directories.getBackupsDirectory(existing), KEYSPACE2, CF_STANDARD1, version, SSTableFormat.Type.BIG);
-            for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.STATS })
+            Descriptor existing = liveSSTable.descriptor;
+            Descriptor desc = new Descriptor(Directories.getBackupsDirectory(existing),
+                                             KEYSPACE2,
+                                             CF_STANDARD1,
+                                             liveSSTable.descriptor.id,
+                                             liveSSTable.descriptor.formatType);
+            for (Component c : liveSSTable.getComponents())
                 assertTrue("Cannot find backed-up file:" + desc.filenameFor(c), new File(desc.filenameFor(c)).exists());
         }
     }
 
+    @Test
+    public void speculationThreshold()
+    {
+        // CF_SPEC_RETRY1 configured to use the 50th percentile for read and 75th percentile for write
+        ColumnFamilyStore cfs = Keyspace.open(KEYSPACE3).getColumnFamilyStore(CF_SPEC_RETRY1);
+
+        cfs.sampleReadLatencyMicros = 123000;
+        cfs.additionalWriteLatencyMicros = 234000;
+
+        // test updating before any stats are present
+        cfs.updateSpeculationThreshold();
+        assertThat(cfs.sampleReadLatencyMicros).isEqualTo(123000);
+        assertThat(cfs.additionalWriteLatencyMicros).isEqualTo(234000);
+
+        // Seed the column family with some latency data.
+        final int count = 10000;
+        for (int millis = 0; millis < count; millis++)
+        {
+            cfs.metric.coordinatorReadLatency.update(millis, TimeUnit.MILLISECONDS);
+            cfs.metric.coordinatorWriteLatency.update(millis, TimeUnit.MILLISECONDS);
+        }
+        // Sanity check the metrics - 50th percentile of linear 0-10000ms
+        // remember, latencies are only an estimate - off by up to 20% due to the 1.2 factor between histogram buckets.
+        assertThat(cfs.metric.coordinatorReadLatency.getCount()).isEqualTo(count);
+        assertThat(cfs.metric.coordinatorReadLatency.getSnapshot().getValue(0.5))
+            .isBetween((double) TimeUnit.MILLISECONDS.toMicros(5839),
+                       (double) TimeUnit.MILLISECONDS.toMicros(5840));
+        // Sanity check the metrics - 75th percentile of linear 0-10000ms
+        assertThat(cfs.metric.coordinatorWriteLatency.getCount()).isEqualTo(count);
+        assertThat(cfs.metric.coordinatorWriteLatency.getSnapshot().getValue(0.75))
+            .isBetween((double) TimeUnit.MILLISECONDS.toMicros(8409),
+                       (double) TimeUnit.MILLISECONDS.toMicros(8410));
+
+        // CF_SPEC_RETRY1 configured to use the 50th percentile for speculation
+        cfs.updateSpeculationThreshold();
+
+        assertThat(cfs.sampleReadLatencyMicros).isBetween(TimeUnit.MILLISECONDS.toMicros(5839), TimeUnit.MILLISECONDS.toMicros(5840));
+        assertThat(cfs.additionalWriteLatencyMicros).isBetween(TimeUnit.MILLISECONDS.toMicros(8409), TimeUnit.MILLISECONDS.toMicros(8410));
+    }
+
     // TODO: Fix once we have working supercolumns in 8099
 //    // CASSANDRA-3467.  the key here is that supercolumn and subcolumn comparators are different
 //    @Test
@@ -452,7 +520,7 @@
     public void reTest(ColumnFamilyStore cfs, Runnable verify) throws Exception
     {
         verify.run();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         verify.run();
     }
 
@@ -491,25 +559,81 @@
                                              .add("birthdate", 1L)
                                              .add("notbirthdate", 2L);
         new Mutation(builder.build()).applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         String snapshotName = "newSnapshot";
-        cfs.snapshotWithoutFlush(snapshotName);
+        cfs.snapshotWithoutMemtable(snapshotName);
 
         File snapshotManifestFile = cfs.getDirectories().getSnapshotManifestFile(snapshotName);
-        JSONParser parser = new JSONParser();
-        JSONObject manifest = (JSONObject) parser.parse(new FileReader(snapshotManifestFile));
-        JSONArray files = (JSONArray) manifest.get("files");
+        SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(snapshotManifestFile);
 
         // Keyspace1-Indexed1 and the corresponding index
-        assert files.size() == 2;
+        assertThat(manifest.getFiles()).hasSize(2);
 
         // Snapshot of the secondary index is stored in the subfolder with the same file name
-        String baseTableFile = (String) files.get(0);
-        String indexTableFile = (String) files.get(1);
-        assert !baseTableFile.equals(indexTableFile);
-        assert Directories.isSecondaryIndexFolder(new File(indexTableFile).getParentFile());
-        assert indexTableFile.endsWith(baseTableFile);
+        String baseTableFile = manifest.getFiles().get(0);
+        String indexTableFile = manifest.getFiles().get(1);
+        assertThat(baseTableFile).isNotEqualTo(indexTableFile);
+        assertThat(Directories.isSecondaryIndexFolder(new File(indexTableFile).parent())).isTrue();
+
+        Set<String> originalFiles = new HashSet<>();
+        Iterables.toList(cfs.concatWithIndexes()).stream()
+                 .flatMap(c -> c.getLiveSSTables().stream().map(t -> t.descriptor.filenameFor(Component.DATA)))
+                 .forEach(originalFiles::add);
+        assertThat(originalFiles.stream().anyMatch(f -> f.endsWith(indexTableFile))).isTrue();
+        assertThat(originalFiles.stream().anyMatch(f -> f.endsWith(baseTableFile))).isTrue();
+    }
+
+    private void createSnapshotAndDelete(String ks, String table, boolean writeData)
+    {
+        ColumnFamilyStore cfs = Keyspace.open(ks).getColumnFamilyStore(table);
+        if (writeData)
+        {
+            writeData(cfs);
+        }
+
+        TableSnapshot snapshot = cfs.snapshot("basic");
+
+        assertThat(snapshot.exists()).isTrue();
+        assertThat(cfs.listSnapshots().containsKey("basic")).isTrue();
+        assertThat(cfs.listSnapshots().get("basic")).isEqualTo(snapshot);
+
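+        // deleting the snapshot directories on disk should be reflected by both the snapshot handle and listSnapshots()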
+        snapshot.getDirectories().forEach(FileUtils::deleteRecursive);
+
+        assertThat(snapshot.exists()).isFalse();
+        assertFalse(cfs.listSnapshots().containsKey("basic"));
+    }
+
+    private void writeData(ColumnFamilyStore cfs)
+    {
+        if (cfs.name.equals(CF_INDEX1))
+        {
+            new RowUpdateBuilder(cfs.metadata(), 2, "key").add("birthdate", 1L).add("notbirthdate", 2L).build().applyUnsafe();
+            Util.flush(cfs);
+        }
+        else
+        {
+            new RowUpdateBuilder(cfs.metadata(), 2, "key").clustering("name").add("val", "2").build().applyUnsafe();
+            Util.flush(cfs);
+        }
+    }
+
+    @Test
+    public void testSnapshotCreationAndDeleteEmptyTable()
+    {
+        createSnapshotAndDelete(KEYSPACE1, CF_INDEX1, false);
+        createSnapshotAndDelete(KEYSPACE1, CF_STANDARD1, false);
+        createSnapshotAndDelete(KEYSPACE1, CF_STANDARD2, false);
+        createSnapshotAndDelete(KEYSPACE2, CF_STANDARD1, false);
+        createSnapshotAndDelete(SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.TRANSFERRED_RANGES_V2, false);
+    }
+
+    @Test
+    public void testSnapshotCreationAndDeletePopulatedTable()
+    {
+        createSnapshotAndDelete(KEYSPACE1, CF_INDEX1, true);
+        createSnapshotAndDelete(KEYSPACE1, CF_STANDARD1, true);
+        createSnapshotAndDelete(KEYSPACE1, CF_STANDARD2, true);
+        createSnapshotAndDelete(KEYSPACE2, CF_STANDARD1, true);
     }
 
     @Test
@@ -536,7 +660,7 @@
         ColumnFamilyStore.scrubDataDirectories(cfs.metadata());
 
         new RowUpdateBuilder(cfs.metadata(), 2, "key").clustering("name").add("val", "2").build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // Nuke the metadata and reload that sstable
         Collection<SSTableReader> ssTables = cfs.getLiveSSTables();
@@ -545,7 +669,7 @@
 
         String dataFileName = ssTable.descriptor.filenameFor(Component.DATA);
         String tmpDataFileName = ssTable.descriptor.tmpFilenameFor(Component.DATA);
-        new File(dataFileName).renameTo(new File(tmpDataFileName));
+        new File(dataFileName).tryMove(new File(tmpDataFileName));
 
         ssTable.selfRef().release();
 
@@ -556,4 +680,149 @@
         assertEquals(0, ssTableFiles.size());
         cfs.clearUnsafe();
     }
+
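+    // Sums the sizes of the snapshot's manifest and schema files (when present); testSnapshotSize uses this to
+    // verify that a snapshot whose sstables are still shared with the live table only counts these two files
+    // towards its true size.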
+    @VisibleForTesting
+    public static long getSnapshotManifestAndSchemaFileSizes(TableSnapshot snapshot) throws IOException
+    {
+        Optional<File> schemaFile = snapshot.getSchemaFile();
+        Optional<File> manifestFile = snapshot.getManifestFile();
+
+        long schemaAndManifestFileSizes = 0;
+
+        schemaAndManifestFileSizes += schemaFile.isPresent() ? schemaFile.get().length() : 0;
+        schemaAndManifestFileSizes += manifestFile.isPresent() ? manifestFile.get().length() : 0;
+
+        return schemaAndManifestFileSizes;
+    }
+
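+    // Minimal Memtable stub for testMemtableTimestamp: only the minimum timestamp handed to the
+    // AbstractMemtable constructor matters, so every other operation is a no-op returning a default value.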
+    private Memtable fakeMemTableWithMinTS(ColumnFamilyStore cfs, long minTS)
+    {
+        return new AbstractMemtable(cfs.metadata, minTS)
+        {
+
+            @Override
+            public UnfilteredRowIterator rowIterator(DecoratedKey key,
+                                                     Slices slices,
+                                                     ColumnFilter columnFilter,
+                                                     boolean reversed,
+                                                     SSTableReadsListener listener)
+            {
+                return null;
+            }
+
+            @Override
+            public UnfilteredPartitionIterator
+                   partitionIterator(ColumnFilter columnFilter, DataRange dataRange, SSTableReadsListener listener)
+            {
+                return null;
+            }
+
+            @Override
+            public void switchOut(Barrier writeBarrier, AtomicReference<CommitLogPosition> commitLogUpperBound)
+            {
+            }
+
+            @Override
+            public boolean shouldSwitch(FlushReason reason)
+            {
+                return false;
+            }
+
+            @Override
+            public long put(PartitionUpdate update, UpdateTransaction indexer, Group opGroup)
+            {
+                return 0;
+            }
+
+            @Override
+            public void performSnapshot(String snapshotName)
+            {
+            }
+
+            @Override
+            public long partitionCount()
+            {
+                return 0;
+            }
+
+            @Override
+            public void metadataUpdated()
+            {
+            }
+
+            @Override
+            public boolean mayContainDataBefore(CommitLogPosition position)
+            {
+                return false;
+            }
+
+            @Override
+            public void markExtraOnHeapUsed(long additionalSpace, Group opGroup)
+            {
+            }
+
+            @Override
+            public void markExtraOffHeapUsed(long additionalSpace, Group opGroup)
+            {
+            }
+
+            @Override
+            public void localRangesUpdated()
+            {
+            }
+
+            @Override
+            public boolean isClean()
+            {
+                return false;
+            }
+
+            @Override
+            public long getLiveDataSize()
+            {
+                return 0;
+            }
+
+            @Override
+            public FlushablePartitionSet<?> getFlushSet(PartitionPosition from, PartitionPosition to)
+            {
+                return null;
+            }
+
+            @Override
+            public LastCommitLogPosition getFinalCommitLogUpperBound()
+            {
+                return null;
+            }
+
+            @Override
+            public CommitLogPosition getCommitLogLowerBound()
+            {
+                return null;
+            }
+
+            @Override
+            public CommitLogPosition getApproximateCommitLogLowerBound()
+            {
+                return null;
+            }
+
+            @Override
+            public void discard()
+            {
+            }
+
+            @Override
+            public void addMemoryUsageTo(MemoryUsage usage)
+            {
+            }
+
+            @Override
+            public boolean accepts(Group opGroup, CommitLogPosition commitLogPosition)
+            {
+                return false;
+            }
+        };
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/ColumnsTest.java b/test/unit/org/apache/cassandra/db/ColumnsTest.java
index 6dc1832..37edcbc 100644
--- a/test/unit/org/apache/cassandra/db/ColumnsTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnsTest.java
@@ -27,9 +27,8 @@
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 
-import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.db.marshal.BytesType;
+
 import org.junit.AfterClass;
 import org.junit.Test;
 
diff --git a/test/unit/org/apache/cassandra/db/CounterCacheTest.java b/test/unit/org/apache/cassandra/db/CounterCacheTest.java
index 62be22b..2b743a9 100644
--- a/test/unit/org/apache/cassandra/db/CounterCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/CounterCacheTest.java
@@ -22,6 +22,8 @@
 
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.dht.Bounds;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.schema.KeyspaceParams;
@@ -191,7 +193,8 @@
         CacheService.instance.invalidateCounterCache();
         assertEquals(0, CacheService.instance.counterCache.size());
 
-        Keyspace ks = Schema.instance.removeKeyspaceInstance(KEYSPACE1);
+        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(KEYSPACE1);
+        SchemaTestUtil.dropKeyspaceIfExist(KEYSPACE1, true);
 
         try
         {
@@ -201,7 +204,7 @@
         }
         finally
         {
-            Schema.instance.storeKeyspaceInstance(ks);
+            SchemaTestUtil.addOrUpdateKeyspace(ksm, true);
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/db/DeletePartitionTest.java b/test/unit/org/apache/cassandra/db/DeletePartitionTest.java
index 6ed43f7..34a2b83 100644
--- a/test/unit/org/apache/cassandra/db/DeletePartitionTest.java
+++ b/test/unit/org/apache/cassandra/db/DeletePartitionTest.java
@@ -30,8 +30,8 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class DeletePartitionTest
 {
@@ -75,7 +75,7 @@
         assertTrue(r.getCell(column).value().equals(ByteBufferUtil.bytes("asdf")));
 
         if (flushBeforeRemove)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         // delete the partition
         new Mutation.PartitionUpdateCollector(KEYSPACE1, key)
@@ -84,7 +84,7 @@
                 .applyUnsafe();
 
         if (flushAfterRemove)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         // validate removal
         ImmutableBTreePartition partitionUnfiltered = Util.getOnlyPartitionUnfiltered(Util.cmd(store, key).build());
diff --git a/test/unit/org/apache/cassandra/db/DigestTest.java b/test/unit/org/apache/cassandra/db/DigestTest.java
index 4fd12d0..4719726 100644
--- a/test/unit/org/apache/cassandra/db/DigestTest.java
+++ b/test/unit/org/apache/cassandra/db/DigestTest.java
@@ -29,8 +29,8 @@
 
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Hex;
-import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 
@@ -93,7 +93,7 @@
                            new Digest(Hashing.murmur3_128(1000).newHasher()),
                            new Digest(Hashing.murmur3_128(2000).newHasher())
                            };
-        byte [] random = UUIDGen.getTimeUUIDBytes();
+        byte [] random = nextTimeUUIDAsBytes();
 
         for (Digest digest : digests)
         {
diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
index 022c3a0..c701516 100644
--- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
+++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
@@ -17,16 +17,29 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.util.*;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import com.google.common.collect.Sets;
 import org.apache.commons.lang3.StringUtils;
@@ -36,7 +49,10 @@
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
+import org.apache.cassandra.Util;
 import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 // Our version of Slf4j seems to be missing the ListAppender class.
@@ -47,14 +63,11 @@
 import ch.qos.logback.classic.spi.ILoggingEvent;
 import ch.qos.logback.core.read.ListAppender;
 
-import org.apache.cassandra.cql3.ColumnIdentifier;
-import org.apache.cassandra.schema.Indexes;
-import org.apache.cassandra.schema.SchemaConstants;
-import org.apache.cassandra.schema.SchemaKeyspaceTables;
-import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.auth.AuthKeyspace;
 import org.apache.cassandra.config.Config.DiskFailurePolicy;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.DurationSpec;
+import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.statements.schema.IndexTarget;
 import org.apache.cassandra.db.Directories.DataDirectories;
 import org.apache.cassandra.db.Directories.DataDirectory;
@@ -63,25 +76,60 @@
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SSTableId;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.io.sstable.UUIDBasedSSTableId;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.IndexMetadata;
+import org.apache.cassandra.schema.Indexes;
+import org.apache.cassandra.schema.MockSchema;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.SchemaKeyspaceTables;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.DefaultFSErrorHandler;
+import org.apache.cassandra.service.snapshot.SnapshotManifest;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
+import static org.apache.cassandra.schema.MockSchema.sstableId;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.FBUtilities.now;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+@RunWith(Parameterized.class)
 public class DirectoriesTest
 {
+    public static final String TABLE_NAME = "FakeTable";
+    public static final String SNAPSHOT1 = "snapshot1";
+    public static final String SNAPSHOT2 = "snapshot2";
+
+    public static final String LEGACY_SNAPSHOT_NAME = "42";
     private static File tempDataDir;
     private static final String KS = "ks";
     private static String[] TABLES;
     private static Set<TableMetadata> CFM;
-    private static Map<String, List<File>> files;
+    private static Map<String, List<File>> sstablesByTableName;
+
+    @Parameterized.Parameter(0)
+    public SSTableId.Builder<? extends SSTableId> idBuilder;
+
+    @Parameterized.Parameter(1)
+    public Supplier<? extends SSTableId> idGenerator;
+
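+    // run every test with both the legacy sequence-based and the new UUID-based sstable id generators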
+    @Parameterized.Parameters
+    public static Collection<Object[]> idBuilders()
+    {
+        return Arrays.asList(new Object[]{ SequenceBasedSSTableId.Builder.instance, Util.newSeqGen() },
+                             new Object[]{ UUIDBasedSSTableId.Builder.instance, Util.newUUIDGen() });
+    }
 
 
     private static final String MDCID = "test-DirectoriesTest-id";
@@ -100,21 +148,21 @@
     @Before
     public void beforeTest() throws IOException
     {
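+        // reset MockSchema so each parameterized run starts with a clean id mapping and its own generator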
+        MockSchema.sstableIds.clear();
+        MockSchema.sstableIdGenerator = idGenerator;
+
         TABLES = new String[] { "cf1", "ks" };
         CFM = new HashSet<>(TABLES.length);
-        files = new HashMap<>();
+        sstablesByTableName = new HashMap<>();
 
         for (String table : TABLES)
         {
-            CFM.add(TableMetadata.builder(KS, table)
-                                 .addPartitionKeyColumn("thekey", UTF8Type.instance)
-                                 .addClusteringColumn("thecolumn", UTF8Type.instance)
-                                 .build());
+            CFM.add(createFakeTable(table));
         }
 
         tempDataDir = FileUtils.createTempFile("cassandra", "unittest");
-        tempDataDir.delete(); // hack to create a temp dir
-        tempDataDir.mkdir();
+        tempDataDir.tryDelete(); // hack to create a temp dir
+        tempDataDir.tryCreateDirectory();
 
         // Create two fake data dirs for tests, one using CF directories, one that does not.
         createTestFiles();
@@ -148,37 +196,95 @@
         return dirs;
     }
 
-    private static void createTestFiles() throws IOException
+    private void createTestFiles() throws IOException
     {
         for (TableMetadata cfm : CFM)
         {
-            List<File> fs = new ArrayList<>();
-            files.put(cfm.name, fs);
-            File dir = cfDir(cfm);
-            dir.mkdirs();
+            List<File> allSStables = new ArrayList<>();
+            sstablesByTableName.put(cfm.name, allSStables);
+            File tableDir = cfDir(cfm);
+            tableDir.tryCreateDirectories();
 
-            createFakeSSTable(dir, cfm.name, 1, fs);
-            createFakeSSTable(dir, cfm.name, 2, fs);
+            allSStables.addAll(createFakeSSTable(tableDir, cfm.name, 1));
+            allSStables.addAll(createFakeSSTable(tableDir, cfm.name, 2));
 
-            File backupDir = new File(dir, Directories.BACKUPS_SUBDIR);
-            backupDir.mkdir();
-            createFakeSSTable(backupDir, cfm.name, 1, fs);
+            File backupDir = new File(tableDir, Directories.BACKUPS_SUBDIR);
+            backupDir.tryCreateDirectory();
+            allSStables.addAll(createFakeSSTable(backupDir, cfm.name, 1));
 
-            File snapshotDir = new File(dir, Directories.SNAPSHOT_SUBDIR + File.separator + "42");
-            snapshotDir.mkdirs();
-            createFakeSSTable(snapshotDir, cfm.name, 1, fs);
+            File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + LEGACY_SNAPSHOT_NAME);
+            snapshotDir.tryCreateDirectories();
+            allSStables.addAll(createFakeSSTable(snapshotDir, cfm.name, 1));
         }
     }
 
-    private static void createFakeSSTable(File dir, String cf, int gen, List<File> addTo) throws IOException
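+    // Lightweight stand-in for an on-disk snapshot: keeps the table, tag, directory and optional manifest,
+    // and converts to the TableSnapshot that Directories.listSnapshots() is expected to report.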
+    class FakeSnapshot
+    {
+        final TableMetadata table;
+        final String tag;
+        final File snapshotDir;
+        final SnapshotManifest manifest;
+
+        FakeSnapshot(TableMetadata table, String tag, File snapshotDir, SnapshotManifest manifest)
+        {
+            this.table = table;
+            this.tag = tag;
+            this.snapshotDir = snapshotDir;
+            this.manifest = manifest;
+        }
+
+        public TableSnapshot asTableSnapshot()
+        {
+            Instant createdAt = manifest == null ? null : manifest.createdAt;
+            Instant expiresAt = manifest == null ? null : manifest.expiresAt;
+            return new TableSnapshot(table.keyspace, table.name, table.id.asUUID(), tag, createdAt, expiresAt, Collections.singleton(snapshotDir));
+        }
+    }
+
+    private TableMetadata createFakeTable(String table)
     {
-        Descriptor desc = new Descriptor(dir, KS, cf, gen, SSTableFormat.Type.BIG);
+        return TableMetadata.builder(KS, table)
+                            .addPartitionKeyColumn("thekey", UTF8Type.instance)
+                            .addClusteringColumn("thecolumn", UTF8Type.instance)
+                            .build();
+    }
+
+    public FakeSnapshot createFakeSnapshot(TableMetadata table, String tag, boolean createManifest) throws IOException
+    {
+        File tableDir = cfDir(table);
+        tableDir.tryCreateDirectories();
+        File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + tag);
+        snapshotDir.tryCreateDirectories();
+
+        Descriptor sstableDesc = new Descriptor(snapshotDir, KS, table.name, sstableId(1), SSTableFormat.Type.BIG);
+        createFakeSSTable(sstableDesc);
+
+        SnapshotManifest manifest = null;
+        if (createManifest)
+        {
+            File manifestFile = Directories.getSnapshotManifestFile(snapshotDir);
+            manifest = new SnapshotManifest(Collections.singletonList(sstableDesc.filenameFor(Component.DATA)), new DurationSpec.IntSecondsBound("1m"), now());
+            manifest.serializeToJsonFile(manifestFile);
+        }
+
+        return new FakeSnapshot(table, tag, snapshotDir, manifest);
+    }
+
+    private List<File> createFakeSSTable(File dir, String cf, int gen)
+    {
+        Descriptor desc = new Descriptor(dir, KS, cf, sstableId(gen), SSTableFormat.Type.BIG);
+        return createFakeSSTable(desc);
+    }
+
+    private List<File> createFakeSSTable(Descriptor desc)
+    {
+        List<File> components = new ArrayList<>(3);
         for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER })
         {
             File f = new File(desc.filenameFor(c));
-            f.createNewFile();
-            addTo.add(f);
+            f.createFileIfNotExists();
+            components.add(f);
         }
+        return components;
     }
 
     private static File cfDir(TableMetadata metadata)
@@ -189,13 +295,13 @@
         {
             // secondary index
             return new File(tempDataDir,
-                            metadata.keyspace + File.separator +
-                            metadata.name.substring(0, idx) + '-' + tableId + File.separator +
+                            metadata.keyspace + File.pathSeparator() +
+                            metadata.name.substring(0, idx) + '-' + tableId + File.pathSeparator() +
                             metadata.name.substring(idx));
         }
         else
         {
-            return new File(tempDataDir, metadata.keyspace + File.separator + metadata.name + '-' + tableId);
+            return new File(tempDataDir, metadata.keyspace + File.pathSeparator() + metadata.name + '-' + tableId);
         }
     }
 
@@ -207,12 +313,93 @@
             Directories directories = new Directories(cfm, toDataDirectories(tempDataDir));
             assertEquals(cfDir(cfm), directories.getDirectoryForNewSSTables());
 
-            Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, 1, SSTableFormat.Type.BIG);
-            File snapshotDir = new File(cfDir(cfm),  File.separator + Directories.SNAPSHOT_SUBDIR + File.separator + "42");
-            assertEquals(snapshotDir.getCanonicalFile(), Directories.getSnapshotDirectory(desc, "42"));
+            Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, sstableId(1), SSTableFormat.Type.BIG);
+            File snapshotDir = new File(cfDir(cfm), File.pathSeparator() + Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + LEGACY_SNAPSHOT_NAME);
+            assertEquals(snapshotDir.toCanonical(), Directories.getSnapshotDirectory(desc, LEGACY_SNAPSHOT_NAME));
 
-            File backupsDir = new File(cfDir(cfm),  File.separator + Directories.BACKUPS_SUBDIR);
-            assertEquals(backupsDir.getCanonicalFile(), Directories.getBackupsDirectory(desc));
+            File backupsDir = new File(cfDir(cfm), File.pathSeparator() + Directories.BACKUPS_SUBDIR);
+            assertEquals(backupsDir.toCanonical(), Directories.getBackupsDirectory(desc));
+
+            Supplier<? extends SSTableId> uidGen = directories.getUIDGenerator(idBuilder);
+            assertThat(Stream.generate(uidGen).limit(100).filter(MockSchema.sstableIds::containsValue).collect(Collectors.toList())).isEmpty();
+        }
+    }
+
+    @Test
+    public void testListSnapshots() throws Exception
+    {
+        // Initial state
+        TableMetadata fakeTable = createFakeTable(TABLE_NAME);
+        Directories directories = new Directories(fakeTable, toDataDirectories(tempDataDir));
+        assertThat(directories.listSnapshots()).isEmpty();
+
+        // Create snapshot with and without manifest
+        FakeSnapshot snapshot1 = createFakeSnapshot(fakeTable, SNAPSHOT1, true);
+        FakeSnapshot snapshot2 = createFakeSnapshot(fakeTable, SNAPSHOT2, false);
+
+        // Both snapshots should be present
+        Map<String, TableSnapshot> snapshots = directories.listSnapshots();
+        assertThat(snapshots.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT1, SNAPSHOT2));
+        assertThat(snapshots.get(SNAPSHOT1)).isEqualTo(snapshot1.asTableSnapshot());
+        assertThat(snapshots.get(SNAPSHOT2)).isEqualTo(snapshot2.asTableSnapshot());
+
+        // Now remove snapshot1
+        FileUtils.deleteRecursive(snapshot1.snapshotDir);
+
+        // Only snapshot 2 should be present
+        snapshots = directories.listSnapshots();
+        assertThat(snapshots.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT2));
+        assertThat(snapshots.get(SNAPSHOT2)).isEqualTo(snapshot2.asTableSnapshot());
+    }
+
+    @Test
+    public void testListSnapshotDirsByTag() throws Exception
+    {
+        // Initial state
+        TableMetadata fakeTable = createFakeTable("FakeTable");
+        Directories directories = new Directories(fakeTable, toDataDirectories(tempDataDir));
+        assertThat(directories.listSnapshotDirsByTag()).isEmpty();
+
+        // Create snapshot with and without manifest
+        FakeSnapshot snapshot1 = createFakeSnapshot(fakeTable, SNAPSHOT1, true);
+        FakeSnapshot snapshot2 = createFakeSnapshot(fakeTable, SNAPSHOT2, false);
+
+        // Both snapshots should be present
+        Map<String, Set<File>> snapshotDirs = directories.listSnapshotDirsByTag();
+        assertThat(snapshotDirs.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT1, SNAPSHOT2));
+        assertThat(snapshotDirs.get(SNAPSHOT1)).allMatch(snapshotDir -> snapshotDir.equals(snapshot1.snapshotDir));
+        assertThat(snapshotDirs.get(SNAPSHOT2)).allMatch(snapshotDir -> snapshotDir.equals(snapshot2.snapshotDir));
+
+        // Now remove snapshot1
+        FileUtils.deleteRecursive(snapshot1.snapshotDir);
+
+        // Only snapshot 2 should be present
+        snapshotDirs = directories.listSnapshotDirsByTag();
+        assertThat(snapshotDirs.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT2));
+    }
+
+    @Test
+    public void testMaybeManifestLoading() throws Exception
+    {
+        for (TableMetadata cfm : CFM)
+        {
+            String tag = "test";
+            Directories directories = new Directories(cfm, toDataDirectories(tempDataDir));
+            Descriptor parentDesc = new Descriptor(directories.getDirectoryForNewSSTables(), KS, cfm.name, sstableId(0), SSTableFormat.Type.BIG);
+            File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, tag);
+
+            List<String> files = new LinkedList<>();
+            files.add(parentSnapshotDirectory.toAbsolute().absolutePath());
+
+            File manifestFile = directories.getSnapshotManifestFile(tag);
+
+            SnapshotManifest manifest = new SnapshotManifest(files, new DurationSpec.IntSecondsBound("1m"), now());
+            manifest.serializeToJsonFile(manifestFile);
+
+            Set<File> dirs = new HashSet<>();
+
+            dirs.add(manifestFile.parent());
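+            // also add a directory that does not exist; manifest loading should simply skip it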
+            dirs.add(new File("buzz"));
+            SnapshotManifest loadedManifest = Directories.maybeLoadManifest(KS, cfm.name, tag, dirs);
+
+            assertEquals(manifest, loadedManifest);
         }
     }
 
@@ -242,46 +429,44 @@
         {
             assertEquals(cfDir(INDEX_CFM), dir);
         }
-        Descriptor parentDesc = new Descriptor(parentDirectories.getDirectoryForNewSSTables(), KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
-        Descriptor indexDesc = new Descriptor(indexDirectories.getDirectoryForNewSSTables(), KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
+        Descriptor parentDesc = new Descriptor(parentDirectories.getDirectoryForNewSSTables(), KS, PARENT_CFM.name, sstableId(0), SSTableFormat.Type.BIG);
+        Descriptor indexDesc = new Descriptor(indexDirectories.getDirectoryForNewSSTables(), KS, INDEX_CFM.name, sstableId(0), SSTableFormat.Type.BIG);
 
         // snapshot dir should be created under its parent's
         File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, "test");
         File indexSnapshotDirectory = Directories.getSnapshotDirectory(indexDesc, "test");
-        assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.getParentFile());
+        assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.parent());
 
         // check if snapshot directory exists
+        parentSnapshotDirectory.tryCreateDirectories();
         assertTrue(parentDirectories.snapshotExists("test"));
         assertTrue(indexDirectories.snapshotExists("test"));
 
         // check true snapshot size
-        Descriptor parentSnapshot = new Descriptor(parentSnapshotDirectory, KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
+        Descriptor parentSnapshot = new Descriptor(parentSnapshotDirectory, KS, PARENT_CFM.name, sstableId(0), SSTableFormat.Type.BIG);
         createFile(parentSnapshot.filenameFor(Component.DATA), 30);
-        Descriptor indexSnapshot = new Descriptor(indexSnapshotDirectory, KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
+        Descriptor indexSnapshot = new Descriptor(indexSnapshotDirectory, KS, INDEX_CFM.name, sstableId(0), SSTableFormat.Type.BIG);
         createFile(indexSnapshot.filenameFor(Component.DATA), 40);
 
         assertEquals(30, parentDirectories.trueSnapshotsSize());
         assertEquals(40, indexDirectories.trueSnapshotsSize());
 
         // check snapshot details
-        Map<String, Directories.SnapshotSizeDetails> parentSnapshotDetail = parentDirectories.getSnapshotDetails();
+        Map<String, TableSnapshot> parentSnapshotDetail = parentDirectories.listSnapshots();
         assertTrue(parentSnapshotDetail.containsKey("test"));
-        assertEquals(30L, parentSnapshotDetail.get("test").dataSizeBytes);
-
-        Map<String, Directories.SnapshotSizeDetails> indexSnapshotDetail = indexDirectories.getSnapshotDetails();
-        assertTrue(indexSnapshotDetail.containsKey("test"));
-        assertEquals(40L, indexSnapshotDetail.get("test").dataSizeBytes);
+        // CASSANDRA-17357: include indexes when computing true size of parent table
+        assertEquals(70L, parentSnapshotDetail.get("test").computeTrueSizeBytes());
 
         // check backup directory
         File parentBackupDirectory = Directories.getBackupsDirectory(parentDesc);
         File indexBackupDirectory = Directories.getBackupsDirectory(indexDesc);
-        assertEquals(parentBackupDirectory, indexBackupDirectory.getParentFile());
+        assertEquals(parentBackupDirectory, indexBackupDirectory.parent());
     }
 
     private File createFile(String fileName, int size)
     {
         File newFile = new File(fileName);
-        try (FileOutputStream writer = new FileOutputStream(newFile))
+        try (FileOutputStreamPlus writer = new FileOutputStreamPlus(newFile);)
         {
             writer.write(new byte[size]);
             writer.flush();
@@ -306,9 +491,9 @@
         Set<File> listed;// List all but no snapshot, backup
         lister = directories.sstableLister(Directories.OnTxnErr.THROW);
         listed = new HashSet<>(lister.listFiles());
-        for (File f : files.get(cfm.name))
+        for (File f : sstablesByTableName.get(cfm.name))
         {
-            if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR) || f.getPath().contains(Directories.BACKUPS_SUBDIR))
+            if (f.path().contains(Directories.SNAPSHOT_SUBDIR) || f.path().contains(Directories.BACKUPS_SUBDIR))
                 assertFalse(f + " should not be listed", listed.contains(f));
             else
                 assertTrue(f + " is missing", listed.contains(f));
@@ -317,9 +502,9 @@
         // List all but including backup (but no snapshot)
         lister = directories.sstableLister(Directories.OnTxnErr.THROW).includeBackups(true);
         listed = new HashSet<>(lister.listFiles());
-        for (File f : files.get(cfm.name))
+        for (File f : sstablesByTableName.get(cfm.name))
         {
-            if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR))
+            if (f.path().contains(Directories.SNAPSHOT_SUBDIR))
                 assertFalse(f + " should not be listed", listed.contains(f));
             else
                 assertTrue(f + " is missing", listed.contains(f));
@@ -328,11 +513,11 @@
         // Skip temporary and compacted
         lister = directories.sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
         listed = new HashSet<>(lister.listFiles());
-        for (File f : files.get(cfm.name))
+        for (File f : sstablesByTableName.get(cfm.name))
         {
-            if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR) || f.getPath().contains(Directories.BACKUPS_SUBDIR))
+            if (f.path().contains(Directories.SNAPSHOT_SUBDIR) || f.path().contains(Directories.BACKUPS_SUBDIR))
                 assertFalse(f + " should not be listed", listed.contains(f));
-            else if (f.getName().contains("tmp-"))
+            else if (f.name().contains("tmp-"))
                 assertFalse(f + " should not be listed", listed.contains(f));
             else
                 assertTrue(f + " is missing", listed.contains(f));
@@ -347,9 +532,9 @@
             Directories directories = new Directories(cfm, toDataDirectories(tempDataDir));
 
             File tempDir = directories.getTemporaryWriteableDirectoryAsFile(10);
-            tempDir.mkdir();
+            tempDir.tryCreateDirectory();
             File tempFile = new File(tempDir, "tempFile");
-            tempFile.createNewFile();
+            tempFile.createFileIfNotExists();
 
             assertTrue(tempDir.exists());
             assertTrue(tempFile.exists());
@@ -383,14 +568,13 @@
             if (!directories.isEmpty())
             {
                 String[] path = new String[] {KS, "bad"};
-                File dir = new File(first.location, StringUtils.join(path, File.separator));
+                File dir = new File(first.location, StringUtils.join(path, File.pathSeparator()));
                 JVMStabilityInspector.inspectThrowable(new FSWriteError(new IOException("Unable to create directory " + dir), dir));
             }
 
-            File file = new File(first.location, new File(KS, "bad").getPath());
+            File file = new File(first.location, new File(KS, "bad").path());
             assertTrue(DisallowedDirectories.isUnwritable(file));
-
-        } 
+        }
         finally 
         {
             DatabaseDescriptor.setDiskFailurePolicy(origPolicy);
@@ -404,10 +588,10 @@
         {
             final Directories directories = new Directories(cfm, toDataDirectories(tempDataDir));
             assertEquals(cfDir(cfm), directories.getDirectoryForNewSSTables());
-            final String n = Long.toString(System.nanoTime());
+            final String n = Long.toString(nanoTime());
             Callable<File> directoryGetter = new Callable<File>() {
                 public File call() throws Exception {
-                    Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, 1, SSTableFormat.Type.BIG);
+                    Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, sstableId(1), SSTableFormat.Type.BIG);
                     return Directories.getSnapshotDirectory(desc, n);
                 }
             };
@@ -533,8 +717,8 @@
             Directories dirs = new Directories(cfm, paths);
             for (DataDirectory dir : paths)
             {
-                String p = dirs.getLocationForDisk(dir).getAbsolutePath() + File.separator;
-                assertTrue(p.startsWith(dir.location.getAbsolutePath() + File.separator));
+                String p = dirs.getLocationForDisk(dir).absolutePath() + File.pathSeparator();
+                assertTrue(p.startsWith(dir.location.absolutePath() + File.pathSeparator()));
             }
         }
     }
@@ -550,11 +734,11 @@
         Path p2 = Files.createDirectories(ddir.resolve("p2"));
         Path l1 = Files.createSymbolicLink(p2.resolve("ks"), symlinktarget);
 
-        DataDirectory path1 = new DataDirectory(p1.toFile());
-        DataDirectory path2 = new DataDirectory(p2.toFile());
+        DataDirectory path1 = new DataDirectory(new File(p1));
+        DataDirectory path2 = new DataDirectory(new File(p2));
         Directories dirs = new Directories(CFM.iterator().next(), new DataDirectory[] {path1, path2});
-        dirs.getLocationForDisk(new DataDirectory(p1.toFile()));
-        dirs.getLocationForDisk(new DataDirectory(p2.toFile()));
+        dirs.getLocationForDisk(new DataDirectory(new File(p1)));
+        dirs.getLocationForDisk(new DataDirectory(new File(p2)));
 
         assertTrue(dirs.getLocationForDisk(path2).toPath().startsWith(l1));
         assertTrue(dirs.getLocationForDisk(path1).toPath().startsWith(p1));
@@ -574,8 +758,8 @@
             for (DataDirectory dir : paths)
             {
                 Descriptor d = Descriptor.fromFilename(new File(dir.location, getNewFilename(cfm, false)).toString());
-                String p = dirs.getDataDirectoryForFile(d).location.getAbsolutePath() + File.separator;
-                assertTrue(p.startsWith(dir.location.getAbsolutePath() + File.separator));
+                String p = dirs.getDataDirectoryForFile(d).location.absolutePath() + File.pathSeparator();
+                assertTrue(p.startsWith(dir.location.absolutePath() + File.pathSeparator()));
             }
         }
     }
@@ -596,16 +780,16 @@
         Path symlinktarget = Files.createDirectories(p.resolve("symlinktarget"));
         Path ddir1 = Files.createDirectories(p.resolve("datadir1"));
         Path ddir2 = Files.createSymbolicLink(p.resolve("datadir11"), symlinktarget);
-        DataDirectory dd1 = new DataDirectory(ddir1.toFile());
-        DataDirectory dd2 = new DataDirectory(ddir2.toFile());
+        DataDirectory dd1 = new DataDirectory(new File(ddir1));
+        DataDirectory dd2 = new DataDirectory(new File(ddir2));
 
         for (TableMetadata tm : CFM)
         {
             Directories dirs = new Directories(tm, Sets.newHashSet(dd1, dd2));
-            Descriptor desc = Descriptor.fromFilename(ddir1.resolve(getNewFilename(tm, false)).toFile());
-            assertEquals(ddir1.toFile(), dirs.getDataDirectoryForFile(desc).location);
-            desc = Descriptor.fromFilename(ddir2.resolve(getNewFilename(tm, false)).toFile());
-            assertEquals(ddir2.toFile(), dirs.getDataDirectoryForFile(desc).location);
+            Descriptor desc = Descriptor.fromFilename(new File(ddir1.resolve(getNewFilename(tm, false))));
+            assertEquals(new File(ddir1), dirs.getDataDirectoryForFile(desc).location);
+            desc = Descriptor.fromFilename(new File(ddir2.resolve(getNewFilename(tm, false))));
+            assertEquals(new File(ddir2), dirs.getDataDirectoryForFile(desc).location);
         }
     }
 
@@ -647,15 +831,15 @@
             Files.createSymbolicLink(keyspacedir.resolve(tabledir), symlinktarget);
         }
 
-        DataDirectory dd1 = new DataDirectory(ddir1.toFile());
-        DataDirectory dd2 = new DataDirectory(ddir2.toFile());
+        DataDirectory dd1 = new DataDirectory(new File(ddir1));
+        DataDirectory dd2 = new DataDirectory(new File(ddir2));
         for (TableMetadata tm : CFM)
         {
             Directories dirs = new Directories(tm, Sets.newHashSet(dd1, dd2));
-            Descriptor desc = Descriptor.fromFilename(ddir1.resolve(getNewFilename(tm, oldStyle)).toFile());
-            assertEquals(ddir1.toFile(), dirs.getDataDirectoryForFile(desc).location);
-            desc = Descriptor.fromFilename(ddir2.resolve(getNewFilename(tm, oldStyle)).toFile());
-            assertEquals(ddir2.toFile(), dirs.getDataDirectoryForFile(desc).location);
+            Descriptor desc = Descriptor.fromFilename(new File(ddir1.resolve(getNewFilename(tm, oldStyle))));
+            assertEquals(new File(ddir1), dirs.getDataDirectoryForFile(desc).location);
+            desc = Descriptor.fromFilename(new File(ddir2.resolve(getNewFilename(tm, oldStyle))));
+            assertEquals(new File(ddir2), dirs.getDataDirectoryForFile(desc).location);
         }
     }
 
@@ -687,11 +871,11 @@
 
         Iterator<DataDirectory> iter = directories.iterator();
         assertTrue(iter.hasNext());
-        assertEquals(new DataDirectory(subDir_1.toFile()), iter.next());
+        assertEquals(new DataDirectory(new File(subDir_1)), iter.next());
         assertTrue(iter.hasNext());
-        assertEquals(new DataDirectory(subDir_2.toFile()), iter.next());
+        assertEquals(new DataDirectory(new File(subDir_2)), iter.next());
         assertTrue(iter.hasNext());
-        assertEquals(new DataDirectory(subDir_3.toFile()), iter.next());
+        assertEquals(new DataDirectory(new File(subDir_3)), iter.next());
         assertFalse(iter.hasNext());
 
         directories = new DataDirectories(new String[]{subDir_1.toString(), subDir_2.toString()},
@@ -699,15 +883,15 @@
 
         iter = directories.iterator();
         assertTrue(iter.hasNext());
-        assertEquals(new DataDirectory(subDir_1.toFile()), iter.next());
+        assertEquals(new DataDirectory(new File(subDir_1)), iter.next());
         assertTrue(iter.hasNext());
-        assertEquals(new DataDirectory(subDir_2.toFile()), iter.next());
+        assertEquals(new DataDirectory(new File(subDir_2)), iter.next());
         assertFalse(iter.hasNext());
     }
 
     private String getNewFilename(TableMetadata tm, boolean oldStyle)
     {
-        return tm.keyspace + File.separator + tm.name + (oldStyle ? "" : Component.separator + tm.id.toHexString()) + "/na-1-big-Data.db";
+        return tm.keyspace + File.pathSeparator() + tm.name + (oldStyle ? "" : Component.separator + tm.id.toHexString()) + "/na-1-big-Data.db";
     }
 
     private List<Directories.DataDirectoryCandidate> getWriteableDirectories(DataDirectory[] dataDirectories, long writeSize)
@@ -718,14 +902,14 @@
         long totalAvailable = 0L;
 
         for (DataDirectory dataDir : dataDirectories)
-            {
-                Directories.DataDirectoryCandidate candidate = new Directories.DataDirectoryCandidate(dataDir);
-                // exclude directory if its total writeSize does not fit to data directory
-                if (candidate.availableSpace < writeSize)
-                    continue;
-                candidates.add(candidate);
-                totalAvailable += candidate.availableSpace;
-            }
+        {
+            Directories.DataDirectoryCandidate candidate = new Directories.DataDirectoryCandidate(dataDir);
+            // exclude directory if its total writeSize does not fit to data directory
+            if (candidate.availableSpace < writeSize)
+                continue;
+            candidates.add(candidate);
+            totalAvailable += candidate.availableSpace;
+        }
 
         Directories.sortWriteableCandidates(candidates, totalAvailable);
 
diff --git a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
index 3cd501e..cdf9a9a 100644
--- a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.net.UnknownHostException;
 import java.util.List;
 
@@ -27,8 +26,10 @@
 import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.dht.BootStrapper;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.service.StorageService;
@@ -37,7 +38,6 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class DiskBoundaryManagerTest extends CQLTester
@@ -121,7 +121,7 @@
     {
         MockCFS(ColumnFamilyStore cfs, Directories dirs)
         {
-            super(cfs.keyspace, cfs.getTableName(), 0, cfs.metadata, dirs, false, false, true);
+            super(cfs.keyspace, cfs.getTableName(), Util.newSeqGen(), cfs.metadata, dirs, false, false, true);
         }
     }
 }
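The MockCFS constructor change above swaps a hard-coded generation of 0 for Util.newSeqGen(). The helper's body is not shown in this diff; purely as a hedged illustration, a sequential supplier along these lines captures the idea of giving each store a fresh, monotonically increasing id source:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Supplier;

    // Hypothetical sketch, not org.apache.cassandra.Util.newSeqGen(): each call to get()
    // yields the next integer, standing in for a per-test SSTable id/generation source.
    final class SeqGenSketch
    {
        static Supplier<Integer> newSeqGen()
        {
            AtomicInteger next = new AtomicInteger();
            return next::incrementAndGet;
        }
    }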
diff --git a/test/unit/org/apache/cassandra/db/ImportTest.java b/test/unit/org/apache/cassandra/db/ImportTest.java
index a760768..ff843fa 100644
--- a/test/unit/org/apache/cassandra/db/ImportTest.java
+++ b/test/unit/org/apache/cassandra/db/ImportTest.java
@@ -18,9 +18,8 @@
 
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Collections;
@@ -33,9 +32,9 @@
 import com.google.common.collect.Sets;
 import org.apache.commons.lang3.StringUtils;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cache.RowCacheKey;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.UntypedResultSet;
@@ -43,10 +42,12 @@
 import org.apache.cassandra.dht.BootStrapper;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.assertEquals;
@@ -55,6 +56,7 @@
 
 public class ImportTest extends CQLTester
 {
+
     @Test
     public void basicImportByMovingTest() throws Throwable
     {
@@ -84,8 +86,7 @@
         {
             execute("insert into %s (id, d) values (?, ?)", i, i);
         }
-
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
 
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
@@ -110,14 +111,14 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
 
         File backupdir = moveToBackupDir(sstables);
         for (int i = 10; i < 20; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
 
@@ -141,7 +142,7 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
         sstables.forEach(s -> s.selfRef().release());
@@ -156,7 +157,7 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
         for (SSTableReader sstable : sstables)
@@ -193,7 +194,7 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
         for (SSTableReader sstable : sstables)
@@ -227,36 +228,36 @@
     {
         Path temp = Files.createTempDirectory("importtest");
         SSTableReader sst = sstables.iterator().next();
-        String tabledir = sst.descriptor.directory.getName();
-        String ksdir = sst.descriptor.directory.getParentFile().getName();
+        String tabledir = sst.descriptor.directory.name();
+        String ksdir = sst.descriptor.directory.parent().name();
         Path backupdir = createDirectories(temp.toString(), ksdir, tabledir);
         for (SSTableReader sstable : sstables)
         {
             sstable.selfRef().release();
-            for (File f : sstable.descriptor.directory.listFiles())
+            for (File f : sstable.descriptor.directory.tryList())
             {
                 if (f.toString().contains(sstable.descriptor.baseFilename()))
                 {
                     System.out.println("move " + f.toPath() + " to " + backupdir);
-                    File moveFileTo = new File(backupdir.toFile(), f.getName());
+                    File moveFileTo = new File(backupdir, f.name());
                     moveFileTo.deleteOnExit();
                     Files.move(f.toPath(), moveFileTo.toPath());
                 }
             }
         }
-        return backupdir.toFile();
+        return new File(backupdir);
     }
 
     private Path createDirectories(String base, String ... subdirs)
     {
         File b = new File(base);
-        b.mkdir();
+        b.tryCreateDirectory();
         System.out.println("mkdir "+b);
         b.deleteOnExit();
         for (String subdir : subdirs)
         {
             b = new File(b, subdir);
-            b.mkdir();
+            b.tryCreateDirectory();
             System.out.println("mkdir "+b);
             b.deleteOnExit();
         }
@@ -275,7 +276,7 @@
         for (int i = 0; i < 10; i++)
         {
             execute("insert into %s (id, d) values (?, ?)", i, i);
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
         }
 
         Set<SSTableReader> toMove = getCurrentColumnFamilyStore().getLiveSSTables();
@@ -291,8 +292,8 @@
         importer.importNewSSTables(SSTableImporter.Options.options(dir.toString()).build());
         for (SSTableReader sstable : mock.getLiveSSTables())
         {
-            File movedDir = sstable.descriptor.directory.getCanonicalFile();
-            File correctDir = mock.getDiskBoundaries().getCorrectDiskForSSTable(sstable).location.getCanonicalFile();
+            File movedDir = sstable.descriptor.directory.toCanonical();
+            File correctDir = mock.getDiskBoundaries().getCorrectDiskForSSTable(sstable).location.toCanonical();
             assertTrue(movedDir.toString().startsWith(correctDir.toString()));
         }
         for (SSTableReader sstable : mock.getLiveSSTables())
@@ -304,20 +305,20 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         SSTableReader sstableToCorrupt = getCurrentColumnFamilyStore().getLiveSSTables().iterator().next();
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i + 10, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
 
         getCurrentColumnFamilyStore().clearUnsafe();
 
         String filenameToCorrupt = sstableToCorrupt.descriptor.filenameFor(Component.STATS);
-        try (RandomAccessFile file = new RandomAccessFile(filenameToCorrupt, "rw"))
+        try (FileChannel fileChannel = new File(filenameToCorrupt).newReadWriteChannel())
         {
-            file.seek(0);
-            file.writeBytes(StringUtils.repeat('z', 2));
+            fileChannel.position(0);
+            fileChannel.write(ByteBufferUtil.bytes(StringUtils.repeat('z', 2)));
         }
 
         File backupdir = moveToBackupDir(sstables);
@@ -325,13 +326,13 @@
         // now move a correct sstable to another directory to make sure that directory gets properly imported
         for (int i = 100; i < 130; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> correctSSTables = getCurrentColumnFamilyStore().getLiveSSTables();
 
         getCurrentColumnFamilyStore().clearUnsafe();
         File backupdirCorrect = moveToBackupDir(correctSSTables);
 
-        Set<File> beforeImport = Sets.newHashSet(backupdir.listFiles());
+        Set<File> beforeImport = Sets.newHashSet(backupdir.tryList());
         // first we moved out 2 sstables, one correct and one corrupt in to a single directory (backupdir)
         // then we moved out 1 sstable, a correct one (in backupdirCorrect).
         // now import should fail import on backupdir, but import the one in backupdirCorrect.
@@ -346,7 +347,7 @@
             assertTrue("pk = "+pk, pk >= 100 && pk < 130);
         }
         assertEquals("Data dir should contain one file", 1, countFiles(getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables()));
-        assertEquals("backupdir contained 2 files before import, should still contain 2 after failing to import it", beforeImport, Sets.newHashSet(backupdir.listFiles()));
+        assertEquals("backupdir contained 2 files before import, should still contain 2 after failing to import it", beforeImport, Sets.newHashSet(backupdir.tryList()));
         if (copy)
         {
             assertEquals("backupdirCorrect contained 1 file before import, should contain 1 after import too", 1, countFiles(backupdirCorrect));
@@ -355,14 +356,13 @@
         {
             assertEquals("backupdirCorrect contained 1 file before import, should be empty after import", 0, countFiles(backupdirCorrect));
         }
-
     }
 
     private int countFiles(File dir)
     {
         int fileCount = 0;
 
-        for (File f : dir.listFiles())
+        for (File f : dir.tryList())
         {
             if (f.isFile() && f.toString().contains("-Data.db"))
             {
@@ -402,7 +402,7 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 1000; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
 
         getCurrentColumnFamilyStore().clearUnsafe();
@@ -447,7 +447,7 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 1000; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
 
         getCurrentColumnFamilyStore().clearUnsafe();
@@ -483,7 +483,7 @@
         createTable("create table %s (id int primary key, d int) WITH caching = { 'keys': 'NONE', 'rows_per_partition': 'ALL' }");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         CacheService.instance.setRowCacheCapacityInMB(1);
 
         Set<RowCacheKey> keysToInvalidate = new HashSet<>();
@@ -504,7 +504,7 @@
 
         for (int i = 10; i < 20; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
 
         Set<RowCacheKey> allCachedKeys = new HashSet<>();
 
@@ -551,7 +551,7 @@
         createTable("create table %s (id int primary key, d int) WITH caching = { 'keys': 'NONE', 'rows_per_partition': 'ALL' }");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         CacheService.instance.setRowCacheCapacityInMB(1);
         getCurrentColumnFamilyStore().clearUnsafe();
@@ -568,25 +568,25 @@
         createTable("create table %s (id int primary key, d int) WITH caching = { 'keys': 'NONE', 'rows_per_partition': 'ALL' }");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
         sstables.forEach(s -> s.selfRef().release());
         // corrupt the sstable which is still in the data directory
         SSTableReader sstableToCorrupt = sstables.iterator().next();
         String filenameToCorrupt = sstableToCorrupt.descriptor.filenameFor(Component.STATS);
-        try (RandomAccessFile file = new RandomAccessFile(filenameToCorrupt, "rw"))
+        try (FileChannel fileChannel = new File(filenameToCorrupt).newReadWriteChannel())
         {
-            file.seek(0);
-            file.writeBytes(StringUtils.repeat('z', 2));
+            fileChannel.position(0);
+            fileChannel.write(ByteBufferUtil.bytes(StringUtils.repeat('z', 2)));
         }
 
         for (int i = 10; i < 20; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         for (int i = 20; i < 30; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
 
         Set<SSTableReader> expectedFiles = new HashSet<>(getCurrentColumnFamilyStore().getLiveSSTables());
 
@@ -618,8 +618,8 @@
             assertTrue(new File(sstable.descriptor.filenameFor(Component.DATA)).exists());
         getCurrentColumnFamilyStore().truncateBlocking();
         LifecycleTransaction.waitForDeletions();
-        for (File f : sstableToCorrupt.descriptor.directory.listFiles()) // clean up the corrupt files which truncate does not handle
-            f.delete();
+        for (File f : sstableToCorrupt.descriptor.directory.tryList()) // clean up the corrupt files which truncate does not handle
+            f.tryDelete();
 
     }
 
@@ -632,14 +632,14 @@
         createTable("create table %s (id int primary key, d int)");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
 
         File backupdir = moveToBackupDir(sstables);
         for (int i = 10; i < 20; i++)
             execute("insert into %s (id, d) values (?, ?)", i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         getCurrentColumnFamilyStore().clearUnsafe();
 
@@ -676,8 +676,9 @@
                 for (int i = 0; i < 10; i++)
                     execute(String.format("INSERT INTO %s.%s (id, d) values (?, ?)", KEYSPACE, table), i, i);
 
-                ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(KEYSPACE, unquotedTableName);
-                cfs.forceBlockingFlush();
+                ColumnFamilyStore cfs = getColumnFamilyStore(KEYSPACE, unquotedTableName);
+
+                Util.flush(cfs);
 
                 Set<SSTableReader> sstables = cfs.getLiveSSTables();
                 cfs.clearUnsafe();
@@ -708,7 +709,7 @@
     {
         public MockCFS(ColumnFamilyStore cfs, Directories dirs)
         {
-            super(cfs.keyspace, cfs.getTableName(), 0, cfs.metadata, dirs, false, false, true);
+            super(cfs.keyspace, cfs.getTableName(), Util.newSeqGen(), cfs.metadata, dirs, false, false, true);
         }
     }
 }
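ImportTest is representative of the java.io.File to org.apache.cassandra.io.util.File migration running through these tests. The sketch below exercises only calls that appear in the hunks above (new File(File, String), tryCreateDirectory, tryList, name, parent, tryDelete, newReadWriteChannel); it is a usage illustration, not part of the patch:

    import java.nio.channels.FileChannel;
    import org.apache.cassandra.io.util.File;

    final class FileApiSketch
    {
        // Mirrors the replacements used in ImportTest: mkdir -> tryCreateDirectory,
        // listFiles -> tryList, getName -> name, getParentFile -> parent,
        // delete -> tryDelete, RandomAccessFile -> newReadWriteChannel.
        static void demo(File dir) throws Exception
        {
            File backup = new File(dir, "backup");
            backup.tryCreateDirectory();
            for (File f : dir.tryList())
                System.out.println(f.name() + " under " + f.parent().name());
            try (FileChannel channel = new File(dir, "na-1-big-Data.db").newReadWriteChannel())
            {
                channel.position(0);
            }
            backup.tryDelete();
        }
    }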
diff --git a/test/unit/org/apache/cassandra/db/KeyCacheTest.java b/test/unit/org/apache/cassandra/db/KeyCacheTest.java
index 445fc67..475f1a1 100644
--- a/test/unit/org/apache/cassandra/db/KeyCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/KeyCacheTest.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.db;
 
 import java.io.IOException;
-import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -41,8 +40,8 @@
 import org.apache.cassandra.cache.ICache;
 import org.apache.cassandra.cache.KeyCacheKey;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
@@ -75,12 +74,13 @@
     private static final String COLUMN_FAMILY7 = "Standard7";
     private static final String COLUMN_FAMILY8 = "Standard8";
     private static final String COLUMN_FAMILY9 = "Standard9";
+    private static final String COLUMN_FAMILY10 = "Standard10";
 
     private static final String COLUMN_FAMILY_K2_1 = "Standard1";
 
 
     @BeforeClass
-    public static void defineSchema() throws ConfigurationException, UnknownHostException
+    public static void defineSchema() throws ConfigurationException
     {
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE1,
@@ -93,11 +93,13 @@
                                     SchemaLoader.standardCFMD(KEYSPACE1, COLUMN_FAMILY6),
                                     SchemaLoader.standardCFMD(KEYSPACE1, COLUMN_FAMILY7),
                                     SchemaLoader.standardCFMD(KEYSPACE1, COLUMN_FAMILY8),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, COLUMN_FAMILY9));
+                                    SchemaLoader.standardCFMD(KEYSPACE1, COLUMN_FAMILY9),
+                                    SchemaLoader.standardCFMD(KEYSPACE1, COLUMN_FAMILY10));
 
         SchemaLoader.createKeyspace(KEYSPACE2,
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(KEYSPACE2, COLUMN_FAMILY_K2_1));
+
     }
 
     @AfterClass
@@ -132,7 +134,7 @@
 
         // insert data and force to disk
         SchemaLoader.insertData(KEYSPACE1, cf, 0, 100);
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         // populate the cache
         readData(KEYSPACE1, cf, 0, 100);
@@ -202,7 +204,7 @@
     {
         return ColumnFamilyStore.getIfExists(k.desc.ksname, k.desc.cfname).getLiveSSTables()
                                 .stream()
-                                .filter(sstreader -> sstreader.descriptor.generation == k.desc.generation)
+                                .filter(sstreader -> sstreader.descriptor.id == k.desc.id)
                                 .findFirst().get();
     }
 
@@ -232,7 +234,7 @@
 
         // insert data and force to disk
         SchemaLoader.insertData(KEYSPACE1, cf, 0, 100);
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         Collection<SSTableReader> firstFlushTables = ImmutableList.copyOf(store.getLiveSSTables());
 
@@ -242,7 +244,7 @@
 
         // insert some new data and force to disk
         SchemaLoader.insertData(KEYSPACE1, cf, 100, 50);
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         // check that it's fine
         readData(KEYSPACE1, cf, 100, 50);
@@ -303,7 +305,7 @@
         new RowUpdateBuilder(cfs.metadata(), 0, "key2").clustering("2").build().applyUnsafe();
 
         // to make sure we have SSTable
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // reads to cache key position
         Util.getAll(Util.cmd(cfs, "key1").build());
@@ -317,7 +319,7 @@
             throw new IllegalStateException();
 
         Util.compactAll(cfs, Integer.MAX_VALUE).get();
-        boolean noEarlyOpen = DatabaseDescriptor.getSSTablePreemptiveOpenIntervalInMB() < 0;
+        boolean noEarlyOpen = DatabaseDescriptor.getSSTablePreemptiveOpenIntervalInMiB() < 0;
 
         // after compaction cache should have entries for new SSTables,
         // but since we have kept a reference to the old sstables,
@@ -339,16 +341,16 @@
     }
 
     @Test
-    public void testKeyCacheLoadNegativeCacheLoadTime() throws Exception
+    public void testKeyCacheLoadZeroCacheLoadTime() throws Exception
     {
-        DatabaseDescriptor.setCacheLoadTimeout(-1);
+        DatabaseDescriptor.setCacheLoadTimeout(0);
         String cf = COLUMN_FAMILY7;
 
         createAndInvalidateCache(Collections.singletonList(Pair.create(KEYSPACE1, cf)), 100);
 
         CacheService.instance.keyCache.loadSaved();
 
-        // Here max time to load cache is negative which means no time left to load cache. So the keyCache size should
+        // Here max time to load cache is zero which means no time left to load cache. So the keyCache size should
         // be zero after loadSaved().
         assertKeyCacheSize(0, KEYSPACE1, cf);
     }
@@ -424,7 +426,7 @@
 
             // insert data and force to disk
             SchemaLoader.insertData(keyspace, cf, 0, numberOfRows);
-            store.forceBlockingFlush();
+            Util.flush(store);
         }
         for(Pair<String, String> entry : tables)
         {
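Across these test classes the raw forceBlockingFlush() call sites now go through Util.flush(...) or the CQLTester flush() shortcut. The helper itself is not part of this diff; the only hint is the NameSortTest hunk further down, where the remaining direct call gains a FlushReason argument. A hedged guess at the shape of the helper, for orientation only:

    import org.apache.cassandra.db.ColumnFamilyStore;

    final class FlushHelperSketch
    {
        // Not the real org.apache.cassandra.Util: inferred from the NameSortTest hunk below,
        // where the call becomes forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS).
        static void flush(ColumnFamilyStore cfs)
        {
            cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
        }
    }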
diff --git a/test/unit/org/apache/cassandra/db/KeyspaceTest.java b/test/unit/org/apache/cassandra/db/KeyspaceTest.java
index fd15366..2445fe3 100644
--- a/test/unit/org/apache/cassandra/db/KeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/db/KeyspaceTest.java
@@ -21,9 +21,9 @@
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import org.apache.cassandra.schema.Schema;
 import org.assertj.core.api.Assertions;
 import org.junit.Test;
-import org.mockito.Mockito;
 
 import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
@@ -37,7 +37,6 @@
 import org.apache.cassandra.db.partitions.PartitionIterator;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.metrics.ClearableHistogram;
-import org.apache.cassandra.schema.SchemaProvider;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -86,7 +85,7 @@
             Util.assertEmpty(Util.cmd(cfs, "0").columns("c").includeRow(1).build());
 
             if (round == 0)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
         }
     }
 
@@ -121,7 +120,7 @@
             }
 
             if (round == 0)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
         }
     }
 
@@ -138,7 +137,7 @@
         for (String key : new String[]{"0", "2"})
             Util.assertEmpty(Util.cmd(cfs, key).build());
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         for (String key : new String[]{"0", "2"})
             Util.assertEmpty(Util.cmd(cfs, key).build());
@@ -210,7 +209,7 @@
             assertRowsInSlice(cfs, "0", 288, 299, 12, true, prefix);
 
             if (round == 0)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
         }
     }
 
@@ -223,7 +222,7 @@
         for (int i = 0; i < 10; i++)
             execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", "0", i, i);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         for (int i = 10; i < 20; i++)
         {
@@ -337,7 +336,7 @@
             assertRowsInResult(cfs, command);
 
             if (round == 0)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
         }
     }
 
@@ -360,7 +359,7 @@
             assertRowsInResult(cfs, command, 1);
 
             if (round == 0)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
         }
     }
 
@@ -373,7 +372,7 @@
         for (int i = 1; i < 7; i++)
             execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", "0", i, i);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // overwrite three rows with -1
         for (int i = 1; i < 4; i++)
@@ -385,7 +384,7 @@
             assertRowsInResult(cfs, command, -1, -1, 4);
 
             if (round == 0)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
         }
     }
 
@@ -398,7 +397,7 @@
         for (int i = 1000; i < 2000; i++)
             execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", "0", i, i);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         validateSliceLarge(cfs);
 
@@ -415,6 +414,20 @@
     }
 
     @Test
+    public void testSnapshotCreation() throws Throwable {
+        createTable("CREATE TABLE %s (a text, b int, c int, PRIMARY KEY (a, b))");
+
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", "0", 0, 0);
+
+        Keyspace ks = Keyspace.open(KEYSPACE_PER_TEST);
+        String table = getCurrentColumnFamilyStore().name;
+        ks.snapshot("test", table);
+
+        assertTrue(ks.snapshotExists("test"));
+        assertEquals(1, ks.getAllSnapshots().count());
+    }
+
+    @Test
     public void testLimitSSTables() throws Throwable
     {
         createTable("CREATE TABLE %s (a text, b int, c int, PRIMARY KEY (a, b))");
@@ -426,7 +439,7 @@
             for (int i = 1000 + (j*100); i < 1000 + ((j+1)*100); i++)
                 execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", "0", i, i, (long)i);
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         ((ClearableHistogram)cfs.metric.sstablesPerReadHistogram.cf).clear();
@@ -499,12 +512,9 @@
     @Test
     public void shouldThrowOnMissingKeyspace()
     {
-        SchemaProvider schema = Mockito.mock(SchemaProvider.class);
         String ksName = "MissingKeyspace";
-        
-        Mockito.when(schema.getKeyspaceMetadata(ksName)).thenReturn(null);
 
-        Assertions.assertThatThrownBy(() -> Keyspace.open(ksName, schema, false))
+        Assertions.assertThatThrownBy(() -> Keyspace.open(ksName, Schema.instance, false))
                   .isInstanceOf(AssertionError.class)
                   .hasMessage("Unknown keyspace " + ksName);
     }
diff --git a/test/unit/org/apache/cassandra/db/MmapFileTest.java b/test/unit/org/apache/cassandra/db/MmapFileTest.java
index 71a218e..4619df3 100644
--- a/test/unit/org/apache/cassandra/db/MmapFileTest.java
+++ b/test/unit/org/apache/cassandra/db/MmapFileTest.java
@@ -17,8 +17,6 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
-import java.io.RandomAccessFile;
 import java.lang.management.ManagementFactory;
 import java.nio.MappedByteBuffer;
 import java.nio.channels.FileChannel;
@@ -26,13 +24,25 @@
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
+import org.apache.cassandra.Util;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.io.util.FileUtils;
 
 public class MmapFileTest
 {
+    @BeforeClass
+    public static void setup()
+    {
+        // PathUtils touches StorageService, which touches StreamManager, which requires configs to be set up
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+
     /**
      * Verifies that {@link sun.misc.Cleaner} works and that mmap'd files can be deleted.
      */
@@ -56,20 +66,9 @@
         {
             int size = 1024 * 1024;
 
-            try (RandomAccessFile raf = new RandomAccessFile(f1, "rw"))
-            {
-                raf.setLength(size);
-            }
-
-            try (RandomAccessFile raf = new RandomAccessFile(f2, "rw"))
-            {
-                raf.setLength(size);
-            }
-
-            try (RandomAccessFile raf = new RandomAccessFile(f3, "rw"))
-            {
-                raf.setLength(size);
-            }
+            Util.setFileLength(f1, size);
+            Util.setFileLength(f2, size);
+            Util.setFileLength(f3, size);
 
             try (FileChannel channel = FileChannel.open(f1.toPath(), StandardOpenOption.WRITE, StandardOpenOption.READ))
             {
@@ -148,16 +147,16 @@
             Assert.assertEquals("# of mapped buffers should be 0", Long.valueOf(0L), mmapCount);
             Assert.assertEquals("amount of mapped memory should be 0", Long.valueOf(0L), mmapMemoryUsed);
 
-            Assert.assertTrue(f1.delete());
-            Assert.assertTrue(f2.delete());
-            Assert.assertTrue(f3.delete());
+            Assert.assertTrue(f1.tryDelete());
+            Assert.assertTrue(f2.tryDelete());
+            Assert.assertTrue(f3.tryDelete());
         }
         finally
         {
             Runtime.getRuntime().gc();
-            f1.delete();
-            f2.delete();
-            f3.delete();
+            f1.tryDelete();
+            f2.tryDelete();
+            f3.tryDelete();
         }
     }
 }
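Util.setFileLength(...) above collapses the three RandomAccessFile blocks the old code used to pre-size the mmap'd files. The helper's body is not part of this diff; a hedged sketch of the idiom it replaces (only toPath() is taken from the hunks above, the rest is hypothetical):

    import java.io.RandomAccessFile;
    import org.apache.cassandra.io.util.File;

    final class SetFileLengthSketch
    {
        // Hypothetical stand-in for Util.setFileLength(file, size); the real helper may differ.
        static void setFileLength(File file, long size) throws Exception
        {
            try (RandomAccessFile raf = new RandomAccessFile(file.toPath().toFile(), "rw"))
            {
                raf.setLength(size);
            }
        }
    }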
diff --git a/test/unit/org/apache/cassandra/db/MultiKeyspaceTest.java b/test/unit/org/apache/cassandra/db/MultiKeyspaceTest.java
index d690253..5ecd5b0 100644
--- a/test/unit/org/apache/cassandra/db/MultiKeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/db/MultiKeyspaceTest.java
@@ -20,7 +20,9 @@
  *
  */
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
+
 import org.junit.Test;
 
 
@@ -38,8 +40,8 @@
         execute("INSERT INTO multikstest1.standard1 (a, b) VALUES (0, 0)");
         execute("INSERT INTO multikstest2.standard1 (a, b) VALUES (0, 0)");
 
-        Keyspace.open("multikstest1").flush();
-        Keyspace.open("multikstest2").flush();
+        Util.flushKeyspace("multikstest1");
+        Util.flushKeyspace("multikstest2");
 
         assertRows(execute("SELECT * FROM multikstest1.standard1"),
                    row(0, 0));
diff --git a/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java b/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java
index 81d9735..09ab8a1 100644
--- a/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java
+++ b/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java
@@ -43,4 +43,4 @@
         assertEquals("aaa, bbb and 1 more.", makeTopKeysString(new ArrayList<>(keys), 8));
         assertEquals("aaa, bbb and 1 more.", makeTopKeysString(new ArrayList<>(keys), 10));
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/NameSortTest.java b/test/unit/org/apache/cassandra/db/NameSortTest.java
index 0d7b09c..2fdd735 100644
--- a/test/unit/org/apache/cassandra/db/NameSortTest.java
+++ b/test/unit/org/apache/cassandra/db/NameSortTest.java
@@ -84,7 +84,7 @@
             rub.build().applyUnsafe();
         }
         validateNameSort(cfs);
-        keyspace.getColumnFamilyStore("Standard1").forceBlockingFlush();
+        keyspace.getColumnFamilyStore("Standard1").forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
         validateNameSort(cfs);
     }
 
diff --git a/test/unit/org/apache/cassandra/db/NativeCellTest.java b/test/unit/org/apache/cassandra/db/NativeCellTest.java
index f52490d..c70bb3b 100644
--- a/test/unit/org/apache/cassandra/db/NativeCellTest.java
+++ b/test/unit/org/apache/cassandra/db/NativeCellTest.java
@@ -20,7 +20,6 @@
 import java.nio.ByteBuffer;
 import java.util.Random;
 import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
 
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -35,6 +34,7 @@
 import org.apache.cassandra.db.marshal.SetType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.rows.*;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.HeapCloner;
 import org.apache.cassandra.utils.memory.NativeAllocator;
@@ -47,7 +47,7 @@
     private static final NativeAllocator nativeAllocator = new NativePool(Integer.MAX_VALUE,
                                                                           Integer.MAX_VALUE,
                                                                           1f,
-                                                                          () -> CompletableFuture.completedFuture(true)).newAllocator();
+                                                                          () -> ImmediateFuture.success(true)).newAllocator(null);
     @SuppressWarnings("resource")
     private static final OpOrder.Group group = new OpOrder().start();
     private static Random rand;
diff --git a/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java b/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java
index 986a125..2038748 100644
--- a/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java
+++ b/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java
@@ -100,14 +100,14 @@
                 .add("val", "val1")
                 .build()
                 .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, "k1")
                 .clustering(new BigInteger(new byte[]{0, 0, 1}))
                 .add("val", "val2")
                 .build()
                 .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // fetch by the first column name; we should get the second version of the column value
         Row row = Util.getOnlyRow(Util.cmd(cfs, "k1").includeRow(new BigInteger(new byte[]{1})).build());
@@ -157,7 +157,7 @@
             builder.build().applyUnsafe();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         ColumnMetadata cDef = cfs.metadata().getColumn(ByteBufferUtil.bytes("val"));
 
diff --git a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
index 8f61a05..83b79ac 100644
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
@@ -31,10 +31,11 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.marshal.Int32Type;
-import org.apache.cassandra.distributed.impl.IsolatedExecutor;
+import org.apache.cassandra.distributed.shared.ThrowingRunnable;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertSame;
@@ -334,7 +335,7 @@
         int MAX_IT_DISTANCE = 10;
         int MAX_MARKEDAT = 10;
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         Random rand = new Random(seed);
 
         for (int i = 0; i < TEST_COUNT; i++)
@@ -609,7 +610,7 @@
         }
     }
 
-    private void assertHasException(IsolatedExecutor.ThrowingRunnable block, Consumer<Throwable> verifier)
+    private void assertHasException(ThrowingRunnable block, Consumer<Throwable> verifier)
     {
         try
         {
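Two small swaps in this file: System.nanoTime() becomes the statically imported Clock.Global.nanoTime(), and the ThrowingRunnable used by assertHasException now comes from the distributed shared package instead of IsolatedExecutor. The seeding idiom the randomized test keeps is, in isolation (illustrative only; presumably the indirection exists so the clock can be controlled centrally):

    import java.util.Random;

    import static org.apache.cassandra.utils.Clock.Global.nanoTime;

    final class SeededRandomSketch
    {
        // Same idiom as the hunk above: seed the RNG from the global clock rather than
        // calling System.nanoTime() directly.
        static Random newSeededRandom()
        {
            long seed = nanoTime();
            return new Random(seed);
        }
    }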
diff --git a/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java b/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
index 755302d..0ce0c14 100644
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
@@ -45,8 +45,8 @@
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.schema.IndexMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -85,7 +85,7 @@
         for (int i = 0; i < 40; i += 2)
             builder.newRow(i).add("val", i);
         builder.applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(10, 22).build().applyUnsafe();
 
@@ -235,7 +235,7 @@
 
         int nowInSec = FBUtilities.nowInSeconds();
         new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 1000, nowInSec)).apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         assertTimes(sstable.getSSTableMetadata(), 1000, 1000, nowInSec);
@@ -257,7 +257,7 @@
         key = "rt_times2";
         int nowInSec = FBUtilities.nowInSeconds();
         new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 1000, nowInSec)).apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         assertTimes(sstable.getSSTableMetadata(), 999, 1000, Integer.MAX_VALUE);
@@ -276,7 +276,7 @@
 
         int nowInSec = FBUtilities.nowInSeconds();
         new RowUpdateBuilder(cfs.metadata(), nowInSec, 1000L, key).addRangeTombstone(1, 2).build().apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         assertTimes(sstable.getSSTableMetadata(), 1000, 1000, nowInSec);
@@ -298,9 +298,9 @@
         key = "rt_times2";
         int nowInSec = FBUtilities.nowInSeconds();
         new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 1000, nowInSec)).apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         assertTimes(sstable.getSSTableMetadata(), 999, 1000, Integer.MAX_VALUE);
         cfs.forceMajorCompaction();
@@ -320,7 +320,7 @@
     {
         Keyspace ks = Keyspace.open(KSNAME);
         ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build());
 
         String key = "7810";
 
@@ -328,10 +328,10 @@
         for (int i = 10; i < 20; i ++)
             builder.newRow(i).add("val", i);
         builder.apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(10, 11).build().apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Thread.sleep(5);
         cfs.forceMajorCompaction();
@@ -343,17 +343,17 @@
     {
         Keyspace ks = Keyspace.open(KSNAME);
         ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build());
 
         String key = "7808_1";
         UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
         for (int i = 0; i < 40; i += 2)
             builder.newRow(i).add("val", i);
         builder.apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 1, 1)).apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Thread.sleep(5);
         cfs.forceMajorCompaction();
     }
@@ -363,20 +363,20 @@
     {
         Keyspace ks = Keyspace.open(KSNAME);
         ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build());
 
         String key = "7808_2";
         UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
         for (int i = 10; i < 20; i ++)
             builder.newRow(i).add("val", i);
         builder.apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 0, 0)).apply();
 
         UpdateBuilder.create(cfs.metadata(), key).withTimestamp(1).newRow(5).add("val", 5).apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Thread.sleep(5);
         cfs.forceMajorCompaction();
         assertEquals(1, Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build()).rowCount());
@@ -396,16 +396,16 @@
         for (int i = 0; i < 20; i++)
             builder.newRow(i).add("val", i);
         builder.applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(5, 15).build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(5, 10).build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 2, key).addRangeTombstone(5, 8).build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Partition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
         int nowInSec = FBUtilities.nowInSeconds();
@@ -447,11 +447,11 @@
         String key = "k3";
 
         UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0).newRow(2).add("val", 2).applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(0, 10).build().applyUnsafe();
         UpdateBuilder.create(cfs.metadata(), key).withTimestamp(2).newRow(1).add("val", 1).applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // Get the last value of the row
         FilteredPartition partition = Util.getOnlyPartition(Util.cmd(cfs, key).build());
@@ -489,7 +489,7 @@
                 current.unbuild()
                        .indexes(current.indexes.with(indexDef))
                        .build();
-            MigrationManager.announceTableUpdate(updated, true);
+            SchemaTestUtil.announceTableUpdate(updated);
         }
 
         Future<?> rebuild = cfs.indexManager.addIndex(indexDef, false);
@@ -508,10 +508,10 @@
         for (int i = 0; i < 10; i++)
             builder.newRow(i).add("val", i);
         builder.applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 0, key).addRangeTombstone(0, 7).build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         assertEquals(10, index.rowsInserted.size());
 
@@ -538,10 +538,10 @@
         for (int i = 0; i < 10; i += 2)
             builder.newRow(i).add("val", i);
         builder.applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 0, key).addRangeTombstone(0, 7).build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // there should be 2 sstables
         assertEquals(2, cfs.getLiveSSTables().size());
@@ -595,7 +595,7 @@
                 current.unbuild()
                        .indexes(current.indexes.with(indexDef))
                        .build();
-            MigrationManager.announceTableUpdate(updated, true);
+            SchemaTestUtil.announceTableUpdate(updated);
         }
 
         Future<?> rebuild = cfs.indexManager.addIndex(indexDef, false);
@@ -614,7 +614,7 @@
         // now re-insert that column
         UpdateBuilder.create(cfs.metadata(), key).withTimestamp(2).newRow(1).add("val", 1).applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // We should have 1 insert and 1 update to the indexed "1" column
         // CASSANDRA-6640 changed index update to just update, not insert then delete
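RangeTombstoneTest repeatedly lowers gc_grace_seconds and registers indexes by announcing updated table metadata, and this patch routes that announcement through SchemaTestUtil.announceTableUpdate(metadata) instead of MigrationManager.announceTableUpdate(metadata, true). Condensed from the hunks above, the test-side pattern is:

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.schema.SchemaTestUtil;
    import org.apache.cassandra.schema.TableMetadata;

    final class AnnounceUpdateSketch
    {
        // Condensed from the hunks above: rebuild the table metadata with the new params
        // and announce it through the test-only schema utility.
        static void shrinkGcGrace(ColumnFamilyStore cfs, int seconds)
        {
            TableMetadata updated = cfs.metadata().unbuild().gcGraceSeconds(seconds).build();
            SchemaTestUtil.announceTableUpdate(updated);
        }
    }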
diff --git a/test/unit/org/apache/cassandra/db/ReadCommandTest.java b/test/unit/org/apache/cassandra/db/ReadCommandTest.java
index 8eec769..43a7952 100644
--- a/test/unit/org/apache/cassandra/db/ReadCommandTest.java
+++ b/test/unit/org/apache/cassandra/db/ReadCommandTest.java
@@ -72,6 +72,7 @@
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableParams;
 import org.apache.cassandra.service.ActiveRepairService;
@@ -79,11 +80,13 @@
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -217,7 +220,7 @@
                 .build()
                 .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes("key2"))
                 .clustering("Column1")
@@ -233,7 +236,7 @@
     }
 
     @Test
-    public void testSinglePartitionSliceAbort() throws Exception
+    public void testSinglePartitionSliceAbort()
     {
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF2);
 
@@ -245,7 +248,7 @@
                 .build()
                 .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes("key"))
                 .clustering("dd")
@@ -264,7 +267,7 @@
     }
 
     @Test
-    public void testSinglePartitionNamesAbort() throws Exception
+    public void testSinglePartitionNamesAbort()
     {
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF2);
 
@@ -276,7 +279,7 @@
                 .build()
                 .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes("key"))
                 .clustering("dd")
@@ -355,9 +358,9 @@
                 commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
             }
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
 
-            ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
+            ReadQuery query = SinglePartitionReadCommand.Group.create(commands, DataLimits.NONE);
 
             try (ReadExecutionController executionController = query.executionController();
                  UnfilteredPartitionIterator iter = query.executeLocally(executionController);
@@ -525,9 +528,9 @@
                         DataLimits.NONE, Util.dk(data[1]), sliceFilter));
             }
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
 
-            ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
+            ReadQuery query = SinglePartitionReadCommand.Group.create(commands, DataLimits.NONE);
 
             try (ReadExecutionController executionController = query.executionController();
                     UnfilteredPartitionIterator iter = query.executeLocally(executionController);
@@ -601,9 +604,9 @@
                         DataLimits.NONE, Util.dk(data[1]), sliceFilter));
             }
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
 
-            ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
+            ReadQuery query = SinglePartitionReadCommand.Group.create(commands, DataLimits.NONE);
 
             try (ReadExecutionController executionController = query.executionController();
                     UnfilteredPartitionIterator iter = query.executeLocally(executionController);
@@ -660,7 +663,7 @@
             .build()
             .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, ByteBufferUtil.bytes("key"))
             .clustering("dd")
@@ -668,10 +671,10 @@
             .build()
             .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
         assertEquals(2, sstables.size());
-        Collections.sort(sstables, SSTableReader.maxTimestampDescending);
+        sstables.sort(SSTableReader.maxTimestampDescending);
 
         ReadCommand readCommand = Util.cmd(cfs, Util.dk("key")).includeRow("dd").columns("a").build();
 
@@ -689,7 +692,7 @@
     }
 
     @Test
-    public void dontIncludeLegacyCounterContextInDigest() throws IOException
+    public void dontIncludeLegacyCounterContextInDigest()
     {
         // Serializations of a CounterContext containing legacy (pre-2.1) shards
         // can legitimately differ across replicas. For this reason, the context
@@ -707,13 +710,13 @@
                 .addLegacyCounterCell("c", 0L)
                 .build()
                 .apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
 
         // execute a read and capture the digest
         ReadCommand readCommand = Util.cmd(cfs, Util.dk("key")).build();
         ByteBuffer digestWithLegacyCounter0 = performReadAndVerifyRepairedInfo(readCommand, 1, 1, true);
-        assertFalse(EMPTY_BYTE_BUFFER.equals(digestWithLegacyCounter0));
+        assertNotEquals(EMPTY_BYTE_BUFFER, digestWithLegacyCounter0);
 
         // truncate, then re-insert the same partition, but this time with a legacy
         // shard having the value 1. The repaired digest should match the previous, as
@@ -724,7 +727,7 @@
                 .addLegacyCounterCell("c", 1L)
                 .build()
                 .apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
 
         ByteBuffer digestWithLegacyCounter1 = performReadAndVerifyRepairedInfo(readCommand, 1, 1, true);
@@ -739,13 +742,13 @@
                 .add("c", 1L)
                 .build()
                 .apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
 
         ByteBuffer digestWithCounterCell = performReadAndVerifyRepairedInfo(readCommand, 1, 1, true);
-        assertFalse(EMPTY_BYTE_BUFFER.equals(digestWithCounterCell));
-        assertFalse(digestWithLegacyCounter0.equals(digestWithCounterCell));
-        assertFalse(digestWithLegacyCounter1.equals(digestWithCounterCell));
+        assertNotEquals(EMPTY_BYTE_BUFFER, digestWithCounterCell);
+        assertNotEquals(digestWithLegacyCounter0, digestWithCounterCell);
+        assertNotEquals(digestWithLegacyCounter1, digestWithCounterCell);
     }
 
     /**
@@ -756,7 +759,7 @@
      * Also, neither digest should be empty as the partition is not made empty by the purging.
      */
     @Test
-    public void purgeGCableTombstonesBeforeCalculatingDigest() throws Exception
+    public void purgeGCableTombstonesBeforeCalculatingDigest()
     {
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF8);
         cfs.truncateBlocking();
@@ -765,7 +768,6 @@
 
         DecoratedKey[] keys = new DecoratedKey[] { Util.dk("key0"), Util.dk("key1"), Util.dk("key2"), Util.dk("key3") };
         int nowInSec = FBUtilities.nowInSeconds();
-        TableMetadata cfm = cfs.metadata();
 
         // A simple tombstone
         new RowUpdateBuilder(cfs.metadata(), 0, keys[0]).clustering("cc").delete("a").build().apply();
@@ -783,7 +785,7 @@
         // Partition with 2 rows, one fully deleted
         new RowUpdateBuilder(cfs.metadata.get(), 0, keys[3]).clustering("bb").add("a", ByteBufferUtil.bytes("a")).delete("b").build().apply();
         RowUpdateBuilder.deleteRow(cfs.metadata(), 0, keys[3], "cc").apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
 
         Map<DecoratedKey, ByteBuffer> digestsWithTombstones = new HashMap<>();
@@ -851,7 +853,7 @@
                                                         .build());
         // Insert and repair
         insert(cfs, IntStream.range(0, 10), () -> IntStream.range(0, 10));
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
         // Insert and leave unrepaired
         insert(cfs, IntStream.range(0, 10), () -> IntStream.range(10, 20));
@@ -901,10 +903,7 @@
     {
         TableParams newParams = cfs.metadata().params.unbuild().gcGraceSeconds(gcGrace).build();
         KeyspaceMetadata keyspaceMetadata = Schema.instance.getKeyspaceMetadata(cfs.metadata().keyspace);
-        Schema.instance.load(
-        keyspaceMetadata.withSwapped(
-        keyspaceMetadata.tables.withSwapped(
-        cfs.metadata().withSwapped(newParams))));
+        SchemaTestUtil.addOrUpdateKeyspace(keyspaceMetadata.withSwapped(keyspaceMetadata.tables.withSwapped(cfs.metadata().withSwapped(newParams))), true);
     }
 
     private long getAndResetOverreadCount(ColumnFamilyStore cfs)
@@ -975,7 +974,7 @@
      * the row deletion is eligible for purging, both the result set and the repaired data digest should
      * be empty.
      */
-    private void fullyPurgedPartitionCreatesEmptyDigest(ColumnFamilyStore cfs, ReadCommand command) throws Exception
+    private void fullyPurgedPartitionCreatesEmptyDigest(ColumnFamilyStore cfs, ReadCommand command)
     {
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
@@ -984,7 +983,7 @@
         // Partition with a fully deleted static row and a single, fully deleted regular row
         RowUpdateBuilder.deleteRow(cfs.metadata(), 0, ByteBufferUtil.bytes("key")).apply();
         RowUpdateBuilder.deleteRow(cfs.metadata(), 0, ByteBufferUtil.bytes("key"), "cc").apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
 
         try (ReadExecutionController controller = command.executionController(true))
@@ -1034,12 +1033,12 @@
 
         // Live partition in a repaired sstable, so included in the digest calculation
         new RowUpdateBuilder(cfs.metadata.get(), 0, ByteBufferUtil.bytes("key-0")).clustering("cc").add("a", ByteBufferUtil.bytes("a")).build().apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
         // Fully deleted partition (static and regular rows) in an unrepaired sstable, so not included in the intial digest
         RowUpdateBuilder.deleteRow(cfs.metadata(), 0, ByteBufferUtil.bytes("key-1")).apply();
         RowUpdateBuilder.deleteRow(cfs.metadata(), 0, ByteBufferUtil.bytes("key-1"), "cc").apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         ByteBuffer digestWithoutPurgedPartition = null;
         
@@ -1067,7 +1066,7 @@
     }
 
     @Test
-    public void purgingConsidersRepairedDataOnly() throws Exception
+    public void purgingConsidersRepairedDataOnly()
     {
         // 2 sstables, first is repaired and contains data that is all purgeable
         // the second is unrepaired and contains non-purgable data. Even though
@@ -1082,11 +1081,11 @@
         DecoratedKey key = Util.dk("key");
         RowUpdateBuilder.deleteRow(cfs.metadata(), 0, key).apply();
         RowUpdateBuilder.deleteRow(cfs.metadata(), 0, key, "cc").apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.getLiveSSTables().forEach(sstable -> mutateRepaired(cfs, sstable, 111, null));
 
         new RowUpdateBuilder(cfs.metadata(), 1, key).clustering("cc").add("a", ByteBufferUtil.bytes("a")).build().apply();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         int nowInSec = FBUtilities.nowInSeconds() + 10;
         ReadCommand cmd = Util.cmd(cfs, key).withNowInSeconds(nowInSec).build();
@@ -1127,7 +1126,7 @@
                 .build()
                 .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         ReadCommand readCommand = Util.cmd(cfs, Util.dk("key")).build();
         assertTrue(cfs.isRowCacheEnabled());
@@ -1191,7 +1190,20 @@
                                                            ReplicaUtils.full(addr, token)));
     }
 
-    private void testRepairedDataTracking(ColumnFamilyStore cfs, ReadCommand readCommand) throws IOException
+    @Test
+    public void testToCQLString()
+    {
+        ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF2);
+        DecoratedKey key = Util.dk("key");
+
+        ReadCommand readCommand = Util.cmd(cfs, key).build();
+
+        String result = readCommand.toCQLString();
+
+        assertEquals(result, String.format("SELECT * FROM \"ReadCommandTest\".\"Standard2\" WHERE key = 0x%s ALLOW FILTERING", ByteBufferUtil.bytesToHex(key.getKey())));
+    }
+
+    private void testRepairedDataTracking(ColumnFamilyStore cfs, ReadCommand readCommand)
     {
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
@@ -1202,7 +1214,7 @@
                 .build()
                 .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 1, ByteBufferUtil.bytes("key"))
                 .clustering("dd")
@@ -1210,7 +1222,7 @@
                 .build()
                 .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
         assertEquals(2, sstables.size());
         sstables.forEach(sstable -> assertFalse(sstable.isRepaired() || sstable.isPendingRepair()));
@@ -1229,13 +1241,13 @@
         digests.add(digest);
 
         // add a pending repair session to table1, digest should remain the same but now we expect it to be marked inconclusive
-        UUID session1 = UUIDGen.getTimeUUID();
+        TimeUUID session1 = nextTimeUUID();
         mutateRepaired(cfs, sstable1, ActiveRepairService.UNREPAIRED_SSTABLE, session1);
         digests.add(performReadAndVerifyRepairedInfo(readCommand, numPartitions, rowsPerPartition, false));
         assertEquals(1, digests.size());
 
         // add a different pending session to table2, digest should remain the same and still consider it inconclusive
-        UUID session2 = UUIDGen.getTimeUUID();
+        TimeUUID session2 = nextTimeUUID();
         mutateRepaired(cfs, sstable2, ActiveRepairService.UNREPAIRED_SSTABLE, session2);
         digests.add(performReadAndVerifyRepairedInfo(readCommand, numPartitions, rowsPerPartition, false));
         assertEquals(1, digests.size());
@@ -1267,13 +1279,13 @@
             assertEquals(EMPTY_BYTE_BUFFER, digest);
 
             // now flush so we have an unrepaired table with the deletion and repeat the check
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
             digest = performReadAndVerifyRepairedInfo(readCommand, 0, rowsPerPartition, false);
             assertEquals(EMPTY_BYTE_BUFFER, digest);
         }
     }
 
-    private void mutateRepaired(ColumnFamilyStore cfs, SSTableReader sstable, long repairedAt, UUID pendingSession)
+    private void mutateRepaired(ColumnFamilyStore cfs, SSTableReader sstable, long repairedAt, TimeUUID pendingSession)
     {
         try
         {
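
The recurring replacement of cfs.forceBlockingFlush() with Util.flush(cfs) throughout these test files funnels flushes through a single test helper. A minimal sketch of such a helper is shown below; the actual body of org.apache.cassandra.Util.flush is not part of this patch, so the class and method body here are assumptions, with the FlushReason.UNIT_TESTS argument taken from the explicit forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS) call sites that appear later in this patch.

    import org.apache.cassandra.db.ColumnFamilyStore;

    // Hypothetical stand-in for org.apache.cassandra.Util.flush: the method body is an
    // assumption, but the FlushReason.UNIT_TESTS argument matches the explicit
    // forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS) calls later in this patch.
    public final class UtilFlushSketch
    {
        public static void flush(ColumnFamilyStore cfs)
        {
            cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
        }
    }
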
diff --git a/test/unit/org/apache/cassandra/db/ReadCommandVerbHandlerTest.java b/test/unit/org/apache/cassandra/db/ReadCommandVerbHandlerTest.java
index 44c065a..b823f23 100644
--- a/test/unit/org/apache/cassandra/db/ReadCommandVerbHandlerTest.java
+++ b/test/unit/org/apache/cassandra/db/ReadCommandVerbHandlerTest.java
@@ -20,7 +20,6 @@
 
 import java.net.UnknownHostException;
 import java.util.Random;
-import java.util.UUID;
 
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -46,6 +45,7 @@
 import org.apache.cassandra.utils.FBUtilities;
 
 import static org.apache.cassandra.net.Verb.*;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
@@ -110,7 +110,7 @@
         handler.doVerb(Message.builder(READ_REQ, (ReadCommand) command)
                               .from(peer())
                               .withId(messageId())
-                              .withParam(ParamType.TRACE_SESSION, UUID.randomUUID())
+                              .withParam(ParamType.TRACE_SESSION, nextTimeUUID())
                               .build());
         assertFalse(command.isTrackingRepairedData());
     }
@@ -170,7 +170,8 @@
                   DataLimits.NONE,
                   KEY,
                   new ClusteringIndexSliceFilter(Slices.ALL, false),
-                  null);
+                  null,
+                  false);
         }
 
         @Override
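
The two read-command tests above replace java.util.UUID identifiers (UUIDGen.getTimeUUID(), UUID.randomUUID()) with TimeUUID values from nextTimeUUID(): the pending-repair session passed to mutateRepaired and the TRACE_SESSION message parameter are now TimeUUIDs. A small usage sketch, assuming nothing beyond the import paths visible in this patch:

    import org.apache.cassandra.utils.TimeUUID;

    import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;

    public final class TimeUuidUsageSketch
    {
        // Hypothetical example: identifiers that used to be java.util.UUID values
        // (pending-repair sessions, trace sessions) are created as TimeUUIDs in 4.1.
        public static TimeUUID newPendingRepairSession()
        {
            return nextTimeUUID();
        }
    }
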
diff --git a/test/unit/org/apache/cassandra/db/ReadResponseTest.java b/test/unit/org/apache/cassandra/db/ReadResponseTest.java
index 29fa784..f8a19fd 100644
--- a/test/unit/org/apache/cassandra/db/ReadResponseTest.java
+++ b/test/unit/org/apache/cassandra/db/ReadResponseTest.java
@@ -262,7 +262,8 @@
                   DataLimits.NONE,
                   metadata.partitioner.decorateKey(ByteBufferUtil.bytes(key)),
                   null,
-                  null);
+                  null,
+                  false);
            
         }
 
diff --git a/test/unit/org/apache/cassandra/db/RecoveryManagerFlushedTest.java b/test/unit/org/apache/cassandra/db/RecoveryManagerFlushedTest.java
index d395439..acca6ed 100644
--- a/test/unit/org/apache/cassandra/db/RecoveryManagerFlushedTest.java
+++ b/test/unit/org/apache/cassandra/db/RecoveryManagerFlushedTest.java
@@ -35,6 +35,7 @@
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.ParameterizedClass;
 import org.apache.cassandra.io.compress.ZstdCompressor;
@@ -101,8 +102,8 @@
     public void testWithFlush() throws Exception
     {
         // Flush everything that may be in the commit log now to start fresh
-        FBUtilities.waitOnFutures(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).flush());
-        FBUtilities.waitOnFutures(Keyspace.open(SchemaConstants.SCHEMA_KEYSPACE_NAME).flush());
+        Util.flushKeyspace(SchemaConstants.SYSTEM_KEYSPACE_NAME);
+        Util.flushKeyspace(SchemaConstants.SCHEMA_KEYSPACE_NAME);
 
 
         CompactionManager.instance.disableAutoCompaction();
@@ -119,7 +120,7 @@
         Keyspace keyspace1 = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore cfs = keyspace1.getColumnFamilyStore("Standard1");
         logger.debug("forcing flush");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         logger.debug("begin manual replay");
         // replay the commit log (nothing on Standard1 should be replayed since everything was flushed, so only the row on Standard2
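
RecoveryManagerFlushedTest also replaces the explicit FBUtilities.waitOnFutures(Keyspace.open(...).flush()) calls with Util.flushKeyspace(...). The helper's real body is not shown in this patch; the sketch below simply wraps the pattern it replaces and should be read as an assumption (the 4.1 Keyspace.flush signature may additionally require a flush reason).

    import org.apache.cassandra.db.Keyspace;
    import org.apache.cassandra.utils.FBUtilities;

    // Hypothetical stand-in for org.apache.cassandra.Util.flushKeyspace: wraps the
    // pattern removed above; the real helper may pass an explicit flush reason.
    public final class FlushKeyspaceSketch
    {
        public static void flushKeyspace(String keyspaceName)
        {
            FBUtilities.waitOnFutures(Keyspace.open(keyspaceName).flush());
        }
    }
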
diff --git a/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java b/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java
index 4044fff..2105867 100644
--- a/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java
+++ b/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java
@@ -18,12 +18,12 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -112,9 +112,9 @@
         keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();
 
         // nuke the header
-        for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
+        for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).tryList())
         {
-            if (file.getName().endsWith(".header"))
+            if (file.name().endsWith(".header"))
                 FileUtils.deleteWithConfirm(file);
         }
 
diff --git a/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java b/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java
index 013f30b..c60bb97 100644
--- a/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java
@@ -23,11 +23,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.junit.Assert;
@@ -61,6 +57,7 @@
 import org.apache.cassandra.security.EncryptionContext;
 import org.apache.cassandra.security.EncryptionContextGenerator;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.concurrent.AsyncFuture;
 
 import static org.junit.Assert.assertEquals;
 
@@ -160,7 +157,7 @@
             Assert.assertTrue(Util.getAllUnfiltered(Util.cmd(keyspace2.getColumnFamilyStore(CF_STANDARD3), dk).build()).isEmpty());
 
             final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
-            Thread t = NamedThreadFactory.createThread(() ->
+            Thread t = NamedThreadFactory.createAnonymousThread(() ->
             {
                 try
                 {
@@ -345,33 +342,23 @@
         final Semaphore blocked = new Semaphore(0);
 
         @Override
-        protected Future<Integer> initiateMutation(final Mutation mutation,
-                final long segmentId,
-                final int serializedSize,
-                final int entryLocation,
-                final CommitLogReplayer clr)
+        protected org.apache.cassandra.utils.concurrent.Future<Integer> initiateMutation(final Mutation mutation,
+                                                                                         final long segmentId,
+                                                                                         final int serializedSize,
+                                                                                         final int entryLocation,
+                                                                                         final CommitLogReplayer clr)
         {
-            final Future<Integer> toWrap = super.initiateMutation(mutation,
-                                                                  segmentId,
-                                                                  serializedSize,
-                                                                  entryLocation,
-                                                                  clr);
-            return new Future<Integer>()
+            final org.apache.cassandra.utils.concurrent.Future<Integer> toWrap =
+                super.initiateMutation(mutation,
+                                       segmentId,
+                                       serializedSize,
+                                       entryLocation,
+                                       clr);
+
+            return new AsyncFuture<Integer>()
             {
 
                 @Override
-                public boolean cancel(boolean mayInterruptIfRunning)
-                {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
-                public boolean isCancelled()
-                {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
                 public boolean isDone()
                 {
                     return blocker.availablePermits() > 0 && toWrap.isDone();
@@ -380,7 +367,6 @@
                 @Override
                 public Integer get() throws InterruptedException, ExecutionException
                 {
-                    System.out.println("Got blocker once");
                     blocked.release();
                     blocker.acquire();
                     return toWrap.get();
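
The commit-log replay test above rebuilds its gating future on org.apache.cassandra.utils.concurrent.AsyncFuture, which removes the need for the hand-written cancel()/isCancelled() stubs that previously threw UnsupportedOperationException. The same pattern, reduced to a named class for clarity (an illustration, not code from this patch):

    import java.util.concurrent.ExecutionException;

    import org.apache.cassandra.utils.concurrent.AsyncFuture;
    import org.apache.cassandra.utils.concurrent.Future;

    // Hypothetical named form of the anonymous wrapper above: AsyncFuture supplies the
    // cancellation machinery, so only isDone() and get() are overridden to delegate.
    final class DelegatingFutureSketch extends AsyncFuture<Integer>
    {
        private final Future<Integer> inner;

        DelegatingFutureSketch(Future<Integer> inner)
        {
            this.inner = inner;
        }

        @Override
        public boolean isDone()
        {
            return inner.isDone();
        }

        @Override
        public Integer get() throws InterruptedException, ExecutionException
        {
            return inner.get();
        }
    }
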
diff --git a/test/unit/org/apache/cassandra/db/RemoveCellTest.java b/test/unit/org/apache/cassandra/db/RemoveCellTest.java
index 01fe255..1c28d1c 100644
--- a/test/unit/org/apache/cassandra/db/RemoveCellTest.java
+++ b/test/unit/org/apache/cassandra/db/RemoveCellTest.java
@@ -20,6 +20,7 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 
 public class RemoveCellTest extends CQLTester
@@ -30,7 +31,7 @@
         String tableName = createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName);
         execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", 0, 0, 0, 0L);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         execute("DELETE c FROM %s USING TIMESTAMP ? WHERE a = ? AND b = ?", 1L, 0, 0);
         assertRows(execute("SELECT * FROM %s WHERE a = ? AND b = ?", 0, 0), row(0, 0, null));
         assertRows(execute("SELECT c FROM %s WHERE a = ? AND b = ?", 0, 0), row(new Object[]{null}));
diff --git a/test/unit/org/apache/cassandra/db/RowCacheTest.java b/test/unit/org/apache/cassandra/db/RowCacheTest.java
index 59ce213..0bf8c5d 100644
--- a/test/unit/org/apache/cassandra/db/RowCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheTest.java
@@ -33,6 +33,7 @@
 import org.apache.cassandra.cache.RowCacheKey;
 import org.apache.cassandra.db.marshal.ValueAccessors;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.db.rows.*;
@@ -48,6 +49,7 @@
 import org.apache.cassandra.metrics.ClearableHistogram;
 import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -94,7 +96,7 @@
         // empty the row cache
         CacheService.instance.invalidateRowCache();
 
-        // set global row cache size to 1 MB
+        // set global row cache size to 1 MiB
         CacheService.instance.setRowCacheCapacityInMB(1);
 
         ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
@@ -139,7 +141,7 @@
         // empty the row cache
         CacheService.instance.invalidateRowCache();
 
-        // set global row cache size to 1 MB
+        // set global row cache size to 1 MiB
         CacheService.instance.setRowCacheCapacityInMB(1);
 
         // inserting 100 rows into both column families
@@ -221,7 +223,7 @@
         // empty the row cache
         CacheService.instance.invalidateRowCache();
 
-        // set global row cache size to 1 MB
+        // set global row cache size to 1 MiB
         CacheService.instance.setRowCacheCapacityInMB(1);
 
         // inserting 100 rows into column family
@@ -367,7 +369,9 @@
         CacheService.instance.setRowCacheCapacityInMB(1);
         rowCacheLoad(100, 50, 0);
         CacheService.instance.rowCache.submitWrite(Integer.MAX_VALUE).get();
-        Keyspace instance = Schema.instance.removeKeyspaceInstance(KEYSPACE_CACHED);
+
+        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(KEYSPACE_CACHED);
+        SchemaTestUtil.dropKeyspaceIfExist(KEYSPACE_CACHED, true);
         try
         {
             CacheService.instance.rowCache.size();
@@ -378,7 +382,7 @@
         }
         finally
         {
-            Schema.instance.storeKeyspaceInstance(instance);
+            SchemaTestUtil.addOrUpdateKeyspace(ksm, true);
         }
     }
 
@@ -409,7 +413,7 @@
         // empty the row cache
         CacheService.instance.invalidateRowCache();
 
-        // set global row cache size to 1 MB
+        // set global row cache size to 1 MiB
         CacheService.instance.setRowCacheCapacityInMB(1);
 
         ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
@@ -484,14 +488,14 @@
         // empty the row cache
         CacheService.instance.invalidateRowCache();
 
-        // set global row cache size to 1 MB
+        // set global row cache size to 1 MiB
         CacheService.instance.setRowCacheCapacityInMB(1);
 
         // inserting 100 rows into both column families
         SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHED, 0, 100);
 
         // force flush to make sure SSTables exist
-        cachedStore.forceBlockingFlush();
+        Util.flush(cachedStore);
 
         ((ClearableHistogram)cachedStore.metric.sstablesPerReadHistogram.cf).clear();
 
diff --git a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
index 7b774eb..a2fb57d 100644
--- a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
+++ b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -65,8 +64,8 @@
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.btree.BTree;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 public class RowIndexEntryTest extends CQLTester
 {
diff --git a/test/unit/org/apache/cassandra/db/RowIterationTest.java b/test/unit/org/apache/cassandra/db/RowIterationTest.java
index b0cd4fc..894744a 100644
--- a/test/unit/org/apache/cassandra/db/RowIterationTest.java
+++ b/test/unit/org/apache/cassandra/db/RowIterationTest.java
@@ -36,7 +36,7 @@
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName);
         for (int i = 0; i < 10; i++)
             execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) USING TIMESTAMP ?", i, 0, i, i, (long)i);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(10, execute("SELECT * FROM %s").size());
     }
 
@@ -49,7 +49,7 @@
         execute("INSERT INTO %s (a, b) VALUES (?, ?) USING TIMESTAMP ?", 0, 0, 0L);
         execute("DELETE FROM %s USING TIMESTAMP ? WHERE a = ?", 0L, 0);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // Delete row in second sstable with higher timestamp
         execute("INSERT INTO %s (a, b) VALUES (?, ?) USING TIMESTAMP ?", 0, 0, 1L);
@@ -57,7 +57,7 @@
 
         int localDeletionTime = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs).build()).partitionLevelDeletion().localDeletionTime();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         DeletionTime dt = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs).build()).partitionLevelDeletion();
         assertEquals(1L, dt.markedForDeleteAt());
@@ -72,7 +72,7 @@
 
         // Delete a row in first sstable
         execute("DELETE FROM %s USING TIMESTAMP ? WHERE a = ?", 0L, 0);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         assertFalse(Util.getOnlyPartitionUnfiltered(Util.cmd(cfs).build()).isEmpty());
     }
diff --git a/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java b/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java
index 8cb1e15..63ef861 100644
--- a/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java
+++ b/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java
@@ -22,6 +22,7 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.io.Files;
 
+import org.apache.cassandra.io.util.FileReader;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -40,7 +41,6 @@
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
 
-import java.io.FileReader;
 import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 import java.util.Arrays;
@@ -167,7 +167,7 @@
                           "    reg2 varint,\n" +
                           "    st1 varint static,\n" +
                           "    PRIMARY KEY (pk1, ck1)\n) WITH ID =";
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
 
         assertThat(actual,
                    allOf(startsWith(expected),
@@ -208,7 +208,7 @@
         ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
 
         // when re-adding, column is present as both column and as dropped column record.
-        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true);
+        String actual = SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata());
         String expected = "CREATE TABLE IF NOT EXISTS cql_test_keyspace_readded_columns.test_table_readded_columns (\n" +
                           "    pk1 varint,\n" +
                           "    ck1 varint,\n" +
@@ -247,7 +247,7 @@
 
         ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
 
-        assertThat(SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true),
+        assertThat(SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata()),
                    startsWith(
                    "CREATE TABLE IF NOT EXISTS cql_test_keyspace_create_table.test_table_create_table (\n" +
                    "    pk1 varint,\n" +
@@ -294,7 +294,7 @@
 
         ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
 
-        assertThat(SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), true, true, true),
+        assertThat(SchemaCQLHelper.getTableMetadataAsCQL(cfs.metadata(), cfs.keyspace.getMetadata()),
                    containsString("CLUSTERING ORDER BY (cl1 ASC)\n" +
                             "    AND additional_write_policy = 'ALWAYS'\n" +
                             "    AND bloom_filter_fp_chance = 1.0\n" +
@@ -303,6 +303,7 @@
                             "    AND comment = 'comment'\n" +
                             "    AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4', 'sstable_size_in_mb': '1'}\n" +
                             "    AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor', 'min_compress_ratio': '2.0'}\n" +
+                            "    AND memtable = 'default'\n" +
                             "    AND crc_check_chance = 0.3\n" +
                             "    AND default_time_to_live = 4\n" +
                             "    AND extensions = {'ext1': 0x76616c31}\n" +
@@ -402,7 +403,7 @@
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
         cfs.snapshot(SNAPSHOT);
 
-        String schema = Files.toString(cfs.getDirectories().getSnapshotSchemaFile(SNAPSHOT), Charset.defaultCharset());
+        String schema = Files.toString(cfs.getDirectories().getSnapshotSchemaFile(SNAPSHOT).toJavaIOFile(), Charset.defaultCharset());
         assertThat(schema,
                    allOf(containsString(String.format("CREATE TYPE IF NOT EXISTS %s.%s (\n" +
                                                       "    a1 varint,\n" +
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index ad09da9..dfe9165 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -18,11 +18,10 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.io.IOError;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -33,13 +32,12 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.StringUtils;
 
-import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -85,6 +83,7 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.SchemaLoader.counterCFMD;
 import static org.apache.cassandra.SchemaLoader.createKeyspace;
@@ -354,7 +353,7 @@
         assertOrderedAll(cfs, 10);
 
         for (SSTableReader sstable : cfs.getLiveSSTables())
-            assertTrue(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)).delete());
+            assertTrue(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)).tryDelete());
 
         CompactionManager.instance.performScrub(cfs, false, true, 2);
 
@@ -370,10 +369,10 @@
         DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
 
         // Create out-of-order SSTable
-        File tempDir = FileUtils.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
+        File tempDir = FileUtils.createTempFile("ScrubTest.testScrubOutOfOrder", "").parent();
         // create ks/cf directory
-        File tempDataDir = new File(tempDir, String.join(File.separator, ksName, CF));
-        assertTrue(tempDataDir.mkdirs());
+        File tempDataDir = new File(tempDir, String.join(File.pathSeparator(), ksName, CF));
+        assertTrue(tempDataDir.tryCreateDirectories());
         try
         {
             CompactionManager.instance.disableAutoCompaction();
@@ -468,10 +467,10 @@
 
     private static void overrideWithGarbage(SSTableReader sstable, long startPosition, long endPosition) throws IOException
     {
-        try (RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw"))
+        try (FileChannel fileChannel = new File(sstable.getFilename()).newReadWriteChannel())
         {
-            file.seek(startPosition);
-            file.writeBytes(StringUtils.repeat('z', (int) (endPosition - startPosition)));
+            fileChannel.position(startPosition);
+            fileChannel.write(ByteBufferUtil.bytes(StringUtils.repeat('z', (int) (endPosition - startPosition))));
         }
         if (ChunkCache.instance != null)
             ChunkCache.instance.invalidateFile(sstable.getFilename());
@@ -508,7 +507,7 @@
             new Mutation(update).applyUnsafe();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     public static void fillIndexCF(ColumnFamilyStore cfs, boolean composite, long ... values)
@@ -532,7 +531,7 @@
             new Mutation(builder.build()).applyUnsafe();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     protected static void fillCounterCF(ColumnFamilyStore cfs, int partitionsPerSSTable) throws WriteTimeoutException
@@ -545,7 +544,7 @@
             new CounterMutation(new Mutation(update), ConsistencyLevel.ONE).apply();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     @Test
@@ -556,14 +555,14 @@
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("test_compact_static_columns");
 
         QueryProcessor.executeInternal(String.format("INSERT INTO \"%s\".test_compact_static_columns (a, b, c, d) VALUES (123, c3db07e8-b602-11e3-bc6b-e0b9a54a6d93, true, 'foobar')", ksName));
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         CompactionManager.instance.performScrub(cfs, false, true, 2);
 
         QueryProcessor.process(String.format("CREATE TABLE \"%s\".test_scrub_validation (a text primary key, b int)", ksName), ConsistencyLevel.ONE);
         ColumnFamilyStore cfs2 = keyspace.getColumnFamilyStore("test_scrub_validation");
 
         new Mutation(UpdateBuilder.create(cfs2.metadata(), "key").newRow().add("b", Int32Type.instance.decompose(1)).build()).apply();
-        cfs2.forceBlockingFlush();
+        Util.flush(cfs2);
 
         CompactionManager.instance.performScrub(cfs2, false, false, 2);
     }
@@ -582,7 +581,7 @@
         QueryProcessor.executeInternal(String.format("INSERT INTO \"%s\".test_compact_dynamic_columns (a, b, c) VALUES (0, 'a', 'foo')", ksName));
         QueryProcessor.executeInternal(String.format("INSERT INTO \"%s\".test_compact_dynamic_columns (a, b, c) VALUES (0, 'b', 'bar')", ksName));
         QueryProcessor.executeInternal(String.format("INSERT INTO \"%s\".test_compact_dynamic_columns (a, b, c) VALUES (0, 'c', 'boo')", ksName));
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         CompactionManager.instance.performScrub(cfs, true, true, 2);
 
         // Scrub is silent, but it will remove broken records. So reading everything back to make sure nothing got "scrubbed away"
@@ -705,7 +704,7 @@
      */
     private static class TestWriter extends BigTableWriter
     {
-        TestWriter(Descriptor descriptor, long keyCount, long repairedAt, UUID pendingRepair, boolean isTransient, TableMetadataRef metadata,
+        TestWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, TableMetadataRef metadata,
                    MetadataCollector collector, SerializationHeader header, LifecycleTransaction txn)
         {
             super(descriptor, keyCount, repairedAt, pendingRepair, isTransient, metadata, collector, header, Collections.emptySet(), txn);
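
ScrubTest, like RecoveryManagerMissingHeaderTest and SnapshotTest, moves from java.io.File to org.apache.cassandra.io.util.File. Based solely on the replacements visible in this patch, the wrapper's methods map as listFiles() -> tryList(), getName() -> name(), delete() -> tryDelete(), getParentFile() -> parent(), mkdirs() -> tryCreateDirectories(), with newReadWriteChannel() standing in for RandomAccessFile. A short usage sketch under those assumptions:

    import org.apache.cassandra.io.util.File;

    public final class FileWrapperUsageSketch
    {
        // Deletes commit-log header files, mirroring the tryList()/name()/tryDelete()
        // calls introduced in this patch; a hypothetical example, not code from the patch.
        public static void deleteHeaders(File commitLogDir)
        {
            File[] files = commitLogDir.tryList();
            if (files == null)
                return;
            for (File f : files)
            {
                if (f.name().endsWith(".header"))
                    f.tryDelete();
            }
        }
    }
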
diff --git a/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java b/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java
index e7bfb12..14137f1 100644
--- a/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java
@@ -40,8 +40,8 @@
 import org.apache.cassandra.index.Index;
 import org.apache.cassandra.schema.IndexMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -300,7 +300,7 @@
         new RowUpdateBuilder(cfs.metadata(), 1, "k1").noRowMarker().add("birthdate", 1L).build().applyUnsafe();
 
         // force a flush, so our index isn't being read from a memtable
-        keyspace.getColumnFamilyStore(WITH_KEYS_INDEX).forceBlockingFlush();
+        Util.flushTable(keyspace, WITH_KEYS_INDEX);
 
         // now apply another update, but force the index update to be skipped
         keyspace.apply(new RowUpdateBuilder(cfs.metadata(), 2, "k1").noRowMarker().add("birthdate", 2L).build(),
@@ -356,7 +356,7 @@
         assertIndexedOne(cfs, col, 10l);
 
         // force a flush and retry the query, so our index isn't being read from a memtable
-        keyspace.getColumnFamilyStore(cfName).forceBlockingFlush();
+        Util.flushTable(keyspace, cfName);
         assertIndexedOne(cfs, col, 10l);
 
         // now apply another update, but force the index update to be skipped
@@ -482,7 +482,7 @@
             current.unbuild()
                    .indexes(current.indexes.with(indexDef))
                    .build();
-        MigrationManager.announceTableUpdate(updated, true);
+        SchemaTestUtil.announceTableUpdate(updated);
 
         // wait for the index to be built
         Index index = cfs.indexManager.getIndex(indexDef);
@@ -522,7 +522,7 @@
             new RowUpdateBuilder(cfs.metadata(), 0, "k" + i).noRowMarker().add("birthdate", 1l).build().applyUnsafe();
 
         assertIndexedCount(cfs, ByteBufferUtil.bytes("birthdate"), 1l, 10);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertIndexedCount(cfs, ByteBufferUtil.bytes("birthdate"), 1l, 10);
     }
 
@@ -537,7 +537,7 @@
         new RowUpdateBuilder(cfs.metadata(), 0, "k3").clustering("c").add("birthdate", 1L).add("notbirthdate", 3L).build().applyUnsafe();
         new RowUpdateBuilder(cfs.metadata(), 0, "k4").clustering("c").add("birthdate", 1L).add("notbirthdate", 3L).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         ReadCommand rc = Util.cmd(cfs)
                              .fromKeyIncl("k1")
                              .toKeyIncl("k3")
diff --git a/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java b/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java
index c9649b1..8f7e24b 100644
--- a/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java
+++ b/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java
@@ -19,6 +19,8 @@
 package org.apache.cassandra.db;
 
 import com.google.common.io.Files;
+
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.compaction.OperationType;
@@ -30,9 +32,9 @@
 import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.db.rows.UnfilteredRowIterator;
-import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableWriter;
@@ -45,13 +47,14 @@
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.concurrent.Callable;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiFunction;
 import java.util.function.Function;
+import java.util.function.Supplier;
+
+import org.apache.cassandra.io.util.File;
 
 public class SerializationHeaderTest
 {
@@ -84,12 +87,12 @@
         schemaWithStatic = schemaWithStatic.unbuild().recordColumnDrop(columnRegular, 0L).build();
         schemaWithRegular = schemaWithRegular.unbuild().recordColumnDrop(columnStatic, 0L).build();
 
-        final AtomicInteger generation = new AtomicInteger();
-        File dir = Files.createTempDir();
+        Supplier<SequenceBasedSSTableId> id = Util.newSeqGen();
+        File dir = new File(Files.createTempDir());
         try
         {
             BiFunction<TableMetadata, Function<ByteBuffer, Clustering<?>>, Callable<Descriptor>> writer = (schema, clusteringFunction) -> () -> {
-                Descriptor descriptor = new Descriptor(BigFormat.latestVersion, dir, schema.keyspace, schema.name, generation.incrementAndGet(), SSTableFormat.Type.BIG);
+                Descriptor descriptor = new Descriptor(BigFormat.latestVersion, dir, schema.keyspace, schema.name, id.get(), SSTableFormat.Type.BIG);
 
                 SerializationHeader header = SerializationHeader.makeWithoutStats(schema);
                 try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
diff --git a/test/unit/org/apache/cassandra/db/SinglePartitionReadCommandCQLTest.java b/test/unit/org/apache/cassandra/db/SinglePartitionReadCommandCQLTest.java
index 1c891ec..47e866c 100644
--- a/test/unit/org/apache/cassandra/db/SinglePartitionReadCommandCQLTest.java
+++ b/test/unit/org/apache/cassandra/db/SinglePartitionReadCommandCQLTest.java
@@ -31,10 +31,10 @@
     {
         createTable("CREATE TABLE %s (bucket_id TEXT,name TEXT,data TEXT,PRIMARY KEY (bucket_id, name))");
         execute("insert into %s (bucket_id, name, data) values ('8772618c9009cf8f5a5e0c18', 'test', 'hello')");
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         execute("insert into %s (bucket_id, name, data) values ('8772618c9009cf8f5a5e0c19', 'test2', 'hello');");
         execute("delete from %s where bucket_id = '8772618c9009cf8f5a5e0c18'");
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         UntypedResultSet res = execute("select * from %s where bucket_id = '8772618c9009cf8f5a5e0c18' and name = 'test'");
         assertTrue(res.isEmpty());
     }
diff --git a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
index 692b0c5..ab790dc 100644
--- a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
+++ b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
@@ -180,7 +180,7 @@
                                                      ck1));
 
         if (flush)
-            Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE_SCLICES).forceBlockingFlush();
+            Util.flushTable(KEYSPACE, TABLE_SCLICES);
 
         AbstractClusteringIndexFilter clusteringFilter = createClusteringFilter(uniqueCk1, uniqueCk2, isSlice);
         ReadCommand cmd = SinglePartitionReadCommand.create(CFM_SLICES,
@@ -279,7 +279,7 @@
         }
 
         // check (de)serialized iterator for sstable static cell
-        Schema.instance.getColumnFamilyStoreInstance(metadata.id).forceBlockingFlush();
+        Schema.instance.getColumnFamilyStoreInstance(metadata.id).forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
         try (ReadExecutionController executionController = cmd.executionController(); UnfilteredPartitionIterator pi = cmd.executeLocally(executionController))
         {
             response = ReadResponse.createDataResponse(pi, cmd, executionController.getRepairedDataInfo());
@@ -318,7 +318,7 @@
             QueryProcessor.executeOnceInternal("DELETE FROM ks.test_read_rt USING TIMESTAMP 10 WHERE k=1 AND c1=1");
 
             List<Unfiltered> memtableUnfiltereds = assertQueryReturnsSingleRT(query);
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
             List<Unfiltered> sstableUnfiltereds = assertQueryReturnsSingleRT(query);
 
             String errorMessage = String.format("Expected %s but got %s with postfix '%s'",
@@ -356,17 +356,17 @@
                                                                   timestamp,
                                                                   "DELETE FROM ks.partition_row_deletion USING TIMESTAMP 10 WHERE k=1");
             if (flush && multiSSTable)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
             QueryProcessor.executeOnceInternalWithNowAndTimestamp(nowInSec,
                                                                   timestamp,
                                                                   "DELETE FROM ks.partition_row_deletion USING TIMESTAMP 10 WHERE k=1 and c=1");
             if (flush)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
 
             QueryProcessor.executeOnceInternal("INSERT INTO ks.partition_row_deletion(k,c,v) VALUES(1,1,1) using timestamp 11");
             if (flush)
             {
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
                 try
                 {
                     cfs.forceMajorCompaction();
@@ -424,17 +424,17 @@
                                                                   timestamp,
                                                                   "DELETE FROM ks.partition_range_deletion USING TIMESTAMP 10 WHERE k=1");
             if (flush && multiSSTable)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
             QueryProcessor.executeOnceInternalWithNowAndTimestamp(nowInSec,
                                                                   timestamp,
                                                                   "DELETE FROM ks.partition_range_deletion USING TIMESTAMP 10 WHERE k=1 and c1=1");
             if (flush)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
 
             QueryProcessor.executeOnceInternal("INSERT INTO ks.partition_range_deletion(k,c1,c2,v) VALUES(1,1,1,1) using timestamp 11");
             if (flush)
             {
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
                 try
                 {
                     cfs.forceMajorCompaction();
@@ -544,7 +544,7 @@
         QueryProcessor.executeOnceInternal("INSERT INTO ks.legacy_mc_inaccurate_min_max (k, c1, c2, c3, v) VALUES (100, 2, 2, 2, 2)");
         QueryProcessor.executeOnceInternal("DELETE FROM ks.legacy_mc_inaccurate_min_max WHERE k=100 AND c1=1");
         assertQueryReturnsSingleRT("SELECT * FROM ks.legacy_mc_inaccurate_min_max WHERE k=100 AND c1=1 AND c2=1");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertQueryReturnsSingleRT("SELECT * FROM ks.legacy_mc_inaccurate_min_max WHERE k=100 AND c1=1 AND c2=1");
         assertQueryReturnsSingleRT("SELECT * FROM ks.legacy_mc_inaccurate_min_max WHERE k=100 AND c1=1 AND c2=1 AND c3=1"); // clustering names
 
@@ -560,7 +560,7 @@
         new Mutation(builder.build()).apply();
 
         assertQueryReturnsSingleRT("SELECT * FROM ks.legacy_mc_inaccurate_min_max WHERE k=100 AND c1=3 AND c2=2");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertQueryReturnsSingleRT("SELECT * FROM ks.legacy_mc_inaccurate_min_max WHERE k=100 AND c1=3 AND c2=2");
         assertQueryReturnsSingleRT("SELECT * FROM ks.legacy_mc_inaccurate_min_max WHERE k=100 AND c1=3 AND c2=2 AND c3=2"); // clustering names
 
diff --git a/test/unit/org/apache/cassandra/db/SnapshotTest.java b/test/unit/org/apache/cassandra/db/SnapshotTest.java
index a406048..aa726ec 100644
--- a/test/unit/org/apache/cassandra/db/SnapshotTest.java
+++ b/test/unit/org/apache/cassandra/db/SnapshotTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.db;
 
-import java.io.File;
 import java.nio.file.Files;
 import java.nio.file.StandardOpenOption;
 
@@ -27,6 +26,7 @@
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 
 public class SnapshotTest extends CQLTester
 {
@@ -35,7 +35,7 @@
     {
         createTable("create table %s (id int primary key, k int)");
         execute("insert into %s (id, k) values (1,1)");
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        getCurrentColumnFamilyStore().forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
         for (SSTableReader sstable : getCurrentColumnFamilyStore().getLiveSSTables())
         {
             File toc = new File(sstable.descriptor.filenameFor(Component.TOC));
diff --git a/test/unit/org/apache/cassandra/db/SystemKeyspaceMigrator40Test.java b/test/unit/org/apache/cassandra/db/SystemKeyspaceMigrator40Test.java
deleted file mode 100644
index a14db00..0000000
--- a/test/unit/org/apache/cassandra/db/SystemKeyspaceMigrator40Test.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.db;
-
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import org.junit.Test;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.db.marshal.BytesType;
-import org.apache.cassandra.db.marshal.Int32Type;
-import org.apache.cassandra.db.marshal.UTF8Type;
-import org.apache.cassandra.db.marshal.UUIDType;
-import org.apache.cassandra.dht.Range;
-import org.apache.cassandra.dht.Token;
-import org.apache.cassandra.utils.UUIDGen;
-
-import static org.junit.Assert.assertEquals;
-
-public class SystemKeyspaceMigrator40Test extends CQLTester
-{
-    @Test
-    public void testMigratePeers() throws Throwable
-    {
-        String insert = String.format("INSERT INTO %s ("
-                                      + "peer, "
-                                      + "data_center, "
-                                      + "host_id, "
-                                      + "preferred_ip, "
-                                      + "rack, "
-                                      + "release_version, "
-                                      + "rpc_address, "
-                                      + "schema_version, "
-                                      + "tokens) "
-                                      + " values ( ?, ?, ? , ? , ?, ?, ?, ?, ?)",
-                                      SystemKeyspaceMigrator40.legacyPeersName);
-        UUID hostId = UUIDGen.getTimeUUID();
-        UUID schemaVersion = UUIDGen.getTimeUUID();
-        execute(insert,
-                InetAddress.getByName("127.0.0.1"),
-                "dcFoo",
-                hostId,
-                InetAddress.getByName("127.0.0.2"),
-                "rackFoo", "4.0",
-                InetAddress.getByName("127.0.0.3"),
-                schemaVersion,
-                ImmutableSet.of("foobar"));
-        SystemKeyspaceMigrator40.migrate();
-
-        int rowCount = 0;
-        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", SystemKeyspaceMigrator40.peersName)))
-        {
-            rowCount++;
-            assertEquals(InetAddress.getByName("127.0.0.1"), row.getInetAddress("peer"));
-            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("peer_port"));
-            assertEquals("dcFoo", row.getString("data_center"));
-            assertEquals(hostId, row.getUUID("host_id"));
-            assertEquals(InetAddress.getByName("127.0.0.2"), row.getInetAddress("preferred_ip"));
-            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("preferred_port"));
-            assertEquals("rackFoo", row.getString("rack"));
-            assertEquals("4.0", row.getString("release_version"));
-            assertEquals(InetAddress.getByName("127.0.0.3"), row.getInetAddress("native_address"));
-            assertEquals(DatabaseDescriptor.getNativeTransportPort(), row.getInt("native_port"));
-            assertEquals(schemaVersion, row.getUUID("schema_version"));
-            assertEquals(ImmutableSet.of("foobar"), row.getSet("tokens", UTF8Type.instance));
-        }
-        assertEquals(1, rowCount);
-
-        //Test nulls/missing don't prevent the row from propagating
-        execute(String.format("TRUNCATE %s", SystemKeyspaceMigrator40.legacyPeersName));
-        execute(String.format("TRUNCATE %s", SystemKeyspaceMigrator40.peersName));
-
-        execute(String.format("INSERT INTO %s (peer) VALUES (?)", SystemKeyspaceMigrator40.legacyPeersName),
-                              InetAddress.getByName("127.0.0.1"));
-        SystemKeyspaceMigrator40.migrate();
-
-        rowCount = 0;
-        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", SystemKeyspaceMigrator40.peersName)))
-        {
-            rowCount++;
-        }
-        assertEquals(1, rowCount);
-    }
-
-    @Test
-    public void testMigratePeerEvents() throws Throwable
-    {
-        String insert = String.format("INSERT INTO %s ("
-                                      + "peer, "
-                                      + "hints_dropped) "
-                                      + " values ( ?, ? )",
-                                      SystemKeyspaceMigrator40.legacyPeerEventsName);
-        UUID uuid = UUIDGen.getTimeUUID();
-        execute(insert,
-                InetAddress.getByName("127.0.0.1"),
-                ImmutableMap.of(uuid, 42));
-        SystemKeyspaceMigrator40.migrate();
-
-        int rowCount = 0;
-        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", SystemKeyspaceMigrator40.peerEventsName)))
-        {
-            rowCount++;
-            assertEquals(InetAddress.getByName("127.0.0.1"), row.getInetAddress("peer"));
-            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("peer_port"));
-            assertEquals(ImmutableMap.of(uuid, 42), row.getMap("hints_dropped", UUIDType.instance, Int32Type.instance));
-        }
-        assertEquals(1, rowCount);
-
-        //Test nulls/missing don't prevent the row from propagating
-        execute(String.format("TRUNCATE %s", SystemKeyspaceMigrator40.legacyPeerEventsName));
-        execute(String.format("TRUNCATE %s", SystemKeyspaceMigrator40.peerEventsName));
-
-        execute(String.format("INSERT INTO %s (peer) VALUES (?)", SystemKeyspaceMigrator40.legacyPeerEventsName),
-                InetAddress.getByName("127.0.0.1"));
-        SystemKeyspaceMigrator40.migrate();
-
-        rowCount = 0;
-        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", SystemKeyspaceMigrator40.peerEventsName)))
-        {
-            rowCount++;
-        }
-        assertEquals(1, rowCount);
-    }
-
-    @Test
-    public void testMigrateTransferredRanges() throws Throwable
-    {
-        String insert = String.format("INSERT INTO %s ("
-                                      + "operation, "
-                                      + "peer, "
-                                      + "keyspace_name, "
-                                      + "ranges) "
-                                      + " values ( ?, ?, ?, ? )",
-                                      SystemKeyspaceMigrator40.legacyTransferredRangesName);
-        execute(insert,
-                "foo",
-                InetAddress.getByName("127.0.0.1"),
-                "bar",
-                ImmutableSet.of(ByteBuffer.wrap(new byte[] { 42 })));
-        SystemKeyspaceMigrator40.migrate();
-
-        int rowCount = 0;
-        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", SystemKeyspaceMigrator40.transferredRangesName)))
-        {
-            rowCount++;
-            assertEquals("foo", row.getString("operation"));
-            assertEquals(InetAddress.getByName("127.0.0.1"), row.getInetAddress("peer"));
-            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("peer_port"));
-            assertEquals("bar", row.getString("keyspace_name"));
-            assertEquals(ImmutableSet.of(ByteBuffer.wrap(new byte[] { 42 })), row.getSet("ranges", BytesType.instance));
-        }
-        assertEquals(1, rowCount);
-
-        //Test nulls/missing don't prevent the row from propagating
-        execute(String.format("TRUNCATE %s", SystemKeyspaceMigrator40.legacyTransferredRangesName));
-        execute(String.format("TRUNCATE %s", SystemKeyspaceMigrator40.transferredRangesName));
-
-        execute(String.format("INSERT INTO %s (operation, peer, keyspace_name) VALUES (?, ?, ?)", SystemKeyspaceMigrator40.legacyTransferredRangesName),
-                "foo",
-                InetAddress.getByName("127.0.0.1"),
-                "bar");
-        SystemKeyspaceMigrator40.migrate();
-
-        rowCount = 0;
-        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", SystemKeyspaceMigrator40.transferredRangesName)))
-        {
-            rowCount++;
-        }
-        assertEquals(1, rowCount);
-    }
-
-    @Test
-    public void testMigrateAvailableRanges() throws Throwable
-    {
-        Range<Token> testRange = new Range<>(DatabaseDescriptor.getPartitioner().getRandomToken(), DatabaseDescriptor.getPartitioner().getRandomToken());
-        String insert = String.format("INSERT INTO %s ("
-                                      + "keyspace_name, "
-                                      + "ranges) "
-                                      + " values ( ?, ? )",
-                                      SystemKeyspaceMigrator40.legacyAvailableRangesName);
-        execute(insert,
-                "foo",
-                ImmutableSet.of(SystemKeyspace.rangeToBytes(testRange)));
-        SystemKeyspaceMigrator40.migrate();
-
-        int rowCount = 0;
-        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", SystemKeyspaceMigrator40.availableRangesName)))
-        {
-            rowCount++;
-            assertEquals("foo", row.getString("keyspace_name"));
-            assertEquals(ImmutableSet.of(testRange), SystemKeyspace.rawRangesToRangeSet(row.getSet("full_ranges", BytesType.instance), DatabaseDescriptor.getPartitioner()));
-        }
-        assertEquals(1, rowCount);
-    }
-}
diff --git a/test/unit/org/apache/cassandra/db/SystemKeyspaceMigrator41Test.java b/test/unit/org/apache/cassandra/db/SystemKeyspaceMigrator41Test.java
new file mode 100644
index 0000000..b543679
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/SystemKeyspaceMigrator41Test.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db;
+
+import java.net.InetAddress;
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.StringUtils;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.TimeUUIDType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+import static org.junit.Assert.assertEquals;
+
+public class SystemKeyspaceMigrator41Test extends CQLTester
+{
+    @Test
+    public void testMigratePeers() throws Throwable
+    {
+        String legacyTab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_PEERS);
+        String tab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.PEERS_V2);
+        String insert = String.format("INSERT INTO %s ("
+                                      + "peer, "
+                                      + "data_center, "
+                                      + "host_id, "
+                                      + "preferred_ip, "
+                                      + "rack, "
+                                      + "release_version, "
+                                      + "rpc_address, "
+                                      + "schema_version, "
+                                      + "tokens) "
+                                      + " values ( ?, ?, ? , ? , ?, ?, ?, ?, ?)",
+                                      legacyTab);
+        UUID hostId = UUID.randomUUID();
+        UUID schemaVersion = UUID.randomUUID();
+        execute(insert,
+                InetAddress.getByName("127.0.0.1"),
+                "dcFoo",
+                hostId,
+                InetAddress.getByName("127.0.0.2"),
+                "rackFoo", "4.0",
+                InetAddress.getByName("127.0.0.3"),
+                schemaVersion,
+                ImmutableSet.of("foobar"));
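+        // migratePeers should copy this legacy row into peers_v2, filling in the port columns (checked below against the locally configured ports)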
+        SystemKeyspaceMigrator41.migratePeers();
+
+        int rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+            assertEquals(InetAddress.getByName("127.0.0.1"), row.getInetAddress("peer"));
+            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("peer_port"));
+            assertEquals("dcFoo", row.getString("data_center"));
+            assertEquals(hostId, row.getUUID("host_id"));
+            assertEquals(InetAddress.getByName("127.0.0.2"), row.getInetAddress("preferred_ip"));
+            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("preferred_port"));
+            assertEquals("rackFoo", row.getString("rack"));
+            assertEquals("4.0", row.getString("release_version"));
+            assertEquals(InetAddress.getByName("127.0.0.3"), row.getInetAddress("native_address"));
+            assertEquals(DatabaseDescriptor.getNativeTransportPort(), row.getInt("native_port"));
+            assertEquals(schemaVersion, row.getUUID("schema_version"));
+            assertEquals(ImmutableSet.of("foobar"), row.getSet("tokens", UTF8Type.instance));
+        }
+        assertEquals(1, rowCount);
+
+        // Test that null/missing fields don't prevent the row from propagating
+        execute(String.format("TRUNCATE %s", legacyTab));
+        execute(String.format("TRUNCATE %s", tab));
+
+        execute(String.format("INSERT INTO %s (peer) VALUES (?)", legacyTab),
+                              InetAddress.getByName("127.0.0.1"));
+        SystemKeyspaceMigrator41.migratePeers();
+
+        rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+        }
+        assertEquals(1, rowCount);
+    }
+
+    @Test
+    public void testMigratePeerEvents() throws Throwable
+    {
+        String legacyTab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_PEER_EVENTS);
+        String tab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.PEER_EVENTS_V2);
+        String insert = String.format("INSERT INTO %s ("
+                                      + "peer, "
+                                      + "hints_dropped) "
+                                      + " values ( ?, ? )",
+                                      legacyTab);
+        TimeUUID uuid = nextTimeUUID();
+        execute(insert,
+                InetAddress.getByName("127.0.0.1"),
+                ImmutableMap.of(uuid, 42));
+        SystemKeyspaceMigrator41.migratePeerEvents();
+
+        int rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+            assertEquals(InetAddress.getByName("127.0.0.1"), row.getInetAddress("peer"));
+            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("peer_port"));
+            assertEquals(ImmutableMap.of(uuid, 42), row.getMap("hints_dropped", TimeUUIDType.instance, Int32Type.instance));
+        }
+        assertEquals(1, rowCount);
+
+        // Test that null/missing fields don't prevent the row from propagating
+        execute(String.format("TRUNCATE %s", legacyTab));
+        execute(String.format("TRUNCATE %s", tab));
+
+        execute(String.format("INSERT INTO %s (peer) VALUES (?)", legacyTab),
+                InetAddress.getByName("127.0.0.1"));
+        SystemKeyspaceMigrator41.migratePeerEvents();
+
+        rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+        }
+        assertEquals(1, rowCount);
+    }
+
+    @Test
+    public void testMigrateTransferredRanges() throws Throwable
+    {
+        String legacyTab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_TRANSFERRED_RANGES);
+        String tab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.TRANSFERRED_RANGES_V2);
+        String insert = String.format("INSERT INTO %s ("
+                                      + "operation, "
+                                      + "peer, "
+                                      + "keyspace_name, "
+                                      + "ranges) "
+                                      + " values ( ?, ?, ?, ? )",
+                                      legacyTab);
+        execute(insert,
+                "foo",
+                InetAddress.getByName("127.0.0.1"),
+                "bar",
+                ImmutableSet.of(ByteBuffer.wrap(new byte[] { 42 })));
+        SystemKeyspaceMigrator41.migrateTransferredRanges();
+
+        int rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+            assertEquals("foo", row.getString("operation"));
+            assertEquals(InetAddress.getByName("127.0.0.1"), row.getInetAddress("peer"));
+            assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("peer_port"));
+            assertEquals("bar", row.getString("keyspace_name"));
+            assertEquals(ImmutableSet.of(ByteBuffer.wrap(new byte[] { 42 })), row.getSet("ranges", BytesType.instance));
+        }
+        assertEquals(1, rowCount);
+
+        // Test that null/missing fields don't prevent the row from propagating
+        execute(String.format("TRUNCATE %s", legacyTab));
+        execute(String.format("TRUNCATE %s", tab));
+
+        execute(String.format("INSERT INTO %s (operation, peer, keyspace_name) VALUES (?, ?, ?)", legacyTab),
+                "foo",
+                InetAddress.getByName("127.0.0.1"),
+                "bar");
+        SystemKeyspaceMigrator41.migrateTransferredRanges();
+
+        rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+        }
+        assertEquals(1, rowCount);
+    }
+
+    @Test
+    public void testMigrateAvailableRanges() throws Throwable
+    {
+        String legacyTab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_AVAILABLE_RANGES);
+        String tab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.AVAILABLE_RANGES_V2);
+        Range<Token> testRange = new Range<>(DatabaseDescriptor.getPartitioner().getRandomToken(), DatabaseDescriptor.getPartitioner().getRandomToken());
+        String insert = String.format("INSERT INTO %s ("
+                                      + "keyspace_name, "
+                                      + "ranges) "
+                                      + " values ( ?, ? )",
+                                      legacyTab);
+        execute(insert,
+                "foo",
+                ImmutableSet.of(SystemKeyspace.rangeToBytes(testRange)));
+        SystemKeyspaceMigrator41.migrateAvailableRanges();
+
+        int rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+            assertEquals("foo", row.getString("keyspace_name"));
+            assertEquals(ImmutableSet.of(testRange), SystemKeyspace.rawRangesToRangeSet(row.getSet("full_ranges", BytesType.instance), DatabaseDescriptor.getPartitioner()));
+        }
+        assertEquals(1, rowCount);
+    }
+
+    @Test
+    public void testMigrateSSTableActivity() throws Throwable
+    {
+        FBUtilities.setPreviousReleaseVersionString(CassandraVersion.NULL_VERSION.toString());
+        String legacyTab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_SSTABLE_ACTIVITY);
+        String tab = String.format("%s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.SSTABLE_ACTIVITY_V2);
+
+        String insert = String.format("INSERT INTO %s (%s) VALUES (%s)",
+                                      legacyTab,
+                                      StringUtils.join(new String[] {"keyspace_name",
+                                                       "columnfamily_name",
+                                                       "generation",
+                                                       "rate_120m",
+                                                       "rate_15m"}, ", "),
+                                      StringUtils.repeat("?", ", ", 5));
+
+        execute(insert, "ks", "tab", 5, 123.234d, 345.456d);
+
+        ColumnFamilyStore cf = getColumnFamilyStore(SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.SSTABLE_ACTIVITY_V2);
+        cf.truncateBlocking();
+        cf.clearUnsafe();
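+        // the migration should convert the legacy integer generation into the new string-based sstable id (asserted below)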
+        SystemKeyspaceMigrator41.migrateSSTableActivity();
+
+        int rowCount = 0;
+        for (UntypedResultSet.Row row : execute(String.format("SELECT * FROM %s", tab)))
+        {
+            rowCount++;
+            assertEquals("ks", row.getString("keyspace_name"));
+            assertEquals("tab", row.getString("table_name"));
+            assertEquals(new SequenceBasedSSTableId(5).toString(), row.getString("id"));
+            assertEquals(123.234d, row.getDouble("rate_120m"), 0.001d);
+            assertEquals(345.456d, row.getDouble("rate_15m"), 0.001d);
+        }
+        assertEquals(1, rowCount);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java b/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
index 97f3874..bf59b62 100644
--- a/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
@@ -25,19 +25,24 @@
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.SchemaKeyspace;
+import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.FBUtilities;
 
+import static java.lang.String.format;
+import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
+import static org.apache.cassandra.db.SystemKeyspace.LOCAL;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 public class SystemKeyspaceTest
@@ -47,9 +52,6 @@
     {
         DatabaseDescriptor.daemonInitialization();
         CommitLog.instance.start();
-
-        if (FBUtilities.isWindows)
-            WindowsFailedSnapshotTracker.deleteOldSnapshots();
     }
 
     @Test
@@ -92,26 +94,9 @@
         assert firstId.equals(secondId) : String.format("%s != %s%n", firstId.toString(), secondId.toString());
     }
 
-    private void assertDeletedOrDeferred(int expectedCount)
+    private void assertDeleted()
     {
-        if (FBUtilities.isWindows)
-            assertEquals(expectedCount, getDeferredDeletionCount());
-        else
-            assertTrue(getSystemSnapshotFiles(SchemaConstants.SYSTEM_KEYSPACE_NAME).isEmpty());
-    }
-
-    private int getDeferredDeletionCount()
-    {
-        try
-        {
-            Class c = Class.forName("java.io.DeleteOnExitHook");
-            LinkedHashSet<String> files = (LinkedHashSet<String>)FBUtilities.getProtectedField(c, "files").get(c);
-            return files.size();
-        }
-        catch (Exception e)
-        {
-            throw new RuntimeException(e);
-        }
+        assertTrue(getSystemSnapshotFiles(SchemaConstants.SYSTEM_KEYSPACE_NAME).isEmpty());
     }
 
     @Test
@@ -122,15 +107,13 @@
             cfs.clearUnsafe();
         Keyspace.clearSnapshot(null, SchemaConstants.SYSTEM_KEYSPACE_NAME);
 
-        int baseline = getDeferredDeletionCount();
-
         SystemKeyspace.snapshotOnVersionChange();
-        assertDeletedOrDeferred(baseline);
+        assertDeleted();
 
         // now setup system.local as if we're upgrading from a previous version
         setupReleaseVersion(getOlderVersionString());
         Keyspace.clearSnapshot(null, SchemaConstants.SYSTEM_KEYSPACE_NAME);
-        assertDeletedOrDeferred(baseline);
+        assertDeleted();
 
         // Compare versions again & verify that snapshots were created for all tables in the system ks
         SystemKeyspace.snapshotOnVersionChange();
@@ -138,7 +121,7 @@
         Set<String> snapshottedSystemTables = getSystemSnapshotFiles(SchemaConstants.SYSTEM_KEYSPACE_NAME);
         SystemKeyspace.metadata().tables.forEach(t -> assertTrue(snapshottedSystemTables.contains(t.name)));
         Set<String> snapshottedSchemaTables = getSystemSnapshotFiles(SchemaConstants.SCHEMA_KEYSPACE_NAME);
-        Schema.getSystemKeyspaceMetadata().tables.forEach(t -> assertTrue(snapshottedSchemaTables.contains(t.name)));
+        SchemaKeyspace.metadata().tables.forEach(t -> assertTrue(snapshottedSchemaTables.contains(t.name)));
 
         // clear out the snapshots & set the previous recorded version equal to the latest, we shouldn't
         // see any new snapshots created this time.
@@ -147,14 +130,38 @@
 
         SystemKeyspace.snapshotOnVersionChange();
 
-        // snapshotOnVersionChange for upgrade case will open a SSTR when the CFS is flushed. On Windows, we won't be
-        // able to delete hard-links to that file while segments are memory-mapped, so they'll be marked for deferred deletion.
+        // snapshotOnVersionChange for upgrade case will open a SSTR when the CFS is flushed.
         // 10 files expected.
-        assertDeletedOrDeferred(baseline + 10);
+        assertDeleted();
 
         Keyspace.clearSnapshot(null, SchemaConstants.SYSTEM_KEYSPACE_NAME);
     }
 
+    @Test
+    public void testPersistLocalMetadata()
+    {
+        SystemKeyspace.persistLocalMetadata();
+
+        UntypedResultSet result = executeInternal(format("SELECT * FROM system.%s WHERE key='%s'", LOCAL, LOCAL));
+
+        assertNotNull(result);
+        UntypedResultSet.Row row = result.one();
+
+        assertEquals(DatabaseDescriptor.getClusterName(), row.getString("cluster_name"));
+        assertEquals(FBUtilities.getReleaseVersionString(), row.getString("release_version"));
+        assertEquals(QueryProcessor.CQL_VERSION.toString(), row.getString("cql_version"));
+        assertEquals(String.valueOf(ProtocolVersion.CURRENT.asInt()), row.getString("native_protocol_version"));
+        assertEquals(DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter(), row.getString("data_center"));
+        assertEquals(DatabaseDescriptor.getEndpointSnitch().getLocalRack(), row.getString("rack"));
+        assertEquals(DatabaseDescriptor.getPartitioner().getClass().getName(), row.getString("partitioner"));
+        assertEquals(FBUtilities.getJustBroadcastNativeAddress(), row.getInetAddress("rpc_address"));
+        assertEquals(DatabaseDescriptor.getNativeTransportPort(), row.getInt("rpc_port"));
+        assertEquals(FBUtilities.getJustBroadcastAddress(), row.getInetAddress("broadcast_address"));
+        assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("broadcast_port"));
+        assertEquals(FBUtilities.getJustLocalAddress(), row.getInetAddress("listen_address"));
+        assertEquals(DatabaseDescriptor.getStoragePort(), row.getInt("listen_port"));
+    }
+
     private String getOlderVersionString()
     {
         String version = FBUtilities.getReleaseVersionString();
@@ -168,7 +175,7 @@
         Set<String> snapshottedTableNames = new HashSet<>();
         for (ColumnFamilyStore cfs : Keyspace.open(keyspace).getColumnFamilyStores())
         {
-            if (!cfs.getSnapshotDetails().isEmpty())
+            if (!cfs.listSnapshots().isEmpty())
                 snapshottedTableNames.add(cfs.getTableName());
         }
         return snapshottedTableNames;
diff --git a/test/unit/org/apache/cassandra/db/TimeSortTest.java b/test/unit/org/apache/cassandra/db/TimeSortTest.java
index 8ae05ea..b45ab96 100644
--- a/test/unit/org/apache/cassandra/db/TimeSortTest.java
+++ b/test/unit/org/apache/cassandra/db/TimeSortTest.java
@@ -36,7 +36,7 @@
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName);
 
         execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", 0, 100, 0, 100L);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", 0, 0, 1, 0L);
 
         assertRows(execute("SELECT * FROM %s WHERE a = ? AND b >= ? LIMIT 1000", 0, 10), row(0, 100, 0));
@@ -53,7 +53,7 @@
                 execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", i, j * 2, 0, (long)j * 2);
 
         validateTimeSort();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         validateTimeSort();
 
         // interleave some new data to test memtable + sstable
diff --git a/test/unit/org/apache/cassandra/db/TopPartitionTrackerTest.java b/test/unit/org/apache/cassandra/db/TopPartitionTrackerTest.java
new file mode 100644
index 0000000..417bda0
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/TopPartitionTrackerTest.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db;
+
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+import org.junit.Test;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.TokenMetadata;
+import org.apache.cassandra.metrics.TopPartitionTracker;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.Pair;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TopPartitionTrackerTest extends CQLTester
+{
+    @Test
+    public void testSizeLimit()
+    {
+        createTable("create table %s (id bigint primary key, x int)");
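+        // track at most 5 partitions and ignore anything smaller than 12 bytes, so of the sizes 5..14 inserted below only 12, 13 and 14 survive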
+        DatabaseDescriptor.setMaxTopSizePartitionCount(5);
+        DatabaseDescriptor.setMinTrackedPartitionSizeInBytes(new DataStorageSpec.LongBytesBound("12B"));
+
+        Collection<Range<Token>> fullRange = Collections.singleton(r(0, 0));
+        TopPartitionTracker tpt = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        TopPartitionTracker.Collector collector = new TopPartitionTracker.Collector(fullRange);
+        for (int i = 5; i < 15; i++)
+            collector.trackPartitionSize(dk(i), i);
+        tpt.merge(collector);
+        assertEquals(3, tpt.topSizes().top.size());
+        assertTrue(tpt.topSizes().top.stream().allMatch(tp -> tp.value >= 12));
+
+        Collection<Range<Token>> keyRange = rangesFor(7);
+        collector = new TopPartitionTracker.Collector(keyRange);
+        collector.trackPartitionSize(dk(7), 7);
+        tpt.merge(collector);
+        assertEquals(3, tpt.topSizes().top.size());
+        assertTrue(tpt.topSizes().top.stream().allMatch(tp -> tp.value >= 12));
+        assertFalse(tpt.topSizes().top.contains(tp(7, 7)));
+
+        collector = new TopPartitionTracker.Collector(keyRange);
+        collector.trackPartitionSize(dk(7), 100);
+        tpt.merge(collector);
+        assertEquals(4, tpt.topSizes().top.size());
+        assertTrue(tpt.topSizes().top.stream().allMatch(tp -> tp.value >= 12));
+        assertTrue(tpt.topSizes().top.contains(tp(7, 100)));
+    }
+
+    @Test
+    public void testCountLimit()
+    {
+        createTable("create table %s (id bigint primary key, x int)");
+        DatabaseDescriptor.setMinTrackedPartitionSizeInBytes(new DataStorageSpec.LongBytesBound("0B"));
+        DatabaseDescriptor.setMaxTopSizePartitionCount(5);
+        Collection<Range<Token>> fullRange = Collections.singleton(r(0, 0));
+        TopPartitionTracker tpt = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        TopPartitionTracker.Collector collector = new TopPartitionTracker.Collector(fullRange);
+        for (int i = 5; i < 15; i++)
+            collector.trackPartitionSize(dk(i), i);
+        tpt.merge(collector);
+        assertEquals(5, tpt.topSizes().top.size());
+
+        collector = new TopPartitionTracker.Collector(fullRange);
+        for (int i = 5; i < 15; i++)
+            collector.trackPartitionSize(dk(i), i + 1);
+        tpt.merge(collector);
+        assertEquals(5, tpt.topSizes().top.size());
+
+        collector = new TopPartitionTracker.Collector(rangesFor(15));
+        collector.trackPartitionSize(dk(15), 14);
+        tpt.merge(collector);
+        assertEquals(5, tpt.topSizes().top.size());
+    }
+
+    @Test
+    public void testSubRangeMerge()
+    {
+        createTable("create table %s (id bigint primary key, x int)");
+        DatabaseDescriptor.setMinTrackedPartitionSizeInBytes(new DataStorageSpec.LongBytesBound("0B"));
+        DatabaseDescriptor.setMaxTopSizePartitionCount(10);
+        Collection<Range<Token>> fullRange = Collections.singleton(r(0, 0));
+        TopPartitionTracker tpt = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        TopPartitionTracker.Collector collector = new TopPartitionTracker.Collector(fullRange);
+        for (int i = 0; i < 10; i++)
+            collector.trackPartitionSize(dk(i), 10);
+        tpt.merge(collector);
+        assertEquals(10, tpt.topSizes().top.size());
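+        // merging a collector that only covers keys 0-4 replaces their sizes; keys 5-9 keep their previous values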
+        collector = new TopPartitionTracker.Collector(rangesFor(0,1,2,3,4));
+        for (int i = 0; i < 5; i++)
+            collector.trackPartitionSize(dk(i), 8);
+        tpt.merge(collector);
+
+        assertEquals(10, tpt.topSizes().top.size());
+        for (TopPartitionTracker.TopPartition tp : tpt.topSizes().top)
+        {
+            long key = ByteBufferUtil.toLong(tp.key.getKey());
+            if (key < 5)
+                assertEquals(8, tp.value);
+            else
+                assertEquals(10, tp.value);
+        }
+    }
+
+    @Test
+    public void testSaveLoad()
+    {
+        createTable("create table %s (id bigint primary key, x int)");
+        DatabaseDescriptor.setMinTrackedPartitionSizeInBytes(new DataStorageSpec.LongBytesBound("0B"));
+        DatabaseDescriptor.setMinTrackedPartitionTombstoneCount(0);
+        DatabaseDescriptor.setMaxTopSizePartitionCount(10);
+        DatabaseDescriptor.setMaxTopTombstonePartitionCount(10);
+        Collection<Range<Token>> fullRange = Collections.singleton(r(0, 0));
+        TopPartitionTracker tpt = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        assertEquals(0, tpt.topSizes().lastUpdate);
+        assertEquals(0, tpt.topTombstones().lastUpdate);
+        long start = System.currentTimeMillis();
+        TopPartitionTracker.Collector collector = new TopPartitionTracker.Collector(fullRange);
+        for (int i = 0; i < 10; i++)
+        {
+            collector.trackPartitionSize(dk(i), 10);
+            collector.trackTombstoneCount(dk(i + 10), 100);
+        }
+        tpt.merge(collector);
+        long sizeUpdate = tpt.topSizes().lastUpdate;
+        long tombstoneUpdate = tpt.topTombstones().lastUpdate;
+        assertTrue(sizeUpdate >= start && sizeUpdate <= System.currentTimeMillis());
+        assertTrue(tombstoneUpdate >= start && tombstoneUpdate <= System.currentTimeMillis());
+
+        assertEquals(10, tpt.topSizes().top.size());
+        assertEquals(10, tpt.topTombstones().top.size());
+        tpt.save();
+        TopPartitionTracker tptLoaded = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        assertEquals(sizeUpdate, tptLoaded.topSizes().lastUpdate);
+        assertEquals(tombstoneUpdate, tptLoaded.topTombstones().lastUpdate);
+
+        assertEquals(tpt.topSizes().top, tptLoaded.topSizes().top);
+        assertEquals(tpt.topTombstones().top, tptLoaded.topTombstones().top);
+
+        DatabaseDescriptor.setMaxTopSizePartitionCount(5);
+        DatabaseDescriptor.setMaxTopTombstonePartitionCount(5);
+
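+        // a tracker created after lowering the limits should load only the top 5 entries of the saved state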
+        tptLoaded = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        assertEquals(5, tptLoaded.topSizes().top.size());
+        assertEquals(5, tptLoaded.topTombstones().top.size());
+        assertEquals(sizeUpdate, tptLoaded.topSizes().lastUpdate);
+        assertEquals(tombstoneUpdate, tptLoaded.topTombstones().lastUpdate);
+
+        Iterator<TopPartitionTracker.TopPartition> oldIter = tpt.topSizes().top.iterator();
+        Iterator<TopPartitionTracker.TopPartition> loadedIter = tptLoaded.topSizes().top.iterator();
+        while (loadedIter.hasNext())
+        {
+            TopPartitionTracker.TopPartition old = oldIter.next();
+            TopPartitionTracker.TopPartition loaded = loadedIter.next();
+            assertEquals(old.key, loaded.key);
+            assertEquals(old.value, loaded.value);
+        }
+
+        oldIter = tpt.topTombstones().top.iterator();
+        loadedIter = tptLoaded.topTombstones().top.iterator();
+        while (loadedIter.hasNext())
+        {
+            TopPartitionTracker.TopPartition old = oldIter.next();
+            TopPartitionTracker.TopPartition loaded = loadedIter.next();
+            assertEquals(old.key, loaded.key);
+            assertEquals(old.value, loaded.value);
+        }
+    }
+
+    @Test
+    public void randomTest()
+    {
+        createTable("create table %s (id bigint primary key, x int)");
+        DatabaseDescriptor.setMinTrackedPartitionSizeInBytes(new DataStorageSpec.LongBytesBound("0B"));
+        DatabaseDescriptor.setMaxTopSizePartitionCount(1000);
+        int keyCount = 10000;
+        long seed = System.currentTimeMillis();
+        Random r = new Random(seed);
+        List<DecoratedKey> keys = new ArrayList<>(keyCount);
+        for (int i = 0; i < keyCount; i++)
+            keys.add(dk(i));
+
+        Collection<Range<Token>> fullRange = Collections.singleton(r(0, 0));
+        List<Pair<DecoratedKey, Long>> expected = new ArrayList<>();
+        TopPartitionTracker tpt = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        TopPartitionTracker.Collector collector = new TopPartitionTracker.Collector(fullRange);
+        Set<Long> uniqueValues = new HashSet<>();
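+        // use unique sizes so ties cannot make the expected ordering (by value, then key) diverge from the tracker's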
+        for (int i = 0; i < keys.size(); i++)
+        {
+            DecoratedKey key = keys.get(i);
+            long value;
+            do
+            {
+                value = Math.abs(r.nextLong() % 100000);
+            } while (!uniqueValues.add(value));
+            expected.add(Pair.create(key, value));
+            collector.trackPartitionSize(key, value);
+        }
+        assertEquals(keyCount, expected.size());
+        tpt.merge(collector);
+        expected.sort((o1, o2) -> {
+            int cmp = -o1.right.compareTo(o2.right);
+            if (cmp != 0)
+                return cmp;
+            return o1.left.compareTo(o2.left);
+        });
+        Iterator<Pair<DecoratedKey, Long>> expectedTop = expected.subList(0,1000).iterator();
+        Iterator<TopPartitionTracker.TopPartition> trackedTop = tpt.topSizes().top.iterator();
+
+        while (expectedTop.hasNext())
+        {
+            Pair<DecoratedKey, Long> ex = expectedTop.next();
+            TopPartitionTracker.TopPartition tracked = trackedTop.next();
+            assertEquals("seed "+seed, ex.left, tracked.key);
+            assertEquals("seed "+seed, (long)ex.right, tracked.value);
+        }
+    }
+
+    @Test
+    public void testRanges() throws UnknownHostException
+    {
+        createTable("create table %s (id bigint primary key, x int)");
+        DatabaseDescriptor.setMinTrackedPartitionSizeInBytes(new DataStorageSpec.LongBytesBound("0B"));
+        DatabaseDescriptor.setMaxTopSizePartitionCount(1000);
+        long seed = System.currentTimeMillis();
+        Random r = new Random(seed);
+        List<Pair<DecoratedKey, Long>> keys = new ArrayList<>(10000);
+        for (int i = 0; i < 10000; i++)
+            keys.add(Pair.create(dk(i), Math.abs(r.nextLong() % 20000)));
+
+        Collection<Range<Token>> fullRange = Collections.singleton(r(0, 0));
+        TopPartitionTracker tpt = new TopPartitionTracker(getCurrentColumnFamilyStore().metadata());
+        TopPartitionTracker.Collector collector = new TopPartitionTracker.Collector(fullRange);
+        for (int i = 0; i < keys.size(); i++)
+        {
+            Pair<DecoratedKey, Long> entry = keys.get(i);
+            collector.trackPartitionSize(entry.left, entry.right);
+        }
+        tpt.merge(collector);
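+        // register a second endpoint so this node owns only part of the ring and some tracked keys fall outside the local ranges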
+        TokenMetadata tmd = StorageService.instance.getTokenMetadata();
+        tmd.updateNormalToken(t(0), InetAddressAndPort.getByName("127.0.0.1"));
+        tmd.updateNormalToken(t(Long.MAX_VALUE - 1), InetAddressAndPort.getByName("127.0.0.2"));
+        Iterator<TopPartitionTracker.TopPartition> trackedTop = tpt.topSizes().top.iterator();
+        Collection<Range<Token>> localRanges = StorageService.instance.getLocalReplicas(keyspace()).ranges();
+        int outOfRangeCount = 0;
+        while (trackedTop.hasNext())
+        {
+            if (!Range.isInRanges(trackedTop.next().key.getToken(), localRanges))
+                outOfRangeCount++;
+        }
+        assertTrue(outOfRangeCount > 0);
+        collector = new TopPartitionTracker.Collector(localRanges);
+        for (int i = 0; i < keys.size(); i++)
+        {
+            Pair<DecoratedKey, Long> entry = keys.get(i);
+            // during compaction this check is unnecessary because nothing outside the owned ranges is tracked,
+            // but the TopPartitionTracker may still hold entries outside the local ranges - those are cleared in merge()
+            if (Range.isInRanges(entry.left.getToken(), localRanges))
+                collector.trackPartitionSize(entry.left, entry.right);
+        }
+        tpt.merge(collector);
+        outOfRangeCount = 0;
+        trackedTop = tpt.topSizes().top.iterator();
+        while (trackedTop.hasNext())
+        {
+            if (!Range.isInRanges(trackedTop.next().key.getToken(), localRanges))
+                outOfRangeCount++;
+        }
+        assertEquals(0, outOfRangeCount);
+        assertTrue(tpt.topSizes().top.size() > 0);
+    }
+
+    private static TopPartitionTracker.TopPartition tp(int i, long c)
+    {
+        return new TopPartitionTracker.TopPartition(dk(i), c);
+    }
+    private static DecoratedKey dk(long i)
+    {
+        return Murmur3Partitioner.instance.decorateKey(ByteBufferUtil.bytes(i));
+    }
+    private static Range<Token> r(long start, long end)
+    {
+        return new Range<>(t(start), t(end));
+    }
+    private static Token t(long v)
+    {
+        return new Murmur3Partitioner.LongToken(v);
+    }
+    private static long tokenValue(long key)
+    {
+        return (long) dk(key).getToken().getTokenValue();
+    }
+    private static Collection<Range<Token>> rangesFor(long ... keys)
+    {
+        List<Range<Token>> ranges = new ArrayList<>(keys.length);
+        for (long key : keys)
+            ranges.add(r(tokenValue(key) - 1, tokenValue(key)));
+        return ranges;
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/VerifyTest.java b/test/unit/org/apache/cassandra/db/VerifyTest.java
index b2f4344..a58837a 100644
--- a/test/unit/org/apache/cassandra/db/VerifyTest.java
+++ b/test/unit/org/apache/cassandra/db/VerifyTest.java
@@ -19,16 +19,13 @@
 package org.apache.cassandra.db;
 
 import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.net.UnknownHostException;
+import java.nio.channels.FileChannel;
 import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.zip.CRC32;
 import java.util.zip.CheckedInputStream;
@@ -56,7 +53,10 @@
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.schema.CompressionParams;
@@ -68,6 +68,7 @@
 import static org.apache.cassandra.SchemaLoader.createKeyspace;
 import static org.apache.cassandra.SchemaLoader.loadSchema;
 import static org.apache.cassandra.SchemaLoader.standardCFMD;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -306,9 +307,9 @@
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
 
 
-        try (RandomAccessFile file = new RandomAccessFile(sstable.descriptor.filenameFor(Component.DIGEST), "rw"))
+        try (RandomAccessReader file = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.DIGEST))))
         {
-            Long correctChecksum = Long.valueOf(file.readLine());
+            Long correctChecksum = file.readLong();
 
             writeChecksum(++correctChecksum, sstable.descriptor.filenameFor(Component.DIGEST));
         }
@@ -348,9 +349,9 @@
         long startPosition = row0Start < row1Start ? row0Start : row1Start;
         long endPosition = row0Start < row1Start ? row1Start : row0Start;
 
-        RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw");
-        file.seek(startPosition);
-        file.writeBytes(StringUtils.repeat('z', (int) 2));
+        FileChannel file = new File(sstable.getFilename()).newReadWriteChannel();
+        file.position(startPosition);
+        file.write(ByteBufferUtil.bytes(StringUtils.repeat('z', 2)));
         file.close();
         if (ChunkCache.instance != null)
             ChunkCache.instance.invalidateFile(sstable.getFilename());
@@ -400,9 +401,9 @@
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
 
         String filenameToCorrupt = sstable.descriptor.filenameFor(Component.STATS);
-        RandomAccessFile file = new RandomAccessFile(filenameToCorrupt, "rw");
-        file.seek(0);
-        file.writeBytes(StringUtils.repeat('z', 2));
+        FileChannel file = new File(filenameToCorrupt).newReadWriteChannel();
+        file.position(0);
+        file.write(ByteBufferUtil.bytes(StringUtils.repeat('z', 2)));
         file.close();
         try (Verifier verifier = new Verifier(cfs, sstable, false, Verifier.options().invokeDiskFailurePolicy(true).build()))
         {
@@ -439,9 +440,9 @@
 
         // break the sstable:
         Long correctChecksum;
-        try (RandomAccessFile file = new RandomAccessFile(sstable.descriptor.filenameFor(Component.DIGEST), "rw"))
+        try (RandomAccessReader file = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.DIGEST))))
         {
-            correctChecksum = Long.parseLong(file.readLine());
+            correctChecksum = file.readLong();
         }
         writeChecksum(++correctChecksum, sstable.descriptor.filenameFor(Component.DIGEST));
         try (Verifier verifier = new Verifier(cfs, sstable, false, Verifier.options().mutateRepairStatus(false).invokeDiskFailurePolicy(true).build()))
@@ -508,9 +509,9 @@
 
         sstable = cfs.getLiveSSTables().iterator().next();
         Long correctChecksum;
-        try (RandomAccessFile file = new RandomAccessFile(sstable.descriptor.filenameFor(Component.DIGEST), "rw"))
+        try (RandomAccessReader file = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.DIGEST))))
         {
-            correctChecksum = Long.parseLong(file.readLine());
+            correctChecksum = file.readLong();
         }
         writeChecksum(++correctChecksum, sstable.descriptor.filenameFor(Component.DIGEST));
         try (Verifier verifier = new Verifier(cfs, sstable, false, Verifier.options().invokeDiskFailurePolicy(true).mutateRepairStatus(true).build()))
@@ -554,9 +555,9 @@
             verifier.verify(); //still not corrupt, should pass
         }
         String filenameToCorrupt = sstable.descriptor.filenameFor(componentToBreak);
-        try (RandomAccessFile file = new RandomAccessFile(filenameToCorrupt, "rw"))
+        try (FileChannel fileChannel = new File(filenameToCorrupt).newReadWriteChannel())
         {
-            file.setLength(3);
+            fileChannel.truncate(3);
         }
 
         try (Verifier verifier = new Verifier(cfs, sstable, false, Verifier.options().invokeDiskFailurePolicy(true).build()))
@@ -584,9 +585,9 @@
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
 
 
-        try (RandomAccessFile file = new RandomAccessFile(sstable.descriptor.filenameFor(Component.DIGEST), "rw"))
+        try (RandomAccessReader file = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.DIGEST))))
         {
-            Long correctChecksum = Long.valueOf(file.readLine());
+            Long correctChecksum = file.readLong();
 
             writeChecksum(++correctChecksum, sstable.descriptor.filenameFor(Component.DIGEST));
         }
@@ -699,10 +700,10 @@
         tmd.updateNormalToken(new ByteOrderedPartitioner.BytesToken(tk1), InetAddressAndPort.getByName("127.0.0.1"));
         tmd.updateNormalToken(new ByteOrderedPartitioner.BytesToken(tk2), InetAddressAndPort.getByName("127.0.0.2"));
         // write some bogus to a localpartitioner table
-        Batch bogus = Batch.createLocal(UUID.randomUUID(), 0, Collections.emptyList());
+        Batch bogus = Batch.createLocal(nextTimeUUID(), 0, Collections.emptyList());
         BatchlogManager.store(bogus);
         ColumnFamilyStore cfs = Keyspace.open("system").getColumnFamilyStore("batches");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         for (SSTableReader sstable : cfs.getLiveSSTables())
         {
 
@@ -760,7 +761,7 @@
                          .apply();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     protected void fillCounterCF(ColumnFamilyStore cfs, int partitionsPerSSTable) throws WriteTimeoutException
@@ -772,12 +773,12 @@
                          .apply();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     protected long simpleFullChecksum(String filename) throws IOException
     {
-        try (FileInputStream inputStream = new FileInputStream(filename))
+        try (FileInputStreamPlus inputStream = new FileInputStreamPlus(filename))
         {
             CRC32 checksum = new CRC32();
             CheckedInputStream cinStream = new CheckedInputStream(inputStream, checksum);
diff --git a/test/unit/org/apache/cassandra/db/aggregation/GroupMakerTest.java b/test/unit/org/apache/cassandra/db/aggregation/GroupMakerTest.java
index 8ff262d..13fb0df 100644
--- a/test/unit/org/apache/cassandra/db/aggregation/GroupMakerTest.java
+++ b/test/unit/org/apache/cassandra/db/aggregation/GroupMakerTest.java
@@ -18,17 +18,30 @@
 package org.apache.cassandra.db.aggregation;
 
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.cql3.Constants.Literal;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.VariableSpecifications;
+import org.apache.cassandra.cql3.functions.ScalarFunction;
+import org.apache.cassandra.cql3.functions.TimeFcts;
+import org.apache.cassandra.cql3.selection.Selectable;
+import org.apache.cassandra.cql3.selection.Selector;
 import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.ClusteringComparator;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.ReversedType;
+import org.apache.cassandra.db.marshal.TimestampType;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -45,7 +58,7 @@
     public void testIsNewGroupWithClusteringColumns()
     {
         ClusteringComparator comparator = newComparator(false, false, false);
-        GroupMaker groupMaker = GroupMaker.newInstance(comparator, 2);
+        GroupMaker groupMaker = GroupMaker.newPkPrefixGroupMaker(comparator, 2);
 
         assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(1, 1, 1)));
         assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(1, 1, 2)));
@@ -63,7 +76,7 @@
     public void testIsNewGroupWithOneClusteringColumnsPrefix()
     {
         ClusteringComparator comparator = newComparator(false, false, false);
-        GroupMaker groupMaker = GroupMaker.newInstance(comparator, 1);
+        GroupMaker groupMaker = GroupMaker.newPkPrefixGroupMaker(comparator, 1);
 
         assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(1, 1, 1)));
         assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(1, 1, 2)));
@@ -82,7 +95,7 @@
     {
         ClusteringComparator comparator = newComparator(true, true, true);
 
-        GroupMaker groupMaker = GroupMaker.newInstance(comparator, 2);
+        GroupMaker groupMaker = GroupMaker.newPkPrefixGroupMaker(comparator, 2);
 
         assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(1, 3, 2)));
         assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(1, 3, 1)));
@@ -102,7 +115,7 @@
     {
         ClusteringComparator comparator = newComparator(true, false, false);
 
-        GroupMaker groupMaker = GroupMaker.newInstance(comparator, 2);
+        GroupMaker groupMaker = GroupMaker.newPkPrefixGroupMaker(comparator, 2);
 
         assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(1, 3, 1)));
         assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(1, 3, 2)));
@@ -121,7 +134,7 @@
     public void testIsNewGroupWithStaticClusteringColumns()
     {
         ClusteringComparator comparator = newComparator(false, false, false);
-        GroupMaker groupMaker = GroupMaker.newInstance(comparator, 2);
+        GroupMaker groupMaker = GroupMaker.newPkPrefixGroupMaker(comparator, 2);
 
         assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(1, 1, 1)));
         assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(1, 1, 2)));
@@ -135,7 +148,7 @@
     public void testIsNewGroupWithOnlyPartitionKeyComponents()
     {
         ClusteringComparator comparator = newComparator(false, false, false);
-        GroupMaker goupMaker = GroupMaker.newInstance(comparator, 2);
+        GroupMaker goupMaker = GroupMaker.newPkPrefixGroupMaker(comparator, 2);
 
         assertTrue(goupMaker.isNewGroup(partitionKey(1, 1), clustering(1, 1, 1)));
         assertFalse(goupMaker.isNewGroup(partitionKey(1, 1), clustering(1, 1, 2)));
@@ -146,6 +159,100 @@
         assertTrue(goupMaker.isNewGroup(partitionKey(2, 2), clustering(1, 1, 2)));
     }
 
+    @Test
+    public void testIsNewGroupWithFunction()
+    {
+        GroupMaker groupMaker = newSelectorGroupMaker(false);
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:10:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:12:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:14:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:15:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:21:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:22:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:20 UTC")));
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), clustering("2016-09-27 16:26:20 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), clustering("2016-09-27 16:30:00 UTC")));
+    }
+
+    @Test
+    public void testIsNewGroupWithFunctionAndReversedOrder()
+    {
+        GroupMaker groupMaker = newSelectorGroupMaker(true);
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:20 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:22:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:21:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:15:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:14:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:12:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:10:00 UTC")));
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), clustering("2016-09-27 16:30:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), clustering("2016-09-27 16:26:20 UTC")));
+    }
+
+    @Test
+    public void testIsNewGroupWithFunctionWithStaticColumn()
+    {
+        GroupMaker groupMaker = newSelectorGroupMaker(false);
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:10:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:12:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:14:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:15:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:21:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:22:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:20 UTC")));
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), Clustering.STATIC_CLUSTERING));
+        assertTrue(groupMaker.isNewGroup(partitionKey(3), Clustering.STATIC_CLUSTERING));
+        assertTrue(groupMaker.isNewGroup(partitionKey(4), clustering("2016-09-27 16:26:20 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(4), clustering("2016-09-27 16:30:00 UTC")));
+    }
+
+    @Test
+    public void testIsNewGroupWithFunctionAndReversedOrderWithStaticColumns()
+    {
+        GroupMaker groupMaker = newSelectorGroupMaker(true);
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:20 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:26:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:22:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:21:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:15:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:14:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:12:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering("2016-09-27 16:10:00 UTC")));
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), Clustering.STATIC_CLUSTERING));
+        assertTrue(groupMaker.isNewGroup(partitionKey(3), Clustering.STATIC_CLUSTERING));
+        assertTrue(groupMaker.isNewGroup(partitionKey(4), clustering("2016-09-27 16:30:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(4), clustering("2016-09-27 16:26:20 UTC")));
+    }
+
+    @Test
+    public void testIsNewGroupWithPrefixAndFunction()
+    {
+        GroupMaker groupMaker = newSelectorGroupMaker(false, false);
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(1, "2016-09-27 16:10:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(1, "2016-09-27 16:12:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(1, "2016-09-27 16:14:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(1, "2016-09-27 16:15:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(2, "2016-09-27 16:16:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(2, "2016-09-27 16:22:00 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(1), clustering(2, "2016-09-27 16:26:00 UTC")));
+        assertFalse(groupMaker.isNewGroup(partitionKey(1), clustering(2, "2016-09-27 16:26:20 UTC")));
+
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), clustering(1, "2016-09-27 16:26:20 UTC")));
+        assertTrue(groupMaker.isNewGroup(partitionKey(2), clustering(1, "2016-09-27 16:30:00 UTC")));
+    }
+
     private static DecoratedKey partitionKey(int... components)
     {
         ByteBuffer buffer = ByteBuffer.allocate(components.length * 4);
@@ -162,6 +269,19 @@
         return Clustering.make(toByteBufferArray(components));
     }
 
+    private static Clustering<?> clustering(String timeComponent)
+    {
+        ByteBuffer buffer = TimestampType.instance.fromString(timeComponent);
+        return Clustering.make(buffer);
+    }
+
+    private static Clustering<?> clustering(int component, String timeComponent)
+    {
+        ByteBuffer first = Int32Type.instance.decompose(component);
+        ByteBuffer second = TimestampType.instance.fromString(timeComponent);
+        return Clustering.make(first, second);
+    }
+
     private static ByteBuffer[] toByteBufferArray(int[] values)
     {
         ByteBuffer[] buffers = new ByteBuffer[values.length];
@@ -182,4 +302,31 @@
 
         return new ClusteringComparator(types);
     }
+
+    private GroupMaker newSelectorGroupMaker(boolean... reversed)
+    {
+        TableMetadata.Builder builder = TableMetadata.builder("keyspace", "test")
+                                                     .addPartitionKeyColumn("partition_key", Int32Type.instance);
+
+        int last = reversed.length - 1;
+        for (int i = 0; i < reversed.length; i++)
+        {
+            AbstractType<?> type = i == last ? TimestampType.instance : Int32Type.instance;
+            builder.addClusteringColumn("clustering" + i, reversed[i] ? ReversedType.getInstance(type) : type);
+        }
+
+        TableMetadata table = builder.build();
+
+        ColumnMetadata column = table.getColumn(new ColumnIdentifier("clustering" + last, false));
+
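+        // builds a floor(<last clustering column>, 5m, '2016-09-27 16:00:00 UTC') selector, so rows in the same 5-minute bucket share a group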
+        Selectable.WithTerm duration = new Selectable.WithTerm(Literal.duration("5m"));
+        Selectable.WithTerm startTime = new Selectable.WithTerm(Literal.string("2016-09-27 16:00:00 UTC"));
+        ScalarFunction function = TimeFcts.FloorTimestampFunction.newInstanceWithStartTimeArgument();
+
+        Selectable.WithFunction selectable = new Selectable.WithFunction(function, Arrays.asList(column, duration, startTime));
+        Selector.Factory factory = selectable.newSelectorFactory(table, null, new ArrayList<>(), VariableSpecifications.empty());
+        Selector selector = factory.newInstance(QueryOptions.DEFAULT);
+
+        return GroupMaker.newSelectorGroupMaker(table.comparator, reversed.length, selector);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/columniterator/SSTableReverseIteratorTest.java b/test/unit/org/apache/cassandra/db/columniterator/SSTableReverseIteratorTest.java
index 9040f11..9f9b7bb 100644
--- a/test/unit/org/apache/cassandra/db/columniterator/SSTableReverseIteratorTest.java
+++ b/test/unit/org/apache/cassandra/db/columniterator/SSTableReverseIteratorTest.java
@@ -28,6 +28,7 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -81,7 +82,7 @@
         QueryProcessor.executeInternal(String.format("UPDATE %s.%s SET v1=? WHERE k=? AND c=?", KEYSPACE, table), bytes(0x20000), key, 2);
         QueryProcessor.executeInternal(String.format("UPDATE %s.%s SET v1=? WHERE k=? AND c=?", KEYSPACE, table), bytes(0x20000), key, 3);
 
-        tbl.forceBlockingFlush();
+        Util.flush(tbl);
         SSTableReader sstable = Iterables.getOnlyElement(tbl.getLiveSSTables());
         DecoratedKey dk = tbl.getPartitioner().decorateKey(Int32Type.instance.decompose(key));
         RowIndexEntry indexEntry = sstable.getPosition(dk, SSTableReader.Operator.EQ);
diff --git a/test/unit/org/apache/cassandra/db/commitlog/AbstractCommitLogServiceTest.java b/test/unit/org/apache/cassandra/db/commitlog/AbstractCommitLogServiceTest.java
index 741b145..a918e3c 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/AbstractCommitLogServiceTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/AbstractCommitLogServiceTest.java
@@ -30,6 +30,8 @@
 import org.apache.cassandra.db.commitlog.AbstractCommitLogService.SyncRunnable;
 import org.apache.cassandra.utils.FreeRunningClock;
 
+import static org.apache.cassandra.concurrent.Interruptible.State.NORMAL;
+import static org.apache.cassandra.concurrent.Interruptible.State.SHUTTING_DOWN;
 import static org.apache.cassandra.db.commitlog.AbstractCommitLogService.DEFAULT_MARKER_INTERVAL_MILLIS;
 
 public class AbstractCommitLogServiceTest
@@ -111,7 +113,7 @@
     }
 
     @Test
-    public void testSync()
+    public void testSync() throws InterruptedException
     {
         long syncTimeMillis = AbstractCommitLogService.DEFAULT_MARKER_INTERVAL_MILLIS * 2;
         FreeRunningClock clock = new FreeRunningClock();
@@ -120,26 +122,25 @@
         FakeCommitLog commitLog = (FakeCommitLog) commitLogService.commitLog;
 
         // at time 0
-        Assert.assertTrue(syncRunnable.sync());
+        syncRunnable.run(NORMAL);
         Assert.assertEquals(1, commitLog.markCount.get());
         Assert.assertEquals(0, commitLog.syncCount.get());
 
         // at time DEFAULT_MARKER_INTERVAL_MILLIS
         clock.advance(DEFAULT_MARKER_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
-        Assert.assertTrue(syncRunnable.sync());
+        syncRunnable.run(NORMAL);
         Assert.assertEquals(2, commitLog.markCount.get());
         Assert.assertEquals(0, commitLog.syncCount.get());
 
         // at time DEFAULT_MARKER_INTERVAL_MILLIS * 2
         clock.advance(DEFAULT_MARKER_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
-        Assert.assertTrue(syncRunnable.sync());
+        syncRunnable.run(NORMAL);
         Assert.assertEquals(2, commitLog.markCount.get());
         Assert.assertEquals(1, commitLog.syncCount.get());
 
         // at time DEFAULT_MARKER_INTERVAL_MILLIS * 3, but with shutdown!
         clock.advance(DEFAULT_MARKER_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
-        commitLogService.shutdown();
-        Assert.assertFalse(syncRunnable.sync());
+        syncRunnable.run(SHUTTING_DOWN);
         Assert.assertEquals(2, commitLog.markCount.get());
         Assert.assertEquals(2, commitLog.syncCount.get());
     }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java b/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java
index fb7dda1..e9ec640 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java
@@ -34,6 +34,9 @@
 import org.apache.cassandra.db.RowUpdateBuilder;
 import org.apache.cassandra.security.EncryptionContext;
 
+import static org.junit.Assert.assertEquals;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class BatchCommitLogTest extends CommitLogTest
 {
     private static final long CL_BATCH_SYNC_WINDOW = 1000; // 1 second
@@ -60,19 +63,37 @@
                      .add("val", ByteBuffer.allocate(10 * 1024))
                      .build();
 
-        long startNano = System.nanoTime();
+        long startNano = nanoTime();
         CommitLog.instance.add(m);
-        long delta = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNano);
+        long delta = TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNano);
         Assert.assertTrue("Expect batch commitlog sync immediately, but took " + delta, delta < CL_BATCH_SYNC_WINDOW);
     }
 
     @Test
     public void testBatchCLShutDownImmediately() throws InterruptedException
     {
-        long startNano = System.nanoTime();
+        long startNano = nanoTime();
         CommitLog.instance.shutdownBlocking();
-        long delta = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNano);
+        long delta = TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNano);
         Assert.assertTrue("Expect batch commitlog shutdown immediately, but took " + delta, delta < CL_BATCH_SYNC_WINDOW);
         CommitLog.instance.start();
     }
+
+    @Test
+    public void testFlushAndWaitingMetrics()
+    {
+        ColumnFamilyStore cfs1 = Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
+        Mutation m = new RowUpdateBuilder(cfs1.metadata.get(), 0, "key").clustering("bytes")
+                                                                        .add("val", ByteBuffer.allocate(10 * 1024))
+                                                                        .build();
+
+        long startingFlushCount = CommitLog.instance.metrics.waitingOnFlush.getCount();
+        long startingWaitCount = CommitLog.instance.metrics.waitingOnCommit.getCount();
+
+        CommitLog.instance.add(m);
+
+        // We should register exactly one new waiting-on-flush and one new waiting-on-commit data point.
+        assertEquals(startingFlushCount + 1, CommitLog.instance.metrics.waitingOnFlush.getCount());
+        assertEquals(startingWaitCount + 1, CommitLog.instance.metrics.waitingOnCommit.getCount());
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java b/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java
index fa3295a..7624590 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java
@@ -17,9 +17,9 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.io.IOException;
 
+import org.apache.cassandra.io.util.File;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,7 +45,7 @@
 
     public void examineCommitLog() throws IOException
     {
-        replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles());
+        replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).tryList());
     }
 
     private class CommitLogTestReader extends CommitLogReader
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogCQLTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogCQLTest.java
index 4725bcf..2fd38d8 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogCQLTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogCQLTest.java
@@ -18,12 +18,19 @@
 
 package org.apache.cassandra.db.commitlog;
 
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 
 public class CommitLogCQLTest extends CQLTester
@@ -45,7 +52,7 @@
         // Calling switchMemtable directly applies Flush even though memtable is empty. This can happen with some races
         // (flush with recycling by segment manager). It should still tell commitlog that the memtable's region is clean.
         // CASSANDRA-12436
-        cfs.switchMemtable();
+        cfs.switchMemtable(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         execute("INSERT INTO %s (idx, data) VALUES (?, ?)", 15, Integer.toString(17));
 
@@ -56,4 +63,66 @@
         active.retainAll(CommitLog.instance.segmentManager.getActiveSegments());
         assert active.isEmpty();
     }
+    
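+    // Hammers memtable switching from ten concurrent threads for 15 seconds, each alternating
+    // batches of inserts with cfs.dumpMemtable(), and fails with the collected stack traces if any
+    // thread hits an error.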
+    @Test
+    public void testSwitchMemtable() throws Throwable
+    {
+        createTable("CREATE TABLE %s (idx INT, data TEXT, PRIMARY KEY(idx));");
+        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
+        
+        AtomicBoolean shouldStop = new AtomicBoolean(false);
+        ConcurrentLinkedQueue<Throwable> errors = new ConcurrentLinkedQueue<>();
+        List<Thread> threads = new ArrayList<>();
+        
+        final String stmt = String.format("INSERT INTO %s.%s (idx, data) VALUES(?, ?)", KEYSPACE, currentTable());
+        for (int i = 0; i < 10; ++i)
+        {
+            threads.add(new Thread("" + i)
+            {
+                public void run()
+                {
+                    try
+                    {
+                        while (!shouldStop.get())
+                        {
+                            for (int i = 0; i < 50; i++)
+                            {
+                                QueryProcessor.executeInternal(stmt, i, Integer.toString(i));
+                            }
+                            cfs.dumpMemtable();
+                        }
+                    }
+                    catch (Throwable t)
+                    {
+                        errors.add(t);
+                        shouldStop.set(true);
+                    }
+                }
+            });
+        }
+
+        for (Thread t : threads)
+            t.start();
+
+        Thread.sleep(15_000);
+        shouldStop.set(true);
+        
+        for (Thread t : threads)
+            t.join();
+
+        if (!errors.isEmpty())
+        {
+            StringBuilder sb = new StringBuilder();
+            for (Throwable error : errors)
+            {
+                sb.append("Got error during memtable switching:\n");
+                sb.append(error.getMessage() + "\n");
+                ByteArrayOutputStream os = new ByteArrayOutputStream();
+                PrintStream ps = new PrintStream(os);
+                error.printStackTrace(ps);
+                sb.append(os.toString("UTF-8"));
+            }
+            Assert.fail(sb.toString());
+        }
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java
index fb90d59..319e75b 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java
@@ -18,12 +18,12 @@
 
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Random;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
index 53c6769..5ef83b4 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
@@ -29,15 +29,18 @@
 import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.ParameterizedClass;
 import org.apache.cassandra.config.TransparentDataEncryptionOptions;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.compress.LZ4Compressor;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.io.util.FileSegmentInputStream;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.security.EncryptionContext;
 import org.apache.cassandra.security.EncryptionContextGenerator;
+import org.assertj.core.api.Assertions;
 
 public class CommitLogDescriptorTest
 {
@@ -86,6 +89,21 @@
         Assert.assertEquals(MessagingService.current_version, CommitLogDescriptor.fromFileName(newCLName).getMessagingVersion());
     }
 
+    @Test
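+    // Pre-version segment names are rejected as too old, malformed names fail to parse, and
+    // well-formed names yield the exact segment id.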
+    public void testExactIdFromFileName()
+    {
+        Assertions.assertThatThrownBy(() -> CommitLogDescriptor.idFromFileName("CommitLog-1340512736956320000.log"))
+                  .hasMessageContaining("Commitlog segment is too old to open")
+                  .isInstanceOf(UnsupportedOperationException.class);
+
+        Assertions.assertThatThrownBy(() -> CommitLogDescriptor.idFromFileName("CommitLog--1340512736956320000.log"))
+                  .hasMessageContaining("Cannot parse the version of the file")
+                  .isInstanceOf(RuntimeException.class);
+
+        Assertions.assertThat(CommitLogDescriptor.idFromFileName("CommitLog-2-1340512736956320000.log"))
+                  .isEqualTo(1340512736956320000L);
+    }
+
     // migrated from CommitLogTest
     private void testDescriptorPersistence(CommitLogDescriptor desc) throws IOException
     {
@@ -309,4 +327,19 @@
         CommitLogDescriptor desc2 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 1, compression, enabledEncryption);
         Assert.assertEquals(desc1, desc2);
     }
+
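+    // A CDC index file can only be inferred from a file name that matches the commit log pattern;
+    // a name with an unexpected suffix yields null.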
+    @Test
+    public void testInferCDCIndexFile()
+    {
+        DatabaseDescriptor.daemonInitialization();
+        String fileNameSuffix = "CommitLog-2-1340512736956320000";
+        File validCdcLink = new File(fileNameSuffix + ".log");
+        File inferredIndexFile = CommitLogDescriptor.inferCdcIndexFile(validCdcLink);
+        Assert.assertNotNull(inferredIndexFile);
+        Assert.assertEquals(fileNameSuffix + "_cdc.idx", inferredIndexFile.name());
+
+        File invalidCdcLink = new File(fileNameSuffix + ".invalidlog");
+        inferredIndexFile = CommitLogDescriptor.inferCdcIndexFile(invalidCdcLink);
+        Assert.assertNull(inferredIndexFile);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogFailurePolicyTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogFailurePolicyTest.java
index 79f83fe..aadd2fd 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogFailurePolicyTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogFailurePolicyTest.java
@@ -25,7 +25,6 @@
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.service.CassandraDaemon;
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogInitWithExceptionTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogInitWithExceptionTest.java
index 21d207b..b3cff94 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogInitWithExceptionTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogInitWithExceptionTest.java
@@ -19,22 +19,23 @@
 package org.apache.cassandra.db.commitlog;
 
 
+import org.apache.cassandra.Util;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.apache.cassandra.CassandraIsolatedJunit4ClassRunner;
-import org.apache.cassandra.Util;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.utils.JVMStabilityInspector;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+
 
 @RunWith(CassandraIsolatedJunit4ClassRunner.class)
 public class CommitLogInitWithExceptionTest
 {
-    private final static SimpleCondition killed = new SimpleCondition();
-
+    private static Thread initThread;
+    private final static Condition killed = Condition.newOneTimeCondition();
     @BeforeClass
     public static void setUp()
     {
@@ -58,7 +59,7 @@
             }
             finally
             {
-                killed.signalAll();
+                killed.signal();
             }
         };
     }
@@ -66,12 +67,12 @@
     @Test
     public void testCommitLogInitWithException() {
         // This line will trigger the initialization process because it is the first time the CommitLog class is accessed.
-        Thread initThread = new Thread(CommitLog.instance::start);
+        initThread = new Thread(CommitLog.instance::start);
 
         initThread.setName("initThread");
         initThread.start();
 
-        Util.spinAssertEquals(true, killed::isSignaled, 120);
+        Util.spinAssertEquals(true, killed::isSignalled, 10);
     }
 
     private static class MockCommitLogSegmentMgr extends CommitLogSegmentManagerStandard {
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
index 794f99f..02ce3f6 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
@@ -17,16 +17,17 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.Config;
@@ -191,7 +192,7 @@
     static ArrayList<File> getCommitLogs()
     {
         File dir = new File(DatabaseDescriptor.getCommitLogLocation());
-        File[] files = dir.listFiles();
+        File[] files = dir.tryList();
         ArrayList<File> results = new ArrayList<>();
         for (File f : files)
         {
@@ -261,7 +262,9 @@
         for (int i = midpoint; i < entryCount; i++)
             execute("INSERT INTO %s (idx, data) VALUES (?, ?)", i, Integer.toString(i));
 
-        Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush();
+        Keyspace.open(keyspace())
+                .getColumnFamilyStore(currentTable())
+                .forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
         return result;
     }
 }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentBackpressureTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentBackpressureTest.java
index 6b167b2..e28c25e 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentBackpressureTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentBackpressureTest.java
@@ -66,12 +66,12 @@
     @Test
     @BMRules(rules = {@BMRule(name = "Acquire Semaphore before sync",
                               targetClass = "AbstractCommitLogService$SyncRunnable",
-                              targetMethod = "sync",
+                              targetMethod = "run",
                               targetLocation = "AT INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync(boolean)",
                               action = "org.apache.cassandra.db.commitlog.CommitLogSegmentBackpressureTest.allowSync.acquire()"),
                       @BMRule(name = "Release Semaphore after sync",
                               targetClass = "AbstractCommitLogService$SyncRunnable",
-                              targetMethod = "sync",
+                              targetMethod = "run",
                               targetLocation = "AFTER INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync(boolean)",
                               action = "org.apache.cassandra.db.commitlog.CommitLogSegmentBackpressureTest.allowSync.release()")})
     public void testCompressedCommitLogBackpressure() throws Throwable
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
index 4128b71..3789b51 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
@@ -18,12 +18,17 @@
 
 package org.apache.cassandra.db.commitlog;
 
-import java.io.*;
+import java.io.BufferedReader;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.*;
+import java.util.concurrent.TimeUnit;
 
+import com.google.monitoring.runtime.instrumentation.common.util.concurrent.Uninterruptibles;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileReader;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -31,6 +36,7 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.RowUpdateBuilder;
 import org.apache.cassandra.db.commitlog.CommitLogSegment.CDCState;
@@ -46,7 +52,7 @@
     public static void setUpClass()
     {
         DatabaseDescriptor.setCDCEnabled(true);
-        DatabaseDescriptor.setCDCSpaceInMB(1024);
+        DatabaseDescriptor.setCDCTotalSpaceInMiB(1024);
         CQLTester.setUpClass();
     }
 
@@ -63,31 +69,8 @@
     @Test
     public void testCDCWriteFailure() throws Throwable
     {
-        createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
-        CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager;
-        TableMetadata cfm = currentTableMetadata();
-
-        // Confirm that logic to check for whether or not we can allocate new CDC segments works
-        Integer originalCDCSize = DatabaseDescriptor.getCDCSpaceInMB();
-        try
-        {
-            DatabaseDescriptor.setCDCSpaceInMB(32);
-            // Spin until we hit CDC capacity and make sure we get a CDCWriteException
-            try
-            {
-                // Should trigger on anything < 20:1 compression ratio during compressed test
-                for (int i = 0; i < 100; i++)
-                {
-                    new RowUpdateBuilder(cfm, 0, i)
-                        .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
-                        .build().apply();
-                }
-                Assert.fail("Expected CDCWriteException from full CDC but did not receive it.");
-            }
-            catch (CDCWriteException e)
-            {
-                // expected, do nothing
-            }
+        testWithCDCSpaceInMb(32, () -> {
+            createTableAndBulkWrite();
             expectCurrentCDCState(CDCState.FORBIDDEN);
 
             // Confirm we can create a non-cdc table and write to it even while at cdc capacity
@@ -95,69 +78,68 @@
             execute("INSERT INTO %s (idx, data) VALUES (1, '1');");
 
             // Confirm that, on flush+recycle, we see files show up in cdc_raw
-            Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush();
+            CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager;
+            Keyspace.open(keyspace())
+                    .getColumnFamilyStore(currentTable())
+                    .forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
             CommitLog.instance.forceRecycleAllSegments();
             cdcMgr.awaitManagementTasksCompletion();
             Assert.assertTrue("Expected files to be moved to overflow.", getCDCRawCount() > 0);
 
             // Simulate a CDC consumer reading files then deleting them
-            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
+            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
                 FileUtils.deleteWithConfirm(f);
 
             // Update size tracker to reflect deleted files. Should flip flag on current allocatingFrom to allow.
             cdcMgr.updateCDCTotalSize();
             expectCurrentCDCState(CDCState.PERMITTED);
-        }
-        finally
-        {
-            DatabaseDescriptor.setCDCSpaceInMB(originalCDCSize);
-        }
+        });
     }
 
     @Test
     public void testSegmentFlaggingOnCreation() throws Throwable
     {
-        CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager;
-        String ct = createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
+        testSegmentFlaggingOnCreation0();
+    }
 
-        int origSize = DatabaseDescriptor.getCDCSpaceInMB();
-        try
-        {
-            DatabaseDescriptor.setCDCSpaceInMB(16);
-            TableMetadata ccfm = Keyspace.open(keyspace()).getColumnFamilyStore(ct).metadata();
-            // Spin until we hit CDC capacity and make sure we get a CDCWriteException
-            try
-            {
-                for (int i = 0; i < 1000; i++)
-                {
-                    new RowUpdateBuilder(ccfm, 0, i)
-                        .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
-                        .build().apply();
-                }
-                Assert.fail("Expected CDCWriteException from full CDC but did not receive it.");
-            }
-            catch (CDCWriteException e) { }
+    @Test
+    public void testSegmentFlaggingWithNonblockingOnCreation() throws Throwable
+    {
+        testWithNonblockingMode(this::testSegmentFlaggingOnCreation0);
+    }
 
-            expectCurrentCDCState(CDCState.FORBIDDEN);
-            CommitLog.instance.forceRecycleAllSegments();
+    @Test
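+    // With non-blocking CDC writes, bulk writing under a small space limit should keep the tracked
+    // CDC size bounded (no more than targetFilesCount times the limit) while still recording data.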
+    public void testNonblockingShouldMaintainSteadyDiskUsage() throws Throwable
+    {
+        final int commitlogSize = DatabaseDescriptor.getCommitLogSegmentSize() / 1024 / 1024;
+        final int targetFilesCount = 3;
+        final long cdcSizeLimit = commitlogSize * targetFilesCount;
+        final int mutationSize = DatabaseDescriptor.getCommitLogSegmentSize() / 3;
+        testWithNonblockingMode(() -> testWithCDCSpaceInMb((int) cdcSizeLimit, () -> {
+            CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager;
 
-            cdcMgr.awaitManagementTasksCompletion();
-            // Delete all files in cdc_raw
-            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
-                f.delete();
-            cdcMgr.updateCDCTotalSize();
-            // Confirm cdc update process changes flag on active segment
-            expectCurrentCDCState(CDCState.PERMITTED);
+            createTableAndBulkWrite(mutationSize);
 
-            // Clear out archived CDC files
-            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) {
-                FileUtils.deleteWithConfirm(f);
-            }
-        }
-        finally
-        {
-            DatabaseDescriptor.setCDCSpaceInMB(origSize);
-        }
+            long actualSize = cdcMgr.updateCDCTotalSize();
+            long cdcSizeLimitBytes = cdcSizeLimit * 1024 * 1024;
+            Assert.assertTrue(String.format("Actual size (%s) should not exceed the size limit (%s)", actualSize, cdcSizeLimitBytes),
+                              actualSize <= cdcSizeLimitBytes * targetFilesCount);
+            Assert.assertTrue(String.format("Actual size (%s) should be at least the mutation size (%s)", actualSize, mutationSize),
+                              actualSize >= mutationSize);
+        }));
+    }
+
+    @Test // switch from blocking to nonblocking, then back to blocking
+    public void testSwitchingCDCWriteModes() throws Throwable
+    {
+        String tableName = createTableAndBulkWrite();
+        expectCurrentCDCState(CDCState.FORBIDDEN);
+        testWithNonblockingMode(() -> {
+            bulkWrite(tableName);
+            expectCurrentCDCState(CDCState.CONTAINS);
+        });
+        bulkWrite(tableName);
+        expectCurrentCDCState(CDCState.FORBIDDEN);
     }
 
     @Test
@@ -177,38 +159,37 @@
         Assert.assertTrue("Index file not written: " + cdcIndexFile, cdcIndexFile.exists());
 
         // Read index value and confirm it's == end from last sync
-        BufferedReader in = new BufferedReader(new FileReader(cdcIndexFile));
-        String input = in.readLine();
-        Integer offset = Integer.parseInt(input);
-        Assert.assertEquals(syncOffset, (long)offset);
-        in.close();
+        String input = null;
+        // There could be a race between index file update (truncate & write) and read. See CASSANDRA-17416
+        // It is possible to read an empty line. In that case, retry at most 5 times.
+        for (int i = 0; input == null && i < 5; i++)
+        {
+            if (i != 0) // add a little pause between each attempt
+                Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
+
+            try (BufferedReader in = new BufferedReader(new FileReader(cdcIndexFile)))
+            {
+                input = in.readLine();
+            }
+        }
+
+        if (input == null)
+        {
+            Assert.fail("Unable to read the CDC index file after several attempts");
+        }
+
+        int indexOffset = Integer.parseInt(input);
+        Assert.assertTrue("The offset read from CDC index file should be equal or larger than the offset after sync. See CASSANDRA-17416",
+                          syncOffset <= indexOffset);
     }
 
     @Test
-    public void testCompletedFlag() throws IOException
+    public void testCompletedFlag() throws Throwable
     {
-        createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
+        String tableName = createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
         CommitLogSegment initialSegment = CommitLog.instance.segmentManager.allocatingFrom();
-        Integer originalCDCSize = DatabaseDescriptor.getCDCSpaceInMB();
 
-        DatabaseDescriptor.setCDCSpaceInMB(8);
-        try
-        {
-            for (int i = 0; i < 1000; i++)
-            {
-                new RowUpdateBuilder(currentTableMetadata(), 0, 1)
-                .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
-                .build().apply();
-            }
-        }
-        catch (CDCWriteException ce)
-        {
-            // pass. Expected since we'll have a file or two linked on restart of CommitLog due to replay
-        }
-        finally
-        {
-            DatabaseDescriptor.setCDCSpaceInMB(originalCDCSize);
-        }
+        testWithCDCSpaceInMb(8, () -> bulkWrite(tableName));
 
         CommitLog.instance.forceRecycleAllSegments();
 
@@ -234,7 +215,7 @@
         CommitLogSegment currentSegment = CommitLog.instance.segmentManager.allocatingFrom();
 
         // Confirm that, with no CDC data present, we've hard-linked but have no index file
-        Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.getName()).toPath();
+        Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.name()).toPath();
         File cdcIndexFile = currentSegment.getCDCIndexFile();
         Assert.assertTrue("File does not exist: " + linked, Files.exists(linked));
         Assert.assertFalse("Expected index file to not be created but found: " + cdcIndexFile, cdcIndexFile.exists());
@@ -262,7 +243,7 @@
             .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
             .build().apply();
 
-        Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.getName()).toPath();
+        Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.name()).toPath();
         // Confirm that, with CDC data present but not yet flushed, we've hard-linked but have no index file
         Assert.assertTrue("File does not exist: " + linked, Files.exists(linked));
 
@@ -278,45 +259,23 @@
     }
 
     @Test
-    public void testReplayLogic() throws IOException
+    public void testReplayLogic() throws Throwable
     {
-        // Assert.assertEquals(0, new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length);
-        String table_name = createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
-        Integer originalCDCSize = DatabaseDescriptor.getCDCSpaceInMB();
-
-        DatabaseDescriptor.setCDCSpaceInMB(8);
-        TableMetadata ccfm = Keyspace.open(keyspace()).getColumnFamilyStore(table_name).metadata();
-        try
-        {
-            for (int i = 0; i < 1000; i++)
-            {
-                new RowUpdateBuilder(ccfm, 0, i)
-                    .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
-                    .build().apply();
-            }
-            Assert.fail("Expected CDCWriteException from full CDC but did not receive it.");
-        }
-        catch (CDCWriteException e)
-        {
-            // pass
-        }
-        finally
-        {
-            DatabaseDescriptor.setCDCSpaceInMB(originalCDCSize);
-        }
+        // Assert.assertEquals(0, new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length);
+        testWithCDCSpaceInMb(8, this::createTableAndBulkWrite);
 
         CommitLog.instance.sync(true);
         CommitLog.instance.stopUnsafe(false);
 
         // Build up a list of expected index files after replay and then clear out cdc_raw
         List<CDCIndexData> oldData = parseCDCIndexData();
-        for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
-            FileUtils.deleteWithConfirm(f.getAbsolutePath());
+        for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
+            FileUtils.deleteWithConfirm(f.absolutePath());
 
         try
         {
             Assert.assertEquals("Expected 0 files in CDC folder after deletion. ",
-                                0, new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length);
+                                0, new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length);
         }
         finally
         {
@@ -331,7 +290,7 @@
 
         // Rough sanity check -> should be files there now.
         Assert.assertTrue("Expected non-zero number of files in CDC folder after restart.",
-                          new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length > 0);
+                          new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length > 0);
 
         // Confirm all the old indexes in old are present and >= the original offset, as we flag the entire segment
         // as cdc written on a replay.
@@ -377,9 +336,9 @@
         List<CDCIndexData> results = new ArrayList<>();
         try
         {
-            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
+            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
             {
-                if (f.getName().contains("_cdc.idx"))
+                if (f.name().contains("_cdc.idx"))
                     results.add(new CDCIndexData(f));
             }
         }
@@ -398,7 +357,7 @@
         CDCIndexData(File f) throws IOException
         {
             String line = "";
-            try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(f))))
+            try (BufferedReader br = new BufferedReader(new FileReader(f)))
             {
                 line = br.readLine();
             }
@@ -406,7 +365,7 @@
             {
                 throw e;
             }
-            fileName = f.getName();
+            fileName = f.name();
             offset = Integer.parseInt(line);
         }
 
@@ -433,7 +392,7 @@
 
     private int getCDCRawCount()
     {
-        return new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length;
+        return new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length;
     }
 
     private void expectCurrentCDCState(CDCState expectedState)
@@ -447,4 +406,108 @@
                         expectedState, currentState));
         }
     }
+
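+    // Runs the given test with CDC writes set to non-blocking, restoring the original mode afterwards.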
+    private void testWithNonblockingMode(Testable test) throws Throwable
+    {
+        boolean original = DatabaseDescriptor.getCDCBlockWrites();
+        CommitLog.instance.setCDCBlockWrites(false);
+        try
+        {
+            test.run();
+        }
+        finally
+        {
+            CommitLog.instance.setCDCBlockWrites(original);
+        }
+    }
+
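+    // Runs the given test with the total CDC space limit temporarily set to the given size in MiB.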
+    private void testWithCDCSpaceInMb(int size, Testable test) throws Throwable
+    {
+        int origSize = (int) DatabaseDescriptor.getCDCTotalSpace() / 1024 / 1024;
+        DatabaseDescriptor.setCDCTotalSpaceInMiB(size);
+        try
+        {
+            test.run();
+        }
+        finally
+        {
+            DatabaseDescriptor.setCDCTotalSpaceInMiB(origSize);
+        }
+    }
+
+    private String createTableAndBulkWrite() throws Throwable
+    {
+        return createTableAndBulkWrite(DatabaseDescriptor.getCommitLogSegmentSize() / 3);
+    }
+
+    private String createTableAndBulkWrite(int mutationSize) throws Throwable
+    {
+        String tableName = createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
+        bulkWrite(tableName, mutationSize);
+        return tableName;
+    }
+
+    private void bulkWrite(String tableName) throws Throwable
+    {
+        bulkWrite(tableName, DatabaseDescriptor.getCommitLogSegmentSize() / 3);
+    }
+
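+    // Writes mutations until CDC capacity is exhausted; a CDCWriteException is expected only when
+    // CDC is configured to block writes.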
+    private void bulkWrite(String tableName, int mutationSize) throws Throwable
+    {
+        TableMetadata ccfm = Keyspace.open(keyspace()).getColumnFamilyStore(tableName).metadata();
+        boolean blockWrites = DatabaseDescriptor.getCDCBlockWrites();
+        // Spin to make sure we hit CDC capacity
+        try
+        {
+            for (int i = 0; i < 1000; i++)
+            {
+                new RowUpdateBuilder(ccfm, 0, i)
+                .add("data", randomizeBuffer(mutationSize))
+                .build().applyFuture().get();
+            }
+            if (blockWrites)
+                Assert.fail("Expected CDCWriteException from full CDC but did not receive it.");
+        }
+        catch (CDCWriteException e)
+        {
+            if (!blockWrites)
+                Assert.fail("Excepted no CDCWriteException when not blocking writes but received it.");
+        }
+    }
+
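+    // Fills CDC to a 16 MiB limit and checks the active segment is flagged FORBIDDEN (blocking mode)
+    // or CONTAINS (non-blocking). In blocking mode, recycling segments and clearing cdc_raw should
+    // return the state to PERMITTED.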
+    private void testSegmentFlaggingOnCreation0() throws Throwable
+    {
+        testWithCDCSpaceInMb(16, () -> {
+            boolean blockWrites = DatabaseDescriptor.getCDCBlockWrites();
+
+            createTableAndBulkWrite();
+
+            CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager;
+            expectCurrentCDCState(blockWrites ? CDCState.FORBIDDEN : CDCState.CONTAINS);
+
+            // When writes are blocked, releasing CDC commit log segments should move the CDC state back to PERMITTED
+            if (blockWrites)
+            {
+                CommitLog.instance.forceRecycleAllSegments();
+
+                cdcMgr.awaitManagementTasksCompletion();
+                // Delete all files in cdc_raw
+                for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
+                    f.delete();
+                cdcMgr.updateCDCTotalSize();
+                // Confirm cdc update process changes flag on active segment
+                expectCurrentCDCState(CDCState.PERMITTED);
+            }
+
+            // Clear out archived CDC files
+            for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) {
+                FileUtils.deleteWithConfirm(f);
+            }
+        });
+    }
+
+    private interface Testable
+    {
+        void run() throws Throwable;
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
index da3b83e..17bbece 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
@@ -18,7 +18,13 @@
 */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.*;
+import org.apache.cassandra.io.util.File;
+
+import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
 import java.math.BigInteger;
 import java.nio.ByteBuffer;
 import java.util.*;
@@ -29,9 +35,12 @@
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
+import javax.crypto.Cipher;
+
 import com.google.common.collect.Iterables;
 import com.google.common.io.Files;
 
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.junit.*;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -39,8 +48,12 @@
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.memtable.SkipListMemtable;
 import org.apache.cassandra.io.compress.ZstdCompressor;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.MemtableParams;
+import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -61,6 +74,7 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.security.CipherFactory;
 import org.apache.cassandra.security.EncryptionContext;
 import org.apache.cassandra.security.EncryptionContextGenerator;
 import org.apache.cassandra.service.StorageService;
@@ -71,14 +85,13 @@
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.vint.VIntCoding;
 
-import org.junit.After;
-
 import static org.apache.cassandra.db.commitlog.CommitLogSegment.ENTRY_OVERHEAD_SIZE;
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import org.apache.cassandra.db.marshal.IntegerType;
 import org.apache.cassandra.db.marshal.MapType;
@@ -105,15 +118,25 @@
     }
 
     @Parameters()
-    public static Collection<Object[]> generateData()
+    public static Collection<Object[]> generateData() throws Exception
     {
-        return Arrays.asList(new Object[][]{
-            {null, EncryptionContextGenerator.createDisabledContext()}, // No compression, no encryption
-            {null, EncryptionContextGenerator.createContext(true)}, // Encryption
-            {new ParameterizedClass(LZ4Compressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext()},
-            {new ParameterizedClass(SnappyCompressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext()},
-            {new ParameterizedClass(DeflateCompressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext()},
-            {new ParameterizedClass(ZstdCompressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext()}});
+        return Arrays.asList(new Object[][]
+        {
+            { null, EncryptionContextGenerator.createDisabledContext()}, // No compression, no encryption
+            { null, newEncryptionContext() }, // Encryption
+            { new ParameterizedClass(LZ4Compressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext() },
+            { new ParameterizedClass(SnappyCompressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext()},
+            { new ParameterizedClass(DeflateCompressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext()},
+            { new ParameterizedClass(ZstdCompressor.class.getName(), Collections.emptyMap()), EncryptionContextGenerator.createDisabledContext()}
+        });
+    }
+
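+    // Builds an encryption context carrying a concrete IV obtained from a freshly created encryptor
+    // for the configured cipher and key alias.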
+    private static EncryptionContext newEncryptionContext() throws Exception
+    {
+        EncryptionContext context = EncryptionContextGenerator.createContext(true);
+        CipherFactory cipherFactory = new CipherFactory(context.getTransparentDataEncryptionOptions());
+        Cipher cipher = cipherFactory.getEncryptor(context.getTransparentDataEncryptionOptions().cipher, context.getTransparentDataEncryptionOptions().key_alias);
+        return EncryptionContextGenerator.createContext(cipher.getIV(), true);
     }
 
     public static void beforeClass() throws ConfigurationException
@@ -126,22 +149,25 @@
         SchemaLoader.prepareServer();
         StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), FBUtilities.getBroadcastAddressAndPort());
 
+        MemtableParams skipListMemtable = MemtableParams.get("skiplist");
+
         TableMetadata.Builder custom =
             TableMetadata.builder(KEYSPACE1, CUSTOM1)
                          .addPartitionKeyColumn("k", IntegerType.instance)
                          .addClusteringColumn("c1", MapType.getInstance(UTF8Type.instance, UTF8Type.instance, false))
                          .addClusteringColumn("c2", SetType.getInstance(UTF8Type.instance, false))
-                         .addStaticColumn("s", IntegerType.instance);
+                         .addStaticColumn("s", IntegerType.instance)
+                         .memtable(skipListMemtable);
 
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD1, 0, AsciiType.instance, BytesType.instance),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD2, 0, AsciiType.instance, BytesType.instance),
+                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD1, 0, AsciiType.instance, BytesType.instance).memtable(skipListMemtable),
+                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD2, 0, AsciiType.instance, BytesType.instance).memtable(skipListMemtable),
                                     custom);
         SchemaLoader.createKeyspace(KEYSPACE2,
                                     KeyspaceParams.simpleTransient(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD1, 0, AsciiType.instance, BytesType.instance),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD2, 0, AsciiType.instance, BytesType.instance));
+                                    SchemaLoader.standardCFMD(KEYSPACE2, STANDARD1, 0, AsciiType.instance, BytesType.instance).memtable(skipListMemtable),
+                                    SchemaLoader.standardCFMD(KEYSPACE2, STANDARD2, 0, AsciiType.instance, BytesType.instance).memtable(skipListMemtable));
         CompactionManager.instance.disableAutoCompaction();
 
         testKiller = new KillerForTests();
@@ -167,6 +193,8 @@
     @After
     public void afterTest()
     {
+        CommitLogSegmentReader.setAllowSkipSyncMarkerCrc(false);
+        System.clearProperty("cassandra.replayList");
         testKiller.reset();
     }
 
@@ -174,10 +202,8 @@
     public void testRecoveryWithEmptyLog() throws Exception
     {
         runExpecting(() -> {
-            CommitLog.instance.recoverFiles(new File[]{
-            tmpFile(CommitLogDescriptor.current_version),
-            tmpFile(CommitLogDescriptor.current_version)
-            });
+            CommitLog.instance.recoverFiles(tmpFile(CommitLogDescriptor.current_version),
+                                            tmpFile(CommitLogDescriptor.current_version));
             return null;
         }, CommitLogReplayException.class);
     }
@@ -196,7 +222,9 @@
     @Test
     public void testHeaderOnlyFileFiltering() throws Exception
     {
-        File directory = Files.createTempDir();
+        Assume.assumeTrue(!DatabaseDescriptor.getEncryptionContext().isEnabled());
+
+        File directory = new File(Files.createTempDir());
 
         CommitLogDescriptor desc1 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 1, null, DatabaseDescriptor.getEncryptionContext());
         CommitLogDescriptor desc2 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 2, null, DatabaseDescriptor.getEncryptionContext());
@@ -213,7 +241,7 @@
         buffer.putInt(5);
         buffer.putInt(6);
 
-        try (OutputStream lout = new FileOutputStream(file1))
+        try (OutputStream lout = new FileOutputStreamPlus(file1))
         {
             lout.write(buffer.array());
         }
@@ -222,7 +250,7 @@
         File file2 = new File(directory, desc2.fileName());
         buffer = ByteBuffer.allocate(1024);
         CommitLogDescriptor.writeHeader(buffer, desc2);
-        try (OutputStream lout = new FileOutputStream(file2))
+        try (OutputStream lout = new FileOutputStreamPlus(file2))
         {
             lout.write(buffer.array());
         }
@@ -317,7 +345,7 @@
         ColumnFamilyStore cfs1 = ks.getColumnFamilyStore(STANDARD1);
         ColumnFamilyStore cfs2 = ks.getColumnFamilyStore(STANDARD2);
 
-        // Roughly 32 MB mutation
+        // Roughly 32 MiB mutation
         Mutation m = new RowUpdateBuilder(cfs1.metadata(), 0, "k")
                      .clustering("bytes")
                      .add("val", ByteBuffer.allocate(DatabaseDescriptor.getCommitLogSegmentSize() / 4))
@@ -353,7 +381,7 @@
         ColumnFamilyStore cfs1 = ks.getColumnFamilyStore(STANDARD1);
         ColumnFamilyStore cfs2 = ks.getColumnFamilyStore(STANDARD2);
 
-        // Roughly 32 MB mutation
+        // Roughly 32 MiB mutation
          Mutation rm = new RowUpdateBuilder(cfs1.metadata(), 0, "k")
                   .clustering("bytes")
                   .add("val", ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()/4) - 1))
@@ -424,7 +452,7 @@
                       .add("val", ByteBuffer.allocate(allocSize)).build();
 
         int max = DatabaseDescriptor.getMaxMutationSize();
-        max -= CommitLogSegment.ENTRY_OVERHEAD_SIZE; // log entry overhead
+        max -= ENTRY_OVERHEAD_SIZE; // log entry overhead
 
         // Note that the size of the value is vint-encoded. So we first compute the overhead of the mutation without the value and its size
         int mutationOverhead = rm.serializedSize(MessagingService.current_version) - (VIntCoding.computeVIntSize(allocSize) + allocSize);
@@ -470,7 +498,7 @@
         }
         catch (MutationExceededMaxSizeException e)
         {
-            Assert.assertEquals(cnt + 1, CommitLog.instance.metrics.oversizedMutations.getCount());
+            assertEquals(cnt + 1, CommitLog.instance.metrics.oversizedMutations.getCount());
             throw e;
         }
         throw new AssertionError("mutation larger than limit was accepted");
@@ -508,7 +536,7 @@
             String message = exception.getMessage();
 
             long mutationSize = mutation.serializedSize(MessagingService.current_version) + ENTRY_OVERHEAD_SIZE;
-            final String expectedMessagePrefix = String.format("Encountered an oversized mutation (%d/%d) for keyspace: %s.",
+            final String expectedMessagePrefix = String.format("Rejected an oversized mutation (%d/%d) for keyspace: %s.",
                                                                mutationSize,
                                                                DatabaseDescriptor.getMaxMutationSize(),
                                                                KEYSPACE1);
@@ -557,7 +585,7 @@
 
         File logFile = new File(DatabaseDescriptor.getCommitLogLocation(), desc.fileName());
 
-        try (OutputStream lout = new FileOutputStream(logFile))
+        try (OutputStream lout = new FileOutputStreamPlus(logFile))
         {
             lout.write(buf.array(), 0, buf.limit());
         }
@@ -586,11 +614,11 @@
     protected Void testRecovery(byte[] logData, int version) throws Exception
     {
         File logFile = tmpFile(version);
-        try (OutputStream lout = new FileOutputStream(logFile))
+        try (OutputStream lout = new FileOutputStreamPlus(logFile))
         {
             lout.write(logData);
             //statics make it annoying to test things correctly
-            CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
+            CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
         }
         return null;
     }
@@ -598,17 +626,17 @@
     protected Void testRecovery(CommitLogDescriptor desc, byte[] logData) throws Exception
     {
         File logFile = tmpFile(desc.version);
-        CommitLogDescriptor fromFile = CommitLogDescriptor.fromFileName(logFile.getName());
+        CommitLogDescriptor fromFile = CommitLogDescriptor.fromFileName(logFile.name());
         // Change id to match file.
         desc = new CommitLogDescriptor(desc.version, fromFile.id, desc.compression, desc.getEncryptionContext());
         ByteBuffer buf = ByteBuffer.allocate(1024);
         CommitLogDescriptor.writeHeader(buf, desc, getAdditionalHeaders(desc.getEncryptionContext()));
-        try (OutputStream lout = new FileOutputStream(logFile))
+        try (OutputStream lout = new FileOutputStreamPlus(logFile))
         {
             lout.write(buf.array(), 0, buf.position());
             lout.write(logData);
             //statics make it annoying to test things correctly
-            CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
+            CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
         }
         return null;
     }
@@ -620,12 +648,12 @@
         File logFile = tmpFile(desc.version);
         ByteBuffer buf = ByteBuffer.allocate(1024);
         CommitLogDescriptor.writeHeader(buf, desc);
-        try (OutputStream lout = new FileOutputStream(logFile))
+        try (OutputStream lout = new FileOutputStreamPlus(logFile))
         {
             lout.write(buf.array(), 0, buf.position());
 
             runExpecting(() -> {
-                CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
+                CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
                 return null;
             }, CommitLogReplayException.class);
         }
@@ -655,20 +683,18 @@
             caught = t;
         }
         if (expected != null && caught == null)
-            Assert.fail("Expected exception " + expected + " but call completed successfully.");
+            fail("Expected exception " + expected + " but call completed successfully.");
 
         assertEquals("JVM kill state doesn't match expectation.", expected != null, testKiller.wasKilled());
     }
 
     protected void testRecovery(final byte[] logData, Class<?> expected) throws Exception
     {
-        ParameterizedClass commitLogCompression = DatabaseDescriptor.getCommitLogCompression();
-        EncryptionContext encryptionContext = DatabaseDescriptor.getEncryptionContext();
         runExpecting(() -> testRecovery(logData, CommitLogDescriptor.current_version), expected);
     }
 
     @Test
-    public void testTruncateWithoutSnapshot() throws ExecutionException, InterruptedException, IOException
+    public void testTruncateWithoutSnapshot()
     {
         boolean originalState = DatabaseDescriptor.isAutoSnapshot();
         try
@@ -758,13 +784,81 @@
         List<String> activeSegments = CommitLog.instance.getActiveSegmentNames();
         assertFalse(activeSegments.isEmpty());
 
-        File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).listFiles((file, name) -> activeSegments.contains(name));
+        File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).tryList((file, name) -> activeSegments.contains(name));
         replayer.replayFiles(files);
 
         assertEquals(cellCount, replayer.cells);
     }
 
     @Test
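+    // Zeroes the first sync marker CRC of an active segment and verifies that replay fails for
+    // compressed or encrypted commit logs, but succeeds via per-mutation checks when skipping the
+    // sync marker CRC is allowed on plain segments.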
+    public void replayWithBadSyncMarkerCRC() throws IOException
+    {
+        ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
+
+        Mutation rm2 = new RowUpdateBuilder(cfs.metadata(), 0, "k2").clustering("bytes")
+                                                                    .add("val", bytes("this is a string"))
+                                                                    .build();
+        CommitLog.instance.add(rm2);
+        CommitLog.instance.sync(true);
+
+        List<String> activeSegments = CommitLog.instance.getActiveSegmentNames();
+        assertFalse(activeSegments.isEmpty());
+
+        File directory = new File(CommitLog.instance.segmentManager.storageDirectory);
+        File firstActiveFile = Objects.requireNonNull(directory.tryList((file, name) -> activeSegments.contains(name)))[0];
+        zeroFirstSyncMarkerCRC(firstActiveFile);
+
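+        // Let the segment reader tolerate a bad sync marker CRC instead of aborting replay outright.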
+        CommitLogSegmentReader.setAllowSkipSyncMarkerCrc(true);
+
+        if (DatabaseDescriptor.getCommitLogCompression() != null || DatabaseDescriptor.getEncryptionContext().isEnabled())
+        {
+            // If compression or encryption are enabled, expect an error, and do not attempt to replay using only mutation CRCs.
+            runExpecting(() ->
+                         {
+                             CommitLog.instance.recoverFiles(firstActiveFile);
+                             return null;
+                         },
+                         CommitLogReplayException.class);
+        }
+        else
+        {
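+            // Plain (uncompressed, unencrypted) segments can be replayed by checking each mutation's own CRC,
+            // so the single cell written above should be recovered.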
+            SimpleCountingReplayer replayer = new SimpleCountingReplayer(CommitLog.instance, CommitLogPosition.NONE, cfs.metadata());
+            replayer.replayPath(firstActiveFile, false);
+            assertEquals(1, replayer.cells);
+        }
+    }
+
+    private void zeroFirstSyncMarkerCRC(File file) throws IOException
+    {
+        // Get the position of the first sync marker...
+        int firstSyncMarkerPosition = -1;
+
+        try (RandomAccessReader reader = RandomAccessReader.open(file))
+        {
+            CommitLogDescriptor.readHeader(reader, DatabaseDescriptor.getEncryptionContext());
+            firstSyncMarkerPosition = (int) reader.getFilePointer();
+        }
+
+        // ...buffer the file into memory...
+        ByteBuffer buffer = ByteBuffer.allocate((int) file.length());
+
+        try (RandomAccessReader reader = RandomAccessReader.open(file))
+        {
+            reader.readFully(buffer);
+        }
+
+        // ...overwrite the sync marker CRC with zero...
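+        // (a sync marker is an int position of the next marker followed by an int CRC, hence the 4-byte offset)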
+        buffer.position(firstSyncMarkerPosition + 4);
+        buffer.putInt(0);
+
+        // ...and write the file back out.
+        try (OutputStream out = new FileOutputStreamPlus(file))
+        {
+            out.write(buffer.array());
+        }
+    }
+
+    @Test
     public void replayWithDiscard() throws IOException
     {
         int cellCount = 0;
@@ -795,7 +889,7 @@
         List<String> activeSegments = CommitLog.instance.getActiveSegmentNames();
         assertFalse(activeSegments.isEmpty());
 
-        File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).listFiles((file, name) -> activeSegments.contains(name));
+        File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).tryList((file, name) -> activeSegments.contains(name));
         replayer.replayFiles(files);
 
         assertEquals(cellCount, replayer.cells);
@@ -842,6 +936,7 @@
         }
     }
 
+    @Test
     public void testUnwriteableFlushRecovery() throws ExecutionException, InterruptedException, IOException
     {
         CommitLog.instance.resetUnsafe(true);
@@ -864,7 +959,7 @@
                 {
                     try (Closeable c = Util.markDirectoriesUnwriteable(cfs))
                     {
-                        cfs.forceBlockingFlush();
+                        Util.flush(cfs);
                     }
                     catch (Throwable t)
                     {
@@ -874,7 +969,7 @@
                     }
                 }
                 else
-                    cfs.forceBlockingFlush();
+                    Util.flush(cfs);
             }
         }
         finally
@@ -886,7 +981,7 @@
         System.setProperty("cassandra.replayList", KEYSPACE1 + "." + STANDARD1);
         // Currently we don't attempt to re-flush a memtable that failed, thus make sure data is replayed by commitlog.
         // If retries work subsequent flushes should clear up error and this should change to expect 0.
-        Assert.assertEquals(1, CommitLog.instance.resetUnsafe(false));
+        assertEquals(1, CommitLog.instance.resetUnsafe(false));
         System.clearProperty("cassandra.replayList");
     }
 
@@ -906,7 +1001,7 @@
 
             Memtable current = cfs.getTracker().getView().getCurrentMemtable();
             if (i == 2)
-                current.makeUnflushable();
+                ((SkipListMemtable) current).makeUnflushable();
 
             flushAction.accept(cfs, current);
         }
@@ -921,15 +1016,14 @@
         // In the absence of error, this should be 0 because forceBlockingFlush/forceRecycleAllSegments would have
         // persisted all data in the commit log. Because we know there was an error, there must be something left to
         // replay.
-        Assert.assertEquals(1, CommitLog.instance.resetUnsafe(false));
-        System.clearProperty("cassandra.replayList");
+        assertEquals(1, CommitLog.instance.resetUnsafe(false));
     }
 
     BiConsumer<ColumnFamilyStore, Memtable> flush = (cfs, current) ->
     {
         try
         {
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         catch (Throwable t)
         {
@@ -937,7 +1031,7 @@
             while (!(t instanceof FSWriteError))
                 t = t.getCause();
             // Wait for started flushes to complete.
-            cfs.switchMemtableIfCurrent(current);
+            waitForStartedFlushes(cfs, current);
         }
     };
 
@@ -949,9 +1043,14 @@
         CommitLog.instance.forceRecycleAllSegments();
 
         // Wait for started flushes to complete.
-        cfs.switchMemtableIfCurrent(current);
+        waitForStartedFlushes(cfs, current);
     };
 
+    private void waitForStartedFlushes(ColumnFamilyStore cfs, Memtable current)
+    {
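+        // Switch out the given memtable if it is still current and block until the resulting flush future completes.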
+        FBUtilities.waitOnFuture(cfs.switchMemtableIfCurrent(current, ColumnFamilyStore.FlushReason.UNIT_TESTS));
+    }
+
     @Test
     public void testOutOfOrderFlushRecovery() throws ExecutionException, InterruptedException, IOException
     {
@@ -1000,7 +1099,7 @@
             System.clearProperty(CommitLogReplayer.IGNORE_REPLAY_ERRORS_PROPERTY);
         }
 
-        Assert.assertEquals(replayed, 1);
+        assertEquals(replayed, 1);
     }
 }
 
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
index 5b87d68..0519af9 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
@@ -18,7 +18,7 @@
 */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import java.io.IOException;
 
 import com.google.common.base.Predicate;
@@ -48,7 +48,7 @@
 
     public void examineCommitLog() throws IOException
     {
-        replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles());
+        replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).tryList());
     }
 
     private class CommitLogTestReader extends CommitLogReader
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
index 4705919..96b32d2 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
@@ -21,10 +21,12 @@
  *
  */
 
-import java.io.*;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Properties;
 
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.junit.Assert;
 
 import com.google.common.base.Predicate;
@@ -40,6 +42,7 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.db.rows.Row;
@@ -116,7 +119,7 @@
     public void testRestore(String location) throws IOException, InterruptedException
     {
         Properties prop = new Properties();
-        prop.load(new FileInputStream(new File(location + File.separatorChar + PROPERTIES_FILE)));
+        prop.load(new FileInputStreamPlus(new File(location + java.io.File.separatorChar + PROPERTIES_FILE)));
         int hash = Integer.parseInt(prop.getProperty(HASH_PROPERTY));
         int cells = Integer.parseInt(prop.getProperty(CELLS_PROPERTY));
 
@@ -125,12 +128,15 @@
         {
             TableId tableId = TableId.fromString(cfidString);
             if (Schema.instance.getTableMetadata(tableId) == null)
-                Schema.instance.load(KeyspaceMetadata.create(KEYSPACE, KeyspaceParams.simple(1), Tables.of(metadata.unbuild().id(tableId).build())));
+                SchemaTestUtil.addOrUpdateKeyspace(KeyspaceMetadata.create(KEYSPACE,
+                                                                           KeyspaceParams.simple(1),
+                                                                           Tables.of(metadata.unbuild().id(tableId).build())),
+                                                   true);
         }
 
         Hasher hasher = new Hasher();
         CommitLogTestReplayer replayer = new CommitLogTestReplayer(hasher);
-        File[] files = new File(location).listFiles((file, name) -> name.endsWith(".log"));
+        File[] files = new File(location).tryList((file, name) -> name.endsWith(".log"));
         replayer.replayFiles(files);
 
         Assert.assertEquals(cells, hasher.cells);
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
index 680a0e7..b97cad2 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
@@ -21,7 +21,7 @@
  *
  */
 
-import java.io.*;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -33,6 +33,9 @@
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.util.concurrent.RateLimiter;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.junit.Assert;
 
 import org.apache.cassandra.SchemaLoader;
@@ -82,7 +85,7 @@
 
     static public void initialize() throws IOException, ConfigurationException
     {
-        try (FileInputStream fis = new FileInputStream("CHANGES.txt"))
+        try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt"))
         {
             dataSource = ByteBuffer.allocateDirect((int) fis.getChannel().size());
             while (dataSource.hasRemaining())
@@ -128,15 +131,15 @@
         if (dataDir.exists())
             FileUtils.deleteRecursive(dataDir);
 
-        dataDir.mkdirs();
-        for (File f : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
-            FileUtils.createHardLink(f, new File(dataDir, f.getName()));
+        dataDir.tryCreateDirectories();
+        for (File f : new File(DatabaseDescriptor.getCommitLogLocation()).tryList())
+            FileUtils.createHardLink(f, new File(dataDir, f.name()));
 
         Properties prop = new Properties();
         prop.setProperty(CFID_PROPERTY, Schema.instance.getTableMetadata(KEYSPACE, TABLE).id.toString());
         prop.setProperty(CELLS_PROPERTY, Integer.toString(cells));
         prop.setProperty(HASH_PROPERTY, Integer.toString(hash));
-        prop.store(new FileOutputStream(new File(dataDir, PROPERTIES_FILE)),
+        prop.store(new FileOutputStreamPlus(new File(dataDir, PROPERTIES_FILE)),
                    "CommitLog upgrade test, version " + FBUtilities.getReleaseVersionString());
         System.out.println("Done");
     }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java
index 711cf65..e962450 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java
@@ -18,12 +18,11 @@
 
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.Random;
-import java.util.UUID;
 
 import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -94,6 +93,6 @@
         CommitLog.instance.shutdownBlocking();
 
         // the shutdown should block until all logs except the currently active one and perhaps a new, empty one are gone
-        Assert.assertTrue(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles().length <= 2);
+        Assert.assertTrue(new File(DatabaseDescriptor.getCommitLogLocation()).tryList().length <= 2);
     }
 }
diff --git a/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java b/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java
index ce20935..4166759 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java
@@ -17,10 +17,8 @@
  */
 package org.apache.cassandra.db.commitlog;
 
-import java.io.File;
-import java.io.FileOutputStream;
+import org.apache.cassandra.io.util.*;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.Collections;
@@ -42,9 +40,6 @@
 import org.apache.cassandra.io.compress.LZ4Compressor;
 import org.apache.cassandra.io.compress.SnappyCompressor;
 import org.apache.cassandra.io.compress.ZstdCompressor;
-import org.apache.cassandra.io.util.FileDataInput;
-import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.security.CipherFactory;
 import org.apache.cassandra.security.EncryptionUtils;
 import org.apache.cassandra.security.EncryptionContext;
@@ -103,7 +98,7 @@
 
         File compressedFile = FileUtils.createTempFile("compressed-segment-", ".log");
         compressedFile.deleteOnExit();
-        FileOutputStream fos = new FileOutputStream(compressedFile);
+        FileOutputStreamPlus fos = new FileOutputStreamPlus(compressedFile);
         fos.getChannel().write(compBuffer);
         fos.close();
 
@@ -190,7 +185,7 @@
         Cipher cipher = cipherFactory.getEncryptor(context.getTransparentDataEncryptionOptions().cipher, context.getTransparentDataEncryptionOptions().key_alias);
         File encryptedFile = FileUtils.createTempFile("encrypted-segment-", ".log");
         encryptedFile.deleteOnExit();
-        FileChannel channel = new RandomAccessFile(encryptedFile, "rw").getChannel();
+        FileChannel channel = encryptedFile.newReadWriteChannel();
         channel.write(ByteBufferUtil.bytes(plainTextLength));
         EncryptionUtils.encryptAndWrite(compressedBuffer, channel, true, cipher);
         channel.close();
diff --git a/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java b/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java
deleted file mode 100644
index b3dc070..0000000
--- a/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.db.commitlog;
-
-import org.junit.Assume;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.Util;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.DecoratedKey;
-import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.RowUpdateBuilder;
-import org.apache.cassandra.db.WindowsFailedSnapshotTracker;
-import org.apache.cassandra.io.sstable.SnapshotDeletingTask;
-import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.service.GCInspector;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
-
-public class SnapshotDeletingTest
-{
-    private static final String KEYSPACE1 = "Keyspace1";
-    private static final String CF_STANDARD1 = "CF_STANDARD1";
-
-    @BeforeClass
-    public static void defineSchema() throws Exception
-    {
-        DatabaseDescriptor.daemonInitialization();
-        GCInspector.register();
-        // Needed to init the output file where we print failed snapshots. This is called on node startup.
-        WindowsFailedSnapshotTracker.deleteOldSnapshots();
-        SchemaLoader.prepareServer();
-        SchemaLoader.createKeyspace(KEYSPACE1,
-                                    KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1));
-    }
-
-    @Test
-    public void testCompactionHook() throws Exception
-    {
-        Assume.assumeTrue(FBUtilities.isWindows);
-
-        Keyspace keyspace = Keyspace.open(KEYSPACE1);
-        ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
-        store.clearUnsafe();
-
-        populate(10000);
-        store.snapshot("snapshot1");
-
-        // Confirm snapshot deletion fails. Sleep for a bit just to make sure the SnapshotDeletingTask has
-        // time to run and fail.
-        Thread.sleep(500);
-        store.clearSnapshot("snapshot1");
-        assertEquals(1, SnapshotDeletingTask.pendingDeletionCount());
-
-        // Compact the cf and confirm that the executor's after hook calls rescheduleDeletion
-        populate(20000);
-        store.forceBlockingFlush();
-        store.forceMajorCompaction();
-
-        long start = System.currentTimeMillis();
-        while (System.currentTimeMillis() - start < 1000 && SnapshotDeletingTask.pendingDeletionCount() > 0)
-        {
-            Thread.yield();
-        }
-
-        assertEquals(0, SnapshotDeletingTask.pendingDeletionCount());
-    }
-
-    private void populate(int rowCount) {
-        long timestamp = System.currentTimeMillis();
-        TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata();
-        for (int i = 0; i <= rowCount; i++)
-        {
-            DecoratedKey key = Util.dk(Integer.toString(i));
-            for (int j = 0; j < 10; j++)
-            {
-                new RowUpdateBuilder(cfm, timestamp, 0, key.getKey())
-                    .clustering(Integer.toString(j))
-                    .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
-                    .build()
-                    .applyUnsafe();
-            }
-        }
-    }
-}
diff --git a/test/unit/org/apache/cassandra/db/compaction/AbstractCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/AbstractCompactionStrategyTest.java
index 4092f54..bd4b28f 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AbstractCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AbstractCompactionStrategyTest.java
@@ -139,6 +139,6 @@
         .add("val", "val")
         .build()
         .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 }
diff --git a/test/unit/org/apache/cassandra/db/compaction/AbstractPendingRepairTest.java b/test/unit/org/apache/cassandra/db/compaction/AbstractPendingRepairTest.java
index de7ddfc..b0bb23c 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AbstractPendingRepairTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AbstractPendingRepairTest.java
@@ -21,13 +21,13 @@
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
-import java.util.UUID;
 
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -39,6 +39,7 @@
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 @Ignore
 public class AbstractPendingRepairTest extends AbstractRepairTest
@@ -86,7 +87,7 @@
         int pk = nextSSTableKey++;
         Set<SSTableReader> pre = cfs.getLiveSSTables();
         QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES(?, ?)", ks, tbl), pk, pk);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Set<SSTableReader> post = cfs.getLiveSSTables();
         Set<SSTableReader> diff = new HashSet<>(post);
         diff.removeAll(pre);
@@ -99,7 +100,7 @@
         return sstable;
     }
 
-    public static void mutateRepaired(SSTableReader sstable, long repairedAt, UUID pendingRepair, boolean isTransient)
+    public static void mutateRepaired(SSTableReader sstable, long repairedAt, TimeUUID pendingRepair, boolean isTransient)
     {
         try
         {
@@ -117,7 +118,7 @@
         mutateRepaired(sstable, repairedAt, ActiveRepairService.NO_PENDING_REPAIR, false);
     }
 
-    public static void mutateRepaired(SSTableReader sstable, UUID pendingRepair, boolean isTransient)
+    public static void mutateRepaired(SSTableReader sstable, TimeUUID pendingRepair, boolean isTransient)
     {
         mutateRepaired(sstable, ActiveRepairService.UNREPAIRED_SSTABLE, pendingRepair, isTransient);
     }
diff --git a/test/unit/org/apache/cassandra/db/compaction/ActiveCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/ActiveCompactionsTest.java
index 08c76bf..56d6c40 100644
--- a/test/unit/org/apache/cassandra/db/compaction/ActiveCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/ActiveCompactionsTest.java
@@ -38,6 +38,7 @@
 import org.apache.cassandra.cache.AutoSavingCache;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.view.View;
 import org.apache.cassandra.db.view.ViewBuilderTask;
@@ -67,7 +68,7 @@
         for (int i = 0; i < 5; i++)
         {
             execute("INSERT INTO %s (pk, ck, a, b) VALUES (" + i + ", 2, 3, 4)");
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            getCurrentColumnFamilyStore().forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
         }
 
         Index idx = getCurrentColumnFamilyStore().indexManager.getIndexByName(idxName);
@@ -111,7 +112,7 @@
         for (int i = 0; i < 5; i++)
         {
             execute("INSERT INTO %s (pk, ck, a, b) VALUES (" + i + ", 2, 3, 4)");
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
         }
 
         Index idx = getCurrentColumnFamilyStore().indexManager.getIndexByName(idxName);
@@ -134,7 +135,7 @@
         for (int i = 0; i < 5; i++)
         {
             execute("INSERT INTO %s (pk, ck, a, b) VALUES (" + i + ", 2, 3, 4)");
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
         }
         Set<SSTableReader> sstables = getCurrentColumnFamilyStore().getLiveSSTables();
         try (LifecycleTransaction txn = getCurrentColumnFamilyStore().getTracker().tryModify(sstables, OperationType.INDEX_SUMMARY))
@@ -159,7 +160,7 @@
         for (int i = 0; i < 5; i++)
         {
             execute("INSERT INTO %s (k1, c1, val) VALUES (" + i + ", 2, 3)");
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
         }
         execute(String.format("CREATE MATERIALIZED VIEW %s.view1 AS SELECT k1, c1, val FROM %s.%s WHERE k1 IS NOT NULL AND c1 IS NOT NULL AND val IS NOT NULL PRIMARY KEY (val, k1, c1)", keyspace(), keyspace(), currentTable()));
         View view = Iterables.getOnlyElement(getCurrentColumnFamilyStore().viewManager);
@@ -183,7 +184,7 @@
         for (int i = 0; i < 5; i++)
         {
             execute("INSERT INTO %s (pk, ck, a, b) VALUES (" + i + ", 2, 3, 4)");
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
         }
 
         SSTableReader sstable = Iterables.getFirst(getCurrentColumnFamilyStore().getLiveSSTables(), null);
@@ -208,7 +209,7 @@
         for (int i = 0; i < 5; i++)
         {
             execute("INSERT INTO %s (pk, ck, a, b) VALUES (" + i + ", 2, 3, 4)");
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
         }
 
         SSTableReader sstable = Iterables.getFirst(getCurrentColumnFamilyStore().getLiveSSTables(), null);
diff --git a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionBytemanTest.java b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionBytemanTest.java
index 38d2607..c858930 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionBytemanTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionBytemanTest.java
@@ -23,7 +23,6 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -48,6 +47,7 @@
 import org.jboss.byteman.contrib.bmunit.BMRules;
 import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 
@@ -67,7 +67,7 @@
         execute("insert into %s (id, i) values (1, 1)");
         execute("insert into %s (id, i) values (2, 1)");
         execute("insert into %s (id, i) values (3, 1)");
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         UntypedResultSet res = execute("select token(id) as tok from %s");
         Iterator<UntypedResultSet.Row> it = res.iterator();
         List<Long> tokens = new ArrayList<>();
@@ -126,7 +126,7 @@
 
         try (LifecycleTransaction txn = getCurrentColumnFamilyStore().getTracker().tryModify(getCurrentColumnFamilyStore().getLiveSSTables(), OperationType.ANTICOMPACTION))
         {
-            CompactionManager.instance.antiCompactGroup(getCurrentColumnFamilyStore(), ranges, txn, UUID.randomUUID(), () -> false);
+            CompactionManager.instance.antiCompactGroup(getCurrentColumnFamilyStore(), ranges, txn, nextTimeUUID(), () -> false);
         }
         finished.set(true);
         t.join();
@@ -134,4 +134,4 @@
         assertFalse(getCurrentColumnFamilyStore().getLiveSSTables().contains(sstableBefore));
         Util.assertOnDiskState(getCurrentColumnFamilyStore(), 3);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
index b2618e5..d3112d8 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db.compaction;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -26,14 +25,15 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.function.Predicate;
-import java.util.stream.Collectors;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.After;
@@ -43,6 +43,7 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
 import org.apache.cassandra.schema.MockSchema;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
@@ -62,7 +63,7 @@
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Refs;
 import org.apache.cassandra.UpdateBuilder;
 import org.apache.cassandra.utils.concurrent.Transactional;
@@ -70,6 +71,7 @@
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
 import static org.apache.cassandra.Util.assertOnDiskState;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -107,7 +109,7 @@
         store.truncateBlocking();
     }
 
-    private void registerParentRepairSession(UUID sessionID, Iterable<Range<Token>> ranges, long repairedAt, UUID pendingRepair) throws IOException
+    private void registerParentRepairSession(TimeUUID sessionID, Iterable<Range<Token>> ranges, long repairedAt, TimeUUID pendingRepair) throws IOException
     {
         ActiveRepairService.instance.registerParentRepairSession(sessionID,
                                                                  InetAddressAndPort.getByName("10.0.0.1"),
@@ -143,7 +145,7 @@
 
     private SSTableStats antiCompactRanges(ColumnFamilyStore store, RangesAtEndpoint ranges) throws IOException
     {
-        UUID sessionID = UUID.randomUUID();
+        TimeUUID sessionID = nextTimeUUID();
         Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
              Refs<SSTableReader> refs = Refs.ref(sstables))
@@ -237,7 +239,7 @@
     }
 
     @Test
-    public void antiCompactionSizeTest() throws InterruptedException, IOException
+    public void antiCompactionSizeTest() throws InterruptedException, IOException, NoSuchRepairSessionException
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
@@ -247,8 +249,8 @@
         Range<Token> range = new Range<Token>(new BytesToken(ByteBufferUtil.bytes(0)), new BytesToken(ByteBufferUtil.bytes(500)));
         List<Range<Token>> ranges = Arrays.asList(range);
         Collection<SSTableReader> sstables = cfs.getLiveSSTables();
-        UUID parentRepairSession = UUID.randomUUID();
-        registerParentRepairSession(parentRepairSession, ranges, UNREPAIRED_SSTABLE, UUIDGen.getTimeUUID());
+        TimeUUID parentRepairSession = nextTimeUUID();
+        registerParentRepairSession(parentRepairSession, ranges, UNREPAIRED_SSTABLE, nextTimeUUID());
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
              Refs<SSTableReader> refs = Refs.ref(sstables))
         {
@@ -299,11 +301,11 @@
                     .build()
                     .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
     }
 
     @Test
-    public void antiCompactTenFull() throws InterruptedException, IOException
+    public void antiCompactTenFull() throws IOException, NoSuchRepairSessionException
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
@@ -326,7 +328,7 @@
     }
 
     @Test
-    public void antiCompactTenTrans() throws InterruptedException, IOException
+    public void antiCompactTenTrans() throws IOException, NoSuchRepairSessionException
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
@@ -349,7 +351,7 @@
     }
 
     @Test
-    public void antiCompactTenMixed() throws InterruptedException, IOException
+    public void antiCompactTenMixed() throws IOException, NoSuchRepairSessionException
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
@@ -368,7 +370,7 @@
     }
 
     @Test
-    public void shouldMutatePendingRepair() throws InterruptedException, IOException
+    public void shouldMutatePendingRepair() throws InterruptedException, IOException, NoSuchRepairSessionException
     {
         ColumnFamilyStore store = prepareColumnFamilyStore();
         Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
@@ -376,7 +378,7 @@
         // the sstables start at "0".getBytes() = 48, we need to include that first token, with "/".getBytes() = 47
         Range<Token> range = new Range<Token>(new BytesToken("/".getBytes()), new BytesToken("9999".getBytes()));
         List<Range<Token>> ranges = Arrays.asList(range);
-        UUID pendingRepair = UUID.randomUUID();
+        TimeUUID pendingRepair = nextTimeUUID();
         registerParentRepairSession(pendingRepair, ranges, UNREPAIRED_SSTABLE, pendingRepair);
 
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
@@ -394,7 +396,7 @@
     }
 
     @Test
-    public void shouldSkipAntiCompactionForNonIntersectingRange() throws InterruptedException, IOException
+    public void shouldSkipAntiCompactionForNonIntersectingRange() throws IOException
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
@@ -410,7 +412,7 @@
 
         Range<Token> range = new Range<Token>(new BytesToken("-1".getBytes()), new BytesToken("-10".getBytes()));
         List<Range<Token>> ranges = Arrays.asList(range);
-        UUID parentRepairSession = UUID.randomUUID();
+        TimeUUID parentRepairSession = nextTimeUUID();
         registerParentRepairSession(parentRepairSession, ranges, UNREPAIRED_SSTABLE, null);
         boolean gotException = false;
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
@@ -442,7 +444,7 @@
                 .build()
                 .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         return store;
     }
 
@@ -479,7 +481,7 @@
         Range<Token> range = new Range<Token>(new BytesToken("-1".getBytes()), new BytesToken("-10".getBytes()));
         List<Range<Token>> ranges = Arrays.asList(range);
 
-        UUID missingRepairSession = UUIDGen.getTimeUUID();
+        TimeUUID missingRepairSession = nextTimeUUID();
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
              Refs<SSTableReader> refs = Refs.ref(sstables))
         {
@@ -509,7 +511,7 @@
         Range<Token> r = new Range<>(t(10), t(100)); // should include sstable 1 and 2 above, but none is fully contained (Range is (x, y])
 
         Iterator<SSTableReader> sstableIterator = sstables.iterator();
-        Set<SSTableReader> fullyContainedSSTables = CompactionManager.findSSTablesToAnticompact(sstableIterator, Collections.singletonList(r), UUID.randomUUID());
+        Set<SSTableReader> fullyContainedSSTables = CompactionManager.findSSTablesToAnticompact(sstableIterator, Collections.singletonList(r), nextTimeUUID());
         assertTrue(fullyContainedSSTables.isEmpty());
         assertEquals(2, sstables.size());
     }
@@ -527,7 +529,7 @@
         Range<Token> r = new Range<>(t(9), t(100)); // sstable 1 is fully contained
 
         Iterator<SSTableReader> sstableIterator = sstables.iterator();
-        Set<SSTableReader> fullyContainedSSTables = CompactionManager.findSSTablesToAnticompact(sstableIterator, Collections.singletonList(r), UUID.randomUUID());
+        Set<SSTableReader> fullyContainedSSTables = CompactionManager.findSSTablesToAnticompact(sstableIterator, Collections.singletonList(r), nextTimeUUID());
         assertEquals(Collections.singleton(sstable1), fullyContainedSSTables);
         assertEquals(Collections.singletonList(sstable2), sstables);
     }
@@ -542,7 +544,7 @@
 
         Range<Token> r = new Range<>(t(9), t(100)); // sstable is not intersecting and should not be included
 
-        CompactionManager.validateSSTableBoundsForAnticompaction(UUID.randomUUID(), sstables, atEndpoint(Collections.singletonList(r), NO_RANGES));
+        CompactionManager.validateSSTableBoundsForAnticompaction(nextTimeUUID(), sstables, atEndpoint(Collections.singletonList(r), NO_RANGES));
     }
 
     @Test(expected = IllegalStateException.class)
@@ -557,7 +559,7 @@
 
         Range<Token> r = new Range<>(t(10), t(11)); // no sstable included, throw
 
-        CompactionManager.validateSSTableBoundsForAnticompaction(UUID.randomUUID(), sstables, atEndpoint(Collections.singletonList(r), NO_RANGES));
+        CompactionManager.validateSSTableBoundsForAnticompaction(nextTimeUUID(), sstables, atEndpoint(Collections.singletonList(r), NO_RANGES));
     }
 
     @Test
@@ -573,7 +575,7 @@
         Range<Token> r = new Range<>(t(9), t(200)); // sstable 2 is fully contained - last token is equal
 
         Iterator<SSTableReader> sstableIterator = sstables.iterator();
-        Set<SSTableReader> fullyContainedSSTables = CompactionManager.findSSTablesToAnticompact(sstableIterator, Collections.singletonList(r), UUID.randomUUID());
+        Set<SSTableReader> fullyContainedSSTables = CompactionManager.findSSTablesToAnticompact(sstableIterator, Collections.singletonList(r), nextTimeUUID());
         assertEquals(Sets.newHashSet(sstable1, sstable2), fullyContainedSSTables);
         assertTrue(sstables.isEmpty());
     }
diff --git a/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java
index beed019..67421ba 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java
@@ -23,7 +23,6 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
@@ -35,7 +34,6 @@
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.junit.Test;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
@@ -58,7 +56,9 @@
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -255,7 +255,7 @@
             // make sure that sstables are fully contained so that the metadata gets mutated
             Range<Token> range = new Range<>(token(-1), token(49));
 
-            UUID prsid = UUID.randomUUID();
+            TimeUUID prsid = nextTimeUUID();
             ActiveRepairService.instance.registerParentRepairSession(prsid, InetAddressAndPort.getLocalHost(), Collections.singletonList(cfs), Collections.singleton(range), true, 1, true, PreviewKind.NONE);
 
             InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
@@ -398,7 +398,7 @@
             txn = cfs.getTracker().tryModify(sstables, OperationType.COMPACTION);
             assertNotNull(txn);
             controller = new CompactionController(cfs, sstables, Integer.MIN_VALUE);
-            ci = new CompactionIterator(txn.opType(), scanners, controller, FBUtilities.nowInSeconds(), UUID.randomUUID());
+            ci = new CompactionIterator(txn.opType(), scanners, controller, FBUtilities.nowInSeconds(), nextTimeUUID());
             CompactionManager.instance.active.beginCompaction(ci);
         }
 
@@ -462,7 +462,7 @@
         for (int i = 0; i < 10; i++)
         {
             execute("insert into %s (id, something) values (?,?)", i, i);
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
         }
         AbstractCompactionTask ct = null;
 
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java
index 500a881..9d81b61 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java
@@ -21,7 +21,6 @@
 import java.nio.ByteBuffer;
 import java.util.Set;
 import java.util.function.LongPredicate;
-import java.util.function.Predicate;
 
 import com.google.common.collect.Sets;
 import org.junit.BeforeClass;
@@ -91,7 +90,7 @@
         {
             assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp1); //memtable only
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
             assertTrue(controller.getPurgeEvaluator(key).test(Long.MAX_VALUE)); //no memtables and no sstables
         }
 
@@ -99,7 +98,7 @@
 
         // create another sstable
         applyMutation(cfs.metadata(), key, timestamp2);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // check max purgeable timestamp when compacting the first sstable with and without a memtable
         try (CompactionController controller = new CompactionController(cfs, compacting, 0))
@@ -112,7 +111,7 @@
         }
 
         // check max purgeable timestamp again without any sstables but with different insertion orders on the memtable
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         //newest to oldest
         try (CompactionController controller = new CompactionController(cfs, null, 0))
@@ -124,7 +123,7 @@
             assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3); //memtable only
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         //oldest to newest
         try (CompactionController controller = new CompactionController(cfs, null, 0))
@@ -152,14 +151,14 @@
 
         // create sstable with tombstone that should be expired in no older timestamps
         applyDeleteMutation(cfs.metadata(), key, timestamp2);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // first sstable with tombstone is compacting
         Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());
 
         // create another sstable with more recent timestamp
         applyMutation(cfs.metadata(), key, timestamp1);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // second sstable is overlapping
         Set<SSTableReader> overlapping = Sets.difference(Sets.newHashSet(cfs.getLiveSSTables()), compacting);
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionExecutorTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionExecutorTest.java
index cca2997..813d12e 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionExecutorTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionExecutorTest.java
@@ -18,15 +18,14 @@
 
 package org.apache.cassandra.db.compaction;
 
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.concurrent.ExecutorFactory;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -34,36 +33,15 @@
 public class CompactionExecutorTest
 {
     static Throwable testTaskThrowable = null;
-    static SimpleCondition afterExecuteCompleted = null;
-    private static class TestTaskExecutor extends CompactionManager.CompactionExecutor
-    {
-        // afterExecute runs immediately after the task completes, but it may
-        // race with the main thread checking the result, so make the main thread wait
-        // with a simple condition
-        @Override
-        public void afterExecute(Runnable r, Throwable t)
-        {
-            if (t == null)
-            {
-                t = DebuggableThreadPoolExecutor.extractThrowable(r);
-            }
-            testTaskThrowable = t;
-            afterExecuteCompleted.signalAll();
-        }
-        @Override
-        protected void beforeExecute(Thread t, Runnable r)
-        {
-        }
-    }
     private CompactionManager.CompactionExecutor executor;
 
     @Before
     public void setup()
     {
-        DatabaseDescriptor.daemonInitialization();
-        executor = new TestTaskExecutor();
-        testTaskThrowable = null;
-        afterExecuteCompleted = new SimpleCondition();
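+        // Record task failures through the factory's (thread, throwable) exception handler rather than overriding afterExecute.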
+        executor = new CompactionManager.CompactionExecutor(new ExecutorFactory.Default(null, null, (thread, throwable) -> {
+            if (throwable != null)
+                testTaskThrowable = throwable;
+        }), 1, "test", Integer.MAX_VALUE);
     }
 
     @After
@@ -73,19 +51,16 @@
         Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
     }
 
-    void awaitExecution() throws Exception
-    {
-        assert afterExecuteCompleted.await(10, TimeUnit.SECONDS) : "afterExecute failed to complete";
-    }
-
     @Test
     public void testFailedRunnable() throws Exception
     {
-        executor.submitIfRunning(
+        testTaskThrowable = null;
+        Future<?> tt = executor.submitIfRunning(
             () -> { assert false : "testFailedRunnable"; }
             , "compactionExecutorTest");
 
-        awaitExecution();
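+        // Poll until the task completes; the handler installed in setup() records any failure in testTaskThrowable.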
+        while (!tt.isDone())
+            Thread.sleep(10);
         assertNotNull(testTaskThrowable);
         assertEquals(testTaskThrowable.getMessage(), "testFailedRunnable");
     }
@@ -93,11 +68,13 @@
     @Test
     public void testFailedCallable() throws Exception
     {
-        executor.submitIfRunning(
+        testTaskThrowable = null;
+        Future<?> tt = executor.submitIfRunning(
             () -> { assert false : "testFailedCallable"; return 1; }
             , "compactionExecutorTest");
 
-        awaitExecution();
+        while (!tt.isDone())
+            Thread.sleep(10);
         assertNotNull(testTaskThrowable);
         assertEquals(testTaskThrowable.getMessage(), "testFailedCallable");
     }
@@ -105,11 +82,13 @@
     @Test
     public void testExceptionRunnable() throws Exception
     {
-        executor.submitIfRunning(
+        testTaskThrowable = null;
+        Future<?> tt = executor.submitIfRunning(
         () -> { throw new RuntimeException("testExceptionRunnable"); }
         , "compactionExecutorTest");
 
-        awaitExecution();
+        while (!tt.isDone())
+            Thread.sleep(10);
         assertNotNull(testTaskThrowable);
         assertEquals(testTaskThrowable.getMessage(), "testExceptionRunnable");
     }
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionInfoTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionInfoTest.java
index c0196e8..753a185 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionInfoTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionInfoTest.java
@@ -20,24 +20,25 @@
 
 import java.util.ArrayList;
 import java.util.UUID;
-import java.util.regex.Pattern;
 
-import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.repair.AbstractPendingAntiCompactionTest;
 import org.apache.cassandra.schema.MockSchema;
 import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.utils.TimeUUID;
 import org.assertj.core.api.Assertions;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class CompactionInfoTest extends AbstractPendingAntiCompactionTest
 {
     @Test
     public void testCompactionInfoToStringContainsTaskId()
     {
         ColumnFamilyStore cfs = MockSchema.newCFS();
-        UUID expectedTaskId = UUID.randomUUID();
+        TimeUUID expectedTaskId = nextTimeUUID();
         CompactionInfo compactionInfo = new CompactionInfo(cfs.metadata(), OperationType.COMPACTION, 0, 1000, expectedTaskId, new ArrayList<>());
         Assertions.assertThat(compactionInfo.toString())
                   .contains(expectedTaskId.toString());
@@ -47,7 +48,7 @@
     public void testCompactionInfoToStringFormat()
     {
         UUID tableId = UUID.randomUUID();
-        UUID taskId = UUID.randomUUID();
+        TimeUUID taskId = nextTimeUUID();
         ColumnFamilyStore cfs = MockSchema.newCFS(builder -> builder.id(TableId.fromUUID(tableId)));
         CompactionInfo compactionInfo = new CompactionInfo(cfs.metadata(), OperationType.COMPACTION, 0, 1000, taskId, new ArrayList<>());
         Assertions.assertThat(compactionInfo.toString())
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java
index 0aab021..ff3f210 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java
@@ -466,7 +466,7 @@
         createTable("CREATE TABLE %s (pk text, ck1 int, ck2 int, v int, PRIMARY KEY (pk, ck1, ck2))");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (pk, ck1, ck2, v) values (?, ?, ?, ?)", "key", i, i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
 
         DatabaseDescriptor.setSnapshotOnDuplicateRowDetection(true);
         TableMetadata metadata = getCurrentColumnFamilyStore().metadata();
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerPendingRepairTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerPendingRepairTest.java
index 9f2bc2e..76bbd88 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerPendingRepairTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerPendingRepairTest.java
@@ -20,7 +20,6 @@
 
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 
 import com.google.common.collect.Iterables;
 import org.junit.Assert;
@@ -31,9 +30,11 @@
 import org.apache.cassandra.notifications.SSTableDeletingNotification;
 import org.apache.cassandra.notifications.SSTableListChangedNotification;
 import org.apache.cassandra.notifications.SSTableRepairStatusChanged;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
 import org.apache.cassandra.repair.consistent.LocalSessionAccessor;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * Tests CompactionStrategyManager's handling of pending repair sstables
@@ -61,12 +62,12 @@
         return csm.getUnrepairedUnsafe().containsSSTable(sstable);
     }
 
-    private boolean hasPendingStrategiesFor(UUID sessionID)
+    private boolean hasPendingStrategiesFor(TimeUUID sessionID)
     {
         return !Iterables.isEmpty(csm.getPendingRepairsUnsafe().getStrategiesFor(sessionID));
     }
 
-    private boolean hasTransientStrategiesFor(UUID sessionID)
+    private boolean hasTransientStrategiesFor(TimeUUID sessionID)
     {
         return !Iterables.isEmpty(csm.getTransientRepairsUnsafe().getStrategiesFor(sessionID));
     }
@@ -77,7 +78,7 @@
     @Test
     public void sstableAdded()
     {
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         Assert.assertTrue(Iterables.isEmpty(csm.getPendingRepairsUnsafe().allStrategies()));
 
@@ -103,7 +104,7 @@
     @Test
     public void sstableListChangedAddAndRemove()
     {
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
 
         SSTableReader sstable1 = makeSSTable(true);
@@ -152,7 +153,7 @@
     @Test
     public void sstableRepairStatusChanged()
     {
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
 
         // add as unrepaired
@@ -186,7 +187,7 @@
     @Test
     public void sstableDeleted()
     {
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
 
         SSTableReader sstable = makeSSTable(true);
@@ -209,7 +210,7 @@
     @Test
     public void getStrategies()
     {
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
 
         List<List<AbstractCompactionStrategy>> strategies;
@@ -232,9 +233,9 @@
      * which reclassify the sstables as repaired
      */
     @Test
-    public void cleanupCompactionFinalized()
+    public void cleanupCompactionFinalized() throws NoSuchRepairSessionException
     {
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -274,7 +275,7 @@
     @Test
     public void cleanupCompactionFailed()
     {
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -310,7 +311,7 @@
     public void finalizedSessionTransientCleanup()
     {
         Assert.assertTrue(cfs.getLiveSSTables().isEmpty());
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, true);
@@ -341,7 +342,7 @@
     public void failedSessionTransientCleanup()
     {
         Assert.assertTrue(cfs.getLiveSSTables().isEmpty());
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, true);
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java
index d29ab52..9960e8e 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java
@@ -18,14 +18,12 @@
 
 package org.apache.cassandra.db.compaction;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
@@ -38,7 +36,6 @@
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,14 +53,15 @@
 import org.apache.cassandra.dht.ByteOrderedPartitioner;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.notifications.SSTableAddedNotification;
 import org.apache.cassandra.notifications.SSTableDeletingNotification;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -134,7 +132,7 @@
             else if (i % 3 == 1)
             {
                 //make one third of sstables pending repair
-                cfs.getCompactionStrategyManager().mutateRepaired(newSSTables, 0, UUIDGen.getTimeUUID(), false);
+                cfs.getCompactionStrategyManager().mutateRepaired(newSSTables, 0, nextTimeUUID(), false);
             }
             previousSSTables = currentSSTables;
         }
@@ -355,8 +353,8 @@
             repaired.add(createSSTableWithKey(cfs.keyspace.getName(), cfs.name, key++));
         }
 
-        cfs.getCompactionStrategyManager().mutateRepaired(transientRepairs, 0, UUID.randomUUID(), true);
-        cfs.getCompactionStrategyManager().mutateRepaired(pendingRepair, 0, UUID.randomUUID(), false);
+        cfs.getCompactionStrategyManager().mutateRepaired(transientRepairs, 0, nextTimeUUID(), true);
+        cfs.getCompactionStrategyManager().mutateRepaired(pendingRepair, 0, nextTimeUUID(), false);
         cfs.getCompactionStrategyManager().mutateRepaired(repaired, 1000, null, false);
 
         DiskBoundaries boundaries = new DiskBoundaries(cfs, cfs.getDirectories().getWriteableLocations(),
@@ -403,7 +401,7 @@
         Directories.DataDirectory[] directories = new Directories.DataDirectory[disks];
         for (int i = 0; i < disks; ++i)
         {
-            File tempDir = Files.createTempDir();
+            File tempDir = new File(Files.createTempDir());
             tempDir.deleteOnExit();
             directories[i] = new Directories.DataDirectory(tempDir);
         }
@@ -462,7 +460,7 @@
         int firstKey = Integer.parseInt(new String(ByteBufferUtil.getArray(reader.first.getKey())));
         while (boundaries[index] <= firstKey)
             index++;
-        logger.debug("Index for SSTable {} on boundary {} is {}", reader.descriptor.generation, Arrays.toString(boundaries), index);
+        logger.debug("Index for SSTable {} on boundary {} is {}", reader.descriptor.id, Arrays.toString(boundaries), index);
         return index;
     }
 
@@ -511,7 +509,7 @@
         .build()
         .applyUnsafe();
         Set<SSTableReader> before = cfs.getLiveSSTables();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Set<SSTableReader> after = cfs.getLiveSSTables();
         return Iterables.getOnlyElement(Sets.difference(after, before));
     }
@@ -521,7 +519,7 @@
     {
         MockCFS(ColumnFamilyStore cfs, Directories dirs)
         {
-            super(cfs.keyspace, cfs.getTableName(), 0, cfs.metadata, dirs, false, false, true);
+            super(cfs.keyspace, cfs.getTableName(), Util.newSeqGen(), cfs.metadata, dirs, false, false, true);
         }
     }
 
@@ -532,7 +530,7 @@
 
         private MockCFSForCSM(ColumnFamilyStore cfs, CountDownLatch latch, AtomicInteger upgradeTaskCount)
         {
-            super(cfs.keyspace, cfs.name, 10, cfs.metadata, cfs.getDirectories(), true, false, false);
+            super(cfs.keyspace, cfs.name, Util.newSeqGen(10), cfs.metadata, cfs.getDirectories(), true, false, false);
             this.latch = latch;
             this.upgradeTaskCount = upgradeTaskCount;
         }
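
CompactionStrategyManagerTest also swaps java.io.File for Cassandra's own org.apache.cassandra.io.util.File wrapper. A hedged sketch of how the wrapper is used around Guava's temp-directory helper, based only on the calls visible in the hunks above:

import com.google.common.io.Files;

import org.apache.cassandra.io.util.File;

public class TempDirSketch
{
    public static void main(String[] args)
    {
        // Guava still returns a java.io.File; the Cassandra wrapper is constructed around it
        File tempDir = new File(Files.createTempDir());
        tempDir.deleteOnExit();
        // accessor names change with the wrapper, e.g. absolutePath() instead of getAbsolutePath()
        System.out.println(tempDir.absolutePath());
    }
}
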
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionTaskTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionTaskTest.java
index bdd2014..24b0c3d 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionTaskTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionTaskTest.java
@@ -23,7 +23,6 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 
 import org.junit.Assert;
 import org.junit.Before;
@@ -31,6 +30,7 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -41,9 +41,12 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
+import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class CompactionTaskTest
 {
     private static TableMetadata cfm;
@@ -53,7 +56,7 @@
     public static void setUpClass() throws Exception
     {
         SchemaLoader.prepareServer();
-        cfm = CreateTableStatement.parse("CREATE TABLE tbl (k INT PRIMARY KEY, v INT)", "coordinatorsessiontest").build();
+        cfm = CreateTableStatement.parse("CREATE TABLE tbl (k INT PRIMARY KEY, v INT)", "ks").build();
         SchemaLoader.createKeyspace("ks", KeyspaceParams.simple(1), cfm);
         cfs = Schema.instance.getColumnFamilyStoreInstance(cfm.id);
     }
@@ -71,10 +74,10 @@
         cfs.getCompactionStrategyManager().disable();
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (1, 1);");
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (2, 2);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (3, 3);");
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (4, 4);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Set<SSTableReader> sstables = cfs.getLiveSSTables();
 
         Assert.assertEquals(2, sstables.size());
@@ -96,7 +99,7 @@
         Assert.assertEquals(Transactional.AbstractTransactional.State.ABORTED, txn.state());
     }
 
-    private static void mutateRepaired(SSTableReader sstable, long repairedAt, UUID pendingRepair, boolean isTransient) throws IOException
+    private static void mutateRepaired(SSTableReader sstable, long repairedAt, TimeUUID pendingRepair, boolean isTransient) throws IOException
     {
         sstable.descriptor.getMetadataSerializer().mutateRepairMetadata(sstable.descriptor, repairedAt, pendingRepair, isTransient);
         sstable.reloadSSTableMetadata();
@@ -111,13 +114,13 @@
     {
         cfs.getCompactionStrategyManager().disable();
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (1, 1);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (2, 2);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (3, 3);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (4, 4);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
         Assert.assertEquals(4, sstables.size());
@@ -128,8 +131,8 @@
         SSTableReader pending2 = sstables.get(3);
 
         mutateRepaired(repaired, FBUtilities.nowInSeconds(), ActiveRepairService.NO_PENDING_REPAIR, false);
-        mutateRepaired(pending1, ActiveRepairService.UNREPAIRED_SSTABLE, UUIDGen.getTimeUUID(), false);
-        mutateRepaired(pending2, ActiveRepairService.UNREPAIRED_SSTABLE, UUIDGen.getTimeUUID(), false);
+        mutateRepaired(pending1, UNREPAIRED_SSTABLE, nextTimeUUID(), false);
+        mutateRepaired(pending2, UNREPAIRED_SSTABLE, nextTimeUUID(), false);
 
         LifecycleTransaction txn = null;
         List<SSTableReader> toCompact = new ArrayList<>(sstables);
@@ -160,13 +163,13 @@
     {
         cfs.getCompactionStrategyManager().disable();
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (1, 1);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (2, 2);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (3, 3);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, v) VALUES (4, 4);");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Set<SSTableReader> sstables = cfs.getLiveSSTables();
         Assert.assertEquals(4, sstables.size());
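
CompactionTaskTest replaces every cfs.forceBlockingFlush() call with the Util.flush(cfs) helper. forceBlockingFlush now requires a FlushReason, and a plausible reading of the helper (a sketch, not the actual Util source) is simply:

import org.apache.cassandra.db.ColumnFamilyStore;

public final class FlushHelperSketch
{
    // hypothetical stand-in for org.apache.cassandra.Util.flush(cfs)
    public static void flush(ColumnFamilyStore cfs)
    {
        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
    }
}
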
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java
index 1c02699..462d406 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java
@@ -18,9 +18,9 @@
 
 package org.apache.cassandra.db.compaction;
 
-import java.util.concurrent.TimeUnit;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 
@@ -35,6 +35,7 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Throwables;
 import org.jboss.byteman.contrib.bmunit.BMRule;
 import org.jboss.byteman.contrib.bmunit.BMRules;
 import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
@@ -129,7 +130,7 @@
 
         execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 0, 1, 1);
         Util.spinAssertEquals(true, () -> CompactionManager.instance.compactingCF.count(cfs) == 0, 5);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Util.spinAssertEquals(true, () -> CompactionManager.instance.compactingCF.count(cfs) == 0, 5);
         FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(cfs));
@@ -147,7 +148,7 @@
         {
             execute("INSERT INTO %s (id, val) values (2, 'immortal')");
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     private void createLowGCGraceTable(){
@@ -194,7 +195,7 @@
             {
                 execute("insert into %s (k, c, v) values (?, ?, ?)", i, j, i*j);
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         cfs.getCompactionStrategyManager().mutateRepaired(cfs.getLiveSSTables(), System.currentTimeMillis(), null, false);
         for (int i = 0; i < 5; i++)
@@ -203,7 +204,7 @@
             {
                 execute("insert into %s (k, c, v) values (?, ?, ?)", i, j, i*j);
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         assertTrue(cfs.getTracker().getCompacting().isEmpty());
@@ -216,7 +217,7 @@
         }
         catch (RuntimeException t)
         {
-            if (!(t.getCause().getCause() instanceof CompactionInterruptedException))
+            if (!Throwables.isCausedBy(t, CompactionInterruptedException.class::isInstance))
                 throw t;
             //expected
         }
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java
index e169548..65eea6a 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java
@@ -31,6 +31,7 @@
 import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.config.Config;
@@ -216,7 +217,7 @@
         getCurrentColumnFamilyStore().setCompactionParameters(localOptions);
         assertTrue(verifyStrategies(getCurrentColumnFamilyStore().getCompactionStrategyManager(), DateTieredCompactionStrategy.class));
         // Invalidate disk boundaries to ensure that boundary invalidation will not cause the old strategy to be reloaded
-        getCurrentColumnFamilyStore().invalidateDiskBoundaries();
+        getCurrentColumnFamilyStore().invalidateLocalRanges();
         // altering something non-compaction related
         execute("ALTER TABLE %s WITH gc_grace_seconds = 1000");
         // should keep the local compaction strat
@@ -297,7 +298,7 @@
         RangeTombstone rt = new RangeTombstone(Slice.ALL, new DeletionTime(System.currentTimeMillis(), -1));
         RowUpdateBuilder rub = new RowUpdateBuilder(getCurrentColumnFamilyStore().metadata(), System.currentTimeMillis() * 1000, 22).clustering(33).addRangeTombstone(rt);
         rub.build().apply();
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         compactAndValidate();
         readAndValidate(true);
         readAndValidate(false);
@@ -311,7 +312,7 @@
         // write a standard tombstone with negative local deletion time (LDTs are not set by user and should not be negative):
         RowUpdateBuilder rub = new RowUpdateBuilder(getCurrentColumnFamilyStore().metadata(), -1, System.currentTimeMillis() * 1000, 22).clustering(33).delete("b");
         rub.build().apply();
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         compactAndValidate();
         readAndValidate(true);
         readAndValidate(false);
@@ -325,7 +326,7 @@
         // write a partition deletion with negative local deletion time (LDTs are not set by user and should not be negative):
         PartitionUpdate pu = PartitionUpdate.simpleBuilder(getCurrentColumnFamilyStore().metadata(), 22).nowInSec(-1).delete().build();
         new Mutation(pu).apply();
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         compactAndValidate();
         readAndValidate(true);
         readAndValidate(false);
@@ -338,7 +339,7 @@
         prepare();
         // write a row deletion with negative local deletion time (LDTs are not set by user and should not be negative):
         RowUpdateBuilder.deleteRowAt(getCurrentColumnFamilyStore().metadata(), System.currentTimeMillis() * 1000, -1, 22, 33).apply();
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         compactAndValidate();
         readAndValidate(true);
         readAndValidate(false);
@@ -356,11 +357,11 @@
     {
         // write enough data to make sure we use an IndexedReader when doing a read, and make sure it fails when reading a corrupt row deletion
         DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception);
-        int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKB();
+        int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKiB();
         DatabaseDescriptor.setColumnIndexSize(1024);
         prepareWide();
         RowUpdateBuilder.deleteRowAt(getCurrentColumnFamilyStore().metadata(), System.currentTimeMillis() * 1000, -1, 22, 33).apply();
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         readAndValidate(true);
         readAndValidate(false);
         DatabaseDescriptor.setColumnIndexSize(maxSizePre);
@@ -371,12 +372,12 @@
     {
         // write enough data to make sure we use an IndexedReader when doing a read, and make sure it fails when reading a corrupt standard tombstone
         DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception);
-        int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKB();
+        int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKiB();
         DatabaseDescriptor.setColumnIndexSize(1024);
         prepareWide();
         RowUpdateBuilder rub = new RowUpdateBuilder(getCurrentColumnFamilyStore().metadata(), -1, System.currentTimeMillis() * 1000, 22).clustering(33).delete("b");
         rub.build().apply();
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         readAndValidate(true);
         readAndValidate(false);
         DatabaseDescriptor.setColumnIndexSize(maxSizePre);
@@ -387,16 +388,16 @@
     {
         // write enough data to make sure we use an IndexedReader when doing a read, and make sure it fails when reading a corrupt range tombstone
         DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception);
-        final int maxSizePreKB = DatabaseDescriptor.getColumnIndexSizeInKB();
+        final int maxSizePreKiB = DatabaseDescriptor.getColumnIndexSizeInKiB();
         DatabaseDescriptor.setColumnIndexSize(1024);
         prepareWide();
         RangeTombstone rt = new RangeTombstone(Slice.ALL, new DeletionTime(System.currentTimeMillis(), -1));
         RowUpdateBuilder rub = new RowUpdateBuilder(getCurrentColumnFamilyStore().metadata(), System.currentTimeMillis() * 1000, 22).clustering(33).addRangeTombstone(rt);
         rub.build().apply();
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         readAndValidate(true);
         readAndValidate(false);
-        DatabaseDescriptor.setColumnIndexSize(maxSizePreKB);
+        DatabaseDescriptor.setColumnIndexSize(maxSizePreKiB);
     }
 
 
@@ -415,7 +416,7 @@
             {
                 execute("insert into %s (id, id2, t) values (?, ?, ?)", i, j, value);
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         assertEquals(50, cfs.getLiveSSTables().size());
         LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) cfs.getCompactionStrategyManager().getUnrepairedUnsafe().first();
@@ -432,7 +433,7 @@
         ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
         cfs.disableAutoCompaction();
         execute("insert into %s (id, id2, t) values (?, ?, ?)", 1,1,"L1");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.forceMajorCompaction();
         SSTableReader l1sstable = cfs.getLiveSSTables().iterator().next();
         assertEquals(1, l1sstable.getSSTableLevel());
@@ -446,7 +447,7 @@
             {
                 execute("insert into %s (id, id2, t) values (?, ?, ?)", i, j, value);
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         assertEquals(51, cfs.getLiveSSTables().size());
 
@@ -473,14 +474,14 @@
             r.nextBytes(b);
             execute("insert into %s (id, x) values (?, ?)", i, ByteBuffer.wrap(b));
         }
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        getCurrentColumnFamilyStore().forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
         getCurrentColumnFamilyStore().disableAutoCompaction();
         for (int i = 0; i < 1000; i++)
         {
             r.nextBytes(b);
             execute("insert into %s (id, x) values (?, ?)", i, ByteBuffer.wrap(b));
         }
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        getCurrentColumnFamilyStore().forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) getCurrentColumnFamilyStore().getCompactionStrategyManager().getUnrepairedUnsafe().first();
         LeveledCompactionTask lcsTask;
@@ -507,7 +508,7 @@
             r.nextBytes(b);
             execute("insert into %s (id, x) values (?, ?)", i, ByteBuffer.wrap(b));
         }
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        getCurrentColumnFamilyStore().forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
         // now we have a bunch of sstables in L2 and one in L0 - bump the L0 one to L1:
         for (SSTableReader sstable : getCurrentColumnFamilyStore().getLiveSSTables())
         {
@@ -649,7 +650,7 @@
         {
             execute("INSERT INTO %s (id, b) VALUES (?, ?)", i, String.valueOf(i));
         }
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
 
         assertTombstones(getCurrentColumnFamilyStore().getLiveSSTables().iterator().next(), false);
         if (deletedCell)
@@ -657,7 +658,7 @@
         else
             execute("DELETE FROM %s WHERE id = ?", 50);
         getCurrentColumnFamilyStore().setNeverPurgeTombstones(false);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Thread.sleep(2000); // wait for gcgs to pass
         getCurrentColumnFamilyStore().forceMajorCompaction();
         assertTombstones(getCurrentColumnFamilyStore().getLiveSSTables().iterator().next(), false);
@@ -666,7 +667,7 @@
         else
             execute("DELETE FROM %s WHERE id = ?", 44);
         getCurrentColumnFamilyStore().setNeverPurgeTombstones(true);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         Thread.sleep(1100);
         getCurrentColumnFamilyStore().forceMajorCompaction();
         assertTombstones(getCurrentColumnFamilyStore().getLiveSSTables().iterator().next(), true);
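
CompactionsCQLTest picks up the KB-to-KiB rename on DatabaseDescriptor and routes flushes through the CQLTester flush() helper. The save/restore pattern the corrupt-tombstone tests now use looks roughly like this sketch, which assumes only the DatabaseDescriptor methods visible in the hunks above:

import org.apache.cassandra.config.DatabaseDescriptor;

public class ColumnIndexSizeSketch
{
    static void withSmallColumnIndex(Runnable body)
    {
        int previousKiB = DatabaseDescriptor.getColumnIndexSizeInKiB(); // renamed from ...InKB
        DatabaseDescriptor.setColumnIndexSize(1024);                    // small value used by these tests
        try
        {
            body.run();
        }
        finally
        {
            DatabaseDescriptor.setColumnIndexSize(previousKiB);         // restore the original setting
        }
    }
}
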
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
index a0d52aa..4f1b639 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
@@ -102,14 +102,14 @@
                    .build().applyUnsafe();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // deletes
         for (int i = 0; i < 10; i++)
         {
             RowUpdateBuilder.deleteRow(cfs.metadata(), 1, key, String.valueOf(i)).applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // resurrect one column
         RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 2, key);
@@ -117,7 +117,7 @@
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // major compact and test that all columns but the resurrected one is completely gone
         FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE, false));
@@ -146,14 +146,14 @@
                    .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                    .build().applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // deletes
         for (int i = 0; i < 10; i++)
         {
             RowUpdateBuilder.deleteRow(cfs.metadata(), Long.MAX_VALUE, key, String.valueOf(i)).applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // major compact - tombstones should be purged
         FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE, false));
@@ -164,7 +164,7 @@
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         cfs.invalidateCachedPartition(dk(key));
 
@@ -191,13 +191,13 @@
                    .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                    .build().applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new Mutation.PartitionUpdateCollector(KEYSPACE1, dk(key))
             .add(PartitionUpdate.fullPartitionDelete(cfs.metadata(), dk(key), Long.MAX_VALUE, FBUtilities.nowInSeconds()))
             .build()
             .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // major compact - tombstones should be purged
         FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE, false));
@@ -208,7 +208,7 @@
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         cfs.invalidateCachedPartition(dk(key));
 
@@ -235,11 +235,11 @@
                    .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                    .build().applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), Long.MAX_VALUE, dk(key))
             .addRangeTombstone(String.valueOf(0), String.valueOf(9)).build().applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // major compact - tombstones should be purged
         FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE, false));
@@ -250,7 +250,7 @@
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         cfs.invalidateCachedPartition(dk(key));
 
@@ -278,7 +278,7 @@
                         .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                         .build().applyUnsafe();
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
 
             // deletes
             for (int i = 0; i < 10; i++)
@@ -286,7 +286,7 @@
                 RowUpdateBuilder.deleteRow(cfs.metadata(), 1, key, String.valueOf(i)).applyUnsafe();
             }
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         DecoratedKey key1 = Util.dk("key1");
@@ -294,7 +294,7 @@
 
         // flush, remember the current sstable and then resurrect one column
         // for first key. Then submit minor compaction on remembered sstables.
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Collection<SSTableReader> sstablesIncomplete = cfs.getLiveSSTables();
 
         RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 2, "key1");
@@ -302,7 +302,7 @@
                 .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                 .build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         try (CompactionTasks tasks = cfs.getCompactionStrategyManager().getUserDefinedTasks(sstablesIncomplete, Integer.MAX_VALUE))
         {
             Iterables.getOnlyElement(tasks).execute(ActiveCompactionsTracker.NOOP);
@@ -343,16 +343,16 @@
         .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
         .build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         // delete c1
         RowUpdateBuilder.deleteRow(cfs.metadata(), 10, key3, "c1").applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Collection<SSTableReader> sstablesIncomplete = cfs.getLiveSSTables();
 
         // delete c2 so we have a new delete in a different SSTable
         RowUpdateBuilder.deleteRow(cfs.metadata(), 9, key3, "c2").applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // compact the sstables with the c1/c2 data and the c1 tombstone
         try (CompactionTasks tasks = cfs.getCompactionStrategyManager().getUserDefinedTasks(sstablesIncomplete, Integer.MAX_VALUE))
@@ -393,7 +393,7 @@
         {
             RowUpdateBuilder.deleteRow(cfs.metadata(), 1, key, String.valueOf(i)).applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(String.valueOf(cfs.getLiveSSTables()), 1, cfs.getLiveSSTables().size()); // inserts & deletes were in the same memtable -> only deletes in sstable
 
         // compact and test that the row is completely gone
@@ -438,7 +438,7 @@
         assertFalse(Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build()).isEmpty());
 
         // flush and major compact
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Util.compactAll(cfs, Integer.MAX_VALUE).get();
 
         // Since we've force purging (by passing MAX_VALUE for gc_before), the row should have been invalidated and we should have no deletion info anymore
@@ -474,7 +474,7 @@
         assertFalse(partition.partitionLevelDeletion().isLive());
 
         // flush and major compact (with tombstone purging)
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Util.compactAll(cfs, Integer.MAX_VALUE).get();
         assertFalse(Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build()).isEmpty());
 
@@ -504,14 +504,14 @@
         // write a row out to one sstable
         QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v1, v2) VALUES (%d, '%s', %d)",
                                                      keyspace, table, 1, "foo", 1));
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         UntypedResultSet result = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
         assertEquals(1, result.size());
 
         // write a row tombstone out to a second sstable
         QueryProcessor.executeInternal(String.format("DELETE FROM %s.%s WHERE k = %d", keyspace, table, 1));
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // basic check that the row is considered deleted
         assertEquals(2, cfs.getLiveSSTables().size());
@@ -529,14 +529,14 @@
         // write a row out to one sstable
         QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v1, v2) VALUES (%d, '%s', %d)",
                                                      keyspace, table, 1, "foo", 1));
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(2, cfs.getLiveSSTables().size());
         result = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
         assertEquals(1, result.size());
 
         // write a row tombstone out to a different sstable
         QueryProcessor.executeInternal(String.format("DELETE FROM %s.%s WHERE k = %d", keyspace, table, 1));
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // compact the two sstables with a gcBefore that *does* allow the row tombstone to be purged
         FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, (int) (System.currentTimeMillis() / 1000) + 10000, false));
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
index dd51f61..fcf5c51 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
@@ -18,7 +18,6 @@
 */
 package org.apache.cassandra.db.compaction;
 
-import java.io.File;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -64,15 +63,19 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
+import org.apache.cassandra.io.sstable.SSTableId;
+import org.apache.cassandra.io.sstable.SSTableIdFactory;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -131,14 +134,14 @@
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
         store.clearUnsafe();
-        MigrationManager.announceTableUpdate(store.metadata().unbuild().gcGraceSeconds(1).build(), true);
+        SchemaTestUtil.announceTableUpdate(store.metadata().unbuild().gcGraceSeconds(1).build());
 
         // disable compaction while flushing
         store.disableAutoCompaction();
 
         long timestamp = populate(KEYSPACE1, CF_STANDARD1, 0, 9, 3); //ttl=3s
 
-        store.forceBlockingFlush();
+        Util.flush(store);
         assertEquals(1, store.getLiveSSTables().size());
         long originalSize = store.getLiveSSTables().iterator().next().uncompressedLength();
 
@@ -173,18 +176,18 @@
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
         store.clearUnsafe();
 
-        MigrationManager.announceTableUpdate(store.metadata().unbuild().gcGraceSeconds(1).compaction(CompactionParams.stcs(compactionOptions)).build(), true);
+        SchemaTestUtil.announceTableUpdate(store.metadata().unbuild().gcGraceSeconds(1).compaction(CompactionParams.stcs(compactionOptions)).build());
 
         // disable compaction while flushing
         store.disableAutoCompaction();
 
         //Populate sstable1 with keys [0..9]
         populate(KEYSPACE1, CF_STANDARD1, 0, 9, 3); //ttl=3s
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         //Populate sstable2 with keys [10..19] (keys do not overlap with SSTable1)
         long timestamp2 = populate(KEYSPACE1, CF_STANDARD1, 10, 19, 3); //ttl=3s
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         assertEquals(2, store.getLiveSSTables().size());
 
@@ -216,7 +219,7 @@
 
         // now let's enable the magic property
         compactionOptions.put("unchecked_tombstone_compaction", "true");
-        MigrationManager.announceTableUpdate(store.metadata().unbuild().gcGraceSeconds(1).compaction(CompactionParams.stcs(compactionOptions)).build(), true);
+        SchemaTestUtil.announceTableUpdate(store.metadata().unbuild().gcGraceSeconds(1).compaction(CompactionParams.stcs(compactionOptions)).build());
 
         //submit background task again and wait for it to complete
         FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
@@ -264,14 +267,14 @@
             .add("val", "val1")
             .build().applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Collection<SSTableReader> sstables = cfs.getLiveSSTables();
 
         assertEquals(1, sstables.size());
         SSTableReader sstable = sstables.iterator().next();
 
-        int prevGeneration = sstable.descriptor.generation;
-        String file = new File(sstable.descriptor.filenameFor(Component.DATA)).getAbsolutePath();
+        SSTableId prevGeneration = sstable.descriptor.id;
+        String file = new File(sstable.descriptor.filenameFor(Component.DATA)).absolutePath();
         // submit user defined compaction on flushed sstable
         CompactionManager.instance.forceUserDefinedCompaction(file);
         // wait until user defined compaction finishes
@@ -282,7 +285,7 @@
         // CF should have only one sstable with generation number advanced
         sstables = cfs.getLiveSSTables();
         assertEquals(1, sstables.size());
-        assertEquals( prevGeneration + 1, sstables.iterator().next().descriptor.generation);
+        assertThat(SSTableIdFactory.COMPARATOR.compare(prevGeneration, sstables.iterator().next().descriptor.id)).isLessThan(0);
     }
 
     public static void writeSSTableWithRangeTombstoneMaskingOneColumn(ColumnFamilyStore cfs, TableMetadata table, int[] dks) {
@@ -299,7 +302,7 @@
             notYetDeletedRowUpdateBuilder.clustering("02").add("val", "a"); //Range tombstone doesn't cover this (timestamp 3 > 2)
             notYetDeletedRowUpdateBuilder.build().applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     @Test
@@ -384,7 +387,7 @@
         rowUpdateBuilder.clustering("c").add("val", "a");
         rowUpdateBuilder.build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Collection<SSTableReader> sstablesBefore = cfs.getLiveSSTables();
 
@@ -402,7 +405,7 @@
         // Sleep one second so that the removal is indeed purgeable even with gcgrace == 0
         Thread.sleep(1000);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Collection<SSTableReader> sstablesAfter = cfs.getLiveSSTables();
         Collection<SSTableReader> toCompact = new ArrayList<SSTableReader>();
@@ -484,7 +487,7 @@
             insertRowWithKey(i + 100);
             insertRowWithKey(i + 200);
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         assertEquals(1, store.getLiveSSTables().size());
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
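
The assertion change in CompactionsTest reflects that sstable identifiers are no longer plain integer generations, so "generation + 1" is meaningless; the test only asserts that the compacted sstable's id sorts after the original via SSTableIdFactory.COMPARATOR. A hedged sketch of that check:

import org.apache.cassandra.io.sstable.SSTableId;
import org.apache.cassandra.io.sstable.SSTableIdFactory;

import static org.assertj.core.api.Assertions.assertThat;

public class SSTableIdOrderingSketch
{
    static void assertNewerThan(SSTableId before, SSTableId after)
    {
        // a negative comparison means 'before' precedes 'after' in id order
        assertThat(SSTableIdFactory.COMPARATOR.compare(before, after)).isLessThan(0);
    }
}
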
diff --git a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java
index 95542a1..33dfc25 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java
@@ -21,7 +21,8 @@
  */
 
 
-import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.*;
 
 import org.junit.After;
@@ -32,6 +33,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -43,6 +45,7 @@
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.*;
 
@@ -71,7 +74,7 @@
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        long seed = System.nanoTime();
+        long seed = nanoTime();
 
         //long seed = 754271160974509L; // CASSANDRA-9530: use this seed to reproduce compaction failures if reading empty rows
         //long seed = 2080431860597L; // CASSANDRA-12359: use this seed to reproduce undetected corruptions
@@ -108,7 +111,7 @@
     public static void closeStdErr()
     {
         // These tests generate an error message per CorruptSSTableException since it goes through
-        // DebuggableThreadPoolExecutor, which will log it in afterExecute.  We could stop that by
+        // ExecutorPlus, which will log it in afterExecute.  We could stop that by
         // creating custom CompactionStrategy and CompactionTask classes, but that's kind of a
         // ridiculous amount of effort, especially since those aren't really intended to be wrapped
         // like that.
@@ -162,7 +165,7 @@
                 maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);
                 inserted.add(key);
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
             CompactionsTest.assertMaxTimestamp(cfs, maxTimestampExpected);
             assertEquals(inserted.toString(), inserted.size(), Util.getAll(Util.cmd(cfs).build()).size());
         }
@@ -176,29 +179,29 @@
             if (currentSSTable + 1 > SSTABLES_TO_CORRUPT)
                 break;
 
-            RandomAccessFile raf = null;
+            FileChannel fc = null;
 
             try
             {
                 int corruptionSize = 100;
-                raf = new RandomAccessFile(sstable.getFilename(), "rw");
-                assertNotNull(raf);
-                assertTrue(raf.length() > corruptionSize);
-                long pos = random.nextInt((int)(raf.length() - corruptionSize));
-                logger.info("Corrupting sstable {} [{}] at pos {} / {}", currentSSTable, sstable.getFilename(), pos, raf.length());
-                raf.seek(pos);
+                fc = new File(sstable.getFilename()).newReadWriteChannel();
+                assertNotNull(fc);
+                assertTrue(fc.size() > corruptionSize);
+                long pos = random.nextInt((int)(fc.size() - corruptionSize));
+                logger.info("Corrupting sstable {} [{}] at pos {} / {}", currentSSTable, sstable.getFilename(), pos, fc.size());
+                fc.position(pos);
                 // We want to write something large enough that the corruption cannot get undetected
                 // (even without compression)
                 byte[] corruption = new byte[corruptionSize];
                 random.nextBytes(corruption);
-                raf.write(corruption);
+                fc.write(ByteBuffer.wrap(corruption));
                 if (ChunkCache.instance != null)
                     ChunkCache.instance.invalidateFile(sstable.getFilename());
 
             }
             finally
             {
-                FileUtils.closeQuietly(raf);
+                FileUtils.closeQuietly(fc);
             }
 
             currentSSTable++;
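
The corruption loop above drops RandomAccessFile in favour of a FileChannel obtained from the Cassandra File wrapper. A self-contained sketch of the same technique, using only the calls that appear in this hunk:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;

import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;

public class CorruptAtPositionSketch
{
    static void corrupt(String path, int corruptionSize) throws IOException
    {
        Random random = new Random();
        FileChannel fc = null;
        try
        {
            fc = new File(path).newReadWriteChannel();
            long pos = random.nextInt((int) (fc.size() - corruptionSize));
            byte[] garbage = new byte[corruptionSize];
            random.nextBytes(garbage);
            fc.position(pos);                    // replaces raf.seek(pos)
            fc.write(ByteBuffer.wrap(garbage));  // replaces raf.write(corruption)
        }
        finally
        {
            FileUtils.closeQuietly(fc);
        }
    }
}
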
diff --git a/test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java
index f75842d..4df210a 100644
--- a/test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java
@@ -231,9 +231,9 @@
                 .clustering("column")
                 .add("val", value).build().applyUnsafe();
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
 
@@ -267,9 +267,9 @@
                 .clustering("column")
                 .add("val", value).build().applyUnsafe();
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Iterable<SSTableReader> filtered;
         List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
@@ -304,7 +304,7 @@
             .clustering("column")
             .add("val", value).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
         Thread.sleep(10);
 
@@ -313,7 +313,7 @@
             .clustering("column")
             .add("val", value).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(cfs.getLiveSSTables().size(), 2);
 
         Map<String, String> options = new HashMap<>();
@@ -357,7 +357,7 @@
                     .clustering("column")
                     .add("val", bigValue).build().applyUnsafe();
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         // and small ones:
         for (int r = 0; r < numSSTables / 2; r++)
@@ -366,7 +366,7 @@
             new RowUpdateBuilder(cfs.metadata(), timestamp, key.getKey())
                 .clustering("column")
                 .add("val", value).build().applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         Map<String, String> options = new HashMap<>();
         options.put(SizeTieredCompactionStrategyOptions.MIN_SSTABLE_SIZE_KEY, "1");
diff --git a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
index 88ddb0b..07a45e7 100644
--- a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
@@ -30,7 +30,6 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
-import java.util.UUID;
 import java.util.stream.Collectors;
 
 import com.google.common.collect.Iterables;
@@ -57,20 +56,23 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.notifications.SSTableAddedNotification;
 import org.apache.cassandra.notifications.SSTableRepairStatusChanged;
-import org.apache.cassandra.repair.RepairJobDesc;
 import org.apache.cassandra.repair.ValidationManager;
+import org.apache.cassandra.repair.state.ValidationState;
+import org.apache.cassandra.schema.MockSchema;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.repair.RepairJobDesc;
 import org.apache.cassandra.repair.Validator;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MockSchema;
 import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
 import org.awaitility.Awaitility;
 
 import static java.util.Collections.singleton;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -122,7 +124,7 @@
      */
     @Test
     public void testGrouperLevels() throws Exception{
-        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KiB value, make it easy to have multiple files
 
         //Need entropy to prevent compression so size is predictable with compression enabled/disabled
         new Random().nextBytes(value.array());
@@ -138,7 +140,7 @@
             for (int c = 0; c < columns; c++)
                 update.newRow("column" + c).add("val", value);
             update.applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         waitForLeveling(cfs);
@@ -180,7 +182,7 @@
     {
         byte [] b = new byte[100 * 1024];
         new Random().nextBytes(b);
-        ByteBuffer value = ByteBuffer.wrap(b); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(b); // 100 KiB value, make it easy to have multiple files
 
         // Enough data to have a level 1 and 2
         int rows = 40;
@@ -193,7 +195,7 @@
             for (int c = 0; c < columns; c++)
                 update.newRow("column" + c).add("val", value);
             update.applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         waitForLeveling(cfs);
@@ -204,7 +206,7 @@
 
         Range<Token> range = new Range<>(Util.token(""), Util.token(""));
         int gcBefore = keyspace.getColumnFamilyStore(CF_STANDARDDLEVELED).gcBefore(FBUtilities.nowInSeconds());
-        UUID parentRepSession = UUID.randomUUID();
+        TimeUUID parentRepSession = nextTimeUUID();
         ActiveRepairService.instance.registerParentRepairSession(parentRepSession,
                                                                  FBUtilities.getBroadcastAddressAndPort(),
                                                                  Arrays.asList(cfs),
@@ -213,8 +215,8 @@
                                                                  ActiveRepairService.UNREPAIRED_SSTABLE,
                                                                  true,
                                                                  PreviewKind.NONE);
-        RepairJobDesc desc = new RepairJobDesc(parentRepSession, UUID.randomUUID(), KEYSPACE1, CF_STANDARDDLEVELED, Arrays.asList(range));
-        Validator validator = new Validator(desc, FBUtilities.getBroadcastAddressAndPort(), gcBefore, PreviewKind.NONE);
+        RepairJobDesc desc = new RepairJobDesc(parentRepSession, nextTimeUUID(), KEYSPACE1, CF_STANDARDDLEVELED, Arrays.asList(range));
+        Validator validator = new Validator(new ValidationState(desc, FBUtilities.getBroadcastAddressAndPort()), gcBefore, PreviewKind.NONE);
 
         ValidationManager.instance.submitValidation(cfs, validator).get();
     }
@@ -267,7 +269,7 @@
             for (int c = 0; c < columns; c++)
                 update.newRow("column" + c).add("val", value);
             update.applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         waitForLeveling(cfs);
@@ -291,7 +293,7 @@
     public void testMutateLevel() throws Exception
     {
         cfs.disableAutoCompaction();
-        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KiB value, make it easy to have multiple files
 
         // Enough data to have a level 1 and 2
         int rows = 40;
@@ -304,9 +306,9 @@
             for (int c = 0; c < columns; c++)
                 update.newRow("column" + c).add("val", value);
             update.applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         LeveledCompactionStrategy strategy = (LeveledCompactionStrategy) cfs.getCompactionStrategyManager().getStrategies().get(1).get(0);
         cfs.forceMajorCompaction();
 
@@ -332,7 +334,7 @@
     {
         byte [] b = new byte[100 * 1024];
         new Random().nextBytes(b);
-        ByteBuffer value = ByteBuffer.wrap(b); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(b); // 100 KiB value, make it easy to have multiple files
 
         // Enough data to have a level 1 and 2
         int rows = 40;
@@ -345,7 +347,7 @@
             for (int c = 0; c < columns; c++)
                 update.newRow("column" + c).add("val", value);
             update.applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         waitForLeveling(cfs);
         cfs.disableAutoCompaction();
@@ -400,7 +402,7 @@
         // Disable auto compaction so cassandra does not compact
         CompactionManager.instance.disableAutoCompaction();
 
-        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files
+        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KiB value, make it easy to have multiple files
 
         DecoratedKey key1 = Util.dk(String.valueOf(1));
         DecoratedKey key2 = Util.dk(String.valueOf(2));
@@ -420,7 +422,7 @@
                     update.newRow("column" + c).add("val", value);
                 update.applyUnsafe();
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         // create 20 more sstables with 10 containing data for key1 and other 10 containing data for key2
@@ -430,7 +432,7 @@
                 for (int c = 0; c < columns; c++)
                     update.newRow("column" + c).add("val", value);
                 update.applyUnsafe();
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
             }
         }
 
@@ -470,7 +472,7 @@
                     update.newRow("column" + c).add("val", value);
                 update.applyUnsafe();
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         // create 20 more sstables with 10 containing data for key1 and other 10 containing data for key2
@@ -482,7 +484,7 @@
                 for (int c = 0; c < columns; c++)
                     update.newRow("column" + c).add("val", value);
                 update.applyUnsafe();
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
             }
         }
 
@@ -526,7 +528,7 @@
             for (int c = 0; c < columns; c++)
                 update.newRow("column" + c).add("val", value);
             update.applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         LeveledCompactionStrategy strategy = (LeveledCompactionStrategy) (cfs.getCompactionStrategyManager()).getStrategies().get(1).get(0);
         // get readers for level 0 sstables
@@ -768,6 +770,53 @@
         }
         return newLevels;
     }
+    @Test
+    public void testPerLevelSizeBytes() throws IOException
+    {
+        byte [] b = new byte[100];
+        new Random().nextBytes(b);
+        ByteBuffer value = ByteBuffer.wrap(b);
+        int rows = 5;
+        int columns = 5;
+
+        cfs.disableAutoCompaction();
+        for (int r = 0; r < rows; r++)
+        {
+            UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
+            for (int c = 0; c < columns; c++)
+                update.newRow("column" + c).add("val", value);
+            update.applyUnsafe();
+        }
+        Util.flush(cfs);
+
+        SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
+        long [] levelSizes = cfs.getPerLevelSizeBytes();
+        for (int i = 0; i < levelSizes.length; i++)
+        {
+            if (i != 0)
+                assertEquals(0, levelSizes[i]);
+            else
+                assertEquals(sstable.onDiskLength(), levelSizes[i]);
+        }
+
+        assertEquals(sstable.onDiskLength(), cfs.getPerLevelSizeBytes()[0]);
+
+        LeveledCompactionStrategy strategy = (LeveledCompactionStrategy) (cfs.getCompactionStrategyManager()).getStrategies().get(1).get(0);
+        strategy.manifest.remove(sstable);
+        sstable.descriptor.getMetadataSerializer().mutateLevel(sstable.descriptor, 2);
+        sstable.reloadSSTableMetadata();
+        strategy.manifest.addSSTables(Collections.singleton(sstable));
+
+        levelSizes = cfs.getPerLevelSizeBytes();
+        for (int i = 0; i < levelSizes.length; i++)
+        {
+            if (i != 2)
+                assertEquals(0, levelSizes[i]);
+            else
+                assertEquals(sstable.onDiskLength(), levelSizes[i]);
+        }
+
+    }
 
     /**
      * brute-force checks if the new sstables can be added to the correct level in manifest
diff --git a/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java b/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java
index 0e20d63..d47ffd2 100644
--- a/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java
@@ -37,7 +37,7 @@
 import org.apache.cassandra.schema.MockSchema;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
-import static junit.framework.Assert.assertFalse;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -194,6 +194,6 @@
 
     private void print(SSTableReader sstable)
     {
-        System.out.println(String.format("%d %s %s %d", sstable.descriptor.generation, sstable.first, sstable.last, sstable.getSSTableLevel()));
+        System.out.println(String.format("%s %s %s %d", sstable.descriptor.id, sstable.first, sstable.last, sstable.getSSTableLevel()));
     }
 }
diff --git a/test/unit/org/apache/cassandra/db/compaction/NeverPurgeTest.java b/test/unit/org/apache/cassandra/db/compaction/NeverPurgeTest.java
index a5388ca..4253731 100644
--- a/test/unit/org/apache/cassandra/db/compaction/NeverPurgeTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/NeverPurgeTest.java
@@ -23,6 +23,7 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -74,13 +75,13 @@
             {
                 execute("INSERT INTO %s (a, b, c) VALUES (" + j + ", 2, '3')");
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         execute("UPDATE %s SET c = null WHERE a=1 AND b=2");
         execute("DELETE FROM %s WHERE a=2 AND b=2");
         execute("DELETE FROM %s WHERE a=3");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.enableAutoCompaction();
         while (cfs.getLiveSSTables().size() > 1 || !cfs.getTracker().getCompacting().isEmpty())
             Thread.sleep(100);
@@ -94,7 +95,7 @@
         execute("INSERT INTO %s (a, b, c) VALUES (1, 2, '3')");
         execute(deletionStatement);
         Thread.sleep(1000);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.forceMajorCompaction();
         verifyContainsTombstones(cfs.getLiveSSTables(), 1);
     }
diff --git a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
index 0c469dc..9bb2abd 100644
--- a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
@@ -71,7 +71,7 @@
                 .applyUnsafe();
 
             inserted.add(key);
-            store.forceBlockingFlush();
+            Util.flush(store);
             assertEquals(inserted.size(), Util.getAll(Util.cmd(store).build()).size());
         }
         CompactionManager.instance.performMaximal(store, false);
diff --git a/test/unit/org/apache/cassandra/db/compaction/PartialCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/PartialCompactionsTest.java
index 9cb3872..b922ca80 100644
--- a/test/unit/org/apache/cassandra/db/compaction/PartialCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/PartialCompactionsTest.java
@@ -144,7 +144,7 @@
             .build()
             .applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
     }
 
     private static void createTombstonesSSTable(ColumnFamilyStore cfs, int firstKey, int endKey)
@@ -153,7 +153,7 @@
         {
             RowUpdateBuilder.deleteRow(cfs.metadata(), 1, "key1", String.valueOf(i)).applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
     }
 
     private static class LimitableDataDirectory extends Directories.DataDirectory
@@ -188,7 +188,7 @@
             Keyspace keyspace = Keyspace.open(ks);
             ColumnFamilyStore store = keyspace.getColumnFamilyStore(cf);
             TableMetadataRef metadata = store.metadata;
-            keyspace.dropCf(metadata.id);
+            keyspace.dropCf(metadata.id, true);
             ColumnFamilyStore cfs = ColumnFamilyStore.createColumnFamilyStore(keyspace, cf, metadata, wrapDirectoriesOf(store), false, false, true);
             keyspace.initCfCustom(cfs);
         }
diff --git a/test/unit/org/apache/cassandra/db/compaction/PendingRepairManagerTest.java b/test/unit/org/apache/cassandra/db/compaction/PendingRepairManagerTest.java
index 9f4cf8d..d2b3693 100644
--- a/test/unit/org/apache/cassandra/db/compaction/PendingRepairManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/PendingRepairManagerTest.java
@@ -20,7 +20,6 @@
 
 import java.util.Collection;
 import java.util.Collections;
-import java.util.UUID;
 
 import com.google.common.collect.Lists;
 import org.junit.Assert;
@@ -30,7 +29,9 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.repair.consistent.LocalSessionAccessor;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class PendingRepairManagerTest extends AbstractPendingRepairTest
 {
@@ -42,7 +43,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -60,7 +61,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -79,7 +80,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -93,7 +94,7 @@
     @Test
     public void needsCleanupNoSession()
     {
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
         PendingRepairManager prm = new PendingRepairManager(cfs, null, false);
         Assert.assertTrue(prm.canCleanup(fakeID));
     }
@@ -103,7 +104,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -119,7 +120,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -137,7 +138,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -180,7 +181,7 @@
     public void getNextBackgroundTaskAllCleanup() throws Exception
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
 
         SSTableReader sstable = makeSSTable(true);
@@ -199,7 +200,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairID, false);
@@ -223,7 +224,7 @@
     public void userDefinedTaskTest()
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
-        UUID repairId = registerSession(cfs, true, true);
+        TimeUUID repairId = registerSession(cfs, true, true);
         SSTableReader sstable = makeSSTable(true);
         mutateRepaired(sstable, repairId, false);
         prm.addSSTable(sstable);
@@ -238,8 +239,8 @@
     public void mixedPendingSessionsTest()
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
-        UUID repairId = registerSession(cfs, true, true);
-        UUID repairId2 = registerSession(cfs, true, true);
+        TimeUUID repairId = registerSession(cfs, true, true);
+        TimeUUID repairId2 = registerSession(cfs, true, true);
         SSTableReader sstable = makeSSTable(true);
         SSTableReader sstable2 = makeSSTable(true);
 
@@ -282,7 +283,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
 
-        UUID repairID = registerSession(cfs, true, true);
+        TimeUUID repairID = registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
 
         Assert.assertFalse(prm.hasDataForSession(repairID));
@@ -297,7 +298,7 @@
     {
         PendingRepairManager prm = csm.getPendingRepairManagers().get(0);
         SSTableReader sstable = makeSSTable(false);
-        UUID id = UUID.randomUUID();
+        TimeUUID id = nextTimeUUID();
         mutateRepaired(sstable, id, false);
         prm.getOrCreate(sstable);
         cfs.truncateBlocking();
diff --git a/test/unit/org/apache/cassandra/db/compaction/SingleSSTableLCSTaskTest.java b/test/unit/org/apache/cassandra/db/compaction/SingleSSTableLCSTaskTest.java
index 61cf302..bb1d1f0 100644
--- a/test/unit/org/apache/cassandra/db/compaction/SingleSSTableLCSTaskTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/SingleSSTableLCSTaskTest.java
@@ -18,18 +18,21 @@
 
 package org.apache.cassandra.db.compaction;
 
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.Random;
 
 import org.apache.commons.lang3.StringUtils;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -42,7 +45,7 @@
         createTable("create table %s (id int primary key, t text) with compaction = {'class':'LeveledCompactionStrategy','single_sstable_uplevel':true}");
         ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
         execute("insert into %s (id, t) values (1, 'meep')");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
 
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstable, OperationType.COMPACTION))
@@ -95,7 +98,7 @@
                 execute("insert into %s (id, id2, t) values (?, ?, ?)", i, j, value);
             }
             if (i % 100 == 0)
-                cfs.forceBlockingFlush();
+                Util.flush(cfs);
         }
         // now we have a bunch of data in L0, first compaction will be a normal one, containing all sstables:
         LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) cfs.getCompactionStrategyManager().getUnrepairedUnsafe().first();
@@ -123,14 +126,15 @@
         createTable("create table %s (id int primary key, t text) with compaction = {'class':'LeveledCompactionStrategy','single_sstable_uplevel':true}");
         ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
         execute("insert into %s (id, t) values (1, 'meep')");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
 
         String filenameToCorrupt = sstable.descriptor.filenameFor(Component.STATS);
-        RandomAccessFile file = new RandomAccessFile(filenameToCorrupt, "rw");
-        file.seek(0);
-        file.writeBytes(StringUtils.repeat('z', 2));
-        file.close();
+        try (FileChannel fc = new File(filenameToCorrupt).newReadWriteChannel())
+        {
+            fc.position(0);
+            fc.write(ByteBufferUtil.bytes(StringUtils.repeat('z', 2)));
+        }
         boolean gotException = false;
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstable, OperationType.COMPACTION))
         {
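The corruption step above replaces RandomAccessFile with a FileChannel obtained from Cassandra's org.apache.cassandra.io.util.File wrapper. A self-contained plain-JDK sketch of the same operation, overwriting the first two bytes of a file in place (class and method names are illustrative only):

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Stand-alone equivalent of the corruption step: open the file for read/write,
// seek to offset 0 and overwrite the first two bytes with 'z' characters.
public final class OverwriteHeaderSketch
{
    public static void corrupt(Path statsFile) throws Exception
    {
        try (FileChannel fc = FileChannel.open(statsFile, StandardOpenOption.READ, StandardOpenOption.WRITE))
        {
            fc.position(0);
            fc.write(ByteBuffer.wrap(new byte[]{ 'z', 'z' }));
        }
    }
}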
diff --git a/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
index 00c4a86..fc98a9c 100644
--- a/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
@@ -28,6 +28,7 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.RowUpdateBuilder;
@@ -165,9 +166,9 @@
             new RowUpdateBuilder(cfs.metadata(), 0, key)
                 .clustering("column").add("val", value)
                 .build().applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
         Pair<List<SSTableReader>, Double> bucket;
diff --git a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
index c20316a..0e3fb76 100644
--- a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
@@ -29,6 +29,8 @@
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.ColumnFilter;
@@ -38,9 +40,7 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.sstable.ISSTableScanner;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.tools.SSTableExpiredBlockers;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -80,7 +80,7 @@
     {
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
         cfs.disableAutoCompaction();
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build());
         String key = "ttl";
         new RowUpdateBuilder(cfs.metadata(), 1L, 1, key)
                     .add("col1", ByteBufferUtil.EMPTY_BYTE_BUFFER)
@@ -91,7 +91,7 @@
                     .add("col2", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                     .build()
                     .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         new RowUpdateBuilder(cfs.metadata(), 2L, 1, key)
                     .add("col1", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                     .build()
@@ -102,7 +102,7 @@
                     .build()
                     .applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), 4L, 1, key)
                     .add("col1", ByteBufferUtil.EMPTY_BYTE_BUFFER)
@@ -114,7 +114,7 @@
                     .build()
                     .applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
 
         new RowUpdateBuilder(cfs.metadata(), 6L, 3, key)
@@ -127,7 +127,7 @@
                     .build()
                     .applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         Set<SSTableReader> sstables = Sets.newHashSet(cfs.getLiveSSTables());
         int now = (int)(System.currentTimeMillis() / 1000);
@@ -161,7 +161,7 @@
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
         // To reproduce #10944, we need our gcBefore to be equal to the localDeletionTime. A gcGrace of 1 will (almost always) give us that.
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(force10944Bug ? 1 : 0).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(force10944Bug ? 1 : 0).build());
         long timestamp = System.currentTimeMillis();
         String key = "ttl";
         new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
@@ -170,7 +170,7 @@
                         .build()
                         .applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
             .add("col2", ByteBufferUtil.EMPTY_BYTE_BUFFER)
@@ -180,7 +180,7 @@
             .applyUnsafe();
 
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         // To reproduce #10944, we need to avoid the optimization that gets rid of the full sstable because everything
         // is known to be gcAble, so keep some data non-expiring in that case.
         new RowUpdateBuilder(cfs.metadata(), timestamp, force10944Bug ? 0 : 1, key)
@@ -189,14 +189,14 @@
                     .applyUnsafe();
 
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
                             .add("col311", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                             .build()
                             .applyUnsafe();
 
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Thread.sleep(2000); // wait for ttl to expire
         assertEquals(4, cfs.getLiveSSTables().size());
         cfs.enableAutoCompaction(true);
@@ -209,7 +209,7 @@
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build());
         long timestamp = System.currentTimeMillis();
         String key = "ttl";
         new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
@@ -218,32 +218,32 @@
             .build()
             .applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
             .add("col2", ByteBufferUtil.EMPTY_BYTE_BUFFER)
             .build()
             .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
             .add("col3", ByteBufferUtil.EMPTY_BYTE_BUFFER)
             .build()
             .applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         String noTTLKey = "nottl";
         new RowUpdateBuilder(cfs.metadata(), timestamp, noTTLKey)
             .add("col311", ByteBufferUtil.EMPTY_BYTE_BUFFER)
             .build()
             .applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Thread.sleep(2000); // wait for ttl to expire
         assertEquals(4, cfs.getLiveSSTables().size());
         cfs.enableAutoCompaction(true);
         assertEquals(1, cfs.getLiveSSTables().size());
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
-        ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(cfs.metadata()),
-                                                     DataRange.allData(cfs.getPartitioner()),
-                                                     SSTableReadsListener.NOOP_LISTENER);
+        UnfilteredPartitionIterator scanner = sstable.partitionIterator(ColumnFilter.all(cfs.metadata()),
+                                                                        DataRange.allData(cfs.getPartitioner()),
+                                                                        SSTableReadsListener.NOOP_LISTENER);
         assertTrue(scanner.hasNext());
         while(scanner.hasNext())
         {
@@ -259,7 +259,7 @@
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build());
 
         new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), "test")
                 .noRowMarker()
@@ -267,7 +267,7 @@
                 .build()
                 .applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         SSTableReader blockingSSTable = cfs.getSSTables(SSTableSet.LIVE).iterator().next();
         for (int i = 0; i < 10; i++)
         {
@@ -276,7 +276,7 @@
                             .delete("col1")
                             .build()
                             .applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         Multimap<SSTableReader, SSTableReader> blockers = SSTableExpiredBlockers.checkForExpiredSSTableBlockers(cfs.getSSTables(SSTableSet.LIVE), (int) (System.currentTimeMillis() / 1000) + 100);
         assertEquals(1, blockers.keySet().size());
diff --git a/test/unit/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategyTest.java
index 88c6308..de60286 100644
--- a/test/unit/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategyTest.java
@@ -36,7 +36,6 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import org.apache.cassandra.SchemaLoader;
@@ -184,7 +183,7 @@
                 .clustering("column")
                 .add("val", value).build().applyUnsafe();
 
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         // Decrement the timestamp to simulate a timestamp in the past hour
         for (int r = 3; r < 5; r++)
@@ -194,10 +193,10 @@
             new RowUpdateBuilder(cfs.metadata(), r, key.getKey())
                 .clustering("column")
                 .add("val", value).build().applyUnsafe();
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         HashMultimap<Long, SSTableReader> buckets = HashMultimap.create();
         List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
@@ -240,7 +239,7 @@
                     .clustering("column")
                     .add("val", value).build().applyUnsafe();
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         // Reset the buckets, overfill it now
@@ -275,7 +274,7 @@
             .clustering("column")
             .add("val", value).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
         Thread.sleep(10);
 
@@ -285,7 +284,7 @@
             .clustering("column")
             .add("val", value).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(cfs.getLiveSSTables().size(), 2);
 
         Map<String, String> options = new HashMap<>();
@@ -327,7 +326,7 @@
             .clustering("column")
             .add("val", value).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
         Thread.sleep(10);
 
@@ -340,7 +339,7 @@
             .clustering("column")
             .add("val", value).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(cfs.getLiveSSTables().size(), 2);
 
         Map<String, String> options = new HashMap<>();
diff --git a/test/unit/org/apache/cassandra/db/compaction/ValidationExecutorTest.java b/test/unit/org/apache/cassandra/db/compaction/ValidationExecutorTest.java
index aef4bb6..a5f2072 100644
--- a/test/unit/org/apache/cassandra/db/compaction/ValidationExecutorTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/ValidationExecutorTest.java
@@ -23,7 +23,6 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
 
 import com.google.common.util.concurrent.Uninterruptibles;
 
@@ -33,7 +32,7 @@
 
 import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
@@ -65,7 +64,7 @@
     @Test
     public void testQueueOnValidationSubmission() throws InterruptedException
     {
-        Condition taskBlocked = new SimpleCondition();
+        CountDownLatch taskBlocked = new CountDownLatch(1);
         AtomicInteger threadsAvailable = new AtomicInteger(DatabaseDescriptor.getConcurrentValidations());
         CountDownLatch taskComplete = new CountDownLatch(5);
         validationExecutor = new CompactionManager.ValidationExecutor();
@@ -85,7 +84,7 @@
         Util.spinAssertEquals(2, () -> validationExecutor.getActiveTaskCount(), 1);
         assertEquals(3, validationExecutor.getPendingTaskCount());
 
-        taskBlocked.signalAll();
+        taskBlocked.countDown();
         taskComplete.await(10, TimeUnit.SECONDS);
         validationExecutor.shutdownNow();
     }
@@ -110,10 +109,10 @@
 
     private static class Task implements Runnable
     {
-        private final Condition blocked;
+        private final CountDownLatch blocked;
         private final CountDownLatch complete;
 
-        Task(Condition blocked, CountDownLatch complete)
+        Task(CountDownLatch blocked, CountDownLatch complete)
         {
             this.blocked = blocked;
             this.complete = complete;
@@ -121,7 +120,7 @@
 
         public void run()
         {
-            Uninterruptibles.awaitUninterruptibly(blocked, 10, TimeUnit.SECONDS);
+            Uninterruptibles.awaitUninterruptibly(blocked);
             complete.countDown();
         }
     }
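The ValidationExecutorTest change above swaps a SimpleCondition for a CountDownLatch. A self-contained plain-JDK sketch of the blocked/complete latch pattern the test relies on (class name is illustrative only):

import java.util.concurrent.CountDownLatch;

// The task parks on 'blocked' until the driver releases it, then signals 'complete'
// so the driver can wait for it to finish.
public final class LatchTaskSketch
{
    public static void main(String[] args) throws InterruptedException
    {
        CountDownLatch blocked = new CountDownLatch(1);
        CountDownLatch complete = new CountDownLatch(1);

        new Thread(() -> {
            try
            {
                blocked.await();      // equivalent of awaitUninterruptibly(blocked) in the test
            }
            catch (InterruptedException e)
            {
                Thread.currentThread().interrupt();
            }
            complete.countDown();     // mark the task as done
        }).start();

        blocked.countDown();          // release the task, like taskBlocked.countDown() above
        complete.await();             // wait for completion, like taskComplete.await(...)
    }
}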
diff --git a/test/unit/org/apache/cassandra/db/compaction/writers/CompactionAwareWriterTest.java b/test/unit/org/apache/cassandra/db/compaction/writers/CompactionAwareWriterTest.java
index 5e127dd..a641be1 100644
--- a/test/unit/org/apache/cassandra/db/compaction/writers/CompactionAwareWriterTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/writers/CompactionAwareWriterTest.java
@@ -17,13 +17,13 @@
  */
 package org.apache.cassandra.db.compaction.writers;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.*;
 
 import com.google.common.primitives.Longs;
 import org.junit.*;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.db.*;
@@ -33,10 +33,12 @@
 import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.MockSchema;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.db.compaction.OperationType.COMPACTION;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 
 public class CompactionAwareWriterTest extends CQLTester
@@ -208,7 +210,7 @@
         int nowInSec = FBUtilities.nowInSeconds();
         try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
              CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(nowInSec));
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, scanners.scanners, controller, nowInSec, nextTimeUUID()))
         {
             while (ci.hasNext())
             {
@@ -231,7 +233,7 @@
                 execute(String.format("INSERT INTO %s.%s(k, t, v) VALUES (?, ?, ?)", KEYSPACE, TABLE), i, j, b);
 
         ColumnFamilyStore cfs = getColumnFamilyStore();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         if (cfs.getLiveSSTables().size() > 1)
         {
             // we want just one big sstable to avoid doing actual compaction in compact() above
diff --git a/test/unit/org/apache/cassandra/db/filter/ColumnFilterTest.java b/test/unit/org/apache/cassandra/db/filter/ColumnFilterTest.java
index 86b81e5..b23aa07 100644
--- a/test/unit/org/apache/cassandra/db/filter/ColumnFilterTest.java
+++ b/test/unit/org/apache/cassandra/db/filter/ColumnFilterTest.java
@@ -92,8 +92,12 @@
     @BeforeClass
     public static void beforeClass()
     {
+        // Gossiper touches StorageService, which touches StreamManager, which requires configs to be set up
+        DatabaseDescriptor.daemonInitialization();
         DatabaseDescriptor.setSeedProvider(Arrays::asList);
         DatabaseDescriptor.setEndpointSnitch(new SimpleSnitch());
+        DatabaseDescriptor.setDefaultFailureDetector();
+        DatabaseDescriptor.setPartitionerUnsafe(new Murmur3Partitioner());
         Gossiper.instance.start(0);
     }
 
diff --git a/test/unit/org/apache/cassandra/db/filter/SliceTest.java b/test/unit/org/apache/cassandra/db/filter/SliceTest.java
index 77c0ec2..c83880c 100644
--- a/test/unit/org/apache/cassandra/db/filter/SliceTest.java
+++ b/test/unit/org/apache/cassandra/db/filter/SliceTest.java
@@ -21,7 +21,6 @@
 
 import java.nio.ByteBuffer;
 import java.util.*;
-import java.util.List;
 
 import org.apache.cassandra.db.*;
 import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailAllowFilteringTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailAllowFilteringTest.java
new file mode 100644
index 0000000..c46498c
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailAllowFilteringTest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.SchemaKeyspaceTables;
+
+public class GuardrailAllowFilteringTest extends GuardrailTester
+{
+    private boolean enableState;
+
+    @Before
+    public void setupTest()
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, a int, b int)");
+        enableState = getGuardrail();
+    }
+
+    @After
+    public void teardownTest()
+    {
+        setGuardrail(enableState);
+    }
+
+    private void setGuardrail(boolean allowFilteringEnabled)
+    {
+        guardrails().setAllowFilteringEnabled(allowFilteringEnabled);
+    }
+
+    private boolean getGuardrail()
+    {
+        return guardrails().getAllowFilteringEnabled();
+    }
+
+    @Test
+    public void testAllowFilteringDisabled() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("SELECT * FROM %s WHERE a = 5 ALLOW FILTERING", "Querying with ALLOW FILTERING is not allowed");
+    }
+
+    @Test
+    public void testAllowFilteringDisabledNotUsed() throws Throwable
+    {
+        setGuardrail(false);
+        execute("INSERT INTO %s (k, a, b) VALUES (1, 1, 1)");
+        assertValid("SELECT * FROM %s");
+    }
+
+    @Test
+    public void testAllowFilteringEnabled() throws Throwable
+    {
+        setGuardrail(true);
+        execute("INSERT INTO %s (k, a, b) VALUES (1, 1, 1)");
+        assertValid("SELECT * FROM %s WHERE a = 5 ALLOW FILTERING");
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        setGuardrail(false);
+        testExcludedUsers(() -> "SELECT * FROM %s WHERE a = 5 ALLOW FILTERING");
+    }
+
+    @Test
+    public void testSystemTable() throws Throwable
+    {
+        setGuardrail(false);
+        assertValid(String.format("SELECT * FROM %s.%s WHERE table_name = '%s' ALLOW FILTERING",
+                                  SchemaConstants.SCHEMA_KEYSPACE_NAME,
+                                  SchemaKeyspaceTables.TABLES,
+                                  currentTable()));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailAllowUncompressedTablesTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailAllowUncompressedTablesTest.java
new file mode 100644
index 0000000..27f13f1
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailAllowUncompressedTablesTest.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+
+public class GuardrailAllowUncompressedTablesTest extends GuardrailTester
+{
+    private void setGuardrail(boolean enabled)
+    {
+        guardrails().setUncompressedTablesEnabled(enabled);
+    }
+
+    /**
+     * If uncompressed tables are enabled by the guardrail, creating tables with compression disabled should work
+     */
+    @Test
+    public void createSuccess()
+    {
+        setGuardrail(true);
+        String table = createTableName();
+        schemaChange(String.format("CREATE TABLE %s.%s (k int primary key, v int) WITH compression={'sstable_compression':''}", KEYSPACE, table));
+        TableMetadata tmd = Schema.instance.getTableMetadata(KEYSPACE, table);
+        Assert.assertFalse(tmd.params.compression.isEnabled());
+    }
+
+    /**
+     * If uncompressed tables are disabled by the guardrail, creating tables with compression disabled should fail
+     */
+    @Test
+    public void createFailure() throws Throwable
+    {
+        setGuardrail(false);
+        String table = createTableName();
+        assertFails(String.format("CREATE TABLE %s.%s (k int primary key, v int) WITH compression={'sstable_compression':''}", KEYSPACE, table), "Uncompressed table is not allowed");
+    }
+
+    @Test
+    public void alterSuccess()
+    {
+        setGuardrail(true);
+        String table = createTableName();
+        schemaChange(String.format("CREATE TABLE %s.%s (k int primary key, v int)", KEYSPACE, table));
+        schemaChange(String.format("ALTER TABLE %s.%s WITH compression = {'sstable_compression': ''}", KEYSPACE, table));
+        TableMetadata tmd = Schema.instance.getTableMetadata(KEYSPACE, table);
+        Assert.assertFalse(tmd.params.compression.isEnabled());
+    }
+
+    @Test
+    public void alterFailure() throws Throwable
+    {
+        setGuardrail(false);
+        String table = createTableName();
+        schemaChange(String.format("CREATE TABLE %s.%s (k int primary key, v int)", KEYSPACE, table));
+        assertFails(String.format("ALTER TABLE %s.%s WITH compression = {'sstable_compression': ''}", KEYSPACE, table), "Uncompressed table is not allowed");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailCollectionSizeTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailCollectionSizeTest.java
new file mode 100644
index 0000000..1483e81
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailCollectionSizeTest.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collections;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.junit.After;
+import org.junit.Test;
+
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.ListType;
+import org.apache.cassandra.db.marshal.MapType;
+import org.apache.cassandra.db.marshal.SetType;
+
+import static java.nio.ByteBuffer.allocate;
+
+/**
+ * Tests the guardrail for the size of collections, {@link Guardrails#collectionSize}.
+ * <p>
+ * This test doesn't include the activation of the guardrail during sstable writes; those cases are covered by the dtest
+ * {@link org.apache.cassandra.distributed.test.guardrails.GuardrailCollectionSizeOnSSTableWriteTest}.
+ */
+public class GuardrailCollectionSizeTest extends ThresholdTester
+{
+    private static final int WARN_THRESHOLD = 1024; // bytes
+    private static final int FAIL_THRESHOLD = WARN_THRESHOLD * 4; // bytes
+
+    public GuardrailCollectionSizeTest()
+    {
+        super(WARN_THRESHOLD + "B",
+              FAIL_THRESHOLD + "B",
+              Guardrails.collectionSize,
+              Guardrails::setCollectionSizeThreshold,
+              Guardrails::getCollectionSizeWarnThreshold,
+              Guardrails::getCollectionSizeFailThreshold);
+    }
+
+    @After
+    public void after()
+    {
+        // immediately drop the created table so its async cleanup doesn't interfere with the next tests
+        if (currentTable() != null)
+            dropTable("DROP TABLE %s");
+    }
+
+    @Test
+    public void testSetSize() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v set<blob>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", set());
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", set(allocate(1)));
+        assertValid("INSERT INTO %s (k, v) VALUES (3, ?)", set(allocate(WARN_THRESHOLD / 2)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (4, ?)", set(allocate(WARN_THRESHOLD)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", set(allocate(WARN_THRESHOLD / 4), allocate(WARN_THRESHOLD * 3 / 4)));
+
+        assertFails("INSERT INTO %s (k, v) VALUES (6, ?)", set(allocate(FAIL_THRESHOLD)));
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", set(allocate(FAIL_THRESHOLD / 4), allocate(FAIL_THRESHOLD * 3 / 4)));
+    }
+
+    @Test
+    public void testSetSizeFrozen() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v frozen<set<blob>>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", set());
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", set(allocate(1)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (4, ?)", set(allocate(WARN_THRESHOLD)));
+        assertFails("INSERT INTO %s (k, v) VALUES (5, ?)", set(allocate(FAIL_THRESHOLD)));
+    }
+
+    @Test
+    public void testSetSizeWithUpdates() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v set<blob>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, ?)", set(allocate(1)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 0", set(allocate(1)));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", set(allocate(WARN_THRESHOLD / 4)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 1", set(allocate(WARN_THRESHOLD * 3 / 4)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (2, ?)", set(allocate(FAIL_THRESHOLD / 4)));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 2", set(allocate(FAIL_THRESHOLD * 3 / 4)));
+    }
+
+    @Test
+    public void testListSize() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v list<blob>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", list());
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", list(allocate(1)));
+        assertValid("INSERT INTO %s (k, v) VALUES (3, ?)", list(allocate(WARN_THRESHOLD / 2)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (4, ?)", list(allocate(WARN_THRESHOLD)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", list(allocate(WARN_THRESHOLD / 2), allocate(WARN_THRESHOLD / 2)));
+
+        assertFails("INSERT INTO %s (k, v) VALUES (6, ?)", list(allocate(FAIL_THRESHOLD)));
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", list(allocate(FAIL_THRESHOLD / 2), allocate(FAIL_THRESHOLD / 2)));
+    }
+
+    @Test
+    public void testListSizeFrozen() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v frozen<list<blob>>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", list());
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", list(allocate(1)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (4, ?)", list(allocate(WARN_THRESHOLD)));
+        assertFails("INSERT INTO %s (k, v) VALUES (5, ?)", list(allocate(FAIL_THRESHOLD)));
+    }
+
+    @Test
+    public void testListSizeWithUpdates() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v list<blob>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, ?)", list(allocate(1)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 0", list(allocate(1)));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", list(allocate(WARN_THRESHOLD / 2)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 1", list(allocate(WARN_THRESHOLD / 2)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (2, ?)", list(allocate(FAIL_THRESHOLD / 2)));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 2", list(allocate(FAIL_THRESHOLD / 2)));
+    }
+
+    @Test
+    public void testMapSize() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v map<blob, blob>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", map());
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", map(allocate(1), allocate(1)));
+        assertValid("INSERT INTO %s (k, v) VALUES (3, ?)", map(allocate(1), allocate(WARN_THRESHOLD / 2)));
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", map(allocate(WARN_THRESHOLD / 2), allocate(1)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", map(allocate(WARN_THRESHOLD), allocate(1)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", map(allocate(1), allocate(WARN_THRESHOLD)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (7, ?)", map(allocate(WARN_THRESHOLD), allocate(WARN_THRESHOLD)));
+
+        assertFails("INSERT INTO %s (k, v) VALUES (8, ?)", map(allocate(FAIL_THRESHOLD), allocate(1)));
+        assertFails("INSERT INTO %s (k, v) VALUES (9, ?)", map(allocate(1), allocate(FAIL_THRESHOLD)));
+        assertFails("INSERT INTO %s (k, v) VALUES (10, ?)", map(allocate(FAIL_THRESHOLD), allocate(FAIL_THRESHOLD)));
+    }
+
+    @Test
+    public void testMapSizeFrozen() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v frozen<map<blob, blob>>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", map());
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", map(allocate(1), allocate(1)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (4, ?)", map(allocate(1), allocate(WARN_THRESHOLD)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", map(allocate(WARN_THRESHOLD), allocate(1)));
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", map(allocate(WARN_THRESHOLD), allocate(WARN_THRESHOLD)));
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", map(allocate(1), allocate(FAIL_THRESHOLD)));
+        assertFails("INSERT INTO %s (k, v) VALUES (8, ?)", map(allocate(FAIL_THRESHOLD), allocate(1)));
+        assertFails("INSERT INTO %s (k, v) VALUES (9, ?)", map(allocate(FAIL_THRESHOLD), allocate(FAIL_THRESHOLD)));
+    }
+
+    @Test
+    public void testMapSizeWithUpdates() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v map<blob, blob>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, ?)", map(allocate(1), allocate(1)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 0", map(allocate(1), allocate(1)));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", map(allocate(1), allocate(WARN_THRESHOLD / 2)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 1", map(allocate(2), allocate(WARN_THRESHOLD / 2)));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", map(allocate(WARN_THRESHOLD / 4), allocate(1)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 2", map(allocate(WARN_THRESHOLD * 3 / 4), allocate(1)));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (3, ?)", map(allocate(WARN_THRESHOLD / 4), allocate(WARN_THRESHOLD / 4)));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 3", map(allocate(WARN_THRESHOLD / 4 + 1), allocate(WARN_THRESHOLD / 4)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (4, ?)", map(allocate(1), allocate(FAIL_THRESHOLD / 2)));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 4", map(allocate(2), allocate(FAIL_THRESHOLD / 2)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", map(allocate(FAIL_THRESHOLD / 4), allocate(1)));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 5", map(allocate(FAIL_THRESHOLD * 3 / 4), allocate(1)));
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", map(allocate(FAIL_THRESHOLD / 4), allocate(FAIL_THRESHOLD / 4)));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 6", map(allocate(FAIL_THRESHOLD / 4 + 1), allocate(FAIL_THRESHOLD / 4)));
+    }
+
+    @Override
+    protected String createTable(String query)
+    {
+        String table = super.createTable(query);
+        disableCompaction();
+        return table;
+    }
+
+    private void assertValid(String query, ByteBuffer... values) throws Throwable
+    {
+        assertValid(execute(query, values));
+    }
+
+    private void assertWarns(String query, ByteBuffer... values) throws Throwable
+    {
+        assertWarns(execute(query, values), "Detected collection v");
+    }
+
+    private void assertFails(String query, ByteBuffer... values) throws Throwable
+    {
+        assertFails(execute(query, values), "Detected collection v");
+    }
+
+    private CheckedFunction execute(String query, ByteBuffer... values)
+    {
+        return () -> execute(userClientState, query, Arrays.asList(values));
+    }
+
+    private static ByteBuffer set(ByteBuffer... values)
+    {
+        return SetType.getInstance(BytesType.instance, true).decompose(ImmutableSet.copyOf(values));
+    }
+
+    private static ByteBuffer list(ByteBuffer... values)
+    {
+        return ListType.getInstance(BytesType.instance, true).decompose(ImmutableList.copyOf(values));
+    }
+
+    private ByteBuffer map()
+    {
+        return MapType.getInstance(BytesType.instance, BytesType.instance, true).decompose(Collections.emptyMap());
+    }
+
+    private ByteBuffer map(ByteBuffer key, ByteBuffer value)
+    {
+        return MapType.getInstance(BytesType.instance, BytesType.instance, true).decompose(ImmutableMap.of(key, value));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailColumnsPerTableTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailColumnsPerTableTest.java
new file mode 100644
index 0000000..0c47335
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailColumnsPerTableTest.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for the number of columns in a table, {@link Guardrails#columnsPerTable}.
+ */
+public class GuardrailColumnsPerTableTest extends ThresholdTester
+{
+    private static final int COLUMNS_PER_TABLE_WARN_THRESHOLD = 2;
+    private static final int COLUMNS_PER_TABLE_FAIL_THRESHOLD = 4;
+
+    public GuardrailColumnsPerTableTest()
+    {
+        super(COLUMNS_PER_TABLE_WARN_THRESHOLD,
+              COLUMNS_PER_TABLE_FAIL_THRESHOLD,
+              Guardrails.columnsPerTable,
+              Guardrails::setColumnsPerTableThreshold,
+              Guardrails::getColumnsPerTableWarnThreshold,
+              Guardrails::getColumnsPerTableFailThreshold);
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        return getCurrentColumnFamilyStore().metadata().columns().size();
+    }
+
+    @Test
+    public void testCreateTable() throws Throwable
+    {
+        // partition key on skinny table
+        assertCreateTableValid("CREATE TABLE %s (k1 int, v int, PRIMARY KEY((k1)))");
+        assertCreateTableWarns(3, "CREATE TABLE %s (k1 int, k2 int, v int, PRIMARY KEY((k1, k2)))");
+        assertCreateTableWarns(4, "CREATE TABLE %s (k1 int, k2 int, k3 int, v int, PRIMARY KEY((k1, k2, k3)))");
+        assertCreateTableFails(5, "CREATE TABLE %s (k1 int, k2 int, k3 int, k4 int, v int, PRIMARY KEY((k1, k2, k3, k4)))");
+        assertCreateTableFails(6, "CREATE TABLE %s (k1 int, k2 int, k3 int, k4 int, k5 int, v int, PRIMARY KEY((k1, k2, k3, k4, k5)))");
+
+        // partition key on wide table
+        assertCreateTableWarns(3, "CREATE TABLE %s (k1 int, c int, v int, PRIMARY KEY(k1, c))");
+        assertCreateTableWarns(4, "CREATE TABLE %s (k1 int, k2 int, c int, v int, PRIMARY KEY((k1, k2), c))");
+        assertCreateTableFails(5, "CREATE TABLE %s (k1 int, k2 int, k3 int, c int, v int, PRIMARY KEY((k1, k2, k3), c))");
+        assertCreateTableFails(6, "CREATE TABLE %s (k1 int, k2 int, k3 int, k4 int, c int, v int, PRIMARY KEY((k1, k2, k3, k4), c))");
+
+        // clustering key
+        assertCreateTableWarns(3, "CREATE TABLE %s (k int, c1 int, v int, PRIMARY KEY(k, c1))");
+        assertCreateTableWarns(4, "CREATE TABLE %s (k int, c1 int, c2 int, v int, PRIMARY KEY(k, c1, c2))");
+        assertCreateTableFails(5, "CREATE TABLE %s (k int, c1 int, c2 int, c3 int, v int, PRIMARY KEY(k, c1, c2, c3))");
+        assertCreateTableFails(6, "CREATE TABLE %s (k int, c1 int, c2 int, c3 int, c4 int, v int, PRIMARY KEY(k, c1, c2, c3, c4))");
+
+        // static column
+        assertCreateTableWarns(3, "CREATE TABLE %s (k int, c int, s1 int STATIC, PRIMARY KEY(k, c))");
+        assertCreateTableWarns(4, "CREATE TABLE %s (k int, c int, s1 int STATIC, s2 int STATIC, PRIMARY KEY(k, c))");
+        assertCreateTableFails(5, "CREATE TABLE %s (k int, c int, s1 int STATIC, s2 int STATIC, s3 int STATIC, PRIMARY KEY(k, c))");
+        assertCreateTableFails(6, "CREATE TABLE %s (k int, c int, s1 int STATIC, s2 int STATIC, s3 int STATIC, s4 int STATIC, PRIMARY KEY(k, c))");
+
+        // regular column on skinny table
+        assertCreateTableValid("CREATE TABLE %s (k int PRIMARY KEY, v1 int)");
+        assertCreateTableWarns(3, "CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int)");
+        assertCreateTableWarns(4, "CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int)");
+        assertCreateTableFails(5, "CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)");
+        assertCreateTableFails(6, "CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int, v5 int)");
+
+        // regular column on wide table
+        assertCreateTableWarns(3, "CREATE TABLE %s (k int, c int, v1 int, PRIMARY KEY(k, c))");
+        assertCreateTableWarns(4, "CREATE TABLE %s (k int, c int, v1 int, v2 int, PRIMARY KEY(k, c))");
+        assertCreateTableFails(5, "CREATE TABLE %s (k int, c int, v1 int, v2 int, v3 int, PRIMARY KEY(k, c))");
+        assertCreateTableFails(6, "CREATE TABLE %s (k int, c int, v1 int, v2 int, v3 int, v4 int, PRIMARY KEY(k, c))");
+
+        // udt
+        String udt = createType("CREATE TYPE %s (a int, b int, c int, d int)");
+        assertValid(format("CREATE TABLE %s (k int PRIMARY KEY, v %s)", createTableName(), udt));
+    }
+
+    @Test
+    public void testAlterTableAddColumn() throws Throwable
+    {
+        // skinny table under threshold
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int)");
+        assertAddColumnWarns("ALTER TABLE %s ADD v2 int");
+        assertAddColumnWarns("ALTER TABLE %s ADD v3 int");
+        assertAddColumnFails("ALTER TABLE %s ADD v4 int");
+
+        // skinny table at threshold
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int)");
+        assertAddColumnFails("ALTER TABLE %s ADD v4 int");
+
+        // wide table
+        createTable("CREATE TABLE %s (k int, c int, v1 int, v2 int, PRIMARY KEY(k, c))");
+        assertAddColumnFails("ALTER TABLE %s ADD v3 int");
+        assertAddColumnFails("ALTER TABLE %s ADD s int STATIC");
+
+        // udt
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int)");
+        String udt = createType("CREATE TYPE %s (a int, b int, c int, d int)");
+        assertAddColumnWarns("ALTER TABLE %s ADD v2 " + udt);
+    }
+
+    /**
+     * Verifies that it's possible to drop columns from a table that has more columns than the current threshold.
+     */
+    @Test
+    public void testAlterTableDropColumn() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int)");
+        guardrails().setColumnsPerTableThreshold(2, 2);
+        assertDropColumnValid("ALTER TABLE %s DROP v2");
+        assertDropColumnValid("ALTER TABLE %s DROP v3");
+        assertAddColumnFails("ALTER TABLE %s ADD v2 int");
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        String table = keyspace() + '.' + createTableName();
+        testExcludedUsers(() -> format("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)", table),
+                          () -> format("ALTER TABLE %s ADD v5 int", table),
+                          () -> format("DROP TABLE %s", table));
+    }
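+
+    // For reference: the CREATE TABLE issued here has 5 columns, which exceeds the fail threshold for
+    // regular users; testExcludedUsers verifies that excluded users (superuser and internal queries)
+    // may run it regardless of the guardrail.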
+
+    private void assertCreateTableValid(String query) throws Throwable
+    {
+        assertMaxThresholdValid(format(query, keyspace() + '.' + createTableName()));
+    }
+
+    private void assertDropColumnValid(String query) throws Throwable
+    {
+        assertValid(format(query, keyspace() + '.' + currentTable()));
+    }
+
+    private void assertCreateTableWarns(int numColumns, String query) throws Throwable
+    {
+        assertWarns(numColumns, query, createTableName());
+    }
+
+    private void assertAddColumnWarns(String query) throws Throwable
+    {
+        assertWarns(currentValue() + 1, query, currentTable());
+    }
+
+    private void assertWarns(long numColumns, String query, String tableName) throws Throwable
+    {
+        assertThresholdWarns(format(query, keyspace() + '.' + tableName),
+                             format("The table %s has %s columns, this exceeds the warning threshold of %s.",
+                                    tableName,
+                                    numColumns,
+                                    guardrails().getColumnsPerTableWarnThreshold())
+        );
+    }
+
+    private void assertAddColumnFails(String query) throws Throwable
+    {
+        assertFails(currentValue() + 1, query, currentTable());
+    }
+
+    private void assertCreateTableFails(long numColumns, String query) throws Throwable
+    {
+        assertFails(numColumns, query, FAIL_TABLE);
+    }
+
+    private void assertFails(long numColumns, String query, String tableName) throws Throwable
+    {
+        assertThresholdFails(format(query, keyspace() + '.' + tableName),
+                             format("Tables cannot have more than %s columns, but %s provided for table %s",
+                                    guardrails().getColumnsPerTableFailThreshold(),
+                                    numColumns,
+                                    tableName)
+        );
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailConsistencyLevelsTester.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailConsistencyLevelsTester.java
new file mode 100644
index 0000000..14c3a58
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailConsistencyLevelsTester.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+
+/**
+ * Utility class for testing the guardrails for read/write consistency levels.
+ */
+public abstract class GuardrailConsistencyLevelsTester extends GuardrailTester
+{
+    private final String warnedPropertyName;
+    private final String disallowedPropertyName;
+    private final Function<Guardrails, Set<String>> warnedGetter;
+    private final Function<Guardrails, Set<String>> disallowedGetter;
+    private final Function<Guardrails, String> warnedCSVGetter;
+    private final Function<Guardrails, String> disallowedCSVGetter;
+    private final BiConsumer<Guardrails, Set<String>> warnedSetter;
+    private final BiConsumer<Guardrails, Set<String>> disallowedSetter;
+    private final BiConsumer<Guardrails, String> warnedCSVSetter;
+    private final BiConsumer<Guardrails, String> disallowedCSVSetter;
+
+    public GuardrailConsistencyLevelsTester(String warnedPropertyName,
+                                            String disallowedPropertyName,
+                                            Values<ConsistencyLevel> guardrail,
+                                            Function<Guardrails, Set<String>> warnedGetter,
+                                            Function<Guardrails, Set<String>> disallowedGetter,
+                                            Function<Guardrails, String> warnedCSVGetter,
+                                            Function<Guardrails, String> disallowedCSVGetter,
+                                            BiConsumer<Guardrails, Set<String>> warnedSetter,
+                                            BiConsumer<Guardrails, Set<String>> disallowedSetter,
+                                            BiConsumer<Guardrails, String> warnedCSVSetter,
+                                            BiConsumer<Guardrails, String> disallowedCSVSetter)
+    {
+        super(guardrail);
+        this.warnedPropertyName = warnedPropertyName;
+        this.disallowedPropertyName = disallowedPropertyName;
+        this.warnedGetter = warnedGetter;
+        this.disallowedGetter = disallowedGetter;
+        this.warnedCSVGetter = g -> sortCSV(warnedCSVGetter.apply(g));
+        this.disallowedCSVGetter = g -> sortCSV(disallowedCSVGetter.apply(g));
+        this.warnedSetter = warnedSetter;
+        this.disallowedSetter = disallowedSetter;
+        this.warnedCSVSetter = warnedCSVSetter;
+        this.disallowedCSVSetter = disallowedCSVSetter;
+    }
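+
+    // Illustrative sketch only (the names below are assumptions, not taken from this patch): a concrete
+    // subclass is expected to wire the property names and the matching Guardrails accessors for either
+    // the read or the write consistency-level guardrail, along the lines of:
+    //   super("read_consistency_levels_warned", "read_consistency_levels_disallowed",
+    //         Guardrails.readConsistencyLevels,
+    //         Guardrails::getReadConsistencyLevelsWarned, Guardrails::getReadConsistencyLevelsDisallowed,
+    //         Guardrails::getReadConsistencyLevelsWarnedCSV, Guardrails::getReadConsistencyLevelsDisallowedCSV,
+    //         Guardrails::setReadConsistencyLevelsWarned, Guardrails::setReadConsistencyLevelsDisallowed,
+    //         Guardrails::setReadConsistencyLevelsWarnedCSV, Guardrails::setReadConsistencyLevelsDisallowedCSV);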
+
+    @Before
+    public void before()
+    {
+        warnConsistencyLevels();
+        disableConsistencyLevels();
+    }
+
+    protected void warnConsistencyLevels(ConsistencyLevel... consistencyLevels)
+    {
+        warnedSetter.accept(guardrails(), Stream.of(consistencyLevels).map(ConsistencyLevel::name).collect(Collectors.toSet()));
+    }
+
+    protected void disableConsistencyLevels(ConsistencyLevel... consistencyLevels)
+    {
+        disallowedSetter.accept(guardrails(), Stream.of(consistencyLevels).map(ConsistencyLevel::name).collect(Collectors.toSet()));
+    }
+
+    @Test
+    public void testConfigValidation()
+    {
+        String message = "Invalid value for %s: null is not allowed";
+        assertInvalidProperty(warnedSetter, null, message, warnedPropertyName);
+        assertInvalidProperty(disallowedSetter, null, message, disallowedPropertyName);
+
+        assertValidProperty(Collections.emptySet());
+        assertValidProperty(EnumSet.allOf(ConsistencyLevel.class));
+
+        assertValidPropertyCSV("");
+        assertValidPropertyCSV(EnumSet.allOf(ConsistencyLevel.class)
+                                      .stream()
+                                      .map(ConsistencyLevel::toString)
+                                      .collect(Collectors.joining(",")));
+
+        assertInvalidPropertyCSV("invalid", "INVALID");
+        assertInvalidPropertyCSV("ONE,invalid1,invalid2", "INVALID1");
+        assertInvalidPropertyCSV("invalid1,invalid2,ONE", "INVALID1");
+        assertInvalidPropertyCSV("invalid1,ONE,invalid2", "INVALID1");
+    }
+
+    private void assertValidProperty(Set<ConsistencyLevel> input)
+    {
+        Set<String> properties = input.stream().map(ConsistencyLevel::name).collect(Collectors.toSet());
+        assertValidProperty(warnedSetter, warnedGetter, properties);
+        assertValidProperty(disallowedSetter, disallowedGetter, properties);
+    }
+
+    private void assertValidPropertyCSV(String csv)
+    {
+        csv = sortCSV(csv);
+        assertValidProperty(warnedCSVSetter, warnedCSVGetter, csv);
+        assertValidProperty(disallowedCSVSetter, disallowedCSVGetter, csv);
+    }
+
+    private void assertInvalidPropertyCSV(String properties, String rejected)
+    {
+        String message = "No enum constant org.apache.cassandra.db.ConsistencyLevel.%s";
+        assertInvalidProperty(warnedCSVSetter, properties, message, rejected);
+        assertInvalidProperty(disallowedCSVSetter, properties, message, rejected);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailDiskUsageTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailDiskUsageTest.java
new file mode 100644
index 0000000..86e4df0
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailDiskUsageTest.java
@@ -0,0 +1,631 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.nio.file.FileStore;
+import java.util.Arrays;
+import java.util.function.Consumer;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.VersionedValue;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.disk.usage.DiskUsageBroadcaster;
+import org.apache.cassandra.service.disk.usage.DiskUsageMonitor;
+import org.apache.cassandra.service.disk.usage.DiskUsageState;
+import org.apache.cassandra.utils.FBUtilities;
+import org.jboss.byteman.contrib.bmunit.BMRule;
+import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
+import org.mockito.Mockito;
+
+import static org.apache.cassandra.service.disk.usage.DiskUsageState.FULL;
+import static org.apache.cassandra.service.disk.usage.DiskUsageState.NOT_AVAILABLE;
+import static org.apache.cassandra.service.disk.usage.DiskUsageState.SPACIOUS;
+import static org.apache.cassandra.service.disk.usage.DiskUsageState.STUFFED;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests the guardrails for disk usage, {@link Guardrails#localDataDiskUsage} and {@link Guardrails#replicaDiskUsage}.
+ */
+@RunWith(BMUnitRunner.class)
+@BMRule(name = "Always returns a physical disk size of 1000TiB",
+targetClass = "DiskUsageMonitor",
+targetMethod = "totalDiskSpace",
+action = "return " + (1000L * 1024 * 1024 * 1024 * 1024) + "L;") // 1000TiB
+public class GuardrailDiskUsageTest extends GuardrailTester
+{
+    private static int defaultDataDiskUsagePercentageWarnThreshold;
+    private static int defaultDataDiskUsagePercentageFailThreshold;
+
+    @BeforeClass
+    public static void beforeClass()
+    {
+        defaultDataDiskUsagePercentageWarnThreshold = Guardrails.instance.getDataDiskUsagePercentageWarnThreshold();
+        defaultDataDiskUsagePercentageFailThreshold = Guardrails.instance.getDataDiskUsagePercentageFailThreshold();
+
+        Guardrails.instance.setDataDiskUsagePercentageThreshold(-1, -1);
+    }
+
+    @AfterClass
+    public static void afterClass()
+    {
+        Guardrails.instance.setDataDiskUsagePercentageThreshold(defaultDataDiskUsagePercentageWarnThreshold,
+                                                                defaultDataDiskUsagePercentageFailThreshold);
+    }
+
+    @Test
+    public void testConfigValidation()
+    {
+        assertConfigValid(x -> x.setDataDiskUsageMaxDiskSize(null));
+        assertNull(guardrails().getDataDiskUsageMaxDiskSize());
+
+        assertConfigFails(x -> x.setDataDiskUsageMaxDiskSize("0B"), "0 is not allowed");
+        assertConfigFails(x -> x.setDataDiskUsageMaxDiskSize("0KiB"), "0 is not allowed");
+        assertConfigFails(x -> x.setDataDiskUsageMaxDiskSize("0MiB"), "0 is not allowed");
+        assertConfigFails(x -> x.setDataDiskUsageMaxDiskSize("0GiB"), "0 is not allowed");
+
+        assertConfigValid(x -> x.setDataDiskUsageMaxDiskSize("10B"));
+        assertEquals("10B", guardrails().getDataDiskUsageMaxDiskSize());
+
+        assertConfigValid(x -> x.setDataDiskUsageMaxDiskSize("20KiB"));
+        assertEquals("20KiB", guardrails().getDataDiskUsageMaxDiskSize());
+
+        assertConfigValid(x -> x.setDataDiskUsageMaxDiskSize("30MiB"));
+        assertEquals("30MiB", guardrails().getDataDiskUsageMaxDiskSize());
+
+        assertConfigValid(x -> x.setDataDiskUsageMaxDiskSize("40GiB"));
+        assertEquals("40GiB", guardrails().getDataDiskUsageMaxDiskSize());
+
+        long diskSize = DiskUsageMonitor.totalDiskSpace();
+        String message = String.format("only %s are actually available on disk", FileUtils.stringifyFileSize(diskSize));
+        assertConfigValid(x -> x.setDataDiskUsageMaxDiskSize(diskSize + "B"));
+        assertConfigFails(x -> x.setDataDiskUsageMaxDiskSize(diskSize + 1 + "B"), message);
+        // We want to test with a very big number; Long.MAX_VALUE is not allowed, so we use Integer.MAX_VALUE instead
+        assertConfigFails(x -> x.setDataDiskUsageMaxDiskSize(Integer.MAX_VALUE + "GiB"), message);
+
+        // warn threshold smaller than lower bound
+        assertConfigFails(x -> x.setDataDiskUsagePercentageThreshold(0, 80), "0 is not allowed");
+
+        // fail threshold bigger than upper bound
+        assertConfigFails(x -> x.setDataDiskUsagePercentageThreshold(1, 110), "maximum allowed value is 100");
+
+        // warn threshold larger than fail threshold
+        assertConfigFails(x -> x.setDataDiskUsagePercentageThreshold(60, 50),
+                          "The warn threshold 60 for data_disk_usage_percentage_warn_threshold should be lower than the fail threshold 50");
+    }
+
+    @Test
+    public void testDiskUsageState()
+    {
+        guardrails().setDataDiskUsagePercentageThreshold(50, 90);
+
+        // under usage
+        assertEquals(SPACIOUS, DiskUsageMonitor.instance.getState(10));
+        assertEquals(SPACIOUS, DiskUsageMonitor.instance.getState(50));
+
+        // exceed warning threshold
+        assertEquals(STUFFED, DiskUsageMonitor.instance.getState(51));
+        assertEquals(STUFFED, DiskUsageMonitor.instance.getState(56));
+        assertEquals(STUFFED, DiskUsageMonitor.instance.getState(90));
+
+        // exceed fail threshold
+        assertEquals(FULL, DiskUsageMonitor.instance.getState(91));
+        assertEquals(FULL, DiskUsageMonitor.instance.getState(100));
+
+        // shouldn't be possible to go over 100% but just to be sure
+        assertEquals(FULL, DiskUsageMonitor.instance.getState(101));
+        assertEquals(FULL, DiskUsageMonitor.instance.getState(500));
+    }
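+
+    // Summary of the mapping exercised above (warn=50, fail=90): usage at or below the warn threshold
+    // is SPACIOUS, usage above it but at or below the fail threshold is STUFFED, and anything above the
+    // fail threshold is FULL, including nonsensical values over 100%.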
+
+    @Test
+    public void testDiskUsageDetectorWarnDisabled()
+    {
+        guardrails().setDataDiskUsagePercentageThreshold(-1, 90);
+
+        // under usage
+        assertEquals(SPACIOUS, DiskUsageMonitor.instance.getState(0));
+        assertEquals(SPACIOUS, DiskUsageMonitor.instance.getState(50));
+        assertEquals(SPACIOUS, DiskUsageMonitor.instance.getState(90));
+
+        // exceed fail threshold
+        assertEquals(FULL, DiskUsageMonitor.instance.getState(91));
+        assertEquals(FULL, DiskUsageMonitor.instance.getState(100));
+    }
+
+    @Test
+    public void testDiskUsageDetectorFailDisabled()
+    {
+        guardrails().setDataDiskUsagePercentageThreshold(50, -1);
+
+        // under usage
+        assertEquals(SPACIOUS, DiskUsageMonitor.instance.getState(50));
+
+        // exceed warning threshold
+        assertEquals(STUFFED, DiskUsageMonitor.instance.getState(51));
+        assertEquals(STUFFED, DiskUsageMonitor.instance.getState(80));
+        assertEquals(STUFFED, DiskUsageMonitor.instance.getState(100));
+    }
+
+    @Test
+    public void testDiskUsageGuardrailDisabled()
+    {
+        guardrails().setDataDiskUsagePercentageThreshold(-1, -1);
+
+        assertEquals(NOT_AVAILABLE, DiskUsageMonitor.instance.getState(0));
+        assertEquals(NOT_AVAILABLE, DiskUsageMonitor.instance.getState(60));
+        assertEquals(NOT_AVAILABLE, DiskUsageMonitor.instance.getState(100));
+    }
+
+    @Test
+    public void testMemtableSizeIncluded() throws Throwable
+    {
+        DiskUsageMonitor monitor = new DiskUsageMonitor();
+
+        createTable(keyspace(), "CREATE TABLE %s (k text PRIMARY KEY, v text) WITH compression = { 'enabled': false }");
+
+        long memtableSizeBefore = monitor.getAllMemtableSize();
+        int rows = 10;
+        int mb = 1024 * 1024;
+
+        for (int i = 0; i < rows; i++)
+        {
+            char[] chars = new char[mb];
+            Arrays.fill(chars, (char) i);
+            String value = String.copyValueOf(chars);
+            execute("INSERT INTO %s (k, v) VALUES(?, ?)", i, value);
+        }
+
+        // verify memtables are included
+        long memtableSizeAfterInsert = monitor.getAllMemtableSize();
+        assertTrue(String.format("Expect at least 10MB more data, but got before: %s and after: %d",
+                                 memtableSizeBefore, memtableSizeAfterInsert),
+                   memtableSizeAfterInsert - memtableSizeBefore >= rows * mb);
+
+        // verify memtable size is reduced after flush
+        flush();
+        long memtableSizeAfterFlush = monitor.getAllMemtableSize();
+        assertEquals(memtableSizeBefore, memtableSizeAfterFlush, mb);
+    }
+
+    @Test
+    public void testMonitorLogsOnStateChange()
+    {
+        guardrails().setDataDiskUsagePercentageThreshold(50, 90);
+
+        Guardrails.localDataDiskUsage.resetLastNotifyTime();
+
+        DiskUsageMonitor monitor = new DiskUsageMonitor();
+
+        // transition to SPACIOUS, no logging
+        assertMonitorStateTransition(0.50, SPACIOUS, monitor);
+
+        // transition to STUFFED, expect warning
+        assertMonitorStateTransition(0.50001, STUFFED, monitor, true, "Local data disk usage 51%(Stuffed) exceeds warning threshold of 50%");
+
+        // remain STUFFED, no logging because of min log interval
+        assertMonitorStateTransition(0.90, STUFFED, monitor);
+
+        // transition to FULL, expect failure
+        assertMonitorStateTransition(0.90001, FULL, monitor, false, "Local data disk usage 91%(Full) exceeds failure threshold of 90%, will stop accepting writes");
+
+        // remain FULL, no logging because of min log interval
+        assertMonitorStateTransition(0.99, FULL, monitor);
+
+        // remain FULL, no logging because of min log interval
+        assertMonitorStateTransition(5.0, FULL, monitor);
+
+        // transition back to STUFFED, no warning because of min log interval
+        assertMonitorStateTransition(0.90, STUFFED, monitor);
+
+        // transition back to FULL, no logging because of min log interval
+        assertMonitorStateTransition(0.900001, FULL, monitor);
+
+        // transition back to STUFFED, no logging because of min log interval
+        assertMonitorStateTransition(0.90, STUFFED, monitor);
+
+        // transition to SPACIOUS, no logging
+        assertMonitorStateTransition(0.50, SPACIOUS, monitor);
+    }
+
+    @Test
+    public void testDiskUsageBroadcaster() throws UnknownHostException
+    {
+        DiskUsageBroadcaster broadcaster = new DiskUsageBroadcaster(null);
+        Gossiper.instance.unregister(broadcaster);
+
+        InetAddressAndPort node1 = InetAddressAndPort.getByName("127.0.0.1");
+        InetAddressAndPort node2 = InetAddressAndPort.getByName("127.0.0.2");
+        InetAddressAndPort node3 = InetAddressAndPort.getByName("127.0.0.3");
+
+        // initially it's NOT_AVAILABLE
+        assertFalse(broadcaster.hasStuffedOrFullNode());
+        assertFalse(broadcaster.isFull(node1));
+        assertFalse(broadcaster.isFull(node2));
+        assertFalse(broadcaster.isFull(node3));
+
+        // adding 1st node: Spacious, cluster has no Full node
+        broadcaster.onChange(node1, ApplicationState.DISK_USAGE, value(SPACIOUS));
+        assertFalse(broadcaster.hasStuffedOrFullNode());
+        assertFalse(broadcaster.isFull(node1));
+
+        // adding 2nd node with wrong ApplicationState
+        broadcaster.onChange(node2, ApplicationState.RACK, value(FULL));
+        assertFalse(broadcaster.hasStuffedOrFullNode());
+        assertFalse(broadcaster.isFull(node2));
+
+        // adding 2nd node: STUFFED
+        broadcaster.onChange(node2, ApplicationState.DISK_USAGE, value(STUFFED));
+        assertTrue(broadcaster.hasStuffedOrFullNode());
+        assertTrue(broadcaster.isStuffed(node2));
+
+        // adding 3rd node: FULL
+        broadcaster.onChange(node3, ApplicationState.DISK_USAGE, value(FULL));
+        assertTrue(broadcaster.hasStuffedOrFullNode());
+        assertTrue(broadcaster.isFull(node3));
+
+        // remove 2nd node, cluster still has a Full node
+        broadcaster.onRemove(node2);
+        assertTrue(broadcaster.hasStuffedOrFullNode());
+        assertFalse(broadcaster.isStuffed(node2));
+
+        // remove 3rd node, cluster has no Full node
+        broadcaster.onRemove(node3);
+        assertFalse(broadcaster.hasStuffedOrFullNode());
+        assertFalse(broadcaster.isFull(node3));
+    }
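+
+    // Worth noting from the scenario above: only the DISK_USAGE application state is honoured, so a
+    // FULL value delivered under a different gossip state (RACK) leaves the broadcaster unchanged, and
+    // removed endpoints stop counting towards hasStuffedOrFullNode().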
+
+    @Test
+    public void testDiskUsageCalculationWithMaxDiskSize() throws IOException
+    {
+        Directories.DataDirectory directory = mock(Directories.DataDirectory.class);
+        when(directory.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("5GiB").toBytes());
+
+        FileStore store = mock(FileStore.class);
+        when(store.getUsableSpace()).thenReturn(new DataStorageSpec.LongBytesBound("95GiB").toBytes()); // 100GiB disk - 5GiB
+
+        Multimap<FileStore, Directories.DataDirectory> directories = HashMultimap.create();
+        directories.put(store, directory);
+        DiskUsageMonitor monitor = spy(new DiskUsageMonitor(() -> directories));
+
+        doCallRealMethod().when(monitor).getDiskUsage();
+        doReturn(0L).when(monitor).getAllMemtableSize();
+
+        guardrails().setDataDiskUsageMaxDiskSize(null);
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.05);
+
+        // 5GiB used out of 10GiB
+        guardrails().setDataDiskUsageMaxDiskSize("10GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.5);
+
+        // max disk size = space used
+        guardrails().setDataDiskUsageMaxDiskSize("5GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(1.0);
+
+        // max disk size < space used
+        guardrails().setDataDiskUsageMaxDiskSize("1GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(5.0);
+    }
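+
+    // The assertions above suggest the usage ratio is derived as used / (used + usable) when no max
+    // disk size is configured via setDataDiskUsageMaxDiskSize (5GiB / 100GiB = 0.05), and as
+    // used / max_disk_size when it is (5GiB / 10GiB = 0.5), which is why the ratio can exceed 1.0 once
+    // the configured cap is smaller than the data already on disk.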
+
+    @Test
+    public void testDiskUsageCalculationWithMaxDiskSizeAndSmallUnits() throws IOException
+    {
+        // 5MiB used out of a 100GiB disk
+        long freeDiskSizeInBytes = new DataStorageSpec.LongBytesBound("100GiB").toBytes() - new DataStorageSpec.LongBytesBound("5MiB").toBytes();
+
+        FileStore store = mock(FileStore.class);
+        when(store.getUsableSpace()).thenReturn(new DataStorageSpec.LongBytesBound(freeDiskSizeInBytes + "B").toBytes()); // 100GiB disk
+
+        Directories.DataDirectory directory = mock(Directories.DataDirectory.class);
+        when(directory.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("5MiB").toBytes());
+
+        Multimap<FileStore, Directories.DataDirectory> directories = HashMultimap.create();
+        directories.put(store, directory);
+        DiskUsageMonitor monitor = spy(new DiskUsageMonitor(() -> directories));
+
+        doCallRealMethod().when(monitor).getDiskUsage();
+        doReturn(0L).when(monitor).getAllMemtableSize();
+
+        guardrails().setDataDiskUsageMaxDiskSize(null);
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.00005);
+
+        // 5MiB used out of 10MiB
+        guardrails().setDataDiskUsageMaxDiskSize("10MiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.5);
+
+        // max disk size = space used
+        guardrails().setDataDiskUsageMaxDiskSize("5MiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(1.0);
+
+        // max disk size < space used
+        guardrails().setDataDiskUsageMaxDiskSize("1MiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(5.0);
+    }
+
+    @Test
+    public void testDiskUsageCalculationWithMaxDiskSizeAndMultipleVolumes() throws IOException
+    {
+        Mockito.reset();
+
+        Multimap<FileStore, Directories.DataDirectory> directories = HashMultimap.create();
+
+        Directories.DataDirectory directory1 = mock(Directories.DataDirectory.class);
+        FileStore store1 = mock(FileStore.class);
+        when(directory1.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("5GiB").toBytes());
+        when(store1.getUsableSpace()).thenReturn(new DataStorageSpec.LongBytesBound("95GiB").toBytes()); // 100 GiB disk - 5 GiB
+        directories.put(store1, directory1);
+
+        Directories.DataDirectory directory2 = mock(Directories.DataDirectory.class);
+        FileStore store2 = mock(FileStore.class);
+        when(directory2.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("25GiB").toBytes());
+        when(store2.getUsableSpace()).thenReturn(new DataStorageSpec.LongBytesBound("75GiB").toBytes()); // 100 GiB disk - 25 GiB
+        directories.put(store2, directory2);
+
+        Directories.DataDirectory directory3 = mock(Directories.DataDirectory.class);
+        FileStore store3 = mock(FileStore.class);
+        when(directory3.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("20GiB").toBytes());
+        when(store3.getUsableSpace()).thenReturn(new DataStorageSpec.LongBytesBound("80GiB").toBytes()); // 100 GiB disk - 20 GiB
+        directories.put(store3, directory3);
+
+        DiskUsageMonitor monitor = spy(new DiskUsageMonitor(() -> directories));
+
+        doCallRealMethod().when(monitor).getDiskUsage();
+        doReturn(0L).when(monitor).getAllMemtableSize();
+
+        // 50G/300G as each disk has a capacity of 100G
+        guardrails().setDataDiskUsageMaxDiskSize(null);
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.16667);
+
+        // 50G/100G
+        guardrails().setDataDiskUsageMaxDiskSize("100GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.5);
+
+        // 50G/75G
+        guardrails().setDataDiskUsageMaxDiskSize("75GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.66667);
+
+        // 50G/50G
+        guardrails().setDataDiskUsageMaxDiskSize("50GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(1.0);
+
+        // 50G/49G
+        guardrails().setDataDiskUsageMaxDiskSize("49GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(1.02041);
+    }
+
+    @Test
+    public void testDiskUsageCalculationWithMaxDiskSizeAndMultipleDirectories() throws IOException
+    {
+        Mockito.reset();
+
+        Directories.DataDirectory directory1 = mock(Directories.DataDirectory.class);
+        when(directory1.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("5GiB").toBytes());
+
+        Directories.DataDirectory directory2 = mock(Directories.DataDirectory.class);
+        when(directory2.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("25GiB").toBytes());
+
+        Directories.DataDirectory directory3 = mock(Directories.DataDirectory.class);
+        when(directory3.getRawSize()).thenReturn(new DataStorageSpec.LongBytesBound("20GiB").toBytes());
+
+        FileStore store = mock(FileStore.class);
+        when(store.getUsableSpace()).thenReturn(new DataStorageSpec.LongBytesBound("250GiB").toBytes()); // 300 GiB disk - (5 + 25 + 20) GiB
+
+        Multimap<FileStore, Directories.DataDirectory> directories = HashMultimap.create();
+        directories.putAll(store, ImmutableSet.of(directory1, directory2, directory3));
+
+        DiskUsageMonitor monitor = spy(new DiskUsageMonitor(() -> directories));
+
+        doCallRealMethod().when(monitor).getDiskUsage();
+        doReturn(0L).when(monitor).getAllMemtableSize();
+
+        // 50G/300G as disk has a capacity of 300G
+        guardrails().setDataDiskUsageMaxDiskSize(null);
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.16667);
+
+        // 50G/100G
+        guardrails().setDataDiskUsageMaxDiskSize("100GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.5);
+
+        // 50G/75G
+        guardrails().setDataDiskUsageMaxDiskSize("75GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(0.66667);
+
+        // 50G/50G
+        guardrails().setDataDiskUsageMaxDiskSize("50GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(1.0);
+
+        // 50G/49G
+        guardrails().setDataDiskUsageMaxDiskSize("49GiB");
+        assertThat(monitor.getDiskUsage()).isEqualTo(1.02041);
+    }
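+
+    // Taken together, the two multi-location tests above suggest that directories sharing a single
+    // FileStore only contribute that store's capacity once, while separate stores each add their own,
+    // so both layouts arrive at the same 50GiB used / 300GiB capacity baseline.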
+
+    @Test
+    public void testWriteRequests() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+
+        InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
+        InetAddressAndPort node1 = InetAddressAndPort.getByName("127.0.0.11");
+        InetAddressAndPort node2 = InetAddressAndPort.getByName("127.0.0.21");
+        InetAddressAndPort node3 = InetAddressAndPort.getByName("127.0.0.31");
+
+        Guardrails.replicaDiskUsage.resetLastNotifyTime();
+        guardrails().setDataDiskUsagePercentageThreshold(98, 99);
+
+        ConsistencyLevel cl = ConsistencyLevel.LOCAL_QUORUM;
+        String select = "SELECT * FROM %s";
+        String insert = "INSERT INTO %s (k, v) VALUES (0, 0)";
+        String batch = "BEGIN BATCH " +
+                       "INSERT INTO %s (k, v) VALUES (1, 1);" +
+                       "INSERT INTO %<s (k, v) VALUES (2, 2); " +
+                       "APPLY BATCH";
+        CheckedFunction userSelect = () -> execute(userClientState, select, cl);
+        CheckedFunction userInsert = () -> execute(userClientState, insert, cl);
+        CheckedFunction userBatch = () -> execute(userClientState, batch, cl);
+
+        // default state, write request works fine
+        assertValid(userSelect);
+        assertValid(userInsert);
+        assertValid(userBatch);
+
+        // verify node1 NOT_AVAILABLE won't affect writes
+        setDiskUsageState(node1, NOT_AVAILABLE);
+        assertValid(userSelect);
+        assertValid(userInsert);
+        assertValid(userBatch);
+
+        // verify node2 SPACIOUS won't affect writes
+        setDiskUsageState(node2, SPACIOUS);
+        assertValid(userSelect);
+        assertValid(userInsert);
+        assertValid(userBatch);
+
+        // verify node3 STUFFED won't trigger a warning as it's not a write replica
+        setDiskUsageState(node3, STUFFED);
+        assertValid(userSelect);
+        assertValid(userInsert);
+        assertValid(userBatch);
+
+        // verify node3 FULL won't affect writes as it's not a write replica
+        setDiskUsageState(node3, FULL);
+        assertValid(userSelect);
+        assertValid(userInsert);
+        assertValid(userBatch);
+
+        // verify local node STUFFED, will log a warning
+        setDiskUsageState(local, STUFFED);
+        assertValid(userSelect);
+        assertWarns(userInsert);
+        assertWarns(userBatch);
+
+        // verify local node FULL, will reject writes
+        setDiskUsageState(local, FULL);
+        assertValid(userSelect);
+        assertFails(userInsert);
+        assertFails(userBatch);
+
+        // excluded users can write to FULL cluster
+        useSuperUser();
+        Guardrails.replicaDiskUsage.resetLastNotifyTime();
+        for (ClientState excludedUser : Arrays.asList(systemClientState, superClientState))
+        {
+            assertValid(() -> execute(excludedUser, select, cl));
+            assertValid(() -> execute(excludedUser, insert, cl));
+            assertValid(() -> execute(excludedUser, batch, cl));
+        }
+
+        // verify local node STUFFED won't reject writes
+        setDiskUsageState(local, STUFFED);
+        assertValid(userSelect);
+        assertWarns(userInsert);
+        assertWarns(userBatch);
+    }
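+
+    // Note that userSelect stays valid throughout the scenario above: the replica disk usage guardrail
+    // gates write requests, so reads in this test succeed even while the local node is FULL.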
+
+    @Override
+    protected void assertValid(CheckedFunction function) throws Throwable
+    {
+        Guardrails.replicaDiskUsage.resetLastNotifyTime();
+        super.assertValid(function);
+    }
+
+    protected void assertWarns(CheckedFunction function) throws Throwable
+    {
+        Guardrails.replicaDiskUsage.resetLastNotifyTime();
+        super.assertWarns(function, "Replica disk usage exceeds warning threshold");
+    }
+
+    protected void assertFails(CheckedFunction function) throws Throwable
+    {
+        Guardrails.replicaDiskUsage.resetLastNotifyTime();
+        super.assertFails(function, "Write request failed because disk usage exceeds failure threshold");
+    }
+
+    private static void setDiskUsageState(InetAddressAndPort endpoint, DiskUsageState state)
+    {
+        DiskUsageBroadcaster.instance.onChange(endpoint, ApplicationState.DISK_USAGE, value(state));
+    }
+
+    private static VersionedValue value(DiskUsageState state)
+    {
+        return StorageService.instance.valueFactory.diskUsage(state.name());
+    }
+
+    private void assertMonitorStateTransition(double usageRatio, DiskUsageState state, DiskUsageMonitor monitor)
+    {
+        assertMonitorStateTransition(usageRatio, state, monitor, false, null);
+    }
+
+    private void assertMonitorStateTransition(double usageRatio, DiskUsageState state, DiskUsageMonitor monitor,
+                                              boolean isWarn, String msg)
+    {
+        boolean stateChanged = state != monitor.state();
+        Consumer<DiskUsageState> notifier = newState -> {
+            if (stateChanged)
+                assertEquals(state, newState);
+            else
+                fail("Expect no notification if state remains the same");
+        };
+
+        monitor.updateLocalState(usageRatio, notifier);
+        assertEquals(state, monitor.state());
+
+        if (msg == null)
+        {
+            listener.assertNotFailed();
+            listener.assertNotWarned();
+        }
+        else if (isWarn)
+        {
+            listener.assertWarned(msg);
+            listener.assertNotFailed();
+        }
+        else
+        {
+            listener.assertFailed(msg);
+            listener.assertNotWarned();
+        }
+
+        listener.clear();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailDropTruncateTableTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailDropTruncateTableTest.java
new file mode 100644
index 0000000..761dcdc
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailDropTruncateTableTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.After;
+import org.junit.Test;
+
+import static java.lang.String.format;
+
+public class GuardrailDropTruncateTableTest extends GuardrailTester
+{
+    private String tableQuery = "CREATE TABLE %s(pk int, ck int, v int, PRIMARY KEY(pk, ck))";
+
+    private void setGuardrail(boolean enabled)
+    {
+        Guardrails.instance.setDropTruncateTableEnabled(enabled);
+    }
+
+    @After
+    public void afterTest()
+    {
+        setGuardrail(true);
+    }
+
+    @Test
+    public void testCanDropWhileFeatureEnabled() throws Throwable
+    {
+        setGuardrail(true);
+        createTable(tableQuery);
+        assertValid(String.format("DROP TABLE %s", currentTable()));
+    }
+
+    @Test
+    public void testCannotDropWhileFeatureDisabled() throws Throwable
+    {
+        setGuardrail(false);
+        createTable(tableQuery);
+        assertFails("DROP TABLE %s", "DROP and TRUNCATE TABLE functionality is not allowed");
+    }
+
+    @Test
+    public void testIfExistsDoesNotBreakGuardrail() throws Throwable
+    {
+        setGuardrail(false);
+        createTable(tableQuery);
+        assertFails("DROP TABLE IF EXISTS %s", "DROP and TRUNCATE TABLE functionality is not allowed");
+    }
+
+    @Test
+    public void testCanTruncateWhileFeatureEnabled() throws Throwable
+    {
+        setGuardrail(true);
+        createTable(tableQuery);
+        assertValid(String.format("TRUNCATE TABLE %s", currentTable()));
+    }
+
+    @Test
+    public void testCannotTruncateWhileFeatureDisabled() throws Throwable
+    {
+        setGuardrail(false);
+        createTable(tableQuery);
+        assertFails("TRUNCATE TABLE %s", "DROP and TRUNCATE TABLE functionality is not allowed");
+    }
+
+    @Test
+    public void testExcludedUsersCanAlwaysDropAndTruncate() throws Throwable
+    {
+        String table = keyspace() + '.' + createTableName();
+        setGuardrail(false);
+        testExcludedUsers(() -> format("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)", table),
+                          () -> format("TRUNCATE TABLE %s", table),
+                          () -> format("DROP TABLE %s", table));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailFieldsPerUDTTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailFieldsPerUDTTest.java
new file mode 100644
index 0000000..96ad28a
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailFieldsPerUDTTest.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for the number of fields in a user-defined type, {@link Guardrails#fieldsPerUDT}.
+ */
+public class GuardrailFieldsPerUDTTest extends ThresholdTester
+{
+    private static final int WARN_THRESHOLD = 2;
+    private static final int FAIL_THRESHOLD = 4;
+
+    public GuardrailFieldsPerUDTTest()
+    {
+        super(WARN_THRESHOLD,
+              FAIL_THRESHOLD,
+              Guardrails.fieldsPerUDT,
+              Guardrails::setFieldsPerUDTThreshold,
+              Guardrails::getFieldsPerUDTWarnThreshold,
+              Guardrails::getFieldsPerUDTFailThreshold);
+    }
+
+    @Test
+    public void testCreateType() throws Throwable
+    {
+        assertValid("CREATE TYPE %s (a int)");
+        assertValid("CREATE TYPE %s (a int, b int)");
+        assertWarns("CREATE TYPE %s (a int, b int, c int)", 3);
+        assertWarns("CREATE TYPE %s (a int, b int, c int, d int)", 4);
+        assertFails("CREATE TYPE %s (a int, b int, c int, d int, e int)", 5);
+        assertFails("CREATE TYPE %s (a int, b int, c int, d int, e int, f int)", 6);
+    }
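+
+    // As exercised above, the guardrail triggers only when a threshold is exceeded: a type with exactly
+    // WARN_THRESHOLD (2) fields is still valid, warnings start at 3 fields, and creation is rejected
+    // from FAIL_THRESHOLD + 1 (5) fields onwards.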
+
+    @Test
+    public void testAlterType() throws Throwable
+    {
+        String name = createType("CREATE TYPE %s (a int)");
+
+        assertValid("ALTER TYPE %s ADD b int", name);
+        assertWarns("ALTER TYPE %s ADD c int", name, 3);
+        assertWarns("ALTER TYPE %s ADD d int", name, 4);
+        assertFails("ALTER TYPE %s ADD e int", name, 5);
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        String name = createTypeName();
+        testExcludedUsers(() -> format("CREATE TYPE %s (a int, b int, c int, d int, e int)", name),
+                          () -> format("ALTER TYPE %s ADD f int", name),
+                          () -> format("DROP TYPE %s", name));
+    }
+
+    protected void assertValid(String query) throws Throwable
+    {
+        assertValid(query, createTypeName());
+    }
+
+    private void assertValid(String query, String typeName) throws Throwable
+    {
+        super.assertValid(format(query, typeName));
+    }
+
+    private void assertWarns(String query, int numFields) throws Throwable
+    {
+        String typeName = createTypeName();
+        assertWarns(query, typeName, numFields);
+    }
+
+    private void assertWarns(String query, String typeName, int numFields) throws Throwable
+    {
+        assertWarns(format(query, typeName),
+                    format("The user type %s has %s columns, this exceeds the warning threshold of %s.",
+                           typeName, numFields, WARN_THRESHOLD));
+    }
+
+    private void assertFails(String query, int numFields) throws Throwable
+    {
+        String typeName = createTypeName();
+        assertFails(query, typeName, numFields);
+    }
+
+    private void assertFails(String query, String typeName, int numFields) throws Throwable
+    {
+        assertFails(format(query, typeName),
+                    format("User types cannot have more than %s columns, but %s provided for user type %s",
+                           FAIL_THRESHOLD, numFields, typeName));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailGroupByTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailGroupByTest.java
new file mode 100644
index 0000000..f25cba2
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailGroupByTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.SchemaKeyspaceTables;
+
+public class GuardrailGroupByTest extends GuardrailTester
+{
+    private static final String query = String.format("SELECT * FROM %s.%s WHERE keyspace_name='%s' GROUP BY table_name",
+                                                      SchemaConstants.SCHEMA_KEYSPACE_NAME,
+                                                      SchemaKeyspaceTables.TABLES,
+                                                      KEYSPACE);
+
+    private void setGuardrail(boolean enabled)
+    {
+        Guardrails.instance.setGroupByEnabled(enabled);
+    }
+
+    @Test
+    public void checkExplicitlyDisabled() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails(query, "GROUP BY functionality is not allowed");
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        setGuardrail(false);
+        testExcludedUsers(() -> query);
+    }
+
+    @Test
+    public void checkEnabled() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid(query);
+    }
+
+    @Test
+    public void checkView() throws Throwable
+    {
+        setGuardrail(false);
+        createTable("CREATE TABLE %s(pk int, ck int, v int, PRIMARY KEY(pk, ck))");
+        String viewName = createView("CREATE MATERIALIZED VIEW %s AS " +
+                                     "SELECT * FROM %s WHERE pk IS NOT null and ck IS NOT null " +
+                                     "PRIMARY KEY(ck, pk)");
+        String viewQuery = "SELECT * FROM " + viewName + " WHERE ck=0 GROUP BY pk";
+        assertFails(viewQuery, "GROUP BY functionality is not allowed");
+        testExcludedUsers(() -> viewQuery);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailInSelectCartesianProductTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailInSelectCartesianProductTest.java
new file mode 100644
index 0000000..ef61ee4
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailInSelectCartesianProductTest.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.db.marshal.Int32Type;
+
+/**
+ * Tests the guardrail for the max number of restrictions produced by the cartesian product of the {@code IN}
+ * restrictions of a query, {@link Guardrails#inSelectCartesianProduct}.
+ */
+public class GuardrailInSelectCartesianProductTest extends ThresholdTester
+{
+    private static final int WARN_THRESHOLD = 16;
+    private static final int FAIL_THRESHOLD = 25;
+
+    private static final String WARN_MESSAGE = "The cartesian product of the IN restrictions on %s produces %d " +
+                                               "values, this exceeds warning threshold of " + WARN_THRESHOLD;
+    private static final String FAIL_MESSAGE = "Aborting query because the cartesian product of the IN restrictions " +
+                                               "on %s produces %d values, this exceeds fail threshold of " + FAIL_THRESHOLD;
+
+    public GuardrailInSelectCartesianProductTest()
+    {
+        super(WARN_THRESHOLD,
+              FAIL_THRESHOLD,
+              Guardrails.inSelectCartesianProduct,
+              Guardrails::setInSelectCartesianProductThreshold,
+              Guardrails::getInSelectCartesianProductWarnThreshold,
+              Guardrails::getInSelectCartesianProductFailThreshold);
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Before
+    public void initSchema()
+    {
+        createTable("CREATE TABLE %s (pk1 int, pk2 int, ck1 int, ck2 int, PRIMARY KEY((pk1, pk2), ck1, ck2))");
+    }
+
+    @Test
+    public void testPkCartesianProduct() throws Throwable
+    {
+        // below both thresholds
+        testPkCartesianProduct(1, 1);
+        testPkCartesianProduct(1, 4);
+        testPkCartesianProduct(4, 4);
+
+        // above warn threshold
+        testPkCartesianProduct(5, 5);
+        testPkCartesianProduct(2, 12);
+        testPkCartesianProduct(8, 3);
+
+        // above cartesian product limit
+        testPkCartesianProduct(1, 26);
+        testPkCartesianProduct(5, 6);
+        testPkCartesianProduct(26, 1);
+    }
+
+    @Test
+    public void testCkCartesianProduct() throws Throwable
+    {
+        // above warn threshold, does not exceed fail threshold
+        testCkCartesianProduct(3, 8);
+        testCkCartesianProduct(5, 5);
+
+        // above cartesian product limit
+        testCkCartesianProduct(1, 26);
+        testCkCartesianProduct(5, 6);
+        testCkCartesianProduct(6, 5);
+        testCkCartesianProduct(26, 1);
+    }
+
+    @Test
+    public void testPkCkCartesianProduct() throws Throwable
+    {
+        // does not exceed the fail threshold
+        testCartesianProduct(1, 10, 1, 10);
+        testCartesianProduct(10, 1, 10, 1);
+        testCartesianProduct(5, 5, 5, 5);
+
+        // above cartesian product limit
+        testCartesianProduct(5, 6, 5, 5);
+        testCartesianProduct(6, 5, 5, 5);
+        testCartesianProduct(5, 5, 6, 5);
+        testCartesianProduct(5, 5, 5, 6);
+    }
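+
+    // In the mixed cases above the partition key product (pk1 * pk2) and the clustering key product
+    // (ck1 * ck2) are evaluated independently, so a 5 * 6 = 30 product on either side exceeds the fail
+    // threshold of 25 even though every individual IN list stays small.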
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        testExcludedUsers(() -> String.format("SELECT * FROM %%s WHERE pk1 in (%s) AND pk2 in (%s)",
+                                              terms(5), terms(5)),
+                          () -> String.format("SELECT * FROM %%s WHERE pk1 in (%s) AND pk2 in (%s) AND ck1 in (%s) AND ck2 in (%s)",
+                                              terms(5), terms(5), terms(5), terms(6)));
+    }
+
+    @Test
+    public void testPkCartesianProductMultiColumnBelowThreshold() throws Throwable
+    {
+        String inTerms = IntStream.range(0, 5).mapToObj(i -> String.format("(%d, %d)", i, i + 1)).collect(Collectors.joining(", "));
+        String query = String.format("SELECT * FROM %%s WHERE (pk1, pk2) in (%s)", inTerms);
+        assertInvalidMessage("Multi-column relations can only be applied to clustering columns but was applied to: pk1", query);
+    }
+
+    private void testPkCartesianProduct(int pk1Terms, int pk2Terms) throws Throwable
+    {
+        testCartesianProduct(pk1Terms, pk2Terms, 1, 1);
+    }
+
+    private void testCkCartesianProduct(int ck1Terms, int ck2Terms) throws Throwable
+    {
+        testCartesianProduct(1, 1, ck1Terms, ck2Terms);
+    }
+
+    private void testCartesianProduct(int pk1, int pk2, int ck1, int ck2) throws Throwable
+    {
+        int keys = pk1 * pk2;
+        int clusterings = ck1 * ck2;
+
+        String query = String.format("SELECT * FROM %%s WHERE pk1 in (%s) AND pk2 in (%s) AND ck1 in (%s) AND ck2 in (%s)",
+                                     terms(pk1), terms(pk2), terms(ck1), terms(ck2));
+        testCartesianProduct(() -> execute(userClientState, query), keys, clusterings);
+
+        String queryWithBindVariables = String.format("SELECT * FROM %%s WHERE pk1 in (%s) AND pk2 in (%s) AND ck1 in (%s) AND ck2 in (%s)",
+                                                      markers(pk1), markers(pk2), markers(ck1), markers(ck2));
+        testCartesianProduct(() -> execute(userClientState, queryWithBindVariables, bindValues(pk1, pk2, ck1, ck2)), keys, clusterings);
+    }
+
+    private void testCartesianProduct(CheckedFunction function, int keys, int clusterings) throws Throwable
+    {
+        String keysFailMessage = String.format(FAIL_MESSAGE, "partition key", keys);
+        String keysWarnMessage = String.format(WARN_MESSAGE, "partition key", keys);
+        String clusteringsFailMessage = String.format(FAIL_MESSAGE, "clustering key", clusterings);
+        String clusteringsWarnMessage = String.format(WARN_MESSAGE, "clustering key", clusterings);
+
+        if (keys > FAIL_THRESHOLD)
+        {
+            assertFails(function, keysFailMessage);
+        }
+        else if (keys > WARN_THRESHOLD)
+        {
+            if (clusterings > FAIL_THRESHOLD)
+                assertFails(function, Arrays.asList(keysWarnMessage, clusteringsFailMessage));
+            else if (clusterings > WARN_THRESHOLD)
+                assertWarns(function, Arrays.asList(keysWarnMessage, clusteringsWarnMessage));
+            else
+                assertWarns(function, keysWarnMessage);
+        }
+        else if (clusterings > FAIL_THRESHOLD)
+        {
+            assertFails(function, clusteringsFailMessage);
+        }
+        else if (clusterings > WARN_THRESHOLD)
+        {
+            assertWarns(function, clusteringsWarnMessage);
+        }
+        else
+        {
+            assertValid(function);
+        }
+    }
+
+    private static String terms(int terms)
+    {
+        assert terms > 0;
+        return IntStream.range(0, terms).mapToObj(String::valueOf).collect(Collectors.joining(", "));
+    }
+
+    private static String markers(int terms)
+    {
+        assert terms > 0;
+        return IntStream.range(0, terms).mapToObj(i -> "?").collect(Collectors.joining(", "));
+    }
+
+    private static List<ByteBuffer> bindValues(int... termCounts)
+    {
+        return IntStream.of(termCounts)
+                        .boxed()
+                        .flatMap(terms -> IntStream.range(0, terms).boxed().map(Int32Type.instance::decompose))
+                        .collect(Collectors.toList());
+    }
+}
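
The branching in testCartesianProduct(CheckedFunction, int, int) is the heart of this test: a partition-key failure is reported on its own, while a partition-key warning is combined with whatever the clustering-key product produces. A minimal, framework-free sketch of that decision (the class name, message strings and the 25/50 thresholds are illustrative assumptions, not Cassandra internals):

import java.util.ArrayList;
import java.util.List;

// Sketch of the warn/fail composition encoded by testCartesianProduct above:
// a partition-key failure short-circuits, otherwise clustering-key warnings/failures
// are reported together with a partition-key warning.
final class CartesianProductOutcome
{
    static final int WARN_THRESHOLD = 25;   // arbitrary example values; the real test defines its own thresholds
    static final int FAIL_THRESHOLD = 50;

    static List<String> expectedMessages(int keys, int clusterings)
    {
        List<String> messages = new ArrayList<>();
        if (keys > FAIL_THRESHOLD)
        {
            messages.add("fail: partition keys " + keys);
            return messages;                 // a partition-key failure is reported alone
        }
        if (keys > WARN_THRESHOLD)
            messages.add("warn: partition keys " + keys);
        if (clusterings > FAIL_THRESHOLD)
            messages.add("fail: clustering keys " + clusterings);
        else if (clusterings > WARN_THRESHOLD)
            messages.add("warn: clustering keys " + clusterings);
        return messages;                     // an empty list means the query is valid
    }

    public static void main(String[] args)
    {
        System.out.println(expectedMessages(10, 10));   // []           -> valid
        System.out.println(expectedMessages(30, 60));   // [warn, fail] -> fails with both messages
        System.out.println(expectedMessages(60, 10));   // [fail]       -> partition-key failure only
    }
}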
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailItemsPerCollectionTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailItemsPerCollectionTest.java
new file mode 100644
index 0000000..a13e9b3
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailItemsPerCollectionTest.java
@@ -0,0 +1,289 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.stream.Collector;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.junit.After;
+import org.junit.Test;
+
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.ListType;
+import org.apache.cassandra.db.marshal.MapType;
+import org.apache.cassandra.db.marshal.SetType;
+
+/**
+ * Tests the guardrail for the number of items in a collection, {@link Guardrails#itemsPerCollection}.
+ * <p>
+ * This test doesn't include the activation of the guardrail during sstable writes; those cases are covered by the dtest
+ * {@link org.apache.cassandra.distributed.test.guardrails.GuardrailItemsPerCollectionOnSSTableWriteTest}.
+ */
+public class GuardrailItemsPerCollectionTest extends ThresholdTester
+{
+    private static final int WARN_THRESHOLD = 10;
+    private static final int FAIL_THRESHOLD = 20;
+
+    public GuardrailItemsPerCollectionTest()
+    {
+        super(WARN_THRESHOLD,
+              FAIL_THRESHOLD,
+              Guardrails.itemsPerCollection,
+              Guardrails::setItemsPerCollectionThreshold,
+              Guardrails::getItemsPerCollectionWarnThreshold,
+              Guardrails::getItemsPerCollectionFailThreshold);
+    }
+
+    @After
+    public void after()
+    {
+        // immediately drop the created table so its async cleanup doesn't interfere with the next tests
+        if (currentTable() != null)
+            dropTable("DROP TABLE %s");
+    }
+
+    @Test
+    public void testSetSize() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v set<int>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", set(0));
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", set(1));
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", set(WARN_THRESHOLD));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", set(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", set(FAIL_THRESHOLD), FAIL_THRESHOLD);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", set(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+        assertFails("INSERT INTO %s (k, v) VALUES (8, ?)", set(FAIL_THRESHOLD + 10), FAIL_THRESHOLD + 10);
+    }
+
+    @Test
+    public void testSetSizeFrozen() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v frozen<set<int>>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", set(0));
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", set(1));
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", set(WARN_THRESHOLD));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", set(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", set(FAIL_THRESHOLD), FAIL_THRESHOLD);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", set(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+    }
+
+    @Test
+    public void testSetSizeWithUpdates() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v set<int>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, ?)", set(1));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 0", set(1, WARN_THRESHOLD));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 0", set(1, FAIL_THRESHOLD), FAIL_THRESHOLD - 1);
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (1, ?)", set(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertValid("UPDATE %s SET v = v - ? WHERE k = 1", set(1));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", set(1));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 2", set(1, WARN_THRESHOLD + 1));
+
+        assertFails("INSERT INTO %s (k, v) VALUES (3, ?)", set(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+        assertValid("UPDATE %s SET v = v - ? WHERE k = 3", set(1));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", set(1));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 4", set(1, FAIL_THRESHOLD + 1), FAIL_THRESHOLD);
+    }
+
+    @Test
+    public void testListSize() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v list<int>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", list(0));
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", list(1));
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", list(WARN_THRESHOLD));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", list(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", list(FAIL_THRESHOLD), FAIL_THRESHOLD);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", list(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", list(FAIL_THRESHOLD + 10), FAIL_THRESHOLD + 10);
+    }
+
+    @Test
+    public void testListSizeFrozen() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v frozen<list<int>>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", list(0));
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", list(1));
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", list(WARN_THRESHOLD));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", list(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", list(FAIL_THRESHOLD), FAIL_THRESHOLD);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", list(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+    }
+
+    @Test
+    public void testListSizeWithUpdates() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v list<int>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, ?)", list(1));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 0", list(1, WARN_THRESHOLD));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 0", list(1, FAIL_THRESHOLD), FAIL_THRESHOLD - 1);
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (1, ?)", list(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertValid("UPDATE %s SET v = v - ? WHERE k = 1", list(1));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", list(1));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 2", list(1, WARN_THRESHOLD + 1));
+
+        assertFails("INSERT INTO %s (k, v) VALUES (3, ?)", list(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+        assertValid("UPDATE %s SET v = v - ? WHERE k = 3", list(1));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", list(1));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 4", list(1, FAIL_THRESHOLD + 1), FAIL_THRESHOLD);
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", set(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertValid("UPDATE %s SET v[0] = null WHERE k = 5");
+    }
+
+    @Test
+    public void testMapSize() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v map<int, int>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", map(0));
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", map(1));
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", map(WARN_THRESHOLD));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", map(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", map(FAIL_THRESHOLD), FAIL_THRESHOLD);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", map(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", map(FAIL_THRESHOLD + 10), FAIL_THRESHOLD + 10);
+    }
+
+    @Test
+    public void testMapSizeFrozen() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v frozen<map<int, int>>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, null)");
+        assertValid("INSERT INTO %s (k, v) VALUES (1, ?)", map(0));
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", map(1));
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", map(WARN_THRESHOLD));
+        assertWarns("INSERT INTO %s (k, v) VALUES (5, ?)", map(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertWarns("INSERT INTO %s (k, v) VALUES (6, ?)", map(FAIL_THRESHOLD), FAIL_THRESHOLD);
+        assertFails("INSERT INTO %s (k, v) VALUES (7, ?)", map(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+    }
+
+    @Test
+    public void testMapSizeWithUpdates() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v map<int, int>)");
+
+        assertValid("INSERT INTO %s (k, v) VALUES (0, ?)", map(1));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 0", map(1, WARN_THRESHOLD));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 0", map(1, FAIL_THRESHOLD), FAIL_THRESHOLD - 1);
+
+        assertWarns("INSERT INTO %s (k, v) VALUES (1, ?)", map(WARN_THRESHOLD + 1), WARN_THRESHOLD + 1);
+        assertValid("UPDATE %s SET v = v - ? WHERE k = 1", set(1));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (2, ?)", map(1));
+        assertValid("UPDATE %s SET v = v + ? WHERE k = 2", map(1, WARN_THRESHOLD + 1));
+
+        assertFails("INSERT INTO %s (k, v) VALUES (3, ?)", map(FAIL_THRESHOLD + 1), FAIL_THRESHOLD + 1);
+        assertValid("UPDATE %s SET v = v - ? WHERE k = 3", set(1));
+
+        assertValid("INSERT INTO %s (k, v) VALUES (4, ?)", map(1));
+        assertWarns("UPDATE %s SET v = v + ? WHERE k = 4", map(1, FAIL_THRESHOLD + 1), FAIL_THRESHOLD);
+    }
+
+    @Override
+    protected String createTable(String query)
+    {
+        String table = super.createTable(query);
+        disableCompaction();
+        return table;
+    }
+
+    private void assertValid(String query, ByteBuffer collection) throws Throwable
+    {
+        assertValid(execute(query, collection));
+    }
+
+    private void assertWarns(String query, ByteBuffer collection, int numItems) throws Throwable
+    {
+        assertWarns(execute(query, collection),
+                    String.format("Detected collection v with %d items, this exceeds the warning threshold of %d.",
+                                  numItems, WARN_THRESHOLD));
+    }
+
+    private void assertFails(String query, ByteBuffer collection, int numItems) throws Throwable
+    {
+        assertFails(execute(query, collection),
+                    String.format("Detected collection v with %d items, this exceeds the failure threshold of %d.",
+                                  numItems, FAIL_THRESHOLD));
+    }
+
+    private CheckedFunction execute(String query, ByteBuffer collection)
+    {
+        return () -> execute(userClientState, query, Collections.singletonList(collection));
+    }
+
+    private static ByteBuffer set(int numElements)
+    {
+        return set(0, numElements);
+    }
+
+    private static ByteBuffer set(int startInclusive, int endExclusive)
+    {
+        return SetType.getInstance(Int32Type.instance, true)
+                      .decompose(collection(startInclusive, endExclusive, Collectors.toSet()));
+    }
+
+    private static ByteBuffer list(int numElements)
+    {
+        return list(0, numElements);
+    }
+
+    private static ByteBuffer list(int startInclusive, int endExclusive)
+    {
+        return ListType.getInstance(Int32Type.instance, false)
+                       .decompose(collection(startInclusive, endExclusive, Collectors.toList()));
+    }
+
+    private static ByteBuffer map(int numElements)
+    {
+        return map(0, numElements);
+    }
+
+    private static ByteBuffer map(int startInclusive, int endExclusive)
+    {
+        return MapType.getInstance(Int32Type.instance, Int32Type.instance, true)
+                      .decompose(collection(startInclusive, endExclusive, Collectors.toMap(x -> x, x -> x)));
+    }
+
+    private static <R, A> R collection(int startInclusive, int endExclusive, Collector<Integer, A, R> collector)
+    {
+        return IntStream.range(startInclusive, endExclusive).boxed().collect(collector);
+    }
+}
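
The INSERT assertions above all probe the same boundaries: a collection with exactly WARN_THRESHOLD items is still valid, one more item warns, and anything above FAIL_THRESHOLD is rejected. A hedged sketch of that max-threshold shape, with illustrative names and the assumption that a negative threshold means "disabled":

// Sketch of a "maximum" threshold guardrail such as items_per_collection:
// valid up to the warn threshold, warns above it, fails above the fail threshold.
enum Outcome { VALID, WARN, FAIL }

final class MaxThreshold
{
    final long warn;
    final long fail;

    MaxThreshold(long warn, long fail)
    {
        this.warn = warn;
        this.fail = fail;
    }

    Outcome check(long value)
    {
        if (fail >= 0 && value > fail)   // negative threshold treated as disabled (assumption)
            return Outcome.FAIL;
        if (warn >= 0 && value > warn)
            return Outcome.WARN;
        return Outcome.VALID;
    }

    public static void main(String[] args)
    {
        MaxThreshold itemsPerCollection = new MaxThreshold(10, 20); // the test's WARN/FAIL thresholds
        System.out.println(itemsPerCollection.check(10));  // VALID (at the warn threshold)
        System.out.println(itemsPerCollection.check(11));  // WARN
        System.out.println(itemsPerCollection.check(20));  // WARN  (at the fail threshold)
        System.out.println(itemsPerCollection.check(21));  // FAIL
    }
}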
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailKeyspacesTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailKeyspacesTest.java
new file mode 100644
index 0000000..6c60093
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailKeyspacesTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+import org.apache.cassandra.schema.Schema;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for the max number of user keyspaces, {@link Guardrails#keyspaces}.
+ */
+public class GuardrailKeyspacesTest extends ThresholdTester
+{
+    private static final int WARN_THRESHOLD = 3; // CQLTester creates two keyspaces
+    private static final int FAIL_THRESHOLD = WARN_THRESHOLD + 1;
+
+    public GuardrailKeyspacesTest()
+    {
+        super(WARN_THRESHOLD,
+              FAIL_THRESHOLD,
+              Guardrails.keyspaces,
+              Guardrails::setKeyspacesThreshold,
+              Guardrails::getKeyspacesWarnThreshold,
+              Guardrails::getKeyspacesFailThreshold);
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        return Schema.instance.getUserKeyspaces().size();
+    }
+
+    @Test
+    public void testCreateKeyspace() throws Throwable
+    {
+        // create keyspaces until hitting the two warn/fail thresholds
+        String k1 = assertCreateKeyspaceValid();
+        String k2 = assertCreateKeyspaceWarns();
+        assertCreateKeyspaceFails();
+
+        // drop a keyspace and hit the warn/fail threshold again
+        dropKeyspace(k2);
+        String k3 = assertCreateKeyspaceWarns();
+        assertCreateKeyspaceFails();
+
+        // drop two keyspaces and hit the warn/fail threshold again
+        dropKeyspace(k1);
+        dropKeyspace(k3);
+        assertCreateKeyspaceValid();
+        assertCreateKeyspaceWarns();
+        assertCreateKeyspaceFails();
+
+        // test excluded users
+        testExcludedUsers(this::createKeyspaceQuery,
+                          this::createKeyspaceQuery,
+                          this::createKeyspaceQuery);
+    }
+
+    private void dropKeyspace(String keyspaceName)
+    {
+        schemaChange(format("DROP KEYSPACE %s", keyspaceName));
+    }
+
+    private String assertCreateKeyspaceValid() throws Throwable
+    {
+        String keyspaceName = createKeyspaceName();
+        assertMaxThresholdValid(createKeyspaceQuery(keyspaceName));
+        return keyspaceName;
+    }
+
+    private String assertCreateKeyspaceWarns() throws Throwable
+    {
+        String keyspaceName = createKeyspaceName();
+        assertThresholdWarns(createKeyspaceQuery(keyspaceName),
+                             format("Creating keyspace %s, current number of keyspaces %d exceeds warning threshold of %d",
+                                    keyspaceName, currentValue() + 1, WARN_THRESHOLD)
+        );
+        return keyspaceName;
+    }
+
+    private void assertCreateKeyspaceFails() throws Throwable
+    {
+        String keyspaceName = createKeyspaceName();
+        assertThresholdFails(createKeyspaceQuery(keyspaceName),
+                             format("Cannot have more than %d keyspaces, aborting the creation of keyspace %s",
+                                    FAIL_THRESHOLD, keyspaceName)
+        );
+    }
+
+    private String createKeyspaceQuery()
+    {
+        return createKeyspaceQuery(createKeyspaceName());
+    }
+
+    private String createKeyspaceQuery(String keyspaceName)
+    {
+        return format("CREATE KEYSPACE %s WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }",
+                      keyspaceName);
+    }
+}
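
Unlike the per-query guardrails, this one is measured against cluster state: the warning message reports currentValue() + 1 because the check runs against the keyspace count that the CREATE would produce. A small sketch of that reading (hypothetical names; not the production code path):

import java.util.function.LongSupplier;

// Sketch of a schema-level count guardrail: the threshold is compared against the
// number of user keyspaces the pending CREATE would leave behind.
final class KeyspaceCountGuard
{
    final long warn;
    final long fail;
    final LongSupplier currentUserKeyspaces;   // in the real test: Schema.instance.getUserKeyspaces().size()

    KeyspaceCountGuard(long warn, long fail, LongSupplier currentUserKeyspaces)
    {
        this.warn = warn;
        this.fail = fail;
        this.currentUserKeyspaces = currentUserKeyspaces;
    }

    String checkBeforeCreate(String keyspace)
    {
        long after = currentUserKeyspaces.getAsLong() + 1;  // the count the CREATE would produce
        if (after > fail)
            return "Cannot have more than " + fail + " keyspaces, aborting the creation of keyspace " + keyspace;
        if (after > warn)
            return "Creating keyspace " + keyspace + ", current number of keyspaces " + after
                   + " exceeds warning threshold of " + warn;
        return null;                                         // null: the creation is allowed silently
    }

    public static void main(String[] args)
    {
        long[] existing = { 2 };                             // CQLTester starts with two user keyspaces
        KeyspaceCountGuard guard = new KeyspaceCountGuard(3, 4, () -> existing[0]);
        System.out.println(guard.checkBeforeCreate("ks1"));  // null: the third keyspace is still within the warn threshold
        existing[0] = 3;
        System.out.println(guard.checkBeforeCreate("ks2"));  // warning: would be the fourth keyspace
        existing[0] = 4;
        System.out.println(guard.checkBeforeCreate("ks3"));  // failure: would exceed the fail threshold of 4
    }
}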
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java
new file mode 100644
index 0000000..d3df983
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.locator.AbstractEndpointSnitch;
+import org.apache.cassandra.locator.IEndpointSnitch;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.service.StorageService;
+import org.assertj.core.api.Assertions;
+
+import static java.lang.String.format;
+import static org.junit.Assert.assertNotNull;
+
+public class GuardrailMinimumReplicationFactorTest extends ThresholdTester
+{
+    private static final int MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD = 4;
+    private static int MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD = 1;
+    private static final int DEFAULT_REPLICATION_FACTOR = 2;
+    private static final int DISABLED_GUARDRAIL = -1;
+    private static final String WHAT = "minimum_replication_factor";
+    private static final String DATACENTER1 = "datacenter1";
+    private static final String KS = "ks";
+    private final TriConsumer<Guardrails, Integer, Integer> setter;
+
+    public GuardrailMinimumReplicationFactorTest()
+    {
+        super(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD,
+              MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD,
+              Guardrails.minimumReplicationFactor,
+              Guardrails::setMinimumReplicationFactorThreshold,
+              Guardrails::getMinimumReplicationFactorWarnThreshold,
+              Guardrails::getMinimumReplicationFactorFailThreshold);
+
+        this.setter = Guardrails::setMinimumReplicationFactorThreshold;
+    }
+
+    @Before
+    public void setupTest() throws Throwable
+    {
+        DatabaseDescriptor.setDefaultKeyspaceRF(DEFAULT_REPLICATION_FACTOR);
+        MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD = 2;
+    }
+
+    @After
+    public void cleanupTest() throws Throwable
+    {
+        execute("DROP KEYSPACE IF EXISTS ks");
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        return Long.parseLong((Keyspace.open(KS).getReplicationStrategy()).configOptions.get(DATACENTER1));
+    }
+
+    @Override
+    protected List<String> getWarnings()
+    {
+        List<String> warnings = ClientWarn.instance.getWarnings();
+
+        return warnings == null
+               ? Collections.emptyList()
+               : warnings.stream()
+                         .filter(w -> !w.contains("keyspace ks is higher than the number of nodes 1 for datacenter") &&
+                                      !w.contains("When increasing replication factor you need to run a full (-full) repair to distribute the data") &&
+                                      !w.contains("keyspace ks is higher than the number of nodes") &&
+                                      !w.contains("Your replication factor 4 for keyspace ks is higher than the number of nodes 2 for datacenter datacenter2"))
+                         .collect(Collectors.toList());
+    }
+
+    @Test
+    public void testConfigValidation()
+    {
+        assertNotNull(guardrail);
+        setter.accept(guardrails(), DISABLED_GUARDRAIL, DISABLED_GUARDRAIL);
+
+        assertInvalidPositiveIntProperty((g, a) -> setter.accept(g, DISABLED_GUARDRAIL, a), Integer.MIN_VALUE, Integer.MAX_VALUE, WHAT + "_fail_threshold");
+        assertInvalidPositiveIntProperty((g, a) -> setter.accept(g, DISABLED_GUARDRAIL, a), -2, Integer.MAX_VALUE, WHAT + "_fail_threshold");
+        assertValidProperty((g, a) -> setter.accept(g, DISABLED_GUARDRAIL, a), DISABLED_GUARDRAIL);
+        assertInvalidPositiveIntProperty((g, a) -> setter.accept(g, DISABLED_GUARDRAIL, a), 0, Integer.MAX_VALUE, WHAT + "_fail_threshold");
+        assertValidProperty((g, a) -> setter.accept(g, DISABLED_GUARDRAIL, a), 1);
+        assertValidProperty((g, a) -> setter.accept(g, DISABLED_GUARDRAIL, a), 2);
+
+        assertInvalidPositiveIntProperty((g, w) -> setter.accept(g, w, DISABLED_GUARDRAIL), Integer.MIN_VALUE, Integer.MAX_VALUE, WHAT + "_warn_threshold");
+        assertInvalidPositiveIntProperty((g, w) -> setter.accept(g, w, DISABLED_GUARDRAIL), -2, Integer.MAX_VALUE, WHAT + "_warn_threshold");
+        assertValidProperty((g, w) -> setter.accept(g, w, DISABLED_GUARDRAIL), DISABLED_GUARDRAIL);
+        assertInvalidPositiveIntProperty((g, w) -> setter.accept(g, w, DISABLED_GUARDRAIL), 0, Integer.MAX_VALUE, WHAT + "_warn_threshold");
+        assertValidProperty((g, w) -> setter.accept(g, w, DISABLED_GUARDRAIL), 1);
+        assertValidProperty((g, w) -> setter.accept(g, w, DISABLED_GUARDRAIL), 2);
+
+        Assertions.assertThatThrownBy(() -> setter.accept(guardrails(), 1, 2))
+                  .hasMessageContaining(guardrail.name + "_warn_threshold should be greater than the fail threshold");
+    }
+
+    @Test
+    public void testMinKeyspaceRFDisabled() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(DISABLED_GUARDRAIL, DISABLED_GUARDRAIL);
+        assertMinThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}");
+        assertMinThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 3}");
+    }
+
+    @Test
+    public void testSimpleStrategy() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 3}",
+                    format("The keyspace %s has a replication factor of 3, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 1}",
+                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+    }
+
+    @Test
+    public void testMultipleDatacenter() throws Throwable
+    {
+        IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
+        DatabaseDescriptor.setEndpointSnitch(new AbstractEndpointSnitch()
+        {
+            public static final String RACK1 = ServerTestUtils.RACK1;
+
+            @Override
+            public String getRack(InetAddressAndPort endpoint) { return RACK1; }
+
+            @Override
+            public String getDatacenter(InetAddressAndPort endpoint) { return "datacenter2"; }
+
+            @Override
+            public int compareEndpoints(InetAddressAndPort target, Replica a1, Replica a2) { return 0; }
+        });
+
+        List<String> twoWarnings = Arrays.asList(format("The keyspace %s has a replication factor of 2, below the warning threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD),
+                                                 format("The keyspace %s has a replication factor of 2, below the warning threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+        
+        StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), InetAddressAndPort.getByName("127.0.0.255"));
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertValid("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 4 };");
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 2 };",
+                    format("The keyspace %s has a replication factor of 2, below the warning threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 2 };", twoWarnings);
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 1 };",
+                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+        assertFails("CREATE KEYSPACE ks1 WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 1, 'datacenter2' : 1 };",
+                    format("The keyspace ks1 has a replication factor of 1, below the failure threshold of %d.", MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+
+        DatabaseDescriptor.setEndpointSnitch(snitch);
+        execute("DROP KEYSPACE IF EXISTS ks1");
+    }
+
+    @Test
+    public void testMinKeyspaceRFOnlyWarnAbove() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, DISABLED_GUARDRAIL);
+        assertMinThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 6}");
+        assertMinThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 5}");
+    }
+
+    @Test
+    public void testMinKeyspaceRFOnlyWarnBelow() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, DISABLED_GUARDRAIL);
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}",
+                    format("The keyspace %s has a replication factor of 3, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}",
+                    format("The keyspace %s has a replication factor of 2, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+    }
+
+    @Test
+    public void testMinKeyspaceRFOnlyFailAbove() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(DISABLED_GUARDRAIL, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertMinThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 4}");
+        assertMinThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}");
+    }
+
+    @Test
+    public void testMinKeyspaceRFOnlyFailBelow() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(DISABLED_GUARDRAIL, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
+                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+    }
+
+    @Test
+    public void testMinKeyspaceRFOnlyFailBelowAlter() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(DISABLED_GUARDRAIL, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        execute("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}");
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
+                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+    }
+
+    @Test
+    public void testMinKeyspaceRFWarnAbove() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertMinThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 6}");
+        assertMinThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 5}");
+    }
+
+    @Test
+    public void testMinKeyspaceRFWarnFailBetween() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}",
+                    format("The keyspace %s has a replication factor of 3, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}",
+                    format("The keyspace %s has a replication factor of 2, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+    }
+
+    @Test
+    public void testMinKeyspaceRFFailBelow() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
+                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+    }
+
+    @Test
+    public void testMinKeyspaceRFFailBelowAlter() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        execute("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 4}");
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
+                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+    }
+
+    @Test
+    public void testMinRFGreaterThanDefaultRF()
+    {
+        try
+        {
+            DatabaseDescriptor.setDefaultKeyspaceRF(1);
+            guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        }
+        catch (IllegalArgumentException e)
+        {
+            String expectedMessage = "";
+
+            if(guardrails().getMinimumReplicationFactorFailThreshold() > DatabaseDescriptor.getDefaultKeyspaceRF())
+                expectedMessage = format("%s_fail_threshold to be set (%d) cannot be greater than default_keyspace_rf (%d)",
+                                         WHAT, guardrails().getMinimumReplicationFactorFailThreshold(), DatabaseDescriptor.getDefaultKeyspaceRF());
+            Assertions.assertThat(e.getMessage()).contains(expectedMessage);
+        }
+    }
+}
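
minimum_replication_factor is a "minimum" threshold, so the comparisons run the other way around: a keyspace triggers the guardrail when its replication factor drops below the configured bounds, and a usable configuration needs warn >= fail, which is what testConfigValidation asserts. A sketch under those assumptions (illustrative class; -1 disables a bound, matching DISABLED_GUARDRAIL):

// Sketch of a "minimum" threshold guardrail: values BELOW the bounds warn or fail.
final class MinimumRfGuard
{
    final int warn;   // warn when rf < warn (ignored if -1)
    final int fail;   // fail when rf < fail (ignored if -1)

    MinimumRfGuard(int warn, int fail)
    {
        if (warn != -1 && fail != -1 && warn < fail)
            throw new IllegalArgumentException("minimum_replication_factor_warn_threshold should be greater than the fail threshold");
        this.warn = warn;
        this.fail = fail;
    }

    String check(String keyspace, int rf)
    {
        if (fail != -1 && rf < fail)
            return "The keyspace " + keyspace + " has a replication factor of " + rf
                   + ", below the failure threshold of " + fail + ".";
        if (warn != -1 && rf < warn)
            return "The keyspace " + keyspace + " has a replication factor of " + rf
                   + ", below the warning threshold of " + warn + ".";
        return null;   // valid
    }

    public static void main(String[] args)
    {
        MinimumRfGuard guard = new MinimumRfGuard(4, 2);   // the warn/fail thresholds used by the test
        System.out.println(guard.check("ks", 4));          // null: meets the warn bound
        System.out.println(guard.check("ks", 3));          // warning
        System.out.println(guard.check("ks", 1));          // failure
    }
}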
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailNewCompactStorageTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailNewCompactStorageTest.java
new file mode 100644
index 0000000..ce786ed
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailNewCompactStorageTest.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+public class GuardrailNewCompactStorageTest extends GuardrailTester
+{
+    private void setGuardrail(boolean enabled)
+    {
+        Guardrails.instance.setCompactTablesEnabled(enabled);
+    }
+
+    @Test
+    public void testFeatureEnabled() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck)) WITH COMPACT STORAGE");
+    }
+
+    @Test
+    public void testFeatureDisabled() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck)) WITH COMPACT STORAGE",
+                    "Creation of new COMPACT STORAGE tables");
+    }
+}
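
Compact-storage creation is gated by a plain on/off flag rather than a threshold, so the behaviour the two tests cover fits in a few lines. A hypothetical sketch of that enable-flag shape:

// Sketch of an on/off ("enable flag") guardrail like compact_tables_enabled:
// when the flag is off, the guarded operation is rejected outright; there is no warn level.
final class EnableFlagGuard
{
    private volatile boolean enabled;

    void setEnabled(boolean enabled) { this.enabled = enabled; }

    void ensureEnabled(String what)
    {
        if (!enabled)
            throw new IllegalStateException(what + " is not allowed");
    }

    public static void main(String[] args)
    {
        EnableFlagGuard compactTables = new EnableFlagGuard();
        compactTables.setEnabled(true);
        compactTables.ensureEnabled("Creation of new COMPACT STORAGE tables");   // passes, as testFeatureEnabled expects
        compactTables.setEnabled(false);
        try
        {
            compactTables.ensureEnabled("Creation of new COMPACT STORAGE tables");
        }
        catch (IllegalStateException e)
        {
            System.out.println(e.getMessage());                                  // rejected, as testFeatureDisabled expects
        }
    }
}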
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailPageSizeTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailPageSizeTest.java
new file mode 100644
index 0000000..68122f2
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailPageSizeTest.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Collections;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLStatement;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.transport.ProtocolVersion;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for the page size, {@link Guardrails#pageSize}.
+ */
+public class GuardrailPageSizeTest extends ThresholdTester
+{
+    private static final int PAGE_SIZE_WARN_THRESHOLD = 5;
+    private static final int PAGE_SIZE_FAIL_THRESHOLD = 10;
+
+    public GuardrailPageSizeTest()
+    {
+        super(PAGE_SIZE_WARN_THRESHOLD,
+              PAGE_SIZE_FAIL_THRESHOLD,
+              Guardrails.pageSize,
+              Guardrails::setPageSizeThreshold,
+              Guardrails::getPageSizeWarnThreshold,
+              Guardrails::getPageSizeFailThreshold);
+    }
+
+    @Before
+    public void setupTest()
+    {
+        createTable("CREATE TABLE IF NOT EXISTS %s (k INT, c INT, v TEXT, PRIMARY KEY(k, c))");
+    }
+
+    @Test
+    public void testSelectStatementAgainstPageSize() throws Throwable
+    {
+        // regular query
+        String query = "SELECT * FROM %s";
+        assertPagingValid(query, 3);
+        assertPagingValid(query, PAGE_SIZE_WARN_THRESHOLD);
+        assertPagingWarns(query, 6);
+        assertPagingWarns(query, PAGE_SIZE_FAIL_THRESHOLD);
+        assertPagingFails(query, 11);
+
+        // aggregation query
+        query = "SELECT COUNT(*) FROM %s WHERE k=0";
+        assertPagingValid(query, 3);
+        assertPagingValid(query, PAGE_SIZE_WARN_THRESHOLD);
+        assertPagingWarns(query, 6);
+        assertPagingWarns(query, PAGE_SIZE_FAIL_THRESHOLD);
+        assertPagingFails(query, 11);
+
+        // query with limit over thresholds
+        query = "SELECT * FROM %s LIMIT 100";
+        assertPagingValid(query, 3);
+        assertPagingValid(query, PAGE_SIZE_WARN_THRESHOLD);
+        assertPagingWarns(query, 6);
+        assertPagingWarns(query, PAGE_SIZE_FAIL_THRESHOLD);
+        assertPagingFails(query, 11);
+
+        // query with limit under thresholds
+        query = "SELECT * FROM %s LIMIT 1";
+        assertPagingValid(query, 3);
+        assertPagingValid(query, PAGE_SIZE_WARN_THRESHOLD);
+        assertPagingValid(query, 6);
+        assertPagingValid(query, PAGE_SIZE_FAIL_THRESHOLD);
+        assertPagingValid(query, 11);
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        assertPagingIgnored("SELECT * FROM %s", PAGE_SIZE_WARN_THRESHOLD + 1);
+        assertPagingIgnored("SELECT * FROM %s", PAGE_SIZE_FAIL_THRESHOLD + 1);
+    }
+
+    private void assertPagingValid(String query, int pageSize) throws Throwable
+    {
+        assertValid(() -> executeWithPaging(userClientState, query, pageSize));
+    }
+
+    private void assertPagingIgnored(String query, int pageSize) throws Throwable
+    {
+        assertValid(() -> executeWithPaging(superClientState, query, pageSize));
+        assertValid(() -> executeWithPaging(systemClientState, query, pageSize));
+    }
+
+    private void assertPagingWarns(String query, int pageSize) throws Throwable
+    {
+        assertWarns(() -> executeWithPaging(userClientState, query, pageSize),
+                    format("Query for table %s with page size %s exceeds warning threshold of %s.",
+                           currentTable(), pageSize, PAGE_SIZE_WARN_THRESHOLD));
+    }
+
+    private void assertPagingFails(String query, int pageSize) throws Throwable
+    {
+        assertFails(() -> executeWithPaging(userClientState, query, pageSize),
+                    format("Aborting query for table %s, page size %s exceeds fail threshold of %s.",
+                           currentTable(), pageSize, PAGE_SIZE_FAIL_THRESHOLD));
+    }
+
+    private void executeWithPaging(ClientState state, String query, int pageSize)
+    {
+        QueryState queryState = new QueryState(state);
+
+        String formattedQuery = formatQuery(query);
+        CQLStatement statement = QueryProcessor.parseStatement(formattedQuery, queryState.getClientState());
+        statement.validate(state);
+
+        QueryOptions options = QueryOptions.create(ConsistencyLevel.ONE,
+                                                   Collections.emptyList(),
+                                                   false,
+                                                   pageSize,
+                                                   null,
+                                                   null,
+                                                   ProtocolVersion.CURRENT,
+                                                   KEYSPACE);
+
+        statement.executeLocally(queryState, options);
+    }
+}
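
The LIMIT cases in testSelectStatementAgainstPageSize are the interesting ones: a page size of 11 fails with LIMIT 100 but stays valid with LIMIT 1. One plausible reading, stated here as an assumption about the observable behaviour rather than about the implementation, is that the guarded value is the effective page size, i.e. the requested page size capped by the query limit:

// Sketch of the assumed "effective page size" reading of the LIMIT cases above.
final class PageSizeCheck
{
    static final int WARN = 5;    // the test's warn threshold
    static final int FAIL = 10;   // the test's fail threshold

    static String check(int pageSize, int limit)
    {
        int effective = Math.min(pageSize, limit);   // assumption: LIMIT caps what a page can return
        if (effective > FAIL) return "fail: page size " + effective;
        if (effective > WARN) return "warn: page size " + effective;
        return "valid";
    }

    public static void main(String[] args)
    {
        System.out.println(check(11, Integer.MAX_VALUE)); // fail  - no LIMIT
        System.out.println(check(6, Integer.MAX_VALUE));  // warn
        System.out.println(check(11, 100));               // fail  - a LIMIT above the thresholds changes nothing
        System.out.println(check(11, 1));                 // valid - LIMIT 1 keeps the effective page below both thresholds
    }
}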
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailPartitionKeysInSelectTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailPartitionKeysInSelectTest.java
new file mode 100644
index 0000000..d4b913e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailPartitionKeysInSelectTest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Before;
+import org.junit.Test;
+
+public class GuardrailPartitionKeysInSelectTest extends ThresholdTester
+{
+    private static final int PARTITION_KEYS_SELECT_WARN_THRESHOLD = 3;
+    private static final int PARTITION_KEYS_SELECT_FAIL_THRESHOLD = 5;
+    private String tableName;
+
+    public GuardrailPartitionKeysInSelectTest()
+    {
+        super(PARTITION_KEYS_SELECT_WARN_THRESHOLD,
+              PARTITION_KEYS_SELECT_FAIL_THRESHOLD,
+              Guardrails.partitionKeysInSelect,
+              Guardrails::setPartitionKeysInSelectThreshold,
+              Guardrails::getPartitionKeysInSelectWarnThreshold,
+              Guardrails::getPartitionKeysInSelectFailThreshold);
+    }
+
+    @Before
+    public void setupTest()
+    {
+        tableName = createTable("CREATE TABLE %s (k INT, c INT, v TEXT, PRIMARY KEY(k, c))");
+    }
+
+    @Test
+    public void testSelectStatementAgainstInClausePartitionKeys() throws Throwable
+    {
+        assertValid("SELECT k, c, v FROM %s WHERE k=10");
+
+        assertValid("SELECT k, c, v FROM %s WHERE k IN (2, 3)");
+
+        assertValid("SELECT k, c, v FROM %s WHERE k = 2 and c IN (2, 3, 4, 5, 6, 7)");
+
+        assertWarns("SELECT k, c, v FROM %s WHERE k IN (2, 3, 4, 5)",
+                    String.format("Query with partition keys in IN clause on table %s, with " +
+                                  "number of partition keys 4 exceeds warning threshold of 3.", tableName));
+
+        assertFails("SELECT k, c, v FROM %s WHERE k IN (2, 3, 4, 5, 6, 7)",
+                    String.format("Aborting query with partition keys in IN clause on table %s, " +
+                                  "number of partition keys 6 exceeds fail threshold of 5.", tableName));
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        testExcludedUsers(() -> "SELECT k, c, v FROM %s WHERE k IN (2, 3, 4, 5)",
+                          () -> "SELECT k, c, v FROM %s WHERE k IN (2, 3, 4, 5, 6, 7)");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailReadBeforeWriteListOperationsTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailReadBeforeWriteListOperationsTest.java
new file mode 100644
index 0000000..8184fa6
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailReadBeforeWriteListOperationsTest.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Tests the guardrail for read-before-write list operations, {@link Guardrails#readBeforeWriteListOperationsEnabled}.
+ */
+@RunWith(Parameterized.class)
+public class GuardrailReadBeforeWriteListOperationsTest extends GuardrailTester
+{
+    @Parameterized.Parameter
+    public boolean enabled;
+
+    @Parameterized.Parameters(name = "read_before_write_list_operations_enabled={0}")
+    public static Collection<Object> data()
+    {
+        return Arrays.asList(false, true);
+    }
+
+    public GuardrailReadBeforeWriteListOperationsTest()
+    {
+        super(Guardrails.readBeforeWriteListOperationsEnabled);
+    }
+
+    @Before
+    public void before()
+    {
+        guardrails().setReadBeforeWriteListOperationsEnabled(enabled);
+        Assert.assertEquals(enabled, guardrails().getReadBeforeWriteListOperationsEnabled());
+
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
+    }
+
+    @Test
+    public void testInsertFullValue() throws Throwable
+    {
+        // insert from scratch
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2])");
+        assertRows(row(0, list(1, 2)));
+
+        // insert overriding previous value
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [2, 3])");
+        assertRows(row(0, list(2, 3)));
+    }
+
+    @Test
+    public void testUpdateFullValue() throws Throwable
+    {
+        // update from scratch
+        assertValid("UPDATE %s SET l = [1, 2] WHERE k = 0");
+        assertRows(row(0, list(1, 2)));
+
+        // update overriding previous value
+        assertValid("UPDATE %s SET l = [2, 3] WHERE k = 0");
+        assertRows(row(0, list(2, 3)));
+    }
+
+    @Test
+    public void testDeleteFullValue() throws Throwable
+    {
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2])");
+        assertValid("DELETE l FROM %s WHERE k = 0");
+        assertRows(row(0, null));
+    }
+
+    @Test
+    public void testAppend() throws Throwable
+    {
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2])");
+        assertValid("UPDATE %s SET l = l + [3, 4] WHERE k = 0");
+        assertRows(row(0, list(1, 2, 3, 4)));
+    }
+
+    @Test
+    public void testPrepend() throws Throwable
+    {
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2])");
+        assertValid("UPDATE %s SET l = [3, 4] + l WHERE k = 0");
+        assertRows(row(0, list(3, 4, 1, 2)));
+    }
+
+    @Test
+    public void testUpdateByIndex() throws Throwable
+    {
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2, 3])");
+        testGuardrail("UPDATE %s SET l[1] = 4 WHERE k = 0",
+                      "Setting of list items by index requiring read before write is not allowed",
+                      row(0, list(1, 4, 3)));
+    }
+
+    @Test
+    public void testDeleteByIndex() throws Throwable
+    {
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2, 3])");
+        testGuardrail("DELETE l[1] FROM %s WHERE k = 0",
+                      "Removal of list items by index requiring read before write is not allowed",
+                      row(0, list(1, 3)));
+    }
+
+    @Test
+    public void testDeleteByItem() throws Throwable
+    {
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2, 3])");
+        testGuardrail("UPDATE %s SET l = l - [2] WHERE k = 0",
+                      "Removal of list items requiring read before write is not allowed",
+                      row(0, list(1, 3)));
+    }
+
+    @Test
+    public void testBatch() throws Throwable
+    {
+        assertValid("INSERT INTO %s (k, l) VALUES (0, [1, 2, 3])");
+
+        testGuardrail("BEGIN BATCH UPDATE %s SET l[1] = 0 WHERE k = 0; APPLY BATCH",
+                      "Setting of list items by index requiring read before write is not allowed",
+                      row(0, list(1, 0, 3)));
+
+        testGuardrail("BEGIN BATCH DELETE l[1] FROM %s WHERE k = 0; APPLY BATCH",
+                      "Removal of list items by index requiring read before write is not allowed",
+                      row(0, list(1, 3)));
+
+        testGuardrail("BEGIN BATCH UPDATE %s SET l = l - [3] WHERE k = 0; APPLY BATCH",
+                      "Removal of list items requiring read before write is not allowed",
+                      row(0, list(1)));
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        testExcludedUsers(() -> "INSERT INTO %s (k, l) VALUES (0, [1, 2, 3, 4, 5])",
+                          () -> "UPDATE %s SET l[1] = 4 WHERE k = 0",
+                          () -> "DELETE l[1] FROM %s WHERE k = 0",
+                          () -> "INSERT INTO %s (k, l) VALUES (0, [1, 2, 3])",
+                          () -> "UPDATE %s SET l = l - [2] WHERE k = 0",
+                          () -> "BEGIN BATCH UPDATE %s SET l[1] = 0 WHERE k = 0; APPLY BATCH",
+                          () -> "BEGIN BATCH DELETE l[1] FROM %s WHERE k = 0; APPLY BATCH",
+                          () -> "BEGIN BATCH UPDATE %s SET l = l - [3] WHERE k = 0; APPLY BATCH");
+    }
+
+    private void testGuardrail(String query, String expectedMessage, Object[]... rows) throws Throwable
+    {
+        if (enabled)
+        {
+            assertValid(query);
+            assertRows(rows);
+        }
+        else
+        {
+            assertFails(query, expectedMessage);
+        }
+    }
+
+    private void assertRows(Object[]... rows) throws Throwable
+    {
+        assertRowsNet(executeNet("SELECT * FROM %s"), rows);
+    }
+}
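
The methods above split list mutations into two groups: whole-value writes, appends and prepends are always accepted, while setting or deleting an element by index and removing elements by value require reading the list first and are therefore gated by the flag. A sketch of that classification (illustrative enum, not Cassandra's internal model):

// Sketch of which list operations the tests treat as read-before-write.
enum ListOperation
{
    OVERWRITE(false),        // INSERT/UPDATE of the full list value
    APPEND(false),           // SET l = l + [..]
    PREPEND(false),          // SET l = [..] + l
    SET_BY_INDEX(true),      // SET l[i] = ..
    DELETE_BY_INDEX(true),   // DELETE l[i] FROM ..
    REMOVE_BY_VALUE(true);   // SET l = l - [..]

    final boolean readBeforeWrite;

    ListOperation(boolean readBeforeWrite)
    {
        this.readBeforeWrite = readBeforeWrite;
    }

    boolean allowed(boolean guardrailEnabled)
    {
        return guardrailEnabled || !readBeforeWrite;   // only read-before-write operations are gated
    }

    public static void main(String[] args)
    {
        for (ListOperation op : values())
            System.out.println(op + " allowed with the guardrail disabled: " + op.allowed(false));
    }
}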
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailReadConsistencyLevelsTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailReadConsistencyLevelsTest.java
new file mode 100644
index 0000000..2d189a2
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailReadConsistencyLevelsTest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+
+import static java.lang.String.format;
+import static org.apache.cassandra.db.ConsistencyLevel.ALL;
+import static org.apache.cassandra.db.ConsistencyLevel.EACH_QUORUM;
+import static org.apache.cassandra.db.ConsistencyLevel.LOCAL_ONE;
+import static org.apache.cassandra.db.ConsistencyLevel.LOCAL_QUORUM;
+import static org.apache.cassandra.db.ConsistencyLevel.ONE;
+import static org.apache.cassandra.db.ConsistencyLevel.QUORUM;
+
+/**
+ * Tests the guardrail for read consistency levels, {@link Guardrails#readConsistencyLevels}.
+ */
+public class GuardrailReadConsistencyLevelsTest extends GuardrailConsistencyLevelsTester
+{
+    public GuardrailReadConsistencyLevelsTest()
+    {
+        super("read_consistency_levels_warned",
+              "read_consistency_levels_disallowed",
+              Guardrails.readConsistencyLevels,
+              Guardrails::getReadConsistencyLevelsWarned,
+              Guardrails::getReadConsistencyLevelsDisallowed,
+              Guardrails::getReadConsistencyLevelsWarnedCSV,
+              Guardrails::getReadConsistencyLevelsDisallowedCSV,
+              Guardrails::setReadConsistencyLevelsWarned,
+              Guardrails::setReadConsistencyLevelsDisallowed,
+              Guardrails::setReadConsistencyLevelsWarnedCSV,
+              Guardrails::setReadConsistencyLevelsDisallowedCSV);
+    }
+
+    @Test
+    public void testSelect() throws Throwable
+    {
+        createTable("CREATE TABLE IF NOT EXISTS %s (k INT, c INT, v INT, PRIMARY KEY(k, c))");
+
+        execute("INSERT INTO %s (k, c, v) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (k, c, v) VALUES (0, 1, 1)");
+        execute("INSERT INTO %s (k, c, v) VALUES (1, 0, 2)");
+        execute("INSERT INTO %s (k, c, v) VALUES (1, 1, 3)");
+
+        testQuery("SELECT * FROM %s");
+        testQuery("SELECT * FROM %s WHERE k = 0");
+        testQuery("SELECT * FROM %s WHERE k = 0 AND c = 0");
+    }
+
+    private void testQuery(String query) throws Throwable
+    {
+        testQuery(query, ONE);
+        testQuery(query, ALL);
+        testQuery(query, QUORUM);
+        testQuery(query, EACH_QUORUM);
+        testQuery(query, LOCAL_ONE);
+        testQuery(query, LOCAL_QUORUM);
+    }
+
+    private void testQuery(String query, ConsistencyLevel cl) throws Throwable
+    {
+        warnConsistencyLevels();
+        disableConsistencyLevels();
+        assertValid(query, cl);
+
+        warnConsistencyLevels(cl);
+        assertWarns(query, cl);
+
+        disableConsistencyLevels(cl);
+        assertFails(query, cl);
+    }
+
+    private void assertValid(String query, ConsistencyLevel cl) throws Throwable
+    {
+        assertValid(() -> execute(userClientState, query, cl));
+    }
+
+    private void assertWarns(String query, ConsistencyLevel cl) throws Throwable
+    {
+        assertWarns(() -> execute(userClientState, query, cl),
+                    format("Provided values [%s] are not recommended for read consistency levels (warned values are: %s)",
+                           cl, guardrails().getReadConsistencyLevelsWarned()));
+
+        assertExcludedUsers(query, cl);
+    }
+
+    private void assertFails(String query, ConsistencyLevel cl) throws Throwable
+    {
+        assertFails(() -> execute(userClientState, query, cl),
+                    format("Provided values [%s] are not allowed for read consistency levels (disallowed values are: %s)",
+                           cl, guardrails().getReadConsistencyLevelsDisallowed()));
+
+        assertExcludedUsers(query, cl);
+    }
+
+    private void assertExcludedUsers(String query, ConsistencyLevel cl) throws Throwable
+    {
+        assertValid(() -> execute(superClientState, query, cl));
+        assertValid(() -> execute(systemClientState, query, cl));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailSecondaryIndexTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailSecondaryIndexTest.java
new file mode 100644
index 0000000..910bd01
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailSecondaryIndexTest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for disabling user creation of secondary indexes, {@link Guardrails#createSecondaryIndexesEnabled}.
+ */
+public class GuardrailSecondaryIndexTest extends GuardrailTester
+{
+    public GuardrailSecondaryIndexTest()
+    {
+        super(Guardrails.createSecondaryIndexesEnabled);
+    }
+
+    @Before
+    public void setupTest()
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)");
+    }
+
+    private void setGuardrail(boolean enabled)
+    {
+        guardrails().setSecondaryIndexesEnabled(enabled);
+    }
+
+    @Test
+    public void testCreateIndex() throws Throwable
+    {
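+        // index creation is allowed while the guardrail is enabled and rejected while it is disabled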
+        setGuardrail(true);
+        assertValid(String.format("CREATE INDEX %s ON %s.%s(%s)", "v1_idx", keyspace(), currentTable(), "v1"));
+        assertValid(String.format("CREATE INDEX %s ON %s.%s(%s)", "v2_idx", keyspace(), currentTable(), "v2"));
+
+        setGuardrail(false);
+        assertFails(String.format("CREATE INDEX %s ON %s.%s(%s)", "v3_idx", keyspace(), currentTable(), "v3"), "Creating secondary indexes");
+        assertFails(String.format("CREATE INDEX %s ON %s.%s(%s)", "v4_idx", keyspace(), currentTable(), "v4"), "Creating secondary indexes");
+        assertFails(String.format("CREATE INDEX %s ON %s.%s(%s)", "v2_idx", keyspace(), currentTable(), "v2"), "Creating secondary indexes");
+
+        setGuardrail(true);
+        assertValid(String.format("CREATE INDEX %s ON %s.%s(%s)", "v3_idx", keyspace(), currentTable(), "v3"));
+        assertValid(String.format("CREATE INDEX %s ON %s.%s(%s)", "v4_idx", keyspace(), currentTable(), "v4"));
+
+        // Confirm can drop in either state
+        setGuardrail(false);
+        dropIndex(format("DROP INDEX %s.%s", keyspace(), "v1_idx"));
+
+        setGuardrail(true);
+        dropIndex(format("DROP INDEX %s.%s", keyspace(), "v2_idx"));
+    }
+
+    @Test
+    public void testCustomIndex() throws Throwable
+    {
+        // 2i guardrail will also affect custom indexes
+        setGuardrail(false);
+        assertFails(format("CREATE CUSTOM INDEX ON %%s (%s) USING 'org.apache.cassandra.index.sasi.SASIIndex'", "v4"),
+                    format("Creating secondary indexes", currentTable())
+        );
+
+        // Confirm custom creation will work on flip
+        setGuardrail(true);
+        assertValid(format("CREATE CUSTOM INDEX ON %%s (%s) USING 'org.apache.cassandra.index.sasi.SASIIndex'", "v4"));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailSecondaryIndexesPerTableTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailSecondaryIndexesPerTableTest.java
new file mode 100644
index 0000000..628aead
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailSecondaryIndexesPerTableTest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import com.google.common.base.Strings;
+import org.junit.Test;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for the number of secondary indexes in a table, {@link Guardrails#secondaryIndexesPerTable}.
+ */
+public class GuardrailSecondaryIndexesPerTableTest extends ThresholdTester
+{
+    private static final int INDEXES_PER_TABLE_WARN_THRESHOLD = 1;
+    private static final int INDEXES_PER_TABLE_FAIL_THRESHOLD = 3;
+
+    public GuardrailSecondaryIndexesPerTableTest()
+    {
+        super(INDEXES_PER_TABLE_WARN_THRESHOLD,
+              INDEXES_PER_TABLE_FAIL_THRESHOLD,
+              Guardrails.secondaryIndexesPerTable,
+              Guardrails::setSecondaryIndexesPerTableThreshold,
+              Guardrails::getSecondaryIndexesPerTableWarnThreshold,
+              Guardrails::getSecondaryIndexesPerTableFailThreshold);
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        return getCurrentColumnFamilyStore().indexManager.listIndexes().size();
+    }
+
+    @Test
+    public void testCreateIndex() throws Throwable
+    {
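+        // with warn threshold 1 and fail threshold 3, the first index is valid, the next two warn, and any further creation fails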
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)");
+        assertCreateIndexSucceeds("v1", "v1_idx");
+        assertCurrentValue(1);
+
+        assertCreateIndexWarns("v2", "");
+        assertCreateIndexWarns("v3", "v3_idx");
+        assertCreateIndexFails("v4", "");
+        assertCreateIndexFails("v2", "v2_idx");
+        assertCurrentValue(3);
+
+        // 2i guardrail will also affect custom indexes
+        assertCreateCustomIndexFails("v2");
+
+        // drop one of the indexes; we should be able to create a new index again
+        dropIndex(format("DROP INDEX %s.%s", keyspace(), "v3_idx"));
+        assertCurrentValue(2);
+
+        assertCreateIndexWarns("v3", "");
+        assertCreateCustomIndexFails("v4");
+        assertCurrentValue(3);
+
+        // the threshold is counted per table, so indexes on a new base table start from zero
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)");
+        assertCreateIndexSucceeds("v4", "");
+        assertCreateIndexWarns("v3", "");
+        assertCreateIndexWarns("v2", "");
+        assertCreateIndexFails("v1", "");
+        assertCurrentValue(3);
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int primary key, v1 int, v2 int)");
+        testExcludedUsers(() -> "CREATE INDEX excluded_1 ON %s(v1)",
+                          () -> "CREATE INDEX excluded_2 ON %s(v2)",
+                          () -> "DROP INDEX excluded_1",
+                          () -> "DROP INDEX excluded_2");
+    }
+
+    private void assertCreateIndexSucceeds(String column, String indexName) throws Throwable
+    {
+        assertMaxThresholdValid(format("CREATE INDEX %s ON %s.%s(%s)", indexName, keyspace(), currentTable(), column));
+    }
+
+    private void assertCreateIndexWarns(String column, String indexName) throws Throwable
+    {
+        assertThresholdWarns(format("CREATE INDEX %s ON %%s(%s)", indexName, column),
+                             format("Creating secondary index %son table %s, current number of indexes %s exceeds warning threshold of %s.",
+                                    (Strings.isNullOrEmpty(indexName) ? "" : indexName + " "),
+                                    currentTable(),
+                                    currentValue() + 1,
+                                    guardrails().getSecondaryIndexesPerTableWarnThreshold())
+        );
+    }
+
+    private void assertCreateIndexFails(String column, String indexName) throws Throwable
+    {
+        assertThresholdFails(format("CREATE INDEX %s ON %%s(%s)", indexName, column),
+                             format("aborting the creation of secondary index %son table %s",
+                                    Strings.isNullOrEmpty(indexName) ? "" : indexName + " ", currentTable())
+        );
+    }
+
+    private void assertCreateCustomIndexFails(String column) throws Throwable
+    {
+        assertThresholdFails(format("CREATE CUSTOM INDEX ON %%s (%s) USING 'org.apache.cassandra.index.sasi.SASIIndex'", column),
+                             format("aborting the creation of secondary index on table %s", currentTable())
+        );
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailTablePropertiesTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTablePropertiesTest.java
new file mode 100644
index 0000000..5754c51
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTablePropertiesTest.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.statements.schema.TableAttributes;
+
+import static java.lang.String.format;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests the guardrail for table properties, {@link Guardrails#tableProperties}.
+ */
+public class GuardrailTablePropertiesTest extends GuardrailTester
+{
+    private static final String CREATE_TABLE = "CREATE TABLE %s.%s(pk int, ck int, v int, PRIMARY KEY(pk, ck)) %s";
+    private static final String CREATE_VIEW = "CREATE MATERIALIZED VIEW %s.%s as SELECT * FROM %s.%s " +
+                                              "WHERE pk IS NOT null and ck IS NOT null PRIMARY KEY(ck, pk) %s";
+    private static final String ALTER_VIEW = "ALTER MATERIALIZED VIEW %s.%s WITH %s";
+
+    private static final String WARNED_PROPERTY_NAME = "table_properties_warned";
+    private static final String IGNORED_PROPERTY_NAME = "table_properties_ignored";
+    private static final String DISALLOWED_PROPERTY_NAME = "table_properties_disallowed";
+
+    public GuardrailTablePropertiesTest()
+    {
+        super(Guardrails.tableProperties);
+    }
+
+    @Before
+    public void before()
+    {
+        // only allow "gc_grace_seconds", "comments" and "default_time_to_live"
+        Set<String> allowed = new HashSet<>(Arrays.asList("gc_grace_seconds", "comment", "default_time_to_live"));
+        guardrails().setTablePropertiesDisallowed(TableAttributes.validKeywords()
+                                                                 .stream()
+                                                                 .filter(p -> !allowed.contains(p))
+                                                                 .map(String::toUpperCase)
+                                                                 .collect(Collectors.toSet()));
+        // but actually ignore "comment" and warn about "default_time_to_live"
+        guardrails().setTablePropertiesIgnored("comment");
+        guardrails().setTablePropertiesWarned("default_time_to_live");
+    }
+
+    @Test
+    public void testConfigValidation()
+    {
+        String message = "Invalid value for %s: null is not allowed";
+        assertInvalidProperty(Guardrails::setTablePropertiesWarned, (Set<String>) null, message, WARNED_PROPERTY_NAME);
+        assertInvalidProperty(Guardrails::setTablePropertiesIgnored, (Set<String>) null, message, IGNORED_PROPERTY_NAME);
+        assertInvalidProperty(Guardrails::setTablePropertiesDisallowed, (Set<String>) null, message, DISALLOWED_PROPERTY_NAME);
+
+        assertValidProperty(Collections.emptySet());
+        assertValidProperty(TableAttributes.allKeywords());
+
+        assertValidPropertyCSV("");
+        assertValidPropertyCSV(String.join(",", TableAttributes.allKeywords()));
+
+        assertInvalidProperty(Collections.singleton("invalid"), Collections.singleton("invalid"));
+        assertInvalidProperty(ImmutableSet.of("comment", "invalid1", "invalid2"), ImmutableSet.of("invalid1", "invalid2"));
+        assertInvalidProperty(ImmutableSet.of("invalid1", "invalid2", "comment"), ImmutableSet.of("invalid1", "invalid2"));
+        assertInvalidProperty(ImmutableSet.of("invalid1", "comment", "invalid2"), ImmutableSet.of("invalid1", "invalid2"));
+
+        assertInvalidPropertyCSV("invalid", "[invalid]");
+        assertInvalidPropertyCSV("comment,invalid1,invalid2", "[invalid1, invalid2]");
+        assertInvalidPropertyCSV("invalid1,invalid2,comment", "[invalid1, invalid2]");
+        assertInvalidPropertyCSV("invalid1,comment,invalid2", "[invalid1, invalid2]");
+    }
+
+    private void assertValidProperty(Set<String> properties)
+    {
+        assertValidProperty(Guardrails::setTablePropertiesWarned, Guardrails::getTablePropertiesWarned, properties);
+        assertValidProperty(Guardrails::setTablePropertiesIgnored, Guardrails::getTablePropertiesIgnored, properties);
+        assertValidProperty(Guardrails::setTablePropertiesDisallowed, Guardrails::getTablePropertiesDisallowed, properties);
+    }
+
+    private void assertValidPropertyCSV(String csv)
+    {
+        csv = sortCSV(csv);
+        assertValidProperty(Guardrails::setTablePropertiesWarnedCSV, g -> sortCSV(g.getTablePropertiesWarnedCSV()), csv);
+        assertValidProperty(Guardrails::setTablePropertiesIgnoredCSV, g -> sortCSV(g.getTablePropertiesIgnoredCSV()), csv);
+        assertValidProperty(Guardrails::setTablePropertiesDisallowedCSV, g -> sortCSV(g.getTablePropertiesDisallowedCSV()), csv);
+    }
+
+    private void assertInvalidProperty(Set<String> properties, Set<String> rejected)
+    {
+        String message = "Invalid value for %s: '%s' do not parse as valid table properties";
+        assertInvalidProperty(Guardrails::setTablePropertiesWarned, properties, message, WARNED_PROPERTY_NAME, rejected);
+        assertInvalidProperty(Guardrails::setTablePropertiesIgnored, properties, message, IGNORED_PROPERTY_NAME, rejected);
+        assertInvalidProperty(Guardrails::setTablePropertiesDisallowed, properties, message, DISALLOWED_PROPERTY_NAME, rejected);
+    }
+
+    private void assertInvalidPropertyCSV(String properties, String rejected)
+    {
+        String message = "Invalid value for %s: '%s' do not parse as valid table properties";
+        assertInvalidProperty(Guardrails::setTablePropertiesWarnedCSV, properties, message, WARNED_PROPERTY_NAME, rejected);
+        assertInvalidProperty(Guardrails::setTablePropertiesIgnoredCSV, properties, message, IGNORED_PROPERTY_NAME, rejected);
+        assertInvalidProperty(Guardrails::setTablePropertiesDisallowedCSV, properties, message, DISALLOWED_PROPERTY_NAME, rejected);
+    }
+
+    @Test
+    public void testTableProperties() throws Throwable
+    {
+        // most table properties are not allowed
+        assertValid(this::createTableWithProperties);
+        assertFails(() -> createTableWithProperties("with id = " + UUID.randomUUID()), "[id]");
+        assertFails(() -> createTableWithProperties("with compression = { 'enabled': 'false' }"), "[compression]");
+        assertFails(() -> createTableWithProperties("with compression = { 'enabled': 'false' } AND id = " + UUID.randomUUID()), "[compression, id]");
+        assertFails(() -> createTableWithProperties("with compaction = { 'class': 'SizeTieredCompactionStrategy' }"), "[compaction]");
+        assertFails(() -> createTableWithProperties("with gc_grace_seconds = 1000 and compression = { 'enabled': 'false' }"), "[compression]");
+
+        // though gc_grace_seconds alone is
+        assertValid(() -> createTableWithProperties("with gc_grace_seconds = 1000"));
+
+        // and comment is "ignored". So it should warn, and getting the comment on the created table should be empty,
+        // not the one we set.
+        AtomicReference<String> tableName = new AtomicReference<>();
+        assertWarns(() -> tableName.set(createTableWithProperties("with comment = 'my table'")), "[comment]");
+        assertEquals("", executeNet("SELECT comment FROM system_schema.tables WHERE keyspace_name=? AND table_name=?",
+                                    keyspace(),
+                                    tableName.get()).one().getString("comment"));
+
+        // default_time_to_live is "warned". So it should warn, and getting the default ttl on the created table should
+        // not be empty, since we don't ignore it.
+        assertWarns(() -> tableName.set(createTableWithProperties("with default_time_to_live = 1000")), "[default_time_to_live]");
+        assertEquals(1000, executeNet("SELECT default_time_to_live FROM system_schema.tables WHERE keyspace_name=? AND table_name=?",
+                                      keyspace(),
+                                      tableName.get()).one().getInt("default_time_to_live"));
+
+        // altering columns is allowed
+        assertValid(this::createTableWithProperties);
+        assertValid("ALTER TABLE %s ADD v1 int");
+        assertValid("ALTER TABLE %s DROP v1");
+        assertValid("ALTER TABLE %s RENAME pk to pk1");
+    }
+
+    @Test
+    public void testViewProperties() throws Throwable
+    {
+        // most view properties are not allowed
+        createTableWithProperties();
+        assertValid(() -> createViewWithProperties(""));
+        assertFails(() -> createViewWithProperties("with compression = { 'enabled': 'false' }"), "[compression]");
+        assertValid(() -> createViewWithProperties("with gc_grace_seconds = 1000"));
+
+        // altering mv properties other than "gc_grace_seconds" is not allowed
+        assertValid(() -> alterViewWithProperties("gc_grace_seconds = 1000"));
+        assertFails(() -> alterViewWithProperties("compaction = { 'class': 'SizeTieredCompactionStrategy' } AND crc_check_chance = 1"),
+                    "[compaction, crc_check_chance]");
+    }
+
+    @Test
+    public void testInvalidTableProperties()
+    {
+        assertConfigFails(c -> c.setTablePropertiesDisallowed("ID1", "gc_grace_seconds"), "[id1]");
+        assertConfigFails(c -> c.setTablePropertiesDisallowed("ID2", "Gc_Grace_Seconds"), "[id2]");
+        assertConfigFails(c -> c.setTablePropertiesIgnored("ID3", "gc_grace_seconds"), "[id3]");
+        assertConfigFails(c -> c.setTablePropertiesIgnored("ID4", "Gc_Grace_Seconds"), "[id4]");
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        testExcludedUsers(
+        () -> format(CREATE_TABLE, keyspace(), createTableName(), "WITH compaction = { 'class': 'SizeTieredCompactionStrategy' }"),
+        () -> format(CREATE_TABLE, keyspace(), createTableName(), "WITH gc_grace_seconds = 1000"),
+        () -> "ALTER TABLE %s WITH gc_grace_seconds = 1000 and default_time_to_live = 1000",
+        () -> "ALTER TABLE %s WITH compaction = { 'class': 'SizeTieredCompactionStrategy' }",
+        () -> format(CREATE_VIEW, keyspace(), createViewName(), keyspace(), currentTable(), "with compression = { 'enabled': 'false' }"),
+        () -> format(ALTER_VIEW, keyspace(), currentView(), "compaction = { 'class': 'SizeTieredCompactionStrategy' }"),
+        () -> format(ALTER_VIEW, keyspace(), currentView(), "gc_grace_seconds = 1000"),
+        () -> format(ALTER_VIEW, keyspace(), currentView(), "gc_grace_seconds = 1000 and crc_check_chance = 1"),
+        () -> format(ALTER_VIEW, keyspace(), currentView(), "compaction = { 'class': 'SizeTieredCompactionStrategy' }"));
+    }
+
+    private void createTableWithProperties()
+    {
+        createTableWithProperties("");
+    }
+
+    private String createTableWithProperties(String withClause)
+    {
+        String name = createTableName();
+        execute(userClientState, format(CREATE_TABLE, keyspace(), name, withClause));
+        return name;
+    }
+
+    private void createViewWithProperties(String withClause)
+    {
+        execute(userClientState, format(CREATE_VIEW, keyspace(), createViewName(), keyspace(), currentTable(), withClause));
+    }
+
+    private void alterViewWithProperties(String withClause)
+    {
+        execute(userClientState, format(ALTER_VIEW, keyspace(), currentView(), withClause));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailTablesTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTablesTest.java
new file mode 100644
index 0000000..7e69636
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTablesTest.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+import org.apache.cassandra.db.Keyspace;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for the max number of user tables, {@link Guardrails#tables}.
+ */
+public class GuardrailTablesTest extends ThresholdTester
+{
+    private static final int TABLES_LIMIT_WARN_THRESHOLD = 1;
+    private static final int TABLES_LIMIT_FAIL_THRESHOLD = 2;
+
+    public GuardrailTablesTest()
+    {
+        super(TABLES_LIMIT_WARN_THRESHOLD,
+              TABLES_LIMIT_FAIL_THRESHOLD,
+              Guardrails.tables,
+              Guardrails::setTablesThreshold,
+              Guardrails::getTablesWarnThreshold,
+              Guardrails::getTablesFailThreshold);
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        return Keyspace.open(keyspace()).getColumnFamilyStores().size();
+    }
+
+    @Test
+    public void testCreateTable() throws Throwable
+    {
+        // create tables until hitting the two warn/fail thresholds
+        String t1 = assertCreateTableValid();
+        String t2 = assertCreateTableWarns();
+        assertCreateTableFails();
+
+        // drop a table and hit the warn/fail threshold again
+        dropTable(t2);
+        String t3 = assertCreateTableWarns();
+        assertCreateTableFails();
+
+        // drop two tables and hit the warn/fail threshold again
+        dropTable(t1);
+        dropTable(t3);
+        assertCreateTableValid();
+        assertCreateTableWarns();
+        assertCreateTableFails();
+
+        // test excluded users
+        testExcludedUsers(this::createTableQuery,
+                          this::createTableQuery,
+                          this::createTableQuery);
+    }
+
+    @Override
+    protected void dropTable(String tableName)
+    {
+        dropFormattedTable(format("DROP TABLE %s.%s", keyspace(), tableName));
+    }
+
+    private String assertCreateTableValid() throws Throwable
+    {
+        String tableName = createTableName();
+        assertMaxThresholdValid(createTableQuery(tableName));
+        return tableName;
+    }
+
+    private String assertCreateTableWarns() throws Throwable
+    {
+        String tableName = createTableName();
+        assertThresholdWarns(createTableQuery(tableName),
+                             format("Creating table %s, current number of tables 2 exceeds warning threshold of 1", tableName)
+        );
+        return tableName;
+    }
+
+    private void assertCreateTableFails() throws Throwable
+    {
+        String tableName = createTableName();
+        assertThresholdFails(createTableQuery(tableName),
+                             format("Cannot have more than 2 tables, aborting the creation of table %s", tableName)
+        );
+    }
+
+    private String createTableQuery()
+    {
+        return createTableQuery(createTableName());
+    }
+
+    private String createTableQuery(String tableName)
+    {
+        return format("CREATE TABLE %s.%s (k1 int, v int, PRIMARY KEY((k1)))", keyspace(), tableName);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java
new file mode 100644
index 0000000..7c94702
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java
@@ -0,0 +1,581 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.io.Serializable;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.CassandraRoleManager;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLStatement;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.guardrails.GuardrailEvent.GuardrailEventType;
+import org.apache.cassandra.db.view.View;
+import org.apache.cassandra.diag.DiagnosticEventService;
+import org.apache.cassandra.index.sasi.SASIIndex;
+import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.service.QueryState;
+import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.transport.messages.ResultMessage;
+import org.apache.cassandra.utils.Clock;
+import org.assertj.core.api.Assertions;
+
+import static java.lang.String.format;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public abstract class GuardrailTester extends CQLTester
+{
+    // Name used when testing a CREATE TABLE that should be aborted (we need to provide it because assertFails,
+    // which is used to assert the failure, does not know that the statement is a CREATE TABLE and would thus reuse
+    // the name of the previously created table, which is not what we want).
+    protected static final String FAIL_TABLE = "abort_table_creation_test";
+
+    private static final String USERNAME = "guardrail_user";
+    private static final String PASSWORD = "guardrail_password";
+
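+    // Client states for internal (system) queries, an ordinary user and a superuser; guardrails are only enforced for the ordinary user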
+    protected static ClientState systemClientState, userClientState, superClientState;
+
+    /** The tested guardrail, if we are testing a specific one. */
+    @Nullable
+    protected final Guardrail guardrail;
+
+    /** A listener for emitted diagnostic events. */
+    protected final Listener listener;
+
+    public GuardrailTester()
+    {
+        this(null);
+    }
+
+    public GuardrailTester(@Nullable Guardrail guardrail)
+    {
+        this.guardrail = guardrail;
+        this.listener = new Listener();
+    }
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        CQLTester.setUpClass();
+        requireAuthentication();
+        requireNetwork();
+        DatabaseDescriptor.setDiagnosticEventsEnabled(true);
+
+        systemClientState = ClientState.forInternalCalls();
+        userClientState = ClientState.forExternalCalls(InetSocketAddress.createUnresolved("127.0.0.1", 123));
+        superClientState = ClientState.forExternalCalls(InetSocketAddress.createUnresolved("127.0.0.1", 321));
+        superClientState.login(new AuthenticatedUser(CassandraRoleManager.DEFAULT_SUPERUSER_NAME));
+    }
+
+    /**
+     * Creates an ordinary user that is not excluded from guardrails, that is, a user that is neither a superuser nor an internal user.
+     */
+    @Before
+    public void beforeGuardrailTest() throws Throwable
+    {
+        useSuperUser();
+        executeNet(format("CREATE USER IF NOT EXISTS %s WITH PASSWORD '%s'", USERNAME, PASSWORD));
+        executeNet(format("GRANT ALL ON KEYSPACE %s TO %s", KEYSPACE, USERNAME));
+        useUser(USERNAME, PASSWORD);
+
+        String useKeyspaceQuery = "USE " + keyspace();
+        execute(userClientState, useKeyspaceQuery);
+        execute(systemClientState, useKeyspaceQuery);
+        execute(superClientState, useKeyspaceQuery);
+
+        DiagnosticEventService.instance().subscribe(GuardrailEvent.class, listener);
+    }
+
+    @After
+    public void afterGuardrailTest() throws Throwable
+    {
+        DiagnosticEventService.instance().unsubscribe(listener);
+    }
+
+    static Guardrails guardrails()
+    {
+        return Guardrails.instance;
+    }
+
+    protected <T> void assertValidProperty(BiConsumer<Guardrails, T> setter, T value)
+    {
+        setter.accept(guardrails(), value);
+    }
+
+    protected <T> void assertValidProperty(BiConsumer<Guardrails, T> setter, Function<Guardrails, T> getter, T value)
+    {
+        setter.accept(guardrails(), value);
+        assertEquals(value, getter.apply(guardrails()));
+    }
+
+    protected <T> void assertInvalidProperty(BiConsumer<Guardrails, T> setter,
+                                             T value,
+                                             String message,
+                                             Object... messageArgs)
+    {
+        Assertions.assertThatThrownBy(() -> setter.accept(guardrails(), value))
+                  .isInstanceOf(IllegalArgumentException.class)
+                  .hasMessage(format(message, messageArgs));
+    }
+
+    @SafeVarargs
+    protected final void testExcludedUsers(Supplier<String>... queries) throws Throwable
+    {
+        execute("USE " + keyspace());
+        assertSuperuserIsExcluded(queries);
+        assertInternalQueriesAreExcluded(queries);
+    }
+
+    @SafeVarargs
+    private final void assertInternalQueriesAreExcluded(Supplier<String>... queries) throws Throwable
+    {
+        for (Supplier<String> query : queries)
+        {
+            assertValid(() -> execute(systemClientState, query.get()));
+        }
+    }
+
+    @SafeVarargs
+    private final void assertSuperuserIsExcluded(Supplier<String>... queries) throws Throwable
+    {
+        for (Supplier<String> query : queries)
+        {
+            assertValid(() -> execute(superClientState, query.get()));
+        }
+    }
+
+    protected void assertValid(CheckedFunction function) throws Throwable
+    {
+        ClientWarn.instance.captureWarnings();
+        try
+        {
+            function.apply();
+            assertEmptyWarnings();
+            listener.assertNotWarned();
+            listener.assertNotFailed();
+        }
+        catch (GuardrailViolatedException e)
+        {
+            fail("Expected not to fail, but failed with error message: " + e.getMessage());
+        }
+        finally
+        {
+            ClientWarn.instance.resetWarnings();
+            listener.clear();
+        }
+    }
+
+    protected void assertValid(String query) throws Throwable
+    {
+        assertValid(() -> execute(userClientState, query));
+    }
+
+    protected void assertWarns(String query, String message) throws Throwable
+    {
+        assertWarns(query, message, message);
+    }
+
+    protected void assertWarns(String query, String message, String redactedMessage) throws Throwable
+    {
+        assertWarns(query, Collections.singletonList(message), Collections.singletonList(redactedMessage));
+    }
+
+    protected void assertWarns(String query, List<String> messages) throws Throwable
+    {
+        assertWarns(() -> execute(userClientState, query), messages, messages);
+    }
+
+    protected void assertWarns(String query, List<String> messages, List<String> redactedMessages) throws Throwable
+    {
+        assertWarns(() -> execute(userClientState, query), messages, redactedMessages);
+    }
+
+    protected void assertWarns(CheckedFunction function, String message) throws Throwable
+    {
+        assertWarns(function, message, message);
+    }
+
+    protected void assertWarns(CheckedFunction function, String message, String redactedMessage) throws Throwable
+    {
+        assertWarns(function, Collections.singletonList(message), Collections.singletonList(redactedMessage));
+    }
+
+    protected void assertWarns(CheckedFunction function, List<String> messages) throws Throwable
+    {
+        assertWarns(function, messages, messages);
+    }
+
+    protected void assertWarns(CheckedFunction function, List<String> messages, List<String> redactedMessages) throws Throwable
+    {
+        // We use client warnings to check that we warn properly, as this is the most convenient approach. Technically,
+        // this doesn't validate that we also log the warning, but that's probably fine ...
+        ClientWarn.instance.captureWarnings();
+        try
+        {
+            function.apply();
+            assertWarnings(messages);
+            listener.assertWarned(redactedMessages);
+            listener.assertNotFailed();
+        }
+        finally
+        {
+            ClientWarn.instance.resetWarnings();
+            listener.clear();
+        }
+    }
+
+    protected void assertFails(String query, String message) throws Throwable
+    {
+        assertFails(query, message, message);
+    }
+
+    protected void assertFails(String query, String message, String redactedMessage) throws Throwable
+    {
+        assertFails(query, Collections.singletonList(message), Collections.singletonList(redactedMessage));
+    }
+
+    protected void assertFails(String query, List<String> messages) throws Throwable
+    {
+        assertFails(query, messages, messages);
+    }
+
+    protected void assertFails(String query, List<String> messages, List<String> redactedMessages) throws Throwable
+    {
+        assertFails(() -> execute(userClientState, query), messages, redactedMessages);
+    }
+
+    protected void assertFails(CheckedFunction function, String message) throws Throwable
+    {
+        assertFails(function, message, message);
+    }
+
+    protected void assertFails(CheckedFunction function, String message, String redactedMessage) throws Throwable
+    {
+        assertFails(function, true, message, redactedMessage);
+    }
+
+    protected void assertFails(CheckedFunction function, boolean thrown, String message) throws Throwable
+    {
+        assertFails(function, thrown, message, message);
+    }
+
+    protected void assertFails(CheckedFunction function, boolean thrown, String message, String redactedMessage) throws Throwable
+    {
+        assertFails(function, thrown, Collections.singletonList(message), Collections.singletonList(redactedMessage));
+    }
+
+    protected void assertFails(CheckedFunction function, List<String> messages) throws Throwable
+    {
+        assertFails(function, messages, messages);
+    }
+
+    protected void assertFails(CheckedFunction function, List<String> messages, List<String> redactedMessages) throws Throwable
+    {
+        assertFails(function, true, messages, redactedMessages);
+    }
+
+    protected void assertFails(CheckedFunction function, boolean thrown, List<String> messages, List<String> redactedMessages) throws Throwable
+    {
+        ClientWarn.instance.captureWarnings();
+        try
+        {
+            function.apply();
+
+            if (thrown)
+                fail("Expected to fail, but it did not");
+        }
+        catch (GuardrailViolatedException e)
+        {
+            assertTrue("Expect no exception thrown", thrown);
+
+            // the last message is the one raising the guardrail failure, the previous messages are warnings
+            String failMessage = messages.get(messages.size() - 1);
+
+            if (guardrail != null)
+            {
+                String prefix = guardrail.decorateMessage("");
+                assertTrue(format("Full error message '%s' doesn't start with the prefix '%s'", e.getMessage(), prefix),
+                           e.getMessage().startsWith(prefix));
+            }
+
+            assertTrue(format("Full error message '%s' does not contain expected message '%s'", e.getMessage(), failMessage),
+                       e.getMessage().contains(failMessage));
+
+            assertWarnings(messages);
+            if (messages.size() > 1)
+                listener.assertWarned(redactedMessages.subList(0, messages.size() - 1));
+            else
+                listener.assertNotWarned();
+            listener.assertFailed(redactedMessages.get(messages.size() - 1));
+        }
+        finally
+        {
+            ClientWarn.instance.resetWarnings();
+            listener.clear();
+        }
+    }
+
+    protected void assertFails(String query, String... messages) throws Throwable
+    {
+        assertFails(() -> execute(userClientState, query), Arrays.asList(messages));
+    }
+
+    protected void assertThrows(CheckedFunction function, Class<? extends Throwable> exception, String message)
+    {
+        try
+        {
+            function.apply();
+            fail("Expected to fail, but it did not");
+        }
+        catch (Throwable e)
+        {
+            if (!exception.isAssignableFrom(e.getClass()))
+                Assert.fail(format("Expected to fail with %s but got %s", exception.getName(), e.getClass().getName()));
+
+            assertTrue(format("Error message '%s' does not contain expected message '%s'", e.getMessage(), message),
+                       e.getMessage().contains(message));
+        }
+    }
+
+    private void assertWarnings(List<String> messages)
+    {
+        List<String> warnings = getWarnings();
+
+        assertFalse("Expected to warn, but no warning was received", warnings == null || warnings.isEmpty());
+        assertEquals(format("Expected %d warnings but got %d: %s", messages.size(), warnings.size(), warnings),
+                     messages.size(),
+                     warnings.size());
+
+        for (int i = 0; i < messages.size(); i++)
+        {
+            String message = messages.get(i);
+            String warning = warnings.get(i);
+            if (guardrail != null)
+            {
+                String prefix = guardrail.decorateMessage("");
+                assertTrue(format("Warning log message '%s' doesn't start with the prefix '%s'", warning, prefix),
+                           warning.startsWith(prefix));
+            }
+
+            assertTrue(format("Warning log message '%s' does not contain expected message '%s'", warning, message),
+                       warning.contains(message));
+        }
+    }
+
+    private void assertEmptyWarnings()
+    {
+        List<String> warnings = getWarnings();
+
+        if (warnings == null) // will always be the case in practice currently, but being defensive in case this changes
+            warnings = Collections.emptyList();
+
+        assertTrue(format("Expect no warning messages but got %s", warnings), warnings.isEmpty());
+    }
+
+    protected List<String> getWarnings()
+    {
+        List<String> warnings = ClientWarn.instance.getWarnings();
+
+        return warnings == null
+               ? Collections.emptyList()
+               : warnings.stream()
+                         .filter(w -> !w.equals(View.USAGE_WARNING) && !w.equals(SASIIndex.USAGE_WARNING))
+                         .collect(Collectors.toList());
+    }
+
+    protected void assertConfigValid(Consumer<Guardrails> consumer)
+    {
+        consumer.accept(guardrails());
+    }
+
+    protected void assertConfigFails(Consumer<Guardrails> consumer, String message)
+    {
+        try
+        {
+            consumer.accept(guardrails());
+            fail("Expected failure");
+        }
+        catch (IllegalArgumentException e)
+        {
+            String actualMessage = e.getMessage();
+            assertTrue(String.format("Failure message '%s' does not contain expected message '%s'", actualMessage, message),
+                       actualMessage.contains(message));
+        }
+    }
+
+    protected ResultMessage execute(ClientState state, String query)
+    {
+        return execute(state, query, Collections.emptyList());
+    }
+
+    protected ResultMessage execute(ClientState state, String query, List<ByteBuffer> values)
+    {
+        QueryOptions options = QueryOptions.forInternalCalls(values);
+
+        return execute(state, query, options);
+    }
+
+    protected ResultMessage execute(ClientState state, String query, ConsistencyLevel cl)
+    {
+        return execute(state, query, cl, null);
+    }
+
+    protected ResultMessage execute(ClientState state, String query, ConsistencyLevel cl, ConsistencyLevel serialCl)
+    {
+        QueryOptions options = QueryOptions.create(cl,
+                                                   Collections.emptyList(),
+                                                   false,
+                                                   10,
+                                                   null,
+                                                   serialCl,
+                                                   ProtocolVersion.CURRENT,
+                                                   KEYSPACE);
+
+        return execute(state, query, options);
+    }
+
+    protected ResultMessage execute(ClientState state, String query, QueryOptions options)
+    {
+        QueryState queryState = new QueryState(state);
+
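+        // parse and execute the statement directly through QueryProcessor (bypassing the network layer) so that the supplied client state determines whether guardrails apply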
+        String formattedQuery = formatQuery(query);
+        CQLStatement statement = QueryProcessor.parseStatement(formattedQuery, queryState.getClientState());
+        statement.validate(state);
+
+        return statement.execute(queryState, options, Clock.Global.nanoTime());
+    }
+
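+    /** Normalises a CSV string by de-duplicating and sorting its elements, so getter/setter round trips can be compared regardless of order. */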
+    protected static String sortCSV(String csv)
+    {
+        return String.join(",", (new TreeSet<>(ImmutableSet.copyOf((csv.split(","))))));
+    }
+
+    /**
+     * A listener for guardrails diagnostic events.
+     */
+    public class Listener implements Consumer<GuardrailEvent>
+    {
+        private final List<String> warnings = new CopyOnWriteArrayList<>();
+        private final List<String> failures = new CopyOnWriteArrayList<>();
+
+        @Override
+        public void accept(GuardrailEvent event)
+        {
+            assertNotNull(event);
+            Map<String, Serializable> map = event.toMap();
+
+            if (guardrail != null)
+                assertEquals(guardrail.name, map.get("name"));
+
+            GuardrailEventType type = (GuardrailEventType) event.getType();
+            String message = map.toString();
+
+            switch (type)
+            {
+                case WARNED:
+                    warnings.add(message);
+                    break;
+                case FAILED:
+                    failures.add(message);
+                    break;
+                default:
+                    fail("Unexpected diagnostic event:" + type);
+            }
+        }
+
+        public void clear()
+        {
+            warnings.clear();
+            failures.clear();
+        }
+
+        public void assertNotWarned()
+        {
+            assertTrue(format("Expect no warning diagnostic events but got %s", warnings), warnings.isEmpty());
+        }
+
+        public void assertWarned(String message)
+        {
+            assertWarned(Collections.singletonList(message));
+        }
+
+        public void assertWarned(List<String> messages)
+        {
+            assertFalse("Expected to emit warning diagnostic event, but no warning was emitted", warnings.isEmpty());
+            assertEquals(format("Expected %d warning diagnostic events but got %d: %s)", messages.size(), warnings.size(), warnings),
+                         messages.size(), warnings.size());
+
+            for (int i = 0; i < messages.size(); i++)
+            {
+                String message = messages.get(i);
+                String warning = warnings.get(i);
+                assertTrue(format("Warning diagnostic event '%s' does not contain expected message '%s'", warning, message),
+                           warning.contains(message));
+            }
+        }
+
+        public void assertNotFailed()
+        {
+            assertTrue(format("Expect no failure diagnostic events but got %s", failures), failures.isEmpty());
+        }
+
+        public void assertFailed(String... messages)
+        {
+            assertFalse("Expected to emit failure diagnostic event, but no failure was emitted", failures.isEmpty());
+            assertEquals(format("Expected %d failure diagnostic events but got %d: %s)", messages.length, failures.size(), failures),
+                         messages.length, failures.size());
+
+            for (int i = 0; i < messages.length; i++)
+            {
+                String message = messages[i];
+                String failure = failures.get(i);
+                assertTrue(format("Failure diagnostic event '%s' does not contain expected message '%s'", failure, message),
+                           failure.contains(message));
+            }
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailUserTimestampsTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailUserTimestampsTest.java
new file mode 100644
index 0000000..d7c0c7b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailUserTimestampsTest.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests the guardrail for disabling user-provided timestamps, {@link Guardrails#userTimestampsEnabled}.
+ */
+public class GuardrailUserTimestampsTest extends GuardrailTester
+{
+    public GuardrailUserTimestampsTest()
+    {
+        super(Guardrails.userTimestampsEnabled);
+    }
+
+    @Before
+    public void setupTest()
+    {
+        createTable("CREATE TABLE IF NOT EXISTS %s (k INT, c INT, v TEXT, PRIMARY KEY(k, c))");
+    }
+
+    private void setGuardrail(boolean userTimestampsEnabled)
+    {
+        guardrails().setUserTimestampsEnabled(userTimestampsEnabled);
+    }
+
+    @Test
+    public void testInsertWithDisabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') USING TIMESTAMP 1");
+    }
+
+    @Test
+    public void testInsertWithEnabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid("INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') USING TIMESTAMP 1");
+    }
+
+    @Test
+    public void testUpdateWithDisabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("UPDATE %s USING TIMESTAMP 1 SET v = 'val2' WHERE k = 1 and c = 2");
+    }
+
+    @Test
+    public void testUpdateWithEnabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid("UPDATE %s USING TIMESTAMP 1 SET v = 'val2' WHERE k = 1 and c = 2");
+    }
+
+    @Test
+    public void testDeleteWithDisabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("DELETE FROM %s USING TIMESTAMP 1 WHERE k=1");
+    }
+
+    @Test
+    public void testDeleteWithEnabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid("DELETE FROM %s USING TIMESTAMP 1 WHERE k=1");
+    }
+
+    @Test
+    public void testBatchWithDisabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(false);
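+        // only the statement-level USING TIMESTAMP inside the batch triggers the guardrail; the batch-level timestamp is accepted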
+        assertValid("BEGIN BATCH USING TIMESTAMP 1 " +
+                    "INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') " +
+                    "APPLY BATCH");
+        assertFails("BEGIN BATCH " +
+                    "INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') USING TIMESTAMP 1 " +
+                    "APPLY BATCH");
+    }
+
+    @Test
+    public void testBatchWithEnabledUserTimestamps() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid("BEGIN BATCH USING TIMESTAMP 1 " +
+                    "INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') " +
+                    "APPLY BATCH");
+        assertValid("BEGIN BATCH " +
+                    "INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') USING TIMESTAMP 1 " +
+                    "APPLY BATCH");
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        for (boolean userTimestampsEnabled : new boolean[]{ false, true })
+        {
+            setGuardrail(userTimestampsEnabled);
+            testExcludedUsers(() -> "INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') USING TIMESTAMP 1",
+                              () -> "UPDATE %s USING TIMESTAMP 1 SET v = 'val2' WHERE k = 1 and c = 2",
+                              () -> "DELETE FROM %s USING TIMESTAMP 1 WHERE k=1",
+                              () -> "BEGIN BATCH USING TIMESTAMP 1 INSERT INTO %s (k, c, v) VALUES (1, 2, 'v'); APPLY BATCH",
+                              () -> "BEGIN BATCH INSERT INTO %s (k, c, v) VALUES (1, 2, 'v') USING TIMESTAMP 1; APPLY BATCH");
+        }
+    }
+
+    private void assertFails(String query) throws Throwable
+    {
+        assertFails(query, "User provided timestamps (USING TIMESTAMP) is not allowed");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailViewsPerTableTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailViewsPerTableTest.java
new file mode 100644
index 0000000..3be58b0
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailViewsPerTableTest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static java.lang.String.format;
+
+/**
+ * Tests the guardrail for the number of materialized views in a table, {@link Guardrails#materializedViewsPerTable}.
+ */
+public class GuardrailViewsPerTableTest extends ThresholdTester
+{
+    private static final int VIEWS_PER_TABLE_WARN_THRESHOLD = 1;
+    private static final int VIEWS_PER_TABLE_FAIL_THRESHOLD = 3;
+
+    private static final String CREATE_TABLE = "CREATE TABLE %s (k int PRIMARY KEY, v int)";
+    private static final String CREATE_VIEW = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s " +
+                                              "WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)";
+
+    public GuardrailViewsPerTableTest()
+    {
+        super(VIEWS_PER_TABLE_WARN_THRESHOLD,
+              VIEWS_PER_TABLE_FAIL_THRESHOLD,
+              Guardrails.materializedViewsPerTable,
+              Guardrails::setMaterializedViewsPerTableThreshold,
+              Guardrails::getMaterializedViewsPerTableWarnThreshold,
+              Guardrails::getMaterializedViewsPerTableFailThreshold);
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        return getCurrentColumnFamilyStore().viewManager.size();
+    }
+
+    @Before
+    public void before()
+    {
+        super.before();
+        createTable(CREATE_TABLE);
+    }
+
+    @Test
+    public void testCreateView() throws Throwable
+    {
+        String view1 = assertCreateViewSucceeds();
+        assertCurrentValue(1);
+
+        assertCreateViewWarns();
+        assertCreateViewWarns();
+        assertCreateViewFails();
+        assertCurrentValue(3);
+
+        // drop the first view; we should be able to create a new MV again
+        dropView(view1);
+        assertCurrentValue(2);
+        assertCreateViewWarns();
+        assertCreateViewFails();
+        assertCurrentValue(3);
+
+        // the guardrail counts views per base table, so another base table starts from zero
+        createTable("CREATE TABLE %s (k int primary key, v int)");
+        assertCreateViewSucceeds();
+        assertCreateViewWarns();
+        assertCreateViewWarns();
+        assertCreateViewFails();
+        assertCurrentValue(3);
+    }
+
+    @Test
+    public void testExcludedUsers() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int)");
+
+        testExcludedUsers(() -> format("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                       "  WHERE k IS NOT NULL AND v1 IS NOT NULL PRIMARY KEY (v1, k)",
+                                       createViewName(), currentTable()),
+                          () -> format("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " +
+                                       "  WHERE k IS NOT NULL AND v2 IS NOT NULL PRIMARY KEY (v2, k)",
+                                       createViewName(), currentTable()));
+    }
+
+    private String assertCreateViewSucceeds() throws Throwable
+    {
+        String viewName = createViewName();
+        assertMaxThresholdValid(format(CREATE_VIEW, viewName));
+        return viewName;
+    }
+
+    private void assertCreateViewWarns() throws Throwable
+    {
+        String viewName = createViewName();
+        assertThresholdWarns(format(CREATE_VIEW, viewName),
+                             format("Creating materialized view %s on table %s, current number of views %s exceeds warning threshold of %s.",
+                                    viewName, currentTable(), currentValue() + 1, guardrails().getMaterializedViewsPerTableWarnThreshold()));
+    }
+
+    private void assertCreateViewFails() throws Throwable
+    {
+        String viewName = createViewName();
+        assertThresholdFails(format(CREATE_VIEW, viewName),
+                             format("aborting the creation of materialized view %s on table %s",
+                                    viewName, currentTable()));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailWriteConsistencyLevelsTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailWriteConsistencyLevelsTest.java
new file mode 100644
index 0000000..f2ad40f
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailWriteConsistencyLevelsTest.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+
+import static java.lang.String.format;
+import static org.apache.cassandra.db.ConsistencyLevel.ALL;
+import static org.apache.cassandra.db.ConsistencyLevel.ANY;
+import static org.apache.cassandra.db.ConsistencyLevel.LOCAL_ONE;
+import static org.apache.cassandra.db.ConsistencyLevel.LOCAL_QUORUM;
+import static org.apache.cassandra.db.ConsistencyLevel.LOCAL_SERIAL;
+import static org.apache.cassandra.db.ConsistencyLevel.ONE;
+import static org.apache.cassandra.db.ConsistencyLevel.QUORUM;
+import static org.apache.cassandra.db.ConsistencyLevel.SERIAL;
+
+/**
+ * Tests the guardrail for write consistency levels, {@link Guardrails#writeConsistencyLevels}.
+ */
+public class GuardrailWriteConsistencyLevelsTest extends GuardrailConsistencyLevelsTester
+{
+    public GuardrailWriteConsistencyLevelsTest()
+    {
+        super("write_consistency_levels_warned",
+              "write_consistency_levels_disallowed",
+              Guardrails.writeConsistencyLevels,
+              Guardrails::getWriteConsistencyLevelsWarned,
+              Guardrails::getWriteConsistencyLevelsDisallowed,
+              Guardrails::getWriteConsistencyLevelsWarnedCSV,
+              Guardrails::getWriteConsistencyLevelsDisallowedCSV,
+              Guardrails::setWriteConsistencyLevelsWarned,
+              Guardrails::setWriteConsistencyLevelsDisallowed,
+              Guardrails::setWriteConsistencyLevelsWarnedCSV,
+              Guardrails::setWriteConsistencyLevelsDisallowedCSV);
+    }
+
+    @Before
+    public void before()
+    {
+        super.before();
+        createTable("CREATE TABLE IF NOT EXISTS %s (k INT, c INT, v TEXT, PRIMARY KEY(k, c))");
+    }
+
+    @Test
+    public void testInsert() throws Throwable
+    {
+        testQuery("INSERT INTO %s (k, c, v) VALUES (1, 2, 'val')");
+        testLWTQuery("INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') IF NOT EXISTS");
+    }
+
+    @Test
+    public void testUpdate() throws Throwable
+    {
+        testQuery("UPDATE %s SET v = 'val2' WHERE k = 1 AND c = 2");
+        testLWTQuery("UPDATE %s SET v = 'val2' WHERE k = 1 AND c = 2 IF EXISTS");
+    }
+
+    @Test
+    public void testDelete() throws Throwable
+    {
+        testQuery("DELETE FROM %s WHERE k=1");
+        testLWTQuery("DELETE FROM %s WHERE k=1 AND c=2 IF EXISTS");
+    }
+
+    @Test
+    public void testBatch() throws Throwable
+    {
+        testQuery("BEGIN BATCH INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') APPLY BATCH");
+        testQuery("BEGIN BATCH UPDATE %s SET v = 'val2' WHERE k = 1 AND c = 2 APPLY BATCH");
+        testQuery("BEGIN BATCH DELETE FROM %s WHERE k=1 APPLY BATCH");
+
+        testLWTQuery("BEGIN BATCH INSERT INTO %s (k, c, v) VALUES (1, 2, 'val') IF NOT EXISTS APPLY BATCH");
+        testLWTQuery("BEGIN BATCH UPDATE %s SET v = 'val2' WHERE k = 1 AND c = 2 IF EXISTS APPLY BATCH");
+        testLWTQuery("BEGIN BATCH DELETE FROM %s WHERE k=1 AND c=2 IF EXISTS APPLY BATCH");
+    }
+
+    private void testQuery(String query) throws Throwable
+    {
+        testQuery(query, ONE);
+        testQuery(query, ALL);
+        testQuery(query, ANY);
+        testQuery(query, QUORUM);
+        testQuery(query, LOCAL_ONE);
+        testQuery(query, LOCAL_QUORUM);
+    }
+
+    private void testQuery(String query, ConsistencyLevel cl) throws Throwable
+    {
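+        // with no warned or disallowed consistency levels configured, the query is accepted at any CL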
+        warnConsistencyLevels();
+        disableConsistencyLevels();
+        assertValid(query, cl, null);
+
+        warnConsistencyLevels(cl);
+        assertWarns(query, cl, null, cl);
+
+        warnConsistencyLevels();
+        disableConsistencyLevels(cl);
+        assertFails(query, cl, null, cl);
+    }
+
+    private void testLWTQuery(String query) throws Throwable
+    {
+        testLWTQuery(query, ONE);
+        testLWTQuery(query, ALL);
+        testLWTQuery(query, QUORUM);
+        testLWTQuery(query, LOCAL_ONE);
+        testLWTQuery(query, LOCAL_QUORUM);
+    }
+
+    private void testLWTQuery(String query, ConsistencyLevel cl) throws Throwable
+    {
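+        // LWT queries are additionally validated against the serial consistency level (SERIAL / LOCAL_SERIAL)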
+        disableConsistencyLevels();
+
+        warnConsistencyLevels();
+        assertValid(query, cl, SERIAL);
+        assertValid(query, cl, LOCAL_SERIAL);
+        assertValid(query, cl, null);
+
+        warnConsistencyLevels(cl);
+        assertWarns(query, cl, SERIAL, cl);
+        assertWarns(query, cl, LOCAL_SERIAL, cl);
+        assertWarns(query, cl, null, cl);
+
+        warnConsistencyLevels(SERIAL);
+        assertWarns(query, cl, SERIAL, SERIAL);
+        assertValid(query, cl, LOCAL_SERIAL);
+        assertWarns(query, cl, null, SERIAL);
+
+        warnConsistencyLevels(LOCAL_SERIAL);
+        assertValid(query, cl, SERIAL);
+        assertWarns(query, cl, LOCAL_SERIAL, LOCAL_SERIAL);
+        assertValid(query, cl, null);
+
+        warnConsistencyLevels(SERIAL, LOCAL_SERIAL);
+        assertWarns(query, cl, SERIAL, SERIAL);
+        assertWarns(query, cl, LOCAL_SERIAL, LOCAL_SERIAL);
+        assertWarns(query, cl, null, SERIAL);
+
+        warnConsistencyLevels();
+
+        disableConsistencyLevels(cl);
+        assertFails(query, cl, SERIAL, cl);
+        assertFails(query, cl, LOCAL_SERIAL, cl);
+        assertFails(query, cl, null, cl);
+
+        disableConsistencyLevels(SERIAL);
+        assertFails(query, cl, SERIAL, SERIAL);
+        assertValid(query, cl, LOCAL_SERIAL);
+        assertFails(query, cl, null, SERIAL);
+
+        disableConsistencyLevels(LOCAL_SERIAL);
+        assertValid(query, cl, SERIAL);
+        assertFails(query, cl, LOCAL_SERIAL, LOCAL_SERIAL);
+        assertValid(query, cl, null);
+
+        disableConsistencyLevels(SERIAL, LOCAL_SERIAL);
+        assertFails(query, cl, SERIAL, SERIAL);
+        assertFails(query, cl, LOCAL_SERIAL, LOCAL_SERIAL);
+        assertFails(query, cl, null, SERIAL);
+    }
+
+    private void assertValid(String query, ConsistencyLevel cl, ConsistencyLevel serialCl) throws Throwable
+    {
+        assertValid(() -> execute(userClientState, query, cl, serialCl));
+    }
+
+    private void assertWarns(String query, ConsistencyLevel cl, ConsistencyLevel serialCl, ConsistencyLevel warnedCl) throws Throwable
+    {
+        assertWarns(() -> execute(userClientState, query, cl, serialCl),
+                    format("Provided values [%s] are not recommended for write consistency levels (warned values are: %s)",
+                           warnedCl, guardrails().getWriteConsistencyLevelsWarned()));
+
+        assertExcludedUsers(query, cl, serialCl);
+    }
+
+    private void assertFails(String query, ConsistencyLevel cl, ConsistencyLevel serialCl, ConsistencyLevel rejectedCl) throws Throwable
+    {
+        assertFails(() -> execute(userClientState, query, cl, serialCl),
+                    format("Provided values [%s] are not allowed for write consistency levels (disallowed values are: %s)",
+                           rejectedCl, guardrails().getWriteConsistencyLevelsDisallowed()));
+
+        assertExcludedUsers(query, cl, serialCl);
+    }
+
+    private void assertExcludedUsers(String query, ConsistencyLevel cl, ConsistencyLevel serialCl) throws Throwable
+    {
+        assertValid(() -> execute(superClientState, query, cl, serialCl));
+        assertValid(() -> execute(systemClientState, query, cl, serialCl));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailsConfigProviderTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailsConfigProviderTest.java
new file mode 100644
index 0000000..e99b736
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailsConfigProviderTest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.GuardrailsOptions;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.service.ClientState;
+import org.assertj.core.api.Assertions;
+
+import static java.lang.String.format;
+
+public class GuardrailsConfigProviderTest extends GuardrailTester
+{
+    @Test
+    public void testBuildCustom() throws Throwable
+    {
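+        // the custom config provider is looked up by its fully-qualified class name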
+        String name = getClass().getCanonicalName() + '$' + CustomProvider.class.getSimpleName();
+        GuardrailsConfigProvider provider = GuardrailsConfigProvider.build(name);
+        MaxThreshold guard = new MaxThreshold("test_guardrail",
+                                              state -> provider.getOrCreate(state).getTablesWarnThreshold(),
+                                              state -> provider.getOrCreate(state).getTablesFailThreshold(),
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
+                                                                             isWarn ? "Warning" : "Aborting", what, v, t));
+
+        assertValid(() -> guard.guard(5, "Z", false, userClientState));
+        assertWarns(() -> guard.guard(25, "A", false, userClientState), "Warning: for A, 25 > 10");
+        assertWarns(() -> guard.guard(100, "B", false, userClientState), "Warning: for B, 100 > 10");
+        assertFails(() -> guard.guard(101, "X", false, userClientState), "Aborting: for X, 101 > 100");
+        assertFails(() -> guard.guard(200, "Y", false, userClientState), "Aborting: for Y, 200 > 100");
+        assertValid(() -> guard.guard(5, "Z", false, userClientState));
+
+        assertValid(() -> guard.guard(5, "Z", true, userClientState));
+        assertWarns(() -> guard.guard(25, "A", true, userClientState), "Warning: for A, 25 > 10", "Warning: for <redacted>, 25 > 10");
+        assertWarns(() -> guard.guard(100, "B", true, userClientState), "Warning: for B, 100 > 10", "Warning: for <redacted>, 100 > 10");
+        assertFails(() -> guard.guard(101, "X", true, userClientState), "Aborting: for X, 101 > 100", "Aborting: for <redacted>, 101 > 100");
+        assertFails(() -> guard.guard(200, "Y", true, userClientState), "Aborting: for Y, 200 > 100", "Aborting: for <redacted>, 200 > 100");
+        assertValid(() -> guard.guard(5, "Z", true, userClientState));
+
+        Assertions.assertThatThrownBy(() -> GuardrailsConfigProvider.build("unexistent_class"))
+                  .isInstanceOf(ConfigurationException.class)
+                  .hasMessageContaining("Unable to find custom guardrails config provider class 'unexistent_class'");
+    }
+
+    /**
+     * Custom {@link GuardrailsConfigProvider} implementation that simply duplicates the threshold values.
+     */
+    public static class CustomProvider extends GuardrailsConfigProvider.Default
+    {
+        public GuardrailsConfig getOrCreate(ClientState state)
+        {
+            return new CustomConfig(DatabaseDescriptor.getRawConfig());
+        }
+    }
+
+    public static class CustomConfig extends GuardrailsOptions
+    {
+        public CustomConfig(Config config)
+        {
+            super(config);
+        }
+
+        @Override
+        public int getTablesWarnThreshold()
+        {
+            return 10;
+        }
+
+        @Override
+        public int getTablesFailThreshold()
+        {
+            return 100;
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java
new file mode 100644
index 0000000..a0a5823
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Consumer;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import static java.lang.String.format;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class GuardrailsTest extends GuardrailTester
+{
+    /** Config value used to disable a threshold guardrail. */
+    public static final int DISABLED = -1;
+
+    private void testDisabledThreshold(Threshold guard) throws Throwable
+    {
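+        // a disabled threshold reports itself as disabled and never warns or fails, regardless of the value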
+        assertFalse(guard.enabled(userClientState));
+
+        for (boolean containsUserData : Arrays.asList(true, false))
+        {
+            assertValid(() -> guard.guard(5, "Z", containsUserData, null));
+            assertValid(() -> guard.guard(25, "A", containsUserData, userClientState));
+            assertValid(() -> guard.guard(100, "B", containsUserData, userClientState));
+            assertValid(() -> guard.guard(101, "X", containsUserData, userClientState));
+            assertValid(() -> guard.guard(200, "Y", containsUserData, userClientState));
+        }
+    }
+
+    @Test
+    public void testDisabledMaxThreshold() throws Throwable
+    {
+        Threshold.ErrorMessageProvider errorMessageProvider = (isWarn, what, v, t) -> "Should never trigger";
+        testDisabledThreshold(new MaxThreshold("x", state -> DISABLED, state -> DISABLED, errorMessageProvider));
+    }
+
+    @Test
+    public void testMaxThreshold() throws Throwable
+    {
+        MaxThreshold guard = new MaxThreshold("x",
+                                              state -> 10,
+                                              state -> 100,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
+                                                                             isWarn ? "Warning" : "Aborting", what, v, t));
+
+        assertTrue(guard.enabled(userClientState));
+
+        assertValid(() -> guard.guard(5, "Z", false, userClientState));
+        assertWarns(() -> guard.guard(25, "A", false, userClientState), "Warning: for A, 25 > 10");
+        assertWarns(() -> guard.guard(100, "B", false, userClientState), "Warning: for B, 100 > 10");
+        assertFails(() -> guard.guard(101, "X", false, userClientState), "Aborting: for X, 101 > 100");
+        assertFails(() -> guard.guard(200, "Y", false, userClientState), "Aborting: for Y, 200 > 100");
+        assertValid(() -> guard.guard(5, "Z", false, userClientState));
+
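+        // with containsUserData=true, each check also verifies the <redacted> variant of the message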
+        assertValid(() -> guard.guard(5, "Z", true, userClientState));
+        assertWarns(() -> guard.guard(25, "A", true, userClientState), "Warning: for A, 25 > 10", "Warning: for <redacted>, 25 > 10");
+        assertWarns(() -> guard.guard(100, "B", true, userClientState), "Warning: for B, 100 > 10", "Warning: for <redacted>, 100 > 10");
+        assertFails(() -> guard.guard(101, "X", true, userClientState), "Aborting: for X, 101 > 100", "Aborting: for <redacted>, 101 > 100");
+        assertFails(() -> guard.guard(200, "Y", true, userClientState), "Aborting: for Y, 200 > 100", "Aborting: for <redacted>, 200 > 100");
+        assertValid(() -> guard.guard(5, "Z", true, userClientState));
+    }
+
+    @Test
+    public void testWarnOnlyMaxThreshold() throws Throwable
+    {
+        MaxThreshold guard = new MaxThreshold("x",
+                                              state -> 10,
+                                              state -> DISABLED,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
+                                                                             isWarn ? "Warning" : "Aborting", what, v, t));
+
+        assertTrue(guard.enabled(userClientState));
+
+        assertValid(() -> guard.guard(5, "Z", false, userClientState));
+        assertWarns(() -> guard.guard(11, "A", false, userClientState), "Warning: for A, 11 > 10");
+
+        assertValid(() -> guard.guard(5, "Z", true, userClientState));
+        assertWarns(() -> guard.guard(11, "A", true, userClientState), "Warning: for A, 11 > 10", "Warning: for <redacted>, 11 > 10");
+    }
+
+    @Test
+    public void testFailOnlyMaxThreshold() throws Throwable
+    {
+        MaxThreshold guard = new MaxThreshold("x",
+                                              state -> DISABLED,
+                                              state -> 10,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
+                                                                             isWarn ? "Warning" : "Aborting", what, v, t));
+
+        assertTrue(guard.enabled(userClientState));
+
+        assertValid(() -> guard.guard(5, "Z", false, userClientState));
+        assertFails(() -> guard.guard(11, "A", false, userClientState), "Aborting: for A, 11 > 10");
+
+        assertValid(() -> guard.guard(5, "Z", true, userClientState));
+        assertFails(() -> guard.guard(11, "A", true, userClientState), "Aborting: for A, 11 > 10", "Aborting: for <redacted>, 11 > 10");
+    }
+
+    @Test
+    public void testMaxThresholdUsers() throws Throwable
+    {
+        MaxThreshold guard = new MaxThreshold("x",
+                                              state -> 10,
+                                              state -> 100,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
+                                                                             isWarn ? "Warning" : "Failure", what, v, t));
+
+        // value under both thresholds
+        assertValid(() -> guard.guard(5, "x", false, null));
+        assertValid(() -> guard.guard(5, "x", false, userClientState));
+        assertValid(() -> guard.guard(5, "x", false, systemClientState));
+        assertValid(() -> guard.guard(5, "x", false, superClientState));
+
+        // value over warning threshold
+        assertWarns(() -> guard.guard(100, "y", false, null), "Warning: for y, 100 > 10");
+        assertWarns(() -> guard.guard(100, "y", false, userClientState), "Warning: for y, 100 > 10");
+        assertValid(() -> guard.guard(100, "y", false, systemClientState));
+        assertValid(() -> guard.guard(100, "y", false, superClientState));
+
+        // value over fail threshold. An undefined user means that the check comes from a background process, so we
+        // still emit failure messages and events, but we don't throw an exception to prevent interrupting that process.
+        assertFails(() -> guard.guard(101, "z", false, null), false, "Failure: for z, 101 > 100");
+        assertFails(() -> guard.guard(101, "z", false, userClientState), "Failure: for z, 101 > 100");
+        assertValid(() -> guard.guard(101, "z", false, systemClientState));
+        assertValid(() -> guard.guard(101, "z", false, superClientState));
+    }
+
+    @Test
+    public void testDisabledMinThreshold() throws Throwable
+    {
+        Threshold.ErrorMessageProvider errorMessageProvider = (isWarn, what, v, t) -> "Should never trigger";
+        testDisabledThreshold(new MinThreshold("x", state -> DISABLED, state -> DISABLED, errorMessageProvider));
+    }
+
+    @Test
+    public void testMinThreshold() throws Throwable
+    {
+        MinThreshold guard = new MinThreshold("x",
+                                              state -> 100,
+                                              state -> 10,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s < %s",
+                                                                             isWarn ? "Warning" : "Aborting", what, v, t));
+
+        assertTrue(guard.enabled(userClientState));
+
+        assertValid(() -> guard.guard(200, "Z", false, userClientState));
+        assertWarns(() -> guard.guard(25, "A", false, userClientState), "Warning: for A, 25 < 100");
+        assertWarns(() -> guard.guard(10, "B", false, userClientState), "Warning: for B, 10 < 100");
+        assertFails(() -> guard.guard(9, "X", false, userClientState), "Aborting: for X, 9 < 10");
+        assertFails(() -> guard.guard(1, "Y", false, userClientState), "Aborting: for Y, 1 < 10");
+        assertValid(() -> guard.guard(200, "Z", false, userClientState));
+
+        assertValid(() -> guard.guard(200, "Z", true, userClientState));
+        assertWarns(() -> guard.guard(25, "A", true, userClientState), "Warning: for A, 25 < 100", "Warning: for <redacted>, 25 < 100");
+        assertWarns(() -> guard.guard(10, "B", true, userClientState), "Warning: for B, 10 < 100", "Warning: for <redacted>, 10 < 100");
+        assertFails(() -> guard.guard(9, "X", true, userClientState), "Aborting: for X, 9 < 10", "Aborting: for <redacted>, 9 < 10");
+        assertFails(() -> guard.guard(1, "Y", true, userClientState), "Aborting: for Y, 1 < 10", "Aborting: for <redacted>, 1 < 10");
+        assertValid(() -> guard.guard(200, "Z", true, userClientState));
+    }
+
+    @Test
+    public void testWarnOnlyMinThreshold() throws Throwable
+    {
+        MinThreshold guard = new MinThreshold("x",
+                                              state -> 10,
+                                              state -> DISABLED,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s < %s",
+                                                                             isWarn ? "Warning" : "Aborting", what, v, t));
+
+        assertTrue(guard.enabled(userClientState));
+
+        assertValid(() -> guard.guard(11, "Z", false, userClientState));
+        assertWarns(() -> guard.guard(5, "A", false, userClientState), "Warning: for A, 5 < 10");
+
+        assertValid(() -> guard.guard(11, "Z", true, userClientState));
+        assertWarns(() -> guard.guard(5, "A", true, userClientState), "Warning: for A, 5 < 10", "Warning: for <redacted>, 5 < 10");
+    }
+
+    @Test
+    public void testFailOnlyMinThreshold() throws Throwable
+    {
+        MinThreshold guard = new MinThreshold("x",
+                                              state -> DISABLED,
+                                              state -> 10,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s < %s",
+                                                                             isWarn ? "Warning" : "Aborting", what, v, t));
+
+        assertTrue(guard.enabled(userClientState));
+
+        assertValid(() -> guard.guard(11, "Z", false, userClientState));
+        assertFails(() -> guard.guard(5, "A", false, userClientState), "Aborting: for A, 5 < 10");
+
+        assertValid(() -> guard.guard(11, "Z", true, userClientState));
+        assertFails(() -> guard.guard(5, "A", true, userClientState), "Aborting: for A, 5 < 10", "Aborting: for <redacted>, 5 < 10");
+    }
+
+    @Test
+    public void testMinThresholdUsers() throws Throwable
+    {
+        MinThreshold guard = new MinThreshold("x",
+                                              state -> 100,
+                                              state -> 10,
+                                              (isWarn, what, v, t) -> format("%s: for %s, %s < %s",
+                                                                             isWarn ? "Warning" : "Failure", what, v, t));
+
+        // value above both thresholds
+        assertValid(() -> guard.guard(200, "x", false, null));
+        assertValid(() -> guard.guard(200, "x", false, userClientState));
+        assertValid(() -> guard.guard(200, "x", false, systemClientState));
+        assertValid(() -> guard.guard(200, "x", false, superClientState));
+
+        // value under warning threshold
+        assertWarns(() -> guard.guard(10, "y", false, null), "Warning: for y, 10 < 100");
+        assertWarns(() -> guard.guard(10, "y", false, userClientState), "Warning: for y, 10 < 100");
+        assertValid(() -> guard.guard(10, "y", false, systemClientState));
+        assertValid(() -> guard.guard(10, "y", false, superClientState));
+
+        // value under fail threshold. An undefined user means that the check comes from a background process, so we
+        // still emit failure messages and events, but we don't throw an exception to prevent interrupting that process.
+        assertFails(() -> guard.guard(9, "z", false, null), false, "Failure: for z, 9 < 10");
+        assertFails(() -> guard.guard(9, "z", false, userClientState), "Failure: for z, 9 < 10");
+        assertValid(() -> guard.guard(9, "z", false, systemClientState));
+        assertValid(() -> guard.guard(9, "z", false, superClientState));
+    }
+
+    @Test
+    public void testDisableFlag() throws Throwable
+    {
+        assertFails(() -> new DisableFlag("x", state -> true, "X").ensureEnabled(userClientState), "X is not allowed");
+        assertValid(() -> new DisableFlag("x", state -> false, "X").ensureEnabled(userClientState));
+
+        assertFails(() -> new DisableFlag("x", state -> true, "X").ensureEnabled("Y", userClientState), "Y is not allowed");
+        assertValid(() -> new DisableFlag("x", state -> false, "X").ensureEnabled("Y", userClientState));
+    }
+
+    @Test
+    public void testDisableFlagUsers() throws Throwable
+    {
+        DisableFlag enabled = new DisableFlag("x", state -> false, "X");
+        assertValid(() -> enabled.ensureEnabled(null));
+        assertValid(() -> enabled.ensureEnabled(userClientState));
+        assertValid(() -> enabled.ensureEnabled(systemClientState));
+        assertValid(() -> enabled.ensureEnabled(superClientState));
+
+        DisableFlag disabled = new DisableFlag("x", state -> true, "X");
+        assertFails(() -> disabled.ensureEnabled(userClientState), "X is not allowed");
+        assertValid(() -> disabled.ensureEnabled(systemClientState));
+        assertValid(() -> disabled.ensureEnabled(superClientState));
+    }
+
+    @Test
+    public void testValuesWarned() throws Throwable
+    {
+        // Using an insertion-ordered set below to ensure the order in the warning message checked below is deterministic
+        Values<Integer> warned = new Values<>("x",
+                                              state -> insertionOrderedSet(4, 6, 20),
+                                              state -> Collections.emptySet(),
+                                              state -> Collections.emptySet(),
+                                              "integer");
+
+        Consumer<Integer> action = i -> Assert.fail("The ignore action shouldn't have been triggered");
+        assertValid(() -> warned.guard(set(3), action, userClientState));
+        assertWarns(() -> warned.guard(set(4), action, userClientState),
+                    "Provided values [4] are not recommended for integer (warned values are: [4, 6, 20])");
+        assertWarns(() -> warned.guard(set(4, 6), action, null),
+                    "Provided values [4, 6] are not recommended for integer (warned values are: [4, 6, 20])");
+        assertWarns(() -> warned.guard(set(4, 5, 6, 7), action, null),
+                    "Provided values [4, 6] are not recommended for integer (warned values are: [4, 6, 20])");
+    }
+
+    @Test
+    public void testValuesIgnored() throws Throwable
+    {
+        // Using an insertion-ordered set below to ensure the order in the error messages checked below is deterministic
+        Values<Integer> ignored = new Values<>("x",
+                                               state -> Collections.emptySet(),
+                                               state -> insertionOrderedSet(4, 6, 20),
+                                               state -> Collections.emptySet(),
+                                               "integer");
+
+        Set<Integer> triggeredOn = set();
+        assertValid(() -> ignored.guard(set(3), triggeredOn::add, userClientState));
+        assertEquals(set(), triggeredOn);
+
+        assertWarns(() -> ignored.guard(set(4), triggeredOn::add, userClientState),
+                    "Ignoring provided values [4] as they are not supported for integer (ignored values are: [4, 6, 20])");
+        assertEquals(set(4), triggeredOn);
+        triggeredOn.clear();
+
+        assertWarns(() -> ignored.guard(set(4, 6), triggeredOn::add, null),
+                    "Ignoring provided values [4, 6] as they are not supported for integer (ignored values are: [4, 6, 20])");
+        assertEquals(set(4, 6), triggeredOn);
+        triggeredOn.clear();
+
+        assertWarns(() -> ignored.guard(set(4, 5, 6, 7), triggeredOn::add, null),
+                    "Ignoring provided values [4, 6] as they are not supported for integer (ignored values are: [4, 6, 20])");
+        assertEquals(set(4, 6), triggeredOn);
+        triggeredOn.clear();
+
+        assertThrows(() -> ignored.guard(set(4), userClientState),
+                     AssertionError.class,
+                     "There isn't an ignore action for integer, but value 4 is setup to be ignored");
+    }
+
+    @Test
+    public void testValuesDisallowed() throws Throwable
+    {
+        // Using an insertion-ordered set below to ensure the order in the error messages checked below is deterministic
+        Values<Integer> disallowed = new Values<>("x",
+                                                  state -> Collections.emptySet(),
+                                                  state -> Collections.emptySet(),
+                                                  state -> insertionOrderedSet(4, 6, 20),
+                                                  "integer");
+
+        Consumer<Integer> action = i -> Assert.fail("The ignore action shouldn't have been triggered");
+        assertValid(() -> disallowed.guard(set(3), action, userClientState));
+        assertFails(() -> disallowed.guard(set(4), action, userClientState),
+                    "Provided values [4] are not allowed for integer (disallowed values are: [4, 6, 20])");
+        assertValid(() -> disallowed.guard(set(10), action, userClientState));
+        assertFails(() -> disallowed.guard(set(20), action, userClientState),
+                    "Provided values [20] are not allowed for integer (disallowed values are: [4, 6, 20])");
+        assertValid(() -> disallowed.guard(set(200), action, userClientState));
+        assertValid(() -> disallowed.guard(set(1, 2, 3), action, userClientState));
+
+        assertFails(() -> disallowed.guard(set(4, 6), action, null), false,
+                    "Provided values [4, 6] are not allowed for integer (disallowed values are: [4, 6, 20])");
+        assertFails(() -> disallowed.guard(set(4, 5, 6, 7), action, null), false,
+                    "Provided values [4, 6] are not allowed for integer (disallowed values are: [4, 6, 20])");
+    }
+
+    @Test
+    public void testValuesUsers() throws Throwable
+    {
+        Values<Integer> disallowed = new Values<>("x",
+                                                  state -> Collections.singleton(2),
+                                                  state -> Collections.singleton(3),
+                                                  state -> Collections.singleton(4),
+                                                  "integer");
+
+        Consumer<Integer> action = i -> Assert.fail("The ignore action shouldn't have been triggered");
+
+        assertValid(() -> disallowed.guard(set(1), action, null));
+        assertValid(() -> disallowed.guard(set(1), action, userClientState));
+        assertValid(() -> disallowed.guard(set(1), action, systemClientState));
+        assertValid(() -> disallowed.guard(set(1), action, superClientState));
+
+        String message = "Provided values [2] are not recommended for integer (warned values are: [2])";
+        assertWarns(() -> disallowed.guard(set(2), action, null), message);
+        assertWarns(() -> disallowed.guard(set(2), action, userClientState), message);
+        assertValid(() -> disallowed.guard(set(2), action, systemClientState));
+        assertValid(() -> disallowed.guard(set(2), action, superClientState));
+
+        message = "Ignoring provided values [3] as they are not supported for integer (ignored values are: [3])";
+        List<Integer> triggeredOn = new ArrayList<>();
+        assertWarns(() -> disallowed.guard(set(3), triggeredOn::add, null), message);
+        assertWarns(() -> disallowed.guard(set(3), triggeredOn::add, userClientState), message);
+        assertValid(() -> disallowed.guard(set(3), triggeredOn::add, systemClientState));
+        assertValid(() -> disallowed.guard(set(3), triggeredOn::add, superClientState));
+        Assert.assertEquals(list(3, 3), triggeredOn);
+
+        message = "Provided values [4] are not allowed for integer (disallowed values are: [4])";
+        assertFails(() -> disallowed.guard(set(4), action, null), false, message);
+        assertFails(() -> disallowed.guard(set(4), action, userClientState), message);
+        assertValid(() -> disallowed.guard(set(4), action, systemClientState));
+        assertValid(() -> disallowed.guard(set(4), action, superClientState));
+    }
+
+    @Test
+    public void testPredicates() throws Throwable
+    {
+        Predicates<Integer> guard = new Predicates<>("x",
+                                                     state -> x -> x > 10,
+                                                     state -> x -> x > 100,
+                                                     (isWarn, value) -> format("%s: %s", isWarn ? "Warning" : "Aborting", value));
+
+        assertValid(() -> guard.guard(5, userClientState));
+        assertWarns(() -> guard.guard(25, userClientState), "Warning: 25");
+        assertWarns(() -> guard.guard(100, userClientState), "Warning: 100");
+        assertFails(() -> guard.guard(101, userClientState), "Aborting: 101");
+        assertFails(() -> guard.guard(200, userClientState), "Aborting: 200");
+        assertValid(() -> guard.guard(5, userClientState));
+    }
+
+    @Test
+    public void testPredicatesUsers() throws Throwable
+    {
+        Predicates<Integer> guard = new Predicates<>("x",
+                                                     state -> x -> x > 10,
+                                                     state -> x -> x > 100,
+                                                     (isWarn, value) -> format("%s: %s", isWarn ? "Warning" : "Aborting", value));
+
+        assertTrue(guard.enabled());
+        assertTrue(guard.enabled(null));
+        assertTrue(guard.enabled(userClientState));
+        assertFalse(guard.enabled(systemClientState));
+        assertFalse(guard.enabled(superClientState));
+
+        assertValid(() -> guard.guard(5, null));
+        assertValid(() -> guard.guard(5, userClientState));
+        assertValid(() -> guard.guard(5, systemClientState));
+        assertValid(() -> guard.guard(5, superClientState));
+
+        assertWarns(() -> guard.guard(25, null), "Warning: 25");
+        assertWarns(() -> guard.guard(25, userClientState), "Warning: 25");
+        assertValid(() -> guard.guard(25, systemClientState));
+        assertValid(() -> guard.guard(25, superClientState));
+
+        assertFails(() -> guard.guard(101, null), false, "Aborting: 101");
+        assertFails(() -> guard.guard(101, userClientState), "Aborting: 101");
+        assertValid(() -> guard.guard(101, systemClientState));
+        assertValid(() -> guard.guard(101, superClientState));
+    }
+
+    private static Set<Integer> set(Integer value)
+    {
+        return Collections.singleton(value);
+    }
+
+    private static Set<Integer> set(Integer... values)
+    {
+        return new HashSet<>(Arrays.asList(values));
+    }
+
+    @SafeVarargs
+    private static <T> Set<T> insertionOrderedSet(T... values)
+    {
+        return new LinkedHashSet<>(Arrays.asList(values));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/ThresholdTester.java b/test/unit/org/apache/cassandra/db/guardrails/ThresholdTester.java
new file mode 100644
index 0000000..fae305a
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/ThresholdTester.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongFunction;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.assertj.core.api.Assertions;
+
+import static java.lang.String.format;
+import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.BYTES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+/**
+ * Utility class for testing a {@link Threshold} guardrail.
+ */
+public abstract class ThresholdTester extends GuardrailTester
+{
+    private final long warnThreshold;
+    private final long failThreshold;
+    private final TriConsumer<Guardrails, Long, Long> setter;
+    private final ToLongFunction<Guardrails> warnGetter;
+    private final ToLongFunction<Guardrails> failGetter;
+    private final long maxValue;
+    private final Long disabledValue;
+
+    protected ThresholdTester(int warnThreshold,
+                              int failThreshold,
+                              Threshold threshold,
+                              TriConsumer<Guardrails, Integer, Integer> setter,
+                              ToIntFunction<Guardrails> warnGetter,
+                              ToIntFunction<Guardrails> failGetter)
+    {
+        super(threshold);
+        this.warnThreshold = warnThreshold;
+        this.failThreshold = failThreshold;
+        this.setter = (g, w, a) -> setter.accept(g, w.intValue(), a.intValue());
+        this.warnGetter = g -> (long) warnGetter.applyAsInt(g);
+        this.failGetter = g -> (long) failGetter.applyAsInt(g);
+        maxValue = Integer.MAX_VALUE;
+        disabledValue = -1L;
+    }
+
+    protected ThresholdTester(long warnThreshold,
+                              long failThreshold,
+                              Threshold threshold,
+                              TriConsumer<Guardrails, Long, Long> setter,
+                              ToLongFunction<Guardrails> warnGetter,
+                              ToLongFunction<Guardrails> failGetter)
+    {
+        super(threshold);
+        this.warnThreshold = warnThreshold;
+        this.failThreshold = failThreshold;
+        this.setter = setter;
+        this.warnGetter = warnGetter;
+        this.failGetter = failGetter;
+        maxValue = Long.MAX_VALUE;
+        disabledValue = -1L;
+    }
+
+    protected ThresholdTester(String warnThreshold,
+                              String failThreshold,
+                              Threshold threshold,
+                              TriConsumer<Guardrails, String, String> setter,
+                              Function<Guardrails, String> warnGetter,
+                              Function<Guardrails, String> failGetter)
+    {
+        super(threshold);
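+        // size-based thresholds are given as data storage strings and compared internally in bytes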
+        this.warnThreshold = new DataStorageSpec.LongBytesBound(warnThreshold).toBytes();
+        this.failThreshold = new DataStorageSpec.LongBytesBound(failThreshold).toBytes();
+        this.setter = (g, w, a) -> setter.accept(g, w == null ? null : new DataStorageSpec.LongBytesBound(w, BYTES).toString(), a == null ? null : new DataStorageSpec.LongBytesBound(a, BYTES).toString());
+        this.warnGetter = g -> new DataStorageSpec.LongBytesBound(warnGetter.apply(g)).toBytes();
+        this.failGetter = g -> new DataStorageSpec.LongBytesBound(failGetter.apply(g)).toBytes();
+        maxValue = Long.MAX_VALUE - 1;
+        disabledValue = null;
+    }
+
+    protected long currentValue()
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    protected void assertCurrentValue(int count)
+    {
+        assertEquals(count, currentValue());
+    }
+
+    @Before
+    public void before()
+    {
+        setter.accept(guardrails(), warnThreshold, failThreshold);
+    }
+
+    @Test
+    public void testConfigValidation()
+    {
+        assertNotNull(guardrail);
+        testValidationOfThresholdProperties(guardrail.name + "_warn_threshold", guardrail.name + "_fail_threshold");
+    }
+
+    protected void testValidationOfThresholdProperties(String warnName, String failName)
+    {
+        setter.accept(guardrails(), disabledValue, disabledValue);
+
+        testValidationOfStrictlyPositiveProperty((g, a) -> setter.accept(g, disabledValue, a), failName);
+        testValidationOfStrictlyPositiveProperty((g, w) -> setter.accept(g, w, disabledValue), warnName);
+
+        setter.accept(guardrails(), disabledValue, disabledValue);
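+        // a warn threshold greater than the fail threshold must be rejected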
+        Assertions.assertThatThrownBy(() -> setter.accept(guardrails(), 2L, 1L))
+                  .hasMessageContaining(guardrail.name + "_warn_threshold should be lower than the fail threshold");
+    }
+
+    protected void assertMaxThresholdValid(String query) throws Throwable
+    {
+        assertValid(query);
+
+        long warnValue = warnGetter.applyAsLong(guardrails());
+        long failValue = failGetter.applyAsLong(guardrails());
+        long current = currentValue();
+
+        if (warnValue != disabledValue)
+            Assertions.assertThat(current)
+                      .isLessThanOrEqualTo(warnValue);
+
+        if (failValue != disabledValue)
+            Assertions.assertThat(current)
+                      .isLessThanOrEqualTo(failValue);
+    }
+
+    protected void assertMinThresholdValid(String query) throws Throwable
+    {
+        assertValid(query);
+
+        long warnValue = warnGetter.applyAsLong(guardrails());
+        long failValue = failGetter.applyAsLong(guardrails());
+        long current = currentValue();
+
+        if (warnValue != disabledValue)
+            Assertions.assertThat(current)
+                      .isGreaterThanOrEqualTo(warnValue);
+
+        if (failValue != disabledValue)
+            Assertions.assertThat(current)
+                      .isGreaterThanOrEqualTo(failValue);
+    }
+
+    protected void assertThresholdWarns(String query, String message) throws Throwable
+    {
+        assertThresholdWarns(query, message, message);
+    }
+
+    protected void assertThresholdWarns(String query, String message, String redactedMessage) throws Throwable
+    {
+        assertThresholdWarns(query, Collections.singletonList(message), Collections.singletonList(redactedMessage));
+    }
+
+    protected void assertThresholdWarns(String query, List<String> messages) throws Throwable
+    {
+        assertThresholdWarns(query, messages, messages);
+    }
+
+    protected void assertThresholdWarns(String query, List<String> messages, List<String> redactedMessages) throws Throwable
+    {
+        assertWarns(query, messages, redactedMessages);
+
+        Assertions.assertThat(currentValue())
+                  .isGreaterThan(warnGetter.applyAsLong(guardrails()))
+                  .isLessThanOrEqualTo(failGetter.applyAsLong(guardrails()));
+    }
+
+    protected void assertThresholdFails(String query, String message) throws Throwable
+    {
+        assertThresholdFails(query, message, message);
+    }
+
+    protected void assertThresholdFails(String query, String message, String redactedMessage) throws Throwable
+    {
+        assertThresholdFails(query, Collections.singletonList(message), Collections.singletonList(redactedMessage));
+    }
+
+    protected void assertThresholdFails(String query, List<String> messages) throws Throwable
+    {
+        assertThresholdFails(query, messages, messages);
+    }
+
+    protected void assertThresholdFails(String query, List<String> messages, List<String> redactedMessages) throws Throwable
+    {
+        assertFails(query, messages, redactedMessages);
+
+        Assertions.assertThat(currentValue())
+                  .isGreaterThanOrEqualTo(warnGetter.applyAsLong(guardrails()))
+                  .isEqualTo(failGetter.applyAsLong(guardrails()));
+    }
+
+    protected void assertInvalidPositiveProperty(BiConsumer<Guardrails, Long> setter,
+                                                 long value,
+                                                 long maxValue,
+                                                 String name)
+    {
+        try
+        {
+            assertValidProperty(setter, value);
+            fail(format("Expected exception for guardrails.%s value: %d", name, value));
+        }
+        catch (IllegalArgumentException e)
+        {
+            String expectedMessage = null;
+
+            if (value > maxValue)
+                expectedMessage = format("Invalid value %d for %s: maximum allowed value is %d",
+                                         value, name, maxValue);
+
+            if (value == 0 && value != disabledValue)
+                expectedMessage = format("Invalid value for %s: 0 is not allowed; if attempting to disable use %s",
+                                         name, disabledValue);
+
+            if (value < 0 && disabledValue != null && value != disabledValue)
+                expectedMessage = format("Invalid value %d for %s: negative values are not "
+                                         + "allowed, outside of %s which disables the guardrail",
+                                         value, name, disabledValue);
+
+            if (expectedMessage == null && value < 0)
+                expectedMessage = "Invalid data storage: value must be non-negative";
+
+            assertEquals(format("Exception message '%s' does not contain '%s'", e.getMessage(), expectedMessage),
+                         expectedMessage, e.getMessage());
+        }
+    }
+
+    private void assertInvalidStrictlyPositiveProperty(BiConsumer<Guardrails, Long> setter, long value, String name)
+    {
+        assertInvalidPositiveProperty(setter, value, maxValue, name);
+    }
+
+    protected void assertInvalidPositiveIntProperty(BiConsumer<Guardrails, Integer> setter,
+                                                    int value,
+                                                    int maxValue,
+                                                    String name)
+    {
+        assertInvalidPositiveProperty((g, l) -> setter.accept(g, l.intValue()), (long) value, maxValue, name);
+    }
+
+    protected void testValidationOfStrictlyPositiveProperty(BiConsumer<Guardrails, Long> setter, String name)
+    {
+        assertInvalidStrictlyPositiveProperty(setter, Integer.MIN_VALUE, name);
+        assertInvalidStrictlyPositiveProperty(setter, -2, name);
+        assertValidProperty(setter, disabledValue);
+        assertInvalidStrictlyPositiveProperty(setter, disabledValue == null ? -1 : 0, name);
+        assertValidProperty(setter, 1L);
+        assertValidProperty(setter, 2L);
+        assertValidProperty(setter, maxValue);
+    }
+
+    @FunctionalInterface
+    public interface TriConsumer<T, U, V>
+    {
+        void accept(T t, U u, V v);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/HelpersTest.java b/test/unit/org/apache/cassandra/db/lifecycle/HelpersTest.java
index 7acd3e6..1b121c1 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/HelpersTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/HelpersTest.java
@@ -38,8 +38,8 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.schema.MockSchema;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 public class HelpersTest
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/LifecycleTransactionTest.java b/test/unit/org/apache/cassandra/db/lifecycle/LifecycleTransactionTest.java
index 1e0d157..781a468 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/LifecycleTransactionTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/LifecycleTransactionTest.java
@@ -30,7 +30,6 @@
 import org.junit.Assert;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.Memtable;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
 import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction.ReaderState.Action;
@@ -77,7 +76,7 @@
     public void testUpdates() // (including obsoletion)
     {
         ColumnFamilyStore cfs = MockSchema.newCFS();
-        Tracker tracker = new Tracker(null, false);
+        Tracker tracker = Tracker.newDummyTracker();
         SSTableReader[] readers = readersArray(0, 3, cfs);
         SSTableReader[] readers2 = readersArray(0, 4, cfs);
         SSTableReader[] readers3 = readersArray(0, 4, cfs);
@@ -141,7 +140,7 @@
     public void testCancellation()
     {
         ColumnFamilyStore cfs = MockSchema.newCFS();
-        Tracker tracker = new Tracker(null, false);
+        Tracker tracker = Tracker.newDummyTracker();
         List<SSTableReader> readers = readers(0, 3, cfs);
         tracker.addInitialSSTables(readers);
         LifecycleTransaction txn = tracker.tryModify(readers, OperationType.UNKNOWN);
@@ -185,7 +184,7 @@
     public void testSplit()
     {
         ColumnFamilyStore cfs = MockSchema.newCFS();
-        Tracker tracker = new Tracker(null, false);
+        Tracker tracker = Tracker.newDummyTracker();
         List<SSTableReader> readers = readers(0, 4, cfs);
         tracker.addInitialSSTables(readers);
         LifecycleTransaction txn = tracker.tryModify(readers, OperationType.UNKNOWN);
@@ -271,7 +270,7 @@
 
         private static Tracker tracker(ColumnFamilyStore cfs, List<SSTableReader> readers)
         {
-            Tracker tracker = new Tracker(new Memtable(new AtomicReference<>(CommitLogPosition.NONE), cfs), false);
+            Tracker tracker = new Tracker(cfs, cfs.createMemtable(new AtomicReference<>(CommitLogPosition.NONE)), false);
             tracker.addInitialSSTables(readers);
             return tracker;
         }
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java b/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
index a4e74ce..2812353 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
@@ -17,11 +17,11 @@
  */
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
-import java.io.IOError;
+
 import java.io.IOException;
-import java.io.RandomAccessFile;
+import java.io.UncheckedIOException;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -32,6 +32,7 @@
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
@@ -39,21 +40,25 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.io.sstable.metadata.MetadataType;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileHandle;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.MockSchema;
 import org.apache.cassandra.utils.AlwaysPresentFilter;
+import org.apache.cassandra.utils.Throwables;
 import org.apache.cassandra.utils.concurrent.AbstractTransactionalTest;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
@@ -141,9 +146,9 @@
 
             void assertInProgress() throws Exception
             {
-                assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(),
-                                                                                   sstableOld.getAllFilePaths(),
-                                                                                   txnLogs.logFilePaths())));
+                assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(),
+                                                                                sstableOld.getAllFilePaths(),
+                                                                                txnLogs.logFilePaths())));
             }
 
             void assertPrepared() throws Exception
@@ -152,12 +157,12 @@
 
             void assertAborted() throws Exception
             {
-                assertFiles(dataFolder.getPath(), new HashSet<>(sstableOld.getAllFilePaths()));
+                assertFiles(dataFolder.path(), new HashSet<>(sstableOld.getAllFilePaths()));
             }
 
             void assertCommitted() throws Exception
             {
-                assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));
+                assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths()));
             }
         }
 
@@ -225,15 +230,15 @@
         Thread.sleep(1);
         LogTransaction.waitForDeletions();
 
-        assertFiles(dataFolder.getPath(), Collections.<String>emptySet());
+        assertFiles(dataFolder.path(), Collections.<String>emptySet());
     }
 
     @Test
     public void testUntrackIdenticalLogFilesOnDisk() throws Throwable
     {
         ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
-        File datadir1 = Files.createTempDirectory("datadir1").toFile();
-        File datadir2 = Files.createTempDirectory("datadir2").toFile();
+        File datadir1 = new File(Files.createTempDirectory("datadir1"));
+        File datadir2 = new File(Files.createTempDirectory("datadir2"));
         SSTableReader sstable1 = sstable(datadir1, cfs, 1, 128);
         SSTableReader sstable2 = sstable(datadir2, cfs, 1, 128);
 
@@ -285,7 +290,7 @@
         sstableOld1.selfRef().release();
         sstableOld2.selfRef().release();
 
-        assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));
+        assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths()));
 
         sstableNew.selfRef().release();
     }
@@ -303,7 +308,7 @@
         log.trackNew(sstable);
         log.finish();
 
-        assertFiles(dataFolder.getPath(), new HashSet<>(sstable.getAllFilePaths()));
+        assertFiles(dataFolder.path(), new HashSet<>(sstable.getAllFilePaths()));
 
         sstable.selfRef().release();
     }
@@ -325,7 +330,7 @@
         sstable.markObsolete(tidier);
         sstable.selfRef().release();
 
-        assertFiles(dataFolder.getPath(), new HashSet<>());
+        assertFiles(dataFolder.path(), new HashSet<>());
     }
 
     @Test
@@ -361,8 +366,8 @@
         Arrays.stream(sstables).forEach(s -> s.selfRef().release());
         LogTransaction.waitForDeletions();
 
-        assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths()));
-        assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths()));
+        assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths()));
+        assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths()));
     }
 
     @Test
@@ -380,7 +385,7 @@
 
         sstable.selfRef().release();
 
-        assertFiles(dataFolder.getPath(), new HashSet<>());
+        assertFiles(dataFolder.path(), new HashSet<>());
     }
 
     @Test
@@ -401,7 +406,7 @@
 
         sstable.selfRef().release();
 
-        assertFiles(dataFolder.getPath(), new HashSet<>(sstable.getAllFilePaths()));
+        assertFiles(dataFolder.path(), new HashSet<>(sstable.getAllFilePaths()));
     }
 
     @Test
@@ -435,8 +440,8 @@
         Arrays.stream(sstables).forEach(s -> s.selfRef().release());
         LogTransaction.waitForDeletions();
 
-        assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[0].getAllFilePaths()));
-        assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[2].getAllFilePaths()));
+        assertFiles(dataFolder1.path(), new HashSet<>(sstables[0].getAllFilePaths()));
+        assertFiles(dataFolder2.path(), new HashSet<>(sstables[2].getAllFilePaths()));
     }
 
 
@@ -470,7 +475,7 @@
         Map<Descriptor, Set<Component>> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list();
         assertEquals(1, sstables.size());
 
-        assertFiles(dataFolder.getPath(), new HashSet<>(sstableOld.getAllFilePaths()));
+        assertFiles(dataFolder.path(), new HashSet<>(sstableOld.getAllFilePaths()));
 
         // complete the transaction before releasing files
         tidier.run();
@@ -510,7 +515,7 @@
         Map<Descriptor, Set<Component>> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list();
         assertEquals(1, sstables.size());
 
-        assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));
+        assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths()));
 
         // complete the transaction to avoid LEAK errors
         tidier.run();
@@ -560,8 +565,8 @@
         assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2)));
 
         // new tables should be only table left
-        assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths()));
-        assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths()));
+        assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths()));
+        assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths()));
 
         // complete the transaction to avoid LEAK errors
         Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run);
@@ -611,8 +616,8 @@
         assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2)));
 
         // old tables should be only table left
-        assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[0].getAllFilePaths()));
-        assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[2].getAllFilePaths()));
+        assertFiles(dataFolder1.path(), new HashSet<>(sstables[0].getAllFilePaths()));
+        assertFiles(dataFolder2.path(), new HashSet<>(sstables[2].getAllFilePaths()));
 
         // complete the transaction to avoid LEAK errors
         Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run);
@@ -780,18 +785,18 @@
         if (shouldCommit)
         {
             // only new sstables should still be there
-            assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths()));
-            assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths()));
+            assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths()));
+            assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths()));
         }
         else
         {
             // all files should still be there
-            assertFiles(dataFolder1.getPath(), Sets.newHashSet(Iterables.concat(sstables[0].getAllFilePaths(),
-                                                                                sstables[1].getAllFilePaths(),
-                                                                                Collections.singleton(log.logFilePaths().get(0)))));
-            assertFiles(dataFolder2.getPath(), Sets.newHashSet(Iterables.concat(sstables[2].getAllFilePaths(),
-                                                                                sstables[3].getAllFilePaths(),
-                                                                                Collections.singleton(log.logFilePaths().get(1)))));
+            assertFiles(dataFolder1.path(), Sets.newHashSet(Iterables.concat(sstables[0].getAllFilePaths(),
+                                                                             sstables[1].getAllFilePaths(),
+                                                                             Collections.singleton(log.logFilePaths().get(0)))));
+            assertFiles(dataFolder2.path(), Sets.newHashSet(Iterables.concat(sstables[2].getAllFilePaths(),
+                                                                             sstables[3].getAllFilePaths(),
+                                                                             Collections.singleton(log.logFilePaths().get(1)))));
         }
 
 
@@ -816,7 +821,7 @@
         {
             Directories directories = new Directories(cfs.metadata());
 
-            File[] beforeSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory());
+            File[] beforeSecondSSTable = dataFolder.tryList(pathname -> !pathname.isDirectory());
 
             SSTableReader sstable2 = sstable(dataFolder, cfs, 1, 128);
             log.trackNew(sstable2);
@@ -825,7 +830,7 @@
             assertEquals(2, sstables.size());
 
             // this should contain sstable1, sstable2 and the transaction log file
-            File[] afterSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory());
+            File[] afterSecondSSTable = dataFolder.tryList(pathname -> !pathname.isDirectory());
 
             int numNewFiles = afterSecondSSTable.length - beforeSecondSSTable.length;
             assertEquals(numNewFiles - 1, sstable2.getAllFilePaths().size()); // new files except for transaction log file
@@ -1070,7 +1075,7 @@
             LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
 
             // make sure to exclude the old files that were deleted by the modifier
-            assertFiles(dataFolder.getPath(), oldFiles);
+            assertFiles(dataFolder.path(), oldFiles);
         }
         else
         { // if an intermediate line was also modified, it should ignore the tx log file
@@ -1078,9 +1083,9 @@
             //This should not remove any files
             LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
 
-            assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(newFiles,
-                                                                               oldFiles,
-                                                                               log.logFilePaths())));
+            assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(newFiles,
+                                                                            oldFiles,
+                                                                            log.logFilePaths())));
         }
 
         // make sure to run the tidier to avoid any leaks in the logs
@@ -1096,7 +1101,7 @@
                                       for (String filePath : sstable.getAllFilePaths())
                                       {
                                           if (filePath.endsWith("Data.db"))
-                                              assertTrue(new File(filePath).setLastModified(System.currentTimeMillis() + 60000)); //one minute later
+                                              assertTrue(new File(filePath).trySetLastModified(System.currentTimeMillis() + 60000)); //one minute later
                                       }
                                   });
     }
@@ -1124,7 +1129,7 @@
         //This should not remove the old files
         LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
 
-        assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(
+        assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(
                                                                           sstableNew.getAllFilePaths(),
                                                                           sstableOld.getAllFilePaths(),
                                                                           log.logFilePaths())));
@@ -1135,9 +1140,9 @@
         // complete the transaction to avoid LEAK errors
         assertNull(log.complete(null));
 
-        assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(),
-                                                                           sstableOld.getAllFilePaths(),
-                                                                           log.logFilePaths())));
+        assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(),
+                                                                        sstableOld.getAllFilePaths(),
+                                                                        log.logFilePaths())));
 
         // make sure to run the tidier to avoid any leaks in the logs
         tidier.run();
@@ -1156,7 +1161,7 @@
                                       {
                                           File f = new File(filePath);
                                           long lastModified = f.lastModified();
-                                          f.setLastModified(lastModified - (lastModified % 1000));
+                                          f.trySetLastModified(lastModified - (lastModified % 1000));
                                       }
                                   });
     }
@@ -1184,13 +1189,13 @@
         LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
 
         // only the new files should be there
-        assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths()));
+        assertFiles(dataFolder.path(), Sets.newHashSet(sstableNew.getAllFilePaths()));
         sstableNew.selfRef().release();
 
         // complete the transaction to avoid LEAK errors
         assertNull(log.complete(null));
 
-        assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths()));
+        assertFiles(dataFolder.path(), Sets.newHashSet(sstableNew.getAllFilePaths()));
 
         // make sure to run the tidier to avoid any leaks in the logs
         tidier.run();
@@ -1256,17 +1261,15 @@
 
     private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException
     {
-        Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
+        Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), new SequenceBasedSSTableId(generation), SSTableFormat.Type.BIG);
         Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
         for (Component component : components)
         {
             File file = new File(descriptor.filenameFor(component));
             if (!file.exists())
-                assertTrue(file.createNewFile());
-            try (RandomAccessFile raf = new RandomAccessFile(file, "rw"))
-            {
-                raf.setLength(size);
-            }
+                assertTrue(file.createFileIfNotExists());
+
+            Util.setFileLength(file, size);
         }
 
         FileHandle dFile = new FileHandle.Builder(descriptor.filenameFor(Component.DATA)).complete();
@@ -1300,8 +1303,8 @@
     {
         LogTransaction.waitForDeletions();
 
-        File dir = new File(dirPath).getCanonicalFile();
-        File[] files = dir.listFiles();
+        File dir = new File(dirPath).toCanonical();
+        File[] files = dir.tryList();
         if (files != null)
         {
             for (File file : files)
@@ -1309,7 +1312,7 @@
                 if (file.isDirectory())
                     continue;
 
-                String filePath = file.getPath();
+                String filePath = file.path();
                 assertTrue(String.format("%s not in [%s]", filePath, expectedFiles), expectedFiles.contains(filePath));
                 expectedFiles.remove(filePath);
             }
@@ -1329,8 +1332,7 @@
     }
 
     // Check either that a temporary file is expected to exist (in the existingFiles) or that
-    // it does not exist any longer (on Windows we need to check File.exists() because a list
-    // might return a file as existing even if it does not)
+    // it does not exist any longer.
     private static void assertFiles(Iterable<String> existingFiles, Set<File> temporaryFiles)
     {
         for (String filePath : existingFiles)
@@ -1359,6 +1361,28 @@
         return listFiles(folder, Directories.FileType.FINAL);
     }
 
+    // Used by listFiles - this test is deliberately racing with files being
+    // removed and cleaned up, so it is possible that files are removed between listing and getting their
+    // canonical names, in which case they can be dropped from the stream.
+    private static Stream<File> toCanonicalIgnoringNotFound(File file)
+    {
+        try
+        {
+            return Stream.of(file.toCanonical());
+        }
+        catch (UncheckedIOException io)
+        {
+            if (Throwables.isCausedBy(io, t -> t instanceof NoSuchFileException))
+            {
+                return Stream.empty();
+            }
+            else
+            {
+                throw io;
+            }
+        }
+    }
+
     static Set<File> listFiles(File folder, Directories.FileType... types)
     {
         Collection<Directories.FileType> match = Arrays.asList(types);
@@ -1366,16 +1390,7 @@
                                       (file, type) -> match.contains(type),
                                       Directories.OnTxnErr.IGNORE).list()
                        .stream()
-                       .map(f -> {
-                           try
-                           {
-                               return f.getCanonicalFile();
-                           }
-                           catch (IOException e)
-                           {
-                               throw new IOError(e);
-                           }
-                       })
+                       .flatMap(LogTransactionTest::toCanonicalIgnoringNotFound)
                        .collect(Collectors.toSet());
     }
 }
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
index b7b7d4a..0420957 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
@@ -18,13 +18,13 @@
 
 package org.apache.cassandra.db.lifecycle;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -47,6 +47,7 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -84,8 +85,8 @@
         LogTransaction.waitForDeletions();
 
         // both sstables are in the same folder
-        assertFiles(oldSSTable.descriptor.directory.getPath(), new HashSet<>(newSSTable.getAllFilePaths()));
-        assertFiles(newSSTable.descriptor.directory.getPath(), new HashSet<>(newSSTable.getAllFilePaths()));
+        assertFiles(oldSSTable.descriptor.directory.path(), new HashSet<>(newSSTable.getAllFilePaths()));
+        assertFiles(newSSTable.descriptor.directory.path(), new HashSet<>(newSSTable.getAllFilePaths()));
     }
 
     @Test
@@ -100,7 +101,7 @@
         replaceSSTable(cfs, txn, true);
         LogTransaction.waitForDeletions();
 
-        assertFiles(oldSSTable.descriptor.directory.getPath(), new HashSet<>(oldSSTable.getAllFilePaths()));
+        assertFiles(oldSSTable.descriptor.directory.path(), new HashSet<>(oldSSTable.getAllFilePaths()));
     }
 
     @Test
@@ -111,7 +112,7 @@
 
         SSTableReader ssTableReader = getSSTable(cfs, 100);
 
-        String dataFolder = cfs.getLiveSSTables().iterator().next().descriptor.directory.getPath();
+        String dataFolder = cfs.getLiveSSTables().iterator().next().descriptor.directory.path();
         assertFiles(dataFolder, new HashSet<>(ssTableReader.getAllFilePaths()));
     }
 
@@ -155,7 +156,7 @@
                  CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId())
             )
             {
-                long lastCheckObsoletion = System.nanoTime();
+                long lastCheckObsoletion = nanoTime();
                 File directory = txn.originals().iterator().next().descriptor.directory;
                 Descriptor desc = cfs.newSSTableDescriptor(directory);
                 TableMetadataRef metadata = Schema.instance.getTableMetadataRef(desc);
@@ -173,10 +174,10 @@
                 {
                     rewriter.append(ci.next());
 
-                    if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
+                    if (nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
                     {
                         controller.maybeRefreshOverlaps();
-                        lastCheckObsoletion = System.nanoTime();
+                        lastCheckObsoletion = nanoTime();
                     }
                 }
 
@@ -201,12 +202,12 @@
     private void assertFiles(String dirPath, Set<String> expectedFiles)
     {
         File dir = new File(dirPath);
-        for (File file : dir.listFiles())
+        for (File file : dir.tryList())
         {
             if (file.isDirectory())
                 continue;
 
-            String filePath = file.getPath();
+            String filePath = file.path();
             assertTrue(filePath, expectedFiles.contains(filePath));
             expectedFiles.remove(filePath);
         }
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java b/test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java
index 4390b20..e39f71f 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java
@@ -24,23 +24,22 @@
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
-
 import javax.annotation.Nullable;
 
 import com.google.common.base.Function;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.junit.Assert;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.Memtable;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
 import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.notifications.*;
 import org.apache.cassandra.schema.CachingParams;
@@ -85,7 +84,7 @@
     public void testTryModify()
     {
         ColumnFamilyStore cfs = MockSchema.newCFS();
-        Tracker tracker = new Tracker(null, false);
+        Tracker tracker = Tracker.newDummyTracker();
         List<SSTableReader> readers = ImmutableList.of(MockSchema.sstable(0, true, cfs), MockSchema.sstable(1, cfs), MockSchema.sstable(2, cfs));
         tracker.addInitialSSTables(copyOf(readers));
         Assert.assertNull(tracker.tryModify(ImmutableList.of(MockSchema.sstable(0, cfs)), OperationType.COMPACTION));
@@ -108,7 +107,7 @@
     public void testApply()
     {
         final ColumnFamilyStore cfs = MockSchema.newCFS();
-        final Tracker tracker = new Tracker(null, false);
+        final Tracker tracker = Tracker.newDummyTracker();
         final View resultView = ViewTest.fakeView(0, 0, cfs);
         final AtomicInteger count = new AtomicInteger();
         tracker.apply(new Predicate<View>()
@@ -277,15 +276,15 @@
         Tracker tracker = cfs.getTracker();
         tracker.subscribe(listener);
 
-        Memtable prev1 = tracker.switchMemtable(true, new Memtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition()), cfs));
+        Memtable prev1 = tracker.switchMemtable(true, cfs.createMemtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition())));
         OpOrder.Group write1 = cfs.keyspace.writeOrder.getCurrent();
         OpOrder.Barrier barrier1 = cfs.keyspace.writeOrder.newBarrier();
-        prev1.setDiscarding(barrier1, new AtomicReference<>(CommitLog.instance.getCurrentPosition()));
+        prev1.switchOut(barrier1, new AtomicReference<>(CommitLog.instance.getCurrentPosition()));
         barrier1.issue();
-        Memtable prev2 = tracker.switchMemtable(false, new Memtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition()), cfs));
+        Memtable prev2 = tracker.switchMemtable(false, cfs.createMemtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition())));
         OpOrder.Group write2 = cfs.keyspace.writeOrder.getCurrent();
         OpOrder.Barrier barrier2 = cfs.keyspace.writeOrder.newBarrier();
-        prev2.setDiscarding(barrier2, new AtomicReference<>(CommitLog.instance.getCurrentPosition()));
+        prev2.switchOut(barrier2, new AtomicReference<>(CommitLog.instance.getCurrentPosition()));
         barrier2.issue();
         Memtable cur = tracker.getView().getCurrentMemtable();
         OpOrder.Group writecur = cfs.keyspace.writeOrder.getCurrent();
@@ -325,7 +324,7 @@
         tracker = cfs.getTracker();
         listener = new MockListener(false);
         tracker.subscribe(listener);
-        prev1 = tracker.switchMemtable(false, new Memtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition()), cfs));
+        prev1 = tracker.switchMemtable(false, cfs.createMemtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition())));
         tracker.markFlushing(prev1);
         reader = MockSchema.sstable(0, 10, true, cfs);
         cfs.invalidate(false);
@@ -348,7 +347,7 @@
     {
         ColumnFamilyStore cfs = MockSchema.newCFS();
         SSTableReader r1 = MockSchema.sstable(0, cfs), r2 = MockSchema.sstable(1, cfs);
-        Tracker tracker = new Tracker(null, false);
+        Tracker tracker = Tracker.newDummyTracker();
         MockListener listener = new MockListener(false);
         tracker.subscribe(listener);
         tracker.notifyAdded(singleton(r1), false);
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java b/test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java
index fd32087..eb162d5 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java
@@ -34,9 +34,9 @@
 import org.junit.Assert;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.Memtable;
 import org.apache.cassandra.db.PartitionPosition;
 import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.schema.MockSchema;
diff --git a/test/unit/org/apache/cassandra/db/marshal/CollectionTypesTest.java b/test/unit/org/apache/cassandra/db/marshal/CollectionTypesTest.java
index 971af76..889364b 100644
--- a/test/unit/org/apache/cassandra/db/marshal/CollectionTypesTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/CollectionTypesTest.java
@@ -31,7 +31,6 @@
 import org.junit.Test;
 
 import org.apache.cassandra.cql3.CQL3Type;
-import org.apache.cassandra.transport.ProtocolVersion;
 
 import static org.apache.cassandra.db.marshal.ValueAccessors.ACCESSORS;
 
diff --git a/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java
index eb838d3..ecdf0d7 100644
--- a/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java
@@ -26,6 +26,8 @@
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.fail;
 import static org.junit.Assert.assertEquals;
 
@@ -40,6 +42,7 @@
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.serializers.MarshalException;
+import org.apache.cassandra.serializers.UUIDSerializer;
 import org.apache.cassandra.utils.*;
 
 public class CompositeTypeTest
@@ -57,11 +60,11 @@
     }
 
     private static final int UUID_COUNT = 3;
-    private static final UUID[] uuids = new UUID[UUID_COUNT];
+    private static final TimeUUID[] uuids = new TimeUUID[UUID_COUNT];
     static
     {
         for (int i = 0; i < UUID_COUNT; ++i)
-            uuids[i] = UUIDGen.getTimeUUID();
+            uuids[i] = nextTimeUUID();
     }
 
     @BeforeClass
@@ -138,7 +141,7 @@
         ByteBuffer key = createCompositeKey("test1", uuids[1], 42, false);
         comparator.validate(key);
 
-        key = createCompositeKey("test1", null, -1, false);
+        key = createCompositeKey("test1", (ByteBuffer) null, -1, false);
         comparator.validate(key);
 
         key = createCompositeKey("test1", uuids[2], -1, true);
@@ -168,7 +171,7 @@
             assert e.toString().contains("should be 16 or 0 bytes");
         }
 
-        key = createCompositeKey("test1", UUID.randomUUID(), 42, false);
+        key = createCompositeKey("test1", UUIDSerializer.instance.serialize(UUID.randomUUID()), 42, false);
         try
         {
             comparator.validate(key);
@@ -186,7 +189,7 @@
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARDCOMPOSITE);
 
-        ByteBuffer cname1 = createCompositeKey("test1", null, -1, false);
+        ByteBuffer cname1 = createCompositeKey("test1", (ByteBuffer) null, -1, false);
         ByteBuffer cname2 = createCompositeKey("test1", uuids[0], 24, false);
         ByteBuffer cname3 = createCompositeKey("test1", uuids[0], 42, false);
         ByteBuffer cname4 = createCompositeKey("test2", uuids[0], -1, false);
@@ -281,7 +284,12 @@
         }
     }
 
-    private ByteBuffer createCompositeKey(String s, UUID uuid, int i, boolean lastIsOne)
+    private ByteBuffer createCompositeKey(String s, TimeUUID uuid, int i, boolean lastIsOne)
+    {
+        return createCompositeKey(s, uuid == null ? null : uuid.toBytes(), i, lastIsOne);
+    }
+
+    private ByteBuffer createCompositeKey(String s, ByteBuffer uuid, int i, boolean lastIsOne)
     {
         ByteBuffer bytes = ByteBufferUtil.bytes(s);
         int totalSize = 0;
@@ -308,7 +316,7 @@
             if (uuid != null)
             {
                 bb.putShort((short) 16);
-                bb.put(UUIDGen.decompose(uuid));
+                bb.put(uuid);
                 bb.put(i == -1 && lastIsOne ? (byte)1 : (byte)0);
                 if (i != -1)
                 {
diff --git a/test/unit/org/apache/cassandra/db/marshal/DecimalTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/DecimalTypeTest.java
index 108f48f..b1f4a43 100644
--- a/test/unit/org/apache/cassandra/db/marshal/DecimalTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/DecimalTypeTest.java
@@ -24,7 +24,6 @@
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 
-import org.apache.cassandra.db.marshal.DecimalType;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.junit.Test;
 
diff --git a/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
index 9f8eec3..d22a8ac 100644
--- a/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
@@ -27,6 +27,8 @@
 
 import org.junit.BeforeClass;
 import org.junit.Test;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.fail;
 
 import org.apache.cassandra.SchemaLoader;
@@ -62,7 +64,7 @@
     static
     {
         for (int i = 0; i < UUID_COUNT; ++i)
-            uuids[i] = UUIDGen.getTimeUUID();
+            uuids[i] = nextTimeUUID().asUUID();
     }
 
     @BeforeClass
diff --git a/test/unit/org/apache/cassandra/db/marshal/JsonConversionTest.java b/test/unit/org/apache/cassandra/db/marshal/JsonConversionTest.java
index 847ebef..bab0611 100644
--- a/test/unit/org/apache/cassandra/db/marshal/JsonConversionTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/JsonConversionTest.java
@@ -18,6 +18,7 @@
 */
 package org.apache.cassandra.db.marshal;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 
 import java.nio.ByteBuffer;
@@ -25,7 +26,7 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.transport.ProtocolVersion;
-import org.apache.cassandra.utils.UUIDGen;
+
 import org.junit.Test;
 
 public class JsonConversionTest
@@ -282,7 +283,7 @@
     public void testTimeUUID() throws Exception
     {
         String type = "TimeUUIDType";
-        String json = "\"" + UUIDGen.getTimeUUID() + "\"";
+        String json = "\"" + nextTimeUUID() + "\"";
         assertBytebufferPositionAndOutput(json, type);
     }
 
diff --git a/test/unit/org/apache/cassandra/db/marshal/RoundTripTest.java b/test/unit/org/apache/cassandra/db/marshal/RoundTripTest.java
index 9f46850..9a4711a 100644
--- a/test/unit/org/apache/cassandra/db/marshal/RoundTripTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/RoundTripTest.java
@@ -22,6 +22,7 @@
 
 import org.apache.cassandra.serializers.*;
 import org.apache.cassandra.utils.Hex;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.UUIDGen;
 import org.junit.Test;
 
@@ -30,6 +31,8 @@
 import java.nio.charset.StandardCharsets;
 import java.util.UUID;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class RoundTripTest
 {
     @Test
@@ -88,7 +91,7 @@
     @Test
     public void testLexicalUUID()
     {
-        UUID uuid = UUIDGen.getTimeUUID();
+        UUID uuid = nextTimeUUID().asUUID();
         assert LexicalUUIDType.instance.fromString(LexicalUUIDType.instance.getString(ByteBuffer.wrap(UUIDGen.decompose(uuid))))
                 .equals(ByteBuffer.wrap(UUIDGen.decompose(uuid)));
         assert LexicalUUIDType.instance.compose(ByteBuffer.wrap(UUIDGen.decompose(uuid))).equals(uuid);
@@ -98,15 +101,15 @@
     @Test
     public void testTimeUUID()
     {
-        UUID uuid = UUIDGen.getTimeUUID();
+        TimeUUID uuid = nextTimeUUID();
         assert TimeUUIDType.instance.getString(TimeUUIDType.instance.fromString(uuid.toString()))
                 .equals(uuid.toString());
-        assert TimeUUIDType.instance.fromString(TimeUUIDType.instance.getString(ByteBuffer.wrap(UUIDGen.decompose(uuid))))
-                .equals(ByteBuffer.wrap(UUIDGen.decompose(uuid)));
-        assert TimeUUIDType.instance.compose(ByteBuffer.wrap(UUIDGen.decompose(uuid))).equals(uuid);
+        assert TimeUUIDType.instance.fromString(TimeUUIDType.instance.getString(uuid.toBytes()))
+                .equals(uuid.toBytes());
+        assert TimeUUIDType.instance.compose(uuid.toBytes()).equals(uuid);
 
         assert uuid.equals(TimeUUIDType.instance.compose(TimeUUIDType.instance.fromString(uuid.toString())));
-        assert UUIDSerializer.instance.toString(uuid).equals(uuid.toString());
+        assert uuid.toString().equals(uuid.toString());
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/db/marshal/TimeUUIDTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/TimeUUIDTypeTest.java
index 9a71e06..8e2dc31 100644
--- a/test/unit/org/apache/cassandra/db/marshal/TimeUUIDTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/TimeUUIDTypeTest.java
@@ -24,8 +24,12 @@
 import org.junit.Assert;
 import org.apache.cassandra.serializers.MarshalException;
 import org.junit.Test;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 
+import org.apache.cassandra.serializers.UUIDSerializer;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.UUIDGen;
 
 public class TimeUUIDTypeTest
@@ -35,44 +39,44 @@
     @Test
     public void testEquality()
     {
-        UUID a = UUIDGen.getTimeUUID();
-        UUID b = new UUID(a.getMostSignificantBits(), a.getLeastSignificantBits());
+        TimeUUID a = nextTimeUUID();
+        TimeUUID b = TimeUUID.fromBytes(a.msb(), a.lsb());
 
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(a)));
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(b)));
-        assertEquals(0, timeUUIDType.compare(ByteBuffer.wrap(UUIDGen.decompose(a)), ByteBuffer.wrap(UUIDGen.decompose(b))));
+        timeUUIDType.validate(a.toBytes());
+        timeUUIDType.validate(b.toBytes());
+        assertEquals(0, timeUUIDType.compare(a.toBytes(), b.toBytes()));
     }
 
     @Test
     public void testSmaller()
     {
-        UUID a = UUIDGen.getTimeUUID();
-        UUID b = UUIDGen.getTimeUUID();
-        UUID c = UUIDGen.getTimeUUID();
+        TimeUUID a = nextTimeUUID();
+        TimeUUID b = nextTimeUUID();
+        TimeUUID c = nextTimeUUID();
 
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(a)));
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(b)));
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(c)));
+        timeUUIDType.validate(a.toBytes());
+        timeUUIDType.validate(b.toBytes());
+        timeUUIDType.validate(c.toBytes());
 
-        assert timeUUIDType.compare(ByteBuffer.wrap(UUIDGen.decompose(a)), ByteBuffer.wrap(UUIDGen.decompose(b))) < 0;
-        assert timeUUIDType.compare(ByteBuffer.wrap(UUIDGen.decompose(b)), ByteBuffer.wrap(UUIDGen.decompose(c))) < 0;
-        assert timeUUIDType.compare(ByteBuffer.wrap(UUIDGen.decompose(a)), ByteBuffer.wrap(UUIDGen.decompose(c))) < 0;
+        assert timeUUIDType.compare(a.toBytes(), b.toBytes()) < 0;
+        assert timeUUIDType.compare(b.toBytes(), c.toBytes()) < 0;
+        assert timeUUIDType.compare(a.toBytes(), c.toBytes()) < 0;
     }
 
     @Test
     public void testBigger()
     {
-        UUID a = UUIDGen.getTimeUUID();
-        UUID b = UUIDGen.getTimeUUID();
-        UUID c = UUIDGen.getTimeUUID();
+        TimeUUID a = nextTimeUUID();
+        TimeUUID b = nextTimeUUID();
+        TimeUUID c = nextTimeUUID();
 
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(a)));
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(b)));
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(c)));
+        timeUUIDType.validate(a.toBytes());
+        timeUUIDType.validate(b.toBytes());
+        timeUUIDType.validate(c.toBytes());
 
-        assert timeUUIDType.compare(ByteBuffer.wrap(UUIDGen.decompose(c)), ByteBuffer.wrap(UUIDGen.decompose(b))) > 0;
-        assert timeUUIDType.compare(ByteBuffer.wrap(UUIDGen.decompose(b)), ByteBuffer.wrap(UUIDGen.decompose(a))) > 0;
-        assert timeUUIDType.compare(ByteBuffer.wrap(UUIDGen.decompose(c)), ByteBuffer.wrap(UUIDGen.decompose(a))) > 0;
+        assert timeUUIDType.compare(c.toBytes(), b.toBytes()) > 0;
+        assert timeUUIDType.compare(b.toBytes(), a.toBytes()) > 0;
+        assert timeUUIDType.compare(c.toBytes(), a.toBytes()) > 0;
     }
 
     @Test
@@ -153,7 +157,7 @@
     {
         UUID uuid1 = UUID.fromString("00000000-0000-1000-0000-000000000000");
         assert uuid1.version() == 1;
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(uuid1)));
+        timeUUIDType.validate(UUIDSerializer.instance.serialize(uuid1));
     }
 
     @Test(expected = MarshalException.class)
@@ -161,7 +165,7 @@
     {
         UUID uuid2 = UUID.fromString("00000000-0000-2100-0000-000000000000");
         assert uuid2.version() == 2;
-        timeUUIDType.validate(ByteBuffer.wrap(UUIDGen.decompose(uuid2)));
+        timeUUIDType.validate(UUIDSerializer.instance.serialize(uuid2));
     }
 
 
diff --git a/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java
index b34207f..3ec63c0 100644
--- a/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java
@@ -42,4 +42,4 @@
                       .isEqualTo(buffer);
         });
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java b/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
index 7c0c863..4d25a1f 100644
--- a/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
@@ -40,6 +40,7 @@
 import static org.apache.cassandra.utils.AbstractTypeGenerators.getTypeSupport;
 import static org.apache.cassandra.utils.AbstractTypeGenerators.primitiveTypeGen;
 import static org.apache.cassandra.utils.AbstractTypeGenerators.userTypeGen;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.quicktheories.QuickTheory.qt;
 
@@ -61,7 +62,7 @@
     @Test
     public void testValidTimeUUID()
     {
-        TimeUUIDType.instance.validate(ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes()));
+        TimeUUIDType.instance.validate(ByteBuffer.wrap(nextTimeUUIDAsBytes()));
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/db/marshal/UUIDTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/UUIDTypeTest.java
index e82f3f5..87cbbe9 100644
--- a/test/unit/org/apache/cassandra/db/marshal/UUIDTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/UUIDTypeTest.java
@@ -21,6 +21,7 @@
  *
  */
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 
 import java.nio.ByteBuffer;
@@ -55,8 +56,8 @@
     public void testRandomCompare()
     {
 
-        UUID t1 = UUIDGen.getTimeUUID();
-        UUID t2 = UUIDGen.getTimeUUID();
+        UUID t1 = nextTimeUUID().asUUID();
+        UUID t2 = nextTimeUUID().asUUID();
 
         testCompare(null, t2, -1);
         testCompare(t1, null, 1);
@@ -179,7 +180,7 @@
     @Test
     public void testTimeEquality()
     {
-        UUID a = UUIDGen.getTimeUUID();
+        UUID a = nextTimeUUID().asUUID();
         UUID b = new UUID(a.getMostSignificantBits(),
                 a.getLeastSignificantBits());
 
@@ -189,9 +190,9 @@
     @Test
     public void testTimeSmaller()
     {
-        UUID a = UUIDGen.getTimeUUID();
-        UUID b = UUIDGen.getTimeUUID();
-        UUID c = UUIDGen.getTimeUUID();
+        UUID a = nextTimeUUID().asUUID();
+        UUID b = nextTimeUUID().asUUID();
+        UUID c = nextTimeUUID().asUUID();
 
         assert uuidType.compare(bytebuffer(a), bytebuffer(b)) < 0;
         assert uuidType.compare(bytebuffer(b), bytebuffer(c)) < 0;
@@ -201,9 +202,9 @@
     @Test
     public void testTimeBigger()
     {
-        UUID a = UUIDGen.getTimeUUID();
-        UUID b = UUIDGen.getTimeUUID();
-        UUID c = UUIDGen.getTimeUUID();
+        UUID a = nextTimeUUID().asUUID();
+        UUID b = nextTimeUUID().asUUID();
+        UUID c = nextTimeUUID().asUUID();
 
         assert uuidType.compare(bytebuffer(c), bytebuffer(b)) > 0;
         assert uuidType.compare(bytebuffer(b), bytebuffer(a)) > 0;
diff --git a/test/unit/org/apache/cassandra/db/memtable/TestMemtable.java b/test/unit/org/apache/cassandra/db/memtable/TestMemtable.java
new file mode 100644
index 0000000..ec10c1a
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/memtable/TestMemtable.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.Map;
+
+public class TestMemtable
+{
+    public static Memtable.Factory factory(Map<String, String> options)
+    {
+        String skiplist = options.remove("skiplist");
+        if (Boolean.parseBoolean(skiplist))
+            return SkipListMemtable.FACTORY;
+        else
+            return FACTORY;
+    }
+
+    public static Memtable.Factory FACTORY = SkipListMemtable::new;
+}
diff --git a/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java b/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java
index dc8c317..9c1e8b1 100644
--- a/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java
+++ b/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java
@@ -22,7 +22,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -32,11 +31,12 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.utils.ApproximateTime;
-
+import static java.lang.Thread.currentThread;
+import static java.util.UUID.randomUUID;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -98,8 +98,8 @@
         long timeout = operations.stream().map(Monitorable::timeoutNanos).reduce(0L, Long::max);
         Thread.sleep(NANOSECONDS.toMillis(timeout * 2 + approxTime.error()));
 
-        long start = System.nanoTime();
-        while(System.nanoTime() - start <= MAX_SPIN_TIME_NANOS)
+        long start = nanoTime();
+        while(nanoTime() - start <= MAX_SPIN_TIME_NANOS)
         {
             long numInProgress = operations.stream().filter(Monitorable::isInProgress).count();
             if (numInProgress == 0)
@@ -117,8 +117,8 @@
         long timeout = operations.stream().map(Monitorable::slowTimeoutNanos).reduce(0L, Long::max);
         Thread.sleep(NANOSECONDS.toMillis(timeout * 2 + approxTime.error()));
 
-        long start = System.nanoTime();
-        while(System.nanoTime() - start <= MAX_SPIN_TIME_NANOS)
+        long start = nanoTime();
+        while(nanoTime() - start <= MAX_SPIN_TIME_NANOS)
         {
             long numSlow = operations.stream().filter(Monitorable::isSlow).count();
             if (numSlow == operations.size())
@@ -129,7 +129,7 @@
     @Test
     public void testAbort() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test abort", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test abort", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.isAborted());
@@ -140,7 +140,7 @@
     @Test
     public void testAbortIdemPotent() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test abort", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test abort", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.abort());
@@ -153,7 +153,7 @@
     @Test
     public void testAbortCrossNode() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test for cross node", System.nanoTime(), true, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test for cross node", nanoTime(), true, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.isAborted());
@@ -164,7 +164,7 @@
     @Test
     public void testComplete() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test complete", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test complete", nanoTime(), false, timeout, slowTimeout);
         operation.complete();
         waitForOperationsToComplete(operation);
 
@@ -176,7 +176,7 @@
     @Test
     public void testCompleteIdemPotent() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test complete", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test complete", nanoTime(), false, timeout, slowTimeout);
         operation.complete();
         waitForOperationsToComplete(operation);
 
@@ -190,7 +190,7 @@
     @Test
     public void testReportSlow() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test report slow", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test report slow", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToBeReportedAsSlow(operation);
 
         assertTrue(operation.isSlow());
@@ -204,7 +204,7 @@
     public void testNoReportSlowIfZeroSlowTimeout() throws InterruptedException
     {
         // when the slow timeout is set to zero then operation won't be reported as slow
-        Monitorable operation = new TestMonitor("Test report slow disabled", System.nanoTime(), false, timeout, 0);
+        Monitorable operation = new TestMonitor("Test report slow disabled", nanoTime(), false, timeout, 0);
         waitForOperationsToBeReportedAsSlow(operation);
 
         assertTrue(operation.isSlow());
@@ -217,7 +217,7 @@
     @Test
     public void testReport() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test report", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test report", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.isSlow());
@@ -238,13 +238,13 @@
         MonitoringTask.instance = MonitoringTask.make(10, -1);
         try
         {
-            Monitorable operation1 = new TestMonitor("Test report 1", System.nanoTime(), false, timeout, slowTimeout);
+            Monitorable operation1 = new TestMonitor("Test report 1", nanoTime(), false, timeout, slowTimeout);
             waitForOperationsToComplete(operation1);
 
             assertTrue(operation1.isAborted());
             assertFalse(operation1.isCompleted());
 
-            Monitorable operation2 = new TestMonitor("Test report 2", System.nanoTime(), false, timeout, slowTimeout);
+            Monitorable operation2 = new TestMonitor("Test report 2", nanoTime(), false, timeout, slowTimeout);
             waitForOperationsToBeReportedAsSlow(operation2);
 
             operation2.complete();
@@ -271,7 +271,7 @@
         for (int i = 0; i < opCount; i++)
         {
             executorService.submit(() ->
-                operations.add(new TestMonitor(UUID.randomUUID().toString(), System.nanoTime(), false, timeout, slowTimeout))
+                operations.add(new TestMonitor(randomUUID().toString(), nanoTime(), false, timeout, slowTimeout))
             );
         }
 
@@ -316,14 +316,14 @@
                         for (int j = 0; j < numTimes; j++)
                         {
                             Monitorable operation1 = new TestMonitor(operationName,
-                                                                     System.nanoTime(),
+                                                                     nanoTime(),
                                                                      false,
                                                                      timeout,
                                                                      slowTimeout);
                             waitForOperationsToComplete(operation1);
 
                             Monitorable operation2 = new TestMonitor(operationName,
-                                                                     System.nanoTime(),
+                                                                     nanoTime(),
                                                                      false,
                                                                      timeout,
                                                                      slowTimeout);
@@ -371,7 +371,7 @@
                 try
                 {
                     Monitorable operation = new TestMonitor("Test testMultipleThreadsSameName failed",
-                                                            System.nanoTime(),
+                                                            nanoTime(),
                                                             false,
                                                             timeout,
                                                             slowTimeout);
@@ -405,7 +405,7 @@
                 try
                 {
                     Monitorable operation = new TestMonitor("Test testMultipleThreadsSameName slow",
-                                                            System.nanoTime(),
+                                                            nanoTime(),
                                                             false,
                                                             timeout,
                                                             slowTimeout);
@@ -440,8 +440,8 @@
             executorService.submit(() -> {
                 try
                 {
-                    Monitorable operation = new TestMonitor("Test thread " + Thread.currentThread().getName(),
-                                                            System.nanoTime(),
+                    Monitorable operation = new TestMonitor("Test thread " + currentThread().getName(),
+                                                            nanoTime(),
                                                             false,
                                                             timeout,
                                                             slowTimeout);
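
The MonitoringTaskTest hunks above replace explicit System.nanoTime(), UUID.randomUUID() and Thread.currentThread() calls with bare helpers; the matching static imports sit outside the hunks shown here. A minimal sketch of what those call sites assume (the Clock.Global source for nanoTime is an assumption, since the import block is not part of this excerpt):

    // Assumed import block backing the bare calls above; not shown in these hunks.
    import static java.lang.Thread.currentThread;
    import static java.util.UUID.randomUUID;
    import static org.apache.cassandra.utils.Clock.Global.nanoTime;

    // A call site then reads as in the hunks above:
    Monitorable op = new TestMonitor(randomUUID().toString(), nanoTime(), false, timeout, slowTimeout);
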
diff --git a/test/unit/org/apache/cassandra/db/partition/PartitionUpdateTest.java b/test/unit/org/apache/cassandra/db/partition/PartitionUpdateTest.java
index a4555c8..771facf 100644
--- a/test/unit/org/apache/cassandra/db/partition/PartitionUpdateTest.java
+++ b/test/unit/org/apache/cassandra/db/partition/PartitionUpdateTest.java
@@ -58,7 +58,7 @@
         builder.newRow().add("s", 1);
         builder.newRow(1).add("a", 2);
         int size1 = builder.build().dataSize();
-        Assert.assertEquals(44, size1);
+        Assert.assertEquals(94, size1);
 
         builder = UpdateBuilder.create(cfm, "key0");
         builder.newRow(1).add("a", 2);
diff --git a/test/unit/org/apache/cassandra/db/partitions/AtomicBTreePartitionMemtableAccountingTest.java b/test/unit/org/apache/cassandra/db/partitions/AtomicBTreePartitionMemtableAccountingTest.java
index 125015f..04196f6 100644
--- a/test/unit/org/apache/cassandra/db/partitions/AtomicBTreePartitionMemtableAccountingTest.java
+++ b/test/unit/org/apache/cassandra/db/partitions/AtomicBTreePartitionMemtableAccountingTest.java
@@ -21,7 +21,6 @@
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.BeforeClass;
@@ -38,10 +37,10 @@
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.DeletionTime;
 
-import org.apache.cassandra.db.Memtable;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.SetType;
+import org.apache.cassandra.db.memtable.AbstractAllocatorMemtable;
 import org.apache.cassandra.db.rows.BTreeRow;
 import org.apache.cassandra.db.rows.BufferCell;
 import org.apache.cassandra.db.rows.Cell;
@@ -60,6 +59,7 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.btree.BTree;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.Cloner;
 import org.apache.cassandra.utils.memory.MemtableAllocator;
@@ -97,7 +97,7 @@
     public static final long HEAP_LIMIT = 1 << 20;
     public static final long OFF_HEAP_LIMIT = 1 << 20;
     public static final float MEMTABLE_CLEANUP_THRESHOLD = 0.25f;
-    public static final MemtableCleaner DUMMY_CLEANER = () -> CompletableFuture.completedFuture(null);
+    public static final MemtableCleaner DUMMY_CLEANER = () -> ImmediateFuture.failure(new IllegalStateException());
 
     @Parameterized.Parameters(name="allocationType={0}")
     public static Iterable<? extends Object> data()
@@ -293,10 +293,10 @@
         opOrder.start();
         UpdateTransaction indexer = UpdateTransaction.NO_OP;
 
-        MemtablePool memtablePool = Memtable.createMemtableAllocatorPoolInternal(allocationType,
-                                                                                 HEAP_LIMIT, OFF_HEAP_LIMIT, MEMTABLE_CLEANUP_THRESHOLD, DUMMY_CLEANER);
-        MemtableAllocator allocator = memtablePool.newAllocator();
-        MemtableAllocator recreatedAllocator = memtablePool.newAllocator();
+        MemtablePool memtablePool = AbstractAllocatorMemtable.createMemtableAllocatorPoolInternal(allocationType,
+                                                                                                  HEAP_LIMIT, OFF_HEAP_LIMIT, MEMTABLE_CLEANUP_THRESHOLD, DUMMY_CLEANER);
+        MemtableAllocator allocator = memtablePool.newAllocator("test");
+        MemtableAllocator recreatedAllocator = memtablePool.newAllocator("recreated");
         try
         {
             // Prepare a partition to receive updates
@@ -331,7 +331,6 @@
             }).sum();
 
             // Now recreate the partition to see if there's a leak in the accounting
-
             AtomicBTreePartition recreated = new AtomicBTreePartition(metadataRef, partitionKey, recreatedAllocator);
             try (UnfilteredRowIterator iter = partition.unfilteredIterator())
             {
@@ -346,7 +345,7 @@
             long unreleasableOnHeap = 0, unreleasableOffHeap = 0;
             if (allocator.offHeap().owns() > 0) unreleasableOffHeap = unreleasable;
             else unreleasableOnHeap = unreleasable;
-            
+
             assertThat(recreatedAllocator.offHeap().owns()).isEqualTo(allocator.offHeap().owns() - unreleasableOffHeap);
             assertThat(recreatedAllocator.onHeap().owns()).isEqualTo(allocator.onHeap().owns() - unreleasableOnHeap);
         }
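
Two API shifts show up in the allocator hunks above: the memtable pool factory has moved from Memtable to AbstractAllocatorMemtable, and MemtablePool.newAllocator now takes a name identifying its owner. A minimal sketch of the new shape, reusing the constants from this test:

    // Sketch only, mirroring the calls in the hunk above.
    MemtablePool pool = AbstractAllocatorMemtable.createMemtableAllocatorPoolInternal(
            allocationType, HEAP_LIMIT, OFF_HEAP_LIMIT, MEMTABLE_CLEANUP_THRESHOLD, DUMMY_CLEANER);
    MemtableAllocator allocator = pool.newAllocator("test");        // allocators are now named
    MemtableAllocator recreated = pool.newAllocator("recreated");

    // The dummy cleaner now returns a failed ImmediateFuture, so the test notices
    // if cleanup is ever triggered instead of silently completing.
    MemtableCleaner cleaner = () -> ImmediateFuture.failure(new IllegalStateException());
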
diff --git a/test/unit/org/apache/cassandra/db/repair/AbstractPendingAntiCompactionTest.java b/test/unit/org/apache/cassandra/db/repair/AbstractPendingAntiCompactionTest.java
index 62b7db1..79be4c8 100644
--- a/test/unit/org/apache/cassandra/db/repair/AbstractPendingAntiCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/repair/AbstractPendingAntiCompactionTest.java
@@ -20,7 +20,6 @@
 
 import java.util.Collection;
 import java.util.Collections;
-import java.util.UUID;
 
 import com.google.common.collect.Sets;
 import org.junit.Assert;
@@ -29,6 +28,7 @@
 import org.junit.Ignore;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.QueryProcessor;
@@ -46,6 +46,7 @@
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 @Ignore
 public abstract class AbstractPendingAntiCompactionTest
@@ -109,14 +110,14 @@
             int val = i * rowsPerSSTable;  // multiplied to prevent ranges from overlapping
             for (int j = 0; j < rowsPerSSTable; j++)
                 QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES (?, ?)", ks, cfs.getTableName()), val + j, val + j);
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         Assert.assertEquals(num, cfs.getLiveSSTables().size());
     }
 
-    UUID prepareSession()
+    TimeUUID prepareSession()
     {
-        UUID sessionID = AbstractRepairTest.registerSession(cfs, true, true);
+        TimeUUID sessionID = AbstractRepairTest.registerSession(cfs, true, true);
         LocalSessionAccessor.prepareUnsafe(sessionID, AbstractRepairTest.COORDINATOR, Sets.newHashSet(AbstractRepairTest.COORDINATOR));
         return sessionID;
     }
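
The flush change above recurs throughout this patch: tests stop calling cfs.forceBlockingFlush() directly and go through Util.flush(cfs), since the flush API now takes a reason. A sketch of the wrapper's likely shape (the UNIT_TESTS reason is an assumption; the helper itself is not shown in this excerpt):

    // Assumed shape of the test helper; the real implementation lives in org.apache.cassandra.Util.
    public static void flush(ColumnFamilyStore cfs)
    {
        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
    }
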
diff --git a/test/unit/org/apache/cassandra/db/repair/CompactionManagerGetSSTablesForValidationTest.java b/test/unit/org/apache/cassandra/db/repair/CompactionManagerGetSSTablesForValidationTest.java
index 3b29cc5..bcf7582 100644
--- a/test/unit/org/apache/cassandra/db/repair/CompactionManagerGetSSTablesForValidationTest.java
+++ b/test/unit/org/apache/cassandra/db/repair/CompactionManagerGetSSTablesForValidationTest.java
@@ -18,10 +18,8 @@
 
 package org.apache.cassandra.db.repair;
 
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
@@ -31,7 +29,9 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.repair.state.ValidationState;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.schema.Schema;
@@ -47,9 +47,11 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
+import static java.util.Collections.singleton;
 import static org.apache.cassandra.db.repair.CassandraValidationIterator.getSSTablesToValidate;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * Tests correct sstables are returned from CompactionManager.getSSTablesForValidation
@@ -68,7 +70,7 @@
     private SSTableReader unrepaired;
     private SSTableReader pendingRepair;
 
-    private UUID sessionID;
+    private TimeUUID sessionID;
     private RepairJobDesc desc;
 
     @BeforeClass
@@ -93,7 +95,7 @@
         for (int i=0; i<3; i++)
         {
             QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES(?, ?)", ks, tbl), i, i);
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
         Assert.assertEquals(3, cfs.getLiveSSTables().size());
 
@@ -101,7 +103,7 @@
 
     private void registerRepair(boolean incremental) throws Exception
     {
-        sessionID = UUIDGen.getTimeUUID();
+        sessionID = nextTimeUUID();
         Range<Token> range = new Range<>(MT, MT);
         ActiveRepairService.instance.registerParentRepairSession(sessionID,
                                                                  coordinator,
@@ -111,7 +113,7 @@
                                                                  incremental ? System.currentTimeMillis() : ActiveRepairService.UNREPAIRED_SSTABLE,
                                                                  true,
                                                                  PreviewKind.NONE);
-        desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), ks, tbl, Collections.singleton(range));
+        desc = new RepairJobDesc(sessionID, nextTimeUUID(), ks, tbl, singleton(range));
     }
 
     private void modifySSTables() throws Exception
@@ -139,7 +141,7 @@
         modifySSTables();
 
         // get sstables for repair
-        Validator validator = new Validator(desc, coordinator, FBUtilities.nowInSeconds(), true, PreviewKind.NONE);
+        Validator validator = new Validator(new ValidationState(desc, coordinator), FBUtilities.nowInSeconds(), true, PreviewKind.NONE);
         Set<SSTableReader> sstables = Sets.newHashSet(getSSTablesToValidate(cfs, validator.desc.ranges, validator.desc.parentSessionId, validator.isIncremental));
         Assert.assertNotNull(sstables);
         Assert.assertEquals(1, sstables.size());
@@ -154,7 +156,7 @@
         modifySSTables();
 
         // get sstables for repair
-        Validator validator = new Validator(desc, coordinator, FBUtilities.nowInSeconds(), false, PreviewKind.NONE);
+        Validator validator = new Validator(new ValidationState(desc, coordinator), FBUtilities.nowInSeconds(), false, PreviewKind.NONE);
         Set<SSTableReader> sstables = Sets.newHashSet(getSSTablesToValidate(cfs, validator.desc.ranges, validator.desc.parentSessionId, validator.isIncremental));
         Assert.assertNotNull(sstables);
         Assert.assertEquals(2, sstables.size());
@@ -170,7 +172,7 @@
         modifySSTables();
 
         // get sstables for repair
-        Validator validator = new Validator(desc, coordinator, FBUtilities.nowInSeconds(), false, PreviewKind.NONE);
+        Validator validator = new Validator(new ValidationState(desc, coordinator), FBUtilities.nowInSeconds(), false, PreviewKind.NONE);
         Set<SSTableReader> sstables = Sets.newHashSet(getSSTablesToValidate(cfs, validator.desc.ranges, validator.desc.parentSessionId, validator.isIncremental));
         Assert.assertNotNull(sstables);
         Assert.assertEquals(3, sstables.size());
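
Beyond the TimeUUID switch, the Validator construction above changes because repair validation progress is now tracked through a ValidationState object rather than passing the job descriptor and coordinator separately. A minimal sketch of the call shape these tests converge on:

    // Sketch mirroring the updated test call sites.
    RepairJobDesc desc = new RepairJobDesc(sessionID, nextTimeUUID(), ks, tbl, singleton(range));
    Validator validator = new Validator(new ValidationState(desc, coordinator),
                                        FBUtilities.nowInSeconds(),
                                        false /* isIncremental */,
                                        PreviewKind.NONE);
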
diff --git a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java
index 127a0a4..e43027d 100644
--- a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java
+++ b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -36,6 +35,7 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.utils.TimeUUID;
 import org.jboss.byteman.contrib.bmunit.BMRule;
 import org.jboss.byteman.contrib.bmunit.BMRules;
 import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
@@ -50,7 +50,7 @@
     @BMRules(rules = { @BMRule(name = "Throw exception anticompaction",
                                targetClass = "Range$OrderedRangeContainmentChecker",
                                targetMethod = "test",
-                               action = "throw new org.apache.cassandra.db.compaction.CompactionInterruptedException(null);")} )
+                               action = "throw new org.apache.cassandra.db.compaction.CompactionInterruptedException(\"antiCompactionExceptionTest\");")} )
     @Test
     public void testExceptionAnticompaction() throws InterruptedException
     {
@@ -65,7 +65,7 @@
         {
             ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
         }
-        UUID prsid = prepareSession();
+        TimeUUID prsid = prepareSession();
         try
         {
             PendingAntiCompaction pac = new PendingAntiCompaction(prsid, Lists.newArrayList(cfs, cfs2), atEndpoint(ranges, NO_RANGES), es, () -> false);
diff --git a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java
index 1c5c245..a559478 100644
--- a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java
@@ -24,12 +24,10 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
@@ -41,9 +39,14 @@
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListenableFutureTask;
-import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.concurrent.FutureTask;
+import org.apache.cassandra.concurrent.ImmediateExecutor;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Future;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -76,10 +79,12 @@
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
 import org.apache.cassandra.utils.WrappedRunnable;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
+import static java.util.Collections.emptyList;
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -93,18 +98,18 @@
 
     private static class InstrumentedAcquisitionCallback extends PendingAntiCompaction.AcquisitionCallback
     {
-        public InstrumentedAcquisitionCallback(UUID parentRepairSession, RangesAtEndpoint ranges)
+        public InstrumentedAcquisitionCallback(TimeUUID parentRepairSession, RangesAtEndpoint ranges)
         {
             super(parentRepairSession, ranges, () -> false);
         }
 
         Set<TableId> submittedCompactions = new HashSet<>();
 
-        ListenableFuture<?> submitPendingAntiCompaction(PendingAntiCompaction.AcquireResult result)
+        Future<Void> submitPendingAntiCompaction(PendingAntiCompaction.AcquireResult result)
         {
             submittedCompactions.add(result.cfs.metadata.id);
             result.abort();  // prevent ref leak complaints
-            return ListenableFutureTask.create(() -> {}, null);
+            return new FutureTask<>(() -> {});
         }
     }
 
@@ -122,12 +127,12 @@
         {
             QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES (?, ?)", ks, tbl), i, i);
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         for (int i = 8; i < 12; i++)
         {
             QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES (?, ?)", ks, tbl), i, i);
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(2, cfs.getLiveSSTables().size());
 
         Token left = ByteOrderedPartitioner.instance.getToken(ByteBufferUtil.bytes((int) 6));
@@ -136,7 +141,7 @@
         Collection<Range<Token>> ranges = Collections.singleton(new Range<>(left, right));
 
         // create a session so the anti compaction can find it
-        UUID sessionID = UUIDGen.getTimeUUID();
+        TimeUUID sessionID = nextTimeUUID();
         ActiveRepairService.instance.registerParentRepairSession(sessionID, InetAddressAndPort.getLocalHost(), tables, ranges, true, 1, true, PreviewKind.NONE);
 
         PendingAntiCompaction pac;
@@ -174,7 +179,7 @@
             ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
         }
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, ranges, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, ranges, nextTimeUUID(), 0, 0);
 
         logger.info("SSTables: {}", sstables);
         logger.info("Expected: {}", expected);
@@ -208,7 +213,7 @@
         repaired.descriptor.getMetadataSerializer().mutateRepairMetadata(repaired.descriptor, 1, null, false);
         repaired.reloadSSTableMetadata();
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
         assertNotNull(result);
 
@@ -231,13 +236,13 @@
         assertTrue(repaired.intersects(FULL_RANGE));
         assertTrue(unrepaired.intersects(FULL_RANGE));
 
-        UUID sessionId = prepareSession();
+        TimeUUID sessionId = prepareSession();
         LocalSessionAccessor.finalizeUnsafe(sessionId);
         repaired.descriptor.getMetadataSerializer().mutateRepairMetadata(repaired.descriptor, 0, sessionId, false);
         repaired.reloadSSTableMetadata();
         assertTrue(repaired.isPendingRepair());
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
         assertNotNull(result);
 
@@ -260,12 +265,12 @@
         assertTrue(repaired.intersects(FULL_RANGE));
         assertTrue(unrepaired.intersects(FULL_RANGE));
 
-        UUID sessionId = prepareSession();
+        TimeUUID sessionId = prepareSession();
         repaired.descriptor.getMetadataSerializer().mutateRepairMetadata(repaired.descriptor, 0, sessionId, false);
         repaired.reloadSSTableMetadata();
         assertTrue(repaired.isPendingRepair());
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
         assertNull(result);
     }
@@ -277,7 +282,7 @@
 
         assertEquals(0, cfs.getLiveSSTables().size());
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
         assertNotNull(result);
 
@@ -293,11 +298,11 @@
         cfs.disableAutoCompaction();
         makeSSTables(2);
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
         assertNotNull(result);
 
-        InstrumentedAcquisitionCallback cb = new InstrumentedAcquisitionCallback(UUIDGen.getTimeUUID(), atEndpoint(FULL_RANGE, NO_RANGES));
+        InstrumentedAcquisitionCallback cb = new InstrumentedAcquisitionCallback(nextTimeUUID(), atEndpoint(FULL_RANGE, NO_RANGES));
         assertTrue(cb.submittedCompactions.isEmpty());
         cb.apply(Lists.newArrayList(result));
 
@@ -316,12 +321,12 @@
         cfs.disableAutoCompaction();
         makeSSTables(2);
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
         assertNotNull(result);
         assertEquals(Transactional.AbstractTransactional.State.IN_PROGRESS, result.txn.state());
 
-        InstrumentedAcquisitionCallback cb = new InstrumentedAcquisitionCallback(UUIDGen.getTimeUUID(), atEndpoint(FULL_RANGE, Collections.emptyList()));
+        InstrumentedAcquisitionCallback cb = new InstrumentedAcquisitionCallback(nextTimeUUID(), atEndpoint(FULL_RANGE, emptyList()));
         assertTrue(cb.submittedCompactions.isEmpty());
         cb.apply(Lists.newArrayList(result, null));
 
@@ -339,14 +344,14 @@
         cfs.disableAutoCompaction();
         makeSSTables(2);
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
         assertNotNull(result);
 
         ColumnFamilyStore cfs2 = Schema.instance.getColumnFamilyStoreInstance(Schema.instance.getTableMetadata("system", "peers").id);
         PendingAntiCompaction.AcquireResult fakeResult = new PendingAntiCompaction.AcquireResult(cfs2, null, null);
 
-        InstrumentedAcquisitionCallback cb = new InstrumentedAcquisitionCallback(UUIDGen.getTimeUUID(), atEndpoint(FULL_RANGE, NO_RANGES));
+        InstrumentedAcquisitionCallback cb = new InstrumentedAcquisitionCallback(nextTimeUUID(), atEndpoint(FULL_RANGE, NO_RANGES));
         assertTrue(cb.submittedCompactions.isEmpty());
         cb.apply(Lists.newArrayList(result, fakeResult));
 
@@ -362,9 +367,9 @@
         cfs.disableAutoCompaction();
         makeSSTables(2);
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
-        UUID sessionID = UUIDGen.getTimeUUID();
+        TimeUUID sessionID = nextTimeUUID();
         ActiveRepairService.instance.registerParentRepairSession(sessionID,
                                                                  InetAddressAndPort.getByName("127.0.0.1"),
                                                                  Lists.newArrayList(cfs),
@@ -382,9 +387,9 @@
         cfs.disableAutoCompaction();
         makeSSTables(1);
 
-        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, UUIDGen.getTimeUUID(), 0, 0);
+        PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, FULL_RANGE, nextTimeUUID(), 0, 0);
         PendingAntiCompaction.AcquireResult result = acquisitionCallable.call();
-        UUID sessionID = UUIDGen.getTimeUUID();
+        TimeUUID sessionID = nextTimeUUID();
         ActiveRepairService.instance.registerParentRepairSession(sessionID,
                                                                  InetAddressAndPort.getByName("127.0.0.1"),
                                                                  Lists.newArrayList(cfs),
@@ -410,22 +415,22 @@
     {
         cfs.disableAutoCompaction();
         makeSSTables(2);
-        UUID prsid = UUID.randomUUID();
-        ListeningExecutorService es = MoreExecutors.listeningDecorator(MoreExecutors.newDirectExecutorService());
+        TimeUUID prsid = nextTimeUUID();
+        ExecutorPlus es = ImmediateExecutor.INSTANCE;
         PendingAntiCompaction pac = new PendingAntiCompaction(prsid, Collections.singleton(cfs), atEndpoint(FULL_RANGE, NO_RANGES), es, () -> false) {
             @Override
-            protected AcquisitionCallback getAcquisitionCallback(UUID prsId, RangesAtEndpoint tokenRanges)
+            protected AcquisitionCallback getAcquisitionCallback(TimeUUID prsId, RangesAtEndpoint tokenRanges)
             {
                 return new AcquisitionCallback(prsid, tokenRanges, () -> false)
                 {
                     @Override
-                    ListenableFuture<?> submitPendingAntiCompaction(AcquireResult result)
+                    Future submitPendingAntiCompaction(AcquireResult result)
                     {
                         Runnable r = new WrappedRunnable()
                         {
                             protected void runMayThrow()
                             {
-                                throw new CompactionInterruptedException(null);
+                                throw new CompactionInterruptedException("antiCompactionExceptionTest");
                             }
                         };
                         return es.submit(r);
@@ -451,14 +456,14 @@
         ExecutorService es = Executors.newFixedThreadPool(1);
 
         makeSSTables(2);
-        UUID prsid = UUID.randomUUID();
+        TimeUUID prsid = nextTimeUUID();
         Set<SSTableReader> sstables = cfs.getLiveSSTables();
         List<ISSTableScanner> scanners = sstables.stream().map(SSTableReader::getScanner).collect(Collectors.toList());
         try
         {
             try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
                  CompactionController controller = new CompactionController(cfs, sstables, 0);
-                 CompactionIterator ci = CompactionManager.getAntiCompactionIterator(scanners, controller, 0, UUID.randomUUID(), CompactionManager.instance.active, () -> false))
+                 CompactionIterator ci = CompactionManager.getAntiCompactionIterator(scanners, controller, 0, nextTimeUUID(), CompactionManager.instance.active, () -> false))
             {
                 // `ci` is our imaginary ongoing anticompaction which makes no progress until after 30s
                 // now we try to start a new AC, which will try to cancel all ongoing compactions
@@ -505,14 +510,14 @@
         cfs.disableAutoCompaction();
         ExecutorService es = Executors.newFixedThreadPool(1);
         makeSSTables(2);
-        UUID prsid = prepareSession();
+        TimeUUID prsid = prepareSession();
         Set<SSTableReader> sstables = cfs.getLiveSSTables();
         List<ISSTableScanner> scanners = sstables.stream().map(SSTableReader::getScanner).collect(Collectors.toList());
         try
         {
             try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
                  CompactionController controller = new CompactionController(cfs, sstables, 0);
-                 CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners, controller, 0, UUID.randomUUID()))
+                 CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners, controller, 0, nextTimeUUID()))
             {
                 // `ci` is our imaginary ongoing anticompaction which makes no progress until after 5s
                 // now we try to start a new AC, which will try to cancel all ongoing compactions
@@ -584,7 +589,7 @@
         for (int i = 1; i <= 10; i++)
         {
             SSTableReader sstable = MockSchema.sstable(i + 20, i * 10, i * 10 + 9, cfs);
-            AbstractPendingRepairTest.mutateRepaired(sstable, UUID.randomUUID(), false);
+            AbstractPendingRepairTest.mutateRepaired(sstable, nextTimeUUID(), false);
             pendingSSTables.add(sstable);
         }
 
@@ -604,7 +609,7 @@
         {
             public CompactionInfo getCompactionInfo()
             {
-                return new CompactionInfo(cfs.metadata(), OperationType.ANTICOMPACTION, 0, 1000, UUID.randomUUID(), compacting);
+                return new CompactionInfo(cfs.metadata(), OperationType.ANTICOMPACTION, 0, 1000, nextTimeUUID(), compacting);
             }
 
             public boolean isGlobal()
@@ -617,7 +622,7 @@
         {
             PendingAntiCompaction.AntiCompactionPredicate predicate =
             new PendingAntiCompaction.AntiCompactionPredicate(Collections.singleton(new Range<>(new Murmur3Partitioner.LongToken(0), new Murmur3Partitioner.LongToken(100))),
-                                                              UUID.randomUUID());
+                                                              nextTimeUUID());
             Set<SSTableReader> live = cfs.getLiveSSTables().stream().filter(predicate).collect(Collectors.toSet());
             if (shouldFail)
                 fail("should fail - we try to grab already anticompacting sstables for anticompaction");
@@ -640,12 +645,12 @@
         ColumnFamilyStore cfs = MockSchema.newCFS();
         cfs.addSSTable(MockSchema.sstable(1, true, cfs));
         CountDownLatch cdl = new CountDownLatch(5);
-        ExecutorService es = Executors.newFixedThreadPool(1);
+        ExecutorPlus es = executorFactory().sequential("test");
         CompactionInfo.Holder holder = new CompactionInfo.Holder()
         {
             public CompactionInfo getCompactionInfo()
             {
-                return new CompactionInfo(cfs.metadata(), OperationType.ANTICOMPACTION, 0, 0, UUID.randomUUID(), cfs.getLiveSSTables());
+                return new CompactionInfo(cfs.metadata(), OperationType.ANTICOMPACTION, 0, 0, nextTimeUUID(), cfs.getLiveSSTables());
             }
 
             public boolean isGlobal()
@@ -655,7 +660,7 @@
         };
         try
         {
-            PendingAntiCompaction.AntiCompactionPredicate acp = new PendingAntiCompaction.AntiCompactionPredicate(FULL_RANGE, UUID.randomUUID())
+            PendingAntiCompaction.AntiCompactionPredicate acp = new PendingAntiCompaction.AntiCompactionPredicate(FULL_RANGE, nextTimeUUID())
             {
                 @Override
                 public boolean apply(SSTableReader sstable)
@@ -667,7 +672,7 @@
                 }
             };
             CompactionManager.instance.active.beginCompaction(holder);
-            PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, UUID.randomUUID(), 10, 1, acp);
+            PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, nextTimeUUID(), 10, 1, acp);
             Future f = es.submit(acquisitionCallable);
             cdl.await();
             assertNotNull(f.get());
@@ -684,12 +689,12 @@
     {
         ColumnFamilyStore cfs = MockSchema.newCFS();
         cfs.addSSTable(MockSchema.sstable(1, true, cfs));
-        ExecutorService es = Executors.newFixedThreadPool(1);
+        ExecutorPlus es = executorFactory().sequential("test");
         CompactionInfo.Holder holder = new CompactionInfo.Holder()
         {
             public CompactionInfo getCompactionInfo()
             {
-                return new CompactionInfo(cfs.metadata(), OperationType.ANTICOMPACTION, 0, 0, UUID.randomUUID(), cfs.getLiveSSTables());
+                return new CompactionInfo(cfs.metadata(), OperationType.ANTICOMPACTION, 0, 0, nextTimeUUID(), cfs.getLiveSSTables());
             }
 
             public boolean isGlobal()
@@ -699,7 +704,7 @@
         };
         try
         {
-            PendingAntiCompaction.AntiCompactionPredicate acp = new PendingAntiCompaction.AntiCompactionPredicate(FULL_RANGE, UUID.randomUUID())
+            PendingAntiCompaction.AntiCompactionPredicate acp = new PendingAntiCompaction.AntiCompactionPredicate(FULL_RANGE, nextTimeUUID())
             {
                 @Override
                 public boolean apply(SSTableReader sstable)
@@ -708,7 +713,7 @@
                 }
             };
             CompactionManager.instance.active.beginCompaction(holder);
-            PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, UUID.randomUUID(), 2, 1000, acp);
+            PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, nextTimeUUID(), 2, 1000, acp);
             Future fut = es.submit(acquisitionCallable);
             assertNull(fut.get());
         }
@@ -728,7 +733,7 @@
         ExecutorService es = Executors.newFixedThreadPool(1);
         try
         {
-            UUID prsid = prepareSession();
+            TimeUUID prsid = prepareSession();
             for (SSTableReader sstable : cfs2.getLiveSSTables())
                 assertFalse(sstable.isPendingRepair());
 
diff --git a/test/unit/org/apache/cassandra/db/rows/RowAndDeletionMergeIteratorTest.java b/test/unit/org/apache/cassandra/db/rows/RowAndDeletionMergeIteratorTest.java
index c90694d..17c9bec 100644
--- a/test/unit/org/apache/cassandra/db/rows/RowAndDeletionMergeIteratorTest.java
+++ b/test/unit/org/apache/cassandra/db/rows/RowAndDeletionMergeIteratorTest.java
@@ -32,7 +32,6 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.db.ClusteringPrefix;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.ColumnFilter;
diff --git a/test/unit/org/apache/cassandra/db/rows/RowsMergingTest.java b/test/unit/org/apache/cassandra/db/rows/RowsMergingTest.java
index 1ceacbc..8049af3 100644
--- a/test/unit/org/apache/cassandra/db/rows/RowsMergingTest.java
+++ b/test/unit/org/apache/cassandra/db/rows/RowsMergingTest.java
@@ -214,8 +214,8 @@
                 catch (Throwable e)
                 {
                     throw new AssertionError("Executing the following queries did not lead to the expected result: \n"
-                                + Joiner.on("; \n").join(queries) 
-                                + "\n when executing: \n" + query, e); 
+                                + Joiner.on("; \n").join(queries)
+                                + "\n when executing: \n" + query, e);
                 }
             }
 
@@ -224,7 +224,7 @@
                 if (expectedRow != null)
                 {
                     expectedRow[0] = pk;
-                    assertRows(execute("SELECT * FROM %s WHERE pk = ?" , pk), 
+                    assertRows(execute("SELECT * FROM %s WHERE pk = ?" , pk),
                                expectedRow);
                 }
                 else
@@ -234,7 +234,7 @@
             }
             catch (Throwable e)
             {
-                throw new AssertionError("Executing the following queries did not lead to the expected result: \n" + Joiner.on("; \n").join(queries), e); 
+                throw new AssertionError("Executing the following queries did not lead to the expected result: \n" + Joiner.on("; \n").join(queries), e);
             }
             pk++;
         }
diff --git a/test/unit/org/apache/cassandra/db/rows/ThrottledUnfilteredIteratorTest.java b/test/unit/org/apache/cassandra/db/rows/ThrottledUnfilteredIteratorTest.java
index d5e6348..0c4a79d 100644
--- a/test/unit/org/apache/cassandra/db/rows/ThrottledUnfilteredIteratorTest.java
+++ b/test/unit/org/apache/cassandra/db/rows/ThrottledUnfilteredIteratorTest.java
@@ -21,7 +21,6 @@
 import static org.apache.cassandra.SchemaLoader.standardCFMD;
 import static org.junit.Assert.*;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
@@ -50,7 +49,6 @@
 import org.apache.cassandra.db.ReadExecutionController;
 import org.apache.cassandra.db.RegularAndStaticColumns;
 import org.apache.cassandra.db.RowUpdateBuilder;
-import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
@@ -112,7 +110,7 @@
 
         // flush and generate 1 sstable
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.disableAutoCompaction();
         cfs.forceMajorCompaction();
 
@@ -147,7 +145,7 @@
 
         // flush and generate 1 sstable
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.disableAutoCompaction();
         cfs.forceMajorCompaction();
 
@@ -205,7 +203,7 @@
 
         // flush and generate 1 sstable
         ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.disableAutoCompaction();
         cfs.forceMajorCompaction();
 
@@ -623,7 +621,7 @@
 
         new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(10, 22).build().applyUnsafe();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(2);
         for (int i = 1; i < 40; i += 2)
diff --git a/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java b/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java
index 2804733..c6deb24 100644
--- a/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java
+++ b/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java
@@ -337,4 +337,4 @@
     {
         System.out.println(str(list));
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java
index 58d26c1..a65b2ea 100644
--- a/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java
+++ b/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java
@@ -23,8 +23,8 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Queue;
-import java.util.UUID;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -49,7 +49,7 @@
 import org.apache.cassandra.net.AsyncStreamingOutputPlus;
 import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.streaming.DefaultConnectionFactory;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.SessionInfo;
 import org.apache.cassandra.streaming.StreamCoordinator;
@@ -62,6 +62,7 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -102,7 +103,7 @@
             .build()
             .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         sstable = store.getLiveSSTables().iterator().next();
@@ -129,7 +130,7 @@
     }
 
     @Test
-    public void testBlockReadingAndWritingOverWire() throws Exception
+    public void testBlockReadingAndWritingOverWire() throws Throwable
     {
         StreamSession session = setupStreamingSessionForTest();
         InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
@@ -204,13 +205,13 @@
 
     private StreamSession setupStreamingSessionForTest()
     {
-        StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new DefaultConnectionFactory(), false, false, null, PreviewKind.NONE);
-        StreamResultFuture future = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.BOOTSTRAP, Collections.<StreamEventHandler>emptyList(), streamCoordinator);
+        StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
+        StreamResultFuture future = StreamResultFuture.createInitiator(nextTimeUUID(), StreamOperation.BOOTSTRAP, Collections.<StreamEventHandler>emptyList(), streamCoordinator);
 
         InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
         streamCoordinator.addSessionInfo(new SessionInfo(peer, 0, peer, Collections.emptyList(), Collections.emptyList(), StreamSession.State.INITIALIZED));
 
-        StreamSession session = streamCoordinator.getOrCreateNextSession(peer);
+        StreamSession session = streamCoordinator.getOrCreateOutboundSession(peer);
         session.init(future);
         return session;
     }
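
The streaming test setup above illustrates three related renames: DefaultConnectionFactory becomes NettyStreamingConnectionFactory, stream plan identifiers become TimeUUIDs, and the coordinator hands out sessions via getOrCreateOutboundSession. A condensed sketch of the new bootstrap:

    // Sketch of the streaming-session bootstrap used by these tests.
    StreamCoordinator coordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1,
                                                          new NettyStreamingConnectionFactory(),
                                                          false, false, null, PreviewKind.NONE);
    StreamResultFuture future = StreamResultFuture.createInitiator(nextTimeUUID(), StreamOperation.BOOTSTRAP,
                                                                   Collections.emptyList(), coordinator);
    StreamSession session = coordinator.getOrCreateOutboundSession(peer);
    session.init(future);
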
diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java
index 9d663b5..93b6e71 100644
--- a/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java
+++ b/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java
@@ -25,6 +25,7 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.Keyspace;
@@ -78,7 +79,7 @@
             .build()
             .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         sstable = store.getLiveSSTables().iterator().next();
diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java
index 999a44e..0e5187c 100644
--- a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java
+++ b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java
@@ -26,6 +26,7 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -80,7 +81,7 @@
             .build()
             .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         sstable = store.getLiveSSTables().iterator().next();
diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java
index 0b37d66..e7795c8 100644
--- a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java
@@ -24,7 +24,6 @@
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -34,6 +33,7 @@
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Uninterruptibles;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.junit.Assert;
 import org.junit.Before;
@@ -52,27 +52,30 @@
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.streaming.DefaultConnectionFactory;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.streaming.OutgoingStream;
 import org.apache.cassandra.streaming.PreviewKind;
-import org.apache.cassandra.streaming.StreamConnectionFactory;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.streaming.StreamSession;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Ref;
 
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
+import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class CassandraStreamManagerTest
 {
     private static final String KEYSPACE = null;
     private String keyspace = null;
     private static final String table = "tbl";
-    private static final StreamConnectionFactory connectionFactory = new DefaultConnectionFactory();
+    private static final StreamingChannel.Factory connectionFactory = new NettyStreamingConnectionFactory();
 
     private TableMetadata tbm;
     private ColumnFamilyStore cfs;
@@ -92,13 +95,15 @@
         cfs = Schema.instance.getColumnFamilyStoreInstance(tbm.id);
     }
 
-    private static StreamSession session(UUID pendingRepair)
+    private static StreamSession session(TimeUUID pendingRepair)
     {
         try
         {
             return new StreamSession(StreamOperation.REPAIR,
                                      InetAddressAndPort.getByName("127.0.0.1"),
                                      connectionFactory,
+                                     null,
+                                     MessagingService.current_version,
                                      false,
                                      0,
                                      pendingRepair,
@@ -114,14 +119,14 @@
     {
         Set<SSTableReader> before = cfs.getLiveSSTables();
         queryable.run();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Set<SSTableReader> after = cfs.getLiveSSTables();
 
         Set<SSTableReader> diff = Sets.difference(after, before);
         return Iterables.getOnlyElement(diff);
     }
 
-    private static void mutateRepaired(SSTableReader sstable, long repairedAt, UUID pendingRepair, boolean isTransient) throws IOException
+    private static void mutateRepaired(SSTableReader sstable, long repairedAt, TimeUUID pendingRepair, boolean isTransient) throws IOException
     {
         Descriptor descriptor = sstable.descriptor;
         descriptor.getMetadataSerializer().mutateRepairMetadata(descriptor, repairedAt, pendingRepair, isTransient);
@@ -150,7 +155,7 @@
         return sstablesFromStreams(streams);
     }
 
-    private Set<SSTableReader> selectReaders(UUID pendingRepair)
+    private Set<SSTableReader> selectReaders(TimeUUID pendingRepair)
     {
         IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
         Collection<Range<Token>> ranges = Lists.newArrayList(new Range<Token>(partitioner.getMinimumToken(), partitioner.getMinimumToken()));
@@ -171,10 +176,10 @@
         SSTableReader sstable4 = createSSTable(() -> QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES (4, 4)", keyspace, table)));
 
 
-        UUID pendingRepair = UUIDGen.getTimeUUID();
+        TimeUUID pendingRepair = nextTimeUUID();
         long repairedAt = System.currentTimeMillis();
         mutateRepaired(sstable2, ActiveRepairService.UNREPAIRED_SSTABLE, pendingRepair, false);
-        mutateRepaired(sstable3, ActiveRepairService.UNREPAIRED_SSTABLE, UUIDGen.getTimeUUID(), false);
+        mutateRepaired(sstable3, UNREPAIRED_SSTABLE, nextTimeUUID(), false);
         mutateRepaired(sstable4, repairedAt, NO_PENDING_REPAIR, false);
 
 
@@ -199,7 +204,7 @@
         Collection<SSTableReader> allSSTables = cfs.getLiveSSTables();
         Assert.assertEquals(1, allSSTables.size());
         final Token firstToken = allSSTables.iterator().next().first.getToken();
-        DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMB(1);
+        DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMiB(1);
 
         Set<SSTableReader> sstablesBeforeRewrite = getReadersForRange(new Range<>(firstToken, firstToken));
         Assert.assertEquals(1, sstablesBeforeRewrite.size());
@@ -222,7 +227,7 @@
                 }
             }
         };
-        Thread t = NamedThreadFactory.createThread(r);
+        Thread t = NamedThreadFactory.createAnonymousThread(r);
         try
         {
             t.start();
@@ -231,7 +236,7 @@
         }
         finally
         {
-            DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMB(50);
+            DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMiB(50);
             done.set(true);
             t.join(20);
         }
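
Alongside the widened StreamSession constructor (which now also receives an optional pre-established channel and the messaging version), two smaller renames surface in this file: the preemptive-open setter spells out its binary unit, and anonymous test threads come from createAnonymousThread. Both calls as used above:

    DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMiB(1);      // was setSSTablePreemptiveOpenIntervalInMB(1)
    Thread t = NamedThreadFactory.createAnonymousThread(r);           // was NamedThreadFactory.createThread(r)
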
diff --git a/test/unit/org/apache/cassandra/db/streaming/EntireSSTableStreamConcurrentComponentMutationTest.java b/test/unit/org/apache/cassandra/db/streaming/EntireSSTableStreamConcurrentComponentMutationTest.java
index 3cc8943..8b63ba5 100644
--- a/test/unit/org/apache/cassandra/db/streaming/EntireSSTableStreamConcurrentComponentMutationTest.java
+++ b/test/unit/org/apache/cassandra/db/streaming/EntireSSTableStreamConcurrentComponentMutationTest.java
@@ -22,7 +22,6 @@
 import java.nio.channels.WritableByteChannel;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
@@ -46,6 +45,7 @@
 import io.netty.channel.ChannelPromise;
 import io.netty.channel.embedded.EmbeddedChannel;
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.RowUpdateBuilder;
@@ -68,10 +68,10 @@
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.SharedDefaultFileRegion;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.streaming.DefaultConnectionFactory;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.streaming.OutgoingStream;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.SessionInfo;
@@ -88,6 +88,7 @@
 import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
 
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertTrue;
 
 @RunWith(BMUnitRunner.class)
@@ -128,7 +129,7 @@
             .build()
             .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         Token start = ByteOrderedPartitioner.instance.getTokenFactory().fromString(Long.toHexString(0));
@@ -160,7 +161,7 @@
     }
 
     @Test
-    public void testStream() throws Exception
+    public void testStream() throws Throwable
     {
         testStreamWithConcurrentComponentMutation(NO_OP, NO_OP);
     }
@@ -170,12 +171,12 @@
      * update causes the actual transferred file size to be different from the one in {@link ComponentManifest}
      */
     @Test
-    public void testStreamWithStatsMutation() throws Exception
+    public void testStreamWithStatsMutation() throws Throwable
     {
         testStreamWithConcurrentComponentMutation(() -> {
 
             Descriptor desc = sstable.descriptor;
-            desc.getMetadataSerializer().mutate(desc, "testing", stats -> stats.mutateRepairedMetadata(0, UUID.randomUUID(), false));
+            desc.getMetadataSerializer().mutate(desc, "testing", stats -> stats.mutateRepairedMetadata(0, nextTimeUUID(), false));
 
             return null;
         }, NO_OP);
@@ -188,7 +189,7 @@
             targetLocation = "AFTER INVOKE serialize",
             condition = "$descriptor.cfname.contains(\"Standard1\")",
             action = "org.apache.cassandra.db.streaming.EntireSSTableStreamConcurrentComponentMutationTest.countDown();Thread.sleep(5000);")
-    public void testStreamWithIndexSummaryRedistributionDelaySavingSummary() throws Exception
+    public void testStreamWithIndexSummaryRedistributionDelaySavingSummary() throws Throwable
     {
         testStreamWithConcurrentComponentMutation(() -> {
             // wait until new index summary is partially written
@@ -203,7 +204,7 @@
         latch.countDown();
     }
 
-    private void testStreamWithConcurrentComponentMutation(Callable<?> runBeforeStreaming, Callable<?> runConcurrentWithStreaming) throws Exception
+    private void testStreamWithConcurrentComponentMutation(Callable<?> runBeforeStreaming, Callable<?> runConcurrentWithStreaming) throws Throwable
     {
         ByteBuf serializedFile = Unpooled.buffer(8192);
         InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
@@ -247,7 +248,7 @@
 
         // rewrite index summary file with new min/max index interval
         TableMetadata origin = store.metadata();
-        MigrationManager.announceTableUpdate(origin.unbuild().minIndexInterval(1).maxIndexInterval(2).build(), true);
+        SchemaTestUtil.announceTableUpdate(origin.unbuild().minIndexInterval(1).maxIndexInterval(2).build());
 
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstable, OperationType.INDEX_SUMMARY))
         {
@@ -257,7 +258,7 @@
         }
 
         // reset min/max index interval
-        MigrationManager.announceTableUpdate(origin, true);
+        SchemaTestUtil.announceTableUpdate(origin);
         return true;
     }
 
@@ -319,13 +320,13 @@
 
     private StreamSession setupStreamingSessionForTest()
     {
-        StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new DefaultConnectionFactory(), false, false, null, PreviewKind.NONE);
-        StreamResultFuture future = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.BOOTSTRAP, Collections.emptyList(), streamCoordinator);
+        StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
+        StreamResultFuture future = StreamResultFuture.createInitiator(nextTimeUUID(), StreamOperation.BOOTSTRAP, Collections.emptyList(), streamCoordinator);
 
         InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
         streamCoordinator.addSessionInfo(new SessionInfo(peer, 0, peer, Collections.emptyList(), Collections.emptyList(), StreamSession.State.INITIALIZED));
 
-        StreamSession session = streamCoordinator.getOrCreateNextSession(peer);
+        StreamSession session = streamCoordinator.getOrCreateOutboundSession(peer);
         session.init(future);
         return session;
     }
diff --git a/test/unit/org/apache/cassandra/db/transform/DuplicateRowCheckerTest.java b/test/unit/org/apache/cassandra/db/transform/DuplicateRowCheckerTest.java
index 2e2ee8f..e44cbcd 100644
--- a/test/unit/org/apache/cassandra/db/transform/DuplicateRowCheckerTest.java
+++ b/test/unit/org/apache/cassandra/db/transform/DuplicateRowCheckerTest.java
@@ -65,7 +65,7 @@
         createTable("CREATE TABLE %s (pk text, ck1 int, ck2 int, v int, PRIMARY KEY (pk, ck1, ck2))");
         for (int i = 0; i < 10; i++)
             execute("insert into %s (pk, ck1, ck2, v) values (?, ?, ?, ?)", "key", i, i, i);
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
 
         metadata = getCurrentColumnFamilyStore().metadata();
         cfs = getCurrentColumnFamilyStore();
diff --git a/test/unit/org/apache/cassandra/db/view/ViewBuilderTaskTest.java b/test/unit/org/apache/cassandra/db/view/ViewBuilderTaskTest.java
index 2341c73..c6df898 100644
--- a/test/unit/org/apache/cassandra/db/view/ViewBuilderTaskTest.java
+++ b/test/unit/org/apache/cassandra/db/view/ViewBuilderTaskTest.java
@@ -24,6 +24,7 @@
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.SystemKeyspace;
@@ -84,8 +85,8 @@
                               int expectedRowsInView) throws Throwable
             {
                 // Truncate the materialized view (not the base table)
-                cfs.viewManager.forceBlockingFlush();
-                cfs.viewManager.truncateBlocking(cfs.forceBlockingFlush(), System.currentTimeMillis());
+                Util.flush(cfs.viewManager);
+                cfs.viewManager.truncateBlocking(cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS), System.currentTimeMillis());
                 assertRowCount(execute("SELECT * FROM " + viewName), 0);
 
                 // Get the tokens from the referenced inserted rows
diff --git a/test/unit/org/apache/cassandra/db/view/ViewUtilsTest.java b/test/unit/org/apache/cassandra/db/view/ViewUtilsTest.java
index 7855150..e0ef3d6 100644
--- a/test/unit/org/apache/cassandra/db/view/ViewUtilsTest.java
+++ b/test/unit/org/apache/cassandra/db/view/ViewUtilsTest.java
@@ -18,36 +18,41 @@
 
 package org.apache.cassandra.db.view;
 
+import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Optional;
 
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.junit.Assert;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.locator.Replica;
-import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.dht.OrderPreservingPartitioner.StringToken;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.locator.IEndpointSnitch;
+import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.NetworkTopologyStrategy;
 import org.apache.cassandra.locator.PropertyFileSnitch;
+import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.ReplicationParams;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.service.StorageService;
 
 public class ViewUtilsTest
 {
+    private final String KS = "Keyspace1";
+
     @BeforeClass
-    public static void setUp() throws ConfigurationException
+    public static void setUp() throws ConfigurationException, IOException
     {
         DatabaseDescriptor.daemonInitialization();
+        ServerTestUtils.cleanupAndLeaveDirs();
         IEndpointSnitch snitch = new PropertyFileSnitch();
         DatabaseDescriptor.setEndpointSnitch(snitch);
         Keyspace.setInitialized();
@@ -73,11 +78,9 @@
         replicationMap.put("DC1", "1");
         replicationMap.put("DC2", "1");
 
-        Keyspace.clear("Keyspace1");
-        KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, replicationMap));
-        Schema.instance.load(meta);
+        recreateKeyspace(replicationMap);
 
-        Optional<Replica> naturalEndpoint = ViewUtils.getViewNaturalEndpoint(Keyspace.open("Keyspace1").getReplicationStrategy(),
+        Optional<Replica> naturalEndpoint = ViewUtils.getViewNaturalEndpoint(Keyspace.open(KS).getReplicationStrategy(),
                                                                              new StringToken("CA"),
                                                                              new StringToken("BB"));
 
@@ -106,11 +109,9 @@
         replicationMap.put("DC1", "2");
         replicationMap.put("DC2", "2");
 
-        Keyspace.clear("Keyspace1");
-        KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, replicationMap));
-        Schema.instance.load(meta);
+        recreateKeyspace(replicationMap);
 
-        Optional<Replica> naturalEndpoint = ViewUtils.getViewNaturalEndpoint(Keyspace.open("Keyspace1").getReplicationStrategy(),
+        Optional<Replica> naturalEndpoint = ViewUtils.getViewNaturalEndpoint(Keyspace.open(KS).getReplicationStrategy(),
                                                                              new StringToken("CA"),
                                                                              new StringToken("BB"));
 
@@ -138,14 +139,19 @@
         replicationMap.put("DC1", "1");
         replicationMap.put("DC2", "1");
 
-        Keyspace.clear("Keyspace1");
-        KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, replicationMap));
-        Schema.instance.load(meta);
+        recreateKeyspace(replicationMap);
 
-        Optional<Replica> naturalEndpoint = ViewUtils.getViewNaturalEndpoint(Keyspace.open("Keyspace1").getReplicationStrategy(),
+        Optional<Replica> naturalEndpoint = ViewUtils.getViewNaturalEndpoint(Keyspace.open(KS).getReplicationStrategy(),
                                                                              new StringToken("AB"),
                                                                              new StringToken("BB"));
 
         Assert.assertFalse(naturalEndpoint.isPresent());
     }
+
+    private void recreateKeyspace(Map<String, String> replicationMap)
+    {
+        SchemaTestUtil.dropKeyspaceIfExist(KS, true);
+        KeyspaceMetadata meta = KeyspaceMetadata.create(KS, KeyspaceParams.create(false, replicationMap));
+        SchemaTestUtil.addOrUpdateKeyspace(meta, true);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/db/virtual/BatchMetricsTableTest.java b/test/unit/org/apache/cassandra/db/virtual/BatchMetricsTableTest.java
new file mode 100644
index 0000000..8c34759
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/BatchMetricsTableTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Snapshot;
+import com.datastax.driver.core.ResultSet;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.statements.BatchStatement;
+import org.apache.cassandra.metrics.BatchMetrics;
+
+import static java.lang.String.format;
+import static org.junit.Assert.assertEquals;
+
+public class BatchMetricsTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        CQLTester.setUpClass();
+    }
+
+    @Before
+    public void config()
+    {
+        BatchMetricsTable table = new BatchMetricsTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+    }
+
+    @Test
+    public void testSelectAll() throws Throwable
+    {
+        BatchMetrics metrics = BatchStatement.metrics;
+
+        for (int i = 0; i < 10; i++)
+        {
+            metrics.partitionsPerLoggedBatch.update(i);
+            metrics.partitionsPerUnloggedBatch.update(i + 10);
+            metrics.partitionsPerCounterBatch.update(i * 10);
+        }
+
+        ResultSet result = executeNet(format("SELECT * FROM %s.batch_metrics", KS_NAME));
+        assertEquals(5, result.getColumnDefinitions().size());
+        AtomicInteger rowCount = new AtomicInteger(0);
+        result.forEach(r -> {
+            Snapshot snapshot = getExpectedHistogram(metrics, r.getString("name")).getSnapshot();
+            assertEquals(snapshot.getMedian(), r.getDouble("p50th"), 0.0);
+            assertEquals(snapshot.get99thPercentile(), r.getDouble("p99th"), 0.0);
+            rowCount.addAndGet(1);
+        });
+
+        assertEquals(3, rowCount.get());
+    }
+
+    private Histogram getExpectedHistogram(BatchMetrics metrics, String name)
+    {
+        if ("partitions_per_logged_batch".equals(name))
+            return metrics.partitionsPerLoggedBatch;
+
+        if ("partitions_per_unlogged_batch".equals(name))
+            return metrics.partitionsPerUnloggedBatch;
+
+        return metrics.partitionsPerCounterBatch;
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/CQLMetricsTableTest.java b/test/unit/org/apache/cassandra/db/virtual/CQLMetricsTableTest.java
new file mode 100644
index 0000000..658b841
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/CQLMetricsTableTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.collect.ImmutableList;
+
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Session;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+import org.junit.BeforeClass;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.QueryProcessor;
+import org.apache.cassandra.metrics.CQLMetrics;
+
+public class CQLMetricsTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        CQLTester.setUpClass();
+    }
+
+    private void queryAndValidateMetrics(CQLMetrics expectedMetrics) throws Throwable
+    {
+        String getMetricsQuery = "SELECT * FROM " + KS_NAME + "." + CQLMetricsTable.TABLE_NAME;
+        ResultSet vtsRows = executeNet(getMetricsQuery);
+
+        assertEquals(2, vtsRows.getColumnDefinitions().size());
+
+        AtomicInteger rowCount = new AtomicInteger(0);
+        vtsRows.forEach(r -> {
+            final double metricValue = r.getDouble(CQLMetricsTable.VALUE_COL);
+            switch (r.getString(CQLMetricsTable.NAME_COL))
+            {
+                case CQLMetricsTable.PREPARED_STATEMENTS_COUNT:
+                    assertEquals(expectedMetrics.preparedStatementsCount.getValue(), metricValue, 0);
+                    break;
+                case CQLMetricsTable.PREPARED_STATEMENTS_EVICTED:
+                    assertEquals(expectedMetrics.preparedStatementsEvicted.getCount(), metricValue, 0);
+                    break;
+                case CQLMetricsTable.PREPARED_STATEMENTS_EXECUTED:
+                    assertEquals(expectedMetrics.preparedStatementsExecuted.getCount(), metricValue, 0);
+                    break;
+                case CQLMetricsTable.PREPARED_STATEMENTS_RATIO:
+                    assertEquals(expectedMetrics.preparedStatementsRatio.getValue(), metricValue, 0.01);
+                    break;
+                case CQLMetricsTable.REGULAR_STATEMENTS_EXECUTED:
+                    assertEquals(expectedMetrics.regularStatementsExecuted.getCount(), metricValue, 0);
+                    break;
+            }
+            rowCount.getAndIncrement();
+        });
+
+        assertEquals(5, rowCount.get());
+    }
+
+    @Test
+    public void testUsingPrepareStmts() throws Throwable
+    {
+        CQLMetricsTable table = new CQLMetricsTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+
+        String ks = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
+        String tbl = createTable(ks, "CREATE TABLE %s (id int PRIMARY KEY, cid int, val text)");
+        Session session = sessionNet();
+
+        String insertCQL = "INSERT INTO " + ks + "." + tbl + " (id, cid, val) VALUES (?, ?, ?)";
+        PreparedStatement preparedInsert = session.prepare(insertCQL);
+
+        String selectCQL = "SELECT * FROM " + ks + "." + tbl + " WHERE id = ?";
+        PreparedStatement preparedSelect = session.prepare(selectCQL);
+
+        for (int i = 0; i < 10; i++)
+        {
+            session.execute(preparedInsert.bind(i, i, "value" + i));
+            session.execute(preparedSelect.bind(i));
+        }
+
+        queryAndValidateMetrics(QueryProcessor.metrics);
+    }
+
+    @Test
+    public void testUsingInjectedValues() throws Throwable
+    {
+        CQLMetrics cqlMetrics = new CQLMetrics();
+        CQLMetricsTable table = new CQLMetricsTable(KS_NAME, cqlMetrics);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+
+        // With initial injected values
+        cqlMetrics.preparedStatementsExecuted.inc(50);
+        cqlMetrics.regularStatementsExecuted.inc(100);
+        cqlMetrics.preparedStatementsEvicted.inc(25);
+        queryAndValidateMetrics(cqlMetrics);
+
+        // Test again with updated values
+        cqlMetrics.preparedStatementsExecuted.inc(150);
+        cqlMetrics.regularStatementsExecuted.inc(200);
+        cqlMetrics.preparedStatementsEvicted.inc(50);
+        queryAndValidateMetrics(cqlMetrics);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/ClientsTableTest.java b/test/unit/org/apache/cassandra/db/virtual/ClientsTableTest.java
new file mode 100644
index 0000000..5b9aa14
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/ClientsTableTest.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.net.InetAddress;
+
+import com.google.common.collect.ImmutableList;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Row;
+import org.apache.cassandra.cql3.CQLTester;
+import org.assertj.core.api.Assertions;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ClientsTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+    
+    private ClientsTable table;
+    
+    @BeforeClass
+    public static void setUpClass()
+    {
+        CQLTester.setUpClass();
+    }
+
+    @Before
+    public void config()
+    {
+        table = new ClientsTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+    }
+    
+    @Test
+    public void testSelectAll() throws Throwable
+    {
+        ResultSet result = executeNet("SELECT * FROM vts.clients");
+        
+        for (Row r : result)
+        {
+            Assert.assertEquals(InetAddress.getLoopbackAddress(), r.getInet("address"));
+            r.getInt("port");
+            Assert.assertTrue(r.getInt("port") > 0);
+            Assert.assertNotNull(r.getMap("client_options", String.class, String.class));
+            Assert.assertTrue(r.getLong("request_count") > 0 );
+            // it is questionable whether the following assertions belong in this test
+            Assert.assertEquals("localhost", r.getString("hostname"));
+            Assertions.assertThat(r.getMap("client_options", String.class, String.class))
+                      .hasEntrySatisfying("DRIVER_NAME", value -> assertThat(value).contains(r.getString("driver_name")))
+                      .hasEntrySatisfying("DRIVER_VERSION", value -> assertThat(value).contains(r.getString("driver_version")));
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/CredentialsCacheKeysTableTest.java b/test/unit/org/apache/cassandra/db/virtual/CredentialsCacheKeysTableTest.java
new file mode 100644
index 0000000..45132a9
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/CredentialsCacheKeysTableTest.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.EndPoint;
+import com.datastax.driver.core.PlainTextAuthProvider;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.IAuthenticator;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+
+public class CredentialsCacheKeysTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+    private static AuthTestUtils.LocalPasswordAuthenticator passwordAuthenticator;
+
+    @SuppressWarnings("FieldCanBeLocal")
+    private CredentialsCacheKeysTable table;
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        // high value is used for convenient debugging
+        DatabaseDescriptor.setCredentialsValidity(20_000);
+
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+        passwordAuthenticator = (AuthTestUtils.LocalPasswordAuthenticator) DatabaseDescriptor.getAuthenticator();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+    }
+
+    @Before
+    public void config()
+    {
+        table = new CredentialsCacheKeysTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+
+        // ensure nothing stays cached between tests
+        passwordAuthenticator.getCredentialsCache().invalidate();
+        disablePreparedReuseForTest();
+    }
+
+    @AfterClass
+    public static void tearDownClass()
+    {
+        DatabaseDescriptor.setCredentialsValidity(DatabaseDescriptor.getRawConfig().credentials_validity.toMilliseconds());
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.credentials_cache_keys"));
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.credentials_cache_keys"),
+                row("role_a"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.credentials_cache_keys WHERE role='role_a'"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.credentials_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testDeletePartition() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("DELETE FROM vts.credentials_cache_keys WHERE role='role_a'");
+
+        assertRows(execute("SELECT * FROM vts.credentials_cache_keys"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testDeletePartitionWithInvalidValues() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+
+        execute("DELETE FROM vts.credentials_cache_keys WHERE role='invalid_role'");
+
+        assertRows(execute("SELECT * FROM vts.credentials_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testTruncateTable() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("TRUNCATE vts.credentials_cache_keys");
+
+        assertEmpty(execute("SELECT * FROM vts.credentials_cache_keys"));
+    }
+
+    @Test
+    public void testUnsupportedOperations() throws Throwable
+    {
+        // range tombstones are not supported; however, this table has no clustering columns, so that case is not covered by these tests
+
+        // column deletion is not supported; however, this table has no regular columns, so that case is not covered by these tests
+
+        // insert is not supported
+        assertInvalidMessage("Column modification is not supported by table vts.credentials_cache_keys",
+                "INSERT INTO vts.credentials_cache_keys (role) VALUES ('role_e')");
+
+        // update is not supported; however, this table has no regular columns, so that case is not covered by these tests
+    }
+
+    private void cachePermissions(RoleResource roleResource)
+    {
+        IAuthenticator.SaslNegotiator saslNegotiator = passwordAuthenticator.newSaslNegotiator(null);
+        saslNegotiator.evaluateResponse(new PlainTextAuthProvider(roleResource.getRoleName(), "ignored")
+                .newAuthenticator((EndPoint) null, null)
+                .initialResponse());
+        saslNegotiator.getAuthenticatedUser();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/GossipInfoTableTest.java b/test/unit/org/apache/cassandra/db/virtual/GossipInfoTableTest.java
new file mode 100644
index 0000000..a2c2a2b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/GossipInfoTableTest.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.function.Supplier;
+
+import org.awaitility.Awaitility;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+import static com.google.common.collect.ImmutableList.of;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class GossipInfoTableTest extends CQLTester
+{
+    @BeforeClass
+    public static void setUpClass()
+    {
+        CQLTester.setUpClass();
+    }
+
+    @Test
+    public void testSelectAllWhenGossipInfoIsEmpty() throws Throwable
+    {
+        // we have not triggered gossiper yet
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace("vts_1",
+                                                                      of(new GossipInfoTable("vts_1", HashMap::new))));
+        assertEmpty(execute("SELECT * FROM vts_1.gossip_info"));
+    }
+
+    @Test
+    public void testSelectAllWithStateTransitions() throws Throwable
+    {
+        try
+        {
+            requireNetwork(); // triggers gossiper
+
+            ConcurrentMap<InetAddressAndPort, EndpointState> states = Gossiper.instance.endpointStateMap;
+            Awaitility.await().until(() -> !states.isEmpty());
+            Map.Entry<InetAddressAndPort, EndpointState> entry = states.entrySet().stream().findFirst()
+                    .orElseThrow(AssertionError::new);
+            InetAddressAndPort endpoint = entry.getKey();
+            EndpointState localState = new EndpointState(entry.getValue());
+
+            Supplier<Map<InetAddressAndPort, EndpointState>> endpointStateMapSupplier = () -> new HashMap<InetAddressAndPort, EndpointState>() {{put(endpoint, localState);}};
+
+            VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace("vts_2",
+                                                                          of(new GossipInfoTable("vts_2", endpointStateMapSupplier))));
+
+            UntypedResultSet resultSet = execute("SELECT * FROM vts_2.gossip_info");
+
+            assertThat(resultSet.size()).isEqualTo(1);
+            UntypedResultSet.Row row = resultSet.one();
+            assertThat(row.getColumns().size()).isEqualTo(64);
+
+            assertThat(endpoint).isNotNull();
+            assertThat(localState).isNotNull();
+            assertThat(row.getInetAddress("address")).isEqualTo(endpoint.getAddress());
+            assertThat(row.getInt("port")).isEqualTo(endpoint.getPort());
+            assertThat(row.getString("hostname")).isEqualTo(endpoint.getHostName());
+            assertThat(row.getInt("generation")).isEqualTo(localState.getHeartBeatState().getGeneration());
+            assertThat(row.getInt("heartbeat")).isNotNull();
+
+            assertValue(row, "status", localState, ApplicationState.STATUS);
+            assertValue(row, "load", localState, ApplicationState.LOAD);
+            assertValue(row, "schema", localState, ApplicationState.SCHEMA);
+            assertValue(row, "dc", localState, ApplicationState.DC);
+            assertValue(row, "rack", localState, ApplicationState.RACK);
+            assertValue(row, "release_version", localState, ApplicationState.RELEASE_VERSION);
+            assertValue(row, "removal_coordinator", localState, ApplicationState.REMOVAL_COORDINATOR);
+            assertValue(row, "internal_ip", localState, ApplicationState.INTERNAL_IP);
+            assertValue(row, "rpc_address", localState, ApplicationState.RPC_ADDRESS);
+            assertValue(row, "severity", localState, ApplicationState.SEVERITY);
+            assertValue(row, "net_version", localState, ApplicationState.NET_VERSION);
+            assertValue(row, "host_id", localState, ApplicationState.HOST_ID);
+            assertValue(row, "rpc_ready", localState, ApplicationState.RPC_READY);
+            assertValue(row, "internal_address_and_port", localState, ApplicationState.INTERNAL_ADDRESS_AND_PORT);
+            assertValue(row, "native_address_and_port", localState, ApplicationState.NATIVE_ADDRESS_AND_PORT);
+            assertValue(row, "status_with_port", localState, ApplicationState.STATUS_WITH_PORT);
+            assertValue(row, "sstable_versions", localState, ApplicationState.SSTABLE_VERSIONS);
+            assertValue(row, "disk_usage", localState, ApplicationState.DISK_USAGE);
+            assertValue(row, "x_11_padding", localState, ApplicationState.X_11_PADDING);
+            assertValue(row, "x1", localState, ApplicationState.X1);
+            assertValue(row, "x2", localState, ApplicationState.X2);
+            assertValue(row, "x3", localState, ApplicationState.X3);
+            assertValue(row, "x4", localState, ApplicationState.X4);
+            assertValue(row, "x5", localState, ApplicationState.X5);
+            assertValue(row, "x6", localState, ApplicationState.X6);
+            assertValue(row, "x7", localState, ApplicationState.X7);
+            assertValue(row, "x8", localState, ApplicationState.X8);
+            assertValue(row, "x9", localState, ApplicationState.X9);
+            assertValue(row, "x10", localState, ApplicationState.X10);
+
+            assertVersion(row, "status_version", localState, ApplicationState.STATUS);
+            assertVersion(row, "load_version", localState, ApplicationState.LOAD);
+            assertVersion(row, "schema_version", localState, ApplicationState.SCHEMA);
+            assertVersion(row, "dc_version", localState, ApplicationState.DC);
+            assertVersion(row, "rack_version", localState, ApplicationState.RACK);
+            assertVersion(row, "release_version_version", localState, ApplicationState.RELEASE_VERSION);
+            assertVersion(row, "removal_coordinator_version", localState, ApplicationState.REMOVAL_COORDINATOR);
+            assertVersion(row, "internal_ip_version", localState, ApplicationState.INTERNAL_IP);
+            assertVersion(row, "rpc_address_version", localState, ApplicationState.RPC_ADDRESS);
+            assertVersion(row, "severity_version", localState, ApplicationState.SEVERITY);
+            assertVersion(row, "net_version_version", localState, ApplicationState.NET_VERSION);
+            assertVersion(row, "host_id_version", localState, ApplicationState.HOST_ID);
+            assertVersion(row, "tokens_version", localState, ApplicationState.TOKENS);
+            assertVersion(row, "rpc_ready_version", localState, ApplicationState.RPC_READY);
+            assertVersion(row, "internal_address_and_port_version", localState, ApplicationState.INTERNAL_ADDRESS_AND_PORT);
+            assertVersion(row, "native_address_and_port_version", localState, ApplicationState.NATIVE_ADDRESS_AND_PORT);
+            assertVersion(row, "status_with_port_version", localState, ApplicationState.STATUS_WITH_PORT);
+            assertVersion(row, "sstable_versions_version", localState, ApplicationState.SSTABLE_VERSIONS);
+            assertVersion(row, "disk_usage_version", localState, ApplicationState.DISK_USAGE);
+            assertVersion(row, "x_11_padding", localState, ApplicationState.X_11_PADDING);
+            assertVersion(row, "x1", localState, ApplicationState.X1);
+            assertVersion(row, "x2", localState, ApplicationState.X2);
+            assertVersion(row, "x3", localState, ApplicationState.X3);
+            assertVersion(row, "x4", localState, ApplicationState.X4);
+            assertVersion(row, "x5", localState, ApplicationState.X5);
+            assertVersion(row, "x6", localState, ApplicationState.X6);
+            assertVersion(row, "x7", localState, ApplicationState.X7);
+            assertVersion(row, "x8", localState, ApplicationState.X8);
+            assertVersion(row, "x9", localState, ApplicationState.X9);
+            assertVersion(row, "x10", localState, ApplicationState.X10);
+        }
+        finally
+        {
+            // clean up the gossip states
+            Gossiper.instance.clearUnsafe();
+        }
+    }
+
+    private void assertValue(UntypedResultSet.Row row, String column, EndpointState localState, ApplicationState key)
+    {
+        if (row.has(column))
+        {
+            assertThat(localState.getApplicationState(key)).as("'%s' is expected to be not-null", key)
+                                                           .isNotNull();
+            String tableString = row.getString(column);
+            String stateString = localState.getApplicationState(key).value;
+            assertThat(tableString).as("'%s' is expected to match column '%s', table string: %s, state string: %s",
+                                       key, column, tableString, stateString).isEqualTo(stateString);
+        }
+        else
+        {
+            assertThat(localState.getApplicationState(key)).as("'%s' is expected to be null", key)
+                                                           .isNull();
+        }
+    }
+
+    private void assertVersion(UntypedResultSet.Row row, String column, EndpointState localState, ApplicationState key)
+    {
+        if (row.has(column))
+        {
+            assertThat(localState.getApplicationState(key)).as("'%s' is expected to be not-null", key)
+                                                           .isNotNull();
+
+            int tableVersion = row.getInt(column);
+            int stateVersion = localState.getApplicationState(key).version;
+
+            assertThat(tableVersion).as("'%s' is expected to match column '%s', table int: %s, state int: %s",
+                                        key, column, tableVersion, stateVersion).isEqualTo(stateVersion);
+        }
+        else
+        {
+            assertThat(localState.getApplicationState(key)).as("'%s' is expected to be null", key)
+                                                           .isNull();
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/JmxPermissionsCacheKeysTableTest.java b/test/unit/org/apache/cassandra/db/virtual/JmxPermissionsCacheKeysTableTest.java
new file mode 100644
index 0000000..1dfe622
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/JmxPermissionsCacheKeysTableTest.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import javax.security.auth.Subject;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.CassandraPrincipal;
+import org.apache.cassandra.auth.IAuthorizer;
+import org.apache.cassandra.auth.IResource;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.JMXResource;
+import org.apache.cassandra.auth.Permission;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.auth.jmx.AuthorizationProxy;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+
+public class JmxPermissionsCacheKeysTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+    private static final AuthorizationProxy authorizationProxy = new AuthTestUtils.NoAuthSetupAuthorizationProxy();
+
+    @SuppressWarnings("FieldCanBeLocal")
+    private JmxPermissionsCacheKeysTable table;
+
+    // this method is intentionally not called "setUpClass" so that it can throw the exception raised by the startJMXServer method
+    @BeforeClass
+    public static void setup() throws Exception {
+        // high value is used for convenient debugging
+        DatabaseDescriptor.setPermissionsValidity(20_000);
+
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+
+        List<IResource> resources = Arrays.asList(
+                JMXResource.root(),
+                JMXResource.mbean("org.apache.cassandra.db:type=Tables,*"));
+
+        IAuthorizer authorizer = DatabaseDescriptor.getAuthorizer();
+        for (IResource resource : resources)
+        {
+            Set<Permission> permissions = resource.applicablePermissions();
+            authorizer.grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_A);
+            authorizer.grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_B);
+        }
+
+        startJMXServer();
+    }
+
+    @Before
+    public void config()
+    {
+        table = new JmxPermissionsCacheKeysTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+
+        // ensure nothing stays cached between tests
+        AuthorizationProxy.jmxPermissionsCache.invalidate();
+        disablePreparedReuseForTest();
+    }
+
+    @AfterClass
+    public static void tearDownClass()
+    {
+        DatabaseDescriptor.setPermissionsValidity(DatabaseDescriptor.getRawConfig().permissions_validity.toMilliseconds());
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.jmx_permissions_cache_keys"));
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.jmx_permissions_cache_keys"),
+                row("role_a"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.jmx_permissions_cache_keys WHERE role='role_a'"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.jmx_permissions_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testDeletePartition() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("DELETE FROM vts.jmx_permissions_cache_keys WHERE role='role_a'");
+
+        assertRows(execute("SELECT * FROM vts.jmx_permissions_cache_keys"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testDeletePartitionWithInvalidValues() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+
+        execute("DELETE FROM vts.jmx_permissions_cache_keys WHERE role='invalid_role'");
+
+        assertRows(execute("SELECT * FROM vts.jmx_permissions_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testTruncateTable() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("TRUNCATE vts.jmx_permissions_cache_keys");
+
+        assertEmpty(execute("SELECT * FROM vts.jmx_permissions_cache_keys"));
+    }
+
+    @Test
+    public void testUnsupportedOperations() throws Throwable
+    {
+        // range tombstones are not supported; however, this table has no clustering columns, so that case is not covered by these tests
+
+        // column deletion is not supported; however, this table has no regular columns, so that case is not covered by these tests
+
+        // insert is not supported
+        assertInvalidMessage("Column modification is not supported by table vts.jmx_permissions_cache_keys",
+                "INSERT INTO vts.jmx_permissions_cache_keys (role) VALUES ('role_e')");
+
+        // update is not supported; however, this table has no regular columns, so that case is not covered by these tests
+    }
+
+    private void cachePermissions(RoleResource roleResource)
+    {
+        Subject userSubject = new Subject();
+        userSubject.getPrincipals().add(new CassandraPrincipal(roleResource.getRoleName()));
+
+        authorizationProxy.authorize(userSubject, "queryNames", null);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/LocalRepairTablesTest.java b/test/unit/org/apache/cassandra/db/virtual/LocalRepairTablesTest.java
new file mode 100644
index 0000000..2a6da1a
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/LocalRepairTablesTest.java
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.CommonRange;
+import org.apache.cassandra.repair.RepairJobDesc;
+import org.apache.cassandra.repair.RepairRunnable;
+import org.apache.cassandra.repair.messages.PrepareMessage;
+import org.apache.cassandra.repair.messages.RepairOption;
+import org.apache.cassandra.repair.state.Completable;
+import org.apache.cassandra.repair.state.CoordinatorState;
+import org.apache.cassandra.repair.state.JobState;
+import org.apache.cassandra.repair.state.ParticipateState;
+import org.apache.cassandra.repair.state.SessionState;
+import org.apache.cassandra.repair.state.State;
+import org.apache.cassandra.repair.state.ValidationState;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
+
+public class LocalRepairTablesTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+    private static final ImmutableSet<InetAddressAndPort> ADDRESSES = ImmutableSet.of(address(127, 0, 0, 1));
+    private static final List<String> ADDRESSES_STR = ADDRESSES.stream().map(Object::toString).collect(Collectors.toList());
+    private static final CommonRange COMMON_RANGE = new CommonRange(ADDRESSES, Collections.emptySet(), Collections.singleton(range(0, 100)));
+    private static final String REPAIR_KS = "system";
+    private static final String REPAIR_TABLE = "peers";
+
+    @BeforeClass
+    public static void before()
+    {
+        CQLTester.setUpClass();
+
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, LocalRepairTables.getAll(KS_NAME)));
+    }
+
+    @Before
+    public void cleanupRepairs()
+    {
+        ActiveRepairService.instance.clearLocalRepairState();
+    }
+
+    @Test
+    public void repairs() throws Throwable
+    {
+        assertEmpty("repairs");
+
+        CoordinatorState state = coordinator();
+        assertInit("repairs", state);
+
+        state.phase.setup();
+        assertState("repairs", state, CoordinatorState.State.SETUP);
+
+        List<ColumnFamilyStore> tables = Collections.singletonList(table());
+        RepairRunnable.NeighborsAndRanges neighbors = neighbors();
+        state.phase.start(tables, neighbors);
+        assertState("repairs", state, CoordinatorState.State.START);
+        List<List<String>> expectedRanges = neighbors.commonRanges.stream().map(a -> a.ranges.stream().map(Object::toString).collect(Collectors.toList())).collect(Collectors.toList());
+        assertRowsIgnoringOrder(execute(t("SELECT id, completed, participants, table_names, ranges, unfiltered_ranges, participants FROM %s.repairs")),
+                                row(state.id, false, ADDRESSES_STR, tables.stream().map(a -> a.name).collect(Collectors.toList()), expectedRanges, expectedRanges, neighbors.participants.stream().map(Object::toString).collect(Collectors.toList())));
+
+        state.phase.prepareStart();
+        assertState("repairs", state, CoordinatorState.State.PREPARE_START);
+
+        state.phase.prepareComplete();
+        assertState("repairs", state, CoordinatorState.State.PREPARE_COMPLETE);
+
+        state.phase.repairSubmitted();
+        assertState("repairs", state, CoordinatorState.State.REPAIR_START);
+
+        state.phase.repairCompleted();
+        assertState("repairs", state, CoordinatorState.State.REPAIR_COMPLETE);
+
+        state.phase.success("testing");
+        assertSuccess("repairs", state);
+
+        // make sure serialization works
+        execute(t("SELECT * FROM %s.repairs"));
+    }
+
+    @Test
+    public void sessions() throws Throwable
+    {
+        assertEmpty("repair_sessions");
+
+        SessionState state = session();
+        assertInit("repair_sessions", state);
+
+        state.phase.start();
+        assertState("repair_sessions", state, SessionState.State.START);
+
+        state.phase.jobsSubmitted();
+        assertState("repair_sessions", state, SessionState.State.JOBS_START);
+
+        state.phase.success("testing");
+        assertSuccess("repair_sessions", state);
+
+        assertRowsIgnoringOrder(execute(t("SELECT participants FROM %s.repair_sessions WHERE id=?"), state.id),
+                                row(ADDRESSES_STR));
+
+        // make sure serialization works
+        execute(t("SELECT * FROM %s.repair_sessions"));
+    }
+
+    @Test
+    public void jobs() throws Throwable
+    {
+        assertEmpty("repair_jobs");
+
+        JobState state = job();
+        assertInit("repair_jobs", state);
+
+        state.phase.start();
+        assertState("repair_jobs", state, JobState.State.START);
+
+        state.phase.snapshotsSubmitted();
+        assertState("repair_jobs", state, JobState.State.SNAPSHOT_START);
+        state.phase.snapshotsCompleted();
+        assertState("repair_jobs", state, JobState.State.SNAPSHOT_COMPLETE);
+        state.phase.validationSubmitted();
+        assertState("repair_jobs", state, JobState.State.VALIDATION_START);
+        state.phase.validationCompleted();
+        assertState("repair_jobs", state, JobState.State.VALIDATION_COMPLETE);
+        state.phase.streamSubmitted();
+        assertState("repair_jobs", state, JobState.State.STREAM_START);
+
+        state.phase.success("testing");
+        assertSuccess("repair_jobs", state);
+
+        assertRowsIgnoringOrder(execute(t("SELECT participants FROM %s.repair_jobs WHERE id=?"), state.id),
+                                row(ADDRESSES_STR));
+
+        // make sure serialization works
+        execute(t("SELECT * FROM %s.repair_jobs"));
+    }
+
+    @Test
+    public void participates() throws Throwable
+    {
+        assertEmpty("repair_participates");
+        ParticipateState state = participate();
+
+        assertInit("repair_participates", state);
+        state.phase.success("testing");
+        assertRowsIgnoringOrder(execute(t("SELECT id, initiator, ranges, failure_cause, success_message, state_init_timestamp, state_success_timestamp, state_failure_timestamp FROM %s.repair_participates WHERE id = ?"), state.id),
+                                row(state.getId(), FBUtilities.getBroadcastAddressAndPort().toString(), Arrays.asList("(0,42]"), null, "testing", new Date(state.getInitializedAtMillis()), new Date(state.getLastUpdatedAtMillis()), null));
+
+        state = participate();
+        assertInit("repair_participates", state);
+        state.phase.fail("testing");
+        assertRowsIgnoringOrder(execute(t("SELECT id, completed, initiator, ranges, failure_cause, success_message, state_init_timestamp, state_success_timestamp, state_failure_timestamp FROM %s.repair_participates WHERE id = ?"), state.id),
+                                row(state.getId(), true, FBUtilities.getBroadcastAddressAndPort().toString(), Arrays.asList("(0,42]"), "testing", null, new Date(state.getInitializedAtMillis()), null, new Date(state.getLastUpdatedAtMillis())));
+
+        // make sure serialization works
+        execute(t("SELECT * FROM %s.repair_participates"));
+    }
+
+    @Test
+    public void validations() throws Throwable
+    {
+        assertEmpty("repair_validations");
+
+        ValidationState state = validation();
+        assertInit("repair_validations", state);
+
+        // progress is defined by estimated partitions and how many partitions have been processed; disable checking in shared functions
+        state.phase.start(100, 100);
+        assertState("repair_validations", state, ValidationState.State.START);
+
+        for (int i = 0; i < 10; i++)
+        {
+            state.partitionsProcessed += 10;
+            state.bytesRead += 10;
+            state.updated();
+
+            // the minimum of 99% is used because >= 100% gets lowered to 99%; the last 1% is only reached once validation is actually complete
+            assertRowsIgnoringOrder(execute(t("SELECT id, initiator, status, progress_percentage, estimated_partitions, estimated_total_bytes, partitions_processed, bytes_read, failure_cause, success_message FROM %s.repair_validations")),
+                                    row(state.getId(), FBUtilities.getBroadcastAddressAndPort().toString(), "start", Math.min(99.0F, (float) state.partitionsProcessed), 100L, 100L, state.partitionsProcessed, state.bytesRead, null, null));
+        }
+
+        state.phase.sendingTrees();
+        assertState("repair_validations", state, ValidationState.State.SENDING_TREES);
+
+        state.phase.success("testing");
+        assertSuccess("repair_validations", state);
+
+        // make sure serialization works
+        execute(t("SELECT * FROM %s.repair_validations"));
+    }
+
+    private void assertEmpty(String table) throws Throwable
+    {
+        assertRowCount(execute(t("SELECT * FROM %s." + table)), 0);
+    }
+
+    private void assertInit(String table, Completable<?> state) throws Throwable
+    {
+        assertRowsIgnoringOrder(execute(t("SELECT id, state_init_timestamp, failure_cause, success_message FROM %s." + table + " WHERE id = ?"), state.getId()),
+                                row(state.getId(), new Date(state.getInitializedAtMillis()), null, null));
+    }
+
+    private void assertInit(String table, State<?, ?> state) throws Throwable
+    {
+        assertRowsIgnoringOrder(execute(t("SELECT id, status, state_init_timestamp, failure_cause, success_message FROM %s." + table + " WHERE id = ?"), state.getId()),
+                                row(state.getId(), "init", new Date(state.getInitializedAtMillis()), null, null));
+    }
+
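+    // asserts that the row for the given state is not yet completed, its status matches the lowercased enum name, and it carries no failure/success message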
+    private <T extends Enum<T>> void assertState(String table, State<?, ?> state, T expectedState) throws Throwable
+    {
+        assertRowsIgnoringOrder(execute(t("SELECT id, completed, status, failure_cause, success_message FROM %s." + table + " WHERE id = ?"), state.getId()),
+                                row(state.getId(), false, expectedState.name().toLowerCase(), null, null));
+    }
+
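+    // asserts that the row for the given state is completed with status 'success' and the "testing" success message used throughout these tests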
+    private void assertSuccess(String table, State<?, ?> state) throws Throwable
+    {
+        assertRowsIgnoringOrder(execute(t("SELECT id, completed, status, failure_cause, success_message FROM %s." + table + " WHERE id = ?"), state.getId()),
+                                row(state.getId(), true, "success", null, "testing"));
+    }
+
+    private static ColumnFamilyStore table()
+    {
+        return table(REPAIR_KS, REPAIR_TABLE);
+    }
+
+    private static ColumnFamilyStore table(String ks, String name)
+    {
+        return Schema.instance.getColumnFamilyStoreInstance(Schema.instance.getTableMetadata(ks, name).id);
+    }
+
+    private static RepairRunnable.NeighborsAndRanges neighbors()
+    {
+        return new RepairRunnable.NeighborsAndRanges(false, ADDRESSES, ImmutableList.of(COMMON_RANGE));
+    }
+
+    private static Range<Token> range(long a, long b)
+    {
+        return new Range<>(new Murmur3Partitioner.LongToken(a), new Murmur3Partitioner.LongToken(b));
+    }
+
+    private static InetAddressAndPort address(int a, int b, int c, int d)
+    {
+        try
+        {
+            return InetAddressAndPort.getByAddress(new byte[] {(byte) a, (byte) b, (byte) c, (byte) d});
+        }
+        catch (UnknownHostException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
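+    // creates a coordinator-side repair state with default options and registers it with ActiveRepairService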
+    private static CoordinatorState coordinator()
+    {
+        RepairOption options = RepairOption.parse(Collections.emptyMap(), DatabaseDescriptor.getPartitioner());
+        CoordinatorState state = new CoordinatorState(0, "test", options);
+        ActiveRepairService.instance.register(state);
+        return state;
+    }
+
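+    // creates a session state under a fresh coordinator and registers it with that coordinator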
+    private static SessionState session()
+    {
+        CoordinatorState parent = coordinator();
+        SessionState state = new SessionState(parent.id, REPAIR_KS, new String[]{ REPAIR_TABLE }, COMMON_RANGE);
+        parent.register(state);
+        return state;
+    }
+
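+    // creates a job state for the session's single table and registers it with the session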
+    private static JobState job()
+    {
+        SessionState session = session();
+        JobState state = new JobState(new RepairJobDesc(session.parentRepairSession, session.id, session.keyspace, session.cfnames[0], session.commonRange.ranges), session.commonRange.endpoints);
+        session.register(state);
+        return state;
+    }
+
+    private ValidationState validation()
+    {
+        JobState job = job(); // job isn't needed but makes getting the descriptor easier
+        ParticipateState participate = participate();
+        ValidationState state = new ValidationState(job.desc, ADDRESSES.stream().findFirst().get());
+        participate.register(state);
+        return state;
+    }
+
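+    // creates a participant-side state covering the (0,42] range asserted above and registers it with ActiveRepairService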
+    private ParticipateState participate()
+    {
+        List<Range<Token>> ranges = Arrays.asList(new Range<>(new Murmur3Partitioner.LongToken(0), new Murmur3Partitioner.LongToken(42)));
+        ParticipateState state = new ParticipateState(FBUtilities.getBroadcastAddressAndPort(), new PrepareMessage(TimeUUID.Generator.nextTimeUUID(), Collections.emptyList(), ranges, true, 42, true, PreviewKind.ALL));
+        ActiveRepairService.instance.register(state);
+        return state;
+    }
+
+    private static String t(String string)
+    {
+        return String.format(string, KS_NAME);
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/db/virtual/NetworkPermissionsCacheKeysTableTest.java b/test/unit/org/apache/cassandra/db/virtual/NetworkPermissionsCacheKeysTableTest.java
new file mode 100644
index 0000000..f944884
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/NetworkPermissionsCacheKeysTableTest.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.DCPermissions;
+import org.apache.cassandra.auth.INetworkAuthorizer;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+
+public class NetworkPermissionsCacheKeysTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+
+    @SuppressWarnings("FieldCanBeLocal")
+    private NetworkPermissionsCacheKeysTable table;
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        // a high value is used for convenient debugging
+        DatabaseDescriptor.setPermissionsValidity(20_000);
+
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+
+        INetworkAuthorizer networkAuthorizer = DatabaseDescriptor.getNetworkAuthorizer();
+        networkAuthorizer.setRoleDatacenters(ROLE_A, DCPermissions.all());
+        networkAuthorizer.setRoleDatacenters(ROLE_B, DCPermissions.subset(DATA_CENTER, DATA_CENTER_REMOTE));
+    }
+
+    @Before
+    public void config()
+    {
+        table = new NetworkPermissionsCacheKeysTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+
+        // ensure nothing stays cached between tests
+        AuthenticatedUser.networkPermissionsCache.invalidate();
+        disablePreparedReuseForTest();
+    }
+
+    @AfterClass
+    public static void tearDownClass()
+    {
+        DatabaseDescriptor.setPermissionsValidity(DatabaseDescriptor.getRawConfig().permissions_validity.toMilliseconds());
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.network_permissions_cache_keys"));
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.network_permissions_cache_keys"),
+                row("role_a"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.network_permissions_cache_keys WHERE role='role_a'"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.network_permissions_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testDeletePartition() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("DELETE FROM vts.network_permissions_cache_keys WHERE role='role_a'");
+
+        assertRows(execute("SELECT * FROM vts.network_permissions_cache_keys"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testDeletePartitionWithInvalidValues() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+
+        execute("DELETE FROM vts.network_permissions_cache_keys WHERE role='invalid_role'");
+
+        assertRows(execute("SELECT * FROM vts.network_permissions_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testTruncateTable() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("TRUNCATE vts.network_permissions_cache_keys");
+
+        assertEmpty(execute("SELECT * FROM vts.network_permissions_cache_keys"));
+    }
+
+    @Test
+    public void testUnsupportedOperations() throws Throwable
+    {
+        // range tombstones are not supported; however, this table has no clustering columns, so they are not covered by these tests
+
+        // column deletion is not supported; however, this table has no regular columns, so it is not covered by these tests
+
+        // insert is not supported
+        assertInvalidMessage("Column modification is not supported by table vts.network_permissions_cache_keys",
+                "INSERT INTO vts.network_permissions_cache_keys (role) VALUES ('role_e')");
+
+        // update is not supported; however, this table has no regular columns, so it is not covered by these tests
+    }
+
+    private void cachePermissions(RoleResource roleResource)
+    {
+        AuthenticatedUser.networkPermissionsCache.get(roleResource);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/PermissionsCacheKeysTableTest.java b/test/unit/org/apache/cassandra/db/virtual/PermissionsCacheKeysTableTest.java
new file mode 100644
index 0000000..2171b2d
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/PermissionsCacheKeysTableTest.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.DataResource;
+import org.apache.cassandra.auth.IAuthorizer;
+import org.apache.cassandra.auth.IResource;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.Permission;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+
+public class PermissionsCacheKeysTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+
+    @SuppressWarnings("FieldCanBeLocal")
+    private PermissionsCacheKeysTable table;
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        // a high value is used for convenient debugging
+        DatabaseDescriptor.setPermissionsValidity(20_000);
+
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+
+        List<IResource> resources = Arrays.asList(
+                DataResource.root(),
+                DataResource.keyspace(KEYSPACE),
+                DataResource.table(KEYSPACE, "t1"));
+
+        IAuthorizer authorizer = DatabaseDescriptor.getAuthorizer();
+        for (IResource resource : resources)
+        {
+            Set<Permission> permissions = resource.applicablePermissions();
+            authorizer.grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_A);
+            authorizer.grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_B);
+        }
+    }
+
+    @Before
+    public void config()
+    {
+        table = new PermissionsCacheKeysTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+
+        // ensure nothing stays cached between tests
+        AuthenticatedUser.permissionsCache.invalidate();
+        disablePreparedReuseForTest();
+    }
+
+    @AfterClass
+    public static void tearDownClass()
+    {
+        DatabaseDescriptor.setPermissionsValidity(DatabaseDescriptor.getRawConfig().permissions_validity.toMilliseconds());
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.permissions_cache_keys"));
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissionsForResource(ROLE_A, DataResource.root());
+        cachePermissionsForResource(ROLE_A, DataResource.keyspace(KEYSPACE));
+        cachePermissionsForResource(ROLE_B, DataResource.table(KEYSPACE, "t1"));
+
+        assertRows(execute("SELECT * FROM vts.permissions_cache_keys"),
+                row("role_a", "data"),
+                row("role_a", "data/cql_test_keyspace"),
+                row("role_b", "data/cql_test_keyspace/t1"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.permissions_cache_keys WHERE role='role_a' AND resource='data'"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissionsForResource(ROLE_A, DataResource.root());
+        cachePermissionsForResource(ROLE_A, DataResource.keyspace(KEYSPACE));
+        cachePermissionsForResource(ROLE_B, DataResource.table(KEYSPACE, "t1"));
+
+        assertRows(execute("SELECT * FROM vts.permissions_cache_keys WHERE role='role_a' AND resource='data'"),
+                row("role_a", "data"));
+    }
+
+    @Test
+    public void testDeletePartition() throws Throwable
+    {
+        cachePermissionsForResource(ROLE_A, DataResource.root());
+        cachePermissionsForResource(ROLE_A, DataResource.keyspace(KEYSPACE));
+
+        execute("DELETE FROM vts.permissions_cache_keys WHERE role='role_a' AND resource='data'");
+
+        assertRows(execute("SELECT * FROM vts.permissions_cache_keys"),
+                row("role_a", "data/cql_test_keyspace"));
+    }
+
+    @Test
+    public void testDeletePartitionWithInvalidValues() throws Throwable
+    {
+        cachePermissionsForResource(ROLE_A, DataResource.root());
+
+        execute("DELETE FROM vts.permissions_cache_keys WHERE role='invalid_role' AND resource='data'");
+        execute("DELETE FROM vts.permissions_cache_keys WHERE role='role_a' AND resource='invalid_resource'");
+
+        assertRows(execute("SELECT * FROM vts.permissions_cache_keys WHERE role='role_a' AND resource='data'"),
+                row("role_a", "data"));
+    }
+
+    @Test
+    public void testTruncateTable() throws Throwable
+    {
+        cachePermissionsForResource(ROLE_A, DataResource.root());
+        cachePermissionsForResource(ROLE_B, DataResource.table(KEYSPACE, "t1"));
+
+        execute("TRUNCATE vts.permissions_cache_keys");
+
+        assertEmpty(execute("SELECT * FROM vts.permissions_cache_keys"));
+    }
+
+    @Test
+    public void testUnsupportedOperations() throws Throwable
+    {
+        // range tombstones are not supported; however, this table has no clustering columns, so they are not covered by these tests
+
+        // column deletion is not supported; however, this table has no regular columns, so it is not covered by these tests
+
+        // insert is not supported
+        assertInvalidMessage("Column modification is not supported by table vts.permissions_cache_keys",
+                "INSERT INTO vts.permissions_cache_keys (role, resource) VALUES ('role_e', 'data')");
+
+        // update is not supported; however, this table has no regular columns, so it is not covered by these tests
+    }
+
+    private void cachePermissionsForResource(RoleResource roleResource, IResource resource)
+    {
+        AuthenticatedUser role = new AuthenticatedUser(roleResource.getRoleName());
+        role.getPermissions(resource);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/RolesCacheKeysTableTest.java b/test/unit/org/apache/cassandra/db/virtual/RolesCacheKeysTableTest.java
new file mode 100644
index 0000000..40c3037
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/RolesCacheKeysTableTest.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.auth.Roles;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_C;
+
+public class RolesCacheKeysTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+
+    @SuppressWarnings("FieldCanBeLocal")
+    private RolesCacheKeysTable table;
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        // a high value is used for convenient debugging
+        DatabaseDescriptor.setRolesValidity(20_000);
+
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_C, AuthTestUtils.getLoginRoleOptions());
+
+        AuthTestUtils.grantRolesTo(roleManager, ROLE_A, ROLE_C);
+        AuthTestUtils.grantRolesTo(roleManager, ROLE_B, ROLE_C);
+    }
+
+    @Before
+    public void config()
+    {
+        table = new RolesCacheKeysTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+
+        // ensure nothing stays cached between tests
+        Roles.cache.invalidate();
+        disablePreparedReuseForTest();
+    }
+
+    @AfterClass
+    public static void tearDownClass()
+    {
+        DatabaseDescriptor.setRolesValidity(DatabaseDescriptor.getRawConfig().roles_validity.toMilliseconds());
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.roles_cache_keys"));
+    }
+
+    @Test
+    public void testSelectAllWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.roles_cache_keys"),
+                row("role_a"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreNotCached() throws Throwable
+    {
+        assertEmpty(execute("SELECT * FROM vts.roles_cache_keys WHERE role='role_a'"));
+    }
+
+    @Test
+    public void testSelectPartitionWhenPermissionsAreCached() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        assertRows(execute("SELECT * FROM vts.roles_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testDeletePartition() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("DELETE FROM vts.roles_cache_keys WHERE role='role_a'");
+
+        assertRows(execute("SELECT * FROM vts.roles_cache_keys"),
+                row("role_b"));
+    }
+
+    @Test
+    public void testDeletePartitionWithInvalidValues() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+
+        execute("DELETE FROM vts.roles_cache_keys WHERE role='invalid_role'");
+
+        assertRows(execute("SELECT * FROM vts.roles_cache_keys WHERE role='role_a'"),
+                row("role_a"));
+    }
+
+    @Test
+    public void testTruncateTable() throws Throwable
+    {
+        cachePermissions(ROLE_A);
+        cachePermissions(ROLE_B);
+
+        execute("TRUNCATE vts.roles_cache_keys");
+
+        assertEmpty(execute("SELECT * FROM vts.roles_cache_keys"));
+    }
+
+    @Test
+    public void testUnsupportedOperations() throws Throwable
+    {
+        // range tombstones are not supported; however, this table has no clustering columns, so they are not covered by these tests
+
+        // column deletion is not supported; however, this table has no regular columns, so it is not covered by these tests
+
+        // insert is not supported
+        assertInvalidMessage("Column modification is not supported by table vts.roles_cache_keys",
+                "INSERT INTO vts.roles_cache_keys (role) VALUES ('role_e')");
+
+        // update is not supported; however, this table has no regular columns, so it is not covered by these tests
+    }
+
+    private void cachePermissions(RoleResource roleResource)
+    {
+        Roles.getRoleDetails(roleResource);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/SSTableTasksTableTest.java b/test/unit/org/apache/cassandra/db/virtual/SSTableTasksTableTest.java
new file mode 100644
index 0000000..f642a2f
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/SSTableTasksTableTest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.virtual;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.compaction.CompactionInfo;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.MockSchema;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+public class SSTableTasksTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+
+    @SuppressWarnings("FieldCanBeLocal")
+    private SSTableTasksTable table;
+
+    @BeforeClass
+    public static void setUpClass()
+    {
+        CQLTester.setUpClass();
+        CompactionManager.instance.disableAutoCompaction();
+    }
+
+    @Before
+    public void config()
+    {
+        table = new SSTableTasksTable(KS_NAME);
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+        disablePreparedReuseForTest();
+    }
+
+    @Test
+    public void testSelectAll() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk int, ck int, PRIMARY KEY (pk, ck))");
+        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
+
+        long bytesCompacted = 123;
+        long bytesTotal = 123456;
+        TimeUUID compactionId = nextTimeUUID();
+        List<SSTableReader> sstables = IntStream.range(0, 10)
+                .mapToObj(i -> MockSchema.sstable(i, i * 10L, i * 10L + 9, cfs))
+                .collect(Collectors.toList());
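+        // a fake in-progress compaction reporting fixed byte counts, so the expected sstable_tasks row is deterministic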
+        CompactionInfo.Holder compactionHolder = new CompactionInfo.Holder()
+        {
+            public CompactionInfo getCompactionInfo()
+            {
+                return new CompactionInfo(cfs.metadata(), OperationType.COMPACTION, bytesCompacted, bytesTotal, compactionId, sstables);
+            }
+
+            public boolean isGlobal()
+            {
+                return false;
+            }
+        };
+
+        CompactionManager.instance.active.beginCompaction(compactionHolder);
+        UntypedResultSet result = execute("SELECT * FROM vts.sstable_tasks");
+        assertRows(result, row(CQLTester.KEYSPACE, currentTable(), compactionId, 1.0 * bytesCompacted / bytesTotal,
+                OperationType.COMPACTION.toString().toLowerCase(), bytesCompacted, sstables.size(),
+                bytesTotal, CompactionInfo.Unit.BYTES.toString()));
+
+        CompactionManager.instance.active.finishCompaction(compactionHolder);
+        result = execute("SELECT * FROM vts.sstable_tasks");
+        assertEmpty(result);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/virtual/SettingsTableTest.java b/test/unit/org/apache/cassandra/db/virtual/SettingsTableTest.java
index a2fda49..a5c69cd 100644
--- a/test/unit/org/apache/cassandra/db/virtual/SettingsTableTest.java
+++ b/test/unit/org/apache/cassandra/db/virtual/SettingsTableTest.java
@@ -18,10 +18,8 @@
 
 package org.apache.cassandra.db.virtual;
 
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 
 import com.google.common.collect.ImmutableList;
@@ -33,10 +31,12 @@
 import com.datastax.driver.core.ResultSet;
 import com.datastax.driver.core.Row;
 import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DurationSpec;
 import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption;
 import org.apache.cassandra.config.ParameterizedClass;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.security.SSLFactory;
+import org.yaml.snakeyaml.introspector.Property;
 
 public class SettingsTableTest extends CQLTester
 {
@@ -57,24 +57,14 @@
         config = new Config();
         config.client_encryption_options.applyConfig();
         config.server_encryption_options.applyConfig();
+        config.sstable_preemptive_open_interval = null;
+        config.index_summary_resize_interval = null;
+        config.cache_load_timeout = new DurationSpec.IntSecondsBound(0);
+        config.commitlog_sync_group_window = new DurationSpec.IntMillisecondsBound(0);
+        config.credentials_update_interval = null;
         table = new SettingsTable(KS_NAME, config);
         VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
-    }
-
-    private String getValue(Field f)
-    {
-        Object untypedValue = table.getValue(f);
-        String value = null;
-        if (untypedValue != null)
-        {
-            if (untypedValue.getClass().isArray())
-            {
-                value = Arrays.toString((Object[]) untypedValue);
-            }
-            else
-                value = untypedValue.toString();
-        }
-        return value;
+        disablePreparedReuseForTest();
     }
 
     @Test
@@ -87,26 +77,22 @@
         {
             i++;
             String name = r.getString("name");
-            Field f = SettingsTable.FIELDS.get(name);
-            if (f != null) // skip overrides
-                Assert.assertEquals(getValue(f), r.getString("value"));
+            Property prop = SettingsTable.PROPERTIES.get(name);
+            if (prop != null) // skip overrides
+                Assert.assertEquals(getValue(prop), r.getString("value"));
         }
-        Assert.assertTrue(SettingsTable.FIELDS.size() <= i);
+        Assert.assertTrue(SettingsTable.PROPERTIES.size() <= i);
     }
 
     @Test
     public void testSelectPartition() throws Throwable
     {
-        List<Field> fields = Arrays.stream(Config.class.getFields())
-                                   .filter(f -> !Modifier.isStatic(f.getModifiers()))
-                                   .collect(Collectors.toList());
-        for (Field f : fields)
+        for (Map.Entry<String, Property> e : SettingsTable.PROPERTIES.entrySet())
         {
-            if (table.overrides.containsKey(f.getName()))
-                continue;
-
-            String q = "SELECT * FROM vts.settings WHERE name = '"+f.getName()+'\'';
-            assertRowsNet(executeNet(q), new Object[] {f.getName(), getValue(f)});
+            String name = e.getKey();
+            Property prop = e.getValue();
+            String q = "SELECT * FROM vts.settings WHERE name = '"+name+'\'';
+            assertRowsNet(executeNet(q), new Object[] { name, getValue(prop) });
         }
     }
 
@@ -126,10 +112,59 @@
         assertRowsNet(executeNet(q));
     }
 
+    @Test
+    public void virtualTableBackwardCompatibility() throws Throwable
+    {
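+        // config() sets the new-style options to null or zero, so the legacy *_in_* names below should expose the converted values (-1 for null)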
+        // test NEGATIVE_MEBIBYTES_DATA_STORAGE_INT converter
+        String q = "SELECT * FROM vts.settings WHERE name = 'sstable_preemptive_open_interval';";
+        assertRowsNet(executeNet(q), new Object[] {"sstable_preemptive_open_interval", null});
+        q = "SELECT * FROM vts.settings WHERE name = 'sstable_preemptive_open_interval_in_mb';";
+        assertRowsNet(executeNet(q), new Object[] {"sstable_preemptive_open_interval_in_mb", "-1"});
+
+        // test MINUTES_CUSTOM_DURATION converter
+        q = "SELECT * FROM vts.settings WHERE name = 'index_summary_resize_interval';";
+        assertRowsNet(executeNet(q), new Object[] {"index_summary_resize_interval", null});
+        q = "SELECT * FROM vts.settings WHERE name = 'index_summary_resize_interval_in_minutes';";
+        assertRowsNet(executeNet(q), new Object[] {"index_summary_resize_interval_in_minutes", "-1"});
+
+        // test NEGATIVE_SECONDS_DURATION converter
+        q = "SELECT * FROM vts.settings WHERE name = 'cache_load_timeout';";
+        assertRowsNet(executeNet(q), new Object[] {"cache_load_timeout", "0s"});
+        q = "SELECT * FROM vts.settings WHERE name = 'cache_load_timeout_seconds';";
+        assertRowsNet(executeNet(q), new Object[] {"cache_load_timeout_seconds", "0"});
+
+        // test MILLIS_DURATION_DOUBLE converter
+        q = "SELECT * FROM vts.settings WHERE name = 'commitlog_sync_group_window';";
+        assertRowsNet(executeNet(q), new Object[] {"commitlog_sync_group_window", "0ms"});
+        q = "SELECT * FROM vts.settings WHERE name = 'commitlog_sync_group_window_in_ms';";
+        assertRowsNet(executeNet(q), new Object[] {"commitlog_sync_group_window_in_ms", "0.0"});
+
+        // test MILLIS_CUSTOM_DURATION converter
+        q = "SELECT * FROM vts.settings WHERE name = 'credentials_update_interval';";
+        assertRowsNet(executeNet(q), new Object[] {"credentials_update_interval", null});
+        q = "SELECT * FROM vts.settings WHERE name = 'credentials_update_interval_in_ms';";
+        assertRowsNet(executeNet(q), new Object[] {"credentials_update_interval_in_ms", "-1"});
+    }
+
+    private String getValue(Property prop)
+    {
+        Object v = prop.get(config);
+        if (v != null)
+            return v.toString();
+        return null;
+    }
+
     private void check(String setting, String expected) throws Throwable
     {
         String q = "SELECT * FROM vts.settings WHERE name = '"+setting+'\'';
-        assertRowsNet(executeNet(q), new Object[] {setting, expected});
+        try
+        {
+            assertRowsNet(executeNet(q), new Object[] {setting, expected});
+        }
+        catch (AssertionError e)
+        {
+            throw new AssertionError(e.getMessage() + " for query " + q);
+        }
     }
 
     @Test
@@ -140,7 +175,8 @@
         String all = "SELECT * FROM vts.settings WHERE " +
                      "name > 'server_encryption' AND name < 'server_encryptionz' ALLOW FILTERING";
 
-        Assert.assertEquals(9, executeNet(all).all().size());
+        List<String> expectedNames = SettingsTable.PROPERTIES.keySet().stream().filter(n -> n.startsWith("server_encryption")).collect(Collectors.toList());
+        Assert.assertEquals(expectedNames.size(), executeNet(all).all().size());
 
         check(pre + "algorithm", null);
         config.server_encryption_options = config.server_encryption_options.withAlgorithm("SUPERSSL");
@@ -150,6 +186,7 @@
         config.server_encryption_options = config.server_encryption_options.withCipherSuites("c1", "c2");
         check(pre + "cipher_suites", "[c1, c2]");
 
+        // name doesn't match yaml
         check(pre + "protocol", null);
         config.server_encryption_options = config.server_encryption_options.withProtocol("TLSv5");
         check(pre + "protocol", "[TLSv5]");
@@ -169,10 +206,12 @@
         config.server_encryption_options = config.server_encryption_options.withOptional(true);
         check(pre + "optional", "true");
 
+        // name doesn't match yaml
         check(pre + "client_auth", "false");
         config.server_encryption_options = config.server_encryption_options.withRequireClientAuth(true);
         check(pre + "client_auth", "true");
 
+        // name doesn't match yaml
         check(pre + "endpoint_verification", "false");
         config.server_encryption_options = config.server_encryption_options.withRequireEndpointVerification(true);
         check(pre + "endpoint_verification", "true");
@@ -182,6 +221,7 @@
         check(pre + "internode_encryption", "all");
         check(pre + "enabled", "true");
 
+        // name doesn't match yaml
         check(pre + "legacy_ssl_storage_port", "false");
         config.server_encryption_options = config.server_encryption_options.withLegacySslStoragePort(true);
         check(pre + "legacy_ssl_storage_port", "true");
@@ -196,9 +236,11 @@
                      "name > 'audit_logging' AND name < 'audit_loggingz' ALLOW FILTERING";
 
         config.audit_logging_options.enabled = true;
-        Assert.assertEquals(9, executeNet(all).all().size());
+        List<String> expectedNames = SettingsTable.PROPERTIES.keySet().stream().filter(n -> n.startsWith("audit_logging")).collect(Collectors.toList());
+        Assert.assertEquals(expectedNames.size(), executeNet(all).all().size());
         check(pre + "enabled", "true");
 
+        // name doesn't match yaml
         check(pre + "logger", "BinAuditLogger");
         config.audit_logging_options.logger = new ParameterizedClass("logger", null);
         check(pre + "logger", "logger");
@@ -241,7 +283,8 @@
                      "name < 'transparent_data_encryption_optionsz' ALLOW FILTERING";
 
         config.transparent_data_encryption_options.enabled = true;
-        Assert.assertEquals(4, executeNet(all).all().size());
+        List<String> expectedNames = SettingsTable.PROPERTIES.keySet().stream().filter(n -> n.startsWith("transparent_data_encryption_options")).collect(Collectors.toList());
+        Assert.assertEquals(expectedNames.size(), executeNet(all).all().size());
         check(pre + "enabled", "true");
 
         check(pre + "cipher", "AES/CBC/PKCS5Padding");
diff --git a/test/unit/org/apache/cassandra/db/virtual/StreamingVirtualTableTest.java b/test/unit/org/apache/cassandra/db/virtual/StreamingVirtualTableTest.java
new file mode 100644
index 0000000..c8e3d89
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/virtual/StreamingVirtualTableTest.java
@@ -0,0 +1,272 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.ThreadLocalRandom;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.streaming.ProgressInfo;
+import org.apache.cassandra.streaming.ProgressInfo.Direction;
+import org.apache.cassandra.streaming.SessionInfo;
+import org.apache.cassandra.streaming.StreamCoordinator;
+import org.apache.cassandra.streaming.StreamEvent;
+import org.apache.cassandra.streaming.StreamEvent.ProgressEvent;
+import org.apache.cassandra.streaming.StreamManager;
+import org.apache.cassandra.streaming.StreamOperation;
+import org.apache.cassandra.streaming.StreamResultFuture;
+import org.apache.cassandra.streaming.StreamSession;
+import org.apache.cassandra.streaming.StreamState;
+import org.apache.cassandra.streaming.StreamSummary;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.StreamingState;
+import org.apache.cassandra.utils.FBUtilities;
+import org.assertj.core.util.Throwables;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+public class StreamingVirtualTableTest extends CQLTester
+{
+    private static final String KS_NAME = "vts";
+    private static final InetAddressAndPort PEER1 = address(127, 0, 0, 1);
+    private static final InetAddressAndPort PEER2 = address(127, 0, 0, 2);
+    private static final InetAddressAndPort PEER3 = address(127, 0, 0, 3);
+    private static String TABLE_NAME;
+
+    @BeforeClass
+    public static void setup()
+    {
+        CQLTester.setUpClass();
+        StreamingVirtualTable table = new StreamingVirtualTable(KS_NAME);
+        TABLE_NAME = table.toString();
+        VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+    }
+
+    @Before
+    public void clearState()
+    {
+        StreamManager.instance.clearStates();
+    }
+
+    @Test
+    public void empty() throws Throwable
+    {
+        assertEmpty(execute(t("select * from %s")));
+    }
+
+    @Test
+    public void single() throws Throwable
+    {
+        StreamingState state = stream(true);
+        assertRows(execute(t("select id, follower, operation, peers, status, progress_percentage, last_updated_at, failure_cause, success_message from %s")),
+                   new Object[] { state.id(), true, "Repair", Collections.emptyList(), "init", 0F, new Date(state.lastUpdatedAtMillis()), null, null });
+
+        state.phase.start();
+        assertRows(execute(t("select id, follower, operation, peers, status, progress_percentage, last_updated_at, failure_cause, success_message from %s")),
+                   new Object[] { state.id(), true, "Repair", Collections.emptyList(), "start", 0F, new Date(state.lastUpdatedAtMillis()), null, null });
+
+        state.handleStreamEvent(new StreamEvent.SessionPreparedEvent(state.id(), new SessionInfo(PEER2, 1, PEER1, Collections.emptyList(), Collections.emptyList(), StreamSession.State.PREPARING), StreamSession.PrepareDirection.ACK));
+
+        state.onSuccess(new StreamState(state.id(), StreamOperation.REPAIR, ImmutableSet.of(new SessionInfo(PEER2, 1, PEER1, Collections.emptyList(), Collections.emptyList(), StreamSession.State.COMPLETE))));
+        assertRows(execute(t("select id, follower, operation, peers, status, progress_percentage, last_updated_at, failure_cause, success_message from %s")),
+                   new Object[] { state.id(), true, "Repair", Arrays.asList(address(127, 0, 0, 2).toString()), "success", 100F, new Date(state.lastUpdatedAtMillis()), null, null });
+    }
+
+    @Test
+    public void progressInitiator() throws Throwable
+    {
+        progress(false);
+    }
+
+    @Test
+    public void progressFollower() throws Throwable
+    {
+        progress(true);
+    }
+
+    public void progress(boolean follower) throws Throwable
+    {
+        StreamingState state = stream(follower);
+        StreamResultFuture future = state.future();
+        state.phase.start();
+
+        SessionInfo s1 = new SessionInfo(PEER2, 0, FBUtilities.getBroadcastAddressAndPort(), Arrays.asList(streamSummary()), Arrays.asList(streamSummary(), streamSummary()), StreamSession.State.PREPARING);
+        SessionInfo s2 = new SessionInfo(PEER3, 0, FBUtilities.getBroadcastAddressAndPort(), Arrays.asList(streamSummary()), Arrays.asList(streamSummary(), streamSummary()), StreamSession.State.PREPARING);
+
+        // we only update stats on ACK
+        state.handleStreamEvent(new StreamEvent.SessionPreparedEvent(state.id(), s1, StreamSession.PrepareDirection.ACK));
+        state.handleStreamEvent(new StreamEvent.SessionPreparedEvent(state.id(), s2, StreamSession.PrepareDirection.ACK));
+
+        long bytesToReceive = 0, bytesToSend = 0;
+        long filesToReceive = 0, filesToSend = 0;
+        for (SessionInfo s : Arrays.asList(s1, s2))
+        {
+            bytesToReceive += s.getTotalSizeToReceive();
+            bytesToSend += s.getTotalSizeToSend();
+            filesToReceive += s.getTotalFilesToReceive();
+            filesToSend += s.getTotalFilesToSend();
+        }
+        assertRows(execute(t("select id, follower, peers, status, progress_percentage, bytes_to_receive, bytes_received, bytes_to_send, bytes_sent, files_to_receive, files_received, files_to_send, files_sent from %s")),
+                   new Object[] { state.id(), follower, Arrays.asList(PEER2.toString(), PEER3.toString()), "start", 0F, bytesToReceive, 0L, bytesToSend, 0L, filesToReceive, 0L, filesToSend, 0L });
+
+        // update progress: send all but the last file of each summary
+        long bytesReceived = 0, bytesSent = 0;
+        long filesReceived = 0, filesSent = 0;
+        for (SessionInfo s : Arrays.asList(s1, s2))
+        {
+            List<StreamSummary> receiving = deterministic(s.receivingSummaries);
+            bytesReceived += progressEvent(state, s, receiving, Direction.IN);
+            filesReceived += receiving.stream().mapToInt(ss -> ss.files - 1).sum();
+
+            List<StreamSummary> sending = deterministic(s.sendingSummaries);
+            bytesSent += progressEvent(state, s, sending, Direction.OUT);
+            filesSent += sending.stream().mapToInt(ss -> ss.files - 1).sum();
+        }
+
+        assertRows(execute(t("select id, follower, peers, status, bytes_to_receive, bytes_received, bytes_to_send, bytes_sent, files_to_receive, files_received, files_to_send, files_sent from %s")),
+                   new Object[] { state.id(), follower, Arrays.asList(PEER2.toString(), PEER3.toString()), "start", bytesToReceive, bytesReceived, bytesToSend, bytesSent, filesToReceive, filesReceived, filesToSend, filesSent });
+
+        // finish
+        for (SessionInfo s : Arrays.asList(s1, s2))
+        {
+            // complete the rest
+            List<StreamSummary> receiving = deterministic(s.receivingSummaries);
+            bytesReceived += completeEvent(state, s, receiving, Direction.IN);
+            filesReceived += receiving.stream().mapToInt(ss -> ss.files - 1).sum();
+
+            List<StreamSummary> sending = deterministic(s.sendingSummaries);
+            bytesSent += completeEvent(state, s, sending, Direction.OUT);
+            filesSent += sending.stream().mapToInt(ss -> ss.files - 1).sum();
+        }
+
+        assertRows(execute(t("select id, follower, peers, status, progress_percentage, bytes_to_receive, bytes_received, bytes_to_send, bytes_sent, files_to_receive, files_received, files_to_send, files_sent from %s")),
+                   new Object[] { state.id(), follower, Arrays.asList(PEER2.toString(), PEER3.toString()), "start", 99F, bytesToReceive, bytesToReceive, bytesToSend, bytesToSend, filesToReceive, filesToReceive, filesToSend, filesToSend });
+
+        state.onSuccess(future.getCurrentState());
+        assertRows(execute(t("select id, follower, peers, status, progress_percentage, last_updated_at, failure_cause, success_message from %s")),
+                   new Object[] { state.id(), follower, Arrays.asList(PEER2.toString(), PEER3.toString()), "success", 100F, new Date(state.lastUpdatedAtMillis()), null, null });
+    }
+
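+    // reports every file except the last of each summary as fully transferred and returns the number of bytes reported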
+    private static long progressEvent(StreamingState state, SessionInfo s, List<StreamSummary> summaries, Direction direction)
+    {
+        long counter = 0;
+        for (StreamSummary summary : summaries)
+        {
+            long fileSize = summary.totalSize / summary.files;
+            for (int i = 0; i < summary.files - 1; i++)
+            {
+                String fileName = summary.tableId + "-" + direction.name().toLowerCase() + "-" + i;
+                state.handleStreamEvent(new ProgressEvent(state.id(), new ProgressInfo((InetAddressAndPort) s.peer, 0, fileName, direction, fileSize, fileSize, fileSize)));
+                counter += fileSize;
+            }
+        }
+        return counter;
+    }
+
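+    // reports the remaining (last) file of each summary as transferred and returns the number of bytes reported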
+    private static long completeEvent(StreamingState state, SessionInfo s, List<StreamSummary> summaries, Direction direction)
+    {
+        long counter = 0;
+        for (StreamSummary summary : summaries)
+        {
+            long fileSize = summary.totalSize / summary.files;
+            String fileName = summary.tableId + "-" + direction.name().toLowerCase() + "-" + summary.files;
+            state.handleStreamEvent(new ProgressEvent(state.id(), new ProgressInfo((InetAddressAndPort) s.peer, 0, fileName, direction, fileSize, fileSize, fileSize)));
+            counter += fileSize;
+        }
+        return counter;
+    }
+
+    private List<StreamSummary> deterministic(Collection<StreamSummary> summaries)
+    {
+        // SessionInfo uses an ImmutableSet, so copy the summaries into a list
+        List<StreamSummary> list = new ArrayList<>(summaries);
+        // sort so that every call with the same input returns the same order
+        // if duplicates exist, the object order may differ but the contents will match
+        Collections.sort(list, Comparator.comparing((StreamSummary a) -> a.tableId.asUUID())
+                                         .thenComparingInt(a -> a.files)
+                                         .thenComparingLong(a -> a.totalSize));
+        return list;
+    }
+
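+    // a random summary with between 2 and 9 files of 1024 bytes each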
+    private static StreamSummary streamSummary()
+    {
+        int files = ThreadLocalRandom.current().nextInt(2, 10);
+        return new StreamSummary(TableId.fromUUID(UUID.randomUUID()), files, files * 1024);
+    }
+
+    @Test
+    public void failed() throws Throwable
+    {
+        StreamingState state = stream(true);
+        RuntimeException t = new RuntimeException("You failed!");
+        state.onFailure(t);
+        assertRows(execute(t("select id, follower, peers, status, progress_percentage, last_updated_at, failure_cause, success_message from %s")),
+                   new Object[] { state.id(), true, Collections.emptyList(), "failure", 100F, new Date(state.lastUpdatedAtMillis()), Throwables.getStackTrace(t), null });
+    }
+
+    private static String t(String query)
+    {
+        return String.format(query, TABLE_NAME);
+    }
+
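+    // builds a repair stream future whose coordinator always reports active sessions, wraps it in a StreamingState and registers both with StreamManager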
+    private static StreamingState stream(boolean follower)
+    {
+        StreamResultFuture future = new StreamResultFuture(nextTimeUUID(), StreamOperation.REPAIR, new StreamCoordinator(StreamOperation.REPAIR, 0, StreamingChannel.Factory.Global.streamingFactory(), follower, false, null, null) {
+            // the initiator requires that active sessions exist, otherwise the future completes successfully right away.
+            @Override
+            public synchronized boolean hasActiveSessions()
+            {
+                return true;
+            }
+        });
+        StreamingState state = new StreamingState(future);
+        if (follower) StreamManager.instance.putFollowerStream(future);
+        else StreamManager.instance.putInitiatorStream(future);
+        StreamManager.instance.putStreamingState(state);
+        future.addEventListener(state);
+        return state;
+    }
+
+    private static InetAddressAndPort address(int a, int b, int c, int d)
+    {
+        try
+        {
+            return InetAddressAndPort.getByAddress(new byte[] {(byte) a, (byte) b, (byte) c, (byte) d});
+        }
+        catch (UnknownHostException e)
+        {
+            throw new AssertionError(e);
+        }
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/db/virtual/SystemPropertiesTableTest.java b/test/unit/org/apache/cassandra/db/virtual/SystemPropertiesTableTest.java
index 2ec0683..5242d55 100644
--- a/test/unit/org/apache/cassandra/db/virtual/SystemPropertiesTableTest.java
+++ b/test/unit/org/apache/cassandra/db/virtual/SystemPropertiesTableTest.java
@@ -56,6 +56,7 @@
     {
         table = new SystemPropertiesTable(KS_NAME);
         VirtualKeyspaceRegistry.instance.register(new VirtualKeyspace(KS_NAME, ImmutableList.of(table)));
+        disablePreparedReuseForTest();
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/dht/BootStrapperTest.java b/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
index 05d42cf..395ff40 100644
--- a/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
+++ b/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
@@ -74,7 +74,7 @@
     public void testSourceTargetComputation() throws UnknownHostException
     {
         final int[] clusterSizes = new int[] { 1, 3, 5, 10, 100};
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             int replicationFactor = Keyspace.open(keyspaceName).getReplicationStrategy().getReplicationFactor().allReplicas;
             for (int clusterSize : clusterSizes)
diff --git a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
index bd6f3d4..c4e5db8 100644
--- a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
+++ b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
@@ -172,4 +172,4 @@
     {
         return new PartitionerDefinedOrder(this);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/dht/RangeFetchMapCalculatorTest.java b/test/unit/org/apache/cassandra/dht/RangeFetchMapCalculatorTest.java
index 8574786..bb02f40 100644
--- a/test/unit/org/apache/cassandra/dht/RangeFetchMapCalculatorTest.java
+++ b/test/unit/org/apache/cassandra/dht/RangeFetchMapCalculatorTest.java
@@ -25,7 +25,6 @@
 import java.util.Collections;
 import java.util.Map;
 
-import com.google.common.base.Predicate;
 import com.google.common.collect.Multimap;
 import org.apache.cassandra.locator.EndpointsByRange;
 import org.junit.Assert;
@@ -70,7 +69,7 @@
 
             private int getIPLastPart(InetAddressAndPort endpoint)
             {
-                String str = endpoint.address.toString();
+                String str = endpoint.getAddress().toString();
                 int index = str.lastIndexOf(".");
                 return Integer.parseInt(str.substring(index + 1).trim());
             }
diff --git a/test/unit/org/apache/cassandra/dht/RangeTest.java b/test/unit/org/apache/cassandra/dht/RangeTest.java
index 7cdb788..84ca124 100644
--- a/test/unit/org/apache/cassandra/dht/RangeTest.java
+++ b/test/unit/org/apache/cassandra/dht/RangeTest.java
@@ -29,7 +29,6 @@
 import com.google.common.base.Joiner;
 import com.google.common.collect.Sets;
 import org.apache.commons.lang3.StringUtils;
-import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
diff --git a/test/unit/org/apache/cassandra/dht/StreamStateStoreTest.java b/test/unit/org/apache/cassandra/dht/StreamStateStoreTest.java
index b18d249..d731385 100644
--- a/test/unit/org/apache/cassandra/dht/StreamStateStoreTest.java
+++ b/test/unit/org/apache/cassandra/dht/StreamStateStoreTest.java
@@ -26,13 +26,14 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.streaming.DefaultConnectionFactory;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.StreamEvent;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.net.MessagingService.current_version;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
@@ -55,7 +56,7 @@
         Range<Token> range = new Range<>(factory.fromString("0"), factory.fromString("100"));
 
         InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
-        StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, local, new DefaultConnectionFactory(), false, 0, null, PreviewKind.NONE);
+        StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, local, new NettyStreamingConnectionFactory(), null, current_version, false, 0, null, PreviewKind.NONE);
         session.addStreamRequest("keyspace1", RangesAtEndpoint.toDummyList(Collections.singleton(range)), RangesAtEndpoint.toDummyList(Collections.emptyList()), Collections.singleton("cf"));
 
         StreamStateStore store = new StreamStateStore();
@@ -76,7 +77,7 @@
 
         // add different range within the same keyspace
         Range<Token> range2 = new Range<>(factory.fromString("100"), factory.fromString("200"));
-        session = new StreamSession(StreamOperation.BOOTSTRAP, local, new DefaultConnectionFactory(), false, 0, null, PreviewKind.NONE);
+        session = new StreamSession(StreamOperation.BOOTSTRAP, local, new NettyStreamingConnectionFactory(), null, current_version, false, 0, null, PreviewKind.NONE);
         session.addStreamRequest("keyspace1", RangesAtEndpoint.toDummyList(Collections.singleton(range2)), RangesAtEndpoint.toDummyList(Collections.emptyList()), Collections.singleton("cf"));
         session.state(StreamSession.State.COMPLETE);
         store.handleStreamEvent(new StreamEvent.SessionCompleteEvent(session));
diff --git a/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorGenerationsTest.java b/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorGenerationsTest.java
new file mode 100644
index 0000000..bed078b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorGenerationsTest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.dht.tokenallocator;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.RandomPartitioner;
+import org.apache.cassandra.tools.Util;
+
+import static org.apache.cassandra.dht.tokenallocator.OfflineTokenAllocator.allocate;
+import static org.apache.cassandra.dht.tokenallocator.OfflineTokenAllocatorTestUtils.assertTokensAndNodeCount;
+import static org.apache.cassandra.dht.tokenallocator.OfflineTokenAllocatorTestUtils.makeRackCountArray;
+
+
+/**
+ * We break the testTokenGenerations test out into its own class because it runs long and pushes the rest of the
+ * test suite past its timeout on both CI environments and local laptops.
+ */
+public class OfflineTokenAllocatorGenerationsTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(OfflineTokenAllocatorGenerationsTest.class);
+
+    @Before
+    public void setup()
+    {
+        Util.initDatabaseDescriptor();
+    }
+
+    // We run with a subset of even, odd, boundary, etc. combinations; we can't afford to walk through every value
+    // of each parameter we test, as the tests would end up taking too long and timing out.
+    private final int[] racks = { 1, 2, 3, 5, 6, 9, 10 };
+    private final int[] rfs = { 1, 2, 3, 5 };
+    private final int[] tokens = { 1, 2, 3, 5, 6, 9, 10, 13, 15, 16 };
+
+    /**
+     * Cycle through a matrix of valid ranges.
+     */
+    @Test
+    public void testTokenGenerations()
+    {
+        for (int numTokens : tokens)
+        {
+            for (int rf : rfs)
+            {
+                int nodeCount = 32;
+                for (int rack: racks)
+                {
+                    int[] nodeToRack = makeRackCountArray(nodeCount, rack);
+                    for (IPartitioner partitioner : new IPartitioner[] { Murmur3Partitioner.instance, RandomPartitioner.instance })
+                    {
+                        logger.info("Testing offline token allocator for numTokens={}, rf={}, racks={}, nodeToRack={}, partitioner={}",
+                                    numTokens, rf, rack, nodeToRack, partitioner);
+                        assertTokensAndNodeCount(numTokens, nodeCount, allocate(rf,
+                                                                                numTokens,
+                                                                                nodeToRack,
+                                                                                new OfflineTokenAllocatorTestUtils.SystemOutputImpl(rf, rack),
+                                                                                partitioner));
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorTest.java b/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorTest.java
index 832d13d..4b2e04d 100644
--- a/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorTest.java
+++ b/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorTest.java
@@ -18,37 +18,22 @@
 
 package org.apache.cassandra.dht.tokenallocator;
 
-import java.util.Collection;
 import java.util.List;
 
-import com.google.common.collect.Lists;
-
-import org.assertj.core.api.Assertions;
-
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.cassandra.dht.ByteOrderedPartitioner;
-import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
-import org.apache.cassandra.dht.RandomPartitioner;
-import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.tools.Util;
-import org.apache.cassandra.utils.OutputHandler;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
 import static org.apache.cassandra.dht.tokenallocator.OfflineTokenAllocator.allocate;
+import static org.apache.cassandra.dht.tokenallocator.OfflineTokenAllocatorTestUtils.FAIL_ON_WARN_OUTPUT;
+import static org.apache.cassandra.dht.tokenallocator.OfflineTokenAllocatorTestUtils.assertTokensAndNodeCount;
 
 public class OfflineTokenAllocatorTest
 {
-    private static final Logger logger = LoggerFactory.getLogger(OfflineTokenAllocatorTest.class);
-    private static final OutputHandler FAIL_ON_WARN_OUTPUT = new SystemOutputImpl();
-
     @Before
     public void setup()
     {
@@ -62,63 +47,6 @@
         Assert.assertEquals(3, nodes.size());
     }
 
-    /**
-     * Cycle through a matrix of valid ranges.
-     */
-    @Test
-    public void testTokenGenerations()
-    {
-        for (int numTokens = 1; numTokens <= 16 ; ++numTokens)
-        {
-            for (int rf = 1; rf <=5; ++rf)
-            {
-                int nodeCount = 32;
-                for (int racks = 1; racks <= 10; ++racks)
-                {
-                    int[] nodeToRack = makeRackCountArray(nodeCount, racks);
-                    for (IPartitioner partitioner : new IPartitioner[] { Murmur3Partitioner.instance, RandomPartitioner.instance })
-                    {
-                        logger.info("Testing offline token allocator for numTokens={}, rf={}, racks={}, nodeToRack={}, partitioner={}",
-                                    numTokens, rf, racks, nodeToRack, partitioner);
-                        assertTokensAndNodeCount(numTokens, nodeCount, allocate(rf,
-                                                                                numTokens,
-                                                                                nodeToRack,
-                                                                                new SystemOutputImpl(rf, racks),
-                                                                                partitioner));
-                    }
-                }
-            }
-        }
-    }
-
-    private void assertTokensAndNodeCount(int numTokens, int nodeCount, List<OfflineTokenAllocator.FakeNode> nodes)
-    {
-        assertEquals(nodeCount, nodes.size());
-        Collection<Token> allTokens = Lists.newArrayList();
-        for (OfflineTokenAllocator.FakeNode node : nodes)
-        {
-            Assertions.assertThat(node.tokens()).hasSize(numTokens);
-            Assertions.assertThat(allTokens).doesNotContainAnyElementsOf(node.tokens());
-            allTokens.addAll(node.tokens());
-        }
-    }
-
-    private static int[] makeRackCountArray(int nodes, int racks)
-    {
-        assert nodes > 0;
-        assert racks > 0;
-        // Distribute nodes among the racks in round-robin fashion in the order the user is supposed to start them.
-        int[] rackCounts = new int[racks];
-        int rack = 0;
-        for (int node = 0; node < nodes; node++)
-        {
-            rackCounts[rack]++;
-            if (++rack == racks)
-                rack = 0;
-        }
-        return rackCounts;
-    }
-
     @Test(expected = IllegalArgumentException.class)
     public void testTokenGenerator_more_rf_than_racks()
     {
@@ -180,43 +108,4 @@
                                                         FAIL_ON_WARN_OUTPUT,
                                                         Murmur3Partitioner.instance));
     }
-
-    private static class SystemOutputImpl extends OutputHandler.SystemOutput
-    {
-        private final int rf;
-        private final int racks;
-
-        private SystemOutputImpl()
-        {
-            super(true, true);
-            rf = racks = 1;
-        }
-
-        private SystemOutputImpl(int rf, int racks)
-        {
-            super(true, true);
-            this.rf = rf;
-            this.racks = racks;
-        }
-
-        @Override
-        public void warn(String msg)
-        {
-            // We can only guarantee that ownership stdev won't increase above the warn threshold for racks==1 or racks==rf
-            if (racks == 1 || racks == rf)
-                fail(msg);
-            else
-                super.warn(msg);
-        }
-
-        @Override
-        public void warn(String msg, Throwable th)
-        {
-            // We can only guarantee that ownership stdev won't increase above the warn threshold for racks==1 or racks==rf
-            if (racks == 1 || racks == rf)
-                fail(msg);
-            else
-                super.warn(msg, th);
-        }
-    }
 }
diff --git a/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorTestUtils.java b/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorTestUtils.java
new file mode 100644
index 0000000..e580461
--- /dev/null
+++ b/test/unit/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocatorTestUtils.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.dht.tokenallocator;
+
+import java.util.Collection;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.utils.OutputHandler;
+import org.assertj.core.api.Assertions;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class OfflineTokenAllocatorTestUtils
+{
+    static final OutputHandler FAIL_ON_WARN_OUTPUT = new SystemOutputImpl();
+
+    static void assertTokensAndNodeCount(int numTokens, int nodeCount, List<OfflineTokenAllocator.FakeNode> nodes)
+    {
+        assertEquals(nodeCount, nodes.size());
+        Collection<Token> allTokens = Lists.newArrayList();
+        for (OfflineTokenAllocator.FakeNode node : nodes)
+        {
+            Assertions.assertThat(node.tokens()).hasSize(numTokens);
+            Assertions.assertThat(allTokens).doesNotContainAnyElementsOf(node.tokens());
+            allTokens.addAll(node.tokens());
+        }
+    }
+
+    static int[] makeRackCountArray(int nodes, int racks)
+    {
+        assert nodes > 0;
+        assert racks > 0;
+        // Distribute nodes among the racks in round-robin fashion in the order the user is supposed to start them.
+        int[] rackCounts = new int[racks];
+        int rack = 0;
+        for (int node = 0; node < nodes; node++)
+        {
+            rackCounts[rack]++;
+            if (++rack == racks)
+                rack = 0;
+        }
+        return rackCounts;
+    }
+
+    static class SystemOutputImpl extends OutputHandler.SystemOutput
+    {
+        final int rf;
+        final int racks;
+
+        SystemOutputImpl()
+        {
+            super(true, true);
+            rf = racks = 1;
+        }
+
+        SystemOutputImpl(int rf, int racks)
+        {
+            super(true, true);
+            this.rf = rf;
+            this.racks = racks;
+        }
+
+        @Override
+        public void warn(String msg)
+        {
+            // We can only guarantee that ownership stdev won't increase above the warn threshold for racks==1 or racks==rf
+            if (racks == 1 || racks == rf)
+                fail(msg);
+            else
+                super.warn(msg);
+        }
+
+        @Override
+        public void warn(String msg, Throwable th)
+        {
+            // We can only guarantee that ownership stdev won't increase above the warn threshold for racks==1 or racks==rf
+            if (racks == 1 || racks == rf)
+                fail(msg);
+            else
+                super.warn(msg, th);
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java b/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java
index 712f049..799484f 100644
--- a/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java
+++ b/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.fql;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.nio.file.Path;
 import java.util.ArrayList;
@@ -30,6 +29,7 @@
 
 import javax.annotation.Nullable;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.StringUtils;
 import org.junit.After;
 import org.junit.BeforeClass;
@@ -136,42 +136,42 @@
     @Test(expected = IllegalArgumentException.class)
     public void testCanRead() throws Exception
     {
-        tempDir.toFile().setReadable(false);
+        new File(tempDir).trySetReadable(false);
         try
         {
             configureFQL();
         }
         finally
         {
-            tempDir.toFile().setReadable(true);
+            new File(tempDir).trySetReadable(true);
         }
     }
 
     @Test(expected = IllegalArgumentException.class)
     public void testCanWrite() throws Exception
     {
-        tempDir.toFile().setWritable(false);
+        new File(tempDir).trySetWritable(false);
         try
         {
             configureFQL();
         }
         finally
         {
-            tempDir.toFile().setWritable(true);
+            new File(tempDir).trySetWritable(true);
         }
     }
 
     @Test(expected = IllegalArgumentException.class)
     public void testCanExecute() throws Exception
     {
-        tempDir.toFile().setExecutable(false);
+        new File(tempDir).trySetExecutable(false);
         try
         {
             configureFQL();
         }
         finally
         {
-            tempDir.toFile().setExecutable(true);
+            new File(tempDir).trySetExecutable(true);
         }
     }
 
@@ -196,10 +196,10 @@
     public void testResetCleansPaths() throws Exception
     {
         configureFQL();
-        File tempA = File.createTempFile("foo", "bar", tempDir.toFile());
+        File tempA = FileUtils.createTempFile("foo", "bar", new File(tempDir));
         assertTrue(tempA.exists());
-        File tempB = File.createTempFile("foo", "bar", BinLogTest.tempDir().toFile());
-        FullQueryLogger.instance.reset(tempB.getParent());
+        File tempB = FileUtils.createTempFile("foo", "bar", new File(BinLogTest.tempDir()));
+        FullQueryLogger.instance.reset(tempB.parentPath());
         assertFalse(tempA.exists());
         assertFalse(tempB.exists());
     }
@@ -211,9 +211,9 @@
     public void testResetSamePath() throws Exception
     {
         configureFQL();
-        File tempA = File.createTempFile("foo", "bar", tempDir.toFile());
+        File tempA = FileUtils.createTempFile("foo", "bar", new File(tempDir));
         assertTrue(tempA.exists());
-        FullQueryLogger.instance.reset(tempA.getParent());
+        FullQueryLogger.instance.reset(tempA.parentPath());
         assertFalse(tempA.exists());
     }
 
@@ -227,10 +227,10 @@
     @Test
     public void testCleansDirectory() throws Exception
     {
-        assertTrue(new File(tempDir.toFile(), "foobar").createNewFile());
+        assertTrue(new File(tempDir, "foobar").createFileIfNotExists());
         configureFQL();
-        assertEquals(tempDir.toFile().listFiles().length, 1);
-        assertEquals("metadata.cq4t", tempDir.toFile().listFiles()[0].getName());
+        assertEquals(new File(tempDir).tryList().length, 1);
+        assertEquals("metadata.cq4t", new File(tempDir).tryList()[0].name());
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/gms/ArrivalWindowTest.java b/test/unit/org/apache/cassandra/gms/ArrivalWindowTest.java
index ea59300..3a07ea3 100644
--- a/test/unit/org/apache/cassandra/gms/ArrivalWindowTest.java
+++ b/test/unit/org/apache/cassandra/gms/ArrivalWindowTest.java
@@ -23,13 +23,21 @@
 
 import static org.junit.Assert.*;
 
+import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
 
 public class ArrivalWindowTest
 {
+    @BeforeClass
+    public static void beforeClass()
+    {
+        DatabaseDescriptor.setDefaultFailureDetector();
+    }
+
     @Test
     public void testWithNanoTime()
     {
diff --git a/test/unit/org/apache/cassandra/gms/EndpointStateTest.java b/test/unit/org/apache/cassandra/gms/EndpointStateTest.java
index 103653e..aa57d80 100644
--- a/test/unit/org/apache/cassandra/gms/EndpointStateTest.java
+++ b/test/unit/org/apache/cassandra/gms/EndpointStateTest.java
@@ -33,7 +33,6 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.dht.Token;
-import org.apache.cassandra.locator.InetAddressAndPort;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
diff --git a/test/unit/org/apache/cassandra/gms/GossiperTest.java b/test/unit/org/apache/cassandra/gms/GossiperTest.java
index a48b5fb..68841ea 100644
--- a/test/unit/org/apache/cassandra/gms/GossiperTest.java
+++ b/test/unit/org/apache/cassandra/gms/GossiperTest.java
@@ -47,6 +47,7 @@
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.CassandraVersion;
+import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -63,6 +64,8 @@
         CommitLog.instance.start();
     }
 
+    private static final CassandraVersion CURRENT_VERSION = new CassandraVersion(FBUtilities.getReleaseVersionString());
+
     static final IPartitioner partitioner = new RandomPartitioner();
     StorageService ss = StorageService.instance;
     TokenMetadata tmd = StorageService.instance.getTokenMetadata();
@@ -116,7 +119,7 @@
 
         VersionedValue.VersionedValueFactory factory = new VersionedValue.VersionedValueFactory(null);
         EndpointState es = new EndpointState((HeartBeatState) null);
-        es.addApplicationState(ApplicationState.RELEASE_VERSION, factory.releaseVersion(SystemKeyspace.CURRENT_VERSION.toString()));
+        es.addApplicationState(ApplicationState.RELEASE_VERSION, factory.releaseVersion(CURRENT_VERSION.toString()));
         Gossiper.instance.endpointStateMap.put(InetAddressAndPort.getByName("127.0.0.1"), es);
         Gossiper.instance.liveEndpoints.add(InetAddressAndPort.getByName("127.0.0.1"));
 
@@ -271,12 +274,12 @@
         {
             gossiper.seeds.add(addr);
             nextSeeds.add(addr);
-            addr = InetAddressAndPort.getByAddress(InetAddresses.increment(addr.address));
+            addr = InetAddressAndPort.getByAddress(InetAddresses.increment(addr.getAddress()));
         }
         Assert.assertEquals(nextSize, gossiper.seeds.size());
 
         // Add another unique address to the list
-        addr = InetAddressAndPort.getByAddress(InetAddresses.increment(addr.address));
+        addr = InetAddressAndPort.getByAddress(InetAddresses.increment(addr.getAddress()));
         nextSeeds.add(addr);
         nextSize++;
         DatabaseDescriptor.setSeedProvider(new TestSeedProvider(nextSeeds));
@@ -315,7 +318,7 @@
         for (int i = 0; i < disjointSize; i ++)
         {
             disjointSeeds.add(addr);
-            addr = InetAddressAndPort.getByAddress(InetAddresses.increment(addr.address));
+            addr = InetAddressAndPort.getByAddress(InetAddresses.increment(addr.getAddress()));
         }
         DatabaseDescriptor.setSeedProvider(new TestSeedProvider(disjointSeeds));
         loadedList = gossiper.reloadSeeds();
diff --git a/test/unit/org/apache/cassandra/gms/SerializationsTest.java b/test/unit/org/apache/cassandra/gms/SerializationsTest.java
index 90ce10b..0422ac0 100644
--- a/test/unit/org/apache/cassandra/gms/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/gms/SerializationsTest.java
@@ -22,8 +22,8 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
-import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
@@ -68,7 +68,7 @@
         if (EXECUTE_WRITES)
             testEndpointStateWrite();
 
-        DataInputStreamPlus in = getInput("gms.EndpointState.bin");
+        FileInputStreamPlus in = getInput("gms.EndpointState.bin");
         assert HeartBeatState.serializer.deserialize(in, getVersion()) != null;
         assert EndpointState.serializer.deserialize(in, getVersion()) != null;
         assert VersionedValue.serializer.deserialize(in, getVersion()) != null;
@@ -110,7 +110,7 @@
             testGossipDigestWrite();
 
         int count = 0;
-        DataInputStreamPlus in = getInput("gms.Gossip.bin");
+        FileInputStreamPlus in = getInput("gms.Gossip.bin");
         while (count < Statics.Digests.size())
             assert GossipDigestAck2.serializer.deserialize(in, getVersion()) != null;
         assert GossipDigestAck.serializer.deserialize(in, getVersion()) != null;
diff --git a/test/unit/org/apache/cassandra/hints/AlteredHints.java b/test/unit/org/apache/cassandra/hints/AlteredHints.java
index 9b8e32f..4bb6178 100644
--- a/test/unit/org/apache/cassandra/hints/AlteredHints.java
+++ b/test/unit/org/apache/cassandra/hints/AlteredHints.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -28,6 +27,7 @@
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 
@@ -37,7 +37,6 @@
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.RowUpdateBuilder;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.utils.UUIDGen;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 
@@ -81,11 +80,11 @@
         int bufferSize = HintsWriteExecutor.WRITE_BUFFER_SIZE;
         List<Hint> hints = new LinkedList<>();
 
-        UUID hostId = UUIDGen.getTimeUUID();
+        UUID hostId = UUID.randomUUID();
         long ts = System.currentTimeMillis();
 
         HintsDescriptor descriptor = new HintsDescriptor(hostId, ts, params());
-        File dir = Files.createTempDir();
+        File dir = new File(Files.createTempDir());
         try (HintsWriter writer = HintsWriter.create(dir, descriptor))
         {
             Assert.assertTrue(looksLegit(writer));
@@ -103,7 +102,7 @@
             }
         }
 
-        try (HintsReader reader = HintsReader.open(new File(dir, descriptor.fileName())))
+        try (HintsReader reader = HintsReader.open(descriptor.file(dir)))
         {
             Assert.assertTrue(looksLegit(reader.getInput()));
             List<Hint> deserialized = new ArrayList<>(hintNum);
diff --git a/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java b/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java
index 9f4cdfb..cc29163 100644
--- a/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java
+++ b/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java
@@ -17,13 +17,12 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
 import java.util.zip.CRC32;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -35,9 +34,10 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class ChecksummedDataInputTest
 {
@@ -104,12 +104,12 @@
             assertEquals(127, reader.read());
             byte[] bytes = new byte[b.length];
             reader.readFully(bytes);
-            assertTrue(Arrays.equals(bytes, b));
-            assertEquals(false, reader.readBoolean());
+            assertArrayEquals(bytes, b);
+            assertFalse(reader.readBoolean());
             assertEquals(10, reader.readByte());
             assertEquals('t', reader.readChar());
-            assertEquals(3.3, reader.readDouble());
-            assertEquals(2.2f, reader.readFloat());
+            assertEquals(3.3, reader.readDouble(), 0.0);
+            assertEquals(2.2f, reader.readFloat(), 0.0);
             assertEquals(42, reader.readInt());
             assertEquals(Long.MAX_VALUE, reader.readLong());
             assertEquals(Short.MIN_VALUE, reader.readShort());
@@ -176,14 +176,14 @@
 
             // assert that we read all the right values back
             assertEquals(127, reader.read());
-            assertEquals(false, reader.readBoolean());
+            assertFalse(reader.readBoolean());
             assertEquals(10, reader.readByte());
             assertEquals('t', reader.readChar());
             assertTrue(reader.checkCrc());
 
             reader.resetCrc();
-            assertEquals(3.3, reader.readDouble());
-            assertEquals(2.2f, reader.readFloat());
+            assertEquals(3.3, reader.readDouble(), 0.0);
+            assertEquals(2.2f, reader.readFloat(), 0.0);
             assertEquals(42, reader.readInt());
             assertTrue(reader.checkCrc());
             assertTrue(reader.isEOF());
@@ -232,7 +232,7 @@
 
             // assert that we read all the right values back
             assertEquals(127, reader.read());
-            assertEquals(false, reader.readBoolean());
+            assertFalse(reader.readBoolean());
             assertEquals(10, reader.readByte());
             assertEquals('t', reader.readChar());
             assertFalse(reader.checkCrc());
diff --git a/test/unit/org/apache/cassandra/hints/DTestSerializer.java b/test/unit/org/apache/cassandra/hints/DTestSerializer.java
index 1e308dc..61bd77c 100644
--- a/test/unit/org/apache/cassandra/hints/DTestSerializer.java
+++ b/test/unit/org/apache/cassandra/hints/DTestSerializer.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.UUID;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Ints;
 
 import org.apache.cassandra.db.TypeSizes;
diff --git a/test/unit/org/apache/cassandra/hints/HintServiceBytemanTest.java b/test/unit/org/apache/cassandra/hints/HintServiceBytemanTest.java
new file mode 100644
index 0000000..b7f431d
--- /dev/null
+++ b/test/unit/org/apache/cassandra/hints/HintServiceBytemanTest.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.hints;
+
+import java.time.Duration;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.MockMessagingService;
+import org.apache.cassandra.net.MockMessagingSpy;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
+import org.awaitility.Awaitility;
+import org.jboss.byteman.contrib.bmunit.BMRule;
+import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
+
+import static org.apache.cassandra.hints.HintsTestUtil.MockFailureDetector;
+import static org.apache.cassandra.hints.HintsTestUtil.sendHintsAndResponses;
+import static org.junit.Assert.assertEquals;
+
+@RunWith(BMUnitRunner.class)
+public class HintServiceBytemanTest
+{
+    private static final String KEYSPACE = "hints_service_test";
+    private static final String TABLE = "table";
+
+    private final MockFailureDetector failureDetector = new MockFailureDetector();
+    private static TableMetadata metadata;
+
+    @BeforeClass
+    public static void defineSchema()
+    {
+        SchemaLoader.prepareServer();
+        StorageService.instance.initServer();
+        SchemaLoader.createKeyspace(KEYSPACE,
+                                    KeyspaceParams.simple(1),
+                                    SchemaLoader.standardCFMD(KEYSPACE, TABLE));
+        metadata = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+    }
+
+    @After
+    public void cleanup()
+    {
+        MockMessagingService.cleanup();
+    }
+
+    @Before
+    public void reinstantiateService() throws Throwable
+    {
+        MessagingService.instance().inboundSink.clear();
+        MessagingService.instance().outboundSink.clear();
+
+        if (!HintsService.instance.isShutDown())
+        {
+            HintsService.instance.shutdownBlocking();
+            HintsService.instance.deleteAllHints();
+        }
+
+        failureDetector.isAlive = true;
+
+        HintsService.instance = new HintsService(failureDetector);
+
+        HintsService.instance.startDispatch();
+    }
+
+    @Test
+    @BMRule(name = "Delay delivering hints",
+    targetClass = "DispatchHintsTask",
+    targetMethod = "run",
+    action = "Thread.sleep(DatabaseDescriptor.getHintsFlushPeriodInMS() * 3L)")
+    public void testListPendingHints() throws InterruptedException, ExecutionException
+    {
+        HintsService.instance.resumeDispatch();
+        MockMessagingSpy spy = sendHintsAndResponses(metadata, 20000, -1);
+        Awaitility.await("For the hints file to flush")
+                  .atMost(Duration.ofMillis(DatabaseDescriptor.getHintsFlushPeriodInMS() * 2L))
+                  .until(() -> !HintsService.instance.getPendingHints().isEmpty());
+
+        List<PendingHintsInfo> pendingHints = HintsService.instance.getPendingHintsInfo();
+        assertEquals(1, pendingHints.size());
+        PendingHintsInfo info = pendingHints.get(0);
+        assertEquals(StorageService.instance.getLocalHostUUID(), info.hostId);
+        assertEquals(1, info.totalFiles);
+        assertEquals(info.oldestTimestamp, info.newestTimestamp); // there is 1 descriptor with only 1 timestamp
+
+        spy.interceptMessageOut(20000).get();
+        assertEquals(Collections.emptyList(), HintsService.instance.getPendingHints());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/hints/HintTest.java b/test/unit/org/apache/cassandra/hints/HintTest.java
index e3e26d0..c762973 100644
--- a/test/unit/org/apache/cassandra/hints/HintTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintTest.java
@@ -18,8 +18,8 @@
 package org.apache.cassandra.hints;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.ImmutableList;
 
@@ -46,13 +46,14 @@
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static junit.framework.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 
 import static org.apache.cassandra.Util.dk;
 import static org.apache.cassandra.hints.HintsTestUtil.assertHintsEqual;
@@ -87,7 +88,7 @@
         tokenMeta.updateNormalTokens(BootStrapper.getRandomTokens(tokenMeta, 1), local);
 
         for (TableMetadata table : Schema.instance.getTablesAndViews(KEYSPACE))
-            MigrationManager.announceTableUpdate(table.unbuild().gcGraceSeconds(864000).build(), true);
+            SchemaTestUtil.announceTableUpdate(table.unbuild().gcGraceSeconds(864000).build());
     }
 
     @Test
@@ -177,7 +178,7 @@
                   .unbuild()
                   .gcGraceSeconds(0)
                   .build();
-        MigrationManager.announceTableUpdate(updated, true);
+        SchemaTestUtil.announceTableUpdate(updated);
 
         Mutation mutation = createMutation(key, now);
         Hint.create(mutation, now / 1000).apply();
@@ -206,7 +207,7 @@
                   .unbuild()
                   .gcGraceSeconds(0)
                   .build();
-        MigrationManager.announceTableUpdate(updated, true);
+        SchemaTestUtil.announceTableUpdate(updated);
 
         Mutation mutation = createMutation(key, now);
         Hint hint = Hint.create(mutation, now / 1000);
@@ -306,6 +307,26 @@
         }
     }
 
+    @Test
+    public void testCalculateHintExpiration()
+    {
+        // create a hint with gcgs
+        long now = FBUtilities.timestampMicros();
+        long nowInMillis = TimeUnit.MICROSECONDS.toMillis(now);
+        int gcgs = 10; // smaller than the mutation's default gcgs
+        String key = "testExpiration";
+        Mutation mutation = createMutation(key, now);
+        // create a hint with explicit small gcgs
+        Hint hint = Hint.create(mutation, nowInMillis, gcgs);
+        assertEquals(nowInMillis + TimeUnit.SECONDS.toMillis(gcgs),
+                     hint.expirationInMillis());
+
+        // create a hint with mutation's gcgs.
+        hint = Hint.create(mutation, nowInMillis);
+        assertEquals(nowInMillis + TimeUnit.SECONDS.toMillis(mutation.smallestGCGS()),
+                     hint.expirationInMillis());
+    }
+
     private static Mutation createMutation(String key, long now)
     {
         Mutation.SimpleBuilder builder = Mutation.simpleBuilder(KEYSPACE, dk(key));
diff --git a/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java b/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java
index 21dbd7e..e24ff76 100644
--- a/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java
@@ -18,15 +18,16 @@
 
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -42,7 +43,6 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
 
 public class HintWriteTTLTest
 {
@@ -94,8 +94,8 @@
         ttldHint = makeHint(tbm, 2, nowInSeconds - (TTL + 1), GC_GRACE);
 
 
-        File directory = Files.createTempDirectory(null).toFile();
-        HintsDescriptor descriptor = new HintsDescriptor(UUIDGen.getTimeUUID(), s2m(nowInSeconds));
+        File directory = new File(Files.createTempDirectory(null));
+        HintsDescriptor descriptor = new HintsDescriptor(UUID.randomUUID(), s2m(nowInSeconds));
 
         try (HintsWriter writer = HintsWriter.create(directory, descriptor);
              HintsWriter.Session session = writer.newSession(ByteBuffer.allocate(1024)))
diff --git a/test/unit/org/apache/cassandra/hints/HintsBufferPoolTest.java b/test/unit/org/apache/cassandra/hints/HintsBufferPoolTest.java
index 1374d80..d658c88 100644
--- a/test/unit/org/apache/cassandra/hints/HintsBufferPoolTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsBufferPoolTest.java
@@ -26,12 +26,12 @@
 
 import com.google.common.collect.ImmutableList;
 
-import static junit.framework.Assert.*;
-
 import java.util.Queue;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentLinkedQueue;
 
+import static org.junit.Assert.assertTrue;
+
 @RunWith(BMUnitRunner.class)
 public class HintsBufferPoolTest
 {
diff --git a/test/unit/org/apache/cassandra/hints/HintsBufferTest.java b/test/unit/org/apache/cassandra/hints/HintsBufferTest.java
index 0dcf747..42daebf 100644
--- a/test/unit/org/apache/cassandra/hints/HintsBufferTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsBufferTest.java
@@ -26,6 +26,7 @@
 import com.google.common.collect.Iterables;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
@@ -40,12 +41,20 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.utils.Clock;
+import org.jboss.byteman.contrib.bmunit.BMRule;
+import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
 
-import static junit.framework.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 import static org.apache.cassandra.utils.FBUtilities.updateChecksum;
 
+@RunWith(BMUnitRunner.class)
 public class HintsBufferTest
 {
     private static final String KEYSPACE = "hints_buffer_test";
@@ -116,7 +125,7 @@
         // create HINT_THREADS_COUNT, start them, and wait for them to finish
         List<Thread> threads = new ArrayList<>(HINT_THREADS_COUNT);
         for (int i = 0; i < HINT_THREADS_COUNT; i ++)
-            threads.add(NamedThreadFactory.createThread(new Writer(buffer, load, hintSize, i, baseTimestamp)));
+            threads.add(NamedThreadFactory.createAnonymousThread(new Writer(buffer, load, hintSize, i, baseTimestamp)));
         threads.forEach(java.lang.Thread::start);
         for (Thread thread : threads)
             thread.join();
@@ -157,6 +166,48 @@
         buffer.free();
     }
 
+    static volatile long timestampForHint = 0;
+    // Byteman rule captures the timestamp that was used to store the hint, so we avoid any flakiness in timestamps
+    // between when we send the hint and when it actually gets written.
+    @Test
+    @BMRule(name = "GetHintTS",
+            targetClass="HintsBuffer$Allocation",
+            targetMethod="write(Iterable, Hint)",
+            targetLocation="AFTER INVOKE putIfAbsent",
+            action="org.apache.cassandra.hints.HintsBufferTest.timestampForHint = $ts")
+    public void testEarliestHintTime()
+    {
+        int hintSize = (int) Hint.serializer.serializedSize(createHint(0, Clock.Global.currentTimeMillis()), MessagingService.current_version);
+        int entrySize = hintSize + HintsBuffer.ENTRY_OVERHEAD_SIZE;
+        // allocate a slab to fit 10 hints
+        int slabSize = entrySize * 10;
+
+        // use a fixed timestamp base for all mutation timestamps
+        long baseTimestamp = Clock.Global.currentTimeMillis();
+
+        HintsBuffer buffer = HintsBuffer.create(slabSize);
+        UUID uuid = UUID.randomUUID();
+        // Track the first hints time
+        try (HintsBuffer.Allocation allocation = buffer.allocate(hintSize))
+        {
+            Hint hint = createHint(100, baseTimestamp);
+            allocation.write(Collections.singleton(uuid), hint);
+        }
+        long oldestHintTime = timestampForHint;
+
+        // Write some more hints to ensure we actually test getting the earliest
+        for (int i = 0; i < 9; i++)
+        {
+            try (HintsBuffer.Allocation allocation = buffer.allocate(hintSize))
+            {
+                Hint hint = createHint(i, baseTimestamp);
+                allocation.write(Collections.singleton(uuid), hint);
+            }
+        }
+        long earliest = buffer.getEarliestHintTime(uuid);
+        assertEquals(oldestHintTime, earliest);
+    }
+
     private static int validateEntry(UUID hostId, ByteBuffer buffer, long baseTimestamp, UUID[] load) throws IOException
     {
         CRC32 crc = new CRC32();
diff --git a/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java b/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java
index 92cfc71..af73d1b 100644
--- a/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java
@@ -17,22 +17,29 @@
  */
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.file.Files;
 import java.util.*;
 
 import com.google.common.collect.ImmutableMap;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.utils.Clock;
 import org.apache.cassandra.utils.FBUtilities;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
-import static junit.framework.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import static org.apache.cassandra.Util.dk;
 
 public class HintsCatalogTest
@@ -54,18 +61,14 @@
                 SchemaLoader.standardCFMD(KEYSPACE, TABLE2));
     }
 
+    @Rule
+    public TemporaryFolder testFolder = new TemporaryFolder();
+
     @Test
     public void loadCompletenessAndOrderTest() throws IOException
     {
-        File directory = Files.createTempDirectory(null).toFile();
-        try
-        {
-            loadCompletenessAndOrderTest(directory);
-        }
-        finally
-        {
-            directory.deleteOnExit();
-        }
+        File directory = new File(testFolder.newFolder());
+        loadCompletenessAndOrderTest(directory);
     }
 
     private void loadCompletenessAndOrderTest(File directory) throws IOException
@@ -73,10 +76,10 @@
         UUID hostId1 = UUID.randomUUID();
         UUID hostId2 = UUID.randomUUID();
 
-        long timestamp1 = System.currentTimeMillis();
-        long timestamp2 = System.currentTimeMillis() + 1;
-        long timestamp3 = System.currentTimeMillis() + 2;
-        long timestamp4 = System.currentTimeMillis() + 3;
+        long timestamp1 = Clock.Global.currentTimeMillis();
+        long timestamp2 = Clock.Global.currentTimeMillis() + 1;
+        long timestamp3 = Clock.Global.currentTimeMillis() + 2;
+        long timestamp4 = Clock.Global.currentTimeMillis() + 3;
 
         HintsDescriptor descriptor1 = new HintsDescriptor(hostId1, timestamp1);
         HintsDescriptor descriptor2 = new HintsDescriptor(hostId2, timestamp3);
@@ -107,10 +110,10 @@
     @Test
     public void deleteHintsTest() throws IOException
     {
-        File directory = Files.createTempDirectory(null).toFile();
+        File directory = new File(testFolder.newFolder());
         UUID hostId1 = UUID.randomUUID();
         UUID hostId2 = UUID.randomUUID();
-        long now = System.currentTimeMillis();
+        long now = Clock.Global.currentTimeMillis();
         writeDescriptor(directory, new HintsDescriptor(hostId1, now));
         writeDescriptor(directory, new HintsDescriptor(hostId1, now + 1));
         writeDescriptor(directory, new HintsDescriptor(hostId2, now + 2));
@@ -138,14 +141,27 @@
     @Test
     public void exciseHintFiles() throws IOException
     {
-        File directory = Files.createTempDirectory(null).toFile();
-        try
+        File directory = new File(testFolder.newFolder());
+        exciseHintFiles(directory);
+    }
+
+    @Test
+    public void hintsTotalSizeTest() throws IOException
+    {
+        File directory = new File(testFolder.newFolder());
+        UUID hostId = UUID.randomUUID();
+        long now = Clock.Global.currentTimeMillis();
+        long totalSize = 0;
+        HintsCatalog catalog = HintsCatalog.load(directory, ImmutableMap.of());
+        HintsStore store = catalog.get(hostId);
+        assertEquals(totalSize, store.getTotalFileSize());
+        for (int i = 0; i < 3; i++)
         {
-            exciseHintFiles(directory);
-        }
-        finally
-        {
-            directory.deleteOnExit();
+            HintsDescriptor descriptor = new HintsDescriptor(hostId, now + i);
+            writeDescriptor(directory, descriptor);
+            store.offerLast(descriptor);
+            assertTrue("Total file size should increase after writing more hints", store.getTotalFileSize() > totalSize);
+            totalSize = store.getTotalFileSize();
         }
     }
 
@@ -153,10 +169,10 @@
     {
         UUID hostId = UUID.randomUUID();
 
-        HintsDescriptor descriptor1 = new HintsDescriptor(hostId, System.currentTimeMillis());
-        HintsDescriptor descriptor2 = new HintsDescriptor(hostId, System.currentTimeMillis() + 1);
-        HintsDescriptor descriptor3 = new HintsDescriptor(hostId, System.currentTimeMillis() + 2);
-        HintsDescriptor descriptor4 = new HintsDescriptor(hostId, System.currentTimeMillis() + 3);
+        HintsDescriptor descriptor1 = new HintsDescriptor(hostId, Clock.Global.currentTimeMillis());
+        HintsDescriptor descriptor2 = new HintsDescriptor(hostId, Clock.Global.currentTimeMillis() + 1);
+        HintsDescriptor descriptor3 = new HintsDescriptor(hostId, Clock.Global.currentTimeMillis() + 2);
+        HintsDescriptor descriptor4 = new HintsDescriptor(hostId, Clock.Global.currentTimeMillis() + 3);
 
         createHintFile(directory, descriptor1);
         createHintFile(directory, descriptor2);
diff --git a/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java b/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java
index 2fad733..596727f 100644
--- a/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.hints;
 
 import java.io.DataInput;
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -27,15 +26,16 @@
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.io.ByteStreams;
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import org.apache.cassandra.io.compress.LZ4Compressor;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.net.MessagingService;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotSame;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.fail;
 import static org.assertj.core.api.Assertions.assertThat;
 
 public class HintsDescriptorTest
@@ -104,18 +104,18 @@
         ImmutableMap<String, Object> parameters = ImmutableMap.of();
         HintsDescriptor expected = new HintsDescriptor(hostId, version, timestamp, parameters);
 
-        Path directory = Files.createTempDirectory("hints");
+        File directory = new File(Files.createTempDirectory("hints"));
         try
         {
-            try (HintsWriter ignored = HintsWriter.create(directory.toFile(), expected))
+            try (HintsWriter ignored = HintsWriter.create(directory, expected))
             {
             }
-            HintsDescriptor actual = HintsDescriptor.readFromFile(directory.resolve(expected.fileName()));
+            HintsDescriptor actual = HintsDescriptor.readFromFile(expected.file(directory));
             assertEquals(expected, actual);
         }
         finally
         {
-            directory.toFile().deleteOnExit();
+            directory.deleteOnExit();
         }
     }
 
@@ -146,7 +146,7 @@
         HintsDescriptor.handleDescriptorIOE(new IOException("test"), p);
         File newFile = new File(p.getParent().toFile(), p.getFileName().toString().replace(".hints", ".corrupt.hints"));
         assertThat(p).doesNotExist();
-        assertThat(newFile).exists();
+        assertThat(newFile.exists()).isTrue();
         newFile.deleteOnExit();
     }
 
diff --git a/test/unit/org/apache/cassandra/hints/HintsReaderTest.java b/test/unit/org/apache/cassandra/hints/HintsReaderTest.java
index f05d4ce..3e3c649 100644
--- a/test/unit/org/apache/cassandra/hints/HintsReaderTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsReaderTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.hints;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
@@ -29,6 +28,7 @@
 import java.util.function.Function;
 
 import com.google.common.collect.Iterables;
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -43,12 +43,12 @@
 import org.apache.cassandra.io.util.DataInputBuffer;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.apache.cassandra.Util.dk;
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 
@@ -108,7 +108,7 @@
     {
         long baseTimestamp = descriptor.timestamp;
         int index = 0;
-        try (HintsReader reader = HintsReader.open(new File(directory, descriptor.fileName())))
+        try (HintsReader reader = HintsReader.open(descriptor.file(directory)))
         {
             for (HintsReader.Page page : reader)
             {
@@ -196,7 +196,7 @@
                                     SchemaLoader.standardCFMD(ks, CF_STANDARD1),
                                     SchemaLoader.standardCFMD(ks, CF_STANDARD2));
         int numTable = 2;
-        directory = Files.createTempDirectory(null).toFile();
+        directory = new File(Files.createTempDirectory(null));
         try
         {
             generateHints(3, ks);
@@ -206,7 +206,7 @@
         }
         finally
         {
-            directory.delete();
+            directory.deleteRecursive();
         }
     }
 
@@ -219,7 +219,7 @@
                                     SchemaLoader.standardCFMD(ks, CF_STANDARD1),
                                     SchemaLoader.standardCFMD(ks, CF_STANDARD2));
         int numTable = 2;
-        directory = Files.createTempDirectory(null).toFile();
+        directory = new File(Files.createTempDirectory(null));
         try
         {
             generateHints(3, ks);
@@ -227,7 +227,7 @@
         }
         finally
         {
-            directory.delete();
+            directory.tryDelete();
         }
     }
 
@@ -240,16 +240,16 @@
                                     SchemaLoader.standardCFMD(ks, CF_STANDARD1),
                                     SchemaLoader.standardCFMD(ks, CF_STANDARD2));
 
-        directory = Files.createTempDirectory(null).toFile();
+        directory = new File(Files.createTempDirectory(null));
         try
         {
             generateHints(3, ks);
-            MigrationManager.announceTableDrop(ks, CF_STANDARD1, true);
+            SchemaTestUtil.announceTableDrop(ks, CF_STANDARD1);
             readHints(3, 1);
         }
         finally
         {
-            directory.delete();
+            directory.tryDelete();
         }
     }
 }
diff --git a/test/unit/org/apache/cassandra/hints/HintsServiceTest.java b/test/unit/org/apache/cassandra/hints/HintsServiceTest.java
index dddf336..10a7040 100644
--- a/test/unit/org/apache/cassandra/hints/HintsServiceTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsServiceTest.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.hints;
 
+import java.util.Collections;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -30,38 +31,38 @@
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 
 import com.datastax.driver.core.utils.MoreFutures;
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.NoPayload;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.gms.IFailureDetectionEventListener;
-import org.apache.cassandra.gms.IFailureDetector;
 import org.apache.cassandra.metrics.StorageMetrics;
-import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.MockMessagingService;
 import org.apache.cassandra.net.MockMessagingSpy;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.StorageService;
+import org.jboss.byteman.contrib.bmunit.BMRule;
+import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
 
-import static org.apache.cassandra.Util.dk;
-import static org.apache.cassandra.net.Verb.HINT_REQ;
-import static org.apache.cassandra.net.Verb.HINT_RSP;
-import static org.apache.cassandra.net.MockMessagingService.verb;
+import static org.apache.cassandra.hints.HintsTestUtil.MockFailureDetector;
+import static org.apache.cassandra.hints.HintsTestUtil.sendHintsAndResponses;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
+@RunWith(BMUnitRunner.class)
 public class HintsServiceTest
 {
     private static final String KEYSPACE = "hints_service_test";
     private static final String TABLE = "table";
 
     private final MockFailureDetector failureDetector = new MockFailureDetector();
+    private static TableMetadata metadata;
 
     @BeforeClass
     public static void defineSchema()
@@ -71,6 +72,7 @@
         SchemaLoader.createKeyspace(KEYSPACE,
                 KeyspaceParams.simple(1),
                 SchemaLoader.standardCFMD(KEYSPACE, TABLE));
+        metadata = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
     }
 
     @After
@@ -104,7 +106,7 @@
         long cnt = StorageMetrics.totalHints.getCount();
 
         // create spy for hint messages
-        MockMessagingSpy spy = sendHintsAndResponses(100, -1);
+        MockMessagingSpy spy = sendHintsAndResponses(metadata, 100, -1);
 
         // metrics should have been updated with the number of created hints
         assertEquals(cnt + 100, StorageMetrics.totalHints.getCount());
@@ -120,7 +122,7 @@
         HintsService.instance.pauseDispatch();
 
         // create spy for hint messages
-        MockMessagingSpy spy = sendHintsAndResponses(100, -1);
+        MockMessagingSpy spy = sendHintsAndResponses(metadata, 100, -1);
 
         // we should not send any hints while paused
         ListenableFuture<Boolean> noMessagesWhilePaused = spy.interceptNoMsg(15, TimeUnit.SECONDS);
@@ -143,7 +145,7 @@
     public void testPageRetry() throws InterruptedException, ExecutionException, TimeoutException
     {
         // create spy for hint messages, but only create responses for 5 hints
-        MockMessagingSpy spy = sendHintsAndResponses(20, 5);
+        MockMessagingSpy spy = sendHintsAndResponses(metadata, 20, 5);
 
         Futures.allAsList(
                 // the dispatcher will always send all hints within the current page
@@ -164,7 +166,7 @@
     public void testPageSeek() throws InterruptedException, ExecutionException
     {
         // create spy for hint messages, stop replying after 12k (should be on 3rd page)
-        MockMessagingSpy spy = sendHintsAndResponses(20000, 12000);
+        MockMessagingSpy spy = sendHintsAndResponses(metadata, 20000, 12000);
 
         // At this point the dispatcher will constantly retry the page we stopped acking,
         // thus we receive the same hints from the page multiple times and in total more than
@@ -182,73 +184,41 @@
         assertTrue(((ChecksummedDataInput.Position) dispatchOffset).sourcePosition > 0);
     }
 
-    private MockMessagingSpy sendHintsAndResponses(int noOfHints, int noOfResponses)
+    // BM rule to capture the timestamp that was used to store the hint, so that we avoid any flakiness between
+    // the timestamp at which we send the hint and the one at which it is actually written.
+    static volatile long timestampForHint = 0L;
+    @Test
+    @BMRule(name = "GetHintTS",
+    targetClass="HintsBuffer$Allocation",
+    targetMethod="write(Iterable, Hint)",
+    targetLocation="AFTER INVOKE putIfAbsent",
+    action="org.apache.cassandra.hints.HintsServiceTest.timestampForHint = $ts")
+    public void testEarliestHint() throws InterruptedException
     {
-        // create spy for hint messages, but only create responses for noOfResponses hints
-        Message<NoPayload> message = Message.internalResponse(HINT_RSP, NoPayload.noPayload);
-
-        MockMessagingSpy spy;
-        if (noOfResponses != -1)
-        {
-            spy = MockMessagingService.when(verb(HINT_REQ)).respondN(message, noOfResponses);
-        }
-        else
-        {
-            spy = MockMessagingService.when(verb(HINT_REQ)).respond(message);
-        }
-
         // create and write noOfHints using service
         UUID hostId = StorageService.instance.getLocalHostUUID();
-        for (int i = 0; i < noOfHints; i++)
-        {
-            long now = System.currentTimeMillis();
-            DecoratedKey dkey = dk(String.valueOf(i));
-            TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
-            PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(metadata, dkey).timestamp(now);
-            builder.row("column0").add("val", "value0");
-            Hint hint = Hint.create(builder.buildAsMutation(), now);
-            HintsService.instance.write(hostId, hint);
-        }
-        return spy;
+        TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+
+        long ts = System.currentTimeMillis();
+        DecoratedKey dkey = Util.dk(String.valueOf(1));
+        PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(metadata, dkey).timestamp(ts);
+        builder.row("column0").add("val", "value0");
+        Hint hint = Hint.create(builder.buildAsMutation(), ts);
+        HintsService.instance.write(hostId, hint);
+        long oldestHintTime = timestampForHint;
+        Thread.sleep(1);
+        HintsService.instance.write(hostId, hint);
+        Thread.sleep(1);
+        HintsService.instance.write(hostId, hint);
+
+        // Close and fsync so that we get the timestamp from the descriptor rather than the buffer.
+        HintsStore store = HintsService.instance.getCatalog().get(hostId);
+        HintsService.instance.flushAndFsyncBlockingly(Collections.singletonList(hostId));
+        store.closeWriter();
+
+        long earliest = HintsService.instance.getEarliestHintForHost(hostId);
+        assertEquals(oldestHintTime, earliest);
+        assertNotEquals(oldestHintTime, timestampForHint);
     }
 
-    private static class MockFailureDetector implements IFailureDetector
-    {
-        private boolean isAlive = true;
-
-        public boolean isAlive(InetAddressAndPort ep)
-        {
-            return isAlive;
-        }
-
-        public void interpret(InetAddressAndPort ep)
-        {
-            throw new UnsupportedOperationException();
-        }
-
-        public void report(InetAddressAndPort ep)
-        {
-            throw new UnsupportedOperationException();
-        }
-
-        public void registerFailureDetectionEventListener(IFailureDetectionEventListener listener)
-        {
-            throw new UnsupportedOperationException();
-        }
-
-        public void unregisterFailureDetectionEventListener(IFailureDetectionEventListener listener)
-        {
-            throw new UnsupportedOperationException();
-        }
-
-        public void remove(InetAddressAndPort ep)
-        {
-            throw new UnsupportedOperationException();
-        }
-
-        public void forceConviction(InetAddressAndPort ep)
-        {
-            throw new UnsupportedOperationException();
-        }
-    }
 }
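
The rewritten HintsServiceTest leans on Byteman (via BMUnit) to capture the timestamp that HintsBuffer actually stores with a hint, instead of guessing it from the wall clock. An annotated skeleton of that binding, reusing the rule from the patch; this is a sketch that assumes byteman-bmunit and JUnit 4 on the test classpath, not a drop-in test:

import org.jboss.byteman.contrib.bmunit.BMRule;
import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
import org.junit.Test;
import org.junit.runner.RunWith;

@RunWith(BMUnitRunner.class) // BMUnit installs the Byteman agent for this test class
public class HintTimestampCaptureSketch
{
    @Test
    @BMRule(name = "GetHintTS",
            targetClass = "HintsBuffer$Allocation",        // the class being instrumented
            targetMethod = "write(Iterable, Hint)",        // the method to inject into
            targetLocation = "AFTER INVOKE putIfAbsent",   // fire right after the hint is stored
            // assigns the local variable ts of write() to the test's static field, exactly as in the patch
            action = "org.apache.cassandra.hints.HintsServiceTest.timestampForHint = $ts")
    public void capturesTimestamp()
    {
        // Writing a hint through HintsService.instance.write(hostId, hint) here would trigger the rule,
        // after which HintsServiceTest.timestampForHint holds the timestamp that was actually stored.
    }
}
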
diff --git a/test/unit/org/apache/cassandra/hints/HintsStoreTest.java b/test/unit/org/apache/cassandra/hints/HintsStoreTest.java
new file mode 100644
index 0000000..93e6a11
--- /dev/null
+++ b/test/unit/org/apache/cassandra/hints/HintsStoreTest.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.hints;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.db.RowUpdateBuilder;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.KeyspaceParams;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+
+public class HintsStoreTest
+{
+    private static final String KEYSPACE = "hints_store_test";
+    private static final String TABLE = "table";
+    private File directory;
+    private UUID hostId;
+
+    @Before
+    public void testSetup() throws IOException
+    {
+        directory = new File(Files.createTempDirectory(null));
+        directory.deleteOnExit();
+        hostId = UUID.randomUUID();
+    }
+
+    @BeforeClass
+    public static void setup()
+    {
+        SchemaLoader.prepareServer();
+        SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), SchemaLoader.standardCFMD(KEYSPACE, TABLE));
+    }
+
+    @Test
+    public void testDeleteAllExpiredHints() throws IOException
+    {
+        final long now = System.currentTimeMillis();
+        // hints to delete
+        writeHints(directory, new HintsDescriptor(hostId, now), 100, now);
+        writeHints(directory, new HintsDescriptor(hostId, now + 1000), 1, now);
+        HintsStore store = HintsCatalog.load(directory, ImmutableMap.of()).get(hostId);
+        assertTrue("Hints store should have files", store.hasFiles());
+        assertEquals(2, store.getDispatchQueueSize());
+
+        // jump to the future and delete.
+        store.deleteExpiredHints(now + TimeUnit.SECONDS.toMillis(Hint.maxHintTTL) + 10);
+
+        assertFalse("All hints files should be deleted", store.hasFiles());
+    }
+
+    @Test
+    public void testDeleteAllExpiredHintsByHittingExpirationsCache() throws IOException
+    {
+        final long now = System.currentTimeMillis();
+        HintsDescriptor hintsDescriptor = new HintsDescriptor(hostId, now);
+        writeHints(directory, hintsDescriptor, 100, now);
+
+        HintsStore store = HintsCatalog.load(directory, ImmutableMap.of()).get(hostId);
+        assertTrue("Hints store should have files", store.hasFiles());
+        assertEquals("Hints store should not have cached expiration yet", 0, store.getHintsExpirationsMapSize());
+        assertEquals(1, store.getDispatchQueueSize());
+
+        store.deleteExpiredHints(now + 1); // Not expired yet. Won't delete anything.
+        assertEquals("Hint should not be deleted yet", 1, store.getDispatchQueueSize());
+        assertEquals("Found no cached hints expiration", 1, store.getHintsExpirationsMapSize());
+        // jump to the future and delete. It should not need to re-read the whole file
+        store.deleteExpiredHints(now + TimeUnit.SECONDS.toMillis(Hint.maxHintTTL) + 10);
+        assertFalse("All hints files should be deleted", store.hasFiles());
+    }
+
+    /**
+     * Test that multiple threads can delete hints files concurrently.
+     * This can happen when the hint service is running a removal process while an operator issues a nodetool command to delete hints.
+     *
+     * The threads contend and each deletes part of the files in the store; the net effect should be that all files get deleted.
+     */
+    @Test
+    public void testConcurrentDeleteExpiredHints() throws Exception
+    {
+        final long now = System.currentTimeMillis();
+        for (int i = 100; i >= 0; i--)
+        {
+            writeHints(directory, new HintsDescriptor(hostId, now - i), 100, now);
+        }
+
+        HintsStore store = HintsCatalog.load(directory, ImmutableMap.of()).get(hostId);
+        int concurrency = 3;
+        CountDownLatch start = new CountDownLatch(1);
+        Runnable removal = () -> {
+            Uninterruptibles.awaitUninterruptibly(start);
+            store.deleteExpiredHints(now + TimeUnit.SECONDS.toMillis(Hint.maxHintTTL) + 10); // jump to the future and delete
+        };
+        ExecutorService es = Executors.newFixedThreadPool(concurrency);
+        try (Closeable ignored = es::shutdown)
+        {
+            for (int i = 0; i < concurrency; i++)
+                es.submit(removal);
+            start.countDown();
+        }
+        assertTrue(es.awaitTermination(2, TimeUnit.SECONDS));
+        assertFalse("All hints files should be deleted", store.hasFiles());
+    }
+
+    @Test
+    public void testPendingHintsInfo() throws Exception
+    {
+        HintsStore store = HintsCatalog.load(directory, ImmutableMap.of()).get(hostId);
+        assertNull(store.getPendingHintsInfo());
+
+        final long t1 = 10;
+        writeHints(directory, new HintsDescriptor(hostId, t1), 100, t1);
+        store = HintsCatalog.load(directory, ImmutableMap.of()).get(hostId);
+        assertEquals(new PendingHintsInfo(store.hostId, 1, t1, t1),
+                     store.getPendingHintsInfo());
+        final long t2 = t1 + 1;
+        writeHints(directory, new HintsDescriptor(hostId, t2), 100, t2);
+        store = HintsCatalog.load(directory, ImmutableMap.of()).get(hostId);
+        assertEquals(new PendingHintsInfo(store.hostId, 2, t1, t2),
+                     store.getPendingHintsInfo());
+    }
+
+    private long writeHints(File directory, HintsDescriptor descriptor, int hintsCount, long hintCreationTime) throws IOException
+    {
+        try (HintsWriter writer = HintsWriter.create(directory, descriptor))
+        {
+            ByteBuffer buffer = ByteBuffer.allocateDirect(256 * 1024);
+            try (HintsWriter.Session session = writer.newSession(buffer))
+            {
+                for (int i = 0; i < hintsCount; i++)
+                    session.append(createHint(i, hintCreationTime));
+            }
+            FileUtils.clean(buffer);
+        }
+        return new File(directory, descriptor.fileName()).lastModified(); // hint file last modified time
+    }
+
+    private Hint createHint(int idx, long creationTime)
+    {
+        TableMetadata table = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
+        Mutation mutation = new RowUpdateBuilder(table, creationTime, bytes(idx))
+                            .clustering(bytes(idx))
+                            .add("val", bytes(idx))
+                            .build();
+
+        return Hint.create(mutation, creationTime, 1);
+    }
+}
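
The expiration tests above "jump to the future" by handing deleteExpiredHints() a timestamp beyond the maximum hint TTL. A minimal sketch of that arithmetic, assuming Hint.maxHintTTL is expressed in seconds as the test's TimeUnit conversion implies, and with store standing for a HintsStore loaded via HintsCatalog.load() as in the test:

long creationTime = System.currentTimeMillis();
// a hint written at creationTime is certainly expired once maxHintTTL seconds have elapsed ...
long expiredAfter = creationTime + TimeUnit.SECONDS.toMillis(Hint.maxHintTTL);
// ... so asking the store to delete at any later instant should drop the files holding such hints
store.deleteExpiredHints(expiredAfter + 10);
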
diff --git a/test/unit/org/apache/cassandra/hints/HintsTestUtil.java b/test/unit/org/apache/cassandra/hints/HintsTestUtil.java
index c1c6192..727404e 100644
--- a/test/unit/org/apache/cassandra/hints/HintsTestUtil.java
+++ b/test/unit/org/apache/cassandra/hints/HintsTestUtil.java
@@ -17,13 +17,30 @@
  */
 package org.apache.cassandra.hints;
 
+import java.util.UUID;
+
 import com.google.common.collect.Iterators;
 
+import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.partitions.AbstractBTreePartition;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.gms.IFailureDetectionEventListener;
+import org.apache.cassandra.gms.IFailureDetector;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MockMessagingService;
+import org.apache.cassandra.net.MockMessagingSpy;
+import org.apache.cassandra.net.NoPayload;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.Clock;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.apache.cassandra.Util.dk;
+import static org.apache.cassandra.net.MockMessagingService.verb;
+import static org.apache.cassandra.net.Verb.HINT_REQ;
+import static org.apache.cassandra.net.Verb.HINT_RSP;
 
 final class HintsTestUtil
 {
@@ -45,4 +62,73 @@
         assertEquals(expected.creationTime, actual.creationTime);
         assertEquals(expected.gcgs, actual.gcgs);
     }
+
+    static MockMessagingSpy sendHintsAndResponses(TableMetadata metadata, int noOfHints, int noOfResponses)
+    {
+        // create spy for hint messages, but only create responses for noOfResponses hints
+        Message<NoPayload> message = Message.internalResponse(HINT_RSP, NoPayload.noPayload);
+
+        MockMessagingSpy spy;
+        if (noOfResponses != -1)
+        {
+            spy = MockMessagingService.when(verb(HINT_REQ)).respondN(message, noOfResponses);
+        }
+        else
+        {
+            spy = MockMessagingService.when(verb(HINT_REQ)).respond(message);
+        }
+
+        // create and write noOfHints using service
+        UUID hostId = StorageService.instance.getLocalHostUUID();
+        for (int i = 0; i < noOfHints; i++)
+        {
+            long now = Clock.Global.currentTimeMillis();
+            DecoratedKey dkey = dk(String.valueOf(i));
+            PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(metadata, dkey).timestamp(now);
+            builder.row("column0").add("val", "value0");
+            Hint hint = Hint.create(builder.buildAsMutation(), now);
+            HintsService.instance.write(hostId, hint);
+        }
+        return spy;
+    }
+
+    static class MockFailureDetector implements IFailureDetector
+    {
+        boolean isAlive = true;
+
+        public boolean isAlive(InetAddressAndPort ep)
+        {
+            return isAlive;
+        }
+
+        public void interpret(InetAddressAndPort ep)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public void report(InetAddressAndPort ep)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public void registerFailureDetectionEventListener(IFailureDetectionEventListener listener)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public void unregisterFailureDetectionEventListener(IFailureDetectionEventListener listener)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public void remove(InetAddressAndPort ep)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public void forceConviction(InetAddressAndPort ep)
+        {
+            throw new UnsupportedOperationException();
+        }
+    }
 }
diff --git a/test/unit/org/apache/cassandra/index/CustomIndexTest.java b/test/unit/org/apache/cassandra/index/CustomIndexTest.java
index 84a36df..0d7fc9f 100644
--- a/test/unit/org/apache/cassandra/index/CustomIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/CustomIndexTest.java
@@ -639,7 +639,7 @@
 
         try
         {
-            getCurrentColumnFamilyStore().forceBlockingFlush();
+            flush();
             fail("Exception should have been propagated");
         }
         catch (Throwable t)
@@ -648,7 +648,7 @@
         }
 
         // SSTables remain uncommitted.
-        assertEquals(1, getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables().listFiles().length);
+        assertEquals(1, getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables().tryList().length);
     }
 
     @Test
@@ -661,7 +661,7 @@
         // Insert a single wide partition to be indexed
         for (int i = 0; i < totalRows; i++)
             execute("INSERT INTO %s (k, c, v) VALUES (0, ?, ?)", i, i);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // Create the index, which won't automatically start building
         String indexName = "build_single_partition_idx";
@@ -718,7 +718,7 @@
         execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 5, 3, 3);
         execute("DELETE FROM %s WHERE k = ?", 5);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         String indexName = "partition_index_test_idx";
         createIndex(String.format("CREATE CUSTOM INDEX %s ON %%s(v) USING '%s'",
@@ -780,7 +780,7 @@
         // Insert a single row partition to be indexed
         for (int i = 0; i < totalRows; i++)
             execute("INSERT INTO %s (k, c, v) VALUES (0, ?, ?)", i, i);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // Create the index, which won't automatically start building
         String indexName = "partition_overindex_test_idx";
@@ -806,7 +806,7 @@
 
         // Insert a single range tombstone
         execute("DELETE FROM %s WHERE k=1 and c > 2");
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // Create the index, which won't automatically start building
         String indexName = "range_tombstone_idx";
diff --git a/test/unit/org/apache/cassandra/index/internal/CassandraIndexTest.java b/test/unit/org/apache/cassandra/index/internal/CassandraIndexTest.java
index 2c46bdb..84eb8d0 100644
--- a/test/unit/org/apache/cassandra/index/internal/CassandraIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/internal/CassandraIndexTest.java
@@ -569,7 +569,7 @@
         Awaitility.await()
                   .atMost(1, TimeUnit.MINUTES)
                   .pollDelay(1, TimeUnit.SECONDS)
-                  .untilAsserted(() -> assertRows(execute(selectBuiltIndexesQuery)));
+                  .untilAsserted(() -> assertRows(execute(selectBuiltIndexesQuery), row("system", "PaxosUncommittedIndex", null)));
 
         String indexName = "build_remove_test_idx";
         String tableName = createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
@@ -577,18 +577,18 @@
         waitForIndex(KEYSPACE, tableName, indexName);
 
         // check that there are no other rows in the built indexes table
-        assertRows(execute(selectBuiltIndexesQuery), row(KEYSPACE, indexName, null));
+        assertRows(execute(selectBuiltIndexesQuery), row(KEYSPACE, indexName, null), row("system", "PaxosUncommittedIndex", null));
 
         // rebuild the index and verify the built status table
         getCurrentColumnFamilyStore().rebuildSecondaryIndex(indexName);
         waitForIndex(KEYSPACE, tableName, indexName);
 
         // check that there are no other rows in the built indexes table
-        assertRows(execute(selectBuiltIndexesQuery), row(KEYSPACE, indexName, null));
+        assertRows(execute(selectBuiltIndexesQuery), row(KEYSPACE, indexName, null), row("system", "PaxosUncommittedIndex", null));
 
         // check that dropping the index removes it from the built indexes table
         dropIndex("DROP INDEX %s." + indexName);
-        assertRows(execute(selectBuiltIndexesQuery));
+        assertRows(execute(selectBuiltIndexesQuery), row("system", "PaxosUncommittedIndex", null));
     }
 
 
diff --git a/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java b/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java
index 51bb6bb..04579b0 100644
--- a/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java
+++ b/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java
@@ -30,6 +30,7 @@
 
 import com.google.common.collect.ImmutableSet;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.index.TargetParser;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -135,7 +136,7 @@
     public Callable<Void> getBlockingFlushTask()
     {
         return () -> {
-            indexCfs.forceBlockingFlush();
+            Util.flush(indexCfs);
             return null;
         };
     }
@@ -597,7 +598,7 @@
         CompactionManager.instance.interruptCompactionForCFs(cfss, (sstable) -> true, true);
         CompactionManager.instance.waitForCessation(cfss, (sstable) -> true);
         indexCfs.keyspace.writeOrder.awaitNewBarrier();
-        indexCfs.forceBlockingFlush();
+        Util.flush(indexCfs);
         indexCfs.readOrdering.awaitNewBarrier();
         indexCfs.invalidate();
     }
@@ -622,7 +623,7 @@
 
     private void buildBlocking()
     {
-        baseCfs.forceBlockingFlush();
+        Util.flush(baseCfs);
 
         try (ColumnFamilyStore.RefViewFragment viewFragment = baseCfs.selectAndReference(View.selectFunction(SSTableSet.CANONICAL));
              Refs<SSTableReader> sstables = viewFragment.refs)
@@ -646,7 +647,7 @@
                                                                          ImmutableSet.copyOf(sstables));
             Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
             FBUtilities.waitOnFuture(future);
-            indexCfs.forceBlockingFlush();
+            Util.flush(indexCfs);
         }
         logger.info("Index build of {} complete", metadata.name);
     }
diff --git a/test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java b/test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java
index 695f040..e746fab 100644
--- a/test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java
@@ -19,6 +19,7 @@
 package org.apache.cassandra.index.sasi;
 
 import java.util.List;
+import java.util.Optional;
 import java.util.stream.Collectors;
 
 import com.google.common.collect.Sets;
@@ -32,8 +33,10 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.transport.ProtocolVersion;
 
 public class SASICQLTest extends CQLTester
 {
@@ -109,10 +112,10 @@
     {
         createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
 
-        boolean enableSASIIndexes = DatabaseDescriptor.getEnableSASIIndexes();
+        boolean enableSASIIndexes = DatabaseDescriptor.getSASIIndexesEnabled();
         try
         {
-            DatabaseDescriptor.setEnableSASIIndexes(false);
+            DatabaseDescriptor.setSASIIndexesEnabled(false);
             createIndex("CREATE CUSTOM INDEX ON %s (v) USING 'org.apache.cassandra.index.sasi.SASIIndex'");
             Assert.fail("Should not be able to create a SASI index if they are disabled");
         }
@@ -125,7 +128,7 @@
         }
         finally
         {
-            DatabaseDescriptor.setEnableSASIIndexes(enableSASIIndexes);
+            DatabaseDescriptor.setSASIIndexesEnabled(enableSASIIndexes);
         }
     }
 
@@ -348,4 +351,17 @@
             }
         }
     }
+
+    @Test
+    public void testInOperator() throws Throwable
+    {
+        createTable("CREATE TABLE %s (pk int primary key, v int);");
+
+        createIndex("CREATE CUSTOM INDEX ON %s (v) USING 'org.apache.cassandra.index.sasi.SASIIndex';");
+
+        assertInvalidThrowMessage(Optional.of(ProtocolVersion.CURRENT),
+                                  StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE,
+                                  InvalidQueryException.class,
+                                  "SELECT * FROM %s WHERE v IN (200, 250, 300)");
+    }
 }
diff --git a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
index 945a5e9..441d7b0 100644
--- a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
@@ -17,16 +17,15 @@
  */
 package org.apache.cassandra.index.sasi;
 
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
 import java.io.IOException;
-import java.io.Writer;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.FileSystems;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.*;
 import java.util.concurrent.ExecutorService;
@@ -38,12 +37,14 @@
 import java.util.stream.Stream;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.Operator;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.index.Index;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
@@ -86,6 +87,8 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.TypeSerializer;
+import org.apache.cassandra.service.snapshot.SnapshotManifest;
+import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
@@ -95,12 +98,10 @@
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Uninterruptibles;
 
-import org.json.simple.JSONArray;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
 import org.junit.*;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.db.ColumnFamilyStoreTest.getSnapshotManifestAndSchemaFileSizes;
 
 public class SASIIndexTest
 {
@@ -165,17 +166,16 @@
         try
         {
             store.snapshot(snapshotName);
+
             // Compact to make true snapshot size != 0
             store.forceMajorCompaction();
             LifecycleTransaction.waitForDeletions();
 
-            FileReader reader = new FileReader(store.getDirectories().getSnapshotManifestFile(snapshotName));
-            JSONObject manifest = (JSONObject) new JSONParser().parse(reader);
-            JSONArray files = (JSONArray) manifest.get("files");
+            SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(store.getDirectories().getSnapshotManifestFile(snapshotName));
 
             Assert.assertFalse(ssTableReaders.isEmpty());
-            Assert.assertFalse(files.isEmpty());
-            Assert.assertEquals(ssTableReaders.size(), files.size());
+            Assert.assertFalse(manifest.files.isEmpty());
+            Assert.assertEquals(ssTableReaders.size(), manifest.files.size());
 
             Map<Descriptor, Set<Component>> snapshotSSTables = store.getDirectories()
                                                                     .sstableLister(Directories.OnTxnErr.IGNORE)
@@ -191,7 +191,7 @@
                 Descriptor snapshotSSTable = new Descriptor(snapshotDirectory,
                                                             sstable.getKeyspaceName(),
                                                             sstable.getColumnFamilyName(),
-                                                            sstable.descriptor.generation,
+                                                            sstable.descriptor.id,
                                                             sstable.descriptor.formatType);
 
                 Set<Component> components = snapshotSSTables.get(snapshotSSTable);
@@ -208,11 +208,12 @@
                         tableSize += componentSize;
                 }
             }
-
-            Map<String, Directories.SnapshotSizeDetails> details = store.getSnapshotDetails();
+            
+            TableSnapshot details = store.listSnapshots().get(snapshotName);
 
             // check that SASI components are included in the computation of snapshot size
-            Assert.assertEquals(tableSize + indexSize, (long) details.get(snapshotName).dataSizeBytes);
+            long snapshotSize = tableSize + indexSize + getSnapshotManifestAndSchemaFileSizes(details);
+            Assert.assertEquals(snapshotSize, details.computeTrueSizeBytes());
         }
         finally
         {
@@ -546,7 +547,7 @@
 
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         final UntypedResultSet results = executeCQL(FTS_CF_NAME, "SELECT * FROM %s.%s WHERE artist LIKE 'lady%%'");
         Assert.assertNotNull(results);
@@ -898,7 +899,7 @@
         rm3.build().apply();
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         final ByteBuffer dataOutputId = UTF8Type.instance.decompose("/data/output/id");
 
@@ -1060,7 +1061,7 @@
     {
         setMinIndexInterval(minIndexInterval);
         IndexSummaryManager.instance.redistributeSummaries();
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         Set<String> rows = getIndexed(store, 100, buildExpression(firstName, Operator.LIKE_CONTAINS, UTF8Type.instance.decompose("a")));
         Assert.assertEquals(rows.toString(), expected, rows.size());
@@ -1304,7 +1305,7 @@
         rm.build().apply();
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         Set<String> rows;
 
@@ -1376,7 +1377,7 @@
         rm.build().apply();
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         Set<String> rows;
 
@@ -1437,7 +1438,7 @@
             rows = getIndexed(store, 10, buildExpression(comment, Operator.LIKE_MATCHES, bigValue.duplicate()));
             Assert.assertEquals(0, rows.size());
 
-            store.forceBlockingFlush();
+            Util.flush(store);
 
             rows = getIndexed(store, 10, buildExpression(comment, Operator.LIKE_MATCHES, bigValue.duplicate()));
             Assert.assertEquals(0, rows.size());
@@ -1540,7 +1541,7 @@
         update(rm, fullName, UTF8Type.instance.decompose("利久 寺地"), 8000);
         rm.build().apply();
 
-        store.forceBlockingFlush();
+        Util.flush(store);
 
 
         Set<String> rows;
@@ -1577,7 +1578,7 @@
         rm.build().apply();
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         Set<String> rows;
 
@@ -1662,7 +1663,7 @@
         rm.build().apply();
 
         // first flush would make interval for name - 'johnny' -> 'pavel'
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         rm = new Mutation.PartitionUpdateCollector(KS_NAME, decoratedKey("key6"));
         update(rm, name, UTF8Type.instance.decompose("Jason"), 6000);
@@ -1677,7 +1678,7 @@
         rm.build().apply();
 
         // this flush is going to produce range - 'jason' -> 'vijay'
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         // make sure that overlap of the prefixes is properly handled across sstables
         // since simple interval tree lookup is not going to cover it, prefix lookup actually required.
@@ -1841,7 +1842,7 @@
         executeCQL(CLUSTERING_CF_NAME_1 ,"INSERT INTO %s.%s (name, nickname, location, age, height, score) VALUES (?, ?, ?, ?, ?, ?)", "Jordan", "jrwest", "US", 27, 182, 1.0);
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         UntypedResultSet results;
 
@@ -1928,7 +1929,7 @@
         executeCQL(CLUSTERING_CF_NAME_2 ,"INSERT INTO %s.%s (name, nickname, location, age, height, score) VALUES (?, ?, ?, ?, ?, ?)", "Christopher", "chis", "US", 27, 180, 1.0);
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         results = executeCQL(CLUSTERING_CF_NAME_2 ,"SELECT * FROM %s.%s WHERE location LIKE 'US' AND age = 43 ALLOW FILTERING");
         Assert.assertNotNull(results);
@@ -1954,7 +1955,7 @@
         executeCQL(STATIC_CF_NAME, "INSERT INTO %s.%s (sensor_id,date,value,variance) VALUES(?, ?, ?, ?)", 1, 20160403L, 24.96, 4);
 
         if (shouldFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         executeCQL(STATIC_CF_NAME, "INSERT INTO %s.%s (sensor_id,sensor_type) VALUES(?, ?)", 2, "PRESSURE");
         executeCQL(STATIC_CF_NAME, "INSERT INTO %s.%s (sensor_id,date,value,variance) VALUES(?, ?, ?, ?)", 2, 20160401L, 1.03, 9);
@@ -1962,7 +1963,7 @@
         executeCQL(STATIC_CF_NAME, "INSERT INTO %s.%s (sensor_id,date,value,variance) VALUES(?, ?, ?, ?)", 2, 20160403L, 1.01, 4);
 
         if (shouldFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         UntypedResultSet results;
 
@@ -2043,15 +2044,15 @@
         executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, location, age, height, score) VALUES (?, ?, ?, ?, ?)", "Pavel", "BY", 28, 182, 2.0);
         executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, nickname, location, age, height, score) VALUES (?, ?, ?, ?, ?, ?)", "Jordan", "jrwest", "US", 27, 182, 1.0);
 
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         SSTable ssTable = store.getSSTables(SSTableSet.LIVE).iterator().next();
         Path path = FileSystems.getDefault().getPath(ssTable.getFilename().replace("-Data", "-SI_" + CLUSTERING_CF_NAME_1 + "_age"));
 
         // Overwrite index file with garbage
-        try (Writer writer = new FileWriter(path.toFile(), false))
+        try(FileChannel fc = FileChannel.open(path, StandardOpenOption.WRITE))
         {
-            writer.write("garbage");
+            fc.truncate(8).write(ByteBuffer.wrap("garbage".getBytes(StandardCharsets.UTF_8)));
         }
 
         long size1 = Files.readAttributes(path, BasicFileAttributes.class).size();
@@ -2082,7 +2083,7 @@
 
         executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, nickname) VALUES (?, ?)", "Alex", "ifesdjeen");
 
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         for (Index index : store.indexManager.listIndexes())
         {
@@ -2208,7 +2209,7 @@
         {
             Keyspace keyspace = Keyspace.open(KS_NAME);
             for (String table : Arrays.asList(containsTable, prefixTable, analyzedPrefixTable))
-                keyspace.getColumnFamilyStore(table).forceBlockingFlush();
+                Util.flushTable(keyspace, table);
         }
 
         UntypedResultSet results;
@@ -2442,7 +2443,7 @@
 
         Assert.assertTrue(rangesSize(beforeFlushMemtable, expression) > 0);
 
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         IndexMemtable afterFlushMemtable = index.getCurrentMemtable();
 
@@ -2620,7 +2621,7 @@
         ColumnFamilyStore store = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME);
 
         if (forceFlush)
-            store.forceBlockingFlush();
+            Util.flush(store);
 
         return store;
     }
@@ -2679,7 +2680,6 @@
                     }
                 }
             }
-
         }
         while (count == pageSize);
 
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java
index 1afb7b4..0abddd9 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.index.sasi.disk;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.ThreadLocalRandom;
@@ -38,6 +37,7 @@
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.io.util.DataOutputBuffer;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.MurmurHash;
 import org.apache.cassandra.utils.Pair;
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
index 97b3433..ad0caff 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.index.sasi.disk;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.Callable;
@@ -40,13 +39,14 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.FSError;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.Tables;
-import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 import com.google.common.util.concurrent.Futures;
@@ -65,9 +65,9 @@
     {
         System.setProperty("cassandra.config", "cassandra-murmur.yaml");
         SchemaLoader.loadSchema();
-        MigrationManager.announceNewKeyspace(KeyspaceMetadata.create(KS_NAME,
-                                                                     KeyspaceParams.simpleTransient(1),
-                                                                     Tables.of(SchemaLoader.sasiCFMD(KS_NAME, CF_NAME).build())));
+        SchemaTestUtil.announceNewKeyspace(KeyspaceMetadata.create(KS_NAME,
+                                                                   KeyspaceParams.simpleTransient(1),
+                                                                   Tables.of(SchemaLoader.sasiCFMD(KS_NAME, CF_NAME).build())));
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
index 4339a62..6d067a1 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.index.sasi.disk;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
@@ -36,6 +35,7 @@
 import org.apache.cassandra.index.sasi.utils.RangeIterator;
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.index.sasi.utils.RangeUnionIterator;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.SequentialWriterOption;
 import org.apache.cassandra.utils.MurmurHash;
diff --git a/test/unit/org/apache/cassandra/index/sasi/plan/ExpressionTest.java b/test/unit/org/apache/cassandra/index/sasi/plan/ExpressionTest.java
index 7457a85..5eeb5ef 100644
--- a/test/unit/org/apache/cassandra/index/sasi/plan/ExpressionTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/plan/ExpressionTest.java
@@ -24,7 +24,6 @@
 import org.junit.Test;
 
 import org.apache.cassandra.db.marshal.UTF8Type;
-import org.apache.cassandra.index.sasi.plan.Expression.Bound;
 
 public class ExpressionTest
 {
diff --git a/test/unit/org/apache/cassandra/index/sasi/plan/OperationTest.java b/test/unit/org/apache/cassandra/index/sasi/plan/OperationTest.java
index 4468f2c..8620f5c 100644
--- a/test/unit/org/apache/cassandra/index/sasi/plan/OperationTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/plan/OperationTest.java
@@ -38,6 +38,7 @@
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
 import org.junit.*;
@@ -651,6 +652,15 @@
         {
             throw new UnsupportedOperationException();
         }
+
+        @Override
+        protected String toString(boolean cql)
+        {
+            return String.format("%s %s %s",
+                                 cql ? column.name.toCQLString() : column.name.toString(),
+                                 operator,
+                                 ByteBufferUtil.bytesToHex(value));
+        }
     }
 
     private static Unfiltered buildRow(Cell<?>... cells)
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java
index 05db33a..f61252a 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java
@@ -22,8 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.cassandra.index.sasi.utils.LongIterator.convert;
-
 public class LongIteratorTest
 {
     @Test
@@ -53,4 +51,4 @@
 
         it.close();
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java
index e55f6ba..dcd79b9 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java
@@ -25,6 +25,7 @@
 
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.io.util.ChannelProxy;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 
 import org.junit.Assert;
@@ -452,7 +453,7 @@
         File tmp = FileUtils.createTempFile("mapped-buffer", "tmp");
         tmp.deleteOnExit();
 
-        RandomAccessFile file = new RandomAccessFile(tmp, "rw");
+        RandomAccessFile file = new RandomAccessFile(tmp.toJavaIOFile(), "rw");
 
         long numValues = 1000;
         for (long i = 0; i < numValues; i++)
@@ -460,7 +461,7 @@
 
         file.getFD().sync();
 
-        try (MappedBuffer buffer = new MappedBuffer(new ChannelProxy(tmp.getAbsolutePath(), file.getChannel())))
+        try (MappedBuffer buffer = new MappedBuffer(new ChannelProxy(tmp.absolutePath(), file.getChannel())))
         {
             Assert.assertEquals(numValues * 8, buffer.limit());
             Assert.assertEquals(numValues * 8, buffer.capacity());
@@ -493,7 +494,7 @@
         final File testFile = FileUtils.createTempFile("mapped-buffer-test", "db");
         testFile.deleteOnExit();
 
-        RandomAccessFile file = new RandomAccessFile(testFile, "rw");
+        RandomAccessFile file = new RandomAccessFile(testFile.toJavaIOFile(), "rw");
 
         for (long i = 0; i < numCount; i++)
         {
@@ -529,7 +530,7 @@
 
         try
         {
-            return new MappedBuffer(new ChannelProxy(testFile.getAbsolutePath(), file.getChannel()), numPageBits);
+            return new MappedBuffer(new ChannelProxy(testFile.absolutePath(), file.getChannel()), numPageBits);
         }
         finally
         {
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java
index 162b1c6..581f4e5 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java
@@ -373,4 +373,4 @@
         Assert.assertTrue(range.hasNext());
         Assert.assertEquals(10, range.getCount());
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java b/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java
index 8760f43..be8b162 100644
--- a/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java
+++ b/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java
@@ -28,6 +28,7 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.compaction.CompactionInterruptedException;
@@ -96,7 +97,7 @@
             execute("INSERT INTO %s (pk) VALUES (?)", base + i);
 
         // flush to write the sstable
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
     }
 
     private void assertDiskSpaceEqual(ColumnFamilyStore cfs)
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index d3d81f0..c398ac4 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@ -19,9 +19,8 @@
 package org.apache.cassandra.io.compress;
 
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
+import java.io.RandomAccessFile; //checkstyle: permit this import
 import java.util.Arrays;
 import java.util.Random;
 
@@ -97,7 +96,7 @@
     public void test6791() throws IOException, ConfigurationException
     {
         File f = FileUtils.createTempFile("compressed6791_", "3");
-        String filename = f.getAbsolutePath();
+        String filename = f.absolutePath();
         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
         try(CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata",
                                                                                null, SequentialWriterOption.DEFAULT,
@@ -132,10 +131,10 @@
         finally
         {
             if (f.exists())
-                assertTrue(f.delete());
+                assertTrue(f.tryDelete());
             File metadata = new File(filename+ ".metadata");
             if (metadata.exists())
-                metadata.delete();
+                metadata.tryDelete();
         }
     }
 
@@ -145,8 +144,8 @@
     @Test
     public void testChunkIndexOverflow() throws IOException
     {
-        File file = File.createTempFile("chunk_idx_overflow", "1");
-        String filename = file.getAbsolutePath();
+        File file = FileUtils.createTempFile("chunk_idx_overflow", "1");
+        String filename = file.absolutePath();
         int chunkLength = 4096; // 4k
 
         try
@@ -166,16 +165,16 @@
         finally
         {
             if (file.exists())
-                assertTrue(file.delete());
+                assertTrue(file.tryDelete());
             File metadata = new File(filename + ".metadata");
             if (metadata.exists())
-                metadata.delete();
+                metadata.tryDelete();
         }
     }
 
     private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize, double minCompressRatio) throws IOException
     {
-        final String filename = f.getAbsolutePath();
+        final String filename = f.absolutePath();
         writeSSTable(f, compressed ? CompressionParams.snappy() : null, junkSize);
 
         CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), true) : null;
@@ -192,16 +191,16 @@
         finally
         {
             if (f.exists())
-                assertTrue(f.delete());
+                assertTrue(f.tryDelete());
             File metadata = new File(filename + ".metadata");
             if (compressed && metadata.exists())
-                metadata.delete();
+                metadata.tryDelete();
         }
     }
 
     private static void writeSSTable(File f, CompressionParams params, int junkSize) throws IOException
     {
-        final String filename = f.getAbsolutePath();
+        final String filename = f.absolutePath();
         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
         try(SequentialWriter writer = params != null
                 ? new CompressedSequentialWriter(f, filename + ".metadata",
@@ -237,14 +236,14 @@
         File file = new File("testDataCorruptionDetection");
         file.deleteOnExit();
 
-        File metadata = new File(file.getPath() + ".meta");
+        File metadata = new File(file.path() + ".meta");
         metadata.deleteOnExit();
 
-        assertTrue(file.createNewFile());
-        assertTrue(metadata.createNewFile());
+        assertTrue(file.createFileIfNotExists());
+        assertTrue(metadata.createFileIfNotExists());
 
         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
-        try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(),
+        try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.path(),
                                                                       null, SequentialWriterOption.DEFAULT,
                                                                       CompressionParams.snappy(), sstableMetadataCollector))
         {
@@ -253,16 +252,16 @@
         }
 
         // open compression metadata and get chunk information
-        CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), true);
+        CompressionMetadata meta = new CompressionMetadata(metadata.path(), file.length(), true);
         CompressionMetadata.Chunk chunk = meta.chunkFor(0);
 
-        try (FileHandle.Builder builder = new FileHandle.Builder(file.getPath()).withCompressionMetadata(meta);
+        try (FileHandle.Builder builder = new FileHandle.Builder(file.path()).withCompressionMetadata(meta);
              FileHandle fh = builder.complete();
              RandomAccessReader reader = fh.createReader())
         {// read and verify compressed data
             assertEquals(CONTENT, reader.readLine());
             Random random = new Random();
-            try(RandomAccessFile checksumModifier = new RandomAccessFile(file, "rw"))
+            try(RandomAccessFile checksumModifier = new RandomAccessFile(file.toJavaIOFile(), "rw"))
             {
                 byte[] checksum = new byte[4];
 
@@ -311,6 +310,6 @@
     {
         file.seek(checksumOffset);
         file.write(checksum);
-        SyncUtil.sync(file);
+        SyncUtil.sync(file.getFD());
     }
 }
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterReopenTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterReopenTest.java
index 461c13c..08072f6 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterReopenTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterReopenTest.java
@@ -78,13 +78,13 @@
         {
             execute("insert into %s (id, t) values (?, ?)", i, ByteBuffer.wrap(blob));
         }
-        getCurrentColumnFamilyStore().forceBlockingFlush();
+        flush();
         for (int i = 0; i < 10000; i++)
         {
             execute("insert into %s (id, t) values (?, ?)", i, ByteBuffer.wrap(blob));
         }
-        getCurrentColumnFamilyStore().forceBlockingFlush();
-        DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMB(1);
+        flush();
+        DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMiB(1);
         getCurrentColumnFamilyStore().forceMajorCompaction();
     }
 
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
index 57802cb..2b50633 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
@@ -19,7 +19,6 @@
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
@@ -113,7 +112,7 @@
 
     private void testWrite(File f, int bytesToTest, boolean useMemmap) throws IOException
     {
-        final String filename = f.getAbsolutePath();
+        final String filename = f.absolutePath();
         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance)));
 
         byte[] dataPre = new byte[bytesToTest];
@@ -171,10 +170,10 @@
         finally
         {
             if (f.exists())
-                f.delete();
+                f.tryDelete();
             File metadata = new File(f + ".metadata");
             if (metadata.exists())
-                metadata.delete();
+                metadata.tryDelete();
         }
     }
 
@@ -213,12 +212,12 @@
         b.flip();
 
         File f = FileUtils.createTempFile("testUncompressedChunks", "1");
-        String filename = f.getPath();
+        String filename = f.path();
         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance)));
         compressionParameters = new CompressionParams(MockCompressor.class.getTypeName(),
                                                       MockCompressor.paramsFor(ratio, extra),
                                                       DEFAULT_CHUNK_LENGTH, ratio);
-        try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, f.getPath() + ".metadata",
+        try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, f.path() + ".metadata",
                                                                                 null, SequentialWriterOption.DEFAULT,
                                                                                 compressionParameters,
                                                                                 sstableMetadataCollector))
@@ -244,10 +243,10 @@
         finally
         {
             if (f.exists())
-                f.delete();
+                f.tryDelete();
             File metadata = new File(f + ".metadata");
             if (metadata.exists())
-                metadata.delete();
+                metadata.tryDelete();
         }
 
     }
@@ -272,12 +271,12 @@
     @Override
     public void resetAndTruncateTest()
     {
-        File tempFile = new File(Files.createTempDir(), "reset.txt");
+        File tempFile = new File(Files.createTempDir().toPath(), "reset.txt");
         File offsetsFile = FileUtils.createDeletableTempFile("compressedsequentialwriter.offset", "test");
         final int bufferSize = 48;
         final int writeSize = 64;
         byte[] toWrite = new byte[writeSize];
-        try (SequentialWriter writer = new CompressedSequentialWriter(tempFile, offsetsFile.getPath(),
+        try (SequentialWriter writer = new CompressedSequentialWriter(tempFile, offsetsFile.path(),
                                                                       null, SequentialWriterOption.DEFAULT,
                                                                       CompressionParams.lz4(bufferSize),
                                                                       new MetadataCollector(new ClusteringComparator(UTF8Type.instance))))
@@ -331,7 +330,7 @@
 
         private TestableCSW(File file, File offsetsFile) throws IOException
         {
-            this(file, offsetsFile, new CompressedSequentialWriter(file, offsetsFile.getPath(),
+            this(file, offsetsFile, new CompressedSequentialWriter(file, offsetsFile.path(),
                                                                    null, SequentialWriterOption.DEFAULT,
                                                                    CompressionParams.lz4(BUFFER_SIZE, MAX_COMPRESSED),
                                                                    new MetadataCollector(new ClusteringComparator(UTF8Type.instance))));
@@ -348,7 +347,7 @@
         {
             Assert.assertTrue(file.exists());
             Assert.assertFalse(offsetsFile.exists());
-            byte[] compressed = readFileToByteArray(file);
+            byte[] compressed = readFileToByteArray(file.toJavaIOFile());
             byte[] uncompressed = new byte[partialContents.length];
             LZ4Compressor.create(Collections.<String, String>emptyMap()).uncompress(compressed, 0, compressed.length - 4, uncompressed, 0);
             Assert.assertTrue(Arrays.equals(partialContents, uncompressed));
@@ -358,7 +357,7 @@
         {
             Assert.assertTrue(file.exists());
             Assert.assertTrue(offsetsFile.exists());
-            DataInputStream offsets = new DataInputStream(new ByteArrayInputStream(readFileToByteArray(offsetsFile)));
+            DataInputStream offsets = new DataInputStream(new ByteArrayInputStream(readFileToByteArray(offsetsFile.toJavaIOFile())));
             Assert.assertTrue(offsets.readUTF().endsWith("LZ4Compressor"));
             Assert.assertEquals(0, offsets.readInt());
             Assert.assertEquals(BUFFER_SIZE, offsets.readInt());
@@ -367,7 +366,7 @@
             Assert.assertEquals(2, offsets.readInt());
             Assert.assertEquals(0, offsets.readLong());
             int offset = (int) offsets.readLong();
-            byte[] compressed = readFileToByteArray(file);
+            byte[] compressed = readFileToByteArray(file.toJavaIOFile());
             byte[] uncompressed = new byte[fullContents.length];
             LZ4Compressor.create(Collections.<String, String>emptyMap()).uncompress(compressed, 0, offset - 4, uncompressed, 0);
             LZ4Compressor.create(Collections.<String, String>emptyMap()).uncompress(compressed, offset, compressed.length - (4 + offset), uncompressed, partialContents.length);
@@ -381,8 +380,8 @@
 
         void cleanup()
         {
-            file.delete();
-            offsetsFile.delete();
+            file.tryDelete();
+            offsetsFile.tryDelete();
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressorTest.java b/test/unit/org/apache/cassandra/io/compress/CompressorTest.java
index 29e8453..dad3ae4 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressorTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressorTest.java
@@ -27,6 +27,7 @@
 import java.util.Random;
 
 import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
 import static org.junit.Assert.*;
 import org.junit.Assert;
 import org.junit.Test;
@@ -143,7 +144,7 @@
         dest.clear();
         channel.write(dest);
 
-        MappedByteBuffer mappedData = Files.map(temp);
+        MappedByteBuffer mappedData = Files.map(temp.toJavaIOFile());
         ByteBuffer result = makeBB(data.length + 100);
         mappedData.position(outOffset).limit(outOffset + compressedLength);
 
diff --git a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
index 9e3594b..14d3c5e 100644
--- a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
@@ -18,7 +18,7 @@
 */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import java.io.IOException;
 
 import org.junit.BeforeClass;
diff --git a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
index 61ac017..5ee2768 100644
--- a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
@@ -17,11 +17,12 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
-import java.io.FilenameFilter;
+
 import java.io.IOException;
+import java.util.function.BiPredicate;
 
 import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,8 +40,8 @@
     @Before
     public void setUp()
     {
-        this.testDirectory = Files.createTempDir();
-        DatabaseDescriptor.daemonInitialization();
+        this.testDirectory = new File(Files.createTempDir());
+        DatabaseDescriptor.clientInitialization();
     }
 
     @Test
@@ -73,16 +74,9 @@
         writer.close();
         writer2.close();
 
-        FilenameFilter filter = new FilenameFilter()
-        {
-            @Override
-            public boolean accept(File dir, String name)
-            {
-                return name.endsWith("-Data.db");
-            }
-        };
+        BiPredicate<File, String> filter = (dir, name) -> name.endsWith("-Data.db");
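+        // tryList(filter) on org.apache.cassandra.io.util.File takes the place of the old java.io.File#listFiles(FilenameFilter)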
 
-        File[] dataFiles = this.testDirectory.listFiles(filter);
+        File[] dataFiles = this.testDirectory.tryList(filter);
         assertEquals(2, dataFiles.length);
     }
 
diff --git a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
index f43294c..1851314 100644
--- a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
@@ -17,21 +17,20 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
-import java.io.FilenameFilter;
+
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiPredicate;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -47,13 +46,11 @@
 import org.apache.cassandra.cql3.functions.types.*;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadataRef;
-import org.apache.cassandra.serializers.SimpleDateSerializer;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.*;
@@ -94,8 +91,8 @@
         keyspace = "cql_keyspace" + idGen.incrementAndGet();
         table = "table" + idGen.incrementAndGet();
         qualifiedTable = keyspace + '.' + table;
-        dataDir = new File(tempFolder.newFolder().getAbsolutePath() + File.separator + keyspace + File.separator + table);
-        assert dataDir.mkdirs();
+        dataDir = new File(tempFolder.newFolder().getAbsolutePath() + File.pathSeparator() + keyspace + File.pathSeparator() + table);
+        assert dataDir.tryCreateDirectories();
     }
 
     @Test
@@ -171,7 +168,7 @@
         }
         catch (IllegalArgumentException e)
         {
-            assertEquals(e.getMessage(), "Counter update statements are not supported");
+            assertEquals(e.getMessage(), "Counter modification statements are not supported");
         }
     }
 
@@ -179,8 +176,8 @@
     public void testSyncWithinPartition() throws Exception
     {
         // Check that the writes respect the buffer size even if we only insert rows within the same partition (#7360)
-        // To do that simply, we use a writer with a buffer of 1MB, and write 2 rows in the same partition with a value
-        // > 1MB and validate that this created more than 1 sstable.
+        // To do that simply, we use a writer with a buffer of 1MiB, and write 2 rows in the same partition with a value
+        // > 1MiB and validate that this created more than 1 sstable.
         String schema = "CREATE TABLE " + qualifiedTable + " ("
                       + "  k int PRIMARY KEY,"
                       + "  v blob"
@@ -199,14 +196,8 @@
         writer.addRow(1, val);
         writer.close();
 
-        FilenameFilter filterDataFiles = new FilenameFilter()
-        {
-            public boolean accept(File dir, String name)
-            {
-                return name.endsWith("-Data.db");
-            }
-        };
-        assert dataDir.list(filterDataFiles).length > 1 : Arrays.toString(dataDir.list(filterDataFiles));
+        BiPredicate<File, String> filterDataFiles = (dir, name) -> name.endsWith("-Data.db");
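+        // values larger than the 1 MiB buffer force intermediate flushes, so more than one -Data.db file is expected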
+        assert dataDir.tryListNames(filterDataFiles).length > 1 : Arrays.toString(dataDir.tryListNames(filterDataFiles));
     }
 
 
@@ -234,7 +225,286 @@
 
     }
 
+    @Test
+    public void testDeleteStatement() throws Exception
+    {
 
+        final String schema = "CREATE TABLE " + qualifiedTable + " ("
+                              + "  k int,"
+                              + "  c1 int,"
+                              + "  c2 int,"
+                              + "  v text,"
+                              + "  PRIMARY KEY (k, c1, c2)"
+                              + ")";
+
+        testUpdateStatement(); // start by adding some data
+        UntypedResultSet resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(2, resultSet.size());
+
+        CQLSSTableWriter writer = CQLSSTableWriter.builder()
+                                                  .inDirectory(dataDir)
+                                                  .forTable(schema)
+                                                  .using("DELETE FROM " + qualifiedTable +
+                                                         " WHERE k = ? AND c1 = ? AND c2 = ?")
+                                                  .build();
+
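+        // binding the full primary key (k, c1, c2) makes each addRow a single-row deletion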
+        writer.addRow(1, 2, 3);
+        writer.addRow(4, 5, 6);
+        writer.close();
+        loadSSTables(dataDir, keyspace);
+
+        resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(0, resultSet.size());
+        Iterator<UntypedResultSet.Row> iter = resultSet.iterator();
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void testDeletePartition() throws Exception
+    {
+
+        final String schema = "CREATE TABLE " + qualifiedTable + " ("
+                              + "  k int,"
+                              + "  c1 int,"
+                              + "  c2 int,"
+                              + "  v text,"
+                              + "  PRIMARY KEY (k, c1, c2)"
+                              + ")";
+
+        // First, write some rows
+        CQLSSTableWriter writer = CQLSSTableWriter.builder()
+                                                  .inDirectory(dataDir)
+                                                  .forTable(schema)
+                                                  .using("INSERT INTO " + qualifiedTable + " (k, c1, c2, v) " +
+                                                         "VALUES (?, ?, ?, ?)")
+                                                  .build();
+
+        writer.addRow(1, 2, 3, "a");
+        writer.addRow(1, 4, 5, "b");
+        writer.addRow(1, 6, 7, "c");
+        writer.addRow(2, 8, 9, "d");
+
+        writer.close();
+        loadSSTables(dataDir, keyspace);
+
+        UntypedResultSet resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(4, resultSet.size());
+        Iterator<UntypedResultSet.Row> iter = resultSet.iterator();
+        UntypedResultSet.Row r1 = iter.next();
+        assertEquals(1, r1.getInt("k"));
+        assertEquals(2, r1.getInt("c1"));
+        assertEquals(3, r1.getInt("c2"));
+        assertEquals("a", r1.getString("v"));
+        UntypedResultSet.Row r2 = iter.next();
+        assertEquals(1, r2.getInt("k"));
+        assertEquals(4, r2.getInt("c1"));
+        assertEquals(5, r2.getInt("c2"));
+        assertEquals("b", r2.getString("v"));
+        UntypedResultSet.Row r3 = iter.next();
+        assertEquals(1, r3.getInt("k"));
+        assertEquals(6, r3.getInt("c1"));
+        assertEquals(7, r3.getInt("c2"));
+        assertEquals("c", r3.getString("v"));
+        UntypedResultSet.Row r4 = iter.next();
+        assertEquals(2, r4.getInt("k"));
+        assertEquals(8, r4.getInt("c1"));
+        assertEquals(9, r4.getInt("c2"));
+        assertEquals("d", r4.getString("v"));
+        assertFalse(iter.hasNext());
+
+        writer = CQLSSTableWriter.builder()
+                                 .inDirectory(dataDir)
+                                 .forTable(schema)
+                                 .using("DELETE FROM " + qualifiedTable +
+                                        " WHERE k = ?")
+                                 .build();
+
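+        // only the partition key is bound, so this addRow issues a partition-level deletion covering every row with k = 1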
+        writer.addRow(1);
+        writer.close();
+        loadSSTables(dataDir, keyspace);
+
+        resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(1, resultSet.size());
+        iter = resultSet.iterator();
+        UntypedResultSet.Row r5 = iter.next();
+        assertEquals(2, r5.getInt("k"));
+        assertEquals(8, r5.getInt("c1"));
+        assertEquals(9, r5.getInt("c2"));
+        assertEquals("d", r5.getString("v"));
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void testDeleteRange() throws Exception
+    {
+
+        final String schema = "CREATE TABLE " + qualifiedTable + " ("
+                              + "  k text,"
+                              + "  c1 int,"
+                              + "  c2 int,"
+                              + "  v text,"
+                              + "  PRIMARY KEY (k, c1, c2)"
+                              + ")";
+
+        CQLSSTableWriter updateWriter = CQLSSTableWriter.builder()
+                                                        .inDirectory(dataDir)
+                                                        .forTable(schema)
+                                                        .using(String.format("UPDATE %s SET v=? WHERE k=? AND c1=? AND c2=?", qualifiedTable))
+                                                        .build();
+        CQLSSTableWriter deleteWriter = CQLSSTableWriter.builder()
+                                                        .inDirectory(dataDir)
+                                                        .forTable(schema)
+                                                        .using(String.format("DELETE FROM %s WHERE k=? AND c1=? and c2>=?", qualifiedTable))
+                                                        .build();
+
+        updateWriter.addRow("v0.0", "a", 0, 0);
+        updateWriter.addRow("v0.1", "a", 0, 1);
+        updateWriter.addRow("v0.2", "a", 0, 2);
+        updateWriter.addRow("v0.0", "b", 0, 0);
+        updateWriter.addRow("v0.1", "b", 0, 1);
+        updateWriter.addRow("v0.2", "b", 0, 2);
+        updateWriter.close();
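+        // c2 >= ? produces a range tombstone: rows with c2 at or above the bound are removed, lower c2 values survive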
+        deleteWriter.addRow("a", 0, 1);
+        deleteWriter.addRow("b", 0, 2);
+        deleteWriter.close();
+        loadSSTables(dataDir, keyspace);
+
+        UntypedResultSet resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(3, resultSet.size());
+
+        Iterator<UntypedResultSet.Row> iter = resultSet.iterator();
+        UntypedResultSet.Row r1 = iter.next();
+        assertEquals("a", r1.getString("k"));
+        assertEquals(0, r1.getInt("c1"));
+        assertEquals(0, r1.getInt("c2"));
+        UntypedResultSet.Row r2 = iter.next();
+        assertEquals("b", r2.getString("k"));
+        assertEquals(0, r2.getInt("c1"));
+        assertEquals(0, r2.getInt("c2"));
+        UntypedResultSet.Row r3 = iter.next();
+        assertEquals("b", r3.getString("k"));
+        assertEquals(0, r3.getInt("c1"));
+        assertEquals(1, r3.getInt("c2"));
+    }
+
+    @Test
+    public void testDeleteRangeEmptyKeyComponent() throws Exception
+    {
+
+        final String schema = "CREATE TABLE " + qualifiedTable + " ("
+                              + "  k text,"
+                              + "  c1 int,"
+                              + "  c2 int,"
+                              + "  v text,"
+                              + "  PRIMARY KEY (k, c1, c2)"
+                              + ")";
+
+        CQLSSTableWriter updateWriter = CQLSSTableWriter.builder()
+                                                        .inDirectory(dataDir)
+                                                        .forTable(schema)
+                                                        .using(String.format("UPDATE %s SET v=? WHERE k=? AND c1=? AND c2=?", qualifiedTable))
+                                                        .build();
+        CQLSSTableWriter deleteWriter = CQLSSTableWriter.builder()
+                                                        .inDirectory(dataDir)
+                                                        .forTable(schema)
+                                                        .using(String.format("DELETE FROM %s WHERE k=? AND c1=?", qualifiedTable))
+                                                        .build();
+
+        updateWriter.addRow("v0.0", "a", 0, 0);
+        updateWriter.addRow("v0.1", "a", 0, 1);
+        updateWriter.addRow("v0.2", "a", 1, 2);
+        updateWriter.addRow("v0.0", "b", 0, 0);
+        updateWriter.addRow("v0.1", "b", 0, 1);
+        updateWriter.addRow("v0.2", "b", 1, 2);
+        updateWriter.close();
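+        // leaving c2 unbound deletes the whole (k, c1) slice, i.e. every row under that clustering prefix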
+        deleteWriter.addRow("a", 0);
+        deleteWriter.addRow("b", 0);
+        deleteWriter.close();
+        loadSSTables(dataDir, keyspace);
+
+        UntypedResultSet resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(2, resultSet.size());
+
+        Iterator<UntypedResultSet.Row> iter = resultSet.iterator();
+        UntypedResultSet.Row r1 = iter.next();
+        assertEquals("a", r1.getString("k"));
+        assertEquals(1, r1.getInt("c1"));
+        assertEquals(2, r1.getInt("c2"));
+        UntypedResultSet.Row r2 = iter.next();
+        assertEquals("b", r2.getString("k"));
+        assertEquals(1, r2.getInt("c1"));
+        assertEquals(2, r2.getInt("c2"));
+    }
+
+    @Test
+    public void testDeleteValue() throws Exception
+    {
+        final String schema = "CREATE TABLE " + qualifiedTable + " ("
+                              + "  k text,"
+                              + "  c1 int,"
+                              + "  c2 int,"
+                              + "  v text,"
+                              + "  PRIMARY KEY (k, c1, c2)"
+                              + ")";
+
+        CQLSSTableWriter insertWriter = CQLSSTableWriter.builder()
+                                                        .inDirectory(dataDir)
+                                                        .forTable(schema)
+                                                        .using(String.format("INSERT INTO %s (v, k, c1, c2) values (?, ?, ?, ?)", qualifiedTable))
+                                                        .build();
+
+        // UPDATE does not set the row's liveness information, just the cells'. So when we delete the value from rows
+        // added with the updateWriter, the entire row will no longer exist, not just the value.
+        CQLSSTableWriter updateWriter = CQLSSTableWriter.builder()
+                                                        .inDirectory(dataDir)
+                                                        .forTable(schema)
+                                                        .using(String.format("UPDATE %s SET v=? WHERE k=? AND c1=? AND c2=?", qualifiedTable))
+                                                        .build();
+
+        CQLSSTableWriter deleteWriter = CQLSSTableWriter.builder()
+                                                  .inDirectory(dataDir)
+                                                  .forTable(schema)
+                                                  .using("DELETE v FROM " + qualifiedTable +
+                                                         " WHERE k = ? AND c1 = ? AND c2 = ?")
+                                                  .build();
+
+        insertWriter.addRow("v0.2", "a", 1, 2);
+        insertWriter.close();
+
+        updateWriter.addRow("v0.3", "b", 3, 4);
+        updateWriter.close();
+
+        loadSSTables(dataDir, keyspace);
+
+        UntypedResultSet resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(2, resultSet.size());
+        Iterator<UntypedResultSet.Row> iter = resultSet.iterator();
+        UntypedResultSet.Row insertedRow = iter.next();
+        assertEquals("v0.2", insertedRow.getString("v"));
+        assertEquals("a", insertedRow.getString("k"));
+        assertEquals(1, insertedRow.getInt("c1"));
+        assertEquals(2, insertedRow.getInt("c2"));
+        UntypedResultSet.Row updatedRow = iter.next();
+        assertEquals("v0.3", updatedRow.getString("v"));
+        assertEquals("b", updatedRow.getString("k"));
+        assertEquals(3, updatedRow.getInt("c1"));
+        assertEquals(4, updatedRow.getInt("c2"));
+
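+        // delete only the v cell from both rows: the INSERTed row keeps its row liveness and survives without v,
+        // while the UPDATEd row (cell liveness only) disappears entirely, as noted above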
+        deleteWriter.addRow("a", 1, 2);
+        deleteWriter.addRow("b", 3, 4);
+        deleteWriter.close();
+        loadSSTables(dataDir, keyspace);
+
+        resultSet = QueryProcessor.executeInternal("SELECT * FROM " + qualifiedTable);
+        assertEquals(1, resultSet.size());
+        iter = resultSet.iterator();
+        UntypedResultSet.Row modifiedRow = iter.next();
+        assertFalse(modifiedRow.has("v"));
+        assertEquals("a", modifiedRow.getString("k"));
+        assertEquals(1, modifiedRow.getInt("c1"));
+        assertEquals(2, modifiedRow.getInt("c2"));
+    }
 
     private static final int NUMBER_WRITES_IN_RUNNABLE = 10;
     private class WriterThread extends Thread
diff --git a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
index 03681b8..405f3da 100644
--- a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
@@ -17,10 +17,10 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.UUID;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.lang3.StringUtils;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -45,7 +45,7 @@
     public DescriptorTest() throws IOException
     {
         // create CF directories, one without CFID and one with it
-        tempDataDir = FileUtils.createTempFile("DescriptorTest", null).getParentFile();
+        tempDataDir = FileUtils.createTempFile("DescriptorTest", null).parent();
     }
 
     @BeforeClass
@@ -57,39 +57,39 @@
     @Test
     public void testFromFilename() throws Exception
     {
-        File cfIdDir = new File(tempDataDir.getAbsolutePath() + File.separator + ksname + File.separator + cfname + '-' + cfId);
+        File cfIdDir = new File(tempDataDir.absolutePath() + File.pathSeparator() + ksname + File.pathSeparator() + cfname + '-' + cfId);
         testFromFilenameFor(cfIdDir);
     }
 
     @Test
     public void testFromFilenameInBackup() throws Exception
     {
-        File backupDir = new File(StringUtils.join(new String[]{tempDataDir.getAbsolutePath(), ksname, cfname + '-' + cfId, Directories.BACKUPS_SUBDIR}, File.separator));
+        File backupDir = new File(StringUtils.join(new String[]{ tempDataDir.absolutePath(), ksname, cfname + '-' + cfId, Directories.BACKUPS_SUBDIR}, File.pathSeparator()));
         testFromFilenameFor(backupDir);
     }
 
     @Test
     public void testFromFilenameInSnapshot() throws Exception
     {
-        File snapshotDir = new File(StringUtils.join(new String[]{tempDataDir.getAbsolutePath(), ksname, cfname + '-' + cfId, Directories.SNAPSHOT_SUBDIR, "snapshot_name"}, File.separator));
+        File snapshotDir = new File(StringUtils.join(new String[]{ tempDataDir.absolutePath(), ksname, cfname + '-' + cfId, Directories.SNAPSHOT_SUBDIR, "snapshot_name"}, File.pathSeparator()));
         testFromFilenameFor(snapshotDir);
     }
 
     @Test
     public void testFromFilenameInLegacyDirectory() throws Exception
     {
-        File cfDir = new File(tempDataDir.getAbsolutePath() + File.separator + ksname + File.separator + cfname);
+        File cfDir = new File(tempDataDir.absolutePath() + File.pathSeparator() + ksname + File.pathSeparator() + cfname);
         testFromFilenameFor(cfDir);
     }
 
     private void testFromFilenameFor(File dir)
     {
-        checkFromFilename(new Descriptor(dir, ksname, cfname, 1, SSTableFormat.Type.BIG));
+        checkFromFilename(new Descriptor(dir, ksname, cfname, new SequenceBasedSSTableId(1), SSTableFormat.Type.BIG));
 
         // secondary index
         String idxName = "myidx";
-        File idxDir = new File(dir.getAbsolutePath() + File.separator + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName);
-        checkFromFilename(new Descriptor(idxDir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 4, SSTableFormat.Type.BIG));
+        File idxDir = new File(dir.absolutePath() + File.pathSeparator() + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName);
+        checkFromFilename(new Descriptor(idxDir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, new SequenceBasedSSTableId(4), SSTableFormat.Type.BIG));
     }
 
     private void checkFromFilename(Descriptor original)
@@ -103,7 +103,7 @@
         assertEquals(original.ksname, desc.ksname);
         assertEquals(original.cfname, desc.cfname);
         assertEquals(original.version, desc.version);
-        assertEquals(original.generation, desc.generation);
+        assertEquals(original.id, desc.id);
         assertEquals(Component.DATA, pair.right);
     }
 
@@ -112,8 +112,8 @@
     {
         // Descriptor should be equal when parent directory points to the same directory
         File dir = new File(".");
-        Descriptor desc1 = new Descriptor(dir, "ks", "cf", 1, SSTableFormat.Type.BIG);
-        Descriptor desc2 = new Descriptor(dir.getAbsoluteFile(), "ks", "cf", 1, SSTableFormat.Type.BIG);
+        Descriptor desc1 = new Descriptor(dir, "ks", "cf", new SequenceBasedSSTableId(1), SSTableFormat.Type.BIG);
+        Descriptor desc2 = new Descriptor(dir.toAbsolute(), "ks", "cf", new SequenceBasedSSTableId(1), SSTableFormat.Type.BIG);
         assertEquals(desc1, desc2);
         assertEquals(desc1.hashCode(), desc2.hashCode());
     }
@@ -124,7 +124,7 @@
         String[] names = {
              "ma-1-big-Data.db",
              // 2ndary index
-             ".idx1" + File.separator + "ma-1-big-Data.db",
+             ".idx1" + File.pathSeparator() + "ma-1-big-Data.db",
         };
 
         for (String name : names)
@@ -160,12 +160,12 @@
         // from Cassandra dirs
 
         String[] filePaths = new String[]{
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/snapshots/snapshot/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/backups/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/snapshots/snapshot/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/backups/nb-22-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/snapshots/snapshot/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/backups/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/snapshots/snapshot/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/backups/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
         };
 
         testKeyspaceTableParsing(filePaths, "ks1", "tab1");
@@ -173,12 +173,12 @@
         // indexes
 
         String[] filePathsIndexes = new String[]{
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/snapshots/snapshot/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/backups/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/.index/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/snapshots/snapshot/.index/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-34234234234234234234234234234234/backups/.index/nb-22-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/snapshots/snapshot/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/backups/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/snapshots/snapshot/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/ks1/tab1-3424234234324/backups/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
         };
 
         testKeyspaceTableParsing(filePathsIndexes, "ks1", "tab1.index");
@@ -186,23 +186,23 @@
         // what if even a snapshot of a keyspace and table called snapshots is called snapshots?
 
         String[] filePathsWithSnapshotKeyspaceAndTable = new String[]{
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/snapshots/snapshots/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/backups/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/snapshots/snapshots/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/backups/nb-22-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/snapshots/snapshots/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/backups/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/snapshots/snapshots/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/backups/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
         };
 
         testKeyspaceTableParsing(filePathsWithSnapshotKeyspaceAndTable, "snapshots", "snapshots");
 
         String[] filePathsWithSnapshotKeyspaceAndTableWithIndices = new String[]{
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/snapshots/snapshots/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/backups/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/.index/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/snapshots/snapshots/.index/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-74273842738947874273842738947878/backups/.index/nb-22-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/snapshots/snapshots/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/backups/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/snapshots/snapshots/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/snapshots/snapshots-742738427389478/backups/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
         };
 
         testKeyspaceTableParsing(filePathsWithSnapshotKeyspaceAndTableWithIndices, "snapshots", "snapshots.index");
@@ -210,12 +210,12 @@
         // what if keyspace and table is called backups?
 
         String[] filePathsWithBackupsKeyspaceAndTable = new String[]{
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/snapshots/snapshots/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/backups/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/snapshots/snapshots/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/backups/nb-22-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/snapshots/snapshots/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/backups/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/snapshots/snapshots/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/backups/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
         };
 
         testKeyspaceTableParsing(filePathsWithBackupsKeyspaceAndTable, "backups", "backups");
@@ -236,60 +236,60 @@
         // what if even a snapshot of a keyspace and table called backups is called snapshots?
 
         String[] filePathsWithBackupsKeyspaceAndTableWithIndices = new String[]{
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/snapshots/snapshots/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/backups/.index/na-1-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/.index/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/snapshots/snapshots/.index/nb-22-big-Index.db",
-        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-74273842738947874273842738947878/backups/.index/nb-22-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/snapshots/snapshots/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/backups/.index/na-1-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/snapshots/snapshots/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/path/to/cassandra/data/dir2/dir5/dir6/backups/backups-742738427389478/backups/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
         };
 
         testKeyspaceTableParsing(filePathsWithBackupsKeyspaceAndTableWithIndices, "backups", "backups.index");
 
         String[] outsideOfCassandra = new String[]{
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/na-1-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/snapshots/snapshots/na-1-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/backups/na-1-big-Index.db",
-        "/tmp/tests/keyspace/table-34234234234234234234234234234234/na-1-big-Index.db",
-        "/keyspace/table-34234234234234234234234234234234/nb-22-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/nb-22-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/snapshots/snapshots/nb-22-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/backups/nb-22-big-Index.db",
-        "/tmp/tests/keyspace/table-34234234234234234234234234234234/nb-22-big-Index.db",
-        "/keyspace/table-34234234234234234234234234234234/nb-22-big-Index.db"
+        "/tmp/some/path/tests/keyspace/table-3424234234234/na-1-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-3424234234234/snapshots/snapshots/na-1-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-3424234234234/backups/na-1-big-Index.db",
+        "/tmp/tests/keyspace/table-3424234234234/na-1-big-Index.db",
+        "/keyspace/table-3424234234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-3424234234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-3424234234234/snapshots/snapshots/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-3424234234234/backups/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/tests/keyspace/table-3424234234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/keyspace/table-3424234234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db"
         };
 
         testKeyspaceTableParsing(outsideOfCassandra, "keyspace", "table");
 
         String[] outsideOfCassandraUppercaseKeyspaceAndTable = new String[]{
-        "/tmp/some/path/tests/Keyspace/Table-34234234234234234234234234234234/na-1-big-Index.db",
-        "/tmp/some/path/tests/Keyspace/Table-34234234234234234234234234234234/snapshots/snapshots/na-1-big-Index.db",
-        "/tmp/some/path/tests/Keyspace/Table-34234234234234234234234234234234/backups/na-1-big-Index.db",
-        "/tmp/tests/Keyspace/Table-34234234234234234234234234234234/na-1-big-Index.db",
-        "/Keyspace/Table-34234234234234234234234234234234/nb-22-big-Index.db",
-        "/tmp/some/path/tests/Keyspace/Table-34234234234234234234234234234234/nb-22-big-Index.db",
-        "/tmp/some/path/tests/Keyspace/Table-34234234234234234234234234234234/snapshots/snapshots/nb-22-big-Index.db",
-        "/tmp/some/path/tests/Keyspace/Table-34234234234234234234234234234234/backups/nb-22-big-Index.db",
-        "/tmp/tests/Keyspace/Table-34234234234234234234234234234234/nb-22-big-Index.db",
-        "/Keyspace/Table-34234234234234234234234234234234/nb-22-big-Index.db"
+        "/tmp/some/path/tests/Keyspace/Table-23424324234234/na-1-big-Index.db",
+        "/tmp/some/path/tests/Keyspace/Table-23424324234234/snapshots/snapshots/na-1-big-Index.db",
+        "/tmp/some/path/tests/Keyspace/Table-23424324234234/backups/na-1-big-Index.db",
+        "/tmp/tests/Keyspace/Table-23424324234234/na-1-big-Index.db",
+        "/Keyspace/Table-23424324234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/Keyspace/Table-23424324234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/Keyspace/Table-23424324234234/snapshots/snapshots/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/Keyspace/Table-23424324234234/backups/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/tests/Keyspace/Table-23424324234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/Keyspace/Table-23424324234234/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db"
         };
 
         testKeyspaceTableParsing(outsideOfCassandraUppercaseKeyspaceAndTable, "Keyspace", "Table");
 
         String[] outsideOfCassandraIndexes = new String[]{
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/.index/na-1-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/snapshots/snapshots/.index/na-1-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/backups/.index/na-1-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/.index/nb-22-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/snapshots/snapshots/.index/nb-22-big-Index.db",
-        "/tmp/some/path/tests/keyspace/table-34234234234234234234234234234234/backups/.index/nb-22-big-Index.db"
+        "/tmp/some/path/tests/keyspace/table-32423423423423/.index/na-1-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-32423423423423/snapshots/snapshots/.index/na-1-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-32423423423423/backups/.index/na-1-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-32423423423423/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-32423423423423/snapshots/snapshots/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db",
+        "/tmp/some/path/tests/keyspace/table-32423423423423/backups/.index/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db"
         };
 
         testKeyspaceTableParsing(outsideOfCassandraIndexes, "keyspace", "table.index");
 
         String[] counterFiles = new String[] {
         "/path/to/cassandra/data/dir2/dir6_other/Keyspace1/counter1-246467e01ea111ebbeafc3f73b4a4f2e/na-3-big-CRC.db",
-        "/path/to/cassandra/data/dir2/dir6_other/Keyspace1/counter1-246467e01ea111ebbeafc3f73b4a4f2e/nb-22-big-Index.db"
+        "/path/to/cassandra/data/dir2/dir6_other/Keyspace1/counter1-246467e01ea111ebbeafc3f73b4a4f2e/nb-3g1m_0nuf_3vj5m2k1125165rxa7-big-Index.db"
         };
 
         testKeyspaceTableParsing(counterFiles, "Keyspace1", "counter1");
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
index 05cbb0b..c494e3c 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
@@ -51,7 +51,7 @@
 import org.apache.cassandra.metrics.RestorableMeter;
 import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -61,6 +61,7 @@
 import static org.apache.cassandra.io.sstable.Downsampling.BASE_SAMPLING_LEVEL;
 import static org.apache.cassandra.io.sstable.IndexSummaryRedistribution.DOWNSAMPLE_THESHOLD;
 import static org.apache.cassandra.io.sstable.IndexSummaryRedistribution.UPSAMPLE_THRESHOLD;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -121,8 +122,8 @@
         Keyspace keyspace = Keyspace.open(ksname);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
 
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(originalMaxIndexInterval).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build());
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(originalMaxIndexInterval).build());
 
         IndexSummaryManager.instance.setMemoryPoolCapacityInMB(originalCapacity);
     }
@@ -191,7 +192,7 @@
                     .build()
                     .applyUnsafe();
             }
-            futures.add(cfs.forceFlush());
+            futures.add(cfs.forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS));
         }
         for (Future future : futures)
         {
@@ -227,7 +228,7 @@
             assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
 
         // double the min_index_interval
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build());
         IndexSummaryManager.instance.redistributeSummaries();
         for (SSTableReader sstable : cfs.getLiveSSTables())
         {
@@ -236,7 +237,7 @@
         }
 
         // return min_index_interval to its original value
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build());
         IndexSummaryManager.instance.redistributeSummaries();
         for (SSTableReader sstable : cfs.getLiveSSTables())
         {
@@ -246,7 +247,7 @@
 
         // halve the min_index_interval, but constrain the available space to exactly what we have now; as a result,
         // the summary shouldn't change
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval / 2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval / 2).build());
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         long summarySpace = sstable.getIndexSummaryOffHeapSize();
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN))
@@ -271,7 +272,7 @@
 
         // return min_index_interval to it's original value (double it), but only give the summary enough space
         // to have an effective index interval of twice the new min
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build());
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN))
         {
             redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (long) Math.ceil(summarySpace / 2.0));
@@ -283,8 +284,8 @@
         // raise the min_index_interval above our current effective interval, but set the max_index_interval lower
         // than what we actually have space for (meaning the index summary would ideally be smaller, but this would
         // result in an effective interval above the new max)
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 4).build(), true);
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(originalMinIndexInterval * 4).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 4).build());
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(originalMinIndexInterval * 4).build());
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN))
         {
             redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 10);
@@ -317,7 +318,7 @@
             assertEquals(cfs.metadata().params.maxIndexInterval, sstable.getEffectiveIndexInterval(), 0.01);
 
         // halve the max_index_interval
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(cfs.metadata().params.maxIndexInterval / 2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(cfs.metadata().params.maxIndexInterval / 2).build());
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN))
         {
             redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 1);
@@ -330,7 +331,7 @@
         }
 
         // return max_index_interval to its original value
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(cfs.metadata().params.maxIndexInterval * 2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(cfs.metadata().params.maxIndexInterval * 2).build());
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN))
         {
             redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 1);
@@ -516,7 +517,7 @@
             .applyUnsafe();
         }
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
         assertEquals(1, sstables.size());
@@ -584,7 +585,7 @@
                 .build()
                 .applyUnsafe();
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         assertTrue(manager.getAverageIndexInterval() >= cfs.metadata().params.minIndexInterval);
@@ -644,7 +645,7 @@
         {
             public CompactionInfo getCompactionInfo()
             {
-                return new CompactionInfo(cfs.metadata(), OperationType.UNKNOWN, 0, 0, UUID.randomUUID(), compacting);
+                return new CompactionInfo(cfs.metadata(), OperationType.UNKNOWN, 0, 0, nextTimeUUID(), compacting);
             }
 
             public boolean isGlobal()
@@ -656,7 +657,7 @@
         {
             CompactionManager.instance.active.beginCompaction(ongoingCompaction);
 
-            Thread t = NamedThreadFactory.createThread(new Runnable()
+            Thread t = NamedThreadFactory.createAnonymousThread(new Runnable()
             {
                 public void run()
                 {
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryRedistributionTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryRedistributionTest.java
index 07a2212..57e4d4e 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryRedistributionTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryRedistributionTest.java
@@ -38,7 +38,7 @@
 import org.apache.cassandra.metrics.StorageMetrics;
 import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 
 import static org.junit.Assert.assertEquals;
 
@@ -88,7 +88,7 @@
 
         int originalMinIndexInterval = cfs.metadata().params.minIndexInterval;
         // double the min_index_interval
-        MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build(), true);
+        SchemaTestUtil.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build());
         IndexSummaryManager.instance.redistributeSummaries();
 
         long newSize = 0;
@@ -125,7 +125,7 @@
                 .build()
                 .applyUnsafe();
             }
-            futures.add(cfs.forceFlush());
+            futures.add(cfs.forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS));
         }
         for (Future future : futures)
         {
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
index 01cd0dd..d0680f8 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
@@ -42,6 +42,7 @@
 import static org.apache.cassandra.io.sstable.IndexSummaryBuilder.downsample;
 import static org.apache.cassandra.io.sstable.IndexSummaryBuilder.entriesAtSamplingLevel;
 import static org.apache.cassandra.io.sstable.Downsampling.BASE_SAMPLING_LEVEL;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class IndexSummaryTest
@@ -53,7 +54,7 @@
     {
         DatabaseDescriptor.daemonInitialization();
 
-        final long seed = System.nanoTime();
+        final long seed = nanoTime();
         System.out.println("Using seed: " + seed);
         random.setSeed(seed);
     }
@@ -63,7 +64,7 @@
     @BeforeClass
     public static void setup()
     {
-        final long seed = System.nanoTime();
+        final long seed = nanoTime();
         System.out.println("Using seed: " + seed);
         random.setSeed(seed);
     }
@@ -111,7 +112,7 @@
     }
 
     /**
-     * Test an index summary whose total size is bigger than 2GB,
+     * Test an index summary whose total size is bigger than 2GiB,
      * the index summary builder should log an error but it should still
      * create an index summary, albeit one that does not cover the entire sstable.
      */
@@ -145,7 +146,7 @@
     }
 
     /**
-     * Test an index summary whose total size is bigger than 2GB,
+     * Test an index summary whose total size is bigger than 2GiB,
      * having updated IndexSummaryBuilder.defaultExpectedKeySize to match the size,
      * the index summary should be downsampled automatically.
      */
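
Aside: the seed handling above (System.nanoTime() swapped for Clock.Global.nanoTime()) is the usual reproducible-random pattern for these tests. A minimal, standalone JDK sketch of that pattern, not Cassandra's Clock abstraction, follows; the class name and printed message are placeholders.

    import java.util.Random;

    // Minimal sketch: log the seed up front so a failing randomized test can be replayed.
    // Cassandra's tests obtain the seed from Clock.Global.nanoTime() so mocked clocks stay
    // consistent; plain System.nanoTime() is used here instead.
    public class SeededRandomSketch
    {
        public static void main(String[] args)
        {
            final long seed = System.nanoTime();
            System.out.println("Using seed: " + seed); // printed first, so it survives a later failure

            Random random = new Random(seed);
            System.out.println("sampled = " + random.nextInt(128)); // any randomized work goes here

            // To reproduce a failure, seed a new Random with the logged value and re-run.
        }
    }
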
diff --git a/test/unit/org/apache/cassandra/io/sstable/LargePartitionsTest.java b/test/unit/org/apache/cassandra/io/sstable/LargePartitionsTest.java
index 7ff7845..17fb0f2 100644
--- a/test/unit/org/apache/cassandra/io/sstable/LargePartitionsTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/LargePartitionsTest.java
@@ -59,24 +59,24 @@
         return new String(ch);
     }
 
-    private static final int rowKBytes = 8;
+    private static final int rowKibibytes = 8;
 
-    private void withPartitionSize(long partitionKBytes, long totalMBytes) throws Throwable
+    private void withPartitionSize(long partitionKibibytes, long totalMBytes) throws Throwable
     {
-        long totalKBytes = totalMBytes * 1024L;
+        long totalKibibytes = totalMBytes * 1024L;
 
         createTable("CREATE TABLE %s (pk text, ck text, val text, PRIMARY KEY (pk, ck))");
 
-        String name = "part=" + partitionKBytes + "k total=" + totalMBytes + 'M';
+        String name = "part=" + partitionKibibytes + "k total=" + totalMBytes + 'M';
 
         measured("INSERTs for " + name, () -> {
-            for (long writtenKBytes = 0L; writtenKBytes < totalKBytes; writtenKBytes += partitionKBytes)
+            for (long writtenKibibytes = 0L; writtenKibibytes < totalKibibytes; writtenKibibytes += partitionKibibytes)
             {
-                String pk = Long.toBinaryString(writtenKBytes);
-                for (long kbytes = 0L; kbytes < partitionKBytes; kbytes += rowKBytes)
+                String pk = Long.toBinaryString(writtenKibibytes);
+                for (long kibibytes = 0L; kibibytes < partitionKibibytes; kibibytes += rowKibibytes)
                 {
-                    String ck = Long.toBinaryString(kbytes);
-                    execute("INSERT INTO %s (pk, ck, val) VALUES (?,?,?)", pk, ck, randomText(rowKBytes * 1024));
+                    String ck = Long.toBinaryString(kibibytes);
+                    execute("INSERT INTO %s (pk, ck, val) VALUES (?,?,?)", pk, ck, randomText(rowKibibytes * 1024));
                 }
             }
         });
@@ -91,20 +91,20 @@
             keyCacheMetrics("after compaction");
         });
 
-        measured("SELECTs 1 for " + name, () -> selects(partitionKBytes, totalKBytes));
+        measured("SELECTs 1 for " + name, () -> selects(partitionKibibytes, totalKibibytes));
 
-        measured("SELECTs 2 for " + name, () -> selects(partitionKBytes, totalKBytes));
+        measured("SELECTs 2 for " + name, () -> selects(partitionKibibytes, totalKibibytes));
 
         CacheService.instance.keyCache.clear();
-        measured("Scan for " + name, () -> scan(partitionKBytes, totalKBytes));
+        measured("Scan for " + name, () -> scan(partitionKibibytes, totalKibibytes));
     }
 
-    private void selects(long partitionKBytes, long totalKBytes) throws Throwable
+    private void selects(long partitionKibibytes, long totalKibibytes) throws Throwable
     {
         for (int i = 0; i < 50000; i++)
         {
-            long pk = ThreadLocalRandom.current().nextLong(totalKBytes / partitionKBytes) * partitionKBytes;
-            long ck = ThreadLocalRandom.current().nextLong(partitionKBytes / rowKBytes) * rowKBytes;
+            long pk = ThreadLocalRandom.current().nextLong(totalKibibytes / partitionKibibytes) * partitionKibibytes;
+            long ck = ThreadLocalRandom.current().nextLong(partitionKibibytes / rowKibibytes) * rowKibibytes;
             execute("SELECT val FROM %s WHERE pk=? AND ck=?",
                     Long.toBinaryString(pk),
                     Long.toBinaryString(ck)).one();
@@ -114,9 +114,9 @@
         keyCacheMetrics("after all selects");
     }
 
-    private void scan(long partitionKBytes, long totalKBytes) throws Throwable
+    private void scan(long partitionKibibytes, long totalKibibytes) throws Throwable
     {
-        long pk = ThreadLocalRandom.current().nextLong(totalKBytes / partitionKBytes) * partitionKBytes;
+        long pk = ThreadLocalRandom.current().nextLong(totalKibibytes / partitionKibibytes) * partitionKibibytes;
         Iterator<UntypedResultSet.Row> iter = execute("SELECT val FROM %s WHERE pk=?", Long.toBinaryString(pk)).iterator();
         int i = 0;
         while (iter.hasNext())
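
Aside: the kibibyte renames above keep the same aligned-key arithmetic in selects()/scan(): random keys are snapped to the partition and row boundaries the insert loop actually wrote. A small standalone sketch of that arithmetic, with illustrative sizes, follows.

    import java.util.concurrent.ThreadLocalRandom;

    // Sketch of the aligned random-key arithmetic from LargePartitionsTest; sizes are made up.
    public class AlignedKeySketch
    {
        public static void main(String[] args)
        {
            final long rowKibibytes = 8;           // each row holds ~8 KiB of value data
            final long partitionKibibytes = 1024;  // each partition holds 1 MiB of rows
            final long totalKibibytes = 64 * 1024; // 64 MiB written in total

            // Pick a partition start the writer used: a multiple of partitionKibibytes.
            long pk = ThreadLocalRandom.current().nextLong(totalKibibytes / partitionKibibytes) * partitionKibibytes;
            // Pick a row offset inside that partition: a multiple of rowKibibytes.
            long ck = ThreadLocalRandom.current().nextLong(partitionKibibytes / rowKibibytes) * rowKibibytes;

            System.out.println("pk=" + Long.toBinaryString(pk) + " ck=" + Long.toBinaryString(ck));
        }
    }
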
diff --git a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
index 579fc15..063afe5 100644
--- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
@@ -17,19 +17,20 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
+
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Random;
-import java.util.UUID;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Iterables;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -41,47 +42,38 @@
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.cql3.statements.SelectStatement;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.compaction.AbstractCompactionTask;
 import org.apache.cassandra.db.compaction.CompactionManager;
-import org.apache.cassandra.db.compaction.Verifier;
 import org.apache.cassandra.db.repair.PendingAntiCompaction;
 import org.apache.cassandra.db.streaming.CassandraOutgoingFile;
-import org.apache.cassandra.db.ReadExecutionController;
-import org.apache.cassandra.db.SinglePartitionReadCommand;
 import org.apache.cassandra.db.SinglePartitionSliceCommandTest;
 import org.apache.cassandra.db.compaction.Verifier;
-import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
 import org.apache.cassandra.db.rows.RangeTombstoneMarker;
 import org.apache.cassandra.db.rows.Unfiltered;
-import org.apache.cassandra.db.rows.UnfilteredRowIterator;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.sstable.format.big.BigFormat;
-import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.CacheService;
-import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.streaming.OutgoingStream;
 import org.apache.cassandra.streaming.StreamPlan;
-import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
+import static java.util.Collections.singleton;
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -124,7 +116,7 @@
         String scp = System.getProperty(LEGACY_SSTABLE_PROP);
         Assert.assertNotNull("System property " + LEGACY_SSTABLE_PROP + " not set", scp);
 
-        LEGACY_SSTABLE_ROOT = new File(scp).getAbsoluteFile();
+        LEGACY_SSTABLE_ROOT = new File(scp).toAbsolute();
         Assert.assertTrue("System property " + LEGACY_SSTABLE_ROOT + " does not specify a directory", LEGACY_SSTABLE_ROOT.isDirectory());
 
         SchemaLoader.prepareServer();
@@ -150,14 +142,10 @@
     /**
      * Get a descriptor for the legacy sstable at the given version.
      */
-    protected Descriptor getDescriptor(String legacyVersion, String table)
+    protected Descriptor getDescriptor(String legacyVersion, String table) throws IOException
     {
-        return new Descriptor(SSTableFormat.Type.BIG.info.getVersion(legacyVersion),
-                              getTableDir(legacyVersion, table),
-                              "legacy_tables",
-                              table,
-                              1,
-                              SSTableFormat.Type.BIG);
+        Path file = Files.list(getTableDir(legacyVersion, table).toPath()).findFirst().orElseThrow(() -> new RuntimeException(String.format("No files for version=%s and table=%s", legacyVersion, table)));
+        return Descriptor.fromFilename(new File(file));
     }
 
     @Test
@@ -201,7 +189,7 @@
                 boolean isTransient = false;
                 for (SSTableReader sstable : cfs.getLiveSSTables())
                 {
-                    UUID random = UUID.randomUUID();
+                    TimeUUID random = nextTimeUUID();
                     sstable.descriptor.getMetadataSerializer().mutateRepairMetadata(sstable.descriptor, UNREPAIRED_SSTABLE, random, isTransient);
                     sstable.reloadSSTableMetadata();
                     assertEquals(UNREPAIRED_SSTABLE, sstable.getRepairedAt());
@@ -233,7 +221,7 @@
                 // set pending
                 for (SSTableReader sstable : cfs.getLiveSSTables())
                 {
-                    UUID random = UUID.randomUUID();
+                    TimeUUID random = nextTimeUUID();
                     try
                     {
                         cfs.getCompactionStrategyManager().mutateRepaired(Collections.singleton(sstable), UNREPAIRED_SSTABLE, random, false);
@@ -251,7 +239,7 @@
                 {
                     try
                     {
-                        cfs.getCompactionStrategyManager().mutateRepaired(Collections.singleton(sstable), UNREPAIRED_SSTABLE, UUID.randomUUID(), true);
+                        cfs.getCompactionStrategyManager().mutateRepaired(Collections.singleton(sstable), UNREPAIRED_SSTABLE, nextTimeUUID(), true);
                         if (!sstable.descriptor.version.hasIsTransient())
                             fail("We should fail setting pending repair on unsupported sstables "+sstable);
                     }
@@ -379,7 +367,7 @@
             boolean shouldFail = !cfs.getLiveSSTables().stream().allMatch(sstable -> sstable.descriptor.version.hasPendingRepair());
             IPartitioner p = Iterables.getFirst(cfs.getLiveSSTables(), null).getPartitioner();
             Range<Token> r = new Range<>(p.getMinimumToken(), p.getMinimumToken());
-            PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, Collections.singleton(r), UUIDGen.getTimeUUID(), 0, 0);
+            PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, singleton(r), nextTimeUUID(), 0, 0);
             PendingAntiCompaction.AcquireResult res = acquisitionCallable.call();
             assertEquals(shouldFail, res == null);
             if (res != null)
@@ -645,10 +633,10 @@
             }
         }
 
-        StorageService.instance.forceKeyspaceFlush("legacy_tables");
+        StorageService.instance.forceKeyspaceFlush("legacy_tables", ColumnFamilyStore.FlushReason.UNIT_TESTS);
 
         File ksDir = new File(LEGACY_SSTABLE_ROOT, String.format("%s/legacy_tables", BigFormat.latestVersion));
-        ksDir.mkdirs();
+        ksDir.tryCreateDirectories();
         copySstablesFromTestData(String.format("legacy_%s_simple", BigFormat.latestVersion), ksDir);
         copySstablesFromTestData(String.format("legacy_%s_simple_counter", BigFormat.latestVersion), ksDir);
         copySstablesFromTestData(String.format("legacy_%s_clust", BigFormat.latestVersion), ksDir);
@@ -658,11 +646,11 @@
     public static void copySstablesFromTestData(String table, File ksDir) throws IOException
     {
         File cfDir = new File(ksDir, table);
-        cfDir.mkdir();
+        cfDir.tryCreateDirectory();
 
         for (File srcDir : Keyspace.open("legacy_tables").getColumnFamilyStore(table).getDirectories().getCFDirectories())
         {
-            for (File file : srcDir.listFiles())
+            for (File file : srcDir.tryList())
             {
                 copyFile(cfDir, file);
             }
@@ -673,7 +661,7 @@
     {
         File tableDir = getTableDir(legacyVersion, table);
         Assert.assertTrue("The table directory " + tableDir + " was not found", tableDir.isDirectory());
-        for (File file : tableDir.listFiles())
+        for (File file : tableDir.tryList())
         {
             copyFile(cfDir, file);
         }
@@ -689,10 +677,10 @@
         byte[] buf = new byte[65536];
         if (file.isFile())
         {
-            File target = new File(cfDir, file.getName());
+            File target = new File(cfDir, file.name());
             int rd;
-            try (FileInputStream is = new FileInputStream(file);
-                 FileOutputStream os = new FileOutputStream(target);) {
+            try (FileInputStreamPlus is = new FileInputStreamPlus(file);
+                 FileOutputStreamPlus os = new FileOutputStreamPlus(target);) {
                 while ((rd = is.read(buf)) >= 0)
                     os.write(buf, 0, rd);
                 }
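
Aside: copyFile() above swaps java.io streams for Cassandra's FileInputStreamPlus/FileOutputStreamPlus but keeps the same 64 KiB read/write loop. A plain-JDK sketch of that loop follows; class and path names are placeholders.

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    // Plain-JDK sketch of the 64 KiB copy loop used by copyFile(); paths are placeholders.
    public class BufferedCopySketch
    {
        static void copy(Path src, Path dst) throws IOException
        {
            byte[] buf = new byte[65536];
            try (InputStream is = Files.newInputStream(src);
                 OutputStream os = Files.newOutputStream(dst))
            {
                int rd;
                while ((rd = is.read(buf)) >= 0)
                    os.write(buf, 0, rd);
            }
        }

        public static void main(String[] args) throws IOException
        {
            copy(Path.of("source.db"), Path.of("target.db"));
        }
    }
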
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
index 2510c5e..ec01865 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
@@ -18,10 +18,9 @@
 
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.*;
 import java.util.function.*;
 
@@ -47,6 +46,7 @@
 import org.apache.cassandra.io.util.*;
 import org.apache.cassandra.schema.*;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -95,7 +95,7 @@
         maxValueSize = DatabaseDescriptor.getMaxValueSize();
         DatabaseDescriptor.setMaxValueSize(1024 * 1024);
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         logger.info("Seed {}", seed);
         random = new Random(seed);
 
@@ -117,7 +117,7 @@
                    .add("reg2", ByteBuffer.wrap(reg2));
             writer.append(builder.build().unfilteredIterator());
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         ssTableReader = writer.finish(true);
         txn.update(ssTableReader, false);
@@ -148,18 +148,18 @@
 
     private void bruteForceCorruptionTest(SSTableReader ssTableReader, Consumer<SSTableReader> walker) throws Throwable
     {
-        RandomAccessFile raf = new RandomAccessFile(ssTableReader.getFilename(), "rw");
+        FileChannel fc = new File(ssTableReader.getFilename()).newReadWriteChannel();
 
         int corruptedCounter = 0;
 
-        int fileLength = (int)raf.length(); // in current test, it does fit into int
+        int fileLength = (int)fc.size(); // in current test, it does fit into int
         for (int i = 0; i < numberOfRuns; i++)
         {
             final int corruptionPosition = random.nextInt(fileLength - 1); //ensure at least one byte will be corrupted
             // corrupt max from position to end of file
             final int corruptionSize = Math.min(maxCorruptionSize, random.nextInt(fileLength - corruptionPosition));
 
-            byte[] backup = corruptSstable(raf, corruptionPosition, corruptionSize);
+            byte[] backup = corruptSstable(fc, corruptionPosition, corruptionSize);
 
             try
             {
@@ -174,12 +174,12 @@
                 if (ChunkCache.instance != null)
                     ChunkCache.instance.invalidateFile(ssTableReader.getFilename());
 
-                restore(raf, corruptionPosition, backup);
+                restore(fc, corruptionPosition, backup);
             }
         }
 
         assertTrue(corruptedCounter > 0);
-        FileUtils.closeQuietly(raf);
+        FileUtils.closeQuietly(fc);
     }
 
     private Consumer<SSTableReader> sstableScanner()
@@ -214,11 +214,11 @@
             for (int i = 0; i < numberOfPks; i++)
             {
                 DecoratedKey dk = Util.dk(String.format("pkvalue_%07d", i));
-                try (UnfilteredRowIterator rowIter = sstable.iterator(dk,
-                                                                      Slices.ALL,
-                                                                      ColumnFilter.all(cfs.metadata()),
-                                                                      false,
-                                                                      SSTableReadsListener.NOOP_LISTENER))
+                try (UnfilteredRowIterator rowIter = sstable.rowIterator(dk,
+                                                                         Slices.ALL,
+                                                                         ColumnFilter.all(cfs.metadata()),
+                                                                         false,
+                                                                         SSTableReadsListener.NOOP_LISTENER))
                 {
                     while (rowIter.hasNext())
                     {
@@ -230,29 +230,28 @@
                             // no-op read
                         }
                     }
-                    rowIter.close();
                 }
             }
         };
     }
 
-    private byte[] corruptSstable(RandomAccessFile raf, int position, int corruptionSize) throws IOException
+    private byte[] corruptSstable(FileChannel fc, int position, int corruptionSize) throws IOException
     {
         byte[] backup = new byte[corruptionSize];
-        raf.seek(position);
-        raf.read(backup);
+        fc.position(position);
+        fc.read(ByteBuffer.wrap(backup));
 
-        raf.seek(position);
+        fc.position(position);
         byte[] corruption = new byte[corruptionSize];
         random.nextBytes(corruption);
-        raf.write(corruption);
+        fc.write(ByteBuffer.wrap(corruption));
 
         return backup;
     }
 
-    private void restore(RandomAccessFile raf, int position, byte[] backup) throws IOException
+    private void restore(FileChannel fc, int position, byte[] backup) throws IOException
     {
-        raf.seek(position);
-        raf.write(backup);
+        fc.position(position);
+        fc.write(ByteBuffer.wrap(backup));
     }
 }
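
Aside: the RandomAccessFile to FileChannel change above keeps the same corrupt-then-restore shape. A self-contained JDK sketch of that pattern follows, showing position(), read(ByteBuffer) and write(ByteBuffer) used the way corruptSstable()/restore() use them; the target file name and sizes are illustrative.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;
    import java.util.Random;

    // JDK-only sketch of corrupt-then-restore over a FileChannel; the file is a placeholder.
    public class CorruptRestoreSketch
    {
        static byte[] corrupt(FileChannel fc, int position, int size, Random random) throws IOException
        {
            byte[] backup = new byte[size];
            fc.position(position);
            fc.read(ByteBuffer.wrap(backup));      // keep the original bytes so the damage can be undone

            byte[] corruption = new byte[size];
            random.nextBytes(corruption);
            fc.position(position);
            fc.write(ByteBuffer.wrap(corruption)); // overwrite with random garbage
            return backup;
        }

        static void restore(FileChannel fc, int position, byte[] backup) throws IOException
        {
            fc.position(position);
            fc.write(ByteBuffer.wrap(backup));
        }

        public static void main(String[] args) throws IOException
        {
            try (FileChannel fc = FileChannel.open(Path.of("scratch.bin"),
                                                   StandardOpenOption.READ, StandardOpenOption.WRITE))
            {
                Random random = new Random(42);
                byte[] backup = corrupt(fc, 0, Math.min(16, (int) fc.size()), random);
                restore(fc, 0, backup);
            }
        }
    }
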
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
index d07187b..4572e5c 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
@@ -17,24 +17,29 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Function;
+import java.util.function.Supplier;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import com.google.common.collect.Sets;
+import org.apache.cassandra.io.util.File;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.ColumnIdentifier;
@@ -62,6 +67,7 @@
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.SequentialWriter;
 import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.MockSchema;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.IndexMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -76,6 +82,7 @@
  * Test the functionality of {@link SSTableHeaderFix}.
  * It writes an 'big-m' version sstable(s) and executes against these.
  */
+@RunWith(Parameterized.class)
 public class SSTableHeaderFixTest
 {
     static
@@ -85,12 +92,23 @@
 
     private File temporaryFolder;
 
+    @Parameterized.Parameter
+    public Supplier<? extends SSTableId> sstableIdGen;
+
+    @Parameterized.Parameters
+    public static Collection<Object[]> parameters()
+    {
+        return MockSchema.sstableIdGenerators();
+    }
+
     @Before
     public void setup()
     {
+        MockSchema.sstableIds.clear();
+        MockSchema.sstableIdGenerator = sstableIdGen;
         File f = FileUtils.createTempFile("SSTableUDTFixTest", "");
-        f.delete();
-        f.mkdirs();
+        f.tryDelete();
+        f.tryCreateDirectories();
         temporaryFolder = f;
     }
 
@@ -735,6 +753,20 @@
         }
     }
 
+    @Test
+    public void ignoresStaleFilesTest() throws Exception
+    {
+        File dir = temporaryFolder;
+        IntStream.range(1, 2).forEach(g -> generateFakeSSTable(dir, g));
+
+        File newFile = new File(dir.toAbsolute(), "something_something-something.something");
+        Assert.assertTrue(newFile.createFileIfNotExists());
+
+        SSTableHeaderFix headerFix = builder().withPath(dir.toPath())
+                                              .build();
+        headerFix.execute();
+    }
+
     private static final Pattern p = Pattern.compile(".* Column '([^']+)' needs to be updated from type .*");
 
     private SSTableHeaderFix.Builder builder()
@@ -790,11 +822,11 @@
         try
         {
 
-            Descriptor desc = new Descriptor(version, dir, "ks", "cf", generation, SSTableFormat.Type.BIG);
+            Descriptor desc = new Descriptor(version, dir, "ks", "cf", MockSchema.sstableId(generation), SSTableFormat.Type.BIG);
 
             // Just create the component files - we don't really need those.
             for (Component component : requiredComponents)
-                assertTrue(new File(desc.filenameFor(component)).createNewFile());
+                assertTrue(new File(desc.filenameFor(component)).createFileIfNotExists());
 
             AbstractType<?> partitionKey = headerMetadata.partitionKeyType;
             List<AbstractType<?>> clusteringKey = headerMetadata.clusteringColumns()
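
Aside: SSTableHeaderFixTest above becomes a JUnit 4 parameterized test (@RunWith(Parameterized.class) with an injected sstable id generator). A minimal sketch of that JUnit mechanism follows, using made-up integer parameters rather than MockSchema.

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    import static org.junit.Assert.assertTrue;

    // Minimal JUnit 4 parameterized test; the parameter values are illustrative only.
    @RunWith(Parameterized.class)
    public class ParameterizedSketchTest
    {
        @Parameterized.Parameter
        public int size;                         // injected by the runner for each parameter set

        @Parameterized.Parameters(name = "size={0}")
        public static Collection<Object[]> parameters()
        {
            return Arrays.asList(new Object[]{ 1 }, new Object[]{ 8 }, new Object[]{ 64 });
        }

        @Test
        public void sizeIsPositive()
        {
            assertTrue(size > 0);                // the whole test class is re-run once per parameter
        }
    }
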
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableIdTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableIdTest.java
new file mode 100644
index 0000000..82fbf32
--- /dev/null
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableIdTest.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.sstable;
+
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.Supplier;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import com.google.common.collect.Sets;
+import com.google.common.primitives.UnsignedBytes;
+import org.junit.Test;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.utils.TimeUUID;
+import org.awaitility.Awaitility;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.quicktheories.QuickTheory.qt;
+import static org.quicktheories.generators.SourceDSL.longs;
+
+public class SSTableIdTest
+{
+    @Test
+    public void testSequenceBasedIdProperties()
+    {
+        testSSTableIdProperties(SequenceBasedSSTableId.Builder.instance);
+    }
+
+    @Test
+    public void testUUIDBasedIdProperties()
+    {
+        testSSTableIdProperties(UUIDBasedSSTableId.Builder.instance);
+    }
+
+    private void testSSTableIdProperties(SSTableId.Builder<?> builder)
+    {
+        List<SSTableId> ids = Stream.generate(builder.generator(Stream.empty()))
+                                    .limit(100).collect(Collectors.toList());
+        assertThat(ids).isSorted();
+        assertThat(Sets.newHashSet(ids)).hasSameSizeAs(ids);
+
+        List<ByteBuffer> serIds = ids.stream().map(SSTableId::asBytes).collect(Collectors.toList());
+        assertThat(serIds).isSortedAccordingTo((o1, o2) -> UnsignedBytes.lexicographicalComparator().compare(o1.array(), o2.array()));
+
+        List<SSTableId> deserIds = serIds.stream().map(builder::fromBytes).collect(Collectors.toList());
+        assertThat(deserIds).containsExactlyElementsOf(ids);
+
+        List<String> stringifiedIds = ids.stream().map(SSTableId::toString).collect(Collectors.toList());
+        if (!(builder instanceof SequenceBasedSSTableId.Builder))
+        {
+            // the legacy string representation is not sortable
+            assertThat(stringifiedIds).isSorted();
+        }
+
+        List<SSTableId> destringifiedIds = stringifiedIds.stream().map(builder::fromString).collect(Collectors.toList());
+        assertThat(destringifiedIds).containsExactlyElementsOf(ids);
+
+        generatorFuzzTest(builder);
+    }
+
+    @Test
+    public void testUUIDBytesSerDe()
+    {
+        qt().forAll(longs().all(), longs().all()).checkAssert((msb, lsb) -> {
+            msb = (msb & ~0xf000) | 0x1000; // v1
+            TimeUUID uuid = TimeUUID.fromBytes(msb, lsb);
+            UUIDBasedSSTableId id = new UUIDBasedSSTableId(uuid);
+
+            testBytesSerialization(id);
+            testStringSerialization(id);
+        });
+    }
+
+    private void testBytesSerialization(UUIDBasedSSTableId id)
+    {
+        ByteBuffer buf = id.asBytes();
+        assertThat(buf.remaining()).isEqualTo(UUIDBasedSSTableId.BYTES_LEN);
+        assertThat(UUIDBasedSSTableId.Builder.instance.isUniqueIdentifier(buf)).isTrue();
+        assertThat(SequenceBasedSSTableId.Builder.instance.isUniqueIdentifier(buf)).isFalse();
+        SSTableId fromBytes = SSTableIdFactory.instance.fromBytes(buf);
+        assertThat(fromBytes).isEqualTo(id);
+    }
+
+    private void testStringSerialization(UUIDBasedSSTableId id)
+    {
+        String s = id.toString();
+        assertThat(s).hasSize(UUIDBasedSSTableId.STRING_LEN);
+        assertThat(s).matches(Pattern.compile("[0-9a-z]{4}_[0-9a-z]{4}_[0-9a-z]{18}"));
+        assertThat(UUIDBasedSSTableId.Builder.instance.isUniqueIdentifier(s)).isTrue();
+        assertThat(SequenceBasedSSTableId.Builder.instance.isUniqueIdentifier(s)).isFalse();
+        SSTableId fromString = SSTableIdFactory.instance.fromString(s);
+        assertThat(fromString).isEqualTo(id);
+    }
+
+    @Test
+    public void testComparator()
+    {
+        List<SSTableId> ids = new ArrayList<>(Collections.nCopies(300, null));
+        for (int i = 0; i < 100; i++)
+        {
+            ids.set(i + 100, new SequenceBasedSSTableId(ThreadLocalRandom.current().nextInt(1000000)));
+            ids.set(i, new UUIDBasedSSTableId(TimeUUID.Generator.atUnixMillis(ThreadLocalRandom.current().nextLong(10000), 0)));
+        }
+
+        List<SSTableId> shuffledIds = new ArrayList<>(ids);
+        Collections.shuffle(shuffledIds);
+        assertThat(shuffledIds).isNotEqualTo(ids);
+
+        List<SSTableId> sortedIds = new ArrayList<>(shuffledIds);
+        sortedIds.sort(SSTableIdFactory.COMPARATOR);
+        assertThat(sortedIds).isNotEqualTo(shuffledIds);
+        assertThat(sortedIds).isSortedAccordingTo(SSTableIdFactory.COMPARATOR);
+
+        assertThat(sortedIds.subList(0, 100)).containsOnlyNulls();
+        assertThat(sortedIds.subList(100, 200)).allMatch(id -> id instanceof SequenceBasedSSTableId);
+        assertThat(sortedIds.subList(200, 300)).allMatch(id -> id instanceof UUIDBasedSSTableId);
+
+        assertThat(sortedIds.subList(100, 200)).isSortedAccordingTo(Comparator.comparing(o -> ((SequenceBasedSSTableId) o)));
+        assertThat(sortedIds.subList(200, 300)).isSortedAccordingTo(Comparator.comparing(o -> ((UUIDBasedSSTableId) o)));
+    }
+
+    private static <T extends SSTableId> void generatorFuzzTest(SSTableId.Builder<T> builder)
+    {
+        final int NUM_THREADS = 10, IDS_PER_THREAD = 10;
+        Set<SSTableId> ids = new CopyOnWriteArraySet<>();
+        Supplier<T> generator = builder.generator(Stream.empty());
+        ExecutorPlus service = executorFactory().pooled("test", NUM_THREADS);
+        CyclicBarrier barrier = new CyclicBarrier(NUM_THREADS);
+        for (int i = 0; i < NUM_THREADS; i++)
+        {
+            service.submit(() -> {
+                for (int k = 0; k < IDS_PER_THREAD; k++)
+                {
+                    barrier.await();
+                    ids.add(generator.get());
+                }
+                return null;
+            });
+        }
+
+        Awaitility.await().atMost(Duration.ofSeconds(10)).untilAsserted(() -> assertThat(service.getCompletedTaskCount()).isEqualTo(NUM_THREADS));
+        assertThat(ids).hasSize(NUM_THREADS * IDS_PER_THREAD);
+    }
+}
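
Aside: generatorFuzzTest() above races several threads through a CyclicBarrier and asserts that every generated id is unique. A plain-JDK sketch of that pattern follows, with an ordinary AtomicLong standing in for the SSTableId generator.

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.CyclicBarrier;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.Supplier;

    // JDK-only sketch of the barrier-driven uniqueness fuzz test; AtomicLong stands in for the id generator.
    public class GeneratorFuzzSketch
    {
        public static void main(String[] args) throws Exception
        {
            final int threads = 10, idsPerThread = 10;
            Set<Long> ids = ConcurrentHashMap.newKeySet();
            AtomicLong counter = new AtomicLong();
            Supplier<Long> generator = counter::incrementAndGet;

            ExecutorService service = Executors.newFixedThreadPool(threads);
            CyclicBarrier barrier = new CyclicBarrier(threads);
            for (int i = 0; i < threads; i++)
            {
                service.submit(() -> {
                    for (int k = 0; k < idsPerThread; k++)
                    {
                        barrier.await();            // all threads hit the generator at the same instant
                        ids.add(generator.get());
                    }
                    return null;
                });
            }
            service.shutdown();
            if (!service.awaitTermination(10, TimeUnit.SECONDS))
                throw new AssertionError("workers did not finish");
            if (ids.size() != threads * idsPerThread)
                throw new AssertionError("duplicate ids generated: " + ids.size());
            System.out.println("all " + ids.size() + " ids unique");
        }
    }
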
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
index b187704..c2403dd 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
@@ -25,6 +24,7 @@
 
 import com.google.common.io.Files;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -33,7 +33,6 @@
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.locator.Replica;
-import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.schema.Schema;
@@ -90,7 +89,7 @@
     @Before
     public void setup() throws Exception
     {
-        tmpdir = Files.createTempDir();
+        tmpdir = new File(Files.createTempDir());
     }
 
     @After
@@ -100,7 +99,6 @@
             FileUtils.deleteRecursive(tmpdir);
         } catch (FSWriteError e) {
             /*
-              Windows does not allow a mapped file to be deleted, so we probably forgot to clean the buffers somewhere.
               We force a GC here to force buffer deallocation, and then try deleting the directory again.
               For more information, see: http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4715154
               If this is not the problem, the exception will be rethrown anyway.
@@ -143,7 +141,7 @@
         }
 
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
-        cfs.forceBlockingFlush(); // wait for sstables to be on disk else we won't be able to stream them
+        Util.flush(cfs); // wait for sstables to be on disk else we won't be able to stream them
 
         final CountDownLatch latch = new CountDownLatch(1);
         SSTableLoader loader = new SSTableLoader(dataDir, new TestClient(), new OutputHandler.SystemOutput(false, false));
@@ -175,7 +173,7 @@
                                                   .withBufferSizeInMB(1)
                                                   .build();
 
-        int NB_PARTITIONS = 5000; // Enough to write >1MB and get at least one completed sstable before we've closed the writer
+        int NB_PARTITIONS = 5000; // Enough to write >1MiB and get at least one completed sstable before we've closed the writer
 
         for (int i = 0; i < NB_PARTITIONS; i++)
         {
@@ -184,10 +182,10 @@
         }
 
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD2);
-        cfs.forceBlockingFlush(); // wait for sstables to be on disk else we won't be able to stream them
+        Util.flush(cfs); // wait for sstables to be on disk else we won't be able to stream them
 
         //make sure we have some tables...
-        assertTrue(Objects.requireNonNull(dataDir.listFiles()).length > 0);
+        assertTrue(Objects.requireNonNull(dataDir.tryList()).length > 0);
 
         final CountDownLatch latch = new CountDownLatch(2);
         //writer is still open so loader should not load anything
@@ -215,8 +213,8 @@
     @Test
     public void testLoadingSSTableToDifferentKeyspace() throws Exception
     {
-        File dataDir = new File(tmpdir.getAbsolutePath() + File.separator + KEYSPACE1 + File.separator + CF_STANDARD1);
-        assert dataDir.mkdirs();
+        File dataDir = new File(tmpdir.absolutePath() + File.pathSeparator() + KEYSPACE1 + File.pathSeparator() + CF_STANDARD1);
+        assert dataDir.tryCreateDirectories();
         TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD1);
 
         String schema = "CREATE TABLE %s.%s (key ascii, name ascii, val ascii, val1 ascii, PRIMARY KEY (key, name))";
@@ -232,14 +230,14 @@
         }
 
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
-        cfs.forceBlockingFlush(); // wait for sstables to be on disk else we won't be able to stream them
+        Util.flush(cfs); // wait for sstables to be on disk else we won't be able to stream them
 
         final CountDownLatch latch = new CountDownLatch(1);
         SSTableLoader loader = new SSTableLoader(dataDir, new TestClient(), new OutputHandler.SystemOutput(false, false), 1, KEYSPACE2);
         loader.stream(Collections.emptySet(), completionStreamListener(latch)).get();
 
         cfs = Keyspace.open(KEYSPACE2).getColumnFamilyStore(CF_STANDARD1);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         List<FilteredPartition> partitions = Util.getAll(Util.cmd(cfs).build());
 
@@ -269,24 +267,7 @@
 
     private void testLoadingTable(String tableName) throws Exception
     {
-        testLoadingTable(CF_BACKUPS, false);
-    }
-
-    @Test
-    public void testLoadingLegacyBackupsTable() throws Exception
-    {
-        testLoadingTable(CF_BACKUPS, true);
-    }
-
-    @Test
-    public void testLoadingLegacySnapshotsTable() throws Exception
-    {
-        testLoadingTable(CF_SNAPSHOTS, true);
-    }
-
-    private void testLoadingTable(String tableName, boolean isLegacyTable) throws Exception
-    {
-        File dataDir = dataDir(tableName, isLegacyTable);
+        File dataDir = dataDir(tableName);
         TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE1, tableName);
 
         try (CQLSSTableWriter writer = CQLSSTableWriter.builder()
@@ -299,7 +280,7 @@
         }
 
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(tableName);
-        cfs.forceBlockingFlush(); // wait for sstables to be on disk else we won't be able to stream them
+        Util.flush(cfs); // wait for sstables to be on disk else we won't be able to stream them
 
         final CountDownLatch latch = new CountDownLatch(1);
         SSTableLoader loader = new SSTableLoader(dataDir, new TestClient(), new OutputHandler.SystemOutput(false, false));
@@ -321,17 +302,10 @@
 
     private File dataDir(String cf)
     {
-        return dataDir(cf, false);
-    }
-
-    private File dataDir(String cf, boolean isLegacyTable)
-    {
-        // Add -{tableUuid} suffix to table dir if not a legacy table
-        File dataDir = new File(tmpdir.getAbsolutePath() + File.separator + SSTableLoaderTest.KEYSPACE1 + File.separator + cf
-                                + (isLegacyTable ? "" : String.format("-%s", TableId.generate().toHexString())));
-        assert dataDir.mkdirs();
+        File dataDir = new File(tmpdir.absolutePath() + File.pathSeparator() + SSTableLoaderTest.KEYSPACE1 + File.pathSeparator() + cf);
+        assert dataDir.tryCreateDirectories();
         //make sure we have no tables...
-        assertEquals(Objects.requireNonNull(dataDir.listFiles()).length, 0);
+        assertEquals(Objects.requireNonNull(dataDir.tryList()).length, 0);
         return dataDir;
     }
 
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
index aecddf9..2e5a17a 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
@@ -95,7 +95,7 @@
             .build()
             .applyUnsafe();
 
-        store.forceBlockingFlush();
+        Util.flush(store);
         assertEquals(1, store.getLiveSSTables().size());
         int ttltimestamp = (int) (System.currentTimeMillis() / 1000);
         int firstDelTime = 0;
@@ -113,7 +113,7 @@
         .applyUnsafe();
 
         ttltimestamp = (int) (System.currentTimeMillis() / 1000);
-        store.forceBlockingFlush();
+        Util.flush(store);
         assertEquals(2, store.getLiveSSTables().size());
         List<SSTableReader> sstables = new ArrayList<>(store.getLiveSSTables());
         if (sstables.get(0).getSSTableMetadata().maxLocalDeletionTime < sstables.get(1).getSSTableMetadata().maxLocalDeletionTime)
@@ -163,7 +163,7 @@
         .build()
         .applyUnsafe();
 
-        store.forceBlockingFlush();
+        Util.flush(store);
         assertEquals(1, store.getLiveSSTables().size());
         int ttltimestamp = (int) (System.currentTimeMillis() / 1000);
         int firstMaxDelTime = 0;
@@ -175,7 +175,7 @@
 
         RowUpdateBuilder.deleteRow(store.metadata(), timestamp + 1, "deletetest", "todelete").applyUnsafe();
 
-        store.forceBlockingFlush();
+        Util.flush(store);
         assertEquals(2, store.getLiveSSTables().size());
         boolean foundDelete = false;
         for (SSTableReader sstable : store.getLiveSSTables())
@@ -212,7 +212,7 @@
                     .applyUnsafe();
             }
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         assertEquals(1, store.getLiveSSTables().size());
         for (SSTableReader sstable : store.getLiveSSTables())
         {
@@ -233,7 +233,7 @@
             .applyUnsafe();
         }
 
-        store.forceBlockingFlush();
+        Util.flush(store);
         store.forceMajorCompaction();
         assertEquals(1, store.getLiveSSTables().size());
         for (SSTableReader sstable : store.getLiveSSTables())
@@ -260,7 +260,7 @@
         ColumnFamily cells = ArrayBackedSortedColumns.factory.create(cfs.metadata);
         cells.addColumn(new BufferCounterCell(cellname("col"), state.context, 1L, Long.MIN_VALUE));
         new Mutation(Util.dk("k").getKey(), cells).applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertTrue(cfs.getLiveSSTables().iterator().next().getSSTableMetadata().hasLegacyCounterShards);
         cfs.truncateBlocking();
 
@@ -271,7 +271,7 @@
         cells = ArrayBackedSortedColumns.factory.create(cfs.metadata);
         cells.addColumn(new BufferCounterCell(cellname("col"), state.context, 1L, Long.MIN_VALUE));
         new Mutation(Util.dk("k").getKey(), cells).applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertTrue(cfs.getLiveSSTables().iterator().next().getSSTableMetadata().hasLegacyCounterShards);
         cfs.truncateBlocking();
 
@@ -282,7 +282,7 @@
         cells = ArrayBackedSortedColumns.factory.create(cfs.metadata);
         cells.addColumn(new BufferCounterCell(cellname("col"), state.context, 1L, Long.MIN_VALUE));
         new Mutation(Util.dk("k").getKey(), cells).applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertTrue(cfs.getLiveSSTables().iterator().next().getSSTableMetadata().hasLegacyCounterShards);
         cfs.truncateBlocking();
 
@@ -292,7 +292,7 @@
         cells = ArrayBackedSortedColumns.factory.create(cfs.metadata);
         cells.addColumn(new BufferCounterCell(cellname("col"), state.context, 1L, Long.MIN_VALUE));
         new Mutation(Util.dk("k").getKey(), cells).applyUnsafe();
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertFalse(cfs.getLiveSSTables().iterator().next().getSSTableMetadata().hasLegacyCounterShards);
         cfs.truncateBlocking();
     } */
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index 1246130..f064f19 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -17,15 +17,16 @@
  */
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.*;
 import java.util.concurrent.*;
+import java.util.stream.Stream;
 
 import com.google.common.collect.Sets;
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -123,7 +124,7 @@
                 .build()
                 .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         List<Range<Token>> ranges = new ArrayList<>();
@@ -167,7 +168,7 @@
                 .build()
                 .applyUnsafe();
             }
-            store.forceBlockingFlush();
+            Util.flush(store);
             CompactionManager.instance.performMaximal(store, false);
 
             // check that all our keys are found correctly
@@ -207,7 +208,7 @@
             .build()
             .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         clearAndLoad(store);
         assert store.metric.maxPartitionSize.getValue() != 0;
@@ -235,7 +236,7 @@
             .applyUnsafe();
         }
 
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
         assertEquals(0, sstable.getReadMeter().count());
@@ -266,7 +267,7 @@
             .applyUnsafe();
 
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
@@ -297,7 +298,7 @@
             .build()
             .applyUnsafe();
 
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         // check if opening and querying works
         assertIndexQueryWorks(store);
@@ -319,7 +320,7 @@
             .build()
             .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
@@ -354,7 +355,7 @@
                     .build()
                     .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
@@ -421,7 +422,7 @@
                 .build()
                 .applyUnsafe();
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
         Descriptor desc = sstable.descriptor;
@@ -485,7 +486,7 @@
         // check that only the summary is regenerated when it is deleted
         components.add(Component.FILTER);
         summaryModified = Files.getLastModifiedTime(summaryPath).toMillis();
-        summaryFile.delete();
+        summaryFile.tryDelete();
 
         TimeUnit.MILLISECONDS.sleep(1000); // sleep to ensure modified time will be different
         bloomModified = Files.getLastModifiedTime(bloomPath).toMillis();
@@ -522,7 +523,7 @@
         .build()
         .applyUnsafe();
 
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         for(ColumnFamilyStore indexCfs : store.indexManager.getAllIndexColumnFamilyStores())
         {
@@ -550,7 +551,7 @@
             .build()
             .applyUnsafe();
 
-        store.forceBlockingFlush();
+        Util.flush(store);
         boolean foundScanner = false;
         for (SSTableReader s : store.getLiveSSTables())
         {
@@ -582,7 +583,7 @@
             .applyUnsafe();
 
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         // construct a range which is present in the sstable, but whose
@@ -619,7 +620,7 @@
             .applyUnsafe();
 
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         Collection<SSTableReader> sstables = store.getLiveSSTables();
@@ -696,7 +697,7 @@
             .applyUnsafe();
 
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         Collection<SSTableReader> sstables = store.getLiveSSTables();
@@ -751,7 +752,7 @@
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD);
         SSTableReader sstable = getNewSSTable(cfs);
-        Descriptor notLiveDesc = new Descriptor(new File("/tmp"), "", "", 0);
+        Descriptor notLiveDesc = new Descriptor(new File("/tmp"), "", "", SSTableIdFactory.instance.defaultBuilder().generator(Stream.empty()).get());
         SSTableReader.moveAndOpenSSTable(cfs, sstable.descriptor, notLiveDesc, sstable.components, false);
     }
 
@@ -761,7 +762,7 @@
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD);
         SSTableReader sstable = getNewSSTable(cfs);
-        Descriptor notLiveDesc = new Descriptor(new File("/tmp"), "", "", 0);
+        Descriptor notLiveDesc = new Descriptor(new File("/tmp"), "", "", SSTableIdFactory.instance.defaultBuilder().generator(Stream.empty()).get());
         SSTableReader.moveAndOpenSSTable(cfs, notLiveDesc, sstable.descriptor, sstable.components, false);
     }
 
@@ -773,9 +774,10 @@
         SSTableReader sstable = getNewSSTable(cfs);
         cfs.clearUnsafe();
         sstable.selfRef().release();
-        File tmpdir = Files.createTempDirectory("testMoveAndOpen").toFile();
+        File tmpdir = new File(Files.createTempDirectory("testMoveAndOpen"));
         tmpdir.deleteOnExit();
-        Descriptor notLiveDesc = new Descriptor(tmpdir, sstable.descriptor.ksname, sstable.descriptor.cfname, 100);
+        SSTableId id = SSTableIdFactory.instance.defaultBuilder().generator(Stream.empty()).get();
+        Descriptor notLiveDesc = new Descriptor(tmpdir, sstable.descriptor.ksname, sstable.descriptor.cfname, id);
         // make sure the new directory is empty and that the old files exist:
         for (Component c : sstable.components)
         {
@@ -789,7 +791,7 @@
         {
             File f = new File(notLiveDesc.filenameFor(c));
             assertTrue(f.exists());
-            assertTrue(f.toString().contains("-100-"));
+            assertTrue(f.toString().contains(String.format("-%s-", id)));
             f.deleteOnExit();
             assertFalse(new File(sstable.descriptor.filenameFor(c)).exists());
         }
@@ -806,7 +808,7 @@
             .build()
             .applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         return Sets.difference(cfs.getLiveSSTables(), before).iterator().next();
     }
 
@@ -836,7 +838,7 @@
 
         // delete the compression info, so it is corrupted.
         File compressionInfoFile = new File(desc.filenameFor(Component.COMPRESSION_INFO));
-        compressionInfoFile.delete();
+        compressionInfoFile.tryDelete();
         assertFalse("CompressionInfo file should not exist", compressionInfoFile.exists());
 
         // discover the components on disk after deletion
@@ -856,7 +858,7 @@
 
         // mark the toc file not readable in order to trigger the FSReadError
         File tocFile = new File(desc.filenameFor(Component.TOC));
-        tocFile.setReadable(false);
+        tocFile.trySetReadable(false);
 
         expectedException.expect(FSReadError.class);
         expectedException.expectMessage("TOC.txt");
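
Aside: tocFile.trySetReadable(false) above (Cassandra's File wrapper) mirrors plain java.io.File.setReadable(false). A small JDK sketch of using it to provoke a read failure in a test follows; the file name is a placeholder, and the permission change is a no-op on filesystems that ignore it.

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;

    // JDK sketch: flip a file to unreadable and confirm that opening it fails. Placeholder file name.
    public class UnreadableFileSketch
    {
        public static void main(String[] args) throws IOException
        {
            File toc = File.createTempFile("TOC", ".txt");
            toc.deleteOnExit();

            if (!toc.setReadable(false))    // may return false where permissions are unsupported
            {
                System.out.println("platform ignored the permission change; skipping");
                return;
            }

            try (FileInputStream in = new FileInputStream(toc))
            {
                System.out.println("open unexpectedly succeeded (e.g. running as root)");
            }
            catch (IOException expected)
            {
                System.out.println("open failed as expected: " + expected.getMessage());
            }
            finally
            {
                toc.setReadable(true);      // restore so the temp file can be cleaned up
            }
        }
    }
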
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
index 1895653..c88e0b0 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.ExecutionException;
@@ -52,13 +51,17 @@
 import org.apache.cassandra.db.lifecycle.SSTableSet;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableWriter;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.metrics.StorageMetrics;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
 
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.db.compaction.OperationType.COMPACTION;
+import static org.apache.cassandra.utils.FBUtilities.nowInSeconds;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -80,7 +83,7 @@
                 .build()
                 .apply();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
         assertEquals(1, sstables.size());
         assertEquals(sstables.iterator().next().bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());
@@ -89,7 +92,7 @@
              LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
              SSTableRewriter writer = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
              CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, scanners.scanners, controller, nowInSec, nextTimeUUID()))
         {
             writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
             while(ci.hasNext())
@@ -99,7 +102,7 @@
             writer.finish();
         }
         LifecycleTransaction.waitForDeletions();
-        assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
+        assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames()));
 
         validateCFS(cfs);
         truncate(cfs);
@@ -121,7 +124,7 @@
              LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
              SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false, true);
              CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, scanners.scanners, controller, nowInSec, nextTimeUUID()))
         {
             writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
             while (ci.hasNext())
@@ -131,7 +134,7 @@
             writer.finish();
         }
         LifecycleTransaction.waitForDeletions();
-        assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
+        assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames()));
 
         validateCFS(cfs);
     }
@@ -154,7 +157,7 @@
              LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
              SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false, true);
              CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, scanners.scanners, controller, nowInSec, nextTimeUUID()))
         {
             writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
             while (ci.hasNext())
@@ -186,7 +189,7 @@
             writer.finish();
         }
         LifecycleTransaction.waitForDeletions();
-        assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
+        assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames()));
 
         validateCFS(cfs);
         truncate(cfs);
@@ -211,7 +214,7 @@
              CompactionController controller = new CompactionController(cfs, compacting, 0);
              LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
              SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
         {
             rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
 
@@ -244,7 +247,7 @@
 
         // tmplink and tmp files should be gone:
         assertEquals(sum, cfs.metric.totalDiskSpaceUsed.getCount());
-        assertFileCounts(s.descriptor.directory.list());
+        assertFileCounts(s.descriptor.directory.tryListNames());
         validateCFS(cfs);
     }
 
@@ -266,7 +269,7 @@
              CompactionController controller = new CompactionController(cfs, compacting, 0);
              LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
              SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
         {
             rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
 
@@ -287,7 +290,7 @@
         assertEquals(files, cfs.getLiveSSTables().size());
         LifecycleTransaction.waitForDeletions();
 
-        assertFileCounts(s.descriptor.directory.list());
+        assertFileCounts(s.descriptor.directory.tryListNames());
         validateCFS(cfs);
     }
 
@@ -304,7 +307,7 @@
                             SSTableRewriter rewriter,
                             LifecycleTransaction txn)
             {
-                try (CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+                try (CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
                 {
                     int files = 1;
                     while (ci.hasNext())
@@ -335,7 +338,7 @@
                             SSTableRewriter rewriter,
                             LifecycleTransaction txn)
             {
-                try (CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+                try (CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
                 {
                     int files = 1;
                     while (ci.hasNext())
@@ -371,7 +374,7 @@
                             SSTableRewriter rewriter,
                             LifecycleTransaction txn)
             {
-                try(CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+                try(CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
                 {
                     int files = 1;
                     while (ci.hasNext())
@@ -426,7 +429,7 @@
 
         assertEquals(startSize, cfs.metric.liveDiskSpaceUsed.getCount());
         assertEquals(1, cfs.getLiveSSTables().size());
-        assertFileCounts(s.descriptor.directory.list());
+        assertFileCounts(s.descriptor.directory.tryListNames());
         assertEquals(cfs.getLiveSSTables().iterator().next().first, origFirst);
         assertEquals(cfs.getLiveSSTables().iterator().next().last, origLast);
         validateCFS(cfs);
@@ -449,7 +452,7 @@
              CompactionController controller = new CompactionController(cfs, compacting, 0);
              LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
              SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
         {
             rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
             while(ci.hasNext())
@@ -473,7 +476,7 @@
         LifecycleTransaction.waitForDeletions();
 
         assertEquals(files - 1, cfs.getLiveSSTables().size()); // we never wrote anything to the last file
-        assertFileCounts(s.descriptor.directory.list());
+        assertFileCounts(s.descriptor.directory.tryListNames());
         validateCFS(cfs);
     }
 
@@ -495,7 +498,7 @@
              CompactionController controller = new CompactionController(cfs, compacting, 0);
              LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
              SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
         {
             rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
             while(ci.hasNext())
@@ -513,7 +516,7 @@
         }
 
         LifecycleTransaction.waitForDeletions();
-        assertFileCounts(s.descriptor.directory.list());
+        assertFileCounts(s.descriptor.directory.tryListNames());
         validateCFS(cfs);
     }
 
@@ -535,7 +538,7 @@
              CompactionController controller = new CompactionController(cfs, compacting, 0);
              LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
              SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1000000, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID()))
         {
             rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
             while(ci.hasNext())
@@ -554,7 +557,7 @@
         assertEquals(files, sstables.size());
         assertEquals(files, cfs.getLiveSSTables().size());
         LifecycleTransaction.waitForDeletions();
-        assertFileCounts(s.descriptor.directory.list());
+        assertFileCounts(s.descriptor.directory.tryListNames());
 
         validateCFS(cfs);
     }
@@ -572,10 +575,10 @@
             SSTableSplitter splitter = new SSTableSplitter(cfs, txn, 10);
             splitter.split();
 
-            assertFileCounts(s.descriptor.directory.list());
+            assertFileCounts(s.descriptor.directory.tryListNames());
             LifecycleTransaction.waitForDeletions();
 
-            for (File f : s.descriptor.directory.listFiles())
+            for (File f : s.descriptor.directory.tryList())
             {
                 // we need to clear out the data dir, otherwise tests running after this break
                 FileUtils.deleteRecursive(f);
@@ -621,7 +624,7 @@
              LifecycleTransaction txn = offline ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting)
                                        : cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
              SSTableRewriter rewriter = new SSTableRewriter(txn, 100, 10000000, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID())
         )
         {
             rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
@@ -651,7 +654,7 @@
 
         LifecycleTransaction.waitForDeletions();
 
-        int filecount = assertFileCounts(s.descriptor.directory.list());
+        int filecount = assertFileCounts(s.descriptor.directory.tryListNames());
         assertEquals(filecount, 1);
         if (!offline)
         {
@@ -664,16 +667,16 @@
             assertEquals(0, cfs.getLiveSSTables().size());
             cfs.truncateBlocking();
         }
-        filecount = assertFileCounts(s.descriptor.directory.list());
+        filecount = assertFileCounts(s.descriptor.directory.tryListNames());
         if (offline)
         {
             // the file is not added to the CFS, therefore not truncated away above
             assertEquals(1, filecount);
-            for (File f : s.descriptor.directory.listFiles())
+            for (File f : s.descriptor.directory.tryList())
             {
                 FileUtils.deleteRecursive(f);
             }
-            filecount = assertFileCounts(s.descriptor.directory.list());
+            filecount = assertFileCounts(s.descriptor.directory.tryListNames());
         }
 
         assertEquals(0, filecount);
@@ -697,7 +700,7 @@
                     .build()
                     .apply();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         cfs.forceMajorCompaction();
         validateKeys(keyspace);
 
@@ -711,7 +714,7 @@
              CompactionController controller = new CompactionController(cfs, compacting, 0);
              LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
              SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID())
         )
         {
             rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
@@ -749,7 +752,7 @@
              CompactionController controller = new CompactionController(cfs, sstables, 0);
              LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
              SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false, true);
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())
+             CompactionIterator ci = new CompactionIterator(COMPACTION, singletonList(scanner), controller, nowInSeconds(), nextTimeUUID())
         )
         {
             writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
@@ -792,7 +795,7 @@
              SSTableRewriter writer = SSTableRewriter.constructWithoutEarlyOpening(txn, false, 1000);
              SSTableRewriter writer2 = SSTableRewriter.constructWithoutEarlyOpening(txn, false, 1000);
              CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
-             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID())
+             CompactionIterator ci = new CompactionIterator(COMPACTION, scanners.scanners, controller, nowInSec, nextTimeUUID())
              )
         {
             writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
@@ -834,7 +837,7 @@
                 }
             }
         };
-        Thread t = NamedThreadFactory.createThread(r);
+        Thread t = NamedThreadFactory.createAnonymousThread(r);
         try
         {
             t.start();
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java
index eff95fc..922200a 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java
@@ -30,6 +30,7 @@
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DataRange;
@@ -182,9 +183,9 @@
         assert boundaries.length % 2 == 0;
         for (DataRange range : dataRanges(sstable.metadata(), scanStart, scanEnd))
         {
-            try(ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(sstable.metadata()),
-                                                             range,
-                                                             SSTableReadsListener.NOOP_LISTENER))
+            try(UnfilteredPartitionIterator scanner = sstable.partitionIterator(ColumnFilter.all(sstable.metadata()),
+                                                                                range,
+                                                                                SSTableReadsListener.NOOP_LISTENER))
             {
                 for (int b = 0; b < boundaries.length; b += 2)
                     for (int i = boundaries[b]; i <= boundaries[b + 1]; i++)
@@ -215,7 +216,7 @@
 
         for (int i = 2; i < 10; i++)
             insertRowWithKey(store.metadata(), i);
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         assertEquals(1, store.getLiveSSTables().size());
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
@@ -321,7 +322,7 @@
         for (int i = 0; i < 3; i++)
             for (int j = 2; j < 10; j++)
                 insertRowWithKey(store.metadata(), i * 100 + j);
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         assertEquals(1, store.getLiveSSTables().size());
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
@@ -441,7 +442,7 @@
         store.disableAutoCompaction();
 
         insertRowWithKey(store.metadata(), 205);
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         assertEquals(1, store.getLiveSSTables().size());
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
index 731cee2..cdd3ee0 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
@@ -19,9 +19,10 @@
 
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import java.io.IOException;
 import java.util.*;
+import java.util.stream.Stream;
 
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.TableMetadata;
@@ -70,17 +71,17 @@
     }
     */
 
-    public static File tempSSTableFile(String keyspaceName, String cfname, int generation) throws IOException
+    public static File tempSSTableFile(String keyspaceName, String cfname, SSTableId id) throws IOException
     {
         File tempdir = FileUtils.createTempFile(keyspaceName, cfname);
-        if(!tempdir.delete() || !tempdir.mkdir())
+        if(!tempdir.tryDelete() || !tempdir.tryCreateDirectory())
             throw new IOException("Temporary directory creation failed.");
         tempdir.deleteOnExit();
-        File cfDir = new File(tempdir, keyspaceName + File.separator + cfname);
-        cfDir.mkdirs();
+        File cfDir = new File(tempdir, keyspaceName + File.pathSeparator() + cfname);
+        cfDir.tryCreateDirectories();
         cfDir.deleteOnExit();
-        File datafile = new File(new Descriptor(cfDir, keyspaceName, cfname, generation, SSTableFormat.Type.BIG).filenameFor(Component.DATA));
-        if (!datafile.createNewFile())
+        File datafile = new File(new Descriptor(cfDir, keyspaceName, cfname, id, SSTableFormat.Type.BIG).filenameFor(Component.DATA));
+        if (!datafile.createFileIfNotExists())
             throw new IOException("unable to create file " + datafile);
         datafile.deleteOnExit();
         return datafile;
@@ -132,7 +133,7 @@
         private String cfname = CFNAME;
         private Descriptor dest = null;
         private boolean cleanup = true;
-        private int generation = 0;
+        private SSTableId id = SSTableIdFactory.instance.defaultBuilder().generator(Stream.empty()).get();
 
         Context() {}
 
@@ -160,11 +161,11 @@
         }
 
         /**
-         * Sets the generation number for the generated SSTable. Ignored if "dest()" is set.
+         * Sets the identifier for the generated SSTable. Ignored if "dest()" is set.
          */
-        public Context generation(int generation)
+        public Context id(SSTableId id)
         {
-            this.generation = generation;
+            this.id = id;
             return this;
         }
 
@@ -215,11 +216,11 @@
 
         public Collection<SSTableReader> write(int expectedSize, Appender appender) throws IOException
         {
-            File datafile = (dest == null) ? tempSSTableFile(ksname, cfname, generation) : new File(dest.filenameFor(Component.DATA));
+            File datafile = (dest == null) ? tempSSTableFile(ksname, cfname, id) : new File(dest.filenameFor(Component.DATA));
             TableMetadata metadata = Schema.instance.getTableMetadata(ksname, cfname);
             ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata.id);
             SerializationHeader header = appender.header();
-            SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFilename(datafile.getAbsolutePath()), expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, 0, header);
+            SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFilename(datafile.absolutePath()), expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, 0, header);
             while (appender.append(writer)) { /* pass */ }
             Collection<SSTableReader> readers = writer.finish(true);
 
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java
index 31d0b89..5d20367 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java
@@ -18,10 +18,9 @@
 
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.nio.ByteBuffer;
-import java.util.UUID;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import org.apache.cassandra.*;
@@ -33,19 +32,20 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
 import org.apache.cassandra.io.sstable.format.SSTableWriter;
-import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 
-import static junit.framework.Assert.fail;
-import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
-import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.junit.Assert.fail;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
+import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class SSTableWriterTest extends SSTableWriterTestBase
 {
     @Test
-    public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedException
+    public void testAbortTxnWithOpenEarlyShouldRemoveSSTable()
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
@@ -65,7 +65,7 @@
 
             SSTableReader s = writer.setMaxDataAge(1000).openEarly();
             assert s != null;
-            assertFileCounts(dir.list());
+            assertFileCounts(dir.tryListNames());
             for (int i = 10000; i < 20000; i++)
             {
                 UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
@@ -75,24 +75,20 @@
             }
             SSTableReader s2 = writer.setMaxDataAge(1000).openEarly();
             assertTrue(s.last.compareTo(s2.last) < 0);
-            assertFileCounts(dir.list());
+            assertFileCounts(dir.tryListNames());
             s.selfRef().release();
             s2.selfRef().release();
 
-            int datafiles = assertFileCounts(dir.list());
+            int datafiles = assertFileCounts(dir.tryListNames());
             assertEquals(datafiles, 1);
 
-            // These checks don't work on Windows because the writer has the channel still
-            // open till .abort() is called (via the builder)
-            if (!FBUtilities.isWindows)
-            {
-                LifecycleTransaction.waitForDeletions();
-                assertFileCounts(dir.list());
-            }
+            LifecycleTransaction.waitForDeletions();
+            assertFileCounts(dir.tryListNames());
+
             writer.abort();
             txn.abort();
             LifecycleTransaction.waitForDeletions();
-            datafiles = assertFileCounts(dir.list());
+            datafiles = assertFileCounts(dir.tryListNames());
             assertEquals(datafiles, 0);
             validateCFS(cfs);
         }
@@ -100,7 +96,7 @@
 
 
     @Test
-    public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws InterruptedException
+    public void testAbortTxnWithClosedWriterShouldRemoveSSTable()
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
@@ -118,7 +114,7 @@
                 writer.append(builder.build().unfilteredIterator());
             }
 
-            assertFileCounts(dir.list());
+            assertFileCounts(dir.tryListNames());
             for (int i = 10000; i < 20000; i++)
             {
                 UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
@@ -127,28 +123,24 @@
                 writer.append(builder.build().unfilteredIterator());
             }
             SSTableReader sstable = writer.finish(true);
-            int datafiles = assertFileCounts(dir.list());
+            int datafiles = assertFileCounts(dir.tryListNames());
             assertEquals(datafiles, 1);
 
             sstable.selfRef().release();
-            // These checks don't work on Windows because the writer has the channel still
-            // open till .abort() is called (via the builder)
-            if (!FBUtilities.isWindows)
-            {
-                LifecycleTransaction.waitForDeletions();
-                assertFileCounts(dir.list());
-            }
+
+            LifecycleTransaction.waitForDeletions();
+            assertFileCounts(dir.tryListNames());
 
             txn.abort();
             LifecycleTransaction.waitForDeletions();
-            datafiles = assertFileCounts(dir.list());
+            datafiles = assertFileCounts(dir.tryListNames());
             assertEquals(datafiles, 0);
             validateCFS(cfs);
         }
     }
 
     @Test
-    public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws InterruptedException
+    public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables()
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
@@ -169,7 +161,7 @@
                 writer1.append(builder.build().unfilteredIterator());
             }
 
-            assertFileCounts(dir.list());
+            assertFileCounts(dir.tryListNames());
             for (int i = 10000; i < 20000; i++)
             {
                 UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
@@ -180,21 +172,17 @@
             SSTableReader sstable = writer1.finish(true);
             txn.update(sstable, false);
 
-            assertFileCounts(dir.list());
+            assertFileCounts(dir.tryListNames());
 
-            int datafiles = assertFileCounts(dir.list());
+            int datafiles = assertFileCounts(dir.tryListNames());
             assertEquals(datafiles, 2);
 
-            // These checks don't work on Windows because the writer has the channel still
-            // open till .abort() is called (via the builder)
-            if (!FBUtilities.isWindows)
-            {
-                LifecycleTransaction.waitForDeletions();
-                assertFileCounts(dir.list());
-            }
+            LifecycleTransaction.waitForDeletions();
+            assertFileCounts(dir.tryListNames());
+
             txn.abort();
             LifecycleTransaction.waitForDeletions();
-            datafiles = assertFileCounts(dir.list());
+            datafiles = assertFileCounts(dir.tryListNames());
             assertEquals(datafiles, 0);
             validateCFS(cfs);
         }
@@ -228,11 +216,11 @@
             try
             {
                 DecoratedKey dk = Util.dk("large_value");
-                UnfilteredRowIterator rowIter = sstable.iterator(dk,
-                                                                 Slices.ALL,
-                                                                 ColumnFilter.all(cfs.metadata()),
-                                                                 false,
-                                                                 SSTableReadsListener.NOOP_LISTENER);
+                UnfilteredRowIterator rowIter = sstable.rowIterator(dk,
+                                                                    Slices.ALL,
+                                                                    ColumnFilter.all(cfs.metadata()),
+                                                                    false,
+                                                                    SSTableReadsListener.NOOP_LISTENER);
                 while (rowIter.hasNext())
                 {
                     rowIter.next();
@@ -249,7 +237,7 @@
         }
     }
 
-    private static void assertValidRepairMetadata(long repairedAt, UUID pendingRepair, boolean isTransient)
+    private static void assertValidRepairMetadata(long repairedAt, TimeUUID pendingRepair, boolean isTransient)
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_SMALL_MAX_VALUE);
@@ -269,7 +257,7 @@
         LifecycleTransaction.waitForDeletions();
     }
 
-    private static void assertInvalidRepairMetadata(long repairedAt, UUID pendingRepair, boolean isTransient)
+    private static void assertInvalidRepairMetadata(long repairedAt, TimeUUID pendingRepair, boolean isTransient)
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_SMALL_MAX_VALUE);
@@ -297,11 +285,11 @@
     {
         assertValidRepairMetadata(UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false);
         assertValidRepairMetadata(1, NO_PENDING_REPAIR, false);
-        assertValidRepairMetadata(UNREPAIRED_SSTABLE, UUID.randomUUID(), false);
-        assertValidRepairMetadata(UNREPAIRED_SSTABLE, UUID.randomUUID(), true);
+        assertValidRepairMetadata(UNREPAIRED_SSTABLE, nextTimeUUID(), false);
+        assertValidRepairMetadata(UNREPAIRED_SSTABLE, nextTimeUUID(), true);
 
         assertInvalidRepairMetadata(UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, true);
-        assertInvalidRepairMetadata(1, UUID.randomUUID(), false);
+        assertInvalidRepairMetadata(1, nextTimeUUID(), false);
         assertInvalidRepairMetadata(1, NO_PENDING_REPAIR, true);
 
     }
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
index 962e1a1..83ad136 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
@@ -18,15 +18,14 @@
 
 package org.apache.cassandra.io.sstable;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.HashSet;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.cassandra.io.util.File;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -44,12 +43,11 @@
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.SSTableWriter;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 public class SSTableWriterTestBase extends SchemaLoader
 {
@@ -68,15 +66,6 @@
     {
         DatabaseDescriptor.daemonInitialization();
 
-        if (FBUtilities.isWindows)
-        {
-            standardMode = DatabaseDescriptor.getDiskAccessMode();
-            indexMode = DatabaseDescriptor.getIndexAccessMode();
-
-            DatabaseDescriptor.setDiskAccessMode(Config.DiskAccessMode.standard);
-            DatabaseDescriptor.setIndexAccessMode(Config.DiskAccessMode.standard);
-        }
-
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE,
                                     KeyspaceParams.simple(1),
@@ -84,7 +73,7 @@
                                     SchemaLoader.standardCFMD(KEYSPACE, CF_SMALL_MAX_VALUE));
 
         maxValueSize = DatabaseDescriptor.getMaxValueSize();
-        DatabaseDescriptor.setMaxValueSize(1024 * 1024); // set max value size to 1MB
+        DatabaseDescriptor.setMaxValueSize(1024 * 1024); // set max value size to 1MiB
     }
 
     @AfterClass
@@ -135,23 +124,23 @@
      */
     public static void validateCFS(ColumnFamilyStore cfs)
     {
-        Set<Integer> liveDescriptors = new HashSet<>();
+        Set<SSTableId> liveDescriptors = new HashSet<>();
         long spaceUsed = 0;
         for (SSTableReader sstable : cfs.getLiveSSTables())
         {
             assertFalse(sstable.isMarkedCompacted());
             assertEquals(1, sstable.selfRef().globalCount());
-            liveDescriptors.add(sstable.descriptor.generation);
+            liveDescriptors.add(sstable.descriptor.id);
             spaceUsed += sstable.bytesOnDisk();
         }
         for (File dir : cfs.getDirectories().getCFDirectories())
         {
-            for (File f : dir.listFiles())
+            for (File f : dir.tryList())
             {
-                if (f.getName().contains("Data"))
+                if (f.name().contains("Data"))
                 {
-                    Descriptor d = Descriptor.fromFilename(f.getAbsolutePath());
-                    assertTrue(d.toString(), liveDescriptors.contains(d.generation));
+                    Descriptor d = Descriptor.fromFilename(f.absolutePath());
+                    assertTrue(d.toString(), liveDescriptors.contains(d.id));
                 }
             }
         }
@@ -163,7 +152,7 @@
             assertFalse(CompactionManager.instance.submitMaximal(cfs, cfs.gcBefore((int) (System.currentTimeMillis() / 1000)), false).isEmpty());
     }
 
-    public static SSTableWriter getWriter(ColumnFamilyStore cfs, File directory, LifecycleTransaction txn, long repairedAt, UUID pendingRepair, boolean isTransient)
+    public static SSTableWriter getWriter(ColumnFamilyStore cfs, File directory, LifecycleTransaction txn, long repairedAt, TimeUUID pendingRepair, boolean isTransient)
     {
         Descriptor desc = cfs.newSSTableDescriptor(directory);
         return SSTableWriter.create(desc, 0, repairedAt, pendingRepair, isTransient, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS), cfs.indexManager.listIndexes(), txn);
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriterTest.java
index 7ae69ea..14b48b7 100644
--- a/test/unit/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriterTest.java
@@ -24,9 +24,9 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.DiskBoundaries;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.compaction.OperationType;
@@ -69,7 +69,7 @@
     {
 
         SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, 0, 1);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
 
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
index 5f19206..b5aaf8e 100644
--- a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.io.sstable.format;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -26,6 +25,8 @@
 import java.util.Iterator;
 
 import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -85,19 +86,21 @@
         FlushObserver observer = new FlushObserver();
 
         String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
-        File directory = new File(sstableDirectory + File.pathSeparator + KS_NAME + File.pathSeparator + CF_NAME);
+        File directory = new File(sstableDirectory + File.pathSeparator() + KS_NAME + File.pathSeparator() + CF_NAME);
         directory.deleteOnExit();
 
-        if (!directory.exists() && !directory.mkdirs())
-            throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
+        if (!directory.exists() && !directory.tryCreateDirectories())
+            throw new FSWriteError(new IOException("failed to create tmp directory"), directory.absolutePath());
 
         SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
+        Descriptor descriptor = new Descriptor(sstableFormat.info.getLatestVersion(),
+                                               directory,
+                                               cfm.keyspace,
+                                               cfm.name,
+                                               new SequenceBasedSSTableId(0),
+                                               sstableFormat);
 
-        BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(),
-                                                                  directory,
-                                                                  KS_NAME, CF_NAME,
-                                                                  0,
-                                                                  sstableFormat),
+        BigTableWriter writer = new BigTableWriter(descriptor,
                                                    10L, 0L, null, false, TableMetadataRef.forOfflineTools(cfm),
                                                    new MetadataCollector(cfm.comparator).sstableLevel(0),
                                                    new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/VersionAndTypeTest.java b/test/unit/org/apache/cassandra/io/sstable/format/VersionAndTypeTest.java
index 633993f..4e62b9c 100644
--- a/test/unit/org/apache/cassandra/io/sstable/format/VersionAndTypeTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/format/VersionAndTypeTest.java
@@ -20,7 +20,7 @@
 import org.junit.Test;
 
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 
 public class VersionAndTypeTest
 {
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java
index 3cf96f2..e6d2020 100644
--- a/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java
@@ -19,8 +19,9 @@
 package org.apache.cassandra.io.sstable.format.big;
 
 import java.io.ByteArrayInputStream;
-import java.io.File;
+import java.io.UncheckedIOException;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.Collection;
@@ -28,6 +29,7 @@
 import java.util.function.Function;
 
 import com.google.common.collect.ImmutableSet;
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -116,7 +118,7 @@
             .applyUnsafe();
             expectedRowCount++;
         }
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         sstable = store.getLiveSSTables().iterator().next();
     }
@@ -161,7 +163,14 @@
             {
                 Pair<DataInputPlus, Long> pair = getSSTableComponentData(sstable, component, bufferMapper);
 
-                btzcw.writeComponent(component.type, pair.left, pair.right);
+                try
+                {
+                    btzcw.writeComponent(component.type, pair.left, pair.right);
+                }
+                catch (ClosedChannelException e)
+                {
+                    throw new UncheckedIOException(e);
+                }
             }
         }
 
@@ -182,11 +191,11 @@
         for (int i = 0; i < store.metadata().params.minIndexInterval; i++)
         {
             DecoratedKey dk = Util.dk(String.valueOf(i));
-            UnfilteredRowIterator rowIter = sstable.iterator(dk,
-                                                             Slices.ALL,
-                                                             ColumnFilter.all(store.metadata()),
-                                                             false,
-                                                             SSTableReadsListener.NOOP_LISTENER);
+            UnfilteredRowIterator rowIter = sstable.rowIterator(dk,
+                                                                Slices.ALL,
+                                                                ColumnFilter.all(store.metadata()),
+                                                                false,
+                                                                SSTableReadsListener.NOOP_LISTENER);
             while (rowIter.hasNext())
             {
                 rowIter.next();
diff --git a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
index 79cf831..9b5ff62 100644
--- a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.cassandra.io.sstable.metadata;
 
-import java.io.File;
-import java.io.FileOutputStream;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.io.util.*;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
@@ -29,6 +29,7 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
@@ -39,11 +40,6 @@
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.sstable.format.big.BigFormat;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.io.util.RandomAccessReader;
-import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.Throwables;
 
 import static org.junit.Assert.assertEquals;
@@ -66,7 +62,7 @@
         MetadataSerializer serializer = new MetadataSerializer();
         File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);
 
-        Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0, SSTableFormat.Type.BIG);
+        Descriptor desc = new Descriptor(statsFile.parent(), "", "", new SequenceBasedSSTableId(0), SSTableFormat.Type.BIG);
         try (RandomAccessReader in = RandomAccessReader.open(statsFile))
         {
             Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));
@@ -93,7 +89,7 @@
         // Serialize w/ overflowed histograms:
         MetadataSerializer serializer = new MetadataSerializer();
         File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);
-        Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0, SSTableFormat.Type.BIG);
+        Descriptor desc = new Descriptor(statsFile.parent(), "", "", new SequenceBasedSSTableId(0), SSTableFormat.Type.BIG);
 
         try (RandomAccessReader in = RandomAccessReader.open(statsFile))
         {
@@ -110,7 +106,7 @@
     {
         // Serialize to tmp file
         File statsFile = FileUtils.createTempFile(Component.STATS.name, null);
-        try (DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(statsFile)))
+        try (DataOutputStreamPlus out = new FileOutputStreamPlus(statsFile))
         {
             serializer.serialize(metadata, out, version);
         }
@@ -176,7 +172,7 @@
         File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(oldV));
         // Reading both as earlier version should yield identical results.
         SSTableFormat.Type stype = SSTableFormat.Type.current();
-        Descriptor desc = new Descriptor(stype.info.getVersion(oldV), statsFileLb.getParentFile(), "", "", 0, stype);
+        Descriptor desc = new Descriptor(stype.info.getVersion(oldV), statsFileLb.parent(), "", "", new SequenceBasedSSTableId(0), stype);
         try (RandomAccessReader inLb = RandomAccessReader.open(statsFileLb);
              RandomAccessReader inLa = RandomAccessReader.open(statsFileLa))
         {
diff --git a/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java b/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java
index c5c3b60..040a080 100644
--- a/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java
+++ b/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java
@@ -37,6 +37,7 @@
 import java.util.Arrays;
 import java.util.Random;
 
+import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.vint.VIntCoding;
 import org.junit.Test;
 
@@ -44,6 +45,7 @@
 import com.google.common.primitives.UnsignedInteger;
 import com.google.common.primitives.UnsignedLong;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.FBUtilities.preventIllegalAccessWarnings;
 import static org.junit.Assert.*;
 
@@ -171,7 +173,7 @@
 
     static Field baos_bytes;
     static {
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 210187780999648L;
         System.out.println("Seed " + seed);
         r = new Random(seed);
@@ -615,4 +617,20 @@
         }
     }
 
+    @Test
+    public void testWriteBytes() throws Exception
+    {
+        setUp();
+        DataOutputStreamPlus dosp = new BufferedDataOutputStreamPlus(adapter, 8);
+        for (int i = 0; i < 1000; i++)
+        {
+            long val = r.nextLong();
+            int size = r.nextInt(9);
+            byte[] bytes = ByteBufferUtil.bytes(val).array();
+            canonical.write(bytes, 0, size);
+            dosp.writeBytes(val, size);
+        }
+        dosp.flush();
+        assertArrayEquals(canonical.toByteArray(), generated.toByteArray());
+    }
 }
diff --git a/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java b/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java
index 764190c..bb54f25 100644
--- a/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java
+++ b/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java
@@ -21,10 +21,7 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.SyncUtil;
 
-import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -193,7 +190,7 @@
             w.finish();
     
             // will use cachedlength
-            try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.getPath());
+            try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.path());
                  FileHandle fh = builder.complete();
                  RandomAccessReader r = fh.createReader())
             {
@@ -354,7 +351,7 @@
             for (final int offset : Arrays.asList(0, 8))
             {
                 File file1 = writeTemporaryFile(new byte[16]);
-                try (FileHandle.Builder builder = new FileHandle.Builder(file1.getPath()).bufferSize(bufferSize);
+                try (FileHandle.Builder builder = new FileHandle.Builder(file1.path()).bufferSize(bufferSize);
                      FileHandle fh = builder.complete();
                      RandomAccessReader file = fh.createReader())
                 {
@@ -366,7 +363,7 @@
             for (final int n : Arrays.asList(1, 2, 4, 8))
             {
                 File file1 = writeTemporaryFile(new byte[16]);
-                try (FileHandle.Builder builder = new FileHandle.Builder(file1.getPath()).bufferSize(bufferSize);
+                try (FileHandle.Builder builder = new FileHandle.Builder(file1.path()).bufferSize(bufferSize);
                      FileHandle fh = builder.complete();
                      RandomAccessReader file = fh.createReader())
                 {
@@ -427,11 +424,11 @@
         tmpFile.deleteOnExit();
 
         // Create the BRAF by filename instead of by file.
-        try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.getPath());
+        try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.path());
              FileHandle fh = builder.complete();
              RandomAccessReader r = fh.createReader())
         {
-            assert tmpFile.getPath().equals(r.getPath());
+            assert tmpFile.path().equals(r.getPath());
 
             // Create a mark and move the rw there.
             final DataPosition mark = r.mark();
@@ -607,9 +604,9 @@
     {
         File f = FileUtils.createTempFile("BRAFTestFile", null);
         f.deleteOnExit();
-        FileOutputStream fout = new FileOutputStream(f);
+        FileOutputStreamPlus fout = new FileOutputStreamPlus(f);
         fout.write(data);
-        SyncUtil.sync(fout);
+        fout.sync();
         fout.close();
         return f;
     }
diff --git a/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java
index 4963712..196a0b2 100644
--- a/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java
@@ -18,9 +18,9 @@
 
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.Arrays;
 import java.util.concurrent.ThreadLocalRandom;
 
@@ -119,10 +119,10 @@
         assert data.exists();
 
         // simulate corruption of file
-        try (RandomAccessFile dataFile = new RandomAccessFile(data, "rw"))
+        try (FileChannel dataFile = data.newReadWriteChannel())
         {
-            dataFile.seek(1024);
-            dataFile.write((byte) 5);
+            dataFile.position(1024);
+            dataFile.write(ByteBuffer.wrap(new byte[] {5}));
         }
 
         try (RandomAccessReader reader = ChecksummedRandomAccessReader.open(data, crc))
diff --git a/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java
index 6837d1d..5d92d45 100644
--- a/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java
@@ -18,7 +18,6 @@
 */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -44,7 +43,7 @@
     public void cleanup()
     {
         for (TestableSW sw : writers)
-            sw.file.delete();
+            sw.file.tryDelete();
         writers.clear();
     }
 
diff --git a/test/unit/org/apache/cassandra/io/util/DataOutputTest.java b/test/unit/org/apache/cassandra/io/util/DataOutputTest.java
index b6291c0..41631af 100644
--- a/test/unit/org/apache/cassandra/io/util/DataOutputTest.java
+++ b/test/unit/org/apache/cassandra/io/util/DataOutputTest.java
@@ -24,15 +24,12 @@
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.util.ArrayDeque;
+import java.util.Arrays;
 import java.util.Deque;
 import java.util.Random;
 import java.util.concurrent.Callable;
@@ -109,7 +106,7 @@
     @Test
     public void testDataOutputDirectByteBuffer() throws IOException
     {
-        ByteBuffer buf = wrap(new byte[345], true);
+        ByteBuffer buf = wrap(new byte[381], true);
         BufferedDataOutputStreamPlus write = new BufferedDataOutputStreamPlus(null, buf.duplicate());
         DataInput canon = testWrite(write);
         DataInput test = new DataInputStream(new ByteArrayInputStream(ByteBufferUtil.getArray(buf)));
@@ -119,7 +116,7 @@
     @Test
     public void testDataOutputHeapByteBuffer() throws IOException
     {
-        ByteBuffer buf = wrap(new byte[345], false);
+        ByteBuffer buf = wrap(new byte[381], false);
         BufferedDataOutputStreamPlus write = new BufferedDataOutputStreamPlus(null, buf.duplicate());
         DataInput canon = testWrite(write);
         DataInput test = new DataInputStream(new ByteArrayInputStream(ByteBufferUtil.getArray(buf)));
@@ -210,11 +207,11 @@
             checkThrowsException(validateReallocationCallable(write, DataOutputBuffer.MAX_ARRAY_SIZE + 1),
                                  BufferOverflowException.class);
             //Check that it does throw
-            checkThrowsException(() -> 
+            checkThrowsException(() ->
                                  {
                                      write.write(42);
                                      return null;
-                                 }, 
+                                 },
                                  BufferOverflowException.class);
         }
     }
@@ -314,8 +311,8 @@
         try (SafeMemoryWriter write = new SafeMemoryWriter(10))
         {
             DataInput canon = testWrite(write);
-            byte[] bytes = new byte[345];
-            write.currentBuffer().getBytes(0, bytes, 0, 345);
+            byte[] bytes = new byte[381];
+            write.currentBuffer().getBytes(0, bytes, 0, 381);
             DataInput test = new DataInputStream(new ByteArrayInputStream(bytes));
             testRead(test, canon);
         }
@@ -327,16 +324,16 @@
         File file = FileUtils.createTempFile("dataoutput", "test");
         try
         {
-            DataOutputStreamPlus write = new WrappedDataOutputStreamPlus(new FileOutputStream(file));
+            DataOutputStreamPlus write = new WrappedDataOutputStreamPlus(new FileOutputStreamPlus(file));
             DataInput canon = testWrite(write);
             write.close();
-            DataInputStream test = new DataInputStream(new FileInputStream(file));
+            DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
             testRead(test, canon);
             test.close();
         }
         finally
         {
-            Assert.assertTrue(file.delete());
+            Assert.assertTrue(file.tryDelete());
         }
     }
 
@@ -346,16 +343,16 @@
         File file = FileUtils.createTempFile("dataoutput", "test");
         try
         {
-            DataOutputStreamPlus write = new BufferedDataOutputStreamPlus(new FileOutputStream(file));
+            DataOutputStreamPlus write = new FileOutputStreamPlus(file);
             DataInput canon = testWrite(write);
             write.close();
-            DataInputStream test = new DataInputStream(new FileInputStream(file));
+            DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
             testRead(test, canon);
             test.close();
         }
         finally
         {
-            Assert.assertTrue(file.delete());
+            Assert.assertTrue(file.tryDelete());
         }
     }
 
@@ -366,17 +363,16 @@
         try
         {
             @SuppressWarnings("resource")
-            final RandomAccessFile raf = new RandomAccessFile(file, "rw");
-            DataOutputStreamPlus write = new BufferedDataOutputStreamPlus(raf.getChannel());
+            DataOutputStreamPlus write = new BufferedDataOutputStreamPlus(file.newReadWriteChannel());
             DataInput canon = testWrite(write);
             write.close();
-            DataInputStream test = new DataInputStream(new FileInputStream(file));
+            DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
             testRead(test, canon);
             test.close();
         }
         finally
         {
-            Assert.assertTrue(file.delete());
+            Assert.assertTrue(file.tryDelete());
         }
     }
 
@@ -390,10 +386,10 @@
         DataInput canon = testWrite(write);
         write.flush();
         write.close();
-        DataInputStream test = new DataInputStream(new FileInputStream(file));
+        DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
         testRead(test, canon);
         test.close();
-        Assert.assertTrue(file.delete());
+        Assert.assertTrue(file.tryDelete());
     }
 
     private DataInput testWrite(DataOutputPlus test) throws IOException
@@ -465,6 +461,21 @@
             canon.writeFloat(v);
         }
 
+        byte[] rndBytes = new byte[Long.BYTES];
+        for (int i = 1; i <= Long.BYTES; i++)
+        {
+            Arrays.fill(rndBytes, 0, rndBytes.length, (byte) 0);
+            rnd.nextBytes(rndBytes);
+            // keep only first i random bytes
+            Arrays.fill(rndBytes, i, rndBytes.length, (byte) 0);
+            long val = ByteBufferUtil.toLong(ByteBuffer.wrap(rndBytes));
+            test.writeBytes(val, i);
+            byte[] arr = new byte[i];
+            System.arraycopy(rndBytes, 0, arr, 0, i);
+            canon.write(arr);
+        }
+
+
         // 27
         return new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
     }
@@ -483,6 +494,10 @@
         assert test.readByte() == canon.readByte();
         assert test.readDouble() == canon.readDouble();
         assert test.readFloat() == canon.readFloat();
+        for (int i = 1; i <= Long.BYTES; i++)
+        {
+            Assert.assertArrayEquals(ByteBufferUtil.readBytes(canon, i), ByteBufferUtil.readBytes(test, i));
+        }
         try
         {
             test.readInt();
diff --git a/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java b/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java
index b040d27..0f72e33 100644
--- a/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java
@@ -29,6 +29,7 @@
 
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -38,7 +39,7 @@
     private ByteBuffer allocateBuffer(int size)
     {
         ByteBuffer ret = ByteBuffer.allocate(Ints.checkedCast(size));
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         System.out.println("Seed " + seed);
 
diff --git a/test/unit/org/apache/cassandra/io/util/FileTest.java b/test/unit/org/apache/cassandra/io/util/FileTest.java
new file mode 100644
index 0000000..0a7c478
--- /dev/null
+++ b/test/unit/org/apache/cassandra/io/util/FileTest.java
@@ -0,0 +1,388 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cassandra.io.util;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.RateLimiter;
+import org.apache.commons.lang3.RandomUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.assertj.core.api.Assertions;
+import org.psjava.util.Triple;
+
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_IO_TMPDIR;
+
+public class FileTest
+{
+    private static final java.io.File dir;
+    static
+    {
+        CassandraRelevantProperties.USE_NIX_RECURSIVE_DELETE.setBoolean(false);
+        java.io.File parent = new java.io.File(JAVA_IO_TMPDIR.getString()); //checkstyle: permit this instantiation
+        String dirName = Long.toHexString(ThreadLocalRandom.current().nextLong());
+        while (new java.io.File(parent, dirName).exists()) //checkstyle: permit this instantiation
+            dirName = Long.toHexString(ThreadLocalRandom.current().nextLong());
+        dir = new java.io.File(parent, dirName); //checkstyle: permit this instantiation
+        dir.mkdirs();
+        new File(dir).deleteRecursiveOnExit();
+
+        // PathUtils touches StorageService which touches StreamManager which requires configs be setup
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+
+    @Test
+    public void testEquivalence() throws IOException
+    {
+        java.io.File notExists = new java.io.File(dir, "notExists"); //checkstyle: permit this instantiation
+        java.io.File regular = new java.io.File(dir, "regular"); //checkstyle: permit this instantiation
+        regular.createNewFile();
+        java.io.File regularLink = new java.io.File(dir, "regularLink"); //checkstyle: permit this instantiation
+        Files.createSymbolicLink(regularLink.toPath(), regular.toPath());
+        java.io.File emptySubdir = new java.io.File(dir, "empty"); //checkstyle: permit this instantiation
+        java.io.File emptySubdirLink = new java.io.File(dir, "emptyLink"); //checkstyle: permit this instantiation
+        emptySubdir.mkdir();
+        Files.createSymbolicLink(emptySubdirLink.toPath(), emptySubdir.toPath());
+        java.io.File nonEmptySubdir = new java.io.File(dir, "nonEmpty"); //checkstyle: permit this instantiation
+        java.io.File nonEmptySubdirLink = new java.io.File(dir, "nonEmptyLink"); //checkstyle: permit this instantiation
+        nonEmptySubdir.mkdir();
+        Files.createSymbolicLink(nonEmptySubdirLink.toPath(), nonEmptySubdir.toPath());
+        new java.io.File(nonEmptySubdir, "something").createNewFile(); //checkstyle: permit this instantiation
+
+        testEquivalence("");
+
+        List<Runnable> setup = ImmutableList.of(
+            () -> {},
+            () -> dir.setWritable(false),
+            () -> dir.setReadable(false),
+            () -> dir.setWritable(true)
+        );
+
+        for (Runnable run : setup)
+        {
+            run.run();
+            testEquivalence(notExists.getPath());
+            testEquivalence(nonAbsolute(notExists));
+            testEquivalence(regular.getPath());
+            testEquivalence(nonAbsolute(regular));
+            testEquivalence(regularLink.getPath());
+            testEquivalence(nonAbsolute(regularLink));
+            testEquivalence(emptySubdir.getPath());
+            testEquivalence(nonAbsolute(emptySubdir));
+            testEquivalence(emptySubdirLink.getPath());
+            testEquivalence(nonAbsolute(emptySubdirLink));
+            testEquivalence(nonEmptySubdir.getPath());
+            testEquivalence(nonAbsolute(nonEmptySubdir));
+            testEquivalence(nonEmptySubdirLink.getPath());
+            testEquivalence(nonAbsolute(nonEmptySubdirLink));
+        }
+
+        emptySubdirLink.delete();
+        regularLink.delete();
+        regular.delete();
+        emptySubdir.delete();
+    }
+
+    private static String nonAbsolute(java.io.File file)
+    {
+        return file.getParent() + File.pathSeparator() + ".." + File.pathSeparator() + file.getParentFile().getName() + File.pathSeparator() + file.getName();
+    }
+
+    private void testEquivalence(String path) throws IOException
+    {
+        java.io.File file = new java.io.File(path); //checkstyle: permit this instantiation
+        if (file.exists()) testExists(path);
+        else testNotExists(path);
+    }
+
+    private void testBasic(String path) throws IOException
+    {
+        // TODO: confirm - it seems that accuracy of lastModified may differ between APIs on Linux??
+        testEquivalence(path, f -> f.lastModified() / 1000, f -> f.lastModified() / 1000);
+        testEquivalence(path, java.io.File::length, File::length);
+        testEquivalence(path, java.io.File::canExecute, File::isExecutable);
+        testEquivalence(path, java.io.File::canRead, File::isReadable);
+        testEquivalence(path, java.io.File::canWrite, File::isWritable);
+        testEquivalence(path, java.io.File::exists, File::exists);
+        testEquivalence(path, java.io.File::isAbsolute, File::isAbsolute);
+        testEquivalence(path, java.io.File::isDirectory, File::isDirectory);
+        testEquivalence(path, java.io.File::isFile, File::isFile);
+        testEquivalence(path, java.io.File::getPath, File::path);
+        testEquivalence(path, java.io.File::getAbsolutePath, File::absolutePath);
+        testEquivalence(path, java.io.File::getCanonicalPath, File::canonicalPath);
+        testEquivalence(path, java.io.File::getParent, File::parentPath);
+        testEquivalence(path, java.io.File::toPath, File::toPath);
+        testEquivalence(path, java.io.File::list, File::tryListNames);
+        testEquivalence(path, java.io.File::listFiles, File::tryList);
+        java.io.File file = new java.io.File(path); //checkstyle: permit this instantiation
+        if (file.getParentFile() != null) testBasic(file.getParent());
+        if (!file.equals(file.getAbsoluteFile())) testBasic(file.getAbsolutePath());
+        if (!file.equals(file.getCanonicalFile())) testBasic(file.getCanonicalPath());
+    }
+
+    private void testPermissionsEquivalence(String path)
+    {
+        ImmutableList<Triple<BiFunction<java.io.File, Boolean, Boolean>, BiFunction<File, Boolean, Boolean>, Function<java.io.File, Boolean>>> tests = ImmutableList.of(
+            Triple.create(java.io.File::setReadable, File::trySetReadable, java.io.File::canRead),
+            Triple.create(java.io.File::setWritable, File::trySetWritable, java.io.File::canWrite),
+            Triple.create(java.io.File::setExecutable, File::trySetExecutable, java.io.File::canExecute)
+        );
+        for (Triple<BiFunction<java.io.File, Boolean, Boolean>, BiFunction<File, Boolean, Boolean>, Function<java.io.File, Boolean>> test : tests)
+        {
+            java.io.File file = new java.io.File(path); //checkstyle: permit this instantiation
+            boolean cur = test.v3.apply(file);
+            boolean canRead = file.canRead();
+            boolean canWrite = file.canWrite();
+            boolean canExecute = file.canExecute();
+            testEquivalence(path, f -> test.v1.apply(f, !cur), f -> test.v2.apply(f, !cur), (f, success) -> {
+                testEquivalence(path, java.io.File::canExecute, File::isExecutable);
+                testEquivalence(path, java.io.File::canRead, File::isReadable);
+                testEquivalence(path, java.io.File::canWrite, File::isWritable);
+                Assert.assertEquals(success != cur, test.v3.apply(file));
+                test.v1.apply(f, cur);
+            });
+            Assert.assertEquals(canRead, file.canRead());
+            Assert.assertEquals(canWrite, file.canWrite());
+            Assert.assertEquals(canExecute, file.canExecute());
+        }
+    }
+
+    private void testCreation(String path, IOConsumer<java.io.File> afterEach)
+    {
+        testEquivalence(path, java.io.File::createNewFile, File::createFileIfNotExists, afterEach);
+        testEquivalence(path, java.io.File::mkdir, File::tryCreateDirectory, afterEach);
+        testEquivalence(path, java.io.File::mkdirs, File::tryCreateDirectories, afterEach);
+    }
+
+    private void testExists(String path) throws IOException
+    {
+        testBasic(path);
+        testPermissionsEquivalence(path);
+        testCreation(path, ignore -> {});
+        testEquivalence(path, java.io.File::delete, File::tryDelete, (f, s) -> {if (s) f.createNewFile(); });
+        testTryVsConfirm(path, java.io.File::delete, File::delete, (f, s) -> {if (s) f.createNewFile(); });
+    }
+
+    private void testNotExists(String path) throws IOException
+    {
+        testBasic(path);
+        testPermissionsEquivalence(path);
+        testCreation(path, java.io.File::delete);
+        testEquivalence(path, java.io.File::delete, File::tryDelete);
+        testTryVsConfirm(path, java.io.File::delete, File::delete);
+    }
+
+    interface IOFn<I, O> { O apply(I in) throws IOException; }
+    interface IOConsumer<I1> { void accept(I1 i1) throws IOException; }
+    interface IOBiConsumer<I1, I2> { void accept(I1 i1, I2 i2) throws IOException; }
+
+    private <T> void testEquivalence(String path, IOFn<java.io.File, T> canonical, IOFn<File, T> test)
+    {
+        testEquivalence(path, canonical, test, ignore -> {});
+    }
+
+    private <T> void testEquivalence(String path, IOFn<java.io.File, T> canonical, IOFn<File, T> test, IOConsumer<java.io.File> afterEach)
+    {
+        testEquivalence(path, canonical, test, (f, ignore) -> afterEach.accept(f));
+    }
+
+    private <T> void testEquivalence(String path, IOFn<java.io.File, T> canonical, IOFn<File, T> test, IOBiConsumer<java.io.File, Boolean> afterEach)
+    {
+        java.io.File file = new java.io.File(path); //checkstyle: permit this instantiation
+        Object expect;
+        try
+        {
+            expect = canonical.apply(file);
+        }
+        catch (Throwable e)
+        {
+            expect = new Failed(e);
+        }
+        try { afterEach.accept(file, !(expect instanceof Failed) && !Boolean.FALSE.equals(expect)); } catch (IOException e) { throw new AssertionError(e); }
+        Object actual;
+        try
+        {
+            actual = test.apply(new File(path));
+        }
+        catch (Throwable e)
+        {
+            actual = new Failed(e);
+        }
+        try { afterEach.accept(file, !(actual instanceof Failed) && !Boolean.FALSE.equals(actual)); } catch (IOException e) { throw new AssertionError(e); }
+        if (expect instanceof String[] && actual instanceof String[]) Assert.assertArrayEquals((String[])expect, (String[])actual);
+        else if (expect instanceof java.io.File[] && actual instanceof File[]) assertArrayEquals((java.io.File[]) expect, (File[]) actual);
+        else Assert.assertEquals(path + "," + canonical.toString(), expect, actual);
+    }
+
+    private void testTryVsConfirm(String path, Predicate<java.io.File> canonical, IOConsumer<File> test)
+    {
+        testTryVsConfirm(path, canonical, test, (f, s) -> {});
+    }
+    private void testTryVsConfirm(String path, Predicate<java.io.File> canonical, IOConsumer<File> test, IOConsumer<java.io.File> afterEach)
+    {
+        testTryVsConfirm(path, canonical, test, (f, ignore) -> afterEach.accept(f));
+    }
+    private void testTryVsConfirm(String path, Predicate<java.io.File> canonical, IOConsumer<File> test, IOBiConsumer<java.io.File, Boolean> afterEach)
+    {
+        java.io.File file = new java.io.File(path); //checkstyle: permit this instantiation
+        boolean expect = canonical.test(file);
+        try { afterEach.accept(file, expect); } catch (IOException e) { throw new AssertionError(e); }
+        boolean actual;
+        try
+        {
+            test.accept(new File(path));
+            actual = true;
+        }
+        catch (Throwable e)
+        {
+            actual = false;
+        }
+        try { afterEach.accept(file, actual); } catch (IOException e) { throw new AssertionError(e); }
+        Assert.assertEquals(path + "," + canonical.toString(), expect, actual);
+    }
+
+    private static void assertArrayEquals(java.io.File[] expect, File[] actual)
+    {
+        Assert.assertEquals(expect.length, actual.length);
+        for (int i = 0 ; i < expect.length ; ++i)
+            Assert.assertEquals(expect[i].getPath(), actual[i].path());
+    }
+
+    private static class Failed
+    {
+        final Throwable with;
+
+        private Failed(Throwable with)
+        {
+            this.with = with;
+        }
+
+        @Override
+        public boolean equals(Object obj)
+        {
+            return obj instanceof Failed;
+        }
+
+        @Override
+        public String toString()
+        {
+            StringWriter sw = new StringWriter();
+            with.printStackTrace(new PrintWriter(sw));
+            return sw.toString();
+        }
+    }
+
+    @Test
+    public void testDeletes() throws IOException
+    {
+        File subdir = new File(dir, "deletes");
+        File file = new File(dir, "f");
+        subdir.tryCreateDirectory();
+        Assert.assertTrue(new File(subdir, "subsubdir").tryCreateDirectory());
+        subdir.deleteRecursive();
+        Assert.assertFalse(subdir.exists());
+
+        subdir.tryCreateDirectory();
+        file.createFileIfNotExists();
+        Assert.assertTrue(new File(subdir, "subsubdir").tryCreateDirectory());
+        long start = System.nanoTime();
+        RateLimiter rateLimiter = RateLimiter.create(2);
+        subdir.deleteRecursive(rateLimiter);
+        file.delete(rateLimiter);
+        long end = System.nanoTime();
+        Assert.assertTrue("" + NANOSECONDS.toMillis(end - start), SECONDS.toNanos(1) <= end - start);
+        Assert.assertFalse(subdir.exists());
+        Assert.assertFalse(file.exists());
+    }
+
+    @Test
+    public void testAncestry()
+    {
+        Assert.assertTrue(new File("somewhere/../").isAncestorOf(new File("somewhere")));
+        Assert.assertTrue(new File("../").isAncestorOf(new File("")));
+    }
+
+    @Test
+    public void testOverwrite() throws Exception
+    {
+        File f = new File(dir, UUID.randomUUID().toString());
+
+        // write
+        ByteBuffer buf = ByteBuffer.wrap(RandomUtils.nextBytes(100));
+        try (FileChannel fc = f.newWriteChannel(File.WriteMode.OVERWRITE))
+        {
+            fc.write(buf);
+        }
+        Assertions.assertThat(f.length()).isEqualTo(buf.array().length);
+        Assertions.assertThat(Files.readAllBytes(f.toPath())).isEqualTo(buf.array());
+
+        // overwrite
+        buf = ByteBuffer.wrap(RandomUtils.nextBytes(50));
+        try (FileChannel fc = f.newWriteChannel(File.WriteMode.OVERWRITE))
+        {
+            fc.write(buf);
+        }
+        Assertions.assertThat(f.length()).isEqualTo(buf.array().length);
+        Assertions.assertThat(Files.readAllBytes(f.toPath())).isEqualTo(buf.array());
+    }
+
+    @Test
+    public void testAppend() throws Exception
+    {
+        File f = new File(dir, UUID.randomUUID().toString());
+
+        // write
+        ByteBuffer buf1 = ByteBuffer.wrap(RandomUtils.nextBytes(100));
+        try (FileChannel fc = f.newWriteChannel(File.WriteMode.APPEND))
+        {
+            fc.write(buf1);
+        }
+        Assertions.assertThat(f.length()).isEqualTo(buf1.array().length);
+        Assertions.assertThat(Files.readAllBytes(f.toPath())).isEqualTo(buf1.array());
+
+        // append
+        ByteBuffer buf2 = ByteBuffer.wrap(RandomUtils.nextBytes(50));
+        try (FileChannel fc = f.newWriteChannel(File.WriteMode.APPEND))
+        {
+            fc.write(buf2);
+        }
+        Assertions.assertThat(f.length()).isEqualTo(buf1.array().length + buf2.array().length);
+        ByteBuffer buf = ByteBuffer.allocate(buf1.array().length + buf2.array().length);
+        buf.put(buf1.array()).put(buf2.array());
+        Assertions.assertThat(Files.readAllBytes(f.toPath())).isEqualTo(buf.array());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java b/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java
index 7d19f51..6f25b2e 100644
--- a/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java
+++ b/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java
@@ -18,9 +18,7 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -30,6 +28,7 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.assertj.core.api.Assertions;
 
@@ -53,26 +52,26 @@
         // test straightforward conversions for each unit
         assertEquals("FileUtils.parseFileSize() failed to parse a whole number of bytes",
             256L, FileUtils.parseFileSize("256 bytes"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of kilobytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of kibibytes",
             2048L, FileUtils.parseFileSize("2 KiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of megabytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of mebibytes",
             4194304L, FileUtils.parseFileSize("4 MiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of gigabytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of gibibytes",
             3221225472L, FileUtils.parseFileSize("3 GiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of terabytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a whole number of tebibytes",
             5497558138880L, FileUtils.parseFileSize("5 TiB"));
         // test conversions of fractional units
-        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of kilobytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of kibibytes",
             1536L, FileUtils.parseFileSize("1.5 KiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of kilobytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of kibibytes",
             4434L, FileUtils.parseFileSize("4.33 KiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of megabytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of mebibytes",
             2359296L, FileUtils.parseFileSize("2.25 MiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of megabytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of mebibytes",
             3292529L, FileUtils.parseFileSize("3.14 MiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of gigabytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of gibibytes",
             1299227607L, FileUtils.parseFileSize("1.21 GiB"));
-        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of terabytes",
+        assertEquals("FileUtils.parseFileSize() failed to parse a rational number of tebibytes",
             6621259022467L, FileUtils.parseFileSize("6.022 TiB"));
     }
 
@@ -88,11 +87,11 @@
         byte[] b = Files.readAllBytes(file.toPath());
         assertEquals(expected, new String(b, StandardCharsets.UTF_8));
 
-        FileUtils.truncate(file.getAbsolutePath(), 10);
+        FileUtils.truncate(file.absolutePath(), 10);
         b = Files.readAllBytes(file.toPath());
         assertEquals("The quick ", new String(b, StandardCharsets.UTF_8));
 
-        FileUtils.truncate(file.getAbsolutePath(), 0);
+        FileUtils.truncate(file.absolutePath(), 0);
         b = Files.readAllBytes(file.toPath());
         assertEquals(0, b.length);
     }
@@ -103,7 +102,7 @@
         File folder = createFolder(Paths.get(DatabaseDescriptor.getAllDataFileLocations()[0], "testFolderSize"));
         folder.deleteOnExit();
 
-        File childFolder = createFolder(Paths.get(folder.getPath(), "child"));
+        File childFolder = createFolder(Paths.get(folder.path(), "child"));
 
         File[] files = {
                        createFile(new File(folder, "001"), 10000),
@@ -222,16 +221,16 @@
 
     private File createFolder(Path path)
     {
-        File folder = path.toFile();
+        File folder = new File(path);
         FileUtils.createDirectory(folder);
         return folder;
     }
 
     private File createFile(File file, long size)
     {
-        try (RandomAccessFile f = new RandomAccessFile(file, "rw"))
+        try
         {
-            f.setLength(size);
+            Util.setFileLength(file, size);
         }
         catch (Exception e)
         {
diff --git a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
index 2814bab..7194d30 100644
--- a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
+++ b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Random;
@@ -38,12 +37,14 @@
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.CompressionParams;
 
-import static junit.framework.Assert.assertNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class MmappedRegionsTest
 {
     private static final Logger logger = LoggerFactory.getLogger(MmappedRegionsTest.class);
@@ -57,7 +58,7 @@
     private static ByteBuffer allocateBuffer(int size)
     {
         ByteBuffer ret = ByteBuffer.allocate(Ints.checkedCast(size));
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         logger.info("Seed {}", seed);
 
@@ -301,11 +302,11 @@
         File f = FileUtils.createTempFile("testMapForCompressionMetadata", "1");
         f.deleteOnExit();
 
-        File cf = FileUtils.createTempFile(f.getName() + ".metadata", "1");
+        File cf = FileUtils.createTempFile(f.name() + ".metadata", "1");
         cf.deleteOnExit();
 
         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
-        try(SequentialWriter writer = new CompressedSequentialWriter(f, cf.getAbsolutePath(),
+        try(SequentialWriter writer = new CompressedSequentialWriter(f, cf.absolutePath(),
                                                                      null, SequentialWriterOption.DEFAULT,
                                                                      CompressionParams.snappy(), sstableMetadataCollector))
         {
@@ -313,7 +314,7 @@
             writer.finish();
         }
 
-        CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), true);
+        CompressionMetadata metadata = new CompressionMetadata(cf.absolutePath(), f.length(), true);
         try(ChannelProxy channel = new ChannelProxy(f);
             MmappedRegions regions = MmappedRegions.map(channel, metadata))
         {
diff --git a/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java b/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java
index 829cfcf..31fa53c 100644
--- a/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java
@@ -25,10 +25,9 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.nio.channels.ReadableByteChannel;
 import java.util.ArrayDeque;
 import java.util.Queue;
@@ -41,6 +40,7 @@
 import com.google.common.primitives.UnsignedInteger;
 import com.google.common.primitives.UnsignedLong;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class NIODataInputStreamTest
@@ -51,7 +51,7 @@
 
     void init()
     {
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         System.out.println("Seed " + seed);
         r = new Random(seed);
@@ -223,11 +223,11 @@
         assertEquals(8190 - 10 - 4096, is.available());
 
         File f = FileUtils.createTempFile("foo", "bar");
-        RandomAccessFile fos = new RandomAccessFile(f, "rw");
-        fos.write(new byte[10]);
-        fos.seek(0);
+        FileChannel fos = f.newReadWriteChannel();
+        fos.write(ByteBuffer.wrap(new byte[10]));
+        fos.position(0);
 
-        is = new NIODataInputStream(fos.getChannel(), 9);
+        is = new NIODataInputStream(fos, 9);
 
         int remaining = 10;
         assertEquals(10, is.available());
diff --git a/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java
index 8904daa..f933cf1 100644
--- a/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java
@@ -20,7 +20,6 @@
  */
 package org.apache.cassandra.io.util;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
@@ -43,6 +42,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -138,8 +138,8 @@
     @Test
     public void testVeryLarge() throws IOException
     {
-        final long SIZE = 1L << 32; // 2GB
-        Parameters params = new Parameters(SIZE, 1 << 20); // 1MB
+        final long SIZE = 1L << 32; // 2GiB
+        Parameters params = new Parameters(SIZE, 1 << 20); // 1MiB
 
 
         try (ChannelProxy channel = new ChannelProxy("abc", new FakeFileChannel(SIZE));
@@ -290,14 +290,14 @@
     private static void testReadFully(Parameters params) throws IOException
     {
         final File f = writeFile(params);
-        try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath())
+        try (FileHandle.Builder builder = new FileHandle.Builder(f.path())
                                                      .bufferType(params.bufferType).bufferSize(params.bufferSize))
         {
             builder.mmapped(params.mmappedRegions);
             try (FileHandle fh = builder.complete();
                  RandomAccessReader reader = fh.createReader())
             {
-                assertEquals(f.getAbsolutePath(), reader.getPath());
+                assertEquals(f.absolutePath(), reader.getPath());
                 assertEquals(f.length(), reader.length());
                 assertEquals(f.length(), reader.bytesRemaining());
                 assertEquals(Math.min(Integer.MAX_VALUE, f.length()), reader.available());
@@ -331,11 +331,11 @@
 
         assert f.exists();
 
-        try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath());
+        try (FileHandle.Builder builder = new FileHandle.Builder(f.path());
              FileHandle fh = builder.complete();
              RandomAccessReader reader = fh.createReader())
         {
-            assertEquals(f.getAbsolutePath(), reader.getPath());
+            assertEquals(f.absolutePath(), reader.getPath());
             assertEquals(expected.length(), reader.length());
 
             ByteBuffer b = ByteBufferUtil.read(reader, expected.length());
@@ -362,7 +362,7 @@
 
         assert f.exists();
 
-        try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath());
+        try (FileHandle.Builder builder = new FileHandle.Builder(f.path());
              FileHandle fh = builder.complete();
              RandomAccessReader reader = fh.createReader())
         {
@@ -428,7 +428,7 @@
         final File f = FileUtils.createTempFile("testMark", "1");
         final byte[] expected = new byte[1 << 16];
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         logger.info("Seed {}", seed);
         Random r = new Random(seed);
@@ -442,7 +442,7 @@
 
         assert f.exists();
 
-        try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath()))
+        try (FileHandle.Builder builder = new FileHandle.Builder(f.path()))
         {
             final Runnable worker = () ->
             {
@@ -519,7 +519,7 @@
     {
         Parameters params = new Parameters(8192, 4096);
         final File f = writeFile(params);
-        try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath())
+        try (FileHandle.Builder builder = new FileHandle.Builder(f.path())
                                                      .bufferType(params.bufferType).bufferSize(params.bufferSize))
         {
             builder.mmapped(params.mmappedRegions);
@@ -537,7 +537,7 @@
     {
         Parameters params = new Parameters(8192, 4096);
         final File f = writeFile(params);
-        try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath())
+        try (FileHandle.Builder builder = new FileHandle.Builder(f.path())
                                                      .bufferType(params.bufferType).bufferSize(params.bufferSize))
         {
             try (FileHandle fh = builder.complete();
@@ -552,7 +552,7 @@
     private static void testSkipBytes(Parameters params, int expectationMultiples) throws IOException
     {
         final File f = writeFile(params);
-        try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath())
+        try (FileHandle.Builder builder = new FileHandle.Builder(f.path())
                                                      .bufferType(params.bufferType).bufferSize(params.bufferSize))
         {
             builder.mmapped(params.mmappedRegions);
diff --git a/test/unit/org/apache/cassandra/io/util/RewindableDataInputStreamPlusTest.java b/test/unit/org/apache/cassandra/io/util/RewindableDataInputStreamPlusTest.java
deleted file mode 100644
index 08c9ddf..0000000
--- a/test/unit/org/apache/cassandra/io/util/RewindableDataInputStreamPlusTest.java
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.io.util;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-public class RewindableDataInputStreamPlusTest
-{
-
-    private final int INITIAL_BUFFER_SIZE = 1;
-
-    private File file;
-
-    @Before
-    public void setup() throws Exception
-    {
-        this.file = new File(FileUtils.getTempDir(), "subdir/test.buffer");
-    }
-
-    @Test
-    public void testMarkAndResetSimple() throws Exception
-    {
-        byte[] testData;
-
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        try (DataOutputStream out = new DataOutputStream(baos))
-        {
-            // boolean
-            out.writeBoolean(true);
-            // byte
-            out.writeByte(0x1);
-            // char
-            out.writeChar('a');
-            // short
-            out.writeShort(1);
-            // int
-            out.writeInt(1);
-            // long
-            out.writeLong(1L);
-            // float
-            out.writeFloat(1.0f);
-            // double
-            out.writeDouble(1.0d);
-
-            // String
-            out.writeUTF("abc");
-            testData = baos.toByteArray();
-        }
-
-        for (int memCapacity = 0; memCapacity <= 16; memCapacity++)
-        {
-            int diskCapacity = 16 - memCapacity;
-            try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                          INITIAL_BUFFER_SIZE, memCapacity, file,
-                                                                                          diskCapacity))
-            {
-                try {
-                    //should mark before resetting
-                    reader.reset(null);
-                    fail("Should have thrown IOException");
-                } catch (IOException e) {}
-
-                assertTrue(reader.readBoolean());
-
-                reader.mark();
-
-                try {
-                    //cannot mark already marked stream
-                    reader.mark();
-                    fail("Should have thrown IllegalStateException");
-                } catch (IllegalStateException e) {}
-
-                assertEquals(0x1, reader.readByte());
-                assertEquals('a', reader.readChar());
-                assertEquals(3, reader.bytesPastMark(null));
-                reader.reset(null);
-
-                try {
-                    //cannot mark when reading from cache
-                    reader.mark();
-                    fail("Should have thrown IllegalStateException");
-                } catch (IllegalStateException e) {}
-
-                //read again previous sequence
-                assertEquals(0x1, reader.readByte());
-                assertEquals('a', reader.readChar());
-                //finish reading again previous sequence
-                assertEquals(1, reader.readShort());
-
-                reader.mark();
-                assertEquals(1, reader.readInt());
-                assertEquals(1L, reader.readLong());
-                assertEquals(1.0f, reader.readFloat(), 0);
-                assertEquals(16, reader.bytesPastMark(null));
-                reader.reset(null);
-
-                //read again previous sequence
-                assertEquals(1, reader.readInt());
-                assertEquals(1L, reader.readLong());
-                assertEquals(1.0f, reader.readFloat(), 0);
-                //finish reading again previous sequence
-
-                //mark again
-                reader.mark();
-                assertEquals(1.0d, reader.readDouble(), 0);
-                assertEquals(8, reader.bytesPastMark(null));
-                reader.reset(null);
-
-                //read again previous sequence
-                assertEquals(1.0d, reader.readDouble(), 0);
-                //finish reading again previous sequence
-
-                //mark and reset
-                reader.mark();
-                reader.reset(null);
-
-                assertEquals("abc", reader.readUTF());
-
-                //check max file size
-                assertEquals(diskCapacity, file.length());
-            }
-            assertFalse(file.exists());
-        }
-    }
-
-    @Test
-    public void testVeryLargeCapacity() throws Exception
-    {
-        byte[] testData;
-
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        try (DataOutputStream out = new DataOutputStream(baos))
-        {
-            out.writeUTF("abc");
-            testData = baos.toByteArray();
-        }
-
-        try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                      INITIAL_BUFFER_SIZE, Integer.MAX_VALUE, file,
-                                                                                      Integer.MAX_VALUE))
-        {
-            reader.mark();
-            assertEquals("abc", reader.readUTF());
-            reader.reset();
-            assertEquals("abc", reader.readUTF());
-        }
-        assertFalse(file.exists());
-
-
-        baos = new ByteArrayOutputStream();
-        try (DataOutputStream out = new DataOutputStream(baos))
-        {
-            out.writeBoolean(true);
-            out.writeBoolean(true);
-            testData = baos.toByteArray();
-        }
-    }
-
-    @Test
-    public void testMarkAndResetBigBuffer() throws Exception
-    {
-        byte[] testData;
-
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        try (DataOutputStream out = new DataOutputStream(baos))
-        {
-            // boolean
-            out.writeBoolean(true);
-            // byte
-            out.writeByte(0x1);
-            // char
-            out.writeChar('a');
-            // short
-            out.writeShort(1);
-            // int
-            out.writeInt(1);
-            // long
-            out.writeLong(1L);
-            // float
-            out.writeFloat(1.0f);
-            // double
-            out.writeDouble(1.0d);
-
-            // String
-            out.writeUTF("abc");
-            testData = baos.toByteArray();
-
-            // 1 (boolean) + 1 (byte) + 2 (char) + 2 (short) + 4 (int) + 8 (long)
-            // + 4 (float) + 8 (double) + 5 bytes (utf string)
-        }
-
-        for (int memCapacity = 0; memCapacity <= 18; memCapacity++)
-        {
-            int diskCapacity = 18 - memCapacity;
-            try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                          INITIAL_BUFFER_SIZE, memCapacity, file,
-                                                                                          diskCapacity))
-            {
-                //read a big amount before resetting
-                reader.mark();
-                assertTrue(reader.readBoolean());
-                assertEquals(0x1, reader.readByte());
-                assertEquals('a', reader.readChar());
-                assertEquals(1, reader.readShort());
-                assertEquals(1, reader.readInt());
-                assertEquals(1L, reader.readLong());
-                reader.reset();
-
-                //read from buffer
-                assertTrue(reader.readBoolean());
-                assertEquals(0x1, reader.readByte());
-                assertEquals('a', reader.readChar());
-                assertEquals(1, reader.readShort());
-                assertEquals(1, reader.readInt());
-                assertEquals(1L, reader.readLong());
-
-                assertEquals(17, reader.available());
-
-                //mark again
-                reader.mark();
-                assertEquals(1.0f, reader.readFloat(), 0);
-                assertEquals(1.0d, reader.readDouble(), 0);
-                assertEquals("abc", reader.readUTF());
-                reader.reset();
-
-                assertEquals(17, reader.available());
-
-                assertEquals(1.0f, reader.readFloat(), 0);
-                assertEquals(1.0d, reader.readDouble(), 0);
-                assertEquals("abc", reader.readUTF());
-            }
-            assertFalse(file.exists());
-        }
-    }
-
-
-    @Test
-    public void testCircularSpillFile() throws Exception
-    {
-        byte[] testData;
-
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        try (DataOutputStream out = new DataOutputStream(baos))
-        {
-            // boolean
-            out.writeBoolean(true);
-            // byte
-            out.writeByte(0x1);
-            // char
-            out.writeChar('a');
-            // short
-            out.writeShort(1);
-            // int
-            out.writeInt(1);
-
-            // String
-            out.writeUTF("ab");
-            testData = baos.toByteArray();
-
-            // 1 (boolean) + 1 (byte) + 2 (char) + 2 (short) + 4 (int) + 4 bytes (utf string)
-        }
-
-        //read at most 4 bytes multiple times (and then check file size)
-        int MEM_SIZE = 0;
-        int DISK_SIZE = 4;
-        try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                      INITIAL_BUFFER_SIZE, MEM_SIZE, file,
-                                                                                      DISK_SIZE))
-        {
-            //read 2 bytes and reset
-            reader.mark();
-            assertTrue(reader.readBoolean());
-            assertEquals(0x1, reader.readByte());
-            assertEquals(2, reader.bytesPastMark(null));
-            reader.reset();
-
-            //read again previous sequence
-            assertTrue(reader.readBoolean());
-            assertEquals(0x1, reader.readByte());
-            //finish reading again previous sequence
-
-            //read 4 bytes and reset
-            reader.mark();
-            assertEquals('a', reader.readChar());
-            assertEquals(1, reader.readShort());
-            assertEquals(4, reader.bytesPastMark(null));
-            reader.reset();
-
-            //read again previous sequence
-            assertEquals('a', reader.readChar());
-            assertEquals(1, reader.readShort());
-            //finish reading again previous sequence
-
-            //read 4 bytes and reset
-            reader.mark();
-            assertEquals(1, reader.readInt());
-            assertEquals(4, reader.bytesPastMark(null));
-            reader.reset();
-
-            //read again previous sequence
-            assertEquals(1, reader.readInt());
-
-            //check max file size
-            assertEquals(DISK_SIZE, file.length());
-        }
-        assertFalse(file.exists());
-    }
-
-    @Test
-    public void testExhaustCapacity() throws Exception
-    {
-        byte[] testData;
-
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        try (DataOutputStream out = new DataOutputStream(baos))
-        {
-            // boolean
-            out.writeBoolean(true);
-            // byte
-            out.writeByte(0x1);
-            // char
-            out.writeChar('a');
-            // short
-            out.writeShort(1);
-            testData = baos.toByteArray();
-        }
-
-        //test capacity exhausted when reading more than 4 bytes
-        testCapacityExhausted(testData, 0, 2);
-        testCapacityExhausted(testData, 2, 0);
-        testCapacityExhausted(testData, 1, 1);
-    }
-
-    private void testCapacityExhausted(byte[] testData, int memSize, int diskSize) throws IOException
-    {
-        try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                      INITIAL_BUFFER_SIZE, memSize, file,
-                                                                                      diskSize))
-        {
-            //read 2 bytes and reset
-            reader.mark();
-            assertTrue(reader.readBoolean());
-            assertEquals(0x1, reader.readByte());
-            assertEquals(2, reader.bytesPastMark(null));
-            reader.reset();
-
-            //read again previous sequence
-            assertTrue(reader.readBoolean());
-            assertEquals(0x1, reader.readByte());
-            //finish reading again previous sequence
-
-            reader.mark();
-            //read 3 bytes - START
-            assertEquals('a', reader.readChar());
-            //read 1 more bytes - CAPACITY will exhaust when trying to reset :(
-            assertEquals(1, reader.readShort());
-
-            try
-            {
-                reader.reset();
-                fail("Should have thrown IOException");
-            }
-            catch (IOException e) {}
-
-            //check max file size
-            assertEquals(diskSize, file.length());
-        }
-        assertFalse(file.exists());
-    }
-
-    @Test
-    public void testMarkAndResetUnsignedRead() throws Exception
-    {
-        byte[] testData;
-
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        try (DataOutputStream out = new DataOutputStream(baos))
-        {
-            // byte
-            out.writeByte(0x1);
-            // short
-            out.writeShort(2);
-            testData = baos.toByteArray();
-        }
-
-        for (int memCapacity = 0; memCapacity <= 1; memCapacity++)
-        {
-            int diskCapacity = 1 - memCapacity;
-            try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                          INITIAL_BUFFER_SIZE, memCapacity, file,
-                                                                                          diskCapacity))
-            {
-                reader.mark();
-                assertEquals(1, reader.readUnsignedByte());
-                reader.reset();
-                assertEquals(1, reader.readUnsignedByte());
-
-                //will read first byte of short 2
-                reader.mark();
-                assertEquals(0, reader.readUnsignedByte());
-                reader.reset();
-
-                assertEquals(2, reader.readUnsignedShort());
-
-                reader.mark();
-                reader.reset();
-                assertEquals(0, reader.available());
-            }
-        }
-        assertFalse(file.exists());
-    }
-
-    @Test
-    public void testMarkAndResetSkipBytes() throws Exception
-    {
-        String testStr = "1234567890";
-        byte[] testData = testStr.getBytes();
-
-        for (int memCapacity = 0; memCapacity <= 7; memCapacity++)
-        {
-            int diskCapacity = 7 - memCapacity;
-            try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                          INITIAL_BUFFER_SIZE, memCapacity, file,
-                                                                                          diskCapacity))
-            {
-                reader.mark();
-                // read first 5 bytes and rewind
-                byte[] out = new byte[5];
-                reader.readFully(out, 0, 5);
-                assertEquals("12345", new String(out));
-
-                // then skip 2 bytes (67)
-                reader.skipBytes(2);
-
-                assertEquals(7, reader.bytesPastMark(null));
-                reader.reset();
-
-                //now read part of the previously skipped bytes
-                out = new byte[5];
-                reader.readFully(out);
-                assertEquals("12345", new String(out));
-
-                //skip 3 bytes (2 from cache, 1 from stream)
-                reader.skip(3);
-
-                // mark and read 2 more bytes
-                reader.mark();
-                out = new byte[2];
-                reader.readFully(out);
-                assertEquals("90", new String(out));
-                assertEquals(0, reader.available());
-                reader.reset();
-
-                //reset and read only the next byte "9" in the third position
-                reader.readFully(out, 1, 1);
-                assertEquals("99", new String(out));
-
-                //now we read the remainder via readline
-                assertEquals(1, reader.available());
-                assertEquals("0", reader.readLine());
-
-            }
-            assertFalse(file.exists());
-        }
-    }
-
-    @Test
-    public void testMarkAndResetReadFully() throws Exception
-    {
-        String testStr = "1234567890";
-        byte[] testData = testStr.getBytes();
-
-        for (int memCapacity = 0; memCapacity <= 5; memCapacity++)
-        {
-            int diskCapacity = 5 - memCapacity;
-            try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData),
-                                                                                          INITIAL_BUFFER_SIZE, memCapacity, file,
-                                                                                          diskCapacity))
-            {
-                reader.mark();
-                // read first 5 bytes and rewind
-                byte[] out = new byte[5];
-                reader.readFully(out, 0, 5);
-                assertEquals("12345", new String(out));
-                reader.reset();
-
-                // read half from cache, half from parent stream
-                out = new byte[7];
-                reader.readFully(out);
-                assertEquals("1234567", new String(out));
-
-                // mark and read 3 more bytes
-                reader.mark();
-                out = new byte[3];
-                reader.readFully(out);
-                assertEquals("890", new String(out));
-                assertEquals(0, reader.available());
-                reader.reset();
-
-                //reset and read only the next byte "8" in the third position
-                reader.readFully(out, 2, 1);
-                assertEquals("898", new String(out));
-
-                //now we read the remainder via readline
-                assertEquals(2, reader.available());
-                assertEquals("90", reader.readLine());
-            }
-            assertFalse(file.exists());
-        }
-    }
-}
diff --git a/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java b/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java
index c1ffda2..80543b0 100644
--- a/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.io.util;
 
 import java.io.DataOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -55,7 +54,7 @@
     public void cleanup()
     {
         for (TestableSW sw : writers)
-            sw.file.delete();
+            sw.file.tryDelete();
         writers.clear();
     }
 
@@ -100,14 +99,14 @@
         protected void assertInProgress() throws Exception
         {
             Assert.assertTrue(file.exists());
-            byte[] bytes = readFileToByteArray(file);
+            byte[] bytes = readFileToByteArray(file.toJavaIOFile());
             Assert.assertTrue(Arrays.equals(partialContents, bytes));
         }
 
         protected void assertPrepared() throws Exception
         {
             Assert.assertTrue(file.exists());
-            byte[] bytes = readFileToByteArray(file);
+            byte[] bytes = readFileToByteArray(file.toJavaIOFile());
             Assert.assertTrue(Arrays.equals(fullContents, bytes));
         }
 
@@ -125,7 +124,7 @@
         protected static File tempFile(String prefix)
         {
             File file = FileUtils.createTempFile(prefix, "test");
-            file.delete();
+            file.tryDelete();
             return file;
         }
     }
@@ -133,7 +132,7 @@
     @Test
     public void resetAndTruncateTest()
     {
-        File tempFile = new File(Files.createTempDir(), "reset.txt");
+        File tempFile = new File(Files.createTempDir().toPath(), "reset.txt");
         final int bufferSize = 48;
         final int writeSize = 64;
         byte[] toWrite = new byte[writeSize];
@@ -177,7 +176,7 @@
     @Test
     public void outputStream()
     {
-        File tempFile = new File(Files.createTempDir(), "test.txt");
+        File tempFile = new File(Files.createTempDir().toPath(), "test.txt");
         Assert.assertFalse("temp file shouldn't exist yet", tempFile.exists());
 
         SequentialWriterOption option = SequentialWriterOption.newBuilder().finishOnClose(true).build();
diff --git a/test/unit/org/apache/cassandra/locator/AssureSufficientLiveNodesTest.java b/test/unit/org/apache/cassandra/locator/AssureSufficientLiveNodesTest.java
index d5f62d7..1903e15 100644
--- a/test/unit/org/apache/cassandra/locator/AssureSufficientLiveNodesTest.java
+++ b/test/unit/org/apache/cassandra/locator/AssureSufficientLiveNodesTest.java
@@ -45,7 +45,7 @@
 import org.apache.cassandra.exceptions.UnavailableException;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.service.reads.NeverSpeculativeRetryPolicy;
@@ -253,9 +253,9 @@
                                                       Consumer<Keyspace> test) throws Throwable
     {
         String keyspaceName = keyspaceNameGen.get();
-        KeyspaceMetadata initKsMeta = KeyspaceMetadata.create(keyspaceName, init, Tables.of(SchemaLoader.standardCFMD("Foo", "Bar").build()));
+        KeyspaceMetadata initKsMeta = KeyspaceMetadata.create(keyspaceName, init, Tables.of(SchemaLoader.standardCFMD(keyspaceName, "Bar").build()));
         KeyspaceMetadata alterToKsMeta = initKsMeta.withSwapped(alterTo);
-        MigrationManager.announceNewKeyspace(initKsMeta, true);
+        SchemaTestUtil.announceNewKeyspace(initKsMeta);
         Keyspace racedKs = Keyspace.open(keyspaceName);
         ExecutorService es = Executors.newFixedThreadPool(2);
         try (AutoCloseable ignore = () -> {
diff --git a/test/unit/org/apache/cassandra/locator/DynamicEndpointSnitchTest.java b/test/unit/org/apache/cassandra/locator/DynamicEndpointSnitchTest.java
index b7d4243..98a9d16 100644
--- a/test/unit/org/apache/cassandra/locator/DynamicEndpointSnitchTest.java
+++ b/test/unit/org/apache/cassandra/locator/DynamicEndpointSnitchTest.java
@@ -20,7 +20,6 @@
 
 import java.io.IOException;
 import java.util.*;
-import java.util.concurrent.TimeUnit;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/locator/InetAddressAndPortSerializerTest.java b/test/unit/org/apache/cassandra/locator/InetAddressAndPortSerializerTest.java
index c6ea3d7..78ecfb5 100644
--- a/test/unit/org/apache/cassandra/locator/InetAddressAndPortSerializerTest.java
+++ b/test/unit/org/apache/cassandra/locator/InetAddressAndPortSerializerTest.java
@@ -65,8 +65,8 @@
         }
         else
         {
-            assertEquals(address.address, roundtripped.address);
-            assertEquals(InetAddressAndPort.getDefaultPort(), roundtripped.port);
+            assertEquals(address.getAddress(), roundtripped.getAddress());
+            assertEquals(InetAddressAndPort.getDefaultPort(), roundtripped.getPort());
         }
     }
 }
diff --git a/test/unit/org/apache/cassandra/locator/InetAddressAndPortTest.java b/test/unit/org/apache/cassandra/locator/InetAddressAndPortTest.java
index 78b9a6f..74e3646 100644
--- a/test/unit/org/apache/cassandra/locator/InetAddressAndPortTest.java
+++ b/test/unit/org/apache/cassandra/locator/InetAddressAndPortTest.java
@@ -47,13 +47,13 @@
 
         //Test default port
         InetAddressAndPort address = InetAddressAndPort.getByName("127.0.0.1");
-        assertEquals(InetAddress.getByName("127.0.0.1"), address.address);
-        assertEquals(InetAddressAndPort.defaultPort, address.port);
+        assertEquals(InetAddress.getByName("127.0.0.1"), address.getAddress());
+        assertEquals(InetAddressAndPort.defaultPort, address.getPort());
 
         //Test overriding default port
         address = InetAddressAndPort.getByName("127.0.0.1:42");
-        assertEquals(InetAddress.getByName("127.0.0.1"), address.address);
-        assertEquals(42, address.port);
+        assertEquals(InetAddress.getByName("127.0.0.1"), address.getAddress());
+        assertEquals(42, address.getPort());
     }
 
     @Test
@@ -69,13 +69,13 @@
 
         //Test default port
         InetAddressAndPort address = InetAddressAndPort.getByName("2001:0db8:0000:0000:0000:ff00:0042:8329");
-        assertEquals(InetAddress.getByName("2001:0db8:0000:0000:0000:ff00:0042:8329"), address.address);
-        assertEquals(InetAddressAndPort.defaultPort, address.port);
+        assertEquals(InetAddress.getByName("2001:0db8:0000:0000:0000:ff00:0042:8329"), address.getAddress());
+        assertEquals(InetAddressAndPort.defaultPort, address.getPort());
 
         //Test overriding default port
         address = InetAddressAndPort.getByName("[2001:0db8:0000:0000:0000:ff00:0042:8329]:42");
-        assertEquals(InetAddress.getByName("2001:0db8:0000:0000:0000:ff00:0042:8329"), address.address);
-        assertEquals(42, address.port);
+        assertEquals(InetAddress.getByName("2001:0db8:0000:0000:0000:ff00:0042:8329"), address.getAddress());
+        assertEquals(42, address.getPort());
     }
 
     @Test
@@ -165,6 +165,41 @@
         assertEquals(ipv6forJMX, InetAddressAndPort.getByName(ipv6).getHostAddressAndPortForJMX());
     }
 
+    @Test
+    public void getHostNameForIPv4WithoutPortTest() throws Exception
+    {
+        byte[] ipBytes = new byte[] { 127, 0, 0, 1};
+        InetAddressAndPort obj = InetAddressAndPort.getByAddress(InetAddress.getByAddress("resolved4", ipBytes));
+        assertEquals("resolved4", obj.getHostName());
+        assertEquals("resolved4", obj.getHostName(false));
+    }
+
+    @Test
+    public void getHostNameForIPv6WithoutPortTest() throws Exception
+    {
+        byte[] ipBytes = new byte[] { 0x20, 0x01, 0xd, (byte) 0xb8, 0, 0, 0, 0, 0, 0, (byte) 0xff, 0, 0x00, 0x42, (byte) 0x83, 0x29};
+        InetAddressAndPort obj = InetAddressAndPort.getByAddress(InetAddress.getByAddress("resolved6", ipBytes));
+        assertEquals("resolved6", obj.getHostName());
+        assertEquals("resolved6", obj.getHostName(false));
+    }
+
+    @Test
+    public void getHostNameForIPv4WithPortTest() throws Exception
+    {
+        InetAddress ipv4 = InetAddress.getByAddress("resolved4", new byte[] { 127, 0, 0, 1});
+        InetAddressAndPort obj = InetAddressAndPort.getByAddressOverrideDefaults(ipv4, 42);
+        assertEquals("resolved4:42", obj.getHostName(true));
+    }
+
+    @Test
+    public void getHostNameForIPv6WithPortTest() throws Exception
+    {
+        byte[] ipBytes = new byte[] { 0x20, 0x01, 0xd, (byte) 0xb8, 0, 0, 0, 0, 0, 0, (byte) 0xff, 0, 0x00, 0x42, (byte) 0x83, 0x29 };
+        InetAddress ipv6 = InetAddress.getByAddress("resolved6", ipBytes);
+        InetAddressAndPort obj = InetAddressAndPort.getByAddressOverrideDefaults(ipv6, 42);
+        assertEquals("resolved6:42", obj.getHostName(true));
+    }
+
     private void shouldThrow(ThrowingRunnable t, Class expectedClass)
     {
         try
diff --git a/test/unit/org/apache/cassandra/locator/NetworkTopologyStrategyTest.java b/test/unit/org/apache/cassandra/locator/NetworkTopologyStrategyTest.java
index ffdb59f..81d6694 100644
--- a/test/unit/org/apache/cassandra/locator/NetworkTopologyStrategyTest.java
+++ b/test/unit/org/apache/cassandra/locator/NetworkTopologyStrategyTest.java
@@ -481,7 +481,7 @@
         StorageService.instance.getTokenMetadata().updateNormalToken(new LongToken(1), FBUtilities.getBroadcastAddressAndPort());
         
         ClientWarn.instance.captureWarnings();
-        strategy.maybeWarnOnOptions();
+        strategy.maybeWarnOnOptions(null);
         assertTrue(ClientWarn.instance.getWarnings().stream().anyMatch(s -> s.contains("Your replication factor")));
     }
 }
diff --git a/test/unit/org/apache/cassandra/locator/PendingRangesTest.java b/test/unit/org/apache/cassandra/locator/PendingRangesTest.java
index 7959366..992c2dd 100644
--- a/test/unit/org/apache/cassandra/locator/PendingRangesTest.java
+++ b/test/unit/org/apache/cassandra/locator/PendingRangesTest.java
@@ -542,4 +542,4 @@
                                   DatabaseDescriptor.getEndpointSnitch(),
                                   Collections.singletonMap("replication_factor", Integer.toString(replicationFactor)));
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/locator/ReconnectableSnitchHelperTest.java b/test/unit/org/apache/cassandra/locator/ReconnectableSnitchHelperTest.java
index 0c2f272..d5a0ec3 100644
--- a/test/unit/org/apache/cassandra/locator/ReconnectableSnitchHelperTest.java
+++ b/test/unit/org/apache/cassandra/locator/ReconnectableSnitchHelperTest.java
@@ -20,7 +20,6 @@
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
-import java.util.Collections;
 
 import org.junit.After;
 import org.junit.BeforeClass;
diff --git a/test/unit/org/apache/cassandra/locator/ReplicaCollectionTest.java b/test/unit/org/apache/cassandra/locator/ReplicaCollectionTest.java
index e2d4797..cde69d2 100644
--- a/test/unit/org/apache/cassandra/locator/ReplicaCollectionTest.java
+++ b/test/unit/org/apache/cassandra/locator/ReplicaCollectionTest.java
@@ -25,15 +25,14 @@
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
-import org.apache.cassandra.dht.Murmur3Partitioner;
+
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.ReplicaCollection.Builder.Conflict;
-import org.apache.cassandra.utils.FBUtilities;
+
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.AbstractMap;
 import java.util.Comparator;
@@ -43,7 +42,6 @@
 import java.util.stream.Collectors;
 
 import static com.google.common.collect.Iterables.*;
-import static com.google.common.collect.Iterables.filter;
 import static org.apache.cassandra.locator.Replica.fullReplica;
 import static org.apache.cassandra.locator.Replica.transientReplica;
 import static org.apache.cassandra.locator.ReplicaUtils.*;
diff --git a/test/unit/org/apache/cassandra/locator/ReplicaPlansTest.java b/test/unit/org/apache/cassandra/locator/ReplicaPlansTest.java
index 4d0dd47..8231b03 100644
--- a/test/unit/org/apache/cassandra/locator/ReplicaPlansTest.java
+++ b/test/unit/org/apache/cassandra/locator/ReplicaPlansTest.java
@@ -89,7 +89,7 @@
                 Keyspace ks = ks(ImmutableSet.of(EP1, EP2, EP3), ImmutableMap.of("DC1", "3", "DC2", "3"));
                 EndpointsForToken natural = EndpointsForToken.of(token, full(EP1), full(EP2), full(EP3), full(EP4), full(EP5), full(EP6));
                 EndpointsForToken pending = EndpointsForToken.empty(token);
-                ReplicaPlan.ForTokenWrite plan = ReplicaPlans.forWrite(ks, ConsistencyLevel.EACH_QUORUM, natural, pending, Predicates.alwaysTrue(), ReplicaPlans.writeNormal);
+                ReplicaPlan.ForWrite plan = ReplicaPlans.forWrite(ks, ConsistencyLevel.EACH_QUORUM, natural, pending, Predicates.alwaysTrue(), ReplicaPlans.writeNormal);
                 assertEquals(natural, plan.liveAndDown);
                 assertEquals(natural, plan.live);
                 assertEquals(natural, plan.contacts());
@@ -99,7 +99,7 @@
                 Keyspace ks = ks(ImmutableSet.of(EP1, EP2, EP3), ImmutableMap.of("DC1", "3", "DC2", "3"));
                 EndpointsForToken natural = EndpointsForToken.of(token, full(EP1), full(EP2), trans(EP3), full(EP4), full(EP5), trans(EP6));
                 EndpointsForToken pending = EndpointsForToken.empty(token);
-                ReplicaPlan.ForTokenWrite plan = ReplicaPlans.forWrite(ks, ConsistencyLevel.EACH_QUORUM, natural, pending, Predicates.alwaysTrue(), ReplicaPlans.writeNormal);
+                ReplicaPlan.ForWrite plan = ReplicaPlans.forWrite(ks, ConsistencyLevel.EACH_QUORUM, natural, pending, Predicates.alwaysTrue(), ReplicaPlans.writeNormal);
                 assertEquals(natural, plan.liveAndDown);
                 assertEquals(natural, plan.live);
                 EndpointsForToken expectContacts = EndpointsForToken.of(token, full(EP1), full(EP2), full(EP4), full(EP5));
diff --git a/test/unit/org/apache/cassandra/locator/SimpleStrategyTest.java b/test/unit/org/apache/cassandra/locator/SimpleStrategyTest.java
index 4c1ff26..ca4841d 100644
--- a/test/unit/org/apache/cassandra/locator/SimpleStrategyTest.java
+++ b/test/unit/org/apache/cassandra/locator/SimpleStrategyTest.java
@@ -159,7 +159,7 @@
     {
         TokenMetadata tmd;
         AbstractReplicationStrategy strategy;
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             tmd = new TokenMetadata();
             strategy = getStrategy(keyspaceName, tmd, new SimpleSnitch());
@@ -214,7 +214,7 @@
         tmd.addBootstrapToken(bsToken, bootstrapEndpoint);
 
         AbstractReplicationStrategy strategy = null;
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             strategy = getStrategy(keyspaceName, tmd, new SimpleSnitch());
 
@@ -348,7 +348,7 @@
         StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), FBUtilities.getBroadcastAddressAndPort());
         
         ClientWarn.instance.captureWarnings();
-        strategy.maybeWarnOnOptions();
+        strategy.maybeWarnOnOptions(null);
         assertTrue(ClientWarn.instance.getWarnings().stream().anyMatch(s -> s.contains("Your replication factor")));
     }
 
diff --git a/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java b/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java
index 9e34b93..c1b76b3 100644
--- a/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java
+++ b/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java
@@ -19,17 +19,21 @@
 
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.collect.HashMultimap;
 import com.google.common.collect.ImmutableMultimap;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Multimap;
 
 import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -55,7 +59,12 @@
     public static void beforeClass() throws Throwable
     {
         DatabaseDescriptor.daemonInitialization();
-        tmd = StorageService.instance.getTokenMetadata();
+    }
+
+    @Before
+    public void before() throws Throwable
+    {
+        tmd = new TokenMetadata();
         tmd.updateNormalToken(token(ONE), InetAddressAndPort.getByName("127.0.0.1"));
         tmd.updateNormalToken(token(SIX), InetAddressAndPort.getByName("127.0.0.6"));
     }
@@ -348,4 +357,43 @@
         assertEquals(0, tokenMetadata.getSizeOfLeavingEndpoints());
         assertEquals(0, tokenMetadata.getSizeOfMovingEndpoints());
     }
+
+
+    @Test
+    public void testRemoveEndpointTokenChange() throws Exception
+    {
+        TokenMetadata metadata = StorageService.instance.getTokenMetadata();
+        metadata.clearUnsafe();
+
+        Collection<Token> tokens = new HashSet<>();
+        tokens.add(DatabaseDescriptor.getPartitioner().getRandomToken());
+        tokens.add(DatabaseDescriptor.getPartitioner().getRandomToken());
+
+        InetAddressAndPort ep1 = InetAddressAndPort.getByName("127.0.0.1");
+        InetAddressAndPort ep2 = InetAddressAndPort.getByName("127.0.0.2");
+
+        Multimap<InetAddressAndPort, Token> endpointTokens = HashMultimap.create();
+        for (Token token : tokens)
+            endpointTokens.put(ep1, token);
+
+        endpointTokens.put(ep2, DatabaseDescriptor.getPartitioner().getRandomToken());
+
+        long ver = metadata.getRingVersion();
+        metadata.updateNormalTokens(endpointTokens);
+        assertTrue(metadata.getRingVersion() > ver);
+
+        // Remove a normal endpoint
+        assertTrue(metadata.isMember(ep2));
+        ver = metadata.getRingVersion();
+        metadata.removeEndpoint(ep2);
+        assertFalse(metadata.isMember(ep2));
+        assertTrue(metadata.getRingVersion() > ver);
+
+        // Remove a non-existent endpoint (e.g. a proxy node that is not part of token metadata)
+        InetAddressAndPort ep3 = InetAddressAndPort.getByName("127.0.0.3");
+        assertFalse(metadata.isMember(ep3));
+        ver = metadata.getRingVersion();
+        metadata.removeEndpoint(ep3);
+        assertEquals(ver, metadata.getRingVersion());
+    }
 }
diff --git a/test/unit/org/apache/cassandra/metrics/BatchMetricsTest.java b/test/unit/org/apache/cassandra/metrics/BatchMetricsTest.java
index 439ed73..b90f19a 100644
--- a/test/unit/org/apache/cassandra/metrics/BatchMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/BatchMetricsTest.java
@@ -21,6 +21,7 @@
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -28,21 +29,20 @@
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Session;
-import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.service.EmbeddedCassandraService;
-import org.apache.cassandra.metrics.DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot;
 
 import static org.apache.cassandra.cql3.statements.BatchStatement.metrics;
-import static org.apache.cassandra.metrics.DecayingEstimatedHistogramReservoir.*;
+import static org.apache.cassandra.metrics.DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot;
+import static org.apache.cassandra.metrics.DecayingEstimatedHistogramReservoir.Range;
 import static org.junit.Assert.assertEquals;
 import static org.quicktheories.QuickTheory.qt;
 import static org.quicktheories.generators.Generate.intArrays;
 import static org.quicktheories.generators.SourceDSL.integers;
 
-public class BatchMetricsTest extends SchemaLoader
+public class BatchMetricsTest
 {
     private static final int MAX_ROUNDS_TO_PERFORM = 3;
     private static final int MAX_DISTINCT_PARTITIONS = 128;
@@ -63,13 +63,11 @@
     @BeforeClass()
     public static void setup() throws ConfigurationException, IOException
     {
-        Schema.instance.clear();
-
-        cassandra = new EmbeddedCassandraService();
-        cassandra.start();
-
+        DatabaseDescriptor.daemonInitialization();
         DatabaseDescriptor.setWriteRpcTimeout(TimeUnit.SECONDS.toMillis(10));
 
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
+
         cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
         session = cluster.connect();
 
@@ -82,6 +80,15 @@
         psCounter = session.prepare("UPDATE " + KEYSPACE + '.' + COUNTER_TABLE + " SET val = val + 1 WHERE id = ?;");
     }
 
+    @AfterClass
+    public static void tearDown()
+    {
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
+    }
+
     private void executeLoggerBatch(BatchStatement.Type batchStatementType, int distinctPartitions, int statementsPerPartition)
     {
         BatchStatement batch = new BatchStatement(batchStatementType);
diff --git a/test/unit/org/apache/cassandra/metrics/CQLMetricsTest.java b/test/unit/org/apache/cassandra/metrics/CQLMetricsTest.java
index 3971b9f..c41ded3 100644
--- a/test/unit/org/apache/cassandra/metrics/CQLMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/CQLMetricsTest.java
@@ -20,35 +20,35 @@
 
 import java.io.IOException;
 
+import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Session;
-import org.apache.cassandra.SchemaLoader;
+import com.datastax.driver.core.exceptions.InvalidQueryException;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.service.EmbeddedCassandraService;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-public class CQLMetricsTest extends SchemaLoader
+public class CQLMetricsTest
 {
-    private static EmbeddedCassandraService cassandra;
-
     private static Cluster cluster;
     private static Session session;
+    private static EmbeddedCassandraService cassandra;
 
     @BeforeClass()
     public static void setup() throws ConfigurationException, IOException
     {
-        Schema.instance.clear();
-
-        cassandra = new EmbeddedCassandraService();
-        cassandra.start();
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
 
         cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
         session = cluster.connect();
@@ -57,11 +57,43 @@
         session.execute("CREATE TABLE IF NOT EXISTS junit.metricstest (id int PRIMARY KEY, val text);");
     }
 
+    @AfterClass
+    public static void tearDown()
+    {
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
+    }
+
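+    /** With use_statements_enabled = false, USE must be rejected and the USE-statement counter should not change. */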
+    @Test
+    public void testConnectionWithUseDisabled()
+    {
+        long useCountBefore = QueryProcessor.metrics.useStatementsExecuted.getCount();
+        DatabaseDescriptor.setUseStatementsEnabled(false);
+
+        try (Session ignored = cluster.connect("junit"))
+        {
+            fail("expected USE statement to fail with use_statements_enabled = false");
+        }
+        catch (InvalidQueryException e)
+        {
+            Assert.assertEquals(useCountBefore, QueryProcessor.metrics.useStatementsExecuted.getCount());
+            assertTrue(e.getMessage().contains("USE statements prohibited"));
+        }
+        finally
+        {
+            DatabaseDescriptor.setUseStatementsEnabled(true);
+        }
+    }
+
     @Test
     public void testPreparedStatementsCount()
     {
         int n = QueryProcessor.metrics.preparedStatementsCount.getValue();
+        long useCountBefore = QueryProcessor.metrics.useStatementsExecuted.getCount();
         session.execute("use junit");
+        Assert.assertEquals(useCountBefore + 1, QueryProcessor.metrics.useStatementsExecuted.getCount());
         session.prepare("SELECT * FROM junit.metricstest WHERE id = ?");
         assertEquals(n+2, (int) QueryProcessor.metrics.preparedStatementsCount.getValue());
     }
@@ -104,15 +136,15 @@
         clearMetrics();
         PreparedStatement metricsStatement = session.prepare("INSERT INTO junit.metricstest (id, val) VALUES (?, ?)");
 
-        assertEquals(Double.NaN, QueryProcessor.metrics.preparedStatementsRatio.getValue());
+        assertEquals(Double.NaN, QueryProcessor.metrics.preparedStatementsRatio.getValue(), 0.0);
 
         for (int i = 0; i < 10; i++)
             session.execute(metricsStatement.bind(i, "val" + i));
-        assertEquals(1.0, QueryProcessor.metrics.preparedStatementsRatio.getValue());
+        assertEquals(1.0, QueryProcessor.metrics.preparedStatementsRatio.getValue(), 0.0);
 
         for (int i = 0; i < 10; i++)
             session.execute(String.format("INSERT INTO junit.metricstest (id, val) VALUES (%d, '%s')", i, "val" + i));
-        assertEquals(0.5, QueryProcessor.metrics.preparedStatementsRatio.getValue());
+        assertEquals(0.5, QueryProcessor.metrics.preparedStatementsRatio.getValue(), 0.0);
     }
 
     private void clearMetrics()
diff --git a/test/unit/org/apache/cassandra/metrics/CassandraMetricsRegistryTest.java b/test/unit/org/apache/cassandra/metrics/CassandraMetricsRegistryTest.java
index cd9866c..02e4228 100644
--- a/test/unit/org/apache/cassandra/metrics/CassandraMetricsRegistryTest.java
+++ b/test/unit/org/apache/cassandra/metrics/CassandraMetricsRegistryTest.java
@@ -24,13 +24,17 @@
 
 import java.lang.management.ManagementFactory;
 import java.util.Collection;
+import java.util.concurrent.TimeUnit;
 
+import com.codahale.metrics.Timer;
 import org.apache.cassandra.metrics.CassandraMetricsRegistry.MetricName;
+
 import org.junit.Test;
 
 import com.codahale.metrics.jvm.BufferPoolMetricSet;
 import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
 import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
+import org.apache.cassandra.utils.EstimatedHistogram;
 
 
 public class CassandraMetricsRegistryTest
@@ -107,4 +111,53 @@
         assertArrayEquals(count, CassandraMetricsRegistry.delta(count, new long[3]));
         assertArrayEquals(new long[6], CassandraMetricsRegistry.delta(count, new long[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
     }
+
+    /**
+     * Test that timer updates land in the expected buckets of the microsecond-based histogram
+     * (each recorded value is less than 1.2 times the offset of its bucket).
+     */
+    @Test
+    public void testTimer()
+    {
+        long[] offsets = EstimatedHistogram.newOffsets(DecayingEstimatedHistogramReservoir.LOW_BUCKET_COUNT, false);
+        Timer timer = new Timer(CassandraMetricsRegistry.createReservoir(TimeUnit.MICROSECONDS));
+        timer.update(42, TimeUnit.NANOSECONDS);
+        timer.update(100, TimeUnit.NANOSECONDS);
+        timer.update(42, TimeUnit.MICROSECONDS);
+        timer.update(100, TimeUnit.MICROSECONDS);
+        timer.update(42, TimeUnit.MILLISECONDS);
+        timer.update(100, TimeUnit.MILLISECONDS);
+        timer.update(100, TimeUnit.SECONDS);
+        timer.update(100, TimeUnit.MINUTES);
+        int maxSeconds = 21356;
+        timer.update(maxSeconds, TimeUnit.SECONDS);
+        long[] counts = timer.getSnapshot().getValues();
+        int expectedBucketsWithValues = 8;
+        int bucketsWithValues = 0;
+        for (int i = 0; i < counts.length; i++)
+        {
+            if (counts[i] != 0)
+            {
+                bucketsWithValues++;
+                assertTrue(inRange(offsets[i], TimeUnit.NANOSECONDS.toMicros(42), 1.2)
+                           || inRange(offsets[i], TimeUnit.NANOSECONDS.toMicros(100), 1.2)
+                           || inRange(offsets[i], TimeUnit.MICROSECONDS.toMicros(42), 1.2)
+                           || inRange(offsets[i], TimeUnit.MICROSECONDS.toMicros(100), 1.2)
+                           || inRange(offsets[i], TimeUnit.MILLISECONDS.toMicros(42), 1.2)
+                           || inRange(offsets[i], TimeUnit.MILLISECONDS.toMicros(100), 1.2)
+                           || inRange(offsets[i], TimeUnit.SECONDS.toMicros(100), 1.2)
+                           || inRange(offsets[i], TimeUnit.MINUTES.toMicros(100), 1.2)
+                           || inRange(offsets[i], TimeUnit.SECONDS.toMicros(maxSeconds), 1.2));
+            }
+        }
+        assertEquals("42 and 100 nanos should both be put in the first bucket",
+                            2, counts[0]);
+        assertEquals(expectedBucketsWithValues, bucketsWithValues);
+    }
+
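+    /** Returns true when {@code input} is less than {@code range} times {@code anchor}. */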
+    private boolean inRange(long anchor, long input, double range)
+    {
+        return input / ((double) anchor) < range;
+    }
 }
diff --git a/test/unit/org/apache/cassandra/metrics/ClientRequestMetricsTest.java b/test/unit/org/apache/cassandra/metrics/ClientRequestMetricsTest.java
new file mode 100644
index 0000000..650bd95
--- /dev/null
+++ b/test/unit/org/apache/cassandra/metrics/ClientRequestMetricsTest.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import java.io.IOException;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.BatchStatement;
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.Session;
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.service.EmbeddedCassandraService;
+
+import static com.datastax.driver.core.Cluster.builder;
+import static org.junit.Assert.assertEquals;
+
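+/**
+ * Exercises the coordinator client-request metrics: plain writes, LWT writes, batches,
+ * single-partition reads and range reads should each bump the corresponding local-request counters.
+ */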
+public class ClientRequestMetricsTest
+{
+    private static Cluster cluster;
+    private static Session session;
+
+    private static final String KEYSPACE = "junit";
+    private static final String TABLE = "clientrequestsmetricstest";
+
+    private static PreparedStatement writePS;
+    private static PreparedStatement paxosPS;
+    private static PreparedStatement readPS;
+    private static PreparedStatement readRangePS;
+
+    private static final ClientRequestMetrics readMetrics = ClientRequestsMetricsHolder.readMetrics;
+    private static final ClientWriteRequestMetrics writeMetrics = ClientRequestsMetricsHolder.writeMetrics;
+
+    private static EmbeddedCassandraService cassandra;
+
+    @BeforeClass
+    public static void setup() throws ConfigurationException, IOException
+    {
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
+
+        cluster = builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
+        session = cluster.connect();
+
+        session.execute("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };");
+        session.execute("USE " + KEYSPACE);
+        session.execute("CREATE TABLE IF NOT EXISTS " + TABLE + " (id int, ord int, val text, PRIMARY KEY (id, ord));");
+
+        writePS = session.prepare("INSERT INTO " + KEYSPACE + '.' + TABLE + " (id, ord, val) VALUES (?, ?, ?);");
+        paxosPS = session.prepare("INSERT INTO " + KEYSPACE + '.' + TABLE + " (id, ord, val) VALUES (?, ?, ?) IF NOT EXISTS;");
+        readPS = session.prepare("SELECT * FROM " + KEYSPACE + '.' + TABLE + " WHERE id=?;");
+        readRangePS = session.prepare("SELECT * FROM " + KEYSPACE + '.' + TABLE + " WHERE id=? AND ord>=? AND ord <= ?;");
+    }
+
+    @AfterClass
+    public static void tearDown()
+    {
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
+    }
+
+    @Test
+    public void testWriteStatement()
+    {
+        ClientRequestMetricsContainer writeMetricsContainer = new ClientRequestMetricsContainer(writeMetrics);
+        ClientRequestMetricsContainer readMetricsContainer = new ClientRequestMetricsContainer(readMetrics);
+
+        executeWrite(1, 1, "aaaa");
+
+        assertEquals(1, writeMetricsContainer.compareLocalRequest());
+        assertEquals(0, writeMetricsContainer.compareRemoteRequest());
+
+        assertEquals(0, readMetricsContainer.compareLocalRequest());
+        assertEquals(0, readMetricsContainer.compareRemoteRequest());
+    }
+
+    @Test
+    public void testPaxosStatement()
+    {
+        ClientRequestMetricsContainer writeMetricsContainer = new ClientRequestMetricsContainer(writeMetrics);
+        ClientRequestMetricsContainer readMetricsContainer = new ClientRequestMetricsContainer(readMetrics);
+
+        executePAXOS(2, 2, "aaaa");
+
+        assertEquals(1, readMetricsContainer.compareLocalRequest());
+        assertEquals(0, readMetricsContainer.compareRemoteRequest());
+
+        assertEquals(1, writeMetricsContainer.compareLocalRequest());
+        assertEquals(0, writeMetricsContainer.compareRemoteRequest());
+    }
+
+    @Test
+    public void testBatchStatement()
+    {
+        ClientRequestMetricsContainer writeMetricsContainer = new ClientRequestMetricsContainer(writeMetrics);
+        ClientRequestMetricsContainer readMetricsContainer = new ClientRequestMetricsContainer(readMetrics);
+
+        executeBatch(10, 10);
+
+        assertEquals(0, readMetricsContainer.compareLocalRequest());
+        assertEquals(0, readMetricsContainer.compareRemoteRequest());
+
+        assertEquals(10, writeMetricsContainer.compareLocalRequest());
+        assertEquals(0, writeMetricsContainer.compareRemoteRequest());
+    }
+
+    @Test
+    public void testReadStatement()
+    {
+        executeWrite(1, 1, "aaaa");
+
+        ClientRequestMetricsContainer writeMetricsContainer = new ClientRequestMetricsContainer(writeMetrics);
+        ClientRequestMetricsContainer readMetricsContainer = new ClientRequestMetricsContainer(readMetrics);
+
+        executeRead(1);
+
+        assertEquals(1, readMetricsContainer.compareLocalRequest());
+        assertEquals(0, readMetricsContainer.compareRemoteRequest());
+
+        assertEquals(0, writeMetricsContainer.compareLocalRequest());
+        assertEquals(0, writeMetricsContainer.compareRemoteRequest());
+    }
+
+    @Test
+    public void testRangeStatement()
+    {
+        executeBatch(1, 100);
+
+        ClientRequestMetricsContainer writeMetricsContainer = new ClientRequestMetricsContainer(writeMetrics);
+        ClientRequestMetricsContainer readMetricsContainer = new ClientRequestMetricsContainer(readMetrics);
+
+        executeSlice(1, 0, 99);
+
+        assertEquals(1, readMetricsContainer.compareLocalRequest());
+        assertEquals(0, readMetricsContainer.compareRemoteRequest());
+
+        assertEquals(0, writeMetricsContainer.compareLocalRequest());
+        assertEquals(0, writeMetricsContainer.compareRemoteRequest());
+    }
+
+
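+    /**
+     * Captures the local/remote request counts at construction time so each test can
+     * assert on the delta caused by the statement it executes.
+     */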
+    private static class ClientRequestMetricsContainer
+    {
+        private final ClientRequestMetrics metrics;
+
+        private final long localRequests;
+        private final long remoteRequests;
+
+        public ClientRequestMetricsContainer(ClientRequestMetrics clientRequestMetrics)
+        {
+            metrics = clientRequestMetrics;
+            localRequests = metrics.localRequests.getCount();
+            remoteRequests = metrics.remoteRequests.getCount();
+        }
+
+        public long compareLocalRequest()
+        {
+            return metrics.localRequests.getCount() - localRequests;
+        }
+
+        public long compareRemoteRequest()
+        {
+            return metrics.remoteRequests.getCount() - remoteRequests;
+        }
+    }
+
+    private void executeWrite(int id, int ord, String val)
+    {
+        BoundStatement bs = writePS.bind(id, ord, val);
+        session.execute(bs);
+    }
+
+    private void executePAXOS(int id, int ord, String val)
+    {
+        BoundStatement bs = paxosPS.bind(id, ord, val);
+        session.execute(bs);
+    }
+
+    private void executeBatch(int distinctPartitions, int numClusteringKeys)
+    {
+        BatchStatement batch = new BatchStatement();
+
+        for (int i = 0; i < distinctPartitions; i++)
+        {
+            for (int y = 0; y < numClusteringKeys; y++)
+            {
+                batch.add(writePS.bind(i, y, "aaaaaaaa"));
+            }
+        }
+        session.execute(batch);
+    }
+
+    private void executeRead(int id)
+    {
+        BoundStatement bs = readPS.bind(id);
+        session.execute(bs);
+    }
+
+    private void executeSlice(int id, int start_range, int end_range)
+    {
+        BoundStatement bs = readRangePS.bind(id, start_range, end_range);
+        session.execute(bs);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/metrics/ClientRequestSizeMetricsTest.java b/test/unit/org/apache/cassandra/metrics/ClientRequestSizeMetricsTest.java
index 1f2f771..f19fca5 100644
--- a/test/unit/org/apache/cassandra/metrics/ClientRequestSizeMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/ClientRequestSizeMetricsTest.java
@@ -69,7 +69,9 @@
         // We explicitly disable scheme fetching to avoid that effect
         try
         {
-            reinitializeNetwork(builder -> builder.withQueryOptions(new QueryOptions().setMetadataEnabled(false)));
+            reinitializeNetwork(builder -> {}, builder -> builder.withQueryOptions(new QueryOptions().setMetadataEnabled(false)));
+            sessionNet(version); // Ensure that the connection is open
+
             // We want to ignore all the messages sent by the driver upon connection as well as
             // the event sent upon schema updates
             clearMetrics();
diff --git a/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java b/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java
index b62078c..64fb00d 100644
--- a/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java
+++ b/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java
@@ -19,593 +19,644 @@
 package org.apache.cassandra.metrics;
 
 import java.util.Arrays;
-import java.util.BitSet;
+import java.util.Collection;
 import java.util.Random;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.LockSupport;
+import java.util.function.Function;
 
-import org.assertj.core.api.Assertions;
+import com.google.common.collect.ImmutableList;
 import org.junit.Assert;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.experimental.runners.Enclosed;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
-import com.codahale.metrics.Clock;
 import com.codahale.metrics.Snapshot;
 import org.apache.cassandra.utils.EstimatedHistogram;
+import org.apache.cassandra.utils.MonotonicClock;
+import org.apache.cassandra.utils.MonotonicClockTranslation;
 import org.apache.cassandra.utils.Pair;
 import org.quicktheories.core.Gen;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.quicktheories.QuickTheory.qt;
 import static org.quicktheories.generators.SourceDSL.*;
 
+@RunWith(Enclosed.class)
 public class DecayingEstimatedHistogramReservoirTest
 {
-    private static final double DOUBLE_ASSERT_DELTA = 0;
-
-    public static final int numExamples = 1000000;
-    public static final Gen<long[]> offsets = integers().from(DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT)
-                                                        .upToAndIncluding(DecayingEstimatedHistogramReservoir.MAX_BUCKET_COUNT - 10)
-                                                        .zip(booleans().all(), EstimatedHistogram::newOffsets);
-
-
-    @Test
-    public void testFindIndex()
+    public static class NonParameterizedTests
     {
-        qt().withExamples(numExamples)
-            .forAll(booleans().all()
-                              .flatMap(b -> offsets.flatMap(offs -> this.offsetsAndValue(offs, b, 0))))
-            .check(this::checkFindIndex);
-    }
+        public static final int numExamples = 1000000;
 
-    private boolean checkFindIndex(Pair<long[], Long> offsetsAndValue)
-    {
-        long[] offsets = offsetsAndValue.left;
-        long value = offsetsAndValue.right;
-
-        int model = findIndexModel(offsets, value);
-        int actual = DecayingEstimatedHistogramReservoir.findIndex(offsets, value);
-
-        return model == actual;
-    }
-
-    private int findIndexModel(long[] offsets, long value)
-    {
-        int modelIndex = Arrays.binarySearch(offsets, value);
-        if (modelIndex < 0)
-            modelIndex = -modelIndex - 1;
-
-        return modelIndex;
-    };
-
-    @Test
-    public void showEstimationWorks()
-    {
-        qt().withExamples(numExamples)
-            .forAll(offsets.flatMap(offs -> this.offsetsAndValue(offs, false, 9)))
-            .check(this::checkEstimation);
-    }
-
-    public boolean checkEstimation(Pair<long[], Long> offsetsAndValue)
-    {
-        long[] offsets = offsetsAndValue.left;
-        long value = offsetsAndValue.right;
-        boolean considerZeros = offsets[0] == 0;
-
-        int modelIndex = Arrays.binarySearch(offsets, value);
-        if (modelIndex < 0)
-            modelIndex = -modelIndex - 1;
-
-        int estimate = (int) DecayingEstimatedHistogramReservoir.fastLog12(value);
-
-        if (considerZeros)
-            return estimate - 3 == modelIndex || estimate - 2 == modelIndex;
-        else
-            return estimate - 4 == modelIndex || estimate - 3 == modelIndex;
-    }
-
-
-    private Gen<Pair<long[], Long>> offsetsAndValue(long[] offsets, boolean useMaxLong, long minValue)
-    {
-        return longs().between(minValue, useMaxLong ? Long.MAX_VALUE : offsets[offsets.length - 1] + 100)
-                      .mix(longs().between(minValue, minValue + 10),50)
-                      .map(value -> Pair.create(offsets, value));
-    }
-
-    //shows that the max before overflow is 238 buckets regardless of consider zeros
-    @Test
-    @Ignore
-    public void showHistorgramOffsetOverflow()
-    {
-        qt().forAll(integers().from(DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT).upToAndIncluding(1000))
-            .check(count -> {
-                long[] offsets = EstimatedHistogram.newOffsets(count, false);
-                for (long offset : offsets)
-                    if (offset < 0)
-                        return false;
-
-                return true;
-            });
-    }
-
-    @Test
-    public void testStriping() throws InterruptedException
-    {
-        TestClock clock = new TestClock();
-        int nStripes = 4;
-        DecayingEstimatedHistogramReservoir model = new DecayingEstimatedHistogramReservoir(clock);
-        DecayingEstimatedHistogramReservoir test = new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION,
-                                                                                           DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT,
-                                                                                           nStripes,
-                                                                                           clock);
-
-        long seed = System.nanoTime();
-        System.out.println("DecayingEstimatedHistogramReservoirTest#testStriping.seed = " + seed);
-        Random valGen = new Random(seed);
-        ExecutorService executors = Executors.newFixedThreadPool(nStripes * 2);
-        for (int i = 0; i < 1_000_000; i++)
+        public static final Gen<long[]> offsets = integers().from(DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT)
+                                                            .upToAndIncluding(DecayingEstimatedHistogramReservoir.MAX_BUCKET_COUNT - 10)
+                                                            .zip(booleans().all(), EstimatedHistogram::newOffsets);
+        
+        @Test
+        public void testFindIndex()
         {
-            long value = Math.abs(valGen.nextInt());
-            executors.submit(() -> {
-                model.update(value);
-                LockSupport.parkNanos(2);
-                test.update(value);
-            });
+            qt().withExamples(numExamples)
+                .forAll(booleans().all()
+                                  .flatMap(b -> offsets.flatMap(offs -> this.offsetsAndValue(offs, b, 0))))
+                .check(this::checkFindIndex);
         }
 
-        executors.shutdown();
-        Assert.assertTrue(executors.awaitTermination(1, TimeUnit.MINUTES));
-
-        Snapshot modelSnapshot = model.getSnapshot();
-        Snapshot testSnapshot = test.getSnapshot();
-
-        assertEquals(modelSnapshot.getMean(), testSnapshot.getMean(), DOUBLE_ASSERT_DELTA);
-        assertEquals(modelSnapshot.getMin(), testSnapshot.getMin(), DOUBLE_ASSERT_DELTA);
-        assertEquals(modelSnapshot.getMax(), testSnapshot.getMax(), DOUBLE_ASSERT_DELTA);
-        assertEquals(modelSnapshot.getMedian(), testSnapshot.getMedian(), DOUBLE_ASSERT_DELTA);
-        for (double i = 0.0; i < 1.0; i += 0.1)
-            assertEquals(modelSnapshot.getValue(i), testSnapshot.getValue(i), DOUBLE_ASSERT_DELTA);
-
-
-        int stripedValues = 0;
-        for (int i = model.size(); i < model.size() * model.stripeCount(); i++)
+        @Test
+        public void showEstimationWorks()
         {
-            stripedValues += model.stripedBucketValue(i, true);
+            qt().withExamples(numExamples)
+                .forAll(offsets.flatMap(offs -> this.offsetsAndValue(offs, false, 9)))
+                .check(this::checkEstimation);
         }
-        assertTrue("no striping found", stripedValues > 0);
+
+        //shows that the max before overflow is 238 buckets regardless of consider zeros
+        @Test
+        @Ignore
+        public void showHistogramOffsetOverflow()
+        {
+            qt().forAll(integers().from(DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT).upToAndIncluding(1000))
+                .check(count -> {
+                    long[] offsets = EstimatedHistogram.newOffsets(count, false);
+                    for (long offset : offsets)
+                        if (offset < 0)
+                            return false;
+
+                    return true;
+                });
+        }
+
+        private boolean checkFindIndex(Pair<long[], Long> offsetsAndValue)
+        {
+            long[] offsets = offsetsAndValue.left;
+            long value = offsetsAndValue.right;
+
+            int model = findIndexModel(offsets, value);
+            int actual = DecayingEstimatedHistogramReservoir.findIndex(offsets, value);
+
+            return model == actual;
+        }
+
+        private int findIndexModel(long[] offsets, long value)
+        {
+            int modelIndex = Arrays.binarySearch(offsets, value);
+            if (modelIndex < 0)
+                modelIndex = -modelIndex - 1;
+
+            return modelIndex;
+        }
+
+        private Gen<Pair<long[], Long>> offsetsAndValue(long[] offsets, boolean useMaxLong, long minValue)
+        {
+            return longs().between(minValue, useMaxLong ? Long.MAX_VALUE : offsets[offsets.length - 1] + 100)
+                          .mix(longs().between(minValue, minValue + 10),50)
+                          .map(value -> Pair.create(offsets, value));
+        }
+
+        public boolean checkEstimation(Pair<long[], Long> offsetsAndValue)
+        {
+            long[] offsets = offsetsAndValue.left;
+            long value = offsetsAndValue.right;
+            boolean considerZeros = offsets[0] == 0;
+
+            int modelIndex = Arrays.binarySearch(offsets, value);
+            if (modelIndex < 0)
+                modelIndex = -modelIndex - 1;
+
+            int estimate = (int) DecayingEstimatedHistogramReservoir.fastLog12(value);
+
+            if (considerZeros)
+                return estimate - 3 == modelIndex || estimate - 2 == modelIndex;
+            else
+                return estimate - 4 == modelIndex || estimate - 3 == modelIndex;
+        }
     }
 
-    @Test
-    public void testSimple()
+    @RunWith(Parameterized.class)
+    public static class ParameterizedTests
     {
+        private static final double DOUBLE_ASSERT_DELTA = 0;
+
+        @Parameterized.Parameter
+        public String description;
+        
+        @Parameterized.Parameter(1)
+        public Function<DecayingEstimatedHistogramReservoir, Snapshot> toSnapshot;
+
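+        /** Runs every test against both the regular snapshot and the decaying-buckets-only percentile snapshot. */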
+        @Parameterized.Parameters(name="{0}")
+        public static Collection<Object[]> suppliers()
         {
-            // 0 and 1 map to the same, first bucket
+            Function<DecayingEstimatedHistogramReservoir, Snapshot> snapshot = DecayingEstimatedHistogramReservoir::getSnapshot;
+            Function<DecayingEstimatedHistogramReservoir, Snapshot> decayingOnly = DecayingEstimatedHistogramReservoir::getPercentileSnapshot;
+            return ImmutableList.of(new Object[] { "normal", snapshot }, new Object[] { "decaying buckets", decayingOnly });
+        }
+
+        @Test
+        public void testStriping() throws InterruptedException
+        {
+            TestClock clock = new TestClock();
+            int nStripes = 4;
+            DecayingEstimatedHistogramReservoir model = new DecayingEstimatedHistogramReservoir(clock);
+            DecayingEstimatedHistogramReservoir test = new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION,
+                                                                                               DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT,
+                                                                                               nStripes,
+                                                                                               clock);
+
+            long seed = nanoTime();
+            System.out.println("DecayingEstimatedHistogramReservoirTest#testStriping.seed = " + seed);
+            Random valGen = new Random(seed);
+            ExecutorService executors = Executors.newFixedThreadPool(nStripes * 2);
+            for (int i = 0; i < 1_000_000; i++)
+            {
+                long value = Math.abs(valGen.nextInt());
+                executors.submit(() -> {
+                    model.update(value);
+                    LockSupport.parkNanos(2);
+                    test.update(value);
+                });
+            }
+
+            executors.shutdown();
+            Assert.assertTrue(executors.awaitTermination(1, TimeUnit.MINUTES));
+
+            Snapshot modelSnapshot = toSnapshot.apply(model);
+            Snapshot testSnapshot = toSnapshot.apply(test);
+
+            assertEquals(modelSnapshot.getMean(), testSnapshot.getMean(), DOUBLE_ASSERT_DELTA);
+            assertEquals(modelSnapshot.getMin(), testSnapshot.getMin(), DOUBLE_ASSERT_DELTA);
+            assertEquals(modelSnapshot.getMax(), testSnapshot.getMax(), DOUBLE_ASSERT_DELTA);
+            assertEquals(modelSnapshot.getMedian(), testSnapshot.getMedian(), DOUBLE_ASSERT_DELTA);
+            for (double i = 0.0; i < 1.0; i += 0.1)
+                assertEquals(modelSnapshot.getValue(i), testSnapshot.getValue(i), DOUBLE_ASSERT_DELTA);
+
+
+            int stripedValues = 0;
+            for (int i = model.size(); i < model.size() * model.stripeCount(); i++)
+            {
+                stripedValues += model.stripedBucketValue(i, true);
+            }
+            assertTrue("no striping found", stripedValues > 0);
+        }
+
+        @Test
+        public void testSimple()
+        {
+            {
+                // 0 and 1 map to the same, first bucket
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir();
+                histogram.update(0);
+                assertEquals(1, histogram.getSnapshot().getValues()[0]);
+                histogram.update(1);
+                assertEquals(2, histogram.getSnapshot().getValues()[0]);
+            }
+            {
+                // 0 and 1 map to different buckets
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(true);
+                histogram.update(0);
+                assertEquals(1, histogram.getSnapshot().getValues()[0]);
+                histogram.update(1);
+                Snapshot snapshot = histogram.getSnapshot();
+                assertEquals(1, snapshot.getValues()[0]);
+                assertEquals(1, snapshot.getValues()[1]);
+            }
+        }
+
+        @Test
+        public void testOverflow()
+        {
+            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION, 1, 1);
+            histogram.update(100);
+            assert histogram.isOverflowed();
+            assertEquals(Long.MAX_VALUE, toSnapshot.apply(histogram).getMax());
+        }
+
+        @Test
+        public void testMinMax()
+        {
             DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir();
-            histogram.update(0);
-            assertEquals(1, histogram.getSnapshot().getValues()[0]);
-            histogram.update(1);
-            assertEquals(2, histogram.getSnapshot().getValues()[0]);
-        }
-        {
-            // 0 and 1 map to different buckets
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(true);
-            histogram.update(0);
-            assertEquals(1, histogram.getSnapshot().getValues()[0]);
-            histogram.update(1);
-            Snapshot snapshot = histogram.getSnapshot();
-            assertEquals(1, snapshot.getValues()[0]);
-            assertEquals(1, snapshot.getValues()[1]);
-        }
-    }
-
-    @Test
-    public void testOverflow()
-    {
-        DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION, 1, 1);
-        histogram.update(100);
-        assert histogram.isOverflowed();
-        assertEquals(Long.MAX_VALUE, histogram.getSnapshot().getMax());
-    }
-
-    @Test
-    public void testMinMax()
-    {
-        DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir();
-        histogram.update(16);
-        Snapshot snapshot = histogram.getSnapshot();
-        assertEquals(15, snapshot.getMin());
-        assertEquals(17, snapshot.getMax());
-    }
-
-    @Test
-    public void testMean()
-    {
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-            for (int i = 0; i < 40; i++)
-                histogram.update(0);
-            for (int i = 0; i < 20; i++)
-                histogram.update(1);
-            for (int i = 0; i < 10; i++)
-                histogram.update(2);
-            assertEquals(1.14D, histogram.getSnapshot().getMean(), 0.1D);
-        }
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(true,
-                                                                                                    DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT,
-                                                                                                    DecayingEstimatedHistogramReservoir.DEFAULT_STRIPE_COUNT,
-                                                                                                    clock);
-            for (int i = 0; i < 40; i++)
-                histogram.update(0);
-            for (int i = 0; i < 20; i++)
-                histogram.update(1);
-            for (int i = 0; i < 10; i++)
-                histogram.update(2);
-            assertEquals(0.57D, histogram.getSnapshot().getMean(), 0.1D);
-        }
-    }
-
-    @Test
-    public void testStdDev()
-    {
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-            for (int i = 0; i < 20; i++)
-                histogram.update(10);
-            for (int i = 0; i < 40; i++)
-                histogram.update(20);
-            for (int i = 0; i < 20; i++)
-                histogram.update(30);
-
-            Snapshot snapshot = histogram.getSnapshot();
-            assertEquals(20.0D, snapshot.getMean(), 2.0D);
-            assertEquals(7.07D, snapshot.getStdDev(), 2.0D);
-        }
-    }
-
-    @Test
-    public void testFindingCorrectBuckets()
-    {
-        TestClock clock = new TestClock();
-
-        DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION, 90, 1, clock);
-        histogram.update(23282687);
-        assertFalse(histogram.isOverflowed());
-        assertEquals(1, histogram.getSnapshot().getValues()[89]);
-
-        histogram.update(9);
-        assertEquals(1, histogram.getSnapshot().getValues()[8]);
-
-        histogram.update(21);
-        histogram.update(22);
-        Snapshot snapshot = histogram.getSnapshot();
-        assertEquals(2, snapshot.getValues()[13]);
-        assertEquals(6277304.5D, snapshot.getMean(), DOUBLE_ASSERT_DELTA);
-    }
-
-    @Test
-    public void testPercentile()
-    {
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-            // percentile of empty histogram is 0
-            assertEquals(0D, histogram.getSnapshot().getValue(0.99), DOUBLE_ASSERT_DELTA);
-
-            histogram.update(1);
-            // percentile of a histogram with one element should be that element
-            assertEquals(1D, histogram.getSnapshot().getValue(0.99), DOUBLE_ASSERT_DELTA);
-
-            histogram.update(10);
-            assertEquals(10D, histogram.getSnapshot().getValue(0.99), DOUBLE_ASSERT_DELTA);
+            histogram.update(16);
+            Snapshot snapshot = toSnapshot.apply(histogram);
+            assertEquals(15, snapshot.getMin());
+            assertEquals(17, snapshot.getMax());
         }
 
+        @Test
+        public void testMean()
         {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-
-            histogram.update(1);
-            histogram.update(2);
-            histogram.update(3);
-            histogram.update(4);
-            histogram.update(5);
-
-            Snapshot snapshot = histogram.getSnapshot();
-            assertEquals(0, snapshot.getValue(0.00), DOUBLE_ASSERT_DELTA);
-            assertEquals(3, snapshot.getValue(0.50), DOUBLE_ASSERT_DELTA);
-            assertEquals(3, snapshot.getValue(0.60), DOUBLE_ASSERT_DELTA);
-            assertEquals(5, snapshot.getValue(1.00), DOUBLE_ASSERT_DELTA);
-        }
-
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-
-            for (int i = 11; i <= 20; i++)
-                histogram.update(i);
-
-            // Right now the histogram looks like:
-            //    10   12   14   17   20
-            //     0    2    2    3    3
-            // %:  0   20   40   70  100
-            Snapshot snapshot = histogram.getSnapshot();
-            assertEquals(12, snapshot.getValue(0.01), DOUBLE_ASSERT_DELTA);
-            assertEquals(14, snapshot.getValue(0.30), DOUBLE_ASSERT_DELTA);
-            assertEquals(17, snapshot.getValue(0.50), DOUBLE_ASSERT_DELTA);
-            assertEquals(17, snapshot.getValue(0.60), DOUBLE_ASSERT_DELTA);
-            assertEquals(20, snapshot.getValue(0.80), DOUBLE_ASSERT_DELTA);
-        }
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(true,
-                                                                                                    DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT,
-                                                                                                    DecayingEstimatedHistogramReservoir.DEFAULT_STRIPE_COUNT,
-                                                                                                    clock);
-            histogram.update(0);
-            histogram.update(0);
-            histogram.update(1);
-
-            Snapshot snapshot = histogram.getSnapshot();
-            assertEquals(0, snapshot.getValue(0.5), DOUBLE_ASSERT_DELTA);
-            assertEquals(1, snapshot.getValue(0.99), DOUBLE_ASSERT_DELTA);
-        }
-    }
-
-
-    @Test
-    public void testDecayingPercentile()
-    {
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-            // percentile of empty histogram is 0
-            assertEquals(0, histogram.getSnapshot().getValue(1.0), DOUBLE_ASSERT_DELTA);
-
-            for (int v = 1; v <= 100; v++)
             {
-                for (int i = 0; i < 10_000; i++)
-                {
-                    histogram.update(v);
-                }
-            }
+                TestClock clock = new TestClock();
 
-            Snapshot snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(05, snapshot.getValue(0.05));
-            assertEstimatedQuantile(20, snapshot.getValue(0.20));
-            assertEstimatedQuantile(40, snapshot.getValue(0.40));
-            assertEstimatedQuantile(99, snapshot.getValue(0.99));
-
-            clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
-            snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(05, snapshot.getValue(0.05));
-            assertEstimatedQuantile(20, snapshot.getValue(0.20));
-            assertEstimatedQuantile(40, snapshot.getValue(0.40));
-            assertEstimatedQuantile(99, snapshot.getValue(0.99));
-
-            for (int v = 1; v <= 50; v++)
-            {
-                for (int i = 0; i < 10_000; i++)
-                {
-                    histogram.update(v);
-                }
-            }
-
-            snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(04, snapshot.getValue(0.05));
-            assertEstimatedQuantile(14, snapshot.getValue(0.20));
-            assertEstimatedQuantile(27, snapshot.getValue(0.40));
-            assertEstimatedQuantile(98, snapshot.getValue(0.99));
-
-            clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
-            snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(04, snapshot.getValue(0.05));
-            assertEstimatedQuantile(14, snapshot.getValue(0.20));
-            assertEstimatedQuantile(27, snapshot.getValue(0.40));
-            assertEstimatedQuantile(98, snapshot.getValue(0.99));
-
-            for (int v = 1; v <= 50; v++)
-            {
-                for (int i = 0; i < 10_000; i++)
-                {
-                    histogram.update(v);
-                }
-            }
-
-            snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(03, snapshot.getValue(0.05));
-            assertEstimatedQuantile(12, snapshot.getValue(0.20));
-            assertEstimatedQuantile(23, snapshot.getValue(0.40));
-            assertEstimatedQuantile(96, snapshot.getValue(0.99));
-
-            clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
-            snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(03, snapshot.getValue(0.05));
-            assertEstimatedQuantile(12, snapshot.getValue(0.20));
-            assertEstimatedQuantile(23, snapshot.getValue(0.40));
-            assertEstimatedQuantile(96, snapshot.getValue(0.99));
-
-            for (int v = 11; v <= 20; v++)
-            {
-                for (int i = 0; i < 5_000; i++)
-                {
-                    histogram.update(v);
-                }
-            }
-
-            snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(04, snapshot.getValue(0.05));
-            assertEstimatedQuantile(12, snapshot.getValue(0.20));
-            assertEstimatedQuantile(20, snapshot.getValue(0.40));
-            assertEstimatedQuantile(95, snapshot.getValue(0.99));
-
-            clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
-            snapshot = histogram.getSnapshot();
-            assertEstimatedQuantile(04, snapshot.getValue(0.05));
-            assertEstimatedQuantile(12, snapshot.getValue(0.20));
-            assertEstimatedQuantile(20, snapshot.getValue(0.40));
-            assertEstimatedQuantile(95, snapshot.getValue(0.99));
-
-        }
-
-        {
-            TestClock clock = new TestClock();
-
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-            // percentile of empty histogram is 0
-            assertEquals(0, histogram.getSnapshot().getValue(0.99), DOUBLE_ASSERT_DELTA);
-
-            for (int m = 0; m < 40; m++)
-            {
-                for (int i = 0; i < 1_000_000; i++)
-                {
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+                for (int i = 0; i < 40; i++)
+                    histogram.update(0);
+                for (int i = 0; i < 20; i++)
+                    histogram.update(1);
+                for (int i = 0; i < 10; i++)
                     histogram.update(2);
-                }
-                // percentile of a histogram with one element should be that element
-                clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
-                assertEquals(2, histogram.getSnapshot().getValue(0.99), DOUBLE_ASSERT_DELTA);
+                assertEquals(1.14D, toSnapshot.apply(histogram).getMean(), 0.1D);
             }
+            {
+                TestClock clock = new TestClock();
 
-            clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S * 100);
-            assertEquals(0, histogram.getSnapshot().getValue(0.99), DOUBLE_ASSERT_DELTA);
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(true,
+                                                                                                        DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT,
+                                                                                                        DecayingEstimatedHistogramReservoir.DEFAULT_STRIPE_COUNT,
+                                                                                                        clock);
+                for (int i = 0; i < 40; i++)
+                    histogram.update(0);
+                for (int i = 0; i < 20; i++)
+                    histogram.update(1);
+                for (int i = 0; i < 10; i++)
+                    histogram.update(2);
+                assertEquals(0.57D, toSnapshot.apply(histogram).getMean(), 0.1D);
+            }
         }
 
+        @Test
+        public void testStdDev()
+        {
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+                for (int i = 0; i < 20; i++)
+                    histogram.update(10);
+                for (int i = 0; i < 40; i++)
+                    histogram.update(20);
+                for (int i = 0; i < 20; i++)
+                    histogram.update(30);
+
+                Snapshot snapshot = toSnapshot.apply(histogram);
+                assertEquals(20.0D, snapshot.getMean(), 2.0D);
+                assertEquals(7.07D, snapshot.getStdDev(), 2.0D);
+            }
+        }
+
+        @Test
+        public void testFindingCorrectBuckets()
         {
             TestClock clock = new TestClock();
 
-            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(DecayingEstimatedHistogramReservoir.DEFAULT_ZERO_CONSIDERATION, 90, 1, clock);
+            histogram.update(23282687);
+            assertFalse(histogram.isOverflowed());
+            assertEquals(1, histogram.getSnapshot().getValues()[89]);
 
-            histogram.update(20);
+            histogram.update(9);
+            assertEquals(1, histogram.getSnapshot().getValues()[8]);
+
             histogram.update(21);
             histogram.update(22);
             Snapshot snapshot = histogram.getSnapshot();
-            assertEquals(1, snapshot.getValues()[12]);
             assertEquals(2, snapshot.getValues()[13]);
-
-            clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
-
-            histogram.update(20);
-            histogram.update(21);
-            histogram.update(22);
-            snapshot = histogram.getSnapshot();
-            assertEquals(2, snapshot.getValues()[12]);
-            assertEquals(4, snapshot.getValues()[13]);
+            assertEquals(6277304.5D, snapshot.getMean(), DOUBLE_ASSERT_DELTA);
         }
-    }
 
-    @Test
-    public void testDecayingMean()
-    {
+        @Test
+        public void testPercentile()
+        {
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+                // percentile of empty histogram is 0
+                assertEquals(0D, toSnapshot.apply(histogram).getValue(0.99), DOUBLE_ASSERT_DELTA);
+
+                histogram.update(1);
+                // percentile of a histogram with one element should be that element
+                assertEquals(1D, toSnapshot.apply(histogram).getValue(0.99), DOUBLE_ASSERT_DELTA);
+
+                histogram.update(10);
+                assertEquals(10D, toSnapshot.apply(histogram).getValue(0.99), DOUBLE_ASSERT_DELTA);
+            }
+
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+
+                histogram.update(1);
+                histogram.update(2);
+                histogram.update(3);
+                histogram.update(4);
+                histogram.update(5);
+
+                Snapshot snapshot = toSnapshot.apply(histogram);
+                assertEquals(0, snapshot.getValue(0.00), DOUBLE_ASSERT_DELTA);
+                assertEquals(3, snapshot.getValue(0.50), DOUBLE_ASSERT_DELTA);
+                assertEquals(3, snapshot.getValue(0.60), DOUBLE_ASSERT_DELTA);
+                assertEquals(5, snapshot.getValue(1.00), DOUBLE_ASSERT_DELTA);
+            }
+
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+
+                for (int i = 11; i <= 20; i++)
+                    histogram.update(i);
+
+                // Right now the histogram looks like:
+                //    10   12   14   17   20
+                //     0    2    2    3    3
+                // %:  0   20   40   70  100
+                Snapshot snapshot = toSnapshot.apply(histogram);
+                assertEquals(12, snapshot.getValue(0.01), DOUBLE_ASSERT_DELTA);
+                assertEquals(14, snapshot.getValue(0.30), DOUBLE_ASSERT_DELTA);
+                assertEquals(17, snapshot.getValue(0.50), DOUBLE_ASSERT_DELTA);
+                assertEquals(17, snapshot.getValue(0.60), DOUBLE_ASSERT_DELTA);
+                assertEquals(20, snapshot.getValue(0.80), DOUBLE_ASSERT_DELTA);
+            }
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(true,
+                                                                                                        DecayingEstimatedHistogramReservoir.DEFAULT_BUCKET_COUNT,
+                                                                                                        DecayingEstimatedHistogramReservoir.DEFAULT_STRIPE_COUNT,
+                                                                                                        clock);
+                histogram.update(0);
+                histogram.update(0);
+                histogram.update(1);
+
+                Snapshot snapshot = toSnapshot.apply(histogram);
+                assertEquals(0, snapshot.getValue(0.5), DOUBLE_ASSERT_DELTA);
+                assertEquals(1, snapshot.getValue(0.99), DOUBLE_ASSERT_DELTA);
+            }
+        }
+
+        @Test
+        public void testDecayingPercentile()
+        {
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+                // percentile of empty histogram is 0
+                assertEquals(0, toSnapshot.apply(histogram).getValue(1.0), DOUBLE_ASSERT_DELTA);
+
+                for (int v = 1; v <= 100; v++)
+                {
+                    for (int i = 0; i < 10_000; i++)
+                    {
+                        histogram.update(v);
+                    }
+                }
+
+                Snapshot snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(5, snapshot.getValue(0.05));
+                assertEstimatedQuantile(20, snapshot.getValue(0.20));
+                assertEstimatedQuantile(40, snapshot.getValue(0.40));
+                assertEstimatedQuantile(99, snapshot.getValue(0.99));
+
+                clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
+                snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(5, snapshot.getValue(0.05));
+                assertEstimatedQuantile(20, snapshot.getValue(0.20));
+                assertEstimatedQuantile(40, snapshot.getValue(0.40));
+                assertEstimatedQuantile(99, snapshot.getValue(0.99));
+
+                for (int v = 1; v <= 50; v++)
+                {
+                    for (int i = 0; i < 10_000; i++)
+                    {
+                        histogram.update(v);
+                    }
+                }
+
+                snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(4, snapshot.getValue(0.05));
+                assertEstimatedQuantile(14, snapshot.getValue(0.20));
+                assertEstimatedQuantile(27, snapshot.getValue(0.40));
+                assertEstimatedQuantile(98, snapshot.getValue(0.99));
+
+                clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
+                snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(4, snapshot.getValue(0.05));
+                assertEstimatedQuantile(14, snapshot.getValue(0.20));
+                assertEstimatedQuantile(27, snapshot.getValue(0.40));
+                assertEstimatedQuantile(98, snapshot.getValue(0.99));
+
+                for (int v = 1; v <= 50; v++)
+                {
+                    for (int i = 0; i < 10_000; i++)
+                    {
+                        histogram.update(v);
+                    }
+                }
+
+                snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(3, snapshot.getValue(0.05));
+                assertEstimatedQuantile(12, snapshot.getValue(0.20));
+                assertEstimatedQuantile(23, snapshot.getValue(0.40));
+                assertEstimatedQuantile(96, snapshot.getValue(0.99));
+
+                clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
+                snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(3, snapshot.getValue(0.05));
+                assertEstimatedQuantile(12, snapshot.getValue(0.20));
+                assertEstimatedQuantile(23, snapshot.getValue(0.40));
+                assertEstimatedQuantile(96, snapshot.getValue(0.99));
+
+                for (int v = 11; v <= 20; v++)
+                {
+                    for (int i = 0; i < 5_000; i++)
+                    {
+                        histogram.update(v);
+                    }
+                }
+
+                snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(4, snapshot.getValue(0.05));
+                assertEstimatedQuantile(12, snapshot.getValue(0.20));
+                assertEstimatedQuantile(20, snapshot.getValue(0.40));
+                assertEstimatedQuantile(95, snapshot.getValue(0.99));
+
+                clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
+                snapshot = toSnapshot.apply(histogram);
+                assertEstimatedQuantile(4, snapshot.getValue(0.05));
+                assertEstimatedQuantile(12, snapshot.getValue(0.20));
+                assertEstimatedQuantile(20, snapshot.getValue(0.40));
+                assertEstimatedQuantile(95, snapshot.getValue(0.99));
+
+            }
+
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+                // percentile of empty histogram is 0
+                assertEquals(0, toSnapshot.apply(histogram).getValue(0.99), DOUBLE_ASSERT_DELTA);
+
+                for (int m = 0; m < 40; m++)
+                {
+                    for (int i = 0; i < 1_000_000; i++)
+                    {
+                        histogram.update(2);
+                    }
+                    // all recorded values are 2, so the 99th percentile should remain 2 even as older samples decay
+                    clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
+                    assertEquals(2, toSnapshot.apply(histogram).getValue(0.99), DOUBLE_ASSERT_DELTA);
+                }
+
+                clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S * 100);
+                assertEquals(0, toSnapshot.apply(histogram).getValue(0.99), DOUBLE_ASSERT_DELTA);
+            }
+
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+
+                histogram.update(20);
+                histogram.update(21);
+                histogram.update(22);
+                Snapshot snapshot = histogram.getSnapshot();
+                assertEquals(1, snapshot.getValues()[12]);
+                assertEquals(2, snapshot.getValues()[13]);
+
+                clock.addSeconds(DecayingEstimatedHistogramReservoir.HALF_TIME_IN_S);
+
+                histogram.update(20);
+                histogram.update(21);
+                histogram.update(22);
+                snapshot = histogram.getSnapshot();
+                assertEquals(2, snapshot.getValues()[12]);
+                assertEquals(4, snapshot.getValues()[13]);
+            }
+        }
+
+        @Test
+        public void testDecayingMean()
+        {
+            {
+                TestClock clock = new TestClock();
+
+                DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+
+                clock.addNanos(DecayingEstimatedHistogramReservoir.LANDMARK_RESET_INTERVAL_IN_NS - TimeUnit.SECONDS.toNanos(1L));
+
+                while (clock.now() < DecayingEstimatedHistogramReservoir.LANDMARK_RESET_INTERVAL_IN_NS + TimeUnit.SECONDS.toNanos(1L))
+                {
+                    clock.addNanos(TimeUnit.MILLISECONDS.toNanos(900));
+                    for (int i = 0; i < 1_000_000; i++)
+                    {
+                        histogram.update(1000);
+                        histogram.update(2000);
+                        histogram.update(3000);
+                        histogram.update(4000);
+                        histogram.update(5000);
+                    }
+                    assertEquals(3000D, toSnapshot.apply(histogram).getMean(), 500D);
+                }
+            }
+        }
+
+        @Test
+        public void testAggregation()
         {
             TestClock clock = new TestClock();
 
             DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+            DecayingEstimatedHistogramReservoir another = new DecayingEstimatedHistogramReservoir(clock);
 
-            clock.addMillis(DecayingEstimatedHistogramReservoir.LANDMARK_RESET_INTERVAL_IN_MS - 1_000L);
+            clock.addNanos(DecayingEstimatedHistogramReservoir.LANDMARK_RESET_INTERVAL_IN_NS - TimeUnit.SECONDS.toNanos(1L));
 
-            while (clock.getTime() < DecayingEstimatedHistogramReservoir.LANDMARK_RESET_INTERVAL_IN_MS + 1_000L)
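+            // alternate updates between the two reservoirs around the landmark-reset boundary; 'another' gets the last (newest) update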
+            histogram.update(1000);
+            clock.addMillis(100);
+            another.update(2000);
+            clock.addMillis(100);
+            histogram.update(2000);
+            clock.addMillis(100);
+            another.update(3000);
+            clock.addMillis(100);
+            histogram.update(3000);
+            clock.addMillis(100);
+            another.update(4000);
+
+            DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot snapshot = (DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot) histogram.getSnapshot();
+            DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot anotherSnapshot = (DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot) another.getSnapshot();
+
+            assertEquals(2000, snapshot.getMean(), 500D);
+            assertEquals(3000, anotherSnapshot.getMean(), 500D);
+
+            snapshot.add(anotherSnapshot);
+
+            // 'another' has the newer decayLandmark, so the aggregated snapshot should adopt it
+            assertEquals(anotherSnapshot.getSnapshotLandmark(), snapshot.getSnapshotLandmark());
+            assertEquals(2500, snapshot.getMean(), 500D);
+        }
+
+        @Test
+        public void testSize()
+        {
+            TestClock clock = new TestClock();
+
+            DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
+            histogram.update(42);
+            histogram.update(42);
+            assertEquals(2, toSnapshot.apply(histogram).size());
+        }
+
+        private void assertEstimatedQuantile(long expectedValue, double actualValue)
+        {
+            assertTrue("Expected at least [" + expectedValue + "] but actual is [" + actualValue + ']', actualValue >= expectedValue);
+            assertTrue("Expected less than [" + Math.round(expectedValue * 1.2) + "] but actual is [" + actualValue + ']', actualValue < Math.round(expectedValue * 1.2));
+        }
+
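+        // a manually advanced MonotonicClock so tests can control decay deterministically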
+        public static class TestClock implements MonotonicClock
+        {
+            private long tick = 0;
+
+            public void addNanos(long nanos)
             {
-                clock.addMillis(900);
-                for (int i = 0; i < 1_000_000; i++)
-                {
-                    histogram.update(1000);
-                    histogram.update(2000);
-                    histogram.update(3000);
-                    histogram.update(4000);
-                    histogram.update(5000);
-                }
-                assertEquals(3000D, histogram.getSnapshot().getMean(), 500D);
+                tick += nanos;
+            }
+
+            public void addMillis(long millis)
+            {
+                tick += TimeUnit.MILLISECONDS.toNanos(millis);
+            }
+
+            public void addSeconds(long seconds)
+            {
+                tick += TimeUnit.SECONDS.toNanos(seconds);
+            }
+
+            public long now()
+            {
+                return tick;
+            }
+
+            @Override
+            public long error()
+            {
+                return 0;
+            }
+
+            @Override
+            public MonotonicClockTranslation translate()
+            {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public boolean isAfter(long instant)
+            {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public boolean isAfter(long now, long instant)
+            {
+                throw new UnsupportedOperationException();
             }
         }
     }
-
-    @Test
-    public void testAggregation()
-    {
-        TestClock clock = new TestClock();
-
-        DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-        DecayingEstimatedHistogramReservoir another = new DecayingEstimatedHistogramReservoir(clock);
-
-        clock.addMillis(DecayingEstimatedHistogramReservoir.LANDMARK_RESET_INTERVAL_IN_MS - 1_000L);
-
-        histogram.update(1000);
-        clock.addMillis(100);
-        another.update(2000);
-        clock.addMillis(100);
-        histogram.update(2000);
-        clock.addMillis(100);
-        another.update(3000);
-        clock.addMillis(100);
-        histogram.update(3000);
-        clock.addMillis(100);
-        another.update(4000);
-
-        DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot snapshot = (DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot) histogram.getSnapshot();
-        DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot anotherSnapshot = (DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot) another.getSnapshot();
-
-        assertEquals(2000, snapshot.getMean(), 500D);
-        assertEquals(3000, anotherSnapshot.getMean(), 500D);
-
-        snapshot.add(anotherSnapshot);
-
-        // Another had newer decayLandmark, the aggregated snapshot should use it
-        assertEquals(anotherSnapshot.getSnapshotLandmark(), snapshot.getSnapshotLandmark());
-        assertEquals(2500, snapshot.getMean(), 500D);
-    }
-
-    @Test
-    public void testSize()
-    {
-        TestClock clock = new TestClock();
-
-        DecayingEstimatedHistogramReservoir histogram = new DecayingEstimatedHistogramReservoir(clock);
-        histogram.update(42);
-        histogram.update(42);
-        assertEquals(2, histogram.getSnapshot().size());
-    }
-
-    private void assertEstimatedQuantile(long expectedValue, double actualValue)
-    {
-        assertTrue("Expected at least [" + expectedValue + "] but actual is [" + actualValue + "]", actualValue >= expectedValue);
-        assertTrue("Expected less than [" + Math.round(expectedValue * 1.2) + "] but actual is [" + actualValue + "]", actualValue < Math.round(expectedValue * 1.2));
-    }
-
-    public class TestClock extends Clock {
-        private long tick = 0;
-
-        public void addMillis(long millis)
-        {
-            tick += millis * 1_000_000L;
-        }
-
-        public void addSeconds(long seconds)
-        {
-            tick += seconds * 1_000_000_000L;
-        }
-
-        public long getTick()
-        {
-            return tick;
-        }
-
-        public long getTime()
-        {
-            return tick / 1_000_000L;
-        };
-    }
 }
diff --git a/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java b/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java
index 15feca4..744c5df 100644
--- a/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java
@@ -52,7 +52,7 @@
     @Test
     public void testHintsMetrics() throws Exception
     {
-        DatabaseDescriptor.getHintsDirectory().mkdirs();
+        DatabaseDescriptor.getHintsDirectory().tryCreateDirectories();
 
         for (int i = 0; i < 99; i++)
             HintsService.instance.metrics.incrPastWindow(InetAddressAndPort.getLocalHost());
diff --git a/test/unit/org/apache/cassandra/metrics/KeyspaceMetricsTest.java b/test/unit/org/apache/cassandra/metrics/KeyspaceMetricsTest.java
index e941a84..7c00da5 100644
--- a/test/unit/org/apache/cassandra/metrics/KeyspaceMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/KeyspaceMetricsTest.java
@@ -18,40 +18,37 @@
 
 package org.apache.cassandra.metrics;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 import java.io.IOException;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.service.EmbeddedCassandraService;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.Session;
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.service.EmbeddedCassandraService;
 
-public class KeyspaceMetricsTest extends SchemaLoader
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class KeyspaceMetricsTest
 {
     private static Session session;
+    private static Cluster cluster;
+    private static EmbeddedCassandraService cassandra;
 
     @BeforeClass
     public static void setup() throws ConfigurationException, IOException
     {
-        Schema.instance.clear();
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
 
-        EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
-        cassandra.start();
-
-        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
+        cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
         session = cluster.connect();
     }
 
@@ -73,10 +70,13 @@
         // no metrics after drop
         assertEquals(metrics.get().collect(Collectors.joining(",")), 0, metrics.get().count());
     }
-    
+
     @AfterClass
-    public static void teardown()
+    public static void tearDown()
     {
-        session.close();
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
     }
 }
diff --git a/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java b/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java
index d61c550..3a4dc8a 100644
--- a/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java
@@ -22,8 +22,8 @@
 
 import org.junit.Test;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 
 public class LatencyMetricsTest
 {
@@ -105,4 +105,4 @@
 
         parent.release();
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/metrics/SamplerTest.java b/test/unit/org/apache/cassandra/metrics/SamplerTest.java
index 3d24c1b..dba19a3 100644
--- a/test/unit/org/apache/cassandra/metrics/SamplerTest.java
+++ b/test/unit/org/apache/cassandra/metrics/SamplerTest.java
@@ -224,7 +224,7 @@
     public void waitForEmpty(int timeoutMs) throws TimeoutException
     {
         int timeout = 0;
-        while (!Sampler.samplerExecutor.getQueue().isEmpty())
+        while (Sampler.samplerExecutor.getPendingTaskCount() > 0)
         {
             timeout++;
             Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
diff --git a/test/unit/org/apache/cassandra/metrics/TableMetricsTest.java b/test/unit/org/apache/cassandra/metrics/TableMetricsTest.java
index 1e8175e..4c9de77 100644
--- a/test/unit/org/apache/cassandra/metrics/TableMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/TableMetricsTest.java
@@ -31,17 +31,16 @@
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Session;
-import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.service.EmbeddedCassandraService;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-public class TableMetricsTest extends SchemaLoader
+public class TableMetricsTest
 {
     private static Session session;
 
@@ -49,15 +48,15 @@
     private static final String TABLE = "tablemetricstest";
     private static final String COUNTER_TABLE = "tablemetricscountertest";
 
+    private static EmbeddedCassandraService cassandra;
+    private static Cluster cluster;
+
     @BeforeClass
     public static void setup() throws ConfigurationException, IOException
     {
-        Schema.instance.clear();
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
 
-        EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
-        cassandra.start();
-
-        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
+        cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
         session = cluster.connect();
 
         session.execute(String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };", KEYSPACE));
@@ -276,9 +275,13 @@
         assertEquals(metrics.get().collect(Collectors.joining(",")), 0, metrics.get().count());
     }
 
+
     @AfterClass
-    public static void teardown()
+    public static void tearDown()
     {
-        session.close();
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
     }
 }
diff --git a/test/unit/org/apache/cassandra/metrics/ThreadPoolMetricsTest.java b/test/unit/org/apache/cassandra/metrics/ThreadPoolMetricsTest.java
index 7302655..5c80b8e 100644
--- a/test/unit/org/apache/cassandra/metrics/ThreadPoolMetricsTest.java
+++ b/test/unit/org/apache/cassandra/metrics/ThreadPoolMetricsTest.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.metrics;
 
-import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -28,6 +27,7 @@
 import org.apache.cassandra.Util;
 import org.apache.cassandra.concurrent.*;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.junit.Assert.*;
 
 public class ThreadPoolMetricsTest
@@ -35,25 +35,23 @@
     @Test
     public void testJMXEnabledThreadPoolMetricsWithNoBlockedThread()
     {
-        JMXEnabledThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(2,
-                                                                                 Integer.MAX_VALUE,
-                                                                                 TimeUnit.SECONDS,
-                                                                                 new ArrayBlockingQueue<>(2),
-                                                                                 new NamedThreadFactory("ThreadPoolMetricsTest-1"),
-                                                                                 "internal");
-        testMetricsWithNoBlockedThreads(executor, executor.metrics);
+        ThreadPoolExecutorPlus executor = (ThreadPoolExecutorPlus) executorFactory()
+                .withJmxInternal()
+                .configurePooled("ThreadPoolMetricsTest-1", 2)
+                .withQueueLimit(2)
+                .build();
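+        // metrics are exposed by the ThreadPoolExecutorJMXAdapter that withJmxInternal() registers as the pool's shutdown hook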
+        testMetricsWithNoBlockedThreads(executor, ((ThreadPoolExecutorJMXAdapter)executor.onShutdown()).metrics());
     }
 
     @Test
     public void testJMXEnabledThreadPoolMetricsWithBlockedThread()
     {
-        JMXEnabledThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(2,
-                                                                                 Integer.MAX_VALUE,
-                                                                                 TimeUnit.SECONDS,
-                                                                                 new ArrayBlockingQueue<>(2),
-                                                                                 new NamedThreadFactory("ThreadPoolMetricsTest-2"),
-                                                                                 "internal");
-        testMetricsWithBlockedThreads(executor, executor.metrics);
+        ThreadPoolExecutorPlus executor = (ThreadPoolExecutorPlus) executorFactory()
+                .withJmxInternal()
+                .configurePooled("ThreadPoolMetricsTest-2", 2)
+                .withQueueLimit(2)
+                .build();
+        testMetricsWithBlockedThreads(executor, ((ThreadPoolExecutorJMXAdapter)executor.onShutdown()).metrics());
     }
 
     @Test
@@ -66,7 +64,7 @@
         testMetricsWithNoBlockedThreads(executor, executor.metrics);
     }
 
-    private static void testMetricsWithBlockedThreads(LocalAwareExecutorService threadPool, ThreadPoolMetrics metrics)
+    private static void testMetricsWithBlockedThreads(ExecutorPlus threadPool, ThreadPoolMetrics metrics)
     {
         assertEquals(2, metrics.maxPoolSize.getValue().intValue());
 
@@ -187,7 +185,7 @@
         spinAssertEquals(2L, metrics.totalBlocked::getCount);
     }
 
-    private static void testMetricsWithNoBlockedThreads(LocalAwareExecutorService threadPool, ThreadPoolMetrics metrics)
+    private static void testMetricsWithNoBlockedThreads(ExecutorPlus threadPool, ThreadPoolMetrics metrics)
     {
         spinAssertEquals(0, metrics.activeTasks::getValue);
         spinAssertEquals(0L, metrics.completedTasks::getValue);
diff --git a/test/unit/org/apache/cassandra/net/AsyncChannelPromiseTest.java b/test/unit/org/apache/cassandra/net/AsyncChannelPromiseTest.java
index c4e6295..4c16f6f 100644
--- a/test/unit/org/apache/cassandra/net/AsyncChannelPromiseTest.java
+++ b/test/unit/org/apache/cassandra/net/AsyncChannelPromiseTest.java
@@ -17,13 +17,21 @@
  */
 package org.apache.cassandra.net;
 
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableList;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Test;
 
-import io.netty.channel.ChannelPromise;
 import io.netty.channel.embedded.EmbeddedChannel;
+import org.apache.cassandra.utils.concurrent.AbstractTestAsyncPromise;
+import org.apache.cassandra.utils.concurrent.Promise;
 
-public class AsyncChannelPromiseTest extends TestAbstractAsyncPromise
+public class AsyncChannelPromiseTest extends AbstractTestAsyncPromise
 {
     @After
     public void shutdown()
@@ -31,42 +39,66 @@
         exec.shutdownNow();
     }
 
-    private ChannelPromise newPromise()
+    private static List<Supplier<Promise<Void>>> suppliers(AtomicInteger listeners, boolean includedUncancellable)
     {
-        return new AsyncChannelPromise(new EmbeddedChannel());
+        List<Supplier<Promise<Void>>> cancellable = ImmutableList.of(
+            () -> new AsyncChannelPromise(new EmbeddedChannel()),
+            () -> AsyncChannelPromise.withListener(new EmbeddedChannel(), f -> listeners.incrementAndGet())
+        );
+
+        if (!includedUncancellable)
+            return cancellable;
+
+        return ImmutableList.<Supplier<Promise<Void>>>builder()
+               .addAll(cancellable)
+               .addAll(cancellable.stream().map(s -> (Supplier<Promise<Void>>) () -> cancelSuccess(s.get())).collect(Collectors.toList()))
+               .build();
     }
 
     @Test
     public void testSuccess()
     {
-        for (boolean setUncancellable : new boolean[] { false, true })
-            for (boolean tryOrSet : new boolean[]{ false, true })
-                testOneSuccess(newPromise(), setUncancellable, tryOrSet, null, null);
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Void>>> suppliers = suppliers(initialListeners, true);
+        for (boolean tryOrSet : new boolean[]{ false, true })
+            for (Supplier<Promise<Void>> supplier : suppliers)
+                testOneSuccess(supplier.get(), tryOrSet, null, null);
+        Assert.assertEquals(2 * 2, initialListeners.get());
     }
 
     @Test
     public void testFailure()
     {
-        for (boolean setUncancellable : new boolean[] { false, true })
-            for (boolean tryOrSet : new boolean[] { false, true })
-                for (Throwable v : new Throwable[] { null, new NullPointerException() })
-                    testOneFailure(newPromise(), setUncancellable, tryOrSet, v, null);
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Void>>> suppliers = suppliers(initialListeners, true);
+        for (boolean tryOrSet : new boolean[]{ false, true })
+            for (Throwable v : new Throwable[] { null, new NullPointerException() })
+                for (Supplier<Promise<Void>> supplier : suppliers)
+                    testOneFailure(supplier.get(), tryOrSet, v, null);
+        Assert.assertEquals(2 * 2 * 2, initialListeners.get());
     }
 
 
     @Test
     public void testCancellation()
     {
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Void>>> suppliers = suppliers(initialListeners, false);
         for (boolean interruptIfRunning : new boolean[] { true, false })
-            testOneCancellation(newPromise(), interruptIfRunning, null);
+            for (Supplier<Promise<Void>> supplier : suppliers)
+                testOneCancellation(supplier.get(), interruptIfRunning, null);
+        Assert.assertEquals(2, initialListeners.get());
     }
 
 
     @Test
     public void testTimeout()
     {
-        for (boolean setUncancellable : new boolean[] { true, false })
-            testOneTimeout(newPromise(), setUncancellable);
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Void>>> suppliers = suppliers(initialListeners, true);
+        for (Supplier<Promise<Void>> supplier : suppliers)
+            testOneTimeout(supplier.get());
+        Assert.assertEquals(0, initialListeners.get());
     }
 
 }
diff --git a/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java b/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java
index 3d0508c..1ffcdef 100644
--- a/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java
+++ b/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java
@@ -23,6 +23,7 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertTrue;
 
 public class AsyncOneResponseTest
@@ -43,9 +44,9 @@
         final long expectedTimeoutMillis = 1000; // Should time out after roughly this time
         final long schedulingError = 10; // Scheduling is imperfect
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
         boolean timeout = !response.await(expectedTimeoutMillis, TimeUnit.MILLISECONDS);
-        long endTime = System.nanoTime();
+        long endTime = nanoTime();
 
         assertTrue(timeout);
         assertTrue(TimeUnit.NANOSECONDS.toMillis(endTime - startTime) > (expectedTimeoutMillis - schedulingError));
diff --git a/test/unit/org/apache/cassandra/net/AsyncPromiseTest.java b/test/unit/org/apache/cassandra/net/AsyncPromiseTest.java
deleted file mode 100644
index 0d2a2e9..0000000
--- a/test/unit/org/apache/cassandra/net/AsyncPromiseTest.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.net;
-
-import org.junit.After;
-import org.junit.Test;
-
-import io.netty.util.concurrent.ImmediateEventExecutor;
-import io.netty.util.concurrent.Promise;
-
-public class AsyncPromiseTest extends TestAbstractAsyncPromise
-{
-    @After
-    public void shutdown()
-    {
-        exec.shutdownNow();
-    }
-
-    private <V> Promise<V> newPromise()
-    {
-        return new AsyncPromise<>(ImmediateEventExecutor.INSTANCE);
-    }
-
-    @Test
-    public void testSuccess()
-    {
-        for (boolean setUncancellable : new boolean[] { false, true })
-            for (boolean tryOrSet : new boolean[]{ false, true })
-                for (Integer v : new Integer[]{ null, 1 })
-                    testOneSuccess(newPromise(), setUncancellable, tryOrSet, v, 2);
-    }
-
-    @Test
-    public void testFailure()
-    {
-        for (boolean setUncancellable : new boolean[] { false, true })
-            for (boolean tryOrSet : new boolean[] { false, true })
-                for (Throwable v : new Throwable[] { null, new NullPointerException() })
-                    testOneFailure(newPromise(), setUncancellable, tryOrSet, v, 2);
-    }
-
-
-    @Test
-    public void testCancellation()
-    {
-        for (boolean interruptIfRunning : new boolean[] { true, false })
-            testOneCancellation(newPromise(), interruptIfRunning, 2);
-    }
-
-
-    @Test
-    public void testTimeout()
-    {
-        for (boolean setUncancellable : new boolean[] { true, false })
-            testOneTimeout(newPromise(), setUncancellable);
-    }
-
-}
diff --git a/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java b/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java
index b575747..1d6dcf4 100644
--- a/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java
+++ b/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java
@@ -18,10 +18,11 @@
 
 package org.apache.cassandra.net;
 
-import java.io.EOFException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
 import java.nio.channels.WritableByteChannel;
+import java.nio.charset.StandardCharsets;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.After;
@@ -33,10 +34,9 @@
 import io.netty.buffer.Unpooled;
 import io.netty.channel.embedded.EmbeddedChannel;
 import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
-import org.apache.cassandra.net.AsyncStreamingInputPlus;
-import org.apache.cassandra.net.AsyncStreamingInputPlus.InputTimeoutException;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 public class AsyncStreamingInputPlusTest
 {
@@ -59,14 +59,6 @@
             buf.release(buf.refCnt());
     }
 
-//    @Test
-//    public void isOpen()
-//    {
-//        Assert.assertTrue(inputPlus.isOpen());
-//        inputPlus.requestClosure();
-//        Assert.assertFalse(inputPlus.isOpen());
-//    }
-
     @Test
     public void append_closed()
     {
@@ -104,30 +96,8 @@
         buf.writerIndex(8);
         inputPlus.append(buf);
         Assert.assertEquals(16, inputPlus.unsafeAvailable());
-
-//        ByteBuffer out = ByteBuffer.allocate(4);
-//        int readCount = inputPlus.read(out);
-//        Assert.assertEquals(4, readCount);
-//        out.flip();
-//        Assert.assertEquals(42, out.getInt());
-//        Assert.assertEquals(12, inputPlus.unsafeAvailable());
-
-//        out = ByteBuffer.allocate(8);
-//        readCount = inputPlus.read(out);
-//        Assert.assertEquals(8, readCount);
-//        out.flip();
-//        Assert.assertEquals(42, out.getLong());
-//        Assert.assertEquals(4, inputPlus.unsafeAvailable());
     }
 
-//    @Test (expected = EOFException.class)
-//    public void read_closed() throws IOException
-//    {
-//        inputPlus.requestClosure();
-//        ByteBuffer buf = ByteBuffer.allocate(1);
-//        inputPlus.read(buf);
-//    }
-
     @Test
     public void available_closed()
     {
@@ -160,6 +130,60 @@
     }
 
     @Test
+    public void rebufferAndCloseToleratesInterruption() throws InterruptedException
+    {
+        ByteBuf beforeInterrupt = channel.alloc().heapBuffer(1024);
+        beforeInterrupt.writeCharSequence("BEFORE", StandardCharsets.US_ASCII);
+        ByteBuf afterInterrupt = channel.alloc().heapBuffer(1024);
+        afterInterrupt.writeCharSequence("AFTER", StandardCharsets.US_ASCII);
+        final int totalBytes = beforeInterrupt.readableBytes() + afterInterrupt.readableBytes();
+
+        inputPlus = new AsyncStreamingInputPlus(channel);
+        Thread consumer = new Thread(() -> {
+            try
+            {
+                byte[] buffer = new byte[totalBytes];
+                Assert.assertEquals(totalBytes, inputPlus.read(buffer, 0, totalBytes));
+            }
+            catch (Throwable tr)
+            {
+                fail("Unexpected exception: " + tr);
+            }
+
+            try
+            {
+                inputPlus.readByte();
+                fail("Expected EOFException");
+            }
+            catch (ClosedChannelException ex)
+            {
+                // expected
+            }
+            catch (Throwable tr)
+            {
+                fail("Unexpected: " + tr);
+            }
+        });
+
+        try
+        {
+            consumer.start();
+            inputPlus.append(beforeInterrupt);
+            consumer.interrupt();
+            inputPlus.append(afterInterrupt);
+            inputPlus.requestClosure();
+            consumer.interrupt();
+        }
+        finally
+        {
+            consumer.join(TimeUnit.MINUTES.toMillis(1), 0);
+
+            // Check the input plus is closed by attempting to append to it
+            Assert.assertFalse(inputPlus.append(beforeInterrupt));
+        }
+    }
+
+    @Test
     public void consumeUntil_SingleBuffer_Partial_HappyPath() throws IOException
     {
         consumeUntilTestCycle(1, 8, 0, 4);
@@ -183,13 +207,13 @@
         consumeUntilTestCycle(2, 8, 0, 16);
     }
 
-    @Test(expected = EOFException.class)
+    @Test(expected = ClosedChannelException.class)
     public void consumeUntil_SingleBuffer_Fails() throws IOException
     {
         consumeUntilTestCycle(1, 8, 0, 9);
     }
 
-    @Test(expected = EOFException.class)
+    @Test(expected = ClosedChannelException.class)
     public void consumeUntil_MultipleBuffer_Fails() throws IOException
     {
         consumeUntilTestCycle(2, 8, 0, 17);
@@ -256,25 +280,4 @@
             isOpen = false;
         }
     }
-
-    @Test
-    public void rebufferTimeout() throws IOException
-    {
-        long timeoutMillis = 1000;
-        inputPlus = new AsyncStreamingInputPlus(channel, timeoutMillis, TimeUnit.MILLISECONDS);
-
-        long startNanos = System.nanoTime();
-        try
-        {
-            inputPlus.readInt();
-            Assert.fail("should not have been able to read from the queue");
-        }
-        catch (InputTimeoutException e)
-        {
-            // this is the success case, and is expected. any other exception is a failure.
-        }
-
-        long durationNanos = System.nanoTime() - startNanos;
-        Assert.assertTrue(TimeUnit.MILLISECONDS.toNanos(timeoutMillis) <= durationNanos);
-    }
 }
diff --git a/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java b/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java
index 305dc55..4775fd1 100644
--- a/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java
+++ b/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java
@@ -18,14 +18,13 @@
 
 package org.apache.cassandra.net;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.nio.file.Files;
 import java.util.Random;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import io.netty.buffer.ByteBuf;
@@ -105,7 +104,7 @@
                 buffer.putLong(1);
                 buffer.putLong(2);
                 buffer.flip();
-            }, new StreamManager.StreamRateLimiter(FBUtilities.getBroadcastAddressAndPort()));
+            }, StreamManager.getRateLimiter(FBUtilities.getBroadcastAddressAndPort()));
 
             assertEquals(40, out.position());
             assertEquals(40, out.flushed());
@@ -119,8 +118,26 @@
     }
 
     @Test
-    public void testWriteFileToChannelZeroCopy() throws IOException
+    public void testWriteFileToChannelEntireSSTableNoThrottling() throws IOException
     {
+        // Disable throttling by setting entire SSTable throughput and entire SSTable inter-DC throughput to 0
+        DatabaseDescriptor.setEntireSSTableStreamThroughputOutboundMebibytesPerSec(0);
+        DatabaseDescriptor.setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(0);
+        StreamManager.StreamRateLimiter.updateEntireSSTableThroughput();
+        StreamManager.StreamRateLimiter.updateEntireSSTableInterDCThroughput();
+
+        testWriteFileToChannel(true);
+    }
+
+    @Test
+    public void testWriteFileToChannelEntireSSTable() throws IOException
+    {
+        // Enable entire SSTable throttling by setting it to 200 MiB/s
+        DatabaseDescriptor.setEntireSSTableStreamThroughputOutboundMebibytesPerSec(200);
+        DatabaseDescriptor.setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(200);
+        StreamManager.StreamRateLimiter.updateEntireSSTableThroughput();
+        StreamManager.StreamRateLimiter.updateEntireSSTableInterDCThroughput();
+
         testWriteFileToChannel(true);
     }
 
@@ -136,10 +153,10 @@
         int length = (int) file.length();
 
         EmbeddedChannel channel = new TestChannel(4);
-        StreamManager.StreamRateLimiter limiter = new StreamManager.StreamRateLimiter(FBUtilities.getBroadcastAddressAndPort());
+        StreamManager.StreamRateLimiter limiter = zeroCopy ? StreamManager.getEntireSSTableRateLimiter(FBUtilities.getBroadcastAddressAndPort())
+                                                           : StreamManager.getRateLimiter(FBUtilities.getBroadcastAddressAndPort());
 
-        try (RandomAccessFile raf = new RandomAccessFile(file.getPath(), "r");
-             FileChannel fileChannel = raf.getChannel();
+        try (FileChannel fileChannel = file.newReadChannel();
              AsyncStreamingOutputPlus out = new AsyncStreamingOutputPlus(channel))
         {
             assertTrue(fileChannel.isOpen());
@@ -159,7 +176,7 @@
 
     private File populateTempData(String name) throws IOException
     {
-        File file = Files.createTempFile(name, ".txt").toFile();
+        File file = new File(Files.createTempFile(name, ".txt"));
         file.deleteOnExit();
 
         Random r = new Random();
diff --git a/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java b/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java
index f90fcd1..36bc53d 100644
--- a/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java
+++ b/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java
@@ -29,8 +29,6 @@
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.net.ChunkedInputPlus;
-import org.apache.cassandra.net.ShareableBytes;
 
 import static org.junit.Assert.*;
 
@@ -156,4 +154,4 @@
         Arrays.fill(buffer.array(), (byte) fill);
         return ShareableBytes.wrap(buffer);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/net/ConnectionTest.java b/test/unit/org/apache/cassandra/net/ConnectionTest.java
index 5c637ac..ec447aa 100644
--- a/test/unit/org/apache/cassandra/net/ConnectionTest.java
+++ b/test/unit/org/apache/cassandra/net/ConnectionTest.java
@@ -68,6 +68,7 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.cassandra.net.MessagingService.VERSION_30;
@@ -80,7 +81,8 @@
 import static org.apache.cassandra.net.ConnectionType.SMALL_MESSAGES;
 import static org.apache.cassandra.net.OutboundConnectionSettings.Framing.LZ4;
 import static org.apache.cassandra.net.OutboundConnections.LARGE_MESSAGE_THRESHOLD;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 public class ConnectionTest
 {
@@ -336,7 +338,7 @@
             });
             unsafeSetHandler(Verb._TEST_1, () -> msg -> receiveDone.countDown());
             Message<?> message = Message.builder(Verb._TEST_1, new Object())
-                                        .withExpiresAt(System.nanoTime() + SECONDS.toNanos(30L))
+                                        .withExpiresAt(nanoTime() + SECONDS.toNanos(30L))
                                         .build();
             for (int i = 0 ; i < count ; ++i)
                 outbound.enqueue(message);
@@ -444,7 +446,7 @@
 
             AtomicInteger serialized = new AtomicInteger();
             Message<?> message = Message.builder(Verb._TEST_1, new Object())
-                                        .withExpiresAt(System.nanoTime() + SECONDS.toNanos(30L))
+                                        .withExpiresAt(nanoTime() + SECONDS.toNanos(30L))
                                         .build();
             unsafeSetSerializer(Verb._TEST_1, () -> new IVersionedSerializer<Object>()
             {
@@ -663,7 +665,7 @@
     {
         testManual((settings, inbound, outbound, endpoint) -> {
             Message<?> message = Message.builder(Verb._TEST_1, noPayload)
-                                        .withExpiresAt(System.nanoTime() + SECONDS.toNanos(30L))
+                                        .withExpiresAt(nanoTime() + SECONDS.toNanos(30L))
                                         .build();
 
             for (int i = 0 ; i < 1000 ; ++i)
@@ -683,12 +685,12 @@
                     for (int i = 0; i < 5; i++)
                     {
                         Message<?> message = Message.builder(Verb._TEST_1, noPayload)
-                                                    .withExpiresAt(System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(50L))
+                                                    .withExpiresAt(nanoTime() + MILLISECONDS.toNanos(50L))
                                                     .build();
                         OutboundMessageQueue queue = outbound.queue;
                         while (true)
                         {
-                            try (OutboundMessageQueue.WithLock withLock = queue.lockOrCallback(System.nanoTime(), null))
+                            try (OutboundMessageQueue.WithLock withLock = queue.lockOrCallback(nanoTime(), null))
                             {
                                 if (withLock != null)
                                 {
diff --git a/test/unit/org/apache/cassandra/net/ForwardingInfoTest.java b/test/unit/org/apache/cassandra/net/ForwardingInfoTest.java
index 16dec9f..6e0d11a 100644
--- a/test/unit/org/apache/cassandra/net/ForwardingInfoTest.java
+++ b/test/unit/org/apache/cassandra/net/ForwardingInfoTest.java
@@ -91,9 +91,9 @@
             {
                 InetAddressAndPort original = addresses.get(ii);
                 InetAddressAndPort roundtripped = iterator.next();
-                assertEquals(original.address, roundtripped.address);
+                assertEquals(original.getAddress(), roundtripped.getAddress());
                 //3.0 can't send port numbers so you get the defaults
-                assertEquals(65532, roundtripped.port);
+                assertEquals(65532, roundtripped.getPort());
             }
         }
     }
diff --git a/test/unit/org/apache/cassandra/net/FramingTest.java b/test/unit/org/apache/cassandra/net/FramingTest.java
index 78d0a84..81f95f3 100644
--- a/test/unit/org/apache/cassandra/net/FramingTest.java
+++ b/test/unit/org/apache/cassandra/net/FramingTest.java
@@ -40,10 +40,8 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.compress.BufferType;
-import org.apache.cassandra.io.util.DataInputBuffer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.io.util.DataOutputBufferFixed;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.memory.BufferPools;
diff --git a/test/unit/org/apache/cassandra/net/HandshakeTest.java b/test/unit/org/apache/cassandra/net/HandshakeTest.java
index e680b83..75ae103 100644
--- a/test/unit/org/apache/cassandra/net/HandshakeTest.java
+++ b/test/unit/org/apache/cassandra/net/HandshakeTest.java
@@ -23,6 +23,7 @@
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -33,7 +34,6 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.OutboundConnectionInitiator.Result;
 import org.apache.cassandra.net.OutboundConnectionInitiator.Result.MessagingSuccess;
 
 import static org.apache.cassandra.net.MessagingService.VERSION_30;
@@ -83,7 +83,7 @@
                               new OutboundConnectionSettings(endpoint)
                                                     .withAcceptVersions(acceptOutbound)
                                                     .withDefaults(ConnectionCategory.MESSAGING),
-                              req, new AsyncPromise<>(eventLoop));
+                              req, AsyncPromise.withExecutor(eventLoop));
             return future.get();
         }
         finally
diff --git a/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java b/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java
index 2c92a39..4f60d01 100644
--- a/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java
+++ b/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java
@@ -298,4 +298,4 @@
         assertEquals(numItems, itemsPolled.cardinality());
         assertTrue(queue.relaxedIsEmpty());
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/net/MessageTest.java b/test/unit/org/apache/cassandra/net/MessageTest.java
index d3f5617..3850bc4 100644
--- a/test/unit/org/apache/cassandra/net/MessageTest.java
+++ b/test/unit/org/apache/cassandra/net/MessageTest.java
@@ -21,7 +21,6 @@
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.nio.charset.StandardCharsets;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Stream;
 
@@ -39,9 +38,9 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.tracing.Tracing.TraceType;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.FreeRunningClock;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.net.Message.serializer;
 import static org.apache.cassandra.net.MessagingService.VERSION_3014;
@@ -51,7 +50,8 @@
 import static org.apache.cassandra.net.ParamType.RESPOND_TO;
 import static org.apache.cassandra.net.ParamType.TRACE_SESSION;
 import static org.apache.cassandra.net.ParamType.TRACE_TYPE;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 import static org.junit.Assert.*;
 
@@ -100,7 +100,7 @@
                    .withFlag(MessageFlag.CALL_BACK_ON_FAILURE)
                    .withFlag(MessageFlag.TRACK_REPAIRED_DATA)
                    .withParam(TRACE_TYPE, TraceType.QUERY)
-                   .withParam(TRACE_SESSION, UUID.randomUUID())
+                   .withParam(TRACE_SESSION, nextTimeUUID())
                    .build();
 
         testInferMessageSize(msg, VERSION_30);
@@ -138,7 +138,7 @@
         long createAtNanos = approxTime.now();
         long expiresAtNanos = createAtNanos + TimeUnit.SECONDS.toNanos(1);
         TraceType traceType = TraceType.QUERY;
-        UUID traceSession = UUID.randomUUID();
+        TimeUUID traceSession = nextTimeUUID();
 
         Message<NoPayload> msg =
             Message.builder(Verb._TEST_1, noPayload)
@@ -160,7 +160,7 @@
         assertEquals(traceType, msg.traceType());
         assertEquals(traceSession, msg.traceSession());
         assertNull(msg.forwardTo());
-        assertNull(msg.respondTo());
+        assertEquals(from, msg.respondTo());
     }
 
     @Test
@@ -173,7 +173,7 @@
                    .withCreatedAt(approxTime.now())
                    .withExpiresAt(approxTime.now() + TimeUnit.SECONDS.toNanos(1))
                    .withFlag(MessageFlag.CALL_BACK_ON_FAILURE)
-                   .withParam(TRACE_SESSION, UUID.randomUUID())
+                   .withParam(TRACE_SESSION, nextTimeUUID())
                    .build();
         testCycle(msg);
     }
@@ -251,7 +251,7 @@
     {
         try
         {
-            UUID sessionId = Tracing.instance.newSession(traceType);
+            TimeUUID sessionId = Tracing.instance.newSession(traceType);
             Message<NoPayload> msg = Message.builder(Verb._TEST_1, noPayload).withTracingParams().build();
             assertEquals(sessionId, msg.header.traceSession());
             assertEquals(traceType, msg.header.traceType());
diff --git a/test/unit/org/apache/cassandra/net/MessagingServiceTest.java b/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
index fd78e2a..349d865 100644
--- a/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
+++ b/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
@@ -20,8 +20,11 @@
  */
 package org.apache.cassandra.net;
 
+import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
+import java.nio.channels.AsynchronousSocketChannel;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
@@ -29,6 +32,9 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.*;
 import java.util.regex.Matcher;
 
@@ -44,6 +50,7 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
+import org.awaitility.Awaitility;
 import org.caffinitas.ohc.histo.EstimatedHistogram;
 import org.junit.After;
 import org.junit.Assert;
@@ -57,10 +64,12 @@
 public class MessagingServiceTest
 {
     private final static long[] bucketOffsets = new EstimatedHistogram(160).getBucketOffsets();
+    public static AtomicInteger rejectedConnections = new AtomicInteger();
     public static final IInternodeAuthenticator ALLOW_NOTHING_AUTHENTICATOR = new IInternodeAuthenticator()
     {
         public boolean authenticate(InetAddress remoteAddress, int remotePort)
         {
+            rejectedConnections.incrementAndGet();
             return false;
         }
 
@@ -92,6 +101,7 @@
         messagingService.metrics.resetDroppedMessages();
         messagingService.closeOutbound(InetAddressAndPort.getByName("127.0.0.2"));
         messagingService.closeOutbound(InetAddressAndPort.getByName("127.0.0.3"));
+        DatabaseDescriptor.setInternodeAuthenticator(originalAuthenticator);
     }
 
     @After
@@ -100,7 +110,7 @@
         DatabaseDescriptor.setInternodeAuthenticator(originalAuthenticator);
         DatabaseDescriptor.setInternodeMessagingEncyptionOptions(originalServerEncryptionOptions);
         DatabaseDescriptor.setShouldListenOnBroadcastAddress(false);
-        DatabaseDescriptor.setListenAddress(originalListenAddress.address);
+        DatabaseDescriptor.setListenAddress(originalListenAddress.getAddress());
         FBUtilities.reset();
     }
 
@@ -156,7 +166,7 @@
         addDCLatency(sentAt, now);
         assertNotNull(dcLatency.get("datacenter1"));
         assertEquals(1, dcLatency.get("datacenter1").dcLatency.getCount());
-        long expectedBucket = bucketOffsets[Math.abs(Arrays.binarySearch(bucketOffsets, MILLISECONDS.toNanos(latency))) - 1];
+        long expectedBucket = bucketOffsets[Math.abs(Arrays.binarySearch(bucketOffsets, MILLISECONDS.toMicros(latency))) - 1];
         assertEquals(expectedBucket, dcLatency.get("datacenter1").dcLatency.getSnapshot().getMax());
     }
 
@@ -186,7 +196,7 @@
         Map<Verb, Timer> queueWaitLatency = MessagingService.instance().metrics.internalLatency;
         MessagingService.instance().metrics.recordInternalLatency(verb, latency, MILLISECONDS);
         assertEquals(1, queueWaitLatency.get(verb).getCount());
-        long expectedBucket = bucketOffsets[Math.abs(Arrays.binarySearch(bucketOffsets, MILLISECONDS.toNanos(latency))) - 1];
+        long expectedBucket = bucketOffsets[Math.abs(Arrays.binarySearch(bucketOffsets, MILLISECONDS.toMicros(latency))) - 1];
         assertEquals(expectedBucket, queueWaitLatency.get(verb).getSnapshot().getMax());
     }
 
@@ -216,21 +226,53 @@
      * @throws Exception
      */
     @Test
-    public void testFailedInternodeAuth() throws Exception
+    public void testFailedOutboundInternodeAuth() throws Exception
     {
         MessagingService ms = MessagingService.instance();
         DatabaseDescriptor.setInternodeAuthenticator(ALLOW_NOTHING_AUTHENTICATOR);
         InetAddressAndPort address = InetAddressAndPort.getByName("127.0.0.250");
 
         //Should return null
-        Message messageOut = Message.out(Verb.ECHO_REQ, NoPayload.noPayload);
-        assertFalse(ms.isConnected(address, messageOut));
+        int rejectedBefore = rejectedConnections.get();
+        Message<?> messageOut = Message.out(Verb.ECHO_REQ, NoPayload.noPayload);
+        ms.send(messageOut, address);
+        Awaitility.await().atMost(10, TimeUnit.SECONDS).until(() -> rejectedConnections.get() > rejectedBefore);
 
         //Should tolerate null
         ms.closeOutbound(address);
         ms.send(messageOut, address);
     }
 
+    @Test
+    public void testFailedInboundInternodeAuth() throws IOException, InterruptedException
+    {
+        ServerEncryptionOptions serverEncryptionOptions = new ServerEncryptionOptions()
+            .withInternodeEncryption(ServerEncryptionOptions.InternodeEncryption.none);
+
+        DatabaseDescriptor.setInternodeAuthenticator(ALLOW_NOTHING_AUTHENTICATOR);
+        InetAddress listenAddress = FBUtilities.getJustLocalAddress();
+
+        InboundConnectionSettings settings = new InboundConnectionSettings().withEncryption(serverEncryptionOptions);
+        InboundSockets connections = new InboundSockets(settings);
+
+        try (AsynchronousSocketChannel testChannel = AsynchronousSocketChannel.open())
+        {
+            connections.open().await();
+            Assert.assertTrue(connections.isListening());
+
+            int rejectedBefore = rejectedConnections.get();
+            Future<Void> connectFuture = testChannel.connect(new InetSocketAddress(listenAddress, DatabaseDescriptor.getStoragePort()));
+            Awaitility.await().atMost(10, TimeUnit.SECONDS).until(() -> rejectedConnections.get() > rejectedBefore);
+
+            connectFuture.cancel(true);
+        }
+        finally
+        {
+            connections.close().await();
+            Assert.assertFalse(connections.isListening());
+        }
+    }
+
 //    @Test
 //    public void reconnectWithNewIp() throws Exception
 //    {
@@ -310,9 +352,12 @@
     @Test
     public void listenOptionalSecureConnection() throws InterruptedException
     {
-        ServerEncryptionOptions serverEncryptionOptions = new ServerEncryptionOptions()
-                                                          .withOptional(true);
-        listen(serverEncryptionOptions, false);
+        for (int i = 0; i < 500; i++) // test used to be flaky, so run in a loop to make sure stable (see CASSANDRA-17033)
+        {
+            ServerEncryptionOptions serverEncryptionOptions = new ServerEncryptionOptions()
+                                                              .withOptional(true);
+            listen(serverEncryptionOptions, false);
+        }
     }
 
     @Test
@@ -329,7 +374,7 @@
         if (listenOnBroadcastAddr)
         {
             DatabaseDescriptor.setShouldListenOnBroadcastAddress(true);
-            listenAddress = InetAddresses.increment(FBUtilities.getBroadcastAddressAndPort().address);
+            listenAddress = InetAddresses.increment(FBUtilities.getBroadcastAddressAndPort().getAddress());
             DatabaseDescriptor.setListenAddress(listenAddress);
             FBUtilities.reset();
         }
@@ -339,18 +384,18 @@
         InboundSockets connections = new InboundSockets(settings);
         try
         {
-            connections.open().await();
-            Assert.assertTrue(connections.isListening());
+            connections.open().sync();
+            Assert.assertTrue("connections is not listening", connections.isListening());
 
             Set<InetAddressAndPort> expect = new HashSet<>();
             expect.add(InetAddressAndPort.getByAddressOverrideDefaults(listenAddress, DatabaseDescriptor.getStoragePort()));
-            if (settings.encryption.enable_legacy_ssl_storage_port)
+            if (settings.encryption.legacy_ssl_storage_port_enabled)
                 expect.add(InetAddressAndPort.getByAddressOverrideDefaults(listenAddress, DatabaseDescriptor.getSSLStoragePort()));
             if (listenOnBroadcastAddr)
             {
-                expect.add(InetAddressAndPort.getByAddressOverrideDefaults(FBUtilities.getBroadcastAddressAndPort().address, DatabaseDescriptor.getStoragePort()));
-                if (settings.encryption.enable_legacy_ssl_storage_port)
-                    expect.add(InetAddressAndPort.getByAddressOverrideDefaults(FBUtilities.getBroadcastAddressAndPort().address, DatabaseDescriptor.getSSLStoragePort()));
+                expect.add(InetAddressAndPort.getByAddressOverrideDefaults(FBUtilities.getBroadcastAddressAndPort().getAddress(), DatabaseDescriptor.getStoragePort()));
+                if (settings.encryption.legacy_ssl_storage_port_enabled)
+                    expect.add(InetAddressAndPort.getByAddressOverrideDefaults(FBUtilities.getBroadcastAddressAndPort().getAddress(), DatabaseDescriptor.getSSLStoragePort()));
             }
 
             Assert.assertEquals(expect.size(), connections.sockets().size());
@@ -358,12 +403,12 @@
             final int legacySslPort = DatabaseDescriptor.getSSLStoragePort();
             for (InboundSockets.InboundSocket socket : connections.sockets())
             {
-                Assert.assertEquals(serverEncryptionOptions.isEnabled(), socket.settings.encryption.isEnabled());
-                Assert.assertEquals(serverEncryptionOptions.isOptional(), socket.settings.encryption.isOptional());
-                if (!serverEncryptionOptions.isEnabled())
-                    assertNotEquals(legacySslPort, socket.settings.bindAddress.port);
-                if (legacySslPort == socket.settings.bindAddress.port)
-                    Assert.assertFalse(socket.settings.encryption.isOptional());
+                Assert.assertEquals(serverEncryptionOptions.getEnabled(), socket.settings.encryption.getEnabled());
+                Assert.assertEquals(serverEncryptionOptions.getOptional(), socket.settings.encryption.getOptional());
+                if (!serverEncryptionOptions.getEnabled())
+                    assertNotEquals(legacySslPort, socket.settings.bindAddress.getPort());
+                if (legacySslPort == socket.settings.bindAddress.getPort())
+                    Assert.assertFalse(socket.settings.encryption.getOptional());
                 Assert.assertTrue(socket.settings.bindAddress.toString(), expect.remove(socket.settings.bindAddress));
             }
         }
diff --git a/test/unit/org/apache/cassandra/net/MockMessagingService.java b/test/unit/org/apache/cassandra/net/MockMessagingService.java
index 3749baf..54f9071 100644
--- a/test/unit/org/apache/cassandra/net/MockMessagingService.java
+++ b/test/unit/org/apache/cassandra/net/MockMessagingService.java
@@ -151,4 +151,4 @@
             return false;
         };
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/net/MockMessagingSpy.java b/test/unit/org/apache/cassandra/net/MockMessagingSpy.java
index 2197787..d3ea8f7 100644
--- a/test/unit/org/apache/cassandra/net/MockMessagingSpy.java
+++ b/test/unit/org/apache/cassandra/net/MockMessagingSpy.java
@@ -22,7 +22,6 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -33,7 +32,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import junit.framework.AssertionFailedError;
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 
 /**
  * Allows inspecting the behavior of mocked messaging by observing {@link MatcherResponse}.
@@ -45,8 +44,8 @@
     private final AtomicInteger messagesIntercepted = new AtomicInteger();
     private final AtomicInteger mockedMessageResponses = new AtomicInteger();
 
-    private final BlockingQueue<Message<?>> interceptedMessages = new LinkedBlockingQueue<>();
-    private final BlockingQueue<Message<?>> deliveredResponses = new LinkedBlockingQueue<>();
+    private final BlockingQueue<Message<?>> interceptedMessages = newBlockingQueue();
+    private final BlockingQueue<Message<?>> deliveredResponses = newBlockingQueue();
 
     private static final Executor executor = Executors.newSingleThreadExecutor();
 
@@ -232,7 +231,7 @@
             {
                 T result = queue.poll(time, unit);
                 if (result != null)
-                    setException(new AssertionFailedError("Received unexpected message: " + result));
+                    setException(new AssertionError("Received unexpected message: " + result));
                 else
                     set(true);
             }
@@ -242,4 +241,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/net/OutboundConnectionSettingsTest.java b/test/unit/org/apache/cassandra/net/OutboundConnectionSettingsTest.java
index 66773f8..89549b4 100644
--- a/test/unit/org/apache/cassandra/net/OutboundConnectionSettingsTest.java
+++ b/test/unit/org/apache/cassandra/net/OutboundConnectionSettingsTest.java
@@ -34,8 +34,6 @@
 import org.apache.cassandra.locator.Replica;
 
 import static org.apache.cassandra.config.DatabaseDescriptor.getEndpointSnitch;
-import static org.apache.cassandra.net.MessagingService.current_version;
-import static org.apache.cassandra.net.ConnectionType.*;
 import static org.apache.cassandra.net.OutboundConnectionsTest.LOCAL_ADDR;
 import static org.apache.cassandra.net.OutboundConnectionsTest.REMOTE_ADDR;
 
diff --git a/test/unit/org/apache/cassandra/net/OutboundMessageQueueTest.java b/test/unit/org/apache/cassandra/net/OutboundMessageQueueTest.java
index bb7dc63..5cc9d1a 100644
--- a/test/unit/org/apache/cassandra/net/OutboundMessageQueueTest.java
+++ b/test/unit/org/apache/cassandra/net/OutboundMessageQueueTest.java
@@ -33,7 +33,7 @@
 import org.apache.cassandra.utils.FreeRunningClock;
 
 import static org.apache.cassandra.net.NoPayload.noPayload;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 // TODO: incomplete
 public class OutboundMessageQueueTest
diff --git a/test/unit/org/apache/cassandra/net/ProxyHandlerConnectionsTest.java b/test/unit/org/apache/cassandra/net/ProxyHandlerConnectionsTest.java
index 970ccd2..34aab91 100644
--- a/test/unit/org/apache/cassandra/net/ProxyHandlerConnectionsTest.java
+++ b/test/unit/org/apache/cassandra/net/ProxyHandlerConnectionsTest.java
@@ -54,7 +54,7 @@
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.cassandra.net.ConnectionTest.SETTINGS;
 import static org.apache.cassandra.net.OutboundConnectionSettings.Framing.CRC;
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 
 public class ProxyHandlerConnectionsTest
 {
diff --git a/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java b/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java
index 34f61a6..e9a60cd 100644
--- a/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java
+++ b/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java
@@ -24,8 +24,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.net.PrunableArrayQueue;
-
 import static org.junit.Assert.*;
 
 public class PrunableArrayQueueTest
@@ -200,4 +198,4 @@
             assertEquals("Queue size should be zero after draining. Seed: " + seed + ". Iteration: " + i, 0, testQueue.size());
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/net/ResourceLimitsTest.java b/test/unit/org/apache/cassandra/net/ResourceLimitsTest.java
index f2f8a01..5c2ecbe 100644
--- a/test/unit/org/apache/cassandra/net/ResourceLimitsTest.java
+++ b/test/unit/org/apache/cassandra/net/ResourceLimitsTest.java
@@ -23,9 +23,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.function.LongFunction;
 
-import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.KillerForTests;
 import org.junit.Assert;
diff --git a/test/unit/org/apache/cassandra/net/SocketUtils.java b/test/unit/org/apache/cassandra/net/SocketUtils.java
index a0a1490..78a49bd 100644
--- a/test/unit/org/apache/cassandra/net/SocketUtils.java
+++ b/test/unit/org/apache/cassandra/net/SocketUtils.java
@@ -54,4 +54,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/net/TestAbstractAsyncPromise.java b/test/unit/org/apache/cassandra/net/TestAbstractAsyncPromise.java
deleted file mode 100644
index fd61b09..0000000
--- a/test/unit/org/apache/cassandra/net/TestAbstractAsyncPromise.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.net;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import com.google.common.util.concurrent.Uninterruptibles;
-import org.junit.Assert;
-
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
-import io.netty.util.concurrent.Promise;
-
-abstract class TestAbstractAsyncPromise extends TestAbstractPromise
-{
-    <V> void testOneSuccess(Promise<V> promise, boolean setUncancellable, boolean tryOrSet, V value, V otherValue)
-    {
-        List<V> results = new ArrayList<>();
-        List<Integer> order = new ArrayList<>();
-        class ListenerFactory
-        {
-            int count = 0;
-
-            public GenericFutureListener<Future<V>> get()
-            {
-                int id = count++;
-                return p -> { results.add(p.getNow()); order.add(id); };
-            }
-            public GenericFutureListener<Future<V>> getRecursive()
-            {
-                int id = count++;
-                return p -> { promise.addListener(get()); results.add(p.getNow()); order.add(id); };
-            }
-        }
-        ListenerFactory listeners = new ListenerFactory();
-        Async async = new Async();
-        promise.addListener(listeners.get());
-        promise.addListeners(listeners.getRecursive(), listeners.get());
-        promise.addListener(listeners.getRecursive());
-        success(promise, Promise::getNow, null);
-        success(promise, Promise::isSuccess, false);
-        success(promise, Promise::isDone, false);
-        success(promise, Promise::isCancelled, false);
-        success(promise, Promise::isCancellable, true);
-        if (setUncancellable)
-        {
-            success(promise, Promise::setUncancellable, true);
-            success(promise, Promise::setUncancellable, true);
-            success(promise, p -> p.cancel(true), false);
-            success(promise, p -> p.cancel(false), false);
-        }
-        success(promise, Promise::isCancellable, !setUncancellable);
-        async.success(promise, Promise::get, value);
-        async.success(promise, p -> p.get(1L, TimeUnit.SECONDS), value);
-        async.success(promise, Promise::await, promise);
-        async.success(promise, Promise::awaitUninterruptibly, promise);
-        async.success(promise, p -> p.await(1L, TimeUnit.SECONDS), true);
-        async.success(promise, p -> p.await(1000L), true);
-        async.success(promise, p -> p.awaitUninterruptibly(1L, TimeUnit.SECONDS), true);
-        async.success(promise, p -> p.awaitUninterruptibly(1000L), true);
-        async.success(promise, Promise::sync, promise);
-        async.success(promise, Promise::syncUninterruptibly, promise);
-        if (tryOrSet) promise.trySuccess(value);
-        else promise.setSuccess(value);
-        success(promise, p -> p.cancel(true), false);
-        success(promise, p -> p.cancel(false), false);
-        failure(promise, p -> p.setSuccess(null), IllegalStateException.class);
-        failure(promise, p -> p.setFailure(new NullPointerException()), IllegalStateException.class);
-        success(promise, Promise::getNow, value);
-        success(promise, p -> p.trySuccess(otherValue), false);
-        success(promise, p -> p.tryFailure(new NullPointerException()), false);
-        success(promise, Promise::getNow, value);
-        success(promise, Promise::cause, null);
-        promise.addListener(listeners.get());
-        promise.addListeners(listeners.getRecursive(), listeners.get());
-        promise.addListener(listeners.getRecursive());
-        success(promise, Promise::isSuccess, true);
-        success(promise, Promise::isDone, true);
-        success(promise, Promise::isCancelled, false);
-        success(promise, Promise::isCancellable, false);
-        async.verify();
-        Assert.assertEquals(listeners.count, results.size());
-        Assert.assertEquals(listeners.count, order.size());
-        for (V result : results)
-            Assert.assertEquals(value, result);
-        for (int i = 0 ; i < order.size() ; ++i)
-            Assert.assertEquals(i, order.get(i).intValue());
-    }
-
-    <V> void testOneFailure(Promise<V> promise, boolean setUncancellable, boolean tryOrSet, Throwable cause, V otherValue)
-    {
-        List<Throwable> results = new ArrayList<>();
-        List<Integer> order = new ArrayList<>();
-        Async async = new Async();
-        class ListenerFactory
-        {
-            int count = 0;
-
-            public GenericFutureListener<Future<V>> get()
-            {
-                int id = count++;
-                return p -> { results.add(p.cause()); order.add(id); };
-            }
-            public GenericFutureListener<Future<V>> getRecursive()
-            {
-                int id = count++;
-                return p -> { promise.addListener(get()); results.add(p.cause()); order.add(id); };
-            }
-        }
-        ListenerFactory listeners = new ListenerFactory();
-        promise.addListener(listeners.get());
-        promise.addListeners(listeners.getRecursive(), listeners.get());
-        promise.addListener(listeners.getRecursive());
-        success(promise, Promise::isSuccess, false);
-        success(promise, Promise::isDone, false);
-        success(promise, Promise::isCancelled, false);
-        success(promise, Promise::isCancellable, true);
-        if (setUncancellable)
-        {
-            success(promise, Promise::setUncancellable, true);
-            success(promise, Promise::setUncancellable, true);
-            success(promise, p -> p.cancel(true), false);
-            success(promise, p -> p.cancel(false), false);
-        }
-        success(promise, Promise::isCancellable, !setUncancellable);
-        success(promise, Promise::getNow, null);
-        success(promise, Promise::cause, null);
-        async.failure(promise, Promise::get, ExecutionException.class);
-        async.failure(promise, p -> p.get(1L, TimeUnit.SECONDS), ExecutionException.class);
-        async.success(promise, Promise::await, promise);
-        async.success(promise, Promise::awaitUninterruptibly, promise);
-        async.success(promise, p -> p.await(1L, TimeUnit.SECONDS), true);
-        async.success(promise, p -> p.await(1000L), true);
-        async.success(promise, p -> p.awaitUninterruptibly(1L, TimeUnit.SECONDS), true);
-        async.success(promise, p -> p.awaitUninterruptibly(1000L), true);
-        async.failure(promise, Promise::sync, cause);
-        async.failure(promise, Promise::syncUninterruptibly, cause);
-        if (tryOrSet) promise.tryFailure(cause);
-        else promise.setFailure(cause);
-        success(promise, p -> p.cancel(true), false);
-        success(promise, p -> p.cancel(false), false);
-        failure(promise, p -> p.setSuccess(null), IllegalStateException.class);
-        failure(promise, p -> p.setFailure(new NullPointerException()), IllegalStateException.class);
-        success(promise, Promise::cause, cause);
-        success(promise, Promise::getNow, null);
-        success(promise, p -> p.trySuccess(otherValue), false);
-        success(promise, p -> p.tryFailure(new NullPointerException()), false);
-        success(promise, Promise::getNow, null);
-        success(promise, Promise::cause, cause);
-        promise.addListener(listeners.get());
-        promise.addListeners(listeners.getRecursive(), listeners.get());
-        promise.addListener(listeners.getRecursive());
-        success(promise, Promise::isSuccess, false);
-        success(promise, Promise::isDone, true);
-        success(promise, Promise::isCancelled, false);
-        success(promise, Promise::isCancellable, false);
-        async.verify();
-        Assert.assertEquals(listeners.count, results.size());
-        Assert.assertEquals(listeners.count, order.size());
-        for (Throwable result : results)
-            Assert.assertEquals(cause, result);
-        for (int i = 0 ; i < order.size() ; ++i)
-            Assert.assertEquals(i, order.get(i).intValue());
-    }
-
-    public <V> void testOneCancellation(Promise<V> promise, boolean interruptIfRunning, V otherValue)
-    {
-        Async async = new Async();
-        success(promise, Promise::isCancellable, true);
-        success(promise, Promise::getNow, null);
-        success(promise, Promise::cause, null);
-        async.failure(promise, Promise::get, CancellationException.class);
-        async.failure(promise, p -> p.get(1L, TimeUnit.SECONDS), CancellationException.class);
-        async.success(promise, Promise::await, promise);
-        async.success(promise, Promise::awaitUninterruptibly, promise);
-        async.success(promise, p -> p.await(1L, TimeUnit.SECONDS), true);
-        async.success(promise, p -> p.await(1000L), true);
-        async.success(promise, p -> p.awaitUninterruptibly(1L, TimeUnit.SECONDS), true);
-        async.success(promise, p -> p.awaitUninterruptibly(1000L), true);
-        async.failure(promise, Promise::sync, CancellationException.class);
-        async.failure(promise, Promise::syncUninterruptibly, CancellationException.class);
-        promise.cancel(interruptIfRunning);
-        failure(promise, p -> p.setFailure(null), IllegalStateException.class);
-        failure(promise, p -> p.setFailure(null), IllegalStateException.class);
-        Assert.assertTrue(promise.cause() instanceof CancellationException);
-        success(promise, Promise::getNow, null);
-        success(promise, p -> p.trySuccess(otherValue), false);
-        success(promise, Promise::getNow, null);
-        Assert.assertTrue(promise.cause() instanceof CancellationException);
-        success(promise, Promise::isSuccess, false);
-        success(promise, Promise::isDone, true);
-        success(promise, Promise::isCancelled, true);
-        success(promise, Promise::isCancellable, false);
-        async.verify();
-    }
-
-
-    public <V> void testOneTimeout(Promise<V> promise, boolean setUncancellable)
-    {
-        Async async = new Async();
-        if (setUncancellable)
-            success(promise, Promise::setUncancellable, true);
-        success(promise, Promise::isCancellable, !setUncancellable);
-        async.failure(promise, p -> p.get(1L, TimeUnit.MILLISECONDS), TimeoutException.class);
-        async.success(promise, p -> p.await(1L, TimeUnit.MILLISECONDS), false);
-        async.success(promise, p -> p.await(1L), false);
-        async.success(promise, p -> p.awaitUninterruptibly(1L, TimeUnit.MILLISECONDS), false);
-        async.success(promise, p -> p.awaitUninterruptibly(1L), false);
-        Uninterruptibles.sleepUninterruptibly(10L, TimeUnit.MILLISECONDS);
-        async.verify();
-    }
-
-}
diff --git a/test/unit/org/apache/cassandra/net/TestAbstractPromise.java b/test/unit/org/apache/cassandra/net/TestAbstractPromise.java
deleted file mode 100644
index 32d11c3..0000000
--- a/test/unit/org/apache/cassandra/net/TestAbstractPromise.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.net;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-
-import org.junit.Assert;
-
-import io.netty.util.concurrent.Promise;
-import net.openhft.chronicle.core.util.ThrowingBiConsumer;
-import net.openhft.chronicle.core.util.ThrowingConsumer;
-import net.openhft.chronicle.core.util.ThrowingFunction;
-
-abstract class TestAbstractPromise
-{
-    final ExecutorService exec = Executors.newCachedThreadPool();
-
-    class Async
-    {
-        final List<ThrowingBiConsumer<Long, TimeUnit, ?>> waitingOn = new ArrayList<>();
-        void verify()
-        {
-            for (int i = 0 ; i < waitingOn.size() ; ++i)
-            {
-                try
-                {
-                    waitingOn.get(i).accept(1L, TimeUnit.SECONDS);
-                }
-                catch (Throwable t)
-                {
-                    throw new AssertionError("" + i, t);
-                }
-            }
-        }
-        <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Throwable failsWith)
-        {
-            waitingOn.add(exec.submit(() -> TestAbstractPromise.failure(promise, action, failsWith))::get);
-        }
-        <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Class<? extends Throwable> failsWith)
-        {
-            waitingOn.add(exec.submit(() -> TestAbstractPromise.failure(promise, action, failsWith))::get);
-        }
-        <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Predicate<Throwable> failsWith)
-        {
-            waitingOn.add(exec.submit(() -> TestAbstractPromise.failure(promise, action, failsWith))::get);
-        }
-        <P extends Promise<?>, R> void success(P promise, ThrowingFunction<P, R, ?> action, R result)
-        {
-            waitingOn.add(exec.submit(() -> TestAbstractPromise.success(promise, action, result))::get);
-        }
-    }
-
-    private static <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Throwable failsWith)
-    {
-        failure(promise, action, t -> Objects.equals(failsWith, t));
-    }
-
-    static <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Class<? extends Throwable> failsWith)
-    {
-        failure(promise, action, failsWith::isInstance);
-    }
-
-    private static <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Predicate<Throwable> failsWith)
-    {
-        Throwable fail = null;
-        try
-        {
-            action.accept(promise);
-        }
-        catch (Throwable t)
-        {
-            fail = t;
-        }
-        if (!failsWith.test(fail))
-            throw new AssertionError(fail);
-    }
-
-    static <P extends Promise<?>, R> void success(P promise, ThrowingFunction<P, R, ?> action, R result)
-    {
-        try
-        {
-            Assert.assertEquals(result, action.apply(promise));
-        }
-        catch (Throwable t)
-        {
-            throw new AssertionError(t);
-        }
-    }
-
-}
diff --git a/test/unit/org/apache/cassandra/net/TestChannel.java b/test/unit/org/apache/cassandra/net/TestChannel.java
index 17da6fa..d76e045 100644
--- a/test/unit/org/apache/cassandra/net/TestChannel.java
+++ b/test/unit/org/apache/cassandra/net/TestChannel.java
@@ -19,28 +19,45 @@
 package org.apache.cassandra.net;
 
 import java.io.IOException;
+import java.net.SocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.WritableByteChannel;
 
+import com.google.common.net.InetAddresses;
+
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.Unpooled;
 import io.netty.channel.ChannelOutboundBuffer;
 import io.netty.channel.FileRegion;
 import io.netty.channel.embedded.EmbeddedChannel;
+import org.apache.cassandra.locator.InetAddressAndPort;
 
 public class TestChannel extends EmbeddedChannel
 {
+    public static final InetAddressAndPort REMOTE_ADDR = InetAddressAndPort.getByAddressOverrideDefaults(InetAddresses.forString("127.0.0.2"), 0);
+
     final int inFlightLimit;
     int inFlight;
 
     ChannelOutboundBuffer flush;
     long flushBytes;
 
+    public TestChannel()
+    {
+        this(Integer.MAX_VALUE);
+    }
+
     public TestChannel(int inFlightLimit)
     {
         this.inFlightLimit = inFlightLimit;
     }
 
+    @Override
+    public SocketAddress remoteAddress()
+    {
+        return REMOTE_ADDR;
+    }
+
     // we override ByteBuf to prevent retain() from working, to avoid release() since it is not needed in our usage
     // since the lifetime must live longer, we simply copy any outbound ByteBuf here for our tests
     protected void doWrite(ChannelOutboundBuffer in)
diff --git a/test/unit/org/apache/cassandra/net/WriteCallbackInfoTest.java b/test/unit/org/apache/cassandra/net/WriteCallbackInfoTest.java
deleted file mode 100644
index b4bf8b7..0000000
--- a/test/unit/org/apache/cassandra/net/WriteCallbackInfoTest.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*    http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*/
-package org.apache.cassandra.net;
-
-import java.util.UUID;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.junit.Assert;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.db.Mutation;
-import org.apache.cassandra.db.RegularAndStaticColumns;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.schema.MockSchema;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.service.paxos.Commit;
-import org.apache.cassandra.utils.ByteBufferUtil;
-
-import static org.apache.cassandra.locator.ReplicaUtils.full;
-
-public class WriteCallbackInfoTest
-{
-    @BeforeClass
-    public static void initDD()
-    {
-        DatabaseDescriptor.daemonInitialization();
-    }
-
-    @Test
-    public void testShouldHint() throws Exception
-    {
-        testShouldHint(Verb.COUNTER_MUTATION_REQ, ConsistencyLevel.ALL, true, false);
-        for (Verb verb : new Verb[] { Verb.PAXOS_COMMIT_REQ, Verb.MUTATION_REQ })
-        {
-            testShouldHint(verb, ConsistencyLevel.ALL, true, true);
-            testShouldHint(verb, ConsistencyLevel.ANY, true, false);
-            testShouldHint(verb, ConsistencyLevel.ALL, false, false);
-        }
-    }
-
-    private void testShouldHint(Verb verb, ConsistencyLevel cl, boolean allowHints, boolean expectHint) throws Exception
-    {
-        TableMetadata metadata = MockSchema.newTableMetadata("", "");
-        Object payload = verb == Verb.PAXOS_COMMIT_REQ
-                         ? new Commit(UUID.randomUUID(), new PartitionUpdate.Builder(metadata, ByteBufferUtil.EMPTY_BYTE_BUFFER, RegularAndStaticColumns.NONE, 1).build())
-                         : new Mutation(PartitionUpdate.simpleBuilder(metadata, "").build());
-
-        RequestCallbacks.WriteCallbackInfo wcbi = new RequestCallbacks.WriteCallbackInfo(Message.out(verb, payload), full(InetAddressAndPort.getByName("192.168.1.1")), null, cl, allowHints);
-        Assert.assertEquals(expectHint, wcbi.shouldHint());
-        if (expectHint)
-        {
-            Assert.assertNotNull(wcbi.mutation());
-        }
-        else
-        {
-            boolean fail = false;
-            try
-            {
-                wcbi.mutation();
-            }
-            catch (Throwable t)
-            {
-                fail = true;
-            }
-            Assert.assertTrue(fail);
-        }
-    }
-}
diff --git a/test/unit/org/apache/cassandra/repair/AbstractRepairTest.java b/test/unit/org/apache/cassandra/repair/AbstractRepairTest.java
index d57607a..eb4ca9f 100644
--- a/test/unit/org/apache/cassandra/repair/AbstractRepairTest.java
+++ b/test/unit/org/apache/cassandra/repair/AbstractRepairTest.java
@@ -20,7 +20,6 @@
 
 import java.net.UnknownHostException;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
@@ -36,7 +35,9 @@
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 @Ignore
 public abstract class AbstractRepairTest
@@ -82,9 +83,9 @@
 
     protected static final Set<Range<Token>> ALL_RANGES = ImmutableSet.of(RANGE1, RANGE2, RANGE3);
 
-    public static UUID registerSession(ColumnFamilyStore cfs, boolean isIncremental, boolean isGlobal)
+    public static TimeUUID registerSession(ColumnFamilyStore cfs, boolean isIncremental, boolean isGlobal)
     {
-        UUID sessionId = UUIDGen.getTimeUUID();
+        TimeUUID sessionId = nextTimeUUID();
 
         long repairedAt = isIncremental ? System.currentTimeMillis() : ActiveRepairService.UNREPAIRED_SSTABLE;
         ActiveRepairService.instance.registerParentRepairSession(sessionId,
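A minimal sketch (not part of the patch) of the identifier migration applied across these repair tests: java.util.UUID and UUIDGen.getTimeUUID() give way to the TimeUUID type and its static generator, exactly as the hunks above show. The class and method names below are illustrative only.

    import org.apache.cassandra.utils.TimeUUID;

    import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;

    class TimeUuidMigrationSketch
    {
        // Repair session identifiers are now strongly typed; nextTimeUUID() replaces
        // both UUIDGen.getTimeUUID() and UUID.randomUUID() in these tests.
        static TimeUUID newRepairSessionId()
        {
            return nextTimeUUID();
        }
    }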
diff --git a/test/unit/org/apache/cassandra/repair/LocalSyncTaskTest.java b/test/unit/org/apache/cassandra/repair/LocalSyncTaskTest.java
index 443d59e..47d50a2 100644
--- a/test/unit/org/apache/cassandra/repair/LocalSyncTaskTest.java
+++ b/test/unit/org/apache/cassandra/repair/LocalSyncTaskTest.java
@@ -21,11 +21,8 @@
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
@@ -42,17 +39,18 @@
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.StreamCoordinator;
-import org.apache.cassandra.streaming.DefaultConnectionFactory;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.streaming.StreamPlan;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MerkleTree;
 import org.apache.cassandra.utils.MerkleTrees;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -86,7 +84,7 @@
         final InetAddressAndPort ep2 = InetAddressAndPort.getByName("127.0.0.2");
 
         Range<Token> range = new Range<>(partitioner.getMinimumToken(), partitioner.getRandomToken());
-        RepairJobDesc desc = new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), KEYSPACE1, "Standard1", Arrays.asList(range));
+        RepairJobDesc desc = new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), KEYSPACE1, "Standard1", Arrays.asList(range));
 
         MerkleTrees tree1 = createInitialTree(desc);
 
@@ -107,7 +105,7 @@
     public void testDifference() throws Throwable
     {
         Range<Token> range = new Range<>(partitioner.getMinimumToken(), partitioner.getRandomToken());
-        UUID parentRepairSession = UUID.randomUUID();
+        TimeUUID parentRepairSession = nextTimeUUID();
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
 
@@ -116,7 +114,7 @@
                                                                  ActiveRepairService.UNREPAIRED_SSTABLE, false,
                                                                  PreviewKind.NONE);
 
-        RepairJobDesc desc = new RepairJobDesc(parentRepairSession, UUID.randomUUID(), KEYSPACE1, "Standard1", Arrays.asList(range));
+        RepairJobDesc desc = new RepairJobDesc(parentRepairSession, nextTimeUUID(), KEYSPACE1, "Standard1", Arrays.asList(range));
 
         MerkleTrees tree1 = createInitialTree(desc);
         MerkleTrees tree2 = createInitialTree(desc);
@@ -136,14 +134,14 @@
         TreeResponse r2 = new TreeResponse(InetAddressAndPort.getByName("127.0.0.2"), tree2);
         LocalSyncTask task = new LocalSyncTask(desc, r1.endpoint, r2.endpoint, MerkleTrees.difference(r1.trees, r2.trees),
                                                NO_PENDING_REPAIR, true, true, PreviewKind.NONE);
-        DefaultConnectionFactory.MAX_CONNECT_ATTEMPTS = 1;
+        NettyStreamingConnectionFactory.MAX_CONNECT_ATTEMPTS = 1;
         try
         {
             task.run();
         }
         finally
         {
-            DefaultConnectionFactory.MAX_CONNECT_ATTEMPTS = 3;
+            NettyStreamingConnectionFactory.MAX_CONNECT_ATTEMPTS = 3;
         }
 
         // ensure that the changed range was recorded
@@ -153,9 +151,9 @@
     @Test
     public void fullRepairStreamPlan() throws Exception
     {
-        UUID sessionID = registerSession(cfs, true, true);
+        TimeUUID sessionID = registerSession(cfs, true, true);
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
-        RepairJobDesc desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
+        RepairJobDesc desc = new RepairJobDesc(sessionID, nextTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
 
         TreeResponse r1 = new TreeResponse(local, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
         TreeResponse r2 = new TreeResponse(PARTICIPANT2, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
@@ -179,9 +177,9 @@
     @Test
     public void incrementalRepairStreamPlan() throws Exception
     {
-        UUID sessionID = registerSession(cfs, true, true);
+        TimeUUID sessionID = registerSession(cfs, true, true);
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
-        RepairJobDesc desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
+        RepairJobDesc desc = new RepairJobDesc(sessionID, nextTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
 
         TreeResponse r1 = new TreeResponse(local, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
         TreeResponse r2 = new TreeResponse(PARTICIPANT2, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
@@ -199,11 +197,11 @@
      * Don't reciprocate streams if the other endpoint is a transient replica
      */
     @Test
-    public void transientRemoteStreamPlan()
+    public void transientRemoteStreamPlan() throws NoSuchRepairSessionException
     {
-        UUID sessionID = registerSession(cfs, true, true);
+        TimeUUID sessionID = registerSession(cfs, true, true);
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
-        RepairJobDesc desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
+        RepairJobDesc desc = new RepairJobDesc(sessionID, nextTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
 
         TreeResponse r1 = new TreeResponse(local, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
         TreeResponse r2 = new TreeResponse(PARTICIPANT2, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
@@ -218,11 +216,11 @@
      * Don't request streams if the other endpoint is a transient replica
      */
     @Test
-    public void transientLocalStreamPlan()
+    public void transientLocalStreamPlan() throws NoSuchRepairSessionException
     {
-        UUID sessionID = registerSession(cfs, true, true);
+        TimeUUID sessionID = registerSession(cfs, true, true);
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
-        RepairJobDesc desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
+        RepairJobDesc desc = new RepairJobDesc(sessionID, nextTimeUUID(), KEYSPACE1, CF_STANDARD, prs.getRanges());
 
         TreeResponse r1 = new TreeResponse(local, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
         TreeResponse r2 = new TreeResponse(PARTICIPANT2, createInitialTree(desc, DatabaseDescriptor.getPartitioner()));
@@ -235,10 +233,10 @@
 
     private MerkleTrees createInitialTree(RepairJobDesc desc, IPartitioner partitioner)
     {
-        MerkleTrees tree = new MerkleTrees(partitioner);
-        tree.addMerkleTrees((int) Math.pow(2, 15), desc.ranges);
-        tree.init();
-        return tree;
+        MerkleTrees trees = new MerkleTrees(partitioner);
+        trees.addMerkleTrees((int) Math.pow(2, 15), desc.ranges);
+        trees.init();
+        return trees;
     }
 
     private MerkleTrees createInitialTree(RepairJobDesc desc)
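The connection-attempt knob referenced above moved from DefaultConnectionFactory to NettyStreamingConnectionFactory. Below is a minimal sketch (not part of the patch) of the save/restore pattern the test uses around that static field; the helper name is hypothetical, and restoring the previous value is a slight generalisation of the hard-coded reset to 3 in the hunk.

    import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;

    class ConnectAttemptsSketch
    {
        // Mutate the static knob for the duration of one test body, then restore it.
        static void runWithSingleConnectAttempt(Runnable body)
        {
            int previous = NettyStreamingConnectionFactory.MAX_CONNECT_ATTEMPTS;
            NettyStreamingConnectionFactory.MAX_CONNECT_ATTEMPTS = 1;
            try
            {
                body.run();
            }
            finally
            {
                NettyStreamingConnectionFactory.MAX_CONNECT_ATTEMPTS = previous;
            }
        }
    }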
diff --git a/test/unit/org/apache/cassandra/repair/RepairJobTest.java b/test/unit/org/apache/cassandra/repair/RepairJobTest.java
index 87863b5..c7c68c4 100644
--- a/test/unit/org/apache/cassandra/repair/RepairJobTest.java
+++ b/test/unit/org/apache/cassandra/repair/RepairJobTest.java
@@ -27,7 +27,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
@@ -38,19 +37,23 @@
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.ListenableFuture;
+import org.assertj.core.api.Assertions;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
+import org.apache.cassandra.concurrent.ExecutorFactory;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.dht.ByteOrderedPartitioner;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.exceptions.RepairException;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
@@ -59,6 +62,10 @@
 import org.apache.cassandra.repair.messages.SyncRequest;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.service.paxos.Paxos;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupRequest;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupResponse;
+import org.apache.cassandra.service.paxos.cleanup.PaxosCleanupSession;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.SessionSummary;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -67,13 +74,22 @@
 import org.apache.cassandra.utils.MerkleTrees;
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.Throwables;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.asserts.SyncTaskListAssert;
 
+import static java.util.Collections.emptySet;
+import static org.apache.cassandra.repair.RepairParallelism.SEQUENTIAL;
+import static org.apache.cassandra.streaming.PreviewKind.NONE;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.apache.cassandra.utils.asserts.SyncTaskAssert.assertThat;
 import static org.apache.cassandra.utils.asserts.SyncTaskListAssert.assertThat;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_START_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS2_CLEANUP_REQ;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class RepairJobTest
 {
@@ -88,7 +104,7 @@
     private static final Range<Token> RANGE_1 = range(0, 1);
     private static final Range<Token> RANGE_2 = range(2, 3);
     private static final Range<Token> RANGE_3 = range(4, 5);
-    private static final RepairJobDesc JOB_DESC = new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), KEYSPACE, CF, Collections.emptyList());
+    private static final RepairJobDesc JOB_DESC = new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), KEYSPACE, CF, Collections.emptyList());
     private static final List<Range<Token>> FULL_RANGE = Collections.singletonList(new Range<>(MURMUR3_PARTITIONER.getMinimumToken(),
                                                                                                MURMUR3_PARTITIONER.getMaximumToken()));
     private static InetAddressAndPort addr1;
@@ -106,18 +122,22 @@
     {
         private final List<Callable<?>> syncCompleteCallbacks = new ArrayList<>();
 
-        public MeasureableRepairSession(UUID parentRepairSession, UUID id, CommonRange commonRange, String keyspace,
+        public MeasureableRepairSession(TimeUUID parentRepairSession, CommonRange commonRange, String keyspace,
                                         RepairParallelism parallelismDegree, boolean isIncremental, boolean pullRepair,
-                                        PreviewKind previewKind, boolean optimiseStreams, String... cfnames)
+                                        PreviewKind previewKind, boolean optimiseStreams, boolean repairPaxos, boolean paxosOnly,
+                                        String... cfnames)
         {
-            super(parentRepairSession, id, commonRange, keyspace, parallelismDegree, isIncremental, pullRepair, previewKind, optimiseStreams, cfnames);
+            super(parentRepairSession, commonRange, keyspace, parallelismDegree, isIncremental, pullRepair,
+                  previewKind, optimiseStreams, repairPaxos, paxosOnly, cfnames);
         }
 
-        protected DebuggableThreadPoolExecutor createExecutor()
+        protected ExecutorPlus createExecutor()
         {
-            DebuggableThreadPoolExecutor executor = super.createExecutor();
-            executor.setKeepAliveTime(THREAD_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
-            return executor;
+            return ExecutorFactory.Global.executorFactory()
+                    .configurePooled("RepairJobTask", Integer.MAX_VALUE)
+                    .withDefaultThreadGroup()
+                    .withKeepAlive(THREAD_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)
+                    .build();
         }
 
         @Override
@@ -162,21 +182,21 @@
     {
         Set<InetAddressAndPort> neighbors = new HashSet<>(Arrays.asList(addr2, addr3));
 
-        UUID parentRepairSession = UUID.randomUUID();
+        TimeUUID parentRepairSession = nextTimeUUID();
         ActiveRepairService.instance.registerParentRepairSession(parentRepairSession, FBUtilities.getBroadcastAddressAndPort(),
                                                                  Collections.singletonList(Keyspace.open(KEYSPACE).getColumnFamilyStore(CF)), FULL_RANGE, false,
                                                                  ActiveRepairService.UNREPAIRED_SSTABLE, false, PreviewKind.NONE);
 
-        this.session = new MeasureableRepairSession(parentRepairSession, UUIDGen.getTimeUUID(),
-                                                    new CommonRange(neighbors, Collections.emptySet(), FULL_RANGE),
-                                                    KEYSPACE, RepairParallelism.SEQUENTIAL, false, false,
-                                                    PreviewKind.NONE, false, CF);
+        this.session = new MeasureableRepairSession(parentRepairSession,
+                                                    new CommonRange(neighbors, emptySet(), FULL_RANGE),
+                                                    KEYSPACE, SEQUENTIAL, false, false,
+                                                    NONE, false, true, false, CF);
 
         this.job = new RepairJob(session, CF);
-        this.sessionJobDesc = new RepairJobDesc(session.parentRepairSession, session.getId(),
-                                                session.keyspace, CF, session.ranges());
+        this.sessionJobDesc = new RepairJobDesc(session.state.parentRepairSession, session.getId(),
+                                                session.state.keyspace, CF, session.ranges());
 
-        FBUtilities.setBroadcastInetAddress(addr1.address);
+        FBUtilities.setBroadcastInetAddress(addr1.getAddress());
     }
 
     @After
@@ -204,6 +224,7 @@
 
         job.run();
 
+        Thread.sleep(1000);
         RepairResult result = job.get(TEST_TIMEOUT_S, TimeUnit.SECONDS);
 
         // Since there are no differences, there should be nothing to sync.
@@ -251,13 +272,17 @@
         // SyncTasks themselves should not contain significant memory
         SyncTaskListAssert.assertThat(syncTasks).hasSizeLessThan(0.2 * singleTreeSize);
 
+        // Remember the size of the session before we've executed any tasks
+        long sizeBeforeExecution = ObjectSizes.measureDeep(session);
+
         // block syncComplete execution until test has verified session still retains the trees
         CompletableFuture<?> future = new CompletableFuture<>();
         session.registerSyncCompleteCallback(future::get);
         ListenableFuture<List<SyncStat>> syncResults = job.executeTasks(syncTasks);
 
         // Immediately following execution the internal execution queue should still retain the trees
-        assertThat(ObjectSizes.measureDeep(session)).isGreaterThan(singleTreeSize);
+        long sizeDuringExecution = ObjectSizes.measureDeep(session);
+        assertThat(sizeDuringExecution).isGreaterThan(sizeBeforeExecution + (syncTasks.size() * singleTreeSize));
         // unblock syncComplete callback, session should remove trees
         future.complete(null);
 
@@ -266,9 +291,8 @@
         long millisUntilFreed;
         for (millisUntilFreed = 0; millisUntilFreed < TEST_TIMEOUT_S * 1000; millisUntilFreed += THREAD_TIMEOUT_MILLIS)
         {
-            // The measured size of the syncingTasks, and result of the computation should be much smaller
             TimeUnit.MILLISECONDS.sleep(THREAD_TIMEOUT_MILLIS);
-            if (ObjectSizes.measureDeep(session) < 0.8 * singleTreeSize)
+            if (ObjectSizes.measureDeep(session) < (sizeDuringExecution - (syncTasks.size() * singleTreeSize)))
                 break;
         }
 
@@ -291,6 +315,34 @@
     }
 
     @Test
+    public void testValidationFailure() throws InterruptedException, TimeoutException
+    {
+        Map<InetAddressAndPort, MerkleTrees> mockTrees = new HashMap<>();
+        mockTrees.put(addr1, createInitialTree(false));
+        mockTrees.put(addr2, createInitialTree(false));
+        mockTrees.put(addr3, null);
+
+        interceptRepairMessages(mockTrees, new ArrayList<>());
+
+        try 
+        {
+            job.run();
+            job.get(TEST_TIMEOUT_S, TimeUnit.SECONDS);
+            fail("The repair job should have failed on a simulated validation error.");
+        }
+        catch (ExecutionException e)
+        {
+            Assertions.assertThat(e.getCause()).isInstanceOf(RepairException.class);
+        }
+
+        // When the job fails, all three outstanding validation tasks should be aborted.
+        List<ValidationTask> tasks = job.validationTasks;
+        assertEquals(3, tasks.size());
+        assertFalse(tasks.stream().anyMatch(ValidationTask::isActive));
+        assertFalse(tasks.stream().allMatch(ValidationTask::isDone));
+    }
+
+    @Test
     public void testCreateStandardSyncTasks()
     {
         testCreateStandardSyncTasks(false);
@@ -525,7 +577,7 @@
         for (InetAddressAndPort local : new InetAddressAndPort[]{ addr1, addr2, addr3 })
         {
             FBUtilities.reset();
-            FBUtilities.setBroadcastInetAddress(local.address);
+            FBUtilities.setBroadcastInetAddress(local.getAddress());
             testLocalSyncWithTransient(local, false);
         }
     }
@@ -536,7 +588,7 @@
         for (InetAddressAndPort local : new InetAddressAndPort[]{ addr1, addr2, addr3 })
         {
             FBUtilities.reset();
-            FBUtilities.setBroadcastInetAddress(local.address);
+            FBUtilities.setBroadcastInetAddress(local.getAddress());
             testLocalSyncWithTransient(local, true);
         }
     }
@@ -592,7 +644,7 @@
 
     private static void testLocalAndRemoteTransient(boolean pullRepair)
     {
-        FBUtilities.setBroadcastInetAddress(addr4.address);
+        FBUtilities.setBroadcastInetAddress(addr4.getAddress());
         List<TreeResponse> treeResponses = Arrays.asList(treeResponse(addr1, RANGE_1, "one", RANGE_2, "one", RANGE_3, "one"),
                                                          treeResponse(addr2, RANGE_1, "two", RANGE_2, "two", RANGE_3, "two"),
                                                          treeResponse(addr3, RANGE_1, "three", RANGE_2, "three", RANGE_3, "three"),
@@ -684,7 +736,7 @@
                                                          treeResponse(addr2, RANGE_1, "different", RANGE_2, "same", RANGE_3, "different"),
                                                          treeResponse(addr3, RANGE_1, "same", RANGE_2, "same", RANGE_3, "same"));
 
-        RepairJobDesc desc = new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), "ks", "cf", Collections.emptyList());
+        RepairJobDesc desc = new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), "ks", "cf", Collections.emptyList());
         Map<SyncNodePair, SyncTask> tasks = toMap(RepairJob.createOptimisedSyncingSyncTasks(desc,
                                                                                             treeResponses,
                                                                                             addr1, // local
@@ -786,19 +838,19 @@
 
     private MerkleTrees createInitialTree(boolean invalidate)
     {
-        MerkleTrees tree = new MerkleTrees(MURMUR3_PARTITIONER);
-        tree.addMerkleTrees((int) Math.pow(2, 15), FULL_RANGE);
-        tree.init();
+        MerkleTrees trees = new MerkleTrees(MURMUR3_PARTITIONER);
+        trees.addMerkleTrees((int) Math.pow(2, 15), FULL_RANGE);
+        trees.init();
 
         if (invalidate)
         {
             // change a range in one of the trees
             Token token = MURMUR3_PARTITIONER.midpoint(FULL_RANGE.get(0).left, FULL_RANGE.get(0).right);
-            tree.invalidate(token);
-            tree.get(token).hash("non-empty hash!".getBytes());
+            trees.invalidate(token);
+            trees.get(token).hash("non-empty hash!".getBytes());
         }
 
-        return tree;
+        return trees;
     }
 
     private void interceptRepairMessages(Map<InetAddressAndPort, MerkleTrees> mockTrees,
@@ -806,32 +858,49 @@
     {
         MessagingService.instance().inboundSink.add(message -> message.verb().isResponse());
         MessagingService.instance().outboundSink.add((message, to) -> {
-            if (message == null || !(message.payload instanceof RepairMessage))
+                if (message == null || !(message.payload instanceof RepairMessage))
+                    return false;
+
+                if (message.verb() == PAXOS2_CLEANUP_START_PREPARE_REQ)
+                {
+                    Message<?> messageIn = message.responseWith(Paxos.newBallot(null, ConsistencyLevel.SERIAL));
+                    MessagingService.instance().inboundSink.accept(messageIn);
+                    return false;
+                }
+
+                if (message.verb() == PAXOS2_CLEANUP_REQ)
+                {
+                    PaxosCleanupRequest request = (PaxosCleanupRequest) message.payload;
+                    PaxosCleanupSession.finishSession(to, new PaxosCleanupResponse(request.session, true, null));
+                    return false;
+                }
+
+                if (!(message.payload instanceof RepairMessage))
+                    return false;
+
+                // So different threads' messages don't overwrite each other.
+                synchronized (MESSAGE_LOCK)
+                {
+                    messageCapture.add(message);
+                }
+
+                switch (message.verb())
+                {
+                    case SNAPSHOT_MSG:
+                        MessagingService.instance().callbacks.removeAndRespond(message.id(), to, message.emptyResponse());
+                        break;
+                    case VALIDATION_REQ:
+                        session.validationComplete(sessionJobDesc, to, mockTrees.get(to));
+                        break;
+                    case SYNC_REQ:
+                        SyncRequest syncRequest = (SyncRequest) message.payload;
+                        session.syncComplete(sessionJobDesc, new SyncNodePair(syncRequest.src, syncRequest.dst),
+                                             true, Collections.emptyList());
+                        break;
+                    default:
+                        break;
+                }
                 return false;
-
-            // So different Thread's messages don't overwrite each other.
-            synchronized (MESSAGE_LOCK)
-            {
-                messageCapture.add(message);
-            }
-
-            switch (message.verb())
-            {
-                case SNAPSHOT_MSG:
-                    MessagingService.instance().callbacks.removeAndRespond(message.id(), to, message.emptyResponse());
-                    break;
-                case VALIDATION_REQ:
-                    session.validationComplete(sessionJobDesc, to, mockTrees.get(to));
-                    break;
-                case SYNC_REQ:
-                    SyncRequest syncRequest = (SyncRequest) message.payload;
-                    session.syncComplete(sessionJobDesc, new SyncNodePair(syncRequest.src, syncRequest.dst),
-                                         true, Collections.emptyList());
-                    break;
-                default:
-                    break;
-            }
-            return false;
         });
     }
 }
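MeasureableRepairSession.createExecutor now returns an ExecutorPlus built through ExecutorFactory instead of adjusting a DebuggableThreadPoolExecutor. A minimal sketch (not part of the patch), using only the builder calls shown in the hunk above:

    import java.util.concurrent.TimeUnit;

    import org.apache.cassandra.concurrent.ExecutorFactory;
    import org.apache.cassandra.concurrent.ExecutorPlus;

    class RepairExecutorSketch
    {
        // Pooled executor with an unbounded thread cap and a short keep-alive so idle
        // worker threads exit quickly, mirroring the test's createExecutor override.
        static ExecutorPlus repairJobExecutor(long keepAliveMillis)
        {
            return ExecutorFactory.Global.executorFactory()
                                         .configurePooled("RepairJobTask", Integer.MAX_VALUE)
                                         .withDefaultThreadGroup()
                                         .withKeepAlive(keepAliveMillis, TimeUnit.MILLISECONDS)
                                         .build();
        }
    }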
diff --git a/test/unit/org/apache/cassandra/repair/RepairSessionTest.java b/test/unit/org/apache/cassandra/repair/RepairSessionTest.java
index 2ad5831..4db6efb 100644
--- a/test/unit/org/apache/cassandra/repair/RepairSessionTest.java
+++ b/test/unit/org/apache/cassandra/repair/RepairSessionTest.java
@@ -38,8 +38,9 @@
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -58,16 +59,15 @@
         Gossiper.instance.initializeNodeUnsafe(remote, UUID.randomUUID(), 1);
 
         // Set up RepairSession
-        UUID parentSessionId = UUIDGen.getTimeUUID();
-        UUID sessionId = UUID.randomUUID();
+        TimeUUID parentSessionId = nextTimeUUID();
         IPartitioner p = Murmur3Partitioner.instance;
         Range<Token> repairRange = new Range<>(p.getToken(ByteBufferUtil.bytes(0)), p.getToken(ByteBufferUtil.bytes(100)));
         Set<InetAddressAndPort> endpoints = Sets.newHashSet(remote);
-        RepairSession session = new RepairSession(parentSessionId, sessionId,
+        RepairSession session = new RepairSession(parentSessionId,
                                                   new CommonRange(endpoints, Collections.emptySet(), Arrays.asList(repairRange)),
                                                   "Keyspace1", RepairParallelism.SEQUENTIAL,
                                                   false, false,
-                                                  PreviewKind.NONE, false, "Standard1");
+                                                  PreviewKind.NONE, false, false, false, "Standard1");
 
         // perform convict
         session.convict(remote, Double.MAX_VALUE);
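RepairSession no longer takes a separately generated session id and gains repairPaxos/paxosOnly flags. A minimal sketch (not part of the patch) of the updated construction, with the argument order taken from the hunk above; the helper and its parameters are illustrative only.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Set;

    import org.apache.cassandra.dht.Range;
    import org.apache.cassandra.dht.Token;
    import org.apache.cassandra.locator.InetAddressAndPort;
    import org.apache.cassandra.repair.CommonRange;
    import org.apache.cassandra.repair.RepairParallelism;
    import org.apache.cassandra.repair.RepairSession;
    import org.apache.cassandra.streaming.PreviewKind;
    import org.apache.cassandra.utils.TimeUUID;

    class RepairSessionSketch
    {
        static RepairSession newSession(TimeUUID parentSessionId, Set<InetAddressAndPort> endpoints, Range<Token> repairRange)
        {
            // The per-session id is generated internally now; the two trailing booleans
            // before the table names are repairPaxos and paxosOnly (both off here).
            return new RepairSession(parentSessionId,
                                     new CommonRange(endpoints, Collections.emptySet(), Arrays.asList(repairRange)),
                                     "Keyspace1", RepairParallelism.SEQUENTIAL,
                                     false, false,
                                     PreviewKind.NONE, false, false, false, "Standard1");
        }
    }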
diff --git a/test/unit/org/apache/cassandra/repair/StreamingRepairTaskTest.java b/test/unit/org/apache/cassandra/repair/StreamingRepairTaskTest.java
index 1057a67..a499138 100644
--- a/test/unit/org/apache/cassandra/repair/StreamingRepairTaskTest.java
+++ b/test/unit/org/apache/cassandra/repair/StreamingRepairTaskTest.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.repair;
 
-import java.util.UUID;
-
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -35,7 +33,9 @@
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.StreamPlan;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class StreamingRepairTaskTest extends AbstractRepairTest
 {
@@ -60,11 +60,11 @@
     }
 
     @Test
-    public void incrementalStreamPlan()
+    public void incrementalStreamPlan() throws NoSuchRepairSessionException
     {
-        UUID sessionID = registerSession(cfs, true, true);
+        TimeUUID sessionID = registerSession(cfs, true, true);
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
-        RepairJobDesc desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), ks, tbl, prs.getRanges());
+        RepairJobDesc desc = new RepairJobDesc(sessionID, nextTimeUUID(), ks, tbl, prs.getRanges());
 
         SyncRequest request = new SyncRequest(desc, PARTICIPANT1, PARTICIPANT2, PARTICIPANT3, prs.getRanges(), PreviewKind.NONE, false);
         StreamingRepairTask task = new StreamingRepairTask(desc, request.initiator, request.src, request.dst, request.ranges, desc.sessionId, PreviewKind.NONE, false);
@@ -76,9 +76,9 @@
     @Test
     public void fullStreamPlan() throws Exception
     {
-        UUID sessionID = registerSession(cfs, false, true);
+        TimeUUID sessionID = registerSession(cfs, false, true);
         ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(sessionID);
-        RepairJobDesc desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), ks, tbl, prs.getRanges());
+        RepairJobDesc desc = new RepairJobDesc(sessionID, nextTimeUUID(), ks, tbl, prs.getRanges());
         SyncRequest request = new SyncRequest(desc, PARTICIPANT1, PARTICIPANT2, PARTICIPANT3, prs.getRanges(), PreviewKind.NONE, false);
         StreamingRepairTask task = new StreamingRepairTask(desc, request.initiator, request.src, request.dst, request.ranges, null, PreviewKind.NONE, false);
 
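The only difference between the two stream-plan tests above is the pending-repair argument: the incremental case passes the parent session id, the full case passes null. A minimal sketch (not part of the patch); the helper name is illustrative.

    import org.apache.cassandra.repair.RepairJobDesc;
    import org.apache.cassandra.repair.StreamingRepairTask;
    import org.apache.cassandra.repair.messages.SyncRequest;
    import org.apache.cassandra.streaming.PreviewKind;
    import org.apache.cassandra.utils.TimeUUID;

    class StreamingRepairTaskSketch
    {
        // pendingRepair == the parent session id -> incremental stream plan;
        // pendingRepair == null                  -> full stream plan.
        static StreamingRepairTask task(RepairJobDesc desc, SyncRequest request, TimeUUID pendingRepair)
        {
            return new StreamingRepairTask(desc, request.initiator, request.src, request.dst,
                                           request.ranges, pendingRepair, PreviewKind.NONE, false);
        }
    }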
diff --git a/test/unit/org/apache/cassandra/repair/SymmetricRemoteSyncTaskTest.java b/test/unit/org/apache/cassandra/repair/SymmetricRemoteSyncTaskTest.java
index cba64ae..710bee6 100644
--- a/test/unit/org/apache/cassandra/repair/SymmetricRemoteSyncTaskTest.java
+++ b/test/unit/org/apache/cassandra/repair/SymmetricRemoteSyncTaskTest.java
@@ -30,12 +30,12 @@
 import org.apache.cassandra.repair.messages.RepairMessage;
 import org.apache.cassandra.repair.messages.SyncRequest;
 import org.apache.cassandra.streaming.PreviewKind;
-import org.apache.cassandra.utils.MerkleTree;
-import org.apache.cassandra.utils.UUIDGen;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class SymmetricRemoteSyncTaskTest extends AbstractRepairTest
 {
-    private static final RepairJobDesc DESC = new RepairJobDesc(UUIDGen.getTimeUUID(), UUIDGen.getTimeUUID(), "ks", "tbl", ALL_RANGES);
+    private static final RepairJobDesc DESC = new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), "ks", "tbl", ALL_RANGES);
     private static final List<Range<Token>> RANGE_LIST = ImmutableList.of(RANGE1);
     private static class InstrumentedSymmetricRemoteSyncTask extends SymmetricRemoteSyncTask
     {
diff --git a/test/unit/org/apache/cassandra/repair/ValidationTaskTest.java b/test/unit/org/apache/cassandra/repair/ValidationTaskTest.java
new file mode 100644
index 0000000..1ea40e9
--- /dev/null
+++ b/test/unit/org/apache/cassandra/repair/ValidationTaskTest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.repair;
+
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.utils.MerkleTrees;
+
+import org.junit.Test;
+
+import java.net.UnknownHostException;
+import java.util.UUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class ValidationTaskTest 
+{
+    @Test
+    public void shouldDeactivateOnFailure() throws UnknownHostException
+    {
+        ValidationTask task = createTask();
+        assertTrue(task.isActive());
+        task.treesReceived(null);
+        assertFalse(task.isActive());
+    }
+
+    @Test
+    public void shouldIgnoreTreesWhenDeactivated() throws Exception
+    {
+        ValidationTask task = createTask();
+        assertTrue(task.isActive());
+        task.abort();
+        assertFalse(task.isActive());
+        task.treesReceived(new MerkleTrees(null));
+        assertNull(task.get());
+    }
+
+    @Test
+    public void shouldReleaseTreesOnAbort() throws Exception
+    {
+        ValidationTask task = createTask();
+        assertTrue(task.isActive());
+
+        IPartitioner partitioner = Murmur3Partitioner.instance;
+        MerkleTrees trees = new MerkleTrees(partitioner);
+        trees.addMerkleTree(128, new Range<>(partitioner.getMinimumToken(), partitioner.getMaximumToken()));
+        task.treesReceived(trees);
+        assertEquals(1, trees.size());
+        
+        // This relies on the fact that MerkleTrees clears its range -> tree map on release.
+        task.abort();
+        assertEquals(0, trees.size());
+    }
+    
+    private ValidationTask createTask() throws UnknownHostException {
+        InetAddressAndPort addressAndPort = InetAddressAndPort.getByName("127.0.0.1");
+        RepairJobDesc desc = new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), UUID.randomUUID().toString(), UUID.randomUUID().toString(), null);
+        return new ValidationTask(desc, addressAndPort, 0, PreviewKind.NONE);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/repair/ValidatorTest.java b/test/unit/org/apache/cassandra/repair/ValidatorTest.java
index cf3411a..86704d3 100644
--- a/test/unit/org/apache/cassandra/repair/ValidatorTest.java
+++ b/test/unit/org/apache/cassandra/repair/ValidatorTest.java
@@ -23,10 +23,10 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.compaction.CompactionsTest;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
@@ -38,6 +38,7 @@
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.repair.state.ValidationState;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.BufferDecoratedKey;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -55,10 +56,10 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.MerkleTree;
 import org.apache.cassandra.utils.MerkleTrees;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
-import static org.junit.Assert.assertArrayEquals;
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -68,7 +69,7 @@
 public class ValidatorTest
 {
     private static final long TEST_TIMEOUT = 60; //seconds
-    private static int testSizeMegabytes;
+    private static int testSizeMebibytes;
 
     private static final String keyspace = "ValidatorTest";
     private static final String columnFamily = "Standard1";
@@ -82,27 +83,27 @@
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(keyspace, columnFamily));
         partitioner = Schema.instance.getTableMetadata(keyspace, columnFamily).partitioner;
-        testSizeMegabytes = DatabaseDescriptor.getRepairSessionSpaceInMegabytes();
+        testSizeMebibytes = DatabaseDescriptor.getRepairSessionSpaceInMiB();
     }
 
     @After
     public void tearDown()
     {
         MessagingService.instance().outboundSink.clear();
-        DatabaseDescriptor.setRepairSessionSpaceInMegabytes(testSizeMegabytes);
+        DatabaseDescriptor.setRepairSessionSpaceInMiB(testSizeMebibytes);
     }
 
     @Before
     public void setup()
     {
-        DatabaseDescriptor.setRepairSessionSpaceInMegabytes(testSizeMegabytes);
+        DatabaseDescriptor.setRepairSessionSpaceInMiB(testSizeMebibytes);
     }
 
     @Test
     public void testValidatorComplete() throws Throwable
     {
         Range<Token> range = new Range<>(partitioner.getMinimumToken(), partitioner.getRandomToken());
-        final RepairJobDesc desc = new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), keyspace, columnFamily, Arrays.asList(range));
+        final RepairJobDesc desc = new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), keyspace, columnFamily, Arrays.asList(range));
 
         final CompletableFuture<Message> outgoingMessageSink = registerOutgoingMessageSink();
 
@@ -110,22 +111,23 @@
 
         ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(columnFamily);
 
-        Validator validator = new Validator(desc, remote, 0, PreviewKind.NONE);
-        MerkleTrees tree = new MerkleTrees(partitioner);
-        tree.addMerkleTrees((int) Math.pow(2, 15), validator.desc.ranges);
-        validator.prepare(cfs, tree);
+        Validator validator = new Validator(new ValidationState(desc, remote), 0, PreviewKind.NONE);
+        validator.state.phase.start(10, 10);
+        MerkleTrees trees = new MerkleTrees(partitioner);
+        trees.addMerkleTrees((int) Math.pow(2, 15), validator.desc.ranges);
+        validator.prepare(cfs, trees, null);
 
-        // and confirm that the tree was split
-        assertTrue(tree.size() > 1);
+        // and confirm that the trees were split
+        assertTrue(trees.size() > 1);
 
         // add a row
         Token mid = partitioner.midpoint(range.left, range.right);
         validator.add(EmptyIterators.unfilteredRow(cfs.metadata(), new BufferDecoratedKey(mid, ByteBufferUtil.bytes("inconceivable!")), false));
         validator.complete();
 
-        // confirm that the tree was validated
-        Token min = tree.partitioner().getMinimumToken();
-        assertNotNull(tree.hash(new Range<>(min, min)));
+        // confirm that the trees were validated
+        Token min = trees.partitioner().getMinimumToken();
+        assertNotNull(trees.hash(new Range<>(min, min)));
 
         Message message = outgoingMessageSink.get(TEST_TIMEOUT, TimeUnit.SECONDS);
         assertEquals(Verb.VALIDATION_RSP, message.verb());
@@ -140,14 +142,14 @@
     public void testValidatorFailed() throws Throwable
     {
         Range<Token> range = new Range<>(partitioner.getMinimumToken(), partitioner.getRandomToken());
-        final RepairJobDesc desc = new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), keyspace, columnFamily, Arrays.asList(range));
+        final RepairJobDesc desc = new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), keyspace, columnFamily, Arrays.asList(range));
 
         final CompletableFuture<Message> outgoingMessageSink = registerOutgoingMessageSink();
 
         InetAddressAndPort remote = InetAddressAndPort.getByName("127.0.0.2");
 
-        Validator validator = new Validator(desc, remote, 0, PreviewKind.NONE);
-        validator.fail();
+        Validator validator = new Validator(new ValidationState(desc, remote), 0, PreviewKind.NONE);
+        validator.fail(new Throwable());
 
         Message message = outgoingMessageSink.get(TEST_TIMEOUT, TimeUnit.SECONDS);
         assertEquals(Verb.VALIDATION_RSP, message.verb());
@@ -186,16 +188,16 @@
 
         CompactionsTest.populate(keyspace, columnFamily, 0, n, 0); //ttl=3s
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(1, cfs.getLiveSSTables().size());
 
         // wait enough to force single compaction
         TimeUnit.SECONDS.sleep(5);
 
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
-        UUID repairSessionId = UUIDGen.getTimeUUID();
-        final RepairJobDesc desc = new RepairJobDesc(repairSessionId, UUIDGen.getTimeUUID(), cfs.keyspace.getName(),
-                                                     cfs.getTableName(), Collections.singletonList(new Range<>(sstable.first.getToken(),
+        TimeUUID repairSessionId = nextTimeUUID();
+        final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(),
+                                                     cfs.getTableName(), singletonList(new Range<>(sstable.first.getToken(),
                                                                                                                sstable.last.getToken())));
 
         InetAddressAndPort host = InetAddressAndPort.getByName("127.0.0.2");
@@ -205,7 +207,7 @@
                                                                  false, PreviewKind.NONE);
 
         final CompletableFuture<Message> outgoingMessageSink = registerOutgoingMessageSink();
-        Validator validator = new Validator(desc, host, 0, true, false, PreviewKind.NONE);
+        Validator validator = new Validator(new ValidationState(desc, host), 0, true, false, PreviewKind.NONE);
         ValidationManager.instance.submitValidation(cfs, validator);
 
         Message message = outgoingMessageSink.get(TEST_TIMEOUT, TimeUnit.SECONDS);
@@ -235,7 +237,7 @@
         ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnFamily);
         cfs.clearUnsafe();
 
-        DatabaseDescriptor.setRepairSessionSpaceInMegabytes(1);
+        DatabaseDescriptor.setRepairSessionSpaceInMiB(1);
 
         // disable compaction while flushing
         cfs.disableAutoCompaction();
@@ -243,16 +245,16 @@
         // 2 ** 14 rows would normally use 2^14 leaves, but with only 1 meg we should only use 2^12
         CompactionsTest.populate(keyspace, columnFamily, 0, 1 << 14, 0);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(1, cfs.getLiveSSTables().size());
 
         // wait enough to force single compaction
         TimeUnit.SECONDS.sleep(5);
 
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
-        UUID repairSessionId = UUIDGen.getTimeUUID();
-        final RepairJobDesc desc = new RepairJobDesc(repairSessionId, UUIDGen.getTimeUUID(), cfs.keyspace.getName(),
-                                                     cfs.getTableName(), Collections.singletonList(new Range<>(sstable.first.getToken(),
+        TimeUUID repairSessionId = nextTimeUUID();
+        final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(),
+                                                     cfs.getTableName(), singletonList(new Range<>(sstable.first.getToken(),
                                                                                                                sstable.last.getToken())));
 
         InetAddressAndPort host = InetAddressAndPort.getByName("127.0.0.2");
@@ -262,7 +264,7 @@
                                                                  false, PreviewKind.NONE);
 
         final CompletableFuture<Message> outgoingMessageSink = registerOutgoingMessageSink();
-        Validator validator = new Validator(desc, host, 0, true, false, PreviewKind.NONE);
+        Validator validator = new Validator(new ValidationState(desc, host), 0, true, false, PreviewKind.NONE);
         ValidationManager.instance.submitValidation(cfs, validator);
 
         Message message = outgoingMessageSink.get(TEST_TIMEOUT, TimeUnit.SECONDS);
@@ -294,7 +296,7 @@
         ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnFamily);
         cfs.clearUnsafe();
 
-        DatabaseDescriptor.setRepairSessionSpaceInMegabytes(1);
+        DatabaseDescriptor.setRepairSessionSpaceInMiB(1);
 
         // disable compaction while flushing
         cfs.disableAutoCompaction();
@@ -302,19 +304,19 @@
         // 2 ** 14 rows would normally use 2^14 leaves, but with only 1 meg we should only use 2^12
         CompactionsTest.populate(keyspace, columnFamily, 0, 1 << 14, 0);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertEquals(1, cfs.getLiveSSTables().size());
 
         // wait enough to force single compaction
         TimeUnit.SECONDS.sleep(5);
 
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
-        UUID repairSessionId = UUIDGen.getTimeUUID();
+        TimeUUID repairSessionId = nextTimeUUID();
 
         List<Range<Token>> ranges = splitHelper(new Range<>(sstable.first.getToken(), sstable.last.getToken()), 2);
 
 
-        final RepairJobDesc desc = new RepairJobDesc(repairSessionId, UUIDGen.getTimeUUID(), cfs.keyspace.getName(),
+        final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(),
                                                      cfs.getTableName(), ranges);
 
         InetAddressAndPort host = InetAddressAndPort.getByName("127.0.0.2");
@@ -324,13 +326,13 @@
                                                                  false, PreviewKind.NONE);
 
         final CompletableFuture<Message> outgoingMessageSink = registerOutgoingMessageSink();
-        Validator validator = new Validator(desc, host, 0, true, false, PreviewKind.NONE);
+        Validator validator = new Validator(new ValidationState(desc, host), 0, true, false, PreviewKind.NONE);
         ValidationManager.instance.submitValidation(cfs, validator);
 
         Message message = outgoingMessageSink.get(TEST_TIMEOUT, TimeUnit.SECONDS);
         MerkleTrees trees = ((ValidationResponse) message.payload).trees;
 
-        // Should have 4 trees each with a depth of on average 10 (since each range should have gotten 0.25 megabytes)
+        // Should have 4 trees each with a depth of on average 10 (since each range should have gotten 0.25 mebibytes)
         Iterator<Map.Entry<Range<Token>, MerkleTree>> iterator = trees.iterator();
         int numTrees = 0;
         double totalResolution = 0;
@@ -349,7 +351,7 @@
         assertEquals(trees.rowCount(), 1 << 14);
         assertEquals(4, numTrees);
 
-        // With a single tree and a megabyte we should had a total resolution of 2^12 leaves; with multiple
+        // With a single tree and a mebibyte we should have had a total resolution of 2^12 leaves; with multiple
         // ranges we should get similar overall resolution, but not more.
         assertTrue(totalResolution > (1 << 11) && totalResolution < (1 << 13));
     }
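Two API shifts run through ValidatorTest: the repair-session space accessors are now MiB-named, and Validator wraps a ValidationState rather than taking the job descriptor and initiator directly. A minimal sketch (not part of the patch) combining both, using only calls shown in the hunks above; the class and method names are illustrative.

    import org.apache.cassandra.config.DatabaseDescriptor;
    import org.apache.cassandra.locator.InetAddressAndPort;
    import org.apache.cassandra.repair.RepairJobDesc;
    import org.apache.cassandra.repair.Validator;
    import org.apache.cassandra.repair.state.ValidationState;
    import org.apache.cassandra.streaming.PreviewKind;

    class ValidatorSketch
    {
        static Validator capped(RepairJobDesc desc, InetAddressAndPort initiator)
        {
            // Limit the per-session merkle tree space (renamed from ...InMegabytes).
            DatabaseDescriptor.setRepairSessionSpaceInMiB(1);
            // The validator is now built around an explicit ValidationState.
            return new Validator(new ValidationState(desc, initiator), 0, PreviewKind.NONE);
        }
    }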
diff --git a/test/unit/org/apache/cassandra/repair/asymmetric/DifferenceHolderTest.java b/test/unit/org/apache/cassandra/repair/asymmetric/DifferenceHolderTest.java
index 8ec0177..cab28e9 100644
--- a/test/unit/org/apache/cassandra/repair/asymmetric/DifferenceHolderTest.java
+++ b/test/unit/org/apache/cassandra/repair/asymmetric/DifferenceHolderTest.java
@@ -36,7 +36,7 @@
 import org.apache.cassandra.utils.MerkleTrees;
 import org.apache.cassandra.utils.MerkleTreesTest;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class DifferenceHolderTest
@@ -54,13 +54,13 @@
         InetAddressAndPort a1 = InetAddressAndPort.getByName("127.0.0.1");
         InetAddressAndPort a2 = InetAddressAndPort.getByName("127.0.0.2");
 
-        MerkleTrees mt1 = new MerkleTrees(Murmur3Partitioner.instance);
-        MerkleTrees mt2 = new MerkleTrees(Murmur3Partitioner.instance);
-        mt1.init();
-        mt2.init();
+        MerkleTrees mts1 = new MerkleTrees(Murmur3Partitioner.instance);
+        MerkleTrees mts2 = new MerkleTrees(Murmur3Partitioner.instance);
+        mts1.init();
+        mts2.init();
 
-        TreeResponse tr1 = new TreeResponse(a1, mt1);
-        TreeResponse tr2 = new TreeResponse(a2, mt2);
+        TreeResponse tr1 = new TreeResponse(a1, mts1);
+        TreeResponse tr2 = new TreeResponse(a2, mts2);
 
         DifferenceHolder dh = new DifferenceHolder(Lists.newArrayList(tr1, tr2));
         assertTrue(dh.get(a1).get(a2).isEmpty());
@@ -75,35 +75,35 @@
         InetAddressAndPort a1 = InetAddressAndPort.getByName("127.0.0.1");
         InetAddressAndPort a2 = InetAddressAndPort.getByName("127.0.0.2");
         // merkle tree building stolen from MerkleTreesTest:
-        MerkleTrees mt1 = new MerkleTrees(partitioner);
-        MerkleTrees mt2 = new MerkleTrees(partitioner);
-        mt1.addMerkleTree(32, fullRange);
-        mt2.addMerkleTree(32, fullRange);
-        mt1.init();
-        mt2.init();
+        MerkleTrees mts1 = new MerkleTrees(partitioner);
+        MerkleTrees mts2 = new MerkleTrees(partitioner);
+        mts1.addMerkleTree(32, fullRange);
+        mts2.addMerkleTree(32, fullRange);
+        mts1.init();
+        mts2.init();
         // add dummy hashes to both trees
-        for (MerkleTree.TreeRange range : mt1.rangeIterator())
+        for (MerkleTree.TreeRange range : mts1.rangeIterator())
             range.addAll(new MerkleTreesTest.HIterator(range.right));
-        for (MerkleTree.TreeRange range : mt2.rangeIterator())
+        for (MerkleTree.TreeRange range : mts2.rangeIterator())
             range.addAll(new MerkleTreesTest.HIterator(range.right));
 
         MerkleTree.TreeRange leftmost = null;
         MerkleTree.TreeRange middle = null;
 
-        mt1.maxsize(fullRange, maxsize + 2); // give some room for splitting
+        mts1.maxsize(fullRange, maxsize + 2); // give some room for splitting
 
         // split the leftmost
-        Iterator<MerkleTree.TreeRange> ranges = mt1.rangeIterator();
+        Iterator<MerkleTree.TreeRange> ranges = mts1.rangeIterator();
         leftmost = ranges.next();
-        mt1.split(leftmost.right);
+        mts1.split(leftmost.right);
 
         // set the hashes for the leaf of the created split
-        middle = mt1.get(leftmost.right);
+        middle = mts1.get(leftmost.right);
         middle.hash(digest("arbitrary!"));
-        mt1.get(partitioner.midpoint(leftmost.left, leftmost.right)).hash(digest("even more arbitrary!"));
+        mts1.get(partitioner.midpoint(leftmost.left, leftmost.right)).hash(digest("even more arbitrary!"));
 
-        TreeResponse tr1 = new TreeResponse(a1, mt1);
-        TreeResponse tr2 = new TreeResponse(a2, mt2);
+        TreeResponse tr1 = new TreeResponse(a1, mts1);
+        TreeResponse tr2 = new TreeResponse(a2, mts2);
 
         DifferenceHolder dh = new DifferenceHolder(Lists.newArrayList(tr1, tr2));
         assertTrue(dh.get(a1).get(a2).size() == 1);
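A minimal sketch (not part of the patch) of the DifferenceHolder flow exercised above: build a MerkleTrees per endpoint, wrap each in a TreeResponse, and query the holder for ranges that differ between two endpoints. The class name is illustrative.

    import com.google.common.collect.Lists;

    import org.apache.cassandra.dht.Murmur3Partitioner;
    import org.apache.cassandra.locator.InetAddressAndPort;
    import org.apache.cassandra.repair.TreeResponse;
    import org.apache.cassandra.repair.asymmetric.DifferenceHolder;
    import org.apache.cassandra.utils.MerkleTrees;

    class DifferenceHolderSketch
    {
        static boolean noDifferences() throws Exception
        {
            InetAddressAndPort a1 = InetAddressAndPort.getByName("127.0.0.1");
            InetAddressAndPort a2 = InetAddressAndPort.getByName("127.0.0.2");

            MerkleTrees mts1 = new MerkleTrees(Murmur3Partitioner.instance);
            MerkleTrees mts2 = new MerkleTrees(Murmur3Partitioner.instance);
            mts1.init();
            mts2.init();

            // Identical (empty) trees yield an empty difference set between the endpoints.
            DifferenceHolder dh = new DifferenceHolder(Lists.newArrayList(new TreeResponse(a1, mts1),
                                                                          new TreeResponse(a2, mts2)));
            return dh.get(a1).get(a2).isEmpty();
        }
    }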
diff --git a/test/unit/org/apache/cassandra/repair/asymmetric/RangeMapTest.java b/test/unit/org/apache/cassandra/repair/asymmetric/RangeMapTest.java
index a4327ca..04d7cc4 100644
--- a/test/unit/org/apache/cassandra/repair/asymmetric/RangeMapTest.java
+++ b/test/unit/org/apache/cassandra/repair/asymmetric/RangeMapTest.java
@@ -31,7 +31,7 @@
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 
-import static junit.framework.TestCase.assertEquals;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
diff --git a/test/unit/org/apache/cassandra/repair/asymmetric/ReduceHelperTest.java b/test/unit/org/apache/cassandra/repair/asymmetric/ReduceHelperTest.java
index be5a553..f2ee14f 100644
--- a/test/unit/org/apache/cassandra/repair/asymmetric/ReduceHelperTest.java
+++ b/test/unit/org/apache/cassandra/repair/asymmetric/ReduceHelperTest.java
@@ -44,9 +44,9 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.InetAddressAndPort;
 
-import static junit.framework.TestCase.fail;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class ReduceHelperTest
 {
diff --git a/test/unit/org/apache/cassandra/repair/asymmetric/StreamFromOptionsTest.java b/test/unit/org/apache/cassandra/repair/asymmetric/StreamFromOptionsTest.java
index e2a7700..d13937e 100644
--- a/test/unit/org/apache/cassandra/repair/asymmetric/StreamFromOptionsTest.java
+++ b/test/unit/org/apache/cassandra/repair/asymmetric/StreamFromOptionsTest.java
@@ -31,7 +31,7 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.InetAddressAndPort;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
diff --git a/test/unit/org/apache/cassandra/repair/consistent/AbstractConsistentSessionTest.java b/test/unit/org/apache/cassandra/repair/consistent/AbstractConsistentSessionTest.java
index 4570328..b7d49fc 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/AbstractConsistentSessionTest.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/AbstractConsistentSessionTest.java
@@ -20,7 +20,6 @@
 
 import java.net.UnknownHostException;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
@@ -36,7 +35,9 @@
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 @Ignore
 public abstract class AbstractConsistentSessionTest
@@ -76,9 +77,9 @@
     protected static final Range<Token> RANGE3 = new Range<>(t(4), t(5));
 
 
-    protected static UUID registerSession(ColumnFamilyStore cfs)
+    protected static TimeUUID registerSession(ColumnFamilyStore cfs)
     {
-        UUID sessionId = UUIDGen.getTimeUUID();
+        TimeUUID sessionId = nextTimeUUID();
 
         ActiveRepairService.instance.registerParentRepairSession(sessionId,
                                                                  COORDINATOR,
diff --git a/test/unit/org/apache/cassandra/repair/consistent/CoordinatorMessagingTest.java b/test/unit/org/apache/cassandra/repair/consistent/CoordinatorMessagingTest.java
index 420cd54..0f457b6 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/CoordinatorMessagingTest.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/CoordinatorMessagingTest.java
@@ -20,8 +20,6 @@
 
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -31,8 +29,12 @@
 import java.util.function.Supplier;
 
 import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
+
+import org.apache.cassandra.repair.CoordinatedRepairResult;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.Promise;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -48,6 +50,7 @@
 import org.apache.cassandra.net.MockMessagingSpy;
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.repair.AbstractRepairTest;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
 import org.apache.cassandra.repair.RepairSessionResult;
 import org.apache.cassandra.repair.messages.FinalizePromise;
 import org.apache.cassandra.repair.messages.FinalizePropose;
@@ -61,6 +64,7 @@
 import static org.apache.cassandra.net.MockMessagingService.all;
 import static org.apache.cassandra.net.MockMessagingService.to;
 import static org.apache.cassandra.net.MockMessagingService.verb;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.fail;
 
 public class CoordinatorMessagingTest extends AbstractRepairTest
@@ -92,7 +96,7 @@
     }
 
     @Test
-    public void testMockedMessagingHappyPath() throws InterruptedException, ExecutionException, TimeoutException
+    public void testMockedMessagingHappyPath() throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         CountDownLatch prepareLatch = createLatch();
         CountDownLatch finalizeLatch = createLatch();
@@ -101,32 +105,30 @@
         MockMessagingSpy spyFinalize = createFinalizeSpy(Collections.emptySet(), Collections.emptySet(), finalizeLatch);
         MockMessagingSpy spyCommit = createCommitSpy();
 
-        UUID uuid = registerSession(cfs, true, true);
+        TimeUUID uuid = registerSession(cfs, true, true);
         CoordinatorSession coordinator = ActiveRepairService.instance.consistent.coordinated.registerSession(uuid, PARTICIPANTS, false);
         AtomicBoolean repairSubmitted = new AtomicBoolean(false);
-        SettableFuture<List<RepairSessionResult>> repairFuture = SettableFuture.create();
-        Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSupplier = () ->
+        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
+        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () ->
         {
             repairSubmitted.set(true);
             return repairFuture;
         };
 
         // coordinator sends prepare requests to create local session and perform anticompaction
-        AtomicBoolean hasFailures = new AtomicBoolean(false);
         Assert.assertFalse(repairSubmitted.get());
 
         // execute repair and start prepare phase
-        ListenableFuture<Boolean> sessionResult = coordinator.execute(sessionSupplier, hasFailures);
+        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);
         Assert.assertFalse(sessionResult.isDone());
-        Assert.assertFalse(hasFailures.get());
+
         // prepare completed
         prepareLatch.countDown();
         spyPrepare.interceptMessageOut(3).get(1, TimeUnit.SECONDS);
         Assert.assertFalse(sessionResult.isDone());
-        Assert.assertFalse(hasFailures.get());
 
         // set result from local repair session
-        repairFuture.set(Lists.newArrayList(createResult(coordinator), createResult(coordinator), createResult(coordinator)));
+        repairFuture.trySuccess(CoordinatedRepairResult.success(Lists.newArrayList(createResult(coordinator), createResult(coordinator), createResult(coordinator))));
 
         // finalize phase
         finalizeLatch.countDown();
@@ -134,8 +136,7 @@
 
         // commit phase
         spyCommit.interceptMessageOut(3).get(1, TimeUnit.SECONDS);
-        Assert.assertTrue(sessionResult.get());
-        Assert.assertFalse(hasFailures.get());
+        Assert.assertFalse(sessionResult.get().hasFailed());
 
         // expect no other messages except from intercepted so far
         spyPrepare.interceptNoMsg(100, TimeUnit.MILLISECONDS);
@@ -148,7 +149,7 @@
 
 
     @Test
-    public void testMockedMessagingPrepareFailureP1() throws InterruptedException, ExecutionException, TimeoutException
+    public void testMockedMessagingPrepareFailureP1() throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         CountDownLatch latch = createLatch();
         createPrepareSpy(Collections.singleton(PARTICIPANT1), Collections.emptySet(), latch);
@@ -156,7 +157,7 @@
     }
 
     @Test
-    public void testMockedMessagingPrepareFailureP12() throws InterruptedException, ExecutionException, TimeoutException
+    public void testMockedMessagingPrepareFailureP12() throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         CountDownLatch latch = createLatch();
         createPrepareSpy(Lists.newArrayList(PARTICIPANT1, PARTICIPANT2), Collections.emptySet(), latch);
@@ -164,7 +165,7 @@
     }
 
     @Test
-    public void testMockedMessagingPrepareFailureP3() throws InterruptedException, ExecutionException, TimeoutException
+    public void testMockedMessagingPrepareFailureP3() throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         CountDownLatch latch = createLatch();
         createPrepareSpy(Collections.singleton(PARTICIPANT3), Collections.emptySet(), latch);
@@ -172,7 +173,7 @@
     }
 
     @Test
-    public void testMockedMessagingPrepareFailureP123() throws InterruptedException, ExecutionException, TimeoutException
+    public void testMockedMessagingPrepareFailureP123() throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         CountDownLatch latch = createLatch();
         createPrepareSpy(Lists.newArrayList(PARTICIPANT1, PARTICIPANT2, PARTICIPANT3), Collections.emptySet(), latch);
@@ -180,34 +181,33 @@
     }
 
     @Test(expected = TimeoutException.class)
-    public void testMockedMessagingPrepareFailureWrongSessionId() throws InterruptedException, ExecutionException, TimeoutException
+    public void testMockedMessagingPrepareFailureWrongSessionId() throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         CountDownLatch latch = createLatch();
-        createPrepareSpy(Collections.singleton(PARTICIPANT1), Collections.emptySet(), (msgOut) -> UUID.randomUUID(), latch);
+        createPrepareSpy(Collections.singleton(PARTICIPANT1), Collections.emptySet(), (msgOut) -> nextTimeUUID(), latch);
         testMockedMessagingPrepareFailure(latch);
     }
 
-    private void testMockedMessagingPrepareFailure(CountDownLatch prepareLatch) throws InterruptedException, ExecutionException, TimeoutException
+    private void testMockedMessagingPrepareFailure(CountDownLatch prepareLatch) throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         // we expect FailSession messages to all participants
         MockMessagingSpy sendFailSessionExpectedSpy = createFailSessionSpy(Lists.newArrayList(PARTICIPANT1, PARTICIPANT2, PARTICIPANT3));
 
-        UUID uuid = registerSession(cfs, true, true);
+        TimeUUID uuid = registerSession(cfs, true, true);
         CoordinatorSession coordinator = ActiveRepairService.instance.consistent.coordinated.registerSession(uuid, PARTICIPANTS, false);
         AtomicBoolean repairSubmitted = new AtomicBoolean(false);
-        SettableFuture<List<RepairSessionResult>> repairFuture = SettableFuture.create();
-        Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSupplier = () ->
+        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
+        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () ->
         {
             repairSubmitted.set(true);
             return repairFuture;
         };
 
         // coordinator sends prepare requests to create local session and perform anticompaction
-        AtomicBoolean proposeFailed = new AtomicBoolean(false);
         Assert.assertFalse(repairSubmitted.get());
 
         // execute repair and start prepare phase
-        ListenableFuture<Boolean> sessionResult = coordinator.execute(sessionSupplier, proposeFailed);
+        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);
         prepareLatch.countDown();
         // prepare completed
         try
@@ -220,33 +220,33 @@
         }
         sendFailSessionExpectedSpy.interceptMessageOut(3).get(1, TimeUnit.SECONDS);
         Assert.assertFalse(repairSubmitted.get());
-        Assert.assertTrue(proposeFailed.get());
+        Assert.assertTrue(sessionResult.isDone());
+        Assert.assertNotNull(sessionResult.cause());
         Assert.assertEquals(ConsistentSession.State.FAILED, coordinator.getState());
         Assert.assertFalse(ActiveRepairService.instance.consistent.local.isSessionInProgress(uuid));
     }
 
     @Test
-    public void testMockedMessagingPrepareTimeout() throws InterruptedException, ExecutionException, TimeoutException
+    public void testMockedMessagingPrepareTimeout() throws InterruptedException, ExecutionException, TimeoutException, NoSuchRepairSessionException
     {
         MockMessagingSpy spyPrepare = createPrepareSpy(Collections.emptySet(), Collections.singleton(PARTICIPANT3), new CountDownLatch(0));
         MockMessagingSpy sendFailSessionUnexpectedSpy = createFailSessionSpy(Lists.newArrayList(PARTICIPANT1, PARTICIPANT2, PARTICIPANT3));
 
-        UUID uuid = registerSession(cfs, true, true);
+        TimeUUID uuid = registerSession(cfs, true, true);
         CoordinatorSession coordinator = ActiveRepairService.instance.consistent.coordinated.registerSession(uuid, PARTICIPANTS, false);
         AtomicBoolean repairSubmitted = new AtomicBoolean(false);
-        SettableFuture<List<RepairSessionResult>> repairFuture = SettableFuture.create();
-        Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSupplier = () ->
+        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
+        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () ->
         {
             repairSubmitted.set(true);
             return repairFuture;
         };
 
         // coordinator sends prepare requests to create local session and perform anticompaction
-        AtomicBoolean hasFailures = new AtomicBoolean(false);
         Assert.assertFalse(repairSubmitted.get());
 
         // execute repair and start prepare phase
-        ListenableFuture<Boolean> sessionResult = coordinator.execute(sessionSupplier, hasFailures);
+        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);
         try
         {
             sessionResult.get(1, TimeUnit.SECONDS);
@@ -264,7 +264,6 @@
         spyPrepare.expectMockedMessage(2).get(100, TimeUnit.MILLISECONDS);
         sendFailSessionUnexpectedSpy.interceptNoMsg(100, TimeUnit.MILLISECONDS);
         Assert.assertFalse(repairSubmitted.get());
-        Assert.assertFalse(hasFailures.get());
         Assert.assertEquals(ConsistentSession.State.PREPARING, coordinator.getState());
         Assert.assertFalse(ActiveRepairService.instance.consistent.local.isSessionInProgress(uuid));
     }
@@ -278,7 +277,7 @@
 
     private MockMessagingSpy createPrepareSpy(Collection<InetAddressAndPort> failed,
                                               Collection<InetAddressAndPort> timeout,
-                                              Function<PrepareConsistentRequest, UUID> sessionIdFunc,
+                                              Function<PrepareConsistentRequest, TimeUUID> sessionIdFunc,
                                               CountDownLatch latch)
     {
         return MockMessagingService.when(verb(Verb.PREPARE_CONSISTENT_REQ)).respond((msgOut, to) ->
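
A condensed sketch of the future plumbing this test now exercises: Guava's `SettableFuture`/`ListenableFuture` are replaced by the in-tree `AsyncPromise`/`Promise`/`Future`, `execute()` no longer takes an `AtomicBoolean` out-parameter, and the outcome is read from the returned `Future<CoordinatedRepairResult>`. Illustrative only; `coordinator` and `results` stand in for the fixtures the test builds, and in the real test the prepare/finalize/commit latches and message spies must also fire before `get()` returns.

```java
import java.util.List;
import java.util.function.Supplier;

import org.junit.Assert;

import org.apache.cassandra.repair.CoordinatedRepairResult;
import org.apache.cassandra.repair.RepairSessionResult;
import org.apache.cassandra.repair.consistent.CoordinatorSession;
import org.apache.cassandra.utils.concurrent.AsyncPromise;
import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.Promise;

final class CoordinatorFutureSketch
{
    static void happyPath(CoordinatorSession coordinator, List<RepairSessionResult> results) throws Exception
    {
        // the local repair work is represented by a promise the test completes by hand
        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () -> repairFuture;

        // execute() now returns the result future directly; the old
        // AtomicBoolean "hasFailures" flag is gone
        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);

        // completing the local repair promise drives the coordinator forward
        repairFuture.trySuccess(CoordinatedRepairResult.success(results));

        // success/failure is read off the returned future (in the real test this
        // only completes after the finalize and commit phases)
        Assert.assertFalse(sessionResult.get().hasFailed());
    }
}
```
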
diff --git a/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionTest.java b/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionTest.java
index 1cee312..85574c7 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionTest.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionTest.java
@@ -19,33 +19,44 @@
 package org.apache.cassandra.repair.consistent;
 
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Supplier;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.repair.AbstractRepairTest;
+import org.apache.cassandra.repair.CoordinatedRepairResult;
 import org.apache.cassandra.repair.RepairSessionResult;
 import org.apache.cassandra.repair.messages.FailSession;
 import org.apache.cassandra.repair.messages.FinalizeCommit;
 import org.apache.cassandra.repair.messages.FinalizePropose;
 import org.apache.cassandra.repair.messages.PrepareConsistentRequest;
 import org.apache.cassandra.repair.messages.RepairMessage;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.Promise;
 
-import static org.apache.cassandra.repair.consistent.ConsistentSession.State.*;
+import static org.apache.cassandra.repair.consistent.ConsistentSession.State.FAILED;
+import static org.apache.cassandra.repair.consistent.ConsistentSession.State.FINALIZE_PROMISED;
+import static org.apache.cassandra.repair.consistent.ConsistentSession.State.PREPARED;
+import static org.apache.cassandra.repair.consistent.ConsistentSession.State.PREPARING;
+import static org.apache.cassandra.repair.consistent.ConsistentSession.State.REPAIRING;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class CoordinatorSessionTest extends AbstractRepairTest
 {
@@ -54,9 +65,9 @@
     {
         CoordinatorSession.Builder builder = CoordinatorSession.builder();
         builder.withState(PREPARING);
-        builder.withSessionID(UUIDGen.getTimeUUID());
+        builder.withSessionID(nextTimeUUID());
         builder.withCoordinator(COORDINATOR);
-        builder.withUUIDTableIds(Sets.newHashSet(UUIDGen.getTimeUUID(), UUIDGen.getTimeUUID()));
+        builder.withUUIDTableIds(Sets.newHashSet(UUID.randomUUID(), UUID.randomUUID()));
         builder.withRepairedAt(System.currentTimeMillis());
         builder.withRanges(Sets.newHashSet(RANGE1, RANGE2, RANGE3));
         builder.withParticipants(Sets.newHashSet(PARTICIPANT1, PARTICIPANT2, PARTICIPANT3));
@@ -209,18 +220,17 @@
     {
         InstrumentedCoordinatorSession coordinator = createInstrumentedSession();
         AtomicBoolean repairSubmitted = new AtomicBoolean(false);
-        SettableFuture<List<RepairSessionResult>> repairFuture = SettableFuture.create();
-        Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSupplier = () ->
+        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
+        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () ->
         {
             repairSubmitted.set(true);
             return repairFuture;
         };
 
         // coordinator sends prepare requests to create local session and perform anticompaction
-        AtomicBoolean hasFailures = new AtomicBoolean(false);
         Assert.assertFalse(repairSubmitted.get());
         Assert.assertTrue(coordinator.sentMessages.isEmpty());
-        ListenableFuture sessionResult = coordinator.execute(sessionSupplier, hasFailures);
+        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);
 
         for (InetAddressAndPort participant : PARTICIPANTS)
         {
@@ -252,7 +262,7 @@
                                                                     createResult(coordinator));
 
         coordinator.sentMessages.clear();
-        repairFuture.set(results);
+        repairFuture.trySuccess(CoordinatedRepairResult.success(results));
 
         // propose messages should have been sent once all repair sessions completed successfully
         for (InetAddressAndPort participant : PARTICIPANTS)
@@ -285,7 +295,7 @@
         }
 
         Assert.assertTrue(sessionResult.isDone());
-        Assert.assertFalse(hasFailures.get());
+        sessionResult.syncUninterruptibly();
     }
 
     @Test
@@ -293,18 +303,17 @@
     {
         InstrumentedCoordinatorSession coordinator = createInstrumentedSession();
         AtomicBoolean repairSubmitted = new AtomicBoolean(false);
-        SettableFuture<List<RepairSessionResult>> repairFuture = SettableFuture.create();
-        Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSupplier = () ->
+        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
+        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () ->
         {
             repairSubmitted.set(true);
             return repairFuture;
         };
 
         // coordinator sends prepare requests to create local session and perform anticompaction
-        AtomicBoolean hasFailures = new AtomicBoolean(false);
         Assert.assertFalse(repairSubmitted.get());
         Assert.assertTrue(coordinator.sentMessages.isEmpty());
-        ListenableFuture sessionResult = coordinator.execute(sessionSupplier, hasFailures);
+        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);
         for (InetAddressAndPort participant : PARTICIPANTS)
         {
             PrepareConsistentRequest expected = new PrepareConsistentRequest(coordinator.sessionID, COORDINATOR, new HashSet<>(PARTICIPANTS));
@@ -329,6 +338,7 @@
 
         Assert.assertEquals(ConsistentSession.State.REPAIRING, coordinator.getState());
 
+        List<Collection<Range<Token>>> ranges = Arrays.asList(coordinator.ranges, coordinator.ranges, coordinator.ranges);
         ArrayList<RepairSessionResult> results = Lists.newArrayList(createResult(coordinator),
                                                                     null,
                                                                     createResult(coordinator));
@@ -336,7 +346,7 @@
         coordinator.sentMessages.clear();
         Assert.assertFalse(coordinator.failCalled);
         coordinator.onFail = () -> Assert.assertEquals(REPAIRING, coordinator.getState());
-        repairFuture.set(results);
+        repairFuture.trySuccess(CoordinatedRepairResult.create(ranges, results));
         Assert.assertTrue(coordinator.failCalled);
 
         // all participants should have been notified of session failure
@@ -347,7 +357,7 @@
         }
 
         Assert.assertTrue(sessionResult.isDone());
-        Assert.assertTrue(hasFailures.get());
+        Assert.assertNotNull(sessionResult.cause());
     }
 
     @Test
@@ -355,18 +365,17 @@
     {
         InstrumentedCoordinatorSession coordinator = createInstrumentedSession();
         AtomicBoolean repairSubmitted = new AtomicBoolean(false);
-        SettableFuture<List<RepairSessionResult>> repairFuture = SettableFuture.create();
-        Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSupplier = () ->
+        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
+        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () ->
         {
             repairSubmitted.set(true);
             return repairFuture;
         };
 
         // coordinator sends prepare requests to create local session and perform anticompaction
-        AtomicBoolean hasFailures = new AtomicBoolean(false);
         Assert.assertFalse(repairSubmitted.get());
         Assert.assertTrue(coordinator.sentMessages.isEmpty());
-        ListenableFuture sessionResult = coordinator.execute(sessionSupplier, hasFailures);
+        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);
         for (InetAddressAndPort participant : PARTICIPANTS)
         {
             PrepareConsistentRequest expected = new PrepareConsistentRequest(coordinator.sessionID, COORDINATOR, new HashSet<>(PARTICIPANTS));
@@ -413,7 +422,7 @@
         Assert.assertFalse(coordinator.sentMessages.containsKey(PARTICIPANT2));
 
         Assert.assertTrue(sessionResult.isDone());
-        Assert.assertTrue(hasFailures.get());
+        Assert.assertNotNull(sessionResult.cause());
     }
 
     @Test
@@ -421,18 +430,17 @@
     {
         InstrumentedCoordinatorSession coordinator = createInstrumentedSession();
         AtomicBoolean repairSubmitted = new AtomicBoolean(false);
-        SettableFuture<List<RepairSessionResult>> repairFuture = SettableFuture.create();
-        Supplier<ListenableFuture<List<RepairSessionResult>>> sessionSupplier = () ->
+        Promise<CoordinatedRepairResult> repairFuture = AsyncPromise.uncancellable();
+        Supplier<Future<CoordinatedRepairResult>> sessionSupplier = () ->
         {
             repairSubmitted.set(true);
             return repairFuture;
         };
 
         // coordinator sends prepare requests to create local session and perform anticompaction
-        AtomicBoolean hasFailures = new AtomicBoolean(false);
         Assert.assertFalse(repairSubmitted.get());
         Assert.assertTrue(coordinator.sentMessages.isEmpty());
-        ListenableFuture sessionResult = coordinator.execute(sessionSupplier, hasFailures);
+        Future<CoordinatedRepairResult> sessionResult = coordinator.execute(sessionSupplier);
 
         for (InetAddressAndPort participant : PARTICIPANTS)
         {
@@ -464,7 +472,7 @@
                                                                     createResult(coordinator));
 
         coordinator.sentMessages.clear();
-        repairFuture.set(results);
+        repairFuture.trySuccess(CoordinatedRepairResult.success(results));
 
         // propose messages should have been sent once all repair sessions completed successfully
         for (InetAddressAndPort participant : PARTICIPANTS)
@@ -500,6 +508,6 @@
         }
 
         Assert.assertTrue(sessionResult.isDone());
-        Assert.assertTrue(hasFailures.get());
+        Assert.assertNotNull(sessionResult.cause());
     }
 }
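
With the `hasFailures` flag removed, these tests read the outcome straight off the returned future. A condensed sketch of the two shapes used above; `repairFuture`, `sessionResult`, `ranges` and `results` are the objects built by the surrounding tests.

```java
// failure path: the result list contains a null RepairSessionResult, so the
// combined CoordinatedRepairResult is treated as failed and the coordinator's
// future completes with a cause (the test above also asserts failCalled)
repairFuture.trySuccess(CoordinatedRepairResult.create(ranges, results));
Assert.assertTrue(sessionResult.isDone());
Assert.assertNotNull(sessionResult.cause());

// success path: complete with CoordinatedRepairResult.success(results);
// syncUninterruptibly() rethrows if the session failed, so it doubles as an assertion
// sessionResult.syncUninterruptibly();
```
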
diff --git a/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionsTest.java b/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionsTest.java
index b9b1fbf..725833e 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionsTest.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/CoordinatorSessionsTest.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.repair.consistent;
 
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.Sets;
 import org.junit.Assert;
@@ -29,6 +28,7 @@
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 import org.apache.cassandra.repair.AbstractRepairTest;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -38,7 +38,9 @@
 import org.apache.cassandra.repair.messages.PrepareConsistentResponse;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class CoordinatorSessionsTest extends AbstractRepairTest
 {
@@ -88,12 +90,12 @@
             return new InstrumentedCoordinatorSession(builder);
         }
 
-        public InstrumentedCoordinatorSession getSession(UUID sessionId)
+        public InstrumentedCoordinatorSession getSession(TimeUUID sessionId)
         {
             return (InstrumentedCoordinatorSession) super.getSession(sessionId);
         }
 
-        public InstrumentedCoordinatorSession registerSession(UUID sessionId, Set<InetAddressAndPort> peers, boolean isForced)
+        public InstrumentedCoordinatorSession registerSession(TimeUUID sessionId, Set<InetAddressAndPort> peers, boolean isForced) throws NoSuchRepairSessionException
         {
             return (InstrumentedCoordinatorSession) super.registerSession(sessionId, peers, isForced);
         }
@@ -108,16 +110,16 @@
         cfs = Schema.instance.getColumnFamilyStoreInstance(cfm.id);
     }
 
-    private static UUID registerSession()
+    private static TimeUUID registerSession()
     {
         return registerSession(cfs, true, true);
     }
 
     @Test
-    public void registerSessionTest()
+    public void registerSessionTest() throws NoSuchRepairSessionException
     {
         CoordinatorSessions sessions = new CoordinatorSessions();
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         CoordinatorSession session = sessions.registerSession(sessionID, PARTICIPANTS, false);
 
         Assert.assertEquals(ConsistentSession.State.PREPARING, session.getState());
@@ -134,10 +136,10 @@
     }
 
     @Test
-    public void handlePrepareResponse()
+    public void handlePrepareResponse() throws NoSuchRepairSessionException
     {
         InstrumentedCoordinatorSessions sessions = new InstrumentedCoordinatorSessions();
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
 
         InstrumentedCoordinatorSession session = sessions.registerSession(sessionID, PARTICIPANTS, false);
         Assert.assertEquals(0, session.prepareResponseCalls);
@@ -145,24 +147,24 @@
         sessions.handlePrepareResponse(new PrepareConsistentResponse(sessionID, PARTICIPANT1, true));
         Assert.assertEquals(1, session.prepareResponseCalls);
         Assert.assertEquals(PARTICIPANT1, session.preparePeer);
-        Assert.assertEquals(true, session.prepareSuccess);
+        Assert.assertTrue(session.prepareSuccess);
     }
 
     @Test
     public void handlePrepareResponseNoSession()
     {
         InstrumentedCoordinatorSessions sessions = new InstrumentedCoordinatorSessions();
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
 
         sessions.handlePrepareResponse(new PrepareConsistentResponse(fakeID, PARTICIPANT1, true));
         Assert.assertNull(sessions.getSession(fakeID));
     }
 
     @Test
-    public void handlePromiseResponse()
+    public void handlePromiseResponse() throws NoSuchRepairSessionException
     {
         InstrumentedCoordinatorSessions sessions = new InstrumentedCoordinatorSessions();
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
 
         InstrumentedCoordinatorSession session = sessions.registerSession(sessionID, PARTICIPANTS, false);
         Assert.assertEquals(0, session.finalizePromiseCalls);
@@ -170,24 +172,24 @@
         sessions.handleFinalizePromiseMessage(new FinalizePromise(sessionID, PARTICIPANT1, true));
         Assert.assertEquals(1, session.finalizePromiseCalls);
         Assert.assertEquals(PARTICIPANT1, session.promisePeer);
-        Assert.assertEquals(true, session.promiseSuccess);
+        Assert.assertTrue(session.promiseSuccess);
     }
 
     @Test
     public void handlePromiseResponseNoSession()
     {
         InstrumentedCoordinatorSessions sessions = new InstrumentedCoordinatorSessions();
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
 
         sessions.handleFinalizePromiseMessage(new FinalizePromise(fakeID, PARTICIPANT1, true));
         Assert.assertNull(sessions.getSession(fakeID));
     }
 
     @Test
-    public void handleFailureMessage()
+    public void handleFailureMessage() throws NoSuchRepairSessionException
     {
         InstrumentedCoordinatorSessions sessions = new InstrumentedCoordinatorSessions();
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
 
         InstrumentedCoordinatorSession session = sessions.registerSession(sessionID, PARTICIPANTS, false);
         Assert.assertEquals(0, session.failCalls);
@@ -200,7 +202,7 @@
     public void handleFailureMessageNoSession()
     {
         InstrumentedCoordinatorSessions sessions = new InstrumentedCoordinatorSessions();
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
 
         sessions.handleFailSessionMessage(new FailSession(fakeID));
         Assert.assertNull(sessions.getSession(fakeID));
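
`registerSession(...)` now declares the checked `NoSuchRepairSessionException`, so the tests above simply add it to their `throws` clauses. A minimal sketch of the resulting shape, reusing the helpers and constants defined in this test class.

```java
@Test
public void registerSessionSketch() throws NoSuchRepairSessionException
{
    CoordinatorSessions sessions = new CoordinatorSessions();
    TimeUUID sessionID = registerSession();                 // helper defined above
    CoordinatorSession session = sessions.registerSession(sessionID, PARTICIPANTS, false);
    Assert.assertEquals(ConsistentSession.State.PREPARING, session.getState());
}
```
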
diff --git a/test/unit/org/apache/cassandra/repair/consistent/LocalSessionAccessor.java b/test/unit/org/apache/cassandra/repair/consistent/LocalSessionAccessor.java
index 790f719..6fd1c9e 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/LocalSessionAccessor.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/LocalSessionAccessor.java
@@ -19,10 +19,11 @@
 package org.apache.cassandra.repair.consistent;
 
 import java.util.Set;
-import java.util.UUID;
 
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.repair.NoSuchRepairSessionException;
 import org.apache.cassandra.service.ActiveRepairService;
+import org.apache.cassandra.utils.TimeUUID;
 
 /**
  * makes package private hacks available to compaction tests
@@ -36,26 +37,34 @@
         ARS.consistent.local.start();
     }
 
-    public static void prepareUnsafe(UUID sessionID, InetAddressAndPort coordinator, Set<InetAddressAndPort> peers)
+    public static void prepareUnsafe(TimeUUID sessionID, InetAddressAndPort coordinator, Set<InetAddressAndPort> peers)
     {
-        ActiveRepairService.ParentRepairSession prs = ARS.getParentRepairSession(sessionID);
+        ActiveRepairService.ParentRepairSession prs = null;
+        try
+        {
+            prs = ARS.getParentRepairSession(sessionID);
+        }
+        catch (NoSuchRepairSessionException e)
+        {
+            throw new RuntimeException(e);
+        }
         assert prs != null;
         LocalSession session = ARS.consistent.local.createSessionUnsafe(sessionID, prs, peers);
         ARS.consistent.local.putSessionUnsafe(session);
     }
 
-    public static long finalizeUnsafe(UUID sessionID)
+    public static long finalizeUnsafe(TimeUUID sessionID)
     {
         LocalSession session = setState(sessionID, ConsistentSession.State.FINALIZED);
         return session.repairedAt;
     }
 
-    public static void failUnsafe(UUID sessionID)
+    public static void failUnsafe(TimeUUID sessionID)
     {
         setState(sessionID, ConsistentSession.State.FAILED);
     }
 
-    public static LocalSession setState(UUID sessionId, ConsistentSession.State state)
+    public static LocalSession setState(TimeUUID sessionId, ConsistentSession.State state)
     {
         LocalSession session = ARS.consistent.local.getSession(sessionId);
         assert session != null;
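
Where a helper's signature should stay exception-free, the checked exception is wrapped rather than declared, which is what `prepareUnsafe` does above. A minimal sketch of that pattern, assuming `ARS` is `ActiveRepairService.instance` as in this accessor.

```java
static ActiveRepairService.ParentRepairSession parentSessionOrThrow(TimeUUID sessionID)
{
    try
    {
        // getParentRepairSession(...) now throws the checked NoSuchRepairSessionException
        return ARS.getParentRepairSession(sessionID);
    }
    catch (NoSuchRepairSessionException e)
    {
        // keep the old exception-free signature by rethrowing unchecked
        throw new RuntimeException(e);
    }
}
```
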
diff --git a/test/unit/org/apache/cassandra/repair/consistent/LocalSessionTest.java b/test/unit/org/apache/cassandra/repair/consistent/LocalSessionTest.java
index 80a12c0..c83335b 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/LocalSessionTest.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/LocalSessionTest.java
@@ -32,8 +32,6 @@
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
 
 import org.junit.Assert;
 import org.junit.Before;
@@ -67,21 +65,25 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
+import org.apache.cassandra.utils.concurrent.Future;
+import org.apache.cassandra.utils.concurrent.Promise;
 
 import static org.apache.cassandra.repair.consistent.ConsistentSession.State.*;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.psjava.util.AssertStatus.assertTrue;
 
 public class LocalSessionTest extends AbstractRepairTest
 {
-    private static final UUID TID1 = UUIDGen.getTimeUUID();
-    private static final UUID TID2 = UUIDGen.getTimeUUID();
+    private static final UUID TID1 = UUID.randomUUID();
+    private static final UUID TID2 = UUID.randomUUID();
 
     static LocalSession.Builder createBuilder()
     {
         LocalSession.Builder builder = LocalSession.builder();
         builder.withState(PREPARING);
-        builder.withSessionID(UUIDGen.getTimeUUID());
+        builder.withSessionID(nextTimeUUID());
         builder.withCoordinator(COORDINATOR);
         builder.withUUIDTableIds(Sets.newHashSet(TID1, TID2));
         builder.withRepairedAt(System.currentTimeMillis());
@@ -138,16 +140,16 @@
             sentMessages.get(destination).add(message.payload);
         }
 
-        SettableFuture<Object> prepareSessionFuture = null;
+        AsyncPromise<List<Void>> prepareSessionFuture = null;
         boolean prepareSessionCalled = false;
 
         @Override
-        ListenableFuture prepareSession(KeyspaceRepairManager repairManager,
-                                        UUID sessionID,
-                                        Collection<ColumnFamilyStore> tables,
-                                        RangesAtEndpoint ranges,
-                                        ExecutorService executor,
-                                        BooleanSupplier isCancelled)
+        Future<List<Void>> prepareSession(KeyspaceRepairManager repairManager,
+                                          TimeUUID sessionID,
+                                          Collection<ColumnFamilyStore> tables,
+                                          RangesAtEndpoint ranges,
+                                          ExecutorService executor,
+                                          BooleanSupplier isCancelled)
         {
             prepareSessionCalled = true;
             if (prepareSessionFuture != null)
@@ -161,17 +163,17 @@
         }
 
         boolean failSessionCalled = false;
-        public void failSession(UUID sessionID, boolean sendMessage)
+        public void failSession(TimeUUID sessionID, boolean sendMessage)
         {
             failSessionCalled = true;
             super.failSession(sessionID, sendMessage);
         }
 
-        public LocalSession prepareForTest(UUID sessionID)
+        public LocalSession prepareForTest(TimeUUID sessionID)
         {
-            prepareSessionFuture = SettableFuture.create();
+            prepareSessionFuture = new AsyncPromise<>();
             handlePrepareMessage(PARTICIPANT1, new PrepareConsistentRequest(sessionID, COORDINATOR, PARTICIPANTS));
-            prepareSessionFuture.set(new Object());
+            prepareSessionFuture.trySuccess(null);
             sentMessages.clear();
             return getSession(sessionID);
         }
@@ -192,11 +194,11 @@
             return true;
         }
 
-        public Map<UUID, Integer> completedSessions = new HashMap<>();
+        public Map<TimeUUID, Integer> completedSessions = new HashMap<>();
 
         protected void sessionCompleted(LocalSession session)
         {
-            UUID sessionID = session.sessionID;
+            TimeUUID sessionID = session.sessionID;
             int calls = completedSessions.getOrDefault(sessionID, 0);
             completedSessions.put(sessionID, calls + 1);
         }
@@ -228,7 +230,7 @@
         repairCfs.truncateBlocking();
     }
 
-    private static UUID registerSession()
+    private static TimeUUID registerSession()
     {
         return registerSession(cfs, true, true);
     }
@@ -266,12 +268,12 @@
     @Test
     public void prepareSuccessCase()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
         // replacing future so we can inspect state before and after anti compaction callback
-        sessions.prepareSessionFuture = SettableFuture.create();
+        sessions.prepareSessionFuture = new AsyncPromise<>();
         Assert.assertFalse(sessions.prepareSessionCalled);
         sessions.handlePrepareMessage(PARTICIPANT1, new PrepareConsistentRequest(sessionID, COORDINATOR, PARTICIPANTS));
         Assert.assertTrue(sessions.prepareSessionCalled);
@@ -284,7 +286,7 @@
         Assert.assertEquals(session, sessions.loadUnsafe(sessionID));
 
         // anti compaction has now finished, so state in memory and on disk should be PREPARED
-        sessions.prepareSessionFuture.set(new Object());
+        sessions.prepareSessionFuture.trySuccess(null);
         session = sessions.getSession(sessionID);
         Assert.assertNotNull(session);
         Assert.assertEquals(PREPARED, session.getState());
@@ -301,12 +303,12 @@
     @Test
     public void prepareAntiCompactFailure()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
         // replacing future so we can inspect state before and after anti compaction callback
-        sessions.prepareSessionFuture = SettableFuture.create();
+        sessions.prepareSessionFuture = new AsyncPromise<>();
         Assert.assertFalse(sessions.prepareSessionCalled);
         sessions.handlePrepareMessage(PARTICIPANT1, new PrepareConsistentRequest(sessionID, COORDINATOR, PARTICIPANTS));
         Assert.assertTrue(sessions.prepareSessionCalled);
@@ -319,7 +321,7 @@
         Assert.assertEquals(session, sessions.loadUnsafe(sessionID));
 
         // anti compaction has now failed, so state in memory and on disk should be FAILED
-        sessions.prepareSessionFuture.setException(new RuntimeException());
+        sessions.prepareSessionFuture.tryFailure(new RuntimeException());
         session = sessions.getSession(sessionID);
         Assert.assertNotNull(session);
         Assert.assertEquals(FAILED, session.getState());
@@ -338,7 +340,7 @@
     @Test
     public void prepareWithNonExistantParentSession()
     {
-        UUID sessionID = UUIDGen.getTimeUUID();
+        TimeUUID sessionID = nextTimeUUID();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.handlePrepareMessage(PARTICIPANT1, new PrepareConsistentRequest(sessionID, COORDINATOR, PARTICIPANTS));
         Assert.assertNull(sessions.getSession(sessionID));
@@ -351,12 +353,12 @@
     @Test
     public void prepareCancellation()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         AtomicReference<BooleanSupplier> isCancelledRef = new AtomicReference<>();
-        SettableFuture future = SettableFuture.create();
+        Promise<List<Void>> future = new AsyncPromise<>();
 
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions() {
-            ListenableFuture prepareSession(KeyspaceRepairManager repairManager, UUID sessionID, Collection<ColumnFamilyStore> tables, RangesAtEndpoint ranges, ExecutorService executor, BooleanSupplier isCancelled)
+            Future<List<Void>> prepareSession(KeyspaceRepairManager repairManager, TimeUUID sessionID, Collection<ColumnFamilyStore> tables, RangesAtEndpoint ranges, ExecutorService executor, BooleanSupplier isCancelled)
             {
                 isCancelledRef.set(isCancelled);
                 return future;
@@ -375,14 +377,14 @@
         Assert.assertTrue(isCancelled.getAsBoolean());
 
         // now that the session has failed, it sends a negative response to the coordinator (even if the anti-compaction completed successfully)
-        future.set(new Object());
+        future.trySuccess(null);
         assertMessagesSent(sessions, COORDINATOR, new PrepareConsistentResponse(sessionID, PARTICIPANT1, false));
     }
 
     @Test
     public void maybeSetRepairing()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -403,7 +405,7 @@
     public void maybeSetRepairingDuplicates()
     {
 
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -437,7 +439,7 @@
     public void maybeSetRepairingNonExistantSession()
     {
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
         sessions.maybeSetRepairing(fakeID);
         Assert.assertTrue(sessions.sentMessages.isEmpty());
     }
@@ -449,7 +451,7 @@
     @Test
     public void finalizeProposeSuccessCase()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -475,7 +477,7 @@
     @Test
     public void finalizeProposeInvalidStateFailure()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -494,7 +496,7 @@
     public void finalizeProposeNonExistantSessionFailure()
     {
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
         sessions.handleFinalizeProposeMessage(COORDINATOR, new FinalizePropose(fakeID));
         Assert.assertNull(sessions.getSession(fakeID));
         assertMessagesSent(sessions, COORDINATOR, new FailSession(fakeID));
@@ -507,7 +509,7 @@
     @Test
     public void finalizeCommitSuccessCase()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -532,7 +534,7 @@
     {
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
         sessions.handleFinalizeCommitMessage(PARTICIPANT1, new FinalizeCommit(fakeID));
         Assert.assertNull(sessions.getSession(fakeID));
         Assert.assertTrue(sessions.sentMessages.isEmpty());
@@ -541,7 +543,7 @@
     @Test
     public void failSession()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -563,7 +565,7 @@
     @Test
     public void handleFailMessage()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -579,7 +581,7 @@
     @Test
     public void sendStatusRequest() throws Exception
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         LocalSession session = sessions.prepareForTest(sessionID);
@@ -596,7 +598,7 @@
     @Test
     public void handleStatusRequest() throws Exception
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         LocalSession session = sessions.prepareForTest(sessionID);
@@ -616,7 +618,7 @@
         sessions.start();
 
         sessions.sentMessages.clear();
-        UUID sessionID = UUIDGen.getTimeUUID();
+        TimeUUID sessionID = nextTimeUUID();
         sessions.handleStatusRequest(PARTICIPANT2, new StatusRequest(sessionID));
         assertNoMessagesSent(sessions, PARTICIPANT1);
         assertMessagesSent(sessions, PARTICIPANT2, new StatusResponse(sessionID, FAILED));
@@ -626,7 +628,7 @@
     @Test
     public void handleStatusResponseFinalized() throws Exception
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         LocalSession session = sessions.prepareForTest(sessionID);
@@ -639,7 +641,7 @@
     @Test
     public void handleStatusResponseFinalizedRedundant() throws Exception
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         LocalSession session = sessions.prepareForTest(sessionID);
@@ -652,7 +654,7 @@
     @Test
     public void handleStatusResponseFailed() throws Exception
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         LocalSession session = sessions.prepareForTest(sessionID);
@@ -665,7 +667,7 @@
     @Test
     public void handleStatusResponseFailedRedundant() throws Exception
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         LocalSession session = sessions.prepareForTest(sessionID);
@@ -678,7 +680,7 @@
     @Test
     public void handleStatusResponseNoop() throws Exception
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         LocalSession session = sessions.prepareForTest(sessionID);
@@ -691,7 +693,7 @@
     @Test
     public void handleStatusResponseNoSession() throws Exception
     {
-        UUID sessionID = UUIDGen.getTimeUUID();
+        TimeUUID sessionID = nextTimeUUID();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -705,10 +707,10 @@
     @Test
     public void isSessionInProgress()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
-        sessions.prepareSessionFuture = SettableFuture.create();  // prevent moving to prepared
+        sessions.prepareSessionFuture = new AsyncPromise<>();  // prevent moving to prepared
         sessions.handlePrepareMessage(PARTICIPANT1, new PrepareConsistentRequest(sessionID, COORDINATOR, PARTICIPANTS));
 
         LocalSession session = sessions.getSession(sessionID);
@@ -732,12 +734,12 @@
     @Test
     public void isSessionInProgressFailed()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
-        sessions.prepareSessionFuture = SettableFuture.create();
+        sessions.prepareSessionFuture = new AsyncPromise<>();
         sessions.handlePrepareMessage(PARTICIPANT1, new PrepareConsistentRequest(sessionID, COORDINATOR, PARTICIPANTS));
-        sessions.prepareSessionFuture.set(new Object());
+        sessions.prepareSessionFuture.trySuccess(null);
 
         Assert.assertTrue(sessions.isSessionInProgress(sessionID));
         sessions.failSession(sessionID);
@@ -747,7 +749,7 @@
     @Test
     public void isSessionInProgressNonExistantSession()
     {
-        UUID fakeID = UUIDGen.getTimeUUID();
+        TimeUUID fakeID = nextTimeUUID();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         Assert.assertFalse(sessions.isSessionInProgress(fakeID));
@@ -756,7 +758,7 @@
     @Test
     public void finalRepairedAtFinalized()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -773,7 +775,7 @@
     @Test
     public void finalRepairedAtFailed()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
 
@@ -789,7 +791,7 @@
     @Test
     public void finalRepairedAtNoSession()
     {
-        UUID fakeID = registerSession();
+        TimeUUID fakeID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         long repairedAt = sessions.getFinalSessionRepairedAt(fakeID);
@@ -799,7 +801,7 @@
     @Test(expected = IllegalStateException.class)
     public void finalRepairedAtInProgress()
     {
-        UUID sessionID = registerSession();
+        TimeUUID sessionID = registerSession();
         InstrumentedLocalSessions sessions = new InstrumentedLocalSessions();
         sessions.start();
         sessions.prepareForTest(sessionID);
@@ -816,9 +818,9 @@
         InstrumentedLocalSessions initialSessions = new InstrumentedLocalSessions();
         initialSessions.start();
         Assert.assertEquals(0, initialSessions.getNumSessions());
-        UUID id1 = registerSession();
-        UUID id2 = registerSession();
-        UUID id3 = registerSession();
+        TimeUUID id1 = registerSession();
+        TimeUUID id2 = registerSession();
+        TimeUUID id3 = registerSession();
 
         initialSessions.prepareForTest(id1);
         initialSessions.prepareForTest(id2);
@@ -866,9 +868,9 @@
         InstrumentedLocalSessions initialSessions = new InstrumentedLocalSessions();
         initialSessions.start();
         Assert.assertEquals(0, initialSessions.getNumSessions());
-        UUID id1 = registerSession();
-        UUID id2 = registerSession();
-        UUID id3 = registerSession();
+        TimeUUID id1 = registerSession();
+        TimeUUID id2 = registerSession();
+        TimeUUID id3 = registerSession();
 
         initialSessions.prepareForTest(id1);
         initialSessions.prepareForTest(id2);
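
The recurring change in this test: Guava's `SettableFuture` becomes the in-tree `AsyncPromise`, with `set(...)`/`setException(...)` replaced by `trySuccess(...)`/`tryFailure(...)`. A minimal sketch, illustrative only.

```java
import java.util.List;

import org.apache.cassandra.utils.concurrent.AsyncPromise;

final class PromiseSketch
{
    static void sketch()
    {
        // was: SettableFuture<Object> f = SettableFuture.create(); f.set(new Object());
        AsyncPromise<List<Void>> ok = new AsyncPromise<>();
        ok.trySuccess(null);      // the promise is typed List<Void>, so null marks completion

        // was: f.setException(new RuntimeException());
        AsyncPromise<List<Void>> failed = new AsyncPromise<>();
        failed.tryFailure(new RuntimeException("anti-compaction failed"));

        // the try* methods return false rather than throwing if the promise is already complete
    }
}
```
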
diff --git a/test/unit/org/apache/cassandra/repair/consistent/PendingRepairStatTest.java b/test/unit/org/apache/cassandra/repair/consistent/PendingRepairStatTest.java
index 6c42724..b3bab20 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/PendingRepairStatTest.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/PendingRepairStatTest.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Set;
-import java.util.UUID;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
@@ -31,6 +30,7 @@
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
@@ -47,13 +47,14 @@
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 import static org.apache.cassandra.repair.consistent.ConsistentSession.State.FAILED;
 import static org.apache.cassandra.repair.consistent.ConsistentSession.State.FINALIZED;
 import static org.apache.cassandra.repair.consistent.ConsistentSession.State.PREPARING;
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class PendingRepairStatTest extends AbstractRepairTest
 {
@@ -91,7 +92,7 @@
     {
         LocalSession.Builder builder = LocalSession.builder();
         builder.withState(PREPARING);
-        builder.withSessionID(UUIDGen.getTimeUUID());
+        builder.withSessionID(nextTimeUUID());
         builder.withCoordinator(COORDINATOR);
         builder.withUUIDTableIds(Sets.newHashSet(cfm.id.asUUID()));
         builder.withRepairedAt(System.currentTimeMillis());
@@ -114,11 +115,11 @@
             int key = startKey + i;
             QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES (?, ?)", cfm.keyspace, cfm.name), key, key);
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         return Iterables.getOnlyElement(Sets.difference(cfs.getLiveSSTables(), existing));
     }
 
-    private static void mutateRepaired(SSTableReader sstable, long repairedAt, UUID pendingRepair)
+    private static void mutateRepaired(SSTableReader sstable, long repairedAt, TimeUUID pendingRepair)
     {
         try
         {
diff --git a/test/unit/org/apache/cassandra/repair/consistent/admin/SchemaArgsParserTest.java b/test/unit/org/apache/cassandra/repair/consistent/admin/SchemaArgsParserTest.java
index 9d98c9d..1a5605e 100644
--- a/test/unit/org/apache/cassandra/repair/consistent/admin/SchemaArgsParserTest.java
+++ b/test/unit/org/apache/cassandra/repair/consistent/admin/SchemaArgsParserTest.java
@@ -18,8 +18,6 @@
 
 package org.apache.cassandra.repair.consistent.admin;
 
-import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 
 import com.google.common.collect.Sets;
@@ -33,7 +31,6 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
-import org.assertj.core.util.Lists;
 
 public class SchemaArgsParserTest
 {
diff --git a/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializationsTest.java b/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializationsTest.java
index 761a77c..634fab3 100644
--- a/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializationsTest.java
+++ b/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializationsTest.java
@@ -51,7 +51,8 @@
 import org.apache.cassandra.streaming.SessionSummary;
 import org.apache.cassandra.streaming.StreamSummary;
 import org.apache.cassandra.utils.MerkleTrees;
-import org.apache.cassandra.utils.UUIDGen;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 public class RepairMessageSerializationsTest
 {
@@ -85,7 +86,7 @@
     private RepairJobDesc buildRepairJobDesc()
     {
         List<Range<Token>> tokenRanges = buildTokenRanges();
-        return new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), "serializationsTestKeyspace", "repairMessages", tokenRanges);
+        return new RepairJobDesc(nextTimeUUID(), nextTimeUUID(), "serializationsTestKeyspace", "repairMessages", tokenRanges);
     }
 
     private List<Range<Token>> buildTokenRanges()
@@ -161,8 +162,8 @@
         InetAddressAndPort dst = InetAddressAndPort.getByName("127.0.0.3");
         List<SessionSummary> summaries = new ArrayList<>();
         summaries.add(new SessionSummary(src, dst,
-                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUIDGen.getTimeUUID()), 5, 100)),
-                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUIDGen.getTimeUUID()), 500, 10))
+                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUID.randomUUID()), 5, 100)),
+                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUID.randomUUID()), 500, 10))
         ));
         SyncResponse msg = new SyncResponse(buildRepairJobDesc(), new SyncNodePair(src, dst), true, summaries);
         serializeRoundTrip(msg, SyncResponse.serializer);
@@ -171,7 +172,7 @@
     @Test
     public void prepareMessage() throws IOException
     {
-        PrepareMessage msg = new PrepareMessage(UUID.randomUUID(), new ArrayList<TableId>() {{add(TableId.generate());}},
+        PrepareMessage msg = new PrepareMessage(nextTimeUUID(), new ArrayList<TableId>() {{add(TableId.generate());}},
                                                 buildTokenRanges(), true, 100000L, false,
                                                 PreviewKind.NONE);
         serializeRoundTrip(msg, PrepareMessage.serializer);
@@ -187,7 +188,7 @@
     @Test
     public void cleanupMessage() throws IOException
     {
-        CleanupMessage msg = new CleanupMessage(UUID.randomUUID());
+        CleanupMessage msg = new CleanupMessage(nextTimeUUID());
         serializeRoundTrip(msg, CleanupMessage.serializer);
     }
 }
diff --git a/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializerTest.java b/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializerTest.java
index fedf498..f2e0b5b0 100644
--- a/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializerTest.java
+++ b/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializerTest.java
@@ -20,7 +20,6 @@
 
 import java.io.IOException;
 
-import com.google.common.collect.Sets;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -29,7 +28,10 @@
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.utils.UUIDGen;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.apache.cassandra.locator.InetAddressAndPort.getByName;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 
 /**
  * verifies repair message serializers are working as advertised
@@ -64,7 +66,7 @@
         InetAddressAndPort peer2 = InetAddressAndPort.getByName("10.0.0.3");
         InetAddressAndPort peer3 = InetAddressAndPort.getByName("10.0.0.4");
         PrepareConsistentRequest expected =
-            new PrepareConsistentRequest(UUIDGen.getTimeUUID(), coordinator, Sets.newHashSet(peer1, peer2, peer3));
+            new PrepareConsistentRequest(nextTimeUUID(), coordinator, newHashSet(peer1, peer2, peer3));
         PrepareConsistentRequest actual = serdes(PrepareConsistentRequest.serializer, expected);
         Assert.assertEquals(expected, actual);
     }
@@ -73,7 +75,7 @@
     public void prepareConsistentResponse() throws Exception
     {
         PrepareConsistentResponse expected =
-            new PrepareConsistentResponse(UUIDGen.getTimeUUID(), InetAddressAndPort.getByName("10.0.0.2"), true);
+            new PrepareConsistentResponse(nextTimeUUID(), getByName("10.0.0.2"), true);
         PrepareConsistentResponse actual = serdes(PrepareConsistentResponse.serializer, expected);
         Assert.assertEquals(expected, actual);
     }
@@ -81,7 +83,7 @@
     @Test
     public void failSession() throws Exception
     {
-        FailSession expected = new FailSession(UUIDGen.getTimeUUID());
+        FailSession expected = new FailSession(nextTimeUUID());
         FailSession actual = serdes(FailSession.serializer, expected);
         Assert.assertEquals(expected, actual);
     }
@@ -89,7 +91,7 @@
     @Test
     public void finalizeCommit() throws Exception
     {
-        FinalizeCommit expected = new FinalizeCommit(UUIDGen.getTimeUUID());
+        FinalizeCommit expected = new FinalizeCommit(nextTimeUUID());
         FinalizeCommit actual = serdes(FinalizeCommit.serializer, expected);
         Assert.assertEquals(expected, actual);
     }
@@ -97,7 +99,7 @@
     @Test
     public void finalizePromise() throws Exception
     {
-        FinalizePromise expected = new FinalizePromise(UUIDGen.getTimeUUID(), InetAddressAndPort.getByName("10.0.0.2"), true);
+        FinalizePromise expected = new FinalizePromise(nextTimeUUID(), getByName("10.0.0.2"), true);
         FinalizePromise actual = serdes(FinalizePromise.serializer, expected);
         Assert.assertEquals(expected, actual);
     }
@@ -105,7 +107,7 @@
     @Test
     public void finalizePropose() throws Exception
     {
-        FinalizePropose expected = new FinalizePropose(UUIDGen.getTimeUUID());
+        FinalizePropose expected = new FinalizePropose(nextTimeUUID());
         FinalizePropose actual = serdes(FinalizePropose.serializer, expected);
         Assert.assertEquals(expected, actual);
     }
diff --git a/test/unit/org/apache/cassandra/repair/messages/RepairOptionTest.java b/test/unit/org/apache/cassandra/repair/messages/RepairOptionTest.java
index 484d7a8..a6ca084 100644
--- a/test/unit/org/apache/cassandra/repair/messages/RepairOptionTest.java
+++ b/test/unit/org/apache/cassandra/repair/messages/RepairOptionTest.java
@@ -26,16 +26,12 @@
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.google.common.collect.ImmutableMap;
-
-import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.repair.RepairParallelism;
-import org.apache.cassandra.utils.FBUtilities;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -54,11 +50,7 @@
 
         // parse with empty options
         RepairOption option = RepairOption.parse(new HashMap<String, String>(), partitioner);
-
-        if (FBUtilities.isWindows && (DatabaseDescriptor.getDiskAccessMode() != Config.DiskAccessMode.standard || DatabaseDescriptor.getIndexAccessMode() != Config.DiskAccessMode.standard))
-            assertTrue(option.getParallelism() == RepairParallelism.PARALLEL);
-        else
-            assertTrue(option.getParallelism() == RepairParallelism.SEQUENTIAL);
+        assertTrue(option.getParallelism() == RepairParallelism.SEQUENTIAL);
 
         assertFalse(option.isPrimaryRange());
         assertFalse(option.isIncremental());
diff --git a/test/unit/org/apache/cassandra/schema/CreateTableValidationTest.java b/test/unit/org/apache/cassandra/schema/CreateTableValidationTest.java
index f2abc7c..5541e35 100644
--- a/test/unit/org/apache/cassandra/schema/CreateTableValidationTest.java
+++ b/test/unit/org/apache/cassandra/schema/CreateTableValidationTest.java
@@ -58,6 +58,7 @@
         createTable("CREATE TABLE %s (a int PRIMARY KEY, b int) WITH bloom_filter_fp_chance = 0.1");
     }
 
+    @Deprecated // these warning thresholds will be replaced by equivalent guardrails
     @Test
     public void testCreateKeyspaceTableWarning() throws IOException
     {
diff --git a/test/unit/org/apache/cassandra/schema/MemtableParamsTest.java b/test/unit/org/apache/cassandra/schema/MemtableParamsTest.java
new file mode 100644
index 0000000..7db5204
--- /dev/null
+++ b/test/unit/org/apache/cassandra/schema/MemtableParamsTest.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.Map;
+
+import com.google.common.collect.ImmutableMap;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.config.InheritingClass;
+import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.db.memtable.SkipListMemtableFactory;
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+import static org.junit.Assert.assertEquals;
+
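+/**
+ * Verifies MemtableParams.expandDefinitions: expansion of named memtable configurations, resolution
+ * of InheritingClass entries that extend other definitions, and handling of the implicit "default"
+ * entry (overriding it directly or defining it through inheritance).
+ */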
+public class MemtableParamsTest
+{
+    static final ParameterizedClass DEFAULT = SkipListMemtableFactory.CONFIGURATION;
+
+    @Test
+    public void testDefault()
+    {
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions(ImmutableMap.of());
+        assertEquals(ImmutableMap.of("default", DEFAULT), map);
+    }
+
+    @Test
+    public void testDefaultRemapped()
+    {
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions
+        (
+            ImmutableMap.of("remap", new InheritingClass("default", null, null))
+        );
+        assertEquals(ImmutableMap.of("default", DEFAULT,
+                                     "remap", DEFAULT),
+                     map);
+    }
+
+    @Test
+    public void testOne()
+    {
+        final InheritingClass one = new InheritingClass(null, "SkipList", null);
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions(ImmutableMap.of("one", one));
+        assertEquals(ImmutableMap.of("default", DEFAULT,
+                                     "one", one),
+                     map);
+    }
+
+    @Test
+    public void testExtends()
+    {
+        final InheritingClass one = new InheritingClass(null, "SkipList", null);
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions
+        (
+            ImmutableMap.of("one", one,
+                            "two", new InheritingClass("one",
+                                                       null,
+                                                       ImmutableMap.of("extra", "value")))
+        );
+
+        assertEquals(ImmutableMap.of("default", DEFAULT,
+                                     "one", one,
+                                     "two", new ParameterizedClass("SkipList",
+                                                                   ImmutableMap.of("extra", "value"))),
+                     map);
+    }
+
+    @Test
+    public void testExtendsReplace()
+    {
+        final InheritingClass one = new InheritingClass(null,
+                                                        "SkipList",
+                                                        ImmutableMap.of("extra", "valueOne"));
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions
+        (
+            ImmutableMap.of("one", one,
+                            "two", new InheritingClass("one",
+                                                       null,
+                                                       ImmutableMap.of("extra", "value")))
+        );
+        assertEquals(ImmutableMap.of("default", DEFAULT,
+                                     "one", one,
+                                     "two", new ParameterizedClass("SkipList",
+                                                                   ImmutableMap.of("extra", "value"))),
+                     map);
+    }
+
+    @Test
+    public void testDoubleExtends()
+    {
+        final InheritingClass one = new InheritingClass(null, "SkipList", null);
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions
+        (
+            ImmutableMap.of("one", one,
+                            "two", new InheritingClass("one",
+                                                       null,
+                                                       ImmutableMap.of("param", "valueTwo",
+                                                                       "extra", "value")),
+                            "three", new InheritingClass("two",
+                                                         "OtherClass",
+                                                         ImmutableMap.of("param", "valueThree",
+                                                                         "extraThree", "three")))
+        );
+        assertEquals(ImmutableMap.of("default", DEFAULT,
+                                     "one", one,
+                                     "two", new ParameterizedClass("SkipList",
+                                                                   ImmutableMap.of("param", "valueTwo",
+                                                                                   "extra", "value")),
+                                     "three", new ParameterizedClass("OtherClass",
+                                                                     ImmutableMap.of("param", "valueThree",
+                                                                                     "extra", "value",
+                                                                                     "extraThree", "three"))),
+                     map);
+    }
+
+    @Test
+    public void testInvalidExtends()
+    {
+        final InheritingClass one = new InheritingClass(null, "SkipList", null);
+        try
+        {
+            Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions
+            (
+                ImmutableMap.of("two", new InheritingClass("one",
+                                                           null,
+                                                           ImmutableMap.of("extra", "value")),
+                                "one", one)
+            );
+            Assert.fail("Expected exception.");
+        }
+        catch (ConfigurationException e)
+        {
+            // expected
+        }
+    }
+
+    @Test
+    public void testInvalidSelfExtends()
+    {
+        try
+        {
+            Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions
+            (
+                ImmutableMap.of("one", new InheritingClass("one",
+                                                           null,
+                                                           ImmutableMap.of("extra", "value")))
+            );
+            Assert.fail("Expected exception.");
+        }
+        catch (ConfigurationException e)
+        {
+            // expected
+        }
+    }
+
+    @Test
+    public void testReplaceDefault()
+    {
+        final InheritingClass one = new InheritingClass(null,
+                                                        "SkipList",
+                                                        ImmutableMap.of("extra", "valueOne"));
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions(ImmutableMap.of("default", one));
+        assertEquals(ImmutableMap.of("default", one), map);
+    }
+
+    @Test
+    public void testDefaultExtends()
+    {
+        final InheritingClass one = new InheritingClass(null,
+                                                        "SkipList",
+                                                        ImmutableMap.of("extra", "valueOne"));
+        Map<String, ParameterizedClass> map = MemtableParams.expandDefinitions
+        (
+            ImmutableMap.of("one", one,
+                            "default", new InheritingClass("one", null, ImmutableMap.of()))
+        );
+        assertEquals(ImmutableMap.of("one", one,
+                                     "default", one),
+                     map);
+    }
+    // Note: The factories constructed from these parameters are tested in CreateTest and AlterTest.
+}
diff --git a/test/unit/org/apache/cassandra/schema/MigrationCoordinatorTest.java b/test/unit/org/apache/cassandra/schema/MigrationCoordinatorTest.java
index 7ad5e2d..3783320 100644
--- a/test/unit/org/apache/cassandra/schema/MigrationCoordinatorTest.java
+++ b/test/unit/org/apache/cassandra/schema/MigrationCoordinatorTest.java
@@ -19,31 +19,55 @@
 package org.apache.cassandra.schema;
 
 import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
-import java.util.List;
 import java.util.Queue;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Futures;
 import org.junit.Assert;
 import org.junit.Test;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.concurrent.ImmediateExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.metrics.MessagingMetrics;
+import org.apache.cassandra.net.EndpointMessagingVersions;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.RequestCallback;
+import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
+import org.mockito.ArgumentCaptor;
+import org.mockito.ArgumentMatchers;
+import org.mockito.internal.creation.MockSettingsImpl;
 
 import static com.google.common.util.concurrent.Futures.getUnchecked;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class MigrationCoordinatorTest
 {
@@ -57,6 +81,8 @@
     private static final UUID V1 = UUID.randomUUID();
     private static final UUID V2 = UUID.randomUUID();
 
+    private static final EndpointState validEndpointState = mock(EndpointState.class);
+
     static
     {
         try
@@ -71,105 +97,116 @@
         }
 
         DatabaseDescriptor.daemonInitialization();
+
+        when(validEndpointState.getApplicationState(ApplicationState.RELEASE_VERSION)).thenReturn(VersionedValue.unsafeMakeVersionedValue(FBUtilities.getReleaseVersionString(), 0));
     }
 
-    private static class InstrumentedCoordinator extends MigrationCoordinator
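+    /**
+     * Test harness that wires a MigrationCoordinator to mocked MessagingService, Gossiper and
+     * executors; outgoing schema pull requests are captured in {@code requests} as
+     * (endpoint, callback) pairs so tests can answer or fail them explicitly.
+     */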
+    private static class Wrapper
     {
+        final Queue<Pair<InetAddressAndPort, RequestCallback<Collection<Mutation>>>> requests = new LinkedList<>();
+        final ScheduledExecutorService oneTimeExecutor = mock(ScheduledExecutorService.class);
+        final Gossiper gossiper = mock(Gossiper.class);
+        final Set<InetAddressAndPort> mergedSchemasFrom = new HashSet<>();
+        final EndpointMessagingVersions versions = mock(EndpointMessagingVersions.class);
+        final MessagingService messagingService = mock(MessagingService.class, new MockSettingsImpl<>().defaultAnswer(a -> {
+            throw new UnsupportedOperationException();
+        }).useConstructor(true, versions, mock(MessagingMetrics.class)));
 
-        Queue<Callback> requests = new LinkedList<>();
-        @Override
-        protected void sendMigrationMessage(MigrationCoordinator.Callback callback)
+        UUID localSchemaVersion = LOCAL_VERSION;
+
+        final MigrationCoordinator coordinator;
+
+        private Wrapper()
         {
-            requests.add(callback);
+            this(3);
         }
 
-        boolean shouldPullSchema = true;
-        @Override
-        protected boolean shouldPullSchema(UUID version)
+        private Wrapper(int maxOutstandingRequests)
         {
-            return shouldPullSchema;
+            when(oneTimeExecutor.scheduleWithFixedDelay(any(), anyLong(), anyLong(), any())).thenAnswer(a -> {
+                a.getArgument(0, Runnable.class).run();
+                return mock(ScheduledFuture.class);
+            });
+            when(gossiper.getEndpointStateForEndpoint(any())).thenReturn(validEndpointState);
+            when(gossiper.isAlive(any())).thenReturn(true);
+            doAnswer(a -> requests.add(Pair.create(a.getArgument(1, InetAddressAndPort.class), a.getArgument(2, RequestCallback.class))))
+                    .when(messagingService)
+                    .sendWithCallback(any(), any(), any());
+
+            when(versions.knows(any())).thenReturn(true);
+            when(versions.getRaw(any())).thenReturn(MessagingService.current_version);
+            this.coordinator = new MigrationCoordinator(messagingService,
+                                                        ImmediateExecutor.INSTANCE,
+                                                        oneTimeExecutor,
+                                                        maxOutstandingRequests,
+                                                        gossiper,
+                                                        () -> localSchemaVersion,
+                                                        (endpoint, ignored) -> mergedSchemasFrom.add(endpoint));
         }
 
-        boolean shouldPullFromEndpoint = true;
-        @Override
-        protected boolean shouldPullFromEndpoint(InetAddressAndPort endpoint)
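+        /** Convenience overload that resolves the endpoint name before delegating to the mock setup below. */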
+        private InetAddressAndPort configureMocksForEndpoint(String endpointName, EndpointState es, Integer msgVersion, boolean gossipOnlyMember) throws UnknownHostException
         {
-            return shouldPullFromEndpoint;
+            InetAddressAndPort endpoint = InetAddressAndPort.getByName(endpointName);
+            return configureMocksForEndpoint(endpoint, es, msgVersion, gossipOnlyMember);
         }
 
-        boolean shouldPullImmediately = true;
-        @Override
-        protected boolean shouldPullImmediately(InetAddressAndPort endpoint, UUID version)
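+        /**
+         * Stubs gossip state, liveness, gossip-only membership and messaging version for the given
+         * endpoint; a null msgVersion marks the endpoint's messaging version as unknown.
+         */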
+        private InetAddressAndPort configureMocksForEndpoint(InetAddressAndPort endpoint, EndpointState es, Integer msgVersion, boolean gossipOnlyMember) throws UnknownHostException
         {
-            return shouldPullImmediately;
-        }
+            when(gossiper.getEndpointStateForEndpoint(endpoint)).thenReturn(es);
+            if (msgVersion == null)
+            {
+                when(versions.knows(endpoint)).thenReturn(false);
+                when(versions.getRaw(endpoint)).thenThrow(new IllegalArgumentException());
+            }
+            else
+            {
+                when(versions.knows(endpoint)).thenReturn(true);
+                when(versions.getRaw(endpoint)).thenReturn(msgVersion);
+            }
+            when(gossiper.isGossipOnlyMember(endpoint)).thenReturn(gossipOnlyMember);
+            when(gossiper.isAlive(endpoint)).thenReturn(true);
 
-        Set<InetAddressAndPort> deadNodes = new HashSet<>();
-        protected boolean isAlive(InetAddressAndPort endpoint)
-        {
-            return !deadNodes.contains(endpoint);
-        }
-
-        UUID localVersion = LOCAL_VERSION;
-        @Override
-        protected boolean isLocalVersion(UUID version)
-        {
-            return localVersion.equals(version);
-        }
-
-        int maxOutstandingRequests = 3;
-        @Override
-        protected int getMaxOutstandingVersionRequests()
-        {
-            return maxOutstandingRequests;
-        }
-
-        Set<InetAddressAndPort> mergedSchemasFrom = new HashSet<>();
-        @Override
-        protected void mergeSchemaFrom(InetAddressAndPort endpoint, Collection<Mutation> mutations)
-        {
-            mergedSchemasFrom.add(endpoint);
+            return endpoint;
         }
     }
 
     @Test
     public void requestResponseCycle() throws InterruptedException
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
-        coordinator.maxOutstandingRequests = 1;
+        Wrapper wrapper = new Wrapper(1);
+        MigrationCoordinator coordinator = wrapper.coordinator;
 
-        Assert.assertTrue(coordinator.requests.isEmpty());
+        Assert.assertTrue(wrapper.requests.isEmpty());
 
         // first schema report should send a migration request
         getUnchecked(coordinator.reportEndpointVersion(EP1, V1));
-        Assert.assertEquals(1, coordinator.requests.size());
+        Assert.assertEquals(1, wrapper.requests.size());
         Assert.assertFalse(coordinator.awaitSchemaRequests(1));
 
         // second should not
         getUnchecked(coordinator.reportEndpointVersion(EP2, V1));
-        Assert.assertEquals(1, coordinator.requests.size());
+        Assert.assertEquals(1, wrapper.requests.size());
         Assert.assertFalse(coordinator.awaitSchemaRequests(1));
 
         // until the first request fails, then the second endpoint should be contacted
-        MigrationCoordinator.Callback request1 = coordinator.requests.poll();
-        Assert.assertEquals(EP1, request1.endpoint);
-        getUnchecked(request1.fail());
-        Assert.assertTrue(coordinator.mergedSchemasFrom.isEmpty());
+        Pair<InetAddressAndPort, RequestCallback<Collection<Mutation>>> request1 = wrapper.requests.poll();
+        Assert.assertEquals(EP1, request1.left);
+        request1.right.onFailure(null, null);
+        Assert.assertTrue(wrapper.mergedSchemasFrom.isEmpty());
         Assert.assertFalse(coordinator.awaitSchemaRequests(1));
 
         // ... then the second endpoint should be contacted
-        Assert.assertEquals(1, coordinator.requests.size());
-        MigrationCoordinator.Callback request2 = coordinator.requests.poll();
-        Assert.assertEquals(EP2, request2.endpoint);
+        Assert.assertEquals(1, wrapper.requests.size());
+        Pair<InetAddressAndPort, RequestCallback<Collection<Mutation>>> request2 = wrapper.requests.poll();
+        Assert.assertEquals(EP2, request2.left);
         Assert.assertFalse(coordinator.awaitSchemaRequests(1));
-        getUnchecked(request2.response(Collections.emptyList()));
-        Assert.assertEquals(EP2, Iterables.getOnlyElement(coordinator.mergedSchemasFrom));
+        request2.right.onResponse(Message.remoteResponse(request2.left, Verb.SCHEMA_PULL_RSP, Collections.emptyList()));
+        Assert.assertEquals(EP2, Iterables.getOnlyElement(wrapper.mergedSchemasFrom));
         Assert.assertTrue(coordinator.awaitSchemaRequests(1));
 
         // and migration tasks should not be sent out for subsequent version reports
         getUnchecked(coordinator.reportEndpointVersion(EP3, V1));
-        Assert.assertTrue(coordinator.requests.isEmpty());
-
+        Assert.assertTrue(wrapper.requests.isEmpty());
     }
 
     /**
@@ -179,14 +216,14 @@
     @Test
     public void versionsAreSignaledWhenDeleted()
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
+        Wrapper wrapper = new Wrapper();
 
-        coordinator.reportEndpointVersion(EP1, V1);
-        WaitQueue.Signal signal = coordinator.getVersionInfoUnsafe(V1).register();
+        wrapper.coordinator.reportEndpointVersion(EP1, V1);
+        WaitQueue.Signal signal = wrapper.coordinator.getVersionInfoUnsafe(V1).register();
         Assert.assertFalse(signal.isSignalled());
 
-        coordinator.reportEndpointVersion(EP1, V2);
-        Assert.assertNull(coordinator.getVersionInfoUnsafe(V1));
+        wrapper.coordinator.reportEndpointVersion(EP1, V2);
+        Assert.assertNull(wrapper.coordinator.getVersionInfoUnsafe(V1));
 
         Assert.assertTrue(signal.isSignalled());
     }
@@ -199,31 +236,31 @@
     @Test
     public void versionsAreSignaledWhenEndpointsRemoved()
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
+        Wrapper wrapper = new Wrapper();
 
-        coordinator.reportEndpointVersion(EP1, V1);
-        WaitQueue.Signal signal = coordinator.getVersionInfoUnsafe(V1).register();
+        wrapper.coordinator.reportEndpointVersion(EP1, V1);
+        WaitQueue.Signal signal = wrapper.coordinator.getVersionInfoUnsafe(V1).register();
         Assert.assertFalse(signal.isSignalled());
 
-        coordinator.removeAndIgnoreEndpoint(EP1);
-        Assert.assertNull(coordinator.getVersionInfoUnsafe(V1));
+        wrapper.coordinator.removeAndIgnoreEndpoint(EP1);
+        Assert.assertNull(wrapper.coordinator.getVersionInfoUnsafe(V1));
 
         Assert.assertTrue(signal.isSignalled());
     }
 
 
-    private static void assertNoContact(InstrumentedCoordinator coordinator, InetAddressAndPort endpoint, UUID version, boolean startupShouldBeUnblocked)
+    private static void assertNoContact(Wrapper wrapper, InetAddressAndPort endpoint, UUID version, boolean startupShouldBeUnblocked)
     {
-        Assert.assertTrue(coordinator.requests.isEmpty());
-        Future<Void> future = coordinator.reportEndpointVersion(EP1, V1);
+        Assert.assertTrue(wrapper.requests.isEmpty());
+        Future<Void> future = wrapper.coordinator.reportEndpointVersion(endpoint, version);
         if (future != null)
             getUnchecked(future);
-        Assert.assertTrue(coordinator.requests.isEmpty());
+        Assert.assertTrue(wrapper.requests.isEmpty());
 
-        Assert.assertEquals(startupShouldBeUnblocked, coordinator.awaitSchemaRequests(1));
+        Assert.assertEquals(startupShouldBeUnblocked, wrapper.coordinator.awaitSchemaRequests(1));
     }
 
-    private static void assertNoContact(InstrumentedCoordinator coordinator, boolean startupShouldBeUnblocked)
+    private static void assertNoContact(Wrapper coordinator, boolean startupShouldBeUnblocked)
     {
         assertNoContact(coordinator, EP1, V1, startupShouldBeUnblocked);
     }
@@ -231,91 +268,85 @@
     @Test
     public void dontContactNodesWithSameSchema()
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
+        Wrapper wrapper = new Wrapper();
 
-        coordinator.localVersion = V1;
-        assertNoContact(coordinator, true);
+        wrapper.localSchemaVersion = V1;
+        assertNoContact(wrapper, true);
     }
 
     @Test
     public void dontContactIncompatibleNodes()
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
+        Wrapper wrapper = new Wrapper();
 
-        coordinator.shouldPullFromEndpoint = false;
-        assertNoContact(coordinator, false);
+        when(wrapper.gossiper.getEndpointStateForEndpoint(any())).thenReturn(null); // shouldPullFromEndpoint should return false in this case
+        assertNoContact(wrapper, false);
     }
 
     @Test
     public void dontContactDeadNodes()
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
+        Wrapper wrapper = new Wrapper();
 
-        coordinator.deadNodes.add(EP1);
-        assertNoContact(coordinator, EP1, V1, false);
+        when(wrapper.gossiper.isAlive(ArgumentMatchers.eq(EP1))).thenReturn(false);
+        assertNoContact(wrapper, EP1, V1, false);
     }
 
     /**
-     * If a node has become incompativle between when the task was scheduled and when it
+     * If a node has become incompatible between when the task was scheduled and when it
      * was run, we should detect that and fail the task
      */
     @Test
     public void testGossipRace()
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator() {
-            protected boolean shouldPullImmediately(InetAddressAndPort endpoint, UUID version)
-            {
-                // this is the last thing that gets called before scheduling the pull, so set this flag here
-                shouldPullFromEndpoint = false;
-                return super.shouldPullImmediately(endpoint, version);
-            }
-        };
+        Wrapper wrapper = new Wrapper();
+        when(wrapper.gossiper.getEndpointStateForEndpoint(any())).thenReturn(validEndpointState, (EndpointState) null);
 
-        Assert.assertTrue(coordinator.shouldPullFromEndpoint(EP1));
-        assertNoContact(coordinator, EP1, V1, false);
+        assertNoContact(wrapper, EP1, V1, false);
     }
 
     @Test
     public void testWeKeepSendingRequests() throws Exception
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
+        Wrapper wrapper = new Wrapper();
 
-        getUnchecked(coordinator.reportEndpointVersion(EP3, V2));
-        coordinator.requests.remove().response(Collections.emptyList());
+        getUnchecked(wrapper.coordinator.reportEndpointVersion(EP3, V2));
+        Pair<InetAddressAndPort, RequestCallback<Collection<Mutation>>> cb = wrapper.requests.remove();
+        cb.right.onResponse(Message.remoteResponse(cb.left, Verb.SCHEMA_PULL_RSP, Collections.emptyList()));
 
-        getUnchecked(coordinator.reportEndpointVersion(EP1, V1));
-        getUnchecked(coordinator.reportEndpointVersion(EP2, V1));
+        getUnchecked(wrapper.coordinator.reportEndpointVersion(EP1, V1));
+        getUnchecked(wrapper.coordinator.reportEndpointVersion(EP2, V1));
 
-        MigrationCoordinator.Callback prev = null;
+        Pair<InetAddressAndPort, RequestCallback<Collection<Mutation>>> prev = null;
         Set<InetAddressAndPort> EPs = Sets.newHashSet(EP1, EP2);
         int ep1requests = 0;
         int ep2requests = 0;
 
-        for (int i=0; i<10; i++)
+        for (int i = 0; i < 10; i++)
         {
-            Assert.assertEquals(String.format("%s", i), 2, coordinator.requests.size());
+            Assert.assertEquals(String.format("%s", i), 2, wrapper.requests.size());
 
-            MigrationCoordinator.Callback next = coordinator.requests.remove();
+            Pair<InetAddressAndPort, RequestCallback<Collection<Mutation>>> next = wrapper.requests.remove();
 
             // we should be contacting endpoints in a round robin fashion
-            Assert.assertTrue(EPs.contains(next.endpoint));
-            if (prev != null && prev.endpoint.equals(next.endpoint))
-                Assert.fail(String.format("Not expecting prev %s to be equal to next %s", prev.endpoint, next.endpoint));
+            Assert.assertTrue(EPs.contains(next.left));
+            if (prev != null && prev.left.equals(next.left))
+                Assert.fail(String.format("Not expecting prev %s to be equal to next %s", prev.left, next.left));
 
             // should send a new request
-            next.fail().get();
+            next.right.onFailure(null, null);
             prev = next;
-            Assert.assertFalse(coordinator.awaitSchemaRequests(1));
+            Assert.assertFalse(wrapper.coordinator.awaitSchemaRequests(1));
 
-            Assert.assertEquals(2, coordinator.requests.size());
+            Assert.assertEquals(2, wrapper.requests.size());
         }
         logger.info("{} -> {}", EP1, ep1requests);
         logger.info("{} -> {}", EP2, ep2requests);
 
         // a single success should unblock startup though
-        coordinator.requests.remove().response(Collections.emptyList());
-        Assert.assertTrue(coordinator.awaitSchemaRequests(1));
-
+        cb = wrapper.requests.remove();
+        cb.right.onResponse(Message.remoteResponse(cb.left, Verb.SCHEMA_PULL_RSP, Collections.emptyList()));
+        Assert.assertTrue(wrapper.coordinator.awaitSchemaRequests(1));
     }
 
     /**
@@ -325,15 +356,88 @@
     @Test
     public void pullUnreceived()
     {
-        InstrumentedCoordinator coordinator = new InstrumentedCoordinator();
+        Wrapper wrapper = new Wrapper();
 
-        coordinator.shouldPullFromEndpoint = false;
-        assertNoContact(coordinator, false);
+        when(wrapper.gossiper.getEndpointStateForEndpoint(any())).thenReturn(null); // shouldPullFromEndpoint should return false in this case
+        assertNoContact(wrapper, false);
 
-        coordinator.shouldPullFromEndpoint = true;
-        Assert.assertEquals(0, coordinator.requests.size());
-        List<Future<Void>> futures = coordinator.pullUnreceivedSchemaVersions();
-        futures.forEach(Futures::getUnchecked);
-        Assert.assertEquals(1, coordinator.requests.size());
+        when(wrapper.gossiper.getEndpointStateForEndpoint(any())).thenReturn(validEndpointState);
+        Assert.assertEquals(0, wrapper.requests.size());
+        wrapper.coordinator.start();
+        Assert.assertEquals(1, wrapper.requests.size());
+    }
+
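+    /**
+     * Schema mutations should be pushed only to live peers with a known, current messaging version,
+     * excluding this node; all other nodes are reported back as skipped.
+     */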
+    @Test
+    public void pushSchemaMutationsOnlyToViableNodes() throws UnknownHostException
+    {
+        Wrapper wrapper = new Wrapper();
+        Collection<Mutation> mutations = Arrays.asList(mock(Mutation.class));
+
+        EndpointState validState = mock(EndpointState.class);
+
+        InetAddressAndPort thisNode = wrapper.configureMocksForEndpoint(FBUtilities.getBroadcastAddressAndPort(), validState, MessagingService.current_version, false);
+        InetAddressAndPort unknownNode = wrapper.configureMocksForEndpoint("10.0.0.1:8000", validState, null, false);
+        InetAddressAndPort invalidMessagingVersionNode = wrapper.configureMocksForEndpoint("10.0.0.2:8000", validState, MessagingService.VERSION_30, false);
+        InetAddressAndPort regularNode = wrapper.configureMocksForEndpoint("10.0.0.3:8000", validState, MessagingService.current_version, false);
+
+        when(wrapper.gossiper.getLiveMembers()).thenReturn(Sets.newHashSet(thisNode, unknownNode, invalidMessagingVersionNode, regularNode));
+
+        ArgumentCaptor<Message> msg = ArgumentCaptor.forClass(Message.class);
+        ArgumentCaptor<InetAddressAndPort> targetEndpoint = ArgumentCaptor.forClass(InetAddressAndPort.class);
+        doNothing().when(wrapper.messagingService).send(msg.capture(), targetEndpoint.capture());
+
+        Pair<Set<InetAddressAndPort>, Set<InetAddressAndPort>> result = wrapper.coordinator.pushSchemaMutations(mutations);
+        assertThat(result.left).containsExactlyInAnyOrder(regularNode);
+        assertThat(result.right).containsExactlyInAnyOrder(thisNode, unknownNode, invalidMessagingVersionNode);
+        assertThat(msg.getAllValues()).hasSize(1);
+        assertThat(msg.getValue().payload).isEqualTo(mutations);
+        assertThat(msg.getValue().verb()).isEqualTo(Verb.SCHEMA_PUSH_REQ);
+        assertThat(targetEndpoint.getValue()).isEqualTo(regularNode);
+    }
+
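+    /**
+     * reset() should pull the schema from a single viable peer (matching release and messaging
+     * version, not a gossip-only member, not this node) and merge the received mutations.
+     */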
+    @Test
+    public void reset() throws UnknownHostException
+    {
+        Collection<Mutation> mutations = Arrays.asList(mock(Mutation.class));
+
+        Wrapper wrapper = new Wrapper();
+        wrapper.localSchemaVersion = SchemaConstants.emptyVersion;
+
+        EndpointState invalidVersionState = mock(EndpointState.class);
+        when(invalidVersionState.getApplicationState(ApplicationState.RELEASE_VERSION)).thenReturn(VersionedValue.unsafeMakeVersionedValue("3.0", 0));
+        when(invalidVersionState.getSchemaVersion()).thenReturn(V1);
+
+        EndpointState validVersionState = mock(EndpointState.class);
+        when(validVersionState.getApplicationState(ApplicationState.RELEASE_VERSION)).thenReturn(VersionedValue.unsafeMakeVersionedValue(FBUtilities.getReleaseVersionString(), 0));
+        when(validVersionState.getSchemaVersion()).thenReturn(V2);
+
+        EndpointState localVersionState = mock(EndpointState.class);
+        when(localVersionState.getApplicationState(ApplicationState.RELEASE_VERSION)).thenReturn(VersionedValue.unsafeMakeVersionedValue(FBUtilities.getReleaseVersionString(), 0));
+        when(localVersionState.getSchemaVersion()).thenReturn(SchemaConstants.emptyVersion);
+
+        // some nodes
+        InetAddressAndPort thisNode = wrapper.configureMocksForEndpoint(FBUtilities.getBroadcastAddressAndPort(), localVersionState, MessagingService.current_version, false);
+        InetAddressAndPort noStateNode = wrapper.configureMocksForEndpoint("10.0.0.1:8000", null, MessagingService.current_version, false);
+        InetAddressAndPort diffMajorVersionNode = wrapper.configureMocksForEndpoint("10.0.0.2:8000", invalidVersionState, MessagingService.current_version, false);
+        InetAddressAndPort unkonwnNode = wrapper.configureMocksForEndpoint("10.0.0.2:8000", validVersionState, null, false);
+        InetAddressAndPort invalidMessagingVersionNode = wrapper.configureMocksForEndpoint("10.0.0.3:8000", validVersionState, MessagingService.VERSION_30, false);
+        InetAddressAndPort gossipOnlyMemberNode = wrapper.configureMocksForEndpoint("10.0.0.4:8000", validVersionState, MessagingService.current_version, true);
+        InetAddressAndPort regularNode1 = wrapper.configureMocksForEndpoint("10.0.0.5:8000", validVersionState, MessagingService.current_version, false);
+        InetAddressAndPort regularNode2 = wrapper.configureMocksForEndpoint("10.0.0.6:8000", validVersionState, MessagingService.current_version, false);
+        Set<InetAddressAndPort> nodes = new LinkedHashSet<>(Arrays.asList(thisNode, noStateNode, diffMajorVersionNode, unknownNode, invalidMessagingVersionNode, gossipOnlyMemberNode, regularNode1, regularNode2));
+        when(wrapper.gossiper.getLiveMembers()).thenReturn(nodes);
+        doAnswer(a -> {
+            Message msg = a.getArgument(0, Message.class);
+            InetAddressAndPort endpoint = a.getArgument(1, InetAddressAndPort.class);
+            RequestCallback callback = a.getArgument(2, RequestCallback.class);
+
+            assertThat(msg.verb()).isEqualTo(Verb.SCHEMA_PULL_REQ);
+            assertThat(endpoint).isEqualTo(regularNode1);
+            callback.onResponse(Message.remoteResponse(regularNode1, Verb.SCHEMA_PULL_RSP, mutations));
+            return null;
+        }).when(wrapper.messagingService).sendWithCallback(any(Message.class), any(InetAddressAndPort.class), any(RequestCallback.class));
+        wrapper.coordinator.reset();
+        assertThat(wrapper.mergedSchemasFrom).anyMatch(ep -> regularNode1.equals(ep) || regularNode2.equals(ep));
+        assertThat(wrapper.mergedSchemasFrom).hasSize(1);
     }
 }
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/schema/MigrationManagerDropKSTest.java b/test/unit/org/apache/cassandra/schema/MigrationManagerDropKSTest.java
index e28a0df..c045b55 100644
--- a/test/unit/org/apache/cassandra/schema/MigrationManagerDropKSTest.java
+++ b/test/unit/org/apache/cassandra/schema/MigrationManagerDropKSTest.java
@@ -24,6 +24,7 @@
 import org.junit.rules.ExpectedException;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
@@ -54,6 +55,7 @@
                                     SchemaLoader.standardCFMD(KEYSPACE1, TABLE1),
                                     SchemaLoader.standardCFMD(KEYSPACE1, TABLE2));
     }
+
     @Test
     public void dropKS() throws ConfigurationException
     {
@@ -70,10 +72,10 @@
                                            "dropKs", "col" + i, "anyvalue");
         ColumnFamilyStore cfs = Keyspace.open(cfm.keyspace).getColumnFamilyStore(cfm.name);
         assertNotNull(cfs);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         assertTrue(!cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).list().isEmpty());
 
-        MigrationManager.announceKeyspaceDrop(ks.name);
+        SchemaTestUtil.announceKeyspaceDrop(ks.name);
 
         assertNull(Schema.instance.getKeyspaceMetadata(ks.name));
 
diff --git a/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java b/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java
index 9a26032..e1556b7 100644
--- a/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java
+++ b/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java
@@ -18,14 +18,13 @@
 
 package org.apache.cassandra.schema;
 
-import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Optional;
 import java.util.function.Supplier;
 
 import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -39,7 +38,6 @@
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.marshal.ByteType;
 import org.apache.cassandra.db.marshal.BytesType;
@@ -50,7 +48,6 @@
 import org.apache.cassandra.locator.NetworkTopologyStrategy;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static java.util.Collections.singleton;
 import static org.apache.cassandra.Util.throwAssert;
 import static org.apache.cassandra.cql3.CQLTester.assertRows;
 import static org.apache.cassandra.cql3.CQLTester.row;
@@ -85,7 +82,7 @@
                                     SchemaLoader.standardCFMD(KEYSPACE1, TABLE2));
         SchemaLoader.createKeyspace(KEYSPACE3,
                                     KeyspaceParams.simple(5),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, TABLE1),
+                                    SchemaLoader.standardCFMD(KEYSPACE3, TABLE1),
                                     SchemaLoader.compositeIndexCFMD(KEYSPACE3, TABLE1i, true));
         SchemaLoader.createKeyspace(KEYSPACE6,
                                     KeyspaceParams.simple(1),
@@ -148,7 +145,7 @@
         TableMetadata newCf = addTestTable("MadeUpKeyspace", "NewCF", "new cf");
         try
         {
-            MigrationManager.announceNewTable(newCf);
+            SchemaTestUtil.announceNewTable(newCf);
             throw new AssertionError("You shouldn't be able to do anything to a keyspace that doesn't exist.");
         }
         catch (ConfigurationException expected)
@@ -166,7 +163,7 @@
         TableMetadata cfm = addTestTable(original.name, tableName, "A New Table");
 
         assertFalse(Schema.instance.getKeyspaceMetadata(ksName).tables.get(cfm.name).isPresent());
-        MigrationManager.announceNewTable(cfm);
+        SchemaTestUtil.announceNewTable(cfm);
 
         assertTrue(Schema.instance.getKeyspaceMetadata(ksName).tables.get(cfm.name).isPresent());
         assertEquals(cfm, Schema.instance.getKeyspaceMetadata(ksName).tables.get(cfm.name).get());
@@ -179,7 +176,7 @@
         // flush to exercise more than just hitting the memtable
         ColumnFamilyStore cfs = Keyspace.open(ksName).getColumnFamilyStore(tableName);
         assertNotNull(cfs);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         // and make sure we get out what we put in
         UntypedResultSet rows = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s", ksName, tableName));
@@ -202,10 +199,10 @@
                                            "dropCf", "col" + i, "anyvalue");
         ColumnFamilyStore store = Keyspace.open(cfm.keyspace).getColumnFamilyStore(cfm.name);
         assertNotNull(store);
-        store.forceBlockingFlush();
+        Util.flush(store);
         assertTrue(store.getDirectories().sstableLister(Directories.OnTxnErr.THROW).list().size() > 0);
 
-        MigrationManager.announceTableDrop(ks.name, cfm.name, false);
+        SchemaTestUtil.announceTableDrop(ks.name, cfm.name);
 
         assertFalse(Schema.instance.getKeyspaceMetadata(ks.name).tables.get(cfm.name).isPresent());
 
@@ -227,7 +224,7 @@
         Supplier<Object> lambda = () -> {
             for (File file : store.getDirectories().sstableLister(Directories.OnTxnErr.THROW).listFiles())
             {
-                if (file.getPath().endsWith("Data.db") && !new File(file.getPath().replace("Data.db", "Compacted")).exists())
+                if (file.path().endsWith("Data.db") && !new File(file.path().replace("Data.db", "Compacted")).exists())
                     return false;
             }
             return true;
@@ -241,7 +238,7 @@
     {
         TableMetadata cfm = addTestTable("newkeyspace1", "newstandard1", "A new cf for a new ks");
         KeyspaceMetadata newKs = KeyspaceMetadata.create(cfm.keyspace, KeyspaceParams.simple(5), Tables.of(cfm));
-        MigrationManager.announceNewKeyspace(newKs);
+        SchemaTestUtil.announceNewKeyspace(newKs);
 
         assertNotNull(Schema.instance.getKeyspaceMetadata(cfm.keyspace));
         assertEquals(Schema.instance.getKeyspaceMetadata(cfm.keyspace), newKs);
@@ -251,7 +248,7 @@
                                        "key0", "col0", "val0");
         ColumnFamilyStore store = Keyspace.open(cfm.keyspace).getColumnFamilyStore(cfm.name);
         assertNotNull(store);
-        store.forceBlockingFlush();
+        Util.flush(store);
 
         UntypedResultSet rows = QueryProcessor.executeInternal("SELECT * FROM newkeyspace1.newstandard1");
         assertRows(rows, row("key0", "col0", "val0"));
@@ -272,7 +269,7 @@
                                                          KEYSPACE3, TABLE1),
                                            "dropKs", "col" + i, "anyvalue");
 
-        MigrationManager.announceKeyspaceDrop(ks.name);
+        SchemaTestUtil.announceKeyspaceDrop(ks.name);
 
         assertNull(Schema.instance.getKeyspaceMetadata(ks.name));
     }
@@ -282,7 +279,7 @@
     {
         assertNull(Schema.instance.getKeyspaceMetadata(EMPTY_KEYSPACE));
         KeyspaceMetadata newKs = KeyspaceMetadata.create(EMPTY_KEYSPACE, KeyspaceParams.simple(5));
-        MigrationManager.announceNewKeyspace(newKs);
+        SchemaTestUtil.announceNewKeyspace(newKs);
         assertNotNull(Schema.instance.getKeyspaceMetadata(EMPTY_KEYSPACE));
 
         String tableName = "added_later";
@@ -292,7 +289,7 @@
         assertFalse(Schema.instance.getKeyspaceMetadata(newKs.name).tables.get(newCf.name).isPresent());
 
         //add the new CF to the empty space
-        MigrationManager.announceNewTable(newCf);
+        SchemaTestUtil.announceNewTable(newCf);
 
         assertTrue(Schema.instance.getKeyspaceMetadata(newKs.name).tables.get(newCf.name).isPresent());
         assertEquals(Schema.instance.getKeyspaceMetadata(newKs.name).tables.get(newCf.name).get(), newCf);
@@ -304,7 +301,7 @@
 
         ColumnFamilyStore cfs = Keyspace.open(newKs.name).getColumnFamilyStore(newCf.name);
         assertNotNull(cfs);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         UntypedResultSet rows = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s", EMPTY_KEYSPACE, tableName));
         assertRows(rows, row("key0", "col0", "val0"));
@@ -317,7 +314,7 @@
         TableMetadata cf = addTestTable("UpdatedKeyspace", "AddedStandard1", "A new cf for a new ks");
         KeyspaceMetadata oldKs = KeyspaceMetadata.create(cf.keyspace, KeyspaceParams.simple(5), Tables.of(cf));
 
-        MigrationManager.announceNewKeyspace(oldKs);
+        SchemaTestUtil.announceNewKeyspace(oldKs);
 
         assertNotNull(Schema.instance.getKeyspaceMetadata(cf.keyspace));
         assertEquals(Schema.instance.getKeyspaceMetadata(cf.keyspace), oldKs);
@@ -326,7 +323,7 @@
         KeyspaceMetadata newBadKs2 = KeyspaceMetadata.create(cf.keyspace + "trash", KeyspaceParams.simple(4));
         try
         {
-            MigrationManager.announceKeyspaceUpdate(newBadKs2);
+            SchemaTestUtil.announceKeyspaceUpdate(newBadKs2);
             throw new AssertionError("Should not have been able to update a KS with an invalid KS name.");
         }
         catch (ConfigurationException ex)
@@ -339,7 +336,7 @@
         replicationMap.put("replication_factor", "1");
 
         KeyspaceMetadata newKs = KeyspaceMetadata.create(cf.keyspace, KeyspaceParams.create(true, replicationMap));
-        MigrationManager.announceKeyspaceUpdate(newKs);
+        SchemaTestUtil.announceKeyspaceUpdate(newKs);
 
         KeyspaceMetadata newFetchedKs = Schema.instance.getKeyspaceMetadata(newKs.name);
         assertEquals(newFetchedKs.params.replication.klass, newKs.params.replication.klass);
@@ -459,7 +456,7 @@
                                                     TABLE1i),
                                        "key0", "col0", 1L, 1L);
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         ColumnFamilyStore indexCfs = cfs.indexManager.getIndexByName(indexName)
                                                      .getBackingTable()
                                                      .orElseThrow(throwAssert("Cannot access index cfs"));
@@ -471,7 +468,7 @@
                                      .get(indexName)
                                      .orElseThrow(throwAssert("Index not found"));
 
-        MigrationManager.announceTableUpdate(meta.unbuild().indexes(meta.indexes.without(existing.name)).build());
+        SchemaTestUtil.announceTableUpdate(meta.unbuild().indexes(meta.indexes.without(existing.name)).build());
 
         // check
         assertTrue(cfs.indexManager.listIndexes().isEmpty());
@@ -520,11 +517,14 @@
         TableMetadata table = addTestTable("ks0", "t", "");
         KeyspaceMetadata keyspace = KeyspaceMetadata.create("ks0", KeyspaceParams.simple(1), Tables.of(table));
 
-        Optional<Mutation> mutation = MigrationManager.evolveSystemKeyspace(keyspace, 0);
-        assertTrue(mutation.isPresent());
+        SchemaTransformation transformation = SchemaTransformations.updateSystemKeyspace(keyspace, 0);
+        Keyspaces before = Keyspaces.none();
+        Keyspaces after = transformation.apply(before);
+        Keyspaces.KeyspacesDiff diff = Keyspaces.diff(before, after);
 
-        Schema.instance.merge(singleton(mutation.get()));
-        assertEquals(keyspace, Schema.instance.getKeyspaceMetadata("ks0"));
+        assertTrue(diff.altered.isEmpty());
+        assertTrue(diff.dropped.isEmpty());
+        assertEquals(keyspace, diff.created.getNullable("ks0"));
     }
 
     @Test
@@ -533,12 +533,12 @@
         TableMetadata table = addTestTable("ks1", "t", "");
         KeyspaceMetadata keyspace = KeyspaceMetadata.create("ks1", KeyspaceParams.simple(1), Tables.of(table));
 
-        // create the keyspace, verify it's there
-        Schema.instance.merge(singleton(SchemaKeyspace.makeCreateKeyspaceMutation(keyspace, 0).build()));
-        assertEquals(keyspace, Schema.instance.getKeyspaceMetadata("ks1"));
+        SchemaTransformation transformation = SchemaTransformations.updateSystemKeyspace(keyspace, 0);
+        Keyspaces before = Keyspaces.of(keyspace);
+        Keyspaces after = transformation.apply(before);
+        Keyspaces.KeyspacesDiff diff = Keyspaces.diff(before, after);
 
-        Optional<Mutation> mutation = MigrationManager.evolveSystemKeyspace(keyspace, 0);
-        assertFalse(mutation.isPresent());
+        assertTrue(diff.isEmpty());
     }
 
     @Test
@@ -547,18 +547,18 @@
         TableMetadata table0 = addTestTable("ks2", "t", "");
         KeyspaceMetadata keyspace0 = KeyspaceMetadata.create("ks2", KeyspaceParams.simple(1), Tables.of(table0));
 
-        // create the keyspace, verify it's there
-        Schema.instance.merge(singleton(SchemaKeyspace.makeCreateKeyspaceMutation(keyspace0, 0).build()));
-        assertEquals(keyspace0, Schema.instance.getKeyspaceMetadata("ks2"));
-
         TableMetadata table1 = table0.unbuild().comment("comment").build();
         KeyspaceMetadata keyspace1 = KeyspaceMetadata.create("ks2", KeyspaceParams.simple(1), Tables.of(table1));
 
-        Optional<Mutation> mutation = MigrationManager.evolveSystemKeyspace(keyspace1, 1);
-        assertTrue(mutation.isPresent());
+        SchemaTransformation transformation = SchemaTransformations.updateSystemKeyspace(keyspace1, 1);
+        Keyspaces before = Keyspaces.of(keyspace0);
+        Keyspaces after = transformation.apply(before);
+        Keyspaces.KeyspacesDiff diff = Keyspaces.diff(before, after);
 
-        Schema.instance.merge(singleton(mutation.get()));
-        assertEquals(keyspace1, Schema.instance.getKeyspaceMetadata("ks2"));
+        assertTrue(diff.created.isEmpty());
+        assertTrue(diff.dropped.isEmpty());
+        assertEquals(1, diff.altered.size());
+        assertEquals(keyspace1, diff.altered.get(0).after);
     }
 
     private TableMetadata addTestTable(String ks, String cf, String comment)
diff --git a/test/unit/org/apache/cassandra/schema/MockSchema.java b/test/unit/org/apache/cassandra/schema/MockSchema.java
index 194f3db..f5c9986 100644
--- a/test/unit/org/apache/cassandra/schema/MockSchema.java
+++ b/test/unit/org/apache/cassandra/schema/MockSchema.java
@@ -18,28 +18,37 @@
 */
 package org.apache.cassandra.schema;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
+import java.util.function.Supplier;
 
 import com.google.common.collect.ImmutableSet;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.db.memtable.SkipListMemtable;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.IndexSummary;
+import org.apache.cassandra.io.sstable.SSTableId;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.io.sstable.metadata.MetadataType;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.io.util.ChannelProxy;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileHandle;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.Memory;
@@ -50,6 +59,21 @@
 
 public class MockSchema
 {
+    public static Supplier<? extends SSTableId> sstableIdGenerator = Util.newSeqGen();
+
+    public static final ConcurrentMap<Integer, SSTableId> sstableIds = new ConcurrentHashMap<>();
+
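+    // Maps a legacy int "generation" to a stable SSTableId, generating one lazily on first access.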
+    public static SSTableId sstableId(int idx)
+    {
+        return sstableIds.computeIfAbsent(idx, ignored -> sstableIdGenerator.get());
+    }
+
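+    // Parameter sets for tests that want to run against both sequential and UUID-based sstable id generators.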
+    public static Collection<Object[]> sstableIdGenerators()
+    {
+        return Arrays.asList(new Object[]{ Util.newSeqGen() },
+                             new Object[]{ Util.newUUIDGen() });
+    }
+
     static
     {
         Memory offsets = Memory.allocate(4);
@@ -65,7 +89,7 @@
 
     public static Memtable memtable(ColumnFamilyStore cfs)
     {
-        return new Memtable(cfs.metadata());
+        return SkipListMemtable.FACTORY.create(null, cfs.metadata, cfs);
     }
 
     public static SSTableReader sstable(int generation, ColumnFamilyStore cfs)
@@ -122,18 +146,12 @@
         Descriptor descriptor = new Descriptor(cfs.getDirectories().getDirectoryForNewSSTables(),
                                                cfs.keyspace.getName(),
                                                cfs.getTableName(),
-                                               generation, SSTableFormat.Type.BIG);
+                                               sstableId(generation), SSTableFormat.Type.BIG);
         Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
         for (Component component : components)
         {
             File file = new File(descriptor.filenameFor(component));
-            try
-            {
-                file.createNewFile();
-            }
-            catch (IOException e)
-            {
-            }
+            file.createFileIfNotExists();
         }
         // .complete() with size to make sstable.onDiskLength work
         try (FileHandle.Builder builder = new FileHandle.Builder(new ChannelProxy(tempFile)).bufferSize(size);
@@ -144,10 +162,7 @@
                 try
                 {
                     File file = new File(descriptor.filenameFor(Component.DATA));
-                    try (RandomAccessFile raf = new RandomAccessFile(file, "rw"))
-                    {
-                        raf.setLength(size);
-                    }
+                    Util.setFileLength(file, size);
                 }
                 catch (IOException e)
                 {
@@ -194,7 +209,7 @@
 
     public static ColumnFamilyStore newCFS(TableMetadata metadata)
     {
-        return new ColumnFamilyStore(ks, metadata.name, 0, new TableMetadataRef(metadata), new Directories(metadata), false, false, false);
+        return new ColumnFamilyStore(ks, metadata.name, Util.newSeqGen(), new TableMetadataRef(metadata), new Directories(metadata), false, false, false);
     }
 
     public static TableMetadata newTableMetadata(String ksname)
@@ -242,7 +257,7 @@
             File dir = new File(dirName);
             if (!dir.exists())
                 continue;
-            String[] children = dir.list();
+            String[] children = dir.tryListNames();
             for (String child : children)
                 FileUtils.deleteRecursive(new File(dir, child));
         }
diff --git a/test/unit/org/apache/cassandra/schema/RemoveWithoutDroppingTest.java b/test/unit/org/apache/cassandra/schema/RemoveWithoutDroppingTest.java
new file mode 100644
index 0000000..9c3271b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/schema/RemoveWithoutDroppingTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.stream.Collectors;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.schema.SchemaTransformation.SchemaTransformationResult;
+import org.mockito.Mockito;
+
+import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class RemoveWithoutDroppingTest
+{
+    static volatile boolean dropDataOverride = true;
+
+    static final SchemaChangeListener listener = Mockito.mock(SchemaChangeListener.class);
+
+    @BeforeClass
+    public static void beforeClass()
+    {
+        System.setProperty(SchemaUpdateHandlerFactoryProvider.SUH_FACTORY_CLASS_PROPERTY, TestSchemaUpdateHandlerFactory.class.getName());
+        CQLTester.prepareServer();
+        Schema.instance.registerListener(listener);
+    }
+
+    @Before
+    public void before()
+    {
+        Mockito.reset(listener);
+    }
+
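+    // Delegates to the real schema update callback but substitutes the test-controlled dropDataOverride flag
+    // for the requested dropData value, so a DROP can be applied with or without deleting the data files.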
+    public static void callbackOverride(BiConsumer<SchemaTransformationResult, Boolean> updateSchemaCallback, SchemaTransformationResult result, boolean dropData)
+    {
+        updateSchemaCallback.accept(result, dropDataOverride);
+    }
+
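+    // Wraps the default online/offline update handlers so the test can intercept the schema-change callback.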
+    public static class TestSchemaUpdateHandlerFactory implements SchemaUpdateHandlerFactory
+    {
+        @Override
+        public SchemaUpdateHandler getSchemaUpdateHandler(boolean online, BiConsumer<SchemaTransformationResult, Boolean> updateSchemaCallback)
+        {
+            return online
+                   ? new DefaultSchemaUpdateHandler((result, dropData) -> callbackOverride(updateSchemaCallback, result, dropData))
+                   : new OfflineSchemaUpdateHandler((result, dropData) -> callbackOverride(updateSchemaCallback, result, dropData));
+        }
+    }
+
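+    // Creates a keyspace and table with flushed data, drops the keyspace, and verifies whether the on-disk
+    // files were removed as expected.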
+    private void testRemoveKeyspace(String ks, String tab, boolean expectDropped) throws Throwable
+    {
+        executeInternal(String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}", ks));
+        executeInternal(String.format("CREATE TABLE %s.%s (id INT PRIMARY KEY, v INT)", ks, tab));
+        executeInternal(String.format("INSERT INTO %s.%s (id, v) VALUES (?, ?)", ks, tab), 1, 2);
+        executeInternal(String.format("INSERT INTO %s.%s (id, v) VALUES (?, ?)", ks, tab), 3, 4);
+        ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(ks, tab);
+        cfs.forceFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS).get();
+
+        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(ks);
+        TableMetadata tm = Schema.instance.getTableMetadata(ks, tab);
+
+        List<File> directories = cfs.getDirectories().getCFDirectories();
+        Set<File> filesBefore = directories.stream().flatMap(d -> Arrays.stream(d.tryList(f -> !f.isDirectory()))).collect(Collectors.toSet());
+        assertThat(filesBefore).isNotEmpty();
+
+        executeInternal(String.format("DROP KEYSPACE %s", ks));
+
+        Set<File> filesAfter = directories.stream().flatMap(d -> Arrays.stream(d.tryList(f -> !f.isDirectory()))).collect(Collectors.toSet());
+        if (expectDropped)
+            assertThat(filesAfter).isEmpty();
+        else
+            assertThat(filesAfter).hasSameElementsAs(filesBefore);
+
+        Mockito.verify(listener).onDropTable(tm, expectDropped);
+        Mockito.verify(listener).onDropKeyspace(ksm, expectDropped);
+    }
+
+    @Test
+    public void testRemoveWithoutDropping() throws Throwable
+    {
+        dropDataOverride = false;
+        String ks = "test_remove_without_dropping";
+        String tab = "test_table";
+        testRemoveKeyspace(ks, tab, false);
+    }
+
+    @Test
+    public void testRemoveWithDropping() throws Throwable
+    {
+        dropDataOverride = true;
+        String ks = "test_remove_with_dropping";
+        String tab = "test_table";
+        testRemoveKeyspace(ks, tab, true);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java b/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java
index 4a1ee57..e807730 100644
--- a/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java
@@ -19,10 +19,7 @@
 package org.apache.cassandra.schema;
 
 import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
 import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
@@ -31,6 +28,7 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.stream.Stream;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -49,8 +47,14 @@
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.rows.UnfilteredRowIterators;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.NoPayload;
+import org.apache.cassandra.net.RequestCallback;
+import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.service.reads.repair.ReadRepairStrategy;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
 import org.jboss.byteman.contrib.bmunit.BMRule;
 import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
 
@@ -71,25 +75,8 @@
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1));
-    }
 
-    /** See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes */
-    @Test
-    public void testSchemaPullSynchronicity() throws Exception
-    {
-        for (String methodName : Arrays.asList("schemaKeyspaceAsMutations",
-                                               "truncateSchemaKeyspace",
-                                               "saveSystemKeyspace",
-                                               "updateVersion"))
-        {
-            Method method = Schema.class.getDeclaredMethod(methodName);
-            assertTrue(Modifier.isSynchronized(method.getModifiers()));
-        }
-
-        Method method = Schema.class.getDeclaredMethod("merge", Collection.class);
-        assertTrue(Modifier.isSynchronized(method.getModifiers()));
-        method = Schema.class.getDeclaredMethod("transform", SchemaTransformation.class, boolean.class, long.class);
-        assertTrue(Modifier.isSynchronized(method.getModifiers()));
+        MessagingService.instance().listen();
     }
 
     /** See CASSANDRA-16856/16996. Make sure schema pulls are synchronized to prevent concurrent schema pull/writes */
@@ -104,7 +91,7 @@
         String keyspace = "sandbox";
         ExecutorService pool = Executors.newFixedThreadPool(2);
 
-        Schema.instance.truncateSchemaKeyspace();; // Make sure there's nothing but the create we're about to do
+        SchemaKeyspace.truncate(); // Make sure there's nothing but the create we're about to do
         CyclicBarrier barrier = new CyclicBarrier(2);
 
         Future<Void> creation = pool.submit(() -> {
@@ -115,19 +102,13 @@
 
         Future<Collection<Mutation>> mutationsFromThread = pool.submit(() -> {
             barrier.await();
-
-            Collection<Mutation> mutations = Schema.instance.schemaKeyspaceAsMutations();
-            // Make sure we actually have a mutation to check for partial modification.
-            while (mutations.size() == 0)
-                mutations = Schema.instance.schemaKeyspaceAsMutations();
-
-            return mutations;
+            return Stream.generate(this::getSchemaMutations).filter(m -> !m.isEmpty()).findFirst().get();
         });
 
         creation.get(); // make sure the creation is finished
 
         Collection<Mutation> mutationsFromConcurrentAccess = mutationsFromThread.get();
-        Collection<Mutation> settledMutations = Schema.instance.schemaKeyspaceAsMutations();
+        Collection<Mutation> settledMutations = getSchemaMutations();
 
         // If the worker thread picked up the creation at all, it should have the same modifications.
         // In other words, we should see all modifications or none.
@@ -144,10 +125,20 @@
         pool.shutdownNow();
     }
 
+    private Collection<Mutation> getSchemaMutations()
+    {
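+        // Pull the schema as mutations from this node itself via SCHEMA_PULL_REQ and block until the reply arrives.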
+        AsyncPromise<Collection<Mutation>> p = new AsyncPromise<>();
+        MessagingService.instance().sendWithCallback(Message.out(Verb.SCHEMA_PULL_REQ, NoPayload.noPayload),
+                                                     FBUtilities.getBroadcastAddressAndPort(),
+                                                     (RequestCallback<Collection<Mutation>>) msg -> p.setSuccess(msg.payload));
+        p.syncUninterruptibly();
+        return p.getNow();
+    }
+
     @Test
     public void testConversionsInverses() throws Exception
     {
-        for (String keyspaceName : Schema.instance.getNonSystemKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonSystemKeyspaces().names())
         {
             for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores())
             {
@@ -194,7 +185,7 @@
     {
         KeyspaceMetadata ksm = Schema.instance.getKeyspaceInstance(keyspace).getMetadata();
         Mutation mutation = SchemaKeyspace.makeUpdateTableMutation(ksm, oldTable, newTable, FBUtilities.timestampMicros()).build();
-        Schema.instance.merge(Collections.singleton(mutation));
+        SchemaTestUtil.mergeAndAnnounceLocally(Collections.singleton(mutation));
     }
 
     private static void createTable(String keyspace, String cql)
@@ -203,7 +194,7 @@
 
         KeyspaceMetadata ksm = KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1), Tables.of(table));
         Mutation mutation = SchemaKeyspace.makeCreateTableMutation(ksm, table, FBUtilities.timestampMicros()).build();
-        Schema.instance.merge(Collections.singleton(mutation));
+        SchemaTestUtil.mergeAndAnnounceLocally(Collections.singleton(mutation));
     }
 
     private static void checkInverses(TableMetadata metadata) throws Exception
diff --git a/test/unit/org/apache/cassandra/schema/SchemaMutationsSerializerTest.java b/test/unit/org/apache/cassandra/schema/SchemaMutationsSerializerTest.java
new file mode 100644
index 0000000..7680697
--- /dev/null
+++ b/test/unit/org/apache/cassandra/schema/SchemaMutationsSerializerTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.io.util.DataInputBuffer;
+import org.apache.cassandra.io.util.DataOutputBuffer;
+import org.apache.cassandra.net.MessagingService;
+import org.assertj.core.api.Assertions;
+
+public class SchemaMutationsSerializerTest extends CQLTester
+{
+    @BeforeClass
+    public static void beforeClass()
+    {
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    @Test
+    public void testSerDe() throws IOException
+    {
+        createTable("CREATE TABLE %s (id INT PRIMARY KEY, v INT)");
+        Collection<Mutation> mutations = SchemaKeyspace.convertSchemaToMutations();
+        DataOutputBuffer out = new DataOutputBuffer();
+        SchemaMutationsSerializer.instance.serialize(mutations, out, MessagingService.current_version);
+        DataInputBuffer in = new DataInputBuffer(out.toByteArray());
+        Collection<Mutation> deserializedMutations = SchemaMutationsSerializer.instance.deserialize(in, MessagingService.current_version);
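+        // Round-trip check: re-serialize the deserialized mutations and compare the raw bytes with the original output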
+        DataOutputBuffer out2 = new DataOutputBuffer();
+        SchemaMutationsSerializer.instance.serialize(deserializedMutations, out2, MessagingService.current_version);
+        Assertions.assertThat(out2.toByteArray()).isEqualTo(out.toByteArray());
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/schema/SchemaTest.java b/test/unit/org/apache/cassandra/schema/SchemaTest.java
index 64b1341..4185536 100644
--- a/test/unit/org/apache/cassandra/schema/SchemaTest.java
+++ b/test/unit/org/apache/cassandra/schema/SchemaTest.java
@@ -19,55 +19,55 @@
 package org.apache.cassandra.schema;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.utils.FBUtilities;
 
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
 public class SchemaTest
 {
     @BeforeClass
-    public static void setupDatabaseDescriptor()
+    public static void setup()
     {
         DatabaseDescriptor.daemonInitialization();
+        ServerTestUtils.prepareServer();
+        Schema.instance.loadFromDisk();
     }
 
     @Test
     public void testTransKsMigration() throws IOException
     {
-        CommitLog.instance.start();
-        SchemaLoader.cleanupAndLeaveDirs();
-        Schema.instance.loadFromDisk();
         assertEquals(0, Schema.instance.getNonSystemKeyspaces().size());
 
-        Gossiper.instance.start((int)(System.currentTimeMillis() / 1000));
-        Keyspace.setInitialized();
-
+        Gossiper.instance.start((int) (System.currentTimeMillis() / 1000));
         try
         {
             // add a few.
-            MigrationManager.announceNewKeyspace(KeyspaceMetadata.create("ks0", KeyspaceParams.simple(3)));
-            MigrationManager.announceNewKeyspace(KeyspaceMetadata.create("ks1", KeyspaceParams.simple(3)));
+            saveKeyspaces();
+            Schema.instance.reloadSchemaAndAnnounceVersion();
 
             assertNotNull(Schema.instance.getKeyspaceMetadata("ks0"));
             assertNotNull(Schema.instance.getKeyspaceMetadata("ks1"));
 
-            Schema.instance.unload(Schema.instance.getKeyspaceMetadata("ks0"));
-            Schema.instance.unload(Schema.instance.getKeyspaceMetadata("ks1"));
+            Schema.instance.transform(keyspaces -> keyspaces.without(Arrays.asList("ks0", "ks1")));
 
             assertNull(Schema.instance.getKeyspaceMetadata("ks0"));
             assertNull(Schema.instance.getKeyspaceMetadata("ks1"));
 
-            Schema.instance.loadFromDisk();
+            saveKeyspaces();
+            Schema.instance.reloadSchemaAndAnnounceVersion();
 
             assertNotNull(Schema.instance.getKeyspaceMetadata("ks0"));
             assertNotNull(Schema.instance.getKeyspaceMetadata("ks1"));
@@ -78,4 +78,37 @@
         }
     }
 
+    @Test
+    public void testKeyspaceCreationWhenNotInitialized()
+    {
+        Keyspace.unsetInitialized();
+        try
+        {
+            SchemaTestUtil.addOrUpdateKeyspace(KeyspaceMetadata.create("test", KeyspaceParams.simple(1)), true);
+            assertNotNull(Schema.instance.getKeyspaceMetadata("test"));
+            assertNull(Schema.instance.getKeyspaceInstance("test"));
+
+            SchemaTestUtil.dropKeyspaceIfExist("test", true);
+            assertNull(Schema.instance.getKeyspaceMetadata("test"));
+            assertNull(Schema.instance.getKeyspaceInstance("test"));
+        }
+        finally
+        {
+            Keyspace.setInitialized();
+        }
+
+        SchemaTestUtil.addOrUpdateKeyspace(KeyspaceMetadata.create("test", KeyspaceParams.simple(1)), true);
+        assertNotNull(Schema.instance.getKeyspaceMetadata("test"));
+        assertNotNull(Schema.instance.getKeyspaceInstance("test"));
+
+        SchemaTestUtil.dropKeyspaceIfExist("test", true);
+        assertNull(Schema.instance.getKeyspaceMetadata("test"));
+        assertNull(Schema.instance.getKeyspaceInstance("test"));
+    }
+
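+    // Writes ks0 and ks1 directly into the schema keyspace tables, bypassing Schema.instance, so a subsequent
+    // reload picks them up from disk.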
+    private void saveKeyspaces()
+    {
+        Collection<Mutation> mutations = Arrays.asList(SchemaKeyspace.makeCreateKeyspaceMutation(KeyspaceMetadata.create("ks0", KeyspaceParams.simple(3)), FBUtilities.timestampMicros()).build(),
+                                                       SchemaKeyspace.makeCreateKeyspaceMutation(KeyspaceMetadata.create("ks1", KeyspaceParams.simple(3)), FBUtilities.timestampMicros()).build());
+        SchemaKeyspace.applyChanges(mutations);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/schema/SchemaTestUtil.java b/test/unit/org/apache/cassandra/schema/SchemaTestUtil.java
new file mode 100644
index 0000000..b937d15
--- /dev/null
+++ b/test/unit/org/apache/cassandra/schema/SchemaTestUtil.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.concurrent.Stage;
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.exceptions.AlreadyExistsException;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.utils.concurrent.Future;
+
+import static org.apache.cassandra.net.Verb.SCHEMA_PUSH_REQ;
+
+public class SchemaTestUtil
+{
+    private final static Logger logger = LoggerFactory.getLogger(SchemaTestUtil.class);
+
+    public static void announceNewKeyspace(KeyspaceMetadata ksm) throws ConfigurationException
+    {
+        ksm.validate();
+
+        if (Schema.instance.getKeyspaceMetadata(ksm.name) != null)
+            throw new AlreadyExistsException(ksm.name);
+
+        logger.info("Create new Keyspace: {}", ksm);
+        Schema.instance.transform(schema -> schema.withAddedOrUpdated(ksm));
+    }
+
+    public static void announceNewTable(TableMetadata cfm)
+    {
+        announceNewTable(cfm, true);
+    }
+
+    private static void announceNewTable(TableMetadata cfm, boolean throwOnDuplicate)
+    {
+        cfm.validate();
+
+        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(cfm.keyspace);
+        if (ksm == null)
+            throw new ConfigurationException(String.format("Cannot add table '%s' to non existing keyspace '%s'.", cfm.name, cfm.keyspace));
+            // If we have a table or a view which has the same name, we can't add a new one
+        else if (throwOnDuplicate && ksm.getTableOrViewNullable(cfm.name) != null)
+            throw new AlreadyExistsException(cfm.keyspace, cfm.name);
+
+        logger.info("Create new table: {}", cfm);
+        Schema.instance.transform(schema -> schema.withAddedOrUpdated(ksm.withSwapped(ksm.tables.with(cfm))));
+    }
+
+    static void announceKeyspaceUpdate(KeyspaceMetadata ksm)
+    {
+        ksm.validate();
+
+        KeyspaceMetadata oldKsm = Schema.instance.getKeyspaceMetadata(ksm.name);
+        if (oldKsm == null)
+            throw new ConfigurationException(String.format("Cannot update non existing keyspace '%s'.", ksm.name));
+
+        logger.info("Update Keyspace '{}' From {} To {}", ksm.name, oldKsm, ksm);
+        Schema.instance.transform(schema -> schema.withAddedOrUpdated(ksm));
+    }
+
+    public static void announceTableUpdate(TableMetadata updated)
+    {
+        updated.validate();
+
+        TableMetadata current = Schema.instance.getTableMetadata(updated.keyspace, updated.name);
+        if (current == null)
+            throw new ConfigurationException(String.format("Cannot update non existing table '%s' in keyspace '%s'.", updated.name, updated.keyspace));
+        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(current.keyspace);
+
+        updated.validateCompatibility(current);
+
+        logger.info("Update table '{}/{}' From {} To {}", current.keyspace, current.name, current, updated);
+        Schema.instance.transform(schema -> schema.withAddedOrUpdated(ksm.withSwapped(ksm.tables.withSwapped(updated))));
+    }
+
+    static void announceKeyspaceDrop(String ksName)
+    {
+        KeyspaceMetadata oldKsm = Schema.instance.getKeyspaceMetadata(ksName);
+        if (oldKsm == null)
+            throw new ConfigurationException(String.format("Cannot drop non existing keyspace '%s'.", ksName));
+
+        logger.info("Drop Keyspace '{}'", oldKsm.name);
+        Schema.instance.transform(schema -> schema.without(ksName));
+    }
+
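+    // Returns a transformation that drops the given table, throwing if the keyspace or table does not exist.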
+    public static SchemaTransformation dropTable(String ksName, String cfName)
+    {
+        return schema -> {
+            KeyspaceMetadata ksm = schema.getNullable(ksName);
+            TableMetadata tm = ksm != null ? ksm.getTableOrViewNullable(cfName) : null;
+            if (tm == null)
+                throw new ConfigurationException(String.format("Cannot drop non existing table '%s' in keyspace '%s'.", cfName, ksName));
+
+            return schema.withAddedOrUpdated(ksm.withSwapped(ksm.tables.without(cfName)));
+        };
+    }
+
+    public static void announceTableDrop(String ksName, String cfName)
+    {
+        logger.info("Drop table '{}/{}'", ksName, cfName);
+        Schema.instance.transform(dropTable(ksName, cfName));
+    }
+
+    public static void addOrUpdateKeyspace(KeyspaceMetadata ksm, boolean locally)
+    {
+        Schema.instance.transform(current -> current.withAddedOrUpdated(ksm));
+    }
+
+    public static void dropKeyspaceIfExist(String ksName, boolean locally)
+    {
+        Schema.instance.transform(current -> current.without(Collections.singletonList(ksName)));
+    }
+
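+    // Applies the schema mutations via the push verb handler, then waits for the MIGRATION stage to drain
+    // so the change has been applied before returning.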
+    public static void mergeAndAnnounceLocally(Collection<Mutation> schemaMutations)
+    {
+        SchemaPushVerbHandler.instance.doVerb(Message.out(SCHEMA_PUSH_REQ, schemaMutations));
+        Future<?> f = Stage.MIGRATION.submit(() -> {});
+        Assert.assertTrue(f.awaitThrowUncheckedOnInterrupt(10, TimeUnit.SECONDS));
+        f.rethrowIfFailed();
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/schema/TableMetadataTest.java b/test/unit/org/apache/cassandra/schema/TableMetadataTest.java
new file mode 100644
index 0000000..357ac01
--- /dev/null
+++ b/test/unit/org/apache/cassandra/schema/TableMetadataTest.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.schema;
+
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import org.junit.Test;
+
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.marshal.BooleanType;
+import org.apache.cassandra.db.marshal.CompositeType;
+import org.apache.cassandra.db.marshal.FloatType;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.IntegerType;
+import org.apache.cassandra.db.marshal.TupleType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+
+import static org.junit.Assert.assertEquals;
+
+public class TableMetadataTest
+{
+    @Test
+    public void testPartitionKeyAsCQLLiteral()
+    {
+        String keyspaceName = "keyspace";
+        String tableName = "table";
+
+        // composite type
+        CompositeType type1 = CompositeType.getInstance(UTF8Type.instance, UTF8Type.instance, UTF8Type.instance);
+        TableMetadata metadata1 = TableMetadata.builder(keyspaceName, tableName)
+                                               .addPartitionKeyColumn("key", type1)
+                                               .build();
+        assertEquals("('test:', 'composite!', 'type)')",
+                     metadata1.partitionKeyAsCQLLiteral(type1.decompose("test:", "composite!", "type)")));
+
+        // composite type with tuple
+        CompositeType type2 = CompositeType.getInstance(new TupleType(Arrays.asList(FloatType.instance, UTF8Type.instance)),
+                                                        IntegerType.instance);
+        TableMetadata metadata2 = TableMetadata.builder(keyspaceName, tableName)
+                                               .addPartitionKeyColumn("key", type2)
+                                               .build();
+        ByteBuffer tupleValue = TupleType.buildValue(new ByteBuffer[]{ FloatType.instance.decompose(0.33f),
+                                                                       UTF8Type.instance.decompose("tuple test") });
+        assertEquals("((0.33, 'tuple test'), 10)",
+                     metadata2.partitionKeyAsCQLLiteral(type2.decompose(tupleValue, BigInteger.valueOf(10))));
+
+        // plain type
+        TableMetadata metadata3 = TableMetadata.builder(keyspaceName, tableName)
+                                               .addPartitionKeyColumn("key", UTF8Type.instance).build();
+        assertEquals("'non-composite test'",
+                     metadata3.partitionKeyAsCQLLiteral(UTF8Type.instance.decompose("non-composite test")));
+    }
+
+    @Test
+    public void testPrimaryKeyAsCQLLiteral()
+    {
+        String keyspaceName = "keyspace";
+        String tableName = "table";
+
+        TableMetadata metadata;
+
+        // one partition key column, no clustering key
+        metadata = TableMetadata.builder(keyspaceName, tableName)
+                                .addPartitionKeyColumn("key", UTF8Type.instance)
+                                .build();
+        assertEquals("'Test'", metadata.primaryKeyAsCQLLiteral(UTF8Type.instance.decompose("Test"), Clustering.EMPTY));
+
+        // two partition key columns, no clustering key
+        metadata = TableMetadata.builder(keyspaceName, tableName)
+                                .addPartitionKeyColumn("k1", UTF8Type.instance)
+                                .addPartitionKeyColumn("k2", Int32Type.instance)
+                                .build();
+        assertEquals("('Test', -12)",
+                     metadata.primaryKeyAsCQLLiteral(CompositeType.getInstance(UTF8Type.instance, Int32Type.instance)
+                                                                  .decompose("Test", -12), Clustering.EMPTY));
+
+        // one partition key column, one clustering key column
+        metadata = TableMetadata.builder(keyspaceName, tableName)
+                                .addPartitionKeyColumn("key", UTF8Type.instance)
+                                .addClusteringColumn("clustering", UTF8Type.instance)
+                                .build();
+        assertEquals("('k', 'Cluster')",
+                     metadata.primaryKeyAsCQLLiteral(UTF8Type.instance.decompose("k"),
+                                                     Clustering.make(UTF8Type.instance.decompose("Cluster"))));
+        assertEquals("'k'",
+                     metadata.primaryKeyAsCQLLiteral(UTF8Type.instance.decompose("k"), Clustering.EMPTY));
+        assertEquals("'k'",
+                     metadata.primaryKeyAsCQLLiteral(UTF8Type.instance.decompose("k"), Clustering.STATIC_CLUSTERING));
+
+        // one partition key column, two clustering key columns
+        metadata = TableMetadata.builder(keyspaceName, tableName)
+                                .addPartitionKeyColumn("key", UTF8Type.instance)
+                                .addClusteringColumn("c1", UTF8Type.instance)
+                                .addClusteringColumn("c2", UTF8Type.instance)
+                                .build();
+        assertEquals("('k', 'c1', 'c2')",
+                     metadata.primaryKeyAsCQLLiteral(UTF8Type.instance.decompose("k"),
+                                                     Clustering.make(UTF8Type.instance.decompose("c1"),
+                                                                     UTF8Type.instance.decompose("c2"))));
+        assertEquals("'k'",
+                     metadata.primaryKeyAsCQLLiteral(UTF8Type.instance.decompose("k"), Clustering.EMPTY));
+        assertEquals("'k'",
+                     metadata.primaryKeyAsCQLLiteral(UTF8Type.instance.decompose("k"), Clustering.STATIC_CLUSTERING));
+
+        // two partition key columns, two clustering key columns
+        CompositeType composite = CompositeType.getInstance(Int32Type.instance, BooleanType.instance);
+        metadata = TableMetadata.builder(keyspaceName, tableName)
+                                .addPartitionKeyColumn("k1", Int32Type.instance)
+                                .addPartitionKeyColumn("k2", BooleanType.instance)
+                                .addClusteringColumn("c1", UTF8Type.instance)
+                                .addClusteringColumn("c2", UTF8Type.instance)
+                                .build();
+        assertEquals("(0, true, 'Cluster_1', 'Cluster_2')",
+                     metadata.primaryKeyAsCQLLiteral(composite.decompose(0, true),
+                                                     Clustering.make(UTF8Type.instance.decompose("Cluster_1"),
+                                                                     UTF8Type.instance.decompose("Cluster_2"))));
+        assertEquals("(1, true)",
+                     metadata.primaryKeyAsCQLLiteral(composite.decompose(1, true), Clustering.EMPTY));
+        assertEquals("(2, true)",
+                     metadata.primaryKeyAsCQLLiteral(composite.decompose(2, true), Clustering.STATIC_CLUSTERING));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/CustomSslContextFactoryConfigTest.java b/test/unit/org/apache/cassandra/security/CustomSslContextFactoryConfigTest.java
new file mode 100644
index 0000000..c1ab4a4
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/CustomSslContextFactoryConfigTest.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+public class CustomSslContextFactoryConfigTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra-sslcontextfactory.yaml");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor() {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Test
+    public void testValidCustomSslContextFactoryConfiguration() {
+
+        Config config = DatabaseDescriptor.loadConfig();
+        config.client_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.DummySslContextFactoryImpl",
+                            config.client_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.client_encryption_options.ssl_context_factory.class_name,
+                            config.client_encryption_options.sslContextFactoryInstance.getClass().getName());
+        Assert.assertEquals(3, config.client_encryption_options.ssl_context_factory.parameters.size());
+        Assert.assertEquals("value1", config.client_encryption_options.ssl_context_factory.parameters.get("key1"));
+        Assert.assertEquals("value2", config.client_encryption_options.ssl_context_factory.parameters.get("key2"));
+        Assert.assertEquals("value3", config.client_encryption_options.ssl_context_factory.parameters.get("key3"));
+        DummySslContextFactoryImpl dummySslContextFactory =
+        (DummySslContextFactoryImpl)config.client_encryption_options.sslContextFactoryInstance;
+        Assert.assertEquals("dummy-keystore",dummySslContextFactory.getStringValueFor("keystore"));
+    }
+
+    @Test
+    public void testInvalidCustomSslContextFactoryConfiguration() {
+        Config config = DatabaseDescriptor.loadConfig();
+        try {
+            config.server_encryption_options.applyConfig();
+            Assert.fail("Expected a ConfigurationException for the unresolvable ssl_context_factory class");
+        } catch (ConfigurationException ce) {
+            Assert.assertEquals("Unable to create instance of ISslContextFactory for org.apache.cassandra.security" +
+                                ".MissingSslContextFactoryImpl", ce.getMessage());
+            Assert.assertNotNull("Unable to find root cause of pluggable ISslContextFactory loading failure",
+                                 ce.getCause());
+            Assert.assertTrue(ce.getCause() instanceof ClassNotFoundException);
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/CustomSslContextFactoryInvalidConfigTest.java b/test/unit/org/apache/cassandra/security/CustomSslContextFactoryInvalidConfigTest.java
new file mode 100644
index 0000000..79e7d52
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/CustomSslContextFactoryInvalidConfigTest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+public class CustomSslContextFactoryInvalidConfigTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra-sslcontextfactory-invalidconfiguration.yaml");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor() {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testValidCustomSslContextFactoryConfiguration() {
+        Config config = DatabaseDescriptor.loadConfig();
+        config.client_encryption_options.applyConfig();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java b/test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java
new file mode 100644
index 0000000..0657eb6
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import javax.net.ssl.TrustManagerFactory;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import io.netty.handler.ssl.OpenSsl;
+import io.netty.handler.ssl.OpenSslContext;
+import io.netty.handler.ssl.SslContext;
+import io.netty.handler.ssl.SslProvider;
+import org.apache.cassandra.config.EncryptionOptions;
+
+public class DefaultSslContextFactoryTest
+{
+    private Map<String,Object> commonConfig = new HashMap<>();
+
+    @Before
+    public void setup()
+    {
+        commonConfig.put("truststore", "test/conf/cassandra_ssl_test.truststore");
+        commonConfig.put("truststore_password", "cassandra");
+        commonConfig.put("require_client_auth", Boolean.FALSE);
+        commonConfig.put("cipher_suites", Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA"));
+    }
+
+    private void addKeystoreOptions(Map<String,Object> config)
+    {
+        config.put("keystore", "test/conf/cassandra_ssl_test.keystore");
+        config.put("keystore_password", "cassandra");
+    }
+
+    @Test
+    public void getSslContextOpenSSL() throws IOException
+    {
+        EncryptionOptions options = new EncryptionOptions().withTrustStore("test/conf/cassandra_ssl_test.truststore")
+                                                           .withTrustStorePassword("cassandra")
+                                                           .withKeyStore("test/conf/cassandra_ssl_test.keystore")
+                                                           .withKeyStorePassword("cassandra")
+                                                           .withRequireClientAuth(false)
+                                                           .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA");
+        SslContext sslContext = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
+        Assert.assertNotNull(sslContext);
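+        // When native OpenSSL is available the context should be the OpenSSL-backed implementation;
+        // otherwise any JDK-provided SslContext is acceptable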
+        if (OpenSsl.isAvailable())
+            Assert.assertTrue(sslContext instanceof OpenSslContext);
+        else
+            Assert.assertTrue(sslContext instanceof SslContext);
+    }
+
+    @Test(expected = IOException.class)
+    public void buildTrustManagerFactoryWithInvalidTruststoreFile() throws IOException
+    {
+        Map<String,Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put("truststore", "/this/is/probably/not/a/file/on/your/test/machine");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.buildTrustManagerFactory();
+    }
+
+    @Test(expected = IOException.class)
+    public void buildTrustManagerFactoryWithBadPassword() throws IOException
+    {
+        Map<String,Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put("truststore_password", "HomeOfBadPasswords");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.buildTrustManagerFactory();
+    }
+
+    @Test
+    public void buildTrustManagerFactoryHappyPath() throws IOException
+    {
+        Map<String,Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.checkedExpiry = false;
+        TrustManagerFactory trustManagerFactory = defaultSslContextFactoryImpl.buildTrustManagerFactory();
+        Assert.assertNotNull(trustManagerFactory);
+    }
+
+    @Test(expected = IOException.class)
+    public void buildKeyManagerFactoryWithInvalidKeystoreFile() throws IOException
+    {
+        Map<String,Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put("keystore", "/this/is/probably/not/a/file/on/your/test/machine");
+        config.put("keystore_password","ThisWontMatter");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.buildKeyManagerFactory();
+    }
+
+    @Test(expected = IOException.class)
+    public void buildKeyManagerFactoryWithBadPassword() throws IOException
+    {
+        Map<String,Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeystoreOptions(config);
+        config.put("keystore_password", "HomeOfBadPasswords");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildKeyManagerFactoryHappyPath() throws IOException
+    {
+        Map<String,Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        // Make sure the expiry check didn't happen so far for the private key
+        Assert.assertFalse(defaultSslContextFactoryImpl.checkedExpiry);
+
+        addKeystoreOptions(config);
+        DefaultSslContextFactory defaultSslContextFactoryImpl2 = new DefaultSslContextFactory(config);
+        // Trigger the private key loading. That will also check for expired private key
+        defaultSslContextFactoryImpl2.buildKeyManagerFactory();
+        // Now we should have checked the private key's expiry
+        Assert.assertTrue(defaultSslContextFactoryImpl2.checkedExpiry);
+
+        // Make sure that a new factory object performs a fresh private key expiry check
+        DefaultSslContextFactory defaultSslContextFactoryImpl3 = new DefaultSslContextFactory(config);
+        Assert.assertFalse(defaultSslContextFactoryImpl3.checkedExpiry);
+        defaultSslContextFactoryImpl3.buildKeyManagerFactory();
+        Assert.assertTrue(defaultSslContextFactoryImpl3.checkedExpiry);
+    }
+
+    @Test
+    public void testDisableOpenSslForInJvmDtests()
+    {
+        // The configuration name below is hard-coded intentionally to make sure we don't break the contract without
+        // changing the documentation appropriately
+        System.setProperty("cassandra.disable_tcactive_openssl","true");
+        Map<String,Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        Assert.assertEquals(SslProvider.JDK, defaultSslContextFactoryImpl.getSslProvider());
+        System.clearProperty("cassandra.disable_tcactive_openssl");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/DummySslContextFactoryImpl.java b/test/unit/org/apache/cassandra/security/DummySslContextFactoryImpl.java
new file mode 100644
index 0000000..3a14ff2
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/DummySslContextFactoryImpl.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.util.List;
+import java.util.Map;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLException;
+
+import io.netty.handler.ssl.CipherSuiteFilter;
+import io.netty.handler.ssl.SslContext;
+
+/**
+ * TEST ONLY Class. DON'T use it for anything else.
+ */
+public class DummySslContextFactoryImpl implements ISslContextFactory
+{
+    private Map<String,Object> parameters;
+    public DummySslContextFactoryImpl(Map<String,Object> parameters) {
+        this.parameters=parameters;
+    }
+
+    @Override
+    public SSLContext createJSSESslContext(boolean verifyPeerCertificate) throws SSLException
+    {
+        return null;
+    }
+
+    @Override
+    public SslContext createNettySslContext(boolean verifyPeerCertificate, SocketType socketType,
+                                            CipherSuiteFilter cipherFilter) throws SSLException
+    {
+        return null;
+    }
+
+    @Override
+    public void initHotReloading() throws SSLException
+    {
+
+    }
+
+    @Override
+    public boolean shouldReload()
+    {
+        return false;
+    }
+
+    @Override
+    public List<String> getAcceptedProtocols()
+    {
+        return null;
+    }
+
+    @Override
+    public List<String> getCipherSuites()
+    {
+        return null;
+    }
+
+    /*
+     * For testing only
+     */
+    public String getStringValueFor(String configKey) {
+        return parameters.containsKey(configKey) ? parameters.get(configKey).toString() : null;
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java b/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java
index 0fd46b8..fdeda05 100644
--- a/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java
+++ b/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java
@@ -17,9 +17,7 @@
  */
 package org.apache.cassandra.security;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.HashMap;
@@ -29,6 +27,7 @@
 import javax.crypto.IllegalBlockSizeException;
 import javax.crypto.ShortBufferException;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -82,7 +81,7 @@
 
         File f = FileUtils.createTempFile("commitlog-enc-utils-", ".tmp");
         f.deleteOnExit();
-        FileChannel channel = new RandomAccessFile(f, "rw").getChannel();
+        FileChannel channel = f.newReadWriteChannel();
         EncryptionUtils.encryptAndWrite(ByteBuffer.wrap(buf), channel, true, encryptor);
         channel.close();
 
@@ -111,7 +110,7 @@
         Cipher encryptor = cipherFactory.getEncryptor(tdeOptions.cipher, tdeOptions.key_alias);
         File f = FileUtils.createTempFile("commitlog-enc-utils-", ".tmp");
         f.deleteOnExit();
-        FileChannel channel = new RandomAccessFile(f, "rw").getChannel();
+        FileChannel channel = f.newReadWriteChannel();
         EncryptionUtils.encryptAndWrite(compressedBuffer, channel, true, encryptor);
 
         // decrypt
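The two hunks above swap java.io.RandomAccessFile for Cassandra's org.apache.cassandra.io.util.File wrapper. A minimal, self-contained sketch of the replacement idiom, assuming FileUtils here is org.apache.cassandra.io.util.FileUtils as in the test (not part of this patch):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileUtils;

    public class ReadWriteChannelExample
    {
        public static void writeViaCassandraFile() throws IOException
        {
            File f = FileUtils.createTempFile("enc-utils-example-", ".tmp");
            f.deleteOnExit();
            // f.newReadWriteChannel() replaces new RandomAccessFile(f, "rw").getChannel()
            try (FileChannel channel = f.newReadWriteChannel())
            {
                channel.write(ByteBuffer.wrap(new byte[]{ 1, 2, 3 }));
            }
        }
    }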
diff --git a/test/unit/org/apache/cassandra/security/FileBasedSslContextFactoryTest.java b/test/unit/org/apache/cassandra/security/FileBasedSslContextFactoryTest.java
new file mode 100644
index 0000000..be49d16
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/FileBasedSslContextFactoryTest.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.util.HashMap;
+import java.util.Map;
+import javax.net.ssl.SSLException;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.config.ParameterizedClass;
+
+public class FileBasedSslContextFactoryTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(FileBasedSslContextFactoryTest.class);
+
+    private EncryptionOptions.ServerEncryptionOptions encryptionOptions;
+
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra.yaml");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor()
+    {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Before
+    public void setup()
+    {
+        encryptionOptions = new EncryptionOptions.ServerEncryptionOptions()
+                            .withSslContextFactory(new ParameterizedClass(TestFileBasedSSLContextFactory.class.getName(),
+                                                                          new HashMap<>()))
+                            .withTrustStore("test/conf/cassandra_ssl_test.truststore")
+                            .withTrustStorePassword("cassandra")
+                            .withRequireClientAuth(false)
+                            .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA")
+                            .withKeyStore("test/conf/cassandra_ssl_test.keystore")
+                            .withKeyStorePassword("cassandra");
+    }
+
+    @Test
+    public void testHappyPath() throws SSLException
+    {
+        EncryptionOptions.ServerEncryptionOptions localEncryptionOptions = encryptionOptions;
+
+        Assert.assertEquals("org.apache.cassandra.security.FileBasedSslContextFactoryTest$TestFileBasedSSLContextFactory",
+                            localEncryptionOptions.ssl_context_factory.class_name);
+        Assert.assertNotNull("keystore_password must not be null", localEncryptionOptions.keystore_password);
+
+        TestFileBasedSSLContextFactory sslContextFactory =
+        (TestFileBasedSSLContextFactory) localEncryptionOptions.sslContextFactoryInstance;
+        sslContextFactory.buildKeyManagerFactory();
+        sslContextFactory.buildTrustManagerFactory();
+    }
+
+    /**
+     * Tests that an empty {@code keystore_password} configuration is rejected.
+     */
+    @Test(expected = IllegalArgumentException.class)
+    public void testEmptyKeystorePasswords() throws SSLException
+    {
+        EncryptionOptions.ServerEncryptionOptions localEncryptionOptions = encryptionOptions.withKeyStorePassword(null);
+
+        Assert.assertEquals("org.apache.cassandra.security.FileBasedSslContextFactoryTest$TestFileBasedSSLContextFactory",
+                            localEncryptionOptions.ssl_context_factory.class_name);
+        Assert.assertNull("keystore_password must be null", localEncryptionOptions.keystore_password);
+
+        TestFileBasedSSLContextFactory sslContextFactory =
+        (TestFileBasedSSLContextFactory) localEncryptionOptions.sslContextFactoryInstance;
+        try
+        {
+            sslContextFactory.buildKeyManagerFactory();
+            sslContextFactory.buildTrustManagerFactory();
+        }
+        catch (Exception e)
+        {
+            Assert.assertEquals("'keystore_password' must be specified", e.getMessage());
+            throw e;
+        }
+    }
+
+    /**
+     * Tests for the empty password for the {@code keystore} used for the client communication.
+     */
+    @Test(expected = IllegalArgumentException.class)
+    public void testEmptyKeystorePassword() throws SSLException
+    {
+        EncryptionOptions.ServerEncryptionOptions localEncryptionOptions = encryptionOptions.withKeyStorePassword(null);
+
+        Assert.assertEquals("org.apache.cassandra.security.FileBasedSslContextFactoryTest$TestFileBasedSSLContextFactory",
+                            localEncryptionOptions.ssl_context_factory.class_name);
+        Assert.assertNull("keystore_password must be null", localEncryptionOptions.keystore_password);
+
+        TestFileBasedSSLContextFactory sslContextFactory =
+        (TestFileBasedSSLContextFactory) localEncryptionOptions.sslContextFactoryInstance;
+        try
+        {
+            sslContextFactory.buildKeyManagerFactory();
+            sslContextFactory.buildTrustManagerFactory();
+        }
+        catch (Exception e)
+        {
+            Assert.assertEquals("'keystore_password' must be specified", e.getMessage());
+            throw e;
+        }
+    }
+
+    @Test
+    public void testEmptyTruststorePassword() throws SSLException
+    {
+        EncryptionOptions.ServerEncryptionOptions localEncryptionOptions = encryptionOptions.withTrustStorePassword(null);
+        Assert.assertEquals("org.apache.cassandra.security.FileBasedSslContextFactoryTest$TestFileBasedSSLContextFactory",
+                            localEncryptionOptions.ssl_context_factory.class_name);
+        Assert.assertNotNull("keystore_password must not be null", localEncryptionOptions.keystore_password);
+        Assert.assertNull("truststore_password must be null", localEncryptionOptions.truststore_password);
+
+        TestFileBasedSSLContextFactory sslContextFactory =
+        (TestFileBasedSSLContextFactory) localEncryptionOptions.sslContextFactoryInstance;
+        sslContextFactory.buildTrustManagerFactory();
+    }
+
+    public static class TestFileBasedSSLContextFactory extends FileBasedSslContextFactory
+    {
+        public TestFileBasedSSLContextFactory(Map<String, Object> parameters)
+        {
+            super(parameters);
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigTest.java b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigTest.java
new file mode 100644
index 0000000..ab6b00a
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigTest.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import javax.net.ssl.SSLException;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+public class PEMBasedSslContextFactoryConfigTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra-pem-sslcontextfactory.yaml");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor()
+    {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Test
+    public void testHappyPathInlinePEM() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        config.client_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.PEMBasedSslContextFactory",
+                            config.client_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.client_encryption_options.ssl_context_factory.class_name,
+                            config.client_encryption_options.sslContextFactoryInstance.getClass().getName());
+        PEMBasedSslContextFactory sslContextFactory =
+        (PEMBasedSslContextFactory) config.client_encryption_options.sslContextFactoryInstance;
+        sslContextFactory.buildKeyManagerFactory();
+        sslContextFactory.buildTrustManagerFactory();
+    }
+
+    @Test
+    public void testHappyPathFileBasedPEM() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        config.server_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.PEMBasedSslContextFactory",
+                            config.server_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.server_encryption_options.ssl_context_factory.class_name,
+                            config.server_encryption_options.sslContextFactoryInstance.getClass().getName());
+        PEMBasedSslContextFactory sslContextFactory =
+        (PEMBasedSslContextFactory) config.server_encryption_options.sslContextFactoryInstance;
+        sslContextFactory.buildKeyManagerFactory();
+        sslContextFactory.buildTrustManagerFactory();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigWithMismatchingPasswordsTest.java b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigWithMismatchingPasswordsTest.java
new file mode 100644
index 0000000..c1e2dbe
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigWithMismatchingPasswordsTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import javax.net.ssl.SSLException;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+public class PEMBasedSslContextFactoryConfigWithMismatchingPasswordsTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra-pem-sslcontextfactory-mismatching-passwords.yaml");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor()
+    {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Test(expected = ConfigurationException.class)
+    public void testInLinePEMConfiguration() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        try
+        {
+            config.client_encryption_options.applyConfig();
+        }
+        catch (ConfigurationException e)
+        {
+            assertErrorMessageAndRethrow(e);
+        }
+    }
+
+    @Test(expected = ConfigurationException.class)
+    public void testFileBasedPEMConfiguration() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        try
+        {
+            config.server_encryption_options.applyConfig();
+        }
+        catch (ConfigurationException e)
+        {
+            assertErrorMessageAndRethrow(e);
+        }
+    }
+
+    private void assertErrorMessageAndRethrow(ConfigurationException e) throws ConfigurationException
+    {
+        String expectedMessage = "'keystore_password' and 'key_password' both configurations are given and the values do not match";
+        Throwable rootCause = getRootCause(e);
+        String actualMessage = rootCause.getMessage();
+        Assert.assertEquals(expectedMessage, actualMessage);
+        throw e;
+    }
+
+    private Throwable getRootCause(Throwable e)
+    {
+        Throwable rootCause = e;
+        while (rootCause.getCause() != null && rootCause.getCause() != rootCause)
+        {
+            rootCause = rootCause.getCause();
+        }
+        return rootCause;
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigWithUnencryptedKeysTest.java b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigWithUnencryptedKeysTest.java
new file mode 100644
index 0000000..066de8f
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryConfigWithUnencryptedKeysTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import javax.net.ssl.SSLException;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+public class PEMBasedSslContextFactoryConfigWithUnencryptedKeysTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra-pem-sslcontextfactory-unencryptedkeys.yaml");
+        System.setProperty("cassandra.disable_tcactive_openssl", "true");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor()
+    {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Test
+    public void testHappyPathInlinePEM() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        config.client_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.PEMBasedSslContextFactory",
+                            config.client_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.client_encryption_options.ssl_context_factory.class_name,
+                            config.client_encryption_options.sslContextFactoryInstance.getClass().getName());
+        PEMBasedSslContextFactory sslContextFactory =
+        (PEMBasedSslContextFactory) config.client_encryption_options.sslContextFactoryInstance;
+        sslContextFactory.buildKeyManagerFactory();
+        sslContextFactory.buildTrustManagerFactory();
+    }
+
+    @Test
+    public void testHappyPathFileBasedPEM() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        config.server_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.PEMBasedSslContextFactory",
+                            config.server_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.server_encryption_options.ssl_context_factory.class_name,
+                            config.server_encryption_options.sslContextFactoryInstance.getClass().getName());
+        PEMBasedSslContextFactory sslContextFactory =
+        (PEMBasedSslContextFactory) config.server_encryption_options.sslContextFactoryInstance;
+        sslContextFactory.buildKeyManagerFactory();
+        sslContextFactory.buildTrustManagerFactory();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryInvalidConfigTest.java b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryInvalidConfigTest.java
new file mode 100644
index 0000000..2281137
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryInvalidConfigTest.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import javax.net.ssl.SSLException;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+public class PEMBasedSslContextFactoryInvalidConfigTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra-pem-sslcontextfactory-invalidconfiguration.yaml");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor()
+    {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Test(expected = ConfigurationException.class)
+    public void testFileBasedAndInlinePEMConfiguration() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        config.client_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.PEMBasedSslContextFactory",
+                            config.client_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.client_encryption_options.ssl_context_factory.class_name,
+                            config.client_encryption_options.sslContextFactoryInstance.getClass().getName());
+        PEMBasedSslContextFactory sslContextFactory =
+        (PEMBasedSslContextFactory) config.client_encryption_options.sslContextFactoryInstance;
+        sslContextFactory.buildKeyManagerFactory();
+        sslContextFactory.buildTrustManagerFactory();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java
new file mode 100644
index 0000000..243d300
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java
@@ -0,0 +1,449 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import javax.net.ssl.TrustManagerFactory;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import io.netty.handler.ssl.OpenSsl;
+import io.netty.handler.ssl.OpenSslContext;
+import io.netty.handler.ssl.SslContext;
+import io.netty.handler.ssl.SslProvider;
+import org.apache.cassandra.config.EncryptionOptions;
+import org.apache.cassandra.config.ParameterizedClass;
+
+import static org.apache.cassandra.security.PEMBasedSslContextFactory.ConfigKey.ENCODED_CERTIFICATES;
+import static org.apache.cassandra.security.PEMBasedSslContextFactory.ConfigKey.ENCODED_KEY;
+import static org.apache.cassandra.security.PEMBasedSslContextFactory.ConfigKey.KEY_PASSWORD;
+
+public class PEMBasedSslContextFactoryTest
+{
+    private static final String private_key =
+    "-----BEGIN ENCRYPTED PRIVATE KEY-----\n" +
+    "MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/\n" +
+    "g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl\n" +
+    "xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29\n" +
+    "L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V\n" +
+    "sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/\n" +
+    "f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8\n" +
+    "RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR\n" +
+    "0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs\n" +
+    "evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU\n" +
+    "tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6\n" +
+    "wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN\n" +
+    "K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv\n" +
+    "zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5\n" +
+    "mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo\n" +
+    "WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ\n" +
+    "jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6\n" +
+    "eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny\n" +
+    "DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn\n" +
+    "AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY\n" +
+    "Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow\n" +
+    "Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut\n" +
+    "ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr\n" +
+    "bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH\n" +
+    "hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI\n" +
+    "RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw\n" +
+    "pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP\n" +
+    "FujZhqbKUDbYAcqTkoQ=\n" +
+    "-----END ENCRYPTED PRIVATE KEY-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV\n" +
+    "bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD\n" +
+    "VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh\n" +
+    "Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx\n" +
+    "EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu\n" +
+    "a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw\n" +
+    "FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\n" +
+    "MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d\n" +
+    "ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy\n" +
+    "q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2\n" +
+    "TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto\n" +
+    "TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA\n" +
+    "YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD\n" +
+    "N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v\n" +
+    "iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh\n" +
+    "IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv\n" +
+    "6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG\n" +
+    "qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa\n" +
+    "HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru\n" +
+    "n3MVF9w=\n" +
+    "-----END CERTIFICATE-----";
+    private static final String unencrypted_private_key =
+    "-----BEGIN PRIVATE KEY-----\n" +
+    "MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCOSZVf8dLj1xLw\n" +
+    "efqjbogbAwhwRXd3tmEfQY1zHyudJF3XR1T0Xp26BKNvAYUxxZRDNg16M3prPRZv\n" +
+    "wkhDJdE9NaN+BpZPJUavRsZOfGDq/CHN8j/2PPKrn3G/b065JjV3GZjfV/Sln047\n" +
+    "DeZptaSyOg7ZN7F8qjGqxEcnw+szV/wTzhnjGjZMlcbOm4jFGQAn6xUOooQCGsoB\n" +
+    "9FgxKB0nvNG3xVe/2eCNfbS4DabT3Y1wfQqZ62hOa5ZS0rwT+pw3tQs3zFFY1Sfi\n" +
+    "G7qYbqZrKTheWIZGojVVm6mqH4yA2ofOe5N3RsBitCwU/D0dpnaG9Wcl98nmXueM\n" +
+    "B6Rk04v7AgMBAAECggEAYnxIKjrFz/JkJ5MmiszM5HV698r9YB0aqHnFIHPoykIL\n" +
+    "uiCjiumantDrFsCkosixULwvI/BRwbxstTpyrheU9psT6P1CONICVPvV8ylgJAYU\n" +
+    "l+ofn56cEXKxVuICSWFLDH7pM1479g+IJJQAchbKQpqxAGTuMu3SpvJolfuj5srt\n" +
+    "bM7/RYhJFLwDuvHNA3ivlogMneItP03+C25aaxstM+lBuBf68+n78zMgSvt6J/6Y\n" +
+    "G2TOMKnxveMlG2qu9l2lAw/2i8daG/qre08nTH7wpRx0gZLZqNpe45exkrzticzF\n" +
+    "FgWYjG2K2brX21jqHroFgMhdXF7zhhRgLoIeC0BrIQKBgQDCfGfWrJESKBbVai5u\n" +
+    "7wqD9nlzjv6N6FXfTDOPXO1vz5frdvtLVWbs0SMPy+NglkaZK0iqHvb9mf2of8eC\n" +
+    "0D5cmewjn7WCDBQMypIMYgT912ak/BBVuGXcxb6UgD+xARfSARo2C8NG1hfprw1W\n" +
+    "ad14CjS5xhFMs44HpVYhI7iPYwKBgQC7SqVG/b37vZ7CINemdvoMujLvvYXDJJM8\n" +
+    "N21LqNJfVXdukdH3T0xuLnh9Z/wPHjJDMF/9+1foxSEPHijtyz5P19EilNEC/3qw\n" +
+    "fI19+VZoY0mdhPtXSGzy+rbTE2v71QgwFLizSos14Gr+eNiIjF7FYccK05++K/zk\n" +
+    "cd8ZA3bwiQKBgQCl+HTFBs9mpz+VMOAfW2+l3hkXPNiPUc62mNkHZ05ZNNd44jjh\n" +
+    "uSf0wSUiveR08MmevQlt5K7zDQ8jVKh2QjB15gVXAVxsdtJFeDnax2trFP9LnLBz\n" +
+    "9sE2/qn9INU5wK0LUlWD+dXUBbCyg+jl7cJKRqtoPldVFYYHkFlIPqup8QKBgHXv\n" +
+    "hyuw1FUVDkdHzwOvn70r8q8sNHKxMVWVwWkHIZGOi+pAQGrusD4hXRX6yKnsZdIR\n" +
+    "QCD6iFy25R5T64nxlYdJaxPPid3NakB/7ckJnPOWseBSwMIxhQlr/nvjmve1Kba9\n" +
+    "FaEwq4B9lGIxToiNe4/nBiM3JzvlDxX67nUdzWOhAoGAdFvriyvjshSJ4JHgIY9K\n" +
+    "37BVB0VKMcFV2P8fLVWO5oyRtE1bJhU4QVpQmauABU4RGSojJ3NPIVH1wxmJeYtj\n" +
+    "Q3b7EZaqI6ovna2eK2qtUx4WwxhRaXTT8xueBI2lgL6sBSTGG+K69ZOzGQzG/Mfr\n" +
+    "RXKInnLInFD9JD94VqmMozo=\n" +
+    "-----END PRIVATE KEY-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV\n" +
+    "bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD\n" +
+    "VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh\n" +
+    "Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx\n" +
+    "EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu\n" +
+    "a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw\n" +
+    "FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\n" +
+    "MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d\n" +
+    "ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy\n" +
+    "q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2\n" +
+    "TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto\n" +
+    "TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA\n" +
+    "YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD\n" +
+    "N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v\n" +
+    "iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh\n" +
+    "IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv\n" +
+    "6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG\n" +
+    "qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa\n" +
+    "HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru\n" +
+    "n3MVF9w=\n" +
+    "-----END CERTIFICATE-----";
+    private static final String trusted_certificates =
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV\n" +
+    "bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD\n" +
+    "VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh\n" +
+    "Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx\n" +
+    "EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu\n" +
+    "a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw\n" +
+    "FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\n" +
+    "MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d\n" +
+    "ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy\n" +
+    "q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2\n" +
+    "TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto\n" +
+    "TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA\n" +
+    "YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD\n" +
+    "N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v\n" +
+    "iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh\n" +
+    "IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv\n" +
+    "6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG\n" +
+    "qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa\n" +
+    "HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru\n" +
+    "n3MVF9w=\n" +
+    "-----END CERTIFICATE-----";
+    private final Map<String, Object> commonConfig = new HashMap<>();
+
+    @Before
+    public void setup()
+    {
+        commonConfig.put(ENCODED_CERTIFICATES.getKeyName(), trusted_certificates);
+        commonConfig.put("require_client_auth", Boolean.FALSE);
+        commonConfig.put("cipher_suites", Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA"));
+    }
+
+    private void addKeyStoreOptions(Map<String, Object> config)
+    {
+        config.put(ENCODED_KEY.getKeyName(), private_key);
+        config.put(KEY_PASSWORD.getKeyName(), "cassandra");
+    }
+
+    private void addUnencryptedKeyStoreOptions(Map<String, Object> config)
+    {
+        config.put(ENCODED_KEY.getKeyName(), unencrypted_private_key);
+    }
+
+    private void addFileBaseTrustStoreOptions(Map<String, Object> config)
+    {
+        config.put("truststore", "test/conf/cassandra_ssl_test.truststore.pem");
+    }
+
+    private void addFileBaseKeyStoreOptions(Map<String, Object> config)
+    {
+        config.put("keystore", "test/conf/cassandra_ssl_test.keystore.pem");
+        config.put("keystore_password", "cassandra");
+    }
+
+    private void addFileBaseUnencryptedKeyStoreOptions(Map<String, Object> config)
+    {
+        config.put("keystore", "test/conf/cassandra_ssl_test.unencrypted_keystore.pem");
+    }
+
+    @Test
+    public void getSslContextOpenSSL() throws IOException
+    {
+        ParameterizedClass sslContextFactory = new ParameterizedClass(PEMBasedSslContextFactory.class.getSimpleName(),
+                                                                      new HashMap<>());
+        EncryptionOptions options = new EncryptionOptions().withTrustStore("test/conf/cassandra_ssl_test.truststore.pem")
+                                                           .withKeyStore("test/conf/cassandra_ssl_test.keystore.pem")
+                                                           .withKeyStorePassword("cassandra")
+                                                           .withRequireClientAuth(false)
+                                                           .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA")
+                                                           .withSslContextFactory(sslContextFactory);
+        SslContext sslContext = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
+        Assert.assertNotNull(sslContext);
+        if (OpenSsl.isAvailable())
+            Assert.assertTrue(sslContext instanceof OpenSslContext);
+        else
+            Assert.assertTrue(sslContext instanceof SslContext);
+    }
+
+    @Test(expected = IOException.class)
+    public void buildTrustManagerFactoryWithInvalidTruststoreFile() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.remove(ENCODED_CERTIFICATES.getKeyName());
+        config.put("truststore", "/this/is/probably/not/a/file/on/your/test/machine");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.buildTrustManagerFactory();
+    }
+
+    @Test
+    public void buildTrustManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        sslContextFactory.checkedExpiry = false;
+        TrustManagerFactory trustManagerFactory = sslContextFactory.buildTrustManagerFactory();
+        Assert.assertNotNull(trustManagerFactory);
+    }
+
+    @Test
+    public void buildFileBasedTrustManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.remove(ENCODED_CERTIFICATES.getKeyName());
+        addFileBaseTrustStoreOptions(config);
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        sslContextFactory.checkedExpiry = false;
+        TrustManagerFactory trustManagerFactory = sslContextFactory.buildTrustManagerFactory();
+        Assert.assertNotNull(trustManagerFactory);
+    }
+
+    @Test(expected = IOException.class)
+    public void buildKeyManagerFactoryWithInvalidKeystoreFile() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put("keystore", "/this/is/probably/not/a/file/on/your/test/machine");
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        sslContextFactory.checkedExpiry = false;
+        sslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test(expected = IOException.class)
+    public void buildKeyManagerFactoryWithBadPassword() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeyStoreOptions(config);
+        config.put("keystore_password", "HomeOfBadPasswords");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildKeyManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        PEMBasedSslContextFactory sslContextFactory1 = new PEMBasedSslContextFactory(config);
+        // Make sure the expiry check hasn't happened yet for the private key
+        Assert.assertFalse(sslContextFactory1.checkedExpiry);
+
+        addKeyStoreOptions(config);
+        PEMBasedSslContextFactory sslContextFactory2 = new PEMBasedSslContextFactory(config);
+        // Trigger the private key loading, which also checks for an expired private key
+        sslContextFactory2.buildKeyManagerFactory();
+        // Now we should have checked the private key's expiry
+        Assert.assertTrue(sslContextFactory2.checkedExpiry);
+
+        // Make sure that a new factory object performs a fresh private key expiry check
+        PEMBasedSslContextFactory sslContextFactory3 = new PEMBasedSslContextFactory(config);
+        Assert.assertFalse(sslContextFactory3.checkedExpiry);
+        sslContextFactory3.buildKeyManagerFactory();
+        Assert.assertTrue(sslContextFactory3.checkedExpiry);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void buildKeyManagerFactoryWithConflictingPasswordConfigs() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeyStoreOptions(config);
+        config.put("keystore_password", config.get("keyPassword") + "-conflict");
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        sslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildKeyManagerFactoryWithMatchingPasswordConfigs() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addKeyStoreOptions(config);
+        config.put("keystore_password", config.get("keyPassword"));
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        sslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildFileBasedKeyManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        PEMBasedSslContextFactory sslContextFactory1 = new PEMBasedSslContextFactory(config);
+        // Make sure the expiry check hasn't happened yet for the private key
+        Assert.assertFalse(sslContextFactory1.checkedExpiry);
+
+        addFileBaseKeyStoreOptions(config);
+        PEMBasedSslContextFactory sslContextFactory2 = new PEMBasedSslContextFactory(config);
+        // Trigger the private key loading, which also checks for an expired private key
+        sslContextFactory2.buildKeyManagerFactory();
+        // Now we should have checked the private key's expiry
+        Assert.assertTrue(sslContextFactory2.checkedExpiry);
+
+        // Make sure that a new factory object performs a fresh private key expiry check
+        PEMBasedSslContextFactory sslContextFactory3 = new PEMBasedSslContextFactory(config);
+        Assert.assertFalse(sslContextFactory3.checkedExpiry);
+        sslContextFactory3.buildKeyManagerFactory();
+        Assert.assertTrue(sslContextFactory3.checkedExpiry);
+    }
+
+    @Test
+    public void buildKeyManagerFactoryWithUnencryptedKey() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addUnencryptedKeyStoreOptions(config);
+
+        Assert.assertTrue("Unencrypted Key test must not specify a key password",
+                          StringUtils.isEmpty((String) config.get(KEY_PASSWORD.getKeyName())));
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        sslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildKeyManagerFactoryWithFileBasedUnencryptedKey() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addFileBaseUnencryptedKeyStoreOptions(config);
+
+        Assert.assertTrue("Unencrypted Key test must not specify a key password",
+                          StringUtils.isEmpty((String) config.get(KEY_PASSWORD.getKeyName())));
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        sslContextFactory.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void testDisableOpenSslForInJvmDtests()
+    {
+        // The configuration name below is hard-coded intentionally to make sure we don't break the contract without
+        // changing the documentation appropriately
+        System.setProperty("cassandra.disable_tcactive_openssl", "true");
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
+        Assert.assertEquals(SslProvider.JDK, sslContextFactory.getSslProvider());
+        System.clearProperty("cassandra.disable_tcactive_openssl");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testMultiplePrivateKeySources()
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addUnencryptedKeyStoreOptions(config);
+
+        // Check with a valid file path for the keystore
+        addFileBaseUnencryptedKeyStoreOptions(config);
+        new PEMBasedSslContextFactory(config);
+    }
+
+    @Test
+    public void testMultiplePrivateKeySourcesWithInvalidKeystorePath()
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addUnencryptedKeyStoreOptions(config);
+
+        // Check with an invalid file path for the keystore
+        config.put("keystore", "/path/to/nowhere");
+        new PEMBasedSslContextFactory(config);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testMultipleTrustedCertificatesSources()
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        // Check with a valid file path for the truststore
+        addFileBaseTrustStoreOptions(config);
+        new PEMBasedSslContextFactory(config);
+    }
+
+    @Test
+    public void testMultipleTrustedCertificatesSourcesWithInvalidTruststorePath()
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        // Check with an invalid file path for the truststore
+        config.put("truststore", "/path/to/nowhere");
+        new PEMBasedSslContextFactory(config);
+    }
+}
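The suite above exercises two mutually exclusive ways of supplying key material to PEMBasedSslContextFactory. A minimal sketch of the two configuration shapes, reusing the ConfigKey static imports and the test/conf PEM files from the test (run inside a test method that declares throws IOException; the inline PEM strings are placeholders only):

    // Inline form: PEM content passed directly as parameters.
    Map<String, Object> inlineConfig = new HashMap<>();
    inlineConfig.put(ENCODED_KEY.getKeyName(), "<encrypted PEM private key + certificate>"); // placeholder
    inlineConfig.put(KEY_PASSWORD.getKeyName(), "cassandra");
    inlineConfig.put(ENCODED_CERTIFICATES.getKeyName(), "<PEM trusted certificates>");       // placeholder

    // File-based form: paths to PEM files on disk.
    Map<String, Object> fileConfig = new HashMap<>();
    fileConfig.put("keystore", "test/conf/cassandra_ssl_test.keystore.pem");
    fileConfig.put("keystore_password", "cassandra");
    fileConfig.put("truststore", "test/conf/cassandra_ssl_test.truststore.pem");

    // Combining a readable keystore file with an encoded key (or a readable truststore file with
    // encoded certificates) is rejected with IllegalArgumentException, per testMultiplePrivateKeySources
    // and testMultipleTrustedCertificatesSources above.
    PEMBasedSslContextFactory factory = new PEMBasedSslContextFactory(fileConfig);
    factory.buildKeyManagerFactory();
    factory.buildTrustManagerFactory();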
diff --git a/test/unit/org/apache/cassandra/security/PEMJKSSslContextFactoryConfigTest.java b/test/unit/org/apache/cassandra/security/PEMJKSSslContextFactoryConfigTest.java
new file mode 100644
index 0000000..8efd3e4
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/PEMJKSSslContextFactoryConfigTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import javax.net.ssl.SSLException;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+public class PEMJKSSslContextFactoryConfigTest
+{
+    @BeforeClass
+    public static void setupDatabaseDescriptor()
+    {
+        System.setProperty("cassandra.config", "cassandra-pem-jks-sslcontextfactory.yaml");
+    }
+
+    @AfterClass
+    public static void tearDownDatabaseDescriptor()
+    {
+        System.clearProperty("cassandra.config");
+    }
+
+    @Test
+    public void testPEMAndJKSCombination() throws SSLException
+    {
+        Config config = DatabaseDescriptor.loadConfig();
+        config.client_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.PEMBasedSslContextFactory",
+                            config.client_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.client_encryption_options.ssl_context_factory.class_name,
+                            config.client_encryption_options.sslContextFactoryInstance.getClass().getName());
+        PEMBasedSslContextFactory clientSslContextFactory =
+        (PEMBasedSslContextFactory)config.client_encryption_options.sslContextFactoryInstance;
+        clientSslContextFactory.buildKeyManagerFactory();
+        clientSslContextFactory.buildTrustManagerFactory();
+
+        config.server_encryption_options.applyConfig();
+
+        Assert.assertEquals("org.apache.cassandra.security.DefaultSslContextFactory",
+                            config.server_encryption_options.ssl_context_factory.class_name);
+        Assert.assertEquals(config.server_encryption_options.ssl_context_factory.class_name,
+                            config.server_encryption_options.sslContextFactoryInstance.getClass().getName());
+        DefaultSslContextFactory serverSslContextFactory =
+        (DefaultSslContextFactory)config.server_encryption_options.sslContextFactoryInstance;
+        serverSslContextFactory.buildKeyManagerFactory();
+        serverSslContextFactory.buildTrustManagerFactory();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/PEMReaderTest.java b/test/unit/org/apache/cassandra/security/PEMReaderTest.java
new file mode 100644
index 0000000..5f4a867
--- /dev/null
+++ b/test/unit/org/apache/cassandra/security/PEMReaderTest.java
@@ -0,0 +1,454 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.security;
+
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.security.PrivateKey;
+import java.security.cert.Certificate;
+import java.util.Set;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class PEMReaderTest
+{
+    private static final String encoded_encrypted_key =
+    "-----BEGIN ENCRYPTED PRIVATE KEY-----\n" +
+    "MIIE6jAcBgoqhkiG9w0BDAEDMA4ECOWqSzq5PBIdAgIFxQSCBMjXsCK30J0aT3J/\n" +
+    "g5kcbmevTOY1pIhJGbf5QYYrMUPiuDK2ydxIbiPzoTE4/S+OkCeHhlqwn/YydpBl\n" +
+    "xgjZZ1Z5rLJHO27d2biuESqanDiBVXYuVmHmaifRnFy0uUTFkStB5mjVZEiJgO29\n" +
+    "L83hL60uWru71EVuVriC2WCfmZ/EXp6wyYszOqCFQ8Quk/rDO6XuaBl467MJbx5V\n" +
+    "sucGT6E9XKNd9hB14/Izb2jtVM5kqKxoiHpz1na6yhEYJiE5D1uOonznWjBnjwB/\n" +
+    "f0x+acpDfVDoJKTlRdz+DEcbOF7mb9lBVVjP6P/AAsmQzz6JKwHjvCrjYfQmyyN8\n" +
+    "RI4KRQnWgm4L3dtByLqY8HFU4ogisCMCgI+hZQ+OKMz/hoRO540YGiPcTRY3EOUR\n" +
+    "0bd5JxU6tCJDMTqKP9aSL2KmLoiLowdMkSPz7TCzLsZ2bGJemuCfpAs4XT1vXCHs\n" +
+    "evrUbOnh8et1IA8mZ9auThfqsZtNagJLEXA6hWIKp1FfVL3Q49wvMKZt4eTn/zwU\n" +
+    "tLL0m5yPo6/HAaOA3hbm/oghZS0dseshXl7PZrmZQtvYnIvjyoxEL7ducYDQCDP6\n" +
+    "wZ7Nzyh1QZAauSS15hl3vLFRZCA9hWAVgwQAviTvhB342O0i9qI7TQkcHk+qcTPN\n" +
+    "K+iGNbFZ8ma1izXNKSJ2PgI/QqFNIeJWvZrb9PhJRmaZVsTJ9fERm1ewpebZqkVv\n" +
+    "zMqMhlKgx9ggAaSKgnGZkwXwB6GrSbbzUrwRCKm3FieD1QE4VVYevaadVUU75GG5\n" +
+    "mrFKorJEH7kFZlic8OTjDksYnHbcgU36XZrGEXa2+ldVeGKL3CsXWciaQRcJg8yo\n" +
+    "WQDjZpcutGI0eMJWCqUkv8pYZC2/wZU4htCve5nVJUU4t9uuo9ex7lnwlLWPvheQ\n" +
+    "jUBMgzSRsZ+zwaIusvufAAxiKK/cJm4ubZSZPIjBbfd4U7VPxtirP4Accydu7EK6\n" +
+    "eG/MZwtAMFNJxfxUR+/aYzJU/q1ePw7fWVHrpt58t/22CX2SJBEiUGmSmuyER4Ny\n" +
+    "DPw6d6mhvPUS1jRhIZ9A81ht8MOX7VL5uVp307rt7o5vRpV1mo0iPiRHzGscMpJn\n" +
+    "AP36klEAUNTf0uLTKZa7KHiwhn5iPmsCrENHkOKJjxhRrqHjD2wy3YHs3ow2voyY\n" +
+    "Ua4Cids+c1hvRkNEDGNHm4+rKGFOGOsG/ZU7uj/6gflO4JXxNGiyTLflqMdWBvow\n" +
+    "Zd7hk1zCaGAAn8nZ0hPweGxQ4Q30I9IBZrimGxB0vjiUqNio9+qMf33dCHFJEuut\n" +
+    "ZGJMaUGVaPhXQcTy4uD5hzsPZV5xcsU4H3vBYyBcZgrusJ6OOgkuZQaU7p8rWQWr\n" +
+    "bUEVbXuZdwEmxsCe7H/vEVv5+aA4sF4kWnMMFL7/LIYaiEzkTqdJlRv/KyJJgcAH\n" +
+    "hg2BvR3XTAq8wiX0C98CdmTbsx2eyQdj5tCU606rEohFLKUxWkJYAKxCiUbxGGpI\n" +
+    "RheVmxkef9ErxJiq7hsAsGrSJvMtJuDKIasnD14SOEwD/7jRAq6WdL9VLpxtzlOw\n" +
+    "pWnIl8kUCO3WoaG9Jf+ZTIv2hnxJhaSzYrdXzGPNnaWKhBlwnXJRvQEdrIxZOimP\n" +
+    "FujZhqbKUDbYAcqTkoQ=\n" +
+    "-----END ENCRYPTED PRIVATE KEY-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV\n" +
+    "bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD\n" +
+    "VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh\n" +
+    "Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx\n" +
+    "EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu\n" +
+    "a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw\n" +
+    "FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\n" +
+    "MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d\n" +
+    "ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy\n" +
+    "q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2\n" +
+    "TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto\n" +
+    "TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA\n" +
+    "YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD\n" +
+    "N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v\n" +
+    "iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh\n" +
+    "IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv\n" +
+    "6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG\n" +
+    "qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa\n" +
+    "HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru\n" +
+    "n3MVF9w=\n" +
+    "-----END CERTIFICATE-----";
+
+    private static final String encoded_certificates =
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV\n" +
+    "bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD\n" +
+    "VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh\n" +
+    "Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx\n" +
+    "EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu\n" +
+    "a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw\n" +
+    "FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\n" +
+    "MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d\n" +
+    "ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy\n" +
+    "q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2\n" +
+    "TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto\n" +
+    "TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA\n" +
+    "YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD\n" +
+    "N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v\n" +
+    "iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh\n" +
+    "IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv\n" +
+    "6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG\n" +
+    "qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa\n" +
+    "HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru\n" +
+    "n3MVF9w=\n" +
+    "-----END CERTIFICATE-----";
+
+    private static final String encoded_key =
+    "-----BEGIN PRIVATE KEY-----\n" +
+    "MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCOSZVf8dLj1xLw\n" +
+    "efqjbogbAwhwRXd3tmEfQY1zHyudJF3XR1T0Xp26BKNvAYUxxZRDNg16M3prPRZv\n" +
+    "wkhDJdE9NaN+BpZPJUavRsZOfGDq/CHN8j/2PPKrn3G/b065JjV3GZjfV/Sln047\n" +
+    "DeZptaSyOg7ZN7F8qjGqxEcnw+szV/wTzhnjGjZMlcbOm4jFGQAn6xUOooQCGsoB\n" +
+    "9FgxKB0nvNG3xVe/2eCNfbS4DabT3Y1wfQqZ62hOa5ZS0rwT+pw3tQs3zFFY1Sfi\n" +
+    "G7qYbqZrKTheWIZGojVVm6mqH4yA2ofOe5N3RsBitCwU/D0dpnaG9Wcl98nmXueM\n" +
+    "B6Rk04v7AgMBAAECggEAYnxIKjrFz/JkJ5MmiszM5HV698r9YB0aqHnFIHPoykIL\n" +
+    "uiCjiumantDrFsCkosixULwvI/BRwbxstTpyrheU9psT6P1CONICVPvV8ylgJAYU\n" +
+    "l+ofn56cEXKxVuICSWFLDH7pM1479g+IJJQAchbKQpqxAGTuMu3SpvJolfuj5srt\n" +
+    "bM7/RYhJFLwDuvHNA3ivlogMneItP03+C25aaxstM+lBuBf68+n78zMgSvt6J/6Y\n" +
+    "G2TOMKnxveMlG2qu9l2lAw/2i8daG/qre08nTH7wpRx0gZLZqNpe45exkrzticzF\n" +
+    "FgWYjG2K2brX21jqHroFgMhdXF7zhhRgLoIeC0BrIQKBgQDCfGfWrJESKBbVai5u\n" +
+    "7wqD9nlzjv6N6FXfTDOPXO1vz5frdvtLVWbs0SMPy+NglkaZK0iqHvb9mf2of8eC\n" +
+    "0D5cmewjn7WCDBQMypIMYgT912ak/BBVuGXcxb6UgD+xARfSARo2C8NG1hfprw1W\n" +
+    "ad14CjS5xhFMs44HpVYhI7iPYwKBgQC7SqVG/b37vZ7CINemdvoMujLvvYXDJJM8\n" +
+    "N21LqNJfVXdukdH3T0xuLnh9Z/wPHjJDMF/9+1foxSEPHijtyz5P19EilNEC/3qw\n" +
+    "fI19+VZoY0mdhPtXSGzy+rbTE2v71QgwFLizSos14Gr+eNiIjF7FYccK05++K/zk\n" +
+    "cd8ZA3bwiQKBgQCl+HTFBs9mpz+VMOAfW2+l3hkXPNiPUc62mNkHZ05ZNNd44jjh\n" +
+    "uSf0wSUiveR08MmevQlt5K7zDQ8jVKh2QjB15gVXAVxsdtJFeDnax2trFP9LnLBz\n" +
+    "9sE2/qn9INU5wK0LUlWD+dXUBbCyg+jl7cJKRqtoPldVFYYHkFlIPqup8QKBgHXv\n" +
+    "hyuw1FUVDkdHzwOvn70r8q8sNHKxMVWVwWkHIZGOi+pAQGrusD4hXRX6yKnsZdIR\n" +
+    "QCD6iFy25R5T64nxlYdJaxPPid3NakB/7ckJnPOWseBSwMIxhQlr/nvjmve1Kba9\n" +
+    "FaEwq4B9lGIxToiNe4/nBiM3JzvlDxX67nUdzWOhAoGAdFvriyvjshSJ4JHgIY9K\n" +
+    "37BVB0VKMcFV2P8fLVWO5oyRtE1bJhU4QVpQmauABU4RGSojJ3NPIVH1wxmJeYtj\n" +
+    "Q3b7EZaqI6ovna2eK2qtUx4WwxhRaXTT8xueBI2lgL6sBSTGG+K69ZOzGQzG/Mfr\n" +
+    "RXKInnLInFD9JD94VqmMozo=\n" +
+    "-----END PRIVATE KEY-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV\n" +
+    "bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD\n" +
+    "VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh\n" +
+    "Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx\n" +
+    "EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu\n" +
+    "a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw\n" +
+    "FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\n" +
+    "MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d\n" +
+    "ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy\n" +
+    "q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2\n" +
+    "TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto\n" +
+    "TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA\n" +
+    "YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD\n" +
+    "N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v\n" +
+    "iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh\n" +
+    "IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv\n" +
+    "6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG\n" +
+    "qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa\n" +
+    "HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru\n" +
+    "n3MVF9w=\n" +
+    "-----END CERTIFICATE-----";
+    private static final String encoded_unencrypted_ec_private_key =
+    "-----BEGIN PRIVATE KEY-----\n" +
+    "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgMLP6H2Wdl28J5PHU\n" +
+    "gMLApCsjONhbyMd5br0byJaQpXShRANCAASmX26IPehdE1wdLW2fVndT9QbjURro\n" +
+    "h74aMnzlmq8GIBWnRzpd+JVJlHgeWLZIDwapthGCYUGivtH27wiO3g7d\n" +
+    "-----END PRIVATE KEY-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIBizCCATACCQCtgEKhNta70DAKBggqhkjOPQQDAjBNMQswCQYDVQQGEwJVUzEL\n" +
+    "MAkGA1UECAwCQ0ExETAPBgNVBAcMCFNhbiBKb3NlMREwDwYDVQQKDAhQZXJzb25h\n" +
+    "bDELMAkGA1UECwwCSVQwHhcNMjExMDE5MDAzMDU4WhcNMjIxMDE0MDAzMDU4WjBN\n" +
+    "MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCFNhbiBKb3NlMREw\n" +
+    "DwYDVQQKDAhQZXJzb25hbDELMAkGA1UECwwCSVQwWTATBgcqhkjOPQIBBggqhkjO\n" +
+    "PQMBBwNCAASmX26IPehdE1wdLW2fVndT9QbjURroh74aMnzlmq8GIBWnRzpd+JVJ\n" +
+    "lHgeWLZIDwapthGCYUGivtH27wiO3g7dMAoGCCqGSM49BAMCA0kAMEYCIQDsNMGL\n" +
+    "b4+BEhgNXaXyHWkezUO/3hCmLDw2gUdwMXG+JQIhAIAm8wALKbb9jJDgFQTHyqGJ\n" +
+    "AVAkzYOwmRMYC9BHKjNs\n" +
+    "-----END CERTIFICATE-----";
+    private static final String encoded_encrypted_key_with_multiplecerts_in_chain =
+    "-----BEGIN ENCRYPTED PRIVATE KEY-----\n" +
+    "MIIE6TAbBgkqhkiG9w0BBQMwDgQI4QuRiKYzf88CAggABIIEyPRVmPp38SIFr8H3\n" +
+    "wi+oc6b+HJH7SPflXO6XZe4Tignw/aSyBTsLm2dWrzojRAYMIRd1xC7yQ2ffYrvx\n" +
+    "uoYbtOQeAminNqvwXdRTnwu6oC0rxdBT8RQ9NK7xL2tQyD/shmOeTJG/glXxaeqS\n" +
+    "rT0CZ5P5GJh6xdIWLEu3lEa3NSWVFE2YacUphmxBoaWjBjsJfWTgkF665SgP+2lh\n" +
+    "8R2WTcHrHjD8jR4jHB03wlup0LOmOwzplUmqHB9TyuA4wF6tlJajwBcPa0PNI6ny\n" +
+    "e9YcdcRr7Y0IxnPQr7PhQNV5AQb9TivwX4WaZxR+BXtwMglp+mz0ohjwLS3z6pqr\n" +
+    "tLrFhv2qcacSl+CKukFb9umV/QBkUk/iu+jwLcNJKPC965GWdieNbO0akBQpQsUN\n" +
+    "mqaF9DYHogW5lRnybl8WWPIR8tXmSCbSUIgzw4lRK+o15I4vaMI0NfkwFD/2y1sn\n" +
+    "t3m9LnVBukkpx3g/CPKd9PbZZeWpOTrnRJQfOu9Fj2lmkpGp0peCBqLJpO0pieVl\n" +
+    "87EQ0ZCErtAGLGIAhWnDUqRK0MaWZ+DMQNKYn5klF4YTVBkfRc9tQbIgBaa77wvz\n" +
+    "gvVWBuJtTFpCt9c8jByTH1gLbchC4bhLsy1nO7moevypMmNW4rqw9x5f0EIR3zCU\n" +
+    "L5/buoIh91TG5JB7BaIbVHtbB/Y2siARRXJibuw3ChBjqPOfzQ66j//NCMqhfTwT\n" +
+    "x2wn7L1BB4xyLJgVW9803FUTzaaL3EvJjzpdvrGC7vzcB6Jd0La9cjHhWSAPOKro\n" +
+    "nD9XPCbgLs10vW9g1Tc8drnZklhw6f7xrIQhWFg6VlwmVpvCQhEpX48rCCo2PH9X\n" +
+    "uzeJA+oqFEH3zfDp0r+q6jbAl+5TkkbBBgC2GCoI1vTYSKaWjeKKHkgzGG0QQLAz\n" +
+    "YXWMXvWKOME4wVPkeVxJv80RqDv0JsoOrnVoaFAzAHJMWa7UZQnjkrbVz/y8ZbV4\n" +
+    "oLJjQjvuOnU2PoXXs6SXbzOs1xx5zbX1UUH3Wc7/CCaUec2hemQJ5m6b1XJciyrY\n" +
+    "GHpMKWtXky9Mo1ruTP7ZH1nk03b4PTObKSx2gQD5AZ/ASuTeahMqMb/2PJkDkpHA\n" +
+    "sy8b1zOn2YTbf4K6NWVNIOkiaApmKhhX0Af6Lg8Wr2ymRTXdp/Um8f+SQLADpB/F\n" +
+    "xOydEN622wmihKDge9DrUFqPG/bdIiRGLXLg8efNboC6/cn/i/sheO7/YlrvcUNo\n" +
+    "qxDa/Bb1N/DgmtgAQ1ZP+AKjk6FKkwZRF1X/VZkZ6auscDlaPetF7razPeIJUrKN\n" +
+    "z/x4AD2txGYKmeYztYR577hPXBw+PPKdggRhIugb6z5Tk89C2pDEwfnByA/wcGJr\n" +
+    "w5avxrubosPrp0QtJpZMzouOvcD52VUiZzDfu9dqI/hpinyt5rETj5E19qxBjIZt\n" +
+    "X3Nq5lY2ktbyqVIo8Z8w4EUU+3XHZKqDwjyYvjxCxv5lVVnqvQrH9h3kENBMrPYQ\n" +
+    "4XonQHpUGG7g7pA3ylmHi+nEedr0H5qKHzyFZlRdI7CLVNoAtBSdwvmtGd2cVVXU\n" +
+    "EaToKNbHPXXYYPX/oVAWZYwD7PHXIRJkiEZnrFARNhLypicU7yjvejUPXcVy5HMh\n" +
+    "XqEbrODPp4VXfbYhVg==\n" +
+    "-----END ENCRYPTED PRIVATE KEY-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDXjCCAkYCAhI0MA0GCSqGSIb3DQEBBAUAMHcxCzAJBgNVBAYTAlVTMRMwEQYD\n" +
+    "VQQIDApDYWxpZm9ybmlhMREwDwYDVQQHDAhTYW4gSm9zZTEXMBUGA1UECgwOUGVy\n" +
+    "c29uYWwsIEluYy4xEDAOBgNVBAsMB1Jvb3QgQ0ExFTATBgNVBAMMDG15ZG9tYWlu\n" +
+    "LmNvbTAeFw0yMTExMjIyMjQ5MzlaFw0yMjExMjIyMjQ5MzlaMHIxCzAJBgNVBAYT\n" +
+    "AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMREwDwYDVQQHDAhTYW4gSm9zZTEXMBUG\n" +
+    "A1UECgwOUGVyc29uYWwsIEluYy4xCzAJBgNVBAsMAklUMRUwEwYDVQQDDAxteWRv\n" +
+    "bWFpbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC5fdA7wwD9\n" +
+    "9e5RcdLscvGB+hqJUEHuNC53SYKg5X4Sf0H4ExQUbsy8UaoWzWHhgGbCtTvUVavl\n" +
+    "72xsO74ei0EblopW7QknF0kaTO8Vi3mxhUAdtZFLG/o0NS9J16HdGDGojJwuqU9+\n" +
+    "sMQt1w0HCTMlriELnxaUFKP7M9b0uK5VODEKJ38QKNGXUDt66D7BVYeT/6hz2cXK\n" +
+    "QWDoHk/JadALSzW5ES8KIHfxCLnl2TcKxQhJ4CnL8qeGvc8N3VyTh2AXajaJW5RB\n" +
+    "8Oy4CVoYxcdmP1IapxCD+yNcmNt9XpUTD+6eM5gnvtbye+MSfwPz2MW+fWEDZXOv\n" +
+    "3VxhJyTRFNVTAgMBAAEwDQYJKoZIhvcNAQEEBQADggEBADYK/pn6QG7bvUL0Xnnw\n" +
+    "1KPf1nx36gfJE2V0bNk4uyNNeYufMKS8gPLzC+a3RigCEDc+hIZFE5BJexHd7DXA\n" +
+    "CWgHZJtdjM/Xlgoxbf1yfGV3DWeIZlNFSFZujBIpbm1Ug2BAeV31YRWODPZlUSEZ\n" +
+    "0jv8NEs8/oEz9bM4jwRdn09lo4D9hE6o8qDnrzmN2LBZP5dDIJ6g/M+mq/SJFnho\n" +
+    "qBrfUABZhbgk2+tkZ89OI2xpASCLo6X/vqua2iho6km3x+cz6EI9BbvVr6xOOdVK\n" +
+    "m6gs/Bi4MGTh35jdmvyXoyBUOd1w3yBBj86qbEt2ZHYqreRTxntQYx06598Q9Dsi\n" +
+    "xdg=\n" +
+    "-----END CERTIFICATE-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDajCCAlICCQD/7mxPcMTPIDANBgkqhkiG9w0BAQsFADB3MQswCQYDVQQGEwJV\n" +
+    "UzETMBEGA1UECAwKQ2FsaWZvcm5pYTERMA8GA1UEBwwIU2FuIEpvc2UxFzAVBgNV\n" +
+    "BAoMDlBlcnNvbmFsLCBJbmMuMRAwDgYDVQQLDAdSb290IENBMRUwEwYDVQQDDAxt\n" +
+    "eWRvbWFpbi5jb20wHhcNMjExMTIyMjExODAwWhcNNDkwNDA5MjExODAwWjB3MQsw\n" +
+    "CQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTERMA8GA1UEBwwIU2FuIEpv\n" +
+    "c2UxFzAVBgNVBAoMDlBlcnNvbmFsLCBJbmMuMRAwDgYDVQQLDAdSb290IENBMRUw\n" +
+    "EwYDVQQDDAxteWRvbWFpbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\n" +
+    "AoIBAQCkIwuNGv3ckew/o2UwaDlYgXH9bh1jap4ZCb6qpjvR3tq9nCerY6XMli0Z\n" +
+    "Xxg0wMHDNUr/jmVYIdQjbz0DVNz/l6ZBJHzHCEgqR40pNM3NgC5sDyuNhF3WLNvj\n" +
+    "WgHEwYosfb/9kFRjKUPqqtJ0ccj87OP3XrE/4epCTdJdmugroAQSpXt1ZZfwwPO4\n" +
+    "K27DzMD9W01EmeLcUhMfrpUnKGCfL22c0sZZm/6Khk4BExC3pSILP/NREKeUEAHw\n" +
+    "+rxhNqbUyD/e4/DutdtJ5zONA+GVVGYCpu1Iy0W78Jve4MD2/TFPcEzf5omiWpPz\n" +
+    "WjpOWayD43ur0SZnYJ5haUlZ+bSLAgMBAAEwDQYJKoZIhvcNAQELBQADggEBABqN\n" +
+    "/eb+mKEw2zklPZuzy5uhOc7ImG2LP/ha3oc2ntwkqZQ2LmVA/2ZNVfcNrRRngIfn\n" +
+    "Ir9Kual7djmKmIQvanpnSyXOKOlIiuN0VOrewgzlBZZwlFJL/AH1l7K9uZfBbV5h\n" +
+    "oFfaR9wc+vRGsg3nqO9+hEuk6xbp0jk8QCt26EVhEPlijxzbxTQYiNPNSLuf/kPW\n" +
+    "C9xtIKSWIDT4N6DtH7BtHGQKQdRJ2b4SSUF4joEmBe6jcrLBeDybmuFtKqlVJKUk\n" +
+    "tzBd9CPseqMML1c518KzxlSkXNxTCa7PWEnuN5atLZ+pGGjxtGcDKkrZ9Cgi09G8\n" +
+    "MzB8b4C/goypyhBNlyI=\n" +
+    "-----END CERTIFICATE-----";
+    private static final String encoded_encrypted_dsa_key =
+    "-----BEGIN ENCRYPTED PRIVATE KEY-----\n" +
+    "MIICkTAbBgkqhkiG9w0BBQMwDgQIL1GiUDca2x0CAggABIICcCrSiNBy5kZNC7RK\n" +
+    "fVF3IZ9Ecl00OYIjvBhlWGkaiNt9ZAeWPpYx57HSQAygzJ8ba+3jtz1dV8Bhz5D5\n" +
+    "4kggzolC820I/QLPxClH5R6ZPzDHGu/JFuNNZWASq2JHZfolEP+itdnz0F6VvQx9\n" +
+    "imngOGIJMkRWIzvwuCnq8xpNSEJHJcs8kyLBP/3qT+kUiEwJ5KrmJ8DQsHluwpyo\n" +
+    "GoiGEtiA+MNUSzc/DkuLxhC45k7K4afe4QzpWl9eR/z91F9Q06jdc5mnUWke8qCg\n" +
+    "ZopBVWbSI6fMZVlqLxKeSFljDZ+moW+2/Lh8cjEvNMccVTwWQE7JH9RLnVogqixV\n" +
+    "HIpN58wrlHd4uMVH47wD18A11AGEbghO1MEShX4SCEJIdZsr67bNx8mIkhSWjqAx\n" +
+    "BT9OmITzdbnR9sHi4CyEWhGMAMDp7YBySwpt+U7Y6DvRwWJbXUaF8zYJUlrp2IbH\n" +
+    "qdSE+oJPKxGB1s2B5KGrJUA0JkElkBUYm6ghXZlTI32w8HoV9fDOhjx1ATu7SdZY\n" +
+    "8DX0mwVVdc88Msr5RPxdeB3V4yN2iFJs4i3usicPkB0N+29LJ/lKQlm/pia0yl9j\n" +
+    "yDNN3R0RiCJdYHme4t1PqRqeTfjMauz05ObennQmkzMxD8mlBZ0zhaKL5I4TuQod\n" +
+    "PITFgYihTR9OYfa8lryvHQNCIi5iZ6M9myLUxbjPoeVBdp8pSlMAjmekEHo47vn3\n" +
+    "7IGF7AfnKVNymc/1Kim3WQx8D7nryCb8EUyb7BuNA7izGKq+NW51l39J8RGVcFSx\n" +
+    "sVkpFUbqGXusUgLWWwi21EJHCJceFnOWJRgsoeqNeCt5VCUSag==\n" +
+    "-----END ENCRYPTED PRIVATE KEY-----\n" +
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIEeDCCBB4CCQD9UojL1A7cmzAJBgcqhkjOOAQDME0xCzAJBgNVBAYTAlVTMQsw\n" +
+    "CQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxETAPBgNVBAoMCFBlcnNvbmFs\n" +
+    "MQswCQYDVQQLDAJJVDAeFw0yMTEwMTgyMzU0MjRaFw0yMjEwMTgyMzU0MjRaME0x\n" +
+    "CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxETAP\n" +
+    "BgNVBAoMCFBlcnNvbmFsMQswCQYDVQQLDAJJVDCCA0YwggI5BgcqhkjOOAQBMIIC\n" +
+    "LAKCAQEAzxMsktQYQ06q71pwqWAa9YvDIkF34RJgQT5tqCZdzwtP7HXyrwaZBgLK\n" +
+    "oC4grIPKyUlPmIbm+oucucbTzDCYCx2d09VMVR471vqJi7vRCYnBIqMTTOSvbTGw\n" +
+    "8VuwA2prSt4TiqFYeqYM0Afv/KrI2MLNTP+z8RFfiLkijZIxZ9CzyHmTOEqHxOft\n" +
+    "Ln145NNcuPy3U7Bmh5k7i+RR+5jYweuLzOfTK4bTUnG111mjAhAeo+ST14ydfiuQ\n" +
+    "2e85UwLucVheh4REqm+n7g0k0B/+nG+9Os1nxfQVRVKHP4/iTrNLpf35EoPeh9XT\n" +
+    "D0BtV9lqQuttCB1aPLH66TKl9v4FowIhAMXhJ3LooBT7ypUWMtN07FvpPpoLERBr\n" +
+    "Pk9MeGwQnBPFAoIBAAcVYJ+RH1i8JFyD8MUwwOKgkmVzbRvc28B5F17oek76R6fi\n" +
+    "yyqyhHrkxmjwxktXuwRlWQBJqCTqpmMstLplqsVjcETHo7KRaHgDpT9tHf14PZvP\n" +
+    "qpoxYNpa5z4wPpOFhZ3K6VVaFUSlqSSHhaS1HqVzZC0FefFFd3TDKu0EWHDDkLM8\n" +
+    "luOxDUMydKpRtrVba+iF4kx9NcCyXjSMhFAOw3cWhGvoq6R3h4UkRJO67pzYtqK4\n" +
+    "yAETPsQKT0c8NUM+VUiPAHL/+f1EqaVtzatk9G0OHWZXgDxuhx2CAM6QfELew+ys\n" +
+    "D/QUUT0tXHQRxRCKmYE+uacwyQx5G0DeCHN0c9gDggEFAAKCAQBFGXF7flzfRH/8\n" +
+    "r/qDuNC+9y3fRtDWXaados+XQkzujizBBK9dkkAd/j/pgP5p7/bRvoeR507Oyrge\n" +
+    "6xBbfE7SlRu0cx08Ihlw7a6mCowMnP0psFBwW7npKJRYPXxRDpu9oUuuJHi1WYp7\n" +
+    "8/Ekg6hG21zAYO0JLGyU6aH5Rvuls754R+rxqveVqLiig5NTnfw7ymLb2kF1z8g7\n" +
+    "fDvX6OS242ceQIQHvq97cGnysEVSlavuHfbh3PKXklh91ip/BYuzanQjiEb97YIU\n" +
+    "DhMQyPGvk+iDdhD0PwgwOZB0P0mL7Xszw6p+lfzUc+g8jETjrnzksnVEg8On6q3Q\n" +
+    "RDJG9VnuMAkGByqGSM44BAMDSQAwRgIhAIay5hyZrwHslGXmTAx498903l7CQsdi\n" +
+    "fUrmGkxbPiZXAiEAitS8+gyG64c0jV7V/u8Z1NEL8MI47K2P9Jbn3cqzz3k=\n" +
+    "-----END CERTIFICATE-----";
+    private static final String invalid_encoded_private_key =
+    "-----BEGIN PRIVATE KEY-----\n" +
+    "MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCOSZVf8dLj1xLw\n" +
+    "efqjbogbAwhwRXd3tmEfQY1zHyudJF3XR1T0Xp26BKNvAYUxxZRDNg16M3prPRZv\n" +
+    "wkhDJdE9NaN+BpZPJUavRsZOfGDq/CHN8j/2PPKrn3G/b065JjV3GZjfV/Sln047\n" +
+    "DeZptaSyOg7ZN7F8qjGqxEcnw+szV/wTzhnjGjZMlcbOm4jFGQAn6xUOooQCGsoB\n" +
+    "9FgxKB0nvNG3xVe/2eCNfbS4DabT3Y1wfQqZ62hOa5ZS0rwT+pw3tQs3zFFY1Sfi\n" +
+    "G7qYbqZrKTheWIZGojVVm6mqH4yA2ofOe5N3RsBitCwU/D0dpnaG9Wcl98nmXueM\n" +
+    "B6Rk04v7AgMBAAECggEAYnxIKjrFz/JkJ5MmiszM5HV698r9YB0aqHnFIHPoykIL\n" +
+    "uiCjiumantDrFsCkosixULwvI/BRwbxstTpyrheU9psT6P1CONICVPvV8ylgJAYU\n" +
+    "l+ofn56cEXKxVuICSWFLDH7pM1479g+IJJQAchbKQpqxAGTuMu3SpvJolfuj5srt\n" +
+    "bM7/RYhJFLwDuvHNA3ivlogMneItP03+C25aaxstM+lBuBf68+n78zMgSvt6J/6Y\n" +
+    "G2TOMKnxveMlG2qu9l2lAw/2i8daG/qre08nTH7wpRx0gZLZqNpe45exkrzticzF\n" +
+    "FgWYjG2K2brX21jqHroFgMhdXF7zhhRgLoIeC0BrIQKBgQDCfGfWrJESKBbVai5u\n" +
+    "7wqD9nlzjv6N6FXfTDOPXO1vz5frdvtLVWbs0SMPy+NglkaZK0iqHvb9mf2of8eC\n" +
+    "0D5cmewjn7WCDBQMypIMYgT912ak/BBVuGXcxb6UgD+xARfSARo2C8NG1hfprw1W\n" +
+    "ad14CjS5xhFMs44HpVYhI7iPYwKBgQC7SqVG/b37vZ7CINemdvoMujLvvYXDJJM8\n" +
+    "N21LqNJfVXdukdH3T0xuLnh9Z/wPHjJDMF/9+1foxSEPHijtyz5P19EilNEC/3qw\n" +
+    "fI19+VZoY0mdhPtXSGzy+rbTE2v71QgwFLizSos14Gr+eNiIjF7FYccK05++K/zk\n" +
+    "cd8ZA3bwiQKBgQCl+HTFBs9mpz+VMOAfW2+l3hkXPNiPUc62mNkHZ05ZNNd44jjh\n" +
+    "uSf0wSUiveR08MmevQlt5K7zDQ8jVKh2QjB15gVXAVxsdtJFeDnax2trFP9LnLBz\n" +
+    "9sE2/qn9INU5wK0LUlWD+dXUBbCyg+jl7cJKRqtoPldVFYYHkFlIPqup8QKBgHXv\n" +
+    "hyuw1FUVDkdHzwOvn70r8q8sNHKxMVWVwWkHIZGOi+pAQGrusD4hXRX6yKnsZdIR\n" +
+    "QCD6iFy25R5T64nxlYdJaxPPid3NakB/7ckJnPOWseBSwMIxhQlr/nvjmve1Kba9\n" +
+    "FaEwq4B9lGIxToiNe4/nBiM3JzvlDxX67nUdzWOhAoGAdFvriyvjshSJ4JHgIY9K\n" +
+    "37BVB0VKMcFV2P8fLVWO5oyRtE1bJhU4QVpQmauABU4RGSojJ3NPIVH1wxmJeYtj\n" +
+    "Q3b7EZaqI6ovna2eK2qtUx4WwxhRaXTT8xueBI2lgL6sBSTGG+K69ZOzGQzG/Mfr\n" +
+    "RXKInnLInFD9JD94VqmMozoInvalidData=\n" +
+    "-----END PRIVATE KEY-----\n";
+    private static final String invalid_encoded_certificate =
+    "-----BEGIN CERTIFICATE-----\n" +
+    "MIIDkTCCAnmgAwIBAgIETxH5JDANBgkqhkiG9w0BAQsFADB5MRAwDgYDVQQGEwdV\n" +
+    "bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD\n" +
+    "VQQKEwdVbmtub3duMRQwEgYDVQQLDAtzc2xfdGVzdGluZzEZMBcGA1UEAxMQQXBh\n" +
+    "Y2hlIENhc3NhbmRyYTAeFw0xNjAzMTgyMTI4MDJaFw0xNjA2MTYyMTI4MDJaMHkx\n" +
+    "EDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25vd24xEDAOBgNVBAcTB1Vu\n" +
+    "a25vd24xEDAOBgNVBAoTB1Vua25vd24xFDASBgNVBAsMC3NzbF90ZXN0aW5nMRkw\n" +
+    "FwYDVQQDExBBcGFjaGUgQ2Fzc2FuZHJhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\n" +
+    "MIIBCgKCAQEAjkmVX/HS49cS8Hn6o26IGwMIcEV3d7ZhH0GNcx8rnSRd10dU9F6d\n" +
+    "ugSjbwGFMcWUQzYNejN6az0Wb8JIQyXRPTWjfgaWTyVGr0bGTnxg6vwhzfI/9jzy\n" +
+    "q59xv29OuSY1dxmY31f0pZ9OOw3mabWksjoO2TexfKoxqsRHJ8PrM1f8E84Z4xo2\n" +
+    "TJXGzpuIxRkAJ+sVDqKEAhrKAfRYMSgdJ7zRt8VXv9ngjX20uA2m092NcH0Kmeto\n" +
+    "TmuWUtK8E/qcN7ULN8xRWNUn4hu6mG6mayk4XliGRqI1VZupqh+MgNqHznuTd0bA\n" +
+    "YrQsFPw9HaZ2hvVnJffJ5l7njAekZNOL+wIDAQABoyEwHzAdBgNVHQ4EFgQUcdiD\n" +
+    "N6aylI91kAd34Hl2AzWY51QwDQYJKoZIhvcNAQELBQADggEBAG9q29ilUgCWQP5v\n" +
+    "iHkZHj10gXGEoMkdfrPBf8grC7dpUcaw1Qfku/DJ7kPvMALeEsmFDk/t78roeNbh\n" +
+    "IYBLJlzI1HZN6VPtpWQGsqxltAy5XN9Xw9mQM/tu70ShgsodGmE1UoW6eE5+/GMv\n" +
+    "6Fg+zLuICPvs2cFNmWUvukN5LW146tJSYCv0Q/rCPB3m9dNQ9pBxrzPUHXw4glwG\n" +
+    "qGnGddXmOC+tSW5lDLLG1BRbKv4zxv3UlrtIjqlJtZb/sQMT6WtG2ihAz7SKOBHa\n" +
+    "HOWUwuPTetWIuJCKP7P4mWWtmSmjLy+BFX5seNEngn3RzJ2L8uuTJQ/88OsqgGru\n" +
+    "n3MVF9wInvalidData=\n" +
+    "-----END CERTIFICATE-----";
+
+    @Test
+    public void readEncryptedKey() throws IOException, GeneralSecurityException
+    {
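+        // "cassandra" is the passphrase used by the encrypted test key material declared above.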
+        PrivateKey privateKey = PEMReader.extractPrivateKey(encoded_encrypted_key, "cassandra");
+        Assert.assertNotNull(privateKey);
+    }
+
+    @Test
+    public void readEncryptedDSAKey() throws IOException, GeneralSecurityException
+    {
+        PrivateKey privateKey = PEMReader.extractPrivateKey(encoded_encrypted_dsa_key, "mytest");
+        Assert.assertNotNull(privateKey);
+    }
+
+    @Test(expected = GeneralSecurityException.class)
+    public void readEncryptedDSAKeyWithBadPassword() throws IOException, GeneralSecurityException
+    {
+        try
+        {
+            PEMReader.extractPrivateKey(encoded_encrypted_dsa_key, "bad-password");
+        }
+        catch (GeneralSecurityException e)
+        {
+            Assert.assertTrue(e.getMessage().startsWith("Failed to decrypt the private key data. Either the password " +
+                                                        "provided for the key is wrong or the private key data is " +
+                                                        "corrupted. msg="));
+            throw e;
+        }
+    }
+
+    @Test(expected = GeneralSecurityException.class)
+    public void readInvalidEncryptedKey() throws IOException, GeneralSecurityException
+    {
+        // Test by injecting junk data in the given key and making it invalid
+        PrivateKey privateKey = PEMReader.extractPrivateKey(encoded_encrypted_key.replaceAll("\\s",
+                                                                                             String.valueOf(System.nanoTime())),
+                                                            "cassandra");
+        Assert.assertNotNull(privateKey);
+    }
+
+    @Test(expected = GeneralSecurityException.class)
+    public void readInvalidBase64PrivateKey() throws IOException, GeneralSecurityException
+    {
+        PEMReader.extractPrivateKey(invalid_encoded_private_key);
+    }
+
+    @Test
+    public void readUnencryptedKey() throws IOException, GeneralSecurityException
+    {
+        PrivateKey privateKey = PEMReader.extractPrivateKey(encoded_key);
+        Assert.assertNotNull(privateKey);
+    }
+
+    @Test
+    public void readUnencryptedECKey() throws IOException, GeneralSecurityException
+    {
+        PrivateKey privateKey = PEMReader.extractPrivateKey(encoded_unencrypted_ec_private_key);
+        Assert.assertNotNull(privateKey);
+    }
+
+    @Test
+    public void readCertChain() throws GeneralSecurityException
+    {
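+        // The encrypted-key PEM fixture is expected to carry exactly one certificate after the key block.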
+        Certificate[] certificates = PEMReader.extractCertificates(encoded_encrypted_key);
+        Assert.assertNotNull("CertChain must not be null", certificates);
+        Assert.assertTrue("CertChain must have only one certificate", certificates.length == 1);
+    }
+
+    @Test
+    public void readCertChainWithMoreThanOneCerts() throws GeneralSecurityException
+    {
+        Certificate[] certificates = PEMReader.extractCertificates(encoded_encrypted_key_with_multiplecerts_in_chain);
+        Assert.assertNotNull("CertChain must not be null", certificates);
+    }
+
+    @Test(expected = GeneralSecurityException.class)
+    public void readInvalidCertificate() throws GeneralSecurityException
+    {
+        // Test by injecting junk data into the given PEM content and making the certificates invalid
+        Certificate[] certificates = PEMReader.extractCertificates(encoded_encrypted_key.replaceAll("\\s",
+                                                                                                    String.valueOf(System.nanoTime())));
+        Assert.assertNotNull("CertChain must not be null", certificates);
+        Assert.assertTrue("CertChain must have only one certificate", certificates.length == 1);
+    }
+
+    @Test(expected = GeneralSecurityException.class)
+    public void readInvalidBase64Certificate() throws GeneralSecurityException
+    {
+        PEMReader.extractCertificates(invalid_encoded_certificate);
+    }
+
+    @Test
+    public void readTrustedCertificates() throws GeneralSecurityException
+    {
+        Certificate[] certificates = PEMReader.extractCertificates(encoded_certificates);
+        Assert.assertNotNull("Trusted certificate list must not be null", certificates);
+        Assert.assertTrue("Trusted certificate list must have only one certificate", certificates.length == 1);
+    }
+
+    @Test(expected = UnsupportedOperationException.class)
+    public void tamperSupportedAlgorithms() throws GeneralSecurityException
+    {
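+        // The supported-algorithms set is expected to be unmodifiable, so the first mutation attempt should throw UnsupportedOperationException.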
+        Set<String> original = PEMReader.SUPPORTED_PRIVATE_KEY_ALGORITHMS;
+        for (int i = 0; i < original.size(); i++)
+        {
+            original.remove("RSA");
+            original.remove("DSA");
+            original.remove("EC");
+            original.add("TAMPERED");
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/security/SSLFactoryTest.java b/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
index 4cbc095..e5aa4b1 100644
--- a/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
+++ b/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
@@ -18,10 +18,11 @@
 */
 package org.apache.cassandra.security;
 
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import java.io.IOException;
 import java.security.cert.CertificateException;
-import javax.net.ssl.TrustManagerFactory;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.commons.io.FileUtils;
 import org.junit.Assert;
@@ -30,16 +31,12 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import io.netty.handler.ssl.JdkSslContext;
-import io.netty.handler.ssl.OpenSsl;
-import io.netty.handler.ssl.OpenSslContext;
 import io.netty.handler.ssl.SslContext;
 import io.netty.handler.ssl.util.SelfSignedCertificate;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
-
-import static org.junit.Assert.assertArrayEquals;
+import org.apache.cassandra.config.ParameterizedClass;
 
 public class SSLFactoryTest
 {
@@ -69,34 +66,6 @@
                             .withTrustStorePassword("cassandra")
                             .withRequireClientAuth(false)
                             .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA");
-
-        SSLFactory.checkedExpiry = false;
-    }
-
-    @Test
-    public void getSslContext_OpenSSL() throws IOException
-    {
-        // only try this test if OpenSsl is available
-        if (!OpenSsl.isAvailable())
-        {
-            logger.warn("OpenSSL not available in this application, so not testing the netty-openssl code paths");
-            return;
-        }
-
-        EncryptionOptions options = addKeystoreOptions(encryptionOptions);
-        SslContext sslContext = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, true);
-        Assert.assertNotNull(sslContext);
-        Assert.assertTrue(sslContext instanceof OpenSslContext);
-    }
-
-    @Test
-    public void getSslContext_JdkSsl() throws IOException
-    {
-        EncryptionOptions options = addKeystoreOptions(encryptionOptions);
-        SslContext sslContext = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, false);
-        Assert.assertNotNull(sslContext);
-        Assert.assertTrue(sslContext instanceof JdkSslContext);
-        Assert.assertEquals(encryptionOptions.cipher_suites, sslContext.cipherSuites());
     }
 
     private ServerEncryptionOptions addKeystoreOptions(ServerEncryptionOptions options)
@@ -105,48 +74,14 @@
                       .withKeyStorePassword("cassandra");
     }
 
-    @Test(expected = IOException.class)
-    public void buildTrustManagerFactory_NoFile() throws IOException
+    private ServerEncryptionOptions addPEMKeystoreOptions(ServerEncryptionOptions options)
     {
-        SSLFactory.buildTrustManagerFactory(encryptionOptions.withTrustStore("/this/is/probably/not/a/file/on/your/test/machine"));
-    }
-
-    @Test(expected = IOException.class)
-    public void buildTrustManagerFactory_BadPassword() throws IOException
-    {
-        SSLFactory.buildTrustManagerFactory(encryptionOptions.withTrustStorePassword("HomeOfBadPasswords"));
-    }
-
-    @Test
-    public void buildTrustManagerFactory_HappyPath() throws IOException
-    {
-        TrustManagerFactory trustManagerFactory = SSLFactory.buildTrustManagerFactory(encryptionOptions);
-        Assert.assertNotNull(trustManagerFactory);
-    }
-
-    @Test(expected = IOException.class)
-    public void buildKeyManagerFactory_NoFile() throws IOException
-    {
-        EncryptionOptions options = addKeystoreOptions(encryptionOptions)
-                                    .withKeyStore("/this/is/probably/not/a/file/on/your/test/machine");
-        SSLFactory.buildKeyManagerFactory(options);
-    }
-
-    @Test(expected = IOException.class)
-    public void buildKeyManagerFactory_BadPassword() throws IOException
-    {
-        EncryptionOptions options = addKeystoreOptions(encryptionOptions)
-                                    .withKeyStorePassword("HomeOfBadPasswords");
-        SSLFactory.buildKeyManagerFactory(options);
-    }
-
-    @Test
-    public void buildKeyManagerFactory_HappyPath() throws IOException
-    {
-        Assert.assertFalse(SSLFactory.checkedExpiry);
-        EncryptionOptions options = addKeystoreOptions(encryptionOptions);
-        SSLFactory.buildKeyManagerFactory(options);
-        Assert.assertTrue(SSLFactory.checkedExpiry);
+        ParameterizedClass sslContextFactoryClass = new ParameterizedClass("org.apache.cassandra.security.PEMBasedSslContextFactory",
+                                                                           new HashMap<>());
+        return options.withSslContextFactory(sslContextFactoryClass)
+                      .withKeyStore("test/conf/cassandra_ssl_test.keystore.pem")
+                      .withKeyStorePassword("cassandra")
+                      .withTrustStore("test/conf/cassandra_ssl_test.truststore.pem");
     }
 
     @Test
@@ -159,17 +94,47 @@
 
             SSLFactory.initHotReloading(options, options, true);
 
-            SslContext oldCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl
-                                                                                                           .isAvailable());
+            SslContext oldCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
             File keystoreFile = new File(options.keystore);
 
             SSLFactory.checkCertFilesForHotReloading(options, options);
 
-            keystoreFile.setLastModified(System.currentTimeMillis() + 15000);
+            keystoreFile.trySetLastModified(System.currentTimeMillis() + 15000);
 
             SSLFactory.checkCertFilesForHotReloading(options, options);
-            SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl
-                                                                                                          .isAvailable());
+            SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
+
+            Assert.assertNotSame(oldCtx, newCtx);
+        }
+        catch (Exception e)
+        {
+            throw e;
+        }
+        finally
+        {
+            DatabaseDescriptor.loadConfig();
+        }
+    }
+
+    @Test
+    public void testPEMSslContextReload_HappyPath() throws IOException, InterruptedException
+    {
+        try
+        {
+            ServerEncryptionOptions options = addPEMKeystoreOptions(encryptionOptions)
+                                              .withInternodeEncryption(ServerEncryptionOptions.InternodeEncryption.all);
+
+            SSLFactory.initHotReloading(options, options, true);
+
+            SslContext oldCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
+            File keystoreFile = new File(options.keystore);
+
+            SSLFactory.checkCertFilesForHotReloading(options, options);
+
+            keystoreFile.trySetLastModified(System.currentTimeMillis() + 15000);
+
+            SSLFactory.checkCertFilesForHotReloading(options, options);
+            SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
 
             Assert.assertNotSame(oldCtx, newCtx);
         }
@@ -201,18 +166,16 @@
             ServerEncryptionOptions options = addKeystoreOptions(encryptionOptions);
 
             SSLFactory.initHotReloading(options, options, true);
-            SslContext oldCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl
-                                                                                                          .isAvailable());
+            SslContext oldCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
             File keystoreFile = new File(options.keystore);
 
             SSLFactory.checkCertFilesForHotReloading(options, options);
-            keystoreFile.setLastModified(System.currentTimeMillis() + 5000);
+            keystoreFile.trySetLastModified(System.currentTimeMillis() + 5000);
 
             ServerEncryptionOptions modOptions = new ServerEncryptionOptions(options)
                                                  .withKeyStorePassword("bad password");
             SSLFactory.checkCertFilesForHotReloading(modOptions, modOptions);
-            SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl
-                                                                                                          .isAvailable());
+            SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
 
             Assert.assertSame(oldCtx, newCtx);
         }
@@ -230,21 +193,19 @@
             ServerEncryptionOptions options = addKeystoreOptions(encryptionOptions);
 
             File testKeystoreFile = new File(options.keystore + ".test");
-            FileUtils.copyFile(new File(options.keystore),testKeystoreFile);
-            options = options.withKeyStore(testKeystoreFile.getPath());
+            FileUtils.copyFile(new File(options.keystore).toJavaIOFile(), testKeystoreFile.toJavaIOFile());
+            options = options.withKeyStore(testKeystoreFile.path());
 
 
             SSLFactory.initHotReloading(options, options, true);
-            SslContext oldCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl
-                                                                                                          .isAvailable());
+            SslContext oldCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
             SSLFactory.checkCertFilesForHotReloading(options, options);
 
-            testKeystoreFile.setLastModified(System.currentTimeMillis() + 15000);
-            FileUtils.forceDelete(testKeystoreFile);
+            testKeystoreFile.trySetLastModified(System.currentTimeMillis() + 15000);
+            FileUtils.forceDelete(testKeystoreFile.toJavaIOFile());
 
             SSLFactory.checkCertFilesForHotReloading(options, options);
-            SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl
-                                                                                                          .isAvailable());
+            SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
 
             Assert.assertSame(oldCtx, newCtx);
         }
@@ -255,7 +216,7 @@
         finally
         {
             DatabaseDescriptor.loadConfig();
-            FileUtils.deleteQuietly(new File(encryptionOptions.keystore + ".test"));
+            FileUtils.deleteQuietly(new File(encryptionOptions.keystore + ".test").toJavaIOFile());
         }
     }
 
@@ -267,7 +228,7 @@
                                     .withCipherSuites("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256");
 
         SslContext ctx1 = SSLFactory.getOrCreateSslContext(options, true,
-                                                           SSLFactory.SocketType.SERVER, OpenSsl.isAvailable());
+                                                           ISslContextFactory.SocketType.SERVER);
 
         Assert.assertTrue(ctx1.isServer());
         Assert.assertEquals(ctx1.cipherSuites(), options.cipher_suites);
@@ -275,9 +236,69 @@
         options = options.withCipherSuites("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256");
 
         SslContext ctx2 = SSLFactory.getOrCreateSslContext(options, true,
-                                                           SSLFactory.SocketType.CLIENT, OpenSsl.isAvailable());
+                                                           ISslContextFactory.SocketType.CLIENT);
 
         Assert.assertTrue(ctx2.isClient());
         Assert.assertEquals(ctx2.cipherSuites(), options.cipher_suites);
     }
+
+    @Test
+    public void testCacheKeyEqualityForCustomSslContextFactory()
+    {
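+        // Two CacheKeys built from identical options and factory parameters must compare equal.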
+        Map<String,String> parameters1 = new HashMap<>();
+        parameters1.put("key1", "value1");
+        parameters1.put("key2", "value2");
+        EncryptionOptions encryptionOptions1 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters1))
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(true)
+        .withRequireEndpointVerification(false);
+
+        SSLFactory.CacheKey cacheKey1 = new SSLFactory.CacheKey(encryptionOptions1, ISslContextFactory.SocketType.SERVER);
+
+        Map<String,String> parameters2 = new HashMap<>();
+        parameters2.put("key1", "value1");
+        parameters2.put("key2", "value2");
+        EncryptionOptions encryptionOptions2 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters2))
+        .withProtocol("TLSv1.1")
+        .withRequireClientAuth(true)
+        .withRequireEndpointVerification(false);
+
+        SSLFactory.CacheKey cacheKey2 = new SSLFactory.CacheKey(encryptionOptions2, ISslContextFactory.SocketType.SERVER);
+
+        Assert.assertEquals(cacheKey1, cacheKey2);
+    }
+
+    @Test
+    public void testCacheKeyInequalityForCustomSslContextFactory()
+    {
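+        // Differing sslContextFactory parameters must yield unequal CacheKeys.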
+        Map<String,String> parameters1 = new HashMap<>();
+        parameters1.put("key1", "value11");
+        parameters1.put("key2", "value12");
+        EncryptionOptions encryptionOptions1 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters1))
+        .withProtocol("TLSv1.1");
+
+        SSLFactory.CacheKey cacheKey1 = new SSLFactory.CacheKey(encryptionOptions1, ISslContextFactory.SocketType.SERVER);
+
+        Map<String,String> parameters2 = new HashMap<>();
+        parameters2.put("key1", "value21");
+        parameters2.put("key2", "value22");
+        EncryptionOptions encryptionOptions2 =
+        new EncryptionOptions()
+        .withSslContextFactory(new ParameterizedClass(DummySslContextFactoryImpl.class.getName(), parameters2))
+        .withProtocol("TLSv1.1");
+
+        SSLFactory.CacheKey cacheKey2 = new SSLFactory.CacheKey(encryptionOptions2, ISslContextFactory.SocketType.SERVER);
+
+        Assert.assertNotEquals(cacheKey1, cacheKey2);
+    }
 }
diff --git a/test/unit/org/apache/cassandra/serializers/TimestampSerializerTest.java b/test/unit/org/apache/cassandra/serializers/TimestampSerializerTest.java
index ac63f31..e8abbae 100644
--- a/test/unit/org/apache/cassandra/serializers/TimestampSerializerTest.java
+++ b/test/unit/org/apache/cassandra/serializers/TimestampSerializerTest.java
@@ -26,6 +26,8 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class TimestampSerializerTest
 {
     public static final long ONE_SECOND = 1000L;
@@ -200,7 +202,7 @@
     public void testNumeric()
     {
         // now (positive
-        final long now = System.currentTimeMillis();
+        final long now = currentTimeMillis();
         assertEquals(now, TimestampSerializer.dateStringToTimestamp(Long.toString(now)));
 
         // negative
@@ -226,7 +228,7 @@
     public void testNow()
     {
         final long threshold = 5;
-        final long now = System.currentTimeMillis();
+        final long now = currentTimeMillis();
         final long parsed = TimestampSerializer.dateStringToTimestamp("now");
         assertTrue("'now' timestamp not within expected tolerance.", now <= parsed && parsed <= now + threshold);
     }
diff --git a/test/unit/org/apache/cassandra/service/AbstractFilesystemOwnershipCheckTest.java b/test/unit/org/apache/cassandra/service/AbstractFilesystemOwnershipCheckTest.java
new file mode 100644
index 0000000..0fc8559
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/AbstractFilesystemOwnershipCheckTest.java
@@ -0,0 +1,544 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.Random;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.StartupChecksOptions;
+import org.apache.cassandra.exceptions.StartupException;
+import org.apache.cassandra.io.util.File;
+
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.DEFAULT_FS_OWNERSHIP_FILENAME;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.ERROR_PREFIX;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.INCONSISTENT_FILES_FOUND;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.INVALID_FILE_COUNT;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.INVALID_PROPERTY_VALUE;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.MISMATCHING_TOKEN;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.MISSING_PROPERTY;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.MULTIPLE_OWNERSHIP_FILES;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.NO_OWNERSHIP_FILE;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.READ_EXCEPTION;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.TOKEN;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.UNSUPPORTED_VERSION;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.VERSION;
+import static org.apache.cassandra.service.FileSystemOwnershipCheck.VOLUME_COUNT;
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.check_filesystem_ownership;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Ignore
+public abstract class AbstractFilesystemOwnershipCheckTest
+{
+    protected File tempDir;
+    protected String token;
+
+    protected StartupChecksOptions options = new StartupChecksOptions();
+
+    protected void setup()
+    {
+        cleanTempDir();
+        tempDir = new File(com.google.common.io.Files.createTempDir());
+        token = makeRandomString(10);
+        System.clearProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_FILENAME.getKey());
+        System.clearProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getKey());
+        System.clearProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_ENABLE.getKey());
+    }
+
+    static File writeFile(File dir, String filename, Properties props) throws IOException
+    {
+        File tokenFile = new File(dir, filename); //checkstyle: permit this instantiation
+        assertTrue(tokenFile.createFileIfNotExists());
+        try (OutputStream os = Files.newOutputStream(tokenFile.toPath()))
+        {
+            props.store(os, "Test properties");
+        }
+        assertTrue(tokenFile.isReadable());
+        return tokenFile;
+    }
+
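+    // Runs the check and asserts that it fails with the expected ERROR_PREFIX-prefixed message.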
+    private static void executeAndFail(FileSystemOwnershipCheck checker,
+                                       StartupChecksOptions options,
+                                       String messageTemplate,
+                                       Object...messageArgs)
+    {
+        try
+        {
+            checker.execute(options);
+            fail("Expected an exception but none thrown");
+        }
+        catch (StartupException e)
+        {
+            String expected = ERROR_PREFIX + String.format(messageTemplate, messageArgs);
+            assertEquals(expected, e.getMessage());
+        }
+    }
+
+    private static Properties makeProperties(int version, int volumeCount, String token)
+    {
+        Properties props = new Properties();
+        props.setProperty(VERSION, Integer.toString(version));
+        props.setProperty(VOLUME_COUNT, Integer.toString(volumeCount));
+        props.setProperty(TOKEN, token);
+        return props;
+    }
+
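+    // Convenience overload: writes a version-1 ownership file under the default filename.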
+    private static File writeFile(File dir, int volumeCount, String token) throws IOException
+    {
+        return AbstractFilesystemOwnershipCheckTest.writeFile(dir, DEFAULT_FS_OWNERSHIP_FILENAME, 1, volumeCount, token);
+    }
+
+    private static File writeFile(File dir, final String filename, int version, int volumeCount, String token)
+    throws IOException
+    {
+        return writeFile(dir, filename, AbstractFilesystemOwnershipCheckTest.makeProperties(version, volumeCount, token));
+    }
+
+    private static File mkdirs(File parent, String path)
+    {
+        File childDir = new File(parent, path); //checkstyle: permit this instantiation
+        assertTrue(childDir.tryCreateDirectories());
+        assertTrue(childDir.exists());
+        return childDir;
+    }
+
+    private static FileSystemOwnershipCheck checker(Supplier<Iterable<String>> dirs)
+    {
+        return new FileSystemOwnershipCheck(dirs);
+    }
+
+    private static FileSystemOwnershipCheck checker(File...dirs)
+    {
+        return checker(() -> Arrays.stream(dirs).map(File::absolutePath).collect(Collectors.toList()));
+    }
+
+    private static FileSystemOwnershipCheck checker(String...dirs)
+    {
+        return checker(() -> Arrays.asList(dirs));
+    }
+
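+    // Produces a random lowercase ASCII string for use as an ownership token.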
+    public static String makeRandomString(int length)
+    {
+        Random random = new Random();
+        char[] chars = new char[length];
+        for (int i = 0; i < length; ++i)
+            chars[i] = (char) ('a' + random.nextInt('z' - 'a' + 1));
+        return new String(chars);
+    }
+
+    protected void cleanTempDir()
+    {
+        if (tempDir != null && tempDir.exists())
+            delete(tempDir);
+    }
+
+    private void delete(File file)
+    {
+        file.trySetReadable(true);
+        file.trySetWritable(true);
+        file.trySetExecutable(true);
+        File[] files = file.tryList();
+        if (files != null)
+        {
+            for (File child : files)
+            {
+                delete(child);
+            }
+        }
+        file.delete();
+    }
+
+    @BeforeClass
+    public static void setupConfig()
+    {
+        // PathUtils touches StorageService which touches StreamManager which requires configs be setup
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    @After
+    public void teardown() throws IOException
+    {
+        cleanTempDir();
+    }
+
+    // tests for enabling/disabling/configuring the check
+    @Test
+    public void skipCheckDisabledIfSystemPropertyIsEmpty() throws Exception
+    {
+        // no exceptions thrown from the supplier because the check is skipped
+        options.disable(check_filesystem_ownership);
+        System.clearProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_ENABLE.getKey());
+        AbstractFilesystemOwnershipCheckTest.checker(() -> { throw new RuntimeException("FAIL"); }).execute(options);
+    }
+
+    @Test
+    public void skipCheckDisabledIfSystemPropertyIsFalseButOptionsEnabled() throws Exception
+    {
+        // no exceptions thrown from the supplier because the check is skipped
+        options.enable(check_filesystem_ownership);
+        System.setProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_ENABLE.getKey(), "false");
+        AbstractFilesystemOwnershipCheckTest.checker(() -> { throw new RuntimeException("FAIL"); }).execute(options);
+    }
+
+    @Test
+    public void checkEnabledButClusterPropertyIsEmpty()
+    {
+        System.setProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getKey(), "");
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(tempDir), options, MISSING_PROPERTY, CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getKey());
+    }
+
+    @Test
+    public void checkEnabledButClusterPropertyIsUnset()
+    {
+        Assume.assumeFalse(options.getConfig(check_filesystem_ownership).containsKey("ownership_token"));
+        System.clearProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getKey());
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(tempDir), options, MISSING_PROPERTY, CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getKey());
+    }
+
+    // tests for presence/absence of files in dirs
+    @Test
+    public void noRootDirectoryPresent() throws Exception
+    {
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker("/no/such/location"), options, NO_OWNERSHIP_FILE, "'/no/such/location'");
+    }
+
+    @Test
+    public void noDirectoryStructureOrTokenFilePresent() throws Exception
+    {
+        // The root directory exists, but is completely empty
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(tempDir), options, NO_OWNERSHIP_FILE, quote(tempDir.absolutePath()));
+    }
+
+    @Test
+    public void directoryStructureButNoTokenFiles() throws Exception
+    {
+        File childDir = new File(tempDir, "cassandra/data"); //checkstyle: permit this instantiation
+        assertTrue(childDir.tryCreateDirectories());
+        assertTrue(childDir.exists());
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(childDir), options, NO_OWNERSHIP_FILE, quote(childDir.absolutePath()));
+    }
+
+    @Test
+    public void multipleFilesFoundInSameTree() throws Exception
+    {
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir, 1, token);
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir.parent(), 1, token);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir), options, MULTIPLE_OWNERSHIP_FILES, leafDir);
+    }
+
+    @Test
+    public void singleValidFileInEachTree() throws Exception
+    {
+        // Happy path. Each target directory has exactly 1 token file in the
+        // dir above it, they all contain the supplied token and the correct
+        // count.
+        File[] leafDirs = new File[] { AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d1/data"),
+                                       AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d2/commitlogs"),
+                                       AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d3/hints") };
+        for (File dir : leafDirs)
+            AbstractFilesystemOwnershipCheckTest.writeFile(dir.parent(), 3, token);
+        AbstractFilesystemOwnershipCheckTest.checker(leafDirs).execute(options);
+    }
+
+    @Test
+    public void multipleDirsSingleTree() throws Exception
+    {
+        // Happy path. Each target directory has exactly 1 token file in the
+        // dir above it (as they all share a single parent). Each contains
+        // the supplied token and the correct count (1 in this case).
+        File[] leafDirs = new File[] { AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d1/data"),
+                                       AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d2/commitlogs"),
+                                       AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d3/hints") };
+        AbstractFilesystemOwnershipCheckTest.writeFile(tempDir, 1, token);
+        AbstractFilesystemOwnershipCheckTest.checker(leafDirs).execute(options);
+    }
+
+    @Test
+    public void someDirsContainNoFile() throws Exception
+    {
+        File leafDir1 = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir1, 3, token);
+        File leafDir2 = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/commitlogs");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir2, 3, token);
+        File leafDir3 = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/hints");
+
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir1, leafDir2, leafDir3),
+                                                            options,
+                                                            NO_OWNERSHIP_FILE,
+                                                            quote(leafDir3.absolutePath()));
+    }
+
+    @Test
+    public void propsFileUnreadable() throws Exception
+    {
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        File tokenFile = AbstractFilesystemOwnershipCheckTest.writeFile(leafDir.parent(), 1, token);
+        assertTrue(tokenFile.trySetReadable(false));
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            READ_EXCEPTION,
+                                                            leafDir.absolutePath());
+    }
+
+    @Test
+    public void propsFileIllegalContent() throws Exception
+    {
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        File propsFile = new File(leafDir, DEFAULT_FS_OWNERSHIP_FILENAME); //checkstyle: permit this instantiation
+        assertTrue(propsFile.createFileIfNotExists());
+        try (OutputStream os = Files.newOutputStream(propsFile.toPath()))
+        {
+            os.write(AbstractFilesystemOwnershipCheckTest.makeRandomString(40).getBytes());
+        }
+        assertTrue(propsFile.isReadable());
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, VERSION),
+                                                            leafDir.absolutePath());
+    }
+
+    @Test
+    public void propsParentDirUnreadable() throws Exception
+    {
+        // The props file itself is readable, but its dir is not
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir, 1, token);
+        assertTrue(leafDir.trySetReadable(false));
+        AbstractFilesystemOwnershipCheckTest.checker(leafDir).execute(options);
+    }
+
+    @Test
+    public void propsParentDirUntraversable() throws Exception
+    {
+        // top level dir can't be listed, so no files are found
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir.parent(), 1, token);
+        assertTrue(tempDir.trySetExecutable(false));
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            NO_OWNERSHIP_FILE,
+                                                            quote(leafDir.absolutePath()));
+    }
+
+    @Test
+    public void overrideFilename() throws Exception
+    {
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), "other_file", AbstractFilesystemOwnershipCheckTest.makeProperties(1, 1, token));
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir), options, NO_OWNERSHIP_FILE, quote(leafDir.absolutePath()));
+        System.setProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_FILENAME.getKey(), "other_file");
+        AbstractFilesystemOwnershipCheckTest.checker(leafDir).execute(options);
+    }
+
+    // check consistency between discovered files
+    @Test
+    public void differentTokensFoundInTrees() throws Exception
+    {
+        File file1 = AbstractFilesystemOwnershipCheckTest.writeFile(AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d1/data"), 3, token);
+        File file2 = AbstractFilesystemOwnershipCheckTest.writeFile(AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d2/commitlogs"), 3, token);
+        File file3 = AbstractFilesystemOwnershipCheckTest.writeFile(AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d3/hints"), 3, "mismatchingtoken");
+        String errorSuffix = String.format("['%s', '%s'], ['%s']",
+                                           file1.absolutePath(),
+                                           file2.absolutePath(),
+                                           file3.absolutePath());
+
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(file1.parent(), file2.parent(), file3.parent()),
+                                                            options,
+                                                            INCONSISTENT_FILES_FOUND,
+                                                            errorSuffix);
+    }
+
+    @Test
+    public void differentExpectedCountsFoundInTrees() throws Exception
+    {
+        File file1 = AbstractFilesystemOwnershipCheckTest.writeFile(AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d1/data"), 1, token);
+        File file2 = AbstractFilesystemOwnershipCheckTest.writeFile(AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d2/commitlogs"), 2, token);
+        File file3 = AbstractFilesystemOwnershipCheckTest.writeFile(AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d3/hints"), 3, "mismatchingtoken");
+        String errorSuffix = String.format("['%s'], ['%s'], ['%s']",
+                                           file1.absolutePath(),
+                                           file2.absolutePath(),
+                                           file3.absolutePath());
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(file1.parent(), file2.parent(), file3.parent()),
+                                                            options,
+                                                            INCONSISTENT_FILES_FOUND,
+                                                            errorSuffix);
+    }
+
+    // tests on property values in discovered files
+    @Test
+    public void emptyPropertiesFile() throws Exception
+    {
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), DEFAULT_FS_OWNERSHIP_FILENAME, new Properties());
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, VERSION),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void missingVersionProp() throws Exception
+    {
+        Properties p = new Properties();
+        p.setProperty(VOLUME_COUNT, "1");
+        p.setProperty(TOKEN, "foo");
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), DEFAULT_FS_OWNERSHIP_FILENAME, p);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, VERSION),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void nonNumericVersionProp() throws Exception
+    {
+        Properties p = new Properties();
+        p.setProperty(VERSION, "abc");
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), DEFAULT_FS_OWNERSHIP_FILENAME, p);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, VERSION),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void unsupportedVersionProp() throws Exception
+    {
+        Properties p = new Properties();
+        p.setProperty(VERSION, "99");
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), DEFAULT_FS_OWNERSHIP_FILENAME, p);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(UNSUPPORTED_VERSION, "99"),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void missingVolumeCountProp() throws Exception
+    {
+        Properties p = new Properties();
+        p.setProperty(VERSION, "1");
+        p.setProperty(TOKEN, token);
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), DEFAULT_FS_OWNERSHIP_FILENAME, p);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, VOLUME_COUNT),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void nonNumericVolumeCountProp() throws Exception
+    {
+        Properties p = new Properties();
+        p.setProperty(VERSION, "1");
+        p.setProperty(VOLUME_COUNT, "bar");
+        p.setProperty(TOKEN, token);
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), DEFAULT_FS_OWNERSHIP_FILENAME, p);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, VOLUME_COUNT),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void missingTokenProp() throws Exception
+    {
+        Properties p = new Properties();
+        p.setProperty(VERSION, "1");
+        p.setProperty(VOLUME_COUNT, "1");
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        writeFile(leafDir.parent(), DEFAULT_FS_OWNERSHIP_FILENAME, p);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, TOKEN),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void emptyTokenProp() throws Exception
+    {
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir.parent(), 1, "");
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            String.format(INVALID_PROPERTY_VALUE, TOKEN),
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    @Test
+    public void mismatchingTokenProp() throws Exception
+    {
+        // Ownership token file exists in parent, but content doesn't match property
+        File leafDir = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "cassandra/data");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir.parent(), 1, AbstractFilesystemOwnershipCheckTest.makeRandomString(15));
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir),
+                                                            options,
+                                                            MISMATCHING_TOKEN,
+                                                            leafDir.parent().toPath().resolve(DEFAULT_FS_OWNERSHIP_FILENAME));
+    }
+
+    // Validate volume_count prop values match number of files found
+    @Test
+    public void expectedVolumeCountMoreThanActual() throws Exception
+    {
+        // The files on disk indicate that we should expect 2 ownership files,
+        // but we only read 1, implying a disk mount is missing
+        File[] leafDirs = new File[] { AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d1/data"),
+                                       AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d2/commitlogs"),
+                                       AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d3/hints") };
+        AbstractFilesystemOwnershipCheckTest.writeFile(tempDir, 2, token);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDirs), options, INVALID_FILE_COUNT);
+    }
+
+    @Test
+    public void expectedVolumeCountLessThanActual() throws Exception
+    {
+        // The files on disk indicate that we should expect 1 ownership file,
+        // but we read 2, implying an unexpected extra disk is mounted
+        File leafDir1 = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d1/data");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir1, 1, token);
+        File leafDir2 = AbstractFilesystemOwnershipCheckTest.mkdirs(tempDir, "d2/commitlogs");
+        AbstractFilesystemOwnershipCheckTest.writeFile(leafDir2, 1, token);
+        AbstractFilesystemOwnershipCheckTest.executeAndFail(AbstractFilesystemOwnershipCheckTest.checker(leafDir1, leafDir2), options, INVALID_FILE_COUNT);
+    }
+
+    private String quote(String toQuote)
+    {
+        return String.format("'%s'", toQuote);
+    }
+}
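
The tests above drive the filesystem-ownership check against small properties files written into each data volume's parent directory. As a rough, self-contained sketch of the invariants they exercise, assuming the file carries version, volume_count and token entries (the property names and checks here are illustrative, not necessarily Cassandra's constants):

    import java.io.*;
    import java.nio.file.*;
    import java.util.*;

    public class OwnershipFileSketch
    {
        static Properties read(Path file) throws IOException
        {
            Properties p = new Properties();
            try (InputStream in = Files.newInputStream(file))
            {
                p.load(in);
            }
            return p;
        }

        /** Returns an error message, or null if the discovered files are consistent. */
        static String validate(List<Path> ownershipFiles, String expectedToken) throws IOException
        {
            for (Path file : ownershipFiles)
            {
                Properties p = read(file);
                if (!"1".equals(p.getProperty("version")))
                    return "invalid or unsupported version in " + file;

                int volumeCount;
                try
                {
                    volumeCount = Integer.parseInt(p.getProperty("volume_count", ""));
                }
                catch (NumberFormatException e)
                {
                    return "invalid volume_count in " + file;
                }
                if (volumeCount != ownershipFiles.size())
                    return "expected " + volumeCount + " files but found " + ownershipFiles.size();

                if (!expectedToken.equals(p.getProperty("token")))
                    return "mismatching token in " + file;
            }
            return null;
        }

        public static void main(String[] args) throws IOException
        {
            Path dir = Files.createTempDirectory("ownership");
            Properties p = new Properties();
            p.setProperty("version", "1");
            p.setProperty("volume_count", "1");
            p.setProperty("token", "some-token");
            Path file = dir.resolve("ownership.properties");
            try (OutputStream out = Files.newOutputStream(file))
            {
                p.store(out, null);
            }
            System.out.println(validate(Collections.singletonList(file), "some-token")); // null: consistent
        }
    }

A check along these lines would fail emptyPropertiesFile (missing version), nonNumericVolumeCountProp and mismatchingTokenProp in the same way the assertions above expect.
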
diff --git a/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java b/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java
index ad680f5..9c1660a 100644
--- a/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java
+++ b/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java
@@ -27,18 +27,17 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Condition;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Uninterruptibles;
 
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.Condition;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -46,7 +45,7 @@
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
+import org.apache.cassandra.concurrent.ExecutorPlus;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -67,7 +66,6 @@
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.Refs;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
 import static org.apache.cassandra.repair.messages.RepairOption.DATACENTERS_KEY;
 import static org.apache.cassandra.repair.messages.RepairOption.FORCE_REPAIR_KEY;
@@ -76,6 +74,8 @@
 import static org.apache.cassandra.repair.messages.RepairOption.RANGES_KEY;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
 import static org.apache.cassandra.service.ActiveRepairService.getRepairedAt;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -282,14 +282,14 @@
     public void testSnapshotAddSSTables() throws Exception
     {
         ColumnFamilyStore store = prepareColumnFamilyStore();
-        UUID prsId = UUID.randomUUID();
+        TimeUUID prsId = nextTimeUUID();
         Set<SSTableReader> original = Sets.newHashSet(store.select(View.select(SSTableSet.CANONICAL, (s) -> !s.isRepaired())).sstables);
         Collection<Range<Token>> ranges = Collections.singleton(new Range<>(store.getPartitioner().getMinimumToken(), store.getPartitioner().getMinimumToken()));
         ActiveRepairService.instance.registerParentRepairSession(prsId, FBUtilities.getBroadcastAddressAndPort(), Collections.singletonList(store),
                                                                  ranges, true, System.currentTimeMillis(), true, PreviewKind.NONE);
         store.getRepairManager().snapshot(prsId.toString(), ranges, false);
 
-        UUID prsId2 = UUID.randomUUID();
+        TimeUUID prsId2 = nextTimeUUID();
         ActiveRepairService.instance.registerParentRepairSession(prsId2, FBUtilities.getBroadcastAddressAndPort(),
                                                                  Collections.singletonList(store),
                                                                  ranges,
@@ -326,7 +326,7 @@
                 .build()
                 .applyUnsafe();
             }
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
     }
 
@@ -383,7 +383,7 @@
         ExecutorService validationExecutor = ActiveRepairService.initializeExecutor(2, Config.RepairCommandPoolFullStrategy.reject);
         try
         {
-            Condition blocked = new SimpleCondition();
+            Condition blocked = newOneTimeCondition();
             CountDownLatch completed = new CountDownLatch(2);
 
             /*
@@ -433,8 +433,8 @@
         ExecutorService validationExecutor = ActiveRepairService.initializeExecutor(2, Config.RepairCommandPoolFullStrategy.queue);
         try
         {
-            Condition allSubmitted = new SimpleCondition();
-            Condition blocked = new SimpleCondition();
+            Condition allSubmitted = newOneTimeCondition();
+            Condition blocked = newOneTimeCondition();
             CountDownLatch completed = new CountDownLatch(5);
             ExecutorService testExecutor = Executors.newSingleThreadExecutor();
             for (int i = 0; i < 5; i++)
@@ -452,12 +452,12 @@
             allSubmitted.await(TASK_SECONDS + 1, TimeUnit.SECONDS);
 
             // Give the tasks we expect to execute immediately chance to be scheduled
-            Util.spinAssertEquals(2 , ((DebuggableThreadPoolExecutor) validationExecutor)::getActiveTaskCount, 1);
-            Util.spinAssertEquals(3 , ((DebuggableThreadPoolExecutor) validationExecutor)::getPendingTaskCount, 1);
+            Util.spinAssertEquals(2 , ((ExecutorPlus) validationExecutor)::getActiveTaskCount, 1);
+            Util.spinAssertEquals(3 , ((ExecutorPlus) validationExecutor)::getPendingTaskCount, 1);
 
             // verify that we've reached a steady state with 2 threads actively processing and 3 queued tasks
-            Assert.assertEquals(2, ((DebuggableThreadPoolExecutor) validationExecutor).getActiveTaskCount());
-            Assert.assertEquals(3, ((DebuggableThreadPoolExecutor) validationExecutor).getPendingTaskCount());
+            Assert.assertEquals(2, ((ExecutorPlus) validationExecutor).getActiveTaskCount());
+            Assert.assertEquals(3, ((ExecutorPlus) validationExecutor).getPendingTaskCount());
             // allow executing tests to complete
             blocked.signalAll();
             completed.await(TASK_SECONDS + 1, TimeUnit.SECONDS);
@@ -482,7 +482,7 @@
 
         public void run()
         {
-            Uninterruptibles.awaitUninterruptibly(blocked, TASK_SECONDS, TimeUnit.SECONDS);
+            blocked.awaitUninterruptibly(TASK_SECONDS, TimeUnit.SECONDS);
             complete.countDown();
         }
     }
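
For reference, the steady state asserted in the queue-strategy hunk above (2 active and 3 pending tasks on a 2-thread validation executor) can be reproduced with a plain JDK ThreadPoolExecutor; this is only a sketch of the shape of the test, not Cassandra's ExecutorPlus:

    import java.util.concurrent.*;

    public class BlockedPoolSketch
    {
        public static void main(String[] args) throws Exception
        {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS,
                                                             new LinkedBlockingQueue<>());
            CountDownLatch blocked = new CountDownLatch(1);
            CountDownLatch completed = new CountDownLatch(5);
            for (int i = 0; i < 5; i++)
            {
                pool.execute(() -> {
                    try { blocked.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
                    completed.countDown();
                });
            }
            Thread.sleep(100); // crude: give the first two tasks a chance to start
            System.out.println("active=" + pool.getActiveCount() + " queued=" + pool.getQueue().size()); // 2 / 3
            blocked.countDown();  // release the blocked tasks
            completed.await();    // all five eventually finish
            pool.shutdown();
        }
    }
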
diff --git a/test/unit/org/apache/cassandra/service/BootstrapTransientTest.java b/test/unit/org/apache/cassandra/service/BootstrapTransientTest.java
index 7bb2b87..ea96765 100644
--- a/test/unit/org/apache/cassandra/service/BootstrapTransientTest.java
+++ b/test/unit/org/apache/cassandra/service/BootstrapTransientTest.java
@@ -22,10 +22,8 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 
-import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableMap;
 
 import org.apache.cassandra.locator.EndpointsByReplica;
@@ -48,7 +46,6 @@
 import org.apache.cassandra.locator.ReplicaCollection;
 import org.apache.cassandra.locator.SimpleStrategy;
 import org.apache.cassandra.locator.TokenMetadata;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
 
 import static org.apache.cassandra.locator.Replica.fullReplica;
diff --git a/test/unit/org/apache/cassandra/service/ClientStateTest.java b/test/unit/org/apache/cassandra/service/ClientStateTest.java
new file mode 100644
index 0000000..56d0893
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/ClientStateTest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.collect.Iterables;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.auth.AuthCacheService;
+import org.apache.cassandra.auth.AuthKeyspace;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.DataResource;
+import org.apache.cassandra.auth.IResource;
+import org.apache.cassandra.auth.Permission;
+import org.apache.cassandra.auth.Roles;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableMetadata;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class ClientStateTest
+{
+    @BeforeClass
+    public static void beforeClass()
+    {
+        System.setProperty("org.apache.cassandra.disable_mbean_registration", "true");
+        SchemaLoader.prepareServer();
+        DatabaseDescriptor.setAuthFromRoot(true);
+        // create the system_auth keyspace so the IRoleManager can function as normal
+        SchemaLoader.createKeyspace(SchemaConstants.AUTH_KEYSPACE_NAME,
+                                    KeyspaceParams.simple(1),
+                                    Iterables.toArray(AuthKeyspace.metadata().tables, TableMetadata.class));
+
+        AuthCacheService.initializeAndRegisterCaches();
+    }
+
+    @AfterClass
+    public static void afterClass()
+    {
+        System.clearProperty("org.apache.cassandra.disable_mbean_registration");
+    }
+
+    @Test
+    public void permissionsCheckStartsAtHeadOfResourceChain()
+    {
+        // verify that when performing a permissions check, we start from the
+        // root IResource in the applicable hierarchy and proceed to the more
+        // granular resources until we find the required permission (or until
+        // we reach the end of the resource chain). This is because our typical
+        // usage is to grant blanket permissions on the root resources to users
+        // and so we save lookups, cache misses and cache space by traversing in
+        // this order. e.g. for DataResources, we typically grant perms on the
+        // 'data' resource, so when looking up a user's perms on a specific table
+        // it makes sense to follow: data -> keyspace -> table
+
+        final AtomicInteger getPermissionsRequestCount = new AtomicInteger(0);
+        final IResource rootResource = DataResource.root();
+        final IResource tableResource = DataResource.table("test_ks", "test_table");
+        final AuthenticatedUser testUser = new AuthenticatedUser("test_user")
+        {
+            public Set<Permission> getPermissions(IResource resource)
+            {
+                getPermissionsRequestCount.incrementAndGet();
+                if (resource.equals(rootResource))
+                    return Permission.ALL;
+
+                fail(String.format("Permissions requested for unexpected resource %s", resource));
+                // need a return to make the compiler happy
+                return null;
+            }
+
+            public boolean canLogin() { return true; }
+        };
+
+        Roles.cache.invalidate();
+
+        // finally, we need to configure CassandraAuthorizer so we don't short-circuit out of the authz process
+        DatabaseDescriptor.setAuthorizer(new AuthTestUtils.LocalCassandraAuthorizer());
+
+        // check permissions on the table, which should check for the root resource first
+        // & return successfully without needing to proceed further
+        ClientState state = ClientState.forInternalCalls();
+        state.login(testUser);
+        state.ensurePermission(Permission.SELECT, tableResource);
+        assertEquals(1, getPermissionsRequestCount.get());
+    }
+}
\ No newline at end of file
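
The long comment in permissionsCheckStartsAtHeadOfResourceChain explains why permission lookups walk the resource hierarchy from the root down. A minimal, self-contained sketch of that traversal order, using stand-in types rather than Cassandra's IResource/AuthenticatedUser:

    import java.util.ArrayDeque;
    import java.util.Collections;
    import java.util.Deque;
    import java.util.Set;
    import java.util.function.Function;

    public class ResourceChainSketch
    {
        static final class Resource
        {
            final String name;
            final Resource parent;
            Resource(String name, Resource parent) { this.name = name; this.parent = parent; }
        }

        static boolean hasPermission(Resource leaf, String permission, Function<Resource, Set<String>> permsFor)
        {
            Deque<Resource> chain = new ArrayDeque<>();
            for (Resource r = leaf; r != null; r = r.parent)
                chain.push(r);                                  // the root ends up on top
            while (!chain.isEmpty())
                if (permsFor.apply(chain.pop()).contains(permission))
                    return true;                                // granted at the broadest level: stop here
            return false;
        }

        public static void main(String[] args)
        {
            Resource data = new Resource("data", null);
            Resource ks = new Resource("data/test_ks", data);
            Resource table = new Resource("data/test_ks/test_table", ks);
            // a blanket grant on the root means per-keyspace/per-table lookups never happen
            Function<Resource, Set<String>> permsFor =
                r -> r == data ? Collections.singleton("SELECT") : Collections.<String>emptySet();
            System.out.println(hasPermission(table, "SELECT", permsFor)); // true, after a single lookup
        }
    }
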
diff --git a/test/unit/org/apache/cassandra/service/ClientWarningsTest.java b/test/unit/org/apache/cassandra/service/ClientWarningsTest.java
index ee652e2..09fda7c 100644
--- a/test/unit/org/apache/cassandra/service/ClientWarningsTest.java
+++ b/test/unit/org/apache/cassandra/service/ClientWarningsTest.java
@@ -27,6 +27,7 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.cql3.CQLTester;
@@ -38,7 +39,7 @@
 import org.apache.cassandra.transport.SimpleClient;
 import org.apache.cassandra.transport.messages.QueryMessage;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
 @RunWith(Parameterized.class)
@@ -60,7 +61,7 @@
     public static void setUp()
     {
         requireNetwork();
-        DatabaseDescriptor.setBatchSizeWarnThresholdInKB(1);
+        DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(1);
     }
 
     @Test
@@ -122,7 +123,7 @@
                 client.execute(query);
             }
             ColumnFamilyStore store = Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable());
-            store.forceBlockingFlush();
+            Util.flush(store);
 
             for (int i = 0; i < iterations; i++)
             {
@@ -132,7 +133,7 @@
                                                                     i), QueryOptions.DEFAULT);
                 client.execute(query);
             }
-            store.forceBlockingFlush();
+            Util.flush(store);
 
             {
                 QueryMessage query = new QueryMessage(String.format("SELECT * FROM %s.%s WHERE pk = 1",
diff --git a/test/unit/org/apache/cassandra/service/DiskFailurePolicyTest.java b/test/unit/org/apache/cassandra/service/DiskFailurePolicyTest.java
index c3b4dfa..b041a1c 100644
--- a/test/unit/org/apache/cassandra/service/DiskFailurePolicyTest.java
+++ b/test/unit/org/apache/cassandra/service/DiskFailurePolicyTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.service;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -38,6 +37,7 @@
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.KillerForTests;
@@ -104,7 +104,7 @@
                              { ignore, false, new CorruptSSTableException(new IOException(), "blah"), true, false, false},
                              { stop, false, new CorruptSSTableException(new IOException(), "blah"), true, false, false},
                              { stop_paranoid, false, new CorruptSSTableException(new IOException(), "blah"), false, false, false},
-                             { best_effort, false, new FSReadError(new IOException(new OutOfMemoryError("Java heap space test")), "best_effort_oom"), true, false, false},
+                             { best_effort, false, new FSReadError(new IOException(new OutOfMemoryError("Java heap space")), "best_effort_oom"), true, false, false},
                              { best_effort, false, new FSReadError(new IOException(), "best_effort_io_exception"), true, false, false},
                              }
         );
@@ -141,15 +141,20 @@
         }
         catch (OutOfMemoryError e)
         {
-            if (!e.getMessage().equals("Java heap space test"))
+            if (testPolicy == best_effort)
+            {
+                if (t.getCause().getCause() != e)
+                    throw e;
+            }
+            else
                 throw e;
         }
 
-        if (testPolicy == best_effort && ((FSReadError) t).path.getName().equals("best_effort_io_exception"))
+        if (testPolicy == best_effort && ((FSReadError) t).path.equals("best_effort_io_exception"))
             assertTrue(DisallowedDirectories.isUnreadable(new File("best_effort_io_exception")));
 
         // when the cause is an OOM, there is no reason to remove data
-        if (testPolicy == best_effort && ((FSReadError) t).path.getName().equals("best_effort_oom"))
+        if (testPolicy == best_effort && ((FSReadError) t).path.equals("best_effort_oom"))
             assertFalse(DisallowedDirectories.isUnreadable(new File("best_effort_oom")));
 
         assertEquals(expectJVMKilled, killerForTests.wasKilled());
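
The new branch above only swallows the OutOfMemoryError when it really is the cause nested inside the FSReadError, and the later assertion checks that, under the best_effort policy, such an OOM-caused read error does not get its directory disallowed. A minimal sketch of that kind of cause-chain inspection (an illustration, not Cassandra's actual error handler):

    public final class OomCauseSketch
    {
        /** True if any throwable in the cause chain of t is an OutOfMemoryError. */
        static boolean causedByOutOfMemory(Throwable t)
        {
            for (Throwable cause = t.getCause(); cause != null; cause = cause.getCause())
                if (cause instanceof OutOfMemoryError)
                    return true;
            return false;
        }

        public static void main(String[] args)
        {
            Throwable readError = new RuntimeException(new java.io.IOException(new OutOfMemoryError("Java heap space")));
            // an OOM-caused failure is not the disk's fault, so don't disallow the directory
            System.out.println(causedByOutOfMemory(readError)); // true
        }
    }
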
diff --git a/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java b/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java
index 02d625c..b6d039b 100644
--- a/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java
+++ b/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java
@@ -123,7 +123,7 @@
         PendingRangeCalculatorService.instance.blockUntilFinished();
 
         AbstractReplicationStrategy strategy;
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             strategy = getStrategy(keyspaceName, tmd);
             for (Token token : keyTokens)
diff --git a/test/unit/org/apache/cassandra/service/MoveTest.java b/test/unit/org/apache/cassandra/service/MoveTest.java
index 9777602..6dce8f3 100644
--- a/test/unit/org/apache/cassandra/service/MoveTest.java
+++ b/test/unit/org/apache/cassandra/service/MoveTest.java
@@ -23,14 +23,12 @@
 import java.util.*;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Condition;
 import java.util.function.Consumer;
 
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Multimap;
 
-import org.apache.cassandra.config.OverrideConfigurationLoader;
 import org.apache.cassandra.diag.DiagnosticEventService;
 import org.apache.cassandra.gms.GossiperEvent;
 import org.apache.cassandra.locator.EndpointsForRange;
@@ -40,16 +38,12 @@
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
 
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
 import org.apache.cassandra.locator.ReplicaCollection;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.marshal.BytesType;
@@ -194,7 +188,7 @@
                                                                                     .addPartitionKeyColumn("key", BytesType.instance)
                                                                                     .build()));
 
-        MigrationManager.announceNewKeyspace(keyspace);
+        SchemaTestUtil.announceNewKeyspace(keyspace);
     }
 
     private static Object[] configOptions(Integer[] replicas)
@@ -563,7 +557,7 @@
     private void assertPendingRanges(TokenMetadata tmd, Map<Range<Token>, EndpointsForRange> pendingRanges, String keyspaceName) throws ConfigurationException
     {
         boolean keyspaceFound = false;
-        for (String nonSystemKeyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String nonSystemKeyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             if(!keyspaceName.equals(nonSystemKeyspaceName))
                 continue;
@@ -632,7 +626,7 @@
         assertTrue(tmd.isMoving(hosts.get(MOVING_NODE)));
 
         AbstractReplicationStrategy strategy;
-        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
+        for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names())
         {
             strategy = getStrategy(keyspaceName, tmd);
             if(strategy instanceof NetworkTopologyStrategy)
diff --git a/test/unit/org/apache/cassandra/service/NativeTransportServiceTest.java b/test/unit/org/apache/cassandra/service/NativeTransportServiceTest.java
index e70ef0d..d5f825f 100644
--- a/test/unit/org/apache/cassandra/service/NativeTransportServiceTest.java
+++ b/test/unit/org/apache/cassandra/service/NativeTransportServiceTest.java
@@ -23,8 +23,6 @@
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
-import javax.xml.crypto.Data;
-
 import com.google.common.collect.Sets;
 import org.junit.After;
 import org.junit.BeforeClass;
diff --git a/test/unit/org/apache/cassandra/service/OptionalTasksTest.java b/test/unit/org/apache/cassandra/service/OptionalTasksTest.java
index 5d141a0..952abcd 100644
--- a/test/unit/org/apache/cassandra/service/OptionalTasksTest.java
+++ b/test/unit/org/apache/cassandra/service/OptionalTasksTest.java
@@ -26,10 +26,11 @@
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 
 import static org.apache.cassandra.SchemaLoader.standardCFMD;
@@ -48,7 +49,7 @@
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), standardCFMD(KEYSPACE, TABLE));
     }
-    
+
     @Test
     public void shouldIgnoreDroppedKeyspace()
     {
@@ -56,23 +57,24 @@
         TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
         ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(Objects.requireNonNull(metadata).id);
         Objects.requireNonNull(cfs).metric.coordinatorReadLatency.update(100, TimeUnit.NANOSECONDS);
-        
+
         // Remove the Keyspace name to make it invisible to the updater...
-        Keyspace removed = Schema.instance.removeKeyspaceInstance(KEYSPACE);
+        KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(KEYSPACE);
+        SchemaTestUtil.dropKeyspaceIfExist(KEYSPACE, true);
 
         try
         {
-            long originalValue = cfs.sampleReadLatencyNanos;
+            long originalValue = cfs.sampleReadLatencyMicros;
 
             // ...and ensure that the speculation threshold updater doesn't run.
             SPECULATION_THRESHOLD_UPDATER.run();
 
-            assertEquals(originalValue, cfs.sampleReadLatencyNanos);
+            assertEquals(originalValue, cfs.sampleReadLatencyMicros);
         }
         finally
         {
             // Restore the removed Keyspace to put things back the way we found them.
-            Schema.instance.storeKeyspaceInstance(removed);
+            SchemaTestUtil.addOrUpdateKeyspace(ksm, true);
         }
     }
 
@@ -84,11 +86,11 @@
         ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(Objects.requireNonNull(metadata).id);
         Objects.requireNonNull(cfs).metric.coordinatorReadLatency.update(100, TimeUnit.NANOSECONDS);
 
-        long originalValue = cfs.sampleReadLatencyNanos;
-        
+        long originalValue = cfs.sampleReadLatencyMicros;
+
         // ...and ensure that the speculation threshold updater runs.
         SPECULATION_THRESHOLD_UPDATER.run();
-        
-        assertNotEquals(originalValue, cfs.sampleReadLatencyNanos);
+
+        assertNotEquals(originalValue, cfs.sampleReadLatencyMicros);
     }
 }
diff --git a/test/unit/org/apache/cassandra/service/PartitionDenylistTest.java b/test/unit/org/apache/cassandra/service/PartitionDenylistTest.java
new file mode 100644
index 0000000..ed51518
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/PartitionDenylistTest.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.SchemaTestUtil;
+import org.apache.cassandra.schema.Tables;
+
+import static org.apache.cassandra.cql3.QueryProcessor.process;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+public class PartitionDenylistTest
+{
+    private final static String ks_cql = "partition_denylist_keyspace";
+
+    @BeforeClass
+    public static void init()
+    {
+        CQLTester.prepareServer();
+
+        KeyspaceMetadata schema = KeyspaceMetadata.create(ks_cql,
+                                                          KeyspaceParams.simple(1),
+                                                          Tables.of(
+            CreateTableStatement.parse("CREATE TABLE table1 ("
+                                       + "keyone text, "
+                                       + "keytwo text, "
+                                       + "qux text, "
+                                       + "quz text, "
+                                       + "foo text, "
+                                       + "PRIMARY KEY((keyone, keytwo), qux, quz) ) ", ks_cql).build(),
+            CreateTableStatement.parse("CREATE TABLE table2 ("
+                                       + "keyone text, "
+                                       + "keytwo text, "
+                                       + "keythree text, "
+                                       + "value text, "
+                                       + "PRIMARY KEY((keyone, keytwo), keythree) ) ", ks_cql).build(),
+            CreateTableStatement.parse("CREATE TABLE table3 ("
+                                       + "keyone text, "
+                                       + "keytwo text, "
+                                       + "keythree text, "
+                                       + "value text, "
+                                       + "PRIMARY KEY((keyone, keytwo), keythree) ) ", ks_cql).build()
+        ));
+        SchemaTestUtil.addOrUpdateKeyspace(schema, false);
+        DatabaseDescriptor.setPartitionDenylistEnabled(true);
+        DatabaseDescriptor.setDenylistRangeReadsEnabled(true);
+        DatabaseDescriptor.setDenylistConsistencyLevel(ConsistencyLevel.ONE);
+        DatabaseDescriptor.setDenylistRefreshSeconds(1);
+        StorageService.instance.initServer(0);
+    }
+
+    @Before
+    public void setup()
+    {
+        DatabaseDescriptor.setPartitionDenylistEnabled(true);
+        resetDenylist();
+
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('aaa', 'bbb', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('bbb', 'ccc', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('ccc', 'ddd', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('ddd', 'eee', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('eee', 'fff', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('fff', 'ggg', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('ggg', 'hhh', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('hhh', 'iii', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('iii', 'jjj', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('jjj', 'kkk', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+
+
+        for (int i = 0; i < 50; i++)
+            process(String.format("INSERT INTO " + ks_cql + ".table2 (keyone, keytwo, keythree, value) VALUES ('%d', '%d', '%d', '%d')", i, i, i, i), ConsistencyLevel.ONE);
+
+        for (int i = 0; i < 50; i++)
+            process(String.format("INSERT INTO " + ks_cql + ".table3 (keyone, keytwo, keythree, value) VALUES ('%d', '%d', '%d', '%d')", i, i, i, i), ConsistencyLevel.ONE);
+
+        denylist("table1", "bbb:ccc");
+        refreshList();
+    }
+
+
+    private static void denylist(String table, final String key)
+    {
+        StorageProxy.instance.denylistKey(ks_cql, table, key);
+    }
+
+    private static void refreshList()
+    {
+        StorageProxy.instance.loadPartitionDenylist();
+    }
+
+    /**
+     * @return Whether the *attempt* to remove the denylisted key and refresh succeeded. Doesn't necessarily indicate the key
+     * was previously blocked and found.
+     */
+    private static boolean removeDenylist(final String ks, final String table, final String key)
+    {
+        return StorageProxy.instance.removeDenylistKey(ks, table, key);
+    }
+
+    @Test
+    public void testRead()
+    {
+        process("SELECT * FROM " + ks_cql + ".table1 WHERE keyone='aaa' and keytwo='bbb'", ConsistencyLevel.ONE);
+    }
+
+    @Test
+    public void testReadDenylisted()
+    {
+        assertThatThrownBy(() -> process("SELECT * FROM " + ks_cql + ".table1 WHERE keyone='bbb' and keytwo='ccc'", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Unable to read denylisted partition");
+    }
+
+    @Test
+    public void testIsKeyDenylistedAPI()
+    {
+        Assert.assertTrue(StorageProxy.instance.isKeyDenylisted(ks_cql, "table1", "bbb:ccc"));
+        resetDenylist();
+        Assert.assertFalse(StorageProxy.instance.isKeyDenylisted(ks_cql, "table1", "bbb:ccc"));
+
+        // Confirm an add mutates cache state
+        denylist("table1", "bbb:ccc");
+        Assert.assertTrue(StorageProxy.instance.isKeyDenylisted(ks_cql, "table1", "bbb:ccc"));
+
+        // Confirm removal then mutates cache w/out explicit reload
+        StorageProxy.instance.removeDenylistKey(ks_cql, "table1", "bbb:ccc");
+        Assert.assertFalse(StorageProxy.instance.isKeyDenylisted(ks_cql, "table1", "bbb:ccc"));
+    }
+
+    @Test
+    public void testReadUndenylisted()
+    {
+        resetDenylist();
+        process("SELECT * FROM " + ks_cql + ".table1 WHERE keyone='ccc' and keytwo='ddd'", ConsistencyLevel.ONE);
+    }
+
+    @Test
+    public void testWrite()
+    {
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('eee', 'fff', 'ccc', 'ddd', 'v')", ConsistencyLevel.ONE);
+        process("DELETE FROM " + ks_cql + ".table1 WHERE keyone='eee' and keytwo='fff'", ConsistencyLevel.ONE);
+    }
+
+    @Test
+    public void testWriteDenylisted()
+    {
+        assertThatThrownBy(() -> process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('bbb', 'ccc', 'eee', 'fff', 'w')", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Unable to write to denylisted partition");
+    }
+
+    @Test
+    public void testCASWriteDenylisted()
+    {
+        assertThatThrownBy(() -> process("UPDATE " + ks_cql + ".table1 SET foo='w' WHERE keyone='bbb' AND keytwo='ccc' AND qux='eee' AND quz='fff' IF foo='v'", ConsistencyLevel.LOCAL_SERIAL))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Unable to CAS write to denylisted partition");
+    }
+
+    @Test
+    public void testWriteUndenylisted()
+    {
+        resetDenylist();
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('bbb', 'ccc', 'eee', 'fff', 'w')", ConsistencyLevel.ONE);
+    }
+
+    @Test
+    public void testRangeSlice()
+    {
+        UntypedResultSet rows;
+        rows = process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) < token('bbb', 'ccc')", ConsistencyLevel.ONE);
+        Assert.assertEquals(1, rows.size());
+
+        // 10 entries total in our table
+        rows = process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) > token('bbb', 'ccc')", ConsistencyLevel.ONE);
+        Assert.assertEquals(8, rows.size());
+
+        rows = process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) >= token('aaa', 'bbb') and token(keyone, keytwo) < token('bbb', 'ccc')", ConsistencyLevel.ONE);
+        Assert.assertEquals(1, rows.size());
+
+        rows = process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) > token('bbb', 'ccc') and token(keyone, keytwo) <= token('ddd', 'eee')", ConsistencyLevel.ONE);
+        Assert.assertEquals(2, rows.size());
+    }
+
+    @Test
+    public void testRangeDenylisted()
+    {
+        assertThatThrownBy(() -> process("SELECT * FROM " + ks_cql + ".table1", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Attempted to read a range containing 1 denylisted keys");
+    }
+
+    @Test
+    public void testRangeDenylisted2()
+    {
+        assertThatThrownBy(() -> process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) >= token('aaa', 'bbb') and token (keyone, keytwo) <= token('bbb', 'ccc')", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Attempted to read a range containing 1 denylisted keys");
+    }
+
+    @Test
+    public void testRangeDenylisted3()
+    {
+        assertThatThrownBy(() -> process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) >= token('bbb', 'ccc') and token (keyone, keytwo) <= token('ccc', 'ddd')", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Attempted to read a range containing 1 denylisted keys");
+    }
+
+    @Test
+    public void testRangeDenylisted4()
+    {
+        assertThatThrownBy(() -> process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) > token('aaa', 'bbb') and token (keyone, keytwo) < token('ccc', 'ddd')", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Attempted to read a range containing 1 denylisted keys");
+    }
+
+    @Test
+    public void testRangeDenylisted5()
+    {
+        assertThatThrownBy(() -> process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) > token('aaa', 'bbb')", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Attempted to read a range containing 1 denylisted keys");
+    }
+
+    @Test
+    public void testRangeDenylisted6()
+    {
+        assertThatThrownBy(() -> process("SELECT * FROM " + ks_cql + ".table1 WHERE token(keyone, keytwo) < token('ddd', 'eee')", ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Attempted to read a range containing 1 denylisted keys");
+    }
+
+    @Test
+    public void testInsertUnknownPKIsGraceful()
+    {
+        Assert.assertTrue(StorageProxy.instance.denylistKey(ks_cql, "table1", "hohoho"));
+    }
+
+    @Test
+    public void testInsertInvalidTableIsGraceful()
+    {
+        Assert.assertFalse(StorageProxy.instance.denylistKey(ks_cql, "asldkfjadlskjf", "alksdjfads"));
+    }
+
+    @Test
+    public void testInsertInvalidKSIsGraceful()
+    {
+        Assert.assertFalse(StorageProxy.instance.denylistKey("asdklfjas", "asldkfjadlskjf", "alksdjfads"));
+    }
+
+    @Test
+    public void testDisabledDenylistThrowsNoExceptions()
+    {
+        process(String.format("TRUNCATE TABLE %s.table2", ks_cql), ConsistencyLevel.ONE);
+        process(String.format("TRUNCATE TABLE %s.table3", ks_cql), ConsistencyLevel.ONE);
+        denyAllKeys();
+        DatabaseDescriptor.setPartitionDenylistEnabled(false);
+        process("INSERT INTO " + ks_cql + ".table1 (keyone, keytwo, qux, quz, foo) VALUES ('bbb', 'ccc', 'eee', 'fff', 'w')", ConsistencyLevel.ONE);
+        process("SELECT * FROM " + ks_cql + ".table1 WHERE keyone='bbb' and keytwo='ccc'", ConsistencyLevel.ONE);
+        process("SELECT * FROM " + ks_cql + ".table1", ConsistencyLevel.ONE);
+
+        for (int i = 0; i < 50; i++)
+        {
+            process(String.format("INSERT INTO %s.table2 (keyone, keytwo, keythree, value) VALUES ('%s', '%s', '%s', '%s')", ks_cql, i, i, i, i), ConsistencyLevel.ONE);
+            process(String.format("SELECT * FROM %s.table2 WHERE keyone='%s' and keytwo='%s'", ks_cql, i, i), ConsistencyLevel.ONE);
+        }
+
+        for (int i = 0; i < 50; i++)
+        {
+            process(String.format("INSERT INTO %s.table3 (keyone, keytwo, keythree, value) VALUES ('%s', '%s', '%s', '%s')", ks_cql, i, i, i, i), ConsistencyLevel.ONE);
+            process(String.format("SELECT * FROM %s.table3 WHERE keyone='%s' and keytwo='%s'", ks_cql, i, i), ConsistencyLevel.ONE);
+        }
+    }
+
+    /**
+     * Want to make sure we don't throw anything or explode when people try to remove a key that's not there
+     */
+    @Test
+    public void testRemoveMissingIsGraceful()
+    {
+        confirmDenied("table1", "bbb", "ccc");
+        Assert.assertTrue(removeDenylist(ks_cql, "table1", "bbb:ccc"));
+
+        // We expect this to succeed silently at *trying* to remove the key even though it isn't found
+        Assert.assertTrue(removeDenylist(ks_cql, "table1", "bbb:ccc"));
+        refreshList();
+
+        confirmAllowed("table1", "bbb", "ccc");
+    }
+
+    /**
+     * We need to confirm that the entire cache is reloaded rather than being an additive change; we don't want keys to
+     * persist after their removal and reload from CQL.
+     */
+    @Test
+    public void testRemoveWorksOnReload()
+    {
+        denyAllKeys();
+        refreshList();
+
+        confirmDenied("table1", "aaa", "bbb");
+        confirmDenied("table1", "eee", "fff");
+        confirmDenied("table1", "iii", "jjj");
+
+        // poke a hole in the middle and reload
+        removeDenylist(ks_cql, "table1", "eee:fff");
+        refreshList();
+
+        confirmAllowed("table1", "eee", "fff");
+        confirmDenied("table1", "aaa", "bbb");
+        confirmDenied("table1", "iii", "jjj");
+    }
+
+    /**
+     * We go through a few steps here:
+     *  1) Add more keys than we're allowed
+     *  2) Confirm that the overflow keys are *not* denied
+     *  3) Raise the allowable limit
+     *  4) Confirm that the overflow keys are now denied (and no longer really "overflow" for that matter)
+     */
+    @Test
+    public void testShrinkAndGrow()
+    {
+        denyAllKeys();
+        refreshList();
+
+        // Initial control; check denial of both initial and final keys
+        confirmDenied("table1", "aaa", "bbb");
+        confirmDenied("table1", "iii", "jjj");
+
+        // Lower our limit to 5 allowable denies and then check and see that things past the limit are ignored
+        StorageProxy.instance.setDenylistMaxKeysPerTable(5);
+        StorageProxy.instance.setDenylistMaxKeysTotal(5);
+        refreshList();
+
+        // Confirm overflowed keys are allowed; first come first served
+        confirmDenied("table1", "aaa", "bbb");
+        confirmAllowed("table1", "iii", "jjj");
+
+        // Now we raise the limit back up and do nothing else and confirm it's blocked
+        StorageProxy.instance.setDenylistMaxKeysPerTable(1000);
+        StorageProxy.instance.setDenylistMaxKeysTotal(1000);
+        refreshList();
+        confirmDenied("table1", "aaa", "bbb");
+        confirmDenied("table1", "iii", "jjj");
+
+        // Shrink the limits again so the table1 sentinel key we check next overflows and is unblocked
+        StorageProxy.instance.setDenylistMaxKeysPerTable(5);
+        StorageProxy.instance.setDenylistMaxKeysTotal(5);
+        refreshList();
+        confirmAllowed("table1", "iii", "jjj");
+
+        // Now, we remove the denylist entries for our first 5, drop the limit back down, and confirm those overflowed keys now block
+        removeDenylist(ks_cql, "table1", "aaa:bbb");
+        removeDenylist(ks_cql, "table1", "bbb:ccc");
+        removeDenylist(ks_cql, "table1", "ccc:ddd");
+        removeDenylist(ks_cql, "table1", "ddd:eee");
+        removeDenylist(ks_cql, "table1", "eee:fff");
+        refreshList();
+        confirmDenied("table1", "iii", "jjj");
+    }
+
+    /*
+    We need to test that, during a violation of our global allowable limit, we still enforce our limit of keys queried
+    on a per-table basis.
+     */
+    @Test
+    public void testTableLimitRespected()
+    {
+        StorageProxy.instance.setDenylistMaxKeysPerTable(5);
+        StorageProxy.instance.setDenylistMaxKeysTotal(12);
+        denyAllKeys();
+        refreshList();
+
+        // Table 1: expect first 5 denied
+        confirmDenied("table1", "aaa", "bbb");
+        confirmDenied("table1", "eee", "fff");
+        confirmAllowed("table1", "fff", "ggg");
+
+        // Table 2: expect first 5 denied
+        for (int i = 0; i < 5; i++)
+            confirmDenied("table2", Integer.toString(i), Integer.toString(i));
+
+        // Confirm remainder are allowed because we hit our table limit at 5
+        for (int i = 5; i < 50; i++)
+            confirmAllowed("table2", Integer.toString(i), Integer.toString(i));
+
+        // Table 3: expect only first 2 denied; global limit enforcement
+        confirmDenied("table3", "0", "0");
+        confirmDenied("table3", "1", "1");
+
+        // And our final 48 should be allowed
+        for (int i = 2; i < 50; i++)
+            confirmAllowed("table3", Integer.toString(i), Integer.toString(i));
+    }
+
+    /**
+     * Test that the global limit cannot be exceeded by accumulating per-table allowances
+     */
+    @Test
+    public void testGlobalLimitRespected()
+    {
+        StorageProxy.instance.setDenylistMaxKeysPerTable(50);
+        StorageProxy.instance.setDenylistMaxKeysTotal(15);
+        denyAllKeys();
+        refreshList();
+
+        // Table 1: expect all 10 denied
+        confirmDenied("table1", "aaa", "bbb");
+        confirmDenied("table1", "jjj", "kkk");
+
+        // Table 2: expect only 5 denied up to global limit trigger
+        for (int i = 0; i < 5; i++)
+            confirmDenied("table2", Integer.toString(i), Integer.toString(i));
+
+        // Remainder of Table 2 should be allowed; testing overflow boundary logic
+        for (int i = 5; i < 50; i++)
+            confirmAllowed("table2", Integer.toString(i), Integer.toString(i));
+
+        // Table 3: expect all allowed, as we're past the global limit by the time we get to loading this table.
+        // This confirms that we bypass loading a table completely once we're at our global limit.
+        for (int i = 0; i < 50; i++)
+            confirmAllowed("table3", Integer.toString(i), Integer.toString(i));
+    }
+
+    private void confirmDenied(String table, String keyOne, String keyTwo)
+    {
+        String query = String.format("SELECT * FROM " + ks_cql + "." + table + " WHERE keyone='%s' and keytwo='%s'", keyOne, keyTwo);
+        assertThatThrownBy(() -> process(query, ConsistencyLevel.ONE))
+                           .isInstanceOf(InvalidRequestException.class)
+                           .hasMessageContaining("Unable to read denylisted partition");
+    }
+
+    private void confirmAllowed(String table, String keyOne, String keyTwo)
+    {
+        process(String.format("SELECT * FROM %s.%s WHERE keyone='%s' and keytwo='%s'", ks_cql, table, keyOne, keyTwo), ConsistencyLevel.ONE);
+    }
+
+    private void resetDenylist()
+    {
+        process("TRUNCATE system_distributed.partition_denylist", ConsistencyLevel.ONE);
+        StorageProxy.instance.setDenylistMaxKeysTotal(1000);
+        StorageProxy.instance.setDenylistMaxKeysPerTable(1000);
+        StorageProxy.instance.loadPartitionDenylist();
+    }
+
+    private void denyAllKeys()
+    {
+        denylist("table1", "aaa:bbb");
+        denylist("table1", "bbb:ccc");
+        denylist("table1", "ccc:ddd");
+        denylist("table1", "ddd:eee");
+        denylist("table1", "eee:fff");
+        denylist("table1", "fff:ggg");
+        denylist("table1", "ggg:hhh");
+        denylist("table1", "hhh:iii");
+        denylist("table1", "iii:jjj");
+        denylist("table1", "jjj:kkk");
+
+        for (int i = 0; i < 50; i++)
+        {
+            denylist("table2", String.format("%d:%d", i, i));
+            denylist("table3", String.format("%d:%d", i, i));
+        }
+
+    }
+}
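
PartitionDenylistTest's limit tests encode a specific loading order: keys are loaded table by table, each table is capped at the per-table limit, and loading stops outright once the global limit is hit, so later tables are skipped entirely. A small sketch of those semantics as exercised by testTableLimitRespected and testGlobalLimitRespected (an illustration of the behaviour the assertions expect, not the actual loader):

    import java.util.*;

    public class DenylistLoadSketch
    {
        static Map<String, Set<String>> load(Map<String, List<String>> keysByTable,
                                             int maxKeysPerTable,
                                             int maxKeysTotal)
        {
            Map<String, Set<String>> denied = new LinkedHashMap<>();
            int total = 0;
            for (Map.Entry<String, List<String>> e : keysByTable.entrySet())
            {
                if (total >= maxKeysTotal)
                    break;                                     // global cap reached: skip remaining tables
                Set<String> forTable = new LinkedHashSet<>();
                for (String key : e.getValue())
                {
                    if (forTable.size() >= maxKeysPerTable || total >= maxKeysTotal)
                        break;                                 // per-table or global cap reached
                    forTable.add(key);
                    total++;
                }
                denied.put(e.getKey(), forTable);
            }
            return denied;
        }

        private static List<String> keysNamed(int n)
        {
            List<String> keys = new ArrayList<>();
            for (int i = 0; i < n; i++)
                keys.add(i + ":" + i);
            return keys;
        }

        public static void main(String[] args)
        {
            Map<String, List<String>> keys = new LinkedHashMap<>();
            keys.put("table1", keysNamed(10));
            keys.put("table2", keysNamed(50));
            keys.put("table3", keysNamed(50));
            load(keys, 5, 12).forEach((t, k) -> System.out.println(t + ": " + k.size())); // 5, 5, 2
        }
    }

With maxKeysPerTable=5 and maxKeysTotal=12 and 10/50/50 keys per table, this loads 5, 5 and 2 keys respectively, matching the first test; with 50/15 it loads 10, 5 and 0, matching the second.
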
diff --git a/test/unit/org/apache/cassandra/service/PaxosStateTest.java b/test/unit/org/apache/cassandra/service/PaxosStateTest.java
index 7d69223..e73147d 100644
--- a/test/unit/org/apache/cassandra/service/PaxosStateTest.java
+++ b/test/unit/org/apache/cassandra/service/PaxosStateTest.java
@@ -18,12 +18,26 @@
 package org.apache.cassandra.service;
 
 import java.nio.ByteBuffer;
-import java.util.UUID;
 
 import com.google.common.collect.Iterables;
-import org.apache.cassandra.service.paxos.PrepareVerbHandler;
-import org.apache.cassandra.service.paxos.ProposeVerbHandler;
+
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.v1.PrepareVerbHandler;
+import org.apache.cassandra.service.paxos.v1.ProposeVerbHandler;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.exceptions.ReadTimeoutException;
+import org.apache.cassandra.service.paxos.PaxosOperationLock;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -37,8 +51,10 @@
 import org.apache.cassandra.service.paxos.PaxosState;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.atUnixMicros;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class PaxosStateTest
@@ -60,7 +76,7 @@
     public void testCommittingAfterTruncation() throws Exception
     {
         ColumnFamilyStore cfs = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1");
-        String key = "key" + System.nanoTime();
+        String key = "key" + nanoTime();
         ByteBuffer value = ByteBufferUtil.bytes(0);
         RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), FBUtilities.timestampMicros(), key);
         builder.clustering("a").add("val", value);
@@ -71,25 +87,25 @@
 
         // Commit the proposal & verify the data is present
         Commit beforeTruncate = newProposal(0, update);
-        PaxosState.commit(beforeTruncate);
+        PaxosState.commitDirect(beforeTruncate);
         assertDataPresent(cfs, Util.dk(key), "val", value);
 
         // Truncate then attempt to commit again, mutation should
         // be ignored as the proposal predates the truncation
         cfs.truncateBlocking();
-        PaxosState.commit(beforeTruncate);
+        PaxosState.commitDirect(beforeTruncate);
         assertNoDataPresent(cfs, Util.dk(key));
 
         // Now try again with a ballot created after the truncation
         long timestamp = SystemKeyspace.getTruncatedAt(update.metadata().id) + 1;
         Commit afterTruncate = newProposal(timestamp, update);
-        PaxosState.commit(afterTruncate);
+        PaxosState.commitDirect(afterTruncate);
         assertDataPresent(cfs, Util.dk(key), "val", value);
     }
 
-    private Commit newProposal(long ballotMillis, PartitionUpdate update)
+    private Commit newProposal(long ballotMicros, PartitionUpdate update)
     {
-        return Commit.newProposal(UUIDGen.getTimeUUID(ballotMillis), update);
+        return Commit.newProposal(atUnixMicros(ballotMicros, NONE), update);
     }
 
     private void assertDataPresent(ColumnFamilyStore cfs, DecoratedKey key, String name, ByteBuffer value)
@@ -108,7 +124,7 @@
     public void testPrepareProposePaxos() throws Throwable
     {
         ColumnFamilyStore cfs = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1");
-        String key = "key" + System.nanoTime();
+        String key = "key" + nanoTime();
         ByteBuffer value = ByteBufferUtil.bytes(0);
         RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), FBUtilities.timestampMicros(), key);
         builder.clustering("a").add("val", value);
@@ -117,11 +133,44 @@
         // CFS should be empty initially
         assertNoDataPresent(cfs, Util.dk(key));
 
-        UUID ballot = UUIDGen.getRandomTimeUUIDFromMicros(System.currentTimeMillis());
+        Ballot ballot = atUnixMicros(1000 * System.currentTimeMillis(), NONE);
 
         Commit commit = Commit.newPrepare(Util.dk(key), cfs.metadata(), ballot);
 
         assertTrue("paxos prepare stage failed", PrepareVerbHandler.doPrepare(commit).promised);
         assertTrue("paxos propose stage failed", ProposeVerbHandler.doPropose(commit));
     }
+
+    @Test
+    public void testPaxosLock() throws ExecutionException, InterruptedException
+    {
+        DecoratedKey key = new BufferDecoratedKey(Murmur3Partitioner.MINIMUM, ByteBufferUtil.EMPTY_BYTE_BUFFER);
+        TableMetadata metadata = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1").metadata.get();
+        Supplier<PaxosOperationLock> locker = () -> PaxosState.lock(key, metadata, System.nanoTime() + TimeUnit.SECONDS.toNanos(1L), ConsistencyLevel.SERIAL, false);
+        ExecutorService executor = Executors.newFixedThreadPool(1);
+        Future<?> future;
+        try (PaxosOperationLock lock = locker.get())
+        {
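+            // a second acquisition for the same key should time out while the lock is held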
+            try
+            {
+                try (PaxosOperationLock lock2 = locker.get())
+                {
+                    Assert.fail();
+                }
+            }
+            catch (ReadTimeoutException rte)
+            {
+            }
+
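+            // an acquisition from another thread should only complete once the outer lock is released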
+            future = executor.submit(() -> {
+                try (PaxosOperationLock lock2 = locker.get())
+                {
+                }
+            });
+        }
+        finally
+        {
+            executor.shutdown();
+        }
+        future.get();
+    }
 }
diff --git a/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java b/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java
index a7551f4..a5b32bf 100644
--- a/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java
+++ b/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java
@@ -25,9 +25,9 @@
 import org.apache.cassandra.transport.*;
 import org.apache.cassandra.transport.messages.*;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class ProtocolBetaVersionTest extends CQLTester
 {
@@ -35,7 +35,7 @@
     public static void setUp()
     {
         requireNetwork();
-        DatabaseDescriptor.setBatchSizeWarnThresholdInKB(1);
+        DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(1);
     }
 
     private ProtocolVersion getBetaVersion()
diff --git a/test/unit/org/apache/cassandra/service/QueryPagerTest.java b/test/unit/org/apache/cassandra/service/QueryPagerTest.java
index 50bb3e2..323de1a 100644
--- a/test/unit/org/apache/cassandra/service/QueryPagerTest.java
+++ b/test/unit/org/apache/cassandra/service/QueryPagerTest.java
@@ -326,7 +326,7 @@
 
     public void multiQueryTest(boolean testPagingState, ProtocolVersion protocolVersion)
     {
-        ReadQuery command = new SinglePartitionReadCommand.Group(new ArrayList<SinglePartitionReadCommand>()
+        ReadQuery command = SinglePartitionReadCommand.Group.create(new ArrayList<SinglePartitionReadCommand>()
         {{
             add(sliceQuery("k1", "c2", "c6", 10));
             add(sliceQuery("k4", "c3", "c5", 10));
diff --git a/test/unit/org/apache/cassandra/service/RemoveTest.java b/test/unit/org/apache/cassandra/service/RemoveTest.java
index ea8c8d8..acc54cb 100644
--- a/test/unit/org/apache/cassandra/service/RemoveTest.java
+++ b/test/unit/org/apache/cassandra/service/RemoveTest.java
@@ -143,7 +143,7 @@
     {
         // start removal in background and send replication confirmations
         final AtomicBoolean success = new AtomicBoolean(false);
-        Thread remover = NamedThreadFactory.createThread(() ->
+        Thread remover = NamedThreadFactory.createAnonymousThread(() ->
         {
             try
             {
diff --git a/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java b/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java
index e4a5947..5545d93 100644
--- a/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java
+++ b/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java
@@ -28,8 +28,10 @@
 import org.junit.Test;
 
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.VersionAndType;
+import org.apache.cassandra.io.util.File;
 import org.assertj.core.util.Files;
 import org.quicktheories.core.Gen;
 import org.quicktheories.generators.Generate;
@@ -99,7 +101,7 @@
                                         tables(),
                                         generations(),
                                         sstableVersionString(),
-                                        (f, k, t, g, v) -> new Descriptor(v, Files.currentFolder(), k, t, g, f));
+                                        (f, k, t, g, v) -> new Descriptor(v, new File(Files.currentFolder()), k, t, new SequenceBasedSSTableId(g), f));
     }
 
     private Gen<List<Descriptor>> descriptorLists(int minSize)
diff --git a/test/unit/org/apache/cassandra/service/SerializationsTest.java b/test/unit/org/apache/cassandra/service/SerializationsTest.java
index f4b3b1c..f9f4766 100644
--- a/test/unit/org/apache/cassandra/service/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/service/SerializationsTest.java
@@ -26,6 +26,7 @@
 import java.util.UUID;
 
 import com.google.common.collect.Lists;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -39,25 +40,25 @@
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.IVersionedSerializer;
-import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.repair.SyncNodePair;
 import org.apache.cassandra.repair.RepairJobDesc;
 import org.apache.cassandra.repair.Validator;
 import org.apache.cassandra.repair.messages.*;
+import org.apache.cassandra.repair.state.ValidationState;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.streaming.SessionSummary;
 import org.apache.cassandra.streaming.StreamSummary;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MerkleTrees;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class SerializationsTest extends AbstractSerializationsTester
 {
     private static PartitionerSwitcher partitionerSwitcher;
-    private static UUID RANDOM_UUID;
+    private static TimeUUID RANDOM_UUID;
     private static Range<Token> FULL_RANGE;
     private static RepairJobDesc DESC;
 
@@ -68,7 +69,7 @@
     {
         DatabaseDescriptor.daemonInitialization();
         partitionerSwitcher = Util.switchPartitioner(RandomPartitioner.instance);
-        RANDOM_UUID = UUID.fromString("b5c3d033-75aa-4c2f-a819-947aac7a0c54");
+        RANDOM_UUID = TimeUUID.fromString("743325d0-4c4b-11ec-8a88-2d67081686db");
         FULL_RANGE = new Range<>(Util.testPartitioner().getMinimumToken(), Util.testPartitioner().getMinimumToken());
         DESC = new RepairJobDesc(RANDOM_UUID, RANDOM_UUID, "Keyspace1", "Standard1", Arrays.asList(FULL_RANGE));
     }
@@ -103,7 +104,7 @@
         if (EXECUTE_WRITES)
             testValidationRequestWrite();
 
-        try (DataInputStreamPlus in = getInput("service.ValidationRequest.bin"))
+        try (FileInputStreamPlus in = getInput("service.ValidationRequest.bin"))
         {
             ValidationRequest message = ValidationRequest.serializer.deserialize(in, getVersion());
             assert DESC.equals(message.desc);
@@ -115,20 +116,20 @@
     {
         IPartitioner p = RandomPartitioner.instance;
 
-        MerkleTrees mt = new MerkleTrees(p);
+        MerkleTrees mts = new MerkleTrees(p);
 
         // empty validation
-        mt.addMerkleTree((int) Math.pow(2, 15), FULL_RANGE);
-        Validator v0 = new Validator(DESC, FBUtilities.getBroadcastAddressAndPort(), -1, PreviewKind.NONE);
-        ValidationResponse c0 = new ValidationResponse(DESC, mt);
+        mts.addMerkleTree((int) Math.pow(2, 15), FULL_RANGE);
+        Validator v0 = new Validator(new ValidationState(DESC, FBUtilities.getBroadcastAddressAndPort()), -1, PreviewKind.NONE);
+        ValidationResponse c0 = new ValidationResponse(DESC, mts);
 
         // validation with a tree
-        mt = new MerkleTrees(p);
-        mt.addMerkleTree(Integer.MAX_VALUE, FULL_RANGE);
+        mts = new MerkleTrees(p);
+        mts.addMerkleTree(Integer.MAX_VALUE, FULL_RANGE);
         for (int i = 0; i < 10; i++)
-            mt.split(p.getRandomToken());
-        Validator v1 = new Validator(DESC, FBUtilities.getBroadcastAddressAndPort(), -1, PreviewKind.NONE);
-        ValidationResponse c1 = new ValidationResponse(DESC, mt);
+            mts.split(p.getRandomToken());
+        Validator v1 = new Validator(new ValidationState(DESC, FBUtilities.getBroadcastAddressAndPort()), -1, PreviewKind.NONE);
+        ValidationResponse c1 = new ValidationResponse(DESC, mts);
 
         // validation failed
         ValidationResponse c3 = new ValidationResponse(DESC);
@@ -142,7 +143,7 @@
         if (EXECUTE_WRITES)
             testValidationCompleteWrite();
 
-        try (DataInputStreamPlus in = getInput("service.ValidationComplete.bin"))
+        try (FileInputStreamPlus in = getInput("service.ValidationComplete.bin"))
         {
             // empty validation
             ValidationResponse message = ValidationResponse.serializer.deserialize(in, getVersion());
@@ -187,7 +188,7 @@
         InetAddressAndPort src = InetAddressAndPort.getByNameOverrideDefaults("127.0.0.2", PORT);
         InetAddressAndPort dest = InetAddressAndPort.getByNameOverrideDefaults("127.0.0.3", PORT);
 
-        try (DataInputStreamPlus in = getInput("service.SyncRequest.bin"))
+        try (FileInputStreamPlus in = getInput("service.SyncRequest.bin"))
         {
             SyncRequest message = SyncRequest.serializer.deserialize(in, getVersion());
             assert DESC.equals(message.desc);
@@ -206,8 +207,8 @@
         // sync success
         List<SessionSummary> summaries = new ArrayList<>();
         summaries.add(new SessionSummary(src, dest,
-                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUIDGen.getTimeUUID()), 5, 100)),
-                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUIDGen.getTimeUUID()), 500, 10))
+                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUID.randomUUID()), 5, 100)),
+                                         Lists.newArrayList(new StreamSummary(TableId.fromUUID(UUID.randomUUID()), 500, 10))
         ));
         SyncResponse success = new SyncResponse(DESC, src, dest, true, summaries);
         // sync fail
@@ -226,7 +227,7 @@
         InetAddressAndPort dest = InetAddressAndPort.getByNameOverrideDefaults("127.0.0.3", PORT);
         SyncNodePair nodes = new SyncNodePair(src, dest);
 
-        try (DataInputStreamPlus in = getInput("service.SyncComplete.bin"))
+        try (FileInputStreamPlus in = getInput("service.SyncComplete.bin"))
         {
             // success
             SyncResponse message = SyncResponse.serializer.deserialize(in, getVersion());
diff --git a/test/unit/org/apache/cassandra/service/StartupChecksTest.java b/test/unit/org/apache/cassandra/service/StartupChecksTest.java
index 67217b3..72f8804 100644
--- a/test/unit/org/apache/cassandra/service/StartupChecksTest.java
+++ b/test/unit/org/apache/cassandra/service/StartupChecksTest.java
@@ -17,12 +17,15 @@
  */
 package org.apache.cassandra.service;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.time.Instant;
+import java.util.List;
 
+import org.apache.cassandra.config.StartupChecksOptions;
+import org.apache.cassandra.io.util.File;
 import org.junit.*;
 
 import org.apache.cassandra.SchemaLoader;
@@ -30,9 +33,14 @@
 import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.exceptions.StartupException;
-import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.service.DataResurrectionCheck.Heartbeat;
+import org.apache.cassandra.utils.Clock;
 
-import static org.junit.Assert.assertFalse;
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.io.util.FileUtils.createTempFile;
+import static org.apache.cassandra.service.DataResurrectionCheck.HEARTBEAT_FILE_CONFIG_PROPERTY;
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.check_data_resurrection;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -41,10 +49,14 @@
     public static final String INVALID_LEGACY_SSTABLE_ROOT_PROP = "invalid-legacy-sstable-root";
     StartupChecks startupChecks;
     Path sstableDir;
+    static File heartbeatFile;
+
+    StartupChecksOptions options = new StartupChecksOptions();
 
     @BeforeClass
     public static void setupServer()
     {
+        heartbeatFile = createTempFile("cassandra-heartbeat-", "");
         SchemaLoader.prepareServer();
     }
 
@@ -54,19 +66,29 @@
         for (ColumnFamilyStore cfs : Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStores())
             cfs.clearUnsafe();
         for (File dataDir : Directories.getKSChildDirectories(SchemaConstants.SYSTEM_KEYSPACE_NAME))
-            FileUtils.deleteRecursive(dataDir);
+            dataDir.deleteRecursive();
 
         File dataDir = new File(DatabaseDescriptor.getAllDataFileLocations()[0]);
-        sstableDir = Paths.get(dataDir.getAbsolutePath(), "Keyspace1", "Standard1");
+        sstableDir = Paths.get(dataDir.absolutePath(), "Keyspace1", "Standard1");
         Files.createDirectories(sstableDir);
 
+        options.enable(check_data_resurrection);
+        options.getConfig(check_data_resurrection)
+               .put(HEARTBEAT_FILE_CONFIG_PROPERTY, heartbeatFile.absolutePath());
+
         startupChecks = new StartupChecks();
     }
 
     @After
     public void tearDown() throws IOException
     {
-        FileUtils.deleteRecursive(sstableDir.toFile());
+        new File(sstableDir).deleteRecursive();
+    }
+
+    @AfterClass
+    public static void tearDownClass()
+    {
+        heartbeatFile.delete();
     }
 
     @Test
@@ -79,17 +101,17 @@
         verifyFailure(startupChecks, "Detected unreadable sstables");
 
         // we should ignore invalid sstables in a snapshots directory
-        FileUtils.deleteRecursive(sstableDir.toFile());
+        new File(sstableDir).deleteRecursive();
         Path snapshotDir = sstableDir.resolve("snapshots");
         Files.createDirectories(snapshotDir);
-        copyInvalidLegacySSTables(snapshotDir); startupChecks.verify();
+        copyInvalidLegacySSTables(snapshotDir); startupChecks.verify(options);
 
         // and in a backups directory
-        FileUtils.deleteRecursive(sstableDir.toFile());
+        new File(sstableDir).deleteRecursive();
         Path backupDir = sstableDir.resolve("backups");
         Files.createDirectories(backupDir);
         copyInvalidLegacySSTables(backupDir);
-        startupChecks.verify();
+        startupChecks.verify(options);
     }
 
     @Test
@@ -98,16 +120,41 @@
         startupChecks = startupChecks.withTest(StartupChecks.checkSSTablesFormat);
 
         copyLegacyNonSSTableFiles(sstableDir);
-        assertFalse(sstableDir.toFile().listFiles().length == 0);
+        assertNotEquals(0, new File(sstableDir).tryList().length);
 
-        startupChecks.verify();
+        startupChecks.verify(options);
+    }
+
+    @Test
+    public void checkReadAheadKbSettingCheck() throws Exception
+    {
+        // This test just validates that the verify function
+        // doesn't throw any exceptions
+        startupChecks = startupChecks.withTest(StartupChecks.checkReadAheadKbSetting);
+        startupChecks.verify(options);
+    }
+
+    @Test
+    public void testGetReadAheadKBPath()
+    {
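+        // trailing partition digits should be stripped from the device name; non-/dev paths should yield null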
+        Path sdaDirectory = StartupChecks.getReadAheadKBPath("/dev/sda12");
+        Assert.assertEquals(Paths.get("/sys/block/sda/queue/read_ahead_kb"), sdaDirectory);
+
+        Path scsiDirectory = StartupChecks.getReadAheadKBPath("/dev/scsi1");
+        Assert.assertEquals(Paths.get("/sys/block/scsi/queue/read_ahead_kb"), scsiDirectory);
+
+        Path dirWithoutNumbers = StartupChecks.getReadAheadKBPath("/dev/sca");
+        Assert.assertEquals(Paths.get("/sys/block/sca/queue/read_ahead_kb"), dirWithoutNumbers);
+
+        Path invalidDir = StartupChecks.getReadAheadKBPath("/tmp/xpto");
+        Assert.assertNull(invalidDir);
     }
 
     @Test
     public void maxMapCountCheck() throws Exception
     {
         startupChecks = startupChecks.withTest(StartupChecks.checkMaxMapCount);
-        startupChecks.verify();
+        startupChecks.verify(options);
     }
 
     private void copyLegacyNonSSTableFiles(Path targetDir) throws IOException
@@ -122,13 +169,40 @@
             Files.copy(Paths.get(legacySSTableRoot.toString(), filename), targetDir.resolve(filename));
     }
 
+    @Test
+    public void testDataResurrectionCheck() throws Exception
+    {
+        DataResurrectionCheck check = new DataResurrectionCheck() {
+            @Override
+            List<String> getKeyspaces()
+            {
+                return singletonList("abc");
+            }
+
+            @Override
+            List<TableGCPeriod> getTablesGcPeriods(String userKeyspace)
+            {
+                return singletonList(new TableGCPeriod("def", 10));
+            }
+        };
+
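+        // write a heartbeat now, then wait past the 10-second gc period above so the check flags abc.def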
+        Heartbeat heartbeat = new Heartbeat(Instant.ofEpochMilli(Clock.Global.currentTimeMillis()));
+        heartbeat.serializeToJsonFile(heartbeatFile);
+
+        Thread.sleep(15 * 1000);
+
+        startupChecks.withTest(check);
+
+        verifyFailure(startupChecks, "Invalid tables: abc.def");
+    }
+
     private void copyInvalidLegacySSTables(Path targetDir) throws IOException
     {
-        File legacySSTableRoot = Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP),
+        File legacySSTableRoot = new File(Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP),
                                            "Keyspace1",
-                                           "Standard1").toFile();
-        for (File f : legacySSTableRoot.listFiles())
-            Files.copy(f.toPath(), targetDir.resolve(f.getName()));
+                                           "Standard1"));
+        for (File f : legacySSTableRoot.tryList())
+            Files.copy(f.toPath(), targetDir.resolve(f.name()));
 
     }
 
@@ -136,7 +210,7 @@
     {
         try
         {
-            tests.verify();
+            tests.verify(options);
             fail("Expected a startup exception but none was thrown");
         }
         catch (StartupException e)
diff --git a/test/unit/org/apache/cassandra/service/StorageProxyTest.java b/test/unit/org/apache/cassandra/service/StorageProxyTest.java
new file mode 100644
index 0000000..6f45673
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/StorageProxyTest.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import java.net.UnknownHostException;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.gms.EndpointState;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.gms.HeartBeatState;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.jboss.byteman.contrib.bmunit.BMRule;
+import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
+
+import static org.apache.cassandra.locator.ReplicaUtils.full;
+import static org.assertj.core.api.Assertions.assertThat;
+
+@RunWith(BMUnitRunner.class)
+public class StorageProxyTest
+{
+    @BeforeClass
+    public static void initDD()
+    {
+        DatabaseDescriptor.daemonInitialization();
+        ServerTestUtils.mkdirs();
+    }
+
+    @Test
+    public void testSetGetPaxosVariant()
+    {
+        Assert.assertEquals(Config.PaxosVariant.v1, DatabaseDescriptor.getPaxosVariant());
+        Assert.assertEquals("v1", StorageProxy.instance.getPaxosVariant());
+        StorageProxy.instance.setPaxosVariant("v2");
+        Assert.assertEquals("v2", StorageProxy.instance.getPaxosVariant());
+        Assert.assertEquals(Config.PaxosVariant.v2, DatabaseDescriptor.getPaxosVariant());
+        DatabaseDescriptor.setPaxosVariant(Config.PaxosVariant.v1);
+        Assert.assertEquals(Config.PaxosVariant.v1, DatabaseDescriptor.getPaxosVariant());
+        Assert.assertEquals("v1", StorageProxy.instance.getPaxosVariant());
+    }
+
+    @Test
+    public void testShouldHint() throws Exception
+    {
+        // HAPPY PATH with all defaults
+        shouldHintTest(replica -> {
+            assertThat(StorageProxy.shouldHint(replica)).isTrue();
+            assertThat(StorageProxy.shouldHint(replica, /* tryEnablePersistentWindow */ false)).isTrue();
+        });
+    }
+
+    @Test
+    public void testShouldHintOnWindowExpiry() throws Exception
+    {
+        shouldHintTest(replica -> {
+            // wait for 5 ms, we will shorten the hints window later
+            Uninterruptibles.sleepUninterruptibly(5, TimeUnit.MILLISECONDS);
+
+            final int originalHintWindow = DatabaseDescriptor.getMaxHintWindow();
+            try
+            {
+                DatabaseDescriptor.setMaxHintWindow(1); // 1 ms. It should not hint
+                assertThat(StorageProxy.shouldHint(replica)).isFalse();
+            }
+            finally
+            {
+                DatabaseDescriptor.setMaxHintWindow(originalHintWindow);
+            }
+        });
+    }
+
+    @Test
+    @BMRule(name = "Hints size exceeded the limit",
+            targetClass="org.apache.cassandra.hints.HintsService",
+            targetMethod="getTotalHintsSize",
+            action="return 2097152;") // 2MB
+    public void testShouldHintOnExceedingSize() throws Exception
+    {
+        shouldHintTest(replica -> {
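+            // the Byteman rule above reports 2 MiB of stored hints, so a 1 MiB per-host limit should disable hinting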
+            final int originalHintsSizeLimit = DatabaseDescriptor.getMaxHintsSizePerHostInMiB();
+            try
+            {
+                DatabaseDescriptor.setMaxHintsSizePerHostInMiB(1);
+                assertThat(StorageProxy.shouldHint(replica)).isFalse();
+            }
+            finally
+            {
+                DatabaseDescriptor.setMaxHintsSizePerHostInMiB(originalHintsSizeLimit);
+            }
+        });
+    }
+
+    /**
+     * Ensure that the timer backing the JMX endpoint to transiently enable blocking read repairs both enables
+     * and disables the way we'd expect.
+     */
+    @Test
+    public void testTransientLoggingTimer()
+    {
+        StorageProxy.instance.logBlockingReadRepairAttemptsForNSeconds(2);
+        Assert.assertTrue(StorageProxy.instance.isLoggingReadRepairs());
+        Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS);
+        Assert.assertFalse(StorageProxy.instance.isLoggingReadRepairs());
+    }
+
+    private void shouldHintTest(Consumer<Replica> test) throws UnknownHostException
+    {
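+        // register the endpoint and mark it dead via gossip so hint decisions are made against a down replica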
+        InetAddressAndPort testEp = InetAddressAndPort.getByName("192.168.1.1");
+        Replica replica = full(testEp);
+        StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), testEp);
+        EndpointState state = new EndpointState(new HeartBeatState(0, 0));
+        Gossiper.runInGossipStageBlocking(() -> Gossiper.instance.markDead(replica.endpoint(), state));
+
+        try
+        {
+            test.accept(replica);
+        }
+        finally
+        {
+            StorageService.instance.getTokenMetadata().removeEndpoint(testEp);
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/StorageServiceServerM3PTest.java b/test/unit/org/apache/cassandra/service/StorageServiceServerM3PTest.java
index 69b0642..bb78c83 100644
--- a/test/unit/org/apache/cassandra/service/StorageServiceServerM3PTest.java
+++ b/test/unit/org/apache/cassandra/service/StorageServiceServerM3PTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.service;
 
-import java.io.File;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -28,6 +27,7 @@
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.PropertyFileSnitch;
 
diff --git a/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java b/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java
index 60bed4c..119b810 100644
--- a/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java
+++ b/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java
@@ -19,10 +19,7 @@
 
 package org.apache.cassandra.service;
 
-import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
-import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.*;
@@ -38,7 +35,6 @@
 import org.apache.cassandra.audit.AuditLogManager;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.db.WindowsFailedSnapshotTracker;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
@@ -57,9 +53,10 @@
 import org.apache.cassandra.utils.FBUtilities;
 import org.assertj.core.api.Assertions;
 
+import static org.apache.cassandra.ServerTestUtils.cleanup;
+import static org.apache.cassandra.ServerTestUtils.mkdirs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 
 public class StorageServiceServerTest
 {
@@ -72,6 +69,9 @@
         IEndpointSnitch snitch = new PropertyFileSnitch();
         DatabaseDescriptor.setEndpointSnitch(snitch);
         Keyspace.setInitialized();
+        mkdirs();
+        cleanup();
+        StorageService.instance.initServer(0);
     }
 
     @Test
@@ -88,75 +88,6 @@
         StorageService.instance.takeSnapshot(UUID.randomUUID().toString());
     }
 
-    private void checkTempFilePresence(File f, boolean exist)
-    {
-        for (int i = 0; i < 5; i++)
-        {
-            File subdir = new File(f, Integer.toString(i));
-            subdir.mkdir();
-            for (int j = 0; j < 5; j++)
-            {
-                File subF = new File(subdir, Integer.toString(j));
-                assert(exist ? subF.exists() : !subF.exists());
-            }
-        }
-    }
-
-    @Test
-    public void testSnapshotFailureHandler() throws IOException
-    {
-        assumeTrue(FBUtilities.isWindows);
-
-        // Initial "run" of Cassandra, nothing in failed snapshot file
-        WindowsFailedSnapshotTracker.deleteOldSnapshots();
-
-        File f = new File(System.getenv("TEMP") + File.separator + Integer.toString(new Random().nextInt()));
-        f.mkdir();
-        f.deleteOnExit();
-        for (int i = 0; i < 5; i++)
-        {
-            File subdir = new File(f, Integer.toString(i));
-            subdir.mkdir();
-            for (int j = 0; j < 5; j++)
-                new File(subdir, Integer.toString(j)).createNewFile();
-        }
-
-        checkTempFilePresence(f, true);
-
-        // Confirm deletion is recursive
-        for (int i = 0; i < 5; i++)
-            WindowsFailedSnapshotTracker.handleFailedSnapshot(new File(f, Integer.toString(i)));
-
-        assertTrue(new File(WindowsFailedSnapshotTracker.TODELETEFILE).exists());
-
-        // Simulate shutdown and restart of C* node, closing out the list of failed snapshots.
-        WindowsFailedSnapshotTracker.resetForTests();
-
-        // Perform new run, mimicking behavior of C* at startup
-        WindowsFailedSnapshotTracker.deleteOldSnapshots();
-        checkTempFilePresence(f, false);
-
-        // Check to make sure we don't delete non-temp, non-datafile locations
-        WindowsFailedSnapshotTracker.resetForTests();
-        PrintWriter tempPrinter = new PrintWriter(new FileWriter(WindowsFailedSnapshotTracker.TODELETEFILE, true));
-        tempPrinter.println(".safeDir");
-        tempPrinter.close();
-
-        File protectedDir = new File(".safeDir");
-        protectedDir.mkdir();
-        File protectedFile = new File(protectedDir, ".safeFile");
-        protectedFile.createNewFile();
-
-        WindowsFailedSnapshotTracker.handleFailedSnapshot(protectedDir);
-        WindowsFailedSnapshotTracker.deleteOldSnapshots();
-
-        assertTrue(protectedDir.exists());
-        assertTrue(protectedFile.exists());
-
-        protectedFile.delete();
-        protectedDir.delete();
-    }
-
     @Test
     public void testTableSnapshot() throws IOException
     {
@@ -190,9 +121,9 @@
         configOptions.put("DC2", "2");
         configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         Collection<Range<Token>> primaryRanges = StorageService.instance.getLocalPrimaryRangeForEndpoint(InetAddressAndPort.getByName("127.0.0.1"));
         Assertions.assertThat(primaryRanges.size()).as(primaryRanges.toString()).isEqualTo(1);
@@ -230,9 +161,9 @@
         configOptions.put("DC2", "1");
         configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangeForEndpointWithinDC(meta.name,
                                                                                                             InetAddressAndPort.getByName("127.0.0.1"));
@@ -273,9 +204,9 @@
         configOptions.put("DC2", "1");
         configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangesForEndpoint(meta.name, InetAddressAndPort.getByName("127.0.0.1"));
         Assertions.assertThat(primaryRanges.size()).as(primaryRanges.toString()).isEqualTo(1);
@@ -310,9 +241,9 @@
         configOptions.put("DC2", "2");
         configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         // endpoints in DC1 should not have primary range
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangesForEndpoint(meta.name, InetAddressAndPort.getByName("127.0.0.1"));
@@ -349,9 +280,9 @@
         configOptions.put("DC2", "2");
         configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         // endpoints in DC1 should not have primary range
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangeForEndpointWithinDC(meta.name, InetAddressAndPort.getByName("127.0.0.1"));
@@ -401,9 +332,9 @@
         configOptions.put("DC2", "2");
         configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         // endpoints in DC1 should not have primary range
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangesForEndpoint(meta.name, InetAddressAndPort.getByName("127.0.0.1"));
@@ -468,9 +399,9 @@
         configOptions.put("DC2", "2");
         configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         // endpoints in DC1 should have primary ranges which also cover DC2
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangeForEndpointWithinDC(meta.name, InetAddressAndPort.getByName("127.0.0.1"));
@@ -527,9 +458,9 @@
         metadata.updateNormalToken(new StringToken("B"), InetAddressAndPort.getByName("127.0.0.2"));
         metadata.updateNormalToken(new StringToken("C"), InetAddressAndPort.getByName("127.0.0.3"));
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.simpleTransient(2));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangesForEndpoint(meta.name, InetAddressAndPort.getByName("127.0.0.1"));
         Assertions.assertThat(primaryRanges.size()).as(primaryRanges.toString()).isEqualTo(1);
@@ -558,9 +489,9 @@
         Map<String, String> configOptions = new HashMap<>();
         configOptions.put("replication_factor", "2");
 
-        Keyspace.clear("Keyspace1");
+        SchemaTestUtil.dropKeyspaceIfExist("Keyspace1", false);
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.simpleTransient(2));
-        Schema.instance.load(meta);
+        SchemaTestUtil.addOrUpdateKeyspace(meta, false);
 
         Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRangeForEndpointWithinDC(meta.name, InetAddressAndPort.getByName("127.0.0.1"));
         Assertions.assertThat(primaryRanges.size()).as(primaryRanges.toString()).isEqualTo(1);
@@ -673,7 +604,7 @@
             StorageService.instance.enableAuditLog("foobar", null, null, null, null, null, null, null);
             Assert.fail();
         }
-        catch (IllegalStateException ex)
+        catch (ConfigurationException | IllegalStateException ex)
         {
             StorageService.instance.disableAuditLog();
         }
diff --git a/test/unit/org/apache/cassandra/service/StorageServiceTest.java b/test/unit/org/apache/cassandra/service/StorageServiceTest.java
index 5f9d325..e4589d6 100644
--- a/test/unit/org/apache/cassandra/service/StorageServiceTest.java
+++ b/test/unit/org/apache/cassandra/service/StorageServiceTest.java
@@ -18,30 +18,40 @@
 
 package org.apache.cassandra.service;
 
-import org.apache.cassandra.locator.EndpointsByReplica;
-import org.apache.cassandra.locator.ReplicaCollection;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.ImmutableMultimap;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.dht.RandomPartitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.locator.AbstractEndpointSnitch;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
+import org.apache.cassandra.locator.EndpointsByReplica;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.locator.ReplicaCollection;
 import org.apache.cassandra.locator.ReplicaMultimap;
 import org.apache.cassandra.locator.SimpleSnitch;
 import org.apache.cassandra.locator.SimpleStrategy;
 import org.apache.cassandra.locator.TokenMetadata;
 import org.mockito.Mockito;
 
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.awaitility.Awaitility.await;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class StorageServiceTest
@@ -98,6 +108,7 @@
         };
 
         DatabaseDescriptor.setEndpointSnitch(snitch);
+        CommitLog.instance.start();
     }
 
     private AbstractReplicationStrategy simpleStrategy(TokenMetadata tmd)
@@ -163,6 +174,36 @@
     }
 
     @Test
+    public void testSetGetSSTablePreemptiveOpenIntervalInMB()
+    {
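+        // negative values should survive the setter/getter round trip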
+        StorageService.instance.setSSTablePreemptiveOpenIntervalInMB(-1);
+        Assert.assertEquals(-1, StorageService.instance.getSSTablePreemptiveOpenIntervalInMB());
+    }
+
+    @Test
+    public void testScheduledExecutorsShutdownOnDrain() throws Throwable
+    {
+        final AtomicInteger numberOfRuns = new AtomicInteger(0);
+
+        ScheduledFuture<?> f = ScheduledExecutors.scheduledTasks.scheduleAtFixedRate(numberOfRuns::incrementAndGet,
+                                                                                     0, 1, SECONDS);
+
+        // Prove the task was scheduled more than once before checking cancelled.
+        await("first run").atMost(1, MINUTES).until(() -> numberOfRuns.get() > 1);
+
+        assertFalse(f.isCancelled());
+        StorageService.instance.drain();
+        assertTrue(f.isCancelled());
+
+        assertTrue(ScheduledExecutors.scheduledTasks.isTerminated());
+        assertTrue(ScheduledExecutors.nonPeriodicTasks.isTerminated());
+        assertTrue(ScheduledExecutors.optionalTasks.isTerminated());
+
+        // fast tasks are shut down as part of the Runtime shutdown hook.
+        assertFalse(ScheduledExecutors.scheduledFastTasks.isTerminated());
+    }
+
+    @Test
     public void testRebuildFailOnNonExistingDatacenter()
     {
         String nonExistentDC = "NON_EXISTENT_DC";
@@ -174,10 +215,10 @@
         }
         catch (IllegalArgumentException ex)
         {
-            assertEquals(String.format("Provided datacenter '%s' is not a valid datacenter, available datacenters are: %s",
-                                       nonExistentDC,
-                                       SimpleSnitch.DATA_CENTER_NAME),
-                         ex.getMessage());
+            Assert.assertEquals(String.format("Provided datacenter '%s' is not a valid datacenter, available datacenters are: %s",
+                                              nonExistentDC,
+                                              SimpleSnitch.DATA_CENTER_NAME),
+                                ex.getMessage());
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/service/SystemPropertiesBasedFileSystemOwnershipCheckTest.java b/test/unit/org/apache/cassandra/service/SystemPropertiesBasedFileSystemOwnershipCheckTest.java
new file mode 100644
index 0000000..cb54f81
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/SystemPropertiesBasedFileSystemOwnershipCheckTest.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import org.junit.Before;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+
+public class SystemPropertiesBasedFileSystemOwnershipCheckTest extends AbstractFilesystemOwnershipCheckTest
+{
+    @Before
+    public void setup()
+    {
+        super.setup();
+        System.setProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_OWNERSHIP_TOKEN.getKey(), token);
+        System.setProperty(CassandraRelevantProperties.FILE_SYSTEM_CHECK_ENABLE.getKey(), "true");
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java b/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java
index 5d8d191..7a9bbf3 100644
--- a/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java
+++ b/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java
@@ -49,7 +49,9 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static java.util.concurrent.TimeUnit.DAYS;
 import static org.apache.cassandra.net.NoPayload.noPayload;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -92,7 +94,7 @@
 
             public String getDatacenter(InetAddressAndPort endpoint)
             {
-                byte[] address = endpoint.address.getAddress();
+                byte[] address = endpoint.getAddress().getAddress();
                 if (address[1] == 1)
                     return "datacenter1";
                 else
@@ -145,7 +147,7 @@
     {
         long startingCount = ks.metric.idealCLWriteLatency.latency.getCount();
         //Specify query start time in past to ensure minimum latency measurement
-        AbstractWriteResponseHandler awr = createWriteResponseHandler(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM, System.nanoTime() - TimeUnit.DAYS.toNanos(1));
+        AbstractWriteResponseHandler awr = createWriteResponseHandler(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM, nanoTime() - DAYS.toNanos(1));
 
         //dc1
         awr.onResponse(createDummyMessage(0));
@@ -260,13 +262,13 @@
 
     private static AbstractWriteResponseHandler createWriteResponseHandler(ConsistencyLevel cl, ConsistencyLevel ideal)
     {
-        return createWriteResponseHandler(cl, ideal, System.nanoTime());
+        return createWriteResponseHandler(cl, ideal, nanoTime());
     }
 
     private static AbstractWriteResponseHandler createWriteResponseHandler(ConsistencyLevel cl, ConsistencyLevel ideal, long queryStartTime)
     {
         return ks.getReplicationStrategy().getWriteResponseHandler(ReplicaPlans.forWrite(ks, cl, targets, pending, Predicates.alwaysTrue(), ReplicaPlans.writeAll),
-                                                                   null, WriteType.SIMPLE, queryStartTime, ideal);
+                                                                   null, WriteType.SIMPLE, null, queryStartTime, ideal);
     }
 
     private static Message createDummyMessage(int target)
diff --git a/test/unit/org/apache/cassandra/service/WriteResponseHandlerTransientTest.java b/test/unit/org/apache/cassandra/service/WriteResponseHandlerTransientTest.java
index 19ed66d..f912c72 100644
--- a/test/unit/org/apache/cassandra/service/WriteResponseHandlerTransientTest.java
+++ b/test/unit/org/apache/cassandra/service/WriteResponseHandlerTransientTest.java
@@ -52,8 +52,6 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
-import static org.apache.cassandra.locator.Replica.fullReplica;
-import static org.apache.cassandra.locator.Replica.transientReplica;
 import static org.apache.cassandra.locator.ReplicaUtils.full;
 import static org.apache.cassandra.locator.ReplicaUtils.trans;
 
@@ -111,7 +109,7 @@
 
             public String getDatacenter(InetAddressAndPort endpoint)
             {
-                byte[] address = endpoint.address.getAddress();
+                byte[] address = endpoint.getAddress().getAddress();
                 if (address[1] == 1)
                     return DC1;
                 else
@@ -152,27 +150,27 @@
         EndpointsForToken natural = EndpointsForToken.of(dummy.getToken(), full(EP1), full(EP2), trans(EP3), full(EP5));
         EndpointsForToken pending = EndpointsForToken.of(dummy.getToken(), full(EP4), trans(EP6));
         ReplicaLayout.ForTokenWrite layout = new ReplicaLayout.ForTokenWrite(ks.getReplicationStrategy(), natural, pending);
-        ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forWrite(ks, ConsistencyLevel.QUORUM, layout, layout, ReplicaPlans.writeAll);
+        ReplicaPlan.ForWrite replicaPlan = ReplicaPlans.forWrite(ks, ConsistencyLevel.QUORUM, layout, layout, ReplicaPlans.writeAll);
 
         Assert.assertTrue(Iterables.elementsEqual(EndpointsForRange.of(full(EP4), trans(EP6)),
                                                   replicaPlan.pending()));
     }
 
-    private static ReplicaPlan.ForTokenWrite expected(EndpointsForToken natural, EndpointsForToken selected)
+    private static ReplicaPlan.ForWrite expected(EndpointsForToken natural, EndpointsForToken selected)
     {
-        return new ReplicaPlan.ForTokenWrite(ks, ks.getReplicationStrategy(), ConsistencyLevel.QUORUM, EndpointsForToken.empty(dummy.getToken()), natural, natural, selected);
+        return new ReplicaPlan.ForWrite(ks, ks.getReplicationStrategy(), ConsistencyLevel.QUORUM, EndpointsForToken.empty(dummy.getToken()), natural, natural, selected);
     }
 
-    private static ReplicaPlan.ForTokenWrite getSpeculationContext(EndpointsForToken natural, Predicate<InetAddressAndPort> livePredicate)
+    private static ReplicaPlan.ForWrite getSpeculationContext(EndpointsForToken natural, Predicate<InetAddressAndPort> livePredicate)
     {
         ReplicaLayout.ForTokenWrite liveAndDown = new ReplicaLayout.ForTokenWrite(ks.getReplicationStrategy(), natural, EndpointsForToken.empty(dummy.getToken()));
         ReplicaLayout.ForTokenWrite live = new ReplicaLayout.ForTokenWrite(ks.getReplicationStrategy(), natural.filter(r -> livePredicate.test(r.endpoint())), EndpointsForToken.empty(dummy.getToken()));
         return ReplicaPlans.forWrite(ks, ConsistencyLevel.QUORUM, liveAndDown, live, ReplicaPlans.writeNormal);
     }
 
-    private static void assertSpeculationReplicas(ReplicaPlan.ForTokenWrite expected, EndpointsForToken replicas, Predicate<InetAddressAndPort> livePredicate)
+    private static void assertSpeculationReplicas(ReplicaPlan.ForWrite expected, EndpointsForToken replicas, Predicate<InetAddressAndPort> livePredicate)
     {
-        ReplicaPlan.ForTokenWrite actual = getSpeculationContext(replicas, livePredicate);
+        ReplicaPlan.ForWrite actual = getSpeculationContext(replicas, livePredicate);
         assertEquals(expected.pending(), actual.pending());
         assertEquals(expected.live(), actual.live());
         assertEquals(expected.contacts(), actual.contacts());
diff --git a/test/unit/org/apache/cassandra/service/YamlBasedFileSystemOwnershipCheckTest.java b/test/unit/org/apache/cassandra/service/YamlBasedFileSystemOwnershipCheckTest.java
new file mode 100644
index 0000000..9b962e9
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/YamlBasedFileSystemOwnershipCheckTest.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service;
+
+import org.junit.Before;
+
+import static org.apache.cassandra.config.StartupChecksOptions.ENABLED_PROPERTY;
+import static org.apache.cassandra.service.StartupChecks.StartupCheckType.check_filesystem_ownership;
+
+public class YamlBasedFileSystemOwnershipCheckTest extends AbstractFilesystemOwnershipCheckTest
+{
+    @Before
+    public void setup()
+    {
+        super.setup();
+        options.getConfig(check_filesystem_ownership).put(ENABLED_PROPERTY, "true");
+        options.getConfig(check_filesystem_ownership).put("ownership_token", token);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/AbstractPaxosRepairTest.java b/test/unit/org/apache/cassandra/service/paxos/AbstractPaxosRepairTest.java
new file mode 100644
index 0000000..9721879
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/AbstractPaxosRepairTest.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+import com.google.common.collect.Iterables;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.service.paxos.AbstractPaxosRepair.State;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.service.paxos.AbstractPaxosRepair.DONE;
+
+/**
+ * test the state change logic of AbstractPaxosRepair
+ */
+public class AbstractPaxosRepairTest
+{
+    private static DecoratedKey dk(int k)
+    {
+        return Murmur3Partitioner.instance.decorateKey(ByteBufferUtil.bytes(k));
+    }
+
+    private static final DecoratedKey DK1 = dk(1);
+
+    private static State STARTED = new State();
+
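+    // minimal stub: restart() always reports STARTED and setState() exposes updateState() for driving results directly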
+    private static class PaxosTestRepair extends AbstractPaxosRepair
+    {
+        public PaxosTestRepair()
+        {
+            super(Murmur3Partitioner.instance.decorateKey(ByteBufferUtil.bytes(1)), null);
+        }
+
+        public State restart(State state, long waitUntil)
+        {
+            return STARTED;
+        }
+
+        public void setState(State update)
+        {
+            updateState(state(), null, (i1, i2) -> update);
+        }
+    }
+
+    private static class Event
+    {
+        final AbstractPaxosRepair repair;
+        final AbstractPaxosRepair.Result result;
+
+        public Event(AbstractPaxosRepair repair, AbstractPaxosRepair.Result result)
+        {
+            this.repair = repair;
+            this.result = result;
+        }
+
+        public String toString()
+        {
+            return "Event{" +
+                   "repair=" + repair +
+                   ", result=" + result +
+                   '}';
+        }
+
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Event event = (Event) o;
+            return Objects.equals(repair, event.repair) && Objects.equals(result, event.result);
+        }
+
+        public int hashCode()
+        {
+            return Objects.hash(repair, result);
+        }
+    }
+
+    private static class Listener implements AbstractPaxosRepair.Listener
+    {
+
+        final List<Event> events = new ArrayList<>();
+
+        public void onComplete(AbstractPaxosRepair repair, AbstractPaxosRepair.Result result)
+        {
+            events.add(new Event(repair, result));
+        }
+
+        public Event getOnlyEvent()
+        {
+            return Iterables.getOnlyElement(events);
+        }
+    }
+
+    @Test
+    public void stateUpdate()
+    {
+        // listeners shouldn't be called on state updates
+        PaxosTestRepair repair = new PaxosTestRepair();
+        Listener listener = new Listener();
+        repair.addListener(listener);
+        Assert.assertNull(repair.state());
+        repair.start();
+        Assert.assertSame(STARTED, repair.state());
+        Assert.assertTrue(listener.events.isEmpty());
+    }
+
+    @Test
+    public void resultUpdate()
+    {
+        // listeners should be called on result updates
+        PaxosTestRepair repair = new PaxosTestRepair();
+        Listener listener = new Listener();
+        repair.addListener(listener);
+        repair.start();
+        Assert.assertTrue(listener.events.isEmpty());
+
+        repair.setState(DONE);
+        Assert.assertEquals(new Event(repair, DONE), listener.getOnlyEvent());
+    }
+
+    @Test
+    public void stateUpdateException()
+    {
+        // state should be set to failure and listeners called on exception
+        Throwable e = new Throwable();
+
+        PaxosTestRepair repair = new PaxosTestRepair();
+        Listener listener = new Listener();
+        repair.addListener(listener);
+        repair.start();
+
+        repair.updateState(repair.state(), null, (i1, i2) -> {throw e;});
+        Assert.assertEquals(new Event(repair, new AbstractPaxosRepair.Failure(e)), listener.getOnlyEvent());
+
+    }
+
+    @Test
+    public void postResultListenerAttachment()
+    {
+        // listener should be called immediately if the repair is already complete
+        PaxosTestRepair repair = new PaxosTestRepair();
+        repair.start();
+
+        repair.setState(DONE);
+
+        Listener listener = new Listener();
+        Assert.assertTrue(listener.events.isEmpty());
+        repair.addListener(listener);
+        Assert.assertEquals(new Event(repair, DONE), listener.getOnlyEvent());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/ContentionStrategyTest.java b/test/unit/org/apache/cassandra/service/paxos/ContentionStrategyTest.java
new file mode 100644
index 0000000..8b67c42
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/ContentionStrategyTest.java
@@ -0,0 +1,466 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.paxos;
+
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.DoubleSupplier;
+import java.util.function.LongBinaryOperator;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.nicoulaj.compilecommand.annotations.Inline;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+import static org.apache.cassandra.service.paxos.ContentionStrategy.*;
+import static org.apache.cassandra.service.paxos.ContentionStrategy.WaitRandomizerFactory.*;
+import static org.apache.cassandra.service.paxos.ContentionStrategyTest.WaitRandomizerType.*;
+
+public class ContentionStrategyTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(ContentionStrategyTest.class);
+
+    static
+    {
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    private static final long MAX = maxQueryTimeoutMicros()/2;
+
+    private static final WaitParseValidator DEFAULT_WAIT_RANDOMIZER_VALIDATOR = new WaitParseValidator(defaultWaitRandomizer(), QEXP, 1.5);
+    private static final BoundParseValidator DEFAULT_MIN_VALIDATOR = new BoundParseValidator(defaultMinWait(), true, assertBound(0, MAX, 0, selectors.maxReadWrite(0f).getClass(), 0.50, 0, modifiers.multiply(0f).getClass(), 0.66));
+    private static final BoundParseValidator DEFAULT_MAX_VALIDATOR = new BoundParseValidator(defaultMaxWait(), false, assertBound(10000, 100000, 100000, selectors.maxReadWrite(0f).getClass(), 0.95, 0, modifiers.multiplyByAttemptsExp(0f).getClass(), 1.8));
+    private static final BoundParseValidator DEFAULT_MIN_DELTA_VALIDATOR = new BoundParseValidator(defaultMinDelta(), true, assertBound(5000, MAX, 5000, selectors.maxReadWrite(0f).getClass(), 0.50, 0, modifiers.multiply(0f).getClass(), 0.5));
+
+    private static List<BoundParseValidator> VALIDATE = ImmutableList.of(
+            new BoundParseValidator("p95(rw)", false, assertBound(0, MAX, MAX, selectors.maxReadWrite(0f).getClass(), 0.95, 0, modifiers.identity().getClass(), 1)),
+            new BoundParseValidator("5ms<=p50(rw)*0.66", false, assertBound(5000, MAX, MAX, selectors.maxReadWrite(0f).getClass(), 0.50, 0, modifiers.multiply(0).getClass(), 0.66)),
+            new BoundParseValidator("5us <= p50(r)*1.66*attempts", true, assertBound(5, MAX, 5, selectors.read(0f).getClass(), 0.50, 0, modifiers.multiplyByAttempts(0f).getClass(), 1.66)),
+            new BoundParseValidator("0<=p50(w)*0.66^attempts", true, assertBound(0, MAX, 0, selectors.write(0f).getClass(), 0.50, 0, modifiers.multiplyByAttemptsExp(0f).getClass(), 0.66)),
+            new BoundParseValidator("125us", true, assertBound(125, 125, 125, selectors.constant(0).getClass(), 0.0f, 125, modifiers.identity().getClass(), 1)),
+            new BoundParseValidator("5us <= p95(r)*1.8^attempts <= 100us", true, assertBound(5, 100, 5, selectors.read(0f).getClass(), 0.95, 0, modifiers.multiplyByAttemptsExp(0f).getClass(), 1.8)),
+            DEFAULT_MIN_VALIDATOR, DEFAULT_MAX_VALIDATOR, DEFAULT_MIN_DELTA_VALIDATOR
+    );
+
+    private static List<WaitParseValidator> VALIDATE_RANDOMIZER = ImmutableList.of(
+            new WaitParseValidator("quantizedexponential(0.5)", QEXP, 0.5),
+            new WaitParseValidator("exponential(2.5)", EXP, 2.5),
+            new WaitParseValidator("exp(10)", EXP, 10),
+            new WaitParseValidator("uniform", UNIFORM, 0),
+            DEFAULT_WAIT_RANDOMIZER_VALIDATOR
+    );
+
+    static class BoundParseValidator
+    {
+        final String spec;
+        final boolean isMin;
+        final Consumer<Bound> validator;
+
+        BoundParseValidator(String spec, boolean isMin, Consumer<Bound> validator)
+        {
+            this.spec = spec;
+            this.isMin = isMin;
+            this.validator = validator;
+        }
+
+        void validate(Bound bound)
+        {
+            validator.accept(bound);
+        }
+    }
+
+    enum WaitRandomizerType
+    {
+        UNIFORM(Uniform.class, (p, f) -> f.uniform()),
+        EXP(Exponential.class, (p, f) -> f.exponential(p)),
+        QEXP(QuantizedExponential.class, (p, f) -> f.quantizedExponential(p));
+
+        final Class<? extends WaitRandomizer> clazz;
+        final BiFunction<Double, WaitRandomizerFactory, WaitRandomizer> getter;
+
+        WaitRandomizerType(Class<? extends WaitRandomizer> clazz, BiFunction<Double, WaitRandomizerFactory, WaitRandomizer> getter)
+        {
+            this.clazz = clazz;
+            this.getter = getter;
+        }
+    }
+
+
+    static class WaitParseValidator
+    {
+        final String spec;
+        final WaitRandomizerType type;
+        final double power;
+
+        WaitParseValidator(String spec, WaitRandomizerType type, double power)
+        {
+            this.spec = spec;
+            this.type = type;
+            this.power = power;
+        }
+
+        void validate(WaitRandomizer randomizer)
+        {
+            Assert.assertSame(type.clazz, randomizer.getClass());
+            if (AbstractExponential.class.isAssignableFrom(type.clazz))
+                Assert.assertEquals(power, ((AbstractExponential) randomizer).power, 0.00001);
+        }
+    }
+
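+    // Samples a randomizer built from a seeded Random and checks that every wait falls in
+    // [min, max] and that the empirical mean lies within loose distribution-specific bounds.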
+    private static class WaitRandomizerOutputValidator
+    {
+        static void validate(WaitRandomizerType type, long seed, int trials, int samplesPerTrial)
+        {
+            Random random = new Random(seed);
+            WaitRandomizer randomizer = type.getter.apply(2d, new WaitRandomizerFactory()
+            {
+                @Override public LongBinaryOperator uniformLongSupplier() { return (min, max) -> min + random.nextInt((int) (max - min)); }
+                @Override public DoubleSupplier uniformDoubleSupplier() { return random::nextDouble; }
+            });
+
+            for (int i = 0 ; i < trials ; ++i)
+            {
+                int min = random.nextInt(1 << 20);
+                int max = min + 1024 + random.nextInt(1 << 20);
+                double minMean = minMean(type, min, max);
+                double maxMean = maxMean(type, min, max);
+                double sampleMean = sampleMean(samplesPerTrial, min, max, randomizer);
+                Assert.assertTrue(minMean <= sampleMean);
+                Assert.assertTrue(maxMean >= sampleMean);
+            }
+        }
+
+        private static double minMean(WaitRandomizerType type, int min, int max)
+        {
+            switch (type)
+            {
+                case UNIFORM: return min + (max - min) * (4d/10);
+                case EXP: case QEXP: return min + (max - min) * (6d/10);
+                default: throw new IllegalStateException();
+            }
+        }
+
+        private static double maxMean(WaitRandomizerType type, int min, int max)
+        {
+            switch (type)
+            {
+                case UNIFORM: return min + (max - min) * (6d/10);
+                case EXP: case QEXP: return min + (max - min) * (8d/10);
+                default: throw new IllegalStateException();
+            }
+        }
+
+        private static double sampleMean(int samples, int min, int max, WaitRandomizer randomizer)
+        {
+            double sum = 0;
+            int attempts = 1;
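+            // cycle attempts through 1..16 so the randomizer is exercised with varying attempt counts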
+            for (int i = 0 ; i < samples ; ++i)
+            {
+                long wait = randomizer.wait(min, max, attempts = (attempts & 15) + 1);
+                Assert.assertTrue(wait >= min);
+                Assert.assertTrue(wait <= max);
+                sum += wait;
+            }
+            double mean = sum / samples;
+            Assert.assertTrue(mean >= min);
+            Assert.assertTrue(mean <= max);
+            return mean;
+        }
+    }
+
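+    // Builds a validator for a parsed Bound: checks min/max/onFailure, the selector type
+    // (probing it with a capturing LatencySupplier to recover the percentile or constant)
+    // and that the modifier scales a sample latency by the expected factor.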
+    private static Consumer<Bound> assertBound(
+                             long min, long max, long onFailure,
+                             Class<? extends LatencySelector> selectorClass,
+                             double selectorPercentile,
+                             long selectorConst,
+                             Class<? extends LatencyModifier> modifierClass,
+                             double modifierVal
+    )
+    {
+        return bound -> {
+            Assert.assertEquals(min, bound.min);
+            Assert.assertEquals(max, bound.max);
+            Assert.assertEquals(onFailure, bound.onFailure);
+            Assert.assertSame(selectorClass, bound.selector.getClass());
+            if (selectorClass == selectors.constant(0).getClass())
+            {
+                LatencySupplier fail = v -> { throw new UnsupportedOperationException(); };
+                Assert.assertEquals(selectorConst, bound.selector.select(fail, fail));
+            }
+            else
+            {
+                AtomicReference<Double> percentile = new AtomicReference<>();
+                LatencySupplier set = v -> { percentile.set(v); return 0; };
+                bound.selector.select(set, set);
+                Assert.assertNotNull(percentile.get());
+                Assert.assertEquals(selectorPercentile, percentile.get(), 0.00001);
+            }
+            Assert.assertSame(modifierClass, bound.modifier.getClass());
+            Assert.assertEquals(1000000L * modifierVal, bound.modifier.modify(1000000, 1), 0.00001);
+        };
+    }
+
+    private static void assertParseFailure(String spec)
+    {
+
+        try
+        {
+            Bound bound = parseBound(spec, false);
+            Assert.fail("expected parse failure, but got " + bound);
+        }
+        catch (IllegalArgumentException e)
+        {
+            // expected
+        }
+    }
+
+    @Test
+    public void strategyParseTest()
+    {
+        for (BoundParseValidator min : VALIDATE.stream().filter(v -> v.isMin).toArray(BoundParseValidator[]::new))
+        {
+            for (BoundParseValidator max : VALIDATE.stream().filter(v -> !v.isMin).toArray(BoundParseValidator[]::new))
+            {
+                for (BoundParseValidator minDelta : VALIDATE.stream().filter(v -> v.isMin).toArray(BoundParseValidator[]::new))
+                {
+                    for (WaitParseValidator random : VALIDATE_RANDOMIZER)
+                    {
+                        {
+                            ParsedStrategy parsed = parseStrategy("min=" + min.spec + ",max=" + max.spec + ",delta=" + minDelta.spec + ",random=" + random.spec);
+                            Assert.assertEquals(parsed.min, min.spec);
+                            min.validate(parsed.strategy.min);
+                            Assert.assertEquals(parsed.max, max.spec);
+                            max.validate(parsed.strategy.max);
+                            Assert.assertEquals(parsed.minDelta, minDelta.spec);
+                            minDelta.validate(parsed.strategy.minDelta);
+                            Assert.assertEquals(parsed.waitRandomizer, random.spec);
+                            random.validate(parsed.strategy.waitRandomizer);
+                        }
+                        ParsedStrategy parsed = parseStrategy("random=" + random.spec);
+                        Assert.assertEquals(parsed.min, DEFAULT_MIN_VALIDATOR.spec);
+                        DEFAULT_MIN_VALIDATOR.validate(parsed.strategy.min);
+                        Assert.assertEquals(parsed.max, DEFAULT_MAX_VALIDATOR.spec);
+                        DEFAULT_MAX_VALIDATOR.validate(parsed.strategy.max);
+                        Assert.assertEquals(parsed.minDelta, DEFAULT_MIN_DELTA_VALIDATOR.spec);
+                        DEFAULT_MIN_DELTA_VALIDATOR.validate(parsed.strategy.minDelta);
+                        Assert.assertEquals(parsed.waitRandomizer, random.spec);
+                        random.validate(parsed.strategy.waitRandomizer);
+                    }
+                    ParsedStrategy parsed = parseStrategy("delta=" + minDelta.spec);
+                    Assert.assertEquals(parsed.min, DEFAULT_MIN_VALIDATOR.spec);
+                    DEFAULT_MIN_VALIDATOR.validate(parsed.strategy.min);
+                    Assert.assertEquals(parsed.max, DEFAULT_MAX_VALIDATOR.spec);
+                    DEFAULT_MAX_VALIDATOR.validate(parsed.strategy.max);
+                    Assert.assertEquals(parsed.minDelta, minDelta.spec);
+                    minDelta.validate(parsed.strategy.minDelta);
+                }
+                ParsedStrategy parsed = parseStrategy("max=" + max.spec);
+                Assert.assertEquals(parsed.min, DEFAULT_MIN_VALIDATOR.spec);
+                DEFAULT_MIN_VALIDATOR.validate(parsed.strategy.min);
+                Assert.assertEquals(parsed.max, max.spec);
+                max.validate(parsed.strategy.max);
+                Assert.assertEquals(parsed.minDelta, DEFAULT_MIN_DELTA_VALIDATOR.spec);
+                DEFAULT_MIN_DELTA_VALIDATOR.validate(parsed.strategy.minDelta);
+            }
+            ParsedStrategy parsed = parseStrategy("min=" + min.spec);
+            Assert.assertEquals(parsed.min, min.spec);
+            min.validate(parsed.strategy.min);
+            Assert.assertEquals(parsed.max, DEFAULT_MAX_VALIDATOR.spec);
+            DEFAULT_MAX_VALIDATOR.validate(parsed.strategy.max);
+            Assert.assertEquals(parsed.minDelta, DEFAULT_MIN_DELTA_VALIDATOR.spec);
+            DEFAULT_MIN_DELTA_VALIDATOR.validate(parsed.strategy.minDelta);
+        }
+    }
+
+    @Test
+    public void testParseRoundTrip()
+    {
+        LatencySelectorFactory selectorFactory = new LatencySelectorFactory()
+        {
+            LatencySelectorFactory delegate = ContentionStrategy.selectors;
+            public LatencySelector constant(long latency) { return selector(delegate.constant(latency), String.format("%dms", latency)); }
+            public LatencySelector read(double percentile) { return selector(delegate.read(percentile), String.format("p%d(r)", (int) (percentile * 100))); }
+            public LatencySelector write(double percentile) { return selector(delegate.write(percentile), String.format("p%d(w)", (int) (percentile * 100))); }
+            public LatencySelector maxReadWrite(double percentile) { return selector(delegate.maxReadWrite(percentile), String.format("p%d(rw)", (int) (percentile * 100))); }
+
+            private LatencySelector selector(LatencySelector selector, String str) {
+                return new LatencySelector()
+                {
+                    public long select(LatencySupplier read, LatencySupplier write)
+                    {
+                        return selector.select(read, write);
+                    }
+
+                    public String toString()
+                    {
+                        return str;
+                    }
+                };
+            }
+        };
+
+        LatencyModifierFactory modifierFactory = new LatencyModifierFactory()
+        {
+            LatencyModifierFactory delegate = ContentionStrategy.modifiers;
+            public LatencyModifier identity() { return modifier(delegate.identity(), ""); }
+            public LatencyModifier multiply(double constant) { return modifier(delegate.multiply(constant), String.format(" * %.2f", constant)); }
+            public LatencyModifier multiplyByAttempts(double multiply) { return modifier(delegate.multiplyByAttempts(multiply), String.format(" * %.2f * attempts", multiply)); }
+            public LatencyModifier multiplyByAttemptsExp(double base) { return modifier(delegate.multiplyByAttemptsExp(base), String.format(" * %.2f ^ attempts", base)); }
+
+            private LatencyModifier modifier(LatencyModifier modifier, String str) {
+                return new LatencyModifier()
+                {
+                    @Inline
+                    public long modify(long latency, int attempts)
+                    {
+                        return modifier.modify(latency, attempts);
+                    }
+
+                    public String toString()
+                    {
+                        return str;
+                    }
+                };
+            }
+        };
+
+        LatencyModifier[] latencyModifiers = new LatencyModifier[]{
+        modifierFactory.multiply(0.5),
+        modifierFactory.multiplyByAttempts(0.5),
+        modifierFactory.multiplyByAttemptsExp(0.5)
+        };
+
+        LatencySelector[] latencySelectors = new LatencySelector[]{
+        selectorFactory.read(0.5),
+        selectorFactory.write(0.5),
+        selectorFactory.maxReadWrite(0.99)
+        };
+
+        for (boolean min : new boolean[] { true, false})
+        {
+            String left = min ? "10ms <= " : "";
+            for (boolean max : new boolean[] { true, false})
+            {
+                String right = max ? " <= 10ms" : "";
+
+                for (LatencySelector selector : latencySelectors)
+                {
+                    for (LatencyModifier modifier : latencyModifiers)
+                    {
+                        String mid = String.format("%s%s", selector, modifier);
+                        String input = left + mid + right;
+                        Bound bound = parseBound(input, false, selectorFactory, modifierFactory);
+                        Assert.assertTrue(String.format("Bound: %d" , bound.min), !min || bound.min == 10000);
+                        Assert.assertTrue(String.format("Bound: %d" , bound.max), !max || bound.max == 10000);
+                        Assert.assertEquals(selector.toString(), bound.selector.toString());
+                        Assert.assertEquals(modifier.toString(), bound.modifier.toString());
+                    }
+                }
+            }
+        }
+    }
+
+    @Test
+    public void boundParseTest()
+    {
+        VALIDATE.forEach(v -> v.validate(parseBound(v.spec, v.isMin)));
+    }
+
+    @Test
+    public void waitRandomizerParseTest()
+    {
+        VALIDATE_RANDOMIZER.forEach(v -> v.validate(parseWaitRandomizer(v.spec)));
+    }
+
+    @Test
+    public void waitRandomizerSampleTest()
+    {
+        waitRandomizerSampleTest(2);
+    }
+
+    private void waitRandomizerSampleTest(int count)
+    {
+        while (count-- > 0)
+        {
+            long seed = ThreadLocalRandom.current().nextLong();
+            logger.info("Seed {}", seed);
+            for (WaitRandomizerType type : WaitRandomizerType.values())
+            {
+                WaitRandomizerOutputValidator.validate(type, seed, 100, 1000000);
+            }
+        }
+    }
+
+    @Test
+    public void boundParseFailureTest()
+    {
+        assertParseFailure("10ms <= p95(r) <= 5ms");
+        assertParseFailure("10 <= p95(r)");
+        assertParseFailure("10 <= 20 <= 30");
+        assertParseFailure("p95(r) < 5");
+        assertParseFailure("p95(x)");
+        assertParseFailure("p95()");
+        assertParseFailure("p95");
+        assertParseFailure("p50(rw)+0.66");
+    }
+
+    @Test
+    public void testBackoffTime()
+    {
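+        // with a uniform wait over [0ms, 100ms] the mean backoff should be ~50ms; allow 1ms of drift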
+        ContentionStrategy strategy = parseStrategy("min=0ms,max=100ms,random=uniform").strategy;
+        double total = 0;
+        int count = 100000;
+        for (int i = 0 ; i < count ; ++i)
+        {
+            long now = System.nanoTime();
+            long waitUntil = strategy.computeWaitUntilForContention(1, null, null, null, null);
+            long waitLength = Math.max(waitUntil - now, 0);
+            total += waitLength;
+        }
+        Assert.assertTrue(Math.abs(TimeUnit.MILLISECONDS.toNanos(50) - (total / count)) < TimeUnit.MILLISECONDS.toNanos(1L));
+    }
+
+    @Test
+    public void testBackoffTimeElapsed()
+    {
+        ContentionStrategy strategy = parseStrategy("min=0ms,max=10ms,random=uniform").strategy;
+        double total = 0;
+        int count = 1000;
+        for (int i = 0 ; i < count ; ++i)
+        {
+            long start = System.nanoTime();
+            strategy.doWaitForContention(Long.MAX_VALUE, 1, null, null, null, null);
+            long end = System.nanoTime();
+            total += end - start;
+        }
+        // make sure we have slept at least 4ms on average, given a mean wait time of 5ms
+        double avg = total / count;
+        double nanos = avg - TimeUnit.MILLISECONDS.toNanos(4);
+        Assert.assertTrue(nanos > 0);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/PaxosProposeTest.java b/test/unit/org/apache/cassandra/service/paxos/PaxosProposeTest.java
new file mode 100644
index 0000000..7872490
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/PaxosProposeTest.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicLongFieldUpdater;
+
+import org.junit.Test;
+
+import static org.apache.cassandra.service.paxos.PaxosPropose.*;
+
+public class PaxosProposeTest
+{
+    static class V
+    {
+        static final AtomicLongFieldUpdater<V> updater = AtomicLongFieldUpdater.newUpdater(V.class, "v");
+        volatile long v;
+        public boolean valid()
+        {
+            return v == 0;
+        }
+    }
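+
+    // Exhaustively permutes accept/refusal/failure counts and checks that shouldSignal fires
+    // exactly once for each ordering as responses accumulate.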
+    @Test
+    public void testShouldSignal()
+    {
+        int[] signalledAtK = new int[12];
+        V[] v = new V[] { new V(), new V(), new V(), new V(), new V(), new V(), new V(), new V(), new V(), new V(), new V(), new V() };
+        boolean[] signalled = new boolean[12];
+        for (int total = 2 ; total < 16 ; ++total)
+        {
+            for (int required = (total/2) + 1 ; required < total ; ++required)
+            {
+                for (int i = 0 ; i < total ; ++i)
+                {
+                    for (int j = 0 ; j < total - i ; ++j)
+                    {
+                        Arrays.fill(signalled, false);
+                        Arrays.fill(signalledAtK, Integer.MAX_VALUE);
+                        for (int x = 0 ; x < v.length ; ++x)
+                            v[x].v = 0;
+
+                        for (int k = 0 ; k <= total - (i + j) ; ++k)
+                        {
+                            signalled[0] = v[0].valid() && shouldSignal(responses(i, j, k), required, total, true, V.updater, v[0]);
+                            signalled[1] = v[1].valid() && shouldSignal(responses(j, i, k), required, total, true, V.updater, v[1]);
+                            signalled[2] = v[2].valid() && shouldSignal(responses(j, k, i), required, total, true, V.updater, v[2]);
+                            signalled[3] = v[3].valid() && shouldSignal(responses(k, i, j), required, total, true, V.updater, v[3]);
+                            signalled[4] = v[4].valid() && shouldSignal(responses(i, k, j), required, total, true, V.updater, v[4]);
+                            signalled[5] = v[5].valid() && shouldSignal(responses(k, j, i), required, total, true, V.updater, v[5]);
+                            signalled[6] = v[6].valid() && shouldSignal(responses(i, j, k), required, total, false, V.updater, v[6]);
+                            signalled[7] = v[7].valid() && shouldSignal(responses(j, i, k), required, total, false, V.updater, v[7]);
+                            signalled[8] = v[8].valid() && shouldSignal(responses(j, k, i), required, total, false, V.updater, v[8]);
+                            signalled[9] = v[9].valid() && shouldSignal(responses(k, i, j), required, total, false, V.updater, v[9]);
+                            signalled[10] = v[10].valid() && shouldSignal(responses(i, k, j), required, total, false, V.updater, v[10]);
+                            signalled[11] = v[11].valid() && shouldSignal(responses(k, j, i), required, total, false, V.updater, v[11]);
+                            for (int x = 0 ; x < 12 ; ++x)
+                            {
+                                if (signalled[x] && signalledAtK[x] < k)
+                                    throw new IllegalStateException(String.format("(%d,%d,%d): (%d,%d,%d,%d)", total, required, x, i, j, k, signalledAtK[x]));
+                                else if (signalled[x])
+                                    signalledAtK[x] = k;
+                            }
+                        }
+
+                        for (int x = 0 ; x < 12 ; ++x)
+                        {
+                            if (signalledAtK[x] == Integer.MAX_VALUE)
+                                throw new IllegalStateException(String.format("(%d,%d,%d): (%d, %d)", total, required, x, i, j));
+                        }
+                    }
+                }
+            }
+        }
+    }
+
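+    // Packs counts of accepts (i), refusals (j) and failures (k) into the single long
+    // response word consumed by shouldSignal.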
+    private static long responses(int i, int j, int k)
+    {
+        return i * ACCEPT_INCREMENT + j * REFUSAL_INCREMENT + k * FAILURE_INCREMENT;
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/PaxosRepairHistoryTest.java b/test/unit/org/apache/cassandra/service/paxos/PaxosRepairHistoryTest.java
new file mode 100644
index 0000000..387ec65
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/PaxosRepairHistoryTest.java
@@ -0,0 +1,531 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+import java.util.stream.Stream;
+
+import com.google.common.collect.Lists;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Pair;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.cassandra.dht.Range.deoverlap;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+import static org.apache.cassandra.service.paxos.Ballot.none;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.atUnixMicros;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.nextBallot;
+import static org.apache.cassandra.service.paxos.Commit.latest;
+import static org.apache.cassandra.service.paxos.PaxosRepairHistory.trim;
+
+public class PaxosRepairHistoryTest
+{
+    static final Logger logger = LoggerFactory.getLogger(PaxosRepairHistoryTest.class);
+    private static final AtomicInteger tableNum = new AtomicInteger();
+    static
+    {
+        System.setProperty("cassandra.partitioner", Murmur3Partitioner.class.getName());
+        DatabaseDescriptor.daemonInitialization();
+        assert DatabaseDescriptor.getPartitioner() instanceof Murmur3Partitioner;
+    }
+
+    private static final Token MIN_TOKEN = Murmur3Partitioner.instance.getMinimumToken();
+
+    private static Token t(long t)
+    {
+        return new LongToken(t);
+    }
+
+    private static Ballot b(int b)
+    {
+        return Ballot.atUnixMicrosWithLsb(b, 0, NONE);
+    }
+
+    private static Range<Token> r(Token l, Token r)
+    {
+        return new Range<>(l, r);
+    }
+
+    private static Range<Token> r(long l, long r)
+    {
+        return r(t(l), t(r));
+    }
+
+    private static Pair<Token, Ballot> pt(long t, int b)
+    {
+        return Pair.create(t(t), b(b));
+    }
+
+    private static Pair<Token, Ballot> pt(long t, Ballot b)
+    {
+        return Pair.create(t(t), b);
+    }
+
+    private static Pair<Token, Ballot> pt(Token t, int b)
+    {
+        return Pair.create(t, b(b));
+    }
+
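+    // Builds a history from (upper bound token, ballot) points; a trailing point with a null
+    // token supplies the ballot for the remainder of the ring, otherwise the tail is none().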
+    private static PaxosRepairHistory h(Pair<Token, Ballot>... points)
+    {
+        int length = points.length + (points[points.length - 1].left == null ? 0 : 1);
+        Token[] tokens = new Token[length - 1];
+        Ballot[] ballots = new Ballot[length];
+        for (int i = 0 ; i < length - 1 ; ++i)
+        {
+            tokens[i] = points[i].left;
+            ballots[i] = points[i].right;
+        }
+        ballots[length - 1] = length == points.length ? points[length - 1].right : none();
+        return new PaxosRepairHistory(tokens, ballots);
+    }
+
+    static
+    {
+        assert t(100).equals(t(100));
+        assert b(111).equals(b(111));
+    }
+
+    private static class Builder
+    {
+        PaxosRepairHistory history = PaxosRepairHistory.EMPTY;
+
+        Builder add(Ballot ballot, Range<Token>... ranges)
+        {
+            history = PaxosRepairHistory.add(history, Lists.newArrayList(ranges), ballot);
+            return this;
+        }
+
+        Builder clear()
+        {
+            history = PaxosRepairHistory.EMPTY;
+            return this;
+        }
+    }
+
+    static Builder builder()
+    {
+        return new Builder();
+    }
+
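+    // Round-trips a history through its tuple-buffer representation and through the system
+    // keyspace save/load path, asserting equality both times.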
+    private static void checkSystemTableIO(PaxosRepairHistory history)
+    {
+        Assert.assertEquals(history, PaxosRepairHistory.fromTupleBufferList(history.toTupleBufferList()));
+        String tableName = "test" + tableNum.getAndIncrement();
+        SystemKeyspace.savePaxosRepairHistory("test", tableName, history, false);
+        Assert.assertEquals(history, SystemKeyspace.loadPaxosRepairHistory("test", tableName));
+    }
+
+    @BeforeClass
+    public static void init() throws Exception
+    {
+        SchemaLoader.prepareServer();
+    }
+
+    @Test
+    public void testAdd()
+    {
+        Builder builder = builder();
+        Assert.assertEquals(h(pt(10, none()), pt(20, 5), pt(30, none()), pt(40, 5)),
+                            builder.add(b(5), r(10, 20), r(30, 40)).history);
+
+        Assert.assertEquals(none(), builder.history.ballotForToken(t(0)));
+        Assert.assertEquals(none(), builder.history.ballotForToken(t(10)));
+        Assert.assertEquals(b(5), builder.history.ballotForToken(t(11)));
+        Assert.assertEquals(b(5), builder.history.ballotForToken(t(20)));
+        Assert.assertEquals(none(), builder.history.ballotForToken(t(21)));
+
+        builder.clear();
+        Assert.assertEquals(h(pt(10, none()), pt(20, 5), pt(30, none()), pt(40, 6)),
+                            builder.add(b(5), r(10, 20)).add(b(6), r(30, 40)).history);
+        builder.clear();
+        Assert.assertEquals(h(pt(10, none()), pt(20, 5), pt(30, 6), pt(40, 5)),
+                            builder.add(b(5), r(10, 40)).add(b(6), r(20, 30)).history);
+
+        builder.clear();
+        Assert.assertEquals(h(pt(10, none()), pt(20, 6), pt(30, 5)),
+                            builder.add(b(6), r(10, 20)).add(b(5), r(15, 30)).history);
+
+        builder.clear();
+        Assert.assertEquals(h(pt(10, none()), pt(20, 5), pt(30, 6)),
+                            builder.add(b(5), r(10, 25)).add(b(6), r(20, 30)).history);
+    }
+
+    @Test
+    public void testTrim()
+    {
+        Assert.assertEquals(h(pt(10, none()), pt(20, 5), pt(30, none()), pt(40, 5), pt(50, none()), pt(60, 5)),
+                            trim(h(pt(0, none()), pt(70, 5)), Lists.newArrayList(r(10, 20), r(30, 40), r(50, 60))));
+
+        Assert.assertEquals(h(pt(10, none()), pt(20, 5)),
+                            trim(h(pt(0, none()), pt(20, 5)), Lists.newArrayList(r(10, 30))));
+
+        Assert.assertEquals(h(pt(10, none()), pt(20, 5)),
+                            trim(h(pt(10, none()), pt(30, 5)), Lists.newArrayList(r(0, 20))));
+    }
+
+    @Test
+    public void testFullRange()
+    {
+        // test full range is collapsed
+        Builder builder = builder();
+        Assert.assertEquals(h(pt(null, 5)),
+                            builder.add(b(5), r(MIN_TOKEN, MIN_TOKEN)).history);
+
+        Assert.assertEquals(b(5), builder.history.ballotForToken(MIN_TOKEN));
+        Assert.assertEquals(b(5), builder.history.ballotForToken(t(0)));
+    }
+
+    @Test
+    public void testWrapAroundRange()
+    {
+        Builder builder = builder();
+        Assert.assertEquals(h(pt(-100, 5), pt(100, none()), pt(null, 5)),
+                            builder.add(b(5), r(100, -100)).history);
+
+        Assert.assertEquals(b(5), builder.history.ballotForToken(MIN_TOKEN));
+        Assert.assertEquals(b(5), builder.history.ballotForToken(t(-101)));
+        Assert.assertEquals(b(5), builder.history.ballotForToken(t(-100)));
+        Assert.assertEquals(none(), builder.history.ballotForToken(t(-99)));
+        Assert.assertEquals(none(), builder.history.ballotForToken(t(0)));
+        Assert.assertEquals(none(), builder.history.ballotForToken(t(100)));
+        Assert.assertEquals(b(5), builder.history.ballotForToken(t(101)));
+    }
+
+    private static Token[] tks(long ... tks)
+    {
+        return LongStream.of(tks).mapToObj(LongToken::new).toArray(Token[]::new);
+    }
+
+    private static Ballot[] uuids(String ... uuids)
+    {
+        return Stream.of(uuids).map(Ballot::fromString).toArray(Ballot[]::new);
+    }
+
+    @Test
+    public void testRegression()
+    {
+        Assert.assertEquals(none(), trim(
+                new PaxosRepairHistory(
+                        tks(-9223372036854775807L, -3952873730080618203L, -1317624576693539401L, 1317624576693539401L, 6588122883467697005L),
+                        uuids("1382954c-1dd2-11b2-8fb2-f45d70d6d6d8", "138260a4-1dd2-11b2-abb2-c13c36b179e1", "1382951a-1dd2-11b2-1dd8-b7e242b38dbe", "138294fc-1dd2-11b2-83c4-43fb3a552386", "13829510-1dd2-11b2-f353-381f2ed963fa", "1382954c-1dd2-11b2-8fb2-f45d70d6d6d8")),
+                Collections.singleton(new Range<>(new LongToken(-1317624576693539401L), new LongToken(1317624576693539401L))))
+            .ballotForToken(new LongToken(-4208619967696141037L)));
+    }
+
+    @Test
+    public void testInequality()
+    {
+        Collection<Range<Token>> ranges = Collections.singleton(new Range<>(Murmur3Partitioner.MINIMUM, Murmur3Partitioner.MINIMUM));
+        PaxosRepairHistory a = PaxosRepairHistory.add(PaxosRepairHistory.EMPTY, ranges, none());
+        PaxosRepairHistory b = PaxosRepairHistory.add(PaxosRepairHistory.EMPTY, ranges, nextBallot(NONE));
+        Assert.assertNotEquals(a, b);
+    }
+
+    @Test
+    public void testRandomTrims()
+    {
+        ExecutorService executor = Executors.newFixedThreadPool(FBUtilities.getAvailableProcessors());
+        List<Future<?>> results = new ArrayList<>();
+        int count = 1000;
+        for (int numberOfAdditions : new int[] { 1, 10, 100 })
+        {
+            for (float maxCoveragePerRange : new float[] { 0.01f, 0.1f, 0.5f })
+            {
+                for (float chanceOfMinToken : new float[] { 0.01f, 0.1f })
+                {
+                    results.addAll(testRandomTrims(executor, count, numberOfAdditions, 3, maxCoveragePerRange, chanceOfMinToken));
+                }
+            }
+        }
+        FBUtilities.waitOnFutures(results);
+        executor.shutdown();
+    }
+
+    private List<Future<?>> testRandomTrims(ExecutorService executor, int tests, int numberOfAdditions, int maxNumberOfRangesPerAddition, float maxCoveragePerRange, float chanceOfMinToken)
+    {
+        return ThreadLocalRandom.current()
+                .longs(tests)
+                .mapToObj(seed -> executor.submit(() -> testRandomTrims(seed, numberOfAdditions, maxNumberOfRangesPerAddition, maxCoveragePerRange, chanceOfMinToken)))
+                .collect(Collectors.toList());
+    }
+
+    private void testRandomTrims(long seed, int numberOfAdditions, int maxNumberOfRangesPerAddition, float maxCoveragePerRange, float chanceOfMinToken)
+    {
+        Random random = new Random(seed);
+        logger.info("Seed {} ({}, {}, {}, {})", seed, numberOfAdditions, maxNumberOfRangesPerAddition, maxCoveragePerRange, chanceOfMinToken);
+        PaxosRepairHistory history = RandomPaxosRepairHistory.build(random, numberOfAdditions, maxNumberOfRangesPerAddition, maxCoveragePerRange, chanceOfMinToken);
+        // generate a random list of ranges that cover the whole ring
+        long[] tokens = random.longs(16).distinct().toArray();
+        if (random.nextBoolean())
+            tokens[0] = Long.MIN_VALUE;
+        Arrays.sort(tokens);
+        List<List<Range<Token>>> ranges = IntStream.range(0, tokens.length <= 3 ? 1 : 1 + random.nextInt((tokens.length - 1) / 2))
+                .mapToObj(ignore -> new ArrayList<Range<Token>>())
+                .collect(Collectors.toList());
+
+        for (int i = 1 ; i < tokens.length ; ++i)
+            ranges.get(random.nextInt(ranges.size())).add(new Range<>(new LongToken(tokens[i - 1]), new LongToken(tokens[i])));
+        ranges.get(random.nextInt(ranges.size())).add(new Range<>(new LongToken(tokens[tokens.length - 1]), new LongToken(tokens[0])));
+
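+        // trim the history to each group of ranges, check boundary ballots against the original,
+        // then merge the trimmed pieces back together and expect to recover the original history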
+        List<PaxosRepairHistory> splits = new ArrayList<>();
+        for (List<Range<Token>> rs : ranges)
+        {
+            PaxosRepairHistory trimmed = PaxosRepairHistory.trim(history, rs);
+            splits.add(trimmed);
+            if (rs.isEmpty())
+                continue;
+
+            Range<Token> prev = rs.get(rs.size() - 1);
+            for (Range<Token> range : rs)
+            {
+                if (prev.right.equals(range.left))
+                {
+                    Assert.assertEquals(history.ballotForToken(((LongToken)range.left).decreaseSlightly()), trimmed.ballotForToken(((LongToken)range.left).decreaseSlightly()));
+                    Assert.assertEquals(history.ballotForToken(range.left), trimmed.ballotForToken(range.left));
+                }
+                else
+                {
+                    if (!range.left.isMinimum())
+                        Assert.assertEquals(none(), trimmed.ballotForToken(range.left));
+                    if (!prev.right.isMinimum())
+                        Assert.assertEquals(none(), trimmed.ballotForToken(prev.right.increaseSlightly()));
+                }
+                Assert.assertEquals(history.ballotForToken(range.left.increaseSlightly()), trimmed.ballotForToken(range.left.increaseSlightly()));
+                if (!range.left.increaseSlightly().equals(range.right))
+                    Assert.assertEquals(history.ballotForToken(((LongToken)range.right).decreaseSlightly()), trimmed.ballotForToken(((LongToken)range.right).decreaseSlightly()));
+
+                if (range.right.isMinimum())
+                    Assert.assertEquals(history.ballotForToken(new LongToken(Long.MAX_VALUE)), trimmed.ballotForToken(new LongToken(Long.MAX_VALUE)));
+                else
+                    Assert.assertEquals(history.ballotForToken(range.right), trimmed.ballotForToken(range.right));
+                prev = range;
+            }
+        }
+
+        PaxosRepairHistory merged = PaxosRepairHistory.EMPTY;
+        for (PaxosRepairHistory split : splits)
+            merged = PaxosRepairHistory.merge(merged, split);
+
+        Assert.assertEquals(history, merged);
+        checkSystemTableIO(history);
+    }
+
+    @Test
+    public void testRandomAdds()
+    {
+        ExecutorService executor = Executors.newFixedThreadPool(FBUtilities.getAvailableProcessors());
+        List<Future<?>> results = new ArrayList<>();
+        int count = 1000;
+        for (int numberOfAdditions : new int[] { 1, 10, 100 })
+        {
+            for (float maxCoveragePerRange : new float[] { 0.01f, 0.1f, 0.5f })
+            {
+                for (float chanceOfMinToken : new float[] { 0.01f, 0.1f })
+                {
+                    results.addAll(testRandomAdds(executor, count, 3, numberOfAdditions, 3, maxCoveragePerRange, chanceOfMinToken));
+                }
+            }
+        }
+        FBUtilities.waitOnFutures(results);
+        executor.shutdown();
+    }
+
+    private List<Future<?>> testRandomAdds(ExecutorService executor, int tests, int numberOfMerges, int numberOfAdditions, int maxNumberOfRangesPerAddition, float maxCoveragePerRange, float chanceOfMinToken)
+    {
+        return ThreadLocalRandom.current()
+                .longs(tests)
+                .mapToObj(seed -> executor.submit(() -> testRandomAdds(seed, numberOfMerges, numberOfAdditions, maxNumberOfRangesPerAddition, maxCoveragePerRange, chanceOfMinToken)))
+                .collect(Collectors.toList());
+    }
+
+    private void testRandomAdds(long seed, int numberOfMerges, int numberOfAdditions, int maxNumberOfRangesPerAddition, float maxCoveragePerRange, float chanceOfMinToken)
+    {
+        Random random = new Random(seed);
+        String id = String.format("%d, %d, %d, %d, %f, %f", seed, numberOfMerges, numberOfAdditions, maxNumberOfRangesPerAddition, maxCoveragePerRange, chanceOfMinToken);
+        logger.info(id);
+        List<RandomWithCanonical> merge = new ArrayList<>();
+        while (numberOfMerges-- > 0)
+        {
+            RandomWithCanonical build = new RandomWithCanonical();
+            build.addRandom(random, numberOfAdditions, maxNumberOfRangesPerAddition, maxCoveragePerRange, chanceOfMinToken);
+            merge.add(build);
+        }
+
+        RandomWithCanonical check = new RandomWithCanonical();
+        for (RandomWithCanonical add : merge)
+            check = check.merge(add);
+        check.serdeser();
+
+        for (Token token : check.canonical.keySet())
+        {
+            LongToken tk = (LongToken) token;
+            Assert.assertEquals(id, check.ballotForToken(tk.decreaseSlightly()), check.test.ballotForToken(tk.decreaseSlightly()));
+            Assert.assertEquals(id, check.ballotForToken(tk), check.test.ballotForToken(token));
+            Assert.assertEquals(id, check.ballotForToken(tk.increaseSlightly()), check.test.ballotForToken(token.increaseSlightly()));
+        }
+
+        // check some random tokens
+        {
+            int count = 1000;
+            while (count-- > 0)
+            {
+                LongToken token = new LongToken(random.nextLong());
+                Assert.assertEquals(id, check.ballotForToken(token), check.test.ballotForToken(token));
+            }
+        }
+
+    }
+
+    static class RandomPaxosRepairHistory
+    {
+        PaxosRepairHistory test = PaxosRepairHistory.EMPTY;
+
+        void add(Collection<Range<Token>> ranges, Ballot ballot)
+        {
+            test = PaxosRepairHistory.add(test, ranges, ballot);
+        }
+
+        void merge(RandomPaxosRepairHistory other)
+        {
+            test = PaxosRepairHistory.merge(test, other.test);
+        }
+
+        void addOneRandom(Random random, int maxRangeCount, float maxCoverage, float minChance)
+        {
+            int count = maxRangeCount == 1 ? 1 : 1 + random.nextInt(maxRangeCount - 1);
+            Ballot ballot = atUnixMicros(random.nextInt(Integer.MAX_VALUE), NONE);
+            List<Range<Token>> ranges = new ArrayList<>();
+            while (count-- > 0)
+            {
+                long length = (long) (2 * random.nextDouble() * maxCoverage * Long.MAX_VALUE);
+                if (length == 0) length = 1;
+                Range<Token> range;
+                if (random.nextFloat() <= minChance)
+                {
+                    if (random.nextBoolean()) range = new Range<>(Murmur3Partitioner.MINIMUM, new LongToken(Long.MIN_VALUE + length));
+                    else range = new Range<>(new LongToken(Long.MAX_VALUE - length), Murmur3Partitioner.MINIMUM);
+                }
+                else
+                {
+                    long start = random.nextLong();
+                    range = new Range<>(new LongToken(start), new LongToken(start + length));
+                }
+                ranges.add(range);
+            }
+            ranges.sort(Range::compareTo);
+            add(deoverlap(ranges), ballot);
+        }
+
+        void addRandom(Random random, int count, int maxNumberOfRangesPerAddition, float maxCoveragePerAddition, float minTokenChance)
+        {
+            while (count-- > 0)
+                addOneRandom(random, maxNumberOfRangesPerAddition, maxCoveragePerAddition, minTokenChance);
+        }
+
+        static PaxosRepairHistory build(Random random, int count, int maxNumberOfRangesPerAddition, float maxCoveragePerRange, float chanceOfMinToken)
+        {
+            RandomPaxosRepairHistory result = new RandomPaxosRepairHistory();
+            result.addRandom(random, count, maxNumberOfRangesPerAddition, maxCoveragePerRange, chanceOfMinToken);
+            return result.test;
+        }
+    }
+
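+    // Maintains a simple TreeMap model of token -> ballot alongside the real PaxosRepairHistory
+    // so random additions and merges can be cross-checked against an independent implementation.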
+    static class RandomWithCanonical extends RandomPaxosRepairHistory
+    {
+        NavigableMap<Token, Ballot> canonical = new TreeMap<>();
+        {
+            canonical.put(Murmur3Partitioner.MINIMUM, none());
+        }
+
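+        // each canonical entry maps a boundary token to the ballot covering the tokens just above
+        // it, so look up the greatest boundary strictly below the queried token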
+        Ballot ballotForToken(LongToken token)
+        {
+            return canonical
+                    .floorEntry(token.token == Long.MIN_VALUE ? token : token.decreaseSlightly())
+                    .getValue();
+        }
+
+        RandomWithCanonical merge(RandomWithCanonical other)
+        {
+            RandomWithCanonical result = new RandomWithCanonical();
+            result.test = PaxosRepairHistory.merge(test, other.test);
+            result.canonical = new TreeMap<>();
+            result.canonical.putAll(canonical);
+            for (Map.Entry<Token, Ballot> entry : other.canonical.entrySet())
+            {
+                Token left = entry.getKey();
+                Token right = other.canonical.higherKey(left);
+                if (right == null) right = Murmur3Partitioner.MINIMUM;
+                result.addCanonical(new Range<>(left, right), entry.getValue());
+            }
+            return result;
+        }
+
+        void serdeser()
+        {
+            PaxosRepairHistory tmp = PaxosRepairHistory.fromTupleBufferList(test.toTupleBufferList());
+            Assert.assertEquals(test, tmp);
+            test = tmp;
+        }
+
+        void add(Collection<Range<Token>> addRanges, Ballot ballot)
+        {
+            super.add(addRanges, ballot);
+            for (Range<Token> range : addRanges)
+                addCanonical(range, ballot);
+        }
+
+        void addCanonical(Range<Token> range, Ballot ballot)
+        {
+            canonical.put(range.left, canonical.floorEntry(range.left).getValue());
+            if (!range.right.isMinimum())
+                canonical.put(range.right, canonical.floorEntry(range.right).getValue());
+
+            for (Range<Token> r : range.unwrap())
+            {
+                (r.right.isMinimum()
+                        ? canonical.subMap(r.left, true, new LongToken(Long.MAX_VALUE), true)
+                        : canonical.subMap(r.left, true, r.right, false)
+                ).entrySet().forEach(e -> e.setValue(latest(e.getValue(), ballot)));
+            }
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/PaxosRepairTest.java b/test/unit/org/apache/cassandra/service/paxos/PaxosRepairTest.java
new file mode 100644
index 0000000..e226cf3
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/PaxosRepairTest.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos;
+
+import java.net.UnknownHostException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Test;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+
+import org.apache.cassandra.utils.CassandraVersion;
+
+import static org.apache.cassandra.service.paxos.PaxosRepair.validateVersionCompatibility;
+import static org.junit.Assert.*;
+
+import static org.apache.cassandra.service.paxos.PaxosRepairTest.Requirements.*;
+
+public class PaxosRepairTest
+{
+    private static final String DC0 = "DC0";
+    private static final String DC1 = "DC1";
+    private static final String DC2 = "DC2";
+
+    private static InetAddressAndPort intToInet(int i)
+    {
+        try
+        {
+            return InetAddressAndPort.getByAddress(new byte[]{127, 0, 0, (byte)i});
+        }
+        catch (UnknownHostException e)
+        {
+            throw new AssertionError(e);
+        }
+    }
+
+    // should make reading the tests slightly easier
+    enum Requirements
+    {
+        NORMAL(false, false),
+        STRICT_QUORUM(true, false),
+        NO_DC_CHECKS(false, true),
+        STRICT_QUORUM_AND_NO_DC_CHECKS(true, true);
+
+        final boolean strict_quorum;
+        final boolean no_dc_check;
+
+        Requirements(boolean strict_quorum, boolean no_dc_check)
+        {
+            this.strict_quorum = strict_quorum;
+            this.no_dc_check = no_dc_check;
+        }
+    }
+
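+    // Small model of DC membership and liveness; hasSufficientLiveNodes delegates to
+    // PaxosRepair.hasSufficientLiveNodesForTopologyChange with the chosen Requirements.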
+    private static class Topology
+    {
+        Set<InetAddressAndPort> dead = new HashSet<>();
+        Map<InetAddressAndPort, String> endpointToDc = new HashMap<>();
+
+
+        Topology alive(String dc, int... epNums)
+        {
+            for (int e : epNums)
+            {
+                InetAddressAndPort ep = intToInet(e);
+                endpointToDc.put(ep, dc);
+                dead.remove(ep);
+            }
+            return this;
+        }
+
+        Topology dead(int... eps)
+        {
+            for (int i : eps)
+            {
+                InetAddressAndPort ep = intToInet(i);
+                assert endpointToDc.containsKey(ep);
+                dead.add(ep);
+            }
+            return this;
+        }
+
+        String getDc(InetAddressAndPort ep)
+        {
+            assert endpointToDc.containsKey(ep);
+            return endpointToDc.get(ep);
+        }
+
+        Set<InetAddressAndPort> all()
+        {
+            return new HashSet<>(endpointToDc.keySet());
+        }
+
+        Set<InetAddressAndPort> live()
+        {
+            Set<InetAddressAndPort> eps = all();
+            eps.removeAll(dead);
+            return eps;
+        }
+
+        boolean hasSufficientLiveNodes(Requirements requirements)
+        {
+            return PaxosRepair.hasSufficientLiveNodesForTopologyChange(all(), live(), this::getDc, requirements.no_dc_check, requirements.strict_quorum);
+        }
+    }
+
+    static Topology topology(String dc, int... epNums)
+    {
+        Topology t = new Topology();
+        t.alive(dc, epNums);
+        return t;
+    }
+
+    @Test
+    public void singleDcHasSufficientLiveNodesTest()
+    {
+        assertTrue(topology(DC0, 1).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2).dead(1).hasSufficientLiveNodes(NORMAL));
+        assertFalse(topology(DC0, 1, 2).dead(1, 2).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2, 3).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2, 3).dead(1).hasSufficientLiveNodes(NORMAL));
+        assertFalse(topology(DC0, 1, 2, 3).dead(1, 2).hasSufficientLiveNodes(NORMAL));
+    }
+
+    @Test
+    public void multiDcHasSufficientLiveNodesTest()
+    {
+        assertTrue(topology(DC0, 1, 2, 3).alive(DC1, 4, 5, 6).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2, 3).alive(DC1, 4, 5, 6).dead(1, 4).hasSufficientLiveNodes(NORMAL));
+        assertFalse(topology(DC0, 1, 2, 3).alive(DC1, 4, 5, 6).dead(4, 5).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2, 3).alive(DC1, 4, 5, 6).dead(4, 5).hasSufficientLiveNodes(NO_DC_CHECKS));
+        assertFalse(topology(DC0, 1, 2, 3).alive(DC1, 4, 5, 6).dead(1, 4, 5).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2).alive(DC1, 3, 4).alive(DC2, 5, 6).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2).alive(DC1, 3, 4).alive(DC2, 5, 6).dead(1).hasSufficientLiveNodes(NORMAL));
+        assertFalse(topology(DC0, 1, 2).alive(DC1, 3, 4).alive(DC2, 5, 6).dead(1).hasSufficientLiveNodes(STRICT_QUORUM));
+        assertFalse(topology(DC0, 1, 2).alive(DC1, 3, 4).alive(DC2, 5, 6).dead(5, 6).hasSufficientLiveNodes(NORMAL));
+        assertTrue(topology(DC0, 1, 2).alive(DC1, 3, 4).alive(DC2, 5, 6).dead(5, 6).hasSufficientLiveNodes(STRICT_QUORUM_AND_NO_DC_CHECKS));
+    }
+
+    private static CassandraVersion version(String v)
+    {
+        return new CassandraVersion(v);
+    }
+
+    @Test
+    public void versionValidationTest()
+    {
+        assertTrue(validateVersionCompatibility(version("4.1.0")));
+        assertTrue(validateVersionCompatibility(version("4.1.0-SNAPSHOT")));
+        assertFalse(validateVersionCompatibility(null));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/PaxosStateTest.java b/test/unit/org/apache/cassandra/service/paxos/PaxosStateTest.java
new file mode 100644
index 0000000..23488fb
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/PaxosStateTest.java
@@ -0,0 +1,322 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.paxos;
+
+import java.nio.ByteBuffer;
+import java.util.function.Function;
+
+import com.google.common.collect.Iterables;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.schema.TableMetadata;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.service.paxos.PaxosState.Snapshot;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.config.Config.PaxosStatePurging.gc_grace;
+import static org.apache.cassandra.config.Config.PaxosStatePurging.legacy;
+import static org.apache.cassandra.config.Config.PaxosStatePurging.repaired;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.atUnixMicros;
+import static org.apache.cassandra.service.paxos.Commit.*;
+import static org.junit.Assert.*;
+
+public class PaxosStateTest
+{
+    static TableMetadata metadata;
+    @BeforeClass
+    public static void setUpClass() throws Throwable
+    {
+        SchemaLoader.loadSchema();
+        SchemaLoader.schemaDefinition("PaxosStateTest");
+        metadata = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1").metadata.get();
+        metadata.withSwapped(metadata.params.unbuild().gcGraceSeconds(3600).build());
+    }
+
+    @AfterClass
+    public static void stopGossiper()
+    {
+        Gossiper.instance.stop();
+    }
+
+    @Test
+    public void testCommittingAfterTruncation() throws Exception
+    {
+        ColumnFamilyStore cfs = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1");
+        String key = "key" + System.nanoTime();
+        ByteBuffer value = ByteBufferUtil.bytes(0);
+        RowUpdateBuilder builder = new RowUpdateBuilder(metadata, FBUtilities.timestampMicros(), key);
+        builder.clustering("a").add("val", value);
+        PartitionUpdate update = Iterables.getOnlyElement(builder.build().getPartitionUpdates());
+
+        // CFS should be empty initially
+        assertNoDataPresent(cfs, Util.dk(key));
+
+        // Commit the proposal & verify the data is present
+        Commit beforeTruncate = newProposal(0, update);
+        PaxosState.commitDirect(beforeTruncate);
+        assertDataPresent(cfs, Util.dk(key), "val", value);
+
+        // Truncate then attempt to commit again, mutation should
+        // be ignored as the proposal predates the truncation
+        cfs.truncateBlocking();
+        PaxosState.commitDirect(beforeTruncate);
+        assertNoDataPresent(cfs, Util.dk(key));
+
+        // Now try again with a ballot created after the truncation
+        long timestamp = SystemKeyspace.getTruncatedAt(update.metadata().id) + 1;
+        Commit afterTruncate = newProposal(timestamp, update);
+        PaxosState.commitDirect(afterTruncate);
+        assertDataPresent(cfs, Util.dk(key), "val", value);
+    }
+
+    @Test
+    public void testReadExpired()
+    {
+        DatabaseDescriptor.setPaxosStatePurging(gc_grace);
+
+        String key = "key" + System.nanoTime();
+        Accepted accepted = newProposal(1, key).accepted();
+        PaxosState.legacyPropose(accepted);
+
+        DatabaseDescriptor.setPaxosStatePurging(repaired);
+        // not expired if read in the past
+        assertPaxosState(key, accepted, state -> state.current(accepted.ballot).accepted);
+        // not expired if read with paxos state purging enabled
+        assertPaxosState(key, accepted, state -> state.current(Integer.MAX_VALUE).accepted);
+        DatabaseDescriptor.setPaxosStatePurging(gc_grace);
+        // not expired if read in the past
+        assertPaxosState(key, accepted, state -> state.current(accepted.ballot).accepted);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, null, state -> state.current(Integer.MAX_VALUE).accepted);
+        // clear cache to read from disk
+        PaxosState.RECENT.clear();
+
+        Committed committed = accepted.committed();
+        Committed empty = emptyProposal(key).accepted().committed();
+        PaxosState.commitDirect(committed);
+        DatabaseDescriptor.setPaxosStatePurging(repaired);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(committed.ballot).committed);
+        // not expired if read with paxos state purging enabled
+        assertPaxosState(key, committed, state -> state.current(Integer.MAX_VALUE).committed);
+        DatabaseDescriptor.setPaxosStatePurging(gc_grace);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(committed.ballot).committed);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, empty, state -> state.current(Integer.MAX_VALUE).committed);
+        DatabaseDescriptor.setPaxosStatePurging(repaired);
+    }
+
+    @Test
+    public void testReadTTLd()
+    {
+        String key = "key" + System.nanoTime();
+        String key2 = key + 'A';
+        Accepted accepted = new AcceptedWithTTL(newProposal(1, key), 1);
+        PaxosState.legacyPropose(accepted);
+        PaxosState.legacyPropose(new AcceptedWithTTL(newProposal(1, key2), 10000));
+
+        DatabaseDescriptor.setPaxosStatePurging(repaired);
+        // not expired if read in the past
+        assertPaxosState(key, accepted, state -> state.current(0).accepted);
+        // TTL, so still expired if read with paxos state purging enabled
+        assertPaxosState(key, null, state -> state.current(Integer.MAX_VALUE).accepted);
+        DatabaseDescriptor.setPaxosStatePurging(gc_grace);
+        // not expired if read in the past
+        assertPaxosState(key, accepted, state -> state.current(0).accepted);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, null, state -> state.current(Integer.MAX_VALUE).accepted);
+        DatabaseDescriptor.setPaxosStatePurging(legacy);
+        // not expired if read in the past
+        assertPaxosState(key, accepted, state -> state.current(0).accepted);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, null, state -> state.current(Integer.MAX_VALUE).accepted);
+        // clear cache to read from disk
+        PaxosState.RECENT.clear();
+
+        Committed committed = new CommittedWithTTL(accepted, 1);
+        Committed empty = emptyProposal(key).accepted().committed();
+        PaxosState.commitDirect(committed);
+
+        DatabaseDescriptor.setPaxosStatePurging(repaired);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(0).committed);
+        // not expired if read with paxos state purging enabled
+        assertPaxosState(key, empty, state -> state.current(Integer.MAX_VALUE).committed);
+        DatabaseDescriptor.setPaxosStatePurging(gc_grace);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(0).committed);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, empty, state -> state.current(Integer.MAX_VALUE).committed);
+        DatabaseDescriptor.setPaxosStatePurging(legacy);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(0).committed);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, empty, state -> state.current(Integer.MAX_VALUE).committed);
+    }
+
+    @Test
+    public void testWriteTTLd()
+    {
+        String key = "key" + System.nanoTime();
+        Accepted accepted = newProposal(1, key).accepted();
+
+        DatabaseDescriptor.setPaxosStatePurging(legacy); // write with TTLs
+        PaxosState.legacyPropose(accepted);
+        accepted = new AcceptedWithTTL(accepted, -1); // for equality test
+
+        DatabaseDescriptor.setPaxosStatePurging(repaired);
+        // not expired if read in the past (or now)
+        assertPaxosState(key, accepted, state -> state.current(0).accepted);
+        // TTL, so still expired if read with paxos state purging enabled
+        assertPaxosState(key, null, state -> state.current(Integer.MAX_VALUE).accepted);
+
+        DatabaseDescriptor.setPaxosStatePurging(gc_grace);
+        // not expired if read in the past (or now)
+        assertPaxosState(key, accepted, state -> state.current(0).accepted);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, null, state -> state.current(Integer.MAX_VALUE).accepted);
+
+        DatabaseDescriptor.setPaxosStatePurging(legacy);
+        // not expired if read in the past (or now)
+        assertPaxosState(key, accepted, state -> state.current(0).accepted);
+        // TTL, so expired in the future
+        assertPaxosState(key, null, state -> state.current(Integer.MAX_VALUE).accepted);
+
+        PaxosState.RECENT.clear();
+
+        Committed committed = new Committed(accepted);
+        Committed empty = emptyProposal(key).accepted().committed();
+        DatabaseDescriptor.setPaxosStatePurging(legacy); // write with TTLs
+        committed = new CommittedWithTTL(committed, -1); // for equality test
+        PaxosState.commitDirect(committed);
+
+        DatabaseDescriptor.setPaxosStatePurging(repaired);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(0).committed);
+        // not expired if read with paxos state purging enabled
+        assertPaxosState(key, empty, state -> state.current(Integer.MAX_VALUE).committed);
+        DatabaseDescriptor.setPaxosStatePurging(gc_grace);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(0).committed);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, empty, state -> state.current(Integer.MAX_VALUE).committed);
+        DatabaseDescriptor.setPaxosStatePurging(legacy);
+        // not expired if read in the past
+        assertPaxosState(key, committed, state -> state.current(0).committed);
+        // expired if read with paxos state purging disabled
+        assertPaxosState(key, empty, state -> state.current(Integer.MAX_VALUE).committed);
+    }
+
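+    // clears the recent-state cache, then asserts the extractor yields the expected commit both when the
+    // state is read from disk and when it is subsequently served from the cache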
+    private static void assertPaxosState(String key, Commit expect, Function<PaxosState, Object> test)
+    {
+        // TODO: test from cache after write
+        PaxosState.RECENT.clear();
+        // test from disk
+        try (PaxosState state = PaxosState.get(Util.dk(key), metadata))
+        {
+            Assert.assertEquals(expect, test.apply(state));
+        }
+        // test from cache
+        try (PaxosState state = PaxosState.get(Util.dk(key), metadata))
+        {
+            Assert.assertEquals(expect, test.apply(state));
+        }
+    }
+
+    private Commit newProposal(long ballotMillis, PartitionUpdate update)
+    {
+        return Commit.newProposal(atUnixMicros(ballotMillis, NONE), update);
+    }
+
+    private Proposal emptyProposal(String k)
+    {
+        return Proposal.empty(Ballot.none(), Util.dk(k), Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1").metadata.get());
+    }
+
+    private Proposal newProposal(long ballotMicros, String k)
+    {
+        return Proposal.empty(atUnixMicros(ballotMicros, NONE), Util.dk(k), Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1").metadata.get());
+    }
+
+    private void assertDataPresent(ColumnFamilyStore cfs, DecoratedKey key, String name, ByteBuffer value)
+    {
+        Row row = Util.getOnlyRowUnfiltered(Util.cmd(cfs, key).build());
+        assertEquals(0, ByteBufferUtil.compareUnsigned(value,
+                row.getCell(cfs.metadata.get().getColumn(ByteBufferUtil.bytes(name))).buffer()));
+    }
+
+    private void assertNoDataPresent(ColumnFamilyStore cfs, DecoratedKey key)
+    {
+        Util.assertEmpty(Util.cmd(cfs, key).build());
+    }
+
+    private static void assertAcceptedMerge(Accepted expected, Accepted left, Accepted right)
+    {
+        Committed empty = Committed.none(expected.update.partitionKey(), expected.update.metadata());
+        Snapshot snapshotLeft = new Snapshot(null, null, left, empty);
+        Snapshot snapshotRight = new Snapshot(null, null, right, empty);
+        Accepted merged = Snapshot.merge(snapshotLeft, snapshotRight).accepted;
+        Assert.assertSame(expected, merged);
+    }
+
+    @Test
+    public void testAcceptMerging()
+    {
+        Accepted accepted = newProposal(1, "1").accepted();
+        AcceptedWithTTL acceptedWithTTL1 = new AcceptedWithTTL(accepted, 100);
+        AcceptedWithTTL acceptedWithTTL2 = new AcceptedWithTTL(accepted, 200);
+
+        assertAcceptedMerge(accepted, accepted, acceptedWithTTL1);
+        assertAcceptedMerge(accepted, acceptedWithTTL1, accepted);
+        assertAcceptedMerge(acceptedWithTTL2, acceptedWithTTL1, acceptedWithTTL2);
+        assertAcceptedMerge(acceptedWithTTL2, acceptedWithTTL2, acceptedWithTTL1);
+    }
+
+    private static void assertCommittedMerge(Committed expected, Committed left, Committed right)
+    {
+        Committed merged = Snapshot.merge(new Snapshot(null, null, null, left), new Snapshot(null, null, null, right)).committed;
+        Assert.assertSame(expected, merged);
+    }
+
+    @Test
+    public void testCommitMerging()
+    {
+        Committed committed = newProposal(1, "1").accepted().committed();
+        Committed committedWithTTL1 = new CommittedWithTTL(committed, 100);
+        Committed committedWithTTL2 = new CommittedWithTTL(committed, 200);
+
+        assertCommittedMerge(committed, committed, committedWithTTL1);
+        assertCommittedMerge(committed, committedWithTTL1, committed);
+        assertCommittedMerge(committedWithTTL2, committedWithTTL1, committedWithTTL2);
+        assertCommittedMerge(committedWithTTL2, committedWithTTL2, committedWithTTL1);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/cleanup/PaxosTableRepairsTest.java b/test/unit/org/apache/cassandra/service/paxos/cleanup/PaxosTableRepairsTest.java
new file mode 100644
index 0000000..22441fe
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/cleanup/PaxosTableRepairsTest.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.cleanup;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.AbstractPaxosRepair;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.BallotGenerator;
+import org.apache.cassandra.service.paxos.cleanup.PaxosTableRepairs.KeyRepair;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+
+public class PaxosTableRepairsTest
+{
+    private static DecoratedKey dk(int k)
+    {
+        return Murmur3Partitioner.instance.decorateKey(ByteBufferUtil.bytes(k));
+    }
+
+    private static final DecoratedKey DK1 = dk(1);
+    private static final DecoratedKey DK2 = dk(2);
+
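+    /**
+     * Stub repair whose restart() just reports a started state (or throws when failOnStart is set), so the
+     * table-level queueing logic can be exercised without performing any real paxos repair work.
+     */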
+    private static class MockRepair extends AbstractPaxosRepair
+    {
+        private static State STARTED = new State();
+        private boolean reportCompleted = false;
+        private boolean failOnStart = false;
+
+        public MockRepair(DecoratedKey key)
+        {
+            super(key, null);
+        }
+
+        public State restart(State state, long waitUntil)
+        {
+            if (failOnStart)
+                throw new RuntimeException();
+            return STARTED;
+        }
+
+        void complete()
+        {
+            set(DONE);
+        }
+
+        public boolean isComplete()
+        {
+            return reportCompleted || super.isComplete();
+        }
+    }
+
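+    // PaxosTableRepairs that always hands out MockRepair instances, plus a convenience overload that
+    // builds the ballot from a micros timestamp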
+    private static class MockTableRepairs extends PaxosTableRepairs
+    {
+        @Override
+        MockRepair createRepair(DecoratedKey key, Ballot incompleteBallot, ConsistencyLevel consistency, TableMetadata cfm)
+        {
+            return new MockRepair(key);
+        }
+
+        MockRepair startOrGetOrQueue(DecoratedKey key, int i)
+        {
+            return (MockRepair) startOrGetOrQueue(key, BallotGenerator.Global.atUnixMicros(i, NONE), ConsistencyLevel.SERIAL, null, r -> {});
+        }
+    }
+
+    /**
+     * repairs with different keys shouldn't interfere with each other
+     */
+    @Test
+    public void testMultipleRepairs()
+    {
+        MockTableRepairs repairs = new MockTableRepairs();
+
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK2, 1);
+
+        Assert.assertTrue(repair1.isStarted());
+        Assert.assertTrue(repair2.isStarted());
+        Assert.assertTrue(repairs.hasActiveRepairs(DK1));
+        Assert.assertTrue(repairs.hasActiveRepairs(DK2));
+
+        repair1.complete();
+        repair2.complete();
+
+        // completing the repairs should have cleaned up the repairs map
+        Assert.assertFalse(repairs.hasActiveRepairs(DK1));
+        Assert.assertFalse(repairs.hasActiveRepairs(DK2));
+    }
+
+    @Test
+    public void testRepairQueueing()
+    {
+        MockTableRepairs repairs = new MockTableRepairs();
+
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK1, 1);
+        MockRepair repair3 = repairs.startOrGetOrQueue(DK1, 2);
+
+        Assert.assertTrue(repair1.isStarted());
+        Assert.assertFalse(repair2.isStarted());
+        Assert.assertFalse(repair3.isStarted());
+
+        KeyRepair keyRepair = repairs.getKeyRepairUnsafe(DK1);
+        Assert.assertEquals(repair1, keyRepair.activeRepair());
+        Assert.assertTrue(keyRepair.queueContains(repair2));
+        Assert.assertTrue(keyRepair.queueContains(repair3));
+
+        repair1.complete();
+        Assert.assertTrue(repair2.isStarted());
+        Assert.assertTrue(repairs.hasActiveRepairs(DK1));
+        Assert.assertEquals(repair2, keyRepair.activeRepair());
+        Assert.assertTrue(keyRepair.queueContains(repair3));
+
+        repair2.complete();
+        Assert.assertTrue(repair3.isStarted());
+        Assert.assertTrue(repairs.hasActiveRepairs(DK1));
+
+        // completing the final repair should clean up the map
+        repair3.complete();
+        Assert.assertFalse(repairs.hasActiveRepairs(DK1));
+    }
+
+    @Test
+    public void testRepairCancellation()
+    {
+        MockTableRepairs repairs = new MockTableRepairs();
+
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK1, 1);
+        MockRepair repair3 = repairs.startOrGetOrQueue(DK1, 2);
+
+        Assert.assertTrue(repair1.isStarted());
+        Assert.assertFalse(repair2.isStarted());
+        Assert.assertFalse(repair3.isStarted());
+        Assert.assertTrue(repairs.hasActiveRepairs(DK1));
+
+        repairs.clear();
+        Assert.assertTrue(repair2.isComplete());
+        Assert.assertTrue(repair3.isComplete());
+        Assert.assertFalse(repairs.hasActiveRepairs(DK1));
+
+        MockRepair repair4 = repairs.startOrGetOrQueue(DK1, 0);
+        Assert.assertTrue(repair4.isStarted());
+        Assert.assertTrue(repairs.hasActiveRepairs(DK1));
+        repair4.complete();
+    }
+
+    @Test
+    public void testQueuedCancellation()
+    {
+        // if a queued repair is cancelled, it should be removed without affecting the active repair
+        MockTableRepairs repairs = new MockTableRepairs();
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK1, 1);
+        MockRepair repair3 = repairs.startOrGetOrQueue(DK1, 2);
+
+        KeyRepair keyRepair = repairs.getKeyRepairUnsafe(DK1);
+        Assert.assertEquals(repair1, keyRepair.activeRepair());
+        Assert.assertTrue(keyRepair.queueContains(repair2));
+
+        repair2.cancel();
+        Assert.assertEquals(repair1, keyRepair.activeRepair());
+        Assert.assertFalse(keyRepair.queueContains(repair2));
+
+        repair1.complete();
+        Assert.assertEquals(repair3, keyRepair.activeRepair());
+        Assert.assertFalse(keyRepair.queueContains(repair2));
+    }
+
+    @Test
+    public void testFailureToStart()
+    {
+        // if an exception is thrown when a queued repair is started, it should be removed and marked
+        // complete, and the next repair in the queue should be started
+        MockTableRepairs repairs = new MockTableRepairs();
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK1, 1);
+        MockRepair repair3 = repairs.startOrGetOrQueue(DK1, 2);
+
+        repair2.failOnStart = true;
+        KeyRepair keyRepair = repairs.getKeyRepairUnsafe(DK1);
+        Assert.assertEquals(repair1, keyRepair.activeRepair());
+        Assert.assertTrue(keyRepair.queueContains(repair2));
+        Assert.assertTrue(keyRepair.queueContains(repair3));
+        Assert.assertFalse(repair2.isComplete());
+
+        repair1.complete();
+        Assert.assertEquals(repair3, keyRepair.activeRepair());
+        Assert.assertFalse(keyRepair.queueContains(repair2));
+        Assert.assertTrue(repair2.isComplete());
+    }
+
+    @Test
+    public void testCompletedQueuedRepair()
+    {
+        // if a queued repair has been somehow completed (or cancelled) without also being removed, it should be skipped
+        MockTableRepairs repairs = new MockTableRepairs();
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK1, 1);
+        MockRepair repair3 = repairs.startOrGetOrQueue(DK1, 2);
+
+        repair2.reportCompleted = true;
+        KeyRepair keyRepair = repairs.getKeyRepairUnsafe(DK1);
+        Assert.assertEquals(repair1, keyRepair.activeRepair());
+        Assert.assertTrue(keyRepair.queueContains(repair2));
+        Assert.assertTrue(keyRepair.queueContains(repair3));
+
+        repair1.complete();
+        Assert.assertEquals(repair3, keyRepair.activeRepair());
+        Assert.assertFalse(keyRepair.queueContains(repair2));
+    }
+
+    @Test
+    public void testEviction()
+    {
+        MockTableRepairs repairs = new MockTableRepairs();
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK1, 1);
+
+        repairs.evictHungRepairs(System.nanoTime());
+        KeyRepair keyRepair = repairs.getKeyRepairUnsafe(DK1);
+        Assert.assertTrue(repair1.isComplete());
+        Assert.assertEquals(repair2, keyRepair.activeRepair());
+    }
+
+    @Test
+    public void testClearRepairs()
+    {
+        MockTableRepairs repairs = new MockTableRepairs();
+        MockRepair repair1 = repairs.startOrGetOrQueue(DK1, 0);
+        MockRepair repair2 = repairs.startOrGetOrQueue(DK1, 1);
+
+        KeyRepair keyRepair = repairs.getKeyRepairUnsafe(DK1);
+        Assert.assertEquals(repair1, keyRepair.activeRepair());
+
+        repairs.clear();
+        Assert.assertEquals(0, keyRepair.pending());
+        Assert.assertNull(repairs.getKeyRepairUnsafe(DK1));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTrackerTest.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTrackerTest.java
new file mode 100644
index 0000000..1f9db08
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTrackerTest.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+
+import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.PaxosState.MaybePromise.Outcome;
+import org.junit.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.Paxos;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.service.paxos.PaxosState.MaybePromise.Outcome.REJECT;
+import static org.apache.cassandra.service.paxos.PaxosState.ballotTracker;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.PAXOS_CFS;
+
+public class PaxosBallotTrackerTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(PaxosBallotTrackerTest.class);
+
+    protected static String ks;
+    protected static final String tbl = "tbl";
+    protected static TableMetadata cfm;
+
+    // which stage the ballot is tested at
+    enum Stage { PREPARE, PROPOSE, COMMIT }
+    enum Order
+    {
+        FIRST, // first ballot seen by the tracker
+        SUBSEQUENT, // newer than the current high bound
+        SUPERSEDED // older than the current high bound, so it should not replace it
+    }
+
+    @BeforeClass
+    public static void setUpClass() throws Exception
+    {
+        SchemaLoader.prepareServer();
+
+        ks = "coordinatorsessiontest";
+        cfm = CreateTableStatement.parse("CREATE TABLE tbl (k INT PRIMARY KEY, v INT)", ks).build();
+        SchemaLoader.createKeyspace(ks, KeyspaceParams.simple(1), cfm);
+    }
+
+    @Before
+    public void setUp()
+    {
+        PAXOS_CFS.truncateBlocking();
+        PaxosState.unsafeReset();
+    }
+
+    private static DecoratedKey dk(int v)
+    {
+        return DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(v));
+    }
+
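+    // seeds the ballot tracker's high bound according to the Order (FIRST leaves it empty), performs the
+    // given Stage with the chosen ballot, and asserts the resulting high bound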
+    private static void testHighBound(Stage stage, Order order)
+    {
+        logger.info("testHighBound for {} {} ", stage, order);
+        Ballot ballot1 = Paxos.ballotForConsistency(1001, ConsistencyLevel.SERIAL);
+        Ballot ballot2 = Paxos.ballotForConsistency(1002, ConsistencyLevel.SERIAL);
+
+        Ballot opBallot;
+        Ballot expected;
+        switch (order)
+        {
+            case FIRST:
+                opBallot = ballot1;
+                expected = ballot1;
+                break;
+            case SUBSEQUENT:
+                ballotTracker().updateHighBoundUnsafe(Ballot.none(), ballot1);
+                Assert.assertEquals(ballot1, ballotTracker().getHighBound());
+                opBallot = ballot2;
+                expected = ballot2;
+                break;
+            case SUPERSEDED:
+                ballotTracker().updateHighBoundUnsafe(Ballot.none(), ballot2);
+                Assert.assertEquals(ballot2, ballotTracker().getHighBound());
+                opBallot = ballot1;
+                expected = ballot2;
+                break;
+            default:
+                throw new AssertionError();
+        }
+
+        DecoratedKey key = dk(1);
+        Commit.Proposal commit = new Commit.Proposal(opBallot, PaxosRowsTest.nonEmptyUpdate(opBallot, cfm, key));
+
+        switch (stage)
+        {
+            case PREPARE:
+                try (PaxosState state = PaxosState.get(commit))
+                {
+                    state.promiseIfNewer(commit.ballot, true);
+                }
+                break;
+            case PROPOSE:
+                try (PaxosState state = PaxosState.get(commit))
+                {
+                    state.acceptIfLatest(commit);
+                }
+                break;
+            case COMMIT:
+                PaxosState.commitDirect(commit);
+                break;
+            default:
+                throw new AssertionError();
+        }
+
+        Assert.assertEquals(expected, ballotTracker().getHighBound());
+    }
+
+    /**
+     * Tests that the ballot high bound is set correctly for all update types
+     */
+    @Test
+    public void highBound()
+    {
+        for (Stage stage: Stage.values())
+        {
+            for (Order order: Order.values())
+            {
+                setUp();
+                testHighBound(stage, order);
+            }
+        }
+    }
+
+    @Test
+    public void lowBoundSet() throws IOException
+    {
+        PaxosBallotTracker ballotTracker = ballotTracker();
+        Ballot ballot1 = Paxos.ballotForConsistency(1001, ConsistencyLevel.SERIAL);
+        Ballot ballot2 = Paxos.ballotForConsistency(1002, ConsistencyLevel.SERIAL);
+        Ballot ballot3 = Paxos.ballotForConsistency(1003, ConsistencyLevel.SERIAL);
+
+        Assert.assertEquals(Ballot.none(), ballotTracker.getLowBound());
+
+        ballotTracker.updateLowBound(ballot2);
+        Assert.assertEquals(ballot2, ballotTracker.getLowBound());
+
+        ballotTracker.updateLowBound(ballot1);
+        Assert.assertEquals(ballot2, ballotTracker.getLowBound());
+
+        ballotTracker.updateLowBound(ballot3);
+        Assert.assertEquals(ballot3, ballotTracker.getLowBound());
+    }
+
+    @Test
+    public void lowBoundPrepare() throws IOException
+    {
+        PaxosBallotTracker ballotTracker = ballotTracker();
+        Ballot ballot1 = Paxos.ballotForConsistency(1001, ConsistencyLevel.SERIAL);
+        Ballot ballot2 = Paxos.ballotForConsistency(1002, ConsistencyLevel.SERIAL);
+        Ballot ballot3 = Paxos.ballotForConsistency(1003, ConsistencyLevel.SERIAL);
+        Ballot ballot4 = Paxos.ballotForConsistency(1004, ConsistencyLevel.SERIAL);
+
+        ballotTracker.updateLowBound(ballot1);
+        Assert.assertNotNull(ballotTracker.getLowBound());
+
+        DecoratedKey key = dk(1);
+        try (PaxosState state = PaxosState.get(key, cfm))
+        {
+            PaxosState.MaybePromise promise = state.promiseIfNewer(ballot2, true);
+            Assert.assertEquals(Outcome.PROMISE, promise.outcome());
+            Assert.assertNull(promise.supersededBy());
+        }
+
+        // set the lower bound into the 'future', and prepare with an earlier ballot
+        ballotTracker.updateLowBound(ballot4);
+        try (PaxosState state = PaxosState.get(key, cfm))
+        {
+            PaxosState.MaybePromise promise = state.promiseIfNewer(ballot3, true);
+            Assert.assertEquals(REJECT, promise.outcome());
+            Assert.assertEquals(ballot4, promise.supersededBy());
+        }
+    }
+
+    @Test
+    public void lowBoundAccept() throws IOException
+    {
+        PaxosBallotTracker ballotTracker = ballotTracker();
+        Ballot ballot1 = Paxos.ballotForConsistency(1001, ConsistencyLevel.SERIAL);
+        Ballot ballot2 = Paxos.ballotForConsistency(1002, ConsistencyLevel.SERIAL);
+        Ballot ballot3 = Paxos.ballotForConsistency(1003, ConsistencyLevel.SERIAL);
+        Ballot ballot4 = Paxos.ballotForConsistency(1004, ConsistencyLevel.SERIAL);
+
+        ballotTracker.updateLowBound(ballot1);
+        Assert.assertNotNull(ballotTracker.getLowBound());
+
+        DecoratedKey key = dk(1);
+        try (PaxosState state = PaxosState.get(key, cfm))
+        {
+            Ballot result = state.acceptIfLatest(new Commit.Proposal(ballot2, PartitionUpdate.emptyUpdate(cfm, key)));
+            Assert.assertNull(result);
+        }
+
+        // set the lower bound into the 'future', and accept a proposal with an earlier ballot
+        ballotTracker.updateLowBound(ballot4);
+        try (PaxosState state = PaxosState.get(key, cfm))
+        {
+            Ballot result = state.acceptIfLatest(new Commit.Proposal(ballot3, PartitionUpdate.emptyUpdate(cfm, key)));
+            Assert.assertEquals(ballot4, result);
+        }
+    }
+
+    /**
+     * updating the lower bound should persist it to disk
+     */
+    @Test
+    public void persistentLowBound() throws IOException
+    {
+        PaxosBallotTracker ballotTracker = ballotTracker();
+        Ballot ballot1 = Paxos.ballotForConsistency(1001, ConsistencyLevel.SERIAL);
+        Assert.assertEquals(Ballot.none(), ballotTracker.getLowBound());
+
+        // a new tracker shouldn't load a ballot
+        PaxosBallotTracker tracker2 = PaxosBallotTracker.load(ballotTracker.getDirectory());
+        Assert.assertEquals(Ballot.none(), tracker2.getLowBound());
+
+        // updating the lower bound should flush it to disk
+        ballotTracker.updateLowBound(ballot1);
+        Assert.assertEquals(ballot1, ballotTracker.getLowBound());
+
+        // then loading a new tracker should find the lower bound
+        PaxosBallotTracker tracker3 = PaxosBallotTracker.load(ballotTracker.getDirectory());
+        Assert.assertEquals(ballot1, tracker3.getLowBound());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosMockUpdateSupplier.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosMockUpdateSupplier.java
new file mode 100644
index 0000000..d1733b4
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosMockUpdateSupplier.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.util.*;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.db.memtable.Memtable;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.CloseableIterator;
+
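+/**
+ * In-memory UpdateSupplier for tests: keeps the highest-ballot PaxosKeyState per key and table, and replays
+ * those states through repairIterator/flushIterator without reading any real memtables or sstables.
+ */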
+public class PaxosMockUpdateSupplier implements PaxosUncommittedTracker.UpdateSupplier
+{
+    private final Map<TableId, NavigableMap<PartitionPosition, PaxosKeyState>> states = new HashMap<>();
+
+    private NavigableMap<PartitionPosition, PaxosKeyState> mapFor(TableId tableId)
+    {
+        return states.computeIfAbsent(tableId, key -> new TreeMap<>());
+    }
+
+    private void updateTo(TableId tableId, PaxosKeyState newState)
+    {
+        NavigableMap<PartitionPosition, PaxosKeyState> map = mapFor(tableId);
+        PaxosKeyState current = map.get(newState.key);
+        if (current != null && PaxosKeyState.BALLOT_COMPARATOR.compare(current, newState) > 0)
+            return;
+
+        map.put(newState.key, newState);
+    }
+
+    void inProgress(TableId tableId, DecoratedKey key, Ballot ballot)
+    {
+        updateTo(tableId, new PaxosKeyState(tableId, key, ballot, false));
+    }
+
+    void committed(TableId tableId, DecoratedKey key, Ballot ballot)
+    {
+        updateTo(tableId, new PaxosKeyState(tableId, key, ballot, true));
+    }
+
+    public CloseableIterator<PaxosKeyState> repairIterator(TableId tableId, Collection<Range<Token>> ranges)
+    {
+        Iterator<PaxosKeyState> iterator = Iterators.filter(mapFor(tableId).values().iterator(), k -> Iterables.any(ranges, r -> r.contains(k.key.getToken())));
+
+        return new CloseableIterator<PaxosKeyState>()
+        {
+            public void close() {}
+
+            public boolean hasNext()
+            {
+                return iterator.hasNext();
+            }
+
+            public PaxosKeyState next()
+            {
+                return iterator.next();
+            }
+        };
+    }
+
+    public CloseableIterator<PaxosKeyState> flushIterator(Memtable memtable)
+    {
+        ArrayList<PaxosKeyState> keyStates = new ArrayList<>();
+        for (Map.Entry<TableId, NavigableMap<PartitionPosition, PaxosKeyState>> statesEntry : states.entrySet())
+        {
+            for (Map.Entry<PartitionPosition, PaxosKeyState> entry : statesEntry.getValue().entrySet())
+            {
+                keyStates.add(entry.getValue());
+            }
+        }
+        states.clear();
+
+        Iterator<PaxosKeyState> iterator = keyStates.iterator();
+
+        return new CloseableIterator<PaxosKeyState>()
+        {
+            public void close() {}
+
+            public boolean hasNext()
+            {
+                return iterator.hasNext();
+            }
+
+            public PaxosKeyState next()
+            {
+                return iterator.next();
+            }
+        };
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosRowsTest.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosRowsTest.java
new file mode 100644
index 0000000..911ac72
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosRowsTest.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import org.junit.*;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.marshal.UUIDType;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.BTreeRow;
+import org.apache.cassandra.db.rows.BufferCell;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.CloseableIterator;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.btree.BTree;
+
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.PAXOS_CFM;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.PAXOS_CFS;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.createBallots;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.dk;
+
+public class PaxosRowsTest
+{
+    protected static String ks;
+    protected static final String tbl = "tbl";
+    protected static TableMetadata metadata;
+    protected static TableId tableId;
+
+    static Commit emptyCommitFor(Ballot ballot, DecoratedKey key)
+    {
+        return new Commit(ballot, PartitionUpdate.emptyUpdate(metadata, key));
+    }
+
+    static Commit nonEmptyCommitFor(Ballot ballot, DecoratedKey key)
+    {
+        return new Commit(ballot, nonEmptyUpdate(ballot, metadata, key));
+    }
+
+    static PartitionUpdate nonEmptyUpdate(Ballot ballot, TableMetadata cfm, DecoratedKey key)
+    {
+        ColumnMetadata valueColumn = cfm.getColumn(new ColumnIdentifier("v", false));
+        return PartitionUpdate.singleRowUpdate(cfm, key, BTreeRow.create(Clustering.EMPTY, LivenessInfo.EMPTY, Row.Deletion.LIVE, BTree.singleton(new BufferCell(valueColumn, ballot.unixMicros(), Cell.NO_TTL, Cell.NO_DELETION_TIME, ByteBufferUtil.bytes(1), null))));
+    }
+
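+    // reads this key's paxos row (clustered by the test table's id) via a local single-partition read
+    // against the paxos table (PAXOS_CFM)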
+    static Row paxosRowFor(DecoratedKey key)
+    {
+        SinglePartitionReadCommand command = SinglePartitionReadCommand.create(PAXOS_CFM,
+                FBUtilities.nowInSeconds(),
+                key,
+                new BufferClustering(UUIDType.instance.decompose(tableId.asUUID())));
+        try (ReadExecutionController opGroup = command.executionController();
+             UnfilteredPartitionIterator iterator = command.executeLocally(opGroup);
+             UnfilteredRowIterator partition = Iterators.getOnlyElement(iterator))
+        {
+            return (Row) Iterators.getOnlyElement(partition);
+        }
+    }
+
+    @BeforeClass
+    public static void setUpClass() throws Exception
+    {
+        SchemaLoader.prepareServer();
+
+        ks = "coordinatorsessiontest";
+        metadata = CreateTableStatement.parse("CREATE TABLE tbl (k INT PRIMARY KEY, v INT)", ks).build();
+        tableId = metadata.id;
+    }
+
+    @Before
+    public void setUp() throws Exception
+    {
+        PAXOS_CFS.truncateBlocking();
+    }
+
+    @Test
+    public void testRowInterpretation()
+    {
+        DecoratedKey key = dk(5);
+        Ballot[] ballots = createBallots(3);
+
+        SystemKeyspace.savePaxosWritePromise(key, metadata, ballots[0]);
+        Assert.assertEquals(new PaxosKeyState(tableId, key, ballots[0], false), PaxosRows.getCommitState(key, paxosRowFor(key), null));
+        SystemKeyspace.savePaxosProposal(emptyCommitFor(ballots[0], key));
+        Assert.assertEquals(new PaxosKeyState(tableId, key, ballots[0], true), PaxosRows.getCommitState(key, paxosRowFor(key), null));
+
+        SystemKeyspace.savePaxosWritePromise(key, metadata, ballots[1]);
+        Assert.assertEquals(new PaxosKeyState(tableId, key, ballots[1], false), PaxosRows.getCommitState(key, paxosRowFor(key), null));
+        SystemKeyspace.savePaxosProposal(nonEmptyCommitFor(ballots[1], key));
+        Assert.assertEquals(new PaxosKeyState(tableId, key, ballots[1], false), PaxosRows.getCommitState(key, paxosRowFor(key), null));
+        SystemKeyspace.savePaxosCommit(nonEmptyCommitFor(ballots[1], key));
+        Assert.assertEquals(new PaxosKeyState(tableId, key, ballots[1], true), PaxosRows.getCommitState(key, paxosRowFor(key), null));
+
+        // test cfid filter mismatch
+        Assert.assertNull(PaxosRows.getCommitState(key, paxosRowFor(key), TableId.fromUUID(UUID.randomUUID())));
+
+        SystemKeyspace.savePaxosCommit(emptyCommitFor(ballots[2], key));
+        Assert.assertEquals(new PaxosKeyState(tableId, key, ballots[2], true), PaxosRows.getCommitState(key, paxosRowFor(key), null));
+    }
+
+    @Test
+    public void testIterator()
+    {
+        Ballot[] ballots = createBallots(10);
+        List<PaxosKeyState> expected = new ArrayList<>(ballots.length);
+        for (int i=0; i<ballots.length; i++)
+        {
+            Ballot ballot = ballots[i];
+            DecoratedKey key = dk(i);
+
+            if (i%2 == 0)
+            {
+                SystemKeyspace.savePaxosProposal(nonEmptyCommitFor(ballot, key));
+                expected.add(new PaxosKeyState(tableId, key, ballot, false));
+            }
+            else
+            {
+                SystemKeyspace.savePaxosCommit(nonEmptyCommitFor(ballot, key));
+                expected.add(new PaxosKeyState(tableId, key, ballot, true));
+            }
+        }
+
+        PartitionRangeReadCommand command = PartitionRangeReadCommand.allDataRead(PAXOS_CFM, FBUtilities.nowInSeconds());
+        try (ReadExecutionController opGroup = command.executionController();
+             UnfilteredPartitionIterator partitions = command.executeLocally(opGroup);
+             CloseableIterator<PaxosKeyState> iterator = PaxosRows.toIterator(partitions, metadata.id, true))
+        {
+            Assert.assertEquals(expected, Lists.newArrayList(iterator));
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTrackerTest.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTrackerTest.java
new file mode 100644
index 0000000..ee0878c
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTrackerTest.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+import java.util.Collections;
+
+import com.google.common.collect.Sets;
+import com.google.common.io.Files;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.rows.BTreeRow;
+import org.apache.cassandra.db.rows.BufferCell;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosStateTracker.stateDirectory;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.*;
+import static org.apache.cassandra.service.paxos.uncommitted.UncommittedTableDataTest.assertIteratorContents;
+import static org.apache.cassandra.service.paxos.uncommitted.UncommittedTableDataTest.uncommitted;
+
+
+public class PaxosStateTrackerTest
+{
+    private File directory1 = null;
+    private File directory2 = null;
+    private File[] directories = null;
+    protected static String ks;
+    protected static TableMetadata cfm1;
+    protected static TableMetadata cfm2;
+
+    @BeforeClass
+    public static void setUpClass() throws Exception
+    {
+        SchemaLoader.prepareServer();
+
+        ks = "coordinatorsessiontest";
+        cfm1 = TableMetadata.builder(ks, "tbl1").addPartitionKeyColumn("k", Int32Type.instance).addRegularColumn("v", Int32Type.instance).build();
+        cfm2 = TableMetadata.builder(ks, "tbl2").addPartitionKeyColumn("k", Int32Type.instance).addRegularColumn("v", Int32Type.instance).build();
+        SchemaLoader.createKeyspace(ks, KeyspaceParams.simple(1), cfm1, cfm2);
+    }
+
+    @Before
+    public void setUp() throws Exception
+    {
+        PAXOS_CFS.truncateBlocking();
+        PAXOS_REPAIR_CFS.truncateBlocking();
+
+        if (directory1 != null)
+            FileUtils.deleteRecursive(directory1);
+        if (directory2 != null)
+            FileUtils.deleteRecursive(directory2);
+
+        directory1 = new File(Files.createTempDir());
+        directory2 = new File(Files.createTempDir());
+        directories = new File[]{directory1, directory2};
+    }
+
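+    // sets a system property for the duration of a try-with-resources block and restores (or clears) the
+    // previous value on close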
+    private static class SystemProp implements AutoCloseable
+    {
+        private final String prop;
+        private final String prev;
+
+        public SystemProp(String prop, String prev)
+        {
+            this.prop = prop;
+            this.prev = prev;
+        }
+
+        public void close()
+        {
+            if (prev == null)
+                System.clearProperty(prop);
+            else
+                System.setProperty(prop, prev);
+        }
+
+        public static SystemProp set(String prop, String val)
+        {
+            String prev = System.getProperty(prop);
+            System.setProperty(prop, val);
+            return new SystemProp(prop, prev);
+        }
+
+        public static SystemProp set(String prop, boolean val)
+        {
+            return set(prop, Boolean.toString(val));
+        }
+    }
+
+    private static PartitionUpdate update(TableMetadata cfm, int k, Ballot ballot)
+    {
+        ColumnMetadata col = cfm.getColumn(new ColumnIdentifier("v", false));
+        Cell cell = BufferCell.live(col, ballot.unixMicros(), ByteBufferUtil.bytes(0));
+        Row row = BTreeRow.singleCellRow(Clustering.EMPTY, cell);
+        return PartitionUpdate.singleRowUpdate(cfm, dk(k), row);
+    }
+
+    private static Commit commit(TableMetadata cfm, int k, Ballot ballot)
+    {
+        return new Commit(ballot, update(cfm, k, ballot));
+    }
+
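+    // adds the given range at the supplied low bound to the table's paxos repair history and persists it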
+    private static void savePaxosRepair(TableMetadata cfm, Range<Token> range, Ballot lowBound)
+    {
+        PaxosRepairHistory current = SystemKeyspace.loadPaxosRepairHistory(cfm.keyspace, cfm.name);
+        PaxosRepairHistory updated = PaxosRepairHistory.add(current, Collections.singleton(range), lowBound);
+        SystemKeyspace.savePaxosRepairHistory(cfm.keyspace, cfm.name, updated, true);
+    }
+
+    private static void savePaxosRepair(TableMetadata cfm, int left, int right, Ballot lowBound)
+    {
+        IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
+        Range<Token> range = new Range<>(partitioner.getToken(ByteBufferUtil.bytes(left)), partitioner.getToken(ByteBufferUtil.bytes(right)));
+        savePaxosRepair(cfm, range, lowBound);
+    }
+
+    private static void initDirectory(File directory) throws IOException
+    {
+        PaxosStateTracker.create(new File[]{directory}).ballots().flush();
+    }
+
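+    // With no existing tracker files on disk, creating the tracker should report that a rebuild is
+    // needed, and maybeRebuild() should repopulate the uncommitted index from the system paxos
+    // table (promises and proposals only; committed keys are excluded).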
+    @Test
+    public void autoRebuild() throws Throwable
+    {
+        Ballot[] ballots = createBallots(6);
+
+        // save a promise, proposal, and commit to each table
+        SystemKeyspace.savePaxosWritePromise(dk(0), cfm1, ballots[0]);
+        SystemKeyspace.savePaxosWritePromise(dk(1), cfm2, ballots[1]);
+        SystemKeyspace.savePaxosProposal(commit(cfm1, 2, ballots[2]));
+        SystemKeyspace.savePaxosProposal(commit(cfm2, 3, ballots[3]));
+        SystemKeyspace.savePaxosCommit(commit(cfm1, 4, ballots[4]));
+        SystemKeyspace.savePaxosCommit(commit(cfm2, 5, ballots[5]));
+
+        PaxosStateTracker tracker = PaxosStateTracker.create(directories);
+        Assert.assertTrue(tracker.isRebuildNeeded());
+        Assert.assertEquals(Sets.newHashSet(), tracker.uncommitted().tableIds());
+
+        tracker.maybeRebuild();
+
+        Assert.assertEquals(stateDirectory(directory1), tracker.uncommitted().getDirectory());
+
+        Assert.assertEquals(Sets.newHashSet(cfm1.id, cfm2.id), tracker.uncommitted().tableIds());
+
+        UncommittedTableData tableData1 = tracker.uncommitted().getTableState(cfm1.id);
+        assertIteratorContents(tableData1.iterator(ALL_RANGES),  kl(uncommitted(0, ballots[0]),
+                                                                    uncommitted(2, ballots[2])));
+
+        UncommittedTableData tableData2 = tracker.uncommitted().getTableState(cfm2.id);
+        assertIteratorContents(tableData2.iterator(ALL_RANGES),  kl(uncommitted(1, ballots[1]),
+                                                                    uncommitted(3, ballots[3])));
+    }
+
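+    // With tracker files already present, a rebuild should only happen when forced via
+    // FORCE_REBUILD_PROP; the rebuilt state should also pick up the ballot low/high bounds from
+    // the saved paxos repair history and the rebuilt uncommitted data.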
+    @Test
+    public void manualRebuild() throws Throwable
+    {
+        initDirectory(directory1);
+        {
+            PaxosStateTracker tracker = PaxosStateTracker.create(directories);
+            Assert.assertFalse(tracker.isRebuildNeeded());
+            Assert.assertEquals(Ballot.none(), tracker.ballots().getLowBound());
+        }
+
+        Ballot[] ballots = createBallots(4);
+        savePaxosRepair(cfm1, 0, 10, ballots[0]);
+        savePaxosRepair(cfm1, 10, 20, ballots[1]);
+        SystemKeyspace.savePaxosWritePromise(dk(0), cfm1, ballots[2]);
+        SystemKeyspace.savePaxosProposal(commit(cfm1, 2, ballots[3]));
+
+        try (SystemProp forceRebuild = SystemProp.set(PaxosStateTracker.FORCE_REBUILD_PROP, true))
+        {
+            PaxosStateTracker tracker = PaxosStateTracker.create(directories);
+            Assert.assertTrue(tracker.isRebuildNeeded());
+            Assert.assertEquals(Ballot.none(), tracker.ballots().getLowBound());
+            tracker.maybeRebuild();
+
+            UncommittedTableData tableData1 = tracker.uncommitted().getTableState(cfm1.id);
+            assertIteratorContents(tableData1.iterator(ALL_RANGES),  kl(uncommitted(0, ballots[2]),
+                                                                        uncommitted(2, ballots[3])));
+            Assert.assertEquals(ballots[1], tracker.ballots().getLowBound());
+            Assert.assertEquals(ballots[3], tracker.ballots().getHighBound());
+        }
+    }
+
+    // test that we can find paxos data in any of the configured directories
+    @Test
+    public void testMultiDirectories() throws Throwable
+    {
+        initDirectory(directory2);
+        PaxosStateTracker tracker = PaxosStateTracker.create(directories);
+        Assert.assertFalse(tracker.isRebuildNeeded());
+        Assert.assertEquals(stateDirectory(directory2), tracker.uncommitted().getDirectory());
+    }
+
+    // test that paxos state files present in multiple directories throw an exception
+    @Test(expected=IllegalStateException.class)
+    public void testConflictingDirectories() throws Throwable
+    {
+        initDirectory(directory1);
+        initDirectory(directory2);
+        PaxosStateTracker.create(directories);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTests.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTests.java
new file mode 100644
index 0000000..2804508
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.util.*;
+
+import com.google.common.collect.Lists;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.dht.*;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.schema.SchemaConstants.SYSTEM_KEYSPACE_NAME;
+import static org.apache.cassandra.service.paxos.Ballot.Flag.GLOBAL;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.nextBallot;
+
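+/**
+ * Shared constants and helpers for the paxos uncommitted-state tests: a byte-ordered partitioner,
+ * the full token range, the system paxos tables, and factories for ballots, keys, and ranges.
+ */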
+class PaxosUncommittedTests
+{
+    static
+    {
+        DatabaseDescriptor.daemonInitialization();
+        CommitLog.instance.start();
+    }
+
+    static final IPartitioner PARTITIONER = new ByteOrderedPartitioner();
+    static final Token MIN_TOKEN = PARTITIONER.getMinimumToken();
+    static final Range<Token> FULL_RANGE = new Range<>(MIN_TOKEN, MIN_TOKEN);
+    static final Collection<Range<Token>> ALL_RANGES = Collections.singleton(FULL_RANGE);
+    static final ColumnFamilyStore PAXOS_CFS = Keyspace.open(SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PAXOS);
+    static final TableMetadata PAXOS_CFM = PAXOS_CFS.metadata.get();
+    static final ColumnFamilyStore PAXOS_REPAIR_CFS = Keyspace.open(SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PAXOS_REPAIR_HISTORY);
+    static final TableMetadata PAXOS_REPAIR_CFM = PAXOS_REPAIR_CFS.metadata();
+
+    static Ballot[] createBallots(int num)
+    {
+        Ballot[] ballots = new Ballot[num];
+        for (int i=0; i<num; i++)
+            ballots[i] = nextBallot(0, GLOBAL);
+
+        return ballots;
+    }
+
+    static DecoratedKey dk(int v)
+    {
+        return DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(v));
+    }
+
+    static List<PaxosKeyState> kl(Iterator<PaxosKeyState> iter)
+    {
+        return Lists.newArrayList(iter);
+    }
+
+    static List<PaxosKeyState> kl(PaxosKeyState... states)
+    {
+        return Lists.newArrayList(states);
+    }
+
+    static Token tk(int v)
+    {
+        return dk(v).getToken();
+    }
+
+    static Range<Token> r(Token start, Token stop)
+    {
+        return new Range<>(start != null ? start : MIN_TOKEN, stop != null ? stop : MIN_TOKEN);
+    }
+
+    static Range<Token> r(int start, int stop)
+    {
+        return r(PARTITIONER.getToken(ByteBufferUtil.bytes(start)), PARTITIONER.getToken(ByteBufferUtil.bytes(stop)));
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerIntegrationTest.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerIntegrationTest.java
new file mode 100644
index 0000000..8c3dd25
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerIntegrationTest.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import org.junit.*;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.PaxosState;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.CloseableIterator;
+
+import static org.apache.cassandra.service.paxos.Ballot.Flag.NONE;
+import static org.apache.cassandra.service.paxos.BallotGenerator.Global.nextBallot;
+import static org.apache.cassandra.service.paxos.Commit.*;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.ALL_RANGES;
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.PAXOS_CFS;
+
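+/**
+ * Exercises the live PaxosUncommittedTracker and PaxosBallotTracker wired up through PaxosState,
+ * rather than standalone tracker instances.
+ */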
+public class PaxosUncommittedTrackerIntegrationTest
+{
+    protected static String ks;
+    protected static final String tbl = "tbl";
+    protected static TableMetadata cfm;
+
+    @BeforeClass
+    public static void setUpClass() throws Exception
+    {
+        SchemaLoader.prepareServer();
+
+        ks = "coordinatorsessiontest";
+        cfm = CreateTableStatement.parse("CREATE TABLE tbl (k INT PRIMARY KEY, v INT)", ks).build();
+        SchemaLoader.createKeyspace(ks, KeyspaceParams.simple(1), cfm);
+    }
+
+    @Before
+    public void setUp() throws Exception
+    {
+        PAXOS_CFS.truncateBlocking();
+    }
+
+    private static DecoratedKey dk(int v)
+    {
+        return DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(v));
+    }
+
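+    // Walks a single key through promise, accept, and commit, checking that the uncommitted key
+    // iterator reports the key until the commit is applied.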
+    @Test
+    public void commitCycle()
+    {
+        PaxosUncommittedTracker tracker = PaxosState.uncommittedTracker();
+        PaxosBallotTracker ballotTracker = PaxosState.ballotTracker();
+        Assert.assertNull(tracker.getTableState(cfm.id));
+        Assert.assertEquals(Ballot.none(), ballotTracker.getLowBound());
+        Assert.assertEquals(Ballot.none(), ballotTracker.getHighBound());
+
+        try (CloseableIterator<UncommittedPaxosKey> iterator = tracker.uncommittedKeyIterator(cfm.id, ALL_RANGES))
+        {
+            Assert.assertFalse(iterator.hasNext());
+        }
+
+        DecoratedKey key = dk(1);
+        Ballot ballot = nextBallot(NONE);
+        Proposal proposal = new Proposal(ballot, PaxosRowsTest.nonEmptyUpdate(ballot, cfm, key));
+
+        try (PaxosState state = PaxosState.get(key, cfm))
+        {
+            state.promiseIfNewer(proposal.ballot, true);
+        }
+
+        try (CloseableIterator<UncommittedPaxosKey> iterator = tracker.uncommittedKeyIterator(cfm.id, ALL_RANGES))
+        {
+            Assert.assertEquals(key, Iterators.getOnlyElement(iterator).getKey());
+        }
+
+        try (PaxosState state = PaxosState.get(key, cfm))
+        {
+            state.acceptIfLatest(proposal);
+        }
+
+        try (CloseableIterator<UncommittedPaxosKey> iterator = tracker.uncommittedKeyIterator(cfm.id, ALL_RANGES))
+        {
+            Assert.assertEquals(key, Iterators.getOnlyElement(iterator).getKey());
+        }
+
+        PaxosState.commitDirect(proposal.agreed());
+        try (CloseableIterator<UncommittedPaxosKey> iterator = tracker.uncommittedKeyIterator(cfm.id, ALL_RANGES))
+        {
+            Assert.assertFalse(iterator.hasNext());
+        }
+    }
+
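+    // Same cycle, but the paxos table is flushed before the commit, so the in-memory commit has to
+    // mask the uncommitted entry that was already flushed to disk.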
+    @Test
+    public void inMemoryCommit()
+    {
+        PaxosUncommittedTracker tracker = PaxosState.uncommittedTracker();
+
+        DecoratedKey key = dk(1);
+        Ballot ballot = nextBallot(NONE);
+        Proposal proposal = new Proposal(ballot, PaxosRowsTest.nonEmptyUpdate(ballot, cfm, key));
+
+        try (PaxosState state = PaxosState.get(key, cfm))
+        {
+            state.promiseIfNewer(proposal.ballot, true);
+            state.acceptIfLatest(proposal);
+        }
+        try (CloseableIterator<UncommittedPaxosKey> iterator = tracker.uncommittedKeyIterator(cfm.id, ALL_RANGES))
+        {
+            Assert.assertEquals(key, Iterators.getOnlyElement(iterator).getKey());
+        }
+
+        Util.flush(PAXOS_CFS);
+
+        PaxosState.commitDirect(proposal.agreed());
+        try (CloseableIterator<UncommittedPaxosKey> iterator = tracker.uncommittedKeyIterator(cfm.id, ALL_RANGES))
+        {
+            Assert.assertEquals(Lists.newArrayList(), Lists.newArrayList(iterator));
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerTest.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerTest.java
new file mode 100644
index 0000000..fa9f0e5
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerTest.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.util.*;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.utils.CloseableIterator;
+
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.*;
+
+public class PaxosUncommittedTrackerTest
+{
+    private static final String KS = "ks";
+    private static final String TBL = "tbl";
+    private static TableId cfid;
+    private File directory = null;
+    private PaxosUncommittedTracker tracker;
+    private PaxosMockUpdateSupplier updates;
+    private UncommittedTableData state;
+
+    @BeforeClass
+    public static void setupClass()
+    {
+        SchemaLoader.prepareServer();
+        TableMetadata tableMetadata = TableMetadata.builder("ks", "tbl")
+                                                   .addPartitionKeyColumn("k", Int32Type.instance)
+                                                   .addRegularColumn("v", Int32Type.instance)
+                                                   .build();
+        cfid = tableMetadata.id;
+        SchemaLoader.createKeyspace(KS, KeyspaceParams.simple(1), tableMetadata);
+    }
+
+    @Before
+    public void setUp()
+    {
+        if (directory != null)
+            FileUtils.deleteRecursive(directory);
+
+        directory = new File(Files.createTempDir());
+
+        tracker = new PaxosUncommittedTracker(directory);
+        tracker.start();
+        updates = new PaxosMockUpdateSupplier();
+        PaxosUncommittedTracker.unsafSetUpdateSupplier(updates);
+        state = tracker.getOrCreateTableState(cfid);
+    }
+
+    private static List<UncommittedPaxosKey> uncommittedList(PaxosUncommittedTracker tracker, Collection<Range<Token>> ranges)
+    {
+        try (CloseableIterator<UncommittedPaxosKey> iterator = tracker.uncommittedKeyIterator(cfid, ranges))
+        {
+            return Lists.newArrayList(iterator);
+        }
+    }
+
+    private static List<UncommittedPaxosKey> uncommittedList(PaxosUncommittedTracker tracker, Range<Token> range)
+    {
+        return uncommittedList(tracker, Collections.singleton(range));
+    }
+
+    private static List<UncommittedPaxosKey> uncommittedList(PaxosUncommittedTracker tracker)
+    {
+        return uncommittedList(tracker, FULL_RANGE);
+    }
+
+    @Test
+    public void inmemory() throws Exception
+    {
+        Assert.assertEquals(0, state.numFiles());
+        int size = 5;
+        List<PaxosKeyState> expected = new ArrayList<>(size);
+        int key = 0;
+        for (Ballot ballot : createBallots(size))
+        {
+            DecoratedKey dk = dk(key++);
+            updates.inProgress(cfid, dk, ballot);
+            expected.add(new PaxosKeyState(cfid, dk, ballot, false));
+        }
+
+        Assert.assertEquals(expected, uncommittedList(tracker));
+        Assert.assertEquals(0, state.numFiles());
+    }
+
+    @Test
+    public void onDisk() throws Exception
+    {
+        Assert.assertEquals(0, state.numFiles());
+        int size = 5;
+        List<PaxosKeyState> expected = new ArrayList<>(size);
+        int key = 0;
+        for (Ballot ballot : createBallots(size))
+        {
+            DecoratedKey dk = dk(key++);
+            updates.inProgress(cfid, dk, ballot);
+            expected.add(new PaxosKeyState(cfid, dk, ballot, false));
+        }
+        tracker.flushUpdates(null);
+
+        Assert.assertEquals(expected, uncommittedList(tracker));
+
+        Assert.assertEquals(1, state.numFiles());
+        Assert.assertEquals(expected, kl(state.iterator(Collections.singleton(FULL_RANGE))));
+    }
+
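+    // Half of the updates are flushed to a data file and half remain in memory; iteration should
+    // merge both sources while only the flushed half appears in the on-disk state.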
+    @Test
+    public void mixed() throws Exception
+    {
+        Assert.assertEquals(0, state.numFiles());
+        int size = 10;
+        PaxosKeyState[] expectedArr = new PaxosKeyState[size];
+        List<PaxosKeyState> inMemory = new ArrayList<>(size / 2);
+        List<PaxosKeyState> onDisk = new ArrayList<>(size / 2);
+        Ballot[] ballots = createBallots(size);
+
+        for (int i=0; i<size; i+=2)
+        {
+            Ballot ballot = ballots[i];
+            DecoratedKey dk = dk(i);
+            updates.inProgress(cfid, dk, ballot);
+            PaxosKeyState ballotState = new PaxosKeyState(cfid, dk, ballot, false);
+            onDisk.add(ballotState);
+            expectedArr[i] = ballotState;
+        }
+
+        tracker.flushUpdates(null);
+
+        for (int i=1; i<size; i+=2)
+        {
+            Ballot ballot = ballots[i];
+            DecoratedKey dk = dk(i);
+            updates.inProgress(cfid, dk, ballot);
+            PaxosKeyState ballotState = new PaxosKeyState(cfid, dk, ballot, false);
+            inMemory.add(ballotState);
+            expectedArr[i] = ballotState;
+        }
+
+        List<PaxosKeyState> expected = kl(expectedArr);
+
+        Assert.assertEquals(expected, uncommittedList(tracker));
+
+        Assert.assertEquals(1, state.numFiles());
+        Assert.assertEquals(onDisk, kl(state.iterator(Collections.singleton(FULL_RANGE))));
+    }
+
+    @Test
+    public void committed()
+    {
+        UncommittedTableData tableData = UncommittedTableData.load(directory, cfid);
+        Assert.assertEquals(0, tableData.numFiles());
+        Ballot ballot = createBallots(1)[0];
+
+        DecoratedKey dk = dk(1);
+        updates.inProgress(cfid, dk, ballot);
+
+        Assert.assertEquals(kl(new PaxosKeyState(cfid, dk, ballot, false)), uncommittedList(tracker));
+
+        updates.committed(cfid, dk, ballot);
+        Assert.assertTrue(uncommittedList(tracker).isEmpty());
+    }
+
+    /**
+     * Test that commits don't resolve in-progress transactions with more recent ballots
+     */
+    @Test
+    public void pastCommit()
+    {
+        Ballot[] ballots = createBallots(2);
+        DecoratedKey dk = dk(1);
+        Assert.assertTrue(ballots[1].uuidTimestamp() > ballots[0].uuidTimestamp());
+
+        updates.inProgress(cfid, dk, ballots[1]);
+        updates.committed(cfid, dk, ballots[0]);
+
+        Assert.assertEquals(kl(new PaxosKeyState(cfid, dk, ballots[1], false)), uncommittedList(tracker));
+    }
+
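+    // Uncommitted keys should be filterable by token range, including open-ended ranges and
+    // multiple disjoint ranges, across both flushed and in-memory updates.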
+    @Test
+    public void tokenRange() throws Exception
+    {
+        Assert.assertEquals(0, state.numFiles());
+        int size = 10;
+        PaxosKeyState[] expectedArr = new PaxosKeyState[size];
+        Ballot[] ballots = createBallots(size);
+
+        for (int i=0; i<size; i+=2)
+        {
+            Ballot ballot = ballots[i];
+            DecoratedKey dk = dk(i);
+            updates.inProgress(cfid, dk, ballot);
+            PaxosKeyState ballotState = new PaxosKeyState(cfid, dk, ballot, false);
+            expectedArr[i] = ballotState;
+        }
+
+        tracker.flushUpdates(null);
+
+        for (int i=1; i<size; i+=2)
+        {
+            Ballot ballot = ballots[i];
+            DecoratedKey dk = dk(i);
+            updates.inProgress(cfid, dk, ballot);
+            PaxosKeyState ballotState = new PaxosKeyState(cfid, dk, ballot, false);
+            expectedArr[i] = ballotState;
+        }
+
+        List<PaxosKeyState> expected = kl(expectedArr);
+
+        Assert.assertEquals(expected.subList(0, 5), uncommittedList(tracker, r(null, tk(4))));
+        Assert.assertEquals(expected.subList(3, 7), uncommittedList(tracker, r(2, 6)));
+        Assert.assertEquals(expected.subList(8, 10), uncommittedList(tracker, r(tk(7), null)));
+        Assert.assertEquals(Lists.newArrayList(Iterables.concat(expected.subList(1, 5), expected.subList(6, 9))),
+                                             uncommittedList(tracker, Lists.newArrayList(r(0, 4), r(5, 8))));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableDataTest.java b/test/unit/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableDataTest.java
new file mode 100644
index 0000000..70b1f25
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableDataTest.java
@@ -0,0 +1,650 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.paxos.uncommitted;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.PaxosRepairHistory;
+import org.apache.cassandra.service.paxos.uncommitted.UncommittedTableData.Data;
+import org.apache.cassandra.service.paxos.uncommitted.UncommittedTableData.FilterFactory;
+import org.apache.cassandra.service.paxos.uncommitted.UncommittedTableData.FlushWriter;
+import org.apache.cassandra.service.paxos.uncommitted.UncommittedTableData.Merge;
+import org.apache.cassandra.utils.CloseableIterator;
+
+import static org.apache.cassandra.service.paxos.uncommitted.PaxosUncommittedTests.*;
+
+public class UncommittedTableDataTest
+{
+    private static final String KS = "ks";
+    private static final String TBL = "tbl";
+    private static final TableId CFID = TableId.fromUUID(UUID.randomUUID());
+
+    private static final String TBL2 = "tbl2";
+    private static final TableId CFID2 = TableId.fromUUID(UUID.randomUUID());
+
+    private File directory = null;
+
+    private static class MockDataFile
+    {
+        final File data;
+        final File crc;
+
+        public MockDataFile(File data, File crc)
+        {
+            this.data = data;
+            this.crc = crc;
+        }
+
+        boolean exists()
+        {
+            return data.exists() && crc.exists();
+        }
+
+        boolean isDeleted()
+        {
+            return !data.exists() && !crc.exists();
+        }
+    }
+
+    private static final FilterFactory NOOP_FACTORY = new FilterFactory()
+    {
+        List<Range<Token>> getReplicatedRanges()
+        {
+            return new ArrayList<>(ALL_RANGES);
+        }
+
+        PaxosRepairHistory getPaxosRepairHistory()
+        {
+            return PaxosRepairHistory.EMPTY;
+        }
+    };
+
+    private static UncommittedTableData load(File directory, TableId cfid)
+    {
+        return UncommittedTableData.load(directory, cfid, NOOP_FACTORY);
+    }
+
+    MockDataFile mockFile(String table, TableId cfid, long generation, boolean temp)
+    {
+        String fname = UncommittedDataFile.fileName(KS, table, cfid, generation) + (temp ? UncommittedDataFile.TMP_SUFFIX : "");
+        File data = new File(directory, fname);
+        File crc = new File(directory, UncommittedDataFile.crcName(fname));
+        try
+        {
+            Files.write("data", data.toJavaIOFile(), Charset.defaultCharset());
+            Files.write("crc", crc.toJavaIOFile(), Charset.defaultCharset());
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException(e);
+        }
+
+        MockDataFile file = new MockDataFile(data, crc);
+        Assert.assertTrue(file.exists());
+        return file;
+    }
+
+    MockDataFile mockFile(long generation, boolean temp)
+    {
+        return mockFile(TBL, CFID, generation, temp);
+    }
+
+    @Before
+    public void setUp() throws Exception
+    {
+        if (directory != null)
+            FileUtils.deleteRecursive(directory);
+
+        directory = new File(Files.createTempDir());
+    }
+
+    static PaxosKeyState uncommitted(int key, Ballot ballot)
+    {
+        return new PaxosKeyState(CFID, dk(key), ballot, false);
+    }
+
+    static PaxosKeyState committed(int key, Ballot ballot)
+    {
+        return new PaxosKeyState(CFID, dk(key), ballot, true);
+    }
+
+    private static FlushWriter startFlush(UncommittedTableData tableData, List<PaxosKeyState> updates) throws IOException
+    {
+        FlushWriter writer = tableData.flushWriter();
+        writer.appendAll(updates);
+        return writer;
+    }
+
+    private static FlushWriter startFlush(UncommittedTableData tableData, PaxosKeyState... updates) throws IOException
+    {
+        return startFlush(tableData, Lists.newArrayList(updates));
+    }
+
+    private static void flush(UncommittedTableData tableData, List<PaxosKeyState> updates) throws IOException
+    {
+        startFlush(tableData, updates).finish();
+    }
+
+    private static void flush(UncommittedTableData tableData, PaxosKeyState... states) throws IOException
+    {
+        flush(tableData, Lists.newArrayList(states));
+    }
+
+    private void mergeWithUpdates(UncommittedTableData tableData, List<PaxosKeyState> toAdd) throws IOException
+    {
+        flush(tableData, toAdd);
+        tableData.createMergeTask().run();
+    }
+
+    static void assertIteratorContents(CloseableIterator<PaxosKeyState> iterator, Iterable<PaxosKeyState> expected)
+    {
+        try (CloseableIterator<PaxosKeyState> iter = iterator)
+        {
+            Assert.assertEquals(Lists.newArrayList(expected), Lists.newArrayList(iter));
+        }
+    }
+
+    static void assertFileContents(UncommittedDataFile file, int generation, List<PaxosKeyState> expected)
+    {
+        Assert.assertEquals(generation, file.generation());
+        assertIteratorContents(file.iterator(ALL_RANGES), expected);
+    }
+
+    static void assertIteratorContents(UncommittedTableData tableData, int generation, List<PaxosKeyState> expected)
+    {
+        Assert.assertEquals(generation, tableData.nextGeneration() - 1);
+        assertIteratorContents(tableData.iterator(ALL_RANGES), expected);
+    }
+
+
+    /**
+     * Test various merge scenarios
+     */
+    @Test
+    public void testMergeWriter() throws IOException
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+
+        mergeWithUpdates(tableData, kl(new PaxosKeyState(CFID, dk(3), ballots[1], false),
+                                       new PaxosKeyState(CFID, dk(5), ballots[1], false),
+                                       new PaxosKeyState(CFID, dk(7), ballots[1], false),
+                                       new PaxosKeyState(CFID, dk(9), ballots[1], false)));
+
+        assertIteratorContents(tableData, 1, kl(new PaxosKeyState(CFID, dk(3), ballots[1], false),
+                                                new PaxosKeyState(CFID, dk(5), ballots[1], false),
+                                                new PaxosKeyState(CFID, dk(7), ballots[1], false),
+                                                new PaxosKeyState(CFID, dk(9), ballots[1], false)));
+
+        // add a commit from the past for key 3, update key 5, and commit key 7
+        mergeWithUpdates(tableData, kl(new PaxosKeyState(CFID, dk(3), ballots[0], true),
+                                       new PaxosKeyState(CFID, dk(5), ballots[2], false),
+                                       new PaxosKeyState(CFID, dk(7), ballots[2], true)));
+
+        // key 7 should be gone because committed keys aren't written out
+        assertIteratorContents(tableData, 3, kl(new PaxosKeyState(CFID, dk(3), ballots[1], false),
+                                                new PaxosKeyState(CFID, dk(5), ballots[2], false),
+                                                new PaxosKeyState(CFID, dk(9), ballots[1], false)));
+
+        // add a new key and update an adjacent one
+        mergeWithUpdates(tableData, kl(new PaxosKeyState(CFID, dk(4), ballots[3], false),
+                                       new PaxosKeyState(CFID, dk(5), ballots[3], false)));
+        assertIteratorContents(tableData, 5, kl(new PaxosKeyState(CFID, dk(3), ballots[1], false),
+                                                new PaxosKeyState(CFID, dk(4), ballots[3], false),
+                                                new PaxosKeyState(CFID, dk(5), ballots[3], false),
+                                                new PaxosKeyState(CFID, dk(9), ballots[1], false)));
+
+        // add 2 new keys
+        mergeWithUpdates(tableData, kl(new PaxosKeyState(CFID, dk(6), ballots[4], false),
+                                       new PaxosKeyState(CFID, dk(7), ballots[4], false)));
+        assertIteratorContents(tableData, 7, kl(new PaxosKeyState(CFID, dk(3), ballots[1], false),
+                                                new PaxosKeyState(CFID, dk(4), ballots[3], false),
+                                                new PaxosKeyState(CFID, dk(5), ballots[3], false),
+                                                new PaxosKeyState(CFID, dk(6), ballots[4], false),
+                                                new PaxosKeyState(CFID, dk(7), ballots[4], false),
+                                                new PaxosKeyState(CFID, dk(9), ballots[1], false)));
+    }
+
+    @Test
+    public void committedOpsArentWritten() throws Exception
+    {
+        Ballot[] ballots = createBallots(2);
+
+        UncommittedTableData tableData = load(directory, CFID);
+        mergeWithUpdates(tableData, kl(new PaxosKeyState(CFID, dk(1), ballots[0], false)));
+        assertIteratorContents(tableData, 1, kl(new PaxosKeyState(CFID, dk(1), ballots[0], false)));
+
+        mergeWithUpdates(tableData, kl(new PaxosKeyState(CFID, dk(1), ballots[1], true)));
+        assertIteratorContents(tableData, 3, kl());
+    }
+
+    @Test
+    public void testIterator() throws Exception
+    {
+        Ballot[] ballots = createBallots(10);
+        List<PaxosKeyState> expected = new ArrayList<>(ballots.length);
+        UncommittedTableData tableData = load(directory, CFID);
+
+        for (int i=0; i<ballots.length; i++)
+        {
+            Ballot ballot = ballots[i];
+            DecoratedKey dk = dk(i);
+            expected.add(new PaxosKeyState(CFID, dk, ballot, false));
+        }
+
+        mergeWithUpdates(tableData, expected);
+
+        assertIteratorContents(tableData.iterator(Collections.singleton(r(null, tk(4)))), expected.subList(0, 5));
+        assertIteratorContents(tableData.iterator(Collections.singleton(r(2, 6))), expected.subList(3, 7));
+        assertIteratorContents(tableData.iterator(Collections.singleton(r(tk(7), null))), expected.subList(8, 10));
+        assertIteratorContents(tableData.iterator(Lists.newArrayList(r(0, 4), r(5, 8))),
+                               Iterables.concat(expected.subList(1, 5), expected.subList(6, 9)));
+    }
+
+    @Test
+    public void flush() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+
+        List<PaxosKeyState> updates = kl(uncommitted(3, ballots[1]),
+                                         uncommitted(5, ballots[1]),
+                                         committed(7, ballots[1]),
+                                         uncommitted(9, ballots[1]));
+        flush(tableData, updates);
+
+        Data data = tableData.data();
+        UncommittedDataFile updateFile = Iterables.getOnlyElement(data.files);
+        assertFileContents(updateFile, 0, updates);
+        assertIteratorContents(tableData.iterator(ALL_RANGES), updates);
+    }
+
+    @Test
+    public void merge() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+        flush(tableData,
+              uncommitted(3, ballots[1]),
+              committed(7, ballots[1]));
+        flush(tableData,
+              uncommitted(5, ballots[1]),
+              uncommitted(9, ballots[1]));
+
+        List<File> updateFiles = tableData.data().files.stream().map(UncommittedDataFile::file).collect(Collectors.toList());
+        Assert.assertTrue(Iterables.all(updateFiles, File::exists));
+
+        tableData.createMergeTask().run();
+
+        Assert.assertFalse(Iterables.any(updateFiles, File::exists));
+
+        Data data = tableData.data();
+        Assert.assertEquals(1, data.files.size());
+
+        List<PaxosKeyState> expected = kl(uncommitted(3, ballots[1]),
+                                          uncommitted(5, ballots[1]),
+                                          uncommitted(9, ballots[1]));
+        assertFileContents(Iterables.getOnlyElement(data.files), 2, expected);
+        assertIteratorContents(tableData.iterator(ALL_RANGES), expected);
+    }
+
+    /**
+     * nothing should break when a merge results in an empty file
+     */
+    @Test
+    public void emptyMerge() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+        flush(tableData,
+              committed(3, ballots[1]),
+              committed(7, ballots[1]));
+
+        tableData.createMergeTask().run();
+
+        Data data = tableData.data();
+
+        assertFileContents(Iterables.getOnlyElement(data.files), 1, Collections.emptyList());
+        assertIteratorContents(tableData.iterator(ALL_RANGES), Collections.emptyList());
+    }
+
+    @Test
+    public void load() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+        flush(tableData,
+              uncommitted(3, ballots[1]),
+              committed(7, ballots[1]));
+        tableData.createMergeTask().run();
+        flush(tableData,
+              uncommitted(5, ballots[1]),
+              uncommitted(9, ballots[1]));
+        List<PaxosKeyState> expected = kl(uncommitted(3, ballots[1]),
+                                          uncommitted(5, ballots[1]),
+                                          uncommitted(9, ballots[1]));
+        assertIteratorContents(tableData.iterator(ALL_RANGES), expected);
+
+        // cleanup shouldn't touch files for other tables
+        MockDataFile mockStateFile = mockFile(TBL2, CFID2, 2, false);
+        MockDataFile mockUpdateFile = mockFile(TBL2, CFID2, 3, false);
+        MockDataFile mockTempUpdate = mockFile(TBL2, CFID2, 4, true);
+
+        UncommittedTableData tableData2 = load(directory, CFID);
+        assertIteratorContents(tableData2.iterator(ALL_RANGES), expected);
+        Assert.assertTrue(mockStateFile.exists() && mockUpdateFile.exists() && mockTempUpdate.exists());
+    }
+
+    /**
+     * Shouldn't break if there wasn't a merge before shutdown
+     */
+    @Test
+    public void loadWithoutStateFile() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+
+        List<PaxosKeyState> updates = kl(uncommitted(3, ballots[1]),
+                                         uncommitted(5, ballots[1]),
+                                         committed(7, ballots[1]),
+                                         uncommitted(9, ballots[1]));
+        flush(tableData, updates);
+        assertIteratorContents(tableData.iterator(ALL_RANGES), updates);
+
+        UncommittedTableData data2 = load(directory, CFID);
+        assertIteratorContents(data2.iterator(ALL_RANGES), updates);
+    }
+
+    /**
+     * Test that incomplete update flushes are cleaned up
+     */
+    @Test
+    public void updateRecovery() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+
+        List<PaxosKeyState> updates = kl(uncommitted(3, ballots[1]),
+                                         uncommitted(5, ballots[1]),
+                                         committed(7, ballots[1]),
+                                         uncommitted(9, ballots[1]));
+        flush(tableData, updates);
+        assertIteratorContents(tableData.iterator(ALL_RANGES), updates);
+        MockDataFile tmpUpdate = mockFile(tableData.nextGeneration(), true);
+
+        UncommittedTableData tableData2 = load(directory, CFID);
+        Assert.assertEquals(1, tableData2.nextGeneration());
+        assertIteratorContents(tableData2.iterator(ALL_RANGES), updates);
+        Assert.assertTrue(tmpUpdate.isDeleted());
+    }
+
+    /**
+     * Test that incomplete state merges are cleaned up
+     */
+    @Test
+    public void stateRecovery() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+
+        List<PaxosKeyState> updates = kl(uncommitted(3, ballots[1]),
+                                         uncommitted(5, ballots[1]),
+                                         committed(7, ballots[1]),
+                                         uncommitted(9, ballots[1]));
+        flush(tableData, updates);
+        assertIteratorContents(tableData.iterator(ALL_RANGES), updates);
+        MockDataFile tmpUpdate = mockFile(tableData.nextGeneration(), true);
+
+        UncommittedTableData tableData2 = load(directory, CFID);
+        Assert.assertEquals(1, tableData2.nextGeneration());
+        assertIteratorContents(tableData2.iterator(ALL_RANGES), updates);
+        Assert.assertTrue(tmpUpdate.isDeleted());
+    }
+
+    @Test
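+    // A crc file left behind after its data file was deleted should be cleaned up on the next load.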
+    public void orphanCrc() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+
+        List<PaxosKeyState> updates = kl(uncommitted(3, ballots[1]),
+                                         uncommitted(5, ballots[1]),
+                                         uncommitted(7, ballots[1]),
+                                         uncommitted(9, ballots[1]));
+        flush(tableData, updates);
+        long updateGeneration = Iterables.getOnlyElement(tableData.data().files).generation();
+        tableData.createMergeTask().run();
+        assertIteratorContents(tableData.iterator(ALL_RANGES), updates);
+
+        MockDataFile oldUpdate = mockFile(updateGeneration, false);
+        FileUtils.deleteWithConfirm(oldUpdate.data);
+        UncommittedTableData tableData2 = load(directory, CFID);
+        assertIteratorContents(tableData2.iterator(ALL_RANGES), updates);
+        Assert.assertTrue(oldUpdate.isDeleted());
+    }
+
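+    // Data files are reference counted: a merge marks the superseded file as deleted, but it is
+    // only removed from disk once the last open iterator over it has been closed.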
+    @Test
+    public void referenceCountingTest() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+        flush(tableData,
+              uncommitted(3, ballots[1]),
+              committed(7, ballots[1]));
+
+        // initial state
+        UncommittedDataFile updateFile = Iterables.getOnlyElement(tableData.data().files);
+        Assert.assertEquals(0, updateFile.getActiveReaders());
+        Assert.assertFalse(updateFile.isMarkedDeleted());
+
+        // referenced state
+        CloseableIterator<PaxosKeyState> iterator = tableData.iterator(ALL_RANGES);
+        Assert.assertEquals(1, updateFile.getActiveReaders());
+        Assert.assertFalse(updateFile.isMarkedDeleted());
+
+        // marked deleted state
+        tableData.createMergeTask().run();
+        Assert.assertEquals(1, updateFile.getActiveReaders());
+        Assert.assertTrue(updateFile.isMarkedDeleted());
+        Assert.assertTrue(updateFile.file().exists());
+
+        // unreference and delete
+        iterator.close();
+        Assert.assertEquals(0, updateFile.getActiveReaders());
+        Assert.assertTrue(updateFile.isMarkedDeleted());
+        Assert.assertFalse(updateFile.file().exists());
+    }
+
+    /**
+     * Test that we don't compact update sequences with gaps. ie: we shouldn't compact update generation 4
+     * if we can't include generation 3
+     */
+    @Test
+    public void outOfOrderFlush() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+        FlushWriter pendingFlush = startFlush(tableData,
+                                              uncommitted(3, ballots[1]),
+                                              committed(7, ballots[1]));
+        Assert.assertNull(tableData.currentMerge());
+
+        flush(tableData,
+              uncommitted(5, ballots[1]),
+              uncommitted(9, ballots[1]));
+
+        // create a merge task; it can't be scheduled yet because the earlier flush is still pending
+        Merge merge = tableData.createMergeTask();
+        Assert.assertTrue(merge.dependsOnActiveFlushes());
+        Assert.assertFalse(merge.isScheduled);
+
+        // completing the first flush should cause the merge to be scheduled
+        pendingFlush.finish();
+        Assert.assertFalse(merge.dependsOnActiveFlushes());
+        Assert.assertTrue(merge.isScheduled);
+
+        while (tableData.currentMerge() != null)
+            Thread.sleep(1);
+
+        // confirm that the merge has completed
+        Assert.assertEquals(3, tableData.nextGeneration());
+        Data data = tableData.data();
+        Assert.assertEquals(2, Iterables.getOnlyElement(data.files).generation());
+        assertIteratorContents(tableData.iterator(ALL_RANGES), kl(uncommitted(3, ballots[1]),
+                                                                  uncommitted(5, ballots[1]),
+                                                                  uncommitted(9, ballots[1])));
+    }
+
+    @Test
+    public void abortedFlush() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = load(directory, CFID);
+        FlushWriter pendingFlush = startFlush(tableData,
+                                              uncommitted(3, ballots[1]),
+                                              committed(7, ballots[1]));
+        Assert.assertNull(tableData.currentMerge());
+
+        List<PaxosKeyState> secondFlushUpdates = kl(uncommitted(5, ballots[1]),
+                                                    uncommitted(9, ballots[1]));
+        flush(tableData, secondFlushUpdates);
+        tableData.createMergeTask();
+
+        // the merge created above can't be scheduled while the first flush is still pending
+        Merge merge = tableData.currentMerge();
+        Assert.assertTrue(merge.dependsOnActiveFlushes());
+        Assert.assertFalse(merge.isScheduled);
+
+        // aborting the first flush should allow the merge to be scheduled
+        pendingFlush.abort(null);
+        Assert.assertFalse(merge.dependsOnActiveFlushes());
+        Assert.assertTrue(merge.isScheduled);
+
+        while (tableData.currentMerge() != null)
+            Thread.sleep(1);
+
+        // confirm that the merge has completed
+        Assert.assertEquals(3, tableData.nextGeneration());
+        Data data = tableData.data();
+        Assert.assertEquals(2, Iterables.getOnlyElement(data.files).generation());
+        assertIteratorContents(tableData.iterator(ALL_RANGES), secondFlushUpdates);
+    }
+
+    /**
+     * keys that aren't locally replicated shouldn't be written on merge
+     */
+    @Test
+    public void rangePurge() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = UncommittedTableData.load(directory, CFID, new FilterFactory() {
+            List<Range<Token>> getReplicatedRanges()
+            {
+                return Lists.newArrayList(new Range<>(tk(4), tk(7)));
+            }
+
+            PaxosRepairHistory getPaxosRepairHistory()
+            {
+                return PaxosRepairHistory.EMPTY;
+            }
+        });
+
+        flush(tableData, uncommitted(3, ballots[1]),
+                         uncommitted(5, ballots[1]),
+                         uncommitted(7, ballots[1]),
+                         uncommitted(9, ballots[1]));
+        tableData.createMergeTask().run();
+        assertIteratorContents(tableData.iterator(ALL_RANGES), kl(uncommitted(5, ballots[1]),
+                                                                  uncommitted(7, ballots[1])));
+    }
+
+    @Test
+    public void rangePurge2() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = UncommittedTableData.load(directory, CFID, new FilterFactory() {
+            List<Range<Token>> getReplicatedRanges()
+            {
+                return Lists.newArrayList(new Range<>(tk(4), tk(6)), new Range<>(tk(8), tk(10)));
+            }
+
+            PaxosRepairHistory getPaxosRepairHistory()
+            {
+                return PaxosRepairHistory.EMPTY;
+            }
+        });
+
+        flush(tableData, uncommitted(3, ballots[1]),
+              uncommitted(5, ballots[1]),
+              uncommitted(7, ballots[1]),
+              uncommitted(9, ballots[1]));
+        tableData.createMergeTask().run();
+        assertIteratorContents(tableData.iterator(ALL_RANGES), kl(uncommitted(5, ballots[1]),
+                                                                  uncommitted(9, ballots[1])));
+    }
+
+    /**
+     * ballots below the low bound should be purged
+     */
+    @Test
+    public void lowBoundPurge() throws Throwable
+    {
+        Ballot[] ballots = createBallots(5);
+        UncommittedTableData tableData = UncommittedTableData.load(directory, CFID, new FilterFactory() {
+            List<Range<Token>> getReplicatedRanges()
+            {
+                return Lists.newArrayList(ALL_RANGES);
+            }
+
+            PaxosRepairHistory getPaxosRepairHistory()
+            {
+                return PaxosRepairHistory.add(PaxosRepairHistory.EMPTY, ALL_RANGES, ballots[1]);
+            }
+        });
+
+        flush(tableData, uncommitted(3, ballots[0]),
+                         uncommitted(5, ballots[1]),
+                         uncommitted(7, ballots[2]));
+        tableData.createMergeTask().run();
+        assertIteratorContents(tableData.iterator(ALL_RANGES),  kl(uncommitted(5, ballots[1]),
+                                                                   uncommitted(7, ballots[2])));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java b/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java
index 900a40f..593e3e4 100644
--- a/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java
@@ -76,6 +76,8 @@
 import static org.apache.cassandra.Util.assertColumn;
 import static org.apache.cassandra.Util.assertColumns;
 import static org.apache.cassandra.db.ClusteringBound.Kind;
+import static org.apache.cassandra.db.ConsistencyLevel.ALL;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -131,7 +133,7 @@
     public void testResolveNewerSingleRow()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
                                                                                                      .add("c1", "v1")
@@ -163,7 +165,7 @@
     public void testResolveDisjointSingleRow()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
                                                                                                      .add("c1", "v1")
@@ -200,7 +202,7 @@
     public void testResolveDisjointMultipleRows() throws UnknownHostException
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
                                                                                                      .add("c1", "v1")
@@ -247,7 +249,7 @@
     public void testResolveDisjointMultipleRowsWithRangeTombstones()
     {
         EndpointsForRange replicas = makeReplicas(4);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
 
         RangeTombstone tombstone1 = tombstone("1", "11", 1, nowInSec);
         RangeTombstone tombstone2 = tombstone("3", "31", 1, nowInSec);
@@ -328,7 +330,7 @@
     public void testResolveWithOneEmpty()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1")
                                                                                                      .add("c2", "v2")
@@ -359,7 +361,7 @@
     {
         EndpointsForRange replicas = makeReplicas(2);
         TestableReadRepair readRepair = new TestableReadRepair(command);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         resolver.preprocess(response(command, replicas.get(0).endpoint(), EmptyIterators.unfilteredPartition(cfm)));
         resolver.preprocess(response(command, replicas.get(1).endpoint(), EmptyIterators.unfilteredPartition(cfm)));
 
@@ -375,7 +377,7 @@
     public void testResolveDeleted()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         // one response with columns timestamped before a delete in another response
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
@@ -401,7 +403,7 @@
     public void testResolveMultipleDeleted()
     {
         EndpointsForRange replicas = makeReplicas(4);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         // deletes and columns with interleaved timestamp, with out of order return sequence
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, fullPartitionDelete(cfm, dk, 0, nowInSec)));
@@ -486,7 +488,7 @@
     private void resolveRangeTombstonesOnBoundary(long timestamp1, long timestamp2)
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -560,7 +562,7 @@
     private void testRepairRangeTombstoneBoundary(int timestamp1, int timestamp2, int timestamp3) throws UnknownHostException
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -613,7 +615,7 @@
     {
         EndpointsForRange replicas = makeReplicas(2);
 
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -652,7 +654,7 @@
     public void testRepairRangeTombstoneWithPartitionDeletion2()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -736,7 +738,7 @@
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -788,7 +790,7 @@
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -832,7 +834,7 @@
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -882,7 +884,7 @@
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -938,7 +940,7 @@
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -958,7 +960,7 @@
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -978,7 +980,7 @@
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest1, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(), verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
@@ -998,7 +1000,7 @@
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(), verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -1018,7 +1020,7 @@
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(), verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
@@ -1038,7 +1040,7 @@
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest1, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
@@ -1059,7 +1061,7 @@
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1") .buildUpdate()), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -1080,7 +1082,7 @@
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest2, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, true, command));
@@ -1101,7 +1103,7 @@
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest2, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, true, command));
@@ -1122,7 +1124,7 @@
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest2, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, false, command));
@@ -1143,7 +1145,7 @@
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest2, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1") .buildUpdate()), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, true, command));
@@ -1163,7 +1165,7 @@
         TestRepairedDataVerifier verifier = new TestRepairedDataVerifier();
         verifier.expectDigest(peer1, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
 
@@ -1185,7 +1187,7 @@
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
         verifier.expectDigest(peer1, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         // peer2 is advertising an older version, so when we deserialize its response there are two things to note:
@@ -1217,7 +1219,7 @@
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
         verifier.expectDigest(peer1, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest2, true, command));
diff --git a/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java b/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java
index 6fc8fbf..6f6bf36 100644
--- a/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java
@@ -45,8 +45,11 @@
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.schema.KeyspaceParams;
 
+import static java.util.concurrent.TimeUnit.DAYS;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.db.ConsistencyLevel.LOCAL_QUORUM;
 import static org.apache.cassandra.locator.ReplicaUtils.full;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
@@ -72,7 +75,7 @@
                 full(InetAddressAndPort.getByName("127.0.0.254")),
                 full(InetAddressAndPort.getByName("127.0.0.253"))
         );
-        cfs.sampleReadLatencyNanos = 0;
+        cfs.sampleReadLatencyMicros = 0;
     }
 
     @Before
@@ -92,7 +95,7 @@
     {
         assertEquals(0, cfs.metric.speculativeInsufficientReplicas.getCount());
         assertEquals(0, ks.metric.speculativeInsufficientReplicas.getCount());
-        AbstractReadExecutor executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, ConsistencyLevel.LOCAL_QUORUM), System.nanoTime(), true);
+        AbstractReadExecutor executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, LOCAL_QUORUM), nanoTime(), true);
         executor.maybeTryAdditionalReplicas();
         try
         {
@@ -107,7 +110,7 @@
         assertEquals(1, ks.metric.speculativeInsufficientReplicas.getCount());
 
         //Shouldn't increment
-        executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, ConsistencyLevel.LOCAL_QUORUM), System.nanoTime(), false);
+        executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, LOCAL_QUORUM), nanoTime(), false);
         executor.maybeTryAdditionalReplicas();
         try
         {
@@ -133,7 +136,7 @@
         assertEquals(0, cfs.metric.speculativeFailedRetries.getCount());
         assertEquals(0, ks.metric.speculativeRetries.getCount());
         assertEquals(0, ks.metric.speculativeFailedRetries.getCount());
-        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(TimeUnit.DAYS.toMillis(365)), plan(ConsistencyLevel.LOCAL_QUORUM, targets, targets.subList(0, 2)), System.nanoTime());
+        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(DAYS.toMillis(365)), plan(LOCAL_QUORUM, targets, targets.subList(0, 2)), nanoTime());
         executor.maybeTryAdditionalReplicas();
         new Thread()
         {
@@ -174,7 +177,7 @@
         assertEquals(0, cfs.metric.speculativeFailedRetries.getCount());
         assertEquals(0, ks.metric.speculativeRetries.getCount());
         assertEquals(0, ks.metric.speculativeFailedRetries.getCount());
-        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(ConsistencyLevel.LOCAL_QUORUM, targets, targets.subList(0, 2)), System.nanoTime());
+        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(LOCAL_QUORUM, targets, targets.subList(0, 2)), nanoTime());
         executor.maybeTryAdditionalReplicas();
         try
         {
@@ -200,13 +203,13 @@
     {
         MockSinglePartitionReadCommand command = new MockSinglePartitionReadCommand(TimeUnit.DAYS.toMillis(365));
         ReplicaPlan.ForTokenRead plan = plan(ConsistencyLevel.LOCAL_ONE, targets, targets.subList(0, 1));
-        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, command, plan, System.nanoTime());
+        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, command, plan, nanoTime());
 
         // Issue an initial request against the first endpoint...
         executor.executeAsync();
 
         // ...and then force a speculative retry against another endpoint.
-        cfs.sampleReadLatencyNanos = 0L;
+        cfs.sampleReadLatencyMicros = 0L;
         executor.maybeTryAdditionalReplicas();
 
         new Thread(() ->
@@ -246,7 +249,7 @@
 
         MockSinglePartitionReadCommand(long timeout)
         {
-            super(false, 0, false, cfs.metadata(), 0, null, null, null, Util.dk("ry@n_luvs_teh_y@nk33z"), null, null);
+            super(false, 0, false, cfs.metadata(), 0, null, null, null, Util.dk("ry@n_luvs_teh_y@nk33z"), null, null, false);
             this.timeout = timeout;
         }
 
diff --git a/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java b/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java
index 86b307e..dca62f6 100644
--- a/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java
@@ -150,4 +150,4 @@
             SpeculativeRetryPolicy.fromString(string);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java
index d82a503..2542202 100644
--- a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java
@@ -42,6 +42,7 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.CloseableIterator;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class RangeCommandIteratorTest
@@ -97,34 +98,34 @@
             builder.add("val", String.valueOf(i));
             builder.build().applyUnsafe();
         }
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         PartitionRangeReadCommand command = (PartitionRangeReadCommand) Util.cmd(cfs).build();
         AbstractBounds<PartitionPosition> keyRange = command.dataRange().keyRange();
 
         // without range merger, there will be 2 batches requested: 1st batch with 1 range and 2nd batch with remaining ranges
         CloseableIterator<ReplicaPlan.ForRangeRead> replicaPlans = replicaPlanIterator(keyRange, keyspace, false);
-        RangeCommandIterator data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, System.nanoTime());
+        RangeCommandIterator data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 2, vnodeCount);
 
         // without range merger and initial cf=5, there will be 1 batch requested: 5 vnode ranges for the 1st batch
         replicaPlans = replicaPlanIterator(keyRange, keyspace, false);
-        data = new RangeCommandIterator(replicaPlans, command, vnodeCount, 1000, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, vnodeCount, 1000, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 1, vnodeCount);
 
         // without range merger and max cf=1, there will be 5 batches requested: 1 vnode range per batch
         replicaPlans = replicaPlanIterator(keyRange, keyspace, false);
-        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, vnodeCount, vnodeCount);
 
         // with range merger, there will be only 1 batch requested, as all ranges share the same replica - localhost
         replicaPlans = replicaPlanIterator(keyRange, keyspace, true);
-        data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 1, vnodeCount);
 
         // with range merger and max cf=1, there will be only 1 batch requested, as all ranges share the same replica - localhost
         replicaPlans = replicaPlanIterator(keyRange, keyspace, true);
-        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 1, vnodeCount);
     }
 
diff --git a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java
index 294be2a..259a65f 100644
--- a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java
@@ -39,6 +39,7 @@
 import org.apache.cassandra.schema.IndexMetadata;
 
 import static org.apache.cassandra.db.ConsistencyLevel.ONE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -73,7 +74,7 @@
 
         // verify that a low concurrency factor is not capped by the max concurrency factor
         PartitionRangeReadCommand command = command(cfs, 50, 50);
-        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, System.nanoTime());
+        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, nanoTime());
              ReplicaPlanIterator ranges = new ReplicaPlanIterator(command.dataRange().keyRange(), keyspace, ONE))
         {
             assertEquals(2, partitions.concurrencyFactor());
@@ -83,7 +84,7 @@
 
         // verify that a high concurrency factor is capped by the max concurrency factor
         command = command(cfs, 1000, 50);
-        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, System.nanoTime());
+        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, nanoTime());
              ReplicaPlanIterator ranges = new ReplicaPlanIterator(command.dataRange().keyRange(), keyspace, ONE))
         {
             assertEquals(MAX_CONCURRENCY_FACTOR, partitions.concurrencyFactor());
@@ -93,7 +94,7 @@
 
         // with 0 estimated results per range the concurrency factor should be 1
         command = command(cfs, 1000, 0);
-        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, System.nanoTime());
+        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, nanoTime());
              ReplicaPlanIterator ranges = new ReplicaPlanIterator(command.dataRange().keyRange(), keyspace, ONE))
         {
             assertEquals(1, partitions.concurrencyFactor());
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
index d36808f..bb10c67 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
@@ -71,7 +71,7 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.service.StorageService;
@@ -80,6 +80,7 @@
 import static org.apache.cassandra.locator.Replica.fullReplica;
 import static org.apache.cassandra.locator.ReplicaUtils.FULL_RANGE;
 import static org.apache.cassandra.net.Verb.INTERNAL_RSP;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 @Ignore
 public abstract  class AbstractReadRepairTest
@@ -96,9 +97,9 @@
     static Replica replica2;
     static Replica replica3;
     static EndpointsForRange replicas;
-    static ReplicaPlan.ForRead<?> replicaPlan;
+    static ReplicaPlan.ForRead<?, ?> replicaPlan;
 
-    static long now = TimeUnit.NANOSECONDS.toMicros(System.nanoTime());
+    static long now = TimeUnit.NANOSECONDS.toMicros(nanoTime());
     static DecoratedKey key;
     static Cell<?> cell1;
     static Cell<?> cell2;
@@ -222,13 +223,13 @@
         cfm = CreateTableStatement.parse(ddl, ksName).build();
         assert cfm.params.readRepair == repairStrategy;
         KeyspaceMetadata ksm = KeyspaceMetadata.create(ksName, KeyspaceParams.simple(3), Tables.of(cfm));
-        MigrationManager.announceNewKeyspace(ksm, false);
+        SchemaTestUtil.announceNewKeyspace(ksm);
 
         ks = Keyspace.open(ksName);
         cfs = ks.getColumnFamilyStore("tbl");
 
-        cfs.sampleReadLatencyNanos = 0;
-        cfs.additionalWriteLatencyNanos = 0;
+        cfs.sampleReadLatencyMicros = 0;
+        cfs.additionalWriteLatencyMicros = 0;
 
         target1 = InetAddressAndPort.getByName("127.0.0.255");
         target2 = InetAddressAndPort.getByName("127.0.0.254");
@@ -273,8 +274,8 @@
     {
         assert configured : "configureClass must be called in a @BeforeClass method";
 
-        cfs.sampleReadLatencyNanos = 0;
-        cfs.additionalWriteLatencyNanos = 0;
+        cfs.sampleReadLatencyMicros = 0;
+        cfs.additionalWriteLatencyMicros = 0;
     }
 
     static ReplicaPlan.ForRangeRead replicaPlan(ConsistencyLevel consistencyLevel, EndpointsForRange replicas)
@@ -282,17 +283,17 @@
         return replicaPlan(ks, consistencyLevel, replicas, replicas);
     }
 
-    static ReplicaPlan.ForTokenWrite repairPlan(ReplicaPlan.ForRangeRead readPlan)
+    static ReplicaPlan.ForWrite repairPlan(ReplicaPlan.ForRangeRead readPlan)
     {
-        return repairPlan(readPlan, readPlan.candidates());
+        return repairPlan(readPlan, readPlan.readCandidates());
     }
 
-    static ReplicaPlan.ForTokenWrite repairPlan(EndpointsForRange liveAndDown, EndpointsForRange targets)
+    static ReplicaPlan.ForWrite repairPlan(EndpointsForRange liveAndDown, EndpointsForRange targets)
     {
         return repairPlan(replicaPlan(liveAndDown, targets), liveAndDown);
     }
 
-    static ReplicaPlan.ForTokenWrite repairPlan(ReplicaPlan.ForRangeRead readPlan, EndpointsForRange liveAndDown)
+    static ReplicaPlan.ForWrite repairPlan(ReplicaPlan.ForRangeRead readPlan, EndpointsForRange liveAndDown)
     {
         Token token = readPlan.range().left.getToken();
         EndpointsForToken pending = EndpointsForToken.empty(token);
@@ -320,7 +321,7 @@
 
     public InstrumentedReadRepair createInstrumentedReadRepair(ReplicaPlan.Shared<?, ?> replicaPlan)
     {
-        return createInstrumentedReadRepair(command, replicaPlan, System.nanoTime());
+        return createInstrumentedReadRepair(command, replicaPlan, nanoTime());
 
     }
 
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java
index 43a1275..3a938a2 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java
@@ -43,12 +43,15 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.service.reads.ReadCallback;
 
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class BlockingReadRepairTest extends AbstractReadRepairTest
 {
     private static class InstrumentedReadRepairHandler
             extends BlockingPartitionRepair
     {
-        public InstrumentedReadRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForTokenWrite writePlan)
+        public InstrumentedReadRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForWrite writePlan)
         {
             super(Util.dk("not a real usable value"), repairs, writePlan, e -> targets.contains(e));
         }
@@ -67,7 +70,7 @@
         configureClass(ReadRepairStrategy.BLOCKING);
     }
 
-    private static InstrumentedReadRepairHandler createRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForTokenWrite writePlan)
+    private static InstrumentedReadRepairHandler createRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForWrite writePlan)
     {
         return new InstrumentedReadRepairHandler(repairs, writePlan);
     }
@@ -78,7 +81,7 @@
         return createRepairHandler(repairs, repairPlan(replicas, replicas));
     }
 
-    private static class InstrumentedBlockingReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+    private static class InstrumentedBlockingReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
             extends BlockingReadRepair<E, P> implements InstrumentedReadRepair<E, P>
     {
         public InstrumentedBlockingReadRepair(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
@@ -140,7 +143,7 @@
         repairs.put(replica1, repair1);
         repairs.put(replica2, repair2);
 
-        ReplicaPlan.ForTokenWrite writePlan = repairPlan(replicas, EndpointsForRange.copyOf(Lists.newArrayList(repairs.keySet())));
+        ReplicaPlan.ForWrite writePlan = repairPlan(replicas, EndpointsForRange.copyOf(Lists.newArrayList(repairs.keySet())));
         InstrumentedReadRepairHandler handler = createRepairHandler(repairs, writePlan);
 
         Assert.assertTrue(handler.mutationsSent.isEmpty());
@@ -266,7 +269,7 @@
         repairs.put(remote1, mutation(cell1));
 
         EndpointsForRange participants = EndpointsForRange.of(replica1, replica2, remote1, remote2);
-        ReplicaPlan.ForTokenWrite writePlan = repairPlan(replicaPlan(ks, ConsistencyLevel.LOCAL_QUORUM, participants));
+        ReplicaPlan.ForWrite writePlan = repairPlan(replicaPlan(ks, ConsistencyLevel.LOCAL_QUORUM, participants));
         InstrumentedReadRepairHandler handler = createRepairHandler(repairs, writePlan);
         handler.sendInitialRepairs();
         Assert.assertEquals(2, handler.mutationsSent.size());
@@ -287,6 +290,6 @@
 
     private boolean getCurrentRepairStatus(BlockingPartitionRepair handler)
     {
-        return handler.awaitRepairsUntil(System.nanoTime(), TimeUnit.NANOSECONDS);
+        return handler.awaitRepairsUntil(nanoTime(), NANOSECONDS);
     }
 }
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java
index ae83efb..8399c83 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java
@@ -49,6 +49,9 @@
 import org.apache.cassandra.service.reads.ReadCallback;
 import org.apache.cassandra.service.reads.repair.ReadRepairEvent.ReadRepairEventType;
 
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Variation of {@link BlockingReadRepair} using diagnostic events instead of instrumentation for test validation.
  */
@@ -82,7 +85,7 @@
         repairs.put(replica2, repair2);
 
 
-        ReplicaPlan.ForTokenWrite writePlan = repairPlan(replicas, EndpointsForRange.copyOf(Lists.newArrayList(repairs.keySet())));
+        ReplicaPlan.ForWrite writePlan = repairPlan(replicas, EndpointsForRange.copyOf(Lists.newArrayList(repairs.keySet())));
         DiagnosticPartitionReadRepairHandler handler = createRepairHandler(repairs, writePlan);
 
         Assert.assertTrue(handler.updatesByEp.isEmpty());
@@ -110,7 +113,7 @@
 
     private boolean getCurrentRepairStatus(BlockingPartitionRepair handler)
     {
-        return handler.awaitRepairsUntil(System.nanoTime(), TimeUnit.NANOSECONDS);
+        return handler.awaitRepairsUntil(nanoTime(), NANOSECONDS);
     }
 
     public InstrumentedReadRepair createInstrumentedReadRepair(ReadCommand command, ReplicaPlan.Shared<?,?> replicaPlan, long queryStartNanoTime)
@@ -118,7 +121,7 @@
         return new DiagnosticBlockingRepairHandler(command, replicaPlan, queryStartNanoTime);
     }
 
-    private static DiagnosticPartitionReadRepairHandler createRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForTokenWrite writePlan)
+    private static DiagnosticPartitionReadRepairHandler createRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForWrite writePlan)
     {
         return new DiagnosticPartitionReadRepairHandler(key, repairs, writePlan);
     }
@@ -165,7 +168,7 @@
         }
     }
 
-    private static class DiagnosticPartitionReadRepairHandler<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+    private static class DiagnosticPartitionReadRepairHandler<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
             extends BlockingPartitionRepair
     {
         private final Map<InetAddressAndPort, String> updatesByEp = new HashMap<>();
@@ -176,7 +179,7 @@
             return e -> candidates.contains(e);
         }
 
-        DiagnosticPartitionReadRepairHandler(DecoratedKey key, Map<Replica, Mutation> repairs, ReplicaPlan.ForTokenWrite writePlan)
+        DiagnosticPartitionReadRepairHandler(DecoratedKey key, Map<Replica, Mutation> repairs, ReplicaPlan.ForWrite writePlan)
         {
             super(key, repairs, writePlan, isLocal());
             DiagnosticEventService.instance().subscribe(PartitionRepairEvent.class, this::onRepairEvent);
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/InstrumentedReadRepair.java b/test/unit/org/apache/cassandra/service/reads/repair/InstrumentedReadRepair.java
index 81ab07e..f9bbea9 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/InstrumentedReadRepair.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/InstrumentedReadRepair.java
@@ -22,14 +22,13 @@
 
 import org.apache.cassandra.locator.Endpoints;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.locator.ReplicaLayout;
 import org.apache.cassandra.locator.ReplicaPlan;
 import org.apache.cassandra.service.reads.ReadCallback;
 
-public interface InstrumentedReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+public interface InstrumentedReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
         extends ReadRepair<E, P>
 {
     Set<InetAddressAndPort> getReadRecipients();
 
-    ReadCallback getReadCallback();
+    ReadCallback<E, P> getReadCallback();
 }
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepairTest.java
index 5ea790b..82bb8de 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/ReadOnlyReadRepairTest.java
@@ -32,12 +32,11 @@
 import org.apache.cassandra.locator.Endpoints;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
-import org.apache.cassandra.locator.ReplicaLayout;
 import org.apache.cassandra.service.reads.ReadCallback;
 
 public class ReadOnlyReadRepairTest extends AbstractReadRepairTest
 {
-    private static class InstrumentedReadOnlyReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+    private static class InstrumentedReadOnlyReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
             extends ReadOnlyReadRepair implements InstrumentedReadRepair
     {
         public InstrumentedReadOnlyReadRepair(ReadCommand command, ReplicaPlan.Shared<E, P> replicaPlan, long queryStartNanoTime)
@@ -93,7 +92,7 @@
     public void repairPartitionFailure()
     {
         ReplicaPlan.SharedForRangeRead readPlan = ReplicaPlan.shared(replicaPlan(replicas, replicas));
-        ReplicaPlan.ForTokenWrite writePlan = repairPlan(replicas, replicas);
+        ReplicaPlan.ForWrite writePlan = repairPlan(replicas, replicas);
         InstrumentedReadRepair repair = createInstrumentedReadRepair(readPlan);
         repair.repairPartition(null, Collections.emptyMap(), writePlan);
     }
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java
index dad9aa4..8b5b2c1 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java
@@ -53,12 +53,14 @@
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.MigrationManager;
+import org.apache.cassandra.schema.SchemaTestUtil;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static org.apache.cassandra.locator.ReplicaUtils.full;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class ReadRepairTest
 {
@@ -70,10 +72,10 @@
     static Replica target3;
     static EndpointsForRange targets;
 
-    private static class InstrumentedReadRepairHandler<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+    private static class InstrumentedReadRepairHandler<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
             extends BlockingPartitionRepair
     {
-        public InstrumentedReadRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForTokenWrite writePlan)
+        public InstrumentedReadRepairHandler(Map<Replica, Mutation> repairs, ReplicaPlan.ForWrite writePlan)
         {
             super(Util.dk("not a valid key"), repairs, writePlan, e -> targets.endpoints().contains(e));
         }
@@ -86,7 +88,7 @@
         }
     }
 
-    static long now = TimeUnit.NANOSECONDS.toMicros(System.nanoTime());
+    static long now = TimeUnit.NANOSECONDS.toMicros(nanoTime());
     static DecoratedKey key;
     static Cell<?> cell1;
     static Cell<?> cell2;
@@ -117,12 +119,12 @@
 
         cfm = CreateTableStatement.parse("CREATE TABLE tbl (k int primary key, v text)", ksName).build();
         KeyspaceMetadata ksm = KeyspaceMetadata.create(ksName, KeyspaceParams.simple(3), Tables.of(cfm));
-        MigrationManager.announceNewKeyspace(ksm, false);
+        SchemaTestUtil.announceNewKeyspace(ksm);
 
         ks = Keyspace.open(ksName);
         cfs = ks.getColumnFamilyStore("tbl");
 
-        cfs.sampleReadLatencyNanos = 0;
+        cfs.sampleReadLatencyMicros = 0;
 
         target1 = full(InetAddressAndPort.getByName("127.0.0.255"));
         target2 = full(InetAddressAndPort.getByName("127.0.0.254"));
@@ -162,7 +164,7 @@
     private static InstrumentedReadRepairHandler createRepairHandler(Map<Replica, Mutation> repairs, EndpointsForRange all, EndpointsForRange targets)
     {
         ReplicaPlan.ForRangeRead readPlan = AbstractReadRepairTest.replicaPlan(ks, ConsistencyLevel.LOCAL_QUORUM, all, targets);
-        ReplicaPlan.ForTokenWrite writePlan = AbstractReadRepairTest.repairPlan(readPlan);
+        ReplicaPlan.ForWrite writePlan = AbstractReadRepairTest.repairPlan(readPlan);
         return new InstrumentedReadRepairHandler(repairs, writePlan);
     }
 
@@ -348,6 +350,6 @@
 
     private boolean getCurrentRepairStatus(BlockingPartitionRepair handler)
     {
-        return handler.awaitRepairsUntil(System.nanoTime(), TimeUnit.NANOSECONDS);
+        return handler.awaitRepairsUntil(nanoTime(), NANOSECONDS);
     }
 }
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/RepairedDataVerifierTest.java b/test/unit/org/apache/cassandra/service/reads/repair/RepairedDataVerifierTest.java
index 169e09d..592bff8 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/RepairedDataVerifierTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/RepairedDataVerifierTest.java
@@ -287,7 +287,8 @@
                   DataLimits.NONE,
                   metadata.partitioner.decorateKey(ByteBufferUtil.bytes(key)),
                   new ClusteringIndexSliceFilter(Slices.ALL, false),
-                  null);
+                  null,
+                  false);
         }
     }
 }
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java b/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java
index 84276d5..eecd106 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java
@@ -23,7 +23,6 @@
 import java.util.Map;
 import java.util.function.Consumer;
 
-import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.ReadCommand;
@@ -35,11 +34,10 @@
 import org.apache.cassandra.locator.Endpoints;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.Replica;
-import org.apache.cassandra.locator.ReplicaLayout;
 import org.apache.cassandra.locator.ReplicaPlan;
 import org.apache.cassandra.service.reads.DigestResolver;
 
-public class TestableReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>>
+public class TestableReadRepair<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E, P>>
         implements ReadRepair<E, P>
 {
     public final Map<InetAddressAndPort, Mutation> sent = new HashMap<>();
@@ -113,7 +111,7 @@
     }
 
     @Override
-    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForTokenWrite writePlan)
+    public void repairPartition(DecoratedKey partitionKey, Map<Replica, Mutation> mutations, ReplicaPlan.ForWrite writePlan)
     {
         for (Map.Entry<Replica, Mutation> entry: mutations.entrySet())
             sent.put(entry.getKey().endpoint(), entry.getValue());
@@ -128,4 +126,4 @@
     {
         return partitionListenerClosed && rowListenerClosed;
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/service/reads/thresholds/WarningsSnapshotTest.java b/test/unit/org/apache/cassandra/service/reads/thresholds/WarningsSnapshotTest.java
new file mode 100644
index 0000000..2102b0b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/reads/thresholds/WarningsSnapshotTest.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service.reads.thresholds;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.Test;
+
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.quicktheories.core.Gen;
+import org.quicktheories.generators.SourceDSL;
+import org.quicktheories.impl.Constraint;
+
+import static org.apache.cassandra.service.reads.thresholds.WarningsSnapshot.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.quicktheories.QuickTheory.qt;
+
+public class WarningsSnapshotTest
+{
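+    // Two distinct loopback endpoints used as the warning sources throughout these tests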
+    private static final InetAddressAndPort HOME = address(127, 0, 0, 1);
+    private static final InetAddressAndPort VACATION_HOME = address(127, 0, 0, 2);
+
+    @Test
+    public void staticMergeEmpty()
+    {
+        WarningsSnapshot result = merge(null, empty(), null, empty());
+        assertThat(result).isNull();
+    }
+
+    @Test
+    public void staticMergeNonEmpty()
+    {
+        qt().forAll(nonEmpty(), nonEmpty()).check((a, b) -> {
+            WarningsSnapshot result = merge(a, b, null, empty());
+            return result != null && !result.isEmpty();
+        });
+    }
+
+    @Test
+    public void mergeEmpty()
+    {
+        WarningsSnapshot result = empty().merge(empty());
+        assertThat(result).isEqualTo(empty());
+    }
+
+    @Test
+    public void mergeSelf()
+    {
+        qt().forAll(all()).check(self -> self.merge(self).equals(self));
+    }
+
+    @Test
+    public void mergeSelfWithEmpty()
+    {
+        qt().forAll(all()).check(self -> self.merge(empty()).equals(self) && empty().merge(self).equals(self));
+    }
+
+    @Test
+    public void mergeNonEmpty()
+    {
+        WarningsSnapshot expected = builder()
+                                    .tombstonesAbort(ImmutableSet.of(HOME), 42)
+                                    .localReadSizeWarning(ImmutableSet.of(VACATION_HOME), 12)
+                                    .build();
+        // validate builder to protect against empty = empty passing this test
+        assertThat(expected.tombstones.aborts.instances).isEqualTo(ImmutableSet.of(HOME));
+        assertThat(expected.tombstones.aborts.maxValue).isEqualTo(42);
+        assertThat(expected.localReadSize.warnings.instances).isEqualTo(ImmutableSet.of(VACATION_HOME));
+        assertThat(expected.localReadSize.warnings.maxValue).isEqualTo(12);
+
+        WarningsSnapshot output = empty().merge(expected);
+        assertThat(output).isEqualTo(expected).isEqualTo(expected.merge(empty()));
+        assertThat(output.merge(expected)).isEqualTo(expected);
+    }
+
+    @Test
+    public void mergeNonEmpty2()
+    {
+        WarningsSnapshot a = builder()
+                             .tombstonesAbort(ImmutableSet.of(HOME), 42)
+                             .build();
+        WarningsSnapshot b = builder()
+                             .localReadSizeWarning(ImmutableSet.of(VACATION_HOME), 12)
+                             .build();
+        WarningsSnapshot expected = builder()
+                                    .tombstonesAbort(ImmutableSet.of(HOME), 42)
+                                    .localReadSizeWarning(ImmutableSet.of(VACATION_HOME), 12)
+                                    .build();
+
+        // validate builder to protect against empty = empty passing this test
+        assertThat(a.tombstones.aborts.instances).isEqualTo(expected.tombstones.aborts.instances).isEqualTo(ImmutableSet.of(HOME));
+        assertThat(a.tombstones.aborts.maxValue).isEqualTo(expected.tombstones.aborts.maxValue).isEqualTo(42);
+        assertThat(b.localReadSize.warnings.instances).isEqualTo(expected.localReadSize.warnings.instances).isEqualTo(ImmutableSet.of(VACATION_HOME));
+        assertThat(b.localReadSize.warnings.maxValue).isEqualTo(expected.localReadSize.warnings.maxValue).isEqualTo(12);
+
+        WarningsSnapshot output = a.merge(b);
+        assertThat(output).isEqualTo(expected).isEqualTo(expected.merge(empty()));
+        assertThat(output.merge(expected)).isEqualTo(expected);
+    }
+
+    @Test
+    public void mergeConflict()
+    {
+        WarningsSnapshot a          = builder().tombstonesAbort(ImmutableSet.of(HOME), 42).build();
+        WarningsSnapshot b          = builder().tombstonesAbort(ImmutableSet.of(VACATION_HOME), 12).build();
+        WarningsSnapshot expected   = builder().tombstonesAbort(ImmutableSet.of(HOME, VACATION_HOME), 42).build();
+
+        // validate builder to protect against empty = empty passing this test
+        assertThat(a.tombstones.aborts.instances).isEqualTo(ImmutableSet.of(HOME));
+        assertThat(a.tombstones.aborts.maxValue).isEqualTo(42);
+        assertThat(b.tombstones.aborts.instances).isEqualTo(ImmutableSet.of(VACATION_HOME));
+        assertThat(b.tombstones.aborts.maxValue).isEqualTo(12);
+        assertThat(expected.tombstones.aborts.instances).isEqualTo(ImmutableSet.of(HOME, VACATION_HOME));
+        assertThat(expected.tombstones.aborts.maxValue).isEqualTo(42);
+
+        WarningsSnapshot output = a.merge(b);
+        assertThat(output).isEqualTo(expected).isEqualTo(expected.merge(empty()));
+        assertThat(output.merge(expected)).isEqualTo(expected);
+    }
+
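+    // Builds an InetAddressAndPort from four raw IPv4 octets; a 4-byte address can never
+    // throw UnknownHostException, so the catch rethrows it as an AssertionError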
+    private static InetAddressAndPort address(int a, int b, int c, int d)
+    {
+        try
+        {
+            InetAddress address = InetAddress.getByAddress(new byte[]{ (byte) a, (byte) b, (byte) c, (byte) d });
+            return InetAddressAndPort.getByAddress(address);
+        }
+        catch (UnknownHostException e)
+        {
+            throw new AssertionError(e);
+        }
+    }
+
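+    // Generates either an empty snapshot or a non-empty one, chosen by a random boolean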
+    private static Gen<WarningsSnapshot> all()
+    {
+        Gen<Boolean> empty = SourceDSL.booleans().all();
+        Gen<WarningsSnapshot> nonEmpty = nonEmpty();
+        Gen<WarningsSnapshot> gen = rs ->
+            empty.generate(rs) ? empty() : nonEmpty.generate(rs);
+        return gen.describedAs(WarningsSnapshot::toString);
+    }
+
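+    // Builds a snapshot from six randomly generated counters; assuming(isDefined) discards
+    // the case where every counter came up empty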
+    private static Gen<WarningsSnapshot> nonEmpty()
+    {
+        Gen<Counter> counter = counter();
+        Gen<WarningsSnapshot> gen = rs -> {
+            Builder builder = builder();
+            builder.tombstonesWarning(counter.generate(rs));
+            builder.tombstonesAbort(counter.generate(rs));
+            builder.localReadSizeWarning(counter.generate(rs));
+            builder.localReadSizeAbort(counter.generate(rs));
+            builder.rowIndexSizeWarning(counter.generate(rs));
+            builder.rowIndexSizeAbort(counter.generate(rs));
+            return builder.build();
+        };
+        return gen.assuming(WarningsSnapshot::isDefined).describedAs(WarningsSnapshot::toString);
+    }
+
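+    // Either an empty counter, or one over a random endpoint subset with a positive max value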
+    private static Gen<Counter> counter()
+    {
+        Gen<Boolean> empty = SourceDSL.booleans().all();
+        Constraint maxValue = Constraint.between(1, Long.MAX_VALUE);
+        Gen<ImmutableSet<InetAddressAndPort>> instances = SourceDSL.arbitrary()
+                                                                   .pick(ImmutableSet.of(HOME), ImmutableSet.of(VACATION_HOME), ImmutableSet.of(HOME, VACATION_HOME));
+        Gen<Counter> gen = rs ->
+                           empty.generate(rs) ? Counter.empty()
+                                              : new Counter(instances.generate(rs), rs.next(maxValue));
+        return gen.describedAs(Counter::toString);
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java
new file mode 100644
index 0000000..d03a2dd
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.snapshot;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.cassandra.config.DurationSpec;
+import org.apache.cassandra.db.Directories;
+import org.apache.cassandra.io.util.File;
+import org.assertj.core.util.Lists;
+
+import static org.apache.cassandra.service.snapshot.SnapshotLoader.SNAPSHOT_DIR_PATTERN;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SnapshotLoaderTest
+{
+    static String DATA_DIR_1 = "data1";
+    static String DATA_DIR_2 = "data2";
+    static String DATA_DIR_3 = "data3";
+    static String[] DATA_DIRS = new String[]{DATA_DIR_1, DATA_DIR_2, DATA_DIR_3};
+
+    static String KEYSPACE_1 = "ks1";
+    static String TABLE1_NAME = "table_1";
+    static UUID TABLE1_ID = UUID.randomUUID();
+    static String TABLE2_NAME = "table2";
+    static UUID TABLE2_ID = UUID.randomUUID();
+
+    static String KEYSPACE_2 = "ks2";
+    static String TABLE3_NAME = "table_3";
+    static UUID TABLE3_ID = UUID.randomUUID();
+
+    static String TAG1 = "tag1";
+    static String TAG2 = "tag2";
+    static String TAG3 = "tag3";
+
+    static String INVALID_NAME = "#test#";
+    static String INVALID_ID = "XPTO";
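+    // Assumption: deliberately malformed name/ID values, expected to be rejected elsewhere in this test class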
+
+    @ClassRule
+    public static TemporaryFolder tmpDir = new TemporaryFolder();
+
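+    // Sanity-checks SNAPSHOT_DIR_PATTERN against representative snapshot directory paths,
+    // including one created for a dropped table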
+    @Test
+    public void testMatcher()
+    {
+        String INDEX_SNAPSHOT = "/user/.ccm/test/node1/data0/ks/indexed_table-24b241e0c58f11eca526336fc2c671ab/snapshots/test";
+        assertThat(SNAPSHOT_DIR_PATTERN.matcher(INDEX_SNAPSHOT).find()).isTrue();
+
+        String TABLE_SNAPSHOT = "/Users/pmottagomes/.ccm/test/node1/data0/ks/my_table-1a025b40c58f11eca526336fc2c671ab/snapshots/test";
+        assertThat(SNAPSHOT_DIR_PATTERN.matcher(TABLE_SNAPSHOT).find()).isTrue();
+
+        String DROPPED_SNAPSHOT = "/Users/pmottagomes/.ccm/test/node1/data0/ks/my_table-e5c58330c58d11eca526336fc2c671ab/snapshots/dropped-1650997415751-my_table";
+        assertThat(SNAPSHOT_DIR_PATTERN.matcher(DROPPED_SNAPSHOT).find()).isTrue();
+    }
+
+    @Test
+    public void testNoSnapshots() throws IOException
+    {
+        // Create table directories on all data directories without snapshots
+        File baseDir  = new File(tmpDir.newFolder());
+        for (String dataDir : DATA_DIRS)
+        {
+            createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(TABLE1_NAME, TABLE1_ID));
+            createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(TABLE2_NAME, TABLE2_ID));
+            createDir(baseDir, dataDir, KEYSPACE_2, tableDirName(TABLE3_NAME, TABLE3_ID));
+        }
+
+        // Check no snapshots are found
+        SnapshotLoader loader = new SnapshotLoader(Arrays.asList(Paths.get(baseDir.toString(), DATA_DIR_1),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_2),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_3)));
+        assertThat(loader.loadSnapshots()).isEmpty();
+    }
+
+    @Test
+    public void testSnapshotsWithoutManifests() throws IOException
+    {
+        Set<File> tag1Files = new HashSet<>();
+        Set<File> tag2Files = new HashSet<>();
+        Set<File> tag3Files = new HashSet<>();
+
+        // Create one snapshot per table - without manifests:
+        // - ks1.t1 : tag1
+        // - ks1.t2 : tag2
+        // - ks2.t3 : tag3
+        File baseDir  = new File(tmpDir.newFolder());
+        for (String dataDir : DATA_DIRS)
+        {
+            tag1Files.add(createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(TABLE1_NAME, TABLE1_ID), Directories.SNAPSHOT_SUBDIR, TAG1));
+            tag2Files.add(createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(TABLE2_NAME, TABLE2_ID), Directories.SNAPSHOT_SUBDIR, TAG2));
+            tag3Files.add(createDir(baseDir, dataDir, KEYSPACE_2, tableDirName(TABLE3_NAME, TABLE3_ID), Directories.SNAPSHOT_SUBDIR, TAG3));
+        }
+
+        // Verify all 3 snapshots are found correctly from data directories
+        SnapshotLoader loader = new SnapshotLoader(Arrays.asList(Paths.get(baseDir.toString(), DATA_DIR_1),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_2),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_3)));
+        Set<TableSnapshot> snapshots = loader.loadSnapshots();
+        assertThat(snapshots).hasSize(3);
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE1_NAME, TABLE1_ID, TAG1, null, null, tag1Files));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE2_NAME, TABLE2_ID,  TAG2, null, null, tag2Files));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_2, TABLE3_NAME, TABLE3_ID,  TAG3, null, null, tag3Files));
+    }
+
+    @Test
+    public void testSnapshotsWithManifests() throws IOException
+    {
+        Set<File> tag1Files = new HashSet<>();
+        Set<File> tag2Files = new HashSet<>();
+        Set<File> tag3Files = new HashSet<>();
+
+        // Create one snapshot per table:
+        // - ks1.t1 : tag1
+        // - ks1.t2 : tag2
+        // - ks2.t3 : tag3
+        File baseDir  = new File(tmpDir.newFolder());
+        for (String dataDir : DATA_DIRS)
+        {
+            tag1Files.add(createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(TABLE1_NAME, TABLE1_ID), Directories.SNAPSHOT_SUBDIR, TAG1));
+            tag2Files.add(createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(TABLE2_NAME, TABLE2_ID), Directories.SNAPSHOT_SUBDIR, TAG2));
+            tag3Files.add(createDir(baseDir, dataDir, KEYSPACE_2, tableDirName(TABLE3_NAME, TABLE3_ID), Directories.SNAPSHOT_SUBDIR, TAG3));
+        }
+
+        // Write manifest for snapshot tag1 on random location
+        Instant tag1Ts = Instant.now();
+        File tag1ManifestLocation = tag1Files.toArray(new File[0])[ThreadLocalRandom.current().nextInt(tag1Files.size())];
+        writeManifest(tag1ManifestLocation, tag1Ts, null);
+
+        // Write manifest for snapshot tag2 on random location
+        Instant tag2Ts = Instant.now().plusSeconds(10);
+        DurationSpec.IntSecondsBound tag2Ttl = new DurationSpec.IntSecondsBound("10h");
+        File tag2ManifestLocation = tag2Files.toArray(new File[0])[ThreadLocalRandom.current().nextInt(tag2Files.size())];
+        writeManifest(tag2ManifestLocation, tag2Ts, tag2Ttl);
+
+        // Write manifest for snapshot tag3 on random location
+        Instant tag3Ts = Instant.now().plusSeconds(20);
+        File tag3ManifestLocation = tag3Files.toArray(new File[0])[ThreadLocalRandom.current().nextInt(tag3Files.size())];
+        writeManifest(tag3ManifestLocation, tag3Ts, null);
+
+        // Verify all 3 snapshots are found correctly from data directories
+        SnapshotLoader loader = new SnapshotLoader(Arrays.asList(Paths.get(baseDir.toString(), DATA_DIR_1),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_2),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_3)));
+        Set<TableSnapshot> snapshots = loader.loadSnapshots();
+        assertThat(snapshots).hasSize(3);
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE1_NAME, TABLE1_ID, TAG1, tag1Ts, null, tag1Files));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE2_NAME, TABLE2_ID,  TAG2, tag2Ts, tag2Ts.plusSeconds(tag2Ttl.toSeconds()), tag2Files));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_2, TABLE3_NAME, TABLE3_ID,  TAG3, tag3Ts, null, tag3Files));
+    }
+
+    @Test
+    public void testInvalidSnapshotsAreNotLoaded() throws IOException
+    {
+        Set<File> tag1Files = new HashSet<>();
+        Set<File> tag2Files = new HashSet<>();
+        Set<File> tag3Files = new HashSet<>();
+
+        // Create invalid snapshot directory structure
+        // - /data_dir/#test#/table1-validuuid/snapshots/tag1
+        // - /data_dir/ks1/#test#-validuuid/snapshots/tag2
+        // - /data_dir/ks2/table3-invaliduuid/snapshots/tag3
+        File baseDir  = new File(tmpDir.newFolder());
+        for (String dataDir : DATA_DIRS)
+        {
+            tag1Files.add(createDir(baseDir, dataDir, INVALID_NAME, tableDirName(TABLE1_NAME, TABLE1_ID), Directories.SNAPSHOT_SUBDIR, TAG1));
+            tag2Files.add(createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(INVALID_NAME, TABLE2_ID), Directories.SNAPSHOT_SUBDIR, TAG2));
+            tag3Files.add(createDir(baseDir, dataDir, KEYSPACE_2, String.format("%s-%s", TABLE3_NAME, INVALID_ID), Directories.SNAPSHOT_SUBDIR, TAG3));
+        }
+
+        // Check no snapshots are loaded
+        SnapshotLoader loader = new SnapshotLoader(Arrays.asList(Paths.get(baseDir.toString(), DATA_DIR_1),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_2),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_3)));
+        assertThat(loader.loadSnapshots()).isEmpty();
+    }
+
+    @Test
+    public void testParseUUID()
+    {
+        assertThat(SnapshotLoader.Visitor.parseUUID("c7e513243f0711ec9bbc0242ac130002")).isEqualTo(UUID.fromString("c7e51324-3f07-11ec-9bbc-0242ac130002"));
+    }
+
+    private void writeManifest(File snapshotDir, Instant creationTime, DurationSpec.IntSecondsBound ttl) throws IOException
+    {
+        SnapshotManifest manifest = new SnapshotManifest(Lists.newArrayList("f1", "f2", "f3"), ttl, creationTime);
+        manifest.serializeToJsonFile(getManifestFile(snapshotDir));
+    }
+
+    private static File createDir(File baseDir, String... subdirs)
+    {
+        File file = new File(Paths.get(baseDir.toString(), subdirs).toString());
+        file.toJavaIOFile().mkdirs();
+        return file;
+    }
+
+    static String tableDirName(String tableName, UUID tableId)
+    {
+        return String.format("%s-%s", tableName, removeDashes(tableId));
+    }
+
+    static String removeDashes(UUID id)
+    {
+        return id.toString().replace("-", "");
+    }
+
+    public static File getManifestFile(File snapshotDir)
+    {
+        return new File(snapshotDir, "manifest.json");
+    }
+}
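Illustrative sketch (not part of this patch): testMatcher above exercises SnapshotLoader.SNAPSHOT_DIR_PATTERN against paths of the form <data_dir>/<keyspace>/<table>-<tableId>/snapshots/<tag>. The following standalone example shows the kind of match the test relies on, using a simplified, hypothetical regex rather than the production pattern.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SnapshotDirPatternSketch
{
    // Hypothetical simplification of SnapshotLoader.SNAPSHOT_DIR_PATTERN:
    // .../<keyspace>/<table>-<32-hex tableId>/snapshots/<tag>
    static final Pattern PATTERN =
        Pattern.compile("(?<keyspace>\\w+)/(?<tableName>\\w+)-(?<tableId>[0-9a-f]{32})/snapshots/(?<tag>[^/]+)$");

    public static void main(String[] args)
    {
        String path = "/var/lib/cassandra/data0/ks/my_table-1a025b40c58f11eca526336fc2c671ab/snapshots/test";
        Matcher m = PATTERN.matcher(path);
        if (m.find())
        {
            System.out.println("keyspace = " + m.group("keyspace"));   // ks
            System.out.println("table    = " + m.group("tableName"));  // my_table
            System.out.println("tag      = " + m.group("tag"));        // test
        }
    }
}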
diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java
new file mode 100644
index 0000000..edbfff2
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.snapshot;
+
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.service.DefaultFSErrorHandler;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.service.snapshot.TableSnapshotTest.createFolders;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.FBUtilities.now;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertTrue;
+
+public class SnapshotManagerTest
+{
+    static long ONE_DAY_SECS = 86400;
+
+    @BeforeClass
+    public static void beforeClass()
+    {
+        DatabaseDescriptor.daemonInitialization();
+        FileUtils.setFSErrorHandler(new DefaultFSErrorHandler());
+    }
+
+    @ClassRule
+    public static TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+    private TableSnapshot generateSnapshotDetails(String tag, Instant expiration)
+    {
+        try
+        {
+            return new TableSnapshot("ks",
+                                     "tbl",
+                                     UUID.randomUUID(),
+                                     tag,
+                                     Instant.EPOCH,
+                                     expiration,
+                                     createFolders(temporaryFolder));
+        }
+        catch (Exception ex)
+        {
+            throw new RuntimeException(ex);
+        }
+    }
+
+
+    @Test
+    public void testLoadSnapshots() throws Exception
+    {
+        TableSnapshot expired = generateSnapshotDetails("expired", Instant.EPOCH);
+        TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusSeconds(ONE_DAY_SECS));
+        TableSnapshot nonExpiring = generateSnapshotDetails("non-expiring", null);
+        List<TableSnapshot> snapshots = Arrays.asList(expired, nonExpired, nonExpiring);
+
+        // Create SnapshotManager with 3 snapshots: expired, non-expired and non-expiring
+        SnapshotManager manager = new SnapshotManager(3, 3);
+        manager.addSnapshots(snapshots);
+
+        // Only expiring snapshots should be loaded
+        assertThat(manager.getExpiringSnapshots()).hasSize(2);
+        assertThat(manager.getExpiringSnapshots()).contains(expired);
+        assertThat(manager.getExpiringSnapshots()).contains(nonExpired);
+    }
+
+    @Test
+    public void testClearExpiredSnapshots()
+    {
+        SnapshotManager manager = new SnapshotManager(3, 3);
+
+        // Add 3 snapshots: expired, non-expired and non-expiring
+        TableSnapshot expired = generateSnapshotDetails("expired", Instant.EPOCH);
+        TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusSeconds(ONE_DAY_SECS));
+        TableSnapshot nonExpiring = generateSnapshotDetails("non-expiring", null);
+        manager.addSnapshot(expired);
+        manager.addSnapshot(nonExpired);
+        manager.addSnapshot(nonExpiring);
+
+        // Only expiring snapshots should be indexed and all should exist
+        assertThat(manager.getExpiringSnapshots()).hasSize(2);
+        assertThat(manager.getExpiringSnapshots()).contains(expired);
+        assertThat(manager.getExpiringSnapshots()).contains(nonExpired);
+        assertThat(expired.exists()).isTrue();
+        assertThat(nonExpired.exists()).isTrue();
+        assertThat(nonExpiring.exists()).isTrue();
+
+        // After clearing expired snapshots, expired snapshot should be removed while the others should remain
+        manager.clearExpiredSnapshots();
+        assertThat(manager.getExpiringSnapshots()).hasSize(1);
+        assertThat(manager.getExpiringSnapshots()).contains(nonExpired);
+        assertThat(expired.exists()).isFalse();
+        assertThat(nonExpired.exists()).isTrue();
+        assertThat(nonExpiring.exists()).isTrue();
+    }
+
+    @Test
+    public void testScheduledCleanup() throws Exception
+    {
+        SnapshotManager manager = new SnapshotManager(0, 1);
+        try
+        {
+            // Start snapshot manager which should start expired snapshot cleanup thread
+            manager.start();
+
+            // Add 2 expiring snapshots: one to expire in 2 seconds, another in 1 day
+            TableSnapshot toExpire = generateSnapshotDetails("to-expire", now().plusSeconds(2));
+            TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusSeconds(ONE_DAY_SECS));
+            manager.addSnapshot(toExpire);
+            manager.addSnapshot(nonExpired);
+
+            // Check both snapshots still exist
+            assertThat(toExpire.exists()).isTrue();
+            assertThat(nonExpired.exists()).isTrue();
+            assertThat(manager.getExpiringSnapshots()).hasSize(2);
+            assertThat(manager.getExpiringSnapshots()).contains(toExpire);
+            assertThat(manager.getExpiringSnapshots()).contains(nonExpired);
+
+            await().pollInterval(2, SECONDS)
+                   .timeout(10, SECONDS)
+                   .until(() -> manager.getExpiringSnapshots().size() == 1);
+
+            assertThat(manager.getExpiringSnapshots()).contains(nonExpired);
+            assertThat(toExpire.exists()).isFalse();
+            assertThat(nonExpired.exists()).isTrue();
+        }
+        finally
+        {
+            manager.stop();
+        }
+    }
+
+    @Test // see CASSANDRA-18211
+    public void testConcurrentClearingOfSnapshots() throws Exception
+    {
+
+        AtomicReference<Long> firstInvocationTime = new AtomicReference<>(0L);
+        AtomicReference<Long> secondInvocationTime = new AtomicReference<>(0L);
+
+        SnapshotManager manager = new SnapshotManager(0, 5)
+        {
+            @Override
+            public synchronized void clearSnapshot(TableSnapshot snapshot)
+            {
+                if (snapshot.getTag().equals("mysnapshot"))
+                {
+                    firstInvocationTime.set(currentTimeMillis());
+                    Uninterruptibles.sleepUninterruptibly(10, SECONDS);
+                }
+                else if (snapshot.getTag().equals("mysnapshot2"))
+                {
+                    secondInvocationTime.set(currentTimeMillis());
+                }
+                super.clearSnapshot(snapshot);
+            }
+        };
+
+        TableSnapshot expiringSnapshot = generateSnapshotDetails("mysnapshot", Instant.now().plusSeconds(15));
+        manager.addSnapshot(expiringSnapshot);
+
+        manager.resumeSnapshotCleanup();
+
+        Thread nonExpiringSnapshotCleanupThread = new Thread(() -> manager.clearSnapshot(generateSnapshotDetails("mysnapshot2", null)));
+
+        // wait until the first snapshot expires
+        await().pollInterval(1, SECONDS)
+               .pollDelay(0, SECONDS)
+               .timeout(1, MINUTES)
+               .until(() -> firstInvocationTime.get() > 0);
+
+        // this will block until the first snapshot is cleaned up
+        nonExpiringSnapshotCleanupThread.start();
+        nonExpiringSnapshotCleanupThread.join();
+
+        assertTrue(secondInvocationTime.get() - firstInvocationTime.get() > 10_000);
+    }
+}
\ No newline at end of file
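Illustrative sketch (not part of this patch): the manager tests above rely on the relationship between a snapshot's creation time, its TTL and its expiration instant (a snapshot with a TTL expires at createdAt + ttl; one without a TTL never expires). A minimal sketch of that rule using only java.time; the names are illustrative, not the TableSnapshot API.

import java.time.Duration;
import java.time.Instant;

public class ExpirationSketch
{
    // A snapshot written with a TTL expires at createdAt + ttl;
    // a snapshot without a TTL never expires.
    static Instant expiresAt(Instant createdAt, Duration ttl)
    {
        return ttl == null ? null : createdAt.plus(ttl);
    }

    static boolean isExpired(Instant expiresAt, Instant now)
    {
        return expiresAt != null && now.isAfter(expiresAt);
    }

    public static void main(String[] args)
    {
        Instant createdAt = Instant.now();
        Instant expiry = expiresAt(createdAt, Duration.ofHours(10)); // mirrors the "10h" TTL used in SnapshotLoaderTest
        System.out.println(isExpired(expiry, createdAt));                                   // false
        System.out.println(isExpired(expiry, createdAt.plus(Duration.ofHours(11))));        // true
        System.out.println(isExpired(expiresAt(createdAt, null), createdAt));               // false: no TTL, never expires
    }
}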
diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java
new file mode 100644
index 0000000..d3b11c0
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.snapshot;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Arrays;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.assertj.core.api.Assertions.assertThatIOException;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.cassandra.config.DurationSpec;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
+
+public class SnapshotManifestTest
+{
+    @Rule
+    public TemporaryFolder tempFolder = new TemporaryFolder();
+
+    @Test
+    public void testDeserializeFromInvalidFile() throws IOException {
+        File manifestFile = new File(tempFolder.newFile("invalid"));
+        assertThatIOException().isThrownBy(
+            () -> {
+                SnapshotManifest.deserializeFromJsonFile(manifestFile);
+            });
+
+        FileOutputStreamPlus out = new FileOutputStreamPlus(manifestFile);
+        out.write(1);
+        out.write(2);
+        out.write(3);
+        out.close();
+        assertThatIOException().isThrownBy(
+            () -> SnapshotManifest.deserializeFromJsonFile(manifestFile));
+    }
+
+    @Test
+    public void testDeserializeManifest() throws IOException
+    {
+        Map<String, Object> map = new HashMap<>();
+        String createdAt = "2021-07-03T10:37:30Z";
+        String expiresAt = "2021-08-03T10:37:30Z";
+        map.put("created_at", createdAt);
+        map.put("expires_at", expiresAt);
+        map.put("files", Arrays.asList("db1", "db2", "db3"));
+
+        ObjectMapper mapper = new ObjectMapper();
+        File manifestFile = new File(tempFolder.newFile("manifest.json"));
+        mapper.writeValue((OutputStream) new FileOutputStreamPlus(manifestFile), map);
+        SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile);
+
+        assertThat(manifest.getExpiresAt()).isEqualTo(Instant.parse(expiresAt));
+        assertThat(manifest.getCreatedAt()).isEqualTo(Instant.parse(createdAt));
+        assertThat(manifest.getFiles()).contains("db1").contains("db2").contains("db3").hasSize(3);
+    }
+
+    @Test
+    public void testOptionalFields() throws IOException {
+        Map<String, Object> map = new HashMap<>();
+        map.put("files", Arrays.asList("db1", "db2", "db3"));
+        ObjectMapper mapper = new ObjectMapper();
+        File manifestFile = new File(tempFolder.newFile("manifest.json"));
+        mapper.writeValue((OutputStream) new FileOutputStreamPlus(manifestFile), map);
+        SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile);
+
+        assertThat(manifest.getExpiresAt()).isNull();
+        assertThat(manifest.getCreatedAt()).isNull();
+        assertThat(manifest.getFiles()).contains("db1").contains("db2").contains("db3").hasSize(3);
+    }
+
+    @Test
+    public void testIgnoredFields() throws IOException {
+        Map<String, Object> map = new HashMap<>();
+        map.put("files", Arrays.asList("db1", "db2", "db3"));
+        map.put("dummy", "dummy");
+        ObjectMapper mapper = new ObjectMapper();
+        File manifestFile = new File(tempFolder.newFile("manifest.json"));
+        mapper.writeValue((OutputStream) new FileOutputStreamPlus(manifestFile), map);
+        SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile);
+        assertThat(manifest.getFiles()).contains("db1").contains("db2").contains("db3").hasSize(3);
+    }
+
+    @Test
+    public void testSerializeAndDeserialize() throws Exception {
+        SnapshotManifest manifest = new SnapshotManifest(Arrays.asList("db1", "db2", "db3"), new DurationSpec.IntSecondsBound("2m"), Instant.ofEpochMilli(currentTimeMillis()));
+        File manifestFile = new File(tempFolder.newFile("manifest.json"));
+
+        manifest.serializeToJsonFile(manifestFile);
+        manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile);
+        assertThat(manifest.getExpiresAt()).isNotNull();
+        assertThat(manifest.getCreatedAt()).isNotNull();
+        assertThat(manifest.getFiles()).contains("db1").contains("db2").contains("db3").hasSize(3);
+    }
+}
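Illustrative sketch (not part of this patch): the manifest tests above read and write a small JSON document keyed by created_at, expires_at and files, where expires_at is optional and unknown keys are ignored. A self-contained Jackson round-trip of that shape, using a plain Map rather than SnapshotManifest, shows what ends up in manifest.json.

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

public class ManifestJsonSketch
{
    public static void main(String[] args) throws IOException
    {
        Map<String, Object> manifest = new HashMap<>();
        manifest.put("created_at", "2021-07-03T10:37:30Z");
        manifest.put("expires_at", "2021-08-03T10:37:30Z"); // optional: absent for non-expiring snapshots
        manifest.put("files", Arrays.asList("db1", "db2", "db3"));

        ObjectMapper mapper = new ObjectMapper();
        File f = File.createTempFile("manifest", ".json");
        mapper.writeValue(f, manifest);

        @SuppressWarnings("unchecked")
        Map<String, Object> readBack = mapper.readValue(f, Map.class);
        System.out.println(readBack.get("files")); // [db1, db2, db3]
    }
}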
diff --git a/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java b/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java
new file mode 100644
index 0000000..4bb1756
--- /dev/null
+++ b/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.service.snapshot;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.UUID;
+
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
+import org.apache.cassandra.io.util.FileUtils;
+
+import static org.apache.cassandra.utils.FBUtilities.now;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TableSnapshotTest
+{
+    @Before
+    public void setup()
+    {
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    @ClassRule
+    public static TemporaryFolder tempFolder = new TemporaryFolder();
+
+    public static Set<File> createFolders(TemporaryFolder temp) throws IOException {
+        File folder = new File(temp.newFolder());
+        Set<File> folders = new HashSet<>();
+        for (String folderName : Arrays.asList("foo", "bar", "buzz")) {
+            File subfolder = new File(folder, folderName);
+            subfolder.tryCreateDirectories();
+            assertThat(subfolder.exists()).isTrue();
+            folders.add(subfolder);
+        }
+        return folders;
+    }
+
+    @Test
+    public void testSnapshotExists() throws IOException
+    {
+        Set<File> folders = createFolders(tempFolder);
+
+        TableSnapshot snapshot = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some",
+        null,
+        null,
+        folders);
+
+        assertThat(snapshot.exists()).isTrue();
+
+        folders.forEach(FileUtils::deleteRecursive);
+
+        assertThat(snapshot.exists()).isFalse();
+    }
+
+    @Test
+    public void testSnapshotExpiring() throws IOException
+    {
+        Set<File> folders = createFolders(tempFolder);
+
+        TableSnapshot snapshot = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some",
+        null,
+        null,
+        folders);
+
+        assertThat(snapshot.isExpiring()).isFalse();
+        assertThat(snapshot.isExpired(now())).isFalse();
+
+        snapshot = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some",
+        now(),
+        null,
+        folders);
+
+        assertThat(snapshot.isExpiring()).isFalse();
+        assertThat(snapshot.isExpired(now())).isFalse();
+
+        snapshot = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some",
+        now(),
+        now().plusSeconds(1000),
+        folders);
+
+        assertThat(snapshot.isExpiring()).isTrue();
+        assertThat(snapshot.isExpired(now())).isFalse();
+
+        snapshot = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some",
+        now(),
+        now().minusSeconds(1000),
+        folders);
+
+        assertThat(snapshot.isExpiring()).isTrue();
+        assertThat(snapshot.isExpired(now())).isTrue();
+    }
+
+    private Long writeBatchToFile(File file) throws IOException
+    {
+        FileOutputStreamPlus out = new FileOutputStreamPlus(file);
+        out.write(1);
+        out.write(2);
+        out.write(3);
+        out.close();
+        return 3L;
+    }
+
+    @Test
+    public void testComputeSizeOnDisk() throws IOException
+    {
+        Set<File> folders = createFolders(tempFolder);
+
+        TableSnapshot tableDetails = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some",
+        null,
+        null,
+        folders);
+
+        Long res = 0L;
+
+        for (File dir : folders)
+        {
+            writeBatchToFile(new File(dir, "tmp"));
+            res += FileUtils.folderSize(dir);
+        }
+
+        assertThat(tableDetails.computeSizeOnDiskBytes()).isGreaterThan(0L);
+        assertThat(tableDetails.computeSizeOnDiskBytes()).isEqualTo(res);
+    }
+
+    @Test
+    public void testComputeTrueSize() throws IOException
+    {
+        Set<File> folders = createFolders(tempFolder);
+
+        TableSnapshot tableDetails = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some",
+        null,
+        null,
+        folders);
+
+        Long res = 0L;
+
+        for (File dir : folders)
+        {
+            File file = new File(dir, "tmp");
+            writeBatchToFile(file);
+            res += file.length();
+        }
+
+        assertThat(tableDetails.computeTrueSizeBytes()).isGreaterThan(0L);
+        assertThat(tableDetails.computeTrueSizeBytes()).isEqualTo(res);
+    }
+
+    @Test
+    public void testGetCreatedAt() throws IOException
+    {
+        Set<File> folders = createFolders(tempFolder);
+
+        // When createdAt is not null, getCreatedAt() should return it
+        Instant createdAt = Instant.EPOCH;
+        TableSnapshot withCreatedAt = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some1",
+        createdAt,
+        null,
+        folders);
+        assertThat(withCreatedAt.getCreatedAt()).isEqualTo(createdAt);
+
+        // When createdAt is null, getCreatedAt() should return the minimum last-modified time of the snapshot folders
+        TableSnapshot withoutCreatedAt = new TableSnapshot(
+        "ks",
+        "tbl",
+        UUID.randomUUID(),
+        "some1",
+        null,
+        null,
+        folders);
+        assertThat(withoutCreatedAt.getCreatedAt()).isEqualTo(Instant.ofEpochMilli(folders.stream().mapToLong(f -> f.lastModified()).min().getAsLong()));
+    }
+
+    @Test
+    public void testGetLiveFileFromSnapshotFile()
+    {
+        testGetLiveFileFromSnapshotFile("~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/snapshots/1643481737850/me-1-big-Data.db",
+                                        "~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/me-1-big-Data.db");
+    }
+
+    @Test
+    public void testGetLiveFileFromSnapshotIndexFile()
+    {
+        testGetLiveFileFromSnapshotFile("~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/snapshots/1643481737850/.tbl_val_idx/me-1-big-Summary.db",
+                                        "~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/.tbl_val_idx/me-1-big-Summary.db");
+    }
+
+    public void testGetLiveFileFromSnapshotFile(String snapshotFile, String expectedLiveFile)
+    {
+        assertThat(TableSnapshot.getLiveFileFromSnapshotFile(Paths.get(snapshotFile)).toString()).isEqualTo(expectedLiveFile);
+    }
+}
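Illustrative sketch (not part of this patch): the two path-mapping tests above imply that the live file is obtained by dropping the snapshots/<tag> segments while keeping anything below the tag (such as a secondary-index folder) intact. The following java.nio.file sketch reproduces that mapping under the assumption that the path contains a snapshots segment; it is an illustration, not TableSnapshot.getLiveFileFromSnapshotFile itself.

import java.nio.file.Path;
import java.nio.file.Paths;

public class LiveFileSketch
{
    // Rebuild the live path by dropping the "snapshots/<tag>" segments,
    // keeping anything below the tag (e.g. secondary index folders) intact.
    static Path liveFromSnapshot(Path snapshotFile)
    {
        int count = snapshotFile.getNameCount();
        int snapshotsIdx = -1;
        for (int i = 0; i < count; i++)
            if (snapshotFile.getName(i).toString().equals("snapshots"))
                snapshotsIdx = i;

        Path tableDir = snapshotFile.subpath(0, snapshotsIdx);
        Path relative = snapshotFile.subpath(snapshotsIdx + 2, count); // skip "snapshots" and the tag
        Path result = tableDir.resolve(relative);
        return snapshotFile.isAbsolute() ? snapshotFile.getRoot().resolve(result) : result;
    }

    public static void main(String[] args)
    {
        System.out.println(liveFromSnapshot(Paths.get(
            "~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/snapshots/1643481737850/me-1-big-Data.db")));
        // ~/.ccm/test/node1/data0/test_ks/tbl-e03faca0813211eca100c705ea09b5ef/me-1-big-Data.db
    }
}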
diff --git a/test/unit/org/apache/cassandra/streaming/EntireSSTableStreamingCorrectFilesCountTest.java b/test/unit/org/apache/cassandra/streaming/EntireSSTableStreamingCorrectFilesCountTest.java
index 085302f..34ce09c 100644
--- a/test/unit/org/apache/cassandra/streaming/EntireSSTableStreamingCorrectFilesCountTest.java
+++ b/test/unit/org/apache/cassandra/streaming/EntireSSTableStreamingCorrectFilesCountTest.java
@@ -24,7 +24,8 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.UUID;
+
+import javax.annotation.Nullable;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -36,6 +37,7 @@
 import io.netty.channel.ChannelPromise;
 import io.netty.channel.embedded.EmbeddedChannel;
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.RowUpdateBuilder;
@@ -47,7 +49,6 @@
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.RangesAtEndpoint;
 import org.apache.cassandra.net.AsyncStreamingOutputPlus;
@@ -55,12 +56,12 @@
 import org.apache.cassandra.net.SharedDefaultFileRegion;
 import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
-import javax.annotation.Nullable;
-
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -102,7 +103,7 @@
             .applyUnsafe();
         }
 
-        store.forceBlockingFlush();
+        Util.flush(store);
         CompactionManager.instance.performMaximal(store, false);
 
         sstable = store.getLiveSSTables().iterator().next();
@@ -194,13 +195,13 @@
     {
         StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP,
                                                                     1,
-                                                                    new DefaultConnectionFactory(),
+                                                                    new NettyStreamingConnectionFactory(),
                                                                     false,
                                                                     false,
                                                                     null,
                                                                     PreviewKind.NONE);
 
-        StreamResultFuture future = StreamResultFuture.createInitiator(UUID.randomUUID(),
+        StreamResultFuture future = StreamResultFuture.createInitiator(nextTimeUUID(),
                                                                        StreamOperation.BOOTSTRAP,
                                                                        Collections.singleton(streamEventHandler),
                                                                        streamCoordinator);
@@ -213,7 +214,7 @@
                                                          Collections.emptyList(),
                                                          StreamSession.State.INITIALIZED));
 
-        StreamSession session = streamCoordinator.getOrCreateNextSession(peer);
+        StreamSession session = streamCoordinator.getOrCreateOutboundSession(peer);
         session.init(future);
 
         return session;
diff --git a/test/unit/org/apache/cassandra/streaming/SessionInfoTest.java b/test/unit/org/apache/cassandra/streaming/SessionInfoTest.java
index 4f0c494..45172fe 100644
--- a/test/unit/org/apache/cassandra/streaming/SessionInfoTest.java
+++ b/test/unit/org/apache/cassandra/streaming/SessionInfoTest.java
@@ -57,13 +57,13 @@
         assert info.getTotalFilesSent() == 0;
 
         // receive in progress
-        info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 50, 100));
+        info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 50, 50, 100));
         // still in progress, but not completed yet
         assert info.getTotalSizeReceived() == 50;
         assert info.getTotalSizeSent() == 0;
         assert info.getTotalFilesReceived() == 0;
         assert info.getTotalFilesSent() == 0;
-        info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 100, 100));
+        info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 100, 100, 100));
         // 1 file should be completed
         assert info.getTotalSizeReceived() == 100;
         assert info.getTotalSizeSent() == 0;
diff --git a/test/unit/org/apache/cassandra/streaming/StreamManagerTest.java b/test/unit/org/apache/cassandra/streaming/StreamManagerTest.java
index 625d9d5..a32c1a9 100644
--- a/test/unit/org/apache/cassandra/streaming/StreamManagerTest.java
+++ b/test/unit/org/apache/cassandra/streaming/StreamManagerTest.java
@@ -22,24 +22,38 @@
 import org.junit.Test;
 
 import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.DataRateSpec;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.service.StorageService;
 
 import static org.apache.cassandra.streaming.StreamManager.StreamRateLimiter;
-import static org.apache.cassandra.streaming.StreamManager.StreamRateLimiter.BYTES_PER_MEGABIT;
+import static org.apache.cassandra.streaming.StreamManager.StreamRateLimiter.BYTES_PER_MEBIBYTE;
 import static org.junit.Assert.assertEquals;
 
 public class StreamManagerTest
 {
-    private static int defaultStreamThroughputMbPerSec;
-    private static int defaultInterDCStreamThroughputMbPerSec;
+    private static double defaultStreamThroughputBytesPerSec;
+    private static double defaultInterDCStreamThroughputBytesPerSec;
+    private static final int MAX_INT_CONFIG_VALUE = Integer.MAX_VALUE - 1;
+    private static final double INTEGER_MAX_VALUE_MEGABITS_IN_BYTES = DataRateSpec.LongBytesPerSecondBound
+                                                                          .megabitsPerSecondInBytesPerSecond(MAX_INT_CONFIG_VALUE)
+                                                                          .toBytesPerSecond();
+
+    private static double defaultEntireSSTableStreamThroughputBytesPerSec;
+    private static double defaultEntireSSTableInterDCStreamThroughputBytesPerSec;
+
+    private static final double BYTES_PER_MEGABIT = 125_000;
 
     @BeforeClass
     public static void setupClass()
     {
         Config c = DatabaseDescriptor.loadConfig();
-        defaultStreamThroughputMbPerSec = c.stream_throughput_outbound_megabits_per_sec;
-        defaultInterDCStreamThroughputMbPerSec = c.inter_dc_stream_throughput_outbound_megabits_per_sec;
+
+        defaultStreamThroughputBytesPerSec = c.stream_throughput_outbound.toBytesPerSecond();
+        defaultInterDCStreamThroughputBytesPerSec = c.inter_dc_stream_throughput_outbound.toBytesPerSecond();
+        defaultEntireSSTableStreamThroughputBytesPerSec = c.entire_sstable_stream_throughput_outbound.toBytesPerSecond();
+        defaultEntireSSTableInterDCStreamThroughputBytesPerSec = c.entire_sstable_inter_dc_stream_throughput_outbound.toBytesPerSecond();
+
         DatabaseDescriptor.daemonInitialization(() -> c);
     }
 
@@ -47,45 +61,75 @@
     public void testUpdateStreamThroughput()
     {
         // Initialized value check
-        assertEquals(defaultStreamThroughputMbPerSec * BYTES_PER_MEGABIT, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
+        assertEquals(defaultStreamThroughputBytesPerSec, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
 
         // Positive value check
-        StorageService.instance.setStreamThroughputMbPerSec(500);
-        assertEquals(500.0d * BYTES_PER_MEGABIT, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
+        StorageService.instance.setStreamThroughputMbitPerSec(500); // approximately 60MiB/s
+        assertEquals(500 * BYTES_PER_MEGABIT, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
 
         // Max positive value check
-        StorageService.instance.setStreamThroughputMbPerSec(Integer.MAX_VALUE);
-        assertEquals(Integer.MAX_VALUE * BYTES_PER_MEGABIT, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
+        StorageService.instance.setStreamThroughputMbitPerSec(MAX_INT_CONFIG_VALUE);
+        assertEquals(INTEGER_MAX_VALUE_MEGABITS_IN_BYTES, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
 
         // Zero value check
-        StorageService.instance.setStreamThroughputMbPerSec(0);
+        StorageService.instance.setStreamThroughputMbitPerSec(0);
         assertEquals(Double.MAX_VALUE, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
+    }
 
-        // Negative value check
-        StorageService.instance.setStreamThroughputMbPerSec(-200);
-        assertEquals(Double.MAX_VALUE, StreamRateLimiter.getRateLimiterRateInBytes(), 0);
+    @Test
+    public void testUpdateEntireSSTableStreamThroughput()
+    {
+        // Initialized value check (defaults to StreamRateLimiter.getRateLimiterRateInBytes())
+        assertEquals(defaultEntireSSTableStreamThroughputBytesPerSec, StreamRateLimiter.getEntireSSTableRateLimiterRateInBytes(), 0);
+
+        // Positive value check
+        StorageService.instance.setEntireSSTableStreamThroughputMebibytesPerSec(1500);
+        assertEquals(1500d * BYTES_PER_MEBIBYTE, StreamRateLimiter.getEntireSSTableRateLimiterRateInBytes(), 0);
+
+        // Max positive value check
+        StorageService.instance.setEntireSSTableStreamThroughputMebibytesPerSec(MAX_INT_CONFIG_VALUE);
+        assertEquals((MAX_INT_CONFIG_VALUE) * BYTES_PER_MEBIBYTE, StreamRateLimiter.getEntireSSTableRateLimiterRateInBytes(), 0);
+
+        // Zero value check
+        StorageService.instance.setEntireSSTableStreamThroughputMebibytesPerSec(0);
+        assertEquals(Double.MAX_VALUE, StreamRateLimiter.getEntireSSTableRateLimiterRateInBytes(), 0);
     }
 
     @Test
     public void testUpdateInterDCStreamThroughput()
     {
         // Initialized value check
-        assertEquals(defaultInterDCStreamThroughputMbPerSec * BYTES_PER_MEGABIT, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
+        assertEquals(defaultInterDCStreamThroughputBytesPerSec, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
 
         // Positive value check
-        StorageService.instance.setInterDCStreamThroughputMbPerSec(200);
-        assertEquals(200.0d * BYTES_PER_MEGABIT, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(200); //approximately 24MiB/s
+        assertEquals(200 * BYTES_PER_MEGABIT, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
 
         // Max positive value check
-        StorageService.instance.setInterDCStreamThroughputMbPerSec(Integer.MAX_VALUE);
-        assertEquals(Integer.MAX_VALUE * BYTES_PER_MEGABIT, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(MAX_INT_CONFIG_VALUE);
+        assertEquals(INTEGER_MAX_VALUE_MEGABITS_IN_BYTES, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
 
         // Zero value check
-        StorageService.instance.setInterDCStreamThroughputMbPerSec(0);
-        assertEquals(Double.MAX_VALUE, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
-
-        // Negative value check
-        StorageService.instance.setInterDCStreamThroughputMbPerSec(-200);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(0);
         assertEquals(Double.MAX_VALUE, StreamRateLimiter.getInterDCRateLimiterRateInBytes(), 0);
     }
-}
+
+    @Test
+    public void testUpdateEntireSSTableInterDCStreamThroughput()
+    {
+        // Initialized value check (Defaults to StreamRateLimiter.getInterDCRateLimiterRateInBytes())
+        assertEquals(defaultEntireSSTableInterDCStreamThroughputBytesPerSec, StreamRateLimiter.getEntireSSTableInterDCRateLimiterRateInBytes(), 0);
+
+        // Positive value check
+        StorageService.instance.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(1200);
+        assertEquals(1200.0d * BYTES_PER_MEBIBYTE, StreamRateLimiter.getEntireSSTableInterDCRateLimiterRateInBytes(), 0);
+
+        // Max positive value check
+        StorageService.instance.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(MAX_INT_CONFIG_VALUE);
+        assertEquals(MAX_INT_CONFIG_VALUE * BYTES_PER_MEBIBYTE, StreamRateLimiter.getEntireSSTableInterDCRateLimiterRateInBytes(), 0);
+
+        // Zero value check
+        StorageService.instance.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(0);
+        assertEquals(Double.MAX_VALUE, StreamRateLimiter.getEntireSSTableInterDCRateLimiterRateInBytes(), 0);
+    }
+}
\ No newline at end of file
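Illustrative sketch (not part of this patch): the updated throughput tests mix two units, the megabit-per-second setters (1 Mbit/s = 125,000 bytes/s, matching the BYTES_PER_MEGABIT constant above) and the mebibyte-per-second setters for entire-SSTable streaming (1 MiB/s = 1,048,576 bytes/s). A line of arithmetic makes the expected values concrete; the constant names mirror the test, the values are just the unit definitions.

public class ThroughputUnitsSketch
{
    static final double BYTES_PER_MEGABIT  = 125_000;     // 1 Mbit = 10^6 bits = 125,000 bytes
    static final double BYTES_PER_MEBIBYTE = 1024 * 1024; // 1 MiB  = 1,048,576 bytes

    public static void main(String[] args)
    {
        System.out.println(500 * BYTES_PER_MEGABIT);   // 6.25E7 bytes/s for the 500 Mbit/s case
        System.out.println(1500 * BYTES_PER_MEBIBYTE); // 1.572864E9 bytes/s for the 1500 MiB/s case
    }
}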
diff --git a/test/unit/org/apache/cassandra/streaming/StreamRateLimiterTest.java b/test/unit/org/apache/cassandra/streaming/StreamRateLimiterTest.java
new file mode 100644
index 0000000..3b72c4d
--- /dev/null
+++ b/test/unit/org/apache/cassandra/streaming/StreamRateLimiterTest.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming;
+
+import java.net.UnknownHostException;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class StreamRateLimiterTest
+{
+    InetAddressAndPort REMOTE_PEER_ADDRESS;
+
+    @Before
+    public void prepareServer() throws UnknownHostException
+    {
+        ServerTestUtils.daemonInitialization();
+        ServerTestUtils.prepareServer();
+        REMOTE_PEER_ADDRESS = InetAddressAndPort.getByName("127.0.0.4");
+    }
+
+    @Test
+    public void testIsRateLimited()
+    {
+        // Enable rate limiting for local traffic and inter-DC traffic
+        StorageService.instance.setStreamThroughputMbitPerSec(200);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(200);
+
+        // Rate-limiter enabled for a local peer
+        assertTrue(StreamManager.getRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+
+        // Rate-limiter enabled for a remote peer
+        assertTrue(StreamManager.getRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+
+        // Disable rate limiting for local traffic, but enable it for inter-DC traffic
+        StorageService.instance.setStreamThroughputMbitPerSec(0);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(200);
+
+        // Rate-limiter disabled for a local peer
+        assertFalse(StreamManager.getRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+
+        // Rate-limiter enabled for a remote peer
+        assertTrue(StreamManager.getRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+
+        // Enable rate limiting for local traffic, but disable it for inter-DC traffic
+        StorageService.instance.setStreamThroughputMbitPerSec(200);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(0);
+
+        // Rate-limiter enabled for a local peer
+        assertTrue(StreamManager.getRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+
+        // Rate-limiter enabled for a remote peer (because there is a local rate-limit)
+        assertTrue(StreamManager.getRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+
+        // Disable rate limiting for local and inter-DC traffic
+        StorageService.instance.setStreamThroughputMbitPerSec(0);
+        StorageService.instance.setInterDCStreamThroughputMbitPerSec(0);
+
+        // Rate-limiter disabled for both local and remote peers
+        assertFalse(StreamManager.getRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+        assertFalse(StreamManager.getRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+    }
+
+    @Test
+    public void testEntireSSTableStreamingIsRateLimited()
+    {
+        // Enable rate limiting for local traffic and inter-DC traffic
+        StorageService.instance.setEntireSSTableStreamThroughputMebibytesPerSec(200);
+        StorageService.instance.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(200);
+
+        // Rate-limiter enabled for a local peer
+        assertTrue(StreamManager.getEntireSSTableRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+
+        // Rate-limiter enabled for a remote peer
+        assertTrue(StreamManager.getEntireSSTableRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+
+        // Disable rate limiting for local traffic, but enable it for inter-DC traffic
+        StorageService.instance.setEntireSSTableStreamThroughputMebibytesPerSec(0);
+        StorageService.instance.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(200);
+
+        // Rate-limiter disabled for a local peer
+        assertFalse(StreamManager.getEntireSSTableRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+
+        // Rate-limiter enabled for a remote peer
+        assertTrue(StreamManager.getEntireSSTableRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+
+        // Enable rate limiting for local traffic, but disable it for inter-DC traffic
+        StorageService.instance.setEntireSSTableStreamThroughputMebibytesPerSec(200);
+        StorageService.instance.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(0);
+
+        // Rate-limiter enabled for a local peer
+        assertTrue(StreamManager.getEntireSSTableRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+
+        // Rate-limiter enabled for a remote peer (because there is a local rate-limit)
+        assertTrue(StreamManager.getEntireSSTableRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+
+        // Disable rate limiting for local and inter-DC traffic
+        StorageService.instance.setEntireSSTableStreamThroughputMebibytesPerSec(0);
+        StorageService.instance.setEntireSSTableInterDCStreamThroughputMebibytesPerSec(0);
+
+        // Rate-limiter disabled for both local and remote peers
+        assertFalse(StreamManager.getEntireSSTableRateLimiter(FBUtilities.getBroadcastAddressAndPort()).isRateLimited());
+        assertFalse(StreamManager.getEntireSSTableRateLimiter(REMOTE_PEER_ADDRESS).isRateLimited());
+    }
+}
\ No newline at end of file
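Illustrative sketch (not part of this patch): read together, the assertions above encode a simple rule: a local peer is rate-limited when the general stream throughput is non-zero, and a remote peer is rate-limited when either the inter-DC or the general throughput is non-zero (a local cap still applies to remote peers). The boolean sketch below is a reading of the assertions, not the StreamManager implementation.

public class RateLimitDecisionSketch
{
    // Throughputs in arbitrary units; 0 means "unlimited".
    static boolean localLimited(double localThroughput)
    {
        return localThroughput > 0;
    }

    static boolean remoteLimited(double localThroughput, double interDcThroughput)
    {
        // A remote peer is limited if either the inter-DC cap or the general cap is set.
        return interDcThroughput > 0 || localThroughput > 0;
    }

    public static void main(String[] args)
    {
        System.out.println(localLimited(200) + " " + remoteLimited(200, 200)); // true true
        System.out.println(localLimited(0)   + " " + remoteLimited(0, 200));   // false true
        System.out.println(localLimited(200) + " " + remoteLimited(200, 0));   // true true
        System.out.println(localLimited(0)   + " " + remoteLimited(0, 0));     // false false
    }
}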
diff --git a/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java b/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java
index 0bf7f20..dad1e79 100644
--- a/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java
+++ b/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java
@@ -17,12 +17,12 @@
  */
 package org.apache.cassandra.streaming;
 
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
@@ -31,9 +31,9 @@
 import org.junit.After;
 import org.junit.Test;
 
-import io.netty.channel.embedded.EmbeddedChannel;
 import org.junit.Assert;
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -43,12 +43,17 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.TestChannel;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.streaming.async.NettyStreamingChannel;
+import org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory;
 import org.apache.cassandra.streaming.messages.OutgoingStreamMessage;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.Ref;
 
+import static org.apache.cassandra.net.MessagingService.current_version;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
@@ -57,6 +62,15 @@
     public static final String KEYSPACE1 = "StreamTransferTaskTest";
     public static final String CF_STANDARD = "Standard1";
 
+    static final StreamingChannel.Factory FACTORY = new NettyStreamingConnectionFactory()
+    {
+        @Override
+        public NettyStreamingChannel create(InetSocketAddress to, int messagingVersion, StreamingChannel.Kind kind)
+        {
+            return new NettyStreamingChannel(messagingVersion, new TestChannel(), kind);
+        }
+    };
+
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
@@ -77,14 +91,15 @@
     public void testScheduleTimeout() throws Exception
     {
         InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
-        StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, peer, (template, messagingVersion) -> new EmbeddedChannel(), false, 0, UUID.randomUUID(), PreviewKind.ALL);
+        StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, peer, FACTORY, null, current_version, false, 0, nextTimeUUID(), PreviewKind.ALL);
+        session.init(new StreamResultFuture(nextTimeUUID(), StreamOperation.OTHER, nextTimeUUID(), PreviewKind.NONE));
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD);
 
         // create two sstables
         for (int i = 0; i < 2; i++)
         {
             SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, i, 1);
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         // create streaming task that streams those two sstables
@@ -125,9 +140,9 @@
     public void testFailSessionDuringTransferShouldNotReleaseReferences() throws Exception
     {
         InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
-        StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new DefaultConnectionFactory(), false, false, null, PreviewKind.NONE);
-        StreamResultFuture future = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.OTHER, Collections.<StreamEventHandler>emptyList(), streamCoordinator);
-        StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, peer, null, false, 0, null, PreviewKind.NONE);
+        StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
+        StreamResultFuture future = StreamResultFuture.createInitiator(nextTimeUUID(), StreamOperation.OTHER, Collections.<StreamEventHandler>emptyList(), streamCoordinator);
+        StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, peer, FACTORY, null, current_version, false, 0, null, PreviewKind.NONE);
         session.init(future);
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD);
 
@@ -135,7 +150,7 @@
         for (int i = 0; i < 2; i++)
         {
             SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, i, 1);
-            cfs.forceBlockingFlush();
+            Util.flush(cfs);
         }
 
         // create streaming task that streams those two sstables
@@ -174,7 +189,7 @@
 
         //wait for stream to abort asynchronously
         int tries = 10;
-        while (ScheduledExecutors.nonPeriodicTasks.getActiveCount() > 0)
+        while (ScheduledExecutors.nonPeriodicTasks.getActiveTaskCount() > 0)
         {
             if(tries < 1)
                 throw new RuntimeException("test did not complete in time");
diff --git a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
index a4aa77f..cffd3b2 100644
--- a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
+++ b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
@@ -54,6 +54,7 @@
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.concurrent.Refs;
 
 import static org.apache.cassandra.SchemaLoader.compositeIndexCFMD;
@@ -116,7 +117,7 @@
     public void testEmptyStreamPlan() throws Exception
     {
         StreamResultFuture futureResult = new StreamPlan(StreamOperation.OTHER).execute();
-        final UUID planId = futureResult.planId;
+        final TimeUUID planId = futureResult.planId;
         Futures.addCallback(futureResult, new FutureCallback<StreamState>()
         {
             public void onSuccess(StreamState result)
@@ -148,7 +149,7 @@
                                                   .requestRanges(LOCAL, KEYSPACE2, RangesAtEndpoint.toDummyList(ranges), RangesAtEndpoint.toDummyList(Collections.emptyList()))
                                                   .execute();
 
-        UUID planId = futureResult.planId;
+        TimeUUID planId = futureResult.planId;
         StreamState result = futureResult.get();
         assert planId.equals(result.planId);
         assert result.streamOperation == StreamOperation.OTHER;
@@ -174,7 +175,7 @@
         long timestamp = 1234;
         for (int i = 1; i <= 3; i++)
             mutator.mutate("key" + i, "col" + i, timestamp);
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Util.compactAll(cfs, Integer.MAX_VALUE).get();
         assertEquals(1, cfs.getLiveSSTables().size());
 
@@ -362,7 +363,7 @@
                 .build()
                 .apply();
 
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
 
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         cfs.clearUnsafe();
@@ -554,7 +555,7 @@
         // write a lot more data so the data is spread in more than 1 chunk.
         for (int i = 1; i <= 6000; i++)
             mutator.mutate("key" + i, "col" + i, System.currentTimeMillis());
-        cfs.forceBlockingFlush();
+        Util.flush(cfs);
         Util.compactAll(cfs, Integer.MAX_VALUE).get();
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         cfs.clearUnsafe();
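
Throughout these hunks, direct cfs.forceBlockingFlush() calls are replaced by the Util.flush(cfs) helper. A rough sketch of what such a thin wrapper can look like is below; the forceBlockingFlush(FlushReason) overload and the UNIT_TESTS reason are assumptions for illustration, not necessarily the exact shape of the project's helper.

import org.apache.cassandra.db.ColumnFamilyStore;

// Sketch only: centralising the blocking flush behind one helper means a later
// signature change (for example, adding a flush reason) touches a single method
// instead of every test call site. The overload used below is an assumption.
public final class TestFlush
{
    private TestFlush() {}

    public static void flush(ColumnFamilyStore cfs)
    {
        cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS); // assumed overload
    }
}
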
diff --git a/test/unit/org/apache/cassandra/streaming/async/NettyStreamingMessageSenderTest.java b/test/unit/org/apache/cassandra/streaming/async/NettyStreamingMessageSenderTest.java
deleted file mode 100644
index 76bfa76..0000000
--- a/test/unit/org/apache/cassandra/streaming/async/NettyStreamingMessageSenderTest.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.streaming.async;
-
-import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import com.google.common.net.InetAddresses;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import io.netty.channel.ChannelPromise;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.TestChannel;
-import org.apache.cassandra.net.TestScheduledFuture;
-import org.apache.cassandra.streaming.PreviewKind;
-import org.apache.cassandra.streaming.StreamOperation;
-import org.apache.cassandra.streaming.StreamResultFuture;
-import org.apache.cassandra.streaming.StreamSession;
-import org.apache.cassandra.streaming.messages.CompleteMessage;
-
-public class NettyStreamingMessageSenderTest
-{
-    private static final InetAddressAndPort REMOTE_ADDR = InetAddressAndPort.getByAddressOverrideDefaults(InetAddresses.forString("127.0.0.2"), 0);
-
-    private TestChannel channel;
-    private StreamSession session;
-    private NettyStreamingMessageSender sender;
-    private NettyStreamingMessageSender.FileStreamTask fileStreamTask;
-
-    @BeforeClass
-    public static void before()
-    {
-        DatabaseDescriptor.daemonInitialization();
-    }
-
-    @Before
-    public void setUp()
-    {
-        channel = new TestChannel(Integer.MAX_VALUE);
-        channel.attr(NettyStreamingMessageSender.TRANSFERRING_FILE_ATTR).set(Boolean.FALSE);
-        UUID pendingRepair = UUID.randomUUID();
-        session = new StreamSession(StreamOperation.BOOTSTRAP, REMOTE_ADDR, (template, messagingVersion) -> null, true, 0, pendingRepair, PreviewKind.ALL);
-        StreamResultFuture future = StreamResultFuture.createFollower(0, UUID.randomUUID(), StreamOperation.REPAIR, REMOTE_ADDR, channel, pendingRepair, session.getPreviewKind());
-        session.init(future);
-        session.attachOutbound(channel);
-
-        sender = session.getMessageSender();
-        sender.setControlMessageChannel(channel);
-    }
-
-    @After
-    public void tearDown()
-    {
-        if (fileStreamTask != null)
-            fileStreamTask.unsetChannel();
-    }
-
-    @Test
-    public void KeepAliveTask_normalSend()
-    {
-        Assert.assertTrue(channel.isOpen());
-        NettyStreamingMessageSender.KeepAliveTask task = sender.new KeepAliveTask(channel, session);
-        task.run();
-        Assert.assertTrue(channel.releaseOutbound());
-    }
-
-    @Test
-    public void KeepAliveTask_channelClosed()
-    {
-        channel.close();
-        Assert.assertFalse(channel.isOpen());
-        channel.releaseOutbound();
-        NettyStreamingMessageSender.KeepAliveTask task = sender.new KeepAliveTask(channel, session);
-        task.future = new TestScheduledFuture();
-        Assert.assertFalse(task.future.isCancelled());
-        task.run();
-        Assert.assertTrue(task.future.isCancelled());
-        Assert.assertFalse(channel.releaseOutbound());
-    }
-
-    @Test
-    public void KeepAliveTask_closed()
-    {
-        Assert.assertTrue(channel.isOpen());
-        NettyStreamingMessageSender.KeepAliveTask task = sender.new KeepAliveTask(channel, session);
-        task.future = new TestScheduledFuture();
-        Assert.assertFalse(task.future.isCancelled());
-
-        sender.setClosed();
-        Assert.assertFalse(sender.connected());
-        task.run();
-        Assert.assertTrue(task.future.isCancelled());
-        Assert.assertFalse(channel.releaseOutbound());
-    }
-
-    @Test
-    public void KeepAliveTask_CurrentlyStreaming()
-    {
-        Assert.assertTrue(channel.isOpen());
-        channel.attr(NettyStreamingMessageSender.TRANSFERRING_FILE_ATTR).set(Boolean.TRUE);
-        NettyStreamingMessageSender.KeepAliveTask task = sender.new KeepAliveTask(channel, session);
-        task.future = new TestScheduledFuture();
-        Assert.assertFalse(task.future.isCancelled());
-
-        Assert.assertTrue(sender.connected());
-        task.run();
-        Assert.assertFalse(task.future.isCancelled());
-        Assert.assertFalse(channel.releaseOutbound());
-    }
-
-    @Test
-    public void FileStreamTask_acquirePermit_closed()
-    {
-        fileStreamTask = sender.new FileStreamTask(null);
-        sender.setClosed();
-        Assert.assertFalse(fileStreamTask.acquirePermit(1));
-    }
-
-    @Test
-    public void FileStreamTask_acquirePermit_HapppyPath()
-    {
-        int permits = sender.semaphoreAvailablePermits();
-        fileStreamTask = sender.new FileStreamTask(null);
-        Assert.assertTrue(fileStreamTask.acquirePermit(1));
-        Assert.assertEquals(permits - 1, sender.semaphoreAvailablePermits());
-    }
-
-    @Test
-    public void FileStreamTask_BadChannelAttr()
-    {
-        int permits = sender.semaphoreAvailablePermits();
-        channel.attr(NettyStreamingMessageSender.TRANSFERRING_FILE_ATTR).set(Boolean.TRUE);
-        fileStreamTask = sender.new FileStreamTask(null);
-        fileStreamTask.injectChannel(channel);
-        fileStreamTask.run();
-        Assert.assertEquals(StreamSession.State.FAILED, session.state());
-        Assert.assertTrue(channel.releaseOutbound()); // when the session fails, it will send a SessionFailed msg
-        Assert.assertEquals(permits, sender.semaphoreAvailablePermits());
-    }
-
-    @Test
-    public void FileStreamTask_HappyPath()
-    {
-        int permits = sender.semaphoreAvailablePermits();
-        fileStreamTask = sender.new FileStreamTask(new CompleteMessage());
-        fileStreamTask.injectChannel(channel);
-        fileStreamTask.run();
-        Assert.assertNotEquals(StreamSession.State.FAILED, session.state());
-        Assert.assertTrue(channel.releaseOutbound());
-        Assert.assertEquals(permits, sender.semaphoreAvailablePermits());
-    }
-
-    @Test
-    public void onControlMessageComplete_HappyPath()
-    {
-        Assert.assertTrue(channel.isOpen());
-        Assert.assertTrue(sender.connected());
-        ChannelPromise promise = channel.newPromise();
-        promise.setSuccess();
-        Assert.assertNull(sender.onControlMessageComplete(promise, new CompleteMessage()));
-        Assert.assertTrue(channel.isOpen());
-        Assert.assertTrue(sender.connected());
-        Assert.assertNotEquals(StreamSession.State.FAILED, session.state());
-    }
-
-    @Test
-    public void onControlMessageComplete_Exception() throws InterruptedException, ExecutionException, TimeoutException
-    {
-        Assert.assertTrue(channel.isOpen());
-        Assert.assertTrue(sender.connected());
-        ChannelPromise promise = channel.newPromise();
-        promise.setFailure(new RuntimeException("this is just a testing exception"));
-        Future f = sender.onControlMessageComplete(promise, new CompleteMessage());
-
-        f.get(5, TimeUnit.SECONDS);
-
-        Assert.assertFalse(channel.isOpen());
-        Assert.assertFalse(sender.connected());
-        Assert.assertEquals(StreamSession.State.FAILED, session.state());
-    }
-}
diff --git a/test/unit/org/apache/cassandra/streaming/async/StreamingInboundHandlerTest.java b/test/unit/org/apache/cassandra/streaming/async/StreamingInboundHandlerTest.java
index a43b116..b156ab7 100644
--- a/test/unit/org/apache/cassandra/streaming/async/StreamingInboundHandlerTest.java
+++ b/test/unit/org/apache/cassandra/streaming/async/StreamingInboundHandlerTest.java
@@ -20,9 +20,7 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.UUID;
 
-import com.google.common.net.InetAddresses;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -36,28 +34,32 @@
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.net.AsyncStreamingInputPlus;
+import org.apache.cassandra.net.TestChannel;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.streaming.StreamDeserializingTask;
 import org.apache.cassandra.streaming.StreamManager;
 import org.apache.cassandra.streaming.StreamOperation;
 import org.apache.cassandra.streaming.StreamResultFuture;
 import org.apache.cassandra.streaming.StreamSession;
+import org.apache.cassandra.streaming.StreamingChannel;
 import org.apache.cassandra.streaming.messages.CompleteMessage;
 import org.apache.cassandra.streaming.messages.IncomingStreamMessage;
 import org.apache.cassandra.streaming.messages.StreamInitMessage;
 import org.apache.cassandra.streaming.messages.StreamMessageHeader;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+import static org.apache.cassandra.net.TestChannel.REMOTE_ADDR;
 
 public class StreamingInboundHandlerTest
 {
     private static final int VERSION = MessagingService.current_version;
-    private static final InetAddressAndPort REMOTE_ADDR = InetAddressAndPort.getByAddressOverrideDefaults(InetAddresses.forString("127.0.0.2"), 0);
 
-    private StreamingInboundHandler handler;
+    private NettyStreamingChannel streamingChannel;
     private EmbeddedChannel channel;
-    private AsyncStreamingInputPlus buffers;
     private ByteBuf buf;
 
     @BeforeClass
@@ -69,10 +71,9 @@
     @Before
     public void setup()
     {
-        handler = new StreamingInboundHandler(REMOTE_ADDR, VERSION, null);
-        channel = new EmbeddedChannel(handler);
-        buffers = new AsyncStreamingInputPlus(channel);
-        handler.setPendingBuffers(buffers);
+        channel = new TestChannel();
+        streamingChannel = new NettyStreamingChannel(VERSION, channel, StreamingChannel.Kind.CONTROL);
+        channel.pipeline().addLast("stream", streamingChannel);
     }
 
     @After
@@ -88,32 +89,18 @@
     }
 
     @Test
-    public void channelRead_Closed()
-    {
-        int size = 8;
-        buf = channel.alloc().buffer(size);
-        Assert.assertEquals(1, buf.refCnt());
-        buf.writerIndex(size);
-        handler.close();
-        channel.writeInbound(buf);
-        Assert.assertEquals(0, buffers.unsafeAvailable());
-        Assert.assertEquals(0, buf.refCnt());
-        Assert.assertFalse(channel.releaseInbound());
-    }
-
-    @Test
     public void channelRead_WrongObject()
     {
         channel.writeInbound("homer");
-        Assert.assertEquals(0, buffers.unsafeAvailable());
+        Assert.assertEquals(0, streamingChannel.in.unsafeAvailable());
         Assert.assertFalse(channel.releaseInbound());
     }
 
     @Test
     public void StreamDeserializingTask_deriveSession_StreamInitMessage()
     {
-        StreamInitMessage msg = new StreamInitMessage(REMOTE_ADDR, 0, UUID.randomUUID(), StreamOperation.REPAIR, UUID.randomUUID(), PreviewKind.ALL);
-        StreamingInboundHandler.StreamDeserializingTask task = handler.new StreamDeserializingTask(null, channel);
+        StreamInitMessage msg = new StreamInitMessage(REMOTE_ADDR, 0, nextTimeUUID(), StreamOperation.REPAIR, nextTimeUUID(), PreviewKind.ALL);
+        StreamDeserializingTask task = new StreamDeserializingTask(null, streamingChannel, streamingChannel.messagingVersion);
         StreamSession session = task.deriveSession(msg);
         Assert.assertNotNull(session);
     }
@@ -122,15 +109,15 @@
     public void StreamDeserializingTask_deriveSession_NoSession()
     {
         CompleteMessage msg = new CompleteMessage();
-        StreamingInboundHandler.StreamDeserializingTask task = handler.new StreamDeserializingTask(null, channel);
+        StreamDeserializingTask task = new StreamDeserializingTask(null, streamingChannel, streamingChannel.messagingVersion);
         task.deriveSession(msg);
     }
 
     @Test (expected = IllegalStateException.class)
     public void StreamDeserializingTask_deserialize_ISM_NoSession() throws IOException
     {
-        StreamMessageHeader header = new StreamMessageHeader(TableId.generate(), REMOTE_ADDR, UUID.randomUUID(), true,
-                                                             0, 0, 0, UUID.randomUUID());
+        StreamMessageHeader header = new StreamMessageHeader(TableId.generate(), REMOTE_ADDR, nextTimeUUID(), true,
+                                                             0, 0, 0, nextTimeUUID());
 
         ByteBuffer temp = ByteBuffer.allocate(1024);
         DataOutputPlus out = new DataOutputBuffer(temp);
@@ -145,11 +132,11 @@
     @Test
     public void StreamDeserializingTask_deserialize_ISM_HasSession()
     {
-        UUID planId = UUID.randomUUID();
-        StreamResultFuture future = StreamResultFuture.createFollower(0, planId, StreamOperation.REPAIR, REMOTE_ADDR, channel, UUID.randomUUID(), PreviewKind.ALL);
+        TimeUUID planId = nextTimeUUID();
+        StreamResultFuture future = StreamResultFuture.createFollower(0, planId, StreamOperation.REPAIR, REMOTE_ADDR, streamingChannel, streamingChannel.messagingVersion, nextTimeUUID(), PreviewKind.ALL);
         StreamManager.instance.registerFollower(future);
         StreamMessageHeader header = new StreamMessageHeader(TableId.generate(), REMOTE_ADDR, planId, false,
-                                                             0, 0, 0, UUID.randomUUID());
+                                                             0, 0, 0, nextTimeUUID());
 
         // IncomingStreamMessage.serializer.deserialize
         StreamSession session = StreamManager.instance.findSession(header.sender, header.planId, header.sessionIndex, header.sendByFollower);
diff --git a/test/unit/org/apache/cassandra/streaming/async/StreamingMultiplexedChannelTest.java b/test/unit/org/apache/cassandra/streaming/async/StreamingMultiplexedChannelTest.java
new file mode 100644
index 0000000..d43f6d5
--- /dev/null
+++ b/test/unit/org/apache/cassandra/streaming/async/StreamingMultiplexedChannelTest.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming.async;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import io.netty.channel.ChannelPromise;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.net.TestChannel;
+import org.apache.cassandra.streaming.PreviewKind;
+import org.apache.cassandra.streaming.StreamOperation;
+import org.apache.cassandra.streaming.StreamResultFuture;
+import org.apache.cassandra.streaming.StreamSession;
+import org.apache.cassandra.streaming.StreamingChannel;
+import org.apache.cassandra.streaming.messages.CompleteMessage;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.apache.cassandra.net.MessagingService.current_version;
+import static org.apache.cassandra.net.TestChannel.REMOTE_ADDR;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
+public class StreamingMultiplexedChannelTest
+{
+    private NettyStreamingChannel streamingChannel;
+    private TestChannel channel;
+    private StreamSession session;
+    private StreamingMultiplexedChannel sender;
+    private StreamingMultiplexedChannel.FileStreamTask fileStreamTask;
+
+    @BeforeClass
+    public static void before()
+    {
+        DatabaseDescriptor.daemonInitialization();
+    }
+
+    @Before
+    public void setUp()
+    {
+        channel = new TestChannel();
+        streamingChannel = new NettyStreamingChannel(current_version, channel, StreamingChannel.Kind.CONTROL);
+        TimeUUID pendingRepair = nextTimeUUID();
+        session = new StreamSession(StreamOperation.BOOTSTRAP, REMOTE_ADDR, new NettyStreamingConnectionFactory(), streamingChannel, current_version, true, 0, pendingRepair, PreviewKind.ALL);
+        StreamResultFuture future = StreamResultFuture.createFollower(0, nextTimeUUID(), StreamOperation.REPAIR, REMOTE_ADDR, streamingChannel, current_version, pendingRepair, session.getPreviewKind());
+        session.init(future);
+        session.attachOutbound(streamingChannel);
+
+        sender = session.getChannel();
+        sender.setControlChannel(streamingChannel);
+    }
+
+    @After
+    public void tearDown()
+    {
+        if (fileStreamTask != null)
+            fileStreamTask.unsetChannel();
+    }
+
+    @Test
+    public void FileStreamTask_acquirePermit_closed()
+    {
+        fileStreamTask = sender.new FileStreamTask(null);
+        sender.setClosed();
+        Assert.assertFalse(fileStreamTask.acquirePermit(1));
+    }
+
+    @Test
+    public void FileStreamTask_acquirePermit_HapppyPath()
+    {
+        int permits = sender.semaphoreAvailablePermits();
+        fileStreamTask = sender.new FileStreamTask(null);
+        Assert.assertTrue(fileStreamTask.acquirePermit(1));
+        Assert.assertEquals(permits - 1, sender.semaphoreAvailablePermits());
+    }
+
+    @Test
+    public void FileStreamTask_BadChannelAttr()
+    {
+        int permits = sender.semaphoreAvailablePermits();
+        channel.attr(NettyStreamingChannel.TRANSFERRING_FILE_ATTR).set(Boolean.TRUE);
+        fileStreamTask = sender.new FileStreamTask(null);
+        fileStreamTask.injectChannel(streamingChannel);
+        fileStreamTask.run();
+        Assert.assertEquals(StreamSession.State.FAILED, session.state());
+        Assert.assertTrue(channel.releaseOutbound()); // when the session fails, it will send a SessionFailed msg
+        Assert.assertEquals(permits, sender.semaphoreAvailablePermits());
+    }
+
+    @Test
+    public void FileStreamTask_HappyPath()
+    {
+        int permits = sender.semaphoreAvailablePermits();
+        fileStreamTask = sender.new FileStreamTask(new CompleteMessage());
+        fileStreamTask.injectChannel(streamingChannel);
+        fileStreamTask.run();
+        Assert.assertNotEquals(StreamSession.State.FAILED, session.state());
+        Assert.assertTrue(channel.releaseOutbound());
+        Assert.assertEquals(permits, sender.semaphoreAvailablePermits());
+    }
+
+    @Test
+    public void onControlMessageComplete_HappyPath()
+    {
+        Assert.assertTrue(channel.isOpen());
+        Assert.assertTrue(sender.connected());
+        ChannelPromise promise = channel.newPromise();
+        promise.setSuccess();
+        Assert.assertNull(sender.onMessageComplete(promise, new CompleteMessage()));
+        Assert.assertTrue(channel.isOpen());
+        Assert.assertTrue(sender.connected());
+        Assert.assertNotEquals(StreamSession.State.FAILED, session.state());
+    }
+
+    @Test
+    public void onControlMessageComplete_Exception() throws InterruptedException, ExecutionException, TimeoutException
+    {
+        Assert.assertTrue(channel.isOpen());
+        Assert.assertTrue(sender.connected());
+        ChannelPromise promise = channel.newPromise();
+        promise.setFailure(new RuntimeException("this is just a testing exception"));
+        Future f = sender.onMessageComplete(promise, new CompleteMessage());
+
+        f.get(5, TimeUnit.SECONDS);
+
+        Assert.assertFalse(channel.isOpen());
+        Assert.assertFalse(sender.connected());
+        Assert.assertEquals(StreamSession.State.FAILED, session.state());
+    }
+}
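
The FileStreamTask tests above assert that any permit a task acquires is returned once it finishes, on both the happy path and the failure path. A small pure-JDK sketch of that acquire/release invariant, with hypothetical names:

import java.util.concurrent.Semaphore;

// Illustrative only: run a transfer body while holding one permit and guarantee the
// permit is released whether the body succeeds or throws, which is the invariant the
// semaphoreAvailablePermits() assertions above check.
final class PermitGuard
{
    private final Semaphore permits;

    PermitGuard(int maxConcurrentTransfers)
    {
        this.permits = new Semaphore(maxConcurrentTransfers);
    }

    void runWithPermit(Runnable transfer) throws InterruptedException
    {
        permits.acquire();
        try
        {
            transfer.run();
        }
        finally
        {
            permits.release(); // released even if the transfer throws
        }
    }

    int availablePermits()
    {
        return permits.availablePermits();
    }
}
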
diff --git a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
index 11f6b55..619e0d9 100644
--- a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
@@ -17,9 +17,14 @@
  */
 package org.apache.cassandra.streaming.compression;
 
-import java.io.*;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.EOFException;
+import java.io.IOException;
 import java.util.*;
 
+import org.apache.cassandra.io.sstable.SequenceBasedSSTableId;
+import org.apache.cassandra.io.util.File;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -32,7 +37,7 @@
 import org.apache.cassandra.io.compress.CompressionMetadata;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
-import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.io.util.SequentialWriterOption;
 import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.sstable.Component;
@@ -114,8 +119,8 @@
         assert valuesToCheck != null && valuesToCheck.length > 0;
 
         // write compressed data file of longs
-        File parentDir = tempFolder.newFolder();
-        Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1);
+        File parentDir = new File(tempFolder.newFolder());
+        Descriptor desc = new Descriptor(parentDir, "ks", "cf", new SequenceBasedSSTableId(1));
         File tmp = new File(desc.filenameFor(Component.DATA));
         MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
         CompressionParams param = CompressionParams.snappy(32, minCompressRatio);
@@ -134,7 +139,7 @@
             writer.finish();
         }
 
-        CompressionMetadata comp = CompressionMetadata.create(tmp.getAbsolutePath());
+        CompressionMetadata comp = CompressionMetadata.create(tmp.absolutePath());
         List<SSTableReader.PartitionPositionBounds> sections = new ArrayList<>();
         for (long l : valuesToCheck)
         {
@@ -154,7 +159,7 @@
             size += (c.length + 4); // 4bytes CRC
         byte[] toRead = new byte[size];
 
-        try (RandomAccessFile f = new RandomAccessFile(tmp, "r"))
+        try (RandomAccessReader f = RandomAccessReader.open(tmp))
         {
             int pos = 0;
             for (CompressionMetadata.Chunk c : chunks)
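
The size calculation above budgets each compressed chunk as its payload length plus a 4-byte checksum trailer. A self-contained sketch of verifying one such chunk frame is below; treating the trailer as a big-endian CRC32 over the chunk bytes is an assumption for illustration only, since the test relies solely on the 4-byte-per-chunk accounting.

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

// Illustrative chunk-frame check: 'frame' holds the compressed chunk bytes followed
// by a 4-byte checksum. Returns true when the trailer matches the payload's CRC32.
final class ChunkFrames
{
    private ChunkFrames() {}

    static boolean verify(byte[] frame)
    {
        int payloadLength = frame.length - 4;   // 4-byte trailer, matching the test's size math
        CRC32 crc = new CRC32();
        crc.update(frame, 0, payloadLength);
        int stored = ByteBuffer.wrap(frame, payloadLength, 4).getInt();
        return (int) crc.getValue() == stored;
    }
}
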
diff --git a/test/unit/org/apache/cassandra/streaming/messages/KeepAliveMessageTest.java b/test/unit/org/apache/cassandra/streaming/messages/KeepAliveMessageTest.java
new file mode 100644
index 0000000..7ba637b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/streaming/messages/KeepAliveMessageTest.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.streaming.messages;
+
+import java.io.IOException;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class KeepAliveMessageTest
+{
+
+    @Test
+    public void testSerializedSize() throws IOException
+    {
+        assertThat(StreamMessage.serializedSize(new KeepAliveMessage(), 0), equalTo(1L));
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java b/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java
index 5c0cb9d..514857e 100644
--- a/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java
+++ b/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java
@@ -28,6 +28,7 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.io.FileUtils;
 
 import org.junit.After;
@@ -65,10 +66,10 @@
     @After
     public void tearDown() throws IOException
     {
-        if (path.toFile().exists() && path.toFile().isDirectory())
+        if (new File(path).exists() && new File(path).isDirectory())
         {
             //Deletes directory and all of it's contents
-            FileUtils.deleteDirectory(path.toFile());
+            FileUtils.deleteDirectory(new File(path).toJavaIOFile());
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java b/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java
index fce4cda..507aa53 100644
--- a/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java
+++ b/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java
@@ -21,6 +21,7 @@
 import org.junit.Test;
 
 import com.datastax.driver.core.exceptions.NoHostAvailableException;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.tools.ToolRunner.ToolResult;
 import org.hamcrest.CoreMatchers;
 
@@ -36,7 +37,9 @@
         assertEquals(1, tool.getExitCode());
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsString("Missing sstable directory argument"));
         
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(new String[] { "ObjectCleanerThread",
+                                                        "Shutdown-checker",
+                                                        "cluster[0-9]-connection-reaper-[0-9]" });
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -58,7 +61,11 @@
         if (!(tool.getException().getCause().getCause().getCause() instanceof NoHostAvailableException))
             throw tool.getException();
 
-        assertNoUnexpectedThreadsStarted(null, new String[]{"globalEventExecutor-1-1", "globalEventExecutor-1-2"});
+        assertNoUnexpectedThreadsStarted(new String[] { "ObjectCleanerThread",
+                                                        "globalEventExecutor-[1-9]-[1-9]",
+                                                        "globalEventExecutor-[1-9]-[1-9]",
+                                                        "Shutdown-checker",
+                                                        "cluster[0-9]-connection-reaper-[0-9]" });
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -82,7 +89,11 @@
         if (!(tool.getException().getCause().getCause().getCause() instanceof NoHostAvailableException))
             throw tool.getException();
 
-        assertNoUnexpectedThreadsStarted(null, new String[] { "globalEventExecutor-1-1", "globalEventExecutor-1-2" });
+        assertNoUnexpectedThreadsStarted(new String[] { "ObjectCleanerThread",
+                                                        "globalEventExecutor-[1-9]-[1-9]",
+                                                        "globalEventExecutor-[1-9]-[1-9]",
+                                                        "Shutdown-checker",
+                                                        "cluster[0-9]-connection-reaper-[0-9]" });
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -106,7 +117,11 @@
         if (!(tool.getException().getCause().getCause().getCause() instanceof NoHostAvailableException))
             throw tool.getException();
 
-        assertNoUnexpectedThreadsStarted(null, new String[] { "globalEventExecutor-1-1", "globalEventExecutor-1-2" });
+        assertNoUnexpectedThreadsStarted(new String[] { "ObjectCleanerThread",
+                                                        "globalEventExecutor-[1-9]-[1-9]",
+                                                        "globalEventExecutor-[1-9]-[1-9]",
+                                                        "Shutdown-checker",
+                                                        "cluster[0-9]-connection-reaper-[0-9]" });
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -137,4 +152,50 @@
         assertEquals(-1, tool.getExitCode());
         throw tool.getException().getCause().getCause().getCause();
     }
+
+    @Test(expected = NoHostAvailableException.class)
+    public void testBulkLoader_WithArgs5() throws Throwable
+    {
+        ToolResult tool = ToolRunner.invokeClass(BulkLoader.class,
+                                                 "-d",
+                                                 "127.9.9.1:9041",
+                                                 "--throttle",
+                                                 "10",
+                                                 "--inter-dc-throttle",
+                                                 "15",
+                                                 "--entire-sstable-throttle-mib",
+                                                 "20",
+                                                 "--entire-sstable-inter-dc-throttle-mib",
+                                                 "25",
+                                                 OfflineToolUtils.sstableDirName("legacy_sstables", "legacy_ma_simple"));
+        assertEquals(-1, tool.getExitCode());
+        assertEquals(10 * 125_000, DatabaseDescriptor.getStreamThroughputOutboundBytesPerSec(), 0.0);
+        assertEquals(15 * 125_000, DatabaseDescriptor.getInterDCStreamThroughputOutboundBytesPerSec(), 0.0);
+        assertEquals(20, DatabaseDescriptor.getEntireSSTableStreamThroughputOutboundMebibytesPerSec(), 0.0);
+        assertEquals(25, DatabaseDescriptor.getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(), 0.0);
+        throw tool.getException().getCause().getCause().getCause();
+    }
+
+    @Test(expected = NoHostAvailableException.class)
+    public void testBulkLoader_WithArgs6() throws Throwable
+    {
+        ToolResult tool = ToolRunner.invokeClass(BulkLoader.class,
+                                                 "-d",
+                                                 "127.9.9.1:9041",
+                                                 "--throttle-mib",
+                                                 "3",
+                                                 "--inter-dc-throttle-mib",
+                                                 "4",
+                                                 "--entire-sstable-throttle-mib",
+                                                 "5",
+                                                 "--entire-sstable-inter-dc-throttle-mib",
+                                                 "6",
+                                                 OfflineToolUtils.sstableDirName("legacy_sstables", "legacy_ma_simple"));
+        assertEquals(-1, tool.getExitCode());
+        assertEquals(3 * 1024 * 1024, DatabaseDescriptor.getStreamThroughputOutboundBytesPerSec(), 0.0);
+        assertEquals(4 * 1024 * 1024, DatabaseDescriptor.getInterDCStreamThroughputOutboundBytesPerSec(), 0.0);
+        assertEquals(5, DatabaseDescriptor.getEntireSSTableStreamThroughputOutboundMebibytesPerSec(), 0.0);
+        assertEquals(6, DatabaseDescriptor.getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(), 0.0);
+        throw tool.getException().getCause().getCause().getCause();
+    }
 }
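
The two new BulkLoader tests pin down the unit conversions behind the throttle flags: the legacy --throttle and --inter-dc-throttle values are megabits per second (1 Mbit/s = 125,000 bytes/s, hence 10 * 125_000), while the *-mib flags are mebibytes per second (1 MiB = 1,048,576 bytes, hence 3 * 1024 * 1024). A tiny self-check of that arithmetic, with illustrative names:

// Unit conversions asserted by the tests above.
final class ThrottleUnits
{
    private ThrottleUnits() {}

    static long megabitsPerSecToBytesPerSec(long megabits)
    {
        return megabits * 1_000_000 / 8;    // = megabits * 125_000
    }

    static long mebibytesPerSecToBytesPerSec(long mebibytes)
    {
        return mebibytes * 1024 * 1024;     // 1 MiB = 1,048,576 bytes
    }

    public static void main(String[] args)
    {
        System.out.println(megabitsPerSecToBytesPerSec(10) == 10 * 125_000);     // --throttle 10     -> true
        System.out.println(mebibytesPerSecToBytesPerSec(3) == 3L * 1024 * 1024); // --throttle-mib 3  -> true
    }
}
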
diff --git a/test/unit/org/apache/cassandra/tools/ClearSnapshotTest.java b/test/unit/org/apache/cassandra/tools/ClearSnapshotTest.java
deleted file mode 100644
index 975e45b..0000000
--- a/test/unit/org/apache/cassandra/tools/ClearSnapshotTest.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.tools;
-
-import java.io.IOException;
-import java.util.Map;
-
-import javax.management.openmbean.TabularData;
-
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.tools.ToolRunner.ToolResult;
-import org.hamcrest.CoreMatchers;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-
-public class ClearSnapshotTest extends CQLTester
-{
-    private static NodeProbe probe;
-
-    @BeforeClass
-    public static void setup() throws Exception
-    {
-        startJMXServer();
-        probe = new NodeProbe(jmxHost, jmxPort);
-    }
-
-    @AfterClass
-    public static void teardown() throws IOException
-    {
-        probe.close();
-    }
-
-    @Test
-    public void testClearSnapshot_NoArgs()
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("clearsnapshot");
-        assertEquals(2, tool.getExitCode());
-        assertTrue("Tool stderr: " +  tool.getCleanedStderr(), tool.getCleanedStderr().contains("Specify snapshot name or --all"));
-        
-        tool = ToolRunner.invokeNodetool("clearsnapshot", "--all");
-        tool.assertOnCleanExit();
-    }
-
-    @Test
-    public void testClearSnapshot_AllAndName()
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("clearsnapshot", "-t", "some-name", "--all");
-        assertEquals(2, tool.getExitCode());
-        assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Specify only one of snapshot name or --all"));
-    }
-
-    @Test
-    public void testClearSnapshot_RemoveByName()
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("snapshot","-t","some-name");
-        tool.assertOnCleanExit();
-        assertTrue(!tool.getStdout().isEmpty());
-        
-        Map<String, TabularData> snapshots_before = probe.getSnapshotDetails();
-        Assert.assertTrue(snapshots_before.containsKey("some-name"));
-        
-        tool = ToolRunner.invokeNodetool("clearsnapshot","-t","some-name");
-        tool.assertOnCleanExit();
-        assertTrue(!tool.getStdout().isEmpty());
-        
-        Map<String, TabularData> snapshots_after = probe.getSnapshotDetails();
-        Assert.assertFalse(snapshots_after.containsKey("some-name"));
-    }
-
-    @Test
-    public void testClearSnapshot_RemoveMultiple()
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("snapshot","-t","some-name");
-        tool.assertOnCleanExit();
-        assertTrue(!tool.getStdout().isEmpty());
-
-        tool = ToolRunner.invokeNodetool("snapshot","-t","some-other-name");
-        tool.assertOnCleanExit();
-            assertTrue(!tool.getStdout().isEmpty());
-
-        Map<String, TabularData> snapshots_before = probe.getSnapshotDetails();
-        Assert.assertTrue(snapshots_before.size() == 2);
-
-        tool = ToolRunner.invokeNodetool("clearsnapshot","--all");
-        tool.assertOnCleanExit();
-        assertTrue(!tool.getStdout().isEmpty());
-        
-        Map<String, TabularData> snapshots_after = probe.getSnapshotDetails();
-        Assert.assertTrue(snapshots_after.size() == 0);
-    }
-    
-}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/tools/CompactionStressTest.java b/test/unit/org/apache/cassandra/tools/CompactionStressTest.java
index 7125f2f..176a240 100644
--- a/test/unit/org/apache/cassandra/tools/CompactionStressTest.java
+++ b/test/unit/org/apache/cassandra/tools/CompactionStressTest.java
@@ -18,8 +18,8 @@
 
 package org.apache.cassandra.tools;
 
-import java.io.File;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import org.apache.cassandra.tools.ToolRunner.ToolResult;
@@ -38,7 +38,7 @@
     {
         ClassLoader classLoader = getClass().getClassLoader();
         File file = new File(classLoader.getResource("blogpost.yaml").getFile());
-        String profileFile = file.getAbsolutePath();
+        String profileFile = file.absolutePath();
 
         ToolResult tool = ToolRunner.invokeClass("org.apache.cassandra.stress.CompactionStress",
                                                  "write",
diff --git a/test/unit/org/apache/cassandra/tools/GetFullQueryLogTest.java b/test/unit/org/apache/cassandra/tools/GetFullQueryLogTest.java
deleted file mode 100644
index 44007a5..0000000
--- a/test/unit/org/apache/cassandra/tools/GetFullQueryLogTest.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.tools;
-
-import java.io.IOException;
-
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.fql.FullQueryLoggerOptions;
-import org.apache.cassandra.tools.ToolRunner.ToolResult;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class GetFullQueryLogTest extends CQLTester
-{
-    private static NodeProbe probe;
-
-    @ClassRule
-    public static TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-    @BeforeClass
-    public static void setup() throws Exception
-    {
-        startJMXServer();
-        probe = new NodeProbe(jmxHost, jmxPort);
-    }
-
-    @AfterClass
-    public static void teardown() throws IOException
-    {
-        probe.close();
-    }
-
-    @After
-    public void afterTest() throws InterruptedException
-    {
-        disableFullQueryLog();
-    }
-
-    @Test
-    public void getFullQueryLogTest()
-    {
-        testDefaultOutput(getFullQueryLog());
-    }
-
-    @Test
-    public void enableFullQueryLogTest()
-    {
-        enableFullQueryLog();
-        testChangedOutput(getFullQueryLog());
-    }
-
-    @Test
-    public void resetFullQueryLogTest()
-    {
-        enableFullQueryLog();
-        testChangedOutput(getFullQueryLog());
-
-        // reset and get and test that it reset configuration into defaults
-        resetFullQueryLog();
-
-        testDefaultOutput(getFullQueryLog());
-    }
-
-    private String getFullQueryLog()
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("getfullquerylog");
-        tool.assertOnCleanExit();
-        return tool.getStdout();
-    }
-
-    private void resetFullQueryLog()
-    {
-        ToolRunner.invokeNodetool("resetfullquerylog").assertOnCleanExit();
-    }
-
-    private void disableFullQueryLog()
-    {
-        ToolRunner.invokeNodetool("disablefullquerylog").assertOnCleanExit();
-    }
-
-    private void enableFullQueryLog()
-    {
-        ToolRunner.invokeNodetool("enablefullquerylog",
-                                  "--path",
-                                  temporaryFolder.getRoot().toString(),
-                                  "--blocking",
-                                  "false",
-                                  "--max-archive-retries",
-                                  "5",
-                                  "--archive-command",
-                                  "/path/to/script.sh %path",
-                                  "--max-log-size",
-                                  "100000",
-                                  "--max-queue-weight",
-                                  "10000",
-                                  "--roll-cycle",
-                                  "DAILY")
-                  .assertOnCleanExit();
-    }
-
-    private void testChangedOutput(final String getFullQueryLogOutput)
-    {
-        final String output = getFullQueryLogOutput.replaceAll("( )+", " ").trim();
-        assertTrue(output.contains("enabled true"));
-        assertTrue(output.contains("log_dir " + temporaryFolder.getRoot().toString()));
-        assertTrue(output.contains("archive_command /path/to/script.sh %path"));
-        assertTrue(output.contains("roll_cycle DAILY"));
-        assertTrue(output.contains("max_log_size 100000"));
-        assertTrue(output.contains("max_queue_weight 10000"));
-        assertTrue(output.contains("max_archive_retries 5"));
-        assertTrue(output.contains("block false"));
-    }
-
-    private void testDefaultOutput(final String getFullQueryLogOutput)
-    {
-        final FullQueryLoggerOptions options = new FullQueryLoggerOptions();
-        final String output = getFullQueryLogOutput.replaceAll("( )+", " ").trim();
-        assertTrue(output.contains("enabled false"));
-        assertFalse(output.contains("log_dir " + temporaryFolder.getRoot().toString()));
-        assertFalse(output.contains("archive_command /path/to/script.sh %path"));
-        assertTrue(output.contains("roll_cycle " + options.roll_cycle));
-        assertTrue(output.contains("max_log_size " + options.max_log_size));
-        assertTrue(output.contains("max_queue_weight " + options.max_queue_weight));
-        assertTrue(output.contains("max_archive_retries " + options.max_archive_retries));
-        assertTrue(output.contains("block " + options.block));
-    }
-}
diff --git a/test/unit/org/apache/cassandra/tools/GetVersionTest.java b/test/unit/org/apache/cassandra/tools/GetVersionTest.java
index b23c1fe..9eaf57f 100644
--- a/test/unit/org/apache/cassandra/tools/GetVersionTest.java
+++ b/test/unit/org/apache/cassandra/tools/GetVersionTest.java
@@ -29,7 +29,7 @@
     {
         ToolResult tool = ToolRunner.invokeClass(GetVersion.class);
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/HashPasswordTest.java b/test/unit/org/apache/cassandra/tools/HashPasswordTest.java
new file mode 100644
index 0000000..f84bd8b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/HashPasswordTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.Collections;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner.ToolResult;
+import org.mindrot.jbcrypt.BCrypt;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public class HashPasswordTest extends CQLTester
+{
+    @Rule
+    public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+    private static final String plaintextPassword = "foobar";
+    private static final String hashPasswordTool = "tools/bin/hash_password";
+
+    /* If the help output changes, the docs need to be updated as well */
+    @Test
+    public void testHelpAndShouldChangeDocs()
+    {
+        ToolResult tool = ToolRunner.invoke(hashPasswordTool, "-h");
+        tool.assertOnCleanExit();
+        String help = "usage: hash_password [options]\n" +
+                       "--\n" +
+                       "Hashes a plain text password and prints the hashed password.\n" +
+                       "Options are:\n" +
+                       " -e,--environment-var <arg>   Use value of the specified environment\n" +
+                       "                              variable as the password\n" +
+                       " -h,--help                    Display this help message\n" +
+                       " -i,--input <arg>             Input is a file (or - for stdin) to read the\n" +
+                       "                              password from. Make sure that the whole input including newlines is\n" +
+                       "                              considered. For example, the shell command 'echo -n foobar | hash_password\n" +
+                       "                              -i -' will work as intended and just hash 'foobar'.\n" +
+                       " -p,--plain <arg>             Argument is the plain text password\n" +
+                       " -r,--logrounds <arg>         Number of hash rounds (default: 10).\n";
+        assertEquals(help, tool.getStdout());
+    }
+
+    @Test
+    public void testPlain()
+    {
+        ToolResult tool = ToolRunner.invoke(hashPasswordTool, "--plain", plaintextPassword);
+        tool.assertOnCleanExit();
+        String hashed = tool.getStdout();
+        assertTrue("Hashed password does not validate: " + hashed, BCrypt.checkpw(plaintextPassword, hashed));
+    }
+
+    @Test
+    public void testStdIn()
+    {
+        ToolResult tool = ToolRunner.invoke(Collections.emptyMap(),
+                                            new ByteArrayInputStream(plaintextPassword.getBytes()),
+                                            Arrays.asList(hashPasswordTool, "--input", "-"));
+        tool.assertOnCleanExit();
+        String hashed = tool.getStdout();
+        assertTrue("Hashed password does not validate: " + hashed, BCrypt.checkpw(plaintextPassword, hashed));
+    }
+
+    @Test
+    public void testFile() throws IOException
+    {
+        File file = temporaryFolder.newFile();
+        Files.write(file.toPath(), plaintextPassword.getBytes());
+
+        ToolResult tool = ToolRunner.invoke(hashPasswordTool, "--input", file.getAbsolutePath());
+        tool.assertOnCleanExit();
+        String hashed = tool.getStdout();
+        assertTrue("Hashed password does not validate: " + hashed, BCrypt.checkpw(plaintextPassword, hashed));
+    }
+
+    @Test
+    public void testEnvVar()
+    {
+        ToolResult tool = ToolRunner.invoke(Collections.singletonMap("THE_PASSWORD", plaintextPassword),
+                                            null,
+                                            Arrays.asList(hashPasswordTool, "--environment-var", "THE_PASSWORD"));
+        tool.assertOnCleanExit();
+        String hashed = tool.getStdout();
+        assertTrue("Hashed password does not validate: " + hashed, BCrypt.checkpw(plaintextPassword, hashed));
+    }
+
+    @Test
+    public void testLogRounds()
+    {
+        ToolResult tool = ToolRunner.invoke(hashPasswordTool, "--plain", plaintextPassword, "-r", "10");
+        tool.assertOnCleanExit();
+        String hashed = tool.getStdout();
+        assertTrue("Hashed password does not validate: " + hashed, BCrypt.checkpw(plaintextPassword, hashed));
+    }
+
+    @Test
+    public void testShortPass()
+    {
+        ToolResult tool = ToolRunner.invoke(hashPasswordTool, "--plain", "A");
+        tool.assertOnExitCode();
+        assertThat(tool.getStderr(), containsString("password is very short"));
+    }
+
+    @Test
+    public void testErrorMessages()
+    {
+        assertToolError("One of the options --environment-var, --plain or --input must be used.", hashPasswordTool);
+        assertToolError("Environment variable 'non_existing_environment_variable_name' is undefined.",
+                        hashPasswordTool,
+                        "--environment-var",
+                        "non_existing_environment_variable_name");
+        assertToolError("Failed to read from '/foo/bar/baz/blah/yadda': ",
+                        hashPasswordTool,
+                        "--input",
+                        "/foo/bar/baz/blah/yadda");
+    }
+
+    private static void assertToolError(String expectedMessage, String... args)
+    {
+        ToolResult tool = ToolRunner.invoke(args);
+        assertEquals(1, tool.getExitCode());
+        assertThat(tool.getStderr(), containsString(expectedMessage));
+    }
+}
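
These tests feed the plaintext to the hash_password tool in several ways and verify the printed hash with jBCrypt's checkpw. For reference, the same round trip can be sketched directly against the jBCrypt API; the log-rounds value of 10 mirrors the tool's documented default.

import org.mindrot.jbcrypt.BCrypt;

// Minimal jBCrypt round trip: hash with a salt generated for 10 log rounds,
// then verify the plaintext against the stored hash, as the assertions above do.
public final class BCryptRoundTrip
{
    public static void main(String[] args)
    {
        String plaintext = "foobar";
        String hashed = BCrypt.hashpw(plaintext, BCrypt.gensalt(10));
        System.out.println(hashed);
        System.out.println(BCrypt.checkpw(plaintext, hashed)); // prints true
    }
}
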
diff --git a/test/unit/org/apache/cassandra/tools/JMXCompatabilityTest.java b/test/unit/org/apache/cassandra/tools/JMXCompatabilityTest.java
index cb9f5a5..ede3d6c 100644
--- a/test/unit/org/apache/cassandra/tools/JMXCompatabilityTest.java
+++ b/test/unit/org/apache/cassandra/tools/JMXCompatabilityTest.java
@@ -160,6 +160,16 @@
         diff(excludeObjects, excludeAttributes, excludeOperations, "test/data/jmxdump/cassandra-4.0-jmx.yaml");
     }
 
+    @Test
+    public void diff41() throws Throwable
+    {
+        List<String> excludeObjects = Arrays.asList();
+        List<String> excludeAttributes = Arrays.asList();
+        List<String> excludeOperations = Arrays.asList();
+
+        diff(excludeObjects, excludeAttributes, excludeOperations, "test/data/jmxdump/cassandra-4.1-jmx.yaml");
+    }
+
     private void diff(List<String> excludeObjects, List<String> excludeAttributes, List<String> excludeOperations, String original) throws Throwable
     {
         setupStandardTables();
diff --git a/test/unit/org/apache/cassandra/tools/JMXStandardsTest.java b/test/unit/org/apache/cassandra/tools/JMXStandardsTest.java
new file mode 100644
index 0000000..87b9ff9
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/JMXStandardsTest.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import javax.management.ObjectName;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.TabularData;
+
+import com.google.common.collect.ImmutableSet;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.utils.BreaksJMX;
+import org.assertj.core.api.Assertions;
+import org.reflections.Reflections;
+import org.reflections.scanners.Scanners;
+import org.reflections.util.ConfigurationBuilder;
+
+public class JMXStandardsTest
+{
+    private static final Logger logger = LoggerFactory.getLogger(JMXStandardsTest.class);
+
+    /**
+     * JMX typically works well with java.* and javax.* types, but not all of them are serializable and will work,
+     * so this class goes with an explicit approval list. New APIs may fail if they use a java.* or javax.* type
+     * that is not in this allow list; if that is the case, it is fine to add the type here.
+     * <p>
+     * It is never fine to allow types outside java.* and javax.*: clients cannot handle them, so they should never
+     * be allowed.
+     */
+    private static final Set<Class<?>> ALLOWED_TYPES = ImmutableSet.<Class<?>>builder()
+                                                       .add(Void.class).add(Void.TYPE)
+                                                       .add(Boolean.class).add(Boolean.TYPE)
+                                                       .add(Byte.class).add(Byte.TYPE)
+                                                       .add(Short.class).add(Short.TYPE)
+                                                       .add(Integer.class).add(Integer.TYPE)
+                                                       .add(Long.class).add(Long.TYPE)
+                                                       .add(Float.class).add(Float.TYPE)
+                                                       .add(Double.class).add(Double.TYPE)
+                                                       .add(String.class)
+                                                       .add(ByteBuffer.class)
+                                                       .add(InetAddress.class)
+                                                       .add(File.class)
+                                                       .add(List.class).add(Map.class).add(Set.class).add(SortedMap.class).add(Collection.class)
+                                                       .add(ObjectName.class).add(TabularData.class).add(CompositeData.class)
+                                                       // Exceptions
+                                                       // https://www.oracle.com/java/technologies/javase/management-extensions-best-practices.html
+                                                       // "It is recommended that exceptions thrown by MBeans be drawn from
+                                                       // the standard set defined in the java.* and javax.* packages on the
+                                                       // Java SE platform. If an MBean throws a non-standard exception, a
+                                                       // client that does not have that exception class will likely see
+                                                       // another exception such as ClassNotFoundException instead."
+                                                       .add(ExecutionException.class)
+                                                       .add(InterruptedException.class)
+                                                       .add(UnknownHostException.class)
+                                                       .add(IOException.class)
+                                                       .add(TimeoutException.class)
+                                                       .add(IllegalStateException.class)
+                                                       .add(ClassNotFoundException.class)
+                                                       .add(OpenDataException.class)
+                                                       .build();
+    /**
+     * These types live under java.* and javax.* but are too vague and may cause issues; this does not mean issues
+     * will definitely happen with JMX, only that they can surface at runtime and cannot be detected at
+     * compile time.
+     */
+    private static final Set<Class<?>> DANGEROUS_TYPES = ImmutableSet.<Class<?>>builder()
+                                                         .add(Object.class)
+                                                         .add(Comparable.class)
+                                                         .add(Serializable.class)
+                                                         .add(Exception.class)
+                                                         .build();
+
+    @Test
+    public void interfaces() throws ClassNotFoundException
+    {
+        Reflections reflections = new Reflections(ConfigurationBuilder.build("org.apache.cassandra").setExpandSuperTypes(false));
+        Pattern mbeanPattern = Pattern.compile(".*MBean$");
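+        // Collect every type under org.apache.cassandra whose name ends in "MBean"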
+        Set<String> matches = reflections.getAll(Scanners.SubTypes).stream()
+                                         .filter(s -> mbeanPattern.matcher(s).find())
+                                         .collect(Collectors.toSet());
+
+        List<String> warnings = new ArrayList<>();
+        List<String> errors = new ArrayList<>();
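+        // For each candidate MBean interface, validate the return, parameter, and exception types of every declared method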
+        for (String className : matches)
+        {
+            for (Class<?> klass = Class.forName(className); klass != null && !Object.class.equals(klass); klass = klass.getSuperclass())
+            {
+                Assertions.assertThat(klass).isInterface();
+                Method[] methods = klass.getDeclaredMethods();
+                for (int i = 0; i < methods.length; i++)
+                {
+                    Method method = methods[i];
+                    checkType(method, "return", method.getGenericReturnType(), warnings, errors);
+                    Stream.of(method.getGenericParameterTypes()).forEach(t -> checkType(method, "parameter", t, warnings, errors));
+                    Stream.of(method.getGenericExceptionTypes()).forEach(t -> checkType(method, "throws", t, warnings, errors));
+                }
+            }
+        }
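+        // Dangerous or @BreaksJMX-annotated usages only produce warnings; unsupported types fail the test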
+        if (!warnings.isEmpty())
+            warnings.forEach(logger::warn);
+        if (!errors.isEmpty())
+            throw new AssertionError("Errors detected while validating MBeans\n" + String.join("\n", errors));
+    }
+
+    private static void checkType(Method method, String sig, Type type, Collection<String> warnings, Collection<String> errors)
+    {
+        if (type instanceof Class<?>)
+        {
+            Class<?> klass = (Class<?>) type;
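+            // Unwrap array types down to their component type before checking against the allow list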
+            int numArrays = 0;
+            while (klass.isArray())
+            {
+                numArrays++;
+                klass = klass.getComponentType();
+            }
+            if (!ALLOWED_TYPES.contains(klass))
+            {
+                StringBuilder typeName = new StringBuilder(klass.getCanonicalName());
+                for (int i = 0; i < numArrays; i++)
+                    typeName.append("[]");
+                if (DANGEROUS_TYPES.contains(klass))
+                {
+                    warnings.add(String.format("Dangerous type used at signature %s, type %s; method '%s'", sig, typeName, method));
+                }
+                else
+                {
+                    String msg = String.format("Error at signature %s; type %s is not in the supported set of types, method '%s'", sig, typeName, method);
+                    (method.isAnnotationPresent(BreaksJMX.class) ? warnings : errors).add(msg);
+                }
+            }
+        }
+        else if (type instanceof ParameterizedType)
+        {
+            ParameterizedType param = (ParameterizedType) type;
+            Type klass = param.getRawType();
+            Type[] args = param.getActualTypeArguments();
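+            // Recursively check both the raw type and each type argument, e.g. Map<String, CompositeData>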
+            checkType(method, sig + ": " + param, klass, warnings, errors);
+            Stream.of(args).forEach(t -> checkType(method, sig + " of " + param, t, warnings, errors));
+        }
+        else
+        {
+            Assert.fail("Unknown type: " + type.getClass());
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/JMXToolTest.java b/test/unit/org/apache/cassandra/tools/JMXToolTest.java
index e6f4615..dbd18f3 100644
--- a/test/unit/org/apache/cassandra/tools/JMXToolTest.java
+++ b/test/unit/org/apache/cassandra/tools/JMXToolTest.java
@@ -187,4 +187,4 @@
     {
         return SourceDSL.maps().of(Generators.IDENTIFIER_GEN, infoGen).ofSizeBetween(0, 10);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java b/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java
index b1fe732..4fc2325 100644
--- a/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java
+++ b/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java
@@ -17,32 +17,38 @@
  */
 
 package org.apache.cassandra.tools;
-import java.io.File;
+
+import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.file.Paths;
+import java.security.Permission;
 
 import com.google.common.net.HostAndPort;
 import org.junit.Test;
 
+import org.apache.cassandra.io.util.File;
+
 import static org.apache.cassandra.tools.OfflineToolUtils.sstableDirName;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 // LoaderOptionsTester for custom configuration
 public class LoaderOptionsTest
 {
     @Test
-    public void testNativePort() throws Exception {
+    public void testNativePort() throws Exception
+    {
         //Default Cassandra config
-        File config = Paths.get(".", "test", "conf", "cassandra.yaml").normalize().toFile();
-        String[] args = {"-d", "127.9.9.1", "-f", config.getAbsolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple")};
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
         LoaderOptions options = LoaderOptions.builder().parseArgs(args).build();
         assertEquals(9042, options.nativePort);
 
 
         // SSL Enabled Cassandra config
-        config = Paths.get(".", "test", "conf", "unit-test-conf/test-native-port.yaml").normalize().toFile();
-        String[] args2 = {"-d", "127.9.9.1", "-f", config.getAbsolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple")};
+        config = new File(Paths.get(".", "test", "conf", "unit-test-conf/test-native-port.yaml").normalize());
+        String[] args2 = { "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
         options = LoaderOptions.builder().parseArgs(args2).build();
         assertEquals(9142, options.nativePort);
 
@@ -52,8 +58,8 @@
 
         // test native port set from command line
 
-        config = Paths.get(".", "test", "conf", "unit-test-conf/test-native-port.yaml").normalize().toFile();
-        String[] args3 = {"-d", "127.9.9.1", "-p", "9300", "-f", config.getAbsolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple")};
+        config = new File(Paths.get(".", "test", "conf", "unit-test-conf/test-native-port.yaml").normalize());
+        String[] args3 = { "-d", "127.9.9.1", "-p", "9300", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
         options = LoaderOptions.builder().parseArgs(args3).build();
         assertEquals(9300, options.nativePort);
 
@@ -72,11 +78,142 @@
     public void testEncryptionSettings() throws Exception
     {
         String[] args = { "-d", "127.9.9.1", "-ts", "test.jks", "-tspw", "truststorePass1", "-ks", "test.jks", "-kspw",
-                "testdata1", "--ssl-ciphers", "TLS_RSA_WITH_AES_256_CBC_SHA",
-                "--ssl-alg", "SunX509", "--store-type", "JKS", "--ssl-protocol", "TLS",
-                sstableDirName("legacy_sstables", "legacy_ma_simple") };
+                          "testdata1", "--ssl-ciphers", "TLS_RSA_WITH_AES_256_CBC_SHA",
+                          "--ssl-alg", "SunX509", "--store-type", "JKS", "--ssl-protocol", "TLS",
+                          sstableDirName("legacy_sstables", "legacy_ma_simple") };
         LoaderOptions options = LoaderOptions.builder().parseArgs(args).build();
         assertEquals("test.jks", options.clientEncOptions.keystore);
     }
+
+    @Test
+    public void testThrottleDefaultSettings()
+    {
+        LoaderOptions options = LoaderOptions.builder().build();
+        assertEquals(0, options.throttleBytes, 0);
+        assertEquals(0, options.interDcThrottleBytes, 0);
+    }
+
+    @Test
+    public void testDeprecatedThrottleSettings() throws IOException
+    {
+        // Default Cassandra config
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "-t", "200", "-idct", "400", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+        LoaderOptions options = LoaderOptions.builder().parseArgs(args).build();
+        // converts from megabits to bytes
+        assertEquals(200 * 125_000, options.throttleBytes, 0);
+        assertEquals(400 * 125_000, options.interDcThrottleBytes, 0);
+    }
+
+    @Test
+    public void testDeprecatedThrottleSettingsWithLongSettingNames() throws IOException
+    {
+        // Default Cassandra config
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "--throttle", "200", "--inter-dc-throttle", "400", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+        LoaderOptions options = LoaderOptions.builder().parseArgs(args).build();
+        // converts from megabits to bytes
+        assertEquals(200 * 125_000, options.throttleBytes, 0);
+        assertEquals(400 * 125_000, options.interDcThrottleBytes, 0);
+    }
+
+    @Test
+    public void testThrottleSettingsWithLongSettingNames() throws IOException
+    {
+        // Default Cassandra config
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "--throttle-mib", "24", "--inter-dc-throttle-mib", "48", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+        LoaderOptions options = LoaderOptions.builder().parseArgs(args).build();
+        // converts from mebibytes to bytes
+        assertEquals(24 * 1024 * 1024, options.throttleBytes, 0);
+        assertEquals(48 * 1024 * 1024, options.interDcThrottleBytes, 0);
+    }
+
+    @Test
+    public void failsWhenThrottleSettingAndDeprecatedAreProvided() throws IOException
+    {
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "-t", "200", "-tmib", "200", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+
+        failureHelper(args, 1);
+    }
+
+    @Test
+    public void failsWhenThrottleSettingAndDeprecatedAreProvidedWithLongSettingNames() throws IOException
+    {
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "--throttle", "200", "--throttle-mib", "200", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+
+        failureHelper(args, 1);
+    }
+
+    @Test
+    public void failsWhenInterDCThrottleSettingAndDeprecatedAreProvided() throws IOException
+    {
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "-idct", "200", "-idctmib", "200", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+
+        failureHelper(args, 1);
+    }
+
+    @Test
+    public void failsWhenInterDCThrottleSettingAndDeprecatedAreProvidedWithLongSettingNames() throws IOException
+    {
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = { "--inter-dc-throttle", "200", "--inter-dc-throttle-mib", "200", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+
+        failureHelper(args, 1);
+    }
+
+    @Test
+    public void testEntireSSTableDefaultSettings()
+    {
+        LoaderOptions options = LoaderOptions.builder().build();
+        assertEquals(0, options.entireSSTableThrottleMebibytes);
+        assertEquals(0, options.entireSSTableInterDcThrottleMebibytes);
+    }
+
+    @Test
+    public void testEntireSSTableSettingsWithLongSettingNames() throws IOException
+    {
+        // Use long names for the args, e.g. --entire-sstable-throttle-mib
+        File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize());
+        String[] args = new String[]{ "--entire-sstable-throttle-mib", "350", "--entire-sstable-inter-dc-throttle-mib", "600", "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple") };
+        LoaderOptions options = LoaderOptions.builder().parseArgs(args).build();
+        assertEquals(350, options.entireSSTableThrottleMebibytes);
+        assertEquals(600, options.entireSSTableInterDcThrottleMebibytes);
+    }
+
+    private void failureHelper(String[] args, int expectedErrorCode)
+    {
+        // install security manager to get informed about the exit-code
+        System.setSecurityManager(new SecurityManager()
+        {
+            public void checkExit(int status)
+            {
+                throw new SystemExitException(status);
+            }
+
+            public void checkPermission(Permission perm)
+            {
+            }
+
+            public void checkPermission(Permission perm, Object context)
+            {
+            }
+        });
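+        // Parsing conflicting options is expected to call System.exit, which the SecurityManager above turns into SystemExitException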
+        try
+        {
+            LoaderOptions.builder().parseArgs(args).build();
+        }
+        catch (SystemExitException e)
+        {
+            assertEquals(expectedErrorCode, e.status);
+        }
+        finally
+        {
+            System.setSecurityManager(null);
+        }
+    }
 }
 
diff --git a/test/unit/org/apache/cassandra/tools/NodeToolGossipInfoTest.java b/test/unit/org/apache/cassandra/tools/NodeToolGossipInfoTest.java
deleted file mode 100644
index 8e9735b..0000000
--- a/test/unit/org/apache/cassandra/tools/NodeToolGossipInfoTest.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.tools;
-
-import java.io.IOException;
-
-import org.apache.commons.lang3.StringUtils;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.net.NoPayload;
-import org.apache.cassandra.tools.ToolRunner.ToolResult;
-import org.apache.cassandra.utils.FBUtilities;
-import org.assertj.core.api.Assertions;
-
-import static org.apache.cassandra.net.Verb.ECHO_REQ;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class NodeToolGossipInfoTest extends CQLTester
-{
-    private static NodeProbe probe;
-
-    @BeforeClass
-    public static void setup() throws Exception
-    {
-        requireNetwork();
-        startJMXServer();
-        probe = new NodeProbe(jmxHost, jmxPort);
-    }
-
-    @AfterClass
-    public static void teardown() throws IOException
-    {
-        probe.close();
-    }
-
-    @Test
-    public void testMaybeChangeDocs()
-    {
-        // If you added, modified options or help, please update docs if necessary
-        ToolResult tool = ToolRunner.invokeNodetool("help", "gossipinfo");
-        String help =   "NAME\n" + 
-                        "        nodetool gossipinfo - Shows the gossip information for the cluster\n" + 
-                        "\n" + 
-                        "SYNOPSIS\n" + 
-                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" + 
-                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" + 
-                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" + 
-                        "                [(-u <username> | --username <username>)] gossipinfo\n" + 
-                        "\n" + 
-                        "OPTIONS\n" + 
-                        "        -h <host>, --host <host>\n" + 
-                        "            Node hostname or ip address\n" + 
-                        "\n" + 
-                        "        -p <port>, --port <port>\n" + 
-                        "            Remote jmx agent port number\n" + 
-                        "\n" + 
-                        "        -pp, --print-port\n" + 
-                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" + 
-                        "\n" + 
-                        "        -pw <password>, --password <password>\n" + 
-                        "            Remote jmx agent password\n" + 
-                        "\n" + 
-                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" + 
-                        "            Path to the JMX password file\n" + 
-                        "\n" + 
-                        "        -u <username>, --username <username>\n" + 
-                        "            Remote jmx agent username\n" + 
-                        "\n" + 
-                        "\n";
-        Assertions.assertThat(tool.getStdout()).isEqualTo(help);
-    }
-
-    @Test
-    public void testGossipInfo() throws Throwable
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("gossipinfo");
-        Assertions.assertThat(tool.getStdout()).contains("/127.0.0.1");
-        Assertions.assertThat(tool.getStdout()).containsPattern("heartbeat:[0-9]+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("STATUS:[0-9]+:NORMAL,.+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("SCHEMA:.+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("DC:[0-9]+:datacenter1");
-        Assertions.assertThat(tool.getStdout()).containsPattern("RACK:[0-9]+:rack1");
-        Assertions.assertThat(tool.getStdout()).containsPattern("RELEASE_VERSION:.+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("RPC_ADDRESS:[0-9]+:127.0.0.1");
-        Assertions.assertThat(tool.getStdout()).containsPattern("NET_VERSION:[0-9]+:.+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("HOST_ID:[0-9]+:.+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("NATIVE_ADDRESS_AND_PORT:[0-9]+:127.0.0.1:[0-9]+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("STATUS_WITH_PORT:[0-9]+:NORMAL,.+");
-        Assertions.assertThat(tool.getStdout()).containsPattern("TOKENS:[0-9]+:<hidden>");
-        assertTrue(tool.getCleanedStderr().isEmpty());
-        assertEquals(0, tool.getExitCode());
-
-        // Make sure heartbeats are detected
-        Message<NoPayload> echoMessageOut = Message.out(ECHO_REQ, NoPayload.noPayload);
-        MessagingService.instance().send(echoMessageOut, FBUtilities.getBroadcastAddressAndPort());
-
-        String origHeartbeatCount = StringUtils.substringBetween(tool.getStdout(), "heartbeat:", "\n");
-        tool = ToolRunner.invokeNodetool("gossipinfo");
-        assertTrue(tool.getCleanedStderr().isEmpty());
-        assertEquals(0, tool.getExitCode());
-        String newHeartbeatCount = StringUtils.substringBetween(tool.getStdout(), "heartbeat:", "\n");
-        assertTrue(Integer.parseInt(origHeartbeatCount) <= Integer.parseInt(newHeartbeatCount));
-    }
-}
diff --git a/test/unit/org/apache/cassandra/tools/NodeToolTPStatsTest.java b/test/unit/org/apache/cassandra/tools/NodeToolTPStatsTest.java
deleted file mode 100644
index b4acc1b..0000000
--- a/test/unit/org/apache/cassandra/tools/NodeToolTPStatsTest.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.tools;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.lang3.tuple.Pair;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.net.NoPayload;
-import org.apache.cassandra.tools.ToolRunner.ToolResult;
-import org.apache.cassandra.utils.FBUtilities;
-import org.assertj.core.api.Assertions;
-import org.yaml.snakeyaml.Yaml;
-
-import static org.apache.cassandra.net.Verb.ECHO_REQ;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-public class NodeToolTPStatsTest extends CQLTester
-{
-    private static NodeProbe probe;
-
-    @BeforeClass
-    public static void setup() throws Exception
-    {
-        requireNetwork();
-        startJMXServer();
-        probe = new NodeProbe(jmxHost, jmxPort);
-    }
-
-    @AfterClass
-    public static void teardown() throws IOException
-    {
-        probe.close();
-    }
-
-    @Test
-    @SuppressWarnings("SingleCharacterStringConcatenation")
-    public void testMaybeChangeDocs()
-    {
-        // If you added, modified options or help, please update docs if necessary
-        ToolResult tool = ToolRunner.invokeNodetool("help", "tpstats");
-        String help =   "NAME\n" + 
-                        "        nodetool tpstats - Print usage statistics of thread pools\n" + 
-                        "\n" + 
-                        "SYNOPSIS\n" + 
-                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" + 
-                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" + 
-                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" + 
-                        "                [(-u <username> | --username <username>)] tpstats\n" + 
-                        "                [(-F <format> | --format <format>)]\n" + 
-                        "\n" + 
-                        "OPTIONS\n" + 
-                        "        -F <format>, --format <format>\n" + 
-                        "            Output format (json, yaml)\n" + 
-                        "\n" + 
-                        "        -h <host>, --host <host>\n" + 
-                        "            Node hostname or ip address\n" + 
-                        "\n" + 
-                        "        -p <port>, --port <port>\n" + 
-                        "            Remote jmx agent port number\n" + 
-                        "\n" + 
-                        "        -pp, --print-port\n" + 
-                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" + 
-                        "\n" + 
-                        "        -pw <password>, --password <password>\n" + 
-                        "            Remote jmx agent password\n" + 
-                        "\n" + 
-                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" + 
-                        "            Path to the JMX password file\n" + 
-                        "\n" + 
-                        "        -u <username>, --username <username>\n" + 
-                        "            Remote jmx agent username\n" +  
-                        "\n" + 
-                        "\n";
-        Assertions.assertThat(tool.getStdout()).isEqualTo(help);
-    }
-
-    @Test
-    public void testTPStats() throws Throwable
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("tpstats");
-        Assertions.assertThat(tool.getStdout()).containsPattern("Pool Name \\s* Active Pending Completed Blocked All time blocked");
-        Assertions.assertThat(tool.getStdout()).containsIgnoringCase("Latencies waiting in queue (micros) per dropped message types");
-        assertTrue(tool.getCleanedStderr().isEmpty());
-        assertEquals(0, tool.getExitCode());
-
-        // Does inserting data alter tpstats?
-        String nonZeroedThreadsRegExp = "((?m)\\D.*[1-9].*)";
-        ArrayList<String> origStats = getAllGroupMatches(nonZeroedThreadsRegExp, tool.getStdout());
-        Collections.sort(origStats);
-
-        createTable("CREATE TABLE %s (pk int, c int, PRIMARY KEY(pk))");
-        execute("INSERT INTO %s (pk, c) VALUES (?, ?)", 1, 1);
-        flush();
-
-        tool = ToolRunner.invokeNodetool("tpstats");
-        assertTrue(tool.getCleanedStderr().isEmpty());
-        assertEquals(0, tool.getExitCode());
-        ArrayList<String> newStats = getAllGroupMatches(nonZeroedThreadsRegExp, tool.getStdout());
-        Collections.sort(newStats);
-
-        assertNotEquals(origStats, newStats);
-
-        // Does sending a message alter Gossip & ECHO stats?
-        String origGossip = getAllGroupMatches("((?m)GossipStage.*)", tool.getStdout()).get(0);
-        Assertions.assertThat(tool.getStdout()).doesNotContainPattern("ECHO_REQ\\D.*[1-9].*");
-        Assertions.assertThat(tool.getStdout()).doesNotContainPattern("ECHO_RSP\\D.*[1-9].*");
-
-        Message<NoPayload> echoMessageOut = Message.out(ECHO_REQ, NoPayload.noPayload);
-        MessagingService.instance().send(echoMessageOut, FBUtilities.getBroadcastAddressAndPort());
-
-        tool = ToolRunner.invokeNodetool("tpstats");
-        assertTrue(tool.getCleanedStderr().isEmpty());
-        assertEquals(0, tool.getExitCode());
-        String newGossip = getAllGroupMatches("((?m)GossipStage.*)", tool.getStdout()).get(0);
-
-        assertNotEquals(origGossip, newGossip);
-        Assertions.assertThat(tool.getStdout()).containsPattern("ECHO_REQ\\D.*[1-9].*");
-        Assertions.assertThat(tool.getStdout()).containsPattern("ECHO_RSP\\D.*[1-9].*");
-    }
-
-    @Test
-    public void testFormatArg()
-    {
-        Arrays.asList(Pair.of("-F", "json"), Pair.of("--format", "json")).forEach(arg -> {
-            ToolResult tool = ToolRunner.invokeNodetool("tpstats", arg.getLeft(), arg.getRight());
-            String json = tool.getStdout();
-            assertThat(isJSONString(json)).isTrue();
-            assertThat(json).containsPattern("\"WaitLatencies\"\\s*:\\s*\\{\\s*\"");
-            assertTrue(tool.getCleanedStderr().isEmpty());
-            assertEquals(0, tool.getExitCode());
-        });
-
-        Arrays.asList( Pair.of("-F", "yaml"), Pair.of("--format", "yaml")).forEach(arg -> {
-            ToolResult tool = ToolRunner.invokeNodetool("tpstats", arg.getLeft(), arg.getRight());
-            String yaml = tool.getStdout();
-            assertThat(isYAMLString(yaml)).isTrue();
-            assertThat(yaml).containsPattern("WaitLatencies:\\s*[A-Z|_]+:\\s+-\\s");
-            assertTrue(tool.getCleanedStderr().isEmpty());
-            assertEquals(0, tool.getExitCode());
-        });
-    }
-
-    public static boolean isJSONString(String str)
-    {
-        try
-        {
-            ObjectMapper mapper = new ObjectMapper();
-            mapper.readTree(str);
-            return true;
-        }
-        catch(IOException e)
-        {
-            return false;
-        }
-    }
-
-    public static boolean isYAMLString(String str)
-    {
-        try
-        {
-            Yaml yaml = new Yaml();
-            yaml.load(str);
-            return true;
-        }
-        catch(Exception e)
-        {
-            return false;
-        }
-    }
-
-    private ArrayList<String> getAllGroupMatches(String regExp, String in)
-    {
-        Pattern pattern = Pattern.compile(regExp);
-        Matcher m = pattern.matcher(in);
-
-        ArrayList<String> matches = new ArrayList<>();
-        while (m.find())
-            matches.add(m.group(1));
-
-        return matches;
-    }
-}
diff --git a/test/unit/org/apache/cassandra/tools/NodetoolNetStatsTest.java b/test/unit/org/apache/cassandra/tools/NodetoolNetStatsTest.java
deleted file mode 100644
index bcf8704..0000000
--- a/test/unit/org/apache/cassandra/tools/NodetoolNetStatsTest.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.tools;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.List;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.net.Message;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.net.NoPayload;
-import org.apache.cassandra.schema.TableId;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.streaming.SessionInfo;
-import org.apache.cassandra.streaming.StreamSession.State;
-import org.apache.cassandra.streaming.StreamSummary;
-import org.apache.cassandra.tools.ToolRunner.ToolResult;
-import org.apache.cassandra.tools.nodetool.NetStats;
-import org.apache.cassandra.utils.FBUtilities;
-import org.assertj.core.api.Assertions;
-import org.hamcrest.CoreMatchers;
-
-import static org.apache.cassandra.net.Verb.ECHO_REQ;
-import static org.junit.Assert.assertThat;
-
-public class NodetoolNetStatsTest extends CQLTester
-{
-    private static NodeProbe probe;
-
-    @BeforeClass
-    public static void setup() throws Exception
-    {
-        StorageService.instance.initServer();
-        startJMXServer();
-        probe = new NodeProbe(jmxHost, jmxPort);
-    }
-
-    @AfterClass
-    public static void teardown() throws IOException
-    {
-        probe.close();
-    }
-
-    @Test
-    public void testMaybeChangeDocs()
-    {
-        // If you added, modified options or help, please update docs if necessary
-        ToolResult tool = ToolRunner.invokeNodetool("help", "netstats");
-        String help =   "NAME\n" + 
-                        "        nodetool netstats - Print network information on provided host\n" + 
-                        "        (connecting node by default)\n" + 
-                        "\n" + 
-                        "SYNOPSIS\n" + 
-                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" + 
-                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" + 
-                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" + 
-                        "                [(-u <username> | --username <username>)] netstats\n" + 
-                        "                [(-H | --human-readable)]\n" + 
-                        "\n" + 
-                        "OPTIONS\n" + 
-                        "        -h <host>, --host <host>\n" + 
-                        "            Node hostname or ip address\n" + 
-                        "\n" + 
-                        "        -H, --human-readable\n" + 
-                        "            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB\n" + 
-                        "\n" + 
-                        "        -p <port>, --port <port>\n" + 
-                        "            Remote jmx agent port number\n" + 
-                        "\n" + 
-                        "        -pp, --print-port\n" + 
-                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" + 
-                        "\n" + 
-                        "        -pw <password>, --password <password>\n" + 
-                        "            Remote jmx agent password\n" + 
-                        "\n" + 
-                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" + 
-                        "            Path to the JMX password file\n" + 
-                        "\n" + 
-                        "        -u <username>, --username <username>\n" + 
-                        "            Remote jmx agent username\n" + 
-                        "\n" + 
-                        "\n";
-        Assertions.assertThat(tool.getStdout()).isEqualTo(help);
-        tool.assertOnCleanExit();
-    }
-
-    @Test
-    public void testNetStats()
-    {
-        Message<NoPayload> echoMessageOut = Message.out(ECHO_REQ, NoPayload.noPayload);
-        MessagingService.instance().send(echoMessageOut, FBUtilities.getBroadcastAddressAndPort());
-        
-        ToolResult tool = ToolRunner.invokeNodetool("netstats");
-        assertThat(tool.getStdout(), CoreMatchers.containsString("Gossip messages                 n/a         0              2         0"));
-        tool.assertOnCleanExit();
-    }
-
-    @Test
-    public void testHumanReadable() throws IOException
-    {
-        List<StreamSummary> streamSummaries = Collections.singletonList(new StreamSummary(TableId.generate(), 1, 1024));
-        SessionInfo info = new SessionInfo(InetAddressAndPort.getLocalHost(),
-                                           1,
-                                           InetAddressAndPort.getLocalHost(),
-                                           streamSummaries,
-                                           streamSummaries,
-                                           State.COMPLETE);
-
-        try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream out = new PrintStream(baos))
-        {
-            NetStats nstats = new NetStats();
-
-            nstats.printReceivingSummaries(out, info, false);
-            String stdout = getSummariesStdout(baos, out);
-            Assertions.assertThat(stdout).doesNotContain("Kib");
-
-            baos.reset();
-            nstats.printSendingSummaries(out, info, false);
-            stdout = getSummariesStdout(baos, out);
-            Assertions.assertThat(stdout).doesNotContain("KiB");
-
-            baos.reset();
-            nstats.printReceivingSummaries(out, info, true);
-            stdout = getSummariesStdout(baos, out);
-            Assertions.assertThat(stdout).contains("KiB");
-
-            baos.reset();
-            nstats.printSendingSummaries(out, info, true);
-            stdout = getSummariesStdout(baos, out);
-            Assertions.assertThat(stdout).contains("KiB");            
-        }
-    }
-
-    private String getSummariesStdout(ByteArrayOutputStream baos, PrintStream ps) throws IOException
-    {
-        baos.flush();
-        ps.flush();
-        return baos.toString(StandardCharsets.UTF_8.toString());
-    }
-}
diff --git a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
index 7bd1143..3bb2825 100644
--- a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
+++ b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
@@ -32,6 +31,7 @@
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
+import org.apache.cassandra.io.util.File;
 import org.apache.commons.io.FileUtils;
 import org.junit.BeforeClass;
 import org.slf4j.LoggerFactory;
@@ -53,17 +53,12 @@
 
     private static List<ThreadInfo> initialThreads;
 
-    static final String[] EXPECTED_THREADS_WITH_SCHEMA = {
-    "PerDiskMemtableFlushWriter_0:[1-9]",
-    "MemtablePostFlush:[1-9]",
-    "MemtableFlushWriter:[1-9]",
-    "MemtableReclaimMemory:[1-9]",
-    };
     static final String[] OPTIONAL_THREADS_WITH_SCHEMA = {
     "ScheduledTasks:[1-9]",
+    "ScheduledFastTasks:[1-9]",
     "OptionalTasks:[1-9]",
-    "Reference-Reaper:[1-9]",
-    "LocalPool-Cleaner:[1-9]",
+    "Reference-Reaper",
+    "LocalPool-Cleaner(-networking|-chunk-cache)",
     "CacheCleanupExecutor:[1-9]",
     "CompactionExecutor:[1-9]",
     "ValidationExecutor:[1-9]",
@@ -73,9 +68,13 @@
     "Strong-Reference-Leak-Detector:[1-9]",
     "Background_Reporter:[1-9]",
     "EXPIRING-MAP-REAPER:[1-9]",
+    "ObjectCleanerThread",
+    "process reaper",  // spawned by the jvm when executing external processes
+                       // and may still be active when we check
+    "Attach Listener", // spawned in intellij IDEA
     };
 
-    public void assertNoUnexpectedThreadsStarted(String[] expectedThreadNames, String[] optionalThreadNames)
+    public void assertNoUnexpectedThreadsStarted(String[] optionalThreadNames)
     {
         ThreadMXBean threads = ManagementFactory.getThreadMXBean();
 
@@ -89,31 +88,20 @@
                                     .map(ThreadInfo::getThreadName)
                                     .collect(Collectors.toSet());
 
-        List<Pattern> expected = expectedThreadNames != null
-                                 ? Arrays.stream(expectedThreadNames).map(Pattern::compile).collect(Collectors.toList())
-                                 : Collections.emptyList();
-
         List<Pattern> optional = optionalThreadNames != null
                                  ? Arrays.stream(optionalThreadNames).map(Pattern::compile).collect(Collectors.toList())
                                  : Collections.emptyList();
 
         current.removeAll(initial);
 
-        List<Pattern> notPresent = expected.stream()
-                                           .filter(threadNamePattern -> !current.stream().anyMatch(threadName -> threadNamePattern.matcher(threadName).matches()))
-                                           .collect(Collectors.toList());
-
         Set<String> remain = current.stream()
-                                    .filter(threadName -> expected.stream().anyMatch(pattern -> pattern.matcher(threadName).matches()))
-                                    .filter(threadName -> optional.stream().anyMatch(pattern -> pattern.matcher(threadName).matches()))
+                                    .filter(threadName -> optional.stream().noneMatch(pattern -> pattern.matcher(threadName).matches()))
                                     .collect(Collectors.toSet());
 
         if (!remain.isEmpty())
             System.err.println("Unexpected thread names: " + remain);
-        if (!notPresent.isEmpty())
-            System.err.println("Mandatory thread missing: " + notPresent);
 
-        assertTrue("Wrong thread status", remain.isEmpty() && notPresent.isEmpty());
+        assertTrue("Wrong thread status, active threads unaccounted for: " + remain, remain.isEmpty());
     }
 
     public void assertSchemaNotLoaded()
@@ -204,20 +192,20 @@
     public static String findOneSSTable(String ks, String cf) throws IOException
     {
         File cfDir = sstableDir(ks, cf);
-        File[] sstableFiles = cfDir.listFiles((file) -> file.isFile() && file.getName().endsWith("-Data.db"));
-        return sstableFiles[0].getAbsolutePath();
+        File[] sstableFiles = cfDir.tryList((file) -> file.isFile() && file.name().endsWith("-Data.db"));
+        return sstableFiles[0].absolutePath();
     }
 
     public static String sstableDirName(String ks, String cf) throws IOException
     {
-        return sstableDir(ks, cf).getAbsolutePath();
+        return sstableDir(ks, cf).absolutePath();
     }
 
     public static File sstableDir(String ks, String cf) throws IOException
     {
         File dataDir = copySSTables();
         File ksDir = new File(dataDir, ks);
-        File[] cfDirs = ksDir.listFiles((dir, name) -> cf.equals(name) || name.startsWith(cf + '-'));
+        File[] cfDirs = ksDir.tryList((dir, name) -> cf.equals(name) || name.startsWith(cf + '-'));
         return cfDirs[0];
     }
 
@@ -225,13 +213,13 @@
     {
         File dataDir = new File("build/test/cassandra/data");
         File srcDir = new File("test/data/legacy-sstables/ma");
-        FileUtils.copyDirectory(new File(srcDir, "legacy_tables"), new File(dataDir, "legacy_sstables"));
+        FileUtils.copyDirectory(new File(srcDir, "legacy_tables").toJavaIOFile(), new File(dataDir, "legacy_sstables").toJavaIOFile());
         return dataDir;
     }
     
     protected void assertCorrectEnvPostTest()
     {
-        assertNoUnexpectedThreadsStarted(EXPECTED_THREADS_WITH_SCHEMA, OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
         assertSchemaLoaded();
         assertServerNotLoaded();
     }
diff --git a/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java b/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java
index 4fc2cbe..e76769e 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java
@@ -37,7 +37,7 @@
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
 
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java b/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java
index f8d0f99..1530a2e 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java
@@ -182,7 +182,7 @@
      */
     private void assertPostTestEnv()
     {
-        assertNoUnexpectedThreadsStarted(null, OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
         assertKeyspaceNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableExportTest.java b/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
index 6b5395c..a5b70a2 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
@@ -108,7 +108,7 @@
      */
     private void assertPostTestEnv()
     {
-        assertNoUnexpectedThreadsStarted(null, OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java b/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java
index a6606bf..e6d7adc 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java
@@ -36,7 +36,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java b/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java
index 2f6e64b..4998b6e 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java
@@ -55,7 +55,7 @@
             assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Options:"));
             assertEquals(1, tool.getExitCode());
         }
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -218,7 +218,7 @@
 
     private void assertGoodEnvPostTest()
     {
-        assertNoUnexpectedThreadsStarted(null, OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java b/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java
index ee07f99..3f4314c 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java
@@ -36,7 +36,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java b/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java
index 859b6b8..3531075 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java
@@ -18,10 +18,10 @@
 
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import org.apache.cassandra.io.util.FileUtils;
@@ -41,7 +41,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -79,7 +79,7 @@
                                                        "--is-repaired",
                                                        findOneSSTable("legacy_sstables", "legacy_ma_simple"));
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(null, OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -95,7 +95,7 @@
                                                  "--is-unrepaired",
                                                  findOneSSTable("legacy_sstables", "legacy_ma_simple"));
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(null, OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -110,10 +110,10 @@
         tmpFile.deleteOnExit();
         Files.write(tmpFile.toPath(), findOneSSTable("legacy_sstables", "legacy_ma_simple").getBytes());
         
-        String file = tmpFile.getAbsolutePath();
+        String file = tmpFile.absolutePath();
         ToolResult tool = ToolRunner.invokeClass(SSTableRepairedAtSetter.class, "--really-set", "--is-repaired", "-f", file);
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(null, OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java b/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java
index 62e0166..944b8de 100644
--- a/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java
+++ b/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -26,6 +25,8 @@
 import java.util.stream.Collectors;
 
 import org.junit.Before;
+
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import org.apache.cassandra.cql3.CQLTester;
@@ -64,10 +65,10 @@
     public void testSplittingSSTable() throws Throwable
     {
         ToolResult tool  = ToolRunner.invokeClass(StandaloneSplitter.class, "-s", "1", sstableFileName);
-        List<File> splitFiles = Arrays.asList(sstablesDir.listFiles());
+        List<File> splitFiles = Arrays.asList(sstablesDir.tryList());
         splitFiles.stream().forEach(f -> {
-            if (f.getName().endsWith("Data.db") && !origSstables.contains(f))
-                assertTrue(f.getName() + " is way bigger than 1MB: [" + f.length() + "] bytes",
+            if (f.name().endsWith("Data.db") && !origSstables.contains(f))
+                assertTrue(f.name() + " is way bigger than 1MiB: [" + f.length() + "] bytes",
                            f.length() <= 1024 * 1024 * 1.2); //give a 20% margin on size check
         });
         assertTrue(origSstables.size() < splitFiles.size());
@@ -81,16 +82,16 @@
     {
         ArrayList<String> args = new ArrayList<>(Arrays.asList("-s", "1"));
 
-        args.addAll(Arrays.asList(sstablesDir.listFiles())
+        args.addAll(Arrays.asList(sstablesDir.tryList())
                           .stream()
-                          .map(f -> f.getAbsolutePath())
+                          .map(f -> f.absolutePath())
                           .collect(Collectors.toList()));
 
         ToolResult tool  = ToolRunner.invokeClass(StandaloneSplitter.class, args.toArray(new String[args.size()]));
-        List<File> splitFiles = Arrays.asList(sstablesDir.listFiles());
+        List<File> splitFiles = Arrays.asList(sstablesDir.tryList());
         splitFiles.stream().forEach(f -> {
-            if (f.getName().endsWith("Data.db") && !origSstables.contains(f))
-                assertTrue(f.getName() + " is way bigger than 1MB: [" + f.length() + "] bytes",
+            if (f.name().endsWith("Data.db") && !origSstables.contains(f))
+                assertTrue(f.name() + " is way bigger than 1MiB: [" + f.length() + "] bytes",
                            f.length() <= 1024 * 1024 * 1.2); //give a 20% margin on size check
         });
         assertTrue(origSstables.size() < splitFiles.size());
@@ -102,7 +103,7 @@
     public void testNoSnapshotOption() throws Throwable
     {
         ToolResult tool  = ToolRunner.invokeClass(StandaloneSplitter.class, "-s", "1", "--no-snapshot", sstableFileName);
-        assertTrue(origSstables.size() < Arrays.asList(sstablesDir.listFiles()).size());
+        assertTrue(origSstables.size() < Arrays.asList(sstablesDir.tryList()).size());
         assertTrue(tool.getStdout(), tool.getStdout().isEmpty());
         assertTrue(tool.getCleanedStderr(), tool.getCleanedStderr().isEmpty());
         assertEquals(0, tool.getExitCode());
@@ -116,13 +117,13 @@
             executeFormattedQuery(formatQuery("INSERT INTO %s (id, val) VALUES (?, ?)"), "mockData" + i, "mockData" + i);
 
         ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
-        cfs.forceBlockingFlush();
+        org.apache.cassandra.Util.flush(cfs);
 
         Set<SSTableReader> sstables = cfs.getLiveSSTables();
         sstableFileName = sstables.iterator().next().getFilename();
-        assertTrue("Generated sstable must be at least 1MB", (new File(sstableFileName)).length() > 1024*1024);
-        sstablesDir = new File(sstableFileName).getParentFile();
-        origSstables = Arrays.asList(sstablesDir.listFiles());
+        assertTrue("Generated sstable must be at least 1MiB", (new File(sstableFileName)).length() > 1024*1024);
+        sstablesDir = new File(sstableFileName).parent();
+        origSstables = Arrays.asList(sstablesDir.tryList());
         System.setProperty(Util.ALLOW_TOOL_REINIT_FOR_TEST, "true"); // Necessary for testing
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java b/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java
index ca1c649..e9d2070 100644
--- a/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java
+++ b/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java
@@ -18,13 +18,13 @@
 
 package org.apache.cassandra.tools;
 
-import java.io.File;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -141,7 +141,7 @@
     private List<String> getSStableFiles(String ks, String table) throws StartupException
     {
         ColumnFamilyStore cfs = Keyspace.open(ks).getColumnFamilyStore(table);
-        cfs.forceBlockingFlush();
+        org.apache.cassandra.Util.flush(cfs);
         ColumnFamilyStore.scrubDataDirectories(cfs.metadata());
 
         Set<SSTableReader> sstables = cfs.getLiveSSTables();
@@ -149,8 +149,8 @@
             return Lists.emptyList();
 
         String sstableFileName = sstables.iterator().next().getFilename();
-        File sstablesDir = new File(sstableFileName).getParentFile();
-        return Arrays.asList(sstablesDir.listFiles())
+        File sstablesDir = new File(sstableFileName).parent();
+        return Arrays.asList(sstablesDir.tryList())
                      .stream()
                      .filter(f -> f.isFile())
                      .map(file -> file.toString())
diff --git a/test/unit/org/apache/cassandra/tools/StandaloneVerifierOnSSTablesTest.java b/test/unit/org/apache/cassandra/tools/StandaloneVerifierOnSSTablesTest.java
index 7df28d2..65dd125 100644
--- a/test/unit/org/apache/cassandra/tools/StandaloneVerifierOnSSTablesTest.java
+++ b/test/unit/org/apache/cassandra/tools/StandaloneVerifierOnSSTablesTest.java
@@ -83,9 +83,8 @@
 
         createAndPopulateTable(keyspaceName, workingTable, x -> {});
 
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, workingTable, "-c");
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, workingTable, "--force", "-c");
         assertEquals(0, tool.getExitCode());
-        assertCorrectEnvPostTest();
         tool.assertOnCleanExit();
     }
 
@@ -98,13 +97,13 @@
         createAndPopulateTable(keyspace, tableName, cfs -> {
             // let's just copy old version files from test data into the source dir
             File testDataDir = new File("test/data/legacy-sstables/ma/legacy_tables/legacy_ma_simple");
-            for (File cfsDir : cfs.getDirectories().getCFDirectories())
+            for (org.apache.cassandra.io.util.File cfsDir : cfs.getDirectories().getCFDirectories())
             {
-                FileUtils.copyDirectory(testDataDir, cfsDir);
+                FileUtils.copyDirectory(testDataDir, cfsDir.toJavaIOFile());
             }
         });
 
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspace, tableName, "-c");
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspace, tableName, "-f", "-c");
 
         assertEquals(1, tool.getExitCode());
         Assertions.assertThat(tool.getStdout()).contains("is not the latest version, run upgradesstables");
@@ -118,9 +117,8 @@
 
         createAndPopulateTable(keyspaceName, workingTable, x -> {});
 
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, workingTable);
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, workingTable, "--force");
         assertEquals(0, tool.getExitCode());
-        assertCorrectEnvPostTest();
         tool.assertOnCleanExit();
     }
 
@@ -138,7 +136,7 @@
             }
         });
 
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, corruptStatsTable);
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, corruptStatsTable, "-f");
 
         assertEquals(1, tool.getExitCode());
         Assertions.assertThat(tool.getStderr()).contains("Error Loading", corruptStatsTable);
@@ -163,7 +161,7 @@
             }
         });
 
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, corruptDataTable);
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, keyspaceName, corruptDataTable, "--force");
         assertEquals(1, tool.getExitCode());
         Assertions.assertThat(tool.getStdout()).contains("Invalid SSTable", corruptDataTable);
     }
@@ -217,6 +215,6 @@
                          .apply();
         }
 
-        cfs.forceBlockingFlush();
+        org.apache.cassandra.Util.flush(cfs);
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/StandaloneVerifierTest.java b/test/unit/org/apache/cassandra/tools/StandaloneVerifierTest.java
index 9e6bb14..9d8f797 100644
--- a/test/unit/org/apache/cassandra/tools/StandaloneVerifierTest.java
+++ b/test/unit/org/apache/cassandra/tools/StandaloneVerifierTest.java
@@ -30,7 +30,7 @@
 import static org.junit.Assert.assertThat;
 
 /**
-* Note: the complete coverage is composed of:
+ * Note: the complete coverage is composed of:
  * - {@link StandaloneVerifierOnSSTablesTest}
  * - {@link StandaloneVerifierTest}
  * - {@link org.apache.cassandra.db.VerifyTest}
@@ -42,15 +42,23 @@
     {
         // If you added, modified options or help, please update docs if necessary
         ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "-h");
-        String help = "usage: sstableverify [options] <keyspace> <column_family>\n" + 
+        String help = "usage: sstableverify [options] <keyspace> <column_family> force\n" +
                        "--\n" + 
                        "Verify the sstable for the provided table.\n" + 
-                       "--\n" + 
-                       "Options are:\n" + 
+                       "--\n" +
+                       "NOTE: There are significant risks associated with using this tool; it\n" +
+                       "likely doesn't do what you expect and there are known edge cases. You must\n" +
+                       "provide a -f or --force argument in order to allow usage of the tool ->\n" +
+                       "see CASSANDRA-9947 and CASSANDRA-17017 for known risks.\n" +
+                       "https://issues.apache.org/jira/browse/CASSANDRA-9947\n" +
+                       "https://issues.apache.org/jira/browse/CASSANDRA-17017\n" +
+                       "--\n" +
+                       "Options are:\n" +
                        " -c,--check_version          make sure sstables are the latest version\n" + 
                        "    --debug                  display stack traces\n" + 
-                       " -e,--extended               extended verification\n" + 
-                       " -h,--help                   display this help message\n" + 
+                       " -e,--extended               extended verification\n" +
+                       " -f,--force                  force verify - see CASSANDRA-17017\n" +
+                       " -h,--help                   display this help message\n" +
                        " -q,--quick                  do a quick check, don't read all data\n" + 
                        " -r,--mutate_repair_status   don't mutate repair status\n" + 
                        " -t,--token_range <range>    long token range of the format left,right.\n" + 
@@ -62,7 +70,7 @@
     @Test
     public void testWrongArgFailsAndPrintsHelp()
     {
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "--debugwrong", "system_schema", "tables");
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "--debugwrong", "system_schema", "tables", "-f");
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Unrecognized option"));
         assertEquals(1, tool.getExitCode());
@@ -71,24 +79,24 @@
     @Test
     public void testDefaultCall()
     {
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "system_schema", "tables");
-        assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("using the following options"));
-        Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
-        assertEquals(0, tool.getExitCode());
-        assertCorrectEnvPostTest();
-        tool.assertOnCleanExit();
-
+        Arrays.asList("-f", "--force").forEach(arg -> {
+            ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "system_schema", "tables", arg);
+            assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("using the following options"));
+            Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
+            assertEquals(0, tool.getExitCode());
+            assertCorrectEnvPostTest();
+            tool.assertOnCleanExit();
+        });
     }
 
     @Test
     public void testDebugArg()
     {
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "--debug", "system_schema", "tables");
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "--debug", "system_schema", "tables", "-f");
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("debug=true"));
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertCorrectEnvPostTest();
         tool.assertOnCleanExit();
-
     }
 
     @Test
@@ -96,9 +104,10 @@
     {
         Arrays.asList("-e", "--extended").forEach(arg -> {
             ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class,
-                                                       arg,
-                                                       "system_schema",
-                                                       "tables");
+                                                     arg,
+                                                     "system_schema",
+                                                     "tables",
+                                                     "--force");
             assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("extended=true"));
             Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
             assertCorrectEnvPostTest();
@@ -111,9 +120,10 @@
     {
         Arrays.asList("-q", "--quick").forEach(arg -> {
             ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class,
-                                                       arg,
-                                                       "system_schema",
-                                                       "tables");
+                                                     arg,
+                                                     "system_schema",
+                                                     "tables",
+                                                     "-f");
             assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("quick=true"));
             Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
             assertCorrectEnvPostTest();
@@ -126,9 +136,10 @@
     {
         Arrays.asList("-r", "--mutate_repair_status").forEach(arg -> {
             ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class,
-                                                       arg,
-                                                       "system_schema",
-                                                       "tables");
+                                                     arg,
+                                                     "system_schema",
+                                                     "tables",
+                                                     "--force");
             assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("mutateRepairStatus=true"));
             Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
             assertCorrectEnvPostTest();
@@ -153,9 +164,10 @@
     {
         Arrays.asList("-v", "--verbose").forEach(arg -> {
             ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class,
-                                                       arg,
-                                                       "system_schema",
-                                                       "tables");
+                                                     arg,
+                                                     "system_schema",
+                                                     "tables",
+                                                     "-f");
             assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("verbose=true"));
             Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
             assertCorrectEnvPostTest();
@@ -164,11 +176,47 @@
     }
 
     @Test
+    public void testTooFewArgs()
+    {
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "one_arg");
+        assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
+        assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
+        assertEquals(1, tool.getExitCode());
+    }
+
+    @Test
     public void testTooManyArgs()
     {
-        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "another arg", "system_schema", "tables");
+        ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class, "one_arg", "two_arg", "toomany_arg");
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Too many arguments"));
         assertEquals(1, tool.getExitCode());
     }
+
+    @Test
+    public void testFailsWithoutForce()
+    {
+        Arrays.asList("-r", "--mutate_repair_status").forEach(arg -> {
+            ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class,
+                                                     arg,
+                                                     "system_schema",
+                                                     "tables",
+                                                     "debug");
+            assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
+            assertEquals(1, tool.getExitCode());
+        });
+    }
+
+    @Test
+    public void testBadForceArgument()
+    {
+        Arrays.asList("bf", "badforce", "garbage", "forrce").forEach(arg -> {
+            ToolResult tool = ToolRunner.invokeClass(StandaloneVerifier.class,
+                                                     "system_schema",
+                                                     "tables",
+                                                     arg);
+            assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage"));
+            assertEquals(1, tool.getExitCode());
+        });
+    }
 }
diff --git a/test/unit/org/apache/cassandra/tools/ToolRunner.java b/test/unit/org/apache/cassandra/tools/ToolRunner.java
index 0ad88ef..e3b9595 100644
--- a/test/unit/org/apache/cassandra/tools/ToolRunner.java
+++ b/test/unit/org/apache/cassandra/tools/ToolRunner.java
@@ -34,6 +34,7 @@
 import java.util.Objects;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableList;
@@ -47,6 +48,7 @@
 import org.apache.cassandra.utils.Pair;
 import org.assertj.core.util.Lists;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -119,11 +121,13 @@
 
         private final InputStream input;
         private final T out;
+        private final boolean autoCloseOut;
 
-        private StreamGobbler(InputStream input, T out)
+        private StreamGobbler(InputStream input, T out, boolean autoCloseOut)
         {
             this.input = input;
             this.out = out;
+            this.autoCloseOut = autoCloseOut;
         }
 
         public void run()
@@ -136,6 +140,8 @@
                     int read = input.read(buffer);
                     if (read == -1)
                     {
+                        if (autoCloseOut)
+                            out.close();
                         return;
                     }
                     out.write(buffer, 0, read);
@@ -394,8 +400,11 @@
          */
         public void assertCleanStdErr()
         {
-            assertTrue("Failed because cleaned stdErr wasn't empty: " + getCleanedStderr(),
-                       getCleanedStderr().isEmpty());
+            String raw = getStderr();
+            String cleaned = getCleanedStderr();
+            assertTrue("Failed to clean stderr completely.\nRaw (length=" + raw.length() + "):\n" + raw + 
+                       "\nCleaned (length=" + cleaned.length() + "):\n" + cleaned,
+                       cleaned.trim().isEmpty());
         }
 
         public void assertOnExitCode()
@@ -451,6 +460,55 @@
             assertOnExitCode();
             assertCleanStdErr();
         }
+
+        public AssertHelp asserts()
+        {
+            return new AssertHelp();
+        }
+
+        public final class AssertHelp
+        {
+            public AssertHelp success()
+            {
+                if (exitCode != 0)
+                    fail("was not successful");
+                return this;
+            }
+
+            public AssertHelp failure()
+            {
+                if (exitCode == 0)
+                    fail("was successful but a failure was expected");
+                return this;
+            }
+
+            public AssertHelp errorContains(String messages)
+            {
+                return errorContainsAny(messages);
+            }
+
+            public AssertHelp errorContainsAny(String... messages)
+            {
+                assertThat(messages).hasSizeGreaterThan(0);
+                assertThat(stderr).isNotNull();
+                if (!Stream.of(messages).anyMatch(stderr::contains))
+                    fail("stderr does not contain " + Arrays.toString(messages));
+                return this;
+            }
+
+            private void fail(String msg)
+            {
+                StringBuilder sb = new StringBuilder();
+                sb.append("nodetool command ").append(String.join(" ", allArgs)).append(": ").append(msg).append('\n');
+                if (stdout != null)
+                    sb.append("stdout:\n").append(stdout).append('\n');
+                if (stderr != null)
+                    sb.append("stderr:\n").append(stderr).append('\n');
+                if (e != null)
+                    sb.append("Exception:\n").append(Throwables.getStackTraceAsString(e)).append('\n');
+                throw new AssertionError(sb.toString());
+            }
+        }
     }
 
     public interface ObservableTool extends AutoCloseable
@@ -536,19 +594,19 @@
             if (includeStdinWatcher)
                 numWatchers = 3;
             ioWatchers = new Thread[numWatchers];
-            ioWatchers[0] = new Thread(new StreamGobbler<>(process.getErrorStream(), err));
+            ioWatchers[0] = new Thread(new StreamGobbler<>(process.getErrorStream(), err, false));
             ioWatchers[0].setDaemon(true);
             ioWatchers[0].setName("IO Watcher stderr");
             ioWatchers[0].start();
 
-            ioWatchers[1] = new Thread(new StreamGobbler<>(process.getInputStream(), out));
+            ioWatchers[1] = new Thread(new StreamGobbler<>(process.getInputStream(), out, false));
             ioWatchers[1].setDaemon(true);
             ioWatchers[1].setName("IO Watcher stdout");
             ioWatchers[1].start();
 
             if (includeStdinWatcher)
             {
-                ioWatchers[2] = new Thread(new StreamGobbler<>(stdin, process.getOutputStream()));
+                ioWatchers[2] = new Thread(new StreamGobbler<>(stdin, process.getOutputStream(), true));
                 ioWatchers[2].setDaemon(true);
                 ioWatchers[2].setName("IO Watcher stdin");
                 ioWatchers[2].start();
diff --git a/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java b/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java
index 58a444c..1a99643 100644
--- a/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java
+++ b/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java
@@ -34,7 +34,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -49,7 +49,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -64,7 +64,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("No sstables to split"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -79,7 +79,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -94,7 +94,7 @@
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null, null);
+        assertNoUnexpectedThreadsStarted(null);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/cassandrastress/CassandrastressTest.java b/test/unit/org/apache/cassandra/tools/cassandrastress/CassandrastressTest.java
index aa64dac..8d0711a 100644
--- a/test/unit/org/apache/cassandra/tools/cassandrastress/CassandrastressTest.java
+++ b/test/unit/org/apache/cassandra/tools/cassandrastress/CassandrastressTest.java
@@ -23,7 +23,6 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.service.GCInspector;
 import org.apache.cassandra.tools.ToolRunner;
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/ClearSnapshotTest.java b/test/unit/org/apache/cassandra/tools/nodetool/ClearSnapshotTest.java
new file mode 100644
index 0000000..0e172f3
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/ClearSnapshotTest.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.io.IOException;
+import java.util.Map;
+import javax.management.openmbean.TabularData;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ClearSnapshotTest extends CQLTester
+{
+    private static NodeProbe probe;
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+        probe = new NodeProbe(jmxHost, jmxPort);
+    }
+
+    @AfterClass
+    public static void teardown() throws IOException
+    {
+        probe.close();
+    }
+
+    @Test
+    public void testClearSnapshot_NoArgs()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("clearsnapshot");
+        assertThat(tool.getExitCode()).isEqualTo(2);
+        assertThat(tool.getCleanedStderr()).contains("Specify snapshot name or --all");
+        
+        tool = ToolRunner.invokeNodetool("clearsnapshot", "--all");
+        tool.assertOnCleanExit();
+    }
+
+    @Test
+    public void testClearSnapshot_AllAndName()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("clearsnapshot", "-t", "some-name", "--all");
+        assertThat(tool.getExitCode()).isEqualTo(2);
+        assertThat(tool.getCleanedStderr()).contains("Specify only one of snapshot name or --all");
+    }
+
+    @Test
+    public void testClearSnapshot_RemoveByName()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("snapshot","-t","some-name");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isNotEmpty();
+        
+        Map<String, TabularData> snapshots_before = probe.getSnapshotDetails();
+        assertThat(snapshots_before).containsKey("some-name");
+
+        tool = ToolRunner.invokeNodetool("clearsnapshot","-t","some-name");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isNotEmpty();
+        
+        Map<String, TabularData> snapshots_after = probe.getSnapshotDetails();
+        assertThat(snapshots_after).doesNotContainKey("some-name");
+    }
+
+    @Test
+    public void testClearSnapshot_RemoveMultiple()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("snapshot","-t","some-name");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isNotEmpty();
+
+        tool = ToolRunner.invokeNodetool("snapshot","-t","some-other-name");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isNotEmpty();
+
+        Map<String, TabularData> snapshots_before = probe.getSnapshotDetails();
+        assertThat(snapshots_before).hasSize(2);
+
+        tool = ToolRunner.invokeNodetool("clearsnapshot","--all");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isNotEmpty();
+        
+        Map<String, TabularData> snapshots_after = probe.getSnapshotDetails();
+        assertThat(snapshots_after).isEmpty();
+    }
+    
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/ClientStatsTest.java b/test/unit/org/apache/cassandra/tools/nodetool/ClientStatsTest.java
new file mode 100644
index 0000000..5975f66
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/ClientStatsTest.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.net.InetAddress;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.LoggerFactory;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.Logger;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.read.ListAppender;
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Session;
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.service.EmbeddedCassandraService;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.tools.ToolRunner;
+import org.assertj.core.groups.Tuple;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ClientStatsTest
+{
+    private static Cluster cluster;
+    private Session session;
+
+    private static EmbeddedCassandraService cassandra;
+
+    @BeforeClass
+    public static void setup() throws Throwable
+    {
+        // Since we run EmbeddedCassandraServer, we need to manually associate JMX address; otherwise it won't start
+        int jmxPort = CQLTester.getAutomaticallyAllocatedPort(InetAddress.getLoopbackAddress());
+        System.setProperty("cassandra.jmx.local.port", String.valueOf(jmxPort));
+
+        cassandra = ServerTestUtils.startEmbeddedCassandraService();
+        cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
+    }
+
+    @Before
+    public void config() throws Throwable
+    {
+        session = cluster.connect();
+        ResultSet result = session.execute("select release_version from system.local");
+    }
+
+    @After
+    public void afterTest()
+    {
+        if (session != null)
+            session.close();
+    }
+
+    @AfterClass
+    public static void tearDown()
+    {
+        if (cluster != null)
+            cluster.close();
+        if (cassandra != null)
+            cassandra.stop();
+    }
+
+    @Test
+    public void testClientStatsHelp()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "clientstats");
+        tool.assertOnCleanExit();
+
+        String help = "NAME\n" +
+                      "        nodetool clientstats - Print information about connected clients\n" +
+                      "\n" +
+                      "SYNOPSIS\n" +
+                      "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                      "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                      "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                      "                [(-u <username> | --username <username>)] clientstats [--all]\n" +
+                      "                [--by-protocol] [--clear-history] [--client-options]\n" +
+                      "\n" +
+                      "OPTIONS\n" +
+                      "        --all\n" +
+                      "            Lists all connections\n" +
+                      "\n" +
+                      "        --by-protocol\n" +
+                      "            Lists most recent client connections by protocol version\n" +
+                      "\n" +
+                      "        --clear-history\n" +
+                      "            Clear the history of connected clients\n" +
+                      "\n" +
+                      "        --client-options\n" +
+                      "            Lists all connections and the client options\n" +
+                      "\n" +
+                      "        -h <host>, --host <host>\n" +
+                      "            Node hostname or ip address\n" +
+                      "\n" +
+                      "        -p <port>, --port <port>\n" +
+                      "            Remote jmx agent port number\n" +
+                      "\n" +
+                      "        -pp, --print-port\n" +
+                      "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                      "\n" +
+                      "        -pw <password>, --password <password>\n" +
+                      "            Remote jmx agent password\n" +
+                      "\n" +
+                      "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                      "            Path to the JMX password file\n" +
+                      "\n" +
+                      "        -u <username>, --username <username>\n" +
+                      "            Remote jmx agent username\n" +
+                      "\n" +
+                      "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testClientStats()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("clientstats");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).contains("Total connected clients: 2");
+        assertThat(stdout).contains("User      Connections");
+        assertThat(stdout).contains("anonymous 2");
+    }
+
+    @Test
+    public void testClientStatsByProtocol()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("clientstats", "--by-protocol");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).contains("Clients by protocol version");
+        assertThat(stdout).contains("Protocol-Version IP-Address Last-Seen");
+        assertThat(stdout).containsPattern("[0-9]/v[0-9] +/127.0.0.1 [a-zA-Z]{3} [0-9]+, [0-9]{4} [0-9]{2}:[0-9]{2}:[0-9]{2}");
+    }
+
+    @Test
+    public void testClientStatsAll()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("clientstats", "--all");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).containsPattern("Address +SSL +Cipher +Protocol +Version +User +Keyspace +Requests +Driver-Name +Driver-Version");
+        assertThat(stdout).containsPattern("/127.0.0.1:[0-9]+ false undefined undefined [0-9]+ +anonymous +[0-9]+ +DataStax Java Driver 3.11.0");
+        assertThat(stdout).contains("Total connected clients: 2");
+        assertThat(stdout).contains("User      Connections");
+        assertThat(stdout).contains("anonymous 2");
+    }
+
+    @Test
+    public void testClientStatsClientOptions()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("clientstats", "--client-options");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).containsPattern("Address +SSL +Cipher +Protocol +Version +User +Keyspace +Requests +Driver-Name +Driver-Version +Client-Options");
+        assertThat(stdout).containsPattern("/127.0.0.1:[0-9]+ false undefined undefined [0-9]+ +anonymous +[0-9]+ +DataStax Java Driver 3.11.0");
+        assertThat(stdout).containsPattern("DRIVER_NAME=DataStax Java Driver");
+        assertThat(stdout).containsPattern("DRIVER_VERSION=3.11.0");
+        assertThat(stdout).containsPattern("CQL_VERSION=3.0.0");
+        assertThat(stdout).contains("Total connected clients: 2");
+        assertThat(stdout).contains("User      Connections");
+        assertThat(stdout).contains("anonymous 2");
+    }
+
+    @Test
+    public void testClientStatsClearHistory()
+    {
+        ListAppender<ILoggingEvent> listAppender = new ListAppender<>();
+        Logger ssLogger = (Logger) LoggerFactory.getLogger(StorageService.class);
+
+        ssLogger.addAppender(listAppender);
+        listAppender.start();
+
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("clientstats", "--clear-history");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).contains("Clearing connection history");
+        assertThat(listAppender.list)
+        .extracting(ILoggingEvent::getMessage, ILoggingEvent::getLevel)
+        .contains(Tuple.tuple("Cleared connection history", Level.INFO));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/CompactTest.java b/test/unit/org/apache/cassandra/tools/nodetool/CompactTest.java
new file mode 100644
index 0000000..34059aa
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/CompactTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.Arrays;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.cassandra.tools.ToolRunner.invokeNodetool;
+
+public class CompactTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Throwable
+    {
+        startJMXServer();
+    }
+
+    @Test
+    public void keyPresent() throws Throwable
+    {
+        long token = 42;
+        long key = Murmur3Partitioner.LongToken.keyForToken(token).getLong();
+        createTable("CREATE TABLE %s (id bigint, value text, PRIMARY KEY ((id)))");
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
+        cfs.disableAutoCompaction();
+        // write SSTables for the specific key
+        for (int i = 0; i < 10; i++)
+        {
+            execute("INSERT INTO %s (id, value) VALUES (?, ?)", key, "This is just some text... part " + i);
+            flush(keyspace());
+        }
+        Assertions.assertThat(cfs.getTracker().getView().liveSSTables()).hasSize(10);
+        invokeNodetool("compact", "--partition", Long.toString(key), keyspace(), currentTable()).assertOnCleanExit();
+
+        // only 1 SSTable should exist
+        Assertions.assertThat(cfs.getTracker().getView().liveSSTables()).hasSize(1);
+    }
+
+    @Test
+    public void keyNotPresent() throws Throwable
+    {
+        long token = 42;
+        long key = Murmur3Partitioner.LongToken.keyForToken(token).getLong();
+        createTable("CREATE TABLE %s (id bigint, value text, PRIMARY KEY ((id)))");
+        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
+        cfs.disableAutoCompaction();
+        // write SSTables for the specific key
+        for (int i = 0; i < 10; i++)
+        {
+            execute("INSERT INTO %s (id, value) VALUES (?, ?)", key, "This is just some text... part " + i);
+            flush(keyspace());
+        }
+        Assertions.assertThat(cfs.getTracker().getView().liveSSTables()).hasSize(10);
+
+        for (long keyNotFound : Arrays.asList(key - 1, key + 1))
+        {
+            invokeNodetool("compact", "--partition", Long.toString(keyNotFound), keyspace(), currentTable()).assertOnCleanExit();
+
+            // compacting a key that is not present should not change anything; all 10 SSTables remain
+            Assertions.assertThat(cfs.getTracker().getView().liveSSTables()).hasSize(10);
+        }
+    }
+
+    @Test
+    public void tableNotFound()
+    {
+        invokeNodetool("compact", "--partition", Long.toString(42), keyspace(), "doesnotexist")
+        .asserts()
+        .failure()
+        .errorContains(String.format("java.lang.IllegalArgumentException: Unknown keyspace/cf pair (%s.doesnotexist)", keyspace()));
+    }
+
+    @Test
+    public void keyWrongType()
+    {
+        createTable("CREATE TABLE %s (id bigint, value text, PRIMARY KEY ((id)))");
+
+        invokeNodetool("compact", "--partition", "this_will_not_work", keyspace(), currentTable())
+        .asserts()
+        .failure()
+        .errorContains(String.format("Unable to parse partition key 'this_will_not_work' for table %s.%s; Unable to make long from 'this_will_not_work'", keyspace(), currentTable()));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/CompactionStatsTest.java b/test/unit/org/apache/cassandra/tools/nodetool/CompactionStatsTest.java
new file mode 100644
index 0000000..b42a166
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/CompactionStatsTest.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.compaction.CompactionInfo;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.MockSchema;
+import org.apache.cassandra.tools.ToolRunner;
+import org.apache.cassandra.utils.TimeUUID;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionStatsTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added, modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "compactionstats");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                "        nodetool compactionstats - Print statistics on compactions\n" +
+                "\n" +
+                "SYNOPSIS\n" +
+                "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                "                [(-u <username> | --username <username>)] compactionstats\n" +
+                "                [(-H | --human-readable)] [(-V | --vtable)]\n" +
+                "\n" +
+                "OPTIONS\n" +
+                "        -h <host>, --host <host>\n" +
+                "            Node hostname or ip address\n" +
+                "\n" +
+                "        -H, --human-readable\n" +
+                "            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB\n" +
+                "\n" +
+                "        -p <port>, --port <port>\n" +
+                "            Remote jmx agent port number\n" +
+                "\n" +
+                "        -pp, --print-port\n" +
+                "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                "\n" +
+                "        -pw <password>, --password <password>\n" +
+                "            Remote jmx agent password\n" +
+                "\n" +
+                "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                "            Path to the JMX password file\n" +
+                "\n" +
+                "        -u <username>, --username <username>\n" +
+                "            Remote jmx agent username\n" +
+                "\n" +
+                "        -V, --vtable\n" +
+                "            Display fields matching vtable output\n" +
+                "\n" +
+                "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testCompactionStats()
+    {
+        createTable("CREATE TABLE %s (pk int, ck int, PRIMARY KEY (pk, ck))");
+        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
+
+        long bytesCompacted = 123;
+        long bytesTotal = 123456;
+        TimeUUID compactionId = nextTimeUUID();
+        List<SSTableReader> sstables = IntStream.range(0, 10)
+                .mapToObj(i -> MockSchema.sstable(i, i * 10L, i * 10L + 9, cfs))
+                .collect(Collectors.toList());
+        CompactionInfo.Holder compactionHolder = new CompactionInfo.Holder()
+        {
+            public CompactionInfo getCompactionInfo()
+            {
+                return new CompactionInfo(cfs.metadata(), OperationType.COMPACTION, bytesCompacted, bytesTotal, compactionId, sstables);
+            }
+
+            public boolean isGlobal()
+            {
+                return false;
+            }
+        };
+
+        CompactionManager.instance.active.beginCompaction(compactionHolder);
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("compactionstats");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 1");
+        Assertions.assertThat(stdout).containsPattern("id\\s+compaction type\\s+keyspace\\s+table\\s+completed\\s+total\\s+unit\\s+progress");
+        String expectedStatsPattern = String.format("%s\\s+%s\\s+%s\\s+%s\\s+%s\\s+%s\\s+%s\\s+%.2f%%",
+            compactionId, OperationType.COMPACTION, CQLTester.KEYSPACE, currentTable(), bytesCompacted, bytesTotal,
+            CompactionInfo.Unit.BYTES, (double) bytesCompacted / bytesTotal * 100);
+        Assertions.assertThat(stdout).containsPattern(expectedStatsPattern);
+
+        CompactionManager.instance.active.finishCompaction(compactionHolder);
+        tool = ToolRunner.invokeNodetool("compactionstats");
+        tool.assertOnCleanExit();
+        stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 0");
+    }
+
+    @Test
+    public void testCompactionStatsVtable()
+    {
+        createTable("CREATE TABLE %s (pk int, ck int, PRIMARY KEY (pk, ck))");
+        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
+
+        long bytesCompacted = 123;
+        long bytesTotal = 123456;
+        TimeUUID compactionId = nextTimeUUID();
+        List<SSTableReader> sstables = IntStream.range(0, 10)
+            .mapToObj(i -> MockSchema.sstable(i, i * 10L, i * 10L + 9, cfs))
+            .collect(Collectors.toList());
+        CompactionInfo.Holder compactionHolder = new CompactionInfo.Holder()
+        {
+            public CompactionInfo getCompactionInfo()
+            {
+                return new CompactionInfo(cfs.metadata(), OperationType.COMPACTION, bytesCompacted, bytesTotal, compactionId, sstables);
+            }
+
+            public boolean isGlobal()
+            {
+                return false;
+            }
+        };
+
+        CompactionManager.instance.active.beginCompaction(compactionHolder);
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("compactionstats", "-V");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 1");
+        Assertions.assertThat(stdout).containsPattern("keyspace\\s+table\\s+task id\\s+completion ratio\\s+kind\\s+progress\\s+sstables\\s+total\\s+unit");
+        String expectedStatsPattern = String.format("%s\\s+%s\\s+%s\\s+%.2f%%\\s+%s\\s+%s\\s+%s\\s+%s\\s+%s",
+            CQLTester.KEYSPACE, currentTable(), compactionId, (double) bytesCompacted / bytesTotal * 100,
+            OperationType.COMPACTION, bytesCompacted, sstables.size(), bytesTotal, CompactionInfo.Unit.BYTES);
+        Assertions.assertThat(stdout).containsPattern(expectedStatsPattern);
+
+        CompactionManager.instance.active.finishCompaction(compactionHolder);
+        tool = ToolRunner.invokeNodetool("compactionstats", "-V");
+        tool.assertOnCleanExit();
+        stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 0");
+    }
+
+    @Test
+    public void testCompactionStatsHumanReadable()
+    {
+        createTable("CREATE TABLE %s (pk int, ck int, PRIMARY KEY (pk, ck))");
+        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
+
+        long bytesCompacted = 123;
+        long bytesTotal = 123456;
+        TimeUUID compactionId = nextTimeUUID();
+        List<SSTableReader> sstables = IntStream.range(0, 10)
+            .mapToObj(i -> MockSchema.sstable(i, i * 10L, i * 10L + 9, cfs))
+            .collect(Collectors.toList());
+        CompactionInfo.Holder compactionHolder = new CompactionInfo.Holder()
+        {
+            public CompactionInfo getCompactionInfo()
+            {
+                return new CompactionInfo(cfs.metadata(), OperationType.COMPACTION, bytesCompacted, bytesTotal, compactionId, sstables);
+            }
+
+            public boolean isGlobal()
+            {
+                return false;
+            }
+        };
+
+        CompactionManager.instance.active.beginCompaction(compactionHolder);
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("compactionstats", "--human-readable");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 1");
+        Assertions.assertThat(stdout).containsPattern("id\\s+compaction type\\s+keyspace\\s+table\\s+completed\\s+total\\s+unit\\s+progress");
+        String expectedStatsPattern = String.format("%s\\s+%s\\s+%s\\s+%s\\s+%s\\s+%s\\s+%s\\s+%.2f%%",
+            compactionId, OperationType.COMPACTION, CQLTester.KEYSPACE, currentTable(), "123 bytes", "120.56 KiB",
+            CompactionInfo.Unit.BYTES, (double) bytesCompacted / bytesTotal * 100);
+        Assertions.assertThat(stdout).containsPattern(expectedStatsPattern);
+
+        CompactionManager.instance.active.finishCompaction(compactionHolder);
+        tool = ToolRunner.invokeNodetool("compactionstats", "--human-readable");
+        tool.assertOnCleanExit();
+        stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 0");
+    }
+
+    @Test
+    public void testCompactionStatsVtableHumanReadable()
+    {
+        createTable("CREATE TABLE %s (pk int, ck int, PRIMARY KEY (pk, ck))");
+        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
+
+        long bytesCompacted = 123;
+        long bytesTotal = 123456;
+        TimeUUID compactionId = nextTimeUUID();
+        List<SSTableReader> sstables = IntStream.range(0, 10)
+            .mapToObj(i -> MockSchema.sstable(i, i * 10L, i * 10L + 9, cfs))
+            .collect(Collectors.toList());
+        CompactionInfo.Holder compactionHolder = new CompactionInfo.Holder()
+        {
+            public CompactionInfo getCompactionInfo()
+            {
+                return new CompactionInfo(cfs.metadata(), OperationType.COMPACTION, bytesCompacted, bytesTotal, compactionId, sstables);
+            }
+
+            public boolean isGlobal()
+            {
+                return false;
+            }
+        };
+
+        CompactionManager.instance.active.beginCompaction(compactionHolder);
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("compactionstats", "--vtable", "--human-readable");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 1");
+        Assertions.assertThat(stdout).containsPattern("keyspace\\s+table\\s+task id\\s+completion ratio\\s+kind\\s+progress\\s+sstables\\s+total\\s+unit");
+        String expectedStatsPattern = String.format("%s\\s+%s\\s+%s\\s+%.2f%%\\s+%s\\s+%s\\s+%s\\s+%s\\s+%s",
+            CQLTester.KEYSPACE, currentTable(), compactionId, (double) bytesCompacted / bytesTotal * 100,
+            OperationType.COMPACTION, "123 bytes", sstables.size(), "120.56 KiB", CompactionInfo.Unit.BYTES);
+        Assertions.assertThat(stdout).containsPattern(expectedStatsPattern);
+
+        CompactionManager.instance.active.finishCompaction(compactionHolder);
+        tool = ToolRunner.invokeNodetool("compactionstats", "--vtable", "--human-readable");
+        tool.assertOnCleanExit();
+        stdout = tool.getStdout();
+        assertThat(stdout).contains("pending tasks: 0");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/DataPathsTest.java b/test/unit/org/apache/cassandra/tools/nodetool/DataPathsTest.java
new file mode 100644
index 0000000..d09aceb
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/DataPathsTest.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools.nodetool;
+
+import org.apache.commons.lang3.StringUtils;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class DataPathsTest extends CQLTester
+{
+    private static final String SUBCOMMAND = "datapaths";
+    
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
+    }
+
+    @Test
+    public void testHelp()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", SUBCOMMAND);
+        tool.assertOnCleanExit();
+        
+        String help = "NAME\n" +
+                      "        nodetool datapaths - Print all directories where data of tables are\n" +
+                      "        stored\n" +
+                      '\n' +
+                      "SYNOPSIS\n" +
+                      "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                      "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                      "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                      "                [(-u <username> | --username <username>)] datapaths\n" +
+                      "                [(-F <format> | --format <format>)] [--] [<keyspace.table>...]\n" +
+                      '\n' +
+                      "OPTIONS\n" +
+                      "        -F <format>, --format <format>\n" +
+                      "            Output format (json, yaml)\n" +
+                      '\n' +
+                      "        -h <host>, --host <host>\n" +
+                      "            Node hostname or ip address\n" +
+                      '\n' +
+                      "        -p <port>, --port <port>\n" +
+                      "            Remote jmx agent port number\n" +
+                      '\n' +
+                      "        -pp, --print-port\n" +
+                      "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                      '\n' +
+                      "        -pw <password>, --password <password>\n" +
+                      "            Remote jmx agent password\n" +
+                      '\n' +
+                      "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                      "            Path to the JMX password file\n" +
+                      '\n' +
+                      "        -u <username>, --username <username>\n" +
+                      "            Remote jmx agent username\n" +
+                      '\n' +
+                      "        --\n" +
+                      "            This option can be used to separate command-line options from the\n" +
+                      "            list of argument, (useful when arguments might be mistaken for\n" +
+                      "            command-line options\n" +
+                      '\n' +
+                      "        [<keyspace.table>...]\n" +
+                      "            List of table (or keyspace) names\n" +
+                      '\n' +
+                      '\n';
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testAllOutput()
+    {
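+        // with no arguments the command is expected to list every keyspace, so multiple keyspaces, tables and paths should appear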
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND);
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace: system_schema");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Keyspace:")).isGreaterThan(1);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tTable:")).isGreaterThan(1);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tPaths:")).isGreaterThan(1);
+    }
+
+    @Test
+    public void testSelectedKeyspace()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND, "system_traces");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace: system_traces");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Keyspace:")).isEqualTo(1);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tTable:")).isGreaterThan(1);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tPaths:")).isGreaterThan(1);
+    }
+
+    @Test
+    public void testSelectedMultipleKeyspaces()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND, "system_traces", "system_auth");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace: system_traces");
+        assertThat(tool.getStdout()).contains("Keyspace: system_auth");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Keyspace:")).isEqualTo(2);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tTable:")).isGreaterThan(1);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tPaths:")).isGreaterThan(1);
+    }
+
+    @Test
+    public void testSelectedTable()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND, "system_auth.roles");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace: system_auth");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Keyspace:")).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("Table: roles");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tTable:")).isEqualTo(1);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tPaths:")).isEqualTo(1);
+    }
+
+    @Test
+    public void testSelectedMultipleTables()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND, "system_auth.roles", "system_auth.role_members");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace: system_auth");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Keyspace:")).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("Table: roles");
+        assertThat(tool.getStdout()).contains("Table: role_members");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tTable:")).isEqualTo(2);
+        assertThat(StringUtils.countMatches(tool.getStdout(), "\tPaths:")).isEqualTo(2);
+    }
+
+    @Test
+    public void testFormatArgJson()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND, "--format", "json");
+        tool.assertOnCleanExit();
+    }
+
+    @Test
+    public void testFormatArgYaml()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND, "--format", "yaml");
+        tool.assertOnCleanExit();
+    }
+
+    @Test
+    public void testFormatArgBad()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(SUBCOMMAND, "--format", "bad");
+        assertThat(tool.getStdout()).contains("arguments for -F are yaml and json only.");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/GetAuditLogTest.java b/test/unit/org/apache/cassandra/tools/nodetool/GetAuditLogTest.java
new file mode 100644
index 0000000..069710b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/GetAuditLogTest.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class GetAuditLogTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+    }
+
+    @After
+    public void afterTest()
+    {
+        disableAuditLog();
+    }
+
+    @Test
+    public void getDefaultOutputTest()
+    {
+        testDefaultOutput(getAuditLog());
+    }
+
+    @Test
+    public void getSimpleOutputTest()
+    {
+        enableAuditLogSimple();
+        testChangedOutputSimple(getAuditLog());
+    }
+
+    @Test
+    public void getComplexOutputTest()
+    {
+        enableAuditLogComplex();
+        testChangedOutputComplex(getAuditLog());
+    }
+
+    @Test
+    public void disablingAuditLogResetsOutputTest()
+    {
+        enableAuditLogComplex();
+        disableAuditLog();
+        testDefaultOutput(getAuditLog());
+    }
+
+    private String getAuditLog()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("getauditlog");
+        tool.assertOnCleanExit();
+        return tool.getStdout();
+    }
+
+    private void disableAuditLog()
+    {
+        ToolRunner.invokeNodetool("disableauditlog").assertOnCleanExit();
+    }
+
+    private void enableAuditLogSimple()
+    {
+        ToolRunner.invokeNodetool("enableauditlog").assertOnCleanExit();
+    }
+
+    private void enableAuditLogComplex()
+    {
+        ToolRunner.invokeNodetool("enableauditlog",
+                                  "--included-keyspaces", "ks1,ks2,ks3",
+                                  "--excluded-categories", "ddl,dcl").assertOnCleanExit();
+    }
+
+    @SuppressWarnings("DynamicRegexReplaceableByCompiledPattern")
+    private void testChangedOutputSimple(final String getAuditLogOutput)
+    {
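+        // collapse repeated spaces so the assertions do not depend on the column alignment of the nodetool output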
+        final String output = getAuditLogOutput.replaceAll("( )+", " ").trim();
+        assertThat(output).startsWith("enabled true");
+        assertThat(output).contains("logger BinAuditLogger");
+        assertThat(output).contains("roll_cycle HOURLY");
+        assertThat(output).contains("block true");
+        assertThat(output).contains("max_log_size 17179869184");
+        assertThat(output).contains("max_queue_weight 268435456");
+        assertThat(output).contains("max_archive_retries 10");
+        assertThat(output).contains("included_keyspaces \n");
+        assertThat(output).contains("excluded_keyspaces system,system_schema,system_virtual_schema");
+        assertThat(output).contains("included_categories \n");
+        assertThat(output).contains("excluded_categories \n");
+        assertThat(output).contains("included_users \n");
+        assertThat(output).endsWith("excluded_users");
+    }
+
+    @SuppressWarnings("DynamicRegexReplaceableByCompiledPattern")
+    private void testChangedOutputComplex(final String getAuditLogOutput)
+    {
+        final String output = getAuditLogOutput.replaceAll("( )+", " ").trim();
+        assertThat(output).startsWith("enabled true");
+        assertThat(output).contains("logger BinAuditLogger");
+        assertThat(output).contains("roll_cycle HOURLY");
+        assertThat(output).contains("block true");
+        assertThat(output).contains("max_log_size 17179869184");
+        assertThat(output).contains("max_queue_weight 268435456");
+        assertThat(output).contains("max_archive_retries 10");
+        assertThat(output).contains("included_keyspaces ks1,ks2,ks3");
+        assertThat(output).contains("excluded_keyspaces system,system_schema,system_virtual_schema");
+        assertThat(output).contains("included_categories \n");
+        assertThat(output).contains("excluded_categories DDL,DCL");
+        assertThat(output).contains("included_users \n");
+        assertThat(output).endsWith("excluded_users");
+    }
+
+    @SuppressWarnings("DynamicRegexReplaceableByCompiledPattern")
+    private void testDefaultOutput(final String getAuditLogOutput)
+    {
+        final String output = getAuditLogOutput.replaceAll("( )+", " ").trim();
+        assertThat(output).startsWith("enabled false");
+        assertThat(output).contains("logger BinAuditLogger");
+        assertThat(output).contains("roll_cycle HOURLY");
+        assertThat(output).contains("block true");
+        assertThat(output).contains("max_log_size 17179869184");
+        assertThat(output).contains("max_queue_weight 268435456");
+        assertThat(output).contains("max_archive_retries 10");
+        assertThat(output).contains("included_keyspaces \n");
+        assertThat(output).contains("excluded_keyspaces system,system_schema,system_virtual_schema");
+        assertThat(output).contains("included_categories \n");
+        assertThat(output).contains("excluded_categories \n");
+        assertThat(output).contains("included_users \n");
+        assertThat(output).endsWith("excluded_users");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/GetAuthCacheConfigTest.java b/test/unit/org/apache/cassandra/tools/nodetool/GetAuthCacheConfigTest.java
new file mode 100644
index 0000000..6afc179
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/GetAuthCacheConfigTest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthCache;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.NetworkPermissionsCacheMBean;
+import org.apache.cassandra.auth.PasswordAuthenticator;
+import org.apache.cassandra.auth.PermissionsCacheMBean;
+import org.apache.cassandra.auth.Roles;
+import org.apache.cassandra.auth.RolesCacheMBean;
+import org.apache.cassandra.auth.jmx.AuthorizationProxy;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class GetAuthCacheConfigTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help output, please update the docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "getauthcacheconfig");
+        tool.assertOnCleanExit();
+
+        String help = "NAME\n" +
+                      "        nodetool getauthcacheconfig - Get configuration of Auth cache\n" +
+                      "\n" +
+                      "SYNOPSIS\n" +
+                      "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                      "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                      "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                      "                [(-u <username> | --username <username>)] getauthcacheconfig\n" +
+                      "                --cache-name <cache-name>\n" +
+                      "\n" +
+                      "OPTIONS\n" +
+                      "        --cache-name <cache-name>\n" +
+                      "            Name of Auth cache (required)\n" +
+                      "\n" +
+                      "        -h <host>, --host <host>\n" +
+                      "            Node hostname or ip address\n" +
+                      "\n" +
+                      "        -p <port>, --port <port>\n" +
+                      "            Remote jmx agent port number\n" +
+                      "\n" +
+                      "        -pp, --print-port\n" +
+                      "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                      "\n" +
+                      "        -pw <password>, --password <password>\n" +
+                      "            Remote jmx agent password\n" +
+                      "\n" +
+                      "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                      "            Path to the JMX password file\n" +
+                      "\n" +
+                      "        -u <username>, --username <username>\n" +
+                      "            Remote jmx agent username\n" +
+                      "\n" +
+                      "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testInvalidCacheName()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("getauthcacheconfig");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("Required option '--cache-name' is missing"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("getauthcacheconfig", "--cache-name");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("Required values for option 'cache-name' not provided"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("getauthcacheconfig", "--cache-name", "wrong");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("Unknown cache name: wrong"));
+        assertThat(tool.getStderr()).isEmpty();
+    }
+
+    @Test
+    public void testGetConfig()
+    {
+        assertGetConfig(AuthenticatedUser.permissionsCache, PermissionsCacheMBean.CACHE_NAME);
+
+        PasswordAuthenticator passwordAuthenticator = (PasswordAuthenticator) DatabaseDescriptor.getAuthenticator();
+        assertGetConfig(passwordAuthenticator.getCredentialsCache(), PasswordAuthenticator.CredentialsCacheMBean.CACHE_NAME);
+
+        assertGetConfig(AuthorizationProxy.jmxPermissionsCache, AuthorizationProxy.JmxPermissionsCacheMBean.CACHE_NAME);
+
+        assertGetConfig(AuthenticatedUser.networkPermissionsCache, NetworkPermissionsCacheMBean.CACHE_NAME);
+
+        assertGetConfig(Roles.cache, RolesCacheMBean.CACHE_NAME);
+    }
+
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    private void assertGetConfig(AuthCache<?, ?> authCache, String cacheName)
+    {
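+        // the reported values must match what the live AuthCache instance currently exposes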
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("getauthcacheconfig", "--cache-name", cacheName);
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEqualTo("Validity Period: " + authCache.getValidity() + "\n" +
+                                               "Update Interval: " + authCache.getUpdateInterval() + "\n" +
+                                               "Max Entries: " + authCache.getMaxEntries() + "\n" +
+                                               "Active Update: " + authCache.getActiveUpdate() + "\n");
+    }
+
+    private String wrapByDefaultNodetoolMessage(String s)
+    {
+        return "nodetool: " + s + "\nSee 'nodetool help' or 'nodetool help <command>'.\n";
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/GetDefaultKeyspaceRFTest.java b/test/unit/org/apache/cassandra/tools/nodetool/GetDefaultKeyspaceRFTest.java
new file mode 100644
index 0000000..7db6f10
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/GetDefaultKeyspaceRFTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class GetDefaultKeyspaceRFTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help output, please update the docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "getdefaultrf");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool getdefaultrf - Gets default keyspace replication factor.\n" +
+                        "\n" +
+                        "SYNOPSIS\n" +
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)] getdefaultrf\n" +
+                        "\n" +
+                        "OPTIONS\n" +
+                        "        -h <host>, --host <host>\n" +
+                        "            Node hostname or ip address\n" +
+                        "\n" +
+                        "        -p <port>, --port <port>\n" +
+                        "            Remote jmx agent port number\n" +
+                        "\n" +
+                        "        -pp, --print-port\n" +
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                        "\n" +
+                        "        -pw <password>, --password <password>\n" +
+                        "            Remote jmx agent password\n" +
+                        "\n" +
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                        "            Path to the JMX password file\n" +
+                        "\n" +
+                        "        -u <username>, --username <username>\n" +
+                        "            Remote jmx agent username\n" +
+                        "\n" +
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testGetDefaultRF()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("getdefaultrf");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout().trim()).isEqualTo(Integer.toString(DatabaseDescriptor.getDefaultKeyspaceRF()));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/GetFullQueryLogTest.java b/test/unit/org/apache/cassandra/tools/nodetool/GetFullQueryLogTest.java
new file mode 100644
index 0000000..61d34ec
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/GetFullQueryLogTest.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.fql.FullQueryLoggerOptions;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class GetFullQueryLogTest extends CQLTester
+{
+    @ClassRule
+    public static TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+    }
+
+    @After
+    public void afterTest() throws InterruptedException
+    {
+        disableFullQueryLog();
+    }
+
+    @Test
+    public void getFullQueryLogTest()
+    {
+        testDefaultOutput(getFullQueryLog());
+    }
+
+    @Test
+    public void enableFullQueryLogTest()
+    {
+        enableFullQueryLog();
+        testChangedOutput(getFullQueryLog());
+    }
+
+    @Test
+    public void resetFullQueryLogTest()
+    {
+        enableFullQueryLog();
+        testChangedOutput(getFullQueryLog());
+
+        // reset, then verify the configuration returns to the defaults
+        resetFullQueryLog();
+
+        testDefaultOutput(getFullQueryLog());
+    }
+
+    private String getFullQueryLog()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("getfullquerylog");
+        tool.assertOnCleanExit();
+        return tool.getStdout();
+    }
+
+    private void resetFullQueryLog()
+    {
+        ToolRunner.invokeNodetool("resetfullquerylog").assertOnCleanExit();
+    }
+
+    private void disableFullQueryLog()
+    {
+        ToolRunner.invokeNodetool("disablefullquerylog").assertOnCleanExit();
+    }
+
+    private void enableFullQueryLog()
+    {
+        ToolRunner.invokeNodetool("enablefullquerylog",
+                                  "--path",
+                                  temporaryFolder.getRoot().toString(),
+                                  "--blocking",
+                                  "false",
+                                  "--max-archive-retries",
+                                  "5",
+                                  "--archive-command",
+                                  "/path/to/script.sh %path",
+                                  "--max-log-size",
+                                  "100000",
+                                  "--max-queue-weight",
+                                  "10000",
+                                  "--roll-cycle",
+                                  "DAILY")
+                  .assertOnCleanExit();
+    }
+
+    @SuppressWarnings("DynamicRegexReplaceableByCompiledPattern")
+    private void testChangedOutput(final String getFullQueryLogOutput)
+    {
+        final String output = getFullQueryLogOutput.replaceAll("( )+", " ").trim();
+        assertThat(output).contains("enabled true");
+        assertThat(output).contains("log_dir " + temporaryFolder.getRoot().toString());
+        assertThat(output).contains("archive_command /path/to/script.sh %path");
+        assertThat(output).contains("roll_cycle DAILY");
+        assertThat(output).contains("max_log_size 100000");
+        assertThat(output).contains("max_queue_weight 10000");
+        assertThat(output).contains("max_archive_retries 5");
+        assertThat(output).contains("block false");
+    }
+
+    @SuppressWarnings("DynamicRegexReplaceableByCompiledPattern")
+    private void testDefaultOutput(final String getFullQueryLogOutput)
+    {
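+        // a freshly constructed FullQueryLoggerOptions holds the default settings used for comparison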
+        final FullQueryLoggerOptions options = new FullQueryLoggerOptions();
+        final String output = getFullQueryLogOutput.replaceAll("( )+", " ").trim();
+        assertThat(output).contains("enabled false");
+        assertThat(output).doesNotContain("log_dir " + temporaryFolder.getRoot().toString());
+        assertThat(output).doesNotContain("archive_command /path/to/script.sh %path");
+        assertThat(output).contains("roll_cycle " + options.roll_cycle);
+        assertThat(output).contains("max_log_size " + options.max_log_size);
+        assertThat(output).contains("max_queue_weight " + options.max_queue_weight);
+        assertThat(output).contains("max_archive_retries " + options.max_archive_retries);
+        assertThat(output).contains("block " + options.block);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/GossipInfoTest.java b/test/unit/org/apache/cassandra/tools/nodetool/GossipInfoTest.java
new file mode 100644
index 0000000..7ae4b77
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/GossipInfoTest.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.NoPayload;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.tools.ToolRunner;
+import org.apache.cassandra.utils.FBUtilities;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.cassandra.net.Verb.ECHO_REQ;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class GossipInfoTest extends CQLTester
+{
+    private static String token;
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
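+        // capture one of this node's tokens so the gossip STATUS line can be asserted against it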
+        token = StorageService.instance.getTokens().get(0);
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help output, please update the docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "gossipinfo");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                "        nodetool gossipinfo - Shows the gossip information for the cluster\n" +
+                "\n" +
+                "SYNOPSIS\n" +
+                "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                "                [(-u <username> | --username <username>)] gossipinfo\n" +
+                "                [(-r | --resolve-ip)]\n" +
+                "\n" +
+                "OPTIONS\n" +
+                "        -h <host>, --host <host>\n" +
+                "            Node hostname or ip address\n" +
+                "\n" +
+                "        -p <port>, --port <port>\n" +
+                "            Remote jmx agent port number\n" +
+                "\n" +
+                "        -pp, --print-port\n" +
+                "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                "\n" +
+                "        -pw <password>, --password <password>\n" +
+                "            Remote jmx agent password\n" +
+                "\n" +
+                "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                "            Path to the JMX password file\n" +
+                "\n" +
+                "        -r, --resolve-ip\n" +
+                "            Show node domain names instead of IPs\n" +
+                "\n" +
+                "        -u <username>, --username <username>\n" +
+                "            Remote jmx agent username\n" +
+                "\n" +
+                "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testGossipInfo()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("gossipinfo");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        Assertions.assertThat(stdout).contains("/127.0.0.1");
+        Assertions.assertThat(stdout).containsPattern("\\s+generation:[0-9]+");
+        Assertions.assertThat(stdout).containsPattern("heartbeat:[0-9]+");
+        Assertions.assertThat(stdout).containsPattern("STATUS:[0-9]+:NORMAL," + token);
+        Assertions.assertThat(stdout).containsPattern("SCHEMA:.+");
+        Assertions.assertThat(stdout).containsPattern("DC:[0-9]+:datacenter1");
+        Assertions.assertThat(stdout).containsPattern("RACK:[0-9]+:rack1");
+        Assertions.assertThat(stdout).containsPattern("RELEASE_VERSION:.+");
+        Assertions.assertThat(stdout).containsPattern("RPC_ADDRESS:[0-9]+:127.0.0.1");
+        Assertions.assertThat(stdout).containsPattern("NET_VERSION:[0-9]+:.+");
+        Assertions.assertThat(stdout).containsPattern("HOST_ID:[0-9]+:.+");
+        Assertions.assertThat(stdout).containsPattern("NATIVE_ADDRESS_AND_PORT:[0-9]+:127.0.0.1:[0-9]+");
+        Assertions.assertThat(stdout).containsPattern("SSTABLE_VERSIONS:[0-9]+:");
+        Assertions.assertThat(stdout).containsPattern("STATUS_WITH_PORT:[0-9]+:NORMAL,.+");
+        Assertions.assertThat(stdout).containsPattern("TOKENS:[0-9]+:<hidden>");
+
+        // Make sure heartbeats are detected
+        Message<NoPayload> echoMessageOut = Message.out(ECHO_REQ, NoPayload.noPayload);
+        MessagingService.instance().send(echoMessageOut, FBUtilities.getBroadcastAddressAndPort());
+
+        String origHeartbeatCount = StringUtils.substringBetween(stdout, "heartbeat:", "\n");
+        tool = ToolRunner.invokeNodetool("gossipinfo");
+        tool.assertOnCleanExit();
+        String newHeartbeatCount = StringUtils.substringBetween(tool.getStdout(), "heartbeat:", "\n");
+        assertThat(Integer.parseInt(origHeartbeatCount)).isLessThanOrEqualTo(Integer.parseInt(newHeartbeatCount));
+    }
+
+    @Test
+    public void testGossipInfoWithPortPrint()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("-pp", "gossipinfo");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        Assertions.assertThat(stdout).containsPattern("/127.0.0.1\\:[0-9]+\\s+generation");
+    }
+
+    @Test
+    public void testGossipInfoWithResolveIp()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("gossipinfo", "--resolve-ip");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        Assertions.assertThat(stdout).containsPattern("^localhost\\s+generation");
+    }
+
+    @Test
+    public void testGossipInfoWithPortPrintAndResolveIp()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("-pp", "gossipinfo", "--resolve-ip");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        Assertions.assertThat(stdout).containsPattern("^localhost\\:[0-9]+\\s+generation");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCacheTest.java
new file mode 100644
index 0000000..40d7ffb
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCacheTest.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.datastax.driver.core.EndPoint;
+import com.datastax.driver.core.PlainTextAuthProvider;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.IAuthenticator;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.PasswordAuthenticator;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.getRolesReadCount;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class InvalidateCredentialsCacheTest extends CQLTester
+{
+    private static IAuthenticator.SaslNegotiator roleANegotiator;
+    private static IAuthenticator.SaslNegotiator roleBNegotiator;
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+
+        PasswordAuthenticator passwordAuthenticator = (PasswordAuthenticator) DatabaseDescriptor.getAuthenticator();
+        roleANegotiator = passwordAuthenticator.newSaslNegotiator(null);
+        roleANegotiator.evaluateResponse(new PlainTextAuthProvider(ROLE_A.getRoleName(), "ignored")
+                .newAuthenticator((EndPoint) null, null)
+                .initialResponse());
+        roleBNegotiator = passwordAuthenticator.newSaslNegotiator(null);
+        roleBNegotiator.evaluateResponse(new PlainTextAuthProvider(ROLE_B.getRoleName(), "ignored")
+                .newAuthenticator((EndPoint) null, null)
+                .initialResponse());
+
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help output, please update the docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "invalidatecredentialscache");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool invalidatecredentialscache - Invalidate the credentials cache\n" +
+                        "\n" +
+                        "SYNOPSIS\n" +
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)] invalidatecredentialscache\n" +
+                        "                [--] [<role>...]\n" +
+                        "\n" +
+                        "OPTIONS\n" +
+                        "        -h <host>, --host <host>\n" +
+                        "            Node hostname or ip address\n" +
+                        "\n" +
+                        "        -p <port>, --port <port>\n" +
+                        "            Remote jmx agent port number\n" +
+                        "\n" +
+                        "        -pp, --print-port\n" +
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                        "\n" +
+                        "        -pw <password>, --password <password>\n" +
+                        "            Remote jmx agent password\n" +
+                        "\n" +
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                        "            Path to the JMX password file\n" +
+                        "\n" +
+                        "        -u <username>, --username <username>\n" +
+                        "            Remote jmx agent username\n" +
+                        "\n" +
+                        "        --\n" +
+                        "            This option can be used to separate command-line options from the\n" +
+                        "            list of argument, (useful when arguments might be mistaken for\n" +
+                        "            command-line options\n" +
+                        "\n" +
+                        "        [<role>...]\n" +
+                        "            List of roles to invalidate. By default, all roles\n" +
+                        "\n" +
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testInvalidateSingleCredential()
+    {
+        // cache credential
+        roleANegotiator.getAuthenticatedUser();
+        long originalReadsCount = getRolesReadCount();
+
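+        // getRolesReadCount() tracks reads of the underlying roles table; an unchanged count means the cache served the lookup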
+        // ensure credential is cached
+        assertThat(roleANegotiator.getAuthenticatedUser()).isEqualTo(new AuthenticatedUser(ROLE_A.getRoleName()));
+        assertThat(originalReadsCount).isEqualTo(getRolesReadCount());
+
+        // invalidate credential
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatecredentialscache", ROLE_A.getRoleName());
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure credential is reloaded
+        assertThat(roleANegotiator.getAuthenticatedUser()).isEqualTo(new AuthenticatedUser(ROLE_A.getRoleName()));
+        assertThat(originalReadsCount).isLessThan(getRolesReadCount());
+    }
+
+    @Test
+    public void testInvalidateAllCredentials()
+    {
+        // cache credentials
+        roleANegotiator.getAuthenticatedUser();
+        roleBNegotiator.getAuthenticatedUser();
+        long originalReadsCount = getRolesReadCount();
+
+        // ensure credentials are cached
+        assertThat(roleANegotiator.getAuthenticatedUser()).isEqualTo(new AuthenticatedUser(ROLE_A.getRoleName()));
+        assertThat(roleBNegotiator.getAuthenticatedUser()).isEqualTo(new AuthenticatedUser(ROLE_B.getRoleName()));
+        assertThat(originalReadsCount).isEqualTo(getRolesReadCount());
+
+        // invalidate both credentials
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatecredentialscache");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure credential for roleA is reloaded
+        assertThat(roleANegotiator.getAuthenticatedUser()).isEqualTo(new AuthenticatedUser(ROLE_A.getRoleName()));
+        long readsCountAfterFirstReLoad = getRolesReadCount();
+        assertThat(originalReadsCount).isLessThan(readsCountAfterFirstReLoad);
+
+        // ensure credential for roleB is reloaded
+        assertThat(roleBNegotiator.getAuthenticatedUser()).isEqualTo(new AuthenticatedUser(ROLE_B.getRoleName()));
+        long readsCountAfterSecondReLoad = getRolesReadCount();
+        assertThat(readsCountAfterFirstReLoad).isLessThan(readsCountAfterSecondReLoad);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCacheTest.java
new file mode 100644
index 0000000..f44a274
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCacheTest.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.Set;
+import javax.security.auth.Subject;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthCacheService;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.CassandraPrincipal;
+import org.apache.cassandra.auth.IAuthorizer;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.JMXResource;
+import org.apache.cassandra.auth.Permission;
+import org.apache.cassandra.auth.jmx.AuthorizationProxy;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.getRolePermissionsReadCount;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class InvalidateJmxPermissionsCacheTest extends CQLTester
+{
+    private static final AuthorizationProxy authorizationProxy = new AuthTestUtils.NoAuthSetupAuthorizationProxy();
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+
+        JMXResource rootJmxResource = JMXResource.root();
+        Set<Permission> jmxPermissions = rootJmxResource.applicablePermissions();
+
+        IAuthorizer authorizer = DatabaseDescriptor.getAuthorizer();
+        authorizer.grant(AuthenticatedUser.SYSTEM_USER, jmxPermissions, rootJmxResource, ROLE_A);
+        authorizer.grant(AuthenticatedUser.SYSTEM_USER, jmxPermissions, rootJmxResource, ROLE_B);
+
+        AuthCacheService.initializeAndRegisterCaches();
+
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help output, please update the docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "invalidatejmxpermissionscache");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool invalidatejmxpermissionscache - Invalidate the JMX permissions\n" +
+                        "        cache\n" +
+                        "\n" +
+                        "SYNOPSIS\n" +
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)] invalidatejmxpermissionscache\n" +
+                        "                [--] [<role>...]\n" +
+                        "\n" +
+                        "OPTIONS\n" +
+                        "        -h <host>, --host <host>\n" +
+                        "            Node hostname or ip address\n" +
+                        "\n" +
+                        "        -p <port>, --port <port>\n" +
+                        "            Remote jmx agent port number\n" +
+                        "\n" +
+                        "        -pp, --print-port\n" +
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                        "\n" +
+                        "        -pw <password>, --password <password>\n" +
+                        "            Remote jmx agent password\n" +
+                        "\n" +
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                        "            Path to the JMX password file\n" +
+                        "\n" +
+                        "        -u <username>, --username <username>\n" +
+                        "            Remote jmx agent username\n" +
+                        "\n" +
+                        "        --\n" +
+                        "            This option can be used to separate command-line options from the\n" +
+                        "            list of argument, (useful when arguments might be mistaken for\n" +
+                        "            command-line options\n" +
+                        "\n" +
+                        "        [<role>...]\n" +
+                        "            List of roles to invalidate. By default, all roles\n" +
+                        "\n" +
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testInvalidateSingleJMXPermission()
+    {
+        Subject userSubject = subject(ROLE_A.getRoleName());
+
+        // cache role permission
+        authorizationProxy.authorize(userSubject, "queryNames", null);
+        long originalReadsCount = getRolePermissionsReadCount();
+
+        // ensure role permission is cached
+        assertThat(authorizationProxy.authorize(userSubject, "queryNames", null)).isTrue();
+        assertThat(originalReadsCount).isEqualTo(getRolePermissionsReadCount());
+
+        // invalidate role permission
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatejmxpermissionscache", ROLE_A.getRoleName());
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure role permission is reloaded
+        assertThat(authorizationProxy.authorize(userSubject, "queryNames", null)).isTrue();
+        assertThat(originalReadsCount).isLessThan(getRolePermissionsReadCount());
+    }
+
+    @Test
+    public void testInvalidateAllJMXPermissions()
+    {
+        Subject roleASubject = subject(ROLE_A.getRoleName());
+        Subject roleBSubject = subject(ROLE_B.getRoleName());
+
+        // cache role permissions
+        authorizationProxy.authorize(roleASubject, "queryNames", null);
+        authorizationProxy.authorize(roleBSubject, "queryNames", null);
+        long originalReadsCount = getRolePermissionsReadCount();
+
+        // ensure role permissions are cached
+        assertThat(authorizationProxy.authorize(roleASubject, "queryNames", null)).isTrue();
+        assertThat(authorizationProxy.authorize(roleBSubject, "queryNames", null)).isTrue();
+        assertThat(originalReadsCount).isEqualTo(getRolePermissionsReadCount());
+
+        // invalidate both role permissions
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatejmxpermissionscache");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure role permission for roleA is reloaded
+        assertThat(authorizationProxy.authorize(roleASubject, "queryNames", null)).isTrue();
+        long readsCountAfterFirstReLoad = getRolePermissionsReadCount();
+        assertThat(originalReadsCount).isLessThan(readsCountAfterFirstReLoad);
+
+        // ensure role permission for roleB is reloaded
+        assertThat(authorizationProxy.authorize(roleBSubject, "queryNames", null)).isTrue();
+        long readsCountAfterSecondReLoad = getRolePermissionsReadCount();
+        assertThat(readsCountAfterFirstReLoad).isLessThan(readsCountAfterSecondReLoad);
+    }
+
+    private static Subject subject(String roleName)
+    {
+        Subject subject = new Subject();
+        subject.getPrincipals().add(new CassandraPrincipal(roleName));
+        return subject;
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCacheTest.java
new file mode 100644
index 0000000..c54e526
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCacheTest.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthCacheService;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.getNetworkPermissionsReadCount;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class InvalidateNetworkPermissionsCacheTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+        AuthCacheService.initializeAndRegisterCaches();
+
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "invalidatenetworkpermissionscache");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool invalidatenetworkpermissionscache - Invalidate the network\n" +
+                        "        permissions cache\n" +
+                        "\n" +
+                        "SYNOPSIS\n" +
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)]\n" +
+                        "                invalidatenetworkpermissionscache [--] [<role>...]\n" +
+                        "\n" +
+                        "OPTIONS\n" +
+                        "        -h <host>, --host <host>\n" +
+                        "            Node hostname or ip address\n" +
+                        "\n" +
+                        "        -p <port>, --port <port>\n" +
+                        "            Remote jmx agent port number\n" +
+                        "\n" +
+                        "        -pp, --print-port\n" +
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                        "\n" +
+                        "        -pw <password>, --password <password>\n" +
+                        "            Remote jmx agent password\n" +
+                        "\n" +
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                        "            Path to the JMX password file\n" +
+                        "\n" +
+                        "        -u <username>, --username <username>\n" +
+                        "            Remote jmx agent username\n" +
+                        "\n" +
+                        "        --\n" +
+                        "            This option can be used to separate command-line options from the\n" +
+                        "            list of argument, (useful when arguments might be mistaken for\n" +
+                        "            command-line options\n" +
+                        "\n" +
+                        "        [<role>...]\n" +
+                        "            List of roles to invalidate. By default, all roles\n" +
+                        "\n" +
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testInvalidateSingleNetworkPermission()
+    {
+        AuthenticatedUser role = new AuthenticatedUser(ROLE_A.getRoleName());
+
+        // cache network permission
+        role.hasLocalAccess();
+        long originalReadsCount = getNetworkPermissionsReadCount();
+
+        // ensure network permission is cached
+        assertThat(role.hasLocalAccess()).isTrue();
+        assertThat(originalReadsCount).isEqualTo(getNetworkPermissionsReadCount());
+
+        // invalidate network permission
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatenetworkpermissionscache", ROLE_A.getRoleName());
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure network permission is reloaded
+        assertThat(role.hasLocalAccess()).isTrue();
+        assertThat(originalReadsCount).isLessThan(getNetworkPermissionsReadCount());
+    }
+
+    @Test
+    public void testInvalidateAllNetworkPermissions()
+    {
+        AuthenticatedUser roleA = new AuthenticatedUser(ROLE_A.getRoleName());
+        AuthenticatedUser roleB = new AuthenticatedUser(ROLE_B.getRoleName());
+
+        // cache network permissions
+        roleA.hasLocalAccess();
+        roleB.hasLocalAccess();
+        long originalReadsCount = getNetworkPermissionsReadCount();
+
+        // ensure network permissions are cached
+        assertThat(roleA.hasLocalAccess()).isTrue();
+        assertThat(roleB.hasLocalAccess()).isTrue();
+        assertThat(originalReadsCount).isEqualTo(getNetworkPermissionsReadCount());
+
+        // invalidate both network permissions
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatenetworkpermissionscache");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure network permission for roleA is reloaded
+        assertThat(roleA.hasLocalAccess()).isTrue();
+        long readsCountAfterFirstReLoad = getNetworkPermissionsReadCount();
+        assertThat(originalReadsCount).isLessThan(readsCountAfterFirstReLoad);
+
+        // ensure network permission for roleB is reloaded
+        assertThat(roleB.hasLocalAccess()).isTrue();
+        long readsCountAfterSecondReLoad = getNetworkPermissionsReadCount();
+        assertThat(readsCountAfterFirstReLoad).isLessThan(readsCountAfterSecondReLoad);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCacheTest.java
new file mode 100644
index 0000000..e6d2ba1
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCacheTest.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthCacheService;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.DataResource;
+import org.apache.cassandra.auth.FunctionResource;
+import org.apache.cassandra.auth.IAuthorizer;
+import org.apache.cassandra.auth.IResource;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.auth.JMXResource;
+import org.apache.cassandra.auth.Permission;
+import org.apache.cassandra.auth.RoleResource;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.getRolePermissionsReadCount;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class InvalidatePermissionsCacheTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+        AuthCacheService.initializeAndRegisterCaches();
+
+        List<IResource> resources = Arrays.asList(
+                DataResource.root(),
+                DataResource.keyspace(KEYSPACE),
+                DataResource.allTables(KEYSPACE),
+                DataResource.table(KEYSPACE, "t1"),
+                RoleResource.root(),
+                RoleResource.role("role_x"),
+                FunctionResource.root(),
+                FunctionResource.keyspace(KEYSPACE),
+                // A particular function is excluded here and covered by a separate test because granting
+                // permissions requires a registered function, and a function cannot be registered via
+                // CQLTester.createFunction from a static context. That's why it is initialized in a separate test case.
+                JMXResource.root(),
+                JMXResource.mbean("org.apache.cassandra.auth:type=*"));
+
+        IAuthorizer authorizer = DatabaseDescriptor.getAuthorizer();
+        for (IResource resource : resources)
+        {
+            Set<Permission> permissions = resource.applicablePermissions();
+            authorizer.grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_A);
+            authorizer.grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_B);
+        }
+
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "invalidatepermissionscache");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool invalidatepermissionscache - Invalidate the permissions cache\n" +
+                        "\n" +
+                        "SYNOPSIS\n" +
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)] invalidatepermissionscache\n" +
+                        "                [--all-functions] [--all-keyspaces] [--all-mbeans] [--all-roles]\n" +
+                        "                [--all-tables] [--function <function>]\n" +
+                        "                [--functions-in-keyspace <functions-in-keyspace>]\n" +
+                        "                [--keyspace <keyspace>] [--mbean <mbean>] [--role <role>]\n" +
+                        "                [--table <table>] [--] [<role>]\n" +
+                        "\n" +
+                        "OPTIONS\n" +
+                        "        --all-functions\n" +
+                        "            Invalidate permissions for 'ALL FUNCTIONS'\n" +
+                        "\n" +
+                        "        --all-keyspaces\n" +
+                        "            Invalidate permissions for 'ALL KEYSPACES'\n" +
+                        "\n" +
+                        "        --all-mbeans\n" +
+                        "            Invalidate permissions for 'ALL MBEANS'\n" +
+                        "\n" +
+                        "        --all-roles\n" +
+                        "            Invalidate permissions for 'ALL ROLES'\n" +
+                        "\n" +
+                        "        --all-tables\n" +
+                        "            Invalidate permissions for 'ALL TABLES'\n" +
+                        "\n" +
+                        "        --function <function>\n" +
+                        "            Function to invalidate permissions for (you must specify\n" +
+                        "            --functions-in-keyspace for using this option; function format:\n" +
+                        "            name[arg1^..^agrN], for example: foo[Int32Type^DoubleType])\n" +
+                        "\n" +
+                        "        --functions-in-keyspace <functions-in-keyspace>\n" +
+                        "            Keyspace to invalidate permissions for\n" +
+                        "\n" +
+                        "        -h <host>, --host <host>\n" +
+                        "            Node hostname or ip address\n" +
+                        "\n" +
+                        "        --keyspace <keyspace>\n" +
+                        "            Keyspace to invalidate permissions for\n" +
+                        "\n" +
+                        "        --mbean <mbean>\n" +
+                        "            MBean to invalidate permissions for\n" +
+                        "\n" +
+                        "        -p <port>, --port <port>\n" +
+                        "            Remote jmx agent port number\n" +
+                        "\n" +
+                        "        -pp, --print-port\n" +
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                        "\n" +
+                        "        -pw <password>, --password <password>\n" +
+                        "            Remote jmx agent password\n" +
+                        "\n" +
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                        "            Path to the JMX password file\n" +
+                        "\n" +
+                        "        --role <role>\n" +
+                        "            Role to invalidate permissions for\n" +
+                        "\n" +
+                        "        --table <table>\n" +
+                        "            Table to invalidate permissions for (you must specify --keyspace for\n" +
+                        "            using this option)\n" +
+                        "\n" +
+                        "        -u <username>, --username <username>\n" +
+                        "            Remote jmx agent username\n" +
+                        "\n" +
+                        "        --\n" +
+                        "            This option can be used to separate command-line options from the\n" +
+                        "            list of argument, (useful when arguments might be mistaken for\n" +
+                        "            command-line options\n" +
+                        "\n" +
+                        "        [<role>]\n" +
+                        "            A role for which permissions to specified resources need to be\n" +
+                        "            invalidated\n" +
+                        "\n" +
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testInvalidatePermissionsWithIncorrectParameters()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatepermissionscache", "--all-keyspaces");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout())
+                .isEqualTo(wrapByDefaultNodetoolMessage("No resource options allowed without a <role> being specified"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("invalidatepermissionscache", "role1");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout())
+                .isEqualTo(wrapByDefaultNodetoolMessage("No resource options specified"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("invalidatepermissionscache", "role1", "--invalid-option");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout())
+                .isEqualTo(wrapByDefaultNodetoolMessage("A single <role> is only supported / you have a typo in the resource options spelling"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("invalidatepermissionscache", "role1", "--all-tables");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout())
+                .isEqualTo(wrapByDefaultNodetoolMessage("--all-tables option should be passed along with --keyspace option"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("invalidatepermissionscache", "role1", "--table", "t1");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout())
+                .isEqualTo(wrapByDefaultNodetoolMessage("--table option should be passed along with --keyspace option"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("invalidatepermissionscache", "role1", "--function", "f[Int32Type]");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout())
+                .isEqualTo(wrapByDefaultNodetoolMessage("--function option should be passed along with --functions-in-keyspace option"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("invalidatepermissionscache", "role1", "--functions-in-keyspace",
+                KEYSPACE, "--function", "f[x]");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout())
+                .isEqualTo(wrapByDefaultNodetoolMessage("An error was encountered when looking up function definition: Unable to find abstract-type class 'org.apache.cassandra.db.marshal.x'"));
+        assertThat(tool.getStderr()).isEmpty();
+    }
+
+    @Test
+    public void testInvalidatePermissionsForEveryResourceExceptFunction()
+    {
+        assertInvalidation(DataResource.root(), Collections.singletonList("--all-keyspaces"));
+        assertInvalidation(DataResource.keyspace(KEYSPACE), Arrays.asList("--keyspace", KEYSPACE));
+        assertInvalidation(DataResource.allTables(KEYSPACE), Arrays.asList("--keyspace", KEYSPACE, "--all-tables"));
+        assertInvalidation(DataResource.table(KEYSPACE, "t1"),
+                Arrays.asList("--keyspace", KEYSPACE, "--table", "t1"));
+        assertInvalidation(RoleResource.root(), Collections.singletonList("--all-roles"));
+        assertInvalidation(RoleResource.role("role_x"), Arrays.asList("--role", "role_x"));
+        assertInvalidation(FunctionResource.root(), Collections.singletonList("--all-functions"));
+        assertInvalidation(FunctionResource.keyspace(KEYSPACE), Arrays.asList("--functions-in-keyspace", KEYSPACE));
+        assertInvalidation(JMXResource.root(), Collections.singletonList("--all-mbeans"));
+        assertInvalidation(JMXResource.mbean("org.apache.cassandra.auth:type=*"),
+                Arrays.asList("--mbean", "org.apache.cassandra.auth:type=*"));
+    }
+
+    @Test
+    public void testInvalidatePermissionsForFunction() throws Throwable
+    {
+        String keyspaceAndFunctionName = createFunction(KEYSPACE, "int",
+                " CREATE FUNCTION %s (val int)" +
+                        " CALLED ON NULL INPUT" +
+                        " RETURNS int" +
+                        " LANGUAGE java" +
+                        " AS 'return val;'");
+        String functionName = StringUtils.split(keyspaceAndFunctionName, ".")[1];
+
+        FunctionResource resource = FunctionResource.function(KEYSPACE, functionName, Collections.singletonList(Int32Type.instance));
+        Set<Permission> permissions = resource.applicablePermissions();
+        DatabaseDescriptor.getAuthorizer().grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_A);
+        DatabaseDescriptor.getAuthorizer().grant(AuthenticatedUser.SYSTEM_USER, permissions, resource, ROLE_B);
+
+        assertInvalidation(resource,
+                Arrays.asList("--functions-in-keyspace", KEYSPACE, "--function", functionName + "[Int32Type]"));
+    }
+
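+    // Caches ROLE_A's permissions on the given resource, invalidates them via nodetool with the supplied
+    // resource options, and verifies that the next lookup reloads them from the underlying authorizer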
+    private void assertInvalidation(IResource resource, List<String> options)
+    {
+        Set<Permission> dataPermissions = resource.applicablePermissions();
+
+        AuthenticatedUser role = new AuthenticatedUser(ROLE_A.getRoleName());
+
+        // cache permission
+        role.getPermissions(resource);
+        long originalReadsCount = getRolePermissionsReadCount();
+
+        // ensure permission is cached
+        assertThat(role.getPermissions(resource)).isEqualTo(dataPermissions);
+        assertThat(originalReadsCount).isEqualTo(getRolePermissionsReadCount());
+
+        // invalidate permission
+        List<String> args = new ArrayList<>();
+        args.add("invalidatepermissionscache");
+        args.add(ROLE_A.getRoleName());
+        args.addAll(options);
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(args);
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure permission is reloaded
+        assertThat(role.getPermissions(resource)).isEqualTo(dataPermissions);
+        assertThat(originalReadsCount).isLessThan(getRolePermissionsReadCount());
+    }
+
+    @Test
+    public void testInvalidatePermissionsForAllRoles()
+    {
+        DataResource rootDataResource = DataResource.root();
+        Set<Permission> dataPermissions = rootDataResource.applicablePermissions();
+
+        AuthenticatedUser roleA = new AuthenticatedUser(ROLE_A.getRoleName());
+        AuthenticatedUser roleB = new AuthenticatedUser(ROLE_B.getRoleName());
+
+        // cache permissions
+        roleA.getPermissions(rootDataResource);
+        roleB.getPermissions(rootDataResource);
+        long originalReadsCount = getRolePermissionsReadCount();
+
+        // ensure permissions are cached
+        assertThat(roleA.getPermissions(rootDataResource)).isEqualTo(dataPermissions);
+        assertThat(roleB.getPermissions(rootDataResource)).isEqualTo(dataPermissions);
+        assertThat(originalReadsCount).isEqualTo(getRolePermissionsReadCount());
+
+        // invalidate both permissions
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidatepermissionscache");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure permission for roleA is reloaded
+        assertThat(roleA.getPermissions(rootDataResource)).isEqualTo(dataPermissions);
+        long readsCountAfterFirstReLoad = getRolePermissionsReadCount();
+        assertThat(originalReadsCount).isLessThan(readsCountAfterFirstReLoad);
+
+        // ensure permission for roleB is reloaded
+        assertThat(roleB.getPermissions(rootDataResource)).isEqualTo(dataPermissions);
+        long readsCountAfterSecondReLoad = getRolePermissionsReadCount();
+        assertThat(readsCountAfterFirstReLoad).isLessThan(readsCountAfterSecondReLoad);
+    }
+
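+    // Wraps an expected error message in nodetool's standard prefix and trailing help hint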
+    private String wrapByDefaultNodetoolMessage(String s)
+    {
+        return "nodetool: " + s + "\nSee 'nodetool help' or 'nodetool help <command>'.\n";
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateRolesCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateRolesCacheTest.java
new file mode 100644
index 0000000..bb8fb92
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateRolesCacheTest.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthCacheService;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.IRoleManager;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.apache.cassandra.auth.AuthTestUtils.getRolesReadCount;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class InvalidateRolesCacheTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+
+        IRoleManager roleManager = DatabaseDescriptor.getRoleManager();
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+        AuthCacheService.initializeAndRegisterCaches();
+
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "invalidaterolescache");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool invalidaterolescache - Invalidate the roles cache\n" +
+                        "\n" +
+                        "SYNOPSIS\n" +
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)] invalidaterolescache [--]\n" +
+                        "                [<role>...]\n" +
+                        "\n" +
+                        "OPTIONS\n" +
+                        "        -h <host>, --host <host>\n" +
+                        "            Node hostname or ip address\n" +
+                        "\n" +
+                        "        -p <port>, --port <port>\n" +
+                        "            Remote jmx agent port number\n" +
+                        "\n" +
+                        "        -pp, --print-port\n" +
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                        "\n" +
+                        "        -pw <password>, --password <password>\n" +
+                        "            Remote jmx agent password\n" +
+                        "\n" +
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                        "            Path to the JMX password file\n" +
+                        "\n" +
+                        "        -u <username>, --username <username>\n" +
+                        "            Remote jmx agent username\n" +
+                        "\n" +
+                        "        --\n" +
+                        "            This option can be used to separate command-line options from the\n" +
+                        "            list of argument, (useful when arguments might be mistaken for\n" +
+                        "            command-line options\n" +
+                        "\n" +
+                        "        [<role>...]\n" +
+                        "            List of roles to invalidate. By default, all roles\n" +
+                        "\n" +
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testInvalidateSingleRole()
+    {
+        AuthenticatedUser role = new AuthenticatedUser(ROLE_A.getRoleName());
+
+        // cache role
+        role.canLogin();
+        long originalReadsCount = getRolesReadCount();
+
+        // ensure role is cached
+        assertThat(role.canLogin()).isTrue();
+        assertThat(originalReadsCount).isEqualTo(getRolesReadCount());
+
+        // invalidate role
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidaterolescache", ROLE_A.getRoleName());
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure role is reloaded
+        assertThat(role.canLogin()).isTrue();
+        assertThat(originalReadsCount).isLessThan(getRolesReadCount());
+    }
+
+    @Test
+    public void testInvalidateAllRoles()
+    {
+        AuthenticatedUser roleA = new AuthenticatedUser(ROLE_A.getRoleName());
+        AuthenticatedUser roleB = new AuthenticatedUser(ROLE_B.getRoleName());
+
+        // cache roles
+        roleA.canLogin();
+        roleB.canLogin();
+        long originalReadsCount = getRolesReadCount();
+
+        // ensure roles are cached
+        assertThat(roleA.canLogin()).isTrue();
+        assertThat(roleB.canLogin()).isTrue();
+        assertThat(originalReadsCount).isEqualTo(getRolesReadCount());
+
+        // invalidate both roles
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("invalidaterolescache");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        // ensure role for roleA is reloaded
+        assertThat(roleA.canLogin()).isTrue();
+        long readsCountAfterFirstReLoad = getRolesReadCount();
+        assertThat(originalReadsCount).isLessThan(readsCountAfterFirstReLoad);
+
+        // ensure role for roleB is reloaded
+        assertThat(roleB.canLogin()).isTrue();
+        long readsCountAfterSecondReLoad = getRolesReadCount();
+        assertThat(readsCountAfterFirstReLoad).isLessThan(readsCountAfterSecondReLoad);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/NetStatsTest.java b/test/unit/org/apache/cassandra/tools/nodetool/NetStatsTest.java
new file mode 100644
index 0000000..f8bd530
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/NetStatsTest.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.List;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.NoPayload;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.streaming.SessionInfo;
+import org.apache.cassandra.streaming.StreamSession.State;
+import org.apache.cassandra.streaming.StreamSummary;
+import org.apache.cassandra.tools.ToolRunner;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.net.Verb.ECHO_REQ;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class NetStatsTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "netstats");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool netstats - Print network information on provided host\n" + 
+                        "        (connecting node by default)\n" + 
+                        "\n" + 
+                        "SYNOPSIS\n" + 
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" + 
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" + 
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" + 
+                        "                [(-u <username> | --username <username>)] netstats\n" + 
+                        "                [(-H | --human-readable)]\n" + 
+                        "\n" + 
+                        "OPTIONS\n" + 
+                        "        -h <host>, --host <host>\n" + 
+                        "            Node hostname or ip address\n" + 
+                        "\n" + 
+                        "        -H, --human-readable\n" + 
+                        "            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB\n" + 
+                        "\n" + 
+                        "        -p <port>, --port <port>\n" + 
+                        "            Remote jmx agent port number\n" + 
+                        "\n" + 
+                        "        -pp, --print-port\n" + 
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" + 
+                        "\n" + 
+                        "        -pw <password>, --password <password>\n" + 
+                        "            Remote jmx agent password\n" + 
+                        "\n" + 
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" + 
+                        "            Path to the JMX password file\n" + 
+                        "\n" + 
+                        "        -u <username>, --username <username>\n" + 
+                        "            Remote jmx agent username\n" + 
+                        "\n" + 
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testNetStats()
+    {
+        Message<NoPayload> echoMessageOut = Message.out(ECHO_REQ, NoPayload.noPayload);
+        MessagingService.instance().send(echoMessageOut, FBUtilities.getBroadcastAddressAndPort());
+
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("netstats");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Gossip messages                 n/a         0              2         0");
+    }
+
+    @Test
+    public void testHumanReadable() throws IOException
+    {
+        List<StreamSummary> streamSummaries = Collections.singletonList(new StreamSummary(TableId.generate(), 1, 1024));
+        SessionInfo info = new SessionInfo(InetAddressAndPort.getLocalHost(),
+                                           1,
+                                           InetAddressAndPort.getLocalHost(),
+                                           streamSummaries,
+                                           streamSummaries,
+                                           State.COMPLETE);
+
+        try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream out = new PrintStream(baos))
+        {
+            NetStats nstats = new NetStats();
+
+            nstats.printReceivingSummaries(out, info, false);
+            String stdout = getSummariesStdout(baos, out);
+            assertThat(stdout).doesNotContain("Kib");
+
+            baos.reset();
+            nstats.printSendingSummaries(out, info, false);
+            stdout = getSummariesStdout(baos, out);
+            assertThat(stdout).doesNotContain("Kib");
+
+            baos.reset();
+            nstats.printReceivingSummaries(out, info, true);
+            stdout = getSummariesStdout(baos, out);
+            assertThat(stdout).contains("KiB");
+
+            baos.reset();
+            nstats.printSendingSummaries(out, info, true);
+            stdout = getSummariesStdout(baos, out);
+            assertThat(stdout).contains("KiB");
+        }
+    }
+
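+    // Flushes both streams and returns the captured summaries as a UTF-8 string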
+    private String getSummariesStdout(ByteArrayOutputStream baos, PrintStream ps) throws IOException
+    {
+        baos.flush();
+        ps.flush();
+        return baos.toString(StandardCharsets.UTF_8.toString());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/RingTest.java b/test/unit/org/apache/cassandra/tools/nodetool/RingTest.java
index bc83f50..00c8bba 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/RingTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/RingTest.java
@@ -28,13 +28,8 @@
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.tools.ToolRunner;
 import org.apache.cassandra.utils.FBUtilities;
-import org.assertj.core.api.Assertions;
 
-import static org.hamcrest.CoreMatchers.*;
-import static org.hamcrest.Matchers.matchesPattern;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
 
 public class RingTest extends CQLTester
 {
@@ -43,9 +38,9 @@
     @BeforeClass
     public static void setup() throws Exception
     {
-        StorageService.instance.initServer();
-        token = StorageService.instance.getTokens().get(0);
+        requireNetwork();
         startJMXServer();
+        token = StorageService.instance.getTokens().get(0);
     }
 
     /**
@@ -56,27 +51,21 @@
     {
         final HostStatWithPort host = new HostStatWithPort(null, FBUtilities.getBroadcastAddressAndPort(),
                                                            false, null);
-        validateRingOutput(host.ipOrDns(false),
-                            "ring");
-        Arrays.asList("-pp", "--print-port").forEach(arg -> {
-            validateRingOutput(host.ipOrDns(true),
-                               "-pp", "ring");
-        });
+        validateRingOutput(host.ipOrDns(false), "ring");
+        Arrays.asList("-pp", "--print-port").forEach(arg -> validateRingOutput(host.ipOrDns(true), "-pp", "ring"));
 
         final HostStatWithPort hostResolved = new HostStatWithPort(null, FBUtilities.getBroadcastAddressAndPort(),
                                                                    true, null);
-        Arrays.asList("-r", "--resolve-ip").forEach(arg -> {
-            validateRingOutput(hostResolved.ipOrDns(false),
-                               "ring", "-r");
-        });
-        validateRingOutput(hostResolved.ipOrDns(true),
-                            "-pp", "ring", "-r");
+        Arrays.asList("-r", "--resolve-ip").forEach(arg ->
+                validateRingOutput(hostResolved.ipOrDns(false), "ring", "-r"));
+        validateRingOutput(hostResolved.ipOrDns(true), "-pp", "ring", "-r");
     }
 
+    @SuppressWarnings("DynamicRegexReplaceableByCompiledPattern")
     private void validateRingOutput(String hostForm, String... args)
     {
-        ToolRunner.ToolResult nodetool = ToolRunner.invokeNodetool(args);
-        nodetool.assertOnCleanExit();
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(args);
+        tool.assertOnCleanExit();
         /*
          Datacenter: datacenter1
          ==========
@@ -85,35 +74,37 @@
          127.0.0.1       rack1       Up     Normal  45.71 KiB       100.00%             4652409154190094022
 
          */
-        String[] lines = nodetool.getStdout().split("\\R");
-        assertThat(lines[1].trim(), endsWith(SimpleSnitch.DATA_CENTER_NAME));
-        assertThat(lines[3], matchesPattern("Address *Rack *Status *State *Load *Owns *Token *"));
+        String[] lines = tool.getStdout().split("\\R");
+        assertThat(lines[1].trim()).endsWith(SimpleSnitch.DATA_CENTER_NAME);
+        assertThat(lines[3]).containsPattern("Address *Rack *Status *State *Load *Owns *Token *");
         String hostRing = lines[lines.length-4].trim(); // this command has a couple extra newlines and an empty error message at the end. Not messing with it.
-        assertThat(hostRing, startsWith(hostForm));
-        assertThat(hostRing, containsString(SimpleSnitch.RACK_NAME));
-        assertThat(hostRing, containsString("Up"));
-        assertThat(hostRing, containsString("Normal"));
-        assertThat(hostRing, matchesPattern(".*\\d+\\.\\d+ KiB.*"));
-        assertThat(hostRing, matchesPattern(".*\\d+\\.\\d+%.*"));
-        assertThat(hostRing, endsWith(token));
-        assertThat(hostRing, not(containsString("?")));
+        assertThat(hostRing).startsWith(hostForm);
+        assertThat(hostRing).contains(SimpleSnitch.RACK_NAME);
+        assertThat(hostRing).contains("Up");
+        assertThat(hostRing).contains("Normal");
+        assertThat(hostRing).containsPattern("\\d+\\.?\\d+ KiB");
+        assertThat(hostRing).containsPattern("\\d+\\.\\d+%");
+        assertThat(hostRing).endsWith(token);
+        assertThat(hostRing).doesNotContain("?");
     }
 
     @Test
     public void testWrongArgFailsAndPrintsHelp()
     {
         ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("--wrongarg", "ring");
-        Assertions.assertThat(tool.getStdout()).containsIgnoringCase("nodetool help");
-        assertEquals(1, tool.getExitCode());
-        assertTrue(tool.getCleanedStderr().isEmpty());
+        tool.assertCleanStdErr();
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("nodetool help");
     }
 
     @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
     public void testMaybeChangeDocs()
     {
         // If you added, modified options or help, please update docs if necessary
 
         ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "ring");
+        tool.assertOnCleanExit();
 
         String help = "NAME\n" + "        nodetool ring - Print information about the token ring\n"
                       + "\n"
@@ -156,7 +147,7 @@
                       + "            awareness)\n"
                       + "\n"
                       + "\n";
-        Assertions.assertThat(tool.getStdout()).isEqualTo(help);
+        assertThat(tool.getStdout()).isEqualTo(help);
     }
 
     @Test
@@ -164,14 +155,12 @@
     {
         // Bad KS
         ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("ring", "mockks");
-        Assertions.assertThat(tool.getStdout()).contains("The keyspace mockks, does not exist");
-        assertEquals(0, tool.getExitCode());
-        assertTrue(tool.getCleanedStderr().isEmpty());
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("The keyspace mockks, does not exist");
 
         // Good KS
         tool = ToolRunner.invokeNodetool("ring", "system_schema");
-        Assertions.assertThat(tool.getStdout()).contains("Datacenter: datacenter1");
-        assertEquals(0, tool.getExitCode());
-        assertTrue(tool.getCleanedStderr().isEmpty());
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Datacenter: datacenter1");
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetAuthCacheConfigTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetAuthCacheConfigTest.java
new file mode 100644
index 0000000..2c90486
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetAuthCacheConfigTest.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.auth.AuthCache;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.auth.NetworkPermissionsCacheMBean;
+import org.apache.cassandra.auth.PasswordAuthenticator;
+import org.apache.cassandra.auth.PermissionsCacheMBean;
+import org.apache.cassandra.auth.Roles;
+import org.apache.cassandra.auth.RolesCacheMBean;
+import org.apache.cassandra.auth.jmx.AuthorizationProxy;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SetAuthCacheConfigTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        CQLTester.setUpClass();
+        CQLTester.requireAuthentication();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added or modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "setauthcacheconfig");
+        tool.assertOnCleanExit();
+
+        String help = "NAME\n" +
+                      "        nodetool setauthcacheconfig - Set configuration for Auth cache\n" +
+                      "\n" +
+                      "SYNOPSIS\n" +
+                      "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                      "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                      "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                      "                [(-u <username> | --username <username>)] setauthcacheconfig\n" +
+                      "                --cache-name <cache-name> [--disable-active-update]\n" +
+                      "                [--enable-active-update] [--max-entries <max-entries>]\n" +
+                      "                [--update-interval <update-interval>]\n" +
+                      "                [--validity-period <validity-period>]\n" +
+                      "\n" +
+                      "OPTIONS\n" +
+                      "        --cache-name <cache-name>\n" +
+                      "            Name of Auth cache (required)\n" +
+                      "\n" +
+                      "        --disable-active-update\n" +
+                      "            Disable active update\n" +
+                      "\n" +
+                      "        --enable-active-update\n" +
+                      "            Enable active update\n" +
+                      "\n" +
+                      "        -h <host>, --host <host>\n" +
+                      "            Node hostname or ip address\n" +
+                      "\n" +
+                      "        --max-entries <max-entries>\n" +
+                      "            Max entries\n" +
+                      "\n" +
+                      "        -p <port>, --port <port>\n" +
+                      "            Remote jmx agent port number\n" +
+                      "\n" +
+                      "        -pp, --print-port\n" +
+                      "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                      "\n" +
+                      "        -pw <password>, --password <password>\n" +
+                      "            Remote jmx agent password\n" +
+                      "\n" +
+                      "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                      "            Path to the JMX password file\n" +
+                      "\n" +
+                      "        -u <username>, --username <username>\n" +
+                      "            Remote jmx agent username\n" +
+                      "\n" +
+                      "        --update-interval <update-interval>\n" +
+                      "            Update interval in milliseconds\n" +
+                      "\n" +
+                      "        --validity-period <validity-period>\n" +
+                      "            Validity period in milliseconds\n" +
+                      "\n" +
+                      "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testInvalidCacheName()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("setauthcacheconfig");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("Required option '--cache-name' is missing"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("setauthcacheconfig", "--cache-name");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("Required values for option 'cache-name' not provided"));
+        assertThat(tool.getStderr()).isEmpty();
+
+        tool = ToolRunner.invokeNodetool("setauthcacheconfig", "--cache-name", "wrong", "--validity-period", "1");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("Unknown cache name: wrong"));
+        assertThat(tool.getStderr()).isEmpty();
+    }
+
+    @Test
+    public void testNoOptionalParameters()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("setauthcacheconfig", "--cache-name", "PermissionCache");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("At least one optional parameter need to be passed"));
+        assertThat(tool.getStderr()).isEmpty();
+    }
+
+    @Test
+    public void testBothEnableAndDisableActiveUpdate()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("setauthcacheconfig",
+                                                               "--cache-name", "PermissionCache",
+                                                               "--enable-active-update",
+                                                               "--disable-active-update");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).isEqualTo(wrapByDefaultNodetoolMessage("enable-active-update and disable-active-update cannot be used together"));
+        assertThat(tool.getStderr()).isEmpty();
+    }
+
+    @Test
+    public void testSetConfig()
+    {
+        assertSetConfig(AuthenticatedUser.permissionsCache, PermissionsCacheMBean.CACHE_NAME);
+
+        PasswordAuthenticator passwordAuthenticator = (PasswordAuthenticator) DatabaseDescriptor.getAuthenticator();
+        assertSetConfig(passwordAuthenticator.getCredentialsCache(), PasswordAuthenticator.CredentialsCacheMBean.CACHE_NAME);
+
+        assertSetConfig(AuthorizationProxy.jmxPermissionsCache, AuthorizationProxy.JmxPermissionsCacheMBean.CACHE_NAME);
+
+        assertSetConfig(AuthenticatedUser.networkPermissionsCache, NetworkPermissionsCacheMBean.CACHE_NAME);
+
+        assertSetConfig(Roles.cache, RolesCacheMBean.CACHE_NAME);
+    }
+
+    @Test
+    public void testSetConfigDisabled()
+    {
+        assertSetConfigDisabled(AuthenticatedUser.permissionsCache, PermissionsCacheMBean.CACHE_NAME);
+
+        PasswordAuthenticator passwordAuthenticator = (PasswordAuthenticator) DatabaseDescriptor.getAuthenticator();
+        assertSetConfigDisabled(passwordAuthenticator.getCredentialsCache(), PasswordAuthenticator.CredentialsCacheMBean.CACHE_NAME);
+
+        assertSetConfigDisabled(AuthorizationProxy.jmxPermissionsCache, AuthorizationProxy.JmxPermissionsCacheMBean.CACHE_NAME);
+
+        assertSetConfigDisabled(AuthenticatedUser.networkPermissionsCache, NetworkPermissionsCacheMBean.CACHE_NAME);
+
+        assertSetConfigDisabled(Roles.cache, RolesCacheMBean.CACHE_NAME);
+    }
+
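+    // Applies fixed settings to the named cache via nodetool and verifies they are reflected on the AuthCache instance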
+    private void assertSetConfig(AuthCache<?, ?> authCache, String cacheName)
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("setauthcacheconfig",
+                                                               "--cache-name", cacheName,
+                                                               "--validity-period", "1",
+                                                               "--update-interval", "2",
+                                                               "--max-entries", "3",
+                                                               "--disable-active-update");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEqualTo("Changed Validity Period to 1\n" +
+                                               "Changed Update Interval to 2\n" +
+                                               "Changed Max Entries to 3\n" +
+                                               "Changed Active Update to false\n");
+
+        assertThat(authCache.getValidity()).isEqualTo(1);
+        assertThat(authCache.getUpdateInterval()).isEqualTo(2);
+        assertThat(authCache.getMaxEntries()).isEqualTo(3);
+        assertThat(authCache.getActiveUpdate()).isFalse();
+    }
+
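+    // Same as above, but passes an update interval of -1 to verify it is treated as disabled and falls back to the validity period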
+    private void assertSetConfigDisabled(AuthCache<?, ?> authCache, String cacheName)
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("setauthcacheconfig",
+                                                               "--cache-name", cacheName,
+                                                               "--validity-period", "1",
+                                                               "--update-interval", "-1",
+                                                               "--max-entries", "3",
+                                                               "--disable-active-update");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEqualTo("Changed Validity Period to 1\n" +
+                                               "Changed Update Interval to -1\n" +
+                                               "Changed Max Entries to 3\n" +
+                                               "Changed Active Update to false\n");
+        // -1 disables the update interval, in which case update_interval falls back to the value of validity_period
+        assertThat(authCache.getValidity()).isEqualTo(1);
+        assertThat(authCache.getUpdateInterval()).isEqualTo(1);
+        assertThat(authCache.getMaxEntries()).isEqualTo(3);
+        assertThat(authCache.getActiveUpdate()).isFalse();
+    }
+
+    private String wrapByDefaultNodetoolMessage(String s)
+    {
+        return "nodetool: " + s + "\nSee 'nodetool help' or 'nodetool help <command>'.\n";
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetDefaultKeyspaceRFTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetDefaultKeyspaceRFTest.java
new file mode 100644
index 0000000..92250a3
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetDefaultKeyspaceRFTest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SetDefaultKeyspaceRFTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added, modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "setdefaultrf");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool setdefaultrf - Sets default keyspace replication factor.\n" +
+                        "\n" +
+                        "SYNOPSIS\n" +
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)] setdefaultrf [--] <value>\n" +
+                        "\n" +
+                        "OPTIONS\n" +
+                        "        -h <host>, --host <host>\n" +
+                        "            Node hostname or ip address\n" +
+                        "\n" +
+                        "        -p <port>, --port <port>\n" +
+                        "            Remote jmx agent port number\n" +
+                        "\n" +
+                        "        -pp, --print-port\n" +
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+                        "\n" +
+                        "        -pw <password>, --password <password>\n" +
+                        "            Remote jmx agent password\n" +
+                        "\n" +
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+                        "            Path to the JMX password file\n" +
+                        "\n" +
+                        "        -u <username>, --username <username>\n" +
+                        "            Remote jmx agent username\n" +
+                        "\n" +
+                        "        --\n" +
+                        "            This option can be used to separate command-line options from the\n" +
+                        "            list of argument, (useful when arguments might be mistaken for\n" +
+                        "            command-line options\n" +
+                        "\n" +
+                        "        <value>\n" +
+                        "            Default replication factor\n" +
+                        "\n" +
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testSetDefaultRF()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("setdefaultrf", "2");
+        tool.assertOnCleanExit();
+        assertThat(DatabaseDescriptor.getDefaultKeyspaceRF()).isEqualTo(2);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java
new file mode 100644
index 0000000..40f82b0
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.service.StorageService;
+
+import static org.apache.cassandra.tools.ToolRunner.ToolResult;
+import static org.apache.cassandra.tools.ToolRunner.invokeNodetool;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Tests for {@code nodetool setcolumnindexsize} and {@code nodetool getcolumnindexsize}.
+ */
+public class SetGetColumnIndexSizeTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+    }
+
+    @Test
+    public void testNull()
+    {
+        assertSetInvalidColumnIndexSize(null, "Required parameters are missing: column_index_size", 1);
+    }
+
+    @Test
+    public void testPositive()
+    {
+        assertSetGetValidColumnIndexSize(7);
+    }
+
+    @Test
+    public void testMaxValue()
+    {
+        assertSetGetValidColumnIndexSize(2097151);
+    }
+
+    @Test
+    public void testZero()
+    {
+        assertSetGetValidColumnIndexSize(0);
+    }
+
+    @Test
+    public void testNegative()
+    {
+        assertSetInvalidColumnIndexSize("-7", "Invalid data storage: value must be non-negative", 1);
+    }
+
+    @Test
+    public void testInvalidValue()
+    {
+        assertSetInvalidColumnIndexSize("2097152", "column_index_size must be positive value <= 2147483646B, but was 2147483648B", 2);
+    }
+
+    @Test
+    public void testUnparseable()
+    {
+        assertSetInvalidColumnIndexSize("1.2", "column_index_size: can not convert \"1.2\" to a int", 1);
+        assertSetInvalidColumnIndexSize("value", "column_index_size: can not convert \"value\" to a int", 1);
+    }
+
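+    // Sets column_index_size via nodetool and verifies it is reflected by getcolumnindexsize and StorageService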
+    private static void assertSetGetValidColumnIndexSize(int columnIndexSizeInKB)
+    {
+        ToolResult tool = invokeNodetool("setcolumnindexsize", String.valueOf(columnIndexSizeInKB));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetColumnIndexSize(columnIndexSizeInKB);
+
+        assertThat(StorageService.instance.getColumnIndexSizeInKiB()).isEqualTo(columnIndexSizeInKB);
+    }
+
+    private static void assertSetInvalidColumnIndexSize(String columnIndexSizeInKB, String expectedErrorMessage, int expectedErrorCode)
+    {
+        ToolResult tool = columnIndexSizeInKB == null ? invokeNodetool("setcolumnindexsize")
+                                             : invokeNodetool("setcolumnindexsize", columnIndexSizeInKB);
+        assertThat(tool.getExitCode()).isEqualTo(expectedErrorCode);
+        assertThat(expectedErrorCode == 1 ? tool.getStdout() : tool.getStderr()).contains(expectedErrorMessage);
+    }
+
+    private static void assertGetColumnIndexSize(int expected)
+    {
+        ToolResult tool = invokeNodetool("getcolumnindexsize");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Current value for column_index_size: " + expected + " KiB");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetCompactionThroughputTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetCompactionThroughputTest.java
new file mode 100644
index 0000000..d15cc0c
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetCompactionThroughputTest.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.tools.ToolRunner.ToolResult;
+import static org.apache.cassandra.tools.ToolRunner.invokeNodetool;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Tests for {@code nodetool setcompactionthroughput} and {@code nodetool getcompactionthroughput}.
+ */
+public class SetGetCompactionThroughputTest extends CQLTester
+{
+    private static final int MAX_INT_CONFIG_VALUE_IN_MBIT = Integer.MAX_VALUE - 1;
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+    }
+
+    @Test
+    public void testNull()
+    {
+        assertSetInvalidThroughput(null, "Required parameters are missing: compaction_throughput");
+    }
+
+    @Test
+    public void testPositive()
+    {
+        assertSetGetValidThroughput(7);
+    }
+
+    @Test
+    public void testMaxValue()
+    {
+        assertSetGetValidThroughput(MAX_INT_CONFIG_VALUE_IN_MBIT);
+    }
+
+    @Test
+    public void testUpperBound()
+    {
+        assertSetInvalidThroughputMib(String.valueOf(Integer.MAX_VALUE));
+    }
+
+    @Test
+    public void testZero()
+    {
+        assertSetGetValidThroughput(0);
+    }
+
+    @Test
+    public void testUnparseable()
+    {
+        assertSetInvalidThroughput("1.2", "compaction_throughput: can not convert \"1.2\" to a Integer");
+        assertSetInvalidThroughput("value", "compaction_throughput: can not convert \"value\" to a Integer");
+        assertSetInvalidThroughput();
+        assertPreciseMibFlagNeeded();
+    }
+
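+    // Sets the compaction throughput via nodetool and verifies both the default and the -d (double precision) getters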
+    private static void assertSetGetValidThroughput(int throughput)
+    {
+        ToolResult tool = invokeNodetool("setcompactionthroughput", String.valueOf(throughput));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetThroughput(throughput);
+        assertGetThroughputDouble(throughput);
+    }
+
+    private static void assertSetInvalidThroughput(String throughput, String expectedErrorMessage)
+    {
+        ToolResult tool = throughput == null ? invokeNodetool("setcompactionthroughput")
+                                             : invokeNodetool("setcompactionthroughput", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains(expectedErrorMessage);
+    }
+
+    private static void assertSetInvalidThroughput()
+    {
+        DatabaseDescriptor.setCompactionThroughputBytesPerSec(500);
+        ToolResult tool = invokeNodetool("getstreamthroughput");
+        assertThat(tool.getExitCode()).isEqualTo(2);
+        assertThat(tool.getStderr()).contains("Use the -d flag to quiet this error and get the exact throughput in megabits/s");
+    }
+
+    private static void assertSetInvalidThroughputMib(String throughput)
+    {
+        ToolResult tool = invokeNodetool("setcompactionthroughput", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("compaction_throughput: 2147483647 is too large; it should be less than" +
+                                              " 2147483647 in MiB/s");
+    }
+
+    private static void assertPreciseMibFlagNeeded()
+    {
+        DatabaseDescriptor.setCompactionThroughputBytesPerSec(15);
+        ToolResult tool = invokeNodetool("getcompactionthroughput");
+        assertThat(tool.getExitCode()).isEqualTo(2);
+        assertThat(tool.getStderr()).contains("Use the -d flag to quiet this error and get the exact throughput in MiB/s");
+    }
+
+    private static void assertGetThroughput(int expected)
+    {
+        ToolResult tool = invokeNodetool("getcompactionthroughput");
+        tool.assertOnCleanExit();
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current compaction throughput: " + expected + " MB/s");
+        else
+            assertThat(tool.getStdout()).contains("Current compaction throughput: 0 MB/s");
+    }
+
+    private static void assertGetThroughputDouble(double expected)
+    {
+        ToolResult tool = invokeNodetool("getcompactionthroughput", "-d");
+        tool.assertOnCleanExit();
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current compaction throughput: " + expected + " MiB/s");
+        else
+            assertThat(tool.getStdout()).contains("Current compaction throughput: 0.0 MiB/s");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableInterDCStreamThroughputTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableInterDCStreamThroughputTest.java
new file mode 100644
index 0000000..4aea013
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableInterDCStreamThroughputTest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.streaming.StreamManager.StreamRateLimiter;
+import static org.apache.cassandra.tools.ToolRunner.ToolResult;
+import static org.apache.cassandra.tools.ToolRunner.invokeNodetool;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.withPrecision;
+
+/**
+ * Tests for entire SSTable {@code nodetool setinterdcstreamthroughput} and {@code nodetool getinterdcstreamthroughput}.
+ */
+public class SetGetEntireSSTableInterDCStreamThroughputTest extends CQLTester
+{
+    private static final int MAX_INT_CONFIG_VALUE_MIB = Integer.MAX_VALUE - 1;
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+    }
+
+    @Test
+    public void testNull()
+    {
+        assertSetInvalidThroughput(null, "Required parameters are missing: inter_dc_stream_throughput");
+    }
+
+    @Test
+    public void testPositive()
+    {
+        assertSetGetValidThroughput(7, 7 * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+    }
+
+    @Test
+    public void testMaxValue()
+    {
+        assertSetGetValidThroughput(MAX_INT_CONFIG_VALUE_MIB, MAX_INT_CONFIG_VALUE_MIB * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+    }
+
+    @Test
+    public void testUpperBound()
+    {
+        assertSetInvalidEntireSStableInterDCThroughputMib(String.valueOf(Integer.MAX_VALUE));
+    }
+
+    @Test
+    public void testZero()
+    {
+        assertSetGetValidThroughput(0, Double.MAX_VALUE);
+    }
+
+    @Test
+    public void testUnparseable()
+    {
+        assertSetInvalidThroughput("1.2", "inter_dc_stream_throughput: can not convert \"1.2\" to a int");
+        assertSetInvalidThroughput("value", "inter_dc_stream_throughput: can not convert \"value\" to a int");
+    }
+
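+    // Sets the entire-SSTable inter-DC throughput with the -e flag and verifies the reported value and the applied rate limiter rate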
+    private static void assertSetGetValidThroughput(int throughput, double rateInBytes)
+    {
+        ToolResult tool = invokeNodetool("setinterdcstreamthroughput", "-e", String.valueOf(throughput));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetThroughput(throughput);
+
+        assertThat(StreamRateLimiter.getEntireSSTableInterDCRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.01));
+    }
+
+    private static void assertSetInvalidThroughput(String throughput, String expectedErrorMessage)
+    {
+        ToolResult tool = throughput == null ? invokeNodetool("setinterdcstreamthroughput", "-e")
+                                             : invokeNodetool("setinterdcstreamthroughput", "-e", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains(expectedErrorMessage);
+    }
+
+    private static void assertSetInvalidEntireSStableInterDCThroughputMib(String throughput)
+    {
+        ToolResult tool = invokeNodetool("setinterdcstreamthroughput", "-e", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("entire_sstable_inter_dc_stream_throughput_outbound: 2147483647 is too large;" +
+                                              " it should be less than 2147483647 in MiB/s");
+    }
+
+    private static void assertGetThroughput(double expected)
+    {
+        ToolResult tool = invokeNodetool("getinterdcstreamthroughput", "-e");
+        tool.assertOnCleanExit();
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current entire SSTable inter-datacenter stream throughput: " + expected + " MiB/s");
+        else
+            assertThat(tool.getStdout()).contains("Current entire SSTable inter-datacenter stream throughput: unlimited");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableStreamThroughputTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableStreamThroughputTest.java
new file mode 100644
index 0000000..028f32e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableStreamThroughputTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+
+import static org.apache.cassandra.streaming.StreamManager.StreamRateLimiter;
+import static org.apache.cassandra.tools.ToolRunner.ToolResult;
+import static org.apache.cassandra.tools.ToolRunner.invokeNodetool;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.withPrecision;
+
+/**
+ * Tests for entire SSTable {@code nodetool setstreamthroughput} and {@code nodetool getstreamthroughput}.
+ */
+public class SetGetEntireSSTableStreamThroughputTest extends CQLTester
+{
+    private static final int MAX_INT_CONFIG_VALUE_MIB = Integer.MAX_VALUE - 1;
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+    }
+
+    @Test
+    public void testNull()
+    {
+        assertSetInvalidThroughput(null, "Required parameters are missing: stream_throughput");
+    }
+
+    @Test
+    public void testPositive()
+    {
+        assertSetGetValidThroughput(7, 7 * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+    }
+
+    @Test
+    public void testMaxValue()
+    {
+        assertSetGetValidThroughput(MAX_INT_CONFIG_VALUE_MIB, MAX_INT_CONFIG_VALUE_MIB * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+    }
+
+    @Test
+    public void testUpperBound()
+    {
+        assertSetInvalidEntireSStableThroughputMib(String.valueOf(Integer.MAX_VALUE));
+    }
+
+    @Test
+    public void testZero()
+    {
+        assertSetGetValidThroughput(0, Double.MAX_VALUE);
+    }
+
+    @Test
+    public void testUnparseable()
+    {
+        assertSetInvalidThroughput("1.2", "stream_throughput: can not convert \"1.2\" to a int");
+        assertSetInvalidThroughput("value", "stream_throughput: can not convert \"value\" to a int");
+    }
+
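+    // Sets the entire-SSTable stream throughput with the -e flag and verifies the value reported by getstreamthroughput -e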
+    private static void assertSetGetValidThroughput(int throughput)
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", "-e", String.valueOf(throughput));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetThroughput(throughput);
+    }
+
+    private static void assertSetGetValidThroughput(int throughput, double rateInBytes)
+    {
+        assertSetGetValidThroughput(throughput);
+
+        assertThat(StreamRateLimiter.getEntireSSTableRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.01));
+    }
+
+    private static void assertSetInvalidEntireSStableThroughputMib(String throughput)
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", "-e", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("entire_sstable_stream_throughput_outbound: 2147483647 is too large; it " +
+                                              "should be less than 2147483647 in MiB/s");
+    }
+
+    private static void assertSetInvalidThroughput(String throughput, String expectedErrorMessage)
+    {
+        ToolResult tool = throughput == null ? invokeNodetool("setstreamthroughput", "-e")
+                                             : invokeNodetool("setstreamthroughput", "-e", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains(expectedErrorMessage);
+    }
+
+    private static void assertGetThroughput(double expected)
+    {
+        ToolResult tool = invokeNodetool("getstreamthroughput", "-e");
+        tool.assertOnCleanExit();
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current entire SSTable stream throughput: " + expected + " MiB/s");
+        else
+            assertThat(tool.getStdout()).contains("Current entire SSTable stream throughput: unlimited");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetInterDCStreamThroughputTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetInterDCStreamThroughputTest.java
index 699c27b..0ef6b2c 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/SetGetInterDCStreamThroughputTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetInterDCStreamThroughputTest.java
@@ -21,6 +21,7 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.config.DataRateSpec;
 import org.apache.cassandra.cql3.CQLTester;
 
 import static org.apache.cassandra.streaming.StreamManager.StreamRateLimiter;
@@ -34,6 +35,14 @@
  */
 public class SetGetInterDCStreamThroughputTest extends CQLTester
 {
+    private static final int MAX_INT_CONFIG_VALUE_IN_MBIT = Integer.MAX_VALUE - 1;
+    private static final double BYTES_PER_MEGABIT = 125_000;
+    private static final int MAX_INT_CONFIG_VALUE_MIB = (int) (MAX_INT_CONFIG_VALUE_IN_MBIT * BYTES_PER_MEGABIT) / 1024 / 1024;
+    private static final double INTEGER_MAX_VALUE_MEGABITS_IN_BYTES = DataRateSpec.LongBytesPerSecondBound
+                                                                      .megabitsPerSecondInBytesPerSecond(MAX_INT_CONFIG_VALUE_IN_MBIT)
+                                                                      .toBytesPerSecond();
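+    // 1 megabit = 125,000 bytes and 1 MiB = 1,048,576 bytes, so 1 Mbit/s is 125,000 / 1,048,576 ≈ 0.1192 MiB/s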
+    private static final double MEBIBYTES_PER_MEGABIT = 0.11920928955078125;
+
     @BeforeClass
     public static void setup() throws Exception
     {
@@ -49,25 +58,43 @@
     @Test
     public void testPositive()
     {
-        assertSetGetValidThroughput(7, 7 * StreamRateLimiter.BYTES_PER_MEGABIT);
+        assertSetGetValidThroughput(7, 7 * BYTES_PER_MEGABIT);
+        assertSetGetValidThroughputMiB(7, 7 * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+        assertSetMbitGetMibValidThroughput(7, 7 * BYTES_PER_MEGABIT);
+    }
+
+    @Test
+    public void testSmallPositive()
+    {
+        // As part of CASSANDRA-15234 precision handling was tweaked; this test guards against regressions.
+        // Internally, data rate parameters and the rate limiter are stored as doubles, while users can only
+        // set and get integer values.
+        assertSetGetValidThroughput(1, 1 * BYTES_PER_MEGABIT);
+        assertSetGetValidThroughputMiB(1, 1 * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+        assertSetMbitGetMibValidThroughput(1, 1 * BYTES_PER_MEGABIT);
     }
 
     @Test
     public void testMaxValue()
     {
-        assertSetGetValidThroughput(Integer.MAX_VALUE, Integer.MAX_VALUE * StreamRateLimiter.BYTES_PER_MEGABIT);
+        assertSetGetValidThroughput(MAX_INT_CONFIG_VALUE_IN_MBIT, INTEGER_MAX_VALUE_MEGABITS_IN_BYTES);
+        assertSetGetValidThroughputMiB(MAX_INT_CONFIG_VALUE_MIB, MAX_INT_CONFIG_VALUE_MIB * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+        assertSetMbitGetMibValidThroughput(MAX_INT_CONFIG_VALUE_IN_MBIT, INTEGER_MAX_VALUE_MEGABITS_IN_BYTES);
+    }
+
+    @Test
+    public void testUpperBound()
+    {
+        assertSetInvalidThroughputMib(String.valueOf(Integer.MAX_VALUE));
+        assertSetInvalidThroughputMbit(String.valueOf(Integer.MAX_VALUE));
     }
 
     @Test
     public void testZero()
     {
         assertSetGetValidThroughput(0, Double.MAX_VALUE);
-    }
-
-    @Test
-    public void testNegative()
-    {
-        assertSetGetValidThroughput(-7, Double.MAX_VALUE);
+        assertSetGetValidThroughputMiB(0, Double.MAX_VALUE);
+        assertSetMbitGetMibValidThroughput(0, Double.MAX_VALUE);
     }
 
     @Test
@@ -75,6 +102,8 @@
     {
         assertSetInvalidThroughput("1.2", "inter_dc_stream_throughput: can not convert \"1.2\" to a int");
         assertSetInvalidThroughput("value", "inter_dc_stream_throughput: can not convert \"value\" to a int");
+        assertSetGetMoreFlagsIsInvalid();
+        assertDFlagNeeded();
     }
 
     private static void assertSetGetValidThroughput(int throughput, double rateInBytes)
@@ -85,6 +114,39 @@
 
         assertGetThroughput(throughput);
 
+        assertThat(StreamRateLimiter.getInterDCRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.04));
+    }
+
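+    // Sets the stream throughput to 1 MiB/s and verifies getstreamthroughput asks for the -d flag instead of printing an imprecise megabit value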
+    private static void assertDFlagNeeded()
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", "-m", String.valueOf(1));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        tool = invokeNodetool("getstreamthroughput");
+        assertThat(tool.getExitCode()).isEqualTo(2);
+        assertThat(tool.getStderr()).contains("Use the -d flag to quiet this error and get the exact throughput in megabits/s");
+    }
+
+    private static void assertSetGetValidThroughputMiB(int throughput, double rateInBytes)
+    {
+        ToolResult tool = invokeNodetool("setinterdcstreamthroughput", "-m", String.valueOf(throughput));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetThroughputMiB(throughput);
+
+        assertThat(StreamRateLimiter.getInterDCRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.01));
+    }
+
+    private static void assertSetMbitGetMibValidThroughput(int throughput, double rateInBytes)
+    {
+        ToolResult tool = invokeNodetool("setinterdcstreamthroughput", String.valueOf(throughput));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetThroughputMiB(throughput * MEBIBYTES_PER_MEGABIT);
+
         assertThat(StreamRateLimiter.getInterDCRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.01));
     }
 
@@ -96,10 +158,65 @@
         assertThat(tool.getStdout()).contains(expectedErrorMessage);
     }
 
+    private static void assertSetInvalidThroughputMib(String throughput)
+    {
+        ToolResult tool = invokeNodetool("setinterdcstreamthroughput", "-m", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("inter_dc_stream_throughput_outbound: 2147483647 is too large; it should be" +
+                                              " less than 2147483647 in megabits/s");
+    }
+
+    private static void assertSetInvalidThroughputMbit(String throughput)
+    {
+        ToolResult tool = invokeNodetool("setinterdcstreamthroughput", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("Invalid data rate: 2147483647 megabits per second; stream_throughput_outbound" +
+                                              " and inter_dc_stream_throughput_outbound should be between 0 and 2147483646 in " +
+                                              "megabits per second");
+    }
+
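+    // Verifies that mutually exclusive flags (-m, -e, -d) are rejected by both the set and get commands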
+    private static void assertSetGetMoreFlagsIsInvalid()
+    {
+        ToolResult tool = invokeNodetool("setinterdcstreamthroughput", "-m", "5", "-e", "5");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use -e and -m at the same time");
+
+        tool = invokeNodetool("getinterdcstreamthroughput", "-m", "-e");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+
+        tool = invokeNodetool("getinterdcstreamthroughput", "-m", "-d");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+
+        tool = invokeNodetool("getinterdcstreamthroughput", "-d", "-e");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+
+        tool = invokeNodetool("getinterdcstreamthroughput", "-m", "-e", "-d");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+    }
+
     private static void assertGetThroughput(int expected)
     {
         ToolResult tool = invokeNodetool("getinterdcstreamthroughput");
         tool.assertOnCleanExit();
-        assertThat(tool.getStdout()).contains("Current inter-datacenter stream throughput: " + expected + " Mb/s");
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current inter-datacenter stream throughput: " + expected + " Mb/s");
+        else
+            assertThat(tool.getStdout()).contains("Current inter-datacenter stream throughput: unlimited");
+    }
+
+    private static void assertGetThroughputMiB(double expected)
+    {
+        ToolResult tool = invokeNodetool("getinterdcstreamthroughput", "-m");
+        tool.assertOnCleanExit();
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current inter-datacenter stream throughput: " + expected + " MiB/s");
+        else
+            assertThat(tool.getStdout()).contains("Current inter-datacenter stream throughput: unlimited");
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetStreamThroughputTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetStreamThroughputTest.java
index 3bab4e8..911e0dc 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/SetGetStreamThroughputTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetStreamThroughputTest.java
@@ -21,6 +21,7 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import org.apache.cassandra.config.DataRateSpec;
 import org.apache.cassandra.cql3.CQLTester;
 
 import static org.assertj.core.api.Assertions.withPrecision;
@@ -35,6 +36,14 @@
  */
 public class SetGetStreamThroughputTest extends CQLTester
 {
+    private static final int MAX_INT_CONFIG_VALUE_IN_MBIT = Integer.MAX_VALUE - 1;
+    private static final double BYTES_PER_MEGABIT = 125_000;
+    private static final int MAX_INT_CONFIG_VALUE_MIB = (int) (MAX_INT_CONFIG_VALUE_IN_MBIT * BYTES_PER_MEGABIT) / 1024 / 1024;
+    private static final double INTEGER_MAX_VALUE_MEGABITS_IN_BYTES = DataRateSpec.LongBytesPerSecondBound
+                                                                      .megabitsPerSecondInBytesPerSecond(MAX_INT_CONFIG_VALUE_IN_MBIT)
+                                                                      .toBytesPerSecond();
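+    // 1 megabit = 125,000 bytes and 1 MiB = 1,048,576 bytes, so 1 Mbit/s is 125,000 / 1,048,576 ≈ 0.1192 MiB/s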
+    private static final double MEBIBYTES_PER_MEGABIT = 0.11920928955078125;
+
     @BeforeClass
     public static void setup() throws Exception
     {
@@ -50,25 +59,43 @@
     @Test
     public void testPositive()
     {
-        assertSetGetValidThroughput(7, 7 * StreamRateLimiter.BYTES_PER_MEGABIT);
+        assertSetGetValidThroughput(7, 7 * BYTES_PER_MEGABIT);
+        assertSetGetValidThroughputMiB(7, 7 * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+        assertSetMbitGetMibValidThroughput(7, 7 * BYTES_PER_MEGABIT);
+    }
+
+    @Test
+    public void testSmallPositive()
+    {
+        // As part of CASSANDRA-15234 precision handling was tweaked; this test guards against regressions.
+        // Internally, data rate parameters and the rate limiter are stored as doubles, while users can only
+        // set and get integer values.
+        assertSetGetValidThroughput(1, 1 * BYTES_PER_MEGABIT);
+        assertSetGetValidThroughputMiB(1, 1 * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+        assertSetMbitGetMibValidThroughput(1, 1 * BYTES_PER_MEGABIT);
     }
 
     @Test
     public void testMaxValue()
     {
-        assertSetGetValidThroughput(Integer.MAX_VALUE, Integer.MAX_VALUE * StreamRateLimiter.BYTES_PER_MEGABIT);
+        assertSetGetValidThroughput(MAX_INT_CONFIG_VALUE_IN_MBIT, INTEGER_MAX_VALUE_MEGABITS_IN_BYTES);
+        assertSetGetValidThroughputMiB(MAX_INT_CONFIG_VALUE_MIB, MAX_INT_CONFIG_VALUE_MIB * StreamRateLimiter.BYTES_PER_MEBIBYTE);
+        assertSetMbitGetMibValidThroughput(MAX_INT_CONFIG_VALUE_IN_MBIT, INTEGER_MAX_VALUE_MEGABITS_IN_BYTES);
+    }
+
+    @Test
+    public void testUpperBound()
+    {
+        assertSetInvalidThroughputMib(String.valueOf(Integer.MAX_VALUE));
+        assertSetInvalidThroughputMbit(String.valueOf(Integer.MAX_VALUE));
     }
 
     @Test
     public void testZero()
     {
         assertSetGetValidThroughput(0, Double.MAX_VALUE);
-    }
-
-    @Test
-    public void testNegative()
-    {
-        assertSetGetValidThroughput(-7, Double.MAX_VALUE);
+        assertSetGetValidThroughputMiB(0, Double.MAX_VALUE);
+        assertSetMbitGetMibValidThroughput(0, Double.MAX_VALUE);
     }
 
     @Test
@@ -76,6 +103,9 @@
     {
         assertSetInvalidThroughput("1.2", "stream_throughput: can not convert \"1.2\" to a int");
         assertSetInvalidThroughput("value", "stream_throughput: can not convert \"value\" to a int");
+        assertSetGetMoreFlagsIsInvalid();
+        assertDFlagNeeded();
     }
 
     private static void assertSetGetValidThroughput(int throughput, double rateInBytes)
@@ -86,6 +116,39 @@
 
         assertGetThroughput(throughput);
 
+        assertThat(StreamRateLimiter.getRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.04));
+    }
+
+    private static void assertSetGetValidThroughputMiB(int throughput, double rateInBytes)
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", "-m", String.valueOf(throughput));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetThroughputMiB(throughput);
+
+        assertThat(StreamRateLimiter.getRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.01));
+    }
+
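+    // Sets the stream throughput to 1 MiB/s and verifies getstreamthroughput asks for the -d flag instead of printing an imprecise megabit value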
+    private static void assertDFlagNeeded()
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", "-m", String.valueOf(1));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        tool = invokeNodetool("getstreamthroughput");
+        assertThat(tool.getExitCode()).isEqualTo(2);
+        assertThat(tool.getStderr()).contains("Use the -d flag to quiet this error and get the exact throughput in megabits/s");
+    }
+
+    private static void assertSetMbitGetMibValidThroughput(int throughput, double rateInBytes)
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", String.valueOf(throughput));
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEmpty();
+
+        assertGetThroughputMiB(throughput * MEBIBYTES_PER_MEGABIT);
+
         assertThat(StreamRateLimiter.getRateLimiterRateInBytes()).isEqualTo(rateInBytes, withPrecision(0.01));
     }
 
@@ -97,10 +160,64 @@
         assertThat(tool.getStdout()).contains(expectedErrorMessage);
     }
 
+    private static void assertSetInvalidThroughputMib(String throughput)
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", "-m", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("stream_throughput_outbound: 2147483647 is too large; it should be less " +
+                                              "than 2147483647 in megabits/s");
+    }
+
+    private static void assertSetInvalidThroughputMbit(String throughput)
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", throughput);
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("Invalid data rate: 2147483647 megabits per second; stream_throughput_outbound " +
+                                              "and inter_dc_stream_throughput_outbound should be between 0 and 2147483646 in megabits per second");
+    }
+
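+    // Verifies that mutually exclusive flags (-m, -e, -d) are rejected by both the set and get commands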
+    private static void assertSetGetMoreFlagsIsInvalid()
+    {
+        ToolResult tool = invokeNodetool("setstreamthroughput", "-m", "5", "-e", "5");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use -e and -m at the same time");
+
+        tool = invokeNodetool("getstreamthroughput", "-m", "-e");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+
+        tool = invokeNodetool("getstreamthroughput", "-m", "-d");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+
+        tool = invokeNodetool("getstreamthroughput", "-d", "-e");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+
+        tool = invokeNodetool("getstreamthroughput", "-m", "-e", "-d");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("You cannot use more than one flag with this command");
+    }
+
     private static void assertGetThroughput(int expected)
     {
         ToolResult tool = invokeNodetool("getstreamthroughput");
         tool.assertOnCleanExit();
-        assertThat(tool.getStdout()).contains("Current stream throughput: " + expected + " Mb/s");
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current stream throughput: " + expected + " Mb/s");
+        else
+            assertThat(tool.getStdout()).contains("Current stream throughput: unlimited");
+    }
+
+    private static void assertGetThroughputMiB(double expected)
+    {
+        ToolResult tool = invokeNodetool("getstreamthroughput", "-m");
+        tool.assertOnCleanExit();
+
+        if (expected > 0)
+            assertThat(tool.getStdout()).contains("Current stream throughput: " + expected + " MiB/s");
+        else
+            assertThat(tool.getStdout()).contains("Current stream throughput: unlimited");
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SjkTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SjkTest.java
index c183edf..9812ef2 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/SjkTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SjkTest.java
@@ -26,7 +26,7 @@
     @Test
     public void sjkHelpReturnsRc0()
     {
-        ToolRunner.ToolResult result = ToolRunner.invokeNodetool("sjk", "--help");
-        result.assertOnExitCode();
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("sjk", "--help");
+        tool.assertOnExitCode();
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SnapshotTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SnapshotTest.java
index 428438f..2fbaf95 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/SnapshotTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SnapshotTest.java
@@ -18,12 +18,11 @@
 
 package org.apache.cassandra.tools.nodetool;
 
-import java.io.File;
-
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.tools.ToolRunner;
 
 import static org.apache.cassandra.tools.ToolRunner.invokeNodetool;
@@ -83,9 +82,9 @@
     @Test
     public void testInvalidSnapshotName()
     {
-        ToolRunner.ToolResult tool = invokeNodetool("snapshot", "-t", "invalid" + File.pathSeparatorChar + "name");
+        ToolRunner.ToolResult tool = invokeNodetool("snapshot", "-t", "invalid" + File.pathSeparator() + "name");
         assertThat(tool.getExitCode()).isEqualTo(2);
-        assertThat(tool.getStderr()).contains("Snapshot name cannot contain " + File.pathSeparatorChar);
+        assertThat(tool.getStderr()).contains("Snapshot name cannot contain " + File.pathSeparator());
     }
 
     @Test
@@ -99,6 +98,24 @@
     }
 
     @Test
+    public void testTTLOption()
+    {
+        ToolRunner.ToolResult tool = invokeNodetool("snapshot", "-t", "ttl", "--ttl", "5h");
+        tool.assertOnCleanExit();
+
+        assertThat(tool.getExitCode()).isEqualTo(0);
+        assertThat(tool.getStdout()).contains("Requested creating snapshot(s) for [all keyspaces] with snapshot name [ttl] and options {skipFlush=false, ttl=5h}");
+    }
+
+    @Test
+    public void testInvalidTTLOption()
+    {
+        ToolRunner.ToolResult tool = invokeNodetool("snapshot", "-t", "ttl", "--ttl", "infinity");
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("Invalid duration: infinity");
+    }
+
+    @Test
     public void testTableOption()
     {
         ToolRunner.ToolResult tool = invokeNodetool("snapshot", "-t", "table", "--table", "keyspaces", "system_schema");
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/StatusTest.java b/test/unit/org/apache/cassandra/tools/nodetool/StatusTest.java
index 13acb19..9d8496c 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/StatusTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/StatusTest.java
@@ -30,9 +30,7 @@
 import org.apache.cassandra.tools.ToolRunner;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static org.hamcrest.CoreMatchers.*;
-import static org.hamcrest.Matchers.matchesPattern;
-import static org.junit.Assert.assertThat;
+import static org.assertj.core.api.Assertions.assertThat;
 
 public class StatusTest extends CQLTester
 {
@@ -43,10 +41,10 @@
     @BeforeClass
     public static void setup() throws Exception
     {
-        StorageService.instance.initServer();
+        requireNetwork();
+        startJMXServer();
         localHostId = StorageService.instance.getLocalHostId();
         token = StorageService.instance.getTokens().get(0);
-        startJMXServer();
     }
 
     /**
@@ -78,26 +76,27 @@
         schemaChange("DROP KEYSPACE " + CQLTester.KEYSPACE);
         schemaChange("DROP KEYSPACE " + CQLTester.KEYSPACE_PER_TEST);
 
-        ToolRunner.ToolResult nodetool = ToolRunner.invokeNodetool("status");
-        nodetool.assertOnCleanExit();
-        String[] lines = PATTERN.split(nodetool.getStdout());
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("status");
+        tool.assertOnCleanExit();
+        String[] lines = PATTERN.split(tool.getStdout());
 
         String hostStatus = lines[lines.length-3].trim();
-        assertThat(hostStatus, startsWith("UN"));
-        assertThat(hostStatus, containsString(FBUtilities.getJustLocalAddress().getHostAddress()));
-        assertThat(hostStatus, matchesPattern(".*\\d+\\.?\\d+ KiB.*"));
-        assertThat(hostStatus, containsString(localHostId));
-        assertThat(hostStatus, containsString(token));
-        assertThat(hostStatus, endsWith(SimpleSnitch.RACK_NAME));
+        assertThat(hostStatus).startsWith("UN");
+        assertThat(hostStatus).contains(FBUtilities.getJustLocalAddress().getHostAddress());
+        assertThat(hostStatus).containsPattern("\\d+\\.?\\d+ KiB");
+        assertThat(hostStatus).contains(localHostId);
+        assertThat(hostStatus).contains(token);
+        assertThat(hostStatus).endsWith(SimpleSnitch.RACK_NAME);
 
         String bootstrappingWarn = lines[lines.length-1].trim();
-        assertThat(bootstrappingWarn, containsString("probably still bootstrapping. Effective ownership information is meaningless."));
+        assertThat(bootstrappingWarn)
+                .contains("probably still bootstrapping. Effective ownership information is meaningless.");
     }
 
     private void validateStatusOutput(String hostForm, String... args)
     {
-        ToolRunner.ToolResult nodetool = ToolRunner.invokeNodetool(args);
-        nodetool.assertOnCleanExit();
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool(args);
+        tool.assertOnCleanExit();
         /*
          Datacenter: datacenter1
          =======================
@@ -106,16 +105,16 @@
          --  Address    Load       Owns (effective)  Host ID                               Token                Rack
          UN  localhost  45.71 KiB  100.0%            0b1b5e91-ad3b-444e-9c24-50578486978a  1849950853373272258  rack1
          */
-        String[] lines = PATTERN.split(nodetool.getStdout());
-        assertThat(lines[0].trim(), endsWith(SimpleSnitch.DATA_CENTER_NAME));
+        String[] lines = PATTERN.split(tool.getStdout());
+        assertThat(lines[0].trim()).endsWith(SimpleSnitch.DATA_CENTER_NAME);
         String hostStatus = lines[lines.length-1].trim();
-        assertThat(hostStatus, startsWith("UN"));
-        assertThat(hostStatus, containsString(hostForm));
-        assertThat(hostStatus, matchesPattern(".*\\d+\\.?\\d+ KiB.*"));
-        assertThat(hostStatus, matchesPattern(".*\\d+\\.\\d+%.*"));
-        assertThat(hostStatus, containsString(localHostId));
-        assertThat(hostStatus, containsString(token));
-        assertThat(hostStatus, endsWith(SimpleSnitch.RACK_NAME));
-        assertThat(hostStatus, not(containsString("?")));
+        assertThat(hostStatus).startsWith("UN");
+        assertThat(hostStatus).contains(hostForm);
+        assertThat(hostStatus).containsPattern("\\d+\\.?\\d+ KiB");
+        assertThat(hostStatus).containsPattern("\\d+\\.\\d+%");
+        assertThat(hostStatus).contains(localHostId);
+        assertThat(hostStatus).contains(token);
+        assertThat(hostStatus).endsWith(SimpleSnitch.RACK_NAME);
+        assertThat(hostStatus).doesNotContain("?");
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/TableStatsTest.java b/test/unit/org/apache/cassandra/tools/nodetool/TableStatsTest.java
new file mode 100644
index 0000000..578bd88
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/TableStatsTest.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+import org.yaml.snakeyaml.Yaml;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatCode;
+
+public class TableStatsTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added, modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "tablestats");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool tablestats - Print statistics on tables\n" + 
+                        "\n" + 
+                        "SYNOPSIS\n" + 
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+                        "                [(-u <username> | --username <username>)] tablestats\n" +
+                        "                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]\n" +
+                        "                [(-l | --sstable-location-check)] [(-s <sort_key> | --sort <sort_key>)]\n" +
+                        "                [(-t <top> | --top <top>)] [--] [<keyspace.table>...]\n" +
+                        "\n" + 
+                        "OPTIONS\n" +
+                        "        -F <format>, --format <format>\n" +
+                        "            Output format (json, yaml)\n" + 
+                        "\n" + 
+                        "        -h <host>, --host <host>\n" + 
+                        "            Node hostname or ip address\n" + 
+                        "\n" + 
+                        "        -H, --human-readable\n" + 
+                        "            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB\n" + 
+                        "\n" + 
+                        "        -i\n" + 
+                        "            Ignore the list of tables and display the remaining tables\n" +
+                        "\n" +
+                        "        -l, --sstable-location-check\n" +
+                        "            Check whether or not the SSTables are in the correct location.\n" +
+                        "\n" + 
+                        "        -p <port>, --port <port>\n" + 
+                        "            Remote jmx agent port number\n" + 
+                        "\n" + 
+                        "        -pp, --print-port\n" + 
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" + 
+                        "\n" + 
+                        "        -pw <password>, --password <password>\n" + 
+                        "            Remote jmx agent password\n" + 
+                        "\n" + 
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" + 
+                        "            Path to the JMX password file\n" + 
+                        "\n" + 
+                        "        -s <sort_key>, --sort <sort_key>\n" + 
+                        "            Sort tables by specified sort key\n" + 
+                        "            (average_live_cells_per_slice_last_five_minutes,\n" + 
+                        "            average_tombstones_per_slice_last_five_minutes,\n" + 
+                        "            bloom_filter_false_positives, bloom_filter_false_ratio,\n" + 
+                        "            bloom_filter_off_heap_memory_used, bloom_filter_space_used,\n" + 
+                        "            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,\n" + 
+                        "            compacted_partition_minimum_bytes,\n" + 
+                        "            compression_metadata_off_heap_memory_used, dropped_mutations,\n" + 
+                        "            full_name, index_summary_off_heap_memory_used, local_read_count,\n" + 
+                        "            local_read_latency_ms, local_write_latency_ms,\n" + 
+                        "            maximum_live_cells_per_slice_last_five_minutes,\n" + 
+                        "            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,\n" + 
+                        "            memtable_data_size, memtable_off_heap_memory_used,\n" + 
+                        "            memtable_switch_count, number_of_partitions_estimate,\n" + 
+                        "            off_heap_memory_used_total, pending_flushes, percent_repaired,\n" + 
+                        "            read_latency, reads, space_used_by_snapshots_total, space_used_live,\n" + 
+                        "            space_used_total, sstable_compression_ratio, sstable_count,\n" + 
+                        "            table_name, write_latency, writes)\n" + 
+                        "\n" + 
+                        "        -t <top>, --top <top>\n" + 
+                        "            Show only the top K tables for the sort key (specify the number K of\n" + 
+                        "            tables to be shown\n" + 
+                        "\n" + 
+                        "        -u <username>, --username <username>\n" + 
+                        "            Remote jmx agent username\n" + 
+                        "\n" + 
+                        "        --\n" + 
+                        "            This option can be used to separate command-line options from the\n" + 
+                        "            list of argument, (useful when arguments might be mistaken for\n" + 
+                        "            command-line options\n" + 
+                        "\n" + 
+                        "        [<keyspace.table>...]\n" + 
+                        "            List of tables (or keyspace) names\n" + 
+                        "\n" + 
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testTableStats()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace : system_schema");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Table:")).isGreaterThan(1);
+
+        tool = ToolRunner.invokeNodetool("tablestats", "system_distributed");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace : system_distributed");
+        assertThat(tool.getStdout()).doesNotContain("Keyspace : system_schema");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Table:")).isGreaterThan(1);
+    }
+
+    @Test
+    public void testTableIgnoreArg()
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-i", "system_schema.aggregates");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).contains("Keyspace : system_schema");
+        assertThat(tool.getStdout()).doesNotContain("Table: system_schema.aggregates");
+        assertThat(StringUtils.countMatches(tool.getStdout(), "Table:")).isGreaterThan(1);
+    }
+
+    @Test
+    public void testHumanReadableArg()
+    {
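+        // exercise both the short and the long form of the flag; sizes should then be printed in human-readable units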
+        Arrays.asList("-H", "--human-readable").forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg);
+            tool.assertOnCleanExit();
+            assertThat(tool.getStdout()).contains(" KiB");
+        });
+    }
+
+    @Test
+    public void testSortArg()
+    {
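+        // (?m) makes '$' match at each line end, so every "Table: <name>" header line is captured
+        // collect the ordering for two different sort keys: they should differ, yet contain the same set of tables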
+        Pattern regExp = Pattern.compile("((?m)Table: .*$)");
+
+        Arrays.asList("-s", "--sort").forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "table_name");
+            Matcher m = regExp.matcher(tool.getStdout());
+            ArrayList<String> orig = new ArrayList<>();
+            while (m.find())
+                orig.add(m.group(1));
+
+            tool = ToolRunner.invokeNodetool("tablestats", arg, "sstable_count");
+            tool.assertOnCleanExit();
+            m = regExp.matcher(tool.getStdout());
+            ArrayList<String> sorted = new ArrayList<>();
+            while (m.find())
+                sorted.add(m.group(1));
+
+            assertThat(sorted).isNotEqualTo(orig);
+            Collections.sort(orig);
+            Collections.sort(sorted);
+            assertThat(sorted).isEqualTo(orig);
+        });
+
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-s", "wrongSort");
+        assertThat(tool.getStdout()).contains("argument for sort must be one of");
+        tool.assertCleanStdErr();
+        assertThat(tool.getExitCode()).isEqualTo(1);
+    }
+
+    @Test
+    public void testTopArg()
+    {
+        Arrays.asList("-t", "--top").forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-s", "table_name", arg, "1");
+            tool.assertOnCleanExit();
+            assertThat(StringUtils.countMatches(tool.getStdout(), "Table:")).isEqualTo(1);
+        });
+
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-s", "table_name", "-t", "-1");
+        tool.assertCleanStdErr();
+        assertThat(tool.getExitCode()).isEqualTo(1);
+        assertThat(tool.getStdout()).contains("argument for top must be a positive integer");
+    }
+
+    @Test
+    public void testSSTableLocationCheckArg()
+    {
+        Arrays.asList("-l", "--sstable-location-check").forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "system.local");
+            tool.assertOnCleanExit();
+            assertThat(StringUtils.countMatches(tool.getStdout(), "SSTables in correct location: ")).isEqualTo(1);
+        });
+
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", "system.local");
+        tool.assertCleanStdErr();
+        assertThat(tool.getStdout()).doesNotContain("SSTables in correct location: ");
+    }
+
+    @Test
+    public void testFormatJson()
+    {
+        Arrays.asList("-F", "--format").forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "json");
+            tool.assertOnCleanExit();
+            String json = tool.getStdout();
+            assertThatCode(() -> new ObjectMapper().readTree(json)).doesNotThrowAnyException();
+            assertThat(json).containsPattern("\"sstable_count\"\\s*:\\s*[0-9]+")
+                            .containsPattern("\"old_sstable_count\"\\s*:\\s*[0-9]+");
+        });
+    }
+
+    @Test
+    public void testFormatYaml()
+    {
+        Arrays.asList("-F", "--format").forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "yaml");
+            tool.assertOnCleanExit();
+            String yaml = tool.getStdout();
+            assertThatCode(() -> new Yaml().load(yaml)).doesNotThrowAnyException();
+            assertThat(yaml).containsPattern("sstable_count:\\s*[0-9]+")
+                            .containsPattern("old_sstable_count:\\s*[0-9]+");
+        });
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/TpStatsTest.java b/test/unit/org/apache/cassandra/tools/nodetool/TpStatsTest.java
new file mode 100644
index 0000000..124f297
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/TpStatsTest.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.net.Message;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.net.NoPayload;
+import org.apache.cassandra.tools.ToolRunner;
+import org.apache.cassandra.utils.FBUtilities;
+import org.yaml.snakeyaml.Yaml;
+
+import static org.apache.cassandra.net.Verb.ECHO_REQ;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TpStatsTest extends CQLTester
+{
+
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        requireNetwork();
+        startJMXServer();
+    }
+
+    @Test
+    @SuppressWarnings("SingleCharacterStringConcatenation")
+    public void testMaybeChangeDocs()
+    {
+        // If you added, modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "tpstats");
+        tool.assertOnCleanExit();
+
+        String help =   "NAME\n" +
+                        "        nodetool tpstats - Print usage statistics of thread pools\n" + 
+                        "\n" + 
+                        "SYNOPSIS\n" + 
+                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" + 
+                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" + 
+                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" + 
+                        "                [(-u <username> | --username <username>)] tpstats\n" + 
+                        "                [(-F <format> | --format <format>)]\n" + 
+                        "\n" + 
+                        "OPTIONS\n" + 
+                        "        -F <format>, --format <format>\n" + 
+                        "            Output format (json, yaml)\n" + 
+                        "\n" + 
+                        "        -h <host>, --host <host>\n" + 
+                        "            Node hostname or ip address\n" + 
+                        "\n" + 
+                        "        -p <port>, --port <port>\n" + 
+                        "            Remote jmx agent port number\n" + 
+                        "\n" + 
+                        "        -pp, --print-port\n" + 
+                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" + 
+                        "\n" + 
+                        "        -pw <password>, --password <password>\n" + 
+                        "            Remote jmx agent password\n" + 
+                        "\n" + 
+                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" + 
+                        "            Path to the JMX password file\n" + 
+                        "\n" + 
+                        "        -u <username>, --username <username>\n" + 
+                        "            Remote jmx agent username\n" +  
+                        "\n" + 
+                        "\n";
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+
+    @Test
+    public void testTpStats() throws Throwable
+    {
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tpstats");
+        tool.assertOnCleanExit();
+        String stdout = tool.getStdout();
+        assertThat(stdout).containsPattern("Pool Name \\s+ Active Pending Completed Blocked All time blocked");
+        assertThat(stdout).contains("Latencies waiting in queue (micros) per dropped message types");
+
+        // Does inserting data alter tpstats?
+        String nonZeroedThreadsRegExp = "((?m)\\D.*[1-9].*)";
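+        // '.' never crosses a newline, so each match is confined to one output line and must contain a non-zero digit (an active pool)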
+        ArrayList<String> origStats = getAllGroupMatches(nonZeroedThreadsRegExp, stdout);
+        Collections.sort(origStats);
+
+        createTable("CREATE TABLE %s (pk int, c int, PRIMARY KEY(pk))");
+        execute("INSERT INTO %s (pk, c) VALUES (?, ?)", 1, 1);
+        flush();
+
+        tool = ToolRunner.invokeNodetool("tpstats");
+        tool.assertOnCleanExit();
+        stdout = tool.getStdout();
+        ArrayList<String> newStats = getAllGroupMatches(nonZeroedThreadsRegExp, stdout);
+        Collections.sort(newStats);
+
+        assertThat(origStats).isNotEqualTo(newStats);
+
+        // Does sending a message alter Gossip & ECHO stats?
+        String origGossip = getAllGroupMatches("((?m)GossipStage.*)", stdout).get(0);
+        assertThat(stdout).doesNotContainPattern("ECHO_REQ\\D.*[1-9].*");
+        assertThat(stdout).doesNotContainPattern("ECHO_RSP\\D.*[1-9].*");
+
+        Message<NoPayload> echoMessageOut = Message.out(ECHO_REQ, NoPayload.noPayload);
+        MessagingService.instance().send(echoMessageOut, FBUtilities.getBroadcastAddressAndPort());
+
+        tool = ToolRunner.invokeNodetool("tpstats");
+        tool.assertOnCleanExit();
+        stdout = tool.getStdout();
+        String newGossip = getAllGroupMatches("((?m)GossipStage.*)", stdout).get(0);
+
+        assertThat(origGossip).isNotEqualTo(newGossip);
+        assertThat(stdout).containsPattern("ECHO_REQ\\D.*[1-9].*");
+        assertThat(stdout).containsPattern("ECHO_RSP\\D.*[0-9].*");
+    }
+
+    @Test
+    public void testFormatArg()
+    {
+        Arrays.asList(Pair.of("-F", "json"), Pair.of("--format", "json")).forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tpstats", arg.getLeft(), arg.getRight());
+            tool.assertOnCleanExit();
+            String json = tool.getStdout();
+            assertThat(isJSONString(json)).isTrue();
+            assertThat(json).containsPattern("\"WaitLatencies\"\\s*:\\s*\\{\\s*\"");
+        });
+
+        Arrays.asList(Pair.of("-F", "yaml"), Pair.of("--format", "yaml")).forEach(arg -> {
+            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tpstats", arg.getLeft(), arg.getRight());
+            tool.assertOnCleanExit();
+            String yaml = tool.getStdout();
+            assertThat(isYAMLString(yaml)).isTrue();
+            assertThat(yaml).containsPattern("WaitLatencies:\\s*[A-Z|_]+:\\s+-\\s");
+        });
+    }
+
+    public static boolean isJSONString(String str)
+    {
+        try
+        {
+            ObjectMapper mapper = new ObjectMapper();
+            mapper.readTree(str);
+            return true;
+        }
+        catch(IOException e)
+        {
+            return false;
+        }
+    }
+
+    public static boolean isYAMLString(String str)
+    {
+        try
+        {
+            Yaml yaml = new Yaml();
+            yaml.load(str);
+            return true;
+        }
+        catch(Exception e)
+        {
+            return false;
+        }
+    }
+
+    private ArrayList<String> getAllGroupMatches(String regExp, String in)
+    {
+        Pattern pattern = Pattern.compile(regExp);
+        Matcher m = pattern.matcher(in);
+
+        ArrayList<String> matches = new ArrayList<>();
+        while (m.find())
+            matches.add(m.group(1));
+
+        return matches;
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/VerifyTest.java b/test/unit/org/apache/cassandra/tools/nodetool/VerifyTest.java
new file mode 100644
index 0000000..47ca5fe
--- /dev/null
+++ b/test/unit/org/apache/cassandra/tools/nodetool/VerifyTest.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.tools.nodetool;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.auth.AuthTestUtils;
+import org.apache.cassandra.auth.AuthenticatedUser;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.tools.ToolRunner;
+
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_A;
+import static org.apache.cassandra.auth.AuthTestUtils.ROLE_B;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class VerifyTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        SchemaLoader.prepareServer();
+        AuthTestUtils.LocalCassandraRoleManager roleManager = new AuthTestUtils.LocalCassandraRoleManager();
+        SchemaLoader.setupAuth(roleManager,
+                               new AuthTestUtils.LocalPasswordAuthenticator(),
+                               new AuthTestUtils.LocalCassandraAuthorizer(),
+                               new AuthTestUtils.LocalCassandraNetworkAuthorizer());
+
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions());
+        roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions());
+
+        startJMXServer();
+    }
+
+    /**
+     * We pin the help output as last seen to act as a "trigger": when a developer adds a new flag or other
+     * functionality to this tool, this test reminds them to update the help output and/or documentation as necessary.
+     */
+    @Test
+    public void testMaybeChangeDocs()
+    {
+        // If you added, modified options or help, please update docs if necessary
+        ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("help", "verify");
+        tool.assertOnCleanExit();
+
+        String help =
+        "NAME\n" +
+        "        nodetool verify - Verify (check data checksum for) one or more tables\n" +
+        "\n" +
+        "SYNOPSIS\n" +
+        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
+        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
+        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
+        "                [(-u <username> | --username <username>)] verify\n" +
+        "                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]\n" +
+        "                [(-f | --force)] [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)]\n" +
+        "                [--] [<keyspace> <tables>...]\n" +
+        "\n" +
+
+        "OPTIONS\n" +
+        "        -c, --check-version\n" +
+        "            Also check that all sstables are the latest version\n" +
+        "\n" +
+        "        -d, --dfp\n" +
+        "            Invoke the disk failure policy if a corrupt sstable is found\n" +
+        "\n" +
+        "        -e, --extended-verify\n" +
+        "            Verify each cell data, beyond simply checking sstable checksums\n" +
+        "\n" +
+        "        -f, --force\n" +
+        "            Override disabling of verify tool - see CASSANDRA-9947 for caveats\n" +
+        "\n" +
+        "        -h <host>, --host <host>\n" +
+        "            Node hostname or ip address\n" +
+        "\n" +
+        "        -p <port>, --port <port>\n" +
+        "            Remote jmx agent port number\n" +
+        "\n" +
+        "        -pp, --print-port\n" +
+        "            Operate in 4.0 mode with hosts disambiguated by port number\n" +
+        "\n" +
+        "        -pw <password>, --password <password>\n" +
+        "            Remote jmx agent password\n" +
+        "\n" +
+        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" +
+        "            Path to the JMX password file\n" +
+        "\n" +
+        "        -q, --quick\n" +
+        "            Do a quick check - avoid reading all data to verify checksums\n" +
+        "\n" +
+        "        -r, --rsc\n" +
+        "            Mutate the repair status on corrupt sstables\n" +
+        "\n" +
+        "        -t, --check-tokens\n" +
+        "            Verify that all tokens in sstables are owned by this node\n" +
+        "\n" +
+        "        -u <username>, --username <username>\n" +
+        "            Remote jmx agent username\n" +
+        "\n" +
+        "        --\n" +
+        "            This option can be used to separate command-line options from the\n" +
+        "            list of argument, (useful when arguments might be mistaken for\n" +
+        "            command-line options\n" +
+        "\n" +
+        "        [<keyspace> <tables>...]\n" +
+        "            The keyspace followed by one or many tables\n" +
+        "\n\n";
+
+        assertThat(tool.getStdout()).isEqualTo(help);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java b/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java
index 9782b5b..99a6929 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java
@@ -111,4 +111,4 @@
         }
         assertEquals(String.format("a\tbb\tccc%n"), baos.toString());
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/stats/NodetoolTableStatsTest.java b/test/unit/org/apache/cassandra/tools/nodetool/stats/NodetoolTableStatsTest.java
deleted file mode 100644
index ce3ee22..0000000
--- a/test/unit/org/apache/cassandra/tools/nodetool/stats/NodetoolTableStatsTest.java
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.tools.nodetool.stats;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.lang3.StringUtils;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.tools.ToolRunner;
-import org.apache.cassandra.tools.ToolRunner.ToolResult;
-import org.assertj.core.api.Assertions;
-import org.hamcrest.CoreMatchers;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import org.yaml.snakeyaml.Yaml;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatCode;
-
-public class NodetoolTableStatsTest extends CQLTester
-{
-    @BeforeClass
-    public static void setup() throws Exception
-    {
-        StorageService.instance.initServer();
-        startJMXServer();
-    }
-
-    @Test
-    public void testMaybeChangeDocs()
-    {
-        // If you added, modified options or help, please update docs if necessary
-        ToolResult tool = ToolRunner.invokeNodetool("help", "tablestats");
-        String help =   "NAME\n" +
-                        "        nodetool tablestats - Print statistics on tables\n" + 
-                        "\n" + 
-                        "SYNOPSIS\n" + 
-                        "        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]\n" +
-                        "                [(-pp | --print-port)] [(-pw <password> | --password <password>)]\n" +
-                        "                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]\n" +
-                        "                [(-u <username> | --username <username>)] tablestats\n" +
-                        "                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]\n" +
-                        "                [(-l | --sstable-location-check)] [(-s <sort_key> | --sort <sort_key>)]\n" +
-                        "                [(-t <top> | --top <top>)] [--] [<keyspace.table>...]\n" +
-                        "\n" + 
-                        "OPTIONS\n" +
-                        "        -F <format>, --format <format>\n" +
-                        "            Output format (json, yaml)\n" + 
-                        "\n" + 
-                        "        -h <host>, --host <host>\n" + 
-                        "            Node hostname or ip address\n" + 
-                        "\n" + 
-                        "        -H, --human-readable\n" + 
-                        "            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB\n" + 
-                        "\n" + 
-                        "        -i\n" + 
-                        "            Ignore the list of tables and display the remaining tables\n" +
-                        "\n" +
-                        "        -l, --sstable-location-check\n" +
-                        "            Check whether or not the SSTables are in the correct location.\n" +
-                        "\n" + 
-                        "        -p <port>, --port <port>\n" + 
-                        "            Remote jmx agent port number\n" + 
-                        "\n" + 
-                        "        -pp, --print-port\n" + 
-                        "            Operate in 4.0 mode with hosts disambiguated by port number\n" + 
-                        "\n" + 
-                        "        -pw <password>, --password <password>\n" + 
-                        "            Remote jmx agent password\n" + 
-                        "\n" + 
-                        "        -pwf <passwordFilePath>, --password-file <passwordFilePath>\n" + 
-                        "            Path to the JMX password file\n" + 
-                        "\n" + 
-                        "        -s <sort_key>, --sort <sort_key>\n" + 
-                        "            Sort tables by specified sort key\n" + 
-                        "            (average_live_cells_per_slice_last_five_minutes,\n" + 
-                        "            average_tombstones_per_slice_last_five_minutes,\n" + 
-                        "            bloom_filter_false_positives, bloom_filter_false_ratio,\n" + 
-                        "            bloom_filter_off_heap_memory_used, bloom_filter_space_used,\n" + 
-                        "            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,\n" + 
-                        "            compacted_partition_minimum_bytes,\n" + 
-                        "            compression_metadata_off_heap_memory_used, dropped_mutations,\n" + 
-                        "            full_name, index_summary_off_heap_memory_used, local_read_count,\n" + 
-                        "            local_read_latency_ms, local_write_latency_ms,\n" + 
-                        "            maximum_live_cells_per_slice_last_five_minutes,\n" + 
-                        "            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,\n" + 
-                        "            memtable_data_size, memtable_off_heap_memory_used,\n" + 
-                        "            memtable_switch_count, number_of_partitions_estimate,\n" + 
-                        "            off_heap_memory_used_total, pending_flushes, percent_repaired,\n" + 
-                        "            read_latency, reads, space_used_by_snapshots_total, space_used_live,\n" + 
-                        "            space_used_total, sstable_compression_ratio, sstable_count,\n" + 
-                        "            table_name, write_latency, writes)\n" + 
-                        "\n" + 
-                        "        -t <top>, --top <top>\n" + 
-                        "            Show only the top K tables for the sort key (specify the number K of\n" + 
-                        "            tables to be shown\n" + 
-                        "\n" + 
-                        "        -u <username>, --username <username>\n" + 
-                        "            Remote jmx agent username\n" + 
-                        "\n" + 
-                        "        --\n" + 
-                        "            This option can be used to separate command-line options from the\n" + 
-                        "            list of argument, (useful when arguments might be mistaken for\n" + 
-                        "            command-line options\n" + 
-                        "\n" + 
-                        "        [<keyspace.table>...]\n" + 
-                        "            List of tables (or keyspace) names\n" + 
-                        "\n" + 
-                        "\n";
-        Assertions.assertThat(tool.getStdout()).isEqualTo(help);
-        tool.assertOnCleanExit();
-    }
-
-    @Test
-    public void testTableStats()
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("tablestats");
-
-        assertThat(tool.getStdout(), CoreMatchers.containsString("Keyspace : system_schema"));
-        assertTrue(StringUtils.countMatches(tool.getStdout(), "Table:") > 1);
-        tool.assertOnCleanExit();
-
-        tool = ToolRunner.invokeNodetool("tablestats", "system_distributed");
-        assertThat(tool.getStdout(), CoreMatchers.containsString("Keyspace : system_distributed"));
-        assertThat(tool.getStdout(), CoreMatchers.not(CoreMatchers.containsString("Keyspace : system_schema")));
-        assertTrue(StringUtils.countMatches(tool.getStdout(), "Table:") > 1);
-        tool.assertOnCleanExit();
-    }
-
-    @Test
-    public void testTableIgnoreArg()
-    {
-        ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-i", "system_schema.aggregates");
-
-        assertThat(tool.getStdout(), CoreMatchers.containsString("Keyspace : system_schema"));
-        assertThat(tool.getStdout(), CoreMatchers.not(CoreMatchers.containsString("Table: system_schema.aggregates")));
-        assertTrue(StringUtils.countMatches(tool.getStdout(), "Table:") > 1);
-        tool.assertOnCleanExit();
-    }
-
-    @Test
-    public void testHumanReadableArg()
-    {
-        Arrays.asList("-H", "--human-readable").forEach(arg -> {
-            ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg);
-            assertThat(argFormat(arg), tool.getStdout(), CoreMatchers.containsString(" KiB"));
-            assertTrue(String.format("Expected empty stderr for option [%s] but found: %s",
-                                     arg,
-                                     tool.getCleanedStderr()),
-                       tool.getCleanedStderr().isEmpty());
-            assertEquals(String.format("Expected exit code 0 for option [%s] but found: %s", arg, tool.getExitCode()),
-                         0,
-                         tool.getExitCode());
-        });
-    }
-
-    @Test
-    public void testSortArg()
-    {
-        Pattern regExp = Pattern.compile("((?m)Table: .*$)");
-
-        Arrays.asList("-s", "--sort").forEach(arg -> {
-            ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "table_name");
-            Matcher m = regExp.matcher(tool.getStdout());
-            ArrayList<String> orig = new ArrayList<>();
-            while (m.find())
-                orig.add(m.group(1));
-
-            tool = ToolRunner.invokeNodetool("tablestats", arg, "sstable_count");
-            m = regExp.matcher(tool.getStdout());
-            ArrayList<String> sorted = new ArrayList<>();
-            while (m.find())
-                sorted.add(m.group(1));
-
-            assertNotEquals(argFormat(arg), orig, sorted);
-            Collections.sort(orig);
-            Collections.sort(sorted);
-            assertEquals(argFormat(arg), orig, sorted);
-            assertTrue(argFormat(arg), tool.getCleanedStderr().isEmpty());
-            assertEquals(0, tool.getExitCode());
-        });
-
-        ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-s", "wrongSort");
-        assertThat(tool.getStdout(), CoreMatchers.containsString("argument for sort must be one of"));
-        tool.assertCleanStdErr();
-        assertEquals(1, tool.getExitCode());
-    }
-
-    @Test
-    public void testTopArg()
-    {
-        Arrays.asList("-t", "--top").forEach(arg -> {
-            ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-s", "table_name", arg, "1");
-            assertEquals(argFormat(arg), StringUtils.countMatches(tool.getStdout(), "Table:"), 1);
-            assertTrue(argFormat(arg), tool.getCleanedStderr().isEmpty());
-            assertEquals(argFormat(arg), 0, tool.getExitCode());
-        });
-
-        ToolResult tool = ToolRunner.invokeNodetool("tablestats", "-s", "table_name", "-t", "-1");
-        assertThat(tool.getStdout(), CoreMatchers.containsString("argument for top must be a positive integer"));
-        tool.assertCleanStdErr();
-        assertEquals(1, tool.getExitCode());
-    }
-
-    @Test
-    public void testSSTableLocationCheckArg()
-    {
-        Arrays.asList("-l", "--sstable-location-check").forEach(arg -> {
-            ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "system.local");
-            assertEquals(argFormat(arg), StringUtils.countMatches(tool.getStdout(), "SSTables in correct location: "), 1);
-            assertTrue(argFormat(arg), tool.getCleanedStderr().isEmpty());
-            assertEquals(argFormat(arg), 0, tool.getExitCode());
-        });
-
-        ToolResult tool = ToolRunner.invokeNodetool("tablestats", "system.local");
-        assertThat(tool.getStdout(), CoreMatchers.not(CoreMatchers.containsString("SSTables in correct location: ")));
-        tool.assertCleanStdErr();
-        assertEquals(0, tool.getExitCode());
-    }
-
-    private String argFormat(String arg)
-    {
-        return "Arg: [" + arg + ']';
-    }
-
-    @Test
-    public void testFormatJson()
-    {
-        Arrays.asList("-F", "--format").forEach(arg -> {
-            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "json");
-            tool.assertOnCleanExit();
-            String json = tool.getStdout();
-            assertThatCode(() -> new ObjectMapper().readTree(json)).doesNotThrowAnyException();
-            assertThat(json).containsPattern("\"sstable_count\"\\s*:\\s*[0-9]+")
-                            .containsPattern("\"old_sstable_count\"\\s*:\\s*[0-9]+");
-        });
-    }
-
-    @Test
-    public void testFormatYaml()
-    {
-        Arrays.asList("-F", "--format").forEach(arg -> {
-            ToolRunner.ToolResult tool = ToolRunner.invokeNodetool("tablestats", arg, "yaml");
-            tool.assertOnCleanExit();
-            String yaml = tool.getStdout();
-            assertThatCode(() -> new Yaml().load(yaml)).doesNotThrowAnyException();
-            assertThat(yaml).containsPattern("sstable_count:\\s*[0-9]+")
-                            .containsPattern("old_sstable_count:\\s*[0-9]+");
-        });
-    }
-}
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java b/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java
index 025f812..81687ba 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java
@@ -397,6 +397,7 @@
                                  "    \"tables\" : {\n" +
                                  "      \"table6\" : {\n" +
                                  "        \"average_tombstones_per_slice_last_five_minutes\" : 6.0,\n" +
+                                 "        \"top_tombstone_partitions\" : null,\n" +
                                  "        \"bloom_filter_off_heap_memory_used\" : \"667408\",\n" +
                                  "        \"bytes_pending_repair\" : 0,\n" +
                                  "        \"memtable_switch_count\" : 6,\n" +
@@ -412,6 +413,7 @@
                                  "        \"local_read_count\" : 5,\n" +
                                  "        \"sstable_compression_ratio\" : 0.68,\n" +
                                  "        \"dropped_mutations\" : \"666666\",\n" +
+                                 "        \"top_size_partitions\" : null,\n" +
                                  "        \"bloom_filter_false_positives\" : 400,\n" +
                                  "        \"off_heap_memory_used_total\" : \"162470810\",\n" +
                                  "        \"memtable_off_heap_memory_used\" : \"161803398\",\n" +
@@ -419,6 +421,7 @@
                                  "        \"bloom_filter_space_used\" : \"101112\",\n" +
                                  "        \"sstables_in_each_level\" : [ ],\n" +
                                  "        \"compacted_partition_maximum_bytes\" : 20,\n" +
+                                 "        \"sstable_bytes_in_each_level\" : [ ],\n" +
                                  "        \"space_used_total\" : \"0\",\n" +
                                  "        \"local_write_count\" : 0,\n" +
                                  "        \"droppable_tombstone_ratio\" : \"0.66667\",\n" +
@@ -460,6 +463,7 @@
                                  "  tables:\n" +
                                  "    table6:\n" +
                                  "      average_tombstones_per_slice_last_five_minutes: 6.0\n" +
+                                 "      top_tombstone_partitions: null\n" +
                                  "      bloom_filter_off_heap_memory_used: '667408'\n" +
                                  "      bytes_pending_repair: 0\n" +
                                  "      memtable_switch_count: 6\n" +
@@ -475,6 +479,7 @@
                                  "      local_read_count: 5\n" +
                                  "      sstable_compression_ratio: 0.68\n" +
                                  "      dropped_mutations: '666666'\n" +
+                                 "      top_size_partitions: null\n" +
                                  "      bloom_filter_false_positives: 400\n" +
                                  "      off_heap_memory_used_total: '162470810'\n" +
                                  "      memtable_off_heap_memory_used: '161803398'\n" +
@@ -482,6 +487,7 @@
                                  "      bloom_filter_space_used: '101112'\n" +
                                  "      sstables_in_each_level: []\n" +
                                  "      compacted_partition_maximum_bytes: 20\n" +
+                                 "      sstable_bytes_in_each_level: []\n" +
                                  "      space_used_total: '0'\n" +
                                  "      local_write_count: 0\n" +
                                  "      droppable_tombstone_ratio: '0.66667'\n" +
diff --git a/test/unit/org/apache/cassandra/tracing/TracingTest.java b/test/unit/org/apache/cassandra/tracing/TracingTest.java
index 61e08b0..0f181d7 100644
--- a/test/unit/org/apache/cassandra/tracing/TracingTest.java
+++ b/test/unit/org/apache/cassandra/tracing/TracingTest.java
@@ -25,13 +25,13 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.commons.lang3.StringUtils;
 
@@ -88,7 +88,7 @@
     {
         List<String> traces = new ArrayList<>();
         Tracing tracing = new TracingImpl(traces);
-        UUID uuid = tracing.newSession(Tracing.TraceType.NONE);
+        TimeUUID uuid = tracing.newSession(Tracing.TraceType.NONE);
         tracing.begin("test-request", Collections.<String,String>emptyMap());
         tracing.get(uuid).trace("test-1");
         tracing.get(uuid).trace("test-2");
@@ -189,7 +189,7 @@
             return get();
         }
 
-        protected UUID newSession(UUID sessionId, TraceType traceType, Map<String,ByteBuffer> customPayload)
+        protected TimeUUID newSession(TimeUUID sessionId, TraceType traceType, Map<String,ByteBuffer> customPayload)
         {
             if (!customPayload.isEmpty())
                 logger.info("adding custom payload items {}", StringUtils.join(customPayload.keySet(), ','));
@@ -198,7 +198,7 @@
             return super.newSession(sessionId, traceType, customPayload);
         }
 
-        protected TraceState newTraceState(InetAddressAndPort ia, UUID uuid, Tracing.TraceType tt)
+        protected TraceState newTraceState(InetAddressAndPort ia, TimeUUID uuid, Tracing.TraceType tt)
         {
             return new TraceState(ia, uuid, tt)
             {
diff --git a/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java b/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java
index fef25fb..b538417 100644
--- a/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java
+++ b/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java
@@ -30,6 +30,7 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.*;
 
+import org.apache.cassandra.transport.ClientResourceLimits.Overload;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -54,11 +55,15 @@
 import org.apache.cassandra.transport.CQLMessageHandler.MessageConsumer;
 import org.apache.cassandra.transport.messages.*;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.concurrent.SimpleCondition;
+import org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter;
+import org.apache.cassandra.utils.concurrent.Condition;
 
 import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED;
+import static org.apache.cassandra.io.util.FileUtils.ONE_MIB;
 import static org.apache.cassandra.net.FramingTest.randomishBytes;
 import static org.apache.cassandra.transport.Flusher.MAX_FRAMED_PAYLOAD_SIZE;
+import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition;
+import static org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter.NO_OP_LIMITER;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -101,6 +106,8 @@
         alloc = GlobalBufferPoolAllocator.instance;
         // set connection-local queue size to 0 so that all capacity is allocated from reserves
         DatabaseDescriptor.setNativeTransportReceiveQueueCapacityInBytes(0);
+        // set transport to max frame size possible
+        DatabaseDescriptor.setNativeTransportMaxFrameSize(256 * (int) ONE_MIB);
     }
 
     @Test
@@ -630,7 +637,7 @@
             this.frameEncoder = frameEncoder;
         }
 
-        public void accept(Channel channel, Message.Request message, Dispatcher.FlushItemConverter toFlushItem)
+        public void accept(Channel channel, Message.Request message, Dispatcher.FlushItemConverter toFlushItem, Overload backpressure)
         {
             if (flusher == null)
                 flusher = new SimpleClient.SimpleFlusher(frameEncoder);
@@ -652,7 +659,7 @@
 
     static class ServerConfigurator extends PipelineConfigurator
     {
-        private final SimpleCondition pipelineReady = new SimpleCondition();
+        private final Condition pipelineReady = newOneTimeCondition();
         private final MessageConsumer<Message.Request> consumer;
         private final AllocationObserver allocationObserver;
         private final Message.Decoder<Message.Request> decoder;
@@ -763,6 +770,12 @@
                     return delegate.endpointWaitQueue();
                 }
 
+                @Override
+                public NonBlockingRateLimiter requestRateLimiter()
+                {
+                    return NO_OP_LIMITER;
+                }
+                
                 public void release()
                 {
                     delegate.release();
diff --git a/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java b/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java
index 7f3c9cb..120de2a 100644
--- a/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java
+++ b/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java
@@ -26,7 +26,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
@@ -39,17 +38,18 @@
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.exceptions.AuthenticationException;
+import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.audit.AuditEvent;
 import org.apache.cassandra.audit.AuditLogEntryType;
 import org.apache.cassandra.audit.AuditLogManager;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.OverrideConfigurationLoader;
 import org.apache.cassandra.config.ParameterizedClass;
-import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.diag.DiagnosticEventService;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.service.EmbeddedCassandraService;
 
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -57,7 +57,7 @@
 public class CQLUserAuditTest
 {
     private static EmbeddedCassandraService embedded;
-    private static final BlockingQueue<AuditEvent> auditEvents = new LinkedBlockingQueue<>();
+    private static final BlockingQueue<AuditEvent> auditEvents = newBlockingQueue();
 
     @BeforeClass
     public static void setup() throws Exception
@@ -69,11 +69,10 @@
             config.audit_logging_options.enabled = true;
             config.audit_logging_options.logger = new ParameterizedClass("DiagnosticEventAuditLogger", null);
         });
-        CQLTester.prepareServer();
 
         System.setProperty("cassandra.superuser_setup_delay_ms", "0");
-        embedded = new EmbeddedCassandraService();
-        embedded.start();
+
+        embedded = ServerTestUtils.startEmbeddedCassandraService();
 
         executeAs(Arrays.asList("CREATE ROLE testuser WITH LOGIN = true AND SUPERUSER = false AND PASSWORD = 'foo'",
                                 "CREATE ROLE testuser_nologin WITH LOGIN = false AND SUPERUSER = false AND PASSWORD = 'foo'",
@@ -230,9 +229,9 @@
         AuditEvent event = auditEvents.poll(100, TimeUnit.MILLISECONDS);
         assertEquals(expectedAuthType, event.getType());
         assertTrue(!authFailed || event.getType() == AuditLogEntryType.LOGIN_ERROR);
-        assertEquals(InetAddressAndPort.getLoopbackAddress().address,
-                     event.getEntry().getSource().address);
-        assertTrue(event.getEntry().getSource().port > 0);
+        assertEquals(InetAddressAndPort.getLoopbackAddress().getAddress(),
+                     event.getEntry().getSource().getAddress());
+        assertTrue(event.getEntry().getSource().getPort() > 0);
         if (event.getType() != AuditLogEntryType.LOGIN_ERROR)
             assertEquals(username, event.toMap().get("user"));
 
@@ -250,4 +249,4 @@
         auditEvents.drainTo(ret);
         return ret;
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/transport/ClientNotificiationsTest.java b/test/unit/org/apache/cassandra/transport/ClientNotificiationsTest.java
index bd1ec63..9f95784 100644
--- a/test/unit/org/apache/cassandra/transport/ClientNotificiationsTest.java
+++ b/test/unit/org/apache/cassandra/transport/ClientNotificiationsTest.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.transport;
 
 import java.util.Collections;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.Before;
@@ -29,6 +28,8 @@
 
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.transport.messages.RegisterMessage;
 import org.apache.cassandra.utils.FBUtilities;
@@ -43,7 +44,7 @@
     @Before
     public void setup()
     {
-        requireNetwork(builder -> builder.withEventNotifier(notifier));
+        requireNetwork(builder -> builder.withEventNotifier(notifier), builder -> {});
     }
 
     @Parameterized.Parameter(0)
@@ -74,6 +75,7 @@
 
             InetAddressAndPort broadcastAddress = FBUtilities.getBroadcastAddressAndPort();
             InetAddressAndPort nativeAddress = FBUtilities.getBroadcastNativeAddressAndPort();
+            KeyspaceMetadata ks = KeyspaceMetadata.create("ks", KeyspaceParams.simple(1));
 
             // Necessary or else the NEW_NODE notification is deferred (CASSANDRA-11038)
             // (note: this works because the notifications are for the local address)
@@ -84,9 +86,9 @@
             notifier.onJoinCluster(broadcastAddress);
             notifier.onMove(broadcastAddress);
             notifier.onLeaveCluster(broadcastAddress);
-            notifier.onCreateKeyspace("ks");
-            notifier.onAlterKeyspace("ks");
-            notifier.onDropKeyspace("ks");
+            notifier.onCreateKeyspace(ks);
+            notifier.onAlterKeyspace(ks, ks);
+            notifier.onDropKeyspace(ks, true);
 
             handler.assertNextEvent(Event.StatusChange.nodeUp(nativeAddress));
             handler.assertNextEvent(Event.StatusChange.nodeDown(nativeAddress));
diff --git a/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java b/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java
index 9cc900a..8e94997 100644
--- a/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java
+++ b/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java
@@ -19,7 +19,6 @@
 package org.apache.cassandra.transport;
 
 import java.io.IOException;
-import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.TimeUnit;
@@ -29,9 +28,9 @@
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.primitives.Ints;
+import org.apache.cassandra.service.StorageService;
 import org.junit.*;
 
-import com.codahale.metrics.Gauge;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.QueryOptions;
@@ -40,13 +39,11 @@
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.virtual.*;
 import org.apache.cassandra.exceptions.OverloadedException;
-import org.apache.cassandra.metrics.CassandraMetricsRegistry;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.transport.messages.QueryMessage;
 import org.apache.cassandra.utils.FBUtilities;
 import org.awaitility.Awaitility;
 
-import static org.apache.cassandra.Util.spinAssertEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -55,36 +52,34 @@
 
 public class ClientResourceLimitsTest extends CQLTester
 {
-
     private static final long LOW_LIMIT = 600L;
     private static final long HIGH_LIMIT = 5000000000L;
 
-    private static final QueryOptions V5_DEFAULT_OPTIONS = QueryOptions.create(
-        QueryOptions.DEFAULT.getConsistency(),
-        QueryOptions.DEFAULT.getValues(),
-        QueryOptions.DEFAULT.skipMetadata(),
-        QueryOptions.DEFAULT.getPageSize(),
-        QueryOptions.DEFAULT.getPagingState(),
-        QueryOptions.DEFAULT.getSerialConsistency(),
-        ProtocolVersion.V5,
-        KEYSPACE);
+    private static final QueryOptions V5_DEFAULT_OPTIONS = 
+        QueryOptions.create(QueryOptions.DEFAULT.getConsistency(),
+                            QueryOptions.DEFAULT.getValues(),
+                            QueryOptions.DEFAULT.skipMetadata(),
+                            QueryOptions.DEFAULT.getPageSize(),
+                            QueryOptions.DEFAULT.getPagingState(),
+                            QueryOptions.DEFAULT.getSerialConsistency(),
+                            ProtocolVersion.V5,
+                            KEYSPACE);
 
     @BeforeClass
     public static void setUp()
     {
         DatabaseDescriptor.setNativeTransportReceiveQueueCapacityInBytes(1);
-        DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytesPerIp(LOW_LIMIT);
-        DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytes(LOW_LIMIT);
+        DatabaseDescriptor.setNativeTransportMaxRequestDataInFlightPerIpInBytes(LOW_LIMIT);
+        DatabaseDescriptor.setNativeTransportConcurrentRequestDataInFlightInBytes(LOW_LIMIT);
 
-        // The driver control connections would send queries that might interfere with the tests.
-        requireNetworkWithoutDriver();
+        requireNetwork();
     }
 
     @AfterClass
     public static void tearDown()
     {
-        DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytesPerIp(3000000000L);
-        DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytes(HIGH_LIMIT);
+        DatabaseDescriptor.setNativeTransportMaxRequestDataInFlightPerIpInBytes(3000000000L);
+        DatabaseDescriptor.setNativeTransportConcurrentRequestDataInFlightInBytes(HIGH_LIMIT);
     }
 
     @Before
@@ -124,7 +119,7 @@
         }
     }
 
-    @SuppressWarnings("resource")
+    @SuppressWarnings({"resource", "SameParameterValue"})
     private SimpleClient client(boolean throwOnOverload, int largeMessageThreshold)
     {
         try
@@ -184,27 +179,34 @@
                          (provider) -> provider.endpointWaitQueue().signal());
     }
 
-    private void backPressureTest(Runnable limitLifter, Consumer<ClientResourceLimits.ResourceProvider> signaller)
-    throws Throwable
+    private void backPressureTest(Runnable limitLifter, Consumer<ClientResourceLimits.ResourceProvider> signaller) throws Throwable
     {
         final AtomicReference<Exception> error = new AtomicReference<>();
         final CountDownLatch started = new CountDownLatch(1);
         final CountDownLatch complete = new CountDownLatch(1);
-        try(SimpleClient client = client(false))
+        
+        try (SimpleClient client = client(false))
         {
-            QueryMessage queryMessage = new QueryMessage("CREATE TABLE atable (pk int PRIMARY KEY, v text)",
-                                                         V5_DEFAULT_OPTIONS);
-            client.execute(queryMessage);
+            // The first query does not trigger backpressure/pause the connection:
+            QueryMessage queryMessage = 
+                    new QueryMessage("CREATE TABLE atable (pk int PRIMARY KEY, v text)", V5_DEFAULT_OPTIONS);
+            Message.Response belowThresholdResponse = client.execute(queryMessage);
+            assertEquals(0, getPausedConnectionsGauge().getValue().intValue());
+            assertNoWarningContains(belowThresholdResponse, "bytes in flight");
+            
+            // A second query triggers backpressure but is allowed to complete...
+            Message.Response aboveThresholdResponse = client.execute(queryMessage());
+            assertEquals(1, getPausedConnectionsGauge().getValue().intValue());
+            assertWarningsContain(aboveThresholdResponse, "bytes in flight");
 
-            // There should be no paused client connections yet
-            Gauge<Integer> pausedConnections = getPausedConnectionsGauge();
-            int before = pausedConnections.getValue();
-
+            // ...and a third request is paused.
+            final AtomicReference<Message.Response> response = new AtomicReference<>();
+            
             Thread t = new Thread(() -> {
                 try
                 {
                     started.countDown();
-                    client.execute(queryMessage());
+                    response.set(client.execute(queryMessage()));
                     complete.countDown();
                 }
                 catch (Exception e)
@@ -216,19 +218,13 @@
             });
             t.start();
 
-            // When the client attempts to execute the second query, the backpressure
-            // mechanism should report the client connection is paused
-            assertTrue(started.await(5, TimeUnit.SECONDS));
-            spinAssertEquals("Timed out after waiting 5 seconds for paused " +
-                             "connections metric to increment due to backpressure",
-                             before + 1, pausedConnections::getValue, 5, TimeUnit.SECONDS);
-
             // verify the request hasn't completed
             assertFalse(complete.await(1, TimeUnit.SECONDS));
 
             // backpressure has been applied, if we increase the limits of the exhausted reserve and signal
             // the appropriate WaitQueue, it should be released and the client request will complete
             limitLifter.run();
+            
             // We need a ResourceProvider to get access to the WaitQueue
             ClientResourceLimits.Allocator allocator = ClientResourceLimits.getAllocatorForEndpoint(FBUtilities.getJustLocalAddress());
             ClientResourceLimits.ResourceProvider queueHandle = new ClientResourceLimits.ResourceProvider.Default(allocator);
@@ -237,8 +233,10 @@
             // SimpleClient has a 10 second timeout, so if we have to wait
             // longer than that assume that we're not going to receive a
             // reply. If all's well, the completion should happen immediately
-            assertTrue(complete.await(11, TimeUnit.SECONDS));
+            assertTrue(complete.await(SimpleClient.TIMEOUT_SECONDS + 1, TimeUnit.SECONDS));
             assertNull(error.get());
+            assertEquals(0, getPausedConnectionsGauge().getValue().intValue());
+            assertNoWarningContains(response.get(), "bytes in flight");
         }
     }
 
@@ -304,17 +302,6 @@
         return new QueryMessage(query.toString(), V5_DEFAULT_OPTIONS);
     }
 
-    @SuppressWarnings({"rawtypes", "unchecked"})
-    private Gauge<Integer> getPausedConnectionsGauge()
-    {
-        String metricName = "org.apache.cassandra.metrics.Client.PausedConnections";
-        Map<String, Gauge> metrics = CassandraMetricsRegistry.Metrics.getGauges((name, metric) -> name.equals(metricName));
-        if (metrics.size() != 1)
-            fail(String.format("Expected a single registered metric for paused client connections, found %s",
-                               metrics.size()));
-        return metrics.get(metricName);
-    }
-
     @Test
     public void testQueryUpdatesConcurrentMetricsUpdate() throws Throwable
     {
@@ -395,7 +382,7 @@
             // change global limit, query will still fail because endpoint limit
             ClientResourceLimits.setGlobalLimit(HIGH_LIMIT);
             Assert.assertEquals("new global limit not returned by EndpointPayloadTrackers", HIGH_LIMIT, ClientResourceLimits.getGlobalLimit());
-            Assert.assertEquals("new global limit not returned by DatabaseDescriptor", HIGH_LIMIT, DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytes());
+            Assert.assertEquals("new global limit not returned by DatabaseDescriptor", HIGH_LIMIT, DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightInBytes());
 
             try
             {
@@ -410,7 +397,7 @@
             // change endpoint limit, query will now succeed
             ClientResourceLimits.setEndpointLimit(HIGH_LIMIT);
             Assert.assertEquals("new endpoint limit not returned by EndpointPayloadTrackers", HIGH_LIMIT, ClientResourceLimits.getEndpointLimit());
-            Assert.assertEquals("new endpoint limit not returned by DatabaseDescriptor", HIGH_LIMIT, DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp());
+            Assert.assertEquals("new endpoint limit not returned by DatabaseDescriptor", HIGH_LIMIT, DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes());
             client.execute(queryMessage());
 
             // ensure new clients also see the new raised limits
@@ -421,7 +408,7 @@
             // lower the global limit and ensure the query fails again
             ClientResourceLimits.setGlobalLimit(LOW_LIMIT);
             Assert.assertEquals("new global limit not returned by EndpointPayloadTrackers", LOW_LIMIT, ClientResourceLimits.getGlobalLimit());
-            Assert.assertEquals("new global limit not returned by DatabaseDescriptor", LOW_LIMIT, DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytes());
+            Assert.assertEquals("new global limit not returned by DatabaseDescriptor", LOW_LIMIT, DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightInBytes());
             try
             {
                 client.execute(queryMessage());
@@ -435,7 +422,7 @@
             // lower the endpoint limit and ensure existing clients also have requests that fail
             ClientResourceLimits.setEndpointLimit(60);
             Assert.assertEquals("new endpoint limit not returned by EndpointPayloadTrackers", 60, ClientResourceLimits.getEndpointLimit());
-            Assert.assertEquals("new endpoint limit not returned by DatabaseDescriptor", 60, DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp());
+            Assert.assertEquals("new endpoint limit not returned by DatabaseDescriptor", 60, DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes());
             try
             {
                 client.execute(smallMessage);
@@ -462,11 +449,30 @@
             // put the test state back
             ClientResourceLimits.setEndpointLimit(LOW_LIMIT);
             Assert.assertEquals("new endpoint limit not returned by EndpointPayloadTrackers", LOW_LIMIT, ClientResourceLimits.getEndpointLimit());
-            Assert.assertEquals("new endpoint limit not returned by DatabaseDescriptor", LOW_LIMIT, DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp());
+            Assert.assertEquals("new endpoint limit not returned by DatabaseDescriptor", LOW_LIMIT, DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes());
         }
         finally
         {
             client.close();
         }
     }
+
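+    // Verifies that the native transport requests-per-second limit can be changed at runtime through
+    // StorageService and that the new rate is visible via ClientResourceLimits and its global limiter.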
+    @Test
+    public void shouldChangeRequestsPerSecondAtRuntime()
+    {
+        StorageService.instance.setNativeTransportMaxRequestsPerSecond(100);
+        assertEquals(100, ClientResourceLimits.getNativeTransportMaxRequestsPerSecond(), 0);
+        assertEquals(100, ClientResourceLimits.GLOBAL_REQUEST_LIMITER.getRate(), 0);
+        assertEquals(100, StorageService.instance.getNativeTransportMaxRequestsPerSecond());
+
+        StorageService.instance.setNativeTransportMaxRequestsPerSecond(1000);
+        assertEquals(1000, ClientResourceLimits.getNativeTransportMaxRequestsPerSecond(), 0);
+        assertEquals(1000, ClientResourceLimits.GLOBAL_REQUEST_LIMITER.getRate(), 0);
+        assertEquals(1000, StorageService.instance.getNativeTransportMaxRequestsPerSecond());
+
+        StorageService.instance.setNativeTransportMaxRequestsPerSecond(500);
+        assertEquals(500, ClientResourceLimits.getNativeTransportMaxRequestsPerSecond(), 0);
+        assertEquals(500, ClientResourceLimits.GLOBAL_REQUEST_LIMITER.getRate(), 0);
+        assertEquals(500, StorageService.instance.getNativeTransportMaxRequestsPerSecond());
+    }
 }
diff --git a/test/unit/org/apache/cassandra/transport/ErrorMessageTest.java b/test/unit/org/apache/cassandra/transport/ErrorMessageTest.java
index cfeddba..c0e3f5e 100644
--- a/test/unit/org/apache/cassandra/transport/ErrorMessageTest.java
+++ b/test/unit/org/apache/cassandra/transport/ErrorMessageTest.java
@@ -101,7 +101,7 @@
         int contentions = 1;
         int receivedBlockFor = 3;
         ConsistencyLevel consistencyLevel = ConsistencyLevel.SERIAL;
-        CasWriteTimeoutException ex = new CasWriteTimeoutException(WriteType.CAS, consistencyLevel, receivedBlockFor, receivedBlockFor, contentions);
+        CasWriteTimeoutException ex = new CasWriteTimeoutException(WriteType.CAS, consistencyLevel, 0, receivedBlockFor, contentions);
 
         ErrorMessage deserialized = encodeThenDecode(ErrorMessage.fromException(ex), ProtocolVersion.V5);
         assertTrue(deserialized.error instanceof CasWriteTimeoutException);
@@ -110,10 +110,10 @@
         assertEquals(WriteType.CAS, deserializedEx.writeType);
         assertEquals(contentions, deserializedEx.contentions);
         assertEquals(consistencyLevel, deserializedEx.consistency);
-        assertEquals(receivedBlockFor, deserializedEx.received);
+        assertEquals(0, deserializedEx.received);
         assertEquals(receivedBlockFor, deserializedEx.blockFor);
         assertEquals(ex.getMessage(), deserializedEx.getMessage());
-        assertTrue(deserializedEx.getMessage().contains("CAS operation timed out - encountered contentions"));
+        assertTrue(deserializedEx.getMessage().contains("CAS operation timed out: received 0 of 3 required responses after 1 contention retries"));
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/transport/RateLimitingTest.java b/test/unit/org/apache/cassandra/transport/RateLimitingTest.java
new file mode 100644
index 0000000..0b3fb34
--- /dev/null
+++ b/test/unit/org/apache/cassandra/transport/RateLimitingTest.java
@@ -0,0 +1,331 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.transport;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+import com.codahale.metrics.Meter;
+import com.google.common.base.Ticker;
+import org.awaitility.Awaitility;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.exceptions.OverloadedException;
+import org.apache.cassandra.metrics.CassandraMetricsRegistry;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.transport.messages.QueryMessage;
+import org.apache.cassandra.utils.Throwables;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import static org.apache.cassandra.Util.spinAssertEquals;
+import static org.apache.cassandra.transport.ProtocolVersion.V4;
+
+@SuppressWarnings("UnstableApiUsage")
+@RunWith(Parameterized.class)
+public class RateLimitingTest extends CQLTester
+{
+    public static final String BACKPRESSURE_WARNING_SNIPPET = "Request breached global limit";
+    
+    private static final int LARGE_PAYLOAD_THRESHOLD_BYTES = 1000;
+    private static final int OVERLOAD_PERMITS_PER_SECOND = 1;
+
+    private static final long MAX_LONG_CONFIG_VALUE = Long.MAX_VALUE - 1;
+
+    @Parameterized.Parameter
+    public ProtocolVersion version;
+
+    @Parameterized.Parameters(name="{0}")
+    public static Collection<Object[]> versions()
+    {
+        return ProtocolVersion.SUPPORTED.stream()
+                                        .map(v -> new Object[]{v})
+                                        .collect(Collectors.toList());
+    }
+
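+    // A manually advanced ticker gives the tests deterministic control over the rate limiter's view of time.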
+    private AtomicLong tick;
+    private Ticker ticker;
+
+    @BeforeClass
+    public static void setup()
+    {
+        // If we don't exceed the queue capacity, we won't actually use the global/endpoint 
+        // bytes-in-flight limits, and the assertions we make below around releasing them would be useless.
+        DatabaseDescriptor.setNativeTransportReceiveQueueCapacityInBytes(1);
+
+        requireNetwork();
+    }
+
+    @Before
+    public void resetLimits()
+    {
+        // Reset to the original start time in case a test advances the clock.
+        tick = new AtomicLong(ClientResourceLimits.GLOBAL_REQUEST_LIMITER.getStartedNanos());
+
+        ticker = new Ticker()
+        {
+            @Override
+            public long read()
+            {
+                return tick.get();
+            }
+        };
+
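+        // Effectively disable the global bytes-in-flight limit so that, unless a test lowers it,
+        // only the request rate limiter is exercised.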
+        ClientResourceLimits.setGlobalLimit(MAX_LONG_CONFIG_VALUE);
+    }
+
+    @Test
+    public void shouldThrowOnOverloadSmallMessages() throws Exception
+    {
+        int payloadSize = LARGE_PAYLOAD_THRESHOLD_BYTES / 4;
+        testOverload(payloadSize, true);
+    }
+
+    @Test
+    public void shouldThrowOnOverloadLargeMessages() throws Exception
+    {
+        int payloadSize = LARGE_PAYLOAD_THRESHOLD_BYTES * 2;
+        testOverload(payloadSize, true);
+    }
+
+    @Test
+    public void shouldBackpressureSmallMessages() throws Exception
+    {
+        int payloadSize = LARGE_PAYLOAD_THRESHOLD_BYTES / 4;
+        testOverload(payloadSize, false);
+    }
+
+    @Test
+    public void shouldBackpressureLargeMessages() throws Exception
+    {
+        int payloadSize = LARGE_PAYLOAD_THRESHOLD_BYTES * 2;
+        testOverload(payloadSize, false);
+    }
+
+    @Test
+    public void shouldReleaseSmallMessageOnBytesInFlightOverload() throws Exception
+    {
+        testBytesInFlightOverload(LARGE_PAYLOAD_THRESHOLD_BYTES / 4);
+    }
+
+    @Test
+    public void shouldReleaseLargeMessageOnBytesInFlightOverload() throws Exception
+    {
+        testBytesInFlightOverload(LARGE_PAYLOAD_THRESHOLD_BYTES * 2);
+    }
+
+    private void testBytesInFlightOverload(int payloadSize) throws Exception
+    {
+        try (SimpleClient client = client().connect(false, true))
+        {
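+            // Rate limiting is disabled while the table is created so the setup query does not consume the only permit.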
+            StorageService.instance.setNativeTransportRateLimitingEnabled(false);
+            QueryMessage queryMessage = new QueryMessage("CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".atable (pk int PRIMARY KEY, v text)", queryOptions());
+            client.execute(queryMessage);
+
+            StorageService.instance.setNativeTransportRateLimitingEnabled(true);
+            ClientResourceLimits.GLOBAL_REQUEST_LIMITER.setRate(OVERLOAD_PERMITS_PER_SECOND, ticker);
+            ClientResourceLimits.setGlobalLimit(1);
+
+            try
+            {
+                // The first query takes the one available permit, but should fail on the bytes in flight limit.
+                client.execute(queryMessage(payloadSize));
+            }
+            catch (RuntimeException e)
+            {
+                assertTrue(Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException));
+            }
+        }
+        finally
+        {
+            // Sanity check bytes in flight limiter.
+            Awaitility.await().untilAsserted(() -> assertEquals(0, ClientResourceLimits.getCurrentGlobalUsage()));
+            StorageService.instance.setNativeTransportRateLimitingEnabled(false);
+        }
+    }
+
+    private void testOverload(int payloadSize, boolean throwOnOverload) throws Exception
+    {
+        try (SimpleClient client = client().connect(false, throwOnOverload))
+        {
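+            // Rate limiting is disabled while the table is created so that setup traffic does not consume permits.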
+            StorageService.instance.setNativeTransportRateLimitingEnabled(false);
+            QueryMessage queryMessage = new QueryMessage("CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".atable (pk int PRIMARY KEY, v text)", queryOptions());
+            client.execute(queryMessage);
+
+            StorageService.instance.setNativeTransportRateLimitingEnabled(true);
+            ClientResourceLimits.GLOBAL_REQUEST_LIMITER.setRate(OVERLOAD_PERMITS_PER_SECOND, ticker);
+
+            if (throwOnOverload)
+                testThrowOnOverload(payloadSize, client);
+            else
+                testBackpressureOnOverload(payloadSize, client);
+        }
+        finally
+        {
+            // Sanity check the bytes in flight limiter.
+            Awaitility.await().untilAsserted(() -> assertEquals(0, ClientResourceLimits.getCurrentGlobalUsage()));
+            StorageService.instance.setNativeTransportRateLimitingEnabled(false);
+        }
+    }
+
+    private void testBackpressureOnOverload(int payloadSize, SimpleClient client) throws Exception
+    {
+        // The first query takes the one available permit.
+        Message.Response firstResponse = client.execute(queryMessage(payloadSize));
+        assertEquals(0, getPausedConnectionsGauge().getValue().intValue());
+        assertNoWarningContains(firstResponse, BACKPRESSURE_WARNING_SNIPPET);
+        
+        // The second query activates backpressure.
+        long overloadQueryStartTime = System.currentTimeMillis();
+        Message.Response response = client.execute(queryMessage(payloadSize));
+
+        // V3 does not support client warnings, but otherwise we should get one for this query.
+        if (version.isGreaterOrEqualTo(V4))
+            assertWarningsContain(response, BACKPRESSURE_WARNING_SNIPPET);
+
+        AtomicReference<Throwable> error = new AtomicReference<>();
+        CountDownLatch started = new CountDownLatch(1);
+        CountDownLatch complete = new CountDownLatch(1);
+        AtomicReference<Message.Response> pausedQueryResponse = new AtomicReference<>();
+        
+        Thread queryRunner = new Thread(() ->
+        {
+            try
+            {
+                started.countDown();
+                pausedQueryResponse.set(client.execute(queryMessage(payloadSize)));
+                complete.countDown();
+            }
+            catch (Throwable t)
+            {
+                error.set(t);
+            }
+        });
+
+        // Advance the rate limiter so that this query will see an available permit. This also
+        // means it should not produce a client warning, which we verify below.
+        // (Note that we advance 2 intervals for the 2 prior queries.)
+        tick.addAndGet(2 * ClientResourceLimits.GLOBAL_REQUEST_LIMITER.getIntervalNanos());
+        
+        queryRunner.start();
+
+        // ...and the request should complete without error.
+        assertTrue(complete.await(SimpleClient.TIMEOUT_SECONDS + 1, TimeUnit.SECONDS));
+        assertNull(error.get());
+        assertNoWarningContains(pausedQueryResponse.get(), BACKPRESSURE_WARNING_SNIPPET);
+
+        // At least the number of milliseconds in the permit interval should already have elapsed 
+        // since the start of the query that pauses the connection.
+        double permitIntervalMillis = (double) TimeUnit.SECONDS.toMillis(1L) / OVERLOAD_PERMITS_PER_SECOND;
+        long sinceQueryStarted = System.currentTimeMillis() - overloadQueryStartTime;
+        long remainingMillis = ((long) permitIntervalMillis) - sinceQueryStarted;
+        assertTrue("Query completed before connection unpause!", remainingMillis <= 0);
+        
+        spinAssertEquals("Timed out after waiting 5 seconds for paused connections metric to normalize.",
+                         0, () -> getPausedConnectionsGauge().getValue(), 5, TimeUnit.SECONDS);
+    }
+
+    private void testThrowOnOverload(int payloadSize, SimpleClient client)
+    {
+        // The first query takes the one available permit...
+        long dispatchedPrior = getRequestDispatchedMeter().getCount();
+        client.execute(queryMessage(payloadSize));
+        assertEquals(dispatchedPrior + 1, getRequestDispatchedMeter().getCount());
+        
+        try
+        {
+            // ...and the second breaches the limit...
+            client.execute(queryMessage(payloadSize));
+        }
+        catch (RuntimeException e)
+        {
+            assertTrue(Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException));
+        }
+
+        // The last request attempt was rejected and therefore not dispatched.
+        assertEquals(dispatchedPrior + 1, getRequestDispatchedMeter().getCount());
+
+        // Advance the timeline and verify that we can take a permit again.
+        // (Note that we don't take one when we throw on overload.)
+        tick.addAndGet(ClientResourceLimits.GLOBAL_REQUEST_LIMITER.getIntervalNanos());
+        client.execute(queryMessage(payloadSize));
+
+        assertEquals(dispatchedPrior + 2, getRequestDispatchedMeter().getCount());
+    }
+
+    private QueryMessage queryMessage(int length)
+    {
+        StringBuilder query = new StringBuilder("INSERT INTO " + KEYSPACE + ".atable (pk, v) VALUES (1, '");
+        
+        for (int i = 0; i < length; i++)
+        {
+            query.append('a');
+        }
+        
+        query.append("')");
+        return new QueryMessage(query.toString(), queryOptions());
+    }
+
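+    // Builds a client for the protocol version under test; the low large-message threshold ensures that the
+    // "large" payloads above are framed as large messages while the "small" ones are not.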
+    private SimpleClient client()
+    {
+        return SimpleClient.builder(nativeAddr.getHostAddress(), nativePort)
+                           .protocolVersion(version)
+                           .useBeta()
+                           .largeMessageThreshold(LARGE_PAYLOAD_THRESHOLD_BYTES)
+                           .build();
+    }
+
+    private QueryOptions queryOptions()
+    {
+        return QueryOptions.create(QueryOptions.DEFAULT.getConsistency(),
+                                   QueryOptions.DEFAULT.getValues(),
+                                   QueryOptions.DEFAULT.skipMetadata(),
+                                   QueryOptions.DEFAULT.getPageSize(),
+                                   QueryOptions.DEFAULT.getPagingState(),
+                                   QueryOptions.DEFAULT.getSerialConsistency(),
+                                   version,
+                                   KEYSPACE);
+    }
+
+    protected static Meter getRequestDispatchedMeter()
+    {
+        String metricName = "org.apache.cassandra.metrics.Client.RequestDispatched";
+        Map<String, Meter> metrics = CassandraMetricsRegistry.Metrics.getMeters((name, metric) -> name.equals(metricName));
+        if (metrics.size() != 1)
+            fail(String.format("Expected a single registered metric for request dispatched, found %s",metrics.size()));
+        return metrics.get(metricName);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/triggers/TriggerExecutorTest.java b/test/unit/org/apache/cassandra/triggers/TriggerExecutorTest.java
index 199e637..257d0c2 100644
--- a/test/unit/org/apache/cassandra/triggers/TriggerExecutorTest.java
+++ b/test/unit/org/apache/cassandra/triggers/TriggerExecutorTest.java
@@ -34,13 +34,11 @@
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.schema.TriggerMetadata;
 import org.apache.cassandra.schema.Triggers;
-import org.apache.cassandra.triggers.TriggerExecutorTest.SameKeySameCfTrigger;
 import org.apache.cassandra.utils.FBUtilities;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 
 public class TriggerExecutorTest
 {
diff --git a/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java b/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java
index 31111bd..6b875a3 100644
--- a/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java
+++ b/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java
@@ -22,23 +22,24 @@
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
-import org.apache.cassandra.schema.TableMetadata;
-import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTestUtil;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.schema.TriggerMetadata;
 import org.apache.cassandra.schema.Triggers;
-import org.apache.cassandra.schema.MigrationManager;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class TriggersSchemaTest
 {
-    String ksName = "ks" + System.nanoTime();
-    String cfName = "cf" + System.nanoTime();
-    String triggerName = "trigger_" + System.nanoTime();
+    String ksName = "ks" + nanoTime();
+    String cfName = "cf" + nanoTime();
+    String triggerName = "trigger_" + nanoTime();
     String triggerClass = "org.apache.cassandra.triggers.NoSuchTrigger.class";
 
     @BeforeClass
@@ -57,7 +58,7 @@
                                 .build();
 
         KeyspaceMetadata ksm = KeyspaceMetadata.create(ksName, KeyspaceParams.simple(1), Tables.of(tm));
-        MigrationManager.announceNewKeyspace(ksm);
+        SchemaTestUtil.announceNewKeyspace(ksm);
 
         TableMetadata tm2 = Schema.instance.getTableMetadata(ksName, cfName);
         assertFalse(tm2.triggers.isEmpty());
@@ -69,14 +70,14 @@
     public void addNewCfWithTriggerToKs() throws Exception
     {
         KeyspaceMetadata ksm = KeyspaceMetadata.create(ksName, KeyspaceParams.simple(1));
-        MigrationManager.announceNewKeyspace(ksm);
+        SchemaTestUtil.announceNewKeyspace(ksm);
 
         TableMetadata metadata =
             CreateTableStatement.parse(String.format("CREATE TABLE %s (k int PRIMARY KEY, v int)", cfName), ksName)
                                 .triggers(Triggers.of(TriggerMetadata.create(triggerName, triggerClass)))
                                 .build();
 
-        MigrationManager.announceNewTable(metadata);
+        SchemaTestUtil.announceNewTable(metadata);
 
         metadata = Schema.instance.getTableMetadata(ksName, cfName);
         assertFalse(metadata.triggers.isEmpty());
@@ -91,7 +92,7 @@
             CreateTableStatement.parse(String.format("CREATE TABLE %s (k int PRIMARY KEY, v int)", cfName), ksName)
                                 .build();
         KeyspaceMetadata ksm = KeyspaceMetadata.create(ksName, KeyspaceParams.simple(1), Tables.of(tm1));
-        MigrationManager.announceNewKeyspace(ksm);
+        SchemaTestUtil.announceNewKeyspace(ksm);
 
         TriggerMetadata td = TriggerMetadata.create(triggerName, triggerClass);
         TableMetadata tm2 =
@@ -100,7 +101,7 @@
                   .unbuild()
                   .triggers(Triggers.of(td))
                   .build();
-        MigrationManager.announceTableUpdate(tm2);
+        SchemaTestUtil.announceTableUpdate(tm2);
 
         TableMetadata tm3 = Schema.instance.getTableMetadata(ksName, cfName);
         assertFalse(tm3.triggers.isEmpty());
@@ -117,14 +118,14 @@
                                 .triggers(Triggers.of(td))
                                 .build();
         KeyspaceMetadata ksm = KeyspaceMetadata.create(ksName, KeyspaceParams.simple(1), Tables.of(tm));
-        MigrationManager.announceNewKeyspace(ksm);
+        SchemaTestUtil.announceNewKeyspace(ksm);
 
         TableMetadata tm1 = Schema.instance.getTableMetadata(ksName, cfName);
         TableMetadata tm2 =
             tm1.unbuild()
                .triggers(tm1.triggers.without(triggerName))
                .build();
-        MigrationManager.announceTableUpdate(tm2);
+        SchemaTestUtil.announceTableUpdate(tm2);
 
         TableMetadata tm3 = Schema.instance.getTableMetadata(ksName, cfName);
         assertTrue(tm3.triggers.isEmpty());
diff --git a/test/unit/org/apache/cassandra/triggers/TriggersTest.java b/test/unit/org/apache/cassandra/triggers/TriggersTest.java
index 2cf0e84..892a022 100644
--- a/test/unit/org/apache/cassandra/triggers/TriggersTest.java
+++ b/test/unit/org/apache/cassandra/triggers/TriggersTest.java
@@ -29,8 +29,6 @@
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.partitions.Partition;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
@@ -38,6 +36,7 @@
 import org.apache.cassandra.utils.FBUtilities;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.toInt;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -123,7 +122,7 @@
     @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class)
     public void onCqlUpdateWithConditionsRejectGeneratedUpdatesForDifferentPartition() throws Exception
     {
-        String cf = "cf" + System.nanoTime();
+        String cf = "cf" + nanoTime();
         try
         {
             setupTableWithTrigger(cf, CrossPartitionTrigger.class);
@@ -139,7 +138,7 @@
     @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class)
     public void onCqlUpdateWithConditionsRejectGeneratedUpdatesForDifferentTable() throws Exception
     {
-        String cf = "cf" + System.nanoTime();
+        String cf = "cf" + nanoTime();
         try
         {
             setupTableWithTrigger(cf, CrossTableTrigger.class);
@@ -155,7 +154,7 @@
     @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class)
     public void ifTriggerThrowsErrorNoMutationsAreApplied() throws Exception
     {
-        String cf = "cf" + System.nanoTime();
+        String cf = "cf" + nanoTime();
         try
         {
             setupTableWithTrigger(cf, ErrorTrigger.class);
diff --git a/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java b/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java
index 8119dcb..c6b49d9 100644
--- a/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java
+++ b/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java
@@ -389,4 +389,4 @@
     private static class SomeUncheckedException extends RuntimeException
     {
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/utils/AssertionUtils.java b/test/unit/org/apache/cassandra/utils/AssertionUtils.java
new file mode 100644
index 0000000..d5b1981
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/AssertionUtils.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import com.google.common.base.Throwables;
+
+import org.assertj.core.api.Condition;
+
+public class AssertionUtils
+{
+    private AssertionUtils()
+    {
+    }
+
+    /**
+     * When working with jvm-dtest the thrown error is in a different {@link ClassLoader} causing type checks
+     * to fail; this method relies on naming instead.
+     */
+    public static Condition<Object> is(Class<?> klass)
+    {
+        String name = klass.getCanonicalName();
+        return new Condition<Object>() {
+            @Override
+            public boolean matches(Object value)
+            {
+                return value.getClass().getCanonicalName().equals(name);
+            }
+
+            @Override
+            public String toString()
+            {
+                return name;
+            }
+        };
+    }
+
+    public static <T extends Throwable> Condition<Throwable> isThrowable(Class<T> klass)
+    {
+        // org.assertj.core.api.AbstractAssert.is has <? super ? extends Throwable>, which blocks <T>, so we need
+        // to always return Throwable
+        return (Condition<Throwable>) (Condition<?>) is(klass);
+    }
+
+    /**
+     * When working with jvm-dtest the thrown error is in a different {@link ClassLoader} causing type checks
+     * to fail; this method relies on naming instead.
+     *
+     * This method is different from {@link #is(Class)} as it mimics instanceof rather than equality.
+     */
+    public static Condition<Object> isInstanceof(Class<?> klass)
+    {
+        String name = klass.getCanonicalName();
+        return new Condition<Object>() {
+            @Override
+            public boolean matches(Object value)
+            {
+                if (value == null)
+                    return false;
+                return matches(value.getClass());
+            }
+
+            private boolean matches(Class<?> input)
+            {
+                for (Class<?> klass = input; klass != null; klass = klass.getSuperclass())
+                {
+                    // extends
+                    if (klass.getCanonicalName().equals(name))
+                        return true;
+                    // implements
+                    for (Class<?> i : klass.getInterfaces())
+                    {
+                        if (matches(i))
+                            return true;
+                    }
+                }
+                return false;
+            }
+
+            @Override
+            public String toString()
+            {
+                return name;
+            }
+        };
+    }
+
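+    /**
+     * Applies the given condition to the root cause of the tested {@link Throwable}.
+     */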
+    public static Condition<Throwable> rootCause(Condition<Throwable> other)
+    {
+        return new Condition<Throwable>() {
+            @Override
+            public boolean matches(Throwable value)
+            {
+                return other.matches(Throwables.getRootCause(value));
+            }
+
+            @Override
+            public String toString()
+            {
+                return "Root cause " + other;
+            }
+        };
+    }
+
+    public static Condition<Throwable> rootCauseIs(Class<? extends Throwable> klass)
+    {
+        return rootCause((Condition<Throwable>) (Condition<?>) is(klass));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/AssertionUtilsTest.java b/test/unit/org/apache/cassandra/utils/AssertionUtilsTest.java
new file mode 100644
index 0000000..e3ec93a
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/AssertionUtilsTest.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils;
+
+import org.junit.Test;
+
+import org.assertj.core.api.Assertions;
+
+public class AssertionUtilsTest
+{
+    @Test
+    public void isInstanceof()
+    {
+        Assertions.assertThat(new C())
+                  .is(AssertionUtils.isInstanceof(A.class));
+
+        Assertions.assertThat(new D())
+                  .is(AssertionUtils.isInstanceof(A.class))
+                  .is(AssertionUtils.isInstanceof(B.class));
+
+        Assertions.assertThat(null instanceof A)
+                  .isEqualTo(AssertionUtils.isInstanceof(A.class).matches(null));
+    }
+
+    interface A {}
+    interface B extends A {}
+    static class C implements A {}
+    static class D implements B {}
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/utils/BloomFilterTest.java b/test/unit/org/apache/cassandra/utils/BloomFilterTest.java
index 1c3afff..96464c9 100644
--- a/test/unit/org/apache/cassandra/utils/BloomFilterTest.java
+++ b/test/unit/org/apache/cassandra/utils/BloomFilterTest.java
@@ -18,7 +18,11 @@
 */
 package org.apache.cassandra.utils;
 
-import java.io.*;
+import org.apache.cassandra.io.util.*;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -29,10 +33,6 @@
 
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.IFilter.FilterKey;
 import org.apache.cassandra.utils.KeyGenerator.RandomStringGenerator;
 import org.apache.cassandra.utils.obs.IBitSet;
@@ -210,12 +210,12 @@
         File file = FileUtils.createDeletableTempFile("bloomFilterTest-", ".dat");
         BloomFilter filter = (BloomFilter) FilterFactory.getFilter(((long) Integer.MAX_VALUE / 8) + 1, 0.01d);
         filter.add(FilterTestHelper.wrap(test));
-        DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(file));
+        DataOutputStreamPlus out = new FileOutputStreamPlus(file);
         BloomFilterSerializer.serialize(filter, out);
         out.close();
         filter.close();
 
-        DataInputStream in = new DataInputStream(new FileInputStream(file));
+        DataInputStream in = new DataInputStream(new FileInputStreamPlus(file));
         BloomFilter filter2 = BloomFilterSerializer.deserialize(in, false);
         Assert.assertTrue(filter2.isPresent(FilterTestHelper.wrap(test)));
         FileUtils.closeQuietly(in);
diff --git a/test/unit/org/apache/cassandra/utils/CassandraGenerators.java b/test/unit/org/apache/cassandra/utils/CassandraGenerators.java
index 4d51f5c..ad9cb6b 100644
--- a/test/unit/org/apache/cassandra/utils/CassandraGenerators.java
+++ b/test/unit/org/apache/cassandra/utils/CassandraGenerators.java
@@ -283,7 +283,7 @@
                 {
                     // to make sure the correct indents are taken, convert to CQL, then replace newlines with the indents
                     // then prefix with the indents.
-                    String cql = SchemaCQLHelper.getTableMetadataAsCQL((TableMetadata) value, true, true, false);
+                    String cql = SchemaCQLHelper.getTableMetadataAsCQL((TableMetadata) value, null);
                     cql = NEWLINE_PATTERN.matcher(cql).replaceAll(Matcher.quoteReplacement("\n  " + spacer));
                     cql = "\n  " + spacer + cql;
                     value = cql;
diff --git a/test/unit/org/apache/cassandra/utils/CassandraVersionTest.java b/test/unit/org/apache/cassandra/utils/CassandraVersionTest.java
index b9789f5..6f34499 100644
--- a/test/unit/org/apache/cassandra/utils/CassandraVersionTest.java
+++ b/test/unit/org/apache/cassandra/utils/CassandraVersionTest.java
@@ -25,7 +25,6 @@
 import java.util.List;
 
 import com.google.common.base.Splitter;
-
 import org.apache.commons.lang3.ArrayUtils;
 
 import org.junit.Assert;
@@ -253,6 +252,16 @@
         v2 = new CassandraVersion("4.0");
         assertTrue(v1.compareTo(v2) < 0);
         assertTrue(v2.compareTo(v1) > 0);
+
+        assertEquals(-1, v1.compareTo(v2));
+
+        v1 = new CassandraVersion("1.2.3");
+        v2 = new CassandraVersion("1.2.3.1");
+        assertEquals(-1, v1.compareTo(v2));
+
+        v1 = new CassandraVersion("1.2.3.1");
+        v2 = new CassandraVersion("1.2.3.2");
+        assertEquals(-1, v1.compareTo(v2));
     }
 
     @Test
@@ -347,7 +356,7 @@
     @Test
     public void testParseIdentifiersPositive() throws Throwable
     {
-        String[] result = parseIdentifiers("DUMMY", "+a.b.cde.f_g.");
+        String[] result = parseIdentifiers("DUMMY", "a.b.cde.f_g.");
         String[] expected = {"a", "b", "cde", "f_g"};
         assertArrayEquals(expected, result);
     }
diff --git a/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java b/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java
index 833f2e4..e0ee67c 100644
--- a/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java
+++ b/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java
@@ -23,6 +23,7 @@
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
 import java.util.Map;
 import java.util.Optional;
 import java.util.TreeMap;
@@ -46,6 +47,7 @@
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -227,4 +229,29 @@
             executor.shutdown();
         }
     }
+
+    @Test
+    public void testCamelToSnake()
+    {
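+        // Check every case even when an earlier one fails; later failures are attached as suppressed exceptions.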
+        AssertionError error = null;
+        for (Pair<String, String> a : Arrays.asList(Pair.create("Testing", "testing"),
+                                                    Pair.create("fooBarBaz", "foo_bar_baz"),
+                                                    Pair.create("foo_bar_baz", "foo_bar_baz")
+        ))
+        {
+            try
+            {
+                assertThat(FBUtilities.camelToSnake(a.left)).isEqualTo(a.right);
+            }
+            catch (AssertionError e)
+            {
+                if (error == null)
+                    error = e;
+                else
+                    error.addSuppressed(e);
+            }
+        }
+        if (error != null)
+            throw error;
+    }
 }
diff --git a/test/unit/org/apache/cassandra/utils/FailingRunnable.java b/test/unit/org/apache/cassandra/utils/FailingRunnable.java
new file mode 100644
index 0000000..ab5dbc8
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/FailingRunnable.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils;
+
+import com.google.common.base.Throwables;
+
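+/**
+ * A {@link Runnable} whose body may throw; checked throwables are rethrown wrapped in a {@link RuntimeException}.
+ */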
+public interface FailingRunnable extends Runnable
+{
+    void doRun() throws Throwable;
+
+    default void run()
+    {
+        try
+        {
+            doRun();
+        }
+        catch (Throwable t)
+        {
+            Throwables.throwIfUnchecked(t);
+            throw new RuntimeException(t);
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/GeneratorsTest.java b/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
index 7fa0561..b9358cc 100644
--- a/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
+++ b/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
@@ -45,4 +45,4 @@
     {
         qt().forAll(Generators.DNS_DOMAIN_NAME).checkAssert(InternetDomainName::from);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/utils/JVMStabilityInspectorTest.java b/test/unit/org/apache/cassandra/utils/JVMStabilityInspectorTest.java
index a8dd22a..3a3415e 100644
--- a/test/unit/org/apache/cassandra/utils/JVMStabilityInspectorTest.java
+++ b/test/unit/org/apache/cassandra/utils/JVMStabilityInspectorTest.java
@@ -20,6 +20,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.SocketException;
+import java.util.Arrays;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -29,6 +30,11 @@
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
+import org.assertj.core.api.Assertions;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.service.CassandraDaemon;
+import org.apache.cassandra.service.DefaultFSErrorHandler;
+import org.apache.cassandra.service.StorageService;
 
 import static java.util.Arrays.asList;
 import static org.junit.Assert.assertEquals;
@@ -53,43 +59,62 @@
 
         Config.DiskFailurePolicy oldPolicy = DatabaseDescriptor.getDiskFailurePolicy();
         Config.CommitFailurePolicy oldCommitPolicy = DatabaseDescriptor.getCommitFailurePolicy();
+        FileUtils.setFSErrorHandler(new DefaultFSErrorHandler());
         try
         {
-            killerForTests.reset();
-            JVMStabilityInspector.inspectThrowable(new IOException());
-            assertFalse(killerForTests.wasKilled());
+            CassandraDaemon daemon = new CassandraDaemon();
+            daemon.completeSetup();
+            for (boolean daemonSetupCompleted : Arrays.asList(false, true))
+            {
+                // The disk failure policy acts differently depending on whether setup is complete, which is
+                // determined by the daemon thread not being null.
+                StorageService.instance.registerDaemon(daemonSetupCompleted ? daemon : null);
 
-            DatabaseDescriptor.setDiskFailurePolicy(Config.DiskFailurePolicy.die);
-            killerForTests.reset();
-            JVMStabilityInspector.inspectThrowable(new FSReadError(new IOException(), "blah"));
-            assertTrue(killerForTests.wasKilled());
+                try
+                {
+                    killerForTests.reset();
+                    JVMStabilityInspector.inspectThrowable(new IOException());
+                    assertFalse(killerForTests.wasKilled());
 
-            killerForTests.reset();
-            JVMStabilityInspector.inspectThrowable(new FSWriteError(new IOException(), "blah"));
-            assertTrue(killerForTests.wasKilled());
+                    DatabaseDescriptor.setDiskFailurePolicy(Config.DiskFailurePolicy.die);
+                    killerForTests.reset();
+                    JVMStabilityInspector.inspectThrowable(new FSReadError(new IOException(), "blah"));
+                    assertTrue(killerForTests.wasKilled());
 
-            killerForTests.reset();
-            JVMStabilityInspector.inspectThrowable(new CorruptSSTableException(new IOException(), "blah"));
-            assertTrue(killerForTests.wasKilled());
+                    killerForTests.reset();
+                    JVMStabilityInspector.inspectThrowable(new FSWriteError(new IOException(), "blah"));
+                    assertTrue(killerForTests.wasKilled());
 
-            killerForTests.reset();
-            JVMStabilityInspector.inspectThrowable(new RuntimeException(new CorruptSSTableException(new IOException(), "blah")));
-            assertTrue(killerForTests.wasKilled());
+                    killerForTests.reset();
+                    JVMStabilityInspector.inspectThrowable(new CorruptSSTableException(new IOException(), "blah"));
+                    assertTrue(killerForTests.wasKilled());
 
-            DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
-            killerForTests.reset();
-            JVMStabilityInspector.inspectCommitLogThrowable(new Throwable());
-            assertTrue(killerForTests.wasKilled());
+                    killerForTests.reset();
+                    JVMStabilityInspector.inspectThrowable(new RuntimeException(new CorruptSSTableException(new IOException(), "blah")));
+                    assertTrue(killerForTests.wasKilled());
 
-            killerForTests.reset();
-            JVMStabilityInspector.inspectThrowable(new Exception(new IOException()));
-            assertFalse(killerForTests.wasKilled());
+                    DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
+                    killerForTests.reset();
+                    JVMStabilityInspector.inspectCommitLogThrowable(new Throwable());
+                    assertTrue(killerForTests.wasKilled());
+
+                    killerForTests.reset();
+                    JVMStabilityInspector.inspectThrowable(new Exception(new IOException()));
+                    assertFalse(killerForTests.wasKilled());
+                }
+                catch (Exception | Error e)
+                {
+                    throw new AssertionError("Failure when daemonSetupCompleted=" + daemonSetupCompleted, e);
+                }
+            }
         }
         finally
         {
             JVMStabilityInspector.replaceKiller(originalKiller);
             DatabaseDescriptor.setDiskFailurePolicy(oldPolicy);
             DatabaseDescriptor.setCommitFailurePolicy(oldCommitPolicy);
+            StorageService.instance.registerDaemon(null);
+            FileUtils.setFSErrorHandler(null);
         }
     }
 
@@ -126,6 +151,15 @@
     }
 
     @Test
+    public void testForceHeapSpaceOomExclude()
+    {
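+        // A "Java heap space" OutOfMemoryError is expected to propagate out of inspectThrowable unchanged.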
+        OutOfMemoryError error = new OutOfMemoryError("Java heap space");
+        Assertions.assertThatThrownBy(() -> JVMStabilityInspector.inspectThrowable(error))
+                  .isInstanceOf(OutOfMemoryError.class)
+                  .isEqualTo(error);
+    }
+
+    @Test
     public void fileHandleTest()
     {
         KillerForTests killerForTests = new KillerForTests();
diff --git a/test/unit/org/apache/cassandra/utils/KeyGenerator.java b/test/unit/org/apache/cassandra/utils/KeyGenerator.java
index df958678..f1ff7e3 100644
--- a/test/unit/org/apache/cassandra/utils/KeyGenerator.java
+++ b/test/unit/org/apache/cassandra/utils/KeyGenerator.java
@@ -18,8 +18,13 @@
 */
 package org.apache.cassandra.utils;
 
-import java.io.*;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
 import java.nio.ByteBuffer;
+import java.nio.file.NoSuchFileException;
 import java.util.Random;
 
 public class KeyGenerator
@@ -119,7 +124,7 @@
 
         static 
         {
-            try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("/usr/share/dict/words")))) 
+            try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStreamPlus("/usr/share/dict/words"))))
             {
                 while (br.ready()) 
                 {
@@ -154,9 +159,9 @@
         {
             try 
             {
-                reader = new BufferedReader(new InputStreamReader(new FileInputStream("/usr/share/dict/words")));
+                reader = new BufferedReader(new InputStreamReader(new FileInputStreamPlus("/usr/share/dict/words")));
             } 
-            catch (FileNotFoundException e) 
+            catch (NoSuchFileException e)
             {
                 throw new RuntimeException(e);
             }
diff --git a/test/unit/org/apache/cassandra/utils/MergeIteratorComparisonTest.java b/test/unit/org/apache/cassandra/utils/MergeIteratorComparisonTest.java
index 6d9d2f6..1b52fb5 100644
--- a/test/unit/org/apache/cassandra/utils/MergeIteratorComparisonTest.java
+++ b/test/unit/org/apache/cassandra/utils/MergeIteratorComparisonTest.java
@@ -23,7 +23,7 @@
 
 import com.google.common.base.Function;
 import com.google.common.base.Objects;
-import org.apache.cassandra.utils.AbstractIterator;
+
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
@@ -38,6 +38,8 @@
 import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.utils.MergeIterator.Reducer;
 
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+
 public class MergeIteratorComparisonTest
 {
     private static class CountingComparator<T> implements Comparator<T>
@@ -231,7 +233,7 @@
             @Override
             public UUID next()
             {
-                return UUIDGen.getTimeUUID();
+                return nextTimeUUID().asUUID();
             }
         }.result;
         testMergeIterator(reducer, lists);
@@ -257,14 +259,14 @@
     public void testTimeUuidType()
     {
         System.out.println("testTimeUuidType");
-        final AbstractType<UUID> type = TimeUUIDType.instance;
+        final AbstractType<TimeUUID> type = TimeUUIDType.instance;
         Reducer<ByteBuffer, Counted<ByteBuffer>> reducer = new Counter<ByteBuffer>();
 
         List<List<ByteBuffer>> lists = new SimpleListGenerator<ByteBuffer>(type, ITERATOR_COUNT, LIST_LENGTH) {
             @Override
             public ByteBuffer next()
             {
-                return type.decompose(UUIDGen.getTimeUUID());
+                return type.decompose(nextTimeUUID());
             }
         }.result;
         testMergeIterator(reducer, lists, type);
@@ -281,7 +283,7 @@
             @Override
             public ByteBuffer next()
             {
-                return type.decompose(UUIDGen.getTimeUUID());
+                return type.decompose(nextTimeUUID().asUUID());
             }
         }.result;
         testMergeIterator(reducer, lists, type);
@@ -300,7 +302,7 @@
             @Override
             public KeyedSet<Integer, UUID> next()
             {
-                return new KeyedSet<>(r.nextInt(5 * LIST_LENGTH), UUIDGen.getTimeUUID());
+                return new KeyedSet<>(r.nextInt(5 * LIST_LENGTH), nextTimeUUID().asUUID());
             }
         }.result;
         testMergeIterator(reducer, lists);
diff --git a/test/unit/org/apache/cassandra/utils/MergeIteratorTest.java b/test/unit/org/apache/cassandra/utils/MergeIteratorTest.java
index fe2cecf..056a4a7 100644
--- a/test/unit/org/apache/cassandra/utils/MergeIteratorTest.java
+++ b/test/unit/org/apache/cassandra/utils/MergeIteratorTest.java
@@ -21,7 +21,6 @@
 import java.util.Arrays;
 import java.util.Iterator;
 
-import org.apache.cassandra.utils.AbstractIterator;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Ordering;
 import org.junit.Before;
diff --git a/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java b/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
index 1cdcc22..ae70fd4 100644
--- a/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
+++ b/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
@@ -600,19 +600,19 @@
         Assert.assertEquals(1, MerkleTree.estimatedMaxDepthForBytes(Murmur3Partitioner.instance, 0, 32));
         Assert.assertEquals(1, MerkleTree.estimatedMaxDepthForBytes(Murmur3Partitioner.instance, 1, 32));
 
-        // The minimum of 1 megabyte split between RF=3 should yield trees of around 10
+        // The minimum of 1 mebibyte split between RF=3 should yield trees of around 10
         Assert.assertEquals(10, MerkleTree.estimatedMaxDepthForBytes(Murmur3Partitioner.instance,
                                                                      1048576 / 3, 32));
 
-        // With a single megabyte of space we should get 12
+        // With a single mebibyte of space we should get 12
         Assert.assertEquals(12, MerkleTree.estimatedMaxDepthForBytes(Murmur3Partitioner.instance,
                                                                      1048576, 32));
 
-        // With 100 megabytes we should get a limit of 19
+        // With 100 mebibytes we should get a limit of 19
         Assert.assertEquals(19, MerkleTree.estimatedMaxDepthForBytes(Murmur3Partitioner.instance,
                                                                      100 * 1048576, 32));
 
-        // With 300 megabytes we should get the old limit of 20
+        // With 300 mebibytes we should get the old limit of 20
         Assert.assertEquals(20, MerkleTree.estimatedMaxDepthForBytes(Murmur3Partitioner.instance,
                                                                      300 * 1048576, 32));
         Assert.assertEquals(20, MerkleTree.estimatedMaxDepthForBytes(RandomPartitioner.instance,
@@ -635,7 +635,7 @@
         Range<Token> fullRange = new Range<>(partitioner.getMinimumToken(), partitioner.getMinimumToken());
         MerkleTree tree = new MerkleTree(partitioner, fullRange, RECOMMENDED_DEPTH, 0);
 
-        // Test 16 kilobyte -> 16 megabytes
+        // Test 16 kibibyte -> 16 mebibytes
         for (int i = 14; i < 24; i ++)
         {
             long numBytes = 1 << i;
diff --git a/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java b/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java
index b2891a9..3839be0 100644
--- a/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java
+++ b/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java
@@ -17,7 +17,8 @@
  */
 package org.apache.cassandra.utils;
 
-import static org.apache.cassandra.utils.MonotonicClock.approxTime;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
 import static org.junit.Assert.*;
 
 import org.junit.Test;
@@ -27,7 +28,7 @@
     @Test
     public void testTimestampOrdering() throws Exception
     {
-        long nowNanos = System.nanoTime();
+        long nowNanos = nanoTime();
         long now = System.currentTimeMillis();
         long lastConverted = 0;
         for (long ii = 0; ii < 10000000; ii++)
@@ -39,14 +40,14 @@
                 Thread.sleep(1);
             }
 
-            nowNanos = Math.max(nowNanos, System.nanoTime());
+            nowNanos = Math.max(nowNanos, nanoTime());
             long convertedNow = approxTime.translate().toMillisSinceEpoch(nowNanos);
 
-            int maxDiff = FBUtilities.isWindows ? 15 : 1;
+            int maxDiff = 1;
             assertTrue("convertedNow = " + convertedNow + " lastConverted = " + lastConverted + " in iteration " + ii,
                        convertedNow >= (lastConverted - maxDiff));
 
-            maxDiff = FBUtilities.isWindows ? 25 : 2;
+            maxDiff = 2;
             assertTrue("now = " + now + " convertedNow = " + convertedNow + " in iteration " + ii,
                        (maxDiff - 2) <= convertedNow);
 
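
For reference, a hedged sketch of the clock indirection this test now exercises: Clock.Global.nanoTime() stands in for System.nanoTime(), and the approxTime translation converts a nanoTime reading back to wall-clock millis. The helper name is illustrative.

    import static org.apache.cassandra.utils.Clock.Global.nanoTime;
    import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;

    final class ClockSketch
    {
        static long approxNowMillis()
        {
            // nanoTime() is interceptable in tests, unlike a direct System.nanoTime() call.
            long nowNanos = nanoTime();
            return approxTime.translate().toMillisSinceEpoch(nowNanos);
        }
    }
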
diff --git a/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java b/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java
index 1a26351..52a7f84 100644
--- a/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java
+++ b/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java
@@ -18,8 +18,8 @@
  */
 package org.apache.cassandra.utils;
 
-import java.io.File;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -32,7 +32,7 @@
     {
         File file = FileUtils.createDeletableTempFile("testSkipCache", "1");
 
-        NativeLibrary.trySkipCache(file.getPath(), 0, 0);
+        NativeLibrary.trySkipCache(file.path(), 0, 0);
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/utils/NoSpamLoggerTest.java b/test/unit/org/apache/cassandra/utils/NoSpamLoggerTest.java
index 58e6ea0..73aef09 100644
--- a/test/unit/org/apache/cassandra/utils/NoSpamLoggerTest.java
+++ b/test/unit/org/apache/cassandra/utils/NoSpamLoggerTest.java
@@ -18,13 +18,13 @@
 */
 package org.apache.cassandra.utils;
 
-import static org.junit.Assert.*;
-
 import java.util.ArrayDeque;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Queue;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 
 import org.apache.cassandra.utils.NoSpamLogger.Level;
 import org.apache.cassandra.utils.NoSpamLogger.NoSpamLogStatement;
@@ -34,146 +34,138 @@
 import org.slf4j.Logger;
 import org.slf4j.helpers.SubstituteLogger;
 
+import static org.junit.Assert.*;
 
 public class NoSpamLoggerTest
 {
     Map<Level, Queue<Pair<String, Object[]>>> logged = new HashMap<>();
 
-   Logger mock = new SubstituteLogger(null, null, true)
-   {
-
-       @Override
-       public void info(String statement, Object... args)
-       {
-           logged.get(Level.INFO).offer(Pair.create(statement, args));
-       }
-
-       @Override
-       public void warn(String statement, Object... args)
-       {
-           logged.get(Level.WARN).offer(Pair.create(statement, args));
-       }
-
-       @Override
-       public void error(String statement, Object... args)
-       {
-           logged.get(Level.ERROR).offer(Pair.create(statement, args));
-       }
-
-       @Override
-       public int hashCode()
-       {
-           return 42;//It's a valid hash code
-       }
-
-       @Override
-       public boolean equals(Object o)
-       {
-           return this == o;
-       }
-   };
-
-
-   static final String statement = "swizzle{}";
-   static final String param = "";
-   static long now;
-
-   @BeforeClass
-   public static void setUpClass() throws Exception
-   {
-       NoSpamLogger.CLOCK = new NoSpamLogger.Clock()
-       {
+    Logger mock = new SubstituteLogger(null, null, true)
+    {
         @Override
-        public long nanoTime()
+        public void info(String statement, Object... args)
         {
-            return now;
+            logged.get(Level.INFO).offer(Pair.create(statement, args));
         }
-       };
-   }
 
-   @Before
-   public void setUp() throws Exception
-   {
-       logged.put(Level.INFO, new ArrayDeque<Pair<String, Object[]>>());
-       logged.put(Level.WARN, new ArrayDeque<Pair<String, Object[]>>());
-       logged.put(Level.ERROR, new ArrayDeque<Pair<String, Object[]>>());
-       NoSpamLogger.clearWrappedLoggersForTest();
-   }
+        @Override
+        public void warn(String statement, Object... args)
+        {
+            logged.get(Level.WARN).offer(Pair.create(statement, args));
+        }
 
-   @Test
-   public void testNoSpamLogger() throws Exception
-   {
-       testLevel(Level.INFO);
-       testLevel(Level.WARN);
-       testLevel(Level.ERROR);
-   }
+        @Override
+        public void error(String statement, Object... args)
+        {
+            logged.get(Level.ERROR).offer(Pair.create(statement, args));
+        }
 
-   private void testLevel(Level l) throws Exception
-   {
-       setUp();
-       now = 5;
+        @Override
+        public int hashCode()
+        {
+            return 42; //It's a valid hash code
+        }
 
-       assertTrue(NoSpamLogger.log( mock, l, 5,  TimeUnit.NANOSECONDS, statement, param));
+        @Override
+        public boolean equals(Object o)
+        {
+            return this == o;
+        }
+    };
 
-       assertEquals(1, logged.get(l).size());
+    static final String statement = "swizzle{}";
+    static final String param = "";
+    static long now;
 
-       assertFalse(NoSpamLogger.log( mock, l, 5,  TimeUnit.NANOSECONDS, statement, param));
+    @BeforeClass
+    public static void setUpClass() throws Exception
+    {
+        NoSpamLogger.CLOCK = () -> now;
+    }
 
-       assertEquals(1, logged.get(l).size());
+    @Before
+    public void setUp() throws Exception
+    {
+        logged.put(Level.INFO, new ArrayDeque<Pair<String, Object[]>>());
+        logged.put(Level.WARN, new ArrayDeque<Pair<String, Object[]>>());
+        logged.put(Level.ERROR, new ArrayDeque<Pair<String, Object[]>>());
+        NoSpamLogger.clearWrappedLoggersForTest();
+    }
 
-       now += 5;
+    @Test
+    public void testNoSpamLogger() throws Exception
+    {
+        testLevel(Level.INFO);
+        testLevel(Level.WARN);
+        testLevel(Level.ERROR);
+    }
 
-       assertTrue(NoSpamLogger.log( mock, l, 5,  TimeUnit.NANOSECONDS, statement, param));
+    private void testLevel(Level l) throws Exception
+    {
+        setUp();
+        now = 5;
 
-       assertEquals(2, logged.get(l).size());
+        assertTrue(NoSpamLogger.log(mock, l, 5, TimeUnit.NANOSECONDS, statement, param));
 
-       assertTrue(NoSpamLogger.log( mock, l, "key", 5,  TimeUnit.NANOSECONDS, statement, param));
+        assertEquals(1, logged.get(l).size());
 
-       assertEquals(3, logged.get(l).size());
+        assertFalse(NoSpamLogger.log(mock, l, 5, TimeUnit.NANOSECONDS, statement, param));
 
-       assertFalse(NoSpamLogger.log( mock, l, "key", 5,  TimeUnit.NANOSECONDS, statement, param));
+        assertEquals(1, logged.get(l).size());
 
-       assertEquals(3, logged.get(l).size());
-   }
+        now += 5;
 
-   private void assertLoggedSizes(int info, int warn, int error)
-   {
-       assertEquals(info, logged.get(Level.INFO).size());
-       assertEquals(warn, logged.get(Level.WARN).size());
-       assertEquals(error, logged.get(Level.ERROR).size());
-   }
+        assertTrue(NoSpamLogger.log(mock, l, 5, TimeUnit.NANOSECONDS, statement, param));
 
-   @Test
-   public void testNoSpamLoggerDirect() throws Exception
-   {
-       now = 5;
-       NoSpamLogger logger = NoSpamLogger.getLogger( mock, 5, TimeUnit.NANOSECONDS);
+        assertEquals(2, logged.get(l).size());
 
-       assertTrue(logger.info(statement, param));
-       assertFalse(logger.info(statement, param));
-       assertFalse(logger.warn(statement, param));
-       assertFalse(logger.error(statement, param));
+        assertTrue(NoSpamLogger.log(mock, l, "key", 5, TimeUnit.NANOSECONDS, statement, param));
 
-       assertLoggedSizes(1, 0, 0);
+        assertEquals(3, logged.get(l).size());
 
-       NoSpamLogStatement statement = logger.getStatement("swizzle2{}", 10, TimeUnit.NANOSECONDS);
-       assertTrue(statement.warn(param)); // since a statement of this key hasn't logged yet
-       assertLoggedSizes(1, 1, 0);
+        assertFalse(NoSpamLogger.log(mock, l, "key", 5, TimeUnit.NANOSECONDS, statement, param));
 
-       now = 10;
-       assertFalse(statement.warn(param)); // we logged it above
-       assertLoggedSizes(1, 1, 0);
+        assertEquals(3, logged.get(l).size());
+    }
 
-       now = 15;
-       assertTrue(statement.warn(param)); // First log was at 5, now past the interval
-       assertLoggedSizes(1, 2, 0);
-   }
+    private void assertLoggedSizes(int info, int warn, int error)
+    {
+        assertEquals(info, logged.get(Level.INFO).size());
+        assertEquals(warn, logged.get(Level.WARN).size());
+        assertEquals(error, logged.get(Level.ERROR).size());
+    }
+
+    @Test
+    public void testNoSpamLoggerDirect() throws Exception
+    {
+        now = 5;
+        NoSpamLogger logger = NoSpamLogger.getLogger(mock, 5, TimeUnit.NANOSECONDS);
+
+        assertTrue(logger.info(statement, param));
+        assertFalse(logger.info(statement, param));
+        assertFalse(logger.warn(statement, param));
+        assertFalse(logger.error(statement, param));
+
+        assertLoggedSizes(1, 0, 0);
+
+        NoSpamLogStatement statement = logger.getStatement("swizzle2{}", 10, TimeUnit.NANOSECONDS);
+        assertTrue(statement.warn(param)); // since a statement of this key hasn't logged yet
+        assertLoggedSizes(1, 1, 0);
+
+        now = 10;
+        assertFalse(statement.warn(param)); // we logged it above
+        assertLoggedSizes(1, 1, 0);
+
+        now = 15;
+        assertTrue(statement.warn(param)); // First log was at 5, now past the interval
+        assertLoggedSizes(1, 2, 0);
+    }
 
     @Test
     public void testNegativeNowNanos() throws Exception
     {
         now = -6;
-        NoSpamLogger logger = NoSpamLogger.getLogger( mock, 5, TimeUnit.NANOSECONDS);
+        NoSpamLogger logger = NoSpamLogger.getLogger(mock, 5, TimeUnit.NANOSECONDS);
 
         assertTrue(logger.info(statement, param));
         assertFalse(logger.info(statement, param));
@@ -204,84 +196,114 @@
     }
 
     @Test
-   public void testNoSpamLoggerStatementDirect() throws Exception
-   {
-       NoSpamLogger.NoSpamLogStatement nospam = NoSpamLogger.getStatement( mock, statement, 5, TimeUnit.NANOSECONDS);
+    public void testNoSpamLoggerStatementDirect()
+    {
+        NoSpamLogger.NoSpamLogStatement nospam = NoSpamLogger.getStatement(mock, statement, 5, TimeUnit.NANOSECONDS);
 
-       now = 5;
+        now = 5;
 
-       assertTrue(nospam.info(statement, param));
-       assertFalse(nospam.info(statement, param));
-       assertFalse(nospam.warn(statement, param));
-       assertFalse(nospam.error(statement, param));
+        assertTrue(nospam.info(statement, param));
+        assertFalse(nospam.info(statement, param));
+        assertFalse(nospam.warn(statement, param));
+        assertFalse(nospam.error(statement, param));
 
-       assertLoggedSizes(1, 0, 0);
-   }
+        assertLoggedSizes(1, 0, 0);
+    }
 
-   private void checkMock(Level l)
-   {
-       Pair<String, Object[]> p = logged.get(l).poll();
-       assertNotNull(p);
-       assertEquals(statement, p.left);
-       Object objs[] = p.right;
-       assertEquals(1, objs.length);
-       assertEquals(param, objs[0]);
-       assertTrue(logged.get(l).isEmpty());
-   }
+    private void checkMock(Level l)
+    {
+        Pair<String, Object[]> p = logged.get(l).poll();
+        assertNotNull(p);
+        assertEquals(statement, p.left);
+        Object[] objs = p.right;
+        assertEquals(1, objs.length);
+        assertEquals(param, objs[0]);
+        assertTrue(logged.get(l).isEmpty());
+    }
 
-   /*
-    * Make sure that what is passed to the underlying logger is the correct set of objects
-    */
-   @Test
-   public void testLoggedResult() throws Exception
-   {
-       now = 5;
+    /*
+     * Make sure that what is passed to the underlying logger is the correct set of objects
+     */
+    @Test
+    public void testLoggedResult()
+    {
+        now = 5;
 
-       assertTrue(NoSpamLogger.log( mock, Level.INFO, 5,  TimeUnit.NANOSECONDS, statement, param));
-       checkMock(Level.INFO);
+        assertTrue(NoSpamLogger.log(mock, Level.INFO, 5, TimeUnit.NANOSECONDS, statement, param));
+        checkMock(Level.INFO);
 
-       now = 10;
+        now = 10;
 
-       assertTrue(NoSpamLogger.log( mock, Level.WARN, 5,  TimeUnit.NANOSECONDS, statement, param));
-       checkMock(Level.WARN);
+        assertTrue(NoSpamLogger.log(mock, Level.WARN, 5, TimeUnit.NANOSECONDS, statement, param));
+        checkMock(Level.WARN);
 
-       now = 15;
+        now = 15;
 
-       assertTrue(NoSpamLogger.log( mock, Level.ERROR, 5,  TimeUnit.NANOSECONDS, statement, param));
-       checkMock(Level.ERROR);
+        assertTrue(NoSpamLogger.log(mock, Level.ERROR, 5, TimeUnit.NANOSECONDS, statement, param));
+        checkMock(Level.ERROR);
 
-       now = 20;
+        now = 20;
 
-       NoSpamLogger logger = NoSpamLogger.getLogger(mock, 5, TimeUnit.NANOSECONDS);
+        NoSpamLogger logger = NoSpamLogger.getLogger(mock, 5, TimeUnit.NANOSECONDS);
 
-       assertTrue(logger.info(statement, param));
-       checkMock(Level.INFO);
+        assertTrue(logger.info(statement, param));
+        checkMock(Level.INFO);
 
-       now = 25;
+        now = 25;
 
-       assertTrue(logger.warn(statement, param));
-       checkMock(Level.WARN);
+        assertTrue(logger.warn(statement, param));
+        checkMock(Level.WARN);
 
-       now = 30;
+        now = 30;
 
-       assertTrue(logger.error(statement, param));
-       checkMock(Level.ERROR);
+        assertTrue(logger.error(statement, param));
+        checkMock(Level.ERROR);
 
-       NoSpamLogger.NoSpamLogStatement nospamStatement = logger.getStatement(statement);
+        NoSpamLogger.NoSpamLogStatement nospamStatement = logger.getStatement(statement);
 
-       now = 35;
+        now = 35;
 
-       assertTrue(nospamStatement.info(param));
-       checkMock(Level.INFO);
+        assertTrue(nospamStatement.info(param));
+        checkMock(Level.INFO);
 
-       now = 40;
+        now = 40;
 
-       assertTrue(nospamStatement.warn(param));
-       checkMock(Level.WARN);
+        assertTrue(nospamStatement.warn(param));
+        checkMock(Level.WARN);
 
-       now = 45;
+        now = 45;
 
-       assertTrue(nospamStatement.error(param));
-       checkMock(Level.ERROR);
-   }
+        assertTrue(nospamStatement.error(param));
+        checkMock(Level.ERROR);
+    }
+
+    @Test
+    public void testSupplierLogging()
+    {
+        AtomicInteger evaluationTimes = new AtomicInteger();
+        Object [] params = new Object[] {"hello"};
+        Supplier<Object[]> paramSupplier = () -> {
+            evaluationTimes.incrementAndGet();
+            return params;
+        };
+
+        now = 5;
+
+        NoSpamLogger.log(mock, Level.INFO, 5, TimeUnit.NANOSECONDS, "TESTING {}", paramSupplier);
+        assertEquals(1, evaluationTimes.get());
+        Pair<String, Object[]> loggedMsg = logged.get(Level.INFO).remove();
+        assertEquals("TESTING {}", loggedMsg.left);
+        assertArrayEquals(params, loggedMsg.right);
+
+        NoSpamLogger.log(mock, Level.INFO, 5, TimeUnit.NANOSECONDS, "TESTING {}", paramSupplier);
+        assertEquals(1, evaluationTimes.get());
+        assertTrue(logged.get(Level.INFO).isEmpty());
+
+        now = 10;
+        NoSpamLogger.log(mock, Level.INFO, 5, TimeUnit.NANOSECONDS, "TESTING {}", paramSupplier);
+        assertEquals(2, evaluationTimes.get());
+        loggedMsg = logged.get(Level.INFO).remove();
+        assertEquals("TESTING {}", loggedMsg.left);
+        assertArrayEquals(params, loggedMsg.right);
+    }
 }
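
Besides the indentation cleanup, this test gains Supplier-based parameters so that argument construction is skipped while a statement is suppressed. A hedged usage sketch; the logger, interval and message are illustrative:

    import java.util.concurrent.TimeUnit;
    import java.util.function.Supplier;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.cassandra.utils.NoSpamLogger;

    final class NoSpamUsageSketch
    {
        private static final Logger logger = LoggerFactory.getLogger(NoSpamUsageSketch.class);

        static void reportSlowTask(String taskName)
        {
            // The Supplier is only evaluated when the statement actually logs, i.e. at most
            // once per interval for this message, mirroring testSupplierLogging above.
            Supplier<Object[]> params = () -> new Object[]{ taskName };
            NoSpamLogger.log(logger, NoSpamLogger.Level.INFO, 10, TimeUnit.SECONDS, "slow task {}", params);
        }
    }
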
diff --git a/test/unit/org/apache/cassandra/utils/SerializationsTest.java b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
index 6597f3b..be235e6 100644
--- a/test/unit/org/apache/cassandra/utils/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
@@ -21,6 +21,7 @@
 import java.io.DataInputStream;
 import java.io.IOException;
 
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -30,14 +31,12 @@
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.marshal.Int32Type;
-import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.utils.obs.OffHeapBitSet;
 
-import java.io.File;
-import java.io.FileInputStream;
+import org.apache.cassandra.io.util.File;
 
 public class SerializationsTest extends AbstractSerializationsTester
 {
@@ -80,7 +79,7 @@
             testBloomFilterWrite1000(true);
         }
 
-        try (DataInputStream in = getInput("4.0", "utils.BloomFilter1000.bin");
+        try (FileInputStreamPlus in = getInput("4.0", "utils.BloomFilter1000.bin");
              IFilter filter = BloomFilterSerializer.deserialize(in, false))
         {
             boolean present;
@@ -96,7 +95,7 @@
             }
         }
 
-        try (DataInputStream in = getInput("3.0", "utils.BloomFilter1000.bin");
+        try (FileInputStreamPlus in = getInput("3.0", "utils.BloomFilter1000.bin");
              IFilter filter = BloomFilterSerializer.deserialize(in, true))
         {
             boolean present;
@@ -123,7 +122,7 @@
     {
         Murmur3Partitioner partitioner = new Murmur3Partitioner();
 
-        try (DataInputStream in = new DataInputStream(new FileInputStream(new File(file)));
+        try (DataInputStream in = new DataInputStream(new FileInputStreamPlus(new File(file)));
              IFilter filter = BloomFilterSerializer.deserialize(in, oldBfFormat))
         {
             for (int i = 1; i <= 10; i++)
@@ -176,7 +175,7 @@
         if (EXECUTE_WRITES)
             testEstimatedHistogramWrite();
 
-        try (DataInputStreamPlus in = getInput("utils.EstimatedHistogram.bin"))
+        try (FileInputStreamPlus in = getInput("utils.EstimatedHistogram.bin"))
         {
             Assert.assertNotNull(EstimatedHistogram.serializer.deserialize(in));
             Assert.assertNotNull(EstimatedHistogram.serializer.deserialize(in));
diff --git a/test/unit/org/apache/cassandra/utils/SlidingTimeRateTest.java b/test/unit/org/apache/cassandra/utils/SlidingTimeRateTest.java
deleted file mode 100644
index 8dc4a14..0000000
--- a/test/unit/org/apache/cassandra/utils/SlidingTimeRateTest.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * No objects are created currently from SlidingTimeRate in Cassandra 4.0.
- * If you decide to use it, please check CASSANDRA-16713.
- * There still might be a bug, flaky test to be fixed before using it again.
- *
- * Skipping all tests for running now to clean he noise before 4.0 GA release.
- */
-public class SlidingTimeRateTest
-{
-    @Ignore
-    @Test
-    public void testUpdateAndGet()
-    {
-        SlidingTimeRate rate = new SlidingTimeRate(new TestTimeSource(), 10, 1, TimeUnit.SECONDS);
-        int updates = 100;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-        }
-        Assert.assertEquals(updates, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testUpdateAndGetBetweenWindows()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 100;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-            time.sleep(100, TimeUnit.MILLISECONDS);
-        }
-        Assert.assertEquals(10, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testUpdateAndGetPastWindowSize()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 100;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-        }
-
-        time.sleep(6, TimeUnit.SECONDS);
-
-        Assert.assertEquals(0, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testUpdateAndGetToPointInTime()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 10;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-            time.sleep(100, TimeUnit.MILLISECONDS);
-        }
-
-        time.sleep(1, TimeUnit.SECONDS);
-
-        Assert.assertEquals(5, rate.get(TimeUnit.SECONDS), 0.0);
-        Assert.assertEquals(10, rate.get(1, TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testDecay() throws InterruptedException
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 10;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-            time.sleep(100, TimeUnit.MILLISECONDS);
-        }
-        Assert.assertEquals(10, rate.get(TimeUnit.SECONDS), 0.0);
-
-        time.sleep(1, TimeUnit.SECONDS);
-
-        Assert.assertEquals(5, rate.get(TimeUnit.SECONDS), 0.0);
-
-        time.sleep(2, TimeUnit.SECONDS);
-
-        Assert.assertEquals(2.5, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testPruning()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-
-        rate.update(1);
-        Assert.assertEquals(1, rate.size());
-
-        time.sleep(6, TimeUnit.SECONDS);
-
-        rate.prune();
-        Assert.assertEquals(0, rate.size());
-    }
-
-    @Ignore
-    @Test
-    public void testConcurrentUpdateAndGet() throws InterruptedException
-    {
-        final ExecutorService executor = Executors.newFixedThreadPool(FBUtilities.getAvailableProcessors());
-        final TestTimeSource time = new TestTimeSource();
-        final SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 100000;
-        for (int i = 0; i < updates; i++)
-        {
-            executor.submit(() -> {
-                time.sleep(1, TimeUnit.MILLISECONDS);
-                rate.update(1);
-            });
-        }
-
-        executor.shutdown();
-
-        Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
-        Assert.assertEquals(1000, rate.get(TimeUnit.SECONDS), 100.0);
-    }
-}
diff --git a/test/unit/org/apache/cassandra/utils/TestTimeSource.java b/test/unit/org/apache/cassandra/utils/TestTimeSource.java
deleted file mode 100644
index 4ecd086..0000000
--- a/test/unit/org/apache/cassandra/utils/TestTimeSource.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-public class TestTimeSource implements TimeSource
-{
-    private final AtomicLong timeInMillis = new AtomicLong(System.currentTimeMillis());
-
-    @Override
-    public long currentTimeMillis()
-    {
-        return timeInMillis.get();
-    }
-
-    @Override
-    public long nanoTime()
-    {
-        return timeInMillis.get() * 1_000_000;
-    }
-
-    @Override
-    public TimeSource sleep(long sleepFor, TimeUnit unit)
-    {
-        long current = timeInMillis.get();
-        long sleepInMillis = TimeUnit.MILLISECONDS.convert(sleepFor, unit);
-        boolean elapsed;
-        do
-        {
-            long newTime = current + sleepInMillis;
-            elapsed = timeInMillis.compareAndSet(current, newTime);
-            if (!elapsed)
-            {
-                long updated = timeInMillis.get();
-                if (updated - current >= sleepInMillis)
-                {
-                    elapsed = true;
-                }
-                else
-                {
-                    sleepInMillis -= updated - current;
-                    current = updated;
-                }
-            }
-        }
-        while (!elapsed);
-        return this;
-    }
-
-    @Override
-    public TimeSource sleepUninterruptibly(long sleepFor, TimeUnit unit)
-    {
-        return sleep(sleepFor, unit);
-    }
-}
diff --git a/test/unit/org/apache/cassandra/utils/UUIDTest.java b/test/unit/org/apache/cassandra/utils/UUIDTest.java
index ec0839a..62946b6 100644
--- a/test/unit/org/apache/cassandra/utils/UUIDTest.java
+++ b/test/unit/org/apache/cassandra/utils/UUIDTest.java
@@ -37,6 +37,10 @@
 import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.cliffc.high_scale_lib.NonBlockingHashMap;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
+import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;
+
 
 public class UUIDTest
 {
@@ -44,22 +48,22 @@
     public void verifyType1()
     {
 
-        UUID uuid = UUIDGen.getTimeUUID();
+        UUID uuid = nextTimeUUID().asUUID();
         assert uuid.version() == 1;
     }
 
     @Test
     public void verifyOrdering1()
     {
-        UUID one = UUIDGen.getTimeUUID();
-        UUID two = UUIDGen.getTimeUUID();
+        UUID one = nextTimeUUID().asUUID();
+        UUID two = nextTimeUUID().asUUID();
         assert one.timestamp() < two.timestamp();
     }
 
     @Test
     public void testDecomposeAndRaw()
     {
-        UUID a = UUIDGen.getTimeUUID();
+        UUID a = nextTimeUUID().asUUID();
         byte[] decomposed = UUIDGen.decompose(a);
         UUID b = UUIDGen.getUUID(ByteBuffer.wrap(decomposed));
         assert a.equals(b);
@@ -68,7 +72,7 @@
     @Test
     public void testToFromByteBuffer()
     {
-        UUID a = UUIDGen.getTimeUUID();
+        UUID a = nextTimeUUID().asUUID();
         ByteBuffer bb = UUIDGen.toByteBuffer(a);
         UUID b = UUIDGen.getUUID(bb);
         assert a.equals(b);
@@ -78,8 +82,8 @@
     public void testTimeUUIDType()
     {
         TimeUUIDType comp = TimeUUIDType.instance;
-        ByteBuffer first = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes());
-        ByteBuffer second = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes());
+        ByteBuffer first = ByteBuffer.wrap(nextTimeUUIDAsBytes());
+        ByteBuffer second = ByteBuffer.wrap(nextTimeUUIDAsBytes());
         assert comp.compare(first, second) < 0;
         assert comp.compare(second, first) > 0;
         ByteBuffer sameAsFirst = ByteBuffer.wrap(UUIDGen.decompose(UUIDGen.getUUID(first)));
@@ -90,8 +94,8 @@
     public void testUUIDTimestamp()
     {
         long now = System.currentTimeMillis();
-        UUID uuid = UUIDGen.getTimeUUID();
-        long tstamp = UUIDGen.getAdjustedTimestamp(uuid);
+        TimeUUID uuid = nextTimeUUID();
+        long tstamp = uuid.unix(MILLISECONDS);
 
         // I'll be damned if the uuid timestamp is more than 10ms after now
         assert now <= tstamp && now >= tstamp - 10 : "now = " + now + ", timestamp = " + tstamp;
@@ -118,7 +122,7 @@
 
                 for (long i = 0; i < iterations; i++)
                 {
-                    UUID uuid = UUIDGen.getTimeUUID();
+                    UUID uuid = nextTimeUUID().asUUID();
                     newTimestamp = uuid.timestamp();
 
                     if (lastTimestamp >= newTimestamp)
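
A hedged sketch of the TimeUUID accessors the rewritten assertions rely on; the helper names are illustrative:

    import java.nio.ByteBuffer;

    import org.apache.cassandra.utils.TimeUUID;

    import static java.util.concurrent.TimeUnit.MILLISECONDS;
    import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
    import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;

    final class TimeUUIDAccessorsSketch
    {
        static long creationTimeMillis()
        {
            TimeUUID uuid = nextTimeUUID();
            // unix(MILLISECONDS) replaces UUIDGen.getAdjustedTimestamp(): it maps the
            // embedded UUID timestamp back onto the unix epoch.
            return uuid.unix(MILLISECONDS);
        }

        static ByteBuffer rawBytes()
        {
            // nextTimeUUIDAsBytes() replaces UUIDGen.getTimeUUIDBytes() for byte-level comparisons.
            return ByteBuffer.wrap(nextTimeUUIDAsBytes());
        }
    }
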
diff --git a/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java b/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
index 311b924..5227e51 100644
--- a/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
+++ b/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils.binlog;
 
-import java.io.File;
 import java.nio.file.Path;
 import java.nio.file.Files;
 import java.util.ArrayList;
@@ -29,8 +28,11 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.io.util.File;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 import net.openhft.chronicle.queue.ChronicleQueue;
@@ -40,6 +42,7 @@
 import net.openhft.chronicle.wire.WireOut;
 import org.apache.cassandra.Util;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
@@ -50,7 +53,7 @@
 {
     public static Path tempDir() throws Exception
     {
-        return Files.createTempDirectory("binlogtest" + System.nanoTime());
+        return Files.createTempDirectory("binlogtest" + nanoTime());
     }
 
     private static final String testString = "ry@nlikestheyankees";
@@ -59,6 +62,13 @@
     private BinLog binLog;
     private Path path;
 
+    @BeforeClass
+    public static void setup()
+    {
+        // PathUtils touches StorageService, which touches StreamManager, which requires configs to be set up
+        DatabaseDescriptor.daemonInitialization();
+    }
+
     @Before
     public void setUp() throws Exception
     {
@@ -78,9 +88,9 @@
         {
             binLog.stop();
         }
-        for (File f : path.toFile().listFiles())
+        for (File f : new File(path).tryList())
         {
-            f.delete();
+            f.tryDelete();
         }
     }
 
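
The binlog tests now use Cassandra's File wrapper instead of java.io.File. A hedged sketch of the cleanup idiom from the tearDown hunk above; the method name is illustrative:

    import java.nio.file.Path;

    import org.apache.cassandra.io.util.File;

    final class TempDirCleanupSketch
    {
        static void deleteContents(Path dir)
        {
            // tryList()/tryDelete() are the non-throwing counterparts of listFiles()/delete().
            for (File f : new File(dir).tryList())
                f.tryDelete();
        }
    }
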
diff --git a/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java b/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java
index cd6b7a3..940d121 100644
--- a/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java
+++ b/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils.binlog;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -26,6 +25,7 @@
 import java.util.List;
 import java.util.Random;
 
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
@@ -40,7 +40,7 @@
         DeletingArchiver da = new DeletingArchiver(45);
         List<File> files = generateFiles(10, 5);
         for (File f : files)
-            da.onReleased(1, f);
+            da.onReleased(1, f.toJavaIOFile());
         // adding 5 files, each with size 10, this means the first one should have been deleted:
         assertFalse(files.get(0).exists());
         for (int i = 1; i < files.size(); i++)
@@ -53,7 +53,7 @@
     {
         DeletingArchiver da = new DeletingArchiver(45);
         List<File> largeFiles = generateFiles(50, 1);
-        da.onReleased(1, largeFiles.get(0));
+        da.onReleased(1, largeFiles.get(0).toJavaIOFile());
         assertFalse(largeFiles.get(0).exists());
         assertEquals(0, da.getBytesInStoreFiles());
     }
@@ -67,11 +67,11 @@
 
         for (File f : smallFiles)
         {
-            da.onReleased(1, f);
+            da.onReleased(1, f.toJavaIOFile());
         }
         assertEquals(40, da.getBytesInStoreFiles());
         // we now have 40 bytes in deleting archiver, adding the large 40 byte file should delete all the small ones
-        da.onReleased(1, largeFiles.get(0));
+        da.onReleased(1, largeFiles.get(0).toJavaIOFile());
         for (File f : smallFiles)
             assertFalse(f.exists());
 
@@ -79,7 +79,7 @@
 
         // make sure that size tracking is ok - all 4 new small files should still be there and the large one should be gone
         for (File f : smallFiles)
-            da.onReleased(1, f);
+            da.onReleased(1, f.toJavaIOFile());
 
         assertFalse(largeFiles.get(0).exists());
         for (File f : smallFiles)
@@ -99,7 +99,7 @@
         {
             Path p = Files.createTempFile("logfile", ".cq4");
             Files.write(p, content);
-            files.add(p.toFile());
+            files.add(new File(p));
         }
         files.forEach(File::deleteOnExit);
         return files;
diff --git a/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java b/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java
index 284ff5a..8e3c2db 100644
--- a/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java
+++ b/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils.binlog;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -30,6 +29,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.Sets;
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 
 import net.openhft.chronicle.queue.impl.single.SingleChronicleQueue;
@@ -48,21 +48,21 @@
         String script = s.left;
         String dir = s.right;
         Path logdirectory = Files.createTempDirectory("logdirectory");
-        File logfileToArchive = Files.createTempFile(logdirectory, "logfile", "xyz").toFile();
+        File logfileToArchive = new File(Files.createTempFile(logdirectory, "logfile", "xyz"));
         Files.write(logfileToArchive.toPath(), "content".getBytes());
 
         ExternalArchiver ea = new ExternalArchiver(script+" %path", null, 10);
-        ea.onReleased(1, logfileToArchive);
+        ea.onReleased(1, logfileToArchive.toJavaIOFile());
         while (logfileToArchive.exists())
         {
             Thread.sleep(100);
         }
 
-        File movedFile = new File(dir, logfileToArchive.getName());
+        File movedFile = new File(dir, logfileToArchive.name());
         assertTrue(movedFile.exists());
         movedFile.deleteOnExit();
         ea.stop();
-        assertEquals(0, logdirectory.toFile().listFiles().length);
+        assertEquals(0, new File(logdirectory).tryList().length);
     }
 
     @Test
@@ -75,7 +75,7 @@
         Path dir = Files.createTempDirectory("archive");
         for (int i = 0; i < 10; i++)
         {
-            File logfileToArchive = Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX).toFile();
+            File logfileToArchive = new File(Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX));
             logfileToArchive.deleteOnExit();
             Files.write(logfileToArchive.toPath(), ("content"+i).getBytes());
             existingFiles.add(logfileToArchive);
@@ -94,13 +94,13 @@
                     Thread.sleep(100);
                     break;
                 }
-                File movedFile = new File(moveDir, f.getName());
+                File movedFile = new File(moveDir, f.name());
                 assertTrue(movedFile.exists());
                 movedFile.deleteOnExit();
             }
         }
         ea.stop();
-        assertEquals(0, dir.toFile().listFiles().length);
+        assertEquals(0, new File(dir).tryList().length);
     }
 
     @Test
@@ -114,7 +114,7 @@
         List<File> existingFiles = new ArrayList<>();
         for (int i = 0; i < 10; i++)
         {
-            File logfileToArchive = Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX).toFile();
+            File logfileToArchive = new File(Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX));
             logfileToArchive.deleteOnExit();
             Files.write(logfileToArchive.toPath(), ("content"+i).getBytes());
             existingFiles.add(logfileToArchive);
@@ -124,7 +124,7 @@
         for (File f : existingFiles)
         {
             assertFalse(f.exists());
-            File movedFile = new File(moveDir, f.getName());
+            File movedFile = new File(moveDir, f.name());
             assertTrue(movedFile.exists());
             movedFile.deleteOnExit();
         }
@@ -144,7 +144,7 @@
         String script = s.left;
         String moveDir = s.right;
         Path logdirectory = Files.createTempDirectory("logdirectory");
-        File logfileToArchive = Files.createTempFile(logdirectory, "logfile", "xyz").toFile();
+        File logfileToArchive = new File(Files.createTempFile(logdirectory, "logfile", "xyz"));
         Files.write(logfileToArchive.toPath(), "content".getBytes());
         AtomicInteger tryCounter = new AtomicInteger();
         AtomicBoolean success = new AtomicBoolean();
@@ -154,7 +154,7 @@
             ExternalArchiver.exec(cmd);
             success.set(true);
         });
-        ea.onReleased(0, logfileToArchive);
+        ea.onReleased(0, logfileToArchive.toJavaIOFile());
         while (tryCounter.get() < 2) // while we have only executed this 0 or 1 times, the file should still be on disk
         {
             Thread.sleep(100);
@@ -167,7 +167,7 @@
         // there will be 3 attempts in total, 2 failing ones, then the successful one:
         assertEquals(3, tryCounter.get());
         assertFalse(logfileToArchive.exists());
-        File movedFile = new File(moveDir, logfileToArchive.getName());
+        File movedFile = new File(moveDir, logfileToArchive.name());
         assertTrue(movedFile.exists());
         ea.stop();
     }
@@ -188,7 +188,7 @@
         String script = s.left;
         String moveDir = s.right;
         Path logdirectory = Files.createTempDirectory("logdirectory");
-        File logfileToArchive = Files.createTempFile(logdirectory, "logfile", "xyz").toFile();
+        File logfileToArchive = new File(Files.createTempFile(logdirectory, "logfile", "xyz"));
         Files.write(logfileToArchive.toPath(), "content".getBytes());
 
         AtomicInteger tryCounter = new AtomicInteger();
@@ -206,7 +206,7 @@
                 throw t;
             }
         });
-        ea.onReleased(0, logfileToArchive);
+        ea.onReleased(0, logfileToArchive.toJavaIOFile());
         while (tryCounter.get() < 3)
             Thread.sleep(500);
         assertTrue(logfileToArchive.exists());
@@ -214,9 +214,9 @@
         Thread.sleep(5000);
         assertTrue(logfileToArchive.exists());
         assertFalse(success.get());
-        File [] fs = new File(moveDir).listFiles(f ->
+        File [] fs = new File(moveDir).tryList(f ->
                                                  {
-                                                     if (f.getName().startsWith("file."))
+                                                     if (f.name().startsWith("file."))
                                                      {
                                                          f.deleteOnExit();
                                                          return true;
@@ -230,24 +230,24 @@
 
     private Pair<String, String> createScript() throws IOException
     {
-        File f = Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
+        File f = new File(Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
                                                                                                          PosixFilePermission.OWNER_READ,
-                                                                                                         PosixFilePermission.OWNER_EXECUTE))).toFile();
+                                                                                                         PosixFilePermission.OWNER_EXECUTE))));
         f.deleteOnExit();
-        File dir = Files.createTempDirectory("archive").toFile();
+        File dir = new File(Files.createTempDirectory("archive"));
         dir.deleteOnExit();
-        String script = "#!/bin/sh\nmv $1 "+dir.getAbsolutePath();
+        String script = "#!/bin/sh\nmv $1 "+dir.absolutePath();
         Files.write(f.toPath(), script.getBytes());
-        return Pair.create(f.getAbsolutePath(), dir.getAbsolutePath());
+        return Pair.create(f.absolutePath(), dir.absolutePath());
     }
 
     private Pair<String, String> createFailingScript(int failures) throws IOException
     {
-        File f = Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
+        File f = new File(Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
                                                                                                          PosixFilePermission.OWNER_READ,
-                                                                                                         PosixFilePermission.OWNER_EXECUTE))).toFile();
+                                                                                                         PosixFilePermission.OWNER_EXECUTE))));
         f.deleteOnExit();
-        File dir = Files.createTempDirectory("archive").toFile();
+        File dir = new File(Files.createTempDirectory("archive"));
         dir.deleteOnExit();
         // this script counts the files in the archive dir; once more than `failures` files are present, it moves the actual file
         String script = "#!/bin/bash%n" +
@@ -262,7 +262,7 @@
                         "    mv $1 $DIR%n"+
                         "fi%n";
 
-        Files.write(f.toPath(), String.format(script, dir.getAbsolutePath(), failures).getBytes());
-        return Pair.create(f.getAbsolutePath(), dir.getAbsolutePath());
+        Files.write(f.toPath(), String.format(script, dir.absolutePath(), failures).getBytes());
+        return Pair.create(f.absolutePath(), dir.absolutePath());
     }
 }
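
The archiver callbacks still accept java.io.File, so wrapped files are bridged with toJavaIOFile() throughout these hunks. A hedged sketch; the argument values are illustrative:

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.utils.binlog.DeletingArchiver;

    final class ArchiverBridgeSketch
    {
        static void release(DeletingArchiver archiver, File segment)
        {
            // DeletingArchiver#onReleased(int, java.io.File) predates the File wrapper,
            // hence the explicit conversion seen above.
            archiver.onReleased(1, segment.toJavaIOFile());
        }
    }
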
diff --git a/test/unit/org/apache/cassandra/utils/btree/BTreeTest.java b/test/unit/org/apache/cassandra/utils/btree/BTreeTest.java
index 739807e..73f0ff7 100644
--- a/test/unit/org/apache/cassandra/utils/btree/BTreeTest.java
+++ b/test/unit/org/apache/cassandra/utils/btree/BTreeTest.java
@@ -26,7 +26,6 @@
 import org.junit.Assert;
 
 import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
 
 public class BTreeTest
 {
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestAsyncPromise.java b/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestAsyncPromise.java
new file mode 100644
index 0000000..8420ba5
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestAsyncPromise.java
@@ -0,0 +1,503 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import javax.annotation.Nullable;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Assert;
+
+import io.netty.util.concurrent.GenericFutureListener;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public abstract class AbstractTestAsyncPromise extends AbstractTestPromise
+{
+    static
+    {
+        DatabaseDescriptor.clientInitialization();
+    }
+
+    public static <V> Promise<V> cancelSuccess(Promise<V> promise)
+    {
+        success(promise, Promise::isCancellable, true);
+        return cancelShared(promise);
+    }
+
+    public static <V> Promise<V> cancelExclusiveSuccess(Promise<V> promise)
+    {
+        success(promise, Promise::isCancellable, true);
+        success(promise, Promise::setUncancellableExclusive, true);
+        return cancelShared(promise);
+    }
+
+    private static <V> Promise<V> cancelShared(Promise<V> promise)
+    {
+        success(promise, Promise::setUncancellable, true);
+        success(promise, Promise::setUncancellable, true);
+        success(promise, Promise::setUncancellableExclusive, false);
+        success(promise, p -> p.cancel(true), false);
+        success(promise, p -> p.cancel(false), false);
+        success(promise, Promise::isCancellable, false);
+        success(promise, Promise::isCancelled, false);
+        return promise;
+    }
+
+    public static <V> Promise<V> cancelFailure(Promise<V> promise)
+    {
+        success(promise, Promise::isCancellable, false);
+        return cancelShared(promise);
+    }
+
+    protected <V> void testOneSuccess(Promise<V> promise, boolean tryOrSet, V value, V otherValue)
+    {
+        List<V> results = new ArrayList<>();
+        List<Integer> order = new ArrayList<>();
+        class ListenerFactory
+        {
+            int count = 0;
+
+            public GenericFutureListener<Future<V>> get()
+            {
+                int id = count++;
+                return p -> { results.add(p.getNow()); order.add(id); };
+            }
+            public GenericFutureListener<Future<V>> getListenerToFailure(Promise<V> promise)
+            {
+                int id = count++;
+                return p -> { Assert.assertTrue(p.cause() instanceof RuntimeException); results.add(promise.getNow()); order.add(id); };
+            }
+            public GenericFutureListener<Future<V>> getRecursive()
+            {
+                int id = count++;
+                return p -> { promise.addListener(get()); results.add(p.getNow()); order.add(id); };
+            }
+            public Runnable getRunnable(Future<V> p)
+            {
+                int id = count++;
+                return () -> { results.add(p.getNow()); order.add(id); };
+            }
+            public Runnable getRecursiveRunnable(Future<V> p)
+            {
+                int id = count++;
+                return () -> { promise.addListener(getRunnable(p)); results.add(p.getNow()); order.add(id); };
+            }
+            public Consumer<V> getConsumer()
+            {
+                int id = count++;
+                return result -> { results.add(result); order.add(id); };
+            }
+            public Consumer<V> getRecursiveConsumer()
+            {
+                int id = count++;
+                return result -> { promise.addCallback(getConsumer(), fail -> Assert.fail()); results.add(result); order.add(id); };
+            }
+            public Function<V, Future<V>> getAsyncFunction()
+            {
+                int id = count++;
+                return result -> { results.add(result); order.add(id); return ImmediateFuture.success(result); };
+            }
+            public Function<V, V> getFunction()
+            {
+                int id = count++;
+                return result -> { results.add(result); order.add(id); return result; };
+            }
+            public Function<V, Future<V>> getRecursiveAsyncFunction(Promise<V> promise)
+            {
+                int id = count++;
+                return result -> { promise.flatMap(getAsyncFunction()); results.add(result); order.add(id); return ImmediateFuture.success(result); };
+            }
+            public Function<V, Future<V>> getAsyncFailingFunction()
+            {
+                int id = count++;
+                return result -> { results.add(result); order.add(id); return ImmediateFuture.failure(new RuntimeException()); };
+            }
+            public Function<V, V> getFailingFunction()
+            {
+                int id = count++;
+                return result -> { results.add(result); order.add(id); throw new RuntimeException(); };
+            }
+            public Function<V, Future<V>> getRecursiveAsyncFailingFunction(Promise<V> promise)
+            {
+                int id = count++;
+                return result -> { promise.flatMap(getAsyncFailingFunction()); results.add(result); order.add(id); return ImmediateFuture.failure(new RuntimeException()); };
+            }
+            public FutureCallback<V> getCallback(Future<V> p)
+            {
+                int id = count++;
+                return new FutureCallback<V>()
+                {
+                    @Override
+                    public void onSuccess(@Nullable Object o)
+                    {
+                        results.add(p.getNow());
+                        order.add(id);
+                    }
+
+                    @Override
+                    public void onFailure(Throwable throwable)
+                    {
+                        Assert.fail();
+                    }
+                };
+            }
+            public FutureCallback<V> getRecursiveCallback(Future<V> p)
+            {
+                int id = count++;
+                return new FutureCallback<V>()
+                {
+                    @Override
+                    public void onSuccess(@Nullable Object o)
+                    {
+                        promise.addCallback(getCallback(p));
+                        results.add(p.getNow());
+                        order.add(id);
+                    }
+
+                    @Override
+                    public void onFailure(Throwable throwable)
+                    {
+                        Assert.fail();
+                    }
+                };
+            }
+        }
+        ListenerFactory listeners = new ListenerFactory();
+        Async async = new Async();
+        promise.addListener(listeners.get());
+        promise.addListener(listeners.getRunnable(promise));
+        promise.addListener(listeners.getRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListeners(listeners.getRecursive(), listeners.get());
+        promise.addListener(listeners.getRecursiveRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListener(listeners.getRecursive());
+        promise.addCallback(listeners.getCallback(promise));
+        promise.addCallback(listeners.getCallback(promise), MoreExecutors.directExecutor());
+        promise.addCallback(listeners.getRecursiveCallback(promise));
+        promise.addCallback(listeners.getRecursiveCallback(promise), MoreExecutors.directExecutor());
+        promise.addCallback(listeners.getConsumer(), fail -> Assert.fail());
+        promise.addCallback(listeners.getConsumer(), fail -> Assert.fail(), MoreExecutors.directExecutor());
+        promise.addCallback(listeners.getRecursiveConsumer(), fail -> Assert.fail());
+        promise.addCallback(listeners.getRecursiveConsumer(), fail -> Assert.fail(), MoreExecutors.directExecutor());
+        promise.map(listeners.getFunction()).addListener(listeners.get());
+        promise.map(listeners.getFunction(), MoreExecutors.directExecutor()).addListener(listeners.get());
+        promise.map(listeners.getFailingFunction()).addListener(listeners.getListenerToFailure(promise));
+        promise.map(listeners.getFailingFunction(), MoreExecutors.directExecutor()).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getAsyncFunction()).addListener(listeners.get());
+        promise.flatMap(listeners.getAsyncFunction(), MoreExecutors.directExecutor()).addListener(listeners.get());
+        promise.flatMap(listeners.getRecursiveAsyncFunction(promise)).addListener(listeners.get());
+        promise.flatMap(listeners.getRecursiveAsyncFunction(promise), MoreExecutors.directExecutor()).addListener(listeners.get());
+        promise.flatMap(listeners.getAsyncFailingFunction()).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getAsyncFailingFunction(), MoreExecutors.directExecutor()).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getRecursiveAsyncFailingFunction(promise)).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getRecursiveAsyncFailingFunction(promise), MoreExecutors.directExecutor()).addListener(listeners.getListenerToFailure(promise));
+
+        success(promise, Promise::getNow, null);
+        success(promise, Promise::isSuccess, false);
+        success(promise, Promise::isDone, false);
+        success(promise, Promise::isCancelled, false);
+        async.success(promise, Promise::get, value);
+        async.success(promise, p -> p.get(1L, SECONDS), value);
+        async.success(promise, Promise::await, promise);
+        async.success(promise, Promise::awaitUninterruptibly, promise);
+        async.success(promise, p -> p.await(1L, SECONDS), true);
+        async.success(promise, p -> p.await(1000L), true);
+        async.success(promise, p -> p.awaitUninterruptibly(1L, SECONDS), true);
+        async.success(promise, p -> p.awaitUninterruptibly(1000L), true);
+        async.success(promise, Promise::sync, promise);
+        async.success(promise, Promise::syncUninterruptibly, promise);
+        if (tryOrSet) promise.trySuccess(value);
+        else promise.setSuccess(value);
+        success(promise, p -> p.cancel(true), false);
+        success(promise, p -> p.cancel(false), false);
+        failure(promise, p -> p.setSuccess(null), IllegalStateException.class);
+        failure(promise, p -> p.setFailure(new NullPointerException()), IllegalStateException.class);
+        success(promise, Promise::getNow, value);
+        success(promise, p -> p.trySuccess(otherValue), false);
+        success(promise, p -> p.tryFailure(new NullPointerException()), false);
+        success(promise, Promise::getNow, value);
+        success(promise, Promise::cause, null);
+        promise.addListener(listeners.get());
+        promise.addListener(listeners.getRunnable(promise));
+        promise.addListener(listeners.getRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListeners(listeners.getRecursive(), listeners.get());
+        promise.addListener(listeners.getRecursiveRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListener(listeners.getRecursive());
+        promise.addCallback(listeners.getCallback(promise));
+        promise.addCallback(listeners.getCallback(promise), MoreExecutors.directExecutor());
+        promise.addCallback(listeners.getRecursiveCallback(promise));
+        promise.addCallback(listeners.getRecursiveCallback(promise), MoreExecutors.directExecutor());
+        promise.addCallback(listeners.getConsumer(), fail -> Assert.fail());
+        promise.addCallback(listeners.getConsumer(), fail -> Assert.fail(), MoreExecutors.directExecutor());
+        promise.addCallback(listeners.getRecursiveConsumer(), fail -> Assert.fail());
+        promise.addCallback(listeners.getRecursiveConsumer(), fail -> Assert.fail(), MoreExecutors.directExecutor());
+        promise.map(listeners.getFunction()).addListener(listeners.get());
+        promise.map(listeners.getFunction(), MoreExecutors.directExecutor()).addListener(listeners.get());
+        promise.map(listeners.getFailingFunction()).addListener(listeners.getListenerToFailure(promise));
+        promise.map(listeners.getFailingFunction(), MoreExecutors.directExecutor()).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getAsyncFunction()).addListener(listeners.get());
+        promise.flatMap(listeners.getAsyncFunction(), MoreExecutors.directExecutor()).addListener(listeners.get());
+        promise.flatMap(listeners.getRecursiveAsyncFunction(promise)).addListener(listeners.get());
+        promise.flatMap(listeners.getRecursiveAsyncFunction(promise), MoreExecutors.directExecutor()).addListener(listeners.get());
+        promise.flatMap(listeners.getAsyncFailingFunction()).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getAsyncFailingFunction(), MoreExecutors.directExecutor()).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getRecursiveAsyncFailingFunction(promise)).addListener(listeners.getListenerToFailure(promise));
+        promise.flatMap(listeners.getRecursiveAsyncFailingFunction(promise), MoreExecutors.directExecutor()).addListener(listeners.getListenerToFailure(promise));
+        success(promise, Promise::isSuccess, true);
+        success(promise, Promise::isDone, true);
+        success(promise, Promise::isCancelled, false);
+        async.verify();
+        Assert.assertEquals(listeners.count, results.size());
+        Assert.assertEquals(listeners.count, order.size());
+        for (V result : results)
+            Assert.assertEquals(value, result);
+        for (int i = 0 ; i < order.size() ; ++i)
+            Assert.assertEquals(i, order.get(i).intValue());
+    }
+
+    protected <V> void testOneFailure(Promise<V> promise, boolean tryOrSet, Throwable cause, V otherValue)
+    {
+        List<Throwable> results = new ArrayList<>();
+        List<Integer> order = new ArrayList<>();
+        Async async = new Async();
+        class ListenerFactory
+        {
+            int count = 0;
+
+            public GenericFutureListener<Future<V>> get()
+            {
+                int id = count++;
+                return p -> { results.add(p.cause()); order.add(id); };
+            }
+            public GenericFutureListener<Future<V>> getRecursive()
+            {
+                int id = count++;
+                return p -> { promise.addListener(get()); results.add(p.cause()); order.add(id); };
+            }
+            public Runnable getRunnable(Future<V> p)
+            {
+                int id = count++;
+                return () -> { results.add(p.cause()); order.add(id); };
+            }
+            public Runnable getRecursiveRunnable(Future<V> p)
+            {
+                int id = count++;
+                return () -> { promise.addListener(getRunnable(p)); results.add(p.cause()); order.add(id); };
+            }
+            public Consumer<Throwable> getConsumer()
+            {
+                int id = count++;
+                return result -> { results.add(result); order.add(id); };
+            }
+            public Consumer<Throwable> getRecursiveConsumer()
+            {
+                int id = count++;
+                return result -> { promise.addCallback(fail -> Assert.fail(), getConsumer()); results.add(result); order.add(id); };
+            }
+            public Function<V, Future<V>> getAsyncFunction()
+            {
+                return result -> { Assert.fail(); return ImmediateFuture.success(result); };
+            }
+            public Function<V, Future<V>> getRecursiveAsyncFunction()
+            {
+                return result -> { promise.flatMap(getAsyncFunction()); return ImmediateFuture.success(result); };
+            }
+            public FutureCallback<V> getCallback(Future<V> p)
+            {
+                int id = count++;
+                return new FutureCallback<V>()
+                {
+                    @Override
+                    public void onSuccess(@Nullable Object o)
+                    {
+                        Assert.fail();
+                    }
+
+                    @Override
+                    public void onFailure(Throwable throwable)
+                    {
+                        results.add(p.cause());
+                        order.add(id);
+                    }
+                };
+            }
+            public FutureCallback<V> getRecursiveCallback(Future<V> p)
+            {
+                int id = count++;
+                return new FutureCallback<V>()
+                {
+                    @Override
+                    public void onSuccess(@Nullable Object o)
+                    {
+                        Assert.fail();
+                    }
+
+                    @Override
+                    public void onFailure(Throwable throwable)
+                    {
+                        promise.addCallback(getCallback(p));
+                        results.add(p.cause());
+                        order.add(id);
+                    }
+                };
+            }
+        }
+        ListenerFactory listeners = new ListenerFactory();
+        promise.addListener(listeners.get());
+        promise.addListeners(listeners.getRecursive(), listeners.get());
+        promise.addListener(listeners.getRecursive());
+        promise.addListener(listeners.getRunnable(promise));
+        promise.addListener(listeners.getRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListener(listeners.getRecursiveRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListener(listeners.getRecursive());
+        promise.addCallback(listeners.getCallback(promise));
+        promise.addCallback(listeners.getRecursiveCallback(promise));
+        promise.addCallback(fail -> Assert.fail(), listeners.getConsumer());
+        promise.addCallback(fail -> Assert.fail(), listeners.getRecursiveConsumer());
+        promise.flatMap(listeners.getAsyncFunction()).addListener(listeners.get());
+        promise.flatMap(listeners.getAsyncFunction(), MoreExecutors.directExecutor()).addListener(listeners.get());
+        promise.flatMap(listeners.getRecursiveAsyncFunction()).addListener(listeners.get());
+        promise.flatMap(listeners.getRecursiveAsyncFunction(), MoreExecutors.directExecutor()).addListener(listeners.get());
+        success(promise, Promise::isSuccess, false);
+        success(promise, Promise::isDone, false);
+        success(promise, Promise::isCancelled, false);
+        success(promise, Promise::getNow, null);
+        success(promise, Promise::cause, null);
+        async.failure(promise, p -> p.get(), ExecutionException.class);
+        async.failure(promise, p -> p.get(1L, SECONDS), ExecutionException.class);
+        async.success(promise, Promise::await, promise);
+        async.success(promise, Promise::awaitThrowUncheckedOnInterrupt, promise);
+        async.success(promise, Promise::awaitUninterruptibly, promise);
+        async.success(promise, p -> p.await(1L, SECONDS), true);
+        async.success(promise, p -> p.await(1000L), true);
+        async.success(promise, p -> p.awaitUninterruptibly(1L, SECONDS), true);
+        async.success(promise, p -> p.awaitThrowUncheckedOnInterrupt(1L, SECONDS), true);
+        async.success(promise, p -> p.awaitUninterruptibly(1000L), true);
+        async.success(promise, p -> p.awaitUntil(nanoTime() + SECONDS.toNanos(1L)), true);
+        async.success(promise, p -> p.awaitUntilUninterruptibly(nanoTime() + SECONDS.toNanos(1L)), true);
+        async.success(promise, p -> p.awaitUntilThrowUncheckedOnInterrupt(nanoTime() + SECONDS.toNanos(1L)), true);
+        async.failure(promise, p -> p.sync(), cause);
+        async.failure(promise, p -> p.syncUninterruptibly(), cause);
+        if (tryOrSet) promise.tryFailure(cause);
+        else promise.setFailure(cause);
+        success(promise, p -> p.cancel(true), false);
+        success(promise, p -> p.cancel(false), false);
+        failure(promise, p -> p.setSuccess(null), IllegalStateException.class);
+        failure(promise, p -> p.setFailure(new NullPointerException()), IllegalStateException.class);
+        success(promise, Promise::cause, cause);
+        success(promise, Promise::getNow, null);
+        success(promise, p -> p.trySuccess(otherValue), false);
+        success(promise, p -> p.tryFailure(new NullPointerException()), false);
+        success(promise, Promise::getNow, null);
+        success(promise, Promise::cause, cause);
+        promise.addListener(listeners.get());
+        promise.addListeners(listeners.getRecursive(), listeners.get());
+        promise.addListener(listeners.getRecursive());
+        promise.addListener(listeners.getRunnable(promise));
+        promise.addListener(listeners.getRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListener(listeners.getRecursiveRunnable(promise), MoreExecutors.directExecutor());
+        promise.addListener(listeners.getRecursive());
+        promise.addCallback(listeners.getCallback(promise));
+        promise.addCallback(listeners.getRecursiveCallback(promise));
+        promise.addCallback(fail -> Assert.fail(), listeners.getConsumer());
+        promise.addCallback(fail -> Assert.fail(), listeners.getRecursiveConsumer());
+        promise.flatMap(listeners.getAsyncFunction()).addListener(listeners.get());
+        promise.flatMap(listeners.getAsyncFunction(), MoreExecutors.directExecutor()).addListener(listeners.get());
+        success(promise, Promise::isSuccess, false);
+        success(promise, Promise::isDone, true);
+        success(promise, Promise::isCancelled, false);
+        success(promise, Promise::isCancellable, false);
+        async.verify();
+        Assert.assertEquals(listeners.count, results.size());
+        Assert.assertEquals(listeners.count, order.size());
+        for (Throwable result : results)
+            Assert.assertEquals(cause, result);
+        for (int i = 0 ; i < order.size() ; ++i)
+            Assert.assertEquals(i, order.get(i).intValue());
+    }
+
+    // TODO: test listeners?
+    public <V> void testOneCancellation(Promise<V> promise, boolean interruptIfRunning, V otherValue)
+    {
+        Async async = new Async();
+        success(promise, Promise::isCancellable, true);
+        success(promise, Promise::getNow, null);
+        success(promise, Promise::cause, null);
+        async.failure(promise, p -> p.get(), CancellationException.class);
+        async.failure(promise, p -> p.get(1L, SECONDS), CancellationException.class);
+        async.success(promise, Promise::await, promise);
+        async.success(promise, Promise::awaitThrowUncheckedOnInterrupt, promise);
+        async.success(promise, Promise::awaitUninterruptibly, promise);
+        async.success(promise, p -> p.await(1L, SECONDS), true);
+        async.success(promise, p -> p.await(1000L), true);
+        async.success(promise, p -> p.awaitUninterruptibly(1L, SECONDS), true);
+        async.success(promise, p -> p.awaitThrowUncheckedOnInterrupt(1L, SECONDS), true);
+        async.success(promise, p -> p.awaitUninterruptibly(1000L), true);
+        async.success(promise, p -> p.awaitUntil(nanoTime() + SECONDS.toNanos(1L)), true);
+        async.success(promise, p -> p.awaitUntilUninterruptibly(nanoTime() + SECONDS.toNanos(1L)), true);
+        async.success(promise, p -> p.awaitUntilThrowUncheckedOnInterrupt(nanoTime() + SECONDS.toNanos(1L)), true);
+        async.failure(promise, p -> p.sync(), CancellationException.class);
+        async.failure(promise, p -> p.syncUninterruptibly(), CancellationException.class);
+        promise.cancel(interruptIfRunning);
+        failure(promise, p -> p.setSuccess(null), IllegalStateException.class);
+        failure(promise, p -> p.setFailure(null), IllegalStateException.class);
+        Assert.assertTrue(promise.cause() instanceof CancellationException);
+        success(promise, Promise::getNow, null);
+        success(promise, p -> p.trySuccess(otherValue), false);
+        success(promise, Promise::getNow, null);
+        Assert.assertTrue(promise.cause() instanceof CancellationException);
+        success(promise, Promise::isSuccess, false);
+        success(promise, Promise::isDone, true);
+        success(promise, Promise::isCancelled, true);
+        success(promise, Promise::isCancellable, false);
+        async.verify();
+    }
+
+
+    public <V> void testOneTimeout(Promise<V> promise)
+    {
+        Async async = new Async();
+        async.failure(promise, p -> p.get(1L, MILLISECONDS), TimeoutException.class);
+        async.success(promise, p -> p.await(1L, MILLISECONDS), false);
+        async.success(promise, p -> p.awaitThrowUncheckedOnInterrupt(1L, MILLISECONDS), false);
+        async.success(promise, p -> p.await(1L), false);
+        async.success(promise, p -> p.awaitUninterruptibly(1L, MILLISECONDS), false);
+        async.success(promise, p -> p.awaitThrowUncheckedOnInterrupt(1L, MILLISECONDS), false);
+        async.success(promise, p -> p.awaitUninterruptibly(1L), false);
+        async.success(promise, p -> p.awaitUntil(nanoTime() + MILLISECONDS.toNanos(1L)), false);
+        async.success(promise, p -> p.awaitUntilUninterruptibly(nanoTime() + MILLISECONDS.toNanos(1L)), false);
+        async.success(promise, p -> p.awaitUntilThrowUncheckedOnInterrupt(nanoTime() + MILLISECONDS.toNanos(1L)), false);
+        Uninterruptibles.sleepUninterruptibly(10L, MILLISECONDS);
+        async.verify();
+    }
+
+}
+
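The harness above exercises the Promise API along three axes: completion (setSuccess/trySuccess, setFailure/tryFailure), notification (addListener/addCallback) and derivation (map/flatMap). A minimal usage sketch follows; it relies only on calls that appear in the test, while the class name and the printing around them are illustrative assumptions.

import org.apache.cassandra.utils.concurrent.AsyncPromise;
import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.Promise;

// Illustrative sketch, not part of the patch: the happy path that testOneSuccess walks.
class PromiseUsageSketch
{
    static void run()
    {
        Promise<Integer> promise = new AsyncPromise<>();

        // Callbacks registered before completion fire once the promise is completed.
        promise.addCallback(v -> System.out.println("success: " + v),
                            t -> System.out.println("failure: " + t));

        // map() derives a new Future whose value is computed from the original result.
        Future<String> mapped = promise.map(v -> "value=" + v);

        promise.setSuccess(1);                      // completes the promise exactly once
        System.out.println(mapped.getNow());        // "value=1" once the chain has run
        System.out.println(promise.trySuccess(2));  // false: already completed
    }
}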
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestAwaitable.java b/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestAwaitable.java
new file mode 100644
index 0000000..8b95eaf
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestAwaitable.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
+import java.util.function.Consumer;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Assert;
+
+import net.openhft.chronicle.core.util.ThrowingBiConsumer;
+import net.openhft.chronicle.core.util.ThrowingConsumer;
+import net.openhft.chronicle.core.util.ThrowingFunction;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public abstract class AbstractTestAwaitable<A extends Awaitable>
+{
+    protected final ExecutorService exec = Executors.newCachedThreadPool();
+
+    protected void testOneSuccess(A awaitable, Consumer<A> signal)
+    {
+        Async async = new Async();
+        //noinspection Convert2MethodRef
+        async.success(awaitable, a -> a.await(), awaitable);
+        async.success(awaitable, a -> a.awaitUninterruptibly(), awaitable);
+        async.success(awaitable, a -> a.awaitThrowUncheckedOnInterrupt(), awaitable);
+        async.success(awaitable, a -> a.await(1L, SECONDS), true);
+        async.success(awaitable, a -> a.awaitUninterruptibly(1L, SECONDS), true);
+        async.success(awaitable, a -> a.awaitThrowUncheckedOnInterrupt(1L, SECONDS), true);
+        async.success(awaitable, a -> a.awaitUntil(Long.MAX_VALUE), true);
+        async.success(awaitable, a -> a.awaitUntilUninterruptibly(Long.MAX_VALUE), true);
+        async.success(awaitable, a -> a.awaitUntilThrowUncheckedOnInterrupt(Long.MAX_VALUE), true);
+        signal.accept(awaitable);
+        async.verify();
+    }
+
+    public void testOneTimeout(A awaitable)
+    {
+        Async async = new Async();
+        async.success(awaitable, a -> a.await(1L, MILLISECONDS), false);
+        async.success(awaitable, a -> a.awaitUninterruptibly(1L, MILLISECONDS), false);
+        async.success(awaitable, a -> a.awaitThrowUncheckedOnInterrupt(1L, MILLISECONDS), false);
+        async.success(awaitable, a -> a.awaitUntil(nanoTime() + MILLISECONDS.toNanos(1L)), false);
+        async.success(awaitable, a -> a.awaitUntilUninterruptibly(nanoTime() + MILLISECONDS.toNanos(1L)), false);
+        async.success(awaitable, a -> a.awaitUntilThrowUncheckedOnInterrupt(nanoTime() + MILLISECONDS.toNanos(1L)), false);
+        Uninterruptibles.sleepUninterruptibly(10L, MILLISECONDS);
+        async.verify();
+    }
+
+    public void testOneInterrupt(A awaitable)
+    {
+        Async async = new Async();
+        async.failure(awaitable, a -> { Thread.currentThread().interrupt(); a.await(); }, InterruptedException.class);
+        async.failure(awaitable, a -> { Thread.currentThread().interrupt(); a.await(1L, SECONDS); }, InterruptedException.class);
+        async.success(awaitable, a -> { Thread.currentThread().interrupt(); return a.awaitUninterruptibly(1L, SECONDS); }, false);
+        async.failure(awaitable, a -> { Thread.currentThread().interrupt(); a.awaitThrowUncheckedOnInterrupt(1L, SECONDS); }, UncheckedInterruptedException.class);
+        async.failure(awaitable, a -> { Thread.currentThread().interrupt(); a.awaitUntil(nanoTime() + SECONDS.toNanos(1L)); }, InterruptedException.class);
+        async.success(awaitable, a -> { Thread.currentThread().interrupt(); return a.awaitUntilUninterruptibly(nanoTime() + SECONDS.toNanos(1L)); }, false);
+        async.failure(awaitable, a -> { Thread.currentThread().interrupt(); a.awaitUntilThrowUncheckedOnInterrupt(nanoTime() + SECONDS.toNanos(1L)); }, UncheckedInterruptedException.class);
+        Uninterruptibles.sleepUninterruptibly(2L, SECONDS);
+        async.verify();
+    }
+
+    class Async
+    {
+        final List<ThrowingBiConsumer<Long, TimeUnit, ?>> waitingOn = new ArrayList<>();
+        void verify()
+        {
+            for (int i = 0 ; i < waitingOn.size() ; ++i)
+            {
+                try
+                {
+                    waitingOn.get(i).accept(100L, MILLISECONDS);
+                }
+                catch (Throwable t)
+                {
+                    throw new AssertionError("" + i, t);
+                }
+            }
+        }
+        void failure(A awaitable, ThrowingConsumer<A, ?> action, Throwable failsWith)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestAwaitable.failure(awaitable, action, failsWith))::get);
+        }
+        void failure(A awaitable, ThrowingConsumer<A, ?> action, Class<? extends Throwable> failsWith)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestAwaitable.failure(awaitable, action, failsWith))::get);
+        }
+        void failure(A awaitable, ThrowingConsumer<A, ?> action, Predicate<Throwable> failsWith)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestAwaitable.failure(awaitable, action, failsWith))::get);
+        }
+        <P extends A, R> void success(P awaitable, ThrowingFunction<P, R, ?> action, R result)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestAwaitable.success(awaitable, action, result))::get);
+        }
+        <P extends A, R> void success(P awaitable, ThrowingFunction<P, R, ?> action, Predicate<R> result)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestAwaitable.success(awaitable, action, result))::get);
+        }
+    }
+
+    private static <A extends Awaitable> void failure(A awaitable, ThrowingConsumer<A, ?> action, Throwable failsWith)
+    {
+        failure(awaitable, action, t -> Objects.equals(failsWith, t));
+    }
+
+    static <A extends Awaitable> void failure(A awaitable, ThrowingConsumer<A, ?> action, Class<? extends Throwable> failsWith)
+    {
+        failure(awaitable, action, failsWith::isInstance);
+    }
+
+    private static <A extends Awaitable> void failure(A awaitable, ThrowingConsumer<A, ?> action, Predicate<Throwable> failsWith)
+    {
+        Throwable fail = null;
+        try
+        {
+            action.accept(awaitable);
+        }
+        catch (Throwable t)
+        {
+            fail = t;
+        }
+        if (!failsWith.test(fail))
+            throw new AssertionError(fail);
+    }
+
+    static <A extends Awaitable, R> void success(A awaitable, ThrowingFunction<A, R, ?> action, R result)
+    {
+        try
+        {
+            Assert.assertEquals(result, action.apply(awaitable));
+        }
+        catch (Throwable t)
+        {
+            throw new AssertionError(t);
+        }
+    }
+
+    static <A extends Awaitable, R> void success(A awaitable, ThrowingFunction<A, R, ?> action, Predicate<R> result)
+    {
+        try
+        {
+            Assert.assertTrue(result.test(action.apply(awaitable)));
+        }
+        catch (Throwable t)
+        {
+            throw new AssertionError(t);
+        }
+    }
+
+}
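The Async helper above encodes one pattern: every blocking await variant is submitted to a thread pool before the wake-up event, and verify() then bounds each pending task with a short get so a missed signal fails the test quickly instead of hanging it. A condensed, JDK-only sketch of that pattern; the java.util.concurrent latch here merely stands in for the Cassandra Awaitable under test.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

// Condensed form of the submit-then-verify pattern used by the Async helper.
class AwaitVerifySketch
{
    static void run() throws Exception
    {
        ExecutorService exec = Executors.newCachedThreadPool();
        CountDownLatch latch = new CountDownLatch(1);

        // 1. Start the blocking call before the signal, as Async.success(...) does.
        Future<Boolean> waiting = exec.submit(() -> latch.await(1L, TimeUnit.SECONDS));

        // 2. Fire the wake-up event (the tests call signal()/decrement()/setSuccess() here).
        latch.countDown();

        // 3. Verify with a bounded get, as Async.verify() does.
        if (!waiting.get(100L, TimeUnit.MILLISECONDS))
            throw new AssertionError("waiter was not released");
        exec.shutdownNow();
    }
}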
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestPromise.java b/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestPromise.java
new file mode 100644
index 0000000..982d42d
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/AbstractTestPromise.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
+
+import org.junit.Assert;
+
+import io.netty.util.concurrent.Promise;
+import net.openhft.chronicle.core.util.ThrowingBiConsumer;
+import net.openhft.chronicle.core.util.ThrowingConsumer;
+import net.openhft.chronicle.core.util.ThrowingFunction;
+
+public abstract class AbstractTestPromise
+{
+    protected final ExecutorService exec = Executors.newCachedThreadPool();
+
+    class Async
+    {
+        final List<ThrowingBiConsumer<Long, TimeUnit, ?>> waitingOn = new ArrayList<>();
+        void verify()
+        {
+            for (int i = 0 ; i < waitingOn.size() ; ++i)
+            {
+                try
+                {
+                    waitingOn.get(i).accept(1L, TimeUnit.SECONDS);
+                }
+                catch (Throwable t)
+                {
+                    throw new AssertionError("" + i, t);
+                }
+            }
+        }
+        <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Throwable failsWith)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestPromise.failure(promise, action, failsWith))::get);
+        }
+        <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Class<? extends Throwable> failsWith)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestPromise.failure(promise, action, failsWith))::get);
+        }
+        <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Predicate<Throwable> failsWith)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestPromise.failure(promise, action, failsWith))::get);
+        }
+        <P extends Promise<?>, R> void success(P promise, ThrowingFunction<P, R, ?> action, R result)
+        {
+            waitingOn.add(exec.submit(() -> AbstractTestPromise.success(promise, action, result))::get);
+        }
+    }
+
+    private static <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Throwable failsWith)
+    {
+        failure(promise, action, t -> Objects.equals(failsWith, t));
+    }
+
+    static <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Class<? extends Throwable> failsWith)
+    {
+        failure(promise, action, failsWith::isInstance);
+    }
+
+    private static <V> void failure(Promise<V> promise, ThrowingConsumer<Promise<V>, ?> action, Predicate<Throwable> failsWith)
+    {
+        Throwable fail = null;
+        try
+        {
+            action.accept(promise);
+        }
+        catch (Throwable t)
+        {
+            fail = t;
+        }
+        if (!failsWith.test(fail))
+            throw new AssertionError(fail);
+    }
+
+    static <P extends Promise<?>, R> void success(P promise, ThrowingFunction<P, R, ?> action, R result)
+    {
+        try
+        {
+            Assert.assertEquals(result, action.apply(promise));
+        }
+        catch (Throwable t)
+        {
+            throw new AssertionError(t);
+        }
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/AsyncPromiseTest.java b/test/unit/org/apache/cassandra/utils/concurrent/AsyncPromiseTest.java
new file mode 100644
index 0000000..cb1da09
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/AsyncPromiseTest.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.concurrent;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.MoreExecutors;
+
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.utils.WithResources;
+
+public class AsyncPromiseTest extends AbstractTestAsyncPromise
+{
+    @After
+    public void shutdown()
+    {
+        exec.shutdownNow();
+    }
+
+    private static <V> List<Supplier<Promise<V>>> suppliers(AtomicInteger listeners, boolean includeUncancellable)
+    {
+        List<Supplier<Promise<V>>> cancellable = ImmutableList.of(
+            () -> new AsyncPromise<>(),
+            () -> new AsyncPromise<>(f -> listeners.incrementAndGet()),
+            () -> AsyncPromise.withExecutor(TestInExecutor.INSTANCE));
+        List<Supplier<Promise<V>>> uncancellable = ImmutableList.of(
+            () -> AsyncPromise.uncancellable(),
+            () -> AsyncPromise.uncancellable((GenericFutureListener<? extends Future<? super V>>) f -> listeners.incrementAndGet()),
+            () -> AsyncPromise.uncancellable(MoreExecutors.directExecutor()),
+            () -> AsyncPromise.uncancellable(TestInExecutor.INSTANCE)
+        );
+
+        if (!includeUncancellable)
+            return cancellable;
+
+        ImmutableList.Builder<Supplier<Promise<V>>> builder = ImmutableList.builder();
+        builder.addAll(cancellable)
+               .addAll(cancellable.stream().map(s -> (Supplier<Promise<V>>) () -> cancelSuccess(s.get())).collect(Collectors.toList()))
+               .addAll(cancellable.stream().map(s -> (Supplier<Promise<V>>) () -> cancelExclusiveSuccess(s.get())).collect(Collectors.toList()))
+               .addAll(uncancellable)
+               .addAll(uncancellable.stream().map(s -> (Supplier<Promise<V>>) () -> cancelFailure(s.get())).collect(Collectors.toList()));
+        return builder.build();
+    }
+
+    @Test
+    public void testSuccess()
+    {
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Integer>>> suppliers = suppliers(initialListeners, true);
+        for (boolean tryOrSet : new boolean[]{ false, true })
+            for (Integer v : new Integer[]{ null, 1 })
+                for (Supplier<Promise<Integer>> supplier : suppliers)
+                    testOneSuccess(supplier.get(), tryOrSet, v, 2);
+        Assert.assertEquals(5 * 2 * 2, initialListeners.get());
+    }
+
+    @Test
+    public void testFailure()
+    {
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Integer>>> suppliers = suppliers(initialListeners, true);
+        for (boolean tryOrSet : new boolean[] { false, true })
+            for (Throwable v : new Throwable[] { null, new NullPointerException() })
+                for (Supplier<Promise<Integer>> supplier : suppliers)
+                    testOneFailure(supplier.get(), tryOrSet, v, 2);
+        Assert.assertEquals(5 * 2 * 2, initialListeners.get());
+    }
+
+
+    @Test
+    public void testCancellation()
+    {
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Integer>>> suppliers = suppliers(initialListeners, false);
+        for (boolean interruptIfRunning : new boolean[] { true, false })
+            for (Supplier<Promise<Integer>> supplier : suppliers)
+                testOneCancellation(supplier.get(), interruptIfRunning, 2);
+        Assert.assertEquals(2, initialListeners.get());
+    }
+
+
+    @Test
+    public void testTimeout()
+    {
+        final AtomicInteger initialListeners = new AtomicInteger();
+        List<Supplier<Promise<Integer>>> suppliers = suppliers(initialListeners, true);
+        for (Supplier<Promise<Integer>> supplier : suppliers)
+            testOneTimeout(supplier.get());
+        Assert.assertEquals(0, initialListeners.get());
+    }
+
+    private static final class TestInExecutor implements ExecutorPlus
+    {
+        static final TestInExecutor INSTANCE = new TestInExecutor();
+        @Override
+        public void shutdown()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public List<Runnable> shutdownNow()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean isShutdown()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean isTerminated()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public <T> org.apache.cassandra.utils.concurrent.Future<T> submit(Callable<T> task)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public <T> org.apache.cassandra.utils.concurrent.Future<T> submit(Runnable task, T result)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public org.apache.cassandra.utils.concurrent.Future<?> submit(Runnable task)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void execute(WithResources withResources, Runnable task)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public <T> org.apache.cassandra.utils.concurrent.Future<T> submit(WithResources withResources, Callable<T> task)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public org.apache.cassandra.utils.concurrent.Future<?> submit(WithResources withResources, Runnable task)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public <T> org.apache.cassandra.utils.concurrent.Future<T> submit(WithResources withResources, Runnable task, T result)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean inExecutor()
+        {
+            return true;
+        }
+
+        @Override
+        public void execute(Runnable command)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int getCorePoolSize()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void setCorePoolSize(int newCorePoolSize)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int getMaximumPoolSize()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void setMaximumPoolSize(int newMaximumPoolSize)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int getActiveTaskCount()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public long getCompletedTaskCount()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int getPendingTaskCount()
+        {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+}
+
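The supplier lists above split into cancellable and uncancellable promises, and the cancel* wrappers exercise the difference. A short sketch of the behaviour being distinguished: the assertions on the cancellable promise mirror testOneCancellation, while the expectation that an uncancellable promise refuses cancel() is inferred from the cancelFailure wrapper rather than restated from its code.

import java.util.concurrent.CancellationException;

import org.apache.cassandra.utils.concurrent.AsyncPromise;
import org.apache.cassandra.utils.concurrent.Promise;

// Illustrative sketch, not part of the patch.
class CancellationSketch
{
    static void run()
    {
        Promise<Integer> cancellable = new AsyncPromise<>();
        if (!cancellable.cancel(false) || !cancellable.isCancelled())
            throw new AssertionError();
        if (!(cancellable.cause() instanceof CancellationException))
            throw new AssertionError();

        Promise<Integer> uncancellable = AsyncPromise.uncancellable();
        if (uncancellable.cancel(false))        // cancellation is refused
            throw new AssertionError();
        if (uncancellable.isDone())             // still waiting for setSuccess/setFailure
            throw new AssertionError();
    }
}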
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/ConditionTest.java b/test/unit/org/apache/cassandra/utils/concurrent/ConditionTest.java
new file mode 100644
index 0000000..290ad70
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/ConditionTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.function.Supplier;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class ConditionTest extends AbstractTestAwaitable<Condition>
+{
+    @Test
+    public void testAsync()
+    {
+        testOne(Condition.Async::new);
+    }
+
+    @Test
+    public void testSync()
+    {
+        testOne(Condition.Sync::new);
+    }
+
+    void testOne(Supplier<Condition> cs)
+    {
+        Condition c = cs.get();
+        testOneTimeout(c);
+        Assert.assertFalse(c.isSignalled());
+
+        testOneInterrupt(c);
+        Assert.assertFalse(c.isSignalled());
+
+        testOneSuccess(c, Condition::signal);
+        Assert.assertTrue(c.isSignalled());
+
+        Condition c2 = cs.get();
+        testOneSuccess(c2, Condition::signalAll);
+        Assert.assertTrue(c2.isSignalled());
+    }
+}
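In these tests Condition behaves as a one-shot latch: once signalled it stays signalled, which is why testOne can assert isSignalled() after the success round. A minimal usage sketch, assuming a single waiting thread; only the Condition.Async constructor, signal(), isSignalled() and awaitThrowUncheckedOnInterrupt() are taken from the tests above.

import org.apache.cassandra.utils.concurrent.Condition;

// Illustrative sketch, not part of the patch.
class ConditionSketch
{
    static void run()
    {
        Condition done = new Condition.Async();
        new Thread(() -> {
            // ... produce whatever the waiter needs ...
            done.signal();                       // wakes current and future waiters
        }).start();
        done.awaitThrowUncheckedOnInterrupt();   // blocks until signalled, no checked exception
        if (!done.isSignalled())
            throw new AssertionError();
    }
}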
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/CountDownLatchTest.java b/test/unit/org/apache/cassandra/utils/concurrent/CountDownLatchTest.java
new file mode 100644
index 0000000..3361eab
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/CountDownLatchTest.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.function.Supplier;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class CountDownLatchTest extends AbstractTestAwaitable<CountDownLatch>
+{
+    @Test
+    public void testAsync()
+    {
+        testOne(() -> new CountDownLatch.Async(1));
+        testNone(() -> new CountDownLatch.Async(0));
+    }
+
+    @Test
+    public void testSync()
+    {
+        testOne(() -> new CountDownLatch.Sync(1));
+        testNone(() -> new CountDownLatch.Sync(0));
+    }
+
+    void testOne(Supplier<CountDownLatch> cs)
+    {
+        CountDownLatch c = cs.get();
+        testOneTimeout(c);
+        Assert.assertEquals(1, c.count());
+
+        testOneInterrupt(c);
+        Assert.assertEquals(1, c.count());
+
+        testOneSuccess(c, CountDownLatch::decrement);
+        Assert.assertEquals(0, c.count());
+    }
+
+    void testNone(Supplier<CountDownLatch> cs)
+    {
+        CountDownLatch c = cs.get();
+        Assert.assertEquals(0, c.count());
+        testOneSuccess(c, ignore -> {});
+    }
+}
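CountDownLatch mirrors the JDK latch but exposes count() and decrement() and plugs into the same Awaitable harness. A small sketch of the single-count case exercised by testOne; the extra waiting thread and the bounded await are assumptions of this example.

import static java.util.concurrent.TimeUnit.SECONDS;

import org.apache.cassandra.utils.concurrent.CountDownLatch;

// Illustrative sketch, not part of the patch.
class LatchSketch
{
    static void run() throws InterruptedException
    {
        CountDownLatch latch = new CountDownLatch.Async(1);
        new Thread(latch::decrement).start();     // releases waiters once the count hits zero
        if (!latch.await(1L, SECONDS))            // true once the count has dropped to zero
            throw new AssertionError("latch was not released in time");
        if (latch.count() != 0)
            throw new AssertionError();
    }
}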
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/ImmediateFutureTest.java b/test/unit/org/apache/cassandra/utils/concurrent/ImmediateFutureTest.java
new file mode 100644
index 0000000..c9c96db
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/ImmediateFutureTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Predicate;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.shared.ThrowingRunnable;
+
+public class ImmediateFutureTest
+{
+
+    private void testSimple(ImmediateFuture<Boolean> p, boolean isCancelled) throws InterruptedException
+    {
+        Assert.assertEquals(p, p.await());
+        Assert.assertEquals(p, p.awaitUninterruptibly());
+        Assert.assertEquals(p, p.awaitThrowUncheckedOnInterrupt());
+        Assert.assertTrue(p.await(1L, TimeUnit.MILLISECONDS));
+        Assert.assertTrue(p.await(1L));
+        Assert.assertTrue(p.awaitUntil(Long.MAX_VALUE));
+        Assert.assertTrue(p.awaitUntilUninterruptibly(Long.MAX_VALUE));
+        Assert.assertTrue(p.awaitUntilThrowUncheckedOnInterrupt(Long.MAX_VALUE));
+        Assert.assertTrue(p.isDone());
+        Assert.assertFalse(p.isCancellable());
+        Assert.assertEquals(isCancelled, p.isCancelled());
+        Assert.assertEquals(!isCancelled, p.setUncancellable());
+        Assert.assertFalse(p.setUncancellableExclusive());
+        Assert.assertFalse(p.cancel(true));
+        Assert.assertFalse(p.cancel(false));
+        Assert.assertFalse(p.trySuccess(false));
+        Assert.assertFalse(p.tryFailure(new InterruptedException()));
+    }
+
+    @Test
+    public void testSucceeded() throws InterruptedException, ExecutionException, TimeoutException
+    {
+        ImmediateFuture<Boolean> p = ImmediateFuture.success(true);
+        Assert.assertTrue(p.getNow());
+        Assert.assertTrue(p.get());
+        Assert.assertTrue(p.get(1L, TimeUnit.MILLISECONDS));
+        Assert.assertEquals(p, p.sync());
+        Assert.assertEquals(p, p.syncUninterruptibly());
+        Assert.assertFalse(p.isCancelled());
+        testSimple(p, false);
+    }
+
+    @Test
+    public void testFailed() throws InterruptedException
+    {
+        testFailed(ImmediateFuture.failure(new RuntimeException()), false, t -> t instanceof ExecutionException, t -> t instanceof RuntimeException);
+    }
+
+    @Test
+    public void testCancelled() throws InterruptedException
+    {
+        testFailed(ImmediateFuture.cancelled(), true, t -> t instanceof CancellationException, t -> t instanceof CancellationException);
+    }
+
+    private void testFailed(ImmediateFuture<Boolean> p, boolean isCancelled, Predicate<Throwable> get, Predicate<Throwable> sync) throws InterruptedException
+    {
+        Assert.assertNull(p.getNow());
+        assertFailure(p::get, get);
+        assertFailure(() -> p.get(1L, TimeUnit.MILLISECONDS), get);
+        assertFailure(p::sync, sync);
+        assertFailure(p::syncUninterruptibly, sync);
+        testSimple(p, isCancelled);
+    }
+
+    private static void assertFailure(ThrowingRunnable run, Predicate<Throwable> test)
+    {
+        Throwable failure = null;
+        try
+        {
+            run.run();
+        }
+        catch (Throwable t)
+        {
+            failure = t;
+        }
+        if (failure == null || !test.test(failure))
+            Assert.fail();
+    }
+
+}
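ImmediateFuture covers the three pre-completed states (success, failure, cancelled) without ever blocking, which is why the test can call every await variant inline. A compact sketch of those states, using only the factory methods and accessors relied on by this patch's tests.

import org.apache.cassandra.utils.concurrent.ImmediateFuture;

// Illustrative sketch, not part of the patch.
class ImmediateFutureSketch
{
    static void run()
    {
        if (ImmediateFuture.success(42).getNow() != 42)
            throw new AssertionError();
        if (!(ImmediateFuture.failure(new RuntimeException("boom")).cause() instanceof RuntimeException))
            throw new AssertionError();
        if (!ImmediateFuture.cancelled().isCancelled())
            throw new AssertionError();
    }
}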
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/LoadingMapTest.java b/test/unit/org/apache/cassandra/utils/concurrent/LoadingMapTest.java
new file mode 100644
index 0000000..822e9eb
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/LoadingMapTest.java
@@ -0,0 +1,442 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.cassandra.concurrent.ExecutorPlus;
+import org.apache.cassandra.utils.Throwables;
+import org.awaitility.Awaitility;
+import org.awaitility.core.ConditionFactory;
+
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.utils.FBUtilities.now;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class LoadingMapTest
+{
+    private LoadingMap<Integer, String> map;
+    private final ExecutorPlus executor = executorFactory().pooled("TEST", 10);
+    private final CyclicBarrier b1 = new CyclicBarrier(2);
+    private final CyclicBarrier b2 = new CyclicBarrier(2);
+
+    private Future<String> f1, f2;
+
+    @Before
+    public void beforeTest()
+    {
+        map = new LoadingMap<>();
+    }
+
+    @After
+    public void afterTest() throws TimeoutException
+    {
+        Instant deadline = now().plus(Duration.ofSeconds(5));
+        while (executor.getPendingTaskCount() > 0 || executor.getActiveTaskCount() > 0)
+        {
+            if (now().isAfter(deadline))
+                throw new TimeoutException();
+
+            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+        }
+
+        b1.reset();
+        b2.reset();
+        f1 = f2 = null;
+    }
+
+    @Test
+    public void loadForDifferentKeysShouldNotBlockEachOther() throws Exception
+    {
+        f1 = submitLoad(1, "one", b1, null);
+        await().untilAsserted(() -> assertThat(b1.getNumberWaiting()).isGreaterThan(0)); // wait until we enter loading function
+
+        f2 = submitLoad(2, "two", b2, null);
+        await().untilAsserted(() -> assertThat(b2.getNumberWaiting()).isGreaterThan(0)); // wait until we enter loading function
+
+        assertThat(map.get(1)).isNotNull();
+        assertThat(map.get(2)).isNotNull();
+        assertThat(map.getIfReady(1)).isNull();
+        assertThat(map.getIfReady(2)).isNull();
+        assertThat(f1).isNotDone();
+        assertThat(f2).isNotDone();
+
+        // since we were able to enter both loading functions, it means they can work concurrently
+
+        b2.await();
+        assertFuture(f2, "two");
+
+        assertThat(f1).isNotDone();
+
+        b1.await();
+        assertFuture(f1, "one");
+
+        assertThat(map.getIfReady(1)).isEqualTo("one");
+        assertThat(map.getIfReady(2)).isEqualTo("two");
+    }
+
+    @Test
+    public void loadInsideLoadShouldNotCauseDeadlock()
+    {
+        String v = map.blockingLoadIfAbsent(1, () -> {
+            assertThat(map.blockingLoadIfAbsent(2, () -> "two")).isEqualTo("two");
+            return "one";
+        });
+
+        assertThat(v).isEqualTo("one");
+
+        assertThat(map.getIfReady(1)).isEqualTo("one");
+        assertThat(map.getIfReady(2)).isEqualTo("two");
+    }
+
+    @Test
+    public void unloadForDifferentKeysShouldNotBlockEachOther() throws Exception
+    {
+        initMap();
+
+        f1 = submitUnload(1, "one", b1, null);
+        await().untilAsserted(() -> assertThat(b1.getNumberWaiting()).isGreaterThan(0)); // wait until we enter unloading function
+
+        f2 = submitUnload(2, "two", b2, null);
+        await().untilAsserted(() -> assertThat(b2.getNumberWaiting()).isGreaterThan(0)); // wait until we enter unloading function
+
+        assertThat(map.get(1)).isNotNull();
+        assertThat(map.get(2)).isNotNull();
+        assertThat(map.getIfReady(1)).isNull();
+        assertThat(map.getIfReady(2)).isNull();
+        assertThat(f1).isNotDone();
+        assertThat(f2).isNotDone();
+
+        // since we were able to enter both unloading functions, it means they can work concurrently
+
+        b2.await();
+        assertFuture(f2, "two");
+
+        assertThat(f1).isNotDone();
+
+        b1.await();
+        assertFuture(f1, "one");
+
+        assertThat(map.get(1)).isNull();
+        assertThat(map.get(2)).isNull();
+    }
+
+    @Test
+    public void unloadInsideUnloadShouldNotCauseDeadlock() throws LoadingMap.UnloadExecutionException
+    {
+        initMap();
+
+        String v = map.blockingUnloadIfPresent(1, v1 -> {
+            assertThat(map.getIfReady(1)).isNull();
+
+            try
+            {
+                assertThat(map.blockingUnloadIfPresent(2, v2 -> assertThat(map.getIfReady(2)).isNull())).isEqualTo("two");
+            }
+            catch (LoadingMap.UnloadExecutionException e)
+            {
+                throw Throwables.unchecked(e);
+            }
+        });
+
+        assertThat(v).isEqualTo("one");
+
+        assertThat(map.get(1)).isNull();
+        assertThat(map.get(2)).isNull();
+    }
+
+    @Test
+    public void twoConcurrentLoadAttemptsFirstOneShouldWin() throws Exception
+    {
+        f1 = submitLoad(1, "one", b1, null);
+        await().untilAsserted(() -> assertThat(b1.getNumberWaiting()).isGreaterThan(0)); // wait until we enter loading function
+
+        f2 = submitLoad(1, "two", b2, null);
+        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+
+        assertThat(f1).isNotDone();
+        assertThat(f2).isNotDone();
+
+        b1.await();
+
+        assertFutures("one", "one");
+        assertThat(map.getIfReady(1)).isEqualTo("one");
+        assertThat(b2.getNumberWaiting()).isZero();
+    }
+
+    @Test
+    public void twoConcurrentUnloadAttemptsFirstOneShouldWin() throws Exception
+    {
+        initMap();
+        f1 = submitUnload(1, "one", b1, null);
+        await().untilAsserted(() -> assertThat(b1.getNumberWaiting()).isGreaterThan(0)); // wait until we enter unloading function
+
+        f2 = submitUnload(1, "one", b2, null);
+
+        assertFuture(f2, null); // f2 should return immediately
+
+        b1.await(); // let f1 continue
+        assertFuture(f1, "one");
+
+        assertThat(map.getIfReady(1)).isNull();
+        assertThat(b2.getNumberWaiting()).isZero();
+    }
+
+    @Test
+    public void loadWhileUnloading() throws Exception
+    {
+        initMap();
+
+        f1 = submitUnload(1, "one", b1, null);
+        await().untilAsserted(() -> assertThat(b1.getNumberWaiting()).isGreaterThan(0)); // wait until we enter unloading function
+
+        f2 = submitLoad(1, "two", null, null);
+        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+
+        assertThat(f1).isNotDone();
+        assertThat(f2).isNotDone();
+
+        b1.await();
+
+        assertFutures("one", "two");
+        assertThat(map.getIfReady(1)).isEqualTo("two");
+    }
+
+    @Test
+    public void unloadWhileLoading() throws Exception
+    {
+        f1 = submitLoad(1, "one", b1, null);
+        await().untilAsserted(() -> assertThat(b1.getNumberWaiting()).isGreaterThan(0)); // wait until we enter loading function
+
+        f2 = submitUnload(1, "one", null, null);
+        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+
+        assertThat(f1).isNotDone();
+        assertThat(f2).isNotDone();
+
+        b1.await();
+
+        assertFutures("one", "one");
+        assertThat(map.getIfReady(1)).isNull();
+    }
+
+    @Test
+    public void loadWhenExists()
+    {
+        initMap();
+
+        f1 = submitLoad(1, "three", b1, null);
+        assertThat(b1.getNumberWaiting()).isZero();
+        assertFuture(f1, "one");
+    }
+
+    @Test
+    public void unloadWhenMissing()
+    {
+        f1 = submitUnload(1, null, b1, null);
+        assertThat(b1.getNumberWaiting()).isZero();
+        assertFuture(f1, null);
+    }
+
+    @Test
+    public void nullLoad()
+    {
+        f1 = submitLoad(1, null, null, null);
+        f1.awaitThrowUncheckedOnInterrupt(5, TimeUnit.SECONDS);
+        assertThat(f1.cause()).isInstanceOf(NullPointerException.class);
+
+        assertThat(map.get(1)).isNull();
+        assertThat(map.get(2)).isNull();
+    }
+
+    @Test
+    public void failedLoad()
+    {
+        f1 = submitLoad(1, null, null, () -> {
+            throw new RuntimeException("abc");
+        });
+        f1.awaitThrowUncheckedOnInterrupt(5, TimeUnit.SECONDS);
+        assertThat(f1.cause()).isInstanceOf(RuntimeException.class);
+        assertThat(f1.cause()).hasMessage("abc");
+
+        assertThat(map.get(1)).isNull();
+        assertThat(map.get(2)).isNull();
+    }
+
+    @Test
+    public void failedUnload()
+    {
+        initMap();
+
+        f1 = submitUnload(1, "one", null, () -> {
+            throw new RuntimeException("abc");
+        });
+        f1.awaitThrowUncheckedOnInterrupt(5, TimeUnit.SECONDS);
+        assertThat(f1.cause()).isInstanceOf(LoadingMap.UnloadExecutionException.class);
+        LoadingMap.UnloadExecutionException ex = (LoadingMap.UnloadExecutionException) f1.cause();
+
+        assertThat(ex).hasRootCauseInstanceOf(RuntimeException.class);
+        assertThat(ex).hasRootCauseMessage("abc");
+        assertThat((String) ex.value()).isEqualTo("one");
+
+        assertThat(map.get(1)).isNull();
+    }
+
+    @Test
+    public void fuzzTest()
+    {
+        AtomicInteger failures = new AtomicInteger();
+        AtomicInteger state = new AtomicInteger();
+        CyclicBarrier barrier = new CyclicBarrier(10);
+        AtomicBoolean stop = new AtomicBoolean();
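+        // 5 loader and 5 unloader threads hammer the same key; each (un)loading function observes the shared
+        // state counter, sleeps, then CASes it forward, so two functions running concurrently for the same key
+        // would fail the CAS and be counted as a failure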
+        for (int i = 0; i < 5; i++)
+        {
+            executor.submit(() -> {
+                while (!Thread.currentThread().isInterrupted() && !stop.get())
+                {
+                    try
+                    {
+                        barrier.await();
+                        Uninterruptibles.sleepUninterruptibly(ThreadLocalRandom.current().nextInt(50), TimeUnit.MILLISECONDS);
+                        map.blockingLoadIfAbsent(1, () -> {
+                            int s = state.get();
+                            Uninterruptibles.sleepUninterruptibly(ThreadLocalRandom.current().nextInt(50, 100), TimeUnit.MILLISECONDS);
+                            if (!state.compareAndSet(s, s + 1))
+                                failures.incrementAndGet();
+                            if (ThreadLocalRandom.current().nextInt(100) < 10)
+                                return null;
+                            if (ThreadLocalRandom.current().nextInt(100) < 10)
+                                throw new RuntimeException();
+                            return "one";
+                        });
+                    }
+                    catch (InterruptedException e)
+                    {
+                        break;
+                    }
+                    catch (Exception ignored)
+                    {
+                    }
+                }
+            });
+            executor.submit(() -> {
+                while (!Thread.currentThread().isInterrupted() && !stop.get())
+                {
+                    try
+                    {
+                        barrier.await();
+                        Uninterruptibles.sleepUninterruptibly(ThreadLocalRandom.current().nextInt(50), TimeUnit.MILLISECONDS);
+                        map.blockingUnloadIfPresent(1, v -> {
+                            int s = state.incrementAndGet();
+                            Uninterruptibles.sleepUninterruptibly(ThreadLocalRandom.current().nextInt(50, 100), TimeUnit.MILLISECONDS);
+                            if (!state.compareAndSet(s, s + 1))
+                                failures.incrementAndGet();
+                            if (ThreadLocalRandom.current().nextInt(100) < 10)
+                                throw new RuntimeException();
+                        });
+                    }
+                    catch (InterruptedException e)
+                    {
+                        break;
+                    }
+                    catch (Exception ignored)
+                    {
+                    }
+                }
+            });
+        }
+
+        Uninterruptibles.sleepUninterruptibly(15, TimeUnit.SECONDS);
+        stop.set(true);
+        while (executor.getActiveTaskCount() > 0)
+        {
+            barrier.reset();
+            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+        }
+        assertThat(failures.get()).isZero();
+        assertThat(state.get()).isGreaterThan(0);
+    }
+
+    private void assertFuture(Future<String> f, String v)
+    {
+        assertThat(f.awaitThrowUncheckedOnInterrupt(5, TimeUnit.SECONDS)).isTrue();
+        f.rethrowIfFailed();
+        assertThat(f.getNow()).isEqualTo(v);
+    }
+
+    private void assertFutures(String v1, String v2)
+    {
+        assertFuture(f1, v1);
+        assertFuture(f2, v2);
+    }
+
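+    // helpers below run a blocking load/unload on the shared executor; the (un)loading function optionally
+    // performs an extra action and/or parks on the given barrier so the test can control when it completes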
+    private Future<String> submitLoad(int key, String value, CyclicBarrier b, Throwables.DiscreteAction<?> extraAction)
+    {
+        return executor.submit(() -> map.blockingLoadIfAbsent(key, () -> {
+            Throwable a = null;
+            if (extraAction != null)
+                a = Throwables.perform(a, extraAction);
+            if (b != null)
+                a = Throwables.perform(a, b::await);
+            if (a != null)
+                throw Throwables.unchecked(a);
+            return value;
+        }));
+    }
+
+    private Future<String> submitUnload(int key, String expectedValue, CyclicBarrier b, Throwables.DiscreteAction<?> extraAction)
+    {
+        return executor.submit(() -> map.blockingUnloadIfPresent(key, v -> {
+            assertThat(v).isEqualTo(expectedValue);
+            Throwable a = null;
+            if (extraAction != null)
+                a = Throwables.perform(a, extraAction);
+            if (b != null)
+                a = Throwables.perform(a, b::await);
+            if (a != null)
+                throw Throwables.unchecked(a);
+        }));
+    }
+
+    private void initMap()
+    {
+        map.blockingLoadIfAbsent(1, () -> "one");
+        map.blockingLoadIfAbsent(2, () -> "two");
+
+        assertThat(map.getIfReady(1)).isEqualTo("one");
+        assertThat(map.getIfReady(2)).isEqualTo("two");
+    }
+
+    private ConditionFactory await()
+    {
+        return Awaitility.await().pollDelay(10, TimeUnit.MILLISECONDS).atMost(5, TimeUnit.SECONDS);
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/NonBlockingRateLimiterTest.java b/test/unit/org/apache/cassandra/utils/concurrent/NonBlockingRateLimiterTest.java
new file mode 100644
index 0000000..d63cef3
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/NonBlockingRateLimiterTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import com.google.common.base.Ticker;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@SuppressWarnings("UnstableApiUsage")
+public class NonBlockingRateLimiterTest
+{
+    private static final AtomicLong CLOCK = new AtomicLong(0);
+    private static final TimeUnit DELAY_UNIT = TimeUnit.NANOSECONDS;
+
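+    // Ticker backed by an atomic counter so tests can advance "time" deterministically instead of sleeping.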
+    private static final Ticker TICKER = new Ticker()
+    {
+        @Override
+        public long read() {
+            return CLOCK.get();
+        }
+    };
+
+    @Before
+    public void resetTicker()
+    {
+        CLOCK.set(0);
+    }
+
+    @Test
+    public void testUnconditionalReservation()
+    {
+        NonBlockingRateLimiter limiter = new NonBlockingRateLimiter(4, 0, TICKER);
+        long oneSecond = DELAY_UNIT.convert(1, TimeUnit.SECONDS);
+        long oneDelay = oneSecond / 4;
+
+        // Delays should begin accumulating without any ticker movement...
+        assertEquals(0, limiter.reserveAndGetDelay(DELAY_UNIT));
+        assertEquals(oneDelay, limiter.reserveAndGetDelay(DELAY_UNIT));
+        assertEquals(oneDelay * 2, limiter.reserveAndGetDelay(DELAY_UNIT));
+        assertEquals(oneDelay * 3, limiter.reserveAndGetDelay(DELAY_UNIT));
+
+        // ...but should be gone after advancing enough to free up a permit.
+        CLOCK.addAndGet(NonBlockingRateLimiter.NANOS_PER_SECOND);
+        assertEquals(0, limiter.reserveAndGetDelay(DELAY_UNIT));
+    }
+
+    @Test
+    public void testConditionalReservation()
+    {
+        NonBlockingRateLimiter limiter = new NonBlockingRateLimiter(1, 0, TICKER);
+        
+        // Take the available permit, but then fail a subsequent attempt.
+        assertTrue(limiter.tryReserve());
+        assertFalse(limiter.tryReserve());
+
+        // Advancing one second is enough, as the failed second attempt should not have consumed a permit.
+        CLOCK.addAndGet(NonBlockingRateLimiter.NANOS_PER_SECOND);
+        assertTrue(limiter.tryReserve());
+    }
+
+    @Test
+    public void testBurstPermitConsumption()
+    {
+        // Create a limiter that produces 1 permit/second and allows 1-second bursts.
+        NonBlockingRateLimiter limiter = new NonBlockingRateLimiter(1, NonBlockingRateLimiter.DEFAULT_BURST_NANOS, TICKER);
+
+        // Advance the clock to create a 1-second idle period, which makes one burst permit available.
+        CLOCK.addAndGet(NonBlockingRateLimiter.NANOS_PER_SECOND);
+        
+        // Take the burst permit.
+        assertTrue(limiter.tryReserve());
+        
+        // Take the "normal" permit.
+        assertTrue(limiter.tryReserve());
+        
+        // Then fail, as we've consumed both.
+        assertFalse(limiter.tryReserve());
+
+        // Advance 1 interval again...
+        CLOCK.addAndGet(NonBlockingRateLimiter.NANOS_PER_SECOND);
+
+        // ...and only one permit should be available, as we've reached a steady state.
+        assertTrue(limiter.tryReserve());
+        assertFalse(limiter.tryReserve());
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testMaximumRate()
+    {
+        new NonBlockingRateLimiter(Integer.MAX_VALUE, 0, Ticker.systemTicker());
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testMinimumRate()
+    {
+        new NonBlockingRateLimiter(-1, 0, Ticker.systemTicker());
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java b/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java
index 0d1f9f6..e99d368 100644
--- a/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java
+++ b/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java
@@ -22,7 +22,7 @@
 
 import org.junit.Assert;
 
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import java.lang.ref.WeakReference;
 import java.util.HashMap;
 import java.util.HashSet;
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/SemaphoreTest.java b/test/unit/org/apache/cassandra/utils/concurrent/SemaphoreTest.java
new file mode 100644
index 0000000..d52598b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/concurrent/SemaphoreTest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.concurrent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeoutException;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
+public class SemaphoreTest
+{
+
+    @Test
+    public void testUnfair() throws InterruptedException
+    {
+        Semaphore s = Semaphore.newSemaphore(2);
+        List<Future<Boolean>> fs = start(s);
+        s.release(1);
+        while (s.permits() == 1) Thread.yield();
+        Assert.assertEquals(1, fs.stream().filter(Future::isDone).count());
+        s.release(1);
+        while (s.permits() == 1) Thread.yield();
+        Assert.assertEquals(2, fs.stream().filter(Future::isDone).count());
+        s.release(1);
+        while (s.permits() == 1) Thread.yield();
+        Assert.assertEquals(3, fs.stream().filter(Future::isDone).count());
+        s.release(1);
+        Assert.assertEquals(1, s.permits());
+    }
+
+    @Test
+    public void testFair() throws InterruptedException, ExecutionException, TimeoutException
+    {
+        Semaphore s = Semaphore.newFairSemaphore(2);
+        List<Future<Boolean>> fs = start(s);
+        s.release(1);
+        fs.get(0).get(1L, MINUTES);
+        s.release(1);
+        fs.get(1).get(1L, MINUTES);
+        s.release(1);
+        fs.get(2).get(1L, MINUTES);
+        s.release(1);
+        Assert.assertEquals(1, s.permits());
+    }
+
+    private List<java.util.concurrent.Future<Boolean>> start(Semaphore s) throws InterruptedException
+    {
+        ExecutorService exec = Executors.newCachedThreadPool();
+        try
+        {
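+            // Consume all permits and verify that acquisition fails (including under interruption),
+            // then queue three waiters for the calling test to release one at a time.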
+            Assert.assertTrue(s.tryAcquire(1));
+            s.drain();
+            Assert.assertFalse(s.tryAcquire(1));
+            Assert.assertFalse(s.tryAcquire(1, 1L, MILLISECONDS));
+            Thread.currentThread().interrupt();
+            try { s.acquireThrowUncheckedOnInterrupt(1); Assert.fail(); } catch (UncheckedInterruptedException ignore) { }
+            Thread.currentThread().interrupt();
+            try { s.tryAcquire(1, 1L, MILLISECONDS); Assert.fail(); } catch (InterruptedException ignore) { }
+            Thread.currentThread().interrupt();
+            try { s.tryAcquireUntil(1, nanoTime() + MILLISECONDS.toNanos(1L)); Assert.fail(); } catch (InterruptedException ignore) { }
+            List<Future<Boolean>> fs = new ArrayList<>();
+            fs.add(exec.submit(() -> s.tryAcquire(1, 1L, MINUTES)));
+            while (s instanceof Semaphore.Standard && ((Semaphore.Standard) s).waiting() == 0) Thread.yield();
+            fs.add(exec.submit(() -> s.tryAcquireUntil(1, System.nanoTime() + MINUTES.toNanos(1L))));
+            while (s instanceof Semaphore.Standard && ((Semaphore.Standard) s).waiting() == 1) Thread.yield();
+            fs.add(exec.submit(() -> { s.acquire(1); return true; } ));
+            return fs;
+        }
+        finally
+        {
+            exec.shutdown();
+        }
+    }
+
+}
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/WeightedQueueTest.java b/test/unit/org/apache/cassandra/utils/concurrent/WeightedQueueTest.java
index 544e95c..d1ef045 100644
--- a/test/unit/org/apache/cassandra/utils/concurrent/WeightedQueueTest.java
+++ b/test/unit/org/apache/cassandra/utils/concurrent/WeightedQueueTest.java
@@ -22,12 +22,13 @@
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.Before;
 import org.junit.Test;
 
+import static org.apache.cassandra.utils.concurrent.BlockingQueues.newBlockingQueue;
+import static org.apache.cassandra.utils.concurrent.WeightedQueue.NATURAL_WEIGHER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -205,9 +206,9 @@
     public void testOfferWrappedQueueRefuses() throws Exception
     {
         queue = new WeightedQueue<>(10, new BadQueue(true), WeightedQueue.NATURAL_WEIGHER);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         assertFalse(queue.offer(new Object()));
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
     /**
@@ -217,7 +218,7 @@
     public void testOfferWrappedQueueThrows() throws Exception
     {
         queue = new WeightedQueue<>(10, new BadQueue(false), WeightedQueue.NATURAL_WEIGHER);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         try
         {
             assertFalse(queue.offer(new Object()));
@@ -227,7 +228,7 @@
         {
             //expected and desired
         }
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
     /**
@@ -246,7 +247,7 @@
     @Test
     public void testCustomWeigher() throws Exception
     {
-        queue = new WeightedQueue<>(10, new LinkedBlockingQueue<>(), weighable -> 10 );
+        queue = new WeightedQueue<>(10, newBlockingQueue(), weighable -> 10 );
         assertTrue(queue.offer(new Object()));
         assertFalse(queue.offer(new Object()));
     }
@@ -319,9 +320,9 @@
     public void testTimedOfferWrappedQueueRefuses() throws Exception
     {
         queue = new WeightedQueue<>(10, new BadQueue(true), WeightedQueue.NATURAL_WEIGHER);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         assertFalse(queue.offer(new Object(), 1, TimeUnit.MICROSECONDS));
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
     /**
@@ -331,7 +332,7 @@
     public void testTimedOfferWrappedQueueThrows() throws Exception
     {
         queue = new WeightedQueue<>(10, new BadQueue(false), WeightedQueue.NATURAL_WEIGHER);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         try
         {
             assertFalse(queue.offer(new Object(), 1, TimeUnit.MICROSECONDS));
@@ -341,26 +342,26 @@
         {
             //expected and desired
         }
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
 
     @Test
     public void testPoll() throws Exception
     {
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         assertNull(queue.poll());
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         Object o = new Object();
         assertTrue(queue.offer(o));
-        assertEquals(9, queue.availableWeight.availablePermits());
+        assertEquals(9, queue.availableWeight.permits());
         WeightedQueue.Weighable weighable = weighable(9);
         assertTrue(queue.offer(weighable));
-        assertEquals(0, queue.availableWeight.availablePermits());
+        assertEquals(0, queue.availableWeight.permits());
         assertEquals(o, queue.poll());
-        assertEquals(1, queue.availableWeight.availablePermits());
+        assertEquals(1, queue.availableWeight.permits());
         assertEquals(weighable, queue.poll());
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
     @Test(expected = NullPointerException.class)
@@ -373,9 +374,9 @@
     public void testPutFullBlocks() throws Exception
     {
         WeightedQueue.Weighable weighable = weighable(10);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         queue.put(weighable);
-        assertEquals(0, queue.availableWeight.availablePermits());
+        assertEquals(0, queue.availableWeight.permits());
         Object o = new Object();
         Thread t = new Thread(() -> {
             try
@@ -389,19 +390,19 @@
         t.start();
         Thread.sleep(100);
         assertTrue(t.getState() != Thread.State.TERMINATED);
-        assertEquals(0, queue.availableWeight.availablePermits());
+        assertEquals(0, queue.availableWeight.permits());
         assertEquals(weighable, queue.poll());
-        assertTrue(queue.availableWeight.availablePermits() > 0);
+        assertTrue(queue.availableWeight.permits() > 0);
         t.join();
         assertEquals(o, queue.poll());
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
     @Test
     public void testPutWrappedQueueThrows() throws Exception
     {
         queue = new WeightedQueue<>(10, new BadQueue(false), WeightedQueue.NATURAL_WEIGHER);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         try
         {
             queue.put(new Object());
@@ -411,7 +412,7 @@
         {
             //expected and desired
         }
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
     @Test(expected = IllegalArgumentException.class)
@@ -448,11 +449,11 @@
         t.start();
         Thread.sleep(500);
         assertTrue(t.getState() != Thread.State.TERMINATED);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
         queue.offer(new Object());
         t.join(60 * 1000);
         assertEquals(t.getState(), Thread.State.TERMINATED);
-        assertEquals(10, queue.availableWeight.availablePermits());
+        assertEquals(10, queue.availableWeight.permits());
     }
 
     @Test(expected = IllegalArgumentException.class)
@@ -464,7 +465,7 @@
     @Test(expected = IllegalArgumentException.class)
     public void testConstructor2LTZeroWeightThrows() throws Exception
     {
-        new WeightedQueue(0, new LinkedBlockingQueue<>(), WeightedQueue.NATURAL_WEIGHER);
+        new WeightedQueue(0, newBlockingQueue(), NATURAL_WEIGHER);
     }
 
     @Test(expected = NullPointerException.class)
@@ -476,7 +477,7 @@
     @Test(expected = NullPointerException.class)
     public void testConstructorNullWeigherThrows() throws Exception
     {
-        new WeightedQueue(1, new LinkedBlockingQueue<>(), null);
+        new WeightedQueue(1, newBlockingQueue(), null);
     }
 
     /**
diff --git a/test/unit/org/apache/cassandra/utils/memory/BufferPoolTest.java b/test/unit/org/apache/cassandra/utils/memory/BufferPoolTest.java
index 2b5df29..ab68e40 100644
--- a/test/unit/org/apache/cassandra/utils/memory/BufferPoolTest.java
+++ b/test/unit/org/apache/cassandra/utils/memory/BufferPoolTest.java
@@ -376,7 +376,7 @@
     @Test
     public void testChunkExhausted()
     {
-        final int size = BufferPool.NORMAL_CHUNK_SIZE / 64; // 1kbit
+        final int size = BufferPool.NORMAL_CHUNK_SIZE / 64; // 2KiB
         int[] sizes = new int[128];
         Arrays.fill(sizes, size);
 
@@ -875,8 +875,8 @@
     @Test
     public void testRecyclePartialFreeChunk()
     {
-        // normal chunk size is 128kb
-        int halfNormalChunk = BufferPool.NORMAL_CHUNK_SIZE / 2; // 64kb, half of normal chunk
+        // normal chunk size is 128KiB
+        int halfNormalChunk = BufferPool.NORMAL_CHUNK_SIZE / 2; // 64KiB, half of normal chunk
         List<ByteBuffer> toRelease = new ArrayList<>();
 
         // allocate three buffers on different chunks
diff --git a/test/unit/org/apache/cassandra/utils/memory/MemtableCleanerThreadTest.java b/test/unit/org/apache/cassandra/utils/memory/MemtableCleanerThreadTest.java
index 7100a2a..2ce9a8e 100644
--- a/test/unit/org/apache/cassandra/utils/memory/MemtableCleanerThreadTest.java
+++ b/test/unit/org/apache/cassandra/utils/memory/MemtableCleanerThreadTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.cassandra.utils.memory;
 
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -29,12 +28,12 @@
 import org.junit.Test;
 
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.AsyncPromise;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.when;
 
@@ -46,9 +45,8 @@
     @Mock
     private MemtablePool pool;
 
-    @Mock
+//    @Mock
     private MemtableCleaner cleaner;
-
     private MemtableCleanerThread<MemtablePool> cleanerThread;
 
     @Before
@@ -57,14 +55,9 @@
         MockitoAnnotations.initMocks(this);
     }
 
-    private void startThread()
+    private void startThread(MemtableCleaner cleaner)
     {
         cleanerThread = new MemtableCleanerThread<>(pool, cleaner);
-        assertNotNull(cleanerThread);
-        cleanerThread.start();
-
-        for (int i = 0; i < TIMEOUT_MILLIS && !cleanerThread.isAlive(); i++)
-            FBUtilities.sleepQuietly(1);
     }
 
     private void stopThread() throws InterruptedException
@@ -86,20 +79,19 @@
     public void testCleanerInvoked() throws Exception
     {
         CountDownLatch cleanerExecutedLatch = new CountDownLatch(1);
-        CompletableFuture<Boolean> fut = new CompletableFuture<>();
+        AsyncPromise<Boolean> fut = new AsyncPromise<>();
         AtomicBoolean needsCleaning = new AtomicBoolean(false);
 
         when(pool.needsCleaning()).thenAnswer(invocation -> needsCleaning.get());
-
-        when(cleaner.clean()).thenAnswer(invocation -> {
+        cleaner = () -> {
             needsCleaning.set(false);
             cleanerExecutedLatch.countDown();
             return fut;
-        });
+        };
 
         // start the thread with needsCleaning returning false, the cleaner should not be invoked
         needsCleaning.set(false);
-        startThread();
+        startThread(cleaner);
         assertFalse(cleanerExecutedLatch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
         assertEquals(1, cleanerExecutedLatch.getCount());
         assertEquals(0, cleanerThread.numPendingTasks());
@@ -113,7 +105,7 @@
 
         // now complete the cleaning task
         needsCleaning.set(false);
-        fut.complete(true);
+        fut.setSuccess(true);
         waitForPendingTasks();
 
         stopThread();
@@ -123,22 +115,22 @@
     public void testCleanerError() throws Exception
     {
         AtomicReference<CountDownLatch> cleanerLatch = new AtomicReference<>(new CountDownLatch(1));
-        AtomicReference<CompletableFuture<Boolean>> fut = new AtomicReference<>(new CompletableFuture<>());
+        AtomicReference<AsyncPromise<Boolean>> fut = new AtomicReference<>(new AsyncPromise<>());
         AtomicBoolean needsCleaning = new AtomicBoolean(false);
         AtomicInteger numTimeCleanerInvoked = new AtomicInteger(0);
 
         when(pool.needsCleaning()).thenAnswer(invocation -> needsCleaning.get());
 
-        when(cleaner.clean()).thenAnswer(invocation -> {
+        cleaner = () -> {
             needsCleaning.set(false);
             numTimeCleanerInvoked.incrementAndGet();
             cleanerLatch.get().countDown();
             return fut.get();
-        });
+        };
 
         // start the thread with needsCleaning returning true, the cleaner should be invoked
         needsCleaning.set(true);
-        startThread();
+        startThread(cleaner);
         assertTrue(cleanerLatch.get().await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
         assertEquals(0, cleanerLatch.get().getCount());
         assertEquals(1, cleanerThread.numPendingTasks());
@@ -146,17 +138,17 @@
 
         // complete the cleaning task with an error, no other cleaning task should be invoked
         cleanerLatch.set(new CountDownLatch(1));
-        CompletableFuture<Boolean> oldFut = fut.get();
-        fut.set(new CompletableFuture<>());
+        AsyncPromise<Boolean> oldFut = fut.get();
+        fut.set(new AsyncPromise<>());
         needsCleaning.set(false);
-        oldFut.completeExceptionally(new RuntimeException("Test"));
+        oldFut.setFailure(new RuntimeException("Test"));
         assertFalse(cleanerLatch.get().await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
         assertEquals(1, cleanerLatch.get().getCount());
         assertEquals(1, numTimeCleanerInvoked.get());
 
         // now trigger cleaning again and verify that a new task is invoked
         cleanerLatch.set(new CountDownLatch(1));
-        fut.set(new CompletableFuture<>());
+        fut.set(new AsyncPromise<>());
         needsCleaning.set(true);
         cleanerThread.trigger();
         assertTrue(cleanerLatch.get().await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
@@ -166,16 +158,16 @@
         //  complete the cleaning task with false (nothing should be scheduled)
         cleanerLatch.set(new CountDownLatch(1));
         oldFut = fut.get();
-        fut.set(new CompletableFuture<>());
+        fut.set(new AsyncPromise<>());
         needsCleaning.set(false);
-        oldFut.complete(false);
+        oldFut.setSuccess(false);
         assertFalse(cleanerLatch.get().await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
         assertEquals(1, cleanerLatch.get().getCount());
         assertEquals(2, numTimeCleanerInvoked.get());
 
         // now trigger cleaning again and verify that a new task is invoked
         cleanerLatch.set(new CountDownLatch(1));
-        fut.set(new CompletableFuture<>());
+        fut.set(new AsyncPromise<>());
         needsCleaning.set(true);
         cleanerThread.trigger();
         assertTrue(cleanerLatch.get().await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
diff --git a/test/unit/org/apache/cassandra/utils/memory/NativeAllocatorTest.java b/test/unit/org/apache/cassandra/utils/memory/NativeAllocatorTest.java
index 031e089..79a54cd 100644
--- a/test/unit/org/apache/cassandra/utils/memory/NativeAllocatorTest.java
+++ b/test/unit/org/apache/cassandra/utils/memory/NativeAllocatorTest.java
@@ -26,6 +26,7 @@
 import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 
 public class NativeAllocatorTest
@@ -65,7 +66,7 @@
                 allocatorRef.get().offHeap().released(80);
                 isClean.countDown();
             }
-            return CompletableFuture.completedFuture(true);
+            return ImmediateFuture.success(true);
         });
         allocator = new NativeAllocator(pool);
         allocatorRef.set(allocator);
diff --git a/test/unit/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilderTest.java b/test/unit/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilderTest.java
old mode 100755
new mode 100644
diff --git a/test/unit/org/apache/cassandra/utils/vint/VIntCodingTest.java b/test/unit/org/apache/cassandra/utils/vint/VIntCodingTest.java
index c7c3324..15f9cdc 100644
--- a/test/unit/org/apache/cassandra/utils/vint/VIntCodingTest.java
+++ b/test/unit/org/apache/cassandra/utils/vint/VIntCodingTest.java
@@ -18,19 +18,28 @@
 */
 package org.apache.cassandra.utils.vint;
 
+import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
 
-import io.netty.buffer.Unpooled;
+import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputBuffer;
+import org.apache.cassandra.io.util.WrappedDataOutputStreamPlus;
 
 import org.junit.Test;
 
 import org.junit.Assert;
 
+import static org.junit.Assert.fail;
+
 public class VIntCodingTest
 {
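+    // Each LONGS[i] is chosen so that its unsigned vint encoding takes exactly i + 1 bytes.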
+    private static final long[] LONGS = new long[] {53L, 10201L, 1097151L,
+                                                    168435455L, 33251130335L, 3281283447775L,
+                                                    417672546086779L, 52057592037927932L, 72057594037927937L};
 
     @Test
     public void testComputeSize() throws Exception
@@ -49,9 +58,9 @@
     {
         Assert.assertEquals(expectedSize, VIntCoding.computeUnsignedVIntSize(value));
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        DataOutputStream dos = new DataOutputStream(baos);
-        VIntCoding.writeUnsignedVInt(value, dos);
-        dos.flush();
+        WrappedDataOutputStreamPlus out = new WrappedDataOutputStreamPlus(baos);
+        VIntCoding.writeUnsignedVInt(value, out);
+        out.flush();
         Assert.assertEquals( expectedSize, baos.toByteArray().length);
 
         DataOutputBuffer dob = new DataOutputBuffer();
@@ -75,9 +84,9 @@
         int biggestOneByte = 127;
 
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        DataOutputStream dos = new DataOutputStream(baos);
-        VIntCoding.writeUnsignedVInt(biggestOneByte, dos);
-        dos.flush();
+        WrappedDataOutputStreamPlus out = new WrappedDataOutputStreamPlus(baos);
+        VIntCoding.writeUnsignedVInt(biggestOneByte, out);
+        out.flush();
         Assert.assertEquals( 1, baos.toByteArray().length);
 
         DataOutputBuffer dob = new DataOutputBuffer();
@@ -97,4 +106,99 @@
             Assert.assertEquals(i, result);
         }
     }
+
+    @Test
+    public void testWriteUnsignedVIntBufferedDOP() throws IOException
+    {
+        for (int i = 0; i < VIntCoding.MAX_SIZE - 1; i++)
+        {
+            long val = LONGS[i];
+            Assert.assertEquals(i + 1, VIntCoding.computeUnsignedVIntSize(val));
+            try (DataOutputBuffer out = new DataOutputBuffer())
+            {
+                VIntCoding.writeUnsignedVInt(val, out);
+                // read as ByteBuffer
+                Assert.assertEquals(val, VIntCoding.getUnsignedVInt(out.buffer(), 0));
+                // read as DataInput
+                InputStream is = new ByteArrayInputStream(out.toByteArray());
+                Assert.assertEquals(val, VIntCoding.readUnsignedVInt(new DataInputPlus.DataInputStreamPlus(is)));
+            }
+        }
+    }
+
+    @Test
+    public void testWriteUnsignedVIntUnbufferedDOP() throws IOException
+    {
+        for (int i = 0; i < VIntCoding.MAX_SIZE - 1; i++)
+        {
+            long val = LONGS[i];
+            Assert.assertEquals(i + 1, VIntCoding.computeUnsignedVIntSize(val));
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            try (WrappedDataOutputStreamPlus out = new WrappedDataOutputStreamPlus(baos))
+            {
+                VIntCoding.writeUnsignedVInt(val, out);
+                out.flush();
+                Assert.assertEquals( i + 1, baos.toByteArray().length);
+                // read as ByteBuffer
+                Assert.assertEquals(val, VIntCoding.getUnsignedVInt(ByteBuffer.wrap(baos.toByteArray()), 0));
+                // read as DataInput
+                InputStream is = new ByteArrayInputStream(baos.toByteArray());
+                Assert.assertEquals(val, VIntCoding.readUnsignedVInt(new DataInputPlus.DataInputStreamPlus(is)));
+            }
+        }
+    }
+
+    @Test
+    public void testWriteUnsignedVIntBB() throws IOException
+    {
+        for (int i = 0; i < VIntCoding.MAX_SIZE - 1; i++)
+        {
+            long val = LONGS[i];
+            Assert.assertEquals(i + 1, VIntCoding.computeUnsignedVIntSize(val));
+            ByteBuffer bb = ByteBuffer.allocate(VIntCoding.MAX_SIZE);
+            VIntCoding.writeUnsignedVInt(val, bb);
+            // read as ByteBuffer
+            Assert.assertEquals(val, VIntCoding.getUnsignedVInt(bb, 0));
+            // read as DataInput
+            InputStream is = new ByteArrayInputStream(bb.array());
+            Assert.assertEquals(val, VIntCoding.readUnsignedVInt(new DataInputPlus.DataInputStreamPlus(is)));
+        }
+    }
+
+    @Test
+    public void testWriteUnsignedVIntBBLessThan8Bytes() throws IOException
+    {
+        long val = 10201L;
+        Assert.assertEquals(2, VIntCoding.computeUnsignedVIntSize(val));
+        ByteBuffer bb = ByteBuffer.allocate(2);
+        VIntCoding.writeUnsignedVInt(val, bb);
+        // read as ByteBuffer
+        Assert.assertEquals(val, VIntCoding.getUnsignedVInt(bb, 0));
+        // read as DataInput
+        InputStream is = new ByteArrayInputStream(bb.array());
+        Assert.assertEquals(val, VIntCoding.readUnsignedVInt(new DataInputPlus.DataInputStreamPlus(is)));
+    }
+
+    @Test
+    public void testWriteUnsignedVIntBBHasLessThan8BytesLeft()
+    {
+        long val = 10201L;
+        Assert.assertEquals(2, VIntCoding.computeUnsignedVIntSize(val));
+        ByteBuffer bb = ByteBuffer.allocate(3);
+        bb.position(1);
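+        // only 2 bytes remain past the position, so the encoder cannot take an 8-byte fast path (hence the test name)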
+        VIntCoding.writeUnsignedVInt(val, bb);
+        // read as ByteBuffer
+        Assert.assertEquals(val, VIntCoding.getUnsignedVInt(bb, 1));
+    }
+
+    @Test
+    public void testWriteUnsignedVIntBBDoesNotHaveEnoughSpaceOverflows()
+    {
+        ByteBuffer bb = ByteBuffer.allocate(3);
+        try
+        {
+            VIntCoding.writeUnsignedVInt(52057592037927932L, bb);
+            fail();
+        } catch (BufferOverflowException e) {}
+    }
 }
diff --git a/tools/bin/cassandra-stressd b/tools/bin/cassandra-stressd
deleted file mode 100755
index 83f8006..0000000
--- a/tools/bin/cassandra-stressd
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DESC="Cassandra Stress Test Daemon"
-if [ "x$CASSANDRA_INCLUDE" = "x" ]; then
-    # Locations (in order) to use when searching for an include file.
-    for include in "`dirname "$0"`/cassandra.in.sh" \
-                   "$HOME/.cassandra.in.sh" \
-                   /usr/share/cassandra/cassandra.in.sh \
-                   /usr/local/share/cassandra/cassandra.in.sh \
-                   /opt/cassandra/cassandra.in.sh; do
-        if [ -r "$include" ]; then
-            . "$include"
-            break
-        fi
-    done
-elif [ -r "$CASSANDRA_INCLUDE" ]; then
-    . "$CASSANDRA_INCLUDE"
-fi
-
-if [ -z "$CLASSPATH" ]; then
-    echo "You must set the CLASSPATH var" >&2
-    exit 1
-fi
-
-case "$1" in
-  start)
-    echo "Starting $DESC: "
-    "$JAVA" -server -cp "$CLASSPATH" $JVM_OPTS \
-            -Dcassandra.storagedir="$cassandra_storagedir" \
-            -Dlogback.configurationFile=logback-tools.xml \
-            org.apache.cassandra.stress.StressServer $@ 1> ./stressd.out.log 2> ./stressd.err.log &
-    echo $! > ./stressd.pid
-    echo "done."
-  ;;
-  
-  stop)
-    PID=`cat ./stressd.pid 2> /dev/null`
-    
-    if [ "x$PID" = "x" ]; then
-      echo "$DESC is not running."
-    else
-      kill -9 $PID
-      rm ./stressd.pid
-      echo "$DESC is stopped."
-    fi
-  ;;
-
-  status)
-    PID=`cat ./stressd.pid 2> /dev/null`
-
-    if [ "x$PID" = "x" ]; then
-      echo "$DESC is not running."
-    else
-      echo "$DESC is running with pid $PID."
-    fi
-  ;;
-
-  *)
-    echo "Usage: $0 start|stop|status [-h <host>]"
-  ;;
-esac
-
-# vi:ai sw=4 ts=4 tw=0 et
diff --git a/tools/bin/hash_password b/tools/bin/hash_password
new file mode 100755
index 0000000..dc48ce1
--- /dev/null
+++ b/tools/bin/hash_password
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$CASSANDRA_INCLUDE" = "x" ]; then
+    # Locations (in order) to use when searching for an include file.
+    for include in "`dirname "$0"`/cassandra.in.sh" \
+                   "$HOME/.cassandra.in.sh" \
+                   /usr/share/cassandra/cassandra.in.sh \
+                   /usr/local/share/cassandra/cassandra.in.sh \
+                   /opt/cassandra/cassandra.in.sh; do
+        if [ -r "$include" ]; then
+            . "$include"
+            break
+        fi
+    done
+elif [ -r "$CASSANDRA_INCLUDE" ]; then
+    . "$CASSANDRA_INCLUDE"
+fi
+
+if [ -z "$CLASSPATH" ]; then
+    echo "You must set the CLASSPATH var" >&2
+    exit 1
+fi
+
+if [ "x${MAX_HEAP_SIZE}" = "x" ]; then
+    MAX_HEAP_SIZE="256M"
+fi
+
+if [ "x${MAX_DIRECT_MEMORY}" = "x" ]; then
+    MAX_DIRECT_MEMORY="2G"
+fi
+
+JVM_OPTS="${JVM_OPTS} -Xmx${MAX_HEAP_SIZE} -XX:MaxDirectMemorySize=${MAX_DIRECT_MEMORY}"
+
+"${JAVA}" ${JAVA_AGENT} -ea -cp "${CLASSPATH}" ${JVM_OPTS} \
+        -Dcassandra.storagedir="${cassandra_storagedir}" \
+        -Dlogback.configurationFile=logback-tools.xml \
+        org.apache.cassandra.tools.HashPassword "$@"
diff --git a/tools/fqltool/src/org/apache/cassandra/fqltool/QueryReplayer.java b/tools/fqltool/src/org/apache/cassandra/fqltool/QueryReplayer.java
index 4524e33..1cff804 100644
--- a/tools/fqltool/src/org/apache/cassandra/fqltool/QueryReplayer.java
+++ b/tools/fqltool/src/org/apache/cassandra/fqltool/QueryReplayer.java
@@ -27,7 +27,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
@@ -47,11 +46,13 @@
 import com.datastax.driver.core.Statement;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+
 public class QueryReplayer implements Closeable
 {
     private static final Logger logger = LoggerFactory.getLogger(QueryReplayer.class);
     private static final int PRINT_RATE = 5000;
-    private final ExecutorService es = Executors.newFixedThreadPool(1);
+    private final ExecutorService es = executorFactory().sequential("QueryReplayer");
     private final Iterator<List<FQLQuery>> queryIterator;
     private final List<Predicate<FQLQuery>> filters;
     private final List<Session> sessions;
diff --git a/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java b/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java
index eeebe20..3935e94 100644
--- a/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java
+++ b/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java
@@ -151,4 +151,4 @@
         }
         return sb.toString();
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java b/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
index bc6756b..8aa9867 100644
--- a/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
+++ b/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
@@ -35,6 +35,7 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.SchemaTransformations;
 import org.apache.cassandra.cql3.CQLFragmentParser;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.CqlParser;
@@ -58,6 +59,8 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Utility to write SSTables.
  * <p>
@@ -242,14 +245,16 @@
             throw new InvalidRequestException(String.format("Invalid number of arguments, expecting %d values but got %d", boundNames.size(), values.size()));
 
         QueryOptions options = QueryOptions.forInternalCalls(null, values);
-        List<ByteBuffer> keys = insert.buildPartitionKeyNames(options);
-        SortedSet<Clustering<?>> clusterings = insert.createClustering(options);
+        ClientState state = ClientState.forInternalCalls();
+        List<ByteBuffer> keys = insert.buildPartitionKeyNames(options, state);
+        SortedSet<Clustering<?>> clusterings = insert.createClustering(options, state);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         // Note that we asks indexes to not validate values (the last 'false' arg below) because that triggers a 'Keyspace.open'
         // and that forces a lot of initialization that we don't want.
         UpdateParameters params = new UpdateParameters(insert.metadata(),
                                                        insert.updatedColumns(),
+                                                       ClientState.forInternalCalls(),
                                                        options,
                                                        insert.getTimestamp(TimeUnit.MILLISECONDS.toMicros(now), options),
                                                        (int) TimeUnit.MILLISECONDS.toSeconds(now),
@@ -329,7 +334,7 @@
      */
     public File getInnermostDirectory()
     {
-        return cfs.getDirectories().getDirectoryForNewSSTables();
+        return cfs.getDirectories().getDirectoryForNewSSTables().toJavaIOFile();
     }
 
     /**
@@ -350,7 +355,7 @@
         private IPartitioner partitioner;
 
         private boolean sorted = false;
-        private long bufferSizeInMB = 128;
+        private long bufferSizeInMiB = 128;
 
         protected Builder()
         {
@@ -496,19 +501,38 @@
          * The size of the buffer to use.
          * <p>
          * This defines how much data will be buffered before being written as
-         * a new SSTable. This correspond roughly to the data size that will have the created
+         * a new SSTable. This corresponds roughly to the data size of the created
          * sstable.
          * <p>
-         * The default is 128MB, which should be reasonable for a 1GB heap. If you experience
+         * The default is 128MiB, which should be reasonable for a 1GiB heap. If you experience
          * OOM while using the writer, you should lower this value.
          *
-         * @param size the size to use in MB.
+         * @param size the size to use in MiB.
+         * @return this builder.
+         */
+        public Builder withBufferSizeInMiB(int size)
+        {
+            this.bufferSizeInMiB = size;
+            return this;
+        }
+
+        /**
+         * This method is deprecated in favor of the new withBufferSizeInMiB(int size)
+         * The size of the buffer to use.
+         * <p>
+         * This defines how much data will be buffered before being written as
+         * a new SSTable. This corresponds roughly to the data size of the created
+         * sstable.
+         * <p>
+         * The default is 128MiB, which should be reasonable for a 1GiB heap. If you experience
+         * OOM while using the writer, you should lower this value.
+         *
+         * @param size the size to use in MiB.
          * @return this builder.
          */
         public Builder withBufferSizeInMB(int size)
         {
-            this.bufferSizeInMB = size;
-            return this;
+            return withBufferSizeInMiB(size);
         }
 
         /**
@@ -524,7 +548,7 @@
          * the rows in order, which is rarely the case. If you can provide the
          * rows in order however, using this sorted might be more efficient.
          * <p>
-         * Note that if used, some option like withBufferSizeInMB will be ignored.
+         * Note that if used, some option like withBufferSizeInMiB will be ignored.
          *
          * @return this builder.
          */
@@ -555,7 +579,7 @@
                 UpdateStatement preparedInsert = prepareInsert();
                 AbstractSSTableSimpleWriter writer = sorted
                                                      ? new SSTableSimpleWriter(cfs.getDirectories().getDirectoryForNewSSTables(), cfs.metadata, preparedInsert.updatedColumns())
-                                                     : new SSTableSimpleUnsortedWriter(cfs.getDirectories().getDirectoryForNewSSTables(), cfs.metadata, preparedInsert.updatedColumns(), bufferSizeInMB);
+                                                     : new SSTableSimpleUnsortedWriter(cfs.getDirectories().getDirectoryForNewSSTables(), cfs.metadata, preparedInsert.updatedColumns(), bufferSizeInMiB);
 
                 if (formatType != null)
                     writer.setSSTableFormatType(formatType);
@@ -566,15 +590,14 @@
             }
         }
 
-        private static void createTypes(String keyspace, List<CreateTypeStatement.Raw> typeStatements)
+        private static Types createTypes(String keyspace, List<CreateTypeStatement.Raw> typeStatements)
         {
             KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(keyspace);
             Types.RawBuilder builder = Types.rawBuilder(keyspace);
             for (CreateTypeStatement.Raw st : typeStatements)
                 st.addToRawBuilder(builder);
 
-            ksm = ksm.withSwapped(builder.build());
-            Schema.instance.load(ksm);
+            return builder.build();
         }
 
         public static ColumnFamilyStore createOfflineTable(String schema, List<File> directoryList)
@@ -590,10 +613,10 @@
         {
             String keyspace = schemaStatement.keyspace();
 
-            if (Schema.instance.getKeyspaceMetadata(keyspace) == null)
-                Schema.instance.load(KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1)));
+            Schema.instance.transform(SchemaTransformations.addKeyspace(KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1)), true));
 
-            createTypes(keyspace, typeStatements);
+            Types types = createTypes(keyspace, typeStatements);
+            Schema.instance.transform(SchemaTransformations.addTypes(types, true));
 
             KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(keyspace);
 
@@ -611,13 +634,13 @@
                                      .build();
 
             Keyspace.setInitialized();
-            Directories directories = new Directories(tableMetadata, directoryList.stream().map(Directories.DataDirectory::new).collect(Collectors.toList()));
+            Directories directories = new Directories(tableMetadata, directoryList.stream().map(f -> new Directories.DataDirectory(new org.apache.cassandra.io.util.File(f.toPath()))).collect(Collectors.toList()));
 
             Keyspace ks = Keyspace.openWithoutSSTables(keyspace);
             ColumnFamilyStore cfs =  ColumnFamilyStore.createColumnFamilyStore(ks, tableMetadata.name, TableMetadataRef.forOfflineTools(tableMetadata), directories, false, false, true);
 
             ks.initCfCustom(cfs);
-            Schema.instance.load(ksm.withSwapped(ksm.tables.with(cfs.metadata())));
+            Schema.instance.transform(SchemaTransformations.addTable(tableMetadata, true));
 
             return cfs;
         }
diff --git a/tools/stress/src/org/apache/cassandra/stress/CompactionStress.java b/tools/stress/src/org/apache/cassandra/stress/CompactionStress.java
index 88aa6a4..0e7eec5 100644
--- a/tools/stress/src/org/apache/cassandra/stress/CompactionStress.java
+++ b/tools/stress/src/org/apache/cassandra/stress/CompactionStress.java
@@ -22,7 +22,10 @@
 import java.io.IOError;
 import java.net.URI;
 import java.util.*;
-import java.util.concurrent.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 import javax.inject.Inject;
 
 import com.google.common.collect.Lists;
@@ -33,7 +36,6 @@
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
-import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
@@ -54,6 +56,7 @@
 import org.apache.cassandra.tools.nodetool.CompactionStats;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.concurrent.Future;
 
 /**
  * Tool that allows fast route to loading data for arbitrary schemas to disk
@@ -212,10 +215,9 @@
         public void run()
         {
             //Setup
-            SystemKeyspace.finishStartup(); //needed for early-open
             CompactionManager.instance.setMaximumCompactorThreads(threads);
             CompactionManager.instance.setCoreCompactorThreads(threads);
-            CompactionManager.instance.setRate(0);
+            CompactionManager.instance.setRateInBytes(0);
 
             StressProfile stressProfile = getStressProfile();
             ColumnFamilyStore cfs = initCf(stressProfile, true);
@@ -269,10 +271,10 @@
     @Command(name = "write", description = "write data directly to disk")
     public static class DataWriter extends CompactionStress
     {
-        private static double BYTES_IN_GB = 1024 * 1014 * 1024;
+        private static double BYTES_IN_GIB = 1024 * 1024 * 1024;
 
         @Option(name = { "-g", "--gbsize"}, description = "Total GB size on disk you wish to write", required = true)
-        Integer totalSizeGb;
+        Integer totalSizeGiB;
 
         @Option(name = { "-t", "--threads" }, description = "Number of sstable writer threads (default 2)")
         Integer threads = 2;
@@ -280,7 +282,7 @@
         @Option(name = { "-c", "--partition-count"}, description = "Number of partitions to loop over (default 1000000)")
         Integer partitions = 1000000;
 
-        @Option(name = { "-b", "--buffer-size-mb"}, description = "Buffer in MB writes before writing new sstable (default 128)")
+        @Option(name = { "-b", "--buffer-size-mb"}, description = "Size in MiB of writes to buffer before writing a new sstable (default 128)")
         Integer bufferSize = 128;
 
         @Option(name = { "-r", "--range-aware"}, description = "Splits the local ranges in number of data directories and makes sure we never write the same token in two different directories (default true)")
@@ -322,13 +324,13 @@
                 });
             }
 
-            double currentSizeGB;
-            while ((currentSizeGB = directories.getRawDiretoriesSize() / BYTES_IN_GB) < totalSizeGb)
+            double currentSizeGiB;
+            while ((currentSizeGiB = directories.getRawDiretoriesSize() / BYTES_IN_GIB) < totalSizeGiB)
             {
                 if (finished.getCount() == 0)
                     break;
 
-                System.out.println(String.format("Written %.2fGB of %dGB", currentSizeGB, totalSizeGb));
+                System.out.println(String.format("Written %.2fGiB of %dGiB", currentSizeGiB, totalSizeGiB));
 
                 Uninterruptibles.sleepUninterruptibly(3, TimeUnit.SECONDS);
             }
@@ -336,8 +338,8 @@
             workManager.stop();
             Uninterruptibles.awaitUninterruptibly(finished);
 
-            currentSizeGB = directories.getRawDiretoriesSize() / BYTES_IN_GB;
-            System.out.println(String.format("Finished writing %.2fGB", currentSizeGB));
+            currentSizeGiB = directories.getRawDiretoriesSize() / BYTES_IN_GIB;
+            System.out.println(String.format("Finished writing %.2fGiB", currentSizeGiB));
         }
     }
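
For reference, GiB in the renamed fields above denotes binary gigabytes (1024^3 bytes). A minimal conversion sketch, with an illustrative class name:

    final class GiBConversionSketch
    {
        // 1 GiB = 1024 * 1024 * 1024 bytes
        static final double BYTES_PER_GIB = 1024L * 1024L * 1024L;

        static double bytesToGiB(long bytes)
        {
            return bytes / BYTES_PER_GIB;
        }
    }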
 
diff --git a/tools/stress/src/org/apache/cassandra/stress/Stress.java b/tools/stress/src/org/apache/cassandra/stress/Stress.java
index 1fd808f..c104e5c 100644
--- a/tools/stress/src/org/apache/cassandra/stress/Stress.java
+++ b/tools/stress/src/org/apache/cassandra/stress/Stress.java
@@ -18,14 +18,10 @@
 package org.apache.cassandra.stress;
 
 import java.io.*;
-import java.net.Socket;
-import java.net.SocketException;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.stress.settings.StressSettings;
 import org.apache.cassandra.stress.util.MultiResultLogger;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.WindowsTimer;
 import org.apache.commons.lang3.exception.ExceptionUtils;
 
 public final class Stress
@@ -53,22 +49,11 @@
      *   thread count with a high error rate / low count to get some basic numbers
      */
 
-    private static volatile boolean stopped = false;
-
     public static void main(String[] arguments) throws Exception
     {
-        if (FBUtilities.isWindows)
-            WindowsTimer.startTimerPeriod(1);
-
-        int exitCode = run(arguments);
-
-        if (FBUtilities.isWindows)
-            WindowsTimer.endTimerPeriod(1);
-
-        System.exit(exitCode);
+        System.exit(run(arguments));
     }
 
-
     private static int run(String[] arguments)
     {
         try
@@ -112,52 +97,11 @@
                 logout.addStream(new PrintStream(settings.graph.temporaryLogFile));
             }
 
-            if (settings.sendToDaemon != null)
-            {
-                Socket socket = new Socket(settings.sendToDaemon, 2159);
-
-                ObjectOutputStream out = new ObjectOutputStream(socket.getOutputStream());
-                BufferedReader inp = new BufferedReader(new InputStreamReader(socket.getInputStream()));
-
-                Runtime.getRuntime().addShutdownHook(new ShutDown(socket, out));
-
-                out.writeObject(settings);
-
-                String line;
-
-                try
-                {
-                    while (!socket.isClosed() && (line = inp.readLine()) != null)
-                    {
-                        if (line.equals("END") || line.equals("FAILURE"))
-                        {
-                            out.writeInt(1);
-                            break;
-                        }
-
-                        logout.println(line);
-                    }
-                }
-                catch (SocketException e)
-                {
-                    if (!stopped)
-                        e.printStackTrace();
-                }
-
-                out.close();
-                inp.close();
-
-                socket.close();
-            }
-            else
-            {
-                StressAction stressAction = new StressAction(settings, logout);
-                stressAction.run();
-                logout.flush();
-                if (settings.graph.inGraphMode())
-                    new StressGraph(settings, arguments).generateGraph();
-            }
-
+            StressAction stressAction = new StressAction(settings, logout);
+            stressAction.run();
+            logout.flush();
+            if (settings.graph.inGraphMode())
+                new StressGraph(settings, arguments).generateGraph();
         }
         catch (Throwable t)
         {
@@ -175,37 +119,4 @@
     {
         StressSettings.printHelp();
     }
-
-    private static class ShutDown extends Thread
-    {
-        private final Socket socket;
-        private final ObjectOutputStream out;
-
-        public ShutDown(Socket socket, ObjectOutputStream out)
-        {
-            this.out = out;
-            this.socket = socket;
-        }
-
-        public void run()
-        {
-            try
-            {
-                if (!socket.isClosed())
-                {
-                    System.out.println("Control-C caught. Canceling running action and shutting down...");
-
-                    out.writeInt(1);
-                    out.close();
-
-                    stopped = true;
-                }
-            }
-            catch (IOException e)
-            {
-                e.printStackTrace();
-            }
-        }
-    }
-
 }
diff --git a/tools/stress/src/org/apache/cassandra/stress/StressAction.java b/tools/stress/src/org/apache/cassandra/stress/StressAction.java
index 3268182..f9912ac 100644
--- a/tools/stress/src/org/apache/cassandra/stress/StressAction.java
+++ b/tools/stress/src/org/apache/cassandra/stress/StressAction.java
@@ -39,6 +39,8 @@
 
 import com.google.common.util.concurrent.Uninterruptibles;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class StressAction implements Runnable
 {
 
@@ -315,7 +317,7 @@
 
         void start()
         {
-            start = System.nanoTime();
+            start = nanoTime();
         }
 
         /**
@@ -361,7 +363,7 @@
                 long intendedTime = rateLimiter.acquire(partitionCount);
                 op.intendedStartNs(intendedTime);
                 long now;
-                while ((now = System.nanoTime()) < intendedTime)
+                while ((now = nanoTime()) < intendedTime)
                 {
                     LockSupport.parkNanos(intendedTime - now);
                 }
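
The static import of Clock.Global.nanoTime above routes timing through Cassandra's pluggable clock rather than System.nanoTime(). A minimal sketch of the busy-wait used for rate limiting, assuming cassandra-all on the classpath; the class and method names are illustrative:

    import java.util.concurrent.locks.LockSupport;

    import static org.apache.cassandra.utils.Clock.Global.nanoTime;

    final class IntendedStartSketch
    {
        // Park until the intended start time (in the same nanoTime() timebase) is reached.
        static void awaitIntendedStart(long intendedTimeNs)
        {
            long now;
            while ((now = nanoTime()) < intendedTimeNs)
                LockSupport.parkNanos(intendedTimeNs - now);
        }
    }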
diff --git a/tools/stress/src/org/apache/cassandra/stress/StressGraph.java b/tools/stress/src/org/apache/cassandra/stress/StressGraph.java
index 7a865b4..76a86db 100644
--- a/tools/stress/src/org/apache/cassandra/stress/StressGraph.java
+++ b/tools/stress/src/org/apache/cassandra/stress/StressGraph.java
@@ -20,7 +20,6 @@
 
 import java.io.BufferedReader;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
diff --git a/tools/stress/src/org/apache/cassandra/stress/StressProfile.java b/tools/stress/src/org/apache/cassandra/stress/StressProfile.java
index 5eb478c..cc668b5 100644
--- a/tools/stress/src/org/apache/cassandra/stress/StressProfile.java
+++ b/tools/stress/src/org/apache/cassandra/stress/StressProfile.java
@@ -35,7 +35,6 @@
 import com.google.common.util.concurrent.Uninterruptibles;
 
 import com.datastax.driver.core.*;
-import com.datastax.driver.core.TableMetadata;
 import com.datastax.driver.core.exceptions.AlreadyExistsException;
 import org.antlr.runtime.RecognitionException;
 import org.apache.cassandra.cql3.CQLFragmentParser;
diff --git a/tools/stress/src/org/apache/cassandra/stress/StressServer.java b/tools/stress/src/org/apache/cassandra/stress/StressServer.java
deleted file mode 100644
index c00fb54..0000000
--- a/tools/stress/src/org/apache/cassandra/stress/StressServer.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.cassandra.stress;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.PrintStream;
-import java.net.InetAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.cli.*;
-
-import org.apache.cassandra.concurrent.NamedThreadFactory;
-import org.apache.cassandra.stress.settings.StressSettings;
-import org.apache.cassandra.stress.util.MultiResultLogger;
-import org.apache.cassandra.stress.util.ResultLogger;
-
-public class StressServer
-{
-    private static final Options availableOptions = new Options();
-
-    static
-    {
-        availableOptions.addOption("h", "host", true, "Host to listen for connections.");
-    }
-
-    private static final AtomicInteger threadCounter = new AtomicInteger(1);
-
-    public static void main(String[] args) throws Exception
-    {
-        ServerSocket serverSocket = null;
-        CommandLineParser parser  = new PosixParser();
-
-        InetAddress address = InetAddress.getByName("127.0.0.1");
-
-        try
-        {
-            CommandLine cmd = parser.parse(availableOptions, args);
-
-            if (cmd.hasOption("h"))
-            {
-                address = InetAddress.getByName(cmd.getOptionValue("h"));
-            }
-        }
-        catch (ParseException e)
-        {
-            System.err.printf("Usage: ./bin/stressd start|stop|status [-h <host>]");
-            System.exit(1);
-        }
-
-        try
-        {
-            serverSocket = new ServerSocket(2159, 0, address);
-        }
-        catch (IOException e)
-        {
-            System.err.printf("Could not listen on port: %s:2159.%n", address.getHostAddress());
-            System.exit(1);
-        }
-
-        for (;;)
-            new StressThread(serverSocket.accept()).start();
-    }
-
-    public static class StressThread extends Thread
-    {
-        private final Socket socket;
-
-        public StressThread(Socket client)
-        {
-            this.socket = client;
-        }
-
-        public void run()
-        {
-            try
-            {
-                ObjectInputStream in = new ObjectInputStream(socket.getInputStream());
-                PrintStream out = new PrintStream(socket.getOutputStream());
-                ResultLogger log = new MultiResultLogger(out);
-
-                StressAction action = new StressAction((StressSettings) in.readObject(), log);
-                Thread actionThread = NamedThreadFactory.createThread(action, "stress-" + threadCounter.incrementAndGet());
-                actionThread.start();
-
-                while (actionThread.isAlive())
-                {
-                    try
-                    {
-                        if (in.readInt() == 1)
-                        {
-                            actionThread.interrupt();
-                            break;
-                        }
-                    }
-                    catch (Exception e)
-                    {
-                        // continue without problem
-                    }
-                }
-
-                out.close();
-                in.close();
-                socket.close();
-            }
-            catch (IOException e)
-            {
-                throw new RuntimeException(e.getMessage(), e);
-            }
-            catch (Exception e)
-            {
-                e.printStackTrace();
-            }
-        }
-
-    }
-
-}
diff --git a/tools/stress/src/org/apache/cassandra/stress/generate/PartitionGenerator.java b/tools/stress/src/org/apache/cassandra/stress/generate/PartitionGenerator.java
index 882b8b4..2196fe0 100644
--- a/tools/stress/src/org/apache/cassandra/stress/generate/PartitionGenerator.java
+++ b/tools/stress/src/org/apache/cassandra/stress/generate/PartitionGenerator.java
@@ -104,10 +104,10 @@
     public ByteBuffer convert(int c, Object v)
     {
         if (c < 0)
-            return partitionKey.get(-1-c).type.decompose(v);
+            return partitionKey.get(-1-c).type.decomposeUntyped(v);
         if (c < clusteringComponents.size())
-            return clusteringComponents.get(c).type.decompose(v);
-        return valueComponents.get(c - clusteringComponents.size()).type.decompose(v);
+            return clusteringComponents.get(c).type.decomposeUntyped(v);
+        return valueComponents.get(c - clusteringComponents.size()).type.decomposeUntyped(v);
     }
 
     public Object convert(int c, ByteBuffer v)
diff --git a/tools/stress/src/org/apache/cassandra/stress/generate/PartitionIterator.java b/tools/stress/src/org/apache/cassandra/stress/generate/PartitionIterator.java
index f485ab7..d4867fa 100644
--- a/tools/stress/src/org/apache/cassandra/stress/generate/PartitionIterator.java
+++ b/tools/stress/src/org/apache/cassandra/stress/generate/PartitionIterator.java
@@ -42,6 +42,7 @@
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.stress.generate.values.Generator;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
 
 // a partition is re-used to reduce garbage generation, as is its internal RowIterator
 // TODO: we should batch the generation of clustering components so we can bound the time and size necessary to
@@ -746,9 +747,13 @@
         {
             return seed * 31 + (((UUID) object).getLeastSignificantBits() ^ ((UUID) object).getMostSignificantBits());
         }
+        else if (object instanceof TimeUUID)
+        {
+            return seed * 31 + (((TimeUUID) object).lsb() ^ ((TimeUUID) object).msb());
+        }
         else
         {
-            return seed(type.decompose(object), BytesType.instance, seed);
+            return seed(type.decomposeUntyped(object), BytesType.instance, seed);
         }
     }
 
@@ -766,7 +771,7 @@
             if (i > 0)
                 sb.append("|");
             AbstractType type = generator.partitionKey.get(i++).type;
-            sb.append(type.getString(type.decompose(key)));
+            sb.append(type.getString(type.decomposeUntyped(key)));
         }
         return sb.toString();
     }
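
The hunks above switch value serialization to AbstractType.decomposeUntyped and give TimeUUID its own seeding branch. A rough sketch of that dispatch, assuming cassandra-all on the classpath; the byte-wise fold at the end is a placeholder for the real seeding, not the patch's code:

    import java.nio.ByteBuffer;

    import org.apache.cassandra.db.marshal.AbstractType;
    import org.apache.cassandra.utils.TimeUUID;

    final class SeedDispatchSketch
    {
        static long seed(Object value, AbstractType<?> type, long seed)
        {
            // TimeUUID exposes its raw msb/lsb directly, so no serialization is needed.
            if (value instanceof TimeUUID)
                return seed * 31 + (((TimeUUID) value).lsb() ^ ((TimeUUID) value).msb());

            // decomposeUntyped serializes a value without requiring the type's compile-time T.
            ByteBuffer serialized = type.decomposeUntyped(value);
            long result = seed;
            while (serialized.hasRemaining())
                result = result * 31 + serialized.get();   // placeholder fold
            return result;
        }
    }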
diff --git a/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java b/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java
index 3c15c87..f95061f 100644
--- a/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java
+++ b/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java
@@ -51,4 +51,4 @@
                 bytes[i++] = (byte)v;
         return ByteBuffer.wrap(Arrays.copyOf(bytes, size));
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/generate/values/Generator.java b/tools/stress/src/org/apache/cassandra/stress/generate/values/Generator.java
index 6b39d08..631e9b8 100644
--- a/tools/stress/src/org/apache/cassandra/stress/generate/values/Generator.java
+++ b/tools/stress/src/org/apache/cassandra/stress/generate/values/Generator.java
@@ -30,14 +30,14 @@
 {
 
     public final String name;
-    public final AbstractType<T> type;
+    public final AbstractType<?> type;
     public final Class<?> clazz;
     final long salt;
     final Distribution identityDistribution;
     final Distribution sizeDistribution;
     public final Distribution clusteringDistribution;
 
-    public Generator(AbstractType<T> type, GeneratorConfig config, String name, Class<?> clazz)
+    public Generator(AbstractType<?> type, GeneratorConfig config, String name, Class<?> clazz)
     {
         this.type = type;
         this.name = name;
diff --git a/tools/stress/src/org/apache/cassandra/stress/generate/values/TimeUUIDs.java b/tools/stress/src/org/apache/cassandra/stress/generate/values/TimeUUIDs.java
index 537d54e..969ecbf 100644
--- a/tools/stress/src/org/apache/cassandra/stress/generate/values/TimeUUIDs.java
+++ b/tools/stress/src/org/apache/cassandra/stress/generate/values/TimeUUIDs.java
@@ -24,7 +24,7 @@
 import java.util.UUID;
 
 import org.apache.cassandra.db.marshal.TimeUUIDType;
-import org.apache.cassandra.utils.UUIDGen;
+import org.apache.cassandra.utils.TimeUUID;
 
 public class TimeUUIDs extends Generator<UUID>
 {
@@ -46,6 +46,6 @@
     @Override
     public UUID generate()
     {
-        return UUIDGen.getTimeUUID(dateGen.generate().getTime(), 0L, clockSeqAndNode);
+        return TimeUUID.atUnixMicrosWithLsb(dateGen.generate().getTime() * 1000L, clockSeqAndNode).asUUID();
     }
 }
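
UUIDGen.getTimeUUID is replaced above by the TimeUUID API, which takes microseconds. A minimal sketch, assuming cassandra-all on the classpath; the method name is illustrative:

    import java.util.UUID;

    import org.apache.cassandra.utils.TimeUUID;

    final class TimeUuidSketch
    {
        // atUnixMicrosWithLsb takes microseconds since the epoch, hence the * 1000
        // when starting from a java.util.Date millisecond timestamp.
        static UUID timeUUIDAt(long unixMillis, long clockSeqAndNode)
        {
            return TimeUUID.atUnixMicrosWithLsb(unixMillis * 1000L, clockSeqAndNode).asUUID();
        }
    }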
diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistribution.java b/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistribution.java
index 6d7f9e4..92580a5 100644
--- a/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistribution.java
+++ b/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistribution.java
@@ -25,7 +25,6 @@
 
 import org.apache.cassandra.stress.Operation;
 import org.apache.cassandra.stress.generate.Distribution;
-import org.apache.commons.math3.util.Pair;
 
 public class SampledOpDistribution implements OpDistribution
 {
diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistributionFactory.java b/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistributionFactory.java
index 59f2394..5757640 100644
--- a/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistributionFactory.java
+++ b/tools/stress/src/org/apache/cassandra/stress/operations/SampledOpDistributionFactory.java
@@ -30,7 +30,6 @@
 import org.apache.cassandra.stress.StressAction.MeasurementSink;
 import org.apache.cassandra.stress.generate.DistributionFactory;
 import org.apache.cassandra.stress.generate.DistributionFixed;
-import org.apache.cassandra.stress.generate.PartitionGenerator;
 import org.apache.cassandra.stress.report.Timer;
 import org.apache.commons.math3.distribution.EnumeratedDistribution;
 import org.apache.commons.math3.util.Pair;
diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java
index 8750790..1487a0d 100644
--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java
+++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java
@@ -33,7 +33,6 @@
 import org.apache.cassandra.cql3.conditions.ColumnCondition;
 import org.apache.cassandra.cql3.statements.ModificationStatement;
 import org.apache.cassandra.db.ConsistencyLevel;
-import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.stress.generate.DistributionFixed;
 import org.apache.cassandra.stress.generate.PartitionGenerator;
 import org.apache.cassandra.stress.generate.Row;
@@ -225,4 +224,4 @@
         }
         return statement.bind(bindBuffer);
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java
index 4cfea82..258e688 100644
--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java
+++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java
@@ -151,7 +151,7 @@
     {
         return StressCQLSSTableWriter.builder()
                                .withCfs(cfs)
-                               .withBufferSizeInMB(bufferSize)
+                               .withBufferSizeInMiB(bufferSize)
                                .forTable(tableSchema)
                                .using(insertStatement)
                                .rangeAware(makeRangeAware)
diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java
index fe5f129..ac06793 100644
--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java
+++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java
@@ -24,8 +24,6 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
-import javax.naming.OperationNotSupportedException;
-
 import com.datastax.driver.core.ColumnMetadata;
 import com.datastax.driver.core.PagingState;
 import com.datastax.driver.core.ResultSet;
diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java
index 6d93f4c..e8a640f 100644
--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java
+++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java
@@ -22,7 +22,6 @@
 
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
diff --git a/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java b/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java
index b2afd1b..b50dfd2 100644
--- a/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java
+++ b/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java
@@ -54,7 +54,10 @@
 import org.apache.cassandra.stress.util.Uncertainty;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class StressMetrics implements MeasurementSink
 {
@@ -65,8 +68,8 @@
     private final CountDownLatch stopped = new CountDownLatch(1);
     private final Callable<JmxCollector.GcStats> gcStatsCollector;
     private final HistogramLogWriter histogramWriter;
-    private final long epochNs = System.nanoTime();
-    private final long epochMs = System.currentTimeMillis();
+    private final long epochNs = nanoTime();
+    private final long epochMs = currentTimeMillis();
 
     private volatile JmxCollector.GcStats totalGcStats = new GcStats(0);
 
@@ -159,10 +162,10 @@
     private void reportingLoop(final long logIntervalMillis)
     {
         // align report timing to the nearest second
-        final long currentTimeMs = System.currentTimeMillis();
+        final long currentTimeMs = currentTimeMillis();
         final long startTimeMs = currentTimeMs - (currentTimeMs % 1000);
         // reporting interval starts rounded to the second
-        long reportingStartNs = (System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(currentTimeMs - startTimeMs));
+        long reportingStartNs = (nanoTime() - MILLISECONDS.toNanos(currentTimeMs - startTimeMs));
         final long parkIntervalNs = TimeUnit.MILLISECONDS.toNanos(logIntervalMillis);
         try
         {
@@ -178,7 +181,7 @@
                 reportingStartNs += parkIntervalNs;
             }
 
-            final long end = System.nanoTime();
+            final long end = nanoTime();
             recordInterval(end, end - reportingStartNs);
         }
         catch (Exception e)
@@ -198,7 +201,7 @@
     {
         long parkFor;
         while (!stop &&
-               (parkFor = until - System.nanoTime()) > 0)
+               (parkFor = until - nanoTime()) > 0)
         {
             LockSupport.parkNanos(parkFor);
         }
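
The StressMetrics hunks above also move the reporting loop onto Clock.Global. The second-alignment arithmetic it relies on can be sketched as follows (illustrative class and method names, assuming cassandra-all on the classpath):

    import static java.util.concurrent.TimeUnit.MILLISECONDS;
    import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
    import static org.apache.cassandra.utils.Clock.Global.nanoTime;

    final class ReportAlignmentSketch
    {
        // Round the wall clock down to the whole second, then shift the monotonic
        // reporting origin back by the remainder so intervals land on second boundaries.
        static long alignedReportingStartNs()
        {
            long nowMs = currentTimeMillis();
            long startMs = nowMs - (nowMs % 1000);
            return nanoTime() - MILLISECONDS.toNanos(nowMs - startMs);
        }
    }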
diff --git a/tools/stress/src/org/apache/cassandra/stress/report/Timer.java b/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
index b3df52f..3938b0c 100644
--- a/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
+++ b/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
@@ -23,6 +23,8 @@
 
 import org.apache.cassandra.stress.StressAction.MeasurementSink;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 // a timer - this timer must be used by a single thread, and co-ordinates with other timers by reporting measurements into a shared MeasurementSink
 public final class Timer
 {
@@ -42,7 +44,7 @@
 
     public void stop(long partitionCount, long rowCount, boolean error)
     {
-        sink.record(opType, intendedTimeNs, startTimeNs, System.nanoTime(), rowCount, partitionCount, error);
+        sink.record(opType, intendedTimeNs, startTimeNs, nanoTime(), rowCount, partitionCount, error);
         resetTimes();
     }
 
@@ -58,6 +60,6 @@
 
     public void start()
     {
-        startTimeNs = System.nanoTime();
+        startTimeNs = nanoTime();
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java b/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java
index 018669a..eba276e 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java
@@ -37,7 +37,6 @@
     LOG("Where to log progress to, and the interval at which to do it", SettingsLog.helpPrinter()),
     TRANSPORT("Custom transport factories", SettingsTransport.helpPrinter()),
     PORT("The port to connect to cassandra nodes on", SettingsPort.helpPrinter()),
-    SENDTO("-send-to", "Specify a stress server to send this command to", SettingsMisc.sendToDaemonHelpPrinter()),
     GRAPH("-graph", "Graph recorded metrics", SettingsGraph.helpPrinter()),
     TOKENRANGE("Token range settings", SettingsTokenRange.helpPrinter())
     ;
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/Command.java b/tools/stress/src/org/apache/cassandra/stress/settings/Command.java
index d8ac5d1..8840209 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/Command.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/Command.java
@@ -138,4 +138,4 @@
         throw new AssertionError();
     }
 
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java b/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java
index ba94e3f..c026874 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java
@@ -65,7 +65,6 @@
         availableOptions.addOption("R",  "replication-strategy", true,   "Replication strategy to use (only on insert if keyspace does not exist), default:org.apache.cassandra.locator.SimpleStrategy");
         availableOptions.addOption("O",  "strategy-properties",  true,   "Replication strategy properties in the following format <dc_name>:<num>,<dc_name>:<num>,...");
         availableOptions.addOption("V",  "average-size-values",  false,  "Generate column values of average rather than specific size");
-        availableOptions.addOption("T",  "send-to",              true,   "Send this as a request to the stress daemon at specified address.");
         availableOptions.addOption("I",  "compression",          true,   "Specify the compression to use for sstable, default:no compression");
         availableOptions.addOption("Q",  "query-names",          true,   "Comma-separated list of column names to retrieve from each row.");
         availableOptions.addOption("Z",  "compaction-strategy",  true,   "CompactionStrategy to use.");
@@ -238,10 +237,6 @@
             if (cmd.hasOption("D"))
                 r.add("-node", "file=" + cmd.getOptionValue("D"));
 
-
-            if (cmd.hasOption("send-to"))
-                r.add("-send-to", cmd.getOptionValue("send-to"));
-
             if (cmd.hasOption("Z"))
                 r.add("-schema", "compaction=" + cmd.getOptionValue("Z"));
 
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java
index 314774a..af10719 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java
@@ -233,16 +233,4 @@
     {
         GroupedOptions.printOptions(System.out, type.toLowerCase(), new Uncertainty(), new Count(), new Duration());
     }
-
-    static Runnable helpPrinter(final Command type)
-    {
-        return new Runnable()
-        {
-            @Override
-            public void run()
-            {
-                printHelp(type);
-            }
-        };
-    }
 }
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandPreDefinedMixed.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandPreDefinedMixed.java
index 1df2a06..c272829 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandPreDefinedMixed.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandPreDefinedMixed.java
@@ -25,7 +25,6 @@
 
 import org.apache.cassandra.stress.Operation;
 import org.apache.cassandra.stress.generate.DistributionFactory;
-import org.apache.cassandra.stress.generate.PartitionGenerator;
 import org.apache.cassandra.stress.generate.SeedManager;
 import org.apache.cassandra.stress.operations.OpDistributionFactory;
 import org.apache.cassandra.stress.operations.SampledOpDistributionFactory;
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandUser.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandUser.java
index cbf8a3a..c96060c 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandUser.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandUser.java
@@ -22,7 +22,6 @@
 
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.net.URI;
 import java.util.Arrays;
 import java.util.Collections;
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java
index d040ccc..e953c15 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java
@@ -22,7 +22,6 @@
 
 
 import java.io.File;
-import java.io.IOException;
 import java.io.Serializable;
 import java.text.SimpleDateFormat;
 import java.util.Arrays;
@@ -55,7 +54,7 @@
 
         if (inGraphMode())
         {
-            temporaryLogFile = FileUtils.createTempFile("cassandra-stress", ".log");
+            temporaryLogFile = FileUtils.createTempFile("cassandra-stress", ".log").toJavaIOFile();
         }
         else
         {
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMisc.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMisc.java
index 3e69754..1f21a61 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMisc.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMisc.java
@@ -216,29 +216,4 @@
             }
         });
     }
-
-    static Runnable sendToDaemonHelpPrinter()
-    {
-        return () -> {
-            System.out.println("Usage: -sendto <host>");
-            System.out.println();
-            System.out.println("Specify a host running the stress server to send this stress command to");
-        };
-    }
-
-    static String getSendToDaemon(Map<String, String[]> clArgs)
-    {
-        String[] params = clArgs.remove("-send-to");
-        if (params == null)
-            params = clArgs.remove("-sendto");
-        if (params == null)
-            return null;
-        if (params.length != 1)
-        {
-            sendToDaemonHelpPrinter().run();
-            System.out.println("Invalid -sendto specifier: " + Arrays.toString(params));
-            System.exit(1);
-        }
-        return params[0];
-    }
 }
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java b/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java
index 4287123..2f76dfb 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java
@@ -43,7 +43,6 @@
     public final SettingsSchema schema;
     public final SettingsTransport transport;
     public final SettingsPort port;
-    public final String sendToDaemon;
     public final SettingsGraph graph;
     public final SettingsTokenRange tokenRange;
 
@@ -59,7 +58,6 @@
                           SettingsSchema schema,
                           SettingsTransport transport,
                           SettingsPort port,
-                          String sendToDaemon,
                           SettingsGraph graph,
                           SettingsTokenRange tokenRange)
     {
@@ -75,7 +73,6 @@
         this.schema = schema;
         this.transport = transport;
         this.port = port;
-        this.sendToDaemon = sendToDaemon;
         this.graph = graph;
         this.tokenRange = tokenRange;
     }
@@ -189,7 +186,6 @@
         SettingsCommand command = SettingsCommand.get(clArgs);
         if (command == null)
             throw new IllegalArgumentException("No command specified");
-        String sendToDaemon = SettingsMisc.getSendToDaemon(clArgs);
         SettingsPort port = SettingsPort.get(clArgs);
         SettingsRate rate = SettingsRate.get(clArgs, command);
         SettingsPopulation generate = SettingsPopulation.get(clArgs, command);
@@ -220,7 +216,7 @@
             System.exit(1);
         }
 
-        return new StressSettings(command, rate, generate, insert, columns, errors, log, mode, node, schema, transport, port, sendToDaemon, graph, tokenRange);
+        return new StressSettings(command, rate, generate, insert, columns, errors, log, mode, node, schema, transport, port, graph, tokenRange);
     }
 
     private static Map<String, String[]> parseMap(String[] args)
@@ -294,8 +290,6 @@
         transport.printSettings(out);
         out.println("Port:");
         port.printSettings(out);
-        out.println("Send To Daemon:");
-        out.printf("  " + (sendToDaemon != null ? sendToDaemon : "*not set*") + "%n");
         out.println("Graph:");
         graph.printSettings(out);
         out.println("TokenRange:");
diff --git a/tools/stress/src/org/apache/cassandra/stress/util/JavaDriverClient.java b/tools/stress/src/org/apache/cassandra/stress/util/JavaDriverClient.java
index cf0eede..f05c513 100644
--- a/tools/stress/src/org/apache/cassandra/stress/util/JavaDriverClient.java
+++ b/tools/stress/src/org/apache/cassandra/stress/util/JavaDriverClient.java
@@ -158,7 +158,7 @@
         if (loadBalancingPolicy != null)
             clusterBuilder.withLoadBalancingPolicy(loadBalancingPolicy);
         clusterBuilder.withCompression(compression);
-        if (encryptionOptions.isEnabled())
+        if (encryptionOptions.getEnabled())
         {
             SSLContext sslContext;
             sslContext = SSLFactory.createSSLContext(encryptionOptions, true);
diff --git a/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java b/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java
index f0c99b8..417251f 100644
--- a/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java
+++ b/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java
@@ -102,4 +102,4 @@
 
         assertEquals("\n", output.toString());
     }
-}
\ No newline at end of file
+}